Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC fixes from Olof Johansson:
 "A first set of batches of fixes for 3.13.  The diffstat is large
  mostly because we're adding a defconfig for a family that's been
  lacking it, and there's some missing clock information added for i.MX
  and OMAP.

  The at91 new code is around dealing with RTC/RTT reset at boot to fix
  possible hangs due to pending wakeup interrupts coming in during early
  boot"

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (29 commits)
  ARM: OMAP2+: Fix build for dra7xx without omap4 and 5
  ARM: OMAP2+: omap_device: maintain sane runtime pm status around suspend/resume
  doc: devicetree: Add bindings documentation for omap-des driver
  ARM: dts: doc: Document missing compatible property for omap-sham driver
  ARM: OMAP3: Beagle: fix return value check in beagle_opp_init()
  ARM: at91: fix hanged boot due to early rtt-interrupt
  ARM: at91: fix hanged boot due to early rtc-interrupt
  video: exynos_mipi_dsim: Remove unused variable
  ARM: highbank: only select errata 764369 if SMP
  ARM: sti: only select errata 764369 if SMP
  ARM: tegra: init fuse before setting reset handler
  ARM: vt8500: add defconfig for v6/v7 chips
  ARM: integrator_cp: Set LCD{0,1} enable lines when turning on CLCD
  ARM: OMAP: devicetree: fix SPI node compatible property syntax items
  pinctrl: single: call pcs_soc->rearm() whenever IRQ mask is changed
  ARM: OMAP2+: smsc911x: fix return value check in gpmc_smsc911x_init()
  MAINTAINERS: drop discontinued mailing list
  ARM: dts: i.MX51: Fix OTG PHY clock
  ARM: imx: set up pllv3 POWER and BYPASS sequentially
  ARM: imx: pllv3 needs relock in .set_rate() call
  ...
diff --git a/CREDITS b/CREDITS
index b928516..4fc997d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2576,7 +2576,7 @@
 S: Canada
 
 N: Zwane Mwaikambo
-E: zwane@arm.linux.org.uk
+E: zwanem@gmail.com
 D: Various driver hacking
 D: Lowlevel x86 kernel hacking
 D: General debugging
@@ -2895,6 +2895,11 @@
 S: Wexham SL3 6PJ
 S: United Kingdom
 
+N: Richard Purdie
+E: rpurdie@rpsys.net
+D: Backlight subsystem maintainer
+S: United Kingdom
+
 N: Daniel Quinlan
 E: quinlan@pathname.com
 W: http://www.pathname.com/~quinlan/
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index 1006982..1fafc4b0 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -72,3 +72,16 @@
 
 It's up to the developer to place their interfaces in the category they
 wish for it to start out in.
+
+
+Notable bits of non-ABI, which should not under any circumstances be considered
+stable:
+
+- Kconfig.  Userspace should not rely on the presence or absence of any
+  particular Kconfig symbol, in /proc/config.gz, in the copy of .config
+  commonly installed to /boot, or in any invocation of the kernel build
+  process.
+
+- Kernel-internal symbols.  Do not rely on the presence, absence, location, or
+  type of any kernel symbol, either in System.map files or the kernel binary
+  itself.  See Documentation/stable_api_nonsense.txt.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index bfd119a..1399bb2 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -104,7 +104,7 @@
 		One of the following ASCII strings, representing the device
 		type:
 
-		absent, ram, rom, nor, nand, dataflash, ubi, unknown
+		absent, ram, rom, nor, nand, mlc-nand, dataflash, ubi, unknown
 
 What:		/sys/class/mtd/mtdX/writesize
 Date:		April 2009
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index bdc0070..7f34a95 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/testing/sysfs-class-net-batman-adv
@@ -1,13 +1,13 @@
 
 What:           /sys/class/net/<iface>/batman-adv/iface_status
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates the status of <iface> as it is seen by batman.
 
 What:           /sys/class/net/<iface>/batman-adv/mesh_iface
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 The /sys/class/net/<iface>/batman-adv/mesh_iface file
                 displays the batman mesh interface this <iface>
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index bdcd8b4..0baa657 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -1,22 +1,23 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates whether the batman protocol messages of the
                 mesh <mesh_iface> shall be aggregated or not.
 
-What:           /sys/class/net/<mesh_iface>/mesh/ap_isolation
+What:           /sys/class/net/<mesh_iface>/mesh/<vlan_subdir>/ap_isolation
 Date:           May 2011
-Contact:        Antonio Quartulli <ordex@autistici.org>
+Contact:        Antonio Quartulli <antonio@meshcoding.com>
 Description:
                 Indicates whether the data traffic going from a
                 wireless client to another wireless client will be
-                silently dropped.
+                silently dropped. <vlan_subdir> is empty when referring
+                to the untagged LAN.
 
 What:           /sys/class/net/<mesh_iface>/mesh/bonding
 Date:           June 2010
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the data traffic going through the
                 mesh will be sent using multiple interfaces at the
@@ -24,7 +25,7 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
 Date:           November 2011
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the bridge loop avoidance feature
                 is enabled. This feature detects and avoids loops
@@ -41,21 +42,21 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the bandwidth which is propagated by this
                 node if gw_mode was set to 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_mode
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the state of the gateway features. Can be
                 either 'off', 'client' or 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_sel_class
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the selection criteria this node will use
                 to choose a gateway if gw_mode was set to 'client'.
@@ -77,25 +78,14 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the interval in milliseconds in which batman
                 sends its protocol messages.
 
 What:           /sys/class/net/<mesh_iface>/mesh/routing_algo
 Date:           Dec 2011
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the routing procotol this mesh instance
                 uses to find the optimal paths through the mesh.
-
-What:           /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Each batman node only maintains information about its
-                own local neighborhood, therefore generating graphs
-                showing the topology of the entire mesh is not easily
-                feasible without having a central instance to collect
-                the local topologies from all nodes. This file allows
-                to activate the collecting (server) mode.
diff --git a/Documentation/ABI/testing/sysfs-class-powercap b/Documentation/ABI/testing/sysfs-class-powercap
new file mode 100644
index 0000000..db3b3ff
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-powercap
@@ -0,0 +1,152 @@
+What:		/sys/class/powercap/
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		The powercap/ class sub directory belongs to the power cap
+		subsystem. Refer to
+		Documentation/power/powercap/powercap.txt for details.
+
+What:		/sys/class/powercap/<control type>
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		A <control type> is a unique name under /sys/class/powercap.
+		Here <control type> determines how the power is going to be
+		controlled. A <control type> can contain multiple power zones.
+
+What:		/sys/class/powercap/<control type>/enabled
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		This allows one to enable/disable power capping for a
+		"control type". This status affects every power zone using
+		this "control type".
+
+What:		/sys/class/powercap/<control type>/<power zone>
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		A power zone is a single device or a collection of devices that can
+		be independently monitored and controlled. A power zone sysfs
+		entry is qualified with the name of the <control type>.
+		E.g. intel-rapl:0:1:1.
+
+What:		/sys/class/powercap/<control type>/<power zone>/<child power zone>
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Power zones may be organized in a hierarchy in which child
+		power zones provide monitoring and control for a subset of
+		devices under the parent. For example, if there is a parent
+		power zone for a whole CPU package, each CPU core in it can
+		be a child power zone.
+
+What:		/sys/class/powercap/.../<power zone>/name
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Specifies the name of this power zone.
+
+What:		/sys/class/powercap/.../<power zone>/energy_uj
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Current energy counter in micro-joules. Write "0" to reset.
+		If the counter cannot be reset, then this attribute is
+		read-only.
+
+What:		/sys/class/powercap/.../<power zone>/max_energy_range_uj
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Range of the above energy counter in micro-joules.
+
+
+What:		/sys/class/powercap/.../<power zone>/power_uw
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Current power in micro-watts.
+
+What:		/sys/class/powercap/.../<power zone>/max_power_range_uw
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Range of the above power value in micro-watts.
+
+What:		/sys/class/powercap/.../<power zone>/constraint_X_name
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Each power zone can define one or more constraints. Each
+		constraint can have an optional name. Here "X" can have values
+		from 0 to max integer.
+
+What:		/sys/class/powercap/.../<power zone>/constraint_X_power_limit_uw
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Power limit in micro-watts, applicable over the time window
+		specified by "constraint_X_time_window_us".
+		Here "X" can have values from 0 to max integer.
+
+What:		/sys/class/powercap/.../<power zone>/constraint_X_time_window_us
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Time window in microseconds. This is used along with
+		constraint_X_power_limit_uw to define a power constraint.
+		Here "X" can have values from 0 to max integer.
+
+
+What:		/sys/class/powercap/<control type>/.../constraint_X_max_power_uw
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Maximum allowed power in micro-watts for this constraint.
+		Here "X" can have values from 0 to max integer.
+
+What:		/sys/class/powercap/<control type>/.../constraint_X_min_power_uw
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Minimum allowed power in micro-watts for this constraint.
+		Here "X" can have values from 0 to max integer.
+
+What:		/sys/class/powercap/.../<power zone>/constraint_X_max_time_window_us
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Maximum allowed time window in microseconds for this
+		constraint. Here "X" can have values from 0 to max integer.
+
+What:		/sys/class/powercap/.../<power zone>/constraint_X_min_time_window_us
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		Minimum allowed time window in microseconds for this
+		constraint. Here "X" can have values from 0 to max integer.
+
+What:		/sys/class/powercap/.../<power zone>/enabled
+Date:		September 2013
+KernelVersion:	3.13
+Contact:	linux-pm@vger.kernel.org
+Description:
+		This allows one to enable/disable power capping at the power
+		zone level. This applies to the current power zone and its
+		children.
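
A minimal user-space sketch of the interface described above, reading a zone's
energy counter and programming a power limit. The zone name "intel-rapl:0" and
the 15 W value are illustrative; real zone names and sane limits are platform
dependent.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long energy_uj;
		FILE *f;

		/* Read the zone's current energy counter. */
		f = fopen("/sys/class/powercap/intel-rapl:0/energy_uj", "r");
		if (!f || fscanf(f, "%llu", &energy_uj) != 1)
			return 1;
		fclose(f);
		printf("energy: %llu uJ\n", energy_uj);

		/* Program constraint 0 of the same zone to 15 W. */
		f = fopen("/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw", "w");
		if (!f)
			return 1;
		fprintf(f, "15000000");
		fclose(f);
		return 0;
	}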
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos b/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos
new file mode 100644
index 0000000..1d6a8cf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos
@@ -0,0 +1,178 @@
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/control
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one select which data from which
+		profile will be	read next. The data has to be 3 bytes long.
+		This file is writeonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/profile
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The keyboard can store 5 profiles which can be switched by the
+		press of a button. profile holds the index of the active profile.
+		This value is persistent, so its value determines the profile
+		that's active when the device is powered on next time.
+		When written, the device activates the set profile immediately.
+		The data has to be 3 bytes long.
+		The device will reject invalid data.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_primary
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the default of all keys for
+		a specific profile. Profile index is included in written data.
+		The data has to be 125 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_function
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the function of the
+		function keys for a specific profile. Profile index is included
+		in written data. The data has to be 95 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_macro
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the function of the macro
+		keys for a specific profile. Profile index is included in
+		written data. The data has to be 35 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_thumbster
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the function of the
+		thumbster keys for a specific profile. Profile index is included
+		in written data. The data has to be 23 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_extra
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the function of the
+		capslock and function keys for a specific profile. Profile index
+		is included in written data. The data has to be 8 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_easyzone
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the function of the
+		easyzone keys for a specific profile. Profile index is included
+		in written data. The data has to be 294 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/key_mask
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one deactivate certain keys like
+		the Windows and application keys, to prevent accidental presses.
+		Profile index for which these settings apply is included in
+		the written data. The data has to be 6 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the backlight intensity for
+		a specific profile. Profile index is included in written data.
+		This attribute is only valid for the glow and pro variant.
+		The data has to be 16 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/macro
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one store macros with up to 480
+		keystrokes for a specific button for a specific profile.
+		Button and profile indexes are included in written data.
+		The data has to be 2002 bytes long.
+		Before reading this file, control has to be written to select
+		which profile and key to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/info
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns general data like firmware version.
+		The data is 8 bytes long.
+		This file is readonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/reset
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one reset the device.
+		The data has to be 3 bytes long.
+		This file is writeonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/talk
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one trigger easyshift functionality
+		from the host.
+		The data has to be 16 bytes long.
+		This file is writeonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light_control
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one switch between stored and custom
+		light settings.
+		This attribute is only valid for the pro variant.
+		The data has to be 8 bytes long.
+		This file is writeonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/stored_lights
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set per-key lighting for different
+		layers.
+		This attribute is only valid for the pro variant.
+		The data has to be 1382 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/custom_lights
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set the actual per-key lighting.
+		This attribute is only valid for the pro variant.
+		The data has to be 20 bytes long.
+		This file is writeonly.
+Users:		http://roccat.sourceforge.net
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light_macro
+Date:		October 2013
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, this file lets one set a light macro that is looped
+		whenever the device goes into dimness mode.
+		This attribute is only valid for the pro variant.
+		The data has to be 2002 bytes long.
+		Before reading this file, control has to be written to select
+		which profile to read.
+Users:		http://roccat.sourceforge.net
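
A sketch of the select-then-read sequence these attributes describe: write 3
bytes to "control" to pick a profile, then read that profile's data. The three
control bytes below are placeholders (their semantics are defined by the
Roccat userland tools), and the program is assumed to run from the
roccatryos<minor> sysfs directory.

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char ctrl[3] = { 0, 0, 0 };	/* placeholder bytes */
		char keys[125];				/* keys_primary is 125 bytes */
		int fd;

		/* Select which profile the next read will return. */
		fd = open("control", O_WRONLY);
		if (fd < 0 || write(fd, ctrl, sizeof(ctrl)) != sizeof(ctrl))
			return 1;
		close(fd);

		fd = open("keys_primary", O_RDONLY);
		if (fd < 0 || read(fd, keys, sizeof(keys)) != sizeof(keys))
			return 1;
		close(fd);
		return 0;
	}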
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-wiimote b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
index ed5dd56..39dfa5c 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-wiimote
+++ b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
@@ -57,3 +57,21 @@
 		Calibration data is already applied by the kernel to all input
 		values but may be used by user-space to perform other
 		transformations.
+
+What:		/sys/bus/hid/drivers/wiimote/<dev>/pro_calib
+Date:		October 2013
+KernelVersion:	3.13
+Contact:	David Herrmann <dh.herrmann@gmail.com>
+Description:	This attribute is only provided if the device was detected as a
+		pro-controller. It provides a single line with 4 calibration
+		values for all 4 analog sticks. Format is: "x1:y1 x2:y2". Data
+		is prefixed with a +/-. Each value is a signed 16bit number.
+		Data is encoded as decimal numbers and specifies the offsets of
+		the analog sticks of the pro-controller.
+		Calibration data is already applied by the kernel to all input
+		values but may be used by user-space to perform other
+		transformations.
+		Calibration data is detected by the kernel during device setup.
+		You can write "scan\n" into this file to re-trigger calibration.
+		You can also write data directly in the form "x1:y1 x2:y2" to
+		set the calibration values manually.
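
Re-triggering calibration from user space is then a single write; in this
sketch, <dev> is a placeholder for the actual HID device name.

	#include <stdio.h>

	int main(void)
	{
		/* <dev> must be replaced with the real HID device name. */
		FILE *f = fopen("/sys/bus/hid/drivers/wiimote/<dev>/pro_calib", "w");

		if (!f)
			return 1;
		fputs("scan\n", f);	/* re-trigger calibration */
		return fclose(f) ? 1 : 0;
	}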
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 14129f1..5e98303 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -101,14 +101,23 @@
 because this shows that you did think about these issues wrt. your
 device.
 
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
 
-	int dma_set_mask(struct device *dev, u64 mask);
+	int dma_set_mask_and_coherent(struct device *dev, u64 mask);
 
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
 
-	int dma_set_coherent_mask(struct device *dev, u64 mask);
+	The query for streaming mappings is performed via a call to
+	dma_set_mask():
+
+		int dma_set_mask(struct device *dev, u64 mask);
+
+	The query for consistent allocations is performed via a call
+	to dma_set_coherent_mask():
+
+		int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 Here, dev is a pointer to the device struct of your device, and mask
 is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@
 
 The standard 32-bit addressing device would do something like this:
 
-	if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING
 		       "mydev: No suitable DMA available.\n");
 		goto ignore_this_device;
@@ -171,22 +180,20 @@
 
 	int using_dac, consistent_using_dac;
 
-	if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
 		using_dac = 1;
 	   	consistent_using_dac = 1;
-		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
-	} else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+	} else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
 		using_dac = 0;
 		consistent_using_dac = 0;
-		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	} else {
 		printk(KERN_WARNING
 		       "mydev: No suitable DMA available.\n");
 		goto ignore_this_device;
 	}
 
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask can always be set to the same or a smaller mask than
+the streaming mask. However, for the rare case that a
 device driver only uses consistent allocations, one would have to
 check the return value from dma_set_coherent_mask().
 
@@ -199,9 +206,9 @@
 		goto ignore_this_device;
 	}
 
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided.  The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided.  The
+kernel will use this information later when you make DMA mappings.
 
 There is a case which we are aware of at this time, which is worth
 mentioning in this documentation.  If your device supports multiple
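
Putting the fragments above together, a probe-time capability query following
the recommended pattern could look like this sketch (the mydev naming is
illustrative):

	#include <linux/dma-mapping.h>

	static int mydev_dma_init(struct device *dev)
	{
		/* Prefer 64-bit addressing, fall back to 32-bit. */
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
			dev_info(dev, "mydev: using 64-bit DMA\n");
		} else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
			dev_info(dev, "mydev: using 32-bit DMA\n");
		} else {
			dev_warn(dev, "mydev: No suitable DMA available\n");
			return -ENODEV;
		}
		return 0;
	}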
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78a6c56..e865279 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -142,6 +142,14 @@
 driver writers.
 
 int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
+int
 dma_set_mask(struct device *dev, u64 mask)
 
 Checks to see if the mask is possible and updates the device
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index e59480d..cc2450d 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -13,7 +13,7 @@
 strictly order DMA from a device across all intervening busses and
 bridges.  This barrier is not specific to a particular type of
 interconnect, it applies to the system as a whole, and so its
-implementation must account for the idiosyncracies of the system all
+implementation must account for the idiosyncrasies of the system all
 the way from the DMA device to memory.
 
 As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
@@ -60,7 +60,7 @@
 Buffers allocated with this attribute can be only passed to user space
 by calling dma_mmap_attrs(). By using this API, you are guaranteeing
 that you won't dereference the pointer returned by dma_alloc_attr(). You
-can threat it as a cookie that must be passed to dma_mmap_attrs() and
+can treat it as a cookie that must be passed to dma_mmap_attrs() and
 dma_free_attrs(). Make sure that both of these also get this attribute
 set on each call.
 
@@ -82,7 +82,7 @@
 (usually it means that the cache has been flushed or invalidated
 depending on the dma direction). However, next calls to
 dma_map_{single,page,sg}() for other devices will perform exactly the
-same sychronization operation on the CPU cache. CPU cache sychronization
+same synchronization operation on the CPU cache. CPU cache synchronization
 might be a time consuming operation, especially if the buffers are
 large, so it is highly recommended to avoid it if possible.
 DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index f403ec3..46ad6fa 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -152,8 +152,8 @@
 !Finclude/net/cfg80211.h cfg80211_scan_request
 !Finclude/net/cfg80211.h cfg80211_scan_done
 !Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_frame
-!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_width_frame
+!Finclude/net/cfg80211.h cfg80211_inform_bss_width
 !Finclude/net/cfg80211.h cfg80211_unlink_bss
 !Finclude/net/cfg80211.h cfg80211_find_ie
 !Finclude/net/cfg80211.h ieee80211_bss_get_ie
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 09e884e..19f2a5a 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1958,7 +1958,7 @@
   <chapter id="apiref-mutex">
    <title>Mutex API reference</title>
 !Iinclude/linux/mutex.h
-!Ekernel/mutex.c
+!Ekernel/locking/mutex.c
   </chapter>
 
   <chapter id="apiref-futex">
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index a248f42..cd11926 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -1222,8 +1222,6 @@
 #define NAND_BBT_VERSION	0x00000100
 /* Create a bbt if none axists */
 #define NAND_BBT_CREATE		0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES	0x00000400
 /* Write bbt if neccecary */
 #define NAND_BBT_WRITE		0x00001000
 /* Read and write back block contents when writing bbt */
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index bccf602..6f45856 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -525,8 +525,9 @@
 6. Other interesting functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-pci_find_slot()			Find pci_dev corresponding to given bus and
-				slot numbers.
+pci_get_domain_bus_and_slot()	Find pci_dev corresponding to given domain,
+				bus and slot numbers. If the device is
+				found, its reference count is increased.
 pci_set_power_state()		Set PCI Power Management state (0=D0 ... 3=D3)
 pci_find_capability()		Find specified capability in device's capability
 				list.
@@ -582,7 +583,8 @@
 
 pci_find_device()	Superseded by pci_get_device()
 pci_find_subsys()	Superseded by pci_get_subsys()
-pci_find_slot()		Superseded by pci_get_slot()
+pci_find_slot()		Superseded by pci_get_domain_bus_and_slot()
+pci_get_slot()		Superseded by pci_get_domain_bus_and_slot()
 
 
 The alternative is the traditional PCI device driver that walks PCI
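
A short sketch of the replacement lookup; the domain/bus/slot values below are
examples only, and the reference taken by the lookup must be dropped with
pci_dev_put():

	#include <linux/pci.h>

	static void mydrv_lookup_example(void)
	{
		struct pci_dev *pdev;

		/* Domain 0, bus 3, device 0, function 0: example values. */
		pdev = pci_get_domain_bus_and_slot(0, 3, PCI_DEVFN(0, 0));
		if (!pdev)
			return;
		dev_info(&pdev->dev, "found %04x:%04x\n",
			 pdev->vendor, pdev->device);
		pci_dev_put(pdev);	/* drop the lookup's reference */
	}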
diff --git a/Documentation/backlight/lp855x-driver.txt b/Documentation/backlight/lp855x-driver.txt
index 1c732f0..01bce24 100644
--- a/Documentation/backlight/lp855x-driver.txt
+++ b/Documentation/backlight/lp855x-driver.txt
@@ -4,7 +4,8 @@
 Backlight driver for LP855x ICs
 
 Supported chips:
-	Texas Instruments LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
+	Texas Instruments LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
+	LP8557
 
 Author: Milo(Woogyom) Kim <milo.kim@ti.com>
 
@@ -24,7 +25,7 @@
 
 2) chip_id
 The lp855x chip id.
-Value : lp8550/lp8551/lp8552/lp8553/lp8556/lp8557
+Value : lp8550/lp8551/lp8552/lp8553/lp8555/lp8556/lp8557
 
 Platform data for lp855x
 ------------------------
diff --git a/Documentation/blockdev/floppy.txt b/Documentation/blockdev/floppy.txt
index 470fe4b..e2240f5 100644
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -39,15 +39,15 @@
 ============================
 
  If you use the floppy driver as a module, use the following syntax:
-modprobe floppy <options>
+modprobe floppy floppy="<options>"
 
 Example:
- modprobe floppy omnibook messages
+ modprobe floppy floppy="omnibook messages"
 
  If you need certain options enabled every time you load the floppy driver,
 you can put:
 
- options floppy omnibook messages
+ options floppy floppy="omnibook messages"
 
 in a configuration file in /etc/modprobe.d/.
 
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 8af4ad1..e2bc132 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -573,15 +573,19 @@
 node.  One of the use cases is evaluating application performance by
 combining this information with the application's CPU allocation.
 
-We export "total", "file", "anon" and "unevictable" pages per-node for
-each memcg.  The ouput format of memory.numa_stat is:
+Each memcg's numa_stat file includes "total", "file", "anon" and "unevictable"
+per-node page counts, as well as "hierarchical_<counter>" entries which sum up
+all hierarchical children's values in addition to the memcg's own value.
+
+The output format of memory.numa_stat is:
 
 total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
 file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
 anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
 unevictable=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
+hierarchical_<counter>=<counter pages> N0=<node 0 pages> N1=<node 1 pages> ...
 
-And we have total = file + anon + unevictable.
+The "total" count is the sum of file + anon + unevictable.
 
 6. Hierarchy support
 
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 40282e6..8b1a445 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -23,8 +23,8 @@
 1.1  Initialization
 1.2  Per-CPU Initialization
 1.3  verify
-1.4  target or setpolicy?
-1.5  target
+1.4  target/target_index or setpolicy?
+1.5  target/target_index
 1.6  setpolicy
 2.   Frequency Table Helpers
 
@@ -56,7 +56,8 @@
 cpufreq_driver.verify -		A pointer to a "verification" function.
 
 cpufreq_driver.setpolicy _or_ 
-cpufreq_driver.target -		See below on the differences.
+cpufreq_driver.target/
+target_index		-	See below on the differences.
 
 And optionally
 
@@ -66,7 +67,7 @@
 				which is called with interrupts disabled
 				and _before_ the pre-suspend frequency
 				and/or policy is restored by a call to
-				->target or ->setpolicy.
+				->target/target_index or ->setpolicy.
 
 cpufreq_driver.attr -		A pointer to a NULL-terminated list of
 				"struct freq_attr" which allow to
@@ -103,8 +104,8 @@
 				this CPU. A few moments later,
 				cpufreq_driver.verify and either
 				cpufreq_driver.setpolicy or
-				cpufreq_driver.target is called with
-				these values.
+				cpufreq_driver.target/target_index is called
+				with these values.
 
 For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the
 frequency table helpers might be helpful. See the section 2 for more information
@@ -133,20 +134,28 @@
 policy->max first, and only if this is no solution, decrease policy->min.
 
 
-1.4 target or setpolicy?
+1.4 target/target_index or setpolicy?
 ----------------------------
 
 Most cpufreq drivers or even most cpu frequency scaling algorithms 
 only allow the CPU to be set to one frequency. For these, you use the
-->target call.
+->target/target_index call.
 
 Some cpufreq-capable processors switch the frequency between certain
 limits on their own. These shall use the ->setpolicy call
 
 
-1.4. target
+1.5. target/target_index
 -------------
 
+The target_index call has two arguments: struct cpufreq_policy *policy,
+and unsigned int index (into the exposed frequency table).
+
+The CPUfreq driver must set the new frequency when called here. The
+actual frequency must be determined by freq_table[index].frequency.
+
+Deprecated:
+----------
 The target call has three arguments: struct cpufreq_policy *policy,
 unsigned int target_frequency, unsigned int relation.
 
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 219970b..77ec215 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -40,7 +40,7 @@
 cpu frequency scaling algorithms only offer the CPU to be set to one
 frequency. In order to offer dynamic frequency scaling, the cpufreq
 core must be able to tell these drivers of a "target frequency". So
-these specific drivers will be transformed to offer a "->target"
+these specific drivers will be transformed to offer a "->target/target_index"
 call instead of the existing "->setpolicy" call. For "longrun", all
 stays the same, though.
 
@@ -71,7 +71,7 @@
 		    /			       the limits of policy->{min,max}
 		   /			            \
 		  /				     \
-	Using the ->setpolicy call,		 Using the ->target call,
+	Using the ->setpolicy call,		 Using the ->target/target_index call,
 	    the limits and the			  the frequency closest
 	     "policy" is set.			  to target_freq is set.
 						  It is assured that it
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 786dc82..8cb9938 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -5,7 +5,7 @@
 			Rusty Russell <rusty@rustcorp.com.au>
 			Srivatsa Vaddagiri <vatsa@in.ibm.com>
 		i386:
-			Zwane Mwaikambo <zwane@arm.linux.org.uk>
+			Zwane Mwaikambo <zwanem@gmail.com>
 		ppc64:
 			Nathan Lynch <nathanl@austin.ibm.com>
 			Joel Schopp <jschopp@austin.ibm.com>
diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt
index 12c6bd5..d9020f5 100644
--- a/Documentation/cpuidle/governor.txt
+++ b/Documentation/cpuidle/governor.txt
@@ -25,5 +25,4 @@
 
 Interfaces:
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
 struct cpuidle_governor
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
index d7c440b..df52a84 100644
--- a/Documentation/device-mapper/cache-policies.txt
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -30,8 +30,10 @@
 
 This policy is the default.
 
-The multiqueue policy has two sets of 16 queues: one set for entries
-waiting for the cache and another one for those in the cache.
+The multiqueue policy has three sets of 16 queues: one set for entries
+waiting for the cache and another two for those in the cache (a set for
+clean entries and a set for dirty entries).
+
 Cache entries in the queues are aged based on logical time. Entry into
 the cache is based on variable thresholds and queue selection is based
 on hit count on entry. The policy aims to take different cache miss
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 33d45ee..274752f 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -68,10 +68,11 @@
 block sizes are bad because they increase the amount of metadata (both
 in core and on disk).
 
-Writeback/writethrough
-----------------------
+Cache operating modes
+---------------------
 
-The cache has two modes, writeback and writethrough.
+The cache has three operating modes: writeback, writethrough and
+passthrough.
 
 If writeback, the default, is selected then a write to a block that is
 cached will go only to the cache and the block will be marked dirty in
@@ -81,8 +82,31 @@
 complete until it has hit both the origin and cache devices.  Clean
 blocks should remain clean.
 
+If passthrough is selected, useful when the cache contents are not known
+to be coherent with the origin device, then all reads are served from
+the origin device (all reads miss the cache) and all writes are
+forwarded to the origin device; additionally, write hits cause cache
+block invalidates.  To enable passthrough mode the cache must be clean.
+Passthrough mode allows a cache device to be activated without having to
+worry about coherency.  Coherency that exists is maintained, although
+the cache will gradually cool as writes take place.  If the coherency of
+the cache can later be verified, or established through use of the
+"invalidate_cblocks" message, the cache device can be transitioned to
+writethrough or writeback mode while still warm.  Otherwise, the cache
+contents can be discarded prior to transitioning to the desired
+operating mode.
+
 A simple cleaner policy is provided, which will clean (write back) all
-dirty blocks in a cache.  Useful for decommissioning a cache.
+dirty blocks in a cache.  Useful for decommissioning a cache or when
+shrinking a cache.  Shrinking the cache's fast device requires all cache
+blocks, in the area of the cache being removed, to be clean.  If the
+area being removed from the cache still contains dirty blocks the resize
+will fail.  Care must be taken to never reduce the volume used for the
+cache's fast device until the cache is clean.  This is of particular
+importance if writeback mode is used.  Writethrough and passthrough
+modes already maintain a clean cache.  Future support to partially clean
+the cache, above a specified threshold, will allow for keeping the cache
+warm and in writeback mode during resize.
 
 Migration throttling
 --------------------
@@ -161,7 +185,7 @@
  block size      : cache unit size in sectors
 
  #feature args   : number of feature arguments passed
- feature args    : writethrough.  (The default is writeback.)
+ feature args    : writethrough or passthrough.  (The default is writeback.)
 
  policy          : the replacement policy to use
  #policy args    : an even number of arguments corresponding to
@@ -177,6 +201,13 @@
 		   back cache block contents later for performance reasons,
 		   so they may differ from the corresponding origin blocks.
 
+   passthrough	 : a degraded mode useful for various cache coherency
+		   situations (e.g., rolling back snapshots of
+		   underlying storage).	 Reads and writes always go to
+		   the origin.	If a write goes to a cached origin
+		   block, then the cache block is invalidated.
+		   To enable passthrough mode the cache must be clean.
+
 A policy called 'default' is always registered.  This is an alias for
 the policy we currently think is giving best all round performance.
 
@@ -231,12 +262,26 @@
 E.g.
    dmsetup message my_cache 0 sequential_threshold 1024
 
+
+Invalidation is removing an entry from the cache without writing it
+back.  Cache blocks can be invalidated via the invalidate_cblocks
+message, which takes an arbitrary number of cblock ranges.  Each cblock
+must be expressed as a decimal value; in the future a variant message
+that takes cblock ranges expressed in hexadecimal may be needed to
+better support efficient invalidation of larger caches.  The cache must
+be in passthrough mode when invalidate_cblocks is used.
+
+   invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
+
+E.g.
+   dmsetup message my_cache 0 invalidate_cblocks 2345 3456-4567 5678-6789
+
 Examples
 ========
 
 The test suite can be found here:
 
-https://github.com/jthornber/thinp-test-suite
+https://github.com/jthornber/device-mapper-test-suite
 
 dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
 	/dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0'
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 2c656ae..c81839b 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -4,12 +4,15 @@
 Device-Mapper's "crypt" target provides transparent encryption of block devices
 using the kernel crypto API.
 
+For a more detailed description of supported parameters see:
+http://code.google.com/p/cryptsetup/wiki/DMCrypt
+
 Parameters: <cipher> <key> <iv_offset> <device path> \
 	      <offset> [<#opt_params> <opt_params>]
 
 <cipher>
     Encryption cipher and an optional IV generation mode.
-    (In format cipher[:keycount]-chainmode-ivopts:ivmode).
+    (In format cipher[:keycount]-chainmode-ivmode[:ivopts]).
     Examples:
        des
        aes-cbc-essiv:sha256
@@ -19,7 +22,11 @@
 
 <key>
     Key used for encryption. It is encoded as a hexadecimal number.
-    You can only use key sizes that are valid for the selected cipher.
+    You can only use key sizes that are valid for the selected cipher
+    in combination with the selected iv mode.
+    Note that for some iv modes the key string can contain additional
+    keys (for example IV seed) so the key contains more parts concatenated
+    into a single string.
 
 <keycount>
     Multi-key compatibility mode. You can define <keycount> keys and
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 23721d3..80b7241 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -414,6 +414,7 @@
 		200 = /dev/net/tun	TAP/TUN network device
 		201 = /dev/button/gulpb	Transmeta GULP-B buttons
 		202 = /dev/emd/ctl	Enhanced Metadisk RAID (EMD) control
+		203 = /dev/cuse		Cuse (character device in user-space)
 		204 = /dev/video/em8300		EM8300 DVD decoder control
 		205 = /dev/video/em8300_mv	EM8300 DVD decoder video
 		206 = /dev/video/em8300_ma	EM8300 DVD decoder audio
diff --git a/Documentation/devicetree/bindings/hwmon/lm90.txt b/Documentation/devicetree/bindings/hwmon/lm90.txt
new file mode 100644
index 0000000..e863248
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/lm90.txt
@@ -0,0 +1,44 @@
+* LM90 series thermometer.
+
+Required node properties:
+- compatible: manufacturer and chip name, one of
+		"adi,adm1032"
+		"adi,adt7461"
+		"adi,adt7461a"
+		"gmt,g781"
+		"national,lm90"
+		"national,lm86"
+		"national,lm89"
+		"national,lm99"
+		"dallas,max6646"
+		"dallas,max6647"
+		"dallas,max6649"
+		"dallas,max6657"
+		"dallas,max6658"
+		"dallas,max6659"
+		"dallas,max6680"
+		"dallas,max6681"
+		"dallas,max6695"
+		"dallas,max6696"
+		"onnn,nct1008"
+		"winbond,w83l771"
+		"nxp,sa56004"
+
+- reg: I2C bus address of the device
+
+- vcc-supply: vcc regulator for the supply voltage.
+
+Optional properties:
+- interrupts: Contains a single interrupt specifier which describes the
+              LM90 "-ALERT" pin output.
+              See interrupt-controller/interrupts.txt for the format.
+
+Example LM90 node:
+
+temp-sensor {
+	compatible = "onnn,nct1008";
+	reg = <0x4c>;
+	vcc-supply = <&palmas_ldo6_reg>;
+	interrupt-parent = <&gpio>;
+	interrupts = <TEGRA_GPIO(O, 4) IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
index 491c97b..878549b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
@@ -6,7 +6,7 @@
 	ti,wires: Wires refer to application modes i.e. 4/5/8 wire touchscreen
 		  support on the platform.
 	ti,x-plate-resistance: X plate resistance
-	ti,coordiante-readouts: The sequencer supports a total of 16
+	ti,coordinate-readouts: The sequencer supports a total of 16
 				programmable steps each step is used to
 				read a single coordinate. A single
                                 readout is enough but multiple reads can
diff --git a/Documentation/devicetree/bindings/mfd/as3722.txt b/Documentation/devicetree/bindings/mfd/as3722.txt
new file mode 100644
index 0000000..fc2191e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/as3722.txt
@@ -0,0 +1,194 @@
+* ams AS3722 Power management IC.
+
+Required properties:
+-------------------
+- compatible: Must be "ams,as3722".
+- reg: I2C device address.
+- interrupt-controller: AS3722 has an internal interrupt controller which takes
+  interrupt requests from internal sub-blocks like RTC, regulators and GPIOs, as
+  well as external input.
+- #interrupt-cells: Should be set to 2 for IRQ number and flags.
+  The first cell is the IRQ number. IRQ numbers for different interrupt source
+  of AS3722 are defined at dt-bindings/mfd/as3722.h
+  The second cell is the flags, encoded as the trigger masks from binding document
+	interrupts.txt, using dt-bindings/irq.
+
+Optional submodule and their properties:
+=======================================
+
+Pinmux and GPIO:
+===============
+The device has 8 GPIO pins which can be configured as GPIOs as well as for
+special IO functions.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The following properties are needed if GPIO and pinmux functionality
+is required:
+    Required properties:
+    -------------------
+	- gpio-controller: Marks the device node as a GPIO controller.
+	- #gpio-cells: Number of GPIO cells. Refer to binding document
+			gpio/gpio.txt
+
+    Optional properties:
+    --------------------
+	The following properties are required if pin control settings are
+	needed at boot.
+	- pinctrl-names: A pinctrl state named "default" must be defined, using the
+		bindings in pinctrl/pinctrl-binding.txt.
+	- pinctrl[0...n]: Properties to contain the phandles that refer to
+		different nodes of pin control settings. These nodes represent
+		the pin control settings of state 0 to state n. Each of these
+		nodes contains different subnodes to represent some desired
+		configuration for a list of pins. This configuration can
+		include the mux function to select on those pin(s), and
+		various pin configuration parameters, such as pull-up,
+		open drain.
+
+		Each subnode has the following properties:
+		Required properties:
+		    - pins: List of pins. Valid values of pins properties are:
+				gpio0, gpio1, gpio2, gpio3, gpio4, gpio5,
+				gpio6, gpio7
+
+		Optional properties:
+			function, bias-disable, bias-pull-up, bias-pull-down,
+			bias-high-impedance, drive-open-drain.
+
+			Valid values for function properties are:
+				gpio, interrupt-out, gpio-in-interrupt,
+				vsup-vbat-low-undebounce-out,
+				vsup-vbat-low-debounce-out,
+				voltage-in-standby, oc-pg-sd0, oc-pg-sd6,
+				powergood-out, pwm-in, pwm-out, clk32k-out,
+				watchdog-in, soft-reset-in
+
+Regulators:
+===========
+The device has multiple DCDCs and LDOs. The node "regulators" is required if
+regulator functionality is needed.
+
+Following are properties of regulator subnode.
+
+    Optional properties:
+    -------------------
+	The input supplies of the regulators are optional properties on the
+	regulator node. The input supplies of these regulators are provided
+	through the following properties:
+		vsup-sd2-supply: Input supply for SD2.
+		vsup-sd3-supply: Input supply for SD3.
+		vsup-sd4-supply: Input supply for SD4.
+		vsup-sd5-supply: Input supply for SD5.
+		vin-ldo0-supply: Input supply for LDO0.
+		vin-ldo1-6-supply: Input supply for LDO1 and LDO6.
+		vin-ldo2-5-7-supply: Input supply for LDO2, LDO5 and LDO7.
+		vin-ldo3-4-supply: Input supply for LDO3 and LDO4.
+		vin-ldo9-10-supply: Input supply for LDO9 and LDO10.
+		vin-ldo11-supply: Input supply for LDO11.
+
+    Optional sub nodes for regulators:
+    ---------------------------------
+	The subnode name is the name of the regulator and it must be one of:
+	sd[0-6], ldo[0-7], ldo[9-11]
+
+	Each sub-node should contain the constraints and initialization
+	information for that regulator. See regulator.txt for a description
+	of standard properties for these sub-nodes.
+	Additional optional custom properties are listed below.
+		ams,ext-control: External control of the rail. The value of
+			this property tells which external input is
+			controlling this rail. Valid values are 0, 1, 2 and 3.
+			0: There is no external control of this rail.
+			1: Rail is controlled by ENABLE1 input pin.
+			2: Rail is controlled by ENABLE2 input pin.
+			3: Rail is controlled by ENABLE3 input pin.
+			If this property is missing from the DT, no external
+			control is assumed. The external control pin macros
+			are defined at dt-bindings/mfd/as3722.h
+
+		ams,enable-tracking: Enable tracking with SD1, only supported
+			by LDO3.
+
+Example:
+--------
+#include <dt-bindings/mfd/as3722.h>
+...
+ams3722 {
+	compatible = "ams,as3722";
+	reg = <0x48>;
+
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+
+	gpio-controller;
+	#gpio-cells = <2>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&as3722_default>;
+
+	as3722_default: pinmux {
+			gpio0 {
+				pins = "gpio0";
+				function = "gpio";
+				bias-pull-down;
+			};
+
+			gpio1_2_4_7 {
+				pins = "gpio1", "gpio2", "gpio4", "gpio7";
+				function = "gpio";
+				bias-pull-up;
+			};
+
+			gpio5 {
+				pins = "gpio5";
+				function = "clk32k-out";
+			};
+	};
+
+	regulators {
+			vsup-sd2-supply = <...>;
+			...
+
+			sd0 {
+				regulator-name = "vdd_cpu";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-always-on;
+				ams,ext-control = <2>;
+			};
+
+			sd1 {
+				regulator-name = "vdd_core";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-always-on;
+				ams,ext-control = <1>;
+			};
+
+			sd2 {
+				regulator-name = "vddio_ddr";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+			};
+
+			sd4 {
+				regulator-name = "avdd-hdmi-pex";
+				regulator-min-microvolt = <1050000>;
+				regulator-max-microvolt = <1050000>;
+				regulator-always-on;
+			};
+
+			sd5 {
+				regulator-name = "vdd-1v8";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+			....
+	};
+};
diff --git a/Documentation/devicetree/bindings/mfd/s2mps11.txt b/Documentation/devicetree/bindings/mfd/s2mps11.txt
index c9332c6..78a840d 100644
--- a/Documentation/devicetree/bindings/mfd/s2mps11.txt
+++ b/Documentation/devicetree/bindings/mfd/s2mps11.txt
@@ -1,10 +1,10 @@
 
 * Samsung S2MPS11 Voltage and Current Regulator
 
-The Samsung S2MP211 is a multi-function device which includes voltage and
+The Samsung S2MPS11 is a multi-function device which includes voltage and
 current regulators, RTC, charger controller and other sub-blocks. It is
-interfaced to the host controller using a I2C interface. Each sub-block is
-addressed by the host system using different I2C slave address.
+interfaced to the host controller using an I2C interface. Each sub-block is
+addressed by the host system using different I2C slave addresses.
 
 Required properties:
 - compatible: Should be "samsung,s2mps11-pmic".
@@ -43,7 +43,8 @@
 
 BUCK[2/3/4/6] supports disabling ramp delay in hardware, so explicitly
 setting regulator-ramp-delay = <0> can be used to disable their ramp delay.
- In absence of regulator-ramp-delay property, default ramp delay will be used.
+ In the absence of the regulator-ramp-delay property, the default ramp
+ delay will be used.
 
 NOTE: Some BUCKs share the ramp rate setting, i.e. the same ramp value will
 be set for a particular group of BUCKs, so provide the same
 regulator-ramp-delay value for all of them.
@@ -58,10 +59,10 @@
 as per the datasheet of s2mps11.
 
 	- LDOn
-		  - valid values for n are 1 to 28
+		  - valid values for n are 1 to 38
 		  - Example: LDO1, LDO2, LDO28
 	- BUCKn
-		  - valid values for n are 1 to 9.
+		  - valid values for n are 1 to 10.
 		  - Example: BUCK1, BUCK2, BUCK9
 
 Example:
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index df338cb..5e1f31b 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -22,10 +22,10 @@
 				width of 8 is assumed.
 
  - ti,nand-ecc-opt:		A string setting the ECC layout to use. One of:
-
-		"sw"		Software method (default)
-		"hw"		Hardware method
-		"hw-romcode"	gpmc hamming mode method & romcode layout
+		"sw"		<deprecated> use "ham1" instead
+		"hw"		<deprecated> use "ham1" instead
+		"hw-romcode"	<deprecated> use "ham1" instead
+		"ham1"		1-bit Hamming ecc code
 		"bch4"		4-bit BCH ecc code
 		"bch8"		8-bit BCH ecc code
 
@@ -36,8 +36,12 @@
 		"prefetch-dma"		Prefetch enabled sDMA mode
 		"prefetch-irq"		Prefetch enabled irq mode
 
- - elm_id:	Specifies elm device node. This is required to support BCH
- 		error correction using ELM module.
+ - elm_id:	<deprecated> use "ti,elm-id" instead
+ - ti,elm-id:	Specifies the phandle of the ELM devicetree node.
+		ELM is an on-chip hardware engine on TI SoCs which is used for
+		locating ECC errors for BCHx algorithms. SoC devices which have
+		an ELM hardware engine should specify this device node in the
+		.dtsi. Using ELM for ECC error correction frees some CPU cycles.
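+
+		For example (a sketch; the &elm phandle and the surrounding
+		GPMC node are assumed to be defined in the SoC .dtsi):
+
+			nand@0,0 {
+				ti,nand-ecc-opt = "bch8";
+				ti,elm-id = <&elm>;
+			};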
 
 For inline partition table parsing (optional):
 
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
new file mode 100644
index 0000000..7ff57a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
@@ -0,0 +1,28 @@
+TI CPSW Phy mode Selection Device Tree Bindings
+-----------------------------------------------
+
+Required properties:
+- compatible		: Should be "ti,am3352-cpsw-phy-sel"
+- reg			: physical base address and size of the cpsw
+			  registers map
+- reg-names		: names of the register regions given in the "reg"
+			  property
+
+Optional properties:
+- rmii-clock-ext	: If present, the driver will configure the RMII
+			  interface to use an external clock
+
+Examples:
+
+	phy_sel: cpsw-phy-sel@44e10650 {
+		compatible = "ti,am3352-cpsw-phy-sel";
+		reg = <0x44e10650 0x4>;
+		reg-names = "gmii-sel";
+	};
+
+(or)
+	phy_sel: cpsw-phy-sel@44e10650 {
+		compatible = "ti,am3352-cpsw-phy-sel";
+		reg = <0x44e10650 0x4>;
+		reg-names = "gmii-sel";
+		rmii-clock-ext;
+	};
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index e216af3..d5d26d4 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -3,7 +3,7 @@
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
 	core, plus an identifier for the specific instance, such
-	as "samsung,exynos5440-pcie".
+	as "samsung,exynos5440-pcie" or "fsl,imx6q-pcie".
 - reg: base addresses and lengths of the pcie controller,
 	the phy controller, additional register for the phy controller.
 - interrupts: interrupt values for level interrupt,
@@ -21,6 +21,11 @@
 - num-lanes: number of lanes to use
 - reset-gpio: gpio pin number of power good signal
 
+Optional properties for fsl,imx6q-pcie
+- power-on-gpio: gpio pin number of power-enable signal
+- wake-up-gpio: gpio pin number of incoming wakeup signal
+- disable-gpio: gpio pin number of outgoing rfkill/endpoint disable signal
+
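+An illustrative fragment for the i.MX6 optional properties (a sketch;
+the GPIO specifiers are board-specific assumptions):
+
+	pcie@0x01000000 {
+		compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
+		reset-gpio = <&gpio7 12 0>;
+		power-on-gpio = <&gpio3 19 0>;
+		wake-up-gpio = <&gpio4 9 0>;
+		disable-gpio = <&gpio4 10 0>;
+	};
+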
 Example:
 
 SoC specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
index d61fccd..5538de9 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
@@ -15,7 +15,7 @@
     samsung,s5pc100-pwm - for 32-bit timers present on S5PC100, S5PV210,
 			  Exynos4210 rev0 SoCs
     samsung,exynos4210-pwm - for 32-bit timers present on Exynos4210,
-                          Exynos4x12 and Exynos5250 SoCs
+                          Exynos4x12, Exynos5250 and Exynos5420 SoCs
 - reg: base address and size of register area
 - interrupts: list of timer interrupts (one interrupt per timer, starting at
   timer 0)
diff --git a/Documentation/devicetree/bindings/video/atmel,lcdc.txt b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
new file mode 100644
index 0000000..1ec175e
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
@@ -0,0 +1,75 @@
+Atmel LCDC Framebuffer
+-----------------------------------------------------
+
+Required properties:
+- compatible :
+	"atmel,at91sam9261-lcdc",
+	"atmel,at91sam9263-lcdc",
+	"atmel,at91sam9g10-lcdc",
+	"atmel,at91sam9g45-lcdc",
+	"atmel,at91sam9g45es-lcdc",
+	"atmel,at91sam9rl-lcdc",
+	"atmel,at32ap-lcdc"
+- reg : Should contain 1 register range (address and length)
+- interrupts : framebuffer controller interrupt
+- display: a phandle pointing to the display node
+
+Required nodes:
+- display: a display node is required to initialize the lcd panel.
+	This should be in the board dts.
+- default-mode: a videomode within the display with timing parameters
+	as specified below.
+
+Example:
+
+	fb0: fb@0x00500000 {
+		compatible = "atmel,at91sam9g45-lcdc";
+		reg = <0x00500000 0x1000>;
+		interrupts = <23 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_fb>;
+		display = <&display0>;
+		status = "okay";
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+
+Atmel LCDC Display
+-----------------------------------------------------
+Required properties (as per of_videomode_helper):
+
+ - atmel,dmacon: dma controller configuration
+ - atmel,lcdcon2: lcd controller configuration
+ - atmel,guard-time: lcd guard time (Delay in frame periods)
+ - bits-per-pixel: lcd panel bit-depth.
+
+Optional properties (as per of_videomode_helper):
+ - atmel,lcdcon-backlight: enable backlight
+ - atmel,lcd-wiring-mode: lcd wiring mode "RGB" or "BRG"
+ - atmel,power-control-gpio: gpio to power on or off the LCD (as many as needed)
+
+Example:
+	display0: display {
+		bits-per-pixel = <32>;
+		atmel,lcdcon-backlight;
+		atmel,dmacon = <0x1>;
+		atmel,lcdcon2 = <0x80008002>;
+		atmel,guard-time = <9>;
+		atmel,lcd-wiring-mode = "RGB";
+
+		display-timings {
+			native-mode = <&timing0>;
+			timing0: timing0 {
+				clock-frequency = <9000000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <1>;
+				hfront-porch = <1>;
+				vback-porch = <40>;
+				vfront-porch = <1>;
+				hsync-len = <45>;
+				vsync-len = <1>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/video/backlight/lp855x.txt b/Documentation/devicetree/bindings/video/backlight/lp855x.txt
index 1482103..96e83a56 100644
--- a/Documentation/devicetree/bindings/video/backlight/lp855x.txt
+++ b/Documentation/devicetree/bindings/video/backlight/lp855x.txt
@@ -2,7 +2,7 @@
 
 Required properties:
   - compatible: "ti,lp8550", "ti,lp8551", "ti,lp8552", "ti,lp8553",
-                "ti,lp8556", "ti,lp8557"
+                "ti,lp8555", "ti,lp8556", "ti,lp8557"
   - reg: I2C slave address (u8)
   - dev-ctrl: Value of DEVICE CONTROL register (u8). It depends on the device.
 
@@ -15,6 +15,33 @@
 
 Example:
 
+	/* LP8555 */
+	backlight@2c {
+		compatible = "ti,lp8555";
+		reg = <0x2c>;
+
+		dev-ctrl = /bits/ 8 <0x00>;
+		pwm-period = <10000>;
+
+		/* 4V OV, 4 output LED0 string enabled */
+		rom_14h {
+			rom-addr = /bits/ 8 <0x14>;
+			rom-val = /bits/ 8 <0xcf>;
+		};
+
+		/* Heavy smoothing, 24ms ramp time step */
+		rom_15h {
+			rom-addr = /bits/ 8 <0x15>;
+			rom-val = /bits/ 8 <0xc7>;
+		};
+
+		/* 4 output LED1 string enabled */
+		rom_19h {
+			rom-addr = /bits/ 8 <0x19>;
+			rom-val = /bits/ 8 <0x0f>;
+		};
+	};
+
 	/* LP8556 */
 	backlight@2c {
 		compatible = "ti,lp8556";
diff --git a/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt
index 1e4fc72..764db86 100644
--- a/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt
+++ b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt
@@ -10,12 +10,16 @@
       last value in the array represents a 100% duty cycle (brightest).
   - default-brightness-level: the default brightness level (index into the
       array defined by the "brightness-levels" property)
+  - power-supply: regulator for supply voltage
 
 Optional properties:
   - pwm-names: a list of names for the PWM devices specified in the
                "pwms" property (see PWM binding[0])
+  - enable-gpios: contains a single GPIO specifier for the GPIO which enables
+                  and disables the backlight (see GPIO binding[1])
 
 [0]: Documentation/devicetree/bindings/pwm/pwm.txt
+[1]: Documentation/devicetree/bindings/gpio/gpio.txt
 
 Example:
 
@@ -25,4 +29,7 @@
 
 		brightness-levels = <0 4 8 16 32 64 128 255>;
 		default-brightness-level = <6>;
+
+		power-supply = <&vdd_bl_reg>;
+		enable-gpios = <&gpio 58 0>;
 	};
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index ff7b611..09bbf9a 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -2,6 +2,10 @@
 kinds of locks - per-inode (->i_mutex) and per-filesystem
 (->s_vfs_rename_mutex).
 
+	When taking the i_mutex on multiple non-directory objects, we
+always acquire the locks in order by increasing address.  We'll call
+that "inode pointer" order in the following.
+
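+	As a sketch of that rule (cf. lock_two_nondirectories() in
+fs/inode.c; simplified here, with the lockdep annotation reduced to a
+comment):
+
+	static void lock_pair(struct inode *a, struct inode *b)
+	{
+		if (a == b || !b) {
+			mutex_lock(&a->i_mutex);
+			return;
+		}
+		if (a > b)
+			swap(a, b);		/* lower address goes first */
+		mutex_lock(&a->i_mutex);
+		/* second lock in a distinct lockdep subclass */
+		mutex_lock_nested(&b->i_mutex, I_MUTEX_NONDIR2);
+	}
+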
 	For our purposes all operations fall in 5 classes:
 
 1) read access.  Locking rules: caller locks directory we are accessing.
@@ -12,8 +16,9 @@
 locks victim and calls the method.
 
 4) rename() that is _not_ cross-directory.  Locking rules: caller locks
-the parent, finds source and target, if target already exists - locks it
-and then calls the method.
+the parent and finds source and target.  If target already exists, lock
+it.  If source is a non-directory, lock it.  If that means we need to
+lock both, lock them in inode pointer order.
 
 5) link creation.  Locking rules:
 	* lock parent
@@ -30,7 +35,9 @@
 		fail with -ENOTEMPTY
 	* if new parent is equal to or is a descendent of source
 		fail with -ELOOP
-	* if target exists - lock it.
+	* If target exists, lock it.  If source is a non-directory, lock
+	  it.  In case that means we need to lock both source and target,
+	  do so in inode pointer order.
 	* call the method.
 
 
@@ -56,9 +63,11 @@
     renames will be blocked on filesystem lock and we don't start changing
     the order until we had acquired all locks).
 
-(3) any operation holds at most one lock on non-directory object and
-    that lock is acquired after all other locks.  (Proof: see descriptions
-    of operations).
+(3) locks on non-directory objects are acquired only after locks on
+    directory objects, and are acquired in inode pointer order.
+    (Proof: all operations other than rename take a lock on at most one
+    non-directory object; renames take locks on source and target in
+    inode pointer order in the case they are not directories.)
 
 	Now consider the minimal deadlock.  Each process is blocked on
 attempt to acquire some lock and already holds at least one lock.  Let's
@@ -66,9 +75,13 @@
 not contended, since any process blocked on it is not holding any locks.
 Thus all processes are blocked on ->i_mutex.
 
-	Non-directory objects are not contended due to (3).  Thus link
-creation can't be a part of deadlock - it can't be blocked on source
-and it means that it doesn't hold any locks.
+	By (3), any process holding a non-directory lock can only be
+waiting on another non-directory lock with a larger address.  Therefore
+the process holding the "largest" such lock can always make progress, and
+non-directory objects are not included in the set of contended locks.
+
+	Thus link creation can't be a part of deadlock - it can't be
+blocked on source and it means that it doesn't hold any locks.
 
 	Any contended object is either held by cross-directory rename or
 has a child that is also contended.  Indeed, suppose that it is held by
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 3cd27be..a3fe811 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -119,6 +119,7 @@
                        Default number is 6.
 disable_ext_identify   Disable the extension list configured by mkfs, so f2fs
                        does not aware of cold files such as media files.
+inline_xattr           Enable the inline xattrs feature.
 
 ================================================================================
 DEBUGFS ENTRIES
@@ -164,6 +165,12 @@
                               gc_idle = 1 will select the Cost Benefit approach
                               & setting gc_idle = 2 will select the greedy approach.
 
+ reclaim_segments             This parameter controls the number of prefree
+                              segments to be reclaimed. If the number of prefree
+                              segments is larger than this threshold, f2fs
+                              triggers a checkpoint to turn the prefree segments
+                              into free segments. The default is 100 segments
+                              (200 MB).
+
 ================================================================================
 USAGE
 ================================================================================
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index f089058..fe2b7ae 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -455,3 +455,11 @@
 	vfs_follow_link has been removed.  Filesystems must use nd_set_link
 	from ->follow_link for normal symlinks, or nd_jump_link for magic
 	/proc/<pid> style links.
+--
+[mandatory]
+	iget5_locked()/ilookup5()/ilookup5_nowait() test() callback used to be
+	called with both ->i_lock and inode_hash_lock held; the former is *not*
+	taken anymore, so verify that your callbacks do not rely on it (none
+	of the in-tree instances did).  inode_hash_lock is still held,
+	of course, so they are still serialized wrt removal from inode hash,
+	as well as wrt set() callback of iget5_locked().
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 823c95f..22d89aa3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -460,6 +460,7 @@
     nl  - non-linear mapping
     ar  - architecture specific flag
     dd  - do not include area into core dump
+    sd  - soft-dirty flag
     mm  - mixed map area
     hg  - huge page advise flag
     nh  - no-huge page advise flag
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index aa1f459..4a93e98 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -307,7 +307,7 @@
 
                 <proceeding files...>
                 <slot #3, id = 0x43, characters = "h is long">
-                <slot #2, id = 0x02, characters = "xtension which">
+                <slot #2, id = 0x02, characters = "xtension whic">
                 <slot #1, id = 0x01, characters = "My Big File.E">
                 <directory entry, name = "MYBIGFIL.EXT">
 
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index e7ca647..7b72778 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -50,6 +50,10 @@
         CONFIG_DEBUG_FS=y
         CONFIG_GCOV_KERNEL=y
 
+select the gcov format used by gcc (the default is autodetection based
+on the gcc version):
+
+        CONFIG_GCOV_FORMAT_AUTODETECT=y
+
 and to get coverage data for the entire kernel:
 
         CONFIG_GCOV_PROFILE_ALL=y
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index b466974..ab81013 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -122,6 +122,12 @@
     Prefix: 'g781'
     Addresses scanned: I2C 0x4c, 0x4d
     Datasheet: Not publicly available from GMT
+  * Texas Instruments TMP451
+    Prefix: 'tmp451'
+    Addresses scanned: I2C 0x4c
+    Datasheet: Publicly available at TI website
+               http://www.ti.com/litv/pdf/sbos686
+
 
 Author: Jean Delvare <khali@linux-fr.org>
 
diff --git a/Documentation/input/gamepad.txt b/Documentation/input/gamepad.txt
index 8002c89..31bb6a4 100644
--- a/Documentation/input/gamepad.txt
+++ b/Documentation/input/gamepad.txt
@@ -122,12 +122,14 @@
       BTN_DPAD_*
     Analog buttons are reported as:
       ABS_HAT0X and ABS_HAT0Y
+      (for ABS values negative is left/up, positive is right/down)
 
 Analog-Sticks:
   The left analog-stick is reported as ABS_X, ABS_Y. The right analog stick is
   reported as ABS_RX, ABS_RY. Zero, one or two sticks may be present.
   If analog-sticks provide digital buttons, they are mapped accordingly as
   BTN_THUMBL (first/left) and BTN_THUMBR (second/right).
+    (for ABS values negative is left/up, positive is right/down)
 
 Triggers:
   Trigger buttons can be available as digital or analog buttons or both. User-
@@ -138,6 +140,7 @@
   ABS_HAT2X (right/ZR) and BTN_TL2 or ABS_HAT2Y (left/ZL).
   If only one trigger-button combination is present (upper+lower), they are
   reported as "right" triggers (BTN_TR/ABS_HAT1X).
+    (ABS trigger values start at 0, pressure is reported as positive values)
 
 Menu-Pad:
   Menu buttons are always digital and are mapped according to their location
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 8ef6dbb..bbc99c0 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -20,16 +20,9 @@
 To see a list of new config symbols when using "make oldconfig", use
 
 	cp user/some/old.config .config
-	yes "" | make oldconfig >conf.new
+	make listnewconfig
 
-and the config program will list as (NEW) any new symbols that have
-unknown values.  Of course, the .config file is also updated with
-new (default) values, so you can use:
-
-	grep "(NEW)" conf.new
-
-to see the new config symbols or you can use diffconfig to see the
-differences between the previous and new .config files:
+and the config program will list any new symbols, one per line.
+
+Alternatively, after running "make oldconfig", you can use diffconfig to
+see the differences between the previous and new .config files:
 
 	scripts/diffconfig .config.old .config | less
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fd3eced..9ca3e74 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1070,6 +1070,9 @@
 				VIA, nVidia)
 			verbose: show contents of HPET registers during setup
 
+	hpet_mmap=	[X86, HPET_MMAP] Allow userspace to mmap HPET
+			registers.  Default set by CONFIG_HPET_MMAP_DEFAULT.
+
 	hugepages=	[HW,X86-32,IA-64] HugeTLB pages to allocate at boot.
 	hugepagesz=	[HW,IA-64,PPC,X86-64] The size of the HugeTLB pages.
 			On x86-64 and powerpc, this option can be specified
@@ -1775,6 +1778,9 @@
 			that the amount of memory usable for all allocations
 			is not too small.
 
+	movable_node	[KNL,X86] Boot-time switch to enable the effects
+			of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details.
+
 	MTD_Partition=	[MTD]
 			Format: <name>,<region-number>,<size>,<offset>
 
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index dd2f7b2..72d0106 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -46,16 +46,14 @@
  contentions       - number of lock acquisitions that had to wait
  wait time min     - shortest (non-0) time we ever had to wait for a lock
            max     - longest time we ever had to wait for a lock
-           total   - total time we spend waiting on this lock
+	   total   - total time we spend waiting on this lock
+	   avg     - average time spent waiting on this lock
  acq-bounces       - number of lock acquisitions that involved x-cpu data
  acquisitions      - number of times we took the lock
  hold time min     - shortest (non-0) time we ever held the lock
-           max     - longest time we ever held the lock
-           total   - total time this lock was held
-
-From these number various other statistics can be derived, such as:
-
- hold time average = hold time total / acquisitions
+	   max     - longest time we ever held the lock
+	   total   - total time this lock was held
+	   avg     - average time this lock was held
 
 These numbers are gathered per lock class, per read/write state (when
 applicable).
@@ -84,37 +82,38 @@
 
 # less /proc/lock_stat
 
-01 lock_stat version 0.3
-02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-03                               class name    con-bounces    contentions   waittime-min   waittime-max waittime-total    acq-bounces   acquisitions   holdtime-min   holdtime-max holdtime-total
-04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+01 lock_stat version 0.4
+02-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+03                              class name    con-bounces    contentions   waittime-min   waittime-max waittime-total   waittime-avg    acq-bounces   acquisitions   holdtime-min   holdtime-max holdtime-total   holdtime-avg
+04-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 05
-06                          &mm->mmap_sem-W:           233            538 18446744073708       22924.27      607243.51           1342          45806           1.71        8595.89     1180582.34
-07                          &mm->mmap_sem-R:           205            587 18446744073708       28403.36      731975.00           1940         412426           0.58      187825.45     6307502.88
-08                          ---------------
-09                            &mm->mmap_sem            487          [<ffffffff8053491f>] do_page_fault+0x466/0x928
-10                            &mm->mmap_sem            179          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
-11                            &mm->mmap_sem            279          [<ffffffff80210a57>] sys_mmap+0x75/0xce
-12                            &mm->mmap_sem             76          [<ffffffff802a490b>] sys_munmap+0x32/0x59
-13                          ---------------
-14                            &mm->mmap_sem            270          [<ffffffff80210a57>] sys_mmap+0x75/0xce
-15                            &mm->mmap_sem            431          [<ffffffff8053491f>] do_page_fault+0x466/0x928
-16                            &mm->mmap_sem            138          [<ffffffff802a490b>] sys_munmap+0x32/0x59
-17                            &mm->mmap_sem            145          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+06                         &mm->mmap_sem-W:            46             84           0.26         939.10       16371.53         194.90          47291        2922365           0.16     2220301.69 17464026916.32        5975.99
+07                         &mm->mmap_sem-R:            37            100           1.31      299502.61      325629.52        3256.30         212344       34316685           0.10        7744.91    95016910.20           2.77
+08                         ---------------
+09                           &mm->mmap_sem              1          [<ffffffff811502a7>] khugepaged_scan_mm_slot+0x57/0x280
+10                           &mm->mmap_sem             96          [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+11                           &mm->mmap_sem             34          [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
+12                           &mm->mmap_sem             17          [<ffffffff81127e71>] vm_munmap+0x41/0x80
+13                         ---------------
+14                           &mm->mmap_sem              1          [<ffffffff81046fda>] dup_mmap+0x2a/0x3f0
+15                           &mm->mmap_sem             60          [<ffffffff81129e29>] SyS_mprotect+0xe9/0x250
+16                           &mm->mmap_sem             41          [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+17                           &mm->mmap_sem             68          [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
 18
-19 ...............................................................................................................................................................................................
+19.............................................................................................................................................................................................................................
 20
-21                              dcache_lock:           621            623           0.52         118.26        1053.02           6745          91930           0.29         316.29      118423.41
-22                              -----------
-23                              dcache_lock            179          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-24                              dcache_lock            113          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-25                              dcache_lock             99          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-26                              dcache_lock            104          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
-27                              -----------
-28                              dcache_lock            192          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-29                              dcache_lock             98          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-30                              dcache_lock             72          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-31                              dcache_lock            112          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
+21                         unix_table_lock:           110            112           0.21          49.24         163.91           1.46          21094          66312           0.12         624.42       31589.81           0.48
+22                         ---------------
+23                         unix_table_lock             45          [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+24                         unix_table_lock             47          [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+25                         unix_table_lock             15          [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+26                         unix_table_lock              5          [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
+27                         ---------------
+28                         unix_table_lock             39          [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+29                         unix_table_lock             49          [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+30                         unix_table_lock             20          [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+31                         unix_table_lock              4          [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
 
 This excerpt shows the first two lock class statistics. Line 01 shows the
 output version - each time the format changes this will be updated. Line 02-04
@@ -131,30 +130,30 @@
 
 Dealing with nested locks, subclasses may appear:
 
-32...............................................................................................................................................................................................
+32...........................................................................................................................................................................................................................
 33
-34                               &rq->lock:         13128          13128           0.43         190.53      103881.26          97454        3453404           0.00         401.11    13224683.11
+34                               &rq->lock:       13128          13128           0.43         190.53      103881.26           7.91          97454        3453404           0.00         401.11    13224683.11           3.82
 35                               ---------
-36                               &rq->lock            645          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-37                               &rq->lock            297          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-38                               &rq->lock            360          [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
-39                               &rq->lock            428          [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+36                               &rq->lock          645          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37                               &rq->lock          297          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38                               &rq->lock          360          [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39                               &rq->lock          428          [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
 40                               ---------
-41                               &rq->lock             77          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-42                               &rq->lock            174          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-43                               &rq->lock           4715          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-44                               &rq->lock            893          [<ffffffff81340524>] schedule+0x157/0x7b8
+41                               &rq->lock           77          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42                               &rq->lock          174          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43                               &rq->lock         4715          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44                               &rq->lock          893          [<ffffffff81340524>] schedule+0x157/0x7b8
 45
-46...............................................................................................................................................................................................
+46...........................................................................................................................................................................................................................
 47
-48                             &rq->lock/1:         11526          11488           0.33         388.73      136294.31          21461          38404           0.00          37.93      109388.53
+48                             &rq->lock/1:       11526          11488           0.33         388.73      136294.31          11.86          21461          38404           0.00          37.93      109388.53           2.84
 49                             -----------
-50                             &rq->lock/1          11526          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+50                             &rq->lock/1        11526          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
 51                             -----------
-52                             &rq->lock/1           5645          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-53                             &rq->lock/1           1224          [<ffffffff81340524>] schedule+0x157/0x7b8
-54                             &rq->lock/1           4336          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
-55                             &rq->lock/1            181          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+52                             &rq->lock/1         5645          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53                             &rq->lock/1         1224          [<ffffffff81340524>] schedule+0x157/0x7b8
+54                             &rq->lock/1         4336          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55                             &rq->lock/1          181          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
 
 Line 48 shows statistics for the second subclass (/1) of &rq->lock class
 (subclass starts from 0), since in this case, as line 50 suggests,
@@ -163,16 +162,16 @@
 View the top contending locks:
 
 # grep : /proc/lock_stat | head
-              &inode->i_data.tree_lock-W:            15          21657           0.18     1093295.30 11547131054.85             58          10415           0.16          87.51        6387.60
-              &inode->i_data.tree_lock-R:             0              0           0.00           0.00           0.00          23302         231198           0.25           8.45       98023.38
-                             dcache_lock:          1037           1161           0.38          45.32         774.51           6611         243371           0.15         306.48       77387.24
-                         &inode->i_mutex:           161            286 18446744073709       62882.54     1244614.55           3653          20598 18446744073709       62318.60     1693822.74
-                         &zone->lru_lock:            94             94           0.53           7.33          92.10           4366          32690           0.29          59.81       16350.06
-              &inode->i_data.i_mmap_mutex:            79             79           0.40           3.77          53.03          11779          87755           0.28         116.93       29898.44
-                        &q->__queue_lock:            48             50           0.52          31.62          86.31            774          13131           0.17         113.08       12277.52
-                        &rq->rq_lock_key:            43             47           0.74          68.50         170.63           3706          33929           0.22         107.99       17460.62
-                      &rq->rq_lock_key#2:            39             46           0.75           6.68          49.03           2979          32292           0.17         125.17       17137.63
-                         tasklist_lock-W:            15             15           1.45          10.87          32.70           1201           7390           0.58          62.55       13648.47
+			clockevents_lock:       2926159        2947636           0.15       46882.81  1784540466.34         605.41        3381345        3879161           0.00        2260.97    53178395.68          13.71
+		     tick_broadcast_lock:        346460         346717           0.18        2257.43    39364622.71         113.54        3642919        4242696           0.00        2263.79    49173646.60          11.59
+		  &mapping->i_mmap_mutex:        203896         203899           3.36      645530.05 31767507988.39      155800.21        3361776        8893984           0.17        2254.15    14110121.02           1.59
+			       &rq->lock:        135014         136909           0.18         606.09      842160.68           6.15        1540728       10436146           0.00         728.72    17606683.41           1.69
+	       &(&zone->lru_lock)->rlock:         93000          94934           0.16          59.18      188253.78           1.98        1199912        3809894           0.15         391.40     3559518.81           0.93
+			 tasklist_lock-W:         40667          41130           0.23        1189.42      428980.51          10.43         270278         510106           0.16         653.51     3939674.91           7.72
+			 tasklist_lock-R:         21298          21305           0.20        1310.05      215511.12          10.12         186204         241258           0.14        1162.33     1179779.23           4.89
+			      rcu_node_1:         47656          49022           0.16         635.41      193616.41           3.95         844888        1865423           0.00         764.26     1656226.96           0.89
+       &(&dentry->d_lockref.lock)->rlock:         39791          40179           0.15        1302.08       88851.96           2.21        2790851       12527025           0.10        1910.75     3379714.27           0.27
+			      rcu_node_0:         29203          30064           0.16         786.55     1555573.00          51.74          88963         244254           0.00         398.87      428872.51           1.76
 
 Clear the statistics:
 
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index 38c10fd..1dfe62c 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -116,11 +116,11 @@
 Implementation of mutexes
 -------------------------
 
-'struct mutex' is the new mutex type, defined in include/linux/mutex.h
-and implemented in kernel/mutex.c. It is a counter-based mutex with a
-spinlock and a wait-list. The counter has 3 states: 1 for "unlocked",
-0 for "locked" and negative numbers (usually -1) for "locked, potential
-waiters queued".
+'struct mutex' is the new mutex type, defined in include/linux/mutex.h and
+implemented in kernel/locking/mutex.c. It is a counter-based mutex with a
+spinlock and a wait-list. The counter has 3 states: 1 for "unlocked", 0 for
+"locked" and negative numbers (usually -1) for "locked, potential waiters
+queued".
 
 the APIs of 'struct mutex' have been streamlined:
 
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c1d8204..89490beb 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -69,8 +69,7 @@
 # aggregated_ogms        gw_bandwidth           log_level
 # ap_isolation           gw_mode                orig_interval
 # bonding                gw_sel_class           routing_algo
-# bridge_loop_avoidance  hop_penalty            vis_mode
-# fragmentation
+# bridge_loop_avoidance  hop_penalty            fragmentation
 
 
 There is a special folder for debugging information:
@@ -78,7 +77,7 @@
 # ls /sys/kernel/debug/batman_adv/bat0/
 # bla_backbone_table  log                 transtable_global
 # bla_claim_table     originators         transtable_local
-# gateways            socket              vis_data
+# gateways            socket
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -127,51 +126,6 @@
 # ifconfig eth0 0.0.0.0
 
 
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh  node  must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each  node  is  either configured as "server" or as "client" (de-
-fault: "client").  Clients send their topology data to the server
-next to them, and server synchronize with other servers. If there
-is no server configured (default) within the  mesh,  no  topology
-information   will  be  transmitted.  With  these  "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When  configured  as  server,  you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertable
-with  other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those  out-
-puts could then be visualised in an image.
-
-The raw format consists of comma separated values per entry where
-each entry is giving information about a  certain  source  inter-
-face.  Each  entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
-           (each line begins with it)
--> "TQ mac  value"  -  src mac's link quality towards mac address
-                       of a neighbor originator's interface which
-                       is being used for routing
--> "TT mac" - TT announced by source mac
--> "PRIMARY" - this  is a primary interface
--> "SEC mac" - secondary mac address of source
-               (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being  the  best.
-The TT entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network.  The PRIMARY/SEC
-values are only applied on primary interfaces
-
-
 LOGGING/DEBUGGING
 -----------------
 
@@ -245,5 +199,5 @@
 
 You can also contact the Authors:
 
-Marek  Lindner  <lindner_marek@yahoo.de>
-Simon  Wunderlich  <siwu@hrz.tu-chemnitz.de>
+Marek  Lindner  <mareklindner@neomailbox.ch>
+Simon  Wunderlich  <sw@simonwunderlich.de>
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 9b28e71..2cdb8b6 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -639,6 +639,15 @@
 	are generated by the ipv4 and ipv6 code and the numbers of
 	repetitions cannot be set independently.
 
+packets_per_slave
+
+	Specify the number of packets to transmit through a slave before
+	moving to the next one. When set to 0, a slave is chosen at
+	random.
+
+	The valid range is 0 - 65535; the default value is 1. This option
+	has effect only in balance-rr mode.
+
 primary
 
 	A string (eth0, eth2, etc) specifying which slave is the
@@ -743,21 +752,16 @@
 		protocol information to generate the hash.
 
 		Uses XOR of hardware MAC addresses and IP addresses to
-		generate the hash.  The IPv4 formula is
+		generate the hash.  The formula is
 
-		(((source IP XOR dest IP) AND 0xffff) XOR
-			( source MAC XOR destination MAC ))
-				modulo slave count
+		hash = source MAC XOR destination MAC
+		hash = hash XOR source IP XOR destination IP
+		hash = hash XOR (hash RSHIFT 16)
+		hash = hash XOR (hash RSHIFT 8)
+		And then hash is reduced modulo slave count.
 
-		The IPv6 formula is
-
-		hash = (source ip quad 2 XOR dest IP quad 2) XOR
-		       (source ip quad 3 XOR dest IP quad 3) XOR
-		       (source ip quad 4 XOR dest IP quad 4)
-
-		(((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-			XOR (source MAC XOR destination MAC))
-				modulo slave count
+		If the protocol is IPv6 then the source and destination
+		addresses are first hashed using ipv6_addr_hash.
 
 		This algorithm will place all traffic to a particular
 		network peer on the same slave.  For non-IP traffic,
@@ -779,21 +783,16 @@
 		slaves, although a single connection will not span
 		multiple slaves.
 
-		The formula for unfragmented IPv4 TCP and UDP packets is
+		The formula for unfragmented TCP and UDP packets is
 
-		((source port XOR dest port) XOR
-			 ((source IP XOR dest IP) AND 0xffff)
-				modulo slave count
+		hash = source port, destination port (as in the header)
+		hash = hash XOR source IP XOR destination IP
+		hash = hash XOR (hash RSHIFT 16)
+		hash = hash XOR (hash RSHIFT 8)
+		And then hash is reduced modulo slave count (this
+		arithmetic is written out as a C sketch after the encap
+		policies below).
 
-		The formula for unfragmented IPv6 TCP and UDP packets is
-
-		hash = (source port XOR dest port) XOR
-		       ((source ip quad 2 XOR dest IP quad 2) XOR
-			(source ip quad 3 XOR dest IP quad 3) XOR
-			(source ip quad 4 XOR dest IP quad 4))
-
-		((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-			modulo slave count
+		If the protocol is IPv6 then the source and destination
+		addresses are first hashed using ipv6_addr_hash.
 
 		For fragmented TCP or UDP packets and all other IPv4 and
 		IPv6 protocol traffic, the source and destination port
@@ -801,10 +800,6 @@
 		formula is the same as for the layer2 transmit hash
 		policy.
 
-		The IPv4 policy is intended to mimic the behavior of
-		certain switches, notably Cisco switches with PFC2 as
-		well as some Foundry and IBM products.
-
 		This algorithm is not fully 802.3ad compliant.  A
 		single TCP or UDP conversation containing both
 		fragmented and unfragmented packets will see packets
@@ -815,6 +810,26 @@
 		conversations.  Other implementations of 802.3ad may
 		or may not tolerate this noncompliance.
 
+	encap2+3
+
+		This policy uses the same formula as layer2+3 but it
+		relies on skb_flow_dissect to obtain the header fields
+		which might result in the use of inner headers if an
+		encapsulation protocol is used. For example, this will
+		improve the performance for tunnel users because the
+		packets will be distributed according to the encapsulated
+		flows.
+
+	encap3+4
+
+		This policy uses the same formula as layer3+4 but it
+		relies on skb_flow_dissect to obtain the header fields
+		which might result in the use of inner headers if an
+		encapsulation protocol is used. For example, this will
+		improve the performance for tunnel users because the
+		packets will be distributed according to the encapsulated
+		flows.
+
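+	As an illustration, the layer3+4 arithmetic above can be written
+	out as the following sketch (IPv4, unfragmented traffic; kernel
+	style types; a placeholder function, not the bonding driver's
+	actual code):
+
+		u32 bond_hash_l34_sketch(u32 sip, u32 dip,
+					 u16 sport, u16 dport, int count)
+		{
+			/* both ports taken together, as laid out in the header */
+			u32 hash = ((u32)sport << 16) | dport;
+
+			hash ^= sip ^ dip;	/* XOR in the addresses */
+			hash ^= hash >> 16;
+			hash ^= hash >> 8;
+			return hash % count;	/* reduce modulo slave count */
+		}
+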
 	The default value is layer2.  This option was added in bonding
 	version 2.6.3.  In earlier versions of bonding, this parameter
 	does not exist, and the layer2 policy is the only policy.  The
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 820f553..4c07241 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -25,6 +25,12 @@
       4.1.5 RAW socket option CAN_RAW_FD_FRAMES
       4.1.6 RAW socket returned message flags
     4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
+      4.2.1 Broadcast Manager operations
+      4.2.2 Broadcast Manager message flags
+      4.2.3 Broadcast Manager transmission timers
+      4.2.4 Broadcast Manager message sequence transmission
+      4.2.5 Broadcast Manager receive filter timers
+      4.2.6 Broadcast Manager multiplex message receive filter
     4.3 connected transport protocols (SOCK_SEQPACKET)
     4.4 unconnected transport protocols (SOCK_DGRAM)
 
@@ -593,6 +599,217 @@
       In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
 
   4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
+
+  The Broadcast Manager protocol provides a command-based configuration
+  interface to filter and send (e.g. cyclic) CAN messages in kernel space.
+
+  Receive filters can be used to downsample frequent messages, detect events
+  such as message content or packet length changes, and perform time-out
+  monitoring of received messages.
+
+  Periodic transmission tasks of CAN frames or a sequence of CAN frames can be
+  created and modified at runtime; both the message content and the two
+  possible transmit intervals can be altered.
+
+  A BCM socket is not intended for sending individual CAN frames using the
+  struct can_frame as known from the CAN_RAW socket. Instead a special BCM
+  configuration message is defined. The basic BCM configuration message used
+  to communicate with the broadcast manager and the available operations are
+  defined in the linux/can/bcm.h include. The BCM message consists of a
+  message header with a command ('opcode') followed by zero or more CAN frames.
+  The broadcast manager sends responses to user space in the same form:
+
+    struct bcm_msg_head {
+            __u32 opcode;                   /* command */
+            __u32 flags;                    /* special flags */
+            __u32 count;                    /* run 'count' times with ival1 */
+            struct timeval ival1, ival2;    /* count and subsequent interval */
+            canid_t can_id;                 /* unique can_id for task */
+            __u32 nframes;                  /* number of can_frames following */
+            struct can_frame frames[0];
+    };
+
+  The aligned payload 'frames' uses the same basic CAN frame structure defined
+  at the beginning of section 4 and in the include/linux/can.h include. All
+  messages to the broadcast manager from user space have this structure.
+
+  Note that a CAN_BCM socket must be connected instead of bound after socket
+  creation (example without error checking):
+
+    int s;
+    struct sockaddr_can addr;
+    struct ifreq ifr;
+
+    s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFINDEX, &ifr);
+
+    addr.can_family = AF_CAN;
+    addr.can_ifindex = ifr.ifr_ifindex;
+
+    connect(s, (struct sockaddr *)&addr, sizeof(addr))
+
+    (..)
+
+  The broadcast manager socket is able to handle any number of in-flight
+  transmissions or receive filters concurrently. The different RX/TX jobs are
+  distinguished by the unique can_id in each BCM message. However, additional
+  CAN_BCM sockets are recommended to communicate on multiple CAN interfaces.
+  When the broadcast manager socket is bound to 'any' CAN interface (=> the
+  interface index is set to zero) the configured receive filters apply to any
+  CAN interface unless the sendto() syscall is used to overrule the 'any' CAN
+  interface index. When using recvfrom() instead of read() to retrieve BCM
+  socket messages the originating CAN interface is provided in can_ifindex.
+
+  4.2.1 Broadcast Manager operations
+
+  The opcode defines the operation for the broadcast manager to carry out,
+  or details the broadcast manager's response to several events, including
+  user requests.
+
+  Transmit Operations (user space to broadcast manager):
+
+    TX_SETUP:   Create (cyclic) transmission task.
+
+    TX_DELETE:  Remove (cyclic) transmission task, requires only can_id.
+
+    TX_READ:    Read properties of (cyclic) transmission task for can_id.
+
+    TX_SEND:    Send one CAN frame.
+
+  Transmit Responses (broadcast manager to user space):
+
+    TX_STATUS:  Reply to TX_READ request (transmission task configuration).
+
+    TX_EXPIRED: Notification when counter finishes sending at initial interval
+      'ival1'. Requires the TX_COUNTEVT flag to be set at TX_SETUP.
+
+  Receive Operations (user space to broadcast manager):
+
+    RX_SETUP:   Create RX content filter subscription.
+
+    RX_DELETE:  Remove RX content filter subscription, requires only can_id.
+
+    RX_READ:    Read properties of RX content filter subscription for can_id.
+
+  Receive Responses (broadcast manager to user space):
+
+    RX_STATUS:  Reply to RX_READ request (filter task configuration).
+
+    RX_TIMEOUT: Cyclic message is detected to be absent (timer ival1 expired).
+
+    RX_CHANGED: BCM message with updated CAN frame (detected content change).
+      Sent on first message received or on receipt of revised CAN messages.
+
+  4.2.2 Broadcast Manager message flags
+
+  When sending a message to the broadcast manager the 'flags' element may
+  contain the following flag definitions which influence the behaviour:
+
+    SETTIMER:           Set the values of ival1, ival2 and count
+
+    STARTTIMER:         Start the timer with the actual values of ival1, ival2
+      and count. Starting the timer also immediately emits a CAN frame.
+
+    TX_COUNTEVT:        Create the message TX_EXPIRED when count expires
+
+    TX_ANNOUNCE:        A change of data by the process is emitted immediately.
+
+    TX_CP_CAN_ID:       Copies the can_id from the message header to each
+      subsequent frame in frames. This is intended to simplify usage. For
+      TX tasks the unique can_id from the message header may differ from the
+      can_id(s) stored for transmission in the subsequent struct can_frame(s).
+
+    RX_FILTER_ID:       Filter by can_id alone, no frames required (nframes=0).
+
+    RX_CHECK_DLC:       A change of the DLC leads to an RX_CHANGED.
+
+    RX_NO_AUTOTIMER:    Prevent automatically starting the timeout monitor.
+
+    RX_ANNOUNCE_RESUME: If passed at RX_SETUP and a receive timeout occurred,
+      an RX_CHANGED message will be generated when the (cyclic) receive
+      restarts.
+
+    TX_RESET_MULTI_IDX: Reset the index for the multiple frame transmission.
+
+    RX_RTR_FRAME:       Send reply for RTR-request (placed in op->frames[0]).
+
+  4.2.3 Broadcast Manager transmission timers
+
+  Periodic transmission configurations may use up to two interval timers.
+  In this case the BCM sends a number of messages ('count') at an interval
+  'ival1', then continues to send at another given interval 'ival2'. When
+  only one timer is needed, 'count' is set to zero and only 'ival2' is used.
+  When the SETTIMER and STARTTIMER flags are set, the timers are activated.
+  The timer values can be altered at runtime when only SETTIMER is set.
+
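+  For example, a sketch (without error checking, reusing the connected
+  socket 's' from above) that sends CAN frame 0x42 ten times at a 10ms
+  interval and then continues forever at 100ms:
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame;
+    } txmsg;
+
+    memset(&txmsg, 0, sizeof(txmsg));
+    txmsg.msg_head.opcode        = TX_SETUP;
+    txmsg.msg_head.flags         = SETTIMER | STARTTIMER;
+    txmsg.msg_head.count         = 10;
+    txmsg.msg_head.ival1.tv_usec = 10000;   /* 10ms for the first 10 frames */
+    txmsg.msg_head.ival2.tv_usec = 100000;  /* then 100ms forever */
+    txmsg.msg_head.can_id        = 0x42;
+    txmsg.msg_head.nframes       = 1;
+    txmsg.frame.can_id           = 0x42;
+    txmsg.frame.can_dlc          = 2;
+    txmsg.frame.data[0]          = 0x12;
+    txmsg.frame.data[1]          = 0x34;
+
+    write(s, &txmsg, sizeof(txmsg));
+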
+  4.2.4 Broadcast Manager message sequence transmission
+
+  Up to 256 CAN frames can be transmitted in a sequence in the case of a cyclic
+  TX task configuration. The number of CAN frames is provided in the 'nframes'
+  element of the BCM message head. The defined number of CAN frames is added
+  as an array to the TX_SETUP BCM configuration message.
+
+    /* create a struct to set up a sequence of four CAN frames */
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[4];
+    } mytxmsg;
+
+    (..)
+    mytxmsg.nframes = 4;
+    (..)
+
+    write(s, &mytxmsg, sizeof(mytxmsg));
+
+  With every transmission the index into the array of CAN frames is increased;
+  it wraps back to zero when it overflows.
+
+  4.2.5 Broadcast Manager receive filter timers
+
+  The timer values ival1 or ival2 may be set to non-zero values at RX_SETUP.
+  When the SETTIMER flag is set the timers are enabled:
+
+  ival1: Send RX_TIMEOUT when a received message is not received again within
+    the given time. When STARTTIMER is set at RX_SETUP the timeout detection
+    is activated directly - even without a prior CAN frame reception.
+
+  ival2: Throttle the received message rate down to the value of ival2. This
+    is useful to reduce messages for the application when the signal inside the
+    CAN frame is stateless, as state changes within the ival2 period may get
+    lost.
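+
+  For example, a sketch (again using the connected socket 's') that requests
+  an RX_TIMEOUT message when the cyclic frame 0x123 stays away for more than
+  one second:
+
+    struct bcm_msg_head rxmsg;
+
+    memset(&rxmsg, 0, sizeof(rxmsg));
+    rxmsg.opcode       = RX_SETUP;
+    rxmsg.can_id       = 0x123;
+    rxmsg.flags        = RX_FILTER_ID | SETTIMER | STARTTIMER;
+    rxmsg.ival1.tv_sec = 1;    /* RX_TIMEOUT after one second of silence */
+
+    write(s, &rxmsg, sizeof(rxmsg));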
+
+  4.2.6 Broadcast Manager multiplex message receive filter
+
+  To filter for content changes in multiplex message sequences, an array of
+  more than one CAN frame can be passed in an RX_SETUP configuration message.
+  The data bytes of the first CAN frame contain the mask of relevant bits
+  that have to match in the subsequent CAN frames with the received CAN
+  frame. If one of the subsequent CAN frames matches, the bits set in that
+  frame's data mark the relevant content to be compared with the previously
+  received content. Up to 257 CAN frames (the multiplex filter bit mask CAN
+  frame plus 256 CAN filters) can be added as an array to the RX_SETUP BCM
+  configuration message.
+
+    /* usually used to clear CAN frame data[] - beware of endian problems! */
+    #define U64_DATA(p) (*(unsigned long long*)(p)->data)
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = 0;
+    msg.msg_head.nframes = 5;
+    U64_DATA(&msg.frame[0]) = 0xFF00000000000000ULL; /* MUX mask */
+    U64_DATA(&msg.frame[1]) = 0x01000000000000FFULL; /* data mask (MUX 0x01) */
+    U64_DATA(&msg.frame[2]) = 0x0200FFFF000000FFULL; /* data mask (MUX 0x02) */
+    U64_DATA(&msg.frame[3]) = 0x330000FFFFFF0003ULL; /* data mask (MUX 0x33) */
+    U64_DATA(&msg.frame[4]) = 0x4F07FC0FF0000000ULL; /* data mask (MUX 0x4F) */
+
+    write(s, &msg, sizeof(msg));
+
   4.3 connected transport protocols (SOCK_SEQPACKET)
   4.4 unconnected transport protocols (SOCK_DGRAM)
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a46d785..8b8a057 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -267,17 +267,6 @@
 	more aggressively. Let me to remind again: each orphan eats
 	up to ~64K of unswappable memory.
 
-tcp_max_ssthresh - INTEGER
-	Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
-	RFC3742. Limited slow-start is a mechanism to limit growth of the cwnd
-	on the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
-	by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
-	segments per RTT when the cwnd is above tcp_max_ssthresh.
-	If TCP connection increased cwnd to thousands (or tens of thousands) segments,
-	and thousands of packets were being dropped during slow-start, you can set
-	tcp_max_ssthresh to improve performance for new TCP connection.
-	Default: 0 (off)
-
 tcp_max_syn_backlog - INTEGER
 	Maximal number of remembered connection requests, which have not
 	received an acknowledgment from connecting client.
@@ -451,7 +440,7 @@
 	connect() to perform a TCP handshake automatically.
 
 	The values (bitmap) are
-	1: Enables sending data in the opening SYN on the client.
+	1: Enables sending data in the opening SYN on the client w/ MSG_FASTOPEN.
 	2: Enables TCP Fast Open on the server side, i.e., allowing data in
 	   a SYN packet to be accepted and passed to the application before
 	   3-way hand shake finishes.
@@ -464,7 +453,7 @@
 	   different ways of setting max_qlen without the TCP_FASTOPEN socket
 	   option.
 
-	Default: 0
+	Default: 1
 
 	Note that the client & server side Fast Open flags (1 and 2
 	respectively) must be also enabled before the rest of flags can take
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index c7ecc70..0b1cf6b 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -10,12 +10,12 @@
 struct net_device allocation rules
 ==================================
 Network device structures need to persist even after module is unloaded and
-must be allocated with kmalloc.  If device has registered successfully,
-it will be freed on last use by free_netdev.  This is required to handle the
-pathologic case cleanly (example: rmmod mydriver </sys/class/net/myeth/mtu )
+must be allocated with alloc_netdev_mqs() and friends.
+If the device has registered successfully, it will be freed on last use
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: rmmod mydriver </sys/class/net/myeth/mtu )
 
-There are routines in net_init.c to handle the common cases of
-alloc_etherdev, alloc_netdev.  These reserve extra space for driver
+alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
 private data which gets freed when the network device is freed. If
 separately allocated data is attached to the network device
 (netdev_priv(dev)) then it is up to the module exit handler to free that.
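+
+A typical life cycle, as a sketch (struct my_priv is a driver-defined
+private structure; error handling abbreviated):
+
+	dev = alloc_etherdev(sizeof(struct my_priv));
+	if (register_netdev(dev)) {
+		free_netdev(dev);	/* registration failed: free directly */
+		return -ENODEV;
+	}
+	/* ... device operates ... */
+	unregister_netdev(dev);
+	free_netdev(dev);	/* actual freeing is deferred to last use */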
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 425c51d..b8a907d 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -42,7 +42,7 @@
 
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
-is located in include/linux/opp.h. OPP library can be enabled by enabling
+is located in include/linux/pm_opp.h. OPP library can be enabled by enabling
 CONFIG_PM_OPP from power management menuconfig menu. OPP library depends on
 CONFIG_PM as certain SoCs such as Texas Instrument's OMAP framework allows to
 optionally boot at a certain OPP without needing cpufreq.
@@ -71,14 +71,14 @@
 
 OPP library facilitates this concept in its implementation. The following
 operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, opp_get_voltage, opp_get_freq, opp_get_opp_count
-and opp_init_cpufreq_table
+dev_pm_opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+and dev_pm_opp_init_cpufreq_table
 
-opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
+be used for dev_pm_opp_enable/disable functions to make an opp available as required.
 
 WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if opp_enable/disable functions are invoked for a device, the
+dev_pm_opp_get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
 exact mechanism to trigger these or the notification mechanism to other
 dependent subsystems such as cpufreq are left to the discretion of the SoC
 specific framework which uses the OPP library. Similar care needs to be taken
@@ -96,24 +96,24 @@
 dev_pm_opp_get_{voltage, freq, opp_count} fall into this category.
 
 dev_pm_opp_{add,enable,disable} are updaters which use a mutex and implement their own
-RCU locking mechanisms. opp_init_cpufreq_table acts as an updater and uses
+RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses
 mutex to implement the RCU updater strategy. These functions should *NOT* be called
 under RCU locks and other contexts that prevent blocking functions in RCU or
 mutex operations from working.
 
 2. Initial OPP List Registration
 ================================
-The SoC implementation calls opp_add function iteratively to add OPPs per
+The SoC implementation calls dev_pm_opp_add function iteratively to add OPPs per
 device. It is expected that the SoC framework will register the OPP entries
 optimally - typical numbers are expected to be less than 5. The list generated by
 registering the OPPs is maintained by OPP library throughout the device
 operation. The SoC framework can subsequently control the availability of the
-OPPs dynamically using the opp_enable / disable functions.
+OPPs dynamically using the dev_pm_opp_enable / disable functions.
 
-opp_add - Add a new OPP for a specific domain represented by the device pointer.
+dev_pm_opp_add - Add a new OPP for a specific domain represented by the device pointer.
 	The OPP is defined using the frequency and voltage. Once added, the OPP
 	is assumed to be available and control of its availability can be done
-	with the opp_enable/disable functions. OPP library internally stores
+	with the dev_pm_opp_enable/disable functions. OPP library internally stores
 	and manages this information in the opp struct. This function may be
 	used by the SoC framework to define an optimal list as per the demands of
 	SoC usage environment.
@@ -124,7 +124,7 @@
 	 soc_pm_init()
 	 {
 		/* Do things */
-		r = opp_add(mpu_dev, 1000000, 900000);
+		r = dev_pm_opp_add(mpu_dev, 1000000, 900000);
 		if (r) {
 			pr_err("%s: unable to register mpu opp(%d)\n", __func__, r);
 			goto no_cpufreq;
@@ -143,44 +143,44 @@
 found, else returns error. These errors are expected to be handled by standard
 error checks such as IS_ERR() and appropriate actions taken by the caller.
 
-opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
+dev_pm_opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
 	availability. This function is especially useful to enable an OPP which
 	is not available by default.
 	Example: In a case when SoC framework detects a situation where a
 	higher frequency could be made available, it can use this function to
-	find the OPP prior to call the opp_enable to actually make it available.
+	find the OPP before calling dev_pm_opp_enable to actually make it available.
 	 rcu_read_lock();
-	 opp = opp_find_freq_exact(dev, 1000000000, false);
+	 opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
 	 rcu_read_unlock();
 	 /* dont operate on the pointer.. just do a sanity check.. */
 	 if (IS_ERR(opp)) {
 		pr_err("frequency not disabled!\n");
 		/* trigger appropriate actions.. */
 	 } else {
-		opp_enable(dev,1000000000);
+		dev_pm_opp_enable(dev,1000000000);
 	 }
 
 	NOTE: This is the only search function that operates on OPPs which are
 	not available.
 
-opp_find_freq_floor - Search for an available OPP which is *at most* the
+dev_pm_opp_find_freq_floor - Search for an available OPP which is *at most* the
 	provided frequency. This function is useful while searching for a lesser
 	match OR operating on OPP information in the order of decreasing
 	frequency.
 	Example: To find the highest opp for a device:
 	 freq = ULONG_MAX;
 	 rcu_read_lock();
-	 opp_find_freq_floor(dev, &freq);
+	 dev_pm_opp_find_freq_floor(dev, &freq);
 	 rcu_read_unlock();
 
-opp_find_freq_ceil - Search for an available OPP which is *at least* the
+dev_pm_opp_find_freq_ceil - Search for an available OPP which is *at least* the
 	provided frequency. This function is useful while searching for a
 	higher match OR operating on OPP information in the order of increasing
 	frequency.
 	Example 1: To find the lowest opp for a device:
 	 freq = 0;
 	 rcu_read_lock();
-	 opp_find_freq_ceil(dev, &freq);
+	 dev_pm_opp_find_freq_ceil(dev, &freq);
 	 rcu_read_unlock();
 	Example 2: A simplified implementation of a SoC cpufreq_driver->target:
 	 soc_cpufreq_target(..)
@@ -188,7 +188,7 @@
 		/* Do stuff like policy checks etc. */
 		/* Find the best frequency match for the req */
 		rcu_read_lock();
-		opp = opp_find_freq_ceil(dev, &freq);
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 		rcu_read_unlock();
 		if (!IS_ERR(opp))
 			soc_switch_to_freq_voltage(freq);
@@ -208,34 +208,34 @@
 
 WARNING: Do not use these functions in interrupt context.
 
-opp_enable - Make a OPP available for operation.
+dev_pm_opp_enable - Make an OPP available for operation.
 	Example: Lets say that 1GHz OPP is to be made available only if the
 	SoC temperature is lower than a certain threshold. The SoC framework
 	implementation might choose to do something as follows:
 	 if (cur_temp < temp_low_thresh) {
 		/* Enable 1GHz if it was disabled */
 		rcu_read_lock();
-		opp = opp_find_freq_exact(dev, 1000000000, false);
+		opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
 		rcu_read_unlock();
 		/* just error check */
 		if (!IS_ERR(opp))
-			ret = opp_enable(dev, 1000000000);
+			ret = dev_pm_opp_enable(dev, 1000000000);
 		else
 			goto try_something_else;
 	 }
 
-opp_disable - Make an OPP to be not available for operation
+dev_pm_opp_disable - Make an OPP not available for operation
 	Example: Lets say that 1GHz OPP is to be disabled if the temperature
 	exceeds a threshold value. The SoC framework implementation might
 	choose to do something as follows:
 	 if (cur_temp > temp_high_thresh) {
 		/* Disable 1GHz if it was enabled */
 		rcu_read_lock();
-		opp = opp_find_freq_exact(dev, 1000000000, true);
+		opp = dev_pm_opp_find_freq_exact(dev, 1000000000, true);
 		rcu_read_unlock();
 		/* just error check */
 		if (!IS_ERR(opp))
-			ret = opp_disable(dev, 1000000000);
+			ret = dev_pm_opp_disable(dev, 1000000000);
 		else
 			goto try_something_else;
 	 }
@@ -247,7 +247,7 @@
 retrieved using the search functions, the following functions can be used by SoC
 framework to retrieve the information represented inside the OPP layer.
 
-opp_get_voltage - Retrieve the voltage represented by the opp pointer.
+dev_pm_opp_get_voltage - Retrieve the voltage represented by the opp pointer.
 	Example: At a cpufreq transition to a different frequency, SoC
 	framework requires to set the voltage represented by the OPP using
 	the regulator framework to the Power Management chip providing the
@@ -256,15 +256,15 @@
 	 {
 		/* do things */
 		rcu_read_lock();
-		opp = opp_find_freq_ceil(dev, &freq);
-		v = opp_get_voltage(opp);
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+		v = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 		if (v)
 			regulator_set_voltage(.., v);
 		/* do other things */
 	 }
 
-opp_get_freq - Retrieve the freq represented by the opp pointer.
+dev_pm_opp_get_freq - Retrieve the freq represented by the opp pointer.
 	Example: Lets say the SoC framework uses a couple of helper functions
 	we could pass opp pointers instead of doing additional parameters to
 	handle quite a bit of data parameters.
@@ -273,8 +273,8 @@
 		/* do things.. */
 		 max_freq = ULONG_MAX;
 		 rcu_read_lock();
-		 max_opp = opp_find_freq_floor(dev,&max_freq);
-		 requested_opp = opp_find_freq_ceil(dev,&freq);
+		 max_opp = dev_pm_opp_find_freq_floor(dev,&max_freq);
+		 requested_opp = dev_pm_opp_find_freq_ceil(dev,&freq);
 		 if (!IS_ERR(max_opp) && !IS_ERR(requested_opp))
 			r = soc_test_validity(max_opp, requested_opp);
 		 rcu_read_unlock();
@@ -282,25 +282,25 @@
 	 }
 	 soc_test_validity(..)
 	 {
-		 if(opp_get_voltage(max_opp) < opp_get_voltage(requested_opp))
+		 if(dev_pm_opp_get_voltage(max_opp) < dev_pm_opp_get_voltage(requested_opp))
 			 return -EINVAL;
-		 if(opp_get_freq(max_opp) < opp_get_freq(requested_opp))
+		 if(dev_pm_opp_get_freq(max_opp) < dev_pm_opp_get_freq(requested_opp))
 			 return -EINVAL;
 		/* do things.. */
 	 }
 
-opp_get_opp_count - Retrieve the number of available opps for a device
+dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device
 	Example: Lets say a co-processor in the SoC needs to know the available
 	frequencies in a table, the main processor can notify as following:
 	 soc_notify_coproc_available_frequencies()
 	 {
 		/* Do things */
 		rcu_read_lock();
-		num_available = opp_get_opp_count(dev);
+		num_available = dev_pm_opp_get_opp_count(dev);
 		speeds = kzalloc(sizeof(u32) * num_available, GFP_KERNEL);
 		/* populate the table in increasing order */
 		freq = 0;
-		while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
+		while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 			speeds[i] = freq;
 			freq++;
 			i++;
@@ -313,7 +313,7 @@
 
 6. Cpufreq Table Generation
 ===========================
-opp_init_cpufreq_table - cpufreq framework typically is initialized with
+dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
 	cpufreq_frequency_table_cpuinfo which is provided with the list of
 	frequencies that are available for operation. This function provides
 	a ready to use conversion routine to translate the OPP layer's internal
@@ -326,7 +326,7 @@
 	 soc_pm_init()
 	 {
 		/* Do things */
-		r = opp_init_cpufreq_table(dev, &freq_table);
+		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
 		if (!r)
 			cpufreq_frequency_table_cpuinfo(policy, freq_table);
 		/* Do other things */
@@ -336,7 +336,7 @@
 	addition to CONFIG_PM as power management feature is required to
 	dynamically scale voltage and frequency in a system.
 
-opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
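+	Example (a sketch mirroring soc_pm_init() above; soc_pm_exit is a
+	hypothetical teardown hook):
+	 soc_pm_exit()
+	 {
+		/* Do things */
+		dev_pm_opp_free_cpufreq_table(dev, &freq_table);
+	 }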
 
 7. Data Structures
 ==================
@@ -358,16 +358,16 @@
 representing the actual OPPs and domains are internal to the OPP library itself
 to allow for suitable abstraction reusable across systems.
 
-struct opp - The internal data structure of OPP library which is used to
+struct dev_pm_opp - The internal data structure of OPP library which is used to
 	represent an OPP. In addition to the freq, voltage, availability
 	information, it also contains internal book keeping information required
 	for the OPP library to operate on.  Pointer to this structure is
 	provided back to the users such as SoC framework to be used as a
 	identifier for OPP in the interactions with OPP layer.
 
-	WARNING: The struct opp pointer should not be parsed or modified by the
-	users. The defaults of for an instance is populated by opp_add, but the
-	availability of the OPP can be modified by opp_enable/disable functions.
+	WARNING: The struct dev_pm_opp pointer should not be parsed or modified by the
+	users. The defaults for an instance are populated by dev_pm_opp_add, but the
+	availability of the OPP can be modified by dev_pm_opp_enable/disable functions.
 
 struct device - This is used to identify a domain to the OPP layer. The
 	nature of the device and it's implementation is left to the user of
@@ -377,19 +377,19 @@
 following:
 
 Initialization / modification:
-            +-----+        /- opp_enable
-opp_add --> | opp | <-------
-  |         +-----+        \- opp_disable
+                   +-----+        /- dev_pm_opp_enable
+dev_pm_opp_add --> | opp | <-------
+  |                +-----+        \- dev_pm_opp_disable
   \-------> domain_info(device)
 
 Search functions:
-             /-- opp_find_freq_ceil  ---\   +-----+
-domain_info<---- opp_find_freq_exact -----> | opp |
-             \-- opp_find_freq_floor ---/   +-----+
+             /-- dev_pm_opp_find_freq_ceil  ---\   +-----+
+domain_info<---- dev_pm_opp_find_freq_exact -----> | opp |
+             \-- dev_pm_opp_find_freq_floor ---/   +-----+
 
 Retrieval functions:
-+-----+     /- opp_get_voltage
++-----+     /- dev_pm_opp_get_voltage
 | opp | <---
-+-----+     \- opp_get_freq
++-----+     \- dev_pm_opp_get_freq
 
-domain_info <- opp_get_opp_count
+domain_info <- dev_pm_opp_get_opp_count
diff --git a/Documentation/power/powercap/powercap.txt b/Documentation/power/powercap/powercap.txt
new file mode 100644
index 0000000..1e6ef16
--- /dev/null
+++ b/Documentation/power/powercap/powercap.txt
@@ -0,0 +1,236 @@
+Power Capping Framework
+==================================
+
+The power capping framework provides a consistent interface between the kernel
+and the user space that allows power capping drivers to expose the settings to
+user space in a uniform way.
+
+Terminology
+=========================
+The framework exposes power capping devices to user space via sysfs in the
+form of a tree of objects. The objects at the root level of the tree represent
+'control types', which correspond to different methods of power capping.  For
+example, the intel-rapl control type represents the Intel "Running Average
+Power Limit" (RAPL) technology, whereas the 'idle-injection' control type
+corresponds to the use of idle injection for controlling power.
+
+Power zones represent different parts of the system, which can be controlled and
+monitored using the power capping method determined by the control type the
+given zone belongs to. They each contain attributes for monitoring power, as
+well as controls represented in the form of power constraints.  If the parts of
+the system represented by different power zones are hierarchical (that is, one
+bigger part consists of multiple smaller parts that each have their own power
+controls), those power zones may also be organized in a hierarchy with one
+parent power zone containing multiple subzones and so on to reflect the power
+control topology of the system.  In that case, it is possible to apply power
+capping to a set of devices together using the parent power zone and if more
+fine grained control is required, it can be applied through the subzones.
+
+
+Example sysfs interface tree:
+
+/sys/devices/virtual/powercap
+└── intel-rapl
+    ├── intel-rapl:0
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:0:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:0:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   ├── enabled
+    │   └── uevent
+    ├── intel-rapl:1
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:1:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:1:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   └── uevent
+    ├── power
+    │   ├── async
+    │   []
+    ├── subsystem -> ../../../../class/power_cap
+    ├── enabled
+    └── uevent
+
+The above example illustrates a case in which the Intel RAPL technology,
+available in Intel® 64 and IA-32 Processor Architectures, is used. There is one
+control type called intel-rapl which contains two power zones, intel-rapl:0 and
+intel-rapl:1, representing CPU packages.  Each of these power zones contains
+two subzones, intel-rapl:j:0 and intel-rapl:j:1 (j = 0, 1), representing the
+"core" and the "uncore" parts of the given CPU package, respectively.  All of
+the zones and subzones contain energy monitoring attributes (energy_uj,
+max_energy_range_uj) and constraint attributes (constraint_*) allowing controls
+to be applied (the constraints in the 'package' power zones apply to the whole
+CPU packages and the subzone constraints only apply to the respective parts of
+the given package individually). Since Intel RAPL doesn't provide an
+instantaneous power value, there is no power_uw attribute.
+
+In addition to that, each power zone contains a name attribute, allowing the
+part of the system represented by that zone to be identified.
+For example:
+
+cat /sys/class/power_cap/intel-rapl/intel-rapl:0/name
+package-0
+
+The Intel RAPL technology allows two constraints, short term and long term,
+with two different time windows to be applied to each power zone.  Thus for
+each zone there are 2 attributes representing the constraint names, 2 power
+limits and 2 attributes representing the sizes of the time windows. The
+constraint_j_* attributes correspond to the jth constraint (j = 0, 1).
+
+For example:
+	constraint_0_name
+	constraint_0_power_limit_uw
+	constraint_0_time_window_us
+	constraint_1_name
+	constraint_1_power_limit_uw
+	constraint_1_time_window_us
+
+Power Zone Attributes
+=================================
+Monitoring attributes
+----------------------
+
+energy_uj (rw): Current energy counter in microjoules. Write "0" to reset.
+If the counter cannot be reset, then this attribute is read-only.
+
+max_energy_range_uj (ro): Range of the above energy counter in microjoules.
+
+power_uw (ro): Current power in microwatts.
+
+max_power_range_uw (ro): Range of the above power value in microwatts.
+
+name (ro): Name of this power zone.
+
+It is possible that some domains have both power ranges and energy counter ranges;
+however, only one is mandatory.
+
+Constraints
+----------------
+constraint_X_power_limit_uw (rw): Power limit in microwatts, which should be
+applicable for the time window specified by "constraint_X_time_window_us".
+
+constraint_X_time_window_us (rw): Time window in microseconds.
+
+constraint_X_name (ro): An optional name of the constraint.
+
+constraint_X_max_power_uw(ro): Maximum allowed power in microwatts.
+
+constraint_X_min_power_uw(ro): Minimum allowed power in microwatts.
+
+constraint_X_max_time_window_us(ro): Maximum allowed time window in microseconds.
+
+constraint_X_min_time_window_us(ro): Minimum allowed time window in microseconds.
+
+All fields other than power_limit_uw and time_window_us are optional.
+
+Common zone and control type attributes
+----------------------------------------
+enabled (rw): Enable/Disable controls at zone level or for all zones using
+a control type.
+
+Power Cap Client Driver Interface
+==================================
+The API summary:
+
+Call powercap_register_control_type() to register a control type object.
+Call powercap_register_zone() to register a power zone (under a given
+control type), either as a top-level power zone or as a subzone of another
+power zone registered earlier.
+The number of constraints in a power zone and the corresponding callbacks have
+to be defined prior to calling powercap_register_zone() to register that zone.
+
+To free a power zone, call powercap_unregister_zone().
+To free a control type object, call powercap_unregister_control_type().
+Detailed API can be generated using kernel-doc on include/linux/powercap.h.
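+
+A registration sketch (my_zone, zone_ops, constraint_ops, nr_constraints and
+the names are illustrative; error handling omitted):
+
+	struct powercap_control_type *ct;
+	struct powercap_zone *zone;
+
+	ct = powercap_register_control_type(NULL, "my-capper", NULL);
+	zone = powercap_register_zone(&my_zone, ct, "zone0", NULL,
+				      &zone_ops, nr_constraints,
+				      &constraint_ops);
+	/* ... */
+	powercap_unregister_zone(ct, zone);
+	powercap_unregister_control_type(ct);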
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 71d8fe4..0f54333 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -145,11 +145,13 @@
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case.  If there is no idle callback, or if the callback returns
-0, then the PM core will attempt to carry out a runtime suspend of the device;
-in essence, it will call pm_runtime_suspend() directly.  To prevent this (for
-example, if the callback routine has started a delayed suspend), the routine
-should return a non-zero value.  Negative error return codes are ignored by the
-PM core.
+0, then the PM core will attempt to carry out a runtime suspend of the device,
+also respecting devices configured for autosuspend.  In essence this means a
+call to pm_runtime_autosuspend() (do note that drivers need to update the
+device's last-busy mark, pm_runtime_mark_last_busy(), to control the delay under
+this circumstance).  To prevent this (for example, if the callback routine has
+started a delayed suspend), the routine must return a non-zero value.  Negative
+error return codes are ignored by the PM core.
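+
+As an illustration, a driver relying on autosuspend might use an idle
+callback like the following sketch (foo_runtime_idle is a made-up name):
+
+static int foo_runtime_idle(struct device *dev)
+{
+	/* refresh the last-busy mark so the autosuspend delay restarts */
+	pm_runtime_mark_last_busy(dev);
+	return 0;	/* lets the PM core call pm_runtime_autosuspend() */
+}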
 
 The helper functions provided by the PM core, described in Section 4, guarantee
 that the following constraints are met with respect to runtime PM callbacks for
@@ -308,7 +310,7 @@
     - execute the subsystem-level idle callback for the device; returns an
       error code on failure, where -EINPROGRESS means that ->runtime_idle() is
       already being executed; if there is no callback or the callback returns 0
-      then run pm_runtime_suspend(dev) and return its result
+      then run pm_runtime_autosuspend(dev) and return its result
 
   int pm_runtime_suspend(struct device *dev);
     - execute the subsystem-level suspend callback for the device; returns 0 on
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index f59ded0..a74d0a8 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -100,6 +100,11 @@
 	return (long) (ppb * 65.536);
 }
 
+static int64_t pctns(struct ptp_clock_time *t)
+{
+	return t->sec * 1000000000LL + t->nsec;
+}
+
 static void usage(char *progname)
 {
 	fprintf(stderr,
@@ -112,6 +117,8 @@
 		" -f val     adjust the ptp clock frequency by 'val' ppb\n"
 		" -g         get the ptp clock time\n"
 		" -h         prints this message\n"
+		" -k val     measure the time offset between system and phc clock\n"
+		"            for 'val' times (Maximum 25)\n"
 		" -p val     enable output with a period of 'val' nanoseconds\n"
 		" -P val     enable or disable (val=1|0) the system clock PPS\n"
 		" -s         set the ptp clock time from the system time\n"
@@ -133,8 +140,12 @@
 	struct itimerspec timeout;
 	struct sigevent sigevent;
 
+	struct ptp_clock_time *pct;
+	struct ptp_sys_offset *sysoff;
+
 	char *progname;
-	int c, cnt, fd;
+	int i, c, cnt, fd;
 
 	char *device = DEVICE;
 	clockid_t clkid;
@@ -144,14 +155,19 @@
 	int extts = 0;
 	int gettime = 0;
 	int oneshot = 0;
+	int pct_offset = 0;
+	int n_samples = 0;
 	int periodic = 0;
 	int perout = -1;
 	int pps = -1;
 	int settime = 0;
 
+	int64_t t1, t2, tp;
+	int64_t interval, offset;
+
 	progname = strrchr(argv[0], '/');
 	progname = progname ? 1+progname : argv[0];
-	while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
+	while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
 		switch (c) {
 		case 'a':
 			oneshot = atoi(optarg);
@@ -174,6 +190,10 @@
 		case 'g':
 			gettime = 1;
 			break;
+		case 'k':
+			pct_offset = 1;
+			n_samples = atoi(optarg);
+			break;
 		case 'p':
 			perout = atoi(optarg);
 			break;
@@ -376,6 +396,47 @@
 		}
 	}
 
+	if (pct_offset) {
+		if (n_samples <= 0 || n_samples > 25) {
+			puts("n_samples should be between 1 and 25");
+			usage(progname);
+			return -1;
+		}
+
+		sysoff = calloc(1, sizeof(*sysoff));
+		if (!sysoff) {
+			perror("calloc");
+			return -1;
+		}
+		sysoff->n_samples = n_samples;
+
+		if (ioctl(fd, PTP_SYS_OFFSET, sysoff))
+			perror("PTP_SYS_OFFSET");
+		else
+			puts("system and phc clock time offset request okay");
+
+		pct = &sysoff->ts[0];
+		for (i = 0; i < sysoff->n_samples; i++) {
+			t1 = pctns(pct+2*i);
+			tp = pctns(pct+2*i+1);
+			t2 = pctns(pct+2*i+2);
+			interval = t2 - t1;
+			offset = (t2 + t1) / 2 - tp;
+
+			printf("system time: %ld.%ld\n",
+				(pct+2*i)->sec, (pct+2*i)->nsec);
+			printf("phc    time: %ld.%ld\n",
+				(pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+			printf("system time: %ld.%ld\n",
+				(pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+			printf("system/phc clock time offset is %ld ns\n"
+				"system     clock time delay  is %ld ns\n",
+				offset, interval);
+		}
+
+		free(sysoff);
+	}
+
 	close(fd);
 	return 0;
 }
diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt
index 1039b68..93cb979 100644
--- a/Documentation/pwm.txt
+++ b/Documentation/pwm.txt
@@ -39,7 +39,7 @@
 device or a consumer name. pwm_put() is used to free the PWM device. Managed
 variants of these functions, devm_pwm_get() and devm_pwm_put(), also exist.
 
-After being requested a PWM has to be configured using:
+After being requested, a PWM has to be configured using:
 
 int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
 
@@ -94,7 +94,7 @@
 A new PWM controller/chip can be added using pwmchip_add() and removed
 again with pwmchip_remove(). pwmchip_add() takes a filled in struct
 pwm_chip as argument which provides a description of the PWM chip, the
-number of PWM devices provider by the chip and the chip-specific
+number of PWM devices provided by the chip and the chip-specific
 implementation of the supported PWM operations to the framework.
 
 Locking
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 4273b2d..26b7ee4 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -290,13 +290,24 @@
 kptr_restrict:
 
 This toggle indicates whether restrictions are placed on
-exposing kernel addresses via /proc and other interfaces.  When
-kptr_restrict is set to (0), there are no restrictions.  When
-kptr_restrict is set to (1), the default, kernel pointers
-printed using the %pK format specifier will be replaced with 0's
-unless the user has CAP_SYSLOG.  When kptr_restrict is set to
-(2), kernel pointers printed using %pK will be replaced with 0's
-regardless of privileges.
+exposing kernel addresses via /proc and other interfaces.
+
+When kptr_restrict is set to (0), the default, there are no restrictions.
+
+When kptr_restrict is set to (1), kernel pointers printed using the %pK
+format specifier will be replaced with 0's unless the user has CAP_SYSLOG
+and effective user and group ids are equal to the real ids. This is
+because %pK checks are done at read() time rather than open() time, so
+if permissions are elevated between the open() and the read() (e.g. via
+a setuid binary) then %pK will not leak kernel pointers to unprivileged
+users. Note, this is a temporary solution only. The correct long-term
+solution is to do the permission checks at open() time. Consider removing
+world read permissions from files that use %pK, and using dmesg_restrict
+to protect against uses of %pK in dmesg(8) if leaking kernel pointer
+values to unprivileged users is a concern.
+
+When kptr_restrict is set to (2), kernel pointers printed using
+%pK will be replaced with 0's regardless of privileges.
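+
+For example, to hide %pK-formatted pointers from unprivileged users at
+runtime:
+
+   echo 1 > /proc/sys/kernel/kptr_restrict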
 
 ==============================================================
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 79a797e..1fbd4eb 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -119,8 +119,11 @@
 
 dirty_background_ratio
 
-Contains, as a percentage of total system memory, the number of pages at which
-the background kernel flusher threads will start writing out dirty data.
+Contains, as a percentage of total available memory that contains free pages
+and reclaimable pages, the number of pages at which the background kernel
+flusher threads will start writing out dirty data.
+
+The total available memory is not equal to total system memory.
 
 ==============================================================
 
@@ -151,9 +154,11 @@
 
 dirty_ratio
 
-Contains, as a percentage of total system memory, the number of pages at which
-a process which is generating disk writes will itself start writing out dirty
-data.
+Contains, as a percentage of total available memory that contains free pages
+and reclaimable pages, the number of pages at which a process which is
+generating disk writes will itself start writing out dirty data.
+
+The total available memory is not equal to total system memory.
 
 ==============================================================
 
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index a9248da..ef2ccbf 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,5 +8,9 @@
 	- sample hpet timer test program
 hrtimers.txt
 	- subsystem for high-resolution kernel timers
+NO_HZ.txt
+	- summary of the different methods for managing scheduler clock interrupts.
+timers-howto.txt
+	- how to insert delays in the kernel the right (tm) way.
 timer_stats.txt
 	- timer usage statistics
diff --git a/Documentation/trace/tracepoints.txt b/Documentation/trace/tracepoints.txt
index ac4170d..6b018b5 100644
--- a/Documentation/trace/tracepoints.txt
+++ b/Documentation/trace/tracepoints.txt
@@ -114,3 +114,8 @@
 If the tracepoint has to be used in kernel modules, an
 EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
 used to export the defined tracepoints.
+
+Note: The convenience macro TRACE_EVENT provides an alternative way to
+      define tracepoints. Check http://lwn.net/Articles/379903,
+      http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
+      for a series of articles with more details.
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index 8ec2a67..4cf53e4 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -26,7 +26,7 @@
 Creating a gadget means deciding what configurations there will be
 and which functions each configuration will provide.
 
-Configfs (please see Documentation/filesystems/configfs/*) lends itslef nicely
+Configfs (please see Documentation/filesystems/configfs/*) lends itself nicely
 for the purpose of telling the kernel about the above mentioned decision.
 This document is about how to do it.
 
@@ -99,7 +99,7 @@
 $ mkdir configs/<name>.<number>
 
 where <name> can be any string which is legal in a filesystem and the
-<numebr> is the configuration's number, e.g.:
+<number> is the configuration's number, e.g.:
 
 $ mkdir configs/c.1
 
@@ -327,7 +327,7 @@
 two functions to decide what they actually do.
 
 typedef struct configured_structure cs;
-typedef struc specific_attribute sa;
+typedef struct specific_attribute sa;
 
                                        sa
                        +----------------------------------+
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
new file mode 100644
index 0000000..641ec92
--- /dev/null
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -0,0 +1,24 @@
+00-INDEX
+	- this file.
+api.txt
+	- KVM userspace API.
+cpuid.txt
+	- KVM-specific cpuid leaves (x86).
+devices/
+	- KVM_CAP_DEVICE_CTRL userspace API.
+hypercalls.txt
+	- KVM hypercalls.
+locking.txt
+	- notes on KVM locks.
+mmu.txt
+	- the x86 kvm shadow mmu.
+msr.txt
+	- KVM-specific MSRs (x86).
+nested-vmx.txt
+	- notes on nested virtualization for Intel x86 processors.
+ppc-pv.txt
+	- the paravirtualization interface on PowerPC.
+review-checklist.txt
+	- review checklist for KVM patches.
+timekeeping.txt
+	- timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 858aecf..a30035d 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1122,9 +1122,9 @@
 	struct kvm_cpuid_entry2 entries[0];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC    2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT  4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
 
 struct kvm_cpuid_entry2 {
 	__u32 function;
@@ -1810,6 +1810,50 @@
   PPC   | KVM_REG_PPC_TLB3PS	| 32
   PPC   | KVM_REG_PPC_EPTCFG	| 32
   PPC   | KVM_REG_PPC_ICP_STATE | 64
+  PPC   | KVM_REG_PPC_TB_OFFSET	| 64
+  PPC   | KVM_REG_PPC_SPMC1	| 32
+  PPC   | KVM_REG_PPC_SPMC2	| 32
+  PPC   | KVM_REG_PPC_IAMR	| 64
+  PPC   | KVM_REG_PPC_TFHAR	| 64
+  PPC   | KVM_REG_PPC_TFIAR	| 64
+  PPC   | KVM_REG_PPC_TEXASR	| 64
+  PPC   | KVM_REG_PPC_FSCR	| 64
+  PPC   | KVM_REG_PPC_PSPB	| 32
+  PPC   | KVM_REG_PPC_EBBHR	| 64
+  PPC   | KVM_REG_PPC_EBBRR	| 64
+  PPC   | KVM_REG_PPC_BESCR	| 64
+  PPC   | KVM_REG_PPC_TAR	| 64
+  PPC   | KVM_REG_PPC_DPDES	| 64
+  PPC   | KVM_REG_PPC_DAWR	| 64
+  PPC   | KVM_REG_PPC_DAWRX	| 64
+  PPC   | KVM_REG_PPC_CIABR	| 64
+  PPC   | KVM_REG_PPC_IC	| 64
+  PPC   | KVM_REG_PPC_VTB	| 64
+  PPC   | KVM_REG_PPC_CSIGR	| 64
+  PPC   | KVM_REG_PPC_TACR	| 64
+  PPC   | KVM_REG_PPC_TCSCR	| 64
+  PPC   | KVM_REG_PPC_PID	| 64
+  PPC   | KVM_REG_PPC_ACOP	| 64
+  PPC   | KVM_REG_PPC_VRSAVE	| 32
+  PPC   | KVM_REG_PPC_LPCR	| 64
+  PPC   | KVM_REG_PPC_PPR	| 64
+  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
+  PPC   | KVM_REG_PPC_TM_GPR0	| 64
+          ...
+  PPC   | KVM_REG_PPC_TM_GPR31	| 64
+  PPC   | KVM_REG_PPC_TM_VSR0	| 128
+          ...
+  PPC   | KVM_REG_PPC_TM_VSR63	| 128
+  PPC   | KVM_REG_PPC_TM_CR	| 64
+  PPC   | KVM_REG_PPC_TM_LR	| 64
+  PPC   | KVM_REG_PPC_TM_CTR	| 64
+  PPC   | KVM_REG_PPC_TM_FPSCR	| 64
+  PPC   | KVM_REG_PPC_TM_AMR	| 64
+  PPC   | KVM_REG_PPC_TM_PPR	| 64
+  PPC   | KVM_REG_PPC_TM_VRSAVE	| 64
+  PPC   | KVM_REG_PPC_TM_VSCR	| 32
+  PPC   | KVM_REG_PPC_TM_DSCR	| 64
+  PPC   | KVM_REG_PPC_TM_TAR	| 64
 
 ARM registers are mapped using the lower 32 bits.  The upper 16 of that
 is the register group type, or coprocessor number:
@@ -2304,7 +2348,31 @@
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
 
 
-4.83 KVM_GET_REG_LIST
+4.83 KVM_ARM_PREFERRED_TARGET
+
+Capability: basic
+Architectures: arm, arm64
+Type: vm ioctl
+Parameters: struct kvm_vcpu_init (out)
+Returns: 0 on success; -1 on error
+Errors:
+  ENODEV:    no preferred target available for the host
+
+This queries KVM for the preferred CPU target type which can be emulated
+by KVM on the underlying host.
+
+The ioctl returns a struct kvm_vcpu_init instance containing information
+about the preferred CPU target type and recommended features for it.  The
+kvm_vcpu_init->features bitmap returned will have feature bits set if
+the preferred target recommends setting these features, but this is
+not mandatory.
+
+The information returned by this ioctl can be used to prepare an instance
+of struct kvm_vcpu_init for the KVM_ARM_VCPU_INIT ioctl, which will result
+in a VCPU matching the underlying host.
+
+
+4.84 KVM_GET_REG_LIST
 
 Capability: basic
 Architectures: arm, arm64
@@ -2323,8 +2391,7 @@
 This ioctl returns the guest registers that are supported for the
 KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 
-
-4.84 KVM_ARM_SET_DEVICE_ADDR
+4.85 KVM_ARM_SET_DEVICE_ADDR
 
 Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
 Architectures: arm, arm64
@@ -2362,7 +2429,7 @@
 KVM_RUN on any of the VCPUs.  Calling this ioctl twice for any of the
 base addresses will return -EEXIST.
 
-4.85 KVM_PPC_RTAS_DEFINE_TOKEN
+4.86 KVM_PPC_RTAS_DEFINE_TOKEN
 
 Capability: KVM_CAP_PPC_RTAS
 Architectures: ppc
@@ -2661,6 +2728,77 @@
 };
 
 
+4.81 KVM_GET_EMULATED_CPUID
+
+Capability: KVM_CAP_EXT_EMUL_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+	__u32 nent;
+	__u32 flags;
+	struct kvm_cpuid_entry2 entries[0];
+};
+
+The member 'flags' is used for passing flags from userspace.
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
+
+struct kvm_cpuid_entry2 {
+	__u32 function;
+	__u32 index;
+	__u32 flags;
+	__u32 eax;
+	__u32 ebx;
+	__u32 ecx;
+	__u32 edx;
+	__u32 padding[3];
+};
+
+This ioctl returns the x86 cpuid features which are emulated by
+kvm. Userspace can use the information returned by this ioctl to query
+which features are emulated by kvm instead of being present natively.
+
+Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
+structure with the 'nent' field indicating the number of entries in
+the variable-size array 'entries'. If the number of entries is too low
+to describe the cpu capabilities, an error (E2BIG) is returned. If the
+number is too high, the 'nent' field is adjusted and an error (ENOMEM)
+is returned. If the number is just right, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then
+filled.
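+
+A minimal invocation sketch (kvm_fd is an open /dev/kvm file descriptor;
+error handling and the E2BIG/ENOMEM retry loop are omitted):
+
+	struct kvm_cpuid2 *cpuid;
+	__u32 nent = 64;	/* an arbitrary starting size */
+
+	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
+	cpuid->nent = nent;
+	if (ioctl(kvm_fd, KVM_GET_EMULATED_CPUID, cpuid) == 0)
+		printf("%u emulated leaves\n", cpuid->nent);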
+
+The entries returned are the set CPUID bits of the respective features
+which kvm emulates, as returned by the CPUID instruction, with unknown
+or unsupported feature bits cleared.
+
+Features like x2apic, for example, may not be present in the host cpu
+but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
+emulated efficiently and are thus not included here.
+
+The fields in each entry are defined as follows:
+
+  function: the eax value used to obtain the entry
+  index: the ecx value used to obtain the entry (for entries that are
+         affected by ecx)
+  flags: an OR of zero or more of the following:
+        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+           if the index field is valid
+        KVM_CPUID_FLAG_STATEFUL_FUNC:
+           if cpuid for this function returns different values for successive
+           invocations; there will be several entries with the same function,
+           all with this flag set
+        KVM_CPUID_FLAG_STATE_READ_NEXT:
+           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+           the first entry to be read by a cpu
+   eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+         this function/index combination
+
+
 6. Capabilities that can be enabled
 -----------------------------------
 
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 22ff659..3c65feb 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -43,6 +43,13 @@
 KVM_FEATURE_ASYNC_PF               ||     4 || async pf can be enabled by
                                    ||       || writing to msr 0x4b564d02
 ------------------------------------------------------------------------------
+KVM_FEATURE_STEAL_TIME             ||     5 || steal time can be enabled by
+                                   ||       || writing to msr 0x4b564d03.
+------------------------------------------------------------------------------
+KVM_FEATURE_PV_EOI                 ||     6 || paravirtualized end of interrupt
+                                   ||       || handler can be enabled by writing
+                                   ||       || to msr 0x4b564d04.
+------------------------------------------------------------------------------
 KVM_FEATURE_PV_UNHALT              ||     7 || guest checks this feature bit
                                    ||       || before enabling paravirtualized
                                    ||       || spinlock support.
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virtual/kvm/devices/vfio.txt
new file mode 100644
index 0000000..ef51740
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/vfio.txt
@@ -0,0 +1,22 @@
+VFIO virtual device
+===================
+
+Device types supported:
+  KVM_DEV_TYPE_VFIO
+
+Only one VFIO instance may be created per VM.  The created device
+tracks VFIO groups in use by the VM and features of those groups
+important to the correctness and acceleration of the VM.  As groups
+are enabled and disabled for use by the VM, KVM should be updated
+about their presence.  When registered with KVM, a reference to the
+VFIO-group is held by KVM.
+
+Groups:
+  KVM_DEV_VFIO_GROUP
+
+KVM_DEV_VFIO_GROUP attributes:
+  KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+  KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+
+For each, kvm_device_attr.addr points to an int32_t file descriptor
+for the VFIO group.
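+
+A sketch of adding a group (vfio_dev_fd was returned by KVM_CREATE_DEVICE
+and group_fd is an open VFIO group descriptor; error handling omitted):
+
+	int32_t fd = group_fd;
+	struct kvm_device_attr attr = {
+		.group = KVM_DEV_VFIO_GROUP,
+		.attr  = KVM_DEV_VFIO_GROUP_ADD,
+		.addr  = (__u64)(unsigned long)&fd,
+	};
+
+	ioctl(vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);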
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 41b7ac9..f886941 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -132,10 +132,14 @@
 ------------
 
 Name:		kvm_lock
-Type:		raw_spinlock
+Type:		spinlock_t
 Arch:		any
 Protects:	- vm_list
-		- hardware virtualization enable/disable
+
+Name:		kvm_count_lock
+Type:		raw_spinlock_t
+Arch:		any
+Protects:	- hardware virtualization enable/disable
 Comment:	'raw' because hardware enabling/disabling must be atomic w.r.t.
 		migration.
 
@@ -151,3 +155,14 @@
 Arch:		any
 Protects:	-shadow page/shadow tlb entry
 Comment:	it is a spinlock since it is used in mmu notifier.
+
+Name:		kvm->srcu
+Type:		srcu lock
+Arch:		any
+Protects:	- kvm->memslots
+		- kvm->buses
+Comment:	The srcu read lock must be held while accessing memslots (e.g.
+		when using gfn_to_* functions) and while accessing in-kernel
+		MMIO/PIO address->device structure mapping (kvm->buses).
+		The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
+		if it is needed by multiple functions.
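+		A typical reader pattern (sketch):
+		  idx = srcu_read_lock(&kvm->srcu);
+		  /* access kvm->memslots or kvm->buses */
+		  srcu_read_unlock(&kvm->srcu, idx);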
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index 5481c8b..a39d066 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -4,10 +4,12 @@
 	- An explanation from Linus about tsk->active_mm vs tsk->mm.
 balance
 	- various information on memory balancing.
-hugepage-mmap.c
-	- Example app using huge page memory with the mmap system call.
-hugepage-shm.c
-	- Example app using huge page memory with Sys V shared memory system calls.
+cleancache.txt
+	- Intro to cleancache and page-granularity victim cache.
+frontswap.txt
+	- Outline frontswap, part of the transcendent memory frontend.
+highmem.txt
+	- Outline of highmem and common issues.
 hugetlbpage.txt
 	- a brief summary of hugetlbpage support in the Linux kernel.
 hwpoison.txt
@@ -16,21 +18,23 @@
 	- how to use the Kernel Samepage Merging feature.
 locking
 	- info on how locking and synchronization is done in the Linux vm code.
-map_hugetlb.c
-	- an example program that uses the MAP_HUGETLB mmap flag.
 numa
 	- information about NUMA specific code in the Linux vm.
 numa_memory_policy.txt
 	- documentation of concepts and APIs of the 2.6 memory policy support.
 overcommit-accounting
 	- description of the Linux kernels overcommit handling modes.
-page-types.c
-	- Tool for querying page flags
 page_migration
 	- description of page migration in NUMA systems.
 pagemap.txt
 	- pagemap, from the userspace perspective
 slub.txt
 	- a short users guide for SLUB.
+soft-dirty.txt
+	- short explanation for soft-dirty PTEs
+transhuge.txt
+	- Transparent Hugepage Support, alternative way of using hugepages.
 unevictable-lru.txt
 	- Unevictable LRU infrastructure
+zswap.txt
+	- Intro to compressed cache for swap pages
diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock
new file mode 100644
index 0000000..7521d36
--- /dev/null
+++ b/Documentation/vm/split_page_table_lock
@@ -0,0 +1,94 @@
+Split page table lock
+=====================
+
+Originally, the mm->page_table_lock spinlock protected all page tables of the
+mm_struct. But this approach leads to poor page fault scalability in
+multi-threaded applications due to high contention on the lock. To improve
+scalability, the split page table lock was introduced.
+
+With the split page table lock we have a separate per-table lock to serialize
+access to the table. At the moment we use split locks for PTE and PMD
+tables. Access to higher-level tables is protected by mm->page_table_lock.
+
+There are helpers to lock/unlock a table and other accessor functions:
+ - pte_offset_map_lock()
+	maps pte and takes PTE table lock, returns pointer to the taken
+	lock;
+ - pte_unmap_unlock()
+	unlocks and unmaps PTE table;
+ - pte_alloc_map_lock()
+	allocates PTE table if needed and take the lock, returns pointer
+	to taken lock or NULL if allocation failed;
+ - pte_lockptr()
+	returns pointer to PTE table lock;
+ - pmd_lock()
+	takes PMD table lock, returns pointer to taken lock;
+ - pmd_lockptr()
+	returns pointer to PMD table lock;
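+
+A typical PTE access pattern, as a sketch (mm, pmd and addr assumed valid):
+
+	spinlock_t *ptl;
+	pte_t *pte;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	/* examine or modify *pte under the per-table lock */
+	pte_unmap_unlock(pte, ptl);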
+
+Split page table lock for PTE tables is enabled compile-time if
+CONFIG_SPLIT_PTLOCK_CPUS (usually 4) is less than or equal to NR_CPUS.
+If the split lock is disabled, all tables are guarded by mm->page_table_lock.
+
+Split page table lock for PMD tables is enabled if it's enabled for PTE
+tables and the architecture supports it (see below).
+
+Hugetlb and split page table lock
+---------------------------------
+
+Hugetlb can support several page sizes. We use split lock only for PMD
+level, but not for PUD.
+
+Hugetlb-specific helpers:
+ - huge_pte_lock()
+	takes pmd split lock for PMD_SIZE page, mm->page_table_lock
+	otherwise;
+ - huge_pte_lockptr()
+	returns pointer to table lock;
+
+Support of split page table lock by an architecture
+---------------------------------------------------
+
+There is no need to specially enable the PTE split page table lock:
+everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
+which must be called on PTE table allocation / freeing.
+
+Make sure the architecture doesn't use the slab allocator for page table
+allocation: slab uses page->slab_cache and page->first_page for its pages.
+These fields share storage with page->ptl.
+
+PMD split lock only makes sense if you have more than two page table
+levels.
+
+PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
+allocation and pgtable_pmd_page_dtor() on freeing.
+
+Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
+make sure you cover all PMD table allocation / freeing paths: e.g. X86_PAE
+preallocates a few PMDs in pgd_alloc().
+
+With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
+
+NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
+be handled properly.
+
+page->ptl
+---------
+
+page->ptl is used to access the split page table lock, where 'page' is the
+struct page of the page containing the table. It shares storage with
+page->private (and a few other fields in the union).
+
+To avoid increasing the size of struct page and to get the best performance,
+we use a trick:
+ - if spinlock_t fits into long, we use page->ptl as the spinlock, so we
+   can avoid indirect access and save a cache line.
+ - if the size of spinlock_t is bigger than the size of long, we use page->ptl
+   as a pointer to spinlock_t and allocate it dynamically. This allows using
+   the split lock with DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC enabled, but costs
+   one more cache line for indirect access;
+
+The spinlock_t is allocated in pgtable_page_ctor() for PTE tables and in
+pgtable_pmd_page_ctor() for PMD tables.
+
+Please never access page->ptl directly -- use the appropriate helper.
diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt
index 7e492d8..00c3d31 100644
--- a/Documentation/vm/zswap.txt
+++ b/Documentation/vm/zswap.txt
@@ -8,7 +8,7 @@
 faster than reads from a swap device.
 
 NOTE: Zswap is a new feature as of v3.11 and interacts heavily with memory
-reclaim.  This interaction has not be fully explored on the large set of
+reclaim.  This interaction has not been fully explored on the large set of
 potential configurations and workloads that exist.  For this reason, zswap
 is a work in progress and should be considered experimental.
 
@@ -23,7 +23,7 @@
     drastically reducing life-shortening writes.
 
 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
-device when the compressed pool reaches it size limit.  This requirement had
+device when the compressed pool reaches its size limit.  This requirement had
 been identified in prior community discussions.
 
 To enable zswap, the "enabled" attribute must be set to 1 at boot time, e.g.
@@ -37,7 +37,7 @@
 
 Zswap makes use of zbud for managing the compressed memory pool.  Each
 allocation in zbud is not directly accessible by address.  Rather, a handle is
-return by the allocation routine and that handle must be mapped before being
+returned by the allocation routine and that handle must be mapped before being
 accessed.  The compressed memory pool grows on demand and shrinks as compressed
 pages are freed.  The pool is not preallocated.
 
@@ -56,7 +56,7 @@
 via frontswap, to free the compressed entry.
 
 Zswap seeks to be simple in its policies.  Sysfs attributes allow for one user
-controlled policies:
+controlled policy:
 * max_pool_percent - The maximum percentage of memory that the compressed
     pool can occupy.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index a30f783..678f074 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -253,6 +253,20 @@
 F:	drivers/pci/*/*acpi*
 F:	drivers/pci/*/*/*acpi*
 
+ACPI COMPONENT ARCHITECTURE (ACPICA)
+M:	Robert Moore <robert.moore@intel.com>
+M:	Lv Zheng <lv.zheng@intel.com>
+M:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+L:	linux-acpi@vger.kernel.org
+L:	devel@acpica.org
+W:	https://acpica.org/
+W:	https://github.com/acpica/acpica/
+Q:	https://patchwork.kernel.org/project/linux-acpi/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+S:	Supported
+F:	drivers/acpi/acpica/
+F:	include/acpi/
+
 ACPI FAN DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -1660,16 +1674,15 @@
 F:	drivers/net/wireless/b43legacy/
 
 BACKLIGHT CLASS/SUBSYSTEM
-M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jingoo Han <jg1.han@samsung.com>
 S:	Maintained
 F:	drivers/video/backlight/
 F:	include/linux/backlight.h
 
 BATMAN ADVANCED
-M:	Marek Lindner <lindner_marek@yahoo.de>
-M:	Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M:	Antonio Quartulli <ordex@autistici.org>
+M:	Marek Lindner <mareklindner@neomailbox.ch>
+M:	Simon Wunderlich <sw@simonwunderlich.de>
+M:	Antonio Quartulli <antonio@meshcoding.com>
 L:	b.a.t.m.a.n@lists.open-mesh.org
 W:	http://www.open-mesh.org/
 S:	Maintained
@@ -1822,7 +1835,7 @@
 F:	drivers/net/ethernet/broadcom/bnx2_*
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:	Eilon Greenstein <eilong@broadcom.com>
+M:	Ariel Elior <ariele@broadcom.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -1867,7 +1880,7 @@
 F:	drivers/net/wireless/brcm80211/
 
 BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
-M:	Bhanu Prakash Gollapudi <bprakash@broadcom.com>
+M:	Eddie Wai <eddie.wai@broadcom.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bnx2fc/
@@ -2372,7 +2385,7 @@
 
 CRAMFS FILESYSTEM
 W:	http://sourceforge.net/projects/cramfs/
-S:	Orphan
+S:	Orphan / Obsolete
 F:	Documentation/filesystems/cramfs.txt
 F:	fs/cramfs/
 
@@ -2647,6 +2660,7 @@
 L:	dm-devel@redhat.com
 W:	http://sources.redhat.com/dm
 Q:	http://patchwork.kernel.org/project/dm-devel/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
 T:	quilt http://people.redhat.com/agk/patches/linux/editing/
 S:	Maintained
 F:	Documentation/device-mapper/
@@ -2834,7 +2848,9 @@
 L:	linux-tegra@vger.kernel.org
 T:	git git://anongit.freedesktop.org/tegra/linux.git
 S:	Supported
+F:	drivers/gpu/drm/tegra/
 F:	drivers/gpu/host1x/
+F:	include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
@@ -4791,9 +4807,10 @@
 F:	drivers/staging/ktap/
 
 KCONFIG
-M:	Michal Marek <mmarek@suse.cz>
+M:	"Yann E. MORIN" <yann.morin.1998@free.fr>
 L:	linux-kbuild@vger.kernel.org
-S:	Odd Fixes
+T:	git://gitorious.org/linux-kconfig/linux-kconfig
+S:	Maintained
 F:	Documentation/kbuild/kconfig-language.txt
 F:	scripts/kconfig/
 
@@ -4856,7 +4873,8 @@
 M:	Gleb Natapov <gleb@redhat.com>
 M:	Paolo Bonzini <pbonzini@redhat.com>
 L:	kvm@vger.kernel.org
-W:	http://linux-kvm.org
+W:	http://www.linux-kvm.org
+T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
 S:	Supported
 F:	Documentation/*/kvm*.txt
 F:	Documentation/virtual/kvm/
@@ -5196,6 +5214,7 @@
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/lm90
+F:	Documentation/devicetree/bindings/hwmon/lm90.txt
 F:	drivers/hwmon/lm90.c
 
 LM95234 HARDWARE MONITOR DRIVER
@@ -5374,7 +5393,7 @@
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M:	Lennert Buytenhek <buytenh@wantstofly.org>
+M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/marvell/mv643xx_eth.*
@@ -6412,6 +6431,7 @@
 F:	Documentation/PCI/
 F:	drivers/pci/
 F:	include/linux/pci*
+F:	arch/x86/pci/
 
 PCI DRIVER FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
@@ -6420,6 +6440,12 @@
 F:	Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F:	drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR SAMSUNG EXYNOS
+M:	Jingoo Han <jg1.han@samsung.com>
+L:	linux-pci@vger.kernel.org
+S:	Maintained
+F:	drivers/pci/host/pci-exynos.c
+
 PCMCIA SUBSYSTEM
 P:	Linux PCMCIA Team
 L:	linux-pcmcia@lists.infradead.org
@@ -6758,8 +6784,7 @@
 M:	Thierry Reding <thierry.reding@gmail.com>
 L:	linux-pwm@vger.kernel.org
 S:	Maintained
-W:	http://gitorious.org/linux-pwm
-T:	git git://gitorious.org/linux-pwm/linux-pwm.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
 F:	Documentation/pwm.txt
 F:	Documentation/devicetree/bindings/pwm/
 F:	include/linux/pwm.h
@@ -6890,6 +6915,14 @@
 S:	Supported
 F:	arch/hexagon/
 
+QUALCOMM WCN36XX WIRELESS DRIVER
+M:	Eugene Krasnikov <k.eugene.e@gmail.com>
+L:	wcn36xx@lists.infradead.org
+W:	http://wireless.kernel.org/en/users/Drivers/wcn36xx
+T:	git git://github.com/KrasnikovEugene/wcn36xx.git
+S:	Supported
+F:	drivers/net/wireless/ath/wcn36xx/
+
 QUICKCAM PARALLEL PORT WEBCAMS
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
@@ -7319,7 +7352,7 @@
 F:	drivers/media/usb/tlg2300/
 
 SC1200 WDT DRIVER
-M:	Zwane Mwaikambo <zwane@arm.linux.org.uk>
+M:	Zwane Mwaikambo <zwanem@gmail.com>
 S:	Maintained
 F:	drivers/watchdog/sc1200wdt.c
 
diff --git a/Makefile b/Makefile
index 67077ad..920ad07 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,9 @@
 LC_NUMERIC=C
 export LC_COLLATE LC_NUMERIC
 
+# Avoid interference with shell env settings
+unexport GREP_OPTIONS
+
 # We are using a recursive build, so we need to do a little thinking
 # to get the ordering right.
 #
@@ -659,6 +662,12 @@
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# disallow errors like 'EXPORT_GPL(foo);' with missing header
+KBUILD_CFLAGS   += $(call cc-option,-Werror=implicit-int)
+
+# require functions to have arguments in prototypes, not empty 'int foo()'
+KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
@@ -720,6 +729,22 @@
 endif # INSTALL_MOD_STRIP
 export mod_strip_cmd
 
+# Select initial ramdisk compression format, default is gzip(1).
+# This shall be used by the dracut(8) tool while creating an initramfs image.
+#
+INITRD_COMPRESS=gzip
+ifeq ($(CONFIG_RD_BZIP2), y)
+        INITRD_COMPRESS=bzip2
+else ifeq ($(CONFIG_RD_LZMA), y)
+        INITRD_COMPRESS=lzma
+else ifeq ($(CONFIG_RD_XZ), y)
+        INITRD_COMPRESS=xz
+else ifeq ($(CONFIG_RD_LZO), y)
+        INITRD_COMPRESS=lzo
+else ifeq ($(CONFIG_RD_LZ4), y)
+        INITRD_COMPRESS=lz4
+endif
+export INITRD_COMPRESS
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
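
To illustrate the two -Werror promotions added to KBUILD_CFLAGS above, here
is a deliberately non-compiling C fragment (names are hypothetical); the
first declaration is the situation the 'EXPORT_GPL(foo);' comment alludes to:

	/* Rejected by -Werror=implicit-int: with the header defining the
	 * macro missing, this parses as a declaration of a function named
	 * EXPORT_GPL with an implicit int return type. */
	EXPORT_GPL(foo);

	/* Rejected by -Werror=strict-prototypes: in C, () means
	 * "unspecified arguments", not "no arguments". */
	int bar();

	/* Accepted: the prototype spells out its (empty) argument list. */
	int bar(void);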
diff --git a/arch/Kconfig b/arch/Kconfig
index ded747c..f1cf895 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -207,9 +207,6 @@
 config HAVE_DMA_CONTIGUOUS
 	bool
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-
 config GENERIC_SMP_IDLE_THREAD
        bool
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 35a300d..135c674 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -1,6 +1,7 @@
 config ALPHA
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_AOUT
 	select HAVE_IDE
 	select HAVE_OPROFILE
@@ -522,7 +523,6 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0da..aab14a0 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -72,7 +72,10 @@
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/alpha/include/uapi/asm/errno.h b/arch/alpha/include/uapi/asm/errno.h
index e5f29ca..17f92aa 100644
--- a/arch/alpha/include/uapi/asm/errno.h
+++ b/arch/alpha/include/uapi/asm/errno.h
@@ -43,7 +43,7 @@
 
 #define	EUSERS		68	/* Too many users */
 #define	EDQUOT		69	/* Quota exceeded */
-#define	ESTALE		70	/* Stale NFS file handle */
+#define	ESTALE		70	/* Stale file handle */
 #define	EREMOTE		71	/* Object is remote */
 
 #define	ENOLCK		77	/* No record locks available */
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 467de01..e3a1491 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -81,6 +81,8 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_BUSY_POLL			46
+#define SO_BUSY_POLL		46
+
+#define SO_MAX_PACING_RATE	47
 
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5ede546..2ee0c9b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -125,7 +125,6 @@
 config SMP
 	bool "Symmetric Multi-Processing (Incomplete)"
 	default n
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 36a9f20..81208bfd 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -105,11 +105,16 @@
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pgtable_t pte_pg;
+	struct page *page;
 
 	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
-	if (pte_pg) {
-		memzero((void *)pte_pg, PTRS_PER_PTE * 4);
-		pgtable_page_ctor(virt_to_page(pte_pg));
+	if (!pte_pg)
+		return 0;
+	memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+	page = virt_to_page(pte_pg);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return 0;
 	}
 
 	return pte_pg;
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index eb1c2ee..42b0504 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -327,7 +327,7 @@
 		 */
 
 		/* We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(cur);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index acb8070..214b698 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,8 @@
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS
@@ -51,6 +53,8 @@
 	select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_REGS
+	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
@@ -481,6 +485,7 @@
 	bool "IXP4xx-based"
 	depends on MMU
 	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARCH_REQUIRE_GPIOLIB
 	select CLKSRC_MMIO
 	select CPU_XSCALE
@@ -688,7 +693,6 @@
 	select GENERIC_CLOCKEVENTS
 	select HAVE_IDE
 	select ISA
-	select NEED_MACH_GPIO_H
 	select NEED_MACH_MEMORY_H
 	select SPARSE_IRQ
 	help
@@ -1064,11 +1068,6 @@
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
 
-config XSCALE_PMU
-	bool
-	depends on CPU_XSCALE
-	default y
-
 config MULTI_IRQ_HANDLER
 	bool
 	help
@@ -1434,7 +1433,6 @@
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAVE_SMP
 	depends on MMU || ARM_MPU
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -1516,6 +1514,32 @@
 	  for (multi-)cluster based systems, such as big.LITTLE based
 	  systems.
 
+config BIG_LITTLE
+	bool "big.LITTLE support (Experimental)"
+	depends on CPU_V7 && SMP
+	select MCPM
+	help
+	  This option enables support selections for the big.LITTLE
+	  system architecture.
+
+config BL_SWITCHER
+	bool "big.LITTLE switcher support"
+	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+	select CPU_PM
+	select ARM_CPU_SUSPEND
+	help
+	  The big.LITTLE "switcher" provides the core functionality to
+	  transparently handle transition between a cluster of A15's
+	  and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+	tristate "Simple big.LITTLE switcher user interface"
+	depends on BL_SWITCHER && DEBUG_KERNEL
+	help
+	  This is a simple, dummy char dev interface to control
+	  the big.LITTLE switcher core code.  It is meant for
+	  debugging purposes only.
+
 choice
 	prompt "Memory split"
 	default VMSPLIT_3G
@@ -1839,6 +1863,12 @@
 	  neutralized via a kernel panic.
 	  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+	def_bool y
+
+config IOMMU_HELPER
+	def_bool SWIOTLB
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
@@ -1849,6 +1879,7 @@
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
 	select ARM_PSCI
+	select SWIOTLB_XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index d597c6b..5765abf 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -318,6 +318,7 @@
 	config DEBUG_MSM_UART1
 		bool "Kernel low-level debugging messages via MSM UART1"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the first serial port on MSM devices.
@@ -325,6 +326,7 @@
 	config DEBUG_MSM_UART2
 		bool "Kernel low-level debugging messages via MSM UART2"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the second serial port on MSM devices.
@@ -332,6 +334,7 @@
 	config DEBUG_MSM_UART3
 		bool "Kernel low-level debugging messages via MSM UART3"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the third serial port on MSM devices.
@@ -340,6 +343,7 @@
 		bool "Kernel low-level debugging messages via MSM 8660 UART"
 		depends on ARCH_MSM8X60
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8660 devices.
@@ -348,10 +352,20 @@
 		bool "Kernel low-level debugging messages via MSM 8960 UART"
 		depends on ARCH_MSM8960
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8960 devices.
 
+	config DEBUG_MSM8974_UART
+		bool "Kernel low-level debugging messages via MSM 8974 UART"
+		depends on ARCH_MSM8974
+		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the serial port on MSM 8974 devices.
+
 	config DEBUG_MVEBU_UART
 		bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
 		depends on ARCH_MVEBU
@@ -841,6 +855,20 @@
 		  options; the platform specific options are deprecated
 		  and will be soon removed.
 
+	config DEBUG_LL_UART_EFM32
+		bool "Kernel low-level debugging via efm32 UART"
+		depends on ARCH_EFM32
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to a UART or USART port on efm32-based
+		  machines. Use the following addresses for DEBUG_UART_PHYS:
+
+		    0x4000c000 | USART0
+		    0x4000c400 | USART1
+		    0x4000c800 | USART2
+		    0x4000e000 | UART0
+		    0x4000e400 | UART1
+
 	config DEBUG_LL_UART_PL01X
 		bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
 		help
@@ -887,11 +915,16 @@
 	bool
 	depends on ARCH_STI
 
+config DEBUG_MSM_UART
+	bool
+	depends on ARCH_MSM
+
 config DEBUG_LL_INCLUDE
 	string
 	default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
 	default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
 	default "debug/exynos.S" if DEBUG_EXYNOS_UART
+	default "debug/efm32.S" if DEBUG_LL_UART_EFM32
 	default "debug/icedcc.S" if DEBUG_ICEDCC
 	default "debug/imx.S" if DEBUG_IMX1_UART || \
 				 DEBUG_IMX25_UART || \
@@ -902,11 +935,7 @@
 				 DEBUG_IMX53_UART ||\
 				 DEBUG_IMX6Q_UART || \
 				 DEBUG_IMX6SL_UART
-	default "debug/msm.S" if DEBUG_MSM_UART1 || \
-				 DEBUG_MSM_UART2 || \
-				 DEBUG_MSM_UART3 || \
-				 DEBUG_MSM8660_UART || \
-				 DEBUG_MSM8960_UART
+	default "debug/msm.S" if DEBUG_MSM_UART
 	default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
 	default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
 	default "debug/sti.S" if DEBUG_STI_UART
@@ -959,6 +988,7 @@
 	default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
 	default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
 	default 0x20201000 if DEBUG_BCM2835
+	default 0x4000e400 if DEBUG_LL_UART_EFM32
 	default 0x40090000 if ARCH_LPC32XX
 	default 0x40100000 if DEBUG_PXA_UART1
 	default 0x42000000 if ARCH_GEMINI
@@ -989,6 +1019,7 @@
 	default 0xfff36000 if DEBUG_HIGHBANK_UART
 	default 0xfffff700 if ARCH_IOP33X
 	depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+		DEBUG_LL_UART_EFM32 || \
 		DEBUG_UART_8250 || DEBUG_UART_PL01X
 
 config DEBUG_UART_VIRT
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 8b66713..c99b108 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@
 LDFLAGS_vmlinux	:=-p --no-undefined -X
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
+LDFLAGS_MODULE	+= --be8
 endif
 
 OBJCOPYFLAGS	:=-O binary -R .comment -S
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 75189f1..066b034 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -135,6 +135,7 @@
 		.word	_edata			@ zImage end address
  THUMB(		.thumb			)
 1:
+ ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
 		mrs	r9, cpsr
 #ifdef CONFIG_ARM_VIRT_EXT
 		bl	__hyp_stub_install	@ get into SVC mode, reversibly
@@ -699,9 +700,7 @@
 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
 		orr	r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
-		orr	r0, r0, #1 << 25	@ big-endian page tables
-#endif
+ ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
 		bl	__common_mmu_cache_on
 		mov	r0, #0
 		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
@@ -728,9 +727,7 @@
 		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
 						@ (needed for ARM1176)
 #ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
-		orr	r0, r0, #1 << 25	@ big-endian page tables
-#endif
+ ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
 		mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 9874294..7e6c64e 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -630,7 +630,7 @@
 	tsc {
 		ti,wires = <4>;
 		ti,x-plate-resistance = <200>;
-		ti,coordiante-readouts = <5>;
+		ti,coordinate-readouts = <5>;
 		ti,wire-config = <0x00 0x11 0x22 0x33>;
 	};
 
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 03febf8..4718ec4 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -485,3 +485,13 @@
 		tx-num-evt = <1>;
 		rx-num-evt = <1>;
 };
+
+&tscadc {
+	status = "okay";
+	tsc {
+		ti,wires = <4>;
+		ti,x-plate-resistance = <200>;
+		ti,coordinate-readouts = <5>;
+		ti,wire-config = <0x00 0x11 0x22 0x33>;
+	};
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index fcb9c8e..f6d8ffe 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -670,6 +670,12 @@
 				/* Filled in by U-Boot */
 				mac-address = [ 00 00 00 00 00 00 ];
 			};
+
+			phy_sel: cpsw-phy-sel@44e10650 {
+				compatible = "ti,am3352-cpsw-phy-sel";
+				reg = <0x44e10650 0x4>;
+				reg-names = "gmii-sel";
+			};
 		};
 
 		ocmcram: ocmcram@40300000 {
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index eaa9cf4..4bdc416 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,3 +16,5 @@
 AFLAGS_mcpm_head.o		:= -march=armv7-a
 AFLAGS_vlock.o			:= -march=armv7-a
 obj-$(CONFIG_TI_PRIV_EDMA)	+= edma.o
+obj-$(CONFIG_BL_SWITCHER)	+= bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644
index 0000000..5774b6e
--- /dev/null
+++ b/arch/arm/common/bL_switcher.c
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by:	Nicolas Pitre, March 2012
+ * Copyright:	(C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+	unsigned int id;
+	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
+	return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+	struct timespec ts;
+	getnstimeofday(&ts);
+	return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+	unsigned ib_mpidr, ib_cpu, ib_cluster;
+	long volatile handshake, **handshake_ptr = _arg;
+
+	pr_debug("%s\n", __func__);
+
+	ib_mpidr = cpu_logical_map(smp_processor_id());
+	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+	/* Advertise our handshake location */
+	if (handshake_ptr) {
+		handshake = 0;
+		*handshake_ptr = &handshake;
+	} else
+		handshake = -1;
+
+	/*
+	 * Our state has been saved at this point.  Let's release our
+	 * inbound CPU.
+	 */
+	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+	sev();
+
+	/*
+	 * From this point, we must assume that our counterpart CPU might
+	 * have taken over in its parallel world already, as if execution
+	 * just returned from cpu_suspend().  It is therefore important to
+	 * be very careful not to make any change the other guy is not
+	 * expecting.  This is why we need stack isolation.
+	 *
+	 * Fancy undercover tasks could be performed here.  For now
+	 * we have none.
+	 */
+
+	/*
+	 * Let's wait until our inbound is alive.
+	 */
+	while (!handshake) {
+		wfe();
+		smp_mb();
+	}
+
+	/* Let's put ourself down. */
+	mcpm_cpu_power_down();
+
+	/* should never get here */
+	BUG();
+}
+
+/*
+ * Stack isolation.  To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+	unsigned int mpidr = read_mpidr();
+	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	void *stack = current_thread_info() + 1;
+	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+	stack += clusterid * STACK_SIZE + STACK_SIZE;
+	call_with_stack(bL_do_switch, (void *)_arg, stack);
+	BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+	unsigned int mpidr, this_cpu, that_cpu;
+	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+	struct completion inbound_alive;
+	struct tick_device *tdev;
+	enum clock_event_mode tdev_mode;
+	long volatile *handshake_ptr;
+	int ipi_nr, ret;
+
+	this_cpu = smp_processor_id();
+	ob_mpidr = read_mpidr();
+	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+	if (new_cluster_id == ob_cluster)
+		return 0;
+
+	that_cpu = bL_switcher_cpu_pairing[this_cpu];
+	ib_mpidr = cpu_logical_map(that_cpu);
+	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+		 this_cpu, ob_mpidr, ib_mpidr);
+
+	this_cpu = smp_processor_id();
+
+	/* Close the gate for our entry vectors */
+	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+	/* Install our "inbound alive" notifier. */
+	init_completion(&inbound_alive);
+	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+	/*
+	 * Let's wake up the inbound CPU now in case it requires some delay
+	 * to come online, but leave it gated in our entry vector code.
+	 */
+	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+	if (ret) {
+		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*
+	 * Raise a SGI on the inbound CPU to make sure it doesn't stall
+	 * in a possible WFI, such as in bL_power_down().
+	 */
+	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+	/*
+	 * Wait for the inbound to come up.  This allows for other
+	 * tasks to be scheduled in the meantime.
+	 */
+	wait_for_completion(&inbound_alive);
+	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+	/*
+	 * From this point we are entering the switch critical zone
+	 * and can't take any interrupts anymore.
+	 */
+	local_irq_disable();
+	local_fiq_disable();
+	trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+	/* redirect GIC's SGIs to our counterpart */
+	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+	tdev = tick_get_device(this_cpu);
+	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+		tdev = NULL;
+	if (tdev) {
+		tdev_mode = tdev->evtdev->mode;
+		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+	}
+
+	ret = cpu_pm_enter();
+
+	/* we can not tolerate errors at this point */
+	if (ret)
+		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+	/* Swap the physical CPUs in the logical map for this logical CPU. */
+	cpu_logical_map(this_cpu) = ib_mpidr;
+	cpu_logical_map(that_cpu) = ob_mpidr;
+
+	/* Let's do the actual CPU switch. */
+	ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+	if (ret > 0)
+		panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+	/* We are executing on the inbound CPU at this point */
+	mpidr = read_mpidr();
+	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+	BUG_ON(mpidr != ib_mpidr);
+
+	mcpm_cpu_powered_up();
+
+	ret = cpu_pm_exit();
+
+	if (tdev) {
+		clockevents_set_mode(tdev->evtdev, tdev_mode);
+		clockevents_program_event(tdev->evtdev,
+					  tdev->evtdev->next_event, 1);
+	}
+
+	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+	local_fiq_enable();
+	local_irq_enable();
+
+	*handshake_ptr = 1;
+	dsb_sev();
+
+	if (ret)
+		pr_err("%s exiting with error %d\n", __func__, ret);
+	return ret;
+}
+
+struct bL_thread {
+	spinlock_t lock;
+	struct task_struct *task;
+	wait_queue_head_t wq;
+	int wanted_cluster;
+	struct completion started;
+	bL_switch_completion_handler completer;
+	void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+	struct bL_thread *t = arg;
+	struct sched_param param = { .sched_priority = 1 };
+	int cluster;
+	bL_switch_completion_handler completer;
+	void *completer_cookie;
+
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	complete(&t->started);
+
+	do {
+		if (signal_pending(current))
+			flush_signals(current);
+		wait_event_interruptible(t->wq,
+				t->wanted_cluster != -1 ||
+				kthread_should_stop());
+
+		spin_lock(&t->lock);
+		cluster = t->wanted_cluster;
+		completer = t->completer;
+		completer_cookie = t->completer_cookie;
+		t->wanted_cluster = -1;
+		t->completer = NULL;
+		spin_unlock(&t->lock);
+
+		if (cluster != -1) {
+			bL_switch_to(cluster);
+
+			if (completer)
+				completer(completer_cookie);
+		}
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+	struct task_struct *task;
+
+	task = kthread_create_on_node(bL_switcher_thread, arg,
+				      cpu_to_node(cpu), "kswitcher_%d", cpu);
+	if (!IS_ERR(task)) {
+		kthread_bind(task, cpu);
+		wake_up_process(task);
+	} else
+		pr_err("%s failed for CPU %d\n", __func__, cpu);
+	return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ *      with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback.  if non-NULL,
+ *	@completer(@completer_cookie) will be called on completion of
+ *	the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread.  This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete.  This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns.  When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+			 bL_switch_completion_handler completer,
+			 void *completer_cookie)
+{
+	struct bL_thread *t;
+
+	if (cpu >= ARRAY_SIZE(bL_threads)) {
+		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+		return -EINVAL;
+	}
+
+	t = &bL_threads[cpu];
+
+	if (IS_ERR(t->task))
+		return PTR_ERR(t->task);
+	if (!t->task)
+		return -ESRCH;
+
+	spin_lock(&t->lock);
+	if (t->completer) {
+		spin_unlock(&t->lock);
+		return -EBUSY;
+	}
+	t->completer = completer;
+	t->completer_cookie = completer_cookie;
+	t->wanted_cluster = new_cluster_id;
+	spin_unlock(&t->lock);
+	wake_up(&t->wq);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+	int ret;
+
+	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+	if (ret & NOTIFY_STOP_MASK)
+		pr_err("%s: notifier chain failed with status 0x%x\n",
+			__func__, ret);
+	return notifier_to_errno(ret);
+}
+
+static void bL_switcher_restore_cpus(void)
+{
+	int i;
+
+	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+		cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+	int i, j, cluster_0, gic_id, ret;
+	unsigned int cpu, cluster, mask;
+	cpumask_t available_cpus;
+
+	/* First pass to validate what we have */
+	mask = 0;
+	for_each_online_cpu(i) {
+		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+		if (cluster >= 2) {
+			pr_err("%s: only dual cluster systems are supported\n", __func__);
+			return -EINVAL;
+		}
+		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+			return -EINVAL;
+		mask |= (1 << cluster);
+	}
+	if (mask != 3) {
+		pr_err("%s: no CPU pairing possible\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Now let's do the pairing.  We match each CPU with another CPU
+	 * from a different cluster.  To get a uniform scheduling behavior
+	 * without fiddling with CPU topology and compute capacity data,
+	 * we'll use logical CPUs initially belonging to the same cluster.
+	 */
+	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+	cpumask_copy(&available_cpus, cpu_online_mask);
+	cluster_0 = -1;
+	for_each_cpu(i, &available_cpus) {
+		int match = -1;
+		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+		if (cluster_0 == -1)
+			cluster_0 = cluster;
+		if (cluster != cluster_0)
+			continue;
+		cpumask_clear_cpu(i, &available_cpus);
+		for_each_cpu(j, &available_cpus) {
+			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+			/*
+			 * Let's remember the last match to create "odd"
+			 * pairings on purpose in order for other code not
+			 * to assume any relation between physical and
+			 * logical CPU numbers.
+			 */
+			if (cluster != cluster_0)
+				match = j;
+		}
+		if (match != -1) {
+			bL_switcher_cpu_pairing[i] = match;
+			cpumask_clear_cpu(match, &available_cpus);
+			pr_info("CPU%d paired with CPU%d\n", i, match);
+		}
+	}
+
+	/*
+	 * Now we disable the unwanted CPUs i.e. everything that has no
+	 * pairing information (that includes the pairing counterparts).
+	 */
+	cpumask_clear(&bL_switcher_removed_logical_cpus);
+	for_each_online_cpu(i) {
+		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+		/* Let's take note of the GIC ID for this CPU */
+		gic_id = gic_get_cpu_id(i);
+		if (gic_id < 0) {
+			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+			bL_switcher_restore_cpus();
+			return -EINVAL;
+		}
+		bL_gic_id[cpu][cluster] = gic_id;
+		pr_info("GIC ID for CPU %u cluster %u is %u\n",
+			cpu, cluster, gic_id);
+
+		if (bL_switcher_cpu_pairing[i] != -1) {
+			bL_switcher_cpu_original_cluster[i] = cluster;
+			continue;
+		}
+
+		ret = cpu_down(i);
+		if (ret) {
+			bL_switcher_restore_cpus();
+			return ret;
+		}
+		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+	}
+
+	return 0;
+}
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+	int cpu;
+
+	if (!bL_switcher_active)
+		return -EUNATCH;
+
+	mpidr &= MPIDR_HWID_BITMASK;
+	for_each_online_cpu(cpu) {
+		int pairing = bL_switcher_cpu_pairing[cpu];
+		if (pairing == -1)
+			continue;
+		if ((mpidr == cpu_logical_map(cpu)) ||
+		    (mpidr == cpu_logical_map(pairing)))
+			return cpu;
+	}
+	return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+	trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+	int ret;
+
+	preempt_disable();
+
+	bL_switcher_trace_trigger_cpu(NULL);
+	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+	preempt_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+	int cpu, ret;
+
+	mutex_lock(&bL_switcher_activation_lock);
+	lock_device_hotplug();
+	if (bL_switcher_active) {
+		unlock_device_hotplug();
+		mutex_unlock(&bL_switcher_activation_lock);
+		return 0;
+	}
+
+	pr_info("big.LITTLE switcher initializing\n");
+
+	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+	if (ret)
+		goto error;
+
+	ret = bL_switcher_halve_cpus();
+	if (ret)
+		goto error;
+
+	bL_switcher_trace_trigger();
+
+	for_each_online_cpu(cpu) {
+		struct bL_thread *t = &bL_threads[cpu];
+		spin_lock_init(&t->lock);
+		init_waitqueue_head(&t->wq);
+		init_completion(&t->started);
+		t->wanted_cluster = -1;
+		t->task = bL_switcher_thread_create(cpu, t);
+	}
+
+	bL_switcher_active = 1;
+	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+	pr_info("big.LITTLE switcher initialized\n");
+	goto out;
+
+error:
+	pr_warn("big.LITTLE switcher initialization failed\n");
+	bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+	unlock_device_hotplug();
+	mutex_unlock(&bL_switcher_activation_lock);
+	return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+	unsigned int cpu, cluster;
+	struct bL_thread *t;
+	struct task_struct *task;
+
+	mutex_lock(&bL_switcher_activation_lock);
+	lock_device_hotplug();
+
+	if (!bL_switcher_active)
+		goto out;
+
+	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+		goto out;
+	}
+
+	bL_switcher_active = 0;
+
+	/*
+	 * To deactivate the switcher, we must shut down the switcher
+	 * threads to prevent any other requests from being accepted.
+	 * Then, if the final cluster for a given logical CPU is not the
+	 * same as the original one, we'll recreate a switcher thread
+	 * just for the purpose of switching the CPU back without any
+	 * possibility for interference from external requests.
+	 */
+	for_each_online_cpu(cpu) {
+		t = &bL_threads[cpu];
+		task = t->task;
+		t->task = NULL;
+		if (!task || IS_ERR(task))
+			continue;
+		kthread_stop(task);
+		/* no more switch may happen on this CPU at this point */
+		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+		if (cluster == bL_switcher_cpu_original_cluster[cpu])
+			continue;
+		init_completion(&t->started);
+		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+		task = bL_switcher_thread_create(cpu, t);
+		if (!IS_ERR(task)) {
+			wait_for_completion(&t->started);
+			kthread_stop(task);
+			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+			if (cluster == bL_switcher_cpu_original_cluster[cpu])
+				continue;
+		}
+		/* If execution gets here, we're in trouble. */
+		pr_crit("%s: unable to restore original cluster for CPU %d\n",
+			__func__, cpu);
+		pr_crit("%s: CPU %d can't be restored\n",
+			__func__, bL_switcher_cpu_pairing[cpu]);
+		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+				  &bL_switcher_removed_logical_cpus);
+	}
+
+	bL_switcher_restore_cpus();
+	bL_switcher_trace_trigger();
+
+	bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+	unlock_device_hotplug();
+	mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+
+	switch (buf[0]) {
+	case '0':
+		bL_switcher_disable();
+		ret = 0;
+		break;
+	case '1':
+		ret = bL_switcher_enable();
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = bL_switcher_trace_trigger();
+
+	return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+	__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+	&bL_switcher_active_attr.attr,
+	&bL_switcher_trace_trigger_attr.attr,
+	NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+	.attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+	int ret;
+
+	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+	if (!bL_switcher_kobj)
+		return -ENOMEM;
+	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+	if (ret)
+		kobject_put(bL_switcher_kobj);
+	return ret;
+}
+
+#endif  /* CONFIG_SYSFS */
+
+bool bL_switcher_get_enabled(void)
+{
+	mutex_lock(&bL_switcher_activation_lock);
+
+	return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+	mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
+
+/*
+ * Veto any CPU hotplug operation on those CPUs we've removed
+ * while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	if (bL_switcher_active) {
+		int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
+		switch (action & 0xf) {
+		case CPU_UP_PREPARE:
+		case CPU_DOWN_PREPARE:
+			if (pairing == -1)
+				return NOTIFY_BAD;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+	int ret;
+
+	if (MAX_NR_CLUSTERS != 2) {
+		pr_err("%s: only dual cluster systems are supported\n", __func__);
+		return -EINVAL;
+	}
+
+	cpu_notifier(bL_switcher_hotplug_callback, 0);
+
+	if (!no_bL_switcher) {
+		ret = bL_switcher_enable();
+		if (ret)
+			return ret;
+	}
+
+#ifdef CONFIG_SYSFS
+	ret = bL_switcher_sysfs_init();
+	if (ret)
+		pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+	return 0;
+}
+
+late_initcall(bL_switcher_init);
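
A hedged usage sketch for the callback-based API above (not part of the
patch): a kernel-side caller can turn bL_switch_request_cb() into a
synchronous operation by completing from the completer, which -- as the
kernel-doc above states -- runs in non-atomic context:

	#include <linux/completion.h>
	#include <asm/bL_switcher.h>

	static void switch_done(void *cookie)
	{
		complete(cookie);	/* cookie is the completion below */
	}

	static int switch_and_wait(unsigned int cpu, unsigned int cluster)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int ret = bL_switch_request_cb(cpu, cluster, switch_done, &done);

		if (ret)	/* -EINVAL, -ESRCH or -EBUSY, per the code above */
			return ret;
		wait_for_completion(&done);
		return 0;
	}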
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644
index 0000000..3f47f12
--- /dev/null
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by:	Nicolas Pitre, November 2012
+ * Copyright:	(C) 2012-2013  Linaro Limited
+ *
+ * Dummy interface to user space for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+			size_t len, loff_t *pos)
+{
+	unsigned char val[3];
+	unsigned int cpu, cluster;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	if (len < 3)
+		return -EINVAL;
+
+	if (copy_from_user(val, buf, 3))
+		return -EFAULT;
+
+	/* format: <cpu#>,<cluster#> */
+	if (val[0] < '0' || val[0] > '9' ||
+	    val[1] != ',' ||
+	    val[2] < '0' || val[2] > '1')
+		return -EINVAL;
+
+	cpu = val[0] - '0';
+	cluster = val[2] - '0';
+	ret = bL_switch_request(cpu, cluster);
+
+	return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+	.write		= bL_switcher_write,
+	.owner	= THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+	MISC_DYNAMIC_MINOR,
+	"b.L_switcher",
+	&bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+	return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+	misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
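
For completeness, a hedged user-space sketch driving the dummy interface; the
device node path assumes the usual udev naming for misc devices, and "0,1"
follows the <cpu#>,<cluster#> format parsed by bL_switcher_write() above:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/b.L_switcher", O_WRONLY);

		if (fd < 0)
			return 1;
		/* ask logical CPU 0 to switch to cluster 1 */
		if (write(fd, "0,1", 3) != 3) {
			close(fd);
			return 1;
		}
		return close(fd) ? 1 : 0;
	}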
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 9902509..26020a0 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -27,6 +27,18 @@
 	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
 }
 
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+			 unsigned long poke_phys_addr, unsigned long poke_val)
+{
+	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+	poke[0] = poke_phys_addr;
+	poke[1] = poke_val;
+	__cpuc_flush_dcache_area((void *)poke, 8);
+	outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
 static const struct mcpm_platform_ops *platform_ops;
 
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@@ -90,6 +102,21 @@
 	BUG();
 }
 
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+	int ret;
+
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+		return -EUNATCH;
+
+	ret = platform_ops->power_down_finish(cpu, cluster);
+	if (ret)
+		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+			__func__, cpu, cluster, ret);
+
+	return ret;
+}
+
 void mcpm_cpu_suspend(u64 expected_residency)
 {
 	phys_reset_t phys_reset;
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 39c96df..e02db4b 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -15,6 +15,7 @@
 
 #include <linux/linkage.h>
 #include <asm/mcpm.h>
+#include <asm/assembler.h>
 
 #include "vlock.h"
 
@@ -47,6 +48,7 @@
 
 ENTRY(mcpm_entry_point)
 
+ ARM_BE8(setend        be)
  THUMB(	adr	r12, BSYM(1f)	)
  THUMB(	bx	r12		)
  THUMB(	.thumb			)
@@ -71,12 +73,19 @@
 	 * position independent way.
 	 */
 	adr	r5, 3f
-	ldmia	r5, {r6, r7, r8, r11}
+	ldmia	r5, {r0, r6, r7, r8, r11}
+	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
 	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
 	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
 	add	r8, r5, r8			@ r8 = mcpm_sync
 	add	r11, r5, r11			@ r11 = first_man_locks
 
+	@ Perform an early poke, if any
+	add	r0, r0, r4, lsl #3
+	ldmia	r0, {r0, r1}
+	teq	r0, #0
+	strne	r1, [r0]
+
 	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
 	mla	r8, r0, r10, r8			@ r8 = sync cluster base
 
@@ -195,7 +204,8 @@
 
 	.align	2
 
-3:	.word	mcpm_entry_vectors - .
+3:	.word	mcpm_entry_early_pokes - .
+	.word	mcpm_entry_vectors - 3b
 	.word	mcpm_power_up_setup_phys - 3b
 	.word	mcpm_sync - 3b
 	.word	first_man_locks - 3b
@@ -214,6 +224,10 @@
 ENTRY(mcpm_entry_vectors)
 	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
 
+	.type	mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
 	.type	mcpm_power_up_setup_phys, #object
 ENTRY(mcpm_power_up_setup_phys)
 	.space  4		@ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 1bc34c7..177251a 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,14 +19,23 @@
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
 
-static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static void cpu_to_pcpu(unsigned int cpu,
+			unsigned int *pcpu, unsigned int *pcluster)
 {
-	unsigned int mpidr, pcpu, pcluster, ret;
-	extern void secondary_startup(void);
+	unsigned int mpidr;
 
 	mpidr = cpu_logical_map(cpu);
-	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
+static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned int pcpu, pcluster, ret;
+	extern void secondary_startup(void);
+
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
 	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
 		 __func__, cpu, pcpu, pcluster);
 
@@ -47,6 +56,15 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 
+static int mcpm_cpu_kill(unsigned int cpu)
+{
+	unsigned int pcpu, pcluster;
+
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+	return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
 static int mcpm_cpu_disable(unsigned int cpu)
 {
 	/*
@@ -73,6 +91,7 @@
 	.smp_boot_secondary	= mcpm_boot_secondary,
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_kill		= mcpm_cpu_kill,
 	.cpu_disable		= mcpm_cpu_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index e901d0f..ce922d0 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -175,7 +175,7 @@
 
 static struct irqaction sp804_timer_irq = {
 	.name		= "timer",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
 	.handler	= sp804_timer_interrupt,
 	.dev_id		= &sp804_clockevent,
 };
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 317960f..0142ec3 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_MODULES=y
@@ -11,11 +12,11 @@
 CONFIG_SA1100_H3600=y
 CONFIG_PCCARD=y
 CONFIG_PCMCIA_SA1100=y
+CONFIG_PREEMPT=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -24,13 +25,10 @@
 CONFIG_IRLAN=m
 CONFIG_IRNET=m
 CONFIG_IRCOMM=m
-CONFIG_SA1100_FIR=m
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,19 +39,15 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECS=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_NET_PCMCIA=y
 CONFIG_PCMCIA_PCNET=y
 CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_WLAN is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -64,8 +58,6 @@
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_SA1100=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_MSDOS_FS=m
@@ -74,6 +66,4 @@
 CONFIG_CRAMFS=m
 CONFIG_NFS_FS=y
 CONFIG_NFSD=m
-CONFIG_SMB_FS=m
 CONFIG_NLS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644
index 0000000..6231d36
--- /dev/null
+++ b/arch/arm/crypto/.gitignore
@@ -0,0 +1 @@
+aesbs-core.S
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index a2c8385..81cda39 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -3,7 +3,17 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
 
-aes-arm-y  := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y	:= aes-armv4.o aes_glue.o
+aes-arm-bs-y	:= aesbs-core.o aesbs-glue.o
+sha1-arm-y	:= sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+	$(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 59f7877..3003fa1 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -6,22 +6,12 @@
 #include <linux/crypto.h>
 #include <crypto/aes.h>
 
-#define AES_MAXNR 14
+#include "aes_glue.h"
 
-typedef struct {
-	unsigned int rd_key[4 *(AES_MAXNR + 1)];
-	int rounds;
-} AES_KEY;
-
-struct AES_CTX {
-	AES_KEY enc_key;
-	AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -81,7 +71,7 @@
 		.cipher	= {
 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
-			.cia_setkey			= aes_set_key,
+			.cia_setkey		= aes_set_key,
 			.cia_encrypt		= aes_encrypt,
 			.cia_decrypt		= aes_decrypt
 		}
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644
index 0000000..cca3e51
--- /dev/null
+++ b/arch/arm/crypto/aes_glue.h
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+	unsigned int rd_key[4 * (AES_MAXNR + 1)];
+	int rounds;
+};
+
+struct AES_CTX {
+	struct AES_KEY enc_key;
+	struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+					   const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+					   const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644
index 0000000..64205d4
--- /dev/null
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt	19.5 cycles per byte processed with 128-bit key
+@ decrypt	22.1 cycles per byte processed with 128-bit key
+@ key conv.	440  cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@						<appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@					<ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu	neon
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	r6,_bsaes_decrypt8
+	vldmia	r4!, {q9}		@ round 0 key
+	add	r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+	vldmia	r6!, {q8}		@ .LM0ISR
+	veor	q10, q0, q9	@ xor with round0 key
+	veor	q11, q1, q9
+	 vtbl.8	d0, {q10}, d16
+	 vtbl.8	d1, {q10}, d17
+	veor	q12, q2, q9
+	 vtbl.8	d2, {q11}, d16
+	 vtbl.8	d3, {q11}, d17
+	veor	q13, q3, q9
+	 vtbl.8	d4, {q12}, d16
+	 vtbl.8	d5, {q12}, d17
+	veor	q14, q4, q9
+	 vtbl.8	d6, {q13}, d16
+	 vtbl.8	d7, {q13}, d17
+	veor	q15, q5, q9
+	 vtbl.8	d8, {q14}, d16
+	 vtbl.8	d9, {q14}, d17
+	veor	q10, q6, q9
+	 vtbl.8	d10, {q15}, d16
+	 vtbl.8	d11, {q15}, d17
+	veor	q11, q7, q9
+	 vtbl.8	d12, {q10}, d16
+	 vtbl.8	d13, {q10}, d17
+	 vtbl.8	d14, {q11}, d16
+	 vtbl.8	d15, {q11}, d17
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q4, #1
+	veor		q10, q10, q7
+	 veor		q11, q11, q5
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #1
+	 veor		q5, q5, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q3
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q5, #2
+	 vshr.u64	q11, q4, #2
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q5, q5, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q3
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q3, #4
+	 vshr.u64	q11, q2, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #4
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q4
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	sub	r5,r5,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+	vldmia	r4!, {q8-q11}
+	veor	q8, q8, q0
+	veor	q9, q9, q1
+	vtbl.8	d0, {q8}, d24
+	vtbl.8	d1, {q8}, d25
+	vldmia	r4!, {q8}
+	veor	q10, q10, q2
+	vtbl.8	d2, {q9}, d24
+	vtbl.8	d3, {q9}, d25
+	vldmia	r4!, {q9}
+	veor	q11, q11, q3
+	vtbl.8	d4, {q10}, d24
+	vtbl.8	d5, {q10}, d25
+	vldmia	r4!, {q10}
+	vtbl.8	d6, {q11}, d24
+	vtbl.8	d7, {q11}, d25
+	vldmia	r4!, {q11}
+	veor	q8, q8, q4
+	veor	q9, q9, q5
+	vtbl.8	d8, {q8}, d24
+	vtbl.8	d9, {q8}, d25
+	veor	q10, q10, q6
+	vtbl.8	d10, {q9}, d24
+	vtbl.8	d11, {q9}, d25
+	veor	q11, q11, q7
+	vtbl.8	d12, {q10}, d24
+	vtbl.8	d13, {q10}, d25
+	vtbl.8	d14, {q11}, d24
+	vtbl.8	d15, {q11}, d25
+.Ldec_sbox:
+	 veor	q1, q1, q4
+	veor	q3, q3, q4
+
+	veor	q4, q4, q7
+	 veor	q1, q1, q6
+	veor	q2, q2, q7
+	veor	q6, q6, q4
+
+	veor	q0, q0, q1
+	veor	q2, q2, q5
+	 veor	q7, q7, q6
+	veor	q3, q3, q0
+	veor	q5, q5, q0
+	veor	q1, q1, q3
+	veor	q11, q3, q0
+	veor	q10, q7, q4
+	veor	q9, q1, q6
+	veor	q13, q4, q0
+	 vmov	q8, q10
+	veor	q12, q5, q2
+
+	vorr	q10, q10, q9
+	veor	q15, q11, q8
+	vand	q14, q11, q12
+	vorr	q11, q11, q12
+	veor	q12, q12, q9
+	vand	q8, q8, q9
+	veor	q9, q6, q2
+	vand	q15, q15, q12
+	vand	q13, q13, q9
+	veor	q9, q3, q7
+	veor	q12, q1, q5
+	veor	q11, q11, q13
+	veor	q10, q10, q13
+	vand	q13, q9, q12
+	vorr	q9, q9, q12
+	veor	q11, q11, q15
+	veor	q8, q8, q13
+	veor	q10, q10, q14
+	veor	q9, q9, q15
+	veor	q8, q8, q14
+	vand	q12, q4, q6
+	veor	q9, q9, q14
+	vand	q13, q0, q2
+	vand	q14, q7, q1
+	vorr	q15, q3, q5
+	veor	q11, q11, q12
+	veor	q9, q9, q14
+	veor	q8, q8, q15
+	veor	q10, q10, q13
+
+	@ Inv_GF16 	0, 	1, 	2, 	3, s0, s1, s2, s3
+
+	@ new smaller inversion
+
+	vand	q14, q11, q9
+	vmov	q12, q8
+
+	veor	q13, q10, q14
+	veor	q15, q8, q14
+	veor	q14, q8, q14	@ q14=q15
+
+	vbsl	q13, q9, q8
+	vbsl	q15, q11, q10
+	veor	q11, q11, q10
+
+	vbsl	q12, q13, q14
+	vbsl	q8, q14, q13
+
+	vand	q14, q12, q15
+	veor	q9, q9, q8
+
+	veor	q14, q14, q11
+	veor	q12, q5, q2
+	veor	q8, q1, q6
+	veor 	q10, q15, q14
+	vand	q10, q10, q5
+	veor	q5, q5, q1
+	vand	q11, q1, q15
+	vand	q5, q5, q14
+	veor	q1, q11, q10
+	veor	q5, q5, q11
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q2
+	veor	q12, q12, q8
+	 veor	q2, q2, q6
+	vand	q8, q8, q15
+	 vand	q6, q6, q13
+	vand	q12, q12, q14
+	 vand	q2, q2, q9
+	veor	q8, q8, q12
+	 veor	q2, q2, q6
+	veor	q12, q12, q11
+	 veor	q6, q6, q10
+	veor	q5, q5, q12
+	veor	q2, q2, q12
+	veor	q1, q1, q8
+	veor	q6, q6, q8
+
+	veor	q12, q3, q0
+	veor	q8, q7, q4
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q0
+	veor	q12, q12, q8
+	 veor	q0, q0, q4
+	vand	q8, q8, q15
+	 vand	q4, q4, q13
+	vand	q12, q12, q14
+	 vand	q0, q0, q9
+	veor	q8, q8, q12
+	 veor	q0, q0, q4
+	veor	q12, q12, q11
+	 veor	q4, q4, q10
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor 	q10, q15, q14
+	vand	q10, q10, q3
+	veor	q3, q3, q7
+	vand	q11, q7, q15
+	vand	q3, q3, q14
+	veor	q7, q11, q10
+	veor	q3, q3, q11
+	veor	q3, q3, q12
+	veor	q0, q0, q12
+	veor	q7, q7, q8
+	veor	q4, q4, q8
+	veor	q1, q1, q7
+	veor	q6, q6, q5
+
+	veor	q4, q4, q1
+	veor	q2, q2, q7
+	veor	q5, q5, q7
+	veor	q4, q4, q2
+	 veor 	q7, q7, q0
+	veor	q4, q4, q5
+	 veor	q3, q3, q6
+	 veor	q6, q6, q1
+	veor	q3, q3, q4
+
+	veor	q4, q4, q0
+	veor	q7, q7, q3
+	subs	r5,r5,#1
+	bcc	.Ldec_done
+	@ multiplication by 0x05-0x00-0x04-0x00
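+	@ [note: InvMixColumns is evaluated as MixColumns composed with a
+	@ multiplication by 0x04*x^2 + 0x05 -- the 0x05-0x00-0x04-0x00
+	@ coefficients above -- since (0e,0b,0d,09) = (02,03,01,01) x
+	@ (05,00,04,00) in GF(2^8)[x]/(x^4+1), which is cheaper bit-sliced]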
+	vext.8	q8, q0, q0, #8
+	vext.8	q14, q3, q3, #8
+	vext.8	q15, q5, q5, #8
+	veor	q8, q8, q0
+	vext.8	q9, q1, q1, #8
+	veor	q14, q14, q3
+	vext.8	q10, q6, q6, #8
+	veor	q15, q15, q5
+	vext.8	q11, q4, q4, #8
+	veor	q9, q9, q1
+	vext.8	q12, q2, q2, #8
+	veor	q10, q10, q6
+	vext.8	q13, q7, q7, #8
+	veor	q11, q11, q4
+	veor	q12, q12, q2
+	veor	q13, q13, q7
+
+	 veor	q0, q0, q14
+	 veor	q1, q1, q14
+	 veor	q6, q6, q8
+	 veor	q2, q2, q10
+	 veor	q4, q4, q9
+	 veor	q1, q1, q15
+	 veor	q6, q6, q15
+	 veor	q2, q2, q14
+	 veor	q7, q7, q11
+	 veor	q4, q4, q14
+	 veor	q3, q3, q12
+	 veor	q2, q2, q15
+	 veor	q7, q7, q15
+	 veor	q5, q5, q13
+	vext.8	q8, q0, q0, #12	@ x0 <<< 32
+	vext.8	q9, q1, q1, #12
+	 veor	q0, q0, q8		@ x0 ^ (x0 <<< 32)
+	vext.8	q10, q6, q6, #12
+	 veor	q1, q1, q9
+	vext.8	q11, q4, q4, #12
+	 veor	q6, q6, q10
+	vext.8	q12, q2, q2, #12
+	 veor	q4, q4, q11
+	vext.8	q13, q7, q7, #12
+	 veor	q2, q2, q12
+	vext.8	q14, q3, q3, #12
+	 veor	q7, q7, q13
+	vext.8	q15, q5, q5, #12
+	 veor	q3, q3, q14
+
+	veor	q9, q9, q0
+	 veor	q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+	veor	q10, q10, q1
+	veor	q8, q8, q5
+	veor	q9, q9, q5
+	 vext.8	q1, q1, q1, #8
+	veor	q13, q13, q2
+	 veor	q0, q0, q8
+	veor	q14, q14, q7
+	 veor	q1, q1, q9
+	 vext.8	q8, q2, q2, #8
+	veor	q12, q12, q4
+	 vext.8	q9, q7, q7, #8
+	veor	q15, q15, q3
+	 vext.8	q2, q4, q4, #8
+	veor	q11, q11, q6
+	 vext.8	q7, q5, q5, #8
+	veor	q12, q12, q5
+	 vext.8	q4, q3, q3, #8
+	veor	q11, q11, q5
+	 vext.8	q3, q6, q6, #8
+	veor	q5, q9, q13
+	veor	q11, q11, q2
+	veor	q7, q7, q15
+	veor	q6, q4, q14
+	veor	q4, q8, q12
+	veor	q2, q3, q10
+	vmov	q3, q11
+	 @ vmov	q5, q9
+	vldmia	r6, {q12}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	r6,r6,#0x10
+	bne	.Ldec_loop
+	vldmia	r6, {q12}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q3, #1
+	 vshr.u64	q11, q2, #1
+	veor		q10, q10, q5
+	 veor		q11, q11, q7
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #1
+	 veor		q7, q7, q11
+	 vshl.u64	q11, q11, #1
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q4
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q4, q4, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q7, #2
+	 vshr.u64	q11, q2, #2
+	veor		q10, q10, q5
+	 veor		q11, q11, q3
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #2
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #2
+	veor		q7, q7, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q4
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q4, q4, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q4, #4
+	 vshr.u64	q11, q6, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q3
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #4
+	veor		q4, q4, q10
+	 veor		q6, q6, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q2
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vldmia	r4, {q8}			@ last round key
+	veor	q6, q6, q8
+	veor	q4, q4, q8
+	veor	q2, q2, q8
+	veor	q7, q7, q8
+	veor	q3, q3, q8
+	veor	q5, q5, q8
+	veor	q0, q0, q8
+	veor	q1, q1, q8
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
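+
+@ [note: the .quad pairs in _bsaes_const are byte-permutation indices
+@ consumed by vtbl.8: the M0 tables reorder the input bytes for the
+@ bit-slicing transpose, while the SR/ISR variants additionally apply
+@ ShiftRows or InvShiftRows between rounds]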
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	r6,_bsaes_encrypt8
+	vldmia	r4!, {q9}		@ round 0 key
+	sub	r6,r6,#_bsaes_encrypt8-.LM0SR
+
+	vldmia	r6!, {q8}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	q10, q0, q9	@ xor with round0 key
+	veor	q11, q1, q9
+	 vtbl.8	d0, {q10}, d16
+	 vtbl.8	d1, {q10}, d17
+	veor	q12, q2, q9
+	 vtbl.8	d2, {q11}, d16
+	 vtbl.8	d3, {q11}, d17
+	veor	q13, q3, q9
+	 vtbl.8	d4, {q12}, d16
+	 vtbl.8	d5, {q12}, d17
+	veor	q14, q4, q9
+	 vtbl.8	d6, {q13}, d16
+	 vtbl.8	d7, {q13}, d17
+	veor	q15, q5, q9
+	 vtbl.8	d8, {q14}, d16
+	 vtbl.8	d9, {q14}, d17
+	veor	q10, q6, q9
+	 vtbl.8	d10, {q15}, d16
+	 vtbl.8	d11, {q15}, d17
+	veor	q11, q7, q9
+	 vtbl.8	d12, {q10}, d16
+	 vtbl.8	d13, {q10}, d17
+	 vtbl.8	d14, {q11}, d16
+	 vtbl.8	d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q4, #1
+	veor		q10, q10, q7
+	 veor		q11, q11, q5
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #1
+	 veor		q5, q5, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q3
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q5, #2
+	 vshr.u64	q11, q4, #2
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q5, q5, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q3
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q3, #4
+	 vshr.u64	q11, q2, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #4
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q4
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	sub	r5,r5,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+	vldmia	r4!, {q8-q11}
+	veor	q8, q8, q0
+	veor	q9, q9, q1
+	vtbl.8	d0, {q8}, d24
+	vtbl.8	d1, {q8}, d25
+	vldmia	r4!, {q8}
+	veor	q10, q10, q2
+	vtbl.8	d2, {q9}, d24
+	vtbl.8	d3, {q9}, d25
+	vldmia	r4!, {q9}
+	veor	q11, q11, q3
+	vtbl.8	d4, {q10}, d24
+	vtbl.8	d5, {q10}, d25
+	vldmia	r4!, {q10}
+	vtbl.8	d6, {q11}, d24
+	vtbl.8	d7, {q11}, d25
+	vldmia	r4!, {q11}
+	veor	q8, q8, q4
+	veor	q9, q9, q5
+	vtbl.8	d8, {q8}, d24
+	vtbl.8	d9, {q8}, d25
+	veor	q10, q10, q6
+	vtbl.8	d10, {q9}, d24
+	vtbl.8	d11, {q9}, d25
+	veor	q11, q11, q7
+	vtbl.8	d12, {q10}, d24
+	vtbl.8	d13, {q10}, d25
+	vtbl.8	d14, {q11}, d24
+	vtbl.8	d15, {q11}, d25
+.Lenc_sbox:
+	veor	q2, q2, q1
+	veor	q5, q5, q6
+	veor	q3, q3, q0
+	veor	q6, q6, q2
+	veor	q5, q5, q0
+
+	veor	q6, q6, q3
+	veor	q3, q3, q7
+	veor	q7, q7, q5
+	veor	q3, q3, q4
+	veor	q4, q4, q5
+
+	veor	q2, q2, q7
+	veor	q3, q3, q1
+	veor	q1, q1, q5
+	veor	q11, q7, q4
+	veor	q10, q1, q2
+	veor	q9, q5, q3
+	veor	q13, q2, q4
+	 vmov	q8, q10
+	veor	q12, q6, q0
+
+	vorr	q10, q10, q9
+	veor	q15, q11, q8
+	vand	q14, q11, q12
+	vorr	q11, q11, q12
+	veor	q12, q12, q9
+	vand	q8, q8, q9
+	veor	q9, q3, q0
+	vand	q15, q15, q12
+	vand	q13, q13, q9
+	veor	q9, q7, q1
+	veor	q12, q5, q6
+	veor	q11, q11, q13
+	veor	q10, q10, q13
+	vand	q13, q9, q12
+	vorr	q9, q9, q12
+	veor	q11, q11, q15
+	veor	q8, q8, q13
+	veor	q10, q10, q14
+	veor	q9, q9, q15
+	veor	q8, q8, q14
+	vand	q12, q2, q3
+	veor	q9, q9, q14
+	vand	q13, q4, q0
+	vand	q14, q1, q5
+	vorr	q15, q7, q6
+	veor	q11, q11, q12
+	veor	q9, q9, q14
+	veor	q8, q8, q15
+	veor	q10, q10, q13
+
+	@ Inv_GF16 	0, 	1, 	2, 	3, s0, s1, s2, s3
+
+	@ new smaller inversion
+
+	vand	q14, q11, q9
+	vmov	q12, q8
+
+	veor	q13, q10, q14
+	veor	q15, q8, q14
+	veor	q14, q8, q14	@ q14=q15
+
+	vbsl	q13, q9, q8
+	vbsl	q15, q11, q10
+	veor	q11, q11, q10
+
+	vbsl	q12, q13, q14
+	vbsl	q8, q14, q13
+
+	vand	q14, q12, q15
+	veor	q9, q9, q8
+
+	veor	q14, q14, q11
+	veor	q12, q6, q0
+	veor	q8, q5, q3
+	veor 	q10, q15, q14
+	vand	q10, q10, q6
+	veor	q6, q6, q5
+	vand	q11, q5, q15
+	vand	q6, q6, q14
+	veor	q5, q11, q10
+	veor	q6, q6, q11
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q0
+	veor	q12, q12, q8
+	 veor	q0, q0, q3
+	vand	q8, q8, q15
+	 vand	q3, q3, q13
+	vand	q12, q12, q14
+	 vand	q0, q0, q9
+	veor	q8, q8, q12
+	 veor	q0, q0, q3
+	veor	q12, q12, q11
+	 veor	q3, q3, q10
+	veor	q6, q6, q12
+	veor	q0, q0, q12
+	veor	q5, q5, q8
+	veor	q3, q3, q8
+
+	veor	q12, q7, q4
+	veor	q8, q1, q2
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q4
+	veor	q12, q12, q8
+	 veor	q4, q4, q2
+	vand	q8, q8, q15
+	 vand	q2, q2, q13
+	vand	q12, q12, q14
+	 vand	q4, q4, q9
+	veor	q8, q8, q12
+	 veor	q4, q4, q2
+	veor	q12, q12, q11
+	 veor	q2, q2, q10
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor 	q10, q15, q14
+	vand	q10, q10, q7
+	veor	q7, q7, q1
+	vand	q11, q1, q15
+	vand	q7, q7, q14
+	veor	q1, q11, q10
+	veor	q7, q7, q11
+	veor	q7, q7, q12
+	veor	q4, q4, q12
+	veor	q1, q1, q8
+	veor	q2, q2, q8
+	veor	q7, q7, q0
+	veor	q1, q1, q6
+	veor	q6, q6, q0
+	veor	q4, q4, q7
+	veor	q0, q0, q1
+
+	veor	q1, q1, q5
+	veor	q5, q5, q2
+	veor	q2, q2, q3
+	veor	q3, q3, q5
+	veor	q4, q4, q5
+
+	veor	q6, q6, q3
+	subs	r5,r5,#1
+	bcc	.Lenc_done
+	vext.8	q8, q0, q0, #12	@ x0 <<< 32
+	vext.8	q9, q1, q1, #12
+	 veor	q0, q0, q8		@ x0 ^ (x0 <<< 32)
+	vext.8	q10, q4, q4, #12
+	 veor	q1, q1, q9
+	vext.8	q11, q6, q6, #12
+	 veor	q4, q4, q10
+	vext.8	q12, q3, q3, #12
+	 veor	q6, q6, q11
+	vext.8	q13, q7, q7, #12
+	 veor	q3, q3, q12
+	vext.8	q14, q2, q2, #12
+	 veor	q7, q7, q13
+	vext.8	q15, q5, q5, #12
+	 veor	q2, q2, q14
+
+	veor	q9, q9, q0
+	 veor	q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+	veor	q10, q10, q1
+	veor	q8, q8, q5
+	veor	q9, q9, q5
+	 vext.8	q1, q1, q1, #8
+	veor	q13, q13, q3
+	 veor	q0, q0, q8
+	veor	q14, q14, q7
+	 veor	q1, q1, q9
+	 vext.8	q8, q3, q3, #8
+	veor	q12, q12, q6
+	 vext.8	q9, q7, q7, #8
+	veor	q15, q15, q2
+	 vext.8	q3, q6, q6, #8
+	veor	q11, q11, q4
+	 vext.8	q7, q5, q5, #8
+	veor	q12, q12, q5
+	 vext.8	q6, q2, q2, #8
+	veor	q11, q11, q5
+	 vext.8	q2, q4, q4, #8
+	veor	q5, q9, q13
+	veor	q4, q8, q12
+	veor	q3, q3, q11
+	veor	q7, q7, q15
+	veor	q6, q6, q14
+	 @ vmov	q4, q8
+	veor	q2, q2, q10
+	 @ vmov	q5, q9
+	vldmia	r6, {q12}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	r6,r6,#0x10
+	bne	.Lenc_loop
+	vldmia	r6, {q12}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q3, #1
+	veor		q10, q10, q5
+	 veor		q11, q11, q7
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #1
+	 veor		q7, q7, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q3, q3, q11
+	vshr.u64	q10, q4, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q6
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q6, q6, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q4, q4, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q7, #2
+	 vshr.u64	q11, q3, #2
+	veor		q10, q10, q5
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q7, q7, q10
+	 veor		q3, q3, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q6
+	 veor		q11, q11, q4
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q6, q6, q10
+	vshl.u64	q10, q10, #2
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q6, #4
+	 vshr.u64	q11, q4, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q2
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #4
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q3
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vldmia	r4, {q8}			@ last round key
+	veor	q4, q4, q8
+	veor	q6, q6, q8
+	veor	q3, q3, q8
+	veor	q7, q7, q8
+	veor	q2, q2, q8
+	veor	q5, q5, q8
+	veor	q0, q0, q8
+	veor	q1, q1, q8
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	r6,_bsaes_key_convert
+	vld1.8	{q7},  [r4]!		@ load round 0 key
+	sub	r6,r6,#_bsaes_key_convert-.LM0
+	vld1.8	{q15}, [r4]!		@ load round 1 key
+
+	vmov.i8	q8,  #0x01			@ bit masks
+	vmov.i8	q9,  #0x02
+	vmov.i8	q10, #0x04
+	vmov.i8	q11, #0x08
+	vmov.i8	q12, #0x10
+	vmov.i8	q13, #0x20
+	vldmia	r6, {q14}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	q7,  q7
+	vrev32.8	q15, q15
+#endif
+	sub	r5,r5,#1
+	vstmia	r12!, {q7}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	d14,{q15},d28
+	vtbl.8	d15,{q15},d29
+	vmov.i8	q6,  #0x40
+	vmov.i8	q15, #0x80
+
+	vtst.8	q0, q7, q8
+	vtst.8	q1, q7, q9
+	vtst.8	q2, q7, q10
+	vtst.8	q3, q7, q11
+	vtst.8	q4, q7, q12
+	vtst.8	q5, q7, q13
+	vtst.8	q6, q7, q6
+	vtst.8	q7, q7, q15
+	vld1.8	{q15}, [r4]!		@ load next round key
+	vmvn	q0, q0		@ "pnot"
+	vmvn	q1, q1
+	vmvn	q5, q5
+	vmvn	q6, q6
+#ifdef __ARMEL__
+	vrev32.8	q15, q15
+#endif
+	subs	r5,r5,#1
+	vstmia	r12!,{q0-q7}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	q7,#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
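+
+@ [note: the vtst operations against the 0x01..0x80 masks bit-slice each
+@ round key -- plane n is all-ones wherever key bit n is set -- and the
+@ vmvn "pnot" of planes 0, 1, 5 and 6 (the set bits of 0x63) folds the
+@ S-box affine constant into the schedule so the rounds needn't add it]
+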
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	r2, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
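+	@ [note: CBC encryption is inherently serial, so the kernel glue
+	@ (aesbs-glue.c) calls this entry point only for decryption and
+	@ performs encryption block by block in C]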
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	r8, [ip]			@ IV is 1st arg on the stack
+	mov	r2, r2, lsr#4		@ len in 16 byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	r9, sp				@ save sp
+
+	ldr	r10, [r3, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r10, lsl#7		@ 128 bytes per inner round key
+	add	r12, #96			@ size of bit-sliced key schedule
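+	@ [note: the net reservation is r10*128 - 96 bytes, i.e. 128 bytes
+	@ per inner round key plus 2 x 16 bytes for the round 0 and last
+	@ round keys -- 1184 bytes for AES-128's 10 rounds]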
+
+	@ populate the key schedule
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	mov	sp, r12				@ sp now points at the key schedule
+	bl	_bsaes_key_convert
+	vldmia	sp, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	sp, {q7}
+#else
+	ldr	r12, [r3, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [r3, #244]
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	add	r12, r3, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, r3, #248
+	vldmia	r4, {q6}
+	vstmia	r12, {q15}			@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{q15}, [r8]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	r2, r2, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{q0-q1}, [r0]!	@ load input
+	vld1.8	{q2-q3}, [r0]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, sp			@ pass the key
+#else
+	add	r4, r3, #248
+#endif
+	vld1.8	{q4-q5}, [r0]!
+	mov	r5, r10
+	vld1.8	{q6-q7}, [r0]
+	sub	r0, r0, #0x60
+	vstmia	r9, {q15}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12-q13}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q14-q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q3, q3, q13
+	vst1.8	{q6}, [r1]!
+	veor	q5, q5, q14
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	vst1.8	{q3}, [r1]!
+	vst1.8	{q5}, [r1]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	r2, r2, #8
+	beq	.Lcbc_dec_done
+
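+	@ [note: the compare-and-branch ladder below dispatches 1..7
+	@ leftover blocks to dedicated tails; all but the single-block case
+	@ still run the 8-wide _bsaes_decrypt8 and ignore the unused lanes]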
+	vld1.8	{q0}, [r0]!		@ load input
+	cmp	r2, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{q1}, [r0]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, sp			@ pass the key
+#else
+	add	r4, r3, #248
+#endif
+	mov	r5, r10
+	vstmia	r9, {q15}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{q2}, [r0]!
+	cmp	r2, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{q3}, [r0]!
+	beq	.Lcbc_dec_four
+	vld1.8	{q4}, [r0]!
+	cmp	r2, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{q5}, [r0]!
+	beq	.Lcbc_dec_six
+	vld1.8	{q6}, [r0]!
+	sub	r0, r0, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12-q13}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q3, q3, q13
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	vst1.8	{q3}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	r0, r0, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	r9,{q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	r0, r0, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q15}, [r0]!
+	veor	q4, q4, q10
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q2, q2, q11
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	r0, r0, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q15}, [r0]!
+	veor	q4, q4, q10
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	r0, r0, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q15}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	r0, r0, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8}, [r0]!		@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q15}, [r0]!		@ reload input
+	veor	q1, q1, q8
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	r0, r0, #0x10
+	mov	r10, r1			@ save original out pointer
+	mov	r1, r9			@ use the iv scratch space as out buffer
+	mov	r2, r3
+	vmov	q4,q15		@ just in case ensure that IV
+	vmov	q5,q0			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{q0}, [r9,:64]		@ load result
+	veor	q0, q0, q4	@ ^= IV
+	vmov	q15, q5		@ q5 holds input
+	vst1.8	{q0}, [r10]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r9
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, r9
+	add	sp, #0x10			@ add sp,r9,#0x10 is no good for thumb
+	vst1.8	{q15}, [r8]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	r2, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	r8, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	r9, sp				@ save sp
+
+	ldr	r10, [r3, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r10, lsl#7		@ 128 bytes per inner round key
+	add	r12, #96			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	mov	sp, r12				@ sp now points at the key schedule
+	bl	_bsaes_key_convert
+	veor	q7,q7,q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+
+	vld1.8	{q0}, [r8]		@ load counter
+	add	r8, r6, #.LREVM0SR-.LM0	@ borrow r8
+	vldmia	sp, {q4}		@ load round0 key
+#else
+	ldr	r12, [r3, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [r3, #244]
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	add	r12, r3, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7,q7,q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+
+.align	2
+0:	add	r12, r3, #248
+	vld1.8	{q0}, [r8]		@ load counter
+	adrl	r8, .LREVM0SR			@ borrow r8
+	vldmia	r12, {q4}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	q8,#1		@ compose 1<<96
+	veor		q9,q9,q9
+	vrev32.8	q0,q0
+	vext.8		q8,q9,q8,#4
+	vrev32.8	q4,q4
+	vadd.u32	q9,q8,q8	@ compose 2<<96
+	vstmia	sp, {q4}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	q10, q8, q9	@ compose 3<<96
+	vadd.u32	q1, q0, q8	@ +1
+	vadd.u32	q2, q0, q9	@ +2
+	vadd.u32	q3, q0, q10	@ +3
+	vadd.u32	q4, q1, q10
+	vadd.u32	q5, q2, q10
+	vadd.u32	q6, q3, q10
+	vadd.u32	q7, q4, q10
+	vadd.u32	q10, q5, q10	@ next counter
+
+	@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	@ to flip byte order in 32-bit counter
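+	@ [note: the counters and the round-0 key were byte-reversed with
+	@ vrev32.8 above, so entering at _bsaes_encrypt8_alt via the
+	@ .LREVM0SR permutation both undoes that swap and bit-slices the
+	@ state in a single vtbl pass]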
+
+	vldmia		sp, {q9}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x10		@ pass next round key
+#else
+	add		r4, r3, #264
+#endif
+	vldmia		r8, {q8}			@ .LREVM0SR
+	mov		r5, r10			@ pass rounds
+	vstmia		r9, {q10}			@ save next counter
+	sub		r6, r8, #.LREVM0SR-.LSR	@ pass constants
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		r2, r2, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{q8-q9}, [r0]!	@ load input
+	vld1.8		{q10-q11}, [r0]!
+	veor		q0, q8
+	veor		q1, q9
+	vld1.8		{q12-q13}, [r0]!
+	veor		q4, q10
+	veor		q6, q11
+	vld1.8		{q14-q15}, [r0]!
+	veor		q3, q12
+	vst1.8		{q0-q1}, [r1]!	@ write output
+	veor		q7, q13
+	veor		q2, q14
+	vst1.8		{q4}, [r1]!
+	veor		q5, q15
+	vst1.8		{q6}, [r1]!
+	vmov.i32	q8, #1			@ compose 1<<96
+	vst1.8		{q3}, [r1]!
+	veor		q9, q9, q9
+	vst1.8		{q7}, [r1]!
+	vext.8		q8, q9, q8, #4
+	vst1.8		{q2}, [r1]!
+	vadd.u32	q9,q8,q8		@ compose 2<<96
+	vst1.8		{q5}, [r1]!
+	vldmia		r9, {q0}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		r2, r2, #8
+	vld1.8		{q8}, [r0]!	@ load input
+	veor		q0, q8
+	vst1.8		{q0}, [r1]!	@ write output
+	cmp		r2, #2
+	blo		.Lctr_enc_done
+	vld1.8		{q9}, [r0]!
+	veor		q1, q9
+	vst1.8		{q1}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q10}, [r0]!
+	veor		q4, q10
+	vst1.8		{q4}, [r1]!
+	cmp		r2, #4
+	blo		.Lctr_enc_done
+	vld1.8		{q11}, [r0]!
+	veor		q6, q11
+	vst1.8		{q6}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q12}, [r0]!
+	veor		q3, q12
+	vst1.8		{q3}, [r1]!
+	cmp		r2, #6
+	blo		.Lctr_enc_done
+	vld1.8		{q13}, [r0]!
+	veor		q7, q13
+	vst1.8		{q7}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q14}, [r0]
+	veor		q2, q14
+	vst1.8		{q2}, [r1]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r9
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		sp, {q0-q1}
+#endif
+
+	mov	sp, r9
+	add	sp, #0x10		@ add sp,r9,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, r0		@ copy arguments
+	mov	r5, r1
+	mov	r6, r2
+	mov	r7, r3
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{q1}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{q1}, [sp,:64]	@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{q0}, [r4]!	@ load input
+	vld1.8	{q1}, [sp,:64]	@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	q0,q0,q1
+	vst1.8	{q0}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future r3
+
+	mov	r7, r0
+	mov	r8, r1
+	mov	r9, r2
+	mov	r10, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	r1, [r10, #240]		@ get # of rounds
+	mov	r3, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r1, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #96			@ size of bit-sliced key schedule
+	sub	r12, #48			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7, q7, q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+#else
+	ldr	r12, [r10, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [r10, #244]
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	add	r12, r10, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7, q7, q15	@ fix up last round key
+	vstmia	r12, {q7}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{q8}, [r0]			@ initial tweak
+	adr	r2, .Lxts_magic
+
+	subs	r9, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q6, q8, #63
+	mov		r0, sp
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q9, #63
+	veor		q9, q9, q6
+	vand		q7, q7, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q10, #63
+	veor		q10, q10, q7
+	vand		q6, q6, q5
+	vld1.8		{q0}, [r7]!
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q11, #63
+	veor		q11, q11, q6
+	vand		q7, q7, q5
+	vld1.8		{q1}, [r7]!
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q12, #63
+	veor		q12, q12, q7
+	vand		q6, q6, q5
+	vld1.8		{q2}, [r7]!
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q13, #63
+	veor		q13, q13, q6
+	vand		q7, q7, q5
+	vld1.8		{q3}, [r7]!
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q14, #63
+	veor		q14, q14, q7
+	vand		q6, q6, q5
+	vld1.8		{q4}, [r7]!
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q15, #63
+	veor		q15, q15, q6
+	vand		q7, q7, q5
+	vld1.8		{q5}, [r7]!
+	veor		q4, q4, q12
+	vadd.u64	q8, q15, q15
+	vst1.64		{q15}, [r0,:128]!
+	vswp		d15,d14
+	veor		q8, q8, q7
+	vst1.64		{q8}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6-q7}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	veor		q7, q7, q15
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vld1.64		{q14-q15}, [r0,:128]!
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q2, q14
+	vst1.8		{q10-q11}, [r8]!
+	veor		q13, q5, q15
+	vst1.8		{q12-q13}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+
+	subs		r9, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		r9, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q7, q8, #63
+	mov		r0, sp
+	vand		q7, q7, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q9, #63
+	veor		q9, q9, q7
+	vand		q6, q6, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q10, #63
+	veor		q10, q10, q6
+	vand		q7, q7, q5
+	vld1.8		{q0}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_1
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q11, #63
+	veor		q11, q11, q7
+	vand		q6, q6, q5
+	vld1.8		{q1}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_2
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q12, #63
+	veor		q12, q12, q6
+	vand		q7, q7, q5
+	vld1.8		{q2}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_3
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q13, #63
+	veor		q13, q13, q7
+	vand		q6, q6, q5
+	vld1.8		{q3}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_4
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q14, #63
+	veor		q14, q14, q6
+	vand		q7, q7, q5
+	vld1.8		{q4}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_5
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q15, #63
+	veor		q15, q15, q7
+	vand		q6, q6, q5
+	vld1.8		{q5}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_6
+	veor		q4, q4, q12
+	sub		r9, #0x10
+	vst1.64		{q15}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vld1.64		{q14}, [r0,:128]!
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q2, q14
+	vst1.8		{q10-q11}, [r8]!
+	vst1.8		{q12}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	vst1.64		{q14}, [r0,:128]		@ next round tweak
+
+	veor		q4, q4, q12
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q5, q5, q13
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	vst1.8		{q10-q11}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
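+
+@ [note: 0x87 is the reduction byte for doubling a tweak in GF(2^128)
+@ with the XTS polynomial x^128 + x^7 + x^2 + x + 1; the
+@ vshr.s64/vand/vadd.u64/vswp sequences in the loops above and below
+@ compute tweak' = 2 * tweak in that field, 64 bits at a time]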
+
+.align	5
+.Lxts_enc_5:
+	vst1.64		{q13}, [r0,:128]		@ next round tweak
+
+	veor		q3, q3, q11
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q4, q4, q12
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	vst1.8		{q10}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	vst1.64		{q12}, [r0,:128]		@ next round tweak
+
+	veor		q2, q2, q10
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q3, q3, q11
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vst1.8		{q8-q9}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	vst1.64		{q11}, [r0,:128]		@ next round tweak
+
+	veor		q1, q1, q9
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q2, q2, q10
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	vst1.8		{q8}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	vst1.64		{q10}, [r0,:128]		@ next round tweak
+
+	veor		q0, q0, q8
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q1, q1, q9
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	vst1.8		{q0-q1}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r8]!
+	mov		r3, r4
+
+	vmov		q8, q9		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		r9, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, r8, #0x10
+
+.Lxts_enc_steal:
+	ldrb		r0, [r7], #1
+	ldrb		r1, [r8, #-0x10]
+	strb		r0, [r8, #-0x10]
+	strb		r1, [r8], #1
+
+	subs		r9, #1
+	bhi		.Lxts_enc_steal
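+	@ [note: XTS ciphertext stealing -- the loop swaps the trailing
+	@ partial plaintext with the tail of the last complete ciphertext
+	@ block, and the result is re-encrypted below under the final tweak]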
+
+	vld1.8		{q0}, [r6]
+	mov		r0, sp
+	veor		q0, q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r6]
+	mov		r3, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, r3, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [r3, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, r3
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{q8}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future r3
+
+	mov	r7, r0
+	mov	r8, r1
+	mov	r9, r2
+	mov	r10, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	r1, [r10, #240]		@ get # of rounds
+	mov	r3, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r1, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #96			@ size of bit-sliced key schedule
+	sub	r12, #48			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+#else
+	ldr	r12, [r10, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [r10, #244]
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	add	r12, r10, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, r10, #248
+	vldmia	r4, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{q8}, [r0]			@ initial tweak
+	adr	r2, .Lxts_magic
+
+	tst	r9, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	r9, #0x10			@ subtract another 16 bytes
+	subs	r9, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q6, q8, #63
+	mov		r0, sp
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q9, #63
+	veor		q9, q9, q6
+	vand		q7, q7, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q10, #63
+	veor		q10, q10, q7
+	vand		q6, q6, q5
+	vld1.8		{q0}, [r7]!
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q11, #63
+	veor		q11, q11, q6
+	vand		q7, q7, q5
+	vld1.8		{q1}, [r7]!
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q12, #63
+	veor		q12, q12, q7
+	vand		q6, q6, q5
+	vld1.8		{q2}, [r7]!
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q13, #63
+	veor		q13, q13, q6
+	vand		q7, q7, q5
+	vld1.8		{q3}, [r7]!
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q14, #63
+	veor		q14, q14, q7
+	vand		q6, q6, q5
+	vld1.8		{q4}, [r7]!
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q15, #63
+	veor		q15, q15, q6
+	vand		q7, q7, q5
+	vld1.8		{q5}, [r7]!
+	veor		q4, q4, q12
+	vadd.u64	q8, q15, q15
+	vst1.64		{q15}, [r0,:128]!
+	vswp		d15,d14
+	veor		q8, q8, q7
+	vst1.64		{q8}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6-q7}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	veor		q7, q7, q15
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vld1.64		{q14-q15}, [r0,:128]!
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q3, q14
+	vst1.8		{q10-q11}, [r8]!
+	veor		q13, q5, q15
+	vst1.8		{q12-q13}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+
+	subs		r9, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		r9, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q7, q8, #63
+	mov		r0, sp
+	vand		q7, q7, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q9, #63
+	veor		q9, q9, q7
+	vand		q6, q6, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q10, #63
+	veor		q10, q10, q6
+	vand		q7, q7, q5
+	vld1.8		{q0}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_1
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q11, #63
+	veor		q11, q11, q7
+	vand		q6, q6, q5
+	vld1.8		{q1}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_2
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q12, #63
+	veor		q12, q12, q6
+	vand		q7, q7, q5
+	vld1.8		{q2}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_3
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q13, #63
+	veor		q13, q13, q7
+	vand		q6, q6, q5
+	vld1.8		{q3}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_4
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q14, #63
+	veor		q14, q14, q6
+	vand		q7, q7, q5
+	vld1.8		{q4}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_5
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q15, #63
+	veor		q15, q15, q7
+	vand		q6, q6, q5
+	vld1.8		{q5}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_6
+	veor		q4, q4, q12
+	sub		r9, #0x10
+	vst1.64		{q15}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vld1.64		{q14}, [r0,:128]!
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q3, q14
+	vst1.8		{q10-q11}, [r8]!
+	vst1.8		{q12}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{q14}, [r0,:128]		@ next round tweak
+
+	veor		q4, q4, q12
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q5, q5, q13
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	vst1.8		{q10-q11}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	vst1.64		{q13}, [r0,:128]		@ next round tweak
+
+	veor		q3, q3, q11
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q4, q4, q12
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	vst1.8		{q10}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	vst1.64		{q12}, [r0,:128]		@ next round tweak
+
+	veor		q2, q2, q10
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q3, q3, q11
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vst1.8		{q8-q9}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	vst1.64		{q11}, [r0,:128]		@ next round tweak
+
+	veor		q1, q1, q9
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q2, q2, q10
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	vst1.8		{q8}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	vst1.64		{q10}, [r0,:128]		@ next round tweak
+
+	veor		q0, q0, q8
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q1, q1, q9
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	vst1.8		{q0-q1}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3				@ preserve fp
+	mov		r5, r2			@ preserve magic
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r8]!
+	mov		r3, r4
+	mov		r2, r5
+
+	vmov		q8, q9		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		r9, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		r2, {q5}
+	vshr.s64	q6, q8, #63
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vswp		d13,d12
+	veor		q9, q9, q6
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{q0}, [r7]!
+	mov		r0, sp
+	veor		q0, q0, q9
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q9
+	vst1.8		{q0}, [r8]
+
+	mov		r6, r8
+.Lxts_dec_steal:
+	ldrb		r1, [r8]
+	ldrb		r0, [r7], #1
+	strb		r1, [r8, #0x10]
+	strb		r0, [r8], #1
+
+	subs		r9, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{q0}, [r6]
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r6]
+	mov		r3, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, r3, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [r3, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, r3
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{q8}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644
index 0000000..4522366
--- /dev/null
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE	(128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
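+/*
+ * [note: each of the AES_MAXNR - 1 inner rounds needs 8 q-registers'
+ * worth of bit-sliced key material (128 bytes); the remaining
+ * 2 * AES_BLOCK_SIZE bytes hold the unsliced round 0 and final round
+ * keys]
+ */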
+
+struct BS_KEY {
+	struct AES_KEY	rk;
+	int		converted;
+	u8 __aligned(8)	bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
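+
+/*
+ * [note: 'converted' flags whether the byte-oriented key in 'rk' has
+ * been expanded into the bit-sliced form in 'bs'; the assembly, built
+ * with BSAES_ASM_EXTENDED_KEY, tests and sets this word (at offset 244,
+ * directly after struct AES_KEY) on first use]
+ */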
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+					   struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+	struct AES_KEY	enc;
+	struct BS_KEY	dec;
+};
+
+struct aesbs_ctr_ctx {
+	struct BS_KEY	enc;
+};
+
+struct aesbs_xts_ctx {
+	struct BS_KEY	enc;
+	struct BS_KEY	dec;
+	struct AES_KEY	twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 8;
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->dec.rk = ctx->enc;
+	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+	ctx->dec.converted = 0;
+	return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 8;
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->enc.converted = 0;
+	return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 4;
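+	/* [note: an XTS key is two AES keys concatenated, so each half is
+	 * key_len * 8 / 2 = key_len * 4 bits] */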
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->dec.rk = ctx->enc.rk;
+	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+	private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+	ctx->enc.converted = ctx->dec.converted = 0;
+	return 0;
+}
+
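+/*
+ * [note: CBC encryption is a serial recurrence -- each block's input
+ * depends on the previous ciphertext -- so it cannot use the 8-way
+ * bit-sliced code and is done one block at a time with the scalar
+ * AES_encrypt below]
+ */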
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while (walk.nbytes) {
+		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+		u8 *src = walk.src.virt.addr;
+
+		if (walk.dst.virt.addr == walk.src.virt.addr) {
+			u8 *iv = walk.iv;
+
+			do {
+				crypto_xor(src, iv, AES_BLOCK_SIZE);
+				AES_encrypt(src, src, &ctx->enc);
+				iv = src;
+				src += AES_BLOCK_SIZE;
+			} while (--blocks);
+			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		} else {
+			u8 *dst = walk.dst.virt.addr;
+
+			do {
+				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+				AES_encrypt(walk.iv, dst, &ctx->enc);
+				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+				src += AES_BLOCK_SIZE;
+				dst += AES_BLOCK_SIZE;
+			} while (--blocks);
+		}
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+		kernel_neon_begin();
+		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->dec, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	while (walk.nbytes) {
+		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+		u8 *dst = walk.dst.virt.addr;
+		u8 *src = walk.src.virt.addr;
+		u8 bk[2][AES_BLOCK_SIZE];
+		u8 *iv = walk.iv;
+
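+		/*
+		 * For in-place operation each ciphertext block is saved
+		 * in bk[] before AES_decrypt() overwrites it; the two
+		 * slots alternate so the previous block survives to act
+		 * as the next IV.
+		 */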
+		do {
+			if (walk.dst.virt.addr == walk.src.virt.addr)
+				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+			AES_decrypt(src, dst, &ctx->dec.rk);
+			crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+			if (walk.dst.virt.addr == walk.src.virt.addr)
+				iv = bk[blocks & 1];
+			else
+				iv = src;
+
+			dst += AES_BLOCK_SIZE;
+			src += AES_BLOCK_SIZE;
+		} while (--blocks);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+	int i;
+
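+	/*
+	 * Add the addend to the least significant word and ripple any
+	 * carry upwards: e.g. ...00 ffffffff + 1 wraps ctr[3] to 0
+	 * (n < addend), so 1 carries into ctr[2].
+	 */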
+	for (i = 3; i >= 0; i--, addend = 1) {
+		u32 n = be32_to_cpu(ctr[i]) + addend;
+
+		ctr[i] = cpu_to_be32(n);
+		if (n >= addend)
+			break;
+	}
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst, struct scatterlist *src,
+			     unsigned int nbytes)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	u32 blocks;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+		__be32 *ctr = (__be32 *)walk.iv;
+		u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+		/* avoid 32 bit counter overflow in the NEON code */
+		if (unlikely(headroom < blocks)) {
+			blocks = headroom + 1;
+			tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+		}
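+		/*
+		 * e.g. with ctr[3] == 0xfffffffe there is headroom for
+		 * one increment, so at most two blocks are handed to the
+		 * NEON code before inc_be128_ctr() performs the full
+		 * 128-bit carry.
+		 */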
+		kernel_neon_begin();
+		bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+					   walk.dst.virt.addr, blocks,
+					   &ctx->enc, walk.iv);
+		kernel_neon_end();
+		inc_be128_ctr(ctr, blocks);
+
+		nbytes -= blocks * AES_BLOCK_SIZE;
+		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+			break;
+
+		err = blkcipher_walk_done(desc, &walk, tail);
+	}
+	if (walk.nbytes) {
+		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 ks[AES_BLOCK_SIZE];
+
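+		/* the final chunk (at most one block) is handled with a
+		 * single scalar keystream block: encrypt the counter and
+		 * xor only the remaining nbytes */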
+		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+		if (tdst != tsrc)
+			memcpy(tdst, tsrc, nbytes);
+		crypto_xor(tdst, ks, nbytes);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	/* generate the initial tweak */
+	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+	while (walk.nbytes) {
+		kernel_neon_begin();
+		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->enc, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	/* generate the initial tweak */
+	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+	while (walk.nbytes) {
+		kernel_neon_begin();
+		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->dec, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+	.cra_name		= "__cbc-aes-neonbs",
+	.cra_driver_name	= "__driver-cbc-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_cbc_set_key,
+		.encrypt	= aesbs_cbc_encrypt,
+		.decrypt	= aesbs_cbc_decrypt,
+	},
+}, {
+	.cra_name		= "__ctr-aes-neonbs",
+	.cra_driver_name	= "__driver-ctr-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_ctr_set_key,
+		.encrypt	= aesbs_ctr_encrypt,
+		.decrypt	= aesbs_ctr_encrypt,
+	},
+}, {
+	.cra_name		= "__xts-aes-neonbs",
+	.cra_driver_name	= "__driver-xts-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_xts_set_key,
+		.encrypt	= aesbs_xts_encrypt,
+		.decrypt	= aesbs_xts_decrypt,
+	},
+}, {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= __ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "xts-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+} };
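+
+/*
+ * The "__"-prefixed entries are the synchronous NEON implementations;
+ * they register at priority 0 so they are reached only through the
+ * ablk_helper wrappers that follow, which can defer to a cryptd worker
+ * when the NEON unit is unusable in the caller's context. CBC
+ * encryption, which never touches NEON, is wired straight to
+ * __ablk_encrypt.
+ */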
+
+static int __init aesbs_mod_init(void)
+{
+	if (!cpu_has_neon())
+		return -ENODEV;
+
+	return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+	crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644
index 0000000..f3d96d9
--- /dev/null
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64
+# module for ARM NEON, except that this module is endian-neutral [in
+# the sense that it can be compiled for either endianness] courtesy of
+# vld1.8's neutrality. The initial version doesn't implement an
+# interface to OpenSSL, only low-level primitives and unsupported
+# entry points, just enough to collect performance results, which for
+# the Cortex-A8 core are:
+#
+# encrypt	19.5 cycles per byte processed with 128-bit key
+# decrypt	22.1 cycles per byte processed with 128-bit key
+# key conv.	440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages 14.2/16.1 cycles [where the integer-only code
+# manages 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#						<appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#					<ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
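+
+# The S-box decomposes into a linear change of basis, an inversion in
+# GF(2^8) built from GF(2^4)/GF(2^2) arithmetic (Inv_GF256 below) and
+# a change of basis back, so every step is plain AND/OR/XOR on the
+# bit-sliced registers.  The 0x63 affine constant is folded into the
+# key schedule instead (see the .L63 fix-up in _bsaes_key_convert).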
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[2], @b[2], @b[1]
+	veor	@b[5], @b[5], @b[6]
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[6], @b[6], @b[2]
+	veor	@b[5], @b[5], @b[0]
+
+	veor	@b[6], @b[6], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	veor	@b[4], @b[4], @b[5]
+
+	veor	@b[2], @b[2], @b[7]
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[2], @b[2], @b[0]
+	veor	@b[6], @b[6], @b[1]
+
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[5], @b[5], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[2], @b[2], @b[5]
+
+	veor	@b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___;
+	 veor	@b[1], @b[1], @b[7]
+	veor	@b[4], @b[4], @b[7]
+
+	veor	@b[7], @b[7], @b[5]
+	 veor	@b[1], @b[1], @b[3]
+	veor	@b[2], @b[2], @b[5]
+	veor	@b[3], @b[3], @b[7]
+
+	veor	@b[6], @b[6], @b[1]
+	veor	@b[2], @b[2], @b[0]
+	 veor	@b[5], @b[5], @b[3]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[2], @b[2], @b[7]
+
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[4], @b[4], @b[5]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	 veor 	@b[5], @b[5], @b[0]
+	veor	@b[3], @b[3], @b[7]
+	 veor	@b[6], @b[6], @b[2]
+	 veor	@b[2], @b[2], @b[1]
+	veor	@b[6], @b[6], @b[3]
+
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+	veor 	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$t1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $t1, $t0
+	veor	$x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$x1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $x1, $x0
+	veor	$x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	 veor 	$t1, $y2, $y3
+	vand	$t0, $t0, $x0
+	 vand	$t1, $t1, $x2
+	veor	$x0, $x0, $x1
+	 veor	$x2, $x2, $x3
+	vand	$x1, $x1, $y0
+	 vand	$x3, $x3, $y2
+	vand	$x0, $x0, $y1
+	 vand	$x2, $x2, $y3
+	veor	$x1, $x1, $x0
+	 veor	$x2, $x2, $x3
+	veor	$x0, $x0, $t0
+	 veor	$x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	veor	@t[0], @x[0], @x[2]
+	veor	@t[1], @x[1], @x[3]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@x[0], @x[0], @t[0]
+	veor	@x[2], @x[2], @t[0]
+	veor	@x[1], @x[1], @t[1]
+	veor	@x[3], @x[3], @t[1]
+
+	veor	@t[0], @x[4], @x[6]
+	veor	@t[1], @x[5], @x[7]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@x[4], @x[4], @t[0]
+	veor	@x[6], @x[6], @t[0]
+	veor	@x[5], @x[5], @t[1]
+	veor	@x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	veor	@t[3], @x[4], @x[6]
+	veor	@t[2], @x[5], @x[7]
+	veor	@t[1], @x[1], @x[3]
+	veor	@s[1], @x[7], @x[6]
+	 vmov	@t[0], @t[2]
+	veor	@s[0], @x[0], @x[2]
+
+	vorr	@t[2], @t[2], @t[1]
+	veor	@s[3], @t[3], @t[0]
+	vand	@s[2], @t[3], @s[0]
+	vorr	@t[3], @t[3], @s[0]
+	veor	@s[0], @s[0], @t[1]
+	vand	@t[0], @t[0], @t[1]
+	veor	@t[1], @x[3], @x[2]
+	vand	@s[3], @s[3], @s[0]
+	vand	@s[1], @s[1], @t[1]
+	veor	@t[1], @x[4], @x[5]
+	veor	@s[0], @x[1], @x[0]
+	veor	@t[3], @t[3], @s[1]
+	veor	@t[2], @t[2], @s[1]
+	vand	@s[1], @t[1], @s[0]
+	vorr	@t[1], @t[1], @s[0]
+	veor	@t[3], @t[3], @s[3]
+	veor	@t[0], @t[0], @s[1]
+	veor	@t[2], @t[2], @s[2]
+	veor	@t[1], @t[1], @s[3]
+	veor	@t[0], @t[0], @s[2]
+	vand	@s[0], @x[7], @x[3]
+	veor	@t[1], @t[1], @s[2]
+	vand	@s[1], @x[6], @x[2]
+	vand	@s[2], @x[5], @x[1]
+	vorr	@s[3], @x[4], @x[0]
+	veor	@t[3], @t[3], @s[0]
+	veor	@t[1], @t[1], @s[2]
+	veor	@t[0], @t[0], @s[3]
+	veor	@t[2], @t[2], @s[1]
+
+	@ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+	@ new smaller inversion
+
+	vand	@s[2], @t[3], @t[1]
+	vmov	@s[0], @t[0]
+
+	veor	@s[1], @t[2], @s[2]
+	veor	@s[3], @t[0], @s[2]
+	veor	@s[2], @t[0], @s[2]	@ @s[2]=@s[3]
+
+	vbsl	@s[1], @t[1], @t[0]
+	vbsl	@s[3], @t[3], @t[2]
+	veor	@t[3], @t[3], @t[2]
+
+	vbsl	@s[0], @s[1], @s[2]
+	vbsl	@t[0], @s[2], @s[1]
+
+	vand	@s[2], @s[0], @s[3]
+	veor	@t[1], @t[1], @t[0]
+
+	veor	@s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+	vldmia	$key!, {@t[0]-@t[3]}
+	veor	@t[0], @t[0], @x[0]
+	veor	@t[1], @t[1], @x[1]
+	vtbl.8	`&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[0]}
+	veor	@t[2], @t[2], @x[2]
+	vtbl.8	`&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[1]}
+	veor	@t[3], @t[3], @x[3]
+	vtbl.8	`&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[2]}
+	vtbl.8	`&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[3]}
+	veor	@t[0], @t[0], @x[4]
+	veor	@t[1], @t[1], @x[5]
+	vtbl.8	`&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+	veor	@t[2], @t[2], @x[6]
+	vtbl.8	`&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+	veor	@t[3], @t[3], @x[7]
+	vtbl.8	`&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+	vtbl.8	`&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];	# optional
+$code.=<<___;
+	vext.8	@t[0], @x[0], @x[0], #12	@ x0 <<< 32
+	vext.8	@t[1], @x[1], @x[1], #12
+	 veor	@x[0], @x[0], @t[0]		@ x0 ^ (x0 <<< 32)
+	vext.8	@t[2], @x[2], @x[2], #12
+	 veor	@x[1], @x[1], @t[1]
+	vext.8	@t[3], @x[3], @x[3], #12
+	 veor	@x[2], @x[2], @t[2]
+	vext.8	@t[4], @x[4], @x[4], #12
+	 veor	@x[3], @x[3], @t[3]
+	vext.8	@t[5], @x[5], @x[5], #12
+	 veor	@x[4], @x[4], @t[4]
+	vext.8	@t[6], @x[6], @x[6], #12
+	 veor	@x[5], @x[5], @t[5]
+	vext.8	@t[7], @x[7], @x[7], #12
+	 veor	@x[6], @x[6], @t[6]
+
+	veor	@t[1], @t[1], @x[0]
+	 veor	@x[7], @x[7], @t[7]
+	 vext.8	@x[0], @x[0], @x[0], #8		@ (x0 ^ (x0 <<< 32)) <<< 64)
+	veor	@t[2], @t[2], @x[1]
+	veor	@t[0], @t[0], @x[7]
+	veor	@t[1], @t[1], @x[7]
+	 vext.8	@x[1], @x[1], @x[1], #8
+	veor	@t[5], @t[5], @x[4]
+	 veor	@x[0], @x[0], @t[0]
+	veor	@t[6], @t[6], @x[5]
+	 veor	@x[1], @x[1], @t[1]
+	 vext.8	@t[0], @x[4], @x[4], #8
+	veor	@t[4], @t[4], @x[3]
+	 vext.8	@t[1], @x[5], @x[5], #8
+	veor	@t[7], @t[7], @x[6]
+	 vext.8	@x[4], @x[3], @x[3], #8
+	veor	@t[3], @t[3], @x[2]
+	 vext.8	@x[5], @x[7], @x[7], #8
+	veor	@t[4], @t[4], @x[7]
+	 vext.8	@x[3], @x[6], @x[6], #8
+	veor	@t[3], @t[3], @x[7]
+	 vext.8	@x[6], @x[2], @x[2], #8
+	veor	@x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+	veor	@x[2], @t[0], @t[4]
+	veor	@x[4], @x[4], @t[3]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[3], @x[3], @t[6]
+	 @ vmov	@x[2], @t[0]
+	veor	@x[6], @x[6], @t[2]
+	 @ vmov	@x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+	veor	@t[3], @t[3], @x[4]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[2], @x[3], @t[6]
+	veor	@x[3], @t[0], @t[4]
+	veor	@x[4], @x[6], @t[2]
+	vmov	@x[6], @t[3]
+	 @ vmov	@x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	@ multiplication by 0x0e
+	vext.8	@t[7], @x[7], @x[7], #12
+	vmov	@t[2], @x[2]
+	veor	@x[2], @x[2], @x[5]		@ 2 5
+	veor	@x[7], @x[7], @x[5]		@ 7 5
+	vext.8	@t[0], @x[0], @x[0], #12
+	vmov	@t[5], @x[5]
+	veor	@x[5], @x[5], @x[0]		@ 5 0		[1]
+	veor	@x[0], @x[0], @x[1]		@ 0 1
+	vext.8	@t[1], @x[1], @x[1], #12
+	veor	@x[1], @x[1], @x[2]		@ 1 25
+	veor	@x[0], @x[0], @x[6]		@ 01 6		[2]
+	vext.8	@t[3], @x[3], @x[3], #12
+	veor	@x[1], @x[1], @x[3]		@ 125 3		[4]
+	veor	@x[2], @x[2], @x[0]		@ 25 016	[3]
+	veor	@x[3], @x[3], @x[7]		@ 3 75
+	veor	@x[7], @x[7], @x[6]		@ 75 6		[0]
+	vext.8	@t[6], @x[6], @x[6], #12
+	vmov	@t[4], @x[4]
+	veor	@x[6], @x[6], @x[4]		@ 6 4
+	veor	@x[4], @x[4], @x[3]		@ 4 375		[6]
+	veor	@x[3], @x[3], @x[7]		@ 375 756=36
+	veor	@x[6], @x[6], @t[5]		@ 64 5		[7]
+	veor	@x[3], @x[3], @t[2]		@ 36 2
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@x[3], @x[3], @t[4]		@ 362 4		[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	@ multiplication by 0x0b
+	veor	@y[1], @y[1], @y[0]
+	veor	@y[0], @y[0], @t[0]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[1], @y[1], @t[6]
+	veor	@y[0], @y[0], @t[7]
+	veor	@t[7], @t[7], @t[6]		@ clobber t[7]
+
+	veor	@y[3], @y[3], @t[0]
+	 veor	@y[1], @y[1], @y[0]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[2], @y[2], @t[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+	veor	@y[2], @y[2], @t[2]
+	veor	@y[3], @y[3], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[7]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[3], @y[3], @t[3]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[4], @y[4], @t[3]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[3], @t[3], @t[3], #12
+	veor	@y[5], @y[5], @t[4]
+	veor	@y[7], @y[7], @t[7]
+	veor	@t[7], @t[7], @t[5]		@ clobber t[7] even more
+	veor	@y[3], @y[3], @t[5]
+	veor	@y[4], @y[4], @t[4]
+
+	veor	@y[5], @y[5], @t[7]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[6], @y[6], @t[7]
+	veor	@y[4], @y[4], @t[7]
+
+	veor	@t[7], @t[7], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+
+	@ multiplication by 0x0d
+	veor	@y[4], @y[4], @y[7]
+	 veor	@t[7], @t[7], @t[6]		@ restore t[7]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@y[2], @y[2], @t[0]
+	veor	@y[7], @y[7], @t[5]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@y[2], @y[2], @t[2]
+
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[0]
+	veor	@y[3], @y[3], @t[0]
+	veor	@y[1], @y[1], @t[5]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[1], @y[1], @t[7]
+	veor	@y[0], @y[0], @t[6]
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+
+	veor	@y[7], @y[7], @t[7]
+	veor	@y[4], @y[4], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[6]
+	veor	@t[6], @t[6], @t[3]		@ clobber t[6]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[4], @y[4], @y[7]
+	veor	@y[3], @y[3], @t[6]
+
+	veor	@y[6], @y[6], @t[6]
+	veor	@y[5], @y[5], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@y[6], @y[6], @t[4]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[5], @y[5], @t[6]
+	veor	@y[6], @y[6], @t[7]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@t[6], @t[6], @t[3]		@ restore t[6]
+	vext.8	@t[3], @t[3], @t[3], #12
+
+	@ multiplication by 0x09
+	veor	@y[4], @y[4], @y[1]
+	veor	@t[1], @t[1], @y[1]		@ t[1]=y[1]
+	veor	@t[0], @t[0], @t[5]		@ clobber t[0]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@t[1], @t[1], @t[5]
+	veor	@y[3], @y[3], @t[0]
+	veor	@t[0], @t[0], @y[0]		@ t[0]=y[0]
+	veor	@t[1], @t[1], @t[6]
+	veor	@t[6], @t[6], @t[7]		@ clobber t[6]
+	veor	@y[4], @y[4], @t[1]
+	veor	@y[7], @y[7], @t[4]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[5], @y[5], @t[2]
+	veor	@t[4], @t[4], @y[4]		@ t[4]=y[4]
+	veor	@t[3], @t[3], @y[3]		@ t[3]=y[3]
+	veor	@t[5], @t[5], @y[5]		@ t[5]=y[5]
+	veor	@t[2], @t[2], @y[2]		@ t[2]=y[2]
+	veor	@t[3], @t[3], @t[7]
+	veor	@XMM[5], @t[5], @t[6]
+	veor	@XMM[6], @t[6], @y[6]		@ t[6]=y[6]
+	veor	@XMM[2], @t[2], @t[6]
+	veor	@XMM[7], @t[7], @y[7]		@ t[7]=y[7]
+
+	vmov	@XMM[0], @t[0]
+	vmov	@XMM[1], @t[1]
+	@ vmov	@XMM[2], @t[2]
+	vmov	@XMM[3], @t[3]
+	vmov	@XMM[4], @t[4]
+	@ vmov	@XMM[5], @t[5]
+	@ vmov	@XMM[6], @t[6]
+	@ vmov	@XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
+
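+# Spot check on the top-left coefficient: 02*05 ^ 03*00 ^ 01*04 ^
+# 01*00 = 0x0a ^ 0x04 = 0x0e in GF(2^8), matching the first entry of
+# the InvMixColumns matrix.
+#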
+$code.=<<___;
+	@ multiplication by 0x05-0x00-0x04-0x00
+	vext.8	@t[0], @x[0], @x[0], #8
+	vext.8	@t[6], @x[6], @x[6], #8
+	vext.8	@t[7], @x[7], @x[7], #8
+	veor	@t[0], @t[0], @x[0]
+	vext.8	@t[1], @x[1], @x[1], #8
+	veor	@t[6], @t[6], @x[6]
+	vext.8	@t[2], @x[2], @x[2], #8
+	veor	@t[7], @t[7], @x[7]
+	vext.8	@t[3], @x[3], @x[3], #8
+	veor	@t[1], @t[1], @x[1]
+	vext.8	@t[4], @x[4], @x[4], #8
+	veor	@t[2], @t[2], @x[2]
+	vext.8	@t[5], @x[5], @x[5], #8
+	veor	@t[3], @t[3], @x[3]
+	veor	@t[4], @t[4], @x[4]
+	veor	@t[5], @t[5], @x[5]
+
+	 veor	@x[0], @x[0], @t[6]
+	 veor	@x[1], @x[1], @t[6]
+	 veor	@x[2], @x[2], @t[0]
+	 veor	@x[4], @x[4], @t[2]
+	 veor	@x[3], @x[3], @t[1]
+	 veor	@x[1], @x[1], @t[7]
+	 veor	@x[2], @x[2], @t[7]
+	 veor	@x[4], @x[4], @t[6]
+	 veor	@x[5], @x[5], @t[3]
+	 veor	@x[3], @x[3], @t[6]
+	 veor	@x[6], @x[6], @t[4]
+	 veor	@x[4], @x[4], @t[7]
+	 veor	@x[5], @x[5], @t[7]
+	 veor	@x[7], @x[7], @t[5]
+___
+	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+	vshr.u64	$t, $b, #$n
+	veor		$t, $t, $a
+	vand		$t, $t, $mask
+	veor		$a, $a, $t
+	vshl.u64	$t, $t, #$n
+	veor		$b, $b, $t
+___
+}
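+
+# swapmove is the classic masked delta-swap: t = ((b >> n) ^ a) & mask
+# selects the bit positions where $a and $b should trade places, and
+# the xors plus the shifted xor carry out the exchange.  Applied with
+# shifts of 1, 2 and 4 in &bitslice, it transposes bytes into the
+# eight bit-plane registers.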
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	vshr.u64	$t0, $b0, #$n
+	 vshr.u64	$t1, $b1, #$n
+	veor		$t0, $t0, $a0
+	 veor		$t1, $t1, $a1
+	vand		$t0, $t0, $mask
+	 vand		$t1, $t1, $mask
+	veor		$a0, $a0, $t0
+	vshl.u64	$t0, $t0, #$n
+	 veor		$a1, $a1, $t1
+	 vshl.u64	$t1, $t1, #$n
+	veor		$b0, $b0, $t0
+	 veor		$b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+	vmov.i8	$t0,#0x55			@ compose .LBS0
+	vmov.i8	$t1,#0x33			@ compose .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	vmov.i8	$t0,#0x0f			@ compose .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu	neon
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	$const,_bsaes_decrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+	add	$const,$const,#.LM0ISR-_bsaes_decrypt8
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0ISR
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Ldec_loop
+	vldmia	$const, {@XMM[12]}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	$const,_bsaes_encrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+	sub	$const,$const,#_bsaes_encrypt8-.LM0SR
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Lenc_loop
+	vldmia	$const, {@XMM[12]}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	@ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+	vmov	@x[2], @x[0]
+	vmov	@x[3], @x[1]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	@ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	vmov	@x[4], @x[0]
+	vmov	@x[6], @x[2]
+	vmov	@x[5], @x[1]
+	vmov	@x[7], @x[3]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
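+# _bsaes_key_convert bit-slices the round keys themselves: each key
+# byte is tested against the single-bit masks 0x01..0x80 (vtst), so a
+# 16-byte round key expands to eight 128-bit mask registers, i.e. the
+# 128 bytes per inner round key allocated by the callers below.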
+$code.=<<___;
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	$const,_bsaes_key_convert
+	vld1.8	{@XMM[7]},  [$inp]!		@ load round 0 key
+	sub	$const,$const,#_bsaes_key_convert-.LM0
+	vld1.8	{@XMM[15]}, [$inp]!		@ load round 1 key
+
+	vmov.i8	@XMM[8],  #0x01			@ bit masks
+	vmov.i8	@XMM[9],  #0x02
+	vmov.i8	@XMM[10], #0x04
+	vmov.i8	@XMM[11], #0x08
+	vmov.i8	@XMM[12], #0x10
+	vmov.i8	@XMM[13], #0x20
+	vldmia	$const, {@XMM[14]}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	@XMM[7],  @XMM[7]
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	sub	$rounds,$rounds,#1
+	vstmia	$out!, {@XMM[7]}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	`&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+	vtbl.8	`&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+	vmov.i8	@XMM[6],  #0x40
+	vmov.i8	@XMM[15], #0x80
+
+	vtst.8	@XMM[0], @XMM[7], @XMM[8]
+	vtst.8	@XMM[1], @XMM[7], @XMM[9]
+	vtst.8	@XMM[2], @XMM[7], @XMM[10]
+	vtst.8	@XMM[3], @XMM[7], @XMM[11]
+	vtst.8	@XMM[4], @XMM[7], @XMM[12]
+	vtst.8	@XMM[5], @XMM[7], @XMM[13]
+	vtst.8	@XMM[6], @XMM[7], @XMM[6]
+	vtst.8	@XMM[7], @XMM[7], @XMM[15]
+	vld1.8	{@XMM[15]}, [$inp]!		@ load next round key
+	vmvn	@XMM[0], @XMM[0]		@ "pnot"
+	vmvn	@XMM[1], @XMM[1]
+	vmvn	@XMM[5], @XMM[5]
+	vmvn	@XMM[6], @XMM[6]
+#ifdef __ARMEL__
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	subs	$rounds,$rounds,#1
+	vstmia	$out!,{@XMM[0]-@XMM[7]}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	@XMM[7],#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {		# the following four functions are an unsupported
+			# interface, used for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,%function
+.align	4
+bsaes_enc_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,%function
+.align	4
+bsaes_encrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Lenc128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_encrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Lenc128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,%function
+.align	4
+bsaes_dec_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	vldmia	$out, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$out, {@XMM[7]}
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,%function
+.align	4
+bsaes_decrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Ldec128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_decrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Ldec128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	$len, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ivp, [ip]			@ IV is 1st arg on the stack
+	mov	$len, $len, lsr#4		@ len in 16-byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	vldmia	$keysched, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$keysched, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12, {@XMM[15]}			@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{@XMM[15]}, [$ivp]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	$len, $len, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5, $rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]
+	sub	$inp, $inp, #0x60
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[14]-@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	veor	@XMM[5], @XMM[5], @XMM[14]
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[5]}, [$out]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	$len, $len, #8
+	beq	.Lcbc_dec_done
+
+	vld1.8	{@XMM[0]}, [$inp]!		@ load input
+	cmp	$len, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{@XMM[1]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	mov	r5, $rounds
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{@XMM[2]}, [$inp]!
+	cmp	$len, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{@XMM[3]}, [$inp]!
+	beq	.Lcbc_dec_four
+	vld1.8	{@XMM[4]}, [$inp]!
+	cmp	$len, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{@XMM[5]}, [$inp]!
+	beq	.Lcbc_dec_six
+	vld1.8	{@XMM[6]}, [$inp]!
+	sub	$inp, $inp, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	$inp, $inp, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	$fp,{@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	$inp, $inp, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	$inp, $inp, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	$inp, $inp, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	$inp, $inp, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]}, [$inp]!		@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!		@ reload input
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	$inp, $inp, #0x10
+	mov	$rounds, $out			@ save original out pointer
+	mov	$out, $fp			@ use the iv scratch space as out buffer
+	mov	r2, $key
+	vmov	@XMM[4],@XMM[15]		@ just in case ensure that IV
+	vmov	@XMM[5],@XMM[0]			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{@XMM[0]}, [$fp,:64]		@ load result
+	veor	@XMM[0], @XMM[0], @XMM[4]	@ ^= IV
+	vmov	@XMM[15], @XMM[5]		@ @XMM[5] holds input
+	vst1.8	{@XMM[0]}, [$rounds]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10			@ add sp,$fp,#0x10 is no good for thumb
+	vst1.8	{@XMM[15]}, [$ivp]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";	# shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	$len, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ctr, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	add	$ctr, $const, #.LREVM0SR-.LM0	@ borrow $ctr
+	vldmia	$keysched, {@XMM[4]}		@ load round0 key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+.align	2
+0:	add	r12, $key, #248
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	adrl	$ctr, .LREVM0SR			@ borrow $ctr
+	vldmia	r12, {@XMM[4]}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	@XMM[8],#1		@ compose 1<<96
+	veor		@XMM[9],@XMM[9],@XMM[9]
+	vrev32.8	@XMM[0],@XMM[0]
+	vext.8		@XMM[8],@XMM[9],@XMM[8],#4
+	vrev32.8	@XMM[4],@XMM[4]
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]	@ compose 2<<96
+	vstmia	$keysched, {@XMM[4]}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	@XMM[10], @XMM[8], @XMM[9]	@ compose 3<<96
+	vadd.u32	@XMM[1], @XMM[0], @XMM[8]	@ +1
+	vadd.u32	@XMM[2], @XMM[0], @XMM[9]	@ +2
+	vadd.u32	@XMM[3], @XMM[0], @XMM[10]	@ +3
+	vadd.u32	@XMM[4], @XMM[1], @XMM[10]
+	vadd.u32	@XMM[5], @XMM[2], @XMM[10]
+	vadd.u32	@XMM[6], @XMM[3], @XMM[10]
+	vadd.u32	@XMM[7], @XMM[4], @XMM[10]
+	vadd.u32	@XMM[10], @XMM[5], @XMM[10]	@ next counter
+
+	@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	@ to flip byte order in 32-bit counter
+
+	vldmia		$keysched, {@XMM[9]}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, $keysched, #0x10		@ pass next round key
+#else
+	add		r4, $key, #`248+16`
+#endif
+	vldmia		$ctr, {@XMM[8]}			@ .LREVM0SR
+	mov		r5, $rounds			@ pass rounds
+	vstmia		$fp, {@XMM[10]}			@ save next counter
+	sub		$const, $ctr, #.LREVM0SR-.LSR	@ pass constants
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		$len, $len, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{@XMM[8]-@XMM[9]}, [$inp]!	@ load input
+	vld1.8		{@XMM[10]-@XMM[11]}, [$inp]!
+	veor		@XMM[0], @XMM[8]
+	veor		@XMM[1], @XMM[9]
+	vld1.8		{@XMM[12]-@XMM[13]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	veor		@XMM[6], @XMM[11]
+	vld1.8		{@XMM[14]-@XMM[15]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor		@XMM[7], @XMM[13]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[4]}, [$out]!
+	veor		@XMM[5], @XMM[15]
+	vst1.8		{@XMM[6]}, [$out]!
+	vmov.i32	@XMM[8], #1			@ compose 1<<96
+	vst1.8		{@XMM[3]}, [$out]!
+	veor		@XMM[9], @XMM[9], @XMM[9]
+	vst1.8		{@XMM[7]}, [$out]!
+	vext.8		@XMM[8], @XMM[9], @XMM[8], #4
+	vst1.8		{@XMM[2]}, [$out]!
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]		@ compose 2<<96
+	vst1.8		{@XMM[5]}, [$out]!
+	vldmia		$fp, {@XMM[0]}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		$len, $len, #8
+	vld1.8		{@XMM[8]}, [$inp]!	@ load input
+	veor		@XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!	@ write output
+	cmp		$len, #2
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[9]}, [$inp]!
+	veor		@XMM[1], @XMM[9]
+	vst1.8		{@XMM[1]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[10]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	vst1.8		{@XMM[4]}, [$out]!
+	cmp		$len, #4
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[11]}, [$inp]!
+	veor		@XMM[6], @XMM[11]
+	vst1.8		{@XMM[6]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[12]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[3]}, [$out]!
+	cmp		$len, #6
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[13]}, [$inp]!
+	veor		@XMM[7], @XMM[13]
+	vst1.8		{@XMM[7]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[14]}, [$inp]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		$keysched, {q0-q1}
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10		@ add sp,$fp,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, $inp		@ copy arguments
+	mov	r5, $out
+	mov	r6, $len
+	mov	r7, $key
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{@XMM[1]}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{@XMM[1]}, [sp,:64]	@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{@XMM[0]}, [r4]!	@ load input
+	vld1.8	{@XMM[1]}, [sp,:64]	@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	@XMM[0],@XMM[0],@XMM[1]
+	vst1.8	{@XMM[0]}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";		# returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	subs	$len, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
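+# Each step doubles the tweak in GF(2^128): vadd.u64 shifts both
+# 64-bit lanes left by one, vshr.s64 #63 sign-extends the bits shifted
+# out, and the vand/vswp pair turns them into the 0x87 reduction (from
+# bit 127) and the cross-lane carry (from bit 63) held in .Lxts_magic,
+# which the veor folds back in.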
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		$len, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
+
+.align	5
+.Lxts_enc_5:
+	vst1.64		{@XMM[13]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	vst1.64		{@XMM[12]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	vst1.64		{@XMM[11]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	vst1.64		{@XMM[10]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, $out, #0x10
+
+.Lxts_enc_steal:
+	ldrb		r0, [$inp], #1
+	ldrb		r1, [$out, #-0x10]
+	strb		r0, [$out, #-0x10]
+	strb		r1, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_enc_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	tst	$len, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	$len, #0x10			@ subtract another 16 bytes
+	subs	$len, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		$len, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	vst1.64		{@XMM[13]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	vst1.64		{@XMM[12]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	vst1.64		{@XMM[11]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	vst1.64		{@XMM[10]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+	mov		r5, $magic			@ preserve magic
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+	mov		$magic, r5
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		$magic, {$twmask}
+	vshr.s64	@XMM[6], @XMM[8], #63
+	vand		@XMM[6], @XMM[6], $twmask
+	vadd.u64	@XMM[9], @XMM[8], @XMM[8]
+	vswp		`&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+	veor		@XMM[9], @XMM[9], @XMM[6]
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{@XMM[0]}, [$inp]!
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	vst1.8		{@XMM[0]}, [$out]
+
+	mov		r6, $out
+.Lxts_dec_steal:
+	ldrb		r1, [$out]
+	ldrb		r0, [$inp], #1
+	strb		r1, [$out, #0x10]
+	strb		r0, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
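
The tweak arithmetic above is driven by .Lxts_magic (.quad 1, 0x87): each vshr.s64/vand/vswp/vadd.u64/veor group multiplies the 128-bit XTS tweak by x in GF(2^128), using arithmetic shifts to turn the top bit of each 64-bit half into a carry mask. A minimal C sketch of the same update, purely for illustration (the function name is invented):

#include <stdint.h>

/* t[0] holds tweak bits 0..63, t[1] bits 64..127 */
static void xts_tweak_mul_x(uint64_t t[2])
{
	uint64_t carry = t[0] >> 63;		/* carry into the high half */
	uint64_t fb = (t[1] >> 63) ? 0x87 : 0;	/* GF(2^128) feedback term */

	t[1] = (t[1] << 1) ^ carry;
	t[0] = (t[0] << 1) ^ fb;
}
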
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 1a7024b..c38b58c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index fcc1b5b..5c22851 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
 #define put_byte_3      lsl #0
 #endif
 
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
 /*
  * Data preload for architectures that support it
  */
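
ARM_BE8() keeps BE8-only byte-swaps out of #ifdef blocks in shared assembly. Since this header is intended for assembly sources, a hypothetical use in a .S file is sketched below inside a C comment (the instruction sequence is the point):

/*
 * illustration: a word loaded from little-endian hardware state needs
 * swapping only when the kernel itself runs big-endian (BE8):
 *
 *	ldr	r0, [r1]
 *	ARM_BE8(rev	r0, r0)		@ expands to nothing on LE builds
 */
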
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d..62d2cb5 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
@@ -41,6 +42,7 @@
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_add\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
@@ -79,6 +81,7 @@
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_sub\n"
 "1:	ldrex	%0, [%3]\n"
 "	sub	%0, %0, %4\n"
@@ -114,7 +117,8 @@
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;
 
 	smp_mb();
 
@@ -134,21 +138,6 @@
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -197,15 +186,6 @@
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +218,15 @@
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +237,7 @@
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +246,9 @@
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,10 +259,11 @@
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
 "	strexd	%0, %3, %H3, [%2]\n"
@@ -294,15 +275,16 @@
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -311,17 +293,17 @@
 	: "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -334,15 +316,16 @@
 	return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -351,17 +334,17 @@
 	: "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -374,9 +357,10 @@
 	return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;
 
 	smp_mb();
@@ -398,9 +382,9 @@
 	return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -419,18 +403,18 @@
 	return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, #1\n"
-"	sbc	%H0, %H0, #0\n"
-"	teq	%H0, #0\n"
+"	subs	%Q0, %Q0, #1\n"
+"	sbc	%R0, %R0, #0\n"
+"	teq	%R0, #0\n"
 "	bmi	2f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -445,9 +429,9 @@
 	return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
 
@@ -459,8 +443,8 @@
 "	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %6\n"
-"	adc	%H0, %H0, %H6\n"
+"	adds	%Q0, %Q0, %Q6\n"
+"	adc	%R0, %R0, %R6\n"
 "	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 0000000..1714800
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+			 bL_switch_completion_handler completer,
+			 void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE	0
+#define BL_NOTIFY_POST_ENABLE	1
+#define BL_NOTIFY_PRE_DISABLE	2
+#define BL_NOTIFY_POST_DISABLE	3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled.  Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled().  These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
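
A hedged sketch of a client of this header (all names below are invented): hook the notifier chain to quiesce work around switcher transitions, and bracket switcher-dependent sections with the get/put helpers.

#include <asm/bL_switcher.h>
#include <linux/notifier.h>

static int my_bl_notify(struct notifier_block *nb,
			unsigned long event, void *unused)
{
	if (event == BL_NOTIFY_PRE_DISABLE)
		;	/* quiesce anything that assumes switching is live */
	return NOTIFY_OK;
}

static struct notifier_block my_bl_nb = {
	.notifier_call = my_bl_notify,
};

static void my_init(void)
{
	bL_switcher_register_notifier(&my_bl_nb);

	if (bL_switcher_get_enabled()) {
		/* the switcher cannot change state inside this window */
		bL_switcher_put_enabled();
	}
}
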
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c..b274bde 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
 #define _ASMARM_BUG_H
 
 #include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
 
 #ifdef CONFIG_BUG
 
@@ -12,10 +14,10 @@
  */
 #ifdef CONFIG_THUMB2_KERNEL
 #define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
 #else
 #define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
 #endif
 
 
@@ -33,7 +35,7 @@
 
 #define __BUG(__file, __line, __value)				\
 do {								\
-	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
+	asm volatile("1:\t" BUG_INSTR(__value) "\n"  \
 		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
 		"2:\t.asciz " #__file "\n" 			\
 		".popsection\n" 				\
@@ -48,7 +50,7 @@
 
 #define __BUG(__file, __line, __value)				\
 do {								\
-	asm volatile(BUG_INSTR_TYPE #__value);			\
+	asm volatile(BUG_INSTR(__value) "\n");			\
 	unreachable();						\
 } while (0)
 #endif  /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5b..ee753f1 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
 
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and do so without any intervening memory access between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations;
+ *   however, some exceptions may exist.  Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
+ *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+	asm volatile( \
+	"stmfd	sp!, {fp, ip} \n\t" \
+	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
+	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
+	"isb	\n\t" \
+	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
+	"clrex	\n\t" \
+	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
+	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
+	"isb	\n\t" \
+	"dsb	\n\t" \
+	"ldmfd	sp!, {fp, ip}" \
+	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+	      "r9","r10","lr","memory" )
+
 #endif
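
Typical use is the dying-CPU leg of hotplug or deep idle, roughly as below ('louis' selects v7_flush_dcache_louis, the Level of Unification Inner Shareable; 'all' flushes the whole hierarchy; the function name is illustrative):

static void my_cpu_die(unsigned int cpu)
{
	/* stop allocating, flush, and leave SMP coherency in one step */
	v7_exit_coherency_flush(louis);

	/* platform-specific power-off poke would go here */
	while (1)
		asm volatile("wfi");
}
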
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c1..df2fbba 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@
 	return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	unsigned long long oldval;
+	unsigned long res;
+
+	__asm__ __volatile__(
+"1:	ldrexd		%1, %H1, [%3]\n"
+"	teq		%1, %4\n"
+"	teqeq		%H1, %H4\n"
+"	bne		2f\n"
+"	strexd		%0, %5, %H5, [%3]\n"
+"	teq		%0, #0\n"
+"	bne		1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+	: "r" (ptr), "r" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)						\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
 				       (unsigned long)(o),		\
@@ -230,18 +266,16 @@
 				       sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
-						atomic64_t,		\
-						counter),		\
-					      (unsigned long long)(o),	\
-					      (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
 
-#define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
-						local64_t,		\
-						a),			\
-					     (unsigned long long)(o),	\
-					     (unsigned long long)(n)))
+#define cmpxchg64_relaxed(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e97..acdde76 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE	3
 #define CPUID_MPUIR	4
 #define CPUID_MPIDR	5
+#define CPUID_REVIDR	6
 
 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0	0x40
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b9..e701a4d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define DMA_ERROR_CODE	(~0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	return &arm_dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
 	BUG_ON(!dev);
@@ -64,6 +75,7 @@
 {
 	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
+
 #else
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
@@ -86,6 +98,46 @@
 }
 #endif
 
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	mask = *dev->dma_mask;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
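
These additions supply the helpers that generic bounce-buffer code (e.g. swiotlb) expects from the architecture. A hedged sketch of how they compose; needs_bounce() is an invented name:

static bool needs_bounce(struct device *dev, phys_addr_t paddr, size_t size)
{
	dma_addr_t dev_addr = phys_to_dma(dev, paddr);

	/* with a 32-bit mask, limit = (mask + 1) & ~mask = 1ULL << 32, so
	 * a buffer larger than 4 GiB or outside the window must bounce */
	return !dma_capable(dev, dev_addr, size);
}
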
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a..fe3ea77 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	8
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b..ad774f3 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
 #define TRACER_TIMEOUT 10000
 
 #define etm_writel(t, v, x) \
-	(__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+	(writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
 #define ETBFF_TRIGFL		BIT(10)
 
 #define etb_writel(t, v, x) \
-	(__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+	(writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
 
 #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etm_unlock(t) \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741..3c597c2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,13 @@
 #define BIOVEC_MERGEABLE(vec1, vec2)	\
 	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #ifdef CONFIG_MMU
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce..0a9d5dd 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
 #define __ARM_KGDB_H__
 
 #include <linux/ptrace.h>
+#include <asm/opcodes.h>
 
 /*
  * GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
 
 static inline void arch_kgdb_breakpoint(void)
 {
-	asm(".word 0xe7ffdeff");
+	asm(__inst_arm(0xe7ffdeff));
 }
 
 extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 64e9696..1d3153c 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -57,6 +57,7 @@
  * TSC:		Trap SMC
  * TSW:		Trap cache operations by set/way
  * TWI:		Trap WFI
+ * TWE:		Trap WFE
  * TIDCP:	Trap L2CTLR/L2ECTLR
  * BSU_IS:	Upgrade barriers to the inner shareable domain
  * FB:		Force broadcast of all maintenance operations
@@ -67,7 +68,7 @@
  */
 #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
 			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
-			HCR_SWIO | HCR_TIDCP)
+			HCR_TWE | HCR_SWIO | HCR_TIDCP)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
 /* System Control Register (SCTLR) bits */
@@ -95,12 +96,12 @@
 #define TTBCR_IRGN1	(3 << 24)
 #define TTBCR_EPD1	(1 << 23)
 #define TTBCR_A1	(1 << 22)
-#define TTBCR_T1SZ	(3 << 16)
+#define TTBCR_T1SZ	(7 << 16)
 #define TTBCR_SH0	(3 << 12)
 #define TTBCR_ORGN0	(3 << 10)
 #define TTBCR_IRGN0	(3 << 8)
 #define TTBCR_EPD0	(1 << 7)
-#define TTBCR_T0SZ	3
+#define TTBCR_T0SZ	(7 << 0)
 #define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
 
 /* Hyp System Trap Register */
@@ -208,6 +209,8 @@
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
 
+#define HSR_WFI_IS_WFE		(1U << 0)
+
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
 #define HSR_DABT_S1PTW		(1U << 7)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index a2f43dd..661da11 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -39,7 +39,7 @@
 #define c6_IFAR		17	/* Instruction Fault Address Register */
 #define c7_PAR		18	/* Physical Address Register */
 #define c7_PAR_high	19	/* PAR top 32 bits */
-#define c9_L2CTLR	20	/* Cortex A15 L2 Control Register */
+#define c9_L2CTLR	20	/* Cortex A15/A7 L2 Control Register */
 #define c10_PRRR	21	/* Primary Region Remap Register */
 #define c10_NMRR	22	/* Normal Memory Remap Register */
 #define c12_VBAR	23	/* Vector Base Address Register */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a464e8d7..0fa90c9 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -157,4 +157,55 @@
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cp15[c0_MPIDR];
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return be16_to_cpu(data & 0xffff);
+		default:
+			return be32_to_cpu(data);
+		}
+	}
+
+	return data;		/* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return cpu_to_be16(data & 0xffff);
+		default:
+			return cpu_to_be32(data);
+		}
+	}
+
+	return data;		/* Leave LE untouched */
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
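
Worked 16-bit example on a little-endian host: a big-endian vcpu storing 0x1234 expects the bytes {0x12, 0x34} in memory, so the helper swaps the register value before it is copied into the mmio data buffer. Hedged fragment:

/* inside an mmio-store handler, for a vcpu with PSR_E_BIT set */
unsigned long out = vcpu_data_guest_to_host(vcpu, 0x1234, 2);
/* out == 0x3412; memcpy'd to the buffer it yields bytes 0x12, 0x34 */
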
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7d22517..8a6f6db 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -38,11 +38,6 @@
 
 #define KVM_VCPU_MAX_FEATURES 1
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #include <kvm/arm_vgic.h>
 
 struct kvm_vcpu;
@@ -154,6 +149,7 @@
 struct kvm_vcpu_init;
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 struct kvm_one_reg;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 9b28c41..77de4a4 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -62,6 +62,12 @@
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+	*pmd = new_pmd;
+	flush_pmd_entry(pmd);
+}
+
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
 	*pte = new_pte;
@@ -103,9 +109,15 @@
 	pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+	pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+					      unsigned long size)
 {
 	/*
 	 * If we are going to insert an instruction page and the icache is
@@ -120,8 +132,7 @@
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
 	 */
 	if (icache_is_pipt()) {
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+		__cpuc_coherent_user_range(hva, hva + size);
 	} else if (!icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 402a2bc..17a3fa2 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,6 +49,7 @@
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **,
 					 struct meminfo *);
+	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
 	void			(*init_early)(void);
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88..608516e 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
 
 /*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated.  The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+			 unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
@@ -81,10 +89,40 @@
  *
  * This will return if mcpm_platform_register() has not been called
  * previously, in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns zero for the
+ * specified cpu.  Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
  */
 void mcpm_cpu_power_down(void);
 
 /**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ *	make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ *	- zero if the CPU is in a safely parked state
+ *	- nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
+/**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
  * @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +164,7 @@
 struct mcpm_platform_ops {
 	int (*power_up)(unsigned int cpu, unsigned int cluster);
 	void (*power_down)(void);
+	int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
 	void (*suspend)(u64);
 	void (*powered_up)(void);
 };
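
The new hook slots into SMP hotplug roughly as follows (a sketch with invented function names; cpu_logical_map() and MPIDR_AFFINITY_LEVEL() come from existing headers):

static int my_cpu_kill(unsigned int cpu)
{
	unsigned int mpidr = cpu_logical_map(cpu);
	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	/* smp_ops.cpu_kill() reports success as 1 */
	return !mcpm_cpu_power_down_finish(pcpu, pcluster);
}

static void my_cpu_die(unsigned int cpu)
{
	mcpm_cpu_power_down();	/* does not return on success */
}
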
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a93..4dd2145 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,8 +172,13 @@
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24	0x81000000
+#define __PV_BITS_7_0	0x81
 
-extern unsigned long __pv_phys_offset;
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
+
 #define PHYS_OFFSET __pv_phys_offset
 
 #define __pv_stub(from,to,instr,type)			\
@@ -185,22 +190,58 @@
 	: "=r" (to)					\
 	: "r" (from), "I" (type))
 
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t)				\
+	__asm__ volatile("@ __pv_stub_mov\n"		\
+	"1:	mov	%R0, %1\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (t)					\
+	: "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y)			\
+	__asm__ volatile("@ __pv_add_carry_stub\n"	\
+	"1:	adds	%Q0, %1, %2\n"			\
+	"	adc	%R0, %R0, #0\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "+r" (y)					\
+	: "r" (x), "I" (__PV_BITS_31_24)		\
+	: "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
-	unsigned long t;
-	__pv_stub(x, t, "add", __PV_BITS_31_24);
+	phys_addr_t t;
+
+	if (sizeof(phys_addr_t) == 4) {
+		__pv_stub(x, t, "add", __PV_BITS_31_24);
+	} else {
+		__pv_stub_mov_hi(t);
+		__pv_add_carry_stub(x, t);
+	}
 	return t;
 }
 
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "sub", __PV_BITS_31_24);
 	return t;
 }
+
 #else
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+	return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
 #endif
 #endif
 #endif /* __ASSEMBLY__ */
@@ -238,16 +279,33 @@
 
 static inline void *phys_to_virt(phys_addr_t x)
 {
-	return (void *)(__phys_to_virt((unsigned long)(x)));
+	return (void *)__phys_to_virt(x);
 }
 
 /*
  * Drivers should NOT use these either.
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
-#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
+extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+
+/*
+ * These are for systems that have a hardware-interconnect-supported alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+	if (arch_virt_to_idmap)
+		return arch_virt_to_idmap(x);
+	else
+		return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
+
 /*
  * Virtual <-> DMA view memory address translations
  * Again, these are *only* valid on the kernel direct mapped RAM
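
For LPAE systems whose RAM sits above 4 GiB, the stub pair works as in this worked example (values are illustrative): with PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0x200000000, the boot-time patcher rewrites the immediates to the two halves of __pv_offset = 0x140000000.

/*
 * patched stream for __virt_to_phys(0xc0001000):
 *
 *	mov	%R0, #1			@ high word of __pv_offset
 *	adds	%Q0, %1, #0x40000000	@ low word; sets the carry flag
 *	adc	%R0, %R0, #0		@ 1 + carry = 2
 *
 * result: 0x200001000
 */
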
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da0..64fd151 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
 #else
 #define ASID(mm)	(0)
 #endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f..78a7793 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -102,12 +102,14 @@
 #else
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
-	if (pte) {
-		if (!PageHighMem(pte))
-			clean_pte_table(page_address(pte));
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!PageHighMem(pte))
+		clean_pte_table(page_address(pte));
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
 	}
-
 	return pte;
 }
 
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02..86a659a 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
+/*
+ * We don't have huge page support for short descriptors; for the moment,
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd)	(0)
+#define pmd_thp_or_huge(pmd)	(0)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18..4f95039 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -126,6 +126,8 @@
 #define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
 #define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define L_PMD_S2_RDWR		 (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
  */
@@ -206,6 +208,9 @@
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
 
+#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 413f387..c3d5fc1 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,6 +22,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
+#include <asm/unified.h>
 
 #ifdef __KERNEL__
 #define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@
 #define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
 #define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
 
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up)						\
+	"9998:	" smp "\n"						\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
+	"	.long	9998b\n"					\
+	"	" up "\n"						\
+	"	.popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up)	up
+#endif
+
 /*
  * Prefetching support - only ARMv5.
  */
@@ -97,17 +109,22 @@
 {
 	__asm__ __volatile__(
 		"pld\t%a0"
-		:
-		: "p" (ptr)
-		: "cc");
+		:: "p" (ptr));
 }
 
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 #define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr)	prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+	__asm__ __volatile__(
+		".arch_extension	mp\n"
+		__ALT_SMP_ASM(
+			WASM(pldw)		"\t%a0",
+			WASM(pld)		"\t%a0"
+		)
+		:: "p" (ptr));
+}
+#endif
 #endif
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f0560..8d6a089 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71c..22a3b9b 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,6 +84,8 @@
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
 	/*
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c280..ef3c607 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)					\
-	"9998:	" smp "\n"					\
-	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
-	"	.long	9998b\n"				\
-	"	" up "\n"					\
-	"	.popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV		ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)	ALT_SMP(		\
+#define WFE(cond)	__ALT_SMP_ASM(		\
 	"it " cond "\n\t"			\
 	"wfe" cond ".n",			\
 						\
 	"nop.w"					\
 )
 #else
-#define SEV		ALT_SMP("sev", "nop")
-#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
 #endif
 
+#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@
 	u32 newval;
 	arch_spinlock_t lockval;
 
+	prefetchw(&lock->slock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%3]\n"
 "	add	%1, %0, %4\n"
@@ -100,6 +94,7 @@
 	unsigned long contended, res;
 	u32 slock;
 
+	prefetchw(&lock->slock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%3]\n"
@@ -127,10 +122,14 @@
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@
 {
 	unsigned long tmp;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
@@ -170,6 +170,7 @@
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -203,7 +204,7 @@
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
+#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +222,7 @@
 {
 	unsigned long tmp, tmp2;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
@@ -241,6 +243,7 @@
 
 	smp_mb();
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -259,6 +262,7 @@
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -280,7 +284,7 @@
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
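
The new arch_spin_value_unlocked() above lets callers (e.g. the lockref
code) test a spinlock from a copied value without touching the live
cacheline, and the added prefetchw() calls warm the line for the
exclusive access that the ldrex/strex pairs need. A minimal C sketch of
the ticket-lock check, assuming the little-endian arch_spinlock_t field
layout (names here are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stdint.h>

	/* 32-bit ticket lock: 'owner' is now-serving, 'next' is the
	 * next ticket to hand out. */
	typedef union {
		uint32_t slock;
		struct {
			uint16_t owner;
			uint16_t next;
		} tickets;
	} ticket_lock_t;

	/* Unlocked exactly when no tickets are outstanding. */
	static bool ticket_value_unlocked(ticket_lock_t lock)
	{
		return lock.tickets.owner == lock.tickets.next;
	}
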
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f..47663fc 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
-	volatile unsigned int lock;
+	u32 lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 3896026..def9e57 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@
 		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-	unsigned int midr = read_cpuid_id();
-
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-	/*
-	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-	 */
-	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-	return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  *	flush_pmd_entry
  *
@@ -697,4 +666,21 @@
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+		erratum_a15_798181_handler))
+		return erratum_a15_798181_handler();
+	return false;
+}
+#endif
+
 #endif
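
The tlbflush.h rework above replaces the old always-inlined MIDR test
with a handler pointer that smp_tlb.c fills in once at boot, so the hot
flush path on unaffected CPUs reduces to a predictable NULL check. The
pattern, sketched in plain C with illustrative names:

	#include <stdbool.h>
	#include <stddef.h>

	static bool (*quirk_handler)(void);	/* stays NULL if unaffected */

	static bool quirk_active(void)
	{
		/* Boot code picks a handler once; the fast path only
		 * pays for a pointer test. */
		if (quirk_handler)
			return quirk_handler();
		return false;
	}
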
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f4..b88beab 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)	instr.w
 #define BSYM(sym)	sym + 1
+#else
+#define WASM(instr)	#instr ".w"
 #endif
 
 #else	/* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)	instr
 #define BSYM(sym)	sym
+#else
+#define WASM(instr)	#instr
 #endif
 
 #endif	/* CONFIG_THUMB2_KERNEL */
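
WASM() is the C-side counterpart of W(): it stringifies an instruction
mnemonic and appends ".w" on Thumb-2 kernels, so inline assembly such as
the new SEV definition always assembles to the 4-byte encoding that the
SMP/UP patching machinery requires. A hypothetical expansion trace for a
Thumb-2 kernel:

	/* #define WASM(instr)  #instr ".w"                             */
	/* WASM(sev)  ->  "sev" ".w"  ->  "sev.w"   (4-byte encoding)   */
	/* __ALT_SMP_ASM(WASM(sev), WASM(nop)) then records the site in */
	/* .alt.smp.init so a UP boot can patch it to a wide nop.       */
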
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a..1317ee4 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@
 	return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 0000000..1109017
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (__generic_dma_ops(hwdev)->unmap_page)
+		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_device)
+		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b5..75579a9 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)			(pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)			(mfn)
 #define mfn_to_virt(m)			(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn	    pte_pfn
@@ -32,6 +32,38 @@
 
 #define INVALID_P2M_ENTRY      (~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	unsigned long mfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		mfn = __pfn_to_mfn(pfn);
+		if (mfn != INVALID_P2M_ENTRY)
+			return mfn;
+	}
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		pfn = __mfn_to_pfn(mfn);
+		if (pfn != INVALID_P2M_ENTRY)
+			return pfn;
+	}
+
+	return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
 	unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +108,9 @@
 	return 0;
 }
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-	return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+		unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 0000000..2265a19
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD		0x000c
+#define UARTn_CMD_TXEN			0x0004
+
+#define	UARTn_STATUS		0x0010
+#define	UARTn_STATUS_TXC		0x0020
+#define	UARTn_STATUS_TXBL		0x0040
+
+#define	UARTn_TXDATA		0x0034
+
+		.macro	addruart, rx, tmp
+		ldr	\rx, =(CONFIG_DEBUG_UART_PHYS)
+
+		/*
+		 * Enable TX. The driver might disable it to save energy. We
+		 * don't bother disabling it again afterwards, as power
+		 * consumption isn't that important while debugging.
+		 */
+		ldr	\tmp, =(UARTn_CMD_TXEN)
+		str	\tmp, [\rx, #UARTn_CMD]
+		.endm
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UARTn_TXDATA]
+		.endm
+
+		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXBL
+		beq	1001b
+		.endm
+
+		.macro	busyuart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXC
+		bne	1001b
+		.endm
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1b..9d653d4 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
 #define MSM_DEBUG_UART_PHYS	0x16440000
 #endif
 
+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE	0xFA71E000
+#define MSM_DEBUG_UART_PHYS	0xF991E000
+#endif
+
 	.macro	addruart, rp, rv, tmp
 #ifdef MSM_DEBUG_UART_PHYS
 	ldr	\rp, =MSM_DEBUG_UART_PHYS
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 37c6895b..92ef808 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -25,12 +25,14 @@
 
 		.macro	waituart,rd,rx
 1001:		ldr	\rd, [\rx, #UART01x_FR]
+ ARM_BE8(	rev	\rd, \rd )
 		tst	\rd, #UART01x_FR_TXFF
 		bne	1001b
 		.endm
 
 		.macro	busyuart,rd,rx
 1001:		ldr	\rd, [\rx, #UART01x_FR]
+ ARM_BE8(	rev	\rd, \rd )
 		tst	\rd, #UART01x_FR_BUSY
 		bne	1001b
 		.endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 18d76fd..70a1c9d 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@
 header-y += ioctls.h
 header-y += kvm_para.h
 header-y += mman.h
+header-y += perf_regs.h
 header-y += posix_types.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c1ee007..c498b60 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -63,7 +63,8 @@
 
 /* Supported Processor Types */
 #define KVM_ARM_TARGET_CORTEX_A15	0
-#define KVM_ARM_NUM_TARGETS		1
+#define KVM_ARM_TARGET_CORTEX_A7	1
+#define KVM_ARM_NUM_TARGETS		2
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT	0
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000..ce59448
--- /dev/null
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+	PERF_REG_ARM_R0,
+	PERF_REG_ARM_R1,
+	PERF_REG_ARM_R2,
+	PERF_REG_ARM_R3,
+	PERF_REG_ARM_R4,
+	PERF_REG_ARM_R5,
+	PERF_REG_ARM_R6,
+	PERF_REG_ARM_R7,
+	PERF_REG_ARM_R8,
+	PERF_REG_ARM_R9,
+	PERF_REG_ARM_R10,
+	PERF_REG_ARM_FP,
+	PERF_REG_ARM_IP,
+	PERF_REG_ARM_SP,
+	PERF_REG_ARM_LR,
+	PERF_REG_ARM_PC,
+	PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5140df5f..a30fc9b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@
 
 obj-y		:= elf.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o \
-		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+		   setup.o signal.o sigreturn_codes.o \
+		   stacktrace.o sys_arm.o time.o traps.o
 
 obj-$(CONFIG_ATAGS)		+= atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o
@@ -78,6 +79,7 @@
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 60d3b73..1f031dd 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -155,4 +155,5 @@
 
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
 #endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9cbe70c..b3fb8c9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,6 +192,7 @@
 	svc_entry
 	mov	r2, sp
 	dabt_helper
+ THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
@@ -416,9 +417,8 @@
 	bne	__und_usr_thumb
 	sub	r4, r2, #4			@ ARM instr at LR - 4
 1:	ldrt	r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r0, r0				@ little endian instruction
-#endif
+ ARM_BE8(rev	r0, r0)				@ little endian instruction
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc6bd96..a2dcafd 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -393,9 +393,7 @@
 #else
  USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
 #endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r10, r10			@ little endian instruction
-#endif
+ ARM_BE8(rev	r10, r10)			@ little endian instruction
 
 #elif defined(CONFIG_AEABI)
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 476de57..7801866 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
 
 	__HEAD
 ENTRY(stext)
+ ARM_BE8(setend	be )			@ ensure we are in BE8 mode
 
  THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
  THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
@@ -352,6 +353,9 @@
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
+
+ ARM_BE8(setend	be)				@ ensure we are in BE8 mode
+
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install_secondary
 #endif
@@ -555,6 +559,14 @@
 	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(fixup_smp)
 
+#ifdef __ARMEB__
+#define LOW_OFFSET	0x4
+#define HIGH_OFFSET	0x0
+#else
+#define LOW_OFFSET	0x0
+#define HIGH_OFFSET	0x4
+#endif
+
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 
 /* __fixup_pv_table - patch the stub instructions with the delta between
@@ -565,17 +577,20 @@
 	__HEAD
 __fixup_pv_table:
 	adr	r0, 1f
-	ldmia	r0, {r3-r5, r7}
-	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	ldmia	r0, {r3-r7}
+	mvn	ip, #0
+	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
 	add	r4, r4, r3	@ adjust table start address
 	add	r5, r5, r3	@ adjust table end address
-	add	r7, r7, r3	@ adjust __pv_phys_offset address
-	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	add	r6, r6, r3	@ adjust __pv_phys_offset address
+	add	r7, r7, r3	@ adjust __pv_offset address
+	str	r8, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24 @ must be 16MiB aligned
 THUMB(	it	ne		@ cross section branch )
 	bne	__error
-	str	r6, [r7, #4]	@ save to __pv_offset
+	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
 	b	__fixup_a_pv_table
 ENDPROC(__fixup_pv_table)
 
@@ -584,10 +599,19 @@
 	.long	__pv_table_begin
 	.long	__pv_table_end
 2:	.long	__pv_phys_offset
+	.long	__pv_offset
 
 	.text
 __fixup_a_pv_table:
+	adr	r0, 3f
+	ldr	r6, [r0]
+	add	r6, r6, r3
+	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
+	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
+	mov	r6, r6, lsr #24
+	cmn	r0, #1
 #ifdef CONFIG_THUMB2_KERNEL
+	moveq	r0, #0x200000	@ set bit 21, mov to mvn instruction
 	lsls	r6, #24
 	beq	2f
 	clz	r7, r6
@@ -601,18 +625,42 @@
 	b	2f
 1:	add     r7, r3
 	ldrh	ip, [r7, #2]
-	and	ip, 0x8f00
-	orr	ip, r6	@ mask in offset bits 31-24
+ARM_BE8(rev16	ip, ip)
+	tst	ip, #0x4000
+	and	ip, #0x8f00
+	orrne	ip, r6	@ mask in offset bits 31-24
+	orreq	ip, r0	@ mask in offset bits 7-0
+ARM_BE8(rev16	ip, ip)
 	strh	ip, [r7, #2]
+	bne	2f
+	ldrh	ip, [r7]
+ARM_BE8(rev16	ip, ip)
+	bic	ip, #0x20
+	orr	ip, ip, r0, lsr #16
+ARM_BE8(rev16	ip, ip)
+	strh	ip, [r7]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
 	bcc	1b
 	bx	lr
 #else
+	moveq	r0, #0x400000	@ set bit 22, mov to mvn instruction
 	b	2f
 1:	ldr	ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	@ in BE8, we load data in BE, but instructions still in LE
+	bic	ip, ip, #0xff000000
+	tst	ip, #0x000f0000	@ check the rotation field
+	orrne	ip, ip, r6, lsl #24 @ mask in offset bits 31-24
+	biceq	ip, ip, #0x00004000 @ clear bit 22
+	orreq	ip, ip, r0, lsl #24 @ mask in offset bits 7-0
+#else
 	bic	ip, ip, #0x000000ff
-	orr	ip, ip, r6	@ mask in offset bits 31-24
+	tst	ip, #0xf00	@ check the rotation field
+	orrne	ip, ip, r6	@ mask in offset bits 31-24
+	biceq	ip, ip, #0x400000	@ clear bit 22
+	orreq	ip, ip, r0	@ mask in offset bits 7-0
+#endif
 	str	ip, [r7, r3]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
@@ -621,28 +669,30 @@
 #endif
 ENDPROC(__fixup_a_pv_table)
 
+	.align
+3:	.long __pv_offset
+
 ENTRY(fixup_pv_table)
 	stmfd	sp!, {r4 - r7, lr}
-	ldr	r2, 2f			@ get address of __pv_phys_offset
 	mov	r3, #0			@ no offset
 	mov	r4, r0			@ r0 = table start
 	add	r5, r0, r1		@ r1 = table size
-	ldr	r6, [r2, #4]		@ get __pv_offset
 	bl	__fixup_a_pv_table
 	ldmfd	sp!, {r4 - r7, pc}
 ENDPROC(fixup_pv_table)
 
-	.align
-2:	.long	__pv_phys_offset
-
 	.data
 	.globl	__pv_phys_offset
 	.type	__pv_phys_offset, %object
 __pv_phys_offset:
-	.long	0
-	.size	__pv_phys_offset, . - __pv_phys_offset
+	.quad	0
+	.size	__pv_phys_offset, . -__pv_phys_offset
+
+	.globl	__pv_offset
+	.type	__pv_offset, %object
 __pv_offset:
-	.long	0
+	.quad	0
+	.size	__pv_offset, . -__pv_offset
 #endif
 
 #include "head-common.S"
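
The head.S changes widen __pv_phys_offset and __pv_offset to 64 bits so
the phys/virt stub patching copes with RAM above the 4GB mark: when the
computed offset is negative (high word all ones), the mov stubs are
rewritten to mvn and patched with the offset's low byte instead of bits
31-24. A rough C model of the ARM-encoding patch step (imm_rot and
mov_bits play the roles of r6 and r0 in the hunk above; an illustration,
not the actual patcher):

	#include <stdint.h>

	#define ROT_FIELD	0x00000f00	/* set on add/sub stubs */
	#define OPC_BIT22	0x00400000	/* mov (0) vs mvn (1)   */

	static uint32_t patch_pv_insn(uint32_t insn, uint32_t imm_rot,
				      uint32_t mov_bits)
	{
		insn &= ~0xffu;			/* drop old immediate */
		if (insn & ROT_FIELD)
			return insn | imm_rot;	/* offset bits 31-24 */
		insn &= ~OPC_BIT22;		/* reset to plain mov */
		return insn | mov_bits;		/* becomes mvn + low
						   byte when negative */
	}
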
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7b95de6..3d44660 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		ctrl_base = ARM_BASE_WCR;
 		val_base = ARM_BASE_WVR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -396,12 +396,12 @@
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = ARM_BASE_WCR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -697,7 +697,7 @@
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -768,7 +768,7 @@
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
 
-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -802,7 +802,7 @@
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);
 
 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f3..a7b621e 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
 static void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 static void __kprobes
@@ -421,10 +421,10 @@
 			continue;
 
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 084dc88..45e4781 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
 #include <asm/sections.h>
 #include <asm/smp_plat.h>
 #include <asm/unwind.h>
+#include <asm/opcodes.h>
 
 #ifdef CONFIG_XIP_KERNEL
 /*
@@ -40,7 +41,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #endif
@@ -60,6 +61,7 @@
 		Elf32_Sym *sym;
 		const char *symname;
 		s32 offset;
+		u32 tmp;
 #ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
 #endif
@@ -95,7 +97,8 @@
 		case R_ARM_PC24:
 		case R_ARM_CALL:
 		case R_ARM_JUMP24:
-			offset = (*(u32 *)loc & 0x00ffffff) << 2;
+			offset = __mem_to_opcode_arm(*(u32 *)loc);
+			offset = (offset & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;
 
@@ -111,9 +114,10 @@
 			}
 
 			offset >>= 2;
+			offset &= 0x00ffffff;
 
-			*(u32 *)loc &= 0xff000000;
-			*(u32 *)loc |= offset & 0x00ffffff;
+			*(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+			*(u32 *)loc |= __opcode_to_mem_arm(offset);
 			break;
 
 	       case R_ARM_V4BX:
@@ -121,8 +125,8 @@
 			* other bits to re-code instruction as
 			* MOV PC,Rm.
 			*/
-		       *(u32 *)loc &= 0xf000000f;
-		       *(u32 *)loc |= 0x01a0f000;
+		       *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+		       *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
 		       break;
 
 		case R_ARM_PREL31:
@@ -132,7 +136,7 @@
 
 		case R_ARM_MOVW_ABS_NC:
 		case R_ARM_MOVT_ABS:
-			offset = *(u32 *)loc;
+			offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
 			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
 			offset = (offset ^ 0x8000) - 0x8000;
 
@@ -140,16 +144,18 @@
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u32 *)loc &= 0xfff0f000;
-			*(u32 *)loc |= ((offset & 0xf000) << 4) |
-					(offset & 0x0fff);
+			tmp &= 0xfff0f000;
+			tmp |= ((offset & 0xf000) << 4) |
+				(offset & 0x0fff);
+
+			*(u32 *)loc = __opcode_to_mem_arm(tmp);
 			break;
 
 #ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@
 			sign = (offset >> 24) & 1;
 			j1 = sign ^ (~(offset >> 23) & 1);
 			j2 = sign ^ (~(offset >> 22) & 1);
-			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+			upper = (u16)((upper & 0xf800) | (sign << 10) |
 					    ((offset >> 12) & 0x03ff));
-			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
-						  (j1 << 13) | (j2 << 11) |
-						  ((offset >> 1) & 0x07ff));
+			lower = (u16)((lower & 0xd000) |
+				      (j1 << 13) | (j2 << 11) |
+				      ((offset >> 1) & 0x07ff));
+
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 
 		case R_ARM_THM_MOVW_ABS_NC:
 		case R_ARM_THM_MOVT_ABS:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u16 *)loc = (u16)((upper & 0xfbf0) |
-					    ((offset & 0xf000) >> 12) |
-					    ((offset & 0x0800) >> 1));
-			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
-						  ((offset & 0x0700) << 4) |
-						  (offset & 0x00ff));
+			upper = (u16)((upper & 0xfbf0) |
+				      ((offset & 0xf000) >> 12) |
+				      ((offset & 0x0800) >> 1));
+			lower = (u16)((lower & 0x8f00) |
+				      ((offset & 0x0700) << 4) |
+				      (offset & 0x00ff));
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 #endif
 
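
Routing every relocation through the <asm/opcodes.h> helpers makes
module fix-ups safe on BE8, where the CPU loads data big-endian but
instruction memory stays little-endian. The helpers boil down to
conditional byte swaps; a hedged sketch of the idea (names are
simplified stand-ins for __mem_to_opcode_arm() and friends):

	#include <stdint.h>

	static inline uint32_t swap32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00) |
		       ((x << 8) & 0x00ff0000) | (x << 24);
	}

	#ifdef CONFIG_CPU_ENDIAN_BE8
	/* BE8: byte-reverse between memory image and opcode value. */
	#define mem_to_opcode(x)	swap32(x)
	#define opcode_to_mem(x)	swap32(x)
	#else
	#define mem_to_opcode(x)	(x)
	#define opcode_to_mem(x)	(x)
	#endif
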
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e186ee1..bc3f2ef 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -256,12 +256,11 @@
 	       struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (is_software_event(event))
 		return 1;
 
-	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b..d85055c 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@
 
 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }
 
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644
index 0000000..6e4379c
--- /dev/null
+++ b/arch/arm/kernel/perf_regs.c
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+	if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+		return 0;
+
+	return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+	if (!mask || mask & REG_RESERVED)
+		return -EINVAL;
+
+	return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+	return PERF_SAMPLE_REGS_ABI_32;
+}
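
perf_regs.c is the piece that lets perf attach raw register values to
each sample on ARM, which userspace unwinding (perf record --call-graph
dwarf) builds on. A sketch of how a tool would request a register
subset, assuming the uapi enum above (the attribute fields are the
standard perf ABI):

	#include <linux/perf_event.h>
	#include <asm/perf_regs.h>

	/* Sample FP, SP, LR and PC - enough for simple frame walking. */
	static void want_arm_regs(struct perf_event_attr *attr)
	{
		attr->sample_type |= PERF_SAMPLE_REGS_USER;
		attr->sample_regs_user =
			(1ULL << PERF_REG_ARM_FP) |
			(1ULL << PERF_REG_ARM_SP) |
			(1ULL << PERF_REG_ARM_LR) |
			(1ULL << PERF_REG_ARM_PC);
	}
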
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5d65438..6a1b8a8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,8 @@
 #endif
 
 extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+			      struct proc_info_list *);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
+	erratum_a15_798181_init();
+
 	feat_v6_fixup();
 
 	cacheid_init();
@@ -619,9 +623,10 @@
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }
 
-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+	u64 aligned_start;
 
 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +639,16 @@
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
-	bank->start = PAGE_ALIGN(start);
+	aligned_start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_ARM_LPAE
-	if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+	if (aligned_start > ULONG_MAX) {
+		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+		       "32-bit physical address space\n", (long long)start);
+		return -EINVAL;
+	}
+
+	if (aligned_start + size > ULONG_MAX) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
 		/*
@@ -645,10 +656,11 @@
 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 		 * This means we lose a page after masking.
 		 */
-		size = ULONG_MAX - bank->start;
+		size = ULONG_MAX - aligned_start;
 	}
 #endif
 
+	bank->start = aligned_start;
 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
 
 	/*
@@ -669,8 +681,8 @@
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	phys_addr_t size;
-	phys_addr_t start;
+	u64 size;
+	u64 start;
 	char *endp;
 
 	/*
@@ -878,6 +890,8 @@
 	parse_early_param();
 
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
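
Taking u64 start/size means a large mem= argument can no longer wrap in
phys_addr_t before the range checks run: banks starting above 4GB are
rejected outright on kernels without 64-bit phys_addr_t, and banks that
merely cross the boundary are truncated as before. The checks in
isolation (a C sketch with a fixed 4K page size, not the kernel
function):

	#include <stdint.h>

	#define PG_SIZE	4096ULL

	/* Returns the usable bank size, or 0 to reject the bank. */
	static uint64_t clamp_bank(uint64_t start, uint64_t size)
	{
		uint64_t aligned = (start + PG_SIZE - 1) & ~(PG_SIZE - 1);

		size -= start & (PG_SIZE - 1);	/* round with start */
		if (aligned > 0xffffffffULL)
			return 0;		/* entirely above 4GB */
		if (aligned + size > 0xffffffffULL)
			size = 0xffffffffULL - aligned;	/* truncate */
		return size & ~(PG_SIZE - 1);
	}
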
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab33042..04d6388 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -21,29 +21,7 @@
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7.  We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-static const unsigned long sigreturn_codes[7] = {
-	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
-	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+extern const unsigned long sigreturn_codes[7];
 
 static unsigned long signal_return_offset;
 
@@ -375,12 +353,18 @@
 		 */
 		thumb = handler & 1;
 
+#if __LINUX_ARM_ARCH__ >= 7
+		/*
+		 * Clear the If-Then Thumb-2 execution state. The ARM spec
+		 * requires this to be all 000s in ARM mode. The Snapdragon
+		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
+		 * without this.
+		 */
+		cpsr &= ~PSR_IT_MASK;
+#endif
+
 		if (thumb) {
 			cpsr |= PSR_T_BIT;
-#if __LINUX_ARM_ARCH__ >= 7
-			/* clear the If-Then Thumb-2 execution state */
-			cpsr &= ~PSR_IT_MASK;
-#endif
 		} else
 			cpsr &= ~PSR_T_BIT;
 	}
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 0000000..3c5d0f2
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by:	Victor Kamensky, 2013-08-13
+ * Copyright:	(C) 2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7. As a result,
+ * the ARM syscall sequence snippet consists of a mov and an svc in
+ * .arm encoding.
+ *
+ * For Thumb syscalls, we pass the syscall number via r7.  We therefore
+ * need two 16-bit instructions in .thumb encoding.
+ *
+ * Please note that these snippets are not executed in place. Instead,
+ * they are copied by the kernel into the appropriate places. The code
+ * in arch/arm/kernel/signal.c is very sensitive to the layout of these
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+	/*
+	 * Note that we manually set the minimal architecture version
+	 * that supports the required Thumb opcodes. It is OK for this
+	 * file to be combined with objects built for lower arch
+	 * variants, since these code snippets are only used as input
+	 * data.
+	 */
+	.arch armv4t
+#endif
+
+	.section .rodata
+	.global sigreturn_codes
+	.type	sigreturn_codes, #object
+
+	.arm
+
+sigreturn_codes:
+
+	/* ARM sigreturn syscall code snippet */
+	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/* ARM sigreturn_rt syscall code snippet */
+	.arm
+	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn_rt syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/*
+	 * Note on the additional space: the setup_return() algorithm in
+	 * signal.c always copies two words, regardless of whether it is
+	 * the Thumb case or not, so we need one additional word after
+	 * the real last entry.
+	 */
+	.arm
+	.space	4
+
+	.size	sigreturn_codes, . - sigreturn_codes
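
Moving the trampolines into a real .S file lets the assembler emit
proper .arm/.thumb encodings instead of hand-built constants, while the
layout contract with signal.c stays the same: two words per snippet,
ARM/Thumb pairs, sigreturn first and the rt_sigreturn variants at
offset 3. A hedged reconstruction of how setup_return() indexes the
table (is_rt stands in for the SA_SIGINFO test):

	/* Pick the snippet for the current instruction set and flavour. */
	int idx = thumb << 1;		/* ARM: 0, Thumb: 2 */

	if (is_rt)			/* rt_sigreturn variants at +3 */
		idx += 3;

	/* Two words are always copied - hence the trailing .space 4. */
	put_user(sigreturn_codes[idx],     retcode);
	put_user(sigreturn_codes[idx + 1], retcode + 1);
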
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index db1536b..b907d9b 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -55,6 +55,7 @@
  * specific registers and some other data for resume.
  *  r0 = suspend function arg0
  *  r1 = suspend function
+ *  r2 = MPIDR value the resuming CPU will use
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@
 	mov	r5, sp			@ current virtual SP
 	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
 	sub	sp, sp, r4		@ allocate CPU state on stack
-	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
-	add	r0, sp, #8		@ save pointer to save block
-	mov	r1, r4			@ size of save block
-	mov	r2, r5			@ virtual SP
 	ldr	r3, =sleep_save_sp
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
 	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
-	ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
-        ALT_UP_B(1f)
-	ldr	r8, =mpidr_hash
-	/*
-	 * This ldmia relies on the memory layout of the mpidr_hash
-	 * struct mpidr_hash.
-	 */
-	ldmia	r8, {r4-r7}	@ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
-	compute_mpidr_hash	lr, r5, r6, r7, r9, r4
-	add	r3, r3, lr, lsl #2
-1:
+	ALT_SMP(ldr r0, =mpidr_hash)
+	ALT_UP_B(1f)
+	/* This ldmia relies on the memory layout of the mpidr_hash struct */
+	ldmia	r0, {r1, r6-r8}	@ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+	compute_mpidr_hash	r0, r6, r7, r8, r2, r1
+	add	r3, r3, r0, lsl #2
+1:	mov	r2, r5			@ virtual SP
+	mov	r1, r4			@ size of save block
+	add	r0, sp, #8		@ pointer to save block
 	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
@@ -130,6 +126,7 @@
 	.data
 	.align
 ENTRY(cpu_resume)
+ARM_BE8(setend be)			@ ensure we are in BE mode
 	mov	r1, #0
 	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
 	ALT_UP_B(1f)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 72024ea..dc894ab 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -66,6 +67,8 @@
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
+	IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +83,7 @@
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-	phys_addr_t pgdir = virt_to_phys(pgd);
+	phys_addr_t pgdir = virt_to_idmap(pgd);
 	BUG_ON(pgdir & ARCH_PGD_MASK);
 	return pgdir >> ARCH_PGD_SHIFT;
 }
@@ -448,6 +451,14 @@
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (is_smp())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -456,6 +467,8 @@
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +528,19 @@
 		cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+	per_cpu(cpu_completion, cpu) = completion;
+	return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+	complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -565,6 +591,20 @@
 		irq_exit();
 		break;
 
+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
+	case IPI_COMPLETION:
+		irq_enter();
+		ipi_complete(cpu);
+		irq_exit();
+		break;
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
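
IPI_COMPLETION gives one CPU a way to complete() a waiter on another CPU
straight from IPI context; register_ipi_completion() returns the IPI
number so the partner can raise it directly (the big.LITTLE switcher is
the intended user). A hedged sketch of the waiter side, with the
hand-off to the partner left abstract:

	#include <linux/completion.h>
	#include <linux/smp.h>

	static void wait_for_partner_cpu(void)
	{
		struct completion done;
		int ipi_nr;

		init_completion(&done);
		ipi_nr = register_ipi_completion(&done, smp_processor_id());

		/* ... publish ipi_nr and our CPU number; the partner
		 * raises that IPI at us (e.g. as a GIC SGI) ... */

		wait_for_completion(&done);
	}
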
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63..1aafa0d 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
  */
 unsigned int __init scu_get_core_count(void __iomem *scu_base)
 {
-	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
 	return (ncores & 0x03) + 1;
 }
 
@@ -42,19 +42,19 @@
 #ifdef CONFIG_ARM_ERRATA_764369
 	/* Cortex-A9 only */
 	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
-		scu_ctrl = __raw_readl(scu_base + 0x30);
+		scu_ctrl = readl_relaxed(scu_base + 0x30);
 		if (!(scu_ctrl & 1))
-			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+			writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
 	}
 #endif
 
-	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
 	/* already enabled? */
 	if (scu_ctrl & 1)
 		return;
 
 	scu_ctrl |= 1;
-	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
 
 	/*
 	 * Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
 
-	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
 	val |= mode;
-	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+	writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 83ccca3..95d06362 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,6 +70,40 @@
 	local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 	dmb();
@@ -80,7 +114,6 @@
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
@@ -92,7 +125,6 @@
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
 	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
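
erratum_a15_798181_init() keys the choice off REVIDR bits 4 and 9: both
set means the full ECO fix is present and no workaround runs, bit 4
alone selects the cheaper local dummy-TLBI variant, and otherwise the
IPI broadcast stays. The decision, restated as a C sketch of the same
checks:

	enum quirk { QUIRK_NONE, QUIRK_PARTIAL, QUIRK_BROADCAST };

	static enum quirk classify(unsigned int midr, unsigned int revidr)
	{
		/* Only Cortex-A15 r0p0..r3p2 is affected at all. */
		if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
			return QUIRK_NONE;
		if ((revidr & 0x210) == 0x210)	/* bits 4 and 9: fixed */
			return QUIRK_NONE;
		if (revidr & 0x10)		/* bit 4: partial fix */
			return QUIRK_PARTIAL;
		return QUIRK_BROADCAST;
	}
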
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2985c9f..6591e26 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@
 	case CLOCK_EVT_MODE_PERIODIC:
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
-		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+		writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
 			twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@
 		ctrl = 0;
 	}
 
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 }
 
 static int twd_set_next_event(unsigned long evt,
 			struct clock_event_device *unused)
 {
-	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+	unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
 
 	ctrl |= TWD_TIMER_CONTROL_ENABLE;
 
-	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 
 	return 0;
 }
@@ -82,8 +82,8 @@
  */
 static int twd_timer_ack(void)
 {
-	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
-		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+	if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
 		return 1;
 	}
 
@@ -211,15 +211,15 @@
 		waitjiffies += 5;
 
 				 /* enable, no interrupt or reload */
-		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+		writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
 
 				 /* maximum value */
-		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+		writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
 
 		while (get_jiffies_64() < waitjiffies)
 			udelay(10);
 
-		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+		count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
 
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
@@ -277,7 +277,7 @@
 	 * bother with the below.
 	 */
 	if (per_cpu(percpu_setup_called, cpu)) {
-		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+		writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 		clockevents_register_device(clk);
 		enable_percpu_irq(clk->irq, 0);
 		return;
@@ -290,7 +290,7 @@
 	 * The following is done once per CPU the first time .setup() is
 	 * called.
 	 */
-	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 
 	clk->name = "local_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 41cf3cb..2835d35 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -10,7 +10,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
 
 #ifdef CONFIG_MMU
@@ -21,6 +21,7 @@
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;
+	u32 __mpidr = cpu_logical_map(smp_processor_id());
 	int ret;
 
 	if (!idmap_pgd)
@@ -32,7 +33,7 @@
 	 * resume (indicated by a zero return code), we need to switch
 	 * back to the correct page tables.
 	 */
-	ret = __cpu_suspend(arg, fn);
+	ret = __cpu_suspend(arg, fn, __mpidr);
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
 		local_flush_bp_all();
@@ -44,7 +45,8 @@
 #else
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	return __cpu_suspend(arg, fn);
+	u32 __mpidr = cpu_logical_map(smp_processor_id());
+	return __cpu_suspend(arg, fn, __mpidr);
 }
 #define	idmap_pgd	NULL
 #endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8fcda14..6125f25 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,6 +34,7 @@
 #include <asm/unwind.h>
 #include <asm/tls.h>
 #include <asm/system_misc.h>
+#include <asm/opcodes.h>
 
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 
@@ -341,15 +342,17 @@
 int is_valid_bugaddr(unsigned long pc)
 {
 #ifdef CONFIG_THUMB2_KERNEL
-	unsigned short bkpt;
+	u16 bkpt;
+	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
 #else
-	unsigned long bkpt;
+	u32 bkpt;
+	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 #endif
 
 	if (probe_kernel_address((unsigned *)pc, bkpt))
 		return 0;
 
-	return bkpt == BUG_INSTR_VALUE;
+	return bkpt == insn;
 }
 
 #endif
@@ -402,25 +405,29 @@
 	if (processor_mode(regs) == SVC_MODE) {
 #ifdef CONFIG_THUMB2_KERNEL
 		if (thumb_mode(regs)) {
-			instr = ((u16 *)pc)[0];
+			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
 			if (is_wide_instruction(instr)) {
-				instr <<= 16;
-				instr |= ((u16 *)pc)[1];
+				u16 inst2;
+				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+				instr = __opcode_thumb32_compose(instr, inst2);
 			}
 		} else
 #endif
-			instr = *(u32 *) pc;
+			instr = __mem_to_opcode_arm(*(u32 *) pc);
 	} else if (thumb_mode(regs)) {
 		if (get_user(instr, (u16 __user *)pc))
 			goto die_sig;
+		instr = __mem_to_opcode_thumb16(instr);
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
 			if (get_user(instr2, (u16 __user *)pc+1))
 				goto die_sig;
-			instr <<= 16;
-			instr |= instr2;
+			instr2 = __mem_to_opcode_thumb16(instr2);
+			instr = __opcode_thumb32_compose(instr, instr2);
 		}
 	} else if (get_user(instr, (u32 __user *)pc)) {
 		goto die_sig;
+	} else {
+		instr = __mem_to_opcode_arm(instr);
 	}
 
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index ebf5015..466bd29 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -20,6 +20,7 @@
 	bool "Kernel-based Virtual Machine (KVM) support"
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
 	depends on ARM_VIRT_EXT && ARM_LPAE
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index d99bee4..789bca9 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,6 +19,6 @@
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
 obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9c697db..2a700e0 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -65,7 +65,7 @@
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
-	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }
 
 /**
@@ -75,7 +75,7 @@
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 {
 	BUG_ON(preemptible());
-	return __get_cpu_var(kvm_arm_running_vcpu);
+	return __this_cpu_read(kvm_arm_running_vcpu);
 }
 
 /**
@@ -152,12 +152,13 @@
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
@@ -797,6 +798,19 @@
 			return -EFAULT;
 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
 	}
+	case KVM_ARM_PREFERRED_TARGET: {
+		int err;
+		struct kvm_vcpu_init init;
+
+		err = kvm_vcpu_preferred_target(&init);
+		if (err)
+			return err;
+
+		if (copy_to_user(argp, &init, sizeof(init)))
+			return -EFAULT;
+
+		return 0;
+	}
 	default:
 		return -EINVAL;
 	}
@@ -815,7 +829,7 @@
 
 	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
 	pgd_ptr = kvm_mmu_get_httbr();
-	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index db9cf69..78c0885 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -71,6 +71,98 @@
 	return 1;
 }
 
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	/*
+	 * Compute guest MPIDR. We build a virtual cluster out of the
+	 * vcpu_id, but we read the 'U' bit from the underlying
+	 * hardware directly.
+	 */
+	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+				     (vcpu->vcpu_id & 3));
+}
+
+/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+			 const struct coproc_params *p,
+			 const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+	return true;
+}
+
+/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+			const struct coproc_params *p,
+			const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return write_to_read_only(vcpu, p);
+	return read_zero(vcpu, p);
+}
+
+/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+			  const struct coproc_params *p,
+			  const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+	return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	u32 l2ctlr, ncores;
+
+	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+	l2ctlr &= ~(3 << 24);
+	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+	/* How many cores in the current cluster and the next ones */
+	ncores -= (vcpu->vcpu_id & ~3);
+	/* Cap it to the maximum number of cores in a single cluster */
+	ncores = min(ncores, 3U);
+	l2ctlr |= (ncores & 3) << 24;
+
+	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	u32 actlr;
+
+	/* ACTLR contains SMP bit: make sure you create all cpus first! */
+	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+	/* Make the SMP bit consistent with the guest configuration */
+	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+		actlr |= 1U << 6;
+	else
+		actlr &= ~(1U << 6);
+
+	vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/*
+ * TRM entries: A7:4.3.50, A15:4.3.49
+ * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
+ */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = 0;
+	return true;
+}
+
 /* See note at ARM ARM B1.14.4 */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
@@ -153,10 +245,22 @@
  *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
+	/* MPIDR: we use VMPIDR for guest access. */
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+			NULL, reset_mpidr, c0_MPIDR },
+
 	/* CSSELR: swapped by interrupt.S. */
 	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
 			NULL, reset_unknown, c0_CSSELR },
 
+	/* ACTLR: trapped by HCR.TAC bit. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+			access_actlr, reset_actlr, c1_ACTLR },
+
+	/* CPACR: swapped by interrupt.S. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+			NULL, reset_val, c1_CPACR, 0x00000000 },
+
 	/* TTBR0/TTBR1: swapped by interrupt.S. */
 	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
 	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
@@ -195,6 +299,13 @@
 	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
 	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
 	/*
+	 * L2CTLR access (guest wants to know #CPUs).
+	 */
+	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+	/*
 	 * Dummy performance monitor implementation.
 	 */
 	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
@@ -234,6 +345,9 @@
 	/* CNTKCTL: swapped by interrupt.S. */
 	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
 			NULL, reset_val, c14_CNTKCTL, 0x00000000 },
+
+	/* The Configuration Base Address Register. */
+	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
 /* Target specific emulation tables */
@@ -241,6 +355,12 @@
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
+	unsigned int i;
+
+	for (i = 1; i < table->num; i++)
+		BUG_ON(cmp_reg(&table->table[i-1],
+			       &table->table[i]) >= 0);
+
 	target_tables[table->target] = table;
 }
 
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index cf93472..bb0cac1 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -17,101 +17,12 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 #include <linux/kvm_host.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <linux/init.h>
 
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-	/*
-	 * Compute guest MPIDR:
-	 * (Even if we present only one VCPU to the guest on an SMP
-	 * host we don't set the U bit in the MPIDR, or vice versa, as
-	 * revealing the underlying hardware properties is likely to
-	 * be the best choice).
-	 */
-	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
-		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
-}
-
 #include "coproc.h"
 
-/* A15 TRM 4.3.28: RO WI */
-static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct coproc_params *p,
-			 const struct coproc_reg *r)
-{
-	if (p->is_write)
-		return ignore_write(vcpu, p);
-
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
-	return true;
-}
-
-/* A15 TRM 4.3.60: R/O. */
-static bool access_cbar(struct kvm_vcpu *vcpu,
-			const struct coproc_params *p,
-			const struct coproc_reg *r)
-{
-	if (p->is_write)
-		return write_to_read_only(vcpu, p);
-	return read_zero(vcpu, p);
-}
-
-/* A15 TRM 4.3.48: R/O WI. */
-static bool access_l2ctlr(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
-{
-	if (p->is_write)
-		return ignore_write(vcpu, p);
-
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
-	return true;
-}
-
-static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-	u32 l2ctlr, ncores;
-
-	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
-	l2ctlr &= ~(3 << 24);
-	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
-	l2ctlr |= (ncores & 3) << 24;
-
-	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
-}
-
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
-	u32 actlr;
-
-	/* ACTLR contains SMP bit: make sure you create all cpus first! */
-	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
-	/* Make the SMP bit consistent with the guest configuration */
-	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
-		actlr |= 1U << 6;
-	else
-		actlr &= ~(1U << 6);
-
-	vcpu->arch.cp15[c1_ACTLR] = actlr;
-}
-
-/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
-static bool access_l2ectlr(struct kvm_vcpu *vcpu,
-			   const struct coproc_params *p,
-			   const struct coproc_reg *r)
-{
-	if (p->is_write)
-		return ignore_write(vcpu, p);
-
-	*vcpu_reg(vcpu, p->Rt1) = 0;
-	return true;
-}
-
 /*
  * A15-specific CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -121,29 +32,9 @@
  *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
-	/* MPIDR: we use VMPIDR for guest access. */
-	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
-			NULL, reset_mpidr, c0_MPIDR },
-
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
 			NULL, reset_val, c1_SCTLR, 0x00C50078 },
-	/* ACTLR: trapped by HCR.TAC bit. */
-	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
-			access_actlr, reset_actlr, c1_ACTLR },
-	/* CPACR: swapped by interrupt.S. */
-	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
-			NULL, reset_val, c1_CPACR, 0x00000000 },
-
-	/*
-	 * L2CTLR access (guest wants to know #CPUs).
-	 */
-	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
-			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
-	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
-
-	/* The Configuration Base Address Register. */
-	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
@@ -154,12 +45,6 @@
 
 static int __init coproc_a15_init(void)
 {
-	unsigned int i;
-
-	for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
-		BUG_ON(cmp_reg(&a15_regs[i-1],
-			       &a15_regs[i]) >= 0);
-
 	kvm_register_target_coproc_table(&a15_target_table);
 	return 0;
 }
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
new file mode 100644
index 0000000..1df7673
--- /dev/null
+++ b/arch/arm/kvm/coproc_a7.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Copyright (C) 2013 - ARM Ltd
+ *
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *          Jonathan Austin <jonathan.austin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <linux/init.h>
+
+#include "coproc.h"
+
+/*
+ * Cortex-A7 specific CP15 registers.
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
+ */
+static const struct coproc_reg a7_regs[] = {
+	/* SCTLR: swapped by interrupt.S. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_val, c1_SCTLR, 0x00C50878 },
+};
+
+static struct kvm_coproc_target_table a7_target_table = {
+	.target = KVM_ARM_TARGET_CORTEX_A7,
+	.table = a7_regs,
+	.num = ARRAY_SIZE(a7_regs),
+};
+
+static int __init coproc_a7_init(void)
+{
+	kvm_register_target_coproc_table(&a7_target_table);
+	return 0;
+}
+late_initcall(coproc_a7_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index bdede9e..d6c0052 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -354,7 +354,7 @@
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
 
 	if (is_pabt) {
-		/* Set DFAR and DFSR */
+		/* Set IFAR and IFSR */
 		vcpu->arch.cp15[c6_IFAR] = addr;
 		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
 		/* Always give debug fault for now - should give guest a clue */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 152d036..20f8d97 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -190,6 +190,8 @@
 		return -EINVAL;
 
 	switch (part_number) {
+	case ARM_CPU_PART_CORTEX_A7:
+		return KVM_ARM_TARGET_CORTEX_A7;
 	case ARM_CPU_PART_CORTEX_A15:
 		return KVM_ARM_TARGET_CORTEX_A15;
 	default:
@@ -202,7 +204,7 @@
 {
 	unsigned int i;
 
-	/* We can only do a cortex A15 for now. */
+	/* We can only cope with guest==host and only on A15/A7 (for now). */
 	if (init->target != kvm_target_cpu())
 		return -EINVAL;
 
@@ -222,6 +224,26 @@
 	return kvm_reset_vcpu(vcpu);
 }
 
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+	int target = kvm_target_cpu();
+
+	if (target < 0)
+		return -ENODEV;
+
+	memset(init, 0, sizeof(*init));
+
+	/*
+	 * For now, we don't return any features.
+	 * In future, we might use features to return target
+	 * specific features available for the preferred
+	 * target type.
+	 */
+	init->target = (__u32)target;
+
+	return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	return -EINVAL;
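
The new kvm_vcpu_preferred_target() hook backs the KVM_ARM_PREFERRED_TARGET
vm ioctl, so userspace can ask the kernel which target suits the host CPU
instead of hardcoding KVM_ARM_TARGET_CORTEX_A15. A minimal userspace sketch
of the intended flow (error handling trimmed; KVM_ARM_VCPU_INIT is the
existing vcpu ioctl):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Ask the kernel which target matches the host CPU. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;	/* older kernel: fall back to a fixed target */

	/* init.features[] comes back zeroed; set optional features here. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}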
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index df4c82d..a920790 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -73,23 +73,29 @@
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a trapped WFI or WFE instruction from a guest
  * @vcpu:	the vcpu pointer
  * @run:	the kvm_run structure pointer
  *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	trace_kvm_wfi(*vcpu_pc(vcpu));
-	kvm_vcpu_block(vcpu);
+	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+		kvm_vcpu_on_spin(vcpu);
+	else
+		kvm_vcpu_block(vcpu);
+
 	return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
+	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
 	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0c25d94..4cb5a93 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -23,6 +23,68 @@
 
 #include "trace.h"
 
+static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+{
+	void *datap = NULL;
+	union {
+		u8	byte;
+		u16	hword;
+		u32	word;
+		u64	dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		tmp.byte	= data;
+		datap		= &tmp.byte;
+		break;
+	case 2:
+		tmp.hword	= data;
+		datap		= &tmp.hword;
+		break;
+	case 4:
+		tmp.word	= data;
+		datap		= &tmp.word;
+		break;
+	case 8:
+		tmp.dword	= data;
+		datap		= &tmp.dword;
+		break;
+	}
+
+	memcpy(buf, datap, len);
+}
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+	unsigned long data = 0;
+	union {
+		u16	hword;
+		u32	word;
+		u64	dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		data = buf[0];
+		break;
+	case 2:
+		memcpy(&tmp.hword, buf, len);
+		data = tmp.hword;
+		break;
+	case 4:
+		memcpy(&tmp.word, buf, len);
+		data = tmp.word;
+		break;
+	case 8:
+		memcpy(&tmp.dword, buf, len);
+		data = tmp.dword;
+		break;
+	}
+
+	return data;
+}
+
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
  * @vcpu: The VCPU pointer
@@ -33,28 +95,27 @@
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	unsigned long *dest;
+	unsigned long data;
 	unsigned int len;
 	int mask;
 
 	if (!run->mmio.is_write) {
-		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		*dest = 0;
-
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
 			return -EINVAL;
 
-		memcpy(dest, run->mmio.data, len);
-
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-				*((u64 *)run->mmio.data));
+		data = mmio_read_buf(run->mmio.data, len);
 
 		if (vcpu->arch.mmio_decode.sign_extend &&
 		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
-			*dest = (*dest ^ mask) - mask;
+			data = (data ^ mask) - mask;
 		}
+
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+			       data);
+		data = vcpu_data_host_to_guest(vcpu, data, len);
+		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
 	}
 
 	return 0;
@@ -105,6 +166,7 @@
 		 phys_addr_t fault_ipa)
 {
 	struct kvm_exit_mmio mmio;
+	unsigned long data;
 	unsigned long rt;
 	int ret;
 
@@ -125,13 +187,15 @@
 	}
 
 	rt = vcpu->arch.mmio_decode.rt;
+	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
+
 	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
 					 KVM_TRACE_MMIO_READ_UNSATISFIED,
 			mmio.len, fault_ipa,
-			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+			(mmio.is_write) ? data : 0);
 
 	if (mmio.is_write)
-		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+		mmio_write_buf(mmio.data, mmio.len, data);
 
 	if (vgic_handle_mmio(vcpu, run, &mmio))
 		return 1;
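
The (data ^ mask) - mask step in kvm_handle_mmio_return() above is the usual
branch-free sign extension: XORing with the sign bit of the narrow value
flips that bit, and the subtraction then propagates it through the upper
bits. A standalone illustration, independent of the kernel sources:

#include <assert.h>

static unsigned long sign_extend(unsigned long data, unsigned int len)
{
	unsigned long mask = 1UL << ((len * 8) - 1);	/* sign bit of the narrow value */

	return (data ^ mask) - mask;	/* flips the high bits iff the sign bit was set */
}

int main(void)
{
	assert(sign_extend(0xff, 1) == (unsigned long)-1);	/* 8-bit -1 stays -1 */
	assert(sign_extend(0x7f, 1) == 0x7f);			/* positive values unchanged */
	assert(sign_extend(0x8000, 2) == (unsigned long)-32768);
	return 0;
}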
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index b0de86b..3719583 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -19,6 +19,7 @@
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
 #include <linux/io.h>
+#include <linux/hugetlb.h>
 #include <trace/events/kvm.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
@@ -41,6 +42,8 @@
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
+
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	/*
@@ -93,19 +96,29 @@
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-	pmd_t *pmd_table = pmd_offset(pud, 0);
-	pud_clear(pud);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	pmd_free(NULL, pmd_table);
+	if (pud_huge(*pud)) {
+		pud_clear(pud);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
+	} else {
+		pmd_t *pmd_table = pmd_offset(pud, 0);
+		pud_clear(pud);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
+		pmd_free(NULL, pmd_table);
+	}
 	put_page(virt_to_page(pud));
 }
 
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
-	pte_t *pte_table = pte_offset_kernel(pmd, 0);
-	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	pte_free_kernel(NULL, pte_table);
+	if (kvm_pmd_huge(*pmd)) {
+		pmd_clear(pmd);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
+	} else {
+		pte_t *pte_table = pte_offset_kernel(pmd, 0);
+		pmd_clear(pmd);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
+		pte_free_kernel(NULL, pte_table);
+	}
 	put_page(virt_to_page(pmd));
 }
 
@@ -136,18 +149,32 @@
 			continue;
 		}
 
+		if (pud_huge(*pud)) {
+			/*
+			 * If we are dealing with a huge pud, just clear it and
+			 * move on.
+			 */
+			clear_pud_entry(kvm, pud, addr);
+			addr = pud_addr_end(addr, end);
+			continue;
+		}
+
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
 			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
-		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(kvm, pte, addr);
-		next = addr + PAGE_SIZE;
+		if (!kvm_pmd_huge(*pmd)) {
+			pte = pte_offset_kernel(pmd, addr);
+			clear_pte_entry(kvm, pte, addr);
+			next = addr + PAGE_SIZE;
+		}
 
-		/* If we emptied the pte, walk back up the ladder */
-		if (page_empty(pte)) {
+		/*
+		 * If the pmd entry is to be cleared, walk back up the ladder
+		 */
+		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
 			next = pmd_addr_end(addr, end);
 			if (page_empty(pmd) && !page_empty(pud)) {
@@ -420,29 +447,71 @@
 	kvm->arch.pgd = NULL;
 }
 
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			     phys_addr_t addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte, old_pte;
 
-	/* Create 2nd stage page table mapping - Level 1 */
 	pgd = kvm->arch.pgd + pgd_index(addr);
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud)) {
 		if (!cache)
-			return 0; /* ignore calls from kvm_set_spte_hva */
+			return NULL;
 		pmd = mmu_memory_cache_alloc(cache);
 		pud_populate(NULL, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
 
-	pmd = pmd_offset(pud, addr);
+	return pmd_offset(pud, addr);
+}
 
-	/* Create 2nd stage page table mapping - Level 2 */
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+	pmd_t *pmd, old_pmd;
+
+	pmd = stage2_get_pmd(kvm, cache, addr);
+	VM_BUG_ON(!pmd);
+
+	/*
+	 * Mapping in huge pages should only happen through a fault.  If a
+	 * page is merged into a transparent huge page, the individual
+	 * subpages of that huge page should be unmapped through MMU
+	 * notifiers before we get here.
+	 *
+	 * Merging of CompoundPages is not supported; they should instead be
+	 * split first, unmapped, merged, and mapped back in on demand.
+	 */
+	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+	old_pmd = *pmd;
+	kvm_set_pmd(pmd, *new_pmd);
+	if (pmd_present(old_pmd))
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
+	else
+		get_page(virt_to_page(pmd));
+	return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+	pmd_t *pmd;
+	pte_t *pte, old_pte;
+
+	/* Create stage-2 page table mapping - Level 1 */
+	pmd = stage2_get_pmd(kvm, cache, addr);
+	if (!pmd) {
+		/*
+		 * Ignore calls from kvm_set_spte_hva for unallocated
+		 * address ranges.
+		 */
+		return 0;
+	}
+
+	/* Create stage-2 page mappings - Level 2 */
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
@@ -507,16 +576,60 @@
 	return ret;
 }
 
+static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+{
+	pfn_t pfn = *pfnp;
+	gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+	if (PageTransCompound(pfn_to_page(pfn))) {
+		unsigned long mask;
+		/*
+		 * The address we faulted on is backed by a transparent huge
+		 * page.  However, because we map the compound huge page and
+		 * not the individual tail page, we need to transfer the
+		 * refcount to the head page.  We have to be careful that the
+		 * THP doesn't start to split while we are adjusting the
+		 * refcounts.
+		 *
+		 * We are sure this doesn't happen, because mmu_notifier_retry
+		 * was successful and we are holding the mmu_lock, so if this
+		 * THP is trying to split, it will be blocked in the mmu
+		 * notifier before touching any of the pages, specifically
+		 * before being able to call __split_huge_page_refcount().
+		 *
+		 * We can therefore safely transfer the refcount from PG_tail
+		 * to PG_head and switch the pfn from a tail page to the head
+		 * page accordingly.
+		 */
+		mask = PTRS_PER_PMD - 1;
+		VM_BUG_ON((gfn & mask) != (pfn & mask));
+		if (pfn & mask) {
+			*ipap &= PMD_MASK;
+			kvm_release_pfn_clean(pfn);
+			pfn &= ~mask;
+			kvm_get_pfn(pfn);
+			*pfnp = pfn;
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-			  gfn_t gfn, struct kvm_memory_slot *memslot,
+			  struct kvm_memory_slot *memslot,
 			  unsigned long fault_status)
 {
-	pte_t new_pte;
-	pfn_t pfn;
 	int ret;
-	bool write_fault, writable;
+	bool write_fault, writable, hugetlb = false, force_pte = false;
 	unsigned long mmu_seq;
+	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	struct vm_area_struct *vma;
+	pfn_t pfn;
 
 	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -524,6 +637,26 @@
 		return -EFAULT;
 	}
 
+	/* Let's check if we will get back a huge page backed by hugetlbfs */
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma_intersection(current->mm, hva, hva + 1);
+	if (is_vm_hugetlb_page(vma)) {
+		hugetlb = true;
+		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+	} else {
+		/*
+		 * Pages belonging to VMAs not aligned to the PMD mapping
+		 * granularity cannot be mapped using block descriptors even
+		 * if the pages belong to a THP for the process, because the
+		 * stage-2 block descriptor will cover more than a single THP
+	 * and we lose atomicity for unmapping, updates, and splits
+		 * of the THP or other pages in the stage-2 block range.
+		 */
+		if (vma->vm_start & ~PMD_MASK)
+			force_pte = true;
+	}
+	up_read(&current->mm->mmap_sem);
+
 	/* We need minimum second+third level pages */
 	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
 	if (ret)
@@ -541,26 +674,40 @@
 	 */
 	smp_rmb();
 
-	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
 	if (is_error_pfn(pfn))
 		return -EFAULT;
 
-	new_pte = pfn_pte(pfn, PAGE_S2);
-	coherent_icache_guest_page(vcpu->kvm, gfn);
-
-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
-	if (writable) {
-		kvm_set_s2pte_writable(&new_pte);
-		kvm_set_pfn_dirty(pfn);
+	if (!hugetlb && !force_pte)
+		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+	if (hugetlb) {
+		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+		new_pmd = pmd_mkhuge(new_pmd);
+		if (writable) {
+			kvm_set_s2pmd_writable(&new_pmd);
+			kvm_set_pfn_dirty(pfn);
+		}
+		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+	} else {
+		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+		if (writable) {
+			kvm_set_s2pte_writable(&new_pte);
+			kvm_set_pfn_dirty(pfn);
+		}
+		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
 	}
-	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
 
 out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return 0;
+	return ret;
 }
 
 /**
@@ -629,7 +776,7 @@
 
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
 
-	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+	ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
 	if (ret == 0)
 		ret = 1;
 out_unlock:
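
On the transparent_hugepage_adjust() arithmetic above: with 4K pages,
PTRS_PER_PMD is 512, so the mask is 511 and one stage-2 PMD maps 2MiB. The
VM_BUG_ON holds because gfn and pfn index the same offset into the huge
page, which is why rounding both down lands on the THP head. A worked
standalone example with hypothetical addresses:

#include <stdio.h>

int main(void)
{
	unsigned long pmd_mask = ~((1UL << 21) - 1);	/* PMD_MASK for 2MiB sections */
	unsigned long mask = 512 - 1;			/* PTRS_PER_PMD - 1 */
	unsigned long ipa = 0x40123000, pfn = 0x89b23;

	/* gfn and pfn agree in their low 9 bits (0x123), so the fault can
	 * be remapped to the 2MiB-aligned head of the THP: */
	printf("ipa %#lx -> %#lx\n", ipa, ipa & pmd_mask);	/* 0x40000000 */
	printf("pfn %#lx -> %#lx\n", pfn, pfn & ~mask);		/* 0x89a00 */
	return 0;
}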
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 86a693a..0881bf1 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
@@ -34,22 +35,30 @@
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
 	struct kvm *kvm = source_vcpu->kvm;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL, *tmp;
 	wait_queue_head_t *wq;
 	unsigned long cpu_id;
+	unsigned long mpidr;
 	phys_addr_t target_pc;
+	int i;
 
 	cpu_id = *vcpu_reg(source_vcpu, 1);
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
-	if (cpu_id >= atomic_read(&kvm->online_vcpus))
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		mpidr = kvm_vcpu_get_mpidr(tmp);
+		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+			vcpu = tmp;
+			break;
+		}
+	}
+
+	if (!vcpu)
 		return KVM_PSCI_RET_INVAL;
 
 	target_pc = *vcpu_reg(source_vcpu, 2);
 
-	vcpu = kvm_get_vcpu(kvm, cpu_id);
-
 	wq = kvm_arch_vcpu_wq(vcpu);
 	if (!waitqueue_active(wq))
 		return KVM_PSCI_RET_INVAL;
@@ -62,6 +71,10 @@
 		vcpu_set_thumb(vcpu);
 	}
 
+	/* Propagate caller endianness */
+	if (kvm_vcpu_is_be(source_vcpu))
+		kvm_vcpu_set_be(vcpu);
+
 	*vcpu_pc(vcpu) = target_pc;
 	vcpu->arch.pause = false;
 	smp_mb();		/* Make sure the above is visible */
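
PSCI CPU_ON now resolves its target by MPIDR affinity instead of treating
the argument as a dense vcpu index, which matters once the MPIDR values the
guest sees are no longer simply 0..n-1. On 32-bit ARM, MPIDR_HWID_BITMASK
keeps only the three affinity bytes, so indicator bits such as bit 31 do not
defeat the comparison. A standalone sketch with hypothetical values:

#include <stdio.h>

#define MPIDR_HWID_BITMASK 0x00ffffffUL	/* affinity bits, as on 32-bit ARM */

int main(void)
{
	unsigned long mpidr = 0x80000101UL;	/* indicator bit 31 set, Aff1=1, Aff0=1 */
	unsigned long cpu_id = 0x101UL;		/* r1 argument to PSCI CPU_ON */

	printf("match: %d\n",
	       (mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK));
	return 0;
}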
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index c02ba4a..f558c07 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -30,16 +30,14 @@
 #include <kvm/arm_arch_timer.h>
 
 /******************************************************************************
- * Cortex-A15 Reset Values
+ * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int a15_max_cpu_idx = 3;
-
-static struct kvm_regs a15_regs_reset = {
+static struct kvm_regs cortexa_regs_reset = {
 	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
 
-static const struct kvm_irq_level a15_vtimer_irq = {
+static const struct kvm_irq_level cortexa_vtimer_irq = {
 	{ .irq = 27 },
 	.level = 1,
 };
@@ -62,12 +60,11 @@
 	const struct kvm_irq_level *cpu_vtimer_irq;
 
 	switch (vcpu->arch.target) {
+	case KVM_ARM_TARGET_CORTEX_A7:
 	case KVM_ARM_TARGET_CORTEX_A15:
-		if (vcpu->vcpu_id > a15_max_cpu_idx)
-			return -EINVAL;
-		reset_regs = &a15_regs_reset;
+		reset_regs = &cortexa_regs_reset;
 		vcpu->arch.midr = read_cpuid_id();
-		cpu_vtimer_irq = &a15_vtimer_irq;
+		cpu_vtimer_irq = &cortexa_vtimer_irq;
 		break;
 	default:
 		return -ENODEV;
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d6408d1..e0c68d5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,6 +10,11 @@
 	and	r3, r0, #31		@ Get bit offset
 	mov	r0, r0, lsr #5
 	add	r1, r1, r0, lsl #2	@ Get word offset
+#if __LINUX_ARM_ARCH__ >= 7
+	.arch_extension	mp
+	ALT_SMP(W(pldw)	[r1])
+	ALT_UP(W(nop))
+#endif
 	mov	r3, r2, lsl r3
 1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 025f742..3e58d71 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -18,6 +18,7 @@
 #include <linux/hardirq.h> /* for in_atomic() */
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/hugetlb.h>
 #include <asm/current.h>
 #include <asm/page.h>
 
@@ -40,7 +41,35 @@
 		return 0;
 
 	pmd = pmd_offset(pud, addr);
-	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+	if (unlikely(pmd_none(*pmd)))
+		return 0;
+
+	/*
+	 * A pmd can be bad if it refers to a HugeTLB or THP page.
+	 *
+	 * Both THP and HugeTLB pages have the same pmd layout
+	 * and should not be manipulated by the pte functions.
+	 *
+	 * Lock the page table for the destination and check
+	 * to see that it's still huge and whether or not we will
+	 * need to fault on write, or if we have a splitting THP.
+	 */
+	if (unlikely(pmd_thp_or_huge(*pmd))) {
+		ptl = &current->mm->page_table_lock;
+		spin_lock(ptl);
+		if (unlikely(!pmd_thp_or_huge(*pmd)
+			|| pmd_hugewillfault(*pmd)
+			|| pmd_trans_splitting(*pmd))) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
+		*ptep = NULL;
+		*ptlp = ptl;
+		return 1;
+	}
+
+	if (unlikely(pmd_bad(*pmd)))
 		return 0;
 
 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@
 		from += tocopy;
 		n -= tocopy;
 
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	if (!atomic)
 		up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@
 		addr += tocopy;
 		n -= tocopy;
 
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	up_read(&current->mm->mmap_sem);
 
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index e0fda04..90aab2d 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -98,7 +98,6 @@
 # Power Management
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_AT91_SLOW_CLOCK)	+= pm_slowclock.o
-obj-$(CONFIG_CPU_IDLE)	+= cpuidle.o
 
 ifeq ($(CONFIG_PM_DEBUG),y)
 CFLAGS_pm.o += -DDEBUG
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 4aad93d..25805f2 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -327,6 +328,7 @@
 {
 	at91rm9200_ioremap_st(AT91RM9200_BASE_ST);
 	at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256);
+	at91_pm_set_standby(at91rm9200_standby);
 }
 
 static void __init at91rm9200_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index ae10d14..d6a1fa8 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -28,6 +28,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -342,6 +343,7 @@
 	at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
 	at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
 	at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
+	at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9260_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index e761e74..23ba1d8 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -284,6 +285,7 @@
 	at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
 	at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
 	at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
+	at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9261_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 629ea5f..b2a3474 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -465,7 +465,7 @@
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
 	[0] = {
@@ -498,7 +498,7 @@
 	.num_resources	= ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
 	if (!data) {
 		return;
@@ -559,7 +559,7 @@
 	platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index e6fed62..7eccb0f 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -321,6 +322,7 @@
 	at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
 	at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
 	at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX);
+	at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9263_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 858c8aa..4aeaddd 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -832,7 +832,7 @@
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
 	[0] = {
@@ -859,7 +859,7 @@
 	.num_resources	= ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
 	if (!data)
 		return;
@@ -891,7 +891,7 @@
 	platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 9f7a97c..9405aa0 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -370,6 +371,7 @@
 	at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
 	at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
 	at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX);
+	at91_pm_set_standby(at91_ddr_standby);
 }
 
 static void __init at91sam9g45_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index acb703e..cb36fa8 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -965,7 +965,7 @@
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
 	[0] = {
@@ -991,7 +991,7 @@
 	.num_resources	= ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
 	if (!data)
 		return;
@@ -1037,7 +1037,7 @@
 	platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 301e172..0750ffb 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -287,6 +288,7 @@
 	at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
 	at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
 	at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX);
+	at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9rl_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 352468f..a698bda 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -498,7 +498,7 @@
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
 	[0] = {
@@ -525,7 +525,7 @@
 	.num_resources	= ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
 	if (!data) {
 		return;
@@ -557,7 +557,7 @@
 	platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index d343762..473546b 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -389,7 +389,7 @@
 					| ATMEL_LCDC_IFWIDTH_4 \
 					| ATMEL_LCDC_SCANMOD_SINGLE)
 
-static void at91_lcdc_stn_power_control(int on)
+static void at91_lcdc_stn_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
 	/* backlight */
 	if (on) {	/* power up */
@@ -401,7 +401,7 @@
 	}
 }
 
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
 	.default_bpp			= 1,
 	.default_dmacon			= ATMEL_LCDC_DMAEN,
 	.default_lcdcon2		= AT91SAM9261_DEFAULT_STN_LCDCON2,
@@ -445,7 +445,7 @@
 					| ATMEL_LCDC_DISTYPE_TFT    \
 					| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_tft_power_control(int on)
+static void at91_lcdc_tft_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
 	if (on)
 		at91_set_gpio_value(AT91_PIN_PA12, 0);	/* power up */
@@ -453,7 +453,7 @@
 		at91_set_gpio_value(AT91_PIN_PA12, 1);	/* power down */
 }
 
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
 	.lcdcon_is_backlight		= true,
 	.default_bpp			= 16,
 	.default_dmacon			= ATMEL_LCDC_DMAEN,
@@ -465,7 +465,7 @@
 #endif
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 3284df0..8b4942c 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -275,13 +275,13 @@
 					| ATMEL_LCDC_DISTYPE_TFT \
 					| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
 	at91_set_gpio_value(AT91_PIN_PA30, on);
 }
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
 	.lcdcon_is_backlight		= true,
 	.default_bpp			= 16,
 	.default_dmacon			= ATMEL_LCDC_DMAEN,
@@ -292,7 +292,7 @@
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 2a94896..ef39078 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -284,7 +284,7 @@
 					| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
 	.lcdcon_is_backlight		= true,
 	.default_bpp			= 32,
 	.default_dmacon			= ATMEL_LCDC_DMAEN,
@@ -295,7 +295,7 @@
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index aa265dc..604eecf 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -170,7 +170,7 @@
 					| ATMEL_LCDC_DISTYPE_TFT \
 					| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
 	if (on)
 		at91_set_gpio_value(AT91_PIN_PC1, 0);	/* power up */
@@ -179,7 +179,7 @@
 }
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
 	.lcdcon_is_backlight            = true,
 	.default_bpp			= 16,
 	.default_dmacon			= ATMEL_LCDC_DMAEN,
@@ -191,7 +191,7 @@
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
diff --git a/arch/arm/mach-at91/board.h b/arch/arm/mach-at91/board.h
index 4a234fb..6c08b34 100644
--- a/arch/arm/mach-at91/board.h
+++ b/arch/arm/mach-at91/board.h
@@ -107,8 +107,8 @@
 extern void __init at91_add_device_ssc(unsigned id, unsigned pins);
 
  /* LCD Controller */
-struct atmel_lcdfb_info;
-extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data);
+struct atmel_lcdfb_pdata;
+extern void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data);
 
  /* AC97 */
 extern void __init at91_add_device_ac97(struct ac97c_platform_data *data);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 15afb5d..9986542 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -39,6 +39,8 @@
 #include "at91_rstc.h"
 #include "at91_shdwc.h"
 
+static void (*at91_pm_standby)(void);
+
 static void __init show_reset_status(void)
 {
 	static char reset[] __initdata = "reset";
@@ -266,14 +268,8 @@
 			 * For ARM 926 based chips, this requirement is weaker
 			 * as at91sam9 can access a RAM in self-refresh mode.
 			 */
-			if (cpu_is_at91rm9200())
-				at91rm9200_standby();
-			else if (cpu_is_at91sam9g45())
-				at91sam9g45_standby();
-			else if (cpu_is_at91sam9263())
-				at91sam9263_standby();
-			else
-				at91sam9_standby();
+			if (at91_pm_standby)
+				at91_pm_standby();
 			break;
 
 		case PM_SUSPEND_ON:
@@ -314,6 +310,18 @@
 	.end	= at91_pm_end,
 };
 
+static struct platform_device at91_cpuidle_device = {
+	.name = "cpuidle-at91",
+};
+
+void at91_pm_set_standby(void (*at91_standby)(void))
+{
+	if (at91_standby) {
+		at91_cpuidle_device.dev.platform_data = at91_standby;
+		at91_pm_standby = at91_standby;
+	}
+}
+
 static int __init at91_pm_init(void)
 {
 #ifdef CONFIG_AT91_SLOW_CLOCK
@@ -325,6 +333,9 @@
 	/* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */
 	if (cpu_is_at91rm9200())
 		at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);
+
+	if (at91_cpuidle_device.dev.platform_data)
+		platform_device_register(&at91_cpuidle_device);
 
 	suspend_set_ops(&at91_pm_ops);
 
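
at91_pm_set_standby() feeds the SoC-specific standby routine to two
consumers: the suspend path above and, via platform_data, the new
"cpuidle-at91" platform driver, which is only registered once a standby
hook was actually installed. A hedged sketch (not part of this patch) of
how such a driver can recover the hook:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int at91_cpuidle_probe_sketch(struct platform_device *pdev)
{
	/* The standby routine installed by at91_pm_set_standby(). */
	void (*standby)(void) = (void (*)(void))pdev->dev.platform_data;

	if (!standby)
		return -ENODEV;
	/* ... wire 'standby' into the idle-state enter callback ... */
	return 0;
}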
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 2f5908f..3ed190c 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -11,9 +11,13 @@
 #ifndef __ARCH_ARM_MACH_AT91_PM
 #define __ARCH_ARM_MACH_AT91_PM
 
+#include <asm/proc-fns.h>
+
 #include <mach/at91_ramc.h>
 #include <mach/at91rm9200_sdramc.h>
 
+extern void at91_pm_set_standby(void (*at91_standby)(void));
+
 /*
  * The AT91RM9200 goes into self-refresh mode with this command, and will
  * terminate self-refresh automatically on the next SDRAM access.
@@ -45,16 +49,18 @@
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
-static inline void at91sam9g45_standby(void)
+static inline void at91_ddr_standby(void)
 {
 	/* Those two values allow us to delay self-refresh activation
 	 * to the maximum. */
-	u32 lpr0, lpr1;
-	u32 saved_lpr0, saved_lpr1;
+	u32 lpr0, lpr1 = 0;
+	u32 saved_lpr0, saved_lpr1 = 0;
 
-	saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
-	lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
-	lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+	if (at91_ramc_base[1]) {
+		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+	}
 
 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
@@ -62,25 +68,29 @@
 
 	/* self-refresh mode now */
 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
-	at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+	if (at91_ramc_base[1])
+		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 
 	cpu_do_idle();
 
 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
-	at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+	if (at91_ramc_base[1])
+		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
-static inline void at91sam9263_standby(void)
+static inline void at91sam9_sdram_standby(void)
 {
-	u32 lpr0, lpr1;
-	u32 saved_lpr0, saved_lpr1;
+	u32 lpr0, lpr1 = 0;
+	u32 saved_lpr0, saved_lpr1 = 0;
 
-	saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
-	lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
-	lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+	if (at91_ramc_base[1]) {
+		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+	}
 
 	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
 	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
@@ -88,27 +98,14 @@
 
 	/* self-refresh mode now */
 	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
-	at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+	if (at91_ramc_base[1])
+		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 
 	cpu_do_idle();
 
 	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
-	at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
-
-static inline void at91sam9_standby(void)
-{
-	u32 saved_lpr, lpr;
-
-	saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR);
-
-	lpr = saved_lpr & ~AT91_SDRAMC_LPCB;
-	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr |
-			AT91_SDRAMC_LPCB_SELF_REFRESH);
-
-	cpu_do_idle();
-
-	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr);
+	if (at91_ramc_base[1])
+		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 }
 
 #endif
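
Both helpers above now share the same shape, with bank 1 touched only when
at91_ramc_base[1] was actually mapped: save the low-power register, request
self-refresh, idle the core (the RAM retains its contents on its own), then
restore. Distilled, with hypothetical ramc_read()/ramc_write()/LPR stubs
standing in for the at91 accessors:

static void standby_pattern(void)
{
	u32 saved = ramc_read(0, LPR);		/* remember the current mode */

	ramc_write(0, LPR, (saved & ~LPCB) | LPCB_SELF_REFRESH);
	cpu_do_idle();				/* RAM self-refreshes while the core sleeps */
	ramc_write(0, LPR, saved);		/* back to normal operation */
}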
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index b17fbcf..094b345 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -23,6 +23,7 @@
 #include "at91_shdwc.h"
 #include "soc.h"
 #include "generic.h"
+#include "pm.h"
 
 struct at91_init_soc __initdata at91_boot_soc;
 
@@ -376,15 +377,16 @@
 }
 
 static struct of_device_id ramc_ids[] = {
-	{ .compatible = "atmel,at91rm9200-sdramc" },
-	{ .compatible = "atmel,at91sam9260-sdramc" },
-	{ .compatible = "atmel,at91sam9g45-ddramc" },
+	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
 	{ /*sentinel*/ }
 };
 
 static void at91_dt_ramc(void)
 {
 	struct device_node *np;
+	const struct of_device_id *of_id;
 
 	np = of_find_matching_node(NULL, ramc_ids);
 	if (!np)
@@ -396,6 +398,12 @@
 	/* the controller may have 2 banks */
 	at91_ramc_base[1] = of_iomap(np, 1);
 
+	of_id = of_match_node(ramc_ids, np);
+	if (!of_id)
+		pr_warn("AT91: ramc no standby function available\n");
+	else
+		at91_pm_set_standby(of_id->data);
+
 	of_node_put(np);
 }
 
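
The ramc_ids table now doubles as a dispatch table: of_match_node() returns
the entry whose compatible string matched, and its .data carries the standby
routine for that RAM controller. The idiom in isolation (hypothetical names):

static const struct of_device_id ids_sketch[] = {
	{ .compatible = "vendor,foo-ramc", .data = foo_standby },	/* hypothetical */
	{ /* sentinel */ }
};

static void pick_standby(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(ids_sketch, np);

	if (id)
		at91_pm_set_standby(id->data);
}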
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index e026b19..a075b3e 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -40,7 +40,6 @@
 	bool "DA850/OMAP-L138/AM18x based system"
 	select ARCH_DAVINCI_DA8XX
 	select ARCH_HAS_CPUFREQ
-	select CPU_FREQ_TABLE
 	select CP_INTC
 
 config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c
index f18928b..8540ddd 100644
--- a/arch/arm/mach-davinci/sram.c
+++ b/arch/arm/mach-davinci/sram.c
@@ -25,7 +25,6 @@
 
 void *sram_alloc(size_t len, dma_addr_t *dma)
 {
-	unsigned long vaddr;
 	dma_addr_t dma_base = davinci_soc_info.sram_dma;
 
 	if (dma)
@@ -33,13 +32,7 @@
 	if (!sram_pool || (dma && !dma_base))
 		return NULL;
 
-	vaddr = gen_pool_alloc(sram_pool, len);
-	if (!vaddr)
-		return NULL;
-
-	if (dma)
-		*dma = gen_pool_virt_to_phys(sram_pool, vaddr);
-	return (void *)vaddr;
+	return gen_pool_dma_alloc(sram_pool, len, dma);
 
 }
 EXPORT_SYMBOL(sram_alloc);
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index a4e7ba8..61d2906 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -28,6 +28,7 @@
 #include <linux/of_address.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/exception.h>
@@ -292,6 +293,16 @@
 	__raw_writel(val, addr);
 }
 
+static struct platform_device exynos_cpuidle = {
+	.name		= "exynos_cpuidle",
+	.id		= -1,
+};
+
+void __init exynos_cpuidle_init(void)
+{
+	platform_device_register(&exynos_cpuidle);
+}
+
 void __init exynos_init_late(void)
 {
 	if (of_machine_is_compatible("samsung,exynos5440"))
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f0fa205..ff9b6a9 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -21,6 +21,7 @@
 void exynos_init_io(void);
 void exynos4_restart(enum reboot_mode mode, const char *cmd);
 void exynos5_restart(enum reboot_mode mode, const char *cmd);
+void exynos_cpuidle_init(void);
 void exynos_init_late(void);
 
 void exynos_firmware_init(void);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index ac139226..ddbfe87 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <linux/time.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/smp_scu.h>
@@ -192,7 +193,7 @@
 	__raw_writel(tmp, EXYNOS5_PWR_CTRL2);
 }
 
-static int __init exynos4_init_cpuidle(void)
+static int exynos_cpuidle_probe(struct platform_device *pdev)
 {
 	int cpu_id, ret;
 	struct cpuidle_device *device;
@@ -205,7 +206,7 @@
 
 	ret = cpuidle_register_driver(&exynos4_idle_driver);
 	if (ret) {
-		printk(KERN_ERR "CPUidle failed to register driver\n");
+		dev_err(&pdev->dev, "failed to register cpuidle driver\n");
 		return ret;
 	}
 
@@ -219,11 +220,20 @@
 
 		ret = cpuidle_register_device(device);
 		if (ret) {
-			printk(KERN_ERR "CPUidle register device failed\n");
+			dev_err(&pdev->dev, "failed to register cpuidle device\n");
 			return ret;
 		}
 	}
 
 	return 0;
 }
-device_initcall(exynos4_init_cpuidle);
+
+static struct platform_driver exynos_cpuidle_driver = {
+	.probe	= exynos_cpuidle_probe,
+	.driver = {
+		.name = "exynos_cpuidle",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(exynos_cpuidle_driver);
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 4b8f6e2..4603e6b 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -21,6 +21,8 @@
 
 static void __init exynos4_dt_machine_init(void)
 {
+	exynos_cpuidle_init();
+
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 7976ab3..1fe075a 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,8 @@
 		}
 	}
 
+	exynos_cpuidle_init();
+
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 1fd2cf0..eb1fa5c 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -692,14 +692,14 @@
 	unsigned long flags;
 	u32 reg;
 
-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
 	if (b != LED_OFF)
 		reg &= ~led->mask;
 	else
 		reg |= led->mask;
 	nw_gpio_modify_op(led->mask, reg);
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 }
 
 static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
@@ -709,9 +709,9 @@
 	unsigned long flags;
 	u32 reg;
 
-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 
 	return (reg & led->mask) ? LED_OFF : LED_FULL;
 }
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 6e1d723..0aded64 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -4,11 +4,12 @@
 	select ARCH_HAS_CPUFREQ
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARCH_HAS_OPP
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARM_AMBA
 	select ARM_ERRATA_764369 if SMP
 	select ARM_ERRATA_775420
-	select ARM_ERRATA_798181
+	select ARM_ERRATA_798181 if SMP
 	select ARM_GIC
 	select ARM_PSCI
 	select ARM_TIMER_SP804
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 0f9f241..d0cfb22 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -22,7 +22,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/phy.h>
 #include <linux/reboot.h>
 #include <linux/regmap.h>
@@ -176,7 +176,7 @@
 	val = readl_relaxed(base + OCOTP_CFG3);
 	val >>= OCOTP_CFG3_SPEED_SHIFT;
 	if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
-		if (opp_disable(cpu_dev, 1200000000))
+		if (dev_pm_opp_disable(cpu_dev, 1200000000))
 			pr_warn("failed to disable 1.2 GHz OPP\n");
 
 put_node:
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 30e1ebe..c342dc4 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
 if ARCH_IXP4XX
 
-config ARCH_SUPPORTS_BIG_ENDIAN
-	bool
-	default y
-
 menu "Intel IXP4xx Implementation Options"
 
 comment "IXP4xx Platforms"
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 7022329..cfadd97 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -191,7 +191,6 @@
 #define SCLK_SOURCE_SELECT(x)  (x << 30) /* 0x0 ~ 0x3 */
 /* link config */
 #define CFG_DUMBMODE(mode)     (mode << 28) /* 0x0 ~ 0x6*/
-#define CFG_GRA_SWAPRB(x)      (x << 0) /* 1: rbswap enabled */
 static struct mmp_mach_path_config dkb_disp_config[] = {
 	[0] = {
 		.name = "mmp-parallel",
@@ -199,8 +198,7 @@
 		.output_type = PATH_OUT_PARALLEL,
 		.path_config = CFG_IOPADMODE(0x1)
 			| SCLK_SOURCE_SELECT(0x1),
-		.link_config = CFG_DUMBMODE(0x2)
-			| CFG_GRA_SWAPRB(0x1),
+		.link_config = CFG_DUMBMODE(0x2),
 	},
 };
 
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9eb63d7..5e269d7 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,5 +1,6 @@
 config ARCH_MVEBU
 	bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select CLKSRC_MMIO
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 5476669..ee7598f 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -20,6 +20,8 @@
 #define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
 #define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
 
+#include <asm/assembler.h>
+
 	.text
 /*
  * r0: Coherency fabric base register address
@@ -29,6 +31,7 @@
 	/* Create bit by cpu index */
 	mov	r3, #(1 << 24)
 	lsl	r1, r3, r1
+ARM_BE8(rev	r1, r1)
 
 	/* Add CPU to SMP group - Atomic */
 	add	r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 8a1b0c9..3dd80df 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,12 +21,16 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
 /*
  * Armada XP specific entry point for secondary CPUs.
  * We add the CPU to the coherency fabric and then jump to secondary
  * startup
  */
 ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend	be )			@ go BE8 if entered LE
+
 	/* Get coherency fabric base physical address */
 	adr	r0, 1f
 	ldr	r1, [r0]
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index fc20a61..ac82512 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@
 	board_nand_data.nr_parts	= nr_parts;
 	board_nand_data.devsize		= nand_type;
 
-	board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
+	board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
 	gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 09f6149..d6ed819 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -25,7 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include <linux/mtd/mtd.h>
@@ -516,11 +516,11 @@
 			return -ENODEV;
 		}
 		/* Enable MPU 1GHz and lower opps */
-		r = opp_enable(mpu_dev, 800000000);
+		r = dev_pm_opp_enable(mpu_dev, 800000000);
 		/* TODO: MPU 1GHz needs SR and ABB */
 
 		/* Enable IVA 800MHz and lower opps */
-		r |= opp_enable(iva_dev, 660000000);
+		r |= dev_pm_opp_enable(iva_dev, 660000000);
 		/* TODO: DSP 800MHz needs SR and ABB */
 		if (r) {
 			pr_err("%s: failed to enable higher opp %d\n",
@@ -529,8 +529,8 @@
 			 * Cleanup - disable the higher freqs - we dont care
 			 * about the results
 			 */
-			opp_disable(mpu_dev, 800000000);
-			opp_disable(iva_dev, 660000000);
+			dev_pm_opp_disable(mpu_dev, 800000000);
+			dev_pm_opp_disable(iva_dev, 660000000);
 		}
 	}
 	return 0;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 51525fa..81de562 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1341,14 +1341,6 @@
 
 #ifdef CONFIG_MTD_NAND
 
-static const char * const nand_ecc_opts[] = {
-	[OMAP_ECC_HAMMING_CODE_DEFAULT]		= "sw",
-	[OMAP_ECC_HAMMING_CODE_HW]		= "hw",
-	[OMAP_ECC_HAMMING_CODE_HW_ROMCODE]	= "hw-romcode",
-	[OMAP_ECC_BCH4_CODE_HW]			= "bch4",
-	[OMAP_ECC_BCH8_CODE_HW]			= "bch8",
-};
-
 static const char * const nand_xfer_types[] = {
 	[NAND_OMAP_PREFETCH_POLLED]		= "prefetch-polled",
 	[NAND_OMAP_POLLED]			= "polled",
@@ -1378,13 +1370,41 @@
 	gpmc_nand_data->cs = val;
 	gpmc_nand_data->of_node = child;
 
-	if (!of_property_read_string(child, "ti,nand-ecc-opt", &s))
-		for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++)
-			if (!strcasecmp(s, nand_ecc_opts[val])) {
-				gpmc_nand_data->ecc_opt = val;
-				break;
-			}
+	/* Detect availability of ELM module */
+	gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+	if (gpmc_nand_data->elm_of_node == NULL)
+		gpmc_nand_data->elm_of_node =
+					of_parse_phandle(child, "elm_id", 0);
+	if (gpmc_nand_data->elm_of_node == NULL)
+		pr_warn("%s: ti,elm-id property not found\n", __func__);
 
+	/* select ecc-scheme for NAND */
+	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+		pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!strcmp(s, "ham1") || !strcmp(s, "sw") ||
+		!strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
+		gpmc_nand_data->ecc_opt =
+				OMAP_ECC_HAM1_CODE_HW;
+	else if (!strcmp(s, "bch4"))
+		if (gpmc_nand_data->elm_of_node)
+			gpmc_nand_data->ecc_opt =
+				OMAP_ECC_BCH4_CODE_HW;
+		else
+			gpmc_nand_data->ecc_opt =
+				OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+	else if (!strcmp(s, "bch8"))
+		if (gpmc_nand_data->elm_of_node)
+			gpmc_nand_data->ecc_opt =
+				OMAP_ECC_BCH8_CODE_HW;
+		else
+			gpmc_nand_data->ecc_opt =
+				OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+	else
+		pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
+
+	/* select data transfer mode for NAND controller */
 	if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
 		for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
 			if (!strcasecmp(s, nand_xfer_types[val])) {
diff --git a/arch/arm/mach-omap2/omap-pm.h b/arch/arm/mach-omap2/omap-pm.h
index 67faa7b..1d777e6 100644
--- a/arch/arm/mach-omap2/omap-pm.h
+++ b/arch/arm/mach-omap2/omap-pm.h
@@ -17,7 +17,7 @@
 #include <linux/device.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 /*
  * agent_id values for use with omap_pm_set_min_bus_tput():
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index 82fd8c7..a358a07 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -18,7 +18,7 @@
  */
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include "omap_device.h"
@@ -85,14 +85,14 @@
 			dev = &oh->od->pdev->dev;
 		}
 
-		r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+		r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
 		if (r) {
 			dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
 				__func__, opp_def->freq,
 				opp_def->hwmod_name, i, r);
 		} else {
 			if (!opp_def->default_available)
-				r = opp_disable(dev, opp_def->freq);
+				r = dev_pm_opp_disable(dev, opp_def->freq);
 			if (r)
 				dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
 					__func__, opp_def->freq,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 360b2da..e1b4141 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/err.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
@@ -131,7 +131,7 @@
 {
 	struct voltagedomain *voltdm;
 	struct clk *clk;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long freq, bootup_volt;
 	struct device *dev;
 
@@ -172,7 +172,7 @@
 	clk_put(clk);
 
 	rcu_read_lock();
-	opp = opp_find_freq_ceil(dev, &freq);
+	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 	if (IS_ERR(opp)) {
 		rcu_read_unlock();
 		pr_err("%s: unable to find boot up OPP for vdd_%s\n",
@@ -180,7 +180,7 @@
 		goto exit;
 	}
 
-	bootup_volt = opp_get_voltage(opp);
+	bootup_volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 	if (!bootup_volt) {
 		pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index a842711..96100db 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -615,14 +615,12 @@
 config PXA25x
 	bool
 	select CPU_XSCALE
-	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Select code specific to PXA21x/25x/26x variants
 
 config PXA27x
 	bool
 	select CPU_XSCALE
-	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Select code specific to PXA27x variants
 
@@ -635,7 +633,6 @@
 config PXA3xx
 	bool
 	select CPU_XSC3
-	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Select code specific to PXA3xx variants
 
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index f942349..584439b 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -310,6 +310,7 @@
 	.max_brightness	= 100,
 	.dft_brightness	= 100,
 	.pwm_period_ns	= 10000,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device cm_x300_backlight_device = {
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 2d4a7b4..3aa2646 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -189,6 +189,7 @@
 	.max_brightness	= 0x3ff,
 	.dft_brightness	= 0x1ff,
 	.pwm_period_ns	= 1000000,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device income_backlight = {
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index fe2eb83..ab93441 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -54,6 +54,7 @@
 	.max_brightness	= 1023,
 	.dft_brightness	= 1023,
 	.pwm_period_ns	= 78770,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device ezx_backlight_device = {
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 133109e..a7c30eb 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -561,6 +561,7 @@
 	.max_brightness = 200,
 	.dft_brightness = 100,
 	.pwm_period_ns  = 30923,
+	.enable_gpio    = -1,
 };
 
 static struct platform_device backlight = {
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 1255ee0..9f6ec167 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -269,6 +269,7 @@
 	.max_brightness	= 1,
 	.dft_brightness	= 1,
 	.pwm_period_ns	= 78770,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device lpd270_backlight_device = {
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index f44532f..fab30d6 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -378,6 +378,7 @@
 	.max_brightness = 272,
 	.dft_brightness = 100,
 	.pwm_period_ns  = 30923,
+	.enable_gpio    = -1,
 	.init           = magician_backlight_init,
 	.notify         = magician_backlight_notify,
 	.exit           = magician_backlight_exit,
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index dd70343..08ccc07 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -338,6 +338,7 @@
 	.max_brightness	= 1023,
 	.dft_brightness	= 1023,
 	.pwm_period_ns	= 78770,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device mainstone_backlight_device = {
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index acc9d3c..f70583f 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -186,6 +186,7 @@
 	.max_brightness	= 100,
 	.dft_brightness	= 50,
 	.pwm_period_ns	= 4000 * 1024,	/* Fl = 250kHz */
+	.enable_gpio	= -1,
 };
 
 /*
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 17d4c53..e54a296 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -322,6 +322,7 @@
 	.max_brightness	= 0xfe,
 	.dft_brightness	= 0x7e,
 	.pwm_period_ns	= 3500 * 1024,
+	.enable_gpio	= -1,
 	.init		= palm27x_backlight_init,
 	.notify		= palm27x_backlight_notify,
 	.exit		= palm27x_backlight_exit,
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 100b176f..7691c97 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -166,45 +166,12 @@
  * Backlight
  ******************************************************************************/
 #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
-static int palmtc_backlight_init(struct device *dev)
-{
-	int ret;
-
-	ret = gpio_request(GPIO_NR_PALMTC_BL_POWER, "BL POWER");
-	if (ret)
-		goto err;
-	ret = gpio_direction_output(GPIO_NR_PALMTC_BL_POWER, 1);
-	if (ret)
-		goto err2;
-
-	return 0;
-
-err2:
-	gpio_free(GPIO_NR_PALMTC_BL_POWER);
-err:
-	return ret;
-}
-
-static int palmtc_backlight_notify(struct device *dev, int brightness)
-{
-	/* backlight is on when GPIO16 AF0 is high */
-	gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness);
-	return brightness;
-}
-
-static void palmtc_backlight_exit(struct device *dev)
-{
-	gpio_free(GPIO_NR_PALMTC_BL_POWER);
-}
-
 static struct platform_pwm_backlight_data palmtc_backlight_data = {
 	.pwm_id		= 1,
 	.max_brightness	= PALMTC_MAX_INTENSITY,
 	.dft_brightness	= PALMTC_MAX_INTENSITY,
 	.pwm_period_ns	= PALMTC_PERIOD_NS,
-	.init		= palmtc_backlight_init,
-	.notify		= palmtc_backlight_notify,
-	.exit		= palmtc_backlight_exit,
+	.enable_gpio	= GPIO_NR_PALMTC_BL_POWER,
 };
 
 static struct platform_device palmtc_backlight = {
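[ The .enable_gpio = -1 conversions in this series work because a negative value means "no enable line": gpio_is_valid(-1) is false, so the driver skips the GPIO entirely. A sketch of the consuming side, simplified from the pwm-backlight pattern with illustrative names: ]

	static void example_bl_power_on(struct platform_pwm_backlight_data *data)
	{
		/* the GPIO is requested once at probe; boards that passed -1
		 * fail gpio_is_valid() and skip this implicitly */
		if (gpio_is_valid(data->enable_gpio))
			gpio_set_value(data->enable_gpio, 1);	/* assert enable */
		/* ... enable the PWM ... */
	}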
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 0742721..956fd24 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -165,6 +165,7 @@
 	.max_brightness	= PALMTE2_MAX_INTENSITY,
 	.dft_brightness	= PALMTE2_MAX_INTENSITY,
 	.pwm_period_ns	= PALMTE2_PERIOD_NS,
+	.enable_gpio	= -1,
 	.init		= palmte2_backlight_init,
 	.notify		= palmte2_backlight_notify,
 	.exit		= palmte2_backlight_exit,
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 3133ba8..9a4e470 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -153,6 +153,7 @@
 	.max_brightness	= 1023,
 	.dft_brightness	= 1023,
 	.pwm_period_ns	= 78770,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device pcm990_backlight_device = {
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 969b0ba..8386dc3 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -539,6 +539,7 @@
 	.dft_brightness	= 100,
 	/* 10000 ns = 10 ms ^= 100 kHz */
 	.pwm_period_ns	= 10000,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device raumfeld_pwm_backlight_device = {
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index 4680efe..a71da84 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -175,6 +175,7 @@
 		.max_brightness	= 100,
 		.dft_brightness	= 100,
 		.pwm_period_ns	= 100000,
+		.enable_gpio	= -1,
 	},
 	[1] = {
 		/* secondary backlight */
@@ -182,6 +183,7 @@
 		.max_brightness	= 100,
 		.dft_brightness	= 100,
 		.pwm_period_ns	= 100000,
+		.enable_gpio	= -1,
 	},
 };
 
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 9c363c0..29905b1 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -401,6 +401,7 @@
 	.max_brightness	= 100,
 	.dft_brightness	= 100,
 	.pwm_period_ns	= 1000000,
+	.enable_gpio	= -1,
 	.init		= viper_backlight_init,
 	.notify		= viper_backlight_notify,
 	.exit		= viper_backlight_exit,
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index 2513d8f..e1a121b 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -206,6 +206,7 @@
 		.max_brightness	= 1023,
 		.dft_brightness	= 0,
 		.pwm_period_ns	= 1260320,
+		.enable_gpio	= -1,
 	},
 	[1] = {
 		/* LCD Backlight */
@@ -213,6 +214,7 @@
 		.max_brightness	= 1023,
 		.dft_brightness	= 512,
 		.pwm_period_ns	= 1260320,
+		.enable_gpio	= -1,
 	},
 };
 
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 36cf7cf..77daea4 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -125,6 +125,7 @@
 	.max_brightness	= 100,
 	.dft_brightness	= 100,
 	.pwm_period_ns	= 10000,
+	.enable_gpio	= -1,
 };
 
 static struct platform_device zylonite_backlight_device = {
diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
index 74dd479..952b6a0 100644
--- a/arch/arm/mach-s3c24xx/mach-h1940.c
+++ b/arch/arm/mach-s3c24xx/mach-h1940.c
@@ -504,6 +504,7 @@
 	.dft_brightness = 50,
 	/* tcnt = 0x31 */
 	.pwm_period_ns  = 36296,
+	.enable_gpio    = -1,
 	.init           = h1940_backlight_init,
 	.notify		= h1940_backlight_notify,
 	.exit           = h1940_backlight_exit,
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 206b1f7..034b7fe 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -522,6 +522,7 @@
 	.max_brightness = 24,
 	.dft_brightness = 4,
 	.pwm_period_ns = 48000,
+	.enable_gpio = -1,
 	.init = rx1950_backlight_init,
 	.notify = rx1950_backlight_notify,
 	.exit = rx1950_backlight_exit,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aca7d16..758e31b 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -114,6 +114,7 @@
 	.max_brightness	= 1000,
 	.dft_brightness	= 600,
 	.pwm_period_ns	= 100000,	/* about 1kHz */
+	.enable_gpio	= -1,
 };
 
 static struct platform_device crag6410_backlight_device = {
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index e806404..614a03a 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -114,6 +114,7 @@
 	.max_brightness	= 100 * 256,
 	.dft_brightness	= 40 * 256,
 	.pwm_period_ns	= 1000000000 / (100 * 256 * 20),
+	.enable_gpio	= -1,
 	.init		= hmt_bl_init,
 	.notify		= hmt_bl_notify,
 	.exit		= hmt_bl_exit,
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 0f47237..a6b338f 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -151,6 +151,7 @@
 	.max_brightness	= 1000,
 	.dft_brightness	= 600,
 	.pwm_period_ns	= 1000000000 / (1000 * 20),
+	.enable_gpio	= -1,
 	.init		= smartq_bl_init,
 };
 
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 2a7b32c..d5ea938 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -625,6 +625,7 @@
 
 static struct platform_pwm_backlight_data smdk6410_bl_data = {
 	.pwm_id = 1,
+	.enable_gpio = -1,
 };
 
 static struct s3c_hsotg_plat smdk6410_hsotg_pdata;
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 0b00304..9efdcc0 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -223,6 +223,7 @@
 
 static struct platform_pwm_backlight_data smdk6440_bl_data = {
 	.pwm_id = 1,
+	.enable_gpio = -1,
 };
 
 static void __init smdk6440_map_io(void)
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index 5949296..c3cacc0 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -242,6 +242,7 @@
 
 static struct platform_pwm_backlight_data smdk6450_bl_data = {
 	.pwm_id = 1,
+	.enable_gpio = -1,
 };
 
 static void __init smdk6450_map_io(void)
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 7c57a22..9e256b9 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -216,6 +216,7 @@
 
 static struct platform_pwm_backlight_data smdkc100_bl_data = {
 	.pwm_id = 0,
+	.enable_gpio = -1,
 };
 
 static void __init smdkc100_map_io(void)
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 6d72bb99..f52cc15 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -279,6 +279,7 @@
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
 	.pwm_id = 3,
 	.pwm_period_ns = 1000,
+	.enable_gpio = -1,
 };
 
 static void __init smdkv210_map_io(void)
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index e838ba2..c9808c6 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -512,6 +512,9 @@
 	 * It's called GPCLKR0 in my SA1110 manual.
 	 */
 	Ser1SDCR0 |= SDCR0_SUS;
+	MSC1 = (MSC1 & ~0xffff) |
+		MSC_NonBrst | MSC_32BitStMem |
+		MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
 
 	if (!machine_has_neponset())
 		sa1100_register_uart_fns(&assabet_port_fns);
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index f25b611..d4ea142 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -42,74 +42,31 @@
 /*
  * This table is setup for a 3.6864MHz Crystal.
  */
-static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
-	 590,	/*  59.0 MHz */
-	 737,	/*  73.7 MHz */
-	 885,	/*  88.5 MHz */
-	1032,	/* 103.2 MHz */
-	1180,	/* 118.0 MHz */
-	1327,	/* 132.7 MHz */
-	1475,	/* 147.5 MHz */
-	1622,	/* 162.2 MHz */
-	1769,	/* 176.9 MHz */
-	1917,	/* 191.7 MHz */
-	2064,	/* 206.4 MHz */
-	2212,	/* 221.2 MHz */
-	2359,	/* 235.9 MHz */
-	2507,	/* 250.7 MHz */
-	2654,	/* 265.4 MHz */
-	2802	/* 280.2 MHz */
+struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
+	{ .frequency = 59000,	/*  59.0 MHz */},
+	{ .frequency = 73700,	/*  73.7 MHz */},
+	{ .frequency = 88500,	/*  88.5 MHz */},
+	{ .frequency = 103200,	/* 103.2 MHz */},
+	{ .frequency = 118000,	/* 118.0 MHz */},
+	{ .frequency = 132700,	/* 132.7 MHz */},
+	{ .frequency = 147500,	/* 147.5 MHz */},
+	{ .frequency = 162200,	/* 162.2 MHz */},
+	{ .frequency = 176900,	/* 176.9 MHz */},
+	{ .frequency = 191700,	/* 191.7 MHz */},
+	{ .frequency = 206400,	/* 206.4 MHz */},
+	{ .frequency = 221200,	/* 221.2 MHz */},
+	{ .frequency = 235900,	/* 235.9 MHz */},
+	{ .frequency = 250700,	/* 250.7 MHz */},
+	{ .frequency = 265400,	/* 265.4 MHz */},
+	{ .frequency = 280200,	/* 280.2 MHz */},
+	{ .frequency = CPUFREQ_TABLE_END, },
 };
 
-/* rounds up(!)  */
-unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
-{
-	int i;
-
-	khz /= 100;
-
-	for (i = 0; i < NR_FREQS; i++)
-		if (cclk_frequency_100khz[i] >= khz)
-			break;
-
-	return i;
-}
-
-unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
-{
-	unsigned int freq = 0;
-	if (idx < NR_FREQS)
-		freq = cclk_frequency_100khz[idx] * 100;
-	return freq;
-}
-
-
-/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
- * this platform, anyway.
- */
-int sa11x0_verify_speed(struct cpufreq_policy *policy)
-{
-	unsigned int tmp;
-	if (policy->cpu)
-		return -EINVAL;
-
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-	/* make sure that at least one frequency is within the policy */
-	tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
-	if (tmp > policy->max)
-		policy->max = tmp;
-
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-	return 0;
-}
-
 unsigned int sa11x0_getspeed(unsigned int cpu)
 {
 	if (cpu)
 		return 0;
-	return cclk_frequency_100khz[PPCR & 0xf] * 100;
+	return sa11x0_freq_table[PPCR & 0xf].frequency;
 }
 
 /*
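[ The old bespoke array is now a standard cpufreq_frequency_table, which generic code can walk up to the CPUFREQ_TABLE_END sentinel. The iteration idiom, as an illustrative sketch: ]

	static unsigned int example_table_max(struct cpufreq_frequency_table *t)
	{
		unsigned int i, max = 0;

		for (i = 0; t[i].frequency != CPUFREQ_TABLE_END; i++)
			if (t[i].frequency != CPUFREQ_ENTRY_INVALID &&
			    t[i].frequency > max)
				max = t[i].frequency;	/* kHz */
		return max;
	}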
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9a33695..0d92e11 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -3,6 +3,7 @@
  *
  * Author: Nicolas Pitre
  */
+#include <linux/cpufreq.h>
 #include <linux/reboot.h>
 
 extern void sa1100_timer_init(void);
@@ -19,12 +20,8 @@
 extern void sa1110_mb_enable(void);
 extern void sa1110_mb_disable(void);
 
-struct cpufreq_policy;
-
-extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
-extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
+extern struct cpufreq_frequency_table sa11x0_freq_table[];
 extern unsigned int sa11x0_getspeed(unsigned int cpu);
-extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
 
 struct flash_platform_data;
 struct resource;
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
deleted file mode 100644
index 6a9eecf..0000000
--- a/arch/arm/mach-sa1100/include/mach/gpio.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/gpio.h
- *
- * SA1100 GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_SA1100_GPIO_H
-#define __ASM_ARCH_SA1100_GPIO_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_get_value(unsigned gpio)
-{
-	if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-		return GPLR & GPIO_GPIO(gpio);
-	else
-		return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-	if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-		if (value)
-			GPSR = GPIO_GPIO(gpio);
-		else
-			GPCR = GPIO_GPIO(gpio);
-	else
-		__gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep	__gpio_cansleep
-
-#endif
diff --git a/arch/arm/mach-sa1100/include/mach/h3xxx.h b/arch/arm/mach-sa1100/include/mach/h3xxx.h
index 7d9df16..c810620 100644
--- a/arch/arm/mach-sa1100/include/mach/h3xxx.h
+++ b/arch/arm/mach-sa1100/include/mach/h3xxx.h
@@ -13,6 +13,8 @@
 #ifndef _INCLUDE_H3XXX_H_
 #define _INCLUDE_H3XXX_H_
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 /* Physical memory regions corresponding to chip selects */
 #define H3600_EGPIO_PHYS	(SA1100_CS5_PHYS + 0x01000000)
 #define H3600_BANK_2_PHYS	SA1100_CS2_PHYS
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index bcbc945..41e476e 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -19,6 +19,7 @@
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
+#include <asm/irq.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 8bc8e4c..958e3cb 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -423,6 +423,7 @@
 	.max_brightness = 255,
 	.dft_brightness = 255,
 	.pwm_period_ns = 33333, /* 30kHz */
+	.enable_gpio = -1,
 };
 
 static struct platform_device pwm_backlight_device = {
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 939ddb8..d71654b 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -30,7 +30,7 @@
 	default y
 	help
 	  This enables support for STMicroelectronics Digital Consumer
-	  Electronics family StiH415 parts, primarily targetted at set-top-box
+	  Electronics family StiH415 parts, primarily targeted at set-top-box
 	  and other digital audio/video applications using Flattened Device
 	  Trees.
 
@@ -39,7 +39,7 @@
 	default y
 	help
 	  This enables support for STMicroelectronics Digital Consumer
-	  Electronics family StiH416 parts, primarily targetted at set-top-box
+	  Electronics family StiH416 parts, primarily targeted at set-top-box
 	  and other digital audio/video applications using Flattened Device
 	  Trees.
 
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 0bf04a0..09e740f 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -51,7 +51,7 @@
 
 config ARCH_TEGRA_114_SOC
 	bool "Enable support for Tegra114 family"
-	select ARM_ERRATA_798181
+	select ARM_ERRATA_798181 if SMP
 	select ARM_L1_CACHE_SHIFT_6
 	select HAVE_ARM_ARCH_TIMER
 	select PINCTRL_TEGRA114
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index d7aa52e..bc47197 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -114,7 +114,7 @@
 	dma_desc->callback = apb_dma_complete;
 	dma_desc->callback_param = NULL;
 
-	INIT_COMPLETION(tegra_apb_wait);
+	reinit_completion(&tegra_apb_wait);
 
 	dmaengine_submit(dma_desc);
 	dma_async_issue_pending(tegra_apb_dma_chan);
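[ INIT_COMPLETION() is being replaced kernel-wide by reinit_completion(), which resets only the done counter so a completion can be reused; init_completion() additionally re-initialises the wait queue and is meant for first-time setup only. The usage pattern, as a sketch: ]

	static DECLARE_COMPLETION(example_done);

	static void example_issue_and_wait(void)
	{
		reinit_completion(&example_done);	/* re-arm before each request */
		/* ... submit the request whose IRQ calls complete(&example_done) ... */
		wait_for_completion(&example_done);
	}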
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c67f8ad..0034d2c 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -29,7 +29,6 @@
 
 config UX500_SOC_DB8500
 	bool
-	select CPU_FREQ_TABLE if CPU_FREQ
 	select MFD_DB8500_PRCMU
 	select PINCTRL_DB8500
 	select PINCTRL_DB8540
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index d7e7422..4a70be4 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,6 +1,7 @@
 config ARCH_VEXPRESS
 	bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARM_AMBA
 	select ARM_GIC
 	select ARM_TIMER_SP804
@@ -64,10 +65,22 @@
 	  This is needed to provide CPU and cluster power management
 	  on RTSM implementing big.LITTLE.
 
+config ARCH_VEXPRESS_SPC
+	bool "Versatile Express Serial Power Controller (SPC)"
+	select ARCH_HAS_CPUFREQ
+	select ARCH_HAS_OPP
+	select PM_OPP
+	help
+	  The TC2 (A15x2 A7x3) Versatile Express core tile integrates a logic

+	  block called Serial Power Controller (SPC) that provides the interface
+	  between the dual cluster test-chip and the M3 microcontroller that
+	  carries out power management.
+
 config ARCH_VEXPRESS_TC2_PM
 	bool "Versatile Express TC2 power management"
 	depends on MCPM
 	select ARM_CCI
+	select ARCH_VEXPRESS_SPC
 	help
 	  Support for CPU and cluster power management on Versatile Express
 	  with a TC2 (A15x2 A7x3) big.LITTLE core tile.
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 505e64a..0997e0b 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -8,7 +8,8 @@
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4)	+= ct-ca9x4.o
 obj-$(CONFIG_ARCH_VEXPRESS_DCSCB)	+= dcscb.o	dcscb_setup.o
 CFLAGS_dcscb.o				+= -march=armv7-a
-obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM)	+= tc2_pm.o spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_SPC)		+= spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM)	+= tc2_pm.o
 CFLAGS_tc2_pm.o				+= -march=armv7-a
 obj-$(CONFIG_SMP)			+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c..14d4996 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 		arch_spin_unlock(&dcscb_lock);
 
-		/*
-		 * Flush all cache levels for this cluster.
-		 *
-		 * To do so we do:
-		 * - Clear the SCTLR.C bit to prevent further cache allocations
-		 * - Flush the whole cache
-		 * - Clear the ACTLR "SMP" bit to disable local coherency
-		 *
-		 * Let's do it in the safest possible way i.e. with
-		 * no memory access within the following sequence
-		 * including to the stack.
-		 *
-		 * Note: fp is preserved to the stack explicitly prior doing
-		 * this since adding it to the clobber list is incompatible
-		 * with having CONFIG_FRAME_POINTER=y.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_all \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		/* Flush all cache levels for this cluster. */
+		v7_exit_coherency_flush(all);
 
 		/*
 		 * This is a harmless no-op.  On platforms with a real
@@ -183,26 +153,8 @@
 	} else {
 		arch_spin_unlock(&dcscb_lock);
 
-		/*
-		 * Flush the local CPU cache.
-		 * Let's do it in the safest possible way as above.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_louis \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		/* Disable and flush the local CPU cache. */
+		v7_exit_coherency_flush(louis);
 	}
 
 	__mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index eefb029..033d34d 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -17,14 +17,31 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
+#include <linux/semaphore.h>
 
 #include <asm/cacheflush.h>
 
 #define SPCLOG "vexpress-spc: "
 
+#define PERF_LVL_A15		0x00
+#define PERF_REQ_A15		0x04
+#define PERF_LVL_A7		0x08
+#define PERF_REQ_A7		0x0c
+#define COMMS			0x10
+#define COMMS_REQ		0x14
+#define PWC_STATUS		0x18
+#define PWC_FLAG		0x1c
+
 /* SPC wake-up IRQs status and mask */
 #define WAKE_INT_MASK		0x24
 #define WAKE_INT_RAW		0x28
@@ -36,12 +53,45 @@
 #define A15_BX_ADDR0		0x68
 #define A7_BX_ADDR0		0x78
 
+/* SPC system config interface registers */
+#define SYSCFG_WDATA		0x70
+#define SYSCFG_RDATA		0x74
+
+/* A15/A7 OPP virtual register base */
+#define A15_PERFVAL_BASE	0xC10
+#define A7_PERFVAL_BASE		0xC30
+
+/* Config interface control bits */
+#define SYSCFG_START		(1 << 31)
+#define SYSCFG_SCC		(6 << 20)
+#define SYSCFG_STAT		(14 << 20)
+
 /* wake-up interrupt masks */
 #define GBL_WAKEUP_INT_MSK	(0x3 << 10)
 
 /* TC2 static dual-cluster configuration */
 #define MAX_CLUSTERS		2
 
+/*
+ * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
+ * operation, the operation could start just before the jiffy counter
+ * is incremented, so use a timeout of 20 ms (= 2 jiffies at 100 Hz).
+ */
+#define TIMEOUT_US	20000
+
+#define MAX_OPPS	8
+#define CA15_DVFS	0
+#define CA7_DVFS	1
+#define SPC_SYS_CFG	2
+#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
+#define STAT_ERR(type)		((1 << 1) << (type << 2))
+#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))
+
+struct ve_spc_opp {
+	unsigned long freq;
+	unsigned long u_volt;
+};
+
 struct ve_spc_drvdata {
 	void __iomem *baseaddr;
 	/*
@@ -49,6 +99,12 @@
 	 * It corresponds to A15 processors MPIDR[15:8] bitfield
 	 */
 	u32 a15_clusid;
+	uint32_t cur_rsp_mask;
+	uint32_t cur_rsp_stat;
+	struct semaphore sem;
+	struct completion done;
+	struct ve_spc_opp *opps[MAX_CLUSTERS];
+	int num_opps[MAX_CLUSTERS];
 };
 
 static struct ve_spc_drvdata *info;
@@ -157,8 +213,197 @@
 	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
 }
 
-int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
+static int ve_spc_get_performance(int cluster, u32 *freq)
 {
+	struct ve_spc_opp *opps = info->opps[cluster];
+	u32 perf_cfg_reg = 0;
+	u32 perf;
+
+	perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
+
+	perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
+	if (perf >= info->num_opps[cluster])
+		return -EINVAL;
+
+	opps += perf;
+	*freq = opps->freq;
+
+	return 0;
+}
+
+/* find closest match to given frequency in OPP table */
+static int ve_spc_round_performance(int cluster, u32 freq)
+{
+	int idx, max_opp = info->num_opps[cluster];
+	struct ve_spc_opp *opps = info->opps[cluster];
+	u32 fmin = 0, fmax = ~0, ftmp;
+
+	freq /= 1000; /* OPP entries in kHz */
+	for (idx = 0; idx < max_opp; idx++, opps++) {
+		ftmp = opps->freq;
+		if (ftmp >= freq) {
+			if (ftmp <= fmax)
+				fmax = ftmp;
+		} else {
+			if (ftmp >= fmin)
+				fmin = ftmp;
+		}
+	}
+	if (fmax != ~0)
+		return fmax * 1000;
+	else
+		return fmin * 1000;
+}
+
+static int ve_spc_find_performance_index(int cluster, u32 freq)
+{
+	int idx, max_opp = info->num_opps[cluster];
+	struct ve_spc_opp *opps = info->opps[cluster];
+
+	for (idx = 0; idx < max_opp; idx++, opps++)
+		if (opps->freq == freq)
+			break;
+	return (idx == max_opp) ? -EINVAL : idx;
+}
+
+static int ve_spc_waitforcompletion(int req_type)
+{
+	int ret = wait_for_completion_interruptible_timeout(
+			&info->done, usecs_to_jiffies(TIMEOUT_US));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else if (ret > 0)
+		ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
+	return ret;
+}
+
+static int ve_spc_set_performance(int cluster, u32 freq)
+{
+	u32 perf_cfg_reg, perf_stat_reg;
+	int ret, perf, req_type;
+
+	if (cluster_is_a15(cluster)) {
+		req_type = CA15_DVFS;
+		perf_cfg_reg = PERF_LVL_A15;
+		perf_stat_reg = PERF_REQ_A15;
+	} else {
+		req_type = CA7_DVFS;
+		perf_cfg_reg = PERF_LVL_A7;
+		perf_stat_reg = PERF_REQ_A7;
+	}
+
+	perf = ve_spc_find_performance_index(cluster, freq);
+
+	if (perf < 0)
+		return perf;
+
+	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+		return -ETIME;
+
+	init_completion(&info->done);
+	info->cur_rsp_mask = RESPONSE_MASK(req_type);
+
+	writel(perf, info->baseaddr + perf_cfg_reg);
+	ret = ve_spc_waitforcompletion(req_type);
+
+	info->cur_rsp_mask = 0;
+	up(&info->sem);
+
+	return ret;
+}
+
+static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
+{
+	int ret;
+
+	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+		return -ETIME;
+
+	init_completion(&info->done);
+	info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);
+
+	/* Set the control value */
+	writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
+	ret = ve_spc_waitforcompletion(SPC_SYS_CFG);
+
+	if (ret == 0)
+		*data = readl(info->baseaddr + SYSCFG_RDATA);
+
+	info->cur_rsp_mask = 0;
+	up(&info->sem);
+
+	return ret;
+}
+
+static irqreturn_t ve_spc_irq_handler(int irq, void *data)
+{
+	struct ve_spc_drvdata *drv_data = data;
+	uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
+
+	if (info->cur_rsp_mask & status) {
+		info->cur_rsp_stat = status;
+		complete(&drv_data->done);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ *  +--------------------------+
+ *  | 31      20 | 19        0 |
+ *  +--------------------------+
+ *  |   u_volt   |  freq(kHz)  |
+ *  +--------------------------+
+ */
+#define MULT_FACTOR	20
+#define VOLT_SHIFT	20
+#define FREQ_MASK	(0xFFFFF)
+static int ve_spc_populate_opps(uint32_t cluster)
+{
+	uint32_t data = 0, off, ret, idx;
+	struct ve_spc_opp *opps;
+
+	opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
+	if (!opps)
+		return -ENOMEM;
+
+	info->opps[cluster] = opps;
+
+	off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
+	for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
+		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+		if (!ret) {
+			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+			opps->u_volt = data >> VOLT_SHIFT;
+		} else {
+			break;
+		}
+	}
+	info->num_opps[cluster] = idx;
+
+	return ret;
+}
+
+static int ve_init_opp_table(struct device *cpu_dev)
+{
+	int cluster = topology_physical_package_id(cpu_dev->id);
+	int idx, ret = 0, max_opp = info->num_opps[cluster];
+	struct ve_spc_opp *opps = info->opps[cluster];
+
+	for (idx = 0; idx < max_opp; idx++, opps++) {
+		ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
+		if (ret) {
+			dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
+				 opps->freq, opps->u_volt);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
+{
+	int ret;
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
 		pr_err(SPCLOG "unable to allocate mem\n");
@@ -168,6 +413,25 @@
 	info->baseaddr = baseaddr;
 	info->a15_clusid = a15_clusid;
 
+	if (irq <= 0) {
+		pr_err(SPCLOG "Invalid IRQ %d\n", irq);
+		kfree(info);
+		return -EINVAL;
+	}
+
+	init_completion(&info->done);
+
+	readl_relaxed(info->baseaddr + PWC_STATUS);
+
+	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
+				| IRQF_ONESHOT, "vexpress-spc", info);
+	if (ret) {
+		pr_err(SPCLOG "IRQ %d request failed\n", irq);
+		kfree(info);
+		return -ENODEV;
+	}
+
+	sema_init(&info->sem, 1);
 	/*
 	 * Multi-cluster systems may need this data when non-coherent, during
 	 * cluster power-up/power-down. Make sure driver info reaches main
@@ -178,3 +442,103 @@
 
 	return 0;
 }
+
+struct clk_spc {
+	struct clk_hw hw;
+	int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+	u32 freq;
+
+	if (ve_spc_get_performance(spc->cluster, &freq))
+		return -EIO;
+
+	return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+
+	return ve_spc_round_performance(spc->cluster, drate);
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+
+	return ve_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+	.recalc_rate = spc_recalc_rate,
+	.round_rate = spc_round_rate,
+	.set_rate = spc_set_rate,
+};
+
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
+{
+	struct clk_init_data init;
+	struct clk_spc *spc;
+
+	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+	if (!spc) {
+		pr_err("could not allocate spc clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spc->hw.init = &init;
+	spc->cluster = topology_physical_package_id(cpu_dev->id);
+
+	init.name = dev_name(cpu_dev);
+	init.ops = &clk_spc_ops;
+	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+	init.num_parents = 0;
+
+	return devm_clk_register(cpu_dev, &spc->hw);
+}
+
+static int __init ve_spc_clk_init(void)
+{
+	int cpu;
+	struct clk *clk;
+
+	if (!info)
+		return 0; /* Continue only if SPC is initialised */
+
+	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
+		pr_err("failed to build OPP table\n");
+		return -ENODEV;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_warn("failed to get cpu%d device\n", cpu);
+			continue;
+		}
+		clk = ve_spc_clk_register(cpu_dev);
+		if (IS_ERR(clk)) {
+			pr_warn("failed to register cpu%d clock\n", cpu);
+			continue;
+		}
+		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+			pr_warn("failed to register cpu%d clock lookup\n", cpu);
+			continue;
+		}
+
+		if (ve_init_opp_table(cpu_dev))
+			pr_warn("failed to initialise cpu%d opp table\n", cpu);
+	}
+
+	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
+	return 0;
+}
+module_init(ve_spc_clk_init);
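[ The per-CPU clocks registered above are looked up by device name through clkdev; a cpufreq driver such as the vexpress-spc-cpufreq device registered at the end would consume them roughly as follows (a sketch, illustrative names, trimmed error handling): ]

	static int example_set_cpu_rate(struct device *cpu_dev, unsigned long hz)
	{
		struct clk *clk = clk_get(cpu_dev, NULL);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		/* lands in spc_set_rate() -> ve_spc_set_performance() */
		ret = clk_set_rate(clk, hz);
		clk_put(clk);
		return ret;
	}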
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index 5f7e4a4..dbd44c3 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -15,7 +15,7 @@
 #ifndef __SPC_H_
 #define __SPC_H_
 
-int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
+int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq);
 void ve_spc_global_wakeup_irq(bool set);
 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb481..05a364c50 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/irqchip/arm-gic.h>
@@ -156,32 +157,7 @@
 			: : "r" (0x400) );
 		}
 
-		/*
-		 * We need to disable and flush the whole (L1 and L2) cache.
-		 * Let's do it in the safest possible way i.e. with
-		 * no memory access within the following sequence
-		 * including the stack.
-		 *
-		 * Note: fp is preserved to the stack explicitly prior doing
-		 * this since adding it to the clobber list is incompatible
-		 * with having CONFIG_FRAME_POINTER=y.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_all \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(all);
 
 		cci_disable_port_by_cpu(mpidr);
 
@@ -197,26 +173,7 @@
 
 		arch_spin_unlock(&tc2_pm_lock);
 
-		/*
-		 * We need to disable and flush only the L1 cache.
-		 * Let's do it in the safest possible way as above.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_louis \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(louis);
 	}
 
 	__mcpm_cpu_down(cpu, cluster);
@@ -311,7 +268,7 @@
 
 static int __init tc2_pm_init(void)
 {
-	int ret;
+	int ret, irq;
 	void __iomem *scc;
 	u32 a15_cluster_id, a7_cluster_id, sys_info;
 	struct device_node *np;
@@ -336,13 +293,15 @@
 	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
 	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
 
+	irq = irq_of_parse_and_map(np, 0);
+
 	/*
 	 * A subset of the SCC registers is also used to communicate
 	 * with the SPC (power controller). We need to be able to
 	 * drive it very early in the boot process to power up
 	 * processors, so we initialize the SPC driver here.
 	 */
-	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
+	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
 	if (ret)
 		return ret;
 
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5f25256..9a7bd13 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -44,6 +44,10 @@
 	{}
 };
 
+static struct platform_device zynq_cpuidle_device = {
+	.name = "cpuidle-zynq",
+};
+
 /**
  * zynq_init_machine - System specific initialization, intended to be
  *		       called from board specific initialization.
@@ -56,6 +60,8 @@
 	l2x0_of_init(0x02060000, 0xF0F0FFFF);
 
 	of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);
+
+	platform_device_register(&zynq_cpuidle_device);
 }
 
 static void __init zynq_timer_init(void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cd2c88e..1f8fed9 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -952,3 +952,9 @@
 	help
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+	bool
+	help
+	  This option specifies the architecture can support big endian
+	  operation.
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8074199..3815a82 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@
 	bne	do_DataAbort
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r3, r3
-#endif
+ ARM_BE8(rev	r3, r3)
+
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b..9240364 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
 #include <asm/cp15.h>
 #include <asm/system_info.h>
 #include <asm/unaligned.h>
+#include <asm/opcodes.h>
 
 #include "fault.h"
 
@@ -762,21 +763,25 @@
 	if (thumb_mode(regs)) {
 		u16 *ptr = (u16 *)(instrptr & ~1);
 		fault = probe_kernel_address(ptr, tinstr);
+		tinstr = __mem_to_opcode_thumb16(tinstr);
 		if (!fault) {
 			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
 			    IS_T32(tinstr)) {
 				/* Thumb-2 32-bit */
 				u16 tinst2 = 0;
 				fault = probe_kernel_address(ptr + 1, tinst2);
-				instr = (tinstr << 16) | tinst2;
+				tinst2 = __mem_to_opcode_thumb16(tinst2);
+				instr = __opcode_thumb32_compose(tinstr, tinst2);
 				thumb2_32b = 1;
 			} else {
 				isize = 2;
 				instr = thumb2arm(tinstr);
 			}
 		}
-	} else
+	} else {
 		fault = probe_kernel_address(instrptr, instr);
+		instr = __mem_to_opcode_arm(instr);
+	}
 
 	if (fault) {
 		type = TYPE_FAULT;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed2..79f8b39 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = (u64)arm_dma_limit;
+	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@
 			return 0;
 		}
 
-		if ((~mask) & (u64)arm_dma_limit) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (u64)arm_dma_limit);
+		/*
+		 * If the mask allows for more memory than we can address,
+		 * and we actually have that much memory, then fail the
+		 * allocation.
+		 */
+		if (sizeof(mask) != sizeof(dma_addr_t) &&
+		    mask > (dma_addr_t)~0 &&
+		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+			return 0;
+		}
+
+		/*
+		 * Now check that the mask, when translated to a PFN,
+		 * fits within the allowable addresses which we can
+		 * allocate.
+		 */
+		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 arm_dma_pfn_limit + 1);
 			return 0;
 		}
 	}
@@ -687,7 +707,7 @@
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -700,7 +720,7 @@
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -1007,8 +1027,27 @@
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	if (mask < (u64)arm_dma_limit)
+	unsigned long limit;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
 		return 0;
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	limit = dma_to_pfn(dev, mask);
+
+	if (limit < arm_dma_pfn_limit)
+		return 0;
+
 	return 1;
 }
 EXPORT_SYMBOL(dma_supported);
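[ These checks only help drivers that actually test the result of mask negotiation, as the dev_warn above hints. A minimal sketch of the driver side, with an illustrative function name: ]

	static int example_probe_dma(struct device *dev)
	{
		/* must check the return value: a too-wide mask now fails */
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) &&
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(24)))
			return -EIO;	/* no usable mask: give up or bounce */
		return 0;
	}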
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9d28562..312e15e 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -9,8 +9,13 @@
 	const struct exception_table_entry *fixup;
 
 	fixup = search_exception_tables(instruction_pointer(regs));
-	if (fixup)
+	if (fixup) {
 		regs->ARM_pc = fixup->fixup;
+#ifdef CONFIG_THUMB2_KERNEL
+		/* Clear the IT state to avoid nasty surprises in the fixup */
+		regs->ARM_cpsr &= ~PSR_IT_MASK;
+#endif
+	}
 
 	return fixup != NULL;
 }
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2a5907b..ff379ac 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -65,7 +65,7 @@
 	return ret;
 }
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * If we are using split PTE locks, then we need to take the page
  * lock here.  Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@
 {
 	spin_unlock(ptl);
 }
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 83cb3ac..8e0e52e 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -10,6 +10,7 @@
 #include <asm/system_info.h>
 
 pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@
 	unsigned long addr, end;
 	unsigned long next;
 
-	addr = virt_to_phys(text_start);
-	end = virt_to_phys(text_end);
+	addr = virt_to_idmap(text_start);
+	end = virt_to_idmap(text_end);
+	pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
 
 	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
@@ -90,8 +92,6 @@
 	if (!idmap_pgd)
 		return -ENOMEM;
 
-	pr_info("Setting up static identity map for 0x%p - 0x%p\n",
-		__idmap_text_start, __idmap_text_end);
 	identity_mapping_add(idmap_pgd, __idmap_text_start,
 			     __idmap_text_end, 0);
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ca907f8..3e8f106e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -209,6 +209,7 @@
  * so a successful GFP_DMA allocation will always satisfy this.
  */
 phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
@@ -231,6 +232,7 @@
 		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
 }
 
@@ -418,12 +420,10 @@
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
 	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-	 * the system, not the maximum PFN.
 	 */
-	max_low_pfn = max_low - PHYS_PFN_OFFSET;
-	max_pfn = max_high - PHYS_PFN_OFFSET;
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
 /*
@@ -529,7 +529,7 @@
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
-	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	unsigned long max_low = max_low_pfn;
 	struct memblock_region *mem, *res;
 
 	/* set highmem page free */
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9a..d5a982d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@
 
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
 #else
 #define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c63562..d27158c3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@
 }
 
 /*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
  */
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
-	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
 }
 
 #ifdef CONFIG_STRICT_DEVMEM
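[ Worked example of the new bound: with LPAE (PHYS_MASK_SHIFT == 40) and 4 KiB pages, 1 + (PHYS_MASK >> PAGE_SHIFT) = 2^28, so /dev/mem mappings may now cover the full 1 TiB physical space; the old check capped pfn + size/PAGE_SIZE at 0x00100000 pfns, i.e. 4 GiB, regardless of what the CPU could actually address. ]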
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17ee..78eeeca 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,8 @@
 #include <asm/highmem.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@
 	}
 }
 
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+			      struct proc_info_list *procinfo)
+{
+	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+	unsigned long map_start, map_end;
+	pgd_t *pgd0, *pgdk;
+	pud_t *pud0, *pudk, *pud_start;
+	pmd_t *pmd0, *pmdk;
+	phys_addr_t phys;
+	int i;
+
+	if (!(mdesc->init_meminfo))
+		return;
+
+	/* remap kernel code and data */
+	map_start = init_mm.start_code;
+	map_end   = init_mm.brk;
+
+	/* get a handle on things... */
+	pgd0 = pgd_offset_k(0);
+	pud_start = pud0 = pud_offset(pgd0, 0);
+	pmd0 = pmd_offset(pud0, 0);
+
+	pgdk = pgd_offset_k(map_start);
+	pudk = pud_offset(pgdk, map_start);
+	pmdk = pmd_offset(pudk, map_start);
+
+	mdesc->init_meminfo();
+
+	/* Run the patch stub to update the constants */
+	fixup_pv_table(&__pv_table_begin,
+		(&__pv_table_end - &__pv_table_begin) << 2);
+
+	/*
+	 * Cache cleaning operations for self-modifying code
+	 * We should clean the entries by MVA but running a
+	 * for loop over every pv_table entry pointer would
+	 * just complicate the code.
+	 */
+	flush_cache_louis();
+	dsb();
+	isb();
+
+	/* remap level 1 table */
+	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+		set_pud(pud0,
+			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+		pmd0 += PTRS_PER_PMD;
+	}
+
+	/* remap pmds for kernel mapping */
+	phys = __pa(map_start) & PMD_MASK;
+	do {
+		*pmdk++ = __pmd(phys | pmdprot);
+		phys += PMD_SIZE;
+	} while (phys < map_end);
+
+	flush_cache_all();
+	cpu_switch_mm(pgd0, &init_mm);
+	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+	local_flush_bp_all();
+	local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+			      struct proc_info_list *procinfo)
+{
+	if (mdesc->init_meminfo)
+		mdesc->init_meminfo();
+}
+
+#endif
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 34d4ab2..5c668b7 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -296,6 +296,15 @@
 }
 
 /*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+			      struct proc_info_list *procinfo)
+{
+}
+
+/*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1128064..45dc29f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -220,9 +220,7 @@
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	orr	r6, r6, #1 << 25		@ big-endian page tables
-#endif
+ ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c63d9bd..60920f6 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -367,9 +367,7 @@
 #endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	orr	r6, r6, #1 << 25		@ big-endian page tables
-#endif
+ ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
 #ifdef CONFIG_SWP_EMULATE
 	orr     r5, r5, #(1 << 10)              @ set SW bit in "clear"
 	bic     r6, r6, #(1 << 10)              @ clear it in "mmuset"
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 99b44e0..9ed155a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -19,6 +19,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
+#include <asm/opcodes.h>
 
 #include "bpf_jit_32.h"
 
@@ -113,8 +114,11 @@
 
 static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 {
+	inst |= (cond << 28);
+	inst = __opcode_to_mem_arm(inst);
+
 	if (ctx->target != NULL)
-		ctx->target[ctx->idx] = inst | (cond << 28);
+		ctx->target[ctx->idx] = inst;
 
 	ctx->idx++;
 }
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index d51f956..be4ad0b 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -70,6 +70,7 @@
 		.max_brightness = 255,
 		.dft_brightness = 255,
 		.pwm_period_ns  = 78770,
+		.enable_gpio    = -1,
 		.init           = samsung_bl_init,
 		.exit           = samsung_bl_exit,
 	},
@@ -121,6 +122,10 @@
 		samsung_bl_data->lth_brightness = bl_data->lth_brightness;
 	if (bl_data->pwm_period_ns)
 		samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns;
+	if (bl_data->enable_gpio >= 0)
+		samsung_bl_data->enable_gpio = bl_data->enable_gpio;
+	if (bl_data->enable_gpio_flags)
+		samsung_bl_data->enable_gpio_flags = bl_data->enable_gpio_flags;
 	if (bl_data->init)
 		samsung_bl_data->init = bl_data->init;
 	if (bl_data->notify)
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index 2677bc3..40f27e5 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -10,6 +10,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 /*
  * Realview/Versatile Express specific entry point for secondary CPUs.
@@ -17,6 +18,7 @@
  * until we're ready for them to initialise.
  */
 ENTRY(versatile_secondary_startup)
+ ARM_BE8(setend	be)
 	mrc	p15, 0, r0, c0, c0, 5
 	bic	r0, #0xff000000
 	adr	r4, 1f
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 52b8f40..2f37e1d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -642,9 +642,9 @@
 static int vfp_hotplug(struct notifier_block *b, unsigned long action,
 	void *hcpu)
 {
-	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-		vfp_force_reload((long)hcpu, current_thread_info());
-	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+		vfp_current_hw_state[(long)hcpu] = NULL;
+	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		vfp_enable(NULL);
 	return NOTIFY_OK;
 }
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 4384103..1296952 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y		:= enlighten.o hypercall.o grant-table.o
+obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 0000000..b0e77de
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				 unsigned int address_bits,
+				 dma_addr_t *dma_handle)
+{
+	if (!xen_initial_domain())
+		return -EINVAL;
+
+	/* we assume that dom0 is mapped 1:1 for now */
+	*dma_handle = pstart;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+	return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+	.mapping_error = xen_swiotlb_dma_mapping_error,
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
+	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+	.map_sg = xen_swiotlb_map_sg_attrs,
+	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_page = xen_swiotlb_map_page,
+	.unmap_page = xen_swiotlb_unmap_page,
+	.dma_supported = xen_swiotlb_dma_supported,
+	.set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+	if (!xen_initial_domain())
+		return 0;
+	xen_swiotlb_init(1, false);
+	xen_dma_ops = &xen_swiotlb_dma_ops;
+	return 0;
+}
+arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 0000000..23732cd
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+	unsigned long pfn;
+	unsigned long mfn;
+	unsigned long nr_pages;
+	struct rb_node rbnode_mach;
+	struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+	struct rb_node **link = &phys_to_mach.rb_node;
+	struct rb_node *parent = NULL;
+	struct xen_p2m_entry *entry;
+	int rc = 0;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+		if (new->mfn == entry->mfn)
+			goto err_out;
+		if (new->pfn == entry->pfn)
+			goto err_out;
+
+		if (new->pfn < entry->pfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+	rb_link_node(&new->rbnode_phys, parent, link);
+	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+	goto out;
+
+err_out:
+	rc = -EINVAL;
+	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+	return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+	struct rb_node *n = phys_to_mach.rb_node;
+	struct xen_p2m_entry *entry;
+	unsigned long irqflags;
+
+	read_lock_irqsave(&p2m_lock, irqflags);
+	while (n) {
+		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+		if (entry->pfn <= pfn &&
+				entry->pfn + entry->nr_pages > pfn) {
+			read_unlock_irqrestore(&p2m_lock, irqflags);
+			return entry->mfn + (pfn - entry->pfn);
+		}
+		if (pfn < entry->pfn)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	read_unlock_irqrestore(&p2m_lock, irqflags);
+
+	return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+	struct rb_node **link = &mach_to_phys.rb_node;
+	struct rb_node *parent = NULL;
+	struct xen_p2m_entry *entry;
+	int rc = 0;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+		if (new->mfn == entry->mfn)
+			goto err_out;
+		if (new->pfn == entry->pfn)
+			goto err_out;
+
+		if (new->mfn < entry->mfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+	rb_link_node(&new->rbnode_mach, parent, link);
+	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+	goto out;
+
+err_out:
+	rc = -EINVAL;
+	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+	return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+	struct rb_node *n = mach_to_phys.rb_node;
+	struct xen_p2m_entry *entry;
+	unsigned long irqflags;
+
+	read_lock_irqsave(&p2m_lock, irqflags);
+	while (n) {
+		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+		if (entry->mfn <= mfn &&
+				entry->mfn + entry->nr_pages > mfn) {
+			read_unlock_irqrestore(&p2m_lock, irqflags);
+			return entry->pfn + (mfn - entry->mfn);
+		}
+		if (mfn < entry->mfn)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	read_unlock_irqrestore(&p2m_lock, irqflags);
+
+	return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+		unsigned long mfn, unsigned long nr_pages)
+{
+	int rc;
+	unsigned long irqflags;
+	struct xen_p2m_entry *p2m_entry;
+	struct rb_node *n = phys_to_mach.rb_node;
+
+	if (mfn == INVALID_P2M_ENTRY) {
+		write_lock_irqsave(&p2m_lock, irqflags);
+		while (n) {
+			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+			if (p2m_entry->pfn <= pfn &&
+					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+				rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+				write_unlock_irqrestore(&p2m_lock, irqflags);
+				kfree(p2m_entry);
+				return true;
+			}
+			if (pfn < p2m_entry->pfn)
+				n = n->rb_left;
+			else
+				n = n->rb_right;
+		}
+		write_unlock_irqrestore(&p2m_lock, irqflags);
+		return true;
+	}
+
+	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+	if (!p2m_entry) {
+		pr_warn("cannot allocate xen_p2m_entry\n");
+		return false;
+	}
+	p2m_entry->pfn = pfn;
+	p2m_entry->nr_pages = nr_pages;
+	p2m_entry->mfn = mfn;
+
+	write_lock_irqsave(&p2m_lock, irqflags);
+	if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0 ||
+		(rc = xen_add_mach_to_phys_entry(p2m_entry)) < 0) {
+		write_unlock_irqrestore(&p2m_lock, irqflags);
+		return false;
+	}
+	write_unlock_irqrestore(&p2m_lock, irqflags);
+	return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+	rwlock_init(&p2m_lock);
+	return 0;
+}
+arch_initcall(p2m_init);
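
The two red-black trees above index contiguous pfn<->mfn ranges by their starting frame, so a lookup is a hit when the key falls inside [start, start + nr_pages). A minimal user-space sketch of the same range test, using a sorted array and binary search in place of the rb-tree (all names here are illustrative, not kernel API):

    #include <stdio.h>

    /* Illustrative analogue of struct xen_p2m_entry: a contiguous
     * pfn range [pfn, pfn + nr_pages) mapped to mfns starting at mfn. */
    struct range_entry {
    	unsigned long pfn;
    	unsigned long mfn;
    	unsigned long nr_pages;
    };

    #define INVALID_ENTRY (~0UL)

    /* Same hit test as __pfn_to_mfn(): search ranges sorted by pfn,
     * descending into whichever half the key belongs to. */
    static unsigned long lookup_mfn(const struct range_entry *tbl,
    				int n, unsigned long pfn)
    {
    	int lo = 0, hi = n - 1;

    	while (lo <= hi) {
    		int mid = lo + (hi - lo) / 2;
    		const struct range_entry *e = &tbl[mid];

    		if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
    			return e->mfn + (pfn - e->pfn);
    		if (pfn < e->pfn)
    			hi = mid - 1;
    		else
    			lo = mid + 1;
    	}
    	return INVALID_ENTRY;
    }

    int main(void)
    {
    	static const struct range_entry tbl[] = {
    		{ .pfn = 0x100, .mfn = 0x9000, .nr_pages = 4 },
    		{ .pfn = 0x200, .mfn = 0x4000, .nr_pages = 2 },
    	};

    	printf("0x102 -> 0x%lx\n", lookup_mfn(tbl, 2, 0x102)); /* 0x9002 */
    	printf("0x300 -> 0x%lx\n", lookup_mfn(tbl, 2, 0x300)); /* invalid */
    	return 0;
    }

Keeping whole ranges as single nodes is what lets __set_phys_to_machine_multi() insert or drop a multi-page mapping with one tree operation per direction.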
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index bb0bf1b..88c8b6c1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -143,7 +143,6 @@
 
 config SMP
 	bool "Symmetric Multi-Processing"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU.  If
 	  you say N here, the kernel will run on single and
@@ -221,6 +220,7 @@
 config XEN
 	bool "Xen guest support on ARM64 (EXPERIMENTAL)"
 	depends on ARM64 && OF
+	select SWIOTLB_XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 8363644..01de5aa 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,20 +126,6 @@
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, %2\n"
-"	bic	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
-	: "Ir" (mask)
-	: "cc");
-}
-
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d18100..fd0c0c0 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
 
 #include <asm-generic/dma-coherent.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (unlikely(!dev) || !dev->archdata.dma_ops)
 		return dma_ops;
@@ -35,6 +39,14 @@
 		return dev->archdata.dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
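
With this change every arm64 DMA operation funnels through get_dma_ops(): when the kernel boots as the Xen initial domain the swiotlb-xen table is returned unconditionally, otherwise the per-device or global ops apply. A user-space sketch of that ops-table dispatch pattern (struct ops and xen_running are stand-ins, not kernel symbols):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for struct dma_map_ops: a table of function pointers. */
    struct ops {
    	const char *(*name)(void);
    };

    static const char *generic_name(void) { return "generic"; }
    static const char *xen_name(void)     { return "xen-swiotlb"; }

    static struct ops generic_ops = { .name = generic_name };
    static struct ops xen_ops     = { .name = xen_name };

    /* Analogue of the xen_initial_domain() check; set at init time. */
    static bool xen_running;

    /* Mirrors get_dma_ops(): prefer the Xen table when it applies,
     * fall back to the per-device table, then the global default. */
    static struct ops *get_ops(struct ops *per_device)
    {
    	if (xen_running)
    		return &xen_ops;
    	return per_device ? per_device : &generic_ops;
    }

    int main(void)
    {
    	printf("%s\n", get_ops(NULL)->name());	/* generic */
    	xen_running = true;
    	printf("%s\n", get_ops(NULL)->name());	/* xen-swiotlb */
    	return 0;
    }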
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index b56e5b5..4cc813e 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 
+#include <xen/xen.h>
+
 /*
  * Generic IO read/write.  These perform native-endian accesses.
  */
@@ -263,5 +266,12 @@
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index a5f28e2..c98ef47 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -63,6 +63,7 @@
  * TAC:		Trap ACTLR
  * TSC:		Trap SMC
  * TSW:		Trap cache operations by set/way
+ * TWE:		Trap WFE
  * TWI:		Trap WFI
  * TIDCP:	Trap L2CTLR/L2ECTLR
  * BSU_IS:	Upgrade barriers to the inner shareable domain
@@ -72,8 +73,9 @@
  * FMO:		Override CPSR.F and enable signaling with VF
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
  */
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
-			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_AMO | HCR_IMO | HCR_FMO | \
 			 HCR_SWIO | HCR_TIDCP | HCR_RW)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
@@ -242,4 +244,6 @@
 
 #define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
 
+#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
+
 #endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index eec0738..dd8ecfc3 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -177,4 +177,65 @@
 	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+	else
+		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+
+	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return be16_to_cpu(data & 0xffff);
+		case 4:
+			return be32_to_cpu(data & 0xffffffff);
+		default:
+			return be64_to_cpu(data);
+		}
+	}
+
+	return data;		/* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return cpu_to_be16(data & 0xffff);
+		case 4:
+			return cpu_to_be32(data & 0xffffffff);
+		default:
+			return cpu_to_be64(data);
+		}
+	}
+
+	return data;		/* Leave LE untouched */
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
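
vcpu_data_guest_to_host() and vcpu_data_host_to_guest() swap MMIO data only when the guest runs big-endian, and only at the access width; a single byte never needs swapping, which is why case 1 masks without converting. A self-contained sketch of the same width-dependent swap, using the glibc <endian.h> helpers in place of be16_to_cpu() and friends (on a little-endian host they byte-swap, on a big-endian host they are no-ops):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Width-dependent swap, as in vcpu_data_guest_to_host() when the
     * guest is big-endian. */
    static uint64_t guest_to_host(uint64_t data, unsigned int len)
    {
    	switch (len) {
    	case 1:
    		return data & 0xff;		/* single byte: no swap */
    	case 2:
    		return be16toh(data & 0xffff);
    	case 4:
    		return be32toh(data & 0xffffffff);
    	default:
    		return be64toh(data);
    	}
    }

    int main(void)
    {
    	/* A BE guest storing 0x1234 presents bytes 0x12 0x34; an LE
    	 * host reads those back as 0x3412 until swapped. */
    	printf("0x%llx\n",
    	       (unsigned long long)guest_to_host(0x3412, 2)); /* 0x1234 */
    	return 0;
    }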
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0859a4d..5d85a02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -36,11 +36,6 @@
 
 #define KVM_VCPU_MAX_FEATURES 2
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 struct kvm_vcpu;
 int kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -151,6 +146,7 @@
 struct kvm_vcpu_init;
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 struct kvm_one_reg;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index efe609c..680f74e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@
 void kvm_clear_hyp_idmap(void);
 
 #define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
+#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)
 
 static inline bool kvm_is_write_fault(unsigned long esr)
 {
@@ -116,13 +117,18 @@
 	pte_val(*pte) |= PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+	pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+					      unsigned long size)
 {
 	if (!icache_is_aliasing()) {		/* PIPT */
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		flush_icache_range(hva, hva + PAGE_SIZE);
+		flush_icache_range(hva, hva + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
 		/* any kind of VIPT cache */
 		__flush_icache_all();
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f214069..9bea6e7 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,9 +63,12 @@
 	struct page *pte;
 
 	pte = alloc_pages(PGALLOC_GFP, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
-
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d57e668..755f861 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -85,6 +85,8 @@
 #define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Memory Attribute override for Stage-2 (MemAttr[3:0])
  */
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 0000000..2820f1a
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cbfacf7..6a0a9b1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,7 +27,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/debug-monitors.h>
-#include <asm/local.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 
@@ -89,8 +88,8 @@
  * Keep track of debug users on each core.
  * The ref counts are per-cpu so we use a local_t type.
  */
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
 
 void enable_debug_monitors(enum debug_el el)
 {
@@ -98,11 +97,11 @@
 
 	WARN_ON(preemptible());
 
-	if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+	if (this_cpu_inc_return(mde_ref_count) == 1)
 		enable = DBG_MDSCR_MDE;
 
 	if (el == DBG_ACTIVE_EL1 &&
-	    local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+	    this_cpu_inc_return(kde_ref_count) == 1)
 		enable |= DBG_MDSCR_KDE;
 
 	if (enable && debug_enabled) {
@@ -118,11 +117,11 @@
 
 	WARN_ON(preemptible());
 
-	if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+	if (this_cpu_dec_return(mde_ref_count) == 0)
 		disable = ~DBG_MDSCR_MDE;
 
 	if (el == DBG_ACTIVE_EL1 &&
-	    local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+	    this_cpu_dec_return(kde_ref_count) == 0)
 		disable &= ~DBG_MDSCR_KDE;
 
 	if (disable) {
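
The debug-monitors conversion above trades local_t atomics for plain per-cpu ints behind this_cpu_inc_return()/this_cpu_dec_return(), which is safe because the callers assert !preemptible(); only the 0->1 and 1->0 transitions touch the hardware. A user-space analogue of that enable-on-first-use reference count, with a _Thread_local int standing in for the per-cpu variable (an analogy only, not how per-cpu data actually works):

    #include <stdio.h>

    /* Per-thread stand-in for a DEFINE_PER_CPU(int, ...) counter. */
    static _Thread_local int mde_ref_count;

    static void enable_monitors(void)
    {
    	/* Only the first user flips the hardware enable bit. */
    	if (++mde_ref_count == 1)
    		printf("enable MDE\n");
    }

    static void disable_monitors(void)
    {
    	/* Only the last user clears it again. */
    	if (--mde_ref_count == 0)
    		printf("disable MDE\n");
    }

    int main(void)
    {
    	enable_monitors();	/* enable MDE */
    	enable_monitors();	/* nested user: no-op */
    	disable_monitors();	/* still one user: no-op */
    	disable_monitors();	/* disable MDE */
    	return 0;
    }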
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 329218c..ff516f6 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -184,14 +184,14 @@
 		/* Breakpoint */
 		ctrl_reg = AARCH64_DBG_REG_BCR;
 		val_reg = AARCH64_DBG_REG_BVR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		reg_enable = !debug_info->bps_disabled;
 	} else {
 		/* Watchpoint */
 		ctrl_reg = AARCH64_DBG_REG_WCR;
 		val_reg = AARCH64_DBG_REG_WVR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		reg_enable = !debug_info->wps_disabled;
 	}
@@ -230,12 +230,12 @@
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = AARCH64_DBG_REG_BCR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = AARCH64_DBG_REG_WCR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -505,11 +505,11 @@
 
 	switch (reg) {
 	case AARCH64_DBG_REG_BCR:
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		break;
 	case AARCH64_DBG_REG_WCR:
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		break;
 	default:
@@ -546,7 +546,7 @@
 	struct debug_info *debug_info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);
 	addr = instruction_pointer(regs);
 	debug_info = &current->thread.debug;
 
@@ -596,7 +596,7 @@
 			user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);
 
 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;
@@ -623,7 +623,7 @@
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 	debug_info = &current->thread.debug;
 
 	for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@
 			user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);
 
 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;
@@ -722,7 +722,7 @@
 	struct debug_info *debug_info = &current->thread.debug;
 	int handled_exception = 0, *kernel_step;
 
-	kernel_step = &__get_cpu_var(stepping_kernel_bp);
+	kernel_step = this_cpu_ptr(&stepping_kernel_bp);
 
 	/*
 	 * Called from single-step exception handler.
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 2c28a6c..e2ad0d8 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -29,7 +29,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
 
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 5d14470..0e63c98 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1044,7 +1044,7 @@
 	 */
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -1258,7 +1258,7 @@
 
 static struct pmu_hw_events *armpmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }
 
 static void __init cpu_pmu_init(struct arm_pmu *armpmu)
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e51bbe7..b3fc9f5 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -122,7 +122,7 @@
 	return 0;
 }
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 21e9082..4480ab3 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -21,6 +21,7 @@
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
 	select KVM_ARM_VGIC
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2c3ff67..3f0731e 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -248,6 +248,26 @@
 	return kvm_reset_vcpu(vcpu);
 }
 
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+	int target = kvm_target_cpu();
+
+	if (target < 0)
+		return -ENODEV;
+
+	memset(init, 0, sizeof(*init));
+
+	/*
+	 * For now, we don't return any features.
+	 * In future, we might use features to return target
+	 * specific features available for the preferred
+	 * target type.
+	 */
+	init->target = (__u32)target;
+
+	return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	return -EINVAL;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 9beaca0..8da5606 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -47,21 +47,29 @@
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ *		    instruction executed by a guest
+ *
  * @vcpu:	the vcpu pointer
  *
- * Simply call kvm_vcpu_block(), which will halt execution of
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_vcpu_block(vcpu);
+	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+		kvm_vcpu_on_spin(vcpu);
+	else
+		kvm_vcpu_block(vcpu);
+
 	return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
+	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
 	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
 	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
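
The handler now reads bit 0 of the ISS (ESR_EL2_EC_WFI_ISS_WFE) to tell the two traps apart: a trapped WFE usually means the guest vcpu is spinning on a lock held elsewhere, so the host merely yields the physical CPU, while a trapped WFI parks the vcpu until an interrupt is injected. A rough user-space analogue of the two policies (sched_yield() and a polling wait stand in for kvm_vcpu_on_spin() and kvm_vcpu_block(); illustrative only):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* WFE path: give the CPU away briefly and come back. */
    static void handle_wfe(void)
    {
    	sched_yield();
    }

    /* WFI path: stay off the CPU until an interrupt arrives. A real
     * implementation sleeps on a waitqueue; polling keeps the sketch
     * self-contained. */
    static void handle_wfi(volatile bool *irq_pending)
    {
    	while (!*irq_pending)
    		usleep(1000);
    }

    int main(void)
    {
    	volatile bool irq = true;	/* pretend an IRQ is already queued */

    	handle_wfe();
    	handle_wfi(&irq);
    	puts("vcpu resumes");
    	return 0;
    }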
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be24040..74a8d87 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c
index 2038875..64919b0 100644
--- a/arch/avr32/boards/atngw100/evklcd10x.c
+++ b/arch/avr32/boards/atngw100/evklcd10x.c
@@ -58,7 +58,7 @@
 	.dclkmax		= 28330000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
 	.default_bpp		= 16,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
@@ -96,7 +96,7 @@
 	.dclkmax		= 7000000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
 	.default_bpp		= 16,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
@@ -134,7 +134,7 @@
 	.dclkmax		= 6400000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
 	.default_bpp		= 16,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
@@ -145,7 +145,7 @@
 };
 #endif
 
-static void atevklcd10x_lcdc_power_control(int on)
+static void atevklcd10x_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
 	gpio_set_value(GPIO_PIN_PB(15), on);
 }
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index 7de083d..1ba09e4 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -83,7 +83,7 @@
 	.dclkmax		= 9260000,
 };
 
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
 	.default_bpp		= 24,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
@@ -126,7 +126,7 @@
 	.dclkmax		= 9260000,
 };
 
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
 	.default_bpp		= 24,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/atstk1000/atstk1000.h b/arch/avr32/boards/atstk1000/atstk1000.h
index 9392d32..653cc09 100644
--- a/arch/avr32/boards/atstk1000/atstk1000.h
+++ b/arch/avr32/boards/atstk1000/atstk1000.h
@@ -10,7 +10,7 @@
 #ifndef __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
 #define __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
 
-extern struct atmel_lcdfb_info atstk1000_lcdc_data;
+extern struct atmel_lcdfb_pdata atstk1000_lcdc_data;
 
 void atstk1000_setup_j2_leds(void);
 
diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c
index 2d6b560..b6b88f5 100644
--- a/arch/avr32/boards/atstk1000/setup.c
+++ b/arch/avr32/boards/atstk1000/setup.c
@@ -55,7 +55,7 @@
 	.dclkmax		= 30000000,
 };
 
-struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata atstk1000_lcdc_data = {
 	.default_bpp		= 24,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 27bd6fb..7b1f2cd 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -125,7 +125,7 @@
 	.dclkmax		= 28000000,
 };
 
-struct atmel_lcdfb_info __initdata favr32_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata favr32_lcdc_data = {
 	.default_bpp		= 16,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index 9d1efd1..dc0e317 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -77,7 +77,7 @@
 	.dclkmax		= 10000000,
 };
 
-struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata hammerhead_lcdc_data = {
 	.default_bpp		= 24,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/merisc/display.c b/arch/avr32/boards/merisc/display.c
index 85a543c..e7683ee 100644
--- a/arch/avr32/boards/merisc/display.c
+++ b/arch/avr32/boards/merisc/display.c
@@ -45,7 +45,7 @@
 	.dclkmax	= 30000000,
 };
 
-struct atmel_lcdfb_info merisc_lcdc_data = {
+struct atmel_lcdfb_pdata merisc_lcdc_data = {
 	.default_bpp		= 24,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index 05358aa..1cb8e9c 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 
-extern struct atmel_lcdfb_info mimc200_lcdc_data;
+extern struct atmel_lcdfb_pdata mimc200_lcdc_data;
 
 #include <linux/clk.h>
 #include <linux/etherdevice.h>
@@ -71,7 +71,7 @@
 	.dclkmax		= 25200000,
 };
 
-struct atmel_lcdfb_info __initdata mimc200_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata mimc200_lcdc_data = {
 	.default_bpp		= 16,
 	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
 	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index bc7e8ae..1aba19d 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -68,7 +68,10 @@
 		return NULL;
 
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 
 	return page;
 }
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 11c4259..4399364 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* __ASM_AVR32_SOCKET_H */
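
SO_MAX_PACING_RATE is the per-socket transmit-rate cap added this cycle for the fq packet scheduler; the option value is an unsigned int in bytes per second, and 47 matches the asm-generic value. A minimal usage sketch (the fallback define covers libcs that predate the option):

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_MAX_PACING_RATE
    #define SO_MAX_PACING_RATE 47
    #endif

    int main(void)
    {
    	int fd = socket(AF_INET, SOCK_STREAM, 0);
    	unsigned int rate = 1000000;	/* cap at ~1 MB/s */

    	if (fd < 0) {
    		perror("socket");
    		return 1;
    	}
    	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
    		       &rate, sizeof(rate)) < 0)
    		perror("setsockopt(SO_MAX_PACING_RATE)");
    	return 0;
    }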
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index a68f3cf..a1f4d1e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1439,7 +1439,7 @@
  *  LCDC
  * -------------------------------------------------------------------- */
 #if defined(CONFIG_CPU_AT32AP7000) || defined(CONFIG_CPU_AT32AP7002)
-static struct atmel_lcdfb_info atmel_lcdfb0_data;
+static struct atmel_lcdfb_pdata atmel_lcdfb0_data;
 static struct resource atmel_lcdfb0_resource[] = {
 	{
 		.start		= 0xff000000,
@@ -1467,12 +1467,12 @@
 };
 
 struct platform_device *__init
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
 		     unsigned long fbmem_start, unsigned long fbmem_len,
 		     u64 pin_mask)
 {
 	struct platform_device *pdev;
-	struct atmel_lcdfb_info *info;
+	struct atmel_lcdfb_pdata *info;
 	struct fb_monspecs *monspecs;
 	struct fb_videomode *modedb;
 	unsigned int modedb_size;
@@ -1529,7 +1529,7 @@
 	}
 
 	info = pdev->dev.platform_data;
-	memcpy(info, data, sizeof(struct atmel_lcdfb_info));
+	memcpy(info, data, sizeof(struct atmel_lcdfb_pdata));
 	info->default_monspecs = monspecs;
 
 	pdev->name = "at32ap-lcdfb";
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index d485b03..f1a316d 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -44,9 +44,9 @@
 at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
 void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
 
-struct atmel_lcdfb_info;
+struct atmel_lcdfb_pdata;
 struct platform_device *
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
 		     unsigned long fbmem_start, unsigned long fbmem_len,
 		     u64 pin_mask);
 
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 74314bd..9ceccef 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -34,7 +34,6 @@
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
@@ -1440,7 +1439,6 @@
 config BFIN_CPU_FREQ
 	bool
 	depends on CPU_FREQ
-	select CPU_FREQ_TABLE
 	default y
 
 config CPU_VOLTAGE
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index 13eb732..4ca39ab 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN6XX=y
+CONFIG_SPI_BFIN_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 4ae1144..2fd04f1 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -23,8 +23,7 @@
 /*
  * pm save bfin pint registers
  */
-struct bfin_pm_pint_save {
-	u32 mask_set;
+struct adi_pm_pint_save {
 	u32 assign;
 	u32 edge_set;
 	u32 invert_set;
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h
index 4fbf835..4b2a992 100644
--- a/arch/blackfin/include/asm/irq_handler.h
+++ b/arch/blackfin/include/asm/irq_handler.h
@@ -12,11 +12,11 @@
 #include <mach/irq.h>
 
 /* init functions only */
-extern int __init init_arch_irq(void);
+extern int init_arch_irq(void);
 extern void init_exception_vectors(void);
-extern void __init program_IAR(void);
+extern void program_IAR(void);
 #ifdef init_mach_irq
-extern void __init init_mach_irq(void);
+extern void init_mach_irq(void);
 #else
 # define init_mach_irq()
 #endif
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index ed978f1..a017359 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -11,11 +11,8 @@
 #include <linux/err.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <asm/blackfin.h>
-#include <asm/gpio.h>
-#include <asm/portmux.h>
+#include <linux/gpio.h>
 #include <linux/irq.h>
-#include <asm/irq_handler.h>
 
 #if ANOMALY_05000311 || ANOMALY_05000323
 enum {
@@ -58,19 +55,6 @@
 	(struct gpio_port_t *) FIO0_FLAG_D,
 	(struct gpio_port_t *) FIO1_FLAG_D,
 	(struct gpio_port_t *) FIO2_FLAG_D,
-#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x) 
-	(struct gpio_port_t *)PORTA_FER,
-	(struct gpio_port_t *)PORTB_FER,
-	(struct gpio_port_t *)PORTC_FER,
-	(struct gpio_port_t *)PORTD_FER,
-	(struct gpio_port_t *)PORTE_FER,
-	(struct gpio_port_t *)PORTF_FER,
-	(struct gpio_port_t *)PORTG_FER,
-# if defined(CONFIG_BF54x)
-	(struct gpio_port_t *)PORTH_FER,
-	(struct gpio_port_t *)PORTI_FER,
-	(struct gpio_port_t *)PORTJ_FER,
-# endif
 #else
 # error no gpio arrays defined
 #endif
@@ -169,12 +153,6 @@
 
 inline int check_gpio(unsigned gpio)
 {
-#if defined(CONFIG_BF54x)
-	if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15
-	    || gpio == GPIO_PH14 || gpio == GPIO_PH15
-	    || gpio == GPIO_PJ14 || gpio == GPIO_PJ15)
-		return -EINVAL;
-#endif
 	if (gpio >= MAX_BLACKFIN_GPIOS)
 		return -EINVAL;
 	return 0;
@@ -212,12 +190,6 @@
 	else
 		*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
 	SSYNC();
-#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	if (usage == GPIO_USAGE)
-		gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
-	else
-		gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio);
-	SSYNC();
 #endif
 }
 
@@ -255,7 +227,7 @@
 	u16 ident = P_IDENT(per);
 	u16 function = P_FUNCT2MUX(per);
 	s8 offset = port_mux[ident];
-	u16 m, pmux, pfunc;
+	u16 m, pmux, pfunc, mask;
 
 	if (offset < 0)
 		return 0;
@@ -270,10 +242,12 @@
 			continue;
 
 		if (offset == 1)
-			pfunc = (pmux >> offset) & 3;
+			mask = 3;
 		else
-			pfunc = (pmux >> offset) & 1;
-		if (pfunc != function) {
+			mask = 1;
+
+		pfunc = (pmux >> offset) & mask;
+		if (pfunc != (function & mask)) {
 			pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
 				ident, function, m, pfunc);
 			return -EINVAL;
@@ -288,44 +262,22 @@
 	u16 ident = P_IDENT(per);
 	u16 function = P_FUNCT2MUX(per);
 	s8 offset = port_mux[ident];
-	u16 pmux;
+	u16 pmux, mask;
 
 	if (offset == -1)
 		return;
 
 	pmux = bfin_read_PORT_MUX();
-	if (offset != 1)
-		pmux &= ~(1 << offset);
+	if (offset == 1)
+		mask = 3;
 	else
-		pmux &= ~(3 << 1);
-	pmux |= (function << offset);
+		mask = 1;
+
+	pmux &= ~(mask << offset);
+	pmux |= ((function & mask) << offset);
+
 	bfin_write_PORT_MUX(pmux);
 }
-#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-inline void portmux_setup(unsigned short per)
-{
-	u16 ident = P_IDENT(per);
-	u16 function = P_FUNCT2MUX(per);
-	u32 pmux;
-
-	pmux = gpio_array[gpio_bank(ident)]->port_mux;
-
-	pmux &= ~(0x3 << (2 * gpio_sub_n(ident)));
-	pmux |= (function & 0x3) << (2 * gpio_sub_n(ident));
-
-	gpio_array[gpio_bank(ident)]->port_mux = pmux;
-}
-
-inline u16 get_portmux(unsigned short per)
-{
-	u16 ident = P_IDENT(per);
-	u32 pmux = gpio_array[gpio_bank(ident)]->port_mux;
-	return (pmux >> (2 * gpio_sub_n(ident)) & 0x3);
-}
-static int portmux_group_check(unsigned short per)
-{
-	return 0;
-}
 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 static int portmux_group_check(unsigned short per)
 {
@@ -379,7 +331,6 @@
 }
 #endif
 
-#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x))
 /***********************************************************
 *
 * FUNCTIONS: Blackfin General Purpose Ports Access Functions
@@ -572,7 +523,7 @@
 *************************************************************
 * MODIFICATION HISTORY :
 **************************************************************/
-int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
+int bfin_gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
 {
 	unsigned long flags;
 
@@ -591,7 +542,7 @@
 	return 0;
 }
 
-int bfin_pm_standby_ctrl(unsigned ctrl)
+int bfin_gpio_pm_standby_ctrl(unsigned ctrl)
 {
 	u16 bank, mask, i;
 
@@ -682,53 +633,6 @@
 
 
 #endif
-#else /* CONFIG_BF54x || CONFIG_BF60x */
-#ifdef CONFIG_PM
-
-int bfin_pm_standby_ctrl(unsigned ctrl)
-{
-	return 0;
-}
-
-void bfin_gpio_pm_hibernate_suspend(void)
-{
-	int i, bank;
-
-	for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
-		bank = gpio_bank(i);
-
-		gpio_bank_saved[bank].fer = gpio_array[bank]->port_fer;
-		gpio_bank_saved[bank].mux = gpio_array[bank]->port_mux;
-		gpio_bank_saved[bank].data = gpio_array[bank]->data;
-		gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
-		gpio_bank_saved[bank].dir = gpio_array[bank]->dir_set;
-	}
-}
-
-void bfin_gpio_pm_hibernate_restore(void)
-{
-	int i, bank;
-
-	for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
-		bank = gpio_bank(i);
-
-		gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
-		gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
-		gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
-		gpio_array[bank]->data_set = gpio_bank_saved[bank].data
-						& gpio_bank_saved[bank].dir;
-		gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
-	}
-}
-#endif
-
-unsigned short get_gpio_dir(unsigned gpio)
-{
-	return (0x01 & (gpio_array[gpio_bank(gpio)]->dir_clear >> gpio_sub_n(gpio)));
-}
-EXPORT_SYMBOL(get_gpio_dir);
-
-#endif /* CONFIG_BF54x || CONFIG_BF60x */
 
 /***********************************************************
 *
@@ -785,11 +689,7 @@
 		 * be requested and used by several drivers
 		 */
 
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-		if (!((per & P_MAYSHARE) && get_portmux(per) == P_FUNCT2MUX(per))) {
-#else
 		if (!(per & P_MAYSHARE)) {
-#endif
 			/*
 			 * Allow that the identical pin function can
 			 * be requested from the same driver twice
@@ -938,12 +838,9 @@
 	if (unlikely(is_reserved(gpio_irq, gpio, 1))) {
 		printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
 		       " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
-	}
-#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x))
-	else {	/* Reset POLAR setting when acquiring a gpio for the first time */
+	} else {	/* Reset POLAR setting when acquiring a gpio for the first time */
 		set_gpio_polar(gpio, 0);
 	}
-#endif
 
 	reserve(gpio, gpio);
 	set_label(gpio, label);
@@ -1112,11 +1009,7 @@
 
 static inline void __bfin_gpio_direction_input(unsigned gpio)
 {
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	gpio_array[gpio_bank(gpio)]->dir_clear = gpio_bit(gpio);
-#else
 	gpio_array[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
-#endif
 	gpio_array[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
 }
 
@@ -1140,17 +1033,7 @@
 
 void bfin_gpio_irq_prepare(unsigned gpio)
 {
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	unsigned long flags;
-#endif
-
 	port_setup(gpio, GPIO_USAGE);
-
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	flags = hard_local_irq_save();
-	__bfin_gpio_direction_input(gpio);
-	hard_local_irq_restore(flags);
-#endif
 }
 
 void bfin_gpio_set_value(unsigned gpio, int arg)
@@ -1175,11 +1058,7 @@
 
 	gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
 	gpio_set_value(gpio, value);
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	gpio_array[gpio_bank(gpio)]->dir_set = gpio_bit(gpio);
-#else
 	gpio_array[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
-#endif
 
 	AWA_DUMMY_READ(dir);
 	hard_local_irq_restore(flags);
@@ -1190,9 +1069,6 @@
 
 int bfin_gpio_get_value(unsigned gpio)
 {
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
-	return (1 & (gpio_array[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)));
-#else
 	unsigned long flags;
 
 	if (unlikely(get_gpio_edge(gpio))) {
@@ -1205,7 +1081,6 @@
 		return ret;
 	} else
 		return get_gpio_data(gpio);
-#endif
 }
 EXPORT_SYMBOL(bfin_gpio_get_value);
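
The portmux rework above folds the field width into a mask, offset 1 selecting the one two-bit mux field and every other offset a one-bit field, and applies function & mask on both the check and the update. The update itself is the usual read-modify-write of a register bitfield, sketched here:

    #include <stdint.h>
    #include <stdio.h>

    /* Read-modify-write of a mux field, as in portmux_setup(): clear
     * the field, then OR in the new function number masked to the
     * field width. */
    static uint16_t set_mux_field(uint16_t pmux, unsigned int offset,
    			      uint16_t mask, uint16_t function)
    {
    	pmux &= ~(mask << offset);
    	pmux |= (function & mask) << offset;
    	return pmux;
    }

    int main(void)
    {
    	uint16_t pmux = 0xffff;

    	/* offset 1 is the two-bit field on these parts */
    	pmux = set_mux_field(pmux, 1, 0x3, 0x2);
    	printf("0x%04x\n", pmux);	/* 0xfffd */
    	return 0;
    }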
 
diff --git a/arch/blackfin/mach-bf548/Kconfig b/arch/blackfin/mach-bf548/Kconfig
index 94acb58..334ec7b 100644
--- a/arch/blackfin/mach-bf548/Kconfig
+++ b/arch/blackfin/mach-bf548/Kconfig
@@ -377,40 +377,6 @@
 
 endmenu
 
-comment "Pin Interrupt to Port Assignment"
-menu "Assignment"
-
-config PINTx_REASSIGN
-	bool "Reprogram PINT Assignment"
-	default y
-	help
-	  The interrupt assignment registers controls the pin-to-interrupt
-	  assignment in a byte-wide manner. Each option allows you to select
-	  a set of pins (High/Low Byte) of an specific Port being mapped
-	  to one of the four PIN Interrupts IRQ_PINTx.
-
-	  You shouldn't change any of these unless you know exactly what you're doing.
-	  Please consult the Blackfin BF54x Processor Hardware Reference Manual.
-
-config PINT0_ASSIGN
-	hex "PINT0_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT1_ASSIGN
-	hex "PINT1_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x01010000
-config PINT2_ASSIGN
-	hex "PINT2_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x07000101
-config PINT3_ASSIGN
-	hex "PINT3_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x02020303
-
-endmenu
-
 endmenu
 
 endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 372eb54..d495000 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -17,6 +17,9 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/usb/musb.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_data/pinctrl-adi2.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/dma.h>
 #include <asm/gpio.h>
@@ -241,6 +244,13 @@
 		.end = UART0_RBR+2,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTE_FER,
+		.end = PORTE_FER+2,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART0_TX,
 		.end = IRQ_UART0_TX,
@@ -289,6 +299,13 @@
 		.end = UART1_RBR+2,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTH_FER,
+		.end = PORTH_FER+2,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART1_TX,
 		.end = IRQ_UART1_TX,
@@ -353,6 +370,13 @@
 		.end = UART2_RBR+2,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTB_FER,
+		.end = PORTB_FER+2,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART2_TX,
 		.end = IRQ_UART2_TX,
@@ -401,6 +425,13 @@
 		.end = UART3_RBR+2,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTB_FER,
+		.end = PORTB_FER+2,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART3_TX,
 		.end = IRQ_UART3_TX,
@@ -1058,6 +1089,411 @@
 };
 #endif
 
+#ifdef CONFIG_PINCTRL_ADI2
+
+# define ADI_PINT_DEVNAME "adi-gpio-pint"
+# define ADI_GPIO_DEVNAME "adi-gpio"
+# define ADI_PINCTRL_DEVNAME "pinctrl-adi2"
+
+static struct platform_device bfin_pinctrl_device = {
+	.name = ADI_PINCTRL_DEVNAME,
+	.id = 0,
+};
+
+static struct resource bfin_pint0_resources[] = {
+	{
+		.start = PINT0_MASK_SET,
+		.end = PINT0_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT0,
+		.end = IRQ_PINT0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint0_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_pint0_resources),
+	.resource = bfin_pint0_resources,
+};
+
+static struct resource bfin_pint1_resources[] = {
+	{
+		.start = PINT1_MASK_SET,
+		.end = PINT1_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT1,
+		.end = IRQ_PINT1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint1_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_pint1_resources),
+	.resource = bfin_pint1_resources,
+};
+
+static struct resource bfin_pint2_resources[] = {
+	{
+		.start = PINT2_MASK_SET,
+		.end = PINT2_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT2,
+		.end = IRQ_PINT2,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint2_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_pint2_resources),
+	.resource = bfin_pint2_resources,
+};
+
+static struct resource bfin_pint3_resources[] = {
+	{
+		.start = PINT3_MASK_SET,
+		.end = PINT3_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT3,
+		.end = IRQ_PINT3,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint3_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_pint3_resources),
+	.resource = bfin_pint3_resources,
+};
+
+static struct resource bfin_gpa_resources[] = {
+	{
+		.start = PORTA_FER,
+		.end = PORTA_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{	/* optional */
+		.start = IRQ_PA0,
+		.end = IRQ_PA0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpa_pdata = {
+	.port_gpio_base	= GPIO_PA0,	/* Optional */
+	.port_pin_base	= GPIO_PA0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 0,		/* PINT0 */
+	.pint_assign	= true,		/* PINT upper 16 bit */
+	.pint_map	= 0,		/* mapping mask in PINT */
+};
+
+static struct platform_device bfin_gpa_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_gpa_resources),
+	.resource = bfin_gpa_resources,
+	.dev = {
+		.platform_data = &bfin_gpa_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpb_resources[] = {
+	{
+		.start = PORTB_FER,
+		.end = PORTB_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PB0,
+		.end = IRQ_PB0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpb_pdata = {
+	.port_gpio_base	= GPIO_PB0,
+	.port_pin_base	= GPIO_PB0,
+	.port_width	= 15,
+	.pint_id	= 0,
+	.pint_assign	= true,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpb_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_gpb_resources),
+	.resource = bfin_gpb_resources,
+	.dev = {
+		.platform_data = &bfin_gpb_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpc_resources[] = {
+	{
+		.start = PORTC_FER,
+		.end = PORTC_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PC0,
+		.end = IRQ_PC0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpc_pdata = {
+	.port_gpio_base	= GPIO_PC0,
+	.port_pin_base	= GPIO_PC0,
+	.port_width	= 14,
+	.pint_id	= 2,
+	.pint_assign	= true,
+	.pint_map	= 0,
+};
+
+static struct platform_device bfin_gpc_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_gpc_resources),
+	.resource = bfin_gpc_resources,
+	.dev = {
+		.platform_data = &bfin_gpc_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpd_resources[] = {
+	{
+		.start = PORTD_FER,
+		.end = PORTD_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PD0,
+		.end = IRQ_PD0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpd_pdata = {
+	.port_gpio_base	= GPIO_PD0,
+	.port_pin_base	= GPIO_PD0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 2,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpd_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_gpd_resources),
+	.resource = bfin_gpd_resources,
+	.dev = {
+		.platform_data = &bfin_gpd_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpe_resources[] = {
+	{
+		.start = PORTE_FER,
+		.end = PORTE_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PE0,
+		.end = IRQ_PE0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpe_pdata = {
+	.port_gpio_base	= GPIO_PE0,
+	.port_pin_base	= GPIO_PE0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 3,
+	.pint_assign	= true,
+	.pint_map	= 2,
+};
+
+static struct platform_device bfin_gpe_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 4,
+	.num_resources = ARRAY_SIZE(bfin_gpe_resources),
+	.resource = bfin_gpe_resources,
+	.dev = {
+		.platform_data = &bfin_gpe_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpf_resources[] = {
+	{
+		.start = PORTF_FER,
+		.end = PORTF_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PF0,
+		.end = IRQ_PF0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpf_pdata = {
+	.port_gpio_base	= GPIO_PF0,
+	.port_pin_base	= GPIO_PF0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 3,
+	.pint_assign	= false,
+	.pint_map	= 3,
+};
+
+static struct platform_device bfin_gpf_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 5,
+	.num_resources = ARRAY_SIZE(bfin_gpf_resources),
+	.resource = bfin_gpf_resources,
+	.dev = {
+		.platform_data = &bfin_gpf_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpg_resources[] = {
+	{
+		.start = PORTG_FER,
+		.end = PORTG_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PG0,
+		.end = IRQ_PG0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpg_pdata = {
+	.port_gpio_base	= GPIO_PG0,
+	.port_pin_base	= GPIO_PG0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= -1,
+};
+
+static struct platform_device bfin_gpg_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 6,
+	.num_resources = ARRAY_SIZE(bfin_gpg_resources),
+	.resource = bfin_gpg_resources,
+	.dev = {
+		.platform_data = &bfin_gpg_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gph_resources[] = {
+	{
+		.start = PORTH_FER,
+		.end = PORTH_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PH0,
+		.end = IRQ_PH0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gph_pdata = {
+	.port_gpio_base	= GPIO_PH0,
+	.port_pin_base	= GPIO_PH0,
+	.port_width	= 14,
+	.pint_id	= -1,
+};
+
+static struct platform_device bfin_gph_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 7,
+	.num_resources = ARRAY_SIZE(bfin_gph_resources),
+	.resource = bfin_gph_resources,
+	.dev = {
+		.platform_data = &bfin_gph_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpi_resources[] = {
+	{
+		.start = PORTI_FER,
+		.end = PORTI_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PI0,
+		.end = IRQ_PI0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpi_pdata = {
+	.port_gpio_base	= GPIO_PI0,
+	.port_pin_base	= GPIO_PI0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= -1,
+};
+
+static struct platform_device bfin_gpi_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 8,
+	.num_resources = ARRAY_SIZE(bfin_gpi_resources),
+	.resource = bfin_gpi_resources,
+	.dev = {
+		.platform_data = &bfin_gpi_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpj_resources[] = {
+	{
+		.start = PORTJ_FER,
+		.end = PORTJ_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PJ0,
+		.end = IRQ_PJ0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpj_pdata = {
+	.port_gpio_base	= GPIO_PJ0,
+	.port_pin_base	= GPIO_PJ0,
+	.port_width	= 14,
+	.pint_id	= -1,
+};
+
+static struct platform_device bfin_gpj_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 9,
+	.num_resources = ARRAY_SIZE(bfin_gpj_resources),
+	.resource = bfin_gpj_resources,
+	.dev = {
+		.platform_data = &bfin_gpj_pdata, /* Passed to driver */
+	},
+};
+
+#endif
+
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #if defined(CONFIG_MTD_M25P80) \
 	|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -1066,7 +1502,7 @@
 		.modalias = "m25p80", /* Name of spi_driver for this device */
 		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0, /* Framework bus number */
-		.chip_select = 1, /* SPI_SSEL1*/
+		.chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1 */
 		.platform_data = &bfin_spi_flash_data,
 		.controller_data = &spi_flash_chip_info,
 		.mode = SPI_MODE_3,
@@ -1078,7 +1514,7 @@
 		.modalias = "ad183x",
 		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 1,
-		.chip_select = 4,
+		.chip_select = MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */
 	},
 #endif
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
@@ -1088,7 +1524,7 @@
 		.irq			= IRQ_PB4,	/* old boards (<=Rev 1.3) use IRQ_PJ11 */
 		.max_speed_hz		= 12500000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num		= 0,
-		.chip_select  		= 2,
+		.chip_select		= MAX_CTRL_CS + GPIO_PE5, /* SPI_SSEL2 */
 	},
 #endif
 #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1096,7 +1532,7 @@
 		.modalias = "spidev",
 		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0,
-		.chip_select = 1,
+		.chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1 */
 	},
 #endif
 #if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
@@ -1106,7 +1542,7 @@
 		.irq			= IRQ_PC5,
 		.max_speed_hz		= 5000000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num		= 1,
-		.chip_select  		= 2,
+		.chip_select		= MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */
 		.mode = SPI_MODE_3,
 	},
 #endif
@@ -1152,7 +1588,7 @@
 
 /* SPI controller data */
 static struct bfin5xx_spi_master bf54x_spi_master_info0 = {
-	.num_chipselect = 4,
+	.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
 	.enable_dma = 1,  /* master has the ability to do dma transfer */
 	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
 };
@@ -1168,7 +1604,7 @@
 };
 
 static struct bfin5xx_spi_master bf54x_spi_master_info1 = {
-	.num_chipselect = 4,
+	.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
 	.enable_dma = 1,  /* master has the ability to do dma transfer */
 	.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
 };
@@ -1508,6 +1944,23 @@
 static struct platform_device *ezkit_devices[] __initdata = {
 
 	&bfin_dpmc,
+#if defined(CONFIG_PINCTRL_ADI2)
+	&bfin_pinctrl_device,
+	&bfin_pint0_device,
+	&bfin_pint1_device,
+	&bfin_pint2_device,
+	&bfin_pint3_device,
+	&bfin_gpa_device,
+	&bfin_gpb_device,
+	&bfin_gpc_device,
+	&bfin_gpd_device,
+	&bfin_gpe_device,
+	&bfin_gpf_device,
+	&bfin_gpg_device,
+	&bfin_gph_device,
+	&bfin_gpi_device,
+	&bfin_gpj_device,
+#endif
 
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
 	&rtc_device,
@@ -1644,10 +2097,66 @@
 #endif
 };
 
+/* Pin control settings */
+static struct pinctrl_map __initdata bfin_pinmux_map[] = {
+	/* per-device maps */
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0",  "pinctrl-adi2.0", NULL, "uart0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1",  "pinctrl-adi2.0", NULL, "uart1"),
+#ifdef CONFIG_BFIN_UART1_CTSRTS
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1",  "pinctrl-adi2.0", NULL, "uart1_ctsrts"),
+#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.2",  "pinctrl-adi2.0", NULL, "uart2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.3",  "pinctrl-adi2.0", NULL, "uart3"),
+#ifdef CONFIG_BFIN_UART3_CTSRTS
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.3",  "pinctrl-adi2.0", NULL, "uart3_ctsrts"),
+#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.0",  "pinctrl-adi2.0", NULL, "uart0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.1",  "pinctrl-adi2.0", NULL, "uart1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.2",  "pinctrl-adi2.0", NULL, "uart2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.3",  "pinctrl-adi2.0", NULL, "uart3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sdh.0",  "pinctrl-adi2.0", NULL, "rsi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi.0",  "pinctrl-adi2.0", NULL, "spi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi.1",  "pinctrl-adi2.0", NULL, "spi1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.0",  "pinctrl-adi2.0", NULL, "twi0"),
+#if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
+	PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.1",  "pinctrl-adi2.0", NULL, "twi1"),
+#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.2",  "pinctrl-adi2.0", NULL, "sport2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.2",  "pinctrl-adi2.0", NULL, "sport2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.2",  "pinctrl-adi2.0", NULL, "sport2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.3",  "pinctrl-adi2.0", NULL, "sport3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.3",  "pinctrl-adi2.0", NULL, "sport3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.3",  "pinctrl-adi2.0", NULL, "sport3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.2",  "pinctrl-adi2.0", NULL, "sport2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.3",  "pinctrl-adi2.0", NULL, "sport3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi"),
+#ifdef CONFIG_BF548_ATAPI_ALTERNATIVE_PORT
+	PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
+#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+};
+
 static int __init ezkit_init(void)
 {
 	printk(KERN_INFO "%s(): registering device resources\n", __func__);
 
+	/* Initialize pinmuxing */
+	pinctrl_register_mappings(bfin_pinmux_map,
+				ARRAY_SIZE(bfin_pinmux_map));
+
 	i2c_register_board_info(0, bfin_i2c_board_info0,
 				ARRAY_SIZE(bfin_i2c_board_info0));
 #if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
@@ -1679,21 +2188,6 @@
 	&bfin_uart3_device,
 #endif
 #endif
-
-#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
-#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
-	&bfin_sport0_uart_device,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
-	&bfin_sport1_uart_device,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
-	&bfin_sport2_uart_device,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
-	&bfin_sport3_uart_device,
-#endif
-#endif
 };
 
 void __init native_machine_early_platform_add_devices(void)
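The pinctrl_map table added above is the machine-side half of the new pinctrl-adi2 driver: pinctrl_register_mappings() hands the static table to the pinctrl core, which applies the named mux group automatically when the matching client device probes. A minimal sketch of the pattern, with illustrative device and group names:

	#include <linux/pinctrl/machine.h>

	static struct pinctrl_map __initdata board_pinmux_map[] = {
		/* route pin group "uart0" of controller pinctrl-adi2.0
		 * to device bfin-uart.0 in its "default" state */
		PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0", "pinctrl-adi2.0",
					  NULL, "uart0"),
	};

	static int __init board_pinmux_init(void)
	{
		/* must run before the client devices are probed */
		return pinctrl_register_mappings(board_pinmux_map,
						 ARRAY_SIZE(board_pinmux_map));
	}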
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index be9edb2..006da1e 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -194,14 +194,6 @@
 	unsigned int port_mux;
 };
 
-struct gpio_port_s {
-	unsigned short fer;
-	unsigned short data;
-	unsigned short dir;
-	unsigned short inen;
-	unsigned int mux;
-};
-
 #endif
 
 #include <mach-common/ports-a.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 10dc142..cf7cb72 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -433,7 +433,7 @@
 #include <linux/types.h>
 
 /*
- * bfin pint registers layout
+ * gpio pint registers layout
  */
 struct bfin_pint_regs {
 	u32 mask_set;
diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig
index 2bcbf94..b0fca44 100644
--- a/arch/blackfin/mach-bf609/Kconfig
+++ b/arch/blackfin/mach-bf609/Kconfig
@@ -9,48 +9,6 @@
 
 menu "BF609 Specific Configuration"
 
-comment "Pin Interrupt to Port Assignment"
-menu "Assignment"
-
-config PINTx_REASSIGN
-	bool "Reprogram PINT Assignment"
-	default y
-	help
-	  The interrupt assignment registers controls the pin-to-interrupt
-	  assignment in a byte-wide manner. Each option allows you to select
-	  a set of pins (High/Low Byte) of an specific Port being mapped
-	  to one of the four PIN Interrupts IRQ_PINTx.
-
-	  You shouldn't change any of these unless you know exactly what you're doing.
-	  Please consult the Blackfin BF60x Processor Hardware Reference Manual.
-
-config PINT0_ASSIGN
-	hex "PINT0_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT1_ASSIGN
-	hex "PINT1_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT2_ASSIGN
-	hex "PINT2_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT3_ASSIGN
-	hex "PINT3_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT4_ASSIGN
-	hex "PINT3_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-config PINT5_ASSIGN
-	hex "PINT3_ASSIGN"
-	depends on PINTx_REASSIGN
-	default 0x00000101
-
-endmenu
-
 config SEC_IRQ_PRIORITY_LEVELS
 	int "SEC interrupt priority levels"
 	default 7
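The PINTx_REASSIGN knobs removed here are superseded by per-port platform data consumed by the new pinctrl-adi2 driver (see the ezkit.c hunks below): each GPIO port now declares which PINT block serves it and how its pins map into that block's 32 bits. A sketch with the field semantics as this series uses them (consult the driver for the authoritative meaning):

	static struct adi_pinctrl_gpio_platform_data gpa_pdata = {
		.port_pin_base	= GPIO_PA0,	/* first GPIO of the port */
		.port_width	= GPIO_BANKSIZE,
		.pint_id	= 0,		/* served by PINT0 */
		.pint_assign	= true,		/* use upper 16 bits of the PINT */
		.pint_map	= 0,		/* byte-assignment code in PINT_ASSIGN */
	};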
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index d56a55a..82beedd 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -17,6 +17,9 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/usb/musb.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_data/pinctrl-adi2.h>
 #include <asm/bfin_spi3.h>
 #include <asm/dma.h>
 #include <asm/gpio.h>
@@ -106,8 +109,6 @@
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 
-static unsigned short pins[] = P_RMII0;
-
 static struct stmmac_mdio_bus_data phy_private_data = {
 	.phy_mask = 1,
 };
@@ -212,6 +213,18 @@
 		.end = UART0_RXDIV+4,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTD_FER,
+		.end = PORTD_FER+2,
+		.flags = IORESOURCE_REG,
+	},
+	{
+		.start = PORTD_MUX,
+		.end = PORTD_MUX+3,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART0_TX,
 		.end = IRQ_UART0_TX,
@@ -276,6 +289,13 @@
 		.end = UART1_RXDIV+4,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		.start = PORTG_FER_SET,
+		.end = PORTG_FER_SET+2,
+		.flags = IORESOURCE_REG,
+	},
+#endif
 	{
 		.start = IRQ_UART1_TX,
 		.end = IRQ_UART1_TX,
@@ -674,17 +694,12 @@
 	},
 };
 
-int bf609_nor_flash_init(struct platform_device *dev)
+int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
-	const unsigned short pins[] = {
-		P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
-		P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
-		P_A22, P_A23, P_A24, P_A25, P_NORCK, 0,
-	};
 
-	peripheral_request_list(pins, "smc0");
-
+	if (!devm_pinctrl_get_select_default(&pdev->dev))
+		return -EBUSY;
 	bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
 	bfin_write32(SMC_B0CTL, 0x01002011);
 	bfin_write32(SMC_B0TIM, 0x08170977);
@@ -692,16 +707,9 @@
 	return 0;
 }
 
-void bf609_nor_flash_exit(struct platform_device *dev)
+void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-	const unsigned short pins[] = {
-		P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
-		P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
-		P_A22, P_A23, P_A24, P_A25, P_NORCK, 0,
-	};
-
-	peripheral_free_list(pins);
-
+	devm_pinctrl_put(pdev->dev.pins->p);
 	bfin_write32(SMC_GCTL, 0);
 }
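This conversion trades the open-coded peripheral_request_list()/peripheral_free_list() pairs for the generic pinctrl API: devm_pinctrl_get_select_default() looks up the device's default pinctrl state (the "smc0" group mapped below) and applies it in one call. Note that the pinctrl core reports failure with ERR_PTR() codes rather than NULL, so the usual idiom is:

	struct pinctrl *p;

	/* acquire and apply the "default" pin state for this device */
	p = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(p))
		return PTR_ERR(p);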
 
@@ -1319,6 +1327,356 @@
 };
 #endif
 
+#ifdef CONFIG_PINCTRL_ADI2
+
+# define ADI_PINT_DEVNAME "adi-gpio-pint"
+# define ADI_GPIO_DEVNAME "adi-gpio"
+# define ADI_PINCTRL_DEVNAME "pinctrl-adi2"
+
+static struct platform_device bfin_pinctrl_device = {
+	.name = ADI_PINCTRL_DEVNAME,
+	.id = 0,
+};
+
+static struct resource bfin_pint0_resources[] = {
+	{
+		.start = PINT0_MASK_SET,
+		.end = PINT0_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT0,
+		.end = IRQ_PINT0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint0_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_pint0_resources),
+	.resource = bfin_pint0_resources,
+};
+
+static struct resource bfin_pint1_resources[] = {
+	{
+		.start = PINT1_MASK_SET,
+		.end = PINT1_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT1,
+		.end = IRQ_PINT1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint1_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_pint1_resources),
+	.resource = bfin_pint1_resources,
+};
+
+static struct resource bfin_pint2_resources[] = {
+	{
+		.start = PINT2_MASK_SET,
+		.end = PINT2_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT2,
+		.end = IRQ_PINT2,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint2_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_pint2_resources),
+	.resource = bfin_pint2_resources,
+};
+
+static struct resource bfin_pint3_resources[] = {
+	{
+		.start = PINT3_MASK_SET,
+		.end = PINT3_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT3,
+		.end = IRQ_PINT3,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint3_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_pint3_resources),
+	.resource = bfin_pint3_resources,
+};
+
+static struct resource bfin_pint4_resources[] = {
+	{
+		.start = PINT4_MASK_SET,
+		.end = PINT4_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT4,
+		.end = IRQ_PINT4,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint4_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 4,
+	.num_resources = ARRAY_SIZE(bfin_pint4_resources),
+	.resource = bfin_pint4_resources,
+};
+
+static struct resource bfin_pint5_resources[] = {
+	{
+		.start = PINT5_MASK_SET,
+		.end = PINT5_LATCH + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PINT5,
+		.end = IRQ_PINT5,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_pint5_device = {
+	.name = ADI_PINT_DEVNAME,
+	.id = 5,
+	.num_resources = ARRAY_SIZE(bfin_pint5_resources),
+	.resource = bfin_pint5_resources,
+};
+
+static struct resource bfin_gpa_resources[] = {
+	{
+		.start = PORTA_FER,
+		.end = PORTA_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{	/* optional */
+		.start = IRQ_PA0,
+		.end = IRQ_PA0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpa_pdata = {
+	.port_pin_base	= GPIO_PA0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 0,		/* PINT0 */
+	.pint_assign	= true,		/* PINT upper 16 bit */
+	.pint_map	= 0,		/* mapping mask in PINT */
+};
+
+static struct platform_device bfin_gpa_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_gpa_resources),
+	.resource = bfin_gpa_resources,
+	.dev = {
+		.platform_data = &bfin_gpa_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpb_resources[] = {
+	{
+		.start = PORTB_FER,
+		.end = PORTB_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PB0,
+		.end = IRQ_PB0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpb_pdata = {
+	.port_pin_base	= GPIO_PB0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 0,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpb_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_gpb_resources),
+	.resource = bfin_gpb_resources,
+	.dev = {
+		.platform_data = &bfin_gpb_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpc_resources[] = {
+	{
+		.start = PORTC_FER,
+		.end = PORTC_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PC0,
+		.end = IRQ_PC0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpc_pdata = {
+	.port_pin_base	= GPIO_PC0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 1,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpc_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_gpc_resources),
+	.resource = bfin_gpc_resources,
+	.dev = {
+		.platform_data = &bfin_gpc_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpd_resources[] = {
+	{
+		.start = PORTD_FER,
+		.end = PORTD_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PD0,
+		.end = IRQ_PD0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpd_pdata = {
+	.port_pin_base	= GPIO_PD0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 2,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpd_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_gpd_resources),
+	.resource = bfin_gpd_resources,
+	.dev = {
+		.platform_data = &bfin_gpd_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpe_resources[] = {
+	{
+		.start = PORTE_FER,
+		.end = PORTE_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PE0,
+		.end = IRQ_PE0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpe_pdata = {
+	.port_pin_base	= GPIO_PE0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 3,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpe_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 4,
+	.num_resources = ARRAY_SIZE(bfin_gpe_resources),
+	.resource = bfin_gpe_resources,
+	.dev = {
+		.platform_data = &bfin_gpe_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpf_resources[] = {
+	{
+		.start = PORTF_FER,
+		.end = PORTF_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PF0,
+		.end = IRQ_PF0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpf_pdata = {
+	.port_pin_base	= GPIO_PF0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 4,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpf_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 5,
+	.num_resources = ARRAY_SIZE(bfin_gpf_resources),
+	.resource = bfin_gpf_resources,
+	.dev = {
+		.platform_data = &bfin_gpf_pdata, /* Passed to driver */
+	},
+};
+
+static struct resource bfin_gpg_resources[] = {
+	{
+		.start = PORTG_FER,
+		.end = PORTG_MUX + 3,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_PG0,
+		.end = IRQ_PG0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct adi_pinctrl_gpio_platform_data bfin_gpg_pdata = {
+	.port_pin_base	= GPIO_PG0,
+	.port_width	= GPIO_BANKSIZE,
+	.pint_id	= 5,
+	.pint_assign	= false,
+	.pint_map	= 1,
+};
+
+static struct platform_device bfin_gpg_device = {
+	.name = ADI_GPIO_DEVNAME,
+	.id = 6,
+	.num_resources = ARRAY_SIZE(bfin_gpg_resources),
+	.resource = bfin_gpg_resources,
+	.dev = {
+		.platform_data = &bfin_gpg_pdata, /* Passed to driver */
+	},
+};
+
+#endif
+
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
@@ -1349,7 +1707,7 @@
 		.modalias = "m25p80", /* Name of spi_driver for this device */
 		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0, /* Framework bus number */
-		.chip_select = 1, /* SPI_SSEL1*/
+		.chip_select = MAX_CTRL_CS + GPIO_PD11, /* SPI_SSEL1 */
 		.platform_data = &bfin_spi_flash_data,
 		.controller_data = &spi_flash_chip_info,
 		.mode = SPI_MODE_3,
@@ -1362,7 +1720,7 @@
 		.irq			= IRQ_PD9,
 		.max_speed_hz		= 12500000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num		= 0,
-		.chip_select  		= 4,
+		.chip_select		= MAX_CTRL_CS + GPIO_PC15, /* SPI_SSEL4 */
 	},
 #endif
 #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1370,7 +1728,7 @@
 		.modalias = "spidev",
 		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0,
-		.chip_select = 1,
+		.chip_select = MAX_CTRL_CS + GPIO_PD11, /* SPI_SSEL1 */
 		.controller_data = &spidev_chip_info,
 	},
 #endif
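The chip_select rewrites in this table follow the convention these boards' SPI drivers use after this series: values below MAX_CTRL_CS pick a native SSEL line, while MAX_CTRL_CS + N drives GPIO N as a software chip select. Assuming that convention, the driver-side decode is essentially:

	/* sketch of the encoding; mirrors the bfin SPI drivers' convention,
	 * not a generic SPI-core API */
	static bool cs_is_gpio(u16 chip_select)
	{
		return chip_select >= MAX_CTRL_CS;
	}

	static unsigned int cs_to_gpio(u16 chip_select)
	{
		return chip_select - MAX_CTRL_CS;
	}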
@@ -1565,6 +1923,22 @@
 static struct platform_device *ezkit_devices[] __initdata = {
 
 	&bfin_dpmc,
+#if defined(CONFIG_PINCTRL_ADI2)
+	&bfin_pinctrl_device,
+	&bfin_pint0_device,
+	&bfin_pint1_device,
+	&bfin_pint2_device,
+	&bfin_pint3_device,
+	&bfin_pint4_device,
+	&bfin_pint5_device,
+	&bfin_gpa_device,
+	&bfin_gpb_device,
+	&bfin_gpc_device,
+	&bfin_gpd_device,
+	&bfin_gpe_device,
+	&bfin_gpf_device,
+	&bfin_gpg_device,
+#endif
 
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
 	&rtc_device,
@@ -1681,20 +2055,52 @@
 
 };
 
+/* Pin control settings */
+static struct pinctrl_map __initdata bfin_pinmux_map[] = {
+	/* per-device maps */
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0",  "pinctrl-adi2.0", NULL, "uart0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1",  "pinctrl-adi2.0", NULL, "uart1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.0",  "pinctrl-adi2.0", NULL, "uart0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.1",  "pinctrl-adi2.0", NULL, "uart1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-sdh.0",  "pinctrl-adi2.0", NULL, "rsi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("stmmaceth.0",  "pinctrl-adi2.0", NULL, "eth0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.0",  "pinctrl-adi2.0", NULL, "spi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.1",  "pinctrl-adi2.0", NULL, "spi1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.0",  "pinctrl-adi2.0", NULL, "twi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.1",  "pinctrl-adi2.0", NULL, "twi1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
+#if defined(CONFIG_VIDEO_MT9M114) || defined(CONFIG_VIDEO_MT9M114_MODULE)
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
+#elif defined(CONFIG_VIDEO_VS6624) || defined(CONFIG_VIDEO_VS6624_MODULE)
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
+#else
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.1",  "pinctrl-adi2.0", NULL, "sport1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.2",  "pinctrl-adi2.0", NULL, "sport2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.2",  "pinctrl-adi2.0", NULL, "sport2"),
+};
+
 static int __init ezkit_init(void)
 {
 	printk(KERN_INFO "%s(): registering device resources\n", __func__);
 
+	/* Initialize pinmuxing */
+	pinctrl_register_mappings(bfin_pinmux_map,
+				ARRAY_SIZE(bfin_pinmux_map));
+
 	i2c_register_board_info(0, bfin_i2c_board_info0,
 				ARRAY_SIZE(bfin_i2c_board_info0));
 	i2c_register_board_info(1, bfin_i2c_board_info1,
 				ARRAY_SIZE(bfin_i2c_board_info1));
 
-#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE)
-	if (!peripheral_request_list(pins, "emac0"))
-		printk(KERN_ERR "%s(): request emac pins failed\n", __func__);
-#endif
-
 	platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
 
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
@@ -1713,18 +2119,6 @@
 	&bfin_uart1_device,
 #endif
 #endif
-
-#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
-#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
-	&bfin_sport0_uart_device,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
-	&bfin_sport1_uart_device,
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
-	&bfin_sport2_uart_device,
-#endif
-#endif
 };
 
 void __init native_machine_early_platform_add_devices(void)
diff --git a/arch/blackfin/mach-bf609/include/mach/gpio.h b/arch/blackfin/mach-bf609/include/mach/gpio.h
index c32c8cc..0718251 100644
--- a/arch/blackfin/mach-bf609/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf609/include/mach/gpio.h
@@ -152,14 +152,6 @@
 	unsigned long revid;
 };
 
-struct gpio_port_s {
-	unsigned short fer;
-	unsigned short data;
-	unsigned short dir;
-	unsigned short inen;
-	unsigned int mux;
-};
-
 #endif
 
 #include <mach-common/ports-a.h>
diff --git a/arch/blackfin/mach-bf609/include/mach/irq.h b/arch/blackfin/mach-bf609/include/mach/irq.h
index fa0843d..d1cb6a8 100644
--- a/arch/blackfin/mach-bf609/include/mach/irq.h
+++ b/arch/blackfin/mach-bf609/include/mach/irq.h
@@ -298,7 +298,7 @@
 extern u8 sec_int_priority[];
 
 /*
- * bfin pint registers layout
+ * gpio pint registers layout
  */
 struct bfin_pint_regs {
 	u32 mask_set;
diff --git a/arch/blackfin/mach-bf609/include/mach/portmux.h b/arch/blackfin/mach-bf609/include/mach/portmux.h
index fe34191..c48bb71 100644
--- a/arch/blackfin/mach-bf609/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf609/include/mach/portmux.h
@@ -19,6 +19,7 @@
 #define P_MII0_CRS	(P_DEFINED | P_IDENT(GPIO_PC5) | P_FUNCT(0))
 #define P_MII0_ERxER	(P_DEFINED | P_IDENT(GPIO_PC4) | P_FUNCT(0))
 #define P_MII0_TxCLK	(P_DEFINED | P_IDENT(GPIO_PB14) | P_FUNCT(0))
+#define P_MII0_PTPPPS	(P_DEFINED | P_IDENT(GPIO_PB15) | P_FUNCT(0))
 
 #define P_RMII0 {\
 	P_MII0_ETxD0, \
@@ -30,6 +31,7 @@
 	P_MII0_TxCLK, \
 	P_MII0_PHYINT, \
 	P_MII0_CRS, \
+	P_MII0_PTPPPS, \
 	P_MII0_MDC, \
 	P_MII0_MDIO, 0}
 
@@ -44,6 +46,7 @@
 #define P_MII1_CRS	(P_DEFINED | P_IDENT(GPIO_PE13) | P_FUNCT(0))
 #define P_MII1_ERxER	(P_DEFINED | P_IDENT(GPIO_PE14) | P_FUNCT(0))
 #define P_MII1_TxCLK	(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(0))
+#define P_MII1_PTPPPS	(P_DEFINED | P_IDENT(GPIO_PC9) | P_FUNCT(0))
 
 #define P_RMII1 {\
 	P_MII1_ETxD0, \
@@ -55,6 +58,7 @@
 	P_MII1_TxCLK, \
 	P_MII1_PHYINT, \
 	P_MII1_CRS, \
+	P_MII1_PTPPPS, \
 	P_MII1_MDC, \
 	P_MII1_MDIO, 0}
 
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index d143fd8..ca75613 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -704,10 +704,9 @@
 	__irq_set_handler_locked(irq, handle);
 }
 
-static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
-extern void bfin_gpio_irq_prepare(unsigned gpio);
+#ifdef CONFIG_GPIO_ADI
 
-#if !BFIN_GPIO_PINT
+static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
 
 static void bfin_gpio_ack_irq(struct irq_data *d)
 {
@@ -821,15 +820,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
-{
-	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
-}
-#else
-# define bfin_gpio_set_wake NULL
-#endif
-
 static void bfin_demux_gpio_block(unsigned int irq)
 {
 	unsigned int gpio, mask;
@@ -896,279 +886,40 @@
 	bfin_demux_gpio_block(irq);
 }
 
-#else
-
-#define NR_PINT_BITS		32
-#define IRQ_NOT_AVAIL		0xFF
-
-#define PINT_2_BANK(x)		((x) >> 5)
-#define PINT_2_BIT(x)		((x) & 0x1F)
-#define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))
-
-static unsigned char irq2pint_lut[NR_PINTS];
-static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
-
-static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
-	(struct bfin_pint_regs *)PINT0_MASK_SET,
-	(struct bfin_pint_regs *)PINT1_MASK_SET,
-	(struct bfin_pint_regs *)PINT2_MASK_SET,
-	(struct bfin_pint_regs *)PINT3_MASK_SET,
-#ifdef CONFIG_BF60x
-	(struct bfin_pint_regs *)PINT4_MASK_SET,
-	(struct bfin_pint_regs *)PINT5_MASK_SET,
-#endif
-};
-
-inline unsigned int get_irq_base(u32 bank, u8 bmap)
-{
-	unsigned int irq_base;
-
-#ifndef CONFIG_BF60x
-	if (bank < 2) {		/*PA-PB */
-		irq_base = IRQ_PA0 + bmap * 16;
-	} else {		/*PC-PJ */
-		irq_base = IRQ_PC0 + bmap * 16;
-	}
-#else
-	irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
-#endif
-	return irq_base;
-}
-
-	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
-void init_pint_lut(void)
-{
-	u16 bank, bit, irq_base, bit_pos;
-	u32 pint_assign;
-	u8 bmap;
-
-	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
-
-	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
-
-		pint_assign = pint[bank]->assign;
-
-		for (bit = 0; bit < NR_PINT_BITS; bit++) {
-
-			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
-
-			irq_base = get_irq_base(bank, bmap);
-
-			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
-			bit_pos = bit + bank * NR_PINT_BITS;
-
-			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
-			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
-		}
-	}
-}
-
-static void bfin_gpio_ack_irq(struct irq_data *d)
-{
-	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
-	u32 pintbit = PINT_BIT(pint_val);
-	u32 bank = PINT_2_BANK(pint_val);
-
-	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
-		if (pint[bank]->invert_set & pintbit)
-			pint[bank]->invert_clear = pintbit;
-		else
-			pint[bank]->invert_set = pintbit;
-	}
-	pint[bank]->request = pintbit;
-
-}
-
-static void bfin_gpio_mask_ack_irq(struct irq_data *d)
-{
-	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
-	u32 pintbit = PINT_BIT(pint_val);
-	u32 bank = PINT_2_BANK(pint_val);
-
-	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
-		if (pint[bank]->invert_set & pintbit)
-			pint[bank]->invert_clear = pintbit;
-		else
-			pint[bank]->invert_set = pintbit;
-	}
-
-	pint[bank]->request = pintbit;
-	pint[bank]->mask_clear = pintbit;
-}
-
-static void bfin_gpio_mask_irq(struct irq_data *d)
-{
-	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
-
-	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
-}
-
-static void bfin_gpio_unmask_irq(struct irq_data *d)
-{
-	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
-	u32 pintbit = PINT_BIT(pint_val);
-	u32 bank = PINT_2_BANK(pint_val);
-
-	pint[bank]->mask_set = pintbit;
-}
-
-static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
-{
-	unsigned int irq = d->irq;
-	u32 gpionr = irq_to_gpio(irq);
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
-
-	if (pint_val == IRQ_NOT_AVAIL) {
-		printk(KERN_ERR
-		"GPIO IRQ %d :Not in PINT Assign table "
-		"Reconfigure Interrupt to Port Assignemt\n", irq);
-		return -ENODEV;
-	}
-
-	if (__test_and_set_bit(gpionr, gpio_enabled))
-		bfin_gpio_irq_prepare(gpionr);
-
-	bfin_gpio_unmask_irq(d);
-
-	return 0;
-}
-
-static void bfin_gpio_irq_shutdown(struct irq_data *d)
-{
-	u32 gpionr = irq_to_gpio(d->irq);
-
-	bfin_gpio_mask_irq(d);
-	__clear_bit(gpionr, gpio_enabled);
-	bfin_gpio_irq_free(gpionr);
-}
-
-static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
-{
-	unsigned int irq = d->irq;
-	int ret;
-	char buf[16];
-	u32 gpionr = irq_to_gpio(irq);
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
-	u32 pintbit = PINT_BIT(pint_val);
-	u32 bank = PINT_2_BANK(pint_val);
-
-	if (pint_val == IRQ_NOT_AVAIL)
-		return -ENODEV;
-
-	if (type == IRQ_TYPE_PROBE) {
-		/* only probe unenabled GPIO interrupt lines */
-		if (test_bit(gpionr, gpio_enabled))
-			return 0;
-		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
-	}
-
-	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
-		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
-
-		snprintf(buf, 16, "gpio-irq%d", irq);
-		ret = bfin_gpio_irq_request(gpionr, buf);
-		if (ret)
-			return ret;
-
-		if (__test_and_set_bit(gpionr, gpio_enabled))
-			bfin_gpio_irq_prepare(gpionr);
-
-	} else {
-		__clear_bit(gpionr, gpio_enabled);
-		return 0;
-	}
-
-	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
-		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
-	else
-		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */
-
-	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
-	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
-		if (gpio_get_value(gpionr))
-			pint[bank]->invert_set = pintbit;
-		else
-			pint[bank]->invert_clear = pintbit;
-	}
-
-	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
-		pint[bank]->edge_set = pintbit;
-		bfin_set_irq_handler(irq, handle_edge_irq);
-	} else {
-		pint[bank]->edge_clear = pintbit;
-		bfin_set_irq_handler(irq, handle_level_irq);
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
-static struct bfin_pm_pint_save save_pint_reg[NR_PINT_SYS_IRQS];
-static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
 
 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
-	u32 pint_irq;
-	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
-	u32 bank = PINT_2_BANK(pint_val);
+	return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
+}
 
-	switch (bank) {
-	case 0:
-		pint_irq = IRQ_PINT0;
-		break;
-	case 2:
-		pint_irq = IRQ_PINT2;
-		break;
-	case 3:
-		pint_irq = IRQ_PINT3;
-		break;
-	case 1:
-		pint_irq = IRQ_PINT1;
-		break;
-#ifdef CONFIG_BF60x
-	case 4:
-		pint_irq = IRQ_PINT4;
-		break;
-	case 5:
-		pint_irq = IRQ_PINT5;
-		break;
-#endif
-	default:
-		return -EINVAL;
-	}
+#else
 
-#ifndef SEC_GCTL
-	bfin_internal_set_wake(pint_irq, state);
+# define bfin_gpio_set_wake NULL
+
 #endif
 
-	return 0;
-}
+static struct irq_chip bfin_gpio_irqchip = {
+	.name = "GPIO",
+	.irq_ack = bfin_gpio_ack_irq,
+	.irq_mask = bfin_gpio_mask_irq,
+	.irq_mask_ack = bfin_gpio_mask_ack_irq,
+	.irq_unmask = bfin_gpio_unmask_irq,
+	.irq_disable = bfin_gpio_mask_irq,
+	.irq_enable = bfin_gpio_unmask_irq,
+	.irq_set_type = bfin_gpio_irq_type,
+	.irq_startup = bfin_gpio_irq_startup,
+	.irq_shutdown = bfin_gpio_irq_shutdown,
+	.irq_set_wake = bfin_gpio_set_wake,
+};
 
-void bfin_pint_suspend(void)
-{
-	u32 bank;
+#endif
 
-	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
-		save_pint_reg[bank].mask_set = pint[bank]->mask_set;
-		save_pint_reg[bank].assign = pint[bank]->assign;
-		save_pint_reg[bank].edge_set = pint[bank]->edge_set;
-		save_pint_reg[bank].invert_set = pint[bank]->invert_set;
-	}
-}
-
-void bfin_pint_resume(void)
-{
-	u32 bank;
-
-	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
-		pint[bank]->mask_set = save_pint_reg[bank].mask_set;
-		pint[bank]->assign = save_pint_reg[bank].assign;
-		pint[bank]->edge_set = save_pint_reg[bank].edge_set;
-		pint[bank]->invert_set = save_pint_reg[bank].invert_set;
-	}
-}
+#ifdef CONFIG_PM
 
 #ifdef SEC_GCTL
+static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
+
 static int sec_suspend(void)
 {
 	u32 bank;
@@ -1195,91 +946,9 @@
 	.suspend = sec_suspend,
 	.resume = sec_resume,
 };
-
-#endif
-#else
-# define bfin_gpio_set_wake NULL
 #endif
 
-void bfin_demux_gpio_irq(unsigned int inta_irq,
-			struct irq_desc *desc)
-{
-	u32 bank, pint_val;
-	u32 request, irq;
-	u32 level_mask;
-	int umask = 0;
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-
-	if (chip->irq_mask_ack) {
-		chip->irq_mask_ack(&desc->irq_data);
-	} else {
-		chip->irq_mask(&desc->irq_data);
-		if (chip->irq_ack)
-			chip->irq_ack(&desc->irq_data);
-	}
-
-	switch (inta_irq) {
-	case IRQ_PINT0:
-		bank = 0;
-		break;
-	case IRQ_PINT2:
-		bank = 2;
-		break;
-	case IRQ_PINT3:
-		bank = 3;
-		break;
-	case IRQ_PINT1:
-		bank = 1;
-		break;
-#ifdef CONFIG_BF60x
-	case IRQ_PINT4:
-		bank = 4;
-		break;
-	case IRQ_PINT5:
-		bank = 5;
-		break;
 #endif
-	default:
-		return;
-	}
-
-	pint_val = bank * NR_PINT_BITS;
-
-	request = pint[bank]->request;
-
-	level_mask = pint[bank]->edge_set & request;
-
-	while (request) {
-		if (request & 1) {
-			irq = pint2irq_lut[pint_val] + SYS_IRQS;
-			if (level_mask & PINT_BIT(pint_val)) {
-				umask = 1;
-				chip->irq_unmask(&desc->irq_data);
-			}
-			bfin_handle_irq(irq);
-		}
-		pint_val++;
-		request >>= 1;
-	}
-
-	if (!umask)
-		chip->irq_unmask(&desc->irq_data);
-}
-#endif
-
-static struct irq_chip bfin_gpio_irqchip = {
-	.name = "GPIO",
-	.irq_ack = bfin_gpio_ack_irq,
-	.irq_mask = bfin_gpio_mask_irq,
-	.irq_mask_ack = bfin_gpio_mask_ack_irq,
-	.irq_unmask = bfin_gpio_unmask_irq,
-	.irq_disable = bfin_gpio_mask_irq,
-	.irq_enable = bfin_gpio_unmask_irq,
-	.irq_set_type = bfin_gpio_irq_type,
-	.irq_startup = bfin_gpio_irq_startup,
-	.irq_shutdown = bfin_gpio_irq_shutdown,
-	.irq_set_wake = bfin_gpio_set_wake,
-};
 
 void init_exception_vectors(void)
 {
@@ -1331,17 +1000,6 @@
 
 	local_irq_disable();
 
-#if BFIN_GPIO_PINT
-# ifdef CONFIG_PINTx_REASSIGN
-	pint[0]->assign = CONFIG_PINT0_ASSIGN;
-	pint[1]->assign = CONFIG_PINT1_ASSIGN;
-	pint[2]->assign = CONFIG_PINT2_ASSIGN;
-	pint[3]->assign = CONFIG_PINT3_ASSIGN;
-# endif
-	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
-	init_pint_lut();
-#endif
-
 	for (irq = 0; irq <= SYS_IRQS; irq++) {
 		if (irq <= IRQ_CORETMR)
 			irq_set_chip(irq, &bfin_core_irqchip);
@@ -1349,12 +1007,8 @@
 			irq_set_chip(irq, &bfin_internal_irqchip);
 
 		switch (irq) {
-#if BFIN_GPIO_PINT
-		case IRQ_PINT0:
-		case IRQ_PINT1:
-		case IRQ_PINT2:
-		case IRQ_PINT3:
-#elif defined(BF537_FAMILY)
+#if !BFIN_GPIO_PINT
+#if defined(BF537_FAMILY)
 		case IRQ_PH_INTA_MAC_RX:
 		case IRQ_PF_INTA_PG_INTA:
 #elif defined(BF533_FAMILY)
@@ -1372,6 +1026,7 @@
 #endif
 			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
 			break;
+#endif
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 		case IRQ_MAC_ERROR:
 			irq_set_chained_handler(irq,
@@ -1419,10 +1074,12 @@
 					 handle_level_irq);
 #endif
 	/* if configured as edge, then will be changed to do_edge_IRQ */
+#ifdef CONFIG_GPIO_ADI
 	for (irq = GPIO_IRQ_BASE;
 		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
 		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
 					 handle_level_irq);
+#endif
 	bfin_write_IMASK(0);
 	CSYNC();
 	ilat = bfin_read_ILAT();
@@ -1525,19 +1182,6 @@
 
 	local_irq_disable();
 
-#if BFIN_GPIO_PINT
-# ifdef CONFIG_PINTx_REASSIGN
-	pint[0]->assign = CONFIG_PINT0_ASSIGN;
-	pint[1]->assign = CONFIG_PINT1_ASSIGN;
-	pint[2]->assign = CONFIG_PINT2_ASSIGN;
-	pint[3]->assign = CONFIG_PINT3_ASSIGN;
-	pint[4]->assign = CONFIG_PINT4_ASSIGN;
-	pint[5]->assign = CONFIG_PINT5_ASSIGN;
-# endif
-	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
-	init_pint_lut();
-#endif
-
 	for (irq = 0; irq <= SYS_IRQS; irq++) {
 		if (irq <= IRQ_CORETMR) {
 			irq_set_chip_and_handler(irq, &bfin_core_irqchip,
@@ -1546,9 +1190,6 @@
 			if (irq == IRQ_CORETMR)
 				irq_set_handler(irq, handle_percpu_irq);
 #endif
-		} else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
-			irq_set_chip(irq, &bfin_sec_irqchip);
-			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
 		} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
 			irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
 				handle_percpu_irq);
@@ -1563,10 +1204,6 @@
 			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
 		}
 	}
-	for (irq = GPIO_IRQ_BASE;
-		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
-		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
-					handle_level_irq);
 
 	bfin_write_IMASK(0);
 	CSYNC();
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 87bfe54..1387a94 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -27,7 +27,7 @@
 
 void bfin_pm_suspend_standby_enter(void)
 {
-#ifndef CONFIG_BF60x
+#if !BFIN_GPIO_PINT
 	bfin_pm_standby_setup();
 #endif
 
@@ -41,7 +41,7 @@
 # endif
 #endif
 
-#ifndef CONFIG_BF60x
+#if !BFIN_GPIO_PINT
 	bfin_pm_standby_restore();
 #endif
 
@@ -128,6 +128,7 @@
 					if ((status & 0x3) != 0x3)
 						continue;
 
+
 					/* construct the address using the tag */
 					addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5);
 
@@ -140,11 +141,14 @@
 
 int bfin_pm_suspend_mem_enter(void)
 {
-	int wakeup, ret;
+	int ret;
+#ifndef CONFIG_BF60x
+	int wakeup;
+#endif
 
 	unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
 					 + L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH,
-					  GFP_KERNEL);
+					  GFP_ATOMIC);
 
 	if (memptr == NULL) {
 		panic("bf53x_suspend_l1_mem malloc failed");
@@ -170,10 +174,8 @@
 		return ret;
 	}
 
+#ifdef CONFIG_GPIO_ADI
 	bfin_gpio_pm_hibernate_suspend();
-
-#if BFIN_GPIO_PINT
-	bfin_pint_suspend();
 #endif
 
 #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
@@ -194,11 +196,9 @@
 	_enable_icplb();
 	_enable_dcplb();
 
-#if BFIN_GPIO_PINT
-	bfin_pint_resume();
-#endif
-
+#ifdef CONFIG_GPIO_ADI
 	bfin_gpio_pm_hibernate_restore();
+#endif
 	blackfin_dma_resume();
 
 	kfree(memptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 82f301c..2bbae07 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -146,6 +146,7 @@
 
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
+	smp_rmb();
 	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
 	while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
 		msg = 0;
@@ -161,18 +162,20 @@
 			case BFIN_IPI_CALL_FUNC:
 				generic_smp_call_function_interrupt();
 				break;
-
 			case BFIN_IPI_CALL_FUNC_SINGLE:
 				generic_smp_call_function_single_interrupt();
 				break;
-
 			case BFIN_IPI_CPU_STOP:
 				ipi_cpu_stop(cpu);
 				break;
+			default:
+				goto out;
 			}
 			atomic_dec(&bfin_ipi_data->count);
 		} while (msg < BITS_PER_LONG);
+
 	}
+out:
 	return IRQ_HANDLED;
 }
 
@@ -198,10 +201,11 @@
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
 		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
-		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
-
 	local_irq_restore(flags);
+	smp_wmb();
+	for_each_cpu(cpu, cpumask)
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
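The smp.c changes are an ordering fix for the IPI path: the payload in bfin_ipi must be visible to the target CPU before the interrupt fires, so the sender now publishes all bits, issues smp_wmb(), and only then kicks the CPUs; the handler pairs that with smp_rmb() before consuming. Reduced to its essentials:

	/* sender */
	atomic_set_mask(1 << msg, &ipi_data->bits);	/* publish payload */
	atomic_inc(&ipi_data->count);
	smp_wmb();					/* order payload before kick */
	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);	/* raise the interrupt */

	/* receiver (IPI handler) */
	smp_rmb();					/* pairs with sender's smp_wmb() */
	pending = atomic_xchg(&ipi_data->bits, 0);	/* consume payload */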
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 957dd00..77ea09b 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -36,9 +36,6 @@
 config GENERIC_BUG
 	def_bool y
 
-config COMMON_CLKDEV
-	def_bool y
-
 config C6X_BIG_KERNEL
 	bool "Build a big kernel"
 	help
@@ -105,10 +102,6 @@
 
 source "arch/c6x/platforms/Kconfig"
 
-config TMS320C6X_CACHES_ON
-	bool "L2 cache support"
-	default y
-
 config KERNEL_RAM_BASE_ADDRESS
 	hex "Virtual address of memory base"
 	default 0xe0000000 if SOC_TMS320C6455
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 02380be..9c957c8 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -130,13 +130,11 @@
 
 config ETRAXFS
 	bool "ETRAX-FS-V32"
-	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Support CRIS V32.
 
 config CRIS_MACH_ARTPEC3
         bool "ARTPEC-3"
-	select CPU_FREQ_TABLE if CPU_FREQ
         help
           Support Axis ARTPEC-3.
 
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index 5d3047e..4353cf2 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -3,6 +3,7 @@
 
 #include <asm/page.h>   /* for __va, __pa */
 #include <arch/io.h>
+#include <asm-generic/iomap.h>
 #include <linux/kernel.h>
 
 struct cris_io_operations
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h
index 146da90..f666734 100644
--- a/arch/cris/include/asm/pci.h
+++ b/arch/cris/include/asm/pci.h
@@ -11,7 +11,6 @@
 
 #define pcibios_assign_all_busses(void) 1
 
-extern unsigned long pci_mem_start;
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index 6da975d..235ece4 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -32,7 +32,12 @@
 {
 	struct page *pte;
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-	pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
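This hunk is one instance of a pattern repeated across the architectures below (frv, hexagon, ia64, m32r, m68k, metag, microblaze): pgtable_page_ctor() can now fail, because with split page-table locks the spinlock may have to be allocated separately, so every pte_alloc_one() must check it and free the page on failure. The canonical shape:

	static pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		struct page *pte;

		pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {	/* may fail to allocate the ptlock */
			__free_page(pte);
			return NULL;
		}
		return pte;
	}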
 
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index eb723e5..13829aa 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -78,6 +78,8 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_SOCKET_H */
 
 
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index f0cb1c3..5d42997 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -76,5 +76,7 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h
index 76c4e73..a7e487fe 100644
--- a/arch/frv/mb93090-mb00/pci-frv.h
+++ b/arch/frv/mb93090-mb00/pci-frv.h
@@ -30,7 +30,6 @@
 
 /* pci-vdk.c */
 
-extern int __nongpreldata pcibios_last_bus;
 extern struct pci_ops *__nongpreldata pci_root_ops;
 
 /* pci-irq.c */
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index deb6784..efa5d65 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -25,7 +25,6 @@
 
 unsigned int __nongpreldata pci_probe = 1;
 
-int  __nongpreldata pcibios_last_bus = -1;
 struct pci_ops *__nongpreldata pci_root_ops;
 
 /*
@@ -220,37 +219,6 @@
 }
 
 /*
- * Discover remaining PCI buses in case there are peer host bridges.
- * We use the number of last PCI bus provided by the PCI BIOS.
- */
-static void __init pcibios_fixup_peer_bridges(void)
-{
-	struct pci_bus bus;
-	struct pci_dev dev;
-	int n;
-	u16 l;
-
-	if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
-		return;
-	printk("PCI: Peer bridge fixup\n");
-	for (n=0; n <= pcibios_last_bus; n++) {
-		if (pci_find_bus(0, n))
-			continue;
-		bus.number = n;
-		bus.ops = pci_root_ops;
-		dev.bus = &bus;
-		for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
-			if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
-			    l != 0x0000 && l != 0xffff) {
-				printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
-				printk("PCI: Discovered peer bus %02x\n", n);
-				pci_scan_bus(n, pci_root_ops, NULL);
-				break;
-			}
-	}
-}
-
-/*
  * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
  */
 
@@ -418,7 +386,6 @@
 	pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources);
 
 	pcibios_irq_init();
-	pcibios_fixup_peer_bridges();
 	pcibios_fixup_irqs();
 	pcibios_resource_survey();
 
@@ -432,9 +399,6 @@
 	if (!strcmp(str, "off")) {
 		pci_probe = 0;
 		return NULL;
-	} else if (!strncmp(str, "lastbus=", 8)) {
-		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
-		return NULL;
 	}
 	return str;
 }
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f6084bc..41907d2 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -37,11 +37,15 @@
 #else
 	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-	if (page) {
-		clear_highpage(page);
-		pgtable_page_ctor(page);
-		flush_dcache_page(page);
+	if (!page)
+		return NULL;
+
+	clear_highpage(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
 	}
+	flush_dcache_page(page);
 	return page;
 }
 
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 99041b0..09df260 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,7 +4,6 @@
 config HEXAGON
 	def_bool y
 	select HAVE_OPROFILE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 679bf6d..4c9d382 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -65,10 +65,12 @@
 	struct page *pte;
 
 	pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
-	if (pte)
-		pgtable_page_ctor(pte);
-
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7740ab1..4e4119b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -6,6 +6,7 @@
 
 config IA64
 	bool
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select PCI if (!IA64_HP_SIM)
 	select ACPI if (!IA64_HP_SIM)
 	select PM if (!IA64_HP_SIM)
@@ -343,7 +344,6 @@
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N.  If you have a system with more
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 989dd3f..db95f57 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -234,10 +234,6 @@
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33
 
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	1
-
 struct kvm;
 struct kvm_vcpu;
 
@@ -480,7 +476,7 @@
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 96a8d92..5767cdf 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -91,7 +91,10 @@
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(0, NULL, pg);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index e0a899a..5a84b3a 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -319,7 +319,7 @@
 	regs->loadrs = 0;									\
 	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
 	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
-	if (unlikely(!get_dumpable(current->mm))) {							\
+	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {	\
 		/*										\
 		 * Zap scratch regs to avoid leaking bits between processes with different	\
 		 * uid/privileges.								\
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
new file mode 100644
index 0000000..96e42f9
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
+#define _ASM_IA64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	void *vstart = (void*)__get_free_pages(flags, get_order(size));
+	*dma_handle = virt_to_phys(vstart);
+	return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
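The new header stubs out the Xen DMA hooks for ia64: coherent allocations degrade to plain page allocations and the map/sync hooks are no-ops. On the generic side the expected usage is roughly:

	dma_addr_t dma;
	void *buf;

	/* allocate pages suitable for handing to a Xen backend */
	buf = xen_alloc_coherent_pages(dev, size, &dma, GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf / dma ... */
	xen_free_coherent_pages(dev, size, buf, dma, NULL);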
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 556d070..c25302f 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -85,4 +85,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5eb71d2..59d52e3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -882,40 +882,10 @@
 		set_cpu_possible(i, true);
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	struct acpi_madt_local_sapic *lsapic;
 	cpumask_t tmp_map;
-	int cpu, physid;
-
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-		return -EINVAL;
-
-	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
-
-	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER)
-	{
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	physid = ((lsapic->id << 8) | (lsapic->eid));
-
-	kfree(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
+	int cpu;
 
 	cpumask_complement(&tmp_map, cpu_present_mask);
 	cpu = cpumask_first(&tmp_map);
@@ -934,9 +904,9 @@
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	return _acpi_map_lsapic(handle, pcpu);
+	return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
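With this change the _MAT/MADT parsing moves out of the arch code into the ACPI core, which hands the already-decoded physical id to acpi_map_lsapic(). A caller now looks roughly like this (return-value convention assumed from the code above, 0 on success):

	int cpu;

	/* physid comes pre-parsed from the processor's _MAT object */
	if (acpi_map_lsapic(handle, physid, &cpu))
		return -ENODEV;		/* no free logical CPU slot */
	pr_info("mapped physical id %d to logical cpu %d\n", physid, cpu);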
 
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
index bac1639..04bc8fd 100644
--- a/arch/ia64/kernel/elfcore.c
+++ b/arch/ia64/kernel/elfcore.c
@@ -11,8 +11,7 @@
 	return GATE_EHDR->e_phnum;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-			       unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -35,15 +34,13 @@
 			phdr.p_offset += ofs;
 		}
 		phdr.p_paddr = 0; /* match other core phdrs */
-		*size += sizeof(phdr);
-		if (*size > limit || !dump_write(file, &phdr, sizeof(phdr)))
+		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
 			return 0;
 	}
 	return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-			      unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -54,8 +51,7 @@
 			void *addr = (void *)gate_phdrs[i].p_vaddr;
 			size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
 
-			*size += memsz;
-			if (*size > limit || !dump_write(file, addr, memsz))
+			if (!dump_emit(cprm, addr, memsz))
 				return 0;
 			break;
 		}
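The elfcore.c conversion swaps dump_write(), which forced each arch to thread a size/limit pair through the writeout, for dump_emit(), which keeps that bookkeeping inside struct coredump_params. Callers reduce to:

	/* dump_emit() returns nonzero on success and accounts the written
	 * size against the core limit itself */
	if (!dump_emit(cprm, &phdr, sizeof(phdr)))
		return 0;	/* truncated or write error */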
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f8280a7..074fde4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -947,7 +947,7 @@
 	case KPROBE_HIT_SSDONE:
 		/*
 		 * We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(cur);
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 3637e03..33cab9a 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -105,7 +105,7 @@
 }
 
 int
-copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
+copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from)
 {
 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
 		return -EFAULT;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bdfd878..985bf80 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1550,12 +1550,13 @@
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b6f7f43..88504ab 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -357,9 +357,7 @@
 
 	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
 	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
 
 	do {
 		pgd_t *pgd;
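pgdat_end_pfn() is the new accessor for the end-of-node computation that was open-coded here; the metag hunk further down makes the same substitution. The helper is a one-liner in include/linux/mmzone.h (reproduced from memory):

	static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
	{
		return pgdat->node_start_pfn + pgdat->node_spanned_pages;
	}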
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 75661fb..09ef94a 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -275,7 +275,6 @@
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a979a41..9fc78fc 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -45,7 +45,7 @@
 		   Flush all TLB and start new cycle. */
 		local_flush_tlb_all();
 		/* Fix version if needed.
-		   Note that we avoid version #0 to distingush NO_CONTEXT. */
+		   Note that we avoid version #0 to distinguish NO_CONTEXT. */
 		if (!mc)
 			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index 0fc7361..2d55a06 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -43,7 +43,12 @@
 {
 	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 
-	pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 24be7c8..5296665 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 311a300..75f25a8 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -1,6 +1,7 @@
 config M68K
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
 	select HAVE_IDE
 	select HAVE_AOUT if MMU
 	select HAVE_DEBUG_BUGVERBOSE
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 313f3dd..f9924fb 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -56,6 +56,10 @@
 
 	if (!page)
 		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	pte = kmap(page);
 	if (pte) {
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 2f02f26..24bcba4 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -29,18 +29,22 @@
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	struct page *page;
 	pte_t *pte;
 
+	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	if(!page)
 		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	pte = kmap(page);
 	__flush_page_to_ram(pte);
 	flush_tlb_kernel_page(pte);
 	nocache_page(pte);
 	kunmap(page);
-	pgtable_page_ctor(page);
 	return page;
 }
 
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 48d80d5..f868506 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -59,7 +59,10 @@
 		return NULL;
 
 	clear_highpage(page);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 
 }
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 36368eb..e56abd2 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -111,7 +111,6 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on METAG_META21 && METAG_META21_MMU
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one thread running
 	  Linux. If you have a system with only one thread running Linux,
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
index 275d928..3104df0 100644
--- a/arch/metag/include/asm/pgalloc.h
+++ b/arch/metag/include/asm/pgalloc.h
@@ -52,8 +52,12 @@
 {
 	struct page *pte;
 	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 8c00ded..db589ad 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -305,9 +305,7 @@
 
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
-				ClearPageReserved(page);
-
-				__free_page(page);
+				__free_reserved_page(page);
 				continue;
 			}
 		}
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 249fff6..3cd6288f 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -148,7 +148,7 @@
 	if (!p->node_spanned_pages)
 		return;
 
-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 #ifdef CONFIG_HIGHMEM
 	if (end_pfn > max_low_pfn)
 		end_pfn = max_low_pfn;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 655e1ca..e23cccd 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,5 +1,6 @@
 config MICROBLAZE
 	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_FUNCTION_TRACER
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index ebd3579..7fdf7fa 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -122,8 +122,13 @@
 #endif
 
 	ptepage = alloc_pages(flags, 0);
-	if (ptepage)
-		clear_highpage(ptepage);
+	if (!ptepage)
+		return NULL;
+	clear_highpage(ptepage);
+	if (!pgtable_page_ctor(ptepage)) {
+		__free_page(ptepage);
+		return NULL;
+	}
 	return ptepage;
 }
 
@@ -158,8 +163,9 @@
 	__free_page(ptepage);
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
 {
+	pgtable_page_dtor(ptepage);
 	__free_page(ptepage);
 }
 
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 5226b09..dbbf224 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -176,8 +176,7 @@
 	page = virt_to_page(vaddr);
 
 	do {
-		ClearPageReserved(page);
-		__free_page(page);
+		__free_reserved_page(page);
 		page++;
 	} while (size -= PAGE_SIZE);
 #else
@@ -194,9 +193,7 @@
 			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
 			if (pfn_valid(pfn)) {
 				page = pfn_to_page(pfn);
-
-				ClearPageReserved(page);
-				__free_page(page);
+				__free_reserved_page(page);
 			}
 		}
 		vaddr += PAGE_SIZE;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 17cc7ff..650de39 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,6 +1,7 @@
 config MIPS
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
@@ -2125,7 +2126,6 @@
 config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4d6fa0b..3296696 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -27,13 +27,6 @@
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
-/* Don't support huge pages */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-
-/* We don't currently support large pages. */
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	1
-
 
 
 /* Special address that contains the comm page, used for reducing # of traps */
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index a76fe5a..df69bfd 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -192,13 +192,13 @@
 	/* Number of packets processed by PIP */
 	uint32_t packets;
 	/*
-	 * Number of indentified L2 multicast packets.	Does not
+	 * Number of identified L2 multicast packets.	Does not
 	 * include broadcast packets.  Only includes packets whose
 	 * parse mode is SKIP_TO_L2
 	 */
 	uint32_t multicast_packets;
 	/*
-	 * Number of indentified L2 broadcast packets.	Does not
+	 * Number of identified L2 broadcast packets.	Does not
 	 * include multicast packets.  Only includes packets whose
 	 * parse mode is SKIP_TO_L2
 	 */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 881d18b4..b336037 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -80,9 +80,12 @@
 	struct page *pte;
 
 	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
-	if (pte) {
-		clear_highpage(pte);
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	clear_highpage(pte);
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
 	}
 	return pte;
 }
diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h
index 31575e2f..02d645d 100644
--- a/arch/mips/include/uapi/asm/errno.h
+++ b/arch/mips/include/uapi/asm/errno.h
@@ -102,7 +102,7 @@
 #define EWOULDBLOCK	EAGAIN	/* Operation would block */
 #define EALREADY	149	/* Operation already in progress */
 #define EINPROGRESS	150	/* Operation now in progress */
-#define ESTALE		151	/* Stale NFS file handle */
+#define ESTALE		151	/* Stale file handle */
 #define ECANCELED	158	/* AIO operation canceled */
 
 /*
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 61c01f0..0df9787 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -94,4 +94,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 57de8b7..1905a41 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -314,7 +314,7 @@
 	return ret;
 }
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index a7b0445..73b3482 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -198,12 +198,13 @@
 	return -ENOIOCTLCMD;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 6aaa160..8bde923 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -181,7 +181,6 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	default y
-	select USE_GENERIC_SMP_HELPERS
 	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
 	---help---
 	  This enables support for systems with more than one CPU. If you have
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index c67c2b5..75dbe69 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -71,7 +71,7 @@
 		local_flush_tlb_all();
 
 		/* fix the TLB version if needed (we avoid version #0 so as to
-		 * distingush MMU_NO_CONTEXT) */
+		 * distinguish MMU_NO_CONTEXT) */
 		if (!mc)
 			*pmc = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 6f31cc0..1663238 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -44,7 +44,6 @@
 #define pcibios_assign_all_busses()	0
 #endif
 
-extern unsigned long pci_mem_start;
 #define PCIBIOS_MIN_IO		0xBE000004
 #define PCIBIOS_MIN_MEM		0xB8000000
 
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index 146bacf..0f25d5f 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -46,6 +46,7 @@
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+	pgtable_page_dtor(pte);
 	__free_page(pte);
 }
 
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index e2a2b203..71dedca 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index ebac9c1..2ad7f32 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -35,9 +35,6 @@
 
 struct mn10300_cpuinfo boot_cpu_data;
 
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x18000000;
-
 static char __initdata cmd_line[COMMAND_LINE_SIZE];
 char redboot_command_line[COMMAND_LINE_SIZE] =
 	"console=ttyS0,115200 root=/dev/mtdblock3 rw";
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index bd9ada6..e77a7c7 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -78,8 +78,13 @@
 #else
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-	if (pte)
-		clear_highpage(pte);
+	if (!pte)
+		return NULL;
+	clear_highpage(pte);
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 7fa66a0..9e17aca 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -35,7 +35,6 @@
 
 /* pci.c */
 
-extern int pcibios_last_bus;
 extern struct pci_ops *pci_root_ops;
 
 extern struct irq_routing_table *pcibios_get_irq_routing_table(void);
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index e37fac0..6b4339f 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -24,7 +24,6 @@
 
 unsigned int pci_probe = 1;
 
-int pcibios_last_bus = -1;
 struct pci_ops *pci_root_ops;
 
 /*
@@ -392,10 +391,6 @@
 	if (!strcmp(str, "off")) {
 		pci_probe = 0;
 		return NULL;
-
-	} else if (!strncmp(str, "lastbus=", 8)) {
-		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
-		return NULL;
 	}
 
 	return str;
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 4739b83..89076a6 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -24,7 +24,7 @@
 LDFLAGS_vmlinux :=
 LIBGCC 		:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
-KBUILD_CFLAGS	+= -pipe -ffixed-r10
+KBUILD_CFLAGS	+= -pipe -ffixed-r10 -D__linux__
 
 ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
 	KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
index ea172bd..42fe530 100644
--- a/arch/openrisc/configs/or1ksim_defconfig
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -1,9 +1,9 @@
 CONFIG_CROSS_COMPILE="or32-linux-"
+CONFIG_NO_HZ=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
 # CONFIG_TIMERFD is not set
@@ -15,7 +15,6 @@
 CONFIG_MODULES=y
 # CONFIG_BLOCK is not set
 CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
-CONFIG_NO_HZ=y
 CONFIG_HZ_100=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -39,11 +38,8 @@
 # CONFIG_FW_LOADER is not set
 CONFIG_PROC_DEVICETREE=y
 CONFIG_NETDEVICES=y
-CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_ETHOC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_MICREL_PHY=y
 # CONFIG_WLAN is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
@@ -55,11 +51,9 @@
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 7840562..da1951a 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -65,6 +65,7 @@
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
+generic-y += vga.h
 generic-y += word-at-a-time.h
 generic-y += xor.h
 generic-y += preempt.h
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 05c39ec..21484e5b 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -78,8 +78,13 @@
 {
 	struct page *pte;
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (pte)
-		clear_page(page_address(pte));
+	if (!pte)
+		return NULL;
+	clear_page(page_address(pte));
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
@@ -90,6 +95,7 @@
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+	pgtable_page_dtor(pte);
 	__free_page(pte);
 }
 
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c
index 10ff50f..ef872ae 100644
--- a/arch/openrisc/kernel/module.c
+++ b/arch/openrisc/kernel/module.c
@@ -47,12 +47,10 @@
 			*location = value;
 			break;
 		case R_OR32_CONST:
-			location = (uint16_t *)location + 1;
-			*((uint16_t *)location) = (uint16_t) (value);
+			*((uint16_t *)location + 1) = value;
 			break;
 		case R_OR32_CONSTH:
-			location = (uint16_t *)location + 1;
-			*((uint16_t *)location) = (uint16_t) (value >> 16);
+			*((uint16_t *)location + 1) = value >> 16;
 			break;
 		case R_OR32_JUMPTARG:
 			value -= (uint32_t)location;
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 09a769b..4fc7ccc 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -40,6 +40,7 @@
 #include <linux/device.h>
 #include <linux/of_platform.h>
 
+#include <asm/sections.h>
 #include <asm/segment.h>
 #include <asm/pgtable.h>
 #include <asm/types.h>
@@ -75,7 +76,7 @@
 
 	ram_start_pfn = PFN_UP(memory_start);
 	/* free_ram_start_pfn is first page after kernel */
-	free_ram_start_pfn = PFN_UP(__pa(&_end));
+	free_ram_start_pfn = PFN_UP(__pa(_end));
 	ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
 
 	max_pfn = ram_end_pfn;
@@ -207,15 +208,15 @@
  * Falls back on built-in device tree in case null pointer is passed.
  */
 
-void __init or32_early_setup(unsigned int fdt)
+void __init or32_early_setup(void *fdt)
 {
-	if (fdt) {
-		early_init_devtree((void*) fdt);
-		printk(KERN_INFO "FDT at 0x%08x\n", fdt);
-	} else {
-		early_init_devtree(__dtb_start);
-		printk(KERN_INFO "Compiled-in FDT at %p\n", __dtb_start);
+	if (fdt)
+		pr_info("FDT at %p\n", fdt);
+	else {
+		fdt = __dtb_start;
+		pr_info("Compiled-in FDT at %p\n", fdt);
 	}
+	early_init_devtree(fdt);
 }
 
 static int __init openrisc_device_probe(void)
@@ -288,10 +289,10 @@
 	setup_cpuinfo();
 
 	/* process 1's initial memory region is the kernel code/data */
-	init_mm.start_code = (unsigned long)&_stext;
-	init_mm.end_code = (unsigned long)&_etext;
-	init_mm.end_data = (unsigned long)&_edata;
-	init_mm.brk = (unsigned long)&_end;
+	init_mm.start_code = (unsigned long)_stext;
+	init_mm.end_code = (unsigned long)_etext;
+	init_mm.end_data = (unsigned long)_edata;
+	init_mm.brk = (unsigned long)_end;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	initrd_start = (unsigned long)&__initrd_start;
diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h
index ee842a2..70b9ce4 100644
--- a/arch/openrisc/kernel/vmlinux.h
+++ b/arch/openrisc/kernel/vmlinux.h
@@ -1,10 +1,8 @@
 #ifndef __OPENRISC_VMLINUX_H_
 #define __OPENRISC_VMLINUX_H_
 
-extern char _stext, _etext, _edata, _end;
 #ifdef CONFIG_BLK_DEV_INITRD
 extern char __initrd_start, __initrd_end;
-extern char __initramfs_start;
 #endif
 
 extern u32 __dtb_start[];
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7dcde53..b5f1858 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,6 +1,7 @@
 config PARISC
 	def_bool y
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_FUNCTION_TRACER if 64BIT
@@ -226,7 +227,6 @@
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index fc987a1..f213f5b 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -121,8 +121,12 @@
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-	if (page)
-		pgtable_page_ctor(page);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index 135ad60..f3a8aa5 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -37,7 +37,7 @@
 #define	EBADMSG		67	/* Not a data message */
 #define	EUSERS		68	/* Too many users */
 #define	EDQUOT		69	/* Quota exceeded */
-#define	ESTALE		70	/* Stale NFS file handle */
+#define	ESTALE		70	/* Stale file handle */
 #define	EREMOTE		71	/* Object is remote */
 #define	EOVERFLOW	72	/* Value too large for defined data type */
 
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 71700e6..7c614d0 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -75,6 +75,8 @@
 
 #define SO_BUSY_POLL		0x4027
 
+#define SO_MAX_PACING_RATE	0x4048
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 2a625fb..50dfafc 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -219,7 +219,7 @@
 	 * init_data correctly */
 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_HIGHMEM,
-				    PAGE_KERNEL_RWX, -1,
+				    PAGE_KERNEL_RWX, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
 
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 6c6a271..984abbe 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -319,7 +319,7 @@
 }
 
 int
-copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	compat_uptr_t addr;
 	compat_int_t val;
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index 72ab41a..af51d4c 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -34,7 +34,7 @@
 
 /* ELF32 signal handling */
 
-int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from);
+int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
 int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
 
 /* In a deft move of uber-hackery, we decide to carry the top half of all
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2f898d6..b44b52c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -85,6 +85,7 @@
 config PPC
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select BINFMT_ELF
 	select OF
 	select OF_EARLY_FLATTREE
@@ -106,7 +107,6 @@
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
 	select HAVE_DEBUG_KMEMLEAK
 	select GENERIC_ATOMIC64 if PPC32
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1..856f8de 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f0..894662a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);			\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);			\
 	li	r12,n;							\
@@ -236,7 +255,7 @@
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b8a4b9b..f49ddb1 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -93,6 +93,7 @@
 #define CSOR_NAND_PGS_512		0x00000000
 #define CSOR_NAND_PGS_2K		0x00080000
 #define CSOR_NAND_PGS_4K		0x00100000
+#define CSOR_NAND_PGS_8K		0x00180000
 /* Spare region Size */
 #define CSOR_NAND_SPRZ_MASK		0x0000E000
 #define CSOR_NAND_SPRZ_SHIFT		13
@@ -102,6 +103,7 @@
 #define CSOR_NAND_SPRZ_210		0x00006000
 #define CSOR_NAND_SPRZ_218		0x00008000
 #define CSOR_NAND_SPRZ_224		0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT		0x0000C000
 /* Pages Per Block */
 #define CSOR_NAND_PB_MASK		0x00000700
 #define CSOR_NAND_PB_SHIFT		8
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7..1bd92fd 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f..4a594b7 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6c..c720e0b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a..bf0fa8b0a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c..0bd9348 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,6 +101,7 @@
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
@@ -108,14 +109,14 @@
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32     sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb3..dd8f615 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS                        64
 
-#define KVMPPC_INST_EHPRIV	0x7c00021c
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..237d1d2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@
 
 #endif
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -89,6 +86,9 @@
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -224,15 +224,15 @@
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -256,7 +256,10 @@
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -267,6 +270,7 @@
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -294,6 +298,10 @@
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -340,7 +349,8 @@
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -402,6 +402,10 @@
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -463,6 +467,8 @@
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -498,6 +504,8 @@
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers */
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -582,7 +593,7 @@
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a..c8317fb 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id)	\
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@
 	__v;					\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954ce..b6ea9e0 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index f65e27b..16cb92d 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -91,7 +91,10 @@
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7794b2b..fc14a38 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -208,6 +208,7 @@
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
+
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -221,6 +222,7 @@
 	void		*pgdir;		/* root of page-table tree */
 	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
 #endif
+	/* Debug Registers */
 	struct debug_reg debug;
 	struct thread_fp_state	fp_state;
 	struct thread_fp_state	*fp_save_area;
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702..576ad88 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
 #define _PAGE_U1	0x010000
 #define _PAGE_U0	0x020000
 #define _PAGE_ACCESSED	0x040000
-#define _PAGE_LENDIAN	0x080000
+#define _PAGE_ENDIAN	0x080000
 #define _PAGE_GUARDED	0x100000
 #define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
 #define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 126f6e9..5c45787 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -248,6 +248,7 @@
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
 #define SPRN_SPURR	0x134	/* Scaled PURR */
 #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
 #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */
@@ -288,6 +289,7 @@
 #define   LPCR_ISL	(1ul << (63-2))
 #define   LPCR_VC_SH	(63-2)
 #define   LPCR_DPFD_SH	(63-11)
+#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)
 #define   LPCR_VRMASD	(0x1ful << (63-16))
 #define   LPCR_VRMA_L	(1ul << (63-12))
 #define   LPCR_VRMA_LP0	(1ul << (63-15))
@@ -304,6 +306,7 @@
 #define     LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
 #define   LPCR_MER	0x00000800	/* Mediated External Exception */
 #define   LPCR_MER_SH	11
+#define   LPCR_TC      0x00000200	/* Translation control */
 #define   LPCR_LPES    0x0000000c
 #define   LPCR_LPES0   0x00000008      /* LPAR Env selector 0 */
 #define   LPCR_LPES1   0x00000004      /* LPAR Env selector 1 */
@@ -316,6 +319,10 @@
 #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
+#define SPRN_PCR	0x152	/* Processor compatibility register */
+#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
 #define SPRN_TLBVPNR	0x155	/* P7 TLB control register */
@@ -425,6 +432,7 @@
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
 #define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
 #define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
 #define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
 #define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
 #define	 HID4_LPID1_SH	 0		/* partition ID top 2 bits */
@@ -1107,6 +1115,13 @@
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204	0x0f000001
+#define PVR_ARCH_205	0x0f000002
+#define PVR_ARCH_206	0x0f000003
+#define PVR_ARCH_206p	0x0f100003
+#define PVR_ARCH_207	0x0f000004
+
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()		({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 93f280e..37b7ca3 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -235,6 +235,7 @@
 
 /* syscalls implemented in spufs */
 struct file;
+struct coredump_params;
 struct spufs_calls {
 	long (*create_thread)(const char __user *name,
 					unsigned int flags, umode_t mode,
@@ -242,7 +243,7 @@
 	long (*spu_run)(struct file *filp, __u32 __user *unpc,
 						__u32 __user *ustatus);
 	int (*coredump_extra_notes_size)(void);
-	int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
+	int (*coredump_extra_notes_write)(struct coredump_params *cprm);
 	void (*notify_spus_active)(void);
 	struct module *owner;
 };
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..6836ec7 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
 	__u64 pc;
@@ -269,7 +270,24 @@
 	__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE		0x0
+#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 struct kvm_debug_exit_arch {
+	__u64 address;
+	/*
+	 * exiting to userspace because of h/w breakpoint, watchpoint
+	 * (read, write or both) and software breakpoint.
+	 */
+	__u32 status;
+	__u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@
 		 * Type denotes h/w breakpoint, read watchpoint, write
 		 * watchpoint or watchpoint (both read and write).
 		 */
-#define KVMPPC_DEBUG_NONE		0x0
-#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 		__u32 type;
 		__u32 reserved;
 	} bp[16];
@@ -429,6 +443,11 @@
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
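The new KVMPPC_DEBUG_* values do double duty: userspace passes them as the
per-slot "type" in KVM_SET_GUEST_DEBUG, and the kernel hands the matching
bits back in kvm_debug_exit_arch.status on a KVM_EXIT_DEBUG.  A minimal
userspace sketch, assuming a vcpu fd and its mmap'ed kvm_run area are
already set up; the bp[0].addr field name is taken from the surrounding
uapi struct and is an assumption here:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int arm_hw_breakpoint(int vcpu_fd, __u64 addr)
	{
		struct kvm_guest_debug dbg;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
		dbg.arch.bp[0].addr = addr;
		dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}

	static void handle_debug_exit(struct kvm_run *run)
	{
		if (run->exit_reason != KVM_EXIT_DEBUG)
			return;
		if (run->debug.arch.status & KVMPPC_DEBUG_BREAKPOINT)
			printf("h/w breakpoint at 0x%llx\n",
			       (unsigned long long)run->debug.arch.address);
		if (run->debug.arch.status & KVMPPC_DEBUG_WATCH_WRITE)
			printf("write watchpoint at 0x%llx\n",
			       (unsigned long long)run->debug.arch.address);
	}

Moving the defines above struct kvm_debug_exit_arch is what lets both
sides share them without a forward reference.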
 
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a6d7446..fa69832 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif	/* _ASM_POWERPC_SOCKET_H */
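SO_MAX_PACING_RATE caps a socket's transmit rate, in bytes per second; the
cap is enforced by the fq packet scheduler (or by the NIC where hardware
pacing is supported).  A short userspace sketch:

	#include <stdio.h>
	#include <sys/socket.h>

	/* limit this socket to roughly 1 MB/s */
	static int cap_pacing(int fd)
	{
		unsigned int rate = 1000000;	/* bytes per second */

		if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			       &rate, sizeof(rate)) < 0) {
			perror("SO_MAX_PACING_RATE");
			return -1;
		}
		return 0;
	}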
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e60a369..2ea5cc0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -439,7 +439,7 @@
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
 	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
 	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
@@ -470,7 +470,7 @@
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
 
 	/* book3s */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -502,6 +502,8 @@
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,18 +513,22 @@
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
-	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
+	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
 
 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
 #else
 # define SVCPU_FIELD(x, f)
@@ -574,7 +580,7 @@
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
 	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -590,10 +596,11 @@
 	HSTATE_FIELD(HSTATE_DABR, dabr);
 	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	HSTATE_FIELD(HSTATE_CFAR, cfar);
+	HSTATE_FIELD(HSTATE_PPR, ppr);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 1fb331d..6713020 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -189,14 +189,13 @@
 	}
 
 	/* If PCI-E capable, dump PCI-E cap 10, and the AER */
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (cap) {
+	if (pci_is_pcie(dev)) {
 		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
 		printk(KERN_WARNING
 		       "EEH: PCI-E capabilities and status follow:\n");
 
 		for (i=0; i<=8; i++) {
-			eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+			eeh_ops->read_config(dn, dev->pcie_cap+4*i, 4, &cfg);
 			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
 			printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
 		}
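pci_is_pcie() just tests the pcie_cap offset that the PCI core cached at
enumeration time, so the hunk drops a redundant walk of the capability
list.  The same idiom in a self-contained sketch (the DEVCTL read is only
illustrative):

	static void show_pcie_devctl(struct pci_dev *dev)
	{
		u16 devctl;

		if (!pci_is_pcie(dev))
			return;
		/* dev->pcie_cap was filled in once, at bus enumeration */
		pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_DEVCTL,
				     &devctl);
		dev_info(&dev->dev, "PCI-E DEVCTL: %04x\n", devctl);
	}

For config space reachable through the usual accessors,
pcie_capability_read_word() wraps the same lookup; EEH keeps the explicit
offset because it reads through eeh_ops->read_config().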
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3a9ed6a..9f905e4 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -126,7 +126,7 @@
 	bgt	cr1,.
 	GET_PACA(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	li	r0,KVM_HWTHREAD_IN_KERNEL
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 	/* Order setting hwthread_state vs. testing hwthread_req */
@@ -425,7 +425,7 @@
 	mfspr	r9,SPRN_DSISR
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif
@@ -650,6 +650,32 @@
 	b	.				/* prevent spec. execution */
 #endif /* __DISABLED__ */
 
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvmppc_skip_interrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_SRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_SRR0, r13
+	GET_SCRATCH0(r13)
+	rfid
+	b	.
+
+kvmppc_skip_Hinterrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_HSRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_HSRR0, r13
+	GET_SCRATCH0(r13)
+	hrfid
+	b	.
+#endif
+
 /*
  * Code from here down to __end_handlers is invoked from the
  * exception prologs above.  Because the prologs assemble the
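The two skip handlers are identical except for which save/restore pair
they use: SRR0/rfid for ordinary interrupts, HSRR0/hrfid for hypervisor
ones.  In pseudo-C (hedged: GET_SCRATCH0 and rfid are assembler
constructs, not C functions), the logic is:

	/* pseudo-C sketch of kvmppc_skip_interrupt */
	pc = mfspr(SPRN_SRR0);		/* PC at the time of the trap */
	mtspr(SPRN_SRR0, pc + 4);	/* step over the 4-byte instruction */
	r13 = GET_SCRATCH0();		/* the one GPR the prolog clobbered */
	rfid();				/* resume at the updated SRR0 */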
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index e11863f..847e40e 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,7 +84,7 @@
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	/* Tell KVM we're napping */
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2156ea9..90fab64 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -429,7 +429,7 @@
 	case KPROBE_HIT_SSDONE:
 		/*
 		 * We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(cur);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 1a410aa..749778e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -893,7 +893,7 @@
 #endif
 
 #ifdef CONFIG_PPC64
-int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
+int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 {
 	int err;
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8e59abc..930cd8a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -844,18 +844,6 @@
 		smp_ops->cpu_die(cpu);
 }
 
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
 void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 62c3dd8..907a472 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1529,7 +1529,7 @@
 	 * back on or not.
 	 */
 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
-	    current->thread.debug.dbcr1))
+			       current->thread.debug.dbcr1))
 		regs->msr |= MSR_DE;
 	else
 		/* Make sure the IDM flag is off */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f99cefb..e7d0c88f 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1419,8 +1419,7 @@
 
 		/* needed to ensure proper operation of coherent allocations
 		 * later, in case driver doesn't set it explicitly */
-		dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
-		dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+		dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
 	}
 
 	/* register with generic device framework */
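dma_set_mask_and_coherent() sets the streaming and coherent masks in one
call and fails if either is unsupported.  The hunk ignores the return
value, as the old code did; a driver that cares would check it, e.g.
(sketch):

	if (dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)))
		dev_warn(&viodev->dev, "no suitable DMA mask available\n");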
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 2f5c6b6..93221e8 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
 #include "44x_tlb.h"
 #include "booke.h"
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvmppc_booke_vcpu_load(vcpu, cpu);
 	kvmppc_44x_tlb_load(vcpu);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
 {
 	kvmppc_44x_tlb_put(vcpu);
 	kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@
 	return 0;
 }
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
+				      struct kvm_sregs *sregs)
 {
-	kvmppc_get_sregs_ivor(vcpu, sregs);
+	return kvmppc_get_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
-			union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+				  union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
-		       union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+				  union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
+						    unsigned int id)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x;
 	struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 
@@ -176,28 +179,53 @@
 	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_44x(struct kvm *kvm)
 {
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
 {
 }
 
+static struct kvmppc_ops kvm_ops_44x = {
+	.get_sregs = kvmppc_core_get_sregs_44x,
+	.set_sregs = kvmppc_core_set_sregs_44x,
+	.get_one_reg = kvmppc_get_one_reg_44x,
+	.set_one_reg = kvmppc_set_one_reg_44x,
+	.vcpu_load   = kvmppc_core_vcpu_load_44x,
+	.vcpu_put    = kvmppc_core_vcpu_put_44x,
+	.vcpu_create = kvmppc_core_vcpu_create_44x,
+	.vcpu_free   = kvmppc_core_vcpu_free_44x,
+	.mmu_destroy  = kvmppc_mmu_destroy_44x,
+	.init_vm = kvmppc_core_init_vm_44x,
+	.destroy_vm = kvmppc_core_destroy_vm_44x,
+	.emulate_op = kvmppc_core_emulate_op_44x,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
+};
+
 static int __init kvmppc_44x_init(void)
 {
 	int r;
 
 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
+
+err_out:
+	return r;
 }
 
 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
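The conversion has the same shape for every backend touched by this
series: the formerly global kvmppc_core_*() entry points become static,
are gathered into a struct kvmppc_ops, and the module publishes the table
only after kvm_init() succeeds, then unpublishes it first on exit so no
dispatch can race with teardown.  Distilled sketch (the "foo" names are
placeholders):

	static struct kvmppc_ops kvm_ops_foo = {
		.vcpu_load = kvmppc_core_vcpu_load_foo,
		/* ... */
	};

	static int __init foo_init(void)
	{
		int r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_foo), 0,
				 THIS_MODULE);

		if (r)
			return r;
		kvm_ops_foo.owner = THIS_MODULE;
		kvmppc_pr_ops = &kvm_ops_foo;	/* publish last */
		return 0;
	}

	static void __exit foo_exit(void)
	{
		kvmppc_pr_ops = NULL;	/* unpublish before tearing down */
		kvm_exit();
	}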
 
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 35ec0a8..92c9ab4 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@
 	return EMULATE_DONE;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                           unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			       unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -172,7 +172,7 @@
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ed03854..0deef10 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@
 	trace_kvm_stlb_inval(stlb_index);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index e593ff2..141b202 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -35,17 +35,20 @@
 	bool
 	select KVM_BOOK3S_HANDLER
 
-config KVM_BOOK3S_PR
+config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
 	select MMU_NOTIFIER
 
+config KVM_BOOK3S_HV_POSSIBLE
+	bool
+
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
-	select KVM_BOOK3S_PR
+	select KVM_BOOK3S_PR_POSSIBLE
 	---help---
 	  Support running unmodified book3s_32 guest kernels
 	  in virtual machines on book3s_32 host processors.
@@ -60,6 +63,7 @@
 	depends on PPC_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
+	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
@@ -70,8 +74,9 @@
 	  If unsure, say N.
 
 config KVM_BOOK3S_64_HV
-	bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_HV_POSSIBLE
 	select MMU_NOTIFIER
 	select CMA
 	---help---
@@ -90,9 +95,20 @@
 	  If unsure, say N.
 
 config KVM_BOOK3S_64_PR
-	def_bool y
-	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
-	select KVM_BOOK3S_PR
+	tristate "KVM support without using hypervisor mode in host"
+	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_PR_POSSIBLE
+	---help---
+	  Support running guest kernels in virtual machines on processors
+	  without using hypervisor mode in the host, by running the
+	  guest in user mode (problem state) and emulating all
+	  privileged instructions and registers.
+
+	  This is not as fast as using hypervisor mode, but works on
+	  machines where hypervisor mode is not available or not usable,
+	  and can emulate processors that are different from the host
+	  processor, including emulating 32-bit processors on a 64-bit
+	  host.
 
 config KVM_BOOKE_HV
 	bool
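The *_POSSIBLE symbols are pure build-time gates ("this kernel may contain
the PR/HV backend"); the actual choice is now made per VM at run time.  A
minimal sketch of how the two gates combine, restating the
kvmppc_interrupt_offset() helper added to book3s.c later in this series
(hv_only_setup is a placeholder name):

	/* build time: is HV-only code compiled in at all? */
	#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	void hv_only_setup(struct kvm *kvm);
	#endif

	/* run time: which backend does this particular VM use? */
	static unsigned long interrupt_offset(struct kvm_vcpu *vcpu)
	{
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			return to_book3s(vcpu)->hior;	/* PR relocates */
		return 0;				/* HV does not */
	}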
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 6646c95..ce569b6 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	$(KVM)/coalesced_mmio.o \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+	book3s_64_vio_hv.o
+
+kvm-pr-y := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
-	book3s_64_vio_hv.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	book3s_rmhandlers.o
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+kvm-book3s_64-module-objs := \
+	$(KVM)/coalesced_mmio.o
+
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+	book3s_rmhandlers.o
+endif
+
+kvm-hv-y += \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o
+
 kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 	book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+
+ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
-	book3s_64_vio_hv.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
 	book3s_hv_cma.o \
 	$(kvm-book3s_64-builtin-xics-objs-y)
+endif
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o
 
-kvm-book3s_64-module-objs := \
+kvm-book3s_64-module-objs += \
 	$(KVM)/kvm_main.o \
 	$(KVM)/eventfd.o \
 	powerpc.o \
@@ -123,4 +133,7 @@
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
+obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
+obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
+
 obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 700df6f..8912608 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "book3s.h"
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@
 {
 }
 
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	if (!is_kvmppc_hv_enabled(vcpu->kvm))
+		return to_book3s(vcpu)->hior;
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return;
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw;
+	ulong crit_r1;
+	bool crit;
+
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return false;
+
+	crit_raw = vcpu->arch.shared->critical;
+	crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -126,28 +171,32 @@
 	printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
-
+EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
 
 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
 {
 	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
 
 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
 
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
@@ -302,20 +353,23 @@
 
 		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
+		if (writable)
+			*writable = true;
 		return pfn;
 	}
 
-	return gfn_to_pfn(vcpu->kvm, gfn);
+	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
+EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			 struct kvmppc_pte *pte)
+			bool iswrite, struct kvmppc_pte *pte)
 {
 	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
-		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +415,7 @@
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
 		return -ENOENT;
 
 	*eaddr = pte.raddr;
@@ -374,6 +428,7 @@
 
 	return EMULATE_DONE;
 }
+EXPORT_SYMBOL_GPL(kvmppc_st);
 
 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		      bool data)
@@ -383,7 +438,7 @@
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
 		goto nopte;
 
 	*eaddr = pte.raddr;
@@ -404,6 +459,7 @@
 mmio:
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_ld);
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -419,6 +475,18 @@
 {
 }
 
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
@@ -495,8 +563,7 @@
 	if (size > sizeof(val))
 		return -EINVAL;
 
-	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -528,6 +595,9 @@
 			}
 			val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
 			break;
+		case KVM_REG_PPC_VRSAVE:
+			val = get_reg_val(reg->id, vcpu->arch.vrsave);
+			break;
 #endif /* CONFIG_ALTIVEC */
 		case KVM_REG_PPC_DEBUG_INST: {
 			u32 opcode = INS_TW;
@@ -572,8 +642,7 @@
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;
 
-	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -605,6 +674,13 @@
 			}
 			vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
 			break;
+		case KVM_REG_PPC_VRSAVE:
+			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+				r = -ENXIO;
+				break;
+			}
+			vcpu->arch.vrsave = set_reg_val(reg->id, val);
+			break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_KVM_XICS
 		case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@
 	return r;
 }
 
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr);
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                   struct kvm_translation *tr)
 {
@@ -644,3 +741,141 @@
 	kvmppc_core_queue_dec(vcpu);
 	kvm_vcpu_kick(vcpu);
 }
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+}
+
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	kvm->arch.kvm_ops->free_memslot(free, dont);
+}
+
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_userspace_memory_region *mem)
+{
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
+{
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_hva);
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+
+#ifdef CONFIG_PPC64
+	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
+
+	return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+	kvm->arch.kvm_ops->destroy_vm(kvm);
+
+#ifdef CONFIG_PPC64
+	kvmppc_rtas_tokens_free(kvm);
+	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatibility while loading the HV
+	 * or PR module.
+	 */
+	return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
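kvmppc_gfn_to_pfn() now takes the access type and reports back whether the
host mapping is writable, so a read fault on a read-only host page (a KSM
or CoW page, say) can be mapped read-only instead of forcing a copy.  The
caller shape, sketched from the host-MMU hunks below:

	bool writable;
	pfn_t pfn;

	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
	if (is_error_noslot_pfn(pfn))
		return -EINVAL;
	if (orig_pte->may_write && writable) {
		/* install a writable HPTE and mark the page dirty */
	} else {
		/* install read-only; a later write simply re-faults */
	}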
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
new file mode 100644
index 0000000..4bf956c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_BOOK3S_H__
+#define __POWERPC_KVM_BOOK3S_H__
+
+extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot);
+extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
+				  unsigned long end);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
+
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd..76a64ce 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data);
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid);
 
@@ -99,7 +100,7 @@
 	u64 vsid;
 	struct kvmppc_pte pte;
 
-	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
 		return pte.vpage;
 
 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@
 	kvmppc_set_msr(vcpu, 0);
 }
 
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
 				      u32 sre, gva_t eaddr,
 				      bool primary)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 page, hash, pteg, htabmask;
 	hva_t r;
 
@@ -132,7 +134,7 @@
 		kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
 		sr_vsid(sre));
 
-	r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 	if (kvm_is_error_hva(r))
 		return r;
 	return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data)
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@
 				printk(KERN_INFO "BAT is not readable!\n");
 				continue;
 			}
-			if (!pte->may_write) {
-				/* let's treat r/o BATs as not-readable for now */
+			if (iswrite && !pte->may_write) {
 				dprintk_pte("BAT is read-only!\n");
 				continue;
 			}
@@ -201,9 +203,8 @@
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				     struct kvmppc_pte *pte, bool data,
-				     bool primary)
+				     bool iswrite, bool primary)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 sre;
 	hva_t ptegp;
 	u32 pteg[16];
@@ -218,7 +219,7 @@
 
 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 
-	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
 	if (kvm_is_error_hva(ptegp)) {
 		printk(KERN_INFO "KVM: Invalid PTEG!\n");
 		goto no_page_found;
@@ -258,9 +259,6 @@
 					break;
 			}
 
-			if ( !pte->may_read )
-				continue;
-
 			dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
 				    pteg[i], pteg[i+1], pp);
 			found = 1;
@@ -271,19 +269,23 @@
 	/* Update PTE C and A bits, so the guest's swapper knows we used the
 	   page */
 	if (found) {
-		u32 oldpte = pteg[i+1];
+		u32 pte_r = pteg[i+1];
+		char __user *addr = (char __user *) &pteg[i+1];
 
-		if (pte->may_read)
-			pteg[i+1] |= PTEG_FLAG_ACCESSED;
-		if (pte->may_write)
-			pteg[i+1] |= PTEG_FLAG_DIRTY;
-		else
-			dprintk_pte("KVM: Mapping read-only page!\n");
-
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldpte)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+		/*
+		 * Use single-byte writes to update the HPTE, to
+		 * conform to what real hardware does.
+		 */
+		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+			pte_r |= PTEG_FLAG_ACCESSED;
+			put_user(pte_r >> 8, addr + 2);
+		}
+		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+			pte_r |= PTEG_FLAG_DIRTY;
+			put_user(pte_r, addr + 3);
+		}
+		if (!pte->may_read || (iswrite && !pte->may_write))
+			return -EPERM;
 		return 0;
 	}
 
@@ -302,12 +304,14 @@
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *pte, bool data)
+				      struct kvmppc_pte *pte, bool data,
+				      bool iswrite)
 {
 	int r;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+	pte->page_size = MMU_PAGE_4K;
 
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@
 		return 0;
 	}
 
-	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
 	if (r < 0)
-	       r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, true);
 	if (r < 0)
-	       r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, false);
 
 	return r;
 }
@@ -347,7 +353,12 @@
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+	int i;
+	struct kvm_vcpu *v;
+
+	/* flush this VA on all cpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
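The byte offsets in the single-byte updates follow from the layout of the
big-endian second PTE word: bytes addr+0..addr+3 hold bits 31:24 down to
7:0, so PTEG_FLAG_ACCESSED (0x100) lives in byte 2 and PTEG_FLAG_DIRTY
(0x80) in byte 3, which is why the hunk stores pte_r >> 8 at addr + 2 and
pte_r at addr + 3.  A hypothetical helper expressing the same selection:

	/* hypothetical: store one byte of a big-endian u32 to user space */
	static int put_be32_byte(u32 __user *word, u32 val, int byte)
	{
		u8 b = val >> (8 * (3 - byte));	/* byte 0 = bits 31:24 */

		return put_user(b, (u8 __user *)word + byte);
	}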
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619b..3a0abd2 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
 	u64 vpn;
@@ -152,9 +153,11 @@
 	bool evict = false;
 	struct hpte_cache *pte;
 	int r = 0;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
@@ -204,7 +207,7 @@
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
@@ -259,6 +262,11 @@
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@
 	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	int i;
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7e345e0..83da1f8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,9 +107,20 @@
 	return kvmppc_slb_calc_vpn(slb, eaddr);
 }
 
+static int mmu_pagesize(int mmu_pg)
+{
+	switch (mmu_pg) {
+	case MMU_PAGE_64K:
+		return 16;
+	case MMU_PAGE_16M:
+		return 24;
+	}
+	return 12;
+}
+
 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
 {
-	return slbe->large ? 24 : 12;
+	return mmu_pagesize(slbe->base_page_size);
 }
 
 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
@@ -119,11 +130,11 @@
 	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
 }
 
-static hva_t kvmppc_mmu_book3s_64_get_pteg(
-				struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
 				struct kvmppc_slb *slbe, gva_t eaddr,
 				bool second)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u64 hash, pteg, htabsize;
 	u32 ssize;
 	hva_t r;
@@ -148,10 +159,10 @@
 
 	/* When running a PAPR guest, SDR1 contains a HVA address instead
            of a GPA */
-	if (vcpu_book3s->vcpu.arch.papr_enabled)
+	if (vcpu->arch.papr_enabled)
 		r = pteg;
 	else
-		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 
 	if (kvm_is_error_hva(r))
 		return r;
@@ -166,18 +177,38 @@
 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
 	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
 
-	if (p < 24)
-		avpn >>= ((80 - p) - 56) - 8;
+	if (p < 16)
+		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
 	else
-		avpn <<= 8;
+		avpn <<= p - 16;
 
 	return avpn;
 }
 
-static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				struct kvmppc_pte *gpte, bool data)
+/*
+ * Return the page size encoded in the second word of an HPTE, or
+ * -1 for an invalid encoding for the base page size indicated by
+ * the SLB entry.  This doesn't handle mixed pagesize segments yet.
+ */
+static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+	switch (slbe->base_page_size) {
+	case MMU_PAGE_64K:
+		if ((r & 0xf000) == 0x1000)
+			return MMU_PAGE_64K;
+		break;
+	case MMU_PAGE_16M:
+		if ((r & 0xff000) == 0)
+			return MMU_PAGE_16M;
+		break;
+	}
+	return -1;
+}
+
+static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+				      struct kvmppc_pte *gpte, bool data,
+				      bool iswrite)
+{
 	struct kvmppc_slb *slbe;
 	hva_t ptegp;
 	u64 pteg[16];
@@ -189,6 +220,7 @@
 	u8 pp, key = 0;
 	bool found = false;
 	bool second = false;
+	int pgsize;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	/* Magic page override */
@@ -202,6 +234,7 @@
 		gpte->may_execute = true;
 		gpte->may_read = true;
 		gpte->may_write = true;
+		gpte->page_size = MMU_PAGE_4K;
 
 		return 0;
 	}
@@ -222,8 +255,12 @@
 	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
 		HPTE_V_SECONDARY;
 
+	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
+
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+
 do_second:
-	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
+	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
 		goto no_page_found;
 
@@ -240,6 +277,13 @@
 	for (i=0; i<16; i+=2) {
 		/* Check all relevant fields of 1st dword */
 		if ((pteg[i] & v_mask) == v_val) {
+			/* If large page bit is set, check pgsize encoding */
+			if (slbe->large &&
+			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+				pgsize = decode_pagesize(slbe, pteg[i+1]);
+				if (pgsize < 0)
+					continue;
+			}
 			found = true;
 			break;
 		}
@@ -256,13 +300,15 @@
 	v = pteg[i];
 	r = pteg[i+1];
 	pp = (r & HPTE_R_PP) | key;
-	eaddr_mask = 0xFFF;
+	if (r & HPTE_R_PP0)
+		pp |= 8;
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
-	if (slbe->large)
-		eaddr_mask = 0xFFFFFF;
+
+	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
 	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+	gpte->page_size = pgsize;
 	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
 	gpte->may_read = false;
 	gpte->may_write = false;
@@ -277,6 +323,7 @@
 	case 3:
 	case 5:
 	case 7:
+	case 10:
 		gpte->may_read = true;
 		break;
 	}
@@ -287,30 +334,37 @@
 
 	/* Update PTE R and C bits, so the guest's swapper knows we used the
 	 * page */
-	if (gpte->may_read) {
-		/* Set the accessed flag */
+	if (gpte->may_read && !(r & HPTE_R_R)) {
+		/*
+		 * Set the accessed flag.
+		 * We have to write this back with a single byte write
+		 * because another vcpu may be accessing this on
+		 * non-PAPR platforms such as mac99, and this is
+		 * what real hardware does.
+		 */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_R;
+		put_user(r >> 8, addr + 6);
 	}
-	if (data && gpte->may_write) {
-		/* Set the dirty flag -- XXX even if not writing */
+	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+		/* Set the dirty flag */
+		/* Use a single byte write */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_C;
+		put_user(r, addr + 7);
 	}
 
-	/* Write back into the PTEG */
-	if (pteg[i+1] != r) {
-		pteg[i+1] = r;
-		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
-	if (!gpte->may_read)
+	if (!gpte->may_read || (iswrite && !gpte->may_write))
 		return -EPERM;
 	return 0;
 
 no_page_found:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	return -ENOENT;
 
 no_seg_found:
-
 	dprintk("KVM MMU: Trigger segment fault\n");
 	return -EINVAL;
 }
@@ -345,6 +399,21 @@
 	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
 	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
 
+	slbe->base_page_size = MMU_PAGE_4K;
+	if (slbe->large) {
+		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
+			switch (rs & SLB_VSID_LP) {
+			case SLB_VSID_LP_00:
+				slbe->base_page_size = MMU_PAGE_16M;
+				break;
+			case SLB_VSID_LP_01:
+				slbe->base_page_size = MMU_PAGE_64K;
+				break;
+			}
+		} else
+			slbe->base_page_size = MMU_PAGE_16M;
+	}
+
 	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
 	slbe->origv = rs;
 
@@ -460,14 +529,45 @@
 				       bool large)
 {
 	u64 mask = 0xFFFFFFFFFULL;
+	long i;
+	struct kvm_vcpu *v;
 
 	dprintk("KVM MMU: tlbie(0x%lx)\n", va);
 
-	if (large)
-		mask = 0xFFFFFF000ULL;
-	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
+	/*
+	 * The tlbie instruction changed behaviour starting with
+	 * POWER6.  On POWER6 and later the large-page flag is no longer
+	 * in the instruction itself but in the RB value, along with bits
+	 * indicating page and segment sizes.
+	 */
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
+		/* POWER6 or later */
+		if (va & 1) {		/* L bit */
+			if ((va & 0xf000) == 0x1000)
+				mask = 0xFFFFFFFF0ULL;	/* 64k page */
+			else
+				mask = 0xFFFFFF000ULL;	/* 16M page */
+		}
+	} else {
+		/* older processors, e.g. PPC970 */
+		if (large)
+			mask = 0xFFFFFF000ULL;
+	}
+	/* flush this VA on all vcpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
 }
 
+#ifdef CONFIG_PPC_64K_PAGES
+static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
+{
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+		(mp_ea >> SID_SHIFT) == esid;
+}
+#endif
+
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
@@ -475,11 +575,13 @@
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
+	int pagesize = MMU_PAGE_64K;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
+			pagesize = slb->base_page_size;
 			if (slb->tb) {
 				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
 				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -490,28 +592,41 @@
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = VSID_REAL | esid;
+		gvsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = VSID_REAL_IR | gvsid;
+		gvsid |= VSID_REAL_IR;
 		break;
 	case MSR_DR:
-		*vsid = VSID_REAL_DR | gvsid;
+		gvsid |= VSID_REAL_DR;
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
 			goto no_slb;
 
-		*vsid = gvsid;
 		break;
 	default:
 		BUG();
 		break;
 	}
 
-	if (vcpu->arch.shared->msr & MSR_PR)
-		*vsid |= VSID_PR;
+#ifdef CONFIG_PPC_64K_PAGES
+	/*
+	 * Mark this as a 64k segment if the host is using
+	 * 64k pages, the host MMU supports 64k pages and
+	 * the guest segment page size is >= 64k,
+	 * but not if this segment contains the magic page.
+	 */
+	if (pagesize >= MMU_PAGE_64K &&
+	    mmu_psize_defs[MMU_PAGE_64K].shift &&
+	    !segment_contains_magic_page(vcpu, esid))
+		gvsid |= VSID_64K;
+#endif
 
+	if (vcpu->arch.shared->msr & MSR_PR)
+		gvsid |= VSID_PR;
+
+	*vsid = gvsid;
 	return 0;
 
 no_slb:
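The flush masks read more easily in vpage granules: kvmppc_mmu_pte_vflush()
operates on va >> 12, i.e. 4k units, so a 64k page spans 16 units (mask
off the low 4 bits) and a 16M page spans 4096 (mask off the low 12).
Restating the POWER6+ branch as a helper (sketch):

	static u64 tlbie_flush_mask(unsigned long rb)
	{
		if (!(rb & 1))			/* L = 0: 4k page */
			return 0xFFFFFFFFFULL;
		if ((rb & 0xf000) == 0x1000)	/* LP encoding: 64k page */
			return 0xFFFFFFFF0ULL;	/* 16 x 4k units */
		return 0xFFFFFF000ULL;		/* 16M: 4096 x 4k units */
	}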
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e524052..0d513af 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -27,14 +27,14 @@
 #include <asm/machdep.h>
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
-#include "trace.h"
+#include "trace_pr.h"
 
 #define PTE_SIZE 12
 
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-			       MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
 			       false);
 }
 
@@ -78,7 +78,8 @@
 	return NULL;
 }
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	unsigned long vpn;
 	pfn_t hpaddr;
@@ -90,16 +91,26 @@
 	int attempt = 0;
 	struct kvmppc_sid_map *map;
 	int r = 0;
+	int hpsize = MMU_PAGE_4K;
+	bool writable;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu->kvm;
+	struct hpte_cache *cpte;
+	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+	unsigned long pfn;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	if (is_error_noslot_pfn(pfn)) {
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
 		r = -EINVAL;
 		goto out;
 	}
-	hpaddr <<= PAGE_SHIFT;
-	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+	hpaddr = pfn << PAGE_SHIFT;
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,20 +128,39 @@
 		goto out;
 	}
 
-	vsid = map->host_vsid;
-	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
-	if (!orig_pte->may_write)
-		rflags |= HPTE_R_PP;
-	else
-		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	kvm_set_pfn_accessed(pfn);
+	if (!orig_pte->may_write || !writable)
+		rflags |= PP_RXRX;
+	else {
+		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}
 
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
 	else
-		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+		kvmppc_mmu_flush_icache(pfn);
 
-	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+	/*
+	 * Use 64K pages if possible; otherwise, on 64K page kernels,
+	 * we need to transfer 4 more bits from guest real to host real addr.
+	 */
+	if (vsid & VSID_64K)
+		hpsize = MMU_PAGE_64K;
+	else
+		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
+
+	cpte = kvmppc_mmu_hpte_cache_next(vcpu);
+
+	spin_lock(&kvm->mmu_lock);
+	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+		r = -EAGAIN;
+		goto out_unlock;
+	}
 
 map_again:
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -139,11 +169,11 @@
 	if (attempt > 1)
 		if (ppc_md.hpte_remove(hpteg) < 0) {
 			r = -1;
-			goto out;
+			goto out_unlock;
 		}
 
 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-				 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+				 hpsize, hpsize, MMU_SEGSIZE_256M);
 
 	if (ret < 0) {
 		/* If we couldn't map a primary PTE, try a secondary */
@@ -152,8 +182,6 @@
 		attempt++;
 		goto map_again;
 	} else {
-		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
-
 		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
 					    vpn, hpaddr, orig_pte);
 
@@ -164,19 +192,37 @@
 			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 		}
 
-		pte->slot = hpteg + (ret & 7);
-		pte->host_vpn = vpn;
-		pte->pte = *orig_pte;
-		pte->pfn = hpaddr >> PAGE_SHIFT;
+		cpte->slot = hpteg + (ret & 7);
+		cpte->host_vpn = vpn;
+		cpte->pte = *orig_pte;
+		cpte->pfn = pfn;
+		cpte->pagesize = hpsize;
 
-		kvmppc_mmu_hpte_cache_map(vcpu, pte);
+		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
+		cpte = NULL;
 	}
-	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
+	kvm_release_pfn_clean(pfn);
+	if (cpte)
+		kvmppc_mmu_hpte_cache_free(cpte);
 
 out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	u64 mask = 0xfffffffffULL;
+	u64 vsid;
+
+	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
+	if (vsid & VSID_64K)
+		mask = 0xffffffff0ULL;
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -291,6 +337,12 @@
 	slb_vsid &= ~SLB_VSID_KP;
 	slb_esid |= slb_index;
 
+#ifdef CONFIG_PPC_64K_PAGES
+	/* Set host segment base page size to 64K if possible */
+	if (gvsid & VSID_64K)
+		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
 	svcpu->slb[slb_index].esid = slb_esid;
 	svcpu->slb[slb_index].vsid = slb_vsid;
 
@@ -326,7 +378,7 @@
 	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	kvmppc_mmu_hpte_destroy(vcpu);
 	__destroy_context(to_book3s(vcpu)->context_id[0]);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8..f3ff587 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -260,10 +260,6 @@
 	return 0;
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
 	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -451,7 +447,7 @@
 }
 
 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-			struct kvmppc_pte *gpte, bool data)
+			struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_slb *slbe;
@@ -906,21 +902,22 @@
 	return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (kvm->arch.using_mmu_notifiers)
 		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	if (kvm->arch.using_mmu_notifiers)
 		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
 	return 0;
 }
 
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+				  struct kvm_memory_slot *memslot)
 {
 	unsigned long *rmapp;
 	unsigned long gfn;
@@ -994,7 +991,7 @@
 	return ret;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return 0;
@@ -1032,14 +1029,14 @@
 	return ret;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return 0;
 	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	if (!kvm->arch.using_mmu_notifiers)
 		return;
@@ -1512,9 +1509,8 @@
 
 				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 					(VRMA_VSID << SLB_VSID_SHIFT_1T);
-				lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-				lpcr |= senc << (LPCR_VRMASD_SH - 4);
-				kvm->arch.lpcr = lpcr;
+				lpcr = senc << (LPCR_VRMASD_SH - 4);
+				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 				rma_setup = 1;
 			}
 			++i;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 30c2f3b..2c25f54 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,3 +74,4 @@
 	/* Didn't find the liobn, punt it to userspace */
 	return H_TOO_HARD;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 360ce68..99d40f8 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,8 +86,8 @@
 	return true;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                           unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			      unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int rt = get_rt(inst);
@@ -172,7 +172,7 @@
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 		case OP_31_XOP_FAKE_SC1:
 		{
 			/* SC 1 papr hypercalls */
@@ -267,12 +267,9 @@
 
 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 			if ((r == -ENOENT) || (r == -EPERM)) {
-				struct kvmppc_book3s_shadow_vcpu *svcpu;
-
-				svcpu = svcpu_get(vcpu);
 				*advance = 0;
 				vcpu->arch.shared->dar = vaddr;
-				svcpu->fault_dar = vaddr;
+				vcpu->arch.fault_dar = vaddr;
 
 				dsisr = DSISR_ISSTORE;
 				if (r == -ENOENT)
@@ -281,8 +278,7 @@
 					dsisr |= DSISR_PROTFAULT;
 
 				vcpu->arch.shared->dsisr = dsisr;
-				svcpu->fault_dsisr = dsisr;
-				svcpu_put(svcpu);
+				vcpu->arch.fault_dsisr = dsisr;
 
 				kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_DATA_STORAGE);
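When the emulated dcbz store fails, the code above synthesizes a DSISR for the guest: ISSTORE is always set, plus NOHPTE for a missing translation (-ENOENT) or PROTFAULT for a permission failure (-EPERM). A sketch of that mapping; treat the bit values as illustrative rather than authoritative:

    #include <assert.h>
    #include <errno.h>

    #define DSISR_ISSTORE   0x02000000u     /* illustrative values */
    #define DSISR_NOHPTE    0x40000000u
    #define DSISR_PROTFAULT 0x08000000u

    /* err is the kvmppc_st() result: -ENOENT or -EPERM */
    static unsigned int dsisr_for_failed_store(int err)
    {
            unsigned int dsisr = DSISR_ISSTORE;

            if (err == -ENOENT)
                    dsisr |= DSISR_NOHPTE;          /* no translation */
            else
                    dsisr |= DSISR_PROTFAULT;       /* protection fault */
            return dsisr;
    }

    int main(void)
    {
            assert(dsisr_for_failed_store(-ENOENT) ==
                   (DSISR_ISSTORE | DSISR_NOHPTE));
            return 0;
    }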
@@ -349,7 +345,7 @@
 	return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -472,7 +468,7 @@
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 7057a02..852989a 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,9 +20,10 @@
 #include <linux/export.h>
 #include <asm/kvm_book3s.h>
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#else
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5a..072287f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,6 +52,9 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
+
+#include "book3s.h"
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
@@ -66,7 +69,7 @@
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
-void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
 	int me;
 	int cpu = vcpu->cpu;
@@ -125,7 +128,7 @@
  * purely defensive; they should never fail.)
  */
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -143,7 +146,7 @@
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -155,17 +158,46 @@
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
 }
 
+int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
+{
+	unsigned long pcr = 0;
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+	if (arch_compat) {
+		if (!cpu_has_feature(CPU_FTR_ARCH_206))
+			return -EINVAL;	/* 970 has no compat mode support */
+
+		switch (arch_compat) {
+		case PVR_ARCH_205:
+			pcr = PCR_ARCH_205;
+			break;
+		case PVR_ARCH_206:
+		case PVR_ARCH_206p:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	spin_lock(&vc->lock);
+	vc->arch_compat = arch_compat;
+	vc->pcr = pcr;
+	spin_unlock(&vc->lock);
+
+	return 0;
+}
+
 void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 {
 	int r;
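kvmppc_set_arch_compat() is new: userspace can pin a virtual core to an older architecture level, and on POWER7 that is enforced by setting bits in the Processor Compatibility Register (PCR). Only 2.05 compatibility needs a PCR bit; 2.06 and 2.06+ are the native level and 970 has no compat support at all. A standalone sketch of the decision; the PVR_ARCH_*/PCR_ARCH_205 encodings here are assumptions for illustration:

    #include <assert.h>
    #include <errno.h>

    #define PVR_ARCH_205  0x0f000001u       /* assumed encodings */
    #define PVR_ARCH_206  0x0f000002u
    #define PVR_ARCH_206p 0x0f100003u
    #define PCR_ARCH_205  0x1ul

    static int pcr_for_compat(unsigned int arch_compat, unsigned long *pcr)
    {
            *pcr = 0;
            if (!arch_compat)
                    return 0;               /* no compat mode requested */
            switch (arch_compat) {
            case PVR_ARCH_205:
                    *pcr = PCR_ARCH_205;    /* restrict to 2.05 behaviour */
                    return 0;
            case PVR_ARCH_206:
            case PVR_ARCH_206p:
                    return 0;               /* native level, no PCR bits */
            default:
                    return -EINVAL;
            }
    }

    int main(void)
    {
            unsigned long pcr;

            assert(pcr_for_compat(PVR_ARCH_205, &pcr) == 0 &&
                   pcr == PCR_ARCH_205);
            assert(pcr_for_compat(0x12345678, &pcr) == -EINVAL);
            return 0;
    }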
@@ -195,7 +227,7 @@
 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
-	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 	       vcpu->arch.last_inst);
 }
 
@@ -489,7 +521,7 @@
 	memset(dt, 0, sizeof(struct dtl_entry));
 	dt->dispatch_reason = 7;
 	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
-	dt->timebase = now;
+	dt->timebase = now + vc->tb_offset;
 	dt->enqueue_to_dispatch_time = stolen;
 	dt->srr0 = kvmppc_get_pc(vcpu);
 	dt->srr1 = vcpu->arch.shregs.msr;
@@ -538,6 +570,15 @@
 		}
 		break;
 	case H_CONFER:
+		target = kvmppc_get_gpr(vcpu, 4);
+		if (target == -1)
+			break;
+		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		if (!tvcpu) {
+			ret = H_PARAMETER;
+			break;
+		}
+		kvm_vcpu_yield_to(tvcpu);
 		break;
 	case H_REGISTER_VPA:
 		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -576,8 +617,8 @@
 	return RESUME_GUEST;
 }
 
-static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			      struct task_struct *tsk)
+static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				 struct task_struct *tsk)
 {
 	int r = RESUME_HOST;
 
@@ -671,16 +712,16 @@
 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
 			vcpu->arch.shregs.msr);
+		run->hw.hardware_exit_reason = vcpu->arch.trap;
 		r = RESUME_HOST;
-		BUG();
 		break;
 	}
 
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i;
 
@@ -694,12 +735,12 @@
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i, j;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_hv(vcpu, sregs->pvr);
 
 	j = 0;
 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -714,7 +755,23 @@
 	return 0;
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	u64 mask;
+
+	spin_lock(&vc->lock);
+	/*
+	 * Userspace can only modify DPFD (default prefetch depth),
+	 * ILE (interrupt little-endian) and TC (translation control).
+	 */
+	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+	spin_unlock(&vc->lock);
+}
+
+static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
@@ -749,6 +806,12 @@
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -787,6 +850,18 @@
 		val->vpaval.length = vcpu->arch.dtl.len;
 		spin_unlock(&vcpu->arch.vpa_update_lock);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+		break;
+	case KVM_REG_PPC_LPCR:
+		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+		break;
+	case KVM_REG_PPC_PPR:
+		*val = get_reg_val(id, vcpu->arch.ppr);
+		break;
+	case KVM_REG_PPC_ARCH_COMPAT:
+		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -795,7 +870,8 @@
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
@@ -833,6 +909,12 @@
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -880,6 +962,20 @@
 		len -= len % sizeof(struct dtl_entry);
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		/* round up to multiple of 2^24 */
+		vcpu->arch.vcore->tb_offset =
+			ALIGN(set_reg_val(id, *val), 1UL << 24);
+		break;
+	case KVM_REG_PPC_LPCR:
+		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+		break;
+	case KVM_REG_PPC_PPR:
+		vcpu->arch.ppr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_ARCH_COMPAT:
+		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
+		break;
 	default:
 		r = -EINVAL;
 		break;
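The "round up to multiple of 2^24" on KVM_REG_PPC_TB_OFFSET exists because the hypervisor applies the guest's timebase offset through SPRN_TBU40, which can only rewrite the upper 40 bits of the 64-bit timebase, i.e. it works in units of 2^24 ticks. A worked example using the same rounding semantics as the kernel's ALIGN() macro:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long offset = 123456789;
            unsigned long rounded = ALIGN(offset, 1UL << 24);

            /* 123456789 = 7*2^24 + 6016277, so this prints
             * 123456789 -> 134217728 (8*2^24). */
            printf("%lu -> %lu\n", offset, rounded);
            return 0;
    }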
@@ -888,14 +984,8 @@
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	if (cpu_has_feature(CPU_FTR_HVMODE))
-		return 0;
-	return -EIO;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
 	int err = -EINVAL;
@@ -919,8 +1009,7 @@
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
-	vcpu->arch.pvr = mfspr(SPRN_PVR);
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
@@ -940,6 +1029,7 @@
 			spin_lock_init(&vcore->lock);
 			init_waitqueue_head(&vcore->wq);
 			vcore->preempt_tb = TB_NIL;
+			vcore->lpcr = kvm->arch.lpcr;
 		}
 		kvm->arch.vcores[core] = vcore;
 		kvm->arch.online_vcores++;
@@ -972,7 +1062,7 @@
 					vpa->dirty);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -983,6 +1073,12 @@
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
+static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
+{
+	/* Indicate we want to get back into the guest */
+	return 1;
+}
+
 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 {
 	unsigned long dec_nsec, now;
@@ -1264,8 +1360,8 @@
 
 		ret = RESUME_GUEST;
 		if (vcpu->arch.trap)
-			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
-						 vcpu->arch.run_task);
+			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+						    vcpu->arch.run_task);
 
 		vcpu->arch.ret = ret;
 		vcpu->arch.trap = 0;
@@ -1424,7 +1520,7 @@
 	return vcpu->arch.ret;
 }
 
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
@@ -1546,7 +1642,8 @@
 	.release	= kvm_rma_release,
 };
 
-long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+				      struct kvm_allocate_rma *ret)
 {
 	long fd;
 	struct kvm_rma_info *ri;
@@ -1592,7 +1689,8 @@
 	(*sps)++;
 }
 
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
 	struct kvm_ppc_one_seg_page_size *sps;
 
@@ -1613,7 +1711,8 @@
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	int r;
@@ -1667,8 +1766,8 @@
 	}
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
 {
 	if (!dont || free->arch.rmap != dont->arch.rmap) {
 		vfree(free->arch.rmap);
@@ -1681,8 +1780,8 @@
 	}
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
+static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
+					 unsigned long npages)
 {
 	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 	if (!slot->arch.rmap)
@@ -1692,9 +1791,9 @@
 	return 0;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
 {
 	unsigned long *phys;
 
@@ -1710,9 +1809,9 @@
 	return 0;
 }
 
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem,
-				      const struct kvm_memory_slot *old)
+static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
@@ -1729,6 +1828,37 @@
 	}
 }
 
+/*
+ * Update LPCR values in kvm->arch and in vcores.
+ * Caller must hold kvm->lock.
+ */
+void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
+{
+	long int i;
+	u32 cores_done = 0;
+
+	if ((kvm->arch.lpcr & mask) == lpcr)
+		return;
+
+	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
+
+	for (i = 0; i < KVM_MAX_VCORES; ++i) {
+		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+		if (!vc)
+			continue;
+		spin_lock(&vc->lock);
+		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+		spin_unlock(&vc->lock);
+		if (++cores_done >= kvm->arch.online_vcores)
+			break;
+	}
+}
+
+static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
@@ -1737,7 +1867,8 @@
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
-	unsigned long lpcr, senc;
+	unsigned long lpcr = 0, senc;
+	unsigned long lpcr_mask = 0;
 	unsigned long psize, porder;
 	unsigned long rma_size;
 	unsigned long rmls;
@@ -1802,9 +1933,9 @@
 		senc = slb_pgsize_encoding(psize);
 		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-		lpcr |= senc << (LPCR_VRMASD_SH - 4);
-		kvm->arch.lpcr = lpcr;
+		lpcr_mask = LPCR_VRMASD;
+		/* the -4 is to account for senc values starting at 0x10 */
+		lpcr = senc << (LPCR_VRMASD_SH - 4);
 
 		/* Create HPTEs in the hash page table for the VRMA */
 		kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1825,23 +1956,21 @@
 		kvm->arch.rma = ri;
 
 		/* Update LPCR and RMOR */
-		lpcr = kvm->arch.lpcr;
 		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 			/* PPC970; insert RMLS value (split field) in HID4 */
-			lpcr &= ~((1ul << HID4_RMLS0_SH) |
-				  (3ul << HID4_RMLS2_SH));
-			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+			lpcr_mask = (1ul << HID4_RMLS0_SH) |
+				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
+			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
 				((rmls & 3) << HID4_RMLS2_SH);
 			/* RMOR is also in HID4 */
 			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
 				<< HID4_RMOR_SH;
 		} else {
 			/* POWER7 */
-			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
-			lpcr |= rmls << LPCR_RMLS_SH;
+			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
+			lpcr = rmls << LPCR_RMLS_SH;
 			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
 		}
-		kvm->arch.lpcr = lpcr;
 		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
 			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
@@ -1860,6 +1989,8 @@
 		}
 	}
 
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
 	kvm->arch.rma_setup_done = 1;
@@ -1875,7 +2006,7 @@
 	goto out_srcu;
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 {
 	unsigned long lpcr, lpid;
 
@@ -1893,9 +2024,6 @@
 	 */
 	cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-
 	kvm->arch.rma = NULL;
 
 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -1931,61 +2059,162 @@
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_free_vcores(struct kvm *kvm)
+{
+	long int i;
+
+	for (i = 0; i < KVM_MAX_VCORES; ++i)
+		kfree(kvm->arch.vcores[i]);
+	kvm->arch.online_vcores = 0;
+}
+
+static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
 	uninhibit_secondary_onlining();
 
+	kvmppc_free_vcores(kvm);
 	if (kvm->arch.rma) {
 		kvm_release_rma(kvm->arch.rma);
 		kvm->arch.rma = NULL;
 	}
 
-	kvmppc_rtas_tokens_free(kvm);
-
 	kvmppc_free_hpt(kvm);
-	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
-}
-
-/* These are stubs for now */
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
 }
 
 /* We don't need to emulate any privileged instructions or dcbz */
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                           unsigned int inst, int *advance)
+static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong *spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-static int kvmppc_book3s_hv_init(void)
+static int kvmppc_core_check_processor_compat_hv(void)
 {
-	int r;
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
+		return -EIO;
+	return 0;
+}
 
-	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+static long kvm_arch_vm_ioctl_hv(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
 
-	if (r)
-		return r;
+	switch (ioctl) {
 
-	r = kvmppc_mmu_hv_init();
+	case KVM_ALLOCATE_RMA: {
+		struct kvm_allocate_rma rma;
+		struct kvm *kvm = filp->private_data;
+
+		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+			r = -EFAULT;
+		break;
+	}
+
+	case KVM_PPC_ALLOCATE_HTAB: {
+		u32 htab_order;
+
+		r = -EFAULT;
+		if (get_user(htab_order, (u32 __user *)argp))
+			break;
+		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+		if (r)
+			break;
+		r = -EFAULT;
+		if (put_user(htab_order, (u32 __user *)argp))
+			break;
+		r = 0;
+		break;
+	}
+
+	case KVM_PPC_GET_HTAB_FD: {
+		struct kvm_get_htab_fd ghf;
+
+		r = -EFAULT;
+		if (copy_from_user(&ghf, argp, sizeof(ghf)))
+			break;
+		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+		break;
+	}
+
+	default:
+		r = -ENOTTY;
+	}
 
 	return r;
 }
 
-static void kvmppc_book3s_hv_exit(void)
+static struct kvmppc_ops kvm_ops_hv = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
+	.get_one_reg = kvmppc_get_one_reg_hv,
+	.set_one_reg = kvmppc_set_one_reg_hv,
+	.vcpu_load   = kvmppc_core_vcpu_load_hv,
+	.vcpu_put    = kvmppc_core_vcpu_put_hv,
+	.set_msr     = kvmppc_set_msr_hv,
+	.vcpu_run    = kvmppc_vcpu_run_hv,
+	.vcpu_create = kvmppc_core_vcpu_create_hv,
+	.vcpu_free   = kvmppc_core_vcpu_free_hv,
+	.check_requests = kvmppc_core_check_requests_hv,
+	.get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
+	.flush_memslot  = kvmppc_core_flush_memslot_hv,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
+	.commit_memory_region  = kvmppc_core_commit_memory_region_hv,
+	.unmap_hva = kvm_unmap_hva_hv,
+	.unmap_hva_range = kvm_unmap_hva_range_hv,
+	.age_hva  = kvm_age_hva_hv,
+	.test_age_hva = kvm_test_age_hva_hv,
+	.set_spte_hva = kvm_set_spte_hva_hv,
+	.mmu_destroy  = kvmppc_mmu_destroy_hv,
+	.free_memslot = kvmppc_core_free_memslot_hv,
+	.create_memslot = kvmppc_core_create_memslot_hv,
+	.init_vm =  kvmppc_core_init_vm_hv,
+	.destroy_vm = kvmppc_core_destroy_vm_hv,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
+	.emulate_op = kvmppc_core_emulate_op_hv,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
+	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
+	.arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
+};
+
+static int kvmppc_book3s_init_hv(void)
 {
-	kvm_exit();
+	int r;
+	/*
+	 * FIXME!! Do we need to check on all CPUs?
+	 */
+	r = kvmppc_core_check_processor_compat_hv();
+	if (r < 0)
+		return r;
+
+	kvm_ops_hv.owner = THIS_MODULE;
+	kvmppc_hv_ops = &kvm_ops_hv;
+
+	r = kvmppc_mmu_hv_init();
+	return r;
 }
 
-module_init(kvmppc_book3s_hv_init);
-module_exit(kvmppc_book3s_hv_exit);
+static void kvmppc_book3s_exit_hv(void)
+{
+	kvmppc_hv_ops = NULL;
+}
+
+module_init(kvmppc_book3s_init_hv);
+module_exit(kvmppc_book3s_exit_hv);
+MODULE_LICENSE("GPL");
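The module init/exit pair no longer brackets kvm_init()/kvm_exit(); that responsibility moves to the common book3s module, and the HV module now just checks processor compatibility and publishes its ops table through kvmppc_hv_ops. A simplified, compilable model of that handshake (types reduced to the minimum):

    #include <assert.h>
    #include <stddef.h>

    struct kvmppc_ops {
            int (*check_compat)(void);
            /* ... vcpu_create, vcpu_run, etc. ... */
    };

    /* In the real code this pointer lives in the common book3s code. */
    static struct kvmppc_ops *kvmppc_hv_ops;

    static int hv_check_compat(void) { return 0; /* pretend HV mode */ }

    static struct kvmppc_ops kvm_ops_hv = {
            .check_compat = hv_check_compat,
    };

    static int book3s_init_hv(void)
    {
            if (kvm_ops_hv.check_compat())
                    return -1;              /* not an HV-capable CPU */
            kvmppc_hv_ops = &kvm_ops_hv;    /* register with common code */
            return 0;
    }

    static void book3s_exit_hv(void)
    {
            kvmppc_hv_ops = NULL;           /* unregister */
    }

    int main(void)
    {
            assert(book3s_init_hv() == 0 && kvmppc_hv_ops != NULL);
            book3s_exit_hv();
            assert(kvmppc_hv_ops == NULL);
            return 0;
    }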
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 37f1cc4..928142c 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -158,9 +158,6 @@
  * Interrupts are enabled again at this point.
  */
 
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
 	/*
 	 * Register usage at this point:
 	 *
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c71103b..bc8de75 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,30 +33,6 @@
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
 #endif
 
-/*****************************************************************************
- *                                                                           *
- *        Real Mode handlers that need to be in the linear mapping           *
- *                                                                           *
- ****************************************************************************/
-
-	.globl	kvmppc_skip_interrupt
-kvmppc_skip_interrupt:
-	mfspr	r13,SPRN_SRR0
-	addi	r13,r13,4
-	mtspr	SPRN_SRR0,r13
-	GET_SCRATCH0(r13)
-	rfid
-	b	.
-
-	.globl	kvmppc_skip_Hinterrupt
-kvmppc_skip_Hinterrupt:
-	mfspr	r13,SPRN_HSRR0
-	addi	r13,r13,4
-	mtspr	SPRN_HSRR0,r13
-	GET_SCRATCH0(r13)
-	hrfid
-	b	.
-
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -66,8 +42,11 @@
  * LR = return address to continue at after eventually re-enabling MMU
  */
 _GLOBAL(kvmppc_hv_entry_trampoline)
+	mflr	r0
+	std	r0, PPC_LR_STKOFF(r1)
+	stdu	r1, -112(r1)
 	mfmsr	r10
-	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
 	li	r0,MSR_RI
 	andc	r0,r10,r0
 	li	r6,MSR_IR | MSR_DR
@@ -77,11 +56,103 @@
 	mtsrr1	r6
 	RFI
 
-/******************************************************************************
- *                                                                            *
- *                               Entry code                                   *
- *                                                                            *
- *****************************************************************************/
+kvmppc_call_hv_entry:
+	bl	kvmppc_hv_entry
+
+	/* Back from guest - restore host state and return to caller */
+
+	/* Restore host DABR and DABRX */
+	ld	r5,HSTATE_DABR(r13)
+	li	r6,7
+	mtspr	SPRN_DABR,r5
+	mtspr	SPRN_DABRX,r6
+
+	/* Restore SPRG3 */
+	ld	r3,PACA_SPRG3(r13)
+	mtspr	SPRN_SPRG3,r3
+
+	/*
+	 * Reload DEC.  HDEC interrupts were disabled when
+	 * we reloaded the host's LPCR value.
+	 */
+	ld	r3, HSTATE_DECEXP(r13)
+	mftb	r4
+	subf	r4, r4, r3
+	mtspr	SPRN_DEC, r4
+
+	/* Reload the host's PMU registers */
+	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
+	lbz	r4, LPPACA_PMCINUSE(r3)
+	cmpwi	r4, 0
+	beq	23f			/* skip if not */
+	lwz	r3, HSTATE_PMC(r13)
+	lwz	r4, HSTATE_PMC + 4(r13)
+	lwz	r5, HSTATE_PMC + 8(r13)
+	lwz	r6, HSTATE_PMC + 12(r13)
+	lwz	r8, HSTATE_PMC + 16(r13)
+	lwz	r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+	lwz	r10, HSTATE_PMC + 24(r13)
+	lwz	r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr	SPRN_PMC1, r3
+	mtspr	SPRN_PMC2, r4
+	mtspr	SPRN_PMC3, r5
+	mtspr	SPRN_PMC4, r6
+	mtspr	SPRN_PMC5, r8
+	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	ld	r3, HSTATE_MMCR(r13)
+	ld	r4, HSTATE_MMCR + 8(r13)
+	ld	r5, HSTATE_MMCR + 16(r13)
+	mtspr	SPRN_MMCR1, r4
+	mtspr	SPRN_MMCRA, r5
+	mtspr	SPRN_MMCR0, r3
+	isync
+23:
+
+	/*
+	 * For external and machine check interrupts, we need
+	 * to call the Linux handler to process the interrupt.
+	 * We do that by jumping to absolute address 0x500 for
+	 * external interrupts, or the machine_check_fwnmi label
+	 * for machine checks (since firmware might have patched
+	 * the vector area at 0x200).  The [h]rfid at the end of the
+	 * handler will return to the book3s_hv_interrupts.S code.
+	 * For other interrupts we do the rfid to get back
+	 * to the book3s_hv_interrupts.S code here.
+	 */
+	ld	r8, 112+PPC_LR_STKOFF(r1)
+	addi	r1, r1, 112
+	ld	r7, HSTATE_HOST_MSR(r13)
+
+	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
+	beq	11f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* RFI into the highmem handler, or branch to interrupt handler */
+	mfmsr	r6
+	li	r0, MSR_RI
+	andc	r6, r6, r0
+	mtmsrd	r6, 1			/* Clear RI in MSR */
+	mtsrr0	r8
+	mtsrr1	r7
+	beqa	0x500			/* external interrupt (PPC970) */
+	beq	cr1, 13f		/* machine check */
+	RFI
+
+	/* On POWER7, we have external interrupts set to use HSRR0/1 */
+11:	mtspr	SPRN_HSRR0, r8
+	mtspr	SPRN_HSRR1, r7
+	ba	0x500
+
+13:	b	machine_check_fwnmi
+
 
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
@@ -137,7 +208,7 @@
 	cmpdi	r4,0
 	/* if we have no vcpu to run, go back to sleep */
 	beq	kvm_no_guest
-	b	kvmppc_hv_entry
+	b	30f
 
 27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
 	b	kvm_no_guest
@@ -147,6 +218,57 @@
 	stw	r8,HSTATE_SAVED_XIRR(r13)
 	b	kvm_no_guest
 
+30:	bl	kvmppc_hv_entry
+
+	/* Back from the guest, go back to nap */
+	/* Clear our vcpu pointer so we don't come back in early */
+	li	r0, 0
+	std	r0, HSTATE_KVM_VCPU(r13)
+	lwsync
+	/* Clear any pending IPI - we're an offline thread */
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r7, XICS_XIRR
+	lwzcix	r3, r5, r7		/* ack any pending interrupt */
+	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
+	beq	37f
+	sync
+	li	r0, 0xff
+	li	r6, XICS_MFRR
+	stbcix	r0, r5, r6		/* clear the IPI */
+	stwcix	r3, r5, r7		/* EOI it */
+37:	sync
+
+	/* increment the nap count and then go to nap mode */
+	ld	r4, HSTATE_KVM_VCORE(r13)
+	addi	r4, r4, VCORE_NAP_COUNT
+	lwsync				/* make previous updates visible */
+51:	lwarx	r3, 0, r4
+	addi	r3, r3, 1
+	stwcx.	r3, 0, r4
+	bne	51b
+
+kvm_no_guest:
+	li	r0, KVM_HWTHREAD_IN_NAP
+	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+	li	r3, LPCR_PECE0
+	mfspr	r4, SPRN_LPCR
+	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+	mtspr	SPRN_LPCR, r4
+	isync
+	std	r0, HSTATE_SCRATCH0(r13)
+	ptesync
+	ld	r0, HSTATE_SCRATCH0(r13)
+1:	cmpd	r0, r0
+	bne	1b
+	nap
+	b	.
+
+/******************************************************************************
+ *                                                                            *
+ *                               Entry code                                   *
+ *                                                                            *
+ *****************************************************************************/
+
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -159,7 +281,8 @@
 	 * all other volatile GPRS = free
 	 */
 	mflr	r0
-	std	r0, HSTATE_VMHANDLER(r13)
+	std	r0, PPC_LR_STKOFF(r1)
+	stdu	r1, -112(r1)
 
 	/* Set partition DABR */
 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -200,8 +323,12 @@
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -254,22 +381,15 @@
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 
-	/* Increment yield count if they have a VPA */
-	ld	r3, VCPU_VPA(r4)
-	cmpdi	r3, 0
-	beq	25f
-	lwz	r5, LPPACA_YIELDCOUNT(r3)
-	addi	r5, r5, 1
-	stw	r5, LPPACA_YIELDCOUNT(r3)
-	li	r6, 1
-	stb	r6, VCPU_VPA_DIRTY(r4)
-25:
 	/* Load up DAR and DSISR */
 	ld	r5, VCPU_DAR(r4)
 	lwz	r6, VCPU_DSISR(r4)
 	mtspr	SPRN_DAR, r5
 	mtspr	SPRN_DSISR, r6
 
+	li	r6, KVM_GUEST_MODE_HOST_HV
+	stb	r6, HSTATE_IN_GUEST(r13)
+
 BEGIN_FTR_SECTION
 	/* Restore AMR and UAMOR, set AMOR to all 1s */
 	ld	r5,VCPU_AMR(r4)
@@ -343,7 +463,28 @@
 	bdnz	28b
 	ptesync
 
-22:	li	r0,1
+	/* Add timebase offset onto timebase */
+22:	ld	r8,VCORE_TB_OFFSET(r5)
+	cmpdi	r8,0
+	beq	37f
+	mftb	r6		/* current host timebase */
+	add	r8,r8,r6
+	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
+	mftb	r7		/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	37f
+	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+	/* Load guest PCR value to select appropriate compat mode */
+37:	ld	r7, VCORE_PCR(r5)
+	cmpdi	r7, 0
+	beq	38f
+	mtspr	SPRN_PCR, r7
+38:
+	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
 
@@ -353,12 +494,22 @@
 	beq	20b
 
 	/* Set LPCR and RMOR. */
-10:	ld	r8,KVM_LPCR(r9)
+10:	ld	r8,VCORE_LPCR(r5)
 	mtspr	SPRN_LPCR,r8
 	ld	r8,KVM_RMOR(r9)
 	mtspr	SPRN_RMOR,r8
 	isync
 
+	/* Increment yield count if they have a VPA */
+	ld	r3, VCPU_VPA(r4)
+	cmpdi	r3, 0
+	beq	25f
+	lwz	r5, LPPACA_YIELDCOUNT(r3)
+	addi	r5, r5, 1
+	stw	r5, LPPACA_YIELDCOUNT(r3)
+	li	r6, 1
+	stb	r6, VCPU_VPA_DIRTY(r4)
+25:
 	/* Check if HDEC expires soon */
 	mfspr	r3,SPRN_HDEC
 	cmpwi	r3,10
@@ -405,7 +556,8 @@
 	bne	24b
 	isync
 
-	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
 	li	r0,0x18f
 	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
 	or	r0,r7,r0
@@ -541,7 +693,7 @@
 	mtspr	SPRN_HSRR1,r11
 
 	/* Activate guest mode, so faults get handled by KVM */
-	li	r9, KVM_GUEST_MODE_GUEST
+	li	r9, KVM_GUEST_MODE_GUEST_HV
 	stb	r9, HSTATE_IN_GUEST(r13)
 
 	/* Enter guest */
@@ -550,13 +702,15 @@
 	ld	r5, VCPU_CFAR(r4)
 	mtspr	SPRN_CFAR, r5
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+	ld	r0, VCPU_PPR(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	ld	r5, VCPU_LR(r4)
 	lwz	r6, VCPU_CR(r4)
 	mtlr	r5
 	mtcr	r6
 
-	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r1, VCPU_GPR(R1)(r4)
 	ld	r2, VCPU_GPR(R2)(r4)
 	ld	r3, VCPU_GPR(R3)(r4)
@@ -570,6 +724,10 @@
 	ld	r12, VCPU_GPR(R12)(r4)
 	ld	r13, VCPU_GPR(R13)(r4)
 
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PPR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
 
 	hrfid
@@ -584,8 +742,8 @@
 /*
  * We come here from the first-level interrupt handlers.
  */
-	.globl	kvmppc_interrupt
-kvmppc_interrupt:
+	.globl	kvmppc_interrupt_hv
+kvmppc_interrupt_hv:
 	/*
 	 * Register contents:
 	 * R12		= interrupt vector
@@ -595,6 +753,19 @@
 	 */
 	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
 	std	r9, HSTATE_HOST_R2(r13)
+
+	lbz	r9, HSTATE_IN_GUEST(r13)
+	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
+	beq	kvmppc_bad_host_intr
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	cmpwi	r9, KVM_GUEST_MODE_GUEST
+	ld	r9, HSTATE_HOST_R2(r13)
+	beq	kvmppc_interrupt_pr
+#endif
+	/* We're now back in the host but in guest MMU context */
+	li	r9, KVM_GUEST_MODE_HOST_HV
+	stb	r9, HSTATE_IN_GUEST(r13)
+
 	ld	r9, HSTATE_KVM_VCPU(r13)
 
 	/* Save registers */
@@ -620,6 +791,10 @@
 	ld	r3, HSTATE_CFAR(r13)
 	std	r3, VCPU_CFAR(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+	ld	r4, HSTATE_PPR(r13)
+	std	r4, VCPU_PPR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	/* Restore R1/R2 so we can handle faults */
 	ld	r1, HSTATE_HOST_R1(r13)
@@ -642,10 +817,6 @@
 	std	r3, VCPU_GPR(R13)(r9)
 	std	r4, VCPU_LR(r9)
 
-	/* Unset guest mode */
-	li	r0, KVM_GUEST_MODE_NONE
-	stb	r0, HSTATE_IN_GUEST(r13)
-
 	stw	r12,VCPU_TRAP(r9)
 
 	/* Save HEIR (HV emulation assist reg) in last_inst
@@ -696,46 +867,11 @@
 	 * set, we know the host wants us out so let's do it now
 	 */
 do_ext_interrupt:
-	lbz	r0, HSTATE_HOST_IPI(r13)
-	cmpwi	r0, 0
-	bne	ext_interrupt_to_host
-
-	/* Now read the interrupt from the ICP */
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r7, XICS_XIRR
-	cmpdi	r5, 0
-	beq-	ext_interrupt_to_host
-	lwzcix	r3, r5, r7
-	rlwinm.	r0, r3, 0, 0xffffff
-	sync
-	beq	3f		/* if nothing pending in the ICP */
-
-	/* We found something in the ICP...
-	 *
-	 * If it's not an IPI, stash it in the PACA and return to
-	 * the host, we don't (yet) handle directing real external
-	 * interrupts directly to the guest
-	 */
-	cmpwi	r0, XICS_IPI
-	bne	ext_stash_for_host
-
-	/* It's an IPI, clear the MFRR and EOI it */
-	li	r0, 0xff
-	li	r6, XICS_MFRR
-	stbcix	r0, r5, r6		/* clear the IPI */
-	stwcix	r3, r5, r7		/* EOI it */
-	sync
-
-	/* We need to re-check host IPI now in case it got set in the
-	 * meantime. If it's clear, we bounce the interrupt to the
-	 * guest
-	 */
-	lbz	r0, HSTATE_HOST_IPI(r13)
-	cmpwi	r0, 0
-	bne-	1f
+	bl	kvmppc_read_intr
+	cmpdi	r3, 0
+	bgt	ext_interrupt_to_host
 
 	/* All right, looks like an IPI for the guest; we need to set MER */
-3:
 	/* Check if any CPU is heading out to the host, if so head out too */
 	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
@@ -764,27 +900,9 @@
 	mtspr	SPRN_LPCR, r8
 	b	fast_guest_return
 
-	/* We raced with the host, we need to resend that IPI, bummer */
-1:	li	r0, IPI_PRIORITY
-	stbcix	r0, r5, r6		/* set the IPI */
-	sync
-	b	ext_interrupt_to_host
-
-ext_stash_for_host:
-	/* It's not an IPI and it's for the host, stash it in the PACA
-	 * before exit, it will be picked up by the host ICP driver
-	 */
-	stw	r3, HSTATE_SAVED_XIRR(r13)
 ext_interrupt_to_host:
 
 guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
-	/* Save DEC */
-	mfspr	r5,SPRN_DEC
-	mftb	r6
-	extsw	r5,r5
-	add	r5,r5,r6
-	std	r5,VCPU_DEC_EXPIRES(r9)
-
 	/* Save more register state  */
 	mfdar	r6
 	mfdsisr	r7
@@ -954,7 +1072,30 @@
 	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
 	mtspr	SPRN_LPID,r7
 	isync
-	li	r0,0
+
+	/* Subtract timebase offset from timebase */
+	ld	r8,VCORE_TB_OFFSET(r5)
+	cmpdi	r8,0
+	beq	17f
+	mftb	r6			/* current host timebase */
+	subf	r8,r8,r6
+	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
+	mftb	r7			/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	17f
+	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+	/* Reset PCR */
+17:	ld	r0, VCORE_PCR(r5)
+	cmpdi	r0, 0
+	beq	18f
+	li	r0, 0
+	mtspr	SPRN_PCR, r0
+18:
+	/* Signal secondary CPUs to continue */
 	stb	r0,VCORE_IN_GUEST(r5)
 	lis	r8,0x7fff		/* MAX_INT@h */
 	mtspr	SPRN_HDEC,r8
@@ -1052,6 +1193,13 @@
 1:	addi	r8,r8,16
 	.endr
 
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
@@ -1062,6 +1210,10 @@
 	mtspr	SPRN_AMR,r6
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+	/* Unset guest mode */
+	li	r0, KVM_GUEST_MODE_NONE
+	stb	r0, HSTATE_IN_GUEST(r13)
+
 	/* Switch DSCR back to host value */
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
@@ -1134,9 +1286,13 @@
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
@@ -1158,103 +1314,30 @@
 	stw	r11, VCPU_PMC + 28(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
+	ld	r0, 112+PPC_LR_STKOFF(r1)
+	addi	r1, r1, 112
+	mtlr	r0
+	blr
+secondary_too_late:
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	HMT_LOW
+13:	lbz	r3,VCORE_IN_GUEST(r5)
+	cmpwi	r3,0
+	bne	13b
+	HMT_MEDIUM
+	li	r0, KVM_GUEST_MODE_NONE
+	stb	r0, HSTATE_IN_GUEST(r13)
+	ld	r11,PACA_SLBSHADOWPTR(r13)
 
-	/* Secondary threads go off to take a nap on POWER7 */
-BEGIN_FTR_SECTION
-	lwz	r0,VCPU_PTID(r9)
-	cmpwi	r0,0
-	bne	secondary_nap
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Restore host DABR and DABRX */
-	ld	r5,HSTATE_DABR(r13)
-	li	r6,7
-	mtspr	SPRN_DABR,r5
-	mtspr	SPRN_DABRX,r6
-
-	/* Restore SPRG3 */
-	ld	r3,PACA_SPRG3(r13)
-	mtspr	SPRN_SPRG3,r3
-
-	/*
-	 * Reload DEC.  HDEC interrupts were disabled when
-	 * we reloaded the host's LPCR value.
-	 */
-	ld	r3, HSTATE_DECEXP(r13)
-	mftb	r4
-	subf	r4, r4, r3
-	mtspr	SPRN_DEC, r4
-
-	/* Reload the host's PMU registers */
-	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
-	lbz	r4, LPPACA_PMCINUSE(r3)
-	cmpwi	r4, 0
-	beq	23f			/* skip if not */
-	lwz	r3, HSTATE_PMC(r13)
-	lwz	r4, HSTATE_PMC + 4(r13)
-	lwz	r5, HSTATE_PMC + 8(r13)
-	lwz	r6, HSTATE_PMC + 12(r13)
-	lwz	r8, HSTATE_PMC + 16(r13)
-	lwz	r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
-	lwz	r10, HSTATE_PMC + 24(r13)
-	lwz	r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_PMC1, r3
-	mtspr	SPRN_PMC2, r4
-	mtspr	SPRN_PMC3, r5
-	mtspr	SPRN_PMC4, r6
-	mtspr	SPRN_PMC5, r8
-	mtspr	SPRN_PMC6, r9
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PMC7, r10
-	mtspr	SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	ld	r3, HSTATE_MMCR(r13)
-	ld	r4, HSTATE_MMCR + 8(r13)
-	ld	r5, HSTATE_MMCR + 16(r13)
-	mtspr	SPRN_MMCR1, r4
-	mtspr	SPRN_MMCRA, r5
-	mtspr	SPRN_MMCR0, r3
-	isync
-23:
-	/*
-	 * For external and machine check interrupts, we need
-	 * to call the Linux handler to process the interrupt.
-	 * We do that by jumping to absolute address 0x500 for
-	 * external interrupts, or the machine_check_fwnmi label
-	 * for machine checks (since firmware might have patched
-	 * the vector area at 0x200).  The [h]rfid at the end of the
-	 * handler will return to the book3s_hv_interrupts.S code.
-	 * For other interrupts we do the rfid to get back
-	 * to the book3s_hv_interrupts.S code here.
-	 */
-	ld	r8, HSTATE_VMHANDLER(r13)
-	ld	r7, HSTATE_HOST_MSR(r13)
-
-	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
-	beq	11f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* RFI into the highmem handler, or branch to interrupt handler */
-	mfmsr	r6
-	li	r0, MSR_RI
-	andc	r6, r6, r0
-	mtmsrd	r6, 1			/* Clear RI in MSR */
-	mtsrr0	r8
-	mtsrr1	r7
-	beqa	0x500			/* external interrupt (PPC970) */
-	beq	cr1, 13f		/* machine check */
-	RFI
-
-	/* On POWER7, we have external interrupts set to use HSRR0/1 */
-11:	mtspr	SPRN_HSRR0, r8
-	mtspr	SPRN_HSRR1, r7
-	ba	0x500
-
-13:	b	machine_check_fwnmi
+	.rept	SLB_NUM_BOLTED
+	ld	r5,SLBSHADOW_SAVEAREA(r11)
+	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
+	andis.	r7,r5,SLB_ESID_V@h
+	beq	1f
+	slbmte	r6,r5
+1:	addi	r11,r11,16
+	.endr
+	b	22b
 
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1333,7 +1416,7 @@
 	stw	r8, VCPU_LAST_INST(r9)
 
 	/* Unset guest mode. */
-	li	r0, KVM_GUEST_MODE_NONE
+	li	r0, KVM_GUEST_MODE_HOST_HV
 	stb	r0, HSTATE_IN_GUEST(r13)
 	b	guest_exit_cont
 
@@ -1701,67 +1784,70 @@
 	rotldi	r11, r11, 63
 	b	fast_interrupt_c_return
 
-secondary_too_late:
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	HMT_LOW
-13:	lbz	r3,VCORE_IN_GUEST(r5)
-	cmpwi	r3,0
-	bne	13b
-	HMT_MEDIUM
-	ld	r11,PACA_SLBSHADOWPTR(r13)
+/*
+ * Determine what sort of external interrupt is pending (if any).
+ * Returns:
+ *	0 if no interrupt is pending
+ *	1 if an interrupt is pending that needs to be handled by the host
+ *	-1 if there was a guest wakeup IPI (which has now been cleared)
+ */
+kvmppc_read_intr:
+	/* see if a host IPI is pending */
+	li	r3, 1
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne	1f
 
-	.rept	SLB_NUM_BOLTED
-	ld	r5,SLBSHADOW_SAVEAREA(r11)
-	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
-	andis.	r7,r5,SLB_ESID_V@h
-	beq	1f
-	slbmte	r6,r5
-1:	addi	r11,r11,16
-	.endr
-
-secondary_nap:
-	/* Clear our vcpu pointer so we don't come back in early */
-	li	r0, 0
-	std	r0, HSTATE_KVM_VCPU(r13)
-	lwsync
-	/* Clear any pending IPI - assume we're a secondary thread */
-	ld	r5, HSTATE_XICS_PHYS(r13)
+	/* Now read the interrupt from the ICP */
+	ld	r6, HSTATE_XICS_PHYS(r13)
 	li	r7, XICS_XIRR
-	lwzcix	r3, r5, r7		/* ack any pending interrupt */
-	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
-	beq	37f
+	cmpdi	r6, 0
+	beq-	1f
+	lwzcix	r0, r6, r7
+	rlwinm.	r3, r0, 0, 0xffffff
 	sync
-	li	r0, 0xff
-	li	r6, XICS_MFRR
-	stbcix	r0, r5, r6		/* clear the IPI */
-	stwcix	r3, r5, r7		/* EOI it */
-37:	sync
+	beq	1f			/* if nothing pending in the ICP */
 
-	/* increment the nap count and then go to nap mode */
-	ld	r4, HSTATE_KVM_VCORE(r13)
-	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
-51:	lwarx	r3, 0, r4
-	addi	r3, r3, 1
-	stwcx.	r3, 0, r4
-	bne	51b
+	/* We found something in the ICP...
+	 *
+	 * If it's not an IPI, stash it in the PACA and return to
+	 * the host; we don't (yet) handle directing real external
+	 * interrupts directly to the guest.
+	 */
+	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
+	li	r3, 1
+	bne	42f
 
-kvm_no_guest:
-	li	r0, KVM_HWTHREAD_IN_NAP
-	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+	/* It's an IPI, clear the MFRR and EOI it */
+	li	r3, 0xff
+	li	r8, XICS_MFRR
+	stbcix	r3, r6, r8		/* clear the IPI */
+	stwcix	r0, r6, r7		/* EOI it */
+	sync
 
-	li	r3, LPCR_PECE0
-	mfspr	r4, SPRN_LPCR
-	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
-	mtspr	SPRN_LPCR, r4
-	isync
-	std	r0, HSTATE_SCRATCH0(r13)
-	ptesync
-	ld	r0, HSTATE_SCRATCH0(r13)
-1:	cmpd	r0, r0
-	bne	1b
-	nap
-	b	.
+	/* We need to re-check host IPI now in case it got set in the
+	 * meantime. If it's clear, we bounce the interrupt to the
+	 * guest
+	 */
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne-	43f
+
+	/* OK, it's an IPI for us */
+	li	r3, -1
+1:	blr
+
+42:	/* It's not an IPI and it's for the host; stash it in the PACA
+	 * before exit, where it will be picked up by the host ICP driver.
+	 */
+	stw	r0, HSTATE_SAVED_XIRR(r13)
+	b	1b
+
+43:	/* We raced with the host; we need to resend that IPI, bummer */
+	li	r0, IPI_PRIORITY
+	stbcix	r0, r6, r8		/* set the IPI */
+	sync
+	b	1b
 
 /*
  * Save away FP, VMX and VSX registers.
@@ -1879,3 +1965,11 @@
 	lwz	r7,VCPU_VRSAVE(r4)
 	mtspr	SPRN_VRSAVE,r7
 	blr
+
+/*
+ * We come here if we get any exception or interrupt while
+ * executing host real-mode code in guest MMU context.
+ * For now just spin, but we should do something better.
+ */
+kvmppc_bad_host_intr:
+	b	.
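kvmppc_read_intr() (added above) replaces the open-coded ICP poking in do_ext_interrupt and the secondary-nap path with one routine and a three-way return code. A compilable C model of its decision tree, with the XICS and PACA accesses reduced to stub state:

    #include <assert.h>

    /* Stub state standing in for the real XICS/PACA accesses. */
    static int host_ipi;                    /* HSTATE_HOST_IPI */
    static unsigned int icp_xirr;           /* what lwzcix would return */
    static unsigned int saved_xirr;         /* HSTATE_SAVED_XIRR */

    #define XICS_IPI_SRC 2u                 /* IPI source number */

    static int read_intr(void)
    {
            unsigned int xirr, src;

            if (host_ipi)
                    return 1;               /* host wants us out */
            xirr = icp_xirr;
            src = xirr & 0xffffff;
            if (!src)
                    return 0;               /* nothing pending in the ICP */
            if (src != XICS_IPI_SRC) {
                    saved_xirr = xirr;      /* stash for the host ICP driver */
                    return 1;
            }
            icp_xirr = 0;                   /* clear the MFRR and EOI it */
            if (host_ipi)                   /* raced with the host; the */
                    return 1;               /* real code also re-sends the IPI */
            return -1;                      /* guest wakeup IPI, now cleared */
    }

    int main(void)
    {
            assert(read_intr() == 0);       /* idle ICP */
            icp_xirr = XICS_IPI_SRC;
            assert(read_intr() == -1);      /* IPI meant for the guest */
            return 0;
    }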
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 17cfae5..f4dd041 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -26,8 +26,12 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define FUNC(name) 		GLUE(.,name)
+#define GET_SHADOW_VCPU(reg)    addi	reg, r13, PACA_SVCPU
+
 #elif defined(CONFIG_PPC_BOOK3S_32)
 #define FUNC(name)		name
+#define GET_SHADOW_VCPU(reg)	lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
 #endif /* CONFIG_PPC_BOOK3S_XX */
 
 #define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@
 	VCPU_LOAD_NVGPRS(r4)
 
 kvm_start_lightweight:
+	/* Copy registers into shadow vcpu so we can access them in real mode */
+	GET_SHADOW_VCPU(r3)
+	bl	FUNC(kvmppc_copy_to_svcpu)
+	nop
+	REST_GPR(4, r1)
 
 #ifdef CONFIG_PPC_BOOK3S_64
+	/* Get the dcbz32 flag */
 	PPC_LL	r3, VCPU_HFLAGS(r4)
 	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
 	stb	r3, HSTATE_RESTORE_HID5(r13)
@@ -111,9 +121,6 @@
  *
  */
 
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
 	/*
 	 * Register usage at this point:
 	 *
@@ -125,18 +132,31 @@
 	 *
 	 */
 
-	/* R7 = vcpu */
-	PPC_LL	r7, GPR4(r1)
+	/* Transfer reg values from shadow vcpu back to vcpu struct */
+	/* On 64-bit, interrupts are still off at this point */
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+	GET_SHADOW_VCPU(r4)
+	bl	FUNC(kvmppc_copy_from_svcpu)
+	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
+	/* Re-enable interrupts */
+	ld	r3, HSTATE_HOST_MSR(r13)
+	ori	r3, r3, MSR_EE
+	MTMSR_EERI(r3)
+
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+	/* R7 = vcpu */
+	PPC_LL	r7, GPR4(r1)
+
 	PPC_STL	r14, VCPU_GPR(R14)(r7)
 	PPC_STL	r15, VCPU_GPR(R15)(r7)
 	PPC_STL	r16, VCPU_GPR(R16)(r7)
@@ -161,7 +181,7 @@
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
-	bl	FUNC(kvmppc_handle_exit)
+	bl	FUNC(kvmppc_handle_exit_pr)
 
 	/* If RESUME_GUEST, get back in the loop */
 	cmpwi	r3, RESUME_GUEST
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index da8b13c..5a1ab12 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -28,7 +28,7 @@
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
 
-#include "trace.h"
+#include "trace_pr.h"
 
 #define PTE_SIZE	12
 
@@ -56,6 +56,14 @@
 		       HPTEG_HASH_BITS_VPTE_LONG);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
+{
+	return hash_64((vpage & 0xffffffff0ULL) >> 4,
+		       HPTEG_HASH_BITS_VPTE_64K);
+}
+#endif
+
 void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	u64 index;
@@ -83,6 +91,15 @@
 	hlist_add_head_rcu(&pte->list_vpte_long,
 			   &vcpu3s->hpte_hash_vpte_long[index]);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Add to vPTE_64k list */
+	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
+	hlist_add_head_rcu(&pte->list_vpte_64k,
+			   &vcpu3s->hpte_hash_vpte_64k[index]);
+#endif
+
+	vcpu3s->hpte_cache_count++;
+
 	spin_unlock(&vcpu3s->mmu_lock);
 }
 
@@ -113,10 +130,13 @@
 	hlist_del_init_rcu(&pte->list_pte_long);
 	hlist_del_init_rcu(&pte->list_vpte);
 	hlist_del_init_rcu(&pte->list_vpte_long);
+#ifdef CONFIG_PPC_BOOK3S_64
+	hlist_del_init_rcu(&pte->list_vpte_64k);
+#endif
+	vcpu3s->hpte_cache_count--;
 
 	spin_unlock(&vcpu3s->mmu_lock);
 
-	vcpu3s->hpte_cache_count--;
 	call_rcu(&pte->rcu_head, free_pte_rcu);
 }
 
@@ -219,6 +239,29 @@
 	rcu_read_unlock();
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Flush with mask 0xffffffff0 */
+static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+	struct hlist_head *list;
+	struct hpte_cache *pte;
+	u64 vp_mask = 0xffffffff0ULL;
+
+	list = &vcpu3s->hpte_hash_vpte_64k[
+		kvmppc_mmu_hash_vpte_64k(guest_vp)];
+
+	rcu_read_lock();
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+
+	rcu_read_unlock();
+}
+#endif
+
 /* Flush with mask 0xffffff000 */
 static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
@@ -249,6 +292,11 @@
 	case 0xfffffffffULL:
 		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
 		break;
+#ifdef CONFIG_PPC_BOOK3S_64
+	case 0xffffffff0ULL:
+		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
+		break;
+#endif
 	case 0xffffff000ULL:
 		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
 		break;
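The new 0xffffffff0 case slots a 64k granularity between the existing exact-match and long-range flushes: vpage is a 4k-granular virtual page number, so clearing its low 4 bits groups the 16 4k pages that make up one 64k page, and the new list_vpte_64k hash chain lets those be found without scanning a whole long-range bucket. A sketch of the mask-to-granularity mapping:

    #include <stdio.h>

    /* vp_mask selects how much of the virtual page number must match. */
    static const char *flush_class(unsigned long long vp_mask)
    {
            switch (vp_mask) {
            case 0xfffffffffULL: return "exact: one 4k vpage";
            case 0xffffffff0ULL: return "64k: the 16 4k vpages of one 64k page";
            case 0xffffff000ULL: return "long: a 16M run of vpages";
            default:             return "flush everything";
            }
    }

    int main(void)
    {
            /* 0x12345 and 0x1234f share a 64k page; 0x12350 does not. */
            printf("%s\n", flush_class(0xffffffff0ULL));
            printf("%d\n", (0x12345ULL & 0xffffffff0ULL) ==
                           (0x1234fULL & 0xffffffff0ULL));      /* 1 */
            printf("%d\n", (0x12345ULL & 0xffffffff0ULL) ==
                           (0x12350ULL & 0xffffffff0ULL));      /* 0 */
            return 0;
    }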
@@ -285,15 +333,19 @@
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
 
-	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
-	vcpu3s->hpte_cache_count++;
-
 	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
 		kvmppc_mmu_pte_flush_all(vcpu);
 
+	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
+
 	return pte;
 }
 
+void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
+{
+	kmem_cache_free(hpte_cache, pte);
+}
+
 void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
 {
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
@@ -320,6 +372,10 @@
 				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
 	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
 				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
+#ifdef CONFIG_PPC_BOOK3S_64
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
+#endif
 
 	spin_lock_init(&vcpu3s->mmu_lock);
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c0b48f96..fe14ca3 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
-#include "trace.h"
+#include "book3s.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_pr.h"
 
 /* #define EXIT_DEBUG */
 /* #define DEBUG_EXT */
@@ -56,29 +60,25 @@
 #define HW_PAGE_SIZE PAGE_SIZE
 #endif
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
-	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
-	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
 #endif
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
-	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
 #endif
@@ -87,7 +87,61 @@
 	vcpu->cpu = -1;
 }
 
-int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+/* Copy data needed by real-mode code from vcpu to shadow vcpu */
+void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+			  struct kvm_vcpu *vcpu)
+{
+	svcpu->gpr[0] = vcpu->arch.gpr[0];
+	svcpu->gpr[1] = vcpu->arch.gpr[1];
+	svcpu->gpr[2] = vcpu->arch.gpr[2];
+	svcpu->gpr[3] = vcpu->arch.gpr[3];
+	svcpu->gpr[4] = vcpu->arch.gpr[4];
+	svcpu->gpr[5] = vcpu->arch.gpr[5];
+	svcpu->gpr[6] = vcpu->arch.gpr[6];
+	svcpu->gpr[7] = vcpu->arch.gpr[7];
+	svcpu->gpr[8] = vcpu->arch.gpr[8];
+	svcpu->gpr[9] = vcpu->arch.gpr[9];
+	svcpu->gpr[10] = vcpu->arch.gpr[10];
+	svcpu->gpr[11] = vcpu->arch.gpr[11];
+	svcpu->gpr[12] = vcpu->arch.gpr[12];
+	svcpu->gpr[13] = vcpu->arch.gpr[13];
+	svcpu->cr  = vcpu->arch.cr;
+	svcpu->xer = vcpu->arch.xer;
+	svcpu->ctr = vcpu->arch.ctr;
+	svcpu->lr  = vcpu->arch.lr;
+	svcpu->pc  = vcpu->arch.pc;
+}
+
+/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	vcpu->arch.gpr[0] = svcpu->gpr[0];
+	vcpu->arch.gpr[1] = svcpu->gpr[1];
+	vcpu->arch.gpr[2] = svcpu->gpr[2];
+	vcpu->arch.gpr[3] = svcpu->gpr[3];
+	vcpu->arch.gpr[4] = svcpu->gpr[4];
+	vcpu->arch.gpr[5] = svcpu->gpr[5];
+	vcpu->arch.gpr[6] = svcpu->gpr[6];
+	vcpu->arch.gpr[7] = svcpu->gpr[7];
+	vcpu->arch.gpr[8] = svcpu->gpr[8];
+	vcpu->arch.gpr[9] = svcpu->gpr[9];
+	vcpu->arch.gpr[10] = svcpu->gpr[10];
+	vcpu->arch.gpr[11] = svcpu->gpr[11];
+	vcpu->arch.gpr[12] = svcpu->gpr[12];
+	vcpu->arch.gpr[13] = svcpu->gpr[13];
+	vcpu->arch.cr  = svcpu->cr;
+	vcpu->arch.xer = svcpu->xer;
+	vcpu->arch.ctr = svcpu->ctr;
+	vcpu->arch.lr  = svcpu->lr;
+	vcpu->arch.pc  = svcpu->pc;
+	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
+	vcpu->arch.fault_dar   = svcpu->fault_dar;
+	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
+	vcpu->arch.last_inst   = svcpu->last_inst;
+}
+
+static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
 {
 	int r = 1; /* Indicate we want to get back into the guest */
 
@@ -100,44 +154,69 @@
 }
 
 /************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+			     unsigned long end)
+{
+	long i;
+	struct kvm_vcpu *vcpu;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn, gfn+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+					      gfn_end << PAGE_SHIFT);
+	}
+}
+
+static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	trace_kvm_unmap_hva(hva);
 
-	/*
-	 * Flush all shadow tlb entries everywhere. This is slow, but
-	 * we are 100% sure that we catch the to be unmapped page
-	 */
-	kvm_flush_remote_tlbs(kvm);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
+				  unsigned long end)
 {
-	/* kvm_unmap_hva flushes everything anyways */
-	kvm_unmap_hva(kvm, start);
+	do_kvm_unmap_hva(kvm, start, end);
 
 	return 0;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	/* The page will get remapped properly on its next fault */
-	kvm_unmap_hva(kvm, hva);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }
 
 /*****************************************/
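do_kvm_unmap_hva() above is the PR replacement for the old flush-everything behaviour: it walks every memslot, intersects [start, end) with the slot's HVA range, converts the surviving span to guest frame numbers, and flushes only those. A self-contained model of the clipping arithmetic, with hva_to_gfn_memslot() reduced to the base_gfn offset calculation:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct slot { unsigned long userspace_addr, npages, base_gfn; };

    static void clip(const struct slot *s, unsigned long start,
                     unsigned long end)
    {
            unsigned long hva_start = start > s->userspace_addr ?
                                      start : s->userspace_addr;
            unsigned long slot_end = s->userspace_addr +
                                     (s->npages << PAGE_SHIFT);
            unsigned long hva_end = end < slot_end ? end : slot_end;
            unsigned long gfn, gfn_end;

            if (hva_start >= hva_end)
                    return;                 /* no overlap with this slot */
            gfn = s->base_gfn +
                  ((hva_start - s->userspace_addr) >> PAGE_SHIFT);
            /* round hva_end up so a partial page is still flushed */
            gfn_end = s->base_gfn +
                      ((hva_end + (1UL << PAGE_SHIFT) - 1 -
                        s->userspace_addr) >> PAGE_SHIFT);
            printf("flush gfns [%lu, %lu)\n", gfn, gfn_end);
    }

    int main(void)
    {
            struct slot s = { 0x100000, 4, 100 };   /* 4 pages at gfn 100 */

            clip(&s, 0x101000, 0x103000);           /* -> gfns [101, 103) */
            return 0;
    }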
@@ -159,7 +238,7 @@
 	vcpu->arch.shadow_msr = smsr;
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.shared->msr;
 
@@ -219,7 +298,7 @@
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	u32 host_pvr;
 
@@ -256,6 +335,23 @@
 	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
 		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
 
+	/*
+	 * If they're asking for POWER6 or later, set the flag
+	 * indicating that we can do multiple large page sizes
+	 * and 1TB segments.
+	 * Also set the flag that indicates that tlbie has the large
+	 * page bit in the RB operand instead of the instruction.
+	 */
+	switch (PVR_VER(pvr)) {
+	case PVR_POWER6:
+	case PVR_POWER7:
+	case PVR_POWER7p:
+	case PVR_POWER8:
+		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
+			BOOK3S_HFLAG_NEW_TLBIE;
+		break;
+	}
+
 #ifdef CONFIG_PPC_BOOK3S_32
 	/* 32 bit Book3S always has 32 byte dcbz */
 	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
@@ -334,6 +430,7 @@
 			    ulong eaddr, int vec)
 {
 	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+	bool iswrite = false;
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
@@ -344,10 +441,12 @@
 	u64 vsid;
 
 	relocated = data ? dr : ir;
+	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+		iswrite = true;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
-		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
 	} else {
 		pte.may_execute = true;
 		pte.may_read = true;
@@ -355,6 +454,7 @@
 		pte.raddr = eaddr & KVM_PAM;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
+		pte.page_size = MMU_PAGE_64K;
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -411,12 +507,20 @@
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+			/*
+			 * There is already a host HPTE there, presumably
+			 * a read-only one for a page the guest thinks
+			 * is writable, so get rid of it first.
+			 */
+			kvmppc_mmu_unmap_page(vcpu, &pte);
+		}
 		/* The guest's PTE is not mapped yet. Map on the host */
-		kvmppc_mmu_map_page(vcpu, &pte);
+		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
 		if (data)
 			vcpu->stat.sp_storage++;
 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
-			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
 			kvmppc_patch_dcbz(vcpu, &pte);
 	} else {
 		/* MMIO */
@@ -619,13 +723,15 @@
 
 	if (lost_ext & MSR_FP)
 		kvmppc_load_up_fpu();
+#ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC)
 		kvmppc_load_up_altivec();
+#endif
 	current->thread.regs->msr |= lost_ext;
 }
 
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                       unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int exit_nr)
 {
 	int r = RESUME_HOST;
 	int s;
@@ -643,25 +749,32 @@
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We mark segments as unused when invalidating them, so
 		 * treat the respective fault as a segment fault. */
-		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
 		if (shadow_srr1 & 0x40000000) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -682,25 +795,36 @@
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		u32 fault_dsisr = svcpu->fault_dsisr;
+		u32 fault_dsisr = vcpu->arch.fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We mark segments as unused when invalidating them, so
 		 * treat the respective fault as a segment fault. */
-		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, dar);
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[dar >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, dar);
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
-		/* The only case we need to handle is missing shadow PTEs */
-		if (fault_dsisr & DSISR_NOHPTE) {
+		/*
+		 * We need to handle missing shadow PTEs, and
+		 * protection faults due to us mapping a page read-only
+		 * when the guest thinks it is writable.
+		 */
+		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
 			vcpu->arch.shared->dar = dar;
 			vcpu->arch.shared->dsisr = fault_dsisr;
@@ -743,13 +867,10 @@
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
 	{
 		enum emulation_result er;
-		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		svcpu = svcpu_get(vcpu);
-		flags = svcpu->shadow_srr1 & 0x1f0000ull;
-		svcpu_put(svcpu);
+		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
 
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
 				r = RESUME_GUEST;
 				break;
@@ -881,9 +1002,7 @@
 		break;
 	default:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
-		svcpu_put(svcpu);
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
 			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-                                  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
@@ -947,13 +1066,13 @@
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-                                  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@
 	return 0;
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1012,7 +1132,8 @@
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1042,28 +1163,30 @@
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
-	if (!vcpu_book3s)
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu)
 		goto out;
 
-	vcpu_book3s->shadow_vcpu =
-		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
-	if (!vcpu_book3s->shadow_vcpu)
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+	if (!vcpu_book3s)
 		goto free_vcpu;
+	vcpu->arch.book3s = vcpu_book3s;
 
-	vcpu = &vcpu_book3s->vcpu;
+#ifdef CONFIG_KVM_BOOK3S_32
+	vcpu->arch.shadow_vcpu =
+		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
+	if (!vcpu->arch.shadow_vcpu)
+		goto free_vcpu3s;
+#endif
+
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
 		goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@
 	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* default to book3s_64 (970fx) */
+	/*
+	 * Default to the same as the host if we're on a sufficiently
+	 * recent machine that has 1TB segments;
+	 * otherwise default to PPC970FX.
+	 */
 	vcpu->arch.pvr = 0x3C0301;
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+		vcpu->arch.pvr = mfspr(SPRN_PVR);
 #else
 	/* default to book3s_32 (750) */
 	vcpu->arch.pvr = 0x84202;
 #endif
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
 	vcpu->arch.slb_nr = 64;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,24 +1225,31 @@
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-	kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+free_vcpu3s:
+#endif
 	vfree(vcpu_book3s);
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
-	kfree(vcpu_book3s->shadow_vcpu);
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+#endif
 	vfree(vcpu_book3s);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
 	struct thread_fp_state fp;
@@ -1216,8 +1352,8 @@
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-				      struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
@@ -1252,10 +1388,47 @@
 	return r;
 }
 
-#ifdef CONFIG_PPC64
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot)
 {
-	info->flags = KVM_PPC_1T_SEGMENTS;
+	return;
+}
+
+static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
+{
+	return 0;
+}
+
+static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
+{
+	return;
+}
+
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
+{
+	return;
+}
+
+static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
+					 unsigned long npages)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PPC64
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
+{
+	long int i;
+	struct kvm_vcpu *vcpu;
+
+	info->flags = 0;
 
 	/* SLB is always 64 entries */
 	info->slb_size = 64;
@@ -1266,53 +1439,49 @@
 	info->sps[0].enc[0].page_shift = 12;
 	info->sps[0].enc[0].pte_enc = 0;
 
+	/*
+	 * 64k large page size.
+	 * We only want to put this in if the CPUs we're emulating
+	 * support it, but unfortunately we don't have a vcpu easily
+	 * to hand here to test.  Just pick the first vcpu, and if
+	 * that doesn't exist yet, report the minimum capability,
+	 * i.e., no 64k pages.
+	 * 1T segment support goes along with 64k pages.
+	 */
+	i = 1;
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+		info->flags = KVM_PPC_1T_SEGMENTS;
+		info->sps[i].page_shift = 16;
+		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
+		info->sps[i].enc[0].page_shift = 16;
+		info->sps[i].enc[0].pte_enc = 1;
+		++i;
+	}
+
 	/* Standard 16M large page size segment */
-	info->sps[1].page_shift = 24;
-	info->sps[1].slb_enc = SLB_VSID_L;
-	info->sps[1].enc[0].page_shift = 24;
-	info->sps[1].enc[0].pte_enc = 0;
+	info->sps[i].page_shift = 24;
+	info->sps[i].slb_enc = SLB_VSID_L;
+	info->sps[i].enc[0].page_shift = 24;
+	info->sps[i].enc[0].pte_enc = 0;
 
 	return 0;
 }
+#else
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
+{
+	/* We should not get called */
+	BUG();
+}
 #endif /* CONFIG_PPC64 */
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
-{
-}
-
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
-{
-	return 0;
-}
-
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
-{
-	return 0;
-}
-
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old)
-{
-}
-
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
-{
-}
-
 static unsigned int kvm_global_user_count = 0;
 static DEFINE_SPINLOCK(kvm_global_user_count_lock);
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_pr(struct kvm *kvm)
 {
-#ifdef CONFIG_PPC64
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-#endif
+	mutex_init(&kvm->arch.hpt_mutex);
 
 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		spin_lock(&kvm_global_user_count_lock);
@@ -1323,7 +1492,7 @@
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
 {
 #ifdef CONFIG_PPC64
 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1338,26 +1507,81 @@
 	}
 }
 
-static int kvmppc_book3s_init(void)
+static int kvmppc_core_check_processor_compat_pr(void)
+{
+	/* we are always compatible */
+	return 0;
+}
+
+static long kvm_arch_vm_ioctl_pr(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+static struct kvmppc_ops kvm_ops_pr = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
+	.get_one_reg = kvmppc_get_one_reg_pr,
+	.set_one_reg = kvmppc_set_one_reg_pr,
+	.vcpu_load   = kvmppc_core_vcpu_load_pr,
+	.vcpu_put    = kvmppc_core_vcpu_put_pr,
+	.set_msr     = kvmppc_set_msr_pr,
+	.vcpu_run    = kvmppc_vcpu_run_pr,
+	.vcpu_create = kvmppc_core_vcpu_create_pr,
+	.vcpu_free   = kvmppc_core_vcpu_free_pr,
+	.check_requests = kvmppc_core_check_requests_pr,
+	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
+	.flush_memslot = kvmppc_core_flush_memslot_pr,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
+	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
+	.unmap_hva = kvm_unmap_hva_pr,
+	.unmap_hva_range = kvm_unmap_hva_range_pr,
+	.age_hva  = kvm_age_hva_pr,
+	.test_age_hva = kvm_test_age_hva_pr,
+	.set_spte_hva = kvm_set_spte_hva_pr,
+	.mmu_destroy  = kvmppc_mmu_destroy_pr,
+	.free_memslot = kvmppc_core_free_memslot_pr,
+	.create_memslot = kvmppc_core_create_memslot_pr,
+	.init_vm = kvmppc_core_init_vm_pr,
+	.destroy_vm = kvmppc_core_destroy_vm_pr,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
+	.emulate_op = kvmppc_core_emulate_op_pr,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
+	.fast_vcpu_kick = kvm_vcpu_kick,
+	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
+};
+
+int kvmppc_book3s_init_pr(void)
 {
 	int r;
 
-	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-		     THIS_MODULE);
-
-	if (r)
+	r = kvmppc_core_check_processor_compat_pr();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hpte_sysinit();
+	kvm_ops_pr.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_pr;
 
+	r = kvmppc_mmu_hpte_sysinit();
 	return r;
 }
 
-static void kvmppc_book3s_exit(void)
+void kvmppc_book3s_exit_pr(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_mmu_hpte_sysexit();
-	kvm_exit();
 }
 
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+module_init(kvmppc_book3s_init_pr);
+module_exit(kvmppc_book3s_exit_pr);
+
+MODULE_LICENSE("GPL");
+#endif
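
The pattern above recurs through the rest of the series: the PR backend's global entry points grow a _pr suffix and are gathered into a kvmppc_ops table registered at module load, so the HV and PR implementations can coexist and be chosen per VM. A minimal sketch of the dispatch shape, with invented names only (this is not the kernel's actual kvmppc_ops layout):

/* toy model of function-pointer backend dispatch */
#include <stdio.h>

struct backend_ops {
	const char *name;
	int (*init_vm)(void);
	void (*destroy_vm)(void);
};

static int pr_init_vm(void)
{
	printf("PR: init VM\n");
	return 0;
}

static void pr_destroy_vm(void)
{
	printf("PR: destroy VM\n");
}

static struct backend_ops pr_ops = {
	.name		= "PR",
	.init_vm	= pr_init_vm,
	.destroy_vm	= pr_destroy_vm,
};

/* set once at "module load"; every VM call then goes via the table */
static struct backend_ops *active_ops;

int main(void)
{
	active_ops = &pr_ops;	/* cf. kvmppc_pr_ops = &kvm_ops_pr */
	active_ops->init_vm();
	active_ops->destroy_vm();
	return 0;
}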
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index da0e0bc..5efa97b 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -21,6 +21,8 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
+#define HPTE_SIZE	16		/* bytes per HPT entry */
+
 static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -40,32 +42,41 @@
 	long pte_index = kvmppc_get_gpr(vcpu, 5);
 	unsigned long pteg[2 * 8];
 	unsigned long pteg_addr, i, *hpte;
+	long int ret;
 
+	i = pte_index & 7;
 	pte_index &= ~7UL;
 	pteg_addr = get_pteg_addr(vcpu, pte_index);
 
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
 	hpte = pteg;
 
+	ret = H_PTEG_FULL;
 	if (likely((flags & H_EXACT) == 0)) {
-		pte_index &= ~7UL;
 		for (i = 0; ; ++i) {
 			if (i == 8)
-				return H_PTEG_FULL;
+				goto done;
 			if ((*hpte & HPTE_V_VALID) == 0)
 				break;
 			hpte += 2;
 		}
 	} else {
-		i = kvmppc_get_gpr(vcpu, 5) & 7UL;
 		hpte += i * 2;
+		if (*hpte & HPTE_V_VALID)
+			goto done;
 	}
 
 	hpte[0] = kvmppc_get_gpr(vcpu, 6);
 	hpte[1] = kvmppc_get_gpr(vcpu, 7);
-	copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
-	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	pteg_addr += i * HPTE_SIZE;
+	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
+	ret = H_SUCCESS;
+
+ done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
 }
@@ -77,26 +88,31 @@
 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
 	unsigned long v = 0, pteg, rb;
 	unsigned long pte[2];
+	long int ret;
 
 	pteg = get_pteg_addr(vcpu, pte_index);
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
 
+	ret = H_NOT_FOUND;
 	if ((pte[0] & HPTE_V_VALID) == 0 ||
 	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
-	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
-		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
-		return EMULATE_DONE;
-	}
+	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
+		goto done;
 
 	copy_to_user((void __user *)pteg, &v, sizeof(v));
 
 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 
-	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	ret = H_SUCCESS;
 	kvmppc_set_gpr(vcpu, 4, pte[0]);
 	kvmppc_set_gpr(vcpu, 5, pte[1]);
 
+ done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+	kvmppc_set_gpr(vcpu, 3, ret);
+
 	return EMULATE_DONE;
 }
 
@@ -124,6 +140,7 @@
 	int paramnr = 4;
 	int ret = H_SUCCESS;
 
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
 		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
 		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -172,6 +189,7 @@
 		}
 		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
 	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
@@ -184,15 +202,16 @@
 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
 	unsigned long rb, pteg, r, v;
 	unsigned long pte[2];
+	long int ret;
 
 	pteg = get_pteg_addr(vcpu, pte_index);
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
 
+	ret = H_NOT_FOUND;
 	if ((pte[0] & HPTE_V_VALID) == 0 ||
-	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
-		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
-		return EMULATE_DONE;
-	}
+	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
+		goto done;
 
 	v = pte[0];
 	r = pte[1];
@@ -207,8 +226,11 @@
 	rb = compute_tlbie_rb(v, r, pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+	ret = H_SUCCESS;
 
-	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
 }
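
Note how kvmppc_h_pr_enter() above now serializes all HPT updates on the new hpt_mutex and copies back only the 16-byte HPTE it actually claimed, rather than the whole 8-entry PTE group. A minimal sketch of that scan-under-lock, claim-one-slot pattern, using pthreads and invented names in place of the kernel's lock and H_* return codes:

/* scan a group under a lock, claim one slot, write back only that slot */
#include <pthread.h>
#include <stdio.h>

#define GROUP_SLOTS	8
#define SLOT_VALID	1UL

static unsigned long group[GROUP_SLOTS][2];
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

static long claim_slot(unsigned long v, unsigned long r)
{
	long i, ret = -1;		/* -1 plays the role of H_PTEG_FULL */

	pthread_mutex_lock(&group_lock);
	for (i = 0; i < GROUP_SLOTS; i++) {
		if (!(group[i][0] & SLOT_VALID)) {
			/* only this one two-word entry is modified */
			group[i][0] = v | SLOT_VALID;
			group[i][1] = r;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&group_lock);
	return ret;
}

int main(void)
{
	printf("claimed slot %ld\n", claim_slot(0x1000, 0x2000));
	return 0;
}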
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 8f7633e..a38c4c9 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -38,32 +38,6 @@
 
 #define FUNC(name) 		GLUE(.,name)
 
-	.globl	kvmppc_skip_interrupt
-kvmppc_skip_interrupt:
-	/*
-	 * Here all GPRs are unchanged from when the interrupt happened
-	 * except for r13, which is saved in SPRG_SCRATCH0.
-	 */
-	mfspr	r13, SPRN_SRR0
-	addi	r13, r13, 4
-	mtspr	SPRN_SRR0, r13
-	GET_SCRATCH0(r13)
-	rfid
-	b	.
-
-	.globl	kvmppc_skip_Hinterrupt
-kvmppc_skip_Hinterrupt:
-	/*
-	 * Here all GPRs are unchanged from when the interrupt happened
-	 * except for r13, which is saved in SPRG_SCRATCH0.
-	 */
-	mfspr	r13, SPRN_HSRR0
-	addi	r13, r13, 4
-	mtspr	SPRN_HSRR0, r13
-	GET_SCRATCH0(r13)
-	hrfid
-	b	.
-
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
 #define FUNC(name)		name
@@ -179,11 +153,15 @@
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
+#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function
+	 * C exit handler function.  On 64-bit we delay enabling
+	 * interrupts until we have finished transferring stuff
+	 * to or from the PACA.
 	 */
 	ori	r5, r5, MSR_EE
+#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 3219ba8..cf95cde 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -260,6 +260,7 @@
 	 */
 	return rc;
 }
+EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
 
 void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe478..bc50c97 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -161,8 +161,8 @@
 .global kvmppc_handler_trampoline_exit
 kvmppc_handler_trampoline_exit:
 
-.global kvmppc_interrupt
-kvmppc_interrupt:
+.global kvmppc_interrupt_pr
+kvmppc_interrupt_pr:
 
 	/* Register usage at this point:
 	 *
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a3a5cb8..02a17dc 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@
 	}
 
 	/* Check for real mode returning too hard */
-	if (xics->real_mode)
+	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
 		return kvmppc_xics_rm_complete(vcpu, req);
 
 	switch (req) {
@@ -840,6 +840,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
 
 
 /* -- Initialisation code etc. -- */
@@ -1250,13 +1251,13 @@
 
 	xics_debugfs_init(xics);
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 		/* Enable real mode support */
 		xics->real_mode = ENABLE_REALMODE;
 		xics->real_mode_dbg = DEBUG_REALMODE;
 	}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 	return 0;
 }
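
The xics hunk shows the general conversion rule of the series: compile-time #ifdef CONFIG_KVM_BOOK3S_64_HV guards become run-time is_kvmppc_hv_enabled() checks, since both backends may now be built into one kernel. A toy model of the same idea, with stand-in names:

/* run-time backend check replacing a compile-time #ifdef */
#include <stdbool.h>
#include <stdio.h>

struct vm {
	bool hv_enabled;	/* in the kernel: which kvmppc_ops the VM uses */
};

static bool is_hv_enabled(const struct vm *vm)
{
	return vm->hv_enabled;
}

static const char *complete_hcall(const struct vm *vm, bool real_mode)
{
	/* the old code compiled this test in or out; now it is per VM */
	if (real_mode && is_hv_enabled(vm))
		return "real-mode completion";
	return "virtual-mode path";
}

int main(void)
{
	struct vm hv = { .hv_enabled = true };
	struct vm pr = { .hv_enabled = false };

	printf("HV: %s\n", complete_hcall(&hv, true));
	printf("PR: %s\n", complete_hcall(&pr, true));
	return 0;
}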
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5133199..53e65a2 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,7 +40,9 @@
 
 #include "timing.h"
 #include "booke.h"
-#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_booke.h"
 
 unsigned long kvmppc_booke_handlers;
 
@@ -133,6 +135,29 @@
 #endif
 }
 
+static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
+{
+	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
+#ifndef CONFIG_KVM_BOOKE_HV
+	vcpu->arch.shadow_msr &= ~MSR_DE;
+	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
+#endif
+
+	/* Force enable debug interrupts when user space wants to debug */
+	if (vcpu->guest_debug) {
+#ifdef CONFIG_KVM_BOOKE_HV
+		/*
+		 * Since there is no shadow MSR, sync MSR_DE into the guest
+		 * visible MSR.
+		 */
+		vcpu->arch.shared->msr |= MSR_DE;
+#else
+		vcpu->arch.shadow_msr |= MSR_DE;
+		vcpu->arch.shared->msr &= ~MSR_DE;
+#endif
+	}
+}
+
 /*
  * Helper function for "full" MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
@@ -150,6 +175,7 @@
 	kvmppc_mmu_msr_notify(vcpu, old_msr);
 	kvmppc_vcpu_sync_spe(vcpu);
 	kvmppc_vcpu_sync_fpu(vcpu);
+	kvmppc_vcpu_sync_debug(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,6 +681,7 @@
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
+	struct thread_struct thread;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -695,6 +722,12 @@
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+	/* Switch to guest debug context */
+	thread.debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&thread);
+	thread.debug = current->thread.debug;
+	current->thread.debug = vcpu->arch.shadow_dbg_reg;
+
 	kvmppc_fix_ee_before_entry();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -702,6 +735,10 @@
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
+	/* Switch back to user space debug context */
+	switch_booke_debug_regs(&thread);
+	current->thread.debug = thread.debug;
+
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
 
@@ -757,6 +794,30 @@
 	}
 }
 
+static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+	u32 dbsr = vcpu->arch.dbsr;
+
+	run->debug.arch.status = 0;
+	run->debug.arch.address = vcpu->arch.pc;
+
+	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
+		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
+	} else {
+		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
+			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
+		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
+			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
+		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
+			run->debug.arch.address = dbg_reg->dac1;
+		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
+			run->debug.arch.address = dbg_reg->dac2;
+	}
+
+	return RESUME_HOST;
+}
+
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
 	ulong r1, ip, msr, lr;
@@ -817,6 +878,11 @@
 	case BOOKE_INTERRUPT_CRITICAL:
 		unknown_exception(&regs);
 		break;
+	case BOOKE_INTERRUPT_DEBUG:
+		/* Save DBSR before preemption is enabled */
+		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
+		kvmppc_clear_dbsr();
+		break;
 	}
 }
 
@@ -1134,18 +1200,10 @@
 	}
 
 	case BOOKE_INTERRUPT_DEBUG: {
-		u32 dbsr;
-
-		vcpu->arch.pc = mfspr(SPRN_CSRR0);
-
-		/* clear IAC events in DBSR register */
-		dbsr = mfspr(SPRN_DBSR);
-		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
-		mtspr(SPRN_DBSR, dbsr);
-
-		run->exit_reason = KVM_EXIT_DEBUG;
+		r = kvmppc_handle_debug(run, vcpu);
+		if (r == RESUME_HOST)
+			run->exit_reason = KVM_EXIT_DEBUG;
 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
-		r = RESUME_HOST;
 		break;
 	}
 
@@ -1196,7 +1254,7 @@
 	kvmppc_set_msr(vcpu, 0);
 
 #ifndef CONFIG_KVM_BOOKE_HV
-	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
 	vcpu->arch.shadow_pid = 1;
 	vcpu->arch.shared->msr = 0;
 #endif
@@ -1358,7 +1416,7 @@
 	return 0;
 }
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
 	sregs->u.e.features |= KVM_SREGS_E_IVOR;
 
@@ -1378,6 +1436,7 @@
 	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
 	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
 	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+	return 0;
 }
 
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1412,8 +1471,7 @@
 
 	get_sregs_base(vcpu, sregs);
 	get_sregs_arch206(vcpu, sregs);
-	kvmppc_core_get_sregs(vcpu, sregs);
-	return 0;
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1432,7 +1490,7 @@
 	if (ret < 0)
 		return ret;
 
-	return kvmppc_core_set_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1440,7 +1498,6 @@
 	int r = 0;
 	union kvmppc_one_reg val;
 	int size;
-	long int i;
 
 	size = one_reg_size(reg->id);
 	if (size > sizeof(val))
@@ -1448,16 +1505,24 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_IAC1:
-	case KVM_REG_PPC_IAC2:
-	case KVM_REG_PPC_IAC3:
-	case KVM_REG_PPC_IAC4:
-		i = reg->id - KVM_REG_PPC_IAC1;
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
 		break;
+	case KVM_REG_PPC_IAC2:
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+		break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	case KVM_REG_PPC_IAC3:
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+		break;
+	case KVM_REG_PPC_IAC4:
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
+		break;
+#endif
 	case KVM_REG_PPC_DAC1:
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+		break;
 	case KVM_REG_PPC_DAC2:
-		i = reg->id - KVM_REG_PPC_DAC1;
-		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
+		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
 		break;
 	case KVM_REG_PPC_EPR: {
 		u32 epr = get_guest_epr(vcpu);
@@ -1476,10 +1541,13 @@
 		val = get_reg_val(reg->id, vcpu->arch.tsr);
 		break;
 	case KVM_REG_PPC_DEBUG_INST:
-		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
+		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
+		break;
+	case KVM_REG_PPC_VRSAVE:
+		val = get_reg_val(reg->id, vcpu->arch.vrsave);
 		break;
 	default:
-		r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
@@ -1497,7 +1565,6 @@
 	int r = 0;
 	union kvmppc_one_reg val;
 	int size;
-	long int i;
 
 	size = one_reg_size(reg->id);
 	if (size > sizeof(val))
@@ -1508,16 +1575,24 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_IAC1:
-	case KVM_REG_PPC_IAC2:
-	case KVM_REG_PPC_IAC3:
-	case KVM_REG_PPC_IAC4:
-		i = reg->id - KVM_REG_PPC_IAC1;
-		vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
 		break;
+	case KVM_REG_PPC_IAC2:
+		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+		break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	case KVM_REG_PPC_IAC3:
+		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+		break;
+	case KVM_REG_PPC_IAC4:
+		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
+		break;
+#endif
 	case KVM_REG_PPC_DAC1:
+		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+		break;
 	case KVM_REG_PPC_DAC2:
-		i = reg->id - KVM_REG_PPC_DAC1;
-		vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
+		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
 		break;
 	case KVM_REG_PPC_EPR: {
 		u32 new_epr = set_reg_val(reg->id, val);
@@ -1551,20 +1626,17 @@
 		kvmppc_set_tcr(vcpu, tcr);
 		break;
 	}
+	case KVM_REG_PPC_VRSAVE:
+		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		break;
 	default:
-		r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-					 struct kvm_guest_debug *dbg)
-{
-	return -EINVAL;
-}
-
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	return -ENOTSUPP;
@@ -1589,12 +1661,12 @@
 	return -ENOTSUPP;
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			      struct kvm_memory_slot *dont)
 {
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       unsigned long npages)
 {
 	return 0;
@@ -1670,6 +1742,157 @@
 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
 }
 
+static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
+				       uint64_t addr, int index)
+{
+	switch (index) {
+	case 0:
+		dbg_reg->dbcr0 |= DBCR0_IAC1;
+		dbg_reg->iac1 = addr;
+		break;
+	case 1:
+		dbg_reg->dbcr0 |= DBCR0_IAC2;
+		dbg_reg->iac2 = addr;
+		break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	case 2:
+		dbg_reg->dbcr0 |= DBCR0_IAC3;
+		dbg_reg->iac3 = addr;
+		break;
+	case 3:
+		dbg_reg->dbcr0 |= DBCR0_IAC4;
+		dbg_reg->iac4 = addr;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	dbg_reg->dbcr0 |= DBCR0_IDM;
+	return 0;
+}
+
+static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
+				       int type, int index)
+{
+	switch (index) {
+	case 0:
+		if (type & KVMPPC_DEBUG_WATCH_READ)
+			dbg_reg->dbcr0 |= DBCR0_DAC1R;
+		if (type & KVMPPC_DEBUG_WATCH_WRITE)
+			dbg_reg->dbcr0 |= DBCR0_DAC1W;
+		dbg_reg->dac1 = addr;
+		break;
+	case 1:
+		if (type & KVMPPC_DEBUG_WATCH_READ)
+			dbg_reg->dbcr0 |= DBCR0_DAC2R;
+		if (type & KVMPPC_DEBUG_WATCH_WRITE)
+			dbg_reg->dbcr0 |= DBCR0_DAC2W;
+		dbg_reg->dac2 = addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dbg_reg->dbcr0 |= DBCR0_IDM;
+	return 0;
+}
+
+void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
+{
+	/* XXX: Add similar MSR protection for BookE-PR */
+#ifdef CONFIG_KVM_BOOKE_HV
+	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
+	if (set) {
+		if (prot_bitmap & MSR_UCLE)
+			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
+		if (prot_bitmap & MSR_DE)
+			vcpu->arch.shadow_msrp |= MSRP_DEP;
+		if (prot_bitmap & MSR_PMM)
+			vcpu->arch.shadow_msrp |= MSRP_PMMP;
+	} else {
+		if (prot_bitmap & MSR_UCLE)
+			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
+		if (prot_bitmap & MSR_DE)
+			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
+		if (prot_bitmap & MSR_PMM)
+			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
+	}
+#endif
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					 struct kvm_guest_debug *dbg)
+{
+	struct debug_reg *dbg_reg;
+	int n, b = 0, w = 0;
+
+	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
+		vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+		vcpu->guest_debug = 0;
+		kvm_guest_protect_msr(vcpu, MSR_DE, false);
+		return 0;
+	}
+
+	kvm_guest_protect_msr(vcpu, MSR_DE, true);
+	vcpu->guest_debug = dbg->control;
+	vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+	/* Set DBCR0_EDM in guest visible DBCR0 register. */
+	vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+
+	/* Code below handles only HW breakpoints */
+	dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+	/*
+	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
+	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
+	 */
+	dbg_reg->dbcr1 = 0;
+	dbg_reg->dbcr2 = 0;
+#else
+	/*
+	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
+	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
+	 * is set.
+	 */
+	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
+			  DBCR1_IAC4US;
+	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+#endif
+
+	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+		return 0;
+
+	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
+		uint64_t addr = dbg->arch.bp[n].addr;
+		uint32_t type = dbg->arch.bp[n].type;
+
+		if (type == KVMPPC_DEBUG_NONE)
+			continue;
+
+		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
+			     KVMPPC_DEBUG_WATCH_WRITE |
+			     KVMPPC_DEBUG_BREAKPOINT))
+			return -EINVAL;
+
+		if (type & KVMPPC_DEBUG_BREAKPOINT) {
+			/* Setting H/W breakpoint */
+			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
+				return -EINVAL;
+		} else {
+			/* Setting H/W watchpoint */
+			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
+							type, w++))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	vcpu->cpu = smp_processor_id();
@@ -1680,6 +1903,44 @@
 {
 	current->thread.kvm_vcpu = NULL;
 	vcpu->cpu = -1;
+
+	/* Clear pending debug event in DBSR */
+	kvmppc_clear_dbsr();
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+	kvm->arch.kvm_ops->destroy_vm(kvm);
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 int __init kvmppc_booke_init(void)
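
With kvm_arch_vcpu_ioctl_set_guest_debug() now implemented, userspace can arm booke hardware breakpoints through the standard KVM debug interface. A rough sketch, assuming powerpc uapi headers that carry the kvm_guest_debug_arch bp[] array and the KVMPPC_DEBUG_* type flags; error handling elided:

/* arm one hardware instruction breakpoint on a booke vcpu */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.bp[0].addr = addr;
	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

	/* the next KVM_RUN that hits addr returns with KVM_EXIT_DEBUG */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}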
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba6..09bfd9b 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,6 +99,30 @@
 
 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
 
+extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				      unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
+					 ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
+					 ulong *spr_val);
+extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
+				       struct kvm_vcpu *vcpu,
+				       unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
+					  ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
+					  ulong *spr_val);
+
 /*
  * Load up guest vcpu FP state if it's needed.
  * It also sets MSR_FP in the thread so that the host knows
@@ -129,4 +153,9 @@
 		giveup_fpu(current);
 #endif
 }
+
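+/*
+ * DBSR bits are write-one-to-clear, so writing back the value we
+ * just read acknowledges every pending debug event at once.
+ */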
+static inline void kvmppc_clear_dbsr(void)
+{
+	mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
+}
 #endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ce6b73c..497b142 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -305,7 +305,7 @@
 {
 }
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvmppc_booke_vcpu_load(vcpu, cpu);
 
@@ -313,7 +313,7 @@
 	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_SPE
 	if (vcpu->arch.shadow_msr & MSR_SPE)
@@ -367,7 +367,8 @@
 	return 0;
 }
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
+				      struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
@@ -388,9 +389,11 @@
 
 	kvmppc_get_sregs_ivor(vcpu, sregs);
 	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
+	return 0;
 }
 
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
+				      struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int ret;
@@ -425,21 +428,22 @@
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
-			union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
+				   union kvmppc_one_reg *val)
 {
 	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
-		       union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
+				   union kvmppc_one_reg *val)
 {
 	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
 	return r;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
+						     unsigned int id)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
 	struct kvm_vcpu *vcpu;
@@ -481,7 +485,7 @@
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
@@ -492,15 +496,32 @@
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_e500(struct kvm *kvm)
 {
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
 {
 }
 
+static struct kvmppc_ops kvm_ops_e500 = {
+	.get_sregs = kvmppc_core_get_sregs_e500,
+	.set_sregs = kvmppc_core_set_sregs_e500,
+	.get_one_reg = kvmppc_get_one_reg_e500,
+	.set_one_reg = kvmppc_set_one_reg_e500,
+	.vcpu_load   = kvmppc_core_vcpu_load_e500,
+	.vcpu_put    = kvmppc_core_vcpu_put_e500,
+	.vcpu_create = kvmppc_core_vcpu_create_e500,
+	.vcpu_free   = kvmppc_core_vcpu_free_e500,
+	.mmu_destroy  = kvmppc_mmu_destroy_e500,
+	.init_vm = kvmppc_core_init_vm_e500,
+	.destroy_vm = kvmppc_core_destroy_vm_e500,
+	.emulate_op = kvmppc_core_emulate_op_e500,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+};
+
 static int __init kvmppc_e500_init(void)
 {
 	int r, i;
@@ -512,11 +533,11 @@
 
 	r = kvmppc_core_check_processor_compat();
 	if (r)
-		return r;
+		goto err_out;
 
 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;
 
 	/* copy extra E500 exception handlers */
 	ivor[0] = mfspr(SPRN_IVOR32);
@@ -534,11 +555,19 @@
 	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
 			   ivor[max_ivor] + handler_len);
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_e500.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500;
+
+err_out:
+	return r;
 }
 
 static void __exit kvmppc_e500_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c2e5e98..4fd9650 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -117,7 +117,7 @@
 #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
 #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
 #define MAS2_ATTRIB_MASK \
-	  (MAS2_X0 | MAS2_X1)
+	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
 #define MAS3_ATTRIB_MASK \
 	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
 	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a012..89b7f82 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,7 @@
 #define XOP_TLBRE   946
 #define XOP_TLBWE   978
 #define XOP_TLBILX  18
+#define XOP_EHPRIV  270
 
 #ifdef CONFIG_KVM_E500MC
 static int dbell2prio(ulong param)
@@ -82,8 +83,28 @@
 }
 #endif
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                           unsigned int inst, int *advance)
+static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				   unsigned int inst, int *advance)
+{
+	int emulated = EMULATE_DONE;
+
+	switch (get_oc(inst)) {
+	case EHPRIV_OC_DEBUG:
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.address = vcpu->arch.pc;
+		run->debug.arch.status = 0;
+		kvmppc_account_exit(vcpu, DEBUG_EXITS);
+		emulated = EMULATE_EXIT_USER;
+		*advance = 0;
+		break;
+	default:
+		emulated = EMULATE_FAIL;
+	}
+	return emulated;
+}
+
+int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int ra = get_ra(inst);
@@ -130,6 +151,11 @@
 			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
 			break;
 
+		case XOP_EHPRIV:
+			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
+							   advance);
+			break;
+
 		default:
 			emulated = EMULATE_FAIL;
 		}
@@ -146,7 +172,7 @@
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
@@ -237,7 +263,7 @@
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
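
On the other side of the new EHPRIV_OC_DEBUG path, userspace sees an ordinary KVM_EXIT_DEBUG. A minimal run-loop fragment, again assuming powerpc uapi headers where kvm_debug_exit_arch carries address and status fields; run must be the vcpu's mmap'ed kvm_run structure, and error handling is elided:

/* userspace view of the EHPRIV debug exit */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void run_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_DEBUG)
		printf("debug exit at 0x%llx, status 0x%x\n",
		       (unsigned long long)run->debug.arch.address,
		       run->debug.arch.status);
}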
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153..ebca6b8 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -32,7 +32,7 @@
 #include <asm/kvm_ppc.h>
 
 #include "e500.h"
-#include "trace.h"
+#include "trace_booke.h"
 #include "timing.h"
 #include "e500_mmu_host.h"
 
@@ -536,7 +536,7 @@
 	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
 {
 }
 
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index c65593a..ecf2247 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,10 +32,11 @@
 #include <asm/kvm_ppc.h>
 
 #include "e500.h"
-#include "trace.h"
 #include "timing.h"
 #include "e500_mmu_host.h"
 
+#include "trace_booke.h"
+
 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
 
 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
@@ -253,6 +254,9 @@
 	ref->pfn = pfn;
 	ref->flags |= E500_TLB_VALID;
 
+	/* Mark the page accessed */
+	kvm_set_pfn_accessed(pfn);
+
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379..4132cd2 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
@@ -147,7 +147,7 @@
 	kvmppc_load_guest_fp(vcpu);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.eplc = mfspr(SPRN_EPLC);
 	vcpu->arch.epsc = mfspr(SPRN_EPSC);
@@ -204,7 +204,8 @@
 	return 0;
 }
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
+					struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
@@ -224,10 +225,11 @@
 	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
 	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
 
-	kvmppc_get_sregs_ivor(vcpu, sregs);
+	return kvmppc_get_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
+					struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int ret;
@@ -260,21 +262,22 @@
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
-			union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
+			      union kvmppc_one_reg *val)
 {
 	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
-		       union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
+			      union kvmppc_one_reg *val)
 {
 	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
 	return r;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
+						       unsigned int id)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
 	struct kvm_vcpu *vcpu;
@@ -315,7 +318,7 @@
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
@@ -325,7 +328,7 @@
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
 {
 	int lpid;
 
@@ -337,27 +340,52 @@
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
 {
 	kvmppc_free_lpid(kvm->arch.lpid);
 }
 
+static struct kvmppc_ops kvm_ops_e500mc = {
+	.get_sregs = kvmppc_core_get_sregs_e500mc,
+	.set_sregs = kvmppc_core_set_sregs_e500mc,
+	.get_one_reg = kvmppc_get_one_reg_e500mc,
+	.set_one_reg = kvmppc_set_one_reg_e500mc,
+	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
+	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
+	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
+	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
+	.mmu_destroy  = kvmppc_mmu_destroy_e500,
+	.init_vm = kvmppc_core_init_vm_e500mc,
+	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
+	.emulate_op = kvmppc_core_emulate_op_e500,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+};
+
 static int __init kvmppc_e500mc_init(void)
 {
 	int r;
 
 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;
 
 	kvmppc_init_lpid(64);
 	kvmppc_claim_lpid(0); /* host */
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_e500mc.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500mc;
+
+err_out:
+	return r;
 }
 
 static void __exit kvmppc_e500mc_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45..2f9a087 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@
 	case SPRN_PIR: break;
 
 	default:
-		emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
-						     spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
+								  spr_val);
 		if (emulated == EMULATE_FAIL)
 			printk(KERN_INFO "mtspr: unknown spr "
 				"0x%x\n", sprn);
@@ -191,8 +191,8 @@
 		spr_val = kvmppc_get_dec(vcpu, get_tb());
 		break;
 	default:
-		emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
-						     &spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
+								  &spr_val);
 		if (unlikely(emulated == EMULATE_FAIL)) {
 			printk(KERN_INFO "mfspr: unknown spr "
 				"0x%x\n", sprn);
@@ -464,7 +464,8 @@
 	}
 
 	if (emulated == EMULATE_FAIL) {
-		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+							       &advance);
 		if (emulated == EMULATE_AGAIN) {
 			advance = 0;
 		} else if (emulated == EMULATE_FAIL) {
@@ -483,3 +484,4 @@
 
 	return emulated;
 }
+EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106..9ae9768 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
@@ -39,6 +40,12 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
+struct kvmppc_ops *kvmppc_hv_ops;
+EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
+struct kvmppc_ops *kvmppc_pr_ops;
+EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !!(v->arch.pending_exceptions) ||
@@ -50,7 +57,6 @@
 	return 1;
 }
 
-#ifndef CONFIG_KVM_BOOK3S_64_HV
 /*
  * Common checks before entering the guest world.  Call with interrupts
  * disabled.
@@ -125,7 +131,7 @@
 
 	return r;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
 
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 {
@@ -179,6 +185,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
 
 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 {
@@ -192,11 +199,9 @@
 	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
 		goto out;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
 	/* HV KVM can only do PAPR mode for now */
-	if (!vcpu->arch.papr_enabled)
+	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
 		goto out;
-#endif
 
 #ifdef CONFIG_KVM_BOOKE_HV
 	if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -209,6 +214,7 @@
 	vcpu->arch.sane = r;
 	return r ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
 
 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
@@ -243,6 +249,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
 
 int kvm_arch_hardware_enable(void *garbage)
 {
@@ -269,10 +276,35 @@
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	if (type)
-		return -EINVAL;
+	struct kvmppc_ops *kvm_ops = NULL;
+	/*
+	 * if we have both HV and PR enabled, default is HV
+	 */
+	if (type == 0) {
+		if (kvmppc_hv_ops)
+			kvm_ops = kvmppc_hv_ops;
+		else
+			kvm_ops = kvmppc_pr_ops;
+		if (!kvm_ops)
+			goto err_out;
+	} else	if (type == KVM_VM_PPC_HV) {
+		if (!kvmppc_hv_ops)
+			goto err_out;
+		kvm_ops = kvmppc_hv_ops;
+	} else if (type == KVM_VM_PPC_PR) {
+		if (!kvmppc_pr_ops)
+			goto err_out;
+		kvm_ops = kvmppc_pr_ops;
+	} else
+		goto err_out;
 
+	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+		return -ENOENT;
+
+	kvm->arch.kvm_ops = kvm_ops;
 	return kvmppc_core_init_vm(kvm);
+err_out:
+	return -EINVAL;
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
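
From userspace, the new type argument to KVM_CREATE_VM selects a backend explicitly; 0 keeps the old behaviour (HV preferred when both are loaded). A sketch, assuming uapi headers that define the new KVM_VM_PPC_HV / KVM_VM_PPC_PR constants:

/* create a PR-mode VM explicitly */
#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_pr_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	/* or KVM_VM_PPC_HV, or 0 for "whatever is available" */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
}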
@@ -292,6 +324,9 @@
 	kvmppc_core_destroy_vm(kvm);
 
 	mutex_unlock(&kvm->lock);
+
+	/* drop the module reference */
+	module_put(kvm->arch.kvm_ops->owner);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -301,6 +336,10 @@
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
+	/* FIXME!!
+	 * Should some of this be a vm ioctl? Is it possible now?
+	 */
+	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 
 	switch (ext) {
 #ifdef CONFIG_BOOKE
@@ -320,22 +359,26 @@
 	case KVM_CAP_DEVICE_CTRL:
 		r = 1;
 		break;
-#ifndef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
 	case KVM_CAP_SW_TLB:
 #endif
-#ifdef CONFIG_KVM_MPIC
-	case KVM_CAP_IRQ_MPIC:
-#endif
-		r = 1;
+		/* We support this only for PR */
+		r = !hv_enabled;
 		break;
+#ifdef CONFIG_KVM_MMIO
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 #endif
+#ifdef CONFIG_KVM_MPIC
+	case KVM_CAP_IRQ_MPIC:
+		r = 1;
+		break;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_PPC_ALLOC_HTAB:
@@ -346,32 +389,37 @@
 		r = 1;
 		break;
 #endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_SMT:
-		r = threads_per_core;
+		if (hv_enabled)
+			r = threads_per_core;
+		else
+			r = 0;
 		break;
 	case KVM_CAP_PPC_RMA:
-		r = 1;
+		r = hv_enabled;
 		/* PPC970 requires an RMA */
-		if (cpu_has_feature(CPU_FTR_ARCH_201))
+		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
 			r = 2;
 		break;
 #endif
 	case KVM_CAP_SYNC_MMU:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+		if (hv_enabled)
+			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+		else
+			r = 0;
 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 		r = 1;
 #else
 		r = 0;
-		break;
 #endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+		break;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_HTAB_FD:
-		r = 1;
+		r = hv_enabled;
 		break;
 #endif
-		break;
 	case KVM_CAP_NR_VCPUS:
 		/*
 		 * Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,11 +427,10 @@
 		 * will have secondary threads "offline"), and for other KVM
 		 * implementations just count online CPUs.
 		 */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-		r = num_present_cpus();
-#else
-		r = num_online_cpus();
-#endif
+		if (hv_enabled)
+			r = num_present_cpus();
+		else
+			r = num_online_cpus();
 		break;
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
@@ -407,15 +454,16 @@
 	return -EINVAL;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
-	kvmppc_core_free_memslot(free, dont);
+	kvmppc_core_free_memslot(kvm, free, dont);
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
-	return kvmppc_core_create_memslot(slot, npages);
+	return kvmppc_core_create_memslot(kvm, slot, npages);
 }
 
 void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -659,6 +707,7 @@
 
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -720,6 +769,7 @@
 
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_handle_store);
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -1024,52 +1074,12 @@
 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
 		goto out;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-	case KVM_ALLOCATE_RMA: {
-		struct kvm_allocate_rma rma;
-		struct kvm *kvm = filp->private_data;
-
-		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
-		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
-			r = -EFAULT;
-		break;
-	}
-
-	case KVM_PPC_ALLOCATE_HTAB: {
-		u32 htab_order;
-
-		r = -EFAULT;
-		if (get_user(htab_order, (u32 __user *)argp))
-			break;
-		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
-		if (r)
-			break;
-		r = -EFAULT;
-		if (put_user(htab_order, (u32 __user *)argp))
-			break;
-		r = 0;
-		break;
-	}
-
-	case KVM_PPC_GET_HTAB_FD: {
-		struct kvm_get_htab_fd ghf;
-
-		r = -EFAULT;
-		if (copy_from_user(&ghf, argp, sizeof(ghf)))
-			break;
-		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
-		break;
-	}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
-
-#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_PPC_GET_SMMU_INFO: {
 		struct kvm_ppc_smmu_info info;
+		struct kvm *kvm = filp->private_data;
 
 		memset(&info, 0, sizeof(info));
-		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -1080,11 +1090,15 @@
 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
 		break;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+	default: {
+		struct kvm *kvm = filp->private_data;
+		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+	}
+#else /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
+#endif
 	}
-
 out:
 	return r;
 }
@@ -1106,22 +1120,26 @@
 
 	return lpid;
 }
+EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
 
 void kvmppc_claim_lpid(long lpid)
 {
 	set_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
 
 void kvmppc_free_lpid(long lpid)
 {
 	clear_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
 
 void kvmppc_init_lpid(unsigned long nr_lpids_param)
 {
 	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
 	memset(lpid_inuse, 0, sizeof(lpid_inuse));
 }
+EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
 int kvm_arch_init(void *opaque)
 {
@@ -1130,4 +1148,5 @@
 
 void kvm_arch_exit(void)
 {
+
 }
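
Note on the ops split above: kvm_arch_init_vm() now dispatches on the vm
type passed to KVM_CREATE_VM, so userspace can request a flavour
explicitly. A minimal sketch of the intended call, assuming the
KVM_VM_PPC_HV/KVM_VM_PPC_PR type values exported through <linux/kvm.h>
(the helper name is illustrative):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask for HV explicitly; fall back to the default selection
	 * (HV if loaded, else PR) when the request cannot be satisfied. */
	static int create_ppc_vm(int sys_fd)
	{
		int vm_fd = ioctl(sys_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);

		if (vm_fd < 0)
			vm_fd = ioctl(sys_fd, KVM_CREATE_VM, 0);
		return vm_fd;
	}
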
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index e326489..2e0e67e 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,126 +31,6 @@
 		  __entry->inst, __entry->pc, __entry->emulate)
 );
 
-#ifdef CONFIG_PPC_BOOK3S
-#define kvm_trace_symbol_exit \
-	{0x100, "SYSTEM_RESET"}, \
-	{0x200, "MACHINE_CHECK"}, \
-	{0x300, "DATA_STORAGE"}, \
-	{0x380, "DATA_SEGMENT"}, \
-	{0x400, "INST_STORAGE"}, \
-	{0x480, "INST_SEGMENT"}, \
-	{0x500, "EXTERNAL"}, \
-	{0x501, "EXTERNAL_LEVEL"}, \
-	{0x502, "EXTERNAL_HV"}, \
-	{0x600, "ALIGNMENT"}, \
-	{0x700, "PROGRAM"}, \
-	{0x800, "FP_UNAVAIL"}, \
-	{0x900, "DECREMENTER"}, \
-	{0x980, "HV_DECREMENTER"}, \
-	{0xc00, "SYSCALL"}, \
-	{0xd00, "TRACE"}, \
-	{0xe00, "H_DATA_STORAGE"}, \
-	{0xe20, "H_INST_STORAGE"}, \
-	{0xe40, "H_EMUL_ASSIST"}, \
-	{0xf00, "PERFMON"}, \
-	{0xf20, "ALTIVEC"}, \
-	{0xf40, "VSX"}
-#else
-#define kvm_trace_symbol_exit \
-	{0, "CRITICAL"}, \
-	{1, "MACHINE_CHECK"}, \
-	{2, "DATA_STORAGE"}, \
-	{3, "INST_STORAGE"}, \
-	{4, "EXTERNAL"}, \
-	{5, "ALIGNMENT"}, \
-	{6, "PROGRAM"}, \
-	{7, "FP_UNAVAIL"}, \
-	{8, "SYSCALL"}, \
-	{9, "AP_UNAVAIL"}, \
-	{10, "DECREMENTER"}, \
-	{11, "FIT"}, \
-	{12, "WATCHDOG"}, \
-	{13, "DTLB_MISS"}, \
-	{14, "ITLB_MISS"}, \
-	{15, "DEBUG"}, \
-	{32, "SPE_UNAVAIL"}, \
-	{33, "SPE_FP_DATA"}, \
-	{34, "SPE_FP_ROUND"}, \
-	{35, "PERFORMANCE_MONITOR"}, \
-	{36, "DOORBELL"}, \
-	{37, "DOORBELL_CRITICAL"}, \
-	{38, "GUEST_DBELL"}, \
-	{39, "GUEST_DBELL_CRIT"}, \
-	{40, "HV_SYSCALL"}, \
-	{41, "HV_PRIV"}
-#endif
-
-TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_nr, vcpu),
-
-	TP_STRUCT__entry(
-		__field(	unsigned int,	exit_nr		)
-		__field(	unsigned long,	pc		)
-		__field(	unsigned long,	msr		)
-		__field(	unsigned long,	dar		)
-#ifdef CONFIG_KVM_BOOK3S_PR
-		__field(	unsigned long,	srr1		)
-#endif
-		__field(	unsigned long,	last_inst	)
-	),
-
-	TP_fast_assign(
-#ifdef CONFIG_KVM_BOOK3S_PR
-		struct kvmppc_book3s_shadow_vcpu *svcpu;
-#endif
-		__entry->exit_nr	= exit_nr;
-		__entry->pc		= kvmppc_get_pc(vcpu);
-		__entry->dar		= kvmppc_get_fault_dar(vcpu);
-		__entry->msr		= vcpu->arch.shared->msr;
-#ifdef CONFIG_KVM_BOOK3S_PR
-		svcpu = svcpu_get(vcpu);
-		__entry->srr1		= svcpu->shadow_srr1;
-		svcpu_put(svcpu);
-#endif
-		__entry->last_inst	= vcpu->arch.last_inst;
-	),
-
-	TP_printk("exit=%s"
-		" | pc=0x%lx"
-		" | msr=0x%lx"
-		" | dar=0x%lx"
-#ifdef CONFIG_KVM_BOOK3S_PR
-		" | srr1=0x%lx"
-#endif
-		" | last_inst=0x%lx"
-		,
-		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
-		__entry->pc,
-		__entry->msr,
-		__entry->dar,
-#ifdef CONFIG_KVM_BOOK3S_PR
-		__entry->srr1,
-#endif
-		__entry->last_inst
-		)
-);
-
-TRACE_EVENT(kvm_unmap_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	hva		)
-	),
-
-	TP_fast_assign(
-		__entry->hva		= hva;
-	),
-
-	TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
 TRACE_EVENT(kvm_stlb_inval,
 	TP_PROTO(unsigned int stlb_index),
 	TP_ARGS(stlb_index),
@@ -236,315 +116,6 @@
 		__entry->cpu_nr, __entry->requests)
 );
 
-
-/*************************************************************************
- *                         Book3S trace points                           *
- *************************************************************************/
-
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-TRACE_EVENT(kvm_book3s_reenter,
-	TP_PROTO(int r, struct kvm_vcpu *vcpu),
-	TP_ARGS(r, vcpu),
-
-	TP_STRUCT__entry(
-		__field(	unsigned int,	r		)
-		__field(	unsigned long,	pc		)
-	),
-
-	TP_fast_assign(
-		__entry->r		= r;
-		__entry->pc		= kvmppc_get_pc(vcpu);
-	),
-
-	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
-);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-
-TRACE_EVENT(kvm_book3s_64_mmu_map,
-	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
-		 struct kvmppc_pte *orig_pte),
-	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
-
-	TP_STRUCT__entry(
-		__field(	unsigned char,		flag_w		)
-		__field(	unsigned char,		flag_x		)
-		__field(	unsigned long,		eaddr		)
-		__field(	unsigned long,		hpteg		)
-		__field(	unsigned long,		va		)
-		__field(	unsigned long long,	vpage		)
-		__field(	unsigned long,		hpaddr		)
-	),
-
-	TP_fast_assign(
-		__entry->flag_w	= ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
-		__entry->flag_x	= (rflags & HPTE_R_N) ? '-' : 'x';
-		__entry->eaddr	= orig_pte->eaddr;
-		__entry->hpteg	= hpteg;
-		__entry->va	= va;
-		__entry->vpage	= orig_pte->vpage;
-		__entry->hpaddr	= hpaddr;
-	),
-
-	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
-		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
-		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
-);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-TRACE_EVENT(kvm_book3s_mmu_map,
-	TP_PROTO(struct hpte_cache *pte),
-	TP_ARGS(pte),
-
-	TP_STRUCT__entry(
-		__field(	u64,		host_vpn	)
-		__field(	u64,		pfn		)
-		__field(	ulong,		eaddr		)
-		__field(	u64,		vpage		)
-		__field(	ulong,		raddr		)
-		__field(	int,		flags		)
-	),
-
-	TP_fast_assign(
-		__entry->host_vpn	= pte->host_vpn;
-		__entry->pfn		= pte->pfn;
-		__entry->eaddr		= pte->pte.eaddr;
-		__entry->vpage		= pte->pte.vpage;
-		__entry->raddr		= pte->pte.raddr;
-		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
-					  (pte->pte.may_write ? 0x2 : 0) |
-					  (pte->pte.may_execute ? 0x1 : 0);
-	),
-
-	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
-		  __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_invalidate,
-	TP_PROTO(struct hpte_cache *pte),
-	TP_ARGS(pte),
-
-	TP_STRUCT__entry(
-		__field(	u64,		host_vpn	)
-		__field(	u64,		pfn		)
-		__field(	ulong,		eaddr		)
-		__field(	u64,		vpage		)
-		__field(	ulong,		raddr		)
-		__field(	int,		flags		)
-	),
-
-	TP_fast_assign(
-		__entry->host_vpn	= pte->host_vpn;
-		__entry->pfn		= pte->pfn;
-		__entry->eaddr		= pte->pte.eaddr;
-		__entry->vpage		= pte->pte.vpage;
-		__entry->raddr		= pte->pte.raddr;
-		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
-					  (pte->pte.may_write ? 0x2 : 0) |
-					  (pte->pte.may_execute ? 0x1 : 0);
-	),
-
-	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
-		  __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_flush,
-	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
-		 unsigned long long p2),
-	TP_ARGS(type, vcpu, p1, p2),
-
-	TP_STRUCT__entry(
-		__field(	int,			count		)
-		__field(	unsigned long long,	p1		)
-		__field(	unsigned long long,	p2		)
-		__field(	const char *,		type		)
-	),
-
-	TP_fast_assign(
-		__entry->count		= to_book3s(vcpu)->hpte_cache_count;
-		__entry->p1		= p1;
-		__entry->p2		= p2;
-		__entry->type		= type;
-	),
-
-	TP_printk("Flush %d %sPTEs: %llx - %llx",
-		  __entry->count, __entry->type, __entry->p1, __entry->p2)
-);
-
-TRACE_EVENT(kvm_book3s_slb_found,
-	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
-	TP_ARGS(gvsid, hvsid),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long long,	gvsid		)
-		__field(	unsigned long long,	hvsid		)
-	),
-
-	TP_fast_assign(
-		__entry->gvsid		= gvsid;
-		__entry->hvsid		= hvsid;
-	),
-
-	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_fail,
-	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
-	TP_ARGS(sid_map_mask, gvsid),
-
-	TP_STRUCT__entry(
-		__field(	unsigned short,		sid_map_mask	)
-		__field(	unsigned long long,	gvsid		)
-	),
-
-	TP_fast_assign(
-		__entry->sid_map_mask	= sid_map_mask;
-		__entry->gvsid		= gvsid;
-	),
-
-	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
-		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_map,
-	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
-		 unsigned long long hvsid),
-	TP_ARGS(sid_map_mask, gvsid, hvsid),
-
-	TP_STRUCT__entry(
-		__field(	unsigned short,		sid_map_mask	)
-		__field(	unsigned long long,	guest_vsid	)
-		__field(	unsigned long long,	host_vsid	)
-	),
-
-	TP_fast_assign(
-		__entry->sid_map_mask	= sid_map_mask;
-		__entry->guest_vsid	= gvsid;
-		__entry->host_vsid	= hvsid;
-	),
-
-	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
-		  __entry->guest_vsid, __entry->host_vsid)
-);
-
-TRACE_EVENT(kvm_book3s_slbmte,
-	TP_PROTO(u64 slb_vsid, u64 slb_esid),
-	TP_ARGS(slb_vsid, slb_esid),
-
-	TP_STRUCT__entry(
-		__field(	u64,	slb_vsid	)
-		__field(	u64,	slb_esid	)
-	),
-
-	TP_fast_assign(
-		__entry->slb_vsid	= slb_vsid;
-		__entry->slb_esid	= slb_esid;
-	),
-
-	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
-);
-
-#endif /* CONFIG_PPC_BOOK3S */
-
-
-/*************************************************************************
- *                         Book3E trace points                           *
- *************************************************************************/
-
-#ifdef CONFIG_BOOKE
-
-TRACE_EVENT(kvm_booke206_stlb_write,
-	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
-	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
-
-	TP_STRUCT__entry(
-		__field(	__u32,	mas0		)
-		__field(	__u32,	mas8		)
-		__field(	__u32,	mas1		)
-		__field(	__u64,	mas2		)
-		__field(	__u64,	mas7_3		)
-	),
-
-	TP_fast_assign(
-		__entry->mas0		= mas0;
-		__entry->mas8		= mas8;
-		__entry->mas1		= mas1;
-		__entry->mas2		= mas2;
-		__entry->mas7_3		= mas7_3;
-	),
-
-	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
-		__entry->mas0, __entry->mas8, __entry->mas1,
-		__entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_gtlb_write,
-	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
-	TP_ARGS(mas0, mas1, mas2, mas7_3),
-
-	TP_STRUCT__entry(
-		__field(	__u32,	mas0		)
-		__field(	__u32,	mas1		)
-		__field(	__u64,	mas2		)
-		__field(	__u64,	mas7_3		)
-	),
-
-	TP_fast_assign(
-		__entry->mas0		= mas0;
-		__entry->mas1		= mas1;
-		__entry->mas2		= mas2;
-		__entry->mas7_3		= mas7_3;
-	),
-
-	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
-		__entry->mas0, __entry->mas1,
-		__entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_ref_release,
-	TP_PROTO(__u64 pfn, __u32 flags),
-	TP_ARGS(pfn, flags),
-
-	TP_STRUCT__entry(
-		__field(	__u64,	pfn		)
-		__field(	__u32,	flags		)
-	),
-
-	TP_fast_assign(
-		__entry->pfn		= pfn;
-		__entry->flags		= flags;
-	),
-
-	TP_printk("pfn=%llx flags=%x",
-		__entry->pfn, __entry->flags)
-);
-
-TRACE_EVENT(kvm_booke_queue_irqprio,
-	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
-	TP_ARGS(vcpu, priority),
-
-	TP_STRUCT__entry(
-		__field(	__u32,	cpu_nr		)
-		__field(	__u32,	priority		)
-		__field(	unsigned long,	pending		)
-	),
-
-	TP_fast_assign(
-		__entry->cpu_nr		= vcpu->vcpu_id;
-		__entry->priority	= priority;
-		__entry->pending	= vcpu->arch.pending_exceptions;
-	),
-
-	TP_printk("vcpu=%x prio=%x pending=%lx",
-		__entry->cpu_nr, __entry->priority, __entry->pending)
-);
-
-#endif
-
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
new file mode 100644
index 0000000..f7537cf
--- /dev/null
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -0,0 +1,177 @@
+#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_BOOKE_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_booke
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
+#define kvm_trace_symbol_exit \
+	{0, "CRITICAL"}, \
+	{1, "MACHINE_CHECK"}, \
+	{2, "DATA_STORAGE"}, \
+	{3, "INST_STORAGE"}, \
+	{4, "EXTERNAL"}, \
+	{5, "ALIGNMENT"}, \
+	{6, "PROGRAM"}, \
+	{7, "FP_UNAVAIL"}, \
+	{8, "SYSCALL"}, \
+	{9, "AP_UNAVAIL"}, \
+	{10, "DECREMENTER"}, \
+	{11, "FIT"}, \
+	{12, "WATCHDOG"}, \
+	{13, "DTLB_MISS"}, \
+	{14, "ITLB_MISS"}, \
+	{15, "DEBUG"}, \
+	{32, "SPE_UNAVAIL"}, \
+	{33, "SPE_FP_DATA"}, \
+	{34, "SPE_FP_ROUND"}, \
+	{35, "PERFORMANCE_MONITOR"}, \
+	{36, "DOORBELL"}, \
+	{37, "DOORBELL_CRITICAL"}, \
+	{38, "GUEST_DBELL"}, \
+	{39, "GUEST_DBELL_CRIT"}, \
+	{40, "HV_SYSCALL"}, \
+	{41, "HV_PRIV"}
+
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+	TP_ARGS(exit_nr, vcpu),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	exit_nr		)
+		__field(	unsigned long,	pc		)
+		__field(	unsigned long,	msr		)
+		__field(	unsigned long,	dar		)
+		__field(	unsigned long,	last_inst	)
+	),
+
+	TP_fast_assign(
+		__entry->exit_nr	= exit_nr;
+		__entry->pc		= kvmppc_get_pc(vcpu);
+		__entry->dar		= kvmppc_get_fault_dar(vcpu);
+		__entry->msr		= vcpu->arch.shared->msr;
+		__entry->last_inst	= vcpu->arch.last_inst;
+	),
+
+	TP_printk("exit=%s"
+		" | pc=0x%lx"
+		" | msr=0x%lx"
+		" | dar=0x%lx"
+		" | last_inst=0x%lx"
+		,
+		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+		__entry->pc,
+		__entry->msr,
+		__entry->dar,
+		__entry->last_inst
+		)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	hva		)
+	),
+
+	TP_fast_assign(
+		__entry->hva		= hva;
+	),
+
+	TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas8		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas8		= mas8;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas8, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_ref_release,
+	TP_PROTO(__u64 pfn, __u32 flags),
+	TP_ARGS(pfn, flags),
+
+	TP_STRUCT__entry(
+		__field(	__u64,	pfn		)
+		__field(	__u32,	flags		)
+	),
+
+	TP_fast_assign(
+		__entry->pfn		= pfn;
+		__entry->flags		= flags;
+	),
+
+	TP_printk("pfn=%llx flags=%x",
+		__entry->pfn, __entry->flags)
+);
+
+TRACE_EVENT(kvm_booke_queue_irqprio,
+	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
+	TP_ARGS(vcpu, priority),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	cpu_nr		)
+		__field(	__u32,	priority		)
+		__field(	unsigned long,	pending		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu_nr		= vcpu->vcpu_id;
+		__entry->priority	= priority;
+		__entry->pending	= vcpu->arch.pending_exceptions;
+	),
+
+	TP_printk("vcpu=%x prio=%x pending=%lx",
+		__entry->cpu_nr, __entry->priority, __entry->pending)
+);
+
+#endif /* _TRACE_KVM_BOOKE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
new file mode 100644
index 0000000..8b22e47
--- /dev/null
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -0,0 +1,297 @@
+
+#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_PR_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_pr
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
+#define kvm_trace_symbol_exit \
+	{0x100, "SYSTEM_RESET"}, \
+	{0x200, "MACHINE_CHECK"}, \
+	{0x300, "DATA_STORAGE"}, \
+	{0x380, "DATA_SEGMENT"}, \
+	{0x400, "INST_STORAGE"}, \
+	{0x480, "INST_SEGMENT"}, \
+	{0x500, "EXTERNAL"}, \
+	{0x501, "EXTERNAL_LEVEL"}, \
+	{0x502, "EXTERNAL_HV"}, \
+	{0x600, "ALIGNMENT"}, \
+	{0x700, "PROGRAM"}, \
+	{0x800, "FP_UNAVAIL"}, \
+	{0x900, "DECREMENTER"}, \
+	{0x980, "HV_DECREMENTER"}, \
+	{0xc00, "SYSCALL"}, \
+	{0xd00, "TRACE"}, \
+	{0xe00, "H_DATA_STORAGE"}, \
+	{0xe20, "H_INST_STORAGE"}, \
+	{0xe40, "H_EMUL_ASSIST"}, \
+	{0xf00, "PERFMON"}, \
+	{0xf20, "ALTIVEC"}, \
+	{0xf40, "VSX"}
+
+TRACE_EVENT(kvm_book3s_reenter,
+	TP_PROTO(int r, struct kvm_vcpu *vcpu),
+	TP_ARGS(r, vcpu),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	r		)
+		__field(	unsigned long,	pc		)
+	),
+
+	TP_fast_assign(
+		__entry->r		= r;
+		__entry->pc		= kvmppc_get_pc(vcpu);
+	),
+
+	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
+);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+TRACE_EVENT(kvm_book3s_64_mmu_map,
+	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
+		 struct kvmppc_pte *orig_pte),
+	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
+
+	TP_STRUCT__entry(
+		__field(	unsigned char,		flag_w		)
+		__field(	unsigned char,		flag_x		)
+		__field(	unsigned long,		eaddr		)
+		__field(	unsigned long,		hpteg		)
+		__field(	unsigned long,		va		)
+		__field(	unsigned long long,	vpage		)
+		__field(	unsigned long,		hpaddr		)
+	),
+
+	TP_fast_assign(
+		__entry->flag_w	= ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
+		__entry->flag_x	= (rflags & HPTE_R_N) ? '-' : 'x';
+		__entry->eaddr	= orig_pte->eaddr;
+		__entry->hpteg	= hpteg;
+		__entry->va	= va;
+		__entry->vpage	= orig_pte->vpage;
+		__entry->hpaddr	= hpaddr;
+	),
+
+	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
+		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
+		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
+);
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+TRACE_EVENT(kvm_book3s_mmu_map,
+	TP_PROTO(struct hpte_cache *pte),
+	TP_ARGS(pte),
+
+	TP_STRUCT__entry(
+		__field(	u64,		host_vpn	)
+		__field(	u64,		pfn		)
+		__field(	ulong,		eaddr		)
+		__field(	u64,		vpage		)
+		__field(	ulong,		raddr		)
+		__field(	int,		flags		)
+	),
+
+	TP_fast_assign(
+		__entry->host_vpn	= pte->host_vpn;
+		__entry->pfn		= pte->pfn;
+		__entry->eaddr		= pte->pte.eaddr;
+		__entry->vpage		= pte->pte.vpage;
+		__entry->raddr		= pte->pte.raddr;
+		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
+					  (pte->pte.may_write ? 0x2 : 0) |
+					  (pte->pte.may_execute ? 0x1 : 0);
+	),
+
+	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+		  __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_invalidate,
+	TP_PROTO(struct hpte_cache *pte),
+	TP_ARGS(pte),
+
+	TP_STRUCT__entry(
+		__field(	u64,		host_vpn	)
+		__field(	u64,		pfn		)
+		__field(	ulong,		eaddr		)
+		__field(	u64,		vpage		)
+		__field(	ulong,		raddr		)
+		__field(	int,		flags		)
+	),
+
+	TP_fast_assign(
+		__entry->host_vpn	= pte->host_vpn;
+		__entry->pfn		= pte->pfn;
+		__entry->eaddr		= pte->pte.eaddr;
+		__entry->vpage		= pte->pte.vpage;
+		__entry->raddr		= pte->pte.raddr;
+		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
+					  (pte->pte.may_write ? 0x2 : 0) |
+					  (pte->pte.may_execute ? 0x1 : 0);
+	),
+
+	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+		  __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_flush,
+	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
+		 unsigned long long p2),
+	TP_ARGS(type, vcpu, p1, p2),
+
+	TP_STRUCT__entry(
+		__field(	int,			count		)
+		__field(	unsigned long long,	p1		)
+		__field(	unsigned long long,	p2		)
+		__field(	const char *,		type		)
+	),
+
+	TP_fast_assign(
+		__entry->count		= to_book3s(vcpu)->hpte_cache_count;
+		__entry->p1		= p1;
+		__entry->p2		= p2;
+		__entry->type		= type;
+	),
+
+	TP_printk("Flush %d %sPTEs: %llx - %llx",
+		  __entry->count, __entry->type, __entry->p1, __entry->p2)
+);
+
+TRACE_EVENT(kvm_book3s_slb_found,
+	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
+	TP_ARGS(gvsid, hvsid),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long long,	gvsid		)
+		__field(	unsigned long long,	hvsid		)
+	),
+
+	TP_fast_assign(
+		__entry->gvsid		= gvsid;
+		__entry->hvsid		= hvsid;
+	),
+
+	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_fail,
+	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
+	TP_ARGS(sid_map_mask, gvsid),
+
+	TP_STRUCT__entry(
+		__field(	unsigned short,		sid_map_mask	)
+		__field(	unsigned long long,	gvsid		)
+	),
+
+	TP_fast_assign(
+		__entry->sid_map_mask	= sid_map_mask;
+		__entry->gvsid		= gvsid;
+	),
+
+	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
+		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_map,
+	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
+		 unsigned long long hvsid),
+	TP_ARGS(sid_map_mask, gvsid, hvsid),
+
+	TP_STRUCT__entry(
+		__field(	unsigned short,		sid_map_mask	)
+		__field(	unsigned long long,	guest_vsid	)
+		__field(	unsigned long long,	host_vsid	)
+	),
+
+	TP_fast_assign(
+		__entry->sid_map_mask	= sid_map_mask;
+		__entry->guest_vsid	= gvsid;
+		__entry->host_vsid	= hvsid;
+	),
+
+	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
+		  __entry->guest_vsid, __entry->host_vsid)
+);
+
+TRACE_EVENT(kvm_book3s_slbmte,
+	TP_PROTO(u64 slb_vsid, u64 slb_esid),
+	TP_ARGS(slb_vsid, slb_esid),
+
+	TP_STRUCT__entry(
+		__field(	u64,	slb_vsid	)
+		__field(	u64,	slb_esid	)
+	),
+
+	TP_fast_assign(
+		__entry->slb_vsid	= slb_vsid;
+		__entry->slb_esid	= slb_esid;
+	),
+
+	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
+);
+
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+	TP_ARGS(exit_nr, vcpu),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	exit_nr		)
+		__field(	unsigned long,	pc		)
+		__field(	unsigned long,	msr		)
+		__field(	unsigned long,	dar		)
+		__field(	unsigned long,	srr1		)
+		__field(	unsigned long,	last_inst	)
+	),
+
+	TP_fast_assign(
+		__entry->exit_nr	= exit_nr;
+		__entry->pc		= kvmppc_get_pc(vcpu);
+		__entry->dar		= kvmppc_get_fault_dar(vcpu);
+		__entry->msr		= vcpu->arch.shared->msr;
+		__entry->srr1		= vcpu->arch.shadow_srr1;
+		__entry->last_inst	= vcpu->arch.last_inst;
+	),
+
+	TP_printk("exit=%s"
+		" | pc=0x%lx"
+		" | msr=0x%lx"
+		" | dar=0x%lx"
+		" | srr1=0x%lx"
+		" | last_inst=0x%lx"
+		,
+		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+		__entry->pc,
+		__entry->msr,
+		__entry->dar,
+		__entry->srr1,
+		__entry->last_inst
+		)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	hva		)
+	),
+
+	TP_fast_assign(
+		__entry->hva		= hva;
+	),
+
+	TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+#endif /* _TRACE_KVM_PR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
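
The TRACE_SYSTEM/TRACE_INCLUDE_* definitions in the two new headers
follow the usual tracepoint convention: exactly one translation unit per
flavour instantiates the events by defining CREATE_TRACE_POINTS before
the include. A sketch of the expected consumer (file name assumed):

	/* e.g. in book3s_pr.c: emit the kvm_pr tracepoint definitions */
	#define CREATE_TRACE_POINTS
	#include "trace_pr.h"
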
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 6747eec..7b6c107 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -287,9 +287,7 @@
 			pte_clear(&init_mm, addr, ptep);
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
-
-				ClearPageReserved(page);
-				__free_page(page);
+				__free_reserved_page(page);
 			}
 		}
 		addr += PAGE_SIZE;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index d67db4b..90bb6d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -633,8 +633,6 @@
 
 /*
  * This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
  */
 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long addr, unsigned long end,
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 33d6784..078d3e0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -938,8 +938,7 @@
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
-		unsigned long node_end_pfn = node->node_start_pfn +
-					     node->node_spanned_pages;
+		unsigned long node_end_pfn = pgdat_end_pfn(node);
 
 		/*
 		 * Check to make sure that this memblock.reserved area is
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6c856fb..5b96017 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -121,7 +121,10 @@
 	ptepage = alloc_pages(flags, 0);
 	if (!ptepage)
 		return NULL;
-	pgtable_page_ctor(ptepage);
+	if (!pgtable_page_ctor(ptepage)) {
+		__free_page(ptepage);
+		return NULL;
+	}
 	return ptepage;
 }
 
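
This hunk, and the matching ones in pgtable_64.c, s390, score and sh
further down, all adapt to pgtable_page_ctor() returning false on
failure (it may have to allocate a separate page-table spinlock). The
shared idiom, sketched as a generic helper with an illustrative name:

	#include <linux/mm.h>

	static struct page *pte_page_alloc(gfp_t flags)
	{
		struct page *page = alloc_pages(flags, 0);

		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {	/* may fail allocating a ptlock */
			__free_page(page);
			return NULL;
		}
		return page;
	}
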
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 536eec72..9d95786 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -378,6 +378,10 @@
 				       __GFP_REPEAT | __GFP_ZERO);
 	if (!page)
 		return NULL;
+	if (!kernel && !pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	ret = page_address(page);
 	spin_lock(&mm->page_table_lock);
@@ -392,9 +396,6 @@
 	}
 	spin_unlock(&mm->page_table_lock);
 
-	if (!kernel)
-		pgtable_page_ctor(page);
-
 	return (pte_t *)ret;
 }
 
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index db4e638..3844f13 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/rcupdate.h>
+#include <linux/binfmts.h>
 
 #include <asm/spu.h>
 
@@ -126,7 +127,7 @@
 	return ret;
 }
 
-int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 {
 	struct spufs_calls *calls;
 	int ret;
@@ -135,7 +136,7 @@
 	if (!calls)
 		return 0;
 
-	ret = calls->coredump_extra_notes_write(file, foffset);
+	ret = calls->coredump_extra_notes_write(cprm);
 
 	spufs_calls_put(calls);
 
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index c9500ea..be6212d 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -27,6 +27,8 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
+#include <linux/coredump.h>
+#include <linux/binfmts.h>
 
 #include <asm/uaccess.h>
 
@@ -48,44 +50,6 @@
 	return ++ret; /* count trailing NULL */
 }
 
-/*
- * These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
-{
-	unsigned long limit = rlimit(RLIMIT_CORE);
-	ssize_t written;
-
-	if (*foffset + nr > limit)
-		return -EIO;
-
-	written = file->f_op->write(file, addr, nr, &file->f_pos);
-	*foffset += written;
-
-	if (written != nr)
-		return -EIO;
-
-	return 0;
-}
-
-static int spufs_dump_align(struct file *file, char *buf, loff_t new_off,
-			    loff_t *foffset)
-{
-	int rc, size;
-
-	size = min((loff_t)PAGE_SIZE, new_off - *foffset);
-	memset(buf, 0, size);
-
-	rc = 0;
-	while (rc == 0 && new_off > *foffset) {
-		size = min((loff_t)PAGE_SIZE, new_off - *foffset);
-		rc = spufs_dump_write(file, buf, size, foffset);
-	}
-
-	return rc;
-}
-
 static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
 {
 	int i, sz, total = 0;
@@ -165,10 +129,10 @@
 }
 
 static int spufs_arch_write_note(struct spu_context *ctx, int i,
-				  struct file *file, int dfd, loff_t *foffset)
+				  struct coredump_params *cprm, int dfd)
 {
 	loff_t pos = 0;
-	int sz, rc, nread, total = 0;
+	int sz, rc, total = 0;
 	const int bufsz = PAGE_SIZE;
 	char *name;
 	char fullname[80], *buf;
@@ -186,42 +150,39 @@
 	en.n_descsz = sz;
 	en.n_type = NT_SPU;
 
-	rc = spufs_dump_write(file, &en, sizeof(en), foffset);
-	if (rc)
-		goto out;
+	if (!dump_emit(cprm, &en, sizeof(en)))
+		goto Eio;
 
-	rc = spufs_dump_write(file, fullname, en.n_namesz, foffset);
-	if (rc)
-		goto out;
+	if (!dump_emit(cprm, fullname, en.n_namesz))
+		goto Eio;
 
-	rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset);
-	if (rc)
-		goto out;
+	if (!dump_align(cprm, 4))
+		goto Eio;
 
 	do {
-		nread = do_coredump_read(i, ctx, buf, bufsz, &pos);
-		if (nread > 0) {
-			rc = spufs_dump_write(file, buf, nread, foffset);
-			if (rc)
-				goto out;
-			total += nread;
+		rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
+		if (rc > 0) {
+			if (!dump_emit(cprm, buf, rc))
+				goto Eio;
+			total += rc;
 		}
-	} while (nread == bufsz && total < sz);
+	} while (rc == bufsz && total < sz);
 
-	if (nread < 0) {
-		rc = nread;
+	if (rc < 0)
 		goto out;
-	}
 
-	rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4),
-			      foffset);
-
+	if (!dump_skip(cprm,
+		       roundup(cprm->written - total + sz, 4) - cprm->written))
+		goto Eio;
 out:
 	free_page((unsigned long)buf);
 	return rc;
+Eio:
+	free_page((unsigned long)buf);
+	return -EIO;
 }
 
-int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+int spufs_coredump_extra_notes_write(struct coredump_params *cprm)
 {
 	struct spu_context *ctx;
 	int fd, j, rc;
@@ -233,7 +194,7 @@
 			return rc;
 
 		for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
-			rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
+			rc = spufs_arch_write_note(ctx, j, cprm, fd);
 			if (rc) {
 				spu_release_saved(ctx);
 				return rc;
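
The conversion above drops the hand-rolled spufs_dump_write() and
spufs_dump_align() in favour of the generic <linux/coredump.h> helpers,
which return nonzero on success and account all output in cprm->written.
A hedged sketch of emitting one note header with them (helper name
illustrative):

	#include <linux/coredump.h>
	#include <linux/elf.h>

	static int emit_note_hdr(struct coredump_params *cprm,
				 const struct elf_note *en, const char *name)
	{
		if (!dump_emit(cprm, en, sizeof(*en)))		/* header */
			return -EIO;
		if (!dump_emit(cprm, name, en->n_namesz))	/* name */
			return -EIO;
		if (!dump_align(cprm, 4))	/* pad to a 4-byte boundary */
			return -EIO;
		return 0;
	}
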
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 67852ad..0ba3c95 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -247,12 +247,13 @@
 
 /* system call implementation */
 extern struct spufs_calls spufs_calls;
+struct coredump_params;
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
 long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
 			umode_t mode, struct file *filp);
 /* ELF coredump callbacks for writing SPU ELF notes */
 extern int spufs_coredump_extra_notes_size(void);
-extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm);
 
 extern const struct file_operations spufs_context_fops;
 
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index fc536f2..7553b6a 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -452,7 +452,7 @@
 	 */
 	if (use_irq) {
 		/* Clear completion */
-		INIT_COMPLETION(host->complete);
+		reinit_completion(&host->complete);
 		/* Ack stale interrupts */
 		kw_write_reg(reg_isr, kw_read_reg(reg_isr));
 		/* Arm timeout */
@@ -717,7 +717,7 @@
 			return -EINVAL;
 		}
 
-		INIT_COMPLETION(comp);
+		reinit_completion(&comp);
 		req->data[0] = PMU_I2C_CMD;
 		req->reply[0] = 0xff;
 		req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
@@ -748,7 +748,7 @@
 
 		hdr->bus = PMU_I2C_BUS_STATUS;
 
-		INIT_COMPLETION(comp);
+		reinit_completion(&comp);
 		req->data[0] = PMU_I2C_CMD;
 		req->reply[0] = 0xff;
 		req->nbytes = 2;
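
INIT_COMPLETION(x) is replaced tree-wide by reinit_completion(&x), which
takes a pointer and only resets the done counter of an
already-initialised completion. The reuse pattern these hunks rely on,
sketched with illustrative names:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(xfer_done);

	static void start_xfer_and_wait(void)
	{
		reinit_completion(&xfer_done);	/* reset before each reuse */
		/* ... start the transfer; the IRQ handler calls
		 *     complete(&xfer_done) when it finishes ... */
		wait_for_completion(&xfer_done);
	}
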
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 7cfdaae..a8fe5aa 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -404,46 +404,38 @@
 	unsigned long drc_index;
 	int rc;
 
-	cpu_hotplug_driver_lock();
 	rc = strict_strtoul(buf, 0, &drc_index);
-	if (rc) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (rc)
+		return -EINVAL;
 
 	parent = of_find_node_by_path("/cpus");
-	if (!parent) {
-		rc = -ENODEV;
-		goto out;
-	}
+	if (!parent)
+		return -ENODEV;
 
 	dn = dlpar_configure_connector(drc_index, parent);
-	if (!dn) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (!dn)
+		return -EINVAL;
 
 	of_node_put(parent);
 
 	rc = dlpar_acquire_drc(drc_index);
 	if (rc) {
 		dlpar_free_cc_nodes(dn);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	rc = dlpar_attach_node(dn);
 	if (rc) {
 		dlpar_release_drc(drc_index);
 		dlpar_free_cc_nodes(dn);
-		goto out;
+		return rc;
 	}
 
 	rc = dlpar_online_cpu(dn);
-out:
-	cpu_hotplug_driver_unlock();
+	if (rc)
+		return rc;
 
-	return rc ? rc : count;
+	return count;
 }
 
 static int dlpar_offline_cpu(struct device_node *dn)
@@ -516,30 +508,27 @@
 		return -EINVAL;
 	}
 
-	cpu_hotplug_driver_lock();
 	rc = dlpar_offline_cpu(dn);
 	if (rc) {
 		of_node_put(dn);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	rc = dlpar_release_drc(*drc_index);
 	if (rc) {
 		of_node_put(dn);
-		goto out;
+		return rc;
 	}
 
 	rc = dlpar_detach_node(dn);
 	if (rc) {
 		dlpar_acquire_drc(*drc_index);
-		goto out;
+		return rc;
 	}
 
 	of_node_put(dn);
-out:
-	cpu_hotplug_driver_unlock();
-	return rc ? rc : count;
+
+	return count;
 }
 
 static int __init pseries_dlpar_init(void)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 057fc89..7bfaf58 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -31,7 +31,7 @@
 #define NVRW_CNT 0x20
 
 /*
- * Set oops header version to distingush between old and new format header.
+ * Set oops header version to distinguish between old and new format header.
  * lnx,oops-log partition max size is 4000, header version > 4000 will
  * help in identifying new header.
  */
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 5f997e7..16a2552 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -106,7 +106,7 @@
 	atomic_set(&suspend_data.done, 0);
 	atomic_set(&suspend_data.error, 0);
 	suspend_data.complete = &suspend_work;
-	INIT_COMPLETION(suspend_work);
+	reinit_completion(&suspend_work);
 	return 0;
 }
 
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 2103963..4dfd61d 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -45,7 +45,7 @@
 	u8 hdr_type;
 
 	/* if we aren't a PCIe don't bother */
-	if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+	if (!pci_is_pcie(dev))
 		return;
 
 	/* if we aren't in host mode don't bother */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f75d7e5..314fced 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -141,7 +141,6 @@
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select VIRT_CPU_ACCOUNTING
 	select VIRT_TO_BUS
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e87ecaa..d5bc375 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -38,13 +38,6 @@
 	struct sca_entry cpu[64];
 } __attribute__((packed));
 
-#define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
-#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
-#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
-#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -220,7 +213,6 @@
 /* for local_interrupt.action_flags */
 #define ACTION_STORE_ON_STOP		(1<<0)
 #define ACTION_STOP_ON_STOP		(1<<1)
-#define ACTION_RELOADVCPU_ON_STOP	(1<<2)
 
 struct kvm_s390_local_interrupt {
 	spinlock_t lock;
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 9249449..c286c2e 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -82,4 +82,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_SOCKET_H */
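
SO_MAX_PACING_RATE caps a socket's transmit pacing rate in bytes per
second; the s390 value 47 matches the option added to the other
architectures this cycle. A minimal userspace sketch (helper name
illustrative):

	#include <sys/socket.h>

	static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
	{
		return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
				  &bytes_per_sec, sizeof(bytes_per_sec));
	}
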
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 5a3ab5c..6e24429 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -49,7 +49,7 @@
 	__u32 gprs_high[NUM_GPRS];
 } rt_sigframe32;
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 59a9c35..bc71a7b 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -680,7 +680,7 @@
 	case KPROBE_HIT_SSDONE:
 		/*
 		 * We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(p);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 7845e15..b89b591 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -50,7 +50,7 @@
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL, -1,
+				    GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
 #endif
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3a74d8a..78d967f 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -107,14 +107,13 @@
 
 static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
-	int ret, idx;
+	int ret;
 
 	/* No virtio-ccw notification? Get out quickly. */
 	if (!vcpu->kvm->arch.css_support ||
 	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
 		return -EOPNOTSUPP;
 
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	/*
 	 * The layout is as follows:
 	 * - gpr 2 contains the subchannel id (passed as addr)
@@ -125,7 +124,6 @@
 				      vcpu->run->s.regs.gprs[2],
 				      8, &vcpu->run->s.regs.gprs[3],
 				      vcpu->run->s.regs.gprs[4]);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	/*
 	 * Return cookie in gpr 2, but don't overwrite the register if the
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 99d789e..374a439 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,20 +18,27 @@
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
 
+/* Convert real to absolute address by applying the prefix of the CPU */
+static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+						 unsigned long gaddr)
+{
+	unsigned long prefix  = vcpu->arch.sie_block->prefix;
+	if (gaddr < 2 * PAGE_SIZE)
+		gaddr += prefix;
+	else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
+		gaddr -= prefix;
+	return gaddr;
+}
+
 static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
 					  void __user *gptr,
 					  int prefixing)
 {
-	unsigned long prefix  = vcpu->arch.sie_block->prefix;
 	unsigned long gaddr = (unsigned long) gptr;
 	unsigned long uaddr;
 
-	if (prefixing) {
-		if (gaddr < 2 * PAGE_SIZE)
-			gaddr += prefix;
-		else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
-			gaddr -= prefix;
-	}
+	if (prefixing)
+		gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
 	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
 	if (IS_ERR_VALUE(uaddr))
 		uaddr = -EFAULT;
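
kvm_s390_real_to_abs() implements the s390 prefixing rule: the first two
pages of real storage are exchanged with the two pages at the CPU's
prefix address. Worked through with 4K pages and an assumed prefix of
0x10000:

	real 0x00000 -> absolute 0x10000   (low pages relocate to the prefix area)
	real 0x10000 -> absolute 0x00000   (prefix area maps back to low storage)
	real 0x52000 -> absolute 0x52000   (all other addresses unchanged)
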
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5ee56e5..5ddbbde 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -62,12 +62,6 @@
 
 	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
-		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
-		rc = SIE_INTERCEPT_RERUNVCPU;
-		vcpu->run->exit_reason = KVM_EXIT_INTR;
-	}
-
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
 		atomic_set_mask(CPUSTAT_STOPPED,
 				&vcpu->arch.sie_block->cpuflags);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f1f7ac..5f79d2d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -436,6 +436,7 @@
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	add_wait_queue(&vcpu->wq, &wait);
@@ -455,6 +456,8 @@
 	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ed8064c..569494e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -695,9 +695,9 @@
 	return 0;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
-	int rc;
+	int rc, cpuflags;
 
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
@@ -715,28 +715,24 @@
 		return rc;
 
 	vcpu->arch.sie_block->icptcode = 0;
-	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
-		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	trace_kvm_s390_sie_enter(vcpu,
-				 atomic_read(&vcpu->arch.sie_block->cpuflags));
+	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
+	trace_kvm_s390_sie_enter(vcpu, cpuflags);
 
-	/*
-	 * As PF_VCPU will be used in fault handler, between guest_enter
-	 * and guest_exit should be no uaccess.
-	 */
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
-	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	kvm_guest_exit();
+	return 0;
+}
+
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+{
+	int rc;
 
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
-	if (rc > 0)
+	if (exit_reason >= 0) {
 		rc = 0;
-	if (rc < 0) {
+	} else {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
@@ -747,6 +743,49 @@
 	}
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+	if (rc == 0) {
+		if (kvm_is_ucontrol(vcpu->kvm))
+			rc = -EOPNOTSUPP;
+		else
+			rc = kvm_handle_sie_intercept(vcpu);
+	}
+
+	return rc;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+	int rc, exit_reason;
+
+	/*
+	 * We try to hold kvm->srcu during most of vcpu_run (except when
+	 * running the guest), so that memslots (and other stuff) are
+	 * protected
+	 */
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	do {
+		rc = vcpu_pre_run(vcpu);
+		if (rc)
+			break;
+
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+		/*
+		 * As PF_VCPU will be used in fault handler, between
+		 * guest_enter and guest_exit should be no uaccess.
+		 */
+		preempt_disable();
+		kvm_guest_enter();
+		preempt_enable();
+		exit_reason = sie64a(vcpu->arch.sie_block,
+				     vcpu->run->s.regs.gprs);
+		kvm_guest_exit();
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+		rc = vcpu_post_run(vcpu, exit_reason);
+	} while (!signal_pending(current) && !rc);
+
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	return rc;
 }
 
@@ -755,7 +794,6 @@
 	int rc;
 	sigset_t sigsaved;
 
-rerun_vcpu:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -788,19 +826,7 @@
 	}
 
 	might_fault();
-
-	do {
-		rc = __vcpu_run(vcpu);
-		if (rc)
-			break;
-		if (kvm_is_ucontrol(vcpu->kvm))
-			rc = -EOPNOTSUPP;
-		else
-			rc = kvm_handle_sie_intercept(vcpu);
-	} while (!signal_pending(current) && !rc);
-
-	if (rc == SIE_INTERCEPT_RERUNVCPU)
-		goto rerun_vcpu;
+	rc = __vcpu_run(vcpu);
 
 	if (signal_pending(current) && !rc) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
@@ -958,6 +984,7 @@
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	int idx;
 	long r;
 
 	switch (ioctl) {
@@ -971,7 +998,9 @@
 		break;
 	}
 	case KVM_S390_STORE_STATUS:
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvm_s390_vcpu_store_status(vcpu, arg);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	case KVM_S390_SET_INITIAL_PSW: {
 		psw_t psw;
@@ -1067,12 +1096,13 @@
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index dc99f1c..b44912a 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -28,8 +28,7 @@
 extern unsigned long *vfacilities;
 
 /* negative values are error codes, positive values for internal conditions */
-#define SIE_INTERCEPT_RERUNVCPU		(1<<0)
-#define SIE_INTERCEPT_UCONTROL		(1<<1)
+#define SIE_INTERCEPT_UCONTROL		(1<<0)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
 
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -91,8 +90,10 @@
 
 static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
 {
-	*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
-	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+	if (r1)
+		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+	if (r2)
+		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
 static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 59200ee..2440602 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -30,6 +30,38 @@
 #include "kvm-s390.h"
 #include "trace.h"
 
+/* Handle SCK (SET CLOCK) interception */
+static int handle_set_clock(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu *cpup;
+	s64 hostclk, val;
+	u64 op2;
+	int i;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	op2 = kvm_s390_get_base_disp_s(vcpu);
+	if (op2 & 7)	/* Operand must be on a doubleword boundary */
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (get_guest(vcpu, val, (u64 __user *) op2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (store_tod_clock(&hostclk)) {
+		kvm_s390_set_psw_cc(vcpu, 3);
+		return 0;
+	}
+	val = (val - hostclk) & ~0x3fUL;
+
+	mutex_lock(&vcpu->kvm->lock);
+	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
+		cpup->arch.sie_block->epoch = val;
+	mutex_unlock(&vcpu->kvm->lock);
+
+	kvm_s390_set_psw_cc(vcpu, 0);
+	return 0;
+}
+
 static int handle_set_prefix(struct kvm_vcpu *vcpu)
 {
 	u64 operand2;
@@ -128,6 +160,33 @@
 	return 0;
 }
 
+static int handle_test_block(struct kvm_vcpu *vcpu)
+{
+	unsigned long hva;
+	gpa_t addr;
+	int reg2;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
+	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+	addr = kvm_s390_real_to_abs(vcpu, addr);
+
+	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
+	if (kvm_is_error_hva(hva))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	/*
+	 * We don't expect errors on modern systems, and do not care
+	 * about storage keys (yet), so let's just clear the page.
+	 */
+	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
+		return -EFAULT;
+	kvm_s390_set_psw_cc(vcpu, 0);
+	vcpu->run->s.regs.gprs[0] = 0;
+	return 0;
+}
+
 static int handle_tpi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_interrupt_info *inti;
@@ -438,12 +497,14 @@
 
 static const intercept_handler_t b2_handlers[256] = {
 	[0x02] = handle_stidp,
+	[0x04] = handle_set_clock,
 	[0x10] = handle_set_prefix,
 	[0x11] = handle_store_prefix,
 	[0x12] = handle_store_cpu_address,
 	[0x29] = handle_skey,
 	[0x2a] = handle_skey,
 	[0x2b] = handle_skey,
+	[0x2c] = handle_test_block,
 	[0x30] = handle_io_inst,
 	[0x31] = handle_io_inst,
 	[0x32] = handle_io_inst,
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6bcb045..9b436c2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -64,6 +64,11 @@
 	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
 }
 
+static unsigned long mmap_base_legacy(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
 static inline unsigned long mmap_base(void)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
@@ -89,7 +94,7 @@
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->mmap_base = mmap_base_legacy();
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();
@@ -164,7 +169,7 @@
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->mmap_base = mmap_base_legacy();
 		mm->get_unmapped_area = s390_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0a2e5e0..e794c88 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -772,7 +772,11 @@
 		__free_page(page);
 		return NULL;
 	}
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		kfree(mp);
+		__free_page(page);
+		return NULL;
+	}
 	mp->vmaddr = vmaddr & PMD_MASK;
 	INIT_LIST_HEAD(&mp->mapper);
 	page->index = (unsigned long) mp;
@@ -902,7 +906,10 @@
 		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 		if (!page)
 			return NULL;
-		pgtable_page_ctor(page);
+		if (!pgtable_page_ctor(page)) {
+			__free_page(page);
+			return NULL;
+		}
 		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
@@ -1244,11 +1251,11 @@
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	if (!mm->pmd_huge_pte)
+	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(lh);
 	else
-		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
-	mm->pmd_huge_pte = pgtable;
+		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -1260,12 +1267,12 @@
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
+	pgtable = pmd_huge_pte(mm, pmdp);
 	lh = (struct list_head *) pgtable;
 	if (list_empty(lh))
-		mm->pmd_huge_pte = NULL;
+		pmd_huge_pte(mm, pmdp) = NULL;
 	else {
-		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 		list_del(lh);
 	}
 	ptep = (pte_t *) pgtable;
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 716b3fd..2e06765 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -54,9 +54,12 @@
 	struct page *pte;
 
 	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
-	if (pte) {
-		clear_highpage(pte);
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	clear_highpage(pte);
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
 	}
 	return pte;
 }
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc..9b0979f 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,5 +1,6 @@
 config SUPERH
 	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select EXPERT
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
@@ -711,7 +712,6 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281..09fc2bc 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 21c5088..b9d9489 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -81,7 +81,7 @@
 
 		/*
 		 * Fix version; Note that we avoid version #0
-		 * to distingush NO_CONTEXT.
+		 * to distinguish NO_CONTEXT.
 		 */
 		if (!asid)
 			asid = MMU_CONTEXT_FIRST_VERSION;
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c00785..a33673b 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -47,7 +47,10 @@
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12..18e0377 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD  {						\
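Moving fpu_counter from task_struct into thread_struct changes only where the
field lives; the lazy-FPU heuristic it drives is unchanged, and the
process_32.c hunks below still compare it against a threshold of 5 at context
switch. A sketch of that heuristic with simplified names:

    /* Illustrative only: if the incoming task used the FPU on more
     * than five consecutive switches, restore its state eagerly and
     * skip the FPU-disabled trap.  Because the counter is an unsigned
     * char it wraps at 256, so a task that stops touching the FPU
     * eventually drops back to lazy restore.
     */
    static void sketch_switch_in(struct task_struct *next)
    {
    	if (next->thread.fpu_counter > 5)
    		__fpu_state_restore();
    }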
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d31..eedd4f6 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af5..4e33224 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@
 	restore_fpu(tsk);
 
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933..2885fc9 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 174d124b..e2062e6 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -374,7 +374,7 @@
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;
 
 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */
@@ -393,7 +393,7 @@
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;
 		childregs->sr = (1 << 30); /* not user_mode */
 		childregs->sr |= SR_FD; /* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 33890fd..2d089fe 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -231,7 +231,7 @@
 	if (!p->node_spanned_pages)
 		return;
 
-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 
 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 78c4fdb..d4f7a6a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -12,6 +12,7 @@
 config SPARC
 	bool
 	default y
+	select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
 	select OF
 	select OF_PROMTREE
 	select HAVE_IDE
@@ -28,7 +29,6 @@
 	select HAVE_ARCH_JUMP_LABEL
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
 	select HAVE_BPF_JIT
@@ -64,6 +64,7 @@
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_CONTEXT_TRACKING
 	select HAVE_DEBUG_KMEMLEAK
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 76092c4..f668797 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -93,7 +93,6 @@
 	spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
 	unsigned long		huge_pte_count;
-	struct page		*pgtable_page;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index e155388..aac53fc 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -15,7 +15,10 @@
 #define DCACHE_ALIASING_POSSIBLE
 #endif
 
-#define HPAGE_SHIFT		22
+#define HPAGE_SHIFT		23
+#define REAL_HPAGE_SHIFT	22
+
+#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
@@ -53,8 +56,8 @@
 /* These are used to make use of C type-checking.. */
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
@@ -73,8 +76,8 @@
 /* .. while these make it easier on the compiler */
 typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)	(x)
@@ -93,18 +96,44 @@
 
 typedef pte_t *pgtable_t;
 
+/* These two values define the virtual address space range in which we
+ * must forbid 64-bit user processes from making mappings.  It used to
+ * represent precisely the virtual address space hole present in most
+ * early sparc64 chips including UltraSPARC-I.  But now it also is
+ * further constrained by the limits of our page tables, which is
+ * 43-bits of virtual address.
+ */
+#define SPARC64_VA_HOLE_TOP	_AC(0xfffffc0000000000,UL)
+#define SPARC64_VA_HOLE_BOTTOM	_AC(0x0000040000000000,UL)
+
+/* The next two defines specify the actual exclusion region we
+ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
+ */
+#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
+#define VA_EXCLUDE_END   (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
+
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
-				 (_AC(0x0000000070000000,UL)) : \
-				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
+				 _AC(0x0000000070000000,UL) : \
+				 VA_EXCLUDE_END)
 
 #include <asm-generic/memory_model.h>
 
+#define PAGE_OFFSET_BY_BITS(X)	(-(_AC(1,UL) << (X)))
+extern unsigned long PAGE_OFFSET;
+
 #endif /* !(__ASSEMBLY__) */
 
-/* We used to stick this into a hard-coded global register (%g4)
- * but that does not make sense anymore.
+/* The maximum number of physical memory address bits we support, this
+ * is used to size various tables used to manage kernel TLB misses and
+ * also the sparsemem code.
  */
-#define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
+#define MAX_PHYS_ADDRESS_BITS	47
+
+/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+ * and kpte_linear_bitmap.
+ */
+#define ILOG2_4MB		22
+#define ILOG2_256MB		28
 
 #ifndef __ASSEMBLY__
 
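Since the page tables now cover only 43 bits of virtual address, the forbidden
range is wider than the hardware VA hole alone, and hoisting VA_EXCLUDE_START
and VA_EXCLUDE_END into this header lets sys_sparc_64.c and hugetlbpage.c drop
their duplicated copies (see those hunks below). A hedged sketch of the range
test these constants feed; the helper name is illustrative:

    /* Does [addr, addr + len) wrap around the address space, or
     * intersect the excluded region [VA_EXCLUDE_START,
     * VA_EXCLUDE_END)?  An illustrative rendering of the check
     * sys_sparc_64.c performs with these constants.
     */
    static inline int va_range_excluded(unsigned long addr, unsigned long len)
    {
    	if (addr + len < addr)		/* wraps past the top */
    		return 1;
    	return addr < VA_EXCLUDE_END && addr + len > VA_EXCLUDE_START;
    }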
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 3676031..8358dc1 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -48,18 +48,18 @@
 /* PMD_SHIFT determines the size of the area a second-level page
  * table can map
  */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4))
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
 #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define PMD_BITS	(PAGE_SHIFT - 2)
+#define PMD_BITS	(PAGE_SHIFT - 3)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
-#define PGDIR_BITS	(PAGE_SHIFT - 2)
+#define PGDIR_BITS	(PAGE_SHIFT - 3)
 
-#if (PGDIR_SHIFT + PGDIR_BITS) != 44
+#if (PGDIR_SHIFT + PGDIR_BITS) != 43
 #error Page table parameters do not cover virtual address space properly.
 #endif
 
@@ -67,35 +67,12 @@
 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
 #endif
 
-/* PMDs point to PTE tables which are 4K aligned.  */
-#define PMD_PADDR	_AC(0xfffffffe,UL)
-#define PMD_PADDR_SHIFT	_AC(11,UL)
-
-#define PMD_ISHUGE	_AC(0x00000001,UL)
-
-/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
- * pages, this frees up a bunch of bits in the layout that we can
- * use for the protection settings and software metadata.
- */
-#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
-#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
-#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
-#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
-#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
-#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
-#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
-#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
-
-/* PGDs point to PMD tables which are 8K aligned.  */
-#define PGD_PADDR	_AC(0xfffffffc,UL)
-#define PGD_PADDR_SHIFT	_AC(11,UL)
-
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
 
 /* Entries per page directory level. */
-#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-4))
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD	(1UL << PMD_BITS)
 #define PTRS_PER_PGD	(1UL << PGDIR_BITS)
 
@@ -112,6 +89,7 @@
 #define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
 #define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
 
 /* Advertise support for _PAGE_SPECIAL */
 #define __HAVE_ARCH_PTE_SPECIAL
@@ -125,6 +103,7 @@
 #define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
 #define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
@@ -155,6 +134,7 @@
 #define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
 #define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
 #define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
 #define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
 #define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
@@ -180,6 +160,10 @@
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
 #define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
 
+#if REAL_HPAGE_SHIFT != 22
+#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
+#endif
+
 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
 
@@ -239,16 +223,13 @@
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
-
-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	/* Do nothing, mk_pmd() does this part.  */
-	return pmd;
+	pte_t pte = pfn_pte(page_nr, pgprot);
+
+	return __pmd(pte_val(pte));
 }
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 #endif
 
 /* This one can be done with two shifts.  */
@@ -309,14 +290,25 @@
 	: "=r" (mask), "=r" (tmp)
 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-	       _PAGE_SPECIAL),
+	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-	       _PAGE_SPECIAL));
+	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_modify(pte, newprot);
+
+	return __pmd(pte_val(pte));
+}
+#endif
+
 static inline pte_t pgoff_to_pte(unsigned long off)
 {
 	off <<= PAGE_SHIFT;
@@ -357,7 +349,7 @@
  */
 #define pgprot_noncached pgprot_noncached
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static inline pte_t pte_mkhuge(pte_t pte)
 {
 	unsigned long mask;
@@ -375,6 +367,17 @@
 
 	return __pte(pte_val(pte) | mask);
 }
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkhuge(pte);
+	pte_val(pte) |= _PAGE_PMD_HUGE;
+
+	return __pmd(pte_val(pte));
+}
+#endif
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -626,91 +629,130 @@
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-static inline int pmd_large(pmd_t pmd)
+static inline unsigned long pmd_large(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
-		(PMD_ISHUGE | PMD_HUGE_PRESENT);
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_young(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_young(pte);
 }
 
-static inline int pmd_write(pmd_t pmd)
+static inline unsigned long pmd_write(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_HUGE_WRITE;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
+	pte_t pte = __pte(pmd_val(pmd));
 
-	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
+	return pte_pfn(pte);
 }
 
-static inline int pmd_trans_splitting(pmd_t pmd)
+static inline unsigned long pmd_trans_huge(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
-		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
-static inline int pmd_trans_huge(pmd_t pmd)
+static inline unsigned long pmd_trans_splitting(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_ISHUGE;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pmd_trans_huge(pmd) && pte_special(pte);
 }
 
 #define has_transparent_hugepage() 1
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkold(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_wrprotect(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_DIRTY;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkdirty(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkyoung(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_WRITE;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkwrite(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+	unsigned long mask;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PRESENT_4V;
+	else
+		mask = _PAGE_PRESENT_4U;
+
+	pmd_val(pmd) &= ~mask;
+
 	return pmd;
 }
 
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkspecial(pte);
+
+	return __pmd(pte_val(pte));
 }
 
-extern pgprot_t pmd_pgprot(pmd_t entry);
+static inline pgprot_t pmd_pgprot(pmd_t entry)
+{
+	unsigned long val = pmd_val(entry);
+
+	return __pgprot(val);
+}
 #endif
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return pmd_val(pmd) != 0U;
+	return pmd_val(pmd) != 0UL;
 }
 
 #define pmd_none(pmd)			(!pmd_val(pmd))
@@ -728,33 +770,32 @@
 
 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
+	unsigned long val = __pa((unsigned long) (ptep));
 
 	pmd_val(*pmdp) = val;
 }
 
 #define pud_set(pudp, pmdp)	\
-	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
+	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
 static inline unsigned long __pmd_page(pmd_t pmd)
 {
-	unsigned long paddr = (unsigned long) pmd_val(pmd);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_val(pmd) & PMD_ISHUGE)
-		paddr &= PMD_HUGE_PADDR;
-#endif
-	paddr <<= PMD_PADDR_SHIFT;
-	return ((unsigned long) __va(paddr));
+	pte_t pte = __pte(pmd_val(pmd));
+	unsigned long pfn;
+
+	pfn = pte_pfn(pte);
+
+	return ((unsigned long) __va(pfn << PAGE_SHIFT));
 }
 #define pmd_page(pmd) 			virt_to_page((void *)__pmd_page(pmd))
 #define pud_page_vaddr(pud)		\
-	((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
+	((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud) 			virt_to_page((void *)pud_page_vaddr(pud))
 #define pmd_bad(pmd)			(0)
-#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
+#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
 #define pud_none(pud)			(!pud_val(pud))
 #define pud_bad(pud)			(0)
 #define pud_present(pud)		(pud_val(pud) != 0U)
-#define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)
+#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
 
 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte) 			(!pte_val(pte))
@@ -789,7 +830,7 @@
 				       pmd_t *pmdp)
 {
 	pmd_t pmd = *pmdp;
-	set_pmd_at(mm, addr, pmdp, __pmd(0U));
+	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
 	return pmd;
 }
 
@@ -837,8 +878,8 @@
 })
 #endif
 
-extern pgd_t swapper_pg_dir[2048];
-extern pmd_t swapper_low_pmd_dir[2048];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
 
 extern void paging_init(void);
 extern unsigned long find_ecache_flush_span(unsigned long size);
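The rewrite above works because a huge PMD now carries the same bit layout as
a PTE, so pfn_pmd(), pmd_modify(), pmd_mkhuge() and the other pmd accessors
all reduce to the same three steps: reinterpret the pmd as a pte, apply the
existing pte helper, and convert back. The shared shape, expressed as an
illustrative macro (the kernel spells each helper out by hand):

    #define PMD_VIA_PTE(pmd, pte_op)			\
    ({							\
    	pte_t __p = __pte(pmd_val(pmd));		\
    	__pmd(pte_val(pte_op(__p)));			\
    })

    /* e.g. pmd_mkdirty(pmd) behaves like PMD_VIA_PTE(pmd, pte_mkdirty) */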
diff --git a/arch/sparc/include/asm/sparsemem.h b/arch/sparc/include/asm/sparsemem.h
index b99d4e4..e5e1752 100644
--- a/arch/sparc/include/asm/sparsemem.h
+++ b/arch/sparc/include/asm/sparsemem.h
@@ -3,9 +3,11 @@
 
 #ifdef __KERNEL__
 
+#include <asm/page.h>
+
 #define SECTION_SIZE_BITS       30
-#define MAX_PHYSADDR_BITS       42
-#define MAX_PHYSMEM_BITS        42
+#define MAX_PHYSADDR_BITS       MAX_PHYS_ADDRESS_BITS
+#define MAX_PHYSMEM_BITS        MAX_PHYS_ADDRESS_BITS
 
 #endif /* !(__KERNEL__) */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index d5e5042..5d9292a 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -192,7 +192,7 @@
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
 /* flag bit 6 is available */
 #define TIF_32BIT		7	/* 32-bit binary */
-/* flag bit 8 is available */
+#define TIF_NOHZ		8	/* in adaptive nohz mode */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
@@ -210,6 +210,7 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
 #define _TIF_32BIT		(1<<TIF_32BIT)
+#define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index e696432..2230f80 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -142,98 +142,39 @@
 	or		REG1, %lo(swapper_pg_dir), REG1; \
 	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
-	andn		REG2, 0x3, REG2; \
-	lduw		[REG1 + REG2], REG1; \
+	andn		REG2, 0x7, REG2; \
+	ldx		[REG1 + REG2], REG1; \
 	brz,pn		REG1, FAIL_LABEL; \
 	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx		REG1, PGD_PADDR_SHIFT, REG1; \
-	andn		REG2, 0x3, REG2; \
-	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn		REG2, 0x7, REG2; \
+	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn		REG1, FAIL_LABEL; \
 	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx		REG2, 64 - (PAGE_SHIFT - 1), REG2; \
-	sllx		REG1, PMD_PADDR_SHIFT, REG1; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
 	andn		REG2, 0x7, REG2; \
 	add		REG1, REG2, REG1;
 
-	/* These macros exists only to make the PMD translator below
-	 * easier to read.  It hides the ELF section switch for the
-	 * sun4v code patching.
-	 */
-#define OR_PTE_BIT_1INSN(REG, NAME)			\
-661:	or		REG, _PAGE_##NAME##_4U, REG;	\
-	.section	.sun4v_1insn_patch, "ax";	\
-	.word		661b;				\
-	or		REG, _PAGE_##NAME##_4V, REG;	\
-	.previous;
-
-#define OR_PTE_BIT_2INSN(REG, TMP, NAME)		\
-661:	sethi		%hi(_PAGE_##NAME##_4U), TMP;	\
-	or		REG, TMP, REG;			\
-	.section	.sun4v_2insn_patch, "ax";	\
-	.word		661b;				\
-	mov		-1, TMP;			\
-	or		REG, _PAGE_##NAME##_4V, REG;	\
-	.previous;
-
-	/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.  */
-#define BUILD_PTE_VALID_SZHUGE_CACHE(REG)				   \
-661:	sethi		%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;		   \
-	.section	.sun4v_1insn_patch, "ax";			   \
-	.word		661b;						   \
-	sethi		%uhi(_PAGE_VALID), REG;				   \
-	.previous;							   \
-	sllx		REG, 32, REG;					   \
-661:	or		REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;		   \
-	.section	.sun4v_1insn_patch, "ax";			   \
-	.word		661b;						   \
-	or		REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
-	.previous;
-
 	/* PMD has been loaded into REG1, interpret the value, seeing
 	 * if it is a HUGE PMD or a normal one.  If it is not valid
 	 * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
 	 * translates to a valid PTE, branch to PTE_LABEL.
 	 *
-	 * We translate the PMD by hand, one bit at a time,
-	 * constructing the huge PTE.
-	 *
-	 * So we construct the PTE in REG2 as follows:
-	 *
-	 * 1) Extract the PMD PFN from REG1 and place it into REG2.
-	 *
-	 * 2) Translate PMD protection bits in REG1 into REG2, one bit
-	 *    at a time using andcc tests on REG1 and OR's into REG2.
-	 *
-	 *    Only two bits to be concerned with here, EXEC and WRITE.
-	 *    Now REG1 is freed up and we can use it as a temporary.
-	 *
-	 * 3) Construct the VALID, CACHE, and page size PTE bits in
-	 *    REG1, OR with REG2 to form final PTE.
+	 * We have to propagate the 4MB bit of the virtual address
+	 * because we are fabricating 8MB pages using 4MB hw pages.
 	 */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
-	brz,pn		REG1, FAIL_LABEL;				      \
-	 andcc		REG1, PMD_ISHUGE, %g0;				      \
-	be,pt		%xcc, 700f;					      \
-	 and		REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2;	      \
-	cmp		REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED;	      \
-	bne,pn		%xcc, FAIL_LABEL;				      \
-	 andn		REG1, PMD_HUGE_PROTBITS, REG2;			      \
-	sllx		REG2, PMD_PADDR_SHIFT, REG2;			      \
-	/* REG2 now holds PFN << PAGE_SHIFT */				      \
-	andcc		REG1, PMD_HUGE_WRITE, %g0;			      \
-	bne,a,pt	%xcc, 1f;					      \
-	 OR_PTE_BIT_1INSN(REG2, W);					      \
-1:	andcc		REG1, PMD_HUGE_EXEC, %g0;			      \
-	be,pt		%xcc, 1f;					      \
-	 nop;								      \
-	OR_PTE_BIT_2INSN(REG2, REG1, EXEC);				      \
-	/* REG1 can now be clobbered, build final PTE */		      \
-1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1);				      \
-	ba,pt		%xcc, PTE_LABEL;				      \
-	 or		REG1, REG2, REG1;				      \
+	brz,pn		REG1, FAIL_LABEL;		\
+	 sethi		%uhi(_PAGE_PMD_HUGE), REG2;	\
+	sllx		REG2, 32, REG2;			\
+	andcc		REG1, REG2, %g0;		\
+	be,pt		%xcc, 700f;			\
+	 sethi		%hi(4 * 1024 * 1024), REG2;	\
+	andn		REG1, REG2, REG1;		\
+	and		VADDR, REG2, REG2;		\
+	brlz,pt		REG1, PTE_LABEL;		\
+	 or		REG1, REG2, REG1;		\
 700:
 #else
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
@@ -253,18 +194,16 @@
 #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
 	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
-	andn		REG2, 0x3, REG2; \
-	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn		REG2, 0x7, REG2; \
+	ldxa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn		REG1, FAIL_LABEL; \
 	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx		REG1, PGD_PADDR_SHIFT, REG1; \
-	andn		REG2, 0x3, REG2; \
-	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn		REG2, 0x7, REG2; \
+	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
 	sllx		VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx		REG2, 64 - (PAGE_SHIFT - 1), REG2; \
-	sllx		REG1, PMD_PADDR_SHIFT, REG1; \
+	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
 	andn		REG2, 0x7, REG2; \
 	add		REG1, REG2, REG1; \
 	ldxa		[REG1] ASI_PHYS_USE_EC, REG1; \
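HPAGE_SHIFT is now 23 while the hardware TTE remains 4MB (REAL_HPAGE_SHIFT is
22), so each 8MB software huge page is backed by two 4MB hardware pages, and
bit 22 of the faulting virtual address selects which half to load. A purely
illustrative C equivalent of the assembler fixup above:

    /* Clear the 4MB bit in the huge PTE's physical address and
     * substitute the same bit from the virtual address, so the TLB
     * receives the correct 4MB half of the fabricated 8MB page.
     */
    static unsigned long huge_pte_for_tlb(unsigned long pte, unsigned long vaddr)
    {
    	unsigned long fourmb = 1UL << 22;	/* REAL_HPAGE_SIZE */

    	return (pte & ~fourmb) | (vaddr & fourmb);
    }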
diff --git a/arch/sparc/include/uapi/asm/errno.h b/arch/sparc/include/uapi/asm/errno.h
index c351aba..20423e17 100644
--- a/arch/sparc/include/uapi/asm/errno.h
+++ b/arch/sparc/include/uapi/asm/errno.h
@@ -40,7 +40,7 @@
 #define EPROCLIM        67      /* SUNOS: Too many processes */
 #define	EUSERS		68	/* Too many users */
 #define	EDQUOT		69	/* Quota exceeded */
-#define	ESTALE		70	/* Stale NFS file handle */
+#define	ESTALE		70	/* Stale file handle */
 #define	EREMOTE		71	/* Object is remote */
 #define	ENOSTR		72	/* Device not a stream */
 #define	ETIME		73	/* Timer expired */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 4e1d66c..0f21e9a 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -72,6 +72,8 @@
 
 #define SO_BUSY_POLL		0x0030
 
+#define SO_MAX_PACING_RATE	0x0031
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT	0x5002
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 9c179fb..140966f 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -88,7 +88,6 @@
 
 extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
 
-extern void do_fpe_common(struct pt_regs *regs);
 extern void do_fpieee(struct pt_regs *regs);
 extern void do_fpother(struct pt_regs *regs);
 extern void do_tof(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 53c0a82..60b19f5 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -159,11 +159,12 @@
 
 asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long flags;
 
 	if (user_mode(regs)) {
 		bad_trap(regs, trap_level);
-		return;
+		goto out;
 	}
 
 	flushw_all();
@@ -171,6 +172,8 @@
 	local_irq_save(flags);
 	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
+out:
+	exception_exit(prev_state);
 }
 
 int kgdb_arch_init(void)
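kgdb_trap() above is the first instance of a pattern repeated throughout this
series (kprobes, traps_64.c, unaligned_64.c, and fault_64.c below): now that
sparc64 selects HAVE_CONTEXT_TRACKING, every trap entry that may interrupt
user space must bracket its work with exception_enter()/exception_exit(), and
each early return becomes a goto so the exit always runs. The recurring shape,
with an illustrative handled_early() condition:

    #include <linux/context_tracking.h>

    void some_trap_handler(struct pt_regs *regs)
    {
    	enum ctx_state prev_state = exception_enter();

    	if (handled_early(regs))	/* illustrative early-out */
    		goto out;

    	/* ... actual trap handling ... */
    out:
    	exception_exit(prev_state);
    }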
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index e722121..1b09735 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
@@ -349,7 +350,7 @@
 	case KPROBE_HIT_SSDONE:
 		/*
 		 * We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(cur);
@@ -418,12 +419,14 @@
 asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
 				      struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	BUG_ON(trap_level != 0x170 && trap_level != 0x171);
 
 	if (user_mode(regs)) {
 		local_irq_enable();
 		bad_trap(regs, trap_level);
-		return;
+		goto out;
 	}
 
 	/* trap_level == 0x170 --> ta 0x70
@@ -433,6 +436,8 @@
 		       (trap_level == 0x170) ? "debug" : "debug_2",
 		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
 		bad_trap(regs, trap_level);
+out:
+	exception_exit(prev_state);
 }
 
 /* Jprobes support.  */
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index fde5a41..542e96a 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -153,12 +153,19 @@
 	/* Clear the PAGE_OFFSET top virtual bits, shift
 	 * down to get PFN, and make sure PFN is in range.
 	 */
-	sllx		%g4, 21, %g5
+661:	sllx		%g4, 0, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word		661b
+	.previous
 
 	/* Check to see if we know about valid memory at the 4MB
 	 * chunk this physical address will reside within.
 	 */
-	srlx		%g5, 21 + 41, %g2
+661:	srlx		%g5, MAX_PHYS_ADDRESS_BITS, %g2
+	.section	.page_offset_shift_patch, "ax"
+	.word		661b
+	.previous
+
 	brnz,pn		%g2, kvmap_dtlb_longpath
 	 nop
 
@@ -176,7 +183,11 @@
 	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
 	.previous
 
-	srlx		%g5, 21 + 22, %g2
+661:	srlx		%g5, ILOG2_4MB, %g2
+	.section	.page_offset_shift_patch, "ax"
+	.word		661b
+	.previous
+
 	srlx		%g2, 6, %g5
 	and		%g2, 63, %g2
 	sllx		%g5, 3, %g5
@@ -189,9 +200,18 @@
 2:	 sethi		%hi(kpte_linear_bitmap), %g2
 
 	/* Get the 256MB physical address index. */
-	sllx		%g4, 21, %g5
+661:	sllx		%g4, 0, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word		661b
+	.previous
+
 	or		%g2, %lo(kpte_linear_bitmap), %g2
-	srlx		%g5, 21 + 28, %g5
+
+661:	srlx		%g5, ILOG2_256MB, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word		661b
+	.previous
+
 	and		%g5, (32 - 1), %g7
 
 	/* Divide by 32 to get the offset into the bitmask.  */
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 4435488..97655e0 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -29,7 +29,7 @@
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL, -1,
+				GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #else
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index bc4d3f5..cb02145 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -398,8 +398,8 @@
 	apb_calc_first_last(map, &first, &last);
 	res = bus->resource[1];
 	res->flags = IORESOURCE_MEM;
-	region.start = (first << 21);
-	region.end = (last << 21) + ((1 << 21) - 1);
+	region.start = (first << 29);
+	region.end = (last << 29) + ((1 << 29) - 1);
 	pcibios_bus_to_resource(dev, res, &region);
 }
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index baebab2..32a280e 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -31,6 +31,7 @@
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
 #include <linux/nmi.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -557,6 +558,7 @@
 
 barf:
 	set_thread_wsaved(window + 1);
+	user_exit();
 	do_exit(SIGILL);
 }
 
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 773c1f2..c13c9f2 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -27,6 +27,7 @@
 #include <trace/syscall.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
+#include <linux/context_tracking.h>
 
 #include <asm/asi.h>
 #include <asm/pgtable.h>
@@ -1066,6 +1067,9 @@
 	/* do the secure computing check first */
 	secure_computing_strict(regs->u_regs[UREG_G1]);
 
+	if (test_thread_flag(TIF_NOHZ))
+		user_exit();
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		ret = tracehook_report_syscall_entry(regs);
 
@@ -1086,6 +1090,9 @@
 
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
+	if (test_thread_flag(TIF_NOHZ))
+		user_exit();
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
@@ -1093,4 +1100,7 @@
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
+
+	if (test_thread_flag(TIF_NOHZ))
+		user_enter();
 }
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index afa2a9e..a954eb8 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -18,10 +18,16 @@
 #define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
 #define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
 
+#ifdef CONFIG_CONTEXT_TRACKING
+# define SCHEDULE_USER schedule_user
+#else
+# define SCHEDULE_USER schedule
+#endif
+
 		.text
 		.align			32
 __handle_preemption:
-		call			schedule
+		call			SCHEDULE_USER
 		 wrpr			%g0, RTRAP_PSTATE, %pstate
 		ba,pt			%xcc, __handle_preemption_continue
 		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
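SCHEDULE_USER exists because the return-to-user preemption point must keep the
context-tracking state consistent across the reschedule. schedule_user() does
approximately the following; this is a sketch of the generic scheduler helper,
not sparc-specific code:

    /* Rough shape of schedule_user(): note the transition out of and
     * back into user-mode accounting around the actual schedule().
     */
    asmlinkage void schedule_user_sketch(void)
    {
    	user_exit();
    	schedule();
    	user_enter();
    }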
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index b524f91d..ee789d2 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -68,7 +68,7 @@
 	/* __siginfo_rwin_t * */u32 rwin_save;
 } __attribute__((aligned(8)));
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 35923e8..cd91d01 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
@@ -43,6 +44,7 @@
 {
 	struct ucontext __user *ucp = (struct ucontext __user *)
 		regs->u_regs[UREG_I0];
+	enum ctx_state prev_state = exception_enter();
 	mc_gregset_t __user *grp;
 	unsigned long pc, npc, tstate;
 	unsigned long fp, i7;
@@ -129,16 +131,19 @@
 	}
 	if (err)
 		goto do_sigsegv;
-
+out:
+	exception_exit(prev_state);
 	return;
 do_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }
 
 asmlinkage void sparc64_get_context(struct pt_regs *regs)
 {
 	struct ucontext __user *ucp = (struct ucontext __user *)
 		regs->u_regs[UREG_I0];
+	enum ctx_state prev_state = exception_enter();
 	mc_gregset_t __user *grp;
 	mcontext_t __user *mcp;
 	unsigned long fp, i7;
@@ -220,10 +225,12 @@
 	}
 	if (err)
 		goto do_sigsegv;
-
+out:
+	exception_exit(prev_state);
 	return;
 do_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }
 
 struct rt_signal_frame {
@@ -528,11 +535,13 @@
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
 {
+	user_exit();
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
+	user_enter();
 }
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e142545..b66a533 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1399,8 +1399,13 @@
 
 void smp_send_reschedule(int cpu)
 {
-	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-		      cpumask_of(cpu));
+	if (cpu == smp_processor_id()) {
+		WARN_ON_ONCE(preemptible());
+		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+	} else {
+		xcall_deliver((u64) &xcall_receive_signal,
+			      0, 0, cpumask_of(cpu));
+	}
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index bde867f..e0c09bf8 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -182,7 +182,7 @@
 	cmp	%g5, -1
 	be,pt	%xcc, 80f
 	 nop
-	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+	COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
 
 	/* That clobbered %g2, reload it.  */
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 51561b8..beb0b5a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -24,6 +24,7 @@
 #include <linux/personality.h>
 #include <linux/random.h>
 #include <linux/export.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/utrap.h>
@@ -39,9 +40,6 @@
 	return PAGE_SIZE;
 }
 
-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
-
 /* Does addr --> addr+len fall within 4GB of the VA-space hole or
  * overflow past the end of the 64-bit address space?
  */
@@ -499,6 +497,7 @@
 
 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (test_thread_flag(TIF_32BIT)) {
@@ -517,6 +516,7 @@
 #ifdef DEBUG_SPARC_BREAKPOINT
 	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
 #endif
+	exception_exit(prev_state);
 }
 
 extern void check_pending(int signum);
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d950197..87729ff 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -52,7 +52,7 @@
 #endif
 	.align	32
 1:	ldx	[%g6 + TI_FLAGS], %l5
-	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	be,pt	%icc, rtrap
 	 nop
 	call	syscall_trace_leave
@@ -184,7 +184,7 @@
 
 	srl	%i3, 0, %o3				! IEU0
 	srl	%i2, 0, %o2				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	bne,pn	%icc, linux_syscall_trace32		! CTI
 	 mov	%i0, %l5				! IEU1
 5:	call	%l7					! CTI	Group brk forced
@@ -207,7 +207,7 @@
 
 	mov	%i3, %o3				! IEU1
 	mov	%i4, %o4				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	bne,pn	%icc, linux_syscall_trace		! CTI	Group
 	 mov	%i0, %l5				! IEU0
 2:	call	%l7					! CTI	Group brk forced
@@ -223,7 +223,7 @@
 
 	cmp	%o0, -ERESTART_RESTARTBLOCK
 	bgeu,pn	%xcc, 1f
-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
 
 2:
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index b3f833a..4ced92f 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -20,6 +20,7 @@
 #include <linux/ftrace.h>
 #include <linux/reboot.h>
 #include <linux/gfp.h>
+#include <linux/context_tracking.h>
 
 #include <asm/smp.h>
 #include <asm/delay.h>
@@ -186,11 +187,12 @@
 
 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (regs->tstate & TSTATE_PRIV) {
 		printk("spitfire_insn_access_exception: SFSR[%016lx] "
@@ -207,6 +209,8 @@
 	info.si_addr = (void __user *)regs->tpc;
 	info.si_trapno = 0;
 	force_sig_info(SIGSEGV, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
@@ -260,11 +264,12 @@
 
 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "data access exception", regs,
 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (regs->tstate & TSTATE_PRIV) {
 		/* Test if this comes from uaccess places. */
@@ -280,7 +285,7 @@
 #endif
 			regs->tpc = entry->fixup;
 			regs->tnpc = regs->tpc + 4;
-			return;
+			goto out;
 		}
 		/* Shit... */
 		printk("spitfire_data_access_exception: SFSR[%016lx] "
@@ -294,6 +299,8 @@
 	info.si_addr = (void __user *)sfar;
 	info.si_trapno = 0;
 	force_sig_info(SIGSEGV, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
@@ -1994,6 +2001,7 @@
  */
 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct sun4v_error_entry *ent, local_copy;
 	struct trap_per_cpu *tb;
 	unsigned long paddr;
@@ -2022,12 +2030,14 @@
 		pr_info("Shutdown request, %u seconds...\n",
 			local_copy.err_secs);
 		orderly_poweroff(true);
-		return;
+		goto out;
 	}
 
 	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_ERR "RESUMABLE ERROR",
 			&sun4v_resum_oflow_cnt);
+out:
+	exception_exit(prev_state);
 }
 
 /* If we try to printk() we'll probably make matters worse, by trying
@@ -2152,7 +2162,7 @@
 	       err, op);
 }
 
-void do_fpe_common(struct pt_regs *regs)
+static void do_fpe_common(struct pt_regs *regs)
 {
 	if (regs->tstate & TSTATE_PRIV) {
 		regs->tpc = regs->tnpc;
@@ -2188,23 +2198,28 @@
 
 void do_fpieee(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
 		       0, 0x24, SIGFPE) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	do_fpe_common(regs);
+out:
+	exception_exit(prev_state);
 }
 
 extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
 
 void do_fpother(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct fpustate *f = FPUSTATE;
 	int ret = 0;
 
 	if (notify_die(DIE_TRAP, "fpu exception other", regs,
 		       0, 0x25, SIGFPE) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
 	case (2 << 14): /* unfinished_FPop */
@@ -2213,17 +2228,20 @@
 		break;
 	}
 	if (ret)
-		return;
+		goto out;
 	do_fpe_common(regs);
+out:
+	exception_exit(prev_state);
 }
 
 void do_tof(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
 		       0, 0x26, SIGEMT) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (regs->tstate & TSTATE_PRIV)
 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
@@ -2237,15 +2255,18 @@
 	info.si_addr = (void __user *)regs->tpc;
 	info.si_trapno = 0;
 	force_sig_info(SIGEMT, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 void do_div0(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
 		       0, 0x28, SIGFPE) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (regs->tstate & TSTATE_PRIV)
 		die_if_kernel("TL0: Kernel divide by zero.", regs);
@@ -2259,6 +2280,8 @@
 	info.si_addr = (void __user *)regs->tpc;
 	info.si_trapno = 0;
 	force_sig_info(SIGFPE, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 static void instruction_dump(unsigned int *pc)
@@ -2415,6 +2438,7 @@
 
 void do_illegal_instruction(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
@@ -2422,7 +2446,7 @@
 
 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
 		       0, 0x10, SIGILL) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("Kernel illegal instruction", regs);
@@ -2431,14 +2455,14 @@
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
 			if (handle_popc(insn, regs))
-				return;
+				goto out;
 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
 			if (handle_ldf_stq(insn, regs))
-				return;
+				goto out;
 		} else if (tlb_type == hypervisor) {
 			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
 				if (!vis_emul(regs, insn))
-					return;
+					goto out;
 			} else {
 				struct fpustate *f = FPUSTATE;
 
@@ -2448,7 +2472,7 @@
 				 * Trap in the %fsr to unimplemented_FPop.
 				 */
 				if (do_mathemu(regs, f, true))
-					return;
+					goto out;
 			}
 		}
 	}
@@ -2458,21 +2482,24 @@
 	info.si_addr = (void __user *)pc;
 	info.si_trapno = 0;
 	force_sig_info(SIGILL, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
 
 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (regs->tstate & TSTATE_PRIV) {
 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
-		return;
+		goto out;
 	}
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
@@ -2480,6 +2507,8 @@
 	info.si_addr = (void __user *)sfar;
 	info.si_trapno = 0;
 	force_sig_info(SIGBUS, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
@@ -2504,11 +2533,12 @@
 
 void do_privop(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "privileged operation", regs,
 		       0, 0x11, SIGILL) == NOTIFY_STOP)
-		return;
+		goto out;
 
 	if (test_thread_flag(TIF_32BIT)) {
 		regs->tpc &= 0xffffffff;
@@ -2520,6 +2550,8 @@
 	info.si_addr = (void __user *)regs->tpc;
 	info.si_trapno = 0;
 	force_sig_info(SIGILL, &info, current);
+out:
+	exception_exit(prev_state);
 }
 
 void do_privact(struct pt_regs *regs)
@@ -2530,99 +2562,116 @@
 /* Trap level 1 stuff or other traps we should never see... */
 void do_cee(struct pt_regs *regs)
 {
+	exception_enter();
 	die_if_kernel("TL0: Cache Error Exception", regs);
 }
 
 void do_cee_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Cache Error Exception", regs);
 }
 
 void do_dae_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Data Access Exception", regs);
 }
 
 void do_iae_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Instruction Access Exception", regs);
 }
 
 void do_div0_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: DIV0 Exception", regs);
 }
 
 void do_fpdis_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: FPU Disabled", regs);
 }
 
 void do_fpieee_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: FPU IEEE Exception", regs);
 }
 
 void do_fpother_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: FPU Other Exception", regs);
 }
 
 void do_ill_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
 }
 
 void do_irq_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: IRQ Exception", regs);
 }
 
 void do_lddfmna_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: LDDF Exception", regs);
 }
 
 void do_stdfmna_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: STDF Exception", regs);
 }
 
 void do_paw(struct pt_regs *regs)
 {
+	exception_enter();
 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
 }
 
 void do_paw_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
 }
 
 void do_vaw(struct pt_regs *regs)
 {
+	exception_enter();
 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
 }
 
 void do_vaw_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
 }
 
 void do_tof_tl1(struct pt_regs *regs)
 {
+	exception_enter();
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 	die_if_kernel("TL1: Tag Overflow Exception", regs);
 }
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index a313e4a..14158d4 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -75,7 +75,7 @@
 	mov		512, %g7
 	andn		%g5, 0x7, %g5
 	sllx		%g7, %g6, %g7
-	srlx		%g4, HPAGE_SHIFT, %g6
+	srlx		%g4, REAL_HPAGE_SHIFT, %g6
 	sub		%g7, 1, %g7
 	and		%g6, %g7, %g6
 	sllx		%g6, 4, %g6
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 8201c25e..3c1a7cb 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -21,9 +21,12 @@
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 #include <asm/fpumacro.h>
 #include <asm/cacheflush.h>
 
+#include "entry.h"
+
 enum direction {
 	load,    /* ld, ldd, ldh, ldsh */
 	store,   /* st, std, sth, stsh */
@@ -418,9 +421,6 @@
 
 extern void do_fpother(struct pt_regs *regs);
 extern void do_privact(struct pt_regs *regs);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
-					   unsigned long sfsr,
-					   unsigned long sfar);
 extern void sun4v_data_access_exception(struct pt_regs *regs,
 					unsigned long addr,
 					unsigned long type_ctx);
@@ -578,6 +578,7 @@
 
 void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
@@ -632,13 +633,16 @@
 			sun4v_data_access_exception(regs, sfar, sfsr);
 		else
 			spitfire_data_access_exception(regs, sfsr, sfar);
-		return;
+		goto out;
 	}
 	advance(regs);
+out:
+	exception_exit(prev_state);
 }
 
 void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
@@ -680,7 +684,9 @@
 			sun4v_data_access_exception(regs, sfar, sfsr);
 		else
 			spitfire_data_access_exception(regs, sfsr, sfar);
-		return;
+		goto out;
 	}
 	advance(regs);
+out:
+	exception_exit(prev_state);
 }
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0bacceb..932ff90 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -122,6 +122,11 @@
 		*(.swapper_4m_tsb_phys_patch)
 		__swapper_4m_tsb_phys_patch_end = .;
 	}
+	.page_offset_shift_patch : {
+		__page_offset_shift_patch = .;
+		*(.page_offset_shift_patch)
+		__page_offset_shift_patch_end = .;
+	}
 	.popc_3insn_patch : {
 		__popc_3insn_patch = .;
 		*(.popc_3insn_patch)
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 77e531f..46272df 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -37,10 +37,10 @@
 	.globl		clear_user_page
 clear_user_page:	/* %o0=dest, %o1=vaddr */
 	lduw		[%g6 + TI_PRE_COUNT], %o2
-	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_OFFSET), %g2
 	sethi		%hi(PAGE_SIZE), %o4
 
-	sllx		%g2, 32, %g2
+	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
 	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
 	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index 4d2df32..dd16c61 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -46,10 +46,10 @@
 	.type		copy_user_page,#function
 copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
 	lduw		[%g6 + TI_PRE_COUNT], %o4
-	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_OFFSET), %g2
 	sethi		%hi(PAGE_SIZE), %o3
 
-	sllx		%g2, 32, %g2
+	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
 	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
 	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
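Both .S hunks above follow from the same change: PAGE_OFFSET is no longer a
link-time constant that a sethi %uhi/sllx pair can materialize, but a variable
chosen at boot from the chip's physical address width (see setup_page_offset()
in the init_64.c hunk below), so the assembler must load it from memory. The
value is simply the top of the linear mapping:

    /* Worked examples of PAGE_OFFSET_BY_BITS(X) = -(1UL << X):
     *
     *   max_phys_bits == 47:  PAGE_OFFSET = 0xffff800000000000UL
     *   max_phys_bits == 43:  PAGE_OFFSET = 0xfffff80000000000UL
     *
     * the latter matching the old hard-coded constant deleted from
     * page_64.h above.
     */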
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2ebec26..69bb818f 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -21,6 +21,7 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/percpu.h>
+#include <linux/context_tracking.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -272,6 +273,7 @@
 
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
@@ -282,7 +284,7 @@
 	fault_code = get_thread_fault_code();
 
 	if (notify_page_fault(regs))
-		return;
+		goto exit_exception;
 
 	si_code = SEGV_MAPERR;
 	address = current_thread_info()->fault_address;
@@ -313,7 +315,7 @@
 			/* Valid, no problems... */
 		} else {
 			bad_kernel_pc(regs, address);
-			return;
+			goto exit_exception;
 		}
 	} else
 		flags |= FAULT_FLAG_USER;
@@ -430,7 +432,7 @@
 	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
-		return;
+		goto exit_exception;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -482,6 +484,8 @@
 
 	}
 #endif
+exit_exception:
+	exception_exit(prev_state);
 	return;
 
 	/*
@@ -494,7 +498,7 @@
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-	return;
+	goto exit_exception;
 
 /*
  * We ran out of memory, or some other thing happened to us that made
@@ -505,7 +509,7 @@
 	up_read(&mm->mmap_sem);
 	if (!(regs->tstate & TSTATE_PRIV)) {
 		pagefault_out_of_memory();
-		return;
+		goto exit_exception;
 	}
 	goto handle_kernel_fault;
 
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 01ee23d..c4d3da6 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -71,13 +71,12 @@
 			int *nr)
 {
 	struct page *head, *page, *tail;
-	u32 mask;
 	int refs;
 
-	mask = PMD_HUGE_PRESENT;
-	if (write)
-		mask |= PMD_HUGE_WRITE;
-	if ((pmd_val(pmd) & mask) != mask)
+	if (!pmd_large(pmd))
+		return 0;
+
+	if (write && !pmd_write(pmd))
 		return 0;
 
 	refs = 0;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 9639964..3096317 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -21,8 +21,6 @@
 /* Slightly simplified from the non-hugepage variant because by
  * definition we don't have to worry about any page coloring stuff
  */
-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
 
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 							unsigned long addr,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed82eda..6b64379 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -354,7 +354,7 @@
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					address, pte_val(pte));
 	else
 #endif
@@ -1557,6 +1557,96 @@
 	return ~0UL;
 }
 
+unsigned long PAGE_OFFSET;
+EXPORT_SYMBOL(PAGE_OFFSET);
+
+static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
+{
+	unsigned long final_shift;
+	unsigned int val = *insn;
+	unsigned int cnt;
+
+	/* We are patching in ilog2(max_supported_phys_address), and
+	 * we are doing so in a manner similar to a relocation addend.
+	 * That is, we are adding the shift value to whatever value
+	 * is in the shift instruction count field already.
+	 */
+	cnt = (val & 0x3f);
+	val &= ~0x3f;
+
+	/* If we are trying to shift >= 64 bits, clear the destination
+	 * register.  This can happen when phys_bits ends up being equal
+	 * to MAX_PHYS_ADDRESS_BITS.
+	 */
+	final_shift = (cnt + (64 - phys_bits));
+	if (final_shift >= 64) {
+		unsigned int rd = (val >> 25) & 0x1f;
+
+		val = 0x80100000 | (rd << 25);
+	} else {
+		val |= final_shift;
+	}
+	*insn = val;
+
+	__asm__ __volatile__("flush	%0"
+			     : /* no outputs */
+			     : "r" (insn));
+}
+
+static void __init page_offset_shift_patch(unsigned long phys_bits)
+{
+	extern unsigned int __page_offset_shift_patch;
+	extern unsigned int __page_offset_shift_patch_end;
+	unsigned int *p;
+
+	p = &__page_offset_shift_patch;
+	while (p < &__page_offset_shift_patch_end) {
+		unsigned int *insn = (unsigned int *)(unsigned long)*p;
+
+		page_offset_shift_patch_one(insn, phys_bits);
+
+		p++;
+	}
+}
+
+static void __init setup_page_offset(void)
+{
+	unsigned long max_phys_bits = 40;
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		max_phys_bits = 42;
+	} else if (tlb_type == hypervisor) {
+		switch (sun4v_chip_type) {
+		case SUN4V_CHIP_NIAGARA1:
+		case SUN4V_CHIP_NIAGARA2:
+			max_phys_bits = 39;
+			break;
+		case SUN4V_CHIP_NIAGARA3:
+			max_phys_bits = 43;
+			break;
+		case SUN4V_CHIP_NIAGARA4:
+		case SUN4V_CHIP_NIAGARA5:
+		case SUN4V_CHIP_SPARC64X:
+		default:
+			max_phys_bits = 47;
+			break;
+		}
+	}
+
+	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
+		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
+			    max_phys_bits);
+		prom_halt();
+	}
+
+	PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
+
+	pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
+		PAGE_OFFSET, max_phys_bits);
+
+	page_offset_shift_patch(max_phys_bits);
+}
+
 static void __init tsb_phys_patch(void)
 {
 	struct tsb_ldquad_phys_patch_entry *pquad;
@@ -1722,7 +1812,7 @@
 #ifndef CONFIG_DEBUG_PAGEALLOC
 	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
 		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
-			0xfffff80000000000UL;
+			PAGE_OFFSET;
 		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 					   _PAGE_P_4V | _PAGE_W_4V);
 	} else {
@@ -1731,7 +1821,7 @@
 
 	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
 		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
-			0xfffff80000000000UL;
+			PAGE_OFFSET;
 		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 					   _PAGE_P_4V | _PAGE_W_4V);
 	} else {
@@ -1740,7 +1830,7 @@
 
 	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
 		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
-			0xfffff80000000000UL;
+			PAGE_OFFSET;
 		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 					   _PAGE_P_4V | _PAGE_W_4V);
 	} else {
@@ -1752,7 +1842,7 @@
 /* paging_init() sets up the page tables */
 
 static unsigned long last_valid_pfn;
-pgd_t swapper_pg_dir[2048];
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
@@ -1763,6 +1853,8 @@
 	unsigned long real_end, i;
 	int node;
 
+	setup_page_offset();
+
 	/* These build time checks make sure that the dcache_dirty_cpu()
 	 * page->flags usage will work.
 	 *
@@ -2261,10 +2353,10 @@
 		     __ACCESS_BITS_4U | _PAGE_E_4U);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
+	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
 #else
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
-		0xfffff80000000000UL;
+		PAGE_OFFSET;
 #endif
 	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
 				   _PAGE_P_4U | _PAGE_W_4U);
@@ -2308,10 +2400,10 @@
 	_PAGE_CACHE = _PAGE_CACHE_4V;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
+	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
 #else
 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
-		0xfffff80000000000UL;
+		PAGE_OFFSET;
 #endif
 	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
 				   _PAGE_P_4V | _PAGE_W_4V);
@@ -2455,53 +2547,13 @@
 			     : : "r" (pstate));
 }
 
-static pte_t *get_from_cache(struct mm_struct *mm)
-{
-	struct page *page;
-	pte_t *ret;
-
-	spin_lock(&mm->page_table_lock);
-	page = mm->context.pgtable_page;
-	ret = NULL;
-	if (page) {
-		void *p = page_address(page);
-
-		mm->context.pgtable_page = NULL;
-
-		ret = (pte_t *) (p + (PAGE_SIZE / 2));
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	return ret;
-}
-
-static struct page *__alloc_for_cache(struct mm_struct *mm)
-{
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
-
-	if (page) {
-		spin_lock(&mm->page_table_lock);
-		if (!mm->context.pgtable_page) {
-			atomic_set(&page->_count, 2);
-			mm->context.pgtable_page = page;
-		}
-		spin_unlock(&mm->page_table_lock);
-	}
-	return page;
-}
-
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page;
-	pte_t *pte;
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+				       __GFP_REPEAT | __GFP_ZERO);
+	pte_t *pte = NULL;
 
-	pte = get_from_cache(mm);
-	if (pte)
-		return pte;
-
-	page = __alloc_for_cache(mm);
 	if (page)
 		pte = (pte_t *) page_address(page);
 
@@ -2511,36 +2563,30 @@
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page;
-	pte_t *pte;
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+				       __GFP_REPEAT | __GFP_ZERO);
+	pte_t *pte = NULL;
 
-	pte = get_from_cache(mm);
-	if (pte)
-		return pte;
-
-	page = __alloc_for_cache(mm);
-	if (page) {
-		pgtable_page_ctor(page);
-		pte = (pte_t *) page_address(page);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		free_hot_cold_page(page, 0);
+		return NULL;
 	}
-
-	return pte;
+	return (pte_t *) page_address(page);
 }
 
 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	struct page *page = virt_to_page(pte);
-	if (put_page_testzero(page))
-		free_hot_cold_page(page, 0);
+	free_page((unsigned long)pte);
 }
 
 static void __pte_free(pgtable_t pte)
 {
 	struct page *page = virt_to_page(pte);
-	if (put_page_testzero(page)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
+
+	pgtable_page_dtor(page);
+	__free_page(page);
 }
 
 void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -2557,124 +2603,27 @@
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
-{
-	if (pgprot_val(pgprot) & _PAGE_VALID)
-		pmd_val(pmd) |= PMD_HUGE_PRESENT;
-	if (tlb_type == hypervisor) {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	} else {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	}
-
-	return pmd;
-}
-
-pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
-	pmd_val(pmd) |= PMD_ISHUGE;
-	pmd = pmd_set_protbits(pmd, pgprot, false);
-	return pmd;
-}
-
-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
-			  PMD_HUGE_WRITE |
-			  PMD_HUGE_EXEC);
-	pmd = pmd_set_protbits(pmd, newprot, true);
-	return pmd;
-}
-
-pgprot_t pmd_pgprot(pmd_t entry)
-{
-	unsigned long pte = 0;
-
-	if (pmd_val(entry) & PMD_HUGE_PRESENT)
-		pte |= _PAGE_VALID;
-
-	if (tlb_type == hypervisor) {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4V;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4V;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4V;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4V;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4V;
-		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
-	} else {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4U;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4U;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4U;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4U;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4U;
-		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
-	}
-
-	return __pgprot(pte);
-}
-
 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 			  pmd_t *pmd)
 {
 	unsigned long pte, flags;
 	struct mm_struct *mm;
 	pmd_t entry = *pmd;
-	pgprot_t prot;
 
 	if (!pmd_large(entry) || !pmd_young(entry))
 		return;
 
-	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
-	pte <<= PMD_PADDR_SHIFT;
-	pte |= _PAGE_VALID;
+	pte = pmd_val(entry);
 
-	prot = pmd_pgprot(entry);
-
-	if (tlb_type == hypervisor)
-		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
-	else
-		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
-
-	pte |= pgprot_val(prot);
+	/* We are fabricating 8MB pages using 4MB real hw pages.  */
+	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
 
 	mm = vma->vm_mm;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					addr, pte);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
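The init_64.c changes above make PAGE_OFFSET a boot-time value: setup_page_offset() picks the supported physical address width per CPU type, and page_offset_shift_patch() rewrites every recorded shift instruction to match. The patching arithmetic, restated as a plain C sketch of page_offset_shift_patch_one():

	/* Add (64 - phys_bits) to the shift count baked into the recorded
	 * sllx/srlx; if the total reaches 64, replace the instruction with
	 * a clear of the destination register (0x80100000 encodes sparc
	 * "or %g0, %g0, %rd", i.e. clr, with rd in bits 29:25).
	 */
	static unsigned int sketch_patch_shift(unsigned int insn,
					       unsigned long phys_bits)
	{
		unsigned int cnt = insn & 0x3f;
		unsigned long final_shift = cnt + (64 - phys_bits);

		insn &= ~0x3f;
		if (final_shift >= 64)		/* would shift everything out */
			return 0x80100000 | (insn & (0x1f << 25));
		return insn | final_shift;
	}
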
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 0661aa6..5d3782de 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -1,11 +1,13 @@
 #ifndef _SPARC64_MM_INIT_H
 #define _SPARC64_MM_INIT_H
 
+#include <asm/page.h>
+
 /* Most of the symbols in this file are defined in init.c and
  * marked non-static so that assembler code can get at them.
  */
 
-#define MAX_PHYS_ADDRESS	(1UL << 41UL)
+#define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
 #define KPTE_BITMAP_CHUNK_SZ		(256UL * 1024UL * 1024UL)
 #define KPTE_BITMAP_BYTES	\
 	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5d721df..869023a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -345,7 +345,10 @@
 	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
 		return NULL;
 	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7a91f28..ad3bf4b 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -161,8 +161,8 @@
 	if (mm == &init_mm)
 		return;
 
-	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
-		if (pmd_val(pmd) & PMD_ISHUGE)
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
@@ -178,13 +178,16 @@
 	}
 
 	if (!pmd_none(orig)) {
-		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
+		pte_t orig_pte = __pte(pmd_val(orig));
+		bool exec = pte_exec(orig_pte);
 
 		addr &= HPAGE_MASK;
-		if (pmd_val(orig) & PMD_ISHUGE)
+		if (pmd_trans_huge(orig)) {
 			tlb_batch_add_one(mm, addr, exec);
-		else
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+		} else {
 			tlb_batch_pmd_scan(mm, addr, orig, exec);
+		}
 	}
 }
 
@@ -196,11 +199,11 @@
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	if (!mm->pmd_huge_pte)
+	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(lh);
 	else
-		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
-	mm->pmd_huge_pte = pgtable;
+		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -211,12 +214,12 @@
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
+	pgtable = pmd_huge_pte(mm, pmdp);
 	lh = (struct list_head *) pgtable;
 	if (list_empty(lh))
-		mm->pmd_huge_pte = NULL;
+		pmd_huge_pte(mm, pmdp) = NULL;
 	else {
-		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 		list_del(lh);
 	}
 	pte_val(pgtable[0]) = 0;
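The deposit/withdraw hunks above move the THP pgtable stash from the single mm-wide slot (mm->pmd_huge_pte) to the per-PMD-page slot named by pmd_huge_pte(mm, pmdp), matching the new split PMD locks. The storage trick itself is unchanged: the deposited page table is reused as a struct list_head. A standalone sketch with the slot reduced to one static pointer (the real code keys it per mm, under the page table lock):

	#include <linux/list.h>

	static void *slot;	/* stands in for pmd_huge_pte(mm, pmdp) */

	static void sketch_deposit(void *pgtable)
	{
		struct list_head *lh = pgtable;

		if (!slot)
			INIT_LIST_HEAD(lh);	/* first entry: empty list */
		else
			list_add(lh, (struct list_head *)slot);
		slot = pgtable;
	}

	static void *sketch_withdraw(void)
	{
		struct list_head *lh = slot;

		if (!lh)
			return NULL;
		if (list_empty(lh))
			slot = NULL;		/* that was the last one */
		else {
			slot = lh->next;
			list_del(lh);
		}
		return lh;
	}
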
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce..3b3a360 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -87,7 +87,7 @@
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -111,7 +111,7 @@
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -472,8 +472,6 @@
 	mm->context.huge_pte_count = 0;
 #endif
 
-	mm->context.pgtable_page = NULL;
-
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
@@ -512,17 +510,10 @@
 void destroy_context(struct mm_struct *mm)
 {
 	unsigned long flags, i;
-	struct page *page;
 
 	for (i = 0; i < MM_NUM_TSBS; i++)
 		tsb_destroy_one(&mm->context.tsb_block[i]);
 
-	page = mm->context.pgtable_page;
-	if (page && put_page_testzero(page)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
-
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 
 	if (CTX_VALID(mm->context)) {
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 432aa0c..b4f4733 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -153,10 +153,10 @@
 	.globl		__flush_icache_page
 __flush_icache_page:	/* %o0 = phys_page */
 	srlx		%o0, PAGE_SHIFT, %o0
-	sethi		%uhi(PAGE_OFFSET), %g1
+	sethi		%hi(PAGE_OFFSET), %g1
 	sllx		%o0, PAGE_SHIFT, %o0
 	sethi		%hi(PAGE_SIZE), %g2
-	sllx		%g1, 32, %g1
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 	add		%o0, %g1, %o0
 1:	subcc		%g2, 32, %g2
 	bne,pt		%icc, 1b
@@ -178,8 +178,8 @@
 	.align		64
 	.globl		__flush_dcache_page
 __flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
-	sethi		%uhi(PAGE_OFFSET), %g1
-	sllx		%g1, 32, %g1
+	sethi		%hi(PAGE_OFFSET), %g1
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 	sub		%o0, %g1, %o0			! physical address
 	srlx		%o0, 11, %o0			! make D-cache TAG
 	sethi		%hi(1 << 14), %o2		! D-cache size
@@ -287,8 +287,8 @@
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 __cheetah_flush_dcache_page: /* 11 insns */
-	sethi		%uhi(PAGE_OFFSET), %g1
-	sllx		%g1, 32, %g1
+	sethi		%hi(PAGE_OFFSET), %g1
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 	sub		%o0, %g1, %o0
 	sethi		%hi(PAGE_SIZE), %o4
 1:	subcc		%o4, (1 << 5), %o4
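All of the assembly hunks above follow one pattern: the old code materialized the link-time constant PAGE_OFFSET in a register with sethi %uhi plus sllx 32, while the new code loads the variable that setup_page_offset() computed. In C terms, with hypothetical helper names:

	extern unsigned long PAGE_OFFSET;	/* now a variable, see init_64.c */

	static unsigned long kaddr_to_phys_old(unsigned long kaddr)
	{
		return kaddr - 0xfffff80000000000UL;	/* compile-time offset */
	}

	static unsigned long kaddr_to_phys_new(unsigned long kaddr)
	{
		return kaddr - PAGE_OFFSET;	/* loaded at run time */
	}
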
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index d45a2c4..b3692ce 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -8,7 +8,6 @@
 	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
 	select SYSCTL_EXCEPTION_TRACE
-	select USE_GENERIC_SMP_HELPERS
 	select CC_OPTIMIZE_FOR_SIZE
 	select HAVE_DEBUG_KMEMLEAK
 	select GENERIC_IRQ_PROBE
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 85e00b2..19c04b5 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -49,7 +49,7 @@
 	struct compat_ucontext uc;
 };
 
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index b7180e6..c45593d 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -251,15 +251,12 @@
 	/* Scan for the smallest maximum payload size. */
 	for_each_pci_dev(dev) {
 		u32 devcap;
-		int max_payload;
 
 		if (!pci_is_pcie(dev))
 			continue;
 
-		pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap);
-		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
-		if (max_payload < smallest_max_payload)
-			smallest_max_payload = max_payload;
+		if (dev->pcie_mpss < smallest_max_payload)
+			smallest_max_payload = dev->pcie_mpss;
 	}
 
 	/* Now, set the max_payload_size for all devices to that value. */
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 4fd9ec0..5e86eac 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -241,6 +241,11 @@
 	if (p == NULL)
 		return NULL;
 
+	if (!pgtable_page_ctor(p)) {
+		__free_pages(p, L2_USER_PGTABLE_ORDER);
+		return NULL;
+	}
+
 	/*
 	 * Make every page have a page_count() of one, not just the first.
 	 * We don't use __GFP_COMP since it doesn't look like it works
@@ -251,7 +256,6 @@
 		inc_zone_page_state(p+i, NR_PAGETABLE);
 	}
 
-	pgtable_page_ctor(p);
 	return p;
 }
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 7ddb64b..8636e90 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -279,8 +279,12 @@
 	struct page *pte;
 
 	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-	if (pte)
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 82cdd89..a7ba27b 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -1,5 +1,6 @@
 config UNICORE32
 	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_MEMBLOCK
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_DMA_ATTRS
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 0213e37..2e02d13 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -51,12 +51,15 @@
 	struct page *pte;
 
 	pte = alloc_pages(PGALLOC_GFP, 0);
-	if (pte) {
-		if (!PageHighMem(pte)) {
-			void *page = page_address(pte);
-			clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
-		}
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!PageHighMem(pte)) {
+		void *page = page_address(pte);
+		clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
+	}
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
 	}
 
 	return pte;
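This is the same conversion applied to tile, um and unicore32 above (and to the sparc and x86 allocators earlier in the diff): pgtable_page_ctor() can now fail, since with split page-table locks the spinlock may have to be allocated dynamically, so every pte_alloc_one() grows the same error path of freeing the page and returning NULL. The canonical shape, as a sketch without the per-arch cache maintenance:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static pgtable_t sketch_pte_alloc_one(void)
	{
		struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {	/* may fail: lock allocation */
			__free_page(pte);
			return NULL;
		}
		return pte;
	}
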
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
index 181108b..0c6618e 100644
--- a/arch/unicore32/kernel/puv3-nb0916.c
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -54,6 +54,7 @@
 	.max_brightness	= 100,
 	.dft_brightness	= 100,
 	.pwm_period_ns	= 70 * 1024,
+	.enable_gpio	= -1,
 };
 
 static struct gpio_keys_button nb0916_gpio_keys[] = {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 725e157..e903c71 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@
 config X86
 	def_bool y
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
@@ -90,7 +91,6 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select IRQ_FORCED_THREADING
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_BPF_JIT if X86_64
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select CLKEVT_I8253
@@ -255,10 +255,6 @@
 	default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
 	default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
 
-config ARCH_CPU_PROBE_RELEASE
-	def_bool y
-	depends on HOTPLUG_CPU
-
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
@@ -639,10 +635,10 @@
 	  spinlock implementation with something virtualization-friendly
 	  (for example, block the virtual CPU rather than spinning).
 
-	  Unfortunately the downside is an up to 5% performance hit on
-	  native kernels, with various workloads.
+	  It has a minimal impact on native kernels and gives a nice performance
+	  benefit on paravirtualized KVM / Xen kernels.
 
-	  If you are unsure how to answer this question, answer N.
+	  If you are unsure how to answer this question, answer Y.
 
 source "arch/x86/xen/Kconfig"
 
@@ -1889,6 +1885,10 @@
 	def_bool y
 	depends on NUMA
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+	def_bool y
+	depends on X86_64 || X86_PAE
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index bae3aba..d21ff89 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -25,6 +25,7 @@
 #include <linux/personality.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -33,14 +34,18 @@
 #include <asm/ia32.h>
 
 #undef WARN_OLD
-#undef CORE_DUMP /* definitely broken */
 
 static int load_aout_binary(struct linux_binprm *);
 static int load_aout_library(struct file *);
 
-#ifdef CORE_DUMP
-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
-			  unsigned long limit);
+#ifdef CONFIG_COREDUMP
+static int aout_core_dump(struct coredump_params *);
+
+static unsigned long get_dr(int n)
+{
+	struct perf_event *bp = current->thread.ptrace_bps[n];
+	return bp ? bp->hw.info.address : 0;
+}
 
 /*
  * fill in the user structure for a core dump..
@@ -48,6 +53,7 @@
 static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
 {
 	u32 fs, gs;
+	memset(dump, 0, sizeof(*dump));
 
 /* changed the size calculations - should hopefully work better. lbt */
 	dump->magic = CMAGIC;
@@ -57,15 +63,12 @@
 	dump->u_dsize = ((unsigned long)
 			 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
 	dump->u_dsize -= dump->u_tsize;
-	dump->u_ssize = 0;
-	dump->u_debugreg[0] = current->thread.debugreg0;
-	dump->u_debugreg[1] = current->thread.debugreg1;
-	dump->u_debugreg[2] = current->thread.debugreg2;
-	dump->u_debugreg[3] = current->thread.debugreg3;
-	dump->u_debugreg[4] = 0;
-	dump->u_debugreg[5] = 0;
+	dump->u_debugreg[0] = get_dr(0);
+	dump->u_debugreg[1] = get_dr(1);
+	dump->u_debugreg[2] = get_dr(2);
+	dump->u_debugreg[3] = get_dr(3);
 	dump->u_debugreg[6] = current->thread.debugreg6;
-	dump->u_debugreg[7] = current->thread.debugreg7;
+	dump->u_debugreg[7] = current->thread.ptrace_dr7;
 
 	if (dump->start_stack < 0xc0000000) {
 		unsigned long tmp;
@@ -74,24 +77,24 @@
 		dump->u_ssize = tmp >> PAGE_SHIFT;
 	}
 
-	dump->regs.bx = regs->bx;
-	dump->regs.cx = regs->cx;
-	dump->regs.dx = regs->dx;
-	dump->regs.si = regs->si;
-	dump->regs.di = regs->di;
-	dump->regs.bp = regs->bp;
-	dump->regs.ax = regs->ax;
+	dump->regs.ebx = regs->bx;
+	dump->regs.ecx = regs->cx;
+	dump->regs.edx = regs->dx;
+	dump->regs.esi = regs->si;
+	dump->regs.edi = regs->di;
+	dump->regs.ebp = regs->bp;
+	dump->regs.eax = regs->ax;
 	dump->regs.ds = current->thread.ds;
 	dump->regs.es = current->thread.es;
 	savesegment(fs, fs);
 	dump->regs.fs = fs;
 	savesegment(gs, gs);
 	dump->regs.gs = gs;
-	dump->regs.orig_ax = regs->orig_ax;
-	dump->regs.ip = regs->ip;
+	dump->regs.orig_eax = regs->orig_ax;
+	dump->regs.eip = regs->ip;
 	dump->regs.cs = regs->cs;
-	dump->regs.flags = regs->flags;
-	dump->regs.sp = regs->sp;
+	dump->regs.eflags = regs->flags;
+	dump->regs.esp = regs->sp;
 	dump->regs.ss = regs->ss;
 
 #if 1 /* FIXME */
@@ -107,7 +110,7 @@
 	.module		= THIS_MODULE,
 	.load_binary	= load_aout_binary,
 	.load_shlib	= load_aout_library,
-#ifdef CORE_DUMP
+#ifdef CONFIG_COREDUMP
 	.core_dump	= aout_core_dump,
 #endif
 	.min_coredump	= PAGE_SIZE
@@ -122,7 +125,7 @@
 	vm_brk(start, end - start);
 }
 
-#ifdef CORE_DUMP
+#ifdef CONFIG_COREDUMP
 /*
  * These are the only things you should do on a core-file: use only these
  * macros to write out all the necessary info.
@@ -130,15 +133,7 @@
 
 #include <linux/coredump.h>
 
-#define DUMP_WRITE(addr, nr)			     \
-	if (!dump_write(file, (void *)(addr), (nr))) \
-		goto end_coredump;
-
-#define DUMP_SEEK(offset)		\
-	if (!dump_seek(file, offset))	\
-		goto end_coredump;
-
-#define START_DATA()	(u.u_tsize << PAGE_SHIFT)
+#define START_DATA(u)	(u.u_tsize << PAGE_SHIFT)
 #define START_STACK(u)	(u.start_stack)
 
 /*
@@ -151,8 +146,7 @@
  * dumping of the process results in another error..
  */
 
-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
-			  unsigned long limit)
+static int aout_core_dump(struct coredump_params *cprm)
 {
 	mm_segment_t fs;
 	int has_dumped = 0;
@@ -164,19 +158,19 @@
 	has_dumped = 1;
 	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
 	dump.u_ar0 = offsetof(struct user32, regs);
-	dump.signal = signr;
-	dump_thread32(regs, &dump);
+	dump.signal = cprm->siginfo->si_signo;
+	dump_thread32(cprm->regs, &dump);
 
 	/*
 	 * If the size of the dump file exceeds the rlimit, then see
 	 * what would happen if we wrote the stack, but not the data
 	 * area.
 	 */
-	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
+	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
 		dump.u_dsize = 0;
 
 	/* Make sure we have enough room to write the stack and data areas. */
-	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
+	if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
 		dump.u_ssize = 0;
 
 	/* make sure we actually have a data and stack area to dump */
@@ -190,22 +184,26 @@
 
 	set_fs(KERNEL_DS);
 	/* struct user */
-	DUMP_WRITE(&dump, sizeof(dump));
+	if (!dump_emit(cprm, &dump, sizeof(dump)))
+		goto end_coredump;
 	/* Now dump all of the user data.  Include malloced stuff as well */
-	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
+	if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump)))
+		goto end_coredump;
 	/* now we start writing out the user space info */
 	set_fs(USER_DS);
 	/* Dump the data area */
 	if (dump.u_dsize != 0) {
 		dump_start = START_DATA(dump);
 		dump_size = dump.u_dsize << PAGE_SHIFT;
-		DUMP_WRITE(dump_start, dump_size);
+		if (!dump_emit(cprm, (void *)dump_start, dump_size))
+			goto end_coredump;
 	}
 	/* Now prepare to dump the stack area */
 	if (dump.u_ssize != 0) {
 		dump_start = START_STACK(dump);
 		dump_size = dump.u_ssize << PAGE_SHIFT;
-		DUMP_WRITE(dump_start, dump_size);
+		if (!dump_emit(cprm, (void *)dump_start, dump_size))
+			goto end_coredump;
 	}
 end_coredump:
 	set_fs(fs);
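The a.out coredump revival above also ports the writer to the modern interface: the (file, limit) arguments and the goto-hiding DUMP_WRITE/DUMP_SEEK macros become dump_emit()/dump_skip() calls on a struct coredump_params, both of which return nonzero on success. A sketch of the pattern:

	#include <linux/coredump.h>

	static int sketch_dump_blob(struct coredump_params *cprm,
				    const void *buf, size_t len, size_t pad)
	{
		if (!dump_emit(cprm, buf, len))
			return 0;		/* hit cprm->limit or I/O error */
		return dump_skip(cprm, pad);	/* zero-filled alignment gap */
	}
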
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 665a730..2206757 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -34,7 +34,7 @@
 #include <asm/sys_ia32.h>
 #include <asm/smap.h>
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err = 0;
 	bool ia32 = test_thread_flag(TIF_IA32);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index b1977ba..c8c1e70 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -26,6 +26,7 @@
 #include <acpi/pdc_intel.h>
 
 #include <asm/numa.h>
+#include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index b90e5df..50d033a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -327,10 +327,25 @@
 {
 	write_idt_entry(trace_idt_table, entry, gate);
 }
+
+static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+				   unsigned dpl, unsigned ist, unsigned seg)
+{
+	gate_desc s;
+
+	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
+	/*
+	 * does not need to be atomic because it is only done once at
+	 * setup time
+	 */
+	write_trace_idt_entry(gate, &s);
+}
 #else
 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 {
 }
+
+#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
 #endif
 
 static inline void _set_gate(int gate, unsigned type, void *addr,
@@ -353,11 +368,14 @@
  * Pentium F0 0F bugfix can have resulted in the mapped
  * IDT being write-protected.
  */
-static inline void set_intr_gate(unsigned int n, void *addr)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
-}
+#define set_intr_gate(n, addr)						\
+	do {								\
+		BUG_ON((unsigned)n > 0xFF);				\
+		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
+			  __KERNEL_CS);					\
+		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+				0, 0, __KERNEL_CS);			\
+	} while (0)
 
 extern int first_system_vector;
 /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
@@ -374,37 +392,10 @@
 	}
 }
 
-#ifdef CONFIG_TRACING
-static inline void trace_set_intr_gate(unsigned int gate, void *addr)
-{
-	gate_desc s;
-
-	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(trace_idt_table, gate, &s);
-}
-
-static inline void __trace_alloc_intr_gate(unsigned int n, void *addr)
-{
-	trace_set_intr_gate(n, addr);
-}
-#else
-static inline void trace_set_intr_gate(unsigned int gate, void *addr)
-{
-}
-
-#define __trace_alloc_intr_gate(n, addr)
-#endif
-
-static inline void __alloc_intr_gate(unsigned int n, void *addr)
-{
-	set_intr_gate(n, addr);
-}
-
 #define alloc_intr_gate(n, addr)				\
 	do {							\
 		alloc_system_vector(n);				\
-		__alloc_intr_gate(n, addr);			\
-		__trace_alloc_intr_gate(n, trace_##addr);	\
+		set_intr_gate(n, addr);				\
 	} while (0)
 
 /*
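set_intr_gate() turns from an inline function into a macro above because it now has to form the tracing stub's name by token pasting (trace_##addr), which only the preprocessor can do; CONFIG_TRACING=n builds keep working through the trace_* aliases #defined in traps.h and hw_irq.h later in this diff. A standalone demonstration of the constraint, with hypothetical names:

	#include <stdio.h>

	static void page_fault(void)       { puts("page_fault"); }
	static void trace_page_fault(void) { puts("trace_page_fault"); }

	#define install_both(fn)	\
		do { fn(); trace_##fn(); } while (0)	/* no function can do this */

	int main(void)
	{
		install_both(page_fault);	/* reaches both variants */
		return 0;
	}
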
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4d0bda7..c49a613 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -365,7 +365,7 @@
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->fpu_counter = 0;
+	tsk->thread.fpu_counter = 0;
 	__drop_fpu(tsk);
 	clear_used_math();
 	preempt_enable();
@@ -424,7 +424,7 @@
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-					     new->fpu_counter > 5);
+					     new->thread.fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;
@@ -433,16 +433,16 @@
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->fpu_counter = 0;
+		old->thread.fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 92b3bae..cba45d9 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -187,6 +187,9 @@
 #endif
 
 extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+#ifdef CONFIG_TRACING
+#define trace_interrupt interrupt
+#endif
 
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 2c37aad..32ce713 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -21,7 +21,7 @@
 	DIE_NMIUNKNOWN,
 };
 
-extern void printk_address(unsigned long address, int reliable);
+extern void printk_address(unsigned long address);
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 15f960c..24ec121 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -274,13 +274,17 @@
 
 	bool guest_mode; /* guest running a nested guest */
 	bool perm_ok; /* do not check permissions if true */
-	bool only_vendor_specific_insn;
+	bool ud;	/* inject an #UD if host doesn't support insn */
 
 	bool have_exception;
 	struct x86_exception exception;
 
-	/* decode cache */
-	u8 twobyte;
+	/*
+	 * decode cache
+	 */
+
+	/* current opcode length in bytes */
+	u8 opcode_len;
 	u8 b;
 	u8 intercept;
 	u8 lock_prefix;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c76ff74..ae5d783 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -79,6 +79,13 @@
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
+
 #define SELECTOR_TI_MASK (1 << 2)
 #define SELECTOR_RPL_MASK 0x03
 
@@ -253,7 +260,6 @@
  * mode.
  */
 struct kvm_mmu {
-	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
@@ -261,7 +267,6 @@
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
-	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
@@ -389,6 +394,8 @@
 
 	struct fpu guest_fpu;
 	u64 xcr0;
+	u64 guest_supported_xcr0;
+	u32 guest_xstate_size;
 
 	struct kvm_pio_request pio;
 	void *pio_data;
@@ -557,7 +564,9 @@
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
+#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
+	atomic_t noncoherent_dma_count;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
@@ -780,11 +789,11 @@
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
@@ -922,13 +931,11 @@
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
 
-int complete_pio(struct kvm_vcpu *vcpu);
-bool kvm_check_iopl(struct kvm_vcpu *vcpu);
-
 static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	return gpa;
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 626cf70..3142a94 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index cb75028..e139b13 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -218,10 +218,14 @@
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else  /*  CONFIG_SMP  */
@@ -235,6 +239,16 @@
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	rdmsrl(msr_no, *q);
+	return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	wrmsrl(msr_no, q);
+	return 0;
+}
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				struct msr *msrs)
 {
@@ -254,6 +268,14 @@
 {
 	return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	return wrmsrl_safe(msr_no, q);
+}
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
 	return rdmsr_safe_regs(regs);
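The msr.h hunk adds 64-bit counterparts to the cross-CPU MSR accessors, with UP stubs that collapse to plain rdmsrl()/wrmsrl(). A usage sketch (MSR_IA32_TSC, 0x10, chosen only as an example of a 64-bit MSR):

	#include <asm/msr.h>

	static u64 sketch_read_msr(unsigned int cpu)
	{
		u64 val = 0;

		/* runs the read on the target cpu; returns 0 or a negative errno */
		if (rdmsrl_safe_on_cpu(cpu, 0x10 /* MSR_IA32_TSC */, &val))
			return 0;	/* MSR faulted or cpu unreachable */
		return val;
	}
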
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b3e18f8..94220d1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -378,9 +378,6 @@
 #define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
@@ -400,9 +397,6 @@
 #define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
@@ -447,7 +441,6 @@
 #define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 #define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
@@ -457,7 +450,6 @@
 #define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b4389a4..c4412e9 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -80,12 +80,21 @@
 #if PAGETABLE_LEVELS > 2
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	struct page *page;
+	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	if (!page)
+		return NULL;
+	if (!pgtable_pmd_page_ctor(page)) {
+		__free_pages(page, 0);
+		return NULL;
+	}
+	return (pmd_t *)page_address(page);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	free_page((unsigned long)pmd);
 }
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 987c75e..7b034a4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -488,6 +488,15 @@
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * during which the FPU is used. If this exceeds a threshold, lazy
+	 * fpu saving becomes unlazy to avoid the trap. This is an unsigned
+	 * char so that after 256 switches the counter wraps and the behavior
+	 * turns lazy again; this deals with bursty apps that only use the
+	 * FPU for a short time.
+	 */
+	unsigned char fpu_counter;
 };
 
 /*
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index be8269b..d6b078e 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -14,6 +14,8 @@
 			    struct timespec *ts);
 void pvclock_resume(void);
 
+void pvclock_touch_watchdogs(void);
+
 /*
  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
  * yielding a 64-bit result.
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index c48a950..6f1c3a8 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -214,6 +214,9 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+#ifdef CONFIG_TRACING
+#define trace_early_idt_handlers early_idt_handlers
+#endif
 
 /*
  * Load a segment. Fall back on loading the zero
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
new file mode 100644
index 0000000..2fbc66c
--- /dev/null
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -0,0 +1,52 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_exceptions,
+
+	TP_PROTO(unsigned long address, struct pt_regs *regs,
+		 unsigned long error_code),
+
+	TP_ARGS(address, regs, error_code),
+
+	TP_STRUCT__entry(
+		__field(		unsigned long, address	)
+		__field(		unsigned long, ip	)
+		__field(		unsigned long, error_code )
+	),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->ip = regs->ip;
+		__entry->error_code = error_code;
+	),
+
+	TP_printk("address=%pf ip=%pf error_code=0x%lx",
+		  (void *)__entry->address, (void *)__entry->ip,
+		  __entry->error_code) );
+
+#define DEFINE_PAGE_FAULT_EVENT(name)				\
+DEFINE_EVENT_FN(x86_exceptions, name,				\
+	TP_PROTO(unsigned long address,	struct pt_regs *regs,	\
+		 unsigned long error_code),			\
+	TP_ARGS(address, regs, error_code),			\
+	trace_irq_vector_regfunc,				\
+	trace_irq_vector_unregfunc);
+
+DEFINE_PAGE_FAULT_EVENT(page_fault_user);
+DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE exceptions
+#endif /*  _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
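The new header above defines one event class and stamps out two events from it; the generated trace_page_fault_user()/trace_page_fault_kernel() calls are what the fault path fires. A sketch of a call site (the real ones are added elsewhere in this series):

	#include <asm/ptrace.h>
	#include <asm/trace/exceptions.h>

	static void sketch_trace_fault(struct pt_regs *regs,
				       unsigned long address,
				       unsigned long error_code)
	{
		if (user_mode(regs))
			trace_page_fault_user(address, regs, error_code);
		else
			trace_page_fault_kernel(address, regs, error_code);
	}
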
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 7036cb6..58d66fe 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -37,6 +37,23 @@
 #endif /* CONFIG_X86_MCE */
 asmlinkage void simd_coprocessor_error(void);
 
+#ifdef CONFIG_TRACING
+asmlinkage void trace_page_fault(void);
+#define trace_divide_error divide_error
+#define trace_bounds bounds
+#define trace_invalid_op invalid_op
+#define trace_device_not_available device_not_available
+#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
+#define trace_invalid_TSS invalid_TSS
+#define trace_segment_not_present segment_not_present
+#define trace_general_protection general_protection
+#define trace_spurious_interrupt_bug spurious_interrupt_bug
+#define trace_coprocessor_error coprocessor_error
+#define trace_alignment_check alignment_check
+#define trace_simd_coprocessor_error simd_coprocessor_error
+#define trace_async_page_fault async_page_fault
+#endif
+
 dotraplinkage void do_divide_error(struct pt_regs *, long);
 dotraplinkage void do_debug(struct pt_regs *, long);
 dotraplinkage void do_nmi(struct pt_regs *, long);
@@ -55,6 +72,9 @@
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
+#ifdef CONFIG_TRACING
+dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
+#endif
 dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
 dotraplinkage void do_alignment_check(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 828a156..0f1be11 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -172,6 +172,7 @@
 
 struct pci_dev;
 struct msi_msg;
+struct msi_desc;
 
 struct x86_msi_ops {
 	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
@@ -182,6 +183,8 @@
 	void (*teardown_msi_irqs)(struct pci_dev *dev);
 	void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
 	int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+	u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
+	u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
 };
 
 struct IO_APIC_route_entry;
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 0000000..7f02fe4
--- /dev/null
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	void *vstart = (void*)__get_free_pages(flags, get_order(size));
+	*dma_handle = virt_to_phys(vstart);
+	return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5d9a303..d3a8778 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -211,9 +211,9 @@
 	__u32 padding[3];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC    2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT  4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
 
 /* for KVM_SET_CPUID2 */
 struct kvm_cpuid2 {
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index bb04650..b93e09a 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -536,6 +536,7 @@
 
 /* MSR_IA32_VMX_MISC bits */
 #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
+#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
 /* AMD-V MSRs */
 
 #define MSR_VM_CR                       0xc0010114
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 40c7660..6c0b43b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -189,24 +189,31 @@
 	return 0;
 }
 
-static void acpi_register_lapic(int id, u8 enabled)
+/**
+ * acpi_register_lapic - register a local apic and generate a logical cpu number
+ * @id: local apic id to register
+ * @enabled: whether this cpu is enabled
+ *
+ * Returns the logical cpu number which maps to the local apic
+ */
+static int acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
 	if (id >= MAX_LOCAL_APIC) {
 		printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
-		return;
+		return -EINVAL;
 	}
 
 	if (!enabled) {
 		++disabled_cpus;
-		return;
+		return -EINVAL;
 	}
 
 	if (boot_cpu_physical_apicid != -1U)
 		ver = apic_version[boot_cpu_physical_apicid];
 
-	generic_processor_info(id, ver);
+	return generic_processor_info(id, ver);
 }
 
 static int __init
@@ -614,84 +621,27 @@
 #endif
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	struct acpi_madt_local_apic *lapic;
-	cpumask_var_t tmp_map, new_map;
-	u8 physid;
 	int cpu;
-	int retval = -ENOMEM;
 
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-		return -EINVAL;
-
-	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
-
-	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER ||
-	    obj->buffer.length < sizeof(*lapic)) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
-	if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-	    !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	physid = lapic->id;
-
-	kfree(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
-	lapic = NULL;
-
-	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
-		goto out;
-
-	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
-		goto free_tmp_map;
-
-	cpumask_copy(tmp_map, cpu_present_mask);
-	acpi_register_lapic(physid, ACPI_MADT_ENABLED);
-
-	/*
-	 * If acpi_register_lapic successfully generates a new logical cpu
-	 * number, then the following will get us exactly what was mapped
-	 */
-	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
-	if (cpumask_empty(new_map)) {
-		printk ("Unable to map lapic to logical cpu number\n");
-		retval = -EINVAL;
-		goto free_new_map;
+	cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+	if (cpu < 0) {
+		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+		return cpu;
 	}
 
 	acpi_processor_set_pdc(handle);
-
-	cpu = cpumask_first(new_map);
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	*pcpu = cpu;
-	retval = 0;
-
-free_new_map:
-	free_cpumask_var(new_map);
-free_tmp_map:
-	free_cpumask_var(tmp_map);
-out:
-	return retval;
+	return 0;
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	return _acpi_map_lsapic(handle, pcpu);
+	return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
@@ -745,7 +695,7 @@
 #ifdef CONFIG_HPET_TIMER
 #include <asm/hpet.h>
 
-static struct __initdata resource *hpet_res;
+static struct resource *hpet_res __initdata;
 
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 3312010..3a2ae4c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,6 +26,17 @@
 #endif
 
 /**
+ * x86_acpi_enter_sleep_state - enter sleep state
+ * @state: Sleep state to enter.
+ *
+ * Wrapper around acpi_enter_sleep_state() to be called from assembly.
+ */
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+{
+	return acpi_enter_sleep_state(state);
+}
+
+/**
  * x86_acpi_suspend_lowlevel - save kernel state
  *
  * Create an identity mapped page table and copy the wakeup routine to
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index c9c2c98..65c7b60 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -17,3 +17,5 @@
 extern void do_suspend_lowlevel(void);
 
 extern int x86_acpi_suspend_lowlevel(void);
+
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index d1daa66..665c6b7 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -73,7 +73,7 @@
 	call	save_processor_state
 	call	save_registers
 	pushl	$3
-	call	acpi_enter_sleep_state
+	call	x86_acpi_enter_sleep_state
 	addl	$4, %esp
 
 #	In case of S3 failure, we'll emerge here.  Jump
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164..ae693b5 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -73,7 +73,7 @@
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	x86_acpi_enter_sleep_state
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 15e8563..df94598 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -402,17 +402,6 @@
 {
 	struct smp_alt_module *mod;
 
-#ifdef CONFIG_LOCKDEP
-	/*
-	 * Older binutils section handling bug prevented
-	 * alternatives-replacement from working reliably.
-	 *
-	 * If this still occurs then you should see a hang
-	 * or crash shortly after this line:
-	 */
-	pr_info("lockdep: fixing up alternatives\n");
-#endif
-
 	/* Why bother if there are no other CPUs? */
 	BUG_ON(num_possible_cpus() == 1);
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a7eb82d..ed165d6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2107,7 +2107,7 @@
 	apic_write(APIC_LVT1, value);
 }
 
-void generic_processor_info(int apicid, int version)
+int generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@
 			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
 		disabled_cpus++;
-		return;
+		return -ENODEV;
 	}
 
 	if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@
 			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
 		disabled_cpus++;
-		return;
+		return -EINVAL;
 	}
 
 	num_processors++;
@@ -2183,6 +2183,8 @@
 #endif
 	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
+
+	return cpu;
 }
 
 int hard_smp_processor_id(void)
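generic_processor_info() returning the logical cpu id (or a negative errno) is what lets the ACPI hot-add path above shed its cpumask-snapshot dance. The whole flow now reduces to this simplified sketch of _acpi_map_lsapic(), with the PDC and NUMA-node setup elided:

	static int sketch_map_lsapic(int physid, int *pcpu)
	{
		int cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);

		if (cpu < 0)	/* rejected: id too big, disabled, or no room */
			return cpu;
		*pcpu = cpu;
		return 0;
	}
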
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3daece7..bca023b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -339,7 +339,7 @@
 #endif
 
 /*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
  * Assumes number of cores is a power of two.
  */
 static void amd_detect_cmp(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1414c90..0641113 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1,5 +1,5 @@
 /*
- *	Routines to indentify caches on Intel CPU.
+ *	Routines to identify caches on Intel CPU.
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index f2cc63e..b6f794a 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -1,5 +1,5 @@
 /*
- *	Routines to indentify additional cpu features that are scattered in
+ *	Routines to identify additional cpu features that are scattered in
  *	cpuid space.
  */
 #include <linux/cpu.h>
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index deb6421..d9c12d3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -25,12 +25,17 @@
 int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
 static int die_counter;
 
-void printk_address(unsigned long address, int reliable)
+static void printk_stack_address(unsigned long address, int reliable)
 {
 	pr_cont(" [<%p>] %s%pB\n",
 		(void *)address, reliable ? "" : "? ", (void *)address);
 }
 
+void printk_address(unsigned long address)
+{
+	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
+}
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
@@ -151,7 +156,7 @@
 {
 	touch_nmi_watchdog();
 	printk(data);
-	printk_address(addr, reliable);
+	printk_stack_address(addr, reliable);
 }
 
 static const struct stacktrace_ops print_trace_ops = {
@@ -281,7 +286,7 @@
 #else
 	/* Executive summary in case the oops scrolled away */
 	printk(KERN_ALERT "RIP ");
-	printk_address(regs->ip, 1);
+	printk_address(regs->ip);
 	printk(" RSP <%016lx>\n", regs->sp);
 #endif
 	return 0;
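
The printk_address()/printk_stack_address() split above hinges on two printk
extensions: %pB decodes a *return* address (it backs the address up before
symbol lookup, so stack entries resolve to the calling line), while %pS
decodes the address as-is, which is what a faulting RIP needs. A kernel-style
fragment (not standalone) showing the intended pairing:

	/* stack trace entries are return addresses: use %pB */
	pr_cont(" [<%p>] %s%pB\n", (void *)addr, reliable ? "" : "? ", (void *)addr);

	/* exact addresses such as regs->ip: use %pS */
	pr_cont(" [<%p>] %pS\n", (void *)addr, (void *)addr);
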
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b3cd3eb..96f958d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -313,6 +313,16 @@
 	return gmch_ctrl << 25; /* 32 MB units */
 }
 
+static inline size_t gen8_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+	gmch_ctrl &= BDW_GMCH_GMS_MASK;
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
 typedef size_t (*stolen_size_fn)(int num, int slot, int func);
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@
 	INTEL_IVB_D_IDS(gen6_stolen_size),
 	INTEL_HSW_D_IDS(gen6_stolen_size),
 	INTEL_HSW_M_IDS(gen6_stolen_size),
+	INTEL_BDW_M_IDS(gen8_stolen_size),
+	INTEL_BDW_D_IDS(gen8_stolen_size)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
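
For reference, the new gen8_stolen_size() decodes the Broadwell GMS field out
of the GMCH control word. A standalone sketch of the arithmetic; the
shift/mask values mirror the patch's BDW_GMCH_GMS_* constants and are
assumptions here, since the header defining them is outside this hunk:

#include <stdio.h>
#include <stdint.h>

#define BDW_GMCH_GMS_SHIFT 8	/* assumed values of the BDW_GMCH_GMS_* */
#define BDW_GMCH_GMS_MASK  0xff	/* constants used by the hunk above */

static uint64_t gen8_stolen_size(uint16_t gmch_ctrl)
{
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return (uint64_t)gmch_ctrl << 25;	/* 32 MB units */
}

int main(void)
{
	/* a GMS field of 4 means 4 * 32 MB of stolen graphics memory */
	printf("%llu MiB\n", (unsigned long long)
	       (gen8_stolen_size(4 << BDW_GMCH_GMS_SHIFT) >> 20));
	return 0;
}
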
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index fd1bc1b..51e2988 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1244,6 +1244,16 @@
  */
 	.pushsection .kprobes.text, "ax"
 
+#ifdef CONFIG_TRACING
+ENTRY(trace_page_fault)
+	RING0_EC_FRAME
+	ASM_CLAC
+	pushl_cfi $trace_do_page_fault
+	jmp error_code
+	CFI_ENDPROC
+END(trace_page_fault)
+#endif
+
 ENTRY(page_fault)
 	RING0_EC_FRAME
 	ASM_CLAC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 603be7c..e21b078 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1278,6 +1278,17 @@
 END(\sym)
 .endm
 
+#ifdef CONFIG_TRACING
+.macro trace_errorentry sym do_sym
+errorentry trace(\sym) trace(\do_sym)
+errorentry \sym \do_sym
+.endm
+#else
+.macro trace_errorentry sym do_sym
+errorentry \sym \do_sym
+.endm
+#endif
+
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
@@ -1480,7 +1491,7 @@
 errorentry xen_stack_segment do_stack_segment
 #endif
 errorentry general_protection do_general_protection
-errorentry page_fault do_page_fault
+trace_errorentry page_fault do_page_fault
 #ifdef CONFIG_KVM_GUEST
 errorentry async_page_fault do_async_page_fault
 #endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 42a392a..d4bdd25 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -248,6 +248,15 @@
 	return ret;
 }
 
+static int is_ftrace_caller(unsigned long ip)
+{
+	if (ip == (unsigned long)(&ftrace_call) ||
+		ip == (unsigned long)(&ftrace_regs_call))
+		return 1;
+
+	return 0;
+}
+
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
@@ -257,10 +266,13 @@
  */
 int ftrace_int3_handler(struct pt_regs *regs)
 {
+	unsigned long ip;
+
 	if (WARN_ON_ONCE(!regs))
 		return 0;
 
-	if (!ftrace_location(regs->ip - 1))
+	ip = regs->ip - 1;
+	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
 		return 0;
 
 	regs->ip += MCOUNT_INSN_SIZE - 1;
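
The ftrace_int3_handler() change extends the ip - 1 fixup to the dynamically
patched ftrace_call/ftrace_regs_call sites. The address arithmetic, as a
standalone sketch: int3 is one byte, so on trap regs->ip points just past the
patched instruction's first byte; the original site is ip - 1, and skipping
the whole 5-byte call means advancing MCOUNT_INSN_SIZE - 1 further.

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5	/* size of the 5-byte call/nop on x86 */

int main(void)
{
	unsigned long ip = 0x1001;		/* int3 trapped at 0x1000 */
	unsigned long patched_site = ip - 1;	/* what ftrace_location() is asked about */
	unsigned long resume_at = ip + MCOUNT_INSN_SIZE - 1;

	printf("site=%#lx resume=%#lx\n", patched_site, resume_at);
	return 0;
}
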
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1be8e43..85126cc 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -162,7 +162,7 @@
 	clear_bss();
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-		set_intr_gate(i, &early_idt_handlers[i]);
+		set_intr_gate(i, early_idt_handlers[i]);
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	copy_bootdata(__va(real_mode_data));
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 5d576ab..e8368c6 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -100,7 +100,7 @@
 		__save_init_fpu(tsk);
 		__thread_fpu_end(tsk);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 	preempt_enable();
 }
 EXPORT_SYMBOL(unlazy_fpu);
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ee11b7d..26d5a55 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,15 +42,27 @@
 				   int init)
 {
 	union jump_code_union code;
+	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
 	if (type == JUMP_LABEL_ENABLE) {
-		/*
-		 * We are enabling this jump label. If it is not a nop
-		 * then something must have gone wrong.
-		 */
-		if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
-			bug_at((void *)entry->code, __LINE__);
+		if (init) {
+			/*
+			 * Jump label is enabled for the first time.
+			 * So we expect a default_nop...
+			 */
+			if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+				     != 0))
+				bug_at((void *)entry->code, __LINE__);
+		} else {
+			/*
+			 * ...otherwise we expect an ideal_nop; anything
+			 * else means something went horribly wrong.
+			 */
+			if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+				     != 0))
+				bug_at((void *)entry->code, __LINE__);
+		}
 
 		code.jump = 0xe9;
 		code.offset = entry->target -
@@ -63,7 +75,6 @@
 		 * are converting the default nop to the ideal nop.
 		 */
 		if (init) {
-			const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 			if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
 				bug_at((void *)entry->code, __LINE__);
 		} else {
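
The jump_label change above distinguishes the build-time default nop, expected
only on the very first enable (init), from the boot-time ideal nop expected
afterwards. A standalone sketch of the 5-byte site check; the nop encoding
below is the common x86 5-byte NOPL and is an assumption for illustration, not
necessarily this kernel's STATIC_KEY_INIT_NOP:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static const uint8_t default_nop[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static int site_matches(const uint8_t *site, const uint8_t *expected)
{
	return memcmp(site, expected, 5) == 0;
}

int main(void)
{
	uint8_t site[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

	/* on first enable the site must still hold the default nop;
	 * afterwards it must hold the ideal nop -- anything else is a bug */
	printf("default nop present: %d\n", site_matches(site, default_nop));
	return 0;
}
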
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b2046e4..6dd802c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -464,7 +464,7 @@
 
 static void __init kvm_apf_trap_init(void)
 {
-	set_intr_gate(14, &async_page_fault);
+	set_intr_gate(14, async_page_fault);
 }
 
 void __init kvm_guest_init(void)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1570e07..e604109 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -139,6 +139,7 @@
 	src = &hv_clock[cpu].pvti;
 	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
 		src->flags &= ~PVCLOCK_GUEST_STOPPED;
+		pvclock_touch_watchdogs();
 		ret = true;
 	}
 
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index af99f71..c3d4cc9 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -431,7 +431,7 @@
 		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
 
 	if (request_firmware(&fw, (const char *)fw_name, device)) {
-		pr_err("failed to load file %s\n", fw_name);
+		pr_debug("failed to load file %s\n", fw_name);
 		goto out;
 	}
 
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 216a4d7..18be189 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -49,7 +49,7 @@
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-				-1, __builtin_return_address(0));
+				NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c2ec1aa..6f1236c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -153,7 +153,7 @@
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
 		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		p->thread.io_bitmap_ptr = NULL;
 		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 		return 0;
@@ -166,7 +166,7 @@
 	p->thread.ip = (unsigned long) ret_from_fork;
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 
-	p->fpu_counter = 0;
+	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 45ab4d6..9c0280f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -63,7 +63,7 @@
 	unsigned int ds, cs, es;
 
 	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
-	printk_address(regs->ip, 1);
+	printk_address(regs->ip);
 	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
 			regs->sp, regs->flags);
 	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
@@ -163,7 +163,7 @@
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.usersp = me->thread.usersp;
 	set_tsk_thread_flag(p, TIF_FORK);
-	p->fpu_counter = 0;
+	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index a16bae3..2f355d2 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -43,6 +43,14 @@
 	return pv_tsc_khz;
 }
 
+void pvclock_touch_watchdogs(void)
+{
+	touch_softlockup_watchdog_sync();
+	clocksource_touch_watchdog();
+	rcu_cpu_stall_reset();
+	reset_hung_task_detector();
+}
+
 static atomic64_t last_value = ATOMIC64_INIT(0);
 
 void pvclock_resume(void)
@@ -74,6 +82,11 @@
 		version = __pvclock_read_cycles(src, &ret, &flags);
 	} while ((src->version & 1) || version != src->version);
 
+	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
+		src->flags &= ~PVCLOCK_GUEST_STOPPED;
+		pvclock_touch_watchdogs();
+	}
+
 	if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
 		(flags & PVCLOCK_TSC_STABLE_BIT))
 		return ret;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 918d489..cb233bc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1121,8 +1121,6 @@
 	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
 #endif
 
-	reserve_crashkernel();
-
 	vsmp_init();
 
 	io_delay_init();
@@ -1135,6 +1133,13 @@
 	early_acpi_boot_init();
 
 	initmem_init();
+
+	/*
+	 * Reserve memory for crash kernel after SRAT is parsed so that it
+	 * won't consume hotpluggable memory.
+	 */
+	reserve_crashkernel();
+
 	memblock_find_dma_reserve();
 
 #ifdef CONFIG_KVM_GUEST
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2a16558..85dc05a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -81,27 +81,6 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock(void)
-{
-	mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-{
-	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#endif
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 6e60b5f..649b010 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -65,29 +65,32 @@
 	if (!cpu_is_hotpluggable(cpu))
 		return -EINVAL;
 
-	cpu_hotplug_driver_lock();
+	lock_device_hotplug();
 
 	switch (action) {
 	case 0:
 		ret = cpu_down(cpu);
 		if (!ret) {
 			pr_info("CPU %u is now offline\n", cpu);
+			dev->offline = true;
 			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
 		} else
 			pr_debug("Can't offline CPU%d.\n", cpu);
 		break;
 	case 1:
 		ret = cpu_up(cpu);
-		if (!ret)
+		if (!ret) {
+			dev->offline = false;
 			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-		else
+		} else {
 			pr_debug("Can't online CPU%d.\n", cpu);
+		}
 		break;
 	default:
 		ret = -EINVAL;
 	}
 
-	cpu_hotplug_driver_unlock();
+	unlock_device_hotplug();
 
 	return ret;
 }
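
Both hunks above replace the x86-private cpu_hotplug_driver_lock() with the
generic lock_device_hotplug(), and keep dev->offline in sync so the sysfs
online attribute reflects reality. A standalone analog of the pattern, with a
pthread mutex standing in for the device-hotplug lock:

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t device_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

struct device { bool offline; };

static int cpu_set_online(struct device *dev, bool online)
{
	pthread_mutex_lock(&device_hotplug_lock);
	/* cpu_up()/cpu_down() would run here; assume they succeed */
	dev->offline = !online;
	pthread_mutex_unlock(&device_hotplug_lock);
	return 0;
}

int main(void)
{
	struct device cpu1 = { .offline = false };

	cpu_set_online(&cpu1, false);
	printf("cpu1 offline=%d\n", cpu1.offline);
	return 0;
}
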
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 729aa77..b857ed8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -653,7 +653,7 @@
 		return;
 	}
 
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
@@ -713,7 +713,7 @@
 	/* int3 can be called from all */
 	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
 #ifdef CONFIG_X86_32
-	set_intr_gate(X86_TRAP_PF, &page_fault);
+	set_intr_gate(X86_TRAP_PF, page_fault);
 #endif
 	load_idt(&idt_descr);
 }
@@ -721,7 +721,7 @@
 void __init early_trap_pf_init(void)
 {
 #ifdef CONFIG_X86_64
-	set_intr_gate(X86_TRAP_PF, &page_fault);
+	set_intr_gate(X86_TRAP_PF, page_fault);
 #endif
 }
 
@@ -737,30 +737,30 @@
 	early_iounmap(p, 4);
 #endif
 
-	set_intr_gate(X86_TRAP_DE, &divide_error);
+	set_intr_gate(X86_TRAP_DE, divide_error);
 	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
 	/* int4 can be called from all */
 	set_system_intr_gate(X86_TRAP_OF, &overflow);
-	set_intr_gate(X86_TRAP_BR, &bounds);
-	set_intr_gate(X86_TRAP_UD, &invalid_op);
-	set_intr_gate(X86_TRAP_NM, &device_not_available);
+	set_intr_gate(X86_TRAP_BR, bounds);
+	set_intr_gate(X86_TRAP_UD, invalid_op);
+	set_intr_gate(X86_TRAP_NM, device_not_available);
 #ifdef CONFIG_X86_32
 	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
 #else
 	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
 #endif
-	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
-	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
-	set_intr_gate(X86_TRAP_NP, &segment_not_present);
+	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
+	set_intr_gate(X86_TRAP_TS, invalid_TSS);
+	set_intr_gate(X86_TRAP_NP, segment_not_present);
 	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
-	set_intr_gate(X86_TRAP_GP, &general_protection);
-	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
-	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
-	set_intr_gate(X86_TRAP_AC, &alignment_check);
+	set_intr_gate(X86_TRAP_GP, general_protection);
+	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
+	set_intr_gate(X86_TRAP_MF, coprocessor_error);
+	set_intr_gate(X86_TRAP_AC, alignment_check);
 #ifdef CONFIG_X86_MCE
 	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
 #endif
-	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
+	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
 
 	/* Reserve all the builtin and the syscall vector: */
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 8ce0072..021783b 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -116,6 +116,8 @@
 	.teardown_msi_irqs	= default_teardown_msi_irqs,
 	.restore_msi_irqs	= default_restore_msi_irqs,
 	.setup_hpet_msi		= default_setup_hpet_msi,
+	.msi_mask_irq		= default_msi_mask_irq,
+	.msix_mask_irq		= default_msix_mask_irq,
 };
 
 /* MSI arch specific hooks */
@@ -138,6 +140,14 @@
 {
 	x86_msi.restore_msi_irqs(dev, irq);
 }
+u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	return x86_msi.msi_mask_irq(desc, mask, flag);
+}
+u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	return x86_msi.msix_mask_irq(desc, flag);
+}
 #endif
 
 struct x86_io_apic_ops x86_io_apic_ops = {
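
The x86_msi additions follow the kernel's usual overridable-ops pattern:
generic code calls arch_msi_mask_irq(), which dispatches through a
function-pointer table that a platform such as Xen can repoint at boot. A
standalone sketch of that indirection (the names and the trivial mask logic
are illustrative, not the real MSI register handling):

#include <stdio.h>

struct msi_ops {
	unsigned int (*mask_irq)(unsigned int mask, unsigned int flag);
};

static unsigned int default_msi_mask_irq(unsigned int mask, unsigned int flag)
{
	return mask & flag;	/* stand-in for the real register write */
}

static struct msi_ops x86_msi = { .mask_irq = default_msi_mask_irq };

static unsigned int arch_msi_mask_irq(unsigned int mask, unsigned int flag)
{
	return x86_msi.mask_irq(mask, flag);	/* a platform may have repointed this */
}

int main(void)
{
	printf("%#x\n", arch_msi_mask_irq(0xff, 0x0f));
	return 0;
}
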
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index a47a3e5..b89c5db 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -38,6 +38,7 @@
 	select PERF_EVENTS
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
+	select KVM_VFIO
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index bf4fb04..25d22b2 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -9,7 +9,7 @@
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/ioapic.o \
 				$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
-				$(KVM)/eventfd.o $(KVM)/irqchip.o
+				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)	+= $(KVM)/assigned-dev.o $(KVM)/iommu.o
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b110fe6..c697625 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -23,6 +23,26 @@
 #include "mmu.h"
 #include "trace.h"
 
+static u32 xstate_required_size(u64 xstate_bv)
+{
+	int feature_bit = 0;
+	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+
+	xstate_bv &= ~XSTATE_FPSSE;
+	while (xstate_bv) {
+		if (xstate_bv & 0x1) {
+		        u32 eax, ebx, ecx, edx;
+		        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
+			ret = max(ret, eax + ebx);
+		}
+
+		xstate_bv >>= 1;
+		feature_bit++;
+	}
+
+	return ret;
+}
+
 void kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -46,6 +66,18 @@
 			apic->lapic_timer.timer_mode_mask = 1 << 17;
 	}
 
+	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+	if (!best) {
+		vcpu->arch.guest_supported_xcr0 = 0;
+		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+	} else {
+		vcpu->arch.guest_supported_xcr0 =
+			(best->eax | ((u64)best->edx << 32)) &
+			host_xcr0 & KVM_SUPPORTED_XCR0;
+		vcpu->arch.guest_xstate_size =
+			xstate_required_size(vcpu->arch.guest_supported_xcr0);
+	}
+
 	kvm_pmu_cpuid_update(vcpu);
 }
 
@@ -182,13 +214,35 @@
 {
 	u64 mask = ((u64)1 << bit);
 
-	return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+	return mask & KVM_SUPPORTED_XCR0 & host_xcr0;
 }
 
 #define F(x) bit(X86_FEATURE_##x)
 
-static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-			 u32 index, int *nent, int maxnent)
+static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
+				   u32 func, u32 index, int *nent, int maxnent)
+{
+	switch (func) {
+	case 0:
+		entry->eax = 1;		/* only one leaf currently */
+		++*nent;
+		break;
+	case 1:
+		entry->ecx = F(MOVBE);
+		++*nent;
+		break;
+	default:
+		break;
+	}
+
+	entry->function = func;
+	entry->index = index;
+
+	return 0;
+}
+
+static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+				 u32 index, int *nent, int maxnent)
 {
 	int r;
 	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
@@ -383,6 +437,8 @@
 	case 0xd: {
 		int idx, i;
 
+		entry->eax &= host_xcr0 & KVM_SUPPORTED_XCR0;
+		entry->edx &= (host_xcr0 & KVM_SUPPORTED_XCR0) >> 32;
 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 		for (idx = 1, i = 1; idx < 64; ++idx) {
 			if (*nent >= maxnent)
@@ -481,6 +537,15 @@
 	return r;
 }
 
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
+			u32 idx, int *nent, int maxnent, unsigned int type)
+{
+	if (type == KVM_GET_EMULATED_CPUID)
+		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);
+
+	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
+}
+
 #undef F
 
 struct kvm_cpuid_param {
@@ -495,8 +560,36 @@
 	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
 }
 
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-				      struct kvm_cpuid_entry2 __user *entries)
+static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
+				 __u32 num_entries, unsigned int ioctl_type)
+{
+	int i;
+	__u32 pad[3];
+
+	if (ioctl_type != KVM_GET_EMULATED_CPUID)
+		return false;
+
+	/*
+	 * We want to make sure that ->padding is being passed clean from
+	 * userspace in case we want to use it for something in the future.
+	 *
+	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
+	 * have to content ourselves with checking only the emulated side. /me
+	 * sheds a tear.
+	 */
+	for (i = 0; i < num_entries; i++) {
+		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
+			return true;
+
+		if (pad[0] || pad[1] || pad[2])
+			return true;
+	}
+	return false;
+}
+
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+			    struct kvm_cpuid_entry2 __user *entries,
+			    unsigned int type)
 {
 	struct kvm_cpuid_entry2 *cpuid_entries;
 	int limit, nent = 0, r = -E2BIG, i;
@@ -513,8 +606,12 @@
 		goto out;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+
+	if (sanity_check_entries(entries, cpuid->nent, type))
+		return -EINVAL;
+
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
 	if (!cpuid_entries)
 		goto out;
 
@@ -526,7 +623,7 @@
 			continue;
 
 		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
-				&nent, cpuid->nent);
+				&nent, cpuid->nent, type);
 
 		if (r)
 			goto out_free;
@@ -537,7 +634,7 @@
 		limit = cpuid_entries[nent - 1].eax;
 		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
 			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
-				     &nent, cpuid->nent);
+				     &nent, cpuid->nent, type);
 
 		if (r)
 			goto out_free;
@@ -661,6 +758,7 @@
 		*edx = best->edx;
 	} else
 		*eax = *ebx = *ecx = *edx = 0;
+	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
 
@@ -676,6 +774,5 @@
 	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	trace_kvm_cpuid(function, eax, ebx, ecx, edx);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
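
xstate_required_size() above walks the enabled xstate bits and sizes the XSAVE
area from CPUID leaf 0xD: for each component, subleaf EAX is its size and EBX
its offset, so the buffer must extend to the largest offset + size. A
userspace sketch of the same walk, assuming the standard 512-byte legacy area
and 64-byte XSAVE header:

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

static uint32_t xstate_required_size(uint64_t xstate_bv)
{
	uint32_t ret = 512 + 64;	/* legacy area + XSAVE header */
	int bit = 0;

	xstate_bv &= ~0x3ULL;		/* FP/SSE live inside the legacy area */
	for (; xstate_bv; xstate_bv >>= 1, bit++) {
		unsigned int eax, ebx, ecx, edx;

		if (!(xstate_bv & 1))
			continue;
		__cpuid_count(0xD, bit, eax, ebx, ecx, edx);
		if (eax + ebx > ret)
			ret = eax + ebx;	/* component end = offset + size */
	}
	return ret;
}

int main(void)
{
	/* bit 2 = YMM; on an AVX machine this typically prints 832 */
	printf("%u bytes\n", xstate_required_size(0x7));
	return 0;
}
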
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index b7fd079..f1e48951 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -6,8 +6,9 @@
 void kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-				      struct kvm_cpuid_entry2 __user *entries);
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+			    struct kvm_cpuid_entry2 __user *entries,
+			    unsigned int type);
 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 			     struct kvm_cpuid *cpuid,
 			     struct kvm_cpuid_entry __user *entries);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index ddc3f3d..07ffca0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -130,7 +130,7 @@
 #define Mov         (1<<20)
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
-#define VendorSpecific (1<<22) /* Vendor specific instruction */
+#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 #define Undefined   (1<<25) /* No Such Instruction */
@@ -785,9 +785,10 @@
  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
  */
 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
-			     int highbyte_regs)
+			     int byteop)
 {
 	void *p;
+	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
 
 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
@@ -1024,7 +1025,6 @@
 				    struct operand *op)
 {
 	unsigned reg = ctxt->modrm_reg;
-	int highbyte_regs = ctxt->rex_prefix == 0;
 
 	if (!(ctxt->d & ModRM))
 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
@@ -1045,13 +1045,9 @@
 	}
 
 	op->type = OP_REG;
-	if (ctxt->d & ByteOp) {
-		op->addr.reg = decode_register(ctxt, reg, highbyte_regs);
-		op->bytes = 1;
-	} else {
-		op->addr.reg = decode_register(ctxt, reg, 0);
-		op->bytes = ctxt->op_bytes;
-	}
+	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
+
 	fetch_register_operand(op);
 	op->orig_val = op->val;
 }
@@ -1082,12 +1078,10 @@
 	ctxt->modrm_seg = VCPU_SREG_DS;
 
 	if (ctxt->modrm_mod == 3) {
-		int highbyte_regs = ctxt->rex_prefix == 0;
-
 		op->type = OP_REG;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
-					       highbyte_regs && (ctxt->d & ByteOp));
+				ctxt->d & ByteOp);
 		if (ctxt->d & Sse) {
 			op->type = OP_XMM;
 			op->bytes = 16;
@@ -2961,6 +2955,46 @@
 	return X86EMUL_CONTINUE;
 }
 
+#define FFL(x) bit(X86_FEATURE_##x)
+
+static int em_movbe(struct x86_emulate_ctxt *ctxt)
+{
+	u32 ebx, ecx, edx, eax = 1;
+	u16 tmp;
+
+	/*
+	 * Check MOVBE is set in the guest-visible CPUID leaf.
+	 */
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	if (!(ecx & FFL(MOVBE)))
+		return emulate_ud(ctxt);
+
+	switch (ctxt->op_bytes) {
+	case 2:
+		/*
+		 * From MOVBE definition: "...When the operand size is 16 bits,
+		 * the upper word of the destination register remains unchanged
+		 * ..."
+		 *
+		 * Casting either ->valptr or ->val to u16 would break strict
+		 * aliasing rules, so we have to do the operation almost by hand.
+		 */
+		tmp = (u16)ctxt->src.val;
+		ctxt->dst.val &= ~0xffffUL;
+		ctxt->dst.val |= (unsigned long)swab16(tmp);
+		break;
+	case 4:
+		ctxt->dst.val = swab32((u32)ctxt->src.val);
+		break;
+	case 8:
+		ctxt->dst.val = swab64(ctxt->src.val);
+		break;
+	default:
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+	return X86EMUL_CONTINUE;
+}
+
 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
 {
 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
@@ -3256,6 +3290,18 @@
 	return X86EMUL_CONTINUE;
 }
 
+static int em_sahf(struct x86_emulate_ctxt *ctxt)
+{
+	u32 flags;
+
+	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
+	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
+
+	ctxt->eflags &= ~0xffUL;
+	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
+	return X86EMUL_CONTINUE;
+}
+
 static int em_lahf(struct x86_emulate_ctxt *ctxt)
 {
 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
@@ -3502,7 +3548,7 @@
 
 static const struct opcode group7_rm3[] = {
 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
-	II(SrcNone  | Prot | VendorSpecific,	em_vmmcall,	vmmcall),
+	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
@@ -3587,7 +3633,7 @@
 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
 }, {
-	I(SrcNone | Priv | VendorSpecific,	em_vmcall),
+	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
 	EXT(0, group7_rm1),
 	N, EXT(0, group7_rm3),
 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
@@ -3750,7 +3796,8 @@
 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
 	I(SrcImmFAddr | No64, em_call_far), N,
 	II(ImplicitOps | Stack, em_pushf, pushf),
-	II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
+	II(ImplicitOps | Stack, em_popf, popf),
+	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
 	/* 0xA0 - 0xA7 */
 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
@@ -3810,7 +3857,7 @@
 static const struct opcode twobyte_table[256] = {
 	/* 0x00 - 0x0F */
 	G(0, group6), GD(0, &group7), N, N,
-	N, I(ImplicitOps | VendorSpecific, em_syscall),
+	N, I(ImplicitOps | EmulateOnUD, em_syscall),
 	II(ImplicitOps | Priv, em_clts, clts), N,
 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
 	N, D(ImplicitOps | ModRM), N, N,
@@ -3830,8 +3877,8 @@
 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
-	I(ImplicitOps | VendorSpecific, em_sysenter),
-	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
+	I(ImplicitOps | EmulateOnUD, em_sysenter),
+	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
 	N, N,
 	N, N, N, N, N, N, N, N,
 	/* 0x40 - 0x4F */
@@ -3892,6 +3939,30 @@
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
 };
 
+static const struct gprefix three_byte_0f_38_f0 = {
+	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+};
+
+static const struct gprefix three_byte_0f_38_f1 = {
+	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+};
+
+/*
+ * The insns below are looked up by the third opcode byte and then
+ * selected by the mandatory prefix.
+ */
+static const struct opcode opcode_map_0f_38[256] = {
+	/* 0x00 - 0x7f */
+	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
+	/* 0x80 - 0xef */
+	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
+	/* 0xf0 - 0xf1 */
+	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
+	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+	/* 0xf2 - 0xff */
+	N, N, X4(N), X8(N)
+};
+
 #undef D
 #undef N
 #undef G
@@ -4040,7 +4111,8 @@
 	case OpMem8:
 		ctxt->memop.bytes = 1;
 		if (ctxt->memop.type == OP_REG) {
-			ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
+			ctxt->memop.addr.reg = decode_register(ctxt,
+					ctxt->modrm_rm, true);
 			fetch_register_operand(&ctxt->memop);
 		}
 		goto mem_common;
@@ -4126,6 +4198,7 @@
 	ctxt->_eip = ctxt->eip;
 	ctxt->fetch.start = ctxt->_eip;
 	ctxt->fetch.end = ctxt->fetch.start + insn_len;
+	ctxt->opcode_len = 1;
 	if (insn_len > 0)
 		memcpy(ctxt->fetch.data, insn, insn_len);
 
@@ -4208,9 +4281,16 @@
 	opcode = opcode_table[ctxt->b];
 	/* Two-byte opcode? */
 	if (ctxt->b == 0x0f) {
-		ctxt->twobyte = 1;
+		ctxt->opcode_len = 2;
 		ctxt->b = insn_fetch(u8, ctxt);
 		opcode = twobyte_table[ctxt->b];
+
+		/* 0F_38 opcode map */
+		if (ctxt->b == 0x38) {
+			ctxt->opcode_len = 3;
+			ctxt->b = insn_fetch(u8, ctxt);
+			opcode = opcode_map_0f_38[ctxt->b];
+		}
 	}
 	ctxt->d = opcode.flags;
 
@@ -4267,7 +4347,7 @@
 	if (ctxt->d == 0 || (ctxt->d & NotImpl))
 		return EMULATION_FAILED;
 
-	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+	if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
 		return EMULATION_FAILED;
 
 	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
@@ -4540,8 +4620,10 @@
 		goto writeback;
 	}
 
-	if (ctxt->twobyte)
+	if (ctxt->opcode_len == 2)
 		goto twobyte_insn;
+	else if (ctxt->opcode_len == 3)
+		goto threebyte_insn;
 
 	switch (ctxt->b) {
 	case 0x63:		/* movsxd */
@@ -4726,6 +4808,8 @@
 		goto cannot_emulate;
 	}
 
+threebyte_insn:
+
 	if (rc != X86EMUL_CONTINUE)
 		goto done;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dce0df8..40772ef 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,11 +2570,6 @@
 	kvm_release_pfn_clean(pfn);
 }
 
-static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
-{
-	mmu_free_roots(vcpu);
-}
-
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
@@ -3424,18 +3419,11 @@
 	return 0;
 }
 
-static void nonpaging_free(struct kvm_vcpu *vcpu)
+static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu *context)
 {
-	mmu_free_roots(vcpu);
-}
-
-static int nonpaging_init_context(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu *context)
-{
-	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
-	context->free = nonpaging_free;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
 	context->update_pte = nonpaging_update_pte;
@@ -3444,7 +3432,6 @@
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
 	context->nx = false;
-	return 0;
 }
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -3454,9 +3441,8 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
 
-static void paging_new_cr3(struct kvm_vcpu *vcpu)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
@@ -3471,11 +3457,6 @@
 	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
-static void paging_free(struct kvm_vcpu *vcpu)
-{
-	nonpaging_free(vcpu);
-}
-
 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
 			   unsigned access, int *nr_present)
 {
@@ -3665,9 +3646,9 @@
 	mmu->last_pte_bitmap = map;
 }
 
-static int paging64_init_context_common(struct kvm_vcpu *vcpu,
-					struct kvm_mmu *context,
-					int level)
+static void paging64_init_context_common(struct kvm_vcpu *vcpu,
+					 struct kvm_mmu *context,
+					 int level)
 {
 	context->nx = is_nx(vcpu);
 	context->root_level = level;
@@ -3677,27 +3658,24 @@
 	update_last_pte_bitmap(vcpu, context);
 
 	ASSERT(is_pae(vcpu));
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
 	context->update_pte = paging64_update_pte;
-	context->free = paging_free;
 	context->shadow_root_level = level;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
-	return 0;
 }
 
-static int paging64_init_context(struct kvm_vcpu *vcpu,
-				 struct kvm_mmu *context)
+static void paging64_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
-	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
 }
 
-static int paging32_init_context(struct kvm_vcpu *vcpu,
-				 struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
 	context->nx = false;
 	context->root_level = PT32_ROOT_LEVEL;
@@ -3706,33 +3684,28 @@
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
 
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
-	context->free = paging_free;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
 	context->update_pte = paging32_update_pte;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
-	return 0;
 }
 
-static int paging32E_init_context(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu *context)
+static void paging32E_init_context(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu *context)
 {
-	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
-static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
 	context->base_role.word = 0;
-	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
-	context->free = nonpaging_free;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
 	context->update_pte = nonpaging_update_pte;
@@ -3767,37 +3740,32 @@
 
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
-
-	return 0;
 }
 
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
-	int r;
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
-		r = nonpaging_init_context(vcpu, context);
+		nonpaging_init_context(vcpu, context);
 	else if (is_long_mode(vcpu))
-		r = paging64_init_context(vcpu, context);
+		paging64_init_context(vcpu, context);
 	else if (is_pae(vcpu))
-		r = paging32E_init_context(vcpu, context);
+		paging32E_init_context(vcpu, context);
 	else
-		r = paging32_init_context(vcpu, context);
+		paging32_init_context(vcpu, context);
 
 	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
 	vcpu->arch.mmu.base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
-
-	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly)
 {
 	ASSERT(vcpu);
@@ -3806,37 +3774,30 @@
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 
 	context->nx = true;
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = ept_page_fault;
 	context->gva_to_gpa = ept_gva_to_gpa;
 	context->sync_page = ept_sync_page;
 	context->invlpg = ept_invlpg;
 	context->update_pte = ept_update_pte;
-	context->free = paging_free;
 	context->root_level = context->shadow_root_level;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
 
 	update_permission_bitmask(vcpu, context, true);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
-
+	kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
 	vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
 	vcpu->arch.walk_mmu->get_cr3           = get_cr3;
 	vcpu->arch.walk_mmu->get_pdptr         = kvm_pdptr_read;
 	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
-	return r;
 }
 
-static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
@@ -3873,11 +3834,9 @@
 
 	update_permission_bitmask(vcpu, g_context, false);
 	update_last_pte_bitmap(vcpu, g_context);
-
-	return 0;
 }
 
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	if (mmu_is_nested(vcpu))
 		return init_kvm_nested_mmu(vcpu);
@@ -3887,18 +3846,12 @@
 		return init_kvm_softmmu(vcpu);
 }
 
-static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		/* mmu.free() should set root_hpa = INVALID_PAGE */
-		vcpu->arch.mmu.free(vcpu);
-}
 
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
-{
-	destroy_kvm_mmu(vcpu);
-	return init_kvm_mmu(vcpu);
+	kvm_mmu_unload(vcpu);
+	init_kvm_mmu(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
@@ -3923,6 +3876,7 @@
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
 	mmu_free_roots(vcpu);
+	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
@@ -4281,12 +4235,12 @@
 	return alloc_mmu_pages(vcpu);
 }
 
-int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	return init_kvm_mmu(vcpu);
+	init_kvm_mmu(vcpu);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
@@ -4428,7 +4382,7 @@
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx;
@@ -4478,9 +4432,8 @@
 		break;
 	}
 
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return freed;
-
 }
 
 static unsigned long
@@ -4574,7 +4527,7 @@
 {
 	ASSERT(vcpu);
 
-	destroy_kvm_mmu(vcpu);
+	kvm_mmu_unload(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);
 }
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 77e044a..2926152 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -70,8 +70,8 @@
 };
 
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c0bc803..c7168a5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1959,11 +1959,9 @@
 	nested_svm_vmexit(svm);
 }
 
-static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-	int r;
-
-	r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+	kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
 
 	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
@@ -1971,8 +1969,6 @@
 	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu.shadow_root_level = get_npt_level();
 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
-
-	return r;
 }
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2b2fce1..b2fe1c2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1498,7 +1498,7 @@
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
-		printk_once(KERN_WARNING"Not enough mst switch entries. "
+		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
 	} else if (i == m->nr) {
@@ -1898,16 +1898,12 @@
 /*
  * KVM wants to inject page-faults which it got to the guest. This function
  * checks whether in a nested guest, we need to inject them to L1 or L2.
- * This function assumes it is called with the exit reason in vmcs02 being
- * a #PF exception (this is the only case in which KVM injects a #PF when L2
- * is running).
  */
-static int nested_pf_handled(struct kvm_vcpu *vcpu)
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
-	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
-	if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
 	nested_vmx_vmexit(vcpu);
@@ -1921,8 +1917,8 @@
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info = nr | INTR_INFO_VALID_MASK;
 
-	if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
-	    !vmx->nested.nested_run_pending && nested_pf_handled(vcpu))
+	if (!reinject && is_guest_mode(vcpu) &&
+	    nested_vmx_check_exception(vcpu, nr))
 		return;
 
 	if (has_error_code) {
@@ -2204,9 +2200,15 @@
 #ifdef CONFIG_X86_64
 		VM_EXIT_HOST_ADDR_SPACE_SIZE |
 #endif
-		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
+		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
+		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+	if (!(nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) ||
+	    !(nested_vmx_exit_ctls_high & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)) {
+		nested_vmx_exit_ctls_high &= ~VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+		nested_vmx_pinbased_ctls_high &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	}
 	nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
-				      VM_EXIT_LOAD_IA32_EFER);
+		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER);
 
 	/* entry controls */
 	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2226,7 +2228,8 @@
 		nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
 	nested_vmx_procbased_ctls_low = 0;
 	nested_vmx_procbased_ctls_high &=
-		CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+		CPU_BASED_VIRTUAL_INTR_PENDING |
+		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
 		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
 		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
 		CPU_BASED_CR3_STORE_EXITING |
@@ -2252,13 +2255,15 @@
 	nested_vmx_secondary_ctls_low = 0;
 	nested_vmx_secondary_ctls_high &=
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+		SECONDARY_EXEC_UNRESTRICTED_GUEST |
 		SECONDARY_EXEC_WBINVD_EXITING;
 
 	if (enable_ept) {
 		/* nested EPT: emulate EPT also to L1 */
 		nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
 		nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
-			 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
+			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
+			 VMX_EPT_INVEPT_BIT;
 		nested_vmx_ept_caps &= vmx_capability.ept;
 		/*
 		 * Since invept is completely emulated we support both global
@@ -3380,8 +3385,10 @@
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
-			vcpu->kvm->arch.ept_identity_map_addr;
+		if (is_paging(vcpu) || is_guest_mode(vcpu))
+			guest_cr3 = kvm_read_cr3(vcpu);
+		else
+			guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
 
@@ -4879,6 +4886,17 @@
 	hypercall[2] = 0xc1;
 }
 
+static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val)
+{
+	unsigned long always_on = VMXON_CR0_ALWAYSON;
+
+	if (nested_vmx_secondary_ctls_high &
+		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
+	return (val & always_on) == always_on;
+}
+
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -4897,9 +4915,7 @@
 		val = (val & ~vmcs12->cr0_guest_host_mask) |
 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-		/* TODO: will have to take unrestricted guest mode into
-		 * account */
-		if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
+		if (!nested_cr0_valid(vmcs12, val))
 			return 1;
 
 		if (kvm_set_cr0(vcpu, val))
@@ -6627,6 +6643,9 @@
 			return 0;
 		else if (is_page_fault(intr_info))
 			return enable_ept;
+		else if (is_no_device(intr_info) &&
+			 !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+			return 0;
 		return vmcs12->exception_bitmap &
 				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
 	case EXIT_REASON_EXTERNAL_INTERRUPT:
@@ -6722,6 +6741,27 @@
 	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
+static void nested_adjust_preemption_timer(struct kvm_vcpu *vcpu)
+{
+	u64 delta_tsc_l1;
+	u32 preempt_val_l1, preempt_val_l2, preempt_scale;
+
+	if (!(get_vmcs12(vcpu)->pin_based_vm_exec_control &
+			PIN_BASED_VMX_PREEMPTION_TIMER))
+		return;
+	preempt_scale = native_read_msr(MSR_IA32_VMX_MISC) &
+			MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE;
+	preempt_val_l2 = vmcs_read32(VMX_PREEMPTION_TIMER_VALUE);
+	delta_tsc_l1 = vmx_read_l1_tsc(vcpu, native_read_tsc())
+		- vcpu->arch.last_guest_tsc;
+	preempt_val_l1 = delta_tsc_l1 >> preempt_scale;
+	if (preempt_val_l2 <= preempt_val_l1)
+		preempt_val_l2 = 0;
+	else
+		preempt_val_l2 -= preempt_val_l1;
+	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, preempt_val_l2);
+}
+
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
@@ -6736,20 +6776,6 @@
 	if (vmx->emulation_required)
 		return handle_invalid_guest_state(vcpu);
 
-	/*
-	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
-	 * we did not inject a still-pending event to L1 now because of
-	 * nested_run_pending, we need to re-enable this bit.
-	 */
-	if (vmx->nested.nested_run_pending)
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-	if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
-	    exit_reason == EXIT_REASON_VMRESUME))
-		vmx->nested.nested_run_pending = 1;
-	else
-		vmx->nested.nested_run_pending = 0;
-
 	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
 		nested_vmx_vmexit(vcpu);
 		return 1;
@@ -7061,9 +7087,9 @@
 	case INTR_TYPE_HARD_EXCEPTION:
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
 			u32 err = vmcs_read32(error_code_field);
-			kvm_queue_exception_e(vcpu, vector, err);
+			kvm_requeue_exception_e(vcpu, vector, err);
 		} else
-			kvm_queue_exception(vcpu, vector);
+			kvm_requeue_exception(vcpu, vector);
 		break;
 	case INTR_TYPE_SOFT_INTR:
 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
@@ -7146,6 +7172,8 @@
 	atomic_switch_perf_msrs(vmx);
 	debugctlmsr = get_debugctlmsr();
 
+	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending)
+		nested_adjust_preemption_timer(vcpu);
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
@@ -7284,6 +7312,16 @@
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
 	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
+	/*
+	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
+	 * we did not inject a still-pending event to L1 now because of
+	 * nested_run_pending, we need to re-enable this bit.
+	 */
+	if (vmx->nested.nested_run_pending)
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+	vmx->nested.nested_run_pending = 0;
+
 	vmx_complete_atomic_exit(vmx);
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
@@ -7410,8 +7448,7 @@
 	 */
 	if (is_mmio)
 		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-	else if (vcpu->kvm->arch.iommu_domain &&
-		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
+	else if (kvm_arch_has_noncoherent_dma(vcpu->kvm))
 		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
 		      VMX_EPT_MT_EPTE_SHIFT;
 	else
@@ -7501,9 +7538,9 @@
 	return get_vmcs12(vcpu)->ept_pointer;
 }
 
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-	int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+	kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
 			nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
 
 	vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
@@ -7511,8 +7548,6 @@
 	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
 
 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
-
-	return r;
 }
 
 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
@@ -7520,6 +7555,20 @@
 	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 }
 
+static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
+		struct x86_exception *fault)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	WARN_ON(!is_guest_mode(vcpu));
+
+	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
+	if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
+		nested_vmx_vmexit(vcpu);
+	else
+		kvm_inject_page_fault(vcpu, fault);
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7533,6 +7582,7 @@
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
+	u32 exit_control;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -7706,7 +7756,10 @@
 	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
 	 * bits are further modified by vmx_set_efer() below.
 	 */
-	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+	exit_control = vmcs_config.vmexit_ctrl;
+	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
+		exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+	vmcs_write32(VM_EXIT_CONTROLS, exit_control);
 
 	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
 	 * emulated by vmx_set_efer(), below.
@@ -7773,6 +7826,9 @@
 	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
 	kvm_mmu_reset_context(vcpu);
 
+	if (!enable_ept)
+		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
+
 	/*
 	 * L1 may access the L2's PDPTR, so save them to construct vmcs12
 	 */
@@ -7876,7 +7932,7 @@
 		return 1;
 	}
 
-	if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+	if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) ||
 	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
@@ -7938,6 +7994,8 @@
 
 	enter_guest_mode(vcpu);
 
+	vmx->nested.nested_run_pending = 1;
+
 	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
 
 	cpu = get_cpu();
@@ -8005,7 +8063,7 @@
 	u32 idt_vectoring;
 	unsigned int nr;
 
-	if (vcpu->arch.exception.pending) {
+	if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
 		nr = vcpu->arch.exception.nr;
 		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
 
@@ -8023,7 +8081,7 @@
 		}
 
 		vmcs12->idt_vectoring_info_field = idt_vectoring;
-	} else if (vcpu->arch.nmi_pending) {
+	} else if (vcpu->arch.nmi_injected) {
 		vmcs12->idt_vectoring_info_field =
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
 	} else if (vcpu->arch.interrupt.pending) {
@@ -8105,6 +8163,11 @@
 	vmcs12->guest_pending_dbg_exceptions =
 		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
 
+	if ((vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) &&
+	    (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
+		vmcs12->vmx_preemption_timer_value =
+			vmcs_read32(VMX_PREEMPTION_TIMER_VALUE);
+
 	/*
 	 * In some cases (usually, nested EPT), L2 is allowed to change its
 	 * own CR3 without exiting. If it has changed it, we must keep it.
@@ -8130,6 +8193,8 @@
 	vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
 		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
+		vmcs12->guest_ia32_efer = vcpu->arch.efer;
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
@@ -8201,7 +8266,7 @@
 	 * fpu_active (which may have changed).
 	 * Note that vmx_set_cr0 refers to efer set above.
 	 */
-	kvm_set_cr0(vcpu, vmcs12->host_cr0);
+	vmx_set_cr0(vcpu, vmcs12->host_cr0);
 	/*
 	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
 	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
@@ -8224,6 +8289,9 @@
 	kvm_set_cr3(vcpu, vmcs12->host_cr3);
 	kvm_mmu_reset_context(vcpu);
 
+	if (!enable_ept)
+		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+
 	if (enable_vpid) {
 		/*
 		 * Trivially support vpid by letting L2s share their parent
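
nested_adjust_preemption_timer() above compensates the saved L2
preemption-timer value for the time spent in L1: the elapsed TSC delta is
scaled down by the rate field in MSR_IA32_VMX_MISC bits 4:0 and subtracted,
clamping at zero. The arithmetic as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

static uint32_t adjust_preemption_timer(uint32_t saved_l2_val,
					uint64_t delta_tsc_l1,
					unsigned int preempt_scale)
{
	uint64_t elapsed_l1 = delta_tsc_l1 >> preempt_scale;

	if (saved_l2_val <= elapsed_l1)
		return 0;		/* the timer would already have fired */
	return saved_l2_val - (uint32_t)elapsed_l1;
}

int main(void)
{
	/* 10000 TSC ticks at scale 5 consume 312 timer units */
	printf("%u\n", adjust_preemption_timer(1000, 10000, 5));
	return 0;
}
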
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e5ca72a..21ef1ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -577,6 +577,7 @@
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
 	u64 xcr0;
+	u64 valid_bits;
 
 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
 	if (index != XCR_XFEATURE_ENABLED_MASK)
@@ -586,8 +587,16 @@
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
 		return 1;
-	if (xcr0 & ~host_xcr0)
+
+	/*
+	 * Do not allow the guest to set bits that we do not support
+	 * saving.  However, xcr0 bit 0 is always set, even if the
+	 * emulated CPU does not support XSAVE (see fx_init).
+	 */
+	valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
+	if (xcr0 & ~valid_bits)
 		return 1;
+
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
 	return 0;
@@ -684,7 +693,7 @@
 
 	vcpu->arch.cr3 = cr3;
 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-	vcpu->arch.mmu.new_cr3(vcpu);
+	kvm_mmu_new_cr3(vcpu);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
@@ -2564,6 +2573,7 @@
 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
 	case KVM_CAP_SET_TSS_ADDR:
 	case KVM_CAP_EXT_CPUID:
+	case KVM_CAP_EXT_EMUL_CPUID:
 	case KVM_CAP_CLOCKSOURCE:
 	case KVM_CAP_PIT:
 	case KVM_CAP_NOP_IO_DELAY:
@@ -2673,15 +2683,17 @@
 		r = 0;
 		break;
 	}
-	case KVM_GET_SUPPORTED_CPUID: {
+	case KVM_GET_SUPPORTED_CPUID:
+	case KVM_GET_EMULATED_CPUID: {
 		struct kvm_cpuid2 __user *cpuid_arg = argp;
 		struct kvm_cpuid2 cpuid;
 
 		r = -EFAULT;
 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
 			goto out;
-		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
-						      cpuid_arg->entries);
+
+		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
+					    ioctl);
 		if (r)
 			goto out;
 
@@ -2715,8 +2727,7 @@
 
 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-	return vcpu->kvm->arch.iommu_domain &&
-		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
+	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -2984,11 +2995,13 @@
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
-	if (cpu_has_xsave)
+	if (cpu_has_xsave) {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->xsave,
-			xstate_size);
-	else {
+			vcpu->arch.guest_xstate_size);
+		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
+			vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+	} else {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->fxsave,
 			sizeof(struct i387_fxsave_struct));
@@ -3003,10 +3016,19 @@
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
 
-	if (cpu_has_xsave)
+	if (cpu_has_xsave) {
+		/*
+		 * Here we allow setting states that are not present in
+		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
+		 * with old userspace.
+		 */
+		if (xstate_bv & ~KVM_SUPPORTED_XCR0)
+			return -EINVAL;
+		if (xstate_bv & ~host_xcr0)
+			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state->xsave,
-			guest_xsave->region, xstate_size);
-	else {
+			guest_xsave->region, vcpu->arch.guest_xstate_size);
+	} else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
@@ -3042,9 +3064,9 @@
 
 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
 		/* Only support XCR0 currently */
-		if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
+		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
-				guest_xcrs->xcrs[0].value);
+				guest_xcrs->xcrs[i].value);
 			break;
 		}
 	if (r)
@@ -4775,8 +4797,8 @@
 
 static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
 {
-	memset(&ctxt->twobyte, 0,
-	       (void *)&ctxt->_regs - (void *)&ctxt->twobyte);
+	memset(&ctxt->opcode_len, 0,
+	       (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);
 
 	ctxt->fetch.start = 0;
 	ctxt->fetch.end = 0;
@@ -5094,8 +5116,7 @@
 		ctxt->have_exception = false;
 		ctxt->perm_ok = false;
 
-		ctxt->only_vendor_specific_insn
-			= emulation_type & EMULTYPE_TRAP_UD;
+		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
 
 		r = x86_decode_insn(ctxt, insn, insn_len);
 
@@ -5263,7 +5284,7 @@
 
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
@@ -5273,7 +5294,7 @@
 				send_ipi = 1;
 		}
 	}
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 
 	if (freq->old < freq->new && send_ipi) {
 		/*
@@ -5426,12 +5447,12 @@
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
 	atomic_set(&kvm_guest_has_master_clock, 0);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
@@ -5945,10 +5966,12 @@
 
 	vcpu->mode = IN_GUEST_MODE;
 
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
 	/* We should set ->mode before check ->requests,
 	 * see the comment in make_all_cpus_request.
 	 */
-	smp_mb();
+	smp_mb__after_srcu_read_unlock();
 
 	local_irq_disable();
 
@@ -5958,12 +5981,11 @@
 		smp_wmb();
 		local_irq_enable();
 		preempt_enable();
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = 1;
 		goto cancel_injection;
 	}
 
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
@@ -6688,7 +6710,7 @@
 	if (r)
 		return r;
 	kvm_vcpu_reset(vcpu);
-	r = kvm_mmu_setup(vcpu);
+	kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
 
 	return r;
@@ -6940,6 +6962,10 @@
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
 	vcpu->arch.pv_time_enabled = false;
+
+	vcpu->arch.guest_supported_xcr0 = 0;
+	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 
@@ -6981,6 +7007,7 @@
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
@@ -7065,7 +7092,7 @@
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 	int i;
@@ -7086,7 +7113,8 @@
 	}
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	int i;
 
@@ -7283,7 +7311,7 @@
 	int r;
 
 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
-	      is_error_page(work->page))
+	      work->wakeup_all)
 		return;
 
 	r = kvm_mmu_reload(vcpu);
@@ -7393,7 +7421,7 @@
 	struct x86_exception fault;
 
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
-	if (is_error_page(work->page))
+	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
@@ -7420,6 +7448,24 @@
 			kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+	atomic_inc(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
+
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+	atomic_dec(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
+
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+	return atomic_read(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
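The three kvm_arch_*_noncoherent_dma() helpers above reduce need_emulate_wbinvd() to a simple counter check. As a hedged sketch of the intended call sites (the example_* names are hypothetical; the helpers and iommu_domain_has_cap() are real), a device-assignment path would bracket a non-coherent device like this:

/*
 * Sketch only, not from this series: count the VM as having
 * non-coherent DMA while such a device is assigned to it.
 */
static int example_assign_device(struct kvm *kvm, struct iommu_domain *dom)
{
	if (!iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
		kvm_arch_register_noncoherent_dma(kvm);
	return 0;
}

static void example_deassign_device(struct kvm *kvm, struct iommu_domain *dom)
{
	if (!iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
		kvm_arch_unregister_noncoherent_dma(kvm);
}

While any such device is assigned, kvm_arch_has_noncoherent_dma() returns true and the guest's WBINVD is emulated rather than ignored.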
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e224f7a..587fb9e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -122,6 +122,7 @@
 	gva_t addr, void *val, unsigned int bytes,
 	struct x86_exception *exception);
 
+#define KVM_SUPPORTED_XCR0	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 extern u64 host_xcr0;
 
 extern struct static_key kvm_no_apic_vcpu;
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index a6b1b86..518532e 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -47,6 +47,21 @@
 }
 EXPORT_SYMBOL(rdmsr_on_cpu);
 
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+	*q = rv.reg.q;
+
+	return err;
+}
+EXPORT_SYMBOL(rdmsrl_on_cpu);
+
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
@@ -63,6 +78,22 @@
 }
 EXPORT_SYMBOL(wrmsr_on_cpu);
 
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.q = q;
+
+	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+	return err;
+}
+EXPORT_SYMBOL(wrmsrl_on_cpu);
+
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 			    struct msr *msrs,
 			    void (*msr_func) (void *info))
@@ -159,6 +190,37 @@
 }
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
 
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.q = q;
+
+	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+	*q = rv.reg.q;
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+
 /*
  * These variants are significantly slower, but allow control over
  * the entire 32-bit GPR set.
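For illustration, a caller of the new 64-bit helpers might look like the sketch below. MSR_PLATFORM_INFO is just an example register; the _safe variants additionally report a faulting RDMSR/WRMSR via rv.err instead of oopsing:

#include <asm/msr.h>

/* Sketch: whole 64-bit reads/writes in one call, with no manual
 * lo/hi splitting as rdmsr_on_cpu()/wrmsr_on_cpu() require. */
static int example_read_msr(unsigned int cpu, u64 *val)
{
	return rdmsrl_safe_on_cpu(cpu, MSR_PLATFORM_INFO, val);
}

static int example_write_msr(unsigned int cpu, u32 msr_no, u64 val)
{
	/* msr_no should name a writable MSR; a faulting write is
	 * returned as an error rather than crashing the kernel */
	return wrmsrl_safe_on_cpu(cpu, msr_no, val);
}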
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 23d8e5f..6a19ad9 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -6,6 +6,8 @@
 CFLAGS_physaddr.o		:= $(nostackp)
 CFLAGS_setup_nx.o		:= $(nostackp)
 
+CFLAGS_fault.o := -I$(src)/../include/asm/trace
+
 obj-$(CONFIG_X86_PAT)		+= pat_rbtree.o
 obj-$(CONFIG_SMP)		+= tlb.o
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7a517bb..9ff85bb 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,6 +20,9 @@
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
 #include <asm/fixmap.h>			/* VSYSCALL_START		*/
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
+
 /*
  * Page fault error code bits:
  *
@@ -596,7 +599,7 @@
 
 	printk(KERN_CONT " at %p\n", (void *) address);
 	printk(KERN_ALERT "IP:");
-	printk_address(regs->ip, 1);
+	printk_address(regs->ip);
 
 	dump_pagetable(address);
 }
@@ -1232,3 +1235,23 @@
 	__do_page_fault(regs, error_code);
 	exception_exit(prev_state);
 }
+
+static void trace_page_fault_entries(struct pt_regs *regs,
+				     unsigned long error_code)
+{
+	if (user_mode(regs))
+		trace_page_fault_user(read_cr2(), regs, error_code);
+	else
+		trace_page_fault_kernel(read_cr2(), regs, error_code);
+}
+
+dotraplinkage void __kprobes
+trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+	enum ctx_state prev_state;
+
+	prev_state = exception_enter();
+	trace_page_fault_entries(regs, error_code);
+	__do_page_fault(regs, error_code);
+	exception_exit(prev_state);
+}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ce32017..f971306 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -53,12 +53,12 @@
 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
-			panic("alloc_low_page: ran out of memory");
+			panic("alloc_low_pages: ran out of memory");
 		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
 					max_pfn_mapped << PAGE_SHIFT,
 					PAGE_SIZE * num , PAGE_SIZE);
 		if (!ret)
-			panic("alloc_low_page: can not alloc memory");
+			panic("alloc_low_pages: can not alloc memory");
 		memblock_reserve(ret, PAGE_SIZE * num);
 		pfn = ret >> PAGE_SHIFT;
 	} else {
@@ -418,27 +418,27 @@
 	return step_size << 5;
 }
 
-void __init init_mem_mapping(void)
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will set up the direct mapping for the memory range
+ * [map_start, map_end) top-down. That is, the page tables will be
+ * allocated at the end of the memory, and we map the memory
+ * top-down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
 {
-	unsigned long end, real_end, start, last_start;
+	unsigned long real_end, start, last_start;
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 	unsigned long new_mapped_ram_size;
 
-	probe_page_size_mask();
-
-#ifdef CONFIG_X86_64
-	end = max_pfn << PAGE_SHIFT;
-#else
-	end = max_low_pfn << PAGE_SHIFT;
-#endif
-
-	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
-
 	/* Xen has a big range reserved near the end of RAM; skip it at first. */
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size needs to be small so the pgt_buf from BRK can cover it */
@@ -453,13 +453,13 @@
 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
 	 * for page table.
 	 */
-	while (last_start > ISA_END_ADDRESS) {
+	while (last_start > map_start) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
-			if (start < ISA_END_ADDRESS)
-				start = ISA_END_ADDRESS;
+			if (start < map_start)
+				start = map_start;
 		} else
-			start = ISA_END_ADDRESS;
+			start = map_start;
 		new_mapped_ram_size = init_range_memory_mapping(start,
 							last_start);
 		last_start = start;
@@ -470,8 +470,89 @@
 		mapped_ram_size += new_mapped_ram_size;
 	}
 
-	if (real_end < end)
-		init_range_memory_mapping(real_end, end);
+	if (real_end < map_end)
+		init_range_memory_mapping(real_end, map_end);
+}
+
+/**
+ * memory_map_bottom_up - Map [map_start, map_end) bottom up
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will set up the direct mapping for the memory range
+ * [map_start, map_end) bottom-up. Since we have limited bottom-up
+ * allocation to the region above the kernel, the page tables will
+ * be allocated just above the kernel, and we map the memory in
+ * [map_start, map_end) bottom-up.
+ */
+static void __init memory_map_bottom_up(unsigned long map_start,
+					unsigned long map_end)
+{
+	unsigned long next, new_mapped_ram_size, start;
+	unsigned long mapped_ram_size = 0;
+	/* step_size needs to be small so the pgt_buf from BRK can cover it */
+	unsigned long step_size = PMD_SIZE;
+
+	start = map_start;
+	min_pfn_mapped = start >> PAGE_SHIFT;
+
+	/*
+	 * We start from the bottom (@map_start) and go to the top (@map_end).
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
+	while (start < map_end) {
+		if (map_end - start > step_size) {
+			next = round_up(start + 1, step_size);
+			if (next > map_end)
+				next = map_end;
+		} else
+			next = map_end;
+
+		new_mapped_ram_size = init_range_memory_mapping(start, next);
+		start = next;
+
+		if (new_mapped_ram_size > mapped_ram_size)
+			step_size = get_new_step_size(step_size);
+		mapped_ram_size += new_mapped_ram_size;
+	}
+}
+
+void __init init_mem_mapping(void)
+{
+	unsigned long end;
+
+	probe_page_size_mask();
+
+#ifdef CONFIG_X86_64
+	end = max_pfn << PAGE_SHIFT;
+#else
+	end = max_low_pfn << PAGE_SHIFT;
+#endif
+
+	/* the ISA range is always mapped regardless of memory holes */
+	init_memory_mapping(0, ISA_END_ADDRESS);
+
+	/*
+	 * If memblock is allocating in the bottom-up direction, we set up
+	 * the direct mapping bottom-up; otherwise we set it up top-down.
+	 */
+	if (memblock_bottom_up()) {
+		unsigned long kernel_end = __pa_symbol(_end);
+
+		/*
+		 * We need two separate calls here because we want to allocate
+		 * the page tables above the kernel. So we first map
+		 * [kernel_end, end) to get the memory above the kernel mapped
+		 * as soon as possible, and then use the page tables allocated
+		 * above the kernel to map [ISA_END_ADDRESS, kernel_end).
+		 */
+		memory_map_bottom_up(kernel_end, end);
+		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
+	} else {
+		memory_map_top_down(ISA_END_ADDRESS, end);
+	}
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
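The growth policy above is easiest to see numerically: each round maps at most step_size bytes, and once a round maps more than all previous rounds combined, the step is multiplied by 32 (get_new_step_size() returns step_size << 5 in this patch). A standalone model with made-up start/end values, compilable with any C compiler:

#include <stdio.h>

int main(void)
{
	unsigned long step = 2UL << 20;		/* PMD_SIZE: 2 MiB */
	unsigned long start = 16UL << 20;	/* made-up map_start */
	unsigned long end = 1UL << 30;		/* made-up map_end: 1 GiB */
	unsigned long mapped = 0;

	while (start < end) {
		unsigned long next = (end - start > step) ? start + step : end;
		unsigned long chunk = next - start;

		printf("map [%#lx, %#lx)\n", start, next);
		if (chunk > mapped)	/* this round beat the running total */
			step <<= 5;	/* get_new_step_size() */
		mapped += chunk;
		start = next;
	}
	return 0;
}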
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 8bf93ba..24aec58 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -567,6 +567,17 @@
 	ret = init_func();
 	if (ret < 0)
 		return ret;
+
+	/*
+	 * We reset memblock back to the top-down direction
+	 * here because if we configured ACPI_NUMA, we have
+	 * parsed SRAT in init_func(). It is OK to have the
+	 * reset here even if we didn't configure ACPI_NUMA,
+	 * or if ACPI NUMA init fails and falls back to dummy
+	 * NUMA init.
+	 */
+	memblock_set_bottom_up(false);
+
 	ret = numa_cleanup_meminfo(&numa_meminfo);
 	if (ret < 0)
 		return ret;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dfa537a..a7cccb6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,8 +25,12 @@
 	struct page *pte;
 
 	pte = alloc_pages(__userpte_alloc_gfp, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
@@ -189,8 +193,10 @@
 	int i;
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++)
-		if (pmds[i])
+		if (pmds[i]) {
+			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
 			free_page((unsigned long)pmds[i]);
+		}
 }
 
 static int preallocate_pmds(pmd_t *pmds[])
@@ -200,8 +206,13 @@
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++) {
 		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
-		if (pmd == NULL)
+		if (!pmd)
 			failed = true;
+		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
+			free_page((unsigned long)pmd);
+			pmd = NULL;
+			failed = true;
+		}
 		pmds[i] = pmd;
 	}
 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 516593e1..26328e8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -788,5 +788,7 @@
 	if (fp->bpf_func != sk_run_filter) {
 		INIT_WORK(&fp->work, bpf_jit_free_deferred);
 		schedule_work(&fp->work);
+	} else {
+		kfree(fp);
 	}
 }
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index b30e937..7fb24e5 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -354,12 +354,12 @@
 			 * the kernel resource tree doesn't allow overlaps.
 			 */
 			if (resource_overlaps(res1, res2)) {
-				res1->start = min(res1->start, res2->start);
-				res1->end = max(res1->end, res2->end);
+				res2->start = min(res1->start, res2->start);
+				res2->end = max(res1->end, res2->end);
 				dev_info(&info->bridge->dev,
 					 "host bridge window expanded to %pR; %pR ignored\n",
-					 res1, res2);
-				res2->flags = 0;
+					 res2, res1);
+				res1->flags = 0;
 			}
 		}
 	}
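The hunk above swaps which resource survives the merge: the union now lands in res2 and res1 is blanked, presumably so the window the loop will still visit carries the merged range. A minimal standalone model of the union-and-drop merge (struct res is a stand-in for struct resource):

#include <stdio.h>

struct res { unsigned long start, end; };

/* merge overlapping r1 into r2 and drop r1, as the fixed code does */
static void coalesce(struct res *r1, struct res *r2)
{
	r2->start = r1->start < r2->start ? r1->start : r2->start;
	r2->end = r1->end > r2->end ? r1->end : r2->end;
	r1->start = r1->end = 0;	/* analogue of res1->flags = 0 */
}

int main(void)
{
	struct res a = { 0x1000, 0x4fff }, b = { 0x3000, 0x7fff };

	coalesce(&a, &b);
	printf("expanded window: [%#lx-%#lx]\n", b.start, b.end);
	return 0;
}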
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index f5809fa..b046e07 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -231,7 +231,7 @@
 	offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
 
 	if ((offset) && (where == offset))
-		value = value & 0xfffffffc;
+		value = value & ~PCI_EXP_LNKCTL_ASPMC;
 
 	return raw_pci_write(pci_domain_nr(bus), bus->number,
 						devfn, where, size, value);
@@ -252,7 +252,7 @@
  */
 static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 {
-	int cap_base, i;
+	int i;
 	struct pci_bus  *pbus;
 	struct pci_dev *dev;
 
@@ -278,7 +278,7 @@
 		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
 			quirk_aspm_offset[i] = 0;
 
-		pbus->ops = pbus->parent->ops;
+		pci_bus_set_ops(pbus, pbus->parent->ops);
 	} else {
 		/*
 		 * If devices are attached to the root port at power-up or
@@ -286,13 +286,15 @@
 		 * each root port to save the register offsets and replace the
 		 * bus ops.
 		 */
-		list_for_each_entry(dev, &pbus->devices, bus_list) {
+		list_for_each_entry(dev, &pbus->devices, bus_list)
 			/* There are 0 to 8 devices attached to this bus */
-			cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
-			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
-		}
-		pbus->ops = &quirk_pcie_aspm_ops;
+			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
+				dev->pcie_cap + PCI_EXP_LNKCTL;
+
+		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
+		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
 	}
+
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA,	pcie_rootport_aspm_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA1,	pcie_rootport_aspm_quirk);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 48e8461..5eee495 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -382,7 +382,14 @@
 {
 	xen_destroy_irq(irq);
 }
-
+static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	return 0;
+}
+static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	return 0;
+}
 #endif
 
 int __init pci_xen_init(void)
@@ -406,6 +413,8 @@
 	x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 	x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
+	x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
+	x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
 #endif
 	return 0;
 }
@@ -485,6 +494,8 @@
 	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 	x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
+	x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
+	x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
 #endif
 	xen_setup_acpi_sci();
 	__acpi_register_gsi = acpi_register_gsi_xen;
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index fef7d0b..649a12b 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -40,16 +40,9 @@
  */
 static int set_lid_wake_behavior(bool wake_on_close)
 {
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
 	acpi_status status;
 
-	arg_list.count		= 1;
-	arg_list.pointer	= &arg;
-	arg.type		= ACPI_TYPE_INTEGER;
-	arg.integer.value	= wake_on_close;
-
-	status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
+	status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
 	if (ACPI_FAILURE(status)) {
 		pr_warning(PFX "failed to set lid behavior\n");
 		return 1;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 2e863ad..8eeccba 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -399,7 +399,7 @@
 	printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
 		cpu, current->pid, current->comm);
 
-	printk_address(regs->ip, 1);
+	printk_address(regs->ip);
 }
 
 /* Dump this cpu's state */
diff --git a/arch/x86/um/elfcore.c b/arch/x86/um/elfcore.c
index 6bb49b6..7bb89a2 100644
--- a/arch/x86/um/elfcore.c
+++ b/arch/x86/um/elfcore.c
@@ -11,8 +11,7 @@
 	return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-			       unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
 	if ( vsyscall_ehdr ) {
 		const struct elfhdr *const ehdrp =
@@ -32,17 +31,14 @@
 				phdr.p_offset += ofs;
 			}
 			phdr.p_paddr = 0; /* match other core phdrs */
-			*size += sizeof(phdr);
-			if (*size > limit
-			    || !dump_write(file, &phdr, sizeof(phdr)))
+			if (!dump_emit(cprm, &phdr, sizeof(phdr)))
 				return 0;
 		}
 	}
 	return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-			      unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
 	if ( vsyscall_ehdr ) {
 		const struct elfhdr *const ehdrp =
@@ -55,10 +51,7 @@
 			if (phdrp[i].p_type == PT_LOAD) {
 				void *addr = (void *) phdrp[i].p_vaddr;
 				size_t filesz = phdrp[i].p_filesz;
-
-				*size += filesz;
-				if (*size > limit
-				    || !dump_write(file, addr, filesz))
+				if (!dump_emit(cprm, addr, filesz))
 					return 0;
 			}
 		}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 72074d5..2ada505 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -178,7 +178,7 @@
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
 		ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
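All four conversions follow the same lockless-reader idiom; the only change is the _no_lockdep variant, needed because lockdep instrumentation cannot run in vDSO (userspace) context. The idiom itself, sketched against the same gtod fields:

	unsigned long seq;
	u64 secs;

	do {
		/* no lockdep tracking: this code executes in userspace */
		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
		secs = gtod->wall_time_sec;	/* read the protected data */
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));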
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fdc3ba2..ce563be 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -468,8 +468,8 @@
  * 3        PCD PWT      UC       UC     UC
  * 4    PAT              WB       WC     WB
  * 5    PAT     PWT      WC       WP     WT
- * 6    PAT PCD          UC-      UC     UC-
- * 7    PAT PCD PWT      UC       UC     UC
+ * 6    PAT PCD          UC-      rsv    UC-
+ * 7    PAT PCD PWT      UC       rsv    UC
  */
 
 void xen_set_pat(u64 pat)
@@ -796,8 +796,8 @@
 {
 	spinlock_t *ptl = NULL;
 
-#if USE_SPLIT_PTLOCKS
-	ptl = __pte_lockptr(page);
+#if USE_SPLIT_PTE_PTLOCKS
+	ptl = ptlock_ptr(page);
 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
 
@@ -1637,7 +1637,7 @@
 
 			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 			xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -1671,7 +1671,7 @@
 		if (!PageHighMem(page)) {
 			xen_mc_batch();
 
-			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
 			__set_pfn_prot(pfn, PAGE_KERNEL);
@@ -2328,12 +2328,14 @@
 	return success;
 }
 
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
-				 unsigned int address_bits)
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				 unsigned int address_bits,
+				 dma_addr_t *dma_handle)
 {
 	unsigned long *in_frames = discontig_frames, out_frame;
 	unsigned long  flags;
 	int            success;
+	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
 
 	/*
 	 * Currently an auto-translated guest will not perform I/O, nor will
@@ -2368,15 +2370,17 @@
 
 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
 
+	*dma_handle = virt_to_machine(vstart).maddr;
 	return success ? 0 : -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
 	unsigned long *out_frames = discontig_frames, in_frame;
 	unsigned long  flags;
 	int success;
+	unsigned long vstart;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
@@ -2384,6 +2388,7 @@
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return;
 
+	vstart = (unsigned long)phys_to_virt(pstart);
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
 	spin_lock_irqsave(&xen_reservation_lock, flags);
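Under the new signature the caller hands in a physical address and receives the machine (bus) address back, instead of doing its own virtual/machine conversions. A hedged sketch of a caller (buf, size and address_bits are assumed locals; the two xen_*_contiguous_region() calls are the API changed above):

	dma_addr_t dma_handle;
	phys_addr_t phys = virt_to_phys(buf);
	unsigned int order = get_order(size);

	if (xen_create_contiguous_region(phys, order, address_bits,
					 &dma_handle))
		return -ENOMEM;

	/* ... program the device with dma_handle ... */

	xen_destroy_contiguous_region(phys, order);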
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index a61c7d5..2ae8699 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -799,10 +799,10 @@
 {
 	unsigned topidx, mididx, idx;
 
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+	/* don't track P2M changes in autotranslate guests */
+	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
 		return true;
-	}
+
 	if (unlikely(pfn >= MAX_P2M_PFN)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 9695704..0e98e5d 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -75,8 +75,10 @@
 		xen_swiotlb_init(1, true /* early */);
 		dma_ops = &xen_swiotlb_dma_ops;
 
+#ifdef CONFIG_PCI
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
+#endif
 	}
 }
 
@@ -92,8 +94,10 @@
 		return rc;
 
 	dma_ops = &xen_swiotlb_dma_ops;
+#ifdef CONFIG_PCI
 	/* Make sure ACS will be enabled */
 	pci_request_acs();
+#endif
 
 	return 0;
 }
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 09f3059..68c054f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,7 +556,7 @@
 	}
 #endif /* CONFIG_X86_64 */
 }
-void __cpuinit xen_enable_nmi(void)
+void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
 	if (register_callback(CALLBACKTYPE_nmi, nmi))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 31d0475..c36b325 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -149,7 +149,7 @@
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
 				    cpu,
 				    xen_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    resched_name,
 				    NULL);
 	if (rc < 0)
@@ -161,7 +161,7 @@
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
 				    cpu,
 				    xen_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
@@ -171,7 +171,7 @@
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
-				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
+				     IRQF_PERCPU | IRQF_NOBALANCING,
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
@@ -182,7 +182,7 @@
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
 				    cpu,
 				    xen_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
@@ -201,7 +201,7 @@
 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
 				    cpu,
 				    xen_irq_work_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index be6b860..0e36cde 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -234,7 +234,7 @@
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
 				     dummy_handler,
-				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     IRQF_PERCPU|IRQF_NOBALANCING,
 				     name,
 				     NULL);
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index ee36589..12a1ca7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -443,8 +443,7 @@
 		name = "<timer kasprintf failed>";
 
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-				      IRQF_DISABLED|IRQF_PERCPU|
-				      IRQF_NOBALANCING|IRQF_TIMER|
+				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
 				      IRQF_FORCE_RESUME,
 				      name, NULL);
 
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index cf914c8..d38eb92 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -38,35 +38,46 @@
 	free_page((unsigned long)pgd);
 }
 
-/* Use a slab cache for the pte pages (see also sparc64 implementation) */
-
-extern struct kmem_cache *pgtable_cache;
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					 unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
+	pte_t *ptep;
+	int i;
+
+	ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!ptep)
+		return NULL;
+	for (i = 0; i < 1024; i++)
+		pte_clear(NULL, 0, ptep + i);
+	return ptep;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 					unsigned long addr)
 {
+	pte_t *pte;
 	struct page *page;
 
-	page = virt_to_page(pte_alloc_one_kernel(mm, addr));
-	pgtable_page_ctor(page);
+	pte = pte_alloc_one_kernel(mm, addr);
+	if (!pte)
+		return NULL;
+	page = virt_to_page(pte);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	pgtable_page_dtor(pte);
-	kmem_cache_free(pgtable_cache, page_address(pte));
+	__free_page(pte);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0fdf5d0..2164462 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -220,12 +220,11 @@
 #ifdef CONFIG_MMU
 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 extern void paging_init(void);
-extern void pgtable_cache_init(void);
 #else
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
-static inline void pgtable_cache_init(void) { }
 #endif
+static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index c114483..7db5c22 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -87,4 +87,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif	/* _XTENSA_SOCKET_H */
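SO_MAX_PACING_RATE is new in this window; from userspace it caps a socket's transmit pacing rate in bytes per second. A standalone sketch, assuming libc headers that already define the option (the 1 MB/s value is arbitrary):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	unsigned int rate = 1000000;	/* bytes per second, example value */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
				 &rate, sizeof(rate)) < 0)
		perror("SO_MAX_PACING_RATE");
	return 0;
}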
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index a107757..c43771c 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -50,23 +50,3 @@
 	 */
 	set_ptevaddr_register(PGTABLE_START);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void *addr)
-{
-	pte_t *ptep = (pte_t *)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-			PAGE_SIZE, PAGE_SIZE,
-			SLAB_HWCACHE_ALIGN,
-			pgd_ctor);
-}
diff --git a/block/Makefile b/block/Makefile
index 671a83d..20645e8 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,8 +5,9 @@
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
-			partition-generic.o partitions/
+			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
+			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
+			genhd.o scsi_ioctl.o partition-generic.o partitions/
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index ae6969a..1610b22 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -402,6 +402,11 @@
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
 
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+	u64_stats_init(&stat->syncp);
+}
+
 /**
  * blkg_stat_add - add a value to a blkg_stat
  * @stat: target blkg_stat
@@ -458,6 +463,11 @@
 	blkg_stat_add(to, blkg_stat_read(from));
 }
 
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+	u64_stats_init(&rwstat->syncp);
+}
+
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
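A hedged note on why these helpers appear: u64_stats_sync embeds a seqcount that now must be explicitly initialized (for 32-bit correctness and lockdep), so every blkg_stat/blkg_rwstat needs an init before first use. A sketch with a hypothetical policy structure:

struct example_group_stats {		/* hypothetical policy data */
	struct blkg_stat	service_time;
	struct blkg_rwstat	serviced;
};

static void example_group_stats_init(struct example_group_stats *s)
{
	/* must run before the first blkg_stat_add()/blkg_rwstat_add() */
	blkg_stat_init(&s->service_time);
	blkg_rwstat_init(&s->serviced);
}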
diff --git a/block/blk-core.c b/block/blk-core.c
index 0a00e4e..8bdd012 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -16,6 +16,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -48,7 +49,7 @@
 /*
  * For the allocated request tables
  */
-static struct kmem_cache *request_cachep;
+struct kmem_cache *request_cachep = NULL;
 
 /*
  * For queue allocation
@@ -60,42 +61,6 @@
  */
 static struct workqueue_struct *kblockd_workqueue;
 
-static void drive_stat_acct(struct request *rq, int new_io)
-{
-	struct hd_struct *part;
-	int rw = rq_data_dir(rq);
-	int cpu;
-
-	if (!blk_do_io_stat(rq))
-		return;
-
-	cpu = part_stat_lock();
-
-	if (!new_io) {
-		part = rq->part;
-		part_stat_inc(cpu, part, merges[rw]);
-	} else {
-		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
-		if (!hd_struct_try_get(part)) {
-			/*
-			 * The partition is already being removed,
-			 * the request will be accounted on the disk only
-			 *
-			 * We take a reference on disk->part0 although that
-			 * partition will never be deleted, so we can treat
-			 * it as any other partition.
-			 */
-			part = &rq->rq_disk->part0;
-			hd_struct_get(part);
-		}
-		part_round_stats(cpu, part);
-		part_inc_in_flight(part, rw);
-		rq->part = part;
-	}
-
-	part_stat_unlock();
-}
-
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
 	int nr;
@@ -145,7 +110,6 @@
 	rq->cmd = rq->__cmd;
 	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
-	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
 	rq->part = NULL;
@@ -174,9 +138,9 @@
 {
 	int bit;
 
-	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
-		rq->cmd_flags);
+		(unsigned long long) rq->cmd_flags);
 
 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
 	       (unsigned long long)blk_rq_pos(rq),
@@ -595,9 +559,12 @@
 	if (!q)
 		return NULL;
 
+	if (percpu_counter_init(&q->mq_usage_counter, 0))
+		goto fail_q;
+
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
-		goto fail_q;
+		goto fail_c;
 
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -644,13 +611,19 @@
 	q->bypass_depth = 1;
 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
+	init_waitqueue_head(&q->mq_freeze_wq);
+
 	if (blkcg_init_queue(q))
-		goto fail_id;
+		goto fail_bdi;
 
 	return q;
 
+fail_bdi:
+	bdi_destroy(&q->backing_dev_info);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
+fail_c:
+	percpu_counter_destroy(&q->mq_usage_counter);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
 	return NULL;
@@ -739,9 +712,17 @@
 
 	q->sg_reserved_size = INT_MAX;
 
+	/* Protect q->elevator from elevator_change */
+	mutex_lock(&q->sysfs_lock);
+
 	/* init elevator */
-	if (elevator_init(q, NULL))
+	if (elevator_init(q, NULL)) {
+		mutex_unlock(&q->sysfs_lock);
 		return NULL;
+	}
+
+	mutex_unlock(&q->sysfs_lock);
+
 	return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1109,7 +1090,8 @@
 	goto retry;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q, int rw,
+		gfp_t gfp_mask)
 {
 	struct request *rq;
 
@@ -1126,6 +1108,14 @@
 
 	return rq;
 }
+
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+{
+	if (q->mq_ops)
+		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+	else
+		return blk_old_get_request(q, rw, gfp_mask);
+}
 EXPORT_SYMBOL(blk_get_request);
 
 /**
@@ -1211,7 +1201,7 @@
 static void add_acct_request(struct request_queue *q, struct request *rq,
 			     int where)
 {
-	drive_stat_acct(rq, 1);
+	blk_account_io_start(rq, true);
 	__elv_add_request(q, rq, where);
 }
 
@@ -1272,8 +1262,6 @@
 {
 	if (unlikely(!q))
 		return;
-	if (unlikely(--req->ref_count))
-		return;
 
 	blk_pm_put_request(req);
 
@@ -1302,12 +1290,17 @@
 
 void blk_put_request(struct request *req)
 {
-	unsigned long flags;
 	struct request_queue *q = req->q;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_put_request(q, req);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	if (q->mq_ops)
+		blk_mq_free_request(req);
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		__blk_put_request(q, req);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 }
 EXPORT_SYMBOL(blk_put_request);
 
@@ -1343,8 +1336,8 @@
 }
 EXPORT_SYMBOL_GPL(blk_add_request_payload);
 
-static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-				   struct bio *bio)
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+			    struct bio *bio)
 {
 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 
@@ -1361,12 +1354,12 @@
 	req->__data_len += bio->bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
-	drive_stat_acct(req, 0);
+	blk_account_io_start(req, false);
 	return true;
 }
 
-static bool bio_attempt_front_merge(struct request_queue *q,
-				    struct request *req, struct bio *bio)
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+			     struct bio *bio)
 {
 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 
@@ -1391,12 +1384,12 @@
 	req->__data_len += bio->bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
-	drive_stat_acct(req, 0);
+	blk_account_io_start(req, false);
 	return true;
 }
 
 /**
- * attempt_plug_merge - try to merge with %current's plugged list
+ * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @request_count: out parameter for number of traversed plugged requests
@@ -1412,19 +1405,28 @@
  * reliable access to the elevator outside queue lock.  Only check basic
  * merging parameters without querying the elevator.
  */
-static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
-			       unsigned int *request_count)
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+			    unsigned int *request_count)
 {
 	struct blk_plug *plug;
 	struct request *rq;
 	bool ret = false;
+	struct list_head *plug_list;
+
+	if (blk_queue_nomerges(q))
+		goto out;
 
 	plug = current->plug;
 	if (!plug)
 		goto out;
 	*request_count = 0;
 
-	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
+	if (q->mq_ops)
+		plug_list = &plug->mq_list;
+	else
+		plug_list = &plug->list;
+
+	list_for_each_entry_reverse(rq, plug_list, queuelist) {
 		int el_ret;
 
 		if (rq->q == q)
@@ -1492,7 +1494,7 @@
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (attempt_plug_merge(q, bio, &request_count))
+	if (blk_attempt_plug_merge(q, bio, &request_count))
 		return;
 
 	spin_lock_irq(q->queue_lock);
@@ -1560,7 +1562,7 @@
 			}
 		}
 		list_add_tail(&req->queuelist, &plug->list);
-		drive_stat_acct(req, 1);
+		blk_account_io_start(req, true);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
@@ -2014,7 +2016,7 @@
 }
 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
-static void blk_account_io_completion(struct request *req, unsigned int bytes)
+void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (blk_do_io_stat(req)) {
 		const int rw = rq_data_dir(req);
@@ -2028,7 +2030,7 @@
 	}
 }
 
-static void blk_account_io_done(struct request *req)
+void blk_account_io_done(struct request *req)
 {
 	/*
 	 * Account IO completion.  flush_rq isn't accounted as a
@@ -2076,6 +2078,42 @@
 }
 #endif
 
+void blk_account_io_start(struct request *rq, bool new_io)
+{
+	struct hd_struct *part;
+	int rw = rq_data_dir(rq);
+	int cpu;
+
+	if (!blk_do_io_stat(rq))
+		return;
+
+	cpu = part_stat_lock();
+
+	if (!new_io) {
+		part = rq->part;
+		part_stat_inc(cpu, part, merges[rw]);
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+		if (!hd_struct_try_get(part)) {
+			/*
+			 * The partition is already being removed,
+			 * the request will be accounted on the disk only
+			 *
+			 * We take a reference on disk->part0 although that
+			 * partition will never be deleted, so we can treat
+			 * it as any other partition.
+			 */
+			part = &rq->rq_disk->part0;
+			hd_struct_get(part);
+		}
+		part_round_stats(cpu, part);
+		part_inc_in_flight(part, rw);
+		rq->part = part;
+	}
+
+	part_stat_unlock();
+}
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2227,6 +2265,7 @@
 	if (unlikely(blk_bidi_rq(req)))
 		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
 
+	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 	blk_add_timer(req);
 }
 EXPORT_SYMBOL(blk_start_request);
@@ -2451,7 +2490,6 @@
 	if (req->cmd_flags & REQ_DONTPREP)
 		blk_unprep_request(req);
 
-
 	blk_account_io_done(req);
 
 	if (req->end_io)
@@ -2873,6 +2911,7 @@
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->mq_list);
 	INIT_LIST_HEAD(&plug->cb_list);
 
 	/*
@@ -2970,6 +3009,10 @@
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
 	flush_plug_callbacks(plug, from_schedule);
+
+	if (!list_empty(&plug->mq_list))
+		blk_mq_flush_plug_list(plug, from_schedule);
+
 	if (list_empty(&plug->list))
 		return;
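A pattern worth naming: throughout this series, q->mq_ops is the switch between the new multiqueue path and the legacy request_fn path, exactly as blk_get_request()/blk_put_request() above do. A hedged sketch of a hypothetical caller following the same convention (example_insert() is made up; the calls it dispatches to are real):

static void example_insert(struct request_queue *q, struct request *rq)
{
	if (q->mq_ops) {
		/* multiqueue: per-CPU software queues, no queue_lock */
		blk_mq_insert_request(q, rq, true);
	} else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}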
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ae4f27d..c3edf9d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/sched/sysctl.h>
 
 #include "blk.h"
@@ -24,7 +25,6 @@
 	struct completion *waiting = rq->end_io_data;
 
 	rq->end_io_data = NULL;
-	__blk_put_request(rq->q, rq);
 
 	/*
 	 * complete last, if this is a stack request the process (and thus
@@ -59,6 +59,12 @@
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+
+	if (q->mq_ops) {
+		blk_mq_insert_request(q, rq, true);
+		return;
+	}
+
 	/*
 	 * need to check this before __blk_run_queue(), because rq can
 	 * be freed before that returns.
@@ -103,12 +109,6 @@
 	int err = 0;
 	unsigned long hang_check;
 
-	/*
-	 * we need an extra reference to the request, so we can look at
-	 * it after io completion
-	 */
-	rq->ref_count++;
-
 	if (!rq->sense) {
 		memset(sense, 0, sizeof(sense));
 		rq->sense = sense;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index cc2b827..331e627 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,8 +69,10 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
+#include <linux/blk-mq.h>
 
 #include "blk.h"
+#include "blk-mq.h"
 
 /* FLUSH/FUA sequences */
 enum {
@@ -124,6 +126,24 @@
 	/* make @rq a normal request */
 	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
 	rq->end_io = rq->flush.saved_end_io;
+
+	blk_clear_rq_complete(rq);
+}
+
+static void mq_flush_data_run(struct work_struct *work)
+{
+	struct request *rq;
+
+	rq = container_of(work, struct request, mq_flush_data);
+
+	memset(&rq->csd, 0, sizeof(rq->csd));
+	blk_mq_run_request(rq, true, false);
+}
+
+static void blk_mq_flush_data_insert(struct request *rq)
+{
+	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
+	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
 }
 
 /**
@@ -136,7 +156,7 @@
  * completion and trigger the next step.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
  *
  * RETURNS:
  * %true if requests were added to the dispatch queue, %false otherwise.
@@ -146,7 +166,7 @@
 {
 	struct request_queue *q = rq->q;
 	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
-	bool queued = false;
+	bool queued = false, kicked;
 
 	BUG_ON(rq->flush.seq & seq);
 	rq->flush.seq |= seq;
@@ -167,8 +187,12 @@
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		list_add(&rq->queuelist, &q->queue_head);
-		queued = true;
+		if (q->mq_ops)
+			blk_mq_flush_data_insert(rq);
+		else {
+			list_add(&rq->queuelist, &q->queue_head);
+			queued = true;
+		}
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -181,28 +205,43 @@
 		BUG_ON(!list_empty(&rq->queuelist));
 		list_del_init(&rq->flush.list);
 		blk_flush_restore_request(rq);
-		__blk_end_request_all(rq, error);
+		if (q->mq_ops)
+			blk_mq_end_io(rq, error);
+		else
+			__blk_end_request_all(rq, error);
 		break;
 
 	default:
 		BUG();
 	}
 
-	return blk_kick_flush(q) | queued;
+	kicked = blk_kick_flush(q);
+	/* blk_mq_run_flush will run queue */
+	if (q->mq_ops)
+		return queued;
+	return kicked | queued;
 }
 
 static void flush_end_io(struct request *flush_rq, int error)
 {
 	struct request_queue *q = flush_rq->q;
-	struct list_head *running = &q->flush_queue[q->flush_running_idx];
+	struct list_head *running;
 	bool queued = false;
 	struct request *rq, *n;
+	unsigned long flags = 0;
 
+	if (q->mq_ops) {
+		blk_mq_free_request(flush_rq);
+		spin_lock_irqsave(&q->mq_flush_lock, flags);
+	}
+	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
 	/* account completion of the flush request */
 	q->flush_running_idx ^= 1;
-	elv_completed_request(q, flush_rq);
+
+	if (!q->mq_ops)
+		elv_completed_request(q, flush_rq);
 
 	/* and push the waiting requests to the next stage */
 	list_for_each_entry_safe(rq, n, running, flush.list) {
@@ -223,9 +262,48 @@
 	 * directly into request_fn may confuse the driver.  Always use
 	 * kblockd.
 	 */
-	if (queued || q->flush_queue_delayed)
-		blk_run_queue_async(q);
+	if (queued || q->flush_queue_delayed) {
+		if (!q->mq_ops)
+			blk_run_queue_async(q);
+		else
+		/*
+		 * This can be optimized to only run queues with requests
+		 * queued if necessary.
+		 */
+			blk_mq_run_queues(q, true);
+	}
 	q->flush_queue_delayed = 0;
+	if (q->mq_ops)
+		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+}
+
+static void mq_flush_work(struct work_struct *work)
+{
+	struct request_queue *q;
+	struct request *rq;
+
+	q = container_of(work, struct request_queue, mq_flush_work);
+
+	/* We don't need to set REQ_FLUSH_SEQ; it's only here for consistency */
+	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
+		__GFP_WAIT|GFP_ATOMIC, true);
+	rq->cmd_type = REQ_TYPE_FS;
+	rq->end_io = flush_end_io;
+
+	blk_mq_run_request(rq, true, false);
+}
+
+/*
+ * We can't use q->flush_rq directly, because it doesn't have a tag and is
+ * not in hctx->rqs[], so we must allocate a new request. Since we can't
+ * sleep here, we offload the work to a workqueue.
+ *
+ * Note: we assume a flush request that finishes in any hardware queue will
+ * flush the whole disk cache.
+ */
+static void mq_run_flush(struct request_queue *q)
+{
+	kblockd_schedule_work(q, &q->mq_flush_work);
 }
 
 /**
@@ -236,7 +314,7 @@
  * Please read the comment at the top of this file for more info.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
  *
  * RETURNS:
  * %true if flush was issued, %false otherwise.
@@ -261,13 +339,18 @@
 	 * Issue flush and toggle pending_idx.  This makes pending_idx
 	 * different from running_idx, which means flush is in flight.
 	 */
+	q->flush_pending_idx ^= 1;
+	if (q->mq_ops) {
+		mq_run_flush(q);
+		return true;
+	}
+
 	blk_rq_init(q, &q->flush_rq);
 	q->flush_rq.cmd_type = REQ_TYPE_FS;
 	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
 	q->flush_rq.rq_disk = first_rq->rq_disk;
 	q->flush_rq.end_io = flush_end_io;
 
-	q->flush_pending_idx ^= 1;
 	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
 	return true;
 }
@@ -284,16 +367,37 @@
 		blk_run_queue_async(q);
 }
 
+static void mq_flush_data_end_io(struct request *rq, int error)
+{
+	struct request_queue *q = rq->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	unsigned long flags;
+
+	ctx = rq->mq_ctx;
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	/*
+	 * After populating an empty queue, kick it to avoid a stall.  Read
+	 * the comment in flush_end_io().
+	 */
+	spin_lock_irqsave(&q->mq_flush_lock, flags);
+	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+		blk_mq_run_hw_queue(hctx, true);
+	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+}
+
 /**
  * blk_insert_flush - insert a new FLUSH/FUA request
  * @rq: request to insert
  *
  * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
+ * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -316,7 +420,10 @@
 	 * complete the request.
 	 */
 	if (!policy) {
-		__blk_end_bidi_request(rq, 0, 0, 0);
+		if (q->mq_ops)
+			blk_mq_end_io(rq, 0);
+		else
+			__blk_end_bidi_request(rq, 0, 0, 0);
 		return;
 	}
 
@@ -329,7 +436,10 @@
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		list_add_tail(&rq->queuelist, &q->queue_head);
+		if (q->mq_ops) {
+			blk_mq_run_request(rq, false, true);
+		} else
+			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
 
@@ -341,6 +451,14 @@
 	INIT_LIST_HEAD(&rq->flush.list);
 	rq->cmd_flags |= REQ_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
+	if (q->mq_ops) {
+		rq->end_io = mq_flush_data_end_io;
+
+		spin_lock_irq(&q->mq_flush_lock);
+		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+		spin_unlock_irq(&q->mq_flush_lock);
+		return;
+	}
 	rq->end_io = flush_data_end_io;
 
 	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
@@ -453,3 +571,9 @@
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
+
+void blk_mq_init_flush(struct request_queue *q)
+{
+	spin_lock_init(&q->mq_flush_lock);
+	INIT_WORK(&q->mq_flush_work, mq_flush_work);
+}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 46cd7bd..242df01 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -6,7 +6,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 #include <linux/slab.h>
 
 #include "blk.h"
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 4b8d9b54..1855bf5 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -35,7 +35,7 @@
 	unsigned long flags;
 
 	local_irq_save(flags);
-	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -79,7 +79,7 @@
 
 static void blk_iopoll_softirq(struct softirq_action *h)
 {
-	struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = blk_iopoll_budget;
 	unsigned long start_time = jiffies;
 
@@ -201,7 +201,7 @@
 
 		local_irq_disable();
 		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
-				 &__get_cpu_var(blk_cpu_iopoll));
+				 this_cpu_ptr(&blk_cpu_iopoll));
 		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 		local_irq_enable();
 	}
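The __get_cpu_var() to this_cpu_ptr() conversions are mechanical: &__get_cpu_var(v) and this_cpu_ptr(&v) yield the same per-CPU address, the latter being the form the kernel is standardizing on. A sketch with a hypothetical per-CPU list:

static DEFINE_PER_CPU(struct list_head, example_cpu_list);	/* hypothetical */

static void example_add_local(struct list_head *entry)
{
	unsigned long flags;

	/* disable IRQs so we stay on this CPU while touching its list */
	local_irq_save(flags);
	list_add_tail(entry, this_cpu_ptr(&example_cpu_list));
	local_irq_restore(flags);
}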
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d6f50d5..9b5b561 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,8 +43,8 @@
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	sector_t max_discard_sectors;
-	sector_t granularity, alignment;
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -58,16 +58,14 @@
 
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = bdev_discard_alignment(bdev) >> 9;
-	alignment = sector_div(alignment, granularity);
+	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	sector_div(max_discard_sectors, granularity);
-	max_discard_sectors *= granularity;
+	max_discard_sectors -= max_discard_sectors % granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
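The arithmetic change works because both operands now fit in 32 bits, so a plain % replaces sector_div(), and rounding the limit down keeps split requests aligned. A standalone check with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 8;		/* sectors; example value */
	unsigned int max_discard = 65535;	/* example device limit */
	unsigned int align_bytes = 1 << 20;	/* example discard alignment */
	int alignment = (align_bytes >> 9) % granularity;

	/* round down so requests stay aligned after a split */
	max_discard -= max_discard % granularity;
	printf("alignment=%d sectors, max_discard=%u sectors\n",
	       alignment, max_discard);
	return 0;
}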
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5f24482..1ffc589 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -308,6 +308,17 @@
 	return ll_new_hw_segment(q, req, bio);
 }
 
+/*
+ * blk-mq uses req->special to carry normal driver per-request payload; it
+ * does not indicate a prepared command that we cannot merge with.
+ */
+static bool req_no_special_merge(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	return !q->mq_ops && req->special;
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -319,7 +330,7 @@
 	 * First check if the either of the requests are re-queued
 	 * requests.  Can't merge them if they are.
 	 */
-	if (req->special || next->special)
+	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
 	/*
@@ -416,7 +427,7 @@
 
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
-	    || next->special)
+	    || req_no_special_merge(next))
 		return 0;
 
 	if (req->cmd_flags & REQ_WRITE_SAME &&
@@ -515,7 +526,7 @@
 		return false;
 
 	/* must be same device and not a special request */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
 		return false;
 
 	/* only merge integrity protected bio into ditto rq */
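
All three merge paths now consult the helper instead of testing rq->special
directly. A sketch of the resulting decision table:

	/*
	 * req_no_special_merge() outcomes:
	 *
	 *   legacy queue (!q->mq_ops), rq->special set -> true  (refuse merge)
	 *   blk-mq queue (q->mq_ops),  rq->special set -> false (may merge)
	 *   rq->special == NULL, either queue type     -> false (may merge)
	 *
	 * Only a driver-prepared command on the legacy path blocks merging.
	 */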
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
new file mode 100644
index 0000000..0045ace
--- /dev/null
+++ b/block/blk-mq-cpu.c
@@ -0,0 +1,93 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+
+static LIST_HEAD(blk_mq_cpu_notify_list);
+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+
+static int blk_mq_main_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long) hcpu;
+	struct blk_mq_cpu_notifier *notify;
+
+	spin_lock(&blk_mq_cpu_notify_lock);
+
+	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
+		notify->notify(notify->data, action, cpu);
+
+	spin_unlock(&blk_mq_cpu_notify_lock);
+	return NOTIFY_OK;
+}
+
+static void blk_mq_cpu_notify(void *data, unsigned long action,
+			      unsigned int cpu)
+{
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		/*
+		 * If the CPU goes away, ensure that we run any pending
+		 * completions.
+		 */
+		struct llist_node *node;
+		struct request *rq;
+
+		local_irq_disable();
+
+		node = llist_del_all(&per_cpu(ipi_lists, cpu));
+		while (node) {
+			struct llist_node *next = node->next;
+
+			rq = llist_entry(node, struct request, ll_list);
+			__blk_mq_end_io(rq, rq->errors);
+			node = next;
+		}
+
+		local_irq_enable();
+	}
+}
+
+static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
+	.notifier_call	= blk_mq_main_cpu_notify,
+};
+
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+	BUG_ON(!notifier->notify);
+
+	spin_lock(&blk_mq_cpu_notify_lock);
+	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+	spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+	spin_lock(&blk_mq_cpu_notify_lock);
+	list_del(&notifier->list);
+	spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+			      void (*fn)(void *, unsigned long, unsigned int),
+			      void *data)
+{
+	notifier->notify = fn;
+	notifier->data = data;
+}
+
+static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
+	.notify = blk_mq_cpu_notify,
+};
+
+void __init blk_mq_cpu_init(void)
+{
+	register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
+	blk_mq_register_cpu_notifier(&cpu_notifier);
+}
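
A hedged sketch of how a consumer would use the notifier API above; the
callback and all names are hypothetical:

	/* Hypothetical consumer that wants to know when a CPU dies. */
	static void example_cpu_notify(void *data, unsigned long action,
				       unsigned int cpu)
	{
		if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
			pr_info("example: cpu%u went offline\n", cpu);
	}

	static struct blk_mq_cpu_notifier example_notifier;

	static void example_init(void)
	{
		blk_mq_init_cpu_notifier(&example_notifier, example_cpu_notify,
					 NULL);
		blk_mq_register_cpu_notifier(&example_notifier);
	}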
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
new file mode 100644
index 0000000..f872127
--- /dev/null
+++ b/block/blk-mq-cpumap.c
@@ -0,0 +1,108 @@
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+
+static void show_map(unsigned int *map, unsigned int nr)
+{
+	int i;
+
+	pr_info("blk-mq: CPU -> queue map\n");
+	for_each_online_cpu(i)
+		pr_info("  CPU%2u -> Queue %u\n", i, map[i]);
+}
+
+static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
+			      const int cpu)
+{
+	return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
+}
+
+static int get_first_sibling(unsigned int cpu)
+{
+	unsigned int ret;
+
+	ret = cpumask_first(topology_thread_cpumask(cpu));
+	if (ret < nr_cpu_ids)
+		return ret;
+
+	return cpu;
+}
+
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+{
+	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+	cpumask_var_t cpus;
+
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
+		return 1;
+
+	cpumask_clear(cpus);
+	nr_cpus = nr_uniq_cpus = 0;
+	for_each_online_cpu(i) {
+		nr_cpus++;
+		first_sibling = get_first_sibling(i);
+		if (!cpumask_test_cpu(first_sibling, cpus))
+			nr_uniq_cpus++;
+		cpumask_set_cpu(i, cpus);
+	}
+
+	queue = 0;
+	for_each_possible_cpu(i) {
+		if (!cpu_online(i)) {
+			map[i] = 0;
+			continue;
+		}
+
+		/*
+		 * Easy case - we have equal or more hardware queues. Or
+		 * there are no thread siblings to take into account. Do
+		 * 1:1 if enough, or sequential mapping if less.
+		 */
+		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
+			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+			queue++;
+			continue;
+		}
+
+		/*
+		 * Fewer than nr_cpus queues, and we have some number of
+		 * threads per core. Map sibling threads to the same
+		 * queue.
+		 */
+		first_sibling = get_first_sibling(i);
+		if (first_sibling == i) {
+			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+							queue);
+			queue++;
+		} else
+			map[i] = map[first_sibling];
+	}
+
+	show_map(map, nr_cpus);
+	free_cpumask_var(cpus);
+	return 0;
+}
+
+unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
+{
+	unsigned int *map;
+
+	/* If CPUs are offline, map them to the first hctx */
+	map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
+				reg->numa_node);
+	if (!map)
+		return NULL;
+
+	if (!blk_mq_update_queue_map(map, reg->nr_hw_queues))
+		return map;
+
+	kfree(map);
+	return NULL;
+}
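
For intuition, the map the code above produces for an assumed topology of 8
online CPUs, no SMT, and 2 hardware queues:

	/*
	 * cpu_to_queue_index() divides by (8 + 2 - 1) / 2 = 4, giving:
	 *
	 *   CPU 0 1 2 3 -> queue 0
	 *   CPU 4 5 6 7 -> queue 1
	 *
	 * With SMT and fewer queues than CPUs, only the first thread of
	 * each core advances 'queue'; siblings copy map[first_sibling].
	 */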
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
new file mode 100644
index 0000000..ba6cf8e
--- /dev/null
+++ b/block/blk-mq-sysfs.c
@@ -0,0 +1,384 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static void blk_mq_sysfs_release(struct kobject *kobj)
+{
+}
+
+struct blk_mq_ctx_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct blk_mq_ctx *, char *);
+	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
+};
+
+struct blk_mq_hw_ctx_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
+	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
+};
+
+static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
+				 char *page)
+{
+	struct blk_mq_ctx_sysfs_entry *entry;
+	struct blk_mq_ctx *ctx;
+	struct request_queue *q;
+	ssize_t res;
+
+	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+	q = ctx->queue;
+
+	if (!entry->show)
+		return -EIO;
+
+	res = -ENOENT;
+	mutex_lock(&q->sysfs_lock);
+	if (!blk_queue_dying(q))
+		res = entry->show(ctx, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
+				  const char *page, size_t length)
+{
+	struct blk_mq_ctx_sysfs_entry *entry;
+	struct blk_mq_ctx *ctx;
+	struct request_queue *q;
+	ssize_t res;
+
+	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+	q = ctx->queue;
+
+	if (!entry->store)
+		return -EIO;
+
+	res = -ENOENT;
+	mutex_lock(&q->sysfs_lock);
+	if (!blk_queue_dying(q))
+		res = entry->store(ctx, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
+				    struct attribute *attr, char *page)
+{
+	struct blk_mq_hw_ctx_sysfs_entry *entry;
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q;
+	ssize_t res;
+
+	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+	q = hctx->queue;
+
+	if (!entry->show)
+		return -EIO;
+
+	res = -ENOENT;
+	mutex_lock(&q->sysfs_lock);
+	if (!blk_queue_dying(q))
+		res = entry->show(hctx, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
+				     struct attribute *attr, const char *page,
+				     size_t length)
+{
+	struct blk_mq_hw_ctx_sysfs_entry *entry;
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q;
+	ssize_t res;
+
+	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+	q = hctx->queue;
+
+	if (!entry->store)
+		return -EIO;
+
+	res = -ENOENT;
+	mutex_lock(&q->sysfs_lock);
+	if (!blk_queue_dying(q))
+		res = entry->store(hctx, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
+{
+	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
+				ctx->rq_dispatched[0]);
+}
+
+static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
+{
+	return sprintf(page, "%lu\n", ctx->rq_merged);
+}
+
+static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+{
+	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
+				ctx->rq_completed[0]);
+}
+
+static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+{
+	char *start_page = page;
+	struct request *rq;
+
+	page += sprintf(page, "%s:\n", msg);
+
+	list_for_each_entry(rq, list, queuelist)
+		page += sprintf(page, "\t%p\n", rq);
+
+	return page - start_page;
+}
+
+static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
+{
+	ssize_t ret;
+
+	spin_lock(&ctx->lock);
+	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
+	spin_unlock(&ctx->lock);
+
+	return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
+					   char *page)
+{
+	return sprintf(page, "%lu\n", hctx->queued);
+}
+
+static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+	return sprintf(page, "%lu\n", hctx->run);
+}
+
+static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
+					       char *page)
+{
+	char *start_page = page;
+	int i;
+
+	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
+
+	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
+		unsigned long d = 1U << (i - 1);
+
+		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
+	}
+
+	return page - start_page;
+}
+
+static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
+					    char *page)
+{
+	ssize_t ret;
+
+	spin_lock(&hctx->lock);
+	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
+	spin_unlock(&hctx->lock);
+
+	return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+	ssize_t ret;
+
+	spin_lock(&hctx->lock);
+	ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
+	spin_unlock(&hctx->lock);
+
+	return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
+					 const char *page, size_t len)
+{
+	struct blk_mq_ctx *ctx;
+	unsigned long ret;
+	unsigned int i;
+
+	if (kstrtoul(page, 10, &ret)) {
+		pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
+		return -EINVAL;
+	}
+
+	spin_lock(&hctx->lock);
+	if (ret)
+		hctx->flags |= BLK_MQ_F_SHOULD_IPI;
+	else
+		hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
+	spin_unlock(&hctx->lock);
+
+	hctx_for_each_ctx(hctx, ctx, i)
+		ctx->ipi_redirect = !!ret;
+
+	return len;
+}
+
+static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+	return blk_mq_tag_sysfs_show(hctx->tags, page);
+}
+
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
+	.attr = {.name = "dispatched", .mode = S_IRUGO },
+	.show = blk_mq_sysfs_dispatched_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
+	.attr = {.name = "merged", .mode = S_IRUGO },
+	.show = blk_mq_sysfs_merged_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
+	.attr = {.name = "completed", .mode = S_IRUGO },
+	.show = blk_mq_sysfs_completed_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
+	.attr = {.name = "rq_list", .mode = S_IRUGO },
+	.show = blk_mq_sysfs_rq_list_show,
+};
+
+static struct attribute *default_ctx_attrs[] = {
+	&blk_mq_sysfs_dispatched.attr,
+	&blk_mq_sysfs_merged.attr,
+	&blk_mq_sysfs_completed.attr,
+	&blk_mq_sysfs_rq_list.attr,
+	NULL,
+};
+
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
+	.attr = {.name = "queued", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_queued_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
+	.attr = {.name = "run", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_run_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
+	.attr = {.name = "dispatched", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_dispatched_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
+	.attr = {.name = "pending", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_rq_list_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
+	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
+	.show = blk_mq_hw_sysfs_ipi_show,
+	.store = blk_mq_hw_sysfs_ipi_store,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
+	.attr = {.name = "tags", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_tags_show,
+};
+
+static struct attribute *default_hw_ctx_attrs[] = {
+	&blk_mq_hw_sysfs_queued.attr,
+	&blk_mq_hw_sysfs_run.attr,
+	&blk_mq_hw_sysfs_dispatched.attr,
+	&blk_mq_hw_sysfs_pending.attr,
+	&blk_mq_hw_sysfs_ipi.attr,
+	&blk_mq_hw_sysfs_tags.attr,
+	NULL,
+};
+
+static const struct sysfs_ops blk_mq_sysfs_ops = {
+	.show	= blk_mq_sysfs_show,
+	.store	= blk_mq_sysfs_store,
+};
+
+static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
+	.show	= blk_mq_hw_sysfs_show,
+	.store	= blk_mq_hw_sysfs_store,
+};
+
+static struct kobj_type blk_mq_ktype = {
+	.sysfs_ops	= &blk_mq_sysfs_ops,
+	.release	= blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_ctx_ktype = {
+	.sysfs_ops	= &blk_mq_sysfs_ops,
+	.default_attrs	= default_ctx_attrs,
+	.release	= blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_hw_ktype = {
+	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
+	.default_attrs	= default_hw_ctx_attrs,
+	.release	= blk_mq_sysfs_release,
+};
+
+void blk_mq_unregister_disk(struct gendisk *disk)
+{
+	struct request_queue *q = disk->queue;
+
+	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
+	kobject_del(&q->mq_kobj);
+
+	kobject_put(&disk_to_dev(disk)->kobj);
+}
+
+int blk_mq_register_disk(struct gendisk *disk)
+{
+	struct device *dev = disk_to_dev(disk);
+	struct request_queue *q = disk->queue;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	int ret, i, j;
+
+	kobject_init(&q->mq_kobj, &blk_mq_ktype);
+
+	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+	if (ret < 0)
+		return ret;
+
+	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
+		ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
+		if (ret)
+			break;
+
+		if (!hctx->nr_ctx)
+			continue;
+
+		hctx_for_each_ctx(hctx, ctx, j) {
+			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
+			ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+			if (ret)
+				break;
+		}
+	}
+
+	if (ret) {
+		blk_mq_unregister_disk(disk);
+		return ret;
+	}
+
+	return 0;
+}
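
The nested kobject_add() calls above yield a per-queue sysfs tree. A sketch of
the resulting layout, assuming a disk named nullb0:

	/*
	 * /sys/block/nullb0/mq/        q->mq_kobj
	 * /sys/block/nullb0/mq/0/      hctx 0: queued, run, dispatched,
	 *                              pending, ipi_redirect, tags
	 * /sys/block/nullb0/mq/0/cpu0/ ctx for CPU 0: dispatched, merged,
	 *                              completed, rq_list
	 */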
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
new file mode 100644
index 0000000..d64a02f
--- /dev/null
+++ b/block/blk-mq-tag.c
@@ -0,0 +1,204 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu_ida.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+/*
+ * Per tagged queue (tag address space) map
+ */
+struct blk_mq_tags {
+	unsigned int nr_tags;
+	unsigned int nr_reserved_tags;
+	unsigned int nr_batch_move;
+	unsigned int nr_max_cache;
+
+	struct percpu_ida free_tags;
+	struct percpu_ida reserved_tags;
+};
+
+void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
+{
+	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
+	blk_mq_put_tag(tags, tag);
+}
+
+bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+{
+	return !tags ||
+		percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
+}
+
+static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
+{
+	int tag;
+
+	tag = percpu_ida_alloc(&tags->free_tags, gfp);
+	if (tag < 0)
+		return BLK_MQ_TAG_FAIL;
+	return tag + tags->nr_reserved_tags;
+}
+
+static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
+					      gfp_t gfp)
+{
+	int tag;
+
+	if (unlikely(!tags->nr_reserved_tags)) {
+		WARN_ON_ONCE(1);
+		return BLK_MQ_TAG_FAIL;
+	}
+
+	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+	if (tag < 0)
+		return BLK_MQ_TAG_FAIL;
+	return tag;
+}
+
+unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
+{
+	if (!reserved)
+		return __blk_mq_get_tag(tags, gfp);
+
+	return __blk_mq_get_reserved_tag(tags, gfp);
+}
+
+static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
+{
+	BUG_ON(tag >= tags->nr_tags);
+
+	percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
+}
+
+static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
+				      unsigned int tag)
+{
+	BUG_ON(tag >= tags->nr_reserved_tags);
+
+	percpu_ida_free(&tags->reserved_tags, tag);
+}
+
+void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
+{
+	if (tag >= tags->nr_reserved_tags)
+		__blk_mq_put_tag(tags, tag);
+	else
+		__blk_mq_put_reserved_tag(tags, tag);
+}
+
+static int __blk_mq_tag_iter(unsigned id, void *data)
+{
+	unsigned long *tag_map = data;
+	__set_bit(id, tag_map);
+	return 0;
+}
+
+void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
+			  void (*fn)(void *, unsigned long *), void *data)
+{
+	unsigned long *tag_map;
+	size_t map_size;
+
+	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
+	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
+	if (!tag_map)
+		return;
+
+	percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
+	if (tags->nr_reserved_tags)
+		percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
+			tag_map);
+
+	fn(data, tag_map);
+	kfree(tag_map);
+}
+
+struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
+				     unsigned int reserved_tags, int node)
+{
+	unsigned int nr_tags, nr_cache;
+	struct blk_mq_tags *tags;
+	int ret;
+
+	if (total_tags > BLK_MQ_TAG_MAX) {
+		pr_err("blk-mq: tag depth too large\n");
+		return NULL;
+	}
+
+	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
+	if (!tags)
+		return NULL;
+
+	nr_tags = total_tags - reserved_tags;
+	nr_cache = nr_tags / num_possible_cpus();
+
+	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
+		nr_cache = BLK_MQ_TAG_CACHE_MIN;
+	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
+		nr_cache = BLK_MQ_TAG_CACHE_MAX;
+
+	tags->nr_tags = total_tags;
+	tags->nr_reserved_tags = reserved_tags;
+	tags->nr_max_cache = nr_cache;
+	tags->nr_batch_move = max(1u, nr_cache / 2);
+
+	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
+				tags->nr_reserved_tags,
+				tags->nr_max_cache,
+				tags->nr_batch_move);
+	if (ret)
+		goto err_free_tags;
+
+	if (reserved_tags) {
+		/*
+		 * With max_cache and batch set to 1, the allocator falls back
+		 * to no caching. It's fine if reserved tag allocation is slow.
+		 */
+		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
+				1, 1);
+		if (ret)
+			goto err_reserved_tags;
+	}
+
+	return tags;
+
+err_reserved_tags:
+	percpu_ida_destroy(&tags->free_tags);
+err_free_tags:
+	kfree(tags);
+	return NULL;
+}
+
+void blk_mq_free_tags(struct blk_mq_tags *tags)
+{
+	percpu_ida_destroy(&tags->free_tags);
+	percpu_ida_destroy(&tags->reserved_tags);
+	kfree(tags);
+}
+
+ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
+{
+	char *orig_page = page;
+	int cpu;
+
+	if (!tags)
+		return 0;
+
+	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
+			" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
+			tags->nr_batch_move, tags->nr_max_cache);
+
+	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
+			percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
+			percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
+
+	for_each_possible_cpu(cpu) {
+		page += sprintf(page, "  cpu%02u: nr_free=%u\n", cpu,
+				percpu_ida_free_tags(&tags->free_tags, cpu));
+	}
+
+	return page - orig_page;
+}
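
A hedged sketch of the tag lifecycle from a caller's point of view; the depth,
reservation count, and function name are assumed values:

	static int example_tag_cycle(void)
	{
		struct blk_mq_tags *tags;
		unsigned int tag;

		/* 64 tags total, 1 reserved, no NUMA preference. */
		tags = blk_mq_init_tags(64, 1, NUMA_NO_NODE);
		if (!tags)
			return -ENOMEM;

		/* Normal tags come back offset past the reserved range. */
		tag = blk_mq_get_tag(tags, GFP_KERNEL, false);
		if (tag != BLK_MQ_TAG_FAIL)
			blk_mq_put_tag(tags, tag);

		blk_mq_free_tags(tags);
		return 0;
	}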
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
new file mode 100644
index 0000000..947ba2c
--- /dev/null
+++ b/block/blk-mq-tag.h
@@ -0,0 +1,27 @@
+#ifndef INT_BLK_MQ_TAG_H
+#define INT_BLK_MQ_TAG_H
+
+struct blk_mq_tags;
+
+extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
+extern void blk_mq_free_tags(struct blk_mq_tags *tags);
+
+extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved);
+extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
+extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
+extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
+
+enum {
+	BLK_MQ_TAG_CACHE_MIN	= 1,
+	BLK_MQ_TAG_CACHE_MAX	= 64,
+};
+
+enum {
+	BLK_MQ_TAG_FAIL		= -1U,
+	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,
+	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,
+};
+
+#endif
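
Because BLK_MQ_TAG_FAIL is -1U, every other unsigned value up to
BLK_MQ_TAG_MAX (0xfffffffe) is a usable tag, so a caller-side validity check
(the helper below is hypothetical, not part of this header) reduces to one
comparison:

	static inline bool example_tag_valid(unsigned int tag)
	{
		return tag != BLK_MQ_TAG_FAIL;
	}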
diff --git a/block/blk-mq.c b/block/blk-mq.c
new file mode 100644
index 0000000..862f458
--- /dev/null
+++ b/block/blk-mq.c
@@ -0,0 +1,1500 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+#include <linux/llist.h>
+#include <linux/list_sort.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+#include <linux/sched/sysctl.h>
+#include <linux/delay.h>
+
+#include <trace/events/block.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+
+DEFINE_PER_CPU(struct llist_head, ipi_lists);
+
+static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+					   unsigned int cpu)
+{
+	return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+	return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+	put_cpu();
+}
+
+/*
+ * Check if any of the ctx's have pending work in this hardware queue
+ */
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+{
+	unsigned int i;
+
+	for (i = 0; i < hctx->nr_ctx_map; i++)
+		if (hctx->ctx_map[i])
+			return true;
+
+	return false;
+}
+
+/*
+ * Mark this ctx as having pending work in this hardware queue
+ */
+static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
+				     struct blk_mq_ctx *ctx)
+{
+	if (!test_bit(ctx->index_hw, hctx->ctx_map))
+		set_bit(ctx->index_hw, hctx->ctx_map);
+}
+
+static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
+				       bool reserved)
+{
+	struct request *rq;
+	unsigned int tag;
+
+	tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
+	if (tag != BLK_MQ_TAG_FAIL) {
+		rq = hctx->rqs[tag];
+		rq->tag = tag;
+
+		return rq;
+	}
+
+	return NULL;
+}
+
+static int blk_mq_queue_enter(struct request_queue *q)
+{
+	int ret;
+
+	__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	smp_wmb();
+	/* we would have trouble freezing the queue while it's initializing */
+	if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
+		return 0;
+
+	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+
+	spin_lock_irq(q->queue_lock);
+	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
+		!blk_queue_bypass(q), *q->queue_lock);
+	/* increment usage with the lock held so freeze_queue can't run here */
+	if (!ret)
+		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static void blk_mq_queue_exit(struct request_queue *q)
+{
+	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+}
+
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+static void blk_mq_freeze_queue(struct request_queue *q)
+{
+	bool drain;
+
+	spin_lock_irq(q->queue_lock);
+	drain = !q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	if (!drain)
+		return;
+
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
+static void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+	bool wake = false;
+
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth) {
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+		wake = true;
+	}
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+	if (wake)
+		wake_up_all(&q->mq_freeze_wq);
+}
+
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
+{
+	return blk_mq_has_free_tags(hctx->tags);
+}
+EXPORT_SYMBOL(blk_mq_can_queue);
+
+static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
+			       unsigned int rw_flags)
+{
+	rq->mq_ctx = ctx;
+	rq->cmd_flags = rw_flags;
+	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+}
+
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
+{
+	return blk_mq_alloc_rq(hctx, gfp, reserved);
+}
+
+static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
+						   int rw, gfp_t gfp,
+						   bool reserved)
+{
+	struct request *rq;
+
+	do {
+		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
+		if (rq) {
+			blk_mq_rq_ctx_init(ctx, rq, rw);
+			break;
+		} else if (!(gfp & __GFP_WAIT))
+			break;
+
+		blk_mq_put_ctx(ctx);
+		__blk_mq_run_hw_queue(hctx);
+		blk_mq_wait_for_tags(hctx->tags);
+	} while (1);
+
+	return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved)
+{
+	struct request *rq;
+
+	if (blk_mq_queue_enter(q))
+		return NULL;
+
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	blk_mq_put_ctx(rq->mq_ctx);
+	return rq;
+}
+
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
+					      gfp_t gfp)
+{
+	struct request *rq;
+
+	if (blk_mq_queue_enter(q))
+		return NULL;
+
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
+	blk_mq_put_ctx(rq->mq_ctx);
+	return rq;
+}
+EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
+
+/*
+ * Re-init and set pdu, if we have it
+ */
+static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	blk_rq_init(hctx->queue, rq);
+
+	if (hctx->cmd_size)
+		rq->special = blk_mq_rq_to_pdu(rq);
+}
+
+static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
+				  struct blk_mq_ctx *ctx, struct request *rq)
+{
+	const int tag = rq->tag;
+	struct request_queue *q = rq->q;
+
+	blk_mq_rq_init(hctx, rq);
+	blk_mq_put_tag(hctx->tags, tag);
+
+	blk_mq_queue_exit(q);
+}
+
+void blk_mq_free_request(struct request *rq)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q = rq->q;
+
+	ctx->rq_completed[rq_is_sync(rq)]++;
+
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	__blk_mq_free_request(hctx, ctx, rq);
+}
+
+static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+{
+	if (error)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		error = -EIO;
+
+	if (unlikely(rq->cmd_flags & REQ_QUIET))
+		set_bit(BIO_QUIET, &bio->bi_flags);
+
+	/* don't actually finish bio if it's part of flush sequence */
+	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
+		bio_endio(bio, error);
+}
+
+void blk_mq_complete_request(struct request *rq, int error)
+{
+	struct bio *bio = rq->bio;
+	unsigned int bytes = 0;
+
+	trace_block_rq_complete(rq->q, rq);
+
+	while (bio) {
+		struct bio *next = bio->bi_next;
+
+		bio->bi_next = NULL;
+		bytes += bio->bi_size;
+		blk_mq_bio_endio(rq, bio, error);
+		bio = next;
+	}
+
+	blk_account_io_completion(rq, bytes);
+
+	if (rq->end_io)
+		rq->end_io(rq, error);
+	else
+		blk_mq_free_request(rq);
+
+	blk_account_io_done(rq);
+}
+
+void __blk_mq_end_io(struct request *rq, int error)
+{
+	if (!blk_mark_rq_complete(rq))
+		blk_mq_complete_request(rq, error);
+}
+
+#if defined(CONFIG_SMP)
+
+/*
+ * Called with interrupts disabled.
+ */
+static void ipi_end_io(void *data)
+{
+	struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
+	struct llist_node *entry, *next;
+	struct request *rq;
+
+	entry = llist_del_all(list);
+
+	while (entry) {
+		next = entry->next;
+		rq = llist_entry(entry, struct request, ll_list);
+		__blk_mq_end_io(rq, rq->errors);
+		entry = next;
+	}
+}
+
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+			  struct request *rq, const int error)
+{
+	struct call_single_data *data = &rq->csd;
+
+	rq->errors = error;
+	rq->ll_list.next = NULL;
+
+	/*
+	 * If the list is non-empty, an existing IPI must already
+	 * be "in flight". If that is the case, we need not schedule
+	 * a new one.
+	 */
+	if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
+		data->func = ipi_end_io;
+		data->flags = 0;
+		__smp_call_function_single(ctx->cpu, data, 0);
+	}
+
+	return true;
+}
+#else /* CONFIG_SMP */
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+			  struct request *rq, const int error)
+{
+	return false;
+}
+#endif
+
+/*
+ * End IO on this request on a multiqueue enabled driver. We'll either do
+ * it directly inline, or punt to a local IPI handler on the matching
+ * remote CPU.
+ */
+void blk_mq_end_io(struct request *rq, int error)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	int cpu;
+
+	if (!ctx->ipi_redirect)
+		return __blk_mq_end_io(rq, error);
+
+	cpu = get_cpu();
+
+	if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
+	    !ipi_remote_cpu(ctx, cpu, rq, error))
+		__blk_mq_end_io(rq, error);
+
+	put_cpu();
+}
+EXPORT_SYMBOL(blk_mq_end_io);
+
+static void blk_mq_start_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	trace_block_rq_issue(q, rq);
+
+	/*
+	 * Just mark start time and set the started bit. Due to memory
+	 * ordering, we know we'll see the correct deadline as long as
+	 * REQ_ATOM_STARTED is seen.
+	 */
+	rq->deadline = jiffies + q->rq_timeout;
+	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+static void blk_mq_requeue_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	trace_block_rq_requeue(q, rq);
+	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+struct blk_mq_timeout_data {
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long *next;
+	unsigned int *next_set;
+};
+
+static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+{
+	struct blk_mq_timeout_data *data = __data;
+	struct blk_mq_hw_ctx *hctx = data->hctx;
+	unsigned int tag;
+
+	/*
+	 * It may not be in flight yet (this is where the REQ_ATOM_STARTED
+	 * flag comes in). The requests are statically allocated, so we know
+	 * it's always safe to access the memory associated with a bit offset
+	 * into ->rqs[].
+	 */
+	tag = 0;
+	do {
+		struct request *rq;
+
+		tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag);
+		if (tag >= hctx->queue_depth)
+			break;
+
+		rq = hctx->rqs[tag++];
+
+		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+			continue;
+
+		blk_rq_check_expired(rq, data->next, data->next_set);
+	} while (1);
+}
+
+static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
+					unsigned long *next,
+					unsigned int *next_set)
+{
+	struct blk_mq_timeout_data data = {
+		.hctx		= hctx,
+		.next		= next,
+		.next_set	= next_set,
+	};
+
+	/*
+	 * Ask the tagging code to iterate busy requests, so we can
+	 * check them for timeout.
+	 */
+	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
+}
+
+static void blk_mq_rq_timer(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *) data;
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long next = 0;
+	int i, next_set = 0;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+
+	if (next_set)
+		mod_timer(&q->timeout, round_jiffies_up(next));
+}
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	struct request *rq;
+	int checked = 8;
+
+	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+		int el_ret;
+
+		if (!checked--)
+			break;
+
+		if (!blk_rq_merge_ok(rq, bio))
+			continue;
+
+		el_ret = blk_try_merge(rq, bio);
+		if (el_ret == ELEVATOR_BACK_MERGE) {
+			if (bio_attempt_back_merge(q, rq, bio)) {
+				ctx->rq_merged++;
+				return true;
+			}
+			break;
+		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
+			if (bio_attempt_front_merge(q, rq, bio)) {
+				ctx->rq_merged++;
+				return true;
+			}
+			break;
+		}
+	}
+
+	return false;
+}
+
+void blk_mq_add_timer(struct request *rq)
+{
+	__blk_add_timer(rq, NULL);
+}
+
+/*
+ * Run this hardware queue, pulling any software queues mapped to it in.
+ * Note that this function currently has various problems around ordering
+ * of IO. In particular, we'd like FIFO behaviour on handling existing
+ * items on the hctx->dispatch list. Ignore that for now.
+ */
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
+	struct blk_mq_ctx *ctx;
+	struct request *rq;
+	LIST_HEAD(rq_list);
+	int bit, queued;
+
+	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+		return;
+
+	hctx->run++;
+
+	/*
+	 * Touch any software queue that has pending entries.
+	 */
+	for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) {
+		clear_bit(bit, hctx->ctx_map);
+		ctx = hctx->ctxs[bit];
+		BUG_ON(bit != ctx->index_hw);
+
+		spin_lock(&ctx->lock);
+		list_splice_tail_init(&ctx->rq_list, &rq_list);
+		spin_unlock(&ctx->lock);
+	}
+
+	/*
+	 * If we have previous entries on our dispatch list, grab them
+	 * and stuff them at the front for more fair dispatch.
+	 */
+	if (!list_empty_careful(&hctx->dispatch)) {
+		spin_lock(&hctx->lock);
+		if (!list_empty(&hctx->dispatch))
+			list_splice_init(&hctx->dispatch, &rq_list);
+		spin_unlock(&hctx->lock);
+	}
+
+	/*
+	 * Delete and return all entries from our dispatch list
+	 */
+	queued = 0;
+
+	/*
+	 * Now process all the entries, sending them to the driver.
+	 */
+	while (!list_empty(&rq_list)) {
+		int ret;
+
+		rq = list_first_entry(&rq_list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		blk_mq_start_request(rq);
+
+		/*
+		 * Last request in the series. Flag it as such, this
+		 * enables drivers to know when IO should be kicked off,
+		 * if they don't do it on a per-request basis.
+		 *
+		 * Note: the flag isn't the only condition on which drivers
+		 * should kick off IO. If the drive is busy, the last
+		 * request might not have the bit set.
+		 */
+		if (list_empty(&rq_list))
+			rq->cmd_flags |= REQ_END;
+
+		ret = q->mq_ops->queue_rq(hctx, rq);
+		switch (ret) {
+		case BLK_MQ_RQ_QUEUE_OK:
+			queued++;
+			continue;
+		case BLK_MQ_RQ_QUEUE_BUSY:
+			/*
+			 * FIXME: we should have a mechanism to stop the queue
+			 * like blk_stop_queue, otherwise we will waste cpu
+			 * time
+			 */
+			list_add(&rq->queuelist, &rq_list);
+			blk_mq_requeue_request(rq);
+			break;
+		default:
+			pr_err("blk-mq: bad return on queue: %d\n", ret);
+			rq->errors = -EIO;
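+			/* FALLTHROUGH - treat unknown returns as errors */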
+		case BLK_MQ_RQ_QUEUE_ERROR:
+			blk_mq_end_io(rq, rq->errors);
+			break;
+		}
+
+		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
+			break;
+	}
+
+	if (!queued)
+		hctx->dispatched[0]++;
+	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
+		hctx->dispatched[ilog2(queued) + 1]++;
+
+	/*
+	 * Any items that need requeuing? Stuff them into hctx->dispatch,
+	 * that is where we will continue on next queue run.
+	 */
+	if (!list_empty(&rq_list)) {
+		spin_lock(&hctx->lock);
+		list_splice(&rq_list, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
+	}
+}
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+		return;
+
+	if (!async)
+		__blk_mq_run_hw_queue(hctx);
+	else {
+		struct request_queue *q = hctx->queue;
+
+		kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0);
+	}
+}
+
+void blk_mq_run_queues(struct request_queue *q, bool async)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if ((!blk_mq_hctx_has_pending(hctx) &&
+		    list_empty_careful(&hctx->dispatch)) ||
+		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+			continue;
+
+		blk_mq_run_hw_queue(hctx, async);
+	}
+}
+EXPORT_SYMBOL(blk_mq_run_queues);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	cancel_delayed_work(&hctx->delayed_work);
+	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queue);
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_stop_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queues);
+
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+	__blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_start_hw_queue);
+
+void blk_mq_start_stopped_hw_queues(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+			continue;
+
+		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+		blk_mq_run_hw_queue(hctx, true);
+	}
+}
+EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
+
+static void blk_mq_work_fn(struct work_struct *work)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);
+	__blk_mq_run_hw_queue(hctx);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+				    struct request *rq)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+	list_add_tail(&rq->queuelist, &ctx->rq_list);
+	blk_mq_hctx_mark_pending(hctx, ctx);
+
+	/*
+	 * We do this early, to ensure we are on the right CPU.
+	 */
+	blk_mq_add_timer(rq);
+}
+
+void blk_mq_insert_request(struct request_queue *q, struct request *rq,
+			   bool run_queue)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx, *current_ctx;
+
+	ctx = rq->mq_ctx;
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		blk_insert_flush(rq);
+	} else {
+		current_ctx = blk_mq_get_ctx(q);
+
+		if (!cpu_online(ctx->cpu)) {
+			ctx = current_ctx;
+			hctx = q->mq_ops->map_queue(q, ctx->cpu);
+			rq->mq_ctx = ctx;
+		}
+		spin_lock(&ctx->lock);
+		__blk_mq_insert_request(hctx, rq);
+		spin_unlock(&ctx->lock);
+
+		blk_mq_put_ctx(current_ctx);
+	}
+
+	if (run_queue)
+		__blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_insert_request);
+
+/*
+ * This is a special version of blk_mq_insert_request to bypass FLUSH request
+ * check. Should only be used internally.
+ */
+void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+{
+	struct request_queue *q = rq->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+
+	ctx = rq->mq_ctx;
+	if (!cpu_online(ctx->cpu)) {
+		ctx = current_ctx;
+		rq->mq_ctx = ctx;
+	}
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	/* ctx->cpu might be offline */
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq);
+	spin_unlock(&ctx->lock);
+
+	blk_mq_put_ctx(current_ctx);
+
+	if (run_queue)
+		blk_mq_run_hw_queue(hctx, async);
+}
+
+static void blk_mq_insert_requests(struct request_queue *q,
+				     struct blk_mq_ctx *ctx,
+				     struct list_head *list,
+				     int depth,
+				     bool from_schedule)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *current_ctx;
+
+	trace_block_unplug(q, depth, !from_schedule);
+
+	current_ctx = blk_mq_get_ctx(q);
+
+	if (!cpu_online(ctx->cpu))
+		ctx = current_ctx;
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	/*
+	 * Preemption doesn't flush the plug list, so it's possible that
+	 * ctx->cpu is offline by now.
+	 */
+	spin_lock(&ctx->lock);
+	while (!list_empty(list)) {
+		struct request *rq;
+
+		rq = list_first_entry(list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		rq->mq_ctx = ctx;
+		__blk_mq_insert_request(hctx, rq);
+	}
+	spin_unlock(&ctx->lock);
+
+	blk_mq_put_ctx(current_ctx);
+
+	blk_mq_run_hw_queue(hctx, from_schedule);
+}
+
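+/*
+ * list_sort() comparator: returns 0 when rqa should stay before rqb,
+ * ordering by software queue (mq_ctx) first and start sector second, so
+ * blk_mq_flush_plug_list() can insert each ctx's requests in one batch.
+ */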
+static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct request *rqa = container_of(a, struct request, queuelist);
+	struct request *rqb = container_of(b, struct request, queuelist);
+
+	return !(rqa->mq_ctx < rqb->mq_ctx ||
+		 (rqa->mq_ctx == rqb->mq_ctx &&
+		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+	struct blk_mq_ctx *this_ctx;
+	struct request_queue *this_q;
+	struct request *rq;
+	LIST_HEAD(list);
+	LIST_HEAD(ctx_list);
+	unsigned int depth;
+
+	list_splice_init(&plug->mq_list, &list);
+
+	list_sort(NULL, &list, plug_ctx_cmp);
+
+	this_q = NULL;
+	this_ctx = NULL;
+	depth = 0;
+
+	while (!list_empty(&list)) {
+		rq = list_entry_rq(list.next);
+		list_del_init(&rq->queuelist);
+		BUG_ON(!rq->q);
+		if (rq->mq_ctx != this_ctx) {
+			if (this_ctx) {
+				blk_mq_insert_requests(this_q, this_ctx,
+							&ctx_list, depth,
+							from_schedule);
+			}
+
+			this_ctx = rq->mq_ctx;
+			this_q = rq->q;
+			depth = 0;
+		}
+
+		depth++;
+		list_add_tail(&rq->queuelist, &ctx_list);
+	}
+
+	/*
+	 * If 'this_ctx' is set, we know we have entries to insert
+	 * from 'ctx_list'. Do those.
+	 */
+	if (this_ctx) {
+		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
+				       from_schedule);
+	}
+}
+
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+{
+	init_request_from_bio(rq, bio);
+	blk_account_io_start(rq, 1);
+}
+
+static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	const int is_sync = rw_is_sync(bio->bi_rw);
+	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	int rw = bio_data_dir(bio);
+	struct request *rq;
+	unsigned int use_plug, request_count = 0;
+
+	/*
+	 * If we have multiple hardware queues, just go directly to
+	 * one of those for sync IO.
+	 */
+	use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);
+
+	blk_queue_bounce(q, &bio);
+
+	if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
+		return;
+
+	if (blk_mq_queue_enter(q)) {
+		bio_endio(bio, -EIO);
+		return;
+	}
+
+	ctx = blk_mq_get_ctx(q);
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	trace_block_getrq(q, bio, rw);
+	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
+	if (likely(rq))
+		blk_mq_rq_ctx_init(ctx, rq, rw);
+	else {
+		blk_mq_put_ctx(ctx);
+		trace_block_sleeprq(q, bio, rw);
+		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+							false);
+		ctx = rq->mq_ctx;
+		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	}
+
+	hctx->queued++;
+
+	if (unlikely(is_flush_fua)) {
+		blk_mq_bio_to_request(rq, bio);
+		blk_mq_put_ctx(ctx);
+		blk_insert_flush(rq);
+		goto run_queue;
+	}
+
+	/*
+	 * A task plug currently exists. Since this is completely lockless,
+	 * utilize that to temporarily store requests until the task is
+	 * either done or scheduled away.
+	 */
+	if (use_plug) {
+		struct blk_plug *plug = current->plug;
+
+		if (plug) {
+			blk_mq_bio_to_request(rq, bio);
+			if (list_empty(&plug->mq_list))
+				trace_block_plug(q);
+			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+				blk_flush_plug_list(plug, false);
+				trace_block_plug(q);
+			}
+			list_add_tail(&rq->queuelist, &plug->mq_list);
+			blk_mq_put_ctx(ctx);
+			return;
+		}
+	}
+
+	spin_lock(&ctx->lock);
+
+	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+	    blk_mq_attempt_merge(q, ctx, bio))
+		__blk_mq_free_request(hctx, ctx, rq);
+	else {
+		blk_mq_bio_to_request(rq, bio);
+		__blk_mq_insert_request(hctx, rq);
+	}
+
+	spin_unlock(&ctx->lock);
+	blk_mq_put_ctx(ctx);
+
+	/*
+	 * For a SYNC request, send it to the hardware immediately. For an
+	 * ASYNC request, just ensure that we run it later on. The latter
+	 * allows for merging opportunities and more efficient dispatching.
+	 */
+run_queue:
+	blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
+}
+
+/*
+ * Default mapping to a software queue, since we use one per CPU.
+ */
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
+{
+	return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+EXPORT_SYMBOL(blk_mq_map_queue);
+
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg,
+						   unsigned int hctx_index)
+{
+	return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
+				GFP_KERNEL | __GFP_ZERO, reg->numa_node);
+}
+EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
+
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
+				 unsigned int hctx_index)
+{
+	kfree(hctx);
+}
+EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
+
+static void blk_mq_hctx_notify(void *data, unsigned long action,
+			       unsigned int cpu)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct blk_mq_ctx *ctx;
+	LIST_HEAD(tmp);
+
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+		return;
+
+	/*
+	 * Move ctx entries to new CPU, if this one is going away.
+	 */
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->rq_list)) {
+		list_splice_init(&ctx->rq_list, &tmp);
+		clear_bit(ctx->index_hw, hctx->ctx_map);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (list_empty(&tmp))
+		return;
+
+	ctx = blk_mq_get_ctx(hctx->queue);
+	spin_lock(&ctx->lock);
+
+	while (!list_empty(&tmp)) {
+		struct request *rq;
+
+		rq = list_first_entry(&tmp, struct request, queuelist);
+		rq->mq_ctx = ctx;
+		list_move_tail(&rq->queuelist, &ctx->rq_list);
+	}
+
+	blk_mq_hctx_mark_pending(hctx, ctx);
+
+	spin_unlock(&ctx->lock);
+	blk_mq_put_ctx(ctx);
+}
+
+static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+				    void (*init)(void *, struct blk_mq_hw_ctx *,
+					struct request *, unsigned int),
+				    void *data)
+{
+	unsigned int i;
+
+	for (i = 0; i < hctx->queue_depth; i++) {
+		struct request *rq = hctx->rqs[i];
+
+		init(data, hctx, rq, i);
+	}
+}
+
+void blk_mq_init_commands(struct request_queue *q,
+			  void (*init)(void *, struct blk_mq_hw_ctx *,
+					struct request *, unsigned int),
+			  void *data)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_init_hw_commands(hctx, init, data);
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
+{
+	struct page *page;
+
+	while (!list_empty(&hctx->page_list)) {
+		page = list_first_entry(&hctx->page_list, struct page, list);
+		list_del_init(&page->list);
+		__free_pages(page, page->private);
+	}
+
+	kfree(hctx->rqs);
+
+	if (hctx->tags)
+		blk_mq_free_tags(hctx->tags);
+}
+
+static size_t order_to_size(unsigned int order)
+{
+	size_t ret = PAGE_SIZE;
+
+	while (order--)
+		ret *= 2;
+
+	return ret;
+}
+
+static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
+			      unsigned int reserved_tags, int node)
+{
+	unsigned int i, j, entries_per_page, max_order = 4;
+	size_t rq_size, left;
+
+	INIT_LIST_HEAD(&hctx->page_list);
+
+	hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *),
+					GFP_KERNEL, node);
+	if (!hctx->rqs)
+		return -ENOMEM;
+
+	/*
+	 * rq_size is the size of the request plus driver payload, rounded
+	 * to the cacheline size
+	 */
+	rq_size = round_up(sizeof(struct request) + hctx->cmd_size,
+				cache_line_size());
+	left = rq_size * hctx->queue_depth;
+
+	for (i = 0; i < hctx->queue_depth;) {
+		int this_order = max_order;
+		struct page *page;
+		int to_do;
+		void *p;
+
+		while (left < order_to_size(this_order - 1) && this_order)
+			this_order--;
+
+		do {
+			page = alloc_pages_node(node, GFP_KERNEL, this_order);
+			if (page)
+				break;
+			if (!this_order--)
+				break;
+			if (order_to_size(this_order) < rq_size)
+				break;
+		} while (1);
+
+		if (!page)
+			break;
+
+		page->private = this_order;
+		list_add_tail(&page->list, &hctx->page_list);
+
+		p = page_address(page);
+		entries_per_page = order_to_size(this_order) / rq_size;
+		to_do = min(entries_per_page, hctx->queue_depth - i);
+		left -= to_do * rq_size;
+		for (j = 0; j < to_do; j++) {
+			hctx->rqs[i] = p;
+			blk_mq_rq_init(hctx, hctx->rqs[i]);
+			p += rq_size;
+			i++;
+		}
+	}
+
+	if (i < (reserved_tags + BLK_MQ_TAG_MIN))
+		goto err_rq_map;
+	else if (i != hctx->queue_depth) {
+		hctx->queue_depth = i;
+		pr_warn("%s: queue depth set to %u because of low memory\n",
+					__func__, i);
+	}
+
+	hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node);
+	if (!hctx->tags) {
+err_rq_map:
+		blk_mq_free_rq_map(hctx);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int blk_mq_init_hw_queues(struct request_queue *q,
+				 struct blk_mq_reg *reg, void *driver_data)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i, j;
+
+	/*
+	 * Initialize hardware queues
+	 */
+	queue_for_each_hw_ctx(q, hctx, i) {
+		unsigned int num_maps;
+		int node;
+
+		node = hctx->numa_node;
+		if (node == NUMA_NO_NODE)
+			node = hctx->numa_node = reg->numa_node;
+
+		INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn);
+		spin_lock_init(&hctx->lock);
+		INIT_LIST_HEAD(&hctx->dispatch);
+		hctx->queue = q;
+		hctx->queue_num = i;
+		hctx->flags = reg->flags;
+		hctx->queue_depth = reg->queue_depth;
+		hctx->cmd_size = reg->cmd_size;
+
+		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+						blk_mq_hctx_notify, hctx);
+		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+		if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node))
+			break;
+
+		/*
+		 * Allocate space for all possible CPUs to avoid allocation at
+		 * runtime
+		 */
+		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+						GFP_KERNEL, node);
+		if (!hctx->ctxs)
+			break;
+
+		num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG;
+		hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
+						GFP_KERNEL, node);
+		if (!hctx->ctx_map)
+			break;
+
+		hctx->nr_ctx_map = num_maps;
+		hctx->nr_ctx = 0;
+
+		if (reg->ops->init_hctx &&
+		    reg->ops->init_hctx(hctx, driver_data, i))
+			break;
+	}
+
+	if (i == q->nr_hw_queues)
+		return 0;
+
+	/*
+	 * Init failed
+	 */
+	queue_for_each_hw_ctx(q, hctx, j) {
+		if (i == j)
+			break;
+
+		if (reg->ops->exit_hctx)
+			reg->ops->exit_hctx(hctx, j);
+
+		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+		blk_mq_free_rq_map(hctx);
+		kfree(hctx->ctxs);
+	}
+
+	return 1;
+}
+
+static void blk_mq_init_cpu_queues(struct request_queue *q,
+				   unsigned int nr_hw_queues)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i) {
+		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
+		struct blk_mq_hw_ctx *hctx;
+
+		memset(__ctx, 0, sizeof(*__ctx));
+		__ctx->cpu = i;
+		spin_lock_init(&__ctx->lock);
+		INIT_LIST_HEAD(&__ctx->rq_list);
+		__ctx->queue = q;
+
+		/* If the cpu isn't online, it is mapped to the first hctx */
+		hctx = q->mq_ops->map_queue(q, i);
+		hctx->nr_ctx++;
+
+		if (!cpu_online(i))
+			continue;
+
+		/*
+		 * Set local node, IFF we have more than one hw queue. If
+		 * not, we remain on the home node of the device
+		 */
+		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+			hctx->numa_node = cpu_to_node(i);
+	}
+}
+
+static void blk_mq_map_swqueue(struct request_queue *q)
+{
+	unsigned int i;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		hctx->nr_ctx = 0;
+	}
+
+	/*
+	 * Map software to hardware queues
+	 */
+	queue_for_each_ctx(q, ctx, i) {
+		/* If the cpu isn't online, it is mapped to the first hctx */
+		hctx = q->mq_ops->map_queue(q, i);
+		ctx->index_hw = hctx->nr_ctx;
+		hctx->ctxs[hctx->nr_ctx++] = ctx;
+	}
+}
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
+					void *driver_data)
+{
+	struct blk_mq_hw_ctx **hctxs;
+	struct blk_mq_ctx *ctx;
+	struct request_queue *q;
+	int i;
+
+	if (!reg->nr_hw_queues ||
+	    !reg->ops->queue_rq || !reg->ops->map_queue ||
+	    !reg->ops->alloc_hctx || !reg->ops->free_hctx)
+		return ERR_PTR(-EINVAL);
+
+	if (!reg->queue_depth)
+		reg->queue_depth = BLK_MQ_MAX_DEPTH;
+	else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) {
+		pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth);
+		reg->queue_depth = BLK_MQ_MAX_DEPTH;
+	}
+
+	/*
+	 * Set aside a tag for flush requests.  It will only be used while
+	 * another flush request is in progress but outside the driver.
+	 *
+	 * TODO: only allocate if flushes are supported
+	 */
+	reg->queue_depth++;
+	reg->reserved_tags++;
+
+	if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
+		return ERR_PTR(-EINVAL);
+
+	ctx = alloc_percpu(struct blk_mq_ctx);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
+			reg->numa_node);
+
+	if (!hctxs)
+		goto err_percpu;
+
+	for (i = 0; i < reg->nr_hw_queues; i++) {
+		hctxs[i] = reg->ops->alloc_hctx(reg, i);
+		if (!hctxs[i])
+			goto err_hctxs;
+
+		hctxs[i]->numa_node = NUMA_NO_NODE;
+		hctxs[i]->queue_num = i;
+	}
+
+	q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node);
+	if (!q)
+		goto err_hctxs;
+
+	q->mq_map = blk_mq_make_queue_map(reg);
+	if (!q->mq_map)
+		goto err_map;
+
+	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+	blk_queue_rq_timeout(q, 30000);
+
+	q->nr_queues = nr_cpu_ids;
+	q->nr_hw_queues = reg->nr_hw_queues;
+
+	q->queue_ctx = ctx;
+	q->queue_hw_ctx = hctxs;
+
+	q->mq_ops = reg->ops;
+
+	blk_queue_make_request(q, blk_mq_make_request);
+	blk_queue_rq_timed_out(q, reg->ops->timeout);
+	if (reg->timeout)
+		blk_queue_rq_timeout(q, reg->timeout);
+
+	blk_mq_init_flush(q);
+	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
+
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_hw;
+
+	blk_mq_map_swqueue(q);
+
+	mutex_lock(&all_q_mutex);
+	list_add_tail(&q->all_q_node, &all_q_list);
+	mutex_unlock(&all_q_mutex);
+
+	return q;
+err_hw:
+	kfree(q->mq_map);
+err_map:
+	blk_cleanup_queue(q);
+err_hctxs:
+	for (i = 0; i < reg->nr_hw_queues; i++) {
+		if (!hctxs[i])
+			break;
+		reg->ops->free_hctx(hctxs[i], i);
+	}
+	kfree(hctxs);
+err_percpu:
+	free_percpu(ctx);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+void blk_mq_free_queue(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		cancel_delayed_work_sync(&hctx->delayed_work);
+		kfree(hctx->ctx_map);
+		kfree(hctx->ctxs);
+		blk_mq_free_rq_map(hctx);
+		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+		if (q->mq_ops->exit_hctx)
+			q->mq_ops->exit_hctx(hctx, i);
+		q->mq_ops->free_hctx(hctx, i);
+	}
+
+	free_percpu(q->queue_ctx);
+	kfree(q->queue_hw_ctx);
+	kfree(q->mq_map);
+
+	q->queue_ctx = NULL;
+	q->queue_hw_ctx = NULL;
+	q->mq_map = NULL;
+
+	mutex_lock(&all_q_mutex);
+	list_del_init(&q->all_q_node);
+	mutex_unlock(&all_q_mutex);
+}
+EXPORT_SYMBOL(blk_mq_free_queue);
+
+/* Basically redo blk_mq_init_queue with queue frozen */
+static void blk_mq_queue_reinit(struct request_queue *q)
+{
+	blk_mq_freeze_queue(q);
+
+	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+
+	/*
+	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
+	 * we should change hctx numa_node according to the new topology (this
+	 * involves freeing and re-allocating memory, so is it worth doing?)
+	 */
+
+	blk_mq_map_swqueue(q);
+
+	blk_mq_unfreeze_queue(q);
+}
+
+static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
+				      unsigned long action, void *hcpu)
+{
+	struct request_queue *q;
+
+	/*
+	 * Before the new mapping is established, a hot-added CPU might already
+	 * start handling requests. This doesn't break anything, as we map
+	 * offline CPUs to the first hardware queue. We re-init the queues
+	 * below to get optimal settings.
+	 */
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
+	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+		return NOTIFY_OK;
+
+	mutex_lock(&all_q_mutex);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_queue_reinit(q);
+	mutex_unlock(&all_q_mutex);
+	return NOTIFY_OK;
+}
+
+static int __init blk_mq_init(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(ipi_lists, i));
+
+	blk_mq_cpu_init();
+
+	/* Must be called after percpu_counter_hotcpu_callback() */
+	hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
+
+	return 0;
+}
+subsys_initcall(blk_mq_init);
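The entry point above is what drivers call to attach to blk-mq: they fill in a
blk_mq_reg describing their ops, queue depth and hardware queue count, and get
back a request_queue. A minimal driver-side sketch follows; the my_* names are
invented here, and the queue_rq dispatch hook, map_queue and the
single-hw-queue helpers are assumed from the rest of the blk-mq series rather
than shown in this hunk:

  static struct blk_mq_ops my_mq_ops = {
  	.queue_rq	= my_queue_rq,			/* assumed dispatch hook */
  	.map_queue	= blk_mq_map_queue,		/* assumed helper */
  	.alloc_hctx	= blk_mq_alloc_single_hw_queue,	/* assumed helper */
  	.free_hctx	= blk_mq_free_single_hw_queue,	/* assumed helper */
  };

  static struct blk_mq_reg my_mq_reg = {
  	.ops		= &my_mq_ops,
  	.nr_hw_queues	= 1,
  	.queue_depth	= 64,
  	.numa_node	= NUMA_NO_NODE,
  };

  /* in the driver's probe path */
  q = blk_mq_init_queue(&my_mq_reg, my_driver_data);
  if (IS_ERR(q))
  	return PTR_ERR(q);

  /* on teardown */
  blk_mq_free_queue(q);

Note that blk_mq_init_queue() bumps queue_depth and reserved_tags by one to
set aside the internal flush tag, so the depth a driver asks for is not quite
what the tag map ends up with.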
diff --git a/block/blk-mq.h b/block/blk-mq.h
new file mode 100644
index 0000000..52bf1f9
--- /dev/null
+++ b/block/blk-mq.h
@@ -0,0 +1,52 @@
+#ifndef INT_BLK_MQ_H
+#define INT_BLK_MQ_H
+
+struct blk_mq_ctx {
+	struct {
+		spinlock_t		lock;
+		struct list_head	rq_list;
+	}  ____cacheline_aligned_in_smp;
+
+	unsigned int		cpu;
+	unsigned int		index_hw;
+	unsigned int		ipi_redirect;
+
+	/* incremented at dispatch time */
+	unsigned long		rq_dispatched[2];
+	unsigned long		rq_merged;
+
+	/* incremented at completion time */
+	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
+
+	struct request_queue	*queue;
+	struct kobject		kobj;
+};
+
+void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_init_flush(struct request_queue *q);
+
+/*
+ * CPU hotplug helpers
+ */
+struct blk_mq_cpu_notifier;
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+			      void (*fn)(void *, unsigned long, unsigned int),
+			      void *data);
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_cpu_init(void);
+DECLARE_PER_CPU(struct llist_head, ipi_lists);
+
+/*
+ * CPU -> queue mappings
+ */
+struct blk_mq_reg;
+extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+
+void blk_mq_add_timer(struct request *rq);
+
+#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c50ecf0..05e8267 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -144,6 +144,7 @@
 	lim->discard_zeroes_data = 1;
 	lim->max_segments = USHRT_MAX;
 	lim->max_hw_sectors = UINT_MAX;
+	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
@@ -195,17 +196,17 @@
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_mask.
+ *    buffers for doing I/O to pages residing above @max_addr.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
-	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ec9e606..57790c1 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -23,7 +23,7 @@
 	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
-	cpu_list = &__get_cpu_var(blk_cpu_done);
+	cpu_list = this_cpu_ptr(&blk_cpu_done);
 	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
@@ -36,7 +36,7 @@
 	}
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 static void trigger_softirq(void *data)
 {
 	struct request *rq = data;
@@ -44,7 +44,7 @@
 	struct list_head *list;
 
 	local_irq_save(flags);
-	list = &__get_cpu_var(blk_cpu_done);
+	list = this_cpu_ptr(&blk_cpu_done);
 	list_add_tail(&rq->csd.list, list);
 
 	if (list->next == &rq->csd.list)
@@ -71,7 +71,7 @@
 
 	return 1;
 }
-#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#else /* CONFIG_SMP */
 static int raise_blk_irq(int cpu, struct request *rq)
 {
 	return 1;
@@ -90,7 +90,7 @@
 
 		local_irq_disable();
 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-				 &__get_cpu_var(blk_cpu_done));
+				 this_cpu_ptr(&blk_cpu_done));
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -135,7 +135,7 @@
 	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
-		list = &__get_cpu_var(blk_cpu_done);
+		list = this_cpu_ptr(&blk_cpu_done);
 		list_add_tail(&req->csd.list, list);
 
 		/*
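These hunks also convert the file from the deprecated __get_cpu_var() accessor
to this_cpu_ptr(), which yields the same per-CPU address. A minimal sketch of
the equivalence, assuming a per-CPU list head like the one used here (the
access must happen with preemption, or as here interrupts, disabled):

  DEFINE_PER_CPU(struct list_head, blk_cpu_done);
  struct list_head *list;

  local_irq_disable();
  /* old, deprecated spelling */
  list = &__get_cpu_var(blk_cpu_done);
  /* new spelling, same resulting pointer */
  list = this_cpu_ptr(&blk_cpu_done);
  local_irq_enable();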
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3aa5b19..9777952 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blktrace_api.h>
+#include <linux/blk-mq.h>
 
 #include "blk.h"
 #include "blk-cgroup.h"
@@ -287,7 +288,7 @@
 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 {
 	ssize_t ret = -EINVAL;
-#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 	unsigned long val;
 
 	ret = queue_var_store(&val, page, count);
@@ -542,6 +543,11 @@
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	percpu_counter_destroy(&q->mq_usage_counter);
+
+	if (q->mq_ops)
+		blk_mq_free_queue(q);
+
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
@@ -575,6 +581,7 @@
 	 * bypass from queue allocation.
 	 */
 	blk_queue_bypass_end(q);
+	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)
@@ -588,6 +595,9 @@
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
+	if (q->mq_ops)
+		blk_mq_register_disk(disk);
+
 	if (!q->request_fn)
 		return 0;
 
@@ -610,6 +620,9 @@
 	if (WARN_ON(!q))
 		return;
 
+	if (q->mq_ops)
+		blk_mq_unregister_disk(disk);
+
 	if (q->request_fn)
 		elv_unregister_queue(q);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8331aba..0653404 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -256,6 +256,12 @@
 	}								\
 } while (0)
 
+static void tg_stats_init(struct tg_stats_cpu *tg_stats)
+{
+	blkg_rwstat_init(&tg_stats->service_bytes);
+	blkg_rwstat_init(&tg_stats->serviced);
+}
+
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -269,12 +275,16 @@
 
 alloc_stats:
 	if (!stats_cpu) {
+		int cpu;
+
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
 			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
+		for_each_possible_cpu(cpu)
+			tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
 	}
 
 	spin_lock_irq(&tg_stats_alloc_lock);
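alloc_percpu() zeroes the per-CPU copies but runs no constructor, which is why
the new tg_stats_init() walk over all possible CPUs is needed before the stats
can be used. A sketch of the pattern, under a hypothetical stats struct:

  struct my_stats {				/* hypothetical */
  	struct blkg_rwstat bytes;
  };

  struct my_stats __percpu *stats;
  int cpu;

  stats = alloc_percpu(struct my_stats);	/* zeroed, not constructed */
  if (!stats)
  	return -ENOMEM;
  for_each_possible_cpu(cpu)
  	blkg_rwstat_init(&per_cpu_ptr(stats, cpu)->bytes);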
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 65f1035..bba81c9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -7,6 +7,7 @@
 #include <linux/fault-inject.h>
 
 #include "blk.h"
+#include "blk-mq.h"
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 
@@ -31,7 +32,7 @@
 	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
 						NULL, &fail_io_timeout);
 
-	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+	return PTR_ERR_OR_ZERO(dir);
 }
 
 late_initcall(fail_io_timeout_debugfs);
@@ -88,11 +89,19 @@
 		ret = q->rq_timed_out_fn(req);
 	switch (ret) {
 	case BLK_EH_HANDLED:
-		__blk_complete_request(req);
+		/* Can we use req->errors here? */
+		if (q->mq_ops)
+			blk_mq_complete_request(req, req->errors);
+		else
+			__blk_complete_request(req);
 		break;
 	case BLK_EH_RESET_TIMER:
+		if (q->mq_ops)
+			blk_mq_add_timer(req);
+		else
+			blk_add_timer(req);
+
 		blk_clear_rq_complete(req);
-		blk_add_timer(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		/*
@@ -108,6 +117,23 @@
 	}
 }
 
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+			  unsigned int *next_set)
+{
+	if (time_after_eq(jiffies, rq->deadline)) {
+		list_del_init(&rq->timeout_list);
+
+		/*
+		 * Check if we raced with end io completion
+		 */
+		if (!blk_mark_rq_complete(rq))
+			blk_rq_timed_out(rq);
+	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
+		*next_timeout = rq->deadline;
+		*next_set = 1;
+	}
+}
+
 void blk_rq_timed_out_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
@@ -117,21 +143,8 @@
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
-		if (time_after_eq(jiffies, rq->deadline)) {
-			list_del_init(&rq->timeout_list);
-
-			/*
-			 * Check if we raced with end io completion
-			 */
-			if (blk_mark_rq_complete(rq))
-				continue;
-			blk_rq_timed_out(rq);
-		} else if (!next_set || time_after(next, rq->deadline)) {
-			next = rq->deadline;
-			next_set = 1;
-		}
-	}
+	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+		blk_rq_check_expired(rq, &next, &next_set);
 
 	if (next_set)
 		mod_timer(&q->timeout, round_jiffies_up(next));
@@ -157,15 +170,7 @@
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
-/**
- * blk_add_timer - Start timeout timer for a single request
- * @req:	request that is about to start running.
- *
- * Notes:
- *    Each request has its own timer, and as it is added to the queue, we
- *    set up the timer. When the request completes, we cancel the timer.
- */
-void blk_add_timer(struct request *req)
+void __blk_add_timer(struct request *req, struct list_head *timeout_list)
 {
 	struct request_queue *q = req->q;
 	unsigned long expiry;
@@ -174,7 +179,6 @@
 		return;
 
 	BUG_ON(!list_empty(&req->timeout_list));
-	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 
 	/*
 	 * Some LLDs, like scsi, peek at the timeout to prevent a
@@ -184,7 +188,8 @@
 		req->timeout = q->rq_timeout;
 
 	req->deadline = jiffies + req->timeout;
-	list_add_tail(&req->timeout_list, &q->timeout_list);
+	if (timeout_list)
+		list_add_tail(&req->timeout_list, timeout_list);
 
 	/*
 	 * If the timer isn't already pending or this timeout is earlier
@@ -196,5 +201,19 @@
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires))
 		mod_timer(&q->timeout, expiry);
+
+}
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req:	request that is about to start running.
+ *
+ * Notes:
+ *    Each request has its own timer, and as it is added to the queue, we
+ *    set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+	__blk_add_timer(req, &req->q->timeout_list);
 }
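The split above exists so that blk-mq, which tracks pending requests per
hardware queue, can arm the queue timeout without touching the legacy
per-queue timeout_list; __blk_add_timer() skips the list when passed NULL.
Sketch of the two call styles (blk_mq_add_timer(), declared in blk-mq.h above,
is assumed to take the NULL-list path):

  /* legacy request path: the request joins q->timeout_list */
  blk_add_timer(req);

  /* blk-mq path: timer armed, shared list skipped */
  __blk_add_timer(req, NULL);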
 
diff --git a/block/blk.h b/block/blk.h
index e837b8f..c90e1d8 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,6 +10,7 @@
 #define BLK_BATCH_REQ	32
 
 extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
@@ -34,14 +35,30 @@
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
 
 void blk_rq_timed_out_timer(unsigned long data);
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+			  unsigned int *next_set);
+void __blk_add_timer(struct request *req, struct list_head *timeout_list);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 
+
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+			     struct bio *bio);
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+			    struct bio *bio);
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+			    unsigned int *request_count);
+
+void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_completion(struct request *req, unsigned int bytes);
+void blk_account_io_done(struct request *req);
+
 /*
  * Internal atomic flags for request handling
  */
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
+	REQ_ATOM_STARTED,
 };
 
 /*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 434944c..4d5cec1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1508,6 +1508,29 @@
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfqg_stats_init(struct cfqg_stats *stats)
+{
+	blkg_rwstat_init(&stats->service_bytes);
+	blkg_rwstat_init(&stats->serviced);
+	blkg_rwstat_init(&stats->merged);
+	blkg_rwstat_init(&stats->service_time);
+	blkg_rwstat_init(&stats->wait_time);
+	blkg_rwstat_init(&stats->queued);
+
+	blkg_stat_init(&stats->sectors);
+	blkg_stat_init(&stats->time);
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_init(&stats->unaccounted_time);
+	blkg_stat_init(&stats->avg_queue_size_sum);
+	blkg_stat_init(&stats->avg_queue_size_samples);
+	blkg_stat_init(&stats->dequeue);
+	blkg_stat_init(&stats->group_wait_time);
+	blkg_stat_init(&stats->idle_time);
+	blkg_stat_init(&stats->empty_time);
+#endif
+}
+
 static void cfq_pd_init(struct blkcg_gq *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1515,6 +1538,8 @@
 	cfq_init_cfqg_base(cfqg);
 	cfqg->weight = blkg->blkcg->cfq_weight;
 	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+	cfqg_stats_init(&cfqg->stats);
+	cfqg_stats_init(&cfqg->dead_stats);
 }
 
 static void cfq_pd_offline(struct blkcg_gq *blkg)
diff --git a/block/elevator.c b/block/elevator.c
index 2bcbd8c..b7ff286 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -186,6 +186,12 @@
 	struct elevator_type *e = NULL;
 	int err;
 
+	/*
+	 * q->sysfs_lock must be held to provide mutual exclusion between
+	 * elevator_switch() and here.
+	 */
+	lockdep_assert_held(&q->sysfs_lock);
+
 	if (unlikely(q->elevator))
 		return 0;
 
@@ -959,7 +965,7 @@
 /*
  * Switch this queue to the given IO scheduler.
  */
-int elevator_change(struct request_queue *q, const char *name)
+static int __elevator_change(struct request_queue *q, const char *name)
 {
 	char elevator_name[ELV_NAME_MAX];
 	struct elevator_type *e;
@@ -981,6 +987,18 @@
 
 	return elevator_switch(q, e);
 }
+
+int elevator_change(struct request_queue *q, const char *name)
+{
+	int ret;
+
+	/* Protect q->elevator from elevator_init() */
+	mutex_lock(&q->sysfs_lock);
+	ret = __elevator_change(q, name);
+	mutex_unlock(&q->sysfs_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(elevator_change);
 
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
@@ -991,7 +1009,7 @@
 	if (!q->elevator)
 		return count;
 
-	ret = elevator_change(q, name);
+	ret = __elevator_change(q, name);
 	if (!ret)
 		return count;
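The net effect of the elevator.c changes: q->sysfs_lock now serializes
elevator_init() against elevator_switch(), external callers take the lock
through the elevator_change() wrapper, and elv_iosched_store(), which already
holds it, calls __elevator_change() directly. The lockdep_assert_held() makes
the contract self-checking; a minimal sketch with a hypothetical helper:

  static void my_elv_op(struct request_queue *q)
  {
  	/* fires a lockdep warning if a caller forgets the lock */
  	lockdep_assert_held(&q->sysfs_lock);
  }

  mutex_lock(&q->sysfs_lock);
  my_elv_op(q);
  mutex_unlock(&q->sysfs_lock);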
 
diff --git a/block/ioctl.c b/block/ioctl.c
index a31d91d9b..7d5c3b2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -64,7 +64,7 @@
 			part = add_partition(disk, partno, start, length,
 					     ADDPART_FLAG_NONE, NULL);
 			mutex_unlock(&bdev->bd_mutex);
-			return IS_ERR(part) ? PTR_ERR(part) : 0;
+			return PTR_ERR_OR_ZERO(part);
 		case BLKPG_DEL_PARTITION:
 			part = disk_get_part(disk, partno);
 			if (!part)
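Same cleanup as in blk-timeout.c above; the two forms are equivalent:

  /* open-coded */
  return IS_ERR(part) ? PTR_ERR(part) : 0;

  /* with the helper */
  return PTR_ERR_OR_ZERO(part);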
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a5ffcc9..625e3e4 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -286,7 +286,8 @@
 		struct sg_io_hdr *hdr, fmode_t mode)
 {
 	unsigned long start_time;
-	int writing = 0, ret = 0;
+	ssize_t ret = 0;
+	int writing = 0;
 	struct request *rq;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	struct bio *bio;
@@ -321,37 +322,16 @@
 	}
 
 	if (hdr->iovec_count) {
-		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 		size_t iov_data_len;
-		struct sg_iovec *sg_iov;
 		struct iovec *iov;
-		int i;
 
-		sg_iov = kmalloc(size, GFP_KERNEL);
-		if (!sg_iov) {
-			ret = -ENOMEM;
+		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
+					    0, NULL, &iov);
+		if (ret < 0)
 			goto out;
-		}
 
-		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
-			kfree(sg_iov);
-			ret = -EFAULT;
-			goto out;
-		}
-
-		/*
-		 * Sum up the vecs, making sure they don't overflow
-		 */
-		iov = (struct iovec *) sg_iov;
-		iov_data_len = 0;
-		for (i = 0; i < hdr->iovec_count; i++) {
-			if (iov_data_len + iov[i].iov_len < iov_data_len) {
-				kfree(sg_iov);
-				ret = -EINVAL;
-				goto out;
-			}
-			iov_data_len += iov[i].iov_len;
-		}
+		iov_data_len = ret;
+		ret = 0;
 
 		/* SG_IO howto says that the shorter of the two wins */
 		if (hdr->dxfer_len < iov_data_len) {
@@ -361,9 +341,10 @@
 			iov_data_len = hdr->dxfer_len;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+		ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov,
+					  hdr->iovec_count,
 					  iov_data_len, GFP_KERNEL);
-		kfree(sg_iov);
+		kfree(iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 				      GFP_KERNEL);
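The open-coded kmalloc + copy_from_user + overflow-checked length summation is
replaced by rw_copy_check_uvector(), which does all three in one call. A
sketch of the contract as used here (user_iov and nr_segs are placeholder
names):

  struct iovec *iov;
  ssize_t total;

  /*
   * type -1 skips the per-segment access check; with fast_segs = 0 and
   * no fast_pointer the iovec array is kmalloc'ed, so the caller must
   * kfree(iov) afterwards. Returns the overflow-checked total byte
   * count, or a negative errno.
   */
  total = rw_copy_check_uvector(-1, user_iov, nr_segs, 0, NULL, &iov);
  if (total < 0)
  	return total;
  /* ... hand iov to blk_rq_map_user_iov() or similar ... */
  kfree(iov);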
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573..71f337a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -776,6 +776,22 @@
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_ARM_BS
+	tristate "Bit sliced AES using NEON instructions"
+	depends on ARM && KERNEL_MODE_NEON
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES_ARM
+	select CRYPTO_ABLK_HELPER
+	help
+	  Use a faster and more secure NEON-based implementation of AES in CBC,
+	  CTR and XTS modes.
+
+	  Bit sliced AES gives around a 45% speedup on Cortex-A15 for CTR mode
+	  and for XTS mode encryption; CBC and XTS mode decryption speedup is
+	  around 25%. (CBC encryption speed is not affected by this driver.)
+	  This implementation does not rely on any lookup tables, so it is
+	  believed to be invulnerable to cache timing attacks.
+
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
 	select CRYPTO_ALGAPI
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ac33d5f..966f893 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -434,7 +434,7 @@
 	case -EINPROGRESS:
 	case -EBUSY:
 		wait_for_completion(&completion->completion);
-		INIT_COMPLETION(completion->completion);
+		reinit_completion(&completion->completion);
 		err = completion->err;
 		break;
 	};
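INIT_COMPLETION() is being phased out across the tree in favor of
reinit_completion(), which takes a pointer and resets only the done counter;
the wait queue set up by init_completion() is left intact. Typical reuse
pattern:

  struct completion work_done;

  init_completion(&work_done);	/* full init, once at setup */
  /* submit async work, then: */
  wait_for_completion(&work_done);
  reinit_completion(&work_done);	/* ready for the next round trip */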
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 25a5934..1ab8258 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -493,7 +493,7 @@
 		ret = wait_for_completion_interruptible(&tr->completion);
 		if (!ret)
 			ret = tr->err;
-		INIT_COMPLETION(tr->completion);
+		reinit_completion(&tr->completion);
 	}
 	return ret;
 }
@@ -721,7 +721,7 @@
 		ret = wait_for_completion_interruptible(&tr->completion);
 		if (!ret)
 			ret = tr->err;
-		INIT_COMPLETION(tr->completion);
+		reinit_completion(&tr->completion);
 	}
 
 	return ret;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e091ef6..432afc0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -179,7 +179,7 @@
 		ret = wait_for_completion_interruptible(&tr->completion);
 		if (!ret)
 			ret = tr->err;
-		INIT_COMPLETION(tr->completion);
+		reinit_completion(&tr->completion);
 	}
 	return ret;
 }
@@ -336,7 +336,7 @@
 				ret = wait_for_completion_interruptible(
 					&tresult.completion);
 				if (!ret && !(ret = tresult.err)) {
-					INIT_COMPLETION(tresult.completion);
+					reinit_completion(&tresult.completion);
 					break;
 				}
 				/* fall through */
@@ -543,7 +543,7 @@
 				ret = wait_for_completion_interruptible(
 					&result.completion);
 				if (!ret && !(ret = result.err)) {
-					INIT_COMPLETION(result.completion);
+					reinit_completion(&result.completion);
 					break;
 				}
 			case -EBADMSG:
@@ -697,7 +697,7 @@
 				ret = wait_for_completion_interruptible(
 					&result.completion);
 				if (!ret && !(ret = result.err)) {
-					INIT_COMPLETION(result.completion);
+					reinit_completion(&result.completion);
 					break;
 				}
 			case -EBADMSG:
@@ -983,7 +983,7 @@
 				ret = wait_for_completion_interruptible(
 					&result.completion);
 				if (!ret && !((ret = result.err))) {
-					INIT_COMPLETION(result.completion);
+					reinit_completion(&result.completion);
 					break;
 				}
 				/* fall through */
@@ -1086,7 +1086,7 @@
 				ret = wait_for_completion_interruptible(
 					&result.completion);
 				if (!ret && !((ret = result.err))) {
-					INIT_COMPLETION(result.completion);
+					reinit_completion(&result.completion);
 					break;
 				}
 				/* fall through */
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8f45144..b3138fb 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -168,4 +168,6 @@
 
 source "drivers/phy/Kconfig"
 
+source "drivers/powercap/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 687da89..3cc8214 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -154,3 +154,4 @@
 obj-$(CONFIG_IPACK_BUS)		+= ipack/
 obj-$(CONFIG_NTB)		+= ntb/
 obj-$(CONFIG_FMC)		+= fmc/
+obj-$(CONFIG_POWERCAP)		+= powercap/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e11faae..c95df0b 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -56,23 +56,6 @@
 
 	  Say N to delete /proc/acpi/ files that have moved to /sys/
 
-config ACPI_PROCFS_POWER
-	bool "Deprecated power /proc/acpi directories"
-	depends on PROC_FS
-	help
-	  For backwards compatibility, this option allows
-          deprecated power /proc/acpi/ directories to exist, even when
-          they have been replaced by functions in /sys.
-          The deprecated directories (and their replacements) include:
-	  /proc/acpi/battery/* (/sys/class/power_supply/*)
-	  /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
-	  This option has no effect on /proc/acpi/ directories
-	  and functions, which do not yet exist in /sys
-	  This option, together with the proc directories, will be
-	  deleted in 2.6.39.
-
-	  Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
 config ACPI_EC_DEBUGFS
 	tristate "EC read/write access through /sys/kernel/debug/ec"
 	default n
@@ -175,9 +158,10 @@
 
 	  To compile this driver as a module, choose M here:
 	  the module will be called processor.
+
 config ACPI_IPMI
 	tristate "IPMI"
-	depends on IPMI_SI && IPMI_HANDLER
+	depends on IPMI_SI
 	default n
 	help
 	  This driver enables the ACPI to access the BMC controller. And it
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce34af..0331f91 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,7 +47,6 @@
 acpi-$(CONFIG_X86)		+= acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)		+= debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)	+= numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y				+= video_detect.o
 endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index f37beaa3..b9f0d5f 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,10 +30,7 @@
 #include <linux/types.h>
 #include <linux/dmi.h>
 #include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
-	{"ACPI0003", 0},
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
 static int ac_sleep_before_get_state_ms;
 
-static struct acpi_driver acpi_ac_driver = {
-	.name = "ac",
-	.class = ACPI_AC_CLASS,
-	.ids = ac_device_ids,
-	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
-	.ops = {
-		.add = acpi_ac_add,
-		.remove = acpi_ac_remove,
-		.notify = acpi_ac_notify,
-		},
-	.drv.pm = &acpi_ac_pm,
-};
-
 struct acpi_ac {
 	struct power_supply charger;
-	struct acpi_device * device;
+	struct acpi_device *adev;
+	struct platform_device *pdev;
 	unsigned long long state;
 };
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_ac_open_fs,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif
-
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-	acpi_status status = AE_OK;
+	acpi_status status;
 
-
-	if (!ac)
-		return -EINVAL;
-
-	status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+	status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+				       &ac->state);
 	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Error reading AC Adapter state"));
 		ac->state = ACPI_AC_STATUS_UNKNOWN;
 		return -ENODEV;
 	}
@@ -160,91 +112,13 @@
 	POWER_SUPPLY_PROP_ONLINE,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
-	struct acpi_ac *ac = seq->private;
-
-
-	if (!ac)
-		return 0;
-
-	if (acpi_ac_get_state(ac)) {
-		seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
-		return 0;
-	}
-
-	seq_puts(seq, "state:                   ");
-	switch (ac->state) {
-	case ACPI_AC_STATUS_OFFLINE:
-		seq_puts(seq, "off-line\n");
-		break;
-	case ACPI_AC_STATUS_ONLINE:
-		seq_puts(seq, "on-line\n");
-		break;
-	default:
-		seq_puts(seq, "unknown\n");
-		break;
-	}
-
-	return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
-	struct proc_dir_entry *entry = NULL;
-
-	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
-			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-	if (!acpi_device_dir(device)) {
-		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-						     acpi_ac_dir);
-		if (!acpi_device_dir(device))
-			return -ENODEV;
-	}
-
-	/* 'state' [R] */
-	entry = proc_create_data(ACPI_AC_FILE_STATE,
-				 S_IRUGO, acpi_device_dir(device),
-				 &acpi_ac_fops, acpi_driver_data(device));
-	if (!entry)
-		return -ENODEV;
-	return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
-	if (acpi_device_dir(device)) {
-		remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
-		remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
-		acpi_device_dir(device) = NULL;
-	}
-
-	return 0;
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
 {
-	struct acpi_ac *ac = acpi_driver_data(device);
-
+	struct acpi_ac *ac = data;
 
 	if (!ac)
 		return;
@@ -267,10 +141,10 @@
 			msleep(ac_sleep_before_get_state_ms);
 
 		acpi_ac_get_state(ac);
-		acpi_bus_generate_netlink_event(device->pnp.device_class,
-						  dev_name(&device->dev), event,
-						  (u32) ac->state);
-		acpi_notifier_call_chain(device, event, (u32) ac->state);
+		acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+						dev_name(&ac->pdev->dev),
+						event, (u32) ac->state);
+		acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 	}
 
@@ -295,53 +169,55 @@
 	{},
 };
 
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
 {
 	int result = 0;
 	struct acpi_ac *ac = NULL;
+	struct acpi_device *adev;
 
-
-	if (!device)
+	if (!pdev)
 		return -EINVAL;
 
+	result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+	if (result)
+		return -ENODEV;
+
 	ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
 	if (!ac)
 		return -ENOMEM;
 
-	ac->device = device;
-	strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
-	strcpy(acpi_device_class(device), ACPI_AC_CLASS);
-	device->driver_data = ac;
+	strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+	strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+	ac->adev = adev;
+	ac->pdev = pdev;
+	platform_set_drvdata(pdev, ac);
 
 	result = acpi_ac_get_state(ac);
 	if (result)
 		goto end;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	result = acpi_ac_add_fs(device);
-#endif
-	if (result)
-		goto end;
-	ac->charger.name = acpi_device_bid(device);
+	ac->charger.name = acpi_device_bid(adev);
 	ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
 	ac->charger.properties = ac_props;
 	ac->charger.num_properties = ARRAY_SIZE(ac_props);
 	ac->charger.get_property = get_ac_property;
-	result = power_supply_register(&ac->device->dev, &ac->charger);
+	result = power_supply_register(&pdev->dev, &ac->charger);
 	if (result)
 		goto end;
 
+	result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+			ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+	if (result) {
+		power_supply_unregister(&ac->charger);
+		goto end;
+	}
 	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-	       acpi_device_name(device), acpi_device_bid(device),
+	       acpi_device_name(adev), acpi_device_bid(adev),
 	       ac->state ? "on-line" : "off-line");
 
-      end:
-	if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-		acpi_ac_remove_fs(device);
-#endif
+end:
+	if (result)
 		kfree(ac);
-	}
 
 	dmi_check_system(ac_dmi_table);
 	return result;
@@ -356,7 +232,7 @@
 	if (!dev)
 		return -EINVAL;
 
-	ac = acpi_driver_data(to_acpi_device(dev));
+	ac = platform_get_drvdata(to_platform_device(dev));
 	if (!ac)
 		return -EINVAL;
 
@@ -368,28 +244,44 @@
 	return 0;
 }
 #endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
 {
-	struct acpi_ac *ac = NULL;
+	struct acpi_ac *ac;
 
-
-	if (!device || !acpi_driver_data(device))
+	if (!pdev)
 		return -EINVAL;
 
-	ac = acpi_driver_data(device);
+	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+			ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
 
+	ac = platform_get_drvdata(pdev);
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_ac_remove_fs(device);
-#endif
 
 	kfree(ac);
 
 	return 0;
 }
 
+static const struct acpi_device_id acpi_ac_match[] = {
+	{ "ACPI0003", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+	.probe          = acpi_ac_probe,
+	.remove         = acpi_ac_remove,
+	.driver         = {
+		.name   = "acpi-ac",
+		.owner  = THIS_MODULE,
+		.pm     = &acpi_ac_pm_ops,
+		.acpi_match_table = ACPI_PTR(acpi_ac_match),
+	},
+};
+
 static int __init acpi_ac_init(void)
 {
 	int result;
@@ -397,34 +289,16 @@
 	if (acpi_disabled)
 		return -ENODEV;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_ac_dir = acpi_lock_ac_dir();
-	if (!acpi_ac_dir)
+	result = platform_driver_register(&acpi_ac_driver);
+	if (result < 0)
 		return -ENODEV;
-#endif
-
-	result = acpi_bus_register_driver(&acpi_ac_driver);
-	if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-		acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-		return -ENODEV;
-	}
 
 	return 0;
 }
 
 static void __exit acpi_ac_exit(void)
 {
-
-	acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
-	return;
+	platform_driver_unregister(&acpi_ac_driver);
 }
-
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
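The AC driver is converted from a legacy struct acpi_driver to an ordinary
platform driver matched by ACPI ID; the "ACPI0003" entry added to
acpi_platform_device_ids in acpi_platform.c further down is what makes the
core enumerate the device as a platform device in the first place. The
skeleton, reduced to its moving parts (my_* names are placeholders):

  static const struct acpi_device_id my_acpi_match[] = {
  	{ "XYZ0001", 0 },	/* hypothetical _HID */
  	{ }
  };
  MODULE_DEVICE_TABLE(acpi, my_acpi_match);

  static struct platform_driver my_driver = {
  	.probe	= my_probe,
  	.remove	= my_remove,
  	.driver	= {
  		.name	= "my-acpi-dev",
  		.owner	= THIS_MODULE,
  		.acpi_match_table = ACPI_PTR(my_acpi_match),
  	},
  };
  module_platform_driver(my_driver);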
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index a6977e1..ac0f52f 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
 /*
  *  acpi_ipmi.c - ACPI IPMI opregion
  *
- *  Copyright (C) 2010 Intel Corporation
- *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *  Copyright (C) 2010, 2013 Intel Corporation
+ *    Author: Zhao Yakui <yakui.zhao@intel.com>
+ *            Lv Zheng <lv.zheng@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -23,60 +24,58 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
 #include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
 #include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
 MODULE_LICENSE("GPL");
 
-#define IPMI_FLAGS_HANDLER_INSTALL	0
-
 #define ACPI_IPMI_OK			0
 #define ACPI_IPMI_TIMEOUT		0x10
 #define ACPI_IPMI_UNKNOWN		0x07
 /* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT			(5 * HZ)
+#define IPMI_TIMEOUT			(5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH	64
 
 struct acpi_ipmi_device {
 	/* the device list attached to driver_data.ipmi_devices */
 	struct list_head head;
+
 	/* the IPMI request message list */
 	struct list_head tx_msg_list;
-	spinlock_t	tx_msg_lock;
+
+	spinlock_t tx_msg_lock;
 	acpi_handle handle;
-	struct pnp_dev *pnp_dev;
-	ipmi_user_t	user_interface;
+	struct device *dev;
+	ipmi_user_t user_interface;
 	int ipmi_ifnum; /* IPMI interface number */
 	long curr_msgid;
-	unsigned long flags;
-	struct ipmi_smi_info smi_data;
+	bool dead;
+	struct kref kref;
 };
 
 struct ipmi_driver_data {
-	struct list_head	ipmi_devices;
-	struct ipmi_smi_watcher	bmc_events;
-	struct ipmi_user_hndl	ipmi_hndlrs;
-	struct mutex		ipmi_lock;
+	struct list_head ipmi_devices;
+	struct ipmi_smi_watcher bmc_events;
+	struct ipmi_user_hndl ipmi_hndlrs;
+	struct mutex ipmi_lock;
+
+	/*
+	 * NOTE: IPMI System Interface Selection
+	 * There is no system interface specified by the IPMI operation
+	 * region access.  We try to select one system interface that has
+	 * an ACPI handle set.  IPMI messages passed from the ACPI code are
+	 * sent to this selected global IPMI system interface.
+	 */
+	struct acpi_ipmi_device *selected_smi;
 };
 
 struct acpi_ipmi_msg {
 	struct list_head head;
+
 	/*
 	 * Generally speaking, the addr type should be SI_ADDR_TYPE and
 	 * the addr channel should be BMC.
@@ -86,30 +85,31 @@
 	 */
 	struct ipmi_addr addr;
 	long tx_msgid;
+
 	/* it is used to track whether the IPMI message is finished */
 	struct completion tx_complete;
+
 	struct kernel_ipmi_msg tx_message;
-	int	msg_done;
-	/* tx data . And copy it from ACPI object buffer */
-	u8	tx_data[64];
-	int	tx_len;
-	u8	rx_data[64];
-	int	rx_len;
+	int msg_done;
+
+	/* tx/rx data, copied from/to the ACPI object buffer */
+	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+	u8 rx_len;
+
 	struct acpi_ipmi_device *device;
+	struct kref kref;
 };
 
 /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
 struct acpi_ipmi_buffer {
 	u8 status;
 	u8 length;
-	u8 data[64];
+	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
 };
 
 static void ipmi_register_bmc(int iface, struct device *dev);
 static void ipmi_bmc_gone(int iface);
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
 
 static struct ipmi_driver_data driver_data = {
 	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -121,29 +121,142 @@
 	.ipmi_hndlrs = {
 		.ipmi_recv_hndl = ipmi_msg_handler,
 	},
+	.ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
 };
 
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
 {
-	struct acpi_ipmi_msg *ipmi_msg;
-	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+	struct acpi_ipmi_device *ipmi_device;
+	int err;
+	ipmi_user_t user;
 
-	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
-	if (!ipmi_msg)	{
-		dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+	if (!ipmi_device)
+		return NULL;
+
+	kref_init(&ipmi_device->kref);
+	INIT_LIST_HEAD(&ipmi_device->head);
+	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+	spin_lock_init(&ipmi_device->tx_msg_lock);
+	ipmi_device->handle = handle;
+	ipmi_device->dev = get_device(dev);
+	ipmi_device->ipmi_ifnum = iface;
+
+	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+			       ipmi_device, &user);
+	if (err) {
+		put_device(dev);
+		kfree(ipmi_device);
 		return NULL;
 	}
+	ipmi_device->user_interface = user;
+
+	return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+	ipmi_destroy_user(ipmi_device->user_interface);
+	put_device(ipmi_device->dev);
+	kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+	struct acpi_ipmi_device *ipmi =
+		container_of(kref, struct acpi_ipmi_device, kref);
+
+	ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+	list_del(&ipmi_device->head);
+	if (driver_data.selected_smi == ipmi_device)
+		driver_data.selected_smi = NULL;
+
+	/*
+	 * Always set the dead flag after deleting from the list;
+	 * otherwise the list_for_each_entry() code must be changed.
+	 */
+	ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+	struct acpi_ipmi_device *ipmi_device = NULL;
+
+	mutex_lock(&driver_data.ipmi_lock);
+	if (driver_data.selected_smi) {
+		ipmi_device = driver_data.selected_smi;
+		kref_get(&ipmi_device->kref);
+	}
+	mutex_unlock(&driver_data.ipmi_lock);
+
+	return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+	kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
+{
+	struct acpi_ipmi_device *ipmi;
+	struct acpi_ipmi_msg *ipmi_msg;
+
+	ipmi = acpi_ipmi_dev_get();
+	if (!ipmi)
+		return NULL;
+
+	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
+	if (!ipmi_msg) {
+		acpi_ipmi_dev_put(ipmi);
+		return NULL;
+	}
+
+	kref_init(&ipmi_msg->kref);
 	init_completion(&ipmi_msg->tx_complete);
 	INIT_LIST_HEAD(&ipmi_msg->head);
 	ipmi_msg->device = ipmi;
+	ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
 	return ipmi_msg;
 }
 
-#define		IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
-#define		IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
-				acpi_physical_address address,
-				acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+	acpi_ipmi_dev_put(tx_msg->device);
+	kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+	struct acpi_ipmi_msg *tx_msg =
+		container_of(kref, struct acpi_ipmi_msg, kref);
+
+	ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+	kref_get(&tx_msg->kref);
+
+	return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+	kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+				    acpi_physical_address address,
+				    acpi_integer *value)
 {
 	struct kernel_ipmi_msg *msg;
 	struct acpi_ipmi_buffer *buffer;
@@ -151,21 +264,31 @@
 	unsigned long flags;
 
 	msg = &tx_msg->tx_message;
+
 	/*
 	 * IPMI network function and command are encoded in the address
 	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
 	 */
 	msg->netfn = IPMI_OP_RGN_NETFN(address);
 	msg->cmd = IPMI_OP_RGN_CMD(address);
-	msg->data = tx_msg->tx_data;
+	msg->data = tx_msg->data;
+
 	/*
 	 * value is the parameter passed by the IPMI opregion space handler.
 	 * It points to the IPMI request message buffer
 	 */
 	buffer = (struct acpi_ipmi_buffer *)value;
+
 	/* copy the tx message data */
+	if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+		dev_WARN_ONCE(tx_msg->device->dev, true,
+			      "Unexpected request (msg len %d).\n",
+			      buffer->length);
+		return -EINVAL;
+	}
 	msg->data_len = buffer->length;
-	memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+	memcpy(tx_msg->data, buffer->data, msg->data_len);
+
 	/*
 	 * now the default type is SYSTEM_INTERFACE and channel type is BMC.
 	 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -179,14 +302,17 @@
 
 	/* Get the msgid */
 	device = tx_msg->device;
+
 	spin_lock_irqsave(&device->tx_msg_lock, flags);
 	device->curr_msgid++;
 	tx_msg->tx_msgid = device->curr_msgid;
 	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+	return 0;
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
-		acpi_integer *value, int rem_time)
+				      acpi_integer *value)
 {
 	struct acpi_ipmi_buffer *buffer;
 
@@ -195,110 +321,158 @@
 	 * IPMI message returned by IPMI command.
 	 */
 	buffer = (struct acpi_ipmi_buffer *)value;
-	if (!rem_time && !msg->msg_done) {
-		buffer->status = ACPI_IPMI_TIMEOUT;
-		return;
-	}
+
 	/*
-	 * If the flag of msg_done is not set or the recv length is zero, it
-	 * means that the IPMI command is not executed correctly.
-	 * The status code will be ACPI_IPMI_UNKNOWN.
+	 * If the msg_done flag is not set, the IPMI command was not
+	 * executed correctly.
 	 */
-	if (!msg->msg_done || !msg->rx_len) {
-		buffer->status = ACPI_IPMI_UNKNOWN;
+	buffer->status = msg->msg_done;
+	if (msg->msg_done != ACPI_IPMI_OK)
 		return;
-	}
+
 	/*
 	 * If the IPMI response message is obtained correctly, the status code
 	 * will be ACPI_IPMI_OK
 	 */
-	buffer->status = ACPI_IPMI_OK;
 	buffer->length = msg->rx_len;
-	memcpy(buffer->data, msg->rx_data, msg->rx_len);
+	memcpy(buffer->data, msg->data, msg->rx_len);
 }
 
 static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 {
-	struct acpi_ipmi_msg *tx_msg, *temp;
-	int count = HZ / 10;
-	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+	struct acpi_ipmi_msg *tx_msg;
+	unsigned long flags;
 
-	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+	/*
+	 * NOTE: On-going ipmi_recv_msg
+	 * ipmi_msg_handler() may still be invoked by ipmi_si after
+	 * flushing, but it is safe to do a fast flush on module_exit()
+	 * without waiting for all ipmi_recv_msg(s) to complete in
+	 * ipmi_msg_handler(), since ipmi_si ensures that all
+	 * ipmi_recv_msg(s) are freed after ipmi_destroy_user() is invoked.
+	 */
+	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+	while (!list_empty(&ipmi->tx_msg_list)) {
+		tx_msg = list_first_entry(&ipmi->tx_msg_list,
+					  struct acpi_ipmi_msg,
+					  head);
+		list_del(&tx_msg->head);
+		spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
 		/* wake up the sleep thread on the Tx msg */
 		complete(&tx_msg->tx_complete);
+		acpi_ipmi_msg_put(tx_msg);
+		spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
 	}
+	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
 
-	/* wait for about 100ms to flush the tx message list */
-	while (count--) {
-		if (list_empty(&ipmi->tx_msg_list))
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+			       struct acpi_ipmi_msg *msg)
+{
+	struct acpi_ipmi_msg *tx_msg, *temp;
+	bool msg_found = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+		if (msg == tx_msg) {
+			msg_found = true;
+			list_del(&tx_msg->head);
 			break;
-		schedule_timeout(1);
+		}
 	}
-	if (!list_empty(&ipmi->tx_msg_list))
-		dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+	if (msg_found)
+		acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
 	struct acpi_ipmi_device *ipmi_device = user_msg_data;
-	int msg_found = 0;
-	struct acpi_ipmi_msg *tx_msg;
-	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+	bool msg_found = false;
+	struct acpi_ipmi_msg *tx_msg, *temp;
+	struct device *dev = ipmi_device->dev;
 	unsigned long flags;
 
 	if (msg->user != ipmi_device->user_interface) {
-		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
-			"returned user %p, expected user %p\n",
-			msg->user, ipmi_device->user_interface);
-		ipmi_free_recv_msg(msg);
-		return;
+		dev_warn(dev,
+			 "Unexpected response is returned. returned user %p, expected user %p\n",
+			 msg->user, ipmi_device->user_interface);
+		goto out_msg;
 	}
+
 	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+	list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
-			msg_found = 1;
+			msg_found = true;
+			list_del(&tx_msg->head);
 			break;
 		}
 	}
-
 	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+
 	if (!msg_found) {
-		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
-			"returned.\n", msg->msgid);
-		ipmi_free_recv_msg(msg);
-		return;
+		dev_warn(dev,
+			 "Unexpected response (msg id %ld) is returned.\n",
+			 msg->msgid);
+		goto out_msg;
 	}
 
-	if (msg->msg.data_len) {
-		/* copy the response data to Rx_data buffer */
-		memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
-		tx_msg->rx_len = msg->msg.data_len;
-		tx_msg->msg_done = 1;
+	/* copy the response data to Rx_data buffer */
+	if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+		dev_WARN_ONCE(dev, true,
+			      "Unexpected response (msg len %d).\n",
+			      msg->msg.data_len);
+		goto out_comp;
 	}
+
+	/* a response that carries only a completion code is an error msg */
+	if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+	    msg->msg.data_len == 1) {
+		if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+			dev_WARN_ONCE(dev, true,
+				      "Unexpected response (timeout).\n");
+			tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+		}
+		goto out_comp;
+	}
+
+	tx_msg->rx_len = msg->msg.data_len;
+	memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+	tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
 	complete(&tx_msg->tx_complete);
+	acpi_ipmi_msg_put(tx_msg);
+out_msg:
 	ipmi_free_recv_msg(msg);
-};
+}
 
 static void ipmi_register_bmc(int iface, struct device *dev)
 {
 	struct acpi_ipmi_device *ipmi_device, *temp;
-	struct pnp_dev *pnp_dev;
-	ipmi_user_t		user;
 	int err;
 	struct ipmi_smi_info smi_data;
 	acpi_handle handle;
 
 	err = ipmi_get_smi_info(iface, &smi_data);
-
 	if (err)
 		return;
 
-	if (smi_data.addr_src != SI_ACPI) {
-		put_device(smi_data.dev);
-		return;
-	}
-
+	if (smi_data.addr_src != SI_ACPI)
+		goto err_ref;
 	handle = smi_data.addr_info.acpi_info.acpi_handle;
+	if (!handle)
+		goto err_ref;
+
+	ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+	if (!ipmi_device) {
+		dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+		goto err_ref;
+	}
 
 	mutex_lock(&driver_data.ipmi_lock);
 	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -307,34 +481,20 @@
 		 * to the device list, don't add it again.
 		 */
 		if (temp->handle == handle)
-			goto out;
+			goto err_lock;
 	}
-
-	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
-	if (!ipmi_device)
-		goto out;
-
-	pnp_dev = to_pnp_dev(smi_data.dev);
-	ipmi_device->handle = handle;
-	ipmi_device->pnp_dev = pnp_dev;
-
-	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
-					ipmi_device, &user);
-	if (err) {
-		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
-		kfree(ipmi_device);
-		goto out;
-	}
-	acpi_add_ipmi_device(ipmi_device);
-	ipmi_device->user_interface = user;
-	ipmi_device->ipmi_ifnum = iface;
+	if (!driver_data.selected_smi)
+		driver_data.selected_smi = ipmi_device;
+	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
 	mutex_unlock(&driver_data.ipmi_lock);
-	memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+	put_device(smi_data.dev);
 	return;
 
-out:
+err_lock:
 	mutex_unlock(&driver_data.ipmi_lock);
+	ipmi_dev_release(ipmi_device);
+err_ref:
 	put_device(smi_data.dev);
 	return;
 }
@@ -342,23 +502,29 @@
 static void ipmi_bmc_gone(int iface)
 {
 	struct acpi_ipmi_device *ipmi_device, *temp;
+	bool dev_found = false;
 
 	mutex_lock(&driver_data.ipmi_lock);
 	list_for_each_entry_safe(ipmi_device, temp,
-				&driver_data.ipmi_devices, head) {
-		if (ipmi_device->ipmi_ifnum != iface)
-			continue;
-
-		acpi_remove_ipmi_device(ipmi_device);
-		put_device(ipmi_device->smi_data.dev);
-		kfree(ipmi_device);
-		break;
+				 &driver_data.ipmi_devices, head) {
+		if (ipmi_device->ipmi_ifnum == iface) {
+			dev_found = true;
+			__ipmi_dev_kill(ipmi_device);
+			break;
+		}
 	}
+	if (!driver_data.selected_smi)
+		driver_data.selected_smi = list_first_entry_or_null(
+					&driver_data.ipmi_devices,
+					struct acpi_ipmi_device, head);
 	mutex_unlock(&driver_data.ipmi_lock);
+
+	if (dev_found) {
+		ipmi_flush_tx_msg(ipmi_device);
+		acpi_ipmi_dev_put(ipmi_device);
+	}
 }
-/* --------------------------------------------------------------------------
- *			Address Space Management
- * -------------------------------------------------------------------------- */
+
 /*
  * This is the IPMI opregion space handler.
  * @function: indicates the read/write. In fact as the IPMI message is driven
@@ -371,17 +537,17 @@
  *	     the response IPMI message returned by IPMI command.
  * @handler_context: IPMI device context.
  */
-
 static acpi_status
 acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
-		      u32 bits, acpi_integer *value,
-		      void *handler_context, void *region_context)
+			u32 bits, acpi_integer *value,
+			void *handler_context, void *region_context)
 {
 	struct acpi_ipmi_msg *tx_msg;
-	struct acpi_ipmi_device *ipmi_device = handler_context;
-	int err, rem_time;
+	struct acpi_ipmi_device *ipmi_device;
+	int err;
 	acpi_status status;
 	unsigned long flags;
+
 	/*
 	 * IPMI opregion message.
 	 * IPMI message is firstly written to the BMC and system software
@@ -391,118 +557,75 @@
 	if ((function & ACPI_IO_MASK) == ACPI_READ)
 		return AE_TYPE;
 
-	if (!ipmi_device->user_interface)
-		return AE_NOT_EXIST;
-
-	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
+	tx_msg = ipmi_msg_alloc();
 	if (!tx_msg)
-		return AE_NO_MEMORY;
+		return AE_NOT_EXIST;
+	ipmi_device = tx_msg->device;
 
-	acpi_format_ipmi_msg(tx_msg, address, value);
+	if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+		ipmi_msg_release(tx_msg);
+		return AE_TYPE;
+	}
+
+	acpi_ipmi_msg_get(tx_msg);
+	mutex_lock(&driver_data.ipmi_lock);
+	/* Do not add a tx_msg that can not be flushed. */
+	if (ipmi_device->dead) {
+		mutex_unlock(&driver_data.ipmi_lock);
+		ipmi_msg_release(tx_msg);
+		return AE_NOT_EXIST;
+	}
 	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
 	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+	mutex_unlock(&driver_data.ipmi_lock);
+
 	err = ipmi_request_settime(ipmi_device->user_interface,
-					&tx_msg->addr,
-					tx_msg->tx_msgid,
-					&tx_msg->tx_message,
-					NULL, 0, 0, 0);
+				   &tx_msg->addr,
+				   tx_msg->tx_msgid,
+				   &tx_msg->tx_message,
+				   NULL, 0, 0, IPMI_TIMEOUT);
 	if (err) {
 		status = AE_ERROR;
-		goto end_label;
+		goto out_msg;
 	}
-	rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
-					IPMI_TIMEOUT);
-	acpi_format_ipmi_response(tx_msg, value, rem_time);
+	wait_for_completion(&tx_msg->tx_complete);
+
+	acpi_format_ipmi_response(tx_msg, value);
 	status = AE_OK;
 
-end_label:
-	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-	list_del(&tx_msg->head);
-	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
-	kfree(tx_msg);
+out_msg:
+	ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+	acpi_ipmi_msg_put(tx_msg);
 	return status;
 }
 
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
-	if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
-		return;
-
-	acpi_remove_address_space_handler(ipmi->handle,
-				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
-	clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
-{
-	acpi_status status;
-
-	if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
-		return 0;
-
-	status = acpi_install_address_space_handler(ipmi->handle,
-						    ACPI_ADR_SPACE_IPMI,
-						    &acpi_ipmi_space_handler,
-						    NULL, ipmi);
-	if (ACPI_FAILURE(status)) {
-		struct pnp_dev *pnp_dev = ipmi->pnp_dev;
-		dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
-			"handle\n");
-		return -EINVAL;
-	}
-	set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-	return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
-	INIT_LIST_HEAD(&ipmi_device->head);
-
-	spin_lock_init(&ipmi_device->tx_msg_lock);
-	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
-	ipmi_install_space_handler(ipmi_device);
-
-	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-	/*
-	 * If the IPMI user interface is created, it should be
-	 * destroyed.
-	 */
-	if (ipmi_device->user_interface) {
-		ipmi_destroy_user(ipmi_device->user_interface);
-		ipmi_device->user_interface = NULL;
-	}
-	/* flush the Tx_msg list */
-	if (!list_empty(&ipmi_device->tx_msg_list))
-		ipmi_flush_tx_msg(ipmi_device);
-
-	list_del(&ipmi_device->head);
-	ipmi_remove_space_handler(ipmi_device);
-}
-
 static int __init acpi_ipmi_init(void)
 {
-	int result = 0;
+	int result;
+	acpi_status status;
 
 	if (acpi_disabled)
-		return result;
+		return 0;
 
-	mutex_init(&driver_data.ipmi_lock);
-
+	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+						    ACPI_ADR_SPACE_IPMI,
+						    &acpi_ipmi_space_handler,
+						    NULL, NULL);
+	if (ACPI_FAILURE(status)) {
+		pr_warn("Can't register IPMI opregion space handler\n");
+		return -EINVAL;
+	}
 	result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+	if (result)
+		pr_err("Can't register IPMI system interface watcher\n");
 
 	return result;
 }
 
 static void __exit acpi_ipmi_exit(void)
 {
-	struct acpi_ipmi_device *ipmi_device, *temp;
+	struct acpi_ipmi_device *ipmi_device;
 
 	if (acpi_disabled)
 		return;
@@ -516,13 +639,22 @@
 	 * handler and free it.
 	 */
 	mutex_lock(&driver_data.ipmi_lock);
-	list_for_each_entry_safe(ipmi_device, temp,
-				&driver_data.ipmi_devices, head) {
-		acpi_remove_ipmi_device(ipmi_device);
-		put_device(ipmi_device->smi_data.dev);
-		kfree(ipmi_device);
+	while (!list_empty(&driver_data.ipmi_devices)) {
+		ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+					       struct acpi_ipmi_device,
+					       head);
+		__ipmi_dev_kill(ipmi_device);
+		mutex_unlock(&driver_data.ipmi_lock);
+
+		ipmi_flush_tx_msg(ipmi_device);
+		acpi_ipmi_dev_put(ipmi_device);
+
+		mutex_lock(&driver_data.ipmi_lock);
 	}
 	mutex_unlock(&driver_data.ipmi_lock);
+	acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+					  ACPI_ADR_SPACE_IPMI,
+					  &acpi_ipmi_space_handler);
 }
 
 module_init(acpi_ipmi_init);
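The rework above ties both device and message lifetimes to krefs so that a
BMC can go away while opregion accesses are still in flight. For reference,
the canonical kref pattern it builds on, shown with a hypothetical object:

  struct my_obj {
  	struct kref kref;
  	/* payload */
  };

  static void my_obj_release(struct kref *kref)
  {
  	struct my_obj *obj = container_of(kref, struct my_obj, kref);

  	kfree(obj);
  }

  kref_init(&obj->kref);			/* refcount starts at 1 */
  kref_get(&obj->kref);			/* one get per additional user */
  kref_put(&obj->kref, my_obj_release);	/* release runs when it drops to 0 */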
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fb78bb9..d396101 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -30,6 +30,7 @@
 /* Offsets relative to LPSS_PRIVATE_OFFSET */
 #define LPSS_GENERAL			0x08
 #define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
 #define LPSS_SW_LTR			0x10
 #define LPSS_AUTO_LTR			0x14
 #define LPSS_TX_INT			0x20
@@ -68,11 +69,16 @@
 
 static void lpss_uart_setup(struct lpss_private_data *pdata)
 {
-	unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+	unsigned int offset;
 	u32 reg;
 
-	reg = readl(pdata->mmio_base + tx_int_offset);
-	writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+	reg = readl(pdata->mmio_base + offset);
+	writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+	offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+	reg = readl(pdata->mmio_base + offset);
+	writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
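
lpss_uart_setup() now performs two read-modify-write updates: one to mask TX interrupts, one to set the new RTS override bit in the GENERAL register. The underlying idiom, as a standalone sketch (mmio_set_bits is a hypothetical helper):

    #include <linux/io.h>

    static void mmio_set_bits(void __iomem *base, unsigned int offset, u32 bits)
    {
	    u32 reg = readl(base + offset);    /* read the current value */

	    writel(reg | bits, base + offset); /* write it back with bits set */
    }
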
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 999adb5..551dad7 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -152,8 +152,9 @@
 	unsigned long long current_status;
 
 	/* Get device present/absent information from the _STA */
-	if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
-					       NULL, &current_status)))
+	if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+					       METHOD_NAME__STA, NULL,
+					       &current_status)))
 		return -ENODEV;
 	/*
 	 * Check for device status. Device should be
@@ -281,7 +282,7 @@
 		if (!info->enabled)
 			continue;
 
-		if (nid < 0)
+		if (nid == NUMA_NO_NODE)
 			nid = memory_add_physaddr_to_nid(info->start_addr);
 
 		acpi_unbind_memory_blocks(info, handle);
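
Comparing against NUMA_NO_NODE states the intent directly instead of relying on the sentinel being negative. In isolation (a sketch reusing the call from the hunk above; NUMA_NO_NODE comes from linux/numa.h):

    int nid = NUMA_NO_NODE;

    if (nid == NUMA_NO_NODE)
	    nid = memory_add_physaddr_to_nid(info->start_addr);
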
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 1bde127..8a4cfc7 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,6 +29,13 @@
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
 	{ "PNP0D40" },
+	{ "ACPI0003" },
+	{ "VPC2004" },
+	{ "BCM4752" },
+
+	/* Intel Smart Sound Technology */
+	{ "INT33C8" },
+	{ "80860F28" },
 
 	{ }
 };
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f29e06e..3c1d6b0 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -140,15 +140,11 @@
 	return 0;
 }
 
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
 {
 	int result = 0;
 	struct pci_dev *dev = NULL;
 
-
-	if (!pr)
-		return -EINVAL;
-
 	/*
 	 * PIIX4
 	 */
@@ -181,7 +177,7 @@
 	cpu_maps_update_begin();
 	cpu_hotplug_begin();
 
-	ret = acpi_map_lsapic(pr->handle, &pr->id);
+	ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
 	if (ret)
 		goto out;
 
@@ -219,11 +215,9 @@
 	int cpu_index, device_declaration = 0;
 	acpi_status status = AE_OK;
 	static int cpu0_initialized;
+	unsigned long long value;
 
-	if (num_online_cpus() > 1)
-		errata.smp = TRUE;
-
-	acpi_processor_errata(pr);
+	acpi_processor_errata();
 
 	/*
 	 * Check to see if we have bus mastering arbitration control.  This
@@ -247,18 +241,12 @@
 			return -ENODEV;
 		}
 
-		/*
-		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
-		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
-		 *      arch/xxx/acpi.c
-		 */
 		pr->acpi_id = object.processor.proc_id;
 	} else {
 		/*
 		 * Declared with "Device" statement; match _UID.
 		 * Note that we don't handle string _UIDs yet.
 		 */
-		unsigned long long value;
 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
 						NULL, &value);
 		if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@
 		device_declaration = 1;
 		pr->acpi_id = value;
 	}
-	cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+	pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+					pr->acpi_id);
+	cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
 
 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
 	if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@
 	 * ensure we get the right value in the "physical id" field
 	 * of /proc/cpuinfo
 	 */
-	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
 	if (ACPI_SUCCESS(status))
-		arch_fix_phys_package_id(pr->id, object.integer.value);
+		arch_fix_phys_package_id(pr->id, value);
 
 	return 0;
 }
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 9feba08..a9fd0b8 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -114,10 +114,12 @@
 			      acpi_db_generate_gpe(char *gpe_arg,
 						   char *block_arg))
 
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+
 /*
  * dbconvert - miscellaneous conversion routines
  */
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
 
 acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
 
@@ -154,6 +156,8 @@
 
 void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
 
+void acpi_db_dump_namespace_paths(void);
+
 void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
 
 acpi_status acpi_db_find_name_in_namespace(char *name_arg);
@@ -240,6 +244,8 @@
 
 char *acpi_db_get_from_history(char *command_num_arg);
 
+char *acpi_db_get_history_by_index(u32 command_num);
+
 /*
  * dbinput - user front-end to the AML debugger
  */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ab0e977..41abe55 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -71,7 +71,8 @@
 
 ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
 			    acpi_ev_acquire_global_lock(u16 timeout))
- ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
  acpi_status acpi_ev_remove_global_lock_handler(void);
 
 /*
@@ -242,11 +243,11 @@
  */
 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
 
+u32 acpi_ev_sci_dispatch(void);
+
 u32 acpi_ev_install_sci_handler(void);
 
-acpi_status acpi_ev_remove_sci_handler(void);
-
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
 
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif				/* __ACEVENTS_H__  */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 90e846f..e9f1fc7 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -269,6 +269,7 @@
 ACPI_EXTERN void *acpi_gbl_table_handler_context;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
 ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
 
 /* Owner ID support */
 
@@ -405,7 +406,9 @@
 
 /* Event counters */
 
+ACPI_EXTERN u32 acpi_method_count;
 ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_sci_count;
 ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
 
 /* Support for dynamic control method tracing mechanism */
@@ -445,13 +448,6 @@
 ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
 ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
 ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
 ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
 ACPI_EXTERN char *acpi_gbl_db_buffer;
 ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +455,16 @@
 ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
 
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
 /*
  * Statistic globals
  */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0ed0066..53ed1a8 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -398,6 +398,14 @@
  *
  ****************************************************************************/
 
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+	struct acpi_sci_handler_info *next;
+	acpi_sci_handler address;	/* Address of handler */
+	void *context;		/* Context to be passed to handler */
+};
+
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
 struct acpi_gpe_handler_info {
@@ -1064,7 +1072,7 @@
 	char *name;
 	u32 flags;
 	u32 num_loops;
-	char pathname[128];
+	char pathname[ACPI_DB_LINE_BUFFER_SIZE];
 	char **args;
 	acpi_object_type *types;
 
@@ -1086,6 +1094,7 @@
 	u32 objects;
 };
 
+#define ACPI_DB_DISABLE_OUTPUT          0x00
 #define ACPI_DB_REDIRECTABLE_OUTPUT     0x01
 #define ACPI_DB_CONSOLE_OUTPUT          0x02
 #define ACPI_DB_DUPLICATE_OUTPUT        0x03
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 530a2f8..2a86c65 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -410,37 +410,6 @@
 #endif
 
 /*
- * Memory allocation tracking (DEBUG ONLY)
- */
-#define ACPI_MEM_PARAMETERS         _COMPONENT, _acpi_module_name, __LINE__
-
-#ifndef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Memory allocation */
-
-#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a)            acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_FREE
-#define ACPI_FREE(a)                acpi_os_free(a)
-#endif
-#define ACPI_MEM_TRACKING(a)
-
-#else
-
-/* Memory allocation */
-
-#define ACPI_ALLOCATE(a)            acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_FREE(a)                acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
-#define ACPI_MEM_TRACKING(a)        a
-
-#endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
-
-/*
  * Macros used for ACPICA utilities only
  */
 
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40b04bd..e6138ac 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -213,6 +213,12 @@
 		     u8 display_type,
 		     u32 max_depth,
 		     acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+			  u8 display_type,
+			  u32 max_depth,
+			  acpi_owner_id owner_id, acpi_handle start_handle);
 #endif				/* ACPI_FUTURE_USAGE */
 
 /*
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index d5a62a6..be8180c 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -628,6 +628,17 @@
 
 void acpi_ut_repair_name(char *name);
 
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
+
+u8
+acpi_ut_safe_strncat(char *dest,
+		     acpi_size dest_size,
+		     char *source, acpi_size max_transfer_length);
+#endif
+
 /*
  * utmutex - mutex support
  */
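
The new bounded string helpers return a boolean overflow indication rather than a pointer. A sketch of the likely semantics, inferred from the declarations above (the body is an assumption, not the ACPICA implementation):

    u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
    {
	    if (strlen(source) >= dest_size) {
		    return (TRUE);  /* would overflow: refuse and report */
	    }

	    strcpy(dest, source);
	    return (FALSE);         /* copied OK */
    }
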
@@ -652,12 +663,6 @@
 acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
 			  acpi_size required_length);
 
-void *acpi_ut_allocate(acpi_size size,
-		       u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
-			      u32 component, const char *module, u32 line);
-
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
 void *acpi_ut_allocate_and_track(acpi_size size,
 				 u32 component, const char *module, u32 line);
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index fb09b08..afdc6df 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -158,7 +158,7 @@
 	walk_state->deferred_node = node;
 	status = acpi_ps_parse_aml(walk_state);
 
-      cleanup:
+cleanup:
 	acpi_ps_delete_parse_tree(op);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index d4bfe7b..2d4c073 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -259,7 +259,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Remove local reference to the object */
 
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index a9ffd44..81a78ba 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -292,9 +292,10 @@
 	 * reentered one more time (even if it is the same thread)
 	 */
 	obj_desc->method.thread_count++;
+	acpi_method_count++;
 	return_ACPI_STATUS(status);
 
-      cleanup:
+cleanup:
 	/* On error, must release the method mutex (if present) */
 
 	if (obj_desc->method.mutex) {
@@ -424,7 +425,7 @@
 
 	return_ACPI_STATUS(status);
 
-      cleanup:
+cleanup:
 
 	/* On error, we must terminate the method properly */
 
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 63f0d22..b1746a6 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -240,7 +240,7 @@
 		return_ACPI_STATUS(status);
 	}
 
-      exit:
+exit:
 	*obj_desc_ptr = obj_desc;
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 1fc1ff1..5205edc 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -257,7 +257,7 @@
 	    (buffer_desc->common.reference_count +
 	     obj_desc->common.reference_count);
 
-      cleanup:
+cleanup:
 
 	/* Always delete the operands */
 
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index c666fc0..ade44e4 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -299,7 +299,7 @@
 		goto result_used;
 	}
 
-      result_used:
+result_used:
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 			  "Result of [%s] used by Parent [%s] Op=%p\n",
 			  acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -308,7 +308,7 @@
 
 	return_UINT8(TRUE);
 
-      result_not_used:
+result_not_used:
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 			  "Result of [%s] not used by Parent [%s] Op=%p\n",
 			  acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -752,7 +752,7 @@
 
 	return_ACPI_STATUS(status);
 
-      cleanup:
+cleanup:
 	/*
 	 * We must undo everything done above; meaning that we must
 	 * pop everything off of the operand stack and delete those
@@ -851,7 +851,7 @@
 		goto exit;
 	}
 
-      push_result:
+push_result:
 
 	walk_state->result_obj = new_obj_desc;
 
@@ -863,7 +863,7 @@
 		op->common.flags |= ACPI_PARSEOP_IN_STACK;
 	}
 
-      exit:
+exit:
 
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 151d924..1bbb22f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -170,7 +170,7 @@
 
 	(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
 
-      cleanup:
+cleanup:
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
 			  walk_state->control_state->common.value,
@@ -335,7 +335,7 @@
 
 	return_ACPI_STATUS(status);
 
-      error_exit:
+error_exit:
 	status = acpi_ds_method_error(status, walk_state);
 	return_ACPI_STATUS(status);
 }
@@ -722,7 +722,7 @@
 		walk_state->result_obj = NULL;
 	}
 
-      cleanup:
+cleanup:
 
 	if (walk_state->result_obj) {
 
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b1f8f47..7f569d5 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -728,7 +728,7 @@
 		break;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Remove the Node pushed at the very beginning */
 
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fdb0a76..4c67193 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -173,7 +173,7 @@
 
 	acpi_gbl_global_lock_pending = FALSE;
 
-      cleanup_and_exit:
+cleanup_and_exit:
 
 	acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
 	return (ACPI_INTERRUPT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index c8a1f7d..a9cb4a1 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -458,7 +458,7 @@
 		gpe_block = gpe_block->next;
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return (int_status);
@@ -522,6 +522,7 @@
 
 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(local_gpe_event_info);
 		return_VOID;
 	}
 
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index c1aa1ed..a9e76bc 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -111,7 +111,7 @@
 	gpe_block->xrupt_block = gpe_xrupt_block;
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 
-      unlock_and_exit:
+unlock_and_exit:
 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
@@ -178,7 +178,7 @@
 	ACPI_FREE(gpe_block->event_info);
 	ACPI_FREE(gpe_block);
 
-      unlock_and_exit:
+unlock_and_exit:
 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
@@ -302,7 +302,7 @@
 
 	return_ACPI_STATUS(AE_OK);
 
-      error_exit:
+error_exit:
 	if (gpe_register_info) {
 		ACPI_FREE(gpe_register_info);
 	}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7842700..a3e2f38 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -203,7 +203,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(AE_OK);
 }
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index b24dbb8..d3f5e1e 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -101,7 +101,7 @@
 		gpe_xrupt_info = gpe_xrupt_info->next;
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
@@ -196,7 +196,7 @@
  *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
  *
- * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
+ * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
  *
  * RETURN:      A GPE interrupt block
  *
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 068af96..e315731 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -129,7 +129,7 @@
 		}
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
@@ -531,6 +531,6 @@
 					acpi_ev_install_handler, NULL,
 					handler_obj, NULL);
 
-      unlock_and_exit:
+unlock_and_exit:
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1b111ef..a568754 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -264,13 +264,6 @@
 
 		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
 
-		/* Remove SCI handler */
-
-		status = acpi_ev_remove_sci_handler();
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
-		}
-
 		status = acpi_ev_remove_global_lock_handler();
 		if (ACPI_FAILURE(status)) {
 			ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@
 		acpi_gbl_events_initialized = FALSE;
 	}
 
+	/* Remove SCI handlers */
+
+	status = acpi_ev_remove_all_sci_handlers();
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+	}
+
 	/* Deallocate all handler objects installed within GPE info structs */
 
 	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cea14d6..144cbb9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -217,16 +217,11 @@
 		if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
 			region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
 
-			if (region_obj2->extra.region_context) {
-
-				/* The handler for this region was already installed */
-
-				ACPI_FREE(region_context);
-			} else {
-				/*
-				 * Save the returned context for use in all accesses to
-				 * this particular region
-				 */
+			/*
+			 * Save the returned context for use in all accesses to
+			 * the handler for this particular region
+			 */
+			if (!(region_obj2->extra.region_context)) {
 				region_obj2->extra.region_context =
 				    region_context;
 			}
@@ -402,6 +397,14 @@
 						 handler_obj->address_space.
 						 context, region_context);
 
+				/*
+				 * region_context should have been released by the deactivate
+				 * operation. We don't need access to it anymore here.
+				 */
+				if (region_context) {
+					*region_context = NULL;
+				}
+
 				/* Init routine may fail, just ignore errors */
 
 				if (ACPI_FAILURE(status)) {
@@ -570,10 +573,10 @@
 	status = acpi_ns_evaluate(info);
 	acpi_ut_remove_reference(args[1]);
 
-      cleanup2:
+cleanup2:
 	acpi_ut_remove_reference(args[0]);
 
-      cleanup1:
+cleanup1:
 	ACPI_FREE(info);
 	return_ACPI_STATUS(status);
 }
@@ -758,7 +761,7 @@
 
 	status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
 
-      exit:
+exit:
 	/* We ignore all errors from above, don't care */
 
 	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index b905acf..9e9e345 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -54,6 +54,50 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ev_sci_dispatch
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+	struct acpi_sci_handler_info *sci_handler;
+	acpi_cpu_flags flags;
+	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+	ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+	/* Are there any host-installed SCI handlers? */
+
+	if (!acpi_gbl_sci_handler_list) {
+		return (int_status);
+	}
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Invoke all host-installed SCI handlers */
+
+	sci_handler = acpi_gbl_sci_handler_list;
+	while (sci_handler) {
+
+		/* Invoke the installed handler (at interrupt level) */
+
+		int_status |= sci_handler->address(sci_handler->context);
+
+		sci_handler = sci_handler->next;
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return (int_status);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ev_sci_xrupt_handler
  *
  * PARAMETERS:  context   - Calling Context
@@ -89,6 +133,11 @@
 	 */
 	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
+	/* Invoke all host-installed SCI handlers */
+
+	interrupt_handled |= acpi_ev_sci_dispatch();
+
+	acpi_sci_count++;
 	return_UINT32(interrupt_handled);
 }
 
@@ -112,14 +161,13 @@
 	ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
 
 	/*
-	 * We are guaranteed by the ACPI CA initialization/shutdown code that
+	 * We are guaranteed by the ACPICA initialization/shutdown code that
 	 * if this interrupt handler is installed, ACPI is enabled.
 	 */
 
 	/* GPEs: Check for and dispatch any GPEs that have occurred */
 
 	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
 	return_UINT32(interrupt_handled);
 }
 
@@ -150,15 +198,15 @@
 
 /******************************************************************************
  *
- * FUNCTION:    acpi_ev_remove_sci_handler
+ * FUNCTION:    acpi_ev_remove_all_sci_handlers
  *
  * PARAMETERS:  none
  *
- * RETURN:      E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN:      AE_OK if handler uninstalled, AE_ERROR if handler was not
  *              installed to begin with
  *
  * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- *              taken.
+ *              taken. Remove all host-installed SCI handlers.
  *
  * Note:  It doesn't seem important to disable all events or set the event
  *        enable registers to their original values. The OS should disable
@@ -167,11 +215,13 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
 {
+	struct acpi_sci_handler_info *sci_handler;
+	acpi_cpu_flags flags;
 	acpi_status status;
 
-	ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+	ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
 
 	/* Just let the OS remove the handler and disable the level */
 
@@ -179,6 +229,21 @@
 	    acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
 					     acpi_ev_sci_xrupt_handler);
 
+	if (!acpi_gbl_sci_handler_list) {
+		return (status);
+	}
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Free all host-installed SCI handlers */
+
+	while (acpi_gbl_sci_handler_list) {
+		sci_handler = acpi_gbl_sci_handler_list;
+		acpi_gbl_sci_handler_list = sci_handler->next;
+		ACPI_FREE(sci_handler);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ca5fba9..23a7fad 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -374,7 +375,7 @@
 
 	acpi_gbl_exception_handler = handler;
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
@@ -385,6 +386,144 @@
 #if (!ACPI_REDUCED_HARDWARE)
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_install_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *              context             - Value passed to the handler on each SCI
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+	struct acpi_sci_handler_info *new_sci_handler;
+	struct acpi_sci_handler_info *sci_handler;
+	acpi_cpu_flags flags;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+	if (!address) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Allocate and init a handler object */
+
+	new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+	if (!new_sci_handler) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	new_sci_handler->address = address;
+	new_sci_handler->context = context;
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	/* Lock list during installation */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	sci_handler = acpi_gbl_sci_handler_list;
+
+	/* Ensure handler does not already exist */
+
+	while (sci_handler) {
+		if (address == sci_handler->address) {
+			status = AE_ALREADY_EXISTS;
+			goto unlock_and_exit;
+		}
+
+		sci_handler = sci_handler->next;
+	}
+
+	/* Install the new handler into the global list (at head) */
+
+	new_sci_handler->next = acpi_gbl_sci_handler_list;
+	acpi_gbl_sci_handler_list = new_sci_handler;
+
+unlock_and_exit:
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+exit:
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(new_sci_handler);
+	}
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+	struct acpi_sci_handler_info *prev_sci_handler;
+	struct acpi_sci_handler_info *next_sci_handler;
+	acpi_cpu_flags flags;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+	if (!address) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Remove the SCI handler with lock */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	prev_sci_handler = NULL;
+	next_sci_handler = acpi_gbl_sci_handler_list;
+	while (next_sci_handler) {
+		if (next_sci_handler->address == address) {
+
+			/* Unlink and free the SCI handler info block */
+
+			if (prev_sci_handler) {
+				prev_sci_handler->next = next_sci_handler->next;
+			} else {
+				acpi_gbl_sci_handler_list =
+				    next_sci_handler->next;
+			}
+
+			acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+			ACPI_FREE(next_sci_handler);
+			goto unlock_and_exit;
+		}
+
+		prev_sci_handler = next_sci_handler;
+		next_sci_handler = next_sci_handler->next;
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	status = AE_NOT_EXIST;
+
+unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
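
Together, acpi_install_sci_handler() and acpi_remove_sci_handler() let a host driver chain onto the SCI. A usage sketch (my_sci_handler is hypothetical; an acpi_sci_handler takes the context pointer and returns ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED, which acpi_ev_sci_dispatch() ORs together):

    static u32 my_sci_handler(void *context)
    {
	    /* Runs at interrupt level under the GPE lock; keep it short */
	    return (ACPI_INTERRUPT_HANDLED);
    }

    acpi_status status;

    status = acpi_install_sci_handler(my_sci_handler, NULL);
    if (ACPI_FAILURE(status))
	    return -ENODEV;

    /* ... later, on teardown ... */
    acpi_remove_sci_handler(my_sci_handler);
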
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_install_global_event_handler
  *
  * PARAMETERS:  handler         - Pointer to the global event handler function
@@ -398,6 +537,7 @@
  *              Can be used to update event counters, etc.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
 {
@@ -426,7 +566,7 @@
 	acpi_gbl_global_event_handler = handler;
 	acpi_gbl_global_event_handler_context = context;
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
@@ -498,7 +638,7 @@
 				  handler));
 	}
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7039606..39d06af 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 7662f1a..5713da7 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acevents.h"
@@ -471,7 +472,7 @@
 	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
 		*event_status |= ACPI_EVENT_FLAG_HANDLE;
 
-      unlock_and_exit:
+unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
@@ -624,7 +625,7 @@
 
 	obj_desc->device.gpe_block = gpe_block;
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
@@ -679,7 +680,7 @@
 		obj_desc->device.gpe_block = NULL;
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 80cecf8..02ed75a 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,7 +42,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -147,7 +148,7 @@
 
 	status = acpi_ev_execute_reg_methods(node, space_id);
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
@@ -286,7 +287,7 @@
 
 	status = AE_NOT_EXIST;
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 269e81d..3c2e6dc 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -193,7 +193,7 @@
 	    acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
 				  operands[0], obj_desc, ACPI_TYPE_EVENT);
 
-      cleanup:
+cleanup:
 	/*
 	 * Remove local reference to the object (on error, will cause deletion
 	 * of both object and semaphore if present.)
@@ -248,7 +248,7 @@
 	    acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
 				  ACPI_TYPE_MUTEX);
 
-      cleanup:
+cleanup:
 	/*
 	 * Remove local reference to the object (on error, will cause deletion
 	 * of both object and semaphore if present.)
@@ -347,7 +347,7 @@
 
 	status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
 
-      cleanup:
+cleanup:
 
 	/* Remove local reference to the object */
 
@@ -520,7 +520,7 @@
 
 	acpi_ut_remove_reference(obj_desc);
 
-      exit:
+exit:
 	/* Remove a reference to the operand */
 
 	acpi_ut_remove_reference(operand[1]);
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index c2a65aa..cfd8752 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -197,7 +197,7 @@
 	status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
 	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 
-      exit:
+exit:
 	if (ACPI_FAILURE(status)) {
 		acpi_ut_remove_reference(buffer_desc);
 	} else {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 7e0afe7..49fb742 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -123,12 +123,6 @@
 		}
 	}
 
-	/* Exit if Address/Length have been disallowed by the host OS */
-
-	if (rgn_desc->common.flags & AOPOBJ_INVALID) {
-		return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
-	}
-
 	/*
 	 * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
 	 * address space and the request cannot be directly validated
@@ -1002,7 +996,7 @@
 						mask, merged_datum,
 						field_offset);
 
-      exit:
+exit:
 	/* Free temporary buffer if we used one */
 
 	if (new_buffer) {
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 00bf298..65d9360 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -388,7 +388,7 @@
 
 	*actual_return_desc = return_desc;
 
-      cleanup:
+cleanup:
 	if (local_operand1 != operand1) {
 		acpi_ut_remove_reference(local_operand1);
 	}
@@ -718,7 +718,7 @@
 		}
 	}
 
-      cleanup:
+cleanup:
 
 	/* New object was created if implicit conversion performed - delete */
 
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 2cdd41d..d74cea4 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -115,7 +115,7 @@
 		break;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
@@ -234,7 +234,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 
 	return_ACPI_STATUS(status);
 }
@@ -551,7 +551,7 @@
 		status = acpi_ex_store(return_desc, operand[1], walk_state);
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
@@ -1054,7 +1054,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d5088f7..d6fa0fc 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -215,7 +215,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 	/*
 	 * Since the remainder is not returned indirectly, remove a reference to
 	 * it. Only the quotient is returned indirectly.
@@ -445,7 +445,7 @@
 		break;
 	}
 
-      store_result_to_target:
+store_result_to_target:
 
 	if (ACPI_SUCCESS(status)) {
 		/*
@@ -462,7 +462,7 @@
 		}
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
@@ -553,7 +553,7 @@
 		goto cleanup;
 	}
 
-      store_logical_result:
+store_logical_result:
 	/*
 	 * Set return value according to logical_result: logical TRUE is all ones,
 	 * default is FALSE (zero)
@@ -562,7 +562,7 @@
 		return_desc->integer.value = ACPI_UINT64_MAX;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 37656f1..bc042adf 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -124,7 +124,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 
 	return_ACPI_STATUS(status);
 }
@@ -252,7 +252,7 @@
 
 	status = acpi_ex_store(return_desc, operand[3], walk_state);
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 879b6cd..4459e32 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -314,7 +314,7 @@
 		goto cleanup;
 	}
 
-      cleanup:
+cleanup:
 
 	/* Delete return object on error */
 
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 303429bb..9d28867e 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -400,6 +400,7 @@
 	switch (function) {
 	case ACPI_READ:
 
+		*value = 0;
 		status = acpi_os_read_pci_configuration(pci_id, pci_register,
 							value, bit_width);
 		break;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index ac04278..1606524 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -521,7 +521,7 @@
 	 */
 	type = obj_desc->common.type;
 
-      exit:
+exit:
 	/* Convert internal types to external types */
 
 	switch (type) {
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 00e5af7..be3f669 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -683,7 +683,7 @@
 			return_ACPI_STATUS(status);
 		}
 
-	      next_operand:
+next_operand:
 		/*
 		 * If more operands needed, decrement stack_ptr to point
 		 * to next operand on stack
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 8d2e866..12e6cff 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -560,7 +560,7 @@
 		break;
 	}
 
-      exit:
+exit:
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2d7d22e..3c498dc 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 5ee7a814..b4b47db 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -83,11 +84,17 @@
 		 * For I/O space, write directly to the OSL. This bypasses the port
 		 * validation mechanism, which may block a valid write to the reset
 		 * register.
-		 * Spec section 4.7.3.6 requires register width to be 8.
+		 *
+		 * NOTE:
+		 * The ACPI spec requires the reset register width to be 8, so we
+		 * hardcode it here and ignore the FADT value. This maintains
+		 * compatibility with other ACPI implementations that have allowed
+		 * BIOS code with bad register width values to go unnoticed.
 		 */
 		status =
 		    acpi_os_write_port((acpi_io_address) reset_reg->address,
-				       acpi_gbl_FADT.reset_value, 8);
+				       acpi_gbl_FADT.reset_value,
+				       ACPI_RESET_REGISTER_WIDTH);
 	} else {
 		/* Write the reset value to the reset register */
 
@@ -119,7 +126,8 @@
  ******************************************************************************/
 acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
 {
-	u32 value;
+	u32 value_lo;
+	u32 value_hi;
 	u32 width;
 	u64 address;
 	acpi_status status;
@@ -137,13 +145,8 @@
 		return (status);
 	}
 
-	/* Initialize entire 64-bit return value to zero */
-
-	*return_value = 0;
-	value = 0;
-
 	/*
-	 * Two address spaces supported: Memory or IO. PCI_Config is
+	 * Two address spaces supported: Memory or I/O. PCI_Config is
 	 * not supported here because the GAS structure is insufficient
 	 */
 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +158,35 @@
 		}
 	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
+		value_lo = 0;
+		value_hi = 0;
+
 		width = reg->bit_width;
 		if (width == 64) {
 			width = 32;	/* Break into two 32-bit transfers */
 		}
 
 		status = acpi_hw_read_port((acpi_io_address)
-					   address, &value, width);
+					   address, &value_lo, width);
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
-		*return_value = value;
 
 		if (reg->bit_width == 64) {
 
 			/* Read the top 32 bits */
 
 			status = acpi_hw_read_port((acpi_io_address)
-						   (address + 4), &value, 32);
+						   (address + 4), &value_hi,
+						   32);
 			if (ACPI_FAILURE(status)) {
 				return (status);
 			}
-			*return_value |= ((u64)value << 32);
 		}
+
+		/* Set the return value only if status is AE_OK */
+
+		*return_value = (value_lo | ((u64)value_hi << 32));
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +195,7 @@
 			  ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(reg->space_id)));
 
-	return (status);
+	return (AE_OK);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_read)
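
acpi_read() now writes *return_value only once, after both port reads have succeeded, composing the 64-bit result from the two 32-bit halves. The composition in isolation (a sketch; value_lo/value_hi are the two dwords read above):

    static u64 compose64(u32 value_lo, u32 value_hi)
    {
	    return (((u64)value_hi << 32) | value_lo);  /* low dword read first */
    }
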
@@ -561,10 +570,10 @@
 		break;
 	}
 
-      cleanup1:
+cleanup1:
 	acpi_ut_remove_reference(info->return_object);
 
-      cleanup:
+cleanup:
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"While evaluating Sleep State [%s]",
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f2e669db..15dddc1 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 
@@ -166,7 +167,7 @@
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+acpi_status acpi_enter_sleep_state_s4bios(void)
 {
 	u32 in_value;
 	acpi_status status;
@@ -360,7 +361,7 @@
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_enter_sleep_state(u8 sleep_state)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c5316e5..14f65f6 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -240,7 +240,7 @@
 		}
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
 	/* Save a handle to "_GPE", it is always present */
@@ -424,8 +424,9 @@
 					/* Current scope has no parent scope */
 
 					ACPI_ERROR((AE_INFO,
-						    "ACPI path has too many parent prefixes (^) "
-						    "- reached beyond root node"));
+						    "%s: Path has too many parent prefixes (^) "
+						    "- reached beyond root node",
+						    pathname));
 					return_ACPI_STATUS(AE_NOT_FOUND);
 				}
 			}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7418c77..48b9c6f 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -59,6 +59,17 @@
 #endif
 
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef	ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+			     u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+		      u32 level, void *context, void **return_value);
+#endif				/* ACPI_FUTURE_USAGE */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_print_pathname
@@ -609,7 +620,7 @@
 		obj_type = ACPI_TYPE_INVALID;	/* Terminate loop after next pass */
 	}
 
-      cleanup:
+cleanup:
 	acpi_os_printf("\n");
 	return (AE_OK);
 }
@@ -671,6 +682,136 @@
 }
 #endif				/* ACPI_FUTURE_USAGE */
 
+#ifdef	ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS:  obj_handle          - Node to be dumped
+ *              level               - Nesting level of the handle
+ *              context             - Passed into walk_namespace
+ *              return_value        - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth
+ *              computes the maximum nesting depth in the namespace tree, in
+ *              order to simplify formatting in acpi_ns_dump_one_object_path.
+ *              These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+			     u32 level, void *context, void **return_value)
+{
+	u32 max_level = *((u32 *)context);
+	char *pathname;
+	struct acpi_namespace_node *node;
+	int path_indent;
+
+	if (!obj_handle) {
+		return (AE_OK);
+	}
+
+	node = acpi_ns_validate_handle(obj_handle);
+	if (!node) {
+
+		/* Ignore bad node during namespace walk */
+
+		return (AE_OK);
+	}
+
+	pathname = acpi_ns_get_external_pathname(node);
+
+	path_indent = 1;
+	if (level <= max_level) {
+		path_indent = max_level - level + 1;
+	}
+
+	acpi_os_printf("%2d%*s%-12s%*s",
+		       level, level, " ", acpi_ut_get_type_name(node->type),
+		       path_indent, " ");
+
+	acpi_os_printf("%s\n", &pathname[1]);
+	ACPI_FREE(pathname);
+	return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+		      u32 level, void *context, void **return_value)
+{
+	u32 *max_level = (u32 *)context;
+
+	if (level > *max_level) {
+		*max_level = level;
+	}
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_object_paths
+ *
+ * PARAMETERS:  type                - Object type to be dumped
+ *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
+ *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
+ *                                    for an effectively unlimited depth.
+ *              owner_id            - Dump only objects owned by this ID. Use
+ *                                    ACPI_UINT32_MAX to match all owners.
+ *              start_handle        - Where in namespace to start/end search
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ *              acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+			  u8 display_type,
+			  u32 max_depth,
+			  acpi_owner_id owner_id, acpi_handle start_handle)
+{
+	acpi_status status;
+	u32 max_level = 0;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Just lock the entire namespace for the duration of the dump.
+	 * We don't want any changes to the namespace during this time,
+	 * especially the temporary nodes since we are going to display
+	 * them also.
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could not acquire namespace mutex\n");
+		return;
+	}
+
+	/* Get the max depth of the namespace tree, for formatting later */
+
+	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+				     ACPI_NS_WALK_NO_UNLOCK |
+				     ACPI_NS_WALK_TEMP_NODES,
+				     acpi_ns_get_max_depth, NULL,
+				     (void *)&max_level, NULL);
+
+	/* Now dump the entire namespace */
+
+	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+				     ACPI_NS_WALK_NO_UNLOCK |
+				     ACPI_NS_WALK_TEMP_NODES,
+				     acpi_ns_dump_one_object_path, NULL,
+				     (void *)&max_level, NULL);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif				/* ACPI_FUTURE_USAGE */
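
acpi_ns_dump_object_paths() makes two passes so the per-line indentation can be sized up front: the first walk only records the deepest level seen, the second prints each pathname. An invocation sketch, following the parameter documentation above (debugger/future-usage context):

    /* Dump every object in the loaded namespace, any owner, full depth */
    acpi_ns_dump_object_paths(ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY,
			      ACPI_UINT32_MAX, ACPI_UINT32_MAX,
			      ACPI_ROOT_OBJECT);
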
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_dump_entry
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 409ae80..2837625 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -69,6 +69,7 @@
 acpi_ns_dump_one_device(acpi_handle obj_handle,
 			u32 level, void *context, void **return_value)
 {
+	struct acpi_buffer buffer;
 	struct acpi_device_info *info;
 	acpi_status status;
 	u32 i;
@@ -78,15 +79,17 @@
 	status =
 	    acpi_ns_dump_one_object(obj_handle, level, context, return_value);
 
-	status = acpi_get_object_info(obj_handle, &info);
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+	status = acpi_get_object_info(obj_handle, &buffer);
 	if (ACPI_SUCCESS(status)) {
+		info = buffer.pointer;
 		for (i = 0; i < level; i++) {
 			ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
 		}
 
 		ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
 				      "    HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
-				      info->hardware_id.string,
+				      info->hardware_id.value,
 				      ACPI_FORMAT_UINT64(info->address),
 				      info->current_status));
 		ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 18108bc..963ceef 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -314,7 +314,7 @@
 			  "*** Completed evaluation of object %s ***\n",
 			  info->relative_pathname));
 
- cleanup:
+cleanup:
 	/*
 	 * Namespace was unlocked by the handling acpi_ns* function, so we
 	 * just free the pathname and return
@@ -486,7 +486,7 @@
 		parent_node->type = (u8)type;
 	}
 
-      exit:
+exit:
 	if (parent_obj) {
 		acpi_ut_remove_reference(parent_obj);
 	}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index dd2ceae..3a0423a 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -213,7 +213,7 @@
 
 	return_ACPI_STATUS(status);
 
-      error_exit:
+error_exit:
 	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 0a7badc..89ec645 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -114,7 +114,7 @@
 		(void)acpi_tb_release_owner_id(table_index);
 	}
 
-      unlock:
+unlock:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
 	if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 35dde81..1778573 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -140,7 +140,7 @@
 			  pass_number));
 	status = acpi_ps_parse_aml(walk_state);
 
-      cleanup:
+cleanup:
 	acpi_ps_delete_parse_tree(parse_root);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 098e766..d2855d9 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -271,7 +271,7 @@
 		return (AE_OK);	/* Successful repair */
 	}
 
-      type_error_exit:
+type_error_exit:
 
 	/* Create a string with all expected types for this predefined object */
 
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 6d55cef..3d5391f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -330,7 +330,7 @@
 
 	return (status);
 
-      package_too_small:
+package_too_small:
 
 	/* Error exit for the case with an incorrect package count */
 
@@ -555,7 +555,7 @@
 
 	return (AE_OK);
 
-      package_too_small:
+package_too_small:
 
 	/* The sub-package count was smaller than required */
 
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index f8e71ea..a05afff 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -263,7 +263,7 @@
 
 	return (AE_AML_OPERAND_TYPE);
 
-      object_repaired:
+object_repaired:
 
 	/* Object was successfully repaired */
 
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index c84603e..6a25d32 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -478,7 +478,7 @@
 			removing = TRUE;
 		}
 
-	      remove_element:
+remove_element:
 		if (removing) {
 			acpi_ns_remove_element(return_object, i + 1);
 			outer_element_count--;
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5d43efc..47420fa 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,8 @@
 
 	/* Node is an object defined by an External() statement */
 
-	if (flags & ACPI_NS_EXTERNAL) {
+	if (flags & ACPI_NS_EXTERNAL ||
+	    (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
 		new_node->flags |= ANOBJ_IS_EXTERNAL;
 	}
 #endif
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 08c0b5b..cc2fea9 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -722,7 +722,7 @@
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
-      cleanup:
+cleanup:
 	ACPI_FREE(internal_path);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index b38b4b0..e973e311 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,7 +42,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -138,7 +139,7 @@
 
 		/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
 
-		ACPI_FREE(return_buffer->pointer);
+		ACPI_FREE_BUFFER(*return_buffer);
 		return_buffer->pointer = NULL;
 	}
 
@@ -441,7 +442,7 @@
 		acpi_ex_exit_interpreter();
 	}
 
-      cleanup:
+cleanup:
 
 	/* Free the input parameter list (if we created one) */
 
@@ -605,14 +606,22 @@
 		goto unlock_and_exit;
 	}
 
+	/* Now we can validate the starting node */
+
+	if (!acpi_ns_validate_handle(start_object)) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit2;
+	}
+
 	status = acpi_ns_walk_namespace(type, start_object, max_depth,
 					ACPI_NS_WALK_UNLOCK,
 					descending_callback, ascending_callback,
 					context, return_value);
 
+unlock_and_exit2:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
 	return_ACPI_STATUS(status);
 }
@@ -856,7 +865,7 @@
 
 	status = acpi_ns_attach_data(node, handler, data);
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
 }
@@ -902,7 +911,7 @@
 
 	status = acpi_ns_detach_data(node, handler);
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
 }
@@ -949,7 +958,7 @@
 
 	status = acpi_ns_get_attached_data(node, handler, data);
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
 }
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83c1644..3a4bd3f 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,7 +42,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -208,7 +209,7 @@
 	((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
 	status = AE_OK;
 
-      unlock_and_exit:
+unlock_and_exit:
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
@@ -496,7 +497,7 @@
 	*return_buffer = info;
 	status = AE_OK;
 
-      cleanup:
+cleanup:
 	if (hid) {
 		ACPI_FREE(hid);
 	}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c0853ef..0e6d79e 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,7 +42,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -200,7 +201,7 @@
 		status = AE_NULL_ENTRY;
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
@@ -280,7 +281,7 @@
 		*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 86198a9..79d9a28 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -297,7 +297,7 @@
 			}
 	}
 
-      cleanup:
+cleanup:
 
 	/* Now we can actually delete the subtree rooted at Op */
 
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 11b99ab..fcb7a84 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -142,7 +142,7 @@
 		acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
 	}
 
-      exit:
+exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 }
 
@@ -185,7 +185,7 @@
 	acpi_dbg_level = acpi_gbl_original_dbg_level;
 	acpi_dbg_layer = acpi_gbl_original_dbg_layer;
 
-      exit:
+exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 }
 
@@ -323,7 +323,7 @@
 
 	/* walk_state was deleted by parse_aml */
 
-      cleanup:
+cleanup:
 	acpi_ps_delete_parse_tree(op);
 
 	/* End optional tracing */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 80d1299..c99cec9 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -440,7 +440,7 @@
 		info++;
 	}
 
-      exit:
+exit:
 	if (!flags_mode) {
 
 		/* Round the resource struct length up to the next boundary (32 or 64) */
@@ -783,7 +783,7 @@
 		info++;
 	}
 
-      exit:
+exit:
 	return_ACPI_STATUS(AE_OK);
 }
 
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 480b6b4..aef303d 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -784,7 +784,7 @@
 
 	acpi_ut_remove_reference(args[0]);
 
-      cleanup:
+cleanup:
 	ACPI_FREE(info);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 94e3517..01e4769 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 42a13c0..634357d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -80,16 +80,10 @@
 		}
 	}
 
-	/* FACS is the odd table, has no standard ACPI header and no checksum */
+	/* Always calculate checksum, ignore bad checksum if requested */
 
-	if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
-		/* Always calculate checksum, ignore bad checksum if requested */
-
-		status =
-		    acpi_tb_verify_checksum(table_desc->pointer,
-					    table_desc->length);
-	}
+	status =
+	    acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
 
 	return_ACPI_STATUS(status);
 }
@@ -237,10 +231,10 @@
 		goto release;
 	}
 
-      print_header:
+print_header:
 	acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
 
-      release:
+release:
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
@@ -312,7 +306,7 @@
 
 	return (NULL);		/* There was no override */
 
-      finish_override:
+finish_override:
 
 	ACPI_INFO((AE_INFO,
 		   "%4.4s %p %s table override, new table: %p",
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index dc963f8..6866e76 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -135,10 +135,10 @@
 
 		/* FACS only has signature and length fields */
 
-		ACPI_INFO((AE_INFO, "%4.4s %p %05X",
+		ACPI_INFO((AE_INFO, "%4.4s %p %06X",
 			   header->signature, ACPI_CAST_PTR(void, address),
 			   header->length));
-	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+	} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
 
 		/* RSDP has no common fields */
 
@@ -147,7 +147,7 @@
 					  header)->oem_id, ACPI_OEM_ID_SIZE);
 		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
-		ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
+		ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
 			   ACPI_CAST_PTR(void, address),
 			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
 			    revision >
@@ -162,7 +162,7 @@
 		acpi_tb_cleanup_table_header(&local_header, header);
 
 		ACPI_INFO((AE_INFO,
-			   "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
+			   "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
 			   local_header.signature, ACPI_CAST_PTR(void, address),
 			   local_header.length, local_header.revision,
 			   local_header.oem_id, local_header.oem_table_id,
@@ -190,6 +190,16 @@
 {
 	u8 checksum;
 
+	/*
+	 * FACS/S3PT:
+	 * They are the odd tables, have no standard ACPI header and no checksum
+	 */
+
+	if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+	    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+		return (AE_OK);
+	}
+
 	/* Compute the checksum on the table */
 
 	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
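
Moving the FACS special case out of the caller (tbinstal.c above) and into the checksum routine itself, extended to cover S3PT, keeps callers uniform: every table goes through verification, and the two tables with no standard header or checksum field short-circuit to success. A hedged sketch of the mod-256 check with simplified types, not the exact ACPICA signatures:

#include <stdint.h>
#include <string.h>

/* Simplified sketch: an ACPI table is valid when the sum of all of
 * its bytes, including the checksum field itself, is 0 mod 256.
 */
static uint8_t table_checksum(const uint8_t *buf, uint32_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum += *buf++;
	return sum;
}

static int verify_checksum(const uint8_t *table, uint32_t length)
{
	/* FACS and S3PT carry no standard header and no checksum */
	if (!memcmp(table, "FACS", 4) || !memcmp(table, "S3PT", 4))
		return 0;

	return table_checksum(table, length) ? -1 : 0;
}
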
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bffdfc7..3d6bb83 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -350,7 +350,7 @@
 		acpi_tb_delete_table(table_desc);
 	}
 
-      unmap_and_exit:
+unmap_and_exit:
 
 	/* Always unmap the table header that we mapped above */
 
@@ -430,8 +430,7 @@
  *
  ******************************************************************************/
 
-acpi_status __init
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
+acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 {
 	struct acpi_table_rsdp *rsdp;
 	u32 table_entry_size;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ad11162..db826ea 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "actables.h"
@@ -147,6 +148,8 @@
 	return_ACPI_STATUS(status);
 }
 
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_reallocate_root_table
@@ -161,7 +164,7 @@
  *              kernel.
  *
  ******************************************************************************/
-acpi_status acpi_reallocate_root_table(void)
+acpi_status __init acpi_reallocate_root_table(void)
 {
 	acpi_status status;
 
@@ -181,6 +184,8 @@
 	return_ACPI_STATUS(status);
 }
 
+ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_table_header
@@ -356,6 +361,7 @@
 
 	return (AE_NOT_FOUND);
 }
+
 ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
@@ -367,6 +373,7 @@
 	return acpi_get_table_with_size(signature,
 		       instance, out_table, &tbl_size);
 }
+
 ACPI_EXPORT_SYMBOL(acpi_get_table)
 
 /*******************************************************************************
@@ -424,7 +431,6 @@
 
 ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
 
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_table_handler
@@ -465,7 +471,7 @@
 	acpi_gbl_table_handler = handler;
 	acpi_gbl_table_handler_context = context;
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
@@ -506,7 +512,7 @@
 
 	acpi_gbl_table_handler = NULL;
 
-      cleanup:
+cleanup:
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0ba9e32..60b5a87 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
@@ -65,7 +66,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_load_tables(void)
+acpi_status __init acpi_load_tables(void)
 {
 	acpi_status status;
 
@@ -82,7 +83,7 @@
 	return_ACPI_STATUS(status);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
+ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
 
 /*******************************************************************************
  *
@@ -200,7 +201,7 @@
 
 	ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
@@ -268,7 +269,7 @@
 					     acpi_gbl_table_handler_context);
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 948c95e..e4e1468 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -68,8 +68,7 @@
 	 * Note: Sometimes there exists more than one RSDP in memory; the valid
 	 * RSDP has a valid checksum, all others have an invalid checksum.
 	 */
-	if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
-			 sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+	if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
 
 		/* Nope, BAD Signature */
 
@@ -112,7 +111,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
 {
 	u8 *table_ptr;
 	u8 *mem_rover;
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index e0ffb58..814267f 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -48,6 +48,39 @@
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utalloc")
 
+#if !defined (USE_NATIVE_ALLOCATE_ZEROED)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_allocate_zeroed
+ *
+ * PARAMETERS:  size                - Size of the allocation
+ *
+ * RETURN:      Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ *              This is the default implementation. Can be overridden via the
+ *              USE_NATIVE_ALLOCATE_ZEROED flag.
+ *
+ ******************************************************************************/
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+	void *allocation;
+
+	ACPI_FUNCTION_ENTRY();
+
+	allocation = acpi_os_allocate(size);
+	if (allocation) {
+
+		/* Clear the memory block */
+
+		ACPI_MEMSET(allocation, 0, size);
+	}
+
+	return (allocation);
+}
+
+#endif				/* !USE_NATIVE_ALLOCATE_ZEROED */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_create_caches
@@ -59,6 +92,7 @@
  * DESCRIPTION: Create all local caches
  *
  ******************************************************************************/
+
 acpi_status acpi_ut_create_caches(void)
 {
 	acpi_status status;
@@ -175,10 +209,10 @@
 
 	/* Free memory lists */
 
-	ACPI_FREE(acpi_gbl_global_list);
+	acpi_os_free(acpi_gbl_global_list);
 	acpi_gbl_global_list = NULL;
 
-	ACPI_FREE(acpi_gbl_ns_node_list);
+	acpi_os_free(acpi_gbl_ns_node_list);
 	acpi_gbl_ns_node_list = NULL;
 #endif
 
@@ -302,82 +336,3 @@
 	ACPI_MEMSET(buffer->pointer, 0, required_length);
 	return (AE_OK);
 }
-
-#ifdef NOT_USED_BY_LINUX
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate
- *
- * PARAMETERS:  size                - Size of the allocation
- *              component           - Component type of caller
- *              module              - Source file name of caller
- *              line                - Line number of caller
- *
- * RETURN:      Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate(acpi_size size,
-		       u32 component, const char *module, u32 line)
-{
-	void *allocation;
-
-	ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
-
-	/* Check for an inadvertent size of zero bytes */
-
-	if (!size) {
-		ACPI_WARNING((module, line,
-			      "Attempt to allocate zero bytes, allocating 1 byte"));
-		size = 1;
-	}
-
-	allocation = acpi_os_allocate(size);
-	if (!allocation) {
-
-		/* Report allocation error */
-
-		ACPI_WARNING((module, line,
-			      "Could not allocate size %u", (u32) size));
-
-		return_PTR(NULL);
-	}
-
-	return_PTR(allocation);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate_zeroed
- *
- * PARAMETERS:  size                - Size of the allocation
- *              component           - Component type of caller
- *              module              - Source file name of caller
- *              line                - Line number of caller
- *
- * RETURN:      Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
-			      u32 component, const char *module, u32 line)
-{
-	void *allocation;
-
-	ACPI_FUNCTION_ENTRY();
-
-	allocation = acpi_ut_allocate(size, component, module, line);
-	if (allocation) {
-
-		/* Clear the memory block */
-
-		ACPI_MEMSET(allocation, 0, size);
-	}
-
-	return (allocation);
-}
-#endif
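
With acpi_ut_allocate()/acpi_ut_allocate_zeroed() removed, acpi_os_allocate_zeroed() becomes the calloc-equivalent primitive, and hosts with a native zeroing allocator can opt out of the portable default by defining USE_NATIVE_ALLOCATE_ZEROED. A hedged sketch of that compile-time override pattern, using size_t in place of acpi_size:

#include <stdlib.h>
#include <string.h>

#ifdef USE_NATIVE_ALLOCATE_ZEROED
/* Host supplies a native zeroing allocator, e.g. built on calloc() */
void *acpi_os_allocate_zeroed(size_t size)
{
	return calloc(1, size);
}
#else
/* Portable default, as in the patch: allocate, then clear */
void *acpi_os_allocate_zeroed(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}
#endif
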
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index a877a96..366bfec 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -65,7 +65,7 @@
 acpi_status
 acpi_os_create_cache(char *cache_name,
 		     u16 object_size,
-		     u16 max_depth, struct acpi_memory_list ** return_cache)
+		     u16 max_depth, struct acpi_memory_list **return_cache)
 {
 	struct acpi_memory_list *cache;
 
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1731c27..edff4e6 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -552,7 +552,7 @@
 	*ret_internal_object = internal_object;
 	return_ACPI_STATUS(AE_OK);
 
-      error_exit:
+error_exit:
 	acpi_ut_remove_reference(internal_object);
 	return_ACPI_STATUS(AE_NO_MEMORY);
 }
@@ -899,7 +899,7 @@
 
 	return (status);
 
-      error_exit:
+error_exit:
 	acpi_ut_remove_reference(target_object);
 	return (status);
 }
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5796e11..1a67b39 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 
@@ -190,7 +191,7 @@
 	 * Display the module name, current line number, thread ID (if requested),
 	 * current procedure nesting level, and the current procedure name
 	 */
-	acpi_os_printf("%8s-%04ld ", module_name, line_number);
+	acpi_os_printf("%9s-%04ld ", module_name, line_number);
 
 	if (ACPI_LV_THREADS & acpi_dbg_level) {
 		acpi_os_printf("[%u] ", (u32)thread_id);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 11e2e02..b3f31dd 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,7 +41,6 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index d6b33f2..c07d222 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -649,7 +649,7 @@
 
 	return (AE_OK);
 
-      error_exit:
+error_exit:
 
 	ACPI_EXCEPTION((AE_INFO, status,
 			"Could not update object reference count"));
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 4fd6897..16fb905 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -181,7 +181,7 @@
 
 	*return_desc = info->return_object;
 
-      cleanup:
+cleanup:
 	ACPI_FREE(info);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index ff6d9e8..3cf7b59 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -41,8 +41,9 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
+#define EXPORT_ACPI_INTERFACES
+
 #define ACPI_DEFINE_EXCEPTION_TABLE
-#include <linux/export.h>
 #include <acpi/acpi.h>
 #include "accommon.h"
 
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d6f26bf..81f9a95 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -41,9 +41,9 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
+#define EXPORT_ACPI_INTERFACES
 #define DEFINE_ACPI_GLOBALS
 
-#include <linux/export.h>
 #include <acpi/acpi.h>
 #include "accommon.h"
 
@@ -289,9 +289,19 @@
 
 	acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
 
+	/* Event counters */
+
+	acpi_method_count = 0;
+	acpi_sci_count = 0;
+	acpi_gpe_count = 0;
+
+	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+		acpi_fixed_event_count[i] = 0;
+	}
+
 #if (!ACPI_REDUCED_HARDWARE)
 
-	/* GPE support */
+	/* GPE/SCI support */
 
 	acpi_gbl_all_gpes_initialized = FALSE;
 	acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +310,7 @@
 	acpi_current_gpe_count = 0;
 
 	acpi_gbl_global_event_handler = NULL;
+	acpi_gbl_sci_handler_list = NULL;
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
@@ -377,6 +388,11 @@
 /* Public globals */
 
 ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+
 ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+
 ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+
+ACPI_EXPORT_SYMBOL(acpi_gpe_count)
+
 ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index fa69071..bfca7b4 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -184,7 +184,7 @@
 	sub->length = length;
 	*return_id = sub;
 
-      cleanup:
+cleanup:
 
 	/* On exit, we must delete the return object */
 
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index aa61f66..517af70 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -180,7 +180,7 @@
 	package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
 						 1) * sizeof(void *));
 	if (!package_elements) {
-		acpi_ut_remove_reference(package_desc);
+		ACPI_FREE(package_desc);
 		return_PTR(NULL);
 	}
 
@@ -356,7 +356,7 @@
 	default:
 
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "%p is not not an ACPI operand obj [%s]\n",
+				  "%p is not an ACPI operand obj [%s]\n",
 				  object, acpi_ut_get_descriptor_name(object)));
 		break;
 	}
@@ -396,7 +396,6 @@
 
 	/* Mark the descriptor type */
 
-	memset(object, 0, sizeof(union acpi_operand_object));
 	ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
 
 	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
@@ -461,25 +460,28 @@
 
 	ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
 
-	/*
-	 * Handle a null object (Could be a uninitialized package
-	 * element -- which is legal)
-	 */
-	if (!internal_object) {
-		*obj_length = sizeof(union acpi_object);
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Start with the length of the Acpi object */
+	/* Start with the length of the (external) Acpi object */
 
 	length = sizeof(union acpi_object);
 
+	/* A NULL object is allowed, can be a legal uninitialized package element */
+
+	if (!internal_object) {
+		/*
+		 * Object is NULL, just return the length of union acpi_object
+		 * (A NULL union acpi_object is an object of all zeroes.)
+		 */
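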
+		*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* A Namespace Node should never appear here */
+
 	if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
 
-		/* Object is a named object (reference), just return the length */
+		/* A namespace node should never get here */
 
-		*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
-		return_ACPI_STATUS(status);
+		return_ACPI_STATUS(AE_AML_INTERNAL);
 	}
 
 	/*
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 835340b..eb3aca7 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -148,7 +148,7 @@
 	ACPI_ERROR((AE_INFO,
 		    "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
 
-      exit:
+exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cb7fa49..2c2accb 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -643,7 +643,7 @@
 
 	return (AE_OK);
 
-      invalid_resource:
+invalid_resource:
 
 	if (walk_state) {
 		ACPI_ERROR((AE_INFO,
@@ -652,7 +652,7 @@
 	}
 	return (AE_AML_INVALID_RESOURCE_TYPE);
 
-      bad_resource_length:
+bad_resource_length:
 
 	if (walk_state) {
 		ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a6b729d..03c4c2f 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -161,7 +161,6 @@
 	if (state) {
 
 		/* Initialize */
-		memset(state, 0, sizeof(union acpi_generic_state));
 		state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
 	}
 
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index cb1e9cc..45c0eb2 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -310,7 +310,7 @@
 
 	/* All done, normal exit */
 
-      all_done:
+all_done:
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
 			  ACPI_FORMAT_UINT64(return_value)));
@@ -318,7 +318,7 @@
 	*ret_integer = return_value;
 	return_ACPI_STATUS(AE_OK);
 
-      error_exit:
+error_exit:
 	/* Base was set/validated above */
 
 	if (base == 10) {
@@ -584,3 +584,65 @@
 	}
 }
 #endif
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
+ *              functions. This is the size of the Destination buffer.
+ *
+ * RETURN:      TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ *              the result of the operation will not overflow the output string
+ *              buffer.
+ *
+ * NOTE:        These functions are typically only helpful for processing
+ *              user input and command lines. For most ACPICA code, the
+ *              required buffer length is precisely calculated before buffer
+ *              allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+	if (ACPI_STRLEN(source) >= dest_size) {
+		return (TRUE);
+	}
+
+	ACPI_STRCPY(dest, source);
+	return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+	if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+		return (TRUE);
+	}
+
+	ACPI_STRCAT(dest, source);
+	return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+		     acpi_size dest_size,
+		     char *source, acpi_size max_transfer_length)
+{
+	acpi_size actual_transfer_length;
+
+	actual_transfer_length =
+	    ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+
+	if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+		return (TRUE);
+	}
+
+	ACPI_STRNCAT(dest, source, max_transfer_length);
+	return (FALSE);
+}
+#endif
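
The new safe string helpers invert the usual strcpy convention: they return TRUE when the copy would overflow, so callers test and bail rather than truncate silently. A hedged usage sketch against a plain-C equivalent of the same contract:

#include <string.h>

typedef unsigned char u8;

/* Same contract as the new helper: nonzero means "would overflow,
 * nothing was copied".
 */
static u8 safe_strcpy(char *dest, size_t dest_size, const char *source)
{
	if (strlen(source) >= dest_size)
		return 1;

	strcpy(dest, source);
	return 0;
}

static int build_path(char *out, size_t out_size, const char *name)
{
	if (safe_strcpy(out, out_size, name))
		return -1;	/* caller handles the overflow explicitly */
	return 0;
}
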
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 160f13f..c002777 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -130,10 +130,23 @@
 	struct acpi_debug_mem_block *allocation;
 	acpi_status status;
 
+	/* Check for an inadvertent size of zero bytes */
+
+	if (!size) {
+		ACPI_WARNING((module, line,
+			      "Attempt to allocate zero bytes, allocating 1 byte"));
+		size = 1;
+	}
+
 	allocation =
-	    acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
-			     component, module, line);
+	    acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header));
 	if (!allocation) {
+
+		/* Report allocation error */
+
+		ACPI_WARNING((module, line,
+			      "Could not allocate size %u", (u32)size));
+
 		return (NULL);
 	}
 
@@ -179,9 +192,17 @@
 	struct acpi_debug_mem_block *allocation;
 	acpi_status status;
 
+	/* Check for an inadvertent size of zero bytes */
+
+	if (!size) {
+		ACPI_WARNING((module, line,
+			      "Attempt to allocate zero bytes, allocating 1 byte"));
+		size = 1;
+	}
+
 	allocation =
-	    acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
-				    component, module, line);
+	    acpi_os_allocate_zeroed(size +
+				    sizeof(struct acpi_debug_mem_header));
 	if (!allocation) {
 
 		/* Report allocation error */
@@ -409,7 +430,7 @@
 		element->next = allocation;
 	}
 
-      unlock_and_exit:
+unlock_and_exit:
 	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
 	return_ACPI_STATUS(status);
 }
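
With acpi_ut_allocate() deleted from utalloc.c, its zero-size and failure diagnostics move into the tracked allocators here. The size bump is the classic guard against malloc(0)'s implementation-defined result; a hedged sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

/* malloc(0) may return NULL or a unique pointer; allocating one byte
 * keeps the caller's NULL-means-failure check unambiguous.
 */
static void *tracked_allocate(size_t size, const char *module, int line)
{
	void *p;

	if (!size) {
		fprintf(stderr, "%s:%d: zero-byte allocation, using 1 byte\n",
			module, line);
		size = 1;
	}

	p = malloc(size);
	if (!p)
		fprintf(stderr, "%s:%d: could not allocate %zu bytes\n",
			module, line, size);
	return p;
}
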
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 03a211e..be322c8 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acdebug.h"
@@ -60,7 +61,7 @@
  * DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
  *
  ******************************************************************************/
-acpi_status acpi_terminate(void)
+acpi_status __init acpi_terminate(void)
 {
 	acpi_status status;
 
@@ -104,7 +105,7 @@
 	return_ACPI_STATUS(status);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_terminate)
+ACPI_EXPORT_SYMBOL_INIT(acpi_terminate)
 
 #ifndef ACPI_ASL_COMPILER
 #ifdef ACPI_FUTURE_USAGE
@@ -207,6 +208,44 @@
 
 ACPI_EXPORT_SYMBOL(acpi_get_system_info)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_statistics
+ *
+ * PARAMETERS:  stats           - Where the statistics are returned
+ *
+ * RETURN:      status          - the status of the call
+ *
+ * DESCRIPTION: Get the contents of the various system counters
+ *
+ ******************************************************************************/
+acpi_status acpi_get_statistics(struct acpi_statistics *stats)
+{
+	ACPI_FUNCTION_TRACE(acpi_get_statistics);
+
+	/* Parameter validation */
+
+	if (!stats) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Various interrupt-based event counters */
+
+	stats->sci_count = acpi_sci_count;
+	stats->gpe_count = acpi_gpe_count;
+
+	ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
+		    sizeof(acpi_fixed_event_count));
+
+	/* Other counters */
+
+	stats->method_count = acpi_method_count;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_statistics)
+
 /*****************************************************************************
  *
  * FUNCTION:    acpi_install_initialization_handler
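
acpi_get_statistics() exposes the counters that acpi_ut_init_globals() now resets at startup (see the utglobal.c hunk above). A hedged sketch of a consumer, assuming only the struct acpi_statistics fields visible in this patch (sci_count, gpe_count, fixed_event_count[], method_count):

#include <linux/acpi.h>
#include <linux/printk.h>

/* Hypothetical consumer; acpi_get_statistics() and struct
 * acpi_statistics are the interfaces added by this patch.
 */
static void report_acpi_counters(void)
{
	struct acpi_statistics stats;

	if (ACPI_FAILURE(acpi_get_statistics(&stats)))
		return;

	pr_info("SCIs: %u, GPEs: %u, methods run: %u\n",
		stats.sci_count, stats.gpe_count, stats.method_count);
}
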
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index e966a2e..f7edb88 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 41ebaaf..75efea0 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -41,7 +41,8 @@
  * POSSIBILITY OF SUCH DAMAGES.
  */
 
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acevents.h"
@@ -64,7 +65,7 @@
  *              called, so any early initialization belongs here.
  *
  ******************************************************************************/
-acpi_status acpi_initialize_subsystem(void)
+acpi_status __init acpi_initialize_subsystem(void)
 {
 	acpi_status status;
 
@@ -124,7 +125,8 @@
 	ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
 	return_ACPI_STATUS(status);
 }
-ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
 
 /*******************************************************************************
  *
@@ -138,7 +140,7 @@
  *              Puts system into ACPI mode if it isn't already.
  *
  ******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
+acpi_status __init acpi_enable_subsystem(u32 flags)
 {
 	acpi_status status = AE_OK;
 
@@ -228,7 +230,8 @@
 
 	return_ACPI_STATUS(status);
 }
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem)
 
 /*******************************************************************************
  *
@@ -242,7 +245,7 @@
  *              objects and executing AML code for Regions, buffers, etc.
  *
  ******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
+acpi_status __init acpi_initialize_objects(u32 flags)
 {
 	acpi_status status = AE_OK;
 
@@ -314,4 +317,5 @@
 	acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
 	return_ACPI_STATUS(status);
 }
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects)
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 46f80e2..6d2c49b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -758,9 +758,9 @@
 		.cap.pointer	= capbuf,
 	};
 
-	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = 1;
-	capbuf[OSC_CONTROL_TYPE] = 0;
+	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+	capbuf[OSC_SUPPORT_DWORD] = 1;
+	capbuf[OSC_CONTROL_DWORD] = 0;
 
 	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
 	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c9958c..fbf1ace 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,12 +36,6 @@
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <linux/power_supply.h>
@@ -72,19 +66,6 @@
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
-	info_tag = 0,
-	state_tag,
-	alarm_tag,
-	ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
 static const struct acpi_device_id battery_device_ids[] = {
 	{"PNP0C0A", 0},
 	{"", 0},
@@ -320,14 +301,6 @@
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-	return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
-		"mA" : "mW";
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -741,279 +714,6 @@
 }
 
 /* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
-	struct acpi_battery *battery = seq->private;
-
-	if (result)
-		goto end;
-
-	seq_printf(seq, "present:                 %s\n",
-		   acpi_battery_present(battery) ? "yes" : "no");
-	if (!acpi_battery_present(battery))
-		goto end;
-	if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "design capacity:         unknown\n");
-	else
-		seq_printf(seq, "design capacity:         %d %sh\n",
-			   battery->design_capacity,
-			   acpi_battery_units(battery));
-
-	if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "last full capacity:      unknown\n");
-	else
-		seq_printf(seq, "last full capacity:      %d %sh\n",
-			   battery->full_charge_capacity,
-			   acpi_battery_units(battery));
-
-	seq_printf(seq, "battery technology:      %srechargeable\n",
-		   (!battery->technology)?"non-":"");
-
-	if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "design voltage:          unknown\n");
-	else
-		seq_printf(seq, "design voltage:          %d mV\n",
-			   battery->design_voltage);
-	seq_printf(seq, "design capacity warning: %d %sh\n",
-		   battery->design_capacity_warning,
-		   acpi_battery_units(battery));
-	seq_printf(seq, "design capacity low:     %d %sh\n",
-		   battery->design_capacity_low,
-		   acpi_battery_units(battery));
-	seq_printf(seq, "cycle count:		  %i\n", battery->cycle_count);
-	seq_printf(seq, "capacity granularity 1:  %d %sh\n",
-		   battery->capacity_granularity_1,
-		   acpi_battery_units(battery));
-	seq_printf(seq, "capacity granularity 2:  %d %sh\n",
-		   battery->capacity_granularity_2,
-		   acpi_battery_units(battery));
-	seq_printf(seq, "model number:            %s\n", battery->model_number);
-	seq_printf(seq, "serial number:           %s\n", battery->serial_number);
-	seq_printf(seq, "battery type:            %s\n", battery->type);
-	seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
-      end:
-	if (result)
-		seq_printf(seq, "ERROR: Unable to read battery info\n");
-	return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
-	struct acpi_battery *battery = seq->private;
-
-	if (result)
-		goto end;
-
-	seq_printf(seq, "present:                 %s\n",
-		   acpi_battery_present(battery) ? "yes" : "no");
-	if (!acpi_battery_present(battery))
-		goto end;
-
-	seq_printf(seq, "capacity state:          %s\n",
-			(battery->state & 0x04) ? "critical" : "ok");
-	if ((battery->state & 0x01) && (battery->state & 0x02))
-		seq_printf(seq,
-			   "charging state:          charging/discharging\n");
-	else if (battery->state & 0x01)
-		seq_printf(seq, "charging state:          discharging\n");
-	else if (battery->state & 0x02)
-		seq_printf(seq, "charging state:          charging\n");
-	else
-		seq_printf(seq, "charging state:          charged\n");
-
-	if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "present rate:            unknown\n");
-	else
-		seq_printf(seq, "present rate:            %d %s\n",
-			   battery->rate_now, acpi_battery_units(battery));
-
-	if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "remaining capacity:      unknown\n");
-	else
-		seq_printf(seq, "remaining capacity:      %d %sh\n",
-			   battery->capacity_now, acpi_battery_units(battery));
-	if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
-		seq_printf(seq, "present voltage:         unknown\n");
-	else
-		seq_printf(seq, "present voltage:         %d mV\n",
-			   battery->voltage_now);
-      end:
-	if (result)
-		seq_printf(seq, "ERROR: Unable to read battery state\n");
-
-	return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
-	struct acpi_battery *battery = seq->private;
-
-	if (result)
-		goto end;
-
-	if (!acpi_battery_present(battery)) {
-		seq_printf(seq, "present:                 no\n");
-		goto end;
-	}
-	seq_printf(seq, "alarm:                   ");
-	if (!battery->alarm)
-		seq_printf(seq, "unsupported\n");
-	else
-		seq_printf(seq, "%u %sh\n", battery->alarm,
-				acpi_battery_units(battery));
-      end:
-	if (result)
-		seq_printf(seq, "ERROR: Unable to read battery alarm\n");
-	return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
-					const char __user * buffer,
-					size_t count, loff_t * ppos)
-{
-	int result = 0;
-	char alarm_string[12] = { '\0' };
-	struct seq_file *m = file->private_data;
-	struct acpi_battery *battery = m->private;
-
-	if (!battery || (count > sizeof(alarm_string) - 1))
-		return -EINVAL;
-	if (!acpi_battery_present(battery)) {
-		result = -ENODEV;
-		goto end;
-	}
-	if (copy_from_user(alarm_string, buffer, count)) {
-		result = -EFAULT;
-		goto end;
-	}
-	alarm_string[count] = '\0';
-	battery->alarm = simple_strtol(alarm_string, NULL, 0);
-	result = acpi_battery_set_alarm(battery);
-      end:
-	if (!result)
-		return count;
-	return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
-	acpi_battery_print_info,
-	acpi_battery_print_state,
-	acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
-	struct acpi_battery *battery = seq->private;
-	int result = acpi_battery_update(battery);
-	return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
-	return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
-	{ \
-	.name = __stringify(_name), \
-	.mode = S_IRUGO, \
-	.ops = { \
-		.open = acpi_battery_##_name##_open_fs, \
-		.read = seq_read, \
-		.llseek = seq_lseek, \
-		.release = single_release, \
-		.owner = THIS_MODULE, \
-		}, \
-	}
-
-#define FILE_DESCRIPTION_RW(_name) \
-	{ \
-	.name = __stringify(_name), \
-	.mode = S_IFREG | S_IRUGO | S_IWUSR, \
-	.ops = { \
-		.open = acpi_battery_##_name##_open_fs, \
-		.read = seq_read, \
-		.llseek = seq_lseek, \
-		.write = acpi_battery_write_##_name, \
-		.release = single_release, \
-		.owner = THIS_MODULE, \
-		}, \
-	}
-
-static const struct battery_file {
-	struct file_operations ops;
-	umode_t mode;
-	const char *name;
-} acpi_battery_file[] = {
-	FILE_DESCRIPTION_RO(info),
-	FILE_DESCRIPTION_RO(state),
-	FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
-	struct proc_dir_entry *entry = NULL;
-	int i;
-
-	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
-			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-	if (!acpi_device_dir(device)) {
-		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-						     acpi_battery_dir);
-		if (!acpi_device_dir(device))
-			return -ENODEV;
-	}
-
-	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-		entry = proc_create_data(acpi_battery_file[i].name,
-					 acpi_battery_file[i].mode,
-					 acpi_device_dir(device),
-					 &acpi_battery_file[i].ops,
-					 acpi_driver_data(device));
-		if (!entry)
-			return -ENODEV;
-	}
-	return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
-	int i;
-	if (!acpi_device_dir(device))
-		return;
-	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
-		remove_proc_entry(acpi_battery_file[i].name,
-				  acpi_device_dir(device));
-
-	remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
-	acpi_device_dir(device) = NULL;
-}
-
-#endif
-
-/* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
 
@@ -1075,15 +775,6 @@
 	result = acpi_battery_update(battery);
 	if (result)
 		goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	result = acpi_battery_add_fs(device);
-#endif
-	if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-		acpi_battery_remove_fs(device);
-#endif
-		goto fail;
-	}
 
 	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
 		ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@
 		return -EINVAL;
 	battery = acpi_driver_data(device);
 	unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_battery_remove_fs(device);
-#endif
 	sysfs_remove_battery(battery);
 	mutex_destroy(&battery->lock);
 	mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@
 {
 	if (acpi_disabled)
 		return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_battery_dir = acpi_lock_battery_dir();
-	if (!acpi_battery_dir)
-		return;
-#endif
-	if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-		acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
-		return;
-	}
-	return;
+	acpi_bus_register_driver(&acpi_battery_driver);
 }
 
 static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@
 static void __exit acpi_battery_exit(void)
 {
 	acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
 }
 
 module_init(acpi_battery_init);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9515f18..fb84837 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -274,6 +274,19 @@
 		},
 	},
 	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba NB100",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+		},
+	},
+
+	/*
+	 * The following machines have broken backlight support when reporting
+	 * the Windows 2012 OSI, so disable it until their support is fixed.
+	 */
+	{
 	.callback = dmi_disable_osi_win8,
 	.ident = "ASUS Zenbook Prime UX31A",
 	.matches = {
@@ -291,12 +304,60 @@
 	},
 	{
 	.callback = dmi_disable_osi_win8,
-	.ident = "Lenovo ThinkPad Edge E530",
+	.ident = "ThinkPad Edge E530",
 	.matches = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "ThinkPad Edge E530",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "ThinkPad Edge E530",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Acer Aspire V5-573G",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Acer Aspire V5-572G",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "ThinkPad T431s",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "ThinkPad T430",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e1bd9a1..bba9b72 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -256,7 +256,7 @@
 			acpi_print_osc_error(handle, context,
 				"_OSC invalid revision");
 		if (errors & OSC_CAPABILITIES_MASK_ERROR) {
-			if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+			if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
 			    & OSC_QUERY_ENABLE)
 				goto out_success;
 			status = AE_SUPPORT;
@@ -296,30 +296,30 @@
 	};
 	acpi_handle handle;
 
-	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+	capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
 #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
 			defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
-	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
 #endif
 
 #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
-	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 #endif
 
 #ifdef ACPI_HOTPLUG_OST
-	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
 #endif
 
 	if (!ghes_disable)
-		capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
+		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
 	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
 		return;
 	if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
 		u32 *capbuf_ret = context.ret.pointer;
-		if (context.ret.length > OSC_SUPPORT_TYPE)
+		if (context.ret.length > OSC_SUPPORT_DWORD)
 			osc_sb_apei_support_acked =
-				capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
+				capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
 		kfree(context.ret.pointer);
 	}
 	/* do we need to check other returned cap? Sounds no */
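
The OSC_*_TYPE to OSC_*_DWORD rename reflects what these indices actually are: DWORD offsets into the _OSC capabilities buffer, whose first element holds the query/error flags, followed by the support and control bits. A sketch of the layout; the index values match the renamed constants, but treat the comments as an informal summary rather than the spec text:

#include <stdint.h>

/* _OSC capability buffer: an array of 32-bit DWORDs */
enum {
	OSC_QUERY_DWORD   = 0,	/* flags: query enable, errors */
	OSC_SUPPORT_DWORD = 1,	/* features the OS supports */
	OSC_CONTROL_DWORD = 2,	/* features the OS wants to control */
};

static void fill_osc_capbuf(uint32_t capbuf[3], uint32_t support)
{
	capbuf[OSC_QUERY_DWORD]   = 1;		/* OSC_QUERY_ENABLE */
	capbuf[OSC_SUPPORT_DWORD] = support;
	capbuf[OSC_CONTROL_DWORD] = 0;		/* query only, request no control */
}
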
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a557738..c971929 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -383,18 +383,15 @@
 
 	switch (button->type) {
 	case ACPI_BUTTON_TYPE_POWER:
-		input->evbit[0] = BIT_MASK(EV_KEY);
-		set_bit(KEY_POWER, input->keybit);
+		input_set_capability(input, EV_KEY, KEY_POWER);
 		break;
 
 	case ACPI_BUTTON_TYPE_SLEEP:
-		input->evbit[0] = BIT_MASK(EV_KEY);
-		set_bit(KEY_SLEEP, input->keybit);
+		input_set_capability(input, EV_KEY, KEY_SLEEP);
 		break;
 
 	case ACPI_BUTTON_TYPE_LID:
-		input->evbit[0] = BIT_MASK(EV_SW);
-		set_bit(SW_LID, input->swbit);
+		input_set_capability(input, EV_SW, SW_LID);
 		break;
 	}
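
input_set_capability() is the input-core helper that sets both the event-type bit and the per-code bit in one call, replacing the open-coded evbit/keybit manipulation above. A hedged sketch of the registration flow (device name is illustrative):

#include <linux/input.h>

/* Sketch: declare a power-button input device the way the converted
 * code does, letting the helper set EV_KEY and KEY_POWER together.
 */
static struct input_dev *make_button_input(void)
{
	struct input_dev *input = input_allocate_device();

	if (!input)
		return NULL;

	input->name = "Example Power Button";	/* illustrative */
	input_set_capability(input, EV_KEY, KEY_POWER);
	return input;
}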
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644
index 6c9ee68..0000000
--- a/drivers/acpi/cm_sbs.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS		"ac_adapter"
-#define ACPI_BATTERY_CLASS	"battery"
-#define _COMPONENT		ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
-	mutex_lock(&cm_sbs_mutex);
-	if (!acpi_ac_dir)
-		acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
-	if (acpi_ac_dir) {
-		lock_ac_dir_cnt++;
-	} else {
-		printk(KERN_ERR PREFIX
-				  "Cannot create %s\n", ACPI_AC_CLASS);
-	}
-	mutex_unlock(&cm_sbs_mutex);
-	return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
-	mutex_lock(&cm_sbs_mutex);
-	if (acpi_ac_dir_param)
-		lock_ac_dir_cnt--;
-	if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
-		remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
-		acpi_ac_dir = NULL;
-	}
-	mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
-	mutex_lock(&cm_sbs_mutex);
-	if (!acpi_battery_dir) {
-		acpi_battery_dir =
-		    proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
-	}
-	if (acpi_battery_dir) {
-		lock_battery_dir_cnt++;
-	} else {
-		printk(KERN_ERR PREFIX
-				  "Cannot create %s\n", ACPI_BATTERY_CLASS);
-	}
-	mutex_unlock(&cm_sbs_mutex);
-	return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
-	mutex_lock(&cm_sbs_mutex);
-	if (acpi_battery_dir_param)
-		lock_battery_dir_cnt--;
-	if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
-	    && acpi_battery_dir) {
-		remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
-		acpi_battery_dir = NULL;
-	}
-	mutex_unlock(&cm_sbs_mutex);
-	return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a94383d..d42b2fb 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -118,9 +118,10 @@
 	/*
 	 * If we were unsure about the device parent's power state up to this
 	 * point, the fact that the device is in D0 implies that the parent has
-	 * to be in D0 too.
+	 * to be in D0 too, except if ignore_parent is set.
 	 */
-	if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+	if (!device->power.flags.ignore_parent && device->parent
+	    && device->parent->power.state == ACPI_STATE_UNKNOWN
 	    && result == ACPI_STATE_D0)
 		device->parent->power.state = ACPI_STATE_D0;
 
@@ -177,7 +178,8 @@
 			 acpi_power_state_string(state));
 		return -ENODEV;
 	}
-	if (device->parent && (state < device->parent->power.state)) {
+	if (!device->power.flags.ignore_parent &&
+	    device->parent && (state < device->parent->power.state)) {
 		dev_warn(&device->dev,
 			 "Cannot transition to power state %s for parent in %s\n",
 			 acpi_power_state_string(state),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 05ea4be..dcd73cc 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -441,7 +441,7 @@
 	acpi_status status;
 	struct acpi_object_list arg_list;
 	union acpi_object arg;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	unsigned long long value;
 
 	acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
 
@@ -450,12 +450,10 @@
 	arg_list.pointer = &arg;
 	arg.type = ACPI_TYPE_INTEGER;
 	arg.integer.value = dock;
-	status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+	status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
 		acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
 				status);
-
-	kfree(buffer.pointer);
 }
 
 static inline void dock(struct dock_station *ds)
@@ -671,39 +669,20 @@
 	}
 }
 
-struct dock_data {
-	struct dock_station *ds;
-	u32 event;
-};
-
-static void acpi_dock_deferred_cb(void *context)
+static void acpi_dock_deferred_cb(void *data, u32 event)
 {
-	struct dock_data *data = context;
-
 	acpi_scan_lock_acquire();
-	dock_notify(data->ds, data->event);
+	dock_notify(data, event);
 	acpi_scan_lock_release();
-	kfree(data);
 }
 
 static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
 {
-	struct dock_data *dd;
-
 	if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
 	   && event != ACPI_NOTIFY_EJECT_REQUEST)
 		return;
 
-	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
-	if (dd) {
-		acpi_status status;
-
-		dd->ds = data;
-		dd->event = event;
-		status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
-		if (ACPI_FAILURE(status))
-			kfree(dd);
-	}
+	acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
 }
 
 /**
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a06d983..d5309fd 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -28,6 +28,7 @@
 
 /* Uncomment next line to get verbose printout */
 /* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -49,9 +50,6 @@
 #define ACPI_EC_DEVICE_NAME		"Embedded Controller"
 #define ACPI_EC_FILE_INFO		"info"
 
-#undef PREFIX
-#define PREFIX				"ACPI: EC: "
-
 /* EC status register */
 #define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
 #define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
@@ -131,26 +129,26 @@
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
 	u8 x = inb(ec->command_addr);
-	pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+	pr_debug("---> status = 0x%2.2x\n", x);
 	return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
 	u8 x = inb(ec->data_addr);
-	pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+	pr_debug("---> data = 0x%2.2x\n", x);
 	return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-	pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+	pr_debug("<--- command = 0x%2.2x\n", command);
 	outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-	pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+	pr_debug("<--- data = 0x%2.2x\n", data);
 	outb(data, ec->data_addr);
 }
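
Defining pr_fmt() before any includes makes every pr_*() call in the file pick up the prefix at compile time, which is why the explicit PREFIX concatenation can be dropped from each call site in the hunks below. A minimal sketch of the idiom (driver name is hypothetical):

/* Must come before the first include that pulls in printk.h */
#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init mydrv_init(void)
{
	pr_info("loaded\n");	/* prints "mydrv: loaded" */
	return 0;
}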
 
@@ -241,7 +239,7 @@
 			}
 			advance_transaction(ec, acpi_ec_read_status(ec));
 		} while (time_before(jiffies, delay));
-		pr_debug(PREFIX "controller reset, restart transaction\n");
+		pr_debug("controller reset, restart transaction\n");
 		spin_lock_irqsave(&ec->lock, flags);
 		start_transaction(ec);
 		spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +307,12 @@
 		}
 	}
 	if (ec_wait_ibf0(ec)) {
-		pr_err(PREFIX "input buffer is not empty, "
+		pr_err("input buffer is not empty, "
 				"aborting transaction\n");
 		status = -ETIME;
 		goto end;
 	}
-	pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+	pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
 			t->command, t->wdata ? t->wdata[0] : 0);
 	/* disable GPE during transaction if storm is detected */
 	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +329,12 @@
 		/* It is safe to enable the GPE outside of the transaction. */
 		acpi_enable_gpe(NULL, ec->gpe);
 	} else if (t->irq_count > ec_storm_threshold) {
-		pr_info(PREFIX "GPE storm detected(%d GPEs), "
+		pr_info("GPE storm detected(%d GPEs), "
 			"transactions will use polling mode\n",
 			t->irq_count);
 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
 	}
-	pr_debug(PREFIX "transaction end\n");
+	pr_debug("transaction end\n");
 end:
 	if (ec->global_lock)
 		acpi_release_global_lock(glk);
@@ -570,12 +568,12 @@
 	struct acpi_ec_query_handler *handler = cxt;
 	if (!handler)
 		return;
-	pr_debug(PREFIX "start query execution\n");
+	pr_debug("start query execution\n");
 	if (handler->func)
 		handler->func(handler->data);
 	else if (handler->handle)
 		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
-	pr_debug(PREFIX "stop query execution\n");
+	pr_debug("stop query execution\n");
 	kfree(handler);
 }
 
@@ -593,7 +591,8 @@
 			if (!copy)
 				return -ENOMEM;
 			memcpy(copy, handler, sizeof(*copy));
-			pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+			pr_debug("push query execution (0x%2x) on queue\n",
+				value);
 			return acpi_os_execute((copy->func) ?
 				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
 				acpi_ec_run, copy);
@@ -616,7 +615,7 @@
 {
 	if (state & ACPI_EC_FLAG_SCI) {
 		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-			pr_debug(PREFIX "push gpe query to the queue\n");
+			pr_debug("push gpe query to the queue\n");
 			return acpi_os_execute(OSL_NOTIFY_HANDLER,
 				acpi_ec_gpe_query, ec);
 		}
@@ -630,7 +629,7 @@
 	struct acpi_ec *ec = data;
 	u8 status = acpi_ec_read_status(ec);
 
-	pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+	pr_debug("~~~> interrupt, status:0x%02x\n", status);
 
 	advance_transaction(ec, status);
 	if (ec_transaction_done(ec) &&
@@ -776,7 +775,7 @@
 			 * The AE_NOT_FOUND error will be ignored and the OS
 			 * will continue to initialize the EC.
 			 */
-			printk(KERN_ERR "Fail in evaluating the _REG object"
+			pr_err("Fail in evaluating the _REG object"
 				" of EC device. Broken bios is suspected.\n");
 		} else {
 			acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +794,10 @@
 	acpi_disable_gpe(NULL, ec->gpe);
 	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
 				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
-		pr_err(PREFIX "failed to remove space handler\n");
+		pr_err("failed to remove space handler\n");
 	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
 				&acpi_ec_gpe_handler)))
-		pr_err(PREFIX "failed to remove gpe handler\n");
+		pr_err("failed to remove gpe handler\n");
 	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
 }
 
@@ -840,7 +839,7 @@
 	ret = !!request_region(ec->command_addr, 1, "EC cmd");
 	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-	pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
 			  ec->gpe, ec->command_addr, ec->data_addr);
 
 	ret = ec_install_handlers(ec);
@@ -931,7 +930,7 @@
 /* MSI EC needs special treatment, enable it */
 static int ec_flag_msi(const struct dmi_system_id *id)
 {
-	printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+	pr_debug("Detected MSI hardware, enabling workarounds.\n");
 	EC_FLAGS_MSI = 1;
 	EC_FLAGS_VALIDATE_ECDT = 1;
 	return 0;
@@ -1010,7 +1009,7 @@
 	status = acpi_get_table(ACPI_SIG_ECDT, 1,
 				(struct acpi_table_header **)&ecdt_ptr);
 	if (ACPI_SUCCESS(status)) {
-		pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+		pr_info("EC description table is found, configuring boot EC\n");
 		boot_ec->command_addr = ecdt_ptr->control.address;
 		boot_ec->data_addr = ecdt_ptr->data.address;
 		boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1029,7 @@
 
 	/* This workaround is needed only on some broken machines,
 	 * which require early EC, but fail to provide ECDT */
-	printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+	pr_debug("Look up EC in DSDT\n");
 	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
 					boot_ec, NULL);
 	/* Check that acpi_get_devices actually find something */
@@ -1042,7 +1041,7 @@
 		    saved_ec->data_addr != boot_ec->data_addr ||
 		    saved_ec->gpe != boot_ec->gpe ||
 		    saved_ec->handle != boot_ec->handle)
-			pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+			pr_info("ASUSTek keeps feeding us with broken "
 			"ECDT tables, which are very hard to workaround. "
 			"Trying to use DSDT EC info instead. Please send "
 			"output of acpidump to linux-acpi@vger.kernel.org\n");
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 8247fcd..fdef416 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -127,11 +127,6 @@
 	}
 
 	event = nla_data(attr);
-	if (!event) {
-		nlmsg_free(skb);
-		return -EINVAL;
-	}
-
 	memset(event, 0, sizeof(struct acpi_genl_event));
 
 	strcpy(event->device_class, device_class);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 41ade65..ba3da88 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -168,7 +168,7 @@
 	       acpi_device_name(device), acpi_device_bid(device),
 	       !device->power.state ? "on" : "off");
 
-      end:
+end:
 	return result;
 }
 
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 20f4233..a29739c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -26,11 +26,6 @@
 acpi_status acpi_os_initialize1(void);
 int init_acpi_device_notify(void);
 int acpi_scan_init(void);
-#ifdef	CONFIG_ACPI_PCI_SLOT
-void acpi_pci_slot_init(void);
-#else
-static inline void acpi_pci_slot_init(void) { }
-#endif
 void acpi_pci_root_init(void);
 void acpi_pci_link_init(void);
 void acpi_pci_root_hp_init(void);
@@ -92,6 +87,7 @@
 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 int acpi_bind_one(struct device *dev, acpi_handle handle);
 int acpi_unbind_one(struct device *dev);
+void acpi_bus_device_eject(void *data, u32 ost_src);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
@@ -169,9 +165,7 @@
 					Video
   -------------------------------------------------------------------------- */
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2e82e5d..a2343a1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -73,7 +73,7 @@
 {
 	int node = pxm_to_node_map[pxm];
 
-	if (node < 0) {
+	if (node == NUMA_NO_NODE) {
 		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
 			return NUMA_NO_NODE;
 		node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@
 
 int acpi_get_node(acpi_handle *handle)
 {
-	int pxm, node = -1;
+	int pxm, node = NUMA_NO_NODE;
 
 	pxm = acpi_get_pxm(handle);
 	if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
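
The numa.c hunks swap the bare -1 sentinel for the named NUMA_NO_NODE
constant (defined as (-1) in include/linux/numa.h), so the comparison
documents itself. Condensed sketch of the first hunk:

	#include <linux/numa.h>		/* #define NUMA_NO_NODE (-1) */

	int node = pxm_to_node_map[pxm];
	if (node == NUMA_NO_NODE)	/* clearer than "node < 0" */
		node = first_unset_node(nodes_found_map);
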
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c..54a20ff 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -61,7 +61,6 @@
 	acpi_osd_exec_callback function;
 	void *context;
 	struct work_struct work;
-	int wait;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -569,8 +568,10 @@
 
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
 
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
 
 void __init acpi_initrd_override(void *data, size_t size)
 {
@@ -579,8 +580,6 @@
 	struct acpi_table_header *table;
 	char cpio_path[32] = "kernel/firmware/acpi/";
 	struct cpio_data file;
-	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
-	char *p;
 
 	if (data == NULL || size == 0)
 		return;
@@ -625,8 +624,8 @@
 			table->signature, cpio_path, file.name, table->length);
 
 		all_tables_size += table->length;
-		early_initrd_files[table_nr].data = file.data;
-		early_initrd_files[table_nr].size = file.size;
+		acpi_initrd_files[table_nr].data = file.data;
+		acpi_initrd_files[table_nr].size = file.size;
 		table_nr++;
 	}
 	if (table_nr == 0)
@@ -652,14 +651,34 @@
 	memblock_reserve(acpi_tables_addr, all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
-	p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+	/*
+	 * early_ioremap() can only remap 256 KiB at a time. If we mapped
+	 * all of the tables at once, we would hit that limit, so map and
+	 * copy the chunks one by one here, as relocate_initrd() does.
+	 */
 	for (no = 0; no < table_nr; no++) {
-		memcpy(p + total_offset, early_initrd_files[no].data,
-		       early_initrd_files[no].size);
-		total_offset += early_initrd_files[no].size;
+		unsigned char *src_p = acpi_initrd_files[no].data;
+		phys_addr_t size = acpi_initrd_files[no].size;
+		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+		phys_addr_t slop, clen;
+		char *dest_p;
+
+		total_offset += size;
+
+		while (size) {
+			slop = dest_addr & ~PAGE_MASK;
+			clen = size;
+			if (clen > MAP_CHUNK_SIZE - slop)
+				clen = MAP_CHUNK_SIZE - slop;
+			dest_p = early_ioremap(dest_addr & PAGE_MASK,
+						 clen + slop);
+			memcpy(dest_p + slop, src_p, clen);
+			early_iounmap(dest_p, clen + slop);
+			src_p += clen;
+			dest_addr += clen;
+			size -= clen;
+		}
 	}
-	early_iounmap(p, all_tables_size);
 }
 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
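
The comment in the new copy loop states the key constraint: early_ioremap()
can only map NR_FIX_BTMAPS pages (256 KiB with 4 KiB pages) at once, and the
destination is not necessarily page-aligned, so each mapping starts at the
page boundary below the destination and the usable room shrinks by that
"slop". A small standalone sketch of the arithmetic (the address is
illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT     12
	#define PAGE_SIZE      (1UL << PAGE_SHIFT)
	#define PAGE_MASK      (~(PAGE_SIZE - 1))
	#define NR_FIX_BTMAPS  64
	#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)	/* 256 KiB */

	int main(void)
	{
		unsigned long dest = 0x1234567;		/* hypothetical address */
		unsigned long slop = dest & ~PAGE_MASK;	/* 0x567 into its page */
		unsigned long clen = MAP_CHUNK_SIZE - slop;

		printf("slop=%#lx, first chunk copies %#lx bytes\n", slop, clen);
		return 0;
	}
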
 
@@ -820,7 +839,7 @@
 
 void acpi_os_sleep(u64 ms)
 {
-	schedule_timeout_interruptible(msecs_to_jiffies(ms));
+	msleep(ms);
 }
 
 void acpi_os_stall(u32 us)
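
The acpi_os_sleep() change is subtle but real: schedule_timeout_interruptible()
returns early if a signal is pending on the current task, so ACPI-mandated
delays could be cut short, whereas msleep() waits uninterruptibly for at
least the requested time. Side by side:

	/* before: a pending signal aborts the wait early */
	schedule_timeout_interruptible(msecs_to_jiffies(ms));

	/* after: uninterruptible, sleeps at least ms milliseconds */
	msleep(ms);
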
@@ -1067,9 +1086,6 @@
 {
 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
-	if (dpc->wait)
-		acpi_os_wait_events_complete();
-
 	dpc->function(dpc->context);
 	kfree(dpc);
 }
@@ -1089,8 +1105,8 @@
  *
  ******************************************************************************/
 
-static acpi_status __acpi_os_execute(acpi_execute_type type,
-	acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+			    acpi_osd_exec_callback function, void *context)
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
@@ -1117,20 +1133,11 @@
 	dpc->context = context;
 
 	/*
-	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
-	 * because the hotplug code may call driver .remove() functions,
-	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
-	 * to flush these workqueues.
-	 *
 	 * To prevent lockdep from complaining unnecessarily, make sure that
 	 * there is a different static lockdep key for each workqueue by using
 	 * INIT_WORK() for each of them separately.
 	 */
-	if (hp) {
-		queue = kacpi_hotplug_wq;
-		dpc->wait = 1;
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	} else if (type == OSL_NOTIFY_HANDLER) {
+	if (type == OSL_NOTIFY_HANDLER) {
 		queue = kacpi_notify_wq;
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 	} else {
@@ -1155,28 +1162,59 @@
 	}
 	return status;
 }
-
-acpi_status acpi_os_execute(acpi_execute_type type,
-			    acpi_osd_exec_callback function, void *context)
-{
-	return __acpi_os_execute(type, function, context, 0);
-}
 EXPORT_SYMBOL(acpi_os_execute);
 
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
-	void *context)
-{
-	return __acpi_os_execute(0, function, context, 1);
-}
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
-
 void acpi_os_wait_events_complete(void)
 {
 	flush_workqueue(kacpid_wq);
 	flush_workqueue(kacpi_notify_wq);
 }
 
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
+struct acpi_hp_work {
+	struct work_struct work;
+	acpi_hp_callback func;
+	void *data;
+	u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
+{
+	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+	acpi_os_wait_events_complete();
+	hpw->func(hpw->data, hpw->src);
+	kfree(hpw);
+}
+
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
+{
+	struct acpi_hp_work *hpw;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+		  "Scheduling function [%p(%p, %u)] for deferred execution.\n",
+		  func, data, src));
+
+	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+	if (!hpw)
+		return AE_NO_MEMORY;
+
+	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+	hpw->func = func;
+	hpw->data = data;
+	hpw->src = src;
+	/*
+	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+	 * the hotplug code may call driver .remove() functions, which may
+	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+	 * these workqueues.
+	 */
+	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+		kfree(hpw);
+		return AE_ERROR;
+	}
+	return AE_OK;
+}
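
acpi_hotplug_execute() replaces both acpi_os_hotplug_execute() and
alloc_acpi_hp_work(): it bundles the callback, its data pointer and the
originating event into a struct acpi_hp_work, queues that on
kacpi_hotplug_wq, and frees it once the callback has run. A minimal sketch
of a caller (my_hotplug_work() and my_notify() are hypothetical; the real
converted callers are dock_notify_handler() and handle_hotplug_event_root()
elsewhere in this diff):

	static void my_hotplug_work(void *data, u32 event)
	{
		/* Runs on kacpi_hotplug_wq after pending events are flushed. */
	}

	static void my_notify(acpi_handle handle, u32 event, void *data)
	{
		acpi_status status;

		status = acpi_hotplug_execute(my_hotplug_work, data, event);
		if (ACPI_FAILURE(status))
			pr_err("failed to defer hotplug work\n");
	}
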
 
 acpi_status
 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1335,7 +1373,7 @@
 	if (!str || !*str)
 		return 0;
 
-	for (; count-- && str && *str; str++) {
+	for (; count-- && *str; str++) {
 		if (isalnum(*str) || *str == ' ' || *str == ':')
 			*p++ = *str;
 		else if (*str == '\'' || *str == '"')
@@ -1825,25 +1863,3 @@
 {
 	__acpi_os_prepare_extended_sleep = func;
 }
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
-			void (*func)(struct work_struct *work))
-{
-	struct acpi_hp_work *hp_work;
-	int ret;
-
-	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
-	if (!hp_work)
-		return;
-
-	hp_work->handle = handle;
-	hp_work->type = type;
-	hp_work->context = context;
-
-	INIT_WORK(&hp_work->work, func);
-	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
-	if (!ret)
-		kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d3874f4..56f0586 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -39,6 +39,8 @@
 #include <acpi/acpi_drivers.h>
 #include <acpi/apei.h>
 
+#include "internal.h"
+
 #define PREFIX "ACPI: "
 
 #define _COMPONENT		ACPI_PCI_COMPONENT
@@ -49,10 +51,10 @@
 			     const struct acpi_device_id *not_used);
 static void acpi_pci_root_remove(struct acpi_device *device);
 
-#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
-				| OSC_ACTIVE_STATE_PWR_SUPPORT \
-				| OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
-				| OSC_MSI_SUPPORT)
+#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
+				| OSC_PCI_ASPM_SUPPORT \
+				| OSC_PCI_CLOCK_PM_SUPPORT \
+				| OSC_PCI_MSI_SUPPORT)
 
 static const struct acpi_device_id root_device_ids[] = {
 	{"PNP0A03", 0},
@@ -127,6 +129,55 @@
 	return AE_OK;
 }
 
+struct pci_osc_bit_struct {
+	u32 bit;
+	char *desc;
+};
+
+static struct pci_osc_bit_struct pci_osc_support_bit[] = {
+	{ OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
+	{ OSC_PCI_ASPM_SUPPORT, "ASPM" },
+	{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
+	{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
+	{ OSC_PCI_MSI_SUPPORT, "MSI" },
+};
+
+static struct pci_osc_bit_struct pci_osc_control_bit[] = {
+	{ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
+	{ OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
+	{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
+	{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
+	{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+};
+
+static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
+			    struct pci_osc_bit_struct *table, int size)
+{
+	char buf[80];
+	int i, len = 0;
+	struct pci_osc_bit_struct *entry;
+
+	buf[0] = '\0';
+	for (i = 0, entry = table; i < size; i++, entry++)
+		if (word & entry->bit)
+			len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+					len ? " " : "", entry->desc);
+
+	dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
+}
+
+static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+	decode_osc_bits(root, msg, word, pci_osc_support_bit,
+			ARRAY_SIZE(pci_osc_support_bit));
+}
+
+static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+	decode_osc_bits(root, msg, word, pci_osc_control_bit,
+			ARRAY_SIZE(pci_osc_control_bit));
+}
+
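
decode_osc_bits() folds the names of all set bits into one bracketed list
per dev_info() line, so the _OSC negotiation becomes readable in dmesg. For
illustration only (device name and mask are hypothetical), a support word of
0x1f would come out as:

	acpi PNP0A08:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI]
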
 static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
 
 static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -158,14 +209,14 @@
 	support &= OSC_PCI_SUPPORT_MASKS;
 	support |= root->osc_support_set;
 
-	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = support;
+	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+	capbuf[OSC_SUPPORT_DWORD] = support;
 	if (control) {
 		*control &= OSC_PCI_CONTROL_MASKS;
-		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+		capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
 	} else {
 		/* Run _OSC query only with existing controls. */
-		capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+		capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
 	}
 
 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -180,11 +231,7 @@
 static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 {
 	acpi_status status;
-	acpi_handle tmp;
 
-	status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
 	mutex_lock(&osc_lock);
 	status = acpi_pci_query_osc(root, flags, NULL);
 	mutex_unlock(&osc_lock);
@@ -316,9 +363,8 @@
 acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
 	struct acpi_pci_root *root;
-	acpi_status status;
+	acpi_status status = AE_OK;
 	u32 ctrl, capbuf[3];
-	acpi_handle tmp;
 
 	if (!mask)
 		return AE_BAD_PARAMETER;
@@ -331,10 +377,6 @@
 	if (!root)
 		return AE_NOT_EXIST;
 
-	status = acpi_get_handle(handle, "_OSC", &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
-
 	mutex_lock(&osc_lock);
 
 	*mask = ctrl | root->osc_control_set;
@@ -349,17 +391,21 @@
 			goto out;
 		if (ctrl == *mask)
 			break;
+		decode_osc_control(root, "platform does not support",
+				   ctrl & ~(*mask));
 		ctrl = *mask;
 	}
 
 	if ((ctrl & req) != req) {
+		decode_osc_control(root, "not requesting control; platform does not support",
+				   req & ~(ctrl));
 		status = AE_SUPPORT;
 		goto out;
 	}
 
-	capbuf[OSC_QUERY_TYPE] = 0;
-	capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-	capbuf[OSC_CONTROL_TYPE] = ctrl;
+	capbuf[OSC_QUERY_DWORD] = 0;
+	capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
+	capbuf[OSC_CONTROL_DWORD] = ctrl;
 	status = acpi_pci_run_osc(handle, capbuf, mask);
 	if (ACPI_SUCCESS(status))
 		root->osc_control_set = *mask;
@@ -369,6 +415,87 @@
 }
 EXPORT_SYMBOL(acpi_pci_osc_control_set);
 
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+				 int *clear_aspm)
+{
+	u32 support, control, requested;
+	acpi_status status;
+	struct acpi_device *device = root->device;
+	acpi_handle handle = device->handle;
+
+	/*
+	 * All supported architectures that use ACPI have support for
+	 * PCI domains, so we indicate this in _OSC support capabilities.
+	 */
+	support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+	if (pci_ext_cfg_avail())
+		support |= OSC_PCI_EXT_CONFIG_SUPPORT;
+	if (pcie_aspm_support_enabled())
+		support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
+	if (pci_msi_enabled())
+		support |= OSC_PCI_MSI_SUPPORT;
+
+	decode_osc_support(root, "OS supports", support);
+	status = acpi_pci_osc_support(root, support);
+	if (ACPI_FAILURE(status)) {
+		dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+			 acpi_format_exception(status));
+		*no_aspm = 1;
+		return;
+	}
+
+	if (pcie_ports_disabled) {
+		dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
+		return;
+	}
+
+	if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
+		decode_osc_support(root, "not requesting OS control; OS requires",
+				   ACPI_PCIE_REQ_SUPPORT);
+		return;
+	}
+
+	control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
+		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+		| OSC_PCI_EXPRESS_PME_CONTROL;
+
+	if (pci_aer_available()) {
+		if (aer_acpi_firmware_first())
+			dev_info(&device->dev,
+				 "PCIe AER handled by firmware\n");
+		else
+			control |= OSC_PCI_EXPRESS_AER_CONTROL;
+	}
+
+	requested = control;
+	status = acpi_pci_osc_control_set(handle, &control,
+					  OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
+	if (ACPI_SUCCESS(status)) {
+		decode_osc_control(root, "OS now controls", control);
+		if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+			/*
+			 * We have ASPM control, but the FADT indicates
+			 * that it's unsupported. Clear it.
+			 */
+			*clear_aspm = 1;
+		}
+	} else {
+		decode_osc_control(root, "OS requested", requested);
+		decode_osc_control(root, "platform willing to grant", control);
+		dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+			acpi_format_exception(status));
+
+		/*
+		 * We want to disable ASPM here, but aspm_disabled
+		 * needs to remain in its state from boot so that we
+		 * properly handle PCIe 1.1 devices.  So we set this
+		 * flag here, to defer the action until after the ACPI
+		 * root scan.
+		 */
+		*no_aspm = 1;
+	}
+}
+
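
negotiate_os_control() reports its decisions only through the two
out-parameters; acting on them stays in the caller, so the ASPM state is
changed after negotiation is complete. A condensed sketch of the caller's
side (the pcie_clear_aspm()/pcie_no_aspm() calls live in
acpi_pci_root_add(), outside the hunks shown here):

	int no_aspm = 0, clear_aspm = 0;

	negotiate_os_control(root, &no_aspm, &clear_aspm);

	if (clear_aspm)
		pcie_clear_aspm(root->bus);	/* FADT says ASPM is unsupported */
	if (no_aspm)
		pcie_no_aspm();			/* _OSC negotiation failed */
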
 static int acpi_pci_root_add(struct acpi_device *device,
 			     const struct acpi_device_id *not_used)
 {
@@ -376,9 +503,8 @@
 	acpi_status status;
 	int result;
 	struct acpi_pci_root *root;
-	u32 flags, base_flags;
 	acpi_handle handle = device->handle;
-	bool no_aspm = false, clear_aspm = false;
+	int no_aspm = 0, clear_aspm = 0;
 
 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
@@ -431,81 +557,7 @@
 
 	root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
 
-	/*
-	 * All supported architectures that use ACPI have support for
-	 * PCI domains, so we indicate this in _OSC support capabilities.
-	 */
-	flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
-	acpi_pci_osc_support(root, flags);
-
-	if (pci_ext_cfg_avail())
-		flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
-	if (pcie_aspm_support_enabled()) {
-		flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
-		OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
-	}
-	if (pci_msi_enabled())
-		flags |= OSC_MSI_SUPPORT;
-	if (flags != base_flags) {
-		status = acpi_pci_osc_support(root, flags);
-		if (ACPI_FAILURE(status)) {
-			dev_info(&device->dev, "ACPI _OSC support "
-				"notification failed, disabling PCIe ASPM\n");
-			no_aspm = true;
-			flags = base_flags;
-		}
-	}
-
-	if (!pcie_ports_disabled
-	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
-		flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
-			| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
-			| OSC_PCI_EXPRESS_PME_CONTROL;
-
-		if (pci_aer_available()) {
-			if (aer_acpi_firmware_first())
-				dev_dbg(&device->dev,
-					"PCIe errors handled by BIOS.\n");
-			else
-				flags |= OSC_PCI_EXPRESS_AER_CONTROL;
-		}
-
-		dev_info(&device->dev,
-			"Requesting ACPI _OSC control (0x%02x)\n", flags);
-
-		status = acpi_pci_osc_control_set(handle, &flags,
-				       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-		if (ACPI_SUCCESS(status)) {
-			dev_info(&device->dev,
-				"ACPI _OSC control (0x%02x) granted\n", flags);
-			if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
-				/*
-				 * We have ASPM control, but the FADT indicates
-				 * that it's unsupported. Clear it.
-				 */
-				clear_aspm = true;
-			}
-		} else {
-			dev_info(&device->dev,
-				"ACPI _OSC request failed (%s), "
-				"returned control mask: 0x%02x\n",
-				acpi_format_exception(status), flags);
-			dev_info(&device->dev,
-				 "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
-			/*
-			 * We want to disable ASPM here, but aspm_disabled
-			 * needs to remain in its state from boot so that we
-			 * properly handle PCIe 1.1 devices.  So we set this
-			 * flag here, to defer the action until after the ACPI
-			 * root scan.
-			 */
-			no_aspm = true;
-		}
-	} else {
-		dev_info(&device->dev,
-			 "Unable to request _OSC control "
-			 "(_OSC support mask: 0x%02x)\n", flags);
-	}
+	negotiate_os_control(root, &no_aspm, &clear_aspm);
 
 	/*
 	 * TBD: Need PCI interface for enumeration/configuration of roots.
@@ -590,39 +642,10 @@
 		acpi_handle_err(handle, "cannot add bridge to acpi list\n");
 }
 
-static void handle_root_bridge_removal(struct acpi_device *device)
+static void hotplug_event_root(void *data, u32 type)
 {
-	acpi_status status;
-	struct acpi_eject_event *ej_event;
-
-	ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
-	if (!ej_event) {
-		/* Inform firmware the hot-remove operation has error */
-		(void) acpi_evaluate_hotplug_ost(device->handle,
-					ACPI_NOTIFY_EJECT_REQUEST,
-					ACPI_OST_SC_NON_SPECIFIC_FAILURE,
-					NULL);
-		return;
-	}
-
-	ej_event->device = device;
-	ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
-
-	status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
-	if (ACPI_FAILURE(status))
-		kfree(ej_event);
-}
-
-static void _handle_hotplug_event_root(struct work_struct *work)
-{
+	acpi_handle handle = data;
 	struct acpi_pci_root *root;
-	struct acpi_hp_work *hp_work;
-	acpi_handle handle;
-	u32 type;
-
-	hp_work = container_of(work, struct acpi_hp_work, work);
-	handle = hp_work->handle;
-	type = hp_work->type;
 
 	acpi_scan_lock_acquire();
 
@@ -652,9 +675,15 @@
 		/* request device eject */
 		acpi_handle_printk(KERN_DEBUG, handle,
 				   "Device eject notify on %s\n", __func__);
-		if (root)
-			handle_root_bridge_removal(root->device);
-		break;
+		if (!root)
+			break;
+
+		get_device(&root->device->dev);
+
+		acpi_scan_lock_release();
+
+		acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST);
+		return;
 	default:
 		acpi_handle_warn(handle,
 				 "notify_handler: unknown event type 0x%x\n",
@@ -663,14 +692,12 @@
 	}
 
 	acpi_scan_lock_release();
-	kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
 }
 
 static void handle_hotplug_event_root(acpi_handle handle, u32 type,
 					void *context)
 {
-	alloc_acpi_hp_work(handle, type, context,
-				_handle_hotplug_event_root);
+	acpi_hotplug_execute(hotplug_event_root, handle, type);
 }
 
 static acpi_status __init
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 04a1378..6a5b152 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -8,289 +8,17 @@
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
 #include "sleep.h"
 
 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
 
 /*
  * this file provides support for:
- * /proc/acpi/alarm
  * /proc/acpi/wakeup
  */
 
 ACPI_MODULE_NAME("sleep")
 
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define	HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
-	u32 sec, min, hr;
-	u32 day, mo, yr, cent = 0;
-	u32 today = 0;
-	unsigned char rtc_control = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-
-	rtc_control = CMOS_READ(RTC_CONTROL);
-	sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
-	min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
-	hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
-	/* If we ever get an FACP with proper values... */
-	if (acpi_gbl_FADT.day_alarm) {
-		/* ACPI spec: only low 6 its should be cared */
-		day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
-		if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-			day = bcd2bin(day);
-	} else
-		day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-	if (acpi_gbl_FADT.month_alarm)
-		mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
-	else {
-		mo = cmos_bcd_read(RTC_MONTH, rtc_control);
-		today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-	}
-	if (acpi_gbl_FADT.century)
-		cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
-	yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	/* we're trusting the FADT (see above) */
-	if (!acpi_gbl_FADT.century)
-		/* If we're not trusting the FADT, we should at least make it
-		 * right for _this_ century... ehm, what is _this_ century?
-		 *
-		 * TBD:
-		 *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
-		 *        which we can trust to determine the century correctly. Atom
-		 *        watch driver would be nice, too...
-		 *
-		 *  if that has not happened, change for first release in 2050:
-		 *        if (yr<50)
-		 *                yr += 2100;
-		 *        else
-		 *                yr += 2000;   // current line of code
-		 *
-		 *  if that has not happened either, please do on 2099/12/31:23:59:59
-		 *        s/2000/2100
-		 *
-		 */
-		yr += 2000;
-	else
-		yr += cent * 100;
-
-	/*
-	 * Show correct dates for alarms up to a month into the future.
-	 * This solves issues for nearly all situations with the common
-	 * 30-day alarm clocks in PC hardware.
-	 */
-	if (day < today) {
-		if (mo < 12) {
-			mo += 1;
-		} else {
-			mo = 1;
-			yr += 1;
-		}
-	}
-
-	seq_printf(seq, "%4.4u-", yr);
-	(mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
-	(day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
-	(hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
-	(min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
-	(sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
-	return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
-	char *next = NULL;
-	char *string_end = NULL;
-	int result = -EINVAL;
-
-	/*
-	 * Try to find delimeter, only to insert null.  The end of the
-	 * string won't have one, but is still valid.
-	 */
-	if (*p == NULL)
-		return result;
-
-	next = strpbrk(*p, "- :");
-	if (next)
-		*next++ = '\0';
-
-	*value = simple_strtoul(*p, &string_end, 10);
-
-	/* Signal success if we got a good digit */
-	if (string_end != *p)
-		result = 0;
-
-	if (next)
-		*p = next;
-	else
-		*p = NULL;
-
-	return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
-	u32 val = CMOS_READ(offset);
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		val = bcd2bin(val);
-	return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		val = bin2bcd(val);
-	CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
-			const char __user * buffer, size_t count, loff_t * ppos)
-{
-	int result = 0;
-	char alarm_string[30] = { '\0' };
-	char *p = alarm_string;
-	u32 sec, min, hr, day, mo, yr;
-	int adjust = 0;
-	unsigned char rtc_control = 0;
-
-	if (count > sizeof(alarm_string) - 1)
-		return -EINVAL;
-
-	if (copy_from_user(alarm_string, buffer, count))
-		return -EFAULT;
-
-	alarm_string[count] = '\0';
-
-	/* check for time adjustment */
-	if (alarm_string[0] == '+') {
-		p++;
-		adjust = 1;
-	}
-
-	if ((result = get_date_field(&p, &yr)))
-		goto end;
-	if ((result = get_date_field(&p, &mo)))
-		goto end;
-	if ((result = get_date_field(&p, &day)))
-		goto end;
-	if ((result = get_date_field(&p, &hr)))
-		goto end;
-	if ((result = get_date_field(&p, &min)))
-		goto end;
-	if ((result = get_date_field(&p, &sec)))
-		goto end;
-
-	spin_lock_irq(&rtc_lock);
-
-	rtc_control = CMOS_READ(RTC_CONTROL);
-
-	if (adjust) {
-		yr += cmos_bcd_read(RTC_YEAR, rtc_control);
-		mo += cmos_bcd_read(RTC_MONTH, rtc_control);
-		day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-		hr += cmos_bcd_read(RTC_HOURS, rtc_control);
-		min += cmos_bcd_read(RTC_MINUTES, rtc_control);
-		sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
-	}
-
-	spin_unlock_irq(&rtc_lock);
-
-	if (sec > 59) {
-		min += sec/60;
-		sec = sec%60;
-	}
-	if (min > 59) {
-		hr += min/60;
-		min = min%60;
-	}
-	if (hr > 23) {
-		day += hr/24;
-		hr = hr%24;
-	}
-	if (day > 31) {
-		mo += day/32;
-		day = day%32;
-	}
-	if (mo > 12) {
-		yr += mo/13;
-		mo = mo%13;
-	}
-
-	spin_lock_irq(&rtc_lock);
-	/*
-	 * Disable alarm interrupt before setting alarm timer or else
-	 * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
-	 */
-	rtc_control &= ~RTC_AIE;
-	CMOS_WRITE(rtc_control, RTC_CONTROL);
-	CMOS_READ(RTC_INTR_FLAGS);
-
-	/* write the fields the rtc knows about */
-	cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
-	cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
-	cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
-	/*
-	 * If the system supports an enhanced alarm it will have non-zero
-	 * offsets into the CMOS RAM here -- which for some reason are pointing
-	 * to the RTC area of memory.
-	 */
-	if (acpi_gbl_FADT.day_alarm)
-		cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
-	if (acpi_gbl_FADT.month_alarm)
-		cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
-	if (acpi_gbl_FADT.century) {
-		if (adjust)
-			yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
-		cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
-	}
-	/* enable the rtc alarm interrupt */
-	rtc_control |= RTC_AIE;
-	CMOS_WRITE(rtc_control, RTC_CONTROL);
-	CMOS_READ(RTC_INTR_FLAGS);
-
-	spin_unlock_irq(&rtc_lock);
-
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_enable_event(ACPI_EVENT_RTC, 0);
-
-	*ppos += count;
-
-	result = 0;
-      end:
-	return result ? result : count;
-}
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
 static int
 acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 {
@@ -417,41 +145,8 @@
 	.release = single_release,
 };
 
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_system_alarm_open_fs,
-	.read = seq_read,
-	.write = acpi_system_write_alarm,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_disable_event(ACPI_EVENT_RTC, 0);
-
-	return ACPI_INTERRUPT_HANDLED;
-}
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
 int __init acpi_sleep_proc_init(void)
 {
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-	/* 'alarm' [R/W] */
-	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-		    acpi_root_dir, &acpi_system_alarm_fops);
-
-	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
-	/*
-	 * Disable the RTC event after installing RTC handler.
-	 * Only when RTC alarm is set will it be enabled.
-	 */
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
 	/* 'wakeup device' [R/W] */
 	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
 		    acpi_root_dir, &acpi_system_wakeup_device_fops);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cf34d90..b3171f3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -162,16 +162,23 @@
 	return apic_id;
 }
 
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
 {
-#ifdef CONFIG_SMP
-	int i;
-#endif
-	int apic_id = -1;
+	int apic_id;
 
 	apic_id = map_mat_entry(handle, type, acpi_id);
 	if (apic_id == -1)
 		apic_id = map_madt_entry(type, acpi_id);
+
+	return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+	int i;
+#endif
+
 	if (apic_id == -1) {
 		/*
 		 * On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@
 #endif
 	return -1;
 }
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+	int apic_id;
+
+	apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+	return acpi_map_cpuid(apic_id, acpi_id);
+}
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
 static bool __init processor_physically_present(acpi_handle handle)
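
The processor_core.c refactor splits the old lookup into two independently
usable steps, with acpi_get_cpuid() kept as their composition. In sketch
form:

	int apic_id = acpi_get_apicid(handle, type, acpi_id);	/* _MAT, then MADT */
	int cpu = acpi_map_cpuid(apic_id, acpi_id);		/* APIC ID -> logical CPU */
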
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e534ba6..146ab7e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -153,8 +153,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
 	    .notifier_call = acpi_cpu_soft_notify,
 };
 
@@ -172,7 +171,6 @@
 
 #ifdef CONFIG_CPU_FREQ
 	acpi_processor_ppc_has_changed(pr, 0);
-	acpi_processor_load_module(pr);
 #endif
 	acpi_processor_get_throttling_info(pr);
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c7414a5..644516d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -265,9 +265,6 @@
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
-	if (!pr)
-		return -EINVAL;
-
 	if (!pr->pblk)
 		return -ENODEV;
 
@@ -1050,12 +1047,8 @@
 	if (disabled_by_idle_boot_param())
 		return 0;
 
-	if (!pr)
-		return -EINVAL;
-
-	if (nocst) {
+	if (nocst)
 		return -ENODEV;
-	}
 
 	if (!pr->flags.power_setup_done)
 		return -ENODEV;
@@ -1082,9 +1075,6 @@
 	if (disabled_by_idle_boot_param())
 		return 0;
 
-	if (!pr)
-		return -EINVAL;
-
 	if (nocst)
 		return -ENODEV;
 
@@ -1157,9 +1147,6 @@
 		first_run++;
 	}
 
-	if (!pr)
-		return -EINVAL;
-
 	if (acpi_gbl_FADT.cst_control && !nocst) {
 		status =
 		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 51d7948..60a7c28 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -235,28 +235,6 @@
 	acpi_processor_ppc_status &= ~PPC_REGISTERED;
 }
 
-/*
- * Do a quick check if the systems looks like it should use ACPI
- * cpufreq. We look at a _PCT method being available, but don't
- * do a whole lot of sanity checks.
- */
-void acpi_processor_load_module(struct acpi_processor *pr)
-{
-	static int requested;
-	acpi_status status = 0;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	if (!arch_has_acpi_pdc() || requested)
-		return;
-	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
-	if (!ACPI_FAILURE(status)) {
-		printk(KERN_INFO PREFIX "Requesting acpi_cpufreq\n");
-		request_module_nowait("acpi_cpufreq");
-		requested = 1;
-	}
-	kfree(buffer.pointer);
-}
-
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
 {
 	int result = 0;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index aef7e1c..d465ae6 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -30,12 +30,6 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <linux/acpi.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
@@ -67,11 +61,6 @@
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
 #define MAX_SBS_BAT			4
 #define ACPI_SBS_BLOCK_MAX		32
 
@@ -84,9 +73,6 @@
 struct acpi_battery {
 	struct power_supply bat;
 	struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	struct proc_dir_entry *proc_entry;
-#endif
 	unsigned long update_time;
 	char name[8];
 	char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@
 	struct acpi_device *device;
 	struct acpi_smb_hc *hc;
 	struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	struct proc_dir_entry *charger_entry;
-#endif
 	struct acpi_battery battery[MAX_SBS_BAT];
 	u8 batteries_supported:4;
 	u8 manager_present:1;
@@ -482,261 +465,6 @@
 };
 
 /* --------------------------------------------------------------------------
-                              FS Interface (/proc/acpi)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
-		struct proc_dir_entry *parent_dir,
-		char *dir_name,
-		const struct file_operations *info_fops,
-		const struct file_operations *state_fops,
-		const struct file_operations *alarm_fops, void *data)
-{
-	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
-			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-	if (!*dir) {
-		*dir = proc_mkdir(dir_name, parent_dir);
-		if (!*dir) {
-			return -ENODEV;
-		}
-	}
-
-	/* 'info' [R] */
-	if (info_fops)
-		proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
-				 info_fops, data);
-
-	/* 'state' [R] */
-	if (state_fops)
-		proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
-				 state_fops, data);
-
-	/* 'alarm' [R/W] */
-	if (alarm_fops)
-		proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
-				 alarm_fops, data);
-	return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-	return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
-	struct acpi_battery *battery = seq->private;
-	struct acpi_sbs *sbs = battery->sbs;
-	int result = 0;
-
-	mutex_lock(&sbs->lock);
-
-	seq_printf(seq, "present:                 %s\n",
-		   (battery->present) ? "yes" : "no");
-	if (!battery->present)
-		goto end;
-
-	seq_printf(seq, "design capacity:         %i%sh\n",
-		   battery->design_capacity * acpi_battery_scale(battery),
-		   acpi_battery_units(battery));
-	seq_printf(seq, "last full capacity:      %i%sh\n",
-		   battery->full_charge_capacity * acpi_battery_scale(battery),
-		   acpi_battery_units(battery));
-	seq_printf(seq, "battery technology:      rechargeable\n");
-	seq_printf(seq, "design voltage:          %i mV\n",
-		   battery->design_voltage * acpi_battery_vscale(battery));
-	seq_printf(seq, "design capacity warning: unknown\n");
-	seq_printf(seq, "design capacity low:     unknown\n");
-	seq_printf(seq, "cycle count:		  %i\n", battery->cycle_count);
-	seq_printf(seq, "capacity granularity 1:  unknown\n");
-	seq_printf(seq, "capacity granularity 2:  unknown\n");
-	seq_printf(seq, "model number:            %s\n", battery->device_name);
-	seq_printf(seq, "serial number:           %i\n",
-		   battery->serial_number);
-	seq_printf(seq, "battery type:            %s\n",
-		   battery->device_chemistry);
-	seq_printf(seq, "OEM info:                %s\n",
-		   battery->manufacturer_name);
-      end:
-	mutex_unlock(&sbs->lock);
-	return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
-	struct acpi_battery *battery = seq->private;
-	struct acpi_sbs *sbs = battery->sbs;
-	int rate;
-
-	mutex_lock(&sbs->lock);
-	seq_printf(seq, "present:                 %s\n",
-		   (battery->present) ? "yes" : "no");
-	if (!battery->present)
-		goto end;
-
-	acpi_battery_get_state(battery);
-	seq_printf(seq, "capacity state:          %s\n",
-		   (battery->state & 0x0010) ? "critical" : "ok");
-	seq_printf(seq, "charging state:          %s\n",
-		   (battery->rate_now < 0) ? "discharging" :
-		   ((battery->rate_now > 0) ? "charging" : "charged"));
-	rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
-	rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
-			acpi_battery_vscale(battery)/1000):1;
-	seq_printf(seq, "present rate:            %d%s\n", rate,
-		   acpi_battery_units(battery));
-	seq_printf(seq, "remaining capacity:      %i%sh\n",
-		   battery->capacity_now * acpi_battery_scale(battery),
-		   acpi_battery_units(battery));
-	seq_printf(seq, "present voltage:         %i mV\n",
-		   battery->voltage_now * acpi_battery_vscale(battery));
-
-      end:
-	mutex_unlock(&sbs->lock);
-	return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
-	struct acpi_battery *battery = seq->private;
-	struct acpi_sbs *sbs = battery->sbs;
-	int result = 0;
-
-	mutex_lock(&sbs->lock);
-
-	if (!battery->present) {
-		seq_printf(seq, "present:                 no\n");
-		goto end;
-	}
-
-	acpi_battery_get_alarm(battery);
-	seq_printf(seq, "alarm:                   ");
-	if (battery->alarm_capacity)
-		seq_printf(seq, "%i%sh\n",
-			   battery->alarm_capacity *
-			   acpi_battery_scale(battery),
-			   acpi_battery_units(battery));
-	else
-		seq_printf(seq, "disabled\n");
-      end:
-	mutex_unlock(&sbs->lock);
-	return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
-			 size_t count, loff_t * ppos)
-{
-	struct seq_file *seq = file->private_data;
-	struct acpi_battery *battery = seq->private;
-	struct acpi_sbs *sbs = battery->sbs;
-	char alarm_string[12] = { '\0' };
-	int result = 0;
-	mutex_lock(&sbs->lock);
-	if (!battery->present) {
-		result = -ENODEV;
-		goto end;
-	}
-	if (count > sizeof(alarm_string) - 1) {
-		result = -EINVAL;
-		goto end;
-	}
-	if (copy_from_user(alarm_string, buffer, count)) {
-		result = -EFAULT;
-		goto end;
-	}
-	alarm_string[count] = 0;
-	battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
-					acpi_battery_scale(battery);
-	acpi_battery_set_alarm(battery);
-      end:
-	mutex_unlock(&sbs->lock);
-	if (result)
-		return result;
-	return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
-	.open = acpi_battery_info_open_fs,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
-	.open = acpi_battery_state_open_fs,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
-	.open = acpi_battery_alarm_open_fs,
-	.read = seq_read,
-	.write = acpi_battery_write_alarm,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
-	struct acpi_sbs *sbs = seq->private;
-
-	mutex_lock(&sbs->lock);
-
-	seq_printf(seq, "state:                   %s\n",
-		   sbs->charger_present ? "on-line" : "off-line");
-
-	mutex_unlock(&sbs->lock);
-	return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
-	.open = acpi_ac_state_open_fs,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.owner = THIS_MODULE,
-};
-
-#endif
-
-/* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
 static int acpi_battery_read(struct acpi_battery *battery)
@@ -781,12 +509,6 @@
 		return result;
 
 	sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
-			battery->name, &acpi_battery_info_fops,
-			&acpi_battery_state_fops, &acpi_battery_alarm_fops,
-			battery);
-#endif
 	battery->bat.name = battery->name;
 	battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
 	if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@
 			device_remove_file(battery->bat.dev, &alarm_attr);
 		power_supply_unregister(&battery->bat);
 	}
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	proc_remove(battery->proc_entry);
-	battery->proc_entry = NULL;
-#endif
 }
 
 static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@
 	result = acpi_ac_get_present(sbs);
 	if (result)
 		goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
-				 ACPI_AC_DIR_NAME, NULL,
-				 &acpi_ac_state_fops, NULL, sbs);
-	if (result)
-		goto end;
-#endif
+
 	sbs->charger.name = "sbs-charger";
 	sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
 	sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@
 {
 	if (sbs->charger.dev)
 		power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	proc_remove(sbs->charger_entry);
-	sbs->charger_entry = NULL;
-#endif
 }
 
 static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@
 	return 0;
 }
 
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	if (acpi_ac_dir) {
-		acpi_unlock_ac_dir(acpi_ac_dir);
-		acpi_ac_dir = NULL;
-	}
-	if (acpi_battery_dir) {
-		acpi_unlock_battery_dir(acpi_battery_dir);
-		acpi_battery_dir = NULL;
-	}
-#endif
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int acpi_sbs_resume(struct device *dev)
 {
@@ -995,28 +689,17 @@
 
 	if (acpi_disabled)
 		return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-	acpi_ac_dir = acpi_lock_ac_dir();
-	if (!acpi_ac_dir)
-		return -ENODEV;
-	acpi_battery_dir = acpi_lock_battery_dir();
-	if (!acpi_battery_dir) {
-		acpi_sbs_rmdirs();
-		return -ENODEV;
-	}
-#endif
+
 	result = acpi_bus_register_driver(&acpi_sbs_driver);
-	if (result < 0) {
-		acpi_sbs_rmdirs();
+	if (result < 0)
 		return -ENODEV;
-	}
+
 	return 0;
 }
 
 static void __exit acpi_sbs_exit(void)
 {
 	acpi_bus_unregister_driver(&acpi_sbs_driver);
-	acpi_sbs_rmdirs();
 	return;
 }
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fee8a29..55f9ded 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -125,8 +125,8 @@
 }
 static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
 
-static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
-					       void *data, void **ret_p)
+static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
+				    void **ret_p)
 {
 	struct acpi_device *device = NULL;
 	struct acpi_device_physical_node *pn;
@@ -136,6 +136,11 @@
 	if (acpi_bus_get_device(handle, &device))
 		return AE_OK;
 
+	if (device->handler && !device->handler->hotplug.enabled) {
+		*ret_p = &device->dev;
+		return AE_SUPPORT;
+	}
+
 	mutex_lock(&device->physical_node_lock);
 
 	list_for_each_entry(pn, &device->physical_node_list, node) {
@@ -168,8 +173,8 @@
 	return status;
 }
 
-static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
-					      void *data, void **ret_p)
+static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
+				   void **ret_p)
 {
 	struct acpi_device *device = NULL;
 	struct acpi_device_physical_node *pn;
@@ -214,26 +219,32 @@
 	 * If the first pass is successful, the second one isn't needed, though.
 	 */
 	errdev = NULL;
-	acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
-			    NULL, acpi_bus_offline_companions,
-			    (void *)false, (void **)&errdev);
-	acpi_bus_offline_companions(handle, 0, (void *)false, (void **)&errdev);
+	status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+				     NULL, acpi_bus_offline, (void *)false,
+				     (void **)&errdev);
+	if (status == AE_SUPPORT) {
+		dev_warn(errdev, "Offline disabled.\n");
+		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+				    acpi_bus_online, NULL, NULL, NULL);
+		put_device(&device->dev);
+		return -EPERM;
+	}
+	acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
 	if (errdev) {
 		errdev = NULL;
 		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
-				    NULL, acpi_bus_offline_companions,
-				    (void *)true , (void **)&errdev);
+				    NULL, acpi_bus_offline, (void *)true,
+				    (void **)&errdev);
 		if (!errdev || acpi_force_hot_remove)
-			acpi_bus_offline_companions(handle, 0, (void *)true,
-						    (void **)&errdev);
+			acpi_bus_offline(handle, 0, (void *)true,
+					 (void **)&errdev);
 
 		if (errdev && !acpi_force_hot_remove) {
 			dev_warn(errdev, "Offline failed.\n");
-			acpi_bus_online_companions(handle, 0, NULL, NULL);
+			acpi_bus_online(handle, 0, NULL, NULL);
 			acpi_walk_namespace(ACPI_TYPE_ANY, handle,
-					    ACPI_UINT32_MAX,
-					    acpi_bus_online_companions, NULL,
-					    NULL, NULL);
+					    ACPI_UINT32_MAX, acpi_bus_online,
+					    NULL, NULL, NULL);
 			put_device(&device->dev);
 			return -EBUSY;
 		}
@@ -274,10 +285,10 @@
 	return 0;
 }
 
-static void acpi_bus_device_eject(void *context)
+void acpi_bus_device_eject(void *data, u32 ost_src)
 {
-	acpi_handle handle = context;
-	struct acpi_device *device = NULL;
+	struct acpi_device *device = data;
+	acpi_handle handle = device->handle;
 	struct acpi_scan_handler *handler;
 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 	int error;
@@ -285,38 +296,41 @@
 	lock_device_hotplug();
 	mutex_lock(&acpi_scan_lock);
 
-	acpi_bus_get_device(handle, &device);
-	if (!device)
-		goto err_out;
-
 	handler = device->handler;
 	if (!handler || !handler->hotplug.enabled) {
-		ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
-		goto err_out;
+		put_device(&device->dev);
+		goto err_support;
 	}
-	acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
-				  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+
+	if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
+		acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+					  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+
 	if (handler->hotplug.mode == AHM_CONTAINER)
 		kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
 
-	get_device(&device->dev);
 	error = acpi_scan_hot_remove(device);
-	if (error)
+	if (error == -EPERM) {
+		goto err_support;
+	} else if (error) {
 		goto err_out;
+	}
 
  out:
 	mutex_unlock(&acpi_scan_lock);
 	unlock_device_hotplug();
 	return;
 
+ err_support:
+	ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
  err_out:
-	acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
-				  NULL);
+	acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL);
 	goto out;
 }
 
-static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+static void acpi_scan_bus_device_check(void *data, u32 ost_source)
 {
+	acpi_handle handle = data;
 	struct acpi_device *device = NULL;
 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 	int error;
@@ -331,8 +345,6 @@
 			goto out;
 		}
 	}
-	acpi_evaluate_hotplug_ost(handle, ost_source,
-				  ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
 	error = acpi_bus_scan(handle);
 	if (error) {
 		acpi_handle_warn(handle, "Namespace scan failure\n");
@@ -353,18 +365,6 @@
 	unlock_device_hotplug();
 }
 
-static void acpi_scan_bus_check(void *context)
-{
-	acpi_scan_bus_device_check((acpi_handle)context,
-				   ACPI_NOTIFY_BUS_CHECK);
-}
-
-static void acpi_scan_device_check(void *context)
-{
-	acpi_scan_bus_device_check((acpi_handle)context,
-				   ACPI_NOTIFY_DEVICE_CHECK);
-}
-
 static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
 {
 	u32 ost_status;
@@ -395,8 +395,8 @@
 
 static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
 {
-	acpi_osd_exec_callback callback;
 	struct acpi_scan_handler *handler = data;
+	struct acpi_device *adev;
 	acpi_status status;
 
 	if (!handler->hotplug.enabled)
@@ -405,57 +405,36 @@
 	switch (type) {
 	case ACPI_NOTIFY_BUS_CHECK:
 		acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
-		callback = acpi_scan_bus_check;
 		break;
 	case ACPI_NOTIFY_DEVICE_CHECK:
 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
-		callback = acpi_scan_device_check;
 		break;
 	case ACPI_NOTIFY_EJECT_REQUEST:
 		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-		callback = acpi_bus_device_eject;
-		break;
+		status = acpi_bus_get_device(handle, &adev);
+		if (ACPI_FAILURE(status))
+			goto err_out;
+
+		get_device(&adev->dev);
+		status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type);
+		if (ACPI_SUCCESS(status))
+			return;
+
+		put_device(&adev->dev);
+		goto err_out;
 	default:
 		/* non-hotplug event; possibly handled by other handler */
 		return;
 	}
-	status = acpi_os_hotplug_execute(callback, handle);
-	if (ACPI_FAILURE(status))
-		acpi_evaluate_hotplug_ost(handle, type,
-					  ACPI_OST_SC_NON_SPECIFIC_FAILURE,
-					  NULL);
+	status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type);
+	if (ACPI_SUCCESS(status))
+		return;
+
+ err_out:
+	acpi_evaluate_hotplug_ost(handle, type,
+				  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
 }
 
-/**
- * acpi_bus_hot_remove_device: hot-remove a device and its children
- * @context: struct acpi_eject_event pointer (freed in this func)
- *
- * Hot-remove a device and its children. This function frees up the
- * memory space passed by arg context, so that the caller may call
- * this function asynchronously through acpi_os_hotplug_execute().
- */
-void acpi_bus_hot_remove_device(void *context)
-{
-	struct acpi_eject_event *ej_event = context;
-	struct acpi_device *device = ej_event->device;
-	acpi_handle handle = device->handle;
-	int error;
-
-	lock_device_hotplug();
-	mutex_lock(&acpi_scan_lock);
-
-	error = acpi_scan_hot_remove(device);
-	if (error && handle)
-		acpi_evaluate_hotplug_ost(handle, ej_event->event,
-					  ACPI_OST_SC_NON_SPECIFIC_FAILURE,
-					  NULL);
-
-	mutex_unlock(&acpi_scan_lock);
-	unlock_device_hotplug();
-	kfree(context);
-}
-EXPORT_SYMBOL(acpi_bus_hot_remove_device);
-
 static ssize_t real_power_state_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
@@ -487,10 +466,8 @@
 		const char *buf, size_t count)
 {
 	struct acpi_device *acpi_device = to_acpi_device(d);
-	struct acpi_eject_event *ej_event;
 	acpi_object_type not_used;
 	acpi_status status;
-	int ret;
 
 	if (!count || buf[0] != '1')
 		return -EINVAL;
@@ -503,28 +480,18 @@
 	if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
 		return -ENODEV;
 
-	ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
-	if (!ej_event) {
-		ret = -ENOMEM;
-		goto err_out;
-	}
 	acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
 				  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
-	ej_event->device = acpi_device;
-	ej_event->event = ACPI_OST_EC_OSPM_EJECT;
 	get_device(&acpi_device->dev);
-	status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+	status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device,
+				      ACPI_OST_EC_OSPM_EJECT);
 	if (ACPI_SUCCESS(status))
 		return count;
 
 	put_device(&acpi_device->dev);
-	kfree(ej_event);
-	ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-
- err_out:
 	acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
 				  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
-	return ret;
+	return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
 }
 
 static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -1676,7 +1643,6 @@
 
 void acpi_device_add_finalize(struct acpi_device *device)
 {
-	device->flags.match_driver = true;
 	dev_set_uevent_suppress(&device->dev, false);
 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
 }
@@ -1915,8 +1881,12 @@
 		return AE_OK;
 
 	ret = acpi_scan_attach_handler(device);
-	if (ret)
-		return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
+	if (ret < 0)
+		return AE_CTRL_DEPTH;
+
+	device->flags.match_driver = true;
+	if (ret > 0)
+		return AE_OK;
 
 	ret = device_attach(&device->dev);
 	return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 05306a5..db52936 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -564,6 +564,7 @@
 	acpi_event_status status;
 	acpi_handle handle;
 	int result = 0;
+	unsigned long tmp;
 
 	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 		int i;
@@ -596,8 +597,10 @@
 		else if (!strcmp(buf, "clear\n") &&
 			 (status & ACPI_EVENT_FLAG_SET))
 			result = acpi_clear_gpe(handle, index);
+		else if (!kstrtoul(buf, 0, &tmp))
+			all_counters[index].count = tmp;
 		else
-			all_counters[index].count = strtoul(buf, NULL, 0);
+			result = -EINVAL;
 	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 		int event = index - num_gpes;
 		if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@
 		else if (!strcmp(buf, "clear\n") &&
 			 (status & ACPI_EVENT_FLAG_SET))
 			result = acpi_clear_event(event);
+		else if (!kstrtoul(buf, 0, &tmp))
+			all_counters[index].count = tmp;
 		else
-			all_counters[index].count = strtoul(buf, NULL, 0);
+			result = -EINVAL;
 	} else
 		all_counters[index].count = strtoul(buf, NULL, 0);
 
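The switch from strtoul() to kstrtoul() matters because strtoul() silently
accepts trailing garbage and cannot report overflow, while kstrtoul()
returns an error, letting the store callback reject bad input with -EINVAL
as done here. A minimal sketch of the same pattern, using a hypothetical
sysfs store callback:

	/* Hypothetical store callback illustrating the kstrtoul() pattern. */
	static ssize_t threshold_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		unsigned long val;
		int ret;

		/* Rejects trailing junk and overflow, unlike strtoul(). */
		ret = kstrtoul(buf, 0, &val);
		if (ret)
			return ret;

		/* ... store val ... */
		return count;
	}
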
@@ -762,13 +767,8 @@
 	if (!hotplug_kobj)
 		goto err_out;
 
-	kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
-	error = kobject_set_name(&hotplug->kobj, "%s", name);
-	if (error)
-		goto err_out;
-
-	hotplug->kobj.parent = hotplug_kobj;
-	error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+	error = kobject_init_and_add(&hotplug->kobj,
+		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 	if (error)
 		goto err_out;
 
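kobject_init_and_add() folds the former kobject_init() / kobject_set_name()
/ kobject_add() sequence into a single call with a printf-style name,
leaving one error path. A sketch of the consolidated pattern, with
hypothetical names; note that once initialization has happened, a failure
should be cleaned up by dropping the reference with kobject_put():

	static struct kobj_type example_ktype;	/* hypothetical */

	static int example_register(struct kobject *kobj,
				    struct kobject *parent, const char *name)
	{
		int error;

		error = kobject_init_and_add(kobj, &example_ktype, parent,
					     "%s", name);
		if (error)
			kobject_put(kobj);	/* init took a reference */

		return error;
	}
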
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6a03293..0d9f46b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -299,8 +299,8 @@
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "No critical threshold\n"));
 		} else if (tmp <= 2732) {
-			printk(KERN_WARNING FW_BUG "Invalid critical threshold "
-			       "(%llu)\n", tmp);
+			pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+				tmp);
 			tz->trips.critical.flags.valid = 0;
 		} else {
 			tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@
 				 * Allow override critical threshold
 				 */
 				if (crt_k > tz->trips.critical.temperature)
-					printk(KERN_WARNING PREFIX
-						"Critical threshold %d C\n", crt);
+					pr_warn(PREFIX "Critical threshold %d C\n",
+						crt);
 				tz->trips.critical.temperature = crt_k;
 			}
 		}
@@ -390,8 +390,7 @@
 		status = acpi_evaluate_reference(tz->device->handle, "_PSL",
 							NULL, &devices);
 		if (ACPI_FAILURE(status)) {
-			printk(KERN_WARNING PREFIX
-				"Invalid passive threshold\n");
+			pr_warn(PREFIX "Invalid passive threshold\n");
 			tz->trips.passive.flags.valid = 0;
 		}
 		else
@@ -453,8 +452,8 @@
 			status = acpi_evaluate_reference(tz->device->handle,
 						name, NULL, &devices);
 			if (ACPI_FAILURE(status)) {
-				printk(KERN_WARNING PREFIX
-					"Invalid active%d threshold\n", i);
+				pr_warn(PREFIX "Invalid active%d threshold\n",
+					i);
 				tz->trips.active[i].flags.valid = 0;
 			}
 			else
@@ -505,7 +504,7 @@
 		valid |= tz->trips.active[i].flags.valid;
 
 	if (!valid) {
-		printk(KERN_WARNING FW_BUG "No valid trip found\n");
+		pr_warn(FW_BUG "No valid trip found\n");
 		return -ENODEV;
 	}
 	return 0;
@@ -515,10 +514,9 @@
 {
 	struct acpi_thermal *tz = data;
 
-	if (!tz->tz_enabled) {
-		pr_warn("thermal zone is disabled \n");
+	if (!tz->tz_enabled)
 		return;
-	}
+
 	thermal_zone_device_update(tz->thermal_zone);
 }
 
@@ -570,9 +568,10 @@
 	 */
 	if (mode == THERMAL_DEVICE_ENABLED)
 		enable = 1;
-	else if (mode == THERMAL_DEVICE_DISABLED)
+	else if (mode == THERMAL_DEVICE_DISABLED) {
 		enable = 0;
-	else
+		pr_warn("thermal zone will be disabled\n");
+	} else
 		return -EINVAL;
 
 	if (enable != tz->tz_enabled) {
@@ -923,8 +922,7 @@
 				  acpi_bus_private_data_handler,
 				  tz->thermal_zone);
 	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX
-				"Error attaching device data\n");
+		pr_err(PREFIX "Error attaching device data\n");
 		return -ENODEV;
 	}
 
@@ -1094,9 +1092,8 @@
 	if (result)
 		goto free_memory;
 
-	printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
-	       acpi_device_name(device), acpi_device_bid(device),
-	       KELVIN_TO_CELSIUS(tz->temperature));
+	pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+		acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
 	goto end;
 
 free_memory:
@@ -1159,24 +1156,24 @@
 static int thermal_act(const struct dmi_system_id *d) {
 
 	if (act == 0) {
-		printk(KERN_NOTICE "ACPI: %s detected: "
-			"disabling all active thermal trip points\n", d->ident);
+		pr_notice(PREFIX "%s detected: "
+			  "disabling all active thermal trip points\n", d->ident);
 		act = -1;
 	}
 	return 0;
 }
 static int thermal_nocrt(const struct dmi_system_id *d) {
 
-	printk(KERN_NOTICE "ACPI: %s detected: "
-		"disabling all critical thermal trip point actions.\n", d->ident);
+	pr_notice(PREFIX "%s detected: "
+		  "disabling all critical thermal trip point actions.\n", d->ident);
 	nocrt = 1;
 	return 0;
 }
 static int thermal_tzp(const struct dmi_system_id *d) {
 
 	if (tzp == 0) {
-		printk(KERN_NOTICE "ACPI: %s detected: "
-			"enabling thermal zone polling\n", d->ident);
+		pr_notice(PREFIX "%s detected: "
+			  "enabling thermal zone polling\n", d->ident);
 		tzp = 300;	/* 300 dS = 30 Seconds */
 	}
 	return 0;
@@ -1184,8 +1181,8 @@
 static int thermal_psv(const struct dmi_system_id *d) {
 
 	if (psv == 0) {
-		printk(KERN_NOTICE "ACPI: %s detected: "
-			"disabling all passive thermal trip points\n", d->ident);
+		pr_notice(PREFIX "%s detected: "
+			  "disabling all passive thermal trip points\n", d->ident);
 		psv = -1;
 	}
 	return 0;
@@ -1238,7 +1235,7 @@
 	dmi_check_system(thermal_dmi_table);
 
 	if (off) {
-		printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+		pr_notice(PREFIX "thermal control disabled\n");
 		return -ENODEV;
 	}
 
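The pr_warn()/pr_err()/pr_notice() helpers used in this conversion are
printk() wrappers with the log level baked in. Drivers can either keep an
explicit prefix, as thermal.c does with PREFIX, or define pr_fmt() once and
have it prepended automatically. A sketch of the pr_fmt() variant, with a
hypothetical "mydrv" prefix:

	/* Must be defined before printk.h is pulled in. */
	#define pr_fmt(fmt) "mydrv: " fmt

	#include <linux/printk.h>

	static void report(int temp)
	{
		/* Emits "mydrv: invalid threshold (42)" at KERN_WARNING. */
		pr_warn("invalid threshold (%d)\n", temp);
	}
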
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 552248b..6d408bf 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -121,7 +121,7 @@
 				break;
 			default:
 				printk(KERN_WARNING PREFIX "Invalid package element"
-					      " [%d]: got number, expecing"
+					      " [%d]: got number, expecting"
 					      " [%c]\n",
 					      i, format_string[i]);
 				return AE_BAD_DATA;
@@ -148,7 +148,7 @@
 			default:
 				printk(KERN_WARNING PREFIX "Invalid package element"
 					      " [%d] got string/buffer,"
-					      " expecing [%c]\n",
+					      " expecting [%c]\n",
 					      i, format_string[i]);
 				return AE_BAD_DATA;
 				break;
@@ -169,11 +169,20 @@
 	/*
 	 * Validate output buffer.
 	 */
-	if (buffer->length < size_required) {
+	if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+		buffer->pointer = ACPI_ALLOCATE(size_required);
+		if (!buffer->pointer)
+			return AE_NO_MEMORY;
 		buffer->length = size_required;
-		return AE_BUFFER_OVERFLOW;
-	} else if (buffer->length != size_required || !buffer->pointer) {
-		return AE_BAD_PARAMETER;
+		memset(buffer->pointer, 0, size_required);
+	} else {
+		if (buffer->length < size_required) {
+			buffer->length = size_required;
+			return AE_BUFFER_OVERFLOW;
+		} else if (buffer->length != size_required ||
+			   !buffer->pointer) {
+			return AE_BAD_PARAMETER;
+		}
 	}
 
 	head = buffer->pointer;
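With the ACPI_ALLOCATE_BUFFER branch above, acpi_extract_package() gains the
calling convention acpi_evaluate_object() already supports: pass a length of
ACPI_ALLOCATE_BUFFER and the helper sizes and allocates the output itself,
with ownership passing to the caller. A sketch of a caller under that
convention (package and format assumed already populated):

	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_extract_package(package, format, &buffer);
	if (ACPI_SUCCESS(status)) {
		/* ... use buffer.pointer / buffer.length ... */
		ACPI_FREE(buffer.pointer);	/* caller owns the allocation */
	}
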
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index aebcf63..18dbdff 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,16 @@
 static bool use_bios_initial_backlight = 1;
 module_param(use_bios_initial_backlight, bool, 0644);
 
+/*
+ * For Windows 8 systems: if set true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
+ */
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
+
 static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device);
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +166,7 @@
 	struct acpi_video_bus_flags flags;
 	struct list_head video_device_list;
 	struct mutex device_list_lock;	/* protects video_device_list */
+	struct list_head entry;
 	struct input_dev *input;
 	char phys[32];	/* for input device */
 	struct notifier_block pm_nb;
@@ -229,6 +239,14 @@
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
 					 int event);
 
+static bool acpi_video_verify_backlight_support(void)
+{
+	if (acpi_osi_is_win8() && use_native_backlight &&
+	    backlight_device_registered(BACKLIGHT_RAW))
+		return false;
+	return acpi_video_backlight_support();
+}
+
 /* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
 {
@@ -830,9 +848,9 @@
 		 * or an index). Set the backlight to max_level in this case.
 		 */
 		for (i = 2; i < br->count; i++)
-			if (level_old == br->levels[i])
+			if (level == br->levels[i])
 				break;
-		if (i == br->count)
+		if (i == br->count || !level)
 			level = max_level;
 	}
 
@@ -884,79 +902,6 @@
 
 	if (acpi_has_method(device->dev->handle, "_DDC"))
 		device->cap._DDC = 1;
-
-	if (acpi_video_backlight_support()) {
-		struct backlight_properties props;
-		struct pci_dev *pdev;
-		acpi_handle acpi_parent;
-		struct device *parent = NULL;
-		int result;
-		static int count;
-		char *name;
-
-		result = acpi_video_init_brightness(device);
-		if (result)
-			return;
-		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-		if (!name)
-			return;
-		count++;
-
-		acpi_get_parent(device->dev->handle, &acpi_parent);
-
-		pdev = acpi_get_pci_dev(acpi_parent);
-		if (pdev) {
-			parent = &pdev->dev;
-			pci_dev_put(pdev);
-		}
-
-		memset(&props, 0, sizeof(struct backlight_properties));
-		props.type = BACKLIGHT_FIRMWARE;
-		props.max_brightness = device->brightness->count - 3;
-		device->backlight = backlight_device_register(name,
-							      parent,
-							      device,
-							      &acpi_backlight_ops,
-							      &props);
-		kfree(name);
-		if (IS_ERR(device->backlight))
-			return;
-
-		/*
-		 * Save current brightness level in case we have to restore it
-		 * before acpi_video_device_lcd_set_level() is called next time.
-		 */
-		device->backlight->props.brightness =
-				acpi_video_get_brightness(device->backlight);
-
-		device->cooling_dev = thermal_cooling_device_register("LCD",
-					device->dev, &video_cooling_ops);
-		if (IS_ERR(device->cooling_dev)) {
-			/*
-			 * Set cooling_dev to NULL so we don't crash trying to
-			 * free it.
-			 * Also, why the hell we are returning early and
-			 * not attempt to register video output if cooling
-			 * device registration failed?
-			 * -- dtor
-			 */
-			device->cooling_dev = NULL;
-			return;
-		}
-
-		dev_info(&device->dev->dev, "registered as cooling_device%d\n",
-			 device->cooling_dev->id);
-		result = sysfs_create_link(&device->dev->dev.kobj,
-				&device->cooling_dev->device.kobj,
-				"thermal_cooling");
-		if (result)
-			printk(KERN_ERR PREFIX "Create sysfs link\n");
-		result = sysfs_create_link(&device->cooling_dev->device.kobj,
-				&device->dev->dev.kobj, "device");
-		if (result)
-			printk(KERN_ERR PREFIX "Create sysfs link\n");
-
-	}
 }
 
 /*
@@ -1143,13 +1088,6 @@
 	acpi_video_device_bind(video, data);
 	acpi_video_device_find_cap(data);
 
-	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-					     acpi_video_device_notify, data);
-	if (ACPI_FAILURE(status))
-		dev_err(&device->dev, "Error installing notify handler\n");
-	else
-		data->flags.notify = 1;
-
 	mutex_lock(&video->device_list_lock);
 	list_add_tail(&data->entry, &video->video_device_list);
 	mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1271,8 @@
 	unsigned long long level_current, level_next;
 	int result = -EINVAL;
 
-	/* no warning message if acpi_backlight=vendor is used */
-	if (!acpi_video_backlight_support())
+	/* no warning message if acpi_backlight=vendor or a quirk is used */
+	if (!acpi_video_verify_backlight_support())
 		return 0;
 
 	if (!device->brightness)
@@ -1454,64 +1392,6 @@
 	return status;
 }
 
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
-	acpi_status status;
-
-	if (!device || !device->video)
-		return -ENOENT;
-
-	if (device->flags.notify) {
-		status = acpi_remove_notify_handler(device->dev->handle,
-				ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
-		if (ACPI_FAILURE(status))
-			dev_err(&device->dev->dev,
-					"Can't remove video notify handler\n");
-	}
-
-	if (device->backlight) {
-		backlight_device_unregister(device->backlight);
-		device->backlight = NULL;
-	}
-	if (device->cooling_dev) {
-		sysfs_remove_link(&device->dev->dev.kobj,
-				  "thermal_cooling");
-		sysfs_remove_link(&device->cooling_dev->device.kobj,
-				  "device");
-		thermal_cooling_device_unregister(device->cooling_dev);
-		device->cooling_dev = NULL;
-	}
-
-	return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
-	int status;
-	struct acpi_video_device *dev, *next;
-
-	mutex_lock(&video->device_list_lock);
-
-	list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
-		status = acpi_video_bus_put_one_device(dev);
-		if (ACPI_FAILURE(status))
-			printk(KERN_WARNING PREFIX
-			       "hhuuhhuu bug in acpi video driver.\n");
-
-		if (dev->brightness) {
-			kfree(dev->brightness->levels);
-			kfree(dev->brightness);
-		}
-		list_del(&dev->entry);
-		kfree(dev);
-	}
-
-	mutex_unlock(&video->device_list_lock);
-
-	return 0;
-}
-
 /* acpi_video interface */
 
 /*
@@ -1521,13 +1401,13 @@
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
 	return acpi_video_bus_DOS(video, 0,
-				  acpi_video_backlight_quirks() ? 1 : 0);
+				  acpi_osi_is_win8() ? 1 : 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
 {
 	return acpi_video_bus_DOS(video, 0,
-				  acpi_video_backlight_quirks() ? 0 : 1);
+				  acpi_osi_is_win8() ? 0 : 1);
 }
 
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1416,7 @@
 	struct input_dev *input;
 	int keycode = 0;
 
-	if (!video)
+	if (!video || !video->input)
 		return;
 
 	input = video->input;
@@ -1691,12 +1571,236 @@
 	return AE_OK;
 }
 
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+	if (acpi_video_verify_backlight_support()) {
+		struct backlight_properties props;
+		struct pci_dev *pdev;
+		acpi_handle acpi_parent;
+		struct device *parent = NULL;
+		int result;
+		static int count;
+		char *name;
+
+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
+		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+		if (!name)
+			return;
+		count++;
+
+		acpi_get_parent(device->dev->handle, &acpi_parent);
+
+		pdev = acpi_get_pci_dev(acpi_parent);
+		if (pdev) {
+			parent = &pdev->dev;
+			pci_dev_put(pdev);
+		}
+
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.type = BACKLIGHT_FIRMWARE;
+		props.max_brightness = device->brightness->count - 3;
+		device->backlight = backlight_device_register(name,
+							      parent,
+							      device,
+							      &acpi_backlight_ops,
+							      &props);
+		kfree(name);
+		if (IS_ERR(device->backlight))
+			return;
+
+		/*
+		 * Save current brightness level in case we have to restore it
+		 * before acpi_video_device_lcd_set_level() is called next time.
+		 */
+		device->backlight->props.brightness =
+				acpi_video_get_brightness(device->backlight);
+
+		device->cooling_dev = thermal_cooling_device_register("LCD",
+					device->dev, &video_cooling_ops);
+		if (IS_ERR(device->cooling_dev)) {
+			/*
+			 * Set cooling_dev to NULL so we don't crash trying to
+			 * free it.
+			 * Also, why the hell we are returning early and
+			 * not attempt to register video output if cooling
+			 * device registration failed?
+			 * -- dtor
+			 */
+			device->cooling_dev = NULL;
+			return;
+		}
+
+		dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+			 device->cooling_dev->id);
+		result = sysfs_create_link(&device->dev->dev.kobj,
+				&device->cooling_dev->device.kobj,
+				"thermal_cooling");
+		if (result)
+			printk(KERN_ERR PREFIX "Create sysfs link\n");
+		result = sysfs_create_link(&device->cooling_dev->device.kobj,
+				&device->dev->dev.kobj, "device");
+		if (result)
+			printk(KERN_ERR PREFIX "Create sysfs link\n");
+	}
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+	struct acpi_video_device *dev;
+
+	mutex_lock(&video->device_list_lock);
+	list_for_each_entry(dev, &video->video_device_list, entry)
+		acpi_video_dev_register_backlight(dev);
+	mutex_unlock(&video->device_list_lock);
+
+	video->pm_nb.notifier_call = acpi_video_resume;
+	video->pm_nb.priority = 0;
+	return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+	if (device->backlight) {
+		backlight_device_unregister(device->backlight);
+		device->backlight = NULL;
+	}
+	if (device->brightness) {
+		kfree(device->brightness->levels);
+		kfree(device->brightness);
+		device->brightness = NULL;
+	}
+	if (device->cooling_dev) {
+		sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+		sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+		thermal_cooling_device_unregister(device->cooling_dev);
+		device->cooling_dev = NULL;
+	}
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+	struct acpi_video_device *dev;
+	int error = unregister_pm_notifier(&video->pm_nb);
+
+	mutex_lock(&video->device_list_lock);
+	list_for_each_entry(dev, &video->video_device_list, entry)
+		acpi_video_dev_unregister_backlight(dev);
+	mutex_unlock(&video->device_list_lock);
+
+	return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+	acpi_status status;
+	struct acpi_device *adev = device->dev;
+
+	status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+					     acpi_video_device_notify, device);
+	if (ACPI_FAILURE(status))
+		dev_err(&adev->dev, "Error installing notify handler\n");
+	else
+		device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+	struct input_dev *input;
+	struct acpi_video_device *dev;
+	int error;
+
+	video->input = input = input_allocate_device();
+	if (!input) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	error = acpi_video_bus_start_devices(video);
+	if (error)
+		goto err_free_input;
+
+	snprintf(video->phys, sizeof(video->phys),
+			"%s/video/input0", acpi_device_hid(video->device));
+
+	input->name = acpi_device_name(video->device);
+	input->phys = video->phys;
+	input->id.bustype = BUS_HOST;
+	input->id.product = 0x06;
+	input->dev.parent = &video->device->dev;
+	input->evbit[0] = BIT(EV_KEY);
+	set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+	set_bit(KEY_VIDEO_NEXT, input->keybit);
+	set_bit(KEY_VIDEO_PREV, input->keybit);
+	set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+	set_bit(KEY_BRIGHTNESSUP, input->keybit);
+	set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+	set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+	set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+	error = input_register_device(input);
+	if (error)
+		goto err_stop_dev;
+
+	mutex_lock(&video->device_list_lock);
+	list_for_each_entry(dev, &video->video_device_list, entry)
+		acpi_video_dev_add_notify_handler(dev);
+	mutex_unlock(&video->device_list_lock);
+
+	return 0;
+
+err_stop_dev:
+	acpi_video_bus_stop_devices(video);
+err_free_input:
+	input_free_device(input);
+	video->input = NULL;
+out:
+	return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+	if (dev->flags.notify) {
+		acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+					   acpi_video_device_notify);
+		dev->flags.notify = 0;
+	}
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+	struct acpi_video_device *dev;
+
+	mutex_lock(&video->device_list_lock);
+	list_for_each_entry(dev, &video->video_device_list, entry)
+		acpi_video_dev_remove_notify_handler(dev);
+	mutex_unlock(&video->device_list_lock);
+
+	acpi_video_bus_stop_devices(video);
+	input_unregister_device(video->input);
+	video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+	struct acpi_video_device *dev, *next;
+
+	mutex_lock(&video->device_list_lock);
+	list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+		list_del(&dev->entry);
+		kfree(dev);
+	}
+	mutex_unlock(&video->device_list_lock);
+
+	return 0;
+}
+
 static int instance;
 
 static int acpi_video_bus_add(struct acpi_device *device)
 {
 	struct acpi_video_bus *video;
-	struct input_dev *input;
 	int error;
 	acpi_status status;
 
@@ -1748,62 +1852,24 @@
 	if (error)
 		goto err_put_video;
 
-	video->input = input = input_allocate_device();
-	if (!input) {
-		error = -ENOMEM;
-		goto err_put_video;
-	}
-
-	error = acpi_video_bus_start_devices(video);
-	if (error)
-		goto err_free_input_dev;
-
-	snprintf(video->phys, sizeof(video->phys),
-		"%s/video/input0", acpi_device_hid(video->device));
-
-	input->name = acpi_device_name(video->device);
-	input->phys = video->phys;
-	input->id.bustype = BUS_HOST;
-	input->id.product = 0x06;
-	input->dev.parent = &device->dev;
-	input->evbit[0] = BIT(EV_KEY);
-	set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
-	set_bit(KEY_VIDEO_NEXT, input->keybit);
-	set_bit(KEY_VIDEO_PREV, input->keybit);
-	set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
-	set_bit(KEY_BRIGHTNESSUP, input->keybit);
-	set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
-	set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
-	set_bit(KEY_DISPLAY_OFF, input->keybit);
-
 	printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
 	       ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
 	       video->flags.multihead ? "yes" : "no",
 	       video->flags.rom ? "yes" : "no",
 	       video->flags.post ? "yes" : "no");
+	mutex_lock(&video_list_lock);
+	list_add_tail(&video->entry, &video_bus_head);
+	mutex_unlock(&video_list_lock);
 
-	video->pm_nb.notifier_call = acpi_video_resume;
-	video->pm_nb.priority = 0;
-	error = register_pm_notifier(&video->pm_nb);
-	if (error)
-		goto err_stop_video;
-
-	error = input_register_device(input);
-	if (error)
-		goto err_unregister_pm_notifier;
+	acpi_video_bus_register_backlight(video);
+	acpi_video_bus_add_notify_handler(video);
 
 	return 0;
 
- err_unregister_pm_notifier:
-	unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
-	acpi_video_bus_stop_devices(video);
- err_free_input_dev:
-	input_free_device(input);
- err_put_video:
+err_put_video:
 	acpi_video_bus_put_devices(video);
 	kfree(video->attached_array);
- err_free_video:
+err_free_video:
 	kfree(video);
 	device->driver_data = NULL;
 
@@ -1820,12 +1886,14 @@
 
 	video = acpi_driver_data(device);
 
-	unregister_pm_notifier(&video->pm_nb);
-
-	acpi_video_bus_stop_devices(video);
+	acpi_video_bus_remove_notify_handler(video);
+	acpi_video_bus_unregister_backlight(video);
 	acpi_video_bus_put_devices(video);
 
-	input_unregister_device(video->input);
+	mutex_lock(&video_list_lock);
+	list_del(&video->entry);
+	mutex_unlock(&video_list_lock);
+
 	kfree(video->attached_array);
 	kfree(video);
 
@@ -1874,6 +1942,9 @@
 		return 0;
 	}
 
+	mutex_init(&video_list_lock);
+	INIT_LIST_HEAD(&video_bus_head);
+
 	result = acpi_bus_register_driver(&acpi_video_bus);
 	if (result < 0)
 		return -ENODEV;
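The new video_bus_head list, guarded by video_list_lock, lets later code
walk every registered video bus (for instance to re-register backlights
after a quirk flips). A sketch of the traversal pattern this enables, with
a hypothetical callback:

	static void for_each_video_bus(void (*fn)(struct acpi_video_bus *))
	{
		struct acpi_video_bus *video;

		mutex_lock(&video_list_lock);
		list_for_each_entry(video, &video_bus_head, entry)
			fn(video);	/* runs with the list lock held */
		mutex_unlock(&video_list_lock);
	}
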
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 940edbf..84875fd 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,6 +168,14 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
 		},
 	},
+	{
+	.callback = video_detect_force_vendor,
+	.ident = "Lenovo Yoga 13",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+		},
+	},
 	{ },
 };
 
@@ -233,11 +241,11 @@
 		acpi_video_get_capabilities(NULL);
 }
 
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
 {
 	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
 }
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
 
 /* Promote the vendor interface instead of the generic video module.
  * This function allows DMI blacklists to be implemented by externals
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c670727..c4876ac 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -552,7 +552,6 @@
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 
-	dev->dma_mask = dma_mask;
 	dev->dev.coherent_dma_mask = dma_mask;
 	dev->irq[0] = irq1;
 	dev->irq[1] = irq2;
@@ -619,7 +618,7 @@
 		dev_set_name(&dev->dev, "%s", name);
 	dev->dev.release = amba_device_release;
 	dev->dev.bus = &amba_bustype;
-	dev->dev.dma_mask = &dev->dma_mask;
+	dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
 	dev->res.name = dev_name(&dev->dev);
 }
 
@@ -663,9 +662,6 @@
 	amba_device_initialize(dev, dev->dev.init_name);
 	dev->dev.init_name = NULL;
 
-	if (!dev->dev.coherent_dma_mask && dev->dma_mask)
-		dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
 	return amba_device_add(dev, parent);
 }
 
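Since dev->dev.dma_mask is only a pointer, aiming it at coherent_dma_mask
gives AMBA devices a single mask storage for both streaming and coherent
DMA, removing the case where one was set and the other forgotten. The
generic helper for the same setup is dma_coerce_mask_and_coherent(), used
by other drivers in this series; a sketch:

	#include <linux/dma-mapping.h>

	static int example_setup_dma(struct device *dev)
	{
		/*
		 * Point dev->dma_mask at the coherent mask and set both
		 * to 32-bit in one call.
		 */
		return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
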
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8e28f92..e2903d0 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -292,6 +292,10 @@
 	{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 1145637..2289efd 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -339,6 +339,7 @@
 	.sdev_attrs		= ahci_sdev_attrs
 
 extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
 extern struct ata_port_operations ahci_pmp_retry_srst_ops;
 
 unsigned int ahci_dev_classify(struct ata_port *ap);
@@ -368,6 +369,7 @@
 irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
+void ahci_error_handler(struct ata_port *ap);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
 					     unsigned int port_no)
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 58debb0..ae2d73f 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1,6 +1,6 @@
 /*
+ * Copyright (c) 2013 Freescale Semiconductor, Inc.
  * Freescale IMX AHCI SATA platform driver
- * Copyright 2013 Freescale Semiconductor, Inc.
  *
  * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
  *
@@ -25,10 +25,13 @@
 #include <linux/of_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
 #include "ahci.h"
 
 enum {
-	HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+	PORT_PHY_CTL = 0x178,			/* Port0 PHY Control */
+	PORT_PHY_CTL_PDDQ_LOC = 0x100000,	/* PORT_PHY_CTL bits */
+	HOST_TIMER1MS = 0xe0,			/* Timer 1-ms */
 };
 
 struct imx_ahci_priv {
@@ -36,6 +39,56 @@
 	struct clk *sata_ref_clk;
 	struct clk *ahb_clk;
 	struct regmap *gpr;
+	bool no_device;
+	bool first_time;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=don't support, 1=support)");
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+	u32 reg_val;
+	struct ata_device *dev;
+	struct ata_host *host = dev_get_drvdata(ap->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+
+	ahci_error_handler(ap);
+
+	if (!imxpriv->first_time || ahci_imx_hotplug)
+		return;
+
+	imxpriv->first_time = false;
+
+	ata_for_each_dev(dev, &ap->link, ENABLED)
+		return;
+	/*
+	 * Disable link to save power.  An imx ahci port can't be recovered
+	 * without full reset once the pddq mode is enabled making it
+	 * impossible to use as part of libata LPM.
+	 */
+	reg_val = readl(mmio + PORT_PHY_CTL);
+	writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+	regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+			IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+			!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+	clk_disable_unprepare(imxpriv->sata_ref_clk);
+	imxpriv->no_device = true;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+	.inherits	= &ahci_platform_ops,
+	.error_handler	= ahci_imx_error_handler,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_imx_ops,
 };
 
 static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
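One subtlety in the error handler above: regmap_update_bits() takes a mask
and a value, and !IMX6Q_GPR13_SATA_MPLL_CLK_EN evaluates to 0 for a non-zero
bit definition, so the call clears the clock-enable bit rather than setting
it. Spelled out:

	/* Clear the MPLL clock-enable bit: mask selects it, value is 0. */
	regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
			   IMX6Q_GPR13_SATA_MPLL_CLK_EN, 0);

	/* Set the same bit: mask and value both name the bit. */
	regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
			   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
			   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
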
@@ -117,9 +170,51 @@
 	clk_disable_unprepare(imxpriv->sata_ref_clk);
 }
 
+static int imx_ahci_suspend(struct device *dev)
+{
+	struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+	/*
+	 * If no_device is set, the clocks were already gated off during
+	 * initialization, so don't do it again here.
+	 */
+	if (!imxpriv->no_device) {
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+				!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+		clk_disable_unprepare(imxpriv->sata_ref_clk);
+	}
+
+	return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+	struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+	int ret;
+
+	if (!imxpriv->no_device) {
+		ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+		if (ret < 0) {
+			dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
+			return ret;
+		}
+
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+				IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+		usleep_range(1000, 2000);
+	}
+
+	return 0;
+}
+
 static struct ahci_platform_data imx6q_sata_pdata = {
 	.init = imx6q_sata_init,
 	.exit = imx6q_sata_exit,
+	.ata_port_info = &ahci_imx_port_info,
+	.suspend = imx_ahci_suspend,
+	.resume = imx_ahci_resume,
 };
 
 static const struct of_device_id imx_ahci_of_match[] = {
@@ -152,6 +247,8 @@
 	ahci_dev = &ahci_pdev->dev;
 	ahci_dev->parent = dev;
 
+	imxpriv->no_device = false;
+	imxpriv->first_time = true;
 	imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
 	if (IS_ERR(imxpriv->ahb_clk)) {
 		dev_err(dev, "can't get ahb clock.\n");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 7d3b853..f955431 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -49,10 +49,11 @@
 };
 MODULE_DEVICE_TABLE(platform, ahci_devtype);
 
-static struct ata_port_operations ahci_platform_ops = {
+struct ata_port_operations ahci_platform_ops = {
 	.inherits	= &ahci_ops,
 	.host_stop	= ahci_host_stop,
 };
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
 
 static struct ata_port_operations ahci_platform_retry_srst_ops = {
 	.inherits	= &ahci_pmp_retry_srst_ops,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 513ad7e..6334c8d 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -100,7 +100,7 @@
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
-	ICH5_PMR		= 0x90, /* port mapping register */
+	ICH5_PMR		= 0x90, /* address map register */
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SIDPR_BAR		= 5,
 	PIIX_SIDPR_LEN		= 16,
@@ -233,7 +233,7 @@
 	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
 	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
-	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+	/* 82801GBM/GHM (ICH7M, identical to ICH6M) */
 	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
 	/* Enterprise Southbridge 2 (631xESB/632xESB) */
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
@@ -517,7 +517,7 @@
 	const struct ich_laptop *lap = &ich_laptop[0];
 	u8 mask;
 
-	/* Check for specials - Acer Aspire 5602WLMi */
+	/* Check for specials */
 	while (lap->device) {
 		if (lap->device == pdev->device &&
 		    lap->subvendor == pdev->subsystem_vendor &&
@@ -1366,38 +1366,39 @@
 	const int *map;
 	int i, invalid_map = 0;
 	u8 map_value;
+	char buf[32];
+	char *p = buf, *end = buf + sizeof(buf);
 
 	pci_read_config_byte(pdev, ICH5_PMR, &map_value);
 
 	map = map_db->map[map_value & map_db->mask];
 
-	dev_info(&pdev->dev, "MAP [");
 	for (i = 0; i < 4; i++) {
 		switch (map[i]) {
 		case RV:
 			invalid_map = 1;
-			pr_cont(" XX");
+			p += scnprintf(p, end - p, " XX");
 			break;
 
 		case NA:
-			pr_cont(" --");
+			p += scnprintf(p, end - p, " --");
 			break;
 
 		case IDE:
 			WARN_ON((i & 1) || map[i + 1] != IDE);
 			pinfo[i / 2] = piix_port_info[ich_pata_100];
 			i++;
-			pr_cont(" IDE IDE");
+			p += scnprintf(p, end - p, " IDE IDE");
 			break;
 
 		default:
-			pr_cont(" P%d", map[i]);
+			p += scnprintf(p, end - p, " P%d", map[i]);
 			if (i & 1)
 				pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
 			break;
 		}
 	}
-	pr_cont(" ]\n");
+	dev_info(&pdev->dev, "MAP [%s ]\n", buf);
 
 	if (invalid_map)
 		dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
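pr_cont() output can interleave with messages from other CPUs, so the
rewrite assembles the map string in a local buffer and emits it with one
dev_info(). scnprintf() returns the number of characters actually written
(never more than the space passed in), which keeps the pointer-advance
idiom safe even once the buffer fills. The core of the pattern:

	char buf[32];
	char *p = buf, *end = buf + sizeof(buf);

	/*
	 * Each call writes at most end - p bytes and returns what it
	 * wrote, so p never advances past end.
	 */
	p += scnprintf(p, end - p, " P%d", 0);
	p += scnprintf(p, end - p, " P%d", 2);
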
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index aaac4fb..c482f8c 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -89,7 +89,6 @@
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline);
 static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_dev_config(struct ata_device *dev);
 #ifdef CONFIG_PM
@@ -189,14 +188,15 @@
 };
 EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
 
-int ahci_em_messages = 1;
+static bool ahci_em_messages __read_mostly = true;
 EXPORT_SYMBOL_GPL(ahci_em_messages);
-module_param(ahci_em_messages, int, 0444);
+module_param(ahci_em_messages, bool, 0444);
 /* add other LED protocol types when they become supported */
 MODULE_PARM_DESC(ahci_em_messages,
 	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
 
-int devslp_idle_timeout = 1000;	/* device sleep idle timeout in ms */
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
 module_param(devslp_idle_timeout, int, 0644);
 MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
 
@@ -1275,9 +1275,11 @@
 {
 	struct ata_port *ap = link->ap;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
 	const char *reason = NULL;
 	unsigned long now, msecs;
 	struct ata_taskfile tf;
+	bool fbs_disabled = false;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1287,6 +1289,16 @@
 	if (rc && rc != -EOPNOTSUPP)
 		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
 
+	/*
+	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+	 * clear PxFBS.EN to '0' prior to issuing software reset to devices
+	 * that are attached to a port multiplier.
+	 */
+	if (!ata_is_host_link(link) && pp->fbs_enabled) {
+		ahci_disable_fbs(ap);
+		fbs_disabled = true;
+	}
+
 	ata_tf_init(link->device, &tf);
 
 	/* issue the first D2H Register FIS */
@@ -1327,6 +1339,10 @@
 	} else
 		*class = ahci_dev_classify(ap);
 
+	/* re-enable FBS if disabled before */
+	if (fbs_disabled)
+		ahci_enable_fbs(ap);
+
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 
@@ -1989,7 +2005,7 @@
 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
 }
 
-static void ahci_error_handler(struct ata_port *ap)
+void ahci_error_handler(struct ata_port *ap)
 {
 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
 		/* restart engine */
@@ -2002,6 +2018,7 @@
 	if (!ata_dev_enabled(ap->link.device))
 		ahci_stop_engine(ap);
 }
+EXPORT_SYMBOL_GPL(ahci_error_handler);
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 83b1a9f..81a94a3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4126,6 +4126,7 @@
 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
+	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
 
 	/* Devices we expect to fail diagnostics */
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 370462f..92d7797 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2293,6 +2293,7 @@
 		{ ATA_CMD_IDLE, 		"IDLE" },
 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
+		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
 		{ ATA_CMD_NOP,			"NOP" },
 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
@@ -2313,6 +2314,8 @@
 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
+		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
+		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
@@ -2339,12 +2342,15 @@
 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
+		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
+		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
+		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
@@ -2363,6 +2369,8 @@
 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
+		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
+		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
@@ -3009,7 +3017,7 @@
 	 * ourselves at the beginning of each pass over the loop.
 	 *
 	 * Additionally, all write accesses to &ap->park_req_pending
-	 * through INIT_COMPLETION() (see below) or complete_all()
+	 * through reinit_completion() (see below) or complete_all()
 	 * (see ata_scsi_park_store()) are protected by the host lock.
 	 * As a result we have that park_req_pending.done is zero on
 	 * exit from this function, i.e. when ATA_EH_PARK actions for
@@ -3023,7 +3031,7 @@
 	 */
 
 	spin_lock_irqsave(ap->lock, flags);
-	INIT_COMPLETION(ap->park_req_pending);
+	reinit_completion(&ap->park_req_pending);
 	ata_for_each_link(link, ap, EDGE) {
 		ata_for_each_dev(dev, link, ALL) {
 			struct ata_eh_info *ehi = &link->eh_info;
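INIT_COMPLETION() was an all-caps macro that re-armed a completion by
zeroing its done count; reinit_completion() is the function-style
replacement with identical semantics. Unlike init_completion(), it does not
touch the wait queue, so it is the right call when reusing an
already-initialized completion. The usual cycle:

	struct completion work_done;

	init_completion(&work_done);		/* once, at setup */

	/* Before each new wait cycle, reset only the done counter. */
	reinit_completion(&work_done);
	/* ... start work that ends in complete(&work_done) ... */
	wait_for_completion(&work_done);
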
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 150a917..e37413228 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -321,25 +321,25 @@
 /*
  * ATA link attributes
  */
+static int noop(int x) { return x; }
 
-
-#define ata_link_show_linkspeed(field)					\
+#define ata_link_show_linkspeed(field, format)			        \
 static ssize_t								\
 show_ata_link_##field(struct device *dev,				\
 		      struct device_attribute *attr, char *buf)		\
 {									\
 	struct ata_link *link = transport_class_to_link(dev);		\
 									\
-	return sprintf(buf,"%s\n", sata_spd_string(fls(link->field)));	\
+	return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
 }
 
-#define ata_link_linkspeed_attr(field)					\
-	ata_link_show_linkspeed(field)					\
+#define ata_link_linkspeed_attr(field, format)				\
+	ata_link_show_linkspeed(field, format)				\
 static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
 
-ata_link_linkspeed_attr(hw_sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd);
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
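The new format argument exists because hw_sata_spd_limit and sata_spd_limit
are bitmasks of allowed link speeds, so fls() must first convert them to the
index of the highest set bit before sata_spd_string() can name them, while
sata_spd already holds the index and only needs the identity mapping. For
instance:

	/* Mask 0x3 allows gen 1 and gen 2; fls(0x3) == 2 -> "3.0 Gbps". */
	int gen = fls(0x3);

	/* sata_spd is already a generation index: noop(2) == 2. */
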
 
 
 static DECLARE_TRANSPORT_CLASS(ata_link_class,
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 1ec53f8..ddf470c 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -144,6 +144,7 @@
 	struct ata_host *host;
 	struct ata_port *ap;
 	struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+	int ret;
 
 	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@
 		return -ENOMEM;
 
 	/* acquire resources and fill host */
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
 	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index c51bbb9..83c4ddb 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1014,8 +1014,9 @@
 	}
 	cf_port->c0 = ap->ioaddr.ctl_addr;
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rv)
+		return rv;
 
 	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
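Both of these hunks replace direct writes to the DMA mask fields with the
checked setters, which verify the mask against what the platform can
actually address and fail the probe instead of silently installing an
unusable mask. The pattern:

	int ret;

	/* Fails (typically -EIO) if the platform can't honor the mask. */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
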
 
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 7f5e5d9..ea3b3dc 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -343,13 +343,11 @@
 {
 	struct device_node *sata_node = dev->of_node;
 	int phy_count = 0, phy, port = 0, i;
-	void __iomem *cphy_base[CPHY_PHY_COUNT];
-	struct device_node *phy_nodes[CPHY_PHY_COUNT];
-	u32 tx_atten[CPHY_PORT_COUNT];
+	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
+	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
+	u32 tx_atten[CPHY_PORT_COUNT] = {};
 
 	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
-	memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
-	memset(tx_atten, 0xff, CPHY_PORT_COUNT);
 
 	do {
 		u32 tmp;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c2d95e9..1dae9a9 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -792,7 +792,7 @@
 		dev_err(&pdev->dev, "failed to get access to sata clock\n");
 		return PTR_ERR(priv->clk);
 	}
-	clk_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	host = ata_host_alloc(&pdev->dev, 1);
 	if (!host) {
@@ -822,7 +822,7 @@
 		return 0;
 
 cleanup:
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -841,7 +841,7 @@
 	iowrite32(0, base + SATAINTSTAT_REG);
 	iowrite32(0x7ff, base + SATAINTMASK_REG);
 
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return 0;
 }
@@ -861,7 +861,7 @@
 		/* mask */
 		iowrite32(0x7ff, base + SATAINTMASK_REG);
 
-		clk_disable(priv->clk);
+		clk_disable_unprepare(priv->clk);
 	}
 
 	return ret;
@@ -873,7 +873,7 @@
 	struct sata_rcar_priv *priv = host->private_data;
 	void __iomem *base = priv->base;
 
-	clk_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	/* ack and mask */
 	iowrite32(0, base + SATAINTSTAT_REG);
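clk_enable() performs only the atomic half of powering a clock on
common-clock platforms; clk_prepare() covers the part that may sleep.
clk_prepare_enable() combines the two for callers in process context, and
clk_disable_unprepare() is its inverse, which is why the probe, suspend,
and resume paths above switch in matched pairs:

	#include <linux/clk.h>

	static int example_power_on(struct clk *clk)
	{
		int ret;

		ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
		if (ret)
			return ret;

		/* ... hardware access ... */

		clk_disable_unprepare(clk);	/* disable + unprepare */
		return 0;
	}
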
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
index 49e783e..364eded 100644
--- a/drivers/atm/firestream.h
+++ b/drivers/atm/firestream.h
@@ -420,7 +420,6 @@
 #define RC_FLAGS_BFPS_BFP27 (0xd << 17)
 #define RC_FLAGS_BFPS_BFP47 (0xe << 17)
 
-#define RC_FLAGS_BFPS       (0x1 << 17)
 #define RC_FLAGS_BFPP       (0x1 << 21)
 #define RC_FLAGS_TEVC       (0x1 << 22)
 #define RC_FLAGS_TEP        (0x1 << 23)
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index d585735..a387403 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -102,8 +102,7 @@
 
 	platform_set_drvdata(device, info);
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-		info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 848ebbd..f48370d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -44,13 +44,11 @@
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	int cpuid = dev->id;
 	int from_nid, to_nid;
-	int ret = -ENODEV;
-
-	cpu_hotplug_driver_lock();
+	int ret;
 
 	from_nid = cpu_to_node(cpuid);
 	if (from_nid == NUMA_NO_NODE)
-		goto out;
+		return -ENODEV;
 
 	ret = cpu_up(cpuid);
 	/*
@@ -61,19 +59,12 @@
 	if (from_nid != to_nid)
 		change_cpu_under_node(cpu, from_nid, to_nid);
 
- out:
-	cpu_hotplug_driver_unlock();
 	return ret;
 }
 
 static int cpu_subsys_offline(struct device *dev)
 {
-	int ret;
-
-	cpu_hotplug_driver_lock();
-	ret = cpu_down(dev->id);
-	cpu_hotplug_driver_unlock();
-	return ret;
+	return cpu_down(dev->id);
 }
 
 void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@
 			       const char *buf,
 			       size_t count)
 {
-	return arch_cpu_probe(buf, count);
+	ssize_t cnt;
+	int ret;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	cnt = arch_cpu_probe(buf, count);
+
+	unlock_device_hotplug();
+	return cnt;
 }
 
 static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@
 				 const char *buf,
 				 size_t count)
 {
-	return arch_cpu_release(buf, count);
+	ssize_t cnt;
+	int ret;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	cnt = arch_cpu_release(buf, count);
+
+	unlock_device_hotplug();
+	return cnt;
 }
 
 static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
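lock_device_hotplug_sysfs() is the sysfs-safe variant of
lock_device_hotplug(): blocking on the lock while holding a sysfs active
reference can deadlock against device removal, so it trylocks and, when
contended, returns a value that makes the syscall restart. A sketch of the
wrapper shape used above, with a hypothetical operation:

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		ssize_t cnt;
		int ret;

		ret = lock_device_hotplug_sysfs();
		if (ret)
			return ret;	/* caller retries transparently */

		cnt = do_hotplug_operation(buf, count);	/* hypothetical */

		unlock_device_hotplug();
		return cnt;
	}
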
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa368..0605176 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -499,7 +499,7 @@
 						     BUS_NOTIFY_UNBIND_DRIVER,
 						     dev);
 
-		pm_runtime_put(dev);
+		pm_runtime_put_sync(dev);
 
 		if (dev->bus && dev->bus->remove)
 			dev->bus->remove(dev);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 7413d06..0f38201 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -216,7 +216,7 @@
 		newattrs.ia_gid = gid;
 		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
 		mutex_lock(&dentry->d_inode->i_mutex);
-		notify_change(dentry, &newattrs);
+		notify_change(dentry, &newattrs, NULL);
 		mutex_unlock(&dentry->d_inode->i_mutex);
 
 		/* mark as kernel-created inode */
@@ -322,9 +322,9 @@
 			newattrs.ia_valid =
 				ATTR_UID|ATTR_GID|ATTR_MODE;
 			mutex_lock(&dentry->d_inode->i_mutex);
-			notify_change(dentry, &newattrs);
+			notify_change(dentry, &newattrs, NULL);
 			mutex_unlock(&dentry->d_inode->i_mutex);
-			err = vfs_unlink(parent.dentry->d_inode, dentry);
+			err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
 			if (!err || err == -ENOENT)
 				deleted = 1;
 		}
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 99802d6f..165c2c2 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -49,7 +49,7 @@
 
 /*
  * Default global CMA area size can be defined in kernel's .config.
- * This is usefull mainly for distro maintainers to create a kernel
+ * This is useful mainly for distro maintainers to create a kernel
  * that works correctly for most supported systems.
  * The size can be set in bytes or as a percentage of the total memory
  * in the system.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a8..c12e9b9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
 #include <linux/suspend.h>
 #include <trace/events/power.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -390,6 +392,71 @@
 	return error;
 }
 
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+	struct device		*dev;
+	struct task_struct	*tsk;
+	struct timer_list	timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+	struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+	struct dpm_watchdog *wd = (void *)data;
+
+	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+	show_stack(wd->tsk, NULL);
+	panic("%s %s: unrecoverable failure\n",
+		dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+	struct timer_list *timer = &wd->timer;
+
+	wd->dev = dev;
+	wd->tsk = current;
+
+	init_timer_on_stack(timer);
+	/* use same timeout value for both suspend and resume */
+	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+	timer->function = dpm_watchdog_handler;
+	timer->data = (unsigned long)wd;
+	add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+	struct timer_list *timer = &wd->timer;
+
+	del_timer_sync(timer);
+	destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -576,6 +643,7 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
@@ -584,6 +652,7 @@
 		goto Complete;
 
 	dpm_wait(dev->parent, async);
+	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
 	/*
@@ -642,6 +711,7 @@
 
  Unlock:
 	device_unlock(dev);
+	dpm_watchdog_clear(&wd);
 
  Complete:
 	complete_all(&dev->power.completion);
@@ -687,7 +757,7 @@
 	async_error = 0;
 
 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-		INIT_COMPLETION(dev->power.completion);
+		reinit_completion(&dev->power.completion);
 		if (is_async(dev)) {
 			get_device(dev);
 			async_schedule(async_resume, dev);
@@ -1060,6 +1130,7 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
 	dpm_wait_for_children(dev, async);
 
@@ -1083,6 +1154,7 @@
 	if (dev->power.syscore)
 		goto Complete;
 
+	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
 	if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@
 	}
 
 	device_unlock(dev);
+	dpm_watchdog_clear(&wd);
 
  Complete:
 	complete_all(&dev->power.completion);
@@ -1164,7 +1237,7 @@
 
 static int device_suspend(struct device *dev)
 {
-	INIT_COMPLETION(dev->power.completion);
+	reinit_completion(&dev->power.completion);
 
 	if (pm_async_enabled && dev->power.async_suspend) {
 		get_device(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897..fa41874 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
 #include <linux/list.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/of.h>
 #include <linux/export.h>
 
@@ -42,7 +42,7 @@
  */
 
 /**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
  * @node:	opp list node. The nodes are maintained throughout the lifetime
  *		of boot. It is expected only an optimal set of OPPs are
  *		added to the library by the SoC framework.
@@ -59,7 +59,7 @@
  *
  * This structure stores the OPP information for a given device.
  */
-struct opp {
+struct dev_pm_opp {
 	struct list_head node;
 
 	bool available;
@@ -136,7 +136,7 @@
 }
 
 /**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
  * @opp:	opp for which voltage has to be returned for
  *
  * Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
-	struct opp *tmp_opp;
+	struct dev_pm_opp *tmp_opp;
 	unsigned long v = 0;
 
 	tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@
 
 	return v;
 }
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 
 /**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  * @opp:	opp for which frequency has to be returned for
  *
  * Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 {
-	struct opp *tmp_opp;
+	struct dev_pm_opp *tmp_opp;
 	unsigned long f = 0;
 
 	tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@
 
 	return f;
 }
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
 /**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:	device for which we do this operation
  *
  * This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@
  * internally references two RCU protected structures: device_opp and opp which
  * are safe as long as we are under a common RCU locked section.
  */
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp;
+	struct dev_pm_opp *temp_opp;
 	int count = 0;
 
 	dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
 
 /**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
  * @dev:		device for which we do this operation
  * @freq:		frequency to search for
  * @available:		true/false - match for available opp
@@ -254,11 +254,12 @@
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-				bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+					      unsigned long freq,
+					      bool available)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@
 
 	return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
 /**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
  * @dev:	device for which we do this operation
  * @freq:	Start frequency
  *
@@ -300,10 +301,11 @@
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+					     unsigned long *freq)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@
 
 	return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 
 /**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  * @dev:	device for which we do this operation
  * @freq:	Start frequency
  *
@@ -347,10 +349,11 @@
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+					      unsigned long *freq)
 {
 	struct device_opp *dev_opp;
-	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@
 
 	return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
 /**
- * opp_add()  - Add an OPP table from a table definitions
+ * dev_pm_opp_add()  - Add an OPP table entry from a table definition
  * @dev:	device for which we do this operation
  * @freq:	Frequency in Hz for this OPP
  * @u_volt:	Voltage in uVolts for this OPP
  *
  * This function adds an opp definition to the opp list and returns status.
  * The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
  */
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
 	struct device_opp *dev_opp = NULL;
-	struct opp *opp, *new_opp;
+	struct dev_pm_opp *opp, *new_opp;
 	struct list_head *head;
 
 	/* allocate new OPP node */
-	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
 	if (!new_opp) {
 		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
 		return -ENOMEM;
@@ -460,7 +463,7 @@
 	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@
 		bool availability_req)
 {
 	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
-	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
 	int r = 0;
 
 	/* keep the node allocated */
-	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
 	if (!new_opp) {
 		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
 		return -ENOMEM;
@@ -552,13 +555,13 @@
 }
 
 /**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
  * @dev:	device for which we do this operation
  * @freq:	OPP frequency to enable
  *
  * Enables a provided opp. If the operation is valid, this returns 0, else the
  * corresponding error value. It is meant to be used by users to make an OPP available
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
 	return opp_set_availability(dev, freq, true);
 }
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
 
 /**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
  * @dev:	device for which we do this operation
  * @freq:	OPP frequency to disable
  *
  * Disables a provided opp. If the operation is valid, this returns
  * 0, else the corresponding error value. It is meant to be a temporary
  * control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 {
 	return opp_set_availability(dev, freq, false);
 }
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
 #ifdef CONFIG_CPU_FREQ
 /**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
  * @dev:	device for which we do this operation
  * @table:	Cpufreq table returned back to caller
  *
@@ -619,11 +622,11 @@
  * Callers should ensure that this function is *NOT* called under RCU protection
  * or in contexts where mutex locking cannot be used.
  */
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
 			    struct cpufreq_frequency_table **table)
 {
 	struct device_opp *dev_opp;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	struct cpufreq_frequency_table *freq_table;
 	int i = 0;
 
@@ -639,7 +642,7 @@
 	}
 
 	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+			     (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
 	if (!freq_table) {
 		mutex_unlock(&dev_opp_list_lock);
 		dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
 
 /**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
  * @dev:	device for which we do this operation
  * @table:	table to free
  *
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
  */
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
 				struct cpufreq_frequency_table **table)
 {
 	if (!table)
@@ -681,14 +684,14 @@
 	kfree(*table);
 	*table = NULL;
 }
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif		/* CONFIG_CPU_FREQ */
 
 /**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  * @dev:	device pointer used to lookup device OPPs.
  */
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
 	struct device_opp *dev_opp = find_device_opp(dev);
 
@@ -732,7 +735,7 @@
 		unsigned long freq = be32_to_cpup(val++) * 1000;
 		unsigned long volt = be32_to_cpup(val++);
 
-		if (opp_add(dev, freq, volt)) {
+		if (dev_pm_opp_add(dev, freq, volt)) {
 			dev_warn(dev, "%s: Failed to add OPP %ld\n",
 				 __func__, freq);
 			continue;
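
The OPP rename is mechanical (opp_* becomes dev_pm_opp_*, struct opp becomes
struct dev_pm_opp) and the RCU rules quoted in the kerneldoc are unchanged:
the returned OPP pointer is only valid inside the read-side section. A hedged
caller-side sketch, assuming a device already populated via dev_pm_opp_add():

	unsigned long freq = 800000000, volt;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);	/* rounds freq up in place */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);	/* must happen before unlock */
	rcu_read_unlock();
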
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a350..72e00e6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@
  * Check if the device's runtime PM status allows it to be suspended.  If
  * another idle notification has been started earlier, return immediately.  If
  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -331,7 +332,7 @@
 
  out:
 	trace_rpm_return_int(dev, _THIS_IP_, retval);
-	return retval ? retval : rpm_suspend(dev, rpmflags);
+	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
 /**
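
With rpm_idle() now passing RPM_AUTO, an idle notification honors the device's
autosuspend delay instead of suspending immediately. The driver-side setup
this interacts with looks like the sketch below (the 2000 ms delay is purely
illustrative):

	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);

	/* after each burst of I/O: */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
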
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a355e63..6fb98b5 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -188,8 +188,11 @@
 		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
 
 	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
-	if (!pci_is_pcie(dev))
-		bcma_err(bus, "PCI card detected, report problems.\n");
+	if (!pci_is_pcie(dev)) {
+		bcma_err(bus, "PCI card detected; non-PCIe cards are not supported.\n");
+		err = -ENXIO;
+		goto err_pci_release_regions;
+	}
 
 	/* Map MMIO */
 	err = -ENOMEM;
@@ -269,6 +272,7 @@
 
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e67fa16..86b9f37 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -15,6 +15,9 @@
 
 if BLK_DEV
 
+config BLK_DEV_NULL_BLK
+	tristate "Null test block driver"
+
 config BLK_DEV_FD
 	tristate "Normal floppy disk support"
 	depends on ARCH_MAY_HAVE_PC_FDC
@@ -107,7 +110,7 @@
 
 config BLK_CPQ_DA
 	tristate "Compaq SMART2 support"
-	depends on PCI && VIRT_TO_BUS
+	depends on PCI && VIRT_TO_BUS && 0
 	help
 	  This is the driver for Compaq Smart Array controllers.  Everyone
 	  using these boards should say Y here.  See the file
@@ -316,6 +319,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called nvme.
 
+config BLK_DEV_SKD
+	tristate "STEC S1120 Block Driver"
+	depends on PCI
+	depends on 64BIT
+	---help---
+	Saying Y or M here will enable support for the
+	STEC, Inc. S1120 PCIe SSD.
+
+	Use devices /dev/skd$N and /dev/skd$Np$M.
+
 config BLK_DEV_OSD
 	tristate "OSD object-as-blkdev support"
 	depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ca07399..8cc98cd 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_MG_DISK)		+= mg_disk.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
 obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
+obj-$(CONFIG_BLK_DEV_SKD)	+= skd.o
 obj-$(CONFIG_BLK_DEV_OSD)	+= osdblk.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
@@ -41,6 +42,8 @@
 obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX)	+= mtip32xx/
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+obj-$(CONFIG_BLK_DEV_NULL_BLK)	+= null_blk.o
 
 nvme-y		:= nvme-core.o nvme-scsi.o
+skd-y		:= skd_main.o
 swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4ff85b8..748dea4 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -343,7 +343,7 @@
 		unit[nr].motor = 1;
 		fd_select(nr);
 
-		INIT_COMPLETION(motor_on_completion);
+		reinit_completion(&motor_on_completion);
 		motor_on_timer.data = nr;
 		mod_timer(&motor_on_timer, jiffies + HZ/2);
 
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 9bf4371..d91f1a5 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -545,7 +545,7 @@
 
 	mutex_lock(&brd_devices_mutex);
 	brd = brd_init_one(MINOR(dev) >> part_shift);
-	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+	kobj = brd ? get_disk(brd->brd_disk) : NULL;
 	mutex_unlock(&brd_devices_mutex);
 
 	*part = 0;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index edfa251..b35fc4f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2808,7 +2808,7 @@
 		/* erase the old error information */
 		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 		return_status = IO_OK;
-		INIT_COMPLETION(wait);
+		reinit_completion(&wait);
 		goto resend_cmd2;
 	}
 
@@ -3669,7 +3669,7 @@
 		}
 	}
 	if (!found && !h->busy_scanning) {
-		INIT_COMPLETION(h->scan_wait);
+		reinit_completion(&h->scan_wait);
 		list_add_tail(&h->scan_list, &scan_q);
 		ret = 1;
 	}
@@ -5183,7 +5183,7 @@
 	rebuild_lun_table(h, 1, 0);
 	cciss_engage_scsi(h);
 	h->busy_initializing = 0;
-	return 1;
+	return 0;
 
 clean4:
 	cciss_free_cmd_pool(h);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2d7f608..0e06f0c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1474,7 +1474,8 @@
 	DS_ERROR = -1,
 	DS_UNCHANGED = 0,
 	DS_SHRUNK = 1,
-	DS_GREW = 2
+	DS_GREW = 2,
+	DS_GREW_FROM_ZERO = 3,
 };
 extern enum determine_dev_size
 drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55635ed..9e3818b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2750,13 +2750,6 @@
 		return err;
 	}
 
-	err = drbd_genl_register();
-	if (err) {
-		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
-		goto fail;
-	}
-
-
 	register_reboot_notifier(&drbd_notifier);
 
 	/*
@@ -2767,6 +2760,15 @@
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	idr_init(&minors);
 
+	rwlock_init(&global_state_lock);
+	INIT_LIST_HEAD(&drbd_tconns);
+
+	err = drbd_genl_register();
+	if (err) {
+		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+		goto fail;
+	}
+
 	err = drbd_create_mempools();
 	if (err)
 		goto fail;
@@ -2778,9 +2780,6 @@
 		goto fail;
 	}
 
-	rwlock_init(&global_state_lock);
-	INIT_LIST_HEAD(&drbd_tconns);
-
 	retry.wq = create_singlethread_workqueue("drbd-reissue");
 	if (!retry.wq) {
 		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cc1e64..c706d50 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -955,7 +955,7 @@
 	}
 
 	if (size > la_size_sect)
-		rv = DS_GREW;
+		rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
 	if (size < la_size_sect)
 		rv = DS_SHRUNK;
 
@@ -1132,9 +1132,9 @@
 	/* We may ignore peer limits if the peer is modern enough.
 	   Because new from 8.3.8 onwards the peer can use multiple
 	   BIOs for a single peer_request */
-	if (mdev->state.conn >= C_CONNECTED) {
+	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
 		if (mdev->tconn->agreed_pro_version < 94)
-			peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+			peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
 		else if (mdev->tconn->agreed_pro_version == 94)
 			peer = DRBD_MAX_SIZE_H80_PACKET;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cc29cd3..6fa6673 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1890,29 +1890,11 @@
 	return seq_greater(a, b) ? a : b;
 }
 
-static bool need_peer_seq(struct drbd_conf *mdev)
-{
-	struct drbd_tconn *tconn = mdev->tconn;
-	int tp;
-
-	/*
-	 * We only need to keep track of the last packet_seq number of our peer
-	 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
-	 * handle_write_conflicts().
-	 */
-
-	rcu_read_lock();
-	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
-	rcu_read_unlock();
-
-	return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
-}
-
 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
 {
 	unsigned int newest_peer_seq;
 
-	if (need_peer_seq(mdev)) {
+	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
 		spin_lock(&mdev->peer_seq_lock);
 		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
 		mdev->peer_seq = newest_peer_seq;
@@ -1972,22 +1954,31 @@
 {
 	DEFINE_WAIT(wait);
 	long timeout;
-	int ret;
+	int ret = 0, tp;
 
-	if (!need_peer_seq(mdev))
+	if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
 		return 0;
 
 	spin_lock(&mdev->peer_seq_lock);
 	for (;;) {
 		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
 			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
-			ret = 0;
 			break;
 		}
+
 		if (signal_pending(current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
+
+		rcu_read_lock();
+		tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+		rcu_read_unlock();
+
+		if (!tp)
+			break;
+
+		/* Only need to wait if two_primaries is enabled */
 		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
 		spin_unlock(&mdev->peer_seq_lock);
 		rcu_read_lock();
@@ -2228,8 +2219,10 @@
 			}
 			goto out_interrupted;
 		}
-	} else
+	} else {
+		update_peer_seq(mdev, peer_seq);
 		spin_lock_irq(&mdev->tconn->req_lock);
+	}
 	list_add(&peer_req->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
@@ -4132,7 +4125,11 @@
 				(unsigned int)bs.buf_len);
 			return -EIO;
 		}
-		look_ahead >>= bits;
+		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
+		if (likely(bits < 64))
+			look_ahead >>= bits;
+		else
+			look_ahead = 0;
 		have -= bits;
 
 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
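
The guard above is needed because C leaves x >> n undefined once n reaches the
width of x; on x86-64 the hardware masks the shift count, so a u64 shifted by
64 silently yields the original value rather than 0. The safe idiom in
isolation:

	static inline u64 consume_bits(u64 word, unsigned int bits)
	{
		return bits < 64 ? word >> bits : 0;
	}
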
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c24379f..fec7bef 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1306,6 +1306,7 @@
 	int backing_limit;
 
 	if (bio_size && get_ldev(mdev)) {
+		unsigned int max_hw_sectors = queue_max_hw_sectors(q);
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn) {
@@ -1313,6 +1314,8 @@
 			limit = min(limit, backing_limit);
 		}
 		put_ldev(mdev);
+		if ((limit >> 9) > max_hw_sectors)
+			limit = max_hw_sectors << 9;
 	}
 	return limit;
 }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 04ceb7e..000abe2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2886,9 +2886,9 @@
 		return;
 
 	if (WARN(atomic_read(&usage_count) == 0,
-		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
 		 current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
-		 current_req->cmd_flags))
+		 (unsigned long long) current_req->cmd_flags))
 		return;
 
 	if (test_and_set_bit(0, &fdc_busy)) {
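
This hunk tracks cmd_flags becoming a 64-bit quantity: printk() has no
conversion specifier for u64 itself, so the portable idiom is %llx (or %llu)
together with an explicit cast, e.g.:

	pr_info("flags=%llx\n", (unsigned long long)rq->cmd_flags);
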
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40e7155..c8dac73 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -894,13 +894,6 @@
 
 	bio_list_init(&lo->lo_bio_list);
 
-	/*
-	 * set queue make_request_fn, and add limits based on lower level
-	 * device
-	 */
-	blk_queue_make_request(lo->lo_queue, loop_make_request);
-	lo->lo_queue->queuedata = lo;
-
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
@@ -1618,6 +1611,8 @@
 	if (!lo)
 		goto out;
 
+	lo->lo_state = Lo_unbound;
+
 	/* allocate id, if @id >= 0, we're requesting that specific id */
 	if (i >= 0) {
 		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -1633,7 +1628,13 @@
 	err = -ENOMEM;
 	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
 	if (!lo->lo_queue)
-		goto out_free_dev;
+		goto out_free_idr;
+
+	/*
+	 * set queue make_request_fn
+	 */
+	blk_queue_make_request(lo->lo_queue, loop_make_request);
+	lo->lo_queue->queuedata = lo;
 
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
 	if (!disk)
@@ -1678,6 +1679,8 @@
 
 out_free_queue:
 	blk_cleanup_queue(lo->lo_queue);
+out_free_idr:
+	idr_remove(&loop_index_idr, i);
 out_free_dev:
 	kfree(lo);
 out:
@@ -1741,7 +1744,7 @@
 	if (err < 0)
 		err = loop_add(&lo, MINOR(dev) >> part_shift);
 	if (err < 0)
-		kobj = ERR_PTR(err);
+		kobj = NULL;
 	else
 		kobj = get_disk(lo->lo_disk);
 	mutex_unlock(&loop_index_mutex);
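
The new out_free_idr label completes the usual idr unwind: once idr_alloc()
has succeeded, every later failure must jump to a label that releases the id
again, with labels unwinding in reverse order of setup. Skeleton of the
pattern as used here:

	err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto out_free_dev;
	i = err;			/* idr_alloc() returns the allocated id */

	err = -ENOMEM;
	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
	if (!lo->lo_queue)
		goto out_free_idr;	/* the id is live and must be put back */

	return 0;

out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
	return err;
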
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 77a60be..7bc363f 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -936,7 +936,7 @@
 			goto probe_err_3b;
 		}
 		err = request_irq(host->irq, mg_irq,
-				IRQF_DISABLED | IRQF_TRIGGER_RISING,
+				IRQF_TRIGGER_RISING,
 				MG_DEV_NAME, host);
 		if (err) {
 			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 952dbfe..050c712 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -126,64 +126,30 @@
 static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 {
 	u16 vendor_id = 0;
+	struct driver_data *dd = pci_get_drvdata(pdev);
+
+	if (dd->sr)
+		return true;
 
        /* Read the vendorID from the configuration space */
 	pci_read_config_word(pdev, 0x00, &vendor_id);
-	if (vendor_id == 0xFFFF)
+	if (vendor_id == 0xFFFF) {
+		dd->sr = true;
+		if (dd->queue)
+			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
+		else
+			dev_warn(&dd->pdev->dev,
+				"%s: dd->queue is NULL\n", __func__);
+		if (dd->port) {
+			set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
+			wake_up_interruptible(&dd->port->svc_wait);
+		} else
+			dev_warn(&dd->pdev->dev,
+				"%s: dd->port is NULL\n", __func__);
 		return true; /* device removed */
-
-	return false; /* device present */
-}
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- *	None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
-	int group = 0, commandslot = 0, commandindex = 0;
-	struct mtip_cmd *command;
-	struct mtip_port *port = dd->port;
-	static int in_progress;
-
-	if (in_progress)
-		return;
-
-	in_progress = 1;
-
-	for (group = 0; group < 4; group++) {
-		for (commandslot = 0; commandslot < 32; commandslot++) {
-			if (!(port->allocated[group] & (1 << commandslot)))
-				continue;
-
-			commandindex = group << 5 | commandslot;
-			command = &port->commands[commandindex];
-
-			if (atomic_read(&command->active)
-			    && (command->async_callback)) {
-				command->async_callback(command->async_data,
-					-ENODEV);
-				command->async_callback = NULL;
-				command->async_data = NULL;
-			}
-
-			dma_unmap_sg(&port->dd->pdev->dev,
-				command->sg,
-				command->scatter_ents,
-				command->direction);
-		}
 	}
 
-	up(&port->cmd_slot);
-
-	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
-	in_progress = 0;
+	return false; /* device present */
 }
 
 /*
@@ -222,10 +188,7 @@
 	}
 	dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
 
-	if (mtip_check_surprise_removal(port->dd->pdev)) {
-		/* Device not present, clean outstanding commands */
-		mtip_command_cleanup(port->dd);
-	}
+	mtip_check_surprise_removal(port->dd->pdev);
 	return -1;
 }
 
@@ -246,6 +209,107 @@
 }
 
 /*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port   Pointer to the port data structure.
+ * @tag    Tag of the command.
+ * @data   Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ *	None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+				int tag,
+				void *data,
+				int status)
+{
+	struct mtip_cmd *command;
+	struct driver_data *dd = data;
+	int cb_status = status ? -EIO : 0;
+
+	if (unlikely(!dd) || unlikely(!port))
+		return;
+
+	command = &port->commands[tag];
+
+	if (unlikely(status == PORT_IRQ_TF_ERR)) {
+		dev_warn(&port->dd->pdev->dev,
+			"Command tag %d failed due to TFE\n", tag);
+	}
+
+	/* Upper layer callback */
+	if (likely(command->async_callback))
+		command->async_callback(command->async_data, cb_status);
+
+	command->async_callback = NULL;
+	command->comp_func = NULL;
+
+	/* Unmap the DMA scatter list entries */
+	dma_unmap_sg(&dd->pdev->dev,
+		command->sg,
+		command->scatter_ents,
+		command->direction);
+
+	/* Clear the allocated and active bits for the command */
+	atomic_set(&port->commands[tag].active, 0);
+	release_slot(port, tag);
+
+	up(&port->cmd_slot);
+}
+
+/*
+ * This function is called to clean up the pending commands in the
+ * command slots during surprise removal of the device and to return
+ * errors to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ *	None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+	int tag = 0;
+	struct mtip_cmd *cmd;
+	struct mtip_port *port = dd->port;
+	unsigned int num_cmd_slots = dd->slot_groups * 32;
+
+	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+		return;
+
+	if (!port)
+		return;
+
+	cmd = &port->commands[MTIP_TAG_INTERNAL];
+	if (atomic_read(&cmd->active))
+		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
+					(1 << MTIP_TAG_INTERNAL))
+			if (cmd->comp_func)
+				cmd->comp_func(port, MTIP_TAG_INTERNAL,
+					 cmd->comp_data, -ENODEV);
+
+	while (1) {
+		tag = find_next_bit(port->allocated, num_cmd_slots, tag);
+		if (tag >= num_cmd_slots)
+			break;
+
+		cmd = &port->commands[tag];
+		if (atomic_read(&cmd->active))
+			mtip_async_complete(port, tag, dd, -ENODEV);
+	}
+
+	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+}
+
+/*
  * Reset the HBA (without sleeping)
  *
  * @dd Pointer to the driver data structure.
@@ -584,6 +648,9 @@
 	if (unlikely(!port))
 		return;
 
+	if (unlikely(port->dd->sr))
+		return;
+
 	if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
 		mod_timer(&port->cmd_timer,
 			jiffies + msecs_to_jiffies(30000));
@@ -675,66 +742,6 @@
 }
 
 /*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port   Pointer to the port data structure.
- * @tag    Tag of the command.
- * @data   Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- *	None
- */
-static void mtip_async_complete(struct mtip_port *port,
-				int tag,
-				void *data,
-				int status)
-{
-	struct mtip_cmd *command;
-	struct driver_data *dd = data;
-	int cb_status = status ? -EIO : 0;
-
-	if (unlikely(!dd) || unlikely(!port))
-		return;
-
-	command = &port->commands[tag];
-
-	if (unlikely(status == PORT_IRQ_TF_ERR)) {
-		dev_warn(&port->dd->pdev->dev,
-			"Command tag %d failed due to TFE\n", tag);
-	}
-
-	/* Upper layer callback */
-	if (likely(command->async_callback))
-		command->async_callback(command->async_data, cb_status);
-
-	command->async_callback = NULL;
-	command->comp_func = NULL;
-
-	/* Unmap the DMA scatter list entries */
-	dma_unmap_sg(&dd->pdev->dev,
-		command->sg,
-		command->scatter_ents,
-		command->direction);
-
-	/* Clear the allocated and active bits for the command */
-	atomic_set(&port->commands[tag].active, 0);
-	release_slot(port, tag);
-
-	if (unlikely(command->unaligned))
-		up(&port->cmd_slot_unal);
-	else
-		up(&port->cmd_slot);
-}
-
-/*
  * Internal command completion callback function.
  *
  * This function is normally called by the driver ISR when an internal
@@ -854,7 +861,6 @@
 					"Missing completion func for tag %d",
 					tag);
 				if (mtip_check_surprise_removal(dd->pdev)) {
-					mtip_command_cleanup(dd);
 					/* don't proceed further */
 					return;
 				}
@@ -1018,14 +1024,12 @@
 					command->comp_data,
 					0);
 			} else {
-				dev_warn(&dd->pdev->dev,
-					"Null completion "
-					"for tag %d",
+				dev_dbg(&dd->pdev->dev,
+					"Null completion for tag %d",
 					tag);
 
 				if (mtip_check_surprise_removal(
 					dd->pdev)) {
-					mtip_command_cleanup(dd);
 					return;
 				}
 			}
@@ -1145,7 +1149,6 @@
 
 		if (unlikely(port_stat & PORT_IRQ_ERR)) {
 			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
-				mtip_command_cleanup(dd);
 				/* don't proceed further */
 				return IRQ_HANDLED;
 			}
@@ -2806,34 +2809,51 @@
 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
 						size_t len, loff_t *offset)
 {
+	struct driver_data *dd =  (struct driver_data *)f->private_data;
 	int size = *offset;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
+	int rv = 0;
 
 	if (!len || *offset)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: status buffer\n");
+		return -ENOMEM;
+	}
+
 	size += show_device_status(NULL, buf);
 
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
 				  size_t len, loff_t *offset)
 {
 	struct driver_data *dd =  (struct driver_data *)f->private_data;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
 	u32 group_allocated;
 	int size = *offset;
-	int n;
+	int n, rv = 0;
 
 	if (!len || size)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: register buffer\n");
+		return -ENOMEM;
+	}
+
 	size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
 	for (n = dd->slot_groups-1; n >= 0; n--)
@@ -2888,21 +2908,30 @@
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
 				  size_t len, loff_t *offset)
 {
 	struct driver_data *dd =  (struct driver_data *)f->private_data;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
 	int size = *offset;
+	int rv = 0;
 
 	if (!len || size)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: flag buffer\n");
+		return -ENOMEM;
+	}
+
 	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
 							dd->port->flags);
 	size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
@@ -2911,9 +2940,10 @@
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static const struct file_operations mtip_device_status_fops = {
@@ -3006,6 +3036,46 @@
 		debugfs_remove_recursive(dd->dfs_node);
 }
 
+static int mtip_free_orphan(struct driver_data *dd)
+{
+	struct kobject *kobj;
+
+	if (dd->bdev) {
+		if (dd->bdev->bd_holders >= 1)
+			return -2;
+
+		bdput(dd->bdev);
+		dd->bdev = NULL;
+	}
+
+	mtip_hw_debugfs_exit(dd);
+
+	spin_lock(&rssd_index_lock);
+	ida_remove(&rssd_index_ida, dd->index);
+	spin_unlock(&rssd_index_lock);
+
+	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
+			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+		put_disk(dd->disk);
+	} else {
+		if (dd->disk) {
+			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+			if (kobj) {
+				mtip_hw_sysfs_exit(dd, kobj);
+				kobject_put(kobj);
+			}
+			del_gendisk(dd->disk);
+			dd->disk = NULL;
+		}
+		if (dd->queue) {
+			dd->queue->queuedata = NULL;
+			blk_cleanup_queue(dd->queue);
+			dd->queue = NULL;
+		}
+	}
+	kfree(dd);
+	return 0;
+}
 
 /*
  * Perform any init/resume time hardware setup
@@ -3154,6 +3224,7 @@
 	unsigned long slot, slot_start, slot_wrap;
 	unsigned int num_cmd_slots = dd->slot_groups * 32;
 	struct mtip_port *port = dd->port;
+	int ret;
 
 	while (1) {
 		/*
@@ -3164,13 +3235,18 @@
 			!(port->flags & MTIP_PF_PAUSE_IO));
 
 		if (kthread_should_stop())
+			goto st_out;
+
+		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
+		/* If I am an orphan, start self cleanup */
+		if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
 			break;
 
 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
 				&dd->dd_flag)))
-			break;
+			goto st_out;
 
-		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
 			slot = 1;
 			/* used to restrict the loop to one iteration */
@@ -3201,7 +3277,7 @@
 
 			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		} else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
-			if (!mtip_ftl_rebuild_poll(dd))
+			if (mtip_ftl_rebuild_poll(dd) < 0)
 				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
 							&dd->dd_flag);
 			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
@@ -3209,8 +3285,30 @@
 		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
 
 		if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
-			break;
+			goto st_out;
 	}
+
+	/* wait for pci remove to exit */
+	while (1) {
+		if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag))
+			break;
+		msleep_interruptible(1000);
+		if (kthread_should_stop())
+			goto st_out;
+	}
+
+	while (1) {
+		ret = mtip_free_orphan(dd);
+		if (!ret) {
+			/* NOTE: All data structures are invalid, do not
+			 * access any here */
+			return 0;
+		}
+		msleep_interruptible(1000);
+		if (kthread_should_stop())
+			goto st_out;
+	}
+st_out:
 	return 0;
 }
 
@@ -3437,13 +3535,13 @@
 		rv = -EFAULT;
 		goto out3;
 	}
+	mtip_dump_identify(dd->port);
 
 	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
 		MTIP_FTL_REBUILD_MAGIC) {
 		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
 		return MTIP_FTL_REBUILD_MAGIC;
 	}
-	mtip_dump_identify(dd->port);
 
 	/* check write protect, over temp and rebuild statuses */
 	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
@@ -3467,8 +3565,8 @@
 		}
 		if (buf[288] == 0xBF) {
 			dev_info(&dd->pdev->dev,
-				"Drive indicates rebuild has failed.\n");
-			/* TODO */
+				"Drive is in security locked state.\n");
+			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
 		}
 	}
 
@@ -3523,9 +3621,8 @@
 	 * Send standby immediate (E0h) to the drive so that it
 	 * saves its state.
 	 */
-	if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
-
-		if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+	if (!dd->sr) {
+		if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
 			if (mtip_standby_immediate(dd->port))
 				dev_warn(&dd->pdev->dev,
 					"STANDBY IMMEDIATE failed\n");
@@ -3551,6 +3648,7 @@
 			dd->port->command_list_dma);
 	/* Free the memory allocated for the for structure. */
 	kfree(dd->port);
+	dd->port = NULL;
 
 	return 0;
 }
@@ -3572,7 +3670,8 @@
 	 * Send standby immediate (E0h) to the drive so that it
 	 * saves its state.
 	 */
-	mtip_standby_immediate(dd->port);
+	if (!dd->sr && dd->port)
+		mtip_standby_immediate(dd->port);
 
 	return 0;
 }
@@ -3887,6 +3986,10 @@
 			bio_endio(bio, -ENODATA);
 			return;
 		}
+		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+			bio_endio(bio, -ENXIO);
+			return;
+		}
 	}
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
@@ -4010,6 +4113,8 @@
 	dd->disk->private_data	= dd;
 	dd->index		= index;
 
+	mtip_hw_debugfs_init(dd);
+
 	/*
 	 * if rebuild pending, start the service thread, and delay the block
 	 * queue creation and add_disk()
@@ -4068,6 +4173,7 @@
 	/* Enable the block device and add it to /dev */
 	add_disk(dd->disk);
 
+	dd->bdev = bdget_disk(dd->disk, 0);
 	/*
 	 * Now that the disk is active, initialize any sysfs attributes
 	 * managed by the protocol layer.
@@ -4077,7 +4183,6 @@
 		mtip_hw_sysfs_init(dd, kobj);
 		kobject_put(kobj);
 	}
-	mtip_hw_debugfs_init(dd);
 
 	if (dd->mtip_svc_handler) {
 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4103,7 +4208,8 @@
 	return rv;
 
 kthread_run_error:
-	mtip_hw_debugfs_exit(dd);
+	bdput(dd->bdev);
+	dd->bdev = NULL;
 
 	/* Delete our gendisk. This also removes the device from /dev */
 	del_gendisk(dd->disk);
@@ -4112,6 +4218,7 @@
 	blk_cleanup_queue(dd->queue);
 
 block_queue_alloc_init_error:
+	mtip_hw_debugfs_exit(dd);
 disk_index_error:
 	spin_lock(&rssd_index_lock);
 	ida_remove(&rssd_index_ida, index);
@@ -4141,40 +4248,48 @@
 {
 	struct kobject *kobj;
 
-	if (dd->mtip_svc_handler) {
-		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
-		wake_up_interruptible(&dd->port->svc_wait);
-		kthread_stop(dd->mtip_svc_handler);
-	}
+	if (!dd->sr) {
+		mtip_hw_debugfs_exit(dd);
 
-	/* Clean up the sysfs attributes, if created */
-	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
-		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
-		if (kobj) {
-			mtip_hw_sysfs_exit(dd, kobj);
-			kobject_put(kobj);
+		if (dd->mtip_svc_handler) {
+			set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+			wake_up_interruptible(&dd->port->svc_wait);
+			kthread_stop(dd->mtip_svc_handler);
 		}
+
+		/* Clean up the sysfs attributes, if created */
+		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+			if (kobj) {
+				mtip_hw_sysfs_exit(dd, kobj);
+				kobject_put(kobj);
+			}
+		}
+		/*
+		 * Delete our gendisk structure. This also removes the device
+		 * from /dev
+		 */
+		if (dd->bdev) {
+			bdput(dd->bdev);
+			dd->bdev = NULL;
+		}
+		if (dd->disk) {
+			if (dd->disk->queue) {
+				del_gendisk(dd->disk);
+				blk_cleanup_queue(dd->queue);
+				dd->queue = NULL;
+			} else
+				put_disk(dd->disk);
+		}
+		dd->disk  = NULL;
+
+		spin_lock(&rssd_index_lock);
+		ida_remove(&rssd_index_ida, dd->index);
+		spin_unlock(&rssd_index_lock);
+	} else {
+		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+						dd->disk->disk_name);
 	}
-	mtip_hw_debugfs_exit(dd);
-
-	/*
-	 * Delete our gendisk structure. This also removes the device
-	 * from /dev
-	 */
-	if (dd->disk) {
-		if (dd->disk->queue)
-			del_gendisk(dd->disk);
-		else
-			put_disk(dd->disk);
-	}
-
-	spin_lock(&rssd_index_lock);
-	ida_remove(&rssd_index_ida, dd->index);
-	spin_unlock(&rssd_index_lock);
-
-	blk_cleanup_queue(dd->queue);
-	dd->disk  = NULL;
-	dd->queue = NULL;
 
 	/* De-initialize the protocol layer. */
 	mtip_hw_exit(dd);
@@ -4490,8 +4605,7 @@
 static void mtip_pci_remove(struct pci_dev *pdev)
 {
 	struct driver_data *dd = pci_get_drvdata(pdev);
-	int counter = 0;
-	unsigned long flags;
+	unsigned long flags, to;
 
 	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
 
@@ -4500,17 +4614,22 @@
 	list_add(&dd->remove_list, &removing_list);
 	spin_unlock_irqrestore(&dev_lock, flags);
 
-	if (mtip_check_surprise_removal(pdev)) {
-		while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
-			counter++;
-			msleep(20);
-			if (counter == 10) {
-				/* Cleanup the outstanding commands */
-				mtip_command_cleanup(dd);
-				break;
-			}
-		}
+	mtip_check_surprise_removal(pdev);
+	synchronize_irq(dd->pdev->irq);
+
+	/* Spin until workers are done */
+	to = jiffies + msecs_to_jiffies(4000);
+	do {
+		msleep(20);
+	} while (atomic_read(&dd->irq_workers_active) != 0 &&
+		time_before(jiffies, to));
+
+	if (atomic_read(&dd->irq_workers_active) != 0) {
+		dev_warn(&dd->pdev->dev,
+			"Completion workers still active!\n");
 	}
+	/* Cleanup the outstanding commands */
+	mtip_command_cleanup(dd);
 
 	/* Clean up the block layer. */
 	mtip_block_remove(dd);
@@ -4529,8 +4648,15 @@
 	list_del_init(&dd->remove_list);
 	spin_unlock_irqrestore(&dev_lock, flags);
 
-	kfree(dd);
+	if (!dd->sr)
+		kfree(dd);
+	else
+		set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
+
 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+	pci_set_drvdata(pdev, NULL);
+	pci_dev_put(pdev);
+
 }
 
 /*
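
The rewritten mtip_command_cleanup() walks the allocated-tag bitmap with
find_next_bit(); the same walk can be expressed with the bitmap iterator
helper, shown purely as an illustration of the idiom (the driver keeps the
open-coded loop):

	unsigned int tag;

	for_each_set_bit(tag, port->allocated, dd->slot_groups * 32)
		if (atomic_read(&port->commands[tag].active))
			mtip_async_complete(port, tag, dd, -ENODEV);
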
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bb8a29..9be7a15 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -140,6 +140,7 @@
 	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
 	MTIP_PF_ISSUE_CMDS_BIT      = 5,
 	MTIP_PF_REBUILD_BIT         = 6,
+	MTIP_PF_SR_CLEANUP_BIT      = 7,
 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
 
 	/* below are bit numbers in 'dd_flag' defined in driver_data */
@@ -147,15 +148,18 @@
 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
 	MTIP_DDF_OVER_TEMP_BIT      = 2,
 	MTIP_DDF_WRITE_PROTECT_BIT  = 3,
-	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
-				(1 << MTIP_DDF_SEC_LOCK_BIT) |
-				(1 << MTIP_DDF_OVER_TEMP_BIT) |
-				(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-
+	MTIP_DDF_REMOVE_DONE_BIT    = 4,
 	MTIP_DDF_CLEANUP_BIT        = 5,
 	MTIP_DDF_RESUME_BIT         = 6,
 	MTIP_DDF_INIT_DONE_BIT      = 7,
 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
+
+	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+				(1 << MTIP_DDF_SEC_LOCK_BIT) |
+				(1 << MTIP_DDF_OVER_TEMP_BIT) |
+				(1 << MTIP_DDF_WRITE_PROTECT_BIT) |
+				(1 << MTIP_DDF_REBUILD_FAILED_BIT)),
+
 };
 
 struct smart_attr {
@@ -499,6 +503,8 @@
 
 	bool trim_supp; /* flag indicating trim support */
 
+	bool sr;
+
 	int numa_node; /* NUMA support */
 
 	char workq_name[32];
@@ -511,6 +517,8 @@
 
 	int isr_binding;
 
+	struct block_device *bdev;
+
 	int unal_qdepth; /* qdepth of unaligned IO queue */
 
 	struct list_head online_list; /* linkage for online list */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
new file mode 100644
index 0000000..b5d8423
--- /dev/null
+++ b/drivers/block/null_blk.c
@@ -0,0 +1,635 @@
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+
+struct nullb_cmd {
+	struct list_head list;
+	struct llist_node ll_list;
+	struct call_single_data csd;
+	struct request *rq;
+	struct bio *bio;
+	unsigned int tag;
+	struct nullb_queue *nq;
+};
+
+struct nullb_queue {
+	unsigned long *tag_map;
+	wait_queue_head_t wait;
+	unsigned int queue_depth;
+
+	struct nullb_cmd *cmds;
+};
+
+struct nullb {
+	struct list_head list;
+	unsigned int index;
+	struct request_queue *q;
+	struct gendisk *disk;
+	struct hrtimer timer;
+	unsigned int queue_depth;
+	spinlock_t lock;
+
+	struct nullb_queue *queues;
+	unsigned int nr_queues;
+};
+
+static LIST_HEAD(nullb_list);
+static struct mutex lock;
+static int null_major;
+static int nullb_indexes;
+
+struct completion_queue {
+	struct llist_head list;
+	struct hrtimer timer;
+};
+
+/*
+ * These are per-cpu for now, they will need to be configured by the
+ * complete_queues parameter and appropriately mapped.
+ */
+static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+
+enum {
+	NULL_IRQ_NONE		= 0,
+	NULL_IRQ_SOFTIRQ	= 1,
+	NULL_IRQ_TIMER		= 2,
+
+	NULL_Q_BIO		= 0,
+	NULL_Q_RQ		= 1,
+	NULL_Q_MQ		= 2,
+};
+
+static int submit_queues = 1;
+module_param(submit_queues, int, S_IRUGO);
+MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+
+static int home_node = NUMA_NO_NODE;
+module_param(home_node, int, S_IRUGO);
+MODULE_PARM_DESC(home_node, "Home node for the device");
+
+static int queue_mode = NULL_Q_MQ;
+module_param(queue_mode, int, S_IRUGO);
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
+
+static int gb = 250;
+module_param(gb, int, S_IRUGO);
+MODULE_PARM_DESC(gb, "Size in GB");
+
+static int bs = 512;
+module_param(bs, int, S_IRUGO);
+MODULE_PARM_DESC(bs, "Block size (in bytes)");
+
+static int nr_devices = 2;
+module_param(nr_devices, int, S_IRUGO);
+MODULE_PARM_DESC(nr_devices, "Number of devices to register");
+
+static int irqmode = NULL_IRQ_SOFTIRQ;
+module_param(irqmode, int, S_IRUGO);
+MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
+
+static int completion_nsec = 10000;
+module_param(completion_nsec, int, S_IRUGO);
+MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
+
+static int hw_queue_depth = 64;
+module_param(hw_queue_depth, int, S_IRUGO);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
+
+static bool use_per_node_hctx = true;
+module_param(use_per_node_hctx, bool, S_IRUGO);
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+
+static void put_tag(struct nullb_queue *nq, unsigned int tag)
+{
+	clear_bit_unlock(tag, nq->tag_map);
+
+	if (waitqueue_active(&nq->wait))
+		wake_up(&nq->wait);
+}
+
+static unsigned int get_tag(struct nullb_queue *nq)
+{
+	unsigned int tag;
+
+	do {
+		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
+		if (tag >= nq->queue_depth)
+			return -1U;
+	} while (test_and_set_bit_lock(tag, nq->tag_map));
+
+	return tag;
+}
+
+static void free_cmd(struct nullb_cmd *cmd)
+{
+	put_tag(cmd->nq, cmd->tag);
+}
+
+static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
+{
+	struct nullb_cmd *cmd;
+	unsigned int tag;
+
+	tag = get_tag(nq);
+	if (tag != -1U) {
+		cmd = &nq->cmds[tag];
+		cmd->tag = tag;
+		cmd->nq = nq;
+		return cmd;
+	}
+
+	return NULL;
+}
+
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+{
+	struct nullb_cmd *cmd;
+	DEFINE_WAIT(wait);
+
+	cmd = __alloc_cmd(nq);
+	if (cmd || !can_wait)
+		return cmd;
+
+	do {
+		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+		cmd = __alloc_cmd(nq);
+		if (cmd)
+			break;
+
+		io_schedule();
+	} while (1);
+
+	finish_wait(&nq->wait, &wait);
+	return cmd;
+}
+
+static void end_cmd(struct nullb_cmd *cmd)
+{
+	if (cmd->rq) {
+		if (queue_mode == NULL_Q_MQ)
+			blk_mq_end_io(cmd->rq, 0);
+		else {
+			INIT_LIST_HEAD(&cmd->rq->queuelist);
+			blk_end_request_all(cmd->rq, 0);
+		}
+	} else if (cmd->bio)
+		bio_endio(cmd->bio, 0);
+
+	if (queue_mode != NULL_Q_MQ)
+		free_cmd(cmd);
+}
+
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+{
+	struct completion_queue *cq;
+	struct llist_node *entry;
+	struct nullb_cmd *cmd;
+
+	cq = &per_cpu(completion_queues, smp_processor_id());
+
+	while ((entry = llist_del_all(&cq->list)) != NULL) {
+		do {
+			cmd = container_of(entry, struct nullb_cmd, ll_list);
+			end_cmd(cmd);
+			entry = entry->next;
+		} while (entry);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void null_cmd_end_timer(struct nullb_cmd *cmd)
+{
+	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+
+	cmd->ll_list.next = NULL;
+	if (llist_add(&cmd->ll_list, &cq->list)) {
+		ktime_t kt = ktime_set(0, completion_nsec);
+
+		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
+	}
+
+	put_cpu();
+}
+
+static void null_softirq_done_fn(struct request *rq)
+{
+	blk_end_request_all(rq, 0);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+
+static void null_ipi_cmd_end_io(void *data)
+{
+	struct completion_queue *cq;
+	struct llist_node *entry, *next;
+	struct nullb_cmd *cmd;
+
+	cq = &per_cpu(completion_queues, smp_processor_id());
+
+	entry = llist_del_all(&cq->list);
+
+	while (entry) {
+		next = entry->next;
+		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
+		end_cmd(cmd);
+		entry = next;
+	}
+}
+
+static void null_cmd_end_ipi(struct nullb_cmd *cmd)
+{
+	struct call_single_data *data = &cmd->csd;
+	int cpu = get_cpu();
+	struct completion_queue *cq = &per_cpu(completion_queues, cpu);
+
+	cmd->ll_list.next = NULL;
+
+	if (llist_add(&cmd->ll_list, &cq->list)) {
+		data->func = null_ipi_cmd_end_io;
+		data->flags = 0;
+		__smp_call_function_single(cpu, data, 0);
+	}
+
+	put_cpu();
+}
+
+#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+
+static inline void null_handle_cmd(struct nullb_cmd *cmd)
+{
+	/* Complete IO by inline, softirq or timer */
+	switch (irqmode) {
+	case NULL_IRQ_NONE:
+		end_cmd(cmd);
+		break;
+	case NULL_IRQ_SOFTIRQ:
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+		null_cmd_end_ipi(cmd);
+#else
+		end_cmd(cmd);
+#endif
+		break;
+	case NULL_IRQ_TIMER:
+		null_cmd_end_timer(cmd);
+		break;
+	}
+}
+
+static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
+{
+	int index = 0;
+
+	if (nullb->nr_queues != 1)
+		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
+
+	return &nullb->queues[index];
+}
+
+static void null_queue_bio(struct request_queue *q, struct bio *bio)
+{
+	struct nullb *nullb = q->queuedata;
+	struct nullb_queue *nq = nullb_to_queue(nullb);
+	struct nullb_cmd *cmd;
+
+	cmd = alloc_cmd(nq, 1);
+	cmd->bio = bio;
+
+	null_handle_cmd(cmd);
+}
+
+static int null_rq_prep_fn(struct request_queue *q, struct request *req)
+{
+	struct nullb *nullb = q->queuedata;
+	struct nullb_queue *nq = nullb_to_queue(nullb);
+	struct nullb_cmd *cmd;
+
+	cmd = alloc_cmd(nq, 0);
+	if (cmd) {
+		cmd->rq = req;
+		req->special = cmd;
+		return BLKPREP_OK;
+	}
+
+	return BLKPREP_DEFER;
+}
+
+static void null_request_fn(struct request_queue *q)
+{
+	struct request *rq;
+
+	while ((rq = blk_fetch_request(q)) != NULL) {
+		struct nullb_cmd *cmd = rq->special;
+
+		spin_unlock_irq(q->queue_lock);
+		null_handle_cmd(cmd);
+		spin_lock_irq(q->queue_lock);
+	}
+}
+
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	struct nullb_cmd *cmd = rq->special;
+
+	cmd->rq = rq;
+	cmd->nq = hctx->driver_data;
+
+	null_handle_cmd(cmd);
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
+{
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
+				hctx_index);
+}
+
+static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
+{
+	kfree(hctx);
+}
+
+static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			  unsigned int index)
+{
+	struct nullb *nullb = data;
+	struct nullb_queue *nq = &nullb->queues[index];
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+	nullb->nr_queues++;
+	hctx->driver_data = nq;
+
+	return 0;
+}
+
+static struct blk_mq_ops null_mq_ops = {
+	.queue_rq       = null_queue_rq,
+	.map_queue      = blk_mq_map_queue,
+	.init_hctx	= null_init_hctx,
+};
+
+static struct blk_mq_reg null_mq_reg = {
+	.ops		= &null_mq_ops,
+	.queue_depth	= 64,
+	.cmd_size	= sizeof(struct nullb_cmd),
+	.flags		= BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void null_del_dev(struct nullb *nullb)
+{
+	list_del_init(&nullb->list);
+
+	del_gendisk(nullb->disk);
+	if (queue_mode == NULL_Q_MQ)
+		blk_mq_free_queue(nullb->q);
+	else
+		blk_cleanup_queue(nullb->q);
+	put_disk(nullb->disk);
+	kfree(nullb);
+}
+
+static int null_open(struct block_device *bdev, fmode_t mode)
+{
+	return 0;
+}
+
+static void null_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
+static const struct block_device_operations null_fops = {
+	.owner =	THIS_MODULE,
+	.open =		null_open,
+	.release =	null_release,
+};
+
+static int setup_commands(struct nullb_queue *nq)
+{
+	struct nullb_cmd *cmd;
+	int i, tag_size;
+
+	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
+	if (!nq->cmds)
+		return 1;
+
+	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
+	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
+	if (!nq->tag_map) {
+		kfree(nq->cmds);
+		return 1;
+	}
+
+	for (i = 0; i < nq->queue_depth; i++) {
+		cmd = &nq->cmds[i];
+		INIT_LIST_HEAD(&cmd->list);
+		cmd->ll_list.next = NULL;
+		cmd->tag = -1U;
+	}
+
+	return 0;
+}
+
+static void cleanup_queue(struct nullb_queue *nq)
+{
+	kfree(nq->tag_map);
+	kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+	int i;
+
+	for (i = 0; i < nullb->nr_queues; i++)
+		cleanup_queue(&nullb->queues[i]);
+
+	kfree(nullb->queues);
+}
+
+static int setup_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i;
+
+	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	if (!nullb->queues)
+		return 1;
+
+	nullb->nr_queues = 0;
+	nullb->queue_depth = hw_queue_depth;
+
+	if (queue_mode == NULL_Q_MQ)
+		return 0;
+
+	for (i = 0; i < submit_queues; i++) {
+		nq = &nullb->queues[i];
+		init_waitqueue_head(&nq->wait);
+		nq->queue_depth = hw_queue_depth;
+		if (setup_commands(nq))
+			break;
+		nullb->nr_queues++;
+	}
+
+	if (i == submit_queues)
+		return 0;
+
+	cleanup_queues(nullb);
+	return 1;
+}
+
+static int null_add_dev(void)
+{
+	struct gendisk *disk;
+	struct nullb *nullb;
+	sector_t size;
+
+	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+	if (!nullb)
+		return -ENOMEM;
+
+	spin_lock_init(&nullb->lock);
+
+	if (setup_queues(nullb))
+		goto err;
+
+	if (queue_mode == NULL_Q_MQ) {
+		null_mq_reg.numa_node = home_node;
+		null_mq_reg.queue_depth = hw_queue_depth;
+
+		if (use_per_node_hctx) {
+			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
+			null_mq_reg.ops->free_hctx = null_free_hctx;
+
+			null_mq_reg.nr_hw_queues = nr_online_nodes;
+		} else {
+			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
+			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
+
+			null_mq_reg.nr_hw_queues = submit_queues;
+		}
+
+		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+	} else if (queue_mode == NULL_Q_BIO) {
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+		blk_queue_make_request(nullb->q, null_queue_bio);
+	} else {
+		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
+		if (nullb->q)
+			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+	}
+
+	if (!nullb->q)
+		goto queue_fail;
+
+	nullb->q->queuedata = nullb;
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+
+	disk = nullb->disk = alloc_disk_node(1, home_node);
+	if (!disk) {
+queue_fail:
+		if (queue_mode == NULL_Q_MQ)
+			blk_mq_free_queue(nullb->q);
+		else
+			blk_cleanup_queue(nullb->q);
+		cleanup_queues(nullb);
+err:
+		kfree(nullb);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&lock);
+	list_add_tail(&nullb->list, &nullb_list);
+	nullb->index = nullb_indexes++;
+	mutex_unlock(&lock);
+
+	blk_queue_logical_block_size(nullb->q, bs);
+	blk_queue_physical_block_size(nullb->q, bs);
+
+	size = gb * 1024 * 1024 * 1024ULL;
+	sector_div(size, bs);
+	set_capacity(disk, size);
+
+	disk->flags |= GENHD_FL_EXT_DEVT;
+	disk->major		= null_major;
+	disk->first_minor	= nullb->index;
+	disk->fops		= &null_fops;
+	disk->private_data	= nullb;
+	disk->queue		= nullb->q;
+	sprintf(disk->disk_name, "nullb%d", nullb->index);
+	add_disk(disk);
+	return 0;
+}
+
+static int __init null_init(void)
+{
+	unsigned int i;
+
+#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+	if (irqmode == NULL_IRQ_SOFTIRQ) {
+		pr_warn("null_blk: softirq completions not available.\n");
+		pr_warn("null_blk: using direct completions.\n");
+		irqmode = NULL_IRQ_NONE;
+	}
+#endif
+
+	if (submit_queues > nr_cpu_ids)
+		submit_queues = nr_cpu_ids;
+	else if (!submit_queues)
+		submit_queues = 1;
+
+	mutex_init(&lock);
+
+	/* Initialize a separate list for each CPU for issuing softirqs */
+	for_each_possible_cpu(i) {
+		struct completion_queue *cq = &per_cpu(completion_queues, i);
+
+		init_llist_head(&cq->list);
+
+		if (irqmode != NULL_IRQ_TIMER)
+			continue;
+
+		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cq->timer.function = null_cmd_timer_expired;
+	}
+
+	null_major = register_blkdev(0, "nullb");
+	if (null_major < 0)
+		return null_major;
+
+	for (i = 0; i < nr_devices; i++) {
+		if (null_add_dev()) {
+			unregister_blkdev(null_major, "nullb");
+			return -EINVAL;
+		}
+	}
+
+	pr_info("null: module loaded\n");
+	return 0;
+}
+
+static void __exit null_exit(void)
+{
+	struct nullb *nullb;
+
+	unregister_blkdev(null_major, "nullb");
+
+	mutex_lock(&lock);
+	while (!list_empty(&nullb_list)) {
+		nullb = list_entry(nullb_list.next, struct nullb, list);
+		null_del_dev(nullb);
+	}
+	mutex_unlock(&lock);
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da52092..26d03fa 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1949,12 +1949,9 @@
 	if (pci_request_selected_regions(pdev, bars, "nvme"))
 		goto disable_pci;
 
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-	else
-		goto disable_pci;
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+		goto disable;
 
 	pci_set_drvdata(pdev, dev);
 	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@
 
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->pci_dev = pdev;
+
 	result = nvme_set_instance(dev);
 	if (result)
 		goto free;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 5618847..ff8668c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -473,45 +473,31 @@
 {
 	if (!pkt_debugfs_root)
 		return;
-	pd->dfs_f_info = NULL;
 	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
-	if (IS_ERR(pd->dfs_d_root)) {
-		pd->dfs_d_root = NULL;
+	if (!pd->dfs_d_root)
 		return;
-	}
+
 	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
 				pd->dfs_d_root, pd, &debug_fops);
-	if (IS_ERR(pd->dfs_f_info)) {
-		pd->dfs_f_info = NULL;
-		return;
-	}
 }
 
 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
 {
 	if (!pkt_debugfs_root)
 		return;
-	if (pd->dfs_f_info)
-		debugfs_remove(pd->dfs_f_info);
+	debugfs_remove(pd->dfs_f_info);
+	debugfs_remove(pd->dfs_d_root);
 	pd->dfs_f_info = NULL;
-	if (pd->dfs_d_root)
-		debugfs_remove(pd->dfs_d_root);
 	pd->dfs_d_root = NULL;
 }
 
 static void pkt_debugfs_init(void)
 {
 	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-	if (IS_ERR(pkt_debugfs_root)) {
-		pkt_debugfs_root = NULL;
-		return;
-	}
 }
 
 static void pkt_debugfs_cleanup(void)
 {
-	if (!pkt_debugfs_root)
-		return;
 	debugfs_remove(pkt_debugfs_root);
 	pkt_debugfs_root = NULL;
 }
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 6e85e21..a8de2ee 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -654,7 +654,8 @@
 	for (i = 0; i < card->n_targets; i++) {
 		spin_lock_bh(&card->ctrl[i].queue_lock);
 		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
-					     &card->ctrl[i].queue);
+					     &card->ctrl[i].queue,
+					     COMPLETE_DMA);
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 
 		cnt += rsxx_dma_cancel(&card->ctrl[i]);
@@ -748,10 +749,6 @@
 
 	card->eeh_state = 0;
 
-	st = rsxx_eeh_remap_dmas(card);
-	if (st)
-		goto failed_remap_dmas;
-
 	spin_lock_irqsave(&card->irq_lock, flags);
 	if (card->n_targets & RSXX_MAX_TARGETS)
 		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -778,7 +775,6 @@
 	return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
 	for (i = 0; i < card->n_targets; i++) {
 		if (card->ctrl[i].status.buf)
 			pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index d7af441..2284f5d 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -295,13 +295,15 @@
 		return -ENOMEM;
 	}
 
-	blk_size = card->config.data.block_size;
+	if (card->config_valid) {
+		blk_size = card->config.data.block_size;
+		blk_queue_dma_alignment(card->queue, blk_size - 1);
+		blk_queue_logical_block_size(card->queue, blk_size);
+	}
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
 	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
-	blk_queue_dma_alignment(card->queue, blk_size - 1);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
-	blk_queue_logical_block_size(card->queue, blk_size);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f1..fc88ba3 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+	if (dma->cmd != HW_CMD_BLK_DISCARD) {
+		if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+				       get_dma_size(dma),
+				       dma->cmd == HW_CMD_BLK_WRITE ?
+						   PCI_DMA_TODEVICE :
+						   PCI_DMA_FROMDEVICE);
+		}
+	}
+
+	kmem_cache_free(rsxx_dma_pool, dma);
+}
+
 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 				  struct rsxx_dma *dma,
 				  unsigned int status)
@@ -232,21 +247,14 @@
 	if (status & DMA_CANCELLED)
 		ctrl->stats.dma_cancelled++;
 
-	if (dma->dma_addr)
-		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
-			       get_dma_size(dma),
-			       dma->cmd == HW_CMD_BLK_WRITE ?
-					   PCI_DMA_TODEVICE :
-					   PCI_DMA_FROMDEVICE);
-
 	if (dma->cb)
 		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
-	kmem_cache_free(rsxx_dma_pool, dma);
+	rsxx_free_dma(ctrl, dma);
 }
 
 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
-			   struct list_head *q)
+			   struct list_head *q, unsigned int done)
 {
 	struct rsxx_dma *dma;
 	struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@
 
 	list_for_each_entry_safe(dma, tmp, q, list) {
 		list_del(&dma->list);
-		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		if (done & COMPLETE_DMA)
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		else
+			rsxx_free_dma(ctrl, dma);
 		cnt++;
 	}
 
@@ -370,7 +381,7 @@
 
 		/* Clean up the DMA queue */
 		spin_lock(&ctrl->queue_lock);
-		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 		spin_unlock(&ctrl->queue_lock);
 
 		cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
+	int dir;
 
 	hw_cmd_buf = ctrl->cmd.buf;
 
@@ -424,6 +436,31 @@
 			continue;
 		}
 
+		if (dma->cmd != HW_CMD_BLK_DISCARD) {
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				dir = PCI_DMA_TODEVICE;
+			else
+				dir = PCI_DMA_FROMDEVICE;
+
+			/*
+			 * The function pci_map_page is placed here because we
+			 * can only, by design, issue up to 255 commands to the
+			 * hardware at one time per DMA channel. So the maximum
+			 * amount of mapped memory would be 255 * 4 channels *
+			 * 4096 bytes, which is less than 2GB, the limit of an
+			 * x8 Non-HWWD PCIe slot. This way the pci_map_page
+			 * function should never fail because of a lack of
+			 * mappable memory.
+			 */
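+			/* Concretely: 255 * 4 * 4096 B is roughly 4 MiB. */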
+			dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+					dma->pg_off, dma->sub_page.cnt << 9, dir);
+			if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+				push_tracker(ctrl->trackers, tag);
+				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+				continue;
+			}
+		}
+
 		set_tracker_dma(ctrl->trackers, tag, dma);
 		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
 		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
@@ -620,14 +657,6 @@
 	if (!dma)
 		return -ENOMEM;
 
-	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-				     dir ? PCI_DMA_TODEVICE :
-				     PCI_DMA_FROMDEVICE);
-	if (!dma->dma_addr) {
-		kmem_cache_free(rsxx_dma_pool, dma);
-		return -ENOMEM;
-	}
-
 	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr        = laddr;
 	dma->sub_page.off = (dma_off >> 9);
@@ -736,11 +765,9 @@
 	return 0;
 
 bvec_err:
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
+	for (i = 0; i < card->n_targets; i++)
+		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+					FREE_DMA);
 
 	return st;
 }
@@ -990,7 +1017,7 @@
 
 		/* Clean up the DMA queue */
 		spin_lock_bh(&ctrl->queue_lock);
-		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 		spin_unlock_bh(&ctrl->queue_lock);
 
 		rsxx_dma_cancel(ctrl);
@@ -1032,6 +1059,14 @@
 			else
 				card->ctrl[i].stats.reads_issued--;
 
+			if (dma->cmd != HW_CMD_BLK_DISCARD) {
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+			}
+
 			list_add_tail(&dma->list, &issued_dmas[i]);
 			push_tracker(card->ctrl[i].trackers, j);
 			cnt++;
@@ -1043,15 +1078,6 @@
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
 		card->ctrl[i].stats.sw_q_depth += cnt;
 		card->ctrl[i].e_cnt = 0;
-
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			if (dma->dma_addr)
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       dma->cmd == HW_CMD_BLK_WRITE ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-		}
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
@@ -1060,31 +1086,6 @@
 	return 0;
 }
 
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			dma->dma_addr = pci_map_page(card->dev, dma->page,
-					dma->pg_off, get_dma_size(dma),
-					dma->cmd == HW_CMD_BLK_WRITE ?
-					PCI_DMA_TODEVICE :
-					PCI_DMA_FROMDEVICE);
-			if (!dma->dma_addr) {
-				spin_unlock_bh(&card->ctrl[i].queue_lock);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				return -ENOMEM;
-			}
-		}
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
-
-	return 0;
-}
-
 int rsxx_dma_init(void)
 {
 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 5ad5055..6bbc64d 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -52,7 +52,7 @@
 #define RS70_PCI_REV_SUPPORTED	4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT		12
@@ -345,6 +345,11 @@
 	CREG_STAT_TAG_MASK	= 0x0000ff00,
 };
 
+enum rsxx_dma_finish {
+	FREE_DMA	= 0x0,
+	COMPLETE_DMA	= 0x1,
+};
+
 static inline unsigned int CREG_DATA(int N)
 {
 	return CREG_DATA0 + (N << 2);
@@ -379,7 +384,9 @@
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+				struct list_head *q,
+				unsigned int done);
 int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
new file mode 100644
index 0000000..9199c93
--- /dev/null
+++ b/drivers/block/skd_main.c
@@ -0,0 +1,5432 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
+ * Initial Driver Design!
+ * Thomas Swann <tswann@stec-inc.com>
+ * Interrupt handling.
+ * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
+ * biomode implementation.
+ * Akhil Bhansali <abhansali@stec-inc.com>
+ * Added support for DISCARD / FLUSH and FUA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/hdreg.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/aer.h>
+#include <linux/ctype.h>
+#include <linux/wait.h>
+#include <linux/uio.h>
+#include <scsi/scsi.h>
+#include <scsi/sg.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#include "skd_s1120.h"
+
+static int skd_dbg_level;
+static int skd_isr_comp_limit = 4;
+
+enum {
+	STEC_LINK_2_5GTS = 0,
+	STEC_LINK_5GTS = 1,
+	STEC_LINK_8GTS = 2,
+	STEC_LINK_UNKNOWN = 0xFF
+};
+
+enum {
+	SKD_FLUSH_INITIALIZER,
+	SKD_FLUSH_ZERO_SIZE_FIRST,
+	SKD_FLUSH_DATA_SECOND,
+};
+
+#define SKD_ASSERT(expr) \
+	do { \
+		if (unlikely(!(expr))) { \
+			pr_err("Assertion failed! %s,%s,%s,line=%d\n",	\
+			       # expr, __FILE__, __func__, __LINE__); \
+		} \
+	} while (0)
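+/* Note: unlike BUG_ON(), a failed SKD_ASSERT() only logs and continues. */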
+
+#define DRV_NAME "skd"
+#define DRV_VERSION "2.2.1"
+#define DRV_BUILD_ID "0260"
+#define PFX DRV_NAME ": "
+#define DRV_BIN_VERSION 0x100
+#define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
+
+MODULE_AUTHOR("bug-reports: support@stec-inc.com");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
+MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+
+#define PCI_VENDOR_ID_STEC      0x1B39
+#define PCI_DEVICE_ID_S1120     0x0001
+
+#define SKD_FUA_NV		(1 << 1)
+#define SKD_MINORS_PER_DEVICE   16
+
+#define SKD_MAX_QUEUE_DEPTH     200u
+
+#define SKD_PAUSE_TIMEOUT       (5 * 1000)
+
+#define SKD_N_FITMSG_BYTES      (512u)
+
+#define SKD_N_SPECIAL_CONTEXT   32u
+#define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
+
+/* SG elements are 32 bytes, so we can make this 4096 and still be under the
+ * 128KB limit.  That allows 4096*4K = 16M xfer size
+ */
+#define SKD_N_SG_PER_REQ_DEFAULT 256u
+#define SKD_N_SG_PER_SPECIAL    256u
+
+#define SKD_N_COMPLETION_ENTRY  256u
+#define SKD_N_READ_CAP_BYTES    (8u)
+
+#define SKD_N_INTERNAL_BYTES    (512u)
+
+/* 5 bits of uniquifier, 0xF800 */
+#define SKD_ID_INCR             (0x400)
+#define SKD_ID_TABLE_MASK       (3u << 8u)
+#define  SKD_ID_RW_REQUEST      (0u << 8u)
+#define  SKD_ID_INTERNAL        (1u << 8u)
+#define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
+#define  SKD_ID_FIT_MSG         (3u << 8u)
+#define SKD_ID_SLOT_MASK        0x00FFu
+#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
+
+#define SKD_N_TIMEOUT_SLOT      4u
+#define SKD_TIMEOUT_SLOT_MASK   3u
+
+#define SKD_N_MAX_SECTORS 2048u
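+/* 2048 sectors of 512 B each, i.e. a 1 MiB cap per transfer. */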
+
+#define SKD_MAX_RETRIES 2u
+
+#define SKD_TIMER_SECONDS(seconds) (seconds)
+#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
+
+#define INQ_STD_NBYTES 36
+#define SKD_DISCARD_CDB_LENGTH	24
+
+enum skd_drvr_state {
+	SKD_DRVR_STATE_LOAD,
+	SKD_DRVR_STATE_IDLE,
+	SKD_DRVR_STATE_BUSY,
+	SKD_DRVR_STATE_STARTING,
+	SKD_DRVR_STATE_ONLINE,
+	SKD_DRVR_STATE_PAUSING,
+	SKD_DRVR_STATE_PAUSED,
+	SKD_DRVR_STATE_DRAINING_TIMEOUT,
+	SKD_DRVR_STATE_RESTARTING,
+	SKD_DRVR_STATE_RESUMING,
+	SKD_DRVR_STATE_STOPPING,
+	SKD_DRVR_STATE_FAULT,
+	SKD_DRVR_STATE_DISAPPEARED,
+	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
+	SKD_DRVR_STATE_BUSY_ERASE,
+	SKD_DRVR_STATE_BUSY_SANITIZE,
+	SKD_DRVR_STATE_BUSY_IMMINENT,
+	SKD_DRVR_STATE_WAIT_BOOT,
+	SKD_DRVR_STATE_SYNCING,
+};
+
+#define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
+#define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
+#define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
+#define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
+#define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
+#define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
+#define SKD_START_WAIT_SECONDS  90u
+
+enum skd_req_state {
+	SKD_REQ_STATE_IDLE,
+	SKD_REQ_STATE_SETUP,
+	SKD_REQ_STATE_BUSY,
+	SKD_REQ_STATE_COMPLETED,
+	SKD_REQ_STATE_TIMEOUT,
+	SKD_REQ_STATE_ABORTED,
+};
+
+enum skd_fit_msg_state {
+	SKD_MSG_STATE_IDLE,
+	SKD_MSG_STATE_BUSY,
+};
+
+enum skd_check_status_action {
+	SKD_CHECK_STATUS_REPORT_GOOD,
+	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
+	SKD_CHECK_STATUS_REQUEUE_REQUEST,
+	SKD_CHECK_STATUS_REPORT_ERROR,
+	SKD_CHECK_STATUS_BUSY_IMMINENT,
+};
+
+struct skd_fitmsg_context {
+	enum skd_fit_msg_state state;
+
+	struct skd_fitmsg_context *next;
+
+	u32 id;
+	u16 outstanding;
+
+	u32 length;
+	u32 offset;
+
+	u8 *msg_buf;
+	dma_addr_t mb_dma_address;
+};
+
+struct skd_request_context {
+	enum skd_req_state state;
+
+	struct skd_request_context *next;
+
+	u16 id;
+	u32 fitmsg_id;
+
+	struct request *req;
+	u8 flush_cmd;
+	u8 discard_page;
+
+	u32 timeout_stamp;
+	u8 sg_data_dir;
+	struct scatterlist *sg;
+	u32 n_sg;
+	u32 sg_byte_count;
+
+	struct fit_sg_descriptor *sksg_list;
+	dma_addr_t sksg_dma_address;
+
+	struct fit_completion_entry_v1 completion;
+
+	struct fit_comp_error_info err_info;
+
+};
+#define SKD_DATA_DIR_HOST_TO_CARD       1
+#define SKD_DATA_DIR_CARD_TO_HOST       2
+#define SKD_DATA_DIR_NONE		3	/* especially for DISCARD requests. */
+
+struct skd_special_context {
+	struct skd_request_context req;
+
+	u8 orphaned;
+
+	void *data_buf;
+	dma_addr_t db_dma_address;
+
+	u8 *msg_buf;
+	dma_addr_t mb_dma_address;
+};
+
+struct skd_sg_io {
+	fmode_t mode;
+	void __user *argp;
+
+	struct sg_io_hdr sg;
+
+	u8 cdb[16];
+
+	u32 dxfer_len;
+	u32 iovcnt;
+	struct sg_iovec *iov;
+	struct sg_iovec no_iov_iov;
+
+	struct skd_special_context *skspcl;
+};
+
+typedef enum skd_irq_type {
+	SKD_IRQ_LEGACY,
+	SKD_IRQ_MSI,
+	SKD_IRQ_MSIX
+} skd_irq_type_t;
+
+#define SKD_MAX_BARS                    2
+
+struct skd_device {
+	volatile void __iomem *mem_map[SKD_MAX_BARS];
+	resource_size_t mem_phys[SKD_MAX_BARS];
+	u32 mem_size[SKD_MAX_BARS];
+
+	skd_irq_type_t irq_type;
+	u32 msix_count;
+	struct skd_msix_entry *msix_entries;
+
+	struct pci_dev *pdev;
+	int pcie_error_reporting_is_enabled;
+
+	spinlock_t lock;
+	struct gendisk *disk;
+	struct request_queue *queue;
+	struct device *class_dev;
+	int gendisk_on;
+	int sync_done;
+
+	atomic_t device_count;
+	u32 devno;
+	u32 major;
+	char name[32];
+	char isr_name[30];
+
+	enum skd_drvr_state state;
+	u32 drive_state;
+
+	u32 in_flight;
+	u32 cur_max_queue_depth;
+	u32 queue_low_water_mark;
+	u32 dev_max_queue_depth;
+
+	u32 num_fitmsg_context;
+	u32 num_req_context;
+
+	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
+	u32 timeout_stamp;
+	struct skd_fitmsg_context *skmsg_free_list;
+	struct skd_fitmsg_context *skmsg_table;
+
+	struct skd_request_context *skreq_free_list;
+	struct skd_request_context *skreq_table;
+
+	struct skd_special_context *skspcl_free_list;
+	struct skd_special_context *skspcl_table;
+
+	struct skd_special_context internal_skspcl;
+	u32 read_cap_blocksize;
+	u32 read_cap_last_lba;
+	int read_cap_is_valid;
+	int inquiry_is_valid;
+	u8 inq_serial_num[13];  /*12 chars plus null term */
+	u8 id_str[80];          /* holds a composite name (pci + sernum) */
+
+	u8 skcomp_cycle;
+	u32 skcomp_ix;
+	struct fit_completion_entry_v1 *skcomp_table;
+	struct fit_comp_error_info *skerr_table;
+	dma_addr_t cq_dma_address;
+
+	wait_queue_head_t waitq;
+
+	struct timer_list timer;
+	u32 timer_countdown;
+	u32 timer_substate;
+
+	int n_special;
+	int sgs_per_request;
+	u32 last_mtd;
+
+	u32 proto_ver;
+
+	int dbg_level;
+	u32 connect_time_stamp;
+	int connect_retries;
+#define SKD_MAX_CONNECT_RETRIES 16
+	u32 drive_jiffies;
+
+	u32 timo_slot;
+
+	struct work_struct completion_worker;
+};
+
+#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
+#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
+#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
+
+static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
+{
+	u32 val;
+
+	if (likely(skdev->dbg_level < 2))
+		return readl(skdev->mem_map[1] + offset);
+	else {
+		barrier();
+		val = readl(skdev->mem_map[1] + offset);
+		barrier();
+		pr_debug("%s:%s:%d offset %x = %x\n",
+			 skdev->name, __func__, __LINE__, offset, val);
+		return val;
+	}
+}
+
+static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
+				   u32 offset)
+{
+	if (likely(skdev->dbg_level < 2)) {
+		writel(val, skdev->mem_map[1] + offset);
+		barrier();
+	} else {
+		barrier();
+		writel(val, skdev->mem_map[1] + offset);
+		barrier();
+		pr_debug("%s:%s:%d offset %x = %x\n",
+			 skdev->name, __func__, __LINE__, offset, val);
+	}
+}
+
+static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
+				   u32 offset)
+{
+	if (likely(skdev->dbg_level < 2)) {
+		writeq(val, skdev->mem_map[1] + offset);
+		barrier();
+	} else {
+		barrier();
+		writeq(val, skdev->mem_map[1] + offset);
+		barrier();
+		pr_debug("%s:%s:%d offset %x = %016llx\n",
+			 skdev->name, __func__, __LINE__, offset, val);
+	}
+}
+
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+static int skd_isr_type = SKD_IRQ_DEFAULT;
+
+module_param(skd_isr_type, int, 0444);
+MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
+		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
+
+#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
+static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+
+module_param(skd_max_req_per_msg, int, 0444);
+MODULE_PARM_DESC(skd_max_req_per_msg,
+		 "Maximum SCSI requests packed in a single message."
+		 " (1-14, default==1)");
+
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
+static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+
+module_param(skd_max_queue_depth, int, 0444);
+MODULE_PARM_DESC(skd_max_queue_depth,
+		 "Maximum SCSI requests issued to s1120."
+		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
+
+static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+module_param(skd_sgs_per_request, int, 0444);
+MODULE_PARM_DESC(skd_sgs_per_request,
+		 "Maximum SG elements per block request."
+		 " (1-4096, default==256)");
+
+static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+module_param(skd_max_pass_thru, int, 0444);
+MODULE_PARM_DESC(skd_max_pass_thru,
+		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+
+module_param(skd_dbg_level, int, 0444);
+MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
+
+module_param(skd_isr_comp_limit, int, 0444);
+MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
+
+/* Major device number dynamically assigned. */
+static u32 skd_major;
+
+static void skd_destruct(struct skd_device *skdev);
+static const struct block_device_operations skd_blockdev_ops;
+static void skd_send_fitmsg(struct skd_device *skdev,
+			    struct skd_fitmsg_context *skmsg);
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+				    struct skd_special_context *skspcl);
+static void skd_request_fn(struct request_queue *rq);
+static void skd_end_request(struct skd_device *skdev,
+			    struct skd_request_context *skreq, int error);
+static int skd_preop_sg_list(struct skd_device *skdev,
+			     struct skd_request_context *skreq);
+static void skd_postop_sg_list(struct skd_device *skdev,
+			       struct skd_request_context *skreq);
+
+static void skd_restart_device(struct skd_device *skdev);
+static int skd_quiesce_dev(struct skd_device *skdev);
+static int skd_unquiesce_dev(struct skd_device *skdev);
+static void skd_release_special(struct skd_device *skdev,
+				struct skd_special_context *skspcl);
+static void skd_disable_interrupts(struct skd_device *skdev);
+static void skd_isr_fwstate(struct skd_device *skdev);
+static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_soft_reset(struct skd_device *skdev);
+
+static const char *skd_name(struct skd_device *skdev);
+const char *skd_drive_state_to_str(int state);
+const char *skd_skdev_state_to_str(enum skd_drvr_state state);
+static void skd_log_skdev(struct skd_device *skdev, const char *event);
+static void skd_log_skmsg(struct skd_device *skdev,
+			  struct skd_fitmsg_context *skmsg, const char *event);
+static void skd_log_skreq(struct skd_device *skdev,
+			  struct skd_request_context *skreq, const char *event);
+
+/*
+ *****************************************************************************
+ * READ/WRITE REQUESTS
+ *****************************************************************************
+ */
+static void skd_fail_all_pending(struct skd_device *skdev)
+{
+	struct request_queue *q = skdev->queue;
+	struct request *req;
+
+	for (;;) {
+		req = blk_peek_request(q);
+		if (req == NULL)
+			break;
+		blk_start_request(req);
+		__blk_end_request_all(req, -EIO);
+	}
+}
+
+static void
+skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
+		int data_dir, unsigned lba,
+		unsigned count)
+{
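+	/* Build a 10-byte CDB: READ(10) is opcode 0x28, WRITE(10) is 0x2a. */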
+	if (data_dir == READ)
+		scsi_req->cdb[0] = 0x28;
+	else
+		scsi_req->cdb[0] = 0x2a;
+
+	scsi_req->cdb[1] = 0;
+	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
+	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
+	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
+	scsi_req->cdb[5] = (lba & 0xff);
+	scsi_req->cdb[6] = 0;
+	scsi_req->cdb[7] = (count & 0xff00) >> 8;
+	scsi_req->cdb[8] = count & 0xff;
+	scsi_req->cdb[9] = 0;
+}
+
+static void
+skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
+			    struct skd_request_context *skreq)
+{
+	skreq->flush_cmd = 1;
+
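+	/* SYNCHRONIZE CACHE(10), opcode 0x35; zero LBA and count flush all. */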
+	scsi_req->cdb[0] = 0x35;
+	scsi_req->cdb[1] = 0;
+	scsi_req->cdb[2] = 0;
+	scsi_req->cdb[3] = 0;
+	scsi_req->cdb[4] = 0;
+	scsi_req->cdb[5] = 0;
+	scsi_req->cdb[6] = 0;
+	scsi_req->cdb[7] = 0;
+	scsi_req->cdb[8] = 0;
+	scsi_req->cdb[9] = 0;
+}
+
+static void
+skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
+		     struct skd_request_context *skreq,
+		     struct page *page,
+		     u32 lba, u32 count)
+{
+	char *buf;
+	unsigned long len;
+	struct request *req;
+
+	buf = page_address(page);
+	len = SKD_DISCARD_CDB_LENGTH;
+
+	scsi_req->cdb[0] = UNMAP;
+	scsi_req->cdb[8] = len;
+
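+	/* UNMAP parameter list: data length, block descriptor length, then
+	 * a single LBA/count block descriptor.
+	 */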
+	put_unaligned_be16(6 + 16, &buf[0]);
+	put_unaligned_be16(16, &buf[2]);
+	put_unaligned_be64(lba, &buf[8]);
+	put_unaligned_be32(count, &buf[16]);
+
+	req = skreq->req;
+	blk_add_request_payload(req, page, len);
+	req->buffer = buf;
+}
+
+static void skd_request_fn_not_online(struct request_queue *q);
+
+static void skd_request_fn(struct request_queue *q)
+{
+	struct skd_device *skdev = q->queuedata;
+	struct skd_fitmsg_context *skmsg = NULL;
+	struct fit_msg_hdr *fmh = NULL;
+	struct skd_request_context *skreq;
+	struct request *req = NULL;
+	struct skd_scsi_request *scsi_req;
+	struct page *page;
+	unsigned long io_flags;
+	int error;
+	u32 lba;
+	u32 count;
+	int data_dir;
+	u32 be_lba;
+	u32 be_count;
+	u64 be_dmaa;
+	u64 cmdctxt;
+	u32 timo_slot;
+	void *cmd_ptr;
+	int flush, fua;
+
+	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+		skd_request_fn_not_online(q);
+		return;
+	}
+
+	if (blk_queue_stopped(skdev->queue)) {
+		if (skdev->skmsg_free_list == NULL ||
+		    skdev->skreq_free_list == NULL ||
+		    skdev->in_flight >= skdev->queue_low_water_mark)
+			/* There is still some kind of shortage */
+			return;
+
+		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
+	}
+
+	/*
+	 * Stop conditions:
+	 *  - There are no more native requests
+	 *  - There are already the maximum number of requests in progress
+	 *  - There are no more skd_request_context entries
+	 *  - There are no more FIT msg buffers
+	 */
+	for (;;) {
+
+		flush = fua = 0;
+
+		req = blk_peek_request(q);
+
+		/* Are there any native requests to start? */
+		if (req == NULL)
+			break;
+
+		lba = (u32)blk_rq_pos(req);
+		count = blk_rq_sectors(req);
+		data_dir = rq_data_dir(req);
+		io_flags = req->cmd_flags;
+
+		if (io_flags & REQ_FLUSH)
+			flush++;
+
+		if (io_flags & REQ_FUA)
+			fua++;
+
+		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
+			 "count=%u(0x%x) dir=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 req, lba, lba, count, count, data_dir);
+
+		/* At this point we know there is a request */
+
+		/* Are too many requests already in progress? */
+		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
+			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
+				 skdev->name, __func__, __LINE__,
+				 skdev->in_flight, skdev->cur_max_queue_depth);
+			break;
+		}
+
+		/* Is a skd_request_context available? */
+		skreq = skdev->skreq_free_list;
+		if (skreq == NULL) {
+			pr_debug("%s:%s:%d Out of req=%p\n",
+				 skdev->name, __func__, __LINE__, q);
+			break;
+		}
+		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
+		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
+
+		/* Now we check to see if we can get a fit msg */
+		if (skmsg == NULL) {
+			if (skdev->skmsg_free_list == NULL) {
+				pr_debug("%s:%s:%d Out of msg\n",
+					 skdev->name, __func__, __LINE__);
+				break;
+			}
+		}
+
+		skreq->flush_cmd = 0;
+		skreq->n_sg = 0;
+		skreq->sg_byte_count = 0;
+		skreq->discard_page = 0;
+
+		/*
+		 * OK to now dequeue request from q.
+		 *
+		 * At this point we are committed to either start or reject
+		 * the native request. Note that skd_request_context is
+		 * available but is still at the head of the free list.
+		 */
+		blk_start_request(req);
+		skreq->req = req;
+		skreq->fitmsg_id = 0;
+
+		/* Either a FIT msg is in progress or we have to start one. */
+		if (skmsg == NULL) {
+			/* Are there any FIT msg buffers available? */
+			skmsg = skdev->skmsg_free_list;
+			if (skmsg == NULL) {
+				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
+					 skdev->name, __func__, __LINE__,
+					 skdev);
+				break;
+			}
+			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
+			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+
+			skdev->skmsg_free_list = skmsg->next;
+
+			skmsg->state = SKD_MSG_STATE_BUSY;
+			skmsg->id += SKD_ID_INCR;
+
+			/* Initialize the FIT msg header */
+			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+			memset(fmh, 0, sizeof(*fmh));
+			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+			skmsg->length = sizeof(*fmh);
+		}
+
+		skreq->fitmsg_id = skmsg->id;
+
+		/*
+		 * Note that a FIT msg may have just been started
+		 * but contains no SoFIT requests yet.
+		 */
+
+		/*
+		 * Transcode the request, checking as we go. The outcome of
+		 * the transcoding is represented by the error variable.
+		 */
+		cmd_ptr = &skmsg->msg_buf[skmsg->length];
+		memset(cmd_ptr, 0, 32);
+
+		be_lba = cpu_to_be32(lba);
+		be_count = cpu_to_be32(count);
+		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
+		cmdctxt = skreq->id + SKD_ID_INCR;
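+		/* cmdctxt matches what skreq->id will be after the bump below. */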
+
+		scsi_req = cmd_ptr;
+		scsi_req->hdr.tag = cmdctxt;
+		scsi_req->hdr.sg_list_dma_address = be_dmaa;
+
+		if (data_dir == READ)
+			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
+		else
+			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+
+		if (io_flags & REQ_DISCARD) {
+			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+			if (!page) {
+				pr_err("request_fn:Page allocation failed.\n");
+				skd_end_request(skdev, skreq, -ENOMEM);
+				break;
+			}
+			skreq->discard_page = 1;
+			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
+
+		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+			SKD_ASSERT(skreq->flush_cmd == 1);
+
+		} else {
+			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+		}
+
+		if (fua)
+			scsi_req->cdb[1] |= SKD_FUA_NV;
+
+		if (!req->bio)
+			goto skip_sg;
+
+		error = skd_preop_sg_list(skdev, skreq);
+
+		if (error != 0) {
+			/*
+			 * Complete the native request with error.
+			 * Note that the request context is still at the
+			 * head of the free list, and that the SoFIT request
+			 * was encoded into the FIT msg buffer but the FIT
+			 * msg length has not been updated. In short, the
+			 * only resource that has been allocated but might
+			 * not be used is that the FIT msg could be empty.
+			 */
+			pr_debug("%s:%s:%d error Out\n",
+				 skdev->name, __func__, __LINE__);
+			skd_end_request(skdev, skreq, error);
+			continue;
+		}
+
+skip_sg:
+		scsi_req->hdr.sg_list_len_bytes =
+			cpu_to_be32(skreq->sg_byte_count);
+
+		/* Complete resource allocations. */
+		skdev->skreq_free_list = skreq->next;
+		skreq->state = SKD_REQ_STATE_BUSY;
+		skreq->id += SKD_ID_INCR;
+
+		skmsg->length += sizeof(struct skd_scsi_request);
+		fmh->num_protocol_cmds_coalesced++;
+
+		/*
+		 * Update the active request counts.
+		 * Capture the timeout timestamp.
+		 */
+		skreq->timeout_stamp = skdev->timeout_stamp;
+		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+		skdev->timeout_slot[timo_slot]++;
+		skdev->in_flight++;
+		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 skreq->id, skdev->in_flight);
+
+		/*
+		 * If the FIT msg buffer is full send it.
+		 */
+		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
+		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
+			skd_send_fitmsg(skdev, skmsg);
+			skmsg = NULL;
+			fmh = NULL;
+		}
+	}
+
+	/*
+	 * Is a FIT msg in progress? If it is empty put the buffer back
+	 * on the free list. If it is non-empty send what we got.
+	 * This minimizes latency when there are fewer requests than
+	 * what fits in a FIT msg.
+	 */
+	if (skmsg != NULL) {
+		/* Bigger than just a FIT msg header? */
+		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
+			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
+				 skdev->name, __func__, __LINE__,
+				 skmsg, skmsg->length);
+			skd_send_fitmsg(skdev, skmsg);
+		} else {
+			/*
+			 * The FIT msg is empty. It means we got started
+			 * on the msg, but the requests were rejected.
+			 */
+			skmsg->state = SKD_MSG_STATE_IDLE;
+			skmsg->id += SKD_ID_INCR;
+			skmsg->next = skdev->skmsg_free_list;
+			skdev->skmsg_free_list = skmsg;
+		}
+		skmsg = NULL;
+		fmh = NULL;
+	}
+
+	/*
+	 * If req is non-NULL it means there is something to do but
+	 * we are out of a resource.
+	 */
+	if (req)
+		blk_stop_queue(skdev->queue);
+}
+
+static void skd_end_request(struct skd_device *skdev,
+			    struct skd_request_context *skreq, int error)
+{
+	struct request *req = skreq->req;
+	unsigned int io_flags = req->cmd_flags;
+
+	if ((io_flags & REQ_DISCARD) &&
+		(skreq->discard_page == 1)) {
+		pr_debug("%s:%s:%d, free the page!",
+			 skdev->name, __func__, __LINE__);
+		free_page((unsigned long)req->buffer);
+		req->buffer = NULL;
+	}
+
+	if (unlikely(error)) {
+		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
+		u32 lba = (u32)blk_rq_pos(req);
+		u32 count = blk_rq_sectors(req);
+
+		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
+		       skd_name(skdev), cmd, lba, count, skreq->id);
+	} else
+		pr_debug("%s:%s:%d id=0x%x error=%d\n",
+			 skdev->name, __func__, __LINE__, skreq->id, error);
+
+	__blk_end_request_all(skreq->req, error);
+}
+
+static int skd_preop_sg_list(struct skd_device *skdev,
+			     struct skd_request_context *skreq)
+{
+	struct request *req = skreq->req;
+	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+	struct scatterlist *sg = &skreq->sg[0];
+	int n_sg;
+	int i;
+
+	skreq->sg_byte_count = 0;
+
+	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
+		   skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+
+	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+	if (n_sg <= 0)
+		return -EINVAL;
+
+	/*
+	 * Map scatterlist to PCI bus addresses.
+	 * Note PCI might change the number of entries.
+	 */
+	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+	if (n_sg <= 0)
+		return -EINVAL;
+
+	SKD_ASSERT(n_sg <= skdev->sgs_per_request);
+
+	skreq->n_sg = n_sg;
+
+	for (i = 0; i < n_sg; i++) {
+		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+		u32 cnt = sg_dma_len(&sg[i]);
+		uint64_t dma_addr = sg_dma_address(&sg[i]);
+
+		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
+		sgd->byte_count = cnt;
+		skreq->sg_byte_count += cnt;
+		sgd->host_side_addr = dma_addr;
+		sgd->dev_side_addr = 0;
+	}
+
+	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
+	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
+
+	if (unlikely(skdev->dbg_level > 1)) {
+		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+			 skdev->name, __func__, __LINE__,
+			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+		for (i = 0; i < n_sg; i++) {
+			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
+				 "addr=0x%llx next=0x%llx\n",
+				 skdev->name, __func__, __LINE__,
+				 i, sgd->byte_count, sgd->control,
+				 sgd->host_side_addr, sgd->next_desc_ptr);
+		}
+	}
+
+	return 0;
+}
+
+static void skd_postop_sg_list(struct skd_device *skdev,
+			       struct skd_request_context *skreq)
+{
+	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+
+	/*
+	 * Restore the next pointer for the next IO request so we
+	 * don't have to set it every time.
+	 */
+	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
+		skreq->sksg_dma_address +
+		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
+	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
+}
+
+static void skd_request_fn_not_online(struct request_queue *q)
+{
+	struct skd_device *skdev = q->queuedata;
+	int error;
+
+	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
+
+	skd_log_skdev(skdev, "req_not_online");
+	switch (skdev->state) {
+	case SKD_DRVR_STATE_PAUSING:
+	case SKD_DRVR_STATE_PAUSED:
+	case SKD_DRVR_STATE_STARTING:
+	case SKD_DRVR_STATE_RESTARTING:
+	case SKD_DRVR_STATE_WAIT_BOOT:
+	/* In case of starting, we haven't started the queue,
+	 * so we can't get here... but requests are
+	 * possibly hanging out waiting for us because we
+	 * reported the dev/skd0 already.  They'll wait
+	 * forever if connect doesn't complete.
+	 * What to do??? delay dev/skd0 ??
+	 */
+	case SKD_DRVR_STATE_BUSY:
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+	case SKD_DRVR_STATE_BUSY_ERASE:
+	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+		return;
+
+	case SKD_DRVR_STATE_BUSY_SANITIZE:
+	case SKD_DRVR_STATE_STOPPING:
+	case SKD_DRVR_STATE_SYNCING:
+	case SKD_DRVR_STATE_FAULT:
+	case SKD_DRVR_STATE_DISAPPEARED:
+	default:
+		error = -EIO;
+		break;
+	}
+
+	/* If we get here, terminate all pending block requests
+	 * with EIO and any scsi pass thru with appropriate sense
+	 */
+
+	skd_fail_all_pending(skdev);
+}
+
+/*
+ *****************************************************************************
+ * TIMER
+ *****************************************************************************
+ */
+
+static void skd_timer_tick_not_online(struct skd_device *skdev);
+
+static void skd_timer_tick(ulong arg)
+{
+	struct skd_device *skdev = (struct skd_device *)arg;
+
+	u32 timo_slot;
+	u32 overdue_timestamp;
+	unsigned long reqflags;
+	u32 state;
+
+	if (skdev->state == SKD_DRVR_STATE_FAULT)
+		/* The driver has declared fault, and we want it to
+		 * stay that way until driver is reloaded.
+		 */
+		return;
+
+	spin_lock_irqsave(&skdev->lock, reqflags);
+
+	state = SKD_READL(skdev, FIT_STATUS);
+	state &= FIT_SR_DRIVE_STATE_MASK;
+	if (state != skdev->drive_state)
+		skd_isr_fwstate(skdev);
+
+	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+		skd_timer_tick_not_online(skdev);
+		goto timer_func_out;
+	}
+	skdev->timeout_stamp++;
+	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+
+	/*
+	 * All requests that happened during the previous use of
+	 * this slot should be done by now. The previous use was
+	 * over 7 seconds ago.
+	 */
+	if (skdev->timeout_slot[timo_slot] == 0)
+		goto timer_func_out;
+
+	/* Something is overdue */
+	overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
+
+	pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skdev->timeout_slot[timo_slot], skdev->in_flight);
+	pr_err("(%s): Overdue IOs (%d), busy %d\n",
+	       skd_name(skdev), skdev->timeout_slot[timo_slot],
+	       skdev->in_flight);
+
+	skdev->timer_countdown = SKD_DRAINING_TIMO;
+	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
+	skdev->timo_slot = timo_slot;
+	blk_stop_queue(skdev->queue);
+
+timer_func_out:
+	mod_timer(&skdev->timer, (jiffies + HZ));
+
+	spin_unlock_irqrestore(&skdev->lock, reqflags);
+}
+
+static void skd_timer_tick_not_online(struct skd_device *skdev)
+{
+	switch (skdev->state) {
+	case SKD_DRVR_STATE_IDLE:
+	case SKD_DRVR_STATE_LOAD:
+		break;
+	case SKD_DRVR_STATE_BUSY_SANITIZE:
+		pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->drive_state, skdev->state);
+		/* If we've been in sanitize for 3 seconds, we figure we're not
+		 * going to get any more completions, so recover requests now
+		 */
+		if (skdev->timer_countdown > 0) {
+			skdev->timer_countdown--;
+			return;
+		}
+		skd_recover_requests(skdev, 0);
+		break;
+
+	case SKD_DRVR_STATE_BUSY:
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+	case SKD_DRVR_STATE_BUSY_ERASE:
+		pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->state, skdev->timer_countdown);
+		if (skdev->timer_countdown > 0) {
+			skdev->timer_countdown--;
+			return;
+		}
+		pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->state, skdev->timer_countdown);
+		skd_restart_device(skdev);
+		break;
+
+	case SKD_DRVR_STATE_WAIT_BOOT:
+	case SKD_DRVR_STATE_STARTING:
+		if (skdev->timer_countdown > 0) {
+			skdev->timer_countdown--;
+			return;
+		}
+		/* For now, we fault the drive.  Could attempt resets to
+		 * recover at some point. */
+		skdev->state = SKD_DRVR_STATE_FAULT;
+
+		pr_err("(%s): DriveFault Connect Timeout (%x)\n",
+		       skd_name(skdev), skdev->drive_state);
+
+		/* start the queue so we can respond with error to requests */
+		/* wakeup anyone waiting for startup complete */
+		blk_start_queue(skdev->queue);
+		skdev->gendisk_on = -1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	case SKD_DRVR_STATE_ONLINE:
+		/* shouldn't get here. */
+		break;
+
+	case SKD_DRVR_STATE_PAUSING:
+	case SKD_DRVR_STATE_PAUSED:
+		break;
+
+	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+		pr_debug("%s:%s:%d "
+			 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->timo_slot,
+			 skdev->timer_countdown,
+			 skdev->in_flight,
+			 skdev->timeout_slot[skdev->timo_slot]);
+		/* if the slot has cleared we can let the I/O continue */
+		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
+			pr_debug("%s:%s:%d Slot drained, starting queue.\n",
+				 skdev->name, __func__, __LINE__);
+			skdev->state = SKD_DRVR_STATE_ONLINE;
+			blk_start_queue(skdev->queue);
+			return;
+		}
+		if (skdev->timer_countdown > 0) {
+			skdev->timer_countdown--;
+			return;
+		}
+		skd_restart_device(skdev);
+		break;
+
+	case SKD_DRVR_STATE_RESTARTING:
+		if (skdev->timer_countdown > 0) {
+			skdev->timer_countdown--;
+			return;
+		}
+		/* For now, we fault the drive. Could attempt resets to
+		 * recover at some point. */
+		skdev->state = SKD_DRVR_STATE_FAULT;
+		pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
+		       skd_name(skdev), skdev->drive_state);
+
+		/*
+		 * Recovering does two things:
+		 * 1. completes IO with error
+		 * 2. reclaims dma resources
+		 * When is it safe to recover requests?
+		 * - if the drive state is faulted
+		 * - if the state is still soft reset after our timeout
+		 * - if the drive registers are dead (state = FF)
+		 * If it is "unsafe", we still need to recover, so we will
+		 * disable pci bus mastering and disable our interrupts.
+		 */
+
+		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
+		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
+		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
+			/* It never came out of soft reset. Try to
+			 * recover the requests and then let them
+			 * fail. This is to mitigate hung processes. */
+			skd_recover_requests(skdev, 0);
+		else {
+			pr_err("(%s): Disable BusMaster (%x)\n",
+			       skd_name(skdev), skdev->drive_state);
+			pci_disable_device(skdev->pdev);
+			skd_disable_interrupts(skdev);
+			skd_recover_requests(skdev, 0);
+		}
+
+		/* start the queue so we can respond with error to requests */
+		/* wakeup anyone waiting for startup complete */
+		blk_start_queue(skdev->queue);
+		skdev->gendisk_on = -1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	case SKD_DRVR_STATE_RESUMING:
+	case SKD_DRVR_STATE_STOPPING:
+	case SKD_DRVR_STATE_SYNCING:
+	case SKD_DRVR_STATE_FAULT:
+	case SKD_DRVR_STATE_DISAPPEARED:
+	default:
+		break;
+	}
+}
+
+static int skd_start_timer(struct skd_device *skdev)
+{
+	int rc;
+
+	init_timer(&skdev->timer);
+	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+
+	rc = mod_timer(&skdev->timer, (jiffies + HZ));
+	if (rc)
+		pr_err("%s: failed to start timer %d\n",
+		       __func__, rc);
+	return rc;
+}
+
+static void skd_kill_timer(struct skd_device *skdev)
+{
+	del_timer_sync(&skdev->timer);
+}
+
+/*
+ *****************************************************************************
+ * IOCTL
+ *****************************************************************************
+ */
+static int skd_ioctl_sg_io(struct skd_device *skdev,
+			   fmode_t mode, void __user *argp);
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+					struct skd_sg_io *sksgio);
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+				   struct skd_sg_io *sksgio);
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+				    struct skd_sg_io *sksgio);
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+				 struct skd_sg_io *sksgio, int dxfer_dir);
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+				 struct skd_sg_io *sksgio);
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+				    struct skd_sg_io *sksgio);
+static int skd_sg_io_put_status(struct skd_device *skdev,
+				struct skd_sg_io *sksgio);
+
+static void skd_complete_special(struct skd_device *skdev,
+				 volatile struct fit_completion_entry_v1
+				 *skcomp,
+				 volatile struct fit_comp_error_info *skerr,
+				 struct skd_special_context *skspcl);
+
+static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
+			  uint cmd_in, ulong arg)
+{
+	int rc = 0;
+	struct gendisk *disk = bdev->bd_disk;
+	struct skd_device *skdev = disk->private_data;
+	void __user *p = (void *)arg;
+
+	pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
+		 skdev->name, __func__, __LINE__,
+		 disk->disk_name, current->comm, mode, cmd_in, arg);
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	switch (cmd_in) {
+	case SG_SET_TIMEOUT:
+	case SG_GET_TIMEOUT:
+	case SG_GET_VERSION_NUM:
+		rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+		break;
+	case SG_IO:
+		rc = skd_ioctl_sg_io(skdev, mode, p);
+		break;
+
+	default:
+		rc = -ENOTTY;
+		break;
+	}
+
+	pr_debug("%s:%s:%d %s:  completion rc %d\n",
+		 skdev->name, __func__, __LINE__, disk->disk_name, rc);
+	return rc;
+}
+
+static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
+			   void __user *argp)
+{
+	int rc;
+	struct skd_sg_io sksgio;
+
+	memset(&sksgio, 0, sizeof(sksgio));
+	sksgio.mode = mode;
+	sksgio.argp = argp;
+	sksgio.iov = &sksgio.no_iov_iov;
+
+	switch (skdev->state) {
+	case SKD_DRVR_STATE_ONLINE:
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+		break;
+
+	default:
+		pr_debug("%s:%s:%d drive not online\n",
+			 skdev->name, __func__, __LINE__);
+		rc = -ENXIO;
+		goto out;
+	}
+
+	rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_prep_buffering(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_await(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
+	if (rc)
+		goto out;
+
+	rc = skd_sg_io_put_status(skdev, &sksgio);
+	if (rc)
+		goto out;
+
+	rc = 0;
+
+out:
+	skd_sg_io_release_skspcl(skdev, &sksgio);
+
+	if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
+		kfree(sksgio.iov);
+	return rc;
+}
+
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+					struct skd_sg_io *sksgio)
+{
+	struct sg_io_hdr *sgp = &sksgio->sg;
+	int i, acc;
+
+	if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
+		pr_debug("%s:%s:%d access sg failed %p\n",
+			 skdev->name, __func__, __LINE__, sksgio->argp);
+		return -EFAULT;
+	}
+
+	if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
+		pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
+			 skdev->name, __func__, __LINE__, sksgio->argp);
+		return -EFAULT;
+	}
+
+	if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
+		pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
+			 skdev->name, __func__, __LINE__, sgp->interface_id);
+		return -EINVAL;
+	}
+
+	if (sgp->cmd_len > sizeof(sksgio->cdb)) {
+		pr_debug("%s:%s:%d cmd_len invalid %d\n",
+			 skdev->name, __func__, __LINE__, sgp->cmd_len);
+		return -EINVAL;
+	}
+
+	if (sgp->iovec_count > 256) {
+		pr_debug("%s:%s:%d iovec_count invalid %d\n",
+			 skdev->name, __func__, __LINE__, sgp->iovec_count);
+		return -EINVAL;
+	}
+
+	if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
+		pr_debug("%s:%s:%d dxfer_len invalid %d\n",
+			 skdev->name, __func__, __LINE__, sgp->dxfer_len);
+		return -EINVAL;
+	}
+
+	switch (sgp->dxfer_direction) {
+	case SG_DXFER_NONE:
+		acc = -1;
+		break;
+
+	case SG_DXFER_TO_DEV:
+		acc = VERIFY_READ;
+		break;
+
+	case SG_DXFER_FROM_DEV:
+	case SG_DXFER_TO_FROM_DEV:
+		acc = VERIFY_WRITE;
+		break;
+
+	default:
+		pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
+			 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
+		pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
+			 skdev->name, __func__, __LINE__, sgp->cmdp);
+		return -EFAULT;
+	}
+
+	if (sgp->mx_sb_len != 0) {
+		if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
+			pr_debug("%s:%s:%d access sbp failed %p\n",
+				 skdev->name, __func__, __LINE__, sgp->sbp);
+			return -EFAULT;
+		}
+	}
+
+	if (sgp->iovec_count == 0) {
+		sksgio->iov[0].iov_base = sgp->dxferp;
+		sksgio->iov[0].iov_len = sgp->dxfer_len;
+		sksgio->iovcnt = 1;
+		sksgio->dxfer_len = sgp->dxfer_len;
+	} else {
+		struct sg_iovec *iov;
+		uint nbytes = sizeof(*iov) * sgp->iovec_count;
+		size_t iov_data_len;
+
+		iov = kmalloc(nbytes, GFP_KERNEL);
+		if (iov == NULL) {
+			pr_debug("%s:%s:%d alloc iovec failed %d\n",
+				 skdev->name, __func__, __LINE__,
+				 sgp->iovec_count);
+			return -ENOMEM;
+		}
+		sksgio->iov = iov;
+		sksgio->iovcnt = sgp->iovec_count;
+
+		if (copy_from_user(iov, sgp->dxferp, nbytes)) {
+			pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
+				 skdev->name, __func__, __LINE__, sgp->dxferp);
+			return -EFAULT;
+		}
+
+		/*
+		 * Sum up the vecs, making sure they don't overflow
+		 */
+		iov_data_len = 0;
+		for (i = 0; i < sgp->iovec_count; i++) {
+			if (iov_data_len + iov[i].iov_len < iov_data_len)
+				return -EINVAL;
+			iov_data_len += iov[i].iov_len;
+		}
+
+		/* SG_IO howto says that the shorter of the two wins */
+		if (sgp->dxfer_len < iov_data_len) {
+			sksgio->iovcnt = iov_shorten((struct iovec *)iov,
+						     sgp->iovec_count,
+						     sgp->dxfer_len);
+			sksgio->dxfer_len = sgp->dxfer_len;
+		} else
+			sksgio->dxfer_len = iov_data_len;
+	}
+
+	if (sgp->dxfer_direction != SG_DXFER_NONE) {
+		struct sg_iovec *iov = sksgio->iov;
+		for (i = 0; i < sksgio->iovcnt; i++, iov++) {
+			if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
+				pr_debug("%s:%s:%d access data failed %p/%d\n",
+					 skdev->name, __func__, __LINE__,
+					 iov->iov_base, (int)iov->iov_len);
+				return -EFAULT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+				   struct skd_sg_io *sksgio)
+{
+	struct skd_special_context *skspcl = NULL;
+	int rc;
+
+	for (;;) {
+		ulong flags;
+
+		spin_lock_irqsave(&skdev->lock, flags);
+		skspcl = skdev->skspcl_free_list;
+		if (skspcl != NULL) {
+			skdev->skspcl_free_list =
+				(struct skd_special_context *)skspcl->req.next;
+			skspcl->req.id += SKD_ID_INCR;
+			skspcl->req.state = SKD_REQ_STATE_SETUP;
+			skspcl->orphaned = 0;
+			skspcl->req.n_sg = 0;
+		}
+		spin_unlock_irqrestore(&skdev->lock, flags);
+
+		if (skspcl != NULL) {
+			rc = 0;
+			break;
+		}
+
+		pr_debug("%s:%s:%d blocking\n",
+			 skdev->name, __func__, __LINE__);
+
+		rc = wait_event_interruptible_timeout(
+				skdev->waitq,
+				(skdev->skspcl_free_list != NULL),
+				msecs_to_jiffies(sksgio->sg.timeout));
+
+		pr_debug("%s:%s:%d unblocking, rc=%d\n",
+			 skdev->name, __func__, __LINE__, rc);
+
+		if (rc <= 0) {
+			if (rc == 0)
+				rc = -ETIMEDOUT;
+			else
+				rc = -EINTR;
+			break;
+		}
+		/*
+		 * If we get here rc > 0 meaning the timeout to
+		 * wait_event_interruptible_timeout() had time left, hence the
+		 * sought event -- non-empty free list -- happened.
+		 * Retry the allocation.
+		 */
+	}
+	sksgio->skspcl = skspcl;
+
+	return rc;
+}
+
+static int skd_skreq_prep_buffering(struct skd_device *skdev,
+				    struct skd_request_context *skreq,
+				    u32 dxfer_len)
+{
+	u32 resid = dxfer_len;
+
+	/*
+	 * The DMA engine must have aligned addresses and byte counts.
+	 */
+	resid += (-resid) & 3;
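+	/* (-resid) & 3 rounds resid up to a multiple of 4, e.g. 10 -> 12. */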
+	skreq->sg_byte_count = resid;
+
+	skreq->n_sg = 0;
+
+	while (resid > 0) {
+		u32 nbytes = PAGE_SIZE;
+		u32 ix = skreq->n_sg;
+		struct scatterlist *sg = &skreq->sg[ix];
+		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+		struct page *page;
+
+		if (nbytes > resid)
+			nbytes = resid;
+
+		page = alloc_page(GFP_KERNEL);
+		if (page == NULL)
+			return -ENOMEM;
+
+		sg_set_page(sg, page, nbytes, 0);
+
+		/* TODO: This should be going through a pci_???()
+		 * routine to do proper mapping. */
+		sksg->control = FIT_SGD_CONTROL_NOT_LAST;
+		sksg->byte_count = nbytes;
+
+		sksg->host_side_addr = sg_phys(sg);
+
+		sksg->dev_side_addr = 0;
+		sksg->next_desc_ptr = skreq->sksg_dma_address +
+				      (ix + 1) * sizeof(*sksg);
+
+		skreq->n_sg++;
+		resid -= nbytes;
+	}
+
+	if (skreq->n_sg > 0) {
+		u32 ix = skreq->n_sg - 1;
+		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+
+		sksg->control = FIT_SGD_CONTROL_LAST;
+		sksg->next_desc_ptr = 0;
+	}
+
+	if (unlikely(skdev->dbg_level > 1)) {
+		u32 i;
+
+		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+			 skdev->name, __func__, __LINE__,
+			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+		for (i = 0; i < skreq->n_sg; i++) {
+			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+
+			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
+				 "addr=0x%llx next=0x%llx\n",
+				 skdev->name, __func__, __LINE__,
+				 i, sgd->byte_count, sgd->control,
+				 sgd->host_side_addr, sgd->next_desc_ptr);
+		}
+	}
+
+	return 0;
+}
+
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+				    struct skd_sg_io *sksgio)
+{
+	struct skd_special_context *skspcl = sksgio->skspcl;
+	struct skd_request_context *skreq = &skspcl->req;
+	u32 dxfer_len = sksgio->dxfer_len;
+	int rc;
+
+	rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
+	/*
+	 * Whether or not errors occur, skd_release_special() is
+	 * eventually called to recover the allocations, including
+	 * partial ones.
+	 */
+	return rc;
+}
+
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+				 struct skd_sg_io *sksgio, int dxfer_dir)
+{
+	struct skd_special_context *skspcl = sksgio->skspcl;
+	u32 iov_ix = 0;
+	struct sg_iovec curiov;
+	u32 sksg_ix = 0;
+	u8 *bufp = NULL;
+	u32 buf_len = 0;
+	u32 resid = sksgio->dxfer_len;
+	int rc;
+
+	curiov.iov_len = 0;
+	curiov.iov_base = NULL;
+
+	if (dxfer_dir != sksgio->sg.dxfer_direction) {
+		if (dxfer_dir != SG_DXFER_TO_DEV ||
+		    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
+			return 0;
+	}
+
+	while (resid > 0) {
+		u32 nbytes = PAGE_SIZE;
+
+		if (curiov.iov_len == 0) {
+			curiov = sksgio->iov[iov_ix++];
+			continue;
+		}
+
+		if (buf_len == 0) {
+			struct page *page;
+			page = sg_page(&skspcl->req.sg[sksg_ix++]);
+			bufp = page_address(page);
+			buf_len = PAGE_SIZE;
+		}
+
+		nbytes = min_t(u32, nbytes, resid);
+		nbytes = min_t(u32, nbytes, curiov.iov_len);
+		nbytes = min_t(u32, nbytes, buf_len);
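+		/*
+		 * nbytes is clamped to the smallest of what is left to move
+		 * (resid), what the current user iovec still holds, and what
+		 * remains of the current bounce page. E.g. with 4 KiB pages,
+		 * resid = 6000 and iov_len = 4096: the first pass copies
+		 * 4096 bytes, the next pass picks up the next iovec and page
+		 * and copies the remaining 1904.
+		 */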
+
+		if (dxfer_dir == SG_DXFER_TO_DEV)
+			rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
+		else
+			rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
+
+		if (rc)
+			return -EFAULT;
+
+		resid -= nbytes;
+		curiov.iov_len -= nbytes;
+		curiov.iov_base += nbytes;
+		buf_len -= nbytes;
+	}
+
+	return 0;
+}
+
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+				 struct skd_sg_io *sksgio)
+{
+	struct skd_special_context *skspcl = sksgio->skspcl;
+	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
+	memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
+
+	/* Initialize the FIT msg header */
+	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+	fmh->num_protocol_cmds_coalesced = 1;
+
+	/* Initialize the SCSI request */
+	if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
+		scsi_req->hdr.sg_list_dma_address =
+			cpu_to_be64(skspcl->req.sksg_dma_address);
+	scsi_req->hdr.tag = skspcl->req.id;
+	scsi_req->hdr.sg_list_len_bytes =
+		cpu_to_be32(skspcl->req.sg_byte_count);
+	memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
+
+	skspcl->req.state = SKD_REQ_STATE_BUSY;
+	skd_send_special_fitmsg(skdev, skspcl);
+
+	return 0;
+}
+
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
+{
+	unsigned long flags;
+	int rc;
+
+	rc = wait_event_interruptible_timeout(skdev->waitq,
+					      (sksgio->skspcl->req.state !=
+					       SKD_REQ_STATE_BUSY),
+					      msecs_to_jiffies(sksgio->sg.
+							       timeout));
+
+	spin_lock_irqsave(&skdev->lock, flags);
+
+	if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
+		pr_debug("%s:%s:%d skspcl %p aborted\n",
+			 skdev->name, __func__, __LINE__, sksgio->skspcl);
+
+		/* Build a check condition with sense data and let the
+		 * command finish. For a timeout we must fabricate the
+		 * completion and sense data ourselves. */
+		sksgio->skspcl->req.completion.status =
+			SAM_STAT_CHECK_CONDITION;
+
+		memset(&sksgio->skspcl->req.err_info, 0,
+		       sizeof(sksgio->skspcl->req.err_info));
+		sksgio->skspcl->req.err_info.type = 0x70;
+		sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
+		sksgio->skspcl->req.err_info.code = 0x44;
+		sksgio->skspcl->req.err_info.qual = 0;
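+		/* Fixed-format (0x70) sense: ABORTED COMMAND with
+		 * asc/ascq 0x44/0x00, an internal target failure. */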
+		rc = 0;
+	} else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
+		/* No longer on the adapter. We finish. */
+		rc = 0;
+	else {
+		/* Something's gone wrong. Still busy. Timeout or
+		 * user interrupted (control-C). Mark as an orphan
+		 * so it will be disposed when completed. */
+		sksgio->skspcl->orphaned = 1;
+		sksgio->skspcl = NULL;
+		if (rc == 0) {
+			pr_debug("%s:%s:%d timed out %p (%u ms)\n",
+				 skdev->name, __func__, __LINE__,
+				 sksgio, sksgio->sg.timeout);
+			rc = -ETIMEDOUT;
+		} else {
+			pr_debug("%s:%s:%d cntlc %p\n",
+				 skdev->name, __func__, __LINE__, sksgio);
+			rc = -EINTR;
+		}
+	}
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+	return rc;
+}
+
+static int skd_sg_io_put_status(struct skd_device *skdev,
+				struct skd_sg_io *sksgio)
+{
+	struct sg_io_hdr *sgp = &sksgio->sg;
+	struct skd_special_context *skspcl = sksgio->skspcl;
+	int resid = 0;
+
+	u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
+
+	sgp->status = skspcl->req.completion.status;
+	resid = sksgio->dxfer_len - nb;
+
+	sgp->masked_status = sgp->status & STATUS_MASK;
+	sgp->msg_status = 0;
+	sgp->host_status = 0;
+	sgp->driver_status = 0;
+	sgp->resid = resid;
+	if (sgp->masked_status || sgp->host_status || sgp->driver_status)
+		sgp->info |= SG_INFO_CHECK;
+
+	pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 sgp->status, sgp->masked_status, sgp->resid);
+
+	if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
+		if (sgp->mx_sb_len > 0) {
+			struct fit_comp_error_info *ei = &skspcl->req.err_info;
+			u32 nbytes = sizeof(*ei);
+
+			nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
+
+			sgp->sb_len_wr = nbytes;
+
+			if (__copy_to_user(sgp->sbp, ei, nbytes)) {
+				pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
+					 skdev->name, __func__, __LINE__,
+					 sgp->sbp);
+				return -EFAULT;
+			}
+		}
+	}
+
+	if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
+		pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
+			 skdev->name, __func__, __LINE__, sksgio->argp);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+				    struct skd_sg_io *sksgio)
+{
+	struct skd_special_context *skspcl = sksgio->skspcl;
+
+	if (skspcl != NULL) {
+		ulong flags;
+
+		sksgio->skspcl = NULL;
+
+		spin_lock_irqsave(&skdev->lock, flags);
+		skd_release_special(skdev, skspcl);
+		spin_unlock_irqrestore(&skdev->lock, flags);
+	}
+
+	return 0;
+}
+
+/*
+ *****************************************************************************
+ * INTERNAL REQUESTS -- generated by driver itself
+ *****************************************************************************
+ */
+
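+/*
+ * Happy-path sequence of internal requests, advanced by
+ * skd_complete_internal(): TEST_UNIT_READY -> WRITE_BUFFER ->
+ * READ_BUFFER (verify pattern) -> READ_CAPACITY -> INQUIRY
+ * (serial number page) -> device unquiesced and online.
+ */
+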
+static int skd_format_internal_skspcl(struct skd_device *skdev)
+{
+	struct skd_special_context *skspcl = &skdev->internal_skspcl;
+	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+	struct fit_msg_hdr *fmh;
+	uint64_t dma_address;
+	struct skd_scsi_request *scsi;
+
+	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+	fmh->num_protocol_cmds_coalesced = 1;
+
+	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+	memset(scsi, 0, sizeof(*scsi));
+	dma_address = skspcl->req.sksg_dma_address;
+	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+	sgd->control = FIT_SGD_CONTROL_LAST;
+	sgd->byte_count = 0;
+	sgd->host_side_addr = skspcl->db_dma_address;
+	sgd->dev_side_addr = 0;
+	sgd->next_desc_ptr = 0LL;
+
+	return 1;
+}
+
+#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
+
+static void skd_send_internal_skspcl(struct skd_device *skdev,
+				     struct skd_special_context *skspcl,
+				     u8 opcode)
+{
+	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+	struct skd_scsi_request *scsi;
+	unsigned char *buf = skspcl->data_buf;
+	int i;
+
+	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
+		/*
+		 * A refresh is already in progress.
+		 * Just wait for it to finish.
+		 */
+		return;
+
+	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
+	skspcl->req.state = SKD_REQ_STATE_BUSY;
+	skspcl->req.id += SKD_ID_INCR;
+
+	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+	scsi->hdr.tag = skspcl->req.id;
+
+	memset(scsi->cdb, 0, sizeof(scsi->cdb));
+
+	switch (opcode) {
+	case TEST_UNIT_READY:
+		scsi->cdb[0] = TEST_UNIT_READY;
+		sgd->byte_count = 0;
+		scsi->hdr.sg_list_len_bytes = 0;
+		break;
+
+	case READ_CAPACITY:
+		scsi->cdb[0] = READ_CAPACITY;
+		sgd->byte_count = SKD_N_READ_CAP_BYTES;
+		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+		break;
+
+	case INQUIRY:
+		scsi->cdb[0] = INQUIRY;
+		scsi->cdb[1] = 0x01;    /* evpd */
+		scsi->cdb[2] = 0x80;    /* serial number page */
+		scsi->cdb[4] = 0x10;
+		sgd->byte_count = 16;
+		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		scsi->cdb[0] = SYNCHRONIZE_CACHE;
+		sgd->byte_count = 0;
+		scsi->hdr.sg_list_len_bytes = 0;
+		break;
+
+	case WRITE_BUFFER:
+		scsi->cdb[0] = WRITE_BUFFER;
+		scsi->cdb[1] = 0x02;
+		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
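+		/*
+		 * cdb[7..8] carry the buffer length big-endian; with an
+		 * illustrative WR_BUF_SIZE of 0x2000 this would be
+		 * cdb[7] = 0x20, cdb[8] = 0x00.
+		 */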
+		sgd->byte_count = WR_BUF_SIZE;
+		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+		/* fill incrementing byte pattern */
+		for (i = 0; i < sgd->byte_count; i++)
+			buf[i] = i & 0xFF;
+		break;
+
+	case READ_BUFFER:
+		scsi->cdb[0] = READ_BUFFER;
+		scsi->cdb[1] = 0x02;
+		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
+		sgd->byte_count = WR_BUF_SIZE;
+		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+		memset(skspcl->data_buf, 0, sgd->byte_count);
+		break;
+
+	default:
+		SKD_ASSERT("Don't know what to send");
+		return;
+
+	}
+	skd_send_special_fitmsg(skdev, skspcl);
+}
+
+static void skd_refresh_device_data(struct skd_device *skdev)
+{
+	struct skd_special_context *skspcl = &skdev->internal_skspcl;
+
+	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
+}
+
+static int skd_chk_read_buf(struct skd_device *skdev,
+			    struct skd_special_context *skspcl)
+{
+	unsigned char *buf = skspcl->data_buf;
+	int i;
+
+	/* check for incrementing byte pattern */
+	for (i = 0; i < WR_BUF_SIZE; i++)
+		if (buf[i] != (i & 0xFF))
+			return 1;
+
+	return 0;
+}
+
+static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
+				 u8 code, u8 qual, u8 fruc)
+{
+	/* If the check condition is of special interest, log a message */
+	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
+	    && (code == 0x04) && (qual == 0x06)) {
+		pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
+		       "ascq/fruc %02x/%02x/%02x/%02x\n",
+		       skd_name(skdev), key, code, qual, fruc);
+	}
+}
+
+static void skd_complete_internal(struct skd_device *skdev,
+				  volatile struct fit_completion_entry_v1
+				  *skcomp,
+				  volatile struct fit_comp_error_info *skerr,
+				  struct skd_special_context *skspcl)
+{
+	u8 *buf = skspcl->data_buf;
+	u8 status;
+	int i;
+	struct skd_scsi_request *scsi =
+		(struct skd_scsi_request *)&skspcl->msg_buf[64];
+
+	SKD_ASSERT(skspcl == &skdev->internal_skspcl);
+
+	pr_debug("%s:%s:%d complete internal %x\n",
+		 skdev->name, __func__, __LINE__, scsi->cdb[0]);
+
+	skspcl->req.completion = *skcomp;
+	skspcl->req.state = SKD_REQ_STATE_IDLE;
+	skspcl->req.id += SKD_ID_INCR;
+
+	status = skspcl->req.completion.status;
+
+	skd_log_check_status(skdev, status, skerr->key, skerr->code,
+			     skerr->qual, skerr->fruc);
+
+	switch (scsi->cdb[0]) {
+	case TEST_UNIT_READY:
+		if (status == SAM_STAT_GOOD)
+			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+		else if ((status == SAM_STAT_CHECK_CONDITION) &&
+			 (skerr->key == MEDIUM_ERROR))
+			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+		else {
+			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+				pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
+					 skdev->name, __func__, __LINE__,
+					 skdev->state);
+				return;
+			}
+			pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
+				 skdev->name, __func__, __LINE__);
+			skd_send_internal_skspcl(skdev, skspcl, 0x00);
+		}
+		break;
+
+	case WRITE_BUFFER:
+		if (status == SAM_STAT_GOOD)
+			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
+		else {
+			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+				pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
+					 skdev->name, __func__, __LINE__,
+					 skdev->state);
+				return;
+			}
+			pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
+				 skdev->name, __func__, __LINE__);
+			skd_send_internal_skspcl(skdev, skspcl, 0x00);
+		}
+		break;
+
+	case READ_BUFFER:
+		if (status == SAM_STAT_GOOD) {
+			if (skd_chk_read_buf(skdev, skspcl) == 0)
+				skd_send_internal_skspcl(skdev, skspcl,
+							 READ_CAPACITY);
+			else {
+				pr_err(
+				       "(%s):*** W/R Buffer mismatch %d ***\n",
+				       skd_name(skdev), skdev->connect_retries);
+				if (skdev->connect_retries <
+				    SKD_MAX_CONNECT_RETRIES) {
+					skdev->connect_retries++;
+					skd_soft_reset(skdev);
+				} else {
+					pr_err(
+					       "(%s): W/R Buffer Connect Error\n",
+					       skd_name(skdev));
+					return;
+				}
+			}
+
+		} else {
+			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+				pr_debug("%s:%s:%d "
+					 "read buffer failed, don't send anymore state 0x%x\n",
+					 skdev->name, __func__, __LINE__,
+					 skdev->state);
+				return;
+			}
+			pr_debug("%s:%s:%d "
+				 "**** read buffer failed, retry skerr\n",
+				 skdev->name, __func__, __LINE__);
+			skd_send_internal_skspcl(skdev, skspcl, 0x00);
+		}
+		break;
+
+	case READ_CAPACITY:
+		skdev->read_cap_is_valid = 0;
+		if (status == SAM_STAT_GOOD) {
+			skdev->read_cap_last_lba =
+				(buf[0] << 24) | (buf[1] << 16) |
+				(buf[2] << 8) | buf[3];
+			skdev->read_cap_blocksize =
+				(buf[4] << 24) | (buf[5] << 16) |
+				(buf[6] << 8) | buf[7];
+
+			pr_debug("%s:%s:%d last lba %d, bs %d\n",
+				 skdev->name, __func__, __LINE__,
+				 skdev->read_cap_last_lba,
+				 skdev->read_cap_blocksize);
+
+			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+
+			skdev->read_cap_is_valid = 1;
+
+			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
+			   (skerr->key == MEDIUM_ERROR)) {
+			skdev->read_cap_last_lba = ~0;
+			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+			pr_debug("%s:%s:%d "
+				 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
+				 skdev->name, __func__, __LINE__);
+			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+		} else {
+			pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
+				 skdev->name, __func__, __LINE__);
+			skd_send_internal_skspcl(skdev, skspcl,
+						 TEST_UNIT_READY);
+		}
+		break;
+
+	case INQUIRY:
+		skdev->inquiry_is_valid = 0;
+		if (status == SAM_STAT_GOOD) {
+			skdev->inquiry_is_valid = 1;
+
+			for (i = 0; i < 12; i++)
+				skdev->inq_serial_num[i] = buf[i + 4];
+			skdev->inq_serial_num[12] = 0;
+		}
+
+		if (skd_unquiesce_dev(skdev) < 0)
+			pr_debug("%s:%s:%d **** failed to ONLINE device\n",
+				 skdev->name, __func__, __LINE__);
+		/* connection is complete */
+		skdev->connect_retries = 0;
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		if (status == SAM_STAT_GOOD)
+			skdev->sync_done = 1;
+		else
+			skdev->sync_done = -1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	default:
+		SKD_ASSERT("we didn't send this");
+	}
+}
+
+/*
+ *****************************************************************************
+ * FIT MESSAGES
+ *****************************************************************************
+ */
+
+static void skd_send_fitmsg(struct skd_device *skdev,
+			    struct skd_fitmsg_context *skmsg)
+{
+	u64 qcmd;
+	struct fit_msg_hdr *fmh;
+
+	pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skmsg->mb_dma_address, skdev->in_flight);
+	pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
+		 skdev->name, __func__, __LINE__,
+		 skmsg->msg_buf, skmsg->offset);
+
+	qcmd = skmsg->mb_dma_address;
+	qcmd |= FIT_QCMD_QID_NORMAL;
+
+	fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
+
+	if (unlikely(skdev->dbg_level > 1)) {
+		u8 *bp = (u8 *)skmsg->msg_buf;
+		int i;
+		for (i = 0; i < skmsg->length; i += 8) {
+			pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
+				 "%02x %02x %02x %02x\n",
+				 skdev->name, __func__, __LINE__,
+				 i, bp[i + 0], bp[i + 1], bp[i + 2],
+				 bp[i + 3], bp[i + 4], bp[i + 5],
+				 bp[i + 6], bp[i + 7]);
+			if (i == 0)
+				i = 64 - 8;
+		}
+	}
+
+	if (skmsg->length > 256)
+		qcmd |= FIT_QCMD_MSGSIZE_512;
+	else if (skmsg->length > 128)
+		qcmd |= FIT_QCMD_MSGSIZE_256;
+	else if (skmsg->length > 64)
+		qcmd |= FIT_QCMD_MSGSIZE_128;
+	else
+		/*
+		 * This makes no sense because the FIT msg header is
+		 * 64 bytes. If the msg is only 64 bytes long it has
+		 * no payload.
+		 */
+		qcmd |= FIT_QCMD_MSGSIZE_64;
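+	/*
+	 * E.g. a message carrying the 64-byte FIT header plus one 64-byte
+	 * command is 128 bytes long (length > 64 and <= 128) and is
+	 * tagged FIT_QCMD_MSGSIZE_128 above.
+	 */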
+
+	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+
+}
+
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+				    struct skd_special_context *skspcl)
+{
+	u64 qcmd;
+
+	if (unlikely(skdev->dbg_level > 1)) {
+		u8 *bp = (u8 *)skspcl->msg_buf;
+		int i;
+
+		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
+			pr_debug("%s:%s:%d  spcl[%2d] %02x %02x %02x %02x  "
+				 "%02x %02x %02x %02x\n",
+				 skdev->name, __func__, __LINE__, i,
+				 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
+				 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+			if (i == 0)
+				i = 64 - 8;
+		}
+
+		pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+			 skdev->name, __func__, __LINE__,
+			 skspcl, skspcl->req.id, skspcl->req.sksg_list,
+			 skspcl->req.sksg_dma_address);
+		for (i = 0; i < skspcl->req.n_sg; i++) {
+			struct fit_sg_descriptor *sgd =
+				&skspcl->req.sksg_list[i];
+
+			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
+				 "addr=0x%llx next=0x%llx\n",
+				 skdev->name, __func__, __LINE__,
+				 i, sgd->byte_count, sgd->control,
+				 sgd->host_side_addr, sgd->next_desc_ptr);
+		}
+	}
+
+	/*
+	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
+	 * and one 64-byte SSDI command.
+	 */
+	qcmd = skspcl->mb_dma_address;
+	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+
+	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+}
+
+/*
+ *****************************************************************************
+ * COMPLETION QUEUE
+ *****************************************************************************
+ */
+
+static void skd_complete_other(struct skd_device *skdev,
+			       volatile struct fit_completion_entry_v1 *skcomp,
+			       volatile struct fit_comp_error_info *skerr);
+
+struct sns_info {
+	u8 type;
+	u8 stat;
+	u8 key;
+	u8 asc;
+	u8 ascq;
+	u8 mask;
+	enum skd_check_status_action action;
+};
+
+static struct sns_info skd_chkstat_table[] = {
+	/* Good */
+	{ 0x70, 0x02, RECOVERED_ERROR, 0,    0,	   0x1c,
+	  SKD_CHECK_STATUS_REPORT_GOOD },
+
+	/* Smart alerts */
+	{ 0x70, 0x02, NO_SENSE,	       0x0B, 0x00, 0x1E,	/* warnings */
+	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+	{ 0x70, 0x02, NO_SENSE,	       0x5D, 0x00, 0x1E,	/* thresholds */
+	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
+	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+
+	/* Retry (with limits) */
+	{ 0x70, 0x02, 0x0B,	       0,    0,	   0x1C,        /* This one is for DMA ERROR */
+	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
+	{ 0x70, 0x02, 0x06,	       0x0B, 0x00, 0x1E,        /* warnings */
+	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
+	{ 0x70, 0x02, 0x06,	       0x5D, 0x00, 0x1E,        /* thresholds */
+	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
+	{ 0x70, 0x02, 0x06,	       0x80, 0x30, 0x1F,        /* backup power */
+	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
+
+	/* Busy (or about to be) */
+	{ 0x70, 0x02, 0x06,	       0x3f, 0x01, 0x1F, /* fw changed */
+	  SKD_CHECK_STATUS_BUSY_IMMINENT },
+};
+
+/*
+ * Look up status and sense data to decide how to handle the error
+ * from the device.
+ * mask says which fields must match, e.g. mask=0x18 means check
+ * type and stat; ignore key, asc, ascq.
+ */
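+
+/*
+ * Worked example: mask bit 0x10 selects type, 0x08 stat, 0x04 key,
+ * 0x02 asc and 0x01 ascq; so mask=0x1C matches on type, stat and key
+ * while ignoring asc/ascq, and mask=0x1F requires all five fields to
+ * match.
+ */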
+
+static enum skd_check_status_action
+skd_check_status(struct skd_device *skdev,
+		 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+{
+	int i, n;
+
+	pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+	       skd_name(skdev), skerr->key, skerr->code, skerr->qual,
+	       skerr->fruc);
+
+	pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+		 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
+		 skerr->key, skerr->code, skerr->qual, skerr->fruc);
+
+	/* Does the info match an entry in the table? */
+	n = ARRAY_SIZE(skd_chkstat_table);
+	for (i = 0; i < n; i++) {
+		struct sns_info *sns = &skd_chkstat_table[i];
+
+		if (sns->mask & 0x10)
+			if (skerr->type != sns->type)
+				continue;
+
+		if (sns->mask & 0x08)
+			if (cmp_status != sns->stat)
+				continue;
+
+		if (sns->mask & 0x04)
+			if (skerr->key != sns->key)
+				continue;
+
+		if (sns->mask & 0x02)
+			if (skerr->code != sns->asc)
+				continue;
+
+		if (sns->mask & 0x01)
+			if (skerr->qual != sns->ascq)
+				continue;
+
+		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
+			pr_err("(%s): SMART Alert: sense key/asc/ascq "
+			       "%02x/%02x/%02x\n",
+			       skd_name(skdev), skerr->key,
+			       skerr->code, skerr->qual);
+		}
+		return sns->action;
+	}
+
+	/* No other match, so nonzero status means error,
+	 * zero status means good
+	 */
+	if (cmp_status) {
+		pr_debug("%s:%s:%d status check: error\n",
+			 skdev->name, __func__, __LINE__);
+		return SKD_CHECK_STATUS_REPORT_ERROR;
+	}
+
+	pr_debug("%s:%s:%d status check good default\n",
+		 skdev->name, __func__, __LINE__);
+	return SKD_CHECK_STATUS_REPORT_GOOD;
+}
+
+static void skd_resolve_req_exception(struct skd_device *skdev,
+				      struct skd_request_context *skreq)
+{
+	u8 cmp_status = skreq->completion.status;
+
+	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
+	case SKD_CHECK_STATUS_REPORT_GOOD:
+	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
+		skd_end_request(skdev, skreq, 0);
+		break;
+
+	case SKD_CHECK_STATUS_BUSY_IMMINENT:
+		skd_log_skreq(skdev, skreq, "retry(busy)");
+		blk_requeue_request(skdev->queue, skreq->req);
+		pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
+		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
+		skd_quiesce_dev(skdev);
+		break;
+
+	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
+		if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+			skd_log_skreq(skdev, skreq, "retry");
+			blk_requeue_request(skdev->queue, skreq->req);
+			break;
+		}
+	/* fall through to report error */
+
+	case SKD_CHECK_STATUS_REPORT_ERROR:
+	default:
+		skd_end_request(skdev, skreq, -EIO);
+		break;
+	}
+}
+
+/* assume spinlock is already held */
+static void skd_release_skreq(struct skd_device *skdev,
+			      struct skd_request_context *skreq)
+{
+	u32 msg_slot;
+	struct skd_fitmsg_context *skmsg;
+
+	u32 timo_slot;
+
+	/*
+	 * Reclaim the FIT msg buffer if this is
+	 * the first of the requests it carried to
+	 * be completed. The FIT msg buffer used to
+	 * send this request cannot be reused until
+	 * we are sure the s1120 card has copied
+	 * it to its memory. The FIT msg might have
+	 * contained several requests. As soon as
+	 * any of them are completed we know that
+	 * the entire FIT msg was transferred.
+	 * Only the first completed request will
+	 * match the FIT msg buffer id. The FIT
+	 * msg buffer id is immediately updated.
+	 * When subsequent requests complete the FIT
+	 * msg buffer id won't match, so we know
+	 * quite cheaply that it is already done.
+	 */
+	msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
+	SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
+
+	skmsg = &skdev->skmsg_table[msg_slot];
+	if (skmsg->id == skreq->fitmsg_id) {
+		SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
+		SKD_ASSERT(skmsg->outstanding > 0);
+		skmsg->outstanding--;
+		if (skmsg->outstanding == 0) {
+			skmsg->state = SKD_MSG_STATE_IDLE;
+			skmsg->id += SKD_ID_INCR;
+			skmsg->next = skdev->skmsg_free_list;
+			skdev->skmsg_free_list = skmsg;
+		}
+	}
+
+	/*
+	 * Decrease the number of active requests.
+	 * Also decrements the count in the timeout slot.
+	 */
+	SKD_ASSERT(skdev->in_flight > 0);
+	skdev->in_flight -= 1;
+
+	timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+	SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
+	skdev->timeout_slot[timo_slot] -= 1;
+
+	/*
+	 * Reset backpointer
+	 */
+	skreq->req = NULL;
+
+	/*
+	 * Reclaim the skd_request_context
+	 */
+	skreq->state = SKD_REQ_STATE_IDLE;
+	skreq->id += SKD_ID_INCR;
+	skreq->next = skdev->skreq_free_list;
+	skdev->skreq_free_list = skreq;
+}
+
+#define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
+
+static void skd_do_inq_page_00(struct skd_device *skdev,
+			       volatile struct fit_completion_entry_v1 *skcomp,
+			       volatile struct fit_comp_error_info *skerr,
+			       uint8_t *cdb, uint8_t *buf)
+{
+	uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
+
+	/* Caller requested "supported pages".  The driver needs to insert
+	 * its page.
+	 */
+	pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
+		 skdev->name, __func__, __LINE__);
+
+	/* If the device rejected the request because the CDB was
+	 * improperly formed, then just leave.
+	 */
+	if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
+	    skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
+		return;
+
+	/* Get the amount of space the caller allocated */
+	max_bytes = (cdb[3] << 8) | cdb[4];
+
+	/* Get the number of pages actually returned by the device */
+	drive_pages = (buf[2] << 8) | buf[3];
+	drive_bytes = drive_pages + 4;
+	new_size = drive_pages + 1;
+
+	/* Supported pages must be in numerical order, so find where
+	 * the driver page needs to be inserted into the list of
+	 * pages returned by the device.
+	 */
+	for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
+		if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
+			return; /* Device is using this page code; abort. */
+		else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
+			break;
+	}
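+	/*
+	 * E.g. if the device returned pages 0x00, 0x80 and 0x83, all
+	 * compare below 0xDA, so insert_pt runs to the end of the
+	 * returned list and the driver page is appended (when the
+	 * caller's buffer has room); a device already claiming 0xDA
+	 * makes us bail out above.
+	 */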
+
+	if (insert_pt < max_bytes) {
+		uint16_t u;
+
+		/* Shift everything up one byte to make room. */
+		for (u = new_size + 3; u > insert_pt; u--)
+			buf[u] = buf[u - 1];
+		buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
+
+		/* Increment num_returned_bytes by 1, preserving its
+		 * big-endian (SCSI) byte order. */
+		skcomp->num_returned_bytes =
+			cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) +
+				    1);
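+		/*
+		 * E.g. on little-endian, 5 returned bytes sit in memory as
+		 * 00 00 00 05; the swap/add/swap-back sequence leaves
+		 * 00 00 00 06, i.e. 6 in big-endian.
+		 */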
+	}
+
+	/* update page length field to reflect the driver's page too */
+	buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
+	buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
+}
+
+static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
+{
+	int pcie_reg;
+	u16 pci_bus_speed;
+	u8 pci_lanes;
+
+	pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pcie_reg) {
+		u16 linksta;
+		pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
+
+		pci_bus_speed = linksta & 0xF;
+		pci_lanes = (linksta & 0x3F0) >> 4;
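+		/*
+		 * E.g. an (illustrative) LNKSTA of 0x0042 decodes to speed
+		 * code 2 -- 5.0 GT/s per the switch below -- and a x4 link.
+		 */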
+	} else {
+		*speed = STEC_LINK_UNKNOWN;
+		*width = 0xFF;
+		return;
+	}
+
+	switch (pci_bus_speed) {
+	case 1:
+		*speed = STEC_LINK_2_5GTS;
+		break;
+	case 2:
+		*speed = STEC_LINK_5GTS;
+		break;
+	case 3:
+		*speed = STEC_LINK_8GTS;
+		break;
+	default:
+		*speed = STEC_LINK_UNKNOWN;
+		break;
+	}
+
+	if (pci_lanes <= 0x20)
+		*width = pci_lanes;
+	else
+		*width = 0xFF;
+}
+
+static void skd_do_inq_page_da(struct skd_device *skdev,
+			       volatile struct fit_completion_entry_v1 *skcomp,
+			       volatile struct fit_comp_error_info *skerr,
+			       uint8_t *cdb, uint8_t *buf)
+{
+	struct pci_dev *pdev = skdev->pdev;
+	unsigned max_bytes;
+	struct driver_inquiry_data inq;
+	u16 val;
+
+	pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
+		 skdev->name, __func__, __LINE__);
+
+	memset(&inq, 0, sizeof(inq));
+
+	inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
+
+	skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
+	inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
+	inq.pcie_device_number = PCI_SLOT(pdev->devfn);
+	inq.pcie_function_number = PCI_FUNC(pdev->devfn);
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
+	inq.pcie_vendor_id = cpu_to_be16(val);
+
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
+	inq.pcie_device_id = cpu_to_be16(val);
+
+	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
+	inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
+
+	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
+	inq.pcie_subsystem_device_id = cpu_to_be16(val);
+
+	/* Driver version, fixed length, padded with spaces on the right */
+	inq.driver_version_length = sizeof(inq.driver_version);
+	memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
+	memcpy(inq.driver_version, DRV_VER_COMPL,
+	       min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
+
+	inq.page_length = cpu_to_be16((sizeof(inq) - 4));
+
+	/* Clear the error set by the device */
+	skcomp->status = SAM_STAT_GOOD;
+	memset((void *)skerr, 0, sizeof(*skerr));
+
+	/* copy response into output buffer */
+	max_bytes = (cdb[3] << 8) | cdb[4];
+	memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
+
+	skcomp->num_returned_bytes =
+		cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
+}
+
+static void skd_do_driver_inq(struct skd_device *skdev,
+			      volatile struct fit_completion_entry_v1 *skcomp,
+			      volatile struct fit_comp_error_info *skerr,
+			      uint8_t *cdb, uint8_t *buf)
+{
+	if (!buf)
+		return;
+	else if (cdb[0] != INQUIRY)
+		return;         /* Not an INQUIRY */
+	else if ((cdb[1] & 1) == 0)
+		return;         /* EVPD not set */
+	else if (cdb[2] == 0)
+		/* Need to add driver's page to supported pages list */
+		skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
+	else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
+		/* Caller requested driver's page */
+		skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
+}
+
+static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
+{
+	if (!sg)
+		return NULL;
+	if (!sg_page(sg))
+		return NULL;
+	return sg_virt(sg);
+}
+
+static void skd_process_scsi_inq(struct skd_device *skdev,
+				 volatile struct fit_completion_entry_v1
+				 *skcomp,
+				 volatile struct fit_comp_error_info *skerr,
+				 struct skd_special_context *skspcl)
+{
+	uint8_t *buf;
+	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
+	dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
+			    skspcl->req.sg_data_dir);
+	buf = skd_sg_1st_page_ptr(skspcl->req.sg);
+
+	if (buf)
+		skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
+}
+
+
+static int skd_isr_completion_posted(struct skd_device *skdev,
+					int limit, int *enqueued)
+{
+	volatile struct fit_completion_entry_v1 *skcmp = NULL;
+	volatile struct fit_comp_error_info *skerr;
+	u16 req_id;
+	u32 req_slot;
+	struct skd_request_context *skreq;
+	u16 cmp_cntxt = 0;
+	u8 cmp_status = 0;
+	u8 cmp_cycle = 0;
+	u32 cmp_bytes = 0;
+	int rc = 0;
+	int processed = 0;
+
+	for (;;) {
+		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
+
+		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
+		cmp_cycle = skcmp->cycle;
+		cmp_cntxt = skcmp->tag;
+		cmp_status = skcmp->status;
+		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
+
+		skerr = &skdev->skerr_table[skdev->skcomp_ix];
+
+		pr_debug("%s:%s:%d "
+			 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
+			 "busy=%d rbytes=0x%x proto=%d\n",
+			 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
+			 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
+			 skdev->in_flight, cmp_bytes, skdev->proto_ver);
+
+		if (cmp_cycle != skdev->skcomp_cycle) {
+			pr_debug("%s:%s:%d end of completions\n",
+				 skdev->name, __func__, __LINE__);
+			break;
+		}
+		/*
+		 * Update the completion queue head index and possibly
+		 * the completion cycle count. 8-bit wrap-around.
+		 */
+		skdev->skcomp_ix++;
+		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
+			skdev->skcomp_ix = 0;
+			skdev->skcomp_cycle++;
+		}
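+		/*
+		 * E.g. when skcomp_ix reaches SKD_N_COMPLETION_ENTRY - 1,
+		 * the next pass wraps the index to 0 and bumps the cycle,
+		 * so entries still carrying the old cycle value fail the
+		 * check above and end the loop.
+		 */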
+
+		/*
+		 * The command context is a unique 32-bit ID. The low order
+		 * bits help locate the request. The request is usually a
+		 * r/w request (see skd_start() above) or a special request.
+		 */
+		req_id = cmp_cntxt;
+		req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+
+		/* Is this other than a r/w request? */
+		if (req_slot >= skdev->num_req_context) {
+			/*
+			 * This is not a completion for a r/w request.
+			 */
+			skd_complete_other(skdev, skcmp, skerr);
+			continue;
+		}
+
+		skreq = &skdev->skreq_table[req_slot];
+
+		/*
+		 * Make sure the request ID for the slot matches.
+		 */
+		if (skreq->id != req_id) {
+			pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
+				 skdev->name, __func__, __LINE__,
+				 req_id, skreq->id);
+			{
+				u16 new_id = cmp_cntxt;
+				pr_err("(%s): Completion mismatch "
+				       "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+				       skd_name(skdev), req_id,
+				       skreq->id, new_id);
+
+				continue;
+			}
+		}
+
+		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
+
+		if (skreq->state == SKD_REQ_STATE_ABORTED) {
+			pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
+				 skdev->name, __func__, __LINE__,
+				 skreq, skreq->id);
+			/* a previously timed out command can
+			 * now be cleaned up */
+			skd_release_skreq(skdev, skreq);
+			continue;
+		}
+
+		skreq->completion = *skcmp;
+		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
+			skreq->err_info = *skerr;
+			skd_log_check_status(skdev, cmp_status, skerr->key,
+					     skerr->code, skerr->qual,
+					     skerr->fruc);
+		}
+		/* Release DMA resources for the request. */
+		if (skreq->n_sg > 0)
+			skd_postop_sg_list(skdev, skreq);
+
+		if (!skreq->req) {
+			pr_debug("%s:%s:%d NULL backptr skdreq %p, "
+				 "req=0x%x req_id=0x%x\n",
+				 skdev->name, __func__, __LINE__,
+				 skreq, skreq->id, req_id);
+		} else {
+			/*
+			 * Capture the outcome and post it back to the
+			 * native request.
+			 */
+			if (likely(cmp_status == SAM_STAT_GOOD))
+				skd_end_request(skdev, skreq, 0);
+			else
+				skd_resolve_req_exception(skdev, skreq);
+		}
+
+		/*
+		 * Release the skreq, its FIT msg (if one), timeout slot,
+		 * and queue depth.
+		 */
+		skd_release_skreq(skdev, skreq);
+
+		/* skd_isr_comp_limit equal to zero means no limit */
+		if (limit) {
+			if (++processed >= limit) {
+				rc = 1;
+				break;
+			}
+		}
+	}
+
+	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
+	    skdev->in_flight == 0) {
+		skdev->state = SKD_DRVR_STATE_PAUSED;
+		wake_up_interruptible(&skdev->waitq);
+	}
+
+	return rc;
+}
+
+static void skd_complete_other(struct skd_device *skdev,
+			       volatile struct fit_completion_entry_v1 *skcomp,
+			       volatile struct fit_comp_error_info *skerr)
+{
+	u32 req_id = 0;
+	u32 req_table;
+	u32 req_slot;
+	struct skd_special_context *skspcl;
+
+	req_id = skcomp->tag;
+	req_table = req_id & SKD_ID_TABLE_MASK;
+	req_slot = req_id & SKD_ID_SLOT_MASK;
+
+	pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 req_table, req_id, req_slot);
+
+	/*
+	 * Based on the request id, determine how to dispatch this completion.
+	 * This switch/case finds the good cases and forwards the
+	 * completion entry. Errors are reported below the switch.
+	 */
+	switch (req_table) {
+	case SKD_ID_RW_REQUEST:
+		/*
+		 * The caller, skd_completion_posted_isr() above,
+		 * handles r/w requests. The only way we get here
+		 * is if the req_slot is out of bounds.
+		 */
+		break;
+
+	case SKD_ID_SPECIAL_REQUEST:
+		/*
+		 * Make sure the req_slot is in bounds and that the id
+		 * matches.
+		 */
+		if (req_slot < skdev->n_special) {
+			skspcl = &skdev->skspcl_table[req_slot];
+			if (skspcl->req.id == req_id &&
+			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
+				skd_complete_special(skdev,
+						     skcomp, skerr, skspcl);
+				return;
+			}
+		}
+		break;
+
+	case SKD_ID_INTERNAL:
+		if (req_slot == 0) {
+			skspcl = &skdev->internal_skspcl;
+			if (skspcl->req.id == req_id &&
+			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
+				skd_complete_internal(skdev,
+						      skcomp, skerr, skspcl);
+				return;
+			}
+		}
+		break;
+
+	case SKD_ID_FIT_MSG:
+		/*
+		 * These IDs should never appear in a completion record.
+		 */
+		break;
+
+	default:
+		/*
+		 * These IDs should never appear anywhere.
+		 */
+		break;
+	}
+
+	/*
+	 * If we get here it is a bad or stale id.
+	 */
+}
+
+static void skd_complete_special(struct skd_device *skdev,
+				 volatile struct fit_completion_entry_v1
+				 *skcomp,
+				 volatile struct fit_comp_error_info *skerr,
+				 struct skd_special_context *skspcl)
+{
+	pr_debug("%s:%s:%d  completing special request %p\n",
+		 skdev->name, __func__, __LINE__, skspcl);
+	if (skspcl->orphaned) {
+		/* Discard orphaned request */
+		/* ?: Can this release directly or does it need
+		 * to use a worker? */
+		pr_debug("%s:%s:%d release orphaned %p\n",
+			 skdev->name, __func__, __LINE__, skspcl);
+		skd_release_special(skdev, skspcl);
+		return;
+	}
+
+	skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
+
+	skspcl->req.state = SKD_REQ_STATE_COMPLETED;
+	skspcl->req.completion = *skcomp;
+	skspcl->req.err_info = *skerr;
+
+	skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
+			     skerr->code, skerr->qual, skerr->fruc);
+
+	wake_up_interruptible(&skdev->waitq);
+}
+
+/* assume spinlock is already held */
+static void skd_release_special(struct skd_device *skdev,
+				struct skd_special_context *skspcl)
+{
+	int i, was_depleted;
+
+	for (i = 0; i < skspcl->req.n_sg; i++) {
+		struct page *page = sg_page(&skspcl->req.sg[i]);
+		__free_page(page);
+	}
+
+	was_depleted = (skdev->skspcl_free_list == NULL);
+
+	skspcl->req.state = SKD_REQ_STATE_IDLE;
+	skspcl->req.id += SKD_ID_INCR;
+	skspcl->req.next =
+		(struct skd_request_context *)skdev->skspcl_free_list;
+	skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
+
+	if (was_depleted) {
+		pr_debug("%s:%s:%d skspcl was depleted\n",
+			 skdev->name, __func__, __LINE__);
+		/* The free list was depleted. There might be waiters. */
+		wake_up_interruptible(&skdev->waitq);
+	}
+}
+
+static void skd_reset_skcomp(struct skd_device *skdev)
+{
+	u32 nbytes;
+	struct fit_completion_entry_v1 *skcomp;
+
+	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+
+	memset(skdev->skcomp_table, 0, nbytes);
+
+	skdev->skcomp_ix = 0;
+	skdev->skcomp_cycle = 1;
+}
+
+/*
+ *****************************************************************************
+ * INTERRUPTS
+ *****************************************************************************
+ */
+static void skd_completion_worker(struct work_struct *work)
+{
+	struct skd_device *skdev =
+		container_of(work, struct skd_device, completion_worker);
+	unsigned long flags;
+	int flush_enqueued = 0;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+
+	/*
+	 * Pass in limit=0, which means no limit:
+	 * process everything in the completion queue.
+	 */
+	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
+	skd_request_fn(skdev->queue);
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev);
+
+static irqreturn_t skd_isr(int irq, void *ptr)
+{
+	struct skd_device *skdev;
+	u32 intstat;
+	u32 ack;
+	int rc = 0;
+	int deferred = 0;
+	int flush_enqueued = 0;
+
+	skdev = (struct skd_device *)ptr;
+	spin_lock(&skdev->lock);
+
+	for (;;) {
+		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+
+		ack = FIT_INT_DEF_MASK;
+		ack &= intstat;
+
+		pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
+			 skdev->name, __func__, __LINE__, intstat, ack);
+
+		/* As long as an interrupt is pending on the device, keep
+		 * running the loop. When none remain, get out; if we never
+		 * did any processing, defer to the completion handler.
+		 */
+		if (ack == 0) {
+			/* No interrupts pending on the device, but run the
+			 * completion processor anyway.
+			 */
+			if (rc == 0)
+				if (likely(skdev->state
+					== SKD_DRVR_STATE_ONLINE))
+					deferred = 1;
+			break;
+		}
+
+		rc = IRQ_HANDLED;
+
+		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
+
+		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
+			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
+			if (intstat & FIT_ISH_COMPLETION_POSTED) {
+				/*
+				 * If we have already deferred completion
+				 * processing, don't bother running it again
+				 */
+				if (deferred == 0)
+					deferred =
+						skd_isr_completion_posted(skdev,
+						skd_isr_comp_limit, &flush_enqueued);
+			}
+
+			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
+				skd_isr_fwstate(skdev);
+				if (skdev->state == SKD_DRVR_STATE_FAULT ||
+				    skdev->state ==
+				    SKD_DRVR_STATE_DISAPPEARED) {
+					spin_unlock(&skdev->lock);
+					return rc;
+				}
+			}
+
+			if (intstat & FIT_ISH_MSG_FROM_DEV)
+				skd_isr_msg_from_dev(skdev);
+		}
+	}
+
+	if (unlikely(flush_enqueued))
+		skd_request_fn(skdev->queue);
+
+	if (deferred)
+		schedule_work(&skdev->completion_worker);
+	else if (!flush_enqueued)
+		skd_request_fn(skdev->queue);
+
+	spin_unlock(&skdev->lock);
+
+	return rc;
+}
+
+static void skd_drive_fault(struct skd_device *skdev)
+{
+	skdev->state = SKD_DRVR_STATE_FAULT;
+	pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+}
+
+static void skd_drive_disappeared(struct skd_device *skdev)
+{
+	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
+	pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+}
+
+static void skd_isr_fwstate(struct skd_device *skdev)
+{
+	u32 sense;
+	u32 state;
+	u32 mtd;
+	int prev_driver_state = skdev->state;
+
+	sense = SKD_READL(skdev, FIT_STATUS);
+	state = sense & FIT_SR_DRIVE_STATE_MASK;
+
+	pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
+	       skd_name(skdev),
+	       skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+	       skd_drive_state_to_str(state), state);
+
+	skdev->drive_state = state;
+
+	switch (skdev->drive_state) {
+	case FIT_SR_DRIVE_INIT:
+		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
+			skd_disable_interrupts(skdev);
+			break;
+		}
+		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
+			skd_recover_requests(skdev, 0);
+		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
+			skdev->timer_countdown = SKD_STARTING_TIMO;
+			skdev->state = SKD_DRVR_STATE_STARTING;
+			skd_soft_reset(skdev);
+			break;
+		}
+		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_SR_DRIVE_ONLINE:
+		skdev->cur_max_queue_depth = skd_max_queue_depth;
+		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
+			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
+
+		skdev->queue_low_water_mark =
+			skdev->cur_max_queue_depth * 2 / 3 + 1;
+		if (skdev->queue_low_water_mark < 1)
+			skdev->queue_low_water_mark = 1;
+		pr_info(
+		       "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
+		       skd_name(skdev),
+		       skdev->cur_max_queue_depth,
+		       skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+
+		skd_refresh_device_data(skdev);
+		break;
+
+	case FIT_SR_DRIVE_BUSY:
+		skdev->state = SKD_DRVR_STATE_BUSY;
+		skdev->timer_countdown = SKD_BUSY_TIMO;
+		skd_quiesce_dev(skdev);
+		break;
+	case FIT_SR_DRIVE_BUSY_SANITIZE:
+		/* set timer for 3 seconds, we'll abort any unfinished
+		 * commands after that expires
+		 */
+		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
+		blk_start_queue(skdev->queue);
+		break;
+	case FIT_SR_DRIVE_BUSY_ERASE:
+		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+		skdev->timer_countdown = SKD_BUSY_TIMO;
+		break;
+	case FIT_SR_DRIVE_OFFLINE:
+		skdev->state = SKD_DRVR_STATE_IDLE;
+		break;
+	case FIT_SR_DRIVE_SOFT_RESET:
+		switch (skdev->state) {
+		case SKD_DRVR_STATE_STARTING:
+		case SKD_DRVR_STATE_RESTARTING:
+			/* Expected by a caller of skd_soft_reset() */
+			break;
+		default:
+			skdev->state = SKD_DRVR_STATE_RESTARTING;
+			break;
+		}
+		break;
+	case FIT_SR_DRIVE_FW_BOOTING:
+		pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+		break;
+
+	case FIT_SR_DRIVE_DEGRADED:
+	case FIT_SR_PCIE_LINK_DOWN:
+	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+		break;
+
+	case FIT_SR_DRIVE_FAULT:
+		skd_drive_fault(skdev);
+		skd_recover_requests(skdev, 0);
+		blk_start_queue(skdev->queue);
+		break;
+
+	/* PCIe bus returned all Fs? */
+	case 0xFF:
+		pr_info("(%s): state=0x%x sense=0x%x\n",
+		       skd_name(skdev), state, sense);
+		skd_drive_disappeared(skdev);
+		skd_recover_requests(skdev, 0);
+		blk_start_queue(skdev->queue);
+		break;
+	default:
+		/*
+		 * Unknown FW state. Wait for a state we recognize.
+		 */
+		break;
+	}
+	pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+	       skd_name(skdev),
+	       skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+	       skd_skdev_state_to_str(skdev->state), skdev->state);
+}
+
+static void skd_recover_requests(struct skd_device *skdev, int requeue)
+{
+	int i;
+
+	for (i = 0; i < skdev->num_req_context; i++) {
+		struct skd_request_context *skreq = &skdev->skreq_table[i];
+
+		if (skreq->state == SKD_REQ_STATE_BUSY) {
+			skd_log_skreq(skdev, skreq, "recover");
+
+			SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
+			SKD_ASSERT(skreq->req != NULL);
+
+			/* Release DMA resources for the request. */
+			if (skreq->n_sg > 0)
+				skd_postop_sg_list(skdev, skreq);
+
+			if (requeue &&
+			    (unsigned long) ++skreq->req->special <
+			    SKD_MAX_RETRIES)
+				blk_requeue_request(skdev->queue, skreq->req);
+			else
+				skd_end_request(skdev, skreq, -EIO);
+
+			skreq->req = NULL;
+
+			skreq->state = SKD_REQ_STATE_IDLE;
+			skreq->id += SKD_ID_INCR;
+		}
+		if (i > 0)
+			skreq[-1].next = skreq;
+		skreq->next = NULL;
+	}
+	skdev->skreq_free_list = skdev->skreq_table;
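+	/*
+	 * The loop above chained skreq_table[0..n-1] in order via ->next,
+	 * so pointing the free-list head at the table start leaves a
+	 * fully rebuilt free list.
+	 */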
+
+	for (i = 0; i < skdev->num_fitmsg_context; i++) {
+		struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+
+		if (skmsg->state == SKD_MSG_STATE_BUSY) {
+			skd_log_skmsg(skdev, skmsg, "salvaged");
+			SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
+			skmsg->state = SKD_MSG_STATE_IDLE;
+			skmsg->id += SKD_ID_INCR;
+		}
+		if (i > 0)
+			skmsg[-1].next = skmsg;
+		skmsg->next = NULL;
+	}
+	skdev->skmsg_free_list = skdev->skmsg_table;
+
+	for (i = 0; i < skdev->n_special; i++) {
+		struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+
+		/* If orphaned, reclaim it: it has already been reported
+		 * to the process as an error (it was waiting for a
+		 * completion that didn't come, and now never will).
+		 * If busy, change to a state that will cause it to error
+		 * out in the wait routine and let it do the normal
+		 * reporting and reclaiming.
+		 */
+		if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
+			if (skspcl->orphaned) {
+				pr_debug("%s:%s:%d orphaned %p\n",
+					 skdev->name, __func__, __LINE__,
+					 skspcl);
+				skd_release_special(skdev, skspcl);
+			} else {
+				pr_debug("%s:%s:%d not orphaned %p\n",
+					 skdev->name, __func__, __LINE__,
+					 skspcl);
+				skspcl->req.state = SKD_REQ_STATE_ABORTED;
+			}
+		}
+	}
+	skdev->skspcl_free_list = skdev->skspcl_table;
+
+	for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
+		skdev->timeout_slot[i] = 0;
+
+	skdev->in_flight = 0;
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev)
+{
+	u32 mfd;
+	u32 mtd;
+	u32 data;
+
+	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+
+	pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
+		 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+
+	/* ignore any mtd that is an ack for something we didn't send */
+	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
+		return;
+
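+	/*
+	 * Boot-time handshake, one device ack per step:
+	 * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
+	 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO ->
+	 * CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE; the drive then heads for
+	 * FIT_SR_DRIVE_ONLINE.
+	 */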
+	switch (FIT_MXD_TYPE(mfd)) {
+	case FIT_MTD_FITFW_INIT:
+		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
+
+		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
+			pr_err("(%s): protocol mismatch\n",
+			       skdev->name);
+			pr_err("(%s):   got=%d support=%d\n",
+			       skdev->name, skdev->proto_ver,
+			       FIT_PROTOCOL_VERSION_1);
+			pr_err("(%s):   please upgrade driver\n",
+			       skdev->name);
+			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
+			skd_soft_reset(skdev);
+			break;
+		}
+		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_GET_CMDQ_DEPTH:
+		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
+		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
+				   SKD_N_COMPLETION_ENTRY);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_SET_COMPQ_DEPTH:
+		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
+		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_SET_COMPQ_ADDR:
+		skd_reset_skcomp(skdev);
+		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_CMD_LOG_HOST_ID:
+		skdev->connect_time_stamp = get_seconds();
+		data = skdev->connect_time_stamp & 0xFFFF;
+		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
+		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
+		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
+		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+		break;
+
+	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
+		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
+		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
+		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+		skdev->last_mtd = mtd;
+
+		pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
+		       skd_name(skdev),
+		       skdev->connect_time_stamp, skdev->drive_jiffies);
+		break;
+
+	case FIT_MTD_ARM_QUEUE:
+		skdev->last_mtd = 0;
+		/*
+		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
+		 */
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void skd_disable_interrupts(struct skd_device *skdev)
+{
+	u32 sense;
+
+	sense = SKD_READL(skdev, FIT_CONTROL);
+	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
+	SKD_WRITEL(skdev, sense, FIT_CONTROL);
+	pr_debug("%s:%s:%d sense 0x%x\n",
+		 skdev->name, __func__, __LINE__, sense);
+
+	/* Note that the 1s is written. A 1-bit means
+	 * disable, a 0 means enable.
+	 */
+	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
+}
+
+static void skd_enable_interrupts(struct skd_device *skdev)
+{
+	u32 val;
+
+	/* unmask interrupts first */
+	val = FIT_ISH_FW_STATE_CHANGE +
+	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
+
+	/* Note that the complement of the mask is written. A 1-bit means
+	 * disable, a 0 means enable. */
+	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
+	pr_debug("%s:%s:%d interrupt mask=0x%x\n",
+		 skdev->name, __func__, __LINE__, ~val);
+
+	val = SKD_READL(skdev, FIT_CONTROL);
+	val |= FIT_CR_ENABLE_INTERRUPTS;
+	pr_debug("%s:%s:%d control=0x%x\n",
+		 skdev->name, __func__, __LINE__, val);
+	SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+/*
+ *****************************************************************************
+ * START, STOP, RESTART, QUIESCE, UNQUIESCE
+ *****************************************************************************
+ */
+
+static void skd_soft_reset(struct skd_device *skdev)
+{
+	u32 val;
+
+	val = SKD_READL(skdev, FIT_CONTROL);
+	val |= (FIT_CR_SOFT_RESET);
+	pr_debug("%s:%s:%d control=0x%x\n",
+		 skdev->name, __func__, __LINE__, val);
+	SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+static void skd_start_device(struct skd_device *skdev)
+{
+	unsigned long flags;
+	u32 sense;
+	u32 state;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+
+	/* ack all ghost interrupts */
+	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+	sense = SKD_READL(skdev, FIT_STATUS);
+
+	pr_debug("%s:%s:%d initial status=0x%x\n",
+		 skdev->name, __func__, __LINE__, sense);
+
+	state = sense & FIT_SR_DRIVE_STATE_MASK;
+	skdev->drive_state = state;
+	skdev->last_mtd = 0;
+
+	skdev->state = SKD_DRVR_STATE_STARTING;
+	skdev->timer_countdown = SKD_STARTING_TIMO;
+
+	skd_enable_interrupts(skdev);
+
+	switch (skdev->drive_state) {
+	case FIT_SR_DRIVE_OFFLINE:
+		pr_err("(%s): Drive offline...\n", skd_name(skdev));
+		break;
+
+	case FIT_SR_DRIVE_FW_BOOTING:
+		pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+		break;
+
+	case FIT_SR_DRIVE_BUSY_SANITIZE:
+		pr_info("(%s): Start: BUSY_SANITIZE\n",
+		       skd_name(skdev));
+		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+		break;
+
+	case FIT_SR_DRIVE_BUSY_ERASE:
+		pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+		break;
+
+	case FIT_SR_DRIVE_INIT:
+	case FIT_SR_DRIVE_ONLINE:
+		skd_soft_reset(skdev);
+		break;
+
+	case FIT_SR_DRIVE_BUSY:
+		pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+		skdev->state = SKD_DRVR_STATE_BUSY;
+		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+		break;
+
+	case FIT_SR_DRIVE_SOFT_RESET:
+		pr_err("(%s) drive soft reset in prog\n",
+		       skd_name(skdev));
+		break;
+
+	case FIT_SR_DRIVE_FAULT:
+		/* Fault state is bad...soft reset won't do it...
+		 * Hard reset, maybe, but does it work on device?
+		 * For now, just fault so the system doesn't hang.
+		 */
+		skd_drive_fault(skdev);
+		/*start the queue so we can respond with error to requests */
+		pr_debug("%s:%s:%d starting %s queue\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		blk_start_queue(skdev->queue);
+		skdev->gendisk_on = -1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	case 0xFF:
+		/* Most likely the device isn't there or isn't responding
+		 * to the BAR1 addresses. */
+		skd_drive_disappeared(skdev);
+		/*start the queue so we can respond with error to requests */
+		pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		blk_start_queue(skdev->queue);
+		skdev->gendisk_on = -1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	default:
+		pr_err("(%s) Start: unknown state %x\n",
+		       skd_name(skdev), skdev->drive_state);
+		break;
+	}
+
+	state = SKD_READL(skdev, FIT_CONTROL);
+	pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+	pr_debug("%s:%s:%d Intr Status=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
+	pr_debug("%s:%s:%d Intr Mask=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+	pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	state = SKD_READL(skdev, FIT_HW_VERSION);
+	pr_debug("%s:%s:%d HW version=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_stop_device(struct skd_device *skdev)
+{
+	unsigned long flags;
+	struct skd_special_context *skspcl = &skdev->internal_skspcl;
+	u32 dev_state;
+	int i;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+
+	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+		pr_err("(%s): skd_stop_device not online no sync\n",
+		       skd_name(skdev));
+		goto stop_out;
+	}
+
+	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
+		pr_err("(%s): skd_stop_device no special\n",
+		       skd_name(skdev));
+		goto stop_out;
+	}
+
+	skdev->state = SKD_DRVR_STATE_SYNCING;
+	skdev->sync_done = 0;
+
+	skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+	wait_event_interruptible_timeout(skdev->waitq,
+					 (skdev->sync_done), (10 * HZ));
+
+	spin_lock_irqsave(&skdev->lock, flags);
+
+	switch (skdev->sync_done) {
+	case 0:
+		pr_err("(%s): skd_stop_device no sync\n",
+		       skd_name(skdev));
+		break;
+	case 1:
+		pr_err("(%s): skd_stop_device sync done\n",
+		       skd_name(skdev));
+		break;
+	default:
+		pr_err("(%s): skd_stop_device sync error\n",
+		       skd_name(skdev));
+	}
+
+stop_out:
+	skdev->state = SKD_DRVR_STATE_STOPPING;
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+	skd_kill_timer(skdev);
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	skd_disable_interrupts(skdev);
+
+	/* ensure all ints on device are cleared */
+	/* soft reset the device to unload with a clean slate */
+	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+	SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+	/* poll every 100ms, 1 second timeout */
+	for (i = 0; i < 10; i++) {
+		dev_state =
+			SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
+		if (dev_state == FIT_SR_DRIVE_INIT)
+			break;
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(msecs_to_jiffies(100));
+	}
+
+	if (dev_state != FIT_SR_DRIVE_INIT)
+		pr_err("(%s): skd_stop_device state error 0x%02x\n",
+		       skd_name(skdev), dev_state);
+}
+
+/* assume spinlock is held */
+static void skd_restart_device(struct skd_device *skdev)
+{
+	u32 state;
+
+	/* ack all ghost interrupts */
+	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+	state = SKD_READL(skdev, FIT_STATUS);
+
+	pr_debug("%s:%s:%d drive status=0x%x\n",
+		 skdev->name, __func__, __LINE__, state);
+
+	state &= FIT_SR_DRIVE_STATE_MASK;
+	skdev->drive_state = state;
+	skdev->last_mtd = 0;
+
+	skdev->state = SKD_DRVR_STATE_RESTARTING;
+	skdev->timer_countdown = SKD_RESTARTING_TIMO;
+
+	skd_soft_reset(skdev);
+}
+
+/* assume spinlock is held */
+static int skd_quiesce_dev(struct skd_device *skdev)
+{
+	int rc = 0;
+
+	switch (skdev->state) {
+	case SKD_DRVR_STATE_BUSY:
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+		pr_debug("%s:%s:%d stopping %s queue\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		blk_stop_queue(skdev->queue);
+		break;
+	case SKD_DRVR_STATE_ONLINE:
+	case SKD_DRVR_STATE_STOPPING:
+	case SKD_DRVR_STATE_SYNCING:
+	case SKD_DRVR_STATE_PAUSING:
+	case SKD_DRVR_STATE_PAUSED:
+	case SKD_DRVR_STATE_STARTING:
+	case SKD_DRVR_STATE_RESTARTING:
+	case SKD_DRVR_STATE_RESUMING:
+	default:
+		rc = -EINVAL;
+		pr_debug("%s:%s:%d state [%d] not implemented\n",
+			 skdev->name, __func__, __LINE__, skdev->state);
+	}
+	return rc;
+}
+
+/* assume spinlock is held */
+static int skd_unquiesce_dev(struct skd_device *skdev)
+{
+	int prev_driver_state = skdev->state;
+
+	skd_log_skdev(skdev, "unquiesce");
+	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
+		pr_debug("%s:%s:%d **** device already ONLINE\n",
+			 skdev->name, __func__, __LINE__);
+		return 0;
+	}
+	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
+		/*
+		 * If there has been a state change to other than
+		 * ONLINE, we will rely on controller state change
+		 * to come back online and restart the queue.
+		 * The BUSY state means that driver is ready to
+		 * continue normal processing but waiting for controller
+		 * to become available.
+		 */
+		skdev->state = SKD_DRVR_STATE_BUSY;
+		pr_debug("%s:%s:%d drive BUSY state\n",
+			 skdev->name, __func__, __LINE__);
+		return 0;
+	}
+
+	/*
+	 * Drive has just come online, driver is either in startup,
+	 * paused performing a task, or busy waiting for hardware.
+	 */
+	switch (skdev->state) {
+	case SKD_DRVR_STATE_PAUSED:
+	case SKD_DRVR_STATE_BUSY:
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+	case SKD_DRVR_STATE_BUSY_ERASE:
+	case SKD_DRVR_STATE_STARTING:
+	case SKD_DRVR_STATE_RESTARTING:
+	case SKD_DRVR_STATE_FAULT:
+	case SKD_DRVR_STATE_IDLE:
+	case SKD_DRVR_STATE_LOAD:
+		skdev->state = SKD_DRVR_STATE_ONLINE;
+		pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+		       skd_name(skdev),
+		       skd_skdev_state_to_str(prev_driver_state),
+		       prev_driver_state, skd_skdev_state_to_str(skdev->state),
+		       skdev->state);
+		pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
+			 skdev->name, __func__, __LINE__);
+		pr_debug("%s:%s:%d starting %s queue\n",
+			 skdev->name, __func__, __LINE__, skdev->name);
+		pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
+		blk_start_queue(skdev->queue);
+		skdev->gendisk_on = 1;
+		wake_up_interruptible(&skdev->waitq);
+		break;
+
+	case SKD_DRVR_STATE_DISAPPEARED:
+	default:
+		pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->state);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X INTERRUPT HANDLERS
+ *****************************************************************************
+ */
+
+static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
+{
+	struct skd_device *skdev = skd_host_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d MSIX = 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
+	       irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
+	spin_unlock_irqrestore(&skdev->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
+{
+	struct skd_device *skdev = skd_host_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d MSIX = 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
+	skd_isr_fwstate(skdev);
+	spin_unlock_irqrestore(&skdev->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
+{
+	struct skd_device *skdev = skd_host_data;
+	unsigned long flags;
+	int flush_enqueued = 0;
+	int deferred;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d MSIX = 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
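+	/*
+	 * Drain posted completions, bounded by skd_isr_comp_limit per
+	 * interrupt.  Flushes queued during the drain are kicked off
+	 * right away; leftover work is handed to the completion worker,
+	 * otherwise the request function is rerun to restart I/O.
+	 */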
+	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
+						&flush_enqueued);
+	if (flush_enqueued)
+		skd_request_fn(skdev->queue);
+
+	if (deferred)
+		schedule_work(&skdev->completion_worker);
+	else if (!flush_enqueued)
+		skd_request_fn(skdev->queue);
+
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
+{
+	struct skd_device *skdev = skd_host_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d MSIX = 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
+	skd_isr_msg_from_dev(skdev);
+	spin_unlock_irqrestore(&skdev->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
+{
+	struct skd_device *skdev = skd_host_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d MSIX = 0x%x\n",
+		 skdev->name, __func__, __LINE__,
+		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
+	SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
+	spin_unlock_irqrestore(&skdev->lock, flags);
+	return IRQ_HANDLED;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X SETUP
+ *****************************************************************************
+ */
+
+struct skd_msix_entry {
+	int have_irq;
+	u32 vector;
+	u32 entry;
+	struct skd_device *rsp;
+	char isr_name[30];
+};
+
+struct skd_init_msix_entry {
+	const char *name;
+	irq_handler_t handler;
+};
+
+#define SKD_MAX_MSIX_COUNT              13
+#define SKD_MIN_MSIX_COUNT              7
+#define SKD_BASE_MSIX_IRQ               4
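+
+/* 13 vectors: 4 DMA engines, state change, completion queue, message,
+ * 2 reserved, and 4 queue-full interrupts (see msix_entries[] below).
+ */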
+
+static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
+	{ "(DMA 0)",	    skd_reserved_isr },
+	{ "(DMA 1)",	    skd_reserved_isr },
+	{ "(DMA 2)",	    skd_reserved_isr },
+	{ "(DMA 3)",	    skd_reserved_isr },
+	{ "(State Change)", skd_statec_isr   },
+	{ "(COMPL_Q)",	    skd_comp_q	     },
+	{ "(MSG)",	    skd_msg_isr	     },
+	{ "(Reserved)",	    skd_reserved_isr },
+	{ "(Reserved)",	    skd_reserved_isr },
+	{ "(Queue Full 0)", skd_qfull_isr    },
+	{ "(Queue Full 1)", skd_qfull_isr    },
+	{ "(Queue Full 2)", skd_qfull_isr    },
+	{ "(Queue Full 3)", skd_qfull_isr    },
+};
+
+static void skd_release_msix(struct skd_device *skdev)
+{
+	struct skd_msix_entry *qentry;
+	int i;
+
+	if (skdev->msix_entries == NULL)
+		return;
+	for (i = 0; i < skdev->msix_count; i++) {
+		qentry = &skdev->msix_entries[i];
+		skdev = qentry->rsp;
+
+		if (qentry->have_irq)
+			devm_free_irq(&skdev->pdev->dev,
+				      qentry->vector, qentry->rsp);
+	}
+	pci_disable_msix(skdev->pdev);
+	kfree(skdev->msix_entries);
+	skdev->msix_count = 0;
+	skdev->msix_entries = NULL;
+}
+
+static int skd_acquire_msix(struct skd_device *skdev)
+{
+	int i, rc;
+	struct pci_dev *pdev;
+	struct msix_entry *entries = NULL;
+	struct skd_msix_entry *qentry;
+
+	pdev = skdev->pdev;
+	skdev->msix_count = SKD_MAX_MSIX_COUNT;
+	entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
+			  GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
+		entries[i].entry = i;
+
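+	/*
+	 * pci_enable_msix() returns 0 on success, a negative errno on
+	 * failure, or a positive count of the vectors actually
+	 * available when fewer than requested; in that case retry with
+	 * the smaller count, provided it meets SKD_MIN_MSIX_COUNT.
+	 */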
+	rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
+	if (rc < 0)
+		goto msix_out;
+	if (rc) {
+		if (rc < SKD_MIN_MSIX_COUNT) {
+			pr_err("(%s): failed to enable MSI-X %d\n",
+			       skd_name(skdev), rc);
+			goto msix_out;
+		}
+		pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
+			 skdev->name, __func__, __LINE__,
+			 pci_name(pdev), skdev->name, rc);
+
+		skdev->msix_count = rc;
+		rc = pci_enable_msix(pdev, entries, skdev->msix_count);
+		if (rc) {
+			pr_err("(%s): failed to enable MSI-X "
+			       "support (%d) %d\n",
+			       skd_name(skdev), skdev->msix_count, rc);
+			goto msix_out;
+		}
+	}
+	skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
+				      skdev->msix_count, GFP_KERNEL);
+	if (!skdev->msix_entries) {
+		rc = -ENOMEM;
+		skdev->msix_count = 0;
+		pr_err("(%s): msix table allocation error\n",
+		       skd_name(skdev));
+		goto msix_out;
+	}
+
+	qentry = skdev->msix_entries;
+	for (i = 0; i < skdev->msix_count; i++) {
+		qentry->vector = entries[i].vector;
+		qentry->entry = entries[i].entry;
+		qentry->rsp = NULL;
+		qentry->have_irq = 0;
+		pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
+			 skdev->name, __func__, __LINE__,
+			 pci_name(pdev), skdev->name,
+			 i, qentry->vector, qentry->entry);
+		qentry++;
+	}
+
+	/* Enable MSI-X vectors for the base queue */
+	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+		qentry = &skdev->msix_entries[i];
+		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
+			 "%s%d-msix %s", DRV_NAME, skdev->devno,
+			 msix_entries[i].name);
+		rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
+				      msix_entries[i].handler, 0,
+				      qentry->isr_name, skdev);
+		if (rc) {
+			pr_err("(%s): Unable to register(%d) MSI-X "
+			       "handler %d: %s\n",
+			       skd_name(skdev), rc, i, qentry->isr_name);
+			goto msix_out;
+		} else {
+			qentry->have_irq = 1;
+			qentry->rsp = skdev;
+		}
+	}
+	pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
+		 skdev->name, __func__, __LINE__,
+		 pci_name(pdev), skdev->name, skdev->msix_count);
+	return 0;
+
+msix_out:
+	kfree(entries);
+	skd_release_msix(skdev);
+	return rc;
+}
+
+static int skd_acquire_irq(struct skd_device *skdev)
+{
+	int rc;
+	struct pci_dev *pdev;
+
+	pdev = skdev->pdev;
+	skdev->msix_count = 0;
+
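+	/*
+	 * Fall back through the interrupt types in order of preference:
+	 * MSI-X -> MSI -> legacy INTx.  Each failure path sets the next
+	 * irq_type and re-enters the switch until one succeeds.
+	 */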
+RETRY_IRQ_TYPE:
+	switch (skdev->irq_type) {
+	case SKD_IRQ_MSIX:
+		rc = skd_acquire_msix(skdev);
+		if (!rc)
+			pr_info("(%s): MSI-X %d irqs enabled\n",
+			       skd_name(skdev), skdev->msix_count);
+		else {
+			pr_err(
+			       "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+			       skd_name(skdev), rc);
+			skdev->irq_type = SKD_IRQ_MSI;
+			goto RETRY_IRQ_TYPE;
+		}
+		break;
+	case SKD_IRQ_MSI:
+		snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
+			 DRV_NAME, skdev->devno);
+		rc = pci_enable_msi(pdev);
+		if (!rc) {
+			rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
+					      skdev->isr_name, skdev);
+			if (rc) {
+				pci_disable_msi(pdev);
+				pr_err(
+				       "(%s): failed to allocate the MSI interrupt %d\n",
+				       skd_name(skdev), rc);
+				goto RETRY_IRQ_LEGACY;
+			}
+			pr_info("(%s): MSI irq %d enabled\n",
+			       skd_name(skdev), pdev->irq);
+		} else {
+RETRY_IRQ_LEGACY:
+			pr_err(
+			       "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
+			       skd_name(skdev), rc);
+			skdev->irq_type = SKD_IRQ_LEGACY;
+			goto RETRY_IRQ_TYPE;
+		}
+		break;
+	case SKD_IRQ_LEGACY:
+		snprintf(skdev->isr_name, sizeof(skdev->isr_name),
+			 "%s%d-legacy", DRV_NAME, skdev->devno);
+		rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+				      IRQF_SHARED, skdev->isr_name, skdev);
+		if (!rc)
+			pr_info("(%s): LEGACY irq %d enabled\n",
+			       skd_name(skdev), pdev->irq);
+		else
+			pr_err("(%s): request LEGACY irq error %d\n",
+			       skd_name(skdev), rc);
+		break;
+	default:
+		pr_info("(%s): irq_type %d invalid, re-set to %d\n",
+		       skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
+		skdev->irq_type = SKD_IRQ_LEGACY;
+		goto RETRY_IRQ_TYPE;
+	}
+	return rc;
+}
+
+static void skd_release_irq(struct skd_device *skdev)
+{
+	switch (skdev->irq_type) {
+	case SKD_IRQ_MSIX:
+		skd_release_msix(skdev);
+		break;
+	case SKD_IRQ_MSI:
+		devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+		pci_disable_msi(skdev->pdev);
+		break;
+	case SKD_IRQ_LEGACY:
+		devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+		break;
+	default:
+		pr_err("(%s): wrong irq type %d!",
+		       skd_name(skdev), skdev->irq_type);
+		break;
+	}
+}
+
+/*
+ *****************************************************************************
+ * CONSTRUCT
+ *****************************************************************************
+ */
+
+static int skd_cons_skcomp(struct skd_device *skdev)
+{
+	int rc = 0;
+	struct fit_completion_entry_v1 *skcomp;
+	u32 nbytes;
+
+	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
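+
+	/*
+	 * The completion ring and the per-entry error records share a
+	 * single coherent allocation: SKD_N_COMPLETION_ENTRY completion
+	 * slots followed by the same number of fit_comp_error_info
+	 * records.
+	 */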
+
+	pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
+		 skdev->name, __func__, __LINE__,
+		 nbytes, SKD_N_COMPLETION_ENTRY);
+
+	skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
+				      &skdev->cq_dma_address);
+
+	if (skcomp == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	memset(skcomp, 0, nbytes);
+
+	skdev->skcomp_table = skcomp;
+	skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
+							   sizeof(*skcomp) *
+							   SKD_N_COMPLETION_ENTRY);
+
+err_out:
+	return rc;
+}
+
+static int skd_cons_skmsg(struct skd_device *skdev)
+{
+	int rc = 0;
+	u32 i;
+
+	pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
+		 skdev->name, __func__, __LINE__,
+		 sizeof(struct skd_fitmsg_context),
+		 skdev->num_fitmsg_context,
+		 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+
+	skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
+				     *skdev->num_fitmsg_context, GFP_KERNEL);
+	if (skdev->skmsg_table == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	for (i = 0; i < skdev->num_fitmsg_context; i++) {
+		struct skd_fitmsg_context *skmsg;
+
+		skmsg = &skdev->skmsg_table[i];
+
+		skmsg->id = i + SKD_ID_FIT_MSG;
+
+		skmsg->state = SKD_MSG_STATE_IDLE;
+		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
+						      SKD_N_FITMSG_BYTES + 64,
+						      &skmsg->mb_dma_address);
+
+		if (skmsg->msg_buf == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
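+		/*
+		 * Round the buffer up to the next 64-byte FIT_QCMD
+		 * boundary: adding ~FIT_QCMD_BASE_ADDRESS_MASK (63) and
+		 * masking yields an aligned address.  The offset into
+		 * the original allocation is saved so the buffer can be
+		 * restored before it is freed.
+		 */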
+		skmsg->offset = (u32)((u64)skmsg->msg_buf &
+				      (~FIT_QCMD_BASE_ADDRESS_MASK));
+		skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
+		skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
+				       FIT_QCMD_BASE_ADDRESS_MASK);
+		skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
+		skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
+
+		skmsg->next = &skmsg[1];
+	}
+
+	/* Free list is in order starting with the 0th entry. */
+	skdev->skmsg_table[i - 1].next = NULL;
+	skdev->skmsg_free_list = skdev->skmsg_table;
+
+err_out:
+	return rc;
+}
+
+static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
+						  u32 n_sg,
+						  dma_addr_t *ret_dma_addr)
+{
+	struct fit_sg_descriptor *sg_list;
+	u32 nbytes;
+
+	nbytes = sizeof(*sg_list) * n_sg;
+
+	sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+
+	if (sg_list != NULL) {
+		uint64_t dma_address = *ret_dma_addr;
+		u32 i;
+
+		memset(sg_list, 0, nbytes);
+
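+		/*
+		 * Link each descriptor to the DMA address of the next;
+		 * the final entry terminates the chain with a null
+		 * next_desc_ptr.
+		 */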
+		for (i = 0; i < n_sg - 1; i++) {
+			uint64_t ndp_off;
+			ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
+
+			sg_list[i].next_desc_ptr = dma_address + ndp_off;
+		}
+		sg_list[i].next_desc_ptr = 0LL;
+	}
+
+	return sg_list;
+}
+
+static int skd_cons_skreq(struct skd_device *skdev)
+{
+	int rc = 0;
+	u32 i;
+
+	pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
+		 skdev->name, __func__, __LINE__,
+		 sizeof(struct skd_request_context),
+		 skdev->num_req_context,
+		 sizeof(struct skd_request_context) * skdev->num_req_context);
+
+	skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
+				     * skdev->num_req_context, GFP_KERNEL);
+	if (skdev->skreq_table == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
+		 skdev->name, __func__, __LINE__,
+		 skdev->sgs_per_request, sizeof(struct scatterlist),
+		 skdev->sgs_per_request * sizeof(struct scatterlist));
+
+	for (i = 0; i < skdev->num_req_context; i++) {
+		struct skd_request_context *skreq;
+
+		skreq = &skdev->skreq_table[i];
+
+		skreq->id = i + SKD_ID_RW_REQUEST;
+		skreq->state = SKD_REQ_STATE_IDLE;
+
+		skreq->sg = kzalloc(sizeof(struct scatterlist) *
+				    skdev->sgs_per_request, GFP_KERNEL);
+		if (skreq->sg == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		sg_init_table(skreq->sg, skdev->sgs_per_request);
+
+		skreq->sksg_list = skd_cons_sg_list(skdev,
+						    skdev->sgs_per_request,
+						    &skreq->sksg_dma_address);
+
+		if (skreq->sksg_list == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		skreq->next = &skreq[1];
+	}
+
+	/* Free list is in order starting with the 0th entry. */
+	skdev->skreq_table[i - 1].next = NULL;
+	skdev->skreq_free_list = skdev->skreq_table;
+
+err_out:
+	return rc;
+}
+
+static int skd_cons_skspcl(struct skd_device *skdev)
+{
+	int rc = 0;
+	u32 i, nbytes;
+
+	pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
+		 skdev->name, __func__, __LINE__,
+		 sizeof(struct skd_special_context),
+		 skdev->n_special,
+		 sizeof(struct skd_special_context) * skdev->n_special);
+
+	skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
+				      * skdev->n_special, GFP_KERNEL);
+	if (skdev->skspcl_table == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	for (i = 0; i < skdev->n_special; i++) {
+		struct skd_special_context *skspcl;
+
+		skspcl = &skdev->skspcl_table[i];
+
+		skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
+		skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+		skspcl->req.next = &skspcl[1].req;
+
+		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+
+		skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+						       &skspcl->mb_dma_address);
+		if (skspcl->msg_buf == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		memset(skspcl->msg_buf, 0, nbytes);
+
+		skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
+					 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
+		if (skspcl->req.sg == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		skspcl->req.sksg_list = skd_cons_sg_list(skdev,
+							 SKD_N_SG_PER_SPECIAL,
+							 &skspcl->req.
+							 sksg_dma_address);
+		if (skspcl->req.sksg_list == NULL) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+	}
+
+	/* Free list is in order starting with the 0th entry. */
+	skdev->skspcl_table[i - 1].req.next = NULL;
+	skdev->skspcl_free_list = skdev->skspcl_table;
+
+err_out:
+	return rc;
+}
+
+static int skd_cons_sksb(struct skd_device *skdev)
+{
+	int rc = 0;
+	struct skd_special_context *skspcl;
+	u32 nbytes;
+
+	skspcl = &skdev->internal_skspcl;
+
+	skspcl->req.id = 0 + SKD_ID_INTERNAL;
+	skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+	nbytes = SKD_N_INTERNAL_BYTES;
+
+	skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+						&skspcl->db_dma_address);
+	if (skspcl->data_buf == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	memset(skspcl->data_buf, 0, nbytes);
+
+	nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+	skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+					       &skspcl->mb_dma_address);
+	if (skspcl->msg_buf == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	memset(skspcl->msg_buf, 0, nbytes);
+
+	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
+						 &skspcl->req.sksg_dma_address);
+	if (skspcl->req.sksg_list == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	if (!skd_format_internal_skspcl(skdev)) {
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+err_out:
+	return rc;
+}
+
+static int skd_cons_disk(struct skd_device *skdev)
+{
+	int rc = 0;
+	struct gendisk *disk;
+	struct request_queue *q;
+	unsigned long flags;
+
+	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
+	if (!disk) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	skdev->disk = disk;
+	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
+
+	disk->major = skdev->major;
+	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
+	disk->fops = &skd_blockdev_ops;
+	disk->private_data = skdev;
+
+	q = blk_init_queue(skd_request_fn, &skdev->lock);
+	if (!q) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	skdev->queue = q;
+	disk->queue = q;
+	q->queuedata = skdev;
+
+	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_max_segments(q, skdev->sgs_per_request);
+	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
+
+	/* set sysfs optimal_io_size to 8K */
+	blk_queue_io_opt(q, 8192);
+
+	/* DISCARD Flag initialization. */
+	q->limits.discard_granularity = 8192;
+	q->limits.discard_alignment = 0;
+	q->limits.max_discard_sectors = UINT_MAX >> 9;
+	q->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+
+	spin_lock_irqsave(&skdev->lock, flags);
+	pr_debug("%s:%s:%d stopping %s queue\n",
+		 skdev->name, __func__, __LINE__, skdev->name);
+	blk_stop_queue(skdev->queue);
+	spin_unlock_irqrestore(&skdev->lock, flags);
+
+err_out:
+	return rc;
+}
+
+#define SKD_N_DEV_TABLE         16u
+static u32 skd_next_devno;
+
+static struct skd_device *skd_construct(struct pci_dev *pdev)
+{
+	struct skd_device *skdev;
+	int blk_major = skd_major;
+	int rc;
+
+	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
+
+	if (!skdev) {
+		pr_err(PFX "(%s): memory alloc failure\n",
+		       pci_name(pdev));
+		return NULL;
+	}
+
+	skdev->state = SKD_DRVR_STATE_LOAD;
+	skdev->pdev = pdev;
+	skdev->devno = skd_next_devno++;
+	skdev->major = blk_major;
+	skdev->irq_type = skd_isr_type;
+	sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
+	skdev->dev_max_queue_depth = 0;
+
+	skdev->num_req_context = skd_max_queue_depth;
+	skdev->num_fitmsg_context = skd_max_queue_depth;
+	skdev->n_special = skd_max_pass_thru;
+	skdev->cur_max_queue_depth = 1;
+	skdev->queue_low_water_mark = 1;
+	skdev->proto_ver = 99;
+	skdev->sgs_per_request = skd_sgs_per_request;
+	skdev->dbg_level = skd_dbg_level;
+
+	atomic_set(&skdev->device_count, 0);
+
+	spin_lock_init(&skdev->lock);
+
+	INIT_WORK(&skdev->completion_worker, skd_completion_worker);
+
+	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_skcomp(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_skmsg(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_skreq(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_skspcl(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_sksb(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+	rc = skd_cons_disk(skdev);
+	if (rc < 0)
+		goto err_out;
+
+	pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+	return skdev;
+
+err_out:
+	pr_debug("%s:%s:%d construct failed\n",
+		 skdev->name, __func__, __LINE__);
+	skd_destruct(skdev);
+	return NULL;
+}
+
+/*
+ *****************************************************************************
+ * DESTRUCT (FREE)
+ *****************************************************************************
+ */
+
+static void skd_free_skcomp(struct skd_device *skdev)
+{
+	if (skdev->skcomp_table != NULL) {
+		u32 nbytes;
+
+		nbytes = sizeof(skdev->skcomp_table[0]) *
+			 SKD_N_COMPLETION_ENTRY;
+		pci_free_consistent(skdev->pdev, nbytes,
+				    skdev->skcomp_table, skdev->cq_dma_address);
+	}
+
+	skdev->skcomp_table = NULL;
+	skdev->cq_dma_address = 0;
+}
+
+static void skd_free_skmsg(struct skd_device *skdev)
+{
+	u32 i;
+
+	if (skdev->skmsg_table == NULL)
+		return;
+
+	for (i = 0; i < skdev->num_fitmsg_context; i++) {
+		struct skd_fitmsg_context *skmsg;
+
+		skmsg = &skdev->skmsg_table[i];
+
+		if (skmsg->msg_buf != NULL) {
+			skmsg->msg_buf += skmsg->offset;
+			skmsg->mb_dma_address += skmsg->offset;
+			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
+					    skmsg->msg_buf,
+					    skmsg->mb_dma_address);
+		}
+		skmsg->msg_buf = NULL;
+		skmsg->mb_dma_address = 0;
+	}
+
+	kfree(skdev->skmsg_table);
+	skdev->skmsg_table = NULL;
+}
+
+static void skd_free_sg_list(struct skd_device *skdev,
+			     struct fit_sg_descriptor *sg_list,
+			     u32 n_sg, dma_addr_t dma_addr)
+{
+	if (sg_list != NULL) {
+		u32 nbytes;
+
+		nbytes = sizeof(*sg_list) * n_sg;
+
+		pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
+	}
+}
+
+static void skd_free_skreq(struct skd_device *skdev)
+{
+	u32 i;
+
+	if (skdev->skreq_table == NULL)
+		return;
+
+	for (i = 0; i < skdev->num_req_context; i++) {
+		struct skd_request_context *skreq;
+
+		skreq = &skdev->skreq_table[i];
+
+		skd_free_sg_list(skdev, skreq->sksg_list,
+				 skdev->sgs_per_request,
+				 skreq->sksg_dma_address);
+
+		skreq->sksg_list = NULL;
+		skreq->sksg_dma_address = 0;
+
+		kfree(skreq->sg);
+	}
+
+	kfree(skdev->skreq_table);
+	skdev->skreq_table = NULL;
+}
+
+static void skd_free_skspcl(struct skd_device *skdev)
+{
+	u32 i;
+	u32 nbytes;
+
+	if (skdev->skspcl_table == NULL)
+		return;
+
+	for (i = 0; i < skdev->n_special; i++) {
+		struct skd_special_context *skspcl;
+
+		skspcl = &skdev->skspcl_table[i];
+
+		if (skspcl->msg_buf != NULL) {
+			nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+			pci_free_consistent(skdev->pdev, nbytes,
+					    skspcl->msg_buf,
+					    skspcl->mb_dma_address);
+		}
+
+		skspcl->msg_buf = NULL;
+		skspcl->mb_dma_address = 0;
+
+		skd_free_sg_list(skdev, skspcl->req.sksg_list,
+				 SKD_N_SG_PER_SPECIAL,
+				 skspcl->req.sksg_dma_address);
+
+		skspcl->req.sksg_list = NULL;
+		skspcl->req.sksg_dma_address = 0;
+
+		kfree(skspcl->req.sg);
+	}
+
+	kfree(skdev->skspcl_table);
+	skdev->skspcl_table = NULL;
+}
+
+static void skd_free_sksb(struct skd_device *skdev)
+{
+	struct skd_special_context *skspcl;
+	u32 nbytes;
+
+	skspcl = &skdev->internal_skspcl;
+
+	if (skspcl->data_buf != NULL) {
+		nbytes = SKD_N_INTERNAL_BYTES;
+
+		pci_free_consistent(skdev->pdev, nbytes,
+				    skspcl->data_buf, skspcl->db_dma_address);
+	}
+
+	skspcl->data_buf = NULL;
+	skspcl->db_dma_address = 0;
+
+	if (skspcl->msg_buf != NULL) {
+		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+		pci_free_consistent(skdev->pdev, nbytes,
+				    skspcl->msg_buf, skspcl->mb_dma_address);
+	}
+
+	skspcl->msg_buf = NULL;
+	skspcl->mb_dma_address = 0;
+
+	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+			 skspcl->req.sksg_dma_address);
+
+	skspcl->req.sksg_list = NULL;
+	skspcl->req.sksg_dma_address = 0;
+}
+
+static void skd_free_disk(struct skd_device *skdev)
+{
+	struct gendisk *disk = skdev->disk;
+
+	if (disk != NULL) {
+		struct request_queue *q = disk->queue;
+
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+		if (q)
+			blk_cleanup_queue(q);
+		put_disk(disk);
+	}
+	skdev->disk = NULL;
+}
+
+static void skd_destruct(struct skd_device *skdev)
+{
+	if (skdev == NULL)
+		return;
+
+	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+	skd_free_disk(skdev);
+
+	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+	skd_free_sksb(skdev);
+
+	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+	skd_free_skspcl(skdev);
+
+	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+	skd_free_skreq(skdev);
+
+	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+	skd_free_skmsg(skdev);
+
+	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+	skd_free_skcomp(skdev);
+
+	pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+	kfree(skdev);
+}
+
+/*
+ *****************************************************************************
+ * BLOCK DEVICE (BDEV) GLUE
+ *****************************************************************************
+ */
+
+static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct skd_device *skdev;
+	u64 capacity;
+
+	skdev = bdev->bd_disk->private_data;
+
+	pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
+		 skdev->name, __func__, __LINE__,
+		 bdev->bd_disk->disk_name, current->comm);
+
+	if (skdev->read_cap_is_valid) {
+		capacity = get_capacity(skdev->disk);
+		geo->heads = 64;
+		geo->sectors = 255;
+		geo->cylinders = (capacity) / (255 * 64);
+
+		return 0;
+	}
+	return -EIO;
+}
+
+static int skd_bdev_attach(struct skd_device *skdev)
+{
+	pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+	add_disk(skdev->disk);
+	return 0;
+}
+
+static const struct block_device_operations skd_blockdev_ops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= skd_bdev_ioctl,
+	.getgeo		= skd_bdev_getgeo,
+};
+
+
+/*
+ *****************************************************************************
+ * PCIe DRIVER GLUE
+ *****************************************************************************
+ */
+
+static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
+	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ 0 }                     /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
+
+static char *skd_pci_info(struct skd_device *skdev, char *str)
+{
+	int pcie_reg;
+
+	strcpy(str, "PCIe (");
+	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
+
+	if (pcie_reg) {
+		char lwstr[6];
+		uint16_t pcie_lstat, lspeed, lwidth;
+
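+		/* 0x12 is the Link Status register offset (PCI_EXP_LNKSTA) */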
+		pcie_reg += 0x12;
+		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
+		lspeed = pcie_lstat & (0xF);
+		lwidth = (pcie_lstat & 0x3F0) >> 4;
+
+		if (lspeed == 1)
+			strcat(str, "2.5GT/s ");
+		else if (lspeed == 2)
+			strcat(str, "5.0GT/s ");
+		else
+			strcat(str, "<unknown> ");
+		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
+		strcat(str, lwstr);
+	}
+	return str;
+}
+
+static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int i;
+	int rc = 0;
+	char pci_str[32];
+	struct skd_device *skdev;
+
+	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
+	       DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
+	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
+	       pci_name(pdev), pdev->vendor, pdev->device);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out;
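+
+	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask. */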
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!rc) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+			pr_err("(%s): consistent DMA mask error %d\n",
+			       pci_name(pdev), rc);
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			pr_err("(%s): DMA mask error %d\n",
+			       pci_name(pdev), rc);
+			goto err_out_regions;
+		}
+	}
+
+	if (!skd_major) {
+		rc = register_blkdev(0, DRV_NAME);
+		if (rc < 0)
+			goto err_out_regions;
+		BUG_ON(!rc);
+		skd_major = rc;
+	}
+
+	skdev = skd_construct(pdev);
+	if (skdev == NULL) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	skd_pci_info(skdev, pci_str);
+	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+
+	pci_set_master(pdev);
+	rc = pci_enable_pcie_error_reporting(pdev);
+	if (rc) {
+		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
+		       skd_name(skdev), rc);
+		skdev->pcie_error_reporting_is_enabled = 0;
+	} else {
+		skdev->pcie_error_reporting_is_enabled = 1;
+	}
+
+	pci_set_drvdata(pdev, skdev);
+
+	skdev->disk->driverfs_dev = &pdev->dev;
+
+	for (i = 0; i < SKD_MAX_BARS; i++) {
+		skdev->mem_phys[i] = pci_resource_start(pdev, i);
+		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+					    skdev->mem_size[i]);
+		if (!skdev->mem_map[i]) {
+			pr_err("(%s): Unable to map adapter memory!\n",
+			       skd_name(skdev));
+			rc = -ENODEV;
+			goto err_out_iounmap;
+		}
+		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->mem_map[i],
+			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+	}
+
+	rc = skd_acquire_irq(skdev);
+	if (rc) {
+		pr_err("(%s): interrupt resource error %d\n",
+		       skd_name(skdev), rc);
+		goto err_out_iounmap;
+	}
+
+	rc = skd_start_timer(skdev);
+	if (rc)
+		goto err_out_timer;
+
+	init_waitqueue_head(&skdev->waitq);
+
+	skd_start_device(skdev);
+
+	rc = wait_event_interruptible_timeout(skdev->waitq,
+					      (skdev->gendisk_on),
+					      (SKD_START_WAIT_SECONDS * HZ));
+	if (skdev->gendisk_on > 0) {
+		/* device came on-line after reset */
+		skd_bdev_attach(skdev);
+		rc = 0;
+	} else {
+		/*
+		 * We timed out; something is wrong with the device,
+		 * so don't add the disk structure.
+		 */
+		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
+		       skd_name(skdev), rc);
+		/* if no other error was seen, report the timeout as ENXIO */
+		if (!rc)
+			rc = -ENXIO;
+		goto err_out_timer;
+	}
+
+
+#ifdef SKD_VMK_POLL_HANDLER
+	if (skdev->irq_type == SKD_IRQ_MSIX) {
+		/* MSIX completion handler is being used for coredump */
+		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+						  skdev->msix_entries[5].vector,
+						  skd_comp_q, skdev);
+	} else {
+		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+						  skdev->pdev->irq, skd_isr,
+						  skdev);
+	}
+#endif  /* SKD_VMK_POLL_HANDLER */
+
+	return rc;
+
+err_out_timer:
+	skd_stop_device(skdev);
+	skd_release_irq(skdev);
+
+err_out_iounmap:
+	for (i = 0; i < SKD_MAX_BARS; i++)
+		if (skdev->mem_map[i])
+			iounmap(skdev->mem_map[i]);
+
+	if (skdev->pcie_error_reporting_is_enabled)
+		pci_disable_pcie_error_reporting(pdev);
+
+	skd_destruct(skdev);
+
+err_out_regions:
+	pci_release_regions(pdev);
+
+err_out:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static void skd_pci_remove(struct pci_dev *pdev)
+{
+	int i;
+	struct skd_device *skdev;
+
+	skdev = pci_get_drvdata(pdev);
+	if (!skdev) {
+		pr_err("%s: no device data for PCI\n", pci_name(pdev));
+		return;
+	}
+	skd_stop_device(skdev);
+	skd_release_irq(skdev);
+
+	for (i = 0; i < SKD_MAX_BARS; i++)
+		if (skdev->mem_map[i])
+			iounmap(skdev->mem_map[i]);
+
+	if (skdev->pcie_error_reporting_is_enabled)
+		pci_disable_pcie_error_reporting(pdev);
+
+	skd_destruct(skdev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int i;
+	struct skd_device *skdev;
+
+	skdev = pci_get_drvdata(pdev);
+	if (!skdev) {
+		pr_err("%s: no device data for PCI\n", pci_name(pdev));
+		return -EIO;
+	}
+
+	skd_stop_device(skdev);
+
+	skd_release_irq(skdev);
+
+	for (i = 0; i < SKD_MAX_BARS; i++)
+		if (skdev->mem_map[i])
+			iounmap(skdev->mem_map[i]);
+
+	if (skdev->pcie_error_reporting_is_enabled)
+		pci_disable_pcie_error_reporting(pdev);
+
+	pci_release_regions(pdev);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int skd_pci_resume(struct pci_dev *pdev)
+{
+	int i;
+	int rc = 0;
+	struct skd_device *skdev;
+
+	skdev = pci_get_drvdata(pdev);
+	if (!skdev) {
+		pr_err("%s: no device data for PCI\n", pci_name(pdev));
+		return -EIO;
+	}
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out;
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!rc) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+			pr_err("(%s): consistent DMA mask error %d\n",
+			       pci_name(pdev), rc);
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			pr_err("(%s): DMA mask error %d\n",
+			       pci_name(pdev), rc);
+			goto err_out_regions;
+		}
+	}
+
+	pci_set_master(pdev);
+	rc = pci_enable_pcie_error_reporting(pdev);
+	if (rc) {
+		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
+		       skdev->name, rc);
+		skdev->pcie_error_reporting_is_enabled = 0;
+	} else {
+		skdev->pcie_error_reporting_is_enabled = 1;
+	}
+
+	for (i = 0; i < SKD_MAX_BARS; i++) {
+		skdev->mem_phys[i] = pci_resource_start(pdev, i);
+		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+					    skdev->mem_size[i]);
+		if (!skdev->mem_map[i]) {
+			pr_err("(%s): Unable to map adapter memory!\n",
+			       skd_name(skdev));
+			rc = -ENODEV;
+			goto err_out_iounmap;
+		}
+		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 skdev->mem_map[i],
+			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+	}
+	rc = skd_acquire_irq(skdev);
+	if (rc) {
+		pr_err("(%s): interrupt resource error %d\n",
+		       pci_name(pdev), rc);
+		goto err_out_iounmap;
+	}
+
+	rc = skd_start_timer(skdev);
+	if (rc)
+		goto err_out_timer;
+
+	init_waitqueue_head(&skdev->waitq);
+
+	skd_start_device(skdev);
+
+	return rc;
+
+err_out_timer:
+	skd_stop_device(skdev);
+	skd_release_irq(skdev);
+
+err_out_iounmap:
+	for (i = 0; i < SKD_MAX_BARS; i++)
+		if (skdev->mem_map[i])
+			iounmap(skdev->mem_map[i]);
+
+	if (skdev->pcie_error_reporting_is_enabled)
+		pci_disable_pcie_error_reporting(pdev);
+
+err_out_regions:
+	pci_release_regions(pdev);
+
+err_out:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static void skd_pci_shutdown(struct pci_dev *pdev)
+{
+	struct skd_device *skdev;
+
+	pr_err("skd_pci_shutdown called\n");
+
+	skdev = pci_get_drvdata(pdev);
+	if (!skdev) {
+		pr_err("%s: no device data for PCI\n", pci_name(pdev));
+		return;
+	}
+
+	pr_err("%s: calling stop\n", skd_name(skdev));
+	skd_stop_device(skdev);
+}
+
+static struct pci_driver skd_driver = {
+	.name		= DRV_NAME,
+	.id_table	= skd_pci_tbl,
+	.probe		= skd_pci_probe,
+	.remove		= skd_pci_remove,
+	.suspend	= skd_pci_suspend,
+	.resume		= skd_pci_resume,
+	.shutdown	= skd_pci_shutdown,
+};
+
+/*
+ *****************************************************************************
+ * LOGGING SUPPORT
+ *****************************************************************************
+ */
+
+static const char *skd_name(struct skd_device *skdev)
+{
+	memset(skdev->id_str, 0, sizeof(skdev->id_str));
+
+	if (skdev->inquiry_is_valid)
+		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
+			 skdev->name, skdev->inq_serial_num,
+			 pci_name(skdev->pdev));
+	else
+		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
+			 skdev->name, pci_name(skdev->pdev));
+
+	return skdev->id_str;
+}
+
+const char *skd_drive_state_to_str(int state)
+{
+	switch (state) {
+	case FIT_SR_DRIVE_OFFLINE:
+		return "OFFLINE";
+	case FIT_SR_DRIVE_INIT:
+		return "INIT";
+	case FIT_SR_DRIVE_ONLINE:
+		return "ONLINE";
+	case FIT_SR_DRIVE_BUSY:
+		return "BUSY";
+	case FIT_SR_DRIVE_FAULT:
+		return "FAULT";
+	case FIT_SR_DRIVE_DEGRADED:
+		return "DEGRADED";
+	case FIT_SR_PCIE_LINK_DOWN:
+		return "INK_DOWN";
+	case FIT_SR_DRIVE_SOFT_RESET:
+		return "SOFT_RESET";
+	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+		return "NEED_FW";
+	case FIT_SR_DRIVE_INIT_FAULT:
+		return "INIT_FAULT";
+	case FIT_SR_DRIVE_BUSY_SANITIZE:
+		return "BUSY_SANITIZE";
+	case FIT_SR_DRIVE_BUSY_ERASE:
+		return "BUSY_ERASE";
+	case FIT_SR_DRIVE_FW_BOOTING:
+		return "FW_BOOTING";
+	default:
+		return "???";
+	}
+}
+
+const char *skd_skdev_state_to_str(enum skd_drvr_state state)
+{
+	switch (state) {
+	case SKD_DRVR_STATE_LOAD:
+		return "LOAD";
+	case SKD_DRVR_STATE_IDLE:
+		return "IDLE";
+	case SKD_DRVR_STATE_BUSY:
+		return "BUSY";
+	case SKD_DRVR_STATE_STARTING:
+		return "STARTING";
+	case SKD_DRVR_STATE_ONLINE:
+		return "ONLINE";
+	case SKD_DRVR_STATE_PAUSING:
+		return "PAUSING";
+	case SKD_DRVR_STATE_PAUSED:
+		return "PAUSED";
+	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+		return "DRAINING_TIMEOUT";
+	case SKD_DRVR_STATE_RESTARTING:
+		return "RESTARTING";
+	case SKD_DRVR_STATE_RESUMING:
+		return "RESUMING";
+	case SKD_DRVR_STATE_STOPPING:
+		return "STOPPING";
+	case SKD_DRVR_STATE_SYNCING:
+		return "SYNCING";
+	case SKD_DRVR_STATE_FAULT:
+		return "FAULT";
+	case SKD_DRVR_STATE_DISAPPEARED:
+		return "DISAPPEARED";
+	case SKD_DRVR_STATE_BUSY_ERASE:
+		return "BUSY_ERASE";
+	case SKD_DRVR_STATE_BUSY_SANITIZE:
+		return "BUSY_SANITIZE";
+	case SKD_DRVR_STATE_BUSY_IMMINENT:
+		return "BUSY_IMMINENT";
+	case SKD_DRVR_STATE_WAIT_BOOT:
+		return "WAIT_BOOT";
+
+	default:
+		return "???";
+	}
+}
+
+const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+{
+	switch (state) {
+	case SKD_MSG_STATE_IDLE:
+		return "IDLE";
+	case SKD_MSG_STATE_BUSY:
+		return "BUSY";
+	default:
+		return "???";
+	}
+}
+
+const char *skd_skreq_state_to_str(enum skd_req_state state)
+{
+	switch (state) {
+	case SKD_REQ_STATE_IDLE:
+		return "IDLE";
+	case SKD_REQ_STATE_SETUP:
+		return "SETUP";
+	case SKD_REQ_STATE_BUSY:
+		return "BUSY";
+	case SKD_REQ_STATE_COMPLETED:
+		return "COMPLETED";
+	case SKD_REQ_STATE_TIMEOUT:
+		return "TIMEOUT";
+	case SKD_REQ_STATE_ABORTED:
+		return "ABORTED";
+	default:
+		return "???";
+	}
+}
+
+static void skd_log_skdev(struct skd_device *skdev, const char *event)
+{
+	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
+		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
+	pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
+		 skdev->name, __func__, __LINE__,
+		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+		 skd_skdev_state_to_str(skdev->state), skdev->state);
+	pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skdev->in_flight, skdev->cur_max_queue_depth,
+		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+	pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
+}
+
+static void skd_log_skmsg(struct skd_device *skdev,
+			  struct skd_fitmsg_context *skmsg, const char *event)
+{
+	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
+		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
+	pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
+		 skmsg->id, skmsg->length);
+}
+
+static void skd_log_skreq(struct skd_device *skdev,
+			  struct skd_request_context *skreq, const char *event)
+{
+	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
+		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
+	pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+		 skdev->name, __func__, __LINE__,
+		 skd_skreq_state_to_str(skreq->state), skreq->state,
+		 skreq->id, skreq->fitmsg_id);
+	pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
+		 skdev->name, __func__, __LINE__,
+		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
+
+	if (skreq->req != NULL) {
+		struct request *req = skreq->req;
+		u32 lba = (u32)blk_rq_pos(req);
+		u32 count = blk_rq_sectors(req);
+
+		pr_debug("%s:%s:%d "
+			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+			 skdev->name, __func__, __LINE__,
+			 req, lba, lba, count, count,
+			 (int)rq_data_dir(req));
+	} else
+		pr_debug("%s:%s:%d req=NULL\n",
+			 skdev->name, __func__, __LINE__);
+}
+
+/*
+ *****************************************************************************
+ * MODULE GLUE
+ *****************************************************************************
+ */
+
+static int __init skd_init(void)
+{
+	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+
+	switch (skd_isr_type) {
+	case SKD_IRQ_LEGACY:
+	case SKD_IRQ_MSI:
+	case SKD_IRQ_MSIX:
+		break;
+	default:
+		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
+		       skd_isr_type, SKD_IRQ_DEFAULT);
+		skd_isr_type = SKD_IRQ_DEFAULT;
+	}
+
+	if (skd_max_queue_depth < 1 ||
+	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
+		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
+		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
+		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+	}
+
+	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
+		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
+		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+	}
+
+	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
+		pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
+		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
+		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+	}
+
+	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
+		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
+		       skd_dbg_level, 0);
+		skd_dbg_level = 0;
+	}
+
+	if (skd_isr_comp_limit < 0) {
+		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
+		       skd_isr_comp_limit, 0);
+		skd_isr_comp_limit = 0;
+	}
+
+	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
+		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
+		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
+		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+	}
+
+	return pci_register_driver(&skd_driver);
+}
+
+static void __exit skd_exit(void)
+{
+	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
+
+	pci_unregister_driver(&skd_driver);
+
+	if (skd_major)
+		unregister_blkdev(skd_major, DRV_NAME);
+}
+
+module_init(skd_init);
+module_exit(skd_exit);
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
new file mode 100644
index 0000000..61c757f
--- /dev/null
+++ b/drivers/block/skd_s1120.h
@@ -0,0 +1,330 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ */
+
+
+#ifndef SKD_S1120_H
+#define SKD_S1120_H
+
+#pragma pack(push, s1120_h, 1)
+
+/*
+ * Q-channel, 64-bit r/w
+ */
+#define FIT_Q_COMMAND			0x400u
+#define FIT_QCMD_QID_MASK		(0x3 << 1)
+#define  FIT_QCMD_QID0			(0x0 << 1)
+#define  FIT_QCMD_QID_NORMAL		FIT_QCMD_QID0
+#define  FIT_QCMD_QID1			(0x1 << 1)
+#define  FIT_QCMD_QID2			(0x2 << 1)
+#define  FIT_QCMD_QID3			(0x3 << 1)
+#define  FIT_QCMD_FLUSH_QUEUE		(0ull)	/* add QID */
+#define  FIT_QCMD_MSGSIZE_MASK		(0x3 << 4)
+#define  FIT_QCMD_MSGSIZE_64		(0x0 << 4)
+#define  FIT_QCMD_MSGSIZE_128		(0x1 << 4)
+#define  FIT_QCMD_MSGSIZE_256		(0x2 << 4)
+#define  FIT_QCMD_MSGSIZE_512		(0x3 << 4)
+#define  FIT_QCMD_BASE_ADDRESS_MASK	(0xFFFFFFFFFFFFFFC0ull)
+
+/*
+ * Control, 32-bit r/w
+ */
+#define FIT_CONTROL			0x500u
+#define  FIT_CR_HARD_RESET		(1u << 0u)
+#define  FIT_CR_SOFT_RESET		(1u << 1u)
+#define  FIT_CR_DIS_TIMESTAMPS		(1u << 6u)
+#define  FIT_CR_ENABLE_INTERRUPTS	(1u << 7u)
+
+/*
+ * Status, 32-bit, r/o
+ */
+#define FIT_STATUS			0x510u
+#define FIT_SR_DRIVE_STATE_MASK		0x000000FFu
+#define	FIT_SR_SIGNATURE		(0xFF << 8)
+#define	FIT_SR_PIO_DMA			(1 << 16)
+#define FIT_SR_DRIVE_OFFLINE		0x00
+#define FIT_SR_DRIVE_INIT		0x01
+/* #define FIT_SR_DRIVE_READY		0x02 */
+#define FIT_SR_DRIVE_ONLINE		0x03
+#define FIT_SR_DRIVE_BUSY		0x04
+#define FIT_SR_DRIVE_FAULT		0x05
+#define FIT_SR_DRIVE_DEGRADED		0x06
+#define FIT_SR_PCIE_LINK_DOWN		0x07
+#define FIT_SR_DRIVE_SOFT_RESET		0x08
+#define FIT_SR_DRIVE_INIT_FAULT		0x09
+#define FIT_SR_DRIVE_BUSY_SANITIZE	0x0A
+#define FIT_SR_DRIVE_BUSY_ERASE		0x0B
+#define FIT_SR_DRIVE_FW_BOOTING		0x0C
+#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD	0xFE
+#define FIT_SR_DEVICE_MISSING		0xFF
+#define FIT_SR__RESERVED		0xFFFFFF00u
+
+/*
+ * FIT_STATUS - Status register data definition
+ * (FIT_SR_SIGNATURE and FIT_SR_PIO_DMA are defined above.)
+ */
+#define FIT_SR_STATE_MASK		(0xFF << 0)
+
+/*
+ * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
+ */
+#define FIT_INT_STATUS_HOST		0x520u
+#define  FIT_ISH_FW_STATE_CHANGE	(1u << 0u)
+#define  FIT_ISH_COMPLETION_POSTED	(1u << 1u)
+#define  FIT_ISH_MSG_FROM_DEV		(1u << 2u)
+#define  FIT_ISH_UNDEFINED_3		(1u << 3u)
+#define  FIT_ISH_UNDEFINED_4		(1u << 4u)
+#define  FIT_ISH_Q0_FULL		(1u << 5u)
+#define  FIT_ISH_Q1_FULL		(1u << 6u)
+#define  FIT_ISH_Q2_FULL		(1u << 7u)
+#define  FIT_ISH_Q3_FULL		(1u << 8u)
+#define  FIT_ISH_QCMD_FIFO_OVERRUN	(1u << 9u)
+#define  FIT_ISH_BAD_EXP_ROM_READ	(1u << 10u)
+
+#define FIT_INT_DEF_MASK \
+	(FIT_ISH_FW_STATE_CHANGE | \
+	 FIT_ISH_COMPLETION_POSTED | \
+	 FIT_ISH_MSG_FROM_DEV | \
+	 FIT_ISH_Q0_FULL | \
+	 FIT_ISH_Q1_FULL | \
+	 FIT_ISH_Q2_FULL | \
+	 FIT_ISH_Q3_FULL | \
+	 FIT_ISH_QCMD_FIFO_OVERRUN | \
+	 FIT_ISH_BAD_EXP_ROM_READ)
+
+#define FIT_INT_QUEUE_FULL \
+	(FIT_ISH_Q0_FULL | \
+	 FIT_ISH_Q1_FULL | \
+	 FIT_ISH_Q2_FULL | \
+	 FIT_ISH_Q3_FULL)
+
+#define MSI_MSG_NWL_ERROR_0		0x00000000
+#define MSI_MSG_NWL_ERROR_1		0x00000001
+#define MSI_MSG_NWL_ERROR_2		0x00000002
+#define MSI_MSG_NWL_ERROR_3		0x00000003
+#define MSI_MSG_STATE_CHANGE		0x00000004
+#define MSI_MSG_COMPLETION_POSTED	0x00000005
+#define MSI_MSG_MSG_FROM_DEV		0x00000006
+#define MSI_MSG_RESERVED_0		0x00000007
+#define MSI_MSG_RESERVED_1		0x00000008
+#define MSI_MSG_QUEUE_0_FULL		0x00000009
+#define MSI_MSG_QUEUE_1_FULL		0x0000000A
+#define MSI_MSG_QUEUE_2_FULL		0x0000000B
+#define MSI_MSG_QUEUE_3_FULL		0x0000000C
+
+#define FIT_INT_RESERVED_MASK \
+	(FIT_ISH_UNDEFINED_3 | \
+	 FIT_ISH_UNDEFINED_4)
+
+/*
+ * Interrupt mask, 32-bit r/w
+ * Bit definitions are the same as FIT_INT_STATUS_HOST
+ */
+#define FIT_INT_MASK_HOST		0x528u
+
+/*
+ * Message to device, 32-bit r/w
+ */
+#define FIT_MSG_TO_DEVICE		0x540u
+
+/*
+ * Message from device, 32-bit, r/o
+ */
+#define FIT_MSG_FROM_DEVICE		0x548u
+
+/*
+ * 32-bit messages to/from device, composition/extraction macros
+ */
+#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
+	((((TYPE)  & 0xFFu) << 24u) | \
+	(((PARAM) & 0xFFu) << 16u) | \
+	(((DATA)  & 0xFFFFu) << 0u))
+#define FIT_MXD_TYPE(MXD)		(((MXD) >> 24u) & 0xFFu)
+#define FIT_MXD_PARAM(MXD)		(((MXD) >> 16u) & 0xFFu)
+#define FIT_MXD_DATA(MXD)		(((MXD) >> 0u) & 0xFFFFu)
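+
+/*
+ * Example (illustrative only): FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, q)
+ * packs type 0x05 into bits 31:24, 0 into bits 23:16, and q into the
+ * low 16 bits; FIT_MXD_TYPE() etc. extract the same fields again.
+ */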
+
+/*
+ * Types of messages to/from device
+ */
+#define FIT_MTD_FITFW_INIT		0x01u
+#define FIT_MTD_GET_CMDQ_DEPTH		0x02u
+#define FIT_MTD_SET_COMPQ_DEPTH		0x03u
+#define FIT_MTD_SET_COMPQ_ADDR		0x04u
+#define FIT_MTD_ARM_QUEUE		0x05u
+#define FIT_MTD_CMD_LOG_HOST_ID		0x07u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_LO	0x08u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_HI	0x09u
+#define FIT_MFD_SMART_EXCEEDED		0x10u
+#define FIT_MFD_POWER_DOWN		0x11u
+#define FIT_MFD_OFFLINE			0x12u
+#define FIT_MFD_ONLINE			0x13u
+#define FIT_MFD_FW_RESTARTING		0x14u
+#define FIT_MFD_PM_ACTIVE		0x15u
+#define FIT_MFD_PM_STANDBY		0x16u
+#define FIT_MFD_PM_SLEEP		0x17u
+#define FIT_MFD_CMD_PROGRESS		0x18u
+
+#define FIT_MTD_DEBUG			0xFEu
+#define FIT_MFD_DEBUG			0xFFu
+
+#define FIT_MFD_MASK			(0xFFu)
+#define FIT_MFD_DATA_MASK		(0xFFu)
+#define FIT_MFD_MSG(x)			(((x) >> 24) & FIT_MFD_MASK)
+#define FIT_MFD_DATA(x)			((x) & FIT_MFD_DATA_MASK)
+
+/*
+ * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
+ * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
+ * (was Response buffer in docs)
+ */
+#define FIT_MSG_TO_DEVICE_ARG		0x580u
+
+/*
+ * Hardware (ASIC) version, 32-bit r/o
+ */
+#define FIT_HW_VERSION			0x588u
+
+/*
+ * Scatter/gather list descriptor.
+ * 32-bytes and must be aligned on a 32-byte boundary.
+ * All fields are in little endian order.
+ */
+struct fit_sg_descriptor {
+	uint32_t control;
+	uint32_t byte_count;
+	uint64_t host_side_addr;
+	uint64_t dev_side_addr;
+	uint64_t next_desc_ptr;
+};
+
+#define FIT_SGD_CONTROL_NOT_LAST	0x000u
+#define FIT_SGD_CONTROL_LAST		0x40Eu
+
+/*
+ * Header at the beginning of a FIT message. The header
+ * is followed by SSDI requests each 64 bytes.
+ * A FIT message can be up to 512 bytes long and must start
+ * on a 64-byte boundary.
+ */
+struct fit_msg_hdr {
+	uint8_t protocol_id;
+	uint8_t num_protocol_cmds_coalesced;
+	uint8_t _reserved[62];
+};
+
+#define FIT_PROTOCOL_ID_FIT	1
+#define FIT_PROTOCOL_ID_SSDI	2
+#define FIT_PROTOCOL_ID_SOFIT	3
+
+
+#define FIT_PROTOCOL_MINOR_VER(mtd_val) (((mtd_val) >> 16) & 0xF)
+#define FIT_PROTOCOL_MAJOR_VER(mtd_val) (((mtd_val) >> 20) & 0xF)
+
+/*
+ * Format of a completion entry. The completion queue is circular
+ * and must have at least as many entries as the maximum number
+ * of commands that may be issued to the device.
+ *
+ * There are no head/tail pointers. The cycle value is used to
+ * infer the presence of new completion records.
+ * Initially the cycle in all entries is 0, the index is 0, and
+ * the cycle value to expect is 1. When completions are added
+ * their cycle values are set to 1. When the index wraps the
+ * cycle value to expect is incremented.
+ *
+ * Command_context is opaque and taken verbatim from the SSDI command.
+ * All other fields are big endian.
+ */
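+
+/*
+ * A sketch of the consumer loop this implies (names are illustrative,
+ * not part of the interface):
+ *
+ *	while (table[ix].cycle == expect) {
+ *		consume(&table[ix]);
+ *		if (++ix == num_entries) {
+ *			ix = 0;
+ *			expect++;
+ *		}
+ *	}
+ */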
+#define FIT_PROTOCOL_VERSION_0		0
+
+/*
+ *  Protocol major version 1 completion entry.
+ *  The major protocol version is found in bits
+ *  20-23 of the FIT_MTD_FITFW_INIT response.
+ */
+struct fit_completion_entry_v1 {
+	uint32_t	num_returned_bytes;
+	uint16_t	tag;
+	uint8_t		status;  /* SCSI status */
+	uint8_t		cycle;
+};
+#define FIT_PROTOCOL_VERSION_1		1
+#define FIT_PROTOCOL_VERSION_CURRENT	FIT_PROTOCOL_VERSION_1
+
+struct fit_comp_error_info {
+	uint8_t		type:7; /* 00: Bits0-6 indicates the type of sense data. */
+	uint8_t		valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
+	uint8_t		reserved0; /* 01: Obsolete field */
+	uint8_t		key:4; /* 02: Bits0-3 indicate the sense key. */
+	uint8_t		reserved2:1; /* 02: Reserved bit. */
+	uint8_t		bad_length:1; /* 02: Incorrect Length Indicator */
+	uint8_t		end_medium:1; /* 02: End of Medium */
+	uint8_t		file_mark:1; /* 02: Filemark */
+	uint8_t		info[4]; /* 03: */
+	uint8_t		reserved1; /* 07: Additional Sense Length */
+	uint8_t		cmd_spec[4]; /* 08: Command Specific Information */
+	uint8_t		code; /* 0C: Additional Sense Code */
+	uint8_t		qual; /* 0D: Additional Sense Code Qualifier */
+	uint8_t		fruc; /* 0E: Field Replaceable Unit Code */
+	uint8_t		sks_high:7; /* 0F: Sense Key Specific (MSB) */
+	uint8_t		sks_valid:1; /* 0F: Sense Key Specific Valid */
+	uint16_t	sks_low; /* 10: Sense Key Specific (LSW) */
+	uint16_t	reserved3; /* 12: Part of additional sense bytes (unused) */
+	uint16_t	uec; /* 14: Additional Sense Bytes */
+	uint64_t	per; /* 16: Additional Sense Bytes */
+	uint8_t		reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
+};
+
+
+/* Task management constants */
+#define SOFT_TASK_SIMPLE		0x00
+#define SOFT_TASK_HEAD_OF_QUEUE		0x01
+#define SOFT_TASK_ORDERED		0x02
+
+/*
+ * In version zero the last 32 bits are reserved; in version one they
+ * hold sg_list_len_bytes.
+ */
+struct skd_command_header {
+	uint64_t	sg_list_dma_address;
+	uint16_t	tag;
+	uint8_t		attribute;
+	uint8_t		add_cdb_len;     /* In 32 bit words */
+	uint32_t	sg_list_len_bytes;
+};
+
+struct skd_scsi_request {
+	struct		skd_command_header hdr;
+	unsigned char	cdb[16];
+/*	unsigned char _reserved[16]; */
+};
+
+struct driver_inquiry_data {
+	uint8_t		peripheral_device_type:5;
+	uint8_t		qualifier:3;
+	uint8_t		page_code;
+	uint16_t	page_length;
+	uint16_t	pcie_bus_number;
+	uint8_t		pcie_device_number;
+	uint8_t		pcie_function_number;
+	uint8_t		pcie_link_speed;
+	uint8_t		pcie_link_lanes;
+	uint16_t	pcie_vendor_id;
+	uint16_t	pcie_device_id;
+	uint16_t	pcie_subsystem_vendor_id;
+	uint16_t	pcie_subsystem_device_id;
+	uint8_t		reserved1[2];
+	uint8_t		reserved2[3];
+	uint8_t		driver_version_length;
+	uint8_t		driver_version[0x14];
+};
+
+#pragma pack(pop, s1120_h)
+
+#endif /* SKD_S1120_H */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5cdf88b..588479d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,12 +11,11 @@
 #include <linux/string_helpers.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include <linux/numa.h>
 
 #define PART_BITS 4
 
-static bool use_bio;
-module_param(use_bio, bool, S_IRUGO);
-
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -26,13 +25,11 @@
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
-	wait_queue_head_t queue_wait;
+	spinlock_t vq_lock;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
-	mempool_t *pool;
-
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
@@ -47,31 +44,17 @@
 
 	/* Ida index - used to track minor number allocations. */
 	int index;
-
-	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
 {
 	struct request *req;
-	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
-	struct work_struct work;
-	struct virtio_blk *vblk;
-	int flags;
 	u8 status;
 	struct scatterlist sg[];
 };
 
-enum {
-	VBLK_IS_FLUSH		= 1,
-	VBLK_REQ_FLUSH		= 2,
-	VBLK_REQ_DATA		= 4,
-	VBLK_REQ_FUA		= 8,
-};
-
 static inline int virtblk_result(struct virtblk_req *vbr)
 {
 	switch (vbr->status) {
@@ -84,22 +67,6 @@
 	}
 }
 
-static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
-						    gfp_t gfp_mask)
-{
-	struct virtblk_req *vbr;
-
-	vbr = mempool_alloc(vblk->pool, gfp_mask);
-	if (!vbr)
-		return NULL;
-
-	vbr->vblk = vblk;
-	if (use_bio)
-		sg_init_table(vbr->sg, vblk->sg_elems);
-
-	return vbr;
-}
-
 static int __virtblk_add_req(struct virtqueue *vq,
 			     struct virtblk_req *vbr,
 			     struct scatterlist *data_sg,
@@ -143,83 +110,8 @@
 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	DEFINE_WAIT(wait);
-	int ret;
-
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
-						 have_data)) < 0)) {
-		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		io_schedule();
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-
-		finish_wait(&vblk->queue_wait, &wait);
-	}
-
-	virtqueue_kick(vblk->vq);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-}
-
-static void virtblk_bio_send_flush(struct virtblk_req *vbr)
-{
-	vbr->flags |= VBLK_IS_FLUSH;
-	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
-	vbr->out_hdr.sector = 0;
-	vbr->out_hdr.ioprio = 0;
-
-	virtblk_add_req(vbr, false);
-}
-
-static void virtblk_bio_send_data(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-	struct bio *bio = vbr->bio;
-	bool have_data;
-
-	vbr->flags &= ~VBLK_IS_FLUSH;
-	vbr->out_hdr.type = 0;
-	vbr->out_hdr.sector = bio->bi_sector;
-	vbr->out_hdr.ioprio = bio_prio(bio);
-
-	if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
-		have_data = true;
-		if (bio->bi_rw & REQ_WRITE)
-			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-		else
-			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-	} else
-		have_data = false;
-
-	virtblk_add_req(vbr, have_data);
-}
-
-static void virtblk_bio_send_data_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_data(vbr);
-}
-
-static void virtblk_bio_send_flush_work(struct work_struct *work)
-{
-	struct virtblk_req *vbr;
-
-	vbr = container_of(work, struct virtblk_req, work);
-
-	virtblk_bio_send_flush(vbr);
-}
-
 static inline void virtblk_request_done(struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = vbr->vblk;
 	struct request *req = vbr->req;
 	int error = virtblk_result(vbr);
 
@@ -231,90 +123,45 @@
 		req->errors = (error != 0);
 	}
 
-	__blk_end_request_all(req, error);
-	mempool_free(vbr, vblk->pool);
-}
-
-static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (vbr->flags & VBLK_REQ_DATA) {
-		/* Send out the actual write data */
-		INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
-		/* Send out a flush before end the bio */
-		vbr->flags &= ~VBLK_REQ_DATA;
-		INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
-		queue_work(virtblk_wq, &vbr->work);
-	} else {
-		bio_endio(vbr->bio, virtblk_result(vbr));
-		mempool_free(vbr, vblk->pool);
-	}
-}
-
-static inline void virtblk_bio_done(struct virtblk_req *vbr)
-{
-	if (unlikely(vbr->flags & VBLK_IS_FLUSH))
-		virtblk_bio_flush_done(vbr);
-	else
-		virtblk_bio_data_done(vbr);
+	blk_mq_end_io(req, error);
 }
 
 static void virtblk_done(struct virtqueue *vq)
 {
 	struct virtio_blk *vblk = vq->vdev->priv;
-	bool bio_done = false, req_done = false;
+	bool req_done = false;
 	struct virtblk_req *vbr;
 	unsigned long flags;
 	unsigned int len;
 
-	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+	spin_lock_irqsave(&vblk->vq_lock, flags);
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-			if (vbr->bio) {
-				virtblk_bio_done(vbr);
-				bio_done = true;
-			} else {
-				virtblk_request_done(vbr);
-				req_done = true;
-			}
+			virtblk_request_done(vbr);
+			req_done = true;
 		}
+		if (unlikely(virtqueue_is_broken(vq)))
+			break;
 	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
-		blk_start_queue(vblk->disk->queue);
-	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
-
-	if (bio_done)
-		wake_up(&vblk->queue_wait);
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
 }
 
-static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
-		   struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 {
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct virtblk_req *vbr = req->special;
+	unsigned long flags;
 	unsigned int num;
-	struct virtblk_req *vbr;
+	const bool last = (req->cmd_flags & REQ_END) != 0;
 
-	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
-	if (!vbr)
-		/* When another request finishes we'll try again. */
-		return false;
+	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 	vbr->req = req;
-	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -342,7 +189,7 @@
 		}
 	}
 
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg);
+	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
 	if (num) {
 		if (rq_data_dir(vbr->req) == WRITE)
 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -350,63 +197,18 @@
 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
 	}
 
-	if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
-		mempool_free(vbr, vblk->pool);
-		return false;
-	}
-
-	return true;
-}
-
-static void virtblk_request(struct request_queue *q)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct request *req;
-	unsigned int issued = 0;
-
-	while ((req = blk_peek_request(q)) != NULL) {
-		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
-		/* If this request fails, stop queue and wait for something to
-		   finish to restart it. */
-		if (!do_req(q, vblk, req)) {
-			blk_stop_queue(q);
-			break;
-		}
-		blk_start_request(req);
-		issued++;
-	}
-
-	if (issued)
+	spin_lock_irqsave(&vblk->vq_lock, flags);
+	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
+		blk_mq_stop_hw_queue(hctx);
 		virtqueue_kick(vblk->vq);
-}
-
-static void virtblk_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct virtio_blk *vblk = q->queuedata;
-	struct virtblk_req *vbr;
-
-	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
-
-	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
-	if (!vbr) {
-		bio_endio(bio, -ENOMEM);
-		return;
+		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
-	vbr->bio = bio;
-	vbr->flags = 0;
-	if (bio->bi_rw & REQ_FLUSH)
-		vbr->flags |= VBLK_REQ_FLUSH;
-	if (bio->bi_rw & REQ_FUA)
-		vbr->flags |= VBLK_REQ_FUA;
-	if (bio->bi_size)
-		vbr->flags |= VBLK_REQ_DATA;
-
-	if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
-		virtblk_bio_send_flush(vbr);
-	else
-		virtblk_bio_send_data(vbr);
+	if (last)
+		virtqueue_kick(vblk->vq);
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -456,18 +258,15 @@
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
 	struct virtio_blk *vblk = bd->bd_disk->private_data;
-	struct virtio_blk_geometry vgeo;
-	int err;
 
 	/* see if the host passed in geometry config */
-	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
-				offsetof(struct virtio_blk_config, geometry),
-				&vgeo);
-
-	if (!err) {
-		geo->heads = vgeo.heads;
-		geo->sectors = vgeo.sectors;
-		geo->cylinders = vgeo.cylinders;
+	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.cylinders, &geo->cylinders);
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.heads, &geo->heads);
+		virtio_cread(vblk->vdev, struct virtio_blk_config,
+			     geometry.sectors, &geo->sectors);
 	} else {
 		/* some standard values, similar to sd */
 		geo->heads = 1 << 6;
@@ -529,8 +328,7 @@
 		goto done;
 
 	/* Host must always specify the capacity. */
-	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-			  &capacity, sizeof(capacity));
+	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 
 	/* If capacity is too big, truncate with warning. */
 	if ((sector_t)capacity != capacity) {
@@ -608,9 +406,9 @@
 	u8 writeback;
 	int err;
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
-				offsetof(struct virtio_blk_config, wce),
-				&writeback);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+				   struct virtio_blk_config, wce,
+				   &writeback);
 	if (err)
 		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
 
@@ -642,7 +440,6 @@
 	struct virtio_blk *vblk = disk->private_data;
 	struct virtio_device *vdev = vblk->vdev;
 	int i;
-	u8 writeback;
 
 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
 	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
@@ -652,11 +449,7 @@
 	if (i < 0)
 		return -EINVAL;
 
-	writeback = i;
-	vdev->config->set(vdev,
-			  offsetof(struct virtio_blk_config, wce),
-			  &writeback, sizeof(writeback));
-
+	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
 	virtblk_update_cache_mode(vdev);
 	return count;
 }
@@ -680,12 +473,35 @@
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
+static struct blk_mq_ops virtio_mq_ops = {
+	.queue_rq	= virtio_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_reg virtio_mq_reg = {
+	.ops		= &virtio_mq_ops,
+	.nr_hw_queues	= 1,
+	.queue_depth	= 64,
+	.numa_node	= NUMA_NO_NODE,
+	.flags		= BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+			     struct request *rq, unsigned int nr)
+{
+	struct virtio_blk *vblk = data;
+	struct virtblk_req *vbr = rq->special;
+
+	sg_init_table(vbr->sg, vblk->sg_elems);
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
-	int pool_size;
 
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
@@ -699,9 +515,9 @@
 	index = err;
 
 	/* We need to know how many segments before we allocate. */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-				offsetof(struct virtio_blk_config, seg_max),
-				&sg_elems);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
+				   struct virtio_blk_config, seg_max,
+				   &sg_elems);
 
 	/* We need at least one SG element, whatever they say. */
 	if (err || !sg_elems)
@@ -709,17 +525,14 @@
 
 	/* We need an extra sg elements at head and tail. */
 	sg_elems += 2;
-	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
-				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out_free_index;
 	}
 
-	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
-	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
 
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -728,31 +541,27 @@
 	err = init_vq(vblk);
 	if (err)
 		goto out_free_vblk;
-
-	pool_size = sizeof(struct virtblk_req);
-	if (use_bio)
-		pool_size += sizeof(struct scatterlist) * sg_elems;
-	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
-	if (!vblk->pool) {
-		err = -ENOMEM;
-		goto out_free_vq;
-	}
+	spin_lock_init(&vblk->vq_lock);
 
 	/* FIXME: How many partitions?  How long is a piece of string? */
 	vblk->disk = alloc_disk(1 << PART_BITS);
 	if (!vblk->disk) {
 		err = -ENOMEM;
-		goto out_mempool;
+		goto out_free_vq;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
+	virtio_mq_reg.cmd_size =
+		sizeof(struct virtblk_req) +
+		sizeof(struct scatterlist) * sg_elems;
+
+	q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	if (use_bio)
-		blk_queue_make_request(q, virtblk_make_request);
+	blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -772,8 +581,7 @@
 		set_disk_ro(vblk->disk, 1);
 
 	/* Host must always specify the capacity. */
-	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-			  &cap, sizeof(cap));
+	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
 
 	/* If capacity is too big, truncate with warning. */
 	if ((sector_t)cap != cap) {
@@ -794,46 +602,45 @@
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
-				offsetof(struct virtio_blk_config, size_max),
-				&v);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
+				   struct virtio_blk_config, size_max, &v);
 	if (!err)
 		blk_queue_max_segment_size(q, v);
 	else
 		blk_queue_max_segment_size(q, -1U);
 
 	/* Host can optionally specify the block size of the device */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
-				offsetof(struct virtio_blk_config, blk_size),
-				&blk_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+				   struct virtio_blk_config, blk_size,
+				   &blk_size);
 	if (!err)
 		blk_queue_logical_block_size(q, blk_size);
 	else
 		blk_size = queue_logical_block_size(q);
 
 	/* Use topology information if available */
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-			offsetof(struct virtio_blk_config, physical_block_exp),
-			&physical_block_exp);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, physical_block_exp,
+				   &physical_block_exp);
 	if (!err && physical_block_exp)
 		blk_queue_physical_block_size(q,
 				blk_size * (1 << physical_block_exp));
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-			offsetof(struct virtio_blk_config, alignment_offset),
-			&alignment_offset);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, alignment_offset,
+				   &alignment_offset);
 	if (!err && alignment_offset)
 		blk_queue_alignment_offset(q, blk_size * alignment_offset);
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-			offsetof(struct virtio_blk_config, min_io_size),
-			&min_io_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, min_io_size,
+				   &min_io_size);
 	if (!err && min_io_size)
 		blk_queue_io_min(q, blk_size * min_io_size);
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-			offsetof(struct virtio_blk_config, opt_io_size),
-			&opt_io_size);
+	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+				   struct virtio_blk_config, opt_io_size,
+				   &opt_io_size);
 	if (!err && opt_io_size)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
@@ -857,8 +664,6 @@
 	blk_cleanup_queue(vblk->disk->queue);
 out_put_disk:
 	put_disk(vblk->disk);
-out_mempool:
-	mempool_destroy(vblk->pool);
 out_free_vq:
 	vdev->config->del_vqs(vdev);
 out_free_vblk:
@@ -890,7 +695,6 @@
 
 	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
 	put_disk(vblk->disk);
-	mempool_destroy(vblk->pool);
 	vdev->config->del_vqs(vdev);
 	kfree(vblk);
 
@@ -899,7 +703,7 @@
 		ida_simple_remove(&vd_index_ida, index);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtblk_freeze(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
@@ -914,10 +718,7 @@
 
 	flush_work(&vblk->config_work);
 
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	blk_stop_queue(vblk->disk->queue);
-	spin_unlock_irq(vblk->disk->queue->queue_lock);
-	blk_sync_queue(vblk->disk->queue);
+	blk_mq_stop_hw_queues(vblk->disk->queue);
 
 	vdev->config->del_vqs(vdev);
 	return 0;
@@ -930,11 +731,9 @@
 
 	vblk->config_enable = true;
 	ret = init_vq(vdev->priv);
-	if (!ret) {
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-		blk_start_queue(vblk->disk->queue);
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-	}
+	if (!ret)
+		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+
 	return ret;
 }
 #endif
@@ -959,7 +758,7 @@
 	.probe			= virtblk_probe,
 	.remove			= virtblk_remove,
 	.config_changed		= virtblk_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze			= virtblk_freeze,
 	.restore		= virtblk_restore,
 #endif
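
The virtio_blk conversion above follows blk-mq's standard backpressure
contract: when the virtqueue is full, ->queue_rq() stops the hardware queue
and returns BLK_MQ_RQ_QUEUE_BUSY so the request is retried later, and the
completion path restarts stopped queues once slots free up. Below is a hedged
userspace model of that stop/restart handshake; all names are hypothetical
stand-ins for the kernel APIs noted in the comments.

#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 2

static int in_flight;		/* occupied virtqueue slots */
static bool queue_stopped;	/* models blk_mq_stop_hw_queue() state */

enum { QUEUE_OK, QUEUE_BUSY };

static int queue_rq(int tag)
{
	if (in_flight == RING_SLOTS) {
		queue_stopped = true;	/* blk_mq_stop_hw_queue() */
		return QUEUE_BUSY;	/* core re-queues the request */
	}
	in_flight++;
	printf("submitted tag %d\n", tag);
	return QUEUE_OK;
}

static void complete_one(void)
{
	in_flight--;			/* a virtqueue slot freed up */
	if (queue_stopped) {
		queue_stopped = false;	/* blk_mq_start_stopped_hw_queues() */
		printf("queue restarted\n");
	}
}

int main(void)
{
	queue_rq(1);
	queue_rq(2);
	if (queue_rq(3) == QUEUE_BUSY)
		printf("tag 3 busy, queue stopped\n");
	complete_one();			/* restart lets tag 3 be retried */
	queue_rq(3);
	return 0;
}
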
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bf4b9d2..6620b73 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -887,6 +887,8 @@
 	unsigned long secure;
 	struct phys_req preq;
 
+	xen_blkif_get(blkif);
+
 	preq.sector_number = req->u.discard.sector_number;
 	preq.nr_sects      = req->u.discard.nr_sectors;
 
@@ -899,7 +901,6 @@
 	}
 	blkif->st_ds_req++;
 
-	xen_blkif_get(blkif);
 	secure = (blkif->vbd.discard_secure &&
 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
 		 BLKDEV_DISCARD_SECURE : 0;
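
The xen-blkback hunk above is a reference-count ordering fix: the discard path
can bail out through an error branch whose cleanup performs the matching
xen_blkif_put(), so the xen_blkif_get() must happen before any such early
exit. A small self-contained sketch of that shape -- hypothetical helpers, not
the blkback code:

#include <stdio.h>

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o) { o->refs--; }
static int validate(struct obj *o) { (void)o; return -1; } /* force error */
static void do_work(struct obj *o) { (void)o; }

/* Take the reference before any path that ends in the matching put,
 * so an early failure cannot underflow the count. */
static void dispatch(struct obj *o)
{
	obj_get(o);
	if (validate(o) < 0)
		goto out;
	do_work(o);
out:
	obj_put(o);
}

int main(void)
{
	struct obj o = { .refs = 1 };
	dispatch(&o);
	printf("refs = %d\n", o.refs);	/* still 1: balanced on all paths */
	return 0;
}
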
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a4660bb..432db1b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -121,7 +121,8 @@
 	struct work_struct work;
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
-	struct list_head persistent_gnts;
+	struct list_head grants;
+	struct list_head indirect_pages;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	unsigned int feature_flush;
@@ -200,15 +201,17 @@
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		granted_page = alloc_page(GFP_NOIO);
-		if (!granted_page) {
-			kfree(gnt_list_entry);
-			goto out_of_memory;
+		if (info->feature_persistent) {
+			granted_page = alloc_page(GFP_NOIO);
+			if (!granted_page) {
+				kfree(gnt_list_entry);
+				goto out_of_memory;
+			}
+			gnt_list_entry->pfn = page_to_pfn(granted_page);
 		}
 
-		gnt_list_entry->pfn = page_to_pfn(granted_page);
 		gnt_list_entry->gref = GRANT_INVALID_REF;
-		list_add(&gnt_list_entry->node, &info->persistent_gnts);
+		list_add(&gnt_list_entry->node, &info->grants);
 		i++;
 	}
 
@@ -216,9 +219,10 @@
 
 out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
-	                         &info->persistent_gnts, node) {
+	                         &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		__free_page(pfn_to_page(gnt_list_entry->pfn));
+		if (info->feature_persistent)
+			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
 	}
@@ -227,13 +231,14 @@
 }
 
 static struct grant *get_grant(grant_ref_t *gref_head,
+                               unsigned long pfn,
                                struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
 	unsigned long buffer_mfn;
 
-	BUG_ON(list_empty(&info->persistent_gnts));
-	gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+	BUG_ON(list_empty(&info->grants));
+	gnt_list_entry = list_first_entry(&info->grants, struct grant,
 	                                  node);
 	list_del(&gnt_list_entry->node);
 
@@ -245,6 +250,10 @@
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
+	if (!info->feature_persistent) {
+		BUG_ON(!pfn);
+		gnt_list_entry->pfn = pfn;
+	}
 	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
 	                                info->xbdev->otherend_id,
@@ -400,10 +409,13 @@
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return 1;
 
-	max_grefs = info->max_indirect_segments ?
-		    info->max_indirect_segments +
-		    INDIRECT_GREFS(info->max_indirect_segments) :
-		    BLKIF_MAX_SEGMENTS_PER_REQUEST;
+	max_grefs = req->nr_phys_segments;
+	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		/*
+		 * If we are using indirect segments we need to account
+		 * for the indirect grefs used in the request.
+		 */
+		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
 
 	/* Check if we have enough grants to allocate a requests */
 	if (info->persistent_gnts_c < max_grefs) {
@@ -477,22 +489,34 @@
 
 			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
 			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+				unsigned long pfn;
+
 				if (segments)
 					kunmap_atomic(segments);
 
 				n = i / SEGS_PER_INDIRECT_FRAME;
-				gnt_list_entry = get_grant(&gref_head, info);
+				if (!info->feature_persistent) {
+					struct page *indirect_page;
+
+					/* Fetch a pre-allocated page to use for indirect grefs */
+					BUG_ON(list_empty(&info->indirect_pages));
+					indirect_page = list_first_entry(&info->indirect_pages,
+					                                 struct page, lru);
+					list_del(&indirect_page->lru);
+					pfn = page_to_pfn(indirect_page);
+				}
+				gnt_list_entry = get_grant(&gref_head, pfn, info);
 				info->shadow[id].indirect_grants[n] = gnt_list_entry;
 				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
 				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
 			}
 
-			gnt_list_entry = get_grant(&gref_head, info);
+			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
 			ref = gnt_list_entry->gref;
 
 			info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req)) {
+			if (rq_data_dir(req) && info->feature_persistent) {
 				char *bvec_data;
 				void *shared_data;
 
@@ -904,21 +928,36 @@
 		blk_stop_queue(info->rq);
 
 	/* Remove all persistent grants */
-	if (!list_empty(&info->persistent_gnts)) {
+	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
-		                         &info->persistent_gnts, node) {
+		                         &info->grants, node) {
 			list_del(&persistent_gnt->node);
 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
 				gnttab_end_foreign_access(persistent_gnt->gref,
 				                          0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
 
+	/*
+	 * Remove indirect pages; this only happens when using indirect
+	 * descriptors but not persistent grants.
+	 */
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+
+		BUG_ON(info->feature_persistent);
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/*
 		 * Clear persistent grants present in requests already
@@ -933,7 +972,8 @@
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 
@@ -992,7 +1032,7 @@
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ) {
+	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1013,13 +1053,51 @@
 	}
 	/* Add the persistent grant into the list of free grants */
 	for (i = 0; i < nseg; i++) {
-		list_add(&s->grants_used[i]->node, &info->persistent_gnts);
-		info->persistent_gnts_c++;
+		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+			/*
+			 * If the grant is still mapped by the backend (the
+			 * backend has chosen to make this grant persistent)
+			 * we add it at the head of the list, so it will be
+			 * reused first.
+			 */
+			if (!info->feature_persistent)
+				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+						     s->grants_used[i]->gref);
+			list_add(&s->grants_used[i]->node, &info->grants);
+			info->persistent_gnts_c++;
+		} else {
+			/*
+			 * If the grant is not mapped by the backend we end the
+			 * foreign access and add it to the tail of the list,
+			 * so it will not be picked again unless we run out of
+			 * persistent grants.
+			 */
+			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+			s->grants_used[i]->gref = GRANT_INVALID_REF;
+			list_add_tail(&s->grants_used[i]->node, &info->grants);
+		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
-			list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
-			info->persistent_gnts_c++;
+			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+				if (!info->feature_persistent)
+					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+							     s->indirect_grants[i]->gref);
+				list_add(&s->indirect_grants[i]->node, &info->grants);
+				info->persistent_gnts_c++;
+			} else {
+				struct page *indirect_page;
+
+				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+				/*
+				 * Add the used indirect page back to the list of
+				 * available pages for indirect grefs.
+				 */
+				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+				list_add(&indirect_page->lru, &info->indirect_pages);
+				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+			}
 		}
 	}
 }
@@ -1313,7 +1391,8 @@
 	spin_lock_init(&info->io_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
-	INIT_LIST_HEAD(&info->persistent_gnts);
+	INIT_LIST_HEAD(&info->grants);
+	INIT_LIST_HEAD(&info->indirect_pages);
 	info->persistent_gnts_c = 0;
 	info->connected = BLKIF_STATE_DISCONNECTED;
 	INIT_WORK(&info->work, blkif_restart_queue);
@@ -1336,57 +1415,6 @@
 	return 0;
 }
 
-/*
- * This is a clone of md_trim_bio, used to split a bio into smaller ones
- */
-static void trim_bio(struct bio *bio, int offset, int size)
-{
-	/* 'bio' is a cloned bio which we need to trim to match
-	 * the given offset and size.
-	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
-	 */
-	int i;
-	struct bio_vec *bvec;
-	int sofar = 0;
-
-	size <<= 9;
-	if (offset == 0 && size == bio->bi_size)
-		return;
-
-	bio->bi_sector += offset;
-	bio->bi_size = size;
-	offset <<= 9;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
-	while (bio->bi_idx < bio->bi_vcnt &&
-	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
-		/* remove this whole bio_vec */
-		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
-		bio->bi_idx++;
-	}
-	if (bio->bi_idx < bio->bi_vcnt) {
-		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
-		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
-	}
-	/* avoid any complications with bi_idx being non-zero*/
-	if (bio->bi_idx) {
-		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-		bio->bi_vcnt -= bio->bi_idx;
-		bio->bi_idx = 0;
-	}
-	/* Make sure vcnt and last bv are not too big */
-	bio_for_each_segment(bvec, bio, i) {
-		if (sofar + bvec->bv_len > size)
-			bvec->bv_len = size - sofar;
-		if (bvec->bv_len == 0) {
-			bio->bi_vcnt = i;
-			break;
-		}
-		sofar += bvec->bv_len;
-	}
-}
-
 static void split_bio_end(struct bio *bio, int error)
 {
 	struct split_bio *split_bio = bio->bi_private;
@@ -1522,7 +1550,7 @@
 					   (unsigned int)(bio->bi_size >> 9) - offset);
 				cloned_bio = bio_clone(bio, GFP_NOIO);
 				BUG_ON(cloned_bio == NULL);
-				trim_bio(cloned_bio, offset, size);
+				bio_trim(cloned_bio, offset, size);
 				cloned_bio->bi_private = split_bio;
 				cloned_bio->bi_end_io = split_bio_end;
 				submit_bio(cloned_bio->bi_rw, cloned_bio);
@@ -1660,6 +1688,23 @@
 	if (err)
 		goto out_of_memory;
 
+	if (!info->feature_persistent && info->max_indirect_segments) {
+		/*
+		 * We are using indirect descriptors but not persistent
+		 * grants, we need to allocate a set of pages that can be
+		 * used for mapping indirect grefs
+		 */
+		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
+
+		BUG_ON(!list_empty(&info->indirect_pages));
+		for (i = 0; i < num; i++) {
+			struct page *indirect_page = alloc_page(GFP_NOIO);
+			if (!indirect_page)
+				goto out_of_memory;
+			list_add(&indirect_page->lru, &info->indirect_pages);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		info->shadow[i].grants_used = kzalloc(
 			sizeof(info->shadow[i].grants_used[0]) * segs,
@@ -1690,6 +1735,13 @@
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
 	}
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
 	return -ENOMEM;
 }
 
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 4afae20..9fe8a87 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -30,3 +30,5 @@
 hci_uart-$(CONFIG_BT_HCIUART_ATH3K)	+= hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)	+= hci_h5.o
 hci_uart-objs				:= $(hci_uart-y)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 0a327f4..6bfc1bb 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -57,7 +57,7 @@
 	unsigned char	reserved[0x07];
 };
 
-static struct usb_device_id ath3k_table[] = {
+static const struct usb_device_id ath3k_table[] = {
 	/* Atheros AR3011 */
 	{ USB_DEVICE(0x0CF3, 0x3000) },
 
@@ -112,7 +112,7 @@
 #define BTUSB_ATH3012		0x80
 /* This table is to load patch and sysconfig files
  * for AR3012 */
-static struct usb_device_id ath3k_blist_tbl[] = {
+static const struct usb_device_id ath3k_blist_tbl[] = {
 
 	/* Atheros AR3012 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 995aee9..3138699 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -42,7 +42,7 @@
 
 static struct usb_driver bfusb_driver;
 
-static struct usb_device_id bfusb_table[] = {
+static const struct usb_device_id bfusb_table[] = {
 	/* AVM BlueFRITZ! USB */
 	{ USB_DEVICE(0x057c, 0x2200) },
 
@@ -318,7 +318,6 @@
 			return -ENOMEM;
 		}
 
-		skb->dev = (void *) data->hdev;
 		bt_cb(skb)->pkt_type = pkt_type;
 
 		data->reassembly = skb;
@@ -333,7 +332,7 @@
 		memcpy(skb_put(data->reassembly, len), buf, len);
 
 	if (hdr & 0x08) {
-		hci_recv_frame(data->reassembly);
+		hci_recv_frame(data->hdev, data->reassembly);
 		data->reassembly = NULL;
 	}
 
@@ -465,26 +464,18 @@
 	return 0;
 }
 
-static int bfusb_send_frame(struct sk_buff *skb)
+static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-	struct bfusb_data *data;
+	struct bfusb_data *data = hci_get_drvdata(hdev);
 	struct sk_buff *nskb;
 	unsigned char buf[3];
 	int sent = 0, size, count;
 
 	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
-
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
-	data = hci_get_drvdata(hdev);
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
@@ -544,11 +535,6 @@
 	return 0;
 }
 
-static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-	return -ENOIOCTLCMD;
-}
-
 static int bfusb_load_firmware(struct bfusb_data *data,
 			       const unsigned char *firmware, int count)
 {
@@ -699,11 +685,10 @@
 	hci_set_drvdata(hdev, data);
 	SET_HCIDEV_DEV(hdev, &intf->dev);
 
-	hdev->open     = bfusb_open;
-	hdev->close    = bfusb_close;
-	hdev->flush    = bfusb_flush;
-	hdev->send     = bfusb_send_frame;
-	hdev->ioctl    = bfusb_ioctl;
+	hdev->open  = bfusb_open;
+	hdev->close = bfusb_close;
+	hdev->flush = bfusb_flush;
+	hdev->send  = bfusb_send_frame;
 
 	if (hci_register_dev(hdev) < 0) {
 		BT_ERR("Can't register HCI device");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6c3e3d4..57427de 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -399,7 +399,6 @@
 
 		if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-			info->rx_skb->dev = (void *) info->hdev;
 			bt_cb(info->rx_skb)->pkt_type = buf[i];
 
 			switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -477,7 +476,7 @@
 					break;
 
 				case RECV_WAIT_DATA:
-					hci_recv_frame(info->rx_skb);
+					hci_recv_frame(info->hdev, info->rx_skb);
 					info->rx_skb = NULL;
 					break;
 
@@ -659,17 +658,9 @@
 }
 
 
-static int bluecard_hci_send_frame(struct sk_buff *skb)
+static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	bluecard_info_t *info;
-	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
-
-	info = hci_get_drvdata(hdev);
+	bluecard_info_t *info = hci_get_drvdata(hdev);
 
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
@@ -693,12 +684,6 @@
 }
 
 
-static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-	return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -734,11 +719,10 @@
 	hci_set_drvdata(hdev, info);
 	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-	hdev->open     = bluecard_hci_open;
-	hdev->close    = bluecard_hci_close;
-	hdev->flush    = bluecard_hci_flush;
-	hdev->send     = bluecard_hci_send_frame;
-	hdev->ioctl    = bluecard_hci_ioctl;
+	hdev->open  = bluecard_hci_open;
+	hdev->close = bluecard_hci_close;
+	hdev->flush = bluecard_hci_flush;
+	hdev->send  = bluecard_hci_send_frame;
 
 	id = inb(iobase + 0x30);
 
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 2fe4a80..8a31991 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -37,7 +37,7 @@
 
 #define VERSION "0.10"
 
-static struct usb_device_id bpa10x_table[] = {
+static const struct usb_device_id bpa10x_table[] = {
 	/* Tektronix BPA 100/105 (Digianswer) */
 	{ USB_DEVICE(0x08fd, 0x0002) },
 
@@ -129,8 +129,6 @@
 				return -ENOMEM;
 			}
 
-			skb->dev = (void *) hdev;
-
 			data->rx_skb[queue] = skb;
 
 			scb = (void *) skb->cb;
@@ -155,7 +153,7 @@
 			data->rx_skb[queue] = NULL;
 
 			bt_cb(skb)->pkt_type = scb->type;
-			hci_recv_frame(skb);
+			hci_recv_frame(hdev, skb);
 		}
 
 		count -= len; buf += len;
@@ -352,9 +350,8 @@
 	return 0;
 }
 
-static int bpa10x_send_frame(struct sk_buff *skb)
+static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 	struct bpa10x_data *data = hci_get_drvdata(hdev);
 	struct usb_ctrlrequest *dr;
 	struct urb *urb;
@@ -366,6 +363,8 @@
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
+	skb->dev = (void *) hdev;
+
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a1aaa3b..73d8799 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -247,7 +247,6 @@
 
 		if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-			info->rx_skb->dev = (void *) info->hdev;
 			bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
 			inb(iobase + DATA_H);
 			//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
@@ -318,7 +317,7 @@
 					break;
 
 				case RECV_WAIT_DATA:
-					hci_recv_frame(info->rx_skb);
+					hci_recv_frame(info->hdev, info->rx_skb);
 					info->rx_skb = NULL;
 					break;
 
@@ -416,19 +415,11 @@
 }
 
 
-static int bt3c_hci_send_frame(struct sk_buff *skb)
+static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	bt3c_info_t *info;
-	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+	bt3c_info_t *info = hci_get_drvdata(hdev);
 	unsigned long flags;
 
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
-
-	info = hci_get_drvdata(hdev);
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
@@ -455,12 +446,6 @@
 }
 
 
-static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-	return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -577,11 +562,10 @@
 	hci_set_drvdata(hdev, info);
 	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-	hdev->open     = bt3c_hci_open;
-	hdev->close    = bt3c_hci_close;
-	hdev->flush    = bt3c_hci_flush;
-	hdev->send     = bt3c_hci_send_frame;
-	hdev->ioctl    = bt3c_hci_ioctl;
+	hdev->open  = bt3c_hci_open;
+	hdev->close = bt3c_hci_close;
+	hdev->flush = bt3c_hci_flush;
+	hdev->send  = bt3c_hci_send_frame;
 
 	/* Load firmware */
 	err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 27068d1..f9d1833 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,6 +23,8 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <net/bluetooth/bluetooth.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>
 
 #define BTM_HEADER_LEN			4
 #define BTM_UPLD_SIZE			2312
@@ -41,6 +43,8 @@
 struct btmrvl_device {
 	void *card;
 	struct hci_dev *hcidev;
+	struct device *dev;
+	const char *cal_data;
 
 	u8 dev_type;
 
@@ -91,6 +95,7 @@
 #define BT_CMD_HOST_SLEEP_CONFIG	0x59
 #define BT_CMD_HOST_SLEEP_ENABLE	0x5A
 #define BT_CMD_MODULE_CFG_REQ		0x5B
+#define BT_CMD_LOAD_CONFIG_DATA		0x61
 
 /* Sub-commands: Module Bringup/Shutdown Request/Response */
 #define MODULE_BRINGUP_REQ		0xF1
@@ -116,11 +121,8 @@
 #define PS_SLEEP			0x01
 #define PS_AWAKE			0x00
 
-struct btmrvl_cmd {
-	__le16 ocf_ogf;
-	u8 length;
-	u8 data[4];
-} __packed;
+#define BT_CMD_DATA_SIZE		32
+#define BT_CAL_DATA_SIZE		28
 
 struct btmrvl_event {
 	u8 ec;		/* event counter */
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9a9f518..5cf31c4 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -57,8 +57,7 @@
 		ocf = hci_opcode_ocf(opcode);
 		ogf = hci_opcode_ogf(opcode);
 
-		if (ocf == BT_CMD_MODULE_CFG_REQ &&
-					priv->btmrvl_dev.sendcmdflag) {
+		if (priv->btmrvl_dev.sendcmdflag) {
 			priv->btmrvl_dev.sendcmdflag = false;
 			priv->adapter->cmd_complete = true;
 			wake_up_interruptible(&priv->adapter->cmd_wait_q);
@@ -116,7 +115,6 @@
 			adapter->hs_state = HS_ACTIVATED;
 			if (adapter->psmode)
 				adapter->ps_state = PS_SLEEP;
-			wake_up_interruptible(&adapter->cmd_wait_q);
 			BT_DBG("HS ACTIVATED!");
 		} else {
 			BT_DBG("HS Enable failed");
@@ -168,45 +166,50 @@
 }
 EXPORT_SYMBOL_GPL(btmrvl_process_event);
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+				const void *param, u8 len)
 {
 	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-	int ret = 0;
+	struct hci_command_hdr *hdr;
 
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
 	if (skb == NULL) {
 		BT_ERR("No free skb");
 		return -ENOMEM;
 	}
 
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
-	cmd->length = 1;
-	cmd->data[0] = subcmd;
+	hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+	hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+	hdr->plen = len;
+
+	if (len)
+		memcpy(skb_put(skb, len), param, len);
 
 	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
 
-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
 	skb_queue_head(&priv->adapter->tx_queue, skb);
 
 	priv->btmrvl_dev.sendcmdflag = true;
 
 	priv->adapter->cmd_complete = false;
 
-	BT_DBG("Queue module cfg Command");
-
 	wake_up_interruptible(&priv->main_thread.wait_q);
 
 	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
 				priv->adapter->cmd_complete,
-				msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
-		ret = -ETIMEDOUT;
-		BT_ERR("module_cfg_cmd(%x): timeout: %d",
-					subcmd, priv->btmrvl_dev.sendcmdflag);
-	}
+				msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+		return -ETIMEDOUT;
 
-	BT_DBG("module cfg Command done");
+	return 0;
+}
+
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+{
+	int ret;
+
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
+	if (ret)
+		BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
 
 	return ret;
 }
@@ -214,61 +217,36 @@
 
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
+	int ret;
+	u8 param[2];
 
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!skb) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
+	param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+	param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
 
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-						   BT_CMD_HOST_SLEEP_CONFIG));
-	cmd->length = 2;
-	cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
-	cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+	BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
+	       param[0], param[1]);
 
-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
+	if (ret)
+		BT_ERR("HSCFG command failed\n");
 
-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
-
-	BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
-	       cmd->data[1]);
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
 
 int btmrvl_enable_ps(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (skb == NULL) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
-
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-					BT_CMD_AUTO_SLEEP_MODE));
-	cmd->length = 1;
+	int ret;
+	u8 param;
 
 	if (priv->btmrvl_dev.psmode)
-		cmd->data[0] = BT_PS_ENABLE;
+		param = BT_PS_ENABLE;
 	else
-		cmd->data[0] = BT_PS_DISABLE;
+		param = BT_PS_DISABLE;
 
-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
-
-	BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
+	if (ret)
+		BT_ERR("PSMODE command failed\n");
 
 	return 0;
 }
@@ -276,37 +254,11 @@
 
 int btmrvl_enable_hs(struct btmrvl_private *priv)
 {
-	struct sk_buff *skb;
-	struct btmrvl_cmd *cmd;
-	int ret = 0;
+	int ret;
 
-	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-	if (skb == NULL) {
-		BT_ERR("No free skb");
-		return -ENOMEM;
-	}
-
-	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
-	cmd->length = 0;
-
-	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-	skb->dev = (void *) priv->btmrvl_dev.hcidev;
-	skb_queue_head(&priv->adapter->tx_queue, skb);
-
-	BT_DBG("Queue hs enable Command");
-
-	wake_up_interruptible(&priv->main_thread.wait_q);
-
-	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
-			priv->adapter->hs_state,
-			msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
-		ret = -ETIMEDOUT;
-		BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
-						priv->adapter->ps_state,
-						priv->adapter->wakeup_tries);
-	}
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+	if (ret)
+		BT_ERR("Host sleep enable command failed\n");
 
 	return ret;
 }
@@ -403,26 +355,12 @@
 	priv->adapter = NULL;
 }
 
-static int btmrvl_ioctl(struct hci_dev *hdev,
-				unsigned int cmd, unsigned long arg)
+static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	return -ENOIOCTLCMD;
-}
-
-static int btmrvl_send_frame(struct sk_buff *skb)
-{
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-	struct btmrvl_private *priv = NULL;
+	struct btmrvl_private *priv = hci_get_drvdata(hdev);
 
 	BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
 
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device");
-		return -ENODEV;
-	}
-
-	priv = hci_get_drvdata(hdev);
-
 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
 		BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
 		print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -480,6 +418,137 @@
 }
 
 /*
+ * This function parses the provided calibration data input. It should
+ * contain hex bytes separated by spaces or newline characters, for example:
+ * 00 1C 01 37 FF FF FF FF 02 04 7F 01
+ * CE BA 00 00 00 2D C6 C0 00 00 00 00
+ * 00 F0 00 00
+ */
+static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+{
+	const u8 *s = src;
+	u8 *d = dst;
+	int ret;
+	u8 tmp[3];
+
+	tmp[2] = '\0';
+	while ((s - src) <= len - 2) {
+		if (isspace(*s)) {
+			s++;
+			continue;
+		}
+
+		if (isxdigit(*s)) {
+			if ((d - dst) >= dst_size) {
+				BT_ERR("calibration data file too big!!!");
+				return -EINVAL;
+			}
+
+			memcpy(tmp, s, 2);
+
+			ret = kstrtou8(tmp, 16, d++);
+			if (ret < 0)
+				return ret;
+
+			s += 2;
+		} else {
+			return -EINVAL;
+		}
+	}
+	if (d == dst)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int btmrvl_load_cal_data(struct btmrvl_private *priv,
+				u8 *config_data)
+{
+	int i, ret;
+	u8 data[BT_CMD_DATA_SIZE];
+
+	data[0] = 0x00;
+	data[1] = 0x00;
+	data[2] = 0x00;
+	data[3] = BT_CMD_DATA_SIZE - 4;
+
+	/* Swap cal-data bytes: each group of four bytes is reversed.
+	 * Allowing for the 4-byte SDIO header offset, input bytes map to
+	 * output bytes as
+	 * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
+	 * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
+	for (i = 4; i < BT_CMD_DATA_SIZE; i++)
+		data[i] = config_data[(i / 4) * 8 - 1 - i];
+
+	print_hex_dump_bytes("Calibration data: ",
+			     DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+
+	ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
+				   BT_CMD_DATA_SIZE);
+	if (ret)
+		BT_ERR("Failed to download caibration data\n");
+
+	return 0;
+}
+
+static int
+btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+{
+	u8 cal_data[BT_CAL_DATA_SIZE];
+	int ret;
+
+	ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+	if (ret)
+		return ret;
+
+	ret = btmrvl_load_cal_data(priv, cal_data);
+	if (ret) {
+		BT_ERR("Fail to load calibrate data");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int btmrvl_cal_data_config(struct btmrvl_private *priv)
+{
+	const struct firmware *cfg;
+	int ret;
+	const char *cal_data = priv->btmrvl_dev.cal_data;
+
+	if (!cal_data)
+		return 0;
+
+	ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
+	if (ret < 0) {
+		BT_DBG("Failed to get %s file, skipping cal data download",
+		       cal_data);
+		return 0;
+	}
+
+	ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
+	release_firmware(cfg);
+	return ret;
+}
+
+static int btmrvl_setup(struct hci_dev *hdev)
+{
+	struct btmrvl_private *priv = hci_get_drvdata(hdev);
+
+	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+	if (btmrvl_cal_data_config(priv))
+		BT_ERR("Set cal data failed");
+
+	priv->btmrvl_dev.psmode = 1;
+	btmrvl_enable_ps(priv);
+
+	priv->btmrvl_dev.gpio_gap = 0xffff;
+	btmrvl_send_hscfg_cmd(priv);
+
+	return 0;
+}
+
+/*
  * This function handles the event generated by firmware, rx data
  * received from firmware, and tx data sent from kernel.
  */
@@ -566,14 +635,12 @@
 	priv->btmrvl_dev.hcidev = hdev;
 	hci_set_drvdata(hdev, priv);
 
-	hdev->bus = HCI_SDIO;
-	hdev->open = btmrvl_open;
+	hdev->bus   = HCI_SDIO;
+	hdev->open  = btmrvl_open;
 	hdev->close = btmrvl_close;
 	hdev->flush = btmrvl_flush;
-	hdev->send = btmrvl_send_frame;
-	hdev->ioctl = btmrvl_ioctl;
-
-	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+	hdev->send  = btmrvl_send_frame;
+	hdev->setup = btmrvl_setup;
 
 	hdev->dev_type = priv->btmrvl_dev.dev_type;
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 00da6df..fabcf5b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -18,7 +18,6 @@
  * this warranty disclaimer.
  **/
 
-#include <linux/firmware.h>
 #include <linux/slab.h>
 
 #include <linux/mmc/sdio_ids.h>
@@ -102,6 +101,7 @@
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 	.helper		= "mrvl/sd8688_helper.bin",
 	.firmware	= "mrvl/sd8688.bin",
+	.cal_data	= NULL,
 	.reg		= &btmrvl_reg_8688,
 	.sd_blksz_fw_dl	= 64,
 };
@@ -109,6 +109,7 @@
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 	.helper		= NULL,
 	.firmware	= "mrvl/sd8787_uapsta.bin",
+	.cal_data	= NULL,
 	.reg		= &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl	= 256,
 };
@@ -116,6 +117,7 @@
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
 	.helper		= NULL,
 	.firmware	= "mrvl/sd8797_uapsta.bin",
+	.cal_data	= "mrvl/sd8797_caldata.conf",
 	.reg		= &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl	= 256,
 };
@@ -123,6 +125,7 @@
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
 	.helper		= NULL,
 	.firmware	= "mrvl/sd8897_uapsta.bin",
+	.cal_data	= NULL,
 	.reg		= &btmrvl_reg_88xx,
 	.sd_blksz_fw_dl	= 256,
 };
@@ -597,15 +600,14 @@
 	case HCI_SCODATA_PKT:
 	case HCI_EVENT_PKT:
 		bt_cb(skb)->pkt_type = type;
-		skb->dev = (void *)hdev;
 		skb_put(skb, buf_len);
 		skb_pull(skb, SDIO_HEADER_LEN);
 
 		if (type == HCI_EVENT_PKT) {
 			if (btmrvl_check_evtpkt(priv, skb))
-				hci_recv_frame(skb);
+				hci_recv_frame(hdev, skb);
 		} else {
-			hci_recv_frame(skb);
+			hci_recv_frame(hdev, skb);
 		}
 
 		hdev->stat.byte_rx += buf_len;
@@ -613,12 +615,11 @@
 
 	case MRVL_VENDOR_PKT:
 		bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
-		skb->dev = (void *)hdev;
 		skb_put(skb, buf_len);
 		skb_pull(skb, SDIO_HEADER_LEN);
 
 		if (btmrvl_process_event(priv, skb))
-			hci_recv_frame(skb);
+			hci_recv_frame(hdev, skb);
 
 		hdev->stat.byte_rx += buf_len;
 		break;
@@ -1006,6 +1007,7 @@
 		struct btmrvl_sdio_device *data = (void *) id->driver_data;
 		card->helper = data->helper;
 		card->firmware = data->firmware;
+		card->cal_data = data->cal_data;
 		card->reg = data->reg;
 		card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
 	}
@@ -1034,6 +1036,8 @@
 	}
 
 	card->priv = priv;
+	priv->btmrvl_dev.dev = &card->func->dev;
+	priv->btmrvl_dev.cal_data = card->cal_data;
 
 	/* Initialize the interface specific function pointers */
 	priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1046,12 +1050,6 @@
 		goto disable_host_int;
 	}
 
-	priv->btmrvl_dev.psmode = 1;
-	btmrvl_enable_ps(priv);
-
-	priv->btmrvl_dev.gpio_gap = 0xffff;
-	btmrvl_send_hscfg_cmd(priv);
-
 	return 0;
 
 disable_host_int:
@@ -1222,4 +1220,5 @@
 MODULE_FIRMWARE("mrvl/sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a6..6872d9e 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -85,6 +85,7 @@
 	u32 ioport;
 	const char *helper;
 	const char *firmware;
+	const char *cal_data;
 	const struct btmrvl_sdio_card_reg *reg;
 	u16 sd_blksz_fw_dl;
 	u8 rx_unit;
@@ -94,6 +95,7 @@
 struct btmrvl_sdio_device {
 	const char *helper;
 	const char *firmware;
+	const char *cal_data;
 	const struct btmrvl_sdio_card_reg *reg;
 	u16 sd_blksz_fw_dl;
 };
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 4a99097..b61440a 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -157,10 +157,9 @@
 
 	data->hdev->stat.byte_rx += len;
 
-	skb->dev = (void *) data->hdev;
 	bt_cb(skb)->pkt_type = hdr[3];
 
-	err = hci_recv_frame(skb);
+	err = hci_recv_frame(data->hdev, skb);
 	if (err < 0)
 		return err;
 
@@ -255,9 +254,8 @@
 	return 0;
 }
 
-static int btsdio_send_frame(struct sk_buff *skb)
+static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 	struct btsdio_data *data = hci_get_drvdata(hdev);
 
 	BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index beb262f..a03ecc2 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -198,7 +198,6 @@
 
 		if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-			info->rx_skb->dev = (void *) info->hdev;
 			bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
 
 			switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -265,7 +264,7 @@
 					break;
 
 				case RECV_WAIT_DATA:
-					hci_recv_frame(info->rx_skb);
+					hci_recv_frame(info->hdev, info->rx_skb);
 					info->rx_skb = NULL;
 					break;
 
@@ -424,17 +423,9 @@
 }
 
 
-static int btuart_hci_send_frame(struct sk_buff *skb)
+static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	btuart_info_t *info;
-	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
-
-	info = hci_get_drvdata(hdev);
+	btuart_info_t *info = hci_get_drvdata(hdev);
 
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
@@ -458,12 +449,6 @@
 }
 
 
-static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-	return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -495,11 +480,10 @@
 	hci_set_drvdata(hdev, info);
 	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-	hdev->open     = btuart_hci_open;
-	hdev->close    = btuart_hci_close;
-	hdev->flush    = btuart_hci_flush;
-	hdev->send     = btuart_hci_send_frame;
-	hdev->ioctl    = btuart_hci_ioctl;
+	hdev->open  = btuart_hci_open;
+	hdev->close = btuart_hci_close;
+	hdev->flush = btuart_hci_flush;
+	hdev->send  = btuart_hci_send_frame;
 
 	spin_lock_irqsave(&(info->lock), flags);
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f3dfc0a..c0ff34f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -50,7 +50,7 @@
 #define BTUSB_ATH3012		0x80
 #define BTUSB_INTEL		0x100
 
-static struct usb_device_id btusb_table[] = {
+static const struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
@@ -121,7 +121,7 @@
 
 MODULE_DEVICE_TABLE(usb, btusb_table);
 
-static struct usb_device_id blacklist_table[] = {
+static const struct usb_device_id blacklist_table[] = {
 	/* CSR BlueCore devices */
 	{ USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR },
 
@@ -716,9 +716,8 @@
 	return 0;
 }
 
-static int btusb_send_frame(struct sk_buff *skb)
+static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 	struct btusb_data *data = hci_get_drvdata(hdev);
 	struct usb_ctrlrequest *dr;
 	struct urb *urb;
@@ -730,6 +729,8 @@
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
+	skb->dev = (void *) hdev;
+
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -774,7 +775,7 @@
 		break;
 
 	case HCI_SCODATA_PKT:
-		if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+		if (!data->isoc_tx_ep || hci_conn_num(hdev, SCO_LINK) < 1)
 			return -ENODEV;
 
 		urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
@@ -833,8 +834,8 @@
 
 	BT_DBG("%s evt %d", hdev->name, evt);
 
-	if (hdev->conn_hash.sco_num != data->sco_num) {
-		data->sco_num = hdev->conn_hash.sco_num;
+	if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
+		data->sco_num = hci_conn_num(hdev, SCO_LINK);
 		schedule_work(&data->work);
 	}
 }
@@ -889,7 +890,7 @@
 	int new_alts;
 	int err;
 
-	if (hdev->conn_hash.sco_num > 0) {
+	if (data->sco_num > 0) {
 		if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
 			err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
 			if (err < 0) {
@@ -903,9 +904,9 @@
 
 		if (hdev->voice_setting & 0x0020) {
 			static const int alts[3] = { 2, 4, 5 };
-			new_alts = alts[hdev->conn_hash.sco_num - 1];
+			new_alts = alts[data->sco_num - 1];
 		} else {
-			new_alts = hdev->conn_hash.sco_num;
+			new_alts = data->sco_num;
 		}
 
 		if (data->isoc_altsetting != new_alts) {
@@ -1628,7 +1629,6 @@
 #ifdef CONFIG_PM
 	.suspend	= btusb_suspend,
 	.resume		= btusb_resume,
-	.reset_resume	= btusb_resume,
 #endif
 	.id_table	= btusb_table,
 	.supports_autosuspend = 1,
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 60abf59..f038dba 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -108,10 +108,8 @@
 		return -EFAULT;
 	}
 
-	skb->dev = (void *) lhst->hdev;
-
 	/* Forward skb to HCI core layer */
-	err = hci_recv_frame(skb);
+	err = hci_recv_frame(lhst->hdev, skb);
 	if (err < 0) {
 		BT_ERR("Unable to push skb to HCI core(%d)", err);
 		return err;
@@ -253,14 +251,11 @@
 	return err;
 }
 
-static int ti_st_send_frame(struct sk_buff *skb)
+static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev;
 	struct ti_st *hst;
 	long len;
 
-	hdev = (struct hci_dev *)skb->dev;
-
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 33f3a69..52eed1f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -256,9 +256,8 @@
 				case 0x83:
 				case 0x84:
 					/* send frame to the HCI layer */
-					info->rx_skb->dev = (void *) info->hdev;
 					bt_cb(info->rx_skb)->pkt_type &= 0x0f;
-					hci_recv_frame(info->rx_skb);
+					hci_recv_frame(info->hdev, info->rx_skb);
 					break;
 				default:
 					/* unknown packet */
@@ -383,20 +382,12 @@
 }
 
 
-static int dtl1_hci_send_frame(struct sk_buff *skb)
+static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	dtl1_info_t *info;
-	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+	dtl1_info_t *info = hci_get_drvdata(hdev);
 	struct sk_buff *s;
 	nsh_t nsh;
 
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
-
-	info = hci_get_drvdata(hdev);
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
@@ -438,12 +429,6 @@
 }
 
 
-static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd,  unsigned long arg)
-{
-	return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -477,11 +462,10 @@
 	hci_set_drvdata(hdev, info);
 	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-	hdev->open     = dtl1_hci_open;
-	hdev->close    = dtl1_hci_close;
-	hdev->flush    = dtl1_hci_flush;
-	hdev->send     = dtl1_hci_send_frame;
-	hdev->ioctl    = dtl1_hci_ioctl;
+	hdev->open  = dtl1_hci_open;
+	hdev->close = dtl1_hci_close;
+	hdev->flush = dtl1_hci_flush;
+	hdev->send  = dtl1_hci_send_frame;
 
 	spin_lock_irqsave(&(info->lock), flags);
 
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 57e502e..0bc87f7 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -522,7 +522,7 @@
 				memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
 				bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
 
-				hci_recv_frame(bcsp->rx_skb);
+				hci_recv_frame(hu->hdev, bcsp->rx_skb);
 			} else {
 				BT_ERR ("Packet for unknown channel (%u %s)",
 					bcsp->rx_skb->data[1] & 0x0f,
@@ -536,7 +536,7 @@
 		/* Pull out BCSP hdr */
 		skb_pull(bcsp->rx_skb, 4);
 
-		hci_recv_frame(bcsp->rx_skb);
+		hci_recv_frame(hu->hdev, bcsp->rx_skb);
 	}
 
 	bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -655,7 +655,6 @@
 					bcsp->rx_count = 0;
 					return 0;
 				}
-				bcsp->rx_skb->dev = (void *) hu->hdev;
 				break;
 			}
 			break;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 8ae9f1e..7048a58 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -124,30 +124,6 @@
 	return 0;
 }
 
-static inline int h4_check_data_len(struct h4_struct *h4, int len)
-{
-	int room = skb_tailroom(h4->rx_skb);
-
-	BT_DBG("len %d room %d", len, room);
-
-	if (!len) {
-		hci_recv_frame(h4->rx_skb);
-	} else if (len > room) {
-		BT_ERR("Data length is too large");
-		kfree_skb(h4->rx_skb);
-	} else {
-		h4->rx_state = H4_W4_DATA;
-		h4->rx_count = len;
-		return len;
-	}
-
-	h4->rx_state = H4_W4_PACKET_TYPE;
-	h4->rx_skb   = NULL;
-	h4->rx_count = 0;
-
-	return 0;
-}
-
 /* Recv data */
 static int h4_recv(struct hci_uart *hu, void *data, int count)
 {
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index b6154d5..f6f4974 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -340,7 +340,7 @@
 		/* Remove Three-wire header */
 		skb_pull(h5->rx_skb, 4);
 
-		hci_recv_frame(h5->rx_skb);
+		hci_recv_frame(hu->hdev, h5->rx_skb);
 		h5->rx_skb = NULL;
 
 		break;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index bc68a44..6e06f6f 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -234,21 +234,13 @@
 }
 
 /* Send frames from HCI layer */
-static int hci_uart_send_frame(struct sk_buff *skb)
+static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev* hdev = (struct hci_dev *) skb->dev;
-	struct hci_uart *hu;
-
-	if (!hdev) {
-		BT_ERR("Frame for unknown device (hdev=NULL)");
-		return -ENODEV;
-	}
+	struct hci_uart *hu = hci_get_drvdata(hdev);
 
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
-	hu = hci_get_drvdata(hdev);
-
 	BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
 	hu->proto->enqueue(hu, skb);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index cfc7679..69a90b1 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -110,7 +110,6 @@
 	/* prepare packet */
 	hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
 	hcill_packet->cmd = cmd;
-	skb->dev = (void *) hu->hdev;
 
 	/* send packet */
 	skb_queue_tail(&ll->txq, skb);
@@ -346,14 +345,14 @@
 	return 0;
 }
 
-static inline int ll_check_data_len(struct ll_struct *ll, int len)
+static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll, int len)
 {
 	int room = skb_tailroom(ll->rx_skb);
 
 	BT_DBG("len %d room %d", len, room);
 
 	if (!len) {
-		hci_recv_frame(ll->rx_skb);
+		hci_recv_frame(hdev, ll->rx_skb);
 	} else if (len > room) {
 		BT_ERR("Data length is too large");
 		kfree_skb(ll->rx_skb);
@@ -395,7 +394,7 @@
 			switch (ll->rx_state) {
 			case HCILL_W4_DATA:
 				BT_DBG("Complete data");
-				hci_recv_frame(ll->rx_skb);
+				hci_recv_frame(hu->hdev, ll->rx_skb);
 
 				ll->rx_state = HCILL_W4_PACKET_TYPE;
 				ll->rx_skb = NULL;
@@ -406,7 +405,7 @@
 
 				BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
 
-				ll_check_data_len(ll, eh->plen);
+				ll_check_data_len(hu->hdev, ll, eh->plen);
 				continue;
 
 			case HCILL_W4_ACL_HDR:
@@ -415,7 +414,7 @@
 
 				BT_DBG("ACL header: dlen %d", dlen);
 
-				ll_check_data_len(ll, dlen);
+				ll_check_data_len(hu->hdev, ll, dlen);
 				continue;
 
 			case HCILL_W4_SCO_HDR:
@@ -423,7 +422,7 @@
 
 				BT_DBG("SCO header: dlen %d", sh->dlen);
 
-				ll_check_data_len(ll, sh->dlen);
+				ll_check_data_len(hu->hdev, ll, sh->dlen);
 				continue;
 			}
 		}
@@ -494,7 +493,6 @@
 			return -ENOMEM;
 		}
 
-		ll->rx_skb->dev = (void *) hu->hdev;
 		bt_cb(ll->rx_skb)->pkt_type = type;
 	}
 
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d8b7aed..7b16738 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <asm/unaligned.h>
 
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -39,17 +40,17 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "1.3"
+#define VERSION "1.4"
 
 static bool amp;
 
 struct vhci_data {
 	struct hci_dev *hdev;
 
-	unsigned long flags;
-
 	wait_queue_head_t read_wait;
 	struct sk_buff_head readq;
+
+	struct delayed_work open_timeout;
 };
 
 static int vhci_open_dev(struct hci_dev *hdev)
@@ -80,35 +81,73 @@
 	return 0;
 }
 
-static int vhci_send_frame(struct sk_buff *skb)
+static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev* hdev = (struct hci_dev *) skb->dev;
-	struct vhci_data *data;
-
-	if (!hdev) {
-		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-		return -ENODEV;
-	}
+	struct vhci_data *data = hci_get_drvdata(hdev);
 
 	if (!test_bit(HCI_RUNNING, &hdev->flags))
 		return -EBUSY;
 
-	data = hci_get_drvdata(hdev);
-
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 	skb_queue_tail(&data->readq, skb);
 
 	wake_up_interruptible(&data->read_wait);
+	return 0;
+}
 
+static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+{
+	struct hci_dev *hdev;
+	struct sk_buff *skb;
+
+	skb = bt_skb_alloc(4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	hdev = hci_alloc_dev();
+	if (!hdev) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	data->hdev = hdev;
+
+	hdev->bus = HCI_VIRTUAL;
+	hdev->dev_type = dev_type;
+	hci_set_drvdata(hdev, data);
+
+	hdev->open  = vhci_open_dev;
+	hdev->close = vhci_close_dev;
+	hdev->flush = vhci_flush;
+	hdev->send  = vhci_send_frame;
+
+	if (hci_register_dev(hdev) < 0) {
+		BT_ERR("Can't register HCI device");
+		hci_free_dev(hdev);
+		data->hdev = NULL;
+		kfree_skb(skb);
+		return -EBUSY;
+	}
+
+	bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
+
+	*skb_put(skb, 1) = 0xff;
+	*skb_put(skb, 1) = dev_type;
+	put_unaligned_le16(hdev->id, skb_put(skb, 2));
+	skb_queue_tail(&data->readq, skb);
+
+	wake_up_interruptible(&data->read_wait);
 	return 0;
 }
 
 static inline ssize_t vhci_get_user(struct vhci_data *data,
-					const char __user *buf, size_t count)
+				    const char __user *buf, size_t count)
 {
 	struct sk_buff *skb;
+	__u8 pkt_type, dev_type;
+	int ret;
 
-	if (count > HCI_MAX_FRAME_SIZE)
+	if (count < 2 || count > HCI_MAX_FRAME_SIZE)
 		return -EINVAL;
 
 	skb = bt_skb_alloc(count, GFP_KERNEL);
@@ -120,27 +159,69 @@
 		return -EFAULT;
 	}
 
-	skb->dev = (void *) data->hdev;
-	bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
+	pkt_type = *((__u8 *) skb->data);
 	skb_pull(skb, 1);
 
-	hci_recv_frame(skb);
+	switch (pkt_type) {
+	case HCI_EVENT_PKT:
+	case HCI_ACLDATA_PKT:
+	case HCI_SCODATA_PKT:
+		if (!data->hdev) {
+			kfree_skb(skb);
+			return -ENODEV;
+		}
 
-	return count;
+		bt_cb(skb)->pkt_type = pkt_type;
+
+		ret = hci_recv_frame(data->hdev, skb);
+		break;
+
+	case HCI_VENDOR_PKT:
+		if (data->hdev) {
+			kfree_skb(skb);
+			return -EBADFD;
+		}
+
+		cancel_delayed_work_sync(&data->open_timeout);
+
+		dev_type = *((__u8 *) skb->data);
+		skb_pull(skb, 1);
+
+		if (skb->len > 0) {
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+
+		kfree_skb(skb);
+
+		if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+			return -EINVAL;
+
+		ret = vhci_create_device(data, dev_type);
+		break;
+
+	default:
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	return (ret < 0) ? ret : count;
 }
 
 static inline ssize_t vhci_put_user(struct vhci_data *data,
-			struct sk_buff *skb, char __user *buf, int count)
+				    struct sk_buff *skb,
+				    char __user *buf, int count)
 {
 	char __user *ptr = buf;
-	int len, total = 0;
+	int len;
 
 	len = min_t(unsigned int, skb->len, count);
 
 	if (copy_to_user(ptr, skb->data, len))
 		return -EFAULT;
 
-	total += len;
+	if (!data->hdev)
+		return len;
 
 	data->hdev->stat.byte_tx += len;
 
@@ -148,21 +229,19 @@
 	case HCI_COMMAND_PKT:
 		data->hdev->stat.cmd_tx++;
 		break;
-
 	case HCI_ACLDATA_PKT:
 		data->hdev->stat.acl_tx++;
 		break;
-
 	case HCI_SCODATA_PKT:
 		data->hdev->stat.sco_tx++;
 		break;
 	}
 
-	return total;
+	return len;
 }
 
 static ssize_t vhci_read(struct file *file,
-				char __user *buf, size_t count, loff_t *pos)
+			 char __user *buf, size_t count, loff_t *pos)
 {
 	struct vhci_data *data = file->private_data;
 	struct sk_buff *skb;
@@ -185,7 +264,7 @@
 		}
 
 		ret = wait_event_interruptible(data->read_wait,
-					!skb_queue_empty(&data->readq));
+					       !skb_queue_empty(&data->readq));
 		if (ret < 0)
 			break;
 	}
@@ -194,7 +273,7 @@
 }
 
 static ssize_t vhci_write(struct file *file,
-			const char __user *buf, size_t count, loff_t *pos)
+			  const char __user *buf, size_t count, loff_t *pos)
 {
 	struct vhci_data *data = file->private_data;
 
@@ -213,10 +292,17 @@
 	return POLLOUT | POLLWRNORM;
 }
 
+static void vhci_open_timeout(struct work_struct *work)
+{
+	struct vhci_data *data = container_of(work, struct vhci_data,
+					      open_timeout.work);
+
+	vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+}
+
 static int vhci_open(struct inode *inode, struct file *file)
 {
 	struct vhci_data *data;
-	struct hci_dev *hdev;
 
 	data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
 	if (!data)
@@ -225,35 +311,13 @@
 	skb_queue_head_init(&data->readq);
 	init_waitqueue_head(&data->read_wait);
 
-	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(data);
-		return -ENOMEM;
-	}
-
-	data->hdev = hdev;
-
-	hdev->bus = HCI_VIRTUAL;
-	hci_set_drvdata(hdev, data);
-
-	if (amp)
-		hdev->dev_type = HCI_AMP;
-
-	hdev->open     = vhci_open_dev;
-	hdev->close    = vhci_close_dev;
-	hdev->flush    = vhci_flush;
-	hdev->send     = vhci_send_frame;
-
-	if (hci_register_dev(hdev) < 0) {
-		BT_ERR("Can't register HCI device");
-		kfree(data);
-		hci_free_dev(hdev);
-		return -EBUSY;
-	}
+	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
 
 	file->private_data = data;
 	nonseekable_open(inode, file);
 
+	schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+
 	return 0;
 }
 
@@ -262,8 +326,12 @@
 	struct vhci_data *data = file->private_data;
 	struct hci_dev *hdev = data->hdev;
 
-	hci_unregister_dev(hdev);
-	hci_free_dev(hdev);
+	cancel_delayed_work_sync(&data->open_timeout);
+
+	if (hdev) {
+		hci_unregister_dev(hdev);
+		hci_free_dev(hdev);
+	}
 
 	file->private_data = NULL;
 	kfree(data);
@@ -309,3 +377,4 @@
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("devname:vhci");
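
The rewritten hci_vhci turns controller creation into a small
userspace-visible protocol: after open(), the caller has about one second
(the open_timeout delayed work above, msecs_to_jiffies(1000)) to write a
two-byte vendor packet { 0xff, dev_type } choosing HCI_BREDR or HCI_AMP;
otherwise the timeout creates a device of the type picked by the amp module
parameter.  Either way the kernel queues a four-byte vendor packet
announcing the assigned index.  A hedged userspace sketch, not part of the
patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char req[2] = { 0xff /* HCI_VENDOR_PKT */,
					 0x00 /* HCI_BREDR */ };
		unsigned char rsp[4];
		int fd = open("/dev/vhci", O_RDWR);

		if (fd < 0)
			return 1;
		if (write(fd, req, sizeof(req)) != sizeof(req))
			return 1;
		/* reply: 0xff, dev_type, little-endian u16 index */
		if (read(fd, rsp, sizeof(rsp)) != sizeof(rsp))
			return 1;
		printf("created hci%u\n", rsp[2] | (rsp[3] << 8));
		pause();	/* the device goes away when fd is closed */
		return 0;
	}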
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index bb5b90e..b6739cb 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -852,7 +852,7 @@
 
 	/* Enable the CCI port */
 "	ldr	r0, [r0, %[offsetof_port_phys]] \n"
-"	mov	r3, #"__stringify(CCI_ENABLE_REQ)" \n"
+"	mov	r3, %[cci_enable_req]\n"		   
 "	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
 
 	/* poll the status reg for completion */
@@ -860,7 +860,7 @@
 "	ldr	r0, [r1] \n"
 "	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
 "4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
-"	tst	r1, #1 \n"
+"	tst	r1, %[cci_control_status_bits] \n"			
 "	bne	4b \n"
 
 "	mov	r0, #0 \n"
@@ -873,6 +873,8 @@
 "7:	.word	cci_ctrl_phys - . \n"
 	: :
 	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
+	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+	[cci_control_status_bits] "i" cpu_to_le32(1),
 #ifndef __ARMEB__
 	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
 #else
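
These arm-cci hunks replace immediates pasted into the asm text with
__stringify() by named input operands wrapped in cpu_to_le32(), which, as
the surrounding __ARMEB__ conditionals suggest, lets a big-endian kernel
store the byte order the little-endian CCI registers expect, with the swap
folded into the constant at compile time.  A sketch of the named-operand
idiom (foo_enable and FOO_EN are hypothetical):

	static void foo_enable(void __iomem *base)
	{
		asm volatile(
	"	mov	r3, %[foo_en]\n"
	"	str	r3, [%[base]]\n"
		: :
		[foo_en] "i" (cpu_to_le32(FOO_EN)),
		[base] "r" (base)
		: "r3", "memory");
	}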
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1421997..fa3243d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -522,10 +522,16 @@
 	  If you say Y here, user applications will be able to mmap
 	  the HPET registers.
 
+config HPET_MMAP_DEFAULT
+	bool "Enable HPET MMAP access by default"
+	default y
+	depends on HPET_MMAP
+	help
 	  In some hardware implementations, the page containing HPET
 	  registers may also contain other things that shouldn't be
-	  exposed to the user.  If this applies to your hardware,
-	  say N here.
+	  exposed to the user.  This option selects the default user
+	  access to the registers (used when the hpet_mmap kernel
+	  parameter is not set) for applications that require it.
 
 config HANGCHECK_TIMER
 	tristate "Hangcheck timer"
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index dca5834..5d9c31d 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -367,12 +367,29 @@
 	return 0;
 }
 
+#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_MMAP_DEFAULT
+static int hpet_mmap_enabled = 1;
+#else
+static int hpet_mmap_enabled = 0;
+#endif
+
+static __init int hpet_mmap_enable(char *str)
+{
+	get_option(&str, &hpet_mmap_enabled);
+	pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
+	return 1;
+}
+__setup("hpet_mmap", hpet_mmap_enable);
+
 static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 {
-#ifdef	CONFIG_HPET_MMAP
 	struct hpet_dev *devp;
 	unsigned long addr;
 
+	if (!hpet_mmap_enabled)
+		return -EACCES;
+
 	devp = file->private_data;
 	addr = devp->hd_hpets->hp_hpet_phys;
 
@@ -381,10 +398,13 @@
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	return vm_iomap_memory(vma, addr, PAGE_SIZE);
-#else
-	return -ENOSYS;
-#endif
 }
+#else
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return -ENOSYS;
+}
+#endif
 
 static int hpet_fasync(int fd, struct file *file, int on)
 {
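
Taken together, the Kconfig and hpet.c changes turn HPET register mmap into
a runtime decision: CONFIG_HPET_MMAP still compiles the support in,
HPET_MMAP_DEFAULT picks the default, and the new hpet_mmap= boot parameter
overrides it, with a disabled mapping now failing with -EACCES instead of
the old compile-time -ENOSYS.  A userspace sketch of what is being gated
(illustration only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		int fd = open("/dev/hpet", O_RDONLY);
		void *regs;

		if (fd < 0)
			return 1;
		regs = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (regs == MAP_FAILED)
			perror("mmap");	/* EACCES when booted with hpet_mmap=0 */
		return 0;
	}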
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index d2120ba..73ce739 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -79,7 +79,7 @@
 	priv->expires = cur + delay;
 	priv->present = 0;
 
-	INIT_COMPLETION(priv->completion);
+	reinit_completion(&priv->completion);
 	mod_timer(&priv->timer, priv->expires);
 
 	return 4;
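
This one-liner is part of the tree-wide rename of INIT_COMPLETION() to
reinit_completion(): same semantics (reset the done count of an
already-initialized completion before reuse), but a function-style name
taking a pointer, which can no longer be confused with init_completion().
The usual pattern, with struct foo_priv hypothetical:

	static int foo_read_one(struct foo_priv *priv)
	{
		/* re-arm before each round trip; init_completion()
		 * is only for the very first initialization */
		reinit_completion(&priv->completion);
		mod_timer(&priv->timer, jiffies + HZ);
		return wait_for_completion_interruptible(&priv->completion);
	}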
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index ef46a9c..c12398d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -133,7 +133,7 @@
 	remove_common(vdev);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtrng_freeze(struct virtio_device *vdev)
 {
 	remove_common(vdev);
@@ -157,7 +157,7 @@
 	.id_table =	id_table,
 	.probe =	virtrng_probe,
 	.remove =	virtrng_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze =	virtrng_freeze,
 	.restore =	virtrng_restore,
 #endif
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 2f685f6..ffa97d2 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -114,7 +114,7 @@
 	int minor = iminor(inode);
 	struct miscdevice *c;
 	int err = -ENODEV;
-	const struct file_operations *old_fops, *new_fops = NULL;
+	const struct file_operations *new_fops = NULL;
 
 	mutex_lock(&misc_mtx);
 	
@@ -141,17 +141,11 @@
 	}
 
 	err = 0;
-	old_fops = file->f_op;
-	file->f_op = new_fops;
+	replace_fops(file, new_fops);
 	if (file->f_op->open) {
 		file->private_data = c;
-		err=file->f_op->open(inode,file);
-		if (err) {
-			fops_put(file->f_op);
-			file->f_op = fops_get(old_fops);
-		}
+		err = file->f_op->open(inode,file);
 	}
-	fops_put(old_fops);
 fail:
 	mutex_unlock(&misc_mtx);
 	return err;
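
misc_open() used to install the sub-driver's fops by hand and, when the
sub-driver's open() failed, swap the old ones back in, a dance that is easy
to get wrong with module reference counts.  The new replace_fops() helper
consumes the reference the caller already holds on the new fops and drops
the reference on the old ones, so no error-path rollback is needed.  A
rough sketch of the resulting pattern for a demultiplexing open()
(foo_open and foo_lookup_fops are hypothetical):

	static int foo_open(struct inode *inode, struct file *file)
	{
		const struct file_operations *fops;

		fops = fops_get(foo_lookup_fops(iminor(inode)));
		if (!fops)
			return -ENODEV;

		replace_fops(file, fops);	/* consumes our reference */
		if (file->f_op->open)
			return file->f_op->open(inode, file);
		return 0;
	}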
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7a744d3..429b75b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,6 +255,7 @@
 #include <linux/fips.h>
 #include <linux/ptrace.h>
 #include <linux/kmemcheck.h>
+#include <linux/workqueue.h>
 #include <linux/irq.h>
 
 #include <asm/processor.h>
@@ -269,14 +270,28 @@
 /*
  * Configuration information
  */
-#define INPUT_POOL_WORDS 128
-#define OUTPUT_POOL_WORDS 32
-#define SEC_XFER_SIZE 512
-#define EXTRACT_SIZE 10
+#define INPUT_POOL_SHIFT	12
+#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
+#define OUTPUT_POOL_SHIFT	10
+#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
+#define SEC_XFER_SIZE		512
+#define EXTRACT_SIZE		10
+
+#define DEBUG_RANDOM_BOOT 0
 
 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 
 /*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+ *
+ * 2*(ENTROPY_SHIFT + log2(poolbits)) must be <= 31, or the multiply in
+ * credit_entropy_bits() needs to be 64 bits wide.
+ */
+#define ENTROPY_SHIFT 3
+#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+
+/*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
  */
@@ -287,107 +302,99 @@
  * should wake up processes which are selecting or polling on write
  * access to /dev/random.
  */
-static int random_write_wakeup_thresh = 128;
+static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
 
 /*
- * When the input pool goes over trickle_thresh, start dropping most
- * samples to avoid wasting CPU time and reduce lock contention.
+ * The minimum number of seconds between urandom pool reseeding.  We
+ * do this to limit the amount of entropy that can be drained from the
+ * input pool even if there are heavy demands on /dev/urandom.
  */
-
-static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
-
-static DEFINE_PER_CPU(int, trickle_count);
+static int random_min_urandom_seed = 60;
 
 /*
- * A pool of size .poolwords is stirred with a primitive polynomial
- * of degree .poolwords over GF(2).  The taps for various sizes are
- * defined below.  They are chosen to be evenly spaced (minimum RMS
- * distance from evenly spaced; the numbers in the comments are a
- * scaled squared error sum) except for the last tap, which is 1 to
- * get the twisting happening as fast as possible.
- */
-static struct poolinfo {
-	int poolwords;
-	int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
-	/* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
-	{ 128,	103,	76,	51,	25,	1 },
-	/* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
-	{ 32,	26,	20,	14,	7,	1 },
-#if 0
-	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
-	{ 2048,	1638,	1231,	819,	411,	1 },
-
-	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
-	{ 1024,	817,	615,	412,	204,	1 },
-
-	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
-	{ 1024,	819,	616,	410,	207,	2 },
-
-	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
-	{ 512,	411,	308,	208,	104,	1 },
-
-	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
-	{ 512,	409,	307,	206,	102,	2 },
-	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
-	{ 512,	409,	309,	205,	103,	2 },
-
-	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
-	{ 256,	205,	155,	101,	52,	1 },
-
-	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
-	{ 128,	103,	78,	51,	27,	2 },
-
-	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
-	{ 64,	52,	39,	26,	14,	1 },
-#endif
-};
-
-#define POOLBITS	poolwords*32
-#define POOLBYTES	poolwords*4
-
-/*
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a twisted Generalized Feedback Shift Reigster
+ * Originally, we used a primitive polynomial of degree .poolwords
+ * over GF(2).  The taps for various sizes are defined below.  They
+ * were chosen to be evenly spaced except for the last tap, which is 1
+ * to get the twisting happening as fast as possible.
  *
- * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
- * Transactions on Modeling and Computer Simulation 2(3):179-194.
- * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
- * II.  ACM Transactions on Mdeling and Computer Simulation 4:254-266)
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a (modified) twisted Generalized Feedback Shift
+ * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
+ * generators.  ACM Transactions on Modeling and Computer Simulation
+ * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
+ * GFSR generators II.  ACM Transactions on Modeling and Computer
+ * Simulation 4:254-266)
  *
  * Thanks to Colin Plumb for suggesting this.
  *
- * We have not analyzed the resultant polynomial to prove it primitive;
- * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
- * of a random large-degree polynomial over GF(2) are more than large enough
- * that periodicity is not a concern.
- *
- * The input hash is much less sensitive than the output hash.  All
- * that we want of it is that it be a good non-cryptographic hash;
- * i.e. it not produce collisions when fed "random" data of the sort
- * we expect to see.  As long as the pool state differs for different
- * inputs, we have preserved the input entropy and done a good job.
- * The fact that an intelligent attacker can construct inputs that
- * will produce controlled alterations to the pool's state is not
- * important because we don't consider such inputs to contribute any
- * randomness.  The only property we need with respect to them is that
- * the attacker can't increase his/her knowledge of the pool's state.
- * Since all additions are reversible (knowing the final state and the
- * input, you can reconstruct the initial state), if an attacker has
- * any uncertainty about the initial state, he/she can only shuffle
- * that uncertainty about, but never cause any collisions (which would
+ * The mixing operation is much less sensitive than the output hash,
+ * where we use SHA-1.  All that we want of mixing operation is that
+ * it be a good non-cryptographic hash; i.e. it not produce collisions
+ * when fed "random" data of the sort we expect to see.  As long as
+ * the pool state differs for different inputs, we have preserved the
+ * input entropy and done a good job.  The fact that an intelligent
+ * attacker can construct inputs that will produce controlled
+ * alterations to the pool's state is not important because we don't
+ * consider such inputs to contribute any randomness.  The only
+ * property we need with respect to them is that the attacker can't
+ * increase his/her knowledge of the pool's state.  Since all
+ * additions are reversible (knowing the final state and the input,
+ * you can reconstruct the initial state), if an attacker has any
+ * uncertainty about the initial state, he/she can only shuffle that
+ * uncertainty about, but never cause any collisions (which would
  * decrease the uncertainty).
  *
- * The chosen system lets the state of the pool be (essentially) the input
- * modulo the generator polymnomial.  Now, for random primitive polynomials,
- * this is a universal class of hash functions, meaning that the chance
- * of a collision is limited by the attacker's knowledge of the generator
- * polynomail, so if it is chosen at random, an attacker can never force
- * a collision.  Here, we use a fixed polynomial, but we *can* assume that
- * ###--> it is unknown to the processes generating the input entropy. <-###
- * Because of this important property, this is a good, collision-resistant
- * hash; hash collisions will occur no more often than chance.
+ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
+ * Videau in their paper, "The Linux Pseudorandom Number Generator
+ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
+ * paper, they point out that we are not using a true Twisted GFSR,
+ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
+ * is, with only three taps, instead of the six that we are using).
+ * As a result, the resulting polynomial is neither primitive nor
+ * irreducible, and hence does not have a maximal period over
+ * GF(2**32).  They suggest a slight change to the generator
+ * polynomial which improves the resulting TGFSR polynomial to be
+ * irreducible, which we have made here.
  */
+static struct poolinfo {
+	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+	int tap1, tap2, tap3, tap4, tap5;
+} poolinfo_table[] = {
+	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+	{ S(128),	104,	76,	51,	25,	1 },
+	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+	{ S(32),	26,	19,	14,	7,	1 },
+#if 0
+	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
+	{ S(2048),	1638,	1231,	819,	411,	1 },
+
+	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
+	{ S(1024),	817,	615,	412,	204,	1 },
+
+	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
+	{ S(1024),	819,	616,	410,	207,	2 },
+
+	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
+	{ S(512),	411,	308,	208,	104,	1 },
+
+	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
+	{ S(512),	409,	307,	206,	102,	2 },
+	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
+	{ S(512),	409,	309,	205,	103,	2 },
+
+	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
+	{ S(256),	205,	155,	101,	52,	1 },
+
+	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
+	{ S(128),	103,	78,	51,	27,	2 },
+
+	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
+	{ S(64),	52,	39,	26,	14,	1 },
+#endif
+};
 
 /*
  * Static global variables
@@ -396,17 +403,6 @@
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
-static bool debug;
-module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { \
-	if (debug) \
-		printk(KERN_DEBUG "random %04d %04d %04d: " \
-		fmt,\
-		input_pool.entropy_count,\
-		blocking_pool.entropy_count,\
-		nonblocking_pool.entropy_count,\
-		## arg); } while (0)
-
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -417,23 +413,26 @@
 struct entropy_store;
 struct entropy_store {
 	/* read-only data: */
-	struct poolinfo *poolinfo;
+	const struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
 	struct entropy_store *pull;
-	int limit;
+	struct work_struct push_work;
 
 	/* read-write data: */
+	unsigned long last_pulled;
 	spinlock_t lock;
-	unsigned add_ptr;
-	unsigned input_rotate;
+	unsigned short add_ptr;
+	unsigned short input_rotate;
 	int entropy_count;
 	int entropy_total;
 	unsigned int initialized:1;
-	bool last_data_init;
+	unsigned int limit:1;
+	unsigned int last_data_init:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
 
+static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS];
 static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
 static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
@@ -452,7 +451,9 @@
 	.limit = 1,
 	.pull = &input_pool,
 	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-	.pool = blocking_pool_data
+	.pool = blocking_pool_data,
+	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
+					push_to_pool),
 };
 
 static struct entropy_store nonblocking_pool = {
@@ -460,7 +461,9 @@
 	.name = "nonblocking",
 	.pull = &input_pool,
 	.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
-	.pool = nonblocking_pool_data
+	.pool = nonblocking_pool_data,
+	.push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
+					push_to_pool),
 };
 
 static __u32 const twist_table[8] = {
@@ -498,7 +501,7 @@
 
 	/* mix one byte at a time to simplify size handling and churn faster */
 	while (nbytes--) {
-		w = rol32(*bytes++, input_rotate & 31);
+		w = rol32(*bytes++, input_rotate);
 		i = (i - 1) & wordmask;
 
 		/* XOR in the various taps */
@@ -518,7 +521,7 @@
 		 * rotation, so that successive passes spread the
 		 * input bits across the pool evenly.
 		 */
-		input_rotate += i ? 7 : 14;
+		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
 	}
 
 	ACCESS_ONCE(r->input_rotate) = input_rotate;
@@ -561,62 +564,151 @@
 * collector.  It's hardcoded for a 128-bit pool and assumes that any
  * locks that might be needed are taken by the caller.
  */
-static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+static void fast_mix(struct fast_pool *f, __u32 input[4])
 {
-	const char	*bytes = in;
 	__u32		w;
-	unsigned	i = f->count;
 	unsigned	input_rotate = f->rotate;
 
-	while (nbytes--) {
-		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
-			f->pool[(i + 1) & 3];
-		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
-		input_rotate += (i++ & 3) ? 7 : 14;
-	}
-	f->count = i;
+	w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
+	f->pool[0] = (w >> 3) ^ twist_table[w & 7];
+	input_rotate = (input_rotate + 14) & 31;
+	w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
+	f->pool[1] = (w >> 3) ^ twist_table[w & 7];
+	input_rotate = (input_rotate + 7) & 31;
+	w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
+	f->pool[2] = (w >> 3) ^ twist_table[w & 7];
+	input_rotate = (input_rotate + 7) & 31;
+	w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
+	f->pool[3] = (w >> 3) ^ twist_table[w & 7];
+	input_rotate = (input_rotate + 7) & 31;
+
 	f->rotate = input_rotate;
+	f->count++;
 }
 
 /*
- * Credit (or debit) the entropy store with n bits of entropy
+ * Credit (or debit) the entropy store with n bits of entropy.
+ * Use credit_entropy_bits_safe() if the value comes from userspace
+ * or otherwise should be checked for extreme values.
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
 	int entropy_count, orig;
+	const int pool_size = r->poolinfo->poolfracbits;
+	int nfrac = nbits << ENTROPY_SHIFT;
 
 	if (!nbits)
 		return;
 
-	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
 retry:
 	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-	entropy_count += nbits;
+	if (nfrac < 0) {
+		/* Debit */
+		entropy_count += nfrac;
+	} else {
+		/*
+		 * Credit: we have to account for the possibility of
+		 * overwriting already present entropy.	 Even in the
+		 * ideal case of pure Shannon entropy, new contributions
+		 * approach the full value asymptotically:
+		 *
+		 * entropy <- entropy + (pool_size - entropy) *
+		 *	(1 - exp(-add_entropy/pool_size))
+		 *
+		 * For add_entropy <= pool_size/2 then
+		 * (1 - exp(-add_entropy/pool_size)) >=
+		 *    (add_entropy/pool_size)*0.7869...
+		 * so we can approximate the exponential with
+		 * 3/4*add_entropy/pool_size and still be on the
+		 * safe side by adding at most pool_size/2 at a time.
+		 *
+		 * The use of pool_size-2 in the while statement is to
+		 * prevent rounding artifacts from making the loop
+		 * arbitrarily long; this limits the loop to log2(pool_size)*2
+		 * turns no matter how large nbits is.
+		 */
+		int pnfrac = nfrac;
+		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+		/* The +2 corresponds to the /4 in the denominator */
+
+		do {
+			unsigned int anfrac = min(pnfrac, pool_size/2);
+			unsigned int add =
+				((pool_size - entropy_count)*anfrac*3) >> s;
+
+			entropy_count += add;
+			pnfrac -= anfrac;
+		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
+	}
 
 	if (entropy_count < 0) {
-		DEBUG_ENT("negative entropy/overflow\n");
+		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+			r->name, entropy_count);
+		WARN_ON(1);
 		entropy_count = 0;
-	} else if (entropy_count > r->poolinfo->POOLBITS)
-		entropy_count = r->poolinfo->POOLBITS;
+	} else if (entropy_count > pool_size)
+		entropy_count = pool_size;
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
-	if (!r->initialized && nbits > 0) {
-		r->entropy_total += nbits;
-		if (r->entropy_total > 128)
-			r->initialized = 1;
+	r->entropy_total += nbits;
+	if (!r->initialized && r->entropy_total > 128) {
+		r->initialized = 1;
+		r->entropy_total = 0;
+		if (r == &nonblocking_pool) {
+			prandom_reseed_late();
+			pr_notice("random: %s pool is initialized\n", r->name);
+		}
 	}
 
-	trace_credit_entropy_bits(r->name, nbits, entropy_count,
+	trace_credit_entropy_bits(r->name, nbits,
+				  entropy_count >> ENTROPY_SHIFT,
 				  r->entropy_total, _RET_IP_);
 
-	/* should we wake readers? */
-	if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
-		wake_up_interruptible(&random_read_wait);
-		kill_fasync(&fasync, SIGIO, POLL_IN);
+	if (r == &input_pool) {
+		int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+
+		/* should we wake readers? */
+		if (entropy_bytes >= random_read_wakeup_thresh) {
+			wake_up_interruptible(&random_read_wait);
+			kill_fasync(&fasync, SIGIO, POLL_IN);
+		}
+		/* If the input pool is getting full, send some
+		 * entropy to the two output pools, flipping back and
+		 * forth between them, until the output pools are 75%
+		 * full.
+		 */
+		if (entropy_bytes > random_write_wakeup_thresh &&
+		    r->initialized &&
+		    r->entropy_total >= 2*random_read_wakeup_thresh) {
+			static struct entropy_store *last = &blocking_pool;
+			struct entropy_store *other = &blocking_pool;
+
+			if (last == &blocking_pool)
+				other = &nonblocking_pool;
+			if (other->entropy_count <=
+			    3 * other->poolinfo->poolfracbits / 4)
+				last = other;
+			if (last->entropy_count <=
+			    3 * last->poolinfo->poolfracbits / 4) {
+				schedule_work(&last->push_work);
+				r->entropy_total = 0;
+			}
+		}
 	}
 }
 
+static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+{
+	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+
+	/* Cap the value to avoid overflows */
+	nbits = min(nbits,  nbits_max);
+	nbits = max(nbits, -nbits_max);
+
+	credit_entropy_bits(r, nbits);
+}
+
 /*********************************************************************
  *
  * Entropy input management
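
To make the crediting approximation above concrete, a worked example
(illustration only, using the values the new defines give the input pool:
poolfracbits = 128 words * 32 bits << ENTROPY_SHIFT = 32768, and
s = poolbitshift + ENTROPY_SHIFT + 2 = 12 + 3 + 2 = 17):

	static void credit_worked_example(void)
	{
		const int pool_size = 32768;	/* input pool, 1/8-bit units */
		const int s = 17;
		int nfrac = 8 << 3;	/* crediting 8 bits = 64 fracbits */

		/* empty pool: (32768 * 64 * 3) >> 17 = 48 fracbits,
		 * i.e. 6 bits -- the advertised 3/4 of the nominal credit */
		int add_empty = ((pool_size - 0) * nfrac * 3) >> s;

		/* nearly full pool (30000 of 32768 fracbits): the same
		 * 8-bit credit adds only 4 fracbits, about half a bit */
		int add_full = ((pool_size - 30000) * nfrac * 3) >> s;

		(void)add_empty;
		(void)add_full;
	}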
@@ -630,6 +722,8 @@
 	unsigned dont_count_entropy:1;
 };
 
+#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }
+
 /*
  * Add device- or boot-specific data to the input and nonblocking
  * pools to help initialize them to unique values.
@@ -641,15 +735,22 @@
 void add_device_randomness(const void *buf, unsigned int size)
 {
 	unsigned long time = random_get_entropy() ^ jiffies;
+	unsigned long flags;
 
-	mix_pool_bytes(&input_pool, buf, size, NULL);
-	mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
-	mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-	mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+	trace_add_device_randomness(size, _RET_IP_);
+	spin_lock_irqsave(&input_pool.lock, flags);
+	_mix_pool_bytes(&input_pool, buf, size, NULL);
+	_mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+	spin_unlock_irqrestore(&input_pool.lock, flags);
+
+	spin_lock_irqsave(&nonblocking_pool.lock, flags);
+	_mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
 
-static struct timer_rand_state input_timer_state;
+static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
 
 /*
  * This function adds entropy to the entropy "pool" by using timing
@@ -663,6 +764,7 @@
  */
 static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 {
+	struct entropy_store	*r;
 	struct {
 		long jiffies;
 		unsigned cycles;
@@ -671,15 +773,12 @@
 	long delta, delta2, delta3;
 
 	preempt_disable();
-	/* if over the trickle threshold, use only 1 in 4096 samples */
-	if (input_pool.entropy_count > trickle_thresh &&
-	    ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
-		goto out;
 
 	sample.jiffies = jiffies;
 	sample.cycles = random_get_entropy();
 	sample.num = num;
-	mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+	mix_pool_bytes(r, &sample, sizeof(sample), NULL);
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -713,10 +812,8 @@
 		 * Round down by 1 bit on general principles,
		 * and limit entropy estimate to 12 bits.
 		 */
-		credit_entropy_bits(&input_pool,
-				    min_t(int, fls(delta>>1), 11));
+		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
 	}
-out:
 	preempt_enable();
 }
 
@@ -729,10 +826,10 @@
 	if (value == last_value)
 		return;
 
-	DEBUG_ENT("input event\n");
 	last_value = value;
 	add_timer_randomness(&input_timer_state,
 			     (type << 4) ^ code ^ (code >> 4) ^ value);
+	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
@@ -744,20 +841,21 @@
 	struct fast_pool	*fast_pool = &__get_cpu_var(irq_randomness);
 	struct pt_regs		*regs = get_irq_regs();
 	unsigned long		now = jiffies;
-	__u32			input[4], cycles = random_get_entropy();
+	cycles_t		cycles = random_get_entropy();
+	__u32			input[4], c_high, j_high;
+	__u64			ip;
 
-	input[0] = cycles ^ jiffies;
-	input[1] = irq;
-	if (regs) {
-		__u64 ip = instruction_pointer(regs);
-		input[2] = ip;
-		input[3] = ip >> 32;
-	}
+	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+	input[0] = cycles ^ j_high ^ irq;
+	input[1] = now ^ c_high;
+	ip = regs ? instruction_pointer(regs) : _RET_IP_;
+	input[2] = ip;
+	input[3] = ip >> 32;
 
-	fast_mix(fast_pool, input, sizeof(input));
+	fast_mix(fast_pool, input);
 
-	if ((fast_pool->count & 1023) &&
-	    !time_after(now, fast_pool->last + HZ))
+	if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
 		return;
 
 	fast_pool->last = now;
@@ -786,10 +884,8 @@
 	if (!disk || !disk->random)
 		return;
 	/* first major is 1, so we get >= 0x200 here */
-	DEBUG_ENT("disk event %d:%d\n",
-		  MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
-
 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
 }
 #endif
 
@@ -807,30 +903,58 @@
  * from the primary pool to the secondary extraction pool. We make
  * sure we pull enough for a 'catastrophic reseed'.
  */
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
+	if (r->limit == 0 && random_min_urandom_seed) {
+		unsigned long now = jiffies;
+
+		if (time_before(now,
+				r->last_pulled + random_min_urandom_seed * HZ))
+			return;
+		r->last_pulled = now;
+	}
+	if (r->pull &&
+	    r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
+	    r->entropy_count < r->poolinfo->poolfracbits)
+		_xfer_secondary_pool(r, nbytes);
+}
+
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
 	__u32	tmp[OUTPUT_POOL_WORDS];
 
-	if (r->pull && r->entropy_count < nbytes * 8 &&
-	    r->entropy_count < r->poolinfo->POOLBITS) {
-		/* If we're limited, always leave two wakeup worth's BITS */
-		int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
-		int bytes = nbytes;
+	/* For /dev/random's pool, always leave two wakeups' worth of BITS */
+	int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+	int bytes = nbytes;
 
-		/* pull at least as many as BYTES as wakeup BITS */
-		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
-		/* but never more than the buffer size */
-		bytes = min_t(int, bytes, sizeof(tmp));
+	/* pull at least as many BYTES as wakeup BITS */
+	bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+	/* but never more than the buffer size */
+	bytes = min_t(int, bytes, sizeof(tmp));
 
-		DEBUG_ENT("going to reseed %s with %d bits "
-			  "(%zu of %d requested)\n",
-			  r->name, bytes * 8, nbytes * 8, r->entropy_count);
+	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+	bytes = extract_entropy(r->pull, tmp, bytes,
+				random_read_wakeup_thresh / 8, rsvd);
+	mix_pool_bytes(r, tmp, bytes, NULL);
+	credit_entropy_bits(r, bytes*8);
+}
 
-		bytes = extract_entropy(r->pull, tmp, bytes,
-					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, tmp, bytes, NULL);
-		credit_entropy_bits(r, bytes*8);
-	}
+/*
+ * Used as a workqueue function so that when the input pool is getting
+ * full, we can "spill over" some entropy to the output pools.  That
+ * way the output pools can store some of the excess entropy instead
+ * of letting it go to waste.
+ */
+static void push_to_pool(struct work_struct *work)
+{
+	struct entropy_store *r = container_of(work, struct entropy_store,
+					      push_work);
+	BUG_ON(!r);
+	_xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
+			   r->pull->entropy_count >> ENTROPY_SHIFT);
 }
 
 /*
@@ -850,50 +974,48 @@
 {
 	unsigned long flags;
 	int wakeup_write = 0;
+	int have_bytes;
+	int entropy_count, orig;
+	size_t ibytes;
 
 	/* Hold lock while accounting */
 	spin_lock_irqsave(&r->lock, flags);
 
-	BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
-	DEBUG_ENT("trying to extract %zu bits from %s\n",
-		  nbytes * 8, r->name);
+	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
 	/* Can we pull enough? */
-	if (r->entropy_count / 8 < min + reserved) {
-		nbytes = 0;
-	} else {
-		int entropy_count, orig;
 retry:
-		entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+	ibytes = nbytes;
+	if (have_bytes < min + reserved) {
+		ibytes = 0;
+	} else {
 		/* If limited, never pull more than available */
-		if (r->limit && nbytes + reserved >= entropy_count / 8)
-			nbytes = entropy_count/8 - reserved;
+		if (r->limit && ibytes + reserved >= have_bytes)
+			ibytes = have_bytes - reserved;
 
-		if (entropy_count / 8 >= nbytes + reserved) {
-			entropy_count -= nbytes*8;
-			if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-				goto retry;
-		} else {
-			entropy_count = reserved;
-			if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-				goto retry;
-		}
+		if (have_bytes >= ibytes + reserved)
+			entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+		else
+			entropy_count = reserved << (ENTROPY_SHIFT + 3);
 
-		if (entropy_count < random_write_wakeup_thresh)
+		if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+			goto retry;
+
+		if ((r->entropy_count >> ENTROPY_SHIFT)
+		    < random_write_wakeup_thresh)
 			wakeup_write = 1;
 	}
-
-	DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
-		  nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
-
 	spin_unlock_irqrestore(&r->lock, flags);
 
+	trace_debit_entropy(r->name, 8 * ibytes);
 	if (wakeup_write) {
 		wake_up_interruptible(&random_write_wait);
 		kill_fasync(&fasync, SIGIO, POLL_OUT);
 	}
 
-	return nbytes;
+	return ibytes;
 }
 
 static void extract_buf(struct entropy_store *r, __u8 *out)
@@ -901,7 +1023,7 @@
 	int i;
 	union {
 		__u32 w[5];
-		unsigned long l[LONGS(EXTRACT_SIZE)];
+		unsigned long l[LONGS(20)];
 	} hash;
 	__u32 workspace[SHA_WORKSPACE_WORDS];
 	__u8 extract[64];
@@ -914,6 +1036,17 @@
 		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
 	/*
+	 * If we have an architectural hardware random number
+	 * generator, mix that in, too.
+	 */
+	for (i = 0; i < LONGS(20); i++) {
+		unsigned long v;
+		if (!arch_get_random_long(&v))
+			break;
+		hash.l[i] ^= v;
+	}
+
+	/*
 	 * We mix the hash back into the pool to prevent backtracking
 	 * attacks (where the attacker knows the state of the pool
 	 * plus the current outputs, and attempts to find previous
@@ -942,17 +1075,6 @@
 	hash.w[1] ^= hash.w[4];
 	hash.w[2] ^= rol32(hash.w[2], 16);
 
-	/*
-	 * If we have a architectural hardware random number
-	 * generator, mix that in, too.
-	 */
-	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
-		unsigned long v;
-		if (!arch_get_random_long(&v))
-			break;
-		hash.l[i] ^= v;
-	}
-
 	memcpy(out, &hash, EXTRACT_SIZE);
 	memset(&hash, 0, sizeof(hash));
 }
@@ -968,10 +1090,10 @@
 	if (fips_enabled) {
 		spin_lock_irqsave(&r->lock, flags);
 		if (!r->last_data_init) {
-			r->last_data_init = true;
+			r->last_data_init = 1;
 			spin_unlock_irqrestore(&r->lock, flags);
 			trace_extract_entropy(r->name, EXTRACT_SIZE,
-					      r->entropy_count, _RET_IP_);
+					      ENTROPY_BITS(r), _RET_IP_);
 			xfer_secondary_pool(r, EXTRACT_SIZE);
 			extract_buf(r, tmp);
 			spin_lock_irqsave(&r->lock, flags);
@@ -980,7 +1102,7 @@
 		spin_unlock_irqrestore(&r->lock, flags);
 	}
 
-	trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
+	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
@@ -1013,7 +1135,7 @@
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
 
-	trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
+	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
@@ -1053,6 +1175,14 @@
  */
 void get_random_bytes(void *buf, int nbytes)
 {
+#if DEBUG_RANDOM_BOOT > 0
+	if (unlikely(nonblocking_pool.initialized == 0))
+		printk(KERN_NOTICE "random: %pF get_random_bytes called "
+		       "with %d bits of entropy available\n",
+		       (void *) _RET_IP_,
+		       nonblocking_pool.entropy_total);
+#endif
+	trace_get_random_bytes(nbytes, _RET_IP_);
 	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
 EXPORT_SYMBOL(get_random_bytes);
@@ -1071,7 +1201,7 @@
 {
 	char *p = buf;
 
-	trace_get_random_bytes(nbytes, _RET_IP_);
+	trace_get_random_bytes_arch(nbytes, _RET_IP_);
 	while (nbytes) {
 		unsigned long v;
 		int chunk = min(nbytes, (int)sizeof(unsigned long));
@@ -1105,13 +1235,11 @@
 	ktime_t now = ktime_get_real();
 	unsigned long rv;
 
-	r->entropy_count = 0;
-	r->entropy_total = 0;
-	r->last_data_init = false;
+	r->last_pulled = jiffies;
 	mix_pool_bytes(r, &now, sizeof(now), NULL);
-	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_long(&rv))
-			break;
+			rv = random_get_entropy();
 		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
 	}
 	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
@@ -1134,7 +1262,7 @@
 	init_std_data(&nonblocking_pool);
 	return 0;
 }
-module_init(rand_initialize);
+early_initcall(rand_initialize);
 
 #ifdef CONFIG_BLOCK
 void rand_initialize_disk(struct gendisk *disk)
@@ -1146,8 +1274,10 @@
 	 * source.
 	 */
 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-	if (state)
+	if (state) {
+		state->last_time = INITIAL_JIFFIES;
 		disk->random = state;
+	}
 }
 #endif
 
@@ -1164,8 +1294,6 @@
 		if (n > SEC_XFER_SIZE)
 			n = SEC_XFER_SIZE;
 
-		DEBUG_ENT("reading %zu bits\n", n*8);
-
 		n = extract_entropy_user(&blocking_pool, buf, n);
 
 		if (n < 0) {
@@ -1173,8 +1301,9 @@
 			break;
 		}
 
-		DEBUG_ENT("read got %zd bits (%zd still needed)\n",
-			  n*8, (nbytes-n)*8);
+		trace_random_read(n*8, (nbytes-n)*8,
+				  ENTROPY_BITS(&blocking_pool),
+				  ENTROPY_BITS(&input_pool));
 
 		if (n == 0) {
 			if (file->f_flags & O_NONBLOCK) {
@@ -1182,13 +1311,9 @@
 				break;
 			}
 
-			DEBUG_ENT("sleeping?\n");
-
 			wait_event_interruptible(random_read_wait,
-				input_pool.entropy_count >=
-						 random_read_wakeup_thresh);
-
-			DEBUG_ENT("awake\n");
+				ENTROPY_BITS(&input_pool) >=
+				random_read_wakeup_thresh);
 
 			if (signal_pending(current)) {
 				retval = -ERESTARTSYS;
@@ -1211,7 +1336,18 @@
 static ssize_t
 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
-	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
+	int ret;
+
+	if (unlikely(nonblocking_pool.initialized == 0))
+		printk_once(KERN_NOTICE "random: %s urandom read "
+			    "with %d bits of entropy available\n",
+			    current->comm, nonblocking_pool.entropy_total);
+
+	ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
+
+	trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
+			   ENTROPY_BITS(&input_pool));
+	return ret;
 }
 
 static unsigned int
@@ -1222,9 +1358,9 @@
 	poll_wait(file, &random_read_wait, wait);
 	poll_wait(file, &random_write_wait, wait);
 	mask = 0;
-	if (input_pool.entropy_count >= random_read_wakeup_thresh)
+	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
 		mask |= POLLIN | POLLRDNORM;
-	if (input_pool.entropy_count < random_write_wakeup_thresh)
+	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
 		mask |= POLLOUT | POLLWRNORM;
 	return mask;
 }
@@ -1275,7 +1411,8 @@
 	switch (cmd) {
 	case RNDGETENTCNT:
 		/* inherently racy, no point locking */
-		if (put_user(input_pool.entropy_count, p))
+		ent_count = ENTROPY_BITS(&input_pool);
+		if (put_user(ent_count, p))
 			return -EFAULT;
 		return 0;
 	case RNDADDTOENTCNT:
@@ -1283,7 +1420,7 @@
 			return -EPERM;
 		if (get_user(ent_count, p))
 			return -EFAULT;
-		credit_entropy_bits(&input_pool, ent_count);
+		credit_entropy_bits_safe(&input_pool, ent_count);
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1298,14 +1435,19 @@
 				    size);
 		if (retval < 0)
 			return retval;
-		credit_entropy_bits(&input_pool, ent_count);
+		credit_entropy_bits_safe(&input_pool, ent_count);
 		return 0;
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:
-		/* Clear the entropy pool counters. */
+		/*
+		 * Clear the entropy pool counters. We no longer clear
+		 * the entropy pool, as that's silly.
+		 */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		rand_initialize();
+		input_pool.entropy_count = 0;
+		nonblocking_pool.entropy_count = 0;
+		blocking_pool.entropy_count = 0;
 		return 0;
 	default:
 		return -EINVAL;
@@ -1405,6 +1547,23 @@
 	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
 }
 
+/*
+ * Return entropy available scaled to integral bits
+ */
+static int proc_do_entropy(ctl_table *table, int write,
+			   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	ctl_table fake_table;
+	int entropy_count;
+
+	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+
+	fake_table.data = &entropy_count;
+	fake_table.maxlen = sizeof(entropy_count);
+
+	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
 extern struct ctl_table random_table[];
 struct ctl_table random_table[] = {
@@ -1419,7 +1578,7 @@
 		.procname	= "entropy_avail",
 		.maxlen		= sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_do_entropy,
 		.data		= &input_pool.entropy_count,
 	},
 	{
@@ -1441,6 +1600,13 @@
 		.extra2		= &max_write_thresh,
 	},
 	{
+		.procname	= "urandom_min_reseed_secs",
+		.data		= &random_min_urandom_seed,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "boot_id",
 		.data		= &sysctl_bootid,
 		.maxlen		= 16,
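
One user-visible consequence of the fractional-bit accounting: every
entropy count that crosses the user boundary (RNDGETENTCNT, the
entropy_avail sysctl, and the credits fed in via RNDADDTOENTCNT and
RNDADDENTROPY) is scaled through ENTROPY_SHIFT, and user-supplied credits
go through credit_entropy_bits_safe() so extreme values are clamped
instead of overflowing the accounting.  A hedged userspace sketch of the
privileged feed path (requires CAP_SYS_ADMIN; illustration only):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/random.h>

	int main(void)
	{
		union {
			struct rand_pool_info info;
			unsigned char raw[sizeof(struct rand_pool_info) + 32];
		} u;
		int fd = open("/dev/random", O_WRONLY);

		if (fd < 0)
			return 1;
		u.info.entropy_count = 256;	/* bits claimed for the sample */
		u.info.buf_size = 32;
		memset(u.info.buf, 0xaa, 32);	/* stand-in for real sample data */
		return ioctl(fd, RNDADDENTROPY, &u.info) ? 1 : 0;
	}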
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b79cf3e..feea87c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -577,7 +577,8 @@
 	spin_lock(&portdev->c_ovq_lock);
 	if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
-		while (!virtqueue_get_buf(vq, &len))
+		while (!virtqueue_get_buf(vq, &len)
+			&& !virtqueue_is_broken(vq))
 			cpu_relax();
 	}
 	spin_unlock(&portdev->c_ovq_lock);
@@ -650,7 +651,8 @@
 	 * we need to kmalloc a GFP_ATOMIC buffer each time the
 	 * console driver writes something out.
 	 */
-	while (!virtqueue_get_buf(out_vq, &len))
+	while (!virtqueue_get_buf(out_vq, &len)
+		&& !virtqueue_is_broken(out_vq))
 		cpu_relax();
 done:
 	spin_unlock_irqrestore(&port->outvq_lock, flags);
@@ -1837,12 +1839,8 @@
 		struct port *port;
 		u16 rows, cols;
 
-		vdev->config->get(vdev,
-				  offsetof(struct virtio_console_config, cols),
-				  &cols, sizeof(u16));
-		vdev->config->get(vdev,
-				  offsetof(struct virtio_console_config, rows),
-				  &rows, sizeof(u16));
+		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
 		port = find_port_by_id(portdev, 0);
 		set_console_size(port, rows, cols);
@@ -2014,10 +2012,9 @@
 
 	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
 	if (!is_rproc_serial(vdev) &&
-	    virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
-				  offsetof(struct virtio_console_config,
-					   max_nr_ports),
-				  &portdev->config.max_nr_ports) == 0) {
+	    virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+				 struct virtio_console_config, max_nr_ports,
+				 &portdev->config.max_nr_ports) == 0) {
 		multiport = true;
 	}
 
@@ -2142,7 +2139,7 @@
 static unsigned int rproc_serial_features[] = {
 };
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtcons_freeze(struct virtio_device *vdev)
 {
 	struct ports_device *portdev;
@@ -2220,7 +2217,7 @@
 	.probe =	virtcons_probe,
 	.remove =	virtcons_remove,
 	.config_changed = config_intr,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze =	virtcons_freeze,
 	.restore =	virtcons_restore,
 #endif
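
For context, virtio_cread() wraps the offsetof()/sizeof() bookkeeping that
the open-coded config-space reads above did by hand; the real macro lives
in include/linux/virtio_config.h. A rough sketch of the equivalence:

	u16 cols;

	/* Old style: the caller computes offset and size explicitly. */
	vdev->config->get(vdev,
			  offsetof(struct virtio_console_config, cols),
			  &cols, sizeof(u16));

	/* New style: offset and size are derived from the field itself. */
	virtio_cread(vdev, struct virtio_console_config, cols, &cols);
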
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 0e1d89b..d9e3f67 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -117,7 +117,7 @@
 	}
 
 	if (of_property_read_u32(node, "clock-mult", &mult)) {
-		pr_err("%s Fixed factor clock <%s> must have a clokc-mult property\n",
+		pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n",
 			__func__, node->name);
 		return;
 	}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb8..38093e2 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -17,15 +17,11 @@
 
 if CPU_FREQ
 
-config CPU_FREQ_TABLE
-	tristate
-
 config CPU_FREQ_GOV_COMMON
 	bool
 
 config CPU_FREQ_STAT
 	tristate "CPU frequency translation statistics"
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@
 
 config CPU_FREQ_GOV_ONDEMAND
 	tristate "'ondemand' cpufreq policy governor"
-	select CPU_FREQ_TABLE
 	select CPU_FREQ_GOV_COMMON
 	help
 	  'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@
 config GENERIC_CPUFREQ_CPU0
 	tristate "Generic CPU0 cpufreq driver"
 	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
-	select CPU_FREQ_TABLE
 	help
 	  This adds a generic cpufreq driver for CPU0 frequency management.
 	  It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@
 
 config IA64_ACPI_CPUFREQ
 	tristate "ACPI Processor P-States driver"
-	select CPU_FREQ_TABLE
 	depends on ACPI_PROCESSOR
 	help
 	This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@
 
 config LOONGSON2_CPUFREQ
 	tristate "Loongson2 CPUFreq Driver"
-	select CPU_FREQ_TABLE
 	help
 	  This option adds a CPUFreq driver for loongson processors which
 	  support software configurable cpu frequency.
@@ -262,7 +254,6 @@
 depends on SPARC64
 config SPARC_US3_CPUFREQ
 	tristate "UltraSPARC-III CPU Frequency driver"
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for UltraSPARC-III processors.
 
@@ -272,7 +263,6 @@
 
 config SPARC_US2E_CPUFREQ
 	tristate "UltraSPARC-IIe CPU Frequency driver"
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for UltraSPARC-IIe processors.
 
@@ -285,7 +275,6 @@
 depends on SUPERH
 config SH_CPU_FREQ
 	tristate "SuperH CPU Frequency driver"
-	select CPU_FREQ_TABLE
 	help
 	  This adds the cpufreq driver for SuperH. Any CPU that supports
 	  clock rate rounding through the clock framework can use this
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0fa204b..ce52ed9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,6 @@
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
 	depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
-	select CPU_FREQ_TABLE
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
@@ -18,7 +17,6 @@
 
 config ARM_EXYNOS_CPUFREQ
 	bool
-	select CPU_FREQ_TABLE
 
 config ARM_EXYNOS4210_CPUFREQ
 	bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@
 	depends on SOC_EXYNOS5440
 	depends on HAVE_CLK && PM_OPP && OF
 	default y
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5440
 	  SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@
 	tristate "Freescale i.MX6Q cpufreq support"
 	depends on SOC_IMX6Q
 	depends on REGULATOR_ANATOP
-	select CPU_FREQ_TABLE
 	help
 	  This adds cpufreq driver support for Freescale i.MX6Q SOC.
 
@@ -101,7 +97,6 @@
 
 config ARM_KIRKWOOD_CPUFREQ
 	def_bool ARCH_KIRKWOOD && OF
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.
@@ -110,7 +105,6 @@
 	bool "TI OMAP2+"
 	depends on ARCH_OMAP2PLUS
 	default ARCH_OMAP2PLUS
-	select CPU_FREQ_TABLE
 
 config ARM_S3C_CPUFREQ
 	bool
@@ -165,7 +159,6 @@
 config ARM_S3C2416_CPUFREQ
 	bool "S3C2416 CPU Frequency scaling support"
 	depends on CPU_S3C2416
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for the Samsung S3C2416 and
 	  S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@
 config ARM_S3C64XX_CPUFREQ
 	bool "Samsung S3C64XX"
 	depends on CPU_S3C6410
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@
 config ARM_S5PV210_CPUFREQ
 	bool "Samsung S5PV210 and S5PC110"
 	depends on CPU_S5PV210
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@
 config ARM_SPEAR_CPUFREQ
 	bool "SPEAr CPUFreq support"
 	depends on PLAT_SPEAR
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,14 @@
 config ARM_TEGRA_CPUFREQ
 	bool "TEGRA CPUFreq support"
 	depends on ARCH_TEGRA
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+	tristate "Versatile Express SPC based CPUfreq driver"
+	select ARM_BIG_LITTLE_CPUFREQ
+	depends on ARCH_VEXPRESS_SPC
+	help
+	  This adds the CPUfreq driver support for Versatile Express
+	  big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 25ca9db..ca0021a 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,7 +1,6 @@
 config CPU_FREQ_CBE
 	tristate "CBE frequency scaling"
 	depends on CBE_RAS && PPC_CELL
-	select CPU_FREQ_TABLE
 	default m
 	help
 	  This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@
 config CPU_FREQ_MAPLE
 	bool "Support for Maple 970FX Evaluation Board"
 	depends on PPC_MAPLE
-	select CPU_FREQ_TABLE
 	help
 	  This adds support for frequency switching on Maple 970FX
 	  Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@
 config PPC_CORENET_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
 	depends on PPC_E500MC && OF && COMMON_CLK
-	select CPU_FREQ_TABLE
 	select CLK_PPC_CORENET
 	help
 	  This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@
 config CPU_FREQ_PMAC
 	bool "Support for Apple PowerBooks"
 	depends on ADB_PMU && PPC32
-	select CPU_FREQ_TABLE
 	help
 	  This adds support for frequency switching on Apple PowerBooks,
 	  this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@
 config CPU_FREQ_PMAC64
 	bool "Support for some Apple G5s"
 	depends on PPC_PMAC && PPC64
-	select CPU_FREQ_TABLE
 	help
 	  This adds support for frequency switching on Apple iMac G5,
 	  and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@
 config PPC_PASEMI_CPUFREQ
 	bool "Support for PA Semi PWRficient"
 	depends on PPC_PASEMI
-	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the support for frequency switching on PA Semi
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index e2b6eab..d369349 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -31,7 +31,6 @@
 
 config X86_ACPI_CPUFREQ
 	tristate "ACPI Processor P-States driver"
-	select CPU_FREQ_TABLE
 	depends on ACPI_PROCESSOR
 	help
 	  This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@
 
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
-	select CPU_FREQ_TABLE
 	depends on MELAN
 	---help---
 	  This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@
 
 config SC520_CPUFREQ
 	tristate "AMD Elan SC520"
-	select CPU_FREQ_TABLE
 	depends on MELAN
 	---help---
 	  This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@
 
 config X86_POWERNOW_K6
 	tristate "AMD Mobile K6-2/K6-3 PowerNow!"
-	select CPU_FREQ_TABLE
 	depends on X86_32
 	help
 	  This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@
 
 config X86_POWERNOW_K7
 	tristate "AMD Mobile Athlon/Duron PowerNow!"
-	select CPU_FREQ_TABLE
 	depends on X86_32
 	help
 	  This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@
 
 config X86_POWERNOW_K8
 	tristate "AMD Opteron/Athlon64 PowerNow!"
-	select CPU_FREQ_TABLE
 	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
 	help
 	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,11 +126,10 @@
 config X86_AMD_FREQ_SENSITIVITY
 	tristate "AMD frequency sensitivity feedback powersave bias"
 	depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
-	select CPU_FREQ_TABLE
 	help
 	  This adds AMD-specific powersave bias function to the ondemand
 	  governor, which allows it to make more power-conscious frequency
-	  change decisions based on feedback from hardware (availble on AMD
+	  change decisions based on feedback from hardware (available on AMD
 	  Family 16h and above).
 
 	  Hardware feedback tells software how "sensitive" to frequency changes
@@ -160,7 +153,6 @@
 
 config X86_SPEEDSTEP_CENTRINO
 	tristate "Intel Enhanced SpeedStep (deprecated)"
-	select CPU_FREQ_TABLE
 	select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
 	depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
 	help
@@ -190,7 +182,6 @@
 
 config X86_SPEEDSTEP_ICH
 	tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
-	select CPU_FREQ_TABLE
 	depends on X86_32
 	help
 	  This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@
 
 config X86_SPEEDSTEP_SMI
 	tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
-	select CPU_FREQ_TABLE
 	depends on X86_32
 	help
 	  This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@
 
 config X86_P4_CLOCKMOD
 	tristate "Intel Pentium 4 clock modulation"
-	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for Intel Pentium 4 / XEON
 	  processors.  When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@
 
 config X86_LONGHAUL
 	tristate "VIA Cyrix III Longhaul"
-	select CPU_FREQ_TABLE
 	depends on X86_32 && ACPI_PROCESSOR
 	help
 	  This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@
 
 config X86_E_POWERSAVER
 	tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
-	select CPU_FREQ_TABLE
 	depends on X86_32 && ACPI_PROCESSOR
 	help
 	  This adds the CPUFreq driver for VIA C7 processors.  However, this driver
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ad5866c..7494565 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
 # CPUfreq core
-obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o
+obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o freq_table.o
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
@@ -11,9 +11,6 @@
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
-
 obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
 
 ##################################################################################
@@ -77,6 +74,7 @@
 obj-$(CONFIG_ARM_SA1110_CPUFREQ)	+= sa1110-cpufreq.o
 obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA_CPUFREQ)		+= tegra-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 506fd23..caf41eb 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -424,34 +424,21 @@
 }
 
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
-			       unsigned int target_freq, unsigned int relation)
+			       unsigned int index)
 {
 	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 	struct acpi_processor_performance *perf;
-	struct cpufreq_freqs freqs;
 	struct drv_cmd cmd;
-	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	int result = 0;
 
-	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
-
 	if (unlikely(data == NULL ||
 	     data->acpi_data == NULL || data->freq_table == NULL)) {
 		return -ENODEV;
 	}
 
 	perf = data->acpi_data;
-	result = cpufreq_frequency_table_target(policy,
-						data->freq_table,
-						target_freq,
-						relation, &next_state);
-	if (unlikely(result)) {
-		result = -ENODEV;
-		goto out;
-	}
-
-	next_perf_state = data->freq_table[next_state].driver_data;
+	next_perf_state = data->freq_table[index].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
 			pr_debug("Called after resume, resetting to P%d\n",
@@ -492,23 +479,17 @@
 	else
 		cmd.mask = cpumask_of(policy->cpu);
 
-	freqs.old = perf->states[perf->state].core_frequency * 1000;
-	freqs.new = data->freq_table[next_state].frequency;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(cmd.mask, freqs.new, data)) {
+		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+					data)) {
 			pr_debug("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
 			result = -EAGAIN;
-			freqs.new = freqs.old;
 		}
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	if (!result)
 		perf->state = next_perf_state;
 
@@ -516,15 +497,6 @@
 	return result;
 }
 
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
-	pr_debug("acpi_cpufreq_verify\n");
-
-	return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
@@ -837,7 +809,7 @@
 	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
 	perf->state = 0;
 
-	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, data->freq_table);
 	if (result)
 		goto err_freqfree;
 
@@ -846,12 +818,16 @@
 
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
-		/* Current speed is unknown and not detectable by IO port */
+		/*
+		 * The core will not set policy->cur, because
+		 * cpufreq_driver->get is NULL, so we need to set it here.
+		 * However, we have to guess it, because the current speed is
+		 * unknown and not detectable via IO ports.
+		 */
 		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-		policy->cur = get_cur_freq_on_cpu(cpu);
 		break;
 	default:
 		break;
@@ -868,8 +844,6 @@
 			(u32) perf->states[i].power,
 			(u32) perf->states[i].transition_latency);
 
-	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
 	/*
 	 * the first call to ->target() should result in us actually
 	 * writing something to the appropriate registers.
@@ -929,8 +903,8 @@
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
-	.verify		= acpi_cpufreq_verify,
-	.target		= acpi_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= acpi_cpufreq_target,
 	.bios_limit	= acpi_processor_get_bios_limit,
 	.init		= acpi_cpufreq_cpu_init,
 	.exit		= acpi_cpufreq_cpu_exit,
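
The conversion from .target to .target_index seen here is the recurring
theme of the driver changes below: the cpufreq core now resolves the
frequency-table index and issues the PRECHANGE/POSTCHANGE notifications
itself, so a driver callback shrinks to programming the hardware. A hedged
sketch of the new shape (my_freq_table and my_write_hw() are placeholder
names, not symbols from this patch):

	static int my_target_index(struct cpufreq_policy *policy,
				   unsigned int index)
	{
		/* The core has already validated index against the table. */
		unsigned int new_freq = my_freq_table[index].frequency;

		return my_write_hw(policy->cpu, new_freq); /* 0 on success */
	}

	static struct cpufreq_driver my_driver = {
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= my_target_index,
	};
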
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 3549f07..5519933 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,110 +24,323 @@
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/mutex.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/topology.h>
 #include <linux/types.h>
+#include <asm/bL_switcher.h>
 
 #include "arm_big_little.h"
 
 /* Currently we support only two clusters */
+#define A15_CLUSTER	0
+#define A7_CLUSTER	1
 #define MAX_CLUSTERS	2
 
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled()	bL_switching_enabled
+#define set_switching_enabled(x)	(bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled()	false
+#define set_switching_enabled(x)	do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
 static struct cpufreq_arm_bL_ops *arm_bL_ops;
 static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
-static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
 
-static unsigned int bL_cpufreq_get(unsigned int cpu)
+static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
+static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
 {
-	u32 cur_cluster = cpu_to_cluster(cpu);
-
-	return clk_get_rate(clk[cur_cluster]) / 1000;
+	return topology_physical_package_id(cpu);
 }
 
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static inline int cpu_to_cluster(int cpu)
 {
-	u32 cur_cluster = cpu_to_cluster(policy->cpu);
+	return is_bL_switching_enabled() ?
+		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
 
-	return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+	int j;
+	u32 max_freq = 0, cpu_freq;
+
+	for_each_online_cpu(j) {
+		cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+		if ((cluster == per_cpu(physical_cluster, j)) &&
+				(max_freq < cpu_freq))
+			max_freq = cpu_freq;
+	}
+
+	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+			max_freq);
+
+	return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+	u32 cur_cluster = per_cpu(physical_cluster, cpu);
+	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+	/* For switcher we use virtual A7 clock rates */
+	if (is_bL_switching_enabled())
+		rate = VIRT_FREQ(cur_cluster, rate);
+
+	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+			cur_cluster, rate);
+
+	return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+	if (is_bL_switching_enabled()) {
+		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+					cpu));
+
+		return per_cpu(cpu_last_req_freq, cpu);
+	} else {
+		return clk_get_cpu_rate(cpu);
+	}
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+	u32 new_rate, prev_rate;
+	int ret;
+	bool bLs = is_bL_switching_enabled();
+
+	mutex_lock(&cluster_lock[new_cluster]);
+
+	if (bLs) {
+		prev_rate = per_cpu(cpu_last_req_freq, cpu);
+		per_cpu(cpu_last_req_freq, cpu) = rate;
+		per_cpu(physical_cluster, cpu) = new_cluster;
+
+		new_rate = find_cluster_maxfreq(new_cluster);
+		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+	} else {
+		new_rate = rate;
+	}
+
+	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+			__func__, cpu, old_cluster, new_cluster, new_rate);
+
+	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+	if (WARN_ON(ret)) {
+		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+				new_cluster);
+		if (bLs) {
+			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+			per_cpu(physical_cluster, cpu) = old_cluster;
+		}
+
+		mutex_unlock(&cluster_lock[new_cluster]);
+
+		return ret;
+	}
+
+	mutex_unlock(&cluster_lock[new_cluster]);
+
+	/* Recalc freq for old cluster when switching clusters */
+	if (old_cluster != new_cluster) {
+		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+				__func__, cpu, old_cluster, new_cluster);
+
+		/* Switch cluster */
+		bL_switch_request(cpu, new_cluster);
+
+		mutex_lock(&cluster_lock[old_cluster]);
+
+		/* Set freq of old cluster if there are cpus left on it */
+		new_rate = find_cluster_maxfreq(old_cluster);
+		new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+		if (new_rate) {
+			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+					__func__, old_cluster, new_rate);
+
+			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+			if (ret)
+				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+						__func__, ret, old_cluster);
+		}
+		mutex_unlock(&cluster_lock[old_cluster]);
+	}
+
+	return 0;
 }
 
 /* Set clock frequency */
 static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
-		unsigned int target_freq, unsigned int relation)
+		unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
-	int ret = 0;
+	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+	unsigned int freqs_new;
 
-	cur_cluster = cpu_to_cluster(policy->cpu);
+	cur_cluster = cpu_to_cluster(cpu);
+	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
 
-	freqs.old = bL_cpufreq_get(policy->cpu);
+	freqs_new = freq_table[cur_cluster][index].frequency;
 
-	/* Determine valid target frequency using freq_table */
-	cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
-			target_freq, relation, &freq_tab_idx);
-	freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
-
-	pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
-			__func__, cpu, cur_cluster, freqs.old, target_freq,
-			freqs.new);
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
-	if (ret) {
-		pr_err("clk_set_rate failed: %d\n", ret);
-		freqs.new = freqs.old;
+	if (is_bL_switching_enabled()) {
+		if ((actual_cluster == A15_CLUSTER) &&
+				(freqs_new < clk_big_min)) {
+			new_cluster = A7_CLUSTER;
+		} else if ((actual_cluster == A7_CLUSTER) &&
+				(freqs_new > clk_little_max)) {
+			new_cluster = A15_CLUSTER;
+		}
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+}
 
-	return ret;
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+	int count;
+
+	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+		;
+
+	return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+	int i;
+	uint32_t min_freq = ~0;
+	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+		if (table[i].frequency < min_freq)
+			min_freq = table[i].frequency;
+	return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+	int i;
+	uint32_t max_freq = 0;
+	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+		if (table[i].frequency > max_freq)
+			max_freq = table[i].frequency;
+	return max_freq;
+}
+
+static int merge_cluster_tables(void)
+{
+	int i, j, k = 0, count = 1;
+	struct cpufreq_frequency_table *table;
+
+	for (i = 0; i < MAX_CLUSTERS; i++)
+		count += get_table_count(freq_table[i]);
+
+	table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	freq_table[MAX_CLUSTERS] = table;
+
+	/* Add in reverse order to get freqs in increasing order */
+	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+				j++) {
+			table[k].frequency = VIRT_FREQ(i,
+					freq_table[i][j].frequency);
+			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+					table[k].frequency);
+			k++;
+		}
+	}
+
+	table[k].driver_data = k;
+	table[k].frequency = CPUFREQ_TABLE_END;
+
+	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+	return 0;
+}
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+	if (!freq_table[cluster])
+		return;
+
+	clk_put(clk[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
 }
 
 static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
 {
 	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i;
 
-	if (!atomic_dec_return(&cluster_usage[cluster])) {
-		clk_put(clk[cluster]);
-		opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-		dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+	if (atomic_dec_return(&cluster_usage[cluster]))
+		return;
+
+	if (cluster < MAX_CLUSTERS)
+		return _put_cluster_clk_and_freq_table(cpu_dev);
+
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+		if (!cdev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__, i);
+			return;
+		}
+
+		_put_cluster_clk_and_freq_table(cdev);
 	}
+
+	/* free virtual table */
+	kfree(freq_table[cluster]);
 }
 
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
 {
-	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
 	char name[14] = "cpu-cluster.";
 	int ret;
 
-	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+	if (freq_table[cluster])
 		return 0;
 
 	ret = arm_bL_ops->init_opp_table(cpu_dev);
 	if (ret) {
 		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
 				__func__, cpu_dev->id, ret);
-		goto atomic_dec;
+		goto out;
 	}
 
-	ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
 	if (ret) {
 		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
 				__func__, cpu_dev->id, ret);
-		goto atomic_dec;
+		goto out;
 	}
 
 	name[12] = cluster + '0';
-	clk[cluster] = clk_get_sys(name, NULL);
+	clk[cluster] = clk_get(cpu_dev, name);
 	if (!IS_ERR(clk[cluster])) {
 		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
 				__func__, clk[cluster], freq_table[cluster],
@@ -138,15 +351,74 @@
 	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
 			__func__, cpu_dev->id, cluster);
 	ret = PTR_ERR(clk[cluster]);
-	opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
 
-atomic_dec:
-	atomic_dec(&cluster_usage[cluster]);
+out:
 	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
 			cluster);
 	return ret;
 }
 
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i, ret;
+
+	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+		return 0;
+
+	if (cluster < MAX_CLUSTERS) {
+		ret = _get_cluster_clk_and_freq_table(cpu_dev);
+		if (ret)
+			atomic_dec(&cluster_usage[cluster]);
+		return ret;
+	}
+
+	/*
+	 * Get data for all clusters and fill the virtual cluster with a
+	 * merge of both.
+	 */
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+		if (!cdev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__, i);
+			return -ENODEV;
+		}
+
+		ret = _get_cluster_clk_and_freq_table(cdev);
+		if (ret)
+			goto put_clusters;
+	}
+
+	ret = merge_cluster_tables();
+	if (ret)
+		goto put_clusters;
+
+	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
+	clk_big_min = get_table_min(freq_table[0]);
+	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+			__func__, cluster, clk_big_min, clk_little_max);
+
+	return 0;
+
+put_clusters:
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+		if (!cdev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__, i);
+			return -ENODEV;
+		}
+
+		_put_cluster_clk_and_freq_table(cdev);
+	}
+
+	atomic_dec(&cluster_usage[cluster]);
+
+	return ret;
+}
+
 /* Per-CPU initialization */
 static int bL_cpufreq_init(struct cpufreq_policy *policy)
 {
@@ -165,7 +437,7 @@
 	if (ret)
 		return ret;
 
-	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
 	if (ret) {
 		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
 				policy->cpu, cur_cluster);
@@ -173,7 +445,14 @@
 		return ret;
 	}
 
-	cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+	if (cur_cluster < MAX_CLUSTERS) {
+		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+		per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+	} else {
+		/* Assumption: during init, we are always running on A15 */
+		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+	}
 
 	if (arm_bL_ops->get_transition_latency)
 		policy->cpuinfo.transition_latency =
@@ -181,9 +460,8 @@
 	else
 		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
-	policy->cur = bL_cpufreq_get(policy->cpu);
-
-	cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+	if (is_bL_switching_enabled())
+		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
 
 	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
 	return 0;
@@ -200,33 +478,60 @@
 		return -ENODEV;
 	}
 
+	cpufreq_frequency_table_put_attr(policy->cpu);
 	put_cluster_clk_and_freq_table(cpu_dev);
 	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
 	return 0;
 }
 
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver bL_cpufreq_driver = {
 	.name			= "arm-big-little",
-	.flags			= CPUFREQ_STICKY,
-	.verify			= bL_cpufreq_verify_policy,
-	.target			= bL_cpufreq_set_target,
-	.get			= bL_cpufreq_get,
+	.flags			= CPUFREQ_STICKY |
+					CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.target_index		= bL_cpufreq_set_target,
+	.get			= bL_cpufreq_get_rate,
 	.init			= bL_cpufreq_init,
 	.exit			= bL_cpufreq_exit,
-	.have_governor_per_policy = true,
-	.attr			= bL_cpufreq_attr,
+	.attr			= cpufreq_generic_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+					unsigned long action, void *_arg)
+{
+	pr_debug("%s: action: %ld\n", __func__, action);
+
+	switch (action) {
+	case BL_NOTIFY_PRE_ENABLE:
+	case BL_NOTIFY_PRE_DISABLE:
+		cpufreq_unregister_driver(&bL_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_ENABLE:
+		set_switching_enabled(true);
+		cpufreq_register_driver(&bL_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_DISABLE:
+		set_switching_enabled(false);
+		cpufreq_register_driver(&bL_cpufreq_driver);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+	.notifier_call = bL_cpufreq_switcher_notifier,
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
 {
-	int ret;
+	int ret, i;
 
 	if (arm_bL_ops) {
 		pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -241,16 +546,29 @@
 
 	arm_bL_ops = ops;
 
+	ret = bL_switcher_get_enabled();
+	set_switching_enabled(ret);
+
+	for (i = 0; i < MAX_CLUSTERS; i++)
+		mutex_init(&cluster_lock[i]);
+
 	ret = cpufreq_register_driver(&bL_cpufreq_driver);
 	if (ret) {
 		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
 				__func__, ops->name, ret);
 		arm_bL_ops = NULL;
 	} else {
-		pr_info("%s: Registered platform driver: %s\n", __func__,
-				ops->name);
+		ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+		if (ret) {
+			cpufreq_unregister_driver(&bL_cpufreq_driver);
+			arm_bL_ops = NULL;
+		} else {
+			pr_info("%s: Registered platform driver: %s\n",
+					__func__, ops->name);
+		}
 	}
 
+	bL_switcher_put_enabled();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -263,7 +581,10 @@
 		return;
 	}
 
+	bL_switcher_get_enabled();
+	bL_switcher_unregister_notifier(&bL_switcher_notifier);
 	cpufreq_unregister_driver(&bL_cpufreq_driver);
+	bL_switcher_put_enabled();
 	pr_info("%s: Un-registered platform driver: %s\n", __func__,
 			arm_bL_ops->name);
 	arm_bL_ops = NULL;
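
The VIRT_FREQ()/ACTUAL_FREQ() pair above puts both clusters on one virtual
frequency scale for the switcher: A7 rates are halved when exported and
doubled again before being programmed, so a request that falls below
clk_big_min is routed to the A7 cluster. A worked sketch (the 600 MHz
figure is illustrative, not from a real board):

	unsigned int hw = 600000;			/* A7 rate, kHz */
	unsigned int virt = VIRT_FREQ(A7_CLUSTER, hw);	/* 300000 kHz */

	/* Programming the clock reverses the mapping. */
	unsigned int back = ACTUAL_FREQ(A7_CLUSTER, virt);	/* 600000 kHz */
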
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 79b2ce1..70f18fc 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,11 +34,6 @@
 	int (*init_opp_table)(struct device *cpu_dev);
 };
 
-static inline int cpu_to_cluster(int cpu)
-{
-	return topology_physical_package_id(cpu);
-}
-
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
 
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 480c0bd..8d9d591 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index e0c38d9..856ad80 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -19,18 +19,10 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 
 static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
-	if (policy->cpu != 0)
-		return -EINVAL;
-
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-			policy->cpuinfo.max_freq);
-	return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
 
 static unsigned int at32_get_speed(unsigned int cpu)
 {
@@ -43,74 +35,94 @@
 static unsigned int	ref_freq;
 static unsigned long	loops_per_jiffy_ref;
 
-static int at32_set_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	long freq;
+	unsigned int old_freq, new_freq;
 
-	/* Convert target_freq from kHz to Hz */
-	freq = clk_round_rate(cpuclk, target_freq * 1000);
-
-	/* Check if policy->min <= new_freq <= policy->max */
-	if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
-		return -EINVAL;
-
-	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
-	freqs.old = at32_get_speed(0);
-	freqs.new = (freq + 500) / 1000;
-	freqs.flags = 0;
+	old_freq = at32_get_speed(0);
+	new_freq = freq_table[index].frequency;
 
 	if (!ref_freq) {
-		ref_freq = freqs.old;
+		ref_freq = old_freq;
 		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	if (freqs.old < freqs.new)
+	if (old_freq < new_freq)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
-				loops_per_jiffy_ref, ref_freq, freqs.new);
-	clk_set_rate(cpuclk, freq);
-	if (freqs.new < freqs.old)
+				loops_per_jiffy_ref, ref_freq, new_freq);
+	clk_set_rate(cpuclk, new_freq * 1000);
+	if (new_freq < old_freq)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
-				loops_per_jiffy_ref, ref_freq, freqs.new);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+				loops_per_jiffy_ref, ref_freq, new_freq);
 
 	return 0;
 }
 
 static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
+	unsigned int frequency, rate, min_freq;
+	int retval, steps, i;
+
 	if (policy->cpu != 0)
 		return -EINVAL;
 
 	cpuclk = clk_get(NULL, "cpu");
 	if (IS_ERR(cpuclk)) {
 		pr_debug("cpufreq: could not get CPU clk\n");
-		return PTR_ERR(cpuclk);
+		retval = PTR_ERR(cpuclk);
+		goto out_err;
 	}
 
-	policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
-	policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+	min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+	frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 	policy->cpuinfo.transition_latency = 0;
-	policy->cur = at32_get_speed(0);
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
 
-	printk("cpufreq: AT32AP CPU frequency driver\n");
+	/*
+	 * The AVR32 CPU frequency rate scales in powers of two between the
+	 * maximum and the minimum, so size the table accordingly and add
+	 * space for the table end marker.
+	 *
+	 * Each frequency is validated as usable before it is appended to
+	 * the frequency table.
+	 */
+	steps = fls(frequency / min_freq) + 1;
+	freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+			GFP_KERNEL);
+	if (!freq_table) {
+		retval = -ENOMEM;
+		goto out_err_put_clk;
+	}
 
-	return 0;
+	for (i = 0; i < (steps - 1); i++) {
+		rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
+
+		if (rate != frequency)
+			freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else
+			freq_table[i].frequency = frequency;
+
+		frequency /= 2;
+	}
+
+	freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+	retval = cpufreq_table_validate_and_show(policy, freq_table);
+	if (!retval) {
+		printk("cpufreq: AT32AP CPU frequency driver\n");
+		return 0;
+	}
+
+	kfree(freq_table);
+out_err_put_clk:
+	clk_put(cpuclk);
+out_err:
+	return retval;
 }
 
 static struct cpufreq_driver at32_driver = {
 	.name		= "at32ap",
 	.init		= at32_cpufreq_driver_init,
-	.verify		= at32_verify_speed,
-	.target		= at32_set_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= at32_set_target,
 	.get		= at32_get_speed,
 	.flags		= CPUFREQ_STICKY,
 };
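
The table sizing in at32_cpufreq_driver_init() leans on that power-of-two
spacing. With an assumed 200 MHz maximum and 25 MHz minimum (illustrative
values only), the arithmetic works out as:

	steps = fls(200000 / 25000) + 1;	/* fls(8) = 4, so 5 slots */
	/*
	 * The loop then fills 200000, 100000, 50000 and 25000 kHz; the
	 * fifth slot holds the CPUFREQ_TABLE_END marker.
	 */
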
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index ef05978..e9e63fc 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -127,41 +127,28 @@
 }
 #endif
 
-static int bfin_target(struct cpufreq_policy *policy,
-			unsigned int target_freq, unsigned int relation)
+static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
 {
 #ifndef CONFIG_BF60x
 	unsigned int plldiv;
 #endif
-	unsigned int index;
-	unsigned long cclk_hz;
-	struct cpufreq_freqs freqs;
 	static unsigned long lpj_ref;
 	static unsigned int  lpj_ref_freq;
+	unsigned int old_freq, new_freq;
 	int ret = 0;
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 	cycles_t cycles;
 #endif
 
-	if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
-				relation, &index))
-		return -EINVAL;
+	old_freq = bfin_getfreq_khz(0);
+	new_freq = bfin_freq_table[index].frequency;
 
-	cclk_hz = bfin_freq_table[index].frequency;
-
-	freqs.old = bfin_getfreq_khz(0);
-	freqs.new = cclk_hz;
-
-	pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
-			cclk_hz, target_freq, freqs.old);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 #ifndef CONFIG_BF60x
 	plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
 	bfin_write_PLL_DIV(plldiv);
 #else
-	ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+	ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
 	if (ret != 0) {
 		WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
 		return ret;
@@ -177,25 +164,16 @@
 #endif
 	if (!lpj_ref_freq) {
 		lpj_ref = loops_per_jiffy;
-		lpj_ref_freq = freqs.old;
+		lpj_ref_freq = old_freq;
 	}
-	if (freqs.new != freqs.old) {
+	if (new_freq != old_freq) {
 		loops_per_jiffy = cpufreq_scale(lpj_ref,
-				lpj_ref_freq, freqs.new);
+				lpj_ref_freq, new_freq);
 	}
 
-	/* TODO: just test case for cycles clock source, remove later */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	pr_debug("cpufreq: done\n");
 	return ret;
 }
 
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
 static int __bfin_cpu_init(struct cpufreq_policy *policy)
 {
 
@@ -209,23 +187,17 @@
 
 	policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
 
-	policy->cur = cclk;
-	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
-	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+	return cpufreq_table_validate_and_show(policy, bfin_freq_table);
 }
 
-static struct freq_attr *bfin_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver bfin_driver = {
-	.verify = bfin_verify_speed,
-	.target = bfin_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = bfin_target,
 	.get = bfin_getfreq_khz,
 	.init = __bfin_cpu_init,
+	.exit = cpufreq_generic_exit,
 	.name = "bfin cpufreq",
-	.attr = bfin_freq_attr,
+	.attr = cpufreq_generic_attr,
 };
 
 static int __init bfin_cpu_init(void)
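
Both this driver and at32ap above keep udelay() calibrated across frequency
changes by rescaling loops_per_jiffy with cpufreq_scale(), which is
essentially a mult/div against a reference snapshot:

	/*
	 * cpufreq_scale(old, div, mult) ~= old * mult / div, so a
	 * 4,000,000 lpj reference taken at 400 MHz becomes 2,000,000
	 * once the CPU drops to 200 MHz.
	 */
	loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq, new_freq);
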
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index c522a95..d4585ce 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -17,7 +17,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
@@ -30,73 +30,51 @@
 static struct regulator *cpu_reg;
 static struct cpufreq_frequency_table *freq_table;
 
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int cpu0_get_speed(unsigned int cpu)
 {
 	return clk_get_rate(cpu_clk) / 1000;
 }
 
-static int cpu0_set_target(struct cpufreq_policy *policy,
-			   unsigned int target_freq, unsigned int relation)
+static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long volt = 0, volt_old = 0, tol = 0;
+	unsigned int old_freq, new_freq;
 	long freq_Hz, freq_exact;
-	unsigned int index;
 	int ret;
 
-	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
-					     relation, &index);
-	if (ret) {
-		pr_err("failed to match target freqency %d: %d\n",
-		       target_freq, ret);
-		return ret;
-	}
-
 	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
 	if (freq_Hz < 0)
 		freq_Hz = freq_table[index].frequency * 1000;
+
 	freq_exact = freq_Hz;
-	freqs.new = freq_Hz / 1000;
-	freqs.old = clk_get_rate(cpu_clk) / 1000;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	new_freq = freq_Hz / 1000;
+	old_freq = clk_get_rate(cpu_clk) / 1000;
 
 	if (!IS_ERR(cpu_reg)) {
 		rcu_read_lock();
-		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
 			pr_err("failed to find OPP for %ld\n", freq_Hz);
-			freqs.new = freqs.old;
-			ret = PTR_ERR(opp);
-			goto post_notify;
+			return PTR_ERR(opp);
 		}
-		volt = opp_get_voltage(opp);
+		volt = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 		tol = volt * voltage_tolerance / 100;
 		volt_old = regulator_get_voltage(cpu_reg);
 	}
 
 	pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
-		 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
-		 freqs.new / 1000, volt ? volt / 1000 : -1);
+		 old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+		 new_freq / 1000, volt ? volt / 1000 : -1);
 
 	/* scaling up?  scale voltage before frequency */
-	if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
+	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
 		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
 		if (ret) {
 			pr_err("failed to scale voltage up: %d\n", ret);
-			freqs.new = freqs.old;
-			goto post_notify;
+			return ret;
 		}
 	}
 
@@ -105,72 +83,35 @@
 		pr_err("failed to set clock rate: %d\n", ret);
 		if (!IS_ERR(cpu_reg))
 			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
-		freqs.new = freqs.old;
-		goto post_notify;
+		return ret;
 	}
 
 	/* scaling down?  scale voltage after frequency */
-	if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
+	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
 		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
 		if (ret) {
 			pr_err("failed to scale voltage down: %d\n", ret);
-			clk_set_rate(cpu_clk, freqs.old * 1000);
-			freqs.new = freqs.old;
+			clk_set_rate(cpu_clk, old_freq * 1000);
 		}
 	}
 
-post_notify:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return ret;
 }
 
 static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	if (ret) {
-		pr_err("invalid frequency table: %d\n", ret);
-		return ret;
-	}
-
-	policy->cpuinfo.transition_latency = transition_latency;
-	policy->cur = clk_get_rate(cpu_clk) / 1000;
-
-	/*
-	 * The driver only supports the SMP configuartion where all processors
-	 * share the clock and voltage and clock.  Use cpufreq affected_cpus
-	 * interface to have all CPUs scaled together.
-	 */
-	cpumask_setall(policy->cpus);
-
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-
-	return 0;
-}
-
-static struct freq_attr *cpu0_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver cpu0_cpufreq_driver = {
 	.flags = CPUFREQ_STICKY,
-	.verify = cpu0_verify_speed,
-	.target = cpu0_set_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = cpu0_set_target,
 	.get = cpu0_get_speed,
 	.init = cpu0_cpufreq_init,
-	.exit = cpu0_cpufreq_exit,
+	.exit = cpufreq_generic_exit,
 	.name = "generic_cpu0",
-	.attr = cpu0_cpufreq_attr,
+	.attr = cpufreq_generic_attr,
 };
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -218,7 +159,7 @@
 		goto out_put_node;
 	}
 
-	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
 		pr_err("failed to init cpufreq table: %d\n", ret);
 		goto out_put_node;
@@ -230,7 +171,7 @@
 		transition_latency = CPUFREQ_ETERNAL;
 
 	if (!IS_ERR(cpu_reg)) {
-		struct opp *opp;
+		struct dev_pm_opp *opp;
 		unsigned long min_uV, max_uV;
 		int i;
 
@@ -242,12 +183,12 @@
 		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
 			;
 		rcu_read_lock();
-		opp = opp_find_freq_exact(cpu_dev,
+		opp = dev_pm_opp_find_freq_exact(cpu_dev,
 				freq_table[0].frequency * 1000, true);
-		min_uV = opp_get_voltage(opp);
-		opp = opp_find_freq_exact(cpu_dev,
+		min_uV = dev_pm_opp_get_voltage(opp);
+		opp = dev_pm_opp_find_freq_exact(cpu_dev,
 				freq_table[i-1].frequency * 1000, true);
-		max_uV = opp_get_voltage(opp);
+		max_uV = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
 		if (ret > 0)
@@ -264,7 +205,7 @@
 	return 0;
 
 out_free_table:
-	opp_free_cpufreq_table(cpu_dev, &freq_table);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
 	of_node_put(np);
 	return ret;
@@ -273,7 +214,7 @@
 static int cpu0_cpufreq_remove(struct platform_device *pdev)
 {
 	cpufreq_unregister_driver(&cpu0_cpufreq_driver);
-	opp_free_cpufreq_table(cpu_dev, &freq_table);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index b83d45f6..a05b876 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -303,9 +303,7 @@
 	if (policy->min < (fsb_pol_max * fid * 100))
 		policy->max = (fsb_pol_max + 1) * fid * 100;
 
-	cpufreq_verify_within_limits(policy,
-				     policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -362,7 +360,6 @@
 	policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
 	policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = nforce2_get(policy->cpu);
 
 	return 0;
 }
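
cpufreq_verify_within_cpu_limits() is shorthand for exactly the open-coded
call it replaces here; both clamp the policy to the hardware limits
recorded in cpuinfo:

	/* Equivalent to the removed open-coded form: */
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
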
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04548f7..02d534d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,49 +47,11 @@
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- *   do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- *   the policy altogether (eg. CPU hotplug), will hold this lock in write
- *   mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- *   take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu)					\
-static int lock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);	\
-	BUG_ON(!policy);						\
-	down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-									\
-	return 0;							\
+static inline bool has_target(void)
+{
+	return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu)					\
-static void unlock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);	\
-	BUG_ON(!policy);						\
-	up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-}
-
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
 /*
  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
  * sections
@@ -135,7 +97,7 @@
 
 bool have_governor_per_policy(void)
 {
-	return cpufreq_driver->have_governor_per_policy;
+	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
@@ -183,6 +145,37 @@
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the freq table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table,
+		unsigned int transition_latency)
+{
+	int ret;
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+		return ret;
+	}
+
+	policy->cpuinfo.transition_latency = transition_latency;
+
+	/*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the clock and voltage.
+	 */
+	cpumask_setall(policy->cpus);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
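
cpufreq_generic_init() is what lets simple drivers such as cpufreq-cpu0
(converted above) collapse their init paths to one call. A hedged sketch
of a caller (my_freq_table is a placeholder name):

	static int my_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* Validates the table, sets latency, fills policy->cpus. */
		return cpufreq_generic_init(policy, my_freq_table,
					    50000 /* ns */);
	}
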
@@ -363,7 +356,7 @@
 			*policy = CPUFREQ_POLICY_POWERSAVE;
 			err = 0;
 		}
-	} else if (cpufreq_driver->target) {
+	} else if (has_target()) {
 		struct cpufreq_governor *t;
 
 		mutex_lock(&cpufreq_governor_mutex);
@@ -414,7 +407,7 @@
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy);
 
 /**
@@ -435,7 +428,7 @@
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	ret = __cpufreq_set_policy(policy, &new_policy);		\
+	ret = cpufreq_set_policy(policy, &new_policy);		\
 	policy->user_policy.object = policy->object;			\
 									\
 	return ret ? ret : count;					\
@@ -493,11 +486,7 @@
 						&new_policy.governor))
 		return -EINVAL;
 
-	/*
-	 * Do not use cpufreq_set_policy here or the user_policy.max
-	 * will be wrongly overridden
-	 */
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
@@ -525,7 +514,7 @@
 	ssize_t i = 0;
 	struct cpufreq_governor *t;
 
-	if (!cpufreq_driver->target) {
+	if (!has_target()) {
 		i += sprintf(buf, "performance powersave");
 		goto out;
 	}
@@ -653,24 +642,21 @@
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
-	ssize_t ret = -EINVAL;
+	ssize_t ret;
 
 	if (!down_read_trylock(&cpufreq_rwsem))
-		goto exit;
+		return -EINVAL;
 
-	if (lock_policy_rwsem_read(policy->cpu) < 0)
-		goto up_read;
+	down_read(&policy->rwsem);
 
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
-exit:
+
 	return ret;
 }
 
@@ -689,17 +675,15 @@
 	if (!down_read_trylock(&cpufreq_rwsem))
 		goto unlock;
 
-	if (lock_policy_rwsem_write(policy->cpu) < 0)
-		goto up_read;
+	down_write(&policy->rwsem);
 
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-up_read:
 	up_read(&cpufreq_rwsem);
 unlock:
 	put_online_cpus();
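
The show()/store() changes above are the visible half of this file's
locking rework: the per-CPU cpu_policy_rwsem and its
lock_policy_rwsem_read/write() macro wrappers are gone, replaced by a
rwsem embedded in each policy and taken with the plain rwsem API:

	down_read(&policy->rwsem);	/* readers: sysfs show paths */
	/* ... read policy fields ... */
	up_read(&policy->rwsem);

	down_write(&policy->rwsem);	/* writers: store paths, hotplug */
	/* ... modify the policy ... */
	up_write(&policy->rwsem);
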
@@ -815,7 +799,7 @@
 		if (ret)
 			goto err_out_kobj_put;
 	}
-	if (cpufreq_driver->target) {
+	if (has_target()) {
 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 		if (ret)
 			goto err_out_kobj_put;
@@ -844,11 +828,11 @@
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
-	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	/* ensure that the starting sequence is run in cpufreq_set_policy */
 	policy->governor = NULL;
 
 	/* set default policy */
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
 
@@ -864,10 +848,10 @@
 				  unsigned int cpu, struct device *dev,
 				  bool frozen)
 {
-	int ret = 0, has_target = !!cpufreq_driver->target;
+	int ret = 0;
 	unsigned long flags;
 
-	if (has_target) {
+	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			pr_err("%s: Failed to stop governor\n", __func__);
@@ -875,7 +859,7 @@
 		}
 	}
 
-	lock_policy_rwsem_write(policy->cpu);
+	down_write(&policy->rwsem);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -883,9 +867,9 @@
 	per_cpu(cpufreq_cpu_data, cpu) = policy;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-	if (has_target) {
+	if (has_target()) {
 		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 			pr_err("%s: Failed to start governor\n", __func__);
@@ -930,6 +914,8 @@
 		goto err_free_cpumask;
 
 	INIT_LIST_HEAD(&policy->policy_list);
+	init_rwsem(&policy->rwsem);
+
 	return policy;
 
 err_free_cpumask:
@@ -949,26 +935,17 @@
 
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
-	if (cpu == policy->cpu)
+	if (WARN_ON(cpu == policy->cpu))
 		return;
 
-	/*
-	 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
-	 * Also lock for last cpu is enough here as contention will happen only
-	 * after policy->cpu is changed and after it is changed, other threads
-	 * will try to acquire lock for new cpu. And policy is already updated
-	 * by then.
-	 */
-	down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+	down_write(&policy->rwsem);
 
 	policy->last_cpu = policy->cpu;
 	policy->cpu = cpu;
 
-	up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+	up_write(&policy->rwsem);
 
-#ifdef CONFIG_CPU_FREQ_TABLE
 	cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
@@ -1053,6 +1030,14 @@
 		goto err_set_policy_cpu;
 	}
 
+	if (cpufreq_driver->get) {
+		policy->cur = cpufreq_driver->get(policy->cpu);
+		if (!policy->cur) {
+			pr_err("%s: ->get() failed\n", __func__);
+			goto err_get_freq;
+		}
+	}
+
 	/* related cpus should at least have policy->cpus */
 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1107,6 +1092,9 @@
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_get_freq:
+	if (cpufreq_driver->exit)
+		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
 	cpufreq_policy_free(policy);
 nomem_out:
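
Since __cpufreq_add_dev() now seeds policy->cur from the driver's ->get() callback, a failure at that point happens after ->init() has already succeeded, so the new err_get_freq label must run the driver's ->exit() before the policy is freed. Reduced to the unwind ordering (labels as in the hunk above):

    ret = cpufreq_driver->init(policy);
    if (ret)
            goto err_set_policy_cpu;        /* nothing driver-side to undo yet */

    if (cpufreq_driver->get) {
            policy->cur = cpufreq_driver->get(policy->cpu);
            if (!policy->cur)
                    goto err_get_freq;      /* must undo ->init() via ->exit() */
    }
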
@@ -1147,9 +1135,9 @@
 	if (ret) {
 		pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-		WARN_ON(lock_policy_rwsem_write(old_cpu));
+		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
-		unlock_policy_rwsem_write(old_cpu);
+		up_write(&policy->rwsem);
 
 		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 					"cpufreq");
@@ -1186,7 +1174,7 @@
 		return -EINVAL;
 	}
 
-	if (cpufreq_driver->target) {
+	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			pr_err("%s: Failed to stop governor\n", __func__);
@@ -1200,22 +1188,21 @@
 			policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-	lock_policy_rwsem_read(cpu);
+	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
-	unlock_policy_rwsem_read(cpu);
+	up_read(&policy->rwsem);
 
 	if (cpu != policy->cpu) {
 		if (!frozen)
 			sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-
 		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
 			if (!frozen) {
-				pr_debug("%s: policy Kobject moved to cpu: %d "
-					 "from: %d\n",__func__, new_cpu, cpu);
+				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+						__func__, new_cpu, cpu);
 			}
 		}
 	}
@@ -1243,16 +1230,16 @@
 		return -EINVAL;
 	}
 
-	WARN_ON(lock_policy_rwsem_write(cpu));
+	down_write(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
 
 	if (cpus > 1)
 		cpumask_clear_cpu(cpu, policy->cpus);
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
-		if (cpufreq_driver->target) {
+		if (has_target()) {
 			ret = __cpufreq_governor(policy,
 					CPUFREQ_GOV_POLICY_EXIT);
 			if (ret) {
@@ -1263,10 +1250,10 @@
 		}
 
 		if (!frozen) {
-			lock_policy_rwsem_read(cpu);
+			down_read(&policy->rwsem);
 			kobj = &policy->kobj;
 			cmp = &policy->kobj_unregister;
-			unlock_policy_rwsem_read(cpu);
+			up_read(&policy->rwsem);
 			kobject_put(kobj);
 
 			/*
@@ -1295,7 +1282,7 @@
 		if (!frozen)
 			cpufreq_policy_free(policy);
 	} else {
-		if (cpufreq_driver->target) {
+		if (has_target()) {
 			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 				pr_err("%s: Failed to start governor\n",
@@ -1310,36 +1297,24 @@
 }
 
 /**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
  */
-static inline int __cpufreq_remove_dev(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
-{
-	int ret;
-
-	ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
-	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
-	return ret;
-}
-
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
-	int retval;
+	int ret;
 
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	retval = __cpufreq_remove_dev(dev, sif, false);
-	return retval;
+	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+	if (!ret)
+		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+	return ret;
 }
 
 static void handle_update(struct work_struct *work)
@@ -1458,22 +1433,22 @@
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 	unsigned int ret_freq = 0;
 
 	if (cpufreq_disabled() || !cpufreq_driver)
 		return -ENOENT;
 
+	BUG_ON(!policy);
+
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 
-	if (unlikely(lock_policy_rwsem_read(cpu)))
-		goto out_policy;
+	down_read(&policy->rwsem);
 
 	ret_freq = __cpufreq_get(cpu);
 
-	unlock_policy_rwsem_read(cpu);
-
-out_policy:
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
 
 	return ret_freq;
@@ -1681,12 +1656,75 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 			policy->cpu, target_freq, relation, old_target_freq);
 
+	/*
+	 * This might look like a redundant call, as we are checking it again
+	 * after finding the index. But it is left in intentionally so that,
+	 * when the exact same frequency is requested again, we can save a few
+	 * function calls.
+	 */
 	if (target_freq == policy->cur)
 		return 0;
 
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
+	else if (cpufreq_driver->target_index) {
+		struct cpufreq_frequency_table *freq_table;
+		struct cpufreq_freqs freqs;
+		bool notify;
+		int index;
 
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (unlikely(!freq_table)) {
+			pr_err("%s: Unable to find freq_table\n", __func__);
+			goto out;
+		}
+
+		retval = cpufreq_frequency_table_target(policy, freq_table,
+				target_freq, relation, &index);
+		if (unlikely(retval)) {
+			pr_err("%s: Unable to find matching freq\n", __func__);
+			goto out;
+		}
+
+		if (freq_table[index].frequency == policy->cur) {
+			retval = 0;
+			goto out;
+		}
+
+		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+
+		if (notify) {
+			freqs.old = policy->cur;
+			freqs.new = freq_table[index].frequency;
+			freqs.flags = 0;
+
+			pr_debug("%s: cpu: %d, old freq: %u, new freq: %u\n",
+					__func__, policy->cpu, freqs.old,
+					freqs.new);
+
+			cpufreq_notify_transition(policy, &freqs,
+					CPUFREQ_PRECHANGE);
+		}
+
+		retval = cpufreq_driver->target_index(policy, index);
+		if (retval)
+			pr_err("%s: Failed to change cpu frequency: %d\n",
+					__func__, retval);
+
+		if (notify) {
+			/*
+			 * Notify with old freq in case we failed to change
+			 * frequency
+			 */
+			if (retval)
+				freqs.new = freqs.old;
+
+			cpufreq_notify_transition(policy, &freqs,
+					CPUFREQ_POSTCHANGE);
+		}
+	}
+
+out:
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
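
This hunk is the heart of the new driver interface: table-based drivers can now implement ->target_index(policy, index) and let the core look up the frequency table, resolve the index, skip transitions to the current frequency, and post the PRECHANGE/POSTCHANGE notifications (unless the driver sets CPUFREQ_ASYNC_NOTIFICATION). A driver callback then shrinks to the actual hardware operation; a sketch with invented foo_* names:

    /* hypothetical driver: the core has already validated 'index' */
    static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
    {
            /* no notifications and no table lookup needed on the driver side */
            return clk_set_rate(foo_cpu_clk, foo_freq_table[index].frequency * 1000);
    }

The many driver conversions below (cris, davinci, dbx500, ...) all follow this shape, pairing .target_index with .verify = cpufreq_generic_frequency_table_verify.
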
@@ -1697,14 +1735,12 @@
 {
 	int ret = -EINVAL;
 
-	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-		goto fail;
+	down_write(&policy->rwsem);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-fail:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1907,10 @@
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 /*
- * data   : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
 	int ret = 0, failed = 1;
@@ -1934,10 +1970,10 @@
 			/* end old governor */
 			if (policy->governor) {
 				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-				unlock_policy_rwsem_write(new_policy->cpu);
+				up_write(&policy->rwsem);
 				__cpufreq_governor(policy,
 						CPUFREQ_GOV_POLICY_EXIT);
-				lock_policy_rwsem_write(new_policy->cpu);
+				down_write(&policy->rwsem);
 			}
 
 			/* start new governor */
@@ -1946,10 +1982,10 @@
 				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
 					failed = 0;
 				} else {
-					unlock_policy_rwsem_write(new_policy->cpu);
+					up_write(&policy->rwsem);
 					__cpufreq_governor(policy,
 							CPUFREQ_GOV_POLICY_EXIT);
-					lock_policy_rwsem_write(new_policy->cpu);
+					down_write(&policy->rwsem);
 				}
 			}
 
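
Note that cpufreq_set_policy() keeps dropping the lock around CPUFREQ_GOV_POLICY_EXIT, now using policy->rwsem instead of the per-CPU lock. Presumably this is because a governor tears down its own sysfs attributes on POLICY_EXIT, and those attribute handlers take policy->rwsem themselves, so holding it across the call could deadlock. Schematically:

    up_write(&policy->rwsem);
    __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
    down_write(&policy->rwsem);
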
@@ -1995,10 +2031,7 @@
 		goto no_policy;
 	}
 
-	if (unlikely(lock_policy_rwsem_write(cpu))) {
-		ret = -EINVAL;
-		goto fail;
-	}
+	down_write(&policy->rwsem);
 
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
@@ -2017,17 +2050,16 @@
 			pr_debug("Driver did not initialize current freq");
 			policy->cur = new_policy.cur;
 		} else {
-			if (policy->cur != new_policy.cur && cpufreq_driver->target)
+			if (policy->cur != new_policy.cur && has_target())
 				cpufreq_out_of_sync(cpu, policy->cur,
 								new_policy.cur);
 		}
 	}
 
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
-fail:
 	cpufreq_cpu_put(policy);
 no_policy:
 	return ret;
@@ -2096,7 +2128,8 @@
 		return -ENODEV;
 
 	if (!driver_data || !driver_data->verify || !driver_data->init ||
-	    ((!driver_data->setpolicy) && (!driver_data->target)))
+	    !(driver_data->setpolicy || driver_data->target_index ||
+		    driver_data->target))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2183,14 +2216,9 @@
 
 static int __init cpufreq_core_init(void)
 {
-	int cpu;
-
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	for_each_possible_cpu(cpu)
-		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index f62d822..218460f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -80,13 +80,18 @@
 
 	/* Check for frequency decrease */
 	if (load < cs_tuners->down_threshold) {
+		unsigned int freq_target;
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
 		if (policy->cur == policy->min)
 			return;
 
-		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
+		freq_target = get_freq_target(cs_tuners, policy);
+		if (dbs_info->requested_freq > freq_target)
+			dbs_info->requested_freq -= freq_target;
+		else
+			dbs_info->requested_freq = policy->min;
 
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 				CPUFREQ_RELATION_L);
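
This one-line-turned-five fix in the conservative governor guards against an unsigned underflow: dbs_info->requested_freq and get_freq_target()'s result are both unsigned int, so subtracting a step larger than the current request used to wrap around to an enormous value instead of clamping at policy->min. A worked example with invented numbers:

    unsigned int requested_freq = 200000;   /* current request: 200 MHz (kHz units) */
    unsigned int freq_target    = 250000;   /* 5% step of a 5 GHz policy->max */

    requested_freq -= freq_target;          /* wraps to 4294917296, not -50000 */

With the check in place, the request is pinned to policy->min instead.
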
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 88cd39f..b5f2b86 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -191,7 +191,10 @@
 	struct attribute_group *attr_group_gov_sys; /* one governor - system */
 	struct attribute_group *attr_group_gov_pol; /* one governor - policy */
 
-	/* Common data for platforms that don't set have_governor_per_policy */
+	/*
+	 * Common data for platforms that don't set
+	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+	 */
 	struct dbs_data *gdbs_data;
 
 	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 32f26f6..18d4091 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -168,7 +168,6 @@
 			dbs_info->rate_mult =
 				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
-		return;
 	} else {
 		/* Calculate the next frequency proportional to load */
 		unsigned int freq_next;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 0307809..4dbf1db 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -38,18 +38,7 @@
 	if (!per_cpu(cpu_is_managed, policy->cpu))
 		goto err;
 
-	/*
-	 * We're safe from concurrent calls to ->target() here
-	 * as we hold the userspace_mutex lock. If we were calling
-	 * cpufreq_driver_target, a deadlock situation might occur:
-	 * A: cpufreq_set (lock userspace_mutex) ->
-	 *      cpufreq_driver_target(lock policy->lock)
-	 * B: cpufreq_set_policy(lock policy->lock) ->
-	 *      __cpufreq_governor ->
-	 *         cpufreq_governor_userspace (lock userspace_mutex)
-	 */
 	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
-
  err:
 	mutex_unlock(&userspace_mutex);
 	return ret;
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index cb8276d..8655904 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -27,18 +27,11 @@
 	return clk_ctrl.pll ? 200000 : 6000;
 }
 
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
 {
-	struct cpufreq_freqs freqs;
 	reg_clkgen_rw_clk_ctrl clk_ctrl;
 	clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
 
-	freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
-	freqs.new = cris_freq_table[state].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	local_irq_disable();
 
 	/* Even though we may be SMP they will share the same clock
@@ -51,67 +44,22 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target(policy, cris_freq_table,
-			target_freq, relation, &newstate))
-		return -EINVAL;
-
-	cris_freq_set_cpu_state(policy, newstate);
-
 	return 0;
 }
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-
-	/* cpuinfo and default policy values */
-	policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-	policy->cur = cris_freq_get_cpu_frequency(0);
-
-	result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-	if (result)
-		return (result);
-
-	cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-
-static struct freq_attr *cris_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
 	.get	= cris_freq_get_cpu_frequency,
-	.verify	= cris_freq_verify,
-	.target	= cris_freq_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = cris_freq_target,
 	.init	= cris_freq_cpu_init,
-	.exit	= cris_freq_cpu_exit,
+	.exit	= cpufreq_generic_exit,
 	.name	= "cris_freq",
-	.attr	= cris_freq_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
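
Both cris drivers (and davinci, dbx500, exynos and others below) collapse their ->init() boilerplate into cpufreq_generic_init(policy, table, transition_latency). The helper's body is outside this excerpt; judging from what the converted call sites stop doing, it plausibly amounts to:

    /* sketch inferred from the call sites, not the actual helper body */
    int cpufreq_generic_init(struct cpufreq_policy *policy,
                             struct cpufreq_frequency_table *table,
                             unsigned int transition_latency)
    {
            int ret;

            ret = cpufreq_table_validate_and_show(policy, table);
            if (ret)
                    return ret;

            policy->cpuinfo.transition_latency = transition_latency;

            /* drivers using this share one clock, hence one policy, per SoC */
            cpumask_setall(policy->cpus);

            return 0;
    }
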
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 72328f7..26d940d 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -27,18 +27,11 @@
 	return clk_ctrl.pll ? 200000 : 6000;
 }
 
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
 {
-	struct cpufreq_freqs freqs;
 	reg_config_rw_clk_ctrl clk_ctrl;
 	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
 
-	freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
-	freqs.new = cris_freq_table[state].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	local_irq_disable();
 
 	/* Even though we may be SMP they will share the same clock
@@ -51,64 +44,22 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq, unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target
-	    (policy, cris_freq_table, target_freq, relation, &newstate))
-		return -EINVAL;
-
-	cris_freq_set_cpu_state(policy, newstate);
-
 	return 0;
 }
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-
-	/* cpuinfo and default policy values */
-	policy->cpuinfo.transition_latency = 1000000;	/* 1ms */
-	policy->cur = cris_freq_get_cpu_frequency(0);
-
-	result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-	if (result)
-		return (result);
-
-	cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *cris_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
 	.get = cris_freq_get_cpu_frequency,
-	.verify = cris_freq_verify,
-	.target = cris_freq_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = cris_freq_target,
 	.init = cris_freq_cpu_init,
-	.exit = cris_freq_cpu_exit,
+	.exit = cpufreq_generic_exit,
 	.name = "cris_freq",
-	.attr = cris_freq_attr,
+	.attr = cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 551dd65..5e8a854 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -50,9 +50,7 @@
 	if (policy->cpu)
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
 	policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -68,58 +66,38 @@
 	return clk_get_rate(cpufreq.armclk) / 1000;
 }
 
-static int davinci_target(struct cpufreq_policy *policy,
-				unsigned int target_freq, unsigned int relation)
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
 {
-	int ret = 0;
-	unsigned int idx;
-	struct cpufreq_freqs freqs;
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
 	struct clk *armclk = cpufreq.armclk;
+	unsigned int old_freq, new_freq;
+	int ret = 0;
 
-	freqs.old = davinci_getspeed(0);
-	freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
-
-	if (freqs.old == freqs.new)
-		return ret;
-
-	dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
-
-	ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
-						freqs.new, relation, &idx);
-	if (ret)
-		return -EINVAL;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	old_freq = davinci_getspeed(0);
+	new_freq = pdata->freq_table[idx].frequency;
 
 	/* if moving to higher frequency, up the voltage beforehand */
-	if (pdata->set_voltage && freqs.new > freqs.old) {
+	if (pdata->set_voltage && new_freq > old_freq) {
 		ret = pdata->set_voltage(idx);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
 	ret = clk_set_rate(armclk, idx);
 	if (ret)
-		goto out;
+		return ret;
 
 	if (cpufreq.asyncclk) {
 		ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
 	/* if moving to lower freq, lower the voltage after lowering freq */
-	if (pdata->set_voltage && freqs.new < freqs.old)
+	if (pdata->set_voltage && new_freq < old_freq)
 		pdata->set_voltage(idx);
 
-out:
-	if (ret)
-		freqs.new = freqs.old;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
+	return 0;
 }
 
 static int davinci_cpu_init(struct cpufreq_policy *policy)
@@ -138,47 +116,24 @@
 			return result;
 	}
 
-	policy->cur = davinci_getspeed(0);
-
-	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	if (result) {
-		pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
-				__func__);
-		return result;
-	}
-
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
 	/*
 	 * Time measurement across the target() function yields ~1500-1800us
 	 * time taken with no drivers on notification list.
 	 * Setting the latency to 2000 us to accommodate addition of drivers
 	 * to pre/post change notification list.
 	 */
-	policy->cpuinfo.transition_latency = 2000 * 1000;
-	return 0;
+	return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
 }
 
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver davinci_driver = {
 	.flags		= CPUFREQ_STICKY,
 	.verify		= davinci_verify_speed,
-	.target		= davinci_target,
+	.target_index	= davinci_target,
 	.get		= davinci_getspeed,
 	.init		= davinci_cpu_init,
-	.exit		= davinci_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.name		= "davinci",
-	.attr		= davinci_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 26321cd..0e67ab9 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -19,51 +19,11 @@
 static struct cpufreq_frequency_table *freq_table;
 static struct clk *armss_clk;
 
-static struct freq_attr *dbx500_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
-				unsigned int target_freq,
-				unsigned int relation)
+				unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	unsigned int idx;
-	int ret;
-
-	/* Lookup the next frequency */
-	if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
-					relation, &idx))
-		return -EINVAL;
-
-	freqs.old = policy->cur;
-	freqs.new = freq_table[idx].frequency;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	/* pre-change notification */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* update armss clk frequency */
-	ret = clk_set_rate(armss_clk, freqs.new * 1000);
-
-	if (ret) {
-		pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
-		       freqs.new * 1000, ret);
-		freqs.new = freqs.old;
-	}
-
-	/* post change notification */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
+	return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
 }
 
 static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
@@ -84,43 +44,17 @@
 
 static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int res;
-
-	/* get policy fields based on the table */
-	res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	if (!res)
-		cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-	else {
-		pr_err("dbx500-cpufreq: Failed to read policy table\n");
-		return res;
-	}
-
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
-	policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-	/*
-	 * FIXME : Need to take time measurement across the target()
-	 *	   function with no/some/all drivers in the notification
-	 *	   list.
-	 */
-	policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
-	/* policy sharing between dual CPUs */
-	cpumask_setall(policy->cpus);
-
-	return 0;
+	return cpufreq_generic_init(policy, freq_table, 20 * 1000);
 }
 
 static struct cpufreq_driver dbx500_cpufreq_driver = {
 	.flags  = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
-	.verify = dbx500_cpufreq_verify_speed,
-	.target = dbx500_cpufreq_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = dbx500_cpufreq_target,
 	.get    = dbx500_cpufreq_getspeed,
 	.init   = dbx500_cpufreq_init,
 	.name   = "DBX500",
-	.attr   = dbx500_cpufreq_attr,
+	.attr   = cpufreq_generic_attr,
 };
 
 static int dbx500_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 09f64cc..9012b8b 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -107,15 +107,9 @@
 			 struct cpufreq_policy *policy,
 			 u32 dest_state)
 {
-	struct cpufreq_freqs freqs;
 	u32 lo, hi;
-	int err = 0;
 	int i;
 
-	freqs.old = eps_get(policy->cpu);
-	freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* Wait while CPU is busy */
 	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 	i = 0;
@@ -124,8 +118,7 @@
 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 		i++;
 		if (unlikely(i > 64)) {
-			err = -ENODEV;
-			goto postchange;
+			return -ENODEV;
 		}
 	}
 	/* Set new multiplier and voltage */
@@ -137,16 +130,10 @@
 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 		i++;
 		if (unlikely(i > 64)) {
-			err = -ENODEV;
-			goto postchange;
+			return -ENODEV;
 		}
 	} while (lo & ((1 << 16) | (1 << 17)));
 
-	/* Return current frequency */
-postchange:
-	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
-	freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
-
 #ifdef DEBUG
 	{
 	u8 current_multiplier, current_voltage;
@@ -161,19 +148,12 @@
 		current_multiplier);
 	}
 #endif
-	if (err)
-		freqs.new = freqs.old;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	return err;
+	return 0;
 }
 
-static int eps_target(struct cpufreq_policy *policy,
-			       unsigned int target_freq,
-			       unsigned int relation)
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct eps_cpu_data *centaur;
-	unsigned int newstate = 0;
 	unsigned int cpu = policy->cpu;
 	unsigned int dest_state;
 	int ret;
@@ -182,28 +162,14 @@
 		return -ENODEV;
 	centaur = eps_cpu[cpu];
 
-	if (unlikely(cpufreq_frequency_table_target(policy,
-			&eps_cpu[cpu]->freq_table[0],
-			target_freq,
-			relation,
-			&newstate))) {
-		return -EINVAL;
-	}
-
 	/* Make frequency transition */
-	dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
+	dest_state = centaur->freq_table[index].driver_data & 0xffff;
 	ret = eps_set_state(centaur, policy, dest_state);
 	if (ret)
 		printk(KERN_ERR "eps: Timeout!\n");
 	return ret;
 }
 
-static int eps_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-			&eps_cpu[policy->cpu]->freq_table[0]);
-}
-
 static int eps_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int i;
@@ -401,15 +367,13 @@
 	}
 
 	policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
-	policy->cur = fsb * current_multiplier;
 
-	ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+	ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
 	if (ret) {
 		kfree(centaur);
 		return ret;
 	}
 
-	cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
 	return 0;
 }
 
@@ -424,19 +388,14 @@
 	return 0;
 }
 
-static struct freq_attr *eps_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver eps_driver = {
-	.verify		= eps_verify,
-	.target		= eps_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= eps_target,
 	.init		= eps_cpu_init,
 	.exit		= eps_cpu_exit,
 	.get		= eps_get,
 	.name		= "e_powersaver",
-	.attr		= eps_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 823a400..de08acf 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -105,32 +105,9 @@
 }
 
 
-/**
- *	elanfreq_set_cpu_frequency: Change the CPU core frequency
- *	@cpu: cpu number
- *	@freq: frequency in kHz
- *
- *	This function takes a frequency value and changes the CPU frequency
- *	according to this. Note that the frequency has to be checked by
- *	elanfreq_validatespeed() for correctness!
- *
- *	There is no return value.
- */
-
-static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int state)
+static int elanfreq_target(struct cpufreq_policy *policy,
+			    unsigned int state)
 {
-	struct cpufreq_freqs    freqs;
-
-	freqs.old = elanfreq_get_cpu_frequency(0);
-	freqs.new = elan_multiplier[state].clock;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
-			elan_multiplier[state].clock);
-
-
 	/*
 	 * Access to the Elan's internal registers is indexed via
 	 * 0x22: Chip Setup & Control Register Index Register (CSCI)
@@ -161,39 +138,8 @@
 	udelay(10000);
 	local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-
-/**
- *	elanfreq_validatespeed: test if frequency range is valid
- *	@policy: the policy to validate
- *
- *	This function checks if a given frequency range in kHz is valid
- *	for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
-static int elanfreq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	elanfreq_set_cpu_state(policy, newstate);
-
 	return 0;
 }
-
-
 /*
  *	Module init and exit code
  */
@@ -202,7 +148,6 @@
 {
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int i;
-	int result;
 
 	/* capability check */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +166,8 @@
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = elanfreq_get_cpu_frequency(0);
 
-	result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
-	return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	return cpufreq_table_validate_and_show(policy, elanfreq_table);
 }
 
 
@@ -261,20 +193,14 @@
 #endif
 
 
-static struct freq_attr *elanfreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver elanfreq_driver = {
 	.get		= elanfreq_get_cpu_frequency,
-	.verify		= elanfreq_verify,
-	.target		= elanfreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= elanfreq_target,
 	.init		= elanfreq_cpu_init,
-	.exit		= elanfreq_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.name		= "elanfreq",
-	.attr		= elanfreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 0fac344..f3c2287 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -25,18 +25,11 @@
 static struct exynos_dvfs_info *exynos_info;
 
 static struct regulator *arm_regulator;
-static struct cpufreq_freqs freqs;
 
 static unsigned int locking_frequency;
 static bool frequency_locked;
 static DEFINE_MUTEX(cpufreq_lock);
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      exynos_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
 	return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -65,21 +58,18 @@
 	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
 	unsigned int arm_volt, safe_arm_volt = 0;
 	unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+	unsigned int old_freq;
 	int index, old_index;
 	int ret = 0;
 
-	freqs.old = policy->cur;
-	freqs.new = target_freq;
-
-	if (freqs.new == freqs.old)
-		goto out;
+	old_freq = policy->cur;
 
 	/*
 	 * The policy max have been changed so that we cannot get proper
 	 * old_index with cpufreq_frequency_table_target(). Thus, ignore
-	 * policy and get the index from the raw freqeuncy table.
+	 * policy and get the index from the raw frequency table.
 	 */
-	old_index = exynos_cpufreq_get_index(freqs.old);
+	old_index = exynos_cpufreq_get_index(old_freq);
 	if (old_index < 0) {
 		ret = old_index;
 		goto out;
@@ -104,17 +94,14 @@
 	}
 	arm_volt = volt_table[index];
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* When the new frequency is higher than current frequency */
-	if ((freqs.new > freqs.old) && !safe_arm_volt) {
+	if ((target_freq > old_freq) && !safe_arm_volt) {
 		/* Firstly, voltage up to increase frequency */
 		ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
 		if (ret) {
 			pr_err("%s: failed to set cpu voltage to %d\n",
 				__func__, arm_volt);
-			freqs.new = freqs.old;
-			goto post_notify;
+			return ret;
 		}
 	}
 
@@ -124,24 +111,17 @@
 		if (ret) {
 			pr_err("%s: failed to set cpu voltage to %d\n",
 				__func__, safe_arm_volt);
-			freqs.new = freqs.old;
-			goto post_notify;
+			return ret;
 		}
 	}
 
 	exynos_info->set_freq(old_index, index);
 
-post_notify:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	if (ret)
-		goto out;
-
 	/* When the new frequency is lower than current frequency */
-	if ((freqs.new < freqs.old) ||
-	   ((freqs.new > freqs.old) && safe_arm_volt)) {
+	if ((target_freq < old_freq) ||
+	   ((target_freq > old_freq) && safe_arm_volt)) {
 		/* down the voltage after frequency change */
-		regulator_set_voltage(arm_regulator, arm_volt,
+		ret = regulator_set_voltage(arm_regulator, arm_volt,
 				arm_volt);
 		if (ret) {
 			pr_err("%s: failed to set cpu voltage to %d\n",
@@ -151,19 +131,14 @@
 	}
 
 out:
-
 	cpufreq_cpu_put(policy);
 
 	return ret;
 }
 
-static int exynos_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
-	unsigned int index;
-	unsigned int new_freq;
 	int ret = 0;
 
 	mutex_lock(&cpufreq_lock);
@@ -171,15 +146,7 @@
 	if (frequency_locked)
 		goto out;
 
-	if (cpufreq_frequency_table_target(policy, freq_table,
-					   target_freq, relation, &index)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	new_freq = freq_table[index].frequency;
-
-	ret = exynos_cpufreq_scale(new_freq);
+	ret = exynos_cpufreq_scale(freq_table[index].frequency);
 
 out:
 	mutex_unlock(&cpufreq_lock);
@@ -247,38 +214,18 @@
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
-	cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
-	/* set the transition latency value */
-	policy->cpuinfo.transition_latency = 100000;
-
-	cpumask_setall(policy->cpus);
-
-	return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+	return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *exynos_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver exynos_driver = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= exynos_verify_speed,
-	.target		= exynos_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= exynos_target,
 	.get		= exynos_getspeed,
 	.init		= exynos_cpufreq_cpu_init,
-	.exit		= exynos_cpufreq_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.name		= "exynos_cpufreq",
-	.attr		= exynos_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 #ifdef CONFIG_PM
 	.suspend	= exynos_cpufreq_suspend,
 	.resume		= exynos_cpufreq_resume,
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index add7fbe..f2c7506 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -81,9 +81,9 @@
 
 static void exynos4210_set_apll(unsigned int index)
 {
-	unsigned int tmp;
+	unsigned int tmp, freq = apll_freq_4210[index].freq;
 
-	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+	/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
 	clk_set_parent(moutcore, mout_mpll);
 
 	do {
@@ -92,21 +92,9 @@
 		tmp &= 0x7;
 	} while (tmp != 0x2);
 
-	/* 2. Set APLL Lock time */
-	__raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
+	clk_set_rate(mout_apll, freq * 1000);
 
-	/* 3. Change PLL PMS values */
-	tmp = __raw_readl(EXYNOS4_APLL_CON0);
-	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-	tmp |= apll_freq_4210[index].mps;
-	__raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-	/* 4. wait_lock_time */
-	do {
-		tmp = __raw_readl(EXYNOS4_APLL_CON0);
-	} while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-	/* 5. MUX_CORE_SEL = APLL */
+	/* MUX_CORE_SEL = APLL */
 	clk_set_parent(moutcore, mout_apll);
 
 	do {
@@ -115,53 +103,15 @@
 	} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
-	unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
-	unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
-	return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4210_set_frequency(unsigned int old_index,
 				     unsigned int new_index)
 {
-	unsigned int tmp;
-
 	if (old_index > new_index) {
-		if (!exynos4210_pms_change(old_index, new_index)) {
-			/* 1. Change the system clock divider values */
-			exynos4210_set_clkdiv(new_index);
-
-			/* 2. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS4_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_4210[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS4_APLL_CON0);
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the system clock divider values */
-			exynos4210_set_clkdiv(new_index);
-			/* 2. Change the apll m,p,s value */
-			exynos4210_set_apll(new_index);
-		}
+		exynos4210_set_clkdiv(new_index);
+		exynos4210_set_apll(new_index);
 	} else if (old_index < new_index) {
-		if (!exynos4210_pms_change(old_index, new_index)) {
-			/* 1. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS4_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_4210[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-			/* 2. Change the system clock divider values */
-			exynos4210_set_clkdiv(new_index);
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the apll m,p,s value */
-			exynos4210_set_apll(new_index);
-			/* 2. Change the system clock divider values */
-			exynos4210_set_clkdiv(new_index);
-		}
+		exynos4210_set_apll(new_index);
+		exynos4210_set_clkdiv(new_index);
 	}
 }
 
@@ -194,7 +144,6 @@
 	info->volt_table = exynos4210_volt_table;
 	info->freq_table = exynos4210_freq_table;
 	info->set_freq = exynos4210_set_frequency;
-	info->need_apll_change = exynos4210_pms_change;
 
 	return 0;
 
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 08b7477..8683304 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -128,9 +128,9 @@
 
 static void exynos4x12_set_apll(unsigned int index)
 {
-	unsigned int tmp, pdiv;
+	unsigned int tmp, freq = apll_freq_4x12[index].freq;
 
-	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+	/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
 	clk_set_parent(moutcore, mout_mpll);
 
 	do {
@@ -140,24 +140,9 @@
 		tmp &= 0x7;
 	} while (tmp != 0x2);
 
-	/* 2. Set APLL Lock time */
-	pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+	clk_set_rate(mout_apll, freq * 1000);
 
-	__raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
-	/* 3. Change PLL PMS values */
-	tmp = __raw_readl(EXYNOS4_APLL_CON0);
-	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-	tmp |= apll_freq_4x12[index].mps;
-	__raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-	/* 4. wait_lock_time */
-	do {
-		cpu_relax();
-		tmp = __raw_readl(EXYNOS4_APLL_CON0);
-	} while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-	/* 5. MUX_CORE_SEL = APLL */
+	/* MUX_CORE_SEL = APLL */
 	clk_set_parent(moutcore, mout_apll);
 
 	do {
@@ -167,52 +152,15 @@
 	} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
-	unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
-	unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
-	return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4x12_set_frequency(unsigned int old_index,
 				  unsigned int new_index)
 {
-	unsigned int tmp;
-
 	if (old_index > new_index) {
-		if (!exynos4x12_pms_change(old_index, new_index)) {
-			/* 1. Change the system clock divider values */
-			exynos4x12_set_clkdiv(new_index);
-			/* 2. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS4_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_4x12[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the system clock divider values */
-			exynos4x12_set_clkdiv(new_index);
-			/* 2. Change the apll m,p,s value */
-			exynos4x12_set_apll(new_index);
-		}
+		exynos4x12_set_clkdiv(new_index);
+		exynos4x12_set_apll(new_index);
 	} else if (old_index < new_index) {
-		if (!exynos4x12_pms_change(old_index, new_index)) {
-			/* 1. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS4_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_4x12[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS4_APLL_CON0);
-			/* 2. Change the system clock divider values */
-			exynos4x12_set_clkdiv(new_index);
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the apll m,p,s value */
-			exynos4x12_set_apll(new_index);
-			/* 2. Change the system clock divider values */
-			exynos4x12_set_clkdiv(new_index);
-		}
+		exynos4x12_set_apll(new_index);
+		exynos4x12_set_clkdiv(new_index);
 	}
 }
 
@@ -250,7 +198,6 @@
 	info->volt_table = exynos4x12_volt_table;
 	info->freq_table = exynos4x12_freq_table;
 	info->set_freq = exynos4x12_set_frequency;
-	info->need_apll_change = exynos4x12_pms_change;
 
 	return 0;
 
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index be5380e..76bef8b 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -118,12 +118,12 @@
 	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
 	unsigned int tmp, clk_div, ema_div, freq, volt_id;
 	int i = 0;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 
 	rcu_read_lock();
 	for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
 
-		opp = opp_find_freq_exact(dvfs_info->dev,
+		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
 					freq_tbl[i].frequency * 1000, true);
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
@@ -142,7 +142,7 @@
 					<< P0_7_CSCLKDEV_SHIFT;
 
 		/* Calculate EMA */
-		volt_id = opp_get_voltage(opp);
+		volt_id = dev_pm_opp_get_voltage(opp);
 		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
 		if (volt_id < PMIC_HIGH_VOLT) {
 			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -209,38 +209,22 @@
 				dvfs_info->base + XMU_DVFS_CTRL);
 }
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      dvfs_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
 	return dvfs_info->cur_frequency;
 }
 
-static int exynos_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int index, tmp;
-	int ret = 0, i;
+	unsigned int tmp;
+	int i;
 	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
 
 	mutex_lock(&cpufreq_lock);
 
-	ret = cpufreq_frequency_table_target(policy, freq_table,
-					   target_freq, relation, &index);
-	if (ret)
-		goto out;
-
 	freqs.old = dvfs_info->cur_frequency;
 	freqs.new = freq_table[index].frequency;
 
-	if (freqs.old == freqs.new)
-		goto out;
-
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Set the target frequency in all C0_3_PSTATE register */
@@ -251,9 +235,8 @@
 
 		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
 	}
-out:
 	mutex_unlock(&cpufreq_lock);
-	return ret;
+	return 0;
 }
 
 static void exynos_cpufreq_work(struct work_struct *work)
@@ -324,30 +307,19 @@
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	int ret;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
-	if (ret) {
-		dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
-		return ret;
-	}
-
-	policy->cur = dvfs_info->cur_frequency;
-	policy->cpuinfo.transition_latency = dvfs_info->latency;
-	cpumask_setall(policy->cpus);
-
-	cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, dvfs_info->freq_table,
+			dvfs_info->latency);
 }
 
 static struct cpufreq_driver exynos_driver = {
-	.flags		= CPUFREQ_STICKY,
-	.verify		= exynos_verify_speed,
-	.target		= exynos_target,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= exynos_target,
 	.get		= exynos_getspeed,
 	.init		= exynos_cpufreq_cpu_init,
+	.exit		= cpufreq_generic_exit,
 	.name		= CPUFREQ_NAME,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct of_device_id exynos_cpufreq_match[] = {
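
exynos5440 is the one conversion here that keeps sending its own transition notifications: the frequency switch completes asynchronously (exynos_cpufreq_work runs after the hardware has finished), so the driver sets the new CPUFREQ_ASYNC_NOTIFICATION flag to stop the core from wrapping ->target_index() in the notifications itself. The split, as far as this excerpt shows it:

    /* in exynos_target(): PRECHANGE before kicking the hardware */
    cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
    /* ... write the C0_3_PSTATE registers ... */

    /* POSTCHANGE is presumably issued later, from exynos_cpufreq_work() */
    cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
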
@@ -399,13 +371,14 @@
 		goto err_put_node;
 	}
 
-	ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+					    &dvfs_info->freq_table);
 	if (ret) {
 		dev_err(dvfs_info->dev,
 			"failed to init cpufreq table: %d\n", ret);
 		goto err_put_node;
 	}
-	dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
 	exynos_sort_descend_freq_table();
 
 	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,7 +427,7 @@
 	return 0;
 
 err_free_table:
-	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
 	of_node_put(np);
 	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -464,7 +437,7 @@
 static int exynos_cpufreq_remove(struct platform_device *pdev)
 {
 	cpufreq_unregister_driver(&exynos_driver);
-	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 	return 0;
 }
 
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f111454a..3458d27 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -54,31 +54,30 @@
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 				   struct cpufreq_frequency_table *table)
 {
-	unsigned int next_larger = ~0;
-	unsigned int i;
-	unsigned int count = 0;
+	unsigned int next_larger = ~0, freq, i = 0;
+	bool found = false;
 
 	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
 					policy->min, policy->max, policy->cpu);
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
-	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
-		unsigned int freq = table[i].frequency;
+	for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
 		if (freq == CPUFREQ_ENTRY_INVALID)
 			continue;
-		if ((freq >= policy->min) && (freq <= policy->max))
-			count++;
-		else if ((next_larger > freq) && (freq > policy->max))
+		if ((freq >= policy->min) && (freq <= policy->max)) {
+			found = true;
+			break;
+		}
+
+		if ((next_larger > freq) && (freq > policy->max))
 			next_larger = freq;
 	}
 
-	if (!count)
+	if (!found) {
 		policy->max = next_larger;
-
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+		cpufreq_verify_within_cpu_limits(policy);
+	}
 
 	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
 				policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
 
+/*
+ * Generic routine to verify policy & frequency table; it requires the driver
+ * to have called cpufreq_frequency_table_get_attr() beforehand.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *table =
+		cpufreq_frequency_get_table(policy->cpu);
+	if (!table)
+		return -ENODEV;
+
+	return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 				   struct cpufreq_frequency_table *table,
@@ -200,6 +213,12 @@
 };
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
+struct freq_attr *cpufreq_generic_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
 /*
  * if you use these, you must ensure that the frequency table is valid
  * all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+				      struct cpufreq_frequency_table *table)
+{
+	int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+	if (!ret)
+		cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
 void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
 {
 	pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 70442c7..d83e826 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -401,7 +401,7 @@
 
 static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
 {
-	unsigned int maxfreq, curfreq;
+	unsigned int maxfreq;
 
 	if (!policy || policy->cpu != 0)
 		return -ENODEV;
@@ -415,10 +415,8 @@
 		maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
 
 	stock_freq = maxfreq;
-	curfreq = gx_get_cpuspeed(0);
 
 	pr_debug("cpu max frequency is %d.\n", maxfreq);
-	pr_debug("cpu current frequency is %dkHz.\n", curfreq);
 
 	/* setup basic struct for cpufreq API */
 	policy->cpu = 0;
@@ -428,7 +426,6 @@
 	else
 		policy->min = maxfreq / POLICY_MIN_DIV;
 	policy->max = maxfreq;
-	policy->cur = curfreq;
 	policy->cpuinfo.min_freq = maxfreq / max_duration;
 	policy->cpuinfo.max_freq = maxfreq;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 794123f..bf8902a 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -66,7 +66,8 @@
 	struct device_node *np;
 	int ret;
 
-	if (!of_machine_is_compatible("calxeda,highbank"))
+	if ((!of_machine_is_compatible("calxeda,highbank")) &&
+		(!of_machine_is_compatible("calxeda,ecx-2000")))
 		return -ENODEV;
 
 	cpu_dev = get_cpu_device(0);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 3e14f03..53c6ac6 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -141,7 +141,6 @@
 {
 	int			ret = 0;
 	u32			value = 0;
-	struct cpufreq_freqs    cpufreq_freqs;
 	cpumask_t		saved_mask;
 	int			retval;
 
@@ -168,13 +167,6 @@
 	pr_debug("Transitioning from P%d to P%d\n",
 		data->acpi_data.state, state);
 
-	/* cpufreq frequency struct */
-	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
-	cpufreq_freqs.new = data->freq_table[state].frequency;
-
-	/* notify cpufreq */
-	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
-
 	/*
 	 * First we write the target state's 'control' value to the
 	 * control_register.
@@ -186,22 +178,11 @@
 
 	ret = processor_set_pstate(value);
 	if (ret) {
-		unsigned int tmp = cpufreq_freqs.new;
-		cpufreq_notify_transition(policy, &cpufreq_freqs,
-				CPUFREQ_POSTCHANGE);
-		cpufreq_freqs.new = cpufreq_freqs.old;
-		cpufreq_freqs.old = tmp;
-		cpufreq_notify_transition(policy, &cpufreq_freqs,
-				CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(policy, &cpufreq_freqs,
-				CPUFREQ_POSTCHANGE);
 		printk(KERN_WARNING "Transition failed with error %d\n", ret);
 		retval = -ENODEV;
 		goto migrate_end;
 	}
 
-	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
 	data->acpi_data.state = state;
 
 	retval = 0;
@@ -227,42 +208,11 @@
 static int
 acpi_cpufreq_target (
 	struct cpufreq_policy   *policy,
-	unsigned int target_freq,
-	unsigned int relation)
+	unsigned int index)
 {
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-	unsigned int next_state = 0;
-	unsigned int result = 0;
-
-	pr_debug("acpi_cpufreq_setpolicy\n");
-
-	result = cpufreq_frequency_table_target(policy,
-			data->freq_table, target_freq, relation, &next_state);
-	if (result)
-		return (result);
-
-	result = processor_set_freq(data, policy, next_state);
-
-	return (result);
+	return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
 }
 
-
-static int
-acpi_cpufreq_verify (
-	struct cpufreq_policy   *policy)
-{
-	unsigned int result = 0;
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
-	pr_debug("acpi_cpufreq_verify\n");
-
-	result = cpufreq_frequency_table_verify(policy,
-			data->freq_table);
-
-	return (result);
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy   *policy)
@@ -321,7 +271,6 @@
 			    data->acpi_data.states[i].transition_latency * 1000;
 		}
 	}
-	policy->cur = processor_get_freq(data, policy->cpu);
 
 	/* table init */
 	for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +284,7 @@
 		}
 	}
 
-	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, data->freq_table);
 	if (result) {
 		goto err_freqfree;
 	}
@@ -356,8 +305,6 @@
 			(u32) data->acpi_data.states[i].status,
 			(u32) data->acpi_data.states[i].control);
 
-	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
 	/* the first call to ->target() should result in us actually
 	 * writing something to the appropriate registers. */
 	data->resume = 1;
@@ -396,20 +343,14 @@
 }
 
 
-static struct freq_attr* acpi_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver acpi_cpufreq_driver = {
-	.verify 	= acpi_cpufreq_verify,
-	.target 	= acpi_cpufreq_target,
+	.verify 	= cpufreq_generic_frequency_table_verify,
+	.target_index	= acpi_cpufreq_target,
 	.get 		= acpi_cpufreq_get,
 	.init		= acpi_cpufreq_cpu_init,
 	.exit		= acpi_cpufreq_cpu_exit,
 	.name		= "acpi-cpufreq",
-	.attr           = acpi_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c3fd2a1..4b3f18e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -13,7 +13,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
@@ -35,73 +35,52 @@
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int transition_latency;
 
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int imx6q_get_speed(unsigned int cpu)
 {
 	return clk_get_rate(arm_clk) / 1000;
 }
 
-static int imx6q_set_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq, unsigned int relation)
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long freq_hz, volt, volt_old;
-	unsigned int index;
+	unsigned int old_freq, new_freq;
 	int ret;
 
-	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
-					     relation, &index);
-	if (ret) {
-		dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
-			target_freq, ret);
-		return ret;
-	}
-
-	freqs.new = freq_table[index].frequency;
-	freq_hz = freqs.new * 1000;
-	freqs.old = clk_get_rate(arm_clk) / 1000;
-
-	if (freqs.old == freqs.new)
-		return 0;
+	new_freq = freq_table[index].frequency;
+	freq_hz = new_freq * 1000;
+	old_freq = clk_get_rate(arm_clk) / 1000;
 
 	rcu_read_lock();
-	opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
 	if (IS_ERR(opp)) {
 		rcu_read_unlock();
 		dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
 		return PTR_ERR(opp);
 	}
 
-	volt = opp_get_voltage(opp);
+	volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 	volt_old = regulator_get_voltage(arm_reg);
 
 	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
-		freqs.old / 1000, volt_old / 1000,
-		freqs.new / 1000, volt / 1000);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+		old_freq / 1000, volt_old / 1000,
+		new_freq / 1000, volt / 1000);
 
 	/* scaling up?  scale voltage before frequency */
-	if (freqs.new > freqs.old) {
+	if (new_freq > old_freq) {
 		ret = regulator_set_voltage_tol(arm_reg, volt, 0);
 		if (ret) {
 			dev_err(cpu_dev,
 				"failed to scale vddarm up: %d\n", ret);
-			freqs.new = freqs.old;
-			goto post_notify;
+			return ret;
 		}
 
 		/*
 		 * Need to increase vddpu and vddsoc for safety
 		 * if we are about to run at 1.2 GHz.
 		 */
-		if (freqs.new == FREQ_1P2_GHZ / 1000) {
+		if (new_freq == FREQ_1P2_GHZ / 1000) {
 			regulator_set_voltage_tol(pu_reg,
 					PU_SOC_VOLTAGE_HIGH, 0);
 			regulator_set_voltage_tol(soc_reg,
@@ -121,21 +100,20 @@
 	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
 	clk_set_parent(pll1_sw_clk, step_clk);
 	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-		clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+		clk_set_rate(pll1_sys_clk, new_freq * 1000);
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
 	}
 
 	/* Ensure the arm clock divider is what we expect */
-	ret = clk_set_rate(arm_clk, freqs.new * 1000);
+	ret = clk_set_rate(arm_clk, new_freq * 1000);
 	if (ret) {
 		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
 		regulator_set_voltage_tol(arm_reg, volt_old, 0);
-		freqs.new = freqs.old;
-		goto post_notify;
+		return ret;
 	}
 
 	/* scaling down?  scale voltage after frequency */
-	if (freqs.new < freqs.old) {
+	if (new_freq < old_freq) {
 		ret = regulator_set_voltage_tol(arm_reg, volt, 0);
 		if (ret) {
 			dev_warn(cpu_dev,
@@ -143,7 +121,7 @@
 			ret = 0;
 		}
 
-		if (freqs.old == FREQ_1P2_GHZ / 1000) {
+		if (old_freq == FREQ_1P2_GHZ / 1000) {
 			regulator_set_voltage_tol(pu_reg,
 					PU_SOC_VOLTAGE_NORMAL, 0);
 			regulator_set_voltage_tol(soc_reg,
@@ -151,55 +129,28 @@
 		}
 	}
 
-post_notify:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
+	return 0;
 }
 
 static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	if (ret) {
-		dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
-		return ret;
-	}
-
-	policy->cpuinfo.transition_latency = transition_latency;
-	policy->cur = clk_get_rate(arm_clk) / 1000;
-	cpumask_setall(policy->cpus);
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *imx6q_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver imx6q_cpufreq_driver = {
-	.verify = imx6q_verify_speed,
-	.target = imx6q_set_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = imx6q_set_target,
 	.get = imx6q_get_speed,
 	.init = imx6q_cpufreq_init,
-	.exit = imx6q_cpufreq_exit,
+	.exit = cpufreq_generic_exit,
 	.name = "imx6q-cpufreq",
-	.attr = imx6q_cpufreq_attr,
+	.attr = cpufreq_generic_attr,
 };
 
 static int imx6q_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long min_volt, max_volt;
 	int num, ret;
 
@@ -237,14 +188,14 @@
 	}
 
 	/* We expect an OPP table supplied by platform */
-	num = opp_get_opp_count(cpu_dev);
+	num = dev_pm_opp_get_opp_count(cpu_dev);
 	if (num < 0) {
 		ret = num;
 		dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
 		goto put_node;
 	}
 
-	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
 		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
 		goto put_node;
@@ -259,12 +210,12 @@
 	 * same order.
 	 */
 	rcu_read_lock();
-	opp = opp_find_freq_exact(cpu_dev,
+	opp = dev_pm_opp_find_freq_exact(cpu_dev,
 				  freq_table[0].frequency * 1000, true);
-	min_volt = opp_get_voltage(opp);
-	opp = opp_find_freq_exact(cpu_dev,
+	min_volt = dev_pm_opp_get_voltage(opp);
+	opp = dev_pm_opp_find_freq_exact(cpu_dev,
 				  freq_table[--num].frequency * 1000, true);
-	max_volt = opp_get_voltage(opp);
+	max_volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
 	if (ret > 0)
@@ -292,7 +243,7 @@
 	return 0;
 
 free_freq_table:
-	opp_free_cpufreq_table(cpu_dev, &freq_table);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 put_node:
 	of_node_put(np);
 	return ret;
@@ -301,7 +252,7 @@
 static int imx6q_cpufreq_remove(struct platform_device *pdev)
 {
 	cpufreq_unregister_driver(&imx6q_cpufreq_driver);
-	opp_free_cpufreq_table(cpu_dev, &freq_table);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 3d79bca..7d8ab000d 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -60,9 +60,7 @@
 {
 	struct icst_vco vco;
 
-	cpufreq_verify_within_limits(policy, 
-				     policy->cpuinfo.min_freq, 
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
 	vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
 	policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -70,10 +68,7 @@
 	vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
 	policy->min = icst_hz(&cclk_params, vco) / 1000;
 
-	cpufreq_verify_within_limits(policy, 
-				     policy->cpuinfo.min_freq, 
-				     policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -187,10 +182,9 @@
 {
 
 	/* set default policy and cpuinfo */
-	policy->cpuinfo.max_freq = 160000;
-	policy->cpuinfo.min_freq = 12000;
+	policy->max = policy->cpuinfo.max_freq = 160000;
+	policy->min = policy->cpuinfo.min_freq = 12000;
 	policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-	policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index eb3fdc7..5f1cbae 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/debugfs.h>
+#include <linux/acpi.h>
 #include <trace/events/power.h>
 
 #include <asm/div64.h>
@@ -33,6 +34,8 @@
 
 #define SAMPLE_COUNT		3
 
+#define BYT_RATIOS	0x66a
+
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
@@ -78,7 +81,6 @@
 
 	struct timer_list timer;
 
-	struct pstate_adjust_policy *pstate_policy;
 	struct pstate_data pstate;
 	struct _pid pid;
 
@@ -100,15 +102,21 @@
 	int i_gain_pct;
 };
 
-static struct pstate_adjust_policy default_policy = {
-	.sample_rate_ms = 10,
-	.deadband = 0,
-	.setpoint = 97,
-	.p_gain_pct = 20,
-	.d_gain_pct = 0,
-	.i_gain_pct = 0,
+struct pstate_funcs {
+	int (*get_max)(void);
+	int (*get_min)(void);
+	int (*get_turbo)(void);
+	void (*set)(int pstate);
 };
 
+struct cpu_defaults {
+	struct pstate_adjust_policy pid_policy;
+	struct pstate_funcs funcs;
+};
+
+static struct pstate_adjust_policy pid_params;
+static struct pstate_funcs pstate_funcs;
+
 struct perf_limits {
 	int no_turbo;
 	int max_perf_pct;
@@ -185,14 +193,14 @@
 
 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 {
-	pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
-	pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
-	pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
+	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
+	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
 
 	pid_reset(&cpu->pid,
-		cpu->pstate_policy->setpoint,
+		pid_params.setpoint,
 		100,
-		cpu->pstate_policy->deadband,
+		pid_params.deadband,
 		0);
 }
 
@@ -226,12 +234,12 @@
 };
 
 static struct pid_param pid_files[] = {
-	{"sample_rate_ms", &default_policy.sample_rate_ms},
-	{"d_gain_pct", &default_policy.d_gain_pct},
-	{"i_gain_pct", &default_policy.i_gain_pct},
-	{"deadband", &default_policy.deadband},
-	{"setpoint", &default_policy.setpoint},
-	{"p_gain_pct", &default_policy.p_gain_pct},
+	{"sample_rate_ms", &pid_params.sample_rate_ms},
+	{"d_gain_pct", &pid_params.d_gain_pct},
+	{"i_gain_pct", &pid_params.i_gain_pct},
+	{"deadband", &pid_params.deadband},
+	{"setpoint", &pid_params.setpoint},
+	{"p_gain_pct", &pid_params.p_gain_pct},
 	{NULL, NULL}
 };
 
@@ -336,33 +344,92 @@
 }
 
 /************************** sysfs end ************************/
+static int byt_get_min_pstate(void)
+{
+	u64 value;
+	rdmsrl(BYT_RATIOS, value);
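+	/* the low byte is taken as the minimum P-state ratio */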
+	return value & 0xFF;
+}
 
-static int intel_pstate_min_pstate(void)
+static int byt_get_max_pstate(void)
+{
+	u64 value;
+	rdmsrl(BYT_RATIOS, value);
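+	/* bits 23:16 are taken as the maximum (guaranteed) ratio */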
+	return (value >> 16) & 0xFF;
+}
+
+static int core_get_min_pstate(void)
 {
 	u64 value;
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 40) & 0xFF;
 }
 
-static int intel_pstate_max_pstate(void)
+static int core_get_max_pstate(void)
 {
 	u64 value;
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 8) & 0xFF;
 }
 
-static int intel_pstate_turbo_pstate(void)
+static int core_get_turbo_pstate(void)
 {
 	u64 value;
 	int nont, ret;
 	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
-	nont = intel_pstate_max_pstate();
+	nont = core_get_max_pstate();
 	ret = ((value) & 255);
 	if (ret <= nont)
 		ret = nont;
 	return ret;
 }
 
+static void core_set_pstate(int pstate)
+{
+	u64 val;
+
+	val = pstate << 8;
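+	/* the target ratio sits in bits 15:8 of PERF_CTL; bit 32 disengages turbo (IDA) */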
+	if (limits.no_turbo)
+		val |= (u64)1 << 32;
+
+	wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static struct cpu_defaults core_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 97,
+		.p_gain_pct = 20,
+		.d_gain_pct = 0,
+		.i_gain_pct = 0,
+	},
+	.funcs = {
+		.get_max = core_get_max_pstate,
+		.get_min = core_get_min_pstate,
+		.get_turbo = core_get_turbo_pstate,
+		.set = core_set_pstate,
+	},
+};
+
+static struct cpu_defaults byt_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 97,
+		.p_gain_pct = 14,
+		.d_gain_pct = 0,
+		.i_gain_pct = 4,
+	},
+	.funcs = {
+		.get_max = byt_get_max_pstate,
+		.get_min = byt_get_min_pstate,
+		.get_turbo = byt_get_max_pstate,
+		.set = core_set_pstate,
+	},
+};
+
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
@@ -383,7 +450,6 @@
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
 	int max_perf, min_perf;
-	u64 val;
 
 	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
@@ -395,11 +461,8 @@
 	trace_cpu_frequency(pstate * 100000, cpu->cpu);
 
 	cpu->pstate.current_pstate = pstate;
-	val = pstate << 8;
-	if (limits.no_turbo)
-		val |= (u64)1 << 32;
 
-	wrmsrl(MSR_IA32_PERF_CTL, val);
+	pstate_funcs.set(pstate);
 }
 
 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -421,9 +484,9 @@
 {
 	sprintf(cpu->name, "Intel 2nd generation core");
 
-	cpu->pstate.min_pstate = intel_pstate_min_pstate();
-	cpu->pstate.max_pstate = intel_pstate_max_pstate();
-	cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+	cpu->pstate.min_pstate = pstate_funcs.get_min();
+	cpu->pstate.max_pstate = pstate_funcs.get_max();
+	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 
 	/*
 	 * goto max pstate so we don't slow up boot if we are built-in if we are
@@ -465,7 +528,7 @@
 {
 	int sample_time, delay;
 
-	sample_time = cpu->pstate_policy->sample_rate_ms;
+	sample_time = pid_params.sample_rate_ms;
 	delay = msecs_to_jiffies(sample_time);
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
@@ -521,14 +584,15 @@
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-	ICPU(0x2a, default_policy),
-	ICPU(0x2d, default_policy),
-	ICPU(0x3a, default_policy),
-	ICPU(0x3c, default_policy),
-	ICPU(0x3e, default_policy),
-	ICPU(0x3f, default_policy),
-	ICPU(0x45, default_policy),
-	ICPU(0x46, default_policy),
+	ICPU(0x2a, core_params),
+	ICPU(0x2d, core_params),
+	ICPU(0x37, byt_params),
+	ICPU(0x3a, core_params),
+	ICPU(0x3c, core_params),
+	ICPU(0x3e, core_params),
+	ICPU(0x3f, core_params),
+	ICPU(0x45, core_params),
+	ICPU(0x46, core_params),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -552,8 +616,7 @@
 	intel_pstate_get_cpu_pstates(cpu);
 
 	cpu->cpu = cpunum;
-	cpu->pstate_policy =
-		(struct pstate_adjust_policy *)id->driver_data;
+
 	init_timer_deferrable(&cpu->timer);
 	cpu->timer.function = intel_pstate_timer_func;
 	cpu->timer.data =
@@ -613,9 +676,7 @@
 
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
-	cpufreq_verify_within_limits(policy,
-				policy->cpuinfo.min_freq,
-				policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
 	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
 		(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -683,9 +744,9 @@
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
 
-	if (!intel_pstate_min_pstate() ||
-		!intel_pstate_max_pstate() ||
-		!intel_pstate_turbo_pstate())
+	if (!pstate_funcs.get_max() ||
+		!pstate_funcs.get_min() ||
+		!pstate_funcs.get_turbo())
 		return -ENODEV;
 
 	rdmsrl(MSR_IA32_APERF, tmp);
@@ -698,10 +759,96 @@
 
 	return 0;
 }
+
+static void copy_pid_params(struct pstate_adjust_policy *policy)
+{
+	pid_params.sample_rate_ms = policy->sample_rate_ms;
+	pid_params.p_gain_pct = policy->p_gain_pct;
+	pid_params.i_gain_pct = policy->i_gain_pct;
+	pid_params.d_gain_pct = policy->d_gain_pct;
+	pid_params.deadband = policy->deadband;
+	pid_params.setpoint = policy->setpoint;
+}
+
+static void copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+	pstate_funcs.get_max   = funcs->get_max;
+	pstate_funcs.get_min   = funcs->get_min;
+	pstate_funcs.get_turbo = funcs->get_turbo;
+	pstate_funcs.set       = funcs->set;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+
+static bool intel_pstate_no_acpi_pss(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		acpi_status status;
+		union acpi_object *pss;
+		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+		struct acpi_processor *pr = per_cpu(processors, i);
+
+		if (!pr)
+			continue;
+
+		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			continue;
+
+		pss = buffer.pointer;
+		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
+			kfree(pss);
+			return false;
+		}
+
+		kfree(pss);
+	}
+
+	return true;
+}
+
+struct hw_vendor_info {
+	u16  valid;
+	char oem_id[ACPI_OEM_ID_SIZE];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+};
+
+/* Hardware vendor-specific info that has its own power management modes */
+static struct hw_vendor_info vendor_info[] = {
+	{1, "HP    ", "ProLiant"},
+	{0, "", ""},
+};
+
+static bool intel_pstate_platform_pwr_mgmt_exists(void)
+{
+	struct acpi_table_header hdr;
+	struct hw_vendor_info *v_info;
+
+	if (acpi_disabled
+	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+		return false;
+
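+	/* match the FADT OEM fields against vendors known to ship their own PM firmware */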
+	for (v_info = vendor_info; v_info->valid; v_info++) {
+		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
+		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
+		    && intel_pstate_no_acpi_pss())
+			return true;
+	}
+
+	return false;
+}
+#else /* CONFIG_ACPI not enabled */
+static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+#endif /* CONFIG_ACPI */
+
 static int __init intel_pstate_init(void)
 {
 	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
+	struct cpu_defaults *cpu_info;
 
 	if (no_load)
 		return -ENODEV;
@@ -710,6 +857,18 @@
 	if (!id)
 		return -ENODEV;
 
+	/*
+	 * The Intel pstate driver will be ignored if the platform
+	 * firmware has its own power management modes.
+	 */
+	if (intel_pstate_platform_pwr_mgmt_exists())
+		return -ENODEV;
+
+	cpu_info = (struct cpu_defaults *)id->driver_data;
+
+	copy_pid_params(&cpu_info->pid_policy);
+	copy_cpu_funcs(&cpu_info->funcs);
+
 	if (intel_pstate_msrs_not_valid())
 		return -ENODEV;
 
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index ba10658..0767a4e 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,69 +55,37 @@
 	return kirkwood_freq_table[0].frequency;
 }
 
-static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int index)
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+			    unsigned int index)
 {
-	struct cpufreq_freqs freqs;
 	unsigned int state = kirkwood_freq_table[index].driver_data;
 	unsigned long reg;
 
-	freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
-	freqs.new = kirkwood_freq_table[index].frequency;
+	local_irq_disable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	/* Disable interrupts to the CPU */
+	reg = readl_relaxed(priv.base);
+	reg |= CPU_SW_INT_BLK;
+	writel_relaxed(reg, priv.base);
 
-	dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
-		kirkwood_freq_table[index].frequency);
-	dev_dbg(priv.dev, "old frequency was %i KHz\n",
-		kirkwood_cpufreq_get_cpu_frequency(0));
-
-	if (freqs.old != freqs.new) {
-		local_irq_disable();
-
-		/* Disable interrupts to the CPU */
-		reg = readl_relaxed(priv.base);
-		reg |= CPU_SW_INT_BLK;
-		writel_relaxed(reg, priv.base);
-
-		switch (state) {
-		case STATE_CPU_FREQ:
-			clk_disable(priv.powersave_clk);
-			break;
-		case STATE_DDR_FREQ:
-			clk_enable(priv.powersave_clk);
-			break;
-		}
-
-		/* Wait-for-Interrupt, while the hardware changes frequency */
-		cpu_do_idle();
-
-		/* Enable interrupts to the CPU */
-		reg = readl_relaxed(priv.base);
-		reg &= ~CPU_SW_INT_BLK;
-		writel_relaxed(reg, priv.base);
-
-		local_irq_enable();
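+	/* powersave_clk selects the DDR clock as the CPU clock source when enabled */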
+	switch (state) {
+	case STATE_CPU_FREQ:
+		clk_disable(priv.powersave_clk);
+		break;
+	case STATE_DDR_FREQ:
+		clk_enable(priv.powersave_clk);
+		break;
 	}
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
 
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
+	/* Wait-for-Interrupt, while the hardware changes frequency */
+	cpu_do_idle();
 
-static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int index = 0;
+	/* Enable interrupts to the CPU */
+	reg = readl_relaxed(priv.base);
+	reg &= ~CPU_SW_INT_BLK;
+	writel_relaxed(reg, priv.base);
 
-	if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
-				target_freq, relation, &index))
-		return -EINVAL;
-
-	kirkwood_cpufreq_set_cpu_state(policy, index);
+	local_irq_enable();
 
 	return 0;
 }
@@ -125,40 +93,17 @@
 /* Module init and exit code */
 static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-
-	/* cpuinfo and default policy values */
-	policy->cpuinfo.transition_latency = 5000; /* 5uS */
-	policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
-	result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
 }
 
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver kirkwood_cpufreq_driver = {
 	.get	= kirkwood_cpufreq_get_cpu_frequency,
-	.verify	= kirkwood_cpufreq_verify,
-	.target	= kirkwood_cpufreq_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = kirkwood_cpufreq_target,
 	.init	= kirkwood_cpufreq_cpu_init,
-	.exit	= kirkwood_cpufreq_cpu_exit,
+	.exit	= cpufreq_generic_exit,
 	.name	= "kirkwood-cpufreq",
-	.attr	= kirkwood_cpufreq_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4ada1cc..45bafdd 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -625,28 +625,13 @@
 }
 
 
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
 static int longhaul_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq, unsigned int relation)
+			    unsigned int table_index)
 {
-	unsigned int table_index = 0;
 	unsigned int i;
 	unsigned int dir = 0;
 	u8 vid, current_vid;
 
-	if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
-				relation, &table_index))
-		return -EINVAL;
-
-	/* Don't set same frequency again */
-	if (longhaul_index == table_index)
-		return 0;
-
 	if (!can_scale_voltage)
 		longhaul_setstate(policy, table_index);
 	else {
@@ -919,36 +904,18 @@
 		longhaul_setup_voltagescaling();
 
 	policy->cpuinfo.transition_latency = 200000;	/* nsec */
-	policy->cur = calc_speed(longhaul_get_cpu_mult());
 
-	ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
-	if (ret)
-		return ret;
-
-	cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
-
-	return 0;
+	return cpufreq_table_validate_and_show(policy, longhaul_table);
 }
 
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver longhaul_driver = {
-	.verify	= longhaul_verify,
-	.target	= longhaul_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = longhaul_target,
 	.get	= longhaul_get,
 	.init	= longhaul_cpu_init,
-	.exit	= longhaul_cpu_exit,
+	.exit	= cpufreq_generic_exit,
 	.name	= "longhaul",
-	.attr	= longhaul_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 5aa0316..074971b 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -129,9 +129,7 @@
 		return -EINVAL;
 
 	policy->cpu = 0;
-	cpufreq_verify_within_limits(policy,
-		policy->cpuinfo.min_freq,
-		policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
 	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
 	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 7bc3c44..a436092 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -53,51 +53,24 @@
  * Here we notify other drivers of the proposed change and the final change.
  */
 static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
-				     unsigned int target_freq,
-				     unsigned int relation)
+				     unsigned int index)
 {
 	unsigned int cpu = policy->cpu;
-	unsigned int newstate = 0;
 	cpumask_t cpus_allowed;
-	struct cpufreq_freqs freqs;
 	unsigned int freq;
 
 	cpus_allowed = current->cpus_allowed;
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
-	if (cpufreq_frequency_table_target
-	    (policy, &loongson2_clockmod_table[0], target_freq, relation,
-	     &newstate))
-		return -EINVAL;
-
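+	/* driver_data holds the clock multiplier in eighths of the full CPU clock */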
 	freq =
 	    ((cpu_clock_freq / 1000) *
-	     loongson2_clockmod_table[newstate].driver_data) / 8;
-	if (freq < policy->min || freq > policy->max)
-		return -EINVAL;
-
-	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
-	freqs.old = loongson2_cpufreq_get(cpu);
-	freqs.new = freq;
-	freqs.flags = 0;
-
-	if (freqs.new == freqs.old)
-		return 0;
-
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	     loongson2_clockmod_table[index].driver_data) / 8;
 
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	/* setting the cpu frequency */
 	clk_set_rate(cpuclk, freq);
 
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	pr_debug("cpufreq: set frequency %u kHz\n", freq);
-
 	return 0;
 }
 
@@ -131,40 +104,24 @@
 		return ret;
 	}
 
-	policy->cur = loongson2_cpufreq_get(policy->cpu);
-
-	cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
-					 policy->cpu);
-
-	return cpufreq_frequency_table_cpuinfo(policy,
-					    &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      &loongson2_clockmod_table[0]);
+	return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
 }
 
 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
 {
+	cpufreq_frequency_table_put_attr(policy->cpu);
 	clk_put(cpuclk);
 	return 0;
 }
 
-static struct freq_attr *loongson2_table_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver loongson2_cpufreq_driver = {
 	.name = "loongson2",
 	.init = loongson2_cpufreq_cpu_init,
-	.verify = loongson2_cpufreq_verify,
-	.target = loongson2_cpufreq_target,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = loongson2_cpufreq_target,
 	.get = loongson2_cpufreq_get,
 	.exit = loongson2_cpufreq_exit,
-	.attr = loongson2_table_attr,
+	.attr = cpufreq_generic_attr,
 };
 
 static struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 6168d77..c4dfa42 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -64,18 +64,11 @@
 	{0,			CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *maple_cpu_freqs_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
 static int maple_pmode_cur;
 
-static DEFINE_MUTEX(maple_switch_mutex);
-
 static const u32 *maple_pmode_data;
 static int maple_pmode_max;
 
@@ -135,37 +128,10 @@
  * Common interface to the cpufreq core
  */
 
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
 static int maple_cpufreq_target(struct cpufreq_policy *policy,
-	unsigned int target_freq, unsigned int relation)
+	unsigned int index)
 {
-	unsigned int newstate = 0;
-	struct cpufreq_freqs freqs;
-	int rc;
-
-	if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
-			target_freq, relation, &newstate))
-		return -EINVAL;
-
-	if (maple_pmode_cur == newstate)
-		return 0;
-
-	mutex_lock(&maple_switch_mutex);
-
-	freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
-	freqs.new = maple_cpu_freqs[newstate].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	rc = maple_scom_switch_freq(newstate);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	mutex_unlock(&maple_switch_mutex);
-
-	return rc;
+	return maple_scom_switch_freq(index);
 }
 
 static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
@@ -175,27 +141,17 @@
 
 static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	policy->cpuinfo.transition_latency = 12000;
-	policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
-	/* secondary CPUs are tied to the primary one by the
-	 * cpufreq core if in the secondary policy we tell it that
-	 * it actually must be one policy together with all others. */
-	cpumask_setall(policy->cpus);
-	cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
-	return cpufreq_frequency_table_cpuinfo(policy,
-		maple_cpu_freqs);
+	return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
 }
 
-
 static struct cpufreq_driver maple_cpufreq_driver = {
 	.name		= "maple",
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= maple_cpufreq_cpu_init,
-	.verify		= maple_cpufreq_verify,
-	.target		= maple_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= maple_cpufreq_target,
 	.get		= maple_cpufreq_get_speed,
-	.attr		= maple_cpu_freqs_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init maple_cpufreq_init(void)
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index f31fcfc..be6d143 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -40,13 +40,6 @@
 static struct device *mpu_dev;
 static struct regulator *mpu_reg;
 
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
-	if (!freq_table)
-		return -EINVAL;
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int omap_getspeed(unsigned int cpu)
 {
 	unsigned long rate;
@@ -58,42 +51,16 @@
 	return rate;
 }
 
-static int omap_target(struct cpufreq_policy *policy,
-		       unsigned int target_freq,
-		       unsigned int relation)
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int i;
-	int r, ret = 0;
-	struct cpufreq_freqs freqs;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+	unsigned int old_freq, new_freq;
+	int r, ret;
 
-	if (!freq_table) {
-		dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
-				policy->cpu);
-		return -EINVAL;
-	}
+	old_freq = omap_getspeed(policy->cpu);
+	new_freq = freq_table[index].frequency;
 
-	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
-			relation, &i);
-	if (ret) {
-		dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
-			__func__, policy->cpu, target_freq, ret);
-		return ret;
-	}
-	freqs.new = freq_table[i].frequency;
-	if (!freqs.new) {
-		dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
-			policy->cpu, target_freq);
-		return -EINVAL;
-	}
-
-	freqs.old = omap_getspeed(policy->cpu);
-
-	if (freqs.old == freqs.new && policy->cur == freqs.new)
-		return ret;
-
-	freq = freqs.new * 1000;
+	freq = new_freq * 1000;
 	ret = clk_round_rate(mpu_clk, freq);
 	if (IS_ERR_VALUE(ret)) {
 		dev_warn(mpu_dev,
@@ -105,143 +72,103 @@
 
 	if (mpu_reg) {
 		rcu_read_lock();
-		opp = opp_find_freq_ceil(mpu_dev, &freq);
+		opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
 			dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
-				__func__, freqs.new);
+				__func__, new_freq);
 			return -EINVAL;
 		}
-		volt = opp_get_voltage(opp);
+		volt = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 		tol = volt * OPP_TOLERANCE / 100;
 		volt_old = regulator_get_voltage(mpu_reg);
 	}
 
 	dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n", 
-		freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
-		freqs.new / 1000, volt ? volt / 1000 : -1);
-
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+		old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+		new_freq / 1000, volt ? volt / 1000 : -1);
 
 	/* scaling up?  scale voltage before frequency */
-	if (mpu_reg && (freqs.new > freqs.old)) {
+	if (mpu_reg && (new_freq > old_freq)) {
 		r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
 		if (r < 0) {
 			dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
 				 __func__);
-			freqs.new = freqs.old;
-			goto done;
+			return r;
 		}
 	}
 
-	ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+	ret = clk_set_rate(mpu_clk, new_freq * 1000);
 
 	/* scaling down?  scale voltage after frequency */
-	if (mpu_reg && (freqs.new < freqs.old)) {
+	if (mpu_reg && (new_freq < old_freq)) {
 		r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
 		if (r < 0) {
 			dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
 				 __func__);
-			ret = clk_set_rate(mpu_clk, freqs.old * 1000);
-			freqs.new = freqs.old;
-			goto done;
+			clk_set_rate(mpu_clk, old_freq * 1000);
+			return r;
 		}
 	}
 
-	freqs.new = omap_getspeed(policy->cpu);
-
-done:
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return ret;
 }
 
 static inline void freq_table_free(void)
 {
 	if (atomic_dec_and_test(&freq_table_users))
-		opp_free_cpufreq_table(mpu_dev, &freq_table);
+		dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
 }
 
 static int omap_cpu_init(struct cpufreq_policy *policy)
 {
-	int result = 0;
+	int result;
 
 	mpu_clk = clk_get(NULL, "cpufreq_ck");
 	if (IS_ERR(mpu_clk))
 		return PTR_ERR(mpu_clk);
 
-	if (policy->cpu >= NR_CPUS) {
-		result = -EINVAL;
-		goto fail_ck;
-	}
-
-	policy->cur = omap_getspeed(policy->cpu);
-
-	if (!freq_table)
-		result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
-	if (result) {
-		dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+	if (!freq_table) {
+		result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+		if (result) {
+			dev_err(mpu_dev,
+				"%s: cpu%d: failed creating freq table[%d]\n",
 				__func__, policy->cpu, result);
-		goto fail_ck;
+			goto fail;
+		}
 	}
 
 	atomic_inc_return(&freq_table_users);
 
-	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	if (result)
-		goto fail_table;
-
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-	policy->cur = omap_getspeed(policy->cpu);
-
-	/*
-	 * On OMAP SMP configuartion, both processors share the voltage
-	 * and clock. So both CPUs needs to be scaled together and hence
-	 * needs software co-ordination. Use cpufreq affected_cpus
-	 * interface to handle this scenario. Additional is_smp() check
-	 * is to keep SMP_ON_UP build working.
-	 */
-	if (is_smp())
-		cpumask_setall(policy->cpus);
-
 	/* FIXME: what's the actual transition time? */
-	policy->cpuinfo.transition_latency = 300 * 1000;
+	result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+	if (!result)
+		return 0;
 
-	return 0;
-
-fail_table:
 	freq_table_free();
-fail_ck:
+fail:
 	clk_put(mpu_clk);
 	return result;
 }
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
+	cpufreq_frequency_table_put_attr(policy->cpu);
 	freq_table_free();
 	clk_put(mpu_clk);
 	return 0;
 }
 
-static struct freq_attr *omap_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver omap_driver = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= omap_verify_speed,
-	.target		= omap_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= omap_target,
 	.get		= omap_getspeed,
 	.init		= omap_cpu_init,
 	.exit		= omap_cpu_exit,
 	.name		= "omap",
-	.attr		= omap_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 2f0a2a6..3d1cba9 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -105,47 +105,21 @@
 };
 
 
-static int cpufreq_p4_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int    newstate = DC_RESV;
-	struct cpufreq_freqs freqs;
 	int i;
 
-	if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	freqs.old = cpufreq_p4_get(policy->cpu);
-	freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
-
-	if (freqs.new == freqs.old)
-		return 0;
-
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* run on each logical CPU,
 	 * see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
 	for_each_cpu(i, policy->cpus)
-		cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
-
-	/* notifiers */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+		cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
 
 	return 0;
 }
 
 
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
 static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
 	if (c->x86 == 0x06) {
@@ -230,25 +204,17 @@
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
-	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
 
 	/* cpuinfo and default policy values */
 
 	/* the transition latency is set to be 1 higher than the maximum
 	 * transition latency of the ondemand governor */
 	policy->cpuinfo.transition_latency = 10000001;
-	policy->cur = stock_freq;
 
-	return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+	return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
 }
 
 
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
 static unsigned int cpufreq_p4_get(unsigned int cpu)
 {
 	u32 l, h;
@@ -267,19 +233,14 @@
 	return stock_freq;
 }
 
-static struct freq_attr *p4clockmod_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver p4clockmod_driver = {
-	.verify		= cpufreq_p4_verify,
-	.target		= cpufreq_p4_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= cpufreq_p4_target,
 	.init		= cpufreq_p4_cpu_init,
-	.exit		= cpufreq_p4_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.get		= cpufreq_p4_get,
 	.name		= "p4-clockmod",
-	.attr		= p4clockmod_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index f4ec814..0426008 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -52,8 +52,6 @@
 static void __iomem *sdcpwr_mapbase;
 static void __iomem *sdcasr_mapbase;
 
-static DEFINE_MUTEX(pas_switch_mutex);
-
 /* Current astate, is used when waking up from power savings on
  * one core, in case the other core has switched states during
  * the idle time.
@@ -70,11 +68,6 @@
 	{0,	CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *pas_cpu_freqs_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 /*
  * hardware specific functions
  */
@@ -210,22 +203,13 @@
 		pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
 	}
 
-	policy->cpuinfo.transition_latency = get_gizmo_latency();
-
 	cur_astate = get_cur_astate(policy->cpu);
 	pr_debug("current astate is at %d\n",cur_astate);
 
 	policy->cur = pas_freqs[cur_astate].frequency;
-	cpumask_copy(policy->cpus, cpu_online_mask);
-
 	ppc_proc_freq = policy->cur * 1000ul;
 
-	cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
-	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
-	 * are set correctly
-	 */
-	return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+	return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
 out_unmap_sdcpwr:
 	iounmap(sdcpwr_mapbase);
@@ -254,31 +238,11 @@
 	return 0;
 }
 
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
 static int pas_cpufreq_target(struct cpufreq_policy *policy,
-			      unsigned int target_freq,
-			      unsigned int relation)
+			      unsigned int pas_astate_new)
 {
-	struct cpufreq_freqs freqs;
-	int pas_astate_new;
 	int i;
 
-	cpufreq_frequency_table_target(policy,
-				       pas_freqs,
-				       target_freq,
-				       relation,
-				       &pas_astate_new);
-
-	freqs.old = policy->cur;
-	freqs.new = pas_freqs[pas_astate_new].frequency;
-
-	mutex_lock(&pas_switch_mutex);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
 		 policy->cpu,
 		 pas_freqs[pas_astate_new].frequency,
@@ -289,10 +253,7 @@
 	for_each_online_cpu(i)
 		set_astate(i, pas_astate_new);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	mutex_unlock(&pas_switch_mutex);
-
-	ppc_proc_freq = freqs.new * 1000ul;
+	ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
 	return 0;
 }
 
@@ -301,9 +262,9 @@
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= pas_cpufreq_cpu_init,
 	.exit		= pas_cpufreq_cpu_exit,
-	.verify		= pas_cpufreq_verify,
-	.target		= pas_cpufreq_target,
-	.attr		= pas_cpu_freqs_attr,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= pas_cpufreq_target,
+	.attr		= cpufreq_generic_attr,
 };
 
 /*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index d81c4e5..e2b4f40 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -111,8 +111,7 @@
 
 static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -396,15 +395,14 @@
 	struct pcc_memory_resource *mem_resource;
 	struct pcc_register_resource *reg_resource;
 	union acpi_object *out_obj, *member;
-	acpi_handle handle, osc_handle, pcch_handle;
+	acpi_handle handle, osc_handle;
 	int ret = 0;
 
 	status = acpi_get_handle(NULL, "\\_SB", &handle);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
-	status = acpi_get_handle(handle, "PCCH", &pcch_handle);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(handle, "PCCH"))
 		return -ENODEV;
 
 	status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@
 		ioread32(&pcch_hdr->nominal) * 1000;
 	policy->min = policy->cpuinfo.min_freq =
 		ioread32(&pcch_hdr->minimum_frequency) * 1000;
-	policy->cur = pcc_get_freq(cpu);
-
-	if (!policy->cur) {
-		pr_debug("init: Unable to get current CPU frequency\n");
-		result = -EINVAL;
-		goto out;
-	}
 
 	pr_debug("init: policy->max is %d, policy->min is %d\n",
 		policy->max, policy->min);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index a096cd3..cf55d20 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -86,11 +86,6 @@
 	{0,			CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static inline void local_delay(unsigned long ms)
 {
 	if (no_schedule)
@@ -336,21 +331,11 @@
 	return 0;
 }
 
-static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
-		int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
 {
-	struct cpufreq_freqs freqs;
 	unsigned long l3cr;
 	static unsigned long prev_l3cr;
 
-	freqs.old = cur_freq;
-	freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	if (notify)
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	if (speed_mode == CPUFREQ_LOW &&
 	    cpu_has_feature(CPU_FTR_L3CR)) {
 		l3cr = _get_L3CR();
@@ -366,8 +351,6 @@
 		if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
 			_set_L3CR(prev_l3cr);
 	}
-	if (notify)
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
 
 	return 0;
@@ -378,23 +361,12 @@
 	return cur_freq;
 }
 
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
 static int pmac_cpufreq_target(	struct cpufreq_policy *policy,
-					unsigned int target_freq,
-					unsigned int relation)
+					unsigned int index)
 {
-	unsigned int    newstate = 0;
 	int		rc;
 
-	if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
-			target_freq, relation, &newstate))
-		return -EINVAL;
-
-	rc = do_set_cpu_speed(policy, newstate, 1);
+	rc = do_set_cpu_speed(policy, index);
 
 	ppc_proc_freq = cur_freq * 1000ul;
 	return rc;
@@ -402,14 +374,7 @@
 
 static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu != 0)
-		return -ENODEV;
-
-	policy->cpuinfo.transition_latency	= transition_latency;
-	policy->cur = cur_freq;
-
-	cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
-	return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+	return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
 }
 
 static u32 read_gpio(struct device_node *np)
@@ -443,7 +408,7 @@
 	no_schedule = 1;
 	sleep_freq = cur_freq;
 	if (cur_freq == low_freq && !is_pmu_based)
-		do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
+		do_set_cpu_speed(policy, CPUFREQ_HIGH);
 	return 0;
 }
 
@@ -460,7 +425,7 @@
 	 * probably high speed due to our suspend() routine
 	 */
 	do_set_cpu_speed(policy, sleep_freq == low_freq ?
-			 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+			 CPUFREQ_LOW : CPUFREQ_HIGH);
 
 	ppc_proc_freq = cur_freq * 1000ul;
 
@@ -469,14 +434,14 @@
 }
 
 static struct cpufreq_driver pmac_cpufreq_driver = {
-	.verify 	= pmac_cpufreq_verify,
-	.target 	= pmac_cpufreq_target,
+	.verify 	= cpufreq_generic_frequency_table_verify,
+	.target_index	= pmac_cpufreq_target,
 	.get		= pmac_cpufreq_get_speed,
 	.init		= pmac_cpufreq_cpu_init,
 	.suspend	= pmac_cpufreq_suspend,
 	.resume		= pmac_cpufreq_resume,
 	.flags		= CPUFREQ_PM_NO_WARN,
-	.attr		= pmac_cpu_freqs_attr,
+	.attr		= cpufreq_generic_attr,
 	.name		= "powermac",
 };
 
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 3a51ad7..6a338f8 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -70,11 +70,6 @@
 	{0,			CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* g5_cpu_freqs_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
@@ -84,8 +79,6 @@
 static int (*g5_switch_freq)(int speed_mode);
 static int (*g5_query_freq)(void);
 
-static DEFINE_MUTEX(g5_switch_mutex);
-
 static unsigned long transition_latency;
 
 #ifdef CONFIG_PMAC_SMU
@@ -142,7 +135,7 @@
 		pmf_call_one(pfunc_vdnap0_complete, &args);
 		if (done)
 			break;
-		msleep(1);
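+		/* msleep(1) can oversleep by a full jiffy; poll at ~1 ms instead */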
+		usleep_range(1000, 1000);
 	}
 	if (done == 0)
 		printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +234,7 @@
 		if (pfunc_cpu1_volt_low)
 			pmf_call_one(pfunc_cpu1_volt_low, NULL);
 	}
-	msleep(10); /* should be faster , to fix */
+	usleep_range(10000, 10000); /* should be faster, to fix */
 }
 
 /*
@@ -286,7 +279,7 @@
 		pmf_call_one(pfunc_slewing_done, &args);
 		if (done)
 			break;
-		msleep(1);
+		usleep_range(500, 500);
 	}
 	if (done == 0)
 		printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -317,37 +310,9 @@
  * Common interface to the cpufreq core
  */
 
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
-static int g5_cpufreq_target(struct cpufreq_policy *policy,
-	unsigned int target_freq, unsigned int relation)
-{
-	unsigned int newstate = 0;
-	struct cpufreq_freqs freqs;
-	int rc;
-
-	if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
-			target_freq, relation, &newstate))
-		return -EINVAL;
-
-	if (g5_pmode_cur == newstate)
-		return 0;
-
-	mutex_lock(&g5_switch_mutex);
-
-	freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
-	freqs.new = g5_cpu_freqs[newstate].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	rc = g5_switch_freq(newstate);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	mutex_unlock(&g5_switch_mutex);
-
-	return rc;
+	return g5_switch_freq(index);
 }
 
 static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -357,27 +322,17 @@
 
 static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	policy->cpuinfo.transition_latency = transition_latency;
-	policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
-	/* secondary CPUs are tied to the primary one by the
-	 * cpufreq core if in the secondary policy we tell it that
-	 * it actually must be one policy together with all others. */
-	cpumask_copy(policy->cpus, cpu_online_mask);
-	cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
-	return cpufreq_frequency_table_cpuinfo(policy,
-		g5_cpu_freqs);
+	return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
 }
 
-
 static struct cpufreq_driver g5_cpufreq_driver = {
 	.name		= "powermac",
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= g5_cpufreq_cpu_init,
-	.verify		= g5_cpufreq_verify,
-	.target		= g5_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= g5_cpufreq_target,
 	.get		= g5_cpufreq_get_speed,
-	.attr 		= g5_cpu_freqs_attr,
+	.attr 		= cpufreq_generic_attr,
 };
 
 
@@ -397,7 +352,8 @@
 	/* Check supported platforms */
 	if (of_machine_is_compatible("PowerMac8,1") ||
 	    of_machine_is_compatible("PowerMac8,2") ||
-	    of_machine_is_compatible("PowerMac9,1"))
+	    of_machine_is_compatible("PowerMac9,1") ||
+	    of_machine_is_compatible("PowerMac12,1"))
 		use_volts_smu = 1;
 	else if (of_machine_is_compatible("PowerMac11,2"))
 		use_volts_vdnap = 1;
@@ -647,8 +603,10 @@
 	g5_cpu_freqs[0].frequency = max_freq;
 	g5_cpu_freqs[1].frequency = min_freq;
 
+	/* Based on a measurement on Xserve G5, rounded up. */
+	transition_latency = 10 * NSEC_PER_MSEC;
+
 	/* Set callbacks */
-	transition_latency = CPUFREQ_ETERNAL;
 	g5_switch_volt = g5_pfunc_switch_volt;
 	g5_switch_freq = g5_pfunc_switch_freq;
 	g5_query_freq = g5_pfunc_query_freq;
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 85f1c8c..643e795 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -63,12 +63,12 @@
 
 
 /**
- * powernow_k6_set_state - set the PowerNow! multiplier
+ * powernow_k6_target - set the PowerNow! multiplier
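+ * @policy: cpufreq policy for the CPU being changed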
  * @best_i: clock_ratio[best_i] is the target multiplier
  *
  *   Tries to change the PowerNow! multiplier
  */
-static void powernow_k6_set_state(struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
 		unsigned int best_i)
 {
 	unsigned long outvalue = 0, invalue = 0;
@@ -77,7 +77,7 @@
 
 	if (clock_ratio[best_i].driver_data > max_multiplier) {
 		printk(KERN_ERR PFX "invalid target frequency\n");
-		return;
+		return -EINVAL;
 	}
 
 	freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
@@ -100,44 +100,6 @@
 
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
-	return;
-}
-
-
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
-/**
- * powernow_k6_setpolicy - sets a new CPUFreq policy
- * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *  (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
- *
- * sets a new CPUFreq policy
- */
-static int powernow_k6_target(struct cpufreq_policy *policy,
-			       unsigned int target_freq,
-			       unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	powernow_k6_set_state(policy, newstate);
-
 	return 0;
 }
 
@@ -145,7 +107,6 @@
 static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int i, f;
-	int result;
 
 	if (policy->cpu != 0)
 		return -ENODEV;
@@ -165,15 +126,8 @@
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = 200000;
-	policy->cur = busfreq * max_multiplier;
 
-	result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
-
-	return 0;
+	return cpufreq_table_validate_and_show(policy, clock_ratio);
 }
 
 
@@ -182,7 +136,7 @@
 	unsigned int i;
 	for (i = 0; i < 8; i++) {
 		if (i == max_multiplier)
-			powernow_k6_set_state(policy, i);
+			powernow_k6_target(policy, i);
 	}
 	cpufreq_frequency_table_put_attr(policy->cpu);
 	return 0;
@@ -195,19 +149,14 @@
 	return ret;
 }
 
-static struct freq_attr *powernow_k6_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver powernow_k6_driver = {
-	.verify		= powernow_k6_verify,
-	.target		= powernow_k6_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= powernow_k6_target,
 	.init		= powernow_k6_cpu_init,
 	.exit		= powernow_k6_cpu_exit,
 	.get		= powernow_k6_get,
 	.name		= "powernow-k6",
-	.attr		= powernow_k6_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 14ce480..946708a 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@
 }
 
 
-static void change_speed(struct cpufreq_policy *policy, unsigned int index)
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	u8 fid, vid;
 	struct cpufreq_freqs freqs;
@@ -291,6 +291,8 @@
 		local_irq_enable();
 
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+	return 0;
 }
 
 
@@ -533,27 +535,6 @@
 }
 
 
-static int powernow_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int newstate;
-
-	if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
-				relation, &newstate))
-		return -EINVAL;
-
-	change_speed(policy, newstate);
-
-	return 0;
-}
-
-
-static int powernow_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
 /*
  * We use the fact that the bus frequency is somehow
  * a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +659,7 @@
 	policy->cpuinfo.transition_latency =
 		cpufreq_scale(2000000UL, fsb, latency);
 
-	policy->cur = powernow_get(0);
-
-	cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
-	return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+	return cpufreq_table_validate_and_show(policy, powernow_table);
 }
 
 static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,14 +678,9 @@
 	return 0;
 }
 
-static struct freq_attr *powernow_table_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver powernow_driver = {
-	.verify		= powernow_verify,
-	.target		= powernow_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= powernow_target,
 	.get		= powernow_get,
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	.bios_limit	= acpi_processor_get_bios_limit,
@@ -716,7 +688,7 @@
 	.init		= powernow_cpu_init,
 	.exit		= powernow_cpu_exit,
 	.name		= "powernow-k7",
-	.attr		= powernow_table_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2344a9e..0023c7d 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -977,20 +977,17 @@
 
 struct powernowk8_target_arg {
 	struct cpufreq_policy		*pol;
-	unsigned			targfreq;
-	unsigned			relation;
+	unsigned			newstate;
 };
 
 static long powernowk8_target_fn(void *arg)
 {
 	struct powernowk8_target_arg *pta = arg;
 	struct cpufreq_policy *pol = pta->pol;
-	unsigned targfreq = pta->targfreq;
-	unsigned relation = pta->relation;
+	unsigned newstate = pta->newstate;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
-	unsigned int newstate;
 	int ret;
 
 	if (!data)
@@ -1004,8 +1001,9 @@
 		return -EIO;
 	}
 
-	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
-		pol->cpu, targfreq, pol->min, pol->max, relation);
+	pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+		pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+		pol->max);
 
 	if (query_current_values_with_pending_wait(data))
 		return -EIO;
@@ -1021,10 +1019,6 @@
 		       checkvid, data->currvid);
 	}
 
-	if (cpufreq_frequency_table_target(pol, data->powernow_table,
-				targfreq, relation, &newstate))
-		return -EIO;
-
 	mutex_lock(&fidvid_mutex);
 
 	powernow_k8_acpi_pst_values(data, newstate);
@@ -1044,26 +1038,13 @@
 }
 
 /* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-		unsigned targfreq, unsigned relation)
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
 {
-	struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
-					     .relation = relation };
+	struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
 
 	return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
-	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
-	if (!data)
-		return -EINVAL;
-
-	return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
 struct init_on_cpu {
 	struct powernow_k8_data *data;
 	int rc;
@@ -1152,11 +1133,8 @@
 	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
-	pol->cur = find_khz_freq_from_fid(data->currfid);
-	pr_debug("policy current frequency %d kHz\n", pol->cur);
-
 	/* min/max the cpu is capable of */
-	if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+	if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
 		printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
 		powernow_k8_cpu_exit_acpi(data);
 		kfree(data->powernow_table);
@@ -1164,8 +1142,6 @@
 		return -EINVAL;
 	}
 
-	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
 	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
 		 data->currfid, data->currvid);
 
@@ -1227,20 +1203,16 @@
 	return khz;
 }
 
-static struct freq_attr *powernow_k8_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver cpufreq_amd64_driver = {
-	.verify		= powernowk8_verify,
-	.target		= powernowk8_target,
+	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= powernowk8_target,
 	.bios_limit	= acpi_processor_get_bios_limit,
 	.init		= powernowk8_cpu_init,
 	.exit		= powernowk8_cpu_exit,
 	.get		= powernowk8_get,
 	.name		= "powernow-k8",
-	.attr		= powernow_k8_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static void __request_acpi_cpufreq(void)
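
powernow-k8 additionally gains CPUFREQ_ASYNC_NOTIFICATION: the fid/vid transition runs on the target CPU via work_on_cpu(), so the transition notifications are issued by the driver from that context rather than by the core around ->target_index(). A sketch of just the work_on_cpu() plumbing; the argument struct mirrors the patch and the transition body is elided.

#include <linux/cpufreq.h>
#include <linux/workqueue.h>

struct demo_target_arg {
	struct cpufreq_policy	*pol;
	unsigned int		newstate;
};

/* Runs with the work item bound to pol->cpu. */
static long demo_target_fn(void *arg)
{
	struct demo_target_arg *pta = arg;

	/* ... program the new fid/vid for pta->newstate; under
	 * CPUFREQ_ASYNC_NOTIFICATION the transition notifications
	 * are sent from here rather than by the core ... */
	return 0;
}

static int demo_target(struct cpufreq_policy *pol, unsigned int index)
{
	struct demo_target_arg pta = { .pol = pol, .newstate = index };

	return work_on_cpu(pol->cpu, demo_target_fn, &pta);
}
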
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 60e81d5..3f7be46 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -69,8 +69,6 @@
 static u32 min_cpufreq;
 static const u32 *fmask;
 
-/* serialize frequency changes  */
-static DEFINE_MUTEX(cpufreq_lock);
 static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
 
 /* cpumask in a cluster */
@@ -202,7 +200,7 @@
 	table[i].frequency = CPUFREQ_TABLE_END;
 
 	/* set the min and max frequency properly */
-	ret = cpufreq_frequency_table_cpuinfo(policy, table);
+	ret = cpufreq_table_validate_and_show(policy, table);
 	if (ret) {
 		pr_err("invalid frequency table: %d\n", ret);
 		goto err_nomem1;
@@ -217,9 +215,6 @@
 		per_cpu(cpu_data, i) = data;
 
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
-	cpufreq_frequency_table_get_attr(table, cpu);
 	of_node_put(np);
 
 	return 0;
@@ -253,60 +248,25 @@
 	return 0;
 }
 
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	struct cpufreq_frequency_table *table =
-		per_cpu(cpu_data, policy->cpu)->table;
-
-	return cpufreq_frequency_table_verify(policy, table);
-}
-
 static int corenet_cpufreq_target(struct cpufreq_policy *policy,
-		unsigned int target_freq, unsigned int relation)
+		unsigned int index)
 {
-	struct cpufreq_freqs freqs;
-	unsigned int new;
 	struct clk *parent;
-	int ret;
 	struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
 
-	cpufreq_frequency_table_target(policy, data->table,
-			target_freq, relation, &new);
-
-	if (policy->cur == data->table[new].frequency)
-		return 0;
-
-	freqs.old = policy->cur;
-	freqs.new = data->table[new].frequency;
-
-	mutex_lock(&cpufreq_lock);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	parent = of_clk_get(data->parent, data->table[new].driver_data);
-	ret = clk_set_parent(data->clk, parent);
-	if (ret)
-		freqs.new = freqs.old;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	mutex_unlock(&cpufreq_lock);
-
-	return ret;
+	parent = of_clk_get(data->parent, data->table[index].driver_data);
+	return clk_set_parent(data->clk, parent);
 }
 
-static struct freq_attr *corenet_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
 	.name		= "ppc_cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= corenet_cpufreq_cpu_init,
 	.exit		= __exit_p(corenet_cpufreq_cpu_exit),
-	.verify		= corenet_cpufreq_verify,
-	.target		= corenet_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= corenet_cpufreq_target,
 	.get		= corenet_cpufreq_get_speed,
-	.attr		= corenet_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct of_device_id node_matches[] __initdata = {
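
The mutex and notifier calls deleted here are not lost behaviour: for ->target_index() drivers the core serializes target calls per policy and brackets the callback with PRECHANGE/POSTCHANGE notifications itself. Roughly, as a simplified paraphrase of the 3.13 core path (not the verbatim source):

#include <linux/cpufreq.h>

static int demo_invoke_target_index(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table,
				    unsigned int index,
				    int (*target_index)(struct cpufreq_policy *,
							unsigned int))
{
	struct cpufreq_freqs freqs;
	int retval;

	/* skipped when the driver sets CPUFREQ_ASYNC_NOTIFICATION */
	freqs.old = policy->cur;
	freqs.new = table[index].frequency;
	freqs.flags = 0;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	retval = target_index(policy, index);
	if (retval)
		freqs.new = freqs.old;	/* failed switch reported as no change */

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	return retval;
}
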
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 2e448f0..e42ca9c 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -30,9 +30,6 @@
 
 #include "ppc_cbe_cpufreq.h"
 
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
 /* the CBE supports an 8 step frequency scaling */
 static struct cpufreq_frequency_table cbe_freqs[] = {
 	{1,	0},
@@ -123,63 +120,28 @@
 	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
-	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
 	/* this ensures that policy->cpuinfo_min
 	 * and policy->cpuinfo_max are set correctly */
-	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, cbe_freqs);
+	return cpufreq_table_validate_and_show(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_target(struct cpufreq_policy *policy,
-			      unsigned int target_freq,
-			      unsigned int relation)
+			      unsigned int cbe_pmode_new)
 {
-	int rc;
-	struct cpufreq_freqs freqs;
-	unsigned int cbe_pmode_new;
-
-	cpufreq_frequency_table_target(policy,
-				       cbe_freqs,
-				       target_freq,
-				       relation,
-				       &cbe_pmode_new);
-
-	freqs.old = policy->cur;
-	freqs.new = cbe_freqs[cbe_pmode_new].frequency;
-
-	mutex_lock(&cbe_switch_mutex);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	pr_debug("setting frequency for cpu %d to %d kHz, " \
 		 "1/%d of max frequency\n",
 		 policy->cpu,
 		 cbe_freqs[cbe_pmode_new].frequency,
 		 cbe_freqs[cbe_pmode_new].driver_data);
 
-	rc = set_pmode(policy->cpu, cbe_pmode_new);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	mutex_unlock(&cbe_switch_mutex);
-
-	return rc;
+	return set_pmode(policy->cpu, cbe_pmode_new);
 }
 
 static struct cpufreq_driver cbe_cpufreq_driver = {
-	.verify		= cbe_cpufreq_verify,
-	.target		= cbe_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= cbe_cpufreq_target,
 	.init		= cbe_cpufreq_cpu_init,
-	.exit		= cbe_cpufreq_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.name		= "cbe-cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
 };
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 8749eaf..0a0f436 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -262,36 +262,15 @@
 	return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }
 
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
-	struct cpufreq_frequency_table *pxa_freqs_table;
-	pxa_freqs_t *pxa_freqs;
-	int ret;
-
-	find_freq_tables(&pxa_freqs_table, &pxa_freqs);
-	ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
-	if (freq_debug)
-		pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
-			 policy->min, policy->max);
-
-	return ret;
-}
-
 static unsigned int pxa_cpufreq_get(unsigned int cpu)
 {
 	return get_clk_frequency_khz(0);
 }
 
-static int pxa_set_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
 {
 	struct cpufreq_frequency_table *pxa_freqs_table;
 	pxa_freqs_t *pxa_freq_settings;
-	struct cpufreq_freqs freqs;
-	unsigned int idx;
 	unsigned long flags;
 	unsigned int new_freq_cpu, new_freq_mem;
 	unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -300,32 +279,19 @@
 	/* Get the current policy */
 	find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
 
-	/* Lookup the next frequency */
-	if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
-					   target_freq, relation, &idx)) {
-		return -EINVAL;
-	}
-
 	new_freq_cpu = pxa_freq_settings[idx].khz;
 	new_freq_mem = pxa_freq_settings[idx].membus;
-	freqs.old = policy->cur;
-	freqs.new = new_freq_cpu;
 
 	if (freq_debug)
 		pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
-			 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
+			 new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
 			 (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
-	if (vcc_core && freqs.new > freqs.old)
+	if (vcc_core && new_freq_cpu > policy->cur) {
 		ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
-	if (ret)
-		return ret;
-	/*
-	 * Tell everyone what we're about to do...
-	 * you should add a notify client with any platform specific
-	 * Vcc changing capability
-	 */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+		if (ret)
+			return ret;
+	}
 
 	/* Calculate the next MDREFR.  If we're slowing down the SDRAM clock
 	 * we need to preset the smaller DRI before the change.	 If we're
@@ -376,13 +342,6 @@
 	local_irq_restore(flags);
 
 	/*
-	 * Tell everyone what we've just done...
-	 * you should add a notify client with any platform specific
-	 * SDRAM refresh timer adjustments
-	 */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	/*
 	 * Even if voltage setting fails, we don't report it, as the frequency
 	 * change succeeded. The voltage reduction is not a critical failure,
 	 * only power savings will suffer from this.
@@ -391,7 +350,7 @@
 	 * bug is triggered (seems a deadlock). Should anybody find out where,
 	 * the "return 0" should become a "return ret".
 	 */
-	if (vcc_core && freqs.new < freqs.old)
+	if (vcc_core && new_freq_cpu < policy->cur)
 		ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
 
 	return 0;
@@ -414,8 +373,6 @@
 
 	/* set default policy and cpuinfo */
 	policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-	policy->cur = get_clk_frequency_khz(0);	   /* current freq */
-	policy->min = policy->max = policy->cur;
 
 	/* Generate the pxa25x run-mode cpufreq_frequency_table struct */
 	for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +410,12 @@
 		find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
 		pr_info("PXA255 cpufreq using %s frequency table\n",
 			pxa255_turbo_table ? "turbo" : "run");
-		cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+		cpufreq_table_validate_and_show(policy, pxa255_freq_table);
 	}
-	else if (cpu_is_pxa27x())
-		cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
+	else if (cpu_is_pxa27x()) {
+		cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
+	}
 
 	printk(KERN_INFO "PXA CPU frequency change support initialized\n");
 
@@ -464,9 +423,10 @@
 }
 
 static struct cpufreq_driver pxa_cpufreq_driver = {
-	.verify	= pxa_verify_policy,
-	.target	= pxa_set_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = pxa_set_target,
 	.init	= pxa_cpufreq_init,
+	.exit	= cpufreq_generic_exit,
 	.get	= pxa_cpufreq_get,
 	.name	= "PXA2xx",
 };
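
Note that policy->cur is only updated by the POSTCHANGE notification, so inside ->target_index() it still holds the pre-change frequency, which is why the driver can compare new_freq_cpu against policy->cur when ordering voltage changes. The DVFS ordering rule pxa2xx implements, as an illustrative sketch (the demo_* helpers stand in for regulator and clock calls):

#include <linux/cpufreq.h>

static int demo_raise_voltage(unsigned int idx) { return 0; }
static void demo_lower_voltage(unsigned int idx) { }
static void demo_program_clocks(unsigned int idx) { }

static unsigned int demo_khz[] = { 104000, 208000, 416000 };

static int demo_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
	unsigned int new_freq = demo_khz[idx];
	int ret;

	/* Going faster: raise the core voltage before the clock. */
	if (new_freq > policy->cur) {
		ret = demo_raise_voltage(idx);
		if (ret)
			return ret;
	}

	demo_program_clocks(idx);

	/* Going slower: drop the voltage only after the clock; a
	 * failure here costs power, not correctness. */
	if (new_freq < policy->cur)
		demo_lower_voltage(idx);

	return 0;
}
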
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index d26306f..9384004 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -108,7 +108,7 @@
 	pxa3xx_freqs_num = num;
 	pxa3xx_freqs_table = table;
 
-	return cpufreq_frequency_table_cpuinfo(policy, table);
+	return cpufreq_table_validate_and_show(policy, table);
 }
 
 static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,54 +150,26 @@
 		cpu_relax();
 }
 
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
 static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
 {
 	return pxa3xx_get_clk_frequency_khz(0);
 }
 
-static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
-			      unsigned int target_freq,
-			      unsigned int relation)
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct pxa3xx_freq_info *next;
-	struct cpufreq_freqs freqs;
 	unsigned long flags;
-	int idx;
 
 	if (policy->cpu != 0)
 		return -EINVAL;
 
-	/* Lookup the next frequency */
-	if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
-				target_freq, relation, &idx))
-		return -EINVAL;
-
-	next = &pxa3xx_freqs[idx];
-
-	freqs.old = policy->cur;
-	freqs.new = next->cpufreq_mhz * 1000;
-
-	pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
-			freqs.old / 1000, freqs.new / 1000,
-			(freqs.old == freqs.new) ? " (skipped)" : "");
-
-	if (freqs.old == target_freq)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	next = &pxa3xx_freqs[index];
 
 	local_irq_save(flags);
 	__update_core_freq(next);
 	__update_bus_freq(next);
 	local_irq_restore(flags);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return 0;
 }
 
@@ -206,11 +178,10 @@
 	int ret = -EINVAL;
 
 	/* set default policy and cpuinfo */
-	policy->cpuinfo.min_freq = 104000;
-	policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+	policy->min = policy->cpuinfo.min_freq = 104000;
+	policy->max = policy->cpuinfo.max_freq =
+		(cpu_is_pxa320()) ? 806000 : 624000;
 	policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-	policy->max = pxa3xx_get_clk_frequency_khz(0);
-	policy->cur = policy->min = policy->max;
 
 	if (cpu_is_pxa300() || cpu_is_pxa310())
 		ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +201,10 @@
 }
 
 static struct cpufreq_driver pxa3xx_cpufreq_driver = {
-	.verify		= pxa3xx_cpufreq_verify,
-	.target		= pxa3xx_cpufreq_set,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= pxa3xx_cpufreq_set,
 	.init		= pxa3xx_cpufreq_init,
+	.exit		= cpufreq_generic_exit,
 	.get		= pxa3xx_cpufreq_get,
 	.name		= "pxa3xx-cpufreq",
 };
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 22dcb81..8d904a0 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -87,16 +87,6 @@
 	{ 0, CPUFREQ_TABLE_END },
 };
 
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
-	if (policy->cpu != 0)
-		return -EINVAL;
-
-	return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
 static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
 {
 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -227,24 +217,15 @@
 }
 
 static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
-				      unsigned int target_freq,
-				      unsigned int relation)
+				      unsigned int index)
 {
 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-	struct cpufreq_freqs freqs;
+	unsigned int new_freq;
 	int idx, ret, to_dvs = 0;
-	unsigned int i;
 
 	mutex_lock(&cpufreq_lock);
 
-	pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation);
-
-	ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table,
-					     target_freq, relation, &i);
-	if (ret != 0)
-		goto out;
-
-	idx = s3c_freq->freq_table[i].driver_data;
+	idx = s3c_freq->freq_table[index].driver_data;
 
 	if (idx == SOURCE_HCLK)
 		to_dvs = 1;
@@ -256,24 +237,13 @@
 		goto out;
 	}
 
-	freqs.flags = 0;
-	freqs.old = s3c_freq->is_dvs ? FREQ_DVS
-				     : clk_get_rate(s3c_freq->armclk) / 1000;
-
 	/* When leaving dvs mode, always switch the armdiv to the hclk rate
 	 * The S3C2416 has stability issues when switching directly to
 	 * higher frequencies.
 	 */
-	freqs.new = (s3c_freq->is_dvs && !to_dvs)
+	new_freq = (s3c_freq->is_dvs && !to_dvs)
 				? clk_get_rate(s3c_freq->hclk) / 1000
-				: s3c_freq->freq_table[i].frequency;
-
-	pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
-
-	if (!to_dvs && freqs.old == freqs.new)
-		goto out;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+				: s3c_freq->freq_table[index].frequency;
 
 	if (to_dvs) {
 		pr_debug("cpufreq: enter dvs\n");
@@ -282,12 +252,10 @@
 		pr_debug("cpufreq: leave dvs\n");
 		ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
 	} else {
-		pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new);
-		ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
+		pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+		ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 out:
 	mutex_unlock(&cpufreq_lock);
 
@@ -486,20 +454,14 @@
 		freq++;
 	}
 
-	policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
 	/* Datasheet says PLL stabilisation time must be at least 300us,
 	 * so add some fudge. (reference in LOCKCON0 register description)
 	 */
-	policy->cpuinfo.transition_latency = (500 * 1000) +
-					     s3c_freq->regulator_latency;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+	ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+			(500 * 1000) + s3c_freq->regulator_latency);
 	if (ret)
 		goto err_freq_table;
 
-	cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
 	register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
 
 	return 0;
@@ -518,19 +480,14 @@
 	return ret;
 }
 
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver s3c2416_cpufreq_driver = {
 	.flags          = 0,
-	.verify		= s3c2416_cpufreq_verify_speed,
-	.target		= s3c2416_cpufreq_set_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= s3c2416_cpufreq_set_target,
 	.get		= s3c2416_cpufreq_get_speed,
 	.init		= s3c2416_cpufreq_driver_init,
 	.name		= "s3c2416",
-	.attr		= s3c2416_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init s3c2416_cpufreq_init(void)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index b0f343f..4850882 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -373,23 +373,7 @@
 
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
-	printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
-	if (policy->cpu != 0)
-		return -EINVAL;
-
-	policy->cur = s3c_cpufreq_get(0);
-	policy->min = policy->cpuinfo.min_freq = 0;
-	policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-	/* feed the latency information from the cpu driver */
-	policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
-	if (ftab)
-		cpufreq_frequency_table_cpuinfo(policy, ftab);
-
-	return 0;
+	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
 }
 
 static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@
 	return 0;
 }
 
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	if (policy->cpu != 0)
-		return -EINVAL;
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static struct cpufreq_frequency_table suspend_pll;
 static unsigned int suspend_freq;
@@ -473,7 +449,6 @@
 
 static struct cpufreq_driver s3c24xx_driver = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= s3c_cpufreq_verify,
 	.target		= s3c_cpufreq_target,
 	.get		= s3c_cpufreq_get,
 	.init		= s3c_cpufreq_init,
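
s3c24xx shrinks its init to one cpufreq_generic_init() call (s3c2416, s3c64xx, s5pv210, sa11x0 and spear do the same elsewhere in the series). That helper amounts to the following; a paraphrase of the 3.13 implementation, named demo_* to make clear it is a restatement rather than the kernel source:

#include <linux/cpufreq.h>

static int demo_generic_init(struct cpufreq_policy *policy,
			     struct cpufreq_frequency_table *table,
			     unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/* Callers are single-clock platforms: all CPUs share the policy. */
	cpumask_setall(policy->cpus);

	return 0;
}
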
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 15631f9..67e302e 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -54,14 +54,6 @@
 };
 #endif
 
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-	if (policy->cpu != 0)
-		return -EINVAL;
-
-	return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
 static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
 {
 	if (cpu != 0)
@@ -71,66 +63,48 @@
 }
 
 static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
-				      unsigned int target_freq,
-				      unsigned int relation)
+				      unsigned int index)
 {
-	int ret;
-	unsigned int i;
-	struct cpufreq_freqs freqs;
 	struct s3c64xx_dvfs *dvfs;
+	unsigned int old_freq, new_freq;
+	int ret;
 
-	ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
-					     target_freq, relation, &i);
-	if (ret != 0)
-		return ret;
-
-	freqs.old = clk_get_rate(armclk) / 1000;
-	freqs.new = s3c64xx_freq_table[i].frequency;
-	freqs.flags = 0;
-	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	old_freq = clk_get_rate(armclk) / 1000;
+	new_freq = s3c64xx_freq_table[index].frequency;
+	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
 
 #ifdef CONFIG_REGULATOR
-	if (vddarm && freqs.new > freqs.old) {
+	if (vddarm && new_freq > old_freq) {
 		ret = regulator_set_voltage(vddarm,
 					    dvfs->vddarm_min,
 					    dvfs->vddarm_max);
 		if (ret != 0) {
 			pr_err("Failed to set VDDARM for %dkHz: %d\n",
-			       freqs.new, ret);
-			freqs.new = freqs.old;
-			goto post_notify;
+			       new_freq, ret);
+			return ret;
 		}
 	}
 #endif
 
-	ret = clk_set_rate(armclk, freqs.new * 1000);
+	ret = clk_set_rate(armclk, new_freq * 1000);
 	if (ret < 0) {
 		pr_err("Failed to set rate %dkHz: %d\n",
-		       freqs.new, ret);
-		freqs.new = freqs.old;
+		       new_freq, ret);
+		return ret;
 	}
 
-post_notify:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	if (ret)
-		goto err;
-
 #ifdef CONFIG_REGULATOR
-	if (vddarm && freqs.new < freqs.old) {
+	if (vddarm && new_freq < old_freq) {
 		ret = regulator_set_voltage(vddarm,
 					    dvfs->vddarm_min,
 					    dvfs->vddarm_max);
 		if (ret != 0) {
 			pr_err("Failed to set VDDARM for %dkHz: %d\n",
-			       freqs.new, ret);
-			goto err_clk;
+			       new_freq, ret);
+			if (clk_set_rate(armclk, old_freq * 1000) < 0)
+				pr_err("Failed to restore original clock rate\n");
+
+			return ret;
 		}
 	}
 #endif
@@ -139,14 +113,6 @@
 		 clk_get_rate(armclk) / 1000);
 
 	return 0;
-
-err_clk:
-	if (clk_set_rate(armclk, freqs.old * 1000) < 0)
-		pr_err("Failed to restore original clock rate\n");
-err:
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	return ret;
 }
 
 #ifdef CONFIG_REGULATOR
@@ -243,15 +209,12 @@
 		freq++;
 	}
 
-	policy->cur = clk_get_rate(armclk) / 1000;
-
 	/* Datasheet says PLL stabilisation time (if we were to use
 	 * the PLLs, which we don't currently) is ~300us worst case,
 	 * but add some fudge.
 	 */
-	policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+	ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+			(500 * 1000) + regulator_latency);
 	if (ret != 0) {
 		pr_err("Failed to configure frequency table: %d\n",
 		       ret);
@@ -264,8 +227,8 @@
 
 static struct cpufreq_driver s3c64xx_cpufreq_driver = {
 	.flags          = 0,
-	.verify		= s3c64xx_cpufreq_verify_speed,
-	.target		= s3c64xx_cpufreq_set_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= s3c64xx_cpufreq_set_target,
 	.get		= s3c64xx_cpufreq_get_speed,
 	.init		= s3c64xx_cpufreq_driver_init,
 	.name		= "s3c",
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5c77570..e3973da 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -26,7 +26,6 @@
 static struct clk *cpu_clk;
 static struct clk *dmc0_clk;
 static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
 static DEFINE_MUTEX(set_freq_lock);
 
 /* APLL M,P,S values for 1G/800Mhz */
@@ -36,16 +35,7 @@
 /* Use 800MHz when entering sleep mode */
 #define SLEEP_FREQ	(800 * 1000)
 
-/*
- * relation has an additional symantics other than the standard of cpufreq
- * DISALBE_FURTHER_CPUFREQ: disable further access to target
- * ENABLE_FURTUER_CPUFREQ: enable access to target
- */
-enum cpufreq_access {
-	DISABLE_FURTHER_CPUFREQ = 0x10,
-	ENABLE_FURTHER_CPUFREQ = 0x20,
-};
-
+/* Tracks whether the cpu frequency can still be updated */
 static bool no_cpufreq_access;
 
 /*
@@ -174,14 +164,6 @@
 	__raw_writel(tmp1, reg);
 }
 
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
-	if (policy->cpu)
-		return -EINVAL;
-
-	return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
 static unsigned int s5pv210_getspeed(unsigned int cpu)
 {
 	if (cpu)
@@ -190,22 +172,18 @@
 	return clk_get_rate(cpu_clk) / 1000;
 }
 
-static int s5pv210_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned long reg;
-	unsigned int index, priv_index;
+	unsigned int priv_index;
 	unsigned int pll_changing = 0;
 	unsigned int bus_speed_changing = 0;
+	unsigned int old_freq, new_freq;
 	int arm_volt, int_volt;
 	int ret = 0;
 
 	mutex_lock(&set_freq_lock);
 
-	if (relation & ENABLE_FURTHER_CPUFREQ)
-		no_cpufreq_access = false;
-
 	if (no_cpufreq_access) {
 #ifdef CONFIG_PM_VERBOSE
 		pr_err("%s:%d denied access to %s as it is disabled"
@@ -215,27 +193,13 @@
 		goto exit;
 	}
 
-	if (relation & DISABLE_FURTHER_CPUFREQ)
-		no_cpufreq_access = true;
-
-	relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
-
-	freqs.old = s5pv210_getspeed(0);
-
-	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-					   target_freq, relation, &index)) {
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	freqs.new = s5pv210_freq_table[index].frequency;
-
-	if (freqs.new == freqs.old)
-		goto exit;
+	old_freq = s5pv210_getspeed(0);
+	new_freq = s5pv210_freq_table[index].frequency;
 
 	/* Finding current running level index */
 	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-					   freqs.old, relation, &priv_index)) {
+					   old_freq, CPUFREQ_RELATION_H,
+					   &priv_index)) {
 		ret = -EINVAL;
 		goto exit;
 	}
@@ -243,7 +207,7 @@
 	arm_volt = dvs_conf[index].arm_volt;
 	int_volt = dvs_conf[index].int_volt;
 
-	if (freqs.new > freqs.old) {
+	if (new_freq > old_freq) {
 		ret = regulator_set_voltage(arm_regulator,
 				arm_volt, arm_volt_max);
 		if (ret)
@@ -255,8 +219,6 @@
 			goto exit;
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/* Check if there need to change PLL */
 	if ((index == L0) || (priv_index == L0))
 		pll_changing = 1;
@@ -467,9 +429,7 @@
 		}
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
-	if (freqs.new < freqs.old) {
+	if (new_freq < old_freq) {
 		regulator_set_voltage(int_regulator,
 				int_volt, int_volt_max);
 
@@ -551,13 +511,7 @@
 	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
 	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
-	policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
-	cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
-	policy->cpuinfo.transition_latency = 40000;
-
-	return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+	return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
 	clk_put(dmc0_clk);
@@ -573,16 +527,18 @@
 
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
-		ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-					    DISABLE_FURTHER_CPUFREQ);
+		ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
 		if (ret < 0)
 			return NOTIFY_BAD;
 
+		/* Disable further cpu frequency updates */
+		no_cpufreq_access = true;
 		return NOTIFY_OK;
 	case PM_POST_RESTORE:
 	case PM_POST_SUSPEND:
-		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-				      ENABLE_FURTHER_CPUFREQ);
+		/* Re-enable cpu frequency updates */
+		no_cpufreq_access = false;
+		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
 
 		return NOTIFY_OK;
 	}
@@ -595,18 +551,18 @@
 {
 	int ret;
 
-	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
-				    DISABLE_FURTHER_CPUFREQ);
+	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
 	if (ret < 0)
 		return NOTIFY_BAD;
 
+	no_cpufreq_access = true;
 	return NOTIFY_DONE;
 }
 
 static struct cpufreq_driver s5pv210_driver = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= s5pv210_verify_speed,
-	.target		= s5pv210_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= s5pv210_target,
 	.get		= s5pv210_getspeed,
 	.init		= s5pv210_cpu_init,
 	.name		= "s5pv210",
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index cff18e8..623da74 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -177,60 +177,33 @@
 	}
 }
 
-static int sa1100_target(struct cpufreq_policy *policy,
-			 unsigned int target_freq,
-			 unsigned int relation)
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
 {
 	unsigned int cur = sa11x0_getspeed(0);
-	unsigned int new_ppcr;
-	struct cpufreq_freqs freqs;
+	unsigned int new_freq;
 
-	new_ppcr = sa11x0_freq_to_ppcr(target_freq);
-	switch (relation) {
-	case CPUFREQ_RELATION_L:
-		if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
-			new_ppcr--;
-		break;
-	case CPUFREQ_RELATION_H:
-		if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
-		    (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
-			new_ppcr--;
-		break;
-	}
+	new_freq = sa11x0_freq_table[ppcr].frequency;
 
-	freqs.old = cur;
-	freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+	if (new_freq > cur)
+		sa1100_update_dram_timings(cur, new_freq);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	PPCR = ppcr;
 
-	if (freqs.new > cur)
-		sa1100_update_dram_timings(cur, freqs.new);
-
-	PPCR = new_ppcr;
-
-	if (freqs.new < cur)
-		sa1100_update_dram_timings(cur, freqs.new);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	if (new_freq < cur)
+		sa1100_update_dram_timings(cur, new_freq);
 
 	return 0;
 }
 
 static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu != 0)
-		return -EINVAL;
-	policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-	policy->cpuinfo.min_freq = 59000;
-	policy->cpuinfo.max_freq = 287000;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 static struct cpufreq_driver sa1100_driver __refdata = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= sa11x0_verify_speed,
-	.target		= sa1100_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= sa1100_target,
 	.get		= sa11x0_getspeed,
 	.init		= sa1100_cpu_init,
 	.name		= "sa1100",
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 39c90b6..2c2b2e6 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -229,36 +229,14 @@
 /*
  * Ok, set the CPU frequency.
  */
-static int sa1110_target(struct cpufreq_policy *policy,
-			 unsigned int target_freq,
-			 unsigned int relation)
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
 {
 	struct sdram_params *sdram = &sdram_params;
-	struct cpufreq_freqs freqs;
 	struct sdram_info sd;
 	unsigned long flags;
-	unsigned int ppcr, unused;
+	unsigned int unused;
 
-	switch (relation) {
-	case CPUFREQ_RELATION_L:
-		ppcr = sa11x0_freq_to_ppcr(target_freq);
-		if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
-			ppcr--;
-		break;
-	case CPUFREQ_RELATION_H:
-		ppcr = sa11x0_freq_to_ppcr(target_freq);
-		if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
-		    (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
-			ppcr--;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	freqs.old = sa11x0_getspeed(0);
-	freqs.new = sa11x0_ppcr_to_freq(ppcr);
-
-	sdram_calculate_timing(&sd, freqs.new, sdram);
+	sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
 
 #if 0
 	/*
@@ -277,8 +255,6 @@
 	sd.mdcas[2] = 0xaaaaaaaa;
 #endif
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	/*
 	 * The clock could be going away for some time.  Set the SDRAMs
 	 * to refresh rapidly (every 64 memory clock cycles).  To get
@@ -323,30 +299,22 @@
 	/*
 	 * Now, return the SDRAM refresh back to normal.
 	 */
-	sdram_update_refresh(freqs.new, sdram);
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
 
 	return 0;
 }
 
 static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu != 0)
-		return -EINVAL;
-	policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-	policy->cpuinfo.min_freq = 59000;
-	policy->cpuinfo.max_freq = 287000;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	return 0;
+	return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 /* sa1110_driver needs __refdata because it must remain after init registers
  * it with cpufreq_register_driver() */
 static struct cpufreq_driver sa1110_driver __refdata = {
 	.flags		= CPUFREQ_STICKY,
-	.verify		= sa11x0_verify_speed,
-	.target		= sa1110_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= sa1110_target,
 	.get		= sa11x0_getspeed,
 	.init		= sa1110_cpu_init,
 	.name		= "sa1110",
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index d6f6c6f..6adb354 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,21 +53,11 @@
 	}
 }
 
-static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
-		unsigned int state)
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
 {
 
-	struct cpufreq_freqs	freqs;
 	u8 clockspeed_reg;
 
-	freqs.old = sc520_freq_get_cpu_frequency(0);
-	freqs.new = sc520_freq_table[state].frequency;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	pr_debug("attempting to set frequency to %i kHz\n",
-			sc520_freq_table[state].frequency);
-
 	local_irq_disable();
 
 	clockspeed_reg = *cpuctl & ~0x03;
@@ -75,30 +65,9 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
-static int sc520_freq_target(struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
-{
-	unsigned int newstate = 0;
-
-	if (cpufreq_frequency_table_target(policy, sc520_freq_table,
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	sc520_freq_set_cpu_state(policy, newstate);
-
 	return 0;
 }
 
-
 /*
  *	Module init and exit code
  */
@@ -106,7 +75,6 @@
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *c = &cpu_data(0);
-	int result;
 
 	/* capability check */
 	if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +83,19 @@
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-	policy->cur = sc520_freq_get_cpu_frequency(0);
 
-	result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
-	return 0;
+	return cpufreq_table_validate_and_show(policy, sc520_freq_table);
 }
 
 
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-
-static struct freq_attr *sc520_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver sc520_freq_driver = {
 	.get	= sc520_freq_get_cpu_frequency,
-	.verify	= sc520_freq_verify,
-	.target	= sc520_freq_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = sc520_freq_target,
 	.init	= sc520_freq_cpu_init,
-	.exit	= sc520_freq_cpu_exit,
+	.exit	= cpufreq_generic_exit,
 	.name	= "sc520_freq",
-	.attr	= sc520_freq_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index ffc6d24..387af12 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,15 +87,12 @@
 	if (freq_table)
 		return cpufreq_frequency_table_verify(policy, freq_table);
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_cpu_limits(policy);
 
 	policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
 	policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -114,15 +111,13 @@
 		return PTR_ERR(cpuclk);
 	}
 
-	policy->cur = sh_cpufreq_get(cpu);
-
 	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
 	if (freq_table) {
 		int result;
 
-		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-		if (!result)
-			cpufreq_frequency_table_get_attr(freq_table, cpu);
+		result = cpufreq_table_validate_and_show(policy, freq_table);
+		if (result)
+			return result;
 	} else {
 		dev_notice(dev, "no frequency table found, falling back "
 			   "to rate rounding.\n");
@@ -154,11 +149,6 @@
 	return 0;
 }
 
-static struct freq_attr *sh_freq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver sh_cpufreq_driver = {
 	.name		= "sh",
 	.get		= sh_cpufreq_get,
@@ -166,7 +156,7 @@
 	.verify		= sh_cpufreq_verify,
 	.init		= sh_cpufreq_cpu_init,
 	.exit		= sh_cpufreq_cpu_exit,
-	.attr		= sh_freq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init sh_cpufreq_module_init(void)
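
sh-cpufreq keeps a custom ->verify() because it may have no frequency table at all and instead clamps to what the clock framework can round to. The cpufreq_verify_within_cpu_limits() helper it now uses is only a thin wrapper; paraphrasing the 3.13 header:

#include <linux/cpufreq.h>

/* Paraphrase of the helper from <linux/cpufreq.h>: clamp the policy
 * to the CPU's hardware limits. */
static inline void
demo_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
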
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index cf5bc2c..62aa23e 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -245,14 +245,12 @@
 	return clock_tick / estar_to_divisor(estar);
 }
 
-static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
-		unsigned int index)
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq;
 	unsigned long clock_tick, divisor, old_divisor, estar;
 	cpumask_t cpus_allowed;
-	struct cpufreq_freqs freqs;
 
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -266,41 +264,15 @@
 
 	old_divisor = estar_to_divisor(estar);
 
-	freqs.old = clock_tick / old_divisor;
-	freqs.new = new_freq;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	if (old_divisor != divisor)
 		us2e_transition(estar, new_bits, clock_tick * 1000,
 				old_divisor, divisor);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us2e_freq_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
-{
-	unsigned int new_index = 0;
-
-	if (cpufreq_frequency_table_target(policy,
-					   &us2e_freq_table[policy->cpu].table[0],
-					   target_freq, relation, &new_index))
-		return -EINVAL;
-
-	us2e_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
 
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      &us2e_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
@@ -324,13 +296,15 @@
 	policy->cpuinfo.transition_latency = 0;
 	policy->cur = clock_tick;
 
-	return cpufreq_frequency_table_cpuinfo(policy, table);
+	return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-	if (cpufreq_us2e_driver)
-		us2e_set_cpu_divider_index(policy, 0);
+	if (cpufreq_us2e_driver) {
+		cpufreq_frequency_table_put_attr(policy->cpu);
+		us2e_freq_target(policy, 0);
+	}
 
 	return 0;
 }
@@ -361,8 +335,8 @@
 			goto err_out;
 
 		driver->init = us2e_freq_cpu_init;
-		driver->verify = us2e_freq_verify;
-		driver->target = us2e_freq_target;
+		driver->verify = cpufreq_generic_frequency_table_verify;
+		driver->target_index = us2e_freq_target;
 		driver->get = us2e_freq_get;
 		driver->exit = us2e_freq_cpu_exit;
 		strcpy(driver->name, "UltraSPARC-IIe");
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index ac76b48..724ffbd 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -93,13 +93,11 @@
 	return ret;
 }
 
-static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
-		unsigned int index)
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq, reg;
 	cpumask_t cpus_allowed;
-	struct cpufreq_freqs freqs;
 
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -125,43 +123,15 @@
 
 	reg = read_safari_cfg();
 
-	freqs.old = get_current_freq(cpu, reg);
-	freqs.new = new_freq;
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	reg &= ~SAFARI_CFG_DIV_MASK;
 	reg |= new_bits;
 	write_safari_cfg(reg);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us3_freq_target(struct cpufreq_policy *policy,
-			  unsigned int target_freq,
-			  unsigned int relation)
-{
-	unsigned int new_index = 0;
-
-	if (cpufreq_frequency_table_target(policy,
-					   &us3_freq_table[policy->cpu].table[0],
-					   target_freq,
-					   relation,
-					   &new_index))
-		return -EINVAL;
-
-	us3_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
 
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-					      &us3_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
@@ -181,13 +151,15 @@
 	policy->cpuinfo.transition_latency = 0;
 	policy->cur = clock_tick;
 
-	return cpufreq_frequency_table_cpuinfo(policy, table);
+	return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-	if (cpufreq_us3_driver)
-		us3_set_cpu_divider_index(policy, 0);
+	if (cpufreq_us3_driver) {
+		cpufreq_frequency_table_put_attr(policy->cpu);
+		us3_freq_target(policy, 0);
+	}
 
 	return 0;
 }
@@ -222,8 +194,8 @@
 			goto err_out;
 
 		driver->init = us3_freq_cpu_init;
-		driver->verify = us3_freq_verify;
-		driver->target = us3_freq_target;
+		driver->verify = cpufreq_generic_frequency_table_verify;
+		driver->target_index = us3_freq_target;
 		driver->get = us3_freq_get;
 		driver->exit = us3_freq_cpu_exit;
 		strcpy(driver->name, "UltraSPARC-III");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 3f41816..d02ccd1 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@
 	u32 cnt;
 } spear_cpufreq;
 
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
 static unsigned int spear_cpufreq_get(unsigned int cpu)
 {
 	return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -110,20 +105,14 @@
 }
 
 static int spear_cpufreq_target(struct cpufreq_policy *policy,
-		unsigned int target_freq, unsigned int relation)
+		unsigned int index)
 {
-	struct cpufreq_freqs freqs;
 	long newfreq;
 	struct clk *srcclk;
-	int index, ret, mult = 1;
-
-	if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
-				target_freq, relation, &index))
-		return -EINVAL;
-
-	freqs.old = spear_cpufreq_get(0);
+	int ret, mult = 1;
 
 	newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
 	if (of_machine_is_compatible("st,spear1340")) {
 		/*
 		 * SPEAr1340 is special in the sense that due to the possibility
@@ -154,65 +143,32 @@
 		return newfreq;
 	}
 
-	freqs.new = newfreq / 1000;
-	freqs.new /= mult;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
 	if (mult == 2)
 		ret = spear1340_set_cpu_rate(srcclk, newfreq);
 	else
 		ret = clk_set_rate(spear_cpufreq.clk, newfreq);
 
-	/* Get current rate after clk_set_rate, in case of failure */
-	if (ret) {
+	if (ret)
 		pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
-		freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
-	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return ret;
 }
 
 static int spear_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
-
-	ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
-	if (ret) {
-		pr_err("cpufreq_frequency_table_cpuinfo() failed");
-		return ret;
-	}
-
-	cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
-	policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
-	policy->cur = spear_cpufreq_get(0);
-
-	cpumask_setall(policy->cpus);
-
-	return 0;
+	return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+			spear_cpufreq.transition_latency);
 }
 
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *spear_cpufreq_attr[] = {
-	 &cpufreq_freq_attr_scaling_available_freqs,
-	 NULL,
-};
-
 static struct cpufreq_driver spear_cpufreq_driver = {
 	.name		= "cpufreq-spear",
 	.flags		= CPUFREQ_STICKY,
-	.verify		= spear_cpufreq_verify,
-	.target		= spear_cpufreq_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= spear_cpufreq_target,
 	.get		= spear_cpufreq_get,
 	.init		= spear_cpufreq_init,
-	.exit		= spear_cpufreq_exit,
-	.attr		= spear_cpufreq_attr,
+	.exit		= cpufreq_generic_exit,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int spear_cpufreq_driver_init(void)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index f897d51..4e1daca 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -343,9 +343,7 @@
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
-	unsigned freq;
 	unsigned l, h;
-	int ret;
 	int i;
 
 	/* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@
 		return -ENODEV;
 	}
 
-	if (centrino_cpu_init_table(policy)) {
+	if (centrino_cpu_init_table(policy))
 		return -ENODEV;
-	}
 
 	/* Check to see if Enhanced SpeedStep is enabled, and try to
 	   enable it if not. */
@@ -395,22 +392,11 @@
 		}
 	}
 
-	freq = get_cur_freq(policy->cpu);
 	policy->cpuinfo.transition_latency = 10000;
 						/* 10uS transition latency */
-	policy->cur = freq;
 
-	pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
-
-	ret = cpufreq_frequency_table_cpuinfo(policy,
+	return cpufreq_table_validate_and_show(policy,
 		per_cpu(centrino_model, policy->cpu)->op_points);
-	if (ret)
-		return (ret);
-
-	cpufreq_frequency_table_get_attr(
-		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
-	return 0;
 }
 
 static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -428,36 +414,18 @@
 }
 
 /**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy,
-			per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
-/**
  * centrino_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
  *
  * Sets a new CPUFreq policy.
  */
-static int centrino_target (struct cpufreq_policy *policy,
-			    unsigned int target_freq,
-			    unsigned int relation)
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int    newstate = 0;
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
-	struct cpufreq_freqs	freqs;
 	int			retval = 0;
-	unsigned int		j, first_cpu, tmp;
+	unsigned int		j, first_cpu;
+	struct cpufreq_frequency_table *op_points;
 	cpumask_var_t covered_cpus;
 
 	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -468,16 +436,8 @@
 		goto out;
 	}
 
-	if (unlikely(cpufreq_frequency_table_target(policy,
-			per_cpu(centrino_model, cpu)->op_points,
-			target_freq,
-			relation,
-			&newstate))) {
-		retval = -EINVAL;
-		goto out;
-	}
-
 	first_cpu = 1;
+	op_points = &per_cpu(centrino_model, cpu)->op_points[index];
 	for_each_cpu(j, policy->cpus) {
 		int good_cpu;
 
@@ -501,7 +461,7 @@
 			break;
 		}
 
-		msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
+		msr = op_points->driver_data;
 
 		if (first_cpu) {
 			rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
@@ -512,15 +472,6 @@
 				goto out;
 			}
 
-			freqs.old = extract_clock(oldmsr, cpu, 0);
-			freqs.new = extract_clock(msr, cpu, 0);
-
-			pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
-				target_freq, freqs.old, freqs.new, msr);
-
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_PRECHANGE);
-
 			first_cpu = 0;
 			/* all but 16 LSB are reserved, treat them with care */
 			oldmsr &= ~0xffff;
@@ -535,8 +486,6 @@
 		cpumask_set_cpu(j, covered_cpus);
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	if (unlikely(retval)) {
 		/*
 		 * We have failed halfway through the frequency change.
@@ -547,12 +496,6 @@
 
 		for_each_cpu(j, covered_cpus)
 			wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
-
-		tmp = freqs.new;
-		freqs.new = freqs.old;
-		freqs.old = tmp;
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	}
 	retval = 0;
 
@@ -561,20 +504,15 @@
 	return retval;
 }
 
-static struct freq_attr* centrino_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver centrino_driver = {
 	.name		= "centrino", /* should be speedstep-centrino,
 					 but there's a 16 char limit */
 	.init		= centrino_cpu_init,
 	.exit		= centrino_cpu_exit,
-	.verify		= centrino_verify,
-	.target		= centrino_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= centrino_target,
 	.get		= get_cur_freq,
-	.attr           = centrino_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 /*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 5355abb..7639b2b 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -251,56 +251,23 @@
 /**
  * speedstep_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
  *
  * Sets a new CPUFreq policy.
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0, policy_cpu;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
+	unsigned int policy_cpu;
 
 	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
-	freqs.old = speedstep_get(policy_cpu);
-	freqs.new = speedstep_freqs[newstate].frequency;
 
-	pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
-	/* no transition necessary */
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-	smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+	smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
 				 true);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
 	return 0;
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
 struct get_freqs {
 	struct cpufreq_policy *policy;
 	int ret;
@@ -320,8 +287,7 @@
 
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-	int result;
-	unsigned int policy_cpu, speed;
+	unsigned int policy_cpu;
 	struct get_freqs gf;
 
 	/* only run on CPU to be set, or on its sibling */
@@ -336,49 +302,18 @@
 	if (gf.ret)
 		return gf.ret;
 
-	/* get current speed setting */
-	speed = speedstep_get(policy_cpu);
-	if (!speed)
-		return -EIO;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
-		(speed / 1000));
-
-	/* cpuinfo and default policy values */
-	policy->cur = speed;
-
-	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-	return 0;
+	return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
-}
-
-static struct freq_attr *speedstep_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
-
 static struct cpufreq_driver speedstep_driver = {
 	.name	= "speedstep-ich",
-	.verify	= speedstep_verify,
-	.target	= speedstep_target,
+	.verify	= cpufreq_generic_frequency_table_verify,
+	.target_index = speedstep_target,
 	.init	= speedstep_cpu_init,
-	.exit	= speedstep_cpu_exit,
+	.exit	= cpufreq_generic_exit,
 	.get	= speedstep_get,
-	.attr	= speedstep_attr,
+	.attr	= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
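
With the table lookup hoisted into the core, speedstep_target() reduces to routing the index to a CPU that is allowed to touch the chipset. The underlying primitive is smp_call_function_single(); an illustrative sketch, where demo_set_state stands in for the driver's _speedstep_set_state():

#include <linux/cpufreq.h>
#include <linux/smp.h>

static void demo_set_state(void *data)
{
	unsigned int index = *(unsigned int *)data;

	(void)index;	/* program the chipset for 'index' here */
}

static int demo_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

	/* wait=true: do not return before the remote write completes */
	smp_call_function_single(cpu, demo_set_state, &index, true);
	return 0;
}
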
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index abfba4f..0f5326d 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -235,52 +235,21 @@
 /**
  * speedstep_target - set a new CPUFreq policy
  * @policy: new policy
- * @target_freq: new freq
- * @relation:
+ * @index: index of new freq
  *
  * Sets a new CPUFreq policy/freq.
  */
-static int speedstep_target(struct cpufreq_policy *policy,
-			unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int newstate = 0;
-	struct cpufreq_freqs freqs;
-
-	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
-				target_freq, relation, &newstate))
-		return -EINVAL;
-
-	freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
-	freqs.new = speedstep_freqs[newstate].frequency;
-
-	if (freqs.old == freqs.new)
-		return 0;
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	speedstep_set_state(newstate);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	speedstep_set_state(index);
 
 	return 0;
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
 	int result;
-	unsigned int speed, state;
 	unsigned int *low, *high;
 
 	/* capability check */
@@ -316,32 +285,8 @@
 			pr_debug("workaround worked.\n");
 	}
 
-	/* get current speed setting */
-	state = speedstep_get_state();
-	speed = speedstep_freqs[state].frequency;
-
-	pr_debug("currently at %s speed setting - %i MHz\n",
-		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-		? "low" : "high",
-		(speed / 1000));
-
-	/* cpuinfo and default policy values */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	policy->cur = speed;
-
-	result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-	if (result)
-		return result;
-
-	cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-	return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_frequency_table_put_attr(policy->cpu);
-	return 0;
+	return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +307,15 @@
 	return result;
 }
 
-static struct freq_attr *speedstep_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver speedstep_driver = {
 	.name		= "speedstep-smi",
-	.verify		= speedstep_verify,
-	.target		= speedstep_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= speedstep_target,
 	.init		= speedstep_cpu_init,
-	.exit		= speedstep_cpu_exit,
+	.exit		= cpufreq_generic_exit,
 	.get		= speedstep_get,
 	.resume		= speedstep_resume,
-	.attr		= speedstep_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index a7b876f..f42df7e 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -51,11 +51,6 @@
 static DEFINE_MUTEX(tegra_cpu_lock);
 static bool is_suspended;
 
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int tegra_getspeed(unsigned int cpu)
 {
 	unsigned long rate;
@@ -107,12 +102,8 @@
 		unsigned long rate)
 {
 	int ret = 0;
-	struct cpufreq_freqs freqs;
 
-	freqs.old = tegra_getspeed(0);
-	freqs.new = rate;
-
-	if (freqs.old == freqs.new)
+	if (tegra_getspeed(0) == rate)
 		return ret;
 
 	/*
@@ -126,21 +117,10 @@
 	else
 		clk_set_rate(emc_clk, 100000000);  /* emc 50Mhz */
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
-	printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
-	       freqs.old, freqs.new);
-#endif
-
-	ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
-	if (ret) {
-		pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
-			freqs.new);
-		freqs.new = freqs.old;
-	}
-
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	ret = tegra_cpu_clk_set_rate(rate * 1000);
+	if (ret)
+		pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
+			rate);
 
 	return ret;
 }
@@ -155,11 +135,8 @@
 	return rate;
 }
 
-static int tegra_target(struct cpufreq_policy *policy,
-		       unsigned int target_freq,
-		       unsigned int relation)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned int idx;
 	unsigned int freq;
 	int ret = 0;
 
@@ -170,10 +147,7 @@
 		goto out;
 	}
 
-	cpufreq_frequency_table_target(policy, freq_table, target_freq,
-		relation, &idx);
-
-	freq = freq_table[idx].frequency;
+	freq = freq_table[index].frequency;
 
 	target_cpu_speed[policy->cpu] = freq;
 
@@ -209,21 +183,23 @@
 
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
+	int ret;
+
 	if (policy->cpu >= NUM_CPUS)
 		return -EINVAL;
 
 	clk_prepare_enable(emc_clk);
 	clk_prepare_enable(cpu_clk);
 
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-	policy->cur = tegra_getspeed(policy->cpu);
-	target_cpu_speed[policy->cpu] = policy->cur;
+	target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
 
 	/* FIXME: what's the actual transition time? */
-	policy->cpuinfo.transition_latency = 300 * 1000;
-
-	cpumask_copy(policy->cpus, cpu_possible_mask);
+	ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+	if (ret) {
+		clk_disable_unprepare(cpu_clk);
+		clk_disable_unprepare(emc_clk);
+		return ret;
+	}
 
 	if (policy->cpu == 0)
 		register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +209,20 @@
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	clk_disable_unprepare(cpu_clk);
 	clk_disable_unprepare(emc_clk);
 	return 0;
 }
 
-static struct freq_attr *tegra_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
-
 static struct cpufreq_driver tegra_cpufreq_driver = {
-	.verify		= tegra_verify_speed,
-	.target		= tegra_target,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= tegra_target,
 	.get		= tegra_getspeed,
 	.init		= tegra_cpu_init,
 	.exit		= tegra_cpu_exit,
 	.name		= "tegra",
-	.attr		= tegra_cpufreq_attr,
+	.attr		= cpufreq_generic_attr,
 };
 
 static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index b225f04..653ae29 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -29,9 +29,7 @@
 	if (policy->cpu)
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy,
-			policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
 }
 
@@ -68,7 +66,6 @@
 {
 	if (policy->cpu != 0)
 		return -EINVAL;
-	policy->cur = ucv2_getspeed(0);
 	policy->min = policy->cpuinfo.min_freq = 250000;
 	policy->max = policy->cpuinfo.max_freq = 1000000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 0000000..7f7c9c0
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+	/*
+	 * platform specific SPC code must initialise the opp table
+	 * so just check if the OPP count is non-zero
+	 */
+	return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+	return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+	.name	= "vexpress-spc",
+	.get_transition_latency = ve_spc_get_transition_latency,
+	.init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+	return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+	bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+	return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+	.driver = {
+		.name	= "vexpress-spc-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ve_spc_cpufreq_probe,
+	.remove		= ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d6f57d5..d988948 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -2,6 +2,17 @@
 # ARM CPU Idle drivers
 #
 
+config ARM_BIG_LITTLE_CPUIDLE
+	bool "Support for ARM big.LITTLE processors"
+	depends on ARCH_VEXPRESS_TC2_PM
+	select ARM_CPU_SUSPEND
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	help
+	  Select this option to enable the CPU idle driver for big.LITTLE
+	  based ARM systems. The driver manages CPU coordination through
+	  MCPM and defines different C-states for little and big cores
+	  through the multiple CPU idle drivers infrastructure.
+
 config ARM_HIGHBANK_CPUIDLE
 	bool "CPU Idle Driver for Calxeda processors"
 	depends on ARM_PSCI
@@ -27,13 +38,9 @@
 	help
 	  Select this to enable cpuidle for ST-E u8500 processors
 
-config CPU_IDLE_BIG_LITTLE
-	bool "Support for ARM big.LITTLE processors"
-	depends on ARCH_VEXPRESS_TC2_PM
-	select ARM_CPU_SUSPEND
-	select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+	bool "Cpu Idle Driver for the AT91 processors"
+	default y
+	depends on ARCH_AT91
 	help
-	  Select this option to enable CPU idle driver for big.LITTLE based
-	  ARM systems. Driver manages CPUs coordination through MCPM and
-	  define different C-states for little and big cores through the
-	  multiple CPU idle drivers infrastructure.
+	  Select this to enable cpuidle for AT91 processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cea5ef5..527be28 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,8 +7,9 @@
 
 ##################################################################################
 # ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE)	+= cpuidle-big_little.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE)	+= cpuidle-calxeda.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE)	+= cpuidle-kirkwood.o
 obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)		+= cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE)	+= cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index f8a8636..e952936 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -147,7 +147,7 @@
  * has returned from this function, the barrier is immediately available for
  * reuse.
  *
- * The atomic variable a must be initialized to 0 before any cpu calls
+ * The atomic variable must be initialized to 0 before any cpu calls
  * this function, will be reset to 0 before any cpu returns from this function.
  *
  * Must only be called from within a coupled idle state handler
diff --git a/arch/arm/mach-at91/cpuidle.c b/drivers/cpuidle/cpuidle-at91.c
similarity index 78%
rename from arch/arm/mach-at91/cpuidle.c
rename to drivers/cpuidle/cpuidle-at91.c
index 4ec6a6d..a077437 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -21,26 +21,17 @@
 #include <linux/export.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
-#include <mach/cpu.h>
-
-#include "pm.h"
 
 #define AT91_MAX_STATES	2
 
+static void (*at91_standby)(void);
+
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			       int index)
 {
-	if (cpu_is_at91rm9200())
-		at91rm9200_standby();
-	else if (cpu_is_at91sam9g45())
-		at91sam9g45_standby();
-	else if (cpu_is_at91sam9263())
-		at91sam9263_standby();
-	else
-		at91sam9_standby();
-
+	at91_standby();
 	return index;
 }
 
@@ -60,9 +51,19 @@
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init at91_init_cpuidle(void)
+static int at91_cpuidle_probe(struct platform_device *dev)
 {
+	at91_standby = (void *)(dev->dev.platform_data);
+
 	return cpuidle_register(&at91_idle_driver, NULL);
 }
 
-device_initcall(at91_init_cpuidle);
+static struct platform_driver at91_cpuidle_driver = {
+	.driver = {
+		.name = "cpuidle-at91",
+		.owner = THIS_MODULE,
+	},
+	.probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
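
With the cpu_is_at91*() checks gone, the driver expects the AT91 platform
code to hand it the appropriate standby routine through platform_data when
registering the device. A sketch of that registration side, with the helper
name assumed:

    #include <linux/platform_device.h>

    static struct platform_device at91_cpuidle_device = {
            .name = "cpuidle-at91",
    };

    /* Assumed helper: called by the AT91 PM setup code once it has
     * selected the standby routine for the SoC it is running on. */
    static void __init at91_cpuidle_register(void (*standby)(void))
    {
            at91_cpuidle_device.dev.platform_data = standby;
            platform_device_register(&at91_cpuidle_device);
    }
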
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index e056465..5e35804 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -111,7 +111,7 @@
 	.state_count = 2,
 };
 
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
 {
 	/* Configure wake up reasons */
 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 38e03a1..aded759 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/cpu_pm.h>
 #include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
@@ -70,14 +70,19 @@
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
 {
-	if (!of_machine_is_compatible("xlnx,zynq-7000"))
-		return -ENODEV;
-
 	pr_info("Xilinx Zynq CpuIdle Driver started\n");
 
 	return cpuidle_register(&zynq_idle_driver, NULL);
 }
 
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+	.driver = {
+		.name = "cpuidle-zynq",
+		.owner = THIS_MODULE,
+	},
+	.probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d75040d..2a991e4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -118,11 +118,9 @@
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv;
 	int next_state, entered_state;
+	bool broadcast;
 
-	if (off)
-		return -ENODEV;
-
-	if (!initialized)
+	if (off || !initialized)
 		return -ENODEV;
 
 	/* check if the device is ready */
@@ -144,9 +142,10 @@
 
 	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
-	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-				   &dev->cpu);
+	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+	if (broadcast)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
 
 	if (cpuidle_state_is_coupled(dev, drv, next_state))
 		entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -154,9 +153,8 @@
 	else
 		entered_state = cpuidle_enter_state(dev, drv, next_state);
 
-	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-				   &dev->cpu);
+	if (broadcast)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
 
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
@@ -228,45 +226,6 @@
 	mutex_unlock(&cpuidle_lock);
 }
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
-{
-	ktime_t	t1, t2;
-	s64 diff;
-
-	t1 = ktime_get();
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-
-	t2 = ktime_get();
-	diff = ktime_to_us(ktime_sub(t2, t1));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	dev->last_residency = (int) diff;
-
-	return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
-	struct cpuidle_state *state = &drv->states[0];
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->flags = 0;
-	state->enter = poll_idle;
-	state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
  * cpuidle_enable_device - enables idle PM for a CPU
  * @dev: the CPU
@@ -296,8 +255,6 @@
 	if (!dev->state_count)
 		dev->state_count = drv->state_count;
 
-	poll_idle_init(drv);
-
 	ret = cpuidle_add_device_sysfs(dev);
 	if (ret)
 		return ret;
@@ -358,12 +315,10 @@
 	module_put(drv->owner);
 }
 
-static int __cpuidle_device_init(struct cpuidle_device *dev)
+static void __cpuidle_device_init(struct cpuidle_device *dev)
 {
 	memset(dev->states_usage, 0, sizeof(dev->states_usage));
 	dev->last_residency = 0;
-
-	return 0;
 }
 
 /**
@@ -385,13 +340,12 @@
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 
 	ret = cpuidle_coupled_register_device(dev);
-	if (ret) {
+	if (ret)
 		__cpuidle_unregister_device(dev);
-		return ret;
-	}
+	else
+		dev->registered = 1;
 
-	dev->registered = 1;
-	return 0;
+	return ret;
 }
 
 /**
@@ -410,9 +364,7 @@
 	if (dev->registered)
 		goto out_unlock;
 
-	ret = __cpuidle_device_init(dev);
-	if (ret)
-		goto out_unlock;
+	__cpuidle_device_init(dev);
 
 	ret = __cpuidle_register_device(dev);
 	if (ret)
@@ -516,7 +468,7 @@
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 		/*
-		 * On multiplatform for ARM, the coupled idle states could
+		 * On multiplatform for ARM, the coupled idle states could be
 		 * enabled in the kernel even if the cpuidle driver does not
 		 * use it. Note, coupled_cpus is a struct copy.
 		 */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 6e11701..06dbe7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -10,6 +10,7 @@
 
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
 #include <linux/clockchips.h>
@@ -56,7 +57,7 @@
 }
 
 /**
- * __cpuidle_set_driver - set per CPU driver variables the the given driver.
+ * __cpuidle_set_driver - set per CPU driver variables for the given driver.
  * @drv: a valid pointer to a struct cpuidle_driver
  *
  * For each CPU in the driver's cpumask, unset the registered driver per CPU
@@ -132,7 +133,7 @@
  * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer
  * @arg: a void pointer used to match the SMP cross call API
  *
- * @arg is used as a value of type 'long' with on of the two values:
+ * @arg is used as a value of type 'long' with one of the two values:
  * - CLOCK_EVT_NOTIFY_BROADCAST_ON
  * - CLOCK_EVT_NOTIFY_BROADCAST_OFF
  *
@@ -149,10 +150,8 @@
 /**
  * __cpuidle_driver_init - initialize the driver's internal data
  * @drv: a valid pointer to a struct cpuidle_driver
- *
- * Returns 0 on success, a negative error code otherwise.
  */
-static int __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 {
 	int i;
 
@@ -169,20 +168,55 @@
 	/*
 	 * Look for the timer stop flag in the different states, so that we know
 	 * if the broadcast timer has to be set up.  The loop is in the reverse
-	 * order, because usually on of the the deeper states has this flag set.
+	 * order, because usually one of the deeper states has this flag set.
 	 */
 	for (i = drv->state_count - 1; i >= 0 ; i--) {
-
-		if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
-			continue;
-
-		drv->bctimer = 1;
-		break;
+		if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+			drv->bctimer = 1;
+			break;
+		}
 	}
-
-	return 0;
 }
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
+{
+	ktime_t	t1, t2;
+	s64 diff;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	dev->last_residency = (int) diff;
+
+	return index;
+}
+
+static void poll_idle_init(struct cpuidle_driver *drv)
+{
+	struct cpuidle_state *state = &drv->states[0];
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = 0;
+	state->enter = poll_idle;
+	state->disabled = false;
+}
+#else
+static void poll_idle_init(struct cpuidle_driver *drv) {}
+#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * __cpuidle_register_driver: register the driver
  * @drv: a valid pointer to a struct cpuidle_driver
@@ -206,9 +240,7 @@
 	if (cpuidle_disabled())
 		return -ENODEV;
 
-	ret = __cpuidle_driver_init(drv);
-	if (ret)
-		return ret;
+	__cpuidle_driver_init(drv);
 
 	ret = __cpuidle_set_driver(drv);
 	if (ret)
@@ -218,6 +250,8 @@
 		on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
 				 (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
 
+	poll_idle_init(drv);
+
 	return 0;
 }
 
@@ -346,10 +380,11 @@
  */
 void cpuidle_driver_unref(void)
 {
-	struct cpuidle_driver *drv = cpuidle_get_driver();
+	struct cpuidle_driver *drv;
 
 	spin_lock(&cpuidle_driver_lock);
 
+	drv = cpuidle_get_driver();
 	if (drv && !WARN_ON(drv->refcnt <= 0))
 		drv->refcnt--;
 
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ea2f8e7..ca89412 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -96,46 +96,3 @@
 
 	return ret;
 }
-
-/**
- * cpuidle_replace_governor - find a replacement governor
- * @exclude_rating: the rating that will be skipped while looking for
- * new governor.
- */
-static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
-{
-	struct cpuidle_governor *gov;
-	struct cpuidle_governor *ret_gov = NULL;
-	unsigned int max_rating = 0;
-
-	list_for_each_entry(gov, &cpuidle_governors, governor_list) {
-		if (gov->rating == exclude_rating)
-			continue;
-		if (gov->rating > max_rating) {
-			max_rating = gov->rating;
-			ret_gov = gov;
-		}
-	}
-
-	return ret_gov;
-}
-
-/**
- * cpuidle_unregister_governor - unregisters a governor
- * @gov: the governor
- */
-void cpuidle_unregister_governor(struct cpuidle_governor *gov)
-{
-	if (!gov)
-		return;
-
-	mutex_lock(&cpuidle_lock);
-	if (gov == cpuidle_curr_governor) {
-		struct cpuidle_governor *new_gov;
-		new_gov = cpuidle_replace_governor(gov->rating);
-		cpuidle_switch_governor(new_gov);
-	}
-	list_del(&gov->governor_list);
-	mutex_unlock(&cpuidle_lock);
-}
-
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 8739cc0..e918b6d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -52,11 +52,12 @@
 				   char *buf)
 {
 	ssize_t ret;
-	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+	struct cpuidle_driver *drv;
 
 	spin_lock(&cpuidle_driver_lock);
-	if (cpuidle_driver)
-		ret = sprintf(buf, "%s\n", cpuidle_driver->name);
+	drv = cpuidle_get_driver();
+	if (drv)
+		ret = sprintf(buf, "%s\n", drv->name);
 	else
 		ret = sprintf(buf, "none\n");
 	spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6..214357e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@
 
 static int support_aes = 1;
 
-static void dev_release(struct device *dev)
-{
-	return;
-}
-
 #define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
-	.name = DRIVER_NAME,
-	.id   = 0,
-	.num_resources = 0,
-	.dev  = {
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.release = dev_release,
-	}
-};
 
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
 
 static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 {
@@ -263,6 +249,7 @@
 
 static int setup_crypt_desc(void)
 {
+	struct device *dev = &pdev->dev;
 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 	crypt_virt = dma_alloc_coherent(dev,
 			NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@
 
 static void one_packet(dma_addr_t phys)
 {
+	struct device *dev = &pdev->dev;
 	struct crypt_ctl *crypt;
 	struct ixp_ctx *ctx;
 	int failed;
@@ -432,7 +420,7 @@
 	tasklet_schedule(&crypto_done_tasklet);
 }
 
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
 {
 	int ret = -ENODEV;
 	u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@
 	return ret;
 }
 
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
 {
 	qmgr_disable_irq(RECV_QID);
 	tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@
 	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct buffer_desc src_hook;
+	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
 
@@ -1010,6 +999,7 @@
 	unsigned int cryptlen;
 	struct buffer_desc *buf, src_hook;
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
+	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
 
@@ -1418,20 +1408,28 @@
 } };
 
 #define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+	.name		= DRIVER_NAME,
+	.id		= 0,
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
 static int __init ixp_module_init(void)
 {
 	int num = ARRAY_SIZE(ixp4xx_algos);
-	int i,err ;
+	int i, err;
 
-	if (platform_device_register(&pseudo_dev))
-		return -ENODEV;
+	pdev = platform_device_register_full(&ixp_dev_info);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
 
 	spin_lock_init(&desc_lock);
 	spin_lock_init(&emerg_lock);
 
-	err = init_ixp_crypto();
+	err = init_ixp_crypto(&pdev->dev);
 	if (err) {
-		platform_device_unregister(&pseudo_dev);
+		platform_device_unregister(pdev);
 		return err;
 	}
 	for (i=0; i< num; i++) {
@@ -1495,8 +1493,8 @@
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
 	}
-	release_ixp_crypto();
-	platform_device_unregister(&pseudo_dev);
+	release_ixp_crypto(&pdev->dev);
+	platform_device_unregister(pdev);
 }
 
 module_init(ixp_module_init);
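
platform_device_register_full() removes the need for a static pseudo-device
with an empty release() callback: the core allocates the device, applies the
DMA mask from the platform_device_info, and provides a correct release path
when the device is unregistered. The general pattern, with names assumed:

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static struct platform_device *my_pdev;

    static const struct platform_device_info my_dev_info = {
            .name           = "my-device",
            .id             = 0,
            .dma_mask       = DMA_BIT_MASK(32),     /* 32-bit DMA only */
    };

    static int __init my_init(void)
    {
            /* Returns an ERR_PTR() on failure, never NULL. */
            my_pdev = platform_device_register_full(&my_dev_info);
            if (IS_ERR(my_pdev))
                    return PTR_ERR(my_pdev);
            return 0;
    }
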
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 2d58da9..fa05e3c 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -268,7 +268,7 @@
 	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);
 
 	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
-	INIT_COMPLETION(dd->op_complete);
+	reinit_completion(&dd->op_complete);
 
 	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
 		do {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c99c00d..a0b2f7e 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
@@ -902,13 +902,13 @@
 {
 	struct devfreq *df = to_devfreq(d);
 	struct device *dev = df->dev.parent;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	ssize_t count = 0;
 	unsigned long freq = 0;
 
 	rcu_read_lock();
 	do {
-		opp = opp_find_freq_ceil(dev, &freq);
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 		if (IS_ERR(opp))
 			break;
 
@@ -993,10 +993,10 @@
 	}
 
 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
-	if (IS_ERR(devfreq_wq)) {
+	if (!devfreq_wq) {
 		class_destroy(devfreq_class);
 		pr_err("%s: couldn't create workqueue\n", __FILE__);
-		return PTR_ERR(devfreq_wq);
+		return -ENOMEM;
 	}
 	devfreq_class->dev_groups = devfreq_groups;
 
@@ -1029,25 +1029,26 @@
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
-				    u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+					   unsigned long *freq,
+					   u32 flags)
 {
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 
 	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
 		/* The freq is an upper bound. opp should be lower */
-		opp = opp_find_freq_floor(dev, freq);
+		opp = dev_pm_opp_find_freq_floor(dev, freq);
 
 		/* If not available, use the closest opp */
 		if (opp == ERR_PTR(-ERANGE))
-			opp = opp_find_freq_ceil(dev, freq);
+			opp = dev_pm_opp_find_freq_ceil(dev, freq);
 	} else {
 		/* The freq is an lower bound. opp should be higher */
-		opp = opp_find_freq_ceil(dev, freq);
+		opp = dev_pm_opp_find_freq_ceil(dev, freq);
 
 		/* If not available, use the closest opp */
 		if (opp == ERR_PTR(-ERANGE))
-			opp = opp_find_freq_floor(dev, freq);
+			opp = dev_pm_opp_find_freq_floor(dev, freq);
 	}
 
 	return opp;
@@ -1066,7 +1067,7 @@
 	int ret = 0;
 
 	rcu_read_lock();
-	nh = opp_get_notifier(dev);
+	nh = dev_pm_opp_get_notifier(dev);
 	if (IS_ERR(nh))
 		ret = PTR_ERR(nh);
 	rcu_read_unlock();
@@ -1092,7 +1093,7 @@
 	int ret = 0;
 
 	rcu_read_lock();
-	nh = opp_get_notifier(dev);
+	nh = dev_pm_opp_get_notifier(dev);
 	if (IS_ERR(nh))
 		ret = PTR_ERR(nh);
 	rcu_read_unlock();
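
The opp_* to dev_pm_opp_* rename (and struct opp to struct dev_pm_opp) is
mechanical; the RCU locking contract is unchanged. The call sites above share
one lookup pattern, sketched here as a self-contained helper; the OPP pointer
is only valid while rcu_read_lock() is held:

    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>

    static int my_get_ceil_freq(struct device *dev, unsigned long *freq)
    {
            struct dev_pm_opp *opp;

            rcu_read_lock();
            opp = dev_pm_opp_find_freq_ceil(dev, freq);
            if (IS_ERR(opp)) {
                    rcu_read_unlock();
                    return PTR_ERR(opp);
            }
            /* Copy out everything needed before unlocking; 'opp'
             * must not be dereferenced after rcu_read_unlock(). */
            *freq = dev_pm_opp_get_freq(opp);
            rcu_read_unlock();

            return 0;
    }
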
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index c5f86d8..cede6f7 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@
 	struct platform_device *pdev = container_of(dev, struct platform_device,
 						    dev);
 	struct busfreq_data *data = platform_get_drvdata(pdev);
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long freq;
 	unsigned long old_freq = data->curr_oppinfo.rate;
 	struct busfreq_opp_info	new_oppinfo;
@@ -650,8 +650,8 @@
 		rcu_read_unlock();
 		return PTR_ERR(opp);
 	}
-	new_oppinfo.rate = opp_get_freq(opp);
-	new_oppinfo.volt = opp_get_voltage(opp);
+	new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+	new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 	freq = new_oppinfo.rate;
 
@@ -873,7 +873,7 @@
 		exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
 
 	for (i = LV_0; i < EX4210_LV_NUM; i++) {
-		err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+		err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
 			      exynos4210_busclk_table[i].volt);
 		if (err) {
 			dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@
 	}
 
 	for (i = 0; i < EX4x12_LV_NUM; i++) {
-		ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+		ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
 			      exynos4x12_mifclk_table[i].volt);
 		if (ret) {
 			dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@
 {
 	struct busfreq_data *data = container_of(this, struct busfreq_data,
 						 pm_notifier);
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	struct busfreq_opp_info	new_oppinfo;
 	unsigned long maxfreq = ULONG_MAX;
 	int err = 0;
@@ -969,7 +969,7 @@
 		data->disabled = true;
 
 		rcu_read_lock();
-		opp = opp_find_freq_floor(data->dev, &maxfreq);
+		opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
 			dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@
 			mutex_unlock(&data->lock);
 			return PTR_ERR(opp);
 		}
-		new_oppinfo.rate = opp_get_freq(opp);
-		new_oppinfo.volt = opp_get_voltage(opp);
+		new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+		new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 
 		err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@
 static int exynos4_busfreq_probe(struct platform_device *pdev)
 {
 	struct busfreq_data *data;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	struct device *dev = &pdev->dev;
 	int err = 0;
 
@@ -1065,15 +1065,16 @@
 	}
 
 	rcu_read_lock();
-	opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+	opp = dev_pm_opp_find_freq_floor(dev,
+					 &exynos4_devfreq_profile.initial_freq);
 	if (IS_ERR(opp)) {
 		rcu_read_unlock();
 		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
 			exynos4_devfreq_profile.initial_freq);
 		return PTR_ERR(opp);
 	}
-	data->curr_oppinfo.rate = opp_get_freq(opp);
-	data->curr_oppinfo.volt = opp_get_voltage(opp);
+	data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+	data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 
 	platform_set_drvdata(pdev, data);
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index 574b16b..a60da3c 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -15,10 +15,9 @@
 #include <linux/module.h>
 #include <linux/devfreq.h>
 #include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
@@ -132,7 +131,7 @@
 	struct platform_device *pdev = container_of(dev, struct platform_device,
 						    dev);
 	struct busfreq_data_int *data = platform_get_drvdata(pdev);
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long old_freq, freq;
 	unsigned long volt;
 
@@ -144,8 +143,8 @@
 		return PTR_ERR(opp);
 	}
 
-	freq = opp_get_freq(opp);
-	volt = opp_get_voltage(opp);
+	freq = dev_pm_opp_get_freq(opp);
+	volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 
 	old_freq = data->curr_freq;
@@ -246,7 +245,7 @@
 	int i, err = 0;
 
 	for (i = LV_0; i < _LV_END; i++) {
-		err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+		err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
 				exynos5_int_opp_table[i].volt);
 		if (err) {
 			dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@
 {
 	struct busfreq_data_int *data = container_of(this,
 					struct busfreq_data_int, pm_notifier);
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	unsigned long maxfreq = ULONG_MAX;
 	unsigned long freq;
 	unsigned long volt;
@@ -276,14 +275,14 @@
 		data->disabled = true;
 
 		rcu_read_lock();
-		opp = opp_find_freq_floor(data->dev, &maxfreq);
+		opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
 			err = PTR_ERR(opp);
 			goto unlock;
 		}
-		freq = opp_get_freq(opp);
-		volt = opp_get_voltage(opp);
+		freq = dev_pm_opp_get_freq(opp);
+		volt = dev_pm_opp_get_voltage(opp);
 		rcu_read_unlock();
 
 		err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@
 static int exynos5_busfreq_int_probe(struct platform_device *pdev)
 {
 	struct busfreq_data_int *data;
-	struct opp *opp;
+	struct dev_pm_opp *opp;
 	struct device *dev = &pdev->dev;
 	struct device_node *np;
 	unsigned long initial_freq;
@@ -351,46 +350,43 @@
 
 	err = exynos5250_init_int_tables(data);
 	if (err)
-		goto err_regulator;
+		return err;
 
-	data->vdd_int = regulator_get(dev, "vdd_int");
+	data->vdd_int = devm_regulator_get(dev, "vdd_int");
 	if (IS_ERR(data->vdd_int)) {
 		dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
-		err = PTR_ERR(data->vdd_int);
-		goto err_regulator;
+		return PTR_ERR(data->vdd_int);
 	}
 
-	data->int_clk = clk_get(dev, "int_clk");
+	data->int_clk = devm_clk_get(dev, "int_clk");
 	if (IS_ERR(data->int_clk)) {
 		dev_err(dev, "Cannot get clock \"int_clk\"\n");
-		err = PTR_ERR(data->int_clk);
-		goto err_clock;
+		return PTR_ERR(data->int_clk);
 	}
 
 	rcu_read_lock();
-	opp = opp_find_freq_floor(dev,
+	opp = dev_pm_opp_find_freq_floor(dev,
 			&exynos5_devfreq_int_profile.initial_freq);
 	if (IS_ERR(opp)) {
 		rcu_read_unlock();
 		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
 		       exynos5_devfreq_int_profile.initial_freq);
-		err = PTR_ERR(opp);
-		goto err_opp_add;
+		return PTR_ERR(opp);
 	}
-	initial_freq = opp_get_freq(opp);
-	initial_volt = opp_get_voltage(opp);
+	initial_freq = dev_pm_opp_get_freq(opp);
+	initial_volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
 	data->curr_freq = initial_freq;
 
 	err = clk_set_rate(data->int_clk, initial_freq * 1000);
 	if (err) {
 		dev_err(dev, "Failed to set initial frequency\n");
-		goto err_opp_add;
+		return err;
 	}
 
 	err = exynos5_int_setvolt(data, initial_volt);
 	if (err)
-		goto err_opp_add;
+		return err;
 
 	platform_set_drvdata(pdev, data);
 
@@ -419,12 +415,6 @@
 
 err_devfreq_add:
 	devfreq_remove_device(data->devfreq);
-	platform_set_drvdata(pdev, NULL);
-err_opp_add:
-	clk_put(data->int_clk);
-err_clock:
-	regulator_put(data->vdd_int);
-err_regulator:
 	return err;
 }
 
@@ -435,9 +425,6 @@
 	pm_qos_remove_request(&data->int_req);
 	unregister_pm_notifier(&data->pm_notifier);
 	devfreq_remove_device(data->devfreq);
-	regulator_put(data->vdd_int);
-	clk_put(data->int_clk);
-	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
@@ -479,7 +466,7 @@
 
 	exynos5_devfreq_pdev =
 		platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
-	if (IS_ERR_OR_NULL(exynos5_devfreq_pdev)) {
+	if (IS_ERR(exynos5_devfreq_pdev)) {
 		ret = PTR_ERR(exynos5_devfreq_pdev);
 		goto out1;
 	}
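
The exynos5 probe cleanup is the usual devm_* conversion: resources obtained
with devm_regulator_get() and devm_clk_get() are released automatically on
probe failure or device removal, which is what lets the err_clock and
err_regulator unwind labels, and the clk_put()/regulator_put() calls in
remove(), disappear. A minimal sketch of the pattern, with names assumed:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/regulator/consumer.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct regulator *vdd;
            struct clk *clk;

            /* Device-managed: no explicit put/free on any exit path. */
            vdd = devm_regulator_get(&pdev->dev, "vdd");
            if (IS_ERR(vdd))
                    return PTR_ERR(vdd);

            clk = devm_clk_get(&pdev->dev, "clk");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return 0;
    }
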
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c61a6ec..dd2874ec 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -313,7 +313,7 @@
 	depends on (ARCH_MMP || ARCH_PXA)
 	select DMA_ENGINE
 	help
-	  Support the MMP PDMA engine for PXA and MMP platfrom.
+	  Support the MMP PDMA engine for PXA and MMP platform.
 
 config DMA_JZ4740
 	tristate "JZ4740 DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5..e51a983 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2055,6 +2055,11 @@
 	if (ret)
 		return ret;
 
+	/* Ensure that we can do DMA */
+	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto out_no_pl08x;
+
 	/* Create the driver state holder */
 	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
 	if (!pl08x) {
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index e35d975..453822c 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,11 +191,9 @@
 	if (IS_ERR(chip->regs))
 		return PTR_ERR(chip->regs);
 
-	/* Apply default dma_mask if needed */
-	if (!dev->dma_mask) {
-		dev->dma_mask = &dev->coherent_dma_mask;
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
-	}
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
 
 	pdata = dev_get_platdata(dev);
 	if (!pdata)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 10b577f..bef8a36 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -634,6 +634,10 @@
 	struct edma_cc *ecc;
 	int ret;
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
 	if (!ecc) {
 		dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -705,11 +709,13 @@
 static const struct platform_device_info edma_dev_info0 = {
 	.name = "edma-dma-engine",
 	.id = 0,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct platform_device_info edma_dev_info1 = {
 	.name = "edma-dma-engine",
 	.id = 1,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static int edma_init(void)
@@ -723,8 +729,6 @@
 			ret = PTR_ERR(pdev0);
 			goto out;
 		}
-		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
-		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 	if (EDMA_CTLRS == 2) {
@@ -734,8 +738,6 @@
 			platform_device_unregister(pdev0);
 			ret = PTR_ERR(pdev1);
 		}
-		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
-		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 out:
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603..c1fd504 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1432,6 +1432,10 @@
 		return -EINVAL;
 	}
 
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 38cb517..d3b6358 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -350,12 +350,7 @@
 	if (!gpool)
 		return NULL;
 
-	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
-	if (!tdmac->desc_arr)
-		return NULL;
-
-	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
-			(unsigned long)tdmac->desc_arr);
+	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);
 
 	return tdmac->desc_arr;
 }
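
gen_pool_dma_alloc() folds the allocate-then-translate sequence into a single
call, returning the kernel virtual address and filling in the DMA address
through its third argument. Roughly, the replaced code is equivalent to:

    #include <linux/genalloc.h>

    static void *my_desc_alloc(struct gen_pool *gpool, size_t size,
                               dma_addr_t *phys)
    {
            /* One call instead of gen_pool_alloc() followed by
             * gen_pool_virt_to_phys(); returns NULL on failure. */
            return gen_pool_dma_alloc(gpool, size, phys);
    }
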
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24..df8b10f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2903,6 +2903,10 @@
 
 	pdat = dev_get_platdata(&adev->dev);
 
+	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	/* Allocate a new DMAC and its Channels */
 	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
 	if (!pdmac) {
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index e5af0e3..0e79951 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -477,7 +477,7 @@
 	phy_config_packet.header[1] = data;
 	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
-	INIT_COMPLETION(phy_config_done);
+	reinit_completion(&phy_config_done);
 
 	card->driver->send_request(card, &phy_config_packet);
 	wait_for_completion_timeout(&phy_config_done, timeout);
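
INIT_COMPLETION() was a macro taking the completion itself; its replacement,
reinit_completion(), is a function taking a pointer, which is why converted
call sites gain a '&'. Both reset only the done counter so an existing
completion can be reused for a new request. The reuse pattern, with the
request-submission hook assumed:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(my_done);

    static void my_submit_request(void);    /* assumed hardware submit */

    static void my_send_and_wait(void)
    {
            /* Reset the counter before reuse; the waitqueue itself
             * is left untouched. */
            reinit_completion(&my_done);
            my_submit_request();
            wait_for_completion_timeout(&my_done, HZ);
    }
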
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ff080ee..1b5e8e4 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -545,12 +545,15 @@
 	host_control_action = HC_ACTION_NONE;
 	host_control_smi_type = HC_SMITYPE_NONE;
 
+	dcdbas_pdev = dev;
+
 	/*
 	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
 	 * This is done by setting the DMA mask below.
 	 */
-	dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+	if (error)
+		return error;
 
 	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
 	if (error)
@@ -581,6 +584,14 @@
 	.remove		= dcdbas_remove,
 };
 
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+	.name		= DRIVER_NAME,
+	.id		= -1,
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
 /**
  * dcdbas_init: initialize driver
  */
@@ -592,20 +603,14 @@
 	if (error)
 		return error;
 
-	dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
-	if (!dcdbas_pdev) {
-		error = -ENOMEM;
+	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+	if (IS_ERR(dcdbas_pdev_reg)) {
+		error = PTR_ERR(dcdbas_pdev_reg);
 		goto err_unregister_driver;
 	}
 
-	error = platform_device_add(dcdbas_pdev);
-	if (error)
-		goto err_free_device;
-
 	return 0;
 
- err_free_device:
-	platform_device_put(dcdbas_pdev);
  err_unregister_driver:
 	platform_driver_unregister(&dcdbas_driver);
 	return error;
@@ -628,8 +633,9 @@
 	 * all sysfs attributes belonging to this module have been
 	 * released.
 	 */
-	smi_data_buf_free();
-	platform_device_unregister(dcdbas_pdev);
+	if (dcdbas_pdev)
+		smi_data_buf_free();
+	platform_device_unregister(dcdbas_pdev_reg);
 	platform_driver_unregister(&dcdbas_driver);
 }
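
dcdbas now sets its coherent mask through dma_set_coherent_mask() instead of
writing the device fields directly, in line with the
dma_set_mask_and_coherent() and dma_coerce_mask_and_coherent() conversions
earlier in this series. The distinction between the two helpers, sketched
with assumed function names:

    #include <linux/dma-mapping.h>

    static int my_setup_dma(struct device *dev)
    {
            /* Normal case: the bus code provided dev->dma_mask;
             * set the streaming and coherent masks together. */
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }

    static int my_setup_platform_dma(struct device *dev)
    {
            /* For devices whose creator left dev->dma_mask unset
             * (common for platform devices), the coerce variant
             * first points it at the coherent mask. */
            return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }
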
 
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 6eb535f..e5a67b2 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -764,6 +764,13 @@
 static struct kobject *gsmi_kobj;
 static struct efivars efivars;
 
+static const struct platform_device_info gsmi_dev_info = {
+	.name		= "gsmi",
+	.id		= -1,
+	/* SMI callbacks require 32bit addresses */
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
 static __init int gsmi_init(void)
 {
 	unsigned long flags;
@@ -776,7 +783,7 @@
 	gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
 
 	/* register device */
-	gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+	gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
 	if (IS_ERR(gsmi_dev.pdev)) {
 		printk(KERN_ERR "gsmi: unable to register platform device\n");
 		return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@
 	/* SMI access needs to be serialized */
 	spin_lock_init(&gsmi_dev.lock);
 
-	/* SMI callbacks require 32bit addresses */
-	gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	gsmi_dev.pdev->dev.dma_mask =
-		&gsmi_dev.pdev->dev.coherent_dma_mask;
 	ret = -ENOMEM;
 	gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
 					     GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
diff --git a/drivers/fmc/Kconfig b/drivers/fmc/Kconfig
index c01cf45..3a75f42 100644
--- a/drivers/fmc/Kconfig
+++ b/drivers/fmc/Kconfig
@@ -46,6 +46,6 @@
 	  This driver matches every mezzanine device and allows user
 	  space to read and write registers using a char device. It
 	  can be used to write user-space drivers, or just get
-	  aquainted with a mezzanine before writing its specific driver.
+	  acquainted with a mezzanine before writing its specific driver.
 
 endif # FMC
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 8ea3b33..a90be34 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -10,7 +10,7 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/module.h>
-
+#include <linux/io.h>
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d..f864275 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
         select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@
 	  selected, the module will be called i810.  AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915.  AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.  Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default.  You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/armada/Kconfig"
+
 source "drivers/gpu/drm/rcar-du/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@
 source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adf..cc08b84 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 0000000..40d3715
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+	tristate "DRM support for Marvell Armada SoCs"
+	depends on DRM && HAVE_CLK && ARM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_KMS_HELPER
+	help
+	  Support the "LCD" controllers found on the Marvell Armada 510
+	  devices.  There are two controllers on the device; each controller
+	  supports graphics and video overlays.
+
+	  This driver provides no built-in acceleration; acceleration is
+	  performed by other IP found on the SoC.  This driver provides
+	  kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+	bool "Support TDA1998X HDMI output"
+	depends on DRM_ARMADA != n
+	depends on I2C && DRM_I2C_NXP_TDA998X = y
+	default y
+	help
+	  Support the TDA1998x HDMI output device found on the Solid-Run
+	  CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 0000000..d6f43e0
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y	:= armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+		   armada_gem.o armada_output.o armada_overlay.o \
+		   armada_slave.o
+armada-y	+= armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 0000000..59948ef
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+	return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+	/* Lower the watermark so to eliminate jitter at higher bandwidths */
+	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+	return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that.  The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We currently are pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and erroring LCD1.  This needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+	const struct drm_display_mode *mode, uint32_t *sclk)
+{
+	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+	struct clk *clk = priv->extclk[0];
+	int ret;
+
+	if (dcrtc->num == 1)
+		return -EINVAL;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	if (dcrtc->clk != clk) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			return ret;
+		dcrtc->clk = clk;
+	}
+
+	if (sclk) {
+		uint32_t rate, ref, div;
+
+		rate = mode->clock * 1000;
+		ref = clk_round_rate(clk, rate);
+		div = DIV_ROUND_UP(ref, rate);
+		if (div < 1)
+			div = 1;
+
+		clk_set_rate(clk, ref);
+		*sclk = div | SCLK_510_EXTCLK1;
+	}
+
+	return 0;
+}
+
+const struct armada_variant armada510_ops = {
+	.has_spu_adv_reg = true,
+	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+	.init = armada510_init,
+	.crtc_init = armada510_crtc_init,
+	.crtc_compute_clock = armada510_crtc_compute_clock,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 0000000..d8e3982
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+	struct drm_pending_vblank_event *event;
+	struct armada_regs regs[4];
+	struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+	CSC_AUTO = 0,
+	CSC_YUV_CCIR601 = 1,
+	CSC_YUV_CCIR709 = 2,
+	CSC_RGB_COMPUTER = 1,
+	CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing.  Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640  1080 1084 1094 1125
+ * Which get translated to:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640   540  542  547  562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC.  Total clocks per
+ * line: 2640.  For the odd frame, the first active line is line 21;
+ * for the even frame, it is line 584.
+ *
+ * LN:    560     561     562     563             567     568    569
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ *  22 blanking lines.  VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN:    1123   1124    1125      1               5       6      7
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ *  23 blanking lines.
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ *  Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ *  Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ *   vtotal = mode->crtc_vtotal + 1;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ *   vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ *   vtotal = mode->crtc_vtotal;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ *   vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
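+/*
+ * Worked example for the 1080i timings above (crtc_vtotal 562,
+ * crtc_vdisplay 540, crtc_vsync_start 542, crtc_vsync_end 547,
+ * crtc_htotal 2640, crtc_hsync_start 2448):
+ *   odd frame:  vtotal 563, vbackporch 3, vhorizpos 2448 - 1320 = 1128
+ *   even frame: vtotal 562, vbackporch 2, vhorizpos 2448
+ * with vfrontporch = 562 - 547 = 15 for both fields.
+ */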
+
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+	while (regs->offset != ~0) {
+		void __iomem *reg = dcrtc->base + regs->offset;
+		uint32_t val;
+
+		val = regs->mask;
+		if (val != 0)
+			val &= readl_relaxed(reg);
+		writel_relaxed(val | regs->val, reg);
+		++regs;
+	}
+}
+
+#define dpms_blanked(dpms)	((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+	uint32_t dumb_ctrl;
+
+	dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+	if (!dpms_blanked(dcrtc->dpms))
+		dumb_ctrl |= CFG_DUMB_ENA;
+
+	/*
+	 * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+	 * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
+	 * force LCD_D[23:0] to output blank color, overriding the GPIO or
+	 * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
+	 */
+	if (dpms_blanked(dcrtc->dpms) &&
+	    (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+		dumb_ctrl &= ~DUMB_MASK;
+		dumb_ctrl |= DUMB_BLANK;
+	}
+
+	/*
+	 * The documentation doesn't indicate what the normal state of
+	 * the sync signals is.  Sebastian Hesselbart kindly probed
+	 * these signals on his board to determine their state.
+	 *
+	 * The non-inverted state of the sync signals is active high.
+	 * Setting these bits makes the appropriate signal active low.
+	 */
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+		dumb_ctrl |= CFG_INV_CSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+		dumb_ctrl |= CFG_INV_HSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+		dumb_ctrl |= CFG_INV_VSYNC;
+
+	if (dcrtc->dumb_ctrl != dumb_ctrl) {
+		dcrtc->dumb_ctrl = dumb_ctrl;
+		writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+	int x, int y, struct armada_regs *regs, bool interlaced)
+{
+	struct armada_gem_object *obj = drm_fb_obj(fb);
+	unsigned pitch = fb->pitches[0];
+	unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+	uint32_t addr_odd, addr_even;
+	unsigned i = 0;
+
+	DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+		pitch, x, y, fb->bits_per_pixel);
+
+	addr_odd = addr_even = obj->dev_addr + offset;
+
+	if (interlaced) {
+		addr_even += pitch;
+		pitch *= 2;
+	}
+
+	/* write the odd/even frame start addresses and the pitch */
+	armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+	armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+	return i;
+}
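+
+/*
+ * For example (hypothetical values), an XRGB8888 framebuffer with a
+ * pitch of 8192 bytes panned to (x, y) = (16, 10) scans out from
+ * dev_addr + 10 * 8192 + 16 * 4.  In an interlaced mode the even
+ * field starts one line further in and the pitch is doubled so each
+ * field skips over the other field's lines.
+ */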
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+	struct armada_frame_work *work)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	unsigned long flags;
+	int ret;
+
+	ret = drm_vblank_get(dev, dcrtc->num);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter\n");
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (!dcrtc->frame_work)
+		dcrtc->frame_work = work;
+	else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (ret)
+		drm_vblank_put(dev, dcrtc->num);
+
+	return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	struct armada_frame_work *work = dcrtc->frame_work;
+
+	dcrtc->frame_work = NULL;
+
+	armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+	if (work->event)
+		drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+	drm_vblank_put(dev, dcrtc->num);
+
+	/* Finally, queue the process-context half of the cleanup. */
+	__armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+	kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+	struct drm_framebuffer *fb, bool force)
+{
+	struct armada_frame_work *work;
+
+	if (!fb)
+		return;
+
+	if (force) {
+		/* Display is disabled, so just drop the old fb */
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (work) {
+		int i = 0;
+		work->event = NULL;
+		work->old_fb = fb;
+		armada_reg_queue_end(work->regs, i);
+
+		if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+			return;
+
+		kfree(work);
+	}
+
+	/*
+	 * Oops - just drop the reference immediately and hope for
+	 * the best.  The worst that will happen is the buffer gets
+	 * reused before it has finished being displayed.
+	 */
+	drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+
+	/*
+	 * Tell the DRM core that vblank IRQs aren't going to happen for
+	 * a while.  This cleans up any pending vblank events for us.
+	 */
+	drm_vblank_off(dev, dcrtc->num);
+
+	/* Handle any pending flip event. */
+	spin_lock_irq(&dev->event_lock);
+	if (dcrtc->frame_work)
+		armada_drm_crtc_complete_frame_work(dcrtc);
+	spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+	int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+	int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (dpms_blanked(dpms))
+			armada_drm_vblank_off(dcrtc);
+	}
+}
+
+/*
+ * Prepare for a mode set.  Turn off the overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct drm_plane *plane;
+
+	/*
+	 * If we have an overlay plane associated with this CRTC, disable
+	 * it before the modeset to avoid its coordinates being outside
+	 * the new mode parameters.  DRM doesn't provide help with this.
+	 */
+	plane = dcrtc->plane;
+	if (plane) {
+		struct drm_framebuffer *fb = plane->fb;
+
+		plane->funcs->disable_plane(plane);
+		plane->fb = NULL;
+		plane->crtc = NULL;
+		drm_framebuffer_unreference(fb);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+		dcrtc->dpms = DRM_MODE_DPMS_ON;
+		armada_drm_crtc_update(dcrtc);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	int ret;
+
+	/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+	if (!priv->variant->has_spu_adv_reg &&
+	    adj->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	/* Check whether the display mode is possible */
+	ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+	struct armada_vbl_event *e, *n;
+	void __iomem *base = dcrtc->base;
+
+	if (stat & DMA_FF_UNDERFLOW)
+		DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+	if (stat & GRA_FF_UNDERFLOW)
+		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+	if (stat & VSYNC_IRQ)
+		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+	spin_lock(&dcrtc->irq_lock);
+
+	list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+		list_del_init(&e->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		e->fn(dcrtc, e->data);
+	}
+
+	if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+		int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+		uint32_t val;
+
+		writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+		writel_relaxed(dcrtc->v[i].spu_v_h_total,
+			       base + LCD_SPUT_V_H_TOTAL);
+
+		val = readl_relaxed(base + LCD_SPU_ADV_REG);
+		val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+		val |= dcrtc->v[i].spu_adv_reg;
+		writel_relaxed(val, base + LCD_SPU_ADV_REG);
+	}
+
+	if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+		writel_relaxed(dcrtc->cursor_hw_pos,
+			       base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+		writel_relaxed(dcrtc->cursor_hw_sz,
+			       base + LCD_SPU_HWC_HPXL_VLN);
+		armada_updatel(CFG_HWC_ENA,
+			       CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+			       base + LCD_SPU_DMA_CTRL0);
+		dcrtc->cursor_update = false;
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	}
+
+	spin_unlock(&dcrtc->irq_lock);
+
+	if (stat & GRA_FRAME_IRQ) {
+		struct drm_device *dev = dcrtc->crtc.dev;
+
+		spin_lock(&dev->event_lock);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock(&dev->event_lock);
+
+		wake_up(&dcrtc->frame_wait);
+	}
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if (dcrtc->irq_ena & mask) {
+		dcrtc->irq_ena &= ~mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	}
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if ((dcrtc->irq_ena & mask) != mask) {
+		dcrtc->irq_ena |= mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+		if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+			writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	}
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+	struct drm_display_mode *adj = &dcrtc->crtc.mode;
+	uint32_t val = 0;
+
+	if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+		val |= CFG_CSC_YUV_CCIR709;
+	if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	/*
+	 * In auto mode, set the colorimetry based upon the HDMI spec.
+	 * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+	 * ITU601.  It may be more appropriate to set this depending on
+	 * the source - but what if the graphic frame is YUV and the
+	 * video frame is RGB?
+	 */
+	if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+	     !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+	    (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+		if (dcrtc->csc_yuv_mode == CSC_AUTO)
+			val |= CFG_CSC_YUV_CCIR709;
+	}
+
+	/*
+	 * We assume we're connected to a TV-like device, so the YUV->RGB
+	 * conversion should produce a limited range.  We should set this
+	 * depending on the connectors attached to this CRTC, and what
+	 * kind of device they report being connected.
+	 */
+	if (dcrtc->csc_rgb_mode == CSC_AUTO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+	struct drm_display_mode *mode, struct drm_display_mode *adj,
+	int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[17];
+	uint32_t lm, rm, tm, bm, val, sclk;
+	unsigned long flags;
+	unsigned i;
+	bool interlaced;
+
+	drm_framebuffer_reference(crtc->fb);
+
+	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+	i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
+	rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+	lm = adj->crtc_htotal - adj->crtc_hsync_end;
+	bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+	tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+	DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+		adj->crtc_hdisplay,
+		adj->crtc_hsync_start,
+		adj->crtc_hsync_end,
+		adj->crtc_htotal, lm, rm);
+	DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+		adj->crtc_vdisplay,
+		adj->crtc_vsync_start,
+		adj->crtc_vsync_end,
+		adj->crtc_vtotal, tm, bm);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+	crtc->mode = *adj;
+
+	val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+	if (val != dcrtc->dumb_ctrl) {
+		dcrtc->dumb_ctrl = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+
+	/* Now compute the divider for real */
+	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+	/* Ensure graphic fifo is enabled */
+	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+	armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+	if (interlaced ^ dcrtc->interlaced) {
+		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+		else
+			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		dcrtc->interlaced = interlaced;
+	}
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+	/* Even interlaced/progressive frame */
+	dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+				    adj->crtc_htotal;
+	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+	val = adj->crtc_hsync_start;
+	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+		priv->variant->spu_adv_reg;
+
+	if (interlaced) {
+		/* Odd interlaced frame */
+		dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+						(1 << 16);
+		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+			priv->variant->spu_adv_reg;
+	} else {
+		dcrtc->v[0] = dcrtc->v[1];
+	}
+
+	val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+	armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+	armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+			   LCD_SPUT_V_H_TOTAL);
+
+	if (priv->variant->has_spu_adv_reg) {
+		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+	}
+
+	val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+	if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+		val |= CFG_PALETTE_ENA;
+
+	if (interlaced)
+		val |= CFG_GRA_FTOGGLE;
+
+	armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+			     CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+					 CFG_SWAPYU | CFG_YUV2RGB) |
+			     CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+			     LCD_SPU_DMA_CTRL0);
+
+	val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+	armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+	val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+	armada_reg_queue_end(regs, i);
+
+	armada_drm_crtc_update_regs(dcrtc, regs);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+	armada_drm_crtc_update(dcrtc);
+
+	drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+	struct drm_framebuffer *old_fb)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[4];
+	unsigned i;
+
+	i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(regs, i);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	/* Take a reference to the new fb as we're using it */
+	drm_framebuffer_reference(crtc->fb);
+
+	/* Update the base in the CRTC */
+	armada_drm_crtc_update_regs(dcrtc, regs);
+
+	/* Drop our previously held reference */
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+	/* Power down most RAMs and FIFOs */
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+	.dpms		= armada_drm_crtc_dpms,
+	.prepare	= armada_drm_crtc_prepare,
+	.commit		= armada_drm_crtc_commit,
+	.mode_fixup	= armada_drm_crtc_mode_fixup,
+	.mode_set	= armada_drm_crtc_mode_set,
+	.mode_set_base	= armada_drm_crtc_mode_set_base,
+	.load_lut	= armada_drm_crtc_load_lut,
+	.disable	= armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+	unsigned stride, unsigned width, unsigned height)
+{
+	uint32_t addr;
+	unsigned y;
+
+	addr = SRAM_HWC32_RAM1;
+	for (y = 0; y < height; y++) {
+		uint32_t *p = &pix[y * stride];
+		unsigned x;
+
+		for (x = 0; x < width; x++, p++) {
+			uint32_t val = *p;
+
+			val = (val & 0xff00ff00) |
+			      (val & 0x000000ff) << 16 |
+			      (val & 0x00ff0000) >> 16;
+
+			writel_relaxed(val,
+				       base + LCD_SPU_SRAM_WRDAT);
+			writel_relaxed(addr | SRAM_WRITE,
+				       base + LCD_SPU_SRAM_CTRL);
+			addr += 1;
+			if ((addr & 0x00ff) == 0)
+				addr += 0xf00;
+			if ((addr & 0x30ff) == 0)
+				addr = SRAM_HWC32_RAM2;
+		}
+	}
+}
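+
+/*
+ * The swizzle above swaps the red and blue bytes for the cursor SRAM:
+ * e.g. the ARGB pixel 0xff112233 (A=ff, R=11, G=22, B=33) is written
+ * out as 0xff332211.
+ */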
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+	unsigned addr;
+
+	for (addr = 0; addr < 256; addr++) {
+		/* write the default value */
+		writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+		writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+			       base + LCD_SPU_SRAM_CTRL);
+	}
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+	uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+	uint32_t yoff, yscr, h = dcrtc->cursor_h;
+	uint32_t para1;
+
+	/*
+	 * Calculate the visible width and height of the cursor,
+	 * screen position, and the position in the cursor bitmap.
+	 */
+	if (dcrtc->cursor_x < 0) {
+		xoff = -dcrtc->cursor_x;
+		xscr = 0;
+		w -= min(xoff, w);
+	} else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+		w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+	} else {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+	}
+
+	if (dcrtc->cursor_y < 0) {
+		yoff = -dcrtc->cursor_y;
+		yscr = 0;
+		h -= min(yoff, h);
+	} else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+		h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+	} else {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+	}
+
+	/*
+	 * In interlaced modes the cursor is displayed at half vertical
+	 * size, so halve its height and screen position and double the
+	 * source stride to skip alternate bitmap lines.
+	 */
+	s = dcrtc->cursor_w;
+	if (dcrtc->interlaced) {
+		s *= 2;
+		yscr /= 2;
+		h /= 2;
+	}
+
+	if (!dcrtc->cursor_obj || !h || !w) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		return 0;
+	}
+
+	para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+	armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	/*
+	 * Initialize the transparency if the SRAM was powered down;
+	 * in that case we must also reload the cursor data.
+	 */
+	if (!(para1 & CFG_CSB_256x32)) {
+		armada_drm_crtc_cursor_tran(dcrtc->base);
+		reload = true;
+	}
+
+	if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		reload = true;
+	}
+	if (reload) {
+		struct armada_gem_object *obj = dcrtc->cursor_obj;
+		uint32_t *pix;
+		/* Set the top-left corner of the cursor image */
+		pix = obj->addr;
+		pix += yoff * s + xoff;
+		armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+	}
+
+	/* Reload the cursor position, size and enable in the IRQ handler */
+	spin_lock_irq(&dcrtc->irq_lock);
+	dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+	dcrtc->cursor_hw_sz = h << 16 | w;
+	dcrtc->cursor_update = true;
+	armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	return 0;
+}
+
+static void cursor_update(void *data)
+{
+	armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+	struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_gem_object *obj = NULL;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -ENXIO;
+
+	if (handle && w > 0 && h > 0) {
+		/* maximum size is 64x32 or 32x64 */
+		if (w > 64 || h > 64 || (w > 32 && h > 32))
+			return -ENOMEM;
+
+		obj = armada_gem_object_lookup(dev, file, handle);
+		if (!obj)
+			return -ENOENT;
+
+		/* Must be a kernel-mapped object */
+		if (!obj->addr) {
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -EINVAL;
+		}
+
+		if (obj->obj.size < w * h * 4) {
+			DRM_ERROR("buffer is too small\n");
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (dcrtc->cursor_obj) {
+		dcrtc->cursor_obj->update = NULL;
+		dcrtc->cursor_obj->update_data = NULL;
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+	}
+	dcrtc->cursor_obj = obj;
+	dcrtc->cursor_w = w;
+	dcrtc->cursor_h = h;
+	ret = armada_drm_crtc_cursor_update(dcrtc, true);
+	if (obj) {
+		obj->update_data = dcrtc;
+		obj->update = cursor_update;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -EFAULT;
+
+	mutex_lock(&dev->struct_mutex);
+	dcrtc->cursor_x = x;
+	dcrtc->cursor_y = y;
+	ret = armada_drm_crtc_cursor_update(dcrtc, false);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+
+	if (dcrtc->cursor_obj)
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+	priv->dcrtc[dcrtc->num] = NULL;
+	drm_crtc_cleanup(&dcrtc->crtc);
+
+	if (!IS_ERR(dcrtc->clk))
+		clk_disable_unprepare(dcrtc->clk);
+
+	kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_frame_work *work;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+	unsigned i;
+	int ret;
+
+	/* We don't support changing the pixel format */
+	if (fb->pixel_format != crtc->fb->pixel_format)
+		return -EINVAL;
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->event = event;
+	work->old_fb = dcrtc->crtc.fb;
+
+	i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(work->regs, i);
+
+	/*
+	 * Hold the old framebuffer for the work - DRM appears to drop our
+	 * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+	 */
+	drm_framebuffer_reference(work->old_fb);
+
+	ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+	if (ret) {
+		/*
+		 * Undo our reference above; DRM does not drop the reference
+		 * to this object on error, so that's okay.
+		 */
+		drm_framebuffer_unreference(work->old_fb);
+		kfree(work);
+		return ret;
+	}
+
+	/*
+	 * Don't take a reference on the new framebuffer;
+	 * drm_mode_page_flip_ioctl() has already grabbed a reference and
+	 * will _not_ drop that reference on successful return from this
+	 * function.  Simply mark this new framebuffer as the current one.
+	 */
+	dcrtc->crtc.fb = fb;
+
+	/*
+	 * Finally, if the display is blanked, we won't receive an
+	 * interrupt, so complete it now.
+	 */
+	if (dpms_blanked(dcrtc->dpms)) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	bool update_csc = false;
+
+	if (property == priv->csc_yuv_prop) {
+		dcrtc->csc_yuv_mode = val;
+		update_csc = true;
+	} else if (property == priv->csc_rgb_prop) {
+		dcrtc->csc_rgb_mode = val;
+		update_csc = true;
+	}
+
+	if (update_csc) {
+		uint32_t val;
+
+		val = dcrtc->spu_iopad_ctrl |
+		      armada_drm_crtc_calculate_csc(dcrtc);
+		writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	}
+
+	return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+	.cursor_set	= armada_drm_crtc_cursor_set,
+	.cursor_move	= armada_drm_crtc_cursor_move,
+	.destroy	= armada_drm_crtc_destroy,
+	.set_config	= drm_crtc_helper_set_config,
+	.page_flip	= armada_drm_crtc_page_flip,
+	.set_property	= armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+	{ CSC_AUTO,        "Auto" },
+	{ CSC_YUV_CCIR601, "CCIR601" },
+	{ CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+	{ CSC_AUTO,         "Auto" },
+	{ CSC_RGB_COMPUTER, "Computer system" },
+	{ CSC_RGB_STUDIO,   "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->csc_yuv_prop)
+		return 0;
+
+	priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+				"CSC_YUV", armada_drm_csc_yuv_enum_list,
+				ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+	priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+				"CSC_RGB", armada_drm_csc_rgb_enum_list,
+				ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+	if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+	struct resource *res)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc;
+	void __iomem *base;
+	int ret;
+
+	ret = armada_drm_crtc_create_properties(dev);
+	if (ret)
+		return ret;
+
+	base = devm_request_and_ioremap(dev->dev, res);
+	if (!base) {
+		DRM_ERROR("failed to ioremap register\n");
+		return -ENOMEM;
+	}
+
+	dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+	if (!dcrtc) {
+		DRM_ERROR("failed to allocate Armada crtc\n");
+		return -ENOMEM;
+	}
+
+	dcrtc->base = base;
+	dcrtc->num = num;
+	dcrtc->clk = ERR_PTR(-EINVAL);
+	dcrtc->csc_yuv_mode = CSC_AUTO;
+	dcrtc->csc_rgb_mode = CSC_AUTO;
+	dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+	dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+	spin_lock_init(&dcrtc->irq_lock);
+	dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+	INIT_LIST_HEAD(&dcrtc->vbl_list);
+	init_waitqueue_head(&dcrtc->frame_wait);
+
+	/* Initialize some registers which we don't otherwise set */
+	writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+	writel_relaxed(dcrtc->spu_iopad_ctrl,
+		       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+	if (priv->variant->crtc_init) {
+		ret = priv->variant->crtc_init(dcrtc);
+		if (ret) {
+			kfree(dcrtc);
+			return ret;
+		}
+	}
+
+	/* Ensure AXI pipeline is enabled */
+	armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+	priv->dcrtc[dcrtc->num] = dcrtc;
+
+	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+				   dcrtc->csc_yuv_mode);
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+				   dcrtc->csc_rgb_mode);
+
+	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 0000000..9c10a07
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o)	\
+	do {					\
+		struct armada_regs *__reg = _r;	\
+		__reg[_i].offset = _o;		\
+		__reg[_i].mask = ~(_m);		\
+		__reg[_i].val = _v;		\
+		_i++;				\
+	} while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o)	\
+	armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i)		\
+	armada_reg_queue_mod(_r, _i, 0, 0, ~0)
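+
+/*
+ * Typical usage (illustrative sketch):
+ *
+ *	struct armada_regs regs[3];
+ *	unsigned i = 0;
+ *
+ *	armada_reg_queue_set(regs, i, val, LCD_SPU_V_PORCH);
+ *	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+ *	armada_reg_queue_end(regs, i);
+ *
+ * armada_reg_queue_end() stores ~0 as the offset, which is the
+ * sentinel that armada_drm_crtc_update_regs() terminates on.
+ */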
+
+struct armada_frame_work;
+
+struct armada_crtc {
+	struct drm_crtc		crtc;
+	unsigned		num;
+	void __iomem		*base;
+	struct clk		*clk;
+	struct {
+		uint32_t	spu_v_h_total;
+		uint32_t	spu_v_porch;
+		uint32_t	spu_adv_reg;
+	} v[2];
+	bool			interlaced;
+	bool			cursor_update;
+	uint8_t			csc_yuv_mode;
+	uint8_t			csc_rgb_mode;
+
+	struct drm_plane	*plane;
+
+	struct armada_gem_object	*cursor_obj;
+	int			cursor_x;
+	int			cursor_y;
+	uint32_t		cursor_hw_pos;
+	uint32_t		cursor_hw_sz;
+	uint32_t		cursor_w;
+	uint32_t		cursor_h;
+
+	int			dpms;
+	uint32_t		cfg_dumb_ctrl;
+	uint32_t		dumb_ctrl;
+	uint32_t		spu_iopad_ctrl;
+
+	wait_queue_head_t	frame_wait;
+	struct armada_frame_work *frame_work;
+
+	spinlock_t		irq_lock;
+	uint32_t		irq_ena;
+	struct list_head	vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 0000000..471e456
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct armada_private *priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mm_dump_table(m, &priv->linear);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct armada_private *priv = dev->dev_private;
+	int n, i;
+
+	if (priv) {
+		for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+			struct armada_crtc *dcrtc = priv->dcrtc[n];
+			if (!dcrtc)
+				continue;
+
+			for (i = 0x84; i <= 0x1c4; i += 4) {
+				uint32_t v = readl_relaxed(dcrtc->base + i);
+				seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+	.owner = THIS_MODULE,
+	.open = armada_debugfs_reg_r_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t armada_debugfs_write(struct file *file, const char __user *ptr,
+	size_t len, loff_t *off)
+{
+	struct drm_device *dev = file->private_data;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	char buf[32], *p;
+	uint32_t reg, val;
+	int ret;
+
+	if (*off != 0)
+		return 0;
+
+	if (len > sizeof(buf) - 1)
+		len = sizeof(buf) - 1;
+
+	ret = strncpy_from_user(buf, ptr, len);
+	if (ret < 0)
+		return ret;
+	buf[len] = '\0';
+
+	reg = simple_strtoul(buf, &p, 16);
+	if (!isspace(*p))
+		return -EINVAL;
+	val = simple_strtoul(p + 1, NULL, 16);
+
+	if (reg >= 0x84 && reg <= 0x1c4)
+		writel(val, dcrtc->base + reg);
+
+	return len;
+}
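+
+/*
+ * Example usage from userspace (the path assumes the default debugfs
+ * mount point and DRM minor 0):
+ *   echo "1c0 00000000" > /sys/kernel/debug/dri/0/reg_wr
+ * writes zero to LCD register 0x1c0 on the first controller.
+ */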
+
+static const struct file_operations fops_reg_w = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = armada_debugfs_write,
+	.llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+	{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+	const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+	const char *name, umode_t mode, const struct file_operations *fops)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+	return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+	int ret;
+
+	ret = drm_debugfs_create_files(armada_debugfs_list,
+				       ARMADA_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				   "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+	if (ret)
+		goto err_1;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				"reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+	if (ret)
+		goto err_2;
+	return ret;
+
+ err_2:
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+	return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 0000000..eef09ec
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+	uint32_t ov, v;
+
+	ov = v = readl_relaxed(ptr);
+	v = (v & ~mask) | val;
+	if (ov != v)
+		writel_relaxed(v, ptr);
+}
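+
+/*
+ * e.g. armada_updatel(CFG_GRA_ENA, CFG_GRA_ENA, ptr) sets the enable
+ * bit and armada_updatel(0, CFG_GRA_ENA, ptr) clears it; the register
+ * is only written back if the value actually changes.
+ */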
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+	uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+	/* 88AP510 spec recommends pitch be a multiple of 128 */
+	return ALIGN(pitch, 128);
+}
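+
+/*
+ * e.g. armada_pitch(1024, 32) = ALIGN(4096, 128) = 4096 and
+ * armada_pitch(1000, 16) = ALIGN(2000, 128) = 2048; bpp == 4 is the
+ * 4-bit pseudo-colour case, at width / 2 bytes per line before
+ * alignment.
+ */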
+
+struct armada_vbl_event {
+	struct list_head	node;
+	void			*data;
+	void			(*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+	struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do {	\
+	struct armada_vbl_event *__e = _e;		\
+	INIT_LIST_HEAD(&__e->node);			\
+	__e->data = _d;					\
+	__e->fn = _f;					\
+} while (0)
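+
+/*
+ * Typical usage (illustrative; my_handler and the embedding struct
+ * are hypothetical):
+ *
+ *	armada_drm_vbl_event_init(&work->vbl, my_handler, work);
+ *	armada_drm_vbl_event_add(dcrtc, &work->vbl);
+ *
+ * my_handler(dcrtc, work) then runs once from the next vblank
+ * interrupt (see armada_drm_crtc_irq()), after which the event has
+ * already been removed from the list.
+ */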
+
+
+struct armada_private;
+
+struct armada_variant {
+	bool	has_spu_adv_reg;
+	uint32_t spu_adv_reg;
+	int (*init)(struct armada_private *, struct device *);
+	int (*crtc_init)(struct armada_crtc *);
+	int (*crtc_compute_clock)(struct armada_crtc *,
+				  const struct drm_display_mode *,
+				  uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+	const struct armada_variant *variant;
+	struct work_struct	fb_unref_work;
+	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+	struct drm_fb_helper	*fbdev;
+	struct armada_crtc	*dcrtc[2];
+	struct drm_mm		linear;
+	struct clk		*extclk[2];
+	struct drm_property	*csc_yuv_prop;
+	struct drm_property	*csc_rgb_prop;
+	struct drm_property	*colorkey_prop;
+	struct drm_property	*colorkey_min_prop;
+	struct drm_property	*colorkey_max_prop;
+	struct drm_property	*colorkey_val_prop;
+	struct drm_property	*colorkey_alpha_prop;
+	struct drm_property	*colorkey_mode_prop;
+	struct drm_property	*brightness_prop;
+	struct drm_property	*contrast_prop;
+	struct drm_property	*saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 0000000..4f2b283
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+	/* With 0x24, there is no translation between vp_out and int_vp
+	FB	LCD out	Pins	VIP	Int Vp
+	R:23:16	R:7:0	VPC7:0	7:0	7:0[R]
+	G:15:8	G:15:8	VPB7:0	23:16	23:16[G]
+	B:7:0	B:23:16	VPA7:0	15:8	15:8[B]
+	*/
+	.swap_a = 2,
+	.swap_b = 3,
+	.swap_c = 4,
+	.swap_d = 5,
+	.swap_e = 0,
+	.swap_f = 1,
+	.audio_cfg = BIT(2),
+	.audio_frame[1] = 1,
+	.audio_format = AFMT_SPDIF,
+	.audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+	.i2c_adapter_id = 0,
+	.crtcs = 1 << 0, /* Only LCD0 at the moment */
+	.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+	.interlace_allowed = true,
+	.info = {
+		.type = "tda998x",
+		.addr = 0x70,
+		.platform_data = &params,
+	},
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+	struct armada_private *priv =
+		container_of(work, struct armada_private, fb_unref_work);
+	struct drm_framebuffer *fb;
+
+	while (kfifo_get(&priv->fb_unref, &fb))
+		drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	/*
+	 * Yes, we really must jump through these hoops just to store a
+	 * _pointer_ to something into the kfifo.  This is utterly insane
+	 * and idiotic, because kfifo requires the _data_ pointed to by
+	 * the pointer to be const, not the pointer itself.  Not only
+	 * that, but you have to pass a pointer _to_ the pointer you
+	 * want stored.
+	 */
+	const struct drm_framebuffer *silly_api_alert = fb;
+	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+	schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	__armada_drm_queue_unref_work(dev, fb);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	const struct platform_device_id *id;
+	struct armada_private *priv;
+	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+	struct resource *mem = NULL;
+	int ret, n, i;
+
+	memset(res, 0, sizeof(res));
+
+	for (n = i = 0; ; n++) {
+		struct resource *r = platform_get_resource(dev->platformdev,
+							   IORESOURCE_MEM, n);
+		if (!r)
+			break;
+
+		/* Resources above 64K are graphics memory */
+		if (resource_size(r) > SZ_64K)
+			mem = r;
+		else if (i < ARRAY_SIZE(priv->dcrtc))
+			res[i++] = r;
+		else
+			return -EINVAL;
+	}
+
+	if (!res[0] || !mem)
+		return -ENXIO;
+
+	if (!devm_request_mem_region(dev->dev, mem->start,
+			resource_size(mem), "armada-drm"))
+		return -EBUSY;
+
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate private\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	/* Get the implementation specific driver data. */
+	id = platform_get_device_id(dev->platformdev);
+	if (!id)
+		return -ENXIO;
+
+	priv->variant = (struct armada_variant *)id->driver_data;
+
+	ret = priv->variant->init(priv, dev->dev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+	INIT_KFIFO(priv->fb_unref);
+
+	/* Mode setting support */
+	drm_mode_config_init(dev);
+	dev->mode_config.min_width = 320;
+	dev->mode_config.min_height = 200;
+
+	/*
+	 * With vscale enabled, the maximum width is 1920 due to the
+	 * line buffer RAM being 1920 pixels by 3 lines.
+	 */
+	dev->mode_config.max_width = 1920;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+	/* Create all LCD controllers */
+	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+		if (!res[n])
+			break;
+
+		ret = armada_drm_crtc_create(dev, n, res[n]);
+		if (ret)
+			goto err_kms;
+	}
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+	if (ret)
+		goto err_kms;
+#endif
+
+	ret = drm_vblank_init(dev, n);
+	if (ret)
+		goto err_kms;
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto err_kms;
+
+	dev->vblank_disable_allowed = 1;
+
+	ret = armada_fbdev_init(dev);
+	if (ret)
+		goto err_irq;
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+ err_irq:
+	drm_irq_uninstall(dev);
+ err_kms:
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+
+	return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	armada_fbdev_fini(dev);
+	drm_irq_uninstall(dev);
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	if (list_empty(&evt->node)) {
+		list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+		drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+	}
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	if (!list_empty(&evt->node)) {
+		list_del_init(&evt->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+	}
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	armada_drm_vbl_event_remove(dcrtc, evt);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+	return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+	irqreturn_t handled = IRQ_NONE;
+
+	/*
+	 * This is ridiculous - rather than writing bits to clear, we
+	 * have to set the actual status register value.  This is racy.
+	 */
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/* Mask out those interrupts we haven't enabled */
+	v = stat & dcrtc->irq_ena;
+
+	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+		armada_drm_crtc_irq(dcrtc, stat);
+		handled = IRQ_HANDLED;
+	}
+
+	return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	spin_lock_irq(&dev->vbl_lock);
+	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	spin_unlock_irq(&dev->vbl_lock);
+
+	return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+		DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+	.owner			= THIS_MODULE,
+	.llseek			= no_llseek,
+	.read			= drm_read,
+	.poll			= drm_poll,
+	.unlocked_ioctl		= drm_ioctl,
+	.mmap			= drm_gem_mmap,
+	.open			= drm_open,
+	.release		= drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+	.load			= armada_drm_load,
+	.open			= NULL,
+	.preclose		= NULL,
+	.postclose		= NULL,
+	.lastclose		= NULL,
+	.unload			= armada_drm_unload,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= armada_drm_enable_vblank,
+	.disable_vblank		= armada_drm_disable_vblank,
+	.irq_handler		= armada_drm_irq_handler,
+	.irq_postinstall	= armada_drm_irq_postinstall,
+	.irq_uninstall		= armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init		= armada_drm_debugfs_init,
+	.debugfs_cleanup	= armada_drm_debugfs_cleanup,
+#endif
+	.gem_free_object	= armada_gem_free_object,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_export	= armada_gem_prime_export,
+	.gem_prime_import	= armada_gem_prime_import,
+	.dumb_create		= armada_gem_dumb_create,
+	.dumb_map_offset	= armada_gem_dumb_map_offset,
+	.dumb_destroy		= armada_gem_dumb_destroy,
+	.gem_vm_ops		= &armada_gem_vm_ops,
+	.major			= 1,
+	.minor			= 0,
+	.name			= "armada-drm",
+	.desc			= "Armada SoC DRM",
+	.date			= "20120730",
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
+				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+	.ioctls			= armada_ioctls,
+	.fops			= &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&armada_drm_driver, pdev);
+	return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+	{
+		.name		= "armada-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	}, {
+		.name		= "armada-510-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+	.probe	= armada_drm_probe,
+	.remove	= armada_drm_remove,
+	.driver	= {
+		.name	= "armada-drm",
+		.owner	= THIS_MODULE,
+	},
+	.id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+	armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+	return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+	platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 0000000..1c90969
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+	drm_framebuffer_cleanup(&dfb->fb);
+	drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+	kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *dfile, unsigned int *handle)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+	return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+	.destroy	= armada_fb_destroy,
+	.create_handle	= armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+	struct armada_framebuffer *dfb;
+	uint8_t format, config;
+	int ret;
+
+	switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod)		\
+	case DRM_FORMAT_##drm:		\
+		format = CFG_##fmt;	\
+		config = mod;		\
+		break
+	FMT(RGB565,	565,		CFG_SWAPRB);
+	FMT(BGR565,	565,		0);
+	FMT(ARGB1555,	1555,		CFG_SWAPRB);
+	FMT(ABGR1555,	1555,		0);
+	FMT(RGB888,	888PACK,	CFG_SWAPRB);
+	FMT(BGR888,	888PACK,	0);
+	FMT(XRGB8888,	X888,		CFG_SWAPRB);
+	FMT(XBGR8888,	X888,		0);
+	FMT(ARGB8888,	8888,		CFG_SWAPRB);
+	FMT(ABGR8888,	8888,		0);
+	FMT(YUYV,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+	FMT(UYVY,	422PACK,	CFG_YUV2RGB);
+	FMT(VYUY,	422PACK,	CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YVYU,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU);
+	FMT(YUV422,	422,		CFG_YUV2RGB);
+	FMT(YVU422,	422,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YUV420,	420,		CFG_YUV2RGB);
+	FMT(YVU420,	420,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(C8,		PSEUDO8,	0);
+#undef FMT
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+	if (!dfb) {
+		DRM_ERROR("failed to allocate Armada fb object\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dfb->fmt = format;
+	dfb->mod = config;
+	dfb->obj = obj;
+
+	drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+	ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+	if (ret) {
+		kfree(dfb);
+		return ERR_PTR(ret);
+	}
+
+	/*
+	 * Take a reference on our object as we're successful - the
+	 * caller already holds a reference, which keeps us safe for
+	 * the above call, but the caller will drop their reference
+	 * to it.  Hence we need to take our own reference.
+	 */
+	drm_gem_object_reference(&obj->obj);
+
+	return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+	struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+	struct armada_gem_object *obj;
+	struct armada_framebuffer *dfb;
+	int ret;
+
+	DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+		mode->width, mode->height, mode->pixel_format,
+		mode->flags, mode->pitches[0], mode->pitches[1],
+		mode->pitches[2]);
+
+	/* Multi-plane formats are only accepted if all planes share one buffer */
+	if (drm_format_num_planes(mode->pixel_format) > 1 &&
+	    (mode->handles[0] != mode->handles[1] ||
+	     mode->handles[0] != mode->handles[2])) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+	if (!obj) {
+		ret = -ENOENT;
+		goto err;
+	}
+
+	if (obj->obj.import_attach && !obj->sgt) {
+		ret = armada_gem_map_import(obj);
+		if (ret)
+			goto err_unref;
+	}
+
+	/* Framebuffer objects must have a valid device address for scanout */
+	if (obj->dev_addr == DMA_ERROR_CODE) {
+		ret = -EINVAL;
+		goto err_unref;
+	}
+
+	dfb = armada_framebuffer_create(dev, mode, obj);
+	if (IS_ERR(dfb)) {
+		ret = PTR_ERR(dfb);
+		goto err_unref;
+	}
+
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	return &dfb->fb;
+
+ err_unref:
+	drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+	DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+	return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh)
+		drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+	.fb_create		= armada_fb_create,
+	.output_poll_changed	= armada_output_poll_changed,
+};
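
For reference, each FMT() line in the switch above expands into a full case mapping a DRM fourcc onto the controller's pixel-format field plus its swap/CSC modifier bits; the RGB565 entry, written out by hand, becomes:

	case DRM_FORMAT_RGB565:
		format = CFG_565;
		config = CFG_SWAPRB;
		break;
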
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 0000000..ce3f12e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+	struct drm_framebuffer	fb;
+	struct armada_gem_object *obj;
+	uint8_t			fmt;
+	uint8_t			mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+	container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+	struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 0000000..dd5ea77
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+	.fb_debug_enter	= drm_fb_helper_debug_enter,
+	.fb_debug_leave	= drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fbh->dev;
+	struct drm_mode_fb_cmd2 mode;
+	struct armada_framebuffer *dfb;
+	struct armada_gem_object *obj;
+	struct fb_info *info;
+	int size, ret;
+	void *ptr;
+
+	memset(&mode, 0, sizeof(mode));
+	mode.width = sizes->surface_width;
+	mode.height = sizes->surface_height;
+	mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+	mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+					sizes->surface_depth);
+
+	size = mode.pitches[0] * mode.height;
+	obj = armada_gem_alloc_private_object(dev, size);
+	if (!obj) {
+		DRM_ERROR("failed to allocate fb memory\n");
+		return -ENOMEM;
+	}
+
+	ret = armada_gem_linear_back(dev, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return ret;
+	}
+
+	ptr = armada_gem_map_object(dev, obj);
+	if (!ptr) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return -ENOMEM;
+	}
+
+	dfb = armada_framebuffer_create(dev, &mode, obj);
+
+	/*
+	 * A reference is now held by the framebuffer object if
+	 * successful, otherwise this drops the ref for the error path.
+	 */
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	if (IS_ERR(dfb))
+		return PTR_ERR(dfb);
+
+	info = framebuffer_alloc(0, dev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_fballoc;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto err_fbcmap;
+	}
+
+	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+	info->par = fbh;
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &armada_fb_ops;
+	info->fix.smem_start = obj->phys_addr;
+	info->fix.smem_len = obj->obj.size;
+	info->screen_size = obj->obj.size;
+	info->screen_base = ptr;
+	fbh->fb = &dfb->fb;
+	fbh->fbdev = info;
+	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: %pa\n",
+		dfb->fb.width, dfb->fb.height,
+		dfb->fb.bits_per_pixel, &obj->phys_addr);
+
+	return 0;
+
+ err_fbcmap:
+	framebuffer_release(info);
+ err_fballoc:
+	dfb->fb.funcs->destroy(&dfb->fb);
+	return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!fbh->fb) {
+		ret = armada_fb_create(fbh, sizes);
+		if (ret == 0)
+			ret = 1;
+	}
+	return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+	.gamma_set	= armada_drm_crtc_gamma_set,
+	.gamma_get	= armada_drm_crtc_gamma_get,
+	.fb_probe	= armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh;
+	int ret;
+
+	fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+	if (!fbh)
+		return -ENOMEM;
+
+	priv->fbdev = fbh;
+
+	fbh->funcs = &armada_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, fbh, 1, 1);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm fb helper\n");
+		goto err_fb_helper;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(fbh);
+	if (ret) {
+		DRM_ERROR("failed to add fb connectors\n");
+		goto err_fb_setup;
+	}
+
+	ret = drm_fb_helper_initial_config(fbh, 32);
+	if (ret) {
+		DRM_ERROR("failed to set initial config\n");
+		goto err_fb_setup;
+	}
+
+	return 0;
+ err_fb_setup:
+	drm_fb_helper_fini(fbh);
+ err_fb_helper:
+	priv->fbdev = NULL;
+	return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh) {
+		struct fb_info *info = fbh->fbdev;
+
+		if (info) {
+			unregister_framebuffer(info);
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		if (fbh->fb)
+			fbh->fb->funcs->destroy(fbh->fb);
+
+		drm_fb_helper_fini(fbh);
+
+		priv->fbdev = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 0000000..9f2356b
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+	int ret;
+
+	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vma, addr, pfn);
+
+	switch (ret) {
+	case 0:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+	.fault	= armada_gem_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+	return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+	drm_gem_free_mmap_offset(&dobj->obj);
+
+	if (dobj->page) {
+		/* page backed memory */
+		unsigned int order = get_order(dobj->obj.size);
+		__free_pages(dobj->page, order);
+	} else if (dobj->linear) {
+		/* linear backed memory */
+		drm_mm_remove_node(dobj->linear);
+		kfree(dobj->linear);
+		if (dobj->addr)
+			iounmap(dobj->addr);
+	}
+
+	if (dobj->obj.import_attach) {
+		/* We only ever display imported data */
+		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+					 DMA_TO_DEVICE);
+		drm_prime_gem_destroy(&dobj->obj, NULL);
+	}
+
+	drm_gem_object_release(&dobj->obj);
+
+	kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+	struct armada_private *priv = dev->dev_private;
+	size_t size = obj->obj.size;
+
+	if (obj->page || obj->linear)
+		return 0;
+
+	/*
+	 * If it is a small allocation (typically cursor, which will
+	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+	 * Framebuffers will never be this small (our minimum size for
+	 * framebuffers is larger than this anyway.)  Such objects are
+	 * only accessed by the CPU so we don't need any special handling
+	 * here.
+	 */
+	if (size <= 8192) {
+		unsigned int order = get_order(size);
+		struct page *p = alloc_pages(GFP_KERNEL, order);
+
+		if (p) {
+			obj->addr = page_address(p);
+			obj->phys_addr = page_to_phys(p);
+			obj->page = p;
+
+			memset(obj->addr, 0, PAGE_ALIGN(size));
+		}
+	}
+
+	/*
+	 * We could grab something from CMA if it's enabled, but that
+	 * would build in a problem:
+	 *
+	 * CMA's interface uses dma_alloc_coherent(), which provides us
+	 * with a CPU virtual address and a device address.
+	 *
+	 * The CPU virtual address may be either an address in the kernel
+	 * direct mapped region (for example, as it would be on x86) or
+	 * it may be remapped into another part of kernel memory space
+	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
+	 * returned virtual address may be invalid, depending on the
+	 * architecture implementation.
+	 *
+	 * The device address may also not be a physical address; there
+	 * may be some kind of remapping between the device and system
+	 * RAM, which makes the device address unsafe to re-use as a
+	 * physical address.
+	 *
+	 * This makes DRM usage of dma_alloc_coherent() in a generic way
+	 * at best very questionable and unsafe.
+	 */
+
+	/* Otherwise, grab it from our linear allocation */
+	if (!obj->page) {
+		struct drm_mm_node *node;
+		unsigned align = min_t(unsigned, size, SZ_2M);
+		void __iomem *ptr;
+		int ret;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return -ENOSPC;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_mm_insert_node(&priv->linear, node, size, align,
+					 DRM_MM_SEARCH_DEFAULT);
+		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			kfree(node);
+			return ret;
+		}
+
+		obj->linear = node;
+
+		/* Ensure that the memory we're returning is cleared. */
+		ptr = ioremap_wc(obj->linear->start, size);
+		if (!ptr) {
+			mutex_lock(&dev->struct_mutex);
+			drm_mm_remove_node(obj->linear);
+			mutex_unlock(&dev->struct_mutex);
+			kfree(obj->linear);
+			obj->linear = NULL;
+			return -ENOMEM;
+		}
+
+		memset_io(ptr, 0, size);
+		iounmap(ptr);
+
+		obj->phys_addr = obj->linear->start;
+		obj->dev_addr = obj->linear->start;
+	}
+
+	DRM_DEBUG_DRIVER("obj %p phys %pa dev %pa\n",
+			 obj, &obj->phys_addr, &obj->dev_addr);
+
+	return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+	/* only linear objects need to be ioremap'd */
+	if (!dobj->addr && dobj->linear)
+		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+	return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+	struct armada_gem_object *obj;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	drm_gem_private_object_init(dev, &obj->obj, size);
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+	size_t size)
+{
+	struct armada_gem_object *obj;
+	struct address_space *mapping;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->obj, size)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+	struct drm_mode_create_dumb *args)
+{
+	struct armada_gem_object *dobj;
+	u32 handle;
+	size_t size;
+	int ret;
+
+	args->pitch = armada_pitch(args->width, args->bpp);
+	args->size = size = args->pitch * args->height;
+
+	dobj = armada_gem_alloc_private_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = armada_gem_linear_back(dev, dobj);
+	if (ret)
+		goto err;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle, uint64_t *offset)
+{
+	struct armada_gem_object *obj;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = armada_gem_object_lookup(dev, file, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* Don't allow imported objects to be mapped */
+	if (obj->obj.import_attach) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	ret = drm_gem_create_mmap_offset(&obj->obj);
+	if (ret == 0) {
+		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+	}
+
+	drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_create *args = data;
+	struct armada_gem_object *dobj;
+	size_t size;
+	u32 handle;
+	int ret;
+
+	if (args->size == 0)
+		return -ENOMEM;
+
+	size = args->size;
+
+	dobj = armada_gem_alloc_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_mmap *args = data;
+	struct armada_gem_object *dobj;
+	unsigned long addr;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	if (!dobj->obj.filp) {
+		drm_gem_object_unreference(&dobj->obj);
+		return -EINVAL;
+	}
+
+	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+		       MAP_SHARED, args->offset);
+	drm_gem_object_unreference(&dobj->obj);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	args->addr = addr;
+
+	return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_pwrite *args = data;
+	struct armada_gem_object *dobj;
+	char __user *ptr;
+	int ret;
+
+	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+		args->handle, args->offset, args->size, args->ptr);
+
+	if (args->size == 0)
+		return 0;
+
+	ptr = (char __user *)(uintptr_t)args->ptr;
+
+	if (!access_ok(VERIFY_READ, ptr, args->size))
+		return -EFAULT;
+
+	ret = fault_in_multipages_readable(ptr, args->size);
+	if (ret)
+		return ret;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	/* Must be a kernel-mapped object; drop the lookup reference on error */
+	if (!dobj->addr) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (args->offset > dobj->obj.size ||
+	    args->size > dobj->obj.size - args->offset) {
+		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+		ret = -EFAULT;
+	} else if (dobj->update) {
+		dobj->update(dobj->update_data);
+		ret = 0;
+	}
+
+ unref:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Prime support */
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+	enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	struct scatterlist *sg;
+	struct sg_table *sgt;
+	int i, num;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (dobj->obj.filp) {
+		struct address_space *mapping;
+		gfp_t gfp;
+		int count;
+
+		count = dobj->obj.size / PAGE_SIZE;
+		if (sg_alloc_table(sgt, count, GFP_KERNEL))
+			goto free_sgt;
+
+		mapping = file_inode(dobj->obj.filp)->i_mapping;
+		gfp = mapping_gfp_mask(mapping);
+
+		for_each_sg(sgt->sgl, sg, count, i) {
+			struct page *page;
+
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page)) {
+				num = i;
+				goto release;
+			}
+
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		}
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+			num = sgt->nents;
+			goto release;
+		}
+	} else if (dobj->page) {
+		/* Single contiguous page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+
+		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+			goto free_table;
+	} else if (dobj->linear) {
+		/* Single contiguous physical region - no struct page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+		sg_dma_address(sgt->sgl) = dobj->dev_addr;
+		sg_dma_len(sgt->sgl) = dobj->obj.size;
+	} else {
+		goto free_sgt;
+	}
+	return sgt;
+
+ release:
+	for_each_sg(sgt->sgl, sg, num, i)
+		page_cache_release(sg_page(sg));
+ free_table:
+	sg_free_table(sgt);
+ free_sgt:
+	kfree(sgt);
+	return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	int i;
+
+	if (!dobj->linear)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+	if (dobj->obj.filp) {
+		struct scatterlist *sg;
+		for_each_sg(sgt->sgl, sg, sgt->nents, i)
+			page_cache_release(sg_page(sg));
+	}
+
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+	return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+	.map_dma_buf	= armada_gem_prime_map_dma_buf,
+	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
+	.release	= drm_gem_dmabuf_release,
+	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
+	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
+	.kmap		= armada_gem_dmabuf_no_kmap,
+	.kunmap		= armada_gem_dmabuf_no_kunmap,
+	.mmap		= armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+	int flags)
+{
+	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+			      O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+	struct dma_buf_attachment *attach;
+	struct armada_gem_object *dobj;
+
+	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+		struct drm_gem_object *obj = buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing our own dmabuf(s) increases the
+			 * refcount on the gem object itself.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(buf);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	dobj = armada_gem_alloc_private_object(dev, buf->size);
+	if (!dobj) {
+		dma_buf_detach(buf, attach);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dobj->obj.import_attach = attach;
+
+	/*
+	 * Don't call dma_buf_map_attachment() here - it maps the
+	 * scatterlist immediately for DMA, and this is not always
+	 * an appropriate thing to do.
+	 */
+	return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+	int ret;
+
+	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+					  DMA_TO_DEVICE);
+	if (!dobj->sgt) {
+		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+		return -EINVAL;
+	}
+	if (IS_ERR(dobj->sgt)) {
+		ret = PTR_ERR(dobj->sgt);
+		dobj->sgt = NULL;
+		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+		return ret;
+	}
+	if (dobj->sgt->nents > 1) {
+		DRM_ERROR("dma_buf_map_attachment() returned a multi-segment scatterlist (unsupported)\n");
+		return -EINVAL;
+	}
+	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+		DRM_ERROR("dma_buf_map_attachment() returned a buffer smaller than the object\n");
+		return -EINVAL;
+	}
+	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+	return 0;
+}
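
The dumb-buffer entry points above (armada_gem_dumb_create(), armada_gem_dumb_map_offset()) serve the standard KMS dumb-buffer ioctls. A minimal userspace sketch of that path follows — the device node, dimensions, and header paths are assumptions (they vary by system and libdrm installation), and error handling is omitted:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>	/* assumed include path; often <libdrm/drm.h> */
	#include <drm/drm_mode.h>

	/* fd is an open handle on e.g. /dev/dri/card0 */
	static void *map_dumb_fb(int fd, uint32_t w, uint32_t h, uint32_t *pitch)
	{
		struct drm_mode_create_dumb creq = { .height = h, .width = w, .bpp = 32 };
		struct drm_mode_map_dumb mreq = { .handle = 0 };

		/* ends up in armada_gem_dumb_create(): linear-backed allocation */
		ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);

		/* ends up in armada_gem_dumb_map_offset(): fills in the fake mmap offset */
		mreq.handle = creq.handle;
		ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);

		*pitch = creq.pitch;
		return mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, mreq.offset);
	}
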
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 0000000..00b6cd4
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+	struct drm_gem_object	obj;
+	void			*addr;
+	phys_addr_t		phys_addr;
+	resource_size_t		dev_addr;
+	struct drm_mm_node	*linear;	/* for linear backed */
+	struct page		*page;		/* for page backed */
+	struct sg_table		*sgt;		/* for imported */
+	void			(*update)(void *);
+	void			*update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+	size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+	struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+	uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+	uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+	struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+	struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+	struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+	struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+	return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 0000000..27319a8
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *  LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ *  LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ *  LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+	LCD_SPU_ADV_REG			= 0x0084,	/* Armada 510 */
+	LCD_SPU_DMA_START_ADDR_Y0	= 0x00c0,
+	LCD_SPU_DMA_START_ADDR_U0	= 0x00c4,
+	LCD_SPU_DMA_START_ADDR_V0	= 0x00c8,
+	LCD_CFG_DMA_START_ADDR_0	= 0x00cc,
+	LCD_SPU_DMA_START_ADDR_Y1	= 0x00d0,
+	LCD_SPU_DMA_START_ADDR_U1	= 0x00d4,
+	LCD_SPU_DMA_START_ADDR_V1	= 0x00d8,
+	LCD_CFG_DMA_START_ADDR_1	= 0x00dc,
+	LCD_SPU_DMA_PITCH_YC		= 0x00e0,
+	LCD_SPU_DMA_PITCH_UV		= 0x00e4,
+	LCD_SPU_DMA_OVSA_HPXL_VLN	= 0x00e8,
+	LCD_SPU_DMA_HPXL_VLN		= 0x00ec,
+	LCD_SPU_DZM_HPXL_VLN		= 0x00f0,
+	LCD_CFG_GRA_START_ADDR0		= 0x00f4,
+	LCD_CFG_GRA_START_ADDR1		= 0x00f8,
+	LCD_CFG_GRA_PITCH		= 0x00fc,
+	LCD_SPU_GRA_OVSA_HPXL_VLN	= 0x0100,
+	LCD_SPU_GRA_HPXL_VLN		= 0x0104,
+	LCD_SPU_GZM_HPXL_VLN		= 0x0108,
+	LCD_SPU_HWC_OVSA_HPXL_VLN	= 0x010c,
+	LCD_SPU_HWC_HPXL_VLN		= 0x0110,
+	LCD_SPUT_V_H_TOTAL		= 0x0114,
+	LCD_SPU_V_H_ACTIVE		= 0x0118,
+	LCD_SPU_H_PORCH			= 0x011c,
+	LCD_SPU_V_PORCH			= 0x0120,
+	LCD_SPU_BLANKCOLOR		= 0x0124,
+	LCD_SPU_ALPHA_COLOR1		= 0x0128,
+	LCD_SPU_ALPHA_COLOR2		= 0x012c,
+	LCD_SPU_COLORKEY_Y		= 0x0130,
+	LCD_SPU_COLORKEY_U		= 0x0134,
+	LCD_SPU_COLORKEY_V		= 0x0138,
+	LCD_CFG_RDREG4F			= 0x013c,	/* Armada 510 */
+	LCD_SPU_SPI_RXDATA		= 0x0140,
+	LCD_SPU_ISA_RXDATA		= 0x0144,
+	LCD_SPU_HWC_RDDAT		= 0x0158,
+	LCD_SPU_GAMMA_RDDAT		= 0x015c,
+	LCD_SPU_PALETTE_RDDAT		= 0x0160,
+	LCD_SPU_IOPAD_IN		= 0x0178,
+	LCD_CFG_RDREG5F			= 0x017c,
+	LCD_SPU_SPI_CTRL		= 0x0180,
+	LCD_SPU_SPI_TXDATA		= 0x0184,
+	LCD_SPU_SMPN_CTRL		= 0x0188,
+	LCD_SPU_DMA_CTRL0		= 0x0190,
+	LCD_SPU_DMA_CTRL1		= 0x0194,
+	LCD_SPU_SRAM_CTRL		= 0x0198,
+	LCD_SPU_SRAM_WRDAT		= 0x019c,
+	LCD_SPU_SRAM_PARA0		= 0x01a0,	/* Armada 510 */
+	LCD_SPU_SRAM_PARA1		= 0x01a4,
+	LCD_CFG_SCLK_DIV		= 0x01a8,
+	LCD_SPU_CONTRAST		= 0x01ac,
+	LCD_SPU_SATURATION		= 0x01b0,
+	LCD_SPU_CBSH_HUE		= 0x01b4,
+	LCD_SPU_DUMB_CTRL		= 0x01b8,
+	LCD_SPU_IOPAD_CONTROL		= 0x01bc,
+	LCD_SPU_IRQ_ENA			= 0x01c0,
+	LCD_SPU_IRQ_ISR			= 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+	ADV_VSYNC_L_OFF	= 0xfff << 20,
+	ADV_GRACOLORKEY	= 1 << 19,
+	ADV_VIDCOLORKEY	= 1 << 18,
+	ADV_HWC32BLEND	= 1 << 15,
+	ADV_HWC32ARGB	= 1 << 14,
+	ADV_HWC32ENABLE	= 1 << 13,
+	ADV_VSYNCOFFEN	= 1 << 12,
+	ADV_VSYNC_H_OFF	= 0xfff << 0,
+};
+
+enum {
+	CFG_565		= 0,
+	CFG_1555	= 1,
+	CFG_888PACK	= 2,
+	CFG_X888	= 3,
+	CFG_8888	= 4,
+	CFG_422PACK	= 5,
+	CFG_422		= 6,
+	CFG_420		= 7,
+	CFG_PSEUDO4	= 9,
+	CFG_PSEUDO8	= 10,
+	CFG_SWAPRB	= 1 << 4,
+	CFG_SWAPUV	= 1 << 3,
+	CFG_SWAPYU	= 1 << 2,
+	CFG_YUV2RGB	= 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+	CFG_NOBLENDING	= 1 << 31,
+	CFG_GAMMA_ENA	= 1 << 30,
+	CFG_CBSH_ENA	= 1 << 29,
+	CFG_PALETTE_ENA	= 1 << 28,
+	CFG_ARBFAST_ENA	= 1 << 27,
+	CFG_HWC_1BITMOD	= 1 << 26,
+	CFG_HWC_1BITENA	= 1 << 25,
+	CFG_HWC_ENA	= 1 << 24,
+	CFG_DMAFORMAT	= 0xf << 20,
+#define	CFG_DMA_FMT(x)	((x) << 20)
+	CFG_GRAFORMAT	= 0xf << 16,
+#define	CFG_GRA_FMT(x)	((x) << 16)
+#define CFG_GRA_MOD(x)	((x) << 8)
+	CFG_GRA_FTOGGLE	= 1 << 15,
+	CFG_GRA_HSMOOTH	= 1 << 14,
+	CFG_GRA_TSTMODE	= 1 << 13,
+	CFG_GRA_ENA	= 1 << 8,
+#define CFG_DMA_MOD(x)	((x) << 0)
+	CFG_DMA_FTOGGLE	= 1 << 7,
+	CFG_DMA_HSMOOTH	= 1 << 6,
+	CFG_DMA_TSTMODE	= 1 << 5,
+	CFG_DMA_ENA	= 1 << 0,
+};
+
+enum {
+	CKMODE_DISABLE	= 0,
+	CKMODE_Y	= 1,
+	CKMODE_U	= 2,
+	CKMODE_RGB	= 3,
+	CKMODE_V	= 4,
+	CKMODE_R	= 5,
+	CKMODE_G	= 6,
+	CKMODE_B	= 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+	CFG_FRAME_TRIG		= 1 << 31,
+	CFG_VSYNC_INV		= 1 << 27,
+	CFG_CKMODE_MASK		= 0x7 << 24,
+#define CFG_CKMODE(x)		((x) << 24)
+	CFG_CARRY		= 1 << 23,
+	CFG_GATED_CLK		= 1 << 21,
+	CFG_PWRDN_ENA		= 1 << 20,
+	CFG_DSCALE_MASK		= 0x3 << 18,
+	CFG_DSCALE_NONE		= 0x0 << 18,
+	CFG_DSCALE_HALF		= 0x1 << 18,
+	CFG_DSCALE_QUAR		= 0x2 << 18,
+	CFG_ALPHAM_MASK		= 0x3 << 16,
+	CFG_ALPHAM_VIDEO	= 0x0 << 16,
+	CFG_ALPHAM_GRA		= 0x1 << 16,
+	CFG_ALPHAM_CFG		= 0x2 << 16,
+	CFG_ALPHA_MASK		= 0xff << 8,
+	CFG_PIXCMD_MASK		= 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+	SRAM_READ	= 0 << 14,
+	SRAM_WRITE	= 2 << 14,
+	SRAM_INIT	= 3 << 14,
+	SRAM_HWC32_RAM1	= 0xc << 8,
+	SRAM_HWC32_RAM2	= 0xd << 8,
+	SRAM_HWC32_RAMR	= SRAM_HWC32_RAM1,
+	SRAM_HWC32_RAMG	= SRAM_HWC32_RAM2,
+	SRAM_HWC32_RAMB	= 0xe << 8,
+	SRAM_HWC32_TRAN	= 0xf << 8,
+	SRAM_HWC	= 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+	CFG_CSB_256x32	= 1 << 15,	/* cursor */
+	CFG_CSB_256x24	= 1 << 14,	/* palette */
+	CFG_CSB_256x8	= 1 << 13,	/* gamma */
+	CFG_PDWN1920x32	= 1 << 8,	/* Armada 510: power down vscale ram */
+	CFG_PDWN256x32	= 1 << 7,	/* power down cursor */
+	CFG_PDWN256x24	= 1 << 6,	/* power down palette */
+	CFG_PDWN256x8	= 1 << 5,	/* power down gamma */
+	CFG_PDWNHWC	= 1 << 4,	/* Armada 510: power down all hwc ram */
+	CFG_PDWN32x32	= 1 << 3,	/* power down slave->smart ram */
+	CFG_PDWN16x66	= 1 << 2,	/* power down UV fifo */
+	CFG_PDWN32x66	= 1 << 1,	/* power down Y fifo */
+	CFG_PDWN64x66	= 1 << 0,	/* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+	/* Armada 510 */
+	SCLK_510_AXI		= 0x0 << 30,
+	SCLK_510_EXTCLK0	= 0x1 << 30,
+	SCLK_510_PLL		= 0x2 << 30,
+	SCLK_510_EXTCLK1	= 0x3 << 30,
+	SCLK_510_DIV_CHANGE	= 1 << 29,
+	SCLK_510_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_510_INT_DIV_MASK	= 0xffff << 0,
+
+	/* Armada 16x */
+	SCLK_16X_AHB		= 0x0 << 28,
+	SCLK_16X_PCLK		= 0x1 << 28,
+	SCLK_16X_AXI		= 0x4 << 28,
+	SCLK_16X_PLL		= 0x8 << 28,
+	SCLK_16X_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_16X_INT_DIV_MASK	= 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+	DUMB16_RGB565_0	= 0x0 << 28,
+	DUMB16_RGB565_1	= 0x1 << 28,
+	DUMB18_RGB666_0	= 0x2 << 28,
+	DUMB18_RGB666_1	= 0x3 << 28,
+	DUMB12_RGB444_0	= 0x4 << 28,
+	DUMB12_RGB444_1	= 0x5 << 28,
+	DUMB24_RGB888_0	= 0x6 << 28,
+	DUMB_BLANK	= 0x7 << 28,
+	DUMB_MASK	= 0xf << 28,
+	CFG_BIAS_OUT	= 1 << 8,
+	CFG_REV_RGB	= 1 << 7,
+	CFG_INV_CBLANK	= 1 << 6,
+	CFG_INV_CSYNC	= 1 << 5,	/* Normally active high */
+	CFG_INV_HENA	= 1 << 4,
+	CFG_INV_VSYNC	= 1 << 3,	/* Normally active high */
+	CFG_INV_HSYNC	= 1 << 2,	/* Normally active high */
+	CFG_INV_PCLK	= 1 << 1,
+	CFG_DUMB_ENA	= 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+	CFG_VSCALE_LN_EN	= 3 << 18,
+	CFG_GRA_VM_ENA		= 1 << 15,
+	CFG_DMA_VM_ENA		= 1 << 13,
+	CFG_CMD_VM_ENA		= 1 << 11,
+	CFG_CSC_MASK		= 3 << 8,
+	CFG_CSC_YUV_CCIR709	= 1 << 9,
+	CFG_CSC_YUV_CCIR601	= 0 << 9,
+	CFG_CSC_RGB_STUDIO	= 1 << 8,
+	CFG_CSC_RGB_COMPUTER	= 0 << 8,
+	CFG_IOPAD_MASK		= 0xf << 0,
+	CFG_IOPAD_DUMB24	= 0x0 << 0,
+	CFG_IOPAD_DUMB18SPI	= 0x1 << 0,
+	CFG_IOPAD_DUMB18GPIO	= 0x2 << 0,
+	CFG_IOPAD_DUMB16SPI	= 0x3 << 0,
+	CFG_IOPAD_DUMB16GPIO	= 0x4 << 0,
+	CFG_IOPAD_DUMB12GPIO	= 0x5 << 0,
+	CFG_IOPAD_SMART18	= 0x6 << 0,
+	CFG_IOPAD_SMART16	= 0x7 << 0,
+	CFG_IOPAD_SMART8	= 0x8 << 0,
+};
+
+#define IOPAD_DUMB24                0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+	DMA_FRAME_IRQ0_ENA	= 1 << 31,
+	DMA_FRAME_IRQ1_ENA	= 1 << 30,
+	DMA_FRAME_IRQ_ENA	= DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+	DMA_FF_UNDERFLOW_ENA	= 1 << 29,
+	GRA_FRAME_IRQ0_ENA	= 1 << 27,
+	GRA_FRAME_IRQ1_ENA	= 1 << 26,
+	GRA_FRAME_IRQ_ENA	= GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+	GRA_FF_UNDERFLOW_ENA	= 1 << 25,
+	VSYNC_IRQ_ENA		= 1 << 23,
+	DUMB_FRAMEDONE_ENA	= 1 << 22,
+	TWC_FRAMEDONE_ENA	= 1 << 21,
+	HWC_FRAMEDONE_ENA	= 1 << 20,
+	SLV_IRQ_ENA		= 1 << 19,
+	SPI_IRQ_ENA		= 1 << 18,
+	PWRDN_IRQ_ENA		= 1 << 17,
+	ERR_IRQ_ENA		= 1 << 16,
+	CLEAN_SPU_IRQ_ISR	= 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+	DMA_FRAME_IRQ0		= 1 << 31,
+	DMA_FRAME_IRQ1		= 1 << 30,
+	DMA_FRAME_IRQ		= DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+	DMA_FF_UNDERFLOW	= 1 << 29,
+	GRA_FRAME_IRQ0		= 1 << 27,
+	GRA_FRAME_IRQ1		= 1 << 26,
+	GRA_FRAME_IRQ		= GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+	GRA_FF_UNDERFLOW	= 1 << 25,
+	VSYNC_IRQ		= 1 << 23,
+	DUMB_FRAMEDONE		= 1 << 22,
+	TWC_FRAMEDONE		= 1 << 21,
+	HWC_FRAMEDONE		= 1 << 20,
+	SLV_IRQ			= 1 << 19,
+	SPI_IRQ			= 1 << 18,
+	PWRDN_IRQ		= 1 << 17,
+	ERR_IRQ			= 1 << 16,
+	DMA_FRAME_IRQ0_LEVEL	= 1 << 15,
+	DMA_FRAME_IRQ1_LEVEL	= 1 << 14,
+	DMA_FRAME_CNT_ISR	= 3 << 12,
+	GRA_FRAME_IRQ0_LEVEL	= 1 << 11,
+	GRA_FRAME_IRQ1_LEVEL	= 1 << 10,
+	GRA_FRAME_CNT_ISR	= 3 << 8,
+	VSYNC_IRQ_LEVEL		= 1 << 7,
+	DUMB_FRAMEDONE_LEVEL	= 1 << 6,
+	TWC_FRAMEDONE_LEVEL	= 1 << 5,
+	HWC_FRAMEDONE_LEVEL	= 1 << 4,
+	SLV_FF_EMPTY		= 1 << 3,
+	DMA_FF_ALLEMPTY		= 1 << 2,
+	GRA_FF_ALLEMPTY		= 1 << 1,
+	PWRDN_IRQ_LEVEL		= 1 << 0,
+};
+
+#endif
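
As a worked example of how the LCD_SPU_DMA_CTRL0 fields compose, enabling the video DMA engine for packed YUYV input (the same fmt/mod pairing armada_fb.c selects for DRM_FORMAT_YUYV) would look roughly like:

	u32 ctrl0 = CFG_DMA_FMT(CFG_422PACK) |		/* packed 4:2:2 source */
		    CFG_DMA_MOD(CFG_YUV2RGB |		/* convert to RGB... */
				CFG_SWAPYU | CFG_SWAPUV) |	/* ...YUYV byte order */
		    CFG_DMA_ENA;			/* enable the video DMA */
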
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 0000000..bd8c456
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
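
ARMADA_IOCTL_PROTO() just stamps out one prototype per driver-private ioctl; ARMADA_IOCTL_PROTO(gem_create), for example, expands to:

	extern int armada_gem_create_ioctl(struct drm_device *, void *,
					   struct drm_file *);
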
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 0000000..d685a54
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+	struct drm_connector conn;
+	const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = conn->encoder;
+
+	return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+	struct drm_connector *conn, bool force)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+	enum drm_connector_status status = connector_status_disconnected;
+
+	if (dconn->type->detect) {
+		status = dconn->type->detect(conn, force);
+	} else {
+		struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+		if (enc)
+			status = encoder_helper_funcs(enc)->detect(enc, conn);
+	}
+
+	return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	drm_sysfs_connector_remove(conn);
+	drm_connector_cleanup(conn);
+	kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	if (!dconn->type->set_property)
+		return -EINVAL;
+
+	return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.detect		= armada_drm_connector_detect,
+	.destroy	= armada_drm_connector_destroy,
+	.set_property	= armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int valid = MODE_BAD;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		valid = slave->slave_funcs->mode_valid(encoder, mode);
+	}
+	return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int rc = -EINVAL;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		rc = slave->slave_funcs->set_property(encoder, conn, property,
+						      value);
+	}
+	return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data)
+{
+	struct armada_connector *dconn;
+	int ret;
+
+	dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+	if (!dconn)
+		return -ENOMEM;
+
+	dconn->type = type;
+
+	ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+				 type->connector_type);
+	if (ret) {
+		DRM_ERROR("unable to init connector\n");
+		goto err_destroy_dconn;
+	}
+
+	ret = type->create(&dconn->conn, data);
+	if (ret)
+		goto err_conn;
+
+	ret = drm_sysfs_connector_add(&dconn->conn);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+ err_sysfs:
+	if (dconn->conn.encoder)
+		dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+	drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+	kfree(dconn);
+	return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 0000000..4126d43
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+	((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+	int connector_type;
+	enum drm_connector_status (*detect)(struct drm_connector *, bool);
+	int (*create)(struct drm_connector *, const void *);
+	int (*set_property)(struct drm_connector *, struct drm_property *,
+			    uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 0000000..c5b06fd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+	uint32_t colorkey_yr;
+	uint32_t colorkey_ug;
+	uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+	int16_t  brightness;
+	uint16_t contrast;
+	uint16_t saturation;
+	uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+	struct drm_plane base;
+	spinlock_t lock;
+	struct drm_framebuffer *old_fb;
+	uint32_t src_hw;
+	uint32_t dst_hw;
+	uint32_t dst_yx;
+	uint32_t ctrl0;
+	struct {
+		struct armada_vbl_event update;
+		struct armada_regs regs[13];
+		wait_queue_head_t wait;
+	} vbl;
+	struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+	struct armada_crtc *dcrtc)
+{
+	writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+	writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+	writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+	writel_relaxed(prop->brightness << 16 | prop->contrast,
+		       dcrtc->base + LCD_SPU_CONTRAST);
+	/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+	writel_relaxed(prop->saturation << 16,
+		       dcrtc->base + LCD_SPU_SATURATION);
+	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+		     CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+		     dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+	struct armada_plane *dplane = data;
+	struct drm_framebuffer *fb;
+
+	armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+	spin_lock(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock(&dplane->lock);
+
+	if (fb)
+		armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+	int end = start + size;
+	if (end < 0)
+		return 0;
+	if (start < 0)
+		start = 0;
+	return (unsigned)end > max ? max - start : end - start;
+}
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+	struct drm_framebuffer *fb,
+	int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+	uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	uint32_t val, ctrl0;
+	unsigned idx = 0;
+	int ret;
+
+	crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+	crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+	ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+		CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+	/* Does the position/size result in nothing to display? */
+	if (crtc_w == 0 || crtc_h == 0) {
+		ctrl0 &= ~CFG_DMA_ENA;
+	}
+
+	/*
+	 * FIXME: if the starting point is off screen, we need to
+	 * adjust src_x, src_y, src_w, src_h appropriately, and
+	 * according to the scale.
+	 */
+
+	if (!dcrtc->plane) {
+		dcrtc->plane = plane;
+		armada_ovl_update_attr(&dplane->prop, dcrtc);
+	}
+
+	/* FIXME: overlay on an interlaced display */
+	/* Just updating the position/size? */
+	if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+		val = (src_h & 0xffff0000) | src_w >> 16;
+		dplane->src_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+		val = crtc_h << 16 | crtc_w;
+		dplane->dst_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+		val = crtc_y << 16 | crtc_x;
+		dplane->dst_yx = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+		return 0;
+	} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+		/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+		armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+			       dcrtc->base + LCD_SPU_SRAM_PARA1);
+	}
+
+	ret = wait_event_timeout(dplane->vbl.wait,
+				 list_empty(&dplane->vbl.update.node),
+				 HZ/25);
+	if (ret < 0)
+		return ret;
+
+	if (plane->fb != fb) {
+		struct armada_gem_object *obj = drm_fb_obj(fb);
+		uint32_t sy, su, sv;
+
+		/*
+		 * Take a reference on the new framebuffer - we want to
+		 * hold on to it while the hardware is displaying it.
+		 */
+		drm_framebuffer_reference(fb);
+
+		if (plane->fb) {
+			struct drm_framebuffer *older_fb;
+
+			spin_lock_irq(&dplane->lock);
+			older_fb = dplane->old_fb;
+			dplane->old_fb = plane->fb;
+			spin_unlock_irq(&dplane->lock);
+			if (older_fb)
+				armada_drm_queue_unref_work(dcrtc->crtc.dev,
+							    older_fb);
+		}
+
+		src_y >>= 16;
+		src_x >>= 16;
+		sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+			src_x * fb->bits_per_pixel / 8;
+		su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+			src_x;
+		sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+			src_x;
+
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V1);
+
+		val = fb->pitches[0] << 16 | fb->pitches[0];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_YC);
+		val = fb->pitches[1] << 16 | fb->pitches[2];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_UV);
+	}
+
+	val = (src_h & 0xffff0000) | src_w >> 16;
+	if (dplane->src_hw != val) {
+		dplane->src_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_HPXL_VLN);
+	}
+	val = crtc_h << 16 | crtc_w;
+	if (dplane->dst_hw != val) {
+		dplane->dst_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DZM_HPXL_VLN);
+	}
+	val = crtc_y << 16 | crtc_x;
+	if (dplane->dst_yx != val) {
+		dplane->dst_yx = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_OVSA_HPXL_VLN);
+	}
+	if (dplane->ctrl0 != ctrl0) {
+		dplane->ctrl0 = ctrl0;
+		armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+			CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+			CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+			CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+			CFG_YUV2RGB) | CFG_DMA_ENA,
+			LCD_SPU_DMA_CTRL0);
+	}
+	if (idx) {
+		armada_reg_queue_end(dplane->vbl.regs, idx);
+		armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+	}
+	return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct drm_framebuffer *fb;
+	struct armada_crtc *dcrtc;
+
+	if (!dplane->base.crtc)
+		return 0;
+
+	dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+	dcrtc->plane = NULL;
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+	armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+	dplane->ctrl0 = 0;
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	/* Power down the Y/U/V FIFOs */
+	armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	spin_lock_irq(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock_irq(&dplane->lock);
+	if (fb)
+		drm_framebuffer_unreference(fb);
+
+	return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+	kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = plane->dev->dev_private;
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	bool update_attr = false;
+
+	if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+		dplane->prop.colorkey_yr = CCC(K2R(val));
+		dplane->prop.colorkey_ug = CCC(K2G(val));
+		dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+		update_attr = true;
+	} else if (property == priv->colorkey_min_prop) {
+		dplane->prop.colorkey_yr &= ~0x00ff0000;
+		dplane->prop.colorkey_yr |= K2R(val) << 16;
+		dplane->prop.colorkey_ug &= ~0x00ff0000;
+		dplane->prop.colorkey_ug |= K2G(val) << 16;
+		dplane->prop.colorkey_vb &= ~0x00ff0000;
+		dplane->prop.colorkey_vb |= K2B(val) << 16;
+		update_attr = true;
+	} else if (property == priv->colorkey_max_prop) {
+		dplane->prop.colorkey_yr &= ~0xff000000;
+		dplane->prop.colorkey_yr |= K2R(val) << 24;
+		dplane->prop.colorkey_ug &= ~0xff000000;
+		dplane->prop.colorkey_ug |= K2G(val) << 24;
+		dplane->prop.colorkey_vb &= ~0xff000000;
+		dplane->prop.colorkey_vb |= K2B(val) << 24;
+		update_attr = true;
+	} else if (property == priv->colorkey_val_prop) {
+		dplane->prop.colorkey_yr &= ~0x0000ff00;
+		dplane->prop.colorkey_yr |= K2R(val) << 8;
+		dplane->prop.colorkey_ug &= ~0x0000ff00;
+		dplane->prop.colorkey_ug |= K2G(val) << 8;
+		dplane->prop.colorkey_vb &= ~0x0000ff00;
+		dplane->prop.colorkey_vb |= K2B(val) << 8;
+		update_attr = true;
+	} else if (property == priv->colorkey_alpha_prop) {
+		dplane->prop.colorkey_yr &= ~0x000000ff;
+		dplane->prop.colorkey_yr |= K2R(val);
+		dplane->prop.colorkey_ug &= ~0x000000ff;
+		dplane->prop.colorkey_ug |= K2G(val);
+		dplane->prop.colorkey_vb &= ~0x000000ff;
+		dplane->prop.colorkey_vb |= K2B(val);
+		update_attr = true;
+	} else if (property == priv->colorkey_mode_prop) {
+		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		update_attr = true;
+	} else if (property == priv->brightness_prop) {
+		dplane->prop.brightness = val - 256;
+		update_attr = true;
+	} else if (property == priv->contrast_prop) {
+		dplane->prop.contrast = val;
+		update_attr = true;
+	} else if (property == priv->saturation_prop) {
+		dplane->prop.saturation = val;
+		update_attr = true;
+	}
+
+	if (update_attr && dplane->base.crtc)
+		armada_ovl_update_attr(&dplane->prop,
+				       drm_to_armada_crtc(dplane->base.crtc));
+
+	return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+	.update_plane	= armada_plane_update,
+	.disable_plane	= armada_plane_disable,
+	.destroy	= armada_plane_destroy,
+	.set_property	= armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YVU420,
+	DRM_FORMAT_YUV422,
+	DRM_FORMAT_YVU422,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_ABGR1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+	{ CKMODE_DISABLE, "disabled" },
+	{ CKMODE_Y,       "Y component" },
+	{ CKMODE_U,       "U component" },
+	{ CKMODE_V,       "V component" },
+	{ CKMODE_RGB,     "RGB" },
+	{ CKMODE_R,       "R component" },
+	{ CKMODE_G,       "G component" },
+	{ CKMODE_B,       "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->colorkey_prop)
+		return 0;
+
+	priv->colorkey_prop = drm_property_create_range(dev, 0,
+				"colorkey", 0, 0xffffff);
+	priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+				"colorkey_min", 0, 0xffffff);
+	priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+				"colorkey_max", 0, 0xffffff);
+	priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+				"colorkey_val", 0, 0xffffff);
+	priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+				"colorkey_alpha", 0, 0xffffff);
+	priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+				"colorkey_mode",
+				armada_drm_colorkey_enum_list,
+				ARRAY_SIZE(armada_drm_colorkey_enum_list));
+	priv->brightness_prop = drm_property_create_range(dev, 0,
+				"brightness", 0, 256 + 255);
+	priv->contrast_prop = drm_property_create_range(dev, 0,
+				"contrast", 0, 0x7fff);
+	priv->saturation_prop = drm_property_create_range(dev, 0,
+				"saturation", 0, 0x7fff);
+
+	if (!priv->colorkey_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_mode_object *mobj;
+	struct armada_plane *dplane;
+	int ret;
+
+	ret = armada_overlay_create_properties(dev);
+	if (ret)
+		return ret;
+
+	dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+	if (!dplane)
+		return -ENOMEM;
+
+	spin_lock_init(&dplane->lock);
+	init_waitqueue_head(&dplane->vbl.wait);
+	armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+				  dplane);
+
+	ret = drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+			     armada_formats, ARRAY_SIZE(armada_formats), false);
+	if (ret) {
+		kfree(dplane);
+		return ret;
+	}
+
+	dplane->prop.colorkey_yr = 0xfefefe00;
+	dplane->prop.colorkey_ug = 0x01010100;
+	dplane->prop.colorkey_vb = 0x01010100;
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.brightness = 0;
+	dplane->prop.contrast = 0x4000;
+	dplane->prop.saturation = 0x4000;
+
+	mobj = &dplane->base.base;
+	drm_object_attach_property(mobj, priv->colorkey_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_min_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_max_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_val_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+				   0x000000);
+	drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+				   CKMODE_RGB);
+	drm_object_attach_property(mobj, priv->brightness_prop, 256);
+	drm_object_attach_property(mobj, priv->contrast_prop,
+				   dplane->prop.contrast);
+	drm_object_attach_property(mobj, priv->saturation_prop,
+				   dplane->prop.saturation);
+
+	return 0;
+}
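
The K2R()/K2G()/K2B() and CCC() helpers above unpack a 0x00BBGGRR property value into the per-channel colorkey registers. Working through the default value 0x0101fe attached to the properties (which matches the register defaults set in armada_overlay_plane_create()):

	K2R(0x0101fe) = (0x0101fe >>  0) & 0xff = 0xfe
	K2G(0x0101fe) = (0x0101fe >>  8) & 0xff = 0x01
	K2B(0x0101fe) = (0x0101fe >> 16) & 0xff = 0x01
	CCC(0xfe) = 0xfe<<24 | 0xfe<<16 | 0xfe<<8 = 0xfefefe00	/* colorkey_yr */
	CCC(0x01) = 0x01<<24 | 0x01<<16 | 0x01<<8 = 0x01010100	/* colorkey_ug/_vb */
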
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 0000000..00d0fac
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+	int count = 0;
+
+	if (enc) {
+		struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+		count = slave->slave_funcs->get_modes(enc, conn);
+	}
+
+	return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+	struct drm_encoder_slave *slave = to_encoder_slave(enc);
+	struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+	if (slave->slave_funcs)
+		slave->slave_funcs->destroy(enc);
+	if (client)
+		i2c_put_adapter(client->adapter);
+
+	drm_encoder_cleanup(&slave->base);
+	kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+	.destroy	= armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+	.get_modes	= armada_drm_slave_get_modes,
+	.mode_valid	= armada_drm_slave_encoder_mode_valid,
+	.best_encoder	= armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+	.dpms = drm_i2c_encoder_dpms,
+	.save = drm_i2c_encoder_save,
+	.restore = drm_i2c_encoder_restore,
+	.mode_fixup = drm_i2c_encoder_mode_fixup,
+	.prepare = drm_i2c_encoder_prepare,
+	.commit = drm_i2c_encoder_commit,
+	.mode_set = drm_i2c_encoder_mode_set,
+	.detect = drm_i2c_encoder_detect,
+};
+
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+	const struct armada_drm_slave_config *config = data;
+	struct drm_encoder_slave *slave;
+	struct i2c_adapter *adap;
+	int ret;
+
+	conn->interlace_allowed = config->interlace_allowed;
+	conn->doublescan_allowed = config->doublescan_allowed;
+	conn->polled = config->polled;
+
+	drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	slave->base.possible_crtcs = config->crtcs;
+
+	adap = i2c_get_adapter(config->i2c_adapter_id);
+	if (!adap) {
+		kfree(slave);
+		return -EPROBE_DEFER;
+	}
+
+	ret = drm_encoder_init(conn->dev, &slave->base,
+			       &armada_drm_slave_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS);
+	if (ret) {
+		DRM_ERROR("unable to init encoder\n");
+		i2c_put_adapter(adap);
+		kfree(slave);
+		return ret;
+	}
+
+	ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+	i2c_put_adapter(adap);
+	if (ret) {
+		DRM_ERROR("unable to init encoder slave\n");
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+	ret = slave->slave_funcs->create_resources(&slave->base, conn);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	conn->encoder = &slave->base;
+
+	return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+	.connector_type	= DRM_MODE_CONNECTOR_HDMIA,
+	.create		= armada_drm_conn_slave_create,
+	.set_property	= armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *config)
+{
+	return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 0000000..bf2374c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+	int i2c_adapter_id;
+	uint32_t crtcs;
+	uint8_t polled;
+	bool interlace_allowed;
+	bool doublescan_allowed;
+	struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *);
+
+#endif
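
For illustration, a platform setup would fill in this config and call armada_drm_connector_slave_create() during driver load; the adapter id, encoder name and i2c address below are made-up example values rather than anything this patch defines:

    static const struct armada_drm_slave_config example_hdmi = {
            .i2c_adapter_id    = 0,
            .crtcs             = 1 << 0,    /* CRTC 0 only */
            .polled            = DRM_CONNECTOR_POLL_CONNECT |
                                 DRM_CONNECTOR_POLL_DISCONNECT,
            .interlace_allowed = true,
            .info              = { I2C_BOARD_INFO("tda998x", 0x70) },
    };

    /* err = armada_drm_connector_slave_create(dev, &example_hdmi); */

If the i2c adapter has not probed yet, the create path above returns -EPROBE_DEFER so the bind can be retried later.
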
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51e..8a784c4 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@
 	select FB_SYS_FILLRECT
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270d..5137f15 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = ast_gem_init_object,
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e..9833a1b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 				struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d..af0b868 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@
 	return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22..9864559 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d..953fc8a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_init_object = cirrus_gem_init_object,
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb91..b6aded7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@
 		      struct pci_dev *pdev,
 		      uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 			    struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a53..78e76f2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@
 	return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b2..adabc3d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@
 
 int cirrus_vga_get_modes(struct drm_connector *connector)
 {
-	/* Just add a static list of modes */
-	drm_add_modes_noedid(connector, 640, 480);
-	drm_add_modes_noedid(connector, 800, 600);
-	drm_add_modes_noedid(connector, 1024, 768);
-	drm_add_modes_noedid(connector, 1280, 1024);
+	int count;
 
-	return 4;
+	/* Just add a static list of modes */
+	count = drm_add_modes_noedid(connector, 1280, 1024);
+	drm_set_preferred_mode(connector, 1024, 768);
+	return count;
 }
 
 static int cirrus_vga_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff96..a4b017b 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa9..d6cf77c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@
 }
 
 /**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
  * @out: drm_display_mode to return to the user
  * @in: drm_mode_modeinfo to use
  *
@@ -1317,6 +1319,9 @@
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1552,7 +1557,7 @@
 	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1579,6 +1584,19 @@
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1623,7 +1641,7 @@
 	obj = drm_mode_object_find(dev, out_resp->connector_id,
 				   DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	connector = obj_to_connector(obj);
@@ -1644,7 +1662,8 @@
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -1735,7 +1757,7 @@
 	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
 				   DRM_MODE_OBJECT_ENCODER);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	encoder = obj_to_encoder(obj);
@@ -2040,6 +2062,45 @@
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2080,14 +2141,13 @@
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2104,7 +2164,7 @@
 			if (!fb) {
 				DRM_DEBUG_KMS("Unknown FB ID%d\n",
 						crtc_req->fb_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 		}
@@ -2123,23 +2183,11 @@
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -2184,7 +2232,7 @@
 			if (!obj) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 			connector = obj_to_connector(obj);
@@ -2232,7 +2280,7 @@
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 
@@ -2441,6 +2489,8 @@
 	case DRM_FORMAT_YVU444:
 		return 0;
 	default:
+		DRM_DEBUG_KMS("invalid pixel format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return -EINVAL;
 	}
 }
@@ -2606,7 +2656,7 @@
 	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 /**
@@ -2634,7 +2684,7 @@
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	r->height = fb->height;
 	r->width = fb->width;
@@ -2679,7 +2729,7 @@
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	num_clips = r->num_clips;
 	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3011,7 +3061,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	property = obj_to_property(obj);
@@ -3140,7 +3190,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	blob = obj_to_blob(obj);
@@ -3301,7 +3351,7 @@
 
 	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	if (!obj->properties) {
@@ -3354,8 +3404,10 @@
 	drm_modeset_lock_all(dev);
 
 	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj)
+	if (!arg_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	if (!arg_obj->properties)
 		goto out;
 
@@ -3368,8 +3420,10 @@
 
 	prop_obj = drm_mode_object_find(dev, arg->prop_id,
 					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj)
+	if (!prop_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	property = obj_to_property(prop_obj);
 
 	if (!drm_property_change_is_valid(property, arg->value))
@@ -3454,7 +3508,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3513,7 +3567,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3556,7 +3610,6 @@
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3568,7 +3621,7 @@
 
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj)
-		return -EINVAL;
+		return -ENOENT;
 	crtc = obj_to_crtc(obj);
 
 	mutex_lock(&crtc->mutex);
@@ -3585,26 +3638,15 @@
 		goto out;
 
 	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-	if (!fb)
-		goto out;
-
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	if (!fb) {
+		ret = -ENOENT;
 		goto out;
 	}
 
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
+		goto out;
+
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
@@ -3788,7 +3830,8 @@
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format\n");
+		DRM_DEBUG_KMS("unsupported pixel format %s\n",
+			      drm_get_format_name(format));
 		*depth = 0;
 		*bpp = 0;
 		break;
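
The new drm_crtc_check_viewport() centralizes a bounds test that drm_mode_setcrtc() and the page-flip ioctl previously duplicated. Restated as a standalone sketch (illustration only):

    #include <stdbool.h>

    static bool viewport_fits(int x, int y, int hdisplay, int vdisplay,
                              int fb_width, int fb_height)
    {
            /* the scanout window, offset by (x, y), must stay inside the fb */
            return hdisplay <= fb_width && vdisplay <= fb_height &&
                   x <= fb_width - hdisplay && y <= fb_height - vdisplay;
    }

    /* viewport_fits(100, 0, 1920, 1080, 1920, 1080) is false: the x
     * offset pushes the right edge to 2020 > 1920, hence -ENOSPC. */

For stereo modes the helper first recomputes the dimensions with CRTC_STEREO_DOUBLE, since a layout such as frame packing scans out roughly twice the nominal height.
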
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b..01361ab 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  * 						connector list
@@ -76,7 +80,8 @@
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 				!(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+				!(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +113,9 @@
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@
 		mode_flags |= DRM_MODE_FLAG_INTERLACE;
 	if (connector->doublescan_allowed)
 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (connector->stereo_allowed)
+		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 	drm_mode_validate_flag(connector, mode_flags);
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -395,22 +405,25 @@
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_display_mode *adjusted_mode, saved_mode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
+	bool saved_enabled;
 	struct drm_encoder *encoder;
 	bool ret = true;
 
+	saved_enabled = crtc->enabled;
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
 	if (!crtc->enabled)
 		return true;
 
 	adjusted_mode = drm_mode_duplicate(dev, mode);
-	if (!adjusted_mode)
+	if (!adjusted_mode) {
+		crtc->enabled = saved_enabled;
 		return false;
+	}
 
-	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -529,7 +542,7 @@
 done:
 	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
-		crtc->hwmode = saved_hwmode;
+		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -557,6 +570,14 @@
 				continue;
 
 			connector->encoder = NULL;
+
+			/*
+			 * drm_helper_disable_unused_functions() ought to be
+			 * doing this, but since we've decoupled the encoder
+			 * from the connector above, the required connection
+			 * between them is henceforth no longer available.
+			 */
+			connector->dpms = DRM_MODE_DPMS_OFF;
 		}
 	}
 
@@ -583,9 +604,8 @@
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
-	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+	struct drm_crtc *new_crtc;
 	struct drm_encoder *save_encoders, *new_encoder, *encoder;
-	struct drm_framebuffer *old_fb = NULL;
 	bool mode_changed = false; /* if true do a full mode set */
 	bool fb_changed = false; /* if true and !mode_changed just do a flip */
 	struct drm_connector *save_connectors, *connector;
@@ -621,38 +641,28 @@
 
 	dev = set->crtc->dev;
 
-	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
-	 * connector data. */
-	save_crtcs = kzalloc(dev->mode_config.num_crtc *
-			     sizeof(struct drm_crtc), GFP_KERNEL);
-	if (!save_crtcs)
-		return -ENOMEM;
-
+	/*
+	 * Allocate space for the backup of all (non-pointer) encoder and
+	 * connector data.
+	 */
 	save_encoders = kzalloc(dev->mode_config.num_encoder *
 				sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!save_encoders) {
-		kfree(save_crtcs);
+	if (!save_encoders)
 		return -ENOMEM;
-	}
 
 	save_connectors = kzalloc(dev->mode_config.num_connector *
 				sizeof(struct drm_connector), GFP_KERNEL);
 	if (!save_connectors) {
-		kfree(save_crtcs);
 		kfree(save_encoders);
 		return -ENOMEM;
 	}
 
-	/* Copy data. Note that driver private data is not affected.
+	/*
+	 * Copy data. Note that driver private data is not affected.
 	 * Should anything bad happen only the expected state is
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		save_crtcs[count++] = *crtc;
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		save_encoders[count++] = *encoder;
 	}
@@ -775,19 +785,17 @@
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
-		if (set->crtc->enabled) {
+		if (drm_helper_crtc_in_use(set->crtc)) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
-			old_fb = set->crtc->fb;
 			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
-						      old_fb)) {
+						      save_set.fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
-				set->crtc->fb = old_fb;
+				set->crtc->fb = save_set.fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -802,31 +810,24 @@
 	} else if (fb_changed) {
 		set->crtc->x = set->x;
 		set->crtc->y = set->y;
-
-		old_fb = set->crtc->fb;
-		if (set->crtc->fb != set->fb)
-			set->crtc->fb = set->fb;
+		set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
-						set->x, set->y, old_fb);
+						set->x, set->y, save_set.fb);
 		if (ret != 0) {
-			set->crtc->fb = old_fb;
+			set->crtc->x = save_set.x;
+			set->crtc->y = save_set.y;
+			set->crtc->fb = save_set.fb;
 			goto fail;
 		}
 	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return 0;
 
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		*crtc = save_crtcs[count++];
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		*encoder = save_encoders[count++];
 	}
@@ -844,7 +845,6 @@
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1125,14 +1125,14 @@
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
 	bool changed = false;
 
 	if (!dev->mode_config.poll_enabled)
-		return;
+		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1157,7 @@
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
+
+	return changed;
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
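
With the new bool return, a caller can tell whether the probe pass actually changed any connector state. A hypothetical driver-side user (handler name and logging are illustrative):

    static irqreturn_t example_hpd_thread(int irq, void *arg)
    {
            struct drm_device *dev = arg;

            /* true iff at least one connector changed status */
            if (drm_helper_hpd_irq_event(dev))
                    DRM_DEBUG_KMS("hotplug changed connector state\n");

            return IRQ_HANDLED;
    }
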
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index a05087c..b4b51d4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -42,7 +42,7 @@
  * Initialization, etc.
  **************************************************/
 
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@
  * Create a given set of debugfs files represented by an array of
  * gdm_debugfs_lists in the given root directory.
  */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
 			     struct dentry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@
  *
  * Remove all debugfs entries created by debugfs_init().
  */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
 			     struct drm_minor *minor)
 {
 	struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e1966..9e978aa 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fe58d08..d9137e4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -473,7 +403,7 @@
 
       err_i1:
 	if (!ioctl)
-		DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
 			  task_pid_nr(current),
 			  (long)old_encode_dev(file_priv->minor->device),
 			  file_priv->authenticated, cmd, nr);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f750..fb7cf0e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -458,6 +458,15 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 };
 
+/*
+ * These more or less come from the DMT spec.  The 720x400 modes are
+ * inferred from historical 80x25 practice.  The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes.  The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
 static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@
 	{ 1600, 1200, 75, 0 },
 	{ 1600, 1200, 85, 0 },
 	{ 1792, 1344, 60, 0 },
-	{ 1792, 1344, 85, 0 },
+	{ 1792, 1344, 75, 0 },
 	{ 1856, 1392, 60, 0 },
 	{ 1856, 1392, 75, 0 },
 	{ 1920, 1200, 60, 1 },
@@ -1264,6 +1273,18 @@
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -1308,7 +1329,7 @@
 }
 
 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
 /**
  * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1323,6 +1344,7 @@
 {
 	struct drm_display_mode *t, *cur_mode, *preferred_mode;
 	int target_refresh = 0;
+	int cur_vrefresh, preferred_vrefresh;
 
 	if (list_empty(&connector->probed_modes))
 		return;
@@ -1345,10 +1367,14 @@
 		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
 			preferred_mode = cur_mode;
 
+		cur_vrefresh = cur_mode->vrefresh ?
+			cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+		preferred_vrefresh = preferred_mode->vrefresh ?
+			preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
 		/* At a given size, try to get closest to target refresh */
 		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
-		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
-		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+		    MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+		    MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
 			preferred_mode = cur_mode;
 		}
 	}
@@ -2068,7 +2094,7 @@
 	u8 *est = ((u8 *)timing) + 5;
 
 	for (i = 0; i < 6; i++) {
-		for (j = 7; j > 0; j--) {
+		for (j = 7; j >= 0; j--) {
 			m = (i * 8) + (7 - j);
 			if (m >= ARRAY_SIZE(est3_modes))
 				break;
@@ -2404,7 +2430,7 @@
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2479,7 @@
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2533,9 @@
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2582,151 @@
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index >= video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	struct drm_device *dev = connector->dev;
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2585,30 +2745,56 @@
 
 	/* the declared length is not long enough for the 2 first bytes
 	 * of additional video format capabilities */
-	offset += 2;
-	if (len < (8 + offset))
+	if (len < (8 + offset + 2))
 		goto out;
 
+	/* 3D_Present */
+	offset++;
+	if (db[8 + offset] & (1 << 7)) {
+		modes += add_hdmi_mandatory_stereo_modes(connector);
+
+		/* 3D_Multi_present */
+		multi_present = (db[8 + offset] & 0x60) >> 5;
+	}
+
+	offset++;
 	vic_len = db[8 + offset] >> 5;
+	hdmi_3d_len = db[8 + offset] & 0x1f;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-		struct drm_display_mode *newmode;
 		u8 vic;
 
 		vic = db[9 + offset + i];
+		modes += add_hdmi_mode(connector, vic);
+	}
+	offset += 1 + vic_len;
 
-		vic--; /* VICs start at 1 */
-		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-			continue;
-		}
+	if (!(multi_present == 1 || multi_present == 2))
+		goto out;
 
-		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-		if (!newmode)
-			continue;
+	if ((multi_present == 1 && len < (9 + offset)) ||
+	    (multi_present == 2 && len < (11 + offset)))
+		goto out;
 
-		drm_mode_probed_add(connector, newmode);
-		modes++;
+	if ((multi_present == 1 && hdmi_3d_len < 2) ||
+	    (multi_present == 2 && hdmi_3d_len < 4))
+		goto out;
+
+	/* 3D_Structure_ALL */
+	structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+	/* check if 3D_MASK is present */
+	if (multi_present == 2)
+		mask = (db[10 + offset] << 8) | db[11 + offset];
+	else
+		mask = 0xffff;
+
+	for (i = 0; i < 16; i++) {
+		if (mask & (1 << i))
+			modes += add_3d_struct_modes(connector,
+						     structure_all,
+						     video_db,
+						     video_len, i);
 	}
 
 out:
@@ -2668,8 +2854,8 @@
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db;
-	u8 dbl;
+	const u8 *db, *hdmi = NULL, *video = NULL;
+	u8 dbl, hdmi_len, video_len = 0;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2868,26 @@
 			db = &cea[i];
 			dbl = cea_db_payload_len(db);
 
-			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes(connector, db + 1, dbl);
-			else if (cea_db_is_hdmi_vsdb(db))
-				modes += do_hdmi_vsdb_modes(connector, db, dbl);
+			if (cea_db_tag(db) == VIDEO_BLOCK) {
+				video = db + 1;
+				video_len = dbl;
+				modes += do_cea_modes(connector, video, dbl);
+			} else if (cea_db_is_hdmi_vsdb(db)) {
+				hdmi = db;
+				hdmi_len = dbl;
+			}
 		}
 	}
 
+	/*
+	 * We parse the HDMI VSDB after having added the cea modes as we will
+	 * be patching their flags when the sink supports stereo 3D.
+	 */
+	if (hdmi)
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+					    video_len);
+
 	return modes;
 }
 
@@ -3288,6 +3487,19 @@
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
 
+void drm_set_preferred_mode(struct drm_connector *connector,
+			   int hpref, int vpref)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (drm_mode_width(mode)  == hpref &&
+		    drm_mode_height(mode) == vpref)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
 /**
  * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
  *                                              data from a DRM display mode
@@ -3321,6 +3533,33 @@
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	switch (layout) {
+	case DRM_MODE_FLAG_3D_FRAME_PACKING:
+		return HDMI_3D_STRUCTURE_FRAME_PACKING;
+	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+	case DRM_MODE_FLAG_3D_L_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH;
+	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+	default:
+		return HDMI_3D_STRUCTURE_INVALID;
+	}
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3338,20 +3577,29 @@
 					    const struct drm_display_mode *mode)
 {
 	int err;
+	u32 s3d_flags;
 	u8 vic;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
 	vic = drm_match_hdmi_mode(mode);
-	if (!vic)
+	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	if (!vic && !s3d_flags)
+		return -EINVAL;
+
+	if (vic && s3d_flags)
 		return -EINVAL;
 
 	err = hdmi_vendor_infoframe_init(frame);
 	if (err < 0)
 		return err;
 
-	frame->vic = vic;
+	if (vic)
+		frame->vic = vic;
+	else
+		frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
 	return 0;
 }
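
A worked example of the 3D_Structure_ALL/3D_MASK path: suppose a sink reports 3D_Multi_present == 2 with 3D_Structure_ALL = 0x0041 and 3D_MASK = 0x0003. Bits 0 and 6 of structure_all select frame packing and top-and-bottom, and the mask limits them to the first two entries of the video data block, so the loop in do_hdmi_vsdb_modes() reduces to:

    u16 structure_all = 0x0041;     /* bit 0: FP, bit 6: TB */
    u16 mask = 0x0003;              /* only video descriptors 0 and 1 */

    modes += add_3d_struct_modes(connector, structure_all,
                                 video_db, video_len, 0);
    modes += add_3d_struct_modes(connector, structure_all,
                                 video_db, video_len, 1);

Each call duplicates the referenced CEA mode once per supported layout, so this example yields up to four additional stereo modes.
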
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42b..9081172 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@
 	"from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1024x768.bin",
 	"edid/1280x1024.bin",
 	"edid/1600x1200.bin",
@@ -40,7 +40,7 @@
 	"edid/1920x1080.bin",
 };
 
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
 	{
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
 	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@
 	},
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+	if (data_size < EDID_LENGTH)
+		return 0;
+
+	return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
 			const char *connector_name)
 {
-	const struct firmware *fw;
-	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid, *new_edid;
-	int fwsize, expected;
-	int builtin = 0, err = 0;
+	const struct firmware *fw = NULL;
+	const u8 *fwdata;
+	u8 *edid;
+	int fwsize, builtin;
 	int i, valid_extensions = 0;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		DRM_ERROR("Failed to register EDID firmware platform device "
-		    "for connector \"%s\"\n", connector_name);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_firmware(&fw, name, &pdev->dev);
-	platform_device_unregister(pdev);
-
-	if (err) {
-		i = 0;
-		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-			i++;
-		if (i < GENERIC_EDIDS) {
-			err = 0;
-			builtin = 1;
+	builtin = 0;
+	for (i = 0; i < GENERIC_EDIDS; i++) {
+		if (strcmp(name, generic_edid_name[i]) == 0) {
 			fwdata = generic_edid[i];
 			fwsize = sizeof(generic_edid[i]);
+			builtin = 1;
+			break;
 		}
 	}
+	if (!builtin) {
+		struct platform_device *pdev;
+		int err;
 
-	if (err) {
-		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-		    name, err);
-		goto out;
-	}
+		pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			DRM_ERROR("Failed to register EDID firmware platform device "
+				  "for connector \"%s\"\n", connector_name);
+			return ERR_CAST(pdev);
+		}
 
-	if (fwdata == NULL) {
-		fwdata = (u8 *) fw->data;
+		err = request_firmware(&fw, name, &pdev->dev);
+		platform_device_unregister(pdev);
+		if (err) {
+			DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+				  name, err);
+			return ERR_PTR(err);
+		}
+
+		fwdata = fw->data;
 		fwsize = fw->size;
 	}
 
-	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-	if (expected != fwsize) {
+	if (edid_size(fwdata, fwsize) != fwsize) {
 		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
-		err = -EINVAL;
-		goto relfw_out;
+			  "(expected %d, got %d)\n", name,
+			  edid_size(fwdata, fwsize), (int)fwsize);
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
-		err = -ENOMEM;
-		goto relfw_out;
+		edid = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 		    name);
 		kfree(edid);
-		err = -EINVAL;
-		goto relfw_out;
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *new_edid;
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
 		    "\"%s\" for connector \"%s\"\n", valid_extensions,
 		    edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
+
 		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
-		    GFP_KERNEL);
-		if (new_edid == NULL) {
-			err = -ENOMEM;
-			kfree(edid);
-			goto relfw_out;
-		}
-		edid = new_edid;
+				    GFP_KERNEL);
+		if (new_edid)
+			edid = new_edid;
 	}
 
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
 	    name, connector_name);
 
-relfw_out:
-	release_firmware(fw);
-
 out:
-	if (err)
-		return ERR_PTR(err);
-
+	if (fw)
+		release_firmware(fw);
 	return edid;
 }
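
The new edid_size() helper makes the length rule explicit: a blob must be exactly (extensions + 1) * EDID_LENGTH bytes, with EDID_LENGTH being 128. Illustration with made-up numbers:

    /* a firmware image claiming two extension blocks ... */
    u8 fw[3 * EDID_LENGTH] = { [0x7e] = 2 };

    /* ... passes only because its size matches exactly:
     * edid_size(fw, sizeof(fw)) == (2 + 1) * 128 == 384 == sizeof(fw) */
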
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e2..0a19401 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
 static LIST_HEAD(kernel_fb_helper_list);
 
 /**
@@ -844,7 +840,6 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_mode_set *modeset;
-	struct drm_crtc *crtc;
 	int ret = 0;
 	int i;
 
@@ -855,8 +850,6 @@
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
 		modeset = &fb_helper->crtc_info[i].mode_set;
 
 		modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@
 	struct drm_connector *connector;
 	struct drm_connector_helper_funcs *connector_funcs;
 	struct drm_encoder *encoder;
-	struct drm_fb_helper_crtc *best_crtc;
 	int my_score, best_score, score;
 	struct drm_fb_helper_crtc **crtcs, *crtc;
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@
 	connector = fb_helper_conn->connector;
 
 	best_crtcs[n] = NULL;
-	best_crtc = NULL;
 	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
 	if (modes[n] == NULL)
 		return best_score;
@@ -1413,7 +1404,6 @@
 		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
 						  width, height);
 		if (score > best_score) {
-			best_crtc = crtc;
 			best_score = score;
 			memcpy(best_crtcs, crtcs,
 			       dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
-	int count = 0;
-	u32 max_width, max_height, bpp_sel;
+	u32 max_width, max_height;
 
 	if (!fb_helper->fb)
 		return 0;
@@ -1596,10 +1585,8 @@
 
 	max_width = fb_helper->fb->width;
 	max_height = fb_helper->fb->height;
-	bpp_sel = fb_helper->fb->bits_per_pixel;
 
-	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
-						    max_height);
+	drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
 	mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
 	drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index e788882..f9c7fa3 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -34,7 +34,7 @@
  */
 void drm_flip_work_queue(struct drm_flip_work *work, void *val)
 {
-	if (kfifo_put(&work->fifo, (const void **)&val)) {
+	if (kfifo_put(&work->fifo, val)) {
 		atomic_inc(&work->pending);
 	} else {
 		DRM_ERROR("%s fifo full!\n", work->name);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277..c5b929c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@
 	retcode = drm_open_helper(inode, filp, dev);
 	if (retcode)
 		goto err_undo;
-	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
 	if (need_setup) {
 		retcode = drm_setup(dev);
 		if (retcode)
@@ -148,7 +147,7 @@
 	struct drm_minor *minor;
 	int minor_id = iminor(inode);
 	int err = -ENODEV;
-	const struct file_operations *old_fops;
+	const struct file_operations *new_fops;
 
 	DRM_DEBUG("\n");
 
@@ -163,18 +162,13 @@
 	if (drm_device_is_unplugged(dev))
 		goto out;
 
-	old_fops = filp->f_op;
-	filp->f_op = fops_get(dev->driver->fops);
-	if (filp->f_op == NULL) {
-		filp->f_op = old_fops;
+	new_fops = fops_get(dev->driver->fops);
+	if (!new_fops)
 		goto out;
-	}
-	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
-		fops_put(filp->f_op);
-		filp->f_op = fops_get(old_fops);
-	}
-	fops_put(old_fops);
 
+	replace_fops(filp, new_fops);
+	if (filp->f_op->open)
+		err = filp->f_op->open(inode, filp);
 out:
 	mutex_unlock(&drm_global_mutex);
 	return err;
@@ -240,7 +234,8 @@
 
 	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
-	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->always_authenticated = capable(CAP_SYS_ADMIN);
+	priv->authenticated = priv->always_authenticated;
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
@@ -379,13 +374,80 @@
 		}
 
 	/* Remove unconsumed events */
-	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+		list_del(&e->link);
 		e->destroy(e);
+	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
 /**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+
+	drm_legacy_dma_takedown(dev);
+
+	dev->dev_mapping = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_legacy_dev_reinit(dev);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
+/**
  * Release file.
  *
  * \param inode device inode
@@ -454,7 +516,6 @@
 
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
@@ -468,7 +529,7 @@
 		list_for_each_entry(temp, &dev->filelist, lhead) {
 			if ((temp->master == file_priv->master) &&
 			    (temp != file_priv))
-				temp->authenticated = 0;
+				temp->authenticated = temp->always_authenticated;
 		}
 
 		/**
@@ -516,7 +577,6 @@
 	 * End inline drm_release
 	 */
 
-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
 	if (!--dev->open_count) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc..4761ade 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
-	struct drm_gem_object *obj;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		goto free;
-
-	if (drm_gem_object_init(dev, obj, size) != 0)
-		goto free;
-
-	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0) {
-		goto fput;
-	}
-	return obj;
-fput:
-	/* Object_init mangles the global counters - readjust them. */
-	fput(obj->filp);
-free:
-	kfree(obj);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
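
With drm_gem_object_alloc() and the gem_init_object() hook removed above, drivers embed and initialize the GEM object themselves. A minimal sketch of the replacement pattern, assuming a hypothetical driver wrapper struct; drm_gem_object_init() is the surviving core helper that provides the shmfs backing store:

/* Hypothetical driver-side replacement for the removed helper. */
struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private state ... */
};

static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
					     size_t size)
{
	struct foo_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	/* shmfs-backed init, as drm_gem_object_alloc() used to do */
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
	return obj;
}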
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f731116..3d2e91c 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@
 {
 	int ret;
 	struct drm_global_item *item = &glob[ref->global_type];
-	void *object;
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
@@ -85,7 +84,6 @@
 	}
 	++item->refcount;
 	ref->object = item->object;
-	object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
 out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5329832..7d5a152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@
 	mutex_lock(&dev->struct_mutex);
 	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
 		seq_printf(m, "CRTC %d enable:     %d\n",
-			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+			   crtc, atomic_read(&dev->vblank[crtc].refcount));
 		seq_printf(m, "CRTC %d counter:    %d\n",
 			   crtc, drm_vblank_count(dev, crtc));
 		seq_printf(m, "CRTC %d last wait:  %d\n",
-			   crtc, dev->last_vblank_wait[crtc]);
+			   crtc, dev->vblank[crtc].last_wait);
 		seq_printf(m, "CRTC %d in modeset: %d\n",
-			   crtc, dev->vblank_inmodeset[crtc]);
+			   crtc, dev->vblank[crtc].inmodeset);
 	}
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2..dffc836 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@
 }
 
 /**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_client_cap *req = data;
+
+	switch (req->capability) {
+	case DRM_CLIENT_CAP_STEREO_3D:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->stereo_allowed = req->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * Setversion ioctl.
  *
  * \param inode device inode.
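
The new capability is reached from user space as a per-file-descriptor request; a hedged sketch, assuming libdrm's drmSetClientCap() wrapper:

/* Userspace sketch: opt this client in to stereo 3D modes.
 * Per the handler above, values other than 0 and 1 yield -EINVAL.
 */
#include <xf86drm.h>

static int enable_stereo_modes(int drm_fd)
{
	return drmSetClientCap(drm_fd, DRM_CLIENT_CAP_STEREO_3D, 1);
}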
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a..64c34d5 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
-	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
-	((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@
  */
 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
 {
-	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
-		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+	memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
 }
 
 /*
@@ -115,7 +113,7 @@
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	dev->driver->disable_vblank(dev, crtc);
-	dev->vblank_enabled[crtc] = 0;
+	dev->vblank[crtc].enabled = false;
 
 	/* No further vblank irq's will be processed after
 	 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@
 	 * delayed gpu counter increment.
 	 */
 	do {
-		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+	} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
 
 	if (!count)
 		vblrc = 0;
@@ -140,7 +138,7 @@
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	diff_ns = timeval_to_ns(&tvblank) -
 		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 
@@ -157,7 +155,7 @@
 	 * hope for the best.
 	 */
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 	}
 
@@ -178,8 +176,8 @@
 
 	for (i = 0; i < dev->num_crtcs; i++) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-		    dev->vblank_enabled[i]) {
+		if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+		    dev->vblank[i].enabled) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
 			vblank_disable_and_save(dev, i);
 		}
@@ -197,14 +195,7 @@
 
 	vblank_disable_fn((unsigned long)dev);
 
-	kfree(dev->vbl_queue);
-	kfree(dev->_vblank_count);
-	kfree(dev->vblank_refcount);
-	kfree(dev->vblank_enabled);
-	kfree(dev->last_vblank);
-	kfree(dev->last_vblank_wait);
-	kfree(dev->vblank_inmodeset);
-	kfree(dev->_vblank_time);
+	kfree(dev->vblank);
 
 	dev->num_crtcs = 0;
 }
@@ -221,42 +212,14 @@
 
 	dev->num_crtcs = num_crtcs;
 
-	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
-				 GFP_KERNEL);
-	if (!dev->vbl_queue)
+	dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+	if (!dev->vblank)
 		goto err;
 
-	dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
-	if (!dev->_vblank_count)
-		goto err;
+	for (i = 0; i < num_crtcs; i++)
+		init_waitqueue_head(&dev->vblank[i].queue);
 
-	dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
-				       GFP_KERNEL);
-	if (!dev->vblank_refcount)
-		goto err;
-
-	dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_enabled)
-		goto err;
-
-	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank)
-		goto err;
-
-	dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank_wait)
-		goto err;
-
-	dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_inmodeset)
-		goto err;
-
-	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
-				    sizeof(struct timeval), GFP_KERNEL);
-	if (!dev->_vblank_time)
-		goto err;
-
-	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+	DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
 	/* Driver specific high-precision vblank timestamping supported? */
 	if (dev->driver->get_vblank_timestamp)
@@ -264,14 +227,8 @@
 	else
 		DRM_INFO("No driver support for vblank timestamp query.\n");
 
-	/* Zero per-crtc vblank stuff */
-	for (i = 0; i < num_crtcs; i++) {
-		init_waitqueue_head(&dev->vbl_queue[i]);
-		atomic_set(&dev->_vblank_count[i], 0);
-		atomic_set(&dev->vblank_refcount[i], 0);
-	}
+	dev->vblank_disable_allowed = false;
 
-	dev->vblank_disable_allowed = 0;
 	return 0;
 
 err:
@@ -336,7 +293,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -373,7 +330,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		if (!drm_core_check_feature(dev, DRIVER_MODESET))
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@
 int drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -410,9 +368,9 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -497,8 +455,8 @@
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
-	/* Fields of interlaced scanout modes are only halve a frame duration.
-	 * Double the dotclock to get halve the frame-/line-/pixelduration.
+	/* Fields of interlaced scanout modes are only half a frame duration.
+	 * Double the dotclock to get half the frame-/line-/pixelduration.
 	 */
 	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
 		dotclock *= 2;
@@ -628,24 +586,20 @@
 	 * code gets preempted or delayed for some reason.
 	 */
 	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
-		/* Disable preemption to make it very likely to
-		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		/*
+		 * Get vertical and horizontal scanout position vpos, hpos,
+		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		preempt_disable();
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+							       &hpos, &stime, &etime);
 
-		/* Get system timestamp before query. */
-		stime = ktime_get();
-
-		/* Get vertical and horizontal scanout pos. vpos, hpos. */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
-
-		/* Get system timestamp after query. */
-		etime = ktime_get();
+		/*
+		 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+		 * CLOCK_REALTIME is requested.
+		 */
 		if (!drm_timestamp_monotonic)
 			mono_time_offset = ktime_get_monotonic_offset();
 
-		preempt_enable();
-
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
 			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -653,6 +607,7 @@
 			return -EIO;
 		}
 
+		/* Compute uncertainty in timestamp of scanout position query. */
 		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
@@ -795,7 +750,7 @@
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-	return atomic_read(&dev->_vblank_count[crtc]);
+	return atomic_read(&dev->vblank[crtc].count);
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
@@ -824,10 +779,10 @@
 	 * a seqlock.
 	 */
 	do {
-		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		cur_vblank = atomic_read(&dev->vblank[crtc].count);
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
 		smp_rmb();
-	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+	} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
 
 	return cur_vblank;
 }
@@ -914,12 +869,12 @@
 	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 
 	/* Deal with counter wrap */
-	diff = cur_vblank - dev->last_vblank[crtc];
-	if (cur_vblank < dev->last_vblank[crtc]) {
+	diff = cur_vblank - dev->vblank[crtc].last;
+	if (cur_vblank < dev->vblank[crtc].last) {
 		diff += dev->max_vblank_count;
 
 		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+			  crtc, dev->vblank[crtc].last, cur_vblank, diff);
 	}
 
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +885,12 @@
 	 * reinitialize delayed at next vblank interrupt in that case.
 	 */
 	if (rc) {
-		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		tslot = atomic_read(&dev->vblank[crtc].count) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
 	}
 
 	smp_mb__before_atomic_inc();
-	atomic_add(diff, &dev->_vblank_count[crtc]);
+	atomic_add(diff, &dev->vblank[crtc].count);
 	smp_mb__after_atomic_inc();
 }
 
@@ -957,9 +912,9 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+	if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
-		if (!dev->vblank_enabled[crtc]) {
+		if (!dev->vblank[crtc].enabled) {
 			/* Enable vblank irqs under vblank_time_lock protection.
 			 * All vblank count & timestamp updates are held off
 			 * until we are done reinitializing master counter and
@@ -970,16 +925,16 @@
 			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
 				  crtc, ret);
 			if (ret)
-				atomic_dec(&dev->vblank_refcount[crtc]);
+				atomic_dec(&dev->vblank[crtc].refcount);
 			else {
-				dev->vblank_enabled[crtc] = 1;
+				dev->vblank[crtc].enabled = true;
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
 	} else {
-		if (!dev->vblank_enabled[crtc]) {
-			atomic_dec(&dev->vblank_refcount[crtc]);
+		if (!dev->vblank[crtc].enabled) {
+			atomic_dec(&dev->vblank[crtc].refcount);
 			ret = -EINVAL;
 		}
 	}
@@ -999,10 +954,10 @@
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
 	    (drm_vblank_offdelay > 0))
 		mod_timer(&dev->vblank_disable_timer,
 			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +980,7 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	vblank_disable_and_save(dev, crtc);
-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1022,10 @@
 	 * to avoid corrupting the count if multiple, mismatch calls occur),
 	 * so that interrupts remain enabled in the interim.
 	 */
-	if (!dev->vblank_inmodeset[crtc]) {
-		dev->vblank_inmodeset[crtc] = 0x1;
+	if (!dev->vblank[crtc].inmodeset) {
+		dev->vblank[crtc].inmodeset = 0x1;
 		if (drm_vblank_get(dev, crtc) == 0)
-			dev->vblank_inmodeset[crtc] |= 0x2;
+			dev->vblank[crtc].inmodeset |= 0x2;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1038,15 @@
 	if (!dev->num_crtcs)
 		return;
 
-	if (dev->vblank_inmodeset[crtc]) {
+	if (dev->vblank[crtc].inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		dev->vblank_disable_allowed = 1;
+		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (dev->vblank_inmodeset[crtc] & 0x2)
+		if (dev->vblank[crtc].inmodeset & 0x2)
 			drm_vblank_put(dev, crtc);
 
-		dev->vblank_inmodeset[crtc] = 0;
+		dev->vblank[crtc].inmodeset = 0;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1243,8 @@
 
 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
-	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
-	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+	dev->vblank[crtc].last_wait = vblwait->request.sequence;
+	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !dev->irq_enabled));
@@ -1367,7 +1322,7 @@
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	/* Vblank irq handling disabled. Nothing to do. */
-	if (!dev->vblank_enabled[crtc]) {
+	if (!dev->vblank[crtc].enabled) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return false;
 	}
@@ -1377,7 +1332,7 @@
 	 */
 
 	/* Get current timestamp and count. */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1356,14 @@
 		 * the timestamp computed above.
 		 */
 		smp_mb__before_atomic_inc();
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
 	}
 
-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
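
All of the parallel per-CRTC arrays replaced in this file collapse into the single kcalloc'ed dev->vblank array. Reconstructed from the accessors above, the per-CRTC state looks roughly like this (the authoritative definition lives in drmP.h):

/* Presumed per-CRTC vblank state, one entry per CRTC. */
struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
	struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* was _vblank_time */
	atomic_t count;			/* was _vblank_count[crtc] */
	atomic_t refcount;		/* was vblank_refcount[crtc] */
	u32 last;			/* was last_vblank[crtc] */
	u32 last_wait;			/* was last_vblank_wait[crtc] */
	unsigned int inmodeset;		/* was vblank_inmodeset[crtc] */
	bool enabled;			/* was vblank_enabled[crtc] */
};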
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96..f645268 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@
 		if (drm_lock_take(&master->lock, lock->context)) {
 			master->lock.file_priv = file_priv;
 			master->lock.lock_time = jiffies;
-			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 			break;	/* Got lock */
 		}
 
@@ -157,8 +156,6 @@
 		return -EINVAL;
 	}
 
-	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
 	if (drm_lock_free(&master->lock, lock->context)) {
 		/* FIXME: Should really bail out here. */
 	}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb6..85071a1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@
 /**
  * drm_mode_set_crtcinfo - set CRTC modesetting parameters
  * @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
  *
  * LOCKING:
  * None.
  *
  * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ *   interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ *   buffers containing two eyes (only adjust the timings when needed, e.g. for
+ *   "frame packing" or "side by side full").
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
 	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
 		return;
 
+	p->crtc_clock = p->clock;
 	p->crtc_hdisplay = p->hdisplay;
 	p->crtc_hsync_start = p->hsync_start;
 	p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@
 		p->crtc_vtotal *= p->vscan;
 	}
 
+	if (adjust_flags & CRTC_STEREO_DOUBLE) {
+		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+		switch (layout) {
+		case DRM_MODE_FLAG_3D_FRAME_PACKING:
+			p->crtc_clock *= 2;
+			p->crtc_vdisplay += p->crtc_vtotal;
+			p->crtc_vsync_start += p->crtc_vtotal;
+			p->crtc_vsync_end += p->crtc_vtotal;
+			p->crtc_vtotal += p->crtc_vtotal;
+			break;
+		}
+	}
+
 	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
@@ -830,12 +851,16 @@
 	} else if (mode1->clock != mode2->clock)
 		return false;
 
-	return drm_mode_equal_no_clocks(mode1, mode2);
+	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+		return false;
+
+	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
 /**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
  * @mode1: first mode
  * @mode2: second mode
  *
@@ -843,12 +868,13 @@
  * None.
  *
  * Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
  *
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+					const struct drm_display_mode *mode2)
 {
 	if (mode1->hdisplay == mode2->hdisplay &&
 	    mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@
 	    mode1->vsync_end == mode2->vsync_end &&
 	    mode1->vtotal == mode2->vtotal &&
 	    mode1->vscan == mode2->vscan &&
-	    mode1->flags == mode2->flags)
+	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+	     (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
 		return true;
 
 	return false;
 }
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@
 				/* if equal delete the probed mode */
 				mode->status = pmode->status;
 				/* Merge type bits together */
-				mode->type |= pmode->type;
+				mode->type = pmode->type;
 				list_del(&pmode->head);
 				drm_mode_destroy(connector->dev, pmode);
 				break;
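
As a worked example of the CRTC_STEREO_DOUBLE arithmetic above, take the standard 1920x1080@60 timings (vdisplay 1080, vsync 1084-1089, vtotal 1125, clock 148500 kHz); frame packing gives

	crtc_clock       = 148500 * 2  = 297000 kHz
	crtc_vdisplay    = 1080 + 1125 = 2205
	crtc_vsync_start = 1084 + 1125 = 2209
	crtc_vsync_end   = 1089 + 1125 = 2214
	crtc_vtotal      = 1125 * 2    = 2250

i.e. two 1080-line eyes separated by a 45-line gap, which is the layout HDMI frame packing expects; the horizontal timings are left untouched.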
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee..0267979 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -80,7 +80,7 @@
 	/* Reserve */
 	for (addr = (unsigned long)dmah->vaddr, sz = size;
 	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-		SetPageReserved(virt_to_page(addr));
+		SetPageReserved(virt_to_page((void *)addr));
 	}
 
 	return dmah;
@@ -103,7 +103,7 @@
 		/* Unreserve */
 		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
 		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-			ClearPageReserved(virt_to_page(addr));
+			ClearPageReserved(virt_to_page((void *)addr));
 		}
 		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
 				  dmah->busaddr);
@@ -322,83 +322,36 @@
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &pdev->dev);
 	if (!dev)
 		return -ENOMEM;
 
 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto err_g1;
+		goto err_free;
 
 	dev->pdev = pdev;
-	dev->dev = &pdev->dev;
-
-	dev->pci_device = pdev->device;
-	dev->pci_vendor = pdev->vendor;
-
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif
 
-	mutex_lock(&drm_global_mutex);
-
-	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g2;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		pci_set_drvdata(pdev, dev);
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g2;
-	}
 
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g21;
-	}
-
-	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
-		goto err_g3;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, ent->driver_data);
-		if (ret)
-			goto err_g4;
-	}
-
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-						&dev->primary->mode_group);
-		if (ret)
-			goto err_g4;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_pci;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);
 
-	mutex_unlock(&drm_global_mutex);
 	return 0;
 
-err_g4:
-	drm_put_minor(&dev->primary);
-err_g3:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g21:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g2:
+err_pci:
 	pci_disable_device(pdev);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6..fc24fee 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &platdev->dev);
 	if (!dev)
 		return -ENOMEM;
 
 	dev->platformdev = platdev;
-	dev->dev = &platdev->dev;
 
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_g3;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@
 
 	return 0;
 
-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470..56805c3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@
 	unsigned count;
 	struct scatterlist *sg;
 	struct page *page;
-	u32 len, offset;
+	u32 len;
 	int pg_index;
 	dma_addr_t addr;
 
 	pg_index = 0;
 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
 		len = sg->length;
-		offset = sg->offset;
 		page = sg_page(sg);
 		addr = sg_dma_address(sg);
 
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d8645..f53d524 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,81 +254,21 @@
 	return 0;
 }
 
-int drm_fill_in_dev(struct drm_device *dev,
-			   const struct pci_device_id *ent,
-			   struct drm_driver *driver)
-{
-	int retcode;
-
-	INIT_LIST_HEAD(&dev->filelist);
-	INIT_LIST_HEAD(&dev->ctxlist);
-	INIT_LIST_HEAD(&dev->vmalist);
-	INIT_LIST_HEAD(&dev->maplist);
-	INIT_LIST_HEAD(&dev->vblank_event_list);
-
-	spin_lock_init(&dev->count_lock);
-	spin_lock_init(&dev->event_lock);
-	mutex_init(&dev->struct_mutex);
-	mutex_init(&dev->ctxlist_mutex);
-
-	if (drm_ht_create(&dev->map_hash, 12)) {
-		return -ENOMEM;
-	}
-
-	/* the DRM has 6 basic counters */
-	dev->counters = 6;
-	dev->types[0] = _DRM_STAT_LOCK;
-	dev->types[1] = _DRM_STAT_OPENS;
-	dev->types[2] = _DRM_STAT_CLOSES;
-	dev->types[3] = _DRM_STAT_IOCTLS;
-	dev->types[4] = _DRM_STAT_LOCKS;
-	dev->types[5] = _DRM_STAT_UNLOCKS;
-
-	dev->driver = driver;
-
-	if (dev->driver->bus->agp_init) {
-		retcode = dev->driver->bus->agp_init(dev);
-		if (retcode)
-			goto error_out_unreg;
-	}
-
-
-
-	retcode = drm_ctxbitmap_init(dev);
-	if (retcode) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto error_out_unreg;
-	}
-
-	if (driver->driver_features & DRIVER_GEM) {
-		retcode = drm_gem_init(dev);
-		if (retcode) {
-			DRM_ERROR("Cannot initialize graphics execution "
-				  "manager (GEM)\n");
-			goto error_out_unreg;
-		}
-	}
-
-	return 0;
-
-      error_out_unreg:
-	drm_lastclose(dev);
-	return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
 /**
- * Get a secondary minor number.
+ * drm_get_minor - Allocate and register new DRM minor
+ * @dev: DRM device
+ * @minor: Pointer to where new minor is stored
+ * @type: Type of minor
  *
- * \param dev device data structure
- * \param sec-minor structure to hold the assigned minor
- * \return negative number on failure.
+ * Allocate a new minor of the given type and register it. A pointer to the new
+ * minor is returned in @minor.
+ * Caller must hold the global DRM mutex.
  *
- * Search an empty entry and initialize it to the given parameters. This
- * routines assigns minor numbers to secondary heads of multi-headed cards
+ * RETURNS:
+ * 0 on success, negative error code on failure.
  */
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
+			 int type)
 {
 	struct drm_minor *new_minor;
 	int ret;
@@ -385,37 +325,48 @@
 	*minor = NULL;
 	return ret;
 }
-EXPORT_SYMBOL(drm_get_minor);
 
 /**
- * Put a secondary minor number.
+ * drm_unplug_minor - Unplug DRM minor
+ * @minor: Minor to unplug
  *
- * \param sec_minor - structure to be released
- * \return always zero
+ * Unplugs the given DRM minor but keeps the object. After this returns,
+ * minor->dev is still valid, so existing open files can still access it to get
+ * device information from their drm_file objects.
+ * If the minor is already unplugged or if @minor is NULL, nothing is done.
+ * The global DRM mutex must be held by the caller.
  */
-int drm_put_minor(struct drm_minor **minor_p)
+static void drm_unplug_minor(struct drm_minor *minor)
 {
-	struct drm_minor *minor = *minor_p;
-
-	DRM_DEBUG("release secondary minor %d\n", minor->index);
+	if (!minor || !minor->kdev)
+		return;
 
 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_cleanup(minor);
 #endif
 
 	drm_sysfs_device_remove(minor);
-
 	idr_remove(&drm_minors_idr, minor->index);
-
-	kfree(minor);
-	*minor_p = NULL;
-	return 0;
 }
-EXPORT_SYMBOL(drm_put_minor);
 
-static void drm_unplug_minor(struct drm_minor *minor)
+/**
+ * drm_put_minor - Destroy DRM minor
+ * @minor: Minor to destroy
+ *
+ * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
+ * is done if @minor is NULL. It is fine to call this on already unplugged
+ * minors.
+ * The global DRM mutex must be held by the caller.
+ */
+static void drm_put_minor(struct drm_minor *minor)
 {
-	drm_sysfs_device_remove(minor);
+	if (!minor)
+		return;
+
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	drm_unplug_minor(minor);
+	kfree(minor);
 }
 
 /**
@@ -427,47 +378,15 @@
  */
 void drm_put_dev(struct drm_device *dev)
 {
-	struct drm_driver *driver;
-	struct drm_map_list *r_list, *list_temp;
-
 	DRM_DEBUG("\n");
 
 	if (!dev) {
 		DRM_ERROR("cleanup called no dev\n");
 		return;
 	}
-	driver = dev->driver;
 
-	drm_lastclose(dev);
-
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
-
-	if (dev->driver->bus->agp_destroy)
-		dev->driver->bus->agp_destroy(dev);
-
-	drm_vblank_cleanup(dev);
-
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-		drm_rmmap(dev, r_list->map);
-	drm_ht_remove(&dev->map_hash);
-
-	drm_ctxbitmap_cleanup(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-
-	if (dev->render)
-		drm_put_minor(&dev->render);
-
-	if (driver->driver_features & DRIVER_GEM)
-		drm_gem_destroy(dev);
-
-	drm_put_minor(&dev->primary);
-
-	list_del(&dev->driver_item);
-	kfree(dev->devname);
-	kfree(dev);
+	drm_dev_unregister(dev);
+	drm_dev_free(dev);
 }
 EXPORT_SYMBOL(drm_put_dev);
 
@@ -490,3 +409,206 @@
 	mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_unplug_dev);
+
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+				 struct device *parent)
+{
+	struct drm_device *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	dev->dev = parent;
+	dev->driver = driver;
+
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
+
+	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->event_lock);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
+
+	if (drm_ht_create(&dev->map_hash, 12))
+		goto err_free;
+
+	ret = drm_ctxbitmap_init(dev);
+	if (ret) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto err_ht;
+	}
+
+	if (driver->driver_features & DRIVER_GEM) {
+		ret = drm_gem_init(dev);
+		if (ret) {
+			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+			goto err_ctxbitmap;
+		}
+	}
+
+	return dev;
+
+err_ctxbitmap:
+	drm_ctxbitmap_cleanup(dev);
+err_ht:
+	drm_ht_remove(&dev->map_hash);
+err_free:
+	kfree(dev);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device has been registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+	drm_put_minor(dev->control);
+	drm_put_minor(dev->render);
+	drm_put_minor(dev->primary);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
+	drm_ctxbitmap_cleanup(dev);
+	drm_ht_remove(&dev->map_hash);
+
+	kfree(dev->devname);
+	kfree(dev);
+}
+EXPORT_SYMBOL(drm_dev_free);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise the device to
+ * user-space and start normal device operation. @dev must have been allocated
+ * previously via drm_dev_alloc().
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
+{
+	int ret;
+
+	mutex_lock(&drm_global_mutex);
+
+	if (dev->driver->bus->agp_init) {
+		ret = dev->driver->bus->agp_init(dev);
+		if (ret)
+			goto out_unlock;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_agp;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_control_node;
+	}
+
+	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	if (ret)
+		goto err_render_node;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, flags);
+		if (ret)
+			goto err_primary_node;
+	}
+
+	/* setup grouping for legacy outputs */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+				&dev->primary->mode_group);
+		if (ret)
+			goto err_unload;
+	}
+
+	list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+	ret = 0;
+	goto out_unlock;
+
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+err_primary_node:
+	drm_put_minor(dev->primary);
+err_render_node:
+	drm_put_minor(dev->render);
+err_control_node:
+	drm_put_minor(dev->control);
+err_agp:
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+out_unlock:
+	mutex_unlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	drm_lastclose(dev);
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+
+	drm_vblank_cleanup(dev);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+
+	drm_unplug_minor(dev->control);
+	drm_unplug_minor(dev->render);
+	drm_unplug_minor(dev->primary);
+
+	list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
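
With the alloc/register/unregister/free split in place, every bus backend in this series reduces to the same handful of calls; a minimal sketch of the resulting pattern (names illustrative, mirroring drm_get_pci_dev() and drm_platform_init() above):

static int foo_probe(struct platform_device *pdev, struct drm_driver *driver)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(driver, &pdev->dev);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, 0);
	if (ret) {
		drm_dev_free(ddev);	/* never plain kfree() */
		return ret;
	}
	return 0;
}

static void foo_remove(struct drm_device *ddev)
{
	drm_dev_unregister(ddev);
	drm_dev_free(ddev);		/* or simply drm_put_dev(ddev) */
}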
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b..1a35ea5 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
 #include <drm/drm_core.h>
 #include <drm/drmP.h>
 
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
 
 static struct device_type drm_sysfs_device_minor = {
 	.name = "drm_minor"
@@ -162,20 +162,6 @@
 	drm_class = NULL;
 }
 
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff.  But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
-	memset(dev, 0, sizeof(struct device));
-	return;
-}
-
 /*
  * Connector properties
  */
@@ -380,11 +366,6 @@
  * properties (so far, connection status, dpms, mode list & edid) and
  * generate a hotplug event so userspace knows there's a new connector
  * available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
  */
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
@@ -394,29 +375,25 @@
 	int i;
 	int ret;
 
-	/* We shouldn't get called more than once for the same connector */
-	BUG_ON(device_is_registered(&connector->kdev));
+	if (connector->kdev)
+		return 0;
 
-	connector->kdev.parent = &dev->primary->kdev;
-	connector->kdev.class = drm_class;
-	connector->kdev.release = drm_sysfs_device_release;
-
+	connector->kdev = device_create(drm_class, dev->primary->kdev,
+					0, connector, "card%d-%s",
+					dev->primary->index, drm_get_connector_name(connector));
 	DRM_DEBUG("adding \"%s\" to sysfs\n",
 		  drm_get_connector_name(connector));
 
-	dev_set_name(&connector->kdev, "card%d-%s",
-		     dev->primary->index, drm_get_connector_name(connector));
-	ret = device_register(&connector->kdev);
-
-	if (ret) {
-		DRM_ERROR("failed to register connector device: %d\n", ret);
+	if (IS_ERR(connector->kdev)) {
+		DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+		ret = PTR_ERR(connector->kdev);
 		goto out;
 	}
 
 	/* Standard attributes */
 
 	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
-		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+		ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
 		if (ret)
 			goto err_out_files;
 	}
@@ -433,7 +410,7 @@
 		case DRM_MODE_CONNECTOR_Component:
 		case DRM_MODE_CONNECTOR_TV:
 			for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
-				ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+				ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
 				if (ret)
 					goto err_out_files;
 			}
@@ -442,7 +419,7 @@
 			break;
 	}
 
-	ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+	ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
 	if (ret)
 		goto err_out_files;
 
@@ -453,10 +430,10 @@
 
 err_out_files:
 	for (i = 0; i < opt_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+		device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
 	for (i = 0; i < attr_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	device_unregister(&connector->kdev);
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	device_unregister(connector->kdev);
 
 out:
 	return ret;
@@ -480,16 +457,16 @@
 {
 	int i;
 
-	if (!connector->kdev.parent)
+	if (!connector->kdev)
 		return;
 	DRM_DEBUG("removing \"%s\" from sysfs\n",
 		  drm_get_connector_name(connector));
 
 	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
-	device_unregister(&connector->kdev);
-	connector->kdev.parent = NULL;
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+	device_unregister(connector->kdev);
+	connector->kdev = NULL;
 }
 EXPORT_SYMBOL(drm_sysfs_connector_remove);
 
@@ -508,7 +485,7 @@
 
 	DRM_DEBUG("generating hotplug event\n");
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 }
 EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
@@ -523,15 +500,8 @@
  */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
-	int err;
 	char *minor_str;
 
-	minor->kdev.parent = minor->dev->dev;
-
-	minor->kdev.class = drm_class;
-	minor->kdev.release = drm_sysfs_device_release;
-	minor->kdev.devt = minor->device;
-	minor->kdev.type = &drm_sysfs_device_minor;
 	if (minor->type == DRM_MINOR_CONTROL)
 		minor_str = "controlD%d";
         else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +509,14 @@
         else
                 minor_str = "card%d";
 
-	dev_set_name(&minor->kdev, minor_str, minor->index);
-
-	err = device_register(&minor->kdev);
-	if (err) {
-		DRM_ERROR("device add failed: %d\n", err);
-		goto err_out;
+	minor->kdev = device_create(drm_class, minor->dev->dev,
+				    MKDEV(DRM_MAJOR, minor->index),
+				    minor, minor_str, minor->index);
+	if (IS_ERR(minor->kdev)) {
+		DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
+		return PTR_ERR(minor->kdev);
 	}
-
 	return 0;
-
-err_out:
-	return err;
 }
 
 /**
@@ -562,9 +528,9 @@
  */
 void drm_sysfs_device_remove(struct drm_minor *minor)
 {
-	if (minor->kdev.parent)
-		device_unregister(&minor->kdev);
-	minor->kdev.parent = NULL;
+	if (minor->kdev)
+		device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+	minor->kdev = NULL;
 }
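
The sysfs conversion replaces the embedded struct device with a pointer managed by the driver core, following the stock device_create()/device_destroy() pattern; condensed from the code above:

/* Creation: drvdata carries the minor, so no container_of() needed. */
minor->kdev = device_create(drm_class, minor->dev->dev,
			    MKDEV(DRM_MAJOR, minor->index),
			    minor, "card%d", minor->index);
if (IS_ERR(minor->kdev))
	return PTR_ERR(minor->kdev);

/* Lookup: to_drm_minor() is now just dev_get_drvdata(). */

/* Teardown: device_destroy() unregisters and drops the reference. */
device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));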
 
 
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 8766472..b179b70 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@
 		    struct drm_driver *driver)
 {
 	struct drm_device *dev;
-	struct usb_device *usbdev;
 	int ret;
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &interface->dev);
 	if (!dev)
 		return -ENOMEM;
 
-	usbdev = interface_to_usbdev(interface);
-	dev->usbdev = usbdev;
-	dev->dev = &interface->dev;
-
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
+	dev->usbdev = interface_to_usbdev(interface);
 	usb_set_intfdata(interface, dev);
-	ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g1;
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
-	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	ret = drm_mode_group_init_legacy_group(dev,
-					       &dev->primary->mode_group);
-	if (ret)
-		goto err_g3;
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@
 
 	return 0;
 
-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 
 }
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index b5c5af7..93e95d7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -301,7 +301,7 @@
 
 	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
-	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+	page = virt_to_page((void *)dma->pagelist[page_nr]);
 
 	get_page(page);
 	vmf->page = page;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef5..f227f54 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef7..b676006 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
-	.gem_init_object	= exynos_drm_gem_init_object,
 	.gem_free_object	= exynos_drm_gem_free_object,
 	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
 	.dumb_create		= exynos_drm_gem_dumb_create,
@@ -286,7 +285,11 @@
 
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	int ret;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	return drm_platform_init(&exynos_drm_driver, pdev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d..23da72b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;
 
 	/* attach this sub driver to iommu mapping if supported. */
 	if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd2..1ade191 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@
 	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
 }
 
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
-	return 0;
-}
-
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555af..702ec3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@
 						unsigned int gem_handle,
 						struct drm_file *file_priv);
 
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
 /* free gem object. */
 void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330..ddaaedd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
 	struct edid *edid;
-	int edid_len;
 
 	/*
 	 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@
 		return ERR_PTR(-EFAULT);
 	}
 
-	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
-	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+	edid = drm_edid_duplicate(ctx->raw_edid);
 	if (!edid) {
 		DRM_DEBUG_KMS("failed to allocate edid\n");
 		return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;
 
 	return 0;
 }
@@ -485,7 +483,6 @@
 	struct exynos_drm_manager *manager;
 	struct exynos_drm_display_ops *display_ops;
 	struct drm_exynos_vidi_connection *vidi = data;
-	int edid_len;
 
 	if (!vidi) {
 		DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@
 			DRM_DEBUG_KMS("edid data is invalid.\n");
 			return -EINVAL;
 		}
-		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
-		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+		ctx->raw_edid = drm_edid_duplicate(raw_edid);
 		if (!ctx->raw_edid) {
 			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
 			return -ENOMEM;
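
Both call sites now rely on drm_edid_duplicate(); it is assumed to wrap exactly the kmemdup() arithmetic removed here, i.e. one EDID_LENGTH block plus one per extension:

/* Presumed shape of the new core helper (actual definition in
 * drivers/gpu/drm/drm_edid.c).
 */
struct edid *drm_edid_duplicate(const struct edid *edid)
{
	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
		       GFP_KERNEL);
}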
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2df..508cf99 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 162f686..5a9a6a3 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -634,6 +634,7 @@
 	.crtcs = 2,
 	.hdmi_mask = (1 << 0) | (1 << 1),
 	.lvds_mask = (1 << 1),
+	.sdvo_mask = (1 << 0),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,
 	.chip_setup = cdv_chip_setup,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb435..f88a181 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@
 	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &connector->base.kdev;
+	intel_dp->adapter.dev.parent = connector->base.kdev;
 
 	if (is_edp(encoder))
 		cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 01dd7d2..94b3fec 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -714,7 +714,7 @@
 			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
 			break;
 		case INTEL_OUTPUT_SDVO:
-			crtc_mask = ((1 << 0) | (1 << 1));
+			crtc_mask = dev_priv->ops->sdvo_mask;
 			clone_mask = (1 << INTEL_OUTPUT_SDVO);
 			break;
 		case INTEL_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c5..e2db48a 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
 #include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
-int psb_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL;
-}
-
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 62cd42e..566d330 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -51,6 +51,9 @@
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
 
+#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
+#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
+
 /* Intel GPIO access functions */
 
 #define I2C_RISEFALL_TIME 20
@@ -71,7 +74,8 @@
 void
 gma_intel_i2c_reset(struct drm_device *dev)
 {
-	REG_WRITE(GMBUS0, 0);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	GMBUS_REG_WRITE(GMBUS0, 0);
 }
 
 static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@@ -98,11 +102,10 @@
 static u32 get_reserved(struct intel_gpio *gpio)
 {
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = 0;
 
 	/* On most chips, these bits must be preserved in software. */
-	reserved = REG_READ(gpio->reg) &
+	reserved = GMBUS_REG_READ(gpio->reg) &
 				     (GPIO_DATA_PULLUP_DISABLE |
 				      GPIO_CLOCK_PULLUP_DISABLE);
 
@@ -113,29 +116,26 @@
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
-	REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	REG_WRITE(gpio->reg, reserved);
-	return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	GMBUS_REG_WRITE(gpio->reg, reserved);
+	return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
-	REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	REG_WRITE(gpio->reg, reserved);
-	return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	GMBUS_REG_WRITE(gpio->reg, reserved);
+	return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
 	u32 clock_bits;
 
@@ -145,15 +145,14 @@
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 
-	REG_WRITE(gpio->reg, reserved | clock_bits);
-	REG_READ(gpio->reg); /* Posting */
+	GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
+	GMBUS_REG_READ(gpio->reg); /* Posting */
 }
 
 static void set_data(void *data, int state_high)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
 	u32 data_bits;
 
@@ -163,8 +162,8 @@
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 
-	REG_WRITE(gpio->reg, reserved | data_bits);
-	REG_READ(gpio->reg);
+	GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
+	GMBUS_REG_READ(gpio->reg);
 }
 
 static struct i2c_adapter *
@@ -251,7 +250,6 @@
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_psb_private *dev_priv = adapter->algo_data;
-	struct drm_device *dev = dev_priv->dev;
 	int i, reg_offset;
 
 	if (bus->force_bit)
@@ -260,28 +258,30 @@
 
 	reg_offset = 0;
 
-	REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
 	for (i = 0; i < num; i++) {
 		u16 len = msgs[i].len;
 		u8 *buf = msgs[i].buf;
 
 		if (msgs[i].flags & I2C_M_RD) {
-			REG_WRITE(GMBUS1 + reg_offset,
-				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
-				   (len << GMBUS_BYTE_COUNT_SHIFT) |
-				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
-				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-			REG_READ(GMBUS2+reg_offset);
+			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
+					GMBUS_CYCLE_WAIT |
+					(i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+					(len << GMBUS_BYTE_COUNT_SHIFT) |
+					(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+					GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+			GMBUS_REG_READ(GMBUS2+reg_offset);
 			do {
 				u32 val, loop = 0;
 
-				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
 					goto timeout;
-				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+				if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
 					goto clear_err;
 
-				val = REG_READ(GMBUS3 + reg_offset);
+				val = GMBUS_REG_READ(GMBUS3 + reg_offset);
 				do {
 					*buf++ = val & 0xff;
 					val >>= 8;
@@ -295,18 +295,20 @@
 				val |= *buf++ << (8 * loop);
 			} while (--len && ++loop < 4);
 
-			REG_WRITE(GMBUS3 + reg_offset, val);
-			REG_WRITE(GMBUS1 + reg_offset,
+			GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
 				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
 				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
 				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
 				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-			REG_READ(GMBUS2+reg_offset);
+			GMBUS_REG_READ(GMBUS2+reg_offset);
 
 			while (len) {
-				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
 					goto timeout;
-				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+				if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
+				    GMBUS_SATOER)
 					goto clear_err;
 
 				val = loop = 0;
@@ -314,14 +316,14 @@
 					val |= *buf++ << (8 * loop);
 				} while (--len && ++loop < 4);
 
-				REG_WRITE(GMBUS3 + reg_offset, val);
-				REG_READ(GMBUS2+reg_offset);
+				GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+				GMBUS_REG_READ(GMBUS2+reg_offset);
 			}
 		}
 
-		if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+		if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
 			goto timeout;
-		if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+		if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
 			goto clear_err;
 	}
 
@@ -332,20 +334,20 @@
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
-	REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
-	REG_WRITE(GMBUS1 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+	GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
 
 done:
 	/* Mark the GMBUS interface as disabled. We will re-enable it at the
 	 * start of the next xfer, till then let it sleep.
 	 */
-	REG_WRITE(GMBUS0 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
 	return i;
 
 timeout:
 	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
 		 bus->reg0 & 0xff, bus->adapter.name);
-	REG_WRITE(GMBUS0 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
 
 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
 	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@@ -399,6 +401,11 @@
 	if (dev_priv->gmbus == NULL)
 		return -ENOMEM;
 
+	if (IS_MRST(dev))
+		dev_priv->gmbus_reg = dev_priv->aux_reg;
+	else
+		dev_priv->gmbus_reg = dev_priv->vdc_reg;
+
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 
@@ -487,6 +494,7 @@
 		i2c_del_adapter(&bus->adapter);
 	}
 
+	dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
 	kfree(dev_priv->gmbus);
 	dev_priv->gmbus = NULL;
 }
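The GMBUS_REG_READ()/GMBUS_REG_WRITE() wrappers added above differ from plain REG_READ()/REG_WRITE() only in dereferencing dev_priv->gmbus_reg instead of the fixed primary VDC mapping, so a single pointer assignment at setup time retargets every GMBUS access at the auxiliary block on Moorestown. A minimal user-space model of that indirection (bank sizes and the 0x100 offset are hypothetical; the real driver does ioread32()/iowrite32() on ioremap()ed BARs):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the two ioremap()ed register banks. */
    static uint32_t vdc_bank[0x800];
    static uint32_t aux_bank[0x800];

    struct dev_priv {
        uint32_t *vdc_reg;
        uint32_t *aux_reg;
        uint32_t *gmbus_reg;    /* whichever bank carries GMBUS */
    };

    /* Same shape as the driver's macros, minus the MMIO accessors. */
    #define GMBUS_REG_READ(p, reg)       ((p)->gmbus_reg[(reg)])
    #define GMBUS_REG_WRITE(p, reg, val) ((p)->gmbus_reg[(reg)] = (val))

    int main(void)
    {
        struct dev_priv priv = { vdc_bank, aux_bank, NULL };
        int is_mrst = 1;        /* pretend we probed a Moorestown part */

        /* One assignment retargets every GMBUS access in the file. */
        priv.gmbus_reg = is_mrst ? priv.aux_reg : priv.vdc_reg;

        GMBUS_REG_WRITE(&priv, 0x100, 0);       /* e.g. reset GMBUS0 */
        printf("GMBUS0=%u via %s bank\n",
               (unsigned)GMBUS_REG_READ(&priv, 0x100),
               is_mrst ? "aux" : "vdc");
        return 0;
    }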
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 54c9896..8195e85 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -26,24 +26,10 @@
 #include "gma_display.h"
 #include "power.h"
 
-struct psb_intel_range_t {
-	int min, max;
-};
-
-struct oaktrail_limit_t {
-	struct psb_intel_range_t dot, m, p1;
-};
-
-struct oaktrail_clock_t {
-	/* derived values */
-	int dot;
-	int m;
-	int p1;
-};
-
-#define MRST_LIMIT_LVDS_100L	    0
-#define MRST_LIMIT_LVDS_83	    1
-#define MRST_LIMIT_LVDS_100	    2
+#define MRST_LIMIT_LVDS_100L	0
+#define MRST_LIMIT_LVDS_83	1
+#define MRST_LIMIT_LVDS_100	2
+#define MRST_LIMIT_SDVO		3
 
 #define MRST_DOT_MIN		  19750
 #define MRST_DOT_MAX		  120000
@@ -57,21 +43,40 @@
 #define MRST_P1_MAX_0		    7
 #define MRST_P1_MAX_1		    8
 
-static const struct oaktrail_limit_t oaktrail_limits[] = {
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock);
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock);
+
+static const struct gma_limit_t mrst_limits[] = {
 	{			/* MRST_LIMIT_LVDS_100L */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 .find_pll = mrst_lvds_find_best_pll,
 	 },
 	{			/* MRST_LIMIT_LVDS_83L */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+	 .find_pll = mrst_lvds_find_best_pll,
 	 },
 	{			/* MRST_LIMIT_LVDS_100 */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 .find_pll = mrst_lvds_find_best_pll,
+	 },
+	{			/* MRST_LIMIT_SDVO */
+	 .vco = {.min = 1400000, .max = 2800000},
+	 .n = {.min = 3, .max = 7},
+	 .m = {.min = 80, .max = 137},
+	 .p1 = {.min = 1, .max = 2},
+	 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = mrst_sdvo_find_best_pll,
 	 },
 };
 
@@ -82,9 +87,10 @@
 	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
 };
 
-static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
+					    int refclk)
 {
-	const struct oaktrail_limit_t *limit = NULL;
+	const struct gma_limit_t *limit = NULL;
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
@@ -92,45 +98,100 @@
 	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
 		switch (dev_priv->core_freq) {
 		case 100:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
 			break;
 		case 166:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_83];
 			break;
 		case 200:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_100];
 			break;
 		}
+	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+		limit = &mrst_limits[MRST_LIMIT_SDVO];
 	} else {
 		limit = NULL;
-		dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+		dev_err(dev->dev, "mrst_limit Wrong display type.\n");
 	}
 
 	return limit;
 }
 
 /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
 {
 	clock->dot = (refclk * clock->m) / (14 * clock->p1);
 }
 
-static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+static void mrst_print_pll(struct gma_clock_t *clock)
 {
-	pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
-	     prefix, clock->dot, clock->m, clock->p1);
+	DRM_DEBUG_DRIVER("dotclock=%d,  m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
+			 clock->dot, clock->m, clock->m1, clock->m2, clock->n,
+			 clock->p1, clock->p2);
+}
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock)
+{
+	struct gma_clock_t clock;
+	u32 target_vco, actual_freq;
+	s32 freq_error, min_error = 100000;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.n = limit->n.min; clock.n <= limit->n.max;
+		     clock.n++) {
+			for (clock.p1 = limit->p1.min;
+			     clock.p1 <= limit->p1.max; clock.p1++) {
+				/* p2 value always stored in p2_slow on SDVO */
+				clock.p = clock.p1 * limit->p2.p2_slow;
+				target_vco = target * clock.p;
+
+				/* VCO will increase at this point so break */
+				if (target_vco > limit->vco.max)
+					break;
+
+				if (target_vco < limit->vco.min)
+					continue;
+
+				actual_freq = (refclk * clock.m) /
+					      (clock.n * clock.p);
+				freq_error = 10000 -
+					     ((target * 10000) / actual_freq);
+
+				if (freq_error < -min_error) {
+					/* freq_error will start to decrease at
+					   this point so break */
+					break;
+				}
+
+				if (freq_error < 0)
+					freq_error = -freq_error;
+
+				if (freq_error < min_error) {
+					min_error = freq_error;
+					*best_clock = clock;
+				}
+			}
+		}
+		if (min_error == 0)
+			break;
+	}
+
+	return min_error == 0;
 }
 
 /**
  * Returns a set of divisors for the desired target clock with the given refclk,
  * or FALSE.  Divisor values are the actual divisors for
  */
-static bool
-mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
-		struct oaktrail_clock_t *best_clock)
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock)
 {
-	struct oaktrail_clock_t clock;
-	const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+	struct gma_clock_t clock;
 	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +201,7 @@
 		     clock.p1++) {
 			int this_err;
 
-			oaktrail_clock(refclk, &clock);
+			mrst_lvds_clock(refclk, &clock);
 
 			this_err = abs(clock.dot - target);
 			if (this_err < err) {
@@ -149,7 +210,6 @@
 			}
 		}
 	}
-	dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
 	return err != target;
 }
 
@@ -167,8 +227,10 @@
 	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
+	int i;
+	int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
 
-	if (pipe == 1) {
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
 		oaktrail_crtc_hdmi_dpms(crtc, mode);
 		return;
 	}
@@ -183,35 +245,45 @@
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		/* Enable the DPLL */
-		temp = REG_READ(map->dpll);
-		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			REG_WRITE(map->dpll, temp);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-		}
-		/* Enable the pipe */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) == 0)
-			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-		/* Enable the plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(map->cntr,
-				  temp | DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-		}
+		for (i = 0; i <= need_aux; i++) {
+			/* Enable the DPLL */
+			temp = REG_READ_WITH_AUX(map->dpll, i);
+			if ((temp & DPLL_VCO_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->dpll, temp, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp | DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp | DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+			}
 
+			/* Enable the pipe */
+			temp = REG_READ_WITH_AUX(map->conf, i);
+			if ((temp & PIPEACONF_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->conf,
+						   temp | PIPEACONF_ENABLE, i);
+			}
+
+			/* Enable the plane */
+			temp = REG_READ_WITH_AUX(map->cntr, i);
+			if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->cntr,
+						   temp | DISPLAY_PLANE_ENABLE,
+						   i);
+				/* Flush the plane changes */
+				REG_WRITE_WITH_AUX(map->base,
+					REG_READ_WITH_AUX(map->base, i), i);
+			}
+
+		}
 		gma_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
@@ -223,48 +295,52 @@
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
 
-		/* Disable the VGA plane that we never use */
-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-		/* Disable display plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(map->cntr,
-				  temp & ~DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-			REG_READ(map->base);
-		}
+		for (i = 0; i <= need_aux; i++) {
+			/* Disable the VGA plane that we never use */
+			REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
+			/* Disable display plane */
+			temp = REG_READ_WITH_AUX(map->cntr, i);
+			if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->cntr,
+					temp & ~DISPLAY_PLANE_ENABLE, i);
+				/* Flush the plane changes */
+				REG_WRITE_WITH_AUX(map->base,
+						   REG_READ(map->base), i);
+				REG_READ_WITH_AUX(map->base, i);
+			}
 
-		/* Next, disable display pipes */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) != 0) {
-			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
-			REG_READ(map->conf);
-		}
-		/* Wait for for the pipe disable to take effect. */
-		gma_wait_for_vblank(dev);
+			/* Next, disable display pipes */
+			temp = REG_READ_WITH_AUX(map->conf, i);
+			if ((temp & PIPEACONF_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->conf,
+						   temp & ~PIPEACONF_ENABLE, i);
+				REG_READ_WITH_AUX(map->conf, i);
+			}
+			/* Wait for the pipe disable to take effect. */
+			gma_wait_for_vblank(dev);
 
-		temp = REG_READ(map->dpll);
-		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-		}
+			temp = REG_READ_WITH_AUX(map->dpll, i);
+			if ((temp & DPLL_VCO_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp & ~DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+			}
 
-		/* Wait for the clocks to turn off. */
-		udelay(150);
+			/* Wait for the clocks to turn off. */
+			udelay(150);
+		}
 		break;
 	}
 
-	/*Set FIFO Watermarks*/
-	REG_WRITE(DSPARB, 0x3FFF);
-	REG_WRITE(DSPFW1, 0x3F88080A);
-	REG_WRITE(DSPFW2, 0x0b060808);
+	/* Set FIFO Watermarks (values taken from EMGD) */
+	REG_WRITE(DSPARB, 0x3f80);
+	REG_WRITE(DSPFW1, 0x3f8f0404);
+	REG_WRITE(DSPFW2, 0x04040f04);
 	REG_WRITE(DSPFW3, 0x0);
-	REG_WRITE(DSPFW4, 0x08030404);
+	REG_WRITE(DSPFW4, 0x04040404);
 	REG_WRITE(DSPFW5, 0x04040404);
 	REG_WRITE(DSPFW6, 0x78);
-	REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
-	/* Must write Bit 14 of the Chicken Bit Register */
+	REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
 
 	gma_power_end(dev);
 }
@@ -297,7 +373,8 @@
 	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
-	struct oaktrail_clock_t clock;
+	struct gma_clock_t clock;
+	const struct gma_limit_t *limit;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
 	bool ok, is_sdvo = false;
 	bool is_lvds = false;
@@ -306,8 +383,10 @@
 	struct gma_encoder *gma_encoder = NULL;
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_connector *connector;
+	int i;
+	int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
 
-	if (pipe == 1)
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
 		return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
 
 	if (!gma_power_begin(dev, true))
@@ -340,15 +419,17 @@
 	}
 
 	/* Disable the VGA plane that we never use */
-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+	for (i = 0; i <= need_aux; i++)
+		REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
 
 	/* Disable the panel fitter if it was on our pipe */
 	if (oaktrail_panel_fitter_pipe(dev) == pipe)
 		REG_WRITE(PFIT_CONTROL, 0);
 
-	REG_WRITE(map->src,
-		  ((mode->crtc_hdisplay - 1) << 16) |
-		  (mode->crtc_vdisplay - 1));
+	for (i = 0; i <= need_aux; i++) {
+		REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
+					     (mode->crtc_vdisplay - 1), i);
+	}
 
 	if (gma_encoder)
 		drm_object_property_get_value(&connector->base,
@@ -365,35 +446,39 @@
 		offsetY = (adjusted_mode->crtc_vdisplay -
 			   mode->crtc_vdisplay) / 2;
 
-		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank,
-			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
-			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
-		REG_WRITE(map->hsync,
-			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
-			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
-		REG_WRITE(map->vblank,
-			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
-			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
-		REG_WRITE(map->vsync,
-			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
-			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
+				((adjusted_mode->crtc_htotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
+				((adjusted_mode->crtc_vtotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hblank,
+				(adjusted_mode->crtc_hblank_start - offsetX - 1) |
+				((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hsync,
+				(adjusted_mode->crtc_hsync_start - offsetX - 1) |
+				((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vblank,
+				(adjusted_mode->crtc_vblank_start - offsetY - 1) |
+				((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vsync,
+				(adjusted_mode->crtc_vsync_start - offsetY - 1) |
+				((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
+		}
 	} else {
-		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
-			((adjusted_mode->crtc_hblank_end - 1) << 16));
-		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
-			((adjusted_mode->crtc_hsync_end - 1) << 16));
-		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
-			((adjusted_mode->crtc_vblank_end - 1) << 16));
-		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
-			((adjusted_mode->crtc_vsync_end - 1) << 16));
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+				((adjusted_mode->crtc_htotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+				((adjusted_mode->crtc_vtotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+				((adjusted_mode->crtc_hblank_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+				((adjusted_mode->crtc_hsync_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+				((adjusted_mode->crtc_vblank_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+				((adjusted_mode->crtc_vsync_end - 1) << 16), i);
+		}
 	}
 
 	/* Flush the plane changes */
@@ -418,21 +503,30 @@
 	if (is_mipi)
 		goto oaktrail_crtc_mode_set_exit;
 
-	refclk = dev_priv->core_freq * 1000;
 
 	dpll = 0;		/*BIT16 = 0 for 100MHz reference */
 
-	ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+	refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
+	limit = mrst_limit(crtc, refclk);
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
+			     refclk, &clock);
 
-	if (!ok) {
-		dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
-	} else {
-		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
-			 "m = %x, p1 = %x.\n", clock.dot, clock.m,
-			 clock.p1);
+	if (is_sdvo) {
+		/* Convert calculated values to register values */
+		clock.p1 = (1L << (clock.p1 - 1));
+		clock.m -= 2;
+		clock.n = (1L << (clock.n - 1));
 	}
 
-	fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+	if (!ok)
+		DRM_ERROR("Failed to find proper PLL settings");
+
+	mrst_print_pll(&clock);
+
+	if (is_sdvo)
+		fp = clock.n << 16 | clock.m;
+	else
+		fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
 
 	dpll |= DPLL_VGA_MODE_DIS;
 
@@ -456,38 +550,43 @@
 
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock.p1 - 2)) << 17;
+	if (is_sdvo)
+		dpll |= clock.p1 << 16; /* dpll |= (1 << (clock.p1 - 1)) << 16; */
+	else
+		dpll |= (1 << (clock.p1 - 2)) << 17;
 
 	dpll |= DPLL_VCO_ENABLE;
 
-	mrstPrintPll("chosen", &clock);
-
 	if (dpll & DPLL_VCO_ENABLE) {
-		REG_WRITE(map->fp0, fp);
-		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
-		REG_READ(map->dpll);
-		/* Check the DPLLA lock bit PIPEACONF[29] */
-		udelay(150);
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->fp0, fp, i);
+			REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
+			REG_READ_WITH_AUX(map->dpll, i);
+			/* Check the DPLLA lock bit PIPEACONF[29] */
+			udelay(150);
+		}
 	}
 
-	REG_WRITE(map->fp0, fp);
-	REG_WRITE(map->dpll, dpll);
-	REG_READ(map->dpll);
-	/* Wait for the clocks to stabilize. */
-	udelay(150);
+	for (i = 0; i <= need_aux; i++) {
+		REG_WRITE_WITH_AUX(map->fp0, fp, i);
+		REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+		REG_READ_WITH_AUX(map->dpll, i);
+		/* Wait for the clocks to stabilize. */
+		udelay(150);
 
-	/* write it again -- the BIOS does, after all */
-	REG_WRITE(map->dpll, dpll);
-	REG_READ(map->dpll);
-	/* Wait for the clocks to stabilize. */
-	udelay(150);
+		/* write it again -- the BIOS does, after all */
+		REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+		REG_READ_WITH_AUX(map->dpll, i);
+		/* Wait for the clocks to stabilize. */
+		udelay(150);
 
-	REG_WRITE(map->conf, pipeconf);
-	REG_READ(map->conf);
-	gma_wait_for_vblank(dev);
+		REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
+		REG_READ_WITH_AUX(map->conf, i);
+		gma_wait_for_vblank(dev);
 
-	REG_WRITE(map->cntr, dspcntr);
-	gma_wait_for_vblank(dev);
+		REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
+		gma_wait_for_vblank(dev);
+	}
 
 oaktrail_crtc_mode_set_exit:
 	gma_power_end(dev);
@@ -565,3 +664,9 @@
 	.commit = gma_crtc_commit,
 };
 
+/* Not used yet */
+const struct gma_clock_funcs mrst_clock_funcs = {
+	.clock = mrst_lvds_clock,
+	.limit = mrst_limit,
+	.pll_is_valid = gma_pll_is_valid,
+};
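For reference, mrst_sdvo_find_best_pll() above walks the (m, n, p1) space, scores each candidate by relative frequency error in units of 1/10000, and treats only a zero error (after integer rounding) as success, which is why the caller still checks the return value. A standalone model of the search arithmetic using the MRST_LIMIT_SDVO bounds; the early-exit pruning on a growing error is dropped for brevity and the target clock is a made-up example:

    #include <stdint.h>
    #include <stdio.h>

    /* Bounds copied from the MRST_LIMIT_SDVO entry above (kHz). */
    #define VCO_MIN 1400000
    #define VCO_MAX 2800000

    int main(void)
    {
        const int32_t refclk = 96000;   /* SDVO reference, as above */
        const int32_t target = 100000;  /* hypothetical dot clock */
        int m, n, p1, best_m = 0, best_n = 0, best_p1 = 0;
        int32_t freq_error, min_error = 100000;

        for (m = 80; m <= 137; m++) {
            for (n = 3; n <= 7; n++) {
                for (p1 = 1; p1 <= 2; p1++) {
                    int32_t p = p1 * 10;    /* p2 fixed at 10 on SDVO */
                    int32_t target_vco = target * p;
                    int32_t actual;

                    if (target_vco > VCO_MAX)
                        break;              /* VCO only grows with p1 */
                    if (target_vco < VCO_MIN)
                        continue;

                    actual = (refclk * m) / (n * p);
                    freq_error = 10000 - (target * 10000) / actual;
                    if (freq_error < 0)
                        freq_error = -freq_error;
                    if (freq_error < min_error) {
                        min_error = freq_error;
                        best_m = m; best_n = n; best_p1 = p1;
                    }
                }
            }
        }
        printf("m=%d n=%d p1=%d error=%d/10000\n",
               best_m, best_n, best_p1, (int)min_error);
        return min_error == 0 ? 0 : 1;  /* exact hit: m=125 n=6 p1=2 */
    }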
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 7a9ce00..368a03a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -40,6 +40,9 @@
 		dev_err(dev->dev, "DSI is not supported\n");
 	if (dev_priv->hdmi_priv)
 		oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+
+	psb_intel_sdvo_init(dev, SDVOB);
+
 	return 0;
 }
 
@@ -526,6 +529,7 @@
 		psb_intel_opregion_init(dev);
 		psb_intel_init_bios(dev);
 	}
+	gma_intel_setup_gmbus(dev);
 	oaktrail_hdmi_setup(dev);
 	return 0;
 }
@@ -534,6 +538,7 @@
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
+	gma_intel_teardown_gmbus(dev);
 	oaktrail_hdmi_teardown(dev);
 	if (!dev_priv->has_gct)
 		psb_intel_destroy_bios(dev);
@@ -546,6 +551,7 @@
 	.crtcs = 2,
 	.hdmi_mask = (1 << 1),
 	.lvds_mask = (1 << 0),
+	.sdvo_mask = (1 << 1),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 1eb86c795..e281070 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -99,7 +99,7 @@
 	i2c_dev->status = I2C_STAT_INIT;
 	i2c_dev->msg = pmsg;
 	i2c_dev->buf_offset = 0;
-	INIT_COMPLETION(i2c_dev->complete);
+	reinit_completion(&i2c_dev->complete);
 
 	/* Enable I2C transaction */
 	temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
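INIT_COMPLETION(x) was a macro that re-armed a completion by zeroing x.done; 3.13 replaces it tree-wide with reinit_completion(&x), a real function taking a pointer, which resets the counter while leaving the wait queue untouched. A sketch of the usual re-arm-then-wait pattern (function name and timeout are hypothetical):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(edid_done);

    /* The IRQ handler signals the waiter with complete(&edid_done). */

    static int hdmi_i2c_wait_for_data(void)
    {
        /* Re-arm before each transaction; unlike init_completion()
         * this only resets the done counter. */
        reinit_completion(&edid_done);

        /* ... kick off the hardware transaction here ... */

        if (!wait_for_completion_timeout(&edid_done,
                                         msecs_to_jiffies(500)))
            return -ETIMEDOUT;
        return 0;
    }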
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 3ece553..5e06978 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -218,30 +218,6 @@
 	.commit = oaktrail_lvds_commit,
 };
 
-static struct drm_display_mode lvds_configuration_modes[] = {
-	/* hard coded fixed mode for TPO LTPS LPJ040K001A */
-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
-		   846, 1056, 0, 480, 489, 491, 525, 0, 0) },
-	/* hard coded fixed mode for LVDS 800x480 */
-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
-		   802, 1024, 0, 480, 481, 482, 525, 0, 0) },
-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
-		   1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
-		   1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
-	/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
-		   1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
-	/* hard coded fixed mode for LVDS 1024x768 */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
-		   1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
-	/* hard coded fixed mode for LVDS 1366x768 */
-	{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
-		   1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
-};
-
 /* Returns the panel fixed mode from configuration. */
 
 static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@@ -303,10 +279,10 @@
 			mode_dev->panel_fixed_mode =
 				drm_mode_duplicate(dev,
 					dev_priv->lfp_lvds_vbt_mode);
-	/* Then guess */
+
+	/* If we still got no mode then bail */
 	if (mode_dev->panel_fixed_mode == NULL)
-		mode_dev->panel_fixed_mode
-			= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+		return;
 
 	drm_mode_set_name(mode_dev->panel_fixed_mode);
 	drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 6976786..23fb33f 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -373,6 +373,7 @@
 	.crtcs = 2,
 	.hdmi_mask = (1 << 0),
 	.lvds_mask = (1 << 1),
+	.sdvo_mask = (1 << 0),
 	.cursor_needs_phys = 1,
 	.sgx_offset = PSB_SGX_OFFSET,
 	.chip_setup = psb_chip_setup,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9f..1199180 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -251,6 +251,12 @@
 			iounmap(dev_priv->sgx_reg);
 			dev_priv->sgx_reg = NULL;
 		}
+		if (dev_priv->aux_reg) {
+			iounmap(dev_priv->aux_reg);
+			dev_priv->aux_reg = NULL;
+		}
+		if (dev_priv->aux_pdev)
+			pci_dev_put(dev_priv->aux_pdev);
 
 		/* Destroy VBT data */
 		psb_intel_destroy_bios(dev);
@@ -266,7 +272,7 @@
 static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct drm_psb_private *dev_priv;
-	unsigned long resource_start;
+	unsigned long resource_start, resource_len;
 	unsigned long irqflags;
 	int ret = -ENOMEM;
 	struct drm_connector *connector;
@@ -296,6 +302,30 @@
 	if (!dev_priv->sgx_reg)
 		goto out_err;
 
+	if (IS_MRST(dev)) {
+		dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+
+		if (dev_priv->aux_pdev) {
+			resource_start = pci_resource_start(dev_priv->aux_pdev,
+							    PSB_AUX_RESOURCE);
+			resource_len = pci_resource_len(dev_priv->aux_pdev,
+							PSB_AUX_RESOURCE);
+			dev_priv->aux_reg = ioremap_nocache(resource_start,
+							    resource_len);
+			if (!dev_priv->aux_reg)
+				goto out_err;
+
+			DRM_DEBUG_KMS("Found aux vdc");
+		} else {
+			/* Couldn't find the aux vdc so map to primary vdc */
+			dev_priv->aux_reg = dev_priv->vdc_reg;
+			DRM_DEBUG_KMS("Couldn't find aux pci device");
+		}
+		dev_priv->gmbus_reg = dev_priv->aux_reg;
+	} else {
+		dev_priv->gmbus_reg = dev_priv->vdc_reg;
+	}
+
 	psb_intel_opregion_setup(dev);
 
 	ret = dev_priv->ops->chip_setup(dev);
@@ -359,7 +389,7 @@
 
 	drm_irq_install(dev);
 
-	dev->vblank_disable_allowed = 1;
+	dev->vblank_disable_allowed = true;
 
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
@@ -449,7 +479,7 @@
 	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
 		dev_dbg(dev->dev, "Invalid Connector object.\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	connector = obj_to_connector(obj);
@@ -491,7 +521,7 @@
 		obj = drm_mode_object_find(dev, obj_id,
 					DRM_MODE_OBJECT_CONNECTOR);
 		if (!obj) {
-			ret = -EINVAL;
+			ret = -ENOENT;
 			goto mode_op_out;
 		}
 
@@ -646,7 +676,6 @@
 	.preclose = psb_driver_preclose,
 	.postclose = psb_driver_close,
 
-	.gem_init_object = psb_gem_init_object,
 	.gem_free_object = psb_gem_free_object,
 	.gem_vm_ops = &psb_gem_vm_ops,
 	.dumb_create = psb_gem_dumb_create,
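One detail worth calling out in the hunks above: pci_get_bus_and_slot() returns its device with a reference held, which the new pci_dev_put() in psb_driver_unload() pairs with. A sketch of the acquire/release discipline for the aux VDC at bus 0, devfn 3.0 (names and error paths illustrative; the driver itself falls back to the primary VDC mapping when the lookup fails):

    #include <linux/io.h>
    #include <linux/pci.h>

    static struct pci_dev *aux_pdev;
    static void __iomem *aux_reg;

    static int psb_map_aux(void)
    {
        aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
        if (!aux_pdev)
            return -ENODEV; /* no aux device on this platform */

        aux_reg = ioremap_nocache(pci_resource_start(aux_pdev, 0),
                                  pci_resource_len(aux_pdev, 0));
        if (!aux_reg) {
            pci_dev_put(aux_pdev);  /* drop the lookup's reference */
            aux_pdev = NULL;
            return -ENOMEM;
        }
        return 0;
    }

    static void psb_unmap_aux(void)
    {
        if (aux_reg)
            iounmap(aux_reg);
        if (aux_pdev)
            pci_dev_put(aux_pdev);  /* balances pci_get_bus_and_slot() */
    }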
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7..b59e658 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@
 	CHIP_MFLD_0130 = 3,		/* Medfield */
 };
 
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
 
 /*
  * Driver definitions
@@ -75,6 +75,7 @@
  *	PCI resource identifiers
  */
 #define PSB_MMIO_RESOURCE	 0
+#define PSB_AUX_RESOURCE	 0
 #define PSB_GATT_RESOURCE	 2
 #define PSB_GTT_RESOURCE	 3
 /*
@@ -455,6 +456,7 @@
 
 struct drm_psb_private {
 	struct drm_device *dev;
+	struct pci_dev *aux_pdev; /* Currently only used by mrst */
 	const struct psb_ops *ops;
 	const struct psb_offset *regmap;
 	
@@ -486,6 +488,7 @@
 
 	uint8_t __iomem *sgx_reg;
 	uint8_t __iomem *vdc_reg;
+	uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
 	uint32_t gatt_free_offset;
 
 	/*
@@ -532,6 +535,7 @@
 
 	/* gmbus */
 	struct intel_gmbus *gmbus;
+	uint8_t __iomem *gmbus_reg;
 
 	/* Used by SDVO */
 	int crt_ddc_pin;
@@ -672,6 +676,7 @@
 	int sgx_offset;		/* Base offset of SGX device */
 	int hdmi_mask;		/* Mask of HDMI CRTCs */
 	int lvds_mask;		/* Mask of LVDS CRTCs */
+	int sdvo_mask;		/* Mask of SDVO CRTCs */
 	int cursor_needs_phys;  /* If cursor base reg need physical address */
 
 	/* Sub functions */
@@ -837,7 +842,6 @@
 extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
 
 /* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
 extern void psb_gem_free_object(struct drm_gem_object *obj);
 extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
 			struct drm_file *file);
@@ -928,16 +932,58 @@
 	return ioread32(dev_priv->vdc_reg + reg);
 }
 
+static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	return ioread32(dev_priv->aux_reg + reg);
+}
+
 #define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+#define REG_READ_AUX(reg)      REGISTER_READ_AUX(dev, (reg))
+
+/* Useful for posting reads */
+static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
+					      uint32_t reg, int aux)
+{
+	uint32_t val;
+
+	if (aux)
+		val = REG_READ_AUX(reg);
+	else
+		val = REG_READ(reg);
+
+	return val;
+}
+
+#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
 
 static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
-				      uint32_t val)
+				  uint32_t val)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	iowrite32((val), dev_priv->vdc_reg + (reg));
 }
 
+static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
+				      uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	iowrite32((val), dev_priv->aux_reg + (reg));
+}
+
 #define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
+#define REG_WRITE_AUX(reg, val)	REGISTER_WRITE_AUX(dev, (reg), (val))
+
+static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
+				      uint32_t val, int aux)
+{
+	if (aux)
+		REG_WRITE_AUX(reg, val);
+	else
+		REG_WRITE(reg, val);
+}
+
+#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
 
 static inline void REGISTER_WRITE16(struct drm_device *dev,
 					uint32_t reg, uint32_t val)
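The _WITH_AUX helpers deliberately keep index 0 as the primary block, so the `for (i = 0; i <= need_aux; i++)` loops in oaktrail_crtc.c collapse to a single plain REG_READ()/REG_WRITE() pass on hardware without the aux pipe. A usage sketch in driver context, mirroring the DPLL enable sequence (assumes the local `dev` the macros expect; the helper name is hypothetical):

    /* need_aux is 1 only for Moorestown CRTCs driving SDVO. */
    static void enable_dpll_mirrored(struct drm_device *dev, u32 dpll_reg,
                                     int need_aux)
    {
        int i;

        for (i = 0; i <= need_aux; i++) {
            u32 temp = REG_READ_WITH_AUX(dpll_reg, i);

            REG_WRITE_WITH_AUX(dpll_reg, temp | DPLL_VCO_ENABLE, i);
            REG_READ_WITH_AUX(dpll_reg, i); /* posting read */
            udelay(150);                    /* let the clock settle */
        }
    }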
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 97f8a03..c8841ac 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -572,7 +572,7 @@
 
 	if (!drmmode_obj) {
 		dev_err(dev->dev, "no such CRTC id\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 6f01cdf..07d3a9e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -228,24 +228,26 @@
 {
 	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
 	u32 bval = val, cval = val;
-	int i;
+	int i, j;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
-	if (psb_intel_sdvo->sdvo_reg == SDVOB) {
-		cval = REG_READ(SDVOC);
-	} else {
-		bval = REG_READ(SDVOB);
-	}
-	/*
-	 * Write the registers twice for luck. Sometimes,
-	 * writing them only once doesn't appear to 'stick'.
-	 * The BIOS does this too. Yay, magic
-	 */
-	for (i = 0; i < 2; i++)
-	{
-		REG_WRITE(SDVOB, bval);
-		REG_READ(SDVOB);
-		REG_WRITE(SDVOC, cval);
-		REG_READ(SDVOC);
+	for (j = 0; j <= need_aux; j++) {
+		if (psb_intel_sdvo->sdvo_reg == SDVOB)
+			cval = REG_READ_WITH_AUX(SDVOC, j);
+		else
+			bval = REG_READ_WITH_AUX(SDVOB, j);
+
+		/*
+		* Write the registers twice for luck. Sometimes,
+		* writing them only once doesn't appear to 'stick'.
+		* The BIOS does this too. Yay, magic
+		*/
+		for (i = 0; i < 2; i++) {
+			REG_WRITE_WITH_AUX(SDVOB, bval, j);
+			REG_READ_WITH_AUX(SDVOB, j);
+			REG_WRITE_WITH_AUX(SDVOC, cval, j);
+			REG_READ_WITH_AUX(SDVOC, j);
+		}
 	}
 }
 
@@ -995,6 +997,7 @@
 	struct psb_intel_sdvo_dtd input_dtd;
 	int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
 	int rate;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
 	if (!mode)
 		return;
@@ -1060,7 +1063,11 @@
 		return;
 
 	/* Set the SDVO control regs. */
-	sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+	if (need_aux)
+		sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+	else
+		sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 	switch (psb_intel_sdvo->sdvo_reg) {
 	case SDVOB:
 		sdvox &= SDVOB_PRESERVE_MASK;
@@ -1090,6 +1097,8 @@
 	struct drm_device *dev = encoder->dev;
 	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
 	u32 temp;
+	int i;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -1108,19 +1117,27 @@
 			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
 
 		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+			if (need_aux)
+				temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+			else
+				temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 			if ((temp & SDVO_ENABLE) != 0) {
 				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
 			}
 		}
 	} else {
 		bool input1, input2;
-		int i;
 		u8 status;
 
-		temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+		if (need_aux)
+			temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+		else
+			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 		if ((temp & SDVO_ENABLE) == 0)
 			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+
 		for (i = 0; i < 2; i++)
 			gma_wait_for_vblank(dev);
 
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf..ba48303 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@
 
 	if (gma_power_is_on(dev))
 		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
 	/* FIXME: Handle Medfield irq mask
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
 	*/
 
@@ -305,17 +305,17 @@
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@
 
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 
 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@
 {
 	unsigned int cur_vblank;
 	int ret = 0;
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
 		    (((cur_vblank = atomic_read(counter))
 		      - *sequence) <= (1 << 23)));
 	*sequence = cur_vblank;
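The psb_irq.c churn above is mechanical: the 3.13 DRM core folds the old parallel per-CRTC arrays (dev->vblank_enabled[], dev->vbl_queue and friends) into a single array of struct drm_vblank_crtc indexed by pipe. The three repeated enable/disable blocks could then collapse into one helper; a sketch (hypothetical name, same calls as above):

    static void psb_sync_vblank_pipestat(struct drm_device *dev,
                                         struct drm_psb_private *dev_priv,
                                         int pipe)
    {
        /* One struct per pipe replaces the old parallel arrays. */
        if (dev->vblank[pipe].enabled)
            psb_enable_pipestat(dev_priv, pipe,
                                PIPE_VBLANK_INTERRUPT_ENABLE);
        else
            psb_disable_pipestat(dev_priv, pipe,
                                 PIPE_VBLANK_INTERRUPT_ENABLE);
    }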
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e8404..400b0c4 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
 
 
 
+#include <linux/hdmi.h>
 #include <linux/module.h>
 
 #include <drm/drmP.h>
@@ -549,6 +550,8 @@
 	buf[HB(0)] = 0x82;
 	buf[HB(1)] = 0x02;
 	buf[HB(2)] = 13;
+	buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+	buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
 	buf[PB(4)] = drm_match_cea_mode(mode);
 
 	tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
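In the tda998x hunk, PB(1) and PB(3) index AVI InfoFrame data bytes 1 and 3. CEA-861 places scan information in bits 1:0 of byte 1 and the RGB quantization range in bits 3:2 of byte 3, hence the << 2; the new <linux/hdmi.h> include supplies the enum values. Packing sketch (hypothetical helper; the driver writes the values straight into buf[]):

    #include <linux/hdmi.h>

    static void tda998x_avi_bytes(u8 *pb1, u8 *pb3)
    {
        /* S1:S0 (bits 1:0) = 0b10, composed for an underscanned display */
        *pb1 = HDMI_SCAN_MODE_UNDERSCAN;
        /* Q1:Q0 (bits 3:2) = 0b10, full RGB quantization range */
        *pb3 = HDMI_QUANTIZATION_RANGE_FULL << 2;
    }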
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb..249fdff 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@
 				 dma->buflist[vertex->idx],
 				 vertex->discard, vertex->used);
 
-	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1105,8 +1103,6 @@
 	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
 			     mc->last_render);
 
-	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1197,13 +1193,6 @@
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	/* i810 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
 	pci_set_master(dev->pdev);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 0000000..6199d0b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
+	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain.  Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
+config DRM_I915_FBDEV
+	bool "Enable legacy fbdev support for the modesetting intel driver"
+	depends on DRM_I915
+	select DRM_KMS_FB_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the Linux console
+	  support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a8..41838ea 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
+	  intel_dsi.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi_pll.o \
 	  intel_bios.o \
 	  intel_ddi.o \
 	  intel_dp.o \
@@ -30,7 +33,6 @@
 	  intel_panel.o \
 	  intel_pm.o \
 	  intel_i2c.o \
-	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
@@ -51,6 +53,8 @@
 
 i915-$(CONFIG_ACPI)	+= intel_acpi.o
 
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad..3121633 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@
 			  struct drm_display_mode *mode);
 
 	/*
-	 * Callback to adjust the mode to be set in the CRTC.
-	 *
-	 * This allows an output to adjust the clock or even the entire set of
-	 * timings, which is used for panels with fixed timings or for
-	 * buses with clock limitations.
-	 */
-	bool (*mode_fixup)(struct intel_dvo_device *dvo,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-
-	/*
 	 * Callback for preparing mode changes on an output
 	 */
 	void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5..6ed45a9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -38,9 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DRM_I915_RING_DEBUG 1
-
-
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
@@ -54,6 +53,32 @@
 	return v ? "yes" : "no";
 }
 
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
 static int i915_capabilities(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@
 		seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+	seq_putc(m, ' ');
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -554,7 +586,53 @@
 	if (ret)
 		return ret;
 
-	if (IS_VALLEYVIEW(dev)) {
+	if (INTEL_INFO(dev)->gen >= 8) {
+		int i;
+		seq_printf(m, "Master Interrupt Control:\t%08x\n",
+			   I915_READ(GEN8_MASTER_IRQ));
+
+		for (i = 0; i < 4; i++) {
+			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IMR(i)));
+			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IIR(i)));
+			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IER(i)));
+		}
+
+		for_each_pipe(i) {
+			seq_printf(m, "Pipe %c IMR:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IMR(i)));
+			seq_printf(m, "Pipe %c IIR:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IIR(i)));
+			seq_printf(m, "Pipe %c IER:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IER(i)));
+		}
+
+		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IMR));
+		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IIR));
+		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IER));
+
+		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IMR));
+		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IIR));
+		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IER));
+
+		seq_printf(m, "PCU interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_PCU_IMR));
+		seq_printf(m, "PCU interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_PCU_IIR));
+		seq_printf(m, "PCU interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_PCU_IER));
+	} else if (IS_VALLEYVIEW(dev)) {
 		seq_printf(m, "Display IER:\t%08x\n",
 			   I915_READ(VLV_IER));
 		seq_printf(m, "Display IIR:\t%08x\n",
@@ -626,7 +704,7 @@
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
-		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
 				   ring->name, I915_READ_IMR(ring));
@@ -843,6 +921,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1401,8 @@
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1395,12 +1477,12 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_fbdev *ifbdev;
+	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
 	if (ret)
 		return ret;
 
@@ -1416,10 +1498,11 @@
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
 	mutex_unlock(&dev->mode_config.mutex);
+#endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-		if (&fb->base == ifbdev->helper.fb)
+		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1525,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1544,15 @@
 		seq_putc(m, '\n');
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->default_context) {
-			seq_printf(m, "HW default context %s ring ", ring->name);
-			describe_obj(m, ring->default_context->obj);
-			seq_putc(m, '\n');
-		}
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		seq_puts(m, "HW context ");
+		describe_ctx(m, ctx);
+		for_each_ring(ring, dev_priv, i)
+			if (ring->default_context == ctx)
+				seq_printf(m, "(default context %s) ", ring->name);
+
+		describe_obj(m, ctx->obj);
+		seq_putc(m, '\n');
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1536,7 +1623,7 @@
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
 			   I915_READ16(C1DRB3));
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
 			   I915_READ(MAD_DIMM_C0));
 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1545,8 +1632,12 @@
 			   I915_READ(MAD_DIMM_C2));
 		seq_printf(m, "TILECTL = 0x%08x\n",
 			   I915_READ(TILECTL));
-		seq_printf(m, "ARB_MODE = 0x%08x\n",
-			   I915_READ(ARB_MODE));
+		if (IS_GEN8(dev))
+			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
+				   I915_READ(GAMTARBMODE));
+		else
+			seq_printf(m, "ARB_MODE = 0x%08x\n",
+				   I915_READ(ARB_MODE));
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
@@ -1555,18 +1646,37 @@
 	return 0;
 }
 
-static int i915_ppgtt_info(struct seq_file *m, void *data)
+static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	int i, ret;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int unused, i;
 
+	if (!ppgtt)
+		return;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
+	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+	for_each_ring(ring, dev_priv, unused) {
+		seq_printf(m, "%s\n", ring->name);
+		for (i = 0; i < 4; i++) {
+			u32 offset = 0x270 + i * 8;
+			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+			pdp <<= 32;
+			pdp |= I915_READ(ring->mmio_base + offset);
+			for (i = 0; i < 4; i++)
+				seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+		}
+	}
+}
+
+static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
@@ -1585,6 +1695,22 @@
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	int ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		gen8_ppgtt_info(m, dev);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		gen6_ppgtt_info(m, dev);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1610,27 +1736,27 @@
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
 	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1655,126 +1781,20 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 psrstat, psrperf;
+	u32 psrperf = 0;
+	bool enabled = false;
 
-	if (!IS_HASWELL(dev)) {
-		seq_puts(m, "PSR not supported on this platform\n");
-	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
-		seq_puts(m, "PSR enabled\n");
-	} else {
-		seq_puts(m, "PSR disabled: ");
-		switch (dev_priv->no_psr_reason) {
-		case PSR_NO_SOURCE:
-			seq_puts(m, "not supported on this platform");
-			break;
-		case PSR_NO_SINK:
-			seq_puts(m, "not supported by panel");
-			break;
-		case PSR_MODULE_PARAM:
-			seq_puts(m, "disabled by flag");
-			break;
-		case PSR_CRTC_NOT_ACTIVE:
-			seq_puts(m, "crtc not active");
-			break;
-		case PSR_PWR_WELL_ENABLED:
-			seq_puts(m, "power well enabled");
-			break;
-		case PSR_NOT_TILED:
-			seq_puts(m, "not tiled");
-			break;
-		case PSR_SPRITE_ENABLED:
-			seq_puts(m, "sprite enabled");
-			break;
-		case PSR_S3D_ENABLED:
-			seq_puts(m, "stereo 3d enabled");
-			break;
-		case PSR_INTERLACED_ENABLED:
-			seq_puts(m, "interlaced enabled");
-			break;
-		case PSR_HSW_NOT_DDIA:
-			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_puts(m, "\n");
-		return 0;
-	}
+	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
-	psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+	enabled = HAS_PSR(dev) &&
+		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+	seq_printf(m, "Enabled: %s\n", yesno(enabled));
 
-	seq_puts(m, "PSR Current State: ");
-	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
-	case EDP_PSR_STATUS_STATE_IDLE:
-		seq_puts(m, "Reset state\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDONACK:
-		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDENT:
-		seq_puts(m, "SRD entry\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFOFF:
-		seq_puts(m, "Wait for buffer turn off\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFON:
-		seq_puts(m, "Wait for buffer turn on\n");
-		break;
-	case EDP_PSR_STATUS_STATE_AUXACK:
-		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDOFFACK:
-		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_puts(m, "Link Status: ");
-	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
-	case EDP_PSR_STATUS_LINK_FULL_OFF:
-		seq_puts(m, "Link is fully off\n");
-		break;
-	case EDP_PSR_STATUS_LINK_FULL_ON:
-		seq_puts(m, "Link is fully on\n");
-		break;
-	case EDP_PSR_STATUS_LINK_STANDBY:
-		seq_puts(m, "Link is in standby\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_printf(m, "PSR Entry Count: %u\n",
-		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
-		   EDP_PSR_STATUS_COUNT_MASK);
-
-	seq_printf(m, "Max Sleep Timer Counter: %u\n",
-		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
-		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
-
-	seq_printf(m, "Had AUX error: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
-
-	seq_printf(m, "Sending AUX: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
-
-	seq_printf(m, "Sending Idle: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
-	seq_printf(m, "Sending TP2 TP3: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
-
-	seq_printf(m, "Sending TP1: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
-
-	seq_printf(m, "Idle Count: %u\n",
-		   psrstat & EDP_PSR_STATUS_IDLE_MASK);
-
-	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
-	seq_printf(m, "Performance Counter: %u\n", psrperf);
+	if (HAS_PSR(dev))
+		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+			EDP_PSR_PERF_CNT_MASK;
+	seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
 	return 0;
 }
@@ -1825,6 +1845,751 @@
 	return 0;
 }
 
+struct pipe_crc_info {
+	const char *name;
+	struct drm_device *dev;
+	enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	spin_lock_irq(&pipe_crc->lock);
+
+	if (pipe_crc->opened) {
+		spin_unlock_irq(&pipe_crc->lock);
+		return -EBUSY; /* already open */
+	}
+
+	pipe_crc->opened = true;
+	filep->private_data = inode->i_private;
+
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->opened = false;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
+/* account for the '\0' terminator */
+#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
+
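+/* Number of entries currently queued in the CRC ring; CIRC_CNT handles
+ * head/tail wrap-around (the ring size is a power of two). */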
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+	assert_spin_locked(&pipe_crc->lock);
+	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+		   loff_t *pos)
+{
+	struct pipe_crc_info *info = filep->private_data;
+	struct drm_device *dev = info->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+	char buf[PIPE_CRC_BUFFER_LEN];
+	int head, tail, n_entries, n;
+	ssize_t bytes_read;
+
+	/*
+	 * Don't allow user space to provide buffers not big enough to hold
+	 * a line of data.
+	 */
+	if (count < PIPE_CRC_LINE_LEN)
+		return -EINVAL;
+
+	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+		return 0;
+
+	/* nothing to read */
+	spin_lock_irq(&pipe_crc->lock);
+	while (pipe_crc_data_count(pipe_crc) == 0) {
+		int ret;
+
+		if (filep->f_flags & O_NONBLOCK) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return -EAGAIN;
+		}
+
+		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+		if (ret) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return ret;
+		}
+	}
+
+	/* We now have one or more entries to read */
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+			count / PIPE_CRC_LINE_LEN);
+	spin_unlock_irq(&pipe_crc->lock);
+
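+	/* Format and copy the entries with the lock dropped; the new tail is
+	 * only published under the lock once every line has been copied. */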
+	bytes_read = 0;
+	n = 0;
+	do {
+		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+		int ret;
+
+		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+				       "%8u %8x %8x %8x %8x %8x\n",
+				       entry->frame, entry->crc[0],
+				       entry->crc[1], entry->crc[2],
+				       entry->crc[3], entry->crc[4]);
+
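+		/* copy_to_user() returns the number of bytes it could NOT
+		 * copy, so a whole line left over means nothing was copied. */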
+		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+				   buf, PIPE_CRC_LINE_LEN);
+		if (ret == PIPE_CRC_LINE_LEN)
+			return -EFAULT;
+
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+		n++;
+	} while (--n_entries);
+
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->tail = tail;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_pipe_crc_open,
+	.read = i915_pipe_crc_read,
+	.release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+	{
+		.name = "i915_pipe_A_crc",
+		.pipe = PIPE_A,
+	},
+	{
+		.name = "i915_pipe_B_crc",
+		.pipe = PIPE_B,
+	},
+	{
+		.name = "i915_pipe_C_crc",
+		.pipe = PIPE_C,
+	},
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+				enum pipe pipe)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+	info->dev = dev;
+	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+				  &i915_pipe_crc_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, info);
+}
+
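+/* Keep in sync with enum intel_pipe_crc_source; pipe_crc_source_name()
+ * has a BUILD_BUG_ON to enforce the correspondence. */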
+static const char * const pipe_crc_sources[] = {
+	"none",
+	"plane1",
+	"plane2",
+	"pf",
+	"pipe",
+	"TV",
+	"DP-B",
+	"DP-C",
+	"DP-D",
+	"auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+	return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		seq_printf(m, "%c %s\n", pipe_name(i),
+			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+	return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
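+/* For the "auto" source, derive the tap point from the encoder currently
+ * driving the pipe (TV or a DP port), defaulting to the pipe itself. */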
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+				     enum intel_pipe_crc_source *source)
+{
+	struct intel_encoder *encoder;
+	struct intel_crtc *crtc;
+	struct intel_digital_port *dig_port;
+	int ret = 0;
+
+	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!encoder->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(encoder->base.crtc);
+
+		if (crtc->pipe != pipe)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_TVOUT:
+			*source = INTEL_PIPE_CRC_SOURCE_TV;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_EDP:
+			dig_port = enc_to_dig_port(&encoder->base);
+			switch (dig_port->port) {
+			case PORT_B:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
+				break;
+			case PORT_C:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
+				break;
+			case PORT_D:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
+				break;
+			default:
+				WARN(1, "nonexisting DP port %c\n",
+				     port_name(dig_port->port));
+				break;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		tmp |= DC_BALANCE_RESET_VLV;
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+				 enum pipe pipe,
+				 enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_TV:
+		if (!SUPPORTS_TV(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
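+/* Undo the symbol-level tweaks applied above once CRC capture stops. */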
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+		tmp &= ~DC_BALANCE_RESET_VLV;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+	}
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PF;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+			       enum intel_pipe_crc_source source)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	u32 val;
+	int ret;
+
+	if (pipe_crc->source == source)
+		return 0;
+
+	/* forbid changing the source without going back to 'none' */
+	if (pipe_crc->source && source)
+		return -EINVAL;
+
+	if (IS_GEN2(dev))
+		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+	else if (INTEL_INFO(dev)->gen < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev))
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_GEN5(dev) || IS_GEN6(dev))
+		ret = ilk_pipe_crc_ctl_reg(&source, &val);
+	else
+		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+	if (ret != 0)
+		return ret;
+
+	/* none -> real source transition */
+	if (source) {
+		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+				 pipe_name(pipe), pipe_crc_source_name(source));
+
+		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+					    INTEL_PIPE_CRC_ENTRIES_NR,
+					    GFP_KERNEL);
+		if (!pipe_crc->entries)
+			return -ENOMEM;
+
+		spin_lock_irq(&pipe_crc->lock);
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
+		spin_unlock_irq(&pipe_crc->lock);
+	}
+
+	pipe_crc->source = source;
+
+	I915_WRITE(PIPE_CRC_CTL(pipe), val);
+	POSTING_READ(PIPE_CRC_CTL(pipe));
+
+	/* real source -> none transition */
+	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+		struct intel_pipe_crc_entry *entries;
+
+		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+				 pipe_name(pipe));
+
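+		/* Give any in-flight CRC interrupt a frame to finish before
+		 * the entries buffer is freed below. */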
+		intel_wait_for_vblank(dev, pipe);
+
+		spin_lock_irq(&pipe_crc->lock);
+		entries = pipe_crc->entries;
+		pipe_crc->entries = NULL;
+		spin_unlock_irq(&pipe_crc->lock);
+
+		kfree(entries);
+
+		if (IS_G4X(dev))
+			g4x_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_VALLEYVIEW(dev))
+			vlv_undo_pipe_scramble_reset(dev, pipe);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf | pipe | TV | DP-B | DP-C | DP-D | auto)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
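+/*
+ * Once a source is armed, each line read back from i915_pipe_<pipe>_crc
+ * has the form "<frame> <crc0> <crc1> <crc2> <crc3> <crc4>", as formatted
+ * by i915_pipe_crc_read() above.
+ */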
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+	int n_words = 0;
+
+	while (*buf) {
+		char *end;
+
+		/* skip leading white space */
+		buf = skip_spaces(buf);
+		if (!*buf)
+			break;	/* end of buffer */
+
+		/* find end of word */
+		for (end = buf; *end && !isspace(*end); end++)
+			;
+
+		if (n_words == max_words) {
+			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+					 max_words);
+			return -EINVAL;	/* ran out of words[] before bytes */
+		}
+
+		if (*end)
+			*end++ = '\0';
+		words[n_words++] = buf;
+		buf = end;
+	}
+
+	return n_words;
+}
+
+enum intel_pipe_crc_object {
+	PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+	"pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+		if (!strcmp(buf, pipe_crc_objects[i])) {
+			*o = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+	const char name = buf[0];
+
+	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+		return -EINVAL;
+
+	*pipe = name - 'A';
+
+	return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+		if (!strcmp(buf, pipe_crc_sources[i])) {
+			*s = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+	int n_words;
+	char *words[N_WORDS];
+	enum pipe pipe;
+	enum intel_pipe_crc_object object;
+	enum intel_pipe_crc_source source;
+
+	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+	if (n_words != N_WORDS) {
+		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+				 N_WORDS);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+		return -EINVAL;
+	}
+
+	return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	char *tmpbuf;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+				 PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(tmpbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	tmpbuf[len] = '\0';
+
+	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+	kfree(tmpbuf);
+	if (ret < 0)
+		return ret;
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = display_crc_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = display_crc_ctl_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -1885,6 +2650,72 @@
 			i915_ring_stop_get, i915_ring_stop_set,
 			"0x%08llx\n");
 
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.missed_irq_rings;
+	return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	dev_priv->gpu_error.missed_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.test_irq_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.test_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+			i915_ring_test_irq_get, i915_ring_test_irq_set,
+			"0x%08llx\n");
+
 #define DROP_UNBOUND 0x1
 #define DROP_BOUND 0x2
 #define DROP_RETIRE 0x4
@@ -1972,6 +2803,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	/* RPS is brought up from a delayed work on resume; flush it so the
+	 * state read under hw_lock below is fully initialized. */
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1996,6 +2829,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2869,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2058,6 +2895,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2975,6 @@
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
-		       struct dentry *ent,
-		       const void *key)
-{
-	struct drm_info_node *node;
-
-	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-	if (node == NULL) {
-		debugfs_remove(ent);
-		return -ENOMEM;
-	}
-
-	node->minor = minor;
-	node->dent = ent;
-	node->info_ent = (void *) key;
-
-	mutex_lock(&minor->debugfs_lock);
-	list_add(&node->list, &minor->debugfs_list);
-	mutex_unlock(&minor->debugfs_lock);
-
-	return 0;
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -2227,7 +3040,7 @@
 	return drm_add_fake_info_node(minor, ent, fops);
 }
 
-static struct drm_info_list i915_debugfs_list[] = {
+static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2269,7 +3082,7 @@
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
-static struct i915_debugfs_files {
+static const struct i915_debugfs_files {
 	const char *name;
 	const struct file_operations *fops;
 } i915_debugfs_files[] = {
@@ -2278,11 +3091,28 @@
 	{"i915_min_freq", &i915_min_freq_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 };
 
+void intel_display_crc_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+		pipe_crc->opened = false;
+		spin_lock_init(&pipe_crc->lock);
+		init_waitqueue_head(&pipe_crc->wq);
+	}
+}
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
 	int ret, i;
@@ -2291,6 +3121,12 @@
 	if (ret)
 		return ret;
 
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		ret = i915_debugfs_create(minor->debugfs_root, minor,
 					  i915_debugfs_files[i].name,
@@ -2310,8 +3146,17 @@
 
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
 				 1, minor);
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *)&i915_pipe_crc_data[i];
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
 			(struct drm_info_list *) i915_debugfs_files[i].fops;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d..0cab2d0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
 	intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
+	__intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@
 
 	if (batch->num_cliprects) {
 		cliprects = kcalloc(batch->num_cliprects,
-				    sizeof(struct drm_clip_rect),
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL)
 			return -ENOMEM;
@@ -703,7 +703,7 @@
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
-				    sizeof(struct drm_clip_rect), GFP_KERNEL);
+				    sizeof(*cliprects), GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
 			goto fail_batch_free;
@@ -931,7 +931,7 @@
 		value = READ_BREADCRUMB(dev_priv);
 		break;
 	case I915_PARAM_CHIPSET_ID:
-		value = dev->pci_device;
+		value = dev->pdev->device;
 		break;
 	case I915_PARAM_HAS_GEM:
 		value = 1;
@@ -1311,13 +1311,15 @@
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	intel_power_domains_init_hw(dev);
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
 	ret = i915_gem_init(dev);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_power;
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
@@ -1325,9 +1327,11 @@
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
-	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0)
+	dev->vblank_disable_allowed = true;
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		intel_display_power_put(dev, POWER_DOMAIN_VGA);
 		return 0;
+	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1362,7 +1366,8 @@
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+	intel_display_power_put(dev, POWER_DOMAIN_VGA);
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1403,7 @@
 	master->driver_priv = NULL;
 }
 
+#ifdef CONFIG_DRM_I915_FBDEV
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@
 
 	kfree(ap);
 }
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ -1459,17 +1470,13 @@
 	info = (struct intel_device_info *) flags;
 
 	/* Refuse to load on gen6+ without kms enabled. */
-	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
 		return -ENODEV;
+	}
 
-	/* i915 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
-	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -1494,6 +1501,8 @@
 	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
 	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 
+	intel_display_crc_init(dev);
+
 	i915_dump_device_info(dev_priv);
 
 	/* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@
 
 	intel_uncore_early_sanitize(dev);
 
-	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
-		/* The docs do not explain exactly how the calculation can be
-		 * made. It is somewhat guessable, but for now, it's always
-		 * 128MB.
-		 * NB: We can't write IDICR yet because we do not have gt funcs
-		 * set up */
-		dev_priv->ellc_size = 128;
-		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
-	}
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_uncore_init(dev);
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto put_bridge;
+		goto out_regs;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@
 				     aperture_size);
 	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
-		goto out_rmmap;
+		goto out_gtt;
 	}
 
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@
 		goto out_mtrrfree;
 	}
 
-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev);
-
 	intel_irq_init(dev);
 	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
-	intel_uncore_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@
 	}
 
 	if (HAS_POWER_WELL(dev))
-		i915_init_power_well(dev);
+		intel_power_domains_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_gem_unload;
+			goto out_power_well;
 		}
 	} else {
 		/* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@
 
 	return 0;
 
+out_power_well:
+	if (HAS_POWER_WELL(dev))
+		intel_power_domains_remove(dev);
+	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+	list_del(&dev_priv->gtt.base.global_link);
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+	intel_uncore_fini(dev);
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
 	kfree(dev_priv);
 	return ret;
 }
@@ -1700,8 +1710,8 @@
 		/* The i915.ko module is still not prepared to be loaded when
 		 * the power well is not enabled, so just enable it in case
 		 * we're going to unload/reload. */
-		intel_set_power_well(dev, true);
-		i915_remove_power_well(dev);
+		intel_display_set_init_power(dev, true);
+		intel_power_domains_remove(dev);
 	}
 
 	i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	i915_gem_retire_requests(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	/* Cancel the retire work handler, which should be idle now. */
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@
 	list_del(&dev_priv->gtt.base.global_link);
 	WARN_ON(!list_empty(&dev_priv->vm_list));
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+
+	drm_vblank_cleanup(dev);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@
 
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
+	intel_uncore_fini(dev);
+	if (dev_priv->regs != NULL)
+		pci_iounmap(dev->pdev, dev_priv->regs);
+
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);
 
@@ -1796,19 +1804,11 @@
 
 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *file_priv;
+	int ret;
 
-	DRM_DEBUG_DRIVER("\n");
-	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
-	if (!file_priv)
-		return -ENOMEM;
-
-	file->driver_priv = file_priv;
-
-	spin_lock_init(&file_priv->mm.lock);
-	INIT_LIST_HEAD(&file_priv->mm.request_list);
-
-	idr_init(&file_priv->context_idr);
+	ret = i915_gem_open(dev, file);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -1836,7 +1836,7 @@
 		return;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_fb_restore_mode(dev);
+		intel_fbdev_restore_mode(dev);
 		vga_switcheroo_process_delayed_switch();
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ad2788..989be12 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i85x_info = {
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965g_info = {
 	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@
 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g33_info = {
 	.gen = 3, .is_g33 = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };
 
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
 	.need_gfx_hws = 1, .has_hotplug = 1, \
-	.has_bsd_ring = 1, \
-	.has_blt_ring = 1, \
-	.has_llc = 1, \
-	.has_force_wake = 1
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1
 
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
@@ -318,7 +323,7 @@
 	.is_haswell = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,25 @@
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+};
+
+static const struct intel_device_info intel_broadwell_d_info = {
+	.is_preliminary = 1,
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+	.is_preliminary = 1,
+	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
 };
 
 /*
@@ -362,7 +385,9 @@
 	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
 	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
 	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
-	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
+	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
+	INTEL_BDW_D_IDS(&intel_broadwell_d_info)
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_PCI_IDS,
@@ -416,13 +441,19 @@
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(IS_ULT(dev));
+			} else if (IS_BROADWELL(dev)) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->pch_id =
+					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+				DRM_DEBUG_KMS("This is Broadwell, assuming "
+					      "LynxPoint LP PCH\n");
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@@ -447,6 +478,12 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
+	/* Until we get further testing... */
+	if (IS_GEN8(dev)) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 0;
+	}
+
 	if (i915_semaphores >= 0)
 		return i915_semaphores;
 
@@ -472,7 +509,7 @@
 	/* We do a lot of poking in a lot of registers, make sure they work
 	 * properly. */
 	hsw_disable_package_c8(dev_priv);
-	intel_set_power_well(dev, true);
+	intel_display_set_init_power(dev, true);
 
 	drm_kms_helper_poll_disable(dev);
 
@@ -482,9 +519,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		int error;
 
-		mutex_lock(&dev->struct_mutex);
-		error = i915_gem_idle(dev);
-		mutex_unlock(&dev->struct_mutex);
+		error = i915_gem_suspend(dev);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -578,11 +613,24 @@
 	drm_helper_hpd_irq_event(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	intel_uncore_early_sanitize(dev);
+
+	intel_uncore_sanitize(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    restore_gtt_mappings) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	intel_power_domains_init_hw(dev);
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
@@ -642,20 +690,10 @@
 
 static int i915_drm_thaw(struct drm_device *dev)
 {
-	int error = 0;
-
-	intel_uncore_sanitize(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_check_and_clear_faults(dev);
 
-	__i915_drm_thaw(dev);
-
-	return error;
+	return __i915_drm_thaw(dev, true);
 }
 
 int i915_resume(struct drm_device *dev)
@@ -671,20 +709,12 @@
 
 	pci_set_master(dev->pdev);
 
-	intel_uncore_sanitize(dev);
-
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 * earlier) need to restore the GTT mappings since the BIOS might clear
+	 * all our scratch PTEs.
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    !dev_priv->opregion.header) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	ret = __i915_drm_thaw(dev);
+	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
 	if (ret)
 		return ret;
 
@@ -722,24 +752,19 @@
 
 	simulated = dev_priv->gpu_error.stop_rings != 0;
 
-	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
-		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-		ret = -ENODEV;
-	} else {
-		ret = intel_gpu_reset(dev);
+	ret = intel_gpu_reset(dev);
 
-		/* Also reset the gpu hangman. */
-		if (simulated) {
-			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-			dev_priv->gpu_error.stop_rings = 0;
-			if (ret == -ENODEV) {
-				DRM_ERROR("Reset not implemented, but ignoring "
-					  "error for simulated gpu hangs\n");
-				ret = 0;
-			}
-		} else
-			dev_priv->gpu_error.last_reset = get_seconds();
+	/* Also reset the gpu hangman. */
+	if (simulated) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
 	}
+
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -762,30 +787,17 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->ums.mm_suspended) {
-		struct intel_ring_buffer *ring;
-		int i;
-
+		bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
 		dev_priv->ums.mm_suspended = 0;
 
-		i915_gem_init_swizzling(dev);
-
-		for_each_ring(ring, dev_priv, i)
-			ring->init(ring);
-
-		i915_gem_context_init(dev);
-		if (dev_priv->mm.aliasing_ppgtt) {
-			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-			if (ret)
-				i915_gem_cleanup_aliasing_ppgtt(dev);
-		}
-
-		/*
-		 * It would make sense to re-init all the other hw state, at
-		 * least the rps/rc6/emon init done within modeset_init_hw. For
-		 * some unknown reason, this blows up my ilk, so don't.
-		 */
-
+		ret = i915_gem_init_hw(dev);
+		if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+			DRM_ERROR("HW contexts didn't survive reset\n");
 		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			DRM_ERROR("Failed hw init on reset %d\n", ret);
+			return ret;
+		}
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
@@ -802,6 +814,12 @@
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
+	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
 	 * us confusion instead, especially on the systems where both
@@ -949,7 +967,6 @@
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
 #endif
-	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ab0f2c0..8600c31 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
 #define DRIVER_DATE		"20080730"
 
 enum pipe {
+	INVALID_PIPE = -1,
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
@@ -98,13 +99,29 @@
 	POWER_DOMAIN_TRANSCODER_A,
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
-	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_INIT,
+
+	POWER_DOMAIN_NUM,
 };
 
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
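+/* TRANSCODER_EDP is not contiguous with transcoders A-C, so it needs an
+ * explicit mapping. */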
+#define POWER_DOMAIN_TRANSCODER(tran) \
+	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+	 (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP))
+#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
 
 enum hpd_pin {
 	HPD_NONE = 0,
@@ -225,9 +242,12 @@
 	struct opregion_header __iomem *header;
 	struct opregion_acpi __iomem *acpi;
 	struct opregion_swsci __iomem *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
 	struct opregion_asle __iomem *asle;
 	void __iomem *vbt;
 	u32 __iomem *lid_state;
+	struct work_struct asle_work;
 };
 #define OPREGION_SIZE            (8*1024)
 
@@ -285,6 +305,7 @@
 	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
+	u32 bbstate[I915_NUM_RINGS];
 	u32 instpm[I915_NUM_RINGS];
 	u32 instps[I915_NUM_RINGS];
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +342,13 @@
 		u32 dirty:1;
 		u32 purgeable:1;
 		s32 ring:4;
-		u32 cache_level:2;
+		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
 	u32 *active_bo_count, *pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	int hangcheck_score[I915_NUM_RINGS];
+	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_crtc_config;
@@ -357,7 +380,7 @@
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
-	void (*update_wm)(struct drm_device *dev);
+	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, int pixel_size,
@@ -367,7 +390,6 @@
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
-	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -375,7 +397,8 @@
 	void (*crtc_disable)(struct drm_crtc *crtc);
 	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
-			  struct drm_crtc *crtc);
+			  struct drm_crtc *crtc,
+			  struct drm_display_mode *mode);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +418,20 @@
 struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv);
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+				uint8_t val, bool trace);
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+				uint16_t val, bool trace);
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+				uint32_t val, bool trace);
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+				uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -404,6 +441,8 @@
 
 	unsigned fifo_count;
 	unsigned forcewake_count;
+
+	struct delayed_work force_wake_work;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +459,7 @@
 	func(is_ivybridge) sep \
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
-	func(has_force_wake) sep \
+	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
 	func(has_hotplug) sep \
@@ -428,9 +467,6 @@
 	func(has_overlay) sep \
 	func(overlay_needs_physical) sep \
 	func(supports_tv) sep \
-	func(has_bsd_ring) sep \
-	func(has_blt_ring) sep \
-	func(has_vebox_ring) sep \
 	func(has_llc) sep \
 	func(has_ddi) sep \
 	func(has_fpga_dbg)
@@ -442,6 +478,7 @@
 	u32 display_mmio_offset;
 	u8 num_pipes:3;
 	u8 gen;
+	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 };
 
@@ -542,10 +579,21 @@
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
 	unsigned num_pd_entries;
-	struct page **pt_pages;
-	uint32_t pd_offset;
-	dma_addr_t *pt_dma_addr;
-
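+	/* gen6/7 and gen8 use different page-table layouts; the unions below
+	 * let both generations share this structure. */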
+	union {
+		struct page **pt_pages;
+		struct page *gen8_pt_pages;
+	};
+	struct page *pd_pages;
+	int num_pd_pages;
+	int num_pt_pages;
+	union {
+		uint32_t pd_offset;
+		dma_addr_t pd_dma_addr[4];
+	};
+	union {
+		dma_addr_t *pt_dma_addr;
+		dma_addr_t *gen8_pt_dma_addr[4];
+	};
 	int (*enable)(struct drm_device *dev);
 };
 
@@ -570,6 +618,13 @@
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -578,6 +633,12 @@
 
 	/* This context had batch active when hang was declared */
 	unsigned batch_active;
+
+	/* Time when this context was last blamed for a GPU reset */
+	unsigned long guilty_ts;
+
+	/* This context is banned to submit more work */
+	bool banned;
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +647,13 @@
 	struct kref ref;
 	int id;
 	bool is_initialized;
+	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+
+	struct list_head link;
 };
 
 struct i915_fbc {
@@ -623,17 +687,9 @@
 	} no_fbc_reason;
 };
 
-enum no_psr_reason {
-	PSR_NO_SOURCE, /* Not supported on platform */
-	PSR_NO_SINK, /* Not supported by panel */
-	PSR_MODULE_PARAM,
-	PSR_CRTC_NOT_ACTIVE,
-	PSR_PWR_WELL_ENABLED,
-	PSR_NOT_TILED,
-	PSR_SPRITE_ENABLED,
-	PSR_S3D_ENABLED,
-	PSR_INTERLACED_ENABLED,
-	PSR_HSW_NOT_DDIA,
+struct i915_psr {
+	bool sink_support;
+	bool source_ok;
 };
 
 enum intel_pch {
@@ -704,6 +760,9 @@
 	u32 saveBLC_HIST_CTL;
 	u32 saveBLC_PWM_CTL;
 	u32 saveBLC_PWM_CTL2;
+	u32 saveBLC_HIST_CTL_B;
+	u32 saveBLC_PWM_CTL_B;
+	u32 saveBLC_PWM_CTL2_B;
 	u32 saveBLC_CPU_PWM_CTL;
 	u32 saveBLC_CPU_PWM_CTL2;
 	u32 saveFPB0;
@@ -823,17 +882,20 @@
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* On vlv we need to manually drop to Vmin with a delayed work. */
-	struct delayed_work vlv_work;
-
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
 	u8 rpe_delay;
+	u8 rp1_delay;
+	u8 rp0_delay;
 	u8 hw_max;
 
+	int last_adj;
+	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+	bool enabled;
 	struct delayed_work delayed_resume_work;
 
 	/*
@@ -870,11 +932,21 @@
 
 /* Power well structure for haswell */
 struct i915_power_well {
-	struct drm_device *device;
-	spinlock_t lock;
 	/* power well enable/disable usage count */
 	int count;
-	int i915_request;
+};
+
+#define I915_MAX_POWER_WELLS 1
+
+struct i915_power_domains {
+	/*
+	 * Power wells needed for initialization at driver init and suspend
+	 * time are on. They are kept on until after the first modeset.
+	 */
+	bool init_power_on;
+
+	struct mutex lock;
+	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
 };
 
 struct i915_dri1_state {
@@ -902,9 +974,11 @@
 	int mm_suspended;
 };
 
+#define MAX_L3_SLICES 2
 struct intel_l3_parity {
-	u32 *remap_info;
+	u32 *remap_info[MAX_L3_SLICES];
 	struct work_struct error_work;
+	int which_slice;
 };
 
 struct i915_gem_mm {
@@ -942,6 +1016,15 @@
 	struct delayed_work retire_work;
 
 	/**
+	 * When we detect an idle GPU, we want to turn on
+	 * powersaving features. So once we see that there
+	 * are no more requests outstanding and no more
+	 * arrive within a small period of time, we fire
+	 * off the idle_work.
+	 */
+	struct delayed_work idle_work;
+
+	/**
 	 * Are we in a non-interruptible section of code like
 	 * modesetting?
 	 */
@@ -979,6 +1062,9 @@
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	/* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
 	struct timer_list hangcheck_timer;
 
 	/* For reset and error_state handling. */
@@ -987,7 +1073,8 @@
 	struct drm_i915_error_state *first_error;
 	struct work_struct work;
 
-	unsigned long last_reset;
+
+	unsigned long missed_irq_rings;
 
 	/**
 	 * State variable and reset counter controlling the reset flow
@@ -1027,6 +1114,9 @@
 
 	/* For gpu hang simulation. */
 	unsigned int stop_rings;
+
+	/* For missed irq/seqno simulation. */
+	unsigned int test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1035,6 +1125,14 @@
 	MODESET_SUSPENDED,
 };
 
+struct ddi_vbt_port_info {
+	uint8_t hdmi_level_shift;
+
+	uint8_t supports_dvi:1;
+	uint8_t supports_hdmi:1;
+	uint8_t supports_dp:1;
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1158,17 @@
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	/* MIPI DSI */
+	struct {
+		u16 panel_id;
+	} dsi;
+
 	int crt_ddc_pin;
 
 	int child_dev_num;
-	struct child_device_config *child_dev;
+	union child_device_config *child_dev;
+
+	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 };
 
 enum intel_ddb_partitioning {
@@ -1079,6 +1184,15 @@
 	uint32_t fbc_val;
 };
 
+struct hsw_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+	enum intel_ddb_partitioning partitioning;
+};
+
 /*
  * This struct tracks the state needed for the Package C8+ feature.
  *
@@ -1148,6 +1262,36 @@
 	} regsave;
 };
 
+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+	uint32_t frame;
+	uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	bool opened;		/* exclusive access to the result file */
+	struct intel_pipe_crc_entry *entries;
+	enum intel_pipe_crc_source source;
+	int head, tail;
+	wait_queue_head_t wq;
+};
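
[Editor's note] entries, head and tail form a classic bounded producer/consumer ring, with wq waking whoever reads the result file. A minimal sketch of the index arithmetic, assuming 128 slots as above; names and the drop-on-full policy are illustrative, not taken from the driver:

#define DEMO_ENTRIES 128	/* matches INTEL_PIPE_CRC_ENTRIES_NR */

struct demo_crc_ring {
	unsigned int entries[DEMO_ENTRIES];
	int head, tail;		/* producer and consumer indices */
};

static int demo_ring_push(struct demo_crc_ring *r, unsigned int crc)
{
	int next = (r->head + 1) % DEMO_ENTRIES;

	if (next == r->tail)
		return -1;	/* full: one slot is sacrificed */
	r->entries[r->head] = crc;
	r->head = next;
	return 0;
}

static int demo_ring_pop(struct demo_crc_ring *r, unsigned int *crc)
{
	if (r->head == r->tail)
		return -1;	/* empty */
	*crc = r->entries[r->tail];
	r->tail = (r->tail + 1) % DEMO_ENTRIES;
	return 0;
}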
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1193,7 +1337,10 @@
 	struct mutex dpio_lock;
 
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask;
+	union {
+		u32 irq_mask;
+		u32 de_irq_mask[I915_MAX_PIPES];
+	};
 	u32 gt_irq_mask;
 	u32 pm_irq_mask;
 
@@ -1272,6 +1419,10 @@
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
 
+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1448,18 @@
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;
 
-	/* Haswell power well */
-	struct i915_power_well power_well;
+	struct i915_power_domains power_domains;
 
-	enum no_psr_reason no_psr_reason;
+	struct i915_psr psr;
 
 	struct i915_gpu_error gpu_error;
 
 	struct drm_i915_gem_object *vlv_pctx;
 
+#ifdef CONFIG_DRM_I915_FBDEV
 	/* fbdev registered on this device */
 	struct intel_fbdev *fbdev;
+#endif
 
 	/*
 	 * The console may be contended at resume, but we don't
@@ -1320,6 +1472,7 @@
 
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+	struct list_head context_list;
 
 	u32 fdi_rx_config;
 
@@ -1337,6 +1490,9 @@
 		uint16_t spr_latency[5];
 		/* cursor */
 		uint16_t cur_latency[5];
+
+		/* current hardware state */
+		struct hsw_wm_values hw;
 	} wm;
 
 	struct i915_package_c8 pc8;
@@ -1400,8 +1556,6 @@
 	struct list_head ring_list;
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
-	/** This object's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
 
 	/**
 	 * This is set if the object is on the active lists (has pending
@@ -1487,13 +1641,6 @@
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1652,14 @@
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
+	/** References from framebuffers, locks out tiling changes. */
+	unsigned long framebuffer_references;
+
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
 	/** User space pin count and filp owning the pin */
-	uint32_t user_pin_count;
+	unsigned long user_pin_count;
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
@@ -1560,48 +1710,56 @@
 };
 
 struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+
 	struct {
 		spinlock_t lock;
 		struct list_head request_list;
+		struct delayed_work idle_work;
 	} mm;
 	struct idr context_idr;
 
 	struct i915_ctx_hang_stats hang_stats;
+	atomic_t rps_wait_boost;
 };
 
 #define INTEL_INFO(dev)	(to_i915(dev)->info)
 
-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
+#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
-				 (dev)->pci_device == 0x0152 ||	\
-				 (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
-				 (dev)->pci_device == 0x0106 ||	\
-				 (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
+				 (dev)->pdev->device == 0x0152 || \
+				 (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
+				 (dev)->pdev->device == 0x0106 || \
+				 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
+#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0C00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1615,10 +1773,15 @@
 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
+#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
 
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
+#define RENDER_RING		(1<<RCS)
+#define BSD_RING		(1<<VCS)
+#define BLT_RING		(1<<BCS)
+#define VEBOX_RING		(1<<VECS)
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
@@ -1640,7 +1803,6 @@
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
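
[Editor's note] With ring_mask, per-ring capability checks become simple bit tests instead of one bool per engine. A tiny restatement under the assumption that the engine enum order is RCS, VCS, BCS, VECS (names invented):

enum { DEMO_RCS, DEMO_VCS, DEMO_BCS, DEMO_VECS };

#define DEMO_RENDER_RING	(1 << DEMO_RCS)
#define DEMO_BSD_RING		(1 << DEMO_VCS)
#define DEMO_BLT_RING		(1 << DEMO_BCS)
#define DEMO_VEBOX_RING		(1 << DEMO_VECS)

/* A part with render, BSD and blitter rings advertises mask 0x7,
 * so this returns 0 for it.
 */
static int demo_has_vebox(unsigned char ring_mask)
{
	return (ring_mask & DEMO_VEBOX_RING) != 0;
}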
 
@@ -1648,11 +1810,12 @@
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)		(IS_ULT(dev))
+#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1668,35 +1831,14 @@
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 
 #include "i915_trace.h"
 
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage.  This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE			(1<<0)
-#define INTEL_RC6p_ENABLE			(1<<1)
-#define INTEL_RC6pp_ENABLE			(1<<2)
-
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1767,12 +1909,13 @@
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1967,11 @@
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +2010,8 @@
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -1913,7 +2052,7 @@
 	}
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
@@ -1933,11 +2072,11 @@
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
 		       struct drm_file *file,
 		       struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2103,7 @@
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1995,6 +2135,9 @@
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2174,6 @@
 	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
 				   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt
 
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2236,7 @@
 					  unsigned cache_level,
 					  bool mappable,
 					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2133,6 +2276,11 @@
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2186,15 +2334,30 @@
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2256,8 +2419,16 @@
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2266,37 +2437,21 @@
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
 
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
 
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
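
[Editor's note] The register accessors now dispatch through a per-device function table, so gen-specific behaviour (tracing, forcewake handling) is chosen once at init rather than branched on every access. A self-contained sketch of the shape of that dispatch, not the driver's actual types:

#include <stdint.h>

struct demo_uncore_funcs {
	uint32_t (*mmio_readl)(void *dev, uint32_t reg, int trace);
	void (*mmio_writel)(void *dev, uint32_t reg, uint32_t val, int trace);
};

struct demo_private {
	struct demo_uncore_funcs funcs;
	volatile uint32_t *regs;	/* stand-in MMIO window */
};

static uint32_t plain_readl(void *dev, uint32_t reg, int trace)
{
	struct demo_private *p = dev;
	(void)trace;			/* a traced variant would log here */
	return p->regs[reg / 4];
}

static void demo_init(struct demo_private *p, volatile uint32_t *mmio)
{
	p->funcs.mmio_readl = plain_readl;	/* per-gen variant chosen once */
	p->regs = mmio;
}

#define DEMO_READ(p, reg)	((p)->funcs.mmio_readl((p), (reg), 1))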
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da..12bbd5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 						   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
@@ -61,8 +64,8 @@
 					     struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
 					    struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@
 		     struct drm_mode_create_dumb *args)
 {
 	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
 	args->size = args->pitch * args->height;
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
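
[Editor's note] The new pitch computation rounds bits-per-pixel up to whole bytes before aligning each row to 64 bytes, so sub-byte depths no longer under-allocate. A worked restatement; the macros are re-derived here, not taken from kernel headers:

/* Macros re-derived for illustration; ALIGN assumes a power-of-two. */
#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int demo_pitch(unsigned int width, unsigned int bpp)
{
	return DEMO_ALIGN(width * DEMO_DIV_ROUND_UP(bpp, 8), 64);
}

/* demo_pitch(100, 24) == 320: 3 bytes/px, 300-byte row, aligned to 64.
 * demo_pitch(100, 15) == 256: 15bpp rounds up to 2 bytes/px, not 1.
 */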
@@ -432,11 +435,9 @@
 		 * optimizes for the case when the gpu will dirty the data
 		 * anyway again before the next pread happens. */
 		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@
 		 * optimizes for the case when the gpu will use the data
 		 * right away and we therefore have to clflush anyway. */
 		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
 	}
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing. */
@@ -966,12 +965,31 @@
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
 		ret = i915_add_request(ring, NULL);
 
 	return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_ring_buffer *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+	if (file_priv == NULL)
+		return true;
+
+	return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
+	struct timespec before, now;
+	DEFINE_WAIT(wait);
+	long timeout_jiffies;
 	int ret;
 
 	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	trace_i915_gem_request_wait_begin(ring, seqno);
+	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
+	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+		gen6_rps_boost(dev_priv);
+		if (file_priv)
+			mod_delayed_work(dev_priv->wq,
+					 &file_priv->mm.idle_work,
+					 msecs_to_jiffies(100));
 	}
 
-	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+	    WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
 
-	/* Record current time in case interrupted by signal, or wedged * */
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(ring, seqno);
 	getrawmonotonic(&before);
+	for (;;) {
+		struct timer_list timer;
+		unsigned long expire;
 
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
+		prepare_to_wait(&ring->irq_queue, &wait,
+				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-			end = -EAGAIN;
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truly gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}
 
-		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-		 * gone. */
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
+		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+			ret = 0;
+			break;
+		}
 
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout_jiffies <= 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timeout)
+			timeout_jiffies = expire - jiffies;
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
 	getrawmonotonic(&now);
+	trace_i915_gem_request_wait_end(ring, seqno);
 
 	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+	finish_wait(&ring->irq_queue, &wait);
 
 	if (timeout) {
 		struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@
 			set_normalized_timespec(timeout, 0, 0);
 	}
 
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
+	return ret;
 }
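
[Editor's note] The rewritten __wait_seqno replaces one long blocking wait with an explicit prepare_to_wait/io_schedule loop plus an on-stack fallback timer, so a missed interrupt costs at most one timer slice before the condition is rechecked. A userspace analogue of that shape, using POSIX primitives rather than the kernel's; illustrative only:

#include <errno.h>
#include <pthread.h>
#include <time.h>

struct demo_waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	unsigned int completed;		/* last completed seqno */
};

static int demo_wait_seqno(struct demo_waiter *w, unsigned int seqno)
{
	int ret = 0;

	pthread_mutex_lock(&w->lock);
	/* wrap-safe "seqno passed" test, like i915_seqno_passed() */
	while ((int)(w->completed - seqno) < 0) {
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;		/* bounded slice: the "fake irq" */
		ret = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
		if (ret == ETIMEDOUT)
			ret = 0;	/* just loop and recheck */
		else if (ret)
			break;		/* real error */
	}
	pthread_mutex_unlock(&w->lock);
	return ret;
}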
 
 /**
@@ -1097,7 +1134,7 @@
 
 	return __wait_seqno(ring, seqno,
 			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }
 
 static int
@@ -1147,6 +1184,7 @@
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
 					    bool readonly)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1222,7 +1260,7 @@
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
 	if (ret)
 		goto unref;
 
@@ -1690,13 +1728,13 @@
 	return 0;
 }
 
-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
 	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
-	long count = 0;
+	unsigned long count = 0;
 
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@
 	return count;
 }
 
-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
 	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
 				 global_list) {
-		if (obj->pages_pin_count == 0)
+		if (i915_gem_object_put_pages(obj) == 0)
 			freed += obj->base.size >> PAGE_SHIFT;
-		i915_gem_object_put_pages(obj);
 	}
 	return freed;
 }
@@ -1865,6 +1902,9 @@
 			sg->length += PAGE_SIZE;
 		}
 		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
 #ifdef CONFIG_SWIOTLB
 	if (!swiotlb_nr_tbl())
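
[Editor's note] The added WARN_ON guards the i965g/gm 32-bit DMA workaround: with 4 KiB pages, pfn 0x00100000 is exactly the 4 GiB boundary, so any page at or above it violates a __GFP_DMA32 allocation. The arithmetic, as a one-line self-check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* pfn 0x00100000 * 4096-byte pages == 1 << 32 == 4 GiB */
	assert((uint64_t)0x00100000 * 4096 == (uint64_t)1 << 32);
	return 0;
}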
@@ -1918,7 +1958,7 @@
 	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@
 	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@
 	if (ret)
 		return ret;
 
-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
 		return -ENOMEM;
 
-
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
 		return ret;
-	}
 
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->head = request_start;
 	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;
 
 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
+	request->batch_obj = obj;
 
+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
 
@@ -2129,12 +2176,14 @@
 	}
 
 	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
 
 	if (!dev_priv->ums.mm_suspended) {
 		i915_queue_hangcheck(ring->dev);
 
 		if (was_empty) {
+			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work,
 					   round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@
 		return;
 
 	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2224,6 +2271,21 @@
 	return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
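
[Editor's note] With the defaults above, the window works out to DIV_ROUND_UP(8 * 1500, 1000) = 12 seconds: a context blamed for a hang less than 12 seconds after its previous one gets banned. A self-contained restatement of the test with invented names:

#include <stdbool.h>
#include <time.h>

#define DEMO_BAN_PERIOD_SECS 12	/* DIV_ROUND_UP(8 * 1500, 1000) */

struct demo_hang_stats {
	time_t guilty_ts;	/* last time this context caused a hang */
	bool banned;
};

static bool demo_is_banned(const struct demo_hang_stats *hs, time_t now)
{
	/* already banned, or hung again inside the ban window */
	return hs->banned || (now - hs->guilty_ts) <= DEMO_BAN_PERIOD_SECS;
}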
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
 				  struct drm_i915_gem_request *request,
 				  u32 acthd)
@@ -2260,10 +2322,13 @@
 		hs = &request->file_priv->hang_stats;
 
 	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
 			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
 			hs->batch_pending++;
+		}
 	}
 }
 
@@ -2341,6 +2406,8 @@
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
+	i915_gem_cleanup_ringbuffer(dev);
+
 	i915_gem_restore_fences(dev);
 }
 
@@ -2405,57 +2472,53 @@
 	WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.idle_work,
+				   msecs_to_jiffies(100));
+
+	return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
 	bool idle;
-	int i;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
 
 	/* Come back later if the device is busy... */
-	if (!mutex_trylock(&dev->struct_mutex)) {
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+	if (!idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 				   round_jiffies_up_relative(HZ));
-		return;
-	}
+}
 
-	i915_gem_retire_requests(dev);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
 
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
-	}
-
-	if (!dev_priv->ums.mm_suspended && !idle)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+	intel_mark_idle(dev_priv->dev);
 }
 
 /**
@@ -2553,7 +2616,7 @@
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
 	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
 	return ret;
@@ -2600,6 +2663,7 @@
 	if (ret)
 		return ret;
 
+	trace_i915_gem_ring_sync_to(from, to, seqno);
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
 		/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 	if (list_empty(&vma->vma_link))
 		return 0;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}
 
 	if (obj->pin_count)
 		return -EBUSY;
@@ -2685,13 +2755,10 @@
 
 	drm_mm_remove_node(&vma->node);
 
-destroy:
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -2887,6 +2954,7 @@
 	     obj->stride, obj->tiling_mode);
 
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6:
 	case 5:
@@ -3389,8 +3457,7 @@
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 		if (vma)
 			list_move_tail(&vma->mm_list,
 				       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3828,7 @@
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3865,6 +3932,11 @@
 		goto out;
 	}
 
+	if (obj->user_pin_count == ULONG_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	if (obj->user_pin_count == 0) {
 		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
 		if (ret)
@@ -4015,7 +4087,6 @@
 {
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4087,13 +4158,6 @@
 	return obj;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4211,20 @@
 	i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
+{
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -4169,76 +4244,103 @@
 	return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
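
[Editor's note] Splitting vma creation into a public lookup plus a private constructor gives the usual lookup-or-create idiom: callers can no longer create duplicate bindings for the same address space. The pattern in miniature, on a plain linked list; illustrative, not the driver's structures:

#include <stdlib.h>

struct demo_vma {
	const void *vm;			/* address space this binds into */
	struct demo_vma *next;
};

static struct demo_vma *demo_lookup_or_create(struct demo_vma **head,
					      const void *vm)
{
	struct demo_vma *vma;

	for (vma = *head; vma; vma = vma->next)
		if (vma->vm == vm)
			return vma;	/* existing binding, no duplicate */

	vma = calloc(1, sizeof(*vma));
	if (!vma)
		return NULL;
	vma->vm = vm;
	vma->next = *head;
	*head = vma;
	return vma;
}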
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
 	list_del(&vma->vma_link);
+
 	kfree(vma);
 }
 
 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
 
-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->ums.mm_suspended)
+		goto err;
 
 	ret = i915_gpu_idle(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
 	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 
-	/* Cancel the retire work handler, which should be idle now. */
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+							     DRIVER_MODESET);
+	mutex_unlock(&dev->struct_mutex);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
 	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
 
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
 
-	if (!dev_priv->l3_parity.remap_info)
-		return;
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
-
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
 	}
 
-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4260,6 +4362,8 @@
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
 	else if (IS_GEN7(dev))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else if (IS_GEN8(dev))
+		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
 	else
 		BUG();
 }
@@ -4330,7 +4434,7 @@
 i915_gem_init_hw(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4338,20 +4442,26 @@
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
+	if (IS_HSW_GT3(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+	else
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
 		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
 		I915_WRITE(GEN7_MSG_CTL, temp);
 	}
 
-	i915_gem_l3_remap(dev);
-
 	i915_gem_init_swizzling(dev);
 
 	ret = i915_gem_init_rings(dev);
 	if (ret)
 		return ret;
 
+	for (i = 0; i < NUM_L3_SLICES(dev); i++)
+		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -4454,26 +4564,12 @@
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
 	drm_irq_uninstall(dev);
 
-	mutex_lock(&dev->struct_mutex);
-	ret =  i915_gem_idle(dev);
-
-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	if (ret != 0)
-		dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return i915_gem_suspend(dev);
 }
 
 void
@@ -4484,11 +4580,9 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4523,6 +4617,7 @@
 	INIT_LIST_HEAD(&dev_priv->vm_list);
 	i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
+	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4627,8 @@
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4679,7 @@
 	if (dev_priv->mm.phys_objs[id - 1] || !size)
 		return 0;
 
-	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
 	if (!phys_obj)
 		return -ENOMEM;
 
@@ -4756,6 +4853,8 @@
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
@@ -4773,6 +4872,38 @@
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_file_private *file_priv =
+		container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+	atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	file_priv->dev_priv = dev->dev_private;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+			  i915_gem_file_idle_work_handler);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
@@ -4823,6 +4954,7 @@
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return count;
 }
 
@@ -4859,11 +4991,10 @@
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_vma *vma;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		if (i915_gem_obj_bound(o, vm))
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
 	return false;
@@ -4895,7 +5026,6 @@
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed;
 	bool unlock = true;
 
@@ -4909,38 +5039,30 @@
 		unlock = false;
 	}
 
-	freed = i915_gem_purge(dev_priv, nr_to_scan);
-	if (freed < nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv, nr_to_scan,
-							false);
-	if (freed < nr_to_scan)
+	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	if (freed < sc->nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv,
+					   sc->nr_to_scan - freed,
+					   false);
+	if (freed < sc->nr_to_scan)
 		freed += i915_gem_shrink_all(dev_priv);
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return freed;
 }
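
[Editor's note] Note how the fixed accounting now passes the remaining deficit (sc->nr_to_scan - freed) down the cascade instead of the original target, so the middle stage no longer over-reclaims. The control flow restated with stand-in stubs:

/* Stand-in stubs; only the cascade shape matters here. */
static unsigned long demo_purge(unsigned long target)  { return target / 2; }
static unsigned long demo_shrink(unsigned long target) { return target / 2; }
static unsigned long demo_shrink_all(void)             { return 64; }

static unsigned long demo_scan(unsigned long nr_to_scan)
{
	unsigned long freed;

	freed = demo_purge(nr_to_scan);			  /* purgeable only */
	if (freed < nr_to_scan)
		freed += demo_shrink(nr_to_scan - freed); /* remaining deficit */
	if (freed < nr_to_scan)
		freed += demo_shrink_all();		  /* last resort */
	return freed;
}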
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
-			return vma;
-
-	return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = i915_gem_vma_create(obj, vm);
+	if (WARN_ON(list_empty(&obj->vma_list)))
+		return NULL;
+
+	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+		return NULL;
 
 	return vma;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c..72a3df3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  *  The "current context" means the context which is currently running on the
- *  GPU. The GPU has loaded it's state already and has stored away the gtt
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
  *  offset of the BO. The GPU is not actively referencing the data at this
  *  offset, but it will on the next context switch. The only way to avoid this
  *  is to do a GPU reset.
@@ -117,6 +117,9 @@
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
+	case 8:
+		ret = GEN8_CXT_TOTAL_SIZE;
+		break;
 	default:
 		BUG();
 	}
@@ -129,6 +132,7 @@
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
@@ -147,6 +151,7 @@
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocation failed\n");
@@ -166,6 +171,7 @@
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
@@ -178,6 +184,10 @@
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
 
@@ -213,7 +223,6 @@
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +235,8 @@
 		goto err_unpin;
 	}
 
+	dev_priv->ring[RCS].default_context = ctx;
+
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
@@ -281,16 +292,24 @@
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dctx->obj);
-
 	/* When default context is created and switched to, base object refcount
 	 * will be 2 (+1 from object creation and +1 from do_switch()).
 	 * i915_gem_context_fini() will be called after gpu_idle() has switched
 	 * to default context. So we need to unreference the base object once
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
-	drm_gem_object_unreference(&dctx->obj->base);
+	WARN_ON(!dev_priv->ring[RCS].last_context);
+	if (dev_priv->ring[RCS].last_context == dctx) {
+		/* Fake switch to NULL context */
+		WARN_ON(dctx->obj->active);
+		i915_gem_object_unpin(dctx->obj);
+		i915_gem_context_unreference(dctx);
+	}
+
+	i915_gem_object_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
+	dev_priv->ring[RCS].default_context = NULL;
+	dev_priv->ring[RCS].last_context = NULL;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +412,11 @@
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +439,6 @@
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
@@ -429,6 +446,18 @@
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
@@ -436,11 +465,8 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -451,17 +477,7 @@
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with out tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}
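
The remap_slice handling above is worth spelling out: each set bit in
to->remap_slice marks an L3 slice whose remapping is still pending, and a bit
is only cleared once i915_gem_l3_remap() succeeds, so a failed remap is retried
on the next switch into that context rather than lost. A minimal user-space
sketch of that retry pattern, with remap_one_slice() as a hypothetical stand-in
for the real remap call:

#include <stdio.h>

#define MAX_L3_SLICES 2

/* Pretend slice 1 fails once before succeeding. */
static int remap_one_slice(int slice)
{
	static int failed_once;

	if (slice == 1 && !failed_once) {
		failed_once = 1;
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int remap_slice = (1 << 0) | (1 << 1);
	int i, pass;

	for (pass = 0; remap_slice && pass < 4; pass++) {
		for (i = 0; i < MAX_L3_SLICES; i++) {
			if (!(remap_slice & (1 << i)))
				continue;
			if (remap_one_slice(i))
				printf("slice %d failed, will retry\n", i);
			else
				remap_slice &= ~(1 << i);
		}
	}
	printf("pending mask: 0x%x\n", remap_slice);
	return 0;
}

Keeping the pending state in a per-context bitmask needs no extra bookkeeping
structure, and retries are naturally rate-limited to context switches.
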
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b7001..b737653 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@
 	if (vma->obj->pin_count)
 		return false;
 
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -152,12 +155,48 @@
 	return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants
+ * fine-grained eviction, see i915_gem_evict_something() instead. In terms of
+ * freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared across multiple address spaces, and
+ * this function will not ensure those objects are freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires and
+ * inactivates the currently active BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	trace_i915_gem_evict_vm(vm);
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->obj->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
+
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm;
-	struct i915_vma *vma, *next;
 	bool lists_empty = true;
 	int ret;
 
@@ -184,11 +223,8 @@
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-			if (vma->obj->pin_count == 0)
-				WARN_ON(i915_vma_unbind(vma));
-	}
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
 }
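
Note why i915_gem_evict_vm() walks the inactive list with
list_for_each_entry_safe(): i915_vma_unbind() unlinks the vma from that very
list, so a plain iterator would chase a stale ->next pointer. A self-contained
sketch of the same unlink-while-walking idiom on a toy singly-linked list (toy
types, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int pin_count;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *next;
	struct node **prev;
	int i;

	/* Build a small list; every third node is "pinned". */
	for (i = 0; i < 6; i++) {
		n = malloc(sizeof(*n));
		n->pin_count = (i % 3 == 0);
		n->next = head;
		head = n;
	}

	/* Deletion-safe walk: cache ->next before unlinking/freeing the
	 * current node, which is exactly why the kernel loop uses the
	 * _safe variant. */
	prev = &head;
	for (n = head; n; n = next) {
		next = n->next;
		if (n->pin_count == 0) {
			*prev = next;	/* "unbind": unlink from the list */
			free(n);
		} else {
			prev = &n->next;
		}
	}

	for (n = head; n; n = n->next)
		printf("kept pinned node\n");
	return 0;
}
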
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf34577..885d595 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 	};
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 
 	if (eb == NULL) {
-		int size = args->buffer_count;
-		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		while (count > 2*size)
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 			return eb;
@@ -70,64 +70,102 @@
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret = 0;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can look up
+	 * or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto out;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
-
-		obj->exec_entry = &exec[i];
-		if (eb->and < 0) {
-			eb->lut[i] = obj;
-		} else {
-			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
-				       &eb->buckets[handle & eb->and]);
-		}
+		list_add_tail(&obj->obj_exec_link, &objects);
 	}
 	spin_unlock(&file->table_lock);
 
-	return 0;
+	i = 0;
+	list_for_each_entry(obj, &objects, obj_exec_link) {
+		struct i915_vma *vma;
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto out;
+		}
+
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		vma->exec_entry = &exec[i];
+		if (eb->and < 0) {
+			eb->lut[i] = vma;
+		} else {
+			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
+				       &eb->buckets[handle & eb->and]);
+		}
+		++i;
+	}
+
+out:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		if (ret)
+			drm_gem_object_unreference(&obj->base);
+	}
+	return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
 		if (handle >= -eb->and)
@@ -139,34 +177,33 @@
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb)
+{
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
 				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -175,17 +212,31 @@
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc)
 {
+	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
 	int ret = -EINVAL;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		*(uint32_t *)(vaddr + page_offset) = 0;
+	}
+
 	kunmap_atomic(vaddr);
 
 	return 0;
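
Since gen8 relocations are 64 bits wide, relocate_entry_cpu() above (and
relocate_entry_gtt() in the next hunk) must handle the upper dword landing on
the following page; offset_in_page() wrapping to zero after the 4-byte advance
is the signal to remap. The boundary arithmetic in isolation, as a sketch
assuming 4K pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((unsigned int)((off) & (PAGE_SIZE - 1)))

int main(void)
{
	uint64_t offsets[] = { 0x10, 0xffc, 0x1ff8 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		uint64_t off = offsets[i];
		unsigned int second =
			offset_in_page(offset_in_page(off) + sizeof(uint32_t));

		/* second == 0 means the upper dword starts on the next
		 * page and needs a fresh mapping of that page. */
		printf("reloc at 0x%llx: upper dword %s\n",
		       (unsigned long long)off,
		       second == 0 ? "crosses into next page" : "same page");
	}
	return 0;
}
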
@@ -216,6 +267,21 @@
 	reloc_entry = (uint32_t __iomem *)
 		(reloc_page + offset_in_page(reloc->offset));
 	iowrite32(reloc->delta, reloc_entry);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		reloc_entry += 1;
+
+		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page = io_mapping_map_atomic_wc(
+					dev_priv->gtt.mappable,
+					reloc->offset + sizeof(uint32_t));
+			reloc_entry = reloc_page;
+		}
+
+		iowrite32(0, reloc_entry);
+	}
+
 	io_mapping_unmap_atomic(reloc_page);
 
 	return 0;
@@ -223,22 +289,24 @@
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
 	/* we already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -284,7 +352,8 @@
 		return 0;
 
 	/* Check that the relocation address is valid... */
-	if (unlikely(reloc->offset > obj->base.size - 4)) {
+	if (unlikely(reloc->offset >
+		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -320,14 +389,13 @@
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
 
 	user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +414,8 @@
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-								 vm);
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+								 vma->vm);
 			if (ret)
 				return ret;
 
@@ -368,17 +436,16 @@
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
@@ -387,10 +454,10 @@
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 			     struct i915_address_space *vm)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +468,8 @@
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
@@ -415,31 +482,32 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
-				   bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_ring_buffer *ring,
+				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(obj);
+	need_mappable = need_fence || need_reloc_mappable(vma);
 
-	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
 				  false);
 	if (ret)
 		return ret;
@@ -467,8 +535,8 @@
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
 
@@ -485,14 +553,15 @@
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (!i915_gem_obj_bound_any(obj))
+	if (!drm_mm_node_allocated(&vma->node))
 		return;
 
-	entry = obj->exec_entry;
+	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 		i915_gem_object_unpin_fence(obj);
@@ -505,41 +574,46 @@
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct list_head *objects,
-			    struct i915_address_space *vm,
+			    struct list_head *vmas,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
-	struct list_head ordered_objects;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	INIT_LIST_HEAD(&ordered_objects);
-	while (!list_empty(objects)) {
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		obj = list_first_entry(objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		entry = obj->exec_entry;
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
 
 		need_fence =
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(obj);
+		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (need_mappable)
-			list_move(&obj->exec_list, &ordered_objects);
+			list_move(&vma->exec_list, &ordered_vmas);
 		else
-			list_move_tail(&obj->exec_list, &ordered_objects);
+			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
 		obj->pending_fenced_gpu_access = false;
 	}
-	list_splice(&ordered_objects, objects);
+	list_splice(&ordered_vmas, vmas);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -558,52 +632,52 @@
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+		list_for_each_entry(vma, vmas, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 			bool need_fence, need_mappable;
-			u32 obj_offset;
 
-			if (!i915_gem_obj_bound(obj, vm))
+			obj = vma->obj;
+
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(obj);
+			need_mappable = need_fence || need_reloc_mappable(vma);
 
 			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vm));
+			       !i915_is_ggtt(vma->vm));
 
 			if ((entry->alignment &&
-			     obj_offset & (entry->alignment - 1)) ||
+			     vma->node.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			if (i915_gem_obj_bound(obj, vm))
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list)
-			i915_gem_execbuffer_unreserve_object(obj);
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		ret = i915_gem_evict_everything(ring->dev);
+		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);
@@ -614,24 +688,27 @@
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_address_space *vm)
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
-	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
-	int count = args->buffer_count;
+	unsigned count = args->buffer_count;
+
+	if (WARN_ON(list_empty(&eb->vmas)))
+		return 0;
+
+	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(&eb->objects)) {
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+	while (!list_empty(&eb->vmas)) {
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -695,20 +772,19 @@
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		int offset = obj->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset],
-							       vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
 	}
@@ -727,14 +803,15 @@
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-				struct list_head *objects)
+				struct list_head *vmas)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
 	int ret;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
@@ -771,8 +848,8 @@
 		   int count)
 {
 	int i;
-	int relocs_total = 0;
-	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned relocs_total = 0;
+	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +886,13 @@
 }
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
@@ -825,9 +902,7 @@
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		/* FIXME: This lookup gets fixed later <-- danvet */
-		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +960,11 @@
 		       struct i915_address_space *vm)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct eb_objects *eb;
+	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
+	struct i915_ctx_hang_stats *hs;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
@@ -1000,7 +1076,8 @@
 			return -EINVAL;
 		}
 
-		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
@@ -1025,7 +1102,7 @@
 		goto pre_mutex_err;
 	}
 
-	eb = eb_create(args);
+	eb = eb_create(args, vm);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
@@ -1033,18 +1110,16 @@
 	}
 
 	/* Look up object handles */
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
+	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1054,7 +1129,7 @@
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec, vm);
+								eb, exec);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1071,15 +1146,25 @@
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but let's be paranoid and do it
-	 * unconditionally for now. */
+	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
 		goto err;
 
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs)) {
+		ret = PTR_ERR(hs);
+		goto err;
+	}
+
+	if (hs->banned) {
+		ret = -EIO;
+		goto err;
+	}
+
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1131,7 +1216,7 @@
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
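
eb_create() near the top of this file prefers a flat handle-to-vma LUT and
falls back to a hash table whose bucket count starts at half a page's worth of
hlist heads and is halved until it is at most twice the buffer count. The
sizing rule in isolation, a sketch assuming 4K pages and 8-byte list heads:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int bucket_count(unsigned int buffer_count)
{
	unsigned int count = PAGE_SIZE / sizeof(void *) / 2; /* 256 on LP64 */

	while (count > 2 * buffer_count)
		count >>= 1;
	return count;
}

int main(void)
{
	unsigned int sizes[] = { 1, 4, 50, 1000 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("%4u buffers -> %3u buckets\n",
		       sizes[i], bucket_count(sizes[i]));
	return 0;
}

This keeps the table a power of two (so "handle & eb->and" works as the bucket
index) while bounding both memory and average chain length.
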
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4ca..3620a1b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,8 @@
 
 #define GEN6_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
@@ -57,6 +59,41 @@
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
 
+#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPS		4
+
+#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
+#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
+
+static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
+					     enum i915_cache_level level,
+					     bool valid)
+{
+	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+	pte |= addr;
+	if (level != I915_CACHE_NONE)
+		pte |= PPAT_CACHED_INDEX;
+	else
+		pte |= PPAT_UNCACHED_INDEX;
+	return pte;
+}
+
+static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
+					     dma_addr_t addr,
+					     enum i915_cache_level level)
+{
+	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+	pde |= addr;
+	if (level != I915_CACHE_NONE)
+		pde |= PPAT_CACHED_PDE_INDEX;
+	else
+		pde |= PPAT_UNCACHED_INDEX;
+	return pde;
+}
+
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
 				     bool valid)
@@ -158,6 +195,257 @@
 	return pte;
 }
 
+/* Broadwell Page Directory Pointer Descriptors */
+static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+			   uint64_t val)
+{
+	int ret;
+
+	BUG_ON(entry >= 4);
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
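+	/* The PDP registers are 64 bits wide, so program the upper and
+	 * lower halves with two separate LRI commands. */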
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit(ring, (u32)(val >> 32));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit(ring, (u32)(val));
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int gen8_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i, j, ret;
+
+	/* bit of a hack to find the actual last used pd */
+	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+
+	for_each_ring(ring, dev_priv, j) {
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	}
+
+	for (i = used_pd - 1; i >= 0; i--) {
+		dma_addr_t addr = ppgtt->pd_dma_addr[i];
+		for_each_ring(ring, dev_priv, j) {
+			ret = gen8_write_pdp(ring, i, addr);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+				   unsigned first_entry,
+				   unsigned num_entries,
+				   bool use_scratch)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+	unsigned last_pte, i;
+
+	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+				      I915_CACHE_LLC, use_scratch);
+
+	while (num_entries) {
+		struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+
+		last_pte = first_pte + num_entries;
+		if (last_pte > GEN8_PTES_PER_PAGE)
+			last_pte = GEN8_PTES_PER_PAGE;
+
+		pt_vaddr = kmap_atomic(page_table);
+
+		for (i = first_pte; i < last_pte; i++)
+			pt_vaddr[i] = scratch_pte;
+
+		kunmap_atomic(pt_vaddr);
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pt++;
+	}
+}
+
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+				      struct sg_table *pages,
+				      unsigned first_entry,
+				      enum i915_cache_level cache_level)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_gtt_pte_t *pt_vaddr;
+	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+	struct sg_page_iter sg_iter;
+
+	pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		dma_addr_t page_addr;
+
+		page_addr = sg_dma_address(sg_iter.sg) +
+				(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
+						    true);
+		if (++act_pte == GEN8_PTES_PER_PAGE) {
+			kunmap_atomic(pt_vaddr);
+			act_pt++;
+			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+			act_pte = 0;
+		}
+	}
+	kunmap_atomic(pt_vaddr);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	int i, j;
+
+	for (i = 0; i < ppgtt->num_pd_pages ; i++) {
+		if (ppgtt->pd_dma_addr[i]) {
+			pci_unmap_page(ppgtt->base.dev->pdev,
+				       ppgtt->pd_dma_addr[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+			for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+				dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+				if (addr)
+					pci_unmap_page(ppgtt->base.dev->pdev,
+						       addr,
+						       PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+			}
+		}
+		kfree(ppgtt->gen8_pt_dma_addr[i]);
+	}
+
+	__free_pages(ppgtt->gen8_pt_pages,
+		     get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages,
+		     get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
+ * net effect resembling a 2-level page table in normal x86 terms. Each PDP
+ * represents 1GB of memory:
+ * 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes = 4GB of legacy 32b address space.
+ *
+ * TODO: Do something with the size parameter.
+ **/
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+	struct page *pt_pages;
+	int i, j, ret = -ENOMEM;
+	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+
+	if (size % (1<<30))
+		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+	/* FIXME: split allocation into smaller pieces. For now we only ever do
+	 * this once, but with full PPGTT, the multiple contiguous allocations
+	 * will be bad.
+	 */
+	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
+	if (!ppgtt->pd_pages)
+		return -ENOMEM;
+
+	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
+	if (!pt_pages) {
+		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
+		return -ENOMEM;
+	}
+
+	ppgtt->gen8_pt_pages = pt_pages;
+	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
+	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+	ppgtt->enable = gen8_ppgtt_enable;
+	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+
+	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+	/*
+	 * - Create a mapping for the page directories.
+	 * - For each page directory:
+	 *      allocate space for page table mappings.
+	 *      map each page table
+	 */
+	for (i = 0; i < max_pdp; i++) {
+		dma_addr_t temp;
+		temp = pci_map_page(ppgtt->base.dev->pdev,
+				    &ppgtt->pd_pages[i], 0,
+				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+			goto err_out;
+
+		ppgtt->pd_dma_addr[i] = temp;
+
+		ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
+		if (!ppgtt->gen8_pt_dma_addr[i])
+			goto err_out;
+
+		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
+			temp = pci_map_page(ppgtt->base.dev->pdev,
+					    p, 0, PAGE_SIZE,
+					    PCI_DMA_BIDIRECTIONAL);
+
+			if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+				goto err_out;
+
+			ppgtt->gen8_pt_dma_addr[i][j] = temp;
+		}
+	}
+
+	/* For now, the PPGTT helper functions all require that the PDEs are
+	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
+	 * will never need to touch the PDEs again */
+	for (i = 0; i < max_pdp; i++) {
+		gen8_ppgtt_pde_t *pd_vaddr;
+		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
+						      I915_CACHE_LLC);
+		}
+		kunmap_atomic(pd_vaddr);
+	}
+
+	ppgtt->base.clear_range(&ppgtt->base, 0,
+				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
+				true);
+
+	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
+			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
+	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
+			 ppgtt->num_pt_pages,
+			 (ppgtt->num_pt_pages - num_pt_pages) +
+			 size % (1<<30));
+	return 0;
+
+err_out:
+	ppgtt->base.cleanup(&ppgtt->base);
+	return ret;
+}
+
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -342,7 +630,7 @@
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
 		return -ENOMEM;
@@ -353,7 +641,7 @@
 			goto err_pt_alloc;
 	}
 
-	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
 				     GFP_KERNEL);
 	if (!ppgtt->pt_dma_addr)
 		goto err_pt_alloc;
@@ -410,6 +698,8 @@
 
 	if (INTEL_INFO(dev)->gen < 8)
 		ret = gen6_ppgtt_init(ppgtt);
+	else if (IS_GEN8(dev))
+		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
 		BUG();
 
@@ -573,6 +863,57 @@
 	return 0;
 }
 
+static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+{
+#ifdef writeq
+	writeq(pte, addr);
+#else
+	iowrite32((u32)pte, addr);
+	iowrite32(pte >> 32, addr + 4);
+#endif
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *st,
+				     unsigned int first_entry,
+				     enum i915_cache_level level)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	gen8_gtt_pte_t __iomem *gtt_entries =
+		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+	int i = 0;
+	struct sg_page_iter sg_iter;
+	dma_addr_t addr;
+
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_dma_address(sg_iter.sg) +
+			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		gen8_set_pte(&gtt_entries[i],
+			     gen8_pte_encode(addr, level, true));
+		i++;
+	}
+
+	/*
+	 * XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR, they may be subject to
+	 * different NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readq(&gtt_entries[i-1])
+			!= gen8_pte_encode(addr, level, true));
+
+#if 0 /* TODO: Still needed on GEN8? */
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+#endif
+}
+
 /*
  * Binds an object into the global gtt with the specified cache level. The object
  * will be accessible to the GPU via commands whose operands reference offsets
@@ -615,6 +956,30 @@
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+				  unsigned int first_entry,
+				  unsigned int num_entries,
+				  bool use_scratch)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
+		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+				      I915_CACHE_LLC,
+				      use_scratch);
+	for (i = 0; i < num_entries; i++)
+		gen8_set_pte(&gtt_base[i], scratch_pte);
+	readl(gtt_base);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
 				  unsigned int num_entries,
@@ -638,7 +1003,6 @@
 	readl(gtt_base);
 }
 
-
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     unsigned int pg_start,
@@ -720,6 +1084,7 @@
 			*end -= 4096;
 	}
 }
+
 void i915_gem_setup_global_gtt(struct drm_device *dev,
 			       unsigned long start,
 			       unsigned long mappable_end,
@@ -817,7 +1182,8 @@
 
 		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 		drm_mm_takedown(&dev_priv->gtt.base.mm);
-		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
+		if (INTEL_INFO(dev)->gen < 8)
+			gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
 	}
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
@@ -867,6 +1233,15 @@
 	return snb_gmch_ctl << 20;
 }
 
+static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+	if (bdw_gmch_ctl)
+		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	return bdw_gmch_ctl << 20;
+}
+
 static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -874,6 +1249,108 @@
 	return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
+static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
+	return bdw_gmch_ctl << 25; /* 32 MB units */
+}
+
+static int ggtt_probe_common(struct drm_device *dev,
+			     size_t gtt_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	int ret;
+
+	/* For Modern GENs the PTEs and register space are split in the BAR */
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+		(pci_resource_len(dev->pdev, 0) / 2);
+
+	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+	if (!dev_priv->gtt.gsm) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		return -ENOMEM;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		/* iounmap will also get called at remove, but meh */
+		iounmap(dev_priv->gtt.gsm);
+	}
+
+	return ret;
+}
+
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+#define GEN8_PPAT_UC		(0<<0)
+#define GEN8_PPAT_WC		(1<<0)
+#define GEN8_PPAT_WT		(2<<0)
+#define GEN8_PPAT_WB		(3<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
+/* FIXME(BDW): Bspec is completely confused about cache control bits. */
+#define GEN8_PPAT_LLC		(1<<2)
+#define GEN8_PPAT_LLCELLC	(2<<2)
+#define GEN8_PPAT_LLCeLLC	(3<<2)
+#define GEN8_PPAT_AGE(x)	(x<<4)
+#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+	uint64_t pat;
+
+	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
+	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
+	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
+	 * write would work. */
+	I915_WRITE(GEN8_PRIVATE_PAT, pat);
+	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
+static int gen8_gmch_probe(struct drm_device *dev,
+			   size_t *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   unsigned long *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int gtt_size;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	/* TODO: We're not aware of mappable constraints on gen8 yet */
+	*mappable_base = pci_resource_start(dev->pdev, 2);
+	*mappable_end = pci_resource_len(dev->pdev, 2);
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+	*stolen = gen8_get_stolen_size(snb_gmch_ctl);
+
+	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+
+	gen8_setup_private_ppat(dev_priv);
+
+	ret = ggtt_probe_common(dev, gtt_size);
+
+	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
+	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+
+	return ret;
+}
+
 static int gen6_gmch_probe(struct drm_device *dev,
 			   size_t *gtt_total,
 			   size_t *stolen,
@@ -881,7 +1358,6 @@
 			   unsigned long *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	phys_addr_t gtt_bus_addr;
 	unsigned int gtt_size;
 	u16 snb_gmch_ctl;
 	int ret;
@@ -901,24 +1377,13 @@
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
 	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
-	/* For Modern GENs the PTEs and register space are split in the BAR */
-	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
-		(pci_resource_len(dev->pdev, 0) / 2);
-
-	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
-	if (!dev_priv->gtt.gsm) {
-		DRM_ERROR("Failed to map the gtt page table\n");
-		return -ENOMEM;
-	}
-
-	ret = setup_scratch_page(dev);
-	if (ret)
-		DRM_ERROR("Scratch setup failed\n");
+	ret = ggtt_probe_common(dev, gtt_size);
 
 	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@@ -972,7 +1437,7 @@
 	if (INTEL_INFO(dev)->gen <= 5) {
 		gtt->gtt_probe = i915_gmch_probe;
 		gtt->base.cleanup = i915_gmch_remove;
-	} else {
+	} else if (INTEL_INFO(dev)->gen < 8) {
 		gtt->gtt_probe = gen6_gmch_probe;
 		gtt->base.cleanup = gen6_gmch_remove;
 		if (IS_HASWELL(dev) && dev_priv->ellc_size)
@@ -985,6 +1450,9 @@
 			gtt->base.pte_encode = ivb_pte_encode;
 		else
 			gtt->base.pte_encode = snb_pte_encode;
+	} else {
+		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
+		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
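
Two numbers drive all of the gen8 sizing above: PTEs grew to 8 bytes, so one
4K page now holds 512 entries, and the BDW GGTT size field in GMCH control
encodes a power-of-two number of MiB of PTE space. A sketch of both
calculations; the BDW_GMCH_GGMS_* values are assumed from the matching
i915_reg.h change, which is not part of this excerpt:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096u
#define BDW_GMCH_GGMS_SHIFT	6	/* assumed, see i915_reg.h */
#define BDW_GMCH_GGMS_MASK	0x3

typedef uint64_t gen8_gtt_pte_t;

int main(void)
{
	unsigned int ptes_per_page = PAGE_SIZE / sizeof(gen8_gtt_pte_t);
	uint64_t legacy_space = 4ull * 512 * 512 * PAGE_SIZE; /* 4 PDPs */
	uint16_t gmch_ctl = 3 << BDW_GMCH_GGMS_SHIFT; /* example field value */
	unsigned int gtt_size = 0;

	/* Mirrors gen8_get_total_gtt_size(): decode 2^n MiB of PTE space. */
	gmch_ctl = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	if (gmch_ctl)
		gtt_size = (1u << gmch_ctl) << 20;

	printf("PTEs per page: %u\n", ptes_per_page);	/* 512 */
	printf("legacy 32b space: %llu GiB\n",
	       (unsigned long long)(legacy_space >> 30));	/* 4 */
	printf("GGTT PTE space: %u MiB -> %llu GiB mappable\n",
	       gtt_size >> 20,
	       (unsigned long long)(((uint64_t)gtt_size /
				     sizeof(gen8_gtt_pte_t)) << 12 >> 30));
	return 0;
}
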
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d9..d284d89 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_vma_create(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_out;
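
The one-line change above matters for repeated preallocations:
i915_gem_obj_lookup_or_create_vma() returns the existing (obj, vm) VMA instead
of blindly creating a second one, matching the NOTE in eb_lookup_vmas()
earlier. The look-up-before-create contract in miniature (toy table and
hypothetical names, not the driver's API):

#include <stdio.h>
#include <string.h>

struct vma { char key[16]; int used; };
static struct vma table[4];

/* Return the existing entry for key, or create one. */
static struct vma *lookup_or_create(const char *key)
{
	int i, free_slot = -1;

	for (i = 0; i < 4; i++) {
		if (table[i].used && strcmp(table[i].key, key) == 0)
			return &table[i];
		if (!table[i].used && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return NULL;
	table[free_slot].used = 1;
	snprintf(table[free_slot].key, sizeof(table[free_slot].key),
		 "%s", key);
	return &table[free_slot];
}

int main(void)
{
	struct vma *a = lookup_or_create("obj0:ggtt");
	struct vma *b = lookup_or_create("obj0:ggtt");

	printf("same vma: %s\n", a == b ? "yes" : "no");	/* yes */
	return 0;
}
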
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef..b139053 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@
 		return -EINVAL;
 	}
 
-	if (obj->pin_count) {
+	if (obj->pin_count || obj->framebuffer_references) {
 		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
@@ -393,7 +393,7 @@
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		if (obj->bit_17 == NULL) {
-			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
 					      sizeof(long), GFP_KERNEL);
 		}
 	} else {
@@ -504,8 +504,8 @@
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-					   sizeof(long), GFP_KERNEL);
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f..79dcb8f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@
 	}
 }
 
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+	switch (a) {
+	case HANGCHECK_IDLE:
+		return "idle";
+	case HANGCHECK_WAIT:
+		return "wait";
+	case HANGCHECK_ACTIVE:
+		return "active";
+	case HANGCHECK_KICK:
+		return "kick";
+	case HANGCHECK_HUNG:
+		return "hung";
+	}
+
+	return "unknown";
+}
+
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 				  struct drm_device *dev,
 				  struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@
 	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
+	if (INTEL_INFO(dev)->gen >= 4)
+		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
 	if (INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@
 	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, "  hangcheck: %s [%d]\n",
+		   hangcheck_action_to_str(error->hangcheck_action[ring]),
+		   error->hangcheck_score[ring]);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
@@ -601,6 +624,7 @@
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6:
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -703,6 +727,7 @@
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
 		if (ring->id == RCS)
 			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +745,9 @@
 
 	error->cpu_ring_head[ring->id] = ring->head;
 	error->cpu_ring_tail[ring->id] = ring->tail;
+
+	error->hangcheck_score[ring->id] = ring->hangcheck.score;
+	error->hangcheck_action[ring->id] = ring->hangcheck.action;
 }
 
 
@@ -769,7 +797,7 @@
 
 		error->ring[i].num_requests = count;
 		error->ring[i].requests =
-			kmalloc(count*sizeof(struct drm_i915_error_request),
+			kcalloc(count, sizeof(*error->ring[i].requests),
 				GFP_ATOMIC);
 		if (error->ring[i].requests == NULL) {
 			error->ring[i].num_requests = 0;
@@ -811,7 +839,7 @@
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
 	if (i) {
-		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
 		if (active_bo)
 			pinned_bo = active_bo + error->active_bo_count[ndx];
 	}
@@ -885,8 +913,12 @@
 		return;
 	}
 
-	DRM_INFO("capturing error event; look for more information in "
-		 "/sys/class/drm/card%d/error\n", dev->primary->index);
+	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+		 dev->primary->index);
+	DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+	DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+	DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+	DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
 	kref_init(&error->ref);
 	error->eir = I915_READ(EIR);
@@ -988,6 +1020,7 @@
 	case I915_CACHE_NONE: return " uncached";
 	case I915_CACHE_LLC: return " snooped or LLC";
 	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
 	default: return "";
 	}
 }
@@ -1012,6 +1045,7 @@
 	default:
 		WARN_ONCE(1, "Unsupported platform\n");
 	case 7:
+	case 8:
 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
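
hangcheck_action_to_str() above deliberately returns "unknown" after the
switch instead of from a default: label: with every enum value given a case
and no default, -Wswitch flags any intel_ring_hangcheck_action value added
later, while the trailing return keeps the function total. The same shape on a
toy enum:

#include <stdio.h>

enum toy_action { TOY_IDLE, TOY_WAIT, TOY_HUNG };

static const char *toy_action_to_str(enum toy_action a)
{
	switch (a) {
	case TOY_IDLE:
		return "idle";
	case TOY_WAIT:
		return "wait";
	case TOY_HUNG:
		return "hung";
	}

	/* No default: above, so adding an enum value without a case
	 * triggers -Wswitch; this return keeps the function total. */
	return "unknown";
}

int main(void)
{
	printf("%s\n", toy_action_to_str(TOY_WAIT));
	return 0;
}
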
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228..5d1dedc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -269,6 +270,21 @@
 	}
 }
 
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable)
+		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+	else
+		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
@@ -381,6 +397,8 @@
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN8(dev))
+		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -441,7 +459,7 @@
 
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +476,7 @@
 }
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +504,10 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 	if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -518,6 +537,12 @@
 	}
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -526,7 +551,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low;
+	u32 high1, high2, low, pixel, vbl_start;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +559,24 @@
 		return 0;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		const struct drm_display_mode *mode =
+			&intel_crtc->config.adjusted_mode;
+
+		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+	} else {
+		enum transcoder cpu_transcoder =
+			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		u32 htotal;
+
+		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+		vbl_start *= htotal;
+	}
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -544,13 +587,20 @@
 	 */
 	do {
 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		low   = I915_READ(low_frame);
 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
-	return (high1 << 8) | low;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
+	return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
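
The cooked counter above folds the pixel counter into the frame counter so the reported value increments at vblank start rather than at start of active. A minimal user-space sketch of that arithmetic, assuming the PIPEFRAMEPIXEL layout from the driver headers (frame count low byte in bits 31:24, pixel counter in bits 23:0):

#include <stdint.h>
#include <stdio.h>

/* Field layout assumed from the driver headers. */
#define PIPE_FRAME_LOW_SHIFT	24
#define PIPE_PIXEL_MASK		0x00ffffff

static uint32_t cook_vblank_count(uint32_t high1, uint32_t framepixel,
				  uint32_t vbl_start)
{
	uint32_t pixel = framepixel & PIPE_PIXEL_MASK;
	uint32_t low = framepixel >> PIPE_FRAME_LOW_SHIFT;

	/* The raw frame counter ticks at start of active scanout; adding
	 * (pixel >= vbl_start) moves the increment to vblank start, as in
	 * the hunk above. */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

int main(void)
{
	/* hypothetical 1920x1080 mode: htotal 2200, vblank at line 1080 */
	uint32_t vbl_start = 1080 * 2200;

	/* last active pixel: not bumped; first vblank pixel: bumped */
	printf("%u\n", cook_vblank_count(0, (7u << 24) | (vbl_start - 1), vbl_start));
	printf("%u\n", cook_vblank_count(0, (7u << 24) | vbl_start, vbl_start));
	return 0;
}
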
@@ -567,67 +617,164 @@
 	return I915_READ(reg);
 }
 
-static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos)
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 vbl = 0, position = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t status;
+	int reg;
+
+	if (IS_VALLEYVIEW(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = VLV_ISR;
+	} else if (IS_GEN2(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 5) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 7) {
+		status = pipe == PIPE_A ?
+			DE_PIPEA_VBLANK :
+			DE_PIPEB_VBLANK;
+
+		reg = DEISR;
+	} else {
+		switch (pipe) {
+		default:
+		case PIPE_A:
+			status = DE_PIPEA_VBLANK_IVB;
+			break;
+		case PIPE_B:
+			status = DE_PIPEB_VBLANK_IVB;
+			break;
+		case PIPE_C:
+			status = DE_PIPEC_VBLANK_IVB;
+			break;
+		}
+
+		reg = DEISR;
+	}
+
+	if (IS_GEN2(dev))
+		return __raw_i915_read16(dev_priv, reg) & status;
+	else
+		return __raw_i915_read32(dev_priv, reg) & status;
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	int position;
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
+	if (!intel_crtc->active) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
-	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+	htotal = mode->crtc_htotal;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		position = I915_READ(PIPEDSL(pipe));
+		if (IS_GEN2(dev))
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		else
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-		/* Decode into vertical scanout position. Don't have
-		 * horizontal scanout position.
+		/*
+		 * The scanline counter increments at the leading edge
+		 * of hsync, i.e. it completely misses the active portion
+		 * of the line. Fix up the counter at both edges of vblank
+		 * to get a more accurate picture of whether we're in
+		 * vblank or not.
 		 */
-		*vpos = position & 0x1fff;
-		*hpos = 0;
+		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
+		if ((in_vbl && position == vbl_start - 1) ||
+		    (!in_vbl && position == vbl_end - 1))
+			position = (position + 1) % vtotal;
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
+	}
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	in_vbl = position >= vbl_start && position < vbl_end;
+
+	/*
+	 * While in vblank, position will be negative
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive, counting
+	 * up from vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		*vpos = position;
+		*hpos = 0;
+	} else {
 		*vpos = position / htotal;
 		*hpos = position - (*vpos * htotal);
 	}
 
-	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(cpu_transcoder));
-
-	/* Test position against vblank region. */
-	vbl_start = vbl & 0x1fff;
-	vbl_end = (vbl >> 16) & 0x1fff;
-
-	if ((*vpos < vbl_start) || (*vpos > vbl_end))
-		in_vbl = false;
-
-	/* Inside "upper part" of vblank area? Apply corrective offset: */
-	if (in_vbl && (*vpos >= vbl_start))
-		*vpos = *vpos - vtotal;
-
-	/* Readouts valid? */
-	if (vbl > 0)
-		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
-
 	/* In vblank? */
 	if (in_vbl)
 		ret |= DRM_SCANOUTPOS_INVBL;
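
The normalization near the end of i915_get_crtc_scanoutpos() above remaps the raw position so callers see vblank as negative values counting up to 0, and active scanout counting up from 0. A standalone sketch of just that remapping, with a hypothetical 1125-line mode:

#include <stdio.h>

static int normalize_pos(int position, int vbl_start, int vbl_end, int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;	/* inside vblank: negative */
	return position + vtotal - vbl_end;	/* active: counts up from 0 */
}

int main(void)
{
	/* hypothetical mode: 1080 active lines, vblank spans 1080..1124 */
	int vbl_start = 1080, vbl_end = 1125, vtotal = 1125;

	printf("%d\n", normalize_pos(1080, vbl_start, vbl_end, vtotal)); /* -45 */
	printf("%d\n", normalize_pos(1124, vbl_start, vbl_end, vtotal)); /* -1  */
	printf("%d\n", normalize_pos(0, vbl_start, vbl_end, vtotal));    /*  0  */
	printf("%d\n", normalize_pos(540, vbl_start, vbl_end, vtotal));  /* 540 */
	return 0;
}
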
@@ -665,7 +812,8 @@
 						     crtc);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
 {
 	enum drm_connector_status old_status;
 
@@ -673,11 +821,16 @@
 	old_status = connector->status;
 
 	connector->status = connector->funcs->detect(connector, false);
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
 		      drm_get_connector_name(connector),
-		      old_status, connector->status);
-	return (old_status != connector->status);
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
 }
 
 /*
@@ -801,7 +954,7 @@
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+	trace_i915_gem_request_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
@@ -812,7 +965,7 @@
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
 	u32 pm_iir;
-	u8 new_delay;
+	int new_delay, adj;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +982,49 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	adj = dev_priv->rps.last_adj;
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		new_delay = dev_priv->rps.cur_delay + 1;
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+		new_delay = dev_priv->rps.cur_delay + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (IS_VALLEYVIEW(dev_priv->dev) &&
-		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+		if (new_delay < dev_priv->rps.rpe_delay)
 			new_delay = dev_priv->rps.rpe_delay;
-	} else
-		new_delay = dev_priv->rps.cur_delay - 1;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
+			new_delay = dev_priv->rps.rpe_delay;
+		else
+			new_delay = dev_priv->rps.min_delay;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		new_delay = dev_priv->rps.cur_delay + adj;
+	} else { /* unknown event */
+		new_delay = dev_priv->rps.cur_delay;
+	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay >= dev_priv->rps.min_delay &&
-	    new_delay <= dev_priv->rps.max_delay) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, new_delay);
-		else
-			gen6_set_rps(dev_priv->dev, new_delay);
-	}
+	if (new_delay < (int)dev_priv->rps.min_delay)
+		new_delay = dev_priv->rps.min_delay;
+	if (new_delay > (int)dev_priv->rps.max_delay)
+		new_delay = dev_priv->rps.max_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check.  It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
-	}
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		valleyview_set_rps(dev_priv->dev, new_delay);
+	else
+		gen6_set_rps(dev_priv->dev, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
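
The rewritten RPS work handler above replaces the fixed +/-1 step with an adaptive one: consecutive up (or down) threshold interrupts double the step via last_adj, a down-timeout snaps to RPe or the minimum, and the result is clamped to [min, max]. A user-space model of that policy (struct and field names hypothetical; only the arithmetic mirrors the patch):

#include <stdio.h>

struct rps {
	int cur, min, max, rpe;		/* current/min/max/efficient level */
	int last_adj;
};

enum rps_event { UP_THRESHOLD, DOWN_THRESHOLD, DOWN_TIMEOUT };

static void rps_step(struct rps *r, enum rps_event ev)
{
	int adj = r->last_adj;
	int new_delay = r->cur;

	switch (ev) {
	case UP_THRESHOLD:
		adj = adj > 0 ? adj * 2 : 1;	/* accelerate upwards */
		new_delay = r->cur + adj;
		if (new_delay < r->rpe)		/* jump straight to RPe */
			new_delay = r->rpe;
		break;
	case DOWN_TIMEOUT:
		new_delay = r->cur > r->rpe ? r->rpe : r->min;
		adj = 0;
		break;
	case DOWN_THRESHOLD:
		adj = adj < 0 ? adj * 2 : -1;	/* accelerate downwards */
		new_delay = r->cur + adj;
		break;
	}

	if (new_delay < r->min)
		new_delay = r->min;
	if (new_delay > r->max)
		new_delay = r->max;

	r->last_adj = new_delay - r->cur;	/* remembered across events */
	r->cur = new_delay;
}

int main(void)
{
	struct rps r = { .cur = 5, .min = 3, .max = 20, .rpe = 10 };
	int i;

	for (i = 0; i < 3; i++) {
		rps_step(&r, UP_THRESHOLD);
		printf("up:   cur=%2d last_adj=%d\n", r.cur, r.last_adj);
	}
	rps_step(&r, DOWN_THRESHOLD);
	printf("down: cur=%2d last_adj=%d\n", r.cur, r.last_adj);
	return 0;
}
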
@@ -882,9 +1044,10 @@
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
-	char *parity_event[5];
+	char *parity_event[6];
 	uint32_t misccpctl;
 	unsigned long flags;
+	uint8_t slice = 0;
 
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1055,81 @@
 	 */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	error_status = I915_READ(GEN7_L3CDERRST1);
-	row = GEN7_PARITY_ERROR_ROW(error_status);
-	bank = GEN7_PARITY_ERROR_BANK(error_status);
-	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
 
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-				    GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
+
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
+
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
+
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
 
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
-
-	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
-
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
-
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
-
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 	spin_unlock(&dev_priv->irq_lock);
 
+	iir &= GT_PARITY_ERROR(dev);
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		dev_priv->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		dev_priv->l3_parity.which_slice |= 1 << 0;
+
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
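
ivybridge_parity_error_irq_handler() above records pending slices in the which_slice bitmask, and the work function drains it with ffs(), clearing each bit before servicing it so a new interrupt can re-arm it concurrently. The drain loop in isolation, as a runnable sketch:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int which_slice = (1 << 0) | (1 << 1);	/* slices 0 and 1 pending */
	int slice;

	while ((slice = ffs(which_slice)) != 0) {
		slice--;			/* ffs() is 1-based */
		which_slice &= ~(1 << slice);	/* clear before servicing */
		printf("handling L3 parity error on slice %d\n", slice);
	}
	return 0;
}
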
 
@@ -975,8 +1164,58 @@
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_parity_error_irq_handler(dev);
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+				       struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	u32 rcs, bcs, vcs;
+	uint32_t tmp = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		tmp = I915_READ(GEN8_GT_IIR(0));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			if (rcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[RCS]);
+			if (bcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[BCS]);
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(1));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS]);
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(3));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VECS]);
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	return ret;
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1289,102 @@
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+					 uint32_t crc0, uint32_t crc1,
+					 uint32_t crc2, uint32_t crc3,
+					 uint32_t crc4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_pipe_crc_entry *entry;
+	int head, tail;
+
+	spin_lock(&pipe_crc->lock);
+
+	if (!pipe_crc->entries) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("spurious interrupt\n");
+		return;
+	}
+
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+
+	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("CRC buffer overflowing\n");
+		return;
+	}
+
+	entry = &pipe_crc->entries[head];
+
+	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->crc[0] = crc0;
+	entry->crc[1] = crc1;
+	entry->crc[2] = crc2;
+	entry->crc[3] = crc3;
+	entry->crc[4] = crc4;
+
+	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+	pipe_crc->head = head;
+
+	spin_unlock(&pipe_crc->lock);
+
+	wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+			     uint32_t crc0, uint32_t crc1,
+			     uint32_t crc2, uint32_t crc3,
+			     uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t res1, res2;
+
+	if (INTEL_INFO(dev)->gen >= 3)
+		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+	else
+		res1 = 0;
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+	else
+		res2 = 0;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_RED(pipe)),
+				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     res1, res2);
+}
+
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
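
display_pipe_crc_irq_handler() above is the producer side of a single-producer ring guarded by CIRC_SPACE() from the newly included linux/circ_buf.h. A self-contained sketch of the same push discipline (macro bodies reproduced from the usual circ_buf pattern for power-of-two sizes; entry payload simplified to one word):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define ENTRIES_NR 128	/* must be a power of two */

struct ring {
	int head, tail;
	unsigned int entry[ENTRIES_NR];
};

/* producer side, mirroring the interrupt handler above */
static int ring_push(struct ring *r, unsigned int crc)
{
	if (CIRC_SPACE(r->head, r->tail, ENTRIES_NR) < 1)
		return -1;	/* buffer overflowing, drop */

	r->entry[r->head] = crc;
	r->head = (r->head + 1) & (ENTRIES_NR - 1);
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int i, dropped = 0;

	for (i = 0; i < ENTRIES_NR; i++)	/* one slot always stays free */
		dropped += ring_push(&r, i) < 0;
	printf("dropped=%d head=%d\n", dropped, r.head);	/* dropped=1 head=127 */
	return 0;
}
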
@@ -1117,13 +1452,16 @@
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 				drm_handle_vblank(dev, pipe);
 
 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 				intel_prepare_page_flip(dev, pipe);
 				intel_finish_page_flip(dev, pipe);
 			}
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		/* Consume port.  Then clear IIR or we'll miss events */
@@ -1212,21 +1550,26 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 err_int = I915_READ(GEN7_ERR_INT);
+	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
-	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+			if (IS_IVYBRIDGE(dev))
+				ivb_pipe_crc_irq_handler(dev, pipe);
+			else
+				hsw_pipe_crc_irq_handler(dev, pipe);
+		}
+	}
 
 	I915_WRITE(GEN7_ERR_INT, err_int);
 }
@@ -1297,6 +1640,7 @@
 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
@@ -1304,31 +1648,26 @@
 	if (de_iir & DE_GSE)
 		intel_opregion_asle_intr(dev);
 
-	if (de_iir & DE_PIPEA_VBLANK)
-		drm_handle_vblank(dev, 0);
-
-	if (de_iir & DE_PIPEB_VBLANK)
-		drm_handle_vblank(dev, 1);
-
 	if (de_iir & DE_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (de_iir & DE_PIPE_VBLANK(pipe))
+			drm_handle_vblank(dev, pipe);
 
-	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
 
-	if (de_iir & DE_PLANEA_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip_plane(dev, 0);
-	}
+		if (de_iir & DE_PIPE_CRC_DONE(pipe))
+			i9xx_pipe_crc_irq_handler(dev, pipe);
 
-	if (de_iir & DE_PLANEB_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip_plane(dev, 1);
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
 	}
 
 	/* check event from PCH */
@@ -1351,7 +1690,7 @@
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe i;
 
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev);
@@ -1362,10 +1701,12 @@
 	if (de_iir & DE_GSE_IVB)
 		intel_opregion_asle_intr(dev);
 
-	for (i = 0; i < 3; i++) {
-		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+	for_each_pipe(i) {
+		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
 			drm_handle_vblank(dev, i);
-		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
 			intel_prepare_page_flip(dev, i);
 			intel_finish_page_flip_plane(dev, i);
 		}
@@ -1388,7 +1729,6 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
-	bool err_int_reenable = false;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -1412,17 +1752,6 @@
 		POSTING_READ(SDEIER);
 	}
 
-	/* On Haswell, also mask ERR_INT because we don't want to risk
-	 * generating "unclaimed register" interrupts from inside the interrupt
-	 * handler. */
-	if (IS_HASWELL(dev)) {
-		spin_lock(&dev_priv->irq_lock);
-		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-		if (err_int_reenable)
-			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1781,6 @@
 		}
 	}
 
-	if (err_int_reenable) {
-		spin_lock(&dev_priv->irq_lock);
-		if (ivb_can_enable_err_int(dev))
-			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
 	if (!HAS_PCH_NOP(dev)) {
@@ -1469,6 +1791,117 @@
 	return ret;
 }
 
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret = IRQ_NONE;
+	uint32_t tmp = 0;
+	enum pipe pipe;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	master_ctl = I915_READ(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+	if (master_ctl & GEN8_DE_MISC_IRQ) {
+		tmp = I915_READ(GEN8_DE_MISC_IIR);
+		if (tmp & GEN8_DE_MISC_GSE)
+			intel_opregion_asle_intr(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Misc interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (master_ctl & GEN8_DE_PORT_IRQ) {
+		tmp = I915_READ(GEN8_DE_PORT_IIR);
+		if (tmp & GEN8_AUX_CHANNEL_A)
+			dp_aux_irq_handler(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Port interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	for_each_pipe(pipe) {
+		uint32_t pipe_iir;
+
+		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+			continue;
+
+		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (pipe_iir & GEN8_PIPE_VBLANK)
+			drm_handle_vblank(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+
+		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+			hsw_pipe_crc_irq_handler(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
+
+		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+				  pipe_name(pipe),
+				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+		}
+
+		if (pipe_iir) {
+			ret = IRQ_HANDLED;
+			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+		} else
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+	}
+
+	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+		/*
+		 * FIXME(BDW): Assume for now that the new interrupt handling
+		 * scheme also closed the SDE interrupt handling race we've seen
+		 * on older pch-split platforms. But this needs testing.
+		 */
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		cpt_irq_handler(dev, pch_iir);
+
+		if (pch_iir) {
+			I915_WRITE(SDEIIR, pch_iir);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return ret;
+}
+
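
gen8_irq_handler() above follows the Broadwell master-interrupt discipline: read GEN8_MASTER_IRQ, drop the global enable bit, service and ack each per-domain IIR that the master control advertises, then restore the enable bit last. A toy model of that flow with fake registers (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MASTER_ENABLE (1u << 31)

static uint32_t master_irq = MASTER_ENABLE | 0x3;	/* two domains pending */
static uint32_t domain_iir[2] = { 0xa, 0xb };

static void irq_handler(void)
{
	uint32_t master = master_irq & ~MASTER_ENABLE;
	unsigned int d;

	if (!master)
		return;				/* not ours */

	master_irq &= ~MASTER_ENABLE;		/* 1. gate further interrupts */

	for (d = 0; d < 2; d++) {
		if (!(master & (1u << d)))
			continue;
		if (domain_iir[d]) {
			printf("domain %u: iir=0x%x\n", d, domain_iir[d]);
			domain_iir[d] = 0;	/* 2. ack at the source */
		} else {
			printf("the master control interrupt lied (%u)!\n", d);
		}
	}

	master_irq |= MASTER_ENABLE;		/* 3. re-enable last */
}

int main(void)
{
	irq_handler();
	return 0;
}
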
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
@@ -1516,7 +1949,7 @@
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1963,7 @@
 	 */
 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
 
 		/*
@@ -1557,7 +1990,7 @@
 			smp_mb__before_atomic_inc();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-			kobject_uevent_env(&dev->primary->kdev.kobj,
+			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
 			atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2220,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
@@ -1810,7 +2243,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1822,6 +2255,22 @@
 	return 0;
 }
 
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -1845,7 +2294,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2311,7 @@
 	i915_disable_pipestat(dev_priv, pipe,
 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1870,6 +2319,21 @@
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
@@ -1965,6 +2429,7 @@
 	if (tmp & RING_WAIT) {
 		DRM_ERROR("Kicking stuck wait on %s\n",
 			  ring->name);
+		i915_handle_error(dev, false);
 		I915_WRITE_CTL(ring, tmp);
 		return HANGCHECK_KICK;
 	}
@@ -1976,6 +2441,7 @@
 		case 1:
 			DRM_ERROR("Kicking stuck semaphore on %s\n",
 				  ring->name);
+			i915_handle_error(dev, false);
 			I915_WRITE_CTL(ring, tmp);
 			return HANGCHECK_KICK;
 		case 0:
@@ -2021,12 +2487,21 @@
 
 		if (ring->hangcheck.seqno == seqno) {
 			if (ring_idle(ring, seqno)) {
+				ring->hangcheck.action = HANGCHECK_IDLE;
+
 				if (waitqueue_active(&ring->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-						  ring->name);
-					wake_up_all(&ring->irq_queue);
-					ring->hangcheck.score += HUNG;
+					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+								  ring->name);
+						else
+							DRM_INFO("Fake missed irq on %s\n",
+								 ring->name);
+						wake_up_all(&ring->irq_queue);
+					}
+					/* Safeguard against driver failure */
+					ring->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -2049,6 +2524,7 @@
 								    acthd);
 
 				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
@@ -2064,6 +2540,8 @@
 				}
 			}
 		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
@@ -2190,6 +2668,53 @@
 	POSTING_READ(VLV_IER);
 }
 
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	/* IIR can theoretically queue up two events. Be paranoid */
+#define GEN8_IRQ_INIT_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR(which)); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR(which)); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_INIT(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_INIT_NDX(GT, 0);
+	GEN8_IRQ_INIT_NDX(GT, 1);
+	GEN8_IRQ_INIT_NDX(GT, 2);
+	GEN8_IRQ_INIT_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_INIT(DE_PORT);
+	GEN8_IRQ_INIT(DE_MISC);
+	GEN8_IRQ_INIT(PCU);
+#undef GEN8_IRQ_INIT
+#undef GEN8_IRQ_INIT_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
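
The GEN8_IRQ_INIT_NDX()/GEN8_IRQ_INIT() helpers above rely on ## token pasting to synthesize the IMR/IER/IIR register names per interrupt domain. A stripped-down illustration of the pasting, with made-up offsets standing in for the real i915_reg.h values:

#include <stdio.h>

#define GEN8_GT_IMR(which)	(0x1000 + 0x10 * (which))	/* hypothetical */
#define GEN8_GT_IER(which)	(0x1008 + 0x10 * (which))	/* hypothetical */

/* GEN8_##type##_IMR(which) pastes into GEN8_GT_IMR(which) and so on */
#define SHOW_IRQ_REGS(type, which) \
	printf(#type "%d: IMR=0x%x IER=0x%x\n", \
	       (which), GEN8_##type##_IMR(which), GEN8_##type##_IER(which))

int main(void)
{
	SHOW_IRQ_REGS(GT, 0);	/* GT0: IMR=0x1000 IER=0x1008 */
	SHOW_IRQ_REGS(GT, 3);	/* GT3: IMR=0x1030 IER=0x1038 */
	return 0;
}
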
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2254,10 +2779,10 @@
 	pm_irqs = gt_irqs = 0;
 
 	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_GPU_CACHE(dev)) {
+	if (HAS_L3_DPF(dev)) {
 		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+		gt_irqs |= GT_PARITY_ERROR(dev);
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2831,10 @@
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+				DE_AUX_CHANNEL_A |
+				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+				DE_POISON);
 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
 	}
 
@@ -2341,7 +2868,8 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
-	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+		PIPE_CRC_DONE_ENABLE;
 	unsigned long irqflags;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2899,9 @@
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
-	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2392,6 +2920,117 @@
 	return 0;
 }
 
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	/* These are interrupts we'll toggle with the ring mask register */
+	uint32_t gt_interrupts[] = {
+		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+		0,
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+		};
+
+	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
+		u32 tmp = I915_READ(GEN8_GT_IIR(i));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  i, tmp);
+		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
+		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
+	}
+	POSTING_READ(GEN8_GT_IER(0));
+}
+
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
+		GEN8_PIPE_CDCLK_CRC_DONE |
+		GEN8_PIPE_FIFO_UNDERRUN |
+		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+	int pipe;
+	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+
+	for_each_pipe(pipe) {
+		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  pipe, tmp);
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
+	}
+	POSTING_READ(GEN8_DE_PIPE_ISR(0));
+
+	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
+	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
+	POSTING_READ(GEN8_DE_PORT_IER);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen8_gt_irq_postinstall(dev_priv);
+	gen8_de_irq_postinstall(dev_priv);
+
+	ibx_irq_postinstall(dev);
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return 0;
+}
+
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+#define GEN8_IRQ_FINI_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_FINI(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_FINI_NDX(GT, 0);
+	GEN8_IRQ_FINI_NDX(GT, 1);
+	GEN8_IRQ_FINI_NDX(GT, 2);
+	GEN8_IRQ_FINI_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_FINI(DE_PORT);
+	GEN8_IRQ_FINI(DE_MISC);
+	GEN8_IRQ_FINI(PCU);
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2464,6 +3103,7 @@
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
 
 	I915_WRITE16(EMR,
 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +3124,13 @@
 		     I915_USER_INTERRUPT);
 	POSTING_READ16(IER);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2570,13 +3217,14 @@
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
-		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 0, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i8xx_handle_vblank(dev, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
-		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 1, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+		}
 
 		iir = new_iir;
 	}
@@ -2623,6 +3271,7 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
+	unsigned long irqflags;
 
 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2658,6 +3307,13 @@
 
 	i915_enable_asle_pipestat(dev);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2769,6 +3425,9 @@
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3526,9 @@
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	/*
@@ -3013,6 +3674,9 @@
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 
@@ -3122,18 +3786,21 @@
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev)) {
+		dev->max_vblank_count = 0;
+		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	} else {
+		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-	else
-		dev->driver->get_vblank_timestamp = NULL;
-	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+	}
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev->driver->irq_handler = valleyview_irq_handler;
@@ -3143,6 +3810,14 @@
 		dev->driver->enable_vblank = valleyview_enable_vblank;
 		dev->driver->disable_vblank = valleyview_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_GEN8(dev)) {
+		dev->driver->irq_handler = gen8_irq_handler;
+		dev->driver->irq_preinstall = gen8_irq_preinstall;
+		dev->driver->irq_postinstall = gen8_irq_postinstall;
+		dev->driver->irq_uninstall = gen8_irq_uninstall;
+		dev->driver->enable_vblank = gen8_enable_vblank;
+		dev->driver->disable_vblank = gen8_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ef9b354..f9eafb6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
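
_PIPE() interpolates between the pipe A and pipe B addresses, while the new _PIPE_INC() strides from a base by a fixed increment, which is what the CRC registers added later in this patch use (base 0x60050, stride 0x1000; display_mmio_offset taken as 0 for the sketch):

#include <stdio.h>

#define _PIPE(pipe, a, b)		((a) + (pipe)*((b)-(a)))
#define _PIPE_INC(pipe, base, inc)	((base) + (pipe)*(inc))

int main(void)
{
	printf("PIPE_CRC_CTL(A) = 0x%x\n", _PIPE_INC(0, 0x60050, 0x1000)); /* 0x60050 */
	printf("PIPE_CRC_CTL(B) = 0x%x\n", _PIPE_INC(1, 0x60050, 0x1000)); /* 0x61050 */
	printf("PIPE_CRC_CTL(C) = 0x%x\n", _PIPE_INC(2, 0x60050, 0x1000)); /* 0x62050 */
	return 0;
}
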
@@ -109,6 +110,9 @@
 #define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
 #define   PP_DIR_DCLV_2G		0xffffffff
 
+#define GEN8_RING_PDP_UDW(ring, n)	((ring)->mmio_base+0x270 + ((n) * 8 + 4))
+#define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)
+
 #define GAM_ECOCHK			0x4090
 #define   ECOCHK_SNB_BIT		(1<<10)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
@@ -246,6 +250,7 @@
 #define   MI_BATCH_NON_SECURE_HSW 	(1<<13)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
 #define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
@@ -264,6 +269,11 @@
 #define  MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
 #define  MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
 #define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
+#define MI_PREDICATE_RESULT_2	(0x2214)
+#define  LOWER_SLICE_ENABLED	(1<<0)
+#define  LOWER_SLICE_DISABLED	(0<<0)
+
 /*
  * 3D instructions used by the kernel
  */
@@ -346,12 +356,25 @@
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
+#define   IOSF_PORT_GPIO_NC			0x13
+#define   IOSF_PORT_CCK				0x14
+#define   IOSF_PORT_CCU				0xA9
+#define   IOSF_PORT_GPS_CORE			0x48
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_WRITE			7
 
+#define PUNIT_REG_PWRGT_CTRL			0x60
+#define PUNIT_REG_PWRGT_STATUS			0x61
+#define	  PUNIT_CLK_GATE			1
+#define	  PUNIT_PWR_RESET			2
+#define	  PUNIT_PWR_GATE			3
+#define	  RENDER_PWRGT				(PUNIT_PWR_GATE << 0)
+#define	  MEDIA_PWRGT				(PUNIT_PWR_GATE << 2)
+#define	  DISP2D_PWRGT				(PUNIT_PWR_GATE << 6)
+
 #define PUNIT_REG_GPU_LFM			0xd3
 #define PUNIT_REG_GPU_FREQ_REQ			0xd4
 #define PUNIT_REG_GPU_FREQ_STS			0xd8
@@ -372,6 +395,40 @@
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
 
+/* vlv2 north clock (CCK) registers */
+#define CCK_FUSE_REG				0x8
+#define  CCK_FUSE_HPLL_FREQ_MASK		0x3
+#define CCK_REG_DSI_PLL_FUSE			0x44
+#define CCK_REG_DSI_PLL_CONTROL			0x48
+#define  DSI_PLL_VCO_EN				(1 << 31)
+#define  DSI_PLL_LDO_GATE			(1 << 30)
+#define  DSI_PLL_P1_POST_DIV_SHIFT		17
+#define  DSI_PLL_P1_POST_DIV_MASK		(0x1ff << 17)
+#define  DSI_PLL_P2_MUX_DSI0_DIV2		(1 << 13)
+#define  DSI_PLL_P3_MUX_DSI1_DIV2		(1 << 12)
+#define  DSI_PLL_MUX_MASK			(3 << 9)
+#define  DSI_PLL_MUX_DSI0_DSIPLL		(0 << 10)
+#define  DSI_PLL_MUX_DSI0_CCK			(1 << 10)
+#define  DSI_PLL_MUX_DSI1_DSIPLL		(0 << 9)
+#define  DSI_PLL_MUX_DSI1_CCK			(1 << 9)
+#define  DSI_PLL_CLK_GATE_MASK			(0xf << 5)
+#define  DSI_PLL_CLK_GATE_DSI0_DSIPLL		(1 << 8)
+#define  DSI_PLL_CLK_GATE_DSI1_DSIPLL		(1 << 7)
+#define  DSI_PLL_CLK_GATE_DSI0_CCK		(1 << 6)
+#define  DSI_PLL_CLK_GATE_DSI1_CCK		(1 << 5)
+#define  DSI_PLL_LOCK				(1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER			0x4c
+#define  DSI_PLL_LFSR				(1 << 31)
+#define  DSI_PLL_FRACTION_EN			(1 << 30)
+#define  DSI_PLL_FRAC_COUNTER_SHIFT		27
+#define  DSI_PLL_FRAC_COUNTER_MASK		(7 << 27)
+#define  DSI_PLL_USYNC_CNT_SHIFT		18
+#define  DSI_PLL_USYNC_CNT_MASK			(0x1ff << 18)
+#define  DSI_PLL_N1_DIV_SHIFT			16
+#define  DSI_PLL_N1_DIV_MASK			(3 << 16)
+#define  DSI_PLL_M1_DIV_SHIFT			0
+#define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+
 /*
  * DPIO - a special bus for various display related registers to hide behind
  *
@@ -387,11 +444,11 @@
 #define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS		(1<<1)
-#define  DPIO_RESET			(1<<0)
+#define  DPIO_CMNRST			(1<<0)
 
 #define _DPIO_TX3_SWING_CTL4_A		0x690
 #define _DPIO_TX3_SWING_CTL4_B		0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
 					_DPIO_TX3_SWING_CTL4_B)
 
 /*
@@ -602,6 +659,9 @@
 #define ARB_MODE		0x04030
 #define   ARB_MODE_SWIZZLE_SNB	(1<<4)
 #define   ARB_MODE_SWIZZLE_IVB	(1<<5)
+#define GAMTARBMODE		0x04a08
+#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
+#define   ARB_MODE_SWIZZLE_BDW	(1<<1)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
 #define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
 #define   RING_FAULT_GTTSEL_MASK (1<<11)
@@ -609,6 +669,7 @@
 #define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
 #define   RING_FAULT_VALID	(1<<0)
 #define DONE_REG		0x40b0
+#define GEN8_PRIVATE_PAT	0x40e0
 #define BSD_HWS_PGA_GEN7	(0x04180)
 #define BLT_HWS_PGA_GEN7	(0x04280)
 #define VEBOX_HWS_PGA_GEN7	(0x04380)
@@ -669,13 +730,18 @@
 #define NOPID		0x02094
 #define HWSTAM		0x02098
 #define DMA_FADD_I8XX	0x020d0
+#define RING_BBSTATE(base)	((base)+0x110)
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
 #define   ERR_INT_POISON		(1<<31)
 #define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
+#define   ERR_INT_PIPE_CRC_DONE_C	(1<<8)
 #define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
+#define   ERR_INT_PIPE_CRC_DONE_B	(1<<5)
 #define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
+#define   ERR_INT_PIPE_CRC_DONE_A	(1<<2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)	(1<<(2 + pipe*3))
 #define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
 #define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))
 
@@ -683,6 +749,7 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+/* Note that HBLANK events are reserved on bdw+ */
 #define   DERRMR_PIPEA_SCANLINE		(1<<0)
 #define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
 #define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
@@ -716,6 +783,7 @@
 #define _3D_CHICKEN3	0x02090
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)	((x)<<1)
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
@@ -890,6 +958,7 @@
 #define GT_BLT_USER_INTERRUPT			(1 << 22)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
 #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
@@ -900,6 +969,10 @@
 #define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
 #define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
 
+#define GT_PARITY_ERROR(dev) \
+	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+	 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
 /* These are all the "old" interrupts */
 #define ILK_BSD_USER_INTERRUPT				(1<<5)
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
@@ -1048,9 +1121,6 @@
 					     _HSW_PIPE_SLICE_CHICKEN_1_A, + \
 					     _HSW_PIPE_SLICE_CHICKEN_1_B)
 
-#define HSW_CLKGATE_DISABLE_PART_1	0x46500
-#define   HSW_DPFC_GATING_DISABLE	(1<<23)
-
 /*
  * GPIO regs
  */
@@ -1387,6 +1457,12 @@
 
 #define MI_ARB_VLV		(VLV_DISPLAY_BASE + 0x6504)
 
+#define CZCLK_CDCLK_FREQ_RATIO	(VLV_DISPLAY_BASE + 0x6508)
+#define   CDCLK_FREQ_SHIFT	4
+#define   CDCLK_FREQ_MASK	(0x1f << CDCLK_FREQ_SHIFT)
+#define   CZCLK_FREQ_MASK	0xf
+#define GMBUSFREQ_VLV		(VLV_DISPLAY_BASE + 0x6510)
+
 /*
  * Palette regs
  */
@@ -1404,13 +1480,15 @@
  * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
  * every way.  It is not accessible from the CP register read instructions.
  *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
  */
 #define MCHBAR_MIRROR_BASE	0x10000
 
 #define MCHBAR_MIRROR_BASE_SNB	0x140000
 
 /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK 0x5e04
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC			0x10200
@@ -1705,9 +1783,9 @@
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
-#define GEN6_GT_PERF_STATUS	0x145948
-#define GEN6_RP_STATE_LIMITS	0x145994
-#define GEN6_RP_STATE_CAP	0x145998
+#define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 
 /*
  * Logical Context regs
@@ -1752,6 +1830,12 @@
  * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
+
+
+#define VLV_CLK_CTL2			0x101104
+#define   CLK_CTL2_CZCOUNT_30NS_SHIFT	28
 
 /*
  * Overlay regs
@@ -1771,6 +1855,83 @@
  * Display engine regs
  */
 
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A		(dev_priv->info->display_mmio_offset + 0x60050)
+#define   PIPE_CRC_ENABLE		(1 << 31)
+/* ivb+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_IVB	(0 << 29)
+#define   PIPE_CRC_SOURCE_SPRITE_IVB	(1 << 29)
+#define   PIPE_CRC_SOURCE_PF_IVB	(2 << 29)
+/* ilk+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_ILK	(0 << 28)
+#define   PIPE_CRC_SOURCE_SPRITE_ILK	(1 << 28)
+#define   PIPE_CRC_SOURCE_PIPE_ILK	(2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define   PIPE_CRC_SOURCE_PORT_A_ILK	(4 << 28)
+#define   PIPE_CRC_SOURCE_FDI_ILK	(5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define   PIPE_CRC_SOURCE_PIPE_VLV	(0 << 27)
+#define   PIPE_CRC_SOURCE_HDMIB_VLV	(1 << 27)
+#define   PIPE_CRC_SOURCE_HDMIC_VLV	(2 << 27)
+/* with DP port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_VLV	(3 << 27)
+#define   PIPE_CRC_SOURCE_DP_B_VLV	(6 << 27)
+#define   PIPE_CRC_SOURCE_DP_C_VLV	(7 << 27)
+/* gen3+ source selection */
+#define   PIPE_CRC_SOURCE_PIPE_I9XX	(0 << 28)
+#define   PIPE_CRC_SOURCE_SDVOB_I9XX	(1 << 28)
+#define   PIPE_CRC_SOURCE_SDVOC_I9XX	(2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_G4X	(3 << 28)
+#define   PIPE_CRC_SOURCE_TV_PRE	(4 << 28)
+#define   PIPE_CRC_SOURCE_TV_POST	(5 << 28)
+#define   PIPE_CRC_SOURCE_DP_B_G4X	(6 << 28)
+#define   PIPE_CRC_SOURCE_DP_C_G4X	(7 << 28)
+/* gen2 doesn't have source selection bits */
+#define   PIPE_CRC_INCLUDE_BORDER_I8XX	(1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB		0x60064
+#define _PIPE_CRC_RES_2_A_IVB		0x60068
+#define _PIPE_CRC_RES_3_A_IVB		0x6006c
+#define _PIPE_CRC_RES_4_A_IVB		0x60070
+#define _PIPE_CRC_RES_5_A_IVB		0x60074
+
+#define _PIPE_CRC_RES_RED_A		(dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A		(dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A		(dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915	(dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X	(dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB		0x61064
+#define _PIPE_CRC_RES_2_B_IVB		0x61068
+#define _PIPE_CRC_RES_3_B_IVB		0x6106c
+#define _PIPE_CRC_RES_4_B_IVB		0x61070
+#define _PIPE_CRC_RES_5_B_IVB		0x61074
+
+#define PIPE_CRC_CTL(pipe)	_PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
 /* Pipe A timing regs */
 #define _HTOTAL_A	(dev_priv->info->display_mmio_offset + 0x60000)
 #define _HBLANK_A	(dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1954,6 @@
 #define _BCLRPAT_B	(dev_priv->info->display_mmio_offset + 0x61020)
 #define _VSYNCSHIFT_B	(dev_priv->info->display_mmio_offset + 0x61028)
 
-
 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
 #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
 #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1803,8 +1963,9 @@
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
-/* HSW eDP PSR registers */
-#define EDP_PSR_CTL				0x64800
+/* HSW+ eDP PSR registers */
+#define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
+#define EDP_PSR_CTL(dev)			(EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE			(1<<31)
 #define   EDP_PSR_LINK_DISABLE			(0<<27)
 #define   EDP_PSR_LINK_STANDBY			(1<<27)
@@ -1827,16 +1988,16 @@
 #define   EDP_PSR_TP1_TIME_0us			(3<<4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT		0
 
-#define EDP_PSR_AUX_CTL			0x64810
-#define EDP_PSR_AUX_DATA1		0x64814
+#define EDP_PSR_AUX_CTL(dev)			(EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev)			(EDP_PSR_BASE(dev) + 0x14)
 #define   EDP_PSR_DPCD_COMMAND		0x80060000
-#define EDP_PSR_AUX_DATA2		0x64818
+#define EDP_PSR_AUX_DATA2(dev)			(EDP_PSR_BASE(dev) + 0x18)
 #define   EDP_PSR_DPCD_NORMAL_OPERATION	(1<<24)
-#define EDP_PSR_AUX_DATA3		0x6481c
-#define EDP_PSR_AUX_DATA4		0x64820
-#define EDP_PSR_AUX_DATA5		0x64824
+#define EDP_PSR_AUX_DATA3(dev)			(EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev)			(EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev)			(EDP_PSR_BASE(dev) + 0x24)
 
-#define EDP_PSR_STATUS_CTL			0x64840
+#define EDP_PSR_STATUS_CTL(dev)			(EDP_PSR_BASE(dev) + 0x40)
 #define   EDP_PSR_STATUS_STATE_MASK		(7<<29)
 #define   EDP_PSR_STATUS_STATE_IDLE		(0<<29)
 #define   EDP_PSR_STATUS_STATE_SRDONACK		(1<<29)
@@ -1860,10 +2021,10 @@
 #define   EDP_PSR_STATUS_SENDING_TP1		(1<<4)
 #define   EDP_PSR_STATUS_IDLE_MASK		0xf
 
-#define EDP_PSR_PERF_CNT		0x64844
+#define EDP_PSR_PERF_CNT(dev)		(EDP_PSR_BASE(dev) + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK		0xffffff
 
-#define EDP_PSR_DEBUG_CTL		0x64860
+#define EDP_PSR_DEBUG_CTL(dev)		(EDP_PSR_BASE(dev) + 0x60)
 #define   EDP_PSR_DEBUG_MASK_LPSP	(1<<27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP	(1<<26)
 #define   EDP_PSR_DEBUG_MASK_HPD	(1<<25)
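/*
 * Editor's sketch (assumed helper, not from the patch): once the PSR block
 * is addressed through EDP_PSR_BASE(), the same code path serves both the
 * HSW block at 0x64800 and the BDW block at 0x6f800.
 */
static void edp_psr_enable_sketch(struct drm_device *dev,
				  struct drm_i915_private *dev_priv)
{
	u32 val = I915_READ(EDP_PSR_CTL(dev));

	I915_WRITE(EDP_PSR_CTL(dev), val | EDP_PSR_ENABLE);
}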
@@ -2006,6 +2167,14 @@
 #define PCH_HDMIC	0xe1150
 #define PCH_HDMID	0xe1160
 
+#define PORT_DFT_I9XX				0x61150
+#define   DC_BALANCE_RESET			(1 << 25)
+#define PORT_DFT2_G4X				0x61154
+#define   DC_BALANCE_RESET_VLV			(1 << 31)
+#define   PIPE_SCRAMBLE_RESET_MASK		(0x3 << 0)
+#define   PIPE_B_SCRAMBLE_RESET			(1 << 1)
+#define   PIPE_A_SCRAMBLE_RESET			(1 << 0)
+
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE				(1 << 31)
 #define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
@@ -2034,6 +2203,7 @@
 
 /* Gen 4 SDVO/HDMI bits: */
 #define   SDVO_COLOR_FORMAT_8bpc		(0 << 26)
+#define   SDVO_COLOR_FORMAT_MASK		(7 << 26)
 #define   SDVO_ENCODING_SDVO			(0 << 10)
 #define   SDVO_ENCODING_HDMI			(2 << 10)
 #define   HDMI_MODE_SELECT_HDMI			(1 << 9) /* HDMI only */
@@ -2238,6 +2408,21 @@
 
 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+				     _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+				    _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+				     _VLV_BLC_HIST_CTL_B)
+
 /* Backlight control */
 #define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE		(1 << 31)
@@ -2986,6 +3171,7 @@
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
 #define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_DSI_PLL_LOCKED	(1<<29) /* vlv & pipe A only */
 #define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
@@ -3068,6 +3254,18 @@
 #define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
 #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
+#define _PIPE_MISC_A			0x70030
+#define _PIPE_MISC_B			0x71030
+#define   PIPEMISC_DITHER_BPC_MASK	(7<<5)
+#define   PIPEMISC_DITHER_8_BPC		(0<<5)
+#define   PIPEMISC_DITHER_10_BPC	(1<<5)
+#define   PIPEMISC_DITHER_6_BPC		(2<<5)
+#define   PIPEMISC_DITHER_12_BPC	(3<<5)
+#define   PIPEMISC_DITHER_ENABLE	(1<<4)
+#define   PIPEMISC_DITHER_TYPE_MASK	(3<<2)
+#define   PIPEMISC_DITHER_TYPE_SP	(0<<2)
+#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+
 #define VLV_DPFLIPSTAT				(VLV_DISPLAY_BASE + 0x70028)
 #define   PIPEB_LINE_COMPARE_INT_EN		(1<<29)
 #define   PIPEB_HLINE_INT_EN			(1<<28)
@@ -3184,11 +3382,11 @@
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK		0x45100
-#define  WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define  WM0_PIPE_PLANE_MASK	(0xffff<<16)
 #define  WM0_PIPE_PLANE_SHIFT	16
-#define  WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define  WM0_PIPE_SPRITE_MASK	(0xff<<8)
 #define  WM0_PIPE_SPRITE_SHIFT	8
-#define  WM0_PIPE_CURSOR_MASK	(0x1f)
+#define  WM0_PIPE_CURSOR_MASK	(0xff)
 
 #define WM0_PIPEB_ILK		0x45104
 #define WM0_PIPEC_IVB		0x45200
@@ -3198,9 +3396,10 @@
 #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
 #define  WM1_LP_FBC_MASK	(0xf<<20)
 #define  WM1_LP_FBC_SHIFT	20
-#define  WM1_LP_SR_MASK		(0x1ff<<8)
+#define  WM1_LP_FBC_SHIFT_BDW	19
+#define  WM1_LP_SR_MASK		(0x7ff<<8)
 #define  WM1_LP_SR_SHIFT	8
-#define  WM1_LP_CURSOR_MASK	(0x3f)
+#define  WM1_LP_CURSOR_MASK	(0xff)
 #define WM2_LP_ILK		0x4510c
 #define  WM2_LP_EN		(1<<31)
 #define WM3_LP_ILK		0x45110
@@ -3281,17 +3480,17 @@
  *  } while (high1 != high2);
  *  frame = (high1 << 8) | low1;
  */
-#define _PIPEAFRAMEHIGH          (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH          0x70040
 #define   PIPE_FRAME_HIGH_MASK    0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT   0
-#define _PIPEAFRAMEPIXEL         (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL         0x70044
 #define   PIPE_FRAME_LOW_MASK     0xff000000
 #define   PIPE_FRAME_LOW_SHIFT    24
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45	0x70040
-#define _PIPEA_FLIPCOUNT_GM45	0x70044
+#define _PIPEA_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70044)
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
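/*
 * Editor's sketch of the full read sequence whose tail appears in the
 * comment above: sample the high half twice around the low half so a
 * rollover between reads is detected. Assumes the usual PIPEFRAME()/
 * PIPEFRAMEPIXEL() wrappers for the two registers.
 */
static u32 pipe_frame_count_sketch(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}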
 
 /* Cursor A & B regs */
@@ -3422,10 +3621,10 @@
 #define _PIPEBDSL		(dev_priv->info->display_mmio_offset + 0x71000)
 #define _PIPEBCONF		(dev_priv->info->display_mmio_offset + 0x71008)
 #define _PIPEBSTAT		(dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45	0x71040
-#define _PIPEB_FLIPCOUNT_GM45	0x71044
+#define _PIPEBFRAMEHIGH		0x71040
+#define _PIPEBFRAMEPIXEL	0x71044
+#define _PIPEB_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71044)
 
 
 /* Display B control */
@@ -3780,6 +3979,7 @@
 #define DE_SPRITEA_FLIP_DONE    (1 << 28)
 #define DE_PLANEB_FLIP_DONE     (1 << 27)
 #define DE_PLANEA_FLIP_DONE     (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
 #define DE_PCU_EVENT            (1 << 25)
 #define DE_GTT_FAULT            (1 << 24)
 #define DE_POISON               (1 << 23)
@@ -3793,13 +3993,18 @@
 #define DE_PIPEB_ODD_FIELD      (1 << 13)
 #define DE_PIPEB_LINE_COMPARE   (1 << 12)
 #define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_CRC_DONE	(1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_CRC_DONE	(1 << 2)
+#define DE_PIPE_CRC_DONE(pipe)	(1 << (2 + 8*(pipe)))
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
 
 /* More Ivybridge lolz */
 #define DE_ERR_INT_IVB			(1<<30)
@@ -3815,9 +4020,8 @@
 #define DE_PIPEB_VBLANK_IVB		(1<<5)
 #define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
 #define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)	(1 << (3 + 5*(plane)))
 #define DE_PIPEA_VBLANK_IVB		(1<<0)
-
-#define DE_PIPE_VBLANK_ILK(pipe)	(1 << ((pipe * 8) + 7))
 #define DE_PIPE_VBLANK_IVB(pipe)	(1 << ((pipe) * 5))
 
 #define VLV_MASTER_IER			0x4400c /* Gunit master IER */
@@ -3833,6 +4037,71 @@
 #define GTIIR   0x44018
 #define GTIER   0x4401c
 
+#define GEN8_MASTER_IRQ			0x44200
+#define  GEN8_MASTER_IRQ_CONTROL	(1<<31)
+#define  GEN8_PCU_IRQ			(1<<30)
+#define  GEN8_DE_PCH_IRQ		(1<<23)
+#define  GEN8_DE_MISC_IRQ		(1<<22)
+#define  GEN8_DE_PORT_IRQ		(1<<20)
+#define  GEN8_DE_PIPE_C_IRQ		(1<<18)
+#define  GEN8_DE_PIPE_B_IRQ		(1<<17)
+#define  GEN8_DE_PIPE_A_IRQ		(1<<16)
+#define  GEN8_DE_PIPE_IRQ(pipe)		(1 << (16 + (pipe)))
+#define  GEN8_GT_VECS_IRQ		(1<<6)
+#define  GEN8_GT_VCS2_IRQ		(1<<3)
+#define  GEN8_GT_VCS1_IRQ		(1<<2)
+#define  GEN8_GT_BCS_IRQ		(1<<1)
+#define  GEN8_GT_RCS_IRQ		(1<<0)
+
+#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+
+#define GEN8_BCS_IRQ_SHIFT 16
+#define GEN8_RCS_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VECS_IRQ_SHIFT 0
+
+#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define  GEN8_PIPE_FIFO_UNDERRUN	(1 << 31)
+#define  GEN8_PIPE_CDCLK_CRC_ERROR	(1 << 29)
+#define  GEN8_PIPE_CDCLK_CRC_DONE	(1 << 28)
+#define  GEN8_PIPE_CURSOR_FAULT		(1 << 10)
+#define  GEN8_PIPE_SPRITE_FAULT		(1 << 9)
+#define  GEN8_PIPE_PRIMARY_FAULT	(1 << 8)
+#define  GEN8_PIPE_SPRITE_FLIP_DONE	(1 << 5)
+#define  GEN8_PIPE_FLIP_DONE		(1 << 4)
+#define  GEN8_PIPE_SCAN_LINE_EVENT	(1 << 2)
+#define  GEN8_PIPE_VSYNC		(1 << 1)
+#define  GEN8_PIPE_VBLANK		(1 << 0)
+#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
+	(GEN8_PIPE_CURSOR_FAULT | \
+	 GEN8_PIPE_SPRITE_FAULT | \
+	 GEN8_PIPE_PRIMARY_FAULT)
+
+#define GEN8_DE_PORT_ISR 0x44440
+#define GEN8_DE_PORT_IMR 0x44444
+#define GEN8_DE_PORT_IIR 0x44448
+#define GEN8_DE_PORT_IER 0x4444c
+#define  GEN8_PORT_DP_A_HOTPLUG		(1 << 3)
+#define  GEN8_AUX_CHANNEL_A		(1 << 0)
+
+#define GEN8_DE_MISC_ISR 0x44460
+#define GEN8_DE_MISC_IMR 0x44464
+#define GEN8_DE_MISC_IIR 0x44468
+#define GEN8_DE_MISC_IER 0x4446c
+#define  GEN8_DE_MISC_GSE		(1 << 27)
+
+#define GEN8_PCU_ISR 0x444e0
+#define GEN8_PCU_IMR 0x444e4
+#define GEN8_PCU_IIR 0x444e8
+#define GEN8_PCU_IER 0x444ec
+
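/*
 * Editor's sketch of the dispatch flow the bits above imply; not the
 * driver's actual handler. GEN8_MASTER_IRQ is both a summary status and a
 * global enable, so a handler gates it off, acks the per-domain IIRs, then
 * re-arms the control bit.
 */
static irqreturn_t gen8_irq_sketch(struct drm_i915_private *dev_priv)
{
	u32 master, iir;
	enum pipe pipe;

	master = I915_READ(GEN8_MASTER_IRQ);
	if (!master)
		return IRQ_NONE;

	/* Gate further interrupts while this batch is processed. */
	I915_WRITE(GEN8_MASTER_IRQ, 0);

	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		if (!(master & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev_priv->dev, pipe);
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);	/* ack */
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	return IRQ_HANDLED;
}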
 #define ILK_DISPLAY_CHICKEN2	0x42004
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define  ILK_ELPIN_409_SELECT	(1 << 25)
@@ -3858,8 +4127,14 @@
 # define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
 
 #define CHICKEN_PAR1_1		0x42080
+#define  DPA_MASK_VBLANK_SRD	(1 << 15)
 #define  FORCE_ARB_IDLE_PLANES	(1 << 14)
 
+#define _CHICKEN_PIPESL_1_A	0x420b0
+#define _CHICKEN_PIPESL_1_B	0x420b4
+#define  DPRS_MASK_VBLANK_SRD	(1 << 0)
+#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
@@ -3870,6 +4145,8 @@
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		0x7010
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
+#define COMMON_SLICE_CHICKEN2			0x7014
+# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
 #define GEN7_L3CNTLREG1				0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C4FFF8C
@@ -4416,6 +4693,8 @@
 #define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
 #define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
 #define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define  PANEL_PORT_SELECT_DPB_VLV	(1 << 30)
+#define  PANEL_PORT_SELECT_DPC_VLV	(2 << 30)
 #define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
 #define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
 
@@ -4447,7 +4726,6 @@
 #define  PANEL_PORT_SELECT_MASK	(3 << 30)
 #define  PANEL_PORT_SELECT_LVDS	(0 << 30)
 #define  PANEL_PORT_SELECT_DPA	(1 << 30)
-#define  EDP_PANEL		(1 << 30)
 #define  PANEL_PORT_SELECT_DPC	(2 << 30)
 #define  PANEL_PORT_SELECT_DPD	(3 << 30)
 #define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
@@ -4456,11 +4734,6 @@
 #define  PANEL_LIGHT_ON_DELAY_SHIFT	0
 
 #define PCH_PP_OFF_DELAYS	0xc720c
-#define  PANEL_POWER_PORT_SELECT_MASK	(0x3 << 30)
-#define  PANEL_POWER_PORT_LVDS		(0 << 30)
-#define  PANEL_POWER_PORT_DP_A		(1 << 30)
-#define  PANEL_POWER_PORT_DP_C		(2 << 30)
-#define  PANEL_POWER_PORT_DP_D		(3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT	16
 #define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
@@ -4638,7 +4911,7 @@
 #define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
 #define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
 #define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
-#define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_AVG			(0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
 #define GEN6_RP_UP_THRESHOLD			0xA02C
 #define GEN6_RP_DOWN_THRESHOLD			0xA030
@@ -4683,6 +4956,10 @@
 						 GEN6_PM_RP_DOWN_TIMEOUT)
 
 #define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define VLV_COUNTER_CONTROL			0x138104
+#define   VLV_COUNT_RANGE_HIGH			(1<<15)
+#define   VLV_MEDIA_RC6_COUNT_EN		(1<<1)
+#define   VLV_RENDER_RC6_COUNT_EN		(1<<0)
 #define GEN6_GT_GFX_RC6				0x138108
 #define GEN6_GT_GFX_RC6p			0x13810C
 #define GEN6_GT_GFX_RC6pp			0x138110
@@ -4694,8 +4971,11 @@
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
+#define   GEN6_PCODE_READ_D_COMP		0x10
+#define   GEN6_PCODE_WRITE_D_COMP		0x11
 #define   GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define   GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   DISPLAY_IPS_CONTROL			0x19
 #define GEN6_PCODE_DATA				0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
@@ -4713,6 +4993,7 @@
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
 #define   GEN7_PARITY_ERROR_VALID	(1<<13)
 #define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
@@ -4726,11 +5007,13 @@
 #define   GEN7_L3CDERRST1_ENABLE	(1<<7)
 
 #define GEN7_L3LOG_BASE			0xB070
+#define HSW_L3LOG_BASE_SLICE1		0xB270
 #define GEN7_L3LOG_SIZE			0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
 #define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
 #define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
+#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE	(1<<10)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
 
 #define GEN7_ROW_CHICKEN2		0xe4f4
@@ -4740,6 +5023,10 @@
 #define HSW_ROW_CHICKEN3		0xe49c
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
+#define HALF_SLICE_CHICKEN3		0xe184
+#define   GEN8_CENTROID_PIXEL_OPT_DIS	(1<<8)
+#define   GEN8_SAMPLER_POWER_BYPASS_DIS	(1<<1)
+
 #define G4X_AUD_VID_DID			(dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL		0x808629FB
 #define INTEL_AUDIO_DEVBLC		0x80862801
@@ -4781,6 +5068,18 @@
 					CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2		0xE50C0
 
+#define VLV_HDMIW_HDMIEDID_A		(VLV_DISPLAY_BASE + 0x62050)
+#define VLV_HDMIW_HDMIEDID_B		(VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					VLV_HDMIW_HDMIEDID_A, \
+					VLV_HDMIW_HDMIEDID_B)
+#define VLV_AUD_CNTL_ST_A		(VLV_DISPLAY_BASE + 0x620B4)
+#define VLV_AUD_CNTL_ST_B		(VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					VLV_AUD_CNTL_ST_A, \
+					VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2		(VLV_DISPLAY_BASE + 0x620C0)
+
 /* These are the 4 32-bit write offset registers for each stream
  * output buffer.  It determines the offset from the
  * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4797,6 +5096,12 @@
 #define CPT_AUD_CFG(pipe) _PIPE(pipe, \
 					CPT_AUD_CONFIG_A, \
 					CPT_AUD_CONFIG_B)
+#define VLV_AUD_CONFIG_A		(VLV_DISPLAY_BASE + 0x62000)
+#define VLV_AUD_CONFIG_B		(VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+					VLV_AUD_CONFIG_A, \
+					VLV_AUD_CONFIG_B)
+
 #define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT		20
@@ -4804,7 +5109,17 @@
 #define   AUD_CONFIG_LOWER_N_SHIFT		4
 #define   AUD_CONFIG_LOWER_N_VALUE		(0xfff << 4)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
-#define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK	(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175	(0 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200	(1 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000	(2 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027	(3 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000	(4 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054	(5 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176	(6 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250	(7 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352	(8 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500	(9 << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
 /* HSW Audio */
@@ -4929,6 +5244,7 @@
 #define DDI_BUF_CTL_B				0x64100
 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
 #define  DDI_BUF_CTL_ENABLE			(1<<31)
+/* Haswell */
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
 #define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
@@ -4938,6 +5254,16 @@
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+/* Broadwell */
+#define  DDI_BUF_EMP_400MV_0DB_BDW		(0<<24)   /* Sel0 */
+#define  DDI_BUF_EMP_400MV_3_5DB_BDW		(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_6DB_BDW		(2<<24)   /* Sel2 */
+#define  DDI_BUF_EMP_600MV_0DB_BDW		(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_600MV_3_5DB_BDW		(4<<24)   /* Sel4 */
+#define  DDI_BUF_EMP_600MV_6DB_BDW		(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_800MV_0DB_BDW		(6<<24)   /* Sel6 */
+#define  DDI_BUF_EMP_800MV_3_5DB_BDW		(7<<24)   /* Sel7 */
+#define  DDI_BUF_EMP_1200MV_0DB_BDW		(8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK			(0xf<<24)
 #define  DDI_BUF_PORT_REVERSAL			(1<<16)
 #define  DDI_BUF_IS_IDLE			(1<<7)
@@ -5047,6 +5373,9 @@
 #define  LCPLL_PLL_LOCK			(1<<30)
 #define  LCPLL_CLK_FREQ_MASK		(3<<26)
 #define  LCPLL_CLK_FREQ_450		(0<<26)
+#define  LCPLL_CLK_FREQ_54O_BDW		(1<<26)
+#define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
+#define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
@@ -5128,4 +5457,414 @@
 #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe)		_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define  DPI_ENABLE					(1 << 31) /* A + B */
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
+#define  DUAL_LINK_MODE_MASK				(1 << 26)
+#define  DUAL_LINK_MODE_FRONT_BACK			(0 << 26)
+#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE		(1 << 26)
+#define  DITHERING_ENABLE				(1 << 25) /* A + B */
+#define  FLOPPED_HSTX					(1 << 23)
+#define  DE_INVERT					(1 << 19) /* XXX */
+#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT		18
+#define  MIPIA_FLISDSI_DELAY_COUNT_MASK			(0xf << 18)
+#define  AFE_LATCHOUT					(1 << 17)
+#define  LP_OUTPUT_HOLD					(1 << 16)
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT		15
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK		(1 << 15)
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT		11
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 11)
+#define  CSB_SHIFT					9
+#define  CSB_MASK					(3 << 9)
+#define  CSB_20MHZ					(0 << 9)
+#define  CSB_10MHZ					(1 << 9)
+#define  CSB_40MHZ					(2 << 9)
+#define  BANDGAP_MASK					(1 << 8)
+#define  BANDGAP_PNW_CIRCUIT				(0 << 8)
+#define  BANDGAP_LNC_CIRCUIT				(1 << 8)
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT		5
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK		(7 << 5)
+#define  TEARING_EFFECT_DELAY				(1 << 4) /* A + B */
+#define  TEARING_EFFECT_SHIFT				2 /* A + B */
+#define  TEARING_EFFECT_MASK				(3 << 2)
+#define  TEARING_EFFECT_OFF				(0 << 2)
+#define  TEARING_EFFECT_DSI				(1 << 2)
+#define  TEARING_EFFECT_GPIO				(2 << 2)
+#define  LANE_CONFIGURATION_SHIFT			0
+#define  LANE_CONFIGURATION_MASK			(3 << 0)
+#define  LANE_CONFIGURATION_4LANE			(0 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_A			(1 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_B			(2 << 0)
+
+#define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe)		_PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define  TEARING_EFFECT_DELAY_SHIFT			0
+#define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG				(VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe)		_PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
+#define  ULPS_STATE_MASK				(3 << 1)
+#define  ULPS_STATE_ENTER				(2 << 1)
+#define  ULPS_STATE_EXIT				(1 << 1)
+#define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
+#define  DEVICE_READY					(1 << 0)
+
+#define _MIPIA_INTR_STAT			(VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT			(VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe)		_PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN				(VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN				(VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe)		_PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define  TEARING_EFFECT					(1 << 31)
+#define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
+#define  GEN_READ_DATA_AVAIL				(1 << 29)
+#define  LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+#define  HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+#define  RX_PROT_VIOLATION				(1 << 26)
+#define  RX_INVALID_TX_LENGTH				(1 << 25)
+#define  ACK_WITH_NO_ERROR				(1 << 24)
+#define  TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+#define  LP_RX_TIMEOUT					(1 << 22)
+#define  HS_TX_TIMEOUT					(1 << 21)
+#define  DPI_FIFO_UNDERRUN				(1 << 20)
+#define  LOW_CONTENTION					(1 << 19)
+#define  HIGH_CONTENTION				(1 << 18)
+#define  TXDSI_VC_ID_INVALID				(1 << 17)
+#define  TXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 16)
+#define  TXCHECKSUM_ERROR				(1 << 15)
+#define  TXECC_MULTIBIT_ERROR				(1 << 14)
+#define  TXECC_SINGLE_BIT_ERROR				(1 << 13)
+#define  TXFALSE_CONTROL_ERROR				(1 << 12)
+#define  RXDSI_VC_ID_INVALID				(1 << 11)
+#define  RXDSI_DATA_TYPE_NOT_REGOGNISED			(1 << 10)
+#define  RXCHECKSUM_ERROR				(1 << 9)
+#define  RXECC_MULTIBIT_ERROR				(1 << 8)
+#define  RXECC_SINGLE_BIT_ERROR				(1 << 7)
+#define  RXFALSE_CONTROL_ERROR				(1 << 6)
+#define  RXHS_RECEIVE_TIMEOUT_ERROR			(1 << 5)
+#define  RX_LP_TX_SYNC_ERROR				(1 << 4)
+#define  RXEXCAPE_MODE_ENTRY_ERROR			(1 << 3)
+#define  RXEOT_SYNC_ERROR				(1 << 2)
+#define  RXSOT_SYNC_ERROR				(1 << 1)
+#define  RXSOT_ERROR					(1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe)		_PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
+#define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
+#define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
+#define  CMD_MODE_DATA_WIDTH_9_BIT			(2 << 13)
+#define  CMD_MODE_DATA_WIDTH_8_BIT			(3 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION1			(4 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION2			(5 << 13)
+#define  VID_MODE_FORMAT_MASK				(0xf << 7)
+#define  VID_MODE_NOT_SUPPORTED				(0 << 7)
+#define  VID_MODE_FORMAT_RGB565				(1 << 7)
+#define  VID_MODE_FORMAT_RGB666				(2 << 7)
+#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
+#define  VID_MODE_FORMAT_RGB888				(4 << 7)
+#define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
+#define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
+#define  VID_MODE_CHANNEL_NUMBER_SHIFT			3
+#define  VID_MODE_CHANNEL_NUMBER_MASK			(3 << 3)
+#define  DATA_LANES_PRG_REG_SHIFT			0
+#define  DATA_LANES_PRG_REG_MASK			(7 << 0)
+
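
/*
 * Editor's sketch (values assumed): programming DSI_FUNC_PRG for a 4-lane
 * RGB888 video-mode panel on virtual channel 0.
 */
static void dsi_func_prg_sketch(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	u32 val = VID_MODE_FORMAT_RGB888;

	val |= 0 << VID_MODE_CHANNEL_NUMBER_SHIFT;	/* virtual channel 0 */
	val |= 4 << DATA_LANES_PRG_REG_SHIFT;		/* four data lanes */

	I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
}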
+#define _MIPIA_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define  TURN_AROUND_TIMEOUT_MASK			0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe)	_PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define  DEVICE_RESET_TIMER_MASK			0xffff
+
+#define _MIPIA_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe)	_PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define  VERTICAL_ADDRESS_SHIFT				16
+#define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
+#define  HORIZONTAL_ADDRESS_SHIFT			0
+#define  HORIZONTAL_ADDRESS_MASK			0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe)	_PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define  DBI_FIFO_EMPTY_HALF				(0 << 0)
+#define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
+#define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe)	_PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)	_PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe)		_PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define  DPI_LP_MODE					(1 << 6)
+#define  BACKLIGHT_OFF					(1 << 5)
+#define  BACKLIGHT_ON					(1 << 4)
+#define  COLOR_MODE_OFF					(1 << 3)
+#define  COLOR_MODE_ON					(1 << 2)
+#define  TURN_ON					(1 << 1)
+#define  SHUTDOWN					(1 << 0)
+
+#define _MIPIA_DPI_DATA				(VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA				(VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe)		_PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define  COMMAND_BYTE_SHIFT				0
+#define  COMMAND_BYTE_MASK				(0x3f << 0)
+
+#define _MIPIA_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe)		_PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define  MASTER_INIT_TIMER_SHIFT			0
+#define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe)	_PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define  MAX_RETURN_PKT_SIZE_SHIFT			0
+#define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe)	_PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
+#define  DISABLE_VIDEO_BTA				(1 << 3)
+#define  IP_TG_CONFIG					(1 << 2)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE		(1 << 0)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
+#define  VIDEO_MODE_BURST				(3 << 0)
+
+#define _MIPIA_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe)		_PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 7)
+#define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 6)
+#define  LOW_CONTENTION_RECOVERY_DISABLE		(1 << 5)
+#define  HIGH_CONTENTION_RECOVERY_DISABLE		(1 << 4)
+#define  TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define  TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE		(1 << 2)
+#define  CLOCKSTOP					(1 << 1)
+#define  EOT_DISABLE					(1 << 0)
+
+#define _MIPIA_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe)		_PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define  LP_BYTECLK_SHIFT				0
+#define  LP_BYTECLK_MASK				(0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define  LONG_PACKET_WORD_COUNT_SHIFT			8
+#define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
+#define  SHORT_PACKET_PARAM_SHIFT			8
+#define  SHORT_PACKET_PARAM_MASK			(0xffff << 8)
+#define  VIRTUAL_CHANNEL_SHIFT				6
+#define  VIRTUAL_CHANNEL_MASK				(3 << 6)
+#define  DATA_TYPE_SHIFT				0
+#define  DATA_TYPE_MASK					(0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe)	_PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define  DPI_FIFO_EMPTY					(1 << 28)
+#define  DBI_FIFO_EMPTY					(1 << 27)
+#define  LP_CTRL_FIFO_EMPTY				(1 << 26)
+#define  LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+#define  LP_CTRL_FIFO_FULL				(1 << 24)
+#define  HS_CTRL_FIFO_EMPTY				(1 << 18)
+#define  HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+#define  HS_CTRL_FIFO_FULL				(1 << 16)
+#define  LP_DATA_FIFO_EMPTY				(1 << 10)
+#define  LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+#define  LP_DATA_FIFO_FULL				(1 << 8)
+#define  HS_DATA_FIFO_EMPTY				(1 << 2)
+#define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+#define  HS_DATA_FIFO_FULL				(1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe)	_PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define  DBI_HS_LP_MODE_MASK				(1 << 0)
+#define  DBI_LP_MODE					(1 << 0)
+#define  DBI_HS_MODE					(0 << 0)
+
+#define _MIPIA_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe)		_PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define  EXIT_ZERO_COUNT_SHIFT				24
+#define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
+#define  TRAIL_COUNT_SHIFT				16
+#define  TRAIL_COUNT_MASK				(0x1f << 16)
+#define  CLK_ZERO_COUNT_SHIFT				8
+#define  CLK_ZERO_COUNT_MASK				(0xff << 8)
+#define  PREPARE_COUNT_SHIFT				0
+#define  PREPARE_COUNT_MASK				(0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe)		_PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)	_PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define  LP_HS_SSW_CNT_SHIFT				16
+#define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
+#define  HS_LP_PWR_SW_CNT_SHIFT				0
+#define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe)	_PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define  STOP_STATE_STALL_COUNTER_SHIFT			0
+#define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define  RX_CONTENTION_DETECTED				(1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL			(VLV_DISPLAY_BASE + 0xb100)
+#define  DBI_TYPEC_ENABLE				(1 << 31)
+#define  DBI_TYPEC_WIP					(1 << 30)
+#define  DBI_TYPEC_OPTION_SHIFT				28
+#define  DBI_TYPEC_OPTION_MASK				(3 << 28)
+#define  DBI_TYPEC_FREQ_SHIFT				24
+#define  DBI_TYPEC_FREQ_MASK				(0xf << 24)
+#define  DBI_TYPEC_OVERRIDE				(1 << 8)
+#define  DBI_TYPEC_OVERRIDE_COUNTER_SHIFT		0
+#define  DBI_TYPEC_OVERRIDE_COUNTER_MASK		(0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL				(VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL				(VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe)			_PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
+#define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_1				(0 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_2				(1 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_4				(2 << 5)
+#define  READ_REQUEST_PRIORITY_SHIFT			3
+#define  READ_REQUEST_PRIORITY_MASK			(3 << 3)
+#define  READ_REQUEST_PRIORITY_LOW			(0 << 3)
+#define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
+#define  RGB_FLIP_TO_BGR				(1 << 2)
+
+#define _MIPIA_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe)		_PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define  DATA_MEM_ADDRESS_SHIFT				5
+#define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
+#define  DATA_VALID					(1 << 0)
+
+#define _MIPIA_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe)		_PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define  DATA_LENGTH_SHIFT				0
+#define  DATA_LENGTH_MASK				(0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe)	_PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define  COMMAND_MEM_ADDRESS_SHIFT			5
+#define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
+#define  AUTO_PWG_ENABLE				(1 << 2)
+#define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
+#define  COMMAND_VALID					(1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe)	_PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
+#define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe)	_PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define  READ_DATA_VALID(n)				(1 << (n))
+
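/*
 * Editor's sketch, not from the patch: draining a generic read response
 * with the two registers above. "n" indexes the eight return dwords; the
 * valid bits say which of them carry data.
 */
static int mipi_read_return_sketch(struct drm_i915_private *dev_priv,
				   enum pipe pipe, u32 *data)
{
	u32 valid = I915_READ(MIPI_READ_DATA_VALID(pipe));
	int n, count = 0;

	for (n = 0; n < 8; n++)
		if (valid & READ_DATA_VALID(n))
			data[count++] = I915_READ(MIPI_READ_DATA_RETURN(pipe, n));

	return count;
}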
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618..98790c7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -214,6 +214,22 @@
 		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+
+		dev_priv->regfile.saveBLC_PWM_CTL =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_HIST_CTL =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL2 =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL_B =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_HIST_CTL_B =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_PWM_CTL2_B =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 		I915_WRITE(RSTDBYCTL,
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+	} else if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL_B);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_HIST_CTL_B);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL2_B);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
 		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@@ -340,7 +369,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_read_config_byte(dev->pdev, LBB,
+				     &dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -367,7 +398,8 @@
 	intel_disable_gt_powersave(dev);
 
 	/* Cache mode state */
-	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
 	/* Memory Arbitration state */
 	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +422,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_write_config_byte(dev->pdev, LBB,
+				      dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -414,7 +448,9 @@
 	}
 
 	/* Cache mode state */
-	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+	if (INTEL_INFO(dev)->gen < 7)
+		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+			   0xffff0000);
 
 	/* Memory arbitration state */
 	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112..cef38fd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
 #ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 raw_time; /* 32b value may overflow during fixed point math */
+	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
 
 	if (!intel_enable_rc6(dev))
 		return 0;
 
-	raw_time = I915_READ(reg) * 128ULL;
-	return DIV_ROUND_UP_ULL(raw_time, 100000);
+	/* On VLV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(dev)) {
+		u32 clkctl2;
+
+		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+			CLK_CTL2_CZCOUNT_30NS_SHIFT;
+		if (WARN(!clkctl2, "bogus CZ count value"))
+			return 0;
+		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+			units <<= 8;
+
+		div = 1000000ULL * bias;
+	}
+
+	raw_time = I915_READ(reg) * units;
+	return DIV_ROUND_UP_ULL(raw_time, div);
 }
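/*
 * Editor's worked example with assumed numbers: if CZCOUNT_30NS reads 1,
 * one counter tick is ~30 ns, so units = DIV_ROUND_UP(30 * 100, 1) = 3000
 * and div = 1000000 * 100 = 100000000. A raw count of 1000000 ticks then
 * yields DIV_ROUND_UP(1000000 * 3000, 100000000) = 30, i.e. 30 ms, which
 * matches 1000000 * 30 ns computed directly; the bias of 100 just carries
 * two guard digits through the integer division.
 */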
 
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
@@ -63,16 +83,20 @@
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6p_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6pp_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
@@ -97,7 +121,7 @@
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -115,31 +139,34 @@
 	     loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	uint32_t misccpctl;
-	int i, ret;
+	int slice = (int)(uintptr_t)attr->private;
+	int ret;
+
+	count = round_down(count, 4);
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
-		*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	if (dev_priv->l3_parity.remap_info[slice])
+		memcpy(buf,
+		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       count);
+	else
+		memset(buf, 0, count);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
-	return i - offset;
+	return count;
 }
 
 static ssize_t
@@ -148,21 +175,26 @@
 	      loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct i915_hw_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
+	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	if (dev_priv->hw_contexts_disabled)
+		return -ENXIO;
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->l3_parity.remap_info) {
+	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
 			mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@
 	 * at this point it is left as a TODO.
 	*/
 	if (temp)
-		dev_priv->l3_parity.remap_info = temp;
+		dev_priv->l3_parity.remap_info[slice] = temp;
 
-	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
-	       buf + (offset/4),
-	       count);
+	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
 
-	i915_gem_l3_remap(drm_dev);
+	/* NB: We defer the remapping until we switch to the context */
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		ctx->remap_slice |= (1<<slice);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
@@ -200,17 +232,29 @@
 	.size = GEN7_L3LOG_SIZE,
 	.read = i915_l3_read,
 	.write = i915_l3_write,
-	.mmap = NULL
+	.mmap = NULL,
+	.private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL,
+	.private = (void *)1
 };
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -227,7 +271,7 @@
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -238,11 +282,13 @@
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap;
@@ -436,7 +488,7 @@
 {
 
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@
 				 loff_t off, size_t count)
 {
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
 
@@ -501,27 +553,34 @@
 
 #ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
 #endif
-	if (HAS_L3_GPU_CACHE(dev)) {
-		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+	if (HAS_L3_DPF(dev)) {
+		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
+
+		if (NUM_L3_SLICES(dev) > 1) {
+			ret = device_create_bin_file(dev->primary->kdev,
+						     &dpf_attrs_1);
+			if (ret)
+				DRM_ERROR("l3 parity slice 1 setup failed\n");
+		}
 	}
 
 	ret = 0;
 	if (IS_VALLEYVIEW(dev))
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
 				    &error_state_attr);
 	if (ret)
 		DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
-	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
 	if (IS_VALLEYVIEW(dev))
-		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else
-		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
-	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
 #endif
 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6..6e580c9 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@
 	    TP_printk("dev=%d", __entry->dev)
 );
 
+TRACE_EVENT(i915_gem_evict_vm,
+	    TP_PROTO(struct i915_address_space *vm),
+	    TP_ARGS(vm),
+
+	    TP_STRUCT__entry(
+			     __field(struct i915_address_space *, vm)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->vm = vm;
+			  ),
+
+	    TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+	    TP_PROTO(struct intel_ring_buffer *from,
+		     struct intel_ring_buffer *to,
+		     u32 seqno),
+	    TP_ARGS(from, to, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, sync_from)
+			     __field(u32, sync_to)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = from->dev->primary->index;
+			   __entry->sync_from = from->id;
+			   __entry->sync_to = to->id;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+		      __entry->dev,
+		      __entry->sync_from, __entry->sync_to,
+		      __entry->seqno)
+);
+
 TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
 	    TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@
 	    TP_ARGS(ring, seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
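+			   /* false: skip the lazy-coherency fast path so
+			    * the traced seqno is an up-to-date read */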
+			   __entry->seqno = ring->get_seqno(ring, false);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 57fe1ae..43959ed 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -193,16 +193,14 @@
 
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
-	acpi_handle dhandle, intel_handle;
-	acpi_status status;
+	acpi_handle dhandle;
 	int ret;
 
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
 	if (!dhandle)
 		return false;
 
-	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
-	if (ACPI_FAILURE(status)) {
+	if (!acpi_has_method(dhandle, "_DSM")) {
 		DRM_DEBUG_KMS("no _DSM method for intel device\n");
 		return false;
 	}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed..6dd622d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@
 {
 	struct sdvo_device_mapping *p_mapping;
 	struct bdb_general_definitions *p_defs;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -416,36 +416,36 @@
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->old.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		if (p_child->slave_addr != SLAVE_ADDR1 &&
-			p_child->slave_addr != SLAVE_ADDR2) {
+		if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+			p_child->old.slave_addr != SLAVE_ADDR2) {
 			/*
 			 * If the slave address is neither 0x70 nor 0x72,
 			 * it is not a SDVO device. Skip it.
 			 */
 			continue;
 		}
-		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
-			p_child->dvo_port != DEVICE_PORT_DVOC) {
+		if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+			p_child->old.dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
 			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
 				" %s port\n",
-				p_child->slave_addr,
-				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
+				p_child->old.slave_addr,
+				(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
 					"SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+		p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
 		if (!p_mapping->initialized) {
-			p_mapping->dvo_port = p_child->dvo_port;
-			p_mapping->slave_addr = p_child->slave_addr;
-			p_mapping->dvo_wiring = p_child->dvo_wiring;
-			p_mapping->ddc_pin = p_child->ddc_pin;
-			p_mapping->i2c_pin = p_child->i2c_pin;
+			p_mapping->dvo_port = p_child->old.dvo_port;
+			p_mapping->slave_addr = p_child->old.slave_addr;
+			p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+			p_mapping->ddc_pin = p_child->old.ddc_pin;
+			p_mapping->i2c_pin = p_child->old.i2c_pin;
 			p_mapping->initialized = 1;
 			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
@@ -457,7 +457,7 @@
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO devices.\n");
 		}
-		if (p_child->slave2_addr) {
+		if (p_child->old.slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
 			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@
 parse_driver_features(struct drm_i915_private *dev_priv,
 		       struct bdb_header *bdb)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct bdb_driver_features *driver;
 
 	driver = find_section(bdb, BDB_DRIVER_FEATURES);
 	if (!driver)
 		return;
 
-	if (SUPPORTS_EDP(dev) &&
-	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
 		dev_priv->vbt.edp_support = 1;
 
 	if (driver->dual_frequency)
@@ -501,7 +499,7 @@
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+		if (dev_priv->vbt.edp_support)
 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
 		return;
 	}
@@ -569,11 +567,149 @@
 }
 
 static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_mipi *mipi;
+
+	mipi = find_section(bdb, BDB_MIPI);
+	if (!mipi) {
+		DRM_DEBUG_KMS("No MIPI BDB found\n");
+		return;
+	}
+
+	/* XXX: add more info */
+	dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+			   struct bdb_header *bdb)
+{
+	union child_device_config *it, *child = NULL;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	uint8_t hdmi_level_shift;
+	int i, j;
+	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+	uint8_t aux_channel;
+	/* Each DDI port can have more than one value on the "DVO Port" field,
+	 * so look for all the possible values for each port and abort if more
+	 * than one is found. */
+	int dvo_ports[][2] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC},
+		{DVO_PORT_HDMID, DVO_PORT_DPD},
+		{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+	};
+
+	/* Find the child device to use, abort if more than one found. */
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		it = dev_priv->vbt.child_dev + i;
+
+		for (j = 0; j < 2; j++) {
+			if (dvo_ports[port][j] == -1)
+				break;
+
+			if (it->common.dvo_port == dvo_ports[port][j]) {
+				if (child) {
+					DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+						      port_name(port));
+					return;
+				}
+				child = it;
+			}
+		}
+	}
+	if (!child)
+		return;
+
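+	/* The DP AUX channel field is not part of the version-stable
+	 * common layout, so read it straight from the raw child device
+	 * block (byte 25). */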
+	aux_channel = child->raw[25];
+
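+	/* Decode the device_type bits: HDMI is TMDS/DVI signaling without
+	 * the NOT_HDMI bit, and eDP is a DP output marked as an internal
+	 * connector. */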
+	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+	is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+	info->supports_dvi = is_dvi;
+	info->supports_hdmi = is_hdmi;
+	info->supports_dp = is_dp;
+
+	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+	if (is_edp && is_dvi)
+		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+			      port_name(port));
+	if (is_crt && port != PORT_E)
+		DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+	if (is_crt && (is_dvi || is_dp))
+		DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+			      port_name(port));
+	if (is_dvi && (port == PORT_A || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
+	if (!is_dvi && !is_dp && !is_crt)
+		DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+			      port_name(port));
+	if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+	if (is_dvi) {
+		if (child->common.ddc_pin == 0x05 && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+		if (child->common.ddc_pin == 0x04 && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+		if (child->common.ddc_pin == 0x06 && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+	}
+
+	if (is_dp) {
+		if (aux_channel == 0x40 && port != PORT_A)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+		if (aux_channel == 0x10 && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+		if (aux_channel == 0x20 && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+		if (aux_channel == 0x30 && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+	}
+
+	if (bdb->version >= 158) {
+		/* The VBT HDMI level shift values match the table we have. */
+		hdmi_level_shift = child->raw[7] & 0xF;
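+		/* The 4-bit value indexes the 12-entry HDMI buffer
+		 * translation table (0x0-0xB); anything above that does
+		 * not map to a table entry, so keep the default. */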
+		if (hdmi_level_shift < 0xC) {
+			DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+				      port_name(port),
+				      hdmi_level_shift);
+			info->hdmi_level_shift = hdmi_level_shift;
+		}
+	}
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+			    struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum port port;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return;
+
+	if (bdb->version < 155)
+		return;
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++)
+		parse_ddi_port(dev_priv, port, bdb);
+}
+
+static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
 		       struct bdb_header *bdb)
 {
 	struct bdb_general_definitions *p_defs;
-	struct child_device_config *p_child, *child_dev_ptr;
+	union child_device_config *p_child, *child_dev_ptr;
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -601,7 +737,7 @@
 	/* get the number of child device that is present */
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->common.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
@@ -621,7 +757,7 @@
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->common.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
@@ -637,6 +773,7 @@
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	enum port port;
 
 	dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
@@ -655,6 +792,18 @@
 	dev_priv->vbt.lvds_use_ssc = 1;
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
 	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+		struct ddi_vbt_port_info *info =
+			&dev_priv->vbt.ddi_port_info[port];
+
+		/* Recommended BSpec default: 800mV 0dB (entry 6 in the
+		 * HDMI buffer translation table). */
+		info->hdmi_level_shift = 6;
+
+		info->supports_dvi = (port != PORT_A && port != PORT_E);
+		info->supports_hdmi = info->supports_dvi;
+		info->supports_dp = (port != PORT_E);
+	}
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@
 	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
+	parse_mipi(dev_priv, bdb);
+	parse_ddi_ports(dev_priv, bdb);
 
 	if (bios)
 		pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f..f580a2b 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@
 #define BDB_LVDS_LFP_DATA	 42
 #define BDB_LVDS_BACKLIGHT	 43
 #define BDB_LVDS_POWER		 44
+#define BDB_MIPI		 50
 #define BDB_SKIP		254 /* VBIOS private block, ignore */
 
 struct bdb_general_features {
@@ -201,7 +202,10 @@
 #define DEVICE_PORT_DVOB	0x01
 #define DEVICE_PORT_DVOC	0x02
 
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
 	u16 handle;
 	u16 device_type;
 	u8  device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@
 	u8  dvo_function;
 } __attribute__((packed));
 
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8 not_common1[12];
+	u8 dvo_port;
+	u8 not_common2[2];
+	u8 ddc_pin;
+	u16 edid_ptr;
+} __attribute__((packed));
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+	/* This one is safe to be used anywhere, but the code should still check
+	 * the BDB version. */
+	u8 raw[33];
+	/* This one should only be kept for legacy code. */
+	struct old_child_dev_config old;
+	/* This one should also be safe to use anywhere, even without version
+	 * checks. */
+	struct common_child_dev_config common;
+};
+
 struct bdb_general_definitions {
 	/* DDC GPIO */
 	u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@
 	 * number = (block_size - sizeof(bdb_general_definitions))/
 	 *	     sizeof(child_device_config);
 	 */
-	struct child_device_config devices[0];
+	union child_device_config devices[0];
 } __attribute__((packed));
 
 struct bdb_lvds_options {
@@ -608,6 +638,40 @@
 #define	 DEVICE_TYPE_DP		0x68C6
 #define	 DEVICE_TYPE_eDP	0x78C6
 
+#define  DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
+#define  DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
+#define  DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
+#define  DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
+#define  DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
+#define  DEVICE_TYPE_MIPI_OUTPUT	(1 << 10)
+#define  DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
+#define  DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
+#define  DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
+#define  DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
+#define  DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
+#define  DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
+#define  DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
+#define  DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
+#define  DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
+	 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
+	 DEVICE_TYPE_MIPI_OUTPUT | \
+	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
+	 DEVICE_TYPE_DUAL_CHANNEL | \
+	 DEVICE_TYPE_LVDS_SINGALING | \
+	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+	 DEVICE_TYPE_VIDEO_SIGNALING | \
+	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+	 DEVICE_TYPE_DIGITAL_OUTPUT | \
+	 DEVICE_TYPE_ANALOG_OUTPUT)
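+/* Intended use (a sketch; the comparison masks out the don't-care bits):
+ *   (device_type & DEVICE_TYPE_eDP_BITS) ==
+ *   (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)
+ */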
+
 /* define the DVO port for HDMI output type */
 #define		DVO_B		1
 #define		DVO_C		2
@@ -618,4 +682,57 @@
 #define		PORT_IDPC	8
 #define		PORT_IDPD	9
 
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA	0
+#define DVO_PORT_HDMIB	1
+#define DVO_PORT_HDMIC	2
+#define DVO_PORT_HDMID	3
+#define DVO_PORT_LVDS	4
+#define DVO_PORT_TV	5
+#define DVO_PORT_CRT	6
+#define DVO_PORT_DPB	7
+#define DVO_PORT_DPC	8
+#define DVO_PORT_DPD	9
+#define DVO_PORT_DPA	10
+
+/* MIPI DSI panel info */
+struct bdb_mipi {
+	u16 panel_id;
+	u16 bridge_revision;
+
+	/* General params */
+	u32 dithering:1;
+	u32 bpp_pixel_format:1;
+	u32 rsvd1:1;
+	u32 dphy_valid:1;
+	u32 resvd2:28;
+
+	u16 port_info;
+	u16 rsvd3:2;
+	u16 num_lanes:2;
+	u16 rsvd4:12;
+
+	/* DSI config */
+	u16 virt_ch_num:2;
+	u16 vtm:2;
+	u16 rsvd5:12;
+
+	u32 dsi_clock;
+	u32 bridge_ref_clk;
+	u16 rsvd_pwr;
+
+	/* Dphy Params */
+	u32 prepare_cnt:5;
+	u32 rsvd6:3;
+	u32 clk_zero_cnt:8;
+	u32 trail_cnt:5;
+	u32 rsvd7:3;
+	u32 exit_zero_cnt:6;
+	u32 rsvd8:2;
+
+	u32 hl_switch_cnt;
+	u32 lp_byte_clk;
+	u32 clk_lane_switch_cnt;
+} __attribute__((packed));
+
 #endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 10d1de5..b5b1b9b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -107,7 +107,17 @@
 static void intel_crt_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_config *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
+	int dotclock;
+
 	pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -264,7 +274,7 @@
 	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 	u32 adpa;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev)->gen >= 5)
 		adpa = ADPA_HOTPLUG_BITS;
 	else
 		adpa = 0;
@@ -366,9 +376,6 @@
 
 	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
-	/* FIXME: debug force function and remove */
-	ret = true;
-
 	return ret;
 }
 
@@ -670,7 +677,6 @@
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -776,7 +782,7 @@
 	if (!crt)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(crt);
 		return;
@@ -816,16 +822,15 @@
 	crt->base.mode_set = intel_crt_mode_set;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
-	if (IS_HASWELL(dev))
-		crt->base.get_config = hsw_crt_get_config;
-	else
-		crt->base.get_config = intel_crt_get_config;
 	if (I915_HAS_HOTPLUG(dev))
 		crt->base.hpd_pin = HPD_CRT;
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev)) {
+		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
-	else
+	} else {
+		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
+	}
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b53fff8..1591576 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@
 	0x80C30FFF, 0x000B0000,
 	0x00FFFFFF, 0x00040006,
 	0x80D75FFF, 0x000B0000,
-	0x00FFFFFF, 0x00040006		/* HDMI parameters */
 };
 
 static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,64 @@
 	0x00C30FFF, 0x001E0000,
 	0x00FFFFFF, 0x00060006,
 	0x00D75FFF, 0x001E0000,
-	0x00FFFFFF, 0x00040006		/* HDMI parameters */
 };
 
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const u32 hsw_ddi_translations_hdmi[] = {
+				/* Idx	NT mV diff	T mV diff	db  */
+	0x00FFFFFF, 0x0006000E, /* 0:	400		400		0   */
+	0x00E79FFF, 0x000E000C, /* 1:	400		500		2   */
+	0x00D75FFF, 0x0005000A, /* 2:	400		600		3.5 */
+	0x00FFFFFF, 0x0005000A, /* 3:	600		600		0   */
+	0x00E79FFF, 0x001D0007, /* 4:	600		750		2   */
+	0x00D75FFF, 0x000C0004, /* 5:	600		900		3.5 */
+	0x00FFFFFF, 0x00040006, /* 6:	800		800		0   */
+	0x80E79FFF, 0x00030002, /* 7:	800		1000		2   */
+	0x00FFFFFF, 0x00140005, /* 8:	850		850		0   */
+	0x00FFFFFF, 0x000C0004, /* 9:	900		900		0   */
+	0x00FFFFFF, 0x001C0003, /* 10:	950		950		0   */
+	0x80FFFFFF, 0x00030002, /* 11:	1000		1000		0   */
+};
+
+static const u32 bdw_ddi_translations_edp[] = {
+	0x00FFFFFF, 0x00000012,		/* DP parameters */
+	0x00EBAFFF, 0x00020011,
+	0x00C71FFF, 0x0006000F,
+	0x00FFFFFF, 0x00020011,
+	0x00DB6FFF, 0x0005000F,
+	0x00BEEFFF, 0x000A000C,
+	0x00FFFFFF, 0x0005000F,
+	0x00DB6FFF, 0x000A000C,
+	0x00FFFFFF, 0x000A000C,
+	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB */
+};
+
+static const u32 bdw_ddi_translations_dp[] = {
+	0x00FFFFFF, 0x0007000E,		/* DP parameters */
+	0x00D75FFF, 0x000E000A,
+	0x00BEFFFF, 0x00140006,
+	0x00FFFFFF, 0x000E000A,
+	0x00D75FFF, 0x00180004,
+	0x80CB2FFF, 0x001B0002,
+	0x00F7DFFF, 0x00180004,
+	0x80D75FFF, 0x001B0002,
+	0x80FFFFFF, 0x001B0002,
+	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB */
+};
+
+static const u32 bdw_ddi_translations_fdi[] = {
+	0x00FFFFFF, 0x0001000E,		/* FDI parameters */
+	0x00D75FFF, 0x0004000A,
+	0x00C30FFF, 0x00070006,
+	0x00AAAFFF, 0x000C0000,
+	0x00FFFFFF, 0x0004000A,
+	0x00D75FFF, 0x00090004,
+	0x00C30FFF, 0x000C0000,
+	0x00FFFFFF, 0x00070006,
+	0x00D75FFF, 0x000C0000,
+	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB */
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	int type = intel_encoder->type;
@@ -78,8 +131,9 @@
 	}
 }
 
-/* On Haswell, DDI port buffers must be programmed with correct values
- * in advance. The buffer values are different for FDI and DP modes,
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. The buffer values are different for FDI and DP modes,
  * but the HDMI/DVI fields are shared among those. So we program the DDI
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
@@ -89,15 +143,58 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 	int i;
-	const u32 *ddi_translations = (port == PORT_E) ?
-		hsw_ddi_translations_fdi :
-		hsw_ddi_translations_dp;
+	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	const u32 *ddi_translations_fdi;
+	const u32 *ddi_translations_dp;
+	const u32 *ddi_translations_edp;
+	const u32 *ddi_translations;
+
+	if (IS_BROADWELL(dev)) {
+		ddi_translations_fdi = bdw_ddi_translations_fdi;
+		ddi_translations_dp = bdw_ddi_translations_dp;
+		ddi_translations_edp = bdw_ddi_translations_edp;
+	} else if (IS_HASWELL(dev)) {
+		ddi_translations_fdi = hsw_ddi_translations_fdi;
+		ddi_translations_dp = hsw_ddi_translations_dp;
+		ddi_translations_edp = hsw_ddi_translations_dp;
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		ddi_translations_edp = bdw_ddi_translations_dp;
+		ddi_translations_fdi = bdw_ddi_translations_fdi;
+		ddi_translations_dp = bdw_ddi_translations_dp;
+	}
+
+	switch (port) {
+	case PORT_A:
+		ddi_translations = ddi_translations_edp;
+		break;
+	case PORT_B:
+	case PORT_C:
+		ddi_translations = ddi_translations_dp;
+		break;
+	case PORT_D:
+		if (intel_dpd_is_edp(dev))
+			ddi_translations = ddi_translations_edp;
+		else
+			ddi_translations = ddi_translations_dp;
+		break;
+	case PORT_E:
+		ddi_translations = ddi_translations_fdi;
+		break;
+	default:
+		BUG();
+	}
 
 	for (i = 0, reg = DDI_BUF_TRANS(port);
 	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
 		I915_WRITE(reg, ddi_translations[i]);
 		reg += 4;
 	}
+	/* Entry 9 is the HDMI/DVI pair: two dwords taken from the HDMI
+	 * table, selected by the VBT-provided level shift. */
+	for (i = 0; i < 2; i++) {
+		I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+		reg += 4;
+	}
 }
 
 /* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +393,6 @@
 			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
 			intel_write_eld(&encoder->base, adjusted_mode);
 		}
-
-		intel_dp_init_link_config(intel_dp);
-
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
@@ -739,7 +833,8 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -767,18 +862,19 @@
 		BUG();
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
 		temp |= TRANS_DDI_PVSYNC;
-	if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
 		temp |= TRANS_DDI_PHSYNC;
 
 	if (cpu_transcoder == TRANSCODER_EDP) {
 		switch (pipe) {
 		case PIPE_A:
-			/* Can only use the always-on power well for eDP when
-			 * not using the panel fitter, and when not using motion
-			  * blur mitigation (which we don't support). */
-			if (intel_crtc->config.pch_pfit.enabled)
+			/* On Haswell, can only use the always-on power well for
+			 * eDP when not using the panel fitter, and when not
+			 * using motion blur mitigation (which we don't
+			 * support). */
+			if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
 				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 			else
 				temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1139,18 +1235,29 @@
 
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
+	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
-	if (lcpll & LCPLL_CD_SOURCE_FCLK)
+	if (lcpll & LCPLL_CD_SOURCE_FCLK) {
 		return 800000;
-	else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+	} else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
 		return 450000;
-	else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
+	} else if (freq == LCPLL_CLK_FREQ_450) {
 		return 450000;
-	else if (IS_ULT(dev_priv->dev))
-		return 337500;
-	else
-		return 540000;
+	} else if (IS_HASWELL(dev)) {
+		if (IS_ULT(dev))
+			return 337500;
+		else
+			return 540000;
+	} else {
+		if (freq == LCPLL_CLK_FREQ_54O_BDW)
+			return 540000;
+		else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+			return 337500;
+		else
+			return 675000;
+	}
 }
 
 void intel_ddi_pll_init(struct drm_device *dev)
@@ -1202,7 +1309,7 @@
 
 	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
 	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-	if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
 	I915_WRITE(DP_TP_CTL(port), val);
 	POSTING_READ(DP_TP_CTL(port));
@@ -1285,6 +1392,20 @@
 	default:
 		break;
 	}
+
+	switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+	case TRANS_DDI_MODE_SELECT_DVI:
+	case TRANS_DDI_MODE_SELECT_FDI:
+		break;
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		pipe_config->has_dp_encoder = true;
+		intel_dp_get_m_n(intel_crtc, pipe_config);
+		break;
+	default:
+		break;
+	}
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1314,6 +1435,41 @@
 	.destroy = intel_ddi_destroy,
 };
 
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+	if (!intel_dp_init_connector(intel_dig_port, connector)) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+	intel_hdmi_init_connector(intel_dig_port, connector);
+
+	return connector;
+}
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1322,17 +1478,22 @@
 	struct drm_encoder *encoder;
 	struct intel_connector *hdmi_connector = NULL;
 	struct intel_connector *dp_connector = NULL;
+	bool init_hdmi, init_dp;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+	if (!init_dp && !init_hdmi) {
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+			      port_name(port));
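+		/* Don't trust a VBT that claims the port supports nothing;
+		 * init both and let output detection sort it out. */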
+		init_hdmi = true;
+		init_dp = true;
+	}
+
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!dp_connector) {
-		kfree(intel_dig_port);
-		return;
-	}
-
 	intel_encoder = &intel_dig_port->base;
 	encoder = &intel_encoder->base;
 
@@ -1352,28 +1513,22 @@
 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
 					  (DDI_BUF_PORT_REVERSAL |
 					   DDI_A_4_LANES);
-	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = false;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-	if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
+	if (init_dp)
+		dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+
+	/* In theory we don't need the encoder->type check, but leave it just in
+	 * case we have some really bad VBTs... */
+	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+		hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
+
+	if (!dp_connector && !hdmi_connector) {
 		drm_encoder_cleanup(encoder);
 		kfree(intel_dig_port);
-		kfree(dp_connector);
-		return;
-	}
-
-	if (intel_encoder->type != INTEL_OUTPUT_EDP) {
-		hdmi_connector = kzalloc(sizeof(struct intel_connector),
-					 GFP_KERNEL);
-		if (!hdmi_connector) {
-			return;
-		}
-
-		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
-		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d78d33f..3cddd50 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
 #include <drm/drm_crtc_helper.h>
 #include <linux/dma_remapping.h>
 
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_config *pipe_config);
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config);
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			  int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@
 	intel_p2_t	    p2;
 };
 
-/* FDI */
-#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
-
 int
 intel_pch_rawclk(struct drm_device *dev)
 {
@@ -313,44 +309,44 @@
 		.p2_slow = 7, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_vlv_dac = {
-	.dot = { .min = 25000, .max = 270000 },
+static const intel_limit_t intel_limits_vlv = {
+	 /*
+	  * These are the data rate limits (measured in fast clocks)
+	  * since those are the strictest limits we have. The fast
+	  * clock and actual rate limits are more relaxed, so checking
+	  * them would make no difference.
+	  */
+	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
 	.vco = { .min = 4000000, .max = 6000000 },
 	.n = { .min = 1, .max = 7 },
-	.m = { .min = 22, .max = 450 }, /* guess */
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 1, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
-};
-
-static const intel_limit_t intel_limits_vlv_hdmi = {
-	.dot = { .min = 25000, .max = 270000 },
-	.vco = { .min = 4000000, .max = 6000000 },
-	.n = { .min = 1, .max = 7 },
-	.m = { .min = 60, .max = 300 }, /* guess */
-	.m1 = { .min = 2, .max = 3 },
-	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
 	.p1 = { .min = 2, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
+	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
-static const intel_limit_t intel_limits_vlv_dp = {
-	.dot = { .min = 25000, .max = 270000 },
-	.vco = { .min = 4000000, .max = 6000000 },
-	.n = { .min = 1, .max = 7 },
-	.m = { .min = 22, .max = 450 },
-	.m1 = { .min = 2, .max = 3 },
-	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 1, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
-};
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->type == type)
+			return true;
+
+	return false;
+}
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 						int refclk)
@@ -412,12 +408,7 @@
 		else
 			limit = &intel_limits_pineview_sdvo;
 	} else if (IS_VALLEYVIEW(dev)) {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
-			limit = &intel_limits_vlv_dac;
-		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
-			limit = &intel_limits_vlv_hdmi;
-		else
-			limit = &intel_limits_vlv_dp;
+		limit = &intel_limits_vlv;
 	} else if (!IS_GEN2(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / clock->n;
-	clock->dot = clock->vco / clock->p;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
-
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->type == type)
-			return true;
-
-	return false;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -481,20 +457,26 @@
 			       const intel_limit_t *limit,
 			       const intel_clock_t *clock)
 {
+	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+		INTELPllInvalid("n out of range\n");
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid("p1 out of range\n");
-	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
-		INTELPllInvalid("p out of range\n");
 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
 		INTELPllInvalid("m2 out of range\n");
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid("m1 out of range\n");
-	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
-		INTELPllInvalid("m1 <= m2\n");
-	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
-		INTELPllInvalid("m out of range\n");
-	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-		INTELPllInvalid("n out of range\n");
+
+	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+		if (clock->m1 <= clock->m2)
+			INTELPllInvalid("m1 <= m2\n");
+
+	if (!IS_VALLEYVIEW(dev)) {
+		if (clock->p < limit->p.min || limit->p.max < clock->p)
+			INTELPllInvalid("p out of range\n");
+		if (clock->m < limit->m.min || limit->m.max < clock->m)
+			INTELPllInvalid("m out of range\n");
+	}
+
 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
 		INTELPllInvalid("vco out of range\n");
 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
-	u32 m, n, fastclk;
-	u32 updrate, minupdate, p;
-	unsigned long bestppm, ppm, absppm;
-	int dotclk, flag;
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+	unsigned int bestppm = 1000000;
+	/* min update 19.2 MHz */
+	int max_n = min(limit->n.max, refclk / 19200);
+	bool found = false;
 
-	flag = 0;
-	dotclk = target * 1000;
-	bestppm = 1000000;
-	ppm = absppm = 0;
-	fastclk = dotclk / (2*100);
-	updrate = 0;
-	minupdate = 19200;
-	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
-	bestm1 = bestm2 = bestp1 = bestp2 = 0;
+	target *= 5; /* fast clock */
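+	/* The limits table is specified against the 5x fast clock (see
+	 * intel_limits_vlv), so scale the target dot clock to match. */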
+
+	memset(best_clock, 0, sizeof(*best_clock));
 
 	/* based on hardware requirement, prefer smaller n to precision */
-	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
-		updrate = refclk / n;
-		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
-			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
-				if (p2 > 10)
-					p2 = p2 - 1;
-				p = p1 * p2;
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+				clock.p = clock.p1 * clock.p2;
 				/* based on hardware requirement, prefer bigger m1,m2 values */
-				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
-					m2 = (((2*(fastclk * p * n / m1 )) +
-					       refclk) / (2*refclk));
-					m = m1 * m2;
-					vco = updrate * m;
-					if (vco >= limit->vco.min && vco < limit->vco.max) {
-						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
-						absppm = (ppm > 0) ? ppm : (-ppm);
-						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
-							bestppm = 0;
-							flag = 1;
-						}
-						if (absppm < bestppm - 10) {
-							bestppm = absppm;
-							flag = 1;
-						}
-						if (flag) {
-							bestn = n;
-							bestm1 = m1;
-							bestm2 = m2;
-							bestp1 = p1;
-							bestp2 = p2;
-							flag = 0;
-						}
+				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+					unsigned int ppm, diff;
+
+					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+								     refclk * clock.m1);
+
+					vlv_clock(refclk, &clock);
+
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+
+					diff = abs(clock.dot - target);
+					ppm = div_u64(1000000ULL * diff, target);
+
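+					/* Accept a clock within 100 ppm if
+					 * it has a bigger p than the current
+					 * best; otherwise require an
+					 * improvement of more than 10 ppm. */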
+					if (ppm < 100 && clock.p > best_clock->p) {
+						bestppm = 0;
+						*best_clock = clock;
+						found = true;
+					}
+
+					if (bestppm >= 10 && ppm < bestppm - 10) {
+						bestppm = ppm;
+						*best_clock = clock;
+						found = true;
 					}
 				}
 			}
 		}
 	}
-	best_clock->n = bestn;
-	best_clock->m1 = bestm1;
-	best_clock->m2 = bestm2;
-	best_clock->p1 = bestp1;
-	best_clock->p2 = bestp2;
 
-	return true;
+	return found;
+}
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 *
+	 * We can ditch the adjusted_mode.crtc_clock check as soon
+	 * as Haswell has gained clock readout/fastboot support.
+	 *
+	 * We can ditch the crtc->fb check as soon as we can
+	 * properly reconstruct framebuffers.
+	 */
+	return intel_crtc->active && crtc->fb &&
+		intel_crtc->config.adjusted_mode.crtc_clock;
 }
 
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -812,6 +800,25 @@
 		DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg = PIPEDSL(pipe);
+	u32 line1, line2;
+	u32 line_mask;
+
+	if (IS_GEN2(dev))
+		line_mask = DSL_LINEMASK_GEN2;
+	else
+		line_mask = DSL_LINEMASK_GEN3;
+
+	line1 = I915_READ(reg) & line_mask;
+	mdelay(5);
+	line2 = I915_READ(reg) & line_mask;
+
+	return line1 == line2;
+}
+
 /*
  * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
@@ -843,22 +850,8 @@
 			     100))
 			WARN(1, "pipe_off wait timed out\n");
 	} else {
-		u32 last_line, line_mask;
-		int reg = PIPEDSL(pipe);
-		unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
-		if (IS_GEN2(dev))
-			line_mask = DSL_LINEMASK_GEN2;
-		else
-			line_mask = DSL_LINEMASK_GEN3;
-
 		/* Wait for the display line to settle */
-		do {
-			last_line = I915_READ(reg) & line_mask;
-			mdelay(5);
-		} while (((I915_READ(reg) & line_mask) != last_line) &&
-			 time_after(timeout, jiffies));
-		if (time_after(jiffies, timeout))
+		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
 			WARN(1, "pipe_off wait timed out\n");
 	}
 }
@@ -929,6 +922,24 @@
 	     state_string(state), state_string(cur_state));
 }
 
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	cur_state = val & DSI_PLL_VCO_EN;
+	WARN(cur_state != state,
+	     "DSI PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
 struct intel_shared_dpll *
 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 {
@@ -1069,6 +1080,26 @@
 	     pipe_name(pipe));
 }
 
+static void assert_cursor(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	struct drm_device *dev = dev_priv->dev;
+	bool cur_state;
+
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+	else if (IS_845G(dev) || IS_I865G(dev))
+		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+	else
+		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+	WARN(cur_state != state,
+	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
 void assert_pipe(struct drm_i915_private *dev_priv,
 		 enum pipe pipe, bool state)
 {
@@ -1323,6 +1354,26 @@
 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
+static void intel_init_dpio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *   b.	The other bits such as sfr settings / modesel may all be set
+	 *      to 0.
+	 *
+	 * This should only be done on init and resume from S3 with both
+	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
 static void vlv_enable_pll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@
 	POSTING_READ(DPLL(pipe));
 }
 
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 val = 0;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	/* Leave integrated clock source enabled */
+	if (pipe == PIPE_B)
+		val = DPLL_INTEGRATED_CRI_CLK_VLV;
+	I915_WRITE(DPLL(pipe), val);
+	POSTING_READ(DPLL(pipe));
+}
+
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
 {
 	u32 port_mask;
@@ -1661,7 +1726,7 @@
  * returning.
  */
 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-			      bool pch_port)
+			      bool pch_port, bool dsi)
 {
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
@@ -1670,6 +1735,7 @@
 	u32 val;
 
 	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
 	if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@
 	 * need the check.
 	 */
 	if (!HAS_PCH_SPLIT(dev_priv->dev))
-		assert_pll_enabled(dev_priv, pipe);
+		if (dsi)
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
 	else {
 		if (pch_port) {
 			/* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@
 	 * or we might hang the display.
 	 */
 	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
 	/* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@
  * Plane regs are double buffered, going from enabled->disabled needs a
  * trigger in order to latch.  The display address reg provides this.
  */
-void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-				      enum plane plane)
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane)
 {
-	if (dev_priv->info->gen >= 4)
-		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
-	else
-		I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+	I915_WRITE(reg, I915_READ(reg));
+	POSTING_READ(reg);
 }
 
 /**
- * intel_enable_plane - enable a display plane on a given pipe
+ * intel_enable_primary_plane - enable the primary plane on a given pipe
  * @dev_priv: i915 private structure
  * @plane: plane to enable
  * @pipe: pipe being fed
  *
  * Enable @plane on @pipe, making sure that @pipe is running first.
  */
-static void intel_enable_plane(struct drm_i915_private *dev_priv,
-			       enum plane plane, enum pipe pipe)
+static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
+				       enum plane plane, enum pipe pipe)
 {
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 	int reg;
 	u32 val;
 
 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
 	assert_pipe_enabled(dev_priv, pipe);
 
+	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+
+	intel_crtc->primary_enabled = true;
+
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
 	if (val & DISPLAY_PLANE_ENABLE)
 		return;
 
 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev_priv, plane);
+	intel_flush_primary_plane(dev_priv, plane);
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
 /**
- * intel_disable_plane - disable a display plane
+ * intel_disable_primary_plane - disable the primary plane
  * @dev_priv: i915 private structure
  * @plane: plane to disable
  * @pipe: pipe consuming the data
  *
  * Disable @plane; should be an independent operation.
  */
-static void intel_disable_plane(struct drm_i915_private *dev_priv,
-				enum plane plane, enum pipe pipe)
+static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
+					enum plane plane, enum pipe pipe)
 {
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 	int reg;
 	u32 val;
 
+	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+
+	intel_crtc->primary_enabled = false;
+
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
 		return;
 
 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev_priv, plane);
+	intel_flush_primary_plane(dev_priv, plane);
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
@@ -1839,10 +1921,7 @@
 		alignment = 0;
 		break;
 	case I915_TILING_Y:
-		/* Despite that we check this in framebuffer_init userspace can
-		 * screw us over and change the tiling after the fact. Only
-		 * pinned buffers can't change their tiling. */
-		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+		WARN(1, "Y tiled bo slipped through, driver bug!\n");
 		return -EINVAL;
 	default:
 		BUG();
@@ -2077,7 +2156,7 @@
 	else
 		dspcntr &= ~DISPPLANE_TILED;
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
 	else
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2097,7 +2176,7 @@
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_MODIFY_DISPBASE(DSPSURF(plane),
 			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
 	} else {
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2244,11 +2323,26 @@
 		return ret;
 	}
 
-	/* Update pipe size and adjust fitter if needed */
+	/*
+	 * Update pipe size and adjust fitter if needed: the reason for this is
+	 * that in compute_mode_changes we check the native mode (not the pfit
+	 * mode) to see if we can flip rather than do a full mode set. In the
+	 * fastboot case, we'll flip, but if we don't update the pipesrc and
+	 * pfit state, we'll end up with a big fb scanned out into the wrong
+	 * sized surface.
+	 *
+	 * To fix this properly, we need to hoist the checks up into
+	 * compute_mode_changes (or above), check the actual pfit state and
+	 * whether the platform allows pfit disable with pipe active, and only
+	 * then update the pipesrc and pfit state, even on the flip path.
+	 */
 	if (i915_fastboot) {
+		const struct drm_display_mode *adjusted_mode =
+			&intel_crtc->config.adjusted_mode;
+
 		I915_WRITE(PIPESRC(intel_crtc->pipe),
-			   ((crtc->mode.hdisplay - 1) << 16) |
-			   (crtc->mode.vdisplay - 1));
+			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+			   (adjusted_mode->crtc_vdisplay - 1));
 		if (!intel_crtc->config.pch_pfit.enabled &&
 		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
 		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2873,6 +2967,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
@@ -2890,14 +2985,14 @@
 			SBI_ICLK);
 
 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
-	if (crtc->mode.clock == 20000) {
+	if (clock == 20000) {
 		auxdiv = 1;
 		divsel = 0x41;
 		phaseinc = 0x20;
 	} else {
 		/* The iCLK virtual clock root frequency is in MHz,
-		 * but the crtc->mode.clock in in KHz. To get the divisors,
-		 * it is necessary to divide one by another, so we
+		 * but the adjusted_mode->crtc_clock is in KHz. To get the
+		 * divisors, it is necessary to divide one by another, so we
 		 * convert the virtual clock precision to KHz here for higher
 		 * precision.
 		 */
@@ -2905,7 +3000,7 @@
 		u32 iclk_pi_range = 64;
 		u32 desired_divisor, msb_divisor_value, pi_value;
 
-		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+		desired_divisor = (iclk_virtual_root_freq / clock);
 		msb_divisor_value = desired_divisor / iclk_pi_range;
 		pi_value = desired_divisor % iclk_pi_range;
 
@@ -2921,7 +3016,7 @@
 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
-			crtc->mode.clock,
+			clock,
 			auxdiv,
 			divsel,
 			phasedir,
@@ -3286,6 +3381,108 @@
 			intel_plane_disable(&intel_plane->base);
 }
 
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	/* We can only enable IPS after we enable a plane and wait for a vblank.
+	 * We guarantee that the plane is enabled by calling intel_enable_ips
+	 * only after intel_enable_plane. And intel_enable_plane already waits
+	 * for a vblank, so all we need to do here is to enable the IPS bit. */
+	assert_plane_enabled(dev_priv, crtc->plane);
+	if (IS_BROADWELL(crtc->base.dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		/* Quoting Art Runyan: "it's not safe to expect any particular
+		 * value in IPS_CTL bit 31 after enabling IPS through the
+		 * mailbox." Therefore we need to defer waiting on the state
+		 * change.
+		 * TODO: need to fix this for state checker
+		 */
+	} else {
+		I915_WRITE(IPS_CTL, IPS_ENABLE);
+		/* The bit only becomes 1 in the next vblank, so this wait here
+		 * is essentially intel_wait_for_vblank. If we don't have this
+		 * and don't wait for vblanks until the end of crtc_enable, then
+		 * the HW state readout code will complain that the expected
+		 * IPS_CTL value is not the one we read. */
+		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+			DRM_ERROR("Timed out waiting for IPS enable\n");
+	}
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	assert_plane_enabled(dev_priv, crtc->plane);
+	if (IS_BROADWELL(crtc->base.dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	} else
+		I915_WRITE(IPS_CTL, 0);
+	POSTING_READ(IPS_CTL);
+
+	/* We need to wait for a vblank before we can disable the plane. */
+	intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int palreg = PALETTE(pipe);
+	int i;
+	bool reenable_ips = false;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled || !intel_crtc->active)
+		return;
+
+	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	}
+
+	/* use legacy palette for Ironlake */
+	if (HAS_PCH_SPLIT(dev))
+		palreg = LGC_PALETTE(pipe);
+
+	/* Workaround: Do not read or write the pipe palette/gamma data while
+	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+	 */
+	if (intel_crtc->config.ips_enabled &&
+	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+	     GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(intel_crtc);
+		reenable_ips = true;
+	}
+
+	for (i = 0; i < 256; i++) {
+		I915_WRITE(palreg + 4 * i,
+			   (intel_crtc->lut_r[i] << 16) |
+			   (intel_crtc->lut_g[i] << 8) |
+			   intel_crtc->lut_b[i]);
+	}
+
+	if (reenable_ips)
+		hsw_enable_ips(intel_crtc);
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3305,8 +3502,6 @@
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
-	intel_update_watermarks(dev);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
@@ -3329,9 +3524,10 @@
 	 */
 	intel_crtc_load_lut(crtc);
 
+	intel_update_watermarks(crtc);
 	intel_enable_pipe(dev_priv, pipe,
-			  intel_crtc->config.has_pch_encoder);
-	intel_enable_plane(dev_priv, plane, pipe);
+			  intel_crtc->config.has_pch_encoder, false);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 
@@ -3365,34 +3561,74 @@
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void hsw_enable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
 
-	if (!crtc->config.ips_enabled)
-		return;
+	intel_enable_primary_plane(dev_priv, plane, pipe);
+	intel_enable_planes(crtc);
+	intel_crtc_update_cursor(crtc, true);
 
-	/* We can only enable IPS after we enable a plane and wait for a vblank.
-	 * We guarantee that the plane is enabled by calling intel_enable_ips
-	 * only after intel_enable_plane. And intel_enable_plane already waits
-	 * for a vblank, so all we need to do here is to enable the IPS bit. */
-	assert_plane_enabled(dev_priv, crtc->plane);
-	I915_WRITE(IPS_CTL, IPS_ENABLE);
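+	/* IPS may only be enabled once the plane is on and a vblank has
+	 * passed; the primary plane enable above already waits for one. */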
+	hsw_enable_ips(intel_crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
-static void hsw_disable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+
+	/* FBC must be disabled before disabling the plane on HSW. */
+	if (dev_priv->fbc.plane == plane)
+		intel_disable_fbc(dev);
+
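+	/* IPS must be off before the primary plane is disabled, and
+	 * hsw_disable_ips() waits a vblank so the disable below is safe. */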
+	hsw_disable_ips(intel_crtc);
+
+	intel_crtc_update_cursor(crtc, false);
+	intel_disable_planes(crtc);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
 
-	if (!crtc->config.ips_enabled)
+	/* We want to get the other_active_crtc only if there's exactly one
+	 * other active crtc. */
+	list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+		if (!crtc_it->active || crtc_it == crtc)
+			continue;
+
+		if (other_active_crtc)
+			return;
+
+		other_active_crtc = crtc_it;
+	}
+	if (!other_active_crtc)
 		return;
 
-	assert_plane_enabled(dev_priv, crtc->plane);
-	I915_WRITE(IPS_CTL, 0);
-
-	/* We need to wait for a vblank before we can disable the plane. */
-	intel_wait_for_vblank(dev, crtc->pipe);
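+	/* Per the workaround note above, wait at least two vblanks on the
+	 * pipe that is already running. */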
+	intel_wait_for_vblank(dev, other_active_crtc->pipe);
+	intel_wait_for_vblank(dev, other_active_crtc->pipe);
 }
 
 static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3402,7 +3638,6 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3415,8 +3650,6 @@
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
-	intel_update_watermarks(dev);
-
 	if (intel_crtc->config.has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
@@ -3437,23 +3670,22 @@
 	intel_ddi_set_pipe_settings(crtc);
 	intel_ddi_enable_transcoder_func(crtc);
 
+	intel_update_watermarks(crtc);
 	intel_enable_pipe(dev_priv, pipe,
-			  intel_crtc->config.has_pch_encoder);
-	intel_enable_plane(dev_priv, plane, pipe);
-	intel_enable_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
-
-	hsw_enable_ips(intel_crtc);
+			  intel_crtc->config.has_pch_encoder, false);
 
 	if (intel_crtc->config.has_pch_encoder)
 		lpt_pch_enable(crtc);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	for_each_encoder_on_crtc(dev, crtc, encoder)
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		encoder->enable(encoder);
+		intel_opregion_notify_encoder(encoder, true);
+	}
+
+	/* If we change the relative order between pipe/planes enabling, we need
+	 * to change the workaround. */
+	haswell_mode_set_planes_workaround(intel_crtc);
+	haswell_crtc_enable_planes(crtc);
 
 	/*
 	 * There seems to be a race in PCH platform hw (at least on some
@@ -3506,7 +3738,7 @@
 
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
 
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3547,7 +3779,7 @@
 	}
 
 	intel_crtc->active = false;
-	intel_update_watermarks(dev);
+	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
@@ -3561,27 +3793,17 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (!intel_crtc->active)
 		return;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
+	haswell_crtc_disable_planes(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
-
-	intel_crtc_wait_for_pending_flips(crtc);
-	drm_vblank_off(dev, pipe);
-
-	/* FBC must be disabled before disabling the plane on HSW. */
-	if (dev_priv->fbc.plane == plane)
-		intel_disable_fbc(dev);
-
-	hsw_disable_ips(intel_crtc);
-
-	intel_crtc_update_cursor(crtc, false);
-	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	}
 
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3604,7 +3826,7 @@
 	}
 
 	intel_crtc->active = false;
-	intel_update_watermarks(dev);
+	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
@@ -3696,6 +3918,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
+	bool is_dsi;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3703,13 +3926,15 @@
 		return;
 
 	intel_crtc->active = true;
-	intel_update_watermarks(dev);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	vlv_enable_pll(intel_crtc);
+	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+	if (!is_dsi)
+		vlv_enable_pll(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
@@ -3719,8 +3944,9 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-	intel_enable_plane(dev_priv, plane, pipe);
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 
@@ -3745,7 +3971,6 @@
 		return;
 
 	intel_crtc->active = true;
-	intel_update_watermarks(dev);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
@@ -3757,8 +3982,9 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-	intel_enable_plane(dev_priv, plane, pipe);
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(dev_priv, pipe, false, false);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	/* The fixup needs to happen before cursor is enabled */
 	if (IS_G4X(dev))
@@ -3814,7 +4040,7 @@
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
 
 	intel_disable_pipe(dev_priv, pipe);
 
@@ -3824,11 +4050,15 @@
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	i9xx_disable_pll(dev_priv, pipe);
+	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+		vlv_disable_pll(dev_priv, pipe);
+	else if (!IS_VALLEYVIEW(dev))
+		i9xx_disable_pll(dev_priv, pipe);
 
 	intel_crtc->active = false;
+	intel_update_watermarks(crtc);
+
 	intel_update_fbc(dev);
-	intel_update_watermarks(dev);
 }
 
 static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3902,6 +4132,7 @@
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
 	if (crtc->fb) {
@@ -4029,7 +4260,7 @@
 		return false;
 	}
 
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		if (pipe_config->fdi_lanes > 2) {
 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
 				      pipe_config->fdi_lanes);
@@ -4091,8 +4322,7 @@
 	 */
 	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
-	fdi_dotclock = adjusted_mode->clock;
-	fdi_dotclock /= pipe_config->pixel_multiplier;
+	fdi_dotclock = adjusted_mode->crtc_clock;
 
 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
 					   pipe_config->pipe_bpp);
@@ -4134,13 +4364,39 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		/* FDI link clock is fixed at 2.7G */
-		if (pipe_config->requested_mode.clock * 3
-		    > IRONLAKE_FDI_FREQ * 4)
+	/* FIXME should check pixel clock limits on all platforms */
+	if (INTEL_INFO(dev)->gen < 4) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		int clock_limit =
+			dev_priv->display.get_display_clock_speed(dev);
+
+		/*
+		 * Enable pixel doubling when the dot clock
+		 * is > 90% of the (display) core speed.
+		 *
+		 * 915G supports double wide on either pipe,
+		 * otherwise pipe A only.
+		 */
+		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+			clock_limit *= 2;
+			pipe_config->double_wide = true;
+		}
+
+		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
 			return -EINVAL;
 	}
 
+	/*
+	 * Pipe horizontal size must be even in:
+	 * - DVO ganged mode
+	 * - LVDS dual channel mode
+	 * - Double wide pipe
+	 */
+	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+		pipe_config->pipe_src_w &= ~1;
+
 	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
 	 */
@@ -4304,28 +4560,6 @@
 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int vlv_get_refclk(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int refclk = 27000; /* for DP & HDMI */
-
-	return 100000; /* only one validated so far */
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
-		refclk = 96000;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv))
-			refclk = 100000;
-		else
-			refclk = 96000;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
-		refclk = 100000;
-	}
-
-	return refclk;
-}
-
 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4333,7 +4567,7 @@
 	int refclk;
 
 	if (IS_VALLEYVIEW(dev)) {
-		refclk = vlv_get_refclk(crtc);
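+		/* 100 MHz is the only reference clock validated on VLV so far. */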
+		refclk = 100000;
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4391,7 +4625,8 @@
 	}
 }
 
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+				 enum pipe pipe)
 {
 	u32 reg_val;
 
@@ -4399,24 +4634,24 @@
 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
 	 * and set it to a reasonable value instead.
 	 */
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 	reg_val &= 0xffffff00;
 	reg_val |= 0x00000030;
-	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 	reg_val &= 0x8cffffff;
 	reg_val = 0x8c000000;
-	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 	reg_val &= 0xffffff00;
-	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 	reg_val &= 0x00ffffff;
 	reg_val |= 0xb0000000;
-	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 }
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4482,18 +4717,18 @@
 
 	/* PLL B needs special handling */
 	if (pipe)
-		vlv_pllb_recal_opamp(dev_priv);
+		vlv_pllb_recal_opamp(dev_priv, pipe);
 
 	/* Set up Tx target for periodic Rcomp update */
-	vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
 
 	/* Disable target IRef on PLL */
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
 	reg_val &= 0x00ffffff;
-	vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
 
 	/* Disable fast lock */
-	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+	vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
 
 	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4507,55 +4742,55 @@
 	 * Note: don't use the DAC post divider as it seems unstable.
 	 */
 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
 	/* Set HBR and RBR LPF coefficients */
 	if (crtc->config.port_clock == 162000 ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 				 0x009f0003);
 	else
-		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 				 0x00d0000f);
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
 		/* Use SSC source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df40000);
 		else
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df70000);
 	} else { /* HDMI or VGA */
 		/* Use bend source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df70000);
 		else
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df40000);
 	}
 
-	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+	coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
 		coreclk |= 0x01000000;
-	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
 
-	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
 
 	/* Enable DPIO clock input */
 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
 		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-	if (pipe)
+	/* We should never disable this, set it here for state tracking */
+	if (pipe == PIPE_B)
 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
 	dpll |= DPLL_VCO_ENABLE;
 	crtc->config.dpll_hw_state.dpll = dpll;
 
@@ -4693,7 +4928,6 @@
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	struct drm_display_mode *adjusted_mode =
 		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
 	/* We need to be careful not to change the adjusted mode, for otherwise
@@ -4746,7 +4980,8 @@
 	 * always be the user's requested size.
 	 */
 	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
+		   (intel_crtc->config.pipe_src_h - 1));
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4784,8 +5019,11 @@
 	}
 
 	tmp = I915_READ(PIPESRC(crtc->pipe));
-	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
-	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
 }
 
 static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4805,7 +5043,7 @@
 
 	crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
-	crtc->mode.clock = pipe_config->adjusted_mode.clock;
+	crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
 	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
 }
 
@@ -4821,17 +5059,8 @@
 	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
 		pipeconf |= PIPECONF_ENABLE;
 
-	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
-		/* Enable pixel doubling when the dot clock is > 90% of the (display)
-		 * core speed.
-		 *
-		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-		 * pipe == 0 check?
-		 */
-		if (intel_crtc->config.requested_mode.clock >
-		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-			pipeconf |= PIPECONF_DOUBLE_WIDE;
-	}
+	if (intel_crtc->config.double_wide)
+		pipeconf |= PIPECONF_DOUBLE_WIDE;
 
 	/* only g4x and later have fancy bpc/dither controls */
 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4885,14 +5114,13 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dspcntr;
 	bool ok, has_reduced_clock = false;
-	bool is_lvds = false;
+	bool is_lvds = false, is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
@@ -4902,42 +5130,49 @@
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
+		case INTEL_OUTPUT_DSI:
+			is_dsi = true;
+			break;
 		}
 
 		num_connectors++;
 	}
 
-	refclk = i9xx_get_refclk(crtc, num_connectors);
+	if (is_dsi)
+		goto skip_dpll;
 
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc, refclk);
-	ok = dev_priv->display.find_dpll(limit, crtc,
-					 intel_crtc->config.port_clock,
-					 refclk, NULL, &clock);
-	if (!ok && !intel_crtc->config.clock_set) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc,
-						    dev_priv->lvds_downclock,
-						    refclk, &clock,
-						    &reduced_clock);
-	}
-	/* Compat-code for transition, will disappear. */
 	if (!intel_crtc->config.clock_set) {
+		refclk = i9xx_get_refclk(crtc, num_connectors);
+
+		/*
+		 * Returns a set of divisors for the desired target clock with
+		 * the given refclk, or FALSE.  The returned values represent
+		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
+		 * 2) / p1 / p2.
+		 */
+		limit = intel_limit(crtc, refclk);
+		ok = dev_priv->display.find_dpll(limit, crtc,
+						 intel_crtc->config.port_clock,
+						 refclk, NULL, &clock);
+		if (!ok) {
+			DRM_ERROR("Couldn't find PLL settings for mode!\n");
+			return -EINVAL;
+		}
+
+		if (is_lvds && dev_priv->lvds_downclock_avail) {
+			/*
+			 * Ensure we match the reduced clock's P to the target
+			 * clock.  If the clocks don't match, we can't switch
+			 * the display clock by using the FP0/FP1. In such case
+			 * we will disable the LVDS downclock feature.
+			 */
+			has_reduced_clock =
+				dev_priv->display.find_dpll(limit, crtc,
+							    dev_priv->lvds_downclock,
+							    refclk, &clock,
+							    &reduced_clock);
+		}
+		/* Compat-code for transition, will disappear. */
 		intel_crtc->config.dpll.n = clock.n;
 		intel_crtc->config.dpll.m1 = clock.m1;
 		intel_crtc->config.dpll.m2 = clock.m2;
@@ -4945,17 +5180,19 @@
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev)) {
 		i8xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
 				num_connectors);
-	else if (IS_VALLEYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev)) {
 		vlv_update_pll(intel_crtc);
-	else
+	} else {
 		i9xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
                                 num_connectors);
+	}
 
+skip_dpll:
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -4972,8 +5209,8 @@
 	 * which should always be the user's requested size.
 	 */
 	I915_WRITE(DSPSIZE(plane),
-		   ((mode->vdisplay - 1) << 16) |
-		   (mode->hdisplay - 1));
+		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
+		   (intel_crtc->config.pipe_src_w - 1));
 	I915_WRITE(DSPPOS(plane), 0);
 
 	i9xx_set_pipeconf(intel_crtc);
@@ -4983,8 +5220,6 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
@@ -5015,6 +5250,32 @@
 			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+			       struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = pipe_config->cpu_transcoder;
+	intel_clock_t clock;
+	u32 mdiv;
+	int refclk = 100000;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+	clock.m2 = mdiv & DPIO_M2DIV_MASK;
+	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+	vlv_clock(refclk, &clock);
+
+	/* clock.dot is the fast (5x) clock; divide down to get the port clock */
+	pipe_config->port_clock = clock.dot / 5;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_config *pipe_config)
 {
@@ -5045,6 +5306,9 @@
 		}
 	}
 
+	if (INTEL_INFO(dev)->gen < 4)
+		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
 	intel_get_pipe_timings(crtc, pipe_config);
 
 	i9xx_get_pfit_config(crtc, pipe_config);
@@ -5077,6 +5341,11 @@
 						     DPLL_PORTB_READY_MASK);
 	}
 
+	if (IS_VALLEYVIEW(dev))
+		vlv_crtc_clock_get(crtc, pipe_config);
+	else
+		i9xx_crtc_clock_get(crtc, pipe_config);
+
 	return true;
 }
 
@@ -5565,14 +5834,16 @@
 
 static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	uint32_t val;
 
 	val = 0;
 
-	if (intel_crtc->config.dither)
+	if (IS_HASWELL(dev) && intel_crtc->config.dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
 	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -5585,6 +5856,33 @@
 
 	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
 	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+
+	if (IS_BROADWELL(dev)) {
+		val = 0;
+
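+		/* Translate pipe_bpp (18/24/30/36) into the PIPEMISC dither
+		 * bpc field. */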
+		switch (intel_crtc->config.pipe_bpp) {
+		case 18:
+			val |= PIPEMISC_DITHER_6_BPC;
+			break;
+		case 24:
+			val |= PIPEMISC_DITHER_8_BPC;
+			break;
+		case 30:
+			val |= PIPEMISC_DITHER_10_BPC;
+			break;
+		case 36:
+			val |= PIPEMISC_DITHER_12_BPC;
+			break;
+		default:
+			/* Case prevented by pipe_config_set_bpp. */
+			BUG();
+		}
+
+		if (intel_crtc->config.dither)
+			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+		I915_WRITE(PIPEMISC(pipe), val);
+	}
 }
 
 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
@@ -5819,11 +6117,6 @@
 	else
 		intel_crtc->lowfreq_avail = false;
 
-	if (intel_crtc->config.has_pch_encoder) {
-		pll = intel_crtc_to_shared_dpll(intel_crtc);
-
-	}
-
 	intel_set_pipe_timings(intel_crtc);
 
 	if (intel_crtc->config.has_pch_encoder) {
@@ -5839,25 +6132,67 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+		& ~TU_SIZE_MASK;
+	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+					 enum transcoder transcoder,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	} else {
+		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	}
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_config *pipe_config)
+{
+	if (crtc->config.has_pch_encoder)
+		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+	else
+		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+					     &pipe_config->dp_m_n);
+}
+
 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
 					struct intel_crtc_config *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum transcoder transcoder = pipe_config->cpu_transcoder;
-
-	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
-	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
-	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
-					& ~TU_SIZE_MASK;
-	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
-				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+				     &pipe_config->fdi_m_n);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5946,6 +6281,8 @@
 		pipe_config->pixel_multiplier =
 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+		ironlake_pch_clock_get(crtc, pipe_config);
 	} else {
 		pipe_config->pixel_multiplier = 1;
 	}
@@ -6002,8 +6339,8 @@
  * register. Callers should take care of disabling all the display engine
  * functions, doing the mode unset, fixing interrupts, etc.
  */
-void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-		       bool switch_to_fclk, bool allow_power_down)
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+			      bool switch_to_fclk, bool allow_power_down)
 {
 	uint32_t val;
 
@@ -6031,7 +6368,10 @@
 
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_DISABLE;
-	I915_WRITE(D_COMP, val);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+		DRM_ERROR("Failed to disable D_COMP\n");
+	mutex_unlock(&dev_priv->rps.hw_lock);
 	POSTING_READ(D_COMP);
 	ndelay(100);
 
@@ -6050,7 +6390,7 @@
  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  * source.
  */
-void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
 	uint32_t val;
 
@@ -6073,7 +6413,10 @@
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
-	I915_WRITE(D_COMP, val);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+		DRM_ERROR("Failed to enable D_COMP\n");
+	mutex_unlock(&dev_priv->rps.hw_lock);
 	POSTING_READ(D_COMP);
 
 	val = I915_READ(LCPLL_CTL);
@@ -6256,22 +6599,79 @@
 	}
 }
 
-static void haswell_modeset_global_resources(struct drm_device *dev)
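+/* Iterate over each power domain whose bit is set in @mask. */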
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
+static unsigned long get_pipe_power_domains(struct drm_device *dev,
+					    enum pipe pipe, bool pfit_enabled)
 {
-	bool enable = false;
+	unsigned long mask;
+	enum transcoder transcoder;
+
+	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+	mask = BIT(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	if (pfit_enabled)
+		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+	return mask;
+}
+
+void intel_display_set_init_power(struct drm_device *dev, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->power_domains.init_power_on == enable)
+		return;
+
+	if (enable)
+		intel_display_power_get(dev, POWER_DOMAIN_INIT);
+	else
+		intel_display_power_put(dev, POWER_DOMAIN_INIT);
+
+	dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_power_wells(struct drm_device *dev)
+{
+	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
 	struct intel_crtc *crtc;
 
+	/*
+	 * First get all needed power domains, then put all unneeded, to avoid
+	 * any unnecessary toggling of the power wells.
+	 */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		enum intel_display_power_domain domain;
+
 		if (!crtc->base.enabled)
 			continue;
 
-		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
-		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
-			enable = true;
+		pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
+						crtc->pipe,
+						crtc->config.pch_pfit.enabled);
+
+		for_each_power_domain(domain, pipe_domains[crtc->pipe])
+			intel_display_power_get(dev, domain);
 	}
 
-	intel_set_power_well(dev, enable);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		enum intel_display_power_domain domain;
 
+		for_each_power_domain(domain, crtc->enabled_power_domains)
+			intel_display_power_put(dev, domain);
+
+		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+	}
+
+	intel_display_set_init_power(dev, false);
+}
+
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+	modeset_update_power_wells(dev);
 	hsw_update_package_c8(dev);
 }
 
@@ -6310,8 +6710,6 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
@@ -6419,6 +6817,44 @@
 	return 0;
 }
 
+static struct {
+	int clock;
+	u32 config;
+} hdmi_audio_clock[] = {
+	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
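+/* Entry 1 (25.2 MHz) is the bspec default; it is also used as the fallback
+ * when no exact clock match is found. */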
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+		if (mode->clock == hdmi_audio_clock[i].clock)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+		i = 1;
+	}
+
+	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+		      hdmi_audio_clock[i].clock,
+		      hdmi_audio_clock[i].config);
+
+	return hdmi_audio_clock[i].config;
+}
+
 static bool intel_eld_uptodate(struct drm_connector *connector,
 			       int reg_eldv, uint32_t bits_eldv,
 			       int reg_elda, uint32_t bits_elda,
@@ -6449,7 +6885,8 @@
 }
 
 static void g4x_write_eld(struct drm_connector *connector,
-			  struct drm_crtc *crtc)
+			  struct drm_crtc *crtc,
+			  struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6489,7 +6926,8 @@
 }
 
 static void haswell_write_eld(struct drm_connector *connector,
-				     struct drm_crtc *crtc)
+			      struct drm_crtc *crtc,
+			      struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6542,8 +6980,9 @@
 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-	} else
-		I915_WRITE(aud_config, 0);
+	} else {
+		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+	}
 
 	if (intel_eld_uptodate(connector,
 			       aud_cntrl_st2, eldv,
@@ -6576,7 +7015,8 @@
 }
 
 static void ironlake_write_eld(struct drm_connector *connector,
-				     struct drm_crtc *crtc)
+			       struct drm_crtc *crtc,
+			       struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6594,6 +7034,11 @@
 		aud_config = IBX_AUD_CFG(pipe);
 		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(connector->dev)) {
+		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
 	} else {
 		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
 		aud_config = CPT_AUD_CFG(pipe);
@@ -6603,8 +7048,19 @@
 
 	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
-	i = I915_READ(aud_cntl_st);
-	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	if (IS_VALLEYVIEW(connector->dev))  {
+		struct intel_encoder *intel_encoder;
+		struct intel_digital_port *intel_dig_port;
+
+		intel_encoder = intel_attached_encoder(connector);
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		i = intel_dig_port->port;
+	} else {
+		i = I915_READ(aud_cntl_st);
+		i = (i >> 29) & DIP_PORT_SEL_MASK;
+		/* DIP_Port_Select, 0x1 = PortB */
+	}
+
 	if (!i) {
 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 		/* operate blindly on all ports */
@@ -6620,8 +7076,9 @@
 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-	} else
-		I915_WRITE(aud_config, 0);
+	} else {
+		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+	}
 
 	if (intel_eld_uptodate(connector,
 			       aud_cntrl_st2, eldv,
@@ -6671,50 +7128,7 @@
 	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
 	if (dev_priv->display.write_eld)
-		dev_priv->display.write_eld(connector, crtc);
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	int palreg = PALETTE(pipe);
-	int i;
-	bool reenable_ips = false;
-
-	/* The clocks have to be on to load the palette. */
-	if (!crtc->enabled || !intel_crtc->active)
-		return;
-
-	if (!HAS_PCH_SPLIT(dev_priv->dev))
-		assert_pll_enabled(dev_priv, pipe);
-
-	/* use legacy palette for Ironlake */
-	if (HAS_PCH_SPLIT(dev))
-		palreg = LGC_PALETTE(pipe);
-
-	/* Workaround : Do not read or write the pipe palette/gamma data while
-	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-	 */
-	if (intel_crtc->config.ips_enabled &&
-	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-	     GAMMA_MODE_MODE_SPLIT)) {
-		hsw_disable_ips(intel_crtc);
-		reenable_ips = true;
-	}
-
-	for (i = 0; i < 256; i++) {
-		I915_WRITE(palreg + 4 * i,
-			   (intel_crtc->lut_r[i] << 16) |
-			   (intel_crtc->lut_g[i] << 8) |
-			   intel_crtc->lut_b[i]);
-	}
-
-	if (reenable_ips)
-		hsw_enable_ips(intel_crtc);
+		dev_priv->display.write_eld(connector, crtc, mode);
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6790,7 +7204,7 @@
 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			cntl |= CURSOR_MODE_DISABLE;
 		}
-		if (IS_HASWELL(dev)) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
 			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
 		}
@@ -6812,23 +7226,20 @@
 	int pipe = intel_crtc->pipe;
 	int x = intel_crtc->cursor_x;
 	int y = intel_crtc->cursor_y;
-	u32 base, pos;
+	u32 base = 0, pos = 0;
 	bool visible;
 
-	pos = 0;
-
-	if (on && crtc->enabled && crtc->fb) {
+	if (on)
 		base = intel_crtc->cursor_addr;
-		if (x > (int) crtc->fb->width)
-			base = 0;
 
-		if (y > (int) crtc->fb->height)
-			base = 0;
-	} else
+	if (x >= intel_crtc->config.pipe_src_w)
+		base = 0;
+
+	if (y >= intel_crtc->config.pipe_src_h)
 		base = 0;
 
 	if (x < 0) {
-		if (x + intel_crtc->cursor_width < 0)
+		if (x + intel_crtc->cursor_width <= 0)
 			base = 0;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6837,7 +7248,7 @@
 	pos |= x << CURSOR_X_SHIFT;
 
 	if (y < 0) {
-		if (y + intel_crtc->cursor_height < 0)
+		if (y + intel_crtc->cursor_height <= 0)
 			base = 0;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6849,7 +7260,7 @@
 	if (!visible && !intel_crtc->cursor_visible)
 		return;
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		I915_WRITE(CURPOS_IVB(pipe), pos);
 		ivb_update_cursor(crtc, base);
 	} else {
@@ -6980,8 +7391,8 @@
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	intel_crtc->cursor_x = x;
-	intel_crtc->cursor_y = y;
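+	/* Clamp to the signed 16-bit range that the cursor position
+	 * registers can represent. */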
+	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
+	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
 
 	if (intel_crtc->active)
 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -6989,27 +7400,6 @@
 	return 0;
 }
 
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-				 u16 blue, int regno)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	intel_crtc->lut_r[regno] = red >> 8;
-	intel_crtc->lut_g[regno] = green >> 8;
-	intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-			     u16 *blue, int regno)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	*red = intel_crtc->lut_r[regno] << 8;
-	*green = intel_crtc->lut_g[regno] << 8;
-	*blue = intel_crtc->lut_b[regno] << 8;
-}
-
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 				 u16 *blue, uint32_t start, uint32_t size)
 {
@@ -7045,14 +7435,21 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err;
+
 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
-	if (ret) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		kfree(intel_fb);
-		return ERR_PTR(ret);
-	}
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		goto err;
 
 	return &intel_fb->base;
+err:
+	drm_gem_object_unreference_unlocked(&obj->base);
+	kfree(intel_fb);
+
+	return ERR_PTR(ret);
 }
 
 static u32
@@ -7095,6 +7492,7 @@
 mode_fits_in_fbdev(struct drm_device *dev,
 		   struct drm_display_mode *mode)
 {
+#ifdef CONFIG_DRM_I915_FBDEV
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_framebuffer *fb;
@@ -7115,6 +7513,9 @@
 		return NULL;
 
 	return fb;
+#else
+	return NULL;
+#endif
 }
 
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7258,6 +7659,22 @@
 	mutex_unlock(&crtc->mutex);
 }
 
+static int i9xx_pll_refclk(struct drm_device *dev,
+			   const struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+
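+	/* The SSC reference frequency comes from the VBT; otherwise the
+	 * refclk is 120 MHz on PCH platforms, 96 MHz on gen3+, and 48 MHz
+	 * on gen2. */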
+	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+		return dev_priv->vbt.lvds_ssc_freq * 1000;
+	else if (HAS_PCH_SPLIT(dev))
+		return 120000;
+	else if (!IS_GEN2(dev))
+		return 96000;
+	else
+		return 48000;
+}
+
 /* Returns the clock of the currently programmed mode of the given pipe. */
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_config *pipe_config)
@@ -7265,14 +7682,15 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = pipe_config->cpu_transcoder;
-	u32 dpll = I915_READ(DPLL(pipe));
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
 	u32 fp;
 	intel_clock_t clock;
+	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-		fp = I915_READ(FP0(pipe));
+		fp = pipe_config->dpll_hw_state.fp0;
 	else
-		fp = I915_READ(FP1(pipe));
+		fp = pipe_config->dpll_hw_state.fp1;
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 	if (IS_PINEVIEW(dev)) {
@@ -7303,14 +7721,13 @@
 		default:
 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
-			pipe_config->adjusted_mode.clock = 0;
 			return;
 		}
 
 		if (IS_PINEVIEW(dev))
-			pineview_clock(96000, &clock);
+			pineview_clock(refclk, &clock);
 		else
-			i9xx_clock(96000, &clock);
+			i9xx_clock(refclk, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -7318,13 +7735,6 @@
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
 			clock.p2 = 14;
-
-			if ((dpll & PLL_REF_INPUT_MASK) ==
-			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-				/* XXX: might not be 66MHz */
-				i9xx_clock(66000, &clock);
-			} else
-				i9xx_clock(48000, &clock);
 		} else {
 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
 				clock.p1 = 2;
@@ -7336,59 +7746,55 @@
 				clock.p2 = 4;
 			else
 				clock.p2 = 2;
-
-			i9xx_clock(48000, &clock);
 		}
+
+		i9xx_clock(refclk, &clock);
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot;
+	/*
+	 * This value includes pixel_multiplier. We will use
+	 * port_clock to compute adjusted_mode.crtc_clock in the
+	 * encoder's get_config() function.
+	 */
+	pipe_config->port_clock = clock.dot;
 }
 
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config)
+int intel_dotclock_calculate(int link_freq,
+			     const struct intel_link_m_n *m_n)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-	int link_freq, repeat;
-	u64 clock;
-	u32 link_m, link_n;
-
-	repeat = pipe_config->pixel_multiplier;
-
 	/*
 	 * The calculation for the data clock is:
-	 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
 	 * But we want to avoid losing precision if possible, so:
-	 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
 	 *
 	 * and the link clock is simpler:
-	 * link_clock = (m * link_clock * repeat) / n
+	 * link_clock = (m * link_clock) / n
 	 */
 
+	if (!m_n->link_n)
+		return 0;
+
+	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
+
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+
+	/* read out port_clock from the DPLL */
+	i9xx_crtc_clock_get(crtc, pipe_config);
+
 	/*
-	 * We need to get the FDI or DP link clock here to derive
-	 * the M/N dividers.
-	 *
-	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
-	 * For DP, it's either 1.62GHz or 2.7GHz.
-	 * We do our calculations in 10*MHz since we don't need much precison.
+	 * This value does not include pixel_multiplier.
+	 * We will check that port_clock and adjusted_mode.crtc_clock
+	 * agree once we know their relationship in the encoder's
+	 * get_config() function.
 	 */
-	if (pipe_config->has_pch_encoder)
-		link_freq = intel_fdi_link_freq(dev) * 10000;
-	else
-		link_freq = pipe_config->port_clock;
-
-	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
-	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
-
-	if (!link_m || !link_n)
-		return;
-
-	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
-	do_div(clock, link_n);
-
-	pipe_config->adjusted_mode.clock = clock;
+	pipe_config->adjusted_mode.crtc_clock =
+		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+					 &pipe_config->fdi_m_n);
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -7404,6 +7810,7 @@
 	int hsync = I915_READ(HSYNC(cpu_transcoder));
 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
 	int vsync = I915_READ(VSYNC(cpu_transcoder));
+	enum pipe pipe = intel_crtc->pipe;
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
@@ -7416,11 +7823,14 @@
 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
 	 * to use a real value here instead.
 	 */
-	pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+	pipe_config.cpu_transcoder = (enum transcoder) pipe;
 	pipe_config.pixel_multiplier = 1;
+	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
 	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
-	mode->clock = pipe_config.adjusted_mode.clock;
+	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
 	mode->hdisplay = (htot & 0xffff) + 1;
 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 	mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7526,6 +7936,9 @@
 
 		intel_decrease_pllclock(crtc);
 	}
+
+	if (dev_priv->info->gen >= 6)
+		gen6_rps_idle(dev->dev_private);
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7714,7 +8127,7 @@
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7756,7 +8169,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7805,7 +8218,7 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7850,7 +8263,7 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7929,7 +8342,7 @@
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7974,7 +8387,7 @@
 	     fb->pitches[0] != crtc->fb->pitches[0]))
 		return -EINVAL;
 
-	work = kzalloc(sizeof *work, GFP_KERNEL);
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL)
 		return -ENOMEM;
 
@@ -8209,6 +8622,17 @@
 	return bpp;
 }
 
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+			"type: 0x%x flags: 0x%x\n",
+		mode->crtc_clock,
+		mode->crtc_hdisplay, mode->crtc_hsync_start,
+		mode->crtc_hsync_end, mode->crtc_htotal,
+		mode->crtc_vdisplay, mode->crtc_vsync_start,
+		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_config *pipe_config,
 				   const char *context)
@@ -8225,10 +8649,19 @@
 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
 		      pipe_config->fdi_m_n.tu);
+	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		      pipe_config->has_dp_encoder,
+		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+		      pipe_config->dp_m_n.tu);
 	DRM_DEBUG_KMS("requested mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
 	DRM_DEBUG_KMS("adjusted mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
 		      pipe_config->gmch_pfit.control,
 		      pipe_config->gmch_pfit.pgm_ratios,
@@ -8238,6 +8671,7 @@
 		      pipe_config->pch_pfit.size,
 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 }
 
 static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8281,6 +8715,7 @@
 
 	drm_mode_copy(&pipe_config->adjusted_mode, mode);
 	drm_mode_copy(&pipe_config->requested_mode, mode);
+
 	pipe_config->cpu_transcoder =
 		(enum transcoder) to_intel_crtc(crtc)->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8307,13 +8742,25 @@
 	if (plane_bpp < 0)
 		goto fail;
 
+	/*
+	 * Determine the real pipe dimensions. Note that stereo modes can
+	 * increase the actual pipe size due to the frame doubling and
+	 * insertion of additional space for blanks between the frames. This
+	 * is stored in the crtc timings. We use the requested mode to do this
+	 * computation to clearly distinguish it from the adjusted mode, which
+	 * can be changed by the connectors in the below retry loop.
+	 */
+	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
 encoder_retry:
 	/* Ensure the port clock defaults are reset when retrying. */
 	pipe_config->port_clock = 0;
 	pipe_config->pixel_multiplier = 1;
 
 	/* Fill in default crtc timings, allow encoders to overwrite them. */
-	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
 
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
 	 * adjust it according to limitations or connector properties, and also
@@ -8334,7 +8781,8 @@
 	/* Set default port clock if not overwritten by the encoder. Needs to be
 	 * done afterwards in case the encoder adjusts the mode. */
 	if (!pipe_config->port_clock)
-		pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+			* pipe_config->pixel_multiplier;
 
 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
 	if (ret < 0) {
@@ -8521,13 +8969,9 @@
 
 }
 
-static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
-				    struct intel_crtc_config *new)
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
-	int clock1, clock2, diff;
-
-	clock1 = cur->adjusted_mode.clock;
-	clock2 = new->adjusted_mode.clock;
+	int diff;
 
 	if (clock1 == clock2)
 		return true;
@@ -8581,6 +9025,15 @@
 		return false; \
 	}
 
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+		DRM_ERROR("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		return false; \
+	}
+
 #define PIPE_CONF_QUIRK(quirk)	\
 	((current_config->quirks | pipe_config->quirks) & (quirk))
 
@@ -8594,6 +9047,13 @@
 	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
 	PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
+	PIPE_CONF_CHECK_I(has_dp_encoder);
+	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+	PIPE_CONF_CHECK_I(dp_m_n.link_m);
+	PIPE_CONF_CHECK_I(dp_m_n.link_n);
+	PIPE_CONF_CHECK_I(dp_m_n.tu);
+
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8624,8 +9084,8 @@
 				      DRM_MODE_FLAG_NVSYNC);
 	}
 
-	PIPE_CONF_CHECK_I(requested_mode.hdisplay);
-	PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+	PIPE_CONF_CHECK_I(pipe_src_w);
+	PIPE_CONF_CHECK_I(pipe_src_h);
 
 	PIPE_CONF_CHECK_I(gmch_pfit.control);
 	/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8640,6 +9100,8 @@
 
 	PIPE_CONF_CHECK_I(ips_enabled);
 
+	PIPE_CONF_CHECK_I(double_wide);
+
 	PIPE_CONF_CHECK_I(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -8649,20 +9111,17 @@
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
+	if (!IS_HASWELL(dev)) {
+		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+	}
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
 
-	if (!IS_HASWELL(dev)) {
-		if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
-			DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
-				  current_config->adjusted_mode.clock,
-				  pipe_config->adjusted_mode.clock);
-			return false;
-		}
-	}
-
 	return true;
 }
 
@@ -8794,9 +9253,6 @@
 				encoder->get_config(encoder, &pipe_config);
 		}
 
-		if (dev_priv->display.get_clock)
-			dev_priv->display.get_clock(crtc, &pipe_config);
-
 		WARN(crtc->active != active,
 		     "crtc active state doesn't match with hw state "
 		     "(expected %i, found %i)\n", crtc->active, active);
@@ -8871,6 +9327,18 @@
 	check_shared_dpll_state(dev);
 }
 
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+				     int dotclock)
+{
+	/*
+	 * FDI already provided one idea for the dotclock.
+	 * Yell if the encoder disagrees.
+	 */
+	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+	     pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
 static int __intel_set_mode(struct drm_crtc *crtc,
 			    struct drm_display_mode *mode,
 			    int x, int y, struct drm_framebuffer *fb)
@@ -8883,7 +9351,7 @@
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
 	int ret = 0;
 
-	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+	saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
 	if (!saved_mode)
 		return -ENOMEM;
 	saved_hwmode = saved_mode + 1;
@@ -9422,7 +9890,7 @@
 	struct intel_crtc *intel_crtc;
 	int i;
 
-	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
 	if (intel_crtc == NULL)
 		return;
 
@@ -9451,6 +9919,18 @@
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 }
 
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
+{
+	struct drm_encoder *encoder = connector->base.encoder;
+
+	WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
+
+	if (!encoder)
+		return INVALID_PIPE;
+
+	return to_intel_crtc(encoder->crtc)->pipe;
+}
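
The new helper gives connector-level code (the backlight rework in this
series is the intended user) a locked way to resolve the pipe, with
INVALID_PIPE returned for a connector that is not currently driven. A usage
sketch; the caller name is hypothetical:

	/* assumes mode_config.mutex is already held, as the WARN_ON checks */
	static void foo_update_backlight(struct intel_connector *connector)
	{
		enum pipe pipe = intel_get_pipe_from_connector(connector);

		if (pipe == INVALID_PIPE)
			return;	/* no encoder/CRTC bound to this connector */

		/* ... program the per-pipe backlight registers ... */
	}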
+
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file)
 {
@@ -9466,7 +9946,7 @@
 
 	if (!drmmode_obj) {
 		DRM_ERROR("no such CRTC id\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -9573,7 +10053,13 @@
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
-		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+					PORT_B);
+			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+		}
+
 		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
@@ -9582,12 +10068,7 @@
 					      PORT_C);
 		}
 
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
-			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
-					PORT_B);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
-		}
+		intel_dsi_init(dev);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -9643,6 +10124,7 @@
 void intel_framebuffer_fini(struct intel_framebuffer *fb)
 {
 	drm_framebuffer_cleanup(&fb->base);
+	WARN_ON(!fb->obj->framebuffer_references--);
 	drm_gem_object_unreference_unlocked(&fb->obj->base);
 }
 
@@ -9674,9 +10156,12 @@
 			   struct drm_mode_fb_cmd2 *mode_cmd,
 			   struct drm_i915_gem_object *obj)
 {
+	int aligned_height, tile_height;
 	int pitch_limit;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	if (obj->tiling_mode == I915_TILING_Y) {
 		DRM_DEBUG("hardware does not support tiling Y\n");
 		return -EINVAL;
@@ -9765,8 +10250,16 @@
 	if (mode_cmd->offsets[0] != 0)
 		return -EINVAL;
 
+	tile_height = IS_GEN2(dev) ? 16 : 8;
+	aligned_height = ALIGN(mode_cmd->height,
+			       obj->tiling_mode ? tile_height : 1);
+	/* FIXME drm helper for size checks (especially planar formats)? */
+	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+		return -EINVAL;
+
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 	intel_fb->obj = obj;
+	intel_fb->obj->framebuffer_references++;
 
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
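
The size check added above rejects buffer objects too small for the requested
framebuffer: the height is rounded up to a whole number of tile rows (8 lines
for tiled surfaces on gen3+, 16 on gen2) and multiplied by the stride. A
standalone sketch with example numbers; all values are illustrative:

	#include <stdint.h>

	#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

	static int fb_fits(uint32_t height, uint32_t pitch, int tiled,
			   int gen2, uint64_t obj_size)
	{
		uint32_t tile_height = tiled ? (gen2 ? 16 : 8) : 1;
		uint32_t aligned = ALIGN_UP(height, tile_height);

		/* e.g. 1080 lines at a 7680-byte stride needs 8294400 bytes */
		return obj_size >= (uint64_t)aligned * pitch;
	}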
@@ -9792,9 +10285,15 @@
 	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
 static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_create = intel_user_framebuffer_create,
-	.output_poll_changed = intel_fb_output_poll_changed,
+	.output_poll_changed = intel_fbdev_output_poll_changed,
 };
 
 /* Set up chip specific display functions */
@@ -9820,7 +10319,6 @@
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
-		dev_priv->display.get_clock = ironlake_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9828,7 +10326,6 @@
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9836,7 +10333,6 @@
 		dev_priv->display.update_plane = i9xx_update_plane;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9886,7 +10382,7 @@
 			dev_priv->display.write_eld = ironlake_write_eld;
 			dev_priv->display.modeset_global_resources =
 				ivb_modeset_global_resources;
-		} else if (IS_HASWELL(dev)) {
+		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
 			dev_priv->display.write_eld = haswell_write_eld;
 			dev_priv->display.modeset_global_resources =
@@ -9894,7 +10390,8 @@
 		}
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-	}
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.write_eld = ironlake_write_eld;
+	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
 	dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -9917,6 +10414,7 @@
 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
 		break;
 	case 7:
+	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
 		break;
 	}
@@ -10012,8 +10510,7 @@
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
-	/* 830/845 need to leave pipe A & dpll A up */
-	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	/* 830 needs to leave pipe A & dpll A up */
 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
 	/* Lenovo U160 cannot use SSC on LVDS */
@@ -10022,20 +10519,11 @@
 	/* Sony Vaio Y cannot use SSC on LVDS */
 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-	/* Acer Aspire 5734Z must invert backlight brightness */
-	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-	/* Acer/eMachines G725 */
-	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-	/* Acer/eMachines e725 */
-	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-	/* Acer/Packard Bell NCL20 */
-	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-	/* Acer Aspire 4736Z */
-	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+	/*
+	 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
+	 * seem to use inverted backlight PWM.
+	 */
+	{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
 
 	/* Dell XPS13 HD Sandy Bridge */
 	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -10084,12 +10572,19 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-	intel_init_power_well(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	intel_prepare_ddi(dev);
 
 	intel_init_clock_gating(dev);
 
+	/* Enable the CRI clock source so we can get at the display */
+	if (IS_VALLEYVIEW(dev))
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+			   DPLL_INTEGRATED_CRI_CLK_VLV);
+
+	intel_init_dpio(dev);
+
 	mutex_lock(&dev->struct_mutex);
 	intel_enable_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -10357,7 +10852,7 @@
 	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
 		return;
 
-	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
 	}
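
The corrected test matters because VGA_DISP_DISABLE is a single bit (bit 31
of the VGA control register) and other bits may be set alongside it;
comparing the whole register against the flag can misread an already-disabled
plane as enabled:

	/* bad:  any other set bit makes the comparison fail
	 *	if (I915_READ(vga_reg) != VGA_DISP_DISABLE)
	 * good: test only the disable bit itself
	 *	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE))
	 */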
@@ -10380,6 +10875,7 @@
 								 &crtc->config);
 
 		crtc->base.enabled = crtc->active;
+		crtc->primary_enabled = crtc->active;
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -10420,20 +10916,11 @@
 		}
 
 		encoder->connectors_active = false;
-		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
 			      encoder->base.base.id,
 			      drm_get_encoder_name(&encoder->base),
 			      encoder->base.crtc ? "enabled" : "disabled",
-			      pipe);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-			    base.head) {
-		if (!crtc->active)
-			continue;
-		if (dev_priv->display.get_clock)
-			dev_priv->display.get_clock(crtc,
-						    &crtc->config);
+			      pipe_name(pipe));
 	}
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10460,7 +10947,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
-	struct drm_plane *plane;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
 	int i;
@@ -10507,7 +10993,12 @@
 		pll->on = false;
 	}
 
+	if (IS_HASWELL(dev))
+		ilk_wm_get_hw_state(dev);
+
 	if (force_restore) {
+		i915_redisable_vga(dev);
+
 		/*
 		 * We need to use raw interfaces for restoring state to avoid
 		 * checking (bogus) intermediate states.
@@ -10519,10 +11010,6 @@
 			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
 					 crtc->fb);
 		}
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-			intel_plane_restore(plane);
-
-		i915_redisable_vga(dev);
 	} else {
 		intel_modeset_update_staged_output_state(dev);
 	}
@@ -10545,6 +11032,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
+	struct drm_connector *connector;
 
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10585,6 +11073,10 @@
 	/* destroy backlight, if any, before the connectors */
 	intel_panel_destroy_backlight(dev);
 
+	/* destroy the sysfs files before encoders/connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_sysfs_connector_remove(connector);
+
 	drm_mode_config_cleanup(dev);
 
 	intel_cleanup_overlay(dev);
@@ -10680,7 +11172,7 @@
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return NULL;
 
-	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
 
@@ -10688,6 +11180,9 @@
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
+		if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+			continue;
+
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
 			error->cursor[i].control = I915_READ(CURCNTR(i));
 			error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10721,6 +11216,10 @@
 	for (i = 0; i < error->num_transcoders; i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
+		if (!intel_display_power_enabled(dev,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+			continue;
+
 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10732,12 +11231,6 @@
 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
-	/* In the code above we read the registers without checking if the power
-	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
-	 * prevent the next I915_WRITE from detecting it and printing an error
-	 * message. */
-	intel_uncore_clear_errors(dev);
-
 	return error;
 }
 
@@ -10782,7 +11275,7 @@
 	}
 
 	for (i = 0; i < error->num_transcoders; i++) {
-		err_printf(m, "  CPU transcoder: %c\n",
+		err_printf(m, "CPU transcoder: %c\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a43137..eb8139d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
 
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
+struct dp_link_dpll {
+	int link_bw;
+	struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
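
The tables are keyed by the raw DPCD link-bandwidth code; per the DisplayPort
spec the code expresses the link rate in 0.27 GHz units, so 0x06 means
1.62 GHz and 0x0a means 2.7 GHz. A sketch of the conversion (the DRM DP
helpers provide an equivalent, whose exact name is not shown in this diff):

	/* DPCD LINK_BW code -> link rate in kHz; code is in 0.27 GHz units */
	static int bw_code_to_link_rate_khz(unsigned char bw_code)
	{
		return bw_code * 27000;	/* 0x06 -> 162000, 0x0a -> 270000 */
	}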
+
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -211,24 +237,77 @@
 	}
 }
 
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp,
+				    struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp,
+					      struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	enum pipe pipe;
+
+	/* modeset should have pipe */
+	if (crtc)
+		return to_intel_crtc(crtc)->pipe;
+
+	/* init time, try to find a pipe with this port selected */
+	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+			PANEL_PORT_SELECT_MASK;
+		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+			return pipe;
+		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+			return pipe;
+	}
+
+	/* shrug */
+	return PIPE_A;
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_CONTROL;
+	else
+		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_STATUS;
+	else
+		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_ctrl_reg;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
 }
 
 static void
@@ -236,19 +315,15 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg, pp_ctrl_reg;
 
 	if (!is_edp(intel_dp))
 		return;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-				I915_READ(pp_stat_reg),
-				I915_READ(pp_ctrl_reg));
+			      I915_READ(_pp_stat_reg(intel_dp)),
+			      I915_READ(_pp_ctrl_reg(intel_dp)));
 	}
 }
 
@@ -330,6 +405,7 @@
 	uint32_t status;
 	int try, precharge, clock = 0;
 	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+	uint32_t timeout;
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
 	 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -344,6 +420,11 @@
 	else
 		precharge = 5;
 
+	if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
+		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+	else
+		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
 	intel_aux_display_runtime_get(dev_priv);
 
 	/* Try to wait for any previous AUX channel activity */
@@ -361,6 +442,12 @@
 		goto out;
 	}
 
+	/* Only 5 data registers! */
+	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+		ret = -E2BIG;
+		goto out;
+	}
+
 	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
 		/* Must try at least 3 times according to DP spec */
 		for (try = 0; try < 5; try++) {
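
The 20-byte cap in the WARN_ON above falls straight out of the hardware: each
AUX channel has five 32-bit data registers, so a single transfer moves at
most 5 * 4 = 20 bytes. The same budget explains the per-call limits later in
this file; a sketch of the accounting, assuming the usual native-AUX framing:

	/* 5 data registers x 4 bytes = 20 bytes on the wire per transfer.
	 * Native write: 4 header bytes + up to 16 payload -> send_bytes <= 16.
	 * Native read reply: 1 reply byte + up to 19 data -> recv_bytes <= 19.
	 */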
@@ -373,7 +460,7 @@
 			I915_WRITE(ch_ctl,
 				   DP_AUX_CH_CTL_SEND_BUSY |
 				   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
-				   DP_AUX_CH_CTL_TIME_OUT_400us |
+				   timeout |
 				   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 				   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 				   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
@@ -451,9 +538,10 @@
 	int msg_bytes;
 	uint8_t	ack;
 
+	if (WARN_ON(send_bytes > 16))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
-	if (send_bytes > 16)
-		return -1;
 	msg[0] = AUX_NATIVE_WRITE << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
@@ -494,6 +582,9 @@
 	uint8_t ack;
 	int ret;
 
+	if (WARN_ON(recv_bytes > 19))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
 	msg[0] = AUX_NATIVE_READ << 4;
 	msg[1] = address >> 8;
@@ -538,6 +629,7 @@
 	int reply_bytes;
 	int ret;
 
+	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_check_edp(intel_dp);
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
@@ -569,13 +661,18 @@
 		break;
 	}
 
-	for (retry = 0; retry < 5; retry++) {
+	/*
+	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+	 * required to retry at least seven times upon receiving AUX_DEFER
+	 * before giving up the AUX transaction.
+	 */
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp,
 				      msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
-			return ret;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +683,8 @@
 			break;
 		case AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_NATIVE_REPLY_DEFER:
 			/*
 			 * For now, just give more slack to branch devices. We
@@ -604,7 +702,8 @@
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
 				  reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +711,29 @@
 			if (mode == MODE_I2C_READ) {
 				*read_byte = reply[1];
 			}
-			return reply_bytes - 1;
+			ret = reply_bytes - 1;
+			goto out;
 		case AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
 		default:
 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 	}
 
 	DRM_ERROR("too many retries, giving up\n");
-	return -EREMOTEIO;
+	ret = -EREMOTEIO;
+
+out:
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+	return ret;
 }
 
 static int
@@ -647,11 +753,9 @@
 	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+	intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
-	ironlake_edp_panel_vdd_on(intel_dp);
 	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 	return ret;
 }
 
@@ -660,41 +764,30 @@
 		   struct intel_crtc_config *pipe_config, int link_bw)
 {
 	struct drm_device *dev = encoder->base.dev;
+	const struct dp_link_dpll *divisor = NULL;
+	int i, count = 0;
 
 	if (IS_G4X(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.m1 = 23;
-			pipe_config->dpll.m2 = 8;
-		} else {
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 2;
-		}
-		pipe_config->clock_set = true;
+		divisor = gen4_dpll;
+		count = ARRAY_SIZE(gen4_dpll);
 	} else if (IS_HASWELL(dev)) {
 		/* Haswell has special-purpose DP DDI clocks. */
 	} else if (HAS_PCH_SPLIT(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 12;
-			pipe_config->dpll.m2 = 9;
-		} else {
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 8;
-		}
-		pipe_config->clock_set = true;
+		divisor = pch_dpll;
+		count = ARRAY_SIZE(pch_dpll);
 	} else if (IS_VALLEYVIEW(dev)) {
-		/* FIXME: Need to figure out optimized DP clocks for vlv. */
+		divisor = vlv_dpll;
+		count = ARRAY_SIZE(vlv_dpll);
+	}
+
+	if (divisor && count) {
+		for (i = 0; i < count; i++) {
+			if (link_bw == divisor[i].link_bw) {
+				pipe_config->dpll = divisor[i].dpll;
+				pipe_config->clock_set = true;
+				break;
+			}
+		}
 	}
 }
 
@@ -737,19 +830,22 @@
 
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
-		      max_lane_count, bws[max_clock], adjusted_mode->clock);
+		      max_lane_count, bws[max_clock],
+		      adjusted_mode->crtc_clock);
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
-	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+	    dev_priv->vbt.edp_bpp < bpp) {
 		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 			      dev_priv->vbt.edp_bpp);
-		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+		bpp = dev_priv->vbt.edp_bpp;
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
 
 		for (clock = 0; clock <= max_clock; clock++) {
 			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +890,8 @@
 		      mode_rate, link_avail);
 
 	intel_link_compute_m_n(bpp, lane_count,
-			       adjusted_mode->clock, pipe_config->port_clock,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
 			       &pipe_config->dp_m_n);
 
 	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +899,6 @@
 	return true;
 }
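
The bpp/clock/lane walk above is plain bandwidth accounting: the mode needs
pixel_clock * bpp / 8 units of data rate, the link supplies
link_clock * lanes * 8 / 10 after 8b/10b coding overhead, and the ratio
between the two is what intel_link_compute_m_n() encodes into the M/N
registers. A self-contained sketch with one worked case, using the driver's
kHz convention:

	/* data rate the mode requires */
	static int dp_link_required(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp) / 8;
	}

	/* data rate the link supplies; 8b/10b coding costs 20% */
	static int dp_max_data_rate(int link_clock_khz, int lanes)
	{
		return (link_clock_khz * lanes * 8) / 10;
	}

	/*
	 * 1920x1080@60 (148500 kHz) at 24 bpp needs 445500; two lanes at
	 * 2.7 GHz supply only 432000, so the loop must step up to four
	 * lanes (864000) or drop the bpp.
	 */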
 
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
-	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	intel_dp->link_configuration[0] = intel_dp->link_bw;
-	intel_dp->link_configuration[1] = intel_dp->lane_count;
-	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-	/*
-	 * Check for DPCD version > 1.1 and enhanced framing support
-	 */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	}
-}
-
 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +971,6 @@
 		intel_write_eld(&encoder->base, adjusted_mode);
 	}
 
-	intel_dp_init_link_config(intel_dp);
-
 	/* Split out the IBX/CPU vs CPT settings */
 
 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +980,7 @@
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +994,7 @@
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		if (crtc->pipe == 1)
@@ -944,8 +1024,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_stat_reg, pp_ctrl_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
 			mask, value,
@@ -987,11 +1067,8 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 control;
-	u32 pp_ctrl_reg;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	control = I915_READ(pp_ctrl_reg);
-
+	control = I915_READ(_pp_ctrl_reg(intel_dp));
 	control &= ~PANEL_UNLOCK_MASK;
 	control |= PANEL_UNLOCK_REGS;
 	return control;
@@ -1006,17 +1083,16 @@
 
 	if (!is_edp(intel_dp))
 		return;
-	DRM_DEBUG_KMS("Turn eDP VDD on\n");
 
 	WARN(intel_dp->want_panel_vdd,
 	     "eDP VDD already requested on\n");
 
 	intel_dp->want_panel_vdd = true;
 
-	if (ironlake_edp_have_panel_vdd(intel_dp)) {
-		DRM_DEBUG_KMS("eDP VDD already on\n");
+	if (ironlake_edp_have_panel_vdd(intel_dp))
 		return;
-	}
+
+	DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
 	if (!ironlake_edp_have_panel_power(intel_dp))
 		ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1100,8 @@
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_FORCE_VDD;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1126,13 @@
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+		DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
 		pp = ironlake_get_pp_control(intel_dp);
 		pp &= ~EDP_FORCE_VDD;
 
-		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+		pp_stat_reg = _pp_stat_reg(intel_dp);
 
 		I915_WRITE(pp_ctrl_reg, pp);
 		POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1160,6 @@
 	if (!is_edp(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
 
 	intel_dp->want_panel_vdd = false;
@@ -1119,20 +1196,19 @@
 
 	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 	pp = ironlake_get_pp_control(intel_dp);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 
 	pp |= POWER_TARGET_ON;
 	if (!IS_GEN5(dev))
 		pp |= PANEL_POWER_RESET;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
@@ -1140,8 +1216,8 @@
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 }
 
@@ -1164,7 +1240,7 @@
 	 * panels get very unhappy and cease to work. */
 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1179,7 +1255,6 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
 	u32 pp;
 	u32 pp_ctrl_reg;
 
@@ -1197,12 +1272,12 @@
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
-	intel_panel_enable_backlight(dev, pipe);
+	intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1215,13 +1290,13 @@
 	if (!is_edp(intel_dp))
 		return;
 
-	intel_panel_disable_backlight(dev);
+	intel_panel_disable_backlight(intel_dp->attached_connector);
 
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(intel_dp);
 	pp &= ~EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1443,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	int dotclock;
 
 	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
 		tmp = I915_READ(intel_dp->output_reg);
@@ -1395,13 +1471,25 @@
 
 	pipe_config->adjusted_mode.flags |= flags;
 
-	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+	pipe_config->has_dp_encoder = true;
+
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	if (port == PORT_A) {
 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
 			pipe_config->port_clock = 162000;
 		else
 			pipe_config->port_clock = 270000;
 	}
 
+	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+					    &pipe_config->dp_m_n);
+
+	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+
 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
 		/*
@@ -1423,20 +1511,21 @@
 	}
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
 {
-	return is_edp(intel_dp) &&
-		intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return dev_priv->psr.sink_support;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev))
+	if (!HAS_PSR(dev))
 		return false;
 
-	return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
 }
 
 static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1486,7 +1575,7 @@
 	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 	intel_dp->psr_setup_done = true;
@@ -1511,9 +1600,9 @@
 					    DP_PSR_MAIN_LINK_ACTIVE);
 
 	/* Setup AUX registers */
-	I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
-	I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
-	I915_WRITE(EDP_PSR_AUX_CTL,
+	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+	I915_WRITE(EDP_PSR_AUX_CTL(dev),
 		   DP_AUX_CH_CTL_TIME_OUT_400us |
 		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1527,6 +1616,7 @@
 	uint32_t max_sleep_time = 0x1f;
 	uint32_t idle_frames = 1;
 	uint32_t val = 0x0;
+	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
 		val |= EDP_PSR_LINK_STANDBY;
@@ -1536,8 +1626,8 @@
 	} else
 		val |= EDP_PSR_LINK_DISABLE;
 
-	I915_WRITE(EDP_PSR_CTL, val |
-		   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+	I915_WRITE(EDP_PSR_CTL(dev), val |
+		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   EDP_PSR_ENABLE);
@@ -1553,42 +1643,33 @@
 	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-	if (!IS_HASWELL(dev)) {
+	dev_priv->psr.source_ok = false;
+
+	if (!HAS_PSR(dev)) {
 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		dev_priv->no_psr_reason = PSR_NO_SOURCE;
 		return false;
 	}
 
 	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
 	    (dig_port->port != PORT_A)) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-		dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
-		return false;
-	}
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
-		dev_priv->no_psr_reason = PSR_NO_SINK;
 		return false;
 	}
 
 	if (!i915_enable_psr) {
 		DRM_DEBUG_KMS("PSR disable by flag\n");
-		dev_priv->no_psr_reason = PSR_MODULE_PARAM;
 		return false;
 	}
 
 	crtc = dig_port->base.base.crtc;
 	if (crtc == NULL) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
 	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+	if (!intel_crtc_active(crtc)) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
@@ -1596,29 +1677,26 @@
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
 		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		dev_priv->no_psr_reason = PSR_NOT_TILED;
 		return false;
 	}
 
 	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
 		return false;
 	}
 
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		dev_priv->no_psr_reason = PSR_S3D_ENABLED;
 		return false;
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
 		return false;
 	}
 
+	dev_priv->psr.source_ok = true;
 	return true;
 }
 
@@ -1657,10 +1735,11 @@
 	if (!intel_edp_is_psr_enabled(dev))
 		return;
 
-	I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+	I915_WRITE(EDP_PSR_CTL(dev),
+		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
 	/* Wait till PSR is idle */
-	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
 		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 		DRM_ERROR("Timed out waiting for PSR Idle State\n");
 }
@@ -1674,7 +1753,7 @@
 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			intel_dp = enc_to_intel_dp(&encoder->base);
 
-			if (!is_edp_psr(intel_dp))
+			if (!is_edp_psr(dev))
 				return;
 
 			if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1733,14 +1812,24 @@
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_enable_dp(encoder);
 	ironlake_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	ironlake_edp_backlight_on(intel_dp);
 }
 
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1758,53 +1847,59 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
+	struct edp_power_seq power_seq;
 	u32 val;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+						      &power_seq);
+
 	intel_enable_dp(encoder);
 
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
-
-	if (!IS_VALLEYVIEW(dev))
-		return;
+	int pipe = intel_crtc->pipe;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 				 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1869,7 +1964,7 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
 		return DP_TRAIN_VOLTAGE_SWING_1200;
 	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1885,7 +1980,18 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (HAS_DDI(dev)) {
+	if (IS_BROADWELL(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_HASWELL(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1939,10 +2045,13 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dport->base.base.crtc);
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	case DP_TRAIN_PRE_EMPHASIS_0:
@@ -2018,21 +2127,22 @@
 	}
 
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	uint8_t v = 0;
 	uint8_t p = 0;
@@ -2193,6 +2303,41 @@
 	}
 }
 
+static uint32_t
+intel_bdw_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */
+
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			      "0x%x\n", signal_levels);
+		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
+	}
+}
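
The switch enumerates the nine legal buffer translations (Sel0 through Sel8):
DP defines four voltage-swing levels and four pre-emphasis levels, but their
combination is capped, so the valid pairs form a triangle rather than a full
4x4 grid. A sketch of the constraint the table encodes, with levels as 0-3
indices:

	#include <stdbool.h>

	/* swing 0..3 = 400/600/800/1200 mV; preemph 0..3 = 0/3.5/6/9.5 dB */
	static bool bdw_combo_valid(int swing, int preemph)
	{
		/* from the table above: 400/600 mV allow up to 6 dB,
		 * 800 mV up to 3.5 dB, 1200 mV only 0 dB */
		static const int max_preemph[4] = { 2, 2, 1, 0 };

		if (swing < 0 || swing > 3 || preemph < 0)
			return false;
		return preemph <= max_preemph[swing];
	}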
+
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2203,7 +2348,10 @@
 	uint32_t signal_levels, mask;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (HAS_DDI(dev)) {
+	if (IS_BROADWELL(dev)) {
+		signal_levels = intel_bdw_signal_levels(train_set);
+		mask = DDI_BUF_EMP_MASK;
+	} else if (IS_HASWELL(dev)) {
 		signal_levels = intel_hsw_signal_levels(train_set);
 		mask = DDI_BUF_EMP_MASK;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -2227,14 +2375,15 @@
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-			uint32_t dp_reg_value,
+			uint32_t *DP,
 			uint8_t dp_train_pat)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
-	int ret;
+	uint8_t buf[sizeof(intel_dp->train_set) + 1];
+	int ret, len;
 
 	if (HAS_DDI(dev)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2263,62 +2412,93 @@
 		I915_WRITE(DP_TP_CTL(port), temp);
 
 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			*DP |= DP_LINK_TRAIN_OFF_CPT;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
 
 	} else {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+		*DP &= ~DP_LINK_TRAIN_MASK;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			*DP |= DP_LINK_TRAIN_OFF;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			*DP |= DP_LINK_TRAIN_PAT_1;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		}
 	}
 
-	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+	I915_WRITE(intel_dp->output_reg, *DP);
 	POSTING_READ(intel_dp->output_reg);
 
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET,
-				    dp_train_pat);
-
-	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	buf[0] = dp_train_pat;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
 	    DP_TRAINING_PATTERN_DISABLE) {
-		ret = intel_dp_aux_native_write(intel_dp,
-						DP_TRAINING_LANE0_SET,
-						intel_dp->train_set,
-						intel_dp->lane_count);
-		if (ret != intel_dp->lane_count)
-			return false;
+		/* don't write DP_TRAINING_LANEx_SET on disable */
+		len = 1;
+	} else {
+		/* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
+		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+		len = intel_dp->lane_count + 1;
 	}
 
-	return true;
+	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
+					buf, len);
+
+	return ret == len;
+}
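
Writing the pattern byte and the per-lane drive settings in a single AUX
transaction works because the DPCD addresses are contiguous; the buffer built
above maps onto the register file as follows (addresses per the DP 1.2 spec,
shown for a 4-lane link):

	/* buf[0] -> 0x102 DP_TRAINING_PATTERN_SET
	 * buf[1] -> 0x103 DP_TRAINING_LANE0_SET
	 * buf[2] -> 0x104 DP_TRAINING_LANE1_SET
	 * buf[3] -> 0x105 DP_TRAINING_LANE2_SET
	 * buf[4] -> 0x106 DP_TRAINING_LANE3_SET
	 */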
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			uint8_t dp_train_pat)
+{
+	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+	intel_dp_set_signal_levels(intel_dp, DP);
+	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	intel_get_adjust_train(intel_dp, link_status);
+	intel_dp_set_signal_levels(intel_dp, DP);
+
+	I915_WRITE(intel_dp->output_reg, *DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	return ret == intel_dp->lane_count;
 }
 
 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2362,32 +2542,37 @@
 	uint8_t voltage;
 	int voltage_tries, loop_tries;
 	uint32_t DP = intel_dp->DP;
+	uint8_t link_config[2];
 
 	if (HAS_DDI(dev))
 		intel_ddi_prepare_link_retrain(encoder);
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-				  intel_dp->link_configuration,
-				  DP_LINK_CONFIGURATION_SIZE);
+	link_config[0] = intel_dp->link_bw;
+	link_config[1] = intel_dp->lane_count;
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
+	link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
 
 	DP |= DP_PORT_EN;
 
-	memset(intel_dp->train_set, 0, 4);
+	/* clock recovery */
+	if (!intel_dp_reset_link_train(intel_dp, &DP,
+				       DP_TRAINING_PATTERN_1 |
+				       DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to enable link training\n");
+		return;
+	}
+
 	voltage = 0xff;
 	voltage_tries = 0;
 	loop_tries = 0;
 	for (;;) {
-		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
-
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* Set training pattern 1 */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_1 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
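
For reference, the two link-configuration writes at the top of this function
land in adjacent DPCD ranges (addresses per the DP 1.2 spec; this layout note
is a sketch, not code from the patch):

	/* write at 0x100:
	 *	0x100 DP_LINK_BW_SET    = intel_dp->link_bw
	 *	0x101 DP_LANE_COUNT_SET = lane_count, plus the
	 *	      enhanced-framing bit when the sink supports it
	 * write at 0x107:
	 *	0x107 DP_DOWNSPREAD_CTRL              = 0
	 *	0x108 DP_MAIN_LINK_CHANNEL_CODING_SET = DP_SET_ANSI_8B10B
	 */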
@@ -2407,10 +2592,12 @@
 		if (i == intel_dp->lane_count) {
 			++loop_tries;
 			if (loop_tries == 5) {
-				DRM_DEBUG_KMS("too many full retries, give up\n");
+				DRM_ERROR("too many full retries, giving up\n");
 				break;
 			}
-			memset(intel_dp->train_set, 0, 4);
+			intel_dp_reset_link_train(intel_dp, &DP,
+						  DP_TRAINING_PATTERN_1 |
+						  DP_LINK_SCRAMBLING_DISABLE);
 			voltage_tries = 0;
 			continue;
 		}
@@ -2419,15 +2606,18 @@
 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
 			++voltage_tries;
 			if (voltage_tries == 5) {
-				DRM_DEBUG_KMS("too many voltage retries, give up\n");
+				DRM_ERROR("too many voltage retries, giving up\n");
 				break;
 			}
 		} else
 			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 	}
 
 	intel_dp->DP = DP;
@@ -2441,11 +2631,18 @@
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
+	if (!intel_dp_set_link_train(intel_dp, &DP,
+				     DP_TRAINING_PATTERN_2 |
+				     DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to start channel equalization\n");
+		return;
+	}
+
 	tries = 0;
 	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
-		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
@@ -2453,21 +2650,18 @@
 			break;
 		}
 
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_2 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
-
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-		if (!intel_dp_get_link_status(intel_dp, link_status))
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
 			break;
+		}
 
 		/* Make sure clock is still ok */
 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			cr_tries++;
 			continue;
 		}
@@ -2481,13 +2675,19 @@
 		if (tries > 5) {
 			intel_dp_link_down(intel_dp);
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			tries = 0;
 			cr_tries++;
 			continue;
 		}
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 		++tries;
 	}
 
@@ -2502,7 +2702,7 @@
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
 {
-	intel_dp_set_link_train(intel_dp, intel_dp->DP,
+	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
 				DP_TRAINING_PATTERN_DISABLE);
 }
 
@@ -2589,6 +2789,10 @@
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
 	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2604,11 +2808,16 @@
 
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
-				       intel_dp->psr_dpcd,
-				       sizeof(intel_dp->psr_dpcd));
-	if (is_edp_psr(intel_dp))
-		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+	if (is_edp(intel_dp)) {
+		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+					       intel_dp->psr_dpcd,
+					       sizeof(intel_dp->psr_dpcd));
+		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+			dev_priv->psr.sink_support = true;
+			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+		}
+	}
+
 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
 	      DP_DWN_STRM_PORT_PRESENT))
 		return true; /* native DP sink */
@@ -2728,7 +2937,6 @@
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
 	uint8_t *dpcd = intel_dp->dpcd;
-	bool hpd;
 	uint8_t type;
 
 	if (!intel_dp_get_dpcd(intel_dp))
@@ -2739,8 +2947,8 @@
 		return connector_status_connected;
 
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
-	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
-	if (hpd) {
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
 		uint8_t reg;
 		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
 						    &reg, 1))
@@ -2754,9 +2962,18 @@
 		return connector_status_connected;
 
 	/* Well we tried, say unknown for unreliable port types */
-	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
-		return connector_status_unknown;
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+		if (type == DP_DS_PORT_TYPE_VGA ||
+		    type == DP_DS_PORT_TYPE_NON_EDID)
+			return connector_status_unknown;
+	} else {
+		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			DP_DWN_STRM_PORT_TYPE_MASK;
+		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
+			return connector_status_unknown;
+	}
 
 	/* Anything else is out of spec, warn and ignore */
 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2830,19 +3047,11 @@
 
 	/* use cached edid if we have one */
 	if (intel_connector->edid) {
-		struct edid *edid;
-		int size;
-
 		/* invalid edid */
 		if (IS_ERR(intel_connector->edid))
 			return NULL;
 
-		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
-		if (!edid)
-			return NULL;
-
-		return edid;
+		return drm_edid_duplicate(intel_connector->edid);
 	}
 
 	return drm_get_edid(connector, adapter);
@@ -3050,7 +3259,6 @@
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_fini(&intel_connector->panel);
 
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -3121,7 +3329,7 @@
 bool intel_dpd_is_edp(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i;
 
 	if (!dev_priv->vbt.child_dev_num)
@@ -3130,8 +3338,9 @@
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->dvo_port == PORT_IDPD &&
-		    p_child->device_type == DEVICE_TYPE_eDP)
+		if (p_child->common.dvo_port == PORT_IDPD &&
+		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
 	}
 	return false;
@@ -3164,24 +3373,26 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec, final;
 	u32 pp_on, pp_off, pp_div, pp;
-	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
-		pp_control_reg = PCH_PP_CONTROL;
+		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_control_reg = PIPEA_PP_CONTROL;
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
 	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_control_reg, pp);
+	I915_WRITE(pp_ctrl_reg, pp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
@@ -3269,9 +3480,11 @@
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* And finally store the new values in the power sequencer. */
@@ -3288,12 +3501,15 @@
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
 	if (IS_VALLEYVIEW(dev)) {
-		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+		if (dp_to_dig_port(intel_dp)->port == PORT_B)
+			port_sel = PANEL_PORT_SELECT_DPB_VLV;
+		else
+			port_sel = PANEL_PORT_SELECT_DPC_VLV;
 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 		if (dp_to_dig_port(intel_dp)->port == PORT_A)
-			port_sel = PANEL_POWER_PORT_DP_A;
+			port_sel = PANEL_PORT_SELECT_DPA;
 		else
-			port_sel = PANEL_POWER_PORT_DP_D;
+			port_sel = PANEL_PORT_SELECT_DPD;
 	}
 
 	pp_on |= port_sel;
@@ -3346,7 +3562,6 @@
 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
 						      &power_seq);
 
-	ironlake_edp_panel_vdd_on(intel_dp);
 	edid = drm_get_edid(connector, &intel_dp->adapter);
 	if (edid) {
 		if (drm_add_edid_modes(connector, edid)) {
@@ -3378,8 +3593,6 @@
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 	}
 
-	ironlake_edp_panel_vdd_off(intel_dp, false);
-
 	intel_panel_init(&intel_connector->panel, fixed_mode);
 	intel_panel_setup_backlight(connector);
 
@@ -3536,11 +3749,11 @@
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dig_port);
 		return;
@@ -3559,12 +3772,12 @@
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
 	} else {
-		intel_encoder->pre_enable = intel_pre_enable_dp;
-		intel_encoder->enable = intel_enable_dp;
+		intel_encoder->pre_enable = g4x_pre_enable_dp;
+		intel_encoder->enable = g4x_enable_dp;
 	}
 
 	intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7f2b384..1e49aa8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
 /* the i915, i945 have a single sDVO i2c bus - which is different */
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
 
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
+#define INTEL_DSI_COMMAND_MODE	0
+#define INTEL_DSI_VIDEO_MODE	1
+
 struct intel_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
 	unsigned long quirks;
 
+	/* User requested mode, only valid as a starting point to
+	 * compute adjusted_mode, except in the case of (S)DVO where
+	 * it's also for the output timings of the (S)DVO chip.
+	 * adjusted_mode will then correspond to the (S)DVO chip's
+	 * preferred input timings. */
 	struct drm_display_mode requested_mode;
+	/* Actual pipe timings, i.e. what we program into the pipe timing
+	 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
 	struct drm_display_mode adjusted_mode;
+
+	/* Pipe source size (i.e. panel fitter input size).
+	 * All planes will be positioned inside this space,
+	 * and get clipped at the edges. */
+	int pipe_src_w, pipe_src_h;
+
 	/* Whether to set up the PCH/FDI. Note that we never allow sharing
 	 * between pch encoders and cpu encoders. */
 	bool has_pch_encoder;
@@ -262,7 +278,8 @@
 
 	/*
 	 * Frequency the dpll for the port should run at. Differs from the
-	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * already multiplied by pixel_multiplier.
 	 */
 	int port_clock;
 
@@ -288,6 +305,14 @@
 	struct intel_link_m_n fdi_m_n;
 
 	bool ips_enabled;
+
+	bool double_wide;
+};
+
+struct intel_pipe_wm {
+	struct intel_wm_level wm[5];
+	uint32_t linetime;
+	bool fbc_wm_enabled;
 };
 
 struct intel_crtc {
@@ -301,8 +326,9 @@
 	 * some outputs connected to this crtc.
 	 */
 	bool active;
+	unsigned long enabled_power_domains;
 	bool eld_vld;
-	bool primary_disabled; /* is the crtc obscured by a plane? */
+	bool primary_enabled; /* is the primary plane (partially) visible? */
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
 	bool pch_fifo_underrun_disabled;
+
+	/* per-pipe watermark state */
+	struct {
+		/* watermarks currently being used  */
+		struct intel_pipe_wm active;
+	} wm;
 };
 
 struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@
 };
 
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
-#define DP_LINK_CONFIGURATION_SIZE	9
 
 struct intel_dp {
 	uint32_t output_reg;
 	uint32_t aux_ch_ctl_reg;
 	uint32_t DP;
-	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
 	uint32_t color_range;
@@ -495,80 +525,6 @@
 	bool enable_stall_check;
 };
 
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
-				struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
-			    int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
-				      struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-				      struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
-			    bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
-			  enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
-				    struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
-				    struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-				      enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
-			    struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
-				   struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config,
-				    int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
-				     struct intel_crtc_config *pipe_config,
-				     int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
-				      u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
-					 enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
 struct intel_set_config {
 	struct drm_encoder **save_connector_encoders;
 	struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@
 	bool mode_changed;
 };
 
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
 
-
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
 {
 	return to_intel_connector(connector)->encoder;
 }
@@ -616,73 +568,95 @@
 	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+					   enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+					   enum transcoder pch_transcoder,
+					   bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+			struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
-					   struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-						    struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-			     enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
-	struct drm_framebuffer *release_fb;
-	bool load_detect_temp;
-	int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
-				       struct drm_display_mode *mode,
-				       struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
-					   struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-				    u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-				    u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_i915_gem_object *obj,
-				      struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
-				  struct intel_framebuffer *ifb,
-				  struct drm_mode_fb_cmd2 *mode_cmd,
-				  struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
-				   struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			       struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+			   struct intel_framebuffer *ifb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
@@ -696,104 +670,199 @@
 		       enum pipe pipe, bool state);
 #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
 #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-			bool state);
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+		     struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+					     unsigned int tiling_mode,
+					     unsigned int bpp,
+					     unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8_work(struct work_struct *__work);
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+				int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void i915_disable_vga_mem(struct drm_device *dev);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+void intel_display_set_init_power(struct drm_device *dev, bool enable);
 
-extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_suspend_hw(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
-			    struct drm_display_mode *mode);
-extern void intel_prepare_ddi(struct drm_device *dev);
-extern void hsw_fdi_link_train(struct drm_crtc *crtc);
-extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_plane *plane,
-					   struct drm_crtc *crtc,
-					   uint32_t sprite_width, int pixel_size,
-					   bool enabled, bool scaled);
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			     struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+			     struct intel_crtc_config *pipe_config);
+bool intel_dpd_is_edp(struct drm_device *dev);
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
 
-extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
-						    unsigned int tiling_mode,
-						    unsigned int bpp,
-						    unsigned int pitch);
 
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
 
-/* Power-related functions, located in intel_pm.c */
-extern void intel_init_pm(struct drm_device *dev);
-/* FBC */
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_update_fbc(struct drm_device *dev);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
 
-/* Power well */
-extern int i915_init_power_well(struct drm_device *dev);
-extern void i915_remove_power_well(struct drm_device *dev);
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
 
-extern bool intel_display_power_enabled(struct drm_device *dev,
-					enum intel_display_power_domain domain);
-extern void intel_init_power_well(struct drm_device *dev);
-extern void intel_set_power_well(struct drm_device *dev, bool enable);
-extern void intel_enable_gt_powersave(struct drm_device *dev);
-extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
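
The stub block keeps every caller identical whether CONFIG_DRM_I915_FBDEV is set or not; a hedged sketch of the setup-path shape, using only the declarations above:

	/* same call sequence with or without fbdev emulation compiled in */
	if (intel_fbdev_init(dev))		/* stub returns 0 when disabled */
		goto err;
	intel_fbdev_initial_config(dev);	/* no-op stub when disabled */
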
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+			       struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+				 struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+			    struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+			     struct intel_crtc_config *pipe_config,
+			     int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+			      struct intel_crtc_config *pipe_config,
+			      int fitting_mode);
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+			       u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct intel_connector *connector);
+void intel_panel_disable_backlight(struct intel_connector *connector);
+void intel_panel_destroy_backlight(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+				    struct drm_crtc *crtc,
+				    uint32_t sprite_width, int pixel_size,
+				    bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int intel_power_domains_init(struct drm_device *dev);
+void intel_power_domains_remove(struct drm_device *dev);
+bool intel_display_power_enabled(struct drm_device *dev,
+				 enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain);
+void intel_power_domains_init_hw(struct drm_device *dev);
+void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
 
-extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-				   enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-					      enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-extern void intel_ddi_get_config(struct intel_encoder *encoder,
-				 struct intel_crtc_config *pipe_config);
 
-extern void intel_display_handle_reset(struct drm_device *dev);
-extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe,
-						  bool enable);
-extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-						 enum transcoder pch_transcoder,
-						 bool enable);
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
-extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_update(struct drm_device *dev);
-extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-			      bool switch_to_fclk, bool allow_power_down);
-extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
-extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
-			       uint32_t mask);
-extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
-			       uint32_t mask);
-extern void hsw_enable_pc8_work(struct work_struct *__work);
-extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
-extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
-extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+
+
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 0000000..d257b09
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
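
The device table lands empty here; panel drivers are expected to populate it in follow-up patches. A hypothetical entry (every name below is invented for illustration) would bind a panel id to an intel_dsi_dev_ops vtable, which intel_dsi_init() probes in order:

	/* hypothetical example entry -- not part of this patch */
	{
		.panel_id = 0x0001,		/* invented id */
		.name = "example-dsi-panel",	/* invented name */
		.type = INTEL_DSI_VIDEO_MODE,
		.dev_ops = &example_panel_ops,	/* hypothetical vtable */
	},
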
+
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+			   u32 mask)
+{
+	u32 tmp = vlv_cck_read(dev_priv, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Enable bandgap fix in GOP driver */
+	vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+	msleep(20);
+
+	/* Turn Display Trunk on */
+	vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Need a huge delay, otherwise the clock is not stable */
+	msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *config)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+	struct drm_display_mode *mode = &config->requested_mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (fixed_mode)
+		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	if (intel_dsi->dev.dev_ops->mode_fixup)
+		return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+							  mode, adjusted_mode);
+
+	return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if ((temp & DEVICE_READY) == 0) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+	} else if (temp & ULPS_STATE_MASK) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+		/*
+		 * We need to ensure that there is a minimum of 1 ms time
+		 * available before clearing the ULPS exit state.
+		 */
+		msleep(2);
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+
+	if (is_cmd_mode(intel_dsi))
+		I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+	if (is_vid_mode(intel_dsi)) {
+		msleep(20); /* XXX */
+		dpi_send_cmd(intel_dsi, TURN_ON);
+		msleep(100);
+
+		/* assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+	}
+
+	intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+	if (is_vid_mode(intel_dsi)) {
+		dpi_send_cmd(intel_dsi, SHUTDOWN);
+		msleep(10);
+
+		/* de-assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+		msleep(2);
+	}
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if (temp & DEVICE_READY) {
+		temp &= ~DEVICE_READY;
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 port, func;
+	enum pipe p;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: this only works for one DSI output */
+	for (p = PIPE_A; p <= PIPE_B; p++) {
+		port = I915_READ(MIPI_PORT_CTRL(p));
+		func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+		if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+			if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+				*pipe = p;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+		return MODE_NO_DBLESCAN;
+	}
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+	switch (divider) {
+	case ESCAPE_CLOCK_DIVIDER_1:
+	default:
+		return 20 * us;
+	case ESCAPE_CLOCK_DIVIDER_2:
+		return 10 * us;
+	case ESCAPE_CLOCK_DIVIDER_4:
+		return 5 * us;
+	}
+}
+
+/* convert a pixel count into txbyteclkhs (high speed byte clock) units */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
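
As a worked example of the two conversion helpers, assume a 1920-pixel-wide mode, RGB888 (24 bpp), 4 lanes, and the 20 MHz escape clock that the mode_set path below configures:

	/* 1920 px * 24 bpp = 46080 bits = 5760 bytes; spread over 4 lanes,
	 * DIV_ROUND_UP(5760, 4) = 1440 txbyteclkhs */
	u16 hactive = txbyteclkhs(1920, 24, 4);

	/* divider 1 keeps the escape clock at 20 MHz, i.e. 20 cycles per us,
	 * so 100 us comes out as 2000 cycles -- the value the MIPI_INIT_COUNT
	 * write below ends up programming */
	u16 init_count = txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100);
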
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	unsigned int lane_count = intel_dsi->lane_count;
+
+	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+	hactive = mode->hdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	/* horizontal values are in terms of high speed byte clock */
+	hactive = txbyteclkhs(hactive, bpp, lane_count);
+	hfp = txbyteclkhs(hfp, bpp, lane_count);
+	hsync = txbyteclkhs(hsync, bpp, lane_count);
+	hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+	I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+	I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+	/* only meaningful in video mode with non-burst sync pulses; can be
+	 * zero for non-burst sync events and burst modes */
+	I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+	I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+	/* vertical values are in terms of lines */
+	I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+	I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+	I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	u32 val, tmp;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+	/* Update the DSI PLL */
+	vlv_enable_dsi_pll(intel_encoder);
+
+	/* XXX: Location of the call */
+	band_gap_wa(dev_priv);
+
+	/* escape clock divider, 20 MHz, shared between pipes A and C. Device
+	 * ready must be off when changing this! txclkesc? */
+	tmp = I915_READ(MIPI_CTRL(0));
+	tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+	I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+	/* read request priority is per pipe */
+	tmp = I915_READ(MIPI_CTRL(pipe));
+	tmp &= ~READ_REQUEST_PRIORITY_MASK;
+	I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+	/* XXX: why here, why like this? handling in irq handler?! */
+	I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+	I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+	I915_WRITE(MIPI_DPHY_PARAM(pipe),
+		   0x3c << EXIT_ZERO_COUNT_SHIFT |
+		   0x1f << TRAIL_COUNT_SHIFT |
+		   0xc5 << CLK_ZERO_COUNT_SHIFT |
+		   0x1f << PREPARE_COUNT_SHIFT);
+
+	I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+		   adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+		   adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+	set_dsi_timings(encoder, adjusted_mode);
+
+	val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+	if (is_cmd_mode(intel_dsi)) {
+		val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+		val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+	} else {
+		val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+		/* XXX: cross-check bpp vs. pixel format? */
+		val |= intel_dsi->pixel_format;
+	}
+	I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+	/* Timeouts for recovery: roughly one frame, IIUC. If the counter
+	 * expires, EOT is sent and the link enters stop state. */
+
+	/*
+	 * In burst mode, use a value greater than one DPI line time in byte
+	 * clocks (txbyteclkhs); to let this timer expire, programming one
+	 * more than that value is recommended.
+	 *
+	 * In non-burst mode, use a value greater than one DPI frame time in
+	 * byte clocks (txbyteclkhs); again, one more than that value is
+	 * recommended.
+	 *
+	 * In DBI-only mode, use a value greater than one DBI frame time in
+	 * byte clocks (txbyteclkhs); again, one more than that value is
+	 * recommended.
+	 */
+
+	if (is_vid_mode(intel_dsi) &&
+	    intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->htotal, bpp,
+				       intel_dsi->lane_count) + 1);
+	} else {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->vtotal *
+				       adjusted_mode->htotal,
+				       bpp, intel_dsi->lane_count) + 1);
+	}
+	I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+	I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+	I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+
+	/* dphy stuff */
+
+	/* in terms of low power clock */
+	I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+	/* recovery disables */
+	I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+	/* in terms of txbyteclkhs. actual high to low switch +
+	 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+	 *
+	 * XXX: write MIPI_STOP_STATE_STALL?
+	 */
+	I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+	/* XXX: low power clock equivalence in terms of byte clock. the number
+	 * of byte clocks occupied in one low power clock. based on txbyteclkhs
+	 * and txclkesc. txclkesc time / txbyteclk time * (105 +
+	 * MIPI_STOP_STATE_STALL) / 105.???
+	 */
+	I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+	/* The bandwidth needed to transmit 16 long packets of 252 bytes each,
+	 * meant for the DCS write memory command, is programmed in this
+	 * register in terms of byte clocks. Depending on the DSI transfer rate
+	 * and the number of lanes configured, the time taken to transmit 16
+	 * long packets in a DSI stream varies. */
+	I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+	I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+		   0xa << LP_HS_SSW_CNT_SHIFT |
+		   0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+	if (is_vid_mode(intel_dsi))
+		I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+			   intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+	DRM_DEBUG_KMS("\n");
+	return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	DRM_DEBUG_KMS("\n");
+	intel_panel_fini(&intel_connector->panel);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_dsi_detect,
+	.destroy = intel_dsi_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *fixed_mode = NULL;
+	const struct intel_dsi_device *dsi;
+	unsigned int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return false;
+
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return false;
+	}
+
+	intel_encoder = &intel_dsi->base;
+	encoder = &intel_encoder->base;
+	intel_dsi->attached_connector = intel_connector;
+
+	connector = &intel_connector->base;
+
+	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+	/* XXX: very likely not all of these are needed */
+	intel_encoder->hot_plug = intel_dsi_hot_plug;
+	intel_encoder->compute_config = intel_dsi_compute_config;
+	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+	intel_encoder->pre_enable = intel_dsi_pre_enable;
+	intel_encoder->enable = intel_dsi_enable;
+	intel_encoder->mode_set = intel_dsi_mode_set;
+	intel_encoder->disable = intel_dsi_disable;
+	intel_encoder->post_disable = intel_dsi_post_disable;
+	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+	intel_encoder->get_config = intel_dsi_get_config;
+
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+		dsi = &intel_dsi_devices[i];
+		intel_dsi->dev = *dsi;
+
+		if (dsi->dev_ops->init(&intel_dsi->dev))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(intel_dsi_devices)) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	intel_encoder->type = INTEL_OUTPUT_DSI;
+	intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+	intel_encoder->cloneable = false;
+	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+
+	drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+	drm_sysfs_connector_add(connector);
+
+	fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+	if (!fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		goto err;
+	}
+
+	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+
+	return true;
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 0000000..c7765f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+	unsigned int panel_id;
+	const char *name;
+	int type;
+	const struct intel_dsi_dev_ops *dev_ops;
+	void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+	bool (*init)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*enable)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*disable)(struct intel_dsi_device *dsi);
+
+	int (*mode_valid)(struct intel_dsi_device *dsi,
+			  struct drm_display_mode *mode);
+
+	bool (*mode_fixup)(struct intel_dsi_device *dsi,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+
+	void (*mode_set)(struct intel_dsi_device *dsi,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+
+	enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+	bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+	struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+	void (*destroy)(struct intel_dsi_device *dsi);
+};
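
A minimal sketch of a sub-encoder implementing this vtable; all names are hypothetical and only the probe hook is filled in:

	/* hypothetical panel driver -- illustrative only */
	static bool example_panel_init(struct intel_dsi_device *dsi)
	{
		dsi->dev_priv = NULL;	/* driver-private state goes here */
		return true;		/* returning true claims the panel */
	}

	static const struct intel_dsi_dev_ops example_panel_ops = {
		.init = example_panel_init,
		/* intel_dsi.c also calls .enable, .disable, .detect,
		 * .mode_valid and .get_modes, so a real driver fills
		 * those in as well */
	};
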
+
+struct intel_dsi {
+	struct intel_encoder base;
+
+	struct intel_dsi_device dev;
+
+	struct intel_connector *attached_connector;
+
+	/* if true, use HS mode, otherwise LP */
+	bool hs;
+
+	/* virtual channel */
+	int channel;
+
+	/* number of DSI lanes */
+	unsigned int lane_count;
+
+	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+	u32 pixel_format;
+
+	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+	u32 video_mode_format;
+
+	/* eot for MIPI_EOT_DISABLE register */
+	u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 0000000..7c40f98
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	val = I915_READ(MIPI_INTR_STAT(pipe));
+
+#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
+	DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+		      "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+		      "\n", pipe, val,
+		      STAT_BIT(val, TEARING_EFFECT),
+		      STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+		      STAT_BIT(val, GEN_READ_DATA_AVAIL),
+		      STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+		      STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+		      STAT_BIT(val, RX_PROT_VIOLATION),
+		      STAT_BIT(val, RX_INVALID_TX_LENGTH),
+		      STAT_BIT(val, ACK_WITH_NO_ERROR),
+		      STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+		      STAT_BIT(val, LP_RX_TIMEOUT),
+		      STAT_BIT(val, HS_TX_TIMEOUT),
+		      STAT_BIT(val, DPI_FIFO_UNDERRUN),
+		      STAT_BIT(val, LOW_CONTENTION),
+		      STAT_BIT(val, HIGH_CONTENTION),
+		      STAT_BIT(val, TXDSI_VC_ID_INVALID),
+		      STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+		      STAT_BIT(val, TXCHECKSUM_ERROR),
+		      STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+		      STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+		      STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+		      STAT_BIT(val, RXDSI_VC_ID_INVALID),
+		      STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+		      STAT_BIT(val, RXCHECKSUM_ERROR),
+		      STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+		      STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+		      STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+		      STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+		      STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+		      STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+		      STAT_BIT(val, RXEOT_SYNC_ERROR),
+		      STAT_BIT(val, RXSOT_SYNC_ERROR),
+		      STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+	DSI_DCS,
+	DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 temp;
+	u32 mask = DBI_FIFO_EMPTY;
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+	temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+	temp &= ~DBI_HS_LP_MODE_MASK;
+	temp |= enable ? DBI_HS_MODE : DBI_LP_MODE;
+	I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), temp);
+
+	intel_dsi->hs = enable;
+}
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+			     u8 data_type, u16 data)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 ctrl_reg;
+	u32 ctrl;
+	u32 mask;
+
+	DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+		      channel, data_type, data);
+
+	if (intel_dsi->hs) {
+		ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+		mask = HS_CTRL_FIFO_FULL;
+	} else {
+		ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+		mask = LP_CTRL_FIFO_FULL;
+	}
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+		print_stat(intel_dsi);
+	}
+
+	/*
+	 * Note: This function is also used for long packets, with length passed
+	 * as data, since SHORT_PACKET_PARAM_SHIFT ==
+	 * LONG_PACKET_WORD_COUNT_SHIFT.
+	 */
+	ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+		channel << VIRTUAL_CHANNEL_SHIFT |
+		data_type << DATA_TYPE_SHIFT;
+
+	I915_WRITE(ctrl_reg, ctrl);
+
+	return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+			    u8 data_type, const u8 *data, int len)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 data_reg;
+	int i, j, n;
+	u32 mask;
+
+	DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+		      channel, data_type, len);
+
+	if (intel_dsi->hs) {
+		data_reg = MIPI_HS_GEN_DATA(pipe);
+		mask = HS_DATA_FIFO_FULL;
+	} else {
+		data_reg = MIPI_LP_GEN_DATA(pipe);
+		mask = LP_DATA_FIFO_FULL;
+	}
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+		DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
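+	/* Pack the payload into dwords LSB first; e.g. the bytes
+	 * { 0x11, 0x22, 0x33, 0x44 } go out as a single write of 0x44332211. */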
+	for (i = 0; i < len; i += n) {
+		u32 val = 0;
+		n = min_t(int, len - i, 4);
+
+		for (j = 0; j < n; j++)
+			val |= *data++ << 8 * j;
+
+		I915_WRITE(data_reg, val);
+		/* XXX: check for data fifo full, once that is set, write 4
+		 * dwords, then wait for not set, then continue. */
+	}
+
+	return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+			       int channel, const u8 *data, int len,
+			       enum dsi_type type)
+{
+	int ret;
+
+	if (len == 0) {
+		BUG_ON(type == DSI_DCS);
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+					0);
+	} else if (len == 1) {
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					type == DSI_GENERIC ?
+					MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+					MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+	} else if (len == 2) {
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					type == DSI_GENERIC ?
+					MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+					MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+					(data[1] << 8) | data[0]);
+	} else {
+		ret = dsi_vc_send_long(intel_dsi, channel,
+				       type == DSI_GENERIC ?
+				       MIPI_DSI_GENERIC_LONG_WRITE :
+				       MIPI_DSI_DCS_LONG_WRITE, data, len);
+	}
+
+	return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+		     const u8 *data, int len)
+{
+	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+			 const u8 *data, int len)
+{
+	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+					int channel, u8 dcs_cmd)
+{
+	return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+				 dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+					    int channel, u8 *reqdata,
+					    int reqlen)
+{
+	u16 data;
+	u8 data_type;
+
+	switch (reqlen) {
+	case 0:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+		data = 0;
+		break;
+	case 1:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+		data = reqdata[0];
+		break;
+	case 2:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+		data = (reqdata[1] << 8) | reqdata[0];
+		break;
+	default:
+		BUG();
+	}
+
+	return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+				u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int i, len = 0;
+	u32 data_reg, val;
+
+	if (intel_dsi->hs)
+		data_reg = MIPI_HS_GEN_DATA(pipe);
+	else
+		data_reg = MIPI_LP_GEN_DATA(pipe);
+
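+	/* Unpack each dword LSB first, mirroring the packing done in
+	 * dsi_vc_send_long(). */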
+	while (len < buflen) {
+		val = I915_READ(data_reg);
+		for (i = 0; i < 4 && len < buflen; i++, len++)
+			buf[len] = val >> 8 * i;
+	}
+
+	return len;
+}
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+		    u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+	int ret;
+
+	/*
+	 * XXX: should issue multiple read requests and reads if request is
+	 * longer than MIPI_MAX_RETURN_PKT_SIZE
+	 */
+
+	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+	ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+	if (ret)
+		return ret;
+
+	mask = GEN_READ_DATA_AVAIL;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for read data.\n");
+
+	ret = dsi_read_data_return(intel_dsi, buf, buflen);
+	if (ret < 0)
+		return ret;
+
+	if (ret != buflen)
+		return -EIO;
+
+	return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+			u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+	int ret;
+
+	/*
+	 * XXX: should issue multiple read requests and reads if request is
+	 * longer than MIPI_MAX_RETURN_PKT_SIZE
+	 */
+
+	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+	ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+					       reqlen);
+	if (ret)
+		return ret;
+
+	mask = GEN_READ_DATA_AVAIL;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for read data.\n");
+
+	ret = dsi_read_data_return(intel_dsi, buf, buflen);
+	if (ret < 0)
+		return ret;
+
+	if (ret != buflen)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+
+	/* XXX: pipe, hs */
+	if (intel_dsi->hs)
+		cmd &= ~DPI_LP_MODE;
+	else
+		cmd |= DPI_LP_MODE;
+
+	/* DPI virtual channel?! */
+
+	mask = DPI_FIFO_EMPTY;
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
+
+	/* clear bit */
+	I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+	/* XXX: old code skips write if control unchanged */
+	if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+		DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+	I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+	mask = SPL_PKT_SENT_INTERRUPT;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 0000000..54c8a23
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_CMD_H
+#define _INTEL_DSI_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+		     const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+			 const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+		    u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+			u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+				     int channel, u8 dcs_cmd)
+{
+	return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
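+
+/*
+ * Usage sketch: dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON)
+ * would send the single-byte DCS command 0x29 on virtual channel 0.
+ */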
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+				     int channel, u8 dcs_cmd, u8 param)
+{
+	u8 buf[2] = { dcs_cmd, param };
+	return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+					 int channel)
+{
+	return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+					 int channel, u8 param)
+{
+	return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+					 int channel, u8 param1, u8 param2)
+{
+	u8 buf[2] = { param1, param2 };
+	return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+					int channel, u8 *buf, int buflen)
+{
+	return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+					int channel, u8 param, u8 *buf,
+					int buflen)
+{
+	return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+					int channel, u8 param1, u8 param2,
+					u8 *buf, int buflen)
+{
+	u8 req[2] = { param1, param2 };
+
+	return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
+
+#endif /* _INTEL_DSI_CMD_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 0000000..44279b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Shobhit Kumar <shobhit.kumar@intel.com>
+ *	Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE		4
+#define DSI_HSE_PACKET_SIZE		4
+#define DSI_HSA_PACKET_EXTRA_SIZE	6
+#define DSI_HBP_PACKET_EXTRA_SIZE	6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE	6
+#define DSI_HFP_PACKET_EXTRA_SIZE	6
+#define DSI_EOTP_PACKET_SIZE		4
+
+struct dsi_mnp {
+	u32 dsi_pll_ctrl;
+	u32 dsi_pll_div;
+};
+
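+/*
+ * The M divider is apparently programmed as an LFSR seed rather than as a
+ * plain binary value; this table maps each m in [62, 92] to the seed that
+ * dsi_calc_mnp() writes into the M1 divider field.
+ */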
+static const u32 lfsr_converts[] = {
+	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35							/* 91 - 92 */
+};
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+			  int pixel_format, int video_mode_format,
+			  int lane_count, bool eotp)
+{
+	u32 bpp;
+	u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+	u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+	u32 bytes_per_line, bytes_per_frame;
+	u32 num_frames;
+	u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+	u32 dsi_bit_clock_hz;
+	u32 dsi_clk;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	hactive = mode->hdisplay;
+	vactive = mode->vdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+	hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+	hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+	hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+	bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+		DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+		hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+		hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+		hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+	/*
+	 * XXX: Need to accurately calculate LP to HS transition timeout and add
+	 * it to bytes_per_line/bytes_per_frame.
+	 */
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+	bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+		vactive * bytes_per_line + vfp * bytes_per_line;
+
+	if (eotp &&
+	    (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+	     video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+		bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+	num_frames = drm_mode_vrefresh(mode);
+	bytes_per_x_frames = num_frames * bytes_per_frame;
+
+	bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+	dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		dsi_clk *= 2;
+
+	return dsi_clk;
+}
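+
+/*
+ * Worked example with made-up numbers: a mode needing 4,000,000 bytes per
+ * frame at 60 fps over 4 lanes comes to 60 * 4000000 / 4 = 60 MB/s per
+ * lane, i.e. a dsi_clk of 60 * 8 = 480 (Mbps per lane).
+ */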
+
+#ifdef MNP_FROM_TABLE
+
+struct dsi_clock_table {
+	u32 freq;
+	u8 m;
+	u8 p;
+};
+
+static const struct dsi_clock_table dsi_clk_tbl[] = {
+	{300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
+	{343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
+	{383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
+	{401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
+	{405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
+	{409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
+	{413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
+	{417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
+	{430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
+	{470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
+	{510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
+	{550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
+	{590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
+	{630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
+	{670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
+	{710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
+	{750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
+	{790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
+	{1000, 80, 2},		/* dsi clock frequency in MHz */
+};
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+	unsigned int i;
+	u8 m;
+	u8 n;
+	u8 p;
+	u32 m_seed;
+
+	if (dsi_clk < 300 || dsi_clk > 1000)
+		return -ECHRNG;
+
+	/* find the first entry above dsi_clk, without running past the table */
+	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
+		if (dsi_clk_tbl[i].freq > dsi_clk)
+			break;
+	}
+
+	m = dsi_clk_tbl[i].m;
+	p = dsi_clk_tbl[i].p;
+	m_seed = lfsr_converts[m - 62];
+	n = 1;
+	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
+	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+		m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+	return 0;
+}
+
+#else
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+	u32 m, n, p;
+	u32 ref_clk;
+	u32 error;
+	u32 tmp_error;
+	u32 target_dsi_clk;
+	u32 calc_dsi_clk;
+	u32 calc_m;
+	u32 calc_p;
+	u32 m_seed;
+
+	if (dsi_clk < 300 || dsi_clk > 1150) {
+		DRM_ERROR("DSI CLK Out of Range\n");
+		return -ECHRNG;
+	}
+
+	ref_clk = 25000;
+	target_dsi_clk = dsi_clk * 1000;
+	error = 0xFFFFFFFF;
+	calc_m = 0;
+	calc_p = 0;
+
+	for (m = 62; m <= 92; m++) {
+		for (p = 2; p <= 6; p++) {
+
+			calc_dsi_clk = (m * ref_clk) / p;
+			if (calc_dsi_clk >= target_dsi_clk) {
+				tmp_error = calc_dsi_clk - target_dsi_clk;
+				if (tmp_error < error) {
+					error = tmp_error;
+					calc_m = m;
+					calc_p = p;
+				}
+			}
+		}
+	}
+
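+	/* e.g. dsi_clk = 500 settles on m = 80, p = 4, since
+	 * 80 * 25000 / 4 kHz hits the 500000 kHz target exactly */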
+	m_seed = lfsr_converts[calc_m - 62];
+	n = 1;
+	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+		m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+	return 0;
+}
+
+#endif
+
+/*
+ * XXX: The muxing and gating is hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int ret;
+	struct dsi_mnp dsi_mnp;
+	u32 dsi_clk;
+
+	dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
+				 intel_dsi->video_mode_format,
+				 intel_dsi->lane_count, !intel_dsi->eot_disable);
+
+	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+	if (ret) {
+		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+		return;
+	}
+
+	dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+		      dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	vlv_configure_dsi_pll(encoder);
+
+	/* wait at least 0.5 us after ungating before enabling VCO */
+	usleep_range(1, 10);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp |= DSI_PLL_VCO_EN;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+		DRM_ERROR("DSI PLL lock failed\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp &= ~DSI_PLL_VCO_EN;
+	tmp |= DSI_PLL_LDO_GATE;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df5..3c77365 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
+	pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -171,11 +173,16 @@
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
 	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 	I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+					 &crtc->config.requested_mode,
+					 &crtc->config.adjusted_mode);
+
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
@@ -184,6 +191,7 @@
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	struct drm_crtc *crtc;
+	struct intel_crtc_config *config;
 
 	/* dvo supports only 2 dpms states. */
 	if (mode != DRM_MODE_DPMS_ON)
@@ -204,10 +212,16 @@
 	/* We call connector dpms manually below in case pipe dpms doesn't
 	 * change due to cloning. */
 	if (mode == DRM_MODE_DPMS_ON) {
+		config = &to_intel_crtc(crtc)->config;
+
 		intel_dvo->base.connectors_active = true;
 
 		intel_crtc_update_dpms(crtc);
 
+		intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+						 &config->requested_mode,
+						 &config->adjusted_mode);
+
 		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 	} else {
 		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -267,11 +281,6 @@
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 	}
 
-	if (intel_dvo->dev.dev_ops->mode_fixup)
-		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
-							  &pipe_config->requested_mode,
-							  adjusted_mode);
-
 	return true;
 }
 
@@ -299,10 +308,6 @@
 		break;
 	}
 
-	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-					 &crtc->config.requested_mode,
-					 adjusted_mode);
-
 	/* Save the data order, since I don't know what it should be set to. */
 	dvo_val = I915_READ(dvo_reg) &
 		  (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -370,7 +375,6 @@
 
 static void intel_dvo_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -451,11 +455,11 @@
 	int i;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
 
-	intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+	intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
 	if (!intel_dvo)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dvo);
 		return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
similarity index 89%
rename from drivers/gpu/drm/i915/intel_fb.c
rename to drivers/gpu/drm/i915/intel_fbdev.c
index bc21000..895fcb4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
-						      8), 64);
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
 							  sizes->surface_depth);
 
@@ -184,6 +184,27 @@
 	return ret;
 }
 
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 	.gamma_set = intel_crtc_fb_gamma_set,
 	.gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+	ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
 	if (!ifbdev)
 		return -ENOMEM;
 
@@ -225,7 +246,7 @@
 
 	ret = drm_fb_helper_init(dev, &ifbdev->helper,
 				 INTEL_INFO(dev)->num_pipes,
-				 INTELFB_CONN_LIMIT);
+				 4);
 	if (ret) {
 		kfree(ifbdev);
 		return ret;
@@ -278,13 +299,13 @@
 
 MODULE_LICENSE("GPL and additional rights");
 
-void intel_fb_output_poll_changed(struct drm_device *dev)
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
 
-void intel_fb_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_device *dev)
 {
 	int ret;
 	struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc8..03f9ca7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	u32 tmp, flags = 0;
+	int dotclock;
 
 	tmp = I915_READ(intel_hdmi->hdmi_reg);
 
@@ -727,6 +728,16 @@
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
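+	/* 12bpc pixels need 1.5x the link bandwidth of 8bpc, so the dot
+	 * clock is the port clock scaled down by 2/3 */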
+	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -836,7 +847,7 @@
 
 	if (IS_G4X(dev))
 		return 165000;
-	else if (IS_HASWELL(dev))
+	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 		return 300000;
 	else
 		return 225000;
@@ -862,7 +873,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+	int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
 	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
 	int desired_bpp;
 
@@ -904,7 +915,7 @@
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->clock > portclock_limit) {
+	if (adjusted_mode->crtc_clock > portclock_limit) {
 		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
 		return false;
 	}
@@ -1063,7 +1074,7 @@
 	return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@
 
 	/* Enable clock channels for this port */
 	mutex_lock(&dev_priv->dpio_lock);
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
 
 	/* HDMI 1.0V-2dB */
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
 			 0x2b245f5f);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 			 0x5578b83a);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
 			 0x0c782040);
-	vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
 			 0x2b247878);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
 			 0x00002000);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
 			 DPIO_TX_OCALINIT_EN);
 
 	/* Program lane clock */
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
 			 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
 			 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1116,55 +1127,60 @@
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	if (!IS_VALLEYVIEW(dev))
 		return;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
 			 0x00002000);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
 			 DPIO_TX_OCALINIT_EN);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
-static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1211,6 +1227,7 @@
 
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
+	connector->stereo_allowed = 1;
 
 	switch (port) {
 	case PORT_B:
@@ -1275,11 +1292,11 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dig_port);
 		return;
@@ -1296,10 +1313,10 @@
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
-		intel_encoder->pre_enable = intel_hdmi_pre_enable;
+		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
-		intel_encoder->post_disable = intel_hdmi_post_disable;
+		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->enable = intel_enable_hdmi;
 	}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7..2ca17b1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+enum disp_clk {
+	CDCLK,
+	CZCLK
+};
+
 struct gmbus_port {
 	const char *name;
 	int reg;
@@ -58,10 +63,69 @@
 	return container_of(i2c, struct intel_gmbus, adapter);
 }
 
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+			    enum disp_clk clk)
+{
+	u32 reg_val;
+	int clk_ratio;
+
+	reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
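+	/* each ratio field stores (divide ratio - 1), hence the +1 below */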
+	if (clk == CDCLK)
+		clk_ratio =
+			((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+	else
+		clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+	return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+	int vco_freq[] = { 800, 1600, 2000, 2400 };
+	int gmbus_freq = 0, cdclk_div, hpll_freq;
+
+	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+	/* Skip setting the gmbus freq if BIOS has already programmed it */
+	if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
+		return;
+
+	/* Obtain SKU information */
+	mutex_lock(&dev_priv->dpio_lock);
+	hpll_freq =
+		vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Get the CDCLK divide ratio */
+	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (cdclk_div)
+		gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+
+	if (WARN_ON(gmbus_freq == 0))
+		return;
+
+	I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
+
 void
 intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * In a BIOS-less system, program the correct gmbus frequency
+	 * before reading the EDID.
+	 */
+	if (IS_VALLEYVIEW(dev))
+		gmbus_set_freq(dev_priv);
+
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b8af94a..c3b4da7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 lvds_reg, tmp, flags = 0;
+	int dotclock;
 
 	if (HAS_PCH_SPLIT(dev))
 		lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@
 
 		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
 	}
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -198,7 +206,8 @@
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -217,13 +226,15 @@
 	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
-	intel_panel_enable_backlight(dev, intel_crtc->pipe);
+	intel_panel_enable_backlight(intel_connector);
 }
 
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -235,7 +246,7 @@
 		stat_reg = PP_STATUS;
 	}
 
-	intel_panel_disable_backlight(dev);
+	intel_panel_disable_backlight(intel_connector);
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
@@ -466,7 +477,6 @@
 
 	intel_panel_fini(&lvds_connector->base.panel);
 
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -802,7 +812,8 @@
 		return true;
 
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		struct child_device_config *child = dev_priv->vbt.child_dev + i;
+		union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+		struct old_child_dev_config *child = &uchild->old;
 
 		/* If the device type is not LFP, continue.
 		 * We have to check both the new identifiers as well as the
@@ -956,11 +967,11 @@
 		}
 	}
 
-	lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
 
-	lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+	lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
 	if (!lvds_connector) {
 		kfree(lvds_encoder);
 		return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771f..1b2f41c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
+#define PCI_ASLE		0xe4
+#define PCI_ASLS		0xfc
+#define PCI_SWSCI		0xe8
+#define PCI_SWSCI_SCISEL	(1 << 15)
+#define PCI_SWSCI_GSSCIE	(1 << 0)
 
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
@@ -107,25 +110,38 @@
 	u32 epfm;       /* enabled panel fitting modes */
 	u8 plut[74];    /* panel LUT and identifier */
 	u32 pfmb;       /* PWM freq and min brightness */
-	u8 rsvd[102];
+	u32 cddv;       /* color correction default values */
+	u32 pcft;       /* power conservation features */
+	u32 srot;       /* supported rotation angles */
+	u32 iuer;       /* IUER events */
+	u8 rsvd[86];
 } __attribute__((packed));
 
 /* Driver readiness indicator */
 #define ASLE_ARDY_READY		(1 << 0)
 #define ASLE_ARDY_NOT_READY	(0 << 0)
 
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM     (1 << 0)
-#define ASLE_SET_BACKLIGHT     (1 << 1)
-#define ASLE_SET_PFIT          (1 << 2)
-#define ASLE_SET_PWM_FREQ      (1 << 3)
-#define ASLE_REQ_MSK           0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED	(1<<10)
-#define ASLE_BACKLIGHT_FAILED	(1<<12)
-#define ASLE_PFIT_FAILED	(1<<14)
-#define ASLE_PWM_FREQ_FAILED	(1<<16)
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM		(1 << 0)
+#define ASLC_SET_BACKLIGHT		(1 << 1)
+#define ASLC_SET_PFIT			(1 << 2)
+#define ASLC_SET_PWM_FREQ		(1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES	(1 << 4)
+#define ASLC_BUTTON_ARRAY		(1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR	(1 << 6)
+#define ASLC_DOCKING_INDICATOR		(1 << 7)
+#define ASLC_ISCT_STATE_CHANGE		(1 << 8)
+#define ASLC_REQ_MSK			0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED		(1 << 10)
+#define ASLC_BACKLIGHT_FAILED		(1 << 12)
+#define ASLC_PFIT_FAILED		(1 << 14)
+#define ASLC_PWM_FREQ_FAILED		(1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED	(1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED	(1 << 20)
+#define ASLC_CONVERTIBLE_FAILED		(1 << 22)
+#define ASLC_DOCKING_FAILED		(1 << 24)
+#define ASLC_ISCT_STATE_FAILED		(1 << 26)
 
 /* Technology enabled indicator */
 #define ASLE_TCHE_ALS_EN	(1 << 0)
@@ -151,6 +167,60 @@
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+/* IUER */
+#define ASLE_IUER_DOCKING		(1 << 7)
+#define ASLE_IUER_CONVERTIBLE		(1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN	(1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN	(1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN		(1 << 2)
+#define ASLE_IUER_WINDOWS_BTN		(1 << 1)
+#define ASLE_IUER_POWER_BTN		(1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR		(1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT	1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK	(0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT	8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT	8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT	5
+#define SWSCI_SCIC_EXIT_STATUS_MASK	(7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS	1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+	((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+	 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
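+/* e.g. SWSCI_SBCB_DISPLAY_POWER_STATE below encodes to (6 << 1) | (8 << 8) = 0x80c */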
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA			4
+#define SWSCI_GBDA_SUPPORTED_CALLS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD		SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB			6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
 #define ACPI_OTHER_OUTPUT (0<<8)
 #define ACPI_VGA_OUTPUT (1<<8)
 #define ACPI_TV_OUTPUT (2<<8)
@@ -158,24 +228,224 @@
 #define ACPI_LVDS_OUTPUT (4<<8)
 
 #ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+	u32 main_function, sub_function, scic;
+	u16 pci_swsci;
+	u32 dslp;
+
+	if (!swsci)
+		return -ENODEV;
+
+	main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+		SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+	sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+		SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+	/* Check if we can call the function. See swsci_setup for details. */
+	if (main_function == SWSCI_SBCB) {
+		if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	} else if (main_function == SWSCI_GBDA) {
+		if ((dev_priv->opregion.swsci_gbda_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	}
+
+	/* Driver sleep timeout in ms. */
+	dslp = ioread32(&swsci->dslp);
+	if (!dslp) {
+		/* The spec says 2ms should be the default, but it's too small
+		 * for some machines. */
+		dslp = 50;
+	} else if (dslp > 500) {
+		/* Hey bios, trust must be earned. */
+		WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
+		dslp = 500;
+	}
+
+	/* The spec tells us to do this, but we are the only user... */
+	scic = ioread32(&swsci->scic);
+	if (scic & SWSCI_SCIC_INDICATOR) {
+		DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+		return -EBUSY;
+	}
+
+	scic = function | SWSCI_SCIC_INDICATOR;
+
+	iowrite32(parm, &swsci->parm);
+	iowrite32(scic, &swsci->scic);
+
+	/* Ensure SCI event is selected and event trigger is cleared. */
+	pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+	if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+		pci_swsci |= PCI_SWSCI_SCISEL;
+		pci_swsci &= ~PCI_SWSCI_GSSCIE;
+		pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+	}
+
+	/* Use event trigger to tell bios to check the mail. */
+	pci_swsci |= PCI_SWSCI_GSSCIE;
+	pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+	/* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+	if (wait_for(C, dslp)) {
+		DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+		SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+	/* Note: scic == 0 is an error! */
+	if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+		DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+		return -EIO;
+	}
+
+	if (parm_out)
+		*parm_out = ioread32(&swsci->parm);
+
+	return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT			0
+#define DISPLAY_TYPE_TV				1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL	2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL	3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+				  bool enable)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 parm = 0;
+	u32 type = 0;
+	u32 port;
+
+	/* don't care about old stuff for now */
+	if (!HAS_DDI(dev))
+		return 0;
+
+	port = intel_ddi_get_encoder_port(intel_encoder);
+	if (port == PORT_E) {
+		port = 0;
+	} else {
+		parm |= 1 << port;
+		port++;
+	}
+
+	if (!enable)
+		parm |= 4 << 8;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_ANALOG:
+		type = DISPLAY_TYPE_CRT;
+		break;
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
+		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+		break;
+	case INTEL_OUTPUT_EDP:
+		type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+			  intel_encoder->type);
+		return -EINVAL;
+	}
+
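+	/* e.g. an eDP panel on port A selects the type field at bit 19 and
+	 * stores DISPLAY_TYPE_INTERNAL_FLAT_PANEL (3) in parm bits 21:19 */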
+	parm |= type << (16 + port * 3);
+
+	return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+	pci_power_t pci_power_state;
+	u32 parm;
+} power_state_map[] = {
+	{ PCI_D0,	0x00 },
+	{ PCI_D1,	0x01 },
+	{ PCI_D2,	0x02 },
+	{ PCI_D3hot,	0x04 },
+	{ PCI_D3cold,	0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	int i;
+
+	if (!HAS_DDI(dev))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+		if (state == power_state_map[i].pci_power_state)
+			return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+				     power_state_map[i].parm, NULL);
+	}
+
+	return -EINVAL;
+}
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector = NULL;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 ret = 0;
+	bool found = false;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
 	if (!(bclp & ASLE_BCLP_VALID))
-		return ASLE_BACKLIGHT_FAILED;
+		return ASLC_BACKLIGHT_FAILED;
 
 	bclp &= ASLE_BCLP_MSK;
 	if (bclp > 255)
-		return ASLE_BACKLIGHT_FAILED;
+		return ASLC_BACKLIGHT_FAILED;
 
-	intel_panel_set_backlight(dev, bclp, 255);
+	mutex_lock(&dev->mode_config.mutex);
+	/*
+	 * Could match the OpRegion connector here instead, but we'd also need
+	 * to verify the connector could handle a backlight call.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc) {
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		ret = ASLC_BACKLIGHT_FAILED;
+		goto out;
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			intel_connector = to_intel_connector(connector);
+
+	if (!intel_connector) {
+		ret = ASLC_BACKLIGHT_FAILED;
+		goto out;
+	}
+
+	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+	intel_panel_set_backlight(intel_connector, bclp, 255);
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
-	return 0;
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
 }
 
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@ -183,13 +453,13 @@
 	/* alsi is the current ALS reading in lux. 0 indicates below sensor
 	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
 	DRM_DEBUG_DRIVER("Illum is not supported\n");
-	return ASLE_ALS_ILLUM_FAILED;
+	return ASLC_ALS_ILLUM_FAILED;
 }
 
 static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
 {
 	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-	return ASLE_PWM_FREQ_FAILED;
+	return ASLC_PWM_FREQ_FAILED;
 }
 
 static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +467,118 @@
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
 	DRM_DEBUG_DRIVER("Pfit is not supported\n");
-	return ASLE_PFIT_FAILED;
+	return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+	DRM_DEBUG_DRIVER("SROT is not supported\n");
+	return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+	if (!iuer)
+		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+	if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+	if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+	if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+	if (iuer & ASLE_IUER_WINDOWS_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+	if (iuer & ASLE_IUER_POWER_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+	return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_CONVERTIBLE)
+		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+	else
+		DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+	return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_DOCKING)
+		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+	else
+		DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+	return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("ISCT is not supported\n");
+	return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+	struct intel_opregion *opregion =
+		container_of(work, struct intel_opregion, asle_work);
+	struct drm_i915_private *dev_priv =
+		container_of(opregion, struct drm_i915_private, opregion);
+	struct drm_device *dev = dev_priv->dev;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 aslc_stat = 0;
+	u32 aslc_req;
+
+	if (!asle)
+		return;
+
+	aslc_req = ioread32(&asle->aslc);
+
+	if (!(aslc_req & ASLC_REQ_MSK)) {
+		DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+				 aslc_req);
+		return;
+	}
+
+	if (aslc_req & ASLC_SET_ALS_ILLUM)
+		aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+	if (aslc_req & ASLC_SET_BACKLIGHT)
+		aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+	if (aslc_req & ASLC_SET_PFIT)
+		aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+	if (aslc_req & ASLC_SET_PWM_FREQ)
+		aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+
+	if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+		aslc_stat |= asle_set_supported_rotation_angles(dev,
+							ioread32(&asle->srot));
+
+	if (aslc_req & ASLC_BUTTON_ARRAY)
+		aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+		aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_DOCKING_INDICATOR)
+		aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+		aslc_stat |= asle_isct_state(dev);
+
+	iowrite32(aslc_stat, &asle->aslc);
 }
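
The worker above implements a request/status handshake: each ASLC request bit
selects a handler, every handler returns zero on success or an ASLC_*_FAILED
status bit, and the OR of all statuses is written back for the BIOS to
inspect. A stripped-down sketch of that dispatch shape, with hypothetical bit
names:

    #define EXAMPLE_REQ_FOO    (1 << 0) /* hypothetical request bits */
    #define EXAMPLE_REQ_BAR    (1 << 1)
    #define EXAMPLE_BAR_FAILED (1 << 9) /* hypothetical status bit */

    static u32 example_handle_foo(u32 arg) { return 0; } /* handled */
    static u32 example_handle_bar(u32 arg) { return EXAMPLE_BAR_FAILED; }

    static u32 example_dispatch(u32 req, u32 foo_arg, u32 bar_arg)
    {
        u32 stat = 0;

        if (req & EXAMPLE_REQ_FOO)
            stat |= example_handle_foo(foo_arg);
        if (req & EXAMPLE_REQ_BAR)
            stat |= example_handle_bar(bar_arg);

        return stat; /* the caller writes this back, as asle_work does */
    }
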
 
 void intel_opregion_asle_intr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 asle_stat = 0;
-	u32 asle_req;
 
-	if (!asle)
-		return;
-
-	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
-
-	if (!asle_req) {
-		DRM_DEBUG_DRIVER("non asle set request??\n");
-		return;
-	}
-
-	if (asle_req & ASLE_SET_ALS_ILLUM)
-		asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
-
-	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
-
-	if (asle_req & ASLE_SET_PFIT)
-		asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
-
-	if (asle_req & ASLE_SET_PWM_FREQ)
-		asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
-
-	iowrite32(asle_stat, &asle->aslc);
+	if (dev_priv->opregion.asle)
+		schedule_work(&dev_priv->opregion.asle_work);
 }
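
Deferring the mailbox handling like this follows the standard workqueue
pattern: initialize the work item once, schedule it from the interrupt path,
and cancel it synchronously on teardown (as the unload hunk below does). A
minimal sketch, assuming a hypothetical my_dev structure:

    #include <linux/workqueue.h>

    struct my_dev {
        struct work_struct asle_work;
    };

    static void my_asle_work(struct work_struct *work)
    {
        struct my_dev *md = container_of(work, struct my_dev, asle_work);

        /* process context: sleeping and slow MMIO are fine here */
        (void)md;
    }

    static void my_setup(struct my_dev *md)
    {
        INIT_WORK(&md->asle_work, my_asle_work);
    }

    static void my_irq_handler(struct my_dev *md)
    {
        schedule_work(&md->asle_work); /* cheap and non-blocking */
    }

    static void my_teardown(struct my_dev *md)
    {
        cancel_work_sync(&md->asle_work); /* waits out a running instance */
    }
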
 
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -432,6 +781,8 @@
 	if (opregion->asle)
 		iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
 
+	cancel_work_sync(&dev_priv->opregion.asle_work);
+
 	if (opregion->acpi) {
 		iowrite32(0, &opregion->acpi->drdy);
 
@@ -446,8 +797,68 @@
 	opregion->swsci = NULL;
 	opregion->asle = NULL;
 	opregion->vbt = NULL;
+	opregion->lid_state = NULL;
 }
-#endif
+
+static void swsci_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	bool requested_callbacks = false;
+	u32 tmp;
+
+	/* Sub-function code 0 is okay, let's allow them. */
+	opregion->swsci_gbda_sub_functions = 1;
+	opregion->swsci_sbcb_sub_functions = 1;
+
+	/* We use GBDA to ask for supported GBDA calls. */
+	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		tmp <<= 1;
+		opregion->swsci_gbda_sub_functions |= tmp;
+	}
+
+	/*
+	 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+	 * must not call interfaces that are not specifically requested by the
+	 * bios.
+	 */
+	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+		/* here, the bits already match sub-function codes */
+		opregion->swsci_sbcb_sub_functions |= tmp;
+		requested_callbacks = true;
+	}
+
+	/*
+	 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+	 * the callback is _requested_. But we still can't call interfaces that
+	 * are not requested.
+	 */
+	if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		u32 low = tmp & 0x7ff;
+		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+		tmp = (high << 4) | (low << 1) | 1;
+
+		/* best guess what to do with supported wrt requested */
+		if (requested_callbacks) {
+			u32 req = opregion->swsci_sbcb_sub_functions;
+			if ((req & tmp) != req)
+				DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+			/* XXX: for now, trust the requested callbacks */
+			/* opregion->swsci_sbcb_sub_functions &= tmp; */
+		} else {
+			opregion->swsci_sbcb_sub_functions |= tmp;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+			 opregion->swsci_gbda_sub_functions,
+			 opregion->swsci_sbcb_sub_functions);
+}
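
To make the supported-callbacks remapping above concrete, here is a worked
example with a hypothetical return value; SBCB sub-function codes skip the
reserved bit 11, so the word is split there and re-spread, with bit 0
(sub-function 0) always allowed:

    u32 tmp  = 0x1801;       /* hypothetical: bits 0, 11 and 12 set */
    u32 low  = tmp & 0x7ff;  /* 0x0001: bits 0-10 kept */
    u32 high = tmp & ~0xfff; /* 0x1000: bits 12+ kept, bit 11 dropped */
    u32 out  = (high << 4) | (low << 1) | 1;
                             /* 0x10000 | 0x2 | 0x1 = 0x10003 */
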
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif  /* CONFIG_ACPI */
 
 int intel_opregion_setup(struct drm_device *dev)
 {
@@ -465,6 +876,10 @@
 		return -ENOTSUPP;
 	}
 
+#ifdef CONFIG_ACPI
+	INIT_WORK(&opregion->asle_work, asle_work);
+#endif
+
 	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
@@ -490,6 +905,7 @@
 	if (mboxes & MBOX_SWSCI) {
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+		swsci_setup(dev);
 	}
 	if (mboxes & MBOX_ASLE) {
 		DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0ae..a98a990 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 					  struct intel_crtc *crtc)
 {
-	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
 	if (!crtc->active)
 		return -EINVAL;
 
 	/* can't use the overlay with double wide pipe */
-	if (INTEL_INFO(overlay->dev)->gen < 4 &&
-	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+	if (crtc->config.double_wide)
 		return -EINVAL;
 
 	return 0;
@@ -1056,7 +1053,7 @@
 		return ret;
 	}
 
-	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
 	if (!params)
 		return -ENOMEM;
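
The switch to sizeof(*params) here (and sizeof(*overlay) below) is the usual
kernel idiom: tying the allocation size to the pointee keeps the call correct
even if the pointer's type changes later, e.g.:

    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL); /* tracks p's type */
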
 
@@ -1323,7 +1320,7 @@
 	if (!HAS_OVERLAY(dev))
 		return;
 
-	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a..f161ac0 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@
 			struct intel_crtc_config *pipe_config,
 			int fitting_mode)
 {
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 	int x, y, width, height;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	x = y = width = height = 0;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto done;
 
 	switch (fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
-		width = mode->hdisplay;
-		height = mode->vdisplay;
+		width = pipe_config->pipe_src_w;
+		height = pipe_config->pipe_src_h;
 		x = (adjusted_mode->hdisplay - width + 1)/2;
 		y = (adjusted_mode->vdisplay - height + 1)/2;
 		break;
@@ -74,17 +73,19 @@
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
 		{
-			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			u32 scaled_width = adjusted_mode->hdisplay
+				* pipe_config->pipe_src_h;
+			u32 scaled_height = pipe_config->pipe_src_w
+				* adjusted_mode->vdisplay;
 			if (scaled_width > scaled_height) { /* pillar */
-				width = scaled_height / mode->vdisplay;
+				width = scaled_height / pipe_config->pipe_src_h;
 				if (width & 1)
 					width++;
 				x = (adjusted_mode->hdisplay - width + 1) / 2;
 				y = 0;
 				height = adjusted_mode->vdisplay;
 			} else if (scaled_width < scaled_height) { /* letter */
-				height = scaled_width / mode->hdisplay;
+				height = scaled_width / pipe_config->pipe_src_w;
 				if (height & 1)
 				    height++;
 				y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@
 	return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+
+	/* 965+ is easy, it does everything in hw */
+	if (scaled_width > scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_PILLAR;
+	else if (scaled_width < scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_LETTER;
+	else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+		*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control, u32 *pfit_pgm_ratios,
+			      u32 *border)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+	u32 bits;
+
+	/*
+	 * For earlier chips we have to calculate the scaling
+	 * ratio by hand and program it into the
+	 * PFIT_PGM_RATIO register
+	 */
+	if (scaled_width > scaled_height) { /* pillar */
+		centre_horizontally(adjusted_mode,
+				    scaled_height /
+				    pipe_config->pipe_src_h);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+						    adjusted_mode->vdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else if (scaled_width < scaled_height) { /* letter */
+		centre_vertically(adjusted_mode,
+				  scaled_width /
+				  pipe_config->pipe_src_w);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+						    adjusted_mode->hdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else {
+		/* Aspects match, let hw scale both directions */
+		*pfit_control |= (PFIT_ENABLE |
+				  VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+				  VERT_INTERP_BILINEAR |
+				  HORIZ_INTERP_BILINEAR);
+	}
+}
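
Both scale-aspect helpers compare aspect ratios by cross-multiplying, which
avoids integer division: scaled_width > scaled_height exactly when the panel
is wider than the source. A worked example with hypothetical numbers, a
1024x768 source on a 1920x1080 panel:

    u32 scaled_width  = 1920 * 768;  /* hdisplay * pipe_src_h = 1474560 */
    u32 scaled_height = 1024 * 1080; /* pipe_src_w * vdisplay = 1105920 */
    /* scaled_width > scaled_height -> pillarbox; the centred width is
     * scaled_height / pipe_src_h = 1105920 / 768 = 1440, i.e. 1440x1080 */
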
+
 void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 			      struct intel_crtc_config *pipe_config,
 			      int fitting_mode)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto out;
 
 	switch (fitting_mode) {
@@ -193,81 +270,25 @@
 		 * For centered modes, we have to calculate border widths &
 		 * heights and modify the values programmed into the CRTC.
 		 */
-		centre_horizontally(adjusted_mode, mode->hdisplay);
-		centre_vertically(adjusted_mode, mode->vdisplay);
+		centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+		centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
 		border = LVDS_BORDER_ENABLE;
 		break;
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-
-			/* 965+ is easy, it does everything in hw */
-			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_PILLAR;
-			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_LETTER;
-			else if (adjusted_mode->hdisplay != mode->hdisplay)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-		} else {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-			/*
-			 * For earlier chips we have to calculate the scaling
-			 * ratio by hand and program it into the
-			 * PFIT_PGM_RATIO register
-			 */
-			if (scaled_width > scaled_height) { /* pillar */
-				centre_horizontally(adjusted_mode,
-						    scaled_height /
-						    mode->vdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->vdisplay != adjusted_mode->vdisplay) {
-					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else if (scaled_width < scaled_height) { /* letter */
-				centre_vertically(adjusted_mode,
-						  scaled_width /
-						  mode->hdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->hdisplay != adjusted_mode->hdisplay) {
-					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else {
-				/* Aspects match, Let hw scale both directions */
-				pfit_control |= (PFIT_ENABLE |
-						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_INTERP_BILINEAR);
-			}
-		}
+		if (INTEL_INFO(dev)->gen >= 4)
+			i965_scale_aspect(pipe_config, &pfit_control);
+		else
+			i9xx_scale_aspect(pipe_config, &pfit_control,
+					  &pfit_pgm_ratios, &border);
 		break;
 	case DRM_MODE_SCALE_FULLSCREEN:
 		/*
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		if (mode->vdisplay != adjusted_mode->vdisplay ||
-		    mode->hdisplay != adjusted_mode->hdisplay) {
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+		    pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
 			pfit_control |= PFIT_ENABLE;
 			if (INTEL_INFO(dev)->gen >= 4)
 				pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (IS_GEN4(dev))
 		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
 
 	if (IS_GEN2(dev))
@@ -320,7 +341,7 @@
 /* XXX: query mode clock or hardware clock and program max PWM appropriately
  * when it's 0.
  */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
@@ -337,6 +358,21 @@
 			val = dev_priv->regfile.saveBLC_PWM_CTL2;
 			I915_WRITE(BLC_PWM_PCH_CTL2, val);
 		}
+	} else if (IS_VALLEYVIEW(dev)) {
+		val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL = val;
+			dev_priv->regfile.saveBLC_PWM_CTL2 =
+				I915_READ(VLV_BLC_PWM_CTL2(pipe));
+		} else if (val == 0) {
+			val = dev_priv->regfile.saveBLC_PWM_CTL;
+			I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
+			I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
+				   dev_priv->regfile.saveBLC_PWM_CTL2);
+		}
+
+		if (!val)
+			val = 0x0f42ffff;
 	} else {
 		val = I915_READ(BLC_PWM_CTL);
 		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
@@ -356,11 +392,12 @@
 	return val;
 }
 
-static u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 intel_panel_get_max_backlight(struct drm_device *dev,
+					 enum pipe pipe)
 {
 	u32 max;
 
-	max = i915_read_blc_pwm_ctl(dev);
+	max = i915_read_blc_pwm_ctl(dev, pipe);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
@@ -386,7 +423,8 @@
 	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
 	"It will then be included in an upcoming module version.");
 module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+static u32 intel_panel_compute_brightness(struct drm_device *dev,
+					  enum pipe pipe, u32 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -395,7 +433,7 @@
 
 	if (i915_panel_invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		u32 max = intel_panel_get_max_backlight(dev);
+		u32 max = intel_panel_get_max_backlight(dev, pipe);
 		if (max)
 			return max - val;
 	}
@@ -403,18 +441,25 @@
 	return val;
 }
 
-static u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev,
+				     enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 	unsigned long flags;
+	int reg;
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
-		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+		if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL(pipe);
+		else
+			reg = BLC_PWM_CTL;
+
+		val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
 		if (INTEL_INFO(dev)->gen < 4)
 			val >>= 1;
 
@@ -426,7 +471,7 @@
 		}
 	}
 
-	val = intel_panel_compute_brightness(dev, val);
+	val = intel_panel_compute_brightness(dev, pipe, val);
 
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
@@ -441,19 +486,21 @@
 	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+					       enum pipe pipe, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
+	int reg;
 
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-	level = intel_panel_compute_brightness(dev, level);
+	level = intel_panel_compute_brightness(dev, pipe, level);
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
 	if (is_backlight_combination_mode(dev)) {
-		u32 max = intel_panel_get_max_backlight(dev);
+		u32 max = intel_panel_get_max_backlight(dev, pipe);
 		u8 lbpc;
 
 		/* we're screwed, but keep behaviour backwards compatible */
@@ -465,23 +512,34 @@
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 
-	tmp = I915_READ(BLC_PWM_CTL);
+	if (IS_VALLEYVIEW(dev))
+		reg = VLV_BLC_PWM_CTL(pipe);
+	else
+		reg = BLC_PWM_CTL;
+
+	tmp = I915_READ(reg);
 	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_CTL, tmp | level);
+	I915_WRITE(reg, tmp | level);
 }
 
 /* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+			       u32 max)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 freq;
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-	freq = intel_panel_get_max_backlight(dev);
+	freq = intel_panel_get_max_backlight(dev, pipe);
 	if (!freq) {
 		/* we are screwed, bail out */
 		goto out;
@@ -498,16 +556,21 @@
 		dev_priv->backlight.device->props.brightness = level;
 
 	if (dev_priv->backlight.enabled)
-		intel_panel_actually_set_backlight(dev, level);
+		intel_panel_actually_set_backlight(dev, pipe, level);
 out:
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_disable_backlight(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
 	/*
 	 * Do not disable backlight on the vgaswitcheroo path. When switching
 	 * away from i915, the other client may depend on i915 to handle the
@@ -522,12 +585,17 @@
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	dev_priv->backlight.enabled = false;
-	intel_panel_actually_set_backlight(dev, 0);
+	intel_panel_actually_set_backlight(dev, pipe, 0);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
-		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+		if (HAS_PCH_SPLIT(dev))
+			reg = BLC_PWM_CPU_CTL2;
+		else if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL2(pipe);
+		else
+			reg = BLC_PWM_CTL2;
 
 		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
 
@@ -541,18 +609,25 @@
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
-void intel_panel_enable_backlight(struct drm_device *dev,
-				  enum pipe pipe)
+void intel_panel_enable_backlight(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum transcoder cpu_transcoder =
 		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (dev_priv->backlight.level == 0) {
-		dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+		dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
+									  pipe);
 		if (dev_priv->backlight.device)
 			dev_priv->backlight.device->props.brightness =
 				dev_priv->backlight.level;
@@ -561,8 +636,12 @@
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
-		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
-
+		if (HAS_PCH_SPLIT(dev))
+			reg = BLC_PWM_CPU_CTL2;
+		else if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL2(pipe);
+		else
+			reg = BLC_PWM_CTL2;
 
 		tmp = I915_READ(reg);
 
@@ -602,16 +681,41 @@
 	 * registers are set.
 	 */
 	dev_priv->backlight.enabled = true;
-	intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+	intel_panel_actually_set_backlight(dev, pipe,
+					   dev_priv->backlight.level);
 
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		enum pipe pipe;
+
+		for_each_pipe(pipe) {
+			u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+			/* Skip if the modulation freq is already set */
+			if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+				continue;
+
+			cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+			I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+				   cur_val);
+		}
+	}
+}
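
A note on the magic number: 0xf42 is the same PWM modulation-frequency field
as in the 0x0f42ffff fallback used by i915_read_blc_pwm_ctl() above; in these
registers the frequency occupies the high 16 bits and the duty cycle the low
16 (BACKLIGHT_DUTY_CYCLE_MASK).
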
+
 static void intel_panel_init_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->backlight.level = intel_panel_get_backlight(dev);
+	intel_panel_init_backlight_regs(dev);
+
+	dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
 	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
 }
 
@@ -637,19 +741,34 @@
 	}
 }
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_panel_update_status(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
-	intel_panel_set_backlight(dev, bd->props.brightness,
+	struct intel_connector *connector = bl_get_data(bd);
+	struct drm_device *dev = connector->base.dev;
+
+	mutex_lock(&dev->mode_config.mutex);
+	DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+		      bd->props.brightness, bd->props.max_brightness);
+	intel_panel_set_backlight(connector, bd->props.brightness,
 				  bd->props.max_brightness);
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 }
 
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
-	return intel_panel_get_backlight(dev);
+	struct intel_connector *connector = bl_get_data(bd);
+	struct drm_device *dev = connector->base.dev;
+	enum pipe pipe;
+
+	mutex_lock(&dev->mode_config.mutex);
+	pipe = intel_get_pipe_from_connector(connector);
+	mutex_unlock(&dev->mode_config.mutex);
+	if (pipe == INVALID_PIPE)
+		return 0;
+
+	return intel_panel_get_backlight(connector->base.dev, pipe);
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
@@ -674,7 +793,7 @@
 	props.brightness = dev_priv->backlight.level;
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-	props.max_brightness = intel_panel_get_max_backlight(dev);
+	props.max_brightness = intel_panel_get_max_backlight(dev, 0);
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
 	if (props.max_brightness == 0) {
@@ -683,7 +802,8 @@
 	}
 	dev_priv->backlight.device =
 		backlight_device_register("intel_backlight",
-					  &connector->kdev, dev,
+					  connector->kdev,
+					  to_intel_connector(connector),
 					  &intel_panel_bl_ops, &props);
 
 	if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3..0a07d7c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
 #include <linux/module.h>
 #include <drm/i915_powerwell.h>
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, going as low as 0V while in this stage.
+ * It is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and the GPU wakes up automatically as soon as a new workload
+ * arrives.
+ *
+ * Intel GPUs provide several RC6 modes, which differ from each other in
+ * the latency required to enter and leave RC6, and in the voltage the
+ * GPU consumes in each state.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support for each varies with the
+ * GPU, BIOS, chipset and platform. RC6 is usually the safest mode and
+ * the one that brings the most power savings; deeper states save more
+ * power, but require higher latency to enter and wake up from.
+ */
+#define INTEL_RC6_ENABLE			(1<<0)
+#define INTEL_RC6p_ENABLE			(1<<1)
+#define INTEL_RC6pp_ENABLE			(1<<2)
+
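
A minimal sketch of how flags like these are typically combined into a mode
mask and tested; the helper below is hypothetical, not the driver's actual
RC6 policy code:

    static int example_rc6_mask(bool allow_deep, bool allow_deepest)
    {
        int mask = INTEL_RC6_ENABLE; /* plain RC6 is the safe default */

        if (allow_deep)
            mask |= INTEL_RC6p_ENABLE;
        if (allow_deepest)
            mask |= INTEL_RC6pp_ENABLE;

        return mask; /* e.g. mask & INTEL_RC6p_ENABLE gates deep RC6 */
    }
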
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, the power consumed.
@@ -43,14 +64,6 @@
  * i915.i915_enable_fbc parameter
  */
 
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 */
-	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
-		if (IS_IVYBRIDGE(dev))
-			/* WaFbcDisableDpfcClockGating:ivb */
-			I915_WRITE(ILK_DSPCLK_GATE_D,
-				   I915_READ(ILK_DSPCLK_GATE_D) &
-				   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
-		if (IS_HASWELL(dev))
-			/* WaFbcDisableDpfcClockGating:hsw */
-			I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-				   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
-				   ~HSW_DPFC_GATING_DISABLE);
-
 		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
@@ -282,18 +283,10 @@
 	if (IS_IVYBRIDGE(dev)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
-		/* WaFbcDisableDpfcClockGating:ivb */
-		I915_WRITE(ILK_DSPCLK_GATE_D,
-			   I915_READ(ILK_DSPCLK_GATE_D) |
-			   ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 	} else {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
 		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
 			   HSW_BYPASS_FBC_QUEUE);
-		/* WaFbcDisableDpfcClockGating:hsw */
-		I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-			   I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
-			   HSW_DPFC_GATING_DISABLE);
 	}
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@
 
 	intel_cancel_fbc_work(dev_priv);
 
-	work = kzalloc(sizeof *work, GFP_KERNEL);
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
 		dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	unsigned int max_hdisplay, max_vdisplay;
+	const struct drm_display_mode *adjusted_mode;
+	unsigned int max_width, max_height;
 
 	if (!I915_HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@
 	 */
 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 		if (intel_crtc_active(tmp_crtc) &&
-		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
+		    to_intel_crtc(tmp_crtc)->primary_enabled) {
 			if (crtc) {
 				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
 					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
+	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915_enable_fbc < 0 &&
 	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@
 			DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
 			DRM_DEBUG_KMS("mode incompatible with compression, "
 				      "disabling\n");
@@ -523,14 +518,14 @@
 	}
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		max_hdisplay = 4096;
-		max_vdisplay = 2048;
+		max_width = 4096;
+		max_height = 2048;
 	} else {
-		max_hdisplay = 2048;
-		max_vdisplay = 1536;
+		max_width = 2048;
+		max_height = 1536;
 	}
-	if ((crtc->mode.hdisplay > max_hdisplay) ||
-	    (crtc->mode.vdisplay > max_vdisplay)) {
+	if (intel_crtc->config.pipe_src_w > max_width ||
+	    intel_crtc->config.pipe_src_h > max_height) {
 		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
@@ -1087,8 +1082,9 @@
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		int clock = crtc->mode.clock;
+		const struct drm_display_mode *adjusted_mode;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		int clock;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@
 			    int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int htotal, hdisplay, clock, pixel_size;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
@@ -1177,9 +1178,10 @@
 		return false;
 	}
 
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@
 			     int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int hdisplay, htotal, pixel_size, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -1262,9 +1265,10 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1303,7 +1307,7 @@
 	if (!intel_crtc_active(crtc))
 		return false;
 
-	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	int srwm = 1;
@@ -1488,9 +1495,11 @@
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(crtc)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1541,8 +1550,9 @@
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
@@ -1576,11 +1588,13 @@
 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
@@ -1607,9 +1621,11 @@
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(enabled)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1658,10 +1674,12 @@
 	}
 }
 
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
 	int planea_wm;
 
@@ -1669,7 +1687,9 @@
 	if (crtc == NULL)
 		return;
 
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+				       &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	unsigned long line_time_us;
 	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
@@ -1753,9 +1774,10 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1868,8 +1891,9 @@
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -1970,8 +1994,9 @@
 		   cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -2098,7 +2123,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config.adjusted_mode.clock;
+	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-		pipe_w = intel_crtc->config.requested_mode.hdisplay;
-		pipe_h = intel_crtc->config.requested_mode.vdisplay;
+		pipe_w = intel_crtc->config.pipe_src_w;
+		pipe_h = intel_crtc->config.pipe_src_h;
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@
 	uint16_t fbc;
 };
 
-struct hsw_wm_values {
-	uint32_t wm_pipe[3];
-	uint32_t wm_lp[3];
-	uint32_t wm_lp_spr[3];
-	uint32_t wm_linetime[3];
-	bool enable_fbc_wm;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
 	bool sprites_enabled;
 	bool sprites_scaled;
-	bool fbc_wm_enabled;
 };
 
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2225,7 +2241,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2248,7 +2264,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2275,7 +2291,9 @@
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-	if (INTEL_INFO(dev)->gen >= 7)
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 3072;
+	else if (INTEL_INFO(dev)->gen >= 7)
 		return 768;
 	else
 		return 512;
@@ -2320,7 +2338,9 @@
 	}
 
 	/* clamp to max that the registers can hold */
-	if (INTEL_INFO(dev)->gen >= 7)
+	if (INTEL_INFO(dev)->gen >= 8)
+		max = level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
 		/* IVB/HSW primary/sprite plane watermarks */
 		max = level == 0 ? 127 : 1023;
 	else if (!is_sprite)
@@ -2350,27 +2370,30 @@
 }
 
 /* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(void)
+static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
 {
 	/* max that registers can hold */
-	return 15;
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-		       int level,
-		       const struct intel_wm_config *config,
-		       enum intel_ddb_partitioning ddb_partitioning,
-		       struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+				    int level,
+				    const struct intel_wm_config *config,
+				    enum intel_ddb_partitioning ddb_partitioning,
+				    struct hsw_wm_maximums *max)
 {
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
 	max->cur = ilk_cursor_wm_max(dev, level, config);
-	max->fbc = ilk_fbc_wm_max();
+	max->fbc = ilk_fbc_wm_max(dev);
 }
 
-static bool ilk_check_wm(int level,
-			 const struct hsw_wm_maximums *max,
-			 struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+				  const struct hsw_wm_maximums *max,
+				  struct intel_wm_level *result)
 {
 	bool ret;
 
@@ -2406,14 +2429,12 @@
 		result->enable = true;
 	}
 
-	DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
 	return ret;
 }
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 struct hsw_pipe_wm_parameters *p,
+				 const struct hsw_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2455,6 @@
 	result->enable = true;
 }
 
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-			      int level, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
-			      struct intel_wm_level *result)
-{
-	enum pipe pipe;
-	struct intel_wm_level res[3];
-
-	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
-		ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
-	result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
-	result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
-	result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
-	result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
-	result->enable = true;
-
-	return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    enum pipe pipe,
-				    struct hsw_pipe_wm_parameters *params)
-{
-	uint32_t pri_val, cur_val, spr_val;
-	/* WM0 latency values stored in 0.1us units */
-	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
-	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-	spr_val = ilk_compute_spr_wm(params, spr_latency);
-	cur_val = ilk_compute_cur_wm(params, cur_latency);
-
-	WARN(pri_val > 127,
-	     "Primary WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(spr_val > 127,
-	     "Sprite WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(cur_val > 63,
-	     "Cursor WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-
-	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-	       cur_val;
-}
-
 static uint32_t
 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
@@ -2554,19 +2526,22 @@
 		wm[3] *= 2;
 }
 
+static int ilk_wm_max_level(const struct drm_device *dev)
+{
+	/* how many WM levels are we expecting */
+	if (IS_HASWELL(dev))
+		return 4;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return 3;
+	else
+		return 2;
+}
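
Callers iterate level = 0..max_level inclusive, so this gives five watermark
levels (WM0 plus four higher-latency levels) on Haswell, four on gen6 and
gen7, and three otherwise.
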
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
 {
-	int level, max_level;
-
-	/* how many WM levels are we expecting */
-	if (IS_HASWELL(dev))
-		max_level = 4;
-	else if (INTEL_INFO(dev)->gen >= 6)
-		max_level = 3;
-	else
-		max_level = 2;
+	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
@@ -2606,218 +2581,321 @@
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_device *dev,
-				      struct hsw_pipe_wm_parameters *params,
-				      struct hsw_wm_maximums *lp_max_1_2,
-				      struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+				      struct hsw_pipe_wm_parameters *p,
+				      struct intel_wm_config *config)
 {
-	struct drm_crtc *crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
 	struct drm_plane *plane;
-	enum pipe pipe;
-	struct intel_wm_config config = {};
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		struct hsw_pipe_wm_parameters *p;
-
-		pipe = intel_crtc->pipe;
-		p = &params[pipe];
-
-		p->active = intel_crtc_active(crtc);
-		if (!p->active)
-			continue;
-
-		config.num_pipes_active++;
-
+	p->active = intel_crtc_active(crtc);
+	if (p->active) {
 		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels =
-			intel_crtc->config.requested_mode.hdisplay;
+		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
 		p->cur.horiz_pixels = 64;
 		/* TODO: for now, assume primary and cursor planes are always enabled. */
 		p->pri.enabled = true;
 		p->cur.enabled = true;
 	}
 
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		config->num_pipes_active += intel_crtc_active(crtc);
+
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
-		struct hsw_pipe_wm_parameters *p;
 
-		pipe = intel_plane->pipe;
-		p = &params[pipe];
+		if (intel_plane->pipe == pipe)
+			p->spr = intel_plane->wm;
 
-		p->spr = intel_plane->wm;
+		config->sprites_enabled |= intel_plane->wm.enabled;
+		config->sprites_scaled |= intel_plane->wm.scaled;
+	}
+}
 
-		config.sprites_enabled |= p->spr.enabled;
-		config.sprites_scaled |= p->spr.scaled;
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+				  const struct hsw_pipe_wm_parameters *params,
+				  struct intel_pipe_wm *pipe_wm)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int level, max_level = ilk_wm_max_level(dev);
+	/* LP0 watermark maximums depend on this pipe alone */
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+		.sprites_enabled = params->spr.enabled,
+		.sprites_scaled = params->spr.scaled,
+	};
+	struct hsw_wm_maximums max;
+
+	/* LP0 watermarks always use 1/2 DDB partitioning */
+	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+	for (level = 0; level <= max_level; level++)
+		ilk_compute_wm_level(dev_priv, level, params,
+				     &pipe_wm->wm[level]);
+
+	pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+	/* At least LP0 must be valid */
+	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+			       int level,
+			       struct intel_wm_level *ret_wm)
+{
+	const struct intel_crtc *intel_crtc;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		const struct intel_wm_level *wm =
+			&intel_crtc->wm.active.wm[level];
+
+		if (!wm->enable)
+			return;
+
+		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
 	}
 
-	ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+	ret_wm->enable = true;
+}
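
The merge is a per-field maximum across the active pipes, and a level
survives only if every pipe can satisfy it. A worked example with
hypothetical values for one level:

    /* level 2, two active pipes:
     *   pipe A: .pri_val = 10, .enable = true
     *   pipe B: .pri_val = 14, .enable = true
     *   merged: .pri_val = max(10, 14) = 14, .enable = true
     * had either pipe reported .enable == false, the early return above
     * would have left the merged level disabled */
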
 
-	/* 5/6 split only in single pipe config on IVB+ */
-	if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
-		ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
-	else
-		*lp_max_5_6 = *lp_max_1_2;
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+			 const struct hsw_wm_maximums *max,
+			 struct intel_pipe_wm *merged)
+{
+	int level, max_level = ilk_wm_max_level(dev);
+
+	merged->fbc_wm_enabled = true;
+
+	/* merge each WM1+ level */
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level *wm = &merged->wm[level];
+
+		ilk_merge_wm_level(dev, level, wm);
+
+		if (!ilk_validate_wm_level(level, max, wm))
+			break;
+
+		/*
+		 * The spec says it is preferred to disable
+		 * FBC WMs instead of disabling a WM level.
+		 */
+		if (wm->fbc_val > max->fbc) {
+			merged->fbc_wm_enabled = false;
+			wm->fbc_val = 0;
+		}
+	}
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
 }
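
Tabulating the expression above makes the register-to-level mapping explicit:

    /* wm[4] disabled: LP1 -> level 1, LP2 -> level 2, LP3 -> level 3
     * wm[4] enabled:  LP1 -> level 1, LP2 -> level 3, LP3 -> level 4
     * e.g. wm_lp = 2 with wm[4] enabled: 2 + (2 >= 2 && true) = 3 */
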
 
 static void hsw_compute_wm_results(struct drm_device *dev,
-				   struct hsw_pipe_wm_parameters *params,
-				   struct hsw_wm_maximums *lp_maximums,
+				   const struct intel_pipe_wm *merged,
+				   enum intel_ddb_partitioning partitioning,
 				   struct hsw_wm_values *results)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct intel_wm_level lp_results[4] = {};
-	enum pipe pipe;
-	int level, max_level, wm_lp;
+	struct intel_crtc *intel_crtc;
+	int level, wm_lp;
 
-	for (level = 1; level <= 4; level++)
-		if (!hsw_compute_lp_wm(dev_priv, level,
-				       lp_maximums, params,
-				       &lp_results[level - 1]))
-			break;
-	max_level = level - 1;
+	results->enable_fbc_wm = merged->fbc_wm_enabled;
+	results->partitioning = partitioning;
 
-	memset(results, 0, sizeof(*results));
-
-	/* The spec says it is preferred to disable FBC WMs instead of disabling
-	 * a WM level. */
-	results->enable_fbc_wm = true;
-	for (level = 1; level <= max_level; level++) {
-		if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
-			results->enable_fbc_wm = false;
-			lp_results[level - 1].fbc_val = 0;
-		}
-	}
-
+	/* LP1+ register values */
 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
 		const struct intel_wm_level *r;
 
-		level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
-		if (level > max_level)
+		level = ilk_wm_lp_to_level(wm_lp, merged);
+
+		r = &merged->wm[level];
+		if (!r->enable)
 			break;
 
-		r = &lp_results[level - 1];
-		results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
-							  r->fbc_val,
-							  r->pri_val,
-							  r->cur_val);
+		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+			((level * 2) << WM1_LP_LATENCY_SHIFT) |
+			(r->pri_val << WM1_LP_SR_SHIFT) |
+			r->cur_val;
+
+		if (INTEL_INFO(dev)->gen >= 8)
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+		else
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT;
+
 		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
 	}
 
-	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
-							     &params[pipe]);
+	/* LP0 register values */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		enum pipe pipe = intel_crtc->pipe;
+		const struct intel_wm_level *r =
+			&intel_crtc->wm.active.wm[0];
 
-	for_each_pipe(pipe) {
-		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-		results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+		if (WARN_ON(!r->enable))
+			continue;
+
+		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+		results->wm_pipe[pipe] =
+			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+			r->cur_val;
 	}
 }
 
 /* Find the result with the highest level enabled. Check for fbc_wm_enabled in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-						  struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+						  struct intel_pipe_wm *r1,
+						  struct intel_pipe_wm *r2)
 {
-	int i, val_r1 = 0, val_r2 = 0;
+	int level, max_level = ilk_wm_max_level(dev);
+	int level1 = 0, level2 = 0;
 
-	for (i = 0; i < 3; i++) {
-		if (r1->wm_lp[i] & WM3_LP_EN)
-			val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
-		if (r2->wm_lp[i] & WM3_LP_EN)
-			val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+	for (level = 1; level <= max_level; level++) {
+		if (r1->wm[level].enable)
+			level1 = level;
+		if (r2->wm[level].enable)
+			level2 = level;
 	}
 
-	if (val_r1 == val_r2) {
-		if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+	if (level1 == level2) {
+		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
 			return r2;
 		else
 			return r1;
-	} else if (val_r1 > val_r2) {
+	} else if (level1 > level2) {
 		return r1;
 	} else {
 		return r2;
 	}
 }
 
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+					 const struct hsw_wm_values *old,
+					 const struct hsw_wm_values *new)
+{
+	unsigned int dirty = 0;
+	enum pipe pipe;
+	int wm_lp;
+
+	for_each_pipe(pipe) {
+		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+			dirty |= WM_DIRTY_LINETIME(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+
+		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+			dirty |= WM_DIRTY_PIPE(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+	}
+
+	if (old->enable_fbc_wm != new->enable_fbc_wm) {
+		dirty |= WM_DIRTY_FBC;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	if (old->partitioning != new->partitioning) {
+		dirty |= WM_DIRTY_DDB;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	/* LP1+ watermarks already deemed dirty, no need to continue */
+	if (dirty & WM_DIRTY_LP_ALL)
+		return dirty;
+
+	/* Find the lowest numbered LP1+ watermark in need of an update... */
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+			break;
+	}
+
+	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+	for (; wm_lp <= 3; wm_lp++)
+		dirty |= WM_DIRTY_LP(wm_lp);
+
+	return dirty;
+}
+
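The dirty-mask scheme above lets the write path skip registers whose values did not change, and it encodes the hardware rule that any pipe, linetime, FBC or DDB change also invalidates all LP1+ levels. A minimal self-contained sketch of the same pattern (hypothetical names, not the driver's structs):

#include <stdint.h>
#include <stdio.h>

#define DIRTY_PIPE(p)	(1u << (p))
#define DIRTY_LP(n)	(1u << (15 + (n)))

struct wm_state { uint32_t pipe[3]; uint32_t lp[3]; };

static unsigned int compute_dirty(const struct wm_state *old,
				  const struct wm_state *new)
{
	unsigned int dirty = 0;
	int p, n;

	for (p = 0; p < 3; p++)
		if (old->pipe[p] != new->pipe[p])
			dirty |= DIRTY_PIPE(p);

	/* find the lowest changed LP level... */
	for (n = 1; n <= 3; n++)
		if (old->lp[n - 1] != new->lp[n - 1])
			break;
	/* ...and mark it and every higher level dirty */
	for (; n <= 3; n++)
		dirty |= DIRTY_LP(n);

	return dirty;
}

int main(void)
{
	struct wm_state a = { {1, 2, 3}, {4, 5, 6} };
	struct wm_state b = { {1, 9, 3}, {4, 5, 7} };

	printf("dirty mask: %#x\n", compute_dirty(&a, &b));
	return 0;
}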
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-				struct hsw_wm_values *results,
-				enum intel_ddb_partitioning partitioning)
+				struct hsw_wm_values *results)
 {
-	struct hsw_wm_values previous;
+	struct hsw_wm_values *previous = &dev_priv->wm.hw;
+	unsigned int dirty;
 	uint32_t val;
-	enum intel_ddb_partitioning prev_partitioning;
-	bool prev_enable_fbc_wm;
 
-	previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
-	previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
-	previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
-	previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
-	previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
-	previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
-	previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-	previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
-	previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
-	previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
-	previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
-	prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-				INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-	prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
-	if (memcmp(results->wm_pipe, previous.wm_pipe,
-		   sizeof(results->wm_pipe)) == 0 &&
-	    memcmp(results->wm_lp, previous.wm_lp,
-		   sizeof(results->wm_lp)) == 0 &&
-	    memcmp(results->wm_lp_spr, previous.wm_lp_spr,
-		   sizeof(results->wm_lp_spr)) == 0 &&
-	    memcmp(results->wm_linetime, previous.wm_linetime,
-		   sizeof(results->wm_linetime)) == 0 &&
-	    partitioning == prev_partitioning &&
-	    results->enable_fbc_wm == prev_enable_fbc_wm)
+	dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+	if (!dirty)
 		return;
 
-	if (previous.wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, 0);
-	if (previous.wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, 0);
-	if (previous.wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, 0);
 
-	if (previous.wm_pipe[0] != results->wm_pipe[0])
+	if (dirty & WM_DIRTY_PIPE(PIPE_A))
 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
-	if (previous.wm_pipe[1] != results->wm_pipe[1])
+	if (dirty & WM_DIRTY_PIPE(PIPE_B))
 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
-	if (previous.wm_pipe[2] != results->wm_pipe[2])
+	if (dirty & WM_DIRTY_PIPE(PIPE_C))
 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-	if (previous.wm_linetime[0] != results->wm_linetime[0])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-	if (previous.wm_linetime[1] != results->wm_linetime[1])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-	if (previous.wm_linetime[2] != results->wm_linetime[2])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
-	if (prev_partitioning != partitioning) {
+	if (dirty & WM_DIRTY_DDB) {
 		val = I915_READ(WM_MISC);
-		if (partitioning == INTEL_DDB_PART_1_2)
+		if (results->partitioning == INTEL_DDB_PART_1_2)
 			val &= ~WM_MISC_DATA_PARTITION_5_6;
 		else
 			val |= WM_MISC_DATA_PARTITION_5_6;
 		I915_WRITE(WM_MISC, val);
 	}
 
-	if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+	if (dirty & WM_DIRTY_FBC) {
 		val = I915_READ(DISP_ARB_CTL);
 		if (results->enable_fbc_wm)
 			val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2904,65 @@
 		I915_WRITE(DISP_ARB_CTL, val);
 	}
 
-	if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
 		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-	if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
 		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-	if (results->wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-	if (results->wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-	if (results->wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+	dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
-	struct hsw_pipe_wm_parameters params[3];
-	struct hsw_wm_values results_1_2, results_5_6, *best_results;
+	struct hsw_wm_maximums max;
+	struct hsw_pipe_wm_parameters params = {};
+	struct hsw_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
+	struct intel_pipe_wm pipe_wm = {};
+	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+	struct intel_wm_config config = {};
 
-	hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+	hsw_compute_wm_parameters(crtc, &params, &config);
 
-	hsw_compute_wm_results(dev, params,
-			       &lp_max_1_2, &results_1_2);
-	if (lp_max_1_2.pri != lp_max_5_6.pri) {
-		hsw_compute_wm_results(dev, params,
-				       &lp_max_5_6, &results_5_6);
-		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+		return;
+
+	intel_crtc->wm.active = pipe_wm;
+
+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, &max, &lp_wm_1_2);
+
+	/* 5/6 split only in single pipe config on IVB+ */
+	if (INTEL_INFO(dev)->gen >= 7 &&
+	    config.num_pipes_active == 1 && config.sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+		best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
-		best_results = &results_1_2;
+		best_lp_wm = &lp_wm_1_2;
 	}
 
-	partitioning = (best_results == &results_1_2) ?
+	partitioning = (best_lp_wm == &lp_wm_1_2) ?
 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-	hsw_write_wm_values(dev_priv, best_results, partitioning);
+	hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+	hsw_write_wm_values(dev_priv, &results);
 }
 
 static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2977,7 @@
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(plane->dev);
+	haswell_update_wm(crtc);
 }
 
 static bool
@@ -2898,7 +2996,7 @@
 		return false;
 	}
 
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
 	/* Use the small buffer method to calculate the sprite watermark */
 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3031,7 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	if (!clock) {
 		*sprite_wm = 0;
 		return false;
@@ -3044,6 +3142,74 @@
 	I915_WRITE(WM3S_LP_IVB, sprite_wm);
 }
 
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_pipe_wm *active = &intel_crtc->wm.active;
+	enum pipe pipe = intel_crtc->pipe;
+	static const unsigned int wm0_pipe_reg[] = {
+		[PIPE_A] = WM0_PIPEA_ILK,
+		[PIPE_B] = WM0_PIPEB_ILK,
+		[PIPE_C] = WM0_PIPEC_IVB,
+	};
+
+	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	if (intel_crtc_active(crtc)) {
+		u32 tmp = hw->wm_pipe[pipe];
+
+		/*
+		 * For active pipes LP0 watermark is marked as
+		 * enabled, and LP1+ watermarks as disabled since
+		 * we can't really reverse compute them in case
+		 * multiple pipes are active.
+		 */
+		active->wm[0].enable = true;
+		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+		active->linetime = hw->wm_linetime[pipe];
+	} else {
+		int level, max_level = ilk_wm_max_level(dev);
+
+		/*
+		 * For inactive pipes, all watermark levels
+		 * should be marked as enabled but zeroed,
+		 * which is what we'd compute them to.
+		 */
+		for (level = 0; level <= max_level; level++)
+			active->wm[level].enable = true;
+	}
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		ilk_pipe_wm_get_hw_state(crtc);
+
+	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+	hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+		INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+	hw->enable_fbc_wm =
+		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
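Seeding dev_priv->wm.hw from the registers at init is what allows hsw_write_wm_values() above to diff against a software copy instead of re-reading hardware on every update; a stale or empty cache would suppress writes that are actually needed. A compilable sketch of the seeding idea (stubbed MMIO, illustrative offsets):

#include <stdint.h>

struct hw_cache { uint32_t wm_lp[3]; };

/* Stub for an MMIO read; the driver would use I915_READ() here. */
static uint32_t mmio_read(uint32_t reg) { (void)reg; return 0; }

/* Illustrative base offset only, not claimed to match real hardware. */
#define WM_LP_BASE 0x45100

static void seed_cache(struct hw_cache *hw)
{
	int i;

	/* Populate the software copy once at init; later updates diff
	 * against this cache instead of re-reading the registers. */
	for (i = 0; i < 3; i++)
		hw->wm_lp[i] = mmio_read(WM_LP_BASE + 4 * i);
}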
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3076,12 +3242,12 @@
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
 	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
+		dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3453,98 @@
 	return limits;
 }
 
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+	int new_power;
+
+	new_power = dev_priv->rps.power;
+	switch (dev_priv->rps.power) {
+	case LOW_POWER:
+		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+			new_power = LOW_POWER;
+		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val == dev_priv->rps.min_delay)
+		new_power = LOW_POWER;
+	if (val == dev_priv->rps.max_delay)
+		new_power = HIGH_POWER;
+	if (new_power == dev_priv->rps.power)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		I915_WRITE(GEN6_RP_UP_EI, 12500);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+		/* Downclock if less than 85% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		I915_WRITE(GEN6_RP_UP_EI, 10250);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+		/* Downclock if less than 75% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		I915_WRITE(GEN6_RP_UP_EI, 8000);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+		/* Downclock if less than 60% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+	}
+
+	dev_priv->rps.power = new_power;
+	dev_priv->rps.last_adj = 0;
+}
+
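The state machine in gen6_set_rps_thresholds() deliberately overlaps its up and down conditions so that a frequency hovering near a boundary does not flip-flop between threshold sets; min and max requests are clamped to LOW_POWER and HIGH_POWER outright. The same hysteresis logic, extracted into a standalone function with parameters mirroring the rpe/rp1/rp0 fields:

enum power_state { LOW_POWER, BETWEEN, HIGH_POWER };

/*
 * Entering a state is harder than staying in it: the up-transition and
 * down-transition conditions do not meet, so a value oscillating near a
 * boundary leaves the state (and the threshold registers) untouched.
 */
static enum power_state next_power_state(enum power_state cur, int val,
					 int cur_val, int rpe, int rp1,
					 int rp0, int min, int max)
{
	enum power_state next = cur;

	switch (cur) {
	case LOW_POWER:
		if (val > rpe + 1 && val > cur_val)
			next = BETWEEN;
		break;
	case BETWEEN:
		if (val <= rpe && val < cur_val)
			next = LOW_POWER;
		else if (val >= rp0 && val > cur_val)
			next = HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < (rp1 + rp0) / 2 && val < cur_val)
			next = BETWEEN;
		break;
	}

	/* min/max requests are decisive regardless of hysteresis */
	if (val == min)
		next = LOW_POWER;
	if (val == max)
		next = HIGH_POWER;
	return next;
}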
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3557,8 @@
 	if (val == dev_priv->rps.cur_delay)
 		return;
 
+	gen6_set_rps_thresholds(dev_priv, val);
+
 	if (IS_HASWELL(dev))
 		I915_WRITE(GEN6_RPNSWREQ,
 			   HSW_FREQUENCY(val));
@@ -3320,6 +3580,32 @@
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (dev_priv->info->is_valleyview)
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (dev_priv->info->is_valleyview)
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
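gen6_rps_idle() and gen6_rps_boost() jump straight to the floor or ceiling and clear last_adj so the next adaptive step starts from a neutral state; both take rps.hw_lock since they race with the interrupt-driven frequency work. A userspace sketch of the idle half (pthread mutex standing in for the driver's lock):

#include <pthread.h>
#include <stdio.h>

struct rps {
	pthread_mutex_t lock;	/* stand-in for dev_priv->rps.hw_lock */
	int enabled;
	int cur, min, max;
	int last_adj;
};

static void rps_idle(struct rps *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->enabled) {
		r->cur = r->min;	/* drop straight to the floor */
		r->last_adj = 0;	/* forget the previous ramp direction */
	}
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct rps r = { PTHREAD_MUTEX_INITIALIZER, 1, 10, 3, 20, 2 };

	rps_idle(&r);
	printf("cur=%d last_adj=%d\n", r.cur, r.last_adj);	/* cur=3 last_adj=0 */
	return 0;
}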
 /*
  * Wait until the previous freq change has completed,
  * or the timeout elapsed, and then update our notion
@@ -3415,6 +3701,20 @@
 	}
 }
 
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+	if (IS_GEN6(dev))
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+	if (IS_HASWELL(dev))
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
 	/* No RC6 before Ironlake */
@@ -3429,18 +3729,13 @@
 	if (INTEL_INFO(dev)->gen == 5)
 		return 0;
 
-	if (IS_HASWELL(dev)) {
-		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+	if (IS_HASWELL(dev))
 		return INTEL_RC6_ENABLE;
-	}
 
 	/* snb/ivb have more than one rc6 state. */
-	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+	if (INTEL_INFO(dev)->gen == 6)
 		return INTEL_RC6_ENABLE;
-	}
 
-	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
@@ -3467,6 +3762,78 @@
 	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
+static void gen8_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	uint32_t rc6_mask = 0, rp_state_cap;
+	int unused;
+
+	/* 1a: Software RC state - RC0 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1c & 1d: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+	gen6_gt_force_wake_get(dev_priv);
+
+	/* 2a: Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+
+	/* 2b: Program RC6 thresholds.*/
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_ring(ring, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/* 3: Enable RC6 */
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			GEN6_RC_CTL_EI_MODE(1) |
+			rc6_mask);
+
+	/* 4 Program defaults and thresholds for RPS*/
+	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
+	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+	/* Docs recommend 900MHz, and 300 MHz respectively */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later) */
+
+	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+	gen6_enable_rps_interrupts(dev);
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
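The whole gen8 enable sequence runs under a forcewake reference so the GT cannot power down between the numbered programming steps. The bracket pattern on its own, with stubbed steps named after the comments above (hypothetical helpers, not the driver's API):

struct gt { int wake_count; };

/* Refcounted stand-ins for gen6_gt_force_wake_get()/_put(). */
static void force_wake_get(struct gt *gt) { gt->wake_count++; }
static void force_wake_put(struct gt *gt) { gt->wake_count--; }

/* Hypothetical steps named after the numbered comments above. */
static void program_rc_thresholds(struct gt *gt) { (void)gt; }
static void program_rps_thresholds(struct gt *gt) { (void)gt; }

static void enable_rps_sequence(struct gt *gt)
{
	force_wake_get(gt);		/* GT must stay awake throughout */
	program_rc_thresholds(gt);	/* steps 2a-3 */
	program_rps_thresholds(gt);	/* steps 4-5 */
	force_wake_put(gt);		/* release once programming is done */
}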
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3501,7 +3868,10 @@
 
 	/* In units of 50MHz */
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+	dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
+	dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
+	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
 	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
@@ -3539,48 +3909,16 @@
 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
 	}
 
-	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	intel_print_rc6_info(dev, rc6_mask);
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
 		   GEN6_RC_CTL_EI_MODE(1) |
 		   GEN6_RC_CTL_HW_ENABLE);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   HSW_FREQUENCY(10));
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   HSW_FREQUENCY(12));
-	} else {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   GEN6_FREQUENCY(10) |
-			   GEN6_OFFSET(0) |
-			   GEN6_AGGRESSIVE_TURBO);
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   GEN6_FREQUENCY(12));
-	}
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
-
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+	/* Power down if completely idle for over 50ms */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
 	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
 	if (!ret) {
@@ -3596,7 +3934,8 @@
 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
-	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3624,23 +3963,28 @@
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
 	int scaling_factor = 180;
+	struct cpufreq_policy *policy;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	max_ia_freq = cpufreq_quick_get_max(0);
-	/*
-	 * Default to measured freq if none found, PCU will ensure we don't go
-	 * over
-	 */
-	if (!max_ia_freq)
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		max_ia_freq = policy->cpuinfo.max_freq;
+		cpufreq_cpu_put(policy);
+	} else {
+		/*
+		 * Default to measured freq if none found, PCU will ensure we
+		 * don't go over
+		 */
 		max_ia_freq = tsc_khz;
+	}
 
 	/* Convert from kHz to MHz */
 	max_ia_freq /= 1000;
 
-	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
-	/* convert DDR frequency from units of 133.3MHz to bandwidth */
-	min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+	min_ring_freq = I915_READ(DCLK) & 0xf;
+	/* convert DDR frequency from units of 266.6MHz to bandwidth */
+	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3652,8 +3996,11 @@
 		int diff = dev_priv->rps.max_delay - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (IS_HASWELL(dev)) {
-			ring_freq = (gpu_freq * 5 + 3) / 4;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			/* max(2 * GT, DDR). NB: GT is 50MHz units */
+			ring_freq = max(min_ring_freq, gpu_freq);
+		} else if (IS_HASWELL(dev)) {
+			ring_freq = mult_frac(gpu_freq, 5, 4);
 			ring_freq = max(min_ring_freq, ring_freq);
 			/* leave ia_freq as the default, chosen by cpufreq */
 		} else {
@@ -3709,24 +4056,6 @@
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
 
-static void vlv_rps_timer_work(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps.vlv_work.work);
-
-	/*
-	 * Timer fired, we must be idle.  Drop to min voltage state.
-	 * Note: we use RPe here since it should match the
-	 * Vmin we were shooting for.  That should give us better
-	 * perf when we come back out of RC6 than if we used the
-	 * min freq available.
-	 */
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-		valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4102,14 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	u32 gtfifodbg, val;
+	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
@@ -3812,9 +4142,16 @@
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 
 	/* allows RC6 residency counter to work */
-	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN7_RC_CTL_TO_MODE);
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+	intel_print_rc6_info(dev, rc6_mode);
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	switch ((val >> 6) & 3) {
@@ -3985,6 +4322,8 @@
 
 	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+	intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4942,12 @@
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
-		if (IS_VALLEYVIEW(dev))
-			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
+		dev_priv->rps.enabled = false;
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
 }
@@ -4625,10 +4963,14 @@
 
 	if (IS_VALLEYVIEW(dev)) {
 		valleyview_enable_rps(dev);
+	} else if (IS_BROADWELL(dev)) {
+		gen8_enable_rps(dev);
+		gen6_update_ring_freq(dev);
 	} else {
 		gen6_enable_rps(dev);
 		gen6_update_ring_freq(dev);
 	}
+	dev_priv->rps.enabled = true;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
@@ -4672,7 +5014,7 @@
 		I915_WRITE(DSPCNTR(pipe),
 			   I915_READ(DSPCNTR(pipe)) |
 			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		intel_flush_display_plane(dev_priv, pipe);
+		intel_flush_primary_plane(dev_priv, pipe);
 	}
 }
 
@@ -4932,6 +5274,50 @@
 	}
 }
 
+static void gen8_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe i;
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	/* FIXME(BDW): Check all the w/a, some might only apply to
+	 * pre-production hw. */
+
+	WARN(!i915_preliminary_hw_support,
+	     "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
+	I915_WRITE(HALF_SLICE_CHICKEN3,
+		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
+	I915_WRITE(HALF_SLICE_CHICKEN3,
+		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
+
+	I915_WRITE(_3D_CHICKEN3,
+		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+
+	I915_WRITE(COMMON_SLICE_CHICKEN2,
+		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+
+	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
+
+	/* WaSwitchSolVfFArbitrationPriority */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+	/* WaPsrDPAMaskVBlankInSRD */
+	I915_WRITE(CHICKEN_PAR1_1,
+		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+
+	/* WaPsrDPRSUnmaskVBlankInSRD */
+	for_each_pipe(i) {
+		I915_WRITE(CHICKEN_PIPESL_1(i),
+			   I915_READ(CHICKEN_PIPESL_1(i)) |
+			   DPRS_MASK_VBLANK_SRD);
+	}
+}
+
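Most of the chicken-register writes above go through _MASKED_BIT_ENABLE(): these are masked registers where the top 16 bits select which of the low 16 bits the write affects, so no read-modify-write cycle is needed. A sketch of helpers equivalent to the i915 macros:

#include <stdint.h>

/*
 * Writing bit N of the high half unmasks bit N of the low half, so
 * individual bits can be set or cleared without reading the register
 * first.
 */
static inline uint32_t masked_bit_enable(uint32_t bits)
{
	return (bits << 16) | bits;	/* unmask and set */
}

static inline uint32_t masked_bit_disable(uint32_t bits)
{
	return bits << 16;		/* unmask and clear */
}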
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5255,6 +5641,25 @@
 		lpt_suspend_hw(dev);
 }
 
+static bool is_always_on_power_domain(struct drm_device *dev,
+				      enum intel_display_power_domain domain)
+{
+	unsigned long always_on_domains;
+
+	BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+	if (IS_BROADWELL(dev)) {
+		always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
+	} else if (IS_HASWELL(dev)) {
+		always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+	} else {
+		WARN_ON(1);
+		return true;
+	}
+
+	return BIT(domain) & always_on_domains;
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5673,11 @@
 	if (!HAS_POWER_WELL(dev))
 		return true;
 
-	switch (domain) {
-	case POWER_DOMAIN_PIPE_A:
-	case POWER_DOMAIN_TRANSCODER_EDP:
+	if (is_always_on_power_domain(dev, domain))
 		return true;
-	case POWER_DOMAIN_PIPE_B:
-	case POWER_DOMAIN_PIPE_C:
-	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-	case POWER_DOMAIN_TRANSCODER_A:
-	case POWER_DOMAIN_TRANSCODER_B:
-	case POWER_DOMAIN_TRANSCODER_C:
-		return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+	return I915_READ(HSW_PWR_WELL_DRIVER) ==
 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-	default:
-		BUG();
-	}
 }
 
 static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5721,136 @@
 			spin_lock_irqsave(&dev->vbl_lock, irqflags);
 			for_each_pipe(p)
 				if (p != PIPE_A)
-					dev->last_vblank[p] = 0;
+					dev->vblank[p].last = 0;
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		}
 	}
 }
 
-static struct i915_power_well *hsw_pwr;
+static void __intel_power_well_get(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	if (!power_well->count++)
+		__intel_set_power_well(dev, true);
+}
+
+static void __intel_power_well_put(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	WARN_ON(!power_well->count);
+	if (!--power_well->count && i915_disable_power_well)
+		__intel_set_power_well(dev, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	if (is_always_on_power_domain(dev, domain))
+		return;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+	__intel_power_well_get(dev, &power_domains->power_wells[0]);
+	mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	if (is_always_on_power_domain(dev, domain))
+		return;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+	__intel_power_well_put(dev, &power_domains->power_wells[0]);
+	mutex_unlock(&power_domains->lock);
+}
+
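intel_display_power_get()/put() reduce to a refcount on the single power well: the first user powers it on, the last user powers it off (subject to the i915_disable_power_well parameter). The counting pattern in isolation, with a pthread mutex standing in for power_domains->lock:

#include <assert.h>
#include <pthread.h>

struct power_well {
	pthread_mutex_t lock;
	int count;
	int enabled;
};

static void power_well_get(struct power_well *pw)
{
	pthread_mutex_lock(&pw->lock);
	if (!pw->count++)
		pw->enabled = 1;	/* first user powers the well on */
	pthread_mutex_unlock(&pw->lock);
}

static void power_well_put(struct power_well *pw)
{
	pthread_mutex_lock(&pw->lock);
	assert(pw->count > 0);		/* mirrors WARN_ON(!power_well->count) */
	if (!--pw->count)
		pw->enabled = 0;	/* last user powers it off */
	pthread_mutex_unlock(&pw->lock);
}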
+static struct i915_power_domains *hsw_pwr;
 
 /* Display audio driver power well request */
 void i915_request_power_well(void)
 {
+	struct drm_i915_private *dev_priv;
+
 	if (WARN_ON(!hsw_pwr))
 		return;
 
-	spin_lock_irq(&hsw_pwr->lock);
-	if (!hsw_pwr->count++ &&
-			!hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, true);
-	spin_unlock_irq(&hsw_pwr->lock);
+	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+				power_domains);
+
+	mutex_lock(&hsw_pwr->lock);
+	__intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
+	mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 /* Display audio driver power well release */
 void i915_release_power_well(void)
 {
+	struct drm_i915_private *dev_priv;
+
 	if (WARN_ON(!hsw_pwr))
 		return;
 
-	spin_lock_irq(&hsw_pwr->lock);
-	WARN_ON(!hsw_pwr->count);
-	if (!--hsw_pwr->count &&
-		       !hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, false);
-	spin_unlock_irq(&hsw_pwr->lock);
+	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+				power_domains);
+
+	mutex_lock(&hsw_pwr->lock);
+	__intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
+	mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
 
-	hsw_pwr = &dev_priv->power_well;
+	mutex_init(&power_domains->lock);
+	hsw_pwr = power_domains;
 
-	hsw_pwr->device = dev;
-	spin_lock_init(&hsw_pwr->lock);
-	hsw_pwr->count = 0;
+	power_well = &power_domains->power_wells[0];
+	power_well->count = 0;
 
 	return 0;
 }
 
-void i915_remove_power_well(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_device *dev)
 {
 	hsw_pwr = NULL;
 }
 
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_power_well *power_well = &dev_priv->power_well;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
 
 	if (!HAS_POWER_WELL(dev))
 		return;
 
-	if (!i915_disable_power_well && !enable)
-		return;
+	mutex_lock(&power_domains->lock);
 
-	spin_lock_irq(&power_well->lock);
-	power_well->i915_request = enable;
+	power_well = &power_domains->power_wells[0];
+	__intel_set_power_well(dev, power_well->count > 0);
 
-	/* only reject "disable" power well request */
-	if (power_well->count && !enable) {
-		spin_unlock_irq(&power_well->lock);
-		return;
-	}
-
-	__intel_set_power_well(dev, enable);
-	spin_unlock_irq(&power_well->lock);
+	mutex_unlock(&power_domains->lock);
 }
 
 /*
@@ -5413,7 +5859,7 @@
  * to be enabled, and it will only be disabled if none of the registers is
  * requesting it to be enabled.
  */
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5421,7 +5867,8 @@
 		return;
 
 	/* For now, we need the power well to be always enabled. */
-	intel_set_power_well(dev, true);
+	intel_display_set_init_power(dev, true);
+	intel_power_domains_resume(dev);
 
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
@@ -5525,6 +5972,8 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+		} else if (INTEL_INFO(dev)->gen == 8) {
+			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -5686,7 +6135,4 @@
 
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
-
-	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
 }
-
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee10..b620337 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@
 	return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
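__intel_ring_advance() wraps the tail with "ring->tail &= ring->size - 1", which is a correct modulo only because ring sizes are powers of two, and it skips the hardware tail write when the ring has been stopped via gpu_error.stop_rings. A sketch of the wrap invariant:

#include <assert.h>
#include <stdint.h>

struct ring { uint32_t tail, size; };

static void ring_advance(struct ring *r, uint32_t bytes)
{
	/* "tail &= size - 1" is modulo only for power-of-two sizes */
	assert(r->size && (r->size & (r->size - 1)) == 0);
	r->tail = (r->tail + bytes) & (r->size - 1);
}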
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32	invalidate_domains,
@@ -350,6 +360,47 @@
 	return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -385,8 +436,7 @@
 	int ret = 0;
 	u32 head;
 
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -459,8 +509,7 @@
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
 
 	return ret;
 }
@@ -559,8 +608,8 @@
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
 	return ret;
 }
@@ -593,7 +642,7 @@
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +678,9 @@
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -723,7 +772,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +791,9 @@
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -963,9 +1012,9 @@
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -987,10 +1036,10 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1058,8 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1059,6 +1107,52 @@
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
@@ -1317,7 +1411,7 @@
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
 	ret = intel_ring_idle(ring);
-	if (ret)
+	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
 
@@ -1328,6 +1422,8 @@
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
 	ring->obj = NULL;
+	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_seqno = 0;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1414,6 +1510,9 @@
 	if (ret != -ENOSPC)
 		return ret;
 
+	/* force the tail write in case we have been skipping them */
+	__intel_ring_advance(ring);
+
 	trace_i915_ring_wait_begin(ring);
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1574,7 @@
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
+	if (ring->outstanding_lazy_seqno) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
@@ -1495,10 +1594,20 @@
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-	if (ring->outstanding_lazy_request)
+	if (ring->outstanding_lazy_seqno)
 		return 0;
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+	if (ring->preallocated_lazy_request == NULL) {
+		struct drm_i915_gem_request *request;
+
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ring->preallocated_lazy_request = request;
+	}
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
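intel_ring_alloc_seqno() now allocates the request structure before any commands are emitted, so the later add-request path has no failure point once the ring contents are committed. The reserve-then-commit shape, reduced to userspace:

#include <stdlib.h>

struct request { int seqno; };

struct ring_state {
	struct request *prealloc;	/* reserved before emitting anything */
	int lazy_seqno;
};

/* All fallible work happens here; after a 0 return, the commit path
 * that actually emits commands cannot fail on allocation. */
static int reserve_request(struct ring_state *r)
{
	if (r->lazy_seqno)
		return 0;		/* already reserved */

	if (r->prealloc == NULL) {
		r->prealloc = malloc(sizeof(*r->prealloc));
		if (r->prealloc == NULL)
			return -1;	/* -ENOMEM in the driver */
	}

	r->lazy_seqno = 1;		/* the driver gets a real seqno here */
	return 0;
}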
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1654,7 @@
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
+	BUG_ON(ring->outstanding_lazy_seqno);
 
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1667,6 @@
 	ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-		return;
-	ring->write_tail(ring, ring->tail);
-}
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
@@ -1613,6 +1711,8 @@
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1624,9 +1724,38 @@
 			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, offset);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
 	return 0;
 }
 
@@ -1686,6 +1815,8 @@
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1697,8 +1828,13 @@
 			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 
 	if (IS_GEN7(dev) && flush)
@@ -1721,8 +1857,14 @@
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->flush = gen8_render_ring_flush;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -1764,6 +1906,8 @@
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
@@ -1877,7 +2021,7 @@
 	ring->id = VCS;
 
 	ring->write_tail = ring_write_tail;
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -1886,10 +2030,20 @@
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+		}
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1935,10 +2089,18 @@
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1967,10 +2129,19 @@
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974..71a73f4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
+	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
 	HANGCHECK_KICK,
@@ -140,7 +141,8 @@
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	u32 outstanding_lazy_request;
+	struct drm_i915_gem_request *preallocated_lazy_request;
+	u32 outstanding_lazy_seqno;
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
 
@@ -237,7 +239,12 @@
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
-void intel_ring_advance(struct intel_ring_buffer *ring);
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@
 
 static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	BUG_ON(ring->outstanding_lazy_request == 0);
-	return ring->outstanding_lazy_request;
+	BUG_ON(ring->outstanding_lazy_seqno == 0);
+	return ring->outstanding_lazy_seqno;
 }
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd..a583e8f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@
 		goto log_fail;
 
 	while ((status == SDVO_CMD_STATUS_PENDING ||
-			status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
 		if (retry < 10)
 			msleep(15);
 		else
@@ -1068,7 +1068,7 @@
 
 static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 {
-	unsigned dotclock = pipe_config->adjusted_mode.clock;
+	unsigned dotclock = pipe_config->port_clock;
 	struct dpll *clock = &pipe_config->dpll;
 
 	/* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@
 	 */
 	pipe_config->pixel_multiplier =
 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
-	adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
 	if (intel_sdvo->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@
 	    !intel_sdvo_set_tv_format(intel_sdvo))
 		return;
 
-	/* We have tried to get input timing in mode_fixup, and filled into
-	 * adjusted_mode.
-	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_sdvo_dtd dtd;
 	int encoder_pixel_multiplier = 0;
+	int dotclock;
 	u32 flags = 0, sdvox;
 	u8 val;
 	bool ret;
@@ -1368,6 +1364,13 @@
 			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
 	}
 
+	dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
 				 &val, 1)) {
@@ -1770,6 +1773,9 @@
 {
 	struct edid *edid;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/* set the bus switch and get the modes */
 	edid = intel_sdvo_get_edid(connector);
 
@@ -1865,6 +1871,9 @@
 	uint32_t reply = 0, format_map = 0;
 	int i;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/* Read the list of supported input resolutions for the selected TV
 	 * format.
 	 */
@@ -1899,6 +1908,9 @@
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct drm_display_mode *newmode;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/*
 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
 	 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@
 			break;
 		}
 	}
-
 }
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@
 				     intel_sdvo_connector->tv_format);
 
 	intel_sdvo_destroy_enhance_property(connector);
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(intel_sdvo_connector);
 }
@@ -2394,7 +2404,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2442,7 +2454,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2467,6 +2481,7 @@
 	return true;
 
 err:
+	drm_sysfs_connector_remove(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2479,7 +2494,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2510,7 +2527,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2534,6 +2553,7 @@
 	return true;
 
 err:
+	drm_sysfs_connector_remove(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2605,8 +2625,10 @@
 
 	list_for_each_entry_safe(connector, tmp,
 				 &dev->mode_config.connector_list, head) {
-		if (intel_attached_encoder(connector) == &intel_sdvo->base)
+		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+			drm_sysfs_connector_remove(connector);
 			intel_sdvo_destroy(connector);
+		}
 	}
 }
 
@@ -2876,7 +2898,7 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
 	int i;
-	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+	intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
 	if (!intel_sdvo)
 		return false;
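
Aside (not part of the patch): the net effect of moving drm_sysfs_connector_remove() out of intel_sdvo_destroy() is a fixed teardown order at every call site — the error paths and the output-cleanup loop above all follow the same two-step shape, sketched here with the names used in the hunks:

    /* Unregister the connector from userspace first, then release the
     * driver state; nothing may touch the sysfs node once destroy runs. */
    drm_sysfs_connector_remove(connector);
    intel_sdvo_destroy(connector);
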
 
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5..9944d81 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-/* IOSF sideband */
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
 static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 			   u32 port, u32 opcode, u32 addr, u32 *val)
 {
@@ -101,19 +104,83 @@
 	return val;
 }
 
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
-
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
-			DPIO_OPCODE_REG_READ, reg, &val);
-
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+			PUNIT_OPCODE_REG_READ, reg, &val);
 	return val;
 }
 
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+	u32 port = IOSF_PORT_DPIO;
+
+	WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
+
+	return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+			DPIO_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
 			DPIO_OPCODE_REG_WRITE, reg, &val);
 }
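
Aside (not part of the patch): the new per-unit accessors are all the same shape, differing only in the IOSF port. A hypothetical macro that would stamp them out, using only names from the hunks above:

    #define VLV_SIDEBAND_FNS(unit, port)					\
    u32 vlv_##unit##_read(struct drm_i915_private *dev_priv, u32 reg)	\
    {									\
    	u32 val = 0;							\
    	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), port,		\
    			PUNIT_OPCODE_REG_READ, reg, &val);		\
    	return val;							\
    }									\
    void vlv_##unit##_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) \
    {									\
    	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), port,		\
    			PUNIT_OPCODE_REG_WRITE, reg, &val);		\
    }

    VLV_SIDEBAND_FNS(cck, IOSF_PORT_CCK)	/* expands to vlv_cck_read/write */
    VLV_SIDEBAND_FNS(ccu, IOSF_PORT_CCU)
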
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b..b9fabf8 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,14 +260,14 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
 	else
 		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 
 	sprctl |= SPRITE_ENABLE;
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
 	intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
@@ -288,7 +288,7 @@
 		dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
 		if (!scaling_was_enabled) {
-			intel_update_watermarks(dev);
+			intel_update_watermarks(crtc);
 			intel_wait_for_vblank(dev, pipe);
 		}
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -306,7 +306,7 @@
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
 	else if (obj->tiling_mode != I915_TILING_NONE)
 		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -323,7 +323,7 @@
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static void
@@ -349,7 +349,7 @@
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static int
@@ -521,13 +521,28 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int reg = DSPCNTR(intel_crtc->plane);
 
-	if (!intel_crtc->primary_disabled)
+	if (intel_crtc->primary_enabled)
 		return;
 
-	intel_crtc->primary_disabled = false;
-	intel_update_fbc(dev);
+	intel_crtc->primary_enabled = true;
 
 	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	if (intel_crtc->config.ips_enabled) {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		hsw_enable_ips(intel_crtc);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -538,13 +553,26 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int reg = DSPCNTR(intel_crtc->plane);
 
-	if (intel_crtc->primary_disabled)
+	if (!intel_crtc->primary_enabled)
 		return;
 
-	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+	intel_crtc->primary_enabled = false;
 
-	intel_crtc->primary_disabled = true;
-	intel_update_fbc(dev);
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->fbc.plane == intel_crtc->plane)
+		intel_disable_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	hsw_disable_ips(intel_crtc);
+
+	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 }
 
 static int
@@ -623,15 +651,12 @@
 		   uint32_t src_w, uint32_t src_h)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj, *old_obj;
-	int pipe = intel_plane->pipe;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
-	int ret = 0;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *old_obj = intel_plane->obj;
+	int ret;
 	bool disable_primary = false;
 	bool visible;
 	int hscale, vscale;
@@ -652,29 +677,23 @@
 		.y2 = crtc_y + crtc_h,
 	};
 	const struct drm_rect clip = {
-		.x2 = crtc->mode.hdisplay,
-		.y2 = crtc->mode.vdisplay,
+		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 	};
-
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	old_obj = intel_plane->obj;
-
-	intel_plane->crtc_x = crtc_x;
-	intel_plane->crtc_y = crtc_y;
-	intel_plane->crtc_w = crtc_w;
-	intel_plane->crtc_h = crtc_h;
-	intel_plane->src_x = src_x;
-	intel_plane->src_y = src_y;
-	intel_plane->src_w = src_w;
-	intel_plane->src_h = src_h;
-
-	/* Pipe must be running... */
-	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
-		DRM_DEBUG_KMS("Pipe disabled\n");
-		return -EINVAL;
-	}
+	const struct {
+		int crtc_x, crtc_y;
+		unsigned int crtc_w, crtc_h;
+		uint32_t src_x, src_y, src_w, src_h;
+	} orig = {
+		.crtc_x = crtc_x,
+		.crtc_y = crtc_y,
+		.crtc_w = crtc_w,
+		.crtc_h = crtc_h,
+		.src_x = src_x,
+		.src_y = src_y,
+		.src_w = src_w,
+		.src_h = src_h,
+	};
 
 	/* Don't modify another pipe's plane */
 	if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@
 	 * we can disable the primary and save power.
 	 */
 	disable_primary = drm_rect_equals(&dst, &clip);
-	WARN_ON(disable_primary && !visible);
+	WARN_ON(disable_primary && !visible && intel_crtc->active);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -820,27 +839,40 @@
 	 * the sprite planes only require 128KiB alignment and 32 PTE padding.
 	 */
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-	if (ret)
-		goto out_unlock;
 
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	intel_plane->crtc_x = orig.crtc_x;
+	intel_plane->crtc_y = orig.crtc_y;
+	intel_plane->crtc_w = orig.crtc_w;
+	intel_plane->crtc_h = orig.crtc_h;
+	intel_plane->src_x = orig.src_x;
+	intel_plane->src_y = orig.src_y;
+	intel_plane->src_w = orig.src_w;
+	intel_plane->src_h = orig.src_h;
 	intel_plane->obj = obj;
 
-	/*
-	 * Be sure to re-enable the primary before the sprite is no longer
-	 * covering it fully.
-	 */
-	if (!disable_primary)
-		intel_enable_primary(crtc);
+	if (intel_crtc->active) {
+		/*
+		 * Be sure to re-enable the primary before the sprite is no longer
+		 * covering it fully.
+		 */
+		if (!disable_primary)
+			intel_enable_primary(crtc);
 
-	if (visible)
-		intel_plane->update_plane(plane, crtc, fb, obj,
-					  crtc_x, crtc_y, crtc_w, crtc_h,
-					  src_x, src_y, src_w, src_h);
-	else
-		intel_plane->disable_plane(plane, crtc);
+		if (visible)
+			intel_plane->update_plane(plane, crtc, fb, obj,
+						  crtc_x, crtc_y, crtc_w, crtc_h,
+						  src_x, src_y, src_w, src_h);
+		else
+			intel_plane->disable_plane(plane, crtc);
 
-	if (disable_primary)
-		intel_disable_primary(crtc);
+		if (disable_primary)
+			intel_disable_primary(crtc);
+	}
 
 	/* Unpin old obj after new one is active to avoid ugliness */
 	if (old_obj) {
@@ -850,17 +882,15 @@
 		 * wait for vblank to avoid ugliness, we only need to
 		 * do the pin & ref bookkeeping.
 		 */
-		if (old_obj != obj) {
-			mutex_unlock(&dev->struct_mutex);
-			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
-			mutex_lock(&dev->struct_mutex);
-		}
+		if (old_obj != obj && intel_crtc->active)
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(old_obj);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 
 static int
@@ -868,7 +898,7 @@
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	int ret = 0;
+	struct intel_crtc *intel_crtc;
 
 	if (!plane->fb)
 		return 0;
@@ -876,21 +906,25 @@
 	if (WARN_ON(!plane->crtc))
 		return -EINVAL;
 
-	intel_enable_primary(plane->crtc);
-	intel_plane->disable_plane(plane, plane->crtc);
+	intel_crtc = to_intel_crtc(plane->crtc);
 
-	if (!intel_plane->obj)
-		goto out;
+	if (intel_crtc->active) {
+		intel_enable_primary(plane->crtc);
+		intel_plane->disable_plane(plane, plane->crtc);
+	}
 
-	intel_wait_for_vblank(dev, intel_plane->pipe);
+	if (intel_plane->obj) {
+		if (intel_crtc->active)
+			intel_wait_for_vblank(dev, intel_plane->pipe);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(intel_plane->obj);
-	intel_plane->obj = NULL;
-	mutex_unlock(&dev->struct_mutex);
-out:
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(intel_plane->obj);
+		mutex_unlock(&dev->struct_mutex);
 
-	return ret;
+		intel_plane->obj = NULL;
+	}
+
+	return 0;
 }
 
 static void intel_destroy_plane(struct drm_plane *plane)
@@ -921,7 +955,7 @@
 
 	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_unlock;
 	}
 
@@ -950,7 +984,7 @@
 
 	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_unlock;
 	}
 
@@ -1034,7 +1068,7 @@
 	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
-	intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
 	if (!intel_plane)
 		return -ENOMEM;
 
@@ -1058,6 +1092,7 @@
 		break;
 
 	case 7:
+	case 8:
 		if (IS_IVYBRIDGE(dev)) {
 			intel_plane->can_scale = true;
 			intel_plane->max_downscale = 2;
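
Aside (not part of the patch): the disable_primary decision above hinges on the sprite destination exactly covering the pipe source size; a sketch of the test, assuming the drm_rect helpers behave as in include/drm/drm_rect.h:

    static bool sprite_covers_pipe(const struct drm_rect *dst,
    			       int pipe_src_w, int pipe_src_h)
    {
    	const struct drm_rect clip = {
    		.x2 = pipe_src_w,
    		.y2 = pipe_src_h,
    	};

    	/* The primary plane may be turned off only while the clipped
    	 * sprite destination matches the pipe exactly. */
    	return drm_rect_equals(dst, &clip);
    }
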
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84b..18c4062 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@
 	if (!tv_mode)
 		return false;
 
-	pipe_config->adjusted_mode.clock = tv_mode->clock;
+	pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
@@ -1044,7 +1044,7 @@
 		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
 
 	/* Enable two fixes for the chips that need them. */
-	if (dev->pci_device < 0x2772)
+	if (dev->pdev->device < 0x2772)
 		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
 
 	I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@
 		unsigned int xsize, ysize;
 		/* Pipe must be off here */
 		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
 		/* Wait for vblank for the disable to take effect */
 		if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@
 
 		I915_WRITE(pipeconf_reg, pipeconf);
 		I915_WRITE(dspcntr_reg, dspcntr);
-		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 	}
 
 	j = 0;
@@ -1433,7 +1433,6 @@
 static void
 intel_tv_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1518,7 +1517,7 @@
 static int tv_is_present_in_vbt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i, ret;
 
 	if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@
 		/*
 		 * If the device type is not TV, continue.
 		 */
-		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
-			p_child->device_type != DEVICE_TYPE_TV)
+		if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+			p_child->old.device_type != DEVICE_TYPE_TV)
 			continue;
 		/* Only when the addin_offset is non-zero is the device
 		 * regarded as present.
 		 */
-		if (p_child->addin_offset) {
+		if (p_child->old.addin_offset) {
 			ret = 1;
 			break;
 		}
@@ -1590,12 +1589,12 @@
 	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
 		return;
 
-	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
 	if (!intel_tv) {
 		return;
 	}
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_tv);
 		return;
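
Aside (not part of the patch): the p_child->old.* accesses above assume the VBT child device config became a union of layouts; roughly:

    /* Assumed shape, illustrative only: legacy fields live in the "old"
     * view, newer parsers use the "common" view. */
    union child_device_config {
    	struct old_child_dev_config old;
    	struct common_child_dev_config common;
    };
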
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c..f9883ce 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -93,7 +93,7 @@
 {
 	u32 forcewake_ack;
 
-	if (IS_HASWELL(dev_priv->dev))
+	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
 		forcewake_ack = FORCEWAKE_ACK_HSW;
 	else
 		forcewake_ack = FORCEWAKE_MT_ACK;
@@ -112,7 +112,8 @@
 		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	/* WaRsForcewakeWaitTC0:ivb,hsw */
-	__gen6_gt_wait_for_thread_c0(dev_priv);
+	if (INTEL_INFO(dev_priv->dev)->gen < 8)
+		__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -204,22 +205,336 @@
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
+static void gen6_force_wake_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (--dev_priv->uncore.forcewake_count == 0)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
 void intel_uncore_early_sanitize(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+	if (IS_HASWELL(dev) &&
+	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
+		 * NB: We can't write IDICR yet because we do not have gt funcs
+		 * set up */
+		dev_priv->ellc_size = 128;
+		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	}
 }
 
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		vlv_force_wake_reset(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		__gen6_gt_force_wake_reset(dev_priv);
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			__gen6_gt_force_wake_mt_reset(dev_priv);
+	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg_val;
+
+	intel_uncore_forcewake_reset(dev);
+
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	intel_disable_gt_powersave(dev);
+
+	/* Turn off power gating; this is required especially on BIOS-less systems */
+	if (IS_VALLEYVIEW(dev)) {
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+
+	}
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (dev_priv->uncore.forcewake_count++ == 0)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (--dev_priv->uncore.forcewake_count == 0) {
+		dev_priv->uncore.forcewake_count++;
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->uncore.force_wake_work,
+				 1);
+	}
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
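
Aside (not part of the patch): a usage sketch for the get/put pair above, bracketing a multi-register sequence so the GT cannot power down in between (the registers read here are illustrative):

    gen6_gt_force_wake_get(dev_priv);
    head = I915_READ(RING_HEAD(RENDER_RING_BASE));
    tail = I915_READ(RING_TAIL(RENDER_RING_BASE));
    gen6_gt_force_wake_put(dev_priv);
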
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
+	 * the chip from rc6 before touching it for real. MI_MODE is masked,
+	 * hence harmless to write 0 into. */
+	__raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+#define REG_READ_HEADER(x) \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	ilk_dummy_write(dev_priv); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+		val = __raw_i915_read##x(dev_priv, reg); \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+	} else { \
+		val = __raw_i915_read##x(dev_priv, reg); \
+	} \
+	REG_READ_FOOTER; \
+}
+
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
+
+#define REG_WRITE_HEADER \
+	unsigned long irqflags; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	REG_WRITE_HEADER; \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	REG_WRITE_HEADER; \
+	ilk_dummy_write(dev_priv); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	REG_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	REG_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+static const u32 gen8_shadowed_regs[] = {
+	FORCEWAKE_MT,
+	GEN6_RPNSWREQ,
+	GEN6_RC_VIDEO_FREQ,
+	RING_TAIL(RENDER_RING_BASE),
+	RING_TAIL(GEN6_BSD_RING_BASE),
+	RING_TAIL(VEBOX_RING_BASE),
+	RING_TAIL(BLT_RING_BASE),
+	/* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+		if (reg == gen8_shadowed_regs[i])
+			return true;
+
+	return false;
+}
+
+#define __gen8_write(x) \
+static void \
+gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+	REG_WRITE_HEADER; \
+	if (__needs_put) { \
+		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+	} \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (__needs_put) { \
+		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+	} \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+__gen8_write(8)
+__gen8_write(16)
+__gen8_write(32)
+__gen8_write(64)
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __gen8_write
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_HEADER
+
 void intel_uncore_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+			  gen6_force_wake_work);
+
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
 		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
-	} else if (IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
 	} else if (IS_IVYBRIDGE(dev)) {
@@ -259,146 +574,71 @@
 		dev_priv->uncore.funcs.force_wake_put =
 			__gen6_gt_force_wake_put;
 	}
+
+	switch (INTEL_INFO(dev)->gen) {
+	default:
+		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
+		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
+		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
+		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
+		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		break;
+	case 7:
+	case 6:
+		if (IS_HASWELL(dev)) {
+			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
+			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
+			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
+		} else {
+			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
+			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
+			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
+		}
+		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		break;
+	case 5:
+		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
+		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
+		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
+		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
+		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
+		break;
+	case 4:
+	case 3:
+	case 2:
+		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
+		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
+		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
+		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
+		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
+		break;
+	}
 }
 
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
+void intel_uncore_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_VALLEYVIEW(dev)) {
-		vlv_force_wake_reset(dev_priv);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		__gen6_gt_force_wake_reset(dev_priv);
-		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-			__gen6_gt_force_wake_mt_reset(dev_priv);
-	}
+	flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_uncore_sanitize(dev);
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
-{
-	intel_uncore_forcewake_reset(dev);
-
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_disable_gt_powersave(dev);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-	 ((reg) < 0x40000) &&            \
-	 ((reg) != FORCEWAKE))
-
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
-	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
-	 * the chip from rc6 before touching it for real. MI_MODE is masked,
-	 * hence harmless to write 0 into. */
-	__raw_i915_write32(dev_priv, MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-			  reg);
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unclaimed write to %x\n", reg);
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-#define __i915_read(x) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
-	unsigned long irqflags; \
-	u##x val = 0; \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	if (dev_priv->info->gen == 5) \
-		ilk_dummy_write(dev_priv); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
-		val = __raw_i915_read##x(dev_priv, reg); \
-		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
-	} else { \
-		val = __raw_i915_read##x(dev_priv, reg); \
-	} \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
-	return val; \
-}
-
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
-	unsigned long irqflags; \
-	u32 __fifo_ret = 0; \
-	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
-	} \
-	if (dev_priv->info->gen == 5) \
-		ilk_dummy_write(dev_priv); \
-	hsw_unclaimed_reg_clear(dev_priv, reg); \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	if (unlikely(__fifo_ret)) { \
-		gen6_gt_check_fifodbg(dev_priv); \
-	} \
-	hsw_unclaimed_reg_check(dev_priv, reg); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-}
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
 static const struct register_whitelist {
 	uint64_t offset;
 	uint32_t size;
@@ -445,36 +685,6 @@
 	return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_I85X(dev))
-		return -ENODEV;
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	if (IS_I830(dev) || IS_845G(dev)) {
-		I915_WRITE(DEBUG_RESET_I830,
-			   DEBUG_RESET_DISPLAY |
-			   DEBUG_RESET_RENDER |
-			   DEBUG_RESET_FULL);
-		POSTING_READ(DEBUG_RESET_I830);
-		msleep(1);
-
-		I915_WRITE(DEBUG_RESET_I830, 0);
-		POSTING_READ(DEBUG_RESET_I830);
-	}
-
-	msleep(1);
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	return 0;
-}
-
 static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
@@ -576,7 +786,6 @@
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
 	case 4: return i965_do_reset(dev);
-	case 2: return i8xx_do_reset(dev);
 	default: return -ENODEV;
 	}
 }
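
Aside (not part of the patch): with the per-gen vfuncs installed by intel_uncore_init() above, the register access macros presumably dispatch through the uncore function table rather than the old __i915_read/__i915_write bodies; an assumed sketch:

    /* Assumed dispatch, not confirmed by these hunks: */
    #define I915_READ(reg) \
    	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
    #define I915_WRITE(reg, val) \
    	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
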
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166d..087db33 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@
 	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
 	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
 
-	dev->counters += 3;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-
 	ret = drm_vblank_init(dev, 1);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281..2b0ceb8 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@
 	/* Disable *all* interrupts */
 	MGA_WRITE(MGA_IEN, 0);
 
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 }
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cde..3a1c5fb 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
	 This is a KMS driver for the MGA G200 server chips; it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2..f15ea3c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = mgag200_gem_init_object,
 	.gem_free_object = mgag200_gem_free_object,
 	.dumb_create = mgag200_dumb_create,
 	.dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae193..cf11ee6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@
 int mgag200_gem_create(struct drm_device *dev,
 		   u32 size, bool iskernel,
 		       struct drm_gem_object **obj);
-int mgag200_gem_init_object(struct drm_gem_object *obj);
 int mgag200_dumb_create(struct drm_file *file,
 			struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861..b1120cb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@
 	return 0;
 }
 
-int mgag200_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void mgag200_bo_unref(struct mgag200_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 503a414..ee6ed63 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -765,8 +765,6 @@
 	}
 	mgag200_bo_unreserve(bo);
 
-	DRM_INFO("mga base %llx\n", gpu_addr);
-
 	mga_set_start_address(crtc, (u32)gpu_addr);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19c..f39ab75 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@
 config DRM_MSM_FBDEV
 	bool "Enable legacy fbdev support for MSM modesetting driver"
 	depends on DRM_MSM
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e179148..e5fa12b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -21,6 +21,7 @@
 	msm_drv.o \
 	msm_fb.o \
 	msm_gem.o \
+	msm_gem_prime.o \
 	msm_gem_submit.o \
 	msm_gpu.o \
 	msm_ringbuffer.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 3546386..9588098 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -4,16 +4,16 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -317,6 +317,38 @@
 #define A2XX_RBBM_STATUS_RB_CNTX_BUSY				0x40000000
 #define A2XX_RBBM_STATUS_GUI_ACTIVE				0x80000000
 
+#define REG_A2XX_MH_ARBITER_CONFIG				0x00000a40
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK		0x0000003f
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT		0
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
+{
+	return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY		0x00000040
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE			0x00000080
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE		0x00000100
+#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL			0x00000200
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK			0x00001c00
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT			10
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
+{
+	return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE		0x00002000
+#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE		0x00004000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE		0x00008000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK		0x003f0000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT		16
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
+{
+	return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE			0x00400000
+#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE			0x00800000
+#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE			0x01000000
+#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE			0x02000000
+#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE			0x04000000
+
 #define REG_A2XX_A220_VSC_BIN_SIZE				0x00000c01
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK			0x0000001f
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT			0
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d183516..d4afdf6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -4,16 +4,16 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -637,11 +637,12 @@
 #define REG_A3XX_GRAS_SU_MODE_CONTROL				0x00002070
 #define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT			0x00000001
 #define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK			0x00000002
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK		0x000007fc
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT		2
-static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
+#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW			0x00000004
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK		0x000007f8
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT		3
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
 {
-	return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+	return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
 }
 #define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET			0x00000800
 
@@ -745,6 +746,7 @@
 }
 #define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE		0x00001000
 #define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM			0x00002000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST			0x00400000
 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK		0x07000000
 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT		24
 static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
@@ -767,7 +769,19 @@
 	return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
 }
 
-#define REG_A3XX_UNKNOWN_20C3					0x000020c3
+#define REG_A3XX_RB_ALPHA_REF					0x000020c3
+#define A3XX_RB_ALPHA_REF_UINT__MASK				0x0000ff00
+#define A3XX_RB_ALPHA_REF_UINT__SHIFT				8
+static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
+{
+	return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
+}
+#define A3XX_RB_ALPHA_REF_FLOAT__MASK				0xffff0000
+#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT				16
+static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+}
 
 static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
 
@@ -1002,7 +1016,7 @@
 #define REG_A3XX_RB_DEPTH_CONTROL				0x00002100
 #define A3XX_RB_DEPTH_CONTROL_Z_ENABLE				0x00000002
 #define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE			0x00000004
-#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE			0x00000008
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE			0x00000008
 #define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK			0x00000070
 #define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT			4
 static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
@@ -1038,7 +1052,8 @@
 
 #define REG_A3XX_RB_STENCIL_CONTROL				0x00002104
 #define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE			0x00000001
-#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF		0x00000004
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF		0x00000002
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ			0x00000004
 #define A3XX_RB_STENCIL_CONTROL_FUNC__MASK			0x00000700
 #define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT			8
 static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
@@ -2074,6 +2089,7 @@
 #define REG_A3XX_TP_PERFCOUNTER5_SELECT				0x00000f09
 
 #define REG_A3XX_TEX_SAMP_0					0x00000000
+#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR			0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK				0x0000000c
 #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT				2
 static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
@@ -2134,6 +2150,12 @@
 {
 	return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
 }
+#define A3XX_TEX_CONST_0_MIPLVLS__MASK				0x000f0000
+#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT				16
+static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+	return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
+}
 #define A3XX_TEX_CONST_0_FMT__MASK				0x1fc00000
 #define A3XX_TEX_CONST_0_FMT__SHIFT				22
 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 61979d4..33dcc60 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -4,16 +4,16 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 94c13f4..259ad70 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -4,16 +4,16 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9712 bytes, from 2013-05-26 15:22:37)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51415 bytes, from 2013-08-03 14:26:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6f8396b..6d4c62b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index aefc1b8..d1df38b 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a225e81..0030a11 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index f5fa486..4e939f8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index bee3636..dbde4f6 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
index bbeeebe..9908ffe 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -4,13 +4,13 @@
 /* Autogenerated file, DO NOT EDIT manually!
 
 This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://0x04.net/cgit/index.cgi/rules-ng-ng
-git clone git://0x04.net/rules-ng-ng
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
@@ -42,28 +42,28 @@
 */
 
 
-enum mpd4_bpc {
+enum mdp4_bpc {
 	BPC1 = 0,
 	BPC5 = 1,
 	BPC6 = 2,
 	BPC8 = 3,
 };
 
-enum mpd4_bpc_alpha {
+enum mdp4_bpc_alpha {
 	BPC1A = 0,
 	BPC4A = 1,
 	BPC6A = 2,
 	BPC8A = 3,
 };
 
-enum mpd4_alpha_type {
+enum mdp4_alpha_type {
 	FG_CONST = 0,
 	BG_CONST = 1,
 	FG_PIXEL = 2,
 	BG_PIXEL = 3,
 };
 
-enum mpd4_pipe {
+enum mdp4_pipe {
 	VG1 = 0,
 	VG2 = 1,
 	RGB1 = 2,
@@ -73,13 +73,13 @@
 	VG4 = 6,
 };
 
-enum mpd4_mixer {
+enum mdp4_mixer {
 	MIXER0 = 0,
 	MIXER1 = 1,
 	MIXER2 = 2,
 };
 
-enum mpd4_mixer_stage_id {
+enum mdp4_mixer_stage_id {
 	STAGE_UNUSED = 0,
 	STAGE_BASE = 1,
 	STAGE0 = 2,
@@ -194,56 +194,56 @@
 #define REG_MDP4_LAYERMIXER2_IN_CFG				0x000100f0
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK			0x00000007
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT			0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1			0x00000008
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK			0x00000070
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT			4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1			0x00000080
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK			0x00000700
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT			8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1			0x00000800
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK			0x00007000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT			12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1			0x00008000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK			0x00070000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT			16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1			0x00080000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK			0x00700000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT			20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1			0x00800000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK			0x07000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT			24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1			0x08000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK			0x70000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT			28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
 }
@@ -254,56 +254,56 @@
 #define REG_MDP4_LAYERMIXER_IN_CFG				0x00010100
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK			0x00000007
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT			0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1			0x00000008
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK			0x00000070
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT			4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1			0x00000080
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK			0x00000700
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT			8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1			0x00000800
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK			0x00007000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT			12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1			0x00008000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK			0x00070000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT			16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1			0x00080000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK			0x00700000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT			20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1			0x00800000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK			0x07000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT			24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1			0x08000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK			0x70000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT			28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
 }
@@ -369,7 +369,7 @@
 static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK			0x00000003
 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT			0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
 {
 	return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
 }
@@ -377,7 +377,7 @@
 #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA				0x00000008
 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK			0x00000030
 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT			4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
 {
 	return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
 }
@@ -472,19 +472,19 @@
 static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
 #define MDP4_DMA_CONFIG_G_BPC__MASK				0x00000003
 #define MDP4_DMA_CONFIG_G_BPC__SHIFT				0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
 }
 #define MDP4_DMA_CONFIG_B_BPC__MASK				0x0000000c
 #define MDP4_DMA_CONFIG_B_BPC__SHIFT				2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
 }
 #define MDP4_DMA_CONFIG_R_BPC__MASK				0x00000030
 #define MDP4_DMA_CONFIG_R_BPC__SHIFT				4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
 }
@@ -601,9 +601,9 @@
 
 static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK				0xffff0000
 #define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT			16
 static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -617,7 +617,7 @@
 	return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_XY_Y__MASK				0xffff0000
 #define MDP4_PIPE_SRC_XY_Y__SHIFT				16
 static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
@@ -631,7 +631,7 @@
 	return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
 #define MDP4_PIPE_DST_SIZE_HEIGHT__MASK				0xffff0000
 #define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT			16
 static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
@@ -645,7 +645,7 @@
 	return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
 #define MDP4_PIPE_DST_XY_Y__MASK				0xffff0000
 #define MDP4_PIPE_DST_XY_Y__SHIFT				16
 static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
@@ -659,13 +659,13 @@
 	return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_STRIDE_A_P0__MASK				0x0000ffff
 #define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT			0
 static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -679,7 +679,7 @@
 	return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_STRIDE_B_P2__MASK				0x0000ffff
 #define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT			0
 static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -693,7 +693,7 @@
 	return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
 #define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK			0xffff0000
 #define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT			16
 static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
@@ -707,28 +707,28 @@
 	return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK			0x00000003
 #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT			0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK			0x0000000c
 #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT			2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK			0x00000030
 #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT			4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK			0x000000c0
 #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT			6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
 }
@@ -750,7 +750,7 @@
 #define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB			0x00040000
 #define MDP4_PIPE_SRC_FORMAT_SOLID_FILL				0x00400000
 
-static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK			0x000000ff
 #define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT			0
 static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -776,7 +776,7 @@
 	return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
 #define MDP4_PIPE_OP_MODE_SCALEX_EN				0x00000001
 #define MDP4_PIPE_OP_MODE_SCALEY_EN				0x00000002
 #define MDP4_PIPE_OP_MODE_SRC_YCBCR				0x00000200
@@ -789,36 +789,36 @@
 #define MDP4_PIPE_OP_MODE_DEINT_EN				0x00040000
 #define MDP4_PIPE_OP_MODE_DEINT_ODD_REF				0x00080000
 
-static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
 
 
-static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
 
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
 
 #define REG_MDP4_LCDC						0x000c0000
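
The enum renames above (mpd4_* to mdp4_*) flow through the generated register
accessors, which all follow one convention: a __MASK/__SHIFT pair per field plus
an inline packer that shifts and then masks. A standalone sketch of that
convention, with a hypothetical field name:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 3-bit field at bits [6:4], mirroring the generated
 * __MASK/__SHIFT + inline-packer convention used in mdp4.xml.h. */
#define DEMO_REG_FIELD__MASK	0x00000070
#define DEMO_REG_FIELD__SHIFT	4

static inline uint32_t DEMO_REG_FIELD(uint32_t val)
{
	/* shift into place, then mask so an out-of-range value
	 * cannot corrupt neighbouring fields */
	return (val << DEMO_REG_FIELD__SHIFT) & DEMO_REG_FIELD__MASK;
}

int main(void)
{
	uint32_t reg = 0;

	reg |= DEMO_REG_FIELD(5);	/* pack value 5 into the field */
	printf("reg = 0x%08x\n", reg);	/* prints reg = 0x00000050 */
	return 0;
}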
 
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
index de6bea2..019d530 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -26,6 +26,7 @@
 	struct drm_crtc base;
 	char name[8];
 	struct drm_plane *plane;
+	struct drm_plane *planes[8];
 	int id;
 	int ovlp;
 	enum mdp4_dma dma;
@@ -50,7 +51,11 @@
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct work_struct pageflip_work;
+	struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP   0x2
+	atomic_t pending;
 
 	/* the fb that we currently hold a scanout ref to: */
 	struct drm_framebuffer *fb;
@@ -92,7 +97,8 @@
 	}
 }
 
-static void complete_flip(struct drm_crtc *crtc, bool canceled)
+/* if file != NULL, this is the potential cancel-flip path from preclose */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -102,11 +108,14 @@
 	spin_lock_irqsave(&dev->event_lock, flags);
 	event = mdp4_crtc->event;
 	if (event) {
-		mdp4_crtc->event = NULL;
-		if (canceled)
-			event->base.destroy(&event->base);
-		else
+		/* if regular vblank case (!file) or if cancel-flip from
+		 * preclose on file that requested flip, then send the
+		 * event:
+		 */
+		if (!file || (event->base.file_priv == file)) {
+			mdp4_crtc->event = NULL;
 			drm_send_vblank_event(dev, mdp4_crtc->id, event);
+		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -115,9 +124,15 @@
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	uint32_t flush = 0;
+	uint32_t i, flush = 0;
 
-	flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
+	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+		struct drm_plane *plane = mdp4_crtc->planes[i];
+		if (plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			flush |= pipe2flush(pipe_id);
+		}
+	}
 	flush |= ovlp2flush(mdp4_crtc->ovlp);
 
 	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -125,17 +140,29 @@
 	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
 }
 
-static void pageflip_worker(struct work_struct *work)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	atomic_or(pending, &mdp4_crtc->pending);
+	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
 {
 	struct mdp4_crtc *mdp4_crtc =
-		container_of(work, struct mdp4_crtc, pageflip_work);
+		container_of(cb, struct mdp4_crtc, pageflip_cb);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
+	struct drm_framebuffer *fb = crtc->fb;
 
-	mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
+	if (!fb)
+		return;
+
+	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
 	crtc_flush(crtc);
 
 	/* enable vblank to complete flip: */
-	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+	request_pending(crtc, PENDING_FLIP);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -205,67 +232,69 @@
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
 	int i, ovlp = mdp4_crtc->ovlp;
 	uint32_t mixer_cfg = 0;
+	static const enum mdp4_mixer_stage_id stages[] = {
+			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+	};
+	/* statically (for now) map planes to mixer stage (z-order): */
+	static const int idxs[] = {
+			[VG1]  = 1,
+			[VG2]  = 2,
+			[RGB1] = 0,
+			[RGB2] = 0,
+			[RGB3] = 0,
+			[VG3]  = 3,
+			[VG4]  = 4,
 
-	/*
-	 * This probably would also need to be triggered by any attached
-	 * plane when it changes.. for now since we are only using a single
-	 * private plane, the configuration is hard-coded:
-	 */
+	};
+	bool alpha[4] = { false, false, false, false };
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
 
+	/* TODO single register for all CRTCs, so this won't work properly
+	 * when multiple CRTCs are active..
+	 */
+	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+		struct drm_plane *plane = mdp4_crtc->planes[i];
+		if (plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			int idx = idxs[pipe_id];
+			if (idx > 0) {
+				const struct mdp4_format *format =
+					to_mdp4_format(msm_framebuffer_format(plane->fb));
+				alpha[idx-1] = format->alpha_enable;
+			}
+			mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
+		}
+	}
+
+	/* this shouldn't happen.. and seems to cause underflow: */
+	WARN_ON(!mixer_cfg);
+
 	for (i = 0; i < 4; i++) {
-		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
-		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
-		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
-				MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
-				MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
-		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
+		uint32_t op;
+
+		if (alpha[i]) {
+			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+		} else {
+			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+		}
+
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
 	}
 
-	/* TODO single register for all CRTCs, so this won't work properly
-	 * when multiple CRTCs are active..
-	 */
-	switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
-	case VG1:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
-		break;
-	case VG2:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
-		break;
-	case RGB1:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
-		break;
-	case RGB2:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
-		break;
-	case RGB3:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
-		break;
-	case VG3:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
-		break;
-	case VG4:
-		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
-			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
-		break;
-	default:
-		WARN_ON("invalid pipe");
-		break;
-	}
 	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
 }
 
@@ -377,6 +406,7 @@
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_gem_object *obj;
+	unsigned long flags;
 
 	if (mdp4_crtc->event) {
 		dev_err(dev->dev, "already pending flip!\n");
@@ -385,11 +415,13 @@
 
 	obj = msm_framebuffer_bo(new_fb, 0);
 
+	spin_lock_irqsave(&dev->event_lock, flags);
 	mdp4_crtc->event = event;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
 	update_fb(crtc, true, new_fb);
 
-	return msm_gem_queue_inactive_work(obj,
-			&mdp4_crtc->pageflip_work);
+	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
 
 static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -498,6 +530,8 @@
 		drm_gem_object_unreference_unlocked(old_bo);
 	}
 
+	request_pending(crtc, PENDING_CURSOR);
+
 	return 0;
 
 fail:
@@ -542,13 +576,21 @@
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
 	struct msm_drm_private *priv = crtc->dev->dev_private;
+	unsigned pending;
 
-	update_cursor(crtc);
-	complete_flip(crtc, false);
 	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
 
-	drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
-	drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+	pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+	if (pending & PENDING_FLIP) {
+		complete_flip(crtc, NULL);
+		drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+	}
+
+	if (pending & PENDING_CURSOR) {
+		update_cursor(crtc);
+		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+	}
 }
 
 static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
@@ -565,9 +607,10 @@
 	return mdp4_crtc->vblank.irqmask;
 }
 
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
-	complete_flip(crtc, true);
+	DBG("cancel: %p", file);
+	complete_flip(crtc, file);
 }
 
 /* set dma config, ie. the format the encoder wants. */
@@ -622,6 +665,32 @@
 	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
 }
 
+static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
+		struct drm_plane *plane)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
+
+	if (mdp4_crtc->planes[pipe_id] == plane)
+		return;
+
+	mdp4_crtc->planes[pipe_id] = plane;
+	blend_setup(crtc);
+	if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
+		crtc_flush(crtc);
+}
+
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+	set_attach(crtc, mdp4_plane_pipe(plane), plane);
+}
+
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+	set_attach(crtc, mdp4_plane_pipe(plane), NULL);
+}
+
 static const char *dma_names[] = {
 		"DMA_P", "DMA_S", "DMA_E",
 };
@@ -644,7 +713,6 @@
 	crtc = &mdp4_crtc->base;
 
 	mdp4_crtc->plane = plane;
-	mdp4_crtc->plane->crtc = crtc;
 
 	mdp4_crtc->ovlp = ovlp_id;
 	mdp4_crtc->dma = dma_id;
@@ -668,7 +736,7 @@
 	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
 			"unref cursor", unref_cursor_worker);
 
-	INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
+	INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
 
 	drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
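
The pending-work handling above replaces a single implicit flip with an atomic
bitmask: producers OR in PENDING_FLIP or PENDING_CURSOR and arm the vblank
interrupt, and the handler drains everything with one atomic exchange. A minimal
userspace sketch of the same pattern using C11 atomics (the printfs stand in
for the real completion work):

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2

static atomic_uint pending;

/* producer side: mark work that the next vblank should finish */
static void request_pending(unsigned bits)
{
	atomic_fetch_or(&pending, bits);
	/* the real driver also enables the vblank irq here */
}

/* consumer side: one vblank "irq" drains everything requested so far */
static void vblank_irq(void)
{
	unsigned bits = atomic_exchange(&pending, 0);

	if (bits & PENDING_FLIP)
		printf("complete page flip\n");
	if (bits & PENDING_CURSOR)
		printf("commit cursor update\n");
}

int main(void)
{
	request_pending(PENDING_FLIP);
	request_pending(PENDING_CURSOR);
	vblank_irq();	/* drains both in a single pass */
	return 0;
}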
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
index 7b645f2..17330b0 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -44,6 +44,22 @@
 	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3),
 };
 
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+		uint32_t max_formats)
+{
+	uint32_t i;
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		const struct mdp4_format *f = &formats[i];
+
+		if (i == max_formats)
+			break;
+
+		pixel_formats[i] = f->base.pixel_format;
+	}
+
+	return i;
+}
+
 const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
 {
 	int i;
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index bc7fd11..8972ac3 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -135,7 +135,7 @@
 	unsigned i;
 
 	for (i = 0; i < priv->num_crtcs; i++)
-		mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
+		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
 }
 
 static void mdp4_destroy(struct msm_kms *kms)
@@ -196,6 +196,23 @@
 	 * for more than just RGB1->DMA_E->DTV->HDMI
 	 */
 
+	/* construct non-private planes: */
+	plane = mdp4_plane_init(dev, VG1, false);
+	if (IS_ERR(plane)) {
+		dev_err(dev->dev, "failed to construct plane for VG1\n");
+		ret = PTR_ERR(plane);
+		goto fail;
+	}
+	priv->planes[priv->num_planes++] = plane;
+
+	plane = mdp4_plane_init(dev, VG2, false);
+	if (IS_ERR(plane)) {
+		dev_err(dev->dev, "failed to construct plane for VG2\n");
+		ret = PTR_ERR(plane);
+		goto fail;
+	}
+	priv->planes[priv->num_planes++] = plane;
+
 	/* the CRTCs get constructed with a private plane: */
 	plane = mdp4_plane_init(dev, RGB1, true);
 	if (IS_ERR(plane)) {
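
The plane constructors above report failure through the kernel's error-pointer
convention: the errno is encoded in the returned pointer and recovered with
IS_ERR()/PTR_ERR(). A simplified userspace re-implementation of that convention
(the real helpers live in include/linux/err.h):

#include <stdio.h>
#include <errno.h>

/* errno values are small, so -errno cast to a pointer lands in the
 * top page of the address space, which is never a valid allocation */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *plane_init(int fail)
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *plane = plane_init(1);

	if (IS_ERR(plane))
		printf("init failed: %ld\n", PTR_ERR(plane));	/* -12 */
	return 0;
}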
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
index 1e83554..eb015c8 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -75,8 +75,8 @@
 
 struct mdp4_format {
 	struct msm_format base;
-	enum mpd4_bpc bpc_r, bpc_g, bpc_b;
-	enum mpd4_bpc_alpha bpc_a;
+	enum mdp4_bpc bpc_r, bpc_g, bpc_b;
+	enum mdp4_bpc_alpha bpc_a;
 	uint8_t unpack[4];
 	bool alpha_enable, unpack_tight;
 	uint8_t cpp, unpack_count;
@@ -93,7 +93,7 @@
 	return msm_readl(mdp4_kms->mmio + reg);
 }
 
-static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
 {
 	switch (pipe) {
 	case VG1:      return MDP4_OVERLAY_FLUSH_VG1;
@@ -133,6 +133,48 @@
 	}
 }
 
+static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
+		enum mdp4_mixer_stage_id stage)
+{
+	uint32_t mixer_cfg = 0;
+
+	switch (pipe) {
+	case VG1:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+		break;
+	case VG2:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+		break;
+	case RGB1:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+		break;
+	case RGB2:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+		break;
+	case RGB3:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+		break;
+	case VG3:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+		break;
+	case VG4:
+		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+		break;
+	default:
+		WARN_ON("invalid pipe");
+		break;
+	}
+
+	return mixer_cfg;
+}
+
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
@@ -146,6 +188,8 @@
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
+		uint32_t max_formats);
 const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
 
 void mdp4_plane_install_properties(struct drm_plane *plane,
@@ -158,14 +202,16 @@
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
 		uint32_t src_w, uint32_t src_h);
-enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
-		enum mpd4_pipe pipe_id, bool private_plane);
+		enum mdp4_pipe pipe_id, bool private_plane);
 
 uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id, int ovlp_id,
 		enum mdp4_dma dma_id);
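
The new mixercfg() helper spells out one case per pipe, but the
LAYERMIXER_IN_CFG layout is regular: a 3-bit stage field at shift 4*n and a
MIXER1 select bit at 4*n+3. A hedged sketch of an equivalent table-driven
computation, assuming that layout (it matches the masks in mdp4.xml.h above):

#include <stdint.h>
#include <stdio.h>

enum mdp4_pipe { VG1, VG2, RGB1, RGB2, RGB3, VG3, VG4 };

/* field index within LAYERMIXER_IN_CFG for each pipe */
static const int pipe_field[] = {
	[VG1] = 0, [VG2] = 1, [RGB1] = 2, [RGB2] = 3,
	[RGB3] = 4, [VG3] = 5, [VG4] = 6,
};

static uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, uint32_t stage)
{
	int n = pipe_field[pipe];
	uint32_t cfg = (stage & 0x7) << (4 * n);	/* 3-bit stage field */

	if (mixer == 1)
		cfg |= 1u << (4 * n + 3);		/* PIPEn_MIXER1 bit */
	return cfg;
}

int main(void)
{
	/* VG2 (field 1), mixer 1, STAGE_BASE (= 1) -> 0x00000090 */
	printf("0x%08x\n", mixercfg(1, VG2, 1));
	return 0;
}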
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
index 3468229..0f0af24 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -22,7 +22,7 @@
 	struct drm_plane base;
 	const char *name;
 
-	enum mpd4_pipe pipe;
+	enum mdp4_pipe pipe;
 
 	uint32_t nformats;
 	uint32_t formats[32];
@@ -61,7 +61,9 @@
 static int mdp4_plane_disable(struct drm_plane *plane)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-	DBG("%s: TODO", mdp4_plane->name); // XXX
+	DBG("%s: disable", mdp4_plane->name);
+	if (plane->crtc)
+		mdp4_crtc_detach(plane->crtc, plane);
 	return 0;
 }
 
@@ -101,7 +103,7 @@
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
-	enum mpd4_pipe pipe = mdp4_plane->pipe;
+	enum mdp4_pipe pipe = mdp4_plane->pipe;
 	uint32_t iova;
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -129,7 +131,7 @@
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
-	enum mpd4_pipe pipe = mdp4_plane->pipe;
+	enum mdp4_pipe pipe = mdp4_plane->pipe;
 	const struct mdp4_format *format;
 	uint32_t op_mode = 0;
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -141,6 +143,10 @@
 	src_w = src_w >> 16;
 	src_h = src_h >> 16;
 
+	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
+			fb->base.id, src_x, src_y, src_w, src_h,
+			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
 	if (src_w != crtc_w) {
 		op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
 		/* TODO calc phasex_step */
@@ -191,7 +197,8 @@
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
 
-	plane->crtc = crtc;
+	/* TODO detach from old crtc (if we had more than one) */
+	mdp4_crtc_attach(crtc, plane);
 
 	return 0;
 }
@@ -202,7 +209,7 @@
 		"VG3", "VG4",
 };
 
-enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	return mdp4_plane->pipe;
@@ -210,9 +217,8 @@
 
 /* initialize plane */
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
-		enum mpd4_pipe pipe_id, bool private_plane)
+		enum mdp4_pipe pipe_id, bool private_plane)
 {
-	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_plane *plane = NULL;
 	struct mdp4_plane *mdp4_plane;
 	int ret;
@@ -228,8 +234,12 @@
 	mdp4_plane->pipe = pipe_id;
 	mdp4_plane->name = pipe_names[pipe_id];
 
-	drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
-			mdp4_plane->formats, mdp4_plane->nformats, private_plane);
+	mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
+			ARRAY_SIZE(mdp4_plane->formats));
+
+	drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+			mdp4_plane->formats, mdp4_plane->nformats,
+			private_plane);
 
 	mdp4_plane_install_properties(plane, &plane->base);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b3a2f16..8653769 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -187,6 +187,7 @@
 	init_waitqueue_head(&priv->fence_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->fence_cbs);
 
 	drm_mode_config_init(dev);
 
@@ -539,15 +540,36 @@
 	return ret;
 }
 
-/* call under struct_mutex */
+/* called from workqueue */
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 
-	if (fence > priv->completed_fence) {
-		priv->completed_fence = fence;
-		wake_up_all(&priv->fence_event);
+	mutex_lock(&dev->struct_mutex);
+	priv->completed_fence = max(fence, priv->completed_fence);
+
+	while (!list_empty(&priv->fence_cbs)) {
+		struct msm_fence_cb *cb;
+
+		cb = list_first_entry(&priv->fence_cbs,
+				struct msm_fence_cb, work.entry);
+
+		if (cb->fence > priv->completed_fence)
+			break;
+
+		list_del_init(&cb->work.entry);
+		queue_work(priv->wq, &cb->work);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	wake_up_all(&priv->fence_event);
+}
+
+void __msm_fence_worker(struct work_struct *work)
+{
+	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
+	cb->func(cb);
 }
 
 /*
@@ -650,13 +672,13 @@
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -680,7 +702,11 @@
 };
 
 static struct drm_driver msm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features    = DRIVER_HAVE_IRQ |
+				DRIVER_GEM |
+				DRIVER_PRIME |
+				DRIVER_RENDER |
+				DRIVER_MODESET,
 	.load               = msm_load,
 	.unload             = msm_unload,
 	.open               = msm_open,
@@ -698,6 +724,16 @@
 	.dumb_create        = msm_gem_dumb_create,
 	.dumb_map_offset    = msm_gem_dumb_map_offset,
 	.dumb_destroy       = drm_gem_dumb_destroy,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export   = drm_gem_prime_export,
+	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_pin      = msm_gem_prime_pin,
+	.gem_prime_unpin    = msm_gem_prime_unpin,
+	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+	.gem_prime_vmap     = msm_gem_prime_vmap,
+	.gem_prime_vunmap   = msm_gem_prime_vunmap,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = msm_debugfs_init,
 	.debugfs_cleanup    = msm_debugfs_cleanup,
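
msm_update_fence() above advances completed_fence and walks priv->fence_cbs in
order, queueing each callback whose fence has been reached; callbacks are
appended in submission order, so fences on the list are non-decreasing and the
walk can stop at the first unmet one. A standalone sketch of that dispatch loop
(single-threaded, so no locking; the kernel version queues cb->work on a
workqueue instead of calling directly):

#include <stdint.h>
#include <stdio.h>

struct fence_cb {
	struct fence_cb *next;
	uint32_t fence;
	void (*func)(struct fence_cb *cb);
};

static struct fence_cb *cbs;		/* singly linked, fences ascending */
static uint32_t completed_fence;

static void update_fence(uint32_t fence)
{
	if (fence > completed_fence)
		completed_fence = fence;

	/* dispatch every callback whose fence has now passed;
	 * the list is ordered, so stop at the first unmet fence */
	while (cbs && cbs->fence <= completed_fence) {
		struct fence_cb *cb = cbs;
		cbs = cb->next;
		cb->func(cb);
	}
}

static void done(struct fence_cb *cb)
{
	printf("fence %u retired\n", cb->fence);
}

int main(void)
{
	struct fence_cb b = { NULL, 7, done };
	struct fence_cb a = { &b, 3, done };

	cbs = &a;
	update_fence(5);	/* fires only the fence-3 callback */
	update_fence(9);	/* fires the fence-7 callback */
	return 0;
}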
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index df8f1d0..d39f086 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -73,10 +73,16 @@
 
 	struct workqueue_struct *wq;
 
+	/* callbacks deferred until bo is inactive: */
+	struct list_head fence_cbs;
+
 	/* registered IOMMU domains: */
 	unsigned int num_iommus;
 	struct iommu_domain *iommus[NUM_DOMAINS];
 
+	unsigned int num_planes;
+	struct drm_plane *planes[8];
+
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[8];
 
@@ -94,6 +100,20 @@
 	uint32_t pixel_format;
 };
 
+/* callback from wq once fence has passed: */
+struct msm_fence_cb {
+	struct work_struct work;
+	uint32_t fence;
+	void (*func)(struct msm_fence_cb *cb);
+};
+
+void __msm_fence_worker(struct work_struct *work);
+
+#define INIT_FENCE_CB(_cb, _func)  do {                     \
+		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
+		(_cb)->func = _func;                         \
+	} while (0)
+
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
  * implementation is loaded at runtime.  The kms module is responsible
@@ -141,17 +161,24 @@
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 		uint32_t *iova);
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+struct page **msm_gem_get_pages(struct drm_gem_object *obj);
+void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj, int id);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		uint32_t handle);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+		size_t size, struct sg_table *sg);
+int msm_gem_prime_pin(struct drm_gem_object *obj);
+void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work);
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
 		struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
@@ -163,6 +190,8 @@
 		uint32_t size, uint32_t flags, uint32_t *handle);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+		uint32_t size, struct sg_table *sgt);
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
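
INIT_FENCE_CB above points every callback at one trampoline,
__msm_fence_worker, which recovers the enclosing msm_fence_cb with
container_of() and invokes func. A standalone sketch of that idiom, with a
simplified work item standing in for struct work_struct:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *w); };

struct fence_cb {
	struct work work;
	unsigned fence;
	void (*func)(struct fence_cb *cb);
};

/* one shared trampoline: recover the enclosing fence_cb and call it */
static void fence_worker(struct work *w)
{
	struct fence_cb *cb = container_of(w, struct fence_cb, work);
	cb->func(cb);
}

static void my_cb(struct fence_cb *cb)
{
	printf("callback for fence %u\n", cb->fence);
}

int main(void)
{
	struct fence_cb cb = { { fence_worker }, 42, my_cb };

	cb.work.fn(&cb.work);	/* a workqueue would make this call */
	return 0;
}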
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2bae46c..e587d25 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,6 +17,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -77,6 +78,21 @@
 	}
 }
 
+struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct page **p;
+	mutex_lock(&dev->struct_mutex);
+	p = get_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return p;
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj)
+{
+	/* when we start tracking the pin count, then do something here */
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
@@ -162,6 +178,11 @@
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -293,7 +314,17 @@
 
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
+
+	/* this is safe right now because we don't unmap until the
+	 * bo is deleted:
+	 */
+	if (msm_obj->domain[id].iova) {
+		*iova = msm_obj->domain[id].iova;
+		return 0;
+	}
+
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, id, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
@@ -363,8 +394,11 @@
 	return ret;
 }
 
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -372,12 +406,13 @@
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&work->entry)) {
+	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
 	} else if (is_active(msm_obj)) {
-		list_add_tail(&work->entry, &msm_obj->inactive_work);
+		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
-		queue_work(priv->wq, work);
+		queue_work(priv->wq, &cb->work);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -410,16 +445,6 @@
 	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
-	while (!list_empty(&msm_obj->inactive_work)) {
-		struct work_struct *work;
-
-		work = list_first_entry(&msm_obj->inactive_work,
-				struct work_struct, entry);
-
-		list_del_init(&work->entry);
-		queue_work(priv->wq, work);
-	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -510,10 +535,21 @@
 
 	drm_gem_free_mmap_offset(obj);
 
-	if (msm_obj->vaddr)
-		vunmap(msm_obj->vaddr);
+	if (obj->import_attach) {
+		if (msm_obj->vaddr)
+			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
 
-	put_pages(obj);
+		/* Don't drop the pages for imported dmabuf, as they are not
+		 * ours, just free the array we allocated:
+		 */
+		if (msm_obj->pages)
+			drm_free_large(msm_obj->pages);
+
+	} else {
+		if (msm_obj->vaddr)
+			vunmap(msm_obj->vaddr);
+		put_pages(obj);
+	}
 
 	if (msm_obj->resv == &msm_obj->_resv)
 		reservation_object_fini(msm_obj->resv);
@@ -549,17 +585,12 @@
 	return ret;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static int msm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags,
+		struct drm_gem_object **obj)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj = NULL;
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	size = PAGE_ALIGN(size);
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -569,21 +600,12 @@
 	default:
 		dev_err(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
-		ret = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 
 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
-	if (!msm_obj) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	obj = &msm_obj->base;
-
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto fail;
+	if (!msm_obj)
+		return -ENOMEM;
 
 	msm_obj->flags = flags;
 
@@ -591,9 +613,69 @@
 	reservation_object_init(msm_obj->resv);
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	INIT_LIST_HEAD(&msm_obj->inactive_work);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
+	*obj = &msm_obj->base;
+
+	return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, flags, &obj);
+	if (ret)
+		goto fail;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto fail;
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+		uint32_t size, struct sg_table *sgt)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj;
+	int ret, npages;
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+	if (ret)
+		goto fail;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	npages = size / PAGE_SIZE;
+
+	msm_obj = to_msm_bo(obj);
+	msm_obj->sgt = sgt;
+	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!msm_obj->pages) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	if (ret)
+		goto fail;
+
 	return obj;
 
 fail:
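
Both allocation paths above round the requested size up to a page boundary
before handing off to the shared msm_gem_new_impl(). PAGE_ALIGN is the usual
power-of-two round-up; spelled out here assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint32_t size = PAGE_ALIGN(5000);	/* -> 8192 */

	printf("%u bytes, %u pages\n", size, size / PAGE_SIZE);
	return 0;
}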
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 0676f32..f4f23a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -45,9 +45,6 @@
 	 */
 	struct list_head submit_entry;
 
-	/* work defered until bo is inactive: */
-	struct list_head inactive_work;
-
 	struct page **pages;
 	struct sg_table *sgt;
 	void *vaddr;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 0000000..d48f9fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	BUG_ON(!msm_obj->sgt);  /* should have already pinned! */
+	return msm_obj->sgt;
+}
+
+void *msm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return msm_gem_vaddr(obj);
+}
+
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* TODO msm_gem_vunmap() */
+}
+
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+		size_t size, struct sg_table *sg)
+{
+	return msm_gem_import(dev, size, sg);
+}
+
+int msm_gem_prime_pin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach)
+		msm_gem_get_pages(obj);
+	return 0;
+}
+
+void msm_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach)
+		msm_gem_put_pages(obj);
+}
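
msm_gem_prime_pin()/unpin() in the new file only touch the page arrays for natively allocated objects; an imported dma-buf (obj->import_attach set) is already pinned by its exporter. A small sketch of that guard, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct buf { bool imported; int pin_count; };

/* Pin backing pages only for natively allocated buffers; imported
 * dma-bufs are already pinned by their exporter, so this is a no-op
 * for them (stands in for msm_gem_get_pages()/put_pages() above). */
static void buf_pin(struct buf *b)
{
	if (!b->imported)
		b->pin_count++;
}

static void buf_unpin(struct buf *b)
{
	if (!b->imported)
		b->pin_count--;
}

int main(void)
{
	struct buf native = { false, 0 }, imported = { true, 0 };
	buf_pin(&native);
	buf_pin(&imported);
	printf("%d %d\n", native.pin_count, imported.pin_count); /* 1 0 */
	buf_unpin(&native);
	buf_unpin(&imported);
	return 0;
}
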
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3bab937..4583d61 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -268,6 +268,8 @@
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
+	msm_update_fence(gpu->dev, fence);
+
 	mutex_lock(&dev->struct_mutex);
 
 	while (!list_empty(&gpu->active_list)) {
@@ -287,8 +289,6 @@
 		}
 	}
 
-	msm_update_fence(gpu->dev, fence);
-
 	mutex_unlock(&dev->struct_mutex);
 }
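
The msm_gpu.c hunk moves msm_update_fence() ahead of the struct_mutex-protected retire loop, so the completed fence is published (and waiters can be woken) before the driver starts walking the active list under the lock. The general shape, as a sketch under that reading of the change (pthread names are illustrative, not the kernel's primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fence_cond = PTHREAD_COND_INITIALIZER;
static unsigned        completed;		/* last signalled seqno */

static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;

static void signal_fence(unsigned fence)
{
	pthread_mutex_lock(&fence_lock);
	completed = fence;
	pthread_cond_broadcast(&fence_cond);	/* wake fence waiters */
	pthread_mutex_unlock(&fence_lock);
}

/* Mirrors the reordered retire path: publish completion first, then
 * take the (potentially long-held) lock to tear down bookkeeping, so
 * waiters are not stuck behind the reclaim work. */
static void retire(unsigned fence)
{
	signal_fence(fence);
	pthread_mutex_lock(&reclaim_lock);
	/* ... walk active list, drop objects with seqno <= fence ... */
	pthread_mutex_unlock(&reclaim_lock);
}

int main(void)
{
	retire(42);
	printf("completed=%u\n", completed);
	return 0;
}
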
 
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12..7cf787d 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@
 	depends on DRM && PCI
         select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d939a1d..edcf801 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -28,7 +28,9 @@
 nouveau-y += core/subdev/bar/nvc0.o
 nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/boost.o
 nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/cstep.o
 nouveau-y += core/subdev/bios/dcb.o
 nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
@@ -39,17 +41,26 @@
 nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/vmap.o
+nouveau-y += core/subdev/bios/volt.o
 nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/hwsq.o
 nouveau-y += core/subdev/bus/nv04.o
 nouveau-y += core/subdev/bus/nv31.o
 nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nv94.o
 nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/base.o
 nouveau-y += core/subdev/clock/nv04.o
 nouveau-y += core/subdev/clock/nv40.o
 nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nv84.o
 nouveau-y += core/subdev/clock/nva3.o
 nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/nve0.o
 nouveau-y += core/subdev/clock/pllnv04.o
 nouveau-y += core/subdev/clock/pllnva3.o
 nouveau-y += core/subdev/devinit/base.o
@@ -78,7 +89,12 @@
 nouveau-y += core/subdev/fb/nv49.o
 nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nv84.o
+nouveau-y += core/subdev/fb/nva3.o
+nouveau-y += core/subdev/fb/nvaa.o
+nouveau-y += core/subdev/fb/nvaf.o
 nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nve0.o
 nouveau-y += core/subdev/fb/ramnv04.o
 nouveau-y += core/subdev/fb/ramnv10.o
 nouveau-y += core/subdev/fb/ramnv1a.o
@@ -89,7 +105,12 @@
 nouveau-y += core/subdev/fb/ramnv49.o
 nouveau-y += core/subdev/fb/ramnv4e.o
 nouveau-y += core/subdev/fb/ramnv50.o
+nouveau-y += core/subdev/fb/ramnva3.o
+nouveau-y += core/subdev/fb/ramnvaa.o
 nouveau-y += core/subdev/fb/ramnvc0.o
+nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr5.o
 nouveau-y += core/subdev/gpio/base.o
 nouveau-y += core/subdev/gpio/nv10.o
 nouveau-y += core/subdev/gpio/nv50.o
@@ -113,13 +134,22 @@
 nouveau-y += core/subdev/ltcg/nvc0.o
 nouveau-y += core/subdev/mc/base.o
 nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
 nouveau-y += core/subdev/mc/nv44.o
 nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv94.o
 nouveau-y += core/subdev/mc/nv98.o
 nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mc/nvc3.o
 nouveau-y += core/subdev/mxm/base.o
 nouveau-y += core/subdev/mxm/mxms.o
 nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/pwr/base.o
+nouveau-y += core/subdev/pwr/memx.o
+nouveau-y += core/subdev/pwr/nva3.o
+nouveau-y += core/subdev/pwr/nvc0.o
+nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/nv108.o
 nouveau-y += core/subdev/therm/base.o
 nouveau-y += core/subdev/therm/fan.o
 nouveau-y += core/subdev/therm/fannil.o
@@ -140,6 +170,9 @@
 nouveau-y += core/subdev/vm/nv44.o
 nouveau-y += core/subdev/vm/nv50.o
 nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/subdev/volt/base.o
+nouveau-y += core/subdev/volt/gpio.o
+nouveau-y += core/subdev/volt/nv40.o
 
 nouveau-y += core/engine/falcon.o
 nouveau-y += core/engine/xtensa.o
@@ -158,6 +191,7 @@
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/ctrl.o
 nouveau-y += core/engine/device/nv04.o
 nouveau-y += core/engine/device/nv10.o
 nouveau-y += core/engine/device/nv20.o
@@ -227,8 +261,18 @@
 nouveau-y += core/engine/graph/nvf0.o
 nouveau-y += core/engine/mpeg/nv31.o
 nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv44.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/perfmon/base.o
+nouveau-y += core/engine/perfmon/daemon.o
+nouveau-y += core/engine/perfmon/nv40.o
+nouveau-y += core/engine/perfmon/nv50.o
+nouveau-y += core/engine/perfmon/nv84.o
+nouveau-y += core/engine/perfmon/nva3.o
+nouveau-y += core/engine/perfmon/nvc0.o
+nouveau-y += core/engine/perfmon/nve0.o
+nouveau-y += core/engine/perfmon/nvf0.o
 nouveau-y += core/engine/ppp/nv98.o
 nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
@@ -260,9 +304,7 @@
 nouveau-y += nv50_display.o
 
 # drm/pm
-nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
-nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
-nouveau-y += nouveau_mem.o
+nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
 
 # other random bits
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 7eb81c1..3f3c765 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -23,62 +23,114 @@
 #include <core/os.h>
 #include <core/event.h>
 
-static void
-nouveau_event_put_locked(struct nouveau_event *event, int index,
-			 struct nouveau_eventh *handler)
-{
-	if (!--event->index[index].refs) {
-		if (event->disable)
-			event->disable(event, index);
-	}
-	list_del(&handler->head);
-}
-
 void
-nouveau_event_put(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
+nouveau_event_put(struct nouveau_eventh *handler)
 {
+	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr)
-		nouveau_event_put_locked(event, index, handler);
-	spin_unlock_irqrestore(&event->lock, flags);
-}
-
-void
-nouveau_event_get(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr) {
-		list_add(&handler->head, &event->index[index].list);
-		if (!event->index[index].refs++) {
-			if (event->enable)
-				event->enable(event, index);
+	if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!--event->index[handler->index].refs) {
+			if (event->disable)
+				event->disable(event, handler->index);
 		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
 	}
-	spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!event->index[handler->index].refs++) {
+			if (event->enable)
+				event->enable(event, handler->index);
+		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
+	}
+}
+
+static void
+nouveau_event_fini(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	nouveau_event_put(handler);
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_del(&handler->head);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+}
+
+static int
+nouveau_event_init(struct nouveau_event *event, int index,
+		   int (*func)(void *, int), void *priv,
+		   struct nouveau_eventh *handler)
+{
+	unsigned long flags;
+
+	if (index >= event->index_nr)
+		return -EINVAL;
+
+	handler->event = event;
+	handler->flags = 0;
+	handler->index = index;
+	handler->func = func;
+	handler->priv = priv;
+
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_add_tail(&handler->head, &event->index[index].list);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+	return 0;
+}
+
+int
+nouveau_event_new(struct nouveau_event *event, int index,
+		  int (*func)(void *, int), void *priv,
+		  struct nouveau_eventh **phandler)
+{
+	struct nouveau_eventh *handler;
+	int ret = -ENOMEM;
+
+	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (handler) {
+		ret = nouveau_event_init(event, index, func, priv, handler);
+		if (ret)
+			kfree(handler);
+	}
+
+	return ret;
+}
+
+void
+nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
+{
+	BUG_ON(handler != NULL);
+	if (*ref) {
+		nouveau_event_fini(*ref);
+		kfree(*ref);
+	}
+	*ref = handler;
 }
 
 void
 nouveau_event_trigger(struct nouveau_event *event, int index)
 {
-	struct nouveau_eventh *handler, *temp;
+	struct nouveau_eventh *handler;
 	unsigned long flags;
 
-	if (index >= event->index_nr)
+	if (WARN_ON(index >= event->index_nr))
 		return;
 
-	spin_lock_irqsave(&event->lock, flags);
-	list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
-		if (handler->func(handler, index) == NVKM_EVENT_DROP) {
-			nouveau_event_put_locked(event, index, handler);
-		}
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_for_each_entry(handler, &event->index[index].list, head) {
+		if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
+		    handler->func(handler->priv, index) == NVKM_EVENT_DROP)
+			nouveau_event_put(handler);
 	}
-	spin_unlock_irqrestore(&event->lock, flags);
+	spin_unlock_irqrestore(&event->list_lock, flags);
 }
 
 void
@@ -102,7 +154,8 @@
 	if (!event)
 		return -ENOMEM;
 
-	spin_lock_init(&event->lock);
+	spin_lock_init(&event->list_lock);
+	spin_lock_init(&event->refs_lock);
 	for (i = 0; i < index_nr; i++)
 		INIT_LIST_HEAD(&event->index[i].list);
 	event->index_nr = index_nr;
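
The event.c rework gives each handler its own NVKM_EVENT_ENABLE bit and splits the single event->lock into a list_lock (handler list membership) and a refs_lock (per-index enable refcounts): only the first get and the last put for an index reach the hardware enable()/disable() hooks. The refcount pattern in miniature (hypothetical names; the kernel code uses atomic __test_and_set_bit for the per-handler bit rather than a plain bool):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct src {
	pthread_mutex_t refs_lock;
	int refs;
	void (*enable)(void);
	void (*disable)(void);
};

struct handler { struct src *src; bool enabled; };

/* First handler to enable turns the source on; repeat gets on the
 * same handler are filtered by its own enable bit. */
static void handler_get(struct handler *h)
{
	if (h->enabled)
		return;
	h->enabled = true;
	pthread_mutex_lock(&h->src->refs_lock);
	if (h->src->refs++ == 0 && h->src->enable)
		h->src->enable();
	pthread_mutex_unlock(&h->src->refs_lock);
}

/* Last handler to disable turns the source back off. */
static void handler_put(struct handler *h)
{
	if (!h->enabled)
		return;
	h->enabled = false;
	pthread_mutex_lock(&h->src->refs_lock);
	if (--h->src->refs == 0 && h->src->disable)
		h->src->disable();
	pthread_mutex_unlock(&h->src->refs_lock);
}

static void on(void)  { puts("enable"); }
static void off(void) { puts("disable"); }

int main(void)
{
	struct src s = { PTHREAD_MUTEX_INITIALIZER, 0, on, off };
	struct handler a = { &s, false }, b = { &s, false };
	handler_get(&a);	/* prints "enable" */
	handler_get(&b);	/* refcount only */
	handler_put(&a);
	handler_put(&b);	/* prints "disable" */
	return 0;
}
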
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
index 62a432e..9f6fcc5 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -25,15 +25,6 @@
 #include <core/option.h>
 #include <core/debug.h>
 
-/* compares unterminated string 'str' with zero-terminated string 'cmp' */
-static inline int
-strncasecmpz(const char *str, const char *cmp, size_t len)
-{
-	if (strlen(cmp) != len)
-		return len;
-	return strncasecmp(str, cmp, len);
-}
-
 const char *
 nouveau_stropt(const char *optstr, const char *opt, int *arglen)
 {
@@ -105,7 +96,7 @@
 				else if (!strncasecmpz(optstr, "warn", len))
 					level = NV_DBG_WARN;
 				else if (!strncasecmpz(optstr, "info", len))
-					level = NV_DBG_INFO;
+					level = NV_DBG_INFO_NORMAL;
 				else if (!strncasecmpz(optstr, "debug", len))
 					level = NV_DBG_DEBUG;
 				else if (!strncasecmpz(optstr, "trace", len))
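
The option.c hunk drops the local strncasecmpz() definition (presumably moved to a shared header, since the call sites remain) and renames NV_DBG_INFO to NV_DBG_INFO_NORMAL. For reference, the removed helper's contract, reconstructed as a standalone program:

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Same contract as the helper removed above: compare an unterminated
 * string of known length against a NUL-terminated one, case-
 * insensitively; nonzero means "no match". */
static int strncasecmpz(const char *str, const char *cmp, size_t len)
{
	if (strlen(cmp) != len)
		return (int)len;
	return strncasecmp(str, cmp, len);
}

int main(void)
{
	const char *opt = "debug,trace";	/* "debug" = 5 unterminated chars */
	printf("%d\n", strncasecmpz(opt, "DEBUG", 5) == 0);	/* prints 1 */
	return 0;
}
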
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 52fb2aa..03e0060 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,16 +27,38 @@
 #include <core/subdev.h>
 #include <core/printk.h>
 
-int nv_printk_suspend_level = NV_DBG_DEBUG;
+int nv_info_debug_level = NV_DBG_INFO_NORMAL;
 
 void
-nv_printk_(struct nouveau_object *object, const char *pfx, int level,
-	   const char *fmt, ...)
+nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
 {
 	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+	const char *pfx;
 	char mfmt[256];
 	va_list args;
 
+	switch (level) {
+	case NV_DBG_FATAL:
+		pfx = KERN_CRIT;
+		break;
+	case NV_DBG_ERROR:
+		pfx = KERN_ERR;
+		break;
+	case NV_DBG_WARN:
+		pfx = KERN_WARNING;
+		break;
+	case NV_DBG_INFO_NORMAL:
+		pfx = KERN_INFO;
+		break;
+	case NV_DBG_DEBUG:
+	case NV_DBG_PARANOIA:
+	case NV_DBG_TRACE:
+	case NV_DBG_SPAM:
+	default:
+		pfx = KERN_DEBUG;
+		break;
+	}
+
 	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
 		struct nouveau_object *device = object;
 		struct nouveau_object *subdev = object;
@@ -74,20 +96,3 @@
 	vprintk(mfmt, args);
 	va_end(args);
 }
-
-#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
-
-const char *nv_printk_level_to_pfx(int level)
-{
-	switch (level) {
-	CONV_LEVEL(FATAL);
-	CONV_LEVEL(ERROR);
-	CONV_LEVEL(WARN);
-	CONV_LEVEL(INFO);
-	CONV_LEVEL(DEBUG);
-	CONV_LEVEL(PARANOIA);
-	CONV_LEVEL(TRACE);
-	CONV_LEVEL(SPAM);
-	}
-	return NV_PRINTK_DEBUG;
-}
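
The printk.c change replaces the table-driven nv_printk_level_to_pfx() with an inline switch that maps nouveau's fine-grained debug levels onto the standard KERN_* prefixes, with everything at NV_DBG_DEBUG and below collapsing into KERN_DEBUG. A standalone sketch of that mapping (the numeric syslog prefixes are the kernel's, the enum names are illustrative):

#include <stdio.h>

enum { DBG_FATAL, DBG_ERROR, DBG_WARN, DBG_INFO, DBG_DEBUG, DBG_TRACE };

/* Collapse fine-grained driver levels onto the coarser syslog set;
 * anything more verbose than INFO lands in the debug bucket. */
static const char *level_prefix(int level)
{
	switch (level) {
	case DBG_FATAL: return "<2>";  /* KERN_CRIT */
	case DBG_ERROR: return "<3>";  /* KERN_ERR */
	case DBG_WARN:  return "<4>";  /* KERN_WARNING */
	case DBG_INFO:  return "<6>";  /* KERN_INFO */
	default:        return "<7>";  /* KERN_DEBUG */
	}
}

int main(void)
{
	printf("%sexample message\n", level_prefix(DBG_WARN));
	return 0;
}
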
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 4c72571..9135b25a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
 
 #include <core/class.h>
 
-#include <engine/device.h>
+#include "priv.h"
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@@ -75,7 +75,9 @@
 	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_PWR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_PERFMON]  = NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_SW]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
@@ -87,7 +89,7 @@
 	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
 	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
 	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
-	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
+	[NVDEV_ENGINE_VIC]	= NV_DEVICE_DISABLE_VIC,
 	[NVDEV_ENGINE_VENC]	= NV_DEVICE_DISABLE_VENC,
 	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
 	[NVDEV_SUBDEV_NR]	= 0,
@@ -119,10 +121,12 @@
 			return -ENODEV;
 	}
 
-	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
+				    nouveau_control_oclass,
 				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
 				    (1ULL << NVDEV_ENGINE_FIFO) |
-				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
+				    (1ULL << NVDEV_ENGINE_DISP) |
+				    (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
 	*pobject = nv_object(devobj);
 	if (ret)
 		return ret;
@@ -158,22 +162,29 @@
 		iounmap(map);
 
 		/* determine chipset and derive architecture from it */
-		if ((boot0 & 0x0f000000) > 0) {
-			device->chipset = (boot0 & 0xff00000) >> 20;
-			switch (device->chipset & 0xf0) {
-			case 0x10: device->card_type = NV_10; break;
-			case 0x20: device->card_type = NV_20; break;
-			case 0x30: device->card_type = NV_30; break;
-			case 0x40:
-			case 0x60: device->card_type = NV_40; break;
-			case 0x50:
-			case 0x80:
-			case 0x90:
-			case 0xa0: device->card_type = NV_50; break;
-			case 0xc0: device->card_type = NV_C0; break;
-			case 0xd0: device->card_type = NV_D0; break;
-			case 0xe0:
-			case 0xf0: device->card_type = NV_E0; break;
+		if ((boot0 & 0x1f000000) > 0) {
+			device->chipset = (boot0 & 0x1ff00000) >> 20;
+			switch (device->chipset & 0x1f0) {
+			case 0x010: {
+				if (0x461 & (1 << (device->chipset & 0xf)))
+					device->card_type = NV_10;
+				else
+					device->card_type = NV_11;
+				break;
+			}
+			case 0x020: device->card_type = NV_20; break;
+			case 0x030: device->card_type = NV_30; break;
+			case 0x040:
+			case 0x060: device->card_type = NV_40; break;
+			case 0x050:
+			case 0x080:
+			case 0x090:
+			case 0x0a0: device->card_type = NV_50; break;
+			case 0x0c0: device->card_type = NV_C0; break;
+			case 0x0d0: device->card_type = NV_D0; break;
+			case 0x0e0:
+			case 0x0f0:
+			case 0x100: device->card_type = NV_E0; break;
 			default:
 				break;
 			}
@@ -188,7 +199,8 @@
 
 		switch (device->card_type) {
 		case NV_04: ret = nv04_identify(device); break;
-		case NV_10: ret = nv10_identify(device); break;
+		case NV_10:
+		case NV_11: ret = nv10_identify(device); break;
 		case NV_20: ret = nv20_identify(device); break;
 		case NV_30: ret = nv30_identify(device); break;
 		case NV_40: ret = nv40_identify(device); break;
@@ -212,7 +224,7 @@
 		nv_info(device, "Family : NV%02X\n", device->card_type);
 
 		/* determine frequency of timing crystal */
-		if ( device->chipset < 0x17 ||
+		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
 		    (device->chipset >= 0x20 && device->chipset < 0x25))
 			strap &= 0x00000040;
 		else
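
The widened masks in device/base.c (0x0f000000 to 0x1f000000, 0xff00000 to 0x1ff00000) read a ninth chipset bit so decoding reaches the 0x100 (NV108) range, and the new 0x010 case tells NV10 from NV11 parts with a per-chipset bitmap: reading the constant, 0x461 has bits 0, 5, 6 and 10 set, selecting chipsets 0x10/0x15/0x16/0x1a as NV10-class. A small decoder along the same lines (boot0 value is a made-up example):

#include <stdint.h>
#include <stdio.h>

/* Decode the chipset id from an NV_PMC_BOOT_0-style value, using the
 * widened 9-bit mask from the hunk above. */
static unsigned chipset_from_boot0(uint32_t boot0)
{
	return (boot0 & 0x1ff00000) >> 20;
}

static const char *family(unsigned chipset)
{
	switch (chipset & 0x1f0) {
	case 0x010:
		/* bitmap 0x461 selects chipsets 0x10/0x15/0x16/0x1a */
		return (0x461 & (1u << (chipset & 0xf))) ? "NV10" : "NV11";
	case 0x020: return "NV20";
	case 0x0e0:
	case 0x0f0:
	case 0x100: return "NVE0";
	default:    return "other";
	}
}

int main(void)
{
	uint32_t boot0 = 0x01700000;	/* hypothetical register value */
	unsigned chip = chipset_from_boot0(boot0);
	printf("chipset %02x -> %s\n", chip, family(chip));	/* 17 -> NV11 */
	return 0;
}
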
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 0000000..4b69bf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+static int
+nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_info *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	if (clk) {
+		args->count  = clk->state_nr;
+		args->ustate = clk->ustate;
+		args->pstate = clk->pstate;
+	} else {
+		args->count  = 0;
+		args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
+		args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_attr *args = data;
+	struct nouveau_clocks *domain;
+	struct nouveau_pstate *pstate;
+	struct nouveau_cstate *cstate;
+	int i = 0, j = -1;
+	u32 lo, hi;
+
+	if ((size < sizeof(*args)) || !clk ||
+	    (args->state >= 0 && args->state >= clk->state_nr))
+		return -EINVAL;
+	domain = clk->domains;
+
+	while (domain->name != nv_clk_src_max) {
+		if (domain->mname && ++j == args->index)
+			break;
+		domain++;
+	}
+
+	if (domain->name == nv_clk_src_max)
+		return -EINVAL;
+
+	if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+		list_for_each_entry(pstate, &clk->states, head) {
+			if (i++ == args->state)
+				break;
+		}
+
+		lo = pstate->base.domain[domain->name];
+		hi = lo;
+		list_for_each_entry(cstate, &pstate->list, head) {
+			lo = min(lo, cstate->domain[domain->name]);
+			hi = max(hi, cstate->domain[domain->name]);
+		}
+
+		args->state = pstate->pstate;
+	} else {
+		lo = max(clk->read(clk, domain->name), 0);
+		hi = lo;
+	}
+
+	snprintf(args->name, sizeof(args->name), "%s", domain->mname);
+	snprintf(args->unit, sizeof(args->unit), "MHz");
+	args->min = lo / domain->mdiv;
+	args->max = hi / domain->mdiv;
+
+	args->index = 0;
+	while ((++domain)->name != nv_clk_src_max) {
+		if (domain->mname) {
+			args->index = ++j;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_user *args = data;
+
+	if (size < sizeof(*args) || !clk)
+		return -EINVAL;
+
+	return nouveau_clock_ustate(clk, args->state);
+}
+
+struct nouveau_oclass
+nouveau_control_oclass[] = {
+	{ .handle = NV_CONTROL_CLASS,
+	  .ofuncs = &nouveau_object_ofuncs,
+	  .omthds = (struct nouveau_omthds[]) {
+		  { NV_CONTROL_PSTATE_INFO,
+		    NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
+		  { NV_CONTROL_PSTATE_ATTR,
+		    NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
+		  { NV_CONTROL_PSTATE_USER,
+		    NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
+		  {},
+	  },
+	},
+	{}
+};
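
The new nouveau_control_oclass exposes its three pstate methods through a zero-terminated omthds table: each entry maps a method id to a handler, and every handler validates the argument size before touching the clock subsystem. The table-dispatch idea in miniature (hypothetical names, not the nouveau object model):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct mthd { unsigned id; int (*func)(void *data, size_t size); };

struct info_args { int count; };

static int mthd_info(void *data, size_t size)
{
	struct info_args *args = data;
	if (size < sizeof(*args))
		return -1;	/* -EINVAL analogue: reject short buffers */
	args->count = 3;
	return 0;
}

/* Zero-terminated method table, like the omthds array above. */
static const struct mthd methods[] = {
	{ 0x100, mthd_info },
	{ 0, NULL },
};

static int dispatch(unsigned id, void *data, size_t size)
{
	const struct mthd *m;
	for (m = methods; m->func; m++)
		if (m->id == id)
			return m->func(data, size);
	return -1;	/* unknown method */
}

int main(void)
{
	struct info_args a;
	memset(&a, 0, sizeof(a));
	if (dispatch(0x100, &a, sizeof(a)) == 0)
		printf("count=%d\n", a.count);
	return 0;
}
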
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index a0284cf..dbd2dde 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -50,15 +50,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -68,15 +68,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
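
The recurring '&nv04_mc_oclass' to 'nv04_mc_oclass' edits in this and the following device tables appear to reflect subdevs whose oclass symbol is now itself declared as a pointer rather than a struct, so the address-of operator goes away while the table slot keeps the same pointer type. The convention change in miniature:

#include <stdio.h>

struct oclass { const char *name; };

/* Old style: a struct object, stored by address (&old_oclass). */
static struct oclass old_oclass = { "old" };

/* New style: the symbol is already a pointer, so the device table
 * stores it directly, with no leading '&'. */
static struct oclass new_impl = { "new" };
static struct oclass *new_oclass = &new_impl;

int main(void)
{
	struct oclass *slot[2];
	slot[0] = &old_oclass;	/* was: &nv04_mc_oclass */
	slot[1] = new_oclass;	/* now:  nv04_mc_oclass */
	printf("%s %s\n", slot[0]->name, slot[1]->name);
	return 0;
}
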
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 1b7809a..6e03dd6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -52,10 +52,10 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -69,15 +69,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -88,15 +88,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -107,15 +107,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -126,15 +126,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -145,15 +145,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -164,15 +164,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -183,15 +183,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 12a4005..dcde53b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -53,15 +53,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -72,15 +72,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -110,15 +110,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index cef0f1e..7b8662e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -53,15 +53,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -72,15 +72,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -111,15 +111,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -131,15 +131,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1719cb0..c8c41e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -35,6 +35,7 @@
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -43,6 +44,7 @@
 #include <engine/graph.h>
 #include <engine/mpeg.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv40_identify(struct nouveau_device *device)
@@ -56,18 +58,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x41:
 		device->cname = "NV41";
@@ -77,18 +81,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x42:
 		device->cname = "NV42";
@@ -98,18 +104,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x43:
 		device->cname = "NV43";
@@ -119,18 +127,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x45:
 		device->cname = "NV45";
@@ -140,18 +150,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x47:
 		device->cname = "G70";
@@ -161,18 +173,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x49:
 		device->cname = "G71";
@@ -182,18 +196,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4b:
 		device->cname = "G73";
@@ -203,18 +219,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x44:
 		device->cname = "NV44";
@@ -224,18 +242,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x46:
 		device->cname = "G72";
@@ -245,18 +265,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4a:
 		device->cname = "NV44A";
@@ -266,18 +288,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4c:
 		device->cname = "C61";
@@ -287,18 +311,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4e:
 		device->cname = "C51";
@@ -308,18 +334,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x63:
 		device->cname = "C73";
@@ -329,18 +357,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x67:
 		device->cname = "C67";
@@ -350,18 +380,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x68:
 		device->cname = "C68";
@@ -371,18 +403,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ffc18b8..db13982 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -36,6 +36,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv50_identify(struct nouveau_device *device)
@@ -59,257 +62,277 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv50_perfmon_oclass;
 		break;
 	case 0x84:
 		device->cname = "G84";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x86:
 		device->cname = "G86";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x92:
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x94:
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x96:
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x98:
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xa0:
 		device->cname = "G200";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xaa:
 		device->cname = "MCP77/MCP78";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xac:
 		device->cname = "MCP79/MCP7A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xa3:
 		device->cname = "GT215";
@@ -320,16 +343,18 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
@@ -337,6 +362,7 @@
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xa5:
 		device->cname = "GT216";
@@ -347,22 +373,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xa8:
 		device->cname = "GT218";
@@ -373,22 +402,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xaf:
 		device->cname = "MCP89";
@@ -399,22 +431,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaf_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 418f51f..8d06eef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nvc0_identify(struct nouveau_device *device)
@@ -63,18 +66,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc0_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -82,6 +87,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc4:
 		device->cname = "GF104";
@@ -92,18 +98,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -111,6 +119,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc3:
 		device->cname = "GF106";
@@ -121,24 +130,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xce:
 		device->cname = "GF114";
@@ -149,18 +161,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -168,6 +182,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xcf:
 		device->cname = "GF116";
@@ -178,18 +193,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -197,6 +214,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc1:
 		device->cname = "GF108";
@@ -207,24 +225,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc1_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc8:
 		device->cname = "GF110";
@@ -235,18 +256,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc8_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -254,6 +277,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd9:
 		device->cname = "GF119";
@@ -264,24 +288,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvd9_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd7:
 		device->cname = "GF117";
@@ -292,24 +319,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvd7_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 7aca187..3900104 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/bsp.h>
 #include <engine/vp.h>
 #include <engine/ppp.h>
+#include <engine/perfmon.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -59,22 +62,24 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -83,28 +88,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xe7:
 		device->cname = "GK107";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -113,28 +121,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xe6:
 		device->cname = "GK106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -143,28 +154,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xf0:
 		device->cname = "GK110";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -175,6 +189,43 @@
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 #endif
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+		break;
+	case 0x108:
+		device->cname = "GK208";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nv108_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
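+		/* all engine classes except DISP are left disabled for now */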
+#if 0
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
+#endif
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+#endif
 		break;
 	default:
 		nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 0000000..035fd5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_DEVICE_PRIV_H__
+#define __NVKM_DEVICE_PRIV_H__
+
+#include <engine/device.h>
+
+extern struct nouveau_oclass nouveau_control_oclass[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 054d9cf..1bd4c63 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -70,17 +70,10 @@
 	};
 	u32 lnkcmp;
 	u8 sink[2];
+	int ret;
 
 	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
-	/* set desired link configuration on the sink */
-	sink[0] = dp->link_bw / 27000;
-	sink[1] = dp->link_nr;
-	if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
-		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
-
-	nv_wraux(dp->aux, DPCD_LC00, sink, 2);
-
 	/* set desired link configuration on the source */
 	if ((lnkcmp = dp->info.lnkcmp)) {
 		if (dp->version < 0x30) {
@@ -96,10 +89,22 @@
 		nvbios_exec(&init);
 	}
 
-	return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
-				 dp->link_nr, dp->link_bw / 27000,
-				 dp->dpcd[DPCD_RC02] &
-					  DPCD_RC02_ENHANCED_FRAME_CAP);
+	ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+				dp->link_nr, dp->link_bw / 27000,
+				dp->dpcd[DPCD_RC02] &
+					 DPCD_RC02_ENHANCED_FRAME_CAP);
+	if (ret) {
+		ERR("lnk_ctl failed with %d\n", ret);
+		return ret;
+	}
+
+	/* set desired link configuration on the sink */
+	sink[0] = dp->link_bw / 27000;
+	sink[1] = dp->link_nr;
+	if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+	return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
 }
 
 static void
@@ -294,8 +299,17 @@
 
 	ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
 	if (ret) {
+		/* it's possible the display has been unplugged before we
+		 * get here.  we still need to execute the full set of
+		 * vbios scripts, and program the OR at a high enough
+		 * frequency to satisfy the target mode.  failure to do
+	 * so results at best in an UPDATE hanging, and at worst
+	 * in PDISP running away to join the circus.
+		 */
+		dp->dpcd[1] = link_bw[0] / 27000;
+		dp->dpcd[2] = 4;
+		dp->dpcd[3] = 0x00;
 		ERR("failed to read DPCD\n");
-		return ret;
 	}
 
 	/* adjust required bandwidth for 8B/10B coding overhead */
@@ -308,7 +322,7 @@
 	while (*link_bw > (dp->dpcd[1] * 27000))
 		link_bw++;
 
-	while (link_bw[0]) {
+	while ((ret = -EIO) && link_bw[0]) {
 		/* find minimum required lane count at this link rate */
 		dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
 		while ((dp->link_nr >> 1) * link_bw[0] > datarate)
@@ -328,8 +342,10 @@
 			    !dp_link_train_eq(dp))
 				break;
 		} else
-		if (ret >= 1) {
-			/* dp_set_link_config() handled training */
+		if (ret) {
+			/* dp_set_link_config() handled training, or
+			 * we failed to communicate with the sink.
+			 */
 			break;
 		}
 
@@ -339,8 +355,10 @@
 
 	/* finish link training */
 	dp_set_training_pattern(dp, 0);
+	if (ret < 0)
+		ERR("link training failed\n");
 
 	/* execute post-train script from vbios */
 	dp_link_train_fini(dp);
-	return true;
+	return ret >= 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 05e903f..a0bc8a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -59,6 +59,7 @@
 	struct nv04_disp_priv *priv = (void *)subdev;
 	u32 crtc0 = nv_rd32(priv, 0x600100);
 	u32 crtc1 = nv_rd32(priv, 0x602100);
+	u32 pvideo;
 
 	if (crtc0 & 0x00000001) {
 		nouveau_event_trigger(priv->base.vblank, 0);
@@ -69,6 +70,14 @@
 		nouveau_event_trigger(priv->base.vblank, 1);
 		nv_wr32(priv, 0x602100, 0x00000001);
 	}
+
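+	/* acknowledge PVIDEO (overlay) interrupts on the chipsets that
+	 * have the unit, logging anything unexpected
+	 */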
+	if (nv_device(priv)->chipset >= 0x10 &&
+	    nv_device(priv)->chipset <= 0x40) {
+		pvideo = nv_rd32(priv, 0x8100);
+		if (pvideo & ~0x11)
+			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
+		nv_wr32(priv, 0x8100, pvideo);
+	}
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1..378a015 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -541,6 +541,15 @@
 	nv_wr32(priv, 0x6100a0, 0x00000000);
 	nv_wr32(priv, 0x6100b0, 0x00000307);
 
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some nve4 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	for (i = 0; i < priv->head.nr; i++)
+		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 7ec4ee83..eea3ef5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -97,8 +97,9 @@
 {
 	struct nouveau_bios *bios = nouveau_bios(disp);
 	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 shift = nv94_sor_dp_lane_map(priv, lane);
 	const u32 loff = nv94_sor_loff(outp);
-	u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
@@ -113,9 +114,12 @@
 	if (!addr)
 		return -EINVAL;
 
-	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
-	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
-	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 9e1d435..d2df572 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -93,8 +93,9 @@
 {
 	struct nouveau_bios *bios = nouveau_bios(disp);
 	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
 	const u32 loff = nvd0_sor_loff(outp);
-	u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
@@ -109,9 +110,12 @@
 	if (!addr)
 		return -EINVAL;
 
-	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
-	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
-	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
 	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index f877bd5..54f26cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -632,8 +632,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_fifo_oclass = {
+struct nouveau_oclass *
+nv04_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x04),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2c927c1..571a22a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -159,8 +159,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv10_fifo_oclass = {
+struct nouveau_oclass *
+nv10_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x10),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv10_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index a9cb51d..f257602 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -196,8 +196,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv17_fifo_oclass = {
+struct nouveau_oclass *
+nv17_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x17),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv17_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 5c7433d..343487e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -337,8 +337,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv40_fifo_oclass = {
+struct nouveau_oclass *
+nv40_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x40),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv40_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7e5dff5..5f555788 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -502,8 +502,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_fifo_oclass = {
+struct nouveau_oclass *
+nv50_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x50),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 91a87cd..0908dc8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -144,7 +144,7 @@
 	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
 	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
 	case NVDEV_ENGINE_CRYPT :
-	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
 	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
 	default:
 		return -EINVAL;
@@ -180,7 +180,7 @@
 					  (1ULL << NVDEV_ENGINE_BSP) |
 					  (1ULL << NVDEV_ENGINE_PPP) |
 					  (1ULL << NVDEV_ENGINE_COPY0) |
-					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_VIC), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -243,7 +243,7 @@
 					  (1ULL << NVDEV_ENGINE_BSP) |
 					  (1ULL << NVDEV_ENGINE_PPP) |
 					  (1ULL << NVDEV_ENGINE_COPY0) |
-					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_VIC), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -435,8 +435,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv84_fifo_oclass = {
+struct nouveau_oclass *
+nv84_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ce92f28..9ac94d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -494,13 +494,6 @@
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
 
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-				show &= ~0x00200000;
-		}
-	}
-
 	if (stat & 0x00800000) {
 		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
 			show &= ~0x00800000;
@@ -720,8 +713,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nvc0_fifo_oclass = {
+struct nouveau_oclass *
+nvc0_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 8e8121a..04f4129 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -481,13 +481,6 @@
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
 
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-				show &= ~0x00200000;
-		}
-	}
-
 	if (stat & 0x00800000) {
 		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
 			show &= ~0x00800000;
@@ -675,8 +668,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nve0_fifo_oclass = {
+struct nouveau_oclass *
+nve0_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0xe0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nve0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 64dca26..fe67415 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -1039,7 +1039,7 @@
 			} while (!tpcnr[gpc]);
 			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
 
-			tpc_set |= 1 << ((gpc * 8) + tpc);
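+			/* tpc_set is 64-bit; shift 1ULL so bits above 31
+			 * can be set without overflow
+			 */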
+			tpc_set |= 1ULL << ((gpc * 8) + tpc);
 		}
 
 		nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index e5be3ee..71b4283 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -587,6 +587,7 @@
 	{ 0x405870,   4, 0x04, 0x00000001 },
 	{ 0x405a00,   2, 0x04, 0x00000000 },
 	{ 0x405a18,   1, 0x04, 0x00000000 },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -598,6 +599,7 @@
 	{ 0x408904,   1, 0x04, 0x62000001 },
 	{ 0x408908,   1, 0x04, 0x00c80929 },
 	{ 0x408980,   1, 0x04, 0x0000011d },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -671,6 +673,7 @@
 	{ 0x419000,   1, 0x04, 0x00000780 },
 	{ 0x419004,   2, 0x04, 0x00000000 },
 	{ 0x419014,   1, 0x04, 0x00000004 },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -717,6 +720,7 @@
 	{ 0x419e98,   1, 0x04, 0x00000000 },
 	{ 0x419ee0,   1, 0x04, 0x00011110 },
 	{ 0x419f30,  11, 0x04, 0x00000000 },
+	{}
 };
 
 void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 438e784..c4740d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -258,6 +258,7 @@
 	nvc0_grctx_init_unk78xx,
 	nvc0_grctx_init_unk80xx,
 	nvd9_grctx_init_rop,
+	NULL
 };
 
 struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index 818a475..a1102cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -466,6 +466,7 @@
 	nvc0_grctx_init_unk78xx,
 	nvc0_grctx_init_unk80xx,
 	nvd9_grctx_init_rop,
+	NULL
 };
 
 struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 23c143a..4532f7e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -945,7 +945,8 @@
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
 			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
 	}
@@ -970,7 +971,8 @@
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
 			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
 	}
@@ -1052,7 +1054,8 @@
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
 					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
@@ -1231,7 +1234,7 @@
 		nv_engine(priv)->sclass = nv10_graph_sclass;
 	else
 	if (nv_device(priv)->chipset <  0x17 ||
-	    nv_device(priv)->chipset == 0x1a)
+	    nv_device(priv)->card_type < NV_11)
 		nv_engine(priv)->sclass = nv15_graph_sclass;
 	else
 		nv_engine(priv)->sclass = nv17_graph_sclass;
@@ -1270,7 +1273,8 @@
 	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
 	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
 		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
 		nv_wr32(priv, 0x400838, 0x002f8684);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 3f4f35c..434bb4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1138,7 +1138,7 @@
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->unit = 0x08001000;
 	nv_subdev(priv)->intr = nvc0_graph_intr;
 
 	priv->base.units = nvc0_graph_units;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index c190043..7eb6d94c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -34,16 +34,7 @@
 
 #include <engine/fifo.h>
 #include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv31_mpeg_priv {
-	struct nouveau_mpeg base;
-	atomic_t refcount;
-};
-
-struct nv31_mpeg_chan {
-	struct nouveau_object base;
-};
+#include <engine/mpeg/nv31.h>
 
 /*******************************************************************************
  * MPEG object classes
@@ -89,18 +80,18 @@
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_mask(priv, 0x00b300, 0x00010000,
+			(dma0 & 0x00030000) ? 0x00010000 : 0);
 		nv_wr32(priv, 0x00b334, base);
 		nv_wr32(priv, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_mask(priv, 0x00b300, 0x00020000,
+			(dma0 & 0x00030000) ? 0x00020000 : 0);
 		nv_wr32(priv, 0x00b360, base);
 		nv_wr32(priv, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
-		if (dma0 & 0x000c0000)
+		if (dma0 & 0x00030000)
 			return -EINVAL;
 
 		nv_wr32(priv, 0x00b370, base);
@@ -110,7 +101,7 @@
 	return 0;
 }
 
-static struct nouveau_ofuncs
+struct nouveau_ofuncs
 nv31_mpeg_ofuncs = {
 	.ctor = nv31_mpeg_object_ctor,
 	.dtor = _nouveau_gpuobj_dtor,
@@ -146,16 +137,23 @@
 {
 	struct nv31_mpeg_priv *priv = (void *)engine;
 	struct nv31_mpeg_chan *chan;
+	unsigned long flags;
 	int ret;
 
-	if (!atomic_add_unless(&priv->refcount, 1, 1))
-		return -EBUSY;
-
 	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
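+	/* PMPEG supports only a single channel; fail with -EBUSY if
+	 * another object already owns it
+	 */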
+	if (priv->chan) {
+		spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		*pobject = NULL;
+		return -EBUSY;
+	}
+	priv->chan = chan;
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 	return 0;
 }
 
@@ -164,11 +162,15 @@
 {
 	struct nv31_mpeg_priv *priv = (void *)object->engine;
 	struct nv31_mpeg_chan *chan = (void *)object;
-	atomic_dec(&priv->refcount);
+	unsigned long flags;
+
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+	priv->chan = NULL;
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 	nouveau_object_destroy(&chan->base);
 }
 
-static struct nouveau_oclass
+struct nouveau_oclass
 nv31_mpeg_cclass = {
 	.handle = NV_ENGCTX(MPEG, 0x31),
 	.ofuncs = &(struct nouveau_ofuncs) {
@@ -197,21 +199,19 @@
 void
 nv31_mpeg_intr(struct nouveau_subdev *subdev)
 {
-	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
-	struct nouveau_engine *engine = nv_engine(subdev);
-	struct nouveau_object *engctx;
-	struct nouveau_handle *handle;
 	struct nv31_mpeg_priv *priv = (void *)subdev;
-	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_handle *handle;
+	struct nouveau_object *engctx;
 	u32 stat = nv_rd32(priv, 0x00b100);
 	u32 type = nv_rd32(priv, 0x00b230);
 	u32 mthd = nv_rd32(priv, 0x00b234);
 	u32 data = nv_rd32(priv, 0x00b238);
 	u32 show = stat;
-	int chid;
+	unsigned long flags;
 
-	engctx = nouveau_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+	engctx = nv_object(priv->chan);
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
@@ -220,7 +220,7 @@
 			show &= ~0x01000000;
 		}
 
-		if (type == 0x00000010) {
+		if (type == 0x00000010 && engctx) {
 			handle = nouveau_handle_get_class(engctx, 0x3174);
 			if (handle && !nv_call(handle->object, mthd, data))
 				show &= ~0x01000000;
@@ -232,13 +232,12 @@
 	nv_wr32(priv, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv,
-			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 chid, inst << 4, nouveau_client_name(engctx), stat,
-			 type, mthd, data);
+		nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 pfifo->chid(pfifo, engctx),
+			 nouveau_client_name(engctx), stat, type, mthd, data);
 	}
 
-	nouveau_engctx_put(engctx);
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 }
 
 static int
@@ -284,10 +283,7 @@
 	/* PMPEG init */
 	nv_wr32(priv, 0x00b32c, 0x00000000);
 	nv_wr32(priv, 0x00b314, 0x00000100);
-	if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
-		nv_wr32(priv, 0x00b220, 0x00000044);
-	else
-		nv_wr32(priv, 0x00b220, 0x00000031);
+	nv_wr32(priv, 0x00b220, 0x00000031);
 	nv_wr32(priv, 0x00b300, 0x02001ec1);
 	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
new file mode 100644
index 0000000..d08629d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
@@ -0,0 +1,15 @@
+#ifndef __NV31_MPEG_H__
+#define __NV31_MPEG_H__
+
+#include <engine/mpeg.h>
+
+struct nv31_mpeg_chan {
+	struct nouveau_object base;
+};
+
+struct nv31_mpeg_priv {
+	struct nouveau_mpeg base;
+	struct nv31_mpeg_chan *chan;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index dd61960..d4e7ec0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -31,66 +31,63 @@
 #include <subdev/instmem.h>
 
 #include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv40_mpeg_priv {
-	struct nouveau_mpeg base;
-};
-
-struct nv40_mpeg_chan {
-	struct nouveau_mpeg_chan base;
-};
+#include <engine/mpeg/nv31.h>
 
 /*******************************************************************************
- * PMPEG context
+ * MPEG object classes
  ******************************************************************************/
 
 static int
-nv40_mpeg_context_ctor(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass, void *data, u32 size,
-		       struct nouveau_object **pobject)
+nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
 {
-	struct nv40_mpeg_chan *chan;
-	int ret;
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	u32 inst = *(u32 *)arg << 4;
+	u32 dma0 = nv_ro32(imem, inst + 0);
+	u32 dma1 = nv_ro32(imem, inst + 4);
+	u32 dma2 = nv_ro32(imem, inst + 8);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
 
-	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
-					  264 * 4, 16,
-					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return -EINVAL;
 
-	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_wr32(priv, 0x00b334, base);
+		nv_wr32(priv, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_wr32(priv, 0x00b360, base);
+		nv_wr32(priv, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x00030000)
+			return -EINVAL;
+
+		nv_wr32(priv, 0x00b370, base);
+		nv_wr32(priv, 0x00b374, size);
+	}
+
 	return 0;
 }
 
-static int
-nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
-{
+static struct nouveau_omthds
+nv40_mpeg_omthds[] = {
+	{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
+	{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
+	{ 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
+	{}
+};
 
-	struct nv40_mpeg_priv *priv = (void *)object->engine;
-	struct nv40_mpeg_chan *chan = (void *)object;
-	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
-
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x00b318) == inst)
-		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-	return 0;
-}
-
-static struct nouveau_oclass
-nv40_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv40_mpeg_context_ctor,
-		.dtor = _nouveau_mpeg_context_dtor,
-		.init = _nouveau_mpeg_context_init,
-		.fini = nv40_mpeg_context_fini,
-		.rd32 = _nouveau_mpeg_context_rd32,
-		.wr32 = _nouveau_mpeg_context_wr32,
-	},
+struct nouveau_oclass
+nv40_mpeg_sclass[] = {
+	{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
+	{}
 };
 
 /*******************************************************************************
@@ -100,7 +97,7 @@
 static void
 nv40_mpeg_intr(struct nouveau_subdev *subdev)
 {
-	struct nv40_mpeg_priv *priv = (void *)subdev;
+	struct nv31_mpeg_priv *priv = (void *)subdev;
 	u32 stat;
 
 	if ((stat = nv_rd32(priv, 0x00b100)))
@@ -117,7 +114,7 @@
 	       struct nouveau_oclass *oclass, void *data, u32 size,
 	       struct nouveau_object **pobject)
 {
-	struct nv40_mpeg_priv *priv;
+	struct nv31_mpeg_priv *priv;
 	int ret;
 
 	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
@@ -127,8 +124,8 @@
 
 	nv_subdev(priv)->unit = 0x00000002;
 	nv_subdev(priv)->intr = nv40_mpeg_intr;
-	nv_engine(priv)->cclass = &nv40_mpeg_cclass;
-	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+	nv_engine(priv)->sclass = nv40_mpeg_sclass;
 	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
new file mode 100644
index 0000000..3d8c2133
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/fifo.h>
+#include <engine/mpeg.h>
+
+struct nv44_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv44_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv44_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv44_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+					  264 * 4, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
+	return 0;
+}
+
+static int
+nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv44_mpeg_priv *priv = (void *)object->engine;
+	struct nv44_mpeg_chan *chan = (void *)object;
+	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x00b318) == inst)
+		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv44_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = nv44_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv44_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv44_mpeg_priv *priv = (void *)subdev;
+	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			show &= ~0x01000000;
+		}
+
+		if (type == 0x00000010) {
+			handle = nouveau_handle_get_class(engctx, 0x3174);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~0x01000000;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+
+	if (show) {
+		nv_error(priv,
+			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 chid, inst << 4, nouveau_client_name(engctx), stat,
+			 type, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static void
+nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
+{
+	struct nv44_mpeg_priv *priv = (void *)subdev;
+	u32 stat;
+
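+	/* dispatch any pending PMPEG interrupt, then log and clear
+	 * PMSRCH status
+	 */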
+	if ((stat = nv_rd32(priv, 0x00b100)))
+		nv44_mpeg_intr(subdev);
+
+	if ((stat = nv_rd32(priv, 0x00b800))) {
+		nv_error(priv, "PMSRCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv44_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv44_mpeg_me_intr;
+	nv_engine(priv)->cclass = &nv44_mpeg_cclass;
+	nv_engine(priv)->sclass = nv40_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv44_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
new file mode 100644
index 0000000..e9c5e51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+#define QUAD_MASK 0x0f
+#define QUAD_FREE 0x01
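+
+/* each perfmon domain exposes four counter slots; the "quad" bitmask
+ * tracks which slots are currently free
+ */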
+
+static struct nouveau_perfsig *
+nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
+{
+	char path[64];
+	int i;
+
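+	/* look up a signal by name, or by "/<domain>/<index>" path when
+	 * the query begins with '/'
+	 */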
+	if (name[0] != '/') {
+		for (i = 0; i < dom->signal_nr; i++) {
+			if ( dom->signal[i].name &&
+			    !strncmp(name, dom->signal[i].name, size))
+				return &dom->signal[i];
+		}
+	} else {
+		for (i = 0; i < dom->signal_nr; i++) {
+			snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
+			if (!strncmp(name, path, size))
+				return &dom->signal[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct nouveau_perfsig *
+nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
+		     struct nouveau_perfdom **pdom)
+{
+	struct nouveau_perfdom *dom = *pdom;
+	struct nouveau_perfsig *sig;
+
+	if (dom == NULL) {
+		list_for_each_entry(dom, &ppm->domains, head) {
+			sig = nouveau_perfsig_find_(dom, name, size);
+			if (sig) {
+				*pdom = dom;
+				return sig;
+			}
+		}
+
+		return NULL;
+	}
+
+	return nouveau_perfsig_find_(dom, name, size);
+}
+
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
+		     struct nouveau_perfdom **pdom)
+{
+	struct nouveau_perfsig *sig;
+	struct nouveau_perfctr *ctr;
+
+	sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
+	if (!sig)
+		return NULL;
+
+	ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+	if (ctr) {
+		ctr->signal[0] = sig;
+		ctr->logic_op = 0xaaaa;
+	}
+
+	return ctr;
+}
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+static int
+nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
+		      void *data, u32 size)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	struct nouveau_perfdom *dom = NULL, *chk;
+	struct nv_perfctr_query *args = data;
+	const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
+	const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
+	const char *name;
+	int tmp = 0, di, si;
+	char path[64];
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
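+	/* the iterator packs the domain index into the top byte and a
+	 * 1-based signal index into the low 24 bits
+	 */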
+	di = (args->iter & 0xff000000) >> 24;
+	si = (args->iter & 0x00ffffff) - 1;
+
+	list_for_each_entry(chk, &ppm->domains, head) {
+		if (tmp++ == di) {
+			dom = chk;
+			break;
+		}
+	}
+
+	if (dom == NULL || si >= (int)dom->signal_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		if (raw || !(name = dom->signal[si].name)) {
+			snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
+			name = path;
+		}
+
+		if (args->name)
+			strncpy(args->name, name, args->size);
+		args->size = strlen(name) + 1;
+	}
+
+	do {
+		while (++si < dom->signal_nr) {
+			if (all || dom->signal[si].name) {
+				args->iter = (di << 24) | ++si;
+				return 0;
+			}
+		}
+		si = -1;
+		di = di + 1;
+		dom = list_entry(dom->head.next, typeof(*dom), head);
+	} while (&dom->head != &ppm->domains);
+
+	args->iter = 0xffffffff;
+	return 0;
+}
+
+static int
+nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
+		       void *data, u32 size)
+{
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	struct nouveau_perfctr *ctr, *tmp;
+	struct nouveau_perfdom *dom;
+	struct nv_perfctr_sample *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+	ppm->sequence++;
+
+	list_for_each_entry(dom, &ppm->domains, head) {
+		/* sample previous batch of counters */
+		if (dom->quad != QUAD_MASK) {
+			dom->func->next(ppm, dom);
+			tmp = NULL;
+			while (!list_empty(&dom->list)) {
+				ctr = list_first_entry(&dom->list,
+							typeof(*ctr), head);
+				if (ctr->slot < 0) break;
+				if (tmp && tmp == ctr) break;
+				if (!tmp) tmp = ctr;
+				dom->func->read(ppm, dom, ctr);
+				ctr->slot  = -1;
+				list_move_tail(&ctr->head, &dom->list);
+			}
+		}
+
+		dom->quad = QUAD_MASK;
+
+		/* setup next batch of counters for sampling */
+		list_for_each_entry(ctr, &dom->list, head) {
+			ctr->slot = ffs(dom->quad) - 1;
+			if (ctr->slot < 0)
+				break;
+			dom->quad &= ~(QUAD_FREE << ctr->slot);
+			dom->func->init(ppm, dom, ctr);
+		}
+
+		if (dom->quad != QUAD_MASK)
+			dom->func->next(ppm, dom);
+	}
+
+	return 0;
+}
+
+static int
+nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
+		     void *data, u32 size)
+{
+	struct nouveau_perfctr *ctr = (void *)object;
+	struct nv_perfctr_read *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+	if (!ctr->clk)
+		return -EAGAIN;
+
+	args->clk = ctr->clk;
+	args->ctr = ctr->ctr;
+	return 0;
+}
+
+static void
+nouveau_perfctr_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfctr *ctr = (void *)object;
+	if (ctr->head.next)
+		list_del(&ctr->head);
+	nouveau_object_destroy(&ctr->base);
+}
+
+static int
+nouveau_perfctr_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_perfmon *ppm = (void *)engine;
+	struct nouveau_perfdom *dom = NULL;
+	struct nouveau_perfsig *sig[4] = {};
+	struct nouveau_perfctr *ctr;
+	struct nv_perfctr_class *args = data;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
+		sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
+					      args->signal[i].size, &dom);
+		if (!sig[i])
+			return -EINVAL;
+	}
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
+	*pobject = nv_object(ctr);
+	if (ret)
+		return ret;
+
+	ctr->slot = -1;
+	ctr->logic_op = args->logic_op;
+	ctr->signal[0] = sig[0];
+	ctr->signal[1] = sig[1];
+	ctr->signal[2] = sig[2];
+	ctr->signal[3] = sig[3];
+	if (dom)
+		list_add_tail(&ctr->head, &dom->list);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nouveau_perfctr_ofuncs = {
+	.ctor = nouveau_perfctr_ctor,
+	.dtor = nouveau_perfctr_dtor,
+	.init = nouveau_object_init,
+	.fini = nouveau_object_fini,
+};
+
+static struct nouveau_omthds
+nouveau_perfctr_omthds[] = {
+	{ NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
+	{ NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
+	{ NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
+	{}
+};
+
+struct nouveau_oclass
+nouveau_perfmon_sclass[] = {
+	{ .handle = NV_PERFCTR_CLASS,
+	  .ofuncs = &nouveau_perfctr_ofuncs,
+	  .omthds =  nouveau_perfctr_omthds,
+	},
+	{},
+};
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+static void
+nouveau_perfctx_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	mutex_lock(&nv_subdev(ppm)->mutex);
+	ppm->context = NULL;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
+}
+
+static int
+nouveau_perfctx_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_perfmon *ppm = (void *)engine;
+	struct nouveau_perfctx *ctx;
+	int ret;
+
+	ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+				    0, 0, 0, &ctx);
+	*pobject = nv_object(ctx);
+	if (ret)
+		return ret;
+
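+	/* only a single client context may own the perfmon engine */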
+	mutex_lock(&nv_subdev(ppm)->mutex);
+	if (ppm->context == NULL)
+		ppm->context = ctx;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
+
+	if (ctx != ppm->context)
+		return -EBUSY;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nouveau_perfmon_cclass = {
+	.handle = NV_ENGCTX(PERFMON, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_perfctx_ctor,
+		.dtor = nouveau_perfctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+	},
+};
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+int
+nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
+		    u32 base, u32 size_unit, u32 size_domain,
+		    const struct nouveau_specdom *spec)
+{
+	const struct nouveau_specdom *sdom;
+	const struct nouveau_specsig *ssig;
+	struct nouveau_perfdom *dom;
+	int i;
+
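+	/* instantiate the domain spec once per unit: a single instance
+	 * for a zero mask, otherwise one per bit set
+	 */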
+	for (i = 0; i == 0 || mask; i++) {
+		u32 addr = base + (i * size_unit);
+		if (i && !(mask & (1 << i)))
+			continue;
+
+		sdom = spec;
+		while (sdom->signal_nr) {
+			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
+				      sizeof(*dom->signal), GFP_KERNEL);
+			if (!dom)
+				return -ENOMEM;
+
+			if (mask) {
+				snprintf(dom->name, sizeof(dom->name),
+					 "%s/%02x/%02x", name, i,
+					 (int)(sdom - spec));
+			} else {
+				snprintf(dom->name, sizeof(dom->name),
+					 "%s/%02x", name, (int)(sdom - spec));
+			}
+
+			list_add_tail(&dom->head, &ppm->domains);
+			INIT_LIST_HEAD(&dom->list);
+			dom->func = sdom->func;
+			dom->addr = addr;
+			dom->quad = QUAD_MASK;
+			dom->signal_nr = sdom->signal_nr;
+
+			ssig = (sdom++)->signal;
+			while (ssig->name) {
+				dom->signal[ssig->signal].name = ssig->name;
+				ssig++;
+			}
+
+			addr += size_domain;
+		}
+
+		mask &= ~(1 << i);
+	}
+
+	return 0;
+}
+
+int
+_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	return nouveau_engine_fini(&ppm->base, suspend);
+}
+
+int
+_nouveau_perfmon_init(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	return nouveau_engine_init(&ppm->base);
+}
+
+void
+_nouveau_perfmon_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	struct nouveau_perfdom *dom, *tmp;
+
+	list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+		list_del(&dom->head);
+		kfree(dom);
+	}
+
+	nouveau_engine_destroy(&ppm->base);
+}
+
+int
+nouveau_perfmon_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_perfmon *ppm;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
+				     "perfmon", length, pobject);
+	ppm = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&ppm->domains);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
new file mode 100644
index 0000000..50696cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static void
+pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		 struct nouveau_perfctr *ctr)
+{
+	u32 mask = 0x00000000;
+	u32 ctrl = 0x00000001;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
+		mask |= 1 << (ctr->signal[i] - dom->signal);
+
+	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
+	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
+	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
+}
+
+static void
+pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		 struct nouveau_perfctr *ctr)
+{
+	ctr->ctr = ppm->pwr[ctr->slot];
+	ctr->clk = ppm->pwr[ppm->last];
+}
+
+static void
+pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	int i;
+
+	for (i = 0; i <= ppm->last; i++) {
+		ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
+		nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
+	}
+}
+
+static const struct nouveau_funcdom
+pwr_perfctr_func = {
+	.init = pwr_perfctr_init,
+	.read = pwr_perfctr_read,
+	.next = pwr_perfctr_next,
+};
+
+const struct nouveau_specdom
+nva3_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
+
+const struct nouveau_specdom
+nvc0_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{ 0x14, "pwr_ce1_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
+
+const struct nouveau_specdom
+nve0_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{ 0x14, "pwr_ce1_idle" },
+			{ 0x15, "pwr_ce2_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
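In pwr_perfctr_init the hardware bit for each signal is recovered by pointer arithmetic: ctr->signal[i] points into dom->signal[], so subtracting the array base yields the signal index. The same index-from-pointer idiom in isolation (names invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct sig { const char *name; };

    /* build a bitmask from pointers that index into a base array */
    static uint32_t mask_from_ptrs(const struct sig *base,
                                   const struct sig *const sel[], int n)
    {
        uint32_t mask = 0;
        int i;

        for (i = 0; i < n && sel[i]; i++)
            mask |= 1u << (sel[i] - base);
        return mask;
    }

    int main(void)
    {
        struct sig sigs[8] = { { "a" }, { "b" }, { "c" } };
        const struct sig *sel[] = { &sigs[2], &sigs[0] };

        printf("mask = 0x%02x\n", mask_from_ptrs(sigs, sel, 2)); /* 0x05 */
        return 0;
    }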
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
new file mode 100644
index 0000000..b2a1078
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	struct nv40_perfmon_cntr *cntr = (void *)ctr;
+	u32 log = ctr->logic_op;
+	u32 src = 0x00000000;
+	int i;
+
+	for (i = 0; i < 4 && ctr->signal[i]; i++)
+		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
+	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
+	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+}
+
+static void
+nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	struct nv40_perfmon_cntr *cntr = (void *)ctr;
+
+	switch (cntr->base.slot) {
+	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
+	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
+	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
+	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+	}
+	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+}
+
+static void
+nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	if (priv->sequence != ppm->sequence) {
+		nv_wr32(priv, 0x400084, 0x00000020);
+		priv->sequence = ppm->sequence;
+	}
+}
+
+const struct nouveau_funcdom
+nv40_perfctr_func = {
+	.init = nv40_perfctr_init,
+	.read = nv40_perfctr_read,
+	.next = nv40_perfctr_next,
+};
+
+static const struct nouveau_specdom
+nv40_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+int
+nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv40_perfmon_oclass *mclass = (void *)oclass;
+	struct nv40_perfmon_priv *priv;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	return 0;
+}
+
+struct nouveau_oclass *
+nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x40),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv40_perfmon,
+}.base;
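nv40_perfmon_oclass demonstrates the pattern this series uses to parameterise classes: the derived struct embeds the generic nouveau_oclass as its first member, the whole object is built as a static compound literal, and only the address of .base escapes; the constructor casts the oclass pointer back to the derived type to reach .doms. The idiom boiled down (hypothetical types):

    #include <stdio.h>

    struct base_class {
        unsigned int handle;
    };

    struct derived_class {
        struct base_class base;   /* first member, so the cast is valid */
        const char *extra;
    };

    /* consumers only ever see a base_class pointer ... */
    static struct base_class *my_class = &(struct derived_class) {
        .base.handle = 0x40,
        .extra = "per-variant data",
    }.base;

    int main(void)
    {
        /* ... implementations downcast to recover the derived fields */
        struct derived_class *d = (struct derived_class *)my_class;

        printf("handle=%#x extra=%s\n", d->base.handle, d->extra);
        return 0;
    }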
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
new file mode 100644
index 0000000..1b5792d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_PM_NV40_H__
+#define __NVKM_PM_NV40_H__
+
+#include "priv.h"
+
+struct nv40_perfmon_oclass {
+	struct nouveau_oclass base;
+	const struct nouveau_specdom *doms;
+};
+
+struct nv40_perfmon_priv {
+	struct nouveau_perfmon base;
+	u32 sequence;
+};
+
+int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, void *data, u32 size,
+		      struct nouveau_object **pobject);
+
+struct nv40_perfmon_cntr {
+	struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nv40_perfctr_func;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
new file mode 100644
index 0000000..9421769
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv50_perfmon[] = {
+	{ 0x040, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x100, (const struct nouveau_specsig[]) {
+			{ 0xc8, "gr_idle" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x100, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x020, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x040, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+struct nouveau_oclass *
+nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv50_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
new file mode 100644
index 0000000..9232c7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv84_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+struct nouveau_oclass *
+nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x84),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv84_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
new file mode 100644
index 0000000..6197ebd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nva3_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+static int
+nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **object)
+{
+	int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
+	if (ret == 0) {
+		struct nv40_perfmon_priv *priv = (void *)*object;
+		ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+					   nva3_perfmon_pwr);
+		if (ret)
+			return ret;
+
+		priv->base.last = 3;
+	}
+	return ret;
+}
+
+struct nouveau_oclass *
+nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0xa3),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nva3_perfmon,
+}.base;
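nva3 extends rather than duplicates: it runs the whole nv40 constructor first, and only on success layers the PDAEMON "pwr" domain on top and adjusts priv->base.last. A compact model of that chained-constructor shape (the stubs stand in for nv40_perfmon_ctor and nouveau_perfdom_new):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int last; };

    static int base_ctor(struct obj **pobj)
    {
        *pobj = calloc(1, sizeof(**pobj));
        return *pobj ? 0 : -ENOMEM;
    }

    static int add_pwr_domain(struct obj *obj)
    {
        (void)obj;      /* would register the "pwr" domain here */
        return 0;
    }

    /* run the base constructor first, then bolt the extras on top */
    static int derived_ctor(struct obj **pobj)
    {
        int ret = base_ctor(pobj);

        if (ret == 0) {
            ret = add_pwr_domain(*pobj);
            if (ret == 0)
                (*pobj)->last = 3;
        }
        return ret;
    }

    int main(void)
    {
        struct obj *o = NULL;
        int ret = derived_ctor(&o);

        printf("ctor=%d last=%d\n", ret, o ? o->last : -1);
        free(o);
        return ret ? 1 : 0;
    }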
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
new file mode 100644
index 0000000..74b2410
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nvc0_perfmon_hub[] = {
+	{}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_gpc[] = {
+	{}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_part[] = {
+	{}
+};
+
+static void
+nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+	u32 log = ctr->logic_op;
+	u32 src = 0x00000000;
+	int i;
+
+	for (i = 0; i < 4 && ctr->signal[i]; i++)
+		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+	nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
+	nv_wr32(priv, dom->addr + 0x100, 0x00000000);
+	nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
+	nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+}
+
+static void
+nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+
+	switch (cntr->base.slot) {
+	case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
+	case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
+	case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
+	case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+	}
+	cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+}
+
+static void
+nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+}
+
+const struct nouveau_funcdom
+nvc0_perfctr_func = {
+	.init = nvc0_perfctr_init,
+	.read = nvc0_perfctr_read,
+	.next = nvc0_perfctr_next,
+};
+
+int
+nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvc0_perfmon_priv *priv = (void *)object;
+	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
+	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
+	return nouveau_perfmon_fini(&priv->base, suspend);
+}
+
+static int
+nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	u32 mask;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nvc0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	/* HUB */
+	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+				   nvc0_perfmon_hub);
+	if (ret)
+		return ret;
+
+	/* GPC */
+	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
+	mask &= ~nv_rd32(priv, 0x022504);
+	mask &= ~nv_rd32(priv, 0x022584);
+
+	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+				  0x1000, 0x200, nvc0_perfmon_gpc);
+	if (ret)
+		return ret;
+
+	/* PART */
+	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
+	mask &= ~nv_rd32(priv, 0x022548);
+	mask &= ~nv_rd32(priv, 0x0225c8);
+
+	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+				  0x1000, 0x200, nvc0_perfmon_part);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	priv->base.last = 7;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
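The GPC and PART masks in nvc0_perfmon_ctor are "all units present, minus whatever the fuses disabled": a full mask of count bits (count read from 0x022430/0x022438) with the two fuse registers knocked out. The arithmetic on its own, with the register reads replaced by constants:

    #include <stdint.h>
    #include <stdio.h>

    /* full mask for 'count' units, minus fuse-disabled units */
    static uint32_t unit_mask(unsigned int count, uint32_t fuse0,
                              uint32_t fuse1)
    {
        uint32_t mask = (1u << count) - 1;      /* count < 32 assumed */

        return mask & ~fuse0 & ~fuse1;
    }

    int main(void)
    {
        /* e.g. 4 GPCs with GPC2 floorswept -> 0b1011 */
        printf("gpc mask = 0x%x\n", unit_mask(4, 0x4, 0x0));
        return 0;
    }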
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
new file mode 100644
index 0000000..f66bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_PM_NVC0_H__
+#define __NVKM_PM_NVC0_H__
+
+#include "priv.h"
+
+struct nvc0_perfmon_priv {
+	struct nouveau_perfmon base;
+};
+
+struct nvc0_perfmon_cntr {
+	struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nvc0_perfctr_func;
+int nvc0_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
new file mode 100644
index 0000000..71d718c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nve0_perfmon_hub[] = {
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x40, (const struct nouveau_specsig[]) {
+			{ 0x27, "hub01_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub02_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub03_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x40, (const struct nouveau_specsig[]) {
+			{ 0x03, "host_mmio_rd" },
+			{ 0x27, "hub04_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub05_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0xc0, (const struct nouveau_specsig[]) {
+			{ 0x74, "host_fb_rd3x" },
+			{ 0x75, "host_fb_rd3x_2" },
+			{ 0xa7, "hub06_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub07_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_gpc[] = {
+	{ 0xe0, (const struct nouveau_specsig[]) {
+			{ 0xc7, "gpc00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_part[] = {
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "part00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "part01_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static int
+nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	u32 mask;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* PDAEMON */
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nve0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	/* HUB */
+	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+				   nve0_perfmon_hub);
+	if (ret)
+		return ret;
+
+	/* GPC */
+	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
+	mask &= ~nv_rd32(priv, 0x022504);
+	mask &= ~nv_rd32(priv, 0x022584);
+
+	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+				  0x1000, 0x200, nve0_perfmon_gpc);
+	if (ret)
+		return ret;
+
+	/* PART */
+	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
+	mask &= ~nv_rd32(priv, 0x022548);
+	mask &= ~nv_rd32(priv, 0x0225c8);
+
+	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+				  0x1000, 0x200, nve0_perfmon_part);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	priv->base.last = 7;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
new file mode 100644
index 0000000..47256f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nve0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvf0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xf0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvf0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
new file mode 100644
index 0000000..0ac8714
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
@@ -0,0 +1,91 @@
+#ifndef __NVKM_PERFMON_PRIV_H__
+#define __NVKM_PERFMON_PRIV_H__
+
+#include <engine/perfmon.h>
+
+struct nouveau_perfctr {
+	struct nouveau_object base;
+	struct list_head head;
+	struct nouveau_perfsig *signal[4];
+	int slot;
+	u32 logic_op;
+	u32 clk;
+	u32 ctr;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_sclass[];
+
+struct nouveau_perfctx {
+	struct nouveau_engctx base;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_cclass;
+
+struct nouveau_specsig {
+	u8 signal;
+	const char *name;
+};
+
+struct nouveau_perfsig {
+	const char *name;
+};
+
+struct nouveau_perfdom;
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
+		     struct nouveau_perfdom **);
+
+struct nouveau_specdom {
+	u16 signal_nr;
+	const struct nouveau_specsig *signal;
+	const struct nouveau_funcdom *func;
+};
+
+extern const struct nouveau_specdom nva3_perfmon_pwr[];
+extern const struct nouveau_specdom nvc0_perfmon_pwr[];
+extern const struct nouveau_specdom nve0_perfmon_pwr[];
+
+struct nouveau_perfdom {
+	struct list_head head;
+	struct list_head list;
+	const struct nouveau_funcdom *func;
+	char name[32];
+	u32 addr;
+	u8  quad;
+	u32 signal_nr;
+	struct nouveau_perfsig signal[];
+};
+
+struct nouveau_funcdom {
+	void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+		     struct nouveau_perfctr *);
+	void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+		     struct nouveau_perfctr *);
+	void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
+};
+
+int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
+			u32, u32, u32, const struct nouveau_specdom *);
+
+#define nouveau_perfmon_create(p,e,o,d)                                        \
+	nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_perfmon_dtor(p) ({                                             \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_dtor(nv_object(c));                                   \
+})
+#define nouveau_perfmon_init(p) ({                                             \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_init(nv_object(c));                                   \
+})
+#define nouveau_perfmon_fini(p,s) ({                                           \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_fini(nv_object(c), (s));                              \
+})
+
+int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+void _nouveau_perfmon_dtor(struct nouveau_object *);
+int  _nouveau_perfmon_init(struct nouveau_object *);
+int  _nouveau_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
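The nouveau_perfmon_dtor/init/fini wrappers above use GNU C statement expressions: the temporary assignment type-checks the argument as a struct nouveau_perfmon * before forwarding to the underscore-prefixed object-level helper. The idiom in miniature (gcc/clang extension; names invented):

    #include <stdio.h>

    struct widget { int id; };

    static int _widget_fini(struct widget *w)
    {
        return w->id;
    }

    /* the ({ ... }) block evaluates to its last expression, and the
     * intermediate assignment rejects arguments of the wrong type */
    #define widget_fini(p) ({                                              \
        struct widget *_w = (p);                                           \
        _widget_fini(_w);                                                  \
    })

    int main(void)
    {
        struct widget w = { .id = 7 };

        printf("%d\n", widget_fini(&w));
        return 0;
    }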
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 2a859a3..c571758 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -135,8 +135,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_software_oclass = {
+struct nouveau_oclass *
+nv04_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x04),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a019364..a62f11a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -117,8 +117,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv10_software_oclass = {
+struct nouveau_oclass *
+nv10_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x10),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv10_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index c48e749..b574dd4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -32,16 +32,9 @@
 
 #include <subdev/bar.h>
 
-#include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_software_priv {
-	struct nouveau_software base;
-};
-
-struct nv50_software_chan {
-	struct nouveau_software_chan base;
-};
+#include "nv50.h"
 
 /*******************************************************************************
  * software object classes
@@ -62,7 +55,7 @@
 
 	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
 		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
 		ret = 0;
 	}
 	nouveau_namedb_put(handle);
@@ -74,34 +67,33 @@
 				 void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.offset = *(u32 *)args;
+	chan->vblank.offset = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
 				void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.value = *(u32 *)args;
+	chan->vblank.value = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
 				  void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nouveau_disp *disp = nouveau_disp(object);
-	u32 crtc = *(u32 *)args;
-	if (crtc > 1)
+	u32 head = *(u32 *)args;
+	if (head >= chan->vblank.nr_event)
 		return -EINVAL;
 
-	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+	nouveau_event_get(chan->vblank.event[head]);
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 			void *args, u32 size)
 {
@@ -132,10 +124,9 @@
  ******************************************************************************/
 
 static int
-nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+nv50_software_vblsem_release(void *data, int head)
 {
-	struct nouveau_software_chan *chan =
-		container_of(event, struct nouveau_software_chan, vblank.event);
+	struct nv50_software_chan *chan = data;
 	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	struct nouveau_bar *bar = nouveau_bar(priv);
 
@@ -154,45 +145,76 @@
 	return NVKM_EVENT_DROP;
 }
 
-static int
+void
+nv50_software_context_dtor(struct nouveau_object *object)
+{
+	struct nv50_software_chan *chan = (void *)object;
+	int i;
+
+	if (chan->vblank.event) {
+		for (i = 0; i < chan->vblank.nr_event; i++)
+			nouveau_event_ref(NULL, &chan->vblank.event[i]);
+		kfree(chan->vblank.event);
+	}
+
+	nouveau_software_context_destroy(&chan->base);
+}
+
+int
 nv50_software_context_ctor(struct nouveau_object *parent,
 			   struct nouveau_object *engine,
 			   struct nouveau_oclass *oclass, void *data, u32 size,
 			   struct nouveau_object **pobject)
 {
+	struct nouveau_disp *pdisp = nouveau_disp(parent);
+	struct nv50_software_cclass *pclass = (void *)oclass;
 	struct nv50_software_chan *chan;
-	int ret;
+	int ret, i;
 
 	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
 
-	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
-	chan->base.vblank.event.func = nv50_software_vblsem_release;
+	chan->vblank.nr_event = pdisp->vblank->index_nr;
+	chan->vblank.event = kcalloc(chan->vblank.nr_event,
+				     sizeof(*chan->vblank.event), GFP_KERNEL);
+	if (!chan->vblank.event)
+		return -ENOMEM;
+
+	for (i = 0; i < chan->vblank.nr_event; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+					chan, &chan->vblank.event[i]);
+		if (ret)
+			return ret;
+	}
+
+	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nouveau_oclass
+static struct nv50_software_cclass
 nv50_software_cclass = {
-	.handle = NV_ENGCTX(SW, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+	.base.handle = NV_ENGCTX(SW, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_software_context_ctor,
 		.dtor = _nouveau_software_context_dtor,
 		.init = _nouveau_software_context_init,
 		.fini = _nouveau_software_context_fini,
 	},
+	.vblank = nv50_software_vblsem_release,
 };
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
+int
 nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		   struct nouveau_oclass *oclass, void *data, u32 size,
 		   struct nouveau_object **pobject)
 {
+	struct nv50_software_oclass *pclass = (void *)oclass;
 	struct nv50_software_priv *priv;
 	int ret;
 
@@ -201,19 +223,21 @@
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->cclass = &nv50_software_cclass;
-	nv_engine(priv)->sclass = nv50_software_sclass;
+	nv_engine(priv)->cclass = pclass->cclass;
+	nv_engine(priv)->sclass = pclass->sclass;
 	nv_subdev(priv)->intr = nv04_software_intr;
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_software_oclass = {
-	.handle = NV_ENGINE(SW, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_software_oclass = &(struct nv50_software_oclass) {
+	.base.handle = NV_ENGINE(SW, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_software_ctor,
 		.dtor = _nouveau_software_dtor,
 		.init = _nouveau_software_init,
 		.fini = _nouveau_software_fini,
 	},
-};
+	.cclass = &nv50_software_cclass.base,
+	.sclass =  nv50_software_sclass,
+}.base;
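The reworked context constructor allocates one event handle per display head and subscribes the class-specific vblank callback (pclass->vblank) to each; the new nv50_software_context_dtor drops those references again. One hedged observation: in the hunks shown here the cclass still wires .dtor to the generic _nouveau_software_context_dtor, so unless that hookup changes elsewhere in the series the per-head handles would never be freed. The allocate-subscribe-release shape, modelled in plain C (the event calls are stand-ins, not the nouveau API):

    #include <stdlib.h>

    struct eventh { int head; };

    /* stand-ins for nouveau_event_new() / nouveau_event_ref(NULL, ...) */
    static int event_new(int head, struct eventh **pev)
    {
        *pev = calloc(1, sizeof(**pev));
        if (!*pev)
            return -1;
        (*pev)->head = head;
        return 0;
    }

    static void event_release(struct eventh **pev)
    {
        free(*pev);
        *pev = NULL;
    }

    struct chan {
        struct eventh **event;
        int nr_event;
    };

    /* safe even after a partial ctor: unused slots stay NULL */
    static void chan_dtor(struct chan *chan)
    {
        int i;

        if (chan->event) {
            for (i = 0; i < chan->nr_event; i++)
                event_release(&chan->event[i]);
            free(chan->event);
            chan->event = NULL;
        }
    }

    static int chan_ctor(struct chan *chan, int nr_heads)
    {
        int i;

        chan->nr_event = nr_heads;
        chan->event = calloc(nr_heads, sizeof(*chan->event));
        if (!chan->event)
            return -1;

        for (i = 0; i < nr_heads; i++)
            if (event_new(i, &chan->event[i]))
                return -1;      /* caller runs chan_dtor() */
        return 0;
    }

    int main(void)
    {
        struct chan c = { 0 };
        int ret = chan_ctor(&c, 2);

        chan_dtor(&c);
        return ret ? 1 : 0;
    }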
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
new file mode 100644
index 0000000..2de370c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -0,0 +1,47 @@
+#ifndef __NVKM_SW_NV50_H__
+#define __NVKM_SW_NV50_H__
+
+#include <engine/software.h>
+
+struct nv50_software_oclass {
+	struct nouveau_oclass base;
+	struct nouveau_oclass *cclass;
+	struct nouveau_oclass *sclass;
+};
+
+struct nv50_software_priv {
+	struct nouveau_software base;
+};
+
+int  nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, void *, u32,
+			struct nouveau_object **);
+
+struct nv50_software_cclass {
+	struct nouveau_oclass base;
+	int (*vblank)(void *, int);
+};
+
+struct nv50_software_chan {
+	struct nouveau_software_chan base;
+	struct {
+		struct nouveau_eventh **event;
+		int nr_event;
+		u32 channel;
+		u32 ctxdma;
+		u64 offset;
+		u32 value;
+	} vblank;
+};
+
+int  nv50_software_context_ctor(struct nouveau_object *,
+				struct nouveau_object *,
+				struct nouveau_oclass *, void *, u32,
+				struct nouveau_object **);
+void nv50_software_context_dtor(struct nouveau_object *);
+
+int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index d698e71..f9430c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -32,13 +32,7 @@
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nvc0_software_priv {
-	struct nouveau_software base;
-};
-
-struct nvc0_software_chan {
-	struct nouveau_software_chan base;
-};
+#include "nv50.h"
 
 /*******************************************************************************
  * software object classes
@@ -48,58 +42,24 @@
 nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
 				 void *args, u32 size)
 {
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
 	u64 data = *(u32 *)args;
 	if (mthd == 0x0400) {
-		chan->base.vblank.offset &= 0x00ffffffffULL;
-		chan->base.vblank.offset |= data << 32;
+		chan->vblank.offset &= 0x00ffffffffULL;
+		chan->vblank.offset |= data << 32;
 	} else {
-		chan->base.vblank.offset &= 0xff00000000ULL;
-		chan->base.vblank.offset |= data;
+		chan->vblank.offset &= 0xff00000000ULL;
+		chan->vblank.offset |= data;
 	}
 	return 0;
 }
 
 static int
-nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
-				void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.value = *(u32 *)args;
-	return 0;
-}
-
-static int
-nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
-				  void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nouveau_disp *disp = nouveau_disp(object);
-	u32 crtc = *(u32 *)args;
-
-	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
-		return -EINVAL;
-
-	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
-	return 0;
-}
-
-static int
-nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
-			void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static int
 nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
                               void *args, u32 size)
 {
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	u32 data = *(u32 *)args;
 
 	switch (mthd) {
@@ -124,9 +84,9 @@
 nvc0_software_omthds[] = {
 	{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
 	{ 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
-	{ 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
-	{ 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nvc0_software_mthd_flip },
+	{ 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
+	{ 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nv50_software_mthd_flip },
 	{ 0x0600, 0x0600, nvc0_software_mthd_mp_control },
 	{ 0x0644, 0x0644, nvc0_software_mthd_mp_control },
 	{ 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
@@ -144,11 +104,10 @@
  ******************************************************************************/
 
 static int
-nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+nvc0_software_vblsem_release(void *data, int head)
 {
-	struct nouveau_software_chan *chan =
-		container_of(event, struct nouveau_software_chan, vblank.event);
-	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nv50_software_chan *chan = data;
+	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	struct nouveau_bar *bar = nouveau_bar(priv);
 
 	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
@@ -160,66 +119,31 @@
 	return NVKM_EVENT_DROP;
 }
 
-static int
-nvc0_software_context_ctor(struct nouveau_object *parent,
-			   struct nouveau_object *engine,
-			   struct nouveau_oclass *oclass, void *data, u32 size,
-			   struct nouveau_object **pobject)
-{
-	struct nvc0_software_chan *chan;
-	int ret;
-
-	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
-	chan->base.vblank.event.func = nvc0_software_vblsem_release;
-	return 0;
-}
-
-static struct nouveau_oclass
+static struct nv50_software_cclass
 nvc0_software_cclass = {
-	.handle = NV_ENGCTX(SW, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_software_context_ctor,
+	.base.handle = NV_ENGCTX(SW, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_context_ctor,
 		.dtor = _nouveau_software_context_dtor,
 		.init = _nouveau_software_context_init,
 		.fini = _nouveau_software_context_fini,
 	},
+	.vblank = nvc0_software_vblsem_release,
 };
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
-nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, void *data, u32 size,
-		   struct nouveau_object **pobject)
-{
-	struct nvc0_software_priv *priv;
-	int ret;
-
-	ret = nouveau_software_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvc0_software_cclass;
-	nv_engine(priv)->sclass = nvc0_software_sclass;
-	nv_subdev(priv)->intr = nv04_software_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvc0_software_oclass = {
-	.handle = NV_ENGINE(SW, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_software_ctor,
+struct nouveau_oclass *
+nvc0_software_oclass = &(struct nv50_software_oclass) {
+	.base.handle = NV_ENGINE(SW, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_ctor,
 		.dtor = _nouveau_software_dtor,
 		.init = _nouveau_software_init,
 		.fini = _nouveau_software_fini,
 	},
-};
+	.cclass = &nvc0_software_cclass.base,
+	.sclass =  nvc0_software_sclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 5a5961b..560c359 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -22,7 +22,7 @@
 #define NV_DEVICE_DISABLE_PPP                             0x0000004000000000ULL
 #define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
-#define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VIC                             0x0000020000000000ULL
 #define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
 
 struct nv_device_class {
@@ -98,6 +98,77 @@
 	u32 conf0;
 };
 
+/* Perfmon counter class
+ *
+ * XXXX: NV_PERFCTR
+ */
+#define NV_PERFCTR_CLASS                                             0x0000ffff
+#define NV_PERFCTR_QUERY                                             0x00000000
+#define NV_PERFCTR_SAMPLE                                            0x00000001
+#define NV_PERFCTR_READ                                              0x00000002
+
+struct nv_perfctr_class {
+	u16 logic_op;
+	struct {
+		char __user *name; /*XXX: use copy_from_user when exposed to userspace */
+		u32 size;
+	} signal[4];
+};
+
+struct nv_perfctr_query {
+	u32 iter;
+	u32 size;
+	char __user *name; /*XXX: use copy_to_user when exposed to userspace */
+};
+
+struct nv_perfctr_sample {
+};
+
+struct nv_perfctr_read {
+	u32 ctr;
+	u32 clk;
+};
+
+/* Device control class
+ *
+ * XXXX: NV_CONTROL
+ */
+#define NV_CONTROL_CLASS                                             0x0000fffe
+
+#define NV_CONTROL_PSTATE_INFO                                       0x00000000
+#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE                              (-1)
+#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON                              (-2)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN                              (-1)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON                              (-2)
+#define NV_CONTROL_PSTATE_ATTR                                       0x00000001
+#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT                               (-1)
+#define NV_CONTROL_PSTATE_USER                                       0x00000002
+#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN                               (-1)
+#define NV_CONTROL_PSTATE_USER_STATE_PERFMON                               (-2)
+
+struct nv_control_pstate_info {
+	u32 count; /* out: number of power states */
+	s32 ustate; /* out: current target pstate index */
+	u32 pstate; /* out: current pstate index */
+};
+
+struct nv_control_pstate_attr {
+	s32 state; /*  in: index of pstate to query
+		    * out: pstate identifier
+		    */
+	u32 index; /*  in: index of attribute to query
+		    * out: index of next attribute, or 0 if no more
+		    */
+	char name[32];
+	char unit[16];
+	u32 min;
+	u32 max;
+};
+
+struct nv_control_pstate_user {
+	s32 state; /*  in: pstate identifier */
+};
+
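nv_control_pstate_attr is built for cursor-style enumeration: userspace passes an attribute index in, and the implementation returns the attribute along with the index of the next one, 0 terminating the walk. The intended calling pattern might look like this (the query function is a stub, not the real control-class method):

    #include <stdio.h>

    struct pstate_attr {
        int state;          /* in: pstate to query, out: identifier */
        unsigned int index; /* in: cursor, out: next cursor or 0 */
        char name[32];
        unsigned int min, max;
    };

    /* stub standing in for an NV_CONTROL_PSTATE_ATTR query */
    static int query_attr(struct pstate_attr *a)
    {
        snprintf(a->name, sizeof(a->name), "attr%u", a->index);
        a->min = 0;
        a->max = 100;
        a->index = a->index < 2 ? a->index + 1 : 0;     /* 3 attrs total */
        return 0;
    }

    int main(void)
    {
        struct pstate_attr a = { .state = -1, .index = 0 };

        /* walk until the returned cursor wraps back to 0 */
        do {
            if (query_attr(&a))
                break;
            printf("%s: %u..%u\n", a.name, a.min, a.max);
        } while (a.index);
        return 0;
    }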
 /* DMA FIFO channel classes
  *
  * 006b: NV03_CHANNEL_DMA
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
index 9ea18df..8092e2e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -1,13 +1,20 @@
 #ifndef __NOUVEAU_DEBUG_H__
 #define __NOUVEAU_DEBUG_H__
 
+extern int nv_info_debug_level;
+
 #define NV_DBG_FATAL    0
 #define NV_DBG_ERROR    1
 #define NV_DBG_WARN     2
-#define NV_DBG_INFO     3
+#define NV_DBG_INFO     nv_info_debug_level
 #define NV_DBG_DEBUG    4
 #define NV_DBG_TRACE    5
 #define NV_DBG_PARANOIA 6
 #define NV_DBG_SPAM     7
 
+#define NV_DBG_INFO_NORMAL 3
+#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
+
+#define nv_debug_level(a) (nv_info_debug_level = NV_DBG_INFO_##a)
+
 #endif
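The trick in this header is that NV_DBG_INFO stops being a constant: it now expands to a global variable, so flipping nv_info_debug_level to NV_DBG_INFO_SILENT demotes every nv_info() call to debug severity without touching a single call site. A toy model of the mechanism (the literal 3 stands in for the CONFIG_NOUVEAU_DEBUG threshold):

    #include <stdio.h>

    static int info_level = 3;          /* NV_DBG_INFO_NORMAL */

    #define DBG_INFO   info_level       /* a variable, not a constant */
    #define DBG_DEBUG  4

    #define LOG(lvl, msg) do {                                             \
        if ((lvl) <= 3) /* build-time verbosity threshold */               \
            printf("<%d> %s\n", (lvl), (msg));                             \
    } while (0)

    int main(void)
    {
        LOG(DBG_INFO, "visible at the normal info level");
        info_level = DBG_DEBUG;         /* "silent": info ranks as debug */
        LOG(DBG_INFO, "filtered out");
        return 0;
    }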
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 99b6600..ac2881d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -33,9 +33,10 @@
 	NVDEV_SUBDEV_INSTMEM,
 	NVDEV_SUBDEV_VM,
 	NVDEV_SUBDEV_BAR,
+	NVDEV_SUBDEV_PWR,
 	NVDEV_SUBDEV_VOLT,
-	NVDEV_SUBDEV_CLOCK,
 	NVDEV_SUBDEV_THERM,
+	NVDEV_SUBDEV_CLOCK,
 
 	NVDEV_ENGINE_DMAOBJ,
 	NVDEV_ENGINE_FIFO,
@@ -50,9 +51,10 @@
 	NVDEV_ENGINE_COPY0,
 	NVDEV_ENGINE_COPY1,
 	NVDEV_ENGINE_COPY2,
-	NVDEV_ENGINE_UNK1C1,
+	NVDEV_ENGINE_VIC,
 	NVDEV_ENGINE_VENC,
 	NVDEV_ENGINE_DISP,
+	NVDEV_ENGINE_PERFMON,
 
 	NVDEV_SUBDEV_NR,
 };
@@ -72,6 +74,7 @@
 	enum {
 		NV_04    = 0x04,
 		NV_10    = 0x10,
+		NV_11    = 0x11,
 		NV_20    = 0x20,
 		NV_30    = 0x30,
 		NV_40    = 0x40,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 9e09440..5d539eb 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -5,13 +5,21 @@
 #define NVKM_EVENT_DROP 0
 #define NVKM_EVENT_KEEP 1
 
+/* nouveau_eventh.flags bit #s */
+#define NVKM_EVENT_ENABLE 0
+
 struct nouveau_eventh {
+	struct nouveau_event *event;
 	struct list_head head;
-	int (*func)(struct nouveau_eventh *, int index);
+	unsigned long flags;
+	int index;
+	int (*func)(void *, int);
+	void *priv;
 };
 
 struct nouveau_event {
-	spinlock_t lock;
+	spinlock_t list_lock;
+	spinlock_t refs_lock;
 
 	void *priv;
 	void (*enable)(struct nouveau_event *, int index);
@@ -28,9 +36,11 @@
 void nouveau_event_destroy(struct nouveau_event **);
 void nouveau_event_trigger(struct nouveau_event *, int index);
 
-void nouveau_event_get(struct nouveau_event *, int index,
-		       struct nouveau_eventh *);
-void nouveau_event_put(struct nouveau_event *, int index,
-		       struct nouveau_eventh *);
+int  nouveau_event_new(struct nouveau_event *, int index,
+		       int (*func)(void *, int), void *,
+		       struct nouveau_eventh **);
+void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
+void nouveau_event_get(struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_eventh *);
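+
+/* Typical lifecycle (sketch; my_func/my_priv are placeholders): create a
+ * handler bound to 'index' on an event source, enable/disable delivery
+ * with get/put, and drop the reference when done:
+ *
+ *	struct nouveau_eventh *handler = NULL;
+ *	nouveau_event_new(event, index, my_func, my_priv, &handler);
+ *	nouveau_event_get(handler);         // enable delivery
+ *	...
+ *	nouveau_event_put(handler);         // disable delivery
+ *	nouveau_event_ref(NULL, &handler);  // release
+ */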
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
index 2707495..ed05584 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -8,4 +8,13 @@
 
 int nouveau_dbgopt(const char *optstr, const char *sub);
 
+/* compares the first 'len' bytes of 'str' (which need not be
+ * zero-terminated) with the zero-terminated string 'cmp'; a match
+ * additionally requires the lengths to be equal
+ */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+	if (strlen(cmp) != len)
+		return len;
+	return strncasecmp(str, cmp, len);
+}
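+
+/* e.g. when parsing an unterminated token 'tok' of length 'tlen',
+ * strncasecmpz(tok, "core", tlen) == 0 only on a full, case-insensitive
+ * match; a plain strncasecmp() would also accept "cor..." prefixes.
+ */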
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index d87836e..0f9a37b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -6,27 +6,12 @@
 
 struct nouveau_object;
 
-#define NV_PRINTK_FATAL    KERN_CRIT
-#define NV_PRINTK_ERROR    KERN_ERR
-#define NV_PRINTK_WARN     KERN_WARNING
-#define NV_PRINTK_INFO     KERN_INFO
-#define NV_PRINTK_DEBUG    KERN_DEBUG
-#define NV_PRINTK_PARANOIA KERN_DEBUG
-#define NV_PRINTK_TRACE    KERN_DEBUG
-#define NV_PRINTK_SPAM     KERN_DEBUG
-
-extern int nv_printk_suspend_level;
-
-#define NV_DBG_SUSPEND (nv_printk_suspend_level)
-#define NV_PRINTK_SUSPEND  (nv_printk_level_to_pfx(nv_printk_suspend_level))
-
-const char *nv_printk_level_to_pfx(int level);
-void __printf(4, 5)
-nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(3, 4)
+nv_printk_(struct nouveau_object *, int, const char *, ...);
 
 #define nv_printk(o,l,f,a...) do {                                             \
 	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
-		nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a);   \
+		nv_printk_(nv_object(o), NV_DBG_##l, f, ##a);                  \
 } while(0)
 
 #define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
@@ -37,16 +22,9 @@
 #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
 #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
 
-#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
-
-static inline void nv_suspend_set_printk_level(int level)
-{
-	nv_printk_suspend_level = level;
-}
-
 #define nv_assert(f,a...) do {                                                 \
 	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
-		nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a);  \
+		nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a);                   \
 	BUG_ON(1);                                                             \
 } while(0)
 
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 633c2f8..8c32cf4 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -101,14 +101,14 @@
 #define _nouveau_fifo_init _nouveau_engine_init
 #define _nouveau_fifo_fini _nouveau_engine_fini
 
-extern struct nouveau_oclass nv04_fifo_oclass;
-extern struct nouveau_oclass nv10_fifo_oclass;
-extern struct nouveau_oclass nv17_fifo_oclass;
-extern struct nouveau_oclass nv40_fifo_oclass;
-extern struct nouveau_oclass nv50_fifo_oclass;
-extern struct nouveau_oclass nv84_fifo_oclass;
-extern struct nouveau_oclass nvc0_fifo_oclass;
-extern struct nouveau_oclass nve0_fifo_oclass;
+extern struct nouveau_oclass *nv04_fifo_oclass;
+extern struct nouveau_oclass *nv10_fifo_oclass;
+extern struct nouveau_oclass *nv17_fifo_oclass;
+extern struct nouveau_oclass *nv40_fifo_oclass;
+extern struct nouveau_oclass *nv50_fifo_oclass;
+extern struct nouveau_oclass *nv84_fifo_oclass;
+extern struct nouveau_oclass *nvc0_fifo_oclass;
+extern struct nouveau_oclass *nve0_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
 int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
index 1d1a89a..9b0d938 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -42,10 +42,13 @@
 
 extern struct nouveau_oclass nv31_mpeg_oclass;
 extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv44_mpeg_oclass;
 extern struct nouveau_oclass nv50_mpeg_oclass;
 extern struct nouveau_oclass nv84_mpeg_oclass;
-
+extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
+extern struct nouveau_oclass nv31_mpeg_cclass;
 extern struct nouveau_oclass nv31_mpeg_sclass[];
+extern struct nouveau_oclass nv40_mpeg_sclass[];
 void nv31_mpeg_intr(struct nouveau_subdev *);
 void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
 int  nv31_mpeg_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
new file mode 100644
index 0000000..49b0024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_PERFMON_H__
+#define __NVKM_PERFMON_H__
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/class.h>
+
+struct nouveau_perfdom;
+struct nouveau_perfctr;
+struct nouveau_perfmon {
+	struct nouveau_engine base;
+
+	struct nouveau_perfctx *context;
+	void *profile_data;
+
+	struct list_head domains;
+	u32 sequence;
+
+	/*XXX: temp for daemon backend */
+	u32 pwr[8];
+	u32 last;
+};
+
+static inline struct nouveau_perfmon *
+nouveau_perfmon(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
+}
+
+extern struct nouveau_oclass *nv40_perfmon_oclass;
+extern struct nouveau_oclass *nv50_perfmon_oclass;
+extern struct nouveau_oclass *nv84_perfmon_oclass;
+extern struct nouveau_oclass *nva3_perfmon_oclass;
+extern struct nouveau_oclass nvc0_perfmon_oclass;
+extern struct nouveau_oclass nve0_perfmon_oclass;
+extern struct nouveau_oclass nvf0_perfmon_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index 4579948..23a462b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,19 +3,10 @@
 
 #include <core/engine.h>
 #include <core/engctx.h>
-#include <core/event.h>
 
 struct nouveau_software_chan {
 	struct nouveau_engctx base;
 
-	struct {
-		struct nouveau_eventh event;
-		u32 channel;
-		u32 ctxdma;
-		u64 offset;
-		u32 value;
-	} vblank;
-
 	int (*flip)(void *);
 	void *flip_data;
 };
@@ -50,10 +41,10 @@
 #define _nouveau_software_init _nouveau_engine_init
 #define _nouveau_software_fini _nouveau_engine_fini
 
-extern struct nouveau_oclass nv04_software_oclass;
-extern struct nouveau_oclass nv10_software_oclass;
-extern struct nouveau_oclass nv50_software_oclass;
-extern struct nouveau_oclass nvc0_software_oclass;
+extern struct nouveau_oclass *nv04_software_oclass;
+extern struct nouveau_oclass *nv10_software_oclass;
+extern struct nouveau_oclass *nv50_software_oclass;
+extern struct nouveau_oclass *nvc0_software_oclass;
 
 void nv04_software_intr(struct nouveau_subdev *);
 
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
new file mode 100644
index 0000000..662b207
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
@@ -0,0 +1,29 @@
+#ifndef __NVBIOS_BOOST_H__
+#define __NVBIOS_BOOST_H__
+
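+/* Naming convention for these BIOS table accessors (shared with the
+ * cstep/perf/rammap tables below): ...Te locates the table header,
+ * ...Ee/...Se return the address of a main/sub-entry by index, the 'p'
+ * variants additionally parse the entry into a struct, and ...Em matches
+ * an entry by pstate.
+ */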
+u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+
+struct nvbios_boostE {
+	u8  pstate;
+	u32 min;
+	u32 max;
+};
+
+u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+		   struct nvbios_boostE *);
+u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+		   struct nvbios_boostE *);
+
+struct nvbios_boostS {
+	u8  domain;
+	u8  percent;
+	u32 min;
+	u32 max;
+};
+
+u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
+u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
+		   struct nvbios_boostS *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
new file mode 100644
index 0000000..a80a4380
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
@@ -0,0 +1,28 @@
+#ifndef __NVBIOS_CSTEP_H__
+#define __NVBIOS_CSTEP_H__
+
+u16 nvbios_cstepTe(struct nouveau_bios *,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_cstepE {
+	u8  pstate;
+	u8  index;
+};
+
+u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepE *);
+u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepE *);
+
+struct nvbios_cstepX {
+	u32 freq;
+	u8  unkn[2];
+	u8  voltage;
+};
+
+u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepX *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 96d3364..c7b2e58 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -7,7 +7,15 @@
 	DCB_GPIO_TVDAC1 = 0x2d,
 	DCB_GPIO_FAN = 0x09,
 	DCB_GPIO_FAN_SENSE = 0x3d,
-	DCB_GPIO_UNUSED = 0xff
+	DCB_GPIO_UNUSED = 0xff,
+	DCB_GPIO_VID0 = 0x04,
+	DCB_GPIO_VID1 = 0x05,
+	DCB_GPIO_VID2 = 0x06,
+	DCB_GPIO_VID3 = 0x1a,
+	DCB_GPIO_VID4 = 0x73,
+	DCB_GPIO_VID5 = 0x74,
+	DCB_GPIO_VID6 = 0x75,
+	DCB_GPIO_VID7 = 0x76,
 };
 
 #define DCB_GPIO_LOG_DIR     0x02
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
index 0b285e9..16ff06e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -3,6 +3,39 @@
 
 struct nouveau_bios;
 
+u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+		      u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+struct nvbios_perfE {
+	u8  pstate;
+	u8  fanspeed;
+	u8  voltage;
+	u32 core;
+	u32 shader;
+	u32 memory;
+	u32 vdec;
+	u32 disp;
+	u32 script;
+};
+
+u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_perfEp(struct nouveau_bios *, int idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
+
+struct nvbios_perfS {
+	union {
+		struct {
+			u32 freq;
+		} v40;
+	};
+};
+
+u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
+		  u8 *ver, u8 *hdr, u8 cnt, u8 len);
+u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
+		  u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
+
 struct nvbios_perf_fan {
 	u32 pwm_divisor;
 };
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
new file mode 100644
index 0000000..bc15e03
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -0,0 +1,11 @@
+#ifndef __NVBIOS_RAMMAP_H__
+#define __NVBIOS_RAMMAP_H__
+
+u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+			u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
new file mode 100644
index 0000000..963694b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -0,0 +1,8 @@
+#ifndef __NVBIOS_TIMING_H__
+#define __NVBIOS_TIMING_H__
+
+u16 nvbios_timing_table(struct nouveau_bios *,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
new file mode 100644
index 0000000..ad5a8f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
@@ -0,0 +1,25 @@
+#ifndef __NVBIOS_VMAP_H__
+#define __NVBIOS_VMAP_H__
+
+struct nouveau_bios;
+
+struct nvbios_vmap {
+};
+
+u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_vmap *);
+
+struct nvbios_vmap_entry {
+	u8  unk0;
+	u8  link;
+	u32 min;
+	u32 max;
+	s32 arg[6];
+};
+
+u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+			    struct nvbios_vmap_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
new file mode 100644
index 0000000..6a11dcd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_VOLT_H__
+#define __NVBIOS_VOLT_H__
+
+struct nouveau_bios;
+
+struct nvbios_volt {
+	u8  vidmask;
+	u32 min;
+	u32 max;
+	u32 base;
+	s16 step;
+};
+
+u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_volt *);
+
+struct nvbios_volt_entry {
+	u32 voltage;
+	u8  vid;
+};
+
+u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+			    struct nvbios_volt_entry *);
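+
+/* Sketch: building a VID->voltage map by walking the entry table
+ * (add_vid() is hypothetical):
+ *
+ *	struct nvbios_volt_entry ivid;
+ *	u8 ver, len;
+ *	int i;
+ *
+ *	for (i = 0; nvbios_volt_entry_parse(bios, i, &ver, &len, &ivid); i++)
+ *		add_vid(ivid.vid, ivid.voltage);
+ */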
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
index 7d88ec4..697f7ce 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -11,6 +11,8 @@
 
 struct nouveau_bus {
 	struct nouveau_subdev base;
+	int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+	u32 hwsq_size;
 };
 
 static inline struct nouveau_bus *
@@ -33,9 +35,19 @@
 #define _nouveau_bus_init _nouveau_subdev_init
 #define _nouveau_bus_fini _nouveau_subdev_fini
 
-extern struct nouveau_oclass nv04_bus_oclass;
-extern struct nouveau_oclass nv31_bus_oclass;
-extern struct nouveau_oclass nv50_bus_oclass;
-extern struct nouveau_oclass nvc0_bus_oclass;
+extern struct nouveau_oclass *nv04_bus_oclass;
+extern struct nouveau_oclass *nv31_bus_oclass;
+extern struct nouveau_oclass *nv50_bus_oclass;
+extern struct nouveau_oclass *nv94_bus_oclass;
+extern struct nouveau_oclass *nvc0_bus_oclass;
+
+/* interface to the PBUS hardware sequencer (HWSQ) */
+struct nouveau_hwsq;
+int  nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
+int  nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
+void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
+void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
+void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
+void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
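+
+/* Sketch of driving the sequencer: ops are accumulated into a script,
+ * then executed (or thrown away) at fini time.
+ *
+ *	struct nouveau_hwsq *hwsq;
+ *	int ret = nouveau_hwsq_init(pbus, &hwsq);
+ *	if (ret == 0) {
+ *		nouveau_hwsq_wr32(hwsq, addr, data);
+ *		nouveau_hwsq_nsec(hwsq, 2000);
+ *		ret = nouveau_hwsq_fini(&hwsq, true);  // exec == true: run it
+ *	}
+ */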
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 89ee289..e2675bc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -7,9 +7,78 @@
 struct nouveau_pll_vals;
 struct nvbios_pll;
 
+enum nv_clk_src {
+	nv_clk_src_crystal,
+	nv_clk_src_href,
+
+	nv_clk_src_hclk,
+	nv_clk_src_hclkm3,
+	nv_clk_src_hclkm3d2,
+
+	nv_clk_src_host,
+
+	nv_clk_src_sppll0,
+	nv_clk_src_sppll1,
+
+	nv_clk_src_mpllsrcref,
+	nv_clk_src_mpllsrc,
+	nv_clk_src_mpll,
+	nv_clk_src_mdiv,
+
+	nv_clk_src_core,
+	nv_clk_src_shader,
+
+	nv_clk_src_mem,
+
+	nv_clk_src_gpc,
+	nv_clk_src_rop,
+	nv_clk_src_hubk01,
+	nv_clk_src_hubk06,
+	nv_clk_src_hubk07,
+	nv_clk_src_copy,
+	nv_clk_src_daemon,
+	nv_clk_src_disp,
+	nv_clk_src_vdec,
+
+	nv_clk_src_dom6,
+
+	nv_clk_src_max,
+};
+
+struct nouveau_cstate {
+	struct list_head head;
+	u8  voltage;
+	u32 domain[nv_clk_src_max];
+};
+
+struct nouveau_pstate {
+	struct list_head head;
+	struct list_head list; /* c-states */
+	struct nouveau_cstate base;
+	u8 pstate;
+	u8 fanspeed;
+};
+
 struct nouveau_clock {
 	struct nouveau_subdev base;
 
+	struct nouveau_clocks *domains;
+	struct nouveau_pstate bstate;
+
+	struct list_head states;
+	int state_nr;
+
+	int pstate; /* current */
+	int ustate; /* user-requested (-1 disabled, -2 perfmon) */
+	int astate; /* perfmon adjustment (base) */
+	int tstate; /* thermal adjustment (max-) */
+	int dstate; /* display adjustment (min+) */
+
+	int  (*read)(struct nouveau_clock *, enum nv_clk_src);
+	int  (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
+	int  (*prog)(struct nouveau_clock *);
+	void (*tidy)(struct nouveau_clock *);
+
 	/*XXX: die, these are here *only* to support the completely
 	 *     bat-shit insane what-was-nouveau_hw.c code
 	 */
@@ -25,27 +94,42 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
 }
 
-#define nouveau_clock_create(p,e,o,d)                                          \
-	nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
-#define nouveau_clock_destroy(p)                                               \
-	nouveau_subdev_destroy(&(p)->base)
-#define nouveau_clock_init(p)                                                  \
-	nouveau_subdev_init(&(p)->base)
+struct nouveau_clocks {
+	enum nv_clk_src name;
+	u8 bios; /* 0xff for none */
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+	u8 flags;
+	const char *mname;
+	int mdiv;
+};
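+
+/* A per-chipset domain table is an nv_clk_src_max-terminated array,
+ * e.g. (sketch along the lines of the nv50 implementation):
+ *
+ *	static struct nouveau_clocks
+ *	nvxx_domains[] = {
+ *		{ nv_clk_src_crystal, 0xff },
+ *		{ nv_clk_src_core   , 0xff, 0, "core", 0 },
+ *		{ nv_clk_src_max }
+ *	};
+ */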
+
+#define nouveau_clock_create(p,e,o,i,d)                                        \
+	nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nouveau_clock_destroy(p) ({                                            \
+	struct nouveau_clock *clk = (p);                                       \
+	_nouveau_clock_dtor(nv_object(clk));                                   \
+})
+#define nouveau_clock_init(p) ({                                               \
+	struct nouveau_clock *clk = (p);                                       \
+	_nouveau_clock_init(nv_object(clk));                                   \
+})
 #define nouveau_clock_fini(p,s)                                                \
 	nouveau_subdev_fini(&(p)->base, (s))
 
 int  nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
-			   struct nouveau_oclass *, void *, u32, int, void **);
-
-#define _nouveau_clock_dtor _nouveau_subdev_dtor
-#define _nouveau_clock_init _nouveau_subdev_init
+			   struct nouveau_oclass *,
+			   struct nouveau_clocks *, int, void **);
+void _nouveau_clock_dtor(struct nouveau_object *);
+int _nouveau_clock_init(struct nouveau_object *);
 #define _nouveau_clock_fini _nouveau_subdev_fini
 
 extern struct nouveau_oclass nv04_clock_oclass;
 extern struct nouveau_oclass nv40_clock_oclass;
-extern struct nouveau_oclass nv50_clock_oclass;
+extern struct nouveau_oclass *nv50_clock_oclass;
+extern struct nouveau_oclass *nv84_clock_oclass;
 extern struct nouveau_oclass nva3_clock_oclass;
 extern struct nouveau_oclass nvc0_clock_oclass;
+extern struct nouveau_oclass nve0_clock_oclass;
 
 int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
 int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -55,4 +139,9 @@
 int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
 			int clk, struct nouveau_pll_vals *);
 
+int nouveau_clock_ustate(struct nouveau_clock *, int req);
+int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
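+
+/* The effective pstate is resolved from the four struct nouveau_clock
+ * fields these adjust: ustate pins an explicit user choice (or -1/-2 as
+ * noted on the struct), astate is the perfmon-driven base, tstate lowers
+ * the ceiling for thermal reasons and dstate raises the floor for display
+ * requirements; a reclock then runs calc() -> prog() -> tidy().
+ */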
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 2e74050..8541aa3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -78,23 +78,28 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
 }
 
-extern struct nouveau_oclass nv04_fb_oclass;
-extern struct nouveau_oclass nv10_fb_oclass;
-extern struct nouveau_oclass nv1a_fb_oclass;
-extern struct nouveau_oclass nv20_fb_oclass;
-extern struct nouveau_oclass nv25_fb_oclass;
-extern struct nouveau_oclass nv30_fb_oclass;
-extern struct nouveau_oclass nv35_fb_oclass;
-extern struct nouveau_oclass nv36_fb_oclass;
-extern struct nouveau_oclass nv40_fb_oclass;
-extern struct nouveau_oclass nv41_fb_oclass;
-extern struct nouveau_oclass nv44_fb_oclass;
-extern struct nouveau_oclass nv46_fb_oclass;
-extern struct nouveau_oclass nv47_fb_oclass;
-extern struct nouveau_oclass nv49_fb_oclass;
-extern struct nouveau_oclass nv4e_fb_oclass;
-extern struct nouveau_oclass nv50_fb_oclass;
-extern struct nouveau_oclass nvc0_fb_oclass;
+extern struct nouveau_oclass *nv04_fb_oclass;
+extern struct nouveau_oclass *nv10_fb_oclass;
+extern struct nouveau_oclass *nv1a_fb_oclass;
+extern struct nouveau_oclass *nv20_fb_oclass;
+extern struct nouveau_oclass *nv25_fb_oclass;
+extern struct nouveau_oclass *nv30_fb_oclass;
+extern struct nouveau_oclass *nv35_fb_oclass;
+extern struct nouveau_oclass *nv36_fb_oclass;
+extern struct nouveau_oclass *nv40_fb_oclass;
+extern struct nouveau_oclass *nv41_fb_oclass;
+extern struct nouveau_oclass *nv44_fb_oclass;
+extern struct nouveau_oclass *nv46_fb_oclass;
+extern struct nouveau_oclass *nv47_fb_oclass;
+extern struct nouveau_oclass *nv49_fb_oclass;
+extern struct nouveau_oclass *nv4e_fb_oclass;
+extern struct nouveau_oclass *nv50_fb_oclass;
+extern struct nouveau_oclass *nv84_fb_oclass;
+extern struct nouveau_oclass *nva3_fb_oclass;
+extern struct nouveau_oclass *nvaa_fb_oclass;
+extern struct nouveau_oclass *nvaf_fb_oclass;
+extern struct nouveau_oclass *nvc0_fb_oclass;
+extern struct nouveau_oclass *nve0_fb_oclass;
 
 struct nouveau_ram {
 	struct nouveau_object base;
@@ -121,6 +126,17 @@
 	int  (*get)(struct nouveau_fb *, u64 size, u32 align,
 		    u32 size_nc, u32 type, struct nouveau_mem **);
 	void (*put)(struct nouveau_fb *, struct nouveau_mem **);
+
+	int  (*calc)(struct nouveau_fb *, u32 freq);
+	int  (*prog)(struct nouveau_fb *);
+	void (*tidy)(struct nouveau_fb *);
+	struct {
+		u8  version;
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	u32 freq;
+	u32 mr[16];
 };
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7e4e277..9fa5da7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -60,13 +60,18 @@
 #define _nouveau_i2c_port_init nouveau_object_init
 #define _nouveau_i2c_port_fini nouveau_object_fini
 
+struct nouveau_i2c_board_info {
+	struct i2c_board_info dev;
+	u8 udelay; /* set to 0 to use the standard delay */
+};
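+
+/* identify() takes these wrappers so probing can be slowed down for
+ * devices that can't keep up at the port's normal rate; leaving udelay
+ * at 0 keeps the standard delay.
+ */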
+
 struct nouveau_i2c {
 	struct nouveau_subdev base;
 
 	struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
 	struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
 	int (*identify)(struct nouveau_i2c *, int index,
-			const char *what, struct i2c_board_info *,
+			const char *what, struct nouveau_i2c_board_info *,
 			bool (*match)(struct nouveau_i2c_port *,
 				      struct i2c_board_info *));
 	struct list_head ports;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index ce6569f..adc88b7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -11,7 +11,6 @@
 
 struct nouveau_mc {
 	struct nouveau_subdev base;
-	const struct nouveau_mc_intr *intr_map;
 	bool use_msi;
 };
 
@@ -21,8 +20,8 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,m,d)                                           \
-	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,d)                                             \
+	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({                                               \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
 })
@@ -34,20 +33,24 @@
 })
 
 int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, const struct nouveau_mc_intr *,
-			int, void **);
+			struct nouveau_oclass *, int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int  _nouveau_mc_init(struct nouveau_object *);
 int  _nouveau_mc_fini(struct nouveau_object *, bool);
 
-extern struct nouveau_oclass nv04_mc_oclass;
-extern struct nouveau_oclass nv44_mc_oclass;
-extern struct nouveau_oclass nv50_mc_oclass;
-extern struct nouveau_oclass nv98_mc_oclass;
-extern struct nouveau_oclass nvc0_mc_oclass;
+struct nouveau_mc_oclass {
+	struct nouveau_oclass base;
+	const struct nouveau_mc_intr *intr;
+	void (*msi_rearm)(struct nouveau_mc *);
+};
 
-extern const struct nouveau_mc_intr nv04_mc_intr[];
-int nv04_mc_init(struct nouveau_object *);
-int nv50_mc_init(struct nouveau_object *);
+extern struct nouveau_oclass *nv04_mc_oclass;
+extern struct nouveau_oclass *nv40_mc_oclass;
+extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv50_mc_oclass;
+extern struct nouveau_oclass *nv94_mc_oclass;
+extern struct nouveau_oclass *nv98_mc_oclass;
+extern struct nouveau_oclass *nvc0_mc_oclass;
+extern struct nouveau_oclass *nvc3_mc_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
new file mode 100644
index 0000000..c5c92cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -0,0 +1,80 @@
+#ifndef __NOUVEAU_PWR_H__
+#define __NOUVEAU_PWR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_pwr {
+	struct nouveau_subdev base;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} code;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} data;
+
+	struct {
+		u32 base;
+		u32 size;
+	} send;
+
+	struct {
+		u32 base;
+		u32 size;
+
+		struct work_struct work;
+		wait_queue_head_t wait;
+		u32 process;
+		u32 message;
+		u32 data[2];
+	} recv;
+
+	int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+};
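+
+/* message() posts a request to the PPWR firmware; a reading of the fields
+ * above (rather than documented behaviour) is that the reply is delivered
+ * asynchronously into .recv, matched by process/message, with waiters
+ * woken via .recv.wait.
+ */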
+
+static inline struct nouveau_pwr *
+nouveau_pwr(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
+}
+
+#define nouveau_pwr_create(p, e, o, d)                                         \
+	nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_pwr_destroy(p)                                                 \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_pwr_init(p) ({                                                 \
+	struct nouveau_pwr *ppwr = (p);                                        \
+	_nouveau_pwr_init(nv_object(ppwr));                                    \
+})
+#define nouveau_pwr_fini(p,s) ({                                               \
+	struct nouveau_pwr *ppwr = (p);                                        \
+	_nouveau_pwr_fini(nv_object(ppwr), (s));                               \
+})
+
+int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, int, void **);
+#define _nouveau_pwr_dtor _nouveau_subdev_dtor
+int _nouveau_pwr_init(struct nouveau_object *);
+int _nouveau_pwr_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nva3_pwr_oclass;
+extern struct nouveau_oclass nvc0_pwr_oclass;
+extern struct nouveau_oclass nvd0_pwr_oclass;
+extern struct nouveau_oclass nv108_pwr_oclass;
+
+/* interface to MEMX process running on PPWR */
+struct nouveau_memx;
+int  nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
+int  nouveau_memx_fini(struct nouveau_memx **, bool exec);
+void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
+void nouveau_memx_wait(struct nouveau_memx *,
+		       u32 addr, u32 mask, u32 data, u32 nsec);
+void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
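+
+/* Usage mirrors the HWSQ helpers in subdev/bus.h: init a script, queue
+ * wr32/wait/nsec operations, then fini with exec == true to hand the
+ * script to PPWR.
+ */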
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index c075998..69891d4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -71,6 +71,8 @@
 int  _nouveau_therm_init(struct nouveau_object *);
 int  _nouveau_therm_fini(struct nouveau_object *, bool);
 
+int  nouveau_therm_cstate(struct nouveau_therm *, int, int);
+
 extern struct nouveau_oclass nv40_therm_oclass;
 extern struct nouveau_oclass nv50_therm_oclass;
 extern struct nouveau_oclass nv84_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
new file mode 100644
index 0000000..820b62f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_VOLT_H__
+#define __NOUVEAU_VOLT_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_voltage {
+	u32 uv;
+	u8  id;
+};
+
+struct nouveau_volt {
+	struct nouveau_subdev base;
+
+	int (*vid_get)(struct nouveau_volt *);
+	int (*get)(struct nouveau_volt *);
+	int (*vid_set)(struct nouveau_volt *, u8 vid);
+	int (*set)(struct nouveau_volt *, u32 uv);
+	int (*set_id)(struct nouveau_volt *, u8 id, int condition);
+
+	u8 vid_mask;
+	u8 vid_nr;
+	struct {
+		u32 uv;
+		u8 vid;
+	} vid[256];
+};
+
+static inline struct nouveau_volt *
+nouveau_volt(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
+}
+
+#define nouveau_volt_create(p, e, o, d)                                        \
+	nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_volt_destroy(p) ({                                             \
+	struct nouveau_volt *v = (p);                                          \
+	_nouveau_volt_dtor(nv_object(v));                                      \
+})
+#define nouveau_volt_init(p) ({                                                \
+	struct nouveau_volt *v = (p);                                          \
+	_nouveau_volt_init(nv_object(v));                                      \
+})
+#define nouveau_volt_fini(p,s)                                                 \
+	nouveau_subdev_fini((p), (s))
+
+int  nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int, void **);
+void _nouveau_volt_dtor(struct nouveau_object *);
+int  _nouveau_volt_init(struct nouveau_object *);
+#define _nouveau_volt_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv40_volt_oclass;
+
+int nouveau_voltgpio_init(struct nouveau_volt *);
+int nouveau_voltgpio_get(struct nouveau_volt *);
+int nouveau_voltgpio_set(struct nouveau_volt *, u8);
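+
+/* The voltgpio helpers implement vid_get/vid_set in terms of the
+ * DCB_GPIO_VID0..7 lines (subdev/bios/gpio.h), with vid_mask selecting
+ * the VID bits a given board actually wires up.
+ */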
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
new file mode 100644
index 0000000..c1835e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/boost.h>
+
+u16
+nvbios_boostTe(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_P;
+	u16 boost = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			boost = nv_ro16(bios, bit_P.offset + 0x30);
+
+		if (boost) {
+			*ver = nv_ro08(bios, boost + 0);
+			switch (*ver) {
+			case 0x11:
+				*hdr = nv_ro08(bios, boost + 1);
+				*cnt = nv_ro08(bios, boost + 5);
+				*len = nv_ro08(bios, boost + 2);
+				*snr = nv_ro08(bios, boost + 4);
+				*ssz = nv_ro08(bios, boost + 3);
+				return boost;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_boostEe(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (data && idx < *cnt) {
+		data = data + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_boostEp(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+	u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->min    =  nv_ro16(bios, data + 0x02) * 1000;
+		info->max    =  nv_ro16(bios, data + 0x04) * 1000;
+	}
+	return data;
+}
+
+u16
+nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+	u32 data, idx = 0;
+	while ((data = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info))) {
+		if (info->pstate == pstate)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_boostSe(struct nouveau_bios *bios, int idx,
+	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+	if (data && idx < cnt) {
+		data = data + *hdr + (idx * len);
+		*hdr = len;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_boostSp(struct nouveau_bios *bios, int idx,
+	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+	       struct nvbios_boostS *info)
+{
+	data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->domain  = nv_ro08(bios, data + 0x00);
+		info->percent = nv_ro08(bios, data + 0x01);
+		info->min     = nv_ro16(bios, data + 0x02) * 1000;
+		info->max     = nv_ro16(bios, data + 0x04) * 1000;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
new file mode 100644
index 0000000..d3b1532
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/cstep.h>
+
+u16
+nvbios_cstepTe(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
+{
+	struct bit_entry bit_P;
+	u16 cstep = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			cstep = nv_ro16(bios, bit_P.offset + 0x34);
+
+		if (cstep) {
+			*ver = nv_ro08(bios, cstep + 0);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, cstep + 1);
+				*cnt = nv_ro08(bios, cstep + 3);
+				*len = nv_ro08(bios, cstep + 2);
+				*xnr = nv_ro08(bios, cstep + 5);
+				*xsz = nv_ro08(bios, cstep + 4);
+				return cstep;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+	u8  cnt, len, xnr, xsz;
+	u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+	if (data && idx < cnt) {
+		data = data + *hdr + (idx * len);
+		*hdr = len;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepE *info)
+{
+	u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->index   = nv_ro08(bios, data + 0x03);
+	}
+	return data;
+}
+
+u16
+nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepE *info)
+{
+	u32 data, idx = 0;
+	while ((data = nvbios_cstepEp(bios, idx++, ver, hdr, info))) {
+		if (info->pstate == pstate)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+	u8  cnt, len, xnr, xsz;
+	u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+	if (data && idx < xnr) {
+		data = data + *hdr + (cnt * len) + (idx * xsz);
+		*hdr = xsz;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepX *info)
+{
+	u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->freq    = nv_ro16(bios, data + 0x00) * 1000;
+		info->unkn[0] = nv_ro08(bios, data + 0x02);
+		info->unkn[1] = nv_ro08(bios, data + 0x03);
+		info->voltage = nv_ro08(bios, data + 0x04);
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 663853b..7628fe7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -89,6 +89,7 @@
 		   struct nvbios_dpout *info)
 {
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
 		info->type = nv_ro16(bios, data + 0x00);
 		info->mask = nv_ro16(bios, data + 0x02);
@@ -99,9 +100,12 @@
 			info->script[0] = nv_ro16(bios, data + 0x06);
 			info->script[1] = nv_ro16(bios, data + 0x08);
 			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
-			info->script[2] = nv_ro16(bios, data + 0x0c);
-			info->script[3] = nv_ro16(bios, data + 0x0e);
-			info->script[4] = nv_ro16(bios, data + 0x10);
+			if (*len >= 0x0f) {
+				info->script[2] = nv_ro16(bios, data + 0x0c);
+				info->script[3] = nv_ro16(bios, data + 0x0e);
+			}
+			if (*len >= 0x11)
+				info->script[4] = nv_ro16(bios, data + 0x10);
 			break;
 		case 0x40:
 			info->flags     = nv_ro08(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 57cda2a..420908c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2180,7 +2180,7 @@
 	u16 data;
 
 	if (execute)
-		nv_suspend(bios, "running init tables\n");
+		nv_info(bios, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
@@ -2210,5 +2210,5 @@
 		ret = nvbios_exec(&init);
 	}
 
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
index bcbb056..675e221 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -26,8 +26,9 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-static u16
-perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u16
+nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
 	struct bit_entry bit_P;
 	u16 perf = 0x0000;
@@ -38,10 +39,22 @@
 			if (perf) {
 				*ver = nv_ro08(bios, perf + 0);
 				*hdr = nv_ro08(bios, perf + 1);
+				if (*ver >= 0x40 && *ver < 0x41) {
+					*cnt = nv_ro08(bios, perf + 5);
+					*len = nv_ro08(bios, perf + 2);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 3);
+					return perf;
+				} else
+				if (*ver >= 0x20 && *ver < 0x40) {
+					*cnt = nv_ro08(bios, perf + 2);
+					*len = nv_ro08(bios, perf + 3);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 5);
+					return perf;
+				}
 			}
-		} else
-			nv_error(bios, "unknown offset for perf in BIT P %d\n",
-				bit_P.version);
+		}
 	}
 
 	if (bios->bmp_offset) {
@@ -50,19 +63,132 @@
 			if (perf) {
 				*hdr = nv_ro08(bios, perf + 0);
 				*ver = nv_ro08(bios, perf + 1);
+				*cnt = nv_ro08(bios, perf + 2);
+				*len = nv_ro08(bios, perf + 3);
+				*snr = 0;
+				*ssz = 0;
+				return perf;
 			}
 		}
 	}
 
+	return 0x0000;
+}
+
+u16
+nvbios_perf_entry(struct nouveau_bios *bios, int idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (perf && idx < *cnt) {
+		perf = perf + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return perf;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_perfEp(struct nouveau_bios *bios, int idx,
+	      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	      struct nvbios_perfE *info)
+{
+	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	info->pstate = nv_ro08(bios, perf + 0x00);
+	switch (!!perf * *ver) {
+	case 0x12:
+	case 0x13:
+	case 0x14:
+		info->core     = nv_ro32(bios, perf + 0x01) * 10;
+		info->memory   = nv_ro32(bios, perf + 0x05) * 20;
+		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		if (*hdr > 0x38)
+			info->voltage = nv_ro08(bios, perf + 0x38);
+		break;
+	case 0x21:
+	case 0x23:
+	case 0x24:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage  = nv_ro08(bios, perf + 0x05);
+		info->shader   = nv_ro16(bios, perf + 0x06) * 1000;
+		info->core     = info->shader + (signed char)
+				 nv_ro08(bios, perf + 0x08) * 1000;
+		switch (nv_device(bios)->chipset) {
+		case 0x49:
+		case 0x4b:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			break;
+		default:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			break;
+		}
+		break;
+	case 0x25:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage  = nv_ro08(bios, perf + 0x05);
+		info->core     = nv_ro16(bios, perf + 0x06) * 1000;
+		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		break;
+	case 0x30:
+		info->script   = nv_ro16(bios, perf + 0x02);
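+		/* fallthrough: v0x30 shares the remaining fields with v0x35 */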
+	case 0x35:
+		info->fanspeed = nv_ro08(bios, perf + 0x06);
+		info->voltage  = nv_ro08(bios, perf + 0x07);
+		info->core     = nv_ro16(bios, perf + 0x08) * 1000;
+		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->vdec     = nv_ro16(bios, perf + 0x10) * 1000;
+		info->disp     = nv_ro16(bios, perf + 0x14) * 1000;
+		break;
+	case 0x40:
+		info->voltage  = nv_ro08(bios, perf + 0x02);
+		break;
+	default:
+		return 0x0000;
+	}
 	return perf;
 }
 
+u32
+nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+	u32 data = 0x00000000;
+	if (idx < cnt) {
+		data = perfE + *hdr + (idx * len);
+		*hdr = len;
+	}
+	return data;
+}
+
+u32
+nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len,
+	      struct nvbios_perfS *info)
+{
+	u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x40:
+		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		break;
+	default:
+		break;
+	}
+	return data;
+}
+
 int
 nvbios_perf_fan_parse(struct nouveau_bios *bios,
 		      struct nvbios_perf_fan *fan)
 {
-	u8 ver = 0, hdr = 0, cnt = 0, len = 0;
-	u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+	u8  ver, hdr, cnt, len, snr, ssz;
+	u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
 	if (!perf)
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index f835501..1f76de5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -114,6 +114,7 @@
 	switch (nv_device(bios)->card_type) {
 	case NV_04:
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 		return nv04_pll_mapping;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
new file mode 100644
index 0000000..916fa9d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/rammap.h>
+
+u16
+nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		    u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_P;
+	u16 rammap = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			rammap = nv_ro16(bios, bit_P.offset + 4);
+
+		if (rammap) {
+			*ver = nv_ro08(bios, rammap + 0);
+			switch (*ver) {
+			case 0x10:
+			case 0x11:
+				*hdr = nv_ro08(bios, rammap + 1);
+				*cnt = nv_ro08(bios, rammap + 5);
+				*len = nv_ro08(bios, rammap + 2);
+				*snr = nv_ro08(bios, rammap + 4);
+				*ssz = nv_ro08(bios, rammap + 3);
+				return rammap;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (rammap && idx < *cnt) {
+		rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return rammap;
+	}
+	return 0x0000;
+}
+
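+/* return the first rammap entry whose [min,max] kHz range, held at entry
+ * offsets 0x00/0x02, contains 'khz' */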
+u16
+nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	int idx = 0;
+	u32 data;
+	while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+		if (khz >= nv_ro16(bios, data + 0x00) &&
+		    khz <= nv_ro16(bios, data + 0x02))
+			break;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
new file mode 100644
index 0000000..151c2d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/timing.h>
+
+u16
+nvbios_timing_table(struct nouveau_bios *bios,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 timing = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 1)
+			timing = nv_ro16(bios, bit_P.offset + 4);
+		else
+		if (bit_P.version == 2)
+			timing = nv_ro16(bios, bit_P.offset + 8);
+
+		if (timing) {
+			*ver = nv_ro08(bios, timing + 0);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, timing + 1);
+				*cnt = nv_ro08(bios, timing + 2);
+				*len = nv_ro08(bios, timing + 3);
+				return timing;
+			case 0x20:
+				*hdr = nv_ro08(bios, timing + 1);
+				*cnt = nv_ro08(bios, timing + 3);
+				*len = nv_ro08(bios, timing + 2);
+				return timing;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
+	if (timing && idx < cnt)
+		return timing + hdr + (idx * *len);
+	return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
new file mode 100644
index 0000000..f343a1b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vmap.h>
+
+u16
+nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 vmap = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2) {
+			vmap = nv_ro16(bios, bit_P.offset + 0x20);
+			if (vmap) {
+				*ver = nv_ro08(bios, vmap + 0);
+				switch (*ver) {
+				case 0x10:
+				case 0x20:
+					*hdr = nv_ro08(bios, vmap + 1);
+					*cnt = nv_ro08(bios, vmap + 3);
+					*len = nv_ro08(bios, vmap + 2);
+					return vmap;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_vmap *info)
+{
+	u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!vmap * *ver) {
+	case 0x10:
+	case 0x20:
+		break;
+	}
+	return vmap;
+}
+
+u16
+nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+	if (vmap && idx < cnt) {
+		vmap = vmap + hdr + (idx * *len);
+		return vmap;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+			struct nvbios_vmap_entry *info)
+{
+	u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!vmap * *ver) {
+	case 0x10:
+		info->link   = 0xff;
+		info->min    = nv_ro32(bios, vmap + 0x00);
+		info->max    = nv_ro32(bios, vmap + 0x04);
+		info->arg[0] = nv_ro32(bios, vmap + 0x08);
+		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
+		info->arg[2] = nv_ro32(bios, vmap + 0x10);
+		break;
+	case 0x20:
+		info->unk0   = nv_ro08(bios, vmap + 0x00);
+		info->link   = nv_ro08(bios, vmap + 0x01);
+		info->min    = nv_ro32(bios, vmap + 0x02);
+		info->max    = nv_ro32(bios, vmap + 0x06);
+		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
+		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
+		info->arg[2] = nv_ro32(bios, vmap + 0x12);
+		info->arg[3] = nv_ro32(bios, vmap + 0x16);
+		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
+		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+		break;
+	}
+	return vmap;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
new file mode 100644
index 0000000..bb590de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/volt.h>
+
+u16
+nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 volt = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			volt = nv_ro16(bios, bit_P.offset + 0x0c);
+		else
+		if (bit_P.version == 1)
+			volt = nv_ro16(bios, bit_P.offset + 0x10);
+
+		if (volt) {
+			*ver = nv_ro08(bios, volt + 0);
+			switch (*ver) {
+			case 0x12:
+				*hdr = 5;
+				*cnt = nv_ro08(bios, volt + 2);
+				*len = nv_ro08(bios, volt + 1);
+				return volt;
+			case 0x20:
+				*hdr = nv_ro08(bios, volt + 1);
+				*cnt = nv_ro08(bios, volt + 2);
+				*len = nv_ro08(bios, volt + 3);
+				return volt;
+			case 0x30:
+			case 0x40:
+			case 0x50:
+				*hdr = nv_ro08(bios, volt + 1);
+				*cnt = nv_ro08(bios, volt + 3);
+				*len = nv_ro08(bios, volt + 2);
+				return volt;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_volt *info)
+{
+	u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!volt * *ver) {
+	case 0x12:
+		info->vidmask = nv_ro08(bios, volt + 0x04);
+		break;
+	case 0x20:
+		info->vidmask = nv_ro08(bios, volt + 0x05);
+		break;
+	case 0x30:
+		info->vidmask = nv_ro08(bios, volt + 0x04);
+		break;
+	case 0x40:
+		info->base    = nv_ro32(bios, volt + 0x04);
+		info->step    = nv_ro16(bios, volt + 0x08);
+		info->vidmask = nv_ro08(bios, volt + 0x0b);
+		/*XXX*/
+		info->min     = 0;
+		info->max     = info->base;
+		break;
+	case 0x50:
+		info->vidmask = nv_ro08(bios, volt + 0x06);
+		info->min     = nv_ro32(bios, volt + 0x0a);
+		info->max     = nv_ro32(bios, volt + 0x0e);
+		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
+		info->step    = nv_ro16(bios, volt + 0x16);
+		break;
+	}
+	return volt;
+}
+
+u16
+nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+	if (volt && idx < cnt) {
+		volt = volt + hdr + (idx * *len);
+		return volt;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+			struct nvbios_volt_entry *info)
+{
+	u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!volt * *ver) {
+	case 0x12:
+	case 0x20:
+		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+		info->vid     = nv_ro08(bios, volt + 0x01);
+		break;
+	case 0x30:
+		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+		info->vid     = nv_ro08(bios, volt + 0x01) >> 2;
+		break;
+	case 0x40:
+	case 0x50:
+		break;
+	}
+	return volt;
+}
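In nvbios_volt_entry_parse() above, the raw voltage byte is stored in 10mV steps while nouveau tracks voltages in microvolts, hence the "* 10000"; the v0x30 layout additionally keeps the VID in bits 7:2 of the second byte, hence the ">> 2". A small worked instance of the scaling (the microvolt convention is inferred from the volt subdev; the raw value is made up):

#include <stdio.h>

int
main(void)
{
	unsigned char raw = 0x6e;      /* 110 steps of 10mV */
	unsigned int uv = raw * 10000; /* 1100000uV */

	printf("%u uV (%u.%02u V)\n", uv,
	       uv / 1000000, (uv % 1000000) / 10000);
	return 0;
}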
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
new file mode 100644
index 0000000..f757470
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/timer.h>
+#include <subdev/bus.h>
+
+struct nouveau_hwsq {
+	struct nouveau_bus *pbus;
+	u32 addr;
+	u32 data;
+	struct {
+		u8 data[512];
+		u8 size;
+	} c;
+};
+
+static void
+hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
+{
+	memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
+	hwsq->c.size += size;
+}
+
+int
+nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
+{
+	struct nouveau_hwsq *hwsq;
+
+	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
+	if (hwsq) {
+		hwsq->pbus = pbus;
+		hwsq->addr = ~0;
+		hwsq->data = ~0;
+		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
+		hwsq->c.size = 0;
+	}
+
+	return hwsq ? 0 : -ENOMEM;
+}
+
+int
+nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
+{
+	struct nouveau_hwsq *hwsq = *phwsq;
+	int ret = 0, i;
+	if (hwsq) {
+		struct nouveau_bus *pbus = hwsq->pbus;
+		hwsq->c.size = (hwsq->c.size + 4) / 4;
+		if (hwsq->c.size <= pbus->hwsq_size) {
+			if (exec)
+				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
+						      hwsq->c.size);
+			if (ret)
+				nv_error(pbus, "hwsq exec failed: %d\n", ret);
+		} else {
+			nv_error(pbus, "hwsq ucode too large\n");
+			ret = -ENOSPC;
+		}
+
+		for (i = 0; ret && i < hwsq->c.size; i++)
+			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+
+		*phwsq = NULL;
+		kfree(hwsq);
+	}
+	return ret;
+}
+
+void
+nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
+{
+	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+
+	if (hwsq->data != data) {
+		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
+			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
+						  data >> 16, data >> 24 });
+		} else {
+			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
+		}
+	}
+
+	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
+		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
+					  addr >> 16, addr >> 24 });
+	} else {
+		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
+	}
+
+	hwsq->addr = addr;
+	hwsq->data = data;
+}
+
+void
+nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
+{
+	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+	flag += 0x80;
+	if (data >= 0)
+		flag += 0x20;
+	if (data >= 1)
+		flag += 0x20;
+	hwsq_cmd(hwsq, 1, (u8[]){ flag });
+}
+
+void
+nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
+{
+	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
+}
+
+void
+nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
+{
+	u8 shift = 0, usec = nsec / 1000;
+	while (usec & ~3) {
+		usec >>= 2;
+		shift++;
+	}
+
+	nv_debug(hwsq->pbus, "    DELAY = %d ns\n", nsec);
+	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
+}
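nouveau_hwsq_nsec() above packs a delay into a single opcode byte: a 2-bit mantissa plus a shift, giving roughly usec * 4^shift microseconds (the encoding truncates, so the programmed delay can undershoot the request). A standalone round-trip of that encoding, mirroring the kernel loop:

#include <stdio.h>

static unsigned char
hwsq_nsec_op(unsigned int nsec)
{
	unsigned char shift = 0, usec = nsec / 1000;
	while (usec & ~3) {
		usec >>= 2;
		shift++;
	}
	return (shift << 2) | usec;
}

/* inverse: microseconds actually programmed by an opcode */
static unsigned int
hwsq_op_usec(unsigned char op)
{
	return (op & 3) << (2 * ((op >> 2) & 7));
}

int
main(void)
{
	unsigned char op = hwsq_nsec_op(8000); /* the 8us delay nv50 uses */
	printf("opcode 0x%02x -> %u us\n", op, hwsq_op_usec(op));
	return 0;
}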
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
new file mode 100644
index 0000000..12176f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
@@ -0,0 +1,113 @@
+#ifndef __NVKM_BUS_HWSQ_H__
+#define __NVKM_BUS_HWSQ_H__
+
+#include <subdev/bus.h>
+
+struct hwsq {
+	struct nouveau_subdev *subdev;
+	struct nouveau_hwsq *hwsq;
+	int sequence;
+};
+
+struct hwsq_reg {
+	int sequence;
+	bool force;
+	u32 addr[2];
+	u32 data;
+};
+
+static inline struct hwsq_reg
+hwsq_reg2(u32 addr1, u32 addr2)
+{
+	return (struct hwsq_reg) {
+		.sequence = 0,
+		.force = 0,
+		.addr = { addr1, addr2 },
+		.data = 0xdeadbeef,
+	};
+}
+
+static inline struct hwsq_reg
+hwsq_reg(u32 addr)
+{
+	return hwsq_reg2(addr, addr);
+}
+
+static inline int
+hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	int ret;
+
+	ret = nouveau_hwsq_init(pbus, &ram->hwsq);
+	if (ret)
+		return ret;
+
+	ram->sequence++;
+	ram->subdev = subdev;
+	return 0;
+}
+
+static inline int
+hwsq_exec(struct hwsq *ram, bool exec)
+{
+	int ret = 0;
+	if (ram->subdev) {
+		ret = nouveau_hwsq_fini(&ram->hwsq, exec);
+		ram->subdev = NULL;
+	}
+	return ret;
+}
+
+static inline u32
+hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
+{
+	if (reg->sequence != ram->sequence)
+		reg->data = nv_rd32(ram->subdev, reg->addr[0]);
+	return reg->data;
+}
+
+static inline void
+hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
+{
+	reg->sequence = ram->sequence;
+	reg->data = data;
+	if (reg->addr[0] != reg->addr[1])
+		nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
+	nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+}
+
+static inline void
+hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
+{
+	reg->force = true;
+}
+
+static inline u32
+hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
+{
+	u32 temp = hwsq_rd32(ram, reg);
+	if (temp != ((temp & ~mask) | data) || reg->force)
+		hwsq_wr32(ram, reg, (temp & ~mask) | data);
+	return temp;
+}
+
+static inline void
+hwsq_setf(struct hwsq *ram, u8 flag, int data)
+{
+	nouveau_hwsq_setf(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
+{
+	nouveau_hwsq_wait(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_nsec(struct hwsq *ram, u32 nsec)
+{
+	nouveau_hwsq_nsec(ram->hwsq, nsec);
+}
+
+#endif
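The hwsq_reg wrappers above implement a small shadow cache: hwsq_mask() only queues a write when the masked update changes the cached value (or the register was nuked); the sequence counter that limits hardware reads to one per script is left out of this sketch. A stripped-down illustration of that behaviour with the MMIO side stubbed out:

#include <stdbool.h>
#include <stdio.h>

struct reg {
	bool force;
	unsigned int data; /* shadow of the hardware value */
};

static unsigned int
mask(struct reg *r, unsigned int m, unsigned int d)
{
	unsigned int temp = r->data;                  /* hwsq_rd32() stand-in */
	unsigned int next = (temp & ~m) | d;

	if (temp != next || r->force) {
		r->data = next;
		printf("queue write 0x%08x\n", next); /* hwsq_wr32() stand-in */
	}
	return temp;
}

int
main(void)
{
	struct reg r = { .data = 0x00000333 };
	mask(&r, 0x00000333, 0x00000000); /* queued: value changes */
	mask(&r, 0x00000333, 0x00000000); /* silent: already set */
	return 0;
}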
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
index 8c7f805..23921b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv04_bus_intr(struct nouveau_subdev *subdev)
@@ -56,23 +52,6 @@
 }
 
 static int
-nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv04_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv04_bus_intr;
-	return 0;
-}
-
-static int
 nv04_bus_init(struct nouveau_object *object)
 {
 	struct nv04_bus_priv *priv = (void *)object;
@@ -83,13 +62,34 @@
 	return nouveau_bus_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv04_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_bus_impl *impl = (void *)oclass;
+	struct nv04_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = impl->intr;
+	priv->base.hwsq_exec = impl->hwsq_exec;
+	priv->base.hwsq_size = impl->hwsq_size;
+	return 0;
+}
+
+struct nouveau_oclass *
+nv04_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv04_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv04_bus_intr,
+}.base;
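The "&(struct nv04_bus_impl) { ... }.base" construct above exports a pointer to the embedded base class of a static compound literal; because the base is the first member, nv04_bus_ctor() can recover the full impl with a plain cast. The pattern in isolation, using generic names rather than the nouveau types:

#include <stdio.h>

struct base { int handle; };

struct impl {
	struct base base; /* must be first for the cast below */
	int extra;
};

static struct base *object = &(struct impl) {
	.base.handle = 0x04,
	.extra = 42,
}.base;

int
main(void)
{
	struct impl *impl = (struct impl *)object;
	printf("handle=0x%02x extra=%d\n", impl->base.handle, impl->extra);
	return 0;
}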
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
new file mode 100644
index 0000000..4d76024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_BUS_NV04_H__
+#define __NVKM_BUS_NV04_H__
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+	struct nouveau_bus base;
+};
+
+int  nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
+		   struct nouveau_oclass *, void *, u32,
+		   struct nouveau_object **);
+int  nv50_bus_init(struct nouveau_object *);
+void nv50_bus_intr(struct nouveau_subdev *);
+
+struct nv04_bus_impl {
+	struct nouveau_oclass base;
+	void (*intr)(struct nouveau_subdev *);
+	int  (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+	u32  hwsq_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
index 34132ae..94da46f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv31_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv31_bus_intr(struct nouveau_subdev *subdev)
@@ -71,7 +67,7 @@
 static int
 nv31_bus_init(struct nouveau_object *object)
 {
-	struct nv31_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -83,30 +79,14 @@
 	return 0;
 }
 
-static int
-nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv31_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv31_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv31_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x31),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv31_bus_ctor,
+struct nouveau_oclass *
+nv31_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x31),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv31_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv31_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
index f5b2117..11918f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -23,13 +23,27 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
+#include <subdev/timer.h>
 
-struct nv50_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
-static void
+static int
+nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+	struct nv50_bus_priv *priv = (void *)pbus;
+	int i;
+
+	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(pbus, 0x001304, 0x00000000);
+	for (i = 0; i < size; i++)
+		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
+	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+	nv_wr32(pbus, 0x00130c, 0x00000003);
+
+	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+void
 nv50_bus_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_bus *pbus = nouveau_bus(subdev);
@@ -61,10 +75,10 @@
 	}
 }
 
-static int
+int
 nv50_bus_init(struct nouveau_object *object)
 {
-	struct nv50_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -76,30 +90,16 @@
 	return 0;
 }
 
-static int
-nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv50_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv50_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv50_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_bus_ctor,
+struct nouveau_oclass *
+nv50_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv50_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv50_bus_intr,
+	.hwsq_exec = nv50_bus_hwsq_exec,
+	.hwsq_size = 64,
+}.base;
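nv50_bus_hwsq_exec() above uploads the ucode words, kicks the engine via 0x00130c, then spins in nv_wait() until the busy bit in 0x001308 drops, mapping a timeout to -ETIMEDOUT. A self-contained sketch of the poll-until-masked-value idiom, with the register read faked:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg = 0x00000100; /* busy bit set */

static unsigned int
rd32(void)
{
	static int polls;
	if (++polls > 3)                /* pretend the engine finishes */
		fake_reg &= ~0x00000100;
	return fake_reg;
}

/* the real nv_wait() bounds this with a nanosecond timer, not a count */
static bool
wait_eq(unsigned int mask, unsigned int data, int tries)
{
	while (tries--) {
		if ((rd32() & mask) == data)
			return true;
	}
	return false;                   /* caller returns -ETIMEDOUT */
}

int
main(void)
{
	printf("hwsq %s\n",
	       wait_eq(0x00000100, 0x00000000, 16) ? "done" : "timed out");
	return 0;
}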
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
new file mode 100644
index 0000000..d365905
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/timer.h>
+
+#include "nv04.h"
+
+static int
+nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+	struct nv50_bus_priv *priv = (void *)pbus;
+	int i;
+
+	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(pbus, 0x001304, 0x00000000);
+	nv_wr32(pbus, 0x001318, 0x00000000);
+	for (i = 0; i < size; i++)
+		nv_wr32(priv, 0x080000 + (i * 4), data[i]);
+	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+	nv_wr32(pbus, 0x00130c, 0x00000001);
+
+	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+struct nouveau_oclass *
+nv94_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x94),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv50_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+	.intr = nv50_bus_intr,
+	.hwsq_exec = nv94_bus_hwsq_exec,
+	.hwsq_size = 128,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
index b192d62..73839d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nvc0_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nvc0_bus_intr(struct nouveau_subdev *subdev)
@@ -60,7 +56,7 @@
 static int
 nvc0_bus_init(struct nouveau_object *object)
 {
-	struct nvc0_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -72,30 +68,14 @@
 	return 0;
 }
 
-static int
-nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nvc0_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nvc0_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvc0_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_bus_ctor,
+struct nouveau_oclass *
+nvc0_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nvc0_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nvc0_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
new file mode 100644
index 0000000..e2938a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/volt.h>
+#include <subdev/fb.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/boost.h>
+#include <subdev/bios/cstep.h>
+#include <subdev/bios/perf.h>
+
+/******************************************************************************
+ * misc
+ *****************************************************************************/
+static u32
+nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
+		     u8 pstate, u8 domain, u32 input)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nvbios_boostE boostE;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
+	if (data) {
+		struct nvbios_boostS boostS;
+		u8  idx = 0, sver, shdr;
+		u16 subd;
+
+		input = max(boostE.min, input);
+		input = min(boostE.max, input);
+		do {
+			sver = ver;
+			shdr = hdr;
+			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
+					      cnt, len, &boostS);
+			if (subd && boostS.domain == domain) {
+				if (adjust)
+					input = input * boostS.percent / 100;
+				input = max(boostS.min, input);
+				input = min(boostS.max, input);
+				break;
+			}
+		} while (subd);
+	}
+
+	return input;
+}
+
+/******************************************************************************
+ * C-States
+ *****************************************************************************/
+static int
+nouveau_cstate_prog(struct nouveau_clock *clk,
+		    struct nouveau_pstate *pstate, int cstatei)
+{
+	struct nouveau_therm *ptherm = nouveau_therm(clk);
+	struct nouveau_volt *volt = nouveau_volt(clk);
+	struct nouveau_cstate *cstate;
+	int ret;
+
+	if (!list_empty(&pstate->list)) {
+		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+	} else {
+		cstate = &pstate->base;
+	}
+
+	ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+	if (ret && ret != -ENODEV) {
+		nv_error(clk, "failed to raise fan speed: %d\n", ret);
+		return ret;
+	}
+
+	ret = volt ? volt->set_id(volt, cstate->voltage, +1) : -ENODEV;
+	if (ret && ret != -ENODEV) {
+		nv_error(clk, "failed to raise voltage: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk->calc(clk, cstate);
+	if (ret == 0) {
+		ret = clk->prog(clk);
+		clk->tidy(clk);
+	}
+
+	ret = volt ? volt->set_id(volt, cstate->voltage, -1) : -ENODEV;
+	if (ret && ret != -ENODEV)
+		nv_error(clk, "failed to lower voltage: %d\n", ret);
+
+	ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+	if (ret && ret != -ENODEV)
+		nv_error(clk, "failed to lower fan speed: %d\n", ret);
+
+	return 0;
+}
+
+static void
+nouveau_cstate_del(struct nouveau_cstate *cstate)
+{
+	list_del(&cstate->head);
+	kfree(cstate);
+}
+
+static int
+nouveau_cstate_new(struct nouveau_clock *clk, int idx,
+		   struct nouveau_pstate *pstate)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nouveau_clocks *domain = clk->domains;
+	struct nouveau_cstate *cstate = NULL;
+	struct nvbios_cstepX cstepX;
+	u8  ver, hdr;
+	u16 data;
+
+	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
+	if (!data)
+		return -ENOENT;
+
+	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+	if (!cstate)
+		return -ENOMEM;
+
+	*cstate = pstate->base;
+	cstate->voltage = cstepX.voltage;
+
+	while (domain && domain->name != nv_clk_src_max) {
+		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+			u32 freq = nouveau_clock_adjust(clk, true,
+							pstate->pstate,
+							domain->bios,
+							cstepX.freq);
+			cstate->domain[domain->name] = freq;
+		}
+		domain++;
+	}
+
+	list_add(&cstate->head, &pstate->list);
+	return 0;
+}
+
+/******************************************************************************
+ * P-States
+ *****************************************************************************/
+static int
+nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
+{
+	struct nouveau_fb *pfb = nouveau_fb(clk);
+	struct nouveau_pstate *pstate;
+	int ret, idx = 0;
+
+	list_for_each_entry(pstate, &clk->states, head) {
+		if (idx++ == pstatei)
+			break;
+	}
+
+	nv_debug(clk, "setting performance state %d\n", pstatei);
+	clk->pstate = pstatei;
+
+	if (pfb->ram->calc) {
+		ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
+		if (ret == 0)
+			ret = pfb->ram->prog(pfb);
+		pfb->ram->tidy(pfb);
+	}
+
+	return nouveau_cstate_prog(clk, pstate, 0);
+}
+
+static int
+nouveau_pstate_calc(struct nouveau_clock *clk)
+{
+	int pstate, ret = 0;
+
+	nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
+		 clk->ustate, clk->astate, clk->tstate, clk->dstate);
+
+	if (clk->state_nr && clk->ustate != -1) {
+		pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
+		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+		pstate = max(pstate, clk->dstate);
+	} else {
+		pstate = clk->pstate = -1;
+	}
+
+	nv_trace(clk, "-> %d\n", pstate);
+	if (pstate != clk->pstate)
+		ret = nouveau_pstate_prog(clk, pstate);
+	return ret;
+}
+
+static void
+nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
+{
+	struct nouveau_clocks *clock = clk->domains - 1;
+	struct nouveau_cstate *cstate;
+	char info[3][32] = { "", "", "" };
+	char name[4] = "--";
+	int i = -1;
+
+	if (pstate->pstate != 0xff)
+		snprintf(name, sizeof(name), "%02x", pstate->pstate);
+
+	while ((++clock)->name != nv_clk_src_max) {
+		u32 lo = pstate->base.domain[clock->name];
+		u32 hi = lo;
+		if (hi == 0)
+			continue;
+
+		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+		list_for_each_entry(cstate, &pstate->list, head) {
+			u32 freq = cstate->domain[clock->name];
+			lo = min(lo, freq);
+			hi = max(hi, freq);
+			nv_debug(clk, "%10d KHz\n", freq);
+		}
+
+		if (clock->mname && ++i < ARRAY_SIZE(info)) {
+			lo /= clock->mdiv;
+			hi /= clock->mdiv;
+			if (lo == hi) {
+				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
+					 clock->mname, lo);
+			} else {
+				snprintf(info[i], sizeof(info[i]),
+					 "%s %d-%d MHz", clock->mname, lo, hi);
+			}
+		}
+	}
+
+	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+}
+
+static void
+nouveau_pstate_del(struct nouveau_pstate *pstate)
+{
+	struct nouveau_cstate *cstate, *temp;
+
+	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
+		nouveau_cstate_del(cstate);
+	}
+
+	list_del(&pstate->head);
+	kfree(pstate);
+}
+
+static int
+nouveau_pstate_new(struct nouveau_clock *clk, int idx)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nouveau_clocks *domain = clk->domains - 1;
+	struct nouveau_pstate *pstate;
+	struct nouveau_cstate *cstate;
+	struct nvbios_cstepE cstepE;
+	struct nvbios_perfE perfE;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
+	if (!data)
+		return -EINVAL;
+	if (perfE.pstate == 0xff)
+		return 0;
+
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	if (!pstate)
+		return -ENOMEM;
+	cstate = &pstate->base;
+
+	INIT_LIST_HEAD(&pstate->list);
+
+	pstate->pstate = perfE.pstate;
+	pstate->fanspeed = perfE.fanspeed;
+	cstate->voltage = perfE.voltage;
+	cstate->domain[nv_clk_src_core] = perfE.core;
+	cstate->domain[nv_clk_src_shader] = perfE.shader;
+	cstate->domain[nv_clk_src_mem] = perfE.memory;
+	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
+	cstate->domain[nv_clk_src_dom6] = perfE.disp;
+
+	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
+		struct nvbios_perfS perfS;
+		u8  sver = ver, shdr = hdr;
+		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
+					  &sver, &shdr, cnt, len, &perfS);
+		if (perfSe == 0 || sver != 0x40)
+			continue;
+
+		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+			perfS.v40.freq = nouveau_clock_adjust(clk, false,
+							      pstate->pstate,
+							      domain->bios,
+							      perfS.v40.freq);
+		}
+
+		cstate->domain[domain->name] = perfS.v40.freq;
+	}
+
+	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
+	if (data) {
+		int idx = cstepE.index;
+		do {
+			nouveau_cstate_new(clk, idx, pstate);
+		} while (idx--);
+	}
+
+	nouveau_pstate_info(clk, pstate);
+	list_add_tail(&pstate->head, &clk->states);
+	clk->state_nr++;
+	return 0;
+}
+
+/******************************************************************************
+ * Adjustment triggers
+ *****************************************************************************/
+static int
+nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
+{
+	struct nouveau_pstate *pstate;
+	int i = 0;
+
+	/* YKW repellent */
+	return -ENOSYS;
+
+	if (req != -1 && req != -2) {
+		list_for_each_entry(pstate, &clk->states, head) {
+			if (pstate->pstate == req)
+				break;
+			i++;
+		}
+
+		if (pstate->pstate != req)
+			return -EINVAL;
+		req = i;
+	}
+
+	clk->ustate = req;
+	return 0;
+}
+
+int
+nouveau_clock_ustate(struct nouveau_clock *clk, int req)
+{
+	int ret = nouveau_clock_ustate_update(clk, req);
+	if (ret)
+		return ret;
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->astate  = req;
+	if ( rel) clk->astate += rel;
+	clk->astate = min(clk->astate, clk->state_nr - 1);
+	clk->astate = max(clk->astate, 0);
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->tstate  = req;
+	if ( rel) clk->tstate += rel;
+	clk->tstate = min(clk->tstate, 0);
+	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->dstate  = req;
+	if ( rel) clk->dstate += rel;
+	clk->dstate = min(clk->dstate, clk->state_nr - 1);
+	clk->dstate = max(clk->dstate, 0);
+	return nouveau_pstate_calc(clk);
+}
+
+/******************************************************************************
+ * subdev base class implementation
+ *****************************************************************************/
+int
+_nouveau_clock_init(struct nouveau_object *object)
+{
+	struct nouveau_clock *clk = (void *)object;
+	struct nouveau_clocks *clock = clk->domains;
+	int ret;
+
+	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
+	INIT_LIST_HEAD(&clk->bstate.list);
+	clk->bstate.pstate = 0xff;
+
+	while (clock->name != nv_clk_src_max) {
+		ret = clk->read(clk, clock->name);
+		if (ret < 0) {
+			nv_error(clk, "%02x freq unknown\n", clock->name);
+			return ret;
+		}
+		clk->bstate.base.domain[clock->name] = ret;
+		clock++;
+	}
+
+	nouveau_pstate_info(clk, &clk->bstate);
+
+	clk->astate = clk->state_nr - 1;
+	clk->tstate = 0;
+	clk->dstate = 0;
+	clk->pstate = -1;
+	nouveau_pstate_calc(clk);
+	return 0;
+}
+
+void
+_nouveau_clock_dtor(struct nouveau_object *object)
+{
+	struct nouveau_clock *clk = (void *)object;
+	struct nouveau_pstate *pstate, *temp;
+
+	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
+		nouveau_pstate_del(pstate);
+	}
+
+	nouveau_subdev_destroy(&clk->base);
+}
+
+int
+nouveau_clock_create_(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass,
+		      struct nouveau_clocks *clocks,
+		      int length, void **object)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_clock *clk;
+	int ret, idx, arglen;
+	const char *mode;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
+				     "clock", length, object);
+	clk = *object;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&clk->states);
+	clk->domains = clocks;
+	clk->ustate = -1;
+
+	idx = 0;
+	do {
+		ret = nouveau_pstate_new(clk, idx++);
+	} while (ret == 0);
+
+	mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
+	if (mode) {
+		if (!strncasecmpz(mode, "disabled", arglen)) {
+			clk->ustate = -1;
+		} else {
+			char save = mode[arglen];
+			long v;
+
+			((char *)mode)[arglen] = '\0';
+			if (!kstrtol(mode, 0, &v))
+				nouveau_clock_ustate_update(clk, v);
+			((char *)mode)[arglen] = save;
+		}
+	}
+
+	return 0;
+}
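The NvClkMode handling at the end of nouveau_clock_create_() works on a non-terminated slice of the config string, so it patches a NUL over the delimiter for the kstrtol() call and restores the saved byte afterwards. The same trick in userspace C (option names and the strtol() stand-in are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char cfgopt[] = "NvClkMode=0x0f,NvBios=platform";
	char *mode = cfgopt + strlen("NvClkMode=");
	int arglen = 4;            /* length of the value slice: "0x0f" */
	char save = mode[arglen];
	long v;

	mode[arglen] = '\0';
	v = strtol(mode, NULL, 0); /* base 0, like kstrtol(mode, 0, &v) */
	mode[arglen] = save;

	printf("requested pstate %ld, string intact: %s\n", v, cfgopt);
	return 0;
}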
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index a142775..da50c1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -77,7 +77,7 @@
 	struct nv04_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 0db5dbf..db7346f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -23,11 +23,188 @@
  */
 
 #include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
 
 struct nv40_clock_priv {
 	struct nouveau_clock base;
+	u32 ctrl;
+	u32 npll_ctrl;
+	u32 npll_coef;
+	u32 spll;
 };
 
+static struct nouveau_clocks
+nv40_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_max }
+};
+
+static u32
+read_pll_1(struct nv40_clock_priv *priv, u32 reg)
+{
+	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	int P = (ctrl & 0x00070000) >> 16;
+	int N = (ctrl & 0x0000ff00) >> 8;
+	int M = (ctrl & 0x000000ff) >> 0;
+	u32 ref = 27000, clk = 0;
+
+	if (ctrl & 0x80000000)
+		clk = ref * N / M;
+
+	return clk >> P;
+}
+
+static u32
+read_pll_2(struct nv40_clock_priv *priv, u32 reg)
+{
+	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	u32 coef = nv_rd32(priv, reg + 0x04);
+	int N2 = (coef & 0xff000000) >> 24;
+	int M2 = (coef & 0x00ff0000) >> 16;
+	int N1 = (coef & 0x0000ff00) >> 8;
+	int M1 = (coef & 0x000000ff) >> 0;
+	int P = (ctrl & 0x00070000) >> 16;
+	u32 ref = 27000, clk = 0;
+
+	if ((ctrl & 0x80000000) && M1) {
+		clk = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				clk = clk * N2 / M2;
+			else
+				clk = 0;
+		}
+	}
+
+	return clk >> P;
+}
+
+static u32
+read_clk(struct nv40_clock_priv *priv, u32 src)
+{
+	switch (src) {
+	case 3:
+		return read_pll_2(priv, 0x004000);
+	case 2:
+		return read_pll_1(priv, 0x004008);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	u32 mast = nv_rd32(priv, 0x00c040);
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000; /*XXX: PCIE/AGP differ*/
+	case nv_clk_src_core:
+		return read_clk(priv, (mast & 0x00000003) >> 0);
+	case nv_clk_src_shader:
+		return read_clk(priv, (mast & 0x00000030) >> 4);
+	case nv_clk_src_mem:
+		return read_pll_2(priv, 0x4020);
+	default:
+		break;
+	}
+
+	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	return -EINVAL;
+}
+
+static int
+nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
+		    int *N1, int *M1, int *N2, int *M2, int *log2P)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll pll;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, &pll);
+	if (ret)
+		return ret;
+
+	if (clk < pll.vco1.max_freq)
+		pll.vco2.max_freq = 0;
+
+	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+	if (ret == 0)
+		return -ERANGE;
+	return ret;
+}
+
+static int
+nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	int gclk = cstate->domain[nv_clk_src_core];
+	int sclk = cstate->domain[nv_clk_src_shader];
+	int N1, M1, N2, M2, log2P;
+	int ret;
+
+	/* core/geometric clock */
+	ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
+				 &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		return ret;
+
+	if (N2 == M2) {
+		priv->npll_ctrl = 0x80000100 | (log2P << 16);
+		priv->npll_coef = (N1 << 8) | M1;
+	} else {
+		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
+		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+	/* use the second pll for shader/rop clock, if it differs from core */
+	if (sclk && sclk != gclk) {
+		ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
+					 &N1, &M1, NULL, NULL, &log2P);
+		if (ret < 0)
+			return ret;
+
+		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		priv->ctrl = 0x00000223;
+	} else {
+		priv->spll = 0x00000000;
+		priv->ctrl = 0x00000333;
+	}
+
+	return 0;
+}
+
+static int
+nv40_clock_prog(struct nouveau_clock *clk)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
+	nv_wr32(priv, 0x004004, priv->npll_coef);
+	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
+	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+	mdelay(5);
+	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+	return 0;
+}
+
+static void
+nv40_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
 static int
 nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_oclass *oclass, void *data, u32 size,
@@ -36,13 +213,17 @@
 	struct nv40_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	priv->base.pll_calc = nv04_clock_pll_calc;
 	priv->base.pll_prog = nv04_clock_pll_prog;
+	priv->base.read = nv40_clock_read;
+	priv->base.calc = nv40_clock_calc;
+	priv->base.prog = nv40_clock_prog;
+	priv->base.tidy = nv40_clock_tidy;
 	return 0;
 }
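read_pll_2() above computes the two-stage PLL output as ref * N1/M1, optionally scaled by N2/M2 when the second stage is engaged, then right-shifted by the post-divider P. A worked instance with a 27MHz crystal and made-up register values:

#include <stdio.h>

int
main(void)
{
	unsigned int ctrl = 0xc0010000; /* enabled, stage 2 on, P = 1 */
	unsigned int coef = 0x02013203; /* N2=2 M2=1 N1=0x32 M1=3 */
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	int P  = (ctrl & 0x00070000) >> 16;
	unsigned int ref = 27000, clk = 0;

	if ((ctrl & 0x80000000) && M1) {
		clk = ref * N1 / M1;                  /* 450000 */
		if ((ctrl & 0x40000100) == 0x40000000)
			clk = M2 ? clk * N2 / M2 : 0; /* 900000 */
	}
	printf("%u KHz\n", clk >> P);                 /* 450000 KHz */
	return 0;
}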
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index d09d3e7..250a6d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -22,40 +22,538 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
+#include "nv50.h"
 #include "pll.h"
+#include "seq.h"
 
-struct nv50_clock_priv {
-	struct nouveau_clock base;
-};
+static u32
+read_div(struct nv50_clock_priv *priv)
+{
+	switch (nv_device(priv)->chipset) {
+	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+	case 0x84:
+	case 0x86:
+	case 0x98:
+	case 0xa0:
+		return nv_rd32(priv, 0x004700);
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		return nv_rd32(priv, 0x004800);
+	default:
+		return 0x00000000;
+	}
+}
+
+static u32
+read_pll_src(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
+	u32 rsel = nv_rd32(priv, 0x00e18c);
+	int P, N, M, id;
+
+	switch (nv_device(priv)->chipset) {
+	case 0x50:
+	case 0xa0:
+		switch (base) {
+		case 0x4020:
+		case 0x4028: id = !!(rsel & 0x00000004); break;
+		case 0x4008: id = !!(rsel & 0x00000008); break;
+		case 0x4030: id = 0; break;
+		default:
+			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+		ref *=  (coef & 0x01000000) ? 2 : 4;
+		P    =  (coef & 0x00070000) >> 16;
+		N    = ((coef & 0x0000ff00) >> 8) + 1;
+		M    = ((coef & 0x000000ff) >> 0) + 1;
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x92:
+		coef = nv_rd32(priv, 0x00e81c);
+		P    = (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		rsel = nv_rd32(priv, 0x00c050);
+		switch (base) {
+		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+		case 0x4030: rsel = 3; break;
+		default:
+			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		switch (rsel) {
+		case 0: id = 1; break;
+		case 1: return clk->read(clk, nv_clk_src_crystal);
+		case 2: return clk->read(clk, nv_clk_src_href);
+		case 3: id = 0; break;
+		}
+
+		coef =  nv_rd32(priv, 0x00e81c + (id * 0x28));
+		P    = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		P   += (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	if (M)
+		return (ref * N / M) >> P;
+	return 0;
+}
+
+static u32
+read_pll_ref(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 src, mast = nv_rd32(priv, 0x00c040);
+
+	switch (base) {
+	case 0x004028:
+		src = !!(mast & 0x00200000);
+		break;
+	case 0x004020:
+		src = !!(mast & 0x00400000);
+		break;
+	case 0x004008:
+		src = !!(mast & 0x00010000);
+		break;
+	case 0x004030:
+		src = !!(mast & 0x02000000);
+		break;
+	case 0x00e810:
+		return clk->read(clk, nv_clk_src_crystal);
+	default:
+		nv_error(priv, "bad pll 0x%06x\n", base);
+		return 0;
+	}
+
+	if (src)
+		return clk->read(clk, nv_clk_src_href);
+	return read_pll_src(priv, base);
+}
+
+static u32
+read_pll(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 mast = nv_rd32(priv, 0x00c040);
+	u32 ctrl = nv_rd32(priv, base + 0);
+	u32 coef = nv_rd32(priv, base + 4);
+	u32 ref = read_pll_ref(priv, base);
+	u32 freq = 0;
+	int N1, N2, M1, M2;
+
+	if (base == 0x004028 && (mast & 0x00100000)) {
+		/* wtf, appears to only disable post-divider on nva0 */
+		if (nv_device(priv)->chipset != 0xa0)
+			return clk->read(clk, nv_clk_src_dom6);
+	}
+
+	N2 = (coef & 0xff000000) >> 24;
+	M2 = (coef & 0x00ff0000) >> 16;
+	N1 = (coef & 0x0000ff00) >> 8;
+	M1 = (coef & 0x000000ff);
+	if ((ctrl & 0x80000000) && M1) {
+		freq = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				freq = freq * N2 / M2;
+			else
+				freq = 0;
+		}
+	}
+
+	return freq;
+}
 
 static int
+nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	u32 mast = nv_rd32(priv, 0x00c040);
+	u32 P = 0;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000; /* PCIE reference clock */
+	case nv_clk_src_hclk:
+		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+	case nv_clk_src_hclkm3:
+		return clk->read(clk, nv_clk_src_hclk) * 3;
+	case nv_clk_src_hclkm3d2:
+		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+	case nv_clk_src_host:
+		switch (mast & 0x30000000) {
+		case 0x00000000: return clk->read(clk, nv_clk_src_href);
+		case 0x10000000: break;
+		case 0x20000000: /* !0x50 */
+		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+		}
+		break;
+	case nv_clk_src_core:
+		if (!(mast & 0x00100000))
+			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+		switch (mast & 0x00000003) {
+		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
+		case 0x00000002: return read_pll(priv, 0x004020) >> P;
+		case 0x00000003: return read_pll(priv, 0x004028) >> P;
+		}
+		break;
+	case nv_clk_src_shader:
+		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+		switch (mast & 0x00000030) {
+		case 0x00000000:
+			if (mast & 0x00000080)
+				return clk->read(clk, nv_clk_src_host) >> P;
+			return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000010: break;
+		case 0x00000020: return read_pll(priv, 0x004028) >> P;
+		case 0x00000030: return read_pll(priv, 0x004020) >> P;
+		}
+		break;
+	case nv_clk_src_mem:
+		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
+		if (nv_rd32(priv, 0x004008) & 0x00000200) {
+			switch (mast & 0x0000c000) {
+			case 0x00000000:
+				return clk->read(clk, nv_clk_src_crystal) >> P;
+			case 0x00008000:
+			case 0x0000c000:
+				return clk->read(clk, nv_clk_src_href) >> P;
+			}
+		} else {
+			return read_pll(priv, 0x004008) >> P;
+		}
+		break;
+	case nv_clk_src_vdec:
+		P = (read_div(priv) & 0x00000700) >> 8;
+		switch (nv_device(priv)->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
+					return clk->read(clk, nv_clk_src_core) >> P;
+				return clk->read(clk, nv_clk_src_crystal) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				if (mast & 0x01000000)
+					return read_pll(priv, 0x004028) >> P;
+				return read_pll(priv, 0x004030) >> P;
+			case 0x00000c00:
+				return clk->read(clk, nv_clk_src_core) >> P;
+			}
+			break;
+		case 0x98:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				return clk->read(clk, nv_clk_src_core) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+			case 0x00000c00:
+				return clk->read(clk, nv_clk_src_mem) >> P;
+			}
+			break;
+		}
+		break;
+	case nv_clk_src_dom6:
+		switch (nv_device(priv)->chipset) {
+		case 0x50:
+		case 0xa0:
+			return read_pll(priv, 0x00e810) >> 2;
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+			P = (read_div(priv) & 0x00000007) >> 0;
+			switch (mast & 0x0c000000) {
+			case 0x00000000: return clk->read(clk, nv_clk_src_href);
+			case 0x04000000: break;
+			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+			case 0x0c000000:
+				return clk->read(clk, nv_clk_src_hclkm3) >> P;
+			}
+			break;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	return -EINVAL;
+}
+
+static u32
+calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll pll;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, &pll);
+	if (ret)
+		return 0;
+
+	pll.vco2.max_freq = 0;
+	pll.refclk = read_pll_ref(priv, reg);
+	if (!pll.refclk)
+		return 0;
+
+	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+	u32 clk0 = src, clk1 = src;
+	for (*div = 0; *div <= 7; (*div)++) {
+		if (clk0 <= target) {
+			clk1 = clk0 << (*div ? 1 : 0);
+			break;
+		}
+		clk0 >>= 1;
+	}
+
+	if (target - clk0 <= clk1 - target)
+		return clk0;
+	(*div)--;
+	return clk1;
+}
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+	return ((a / 1000) == (b / 1000));
+}
+
+static int
+nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	struct nv50_clock_hwsq *hwsq = &priv->hwsq;
+	const int shader = cstate->domain[nv_clk_src_shader];
+	const int core = cstate->domain[nv_clk_src_core];
+	const int vdec = cstate->domain[nv_clk_src_vdec];
+	const int dom6 = cstate->domain[nv_clk_src_dom6];
+	u32 mastm = 0, mastv = 0;
+	u32 divsm = 0, divsv = 0;
+	int N, M, P1, P2;
+	int freq, out;
+
+	/* prepare a hwsq script from which we'll perform the reclock */
+	out = clk_init(hwsq, nv_subdev(clk));
+	if (out)
+		return out;
+
+	clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
+	clk_nsec(hwsq, 8000);
+	clk_setf(hwsq, 0x10, 0x00); /* disable fb */
+	clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+	/* vdec: avoid modifying xpll until we know exactly how the other
+	 * clock domains work, i suspect at least some of them can also be
+	 * tied to xpll...
+	 */
+	if (vdec) {
+		/* see how close we can get using nvclk as a source */
+		freq = calc_div(core, vdec, &P1);
+
+		/* see how close we can get using xpll/hclk as a source */
+		if (nv_device(priv)->chipset != 0x98)
+			out = read_pll(priv, 0x004030);
+		else
+			out = clk->read(clk, nv_clk_src_hclkm3d2);
+		out = calc_div(out, vdec, &P2);
+
+		/* select whichever gets us closest */
+		if (abs(vdec - freq) <= abs(vdec - out)) {
+			if (nv_device(priv)->chipset != 0x98)
+				mastv |= 0x00000c00;
+			divsv |= P1 << 8;
+		} else {
+			mastv |= 0x00000800;
+			divsv |= P2 << 8;
+		}
+
+		mastm |= 0x00000c00;
+		divsm |= 0x00000700;
+	}
+
+	/* dom6: nfi what this is, but we're limited to various combinations
+	 * of the host clock frequency
+	 */
+	if (dom6) {
+		if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+			mastv |= 0x00000000;
+		} else
+		if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+			mastv |= 0x08000000;
+		} else {
+			freq = clk->read(clk, nv_clk_src_hclk) * 3;
+			freq = calc_div(freq, dom6, &P1);
+
+			mastv |= 0x0c000000;
+			divsv |= P1;
+		}
+
+		mastm |= 0x0c000000;
+		divsm |= 0x00000007;
+	}
+
+	/* vdec/dom6: switch to "safe" clocks temporarily, update dividers
+	 * and then switch to target clocks
+	 */
+	clk_mask(hwsq, mast, mastm, 0x00000000);
+	clk_mask(hwsq, divs, divsm, divsv);
+	clk_mask(hwsq, mast, mastm, mastv);
+
+	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
+	 * sclk to hclk) before reprogramming
+	 */
+	if (nv_device(priv)->chipset < 0x92)
+		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
+	else
+		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
+
+	/* core: for the moment at least, always use nvpll */
+	freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+	if (freq == 0)
+		return -ERANGE;
+
+	clk_mask(hwsq, nvpll[0], 0xc03f0100,
+				 0x80000000 | (P1 << 19) | (P1 << 16));
+	clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
+
+	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
+	 * very careful that the shader clock is at least twice the core, or
+	 * some chipsets will be very unhappy.  i expect most or all of these
+	 * cases will be handled by tying to nvclk, but it's possible there's
+	 * corners
+	 */
+	if (P1-- && shader == (core << 1)) {
+		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
+		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
+	} else {
+		freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+		if (freq == 0)
+			return -ERANGE;
+
+		clk_mask(hwsq, spll[0], 0xc03f0100,
+					0x80000000 | (P1 << 19) | (P1 << 16));
+		clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
+		clk_mask(hwsq, mast, 0x00100033, 0x00000033);
+	}
+
+	/* restore normal operation */
+	clk_setf(hwsq, 0x10, 0x01); /* enable fb */
+	clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+	clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
+	return 0;
+}
+
+static int
+nv50_clock_prog(struct nouveau_clock *clk)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	return clk_exec(&priv->hwsq, true);
+}
+
+static void
+nv50_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	clk_exec(&priv->hwsq, false);
+}
+
+int
 nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_oclass *oclass, void *data, u32 size,
 		struct nouveau_object **pobject)
 {
+	struct nv50_clock_oclass *pclass = (void *)oclass;
 	struct nv50_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
+				  &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->hwsq.r_fifo = hwsq_reg(0x002504);
+	priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
+	priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
+	priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+	priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+	switch (nv_device(priv)->chipset) {
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		priv->hwsq.r_divs = hwsq_reg(0x004800);
+		break;
+	default:
+		priv->hwsq.r_divs = hwsq_reg(0x004700);
+		break;
+	}
+	priv->hwsq.r_mast = hwsq_reg(0x00c040);
+
+	priv->base.read = nv50_clock_read;
+	priv->base.calc = nv50_clock_calc;
+	priv->base.prog = nv50_clock_prog;
+	priv->base.tidy = nv50_clock_tidy;
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_clock_oclass = {
-	.handle = NV_SUBDEV(CLOCK, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+static struct nouveau_clocks
+nv50_domains[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv50_clock_oclass = &(struct nv50_clock_oclass) {
+	.base.handle = NV_SUBDEV(CLOCK, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_clock_ctor,
 		.dtor = _nouveau_clock_dtor,
 		.init = _nouveau_clock_init,
 		.fini = _nouveau_clock_fini,
 	},
-};
+	.domains = nv50_domains,
+}.base;
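calc_div() in nv50.c above picks the power-of-two divider (0..7) whose output lands nearest the target, preferring the lower frequency on a tie; nv50_clock_calc() uses it for the vdec and dom6 dividers. A standalone copy with a usage example:

#include <stdio.h>

static unsigned int
calc_div(unsigned int src, unsigned int target, int *div)
{
	unsigned int clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int
main(void)
{
	int div;
	unsigned int out = calc_div(400000, 150000, &div);
	printf("%u KHz, divider %d\n", out, div); /* 100000 KHz, div 2 */
	return 0;
}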
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
new file mode 100644
index 0000000..f10917d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_CLK_NV50_H__
+#define __NVKM_CLK_NV50_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+#include <subdev/clock.h>
+
+struct nv50_clock_hwsq {
+	struct hwsq base;
+	struct hwsq_reg r_fifo;
+	struct hwsq_reg r_spll[2];
+	struct hwsq_reg r_nvpll[2];
+	struct hwsq_reg r_divs;
+	struct hwsq_reg r_mast;
+};
+
+struct nv50_clock_priv {
+	struct nouveau_clock base;
+	struct nv50_clock_hwsq hwsq;
+};
+
+int  nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
+		     struct nouveau_oclass *, void *, u32,
+		     struct nouveau_object **);
+
+struct nv50_clock_oclass {
+	struct nouveau_oclass base;
+	struct nouveau_clocks *domains;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
new file mode 100644
index 0000000..b0b7c14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nv50.h"
+
+static struct nouveau_clocks
+nv84_domains[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0xff },
+	{ nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv84_clock_oclass = &(struct nv50_clock_oclass) {
+	.base.handle = NV_SUBDEV(CLOCK, 0x84),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+	.domains = nv84_domains,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index f074cd2..4f5a137 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -22,33 +22,277 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/timer.h>
 
 #include "pll.h"
 
+#include "nva3.h"
+
 struct nva3_clock_priv {
 	struct nouveau_clock base;
+	struct nva3_clock_info eng[nv_clk_src_max];
 };
 
-int
-nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
-		    int clk, struct nouveau_pll_vals *pv)
+static u32 read_clk(struct nva3_clock_priv *, int, bool);
+static u32 read_pll(struct nva3_clock_priv *, int, u32);
+
+static u32
+read_vco(struct nva3_clock_priv *priv, int clk)
 {
-	int ret, N, M, P;
+	u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	if ((sctl & 0x00000030) != 0x00000030)
+		return read_pll(priv, 0x41, 0x00e820);
+	return read_pll(priv, 0x42, 0x00e8a0);
+}
 
-	ret = nva3_pll_calc(nv_subdev(clock), info, clk, &N, NULL, &M, &P);
+static u32
+read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
+{
+	u32 sctl, sdiv, sclk;
 
-	if (ret > 0) {
-		pv->refclk = info->refclk;
-		pv->N1 = N;
-		pv->M1 = M;
-		pv->log2P = P;
+	/* refclk for the 0xe8xx plls is a fixed frequency */
+	if (clk >= 0x40) {
+		if (nv_device(priv)->chipset == 0xaf) {
+			/* no joke.. seriously.. sigh.. */
+			return nv_rd32(priv, 0x00471c) * 1000;
+		}
+
+		return nv_device(priv)->crystal;
 	}
+
+	sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	if (!ignore_en && !(sctl & 0x00000100))
+		return 0;
+
+	switch (sctl & 0x00003000) {
+	case 0x00000000:
+		return nv_device(priv)->crystal;
+	case 0x00002000:
+		if (sctl & 0x00000040)
+			return 108000;
+		return 100000;
+	case 0x00003000:
+		sclk = read_vco(priv, clk);
+		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+		return (sclk * 2) / sdiv;
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
+{
+	u32 ctrl = nv_rd32(priv, pll + 0);
+	u32 sclk = 0, P = 1, N = 1, M = 1;
+
+	if (!(ctrl & 0x00000008)) {
+		if (ctrl & 0x00000001) {
+			u32 coef = nv_rd32(priv, pll + 4);
+			M = (coef & 0x000000ff) >> 0;
+			N = (coef & 0x0000ff00) >> 8;
+			P = (coef & 0x003f0000) >> 16;
+
+			/* no post-divider on these.. */
+			if ((pll & 0x00ff00) == 0x00e800)
+				P = 1;
+
+			sclk = read_clk(priv, 0x00 + clk, false);
+		}
+	} else {
+		sclk = read_clk(priv, 0x10 + clk, false);
+	}
+
+	if (M * P)
+		return sclk * N / (M * P);
+	return 0;
+}
+
+static int
+nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_core:
+		return read_pll(priv, 0x00, 0x4200);
+	case nv_clk_src_shader:
+		return read_pll(priv, 0x01, 0x4220);
+	case nv_clk_src_mem:
+		return read_pll(priv, 0x02, 0x4000);
+	case nv_clk_src_disp:
+		return read_clk(priv, 0x20, false);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x21, false);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x25, false);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+int
+nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
+		struct nva3_clock_info *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(clock);
+	struct nva3_clock_priv *priv = (void *)clock;
+	struct nvbios_pll limits;
+	u32 oclk, sclk, sdiv;
+	int P, N, M, diff;
+	int ret;
+
+	info->pll = 0;
+	info->clk = 0;
+
+	switch (khz) {
+	case 27000:
+		info->clk = 0x00000100;
+		return khz;
+	case 100000:
+		info->clk = 0x00002100;
+		return khz;
+	case 108000:
+		info->clk = 0x00002140;
+		return khz;
+	default:
+		sclk = read_vco(priv, clk);
+		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+		/* if the clock has a PLL attached, and a divider can get us
+		 * within [-2, 3) MHz of the target, we'll disable the PLL
+		 * and use the divider instead.
+		 *
+		 * the divider could go as low as 2, but is limited to >4
+		 * here because NVIDIA and the VBIOS on my NVA8 seem to
+		 * prefer using the PLL for 810MHz - is there a good reason?
+		 */
+		if (sdiv > 4) {
+			oclk = (sclk * 2) / sdiv;
+			diff = khz - oclk;
+			if (!pll || (diff >= -2000 && diff < 3000)) {
+				info->clk = (((sdiv - 2) << 16) | 0x00003100);
+				return oclk;
+			}
+		}
+
+		if (!pll)
+			return -ERANGE;
+		break;
+	}
+
+	ret = nvbios_pll_parse(bios, pll, &limits);
+	if (ret)
+		return ret;
+
+	limits.refclk = read_clk(priv, clk - 0x10, true);
+	if (!limits.refclk)
+		return -EINVAL;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+	if (ret >= 0) {
+		info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
+		info->pll = (P << 16) | (N << 8) | M;
+	}
+
+	return ret ? ret : -ERANGE;
+}
+
+static int
+calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
+	 int clk, u32 pll, int idx)
+{
+	int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx],
+				  &priv->eng[idx]);
+	if (ret >= 0)
+		return 0;
 	return ret;
 }
 
+static void
+prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
+{
+	struct nva3_clock_info *info = &priv->eng[idx];
+	const u32 src0 = 0x004120 + (clk * 4);
+	const u32 src1 = 0x004160 + (clk * 4);
+	const u32 ctrl = pll + 0;
+	const u32 coef = pll + 4;
+
+	if (info->pll) {
+		nv_mask(priv, src0, 0x00000101, 0x00000101);
+		nv_wr32(priv, coef, info->pll);
+		nv_mask(priv, ctrl, 0x00000015, 0x00000015);
+		nv_mask(priv, ctrl, 0x00000010, 0x00000000);
+		nv_wait(priv, ctrl, 0x00020000, 0x00020000);
+		nv_mask(priv, ctrl, 0x00000010, 0x00000010);
+		nv_mask(priv, ctrl, 0x00000008, 0x00000000);
+		nv_mask(priv, src1, 0x00000100, 0x00000000);
+		nv_mask(priv, src1, 0x00000001, 0x00000000);
+	} else {
+		nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
+		nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+		udelay(20);
+		nv_mask(priv, ctrl, 0x00000001, 0x00000000);
+		nv_mask(priv, src0, 0x00000100, 0x00000000);
+		nv_mask(priv, src0, 0x00000001, 0x00000000);
+	}
+}
+
+static void
+prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
+{
+	struct nva3_clock_info *info = &priv->eng[idx];
+	nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+}
+
+static int
+nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+	    (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+	    (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+	    (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static int
+nva3_clock_prog(struct nouveau_clock *clk)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	prog_pll(priv, 0x00, 0x004200, nv_clk_src_core);
+	prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
+	prog_clk(priv, 0x20, nv_clk_src_disp);
+	prog_clk(priv, 0x21, nv_clk_src_vdec);
+	return 0;
+}
+
+static void
+nva3_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nva3_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0x00, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0x01, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0x02, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0x03 },
+	{ nv_clk_src_disp   , 0x04 },
+	{ nv_clk_src_max }
+};
 
 static int
 nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -58,12 +302,15 @@
 	struct nva3_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nva3_clock_pll_calc;
+	priv->base.read = nva3_clock_read;
+	priv->base.calc = nva3_clock_calc;
+	priv->base.prog = nva3_clock_prog;
+	priv->base.tidy = nva3_clock_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
new file mode 100644
index 0000000..6229a50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_CLK_NVA3_H__
+#define __NVKM_CLK_NVA3_H__
+
+#include <subdev/clock.h>
+
+struct nva3_clock_info {
+	u32 clk;
+	u32 pll;
+};
+
+int nva3_clock_info(struct nouveau_clock *, int, u32, u32,
+		    struct nva3_clock_info *);
+
+#endif
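
To make the divider arithmetic in nva3_clock_info() above concrete: the source clock is effectively divided by sdiv/2 with sdiv in [2, 65], and the search seeds sdiv from (sclk * 2) / (khz - 2999), so a divider-only result is accepted when it lands within the [-2000, 3000) kHz window (and sdiv exceeds 4). A self-contained model with invented numbers; the 2 GHz VCO is made up, and with it an 810 MHz request misses the window, which at least rhymes with the in-code remark about the VBIOS preferring the PLL there:

#include <stdio.h>

/* Standalone model of the divider path in nva3_clock_info(): the
 * source clock (VCO) is divided by sdiv/2, sdiv in [2, 65]. */
static unsigned div_output(unsigned vco_khz, unsigned sdiv)
{
        return (vco_khz * 2) / sdiv;
}

int main(void)
{
        unsigned vco = 2000000;   /* hypothetical 2 GHz VCO */
        unsigned target = 810000; /* requested 810 MHz */

        /* same bias as the driver: the (target - 2999) denominator
         * lets the chosen divider overshoot by just under 3 MHz */
        unsigned sdiv = (vco * 2) / (target - 2999);
        if (sdiv > 65)
                sdiv = 65;

        unsigned out = div_output(vco, sdiv);
        int diff = (int)target - (int)out;
        /* the driver additionally requires sdiv > 4 before it will
         * consider the divider path at all */
        printf("sdiv=%u out=%ukHz diff=%dkHz usable=%s\n",
               sdiv, out, diff,
               (diff >= -2000 && diff < 3000) ? "yes" : "no");
        return 0;
}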
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 439d81c2..c310572 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -25,11 +25,408 @@
 #include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/timer.h>
 
 #include "pll.h"
 
+struct nvc0_clock_info {
+	u32 freq;
+	u32 ssel;
+	u32 mdiv;
+	u32 dsrc;
+	u32 ddiv;
+	u32 coef;
+};
+
 struct nvc0_clock_priv {
 	struct nouveau_clock base;
+	struct nvc0_clock_info eng[16];
+};
+
+static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
+
+static u32
+read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 ssrc = nv_rd32(priv, dsrc);
+	if (!(ssrc & 0x00000100))
+		return clk->read(clk, nv_clk_src_sppll0);
+	return clk->read(clk, nv_clk_src_sppll1);
+}
+
+static u32
+read_pll(struct nvc0_clock_priv *priv, u32 pll)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 ctrl = nv_rd32(priv, pll + 0x00);
+	u32 coef = nv_rd32(priv, pll + 0x04);
+	u32 P = (coef & 0x003f0000) >> 16;
+	u32 N = (coef & 0x0000ff00) >> 8;
+	u32 M = (coef & 0x000000ff) >> 0;
+	u32 sclk;
+
+	if (!(ctrl & 0x00000001))
+		return 0;
+
+	switch (pll) {
+	case 0x00e800:
+	case 0x00e820:
+		sclk = nv_device(priv)->crystal;
+		P = 1;
+		break;
+	case 0x132000:
+		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		break;
+	case 0x132020:
+		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		break;
+	case 0x137000:
+	case 0x137020:
+	case 0x137040:
+	case 0x1370e0:
+		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		break;
+	default:
+		return 0;
+	}
+
+	return sclk * N / M / P;
+}
+
+static u32
+read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+	switch (ssrc & 0x00000003) {
+	case 0:
+		if ((ssrc & 0x00030000) != 0x00030000)
+			return nv_device(priv)->crystal;
+		return 108000;
+	case 2:
+		return 100000;
+	case 3:
+		if (sctl & 0x80000000) {
+			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sdiv = (sctl & 0x0000003f) + 2;
+			return (sclk * 2) / sdiv;
+		}
+
+		return read_vco(priv, dsrc + (doff * 4));
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_clk(struct nvc0_clock_priv *priv, int clk)
+{
+	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	u32 ssel = nv_rd32(priv, 0x137100);
+	u32 sclk, sdiv;
+
+	if (ssel & (1 << clk)) {
+		if (clk < 7)
+			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+		else
+			sclk = read_pll(priv, 0x1370e0);
+		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+	} else {
+		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+	}
+
+	if (sctl & 0x80000000)
+		return (sclk * 2) / sdiv;
+
+	return sclk;
+}
+
+static int
+nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nouveau_device *device = nv_device(clk);
+	struct nvc0_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return device->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_sppll0:
+		return read_pll(priv, 0x00e800);
+	case nv_clk_src_sppll1:
+		return read_pll(priv, 0x00e820);
+
+	case nv_clk_src_mpllsrcref:
+		return read_div(priv, 0, 0x137320, 0x137330);
+	case nv_clk_src_mpllsrc:
+		return read_pll(priv, 0x132020);
+	case nv_clk_src_mpll:
+		return read_pll(priv, 0x132000);
+	case nv_clk_src_mdiv:
+		return read_div(priv, 0, 0x137300, 0x137310);
+	case nv_clk_src_mem:
+		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
+			return clk->read(clk, nv_clk_src_mpll);
+		return clk->read(clk, nv_clk_src_mdiv);
+
+	case nv_clk_src_gpc:
+		return read_clk(priv, 0x00);
+	case nv_clk_src_rop:
+		return read_clk(priv, 0x01);
+	case nv_clk_src_hubk07:
+		return read_clk(priv, 0x02);
+	case nv_clk_src_hubk06:
+		return read_clk(priv, 0x07);
+	case nv_clk_src_hubk01:
+		return read_clk(priv, 0x08);
+	case nv_clk_src_copy:
+		return read_clk(priv, 0x09);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x0c);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x0e);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+static u32
+calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+	u32 div = min((ref * 2) / freq, (u32)65);
+	if (div < 2)
+		div = 2;
+
+	*ddiv = div - 2;
+	return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+	u32 sclk;
+
+	/* use one of the fixed frequencies if possible */
+	*ddiv = 0x00000000;
+	switch (freq) {
+	case  27000:
+	case 108000:
+		*dsrc = 0x00000000;
+		if (freq == 108000)
+			*dsrc |= 0x00030000;
+		return freq;
+	case 100000:
+		*dsrc = 0x00000002;
+		return freq;
+	default:
+		*dsrc = 0x00000003;
+		break;
+	}
+
+	/* otherwise, calculate the closest divider */
+	sclk = read_vco(priv, 0x137160 + (clk * 4));
+	if (clk < 7)
+		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	return sclk;
+}
+
+static u32
+calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll limits;
+	int N, M, P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	if (ret)
+		return 0;
+
+	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	if (!limits.refclk)
+		return 0;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	if (ret <= 0)
+		return 0;
+
+	*coef = (P << 16) | (N << 8) | M;
+	return ret;
+}
+
+static int
+calc_clk(struct nvc0_clock_priv *priv,
+	 struct nouveau_cstate *cstate, int clk, int dom)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	u32 freq = cstate->domain[dom];
+	u32 src0, div0, div1D, div1P = 0;
+	u32 clk0, clk1 = 0;
+
+	/* invalid clock domain */
+	if (!freq)
+		return 0;
+
+	/* first possible path, using only dividers */
+	clk0 = calc_src(priv, clk, freq, &src0, &div0);
+	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+	/* see if we can get any closer using PLLs */
+	if (clk0 != freq && (0x00004387 & (1 << clk))) {
+		if (clk <= 7)
+			clk1 = calc_pll(priv, clk, freq, &info->coef);
+		else
+			clk1 = cstate->domain[nv_clk_src_hubk06];
+		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+	}
+
+	/* select the method which gets closest to target freq */
+	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+		info->dsrc = src0;
+		if (div0) {
+			info->ddiv |= 0x80000000;
+			info->ddiv |= div0 << 8;
+			info->ddiv |= div0;
+		}
+		if (div1D) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1D;
+		}
+		info->ssel = info->coef = 0;
+		info->freq = clk0;
+	} else {
+		if (div1P) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1P << 8;
+		}
+		info->ssel = (1 << clk);
+		info->freq = clk1;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static void
+nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	if (clk < 7 && !info->ssel) {
+		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	}
+}
+
+static void
+nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	const u32 addr = 0x137000 + (clk * 0x20);
+	if (clk <= 7) {
+		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+		if (info->coef) {
+			nv_wr32(priv, addr + 0x04, info->coef);
+			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+		}
+	}
+}
+
+static void
+nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+	}
+}
+
+static void
+nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static int
+nvc0_clock_prog(struct nouveau_clock *clk)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	struct {
+		void (*exec)(struct nvc0_clock_priv *, int);
+	} stage[] = {
+		{ nvc0_clock_prog_0 }, /* div programming */
+		{ nvc0_clock_prog_1 }, /* select div mode */
+		{ nvc0_clock_prog_2 }, /* (maybe) program pll */
+		{ nvc0_clock_prog_3 }, /* (maybe) select pll mode */
+		{ nvc0_clock_prog_4 }, /* final divider */
+	};
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(stage); i++) {
+		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+			if (!priv->eng[j].freq)
+				continue;
+			stage[i].exec(priv, j);
+		}
+	}
+
+	return 0;
+}
+
+static void
+nvc0_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nvc0_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_hubk06 , 0x00 },
+	{ nv_clk_src_hubk01 , 0x01 },
+	{ nv_clk_src_copy   , 0x02 },
+	{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+	{ nv_clk_src_rop    , 0x04 },
+	{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0x06 },
+	{ nv_clk_src_daemon , 0x0a },
+	{ nv_clk_src_hubk07 , 0x0b },
+	{ nv_clk_src_max }
 };
 
 static int
@@ -40,12 +437,15 @@
 	struct nvc0_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nva3_clock_pll_calc;
+	priv->base.read = nvc0_clock_read;
+	priv->base.calc = nvc0_clock_calc;
+	priv->base.prog = nvc0_clock_prog;
+	priv->base.tidy = nvc0_clock_tidy;
 	return 0;
 }
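
The heart of nvc0's calc_clk() above is a two-candidate comparison: clk0 comes from dividers alone, clk1 from a PLL (or the hubk06 domain) plus a post-divider, and whichever lands nearer the request wins, with ties going to the divider path. A stub sketch of just that selection; the candidate frequencies are hard-coded here, whereas in the driver they come from calc_src()/calc_div() and calc_pll():

#include <stdio.h>
#include <stdlib.h>

struct choice {
        unsigned freq;   /* achieved frequency, kHz */
        int used_pll;
};

static struct choice pick(unsigned target, unsigned clk0, unsigned clk1)
{
        struct choice c;
        /* same tie-break as the driver: <= prefers the divider path,
         * which presumably spares a PLL relock when both are equal */
        if (abs((int)target - (int)clk0) <= abs((int)target - (int)clk1)) {
                c.freq = clk0;
                c.used_pll = 0;
        } else {
                c.freq = clk1;
                c.used_pll = 1;
        }
        return c;
}

int main(void)
{
        struct choice c = pick(405000, 400000, 405000);
        printf("chose %ukHz via %s\n", c.freq,
               c.used_pll ? "pll" : "divider");
        return 0;
}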
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
new file mode 100644
index 0000000..4c62e84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nve0_clock_info {
+	u32 freq;
+	u32 ssel;
+	u32 mdiv;
+	u32 dsrc;
+	u32 ddiv;
+	u32 coef;
+};
+
+struct nve0_clock_priv {
+	struct nouveau_clock base;
+	struct nve0_clock_info eng[16];
+};
+
+static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
+static u32 read_pll(struct nve0_clock_priv *, u32);
+
+static u32
+read_vco(struct nve0_clock_priv *priv, u32 dsrc)
+{
+	u32 ssrc = nv_rd32(priv, dsrc);
+	if (!(ssrc & 0x00000100))
+		return read_pll(priv, 0x00e800);
+	return read_pll(priv, 0x00e820);
+}
+
+static u32
+read_pll(struct nve0_clock_priv *priv, u32 pll)
+{
+	u32 ctrl = nv_rd32(priv, pll + 0x00);
+	u32 coef = nv_rd32(priv, pll + 0x04);
+	u32 P = (coef & 0x003f0000) >> 16;
+	u32 N = (coef & 0x0000ff00) >> 8;
+	u32 M = (coef & 0x000000ff) >> 0;
+	u32 sclk;
+	u16 fN = 0xf000;
+
+	if (!(ctrl & 0x00000001))
+		return 0;
+
+	switch (pll) {
+	case 0x00e800:
+	case 0x00e820:
+		sclk = nv_device(priv)->crystal;
+		P = 1;
+		break;
+	case 0x132000:
+		sclk = read_pll(priv, 0x132020);
+		P = (coef & 0x10000000) ? 2 : 1;
+		break;
+	case 0x132020:
+		sclk = read_div(priv, 0, 0x137320, 0x137330);
+		fN   = nv_rd32(priv, pll + 0x10) >> 16;
+		break;
+	case 0x137000:
+	case 0x137020:
+	case 0x137040:
+	case 0x1370e0:
+		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		break;
+	default:
+		return 0;
+	}
+
+	if (P == 0)
+		P = 1;
+
+	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
+	return sclk / (M * P);
+}
+
+static u32
+read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+	switch (ssrc & 0x00000003) {
+	case 0:
+		if ((ssrc & 0x00030000) != 0x00030000)
+			return nv_device(priv)->crystal;
+		return 108000;
+	case 2:
+		return 100000;
+	case 3:
+		if (sctl & 0x80000000) {
+			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sdiv = (sctl & 0x0000003f) + 2;
+			return (sclk * 2) / sdiv;
+		}
+
+		return read_vco(priv, dsrc + (doff * 4));
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_mem(struct nve0_clock_priv *priv)
+{
+	switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
+	case 1: return read_pll(priv, 0x132020);
+	case 2: return read_pll(priv, 0x132000);
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_clk(struct nve0_clock_priv *priv, int clk)
+{
+	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	u32 sclk, sdiv;
+
+	if (clk < 7) {
+		u32 ssel = nv_rd32(priv, 0x137100);
+		if (ssel & (1 << clk)) {
+			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+			sdiv = 1;
+		} else {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sdiv = 0;
+		}
+	} else {
+		u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+		if ((ssrc & 0x00000003) == 0x00000003) {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			if (ssrc & 0x00000100) {
+				if (ssrc & 0x40000000)
+					sclk = read_pll(priv, 0x1370e0);
+				sdiv = 1;
+			} else {
+				sdiv = 0;
+			}
+		} else {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sdiv = 0;
+		}
+	}
+
+	if (sctl & 0x80000000) {
+		if (sdiv)
+			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+		else
+			sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+		return (sclk * 2) / sdiv;
+	}
+
+	return sclk;
+}
+
+static int
+nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nouveau_device *device = nv_device(clk);
+	struct nve0_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return device->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_mem:
+		return read_mem(priv);
+	case nv_clk_src_gpc:
+		return read_clk(priv, 0x00);
+	case nv_clk_src_rop:
+		return read_clk(priv, 0x01);
+	case nv_clk_src_hubk07:
+		return read_clk(priv, 0x02);
+	case nv_clk_src_hubk06:
+		return read_clk(priv, 0x07);
+	case nv_clk_src_hubk01:
+		return read_clk(priv, 0x08);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x0c);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x0e);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+static u32
+calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+	u32 div = min((ref * 2) / freq, (u32)65);
+	if (div < 2)
+		div = 2;
+
+	*ddiv = div - 2;
+	return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+	u32 sclk;
+
+	/* use one of the fixed frequencies if possible */
+	*ddiv = 0x00000000;
+	switch (freq) {
+	case  27000:
+	case 108000:
+		*dsrc = 0x00000000;
+		if (freq == 108000)
+			*dsrc |= 0x00030000;
+		return freq;
+	case 100000:
+		*dsrc = 0x00000002;
+		return freq;
+	default:
+		*dsrc = 0x00000003;
+		break;
+	}
+
+	/* otherwise, calculate the closest divider */
+	sclk = read_vco(priv, 0x137160 + (clk * 4));
+	if (clk < 7)
+		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	return sclk;
+}
+
+static u32
+calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll limits;
+	int N, M, P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	if (ret)
+		return 0;
+
+	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	if (!limits.refclk)
+		return 0;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	if (ret <= 0)
+		return 0;
+
+	*coef = (P << 16) | (N << 8) | M;
+	return ret;
+}
+
+static int
+calc_clk(struct nve0_clock_priv *priv,
+	 struct nouveau_cstate *cstate, int clk, int dom)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	u32 freq = cstate->domain[dom];
+	u32 src0, div0, div1D, div1P = 0;
+	u32 clk0, clk1 = 0;
+
+	/* invalid clock domain */
+	if (!freq)
+		return 0;
+
+	/* first possible path, using only dividers */
+	clk0 = calc_src(priv, clk, freq, &src0, &div0);
+	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+	/* see if we can get any closer using PLLs */
+	if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
+		if (clk <= 7)
+			clk1 = calc_pll(priv, clk, freq, &info->coef);
+		else
+			clk1 = cstate->domain[nv_clk_src_hubk06];
+		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+	}
+
+	/* select the method which gets closest to target freq */
+	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+		info->dsrc = src0;
+		if (div0) {
+			info->ddiv |= 0x80000000;
+			info->ddiv |= div0 << 8;
+			info->ddiv |= div0;
+		}
+		if (div1D) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1D;
+		}
+		info->ssel = 0;
+		info->freq = clk0;
+	} else {
+		if (div1P) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1P << 8;
+		}
+		info->ssel = (1 << clk);
+		info->dsrc = 0x40000100;
+		info->freq = clk1;
+	}
+
+	return 0;
+}
+
+static int
+nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static void
+nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (!info->ssel) {
+		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	}
+}
+
+static void
+nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+}
+
+static void
+nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	const u32 addr = 0x137000 + (clk * 0x20);
+	nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+	nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	if (info->coef) {
+		nv_wr32(priv, addr + 0x04, info->coef);
+		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+		nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+		nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+	}
+}
+
+static void
+nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static void
+nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+	}
+}
+
+static void
+nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
+		nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+	}
+}
+
+static int
+nve0_clock_prog(struct nouveau_clock *clk)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	struct {
+		u32 mask;
+		void (*exec)(struct nve0_clock_priv *, int);
+	} stage[] = {
+		{ 0x007f, nve0_clock_prog_0   }, /* div programming */
+		{ 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
+		{ 0xff80, nve0_clock_prog_1_1 },
+		{ 0x00ff, nve0_clock_prog_2   }, /* (maybe) program pll */
+		{ 0xff80, nve0_clock_prog_3   }, /* final divider */
+		{ 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
+		{ 0xff80, nve0_clock_prog_4_1 },
+	};
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(stage); i++) {
+		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+			if (!(stage[i].mask & (1 << j)))
+				continue;
+			if (!priv->eng[j].freq)
+				continue;
+			stage[i].exec(priv, j);
+		}
+	}
+
+	return 0;
+}
+
+static void
+nve0_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nve0_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_mem    , 0x03, 0, "memory", 1000 },
+	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_hubk01 , 0x05 },
+	{ nv_clk_src_vdec   , 0x06 },
+	{ nv_clk_src_daemon , 0x07 },
+	{ nv_clk_src_max }
+};
+
+static int
+nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.read = nve0_clock_read;
+	priv->base.calc = nve0_clock_calc;
+	priv->base.prog = nve0_clock_prog;
+	priv->base.tidy = nve0_clock_tidy;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
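
One detail of nve0's read_pll() worth spelling out: for the 0x132020 PLL the coefficient gains a 16-bit fractional field fN, and the >> 13 turns the expression into f_out = f_ref * (N + (fN + 4096) / 8192) / (M * P). The default fN of 0xf000 makes (u16)(fN + 4096) wrap to zero, so non-fractional PLLs contribute nothing extra. A standalone model of the arithmetic; the register values below are invented for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t frac_pll(uint32_t refclk, uint32_t N, uint32_t M,
                         uint32_t P, uint16_t fN)
{
        /* mirrors read_pll(): integer-N term plus fractional term */
        uint32_t sclk = (refclk * N) +
                        (((uint16_t)(fN + 4096) * refclk) >> 13);
        return sclk / (M * P);
}

int main(void)
{
        /* fN = 0xf000 is the default in read_pll(); +4096 wraps a u16
         * to 0, so the fractional term vanishes */
        printf("integer-N : %u kHz\n", frac_pll(27000, 74, 2, 1, 0xf000));
        /* a genuine fractional value lands between integer-N steps */
        printf("frac-N    : %u kHz\n", frac_pll(27000, 74, 2, 1, 0x2000));
        return 0;
}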
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
index cf1ed0d..b47d543 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -38,7 +38,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	int cv = nouveau_bios(subdev)->version.chip;
+	struct nouveau_bios *bios = nouveau_bios(subdev);
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -54,18 +54,21 @@
 
 	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
 	/* possibly correlated with introduction of 27MHz crystal */
-	if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
-		if (clk > 250000)
-			maxM = 6;
-		if (clk > 340000)
-			maxM = 2;
-	} else if (cv < 0x40) {
-		if (clk > 150000)
-			maxM = 6;
-		if (clk > 200000)
-			maxM = 4;
-		if (clk > 340000)
-			maxM = 2;
+	if (bios->version.major < 0x60) {
+		int cv = bios->version.chip;
+		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+			if (clk > 250000)
+				maxM = 6;
+			if (clk > 340000)
+				maxM = 2;
+		} else if (cv < 0x40) {
+			if (clk > 150000)
+				maxM = 6;
+			if (clk > 200000)
+				maxM = 4;
+			if (clk > 340000)
+				maxM = 2;
+		}
 	}
 
 	P = 1 << maxP;
@@ -227,10 +230,12 @@
 {
 	int ret;
 
-	if (!info->vco2.max_freq) {
+	if (!info->vco2.max_freq || !N2) {
 		ret = getMNP_single(subdev, info, freq, N1, M1, P);
-		*N2 = 1;
-		*M2 = 1;
+		if (N2) {
+			*N2 = 1;
+			*M2 = 1;
+		}
 	} else {
 		ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
 	}
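
The pllnv04.c hunk above changes the calling convention: the second-stage outputs N2/M2 become optional, and passing N2 == NULL now both selects the single-stage path and avoids writing through null pointers. A sketch of the shape of that contract; all helpers and values here are stand-ins, not the driver's real calculation:

#include <stdio.h>
#include <stddef.h>

static int calc_single(int freq, int *N1, int *M1, int *P)
{
        *N1 = 13; *M1 = 2; *P = 0;   /* stand-in result */
        return freq;
}

static int calc_double(int freq, int *N1, int *M1, int *N2, int *M2, int *P)
{
        *N1 = 13; *M1 = 2; *N2 = 5; *M2 = 1; *P = 0;
        return freq;
}

static int pll_calc(int has_vco2, int freq,
                    int *N1, int *M1, int *N2, int *M2, int *P)
{
        if (!has_vco2 || !N2) {
                int ret = calc_single(freq, N1, M1, P);
                if (N2) {            /* only touch outputs if provided */
                        *N2 = 1;
                        *M2 = 1;
                }
                return ret;
        }
        return calc_double(freq, N1, M1, N2, M2, P);
}

int main(void)
{
        int N1, M1, P;
        /* caller that only cares about the first VCO stage */
        int freq = pll_calc(1, 250000, &N1, &M1, NULL, NULL, &P);
        printf("freq=%d N1=%d M1=%d P=%d\n", freq, N1, M1, P);
        return 0;
}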
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 2fe1f71..8eca457 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -45,6 +45,7 @@
 	lM = max(lM, (int)info->vco1.min_m);
 	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
 	hM = min(hM, (int)info->vco1.max_m);
+	lM = min(lM, hM);
 
 	for (M = lM; M <= hM; M++) {
 		u32 tmp = freq * *P * M;
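
The one-line pllnva3.c fix guards against an empty search range: lM comes from the max-input-frequency limit, hM from the min-input-frequency one, and when the VBIOS limits clamp hM below lM the "for (M = lM; M <= hM; M++)" loop would never run. Pulling lM down to hM forces at least one candidate. A toy reproduction with invented limits:

#include <stdio.h>

int main(void)
{
        /* invented VBIOS limits: refclk 100 MHz, vco1 input range
         * 5-10 MHz, M allowed between 1 and 6 */
        int refclk = 100000, max_in = 10000, min_in = 5000;
        int min_m = 1, max_m = 6;

        int lM = refclk / max_in;          /* as computed in pllnva3.c */
        if (lM < min_m)
                lM = min_m;
        int hM = (refclk + min_in) / min_in;
        if (hM > max_m)
                hM = max_m;

        printf("lM=%d hM=%d -> loop %s\n", lM, hM,
               lM <= hM ? "runs" : "is empty");
        if (lM > hM)
                lM = hM;                   /* the fix: lM = min(lM, hM) */
        printf("after clamp: lM=%d hM=%d\n", lM, hM);
        return 0;
}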
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
new file mode 100644
index 0000000..fb33f06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_CLK_SEQ_H__
+#define __NVKM_CLK_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define clk_init(s,p)       hwsq_init(&(s)->base, (p))
+#define clk_exec(s,e)       hwsq_exec(&(s)->base, (e))
+#define clk_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define clk_rd32(s,r)       hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define clk_wr32(s,r,d)     hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define clk_mask(s,r,m,d)   hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define clk_setf(s,f,d)     hwsq_setf(&(s)->base, (f), (d))
+#define clk_wait(s,f,d)     hwsq_wait(&(s)->base, (f), (d))
+#define clk_nsec(s,n)       hwsq_nsec(&(s)->base, (n))
+
+#endif
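
The clk_* wrappers in seq.h rely on ## token pasting: clk_rd32(s, mast) expands to a call on the member literally named r_mast, so the per-register fields declared in nv50.h (r_fifo, r_mast, ...) are addressed by bare name at the call site. A minimal demonstration with stubbed hwsq types:

#include <stdio.h>

struct reg { unsigned addr, data; };
struct seq { struct reg r_fifo, r_mast; };

static unsigned stub_rd32(struct reg *r) { return r->data; }

/* same shape as the seq.h macros, minus the real hwsq plumbing */
#define clk_have(s,r)  ((s)->r_##r.addr != 0x000000)
#define clk_rd32(s,r)  stub_rd32(&(s)->r_##r)

int main(void)
{
        struct seq s = {
                .r_fifo = { 0x002504, 0x11 },
                .r_mast = { 0x00c040, 0x22 },
        };
        if (clk_have(&s, mast))        /* pastes into s->r_mast.addr */
                printf("mast = 0x%x\n", clk_rd32(&s, mast));
        return 0;
}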
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index b22357d..27c8235 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -168,7 +168,8 @@
 		/* downclock -- write new NM first */
 		nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
 
-	if (chip_version < 0x17 && chip_version != 0x11)
+	if ((chip_version < 0x17 || chip_version == 0x1a) &&
+	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
 	nv_rd32(devinit, reg);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 463b08f..8d274db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -38,12 +38,18 @@
 nv10_devinit_meminit(struct nouveau_devinit *devinit)
 {
 	struct nv10_devinit_priv *priv = (void *)devinit;
-	const int mem_width[] = { 0x10, 0x00, 0x20 };
-	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+	static const int mem_width[] = { 0x10, 0x00, 0x20 };
+	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17)
+		mem_width_count = 3;
+	else
+		mem_width_count = 2;
+
 	/* Map the framebuffer aperture */
 	fb = fbmem_init(nv_device(priv)->pdev);
 	if (!fb) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index 821cd75..f009d8a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -22,9 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "subdev/fb.h"
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+
+#include "priv.h"
 
 int
 nouveau_fb_bios_memtype(struct nouveau_bios *bios)
@@ -106,9 +107,9 @@
 
 int
 nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, struct nouveau_oclass *ramcls,
-		   int length, void **pobject)
+		   struct nouveau_oclass *oclass, int length, void **pobject)
 {
+	struct nouveau_fb_impl *impl = (void *)oclass;
 	static const char *name[] = {
 		[NV_MEM_TYPE_UNKNOWN] = "unknown",
 		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -132,8 +133,10 @@
 	if (ret)
 		return ret;
 
+	pfb->memtype_valid = impl->memtype;
+
 	ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
-				  ramcls, NULL, 0, &ram);
+				  impl->ram, NULL, 0, &ram);
 	if (ret) {
 		nv_fatal(pfb, "error detecting memory configuration!!\n");
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
new file mode 100644
index 0000000..34f9605
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+int
+nouveau_gddr5_calc(struct nouveau_ram *ram)
+{
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	int pd, lf, xd, vh, vr, vo;
+	int WL, CL, WR, at, dt, ds;
+	int rq = ram->freq < 1000000; /* XXX */
+
+	switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+	case 0x11:
+		pd =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
+		lf =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
+		xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
+		vh =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
+		vr =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
+		vo =   nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	switch (!!ram->timing.data * ram->timing.version) {
+	case 0x20:
+		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+		at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
+		dt =  nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
+		ds =  nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
+		return -EINVAL;
+	CL -= 5;
+	WR -= 4;
+
+	ram->mr[0] &= ~0xf7f;
+	ram->mr[0] |= (WR & 0x0f) << 8;
+	ram->mr[0] |= (CL & 0x0f) << 3;
+	ram->mr[0] |= (WL & 0x07) << 0;
+
+	ram->mr[1] &= ~0x0bf;
+	ram->mr[1] |= (xd & 0x01) << 7;
+	ram->mr[1] |= (at & 0x03) << 4;
+	ram->mr[1] |= (dt & 0x03) << 2;
+	ram->mr[1] |= (ds & 0x03) << 0;
+
+	ram->mr[3] &= ~0x020;
+	ram->mr[3] |= (rq & 0x01) << 5;
+
+	if (!vo)
+		vo = (ram->mr[6] & 0xff0) >> 4;
+	if (ram->mr[6] & 0x001)
+		pd = 1; /* binary driver does this.. bug? */
+	ram->mr[6] &= ~0xff1;
+	ram->mr[6] |= (vo & 0xff) << 4;
+	ram->mr[6] |= (pd & 0x01) << 0;
+
+	if (!(ram->mr[7] & 0x100))
+		vr = 0; /* binary driver does this.. bug? */
+	ram->mr[7] &= ~0x188;
+	ram->mr[7] |= (vr & 0x01) << 8;
+	ram->mr[7] |= (vh & 0x01) << 7;
+	ram->mr[7] |= (lf & 0x01) << 3;
+	return 0;
+}
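
Reading the mask/shift pairs in nouveau_gddr5_calc() back out: MR0 appears to carry WR-4 in bits 11:8, CL-5 in bits 6:3 and WL in bits 2:0, each packed with a clear-then-OR so unrelated bits (e.g. bit 7) survive. A small model of just the MR0 packing, with invented timings; the field layout is inferred from the patch, not from a GDDR5 datasheet:

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_mr0(uint32_t mr0, int WL, int CL, int WR)
{
        if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
                return mr0;           /* driver returns -EINVAL here */
        CL -= 5;                      /* encoded relative to minimums */
        WR -= 4;

        mr0 &= ~0xf7f;                /* clear all three fields */
        mr0 |= (WR & 0x0f) << 8;
        mr0 |= (CL & 0x0f) << 3;
        mr0 |= (WL & 0x07) << 0;
        return mr0;
}

int main(void)
{
        /* invented timings: WL=5, CL=12, WR=16 -> mr0 = 0xc3d */
        printf("mr0 = 0x%03x\n", pack_mr0(0, 5, 12, 16));
        return 0;
}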
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index 1f103c7..8309fe3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -22,14 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv04.h"
 
 #define NV04_PFB_CFG0						0x00100200
 
-struct nv04_fb_priv {
-	struct nouveau_fb base;
-};
-
 bool
 nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
@@ -57,30 +53,37 @@
 	return 0;
 }
 
-static int
+int
 nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
+	struct nv04_fb_impl *impl = (void *)oclass;
 	struct nv04_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv04_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = impl->tile.regions;
+	priv->base.tile.init = impl->tile.init;
+	priv->base.tile.comp = impl->tile.comp;
+	priv->base.tile.fini = impl->tile.fini;
+	priv->base.tile.prog = impl->tile.prog;
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x04),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv04_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv04_ram_oclass,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
new file mode 100644
index 0000000..06ce71f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
@@ -0,0 +1,55 @@
+#ifndef __NVKM_FB_NV04_H__
+#define __NVKM_FB_NV04_H__
+
+#include "priv.h"
+
+struct nv04_fb_priv {
+	struct nouveau_fb base;
+};
+
+int  nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+
+struct nv04_fb_impl {
+	struct nouveau_fb_impl base;
+	struct {
+		int regions;
+		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+			     struct nouveau_fb_tile *);
+		void (*fini)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+		void (*prog)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+	} tile;
+};
+
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index be069b5..ffb7ec6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv10_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -57,34 +53,19 @@
 	nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
-static int
-nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv10_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv10_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv10_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x10),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv10_fb_ctor,
+struct nouveau_oclass *
+nv10_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x10),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 57a2af0..9159a5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv1a_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv1a_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv1a_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv1a_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x1a),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv1a_fb_ctor,
+struct nouveau_oclass *
+nv1a_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x1a),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index b18c4e6..f003c1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv20_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -80,35 +76,20 @@
 	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-static int
-nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv20_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv20_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv20_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x20),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv20_fb_ctor,
+struct nouveau_oclass *
+nv20_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x20),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv20_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index 32ccabf..f34f422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv25_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -46,35 +42,20 @@
 	}
 }
 
-static int
-nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv25_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv25_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv25_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x25),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv25_fb_ctor,
+struct nouveau_oclass *
+nv25_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x25),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv25_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index bef756d..69093f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv30_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -67,7 +63,7 @@
 }
 
 static int
-calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
 {
 	struct nouveau_device *device = nv_device(priv);
 	int b = (device->chipset > 0x30 ?
@@ -78,7 +74,7 @@
 }
 
 static int
-calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
 {
 	int j, x = 0;
 
@@ -95,7 +91,7 @@
 nv30_fb_init(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
-	struct nv30_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret, i, j;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -124,35 +120,20 @@
 	return 0;
 }
 
-static int
-nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv30_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv30_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv30_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x30),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv30_fb_ctor,
+struct nouveau_oclass *
+nv30_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x30),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv30_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 097d8e3..161b06e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv35_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@
 	}
 }
 
-static int
-nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv35_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv35_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv35_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x35),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv35_fb_ctor,
+struct nouveau_oclass *
+nv35_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x35),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv35_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 9d6d9df..2dd3d0a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv36_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@
 	}
 }
 
-static int
-nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv36_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv36_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv36_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x36),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv36_fb_ctor,
+struct nouveau_oclass *
+nv36_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x36),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv36_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 33b4393..95a115a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv40_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -50,7 +46,7 @@
 static int
 nv40_fb_init(struct nouveau_object *object)
 {
-	struct nv40_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -61,36 +57,20 @@
 	return 0;
 }
 
-static int
-nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv40_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv40_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv40_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv40_fb_ctor,
+struct nouveau_oclass *
+nv40_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x40),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv40_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv40_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
new file mode 100644
index 0000000..581f808
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_FB_NV40_H__
+#define __NVKM_FB_NV40_H__
+
+#include "priv.h"
+
+struct nv40_ram {
+	struct nouveau_ram base;
+	u32 ctrl;
+	u32 coef;
+};
+
+int  nv40_ram_calc(struct nouveau_fb *, u32);
+int  nv40_ram_prog(struct nouveau_fb *);
+void nv40_ram_tidy(struct nouveau_fb *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
index 02cd837..b239a86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv41_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
@@ -43,7 +39,7 @@
 int
 nv41_fb_init(struct nouveau_object *object)
 {
-	struct nv41_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -54,36 +50,20 @@
 	return 0;
 }
 
-static int
-nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv41_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv41_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x41),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv41_fb_ctor,
+struct nouveau_oclass *
+nv41_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x41),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
index c5246c2..d847820 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv44_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -52,7 +48,7 @@
 int
 nv44_fb_init(struct nouveau_object *object)
 {
-	struct nv44_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -64,35 +60,19 @@
 	return 0;
 }
 
-static int
-nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv44_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv44_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv44_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x44),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv44_fb_ctor,
+struct nouveau_oclass *
+nv44_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x44),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv44_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
index e2b5790..a5b7751 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv46_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -44,35 +40,19 @@
 	tile->pitch = pitch;
 }
 
-static int
-nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv46_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv46_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x46),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv46_fb_ctor,
+struct nouveau_oclass *
+nv46_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x46),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
index fe6a227..3bea142 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv47_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv47_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv47_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x47),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv47_fb_ctor,
+struct nouveau_oclass *
+nv47_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x47),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
index 5eca99b..666cbd5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv49_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv49_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv49_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv49_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x49),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv49_fb_ctor,
+struct nouveau_oclass *
+nv49_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x49),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv49_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
index 1190b78..42e64f3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv4e_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv4e_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv4e_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv4e_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x4e),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv4e_fb_ctor,
+struct nouveau_oclass *
+nv4e_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x4e),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv4e_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index da614ec..cbc7f00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -27,14 +27,9 @@
 #include <core/engctx.h>
 #include <core/object.h>
 
-#include "priv.h"
 #include <subdev/bios.h>
 
-struct nv50_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c08_page;
-	dma_addr_t r100c08;
-};
+#include "nv50.h"
 
 int
 nv50_fb_memtype[0x80] = {
@@ -48,7 +43,7 @@
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-static bool
+bool
 nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
@@ -239,7 +234,7 @@
 		pr_cont("0x%08x\n", st1);
 }
 
-static int
+int
 nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -248,7 +243,7 @@
 	struct nv50_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv50_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -264,12 +259,11 @@
 		nv_warn(priv, "failed 0x100c08 page alloc\n");
 	}
 
-	priv->base.memtype_valid = nv50_fb_memtype_valid;
 	nv_subdev(priv)->intr = nv50_fb_intr;
 	return 0;
 }
 
-static void
+void
 nv50_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -284,10 +278,10 @@
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nv50_fb_init(struct nouveau_object *object)
 {
-	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_impl *impl = (void *)object->oclass;
 	struct nv50_fb_priv *priv = (void *)object;
 	int ret;
 
@@ -303,33 +297,20 @@
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
-	switch (device->chipset) {
-	case 0x50:
-		nv_wr32(priv, 0x100c90, 0x000707ff);
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_wr32(priv, 0x100c90, 0x000d0fff);
-		break;
-	case 0xaf:
-		nv_wr32(priv, 0x100c90, 0x089d1fff);
-		break;
-	default:
-		nv_wr32(priv, 0x100c90, 0x001d07ff);
-		break;
-	}
-
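+	/* chipset-specific trap value now comes from the subclassed oclass */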
+	nv_wr32(priv, 0x100c90, impl->trap);
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x50),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_fb_ctor,
 		.dtor = nv50_fb_dtor,
 		.init = nv50_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nv50_ram_oclass,
+	.trap = 0x000707ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
new file mode 100644
index 0000000..c5e5a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FB_NV50_H__
+#define __NVKM_FB_NV50_H__
+
+#include "priv.h"
+
+struct nv50_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
+int  nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+void nv50_fb_dtor(struct nouveau_object *);
+int  nv50_fb_init(struct nouveau_object *);
+
+struct nv50_fb_impl {
+	struct nouveau_fb_impl base;
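+	/* chipset-specific value for 0x100c90; exact meaning unknown */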
+	u32 trap;
+};
+
+#define nv50_ram_create(p,e,o,d)                                               \
+	nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int  nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, int, void **);
+int  nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
+		  u32 memtype, struct nouveau_mem **);
+void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
+extern int nv50_fb_memtype[0x80];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
new file mode 100644
index 0000000..cf0e767
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nv84_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x84),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nv50_ram_oclass,
+	.trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
new file mode 100644
index 0000000..dab6e1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nva3_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xa3),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nva3_ram_oclass,
+	.trap = 0x000d0fff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
new file mode 100644
index 0000000..cba8e68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaa_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xaa),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nvaa_ram_oclass,
+	.trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
new file mode 100644
index 0000000..5423faa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaf_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xaf),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nvaa_ram_oclass,
+	.trap = 0x089d1fff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index f35d76f..e5fc37c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -22,24 +22,18 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
-
-struct nvc0_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
-};
+#include "nvc0.h"
 
 extern const u8 nvc0_pte_storage_type_map[256];
 
-static bool
+bool
 nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
-static int
+int
 nvc0_fb_init(struct nouveau_object *object)
 {
 	struct nvc0_fb_priv *priv = (void *)object;
@@ -54,7 +48,7 @@
 	return 0;
 }
 
-static void
+void
 nvc0_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -69,7 +63,7 @@
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -78,13 +72,11 @@
 	struct nvc0_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nvc0_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nvc0_fb_memtype_valid;
-
 	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (priv->r100c10_page) {
 		priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
@@ -97,14 +89,15 @@
 	return 0;
 }
 
-
-struct nouveau_oclass
-nvc0_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_fb_ctor,
 		.dtor = nvc0_fb_dtor,
 		.init = nvc0_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &nvc0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
new file mode 100644
index 0000000..9e1931e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_RAM_NVC0_H__
+#define __NVKM_RAM_NVC0_H__
+
+#include "priv.h"
+#include "nv50.h"
+
+struct nvc0_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c10_page;
+	dma_addr_t r100c10;
+};
+
+int  nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+void nvc0_fb_dtor(struct nouveau_object *);
+int  nvc0_fb_init(struct nouveau_object *);
+bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
+
+#define nvc0_ram_create(p,e,o,d)                                               \
+	nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int  nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, int, void **);
+int  nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
+		  struct nouveau_mem **);
+void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
new file mode 100644
index 0000000..595db50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+struct nouveau_oclass *
+nve0_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xe0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fb_ctor,
+		.dtor = nvc0_fb_dtor,
+		.init = nvc0_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &nve0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index db9d6dd..4931252 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -12,6 +12,8 @@
 #define nouveau_ram_fini(p,s)                                                  \
 	nouveau_object_fini(&(p)->base, (s))
 
+#define nouveau_ram_create_(p,e,o,s,d)                                         \
+	nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
 #define _nouveau_ram_dtor nouveau_object_destroy
 #define _nouveau_ram_init nouveau_object_init
 #define _nouveau_ram_fini nouveau_object_fini
@@ -26,10 +28,16 @@
 extern struct nouveau_oclass nv49_ram_oclass;
 extern struct nouveau_oclass nv4e_ram_oclass;
 extern struct nouveau_oclass nv50_ram_oclass;
+extern struct nouveau_oclass nva3_ram_oclass;
+extern struct nouveau_oclass nvaa_ram_oclass;
 extern struct nouveau_oclass nvc0_ram_oclass;
+extern struct nouveau_oclass nve0_ram_oclass;
 
-#define nouveau_fb_create(p,e,c,r,d)                                           \
-	nouveau_fb_create_((p), (e), (c), (r), sizeof(**d), (void **)d)
+int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram);
+
+#define nouveau_fb_create(p,e,c,d)                                             \
+	nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
 #define nouveau_fb_destroy(p) ({                                               \
 	struct nouveau_fb *pfb = (p);                                          \
 	_nouveau_fb_dtor(nv_object(pfb));                                      \
@@ -44,44 +52,21 @@
 })
 
 int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, struct nouveau_oclass *,
-		       int length, void **pobject);
+		       struct nouveau_oclass *, int, void **);
 void _nouveau_fb_dtor(struct nouveau_object *);
 int  _nouveau_fb_init(struct nouveau_object *);
 int  _nouveau_fb_fini(struct nouveau_object *, bool);
 
-struct nouveau_bios;
-int  nouveau_fb_bios_memtype(struct nouveau_bios *);
+struct nouveau_fb_impl {
+	struct nouveau_oclass base;
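+	/* per-chipset ram class and memtype validator, formerly set up
+	 * by each fb constructor */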
+	struct nouveau_oclass *ram;
+	bool (*memtype)(struct nouveau_fb *, u32);
+};
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
-void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int  nv30_fb_init(struct nouveau_object *);
-void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
-		       struct nouveau_fb_tile *);
-
-int  nv41_fb_init(struct nouveau_object *);
-void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int  nv44_fb_init(struct nouveau_object *);
-void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
-extern int nv50_fb_memtype[0x80];
+struct nouveau_bios;
+int  nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
new file mode 100644
index 0000000..0f57fcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -0,0 +1,118 @@
+#ifndef __NVKM_FBRAM_FUC_H__
+#define __NVKM_FBRAM_FUC_H__
+
+#include <subdev/pwr.h>
+
+struct ramfuc {
+	struct nouveau_memx *memx;
+	struct nouveau_fb *pfb;
+	int sequence;
+};
+
+struct ramfuc_reg {
+	int sequence;
+	bool force;
+	u32 addr[2];
+	u32 data;
+};
+
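+/* a register optionally mirrored at a second address (e.g. the
+ * rank-mirrored mode registers); writes update both copies, reads
+ * use the first address */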
+static inline struct ramfuc_reg
+ramfuc_reg2(u32 addr1, u32 addr2)
+{
+	return (struct ramfuc_reg) {
+		.sequence = 0,
+		.addr = { addr1, addr2 },
+		.data = 0xdeadbeef,
+	};
+}
+
+static inline struct ramfuc_reg
+ramfuc_reg(u32 addr)
+{
+	return ramfuc_reg2(addr, addr);
+}
+
+static inline int
+ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
+{
+	struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+	int ret;
+
+	ret = nouveau_memx_init(ppwr, &ram->memx);
+	if (ret)
+		return ret;
+
+	ram->sequence++;
+	ram->pfb = pfb;
+	return 0;
+}
+
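+/* finish building the script; exec=false discards it instead of
+ * submitting it to PWR */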
+static inline int
+ramfuc_exec(struct ramfuc *ram, bool exec)
+{
+	int ret = 0;
+	if (ram->pfb) {
+		ret = nouveau_memx_fini(&ram->memx, exec);
+		ram->pfb = NULL;
+	}
+	return ret;
+}
+
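+/* read from hardware only if this sequence hasn't written the register
+ * yet; afterwards the shadow copy is authoritative */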
+static inline u32
+ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+	if (reg->sequence != ram->sequence)
+		reg->data = nv_rd32(ram->pfb, reg->addr[0]);
+	return reg->data;
+}
+
+static inline void
+ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
+{
+	reg->sequence = ram->sequence;
+	reg->data = data;
+	if (reg->addr[0] != reg->addr[1])
+		nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data);
+	nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data);
+}
+
+static inline void
+ramfuc_nuke(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+	reg->force = true;
+}
+
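+/* read-modify-write that skips the write when nothing would change,
+ * unless the register has been nuked (force flag) */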
+static inline u32
+ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
+{
+	u32 temp = ramfuc_rd32(ram, reg);
+	if (temp != ((temp & ~mask) | data) || reg->force) {
+		ramfuc_wr32(ram, reg, (temp & ~mask) | data);
+		reg->force = false;
+	}
+	return temp;
+}
+
+static inline void
+ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
+{
+	nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
+}
+
+static inline void
+ramfuc_nsec(struct ramfuc *ram, u32 nsec)
+{
+	nouveau_memx_nsec(ram->memx, nsec);
+}
+
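+/* wrappers for chipset code; 'r' is token-pasted, so e.g.
+ * ram_wr32(fuc, mr[0], v) operates on fuc->r_mr[0] */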
+#define ram_init(s,p)       ramfuc_init(&(s)->base, (p))
+#define ram_exec(s,e)       ramfuc_exec(&(s)->base, (e))
+#define ram_have(s,r)       ((s)->r_##r.addr[0] != 0x000000)
+#define ram_rd32(s,r)       ramfuc_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d)     ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r)       ramfuc_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d)   ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
+#define ram_nsec(s,n)       ramfuc_nsec(&(s)->base, (n))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
index ee49ac4..7648beb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
@@ -22,7 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
+#include "nv40.h"
+
+int
+nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nvbios_pll pll;
+	int N1, M1, N2, M2;
+	int log2P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x04, &pll);
+	if (ret) {
+		nv_error(pfb, "mclk pll data not found\n");
+		return ret;
+	}
+
+	ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
+			    &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		return ret;
+
+	ram->ctrl  = 0x80000000 | (log2P << 16);
+	ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
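+	/* a 1:1 second stage (N2 == M2) is presumably bypassed by the
+	 * 0x100 bit rather than programmed */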
+	if (N2 == M2) {
+		ram->ctrl |= 0x00000100;
+		ram->coef  = (N1 << 8) | M1;
+	} else {
+		ram->ctrl |= 0x40000000;
+		ram->coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+	return 0;
+}
+
+int
+nv40_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv40_ram *ram = (void *)pfb->ram;
+	struct bit_entry M;
+	u32 crtc_mask = 0;
+	u8  sr1[2];
+	int i;
+
+	/* determine which CRTCs are active, fetch VGA_SR1 for each */
+	for (i = 0; i < 2; i++) {
+		u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+		u32 cnt = 0;
+		do {
+			if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
+				nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+				if (!(sr1[i] & 0x20))
+					crtc_mask |= (1 << i);
+				break;
+			}
+			udelay(1);
+		} while (cnt++ < 32);
+	}
+
+	/* wait for vblank start on active crtcs, disable memory access */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+	}
+
+	/* prepare ram for reclocking */
+	nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
+	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+	nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+	/* change the PLL of each memory partition */
+	nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
+	switch (nv_device(pfb)->chipset) {
+	case 0x40:
+	case 0x45:
+	case 0x41:
+	case 0x42:
+	case 0x47:
+		nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
+		nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x004048, ram->coef);
+		nv_wr32(pfb, 0x004030, ram->coef);
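+		/* fall through - also program the 0x4038 PLL */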
+	case 0x43:
+	case 0x49:
+	case 0x4b:
+		nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x00403c, ram->coef);
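+		/* fall through - all chipsets program the 0x4020 PLL */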
+	default:
+		nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x004024, ram->coef);
+		break;
+	}
+	udelay(100);
+	nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+
+	/* re-enable normal operation of memory controller */
+	nv_wr32(pfb, 0x1002dc, 0x00000000);
+	nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+	udelay(100);
+
+	/* execute memory reset script from vbios */
+	if (!bit_entry(bios, 'M', &M)) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(pfb),
+			.bios = bios,
+			.offset = nv_ro16(bios, M.offset + 0x00),
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	/* make sure we're in vblank (hopefully the same one as before), and
+	 * then re-enable crtc memory access
+	 */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+	}
+
+	return 0;
+}
+
+void
+nv40_ram_tidy(struct nouveau_fb *pfb)
+{
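+	/* nv40 reclocks synchronously in prog(), nothing to clean up */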
+}
 
 static int
 nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +177,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pbus1218 = nv_rd32(pfb, 0x001218);
 	int ret;
 
@@ -40,15 +187,18 @@
 		return ret;
 
 	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
 	}
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
index 1dab7e1..d64498a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,15 +40,18 @@
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
index 25fff84..089acac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,13 +40,16 @@
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index ab7ef0a..baa013a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb914 = nv_rd32(pfb, 0x100914);
 	int ret;
 
@@ -40,15 +40,18 @@
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 903baff..76762a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -23,8 +23,215 @@
  */
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/timing.h>
+#include <subdev/clock/pll.h>
+#include <subdev/fb.h>
+
+#include <core/option.h>
 #include <core/mm.h>
-#include "priv.h"
+
+#include "ramseq.h"
+
+#include "nv50.h"
+
+struct nv50_ramseq {
+	struct hwsq base;
+	struct hwsq_reg r_0x002504;
+	struct hwsq_reg r_0x004008;
+	struct hwsq_reg r_0x00400c;
+	struct hwsq_reg r_0x00c040;
+	struct hwsq_reg r_0x100210;
+	struct hwsq_reg r_0x1002d0;
+	struct hwsq_reg r_0x1002d4;
+	struct hwsq_reg r_0x1002dc;
+	struct hwsq_reg r_0x100da0[8];
+	struct hwsq_reg r_0x100e20;
+	struct hwsq_reg r_0x100e24;
+	struct hwsq_reg r_0x611200;
+	struct hwsq_reg r_timing[9];
+	struct hwsq_reg r_mr[4];
+};
+
+struct nv50_ram {
+	struct nouveau_ram base;
+	struct nv50_ramseq hwsq;
+};
+
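+/* extra register writes apparently needed on at least one board (the
+ * name suggests a Quadro FX 5800); always enabled for now */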
+#define QFX5800NVA0 1
+
+static int
+nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+	struct nvbios_perfE perfE;
+	struct nvbios_pll mpll;
+	struct bit_entry M;
+	struct {
+		u32 data;
+		u8  size;
+	} ramcfg, timing;
+	u8  ver, hdr, cnt, strap;
+	u32 data;
+	int N1, M1, N2, M2, P;
+	int ret, i;
+
+	/* look up closest matching performance table entry for frequency */
+	i = 0;
+	do {
+		ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+					   &ramcfg.size, &perfE);
+		if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
+		    (ramcfg.size < 2)) {
+			nv_error(pfb, "invalid/missing perftab entry\n");
+			return -EINVAL;
+		}
+	} while (perfE.memory < freq);
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 3);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data += hdr + (strap * ramcfg.size);
+
+	/* look up memory timings, if the bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+		if (!timing.data || ver != 0x10 || hdr < 0x12) {
+			nv_error(pfb, "invalid/missing timing entry "
+				 "%02x %04x %02x %02x\n",
+				 strap, timing.data, ver, hdr);
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = ram_init(hwsq, nv_subdev(pfb));
+	if (ret)
+		return ret;
+
+	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
+	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
+	ram_wr32(hwsq, 0x611200, 0x00003300);
+	ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
+	ram_nsec(hwsq, 8000);
+	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
+	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
+	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+	ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
+	ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
+	mpll.vco2.max_freq = 0;
+	if (ret == 0) {
+		ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+				   &N1, &M1, &N2, &M2, &P);
+		if (ret == 0)
+			ret = -EINVAL;
+	}
+
+	if (ret < 0)
+		return ret;
+
+	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
+	ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
+	ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
+					     (P << 22) | (P << 16));
+#if QFX5800NVA0
+	for (i = 0; i < 8; i++)
+		ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
+#endif
+	ram_nsec(hwsq, 96000); /*XXX*/
+	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
+
+	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
+
+	ram_nsec(hwsq, 12000);
+
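+	/* rewrite the mode registers; masks/values are placeholder zeroes
+	 * for now, with ram_nuke forcing the writes despite unchanged data */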
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+		ram_nuke(hwsq, mr[0]); /* force update */
+		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		break;
+	case NV_MEM_TYPE_GDDR3:
+		ram_mask(hwsq, mr[2], 0x000, 0x000);
+		ram_nuke(hwsq, mr[0]); /* force update */
+		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		break;
+	default:
+		break;
+	}
+
+	ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
+
+	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+
+#if QFX5800NVA0
+	ram_nuke(hwsq, 0x100e24);
+	ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
+	ram_nuke(hwsq, 0x100e20);
+	ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
+#endif
+
+	ram_mask(hwsq, mr[0], 0x100, 0x100);
+	ram_mask(hwsq, mr[0], 0x100, 0x000);
+
+	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
+	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+	ram_wr32(hwsq, 0x611200, 0x00003330);
+	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+	return 0;
+}
+
+static int
+nv50_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+
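+	/* the built sequence only touches hardware if NvMemExec is set */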
+	ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nv50_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+	ram_exec(hwsq, false);
+}
 
 void
 __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
@@ -57,7 +264,7 @@
 	kfree(mem);
 }
 
-static int
+int
 nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nouveau_mem **pmem)
 {
@@ -160,77 +367,114 @@
 	return rblock_size;
 }
 
-static int
-nv50_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
-		struct nouveau_oclass *oclass, void *data, u32 datasize,
-		struct nouveau_object **pobject)
+int
+nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, int length, void **pobject)
 {
-	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_device *device = nv_device(pfb);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_ram *ram;
 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 size;
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_ram *ram;
 	int ret;
 
-	ret = nouveau_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
+	ram = *pobject;
 	if (ret)
 		return ret;
 
 	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) |
-		       ((ram->size & 0x000000ff) << 32);
+	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
 
-	size = (ram->size >> 12) - rsvd_head - rsvd_tail;
-	switch (device->chipset) {
-	case 0xaa:
-	case 0xac:
-	case 0xaf: /* IGPs, no reordering, no real VRAM */
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
-		if (ret)
-			return ret;
-
-		ram->type   = NV_MEM_TYPE_STOLEN;
-		ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
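+	/* determine memory type from straps; a strap of 1 can't tell DDR2
+	 * from DDR3, so the vbios is consulted for that case */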
+	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 1:
+		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+			ram->type = NV_MEM_TYPE_DDR3;
+		else
+			ram->type = NV_MEM_TYPE_DDR2;
 		break;
+	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
+	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
 	default:
-		switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-		case 0: ram->type = NV_MEM_TYPE_DDR1; break;
-		case 1:
-			if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-				ram->type = NV_MEM_TYPE_DDR3;
-			else
-				ram->type = NV_MEM_TYPE_DDR2;
-			break;
-		case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-		case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-		case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
-		default:
-			break;
-		}
-
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
-				      nv50_fb_vram_rblock(pfb, ram) >> 12);
-		if (ret)
-			return ret;
-
-		ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-		ram->tags  =  nv_rd32(pfb, 0x100320);
 		break;
 	}
 
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+			      (rsvd_head + rsvd_tail),
+			      nv50_fb_vram_rblock(pfb, ram) >> 12);
+	if (ret)
+		return ret;
+
+	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+	ram->tags  =  nv_rd32(pfb, 0x100320);
 	ram->get = nv50_ram_get;
 	ram->put = nv50_ram_put;
 	return 0;
 }
 
+static int
+nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_ram *ram;
+	int ret, i;
+
+	ret = nv50_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+	case NV_MEM_TYPE_GDDR3:
+		ram->base.calc = nv50_ram_calc;
+		ram->base.prog = nv50_ram_prog;
+		ram->base.tidy = nv50_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
+	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
+	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
+	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
+	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
+	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
+	for (i = 0; i < 8; i++)
+		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
+	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
+	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
+
+	for (i = 0; i < 9; i++)
+		ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
+
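+	/* the mode registers appear to have a second copy when two ranks
+	 * are present, so shadow both locations in that case
+	 */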
+	if (ram->base.ranks > 1) {
+		ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
+		ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
+		ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
+		ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
+	} else {
+		ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
+		ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
+		ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
+		ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
+	}
+
+	return 0;
+}
+
 struct nouveau_oclass
 nv50_ram_oclass = {
-	.handle = 0,
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_ram_create,
+		.ctor = nv50_ram_ctor,
 		.dtor = _nouveau_ram_dtor,
 		.init = _nouveau_ram_init,
 		.fini = _nouveau_ram_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
new file mode 100644
index 0000000..f6292cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock/nva3.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nv50.h"
+
+struct nva3_ramfuc {
+	struct ramfuc base;
+	struct ramfuc_reg r_0x004000;
+	struct ramfuc_reg r_0x004004;
+	struct ramfuc_reg r_0x004018;
+	struct ramfuc_reg r_0x004128;
+	struct ramfuc_reg r_0x004168;
+	struct ramfuc_reg r_0x100200;
+	struct ramfuc_reg r_0x100210;
+	struct ramfuc_reg r_0x100220[9];
+	struct ramfuc_reg r_0x1002d0;
+	struct ramfuc_reg r_0x1002d4;
+	struct ramfuc_reg r_0x1002dc;
+	struct ramfuc_reg r_0x10053c;
+	struct ramfuc_reg r_0x1005a0;
+	struct ramfuc_reg r_0x1005a4;
+	struct ramfuc_reg r_0x100714;
+	struct ramfuc_reg r_0x100718;
+	struct ramfuc_reg r_0x10071c;
+	struct ramfuc_reg r_0x100760;
+	struct ramfuc_reg r_0x1007a0;
+	struct ramfuc_reg r_0x1007e0;
+	struct ramfuc_reg r_0x10f804;
+	struct ramfuc_reg r_0x1110e0;
+	struct ramfuc_reg r_0x111100;
+	struct ramfuc_reg r_0x111104;
+	struct ramfuc_reg r_0x611200;
+	struct ramfuc_reg r_mr[4];
+};
+
+struct nva3_ram {
+	struct nouveau_ram base;
+	struct nva3_ramfuc fuc;
+};
+
+static int
+nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	struct nva3_clock_info mclk;
+	struct bit_entry M;
+	u8  ver, cnt, strap;
+	u32 data;
+	struct {
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	u32 r004018, r100760, ctrl;
+	u32 unk714, unk718, unk71c;
+	int ret;
+
+	/* lookup memory config data relevant to the target frequency */
+	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+					 &cnt, &ramcfg.size);
+	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver,
+						 &timing.size);
+		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
+	if (ret < 0) {
+		nv_error(pfb, "failed mclk calculation\n");
+		return ret;
+	}
+
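+	/* from here on, register accesses are recorded into the ramfuc
+	 * script and only executed later via ram_exec()
+	 */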
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
+	/* XXX: unclear where the 750MHz threshold comes from */
+	if (freq <= 750000) {
+		r004018 = 0x10000000;
+		r100760 = 0x22222222;
+	} else {
+		r004018 = 0x00000000;
+		r100760 = 0x00000000;
+	}
+
+	ctrl = ram_rd32(fuc, 0x004000);
+	if (ctrl & 0x00000008) {
+		if (mclk.pll) {
+			ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
+			ram_wr32(fuc, 0x004004, mclk.pll);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
+			ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
+			ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
+			ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
+		}
+	} else {
+		u32 ssel = 0x00000101;
+		if (mclk.clk)
+			ssel |= mclk.clk;
+		else
+			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+		ram_mask(fuc, 0x004168, 0x003f3141, ssel);
+	}
+
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+		ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
+		ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
+	}
+
+	if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02))
+		ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+	ram_wr32(fuc, 0x611200, 0x00003300);
+	if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10))
+		ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+
+	ram_wr32(fuc, 0x1002d4, 0x00000001);
+	ram_wr32(fuc, 0x1002d0, 0x00000001);
+	ram_wr32(fuc, 0x1002d0, 0x00000001);
+	ram_wr32(fuc, 0x100210, 0x00000000);
+	ram_wr32(fuc, 0x1002dc, 0x00000001);
+	ram_nsec(fuc, 2000);
+
+	ctrl = ram_rd32(fuc, 0x004000);
+	if (!(ctrl & 0x00000008) && mclk.pll) {
+		ram_wr32(fuc, 0x004000, (ctrl |=  0x00000008));
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+		ram_wr32(fuc, 0x004018, 0x00001000);
+		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
+		ram_wr32(fuc, 0x004004, mclk.pll);
+		ram_wr32(fuc, 0x004000, (ctrl |=  0x00000001));
+		udelay(64);
+		ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+		udelay(20);
+	} else
+	if (!mclk.pll) {
+		ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
+		ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+		ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+	}
+
+	if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) {
+		u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) |
+			      nv_ro08(bios, ramcfg.data + 0x05);
+		u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07));
+		u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 |
+			     (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 |
+			     (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) |
+			     0x80000000;
+		ram_wr32(fuc, 0x1005a0, unk5a0);
+		ram_wr32(fuc, 0x1005a4, unk5a4);
+		ram_wr32(fuc, 0x10f804, unk804);
+		ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
+		ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+		ram_mask(fuc, 0x100760, 0x22222222, r100760);
+		ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
+		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
+	}
+
+	if (mclk.pll) {
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
+		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+	}
+
+	/*XXX: LEAVE */
+	ram_wr32(fuc, 0x1002dc, 0x00000000);
+	ram_wr32(fuc, 0x1002d4, 0x00000001);
+	ram_wr32(fuc, 0x100210, 0x80000000);
+	ram_nsec(fuc, 1000);
+	ram_nsec(fuc, 1000);
+
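+	/* the 0/0 masks below re-emit registers with their current values;
+	 * ram_nuke() forces the write even when the value is unchanged
+	 */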
+	ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+
+	data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000;
+	ram_mask(fuc, 0x100200, 0x00001000, data);
+
+	unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
+	unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+	unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20))
+		unk714 |= 0xf0000000;
+	if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04))
+		unk714 |= 0x00000010;
+	ram_wr32(fuc, 0x100714, unk714);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01)
+		unk71c |= 0x00000100;
+	ram_wr32(fuc, 0x10071c, unk71c);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02)
+		unk718 |= 0x00000100;
+	ram_wr32(fuc, 0x100718, unk718);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)
+		ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_nsec(fuc, 1000);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+	ram_nsec(fuc, 1000);
+
+	ram_nsec(fuc, 2000);
+	ram_nsec(fuc, 12000);
+
+	ram_wr32(fuc, 0x611200, 0x00003330);
+	if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02))
+		ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+		ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
+		ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
+	}
+
+	if (mclk.pll) {
+		ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
+	}
+
+	return 0;
+}
+
+static int
+nva3_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nva3_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
+
+static int
+nva3_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nva3_ram   *ram = (void *)object;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* prepare for ddr link training, and load training patterns */
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3: {
+		static const u32 pattern[16] = {
+			0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+			0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+			0x33333333, 0x55555555, 0x77777777, 0x66666666,
+			0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+		};
+
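+		/* fill all 0x30 training entries, cycling through the
+		 * 16-dword pattern above; the 0x100538 write is not
+		 * understood yet
+		 */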
+		nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
+		nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+		nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+			nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+			nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+			nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+		}
+	}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	struct nva3_ram *ram;
+	int ret, i;
+
+	ret = nv50_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+		ram->base.calc = nva3_ram_calc;
+		ram->base.prog = nva3_ram_prog;
+		ram->base.tidy = nva3_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
+	ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
+	ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
+	ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
+	ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+	ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
+	ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
+	for (i = 0; i < 9; i++)
+		ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+	ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
+	ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
+	ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
+	ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
+	ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
+	ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+	ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
+	ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
+	ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+	ram->fuc.r_0x100760 = ramfuc_reg(0x100760);
+	ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0);
+	ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0);
+	ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
+	ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0);
+	ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
+	ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+	if (ram->base.ranks > 1) {
+		ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
+		ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
+		ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
+		ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
+	} else {
+		ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
+		ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
+		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_ram_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = nva3_ram_init,
+		.fini = _nouveau_ram_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
new file mode 100644
index 0000000..00f2ca7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_ram *ram;
+	int ret;
+
+	ret = nouveau_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
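+	/* the low byte of 0x10020c holds bits 39:32 of the memory size */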
+	ram->size = nv_rd32(pfb, 0x10020c);
+	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+			      (rsvd_head + rsvd_tail), 1);
+	if (ret)
+		return ret;
+
+	ram->type   = NV_MEM_TYPE_STOLEN;
+	ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+	ram->get = nv50_ram_get;
+	ram->put = nv50_ram_put;
+	return 0;
+}
+
+struct nouveau_oclass
+nvaa_ram_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvaa_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = _nouveau_ram_init,
+		.fini = _nouveau_ram_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index cf97c4d..f464547 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,9 +23,414 @@
  */
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
 #include <subdev/ltcg.h>
 
-#include "priv.h"
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nvc0.h"
+
+struct nvc0_ramfuc {
+	struct ramfuc base;
+
+	struct ramfuc_reg r_0x10fe20;
+	struct ramfuc_reg r_0x10fe24;
+	struct ramfuc_reg r_0x137320;
+	struct ramfuc_reg r_0x137330;
+
+	struct ramfuc_reg r_0x132000;
+	struct ramfuc_reg r_0x132004;
+	struct ramfuc_reg r_0x132100;
+
+	struct ramfuc_reg r_0x137390;
+
+	struct ramfuc_reg r_0x10f290;
+	struct ramfuc_reg r_0x10f294;
+	struct ramfuc_reg r_0x10f298;
+	struct ramfuc_reg r_0x10f29c;
+	struct ramfuc_reg r_0x10f2a0;
+
+	struct ramfuc_reg r_0x10f300;
+	struct ramfuc_reg r_0x10f338;
+	struct ramfuc_reg r_0x10f340;
+	struct ramfuc_reg r_0x10f344;
+	struct ramfuc_reg r_0x10f348;
+
+	struct ramfuc_reg r_0x10f910;
+	struct ramfuc_reg r_0x10f914;
+
+	struct ramfuc_reg r_0x100b0c;
+	struct ramfuc_reg r_0x10f050;
+	struct ramfuc_reg r_0x10f090;
+	struct ramfuc_reg r_0x10f200;
+	struct ramfuc_reg r_0x10f210;
+	struct ramfuc_reg r_0x10f310;
+	struct ramfuc_reg r_0x10f314;
+	struct ramfuc_reg r_0x10f610;
+	struct ramfuc_reg r_0x10f614;
+	struct ramfuc_reg r_0x10f800;
+	struct ramfuc_reg r_0x10f808;
+	struct ramfuc_reg r_0x10f824;
+	struct ramfuc_reg r_0x10f830;
+	struct ramfuc_reg r_0x10f988;
+	struct ramfuc_reg r_0x10f98c;
+	struct ramfuc_reg r_0x10f990;
+	struct ramfuc_reg r_0x10f998;
+	struct ramfuc_reg r_0x10f9b0;
+	struct ramfuc_reg r_0x10f9b4;
+	struct ramfuc_reg r_0x10fb04;
+	struct ramfuc_reg r_0x10fb08;
+	struct ramfuc_reg r_0x137300;
+	struct ramfuc_reg r_0x137310;
+	struct ramfuc_reg r_0x137360;
+	struct ramfuc_reg r_0x1373ec;
+	struct ramfuc_reg r_0x1373f0;
+	struct ramfuc_reg r_0x1373f8;
+
+	struct ramfuc_reg r_0x61c140;
+	struct ramfuc_reg r_0x611200;
+
+	struct ramfuc_reg r_0x13d8f4;
+};
+
+struct nvc0_ram {
+	struct nouveau_ram base;
+	struct nvc0_ramfuc fuc;
+	struct nvbios_pll refpll;
+	struct nvbios_pll mempll;
+};
+
+static void
+nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
+{
+	struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_fb *pfb = nouveau_fb(ram);
+	u32 part = nv_rd32(pfb, 0x022438), i;
+	u32 mask = nv_rd32(pfb, 0x022554);
+	u32 addr = 0x110974;
+
+	ram_wr32(fuc, 0x10f910, magic);
+	ram_wr32(fuc, 0x10f914, magic);
+
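+	/* if the high bit of 'magic' is set, wait for each enabled
+	 * partition to signal training completion
+	 */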
+	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
+		if (mask & (1 << i))
+			continue;
+		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+	}
+}
+
+static int
+nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_clock *clk = nouveau_clock(pfb);
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	struct bit_entry M;
+	u8  ver, cnt, strap;
+	u32 data;
+	struct {
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	int ref, div, out;
+	int from, mode;
+	int N1, M1, P;
+	int ret;
+
+	/* lookup memory config data relevant to the target frequency */
+	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+					 &cnt, &ramcfg.size);
+	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver,
+						 &timing.size);
+		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
+	/* determine current mclk configuration */
+	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
+
+	/* determine target mclk configuration */
+	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
+		ref = clk->read(clk, nv_clk_src_sppll0);
+	else
+		ref = clk->read(clk, nv_clk_src_sppll1);
+	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
+	out = (ref * 2) / (div + 2);
+	mode = freq != out;
+
+	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
+
+	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
+		ram_nuke(fuc, 0x132000);
+		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
+		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
+	}
+
+	if (mode == 1) {
+		ram_nuke(fuc, 0x10fe20);
+		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
+		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
+	}
+
+// 0x00020034 // 0x0000000a
+	ram_wr32(fuc, 0x132100, 0x00000001);
+
+	if (mode == 1 && from == 0) {
+		/* calculate refpll */
+		ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
+				    ram->mempll.refclk, &N1, NULL, &M1, &P);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc refpll\n");
+			return ret ? ret : -ERANGE;
+		}
+
+		ram_wr32(fuc, 0x10fe20, 0x20010000);
+		ram_wr32(fuc, 0x137320, 0x00000003);
+		ram_wr32(fuc, 0x137330, 0x81200006);
+		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
+		ram_wr32(fuc, 0x10fe20, 0x20010001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+
+		/* calculate mempll */
+		ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+				   &N1, NULL, &M1, &P);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc mempll\n");
+			return ret ? ret : -ERANGE;
+		}
+
+		ram_wr32(fuc, 0x10fe20, 0x20010005);
+		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
+		ram_wr32(fuc, 0x132000, 0x18010101);
+		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+	} else
+	if (mode == 0) {
+		ram_wr32(fuc, 0x137300, 0x00000003);
+	}
+
+	if (from == 0) {
+		ram_nuke(fuc, 0x10fb04);
+		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
+		ram_nuke(fuc, 0x10fb08);
+		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
+		ram_wr32(fuc, 0x10f988, 0x2004ff00);
+		ram_wr32(fuc, 0x10f98c, 0x003fc040);
+		ram_wr32(fuc, 0x10f990, 0x20012001);
+		ram_wr32(fuc, 0x10f998, 0x00011a00);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+	} else {
+		ram_wr32(fuc, 0x10f988, 0x20010000);
+		ram_wr32(fuc, 0x10f98c, 0x00000000);
+		ram_wr32(fuc, 0x10f990, 0x20012001);
+		ram_wr32(fuc, 0x10f998, 0x00010a00);
+	}
+
+	if (from == 0) {
+// 0x00020039 // 0x000000ba
+	}
+
+// 0x0002003a // 0x00000002
+	ram_wr32(fuc, 0x100b0c, 0x00080012);
+// 0x00030014 // 0x00000000 // 0x02b5f070
+// 0x00030014 // 0x00010000 // 0x02b5f070
+	ram_wr32(fuc, 0x611200, 0x00003300);
+// 0x00020034 // 0x0000000a
+// 0x00030020 // 0x00000001 // 0x00000000
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+	ram_wr32(fuc, 0x10f210, 0x00000000);
+	ram_nsec(fuc, 1000);
+	if (mode == 0)
+		nvc0_ram_train(fuc, 0x000c1001);
+	ram_wr32(fuc, 0x10f310, 0x00000001);
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f090, 0x00000061);
+	ram_wr32(fuc, 0x10f090, 0xc000007f);
+	ram_nsec(fuc, 1000);
+
+	if (from == 0) {
+		ram_wr32(fuc, 0x10f824, 0x00007fd4);
+	} else {
+		ram_wr32(fuc, 0x1373ec, 0x00020404);
+	}
+
+	if (mode == 0) {
+		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+		ram_wr32(fuc, 0x10f830, 0x41500010);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
+		ram_wr32(fuc, 0x10f050, 0xff000090);
+		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
+		ram_wr32(fuc, 0x1373f0, 0x00000003);
+		ram_wr32(fuc, 0x137310, 0x81201616);
+		ram_wr32(fuc, 0x132100, 0x00000001);
+// 0x00020039 // 0x000000ba
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+		ram_wr32(fuc, 0x1373f0, 0x00000001);
+		ram_wr32(fuc, 0x10f824, 0x00007e77);
+		ram_wr32(fuc, 0x132000, 0x18030001);
+		ram_wr32(fuc, 0x10f090, 0x4000007e);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f314, 0x00000001);
+		ram_wr32(fuc, 0x10f210, 0x80000000);
+		ram_wr32(fuc, 0x10f338, 0x00300220);
+		ram_wr32(fuc, 0x10f300, 0x0000011d);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f290, 0x02060505);
+		ram_wr32(fuc, 0x10f294, 0x34208288);
+		ram_wr32(fuc, 0x10f298, 0x44050411);
+		ram_wr32(fuc, 0x10f29c, 0x0000114c);
+		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
+		ram_wr32(fuc, 0x10f614, 0x40044f77);
+		ram_wr32(fuc, 0x10f610, 0x40044f77);
+		ram_wr32(fuc, 0x10f344, 0x00600009);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f348, 0x00700008);
+		ram_wr32(fuc, 0x61c140, 0x19240000);
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+		nvc0_ram_train(fuc, 0x80021001);
+		nvc0_ram_train(fuc, 0x80081001);
+		ram_wr32(fuc, 0x10f340, 0x00500004);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f830, 0x01300017);
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+// 0x00030020 // 0x00000000 // 0x00000000
+// 0x00020034 // 0x0000000b
+		ram_wr32(fuc, 0x100b0c, 0x00080028);
+		ram_wr32(fuc, 0x611200, 0x00003330);
+	} else {
+		ram_wr32(fuc, 0x10f800, 0x00001800);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x1373ec, 0x00020404);
+		ram_wr32(fuc, 0x1373f0, 0x00000003);
+		ram_wr32(fuc, 0x10f830, 0x40700010);
+		ram_wr32(fuc, 0x10f830, 0x40500010);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x1373f8, 0x00000000);
+		ram_wr32(fuc, 0x132100, 0x00000101);
+		ram_wr32(fuc, 0x137310, 0x89201616);
+		ram_wr32(fuc, 0x10f050, 0xff000090);
+		ram_wr32(fuc, 0x1373ec, 0x00030404);
+		ram_wr32(fuc, 0x1373f0, 0x00000002);
+	// 0x00020039 // 0x00000011
+		ram_wr32(fuc, 0x132100, 0x00000001);
+		ram_wr32(fuc, 0x1373f8, 0x00002000);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
+		ram_wr32(fuc, 0x10f830, 0x00500010);
+		ram_wr32(fuc, 0x10f200, 0x00ce1000);
+		ram_wr32(fuc, 0x10f090, 0x4000007e);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f314, 0x00000001);
+		ram_wr32(fuc, 0x10f210, 0x80000000);
+		ram_wr32(fuc, 0x10f338, 0x00300200);
+		ram_wr32(fuc, 0x10f300, 0x0000084d);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f290, 0x0b343825);
+		ram_wr32(fuc, 0x10f294, 0x3483028e);
+		ram_wr32(fuc, 0x10f298, 0x440c0600);
+		ram_wr32(fuc, 0x10f29c, 0x0000214c);
+		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
+		ram_wr32(fuc, 0x10f200, 0x00ce0000);
+		ram_wr32(fuc, 0x10f614, 0x60044e77);
+		ram_wr32(fuc, 0x10f610, 0x60044e77);
+		ram_wr32(fuc, 0x10f340, 0x00500000);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f344, 0x00600228);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f348, 0x00700000);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x61c140, 0x09a40000);
+
+		nvc0_ram_train(fuc, 0x800e1008);
+
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f800, 0x00001804);
+	// 0x00030020 // 0x00000000 // 0x00000000
+	// 0x00020034 // 0x0000000b
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x100b0c, 0x00080028);
+		ram_wr32(fuc, 0x611200, 0x00003330);
+		ram_nsec(fuc, 100000);
+		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
+		ram_wr32(fuc, 0x10f9b4, 0x00002f50);
+
+		nvc0_ram_train(fuc, 0x010c1001);
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
+// 0x00020016 // 0x00000000
+
+	if (mode == 0)
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+	return 0;
+}
+
+static int
+nvc0_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nvc0_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
 
 extern const u8 nvc0_pte_storage_type_map[256];
 
@@ -110,10 +515,9 @@
 	return 0;
 }
 
-static int
-nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
-		struct nouveau_oclass *oclass, void *data, u32 size,
-		struct nouveau_object **pobject)
+int
+nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, int size, void **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
 	struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -127,8 +531,8 @@
 	bool uniform = true;
 	int ret, part;
 
-	ret = nouveau_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
+	ram = *pobject;
 	if (ret)
 		return ret;
 
@@ -182,13 +586,158 @@
 	return 0;
 }
 
+static int
+nvc0_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nvc0_ram   *ram = (void *)object;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* prepare for ddr link training, and load training patterns */
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5: {
+		static const u8  train0[] = {
+			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+		};
+		static const u32 train1[] = {
+			0x00000000, 0xffffffff,
+			0x55555555, 0xaaaaaaaa,
+			0x33333333, 0xcccccccc,
+			0xf0f0f0f0, 0x0f0f0f0f,
+			0x00ff00ff, 0xff00ff00,
+			0x0000ffff, 0xffff0000,
+		};
+
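+		/* each entry is written with the 0x100 bit both set and
+		 * cleared, cycling through the 12-entry tables above
+		 */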
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+		}
+	}	break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nvc0_ram *ram;
+	int ret;
+
+	ret = nvc0_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
+	if (ret) {
+		nv_error(ram, "mclk refpll data not found\n");
+		return ret;
+	}
+
+	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
+	if (ret) {
+		nv_error(ram, "mclk pll data not found\n");
+		return ret;
+	}
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		ram->base.calc = nvc0_ram_calc;
+		ram->base.prog = nvc0_ram_prog;
+		ram->base.tidy = nvc0_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
+	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
+	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
+
+	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
+
+	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
+
+	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+
+	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
+	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
+	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
+	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
+	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
+
+	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
+	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
+	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
+	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
+	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
+	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
+	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
+	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
+	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
+	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
+	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
+	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
+	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
+	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
+
+	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
+	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ram_oclass = {
 	.handle = 0,
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_ram_create,
+		.ctor = nvc0_ram_ctor,
 		.dtor = _nouveau_ram_dtor,
-		.init = _nouveau_ram_init,
+		.init = nvc0_ram_init,
 		.fini = _nouveau_ram_fini,
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
new file mode 100644
index 0000000..bc86cfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <subdev/timer.h>
+
+#include <core/option.h>
+
+#include "nvc0.h"
+
+#include "ramfuc.h"
+
+struct nve0_ramfuc {
+	struct ramfuc base;
+
+	struct nvbios_pll refpll;
+	struct nvbios_pll mempll;
+
+	struct ramfuc_reg r_gpioMV;
+	u32 r_funcMV[2];
+	struct ramfuc_reg r_gpio2E;
+	u32 r_func2E[2];
+	struct ramfuc_reg r_gpiotrig;
+
+	struct ramfuc_reg r_0x132020;
+	struct ramfuc_reg r_0x132028;
+	struct ramfuc_reg r_0x132024;
+	struct ramfuc_reg r_0x132030;
+	struct ramfuc_reg r_0x132034;
+	struct ramfuc_reg r_0x132000;
+	struct ramfuc_reg r_0x132004;
+	struct ramfuc_reg r_0x132040;
+
+	struct ramfuc_reg r_0x10f248;
+	struct ramfuc_reg r_0x10f290;
+	struct ramfuc_reg r_0x10f294;
+	struct ramfuc_reg r_0x10f298;
+	struct ramfuc_reg r_0x10f29c;
+	struct ramfuc_reg r_0x10f2a0;
+	struct ramfuc_reg r_0x10f2a4;
+	struct ramfuc_reg r_0x10f2a8;
+	struct ramfuc_reg r_0x10f2ac;
+	struct ramfuc_reg r_0x10f2cc;
+	struct ramfuc_reg r_0x10f2e8;
+	struct ramfuc_reg r_0x10f250;
+	struct ramfuc_reg r_0x10f24c;
+	struct ramfuc_reg r_0x10fec4;
+	struct ramfuc_reg r_0x10fec8;
+	struct ramfuc_reg r_0x10f604;
+	struct ramfuc_reg r_0x10f614;
+	struct ramfuc_reg r_0x10f610;
+	struct ramfuc_reg r_0x100770;
+	struct ramfuc_reg r_0x100778;
+	struct ramfuc_reg r_0x10f224;
+
+	struct ramfuc_reg r_0x10f870;
+	struct ramfuc_reg r_0x10f698;
+	struct ramfuc_reg r_0x10f694;
+	struct ramfuc_reg r_0x10f6b8;
+	struct ramfuc_reg r_0x10f808;
+	struct ramfuc_reg r_0x10f670;
+	struct ramfuc_reg r_0x10f60c;
+	struct ramfuc_reg r_0x10f830;
+	struct ramfuc_reg r_0x1373ec;
+	struct ramfuc_reg r_0x10f800;
+	struct ramfuc_reg r_0x10f82c;
+
+	struct ramfuc_reg r_0x10f978;
+	struct ramfuc_reg r_0x10f910;
+	struct ramfuc_reg r_0x10f914;
+
+	struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
+
+	struct ramfuc_reg r_0x62c000;
+	struct ramfuc_reg r_0x10f200;
+	struct ramfuc_reg r_0x10f210;
+	struct ramfuc_reg r_0x10f310;
+	struct ramfuc_reg r_0x10f314;
+	struct ramfuc_reg r_0x10f318;
+	struct ramfuc_reg r_0x10f090;
+	struct ramfuc_reg r_0x10f69c;
+	struct ramfuc_reg r_0x10f824;
+	struct ramfuc_reg r_0x1373f0;
+	struct ramfuc_reg r_0x1373f4;
+	struct ramfuc_reg r_0x137320;
+	struct ramfuc_reg r_0x10f65c;
+	struct ramfuc_reg r_0x10f6bc;
+	struct ramfuc_reg r_0x100710;
+	struct ramfuc_reg r_0x10f750;
+};
+
+struct nve0_ram {
+	struct nouveau_ram base;
+	struct nve0_ramfuc fuc;
+	int from;
+	int mode;
+	int N1, fN1, M1, P1;
+	int N2, M2, P2;
+};
+
+/*******************************************************************************
+ * GDDR5
+ ******************************************************************************/
+static void
+train(struct nve0_ramfuc *fuc, u32 magic)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_fb *pfb = nouveau_fb(ram);
+	const int mc = nv_rd32(pfb, 0x02243c);
+	int i;
+
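+	/* apply the training mode, then wait for each unit to report
+	 * completion
+	 */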
+	ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
+	ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
+	for (i = 0; i < mc; i++) {
+		const u32 addr = 0x110974 + (i * 0x1000);
+		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+	}
+}
+
+static void
+r1373f4_init(struct nve0_ramfuc *fuc)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
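+	/* note: the pre-decrement permanently modifies ram->P2 each call */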
+	const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
+	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+	const u32 runk0 = ram->fN1 << 16;
+	const u32 runk1 = ram->fN1;
+
+	if (ram->from == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+	/* (re)program refpll, if required */
+	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+		ram_wr32(fuc, 0x137320, 0x00000000);
+		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+		ram_wr32(fuc, 0x132024, rcoef);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+	}
+
+	/* (re)program mempll, if required */
+	if (ram->mode == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+}
+
+static void
+r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
+	u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+	u32 tmp;
+
+	tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+	ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
+	ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
+	if (ram->mode == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
+		ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+	}
+	ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
+}
+
+static int
+nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	const u32 rammap = ram->base.rammap.data;
+	const u32 ramcfg = ram->base.ramcfg.data;
+	const u32 timing = ram->base.timing.data;
+	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+	int mv = 1; /*XXX*/
+	u32 mask, data;
+
+	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	/* MR1: turn termination on early, for some reason.. */
+	if ((ram->base.mr[1] & 0x03c) != 0x030)
+		ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+
+	if (vc == 1 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+
+	ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
+	ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+
+	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_nsec(fuc, 1000);
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_wr32(fuc, 0x10f090, 0x00000061);
+	ram_wr32(fuc, 0x10f090, 0xc000007f);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f698, 0x00000000);
+	ram_wr32(fuc, 0x10f69c, 0x00000000);
+
+	/*XXX: there does appear to be some kind of condition here, simply
+	 *     modifying these bits in the vbios from the default pl0
+	 *     entries shows no change.  however, the data does appear to
+	 *     be correct and may be required for the transition back
+	 */
+	mask = 0x800f07e0;
+	data = 0x00030000;
+	if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+		data |= 0x00040000;
+
+	if (1) {
+		data |= 0x800807e0;
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+		case 0xc0: data &= ~0x00000040; break;
+		case 0x80: data &= ~0x00000100; break;
+		case 0x40: data &= ~0x80000000; break;
+		case 0x00: data &= ~0x00000400; break;
+		}
+
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+		case 0x30: data &= ~0x00000020; break;
+		case 0x20: data &= ~0x00000080; break;
+		case 0x10: data &= ~0x00080000; break;
+		case 0x00: data &= ~0x00000200; break;
+		}
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+		mask |= 0x03000000;
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+		mask |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+		mask |= 0x00004000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+		mask |= 0x00000003;
+	else {
+		mask |= 0x34000000;
+		if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+			mask |= 0x40000000;
+	}
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+	if (ram->from == 2 && ram->mode != 2) {
+		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+		ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
+		ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+		r1373f4_init(fuc);
+		ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
+		r1373f4_fini(fuc, ramcfg);
+		ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
+	} else
+	if (ram->from != 2 && ram->mode != 2) {
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+	}
+
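+	/* flip the MV (presumably memory voltage) gpio if it changed */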
+	if (ram_have(fuc, gpioMV)) {
+		u32 temp  = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+		if (temp != ram_rd32(fuc, gpioMV)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 64000);
+		}
+	}
+
+	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+		ram_nsec(fuc, 20000);
+	}
+
+	if (ram->from != 2 && ram->mode == 2) {
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+		ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
+		ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+		ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
+		ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
+	} else
+	if (ram->from == 2 && ram->mode == 2) {
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+	}
+
+	if (ram->mode != 2) /*XXX*/ {
+		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+	}
+
+	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+	data = nv_ro08(bios, ramcfg + 0x04);
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		ram_wr32(fuc, 0x10f698, 0x01010101 * data);
+		ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+	}
+
+	if (ram->mode != 2) {
+		u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
+		ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+	}
+
+	if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+		data = 0x00000080;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f60c, 0x00000080, data);
+
+	mask = 0x00070000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+		data |= 0x03000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+		data |= 0x00002000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+		data |= 0x00004000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+		data |= 0x00000003;
+	else
+		data |= 0x74000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+		data = 0x00000000;
+	else
+		data = 0x00001000;
+	ram_mask(fuc, 0x10f200, 0x00001000, data);
+
+	if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
+		ram_nsec(fuc, 10000);
+		ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+		data = 0x00100000;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+	data = 0x00000000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+		data |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+		data |= 0x00001000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+		data |= 0x00004000;
+	ram_mask(fuc, 0x10f830, 0x00007000, data);
+
+	/* PFB timing */
+	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+	data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+		data |= 0x70000000;
+	ram_mask(fuc, 0x10f604, 0x70000300, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+		data |= 0x00000100;
+	ram_mask(fuc, 0x10f614, 0x70000000, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
+		data |= 0x00000100;
+	ram_mask(fuc, 0x10f610, 0x70000000, data);
+
+	mask = 0x33f00000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+		data |= 0x20200000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+		data |= 0x12800000;
+	/*XXX: see note above about there probably being some condition
+	 *     for the 10f824 stuff that uses ramcfg 3...
+	 */
+	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+				mask |= 0x00000020;
+			else
+				data |= 0x00000020;
+			mask |= 0x00000004;
+		}
+	} else {
+		mask |= 0x40000020;
+		data |= 0x00000004;
+	}
+
+	ram_mask(fuc, 0x10f808, mask, data);
+
+	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+	data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+		data |= 0x00000004;
+	if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
+		ram_wr32(fuc, 0x10f750, 0x04000009);
+		ram_wr32(fuc, 0x100710, 0x00000000);
+		ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
+	}
+	ram_mask(fuc, 0x100770, 0x00000007, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+		data |= 0x80000000;
+	ram_mask(fuc, 0x100778, 0x00000700, data);
+
+	data = nv_ro16(bios, timing + 0x2c);
+	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+	ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
+
+	data = nv_ro08(bios, timing + 0x30);
+	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+	data = nv_ro16(bios, timing + 0x31);
+	ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
+					    (data & 0x0780) << 10 |
+					    (data & 0x0078) <<  5 |
+					    (data & 0x0007));
+	ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
+					    (data & 0x7000) >> 12);
+
+	ram_wr32(fuc, 0x10f090, 0x4000007e);
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_nsec(fuc, 2000);
+	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+
+	if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+		u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
+		train(fuc, 0xa4010000); /*XXX*/
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f294, temp);
+	}
+
+	ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
+	ram_wr32(fuc, mr[0], ram->base.mr[0]);
+	ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
+	ram_nsec(fuc, 1000);
+	ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
+	ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+	ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
+	ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
+
+	if (vc == 0 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	data  = ram_rd32(fuc, 0x10f978);
+	data &= ~0x00046144;
+	data |=  0x0000000b;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+			data |= 0x0000200c;
+		else
+			data |= 0x00000000;
+	} else {
+		data |= 0x00040044;
+	}
+	ram_wr32(fuc, 0x10f978, data);
+
+	if (ram->mode == 1) {
+		data = ram_rd32(fuc, 0x10f830) | 0x00000001;
+		ram_wr32(fuc, 0x10f830, data);
+	}
+
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		data = 0x88020000;
+		if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+			data |= 0x10000000;
+		if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+			data |= 0x00080000;
+	} else {
+		data = 0xa40e0000;
+	}
+	train(fuc, data);
+	ram_nsec(fuc, 1000);
+
+	if (ram->mode == 2) { /*XXX*/
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
+	}
+
+	/* MR5: (re)enable LP3 if necessary
+	 * XXX: need to find the switch, keeping off for now
+	 */
+	ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+
+	if (ram->mode != 2) {
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
+		ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
+		ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
+	}
+
+	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+		data = 0x00000800;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f200, 0x00000800, data);
+	return 0;
+}
+
+/*******************************************************************************
+ * DDR3
+ ******************************************************************************/
+
+static int
+nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+	const u32 runk0 = ram->fN1 << 16;
+	const u32 runk1 = ram->fN1;
+	const u32 rammap = ram->base.rammap.data;
+	const u32 ramcfg = ram->base.ramcfg.data;
+	const u32 timing = ram->base.timing.data;
+	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+	int mv = 1; /*XXX*/
+	u32 mask, data;
+
+	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	if (vc == 1 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+	if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+		ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
+
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f090, 0x00000060);
+	ram_wr32(fuc, 0x10f090, 0xc000007e);
+
+	/*XXX: there does appear to be some kind of condition here; simply
+	 *     modifying these bits in the vbios from the default pl0
+	 *     entries shows no change.  however, the data does appear to
+	 *     be correct and may be required for the transition back
+	 */
+	mask = 0x00010000;
+	data = 0x00010000;
+
+	if (1) {
+		mask |= 0x800807e0;
+		data |= 0x800807e0;
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+		case 0xc0: data &= ~0x00000040; break;
+		case 0x80: data &= ~0x00000100; break;
+		case 0x40: data &= ~0x80000000; break;
+		case 0x00: data &= ~0x00000400; break;
+		}
+
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+		case 0x30: data &= ~0x00000020; break;
+		case 0x20: data &= ~0x00000080; break;
+		case 0x10: data &= ~0x00080000; break;
+		case 0x00: data &= ~0x00000200; break;
+		}
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+		mask |= 0x03000000;
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+		mask |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+		mask |= 0x00004000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+		mask |= 0x00000003;
+	else
+		mask |= 0x14000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+	data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+	data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+	ram_wr32(fuc, 0x1373ec, data);
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+	/* (re)program refpll, if required */
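+	/* apparent sequence: disable both plls, write the new coefficients
+	 * and fractional-N values, re-enable refpll, then poll 0x137390 for
+	 * what is presumably a lock indication
+	 */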
+	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+		ram_wr32(fuc, 0x137320, 0x00000000);
+		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+		ram_wr32(fuc, 0x132024, rcoef);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+	ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+
+	if (ram_have(fuc, gpioMV)) {
+		u32 temp  = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+		if (temp != ram_rd32(fuc, gpioMV)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 64000);
+		}
+	}
+
+	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+		ram_nsec(fuc, 20000);
+	}
+
+	if (ram->mode != 2) /*XXX*/ {
+		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+	}
+
+	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+	mask = 0x00010000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+		data |= 0x03000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+		data |= 0x00002000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+		data |= 0x00004000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+		data |= 0x00000003;
+	else
+		data |= 0x14000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+	ram_nsec(fuc, 1000);
+
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+		data = 0x00100000;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+	/* PFB timing */
+	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+	mask = 0x33f00000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+		data |= 0x20200000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+		data |= 0x12800000;
+	/*XXX: see note above about there probably being some condition
+	 *     for the 10f824 stuff that uses ramcfg 3...
+	 */
+	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+				mask |= 0x00000020;
+			else
+				data |= 0x00000020;
+			mask |= 0x08000004;
+		}
+		data |= 0x04000000;
+	} else {
+		mask |= 0x44000020;
+		data |= 0x08000004;
+	}
+
+	ram_mask(fuc, 0x10f808, mask, data);
+
+	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+	data = nv_ro16(bios, timing + 0x2c);
+	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+
+	if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6) >
+	    ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
+		data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6;
+	else
+		data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+
+	data = nv_ro08(bios, timing + 0x30);
+	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+	ram_wr32(fuc, 0x10f090, 0x4000007f);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+	ram_nsec(fuc, 1000);
+
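+	/* pulse what is presumably MR0's DLL-reset bit (bit 8); ram_nuke()
+	 * apparently forces the writes to be emitted even if the register
+	 * value appears unchanged
+	 */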
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+
+	ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]);
+	ram_wr32(fuc, mr[0], ram->base.mr[0]);
+	ram_nsec(fuc, 1000);
+
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+
+	if (vc == 0 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	if (ram->mode != 2) {
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+	}
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+		data = 0x00000800;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f200, 0x00000800, data);
+	return 0;
+}
+
+/*******************************************************************************
+ * main hooks
+ ******************************************************************************/
+
+static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	struct bit_entry M;
+	int ret, refclk, strap, i;
+	u32 data;
+	u8  cnt;
+
+	/* lookup memory config data relevant to the target frequency */
+	ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
+						   &ram->base.rammap.version,
+						   &ram->base.rammap.size, &cnt,
+						   &ram->base.ramcfg.size);
+	if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
+	     ram->base.rammap.size < 0x09) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
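+	/* the raw ramcfg strap comes from the straps register; the memory
+	 * table may supply a lookup table to translate the strap value
+	 */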
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ram->base.ramcfg.version = ram->base.rammap.version;
+	ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
+			       (ram->base.ramcfg.size * strap);
+	if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
+	     ram->base.ramcfg.size < 0x08) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
+	if (strap != 0xff) {
+		ram->base.timing.data =
+			nvbios_timing_entry(bios, strap,
+					   &ram->base.timing.version,
+					   &ram->base.timing.size);
+		if (!ram->base.timing.data ||
+		     ram->base.timing.version != 0x20 ||
+		     ram->base.timing.size < 0x33) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		ram->base.timing.data = 0;
+	}
+
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
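+	/* mode 1 drives the memory clock directly from refpll; mode 2 runs
+	 * refpll into mempll for frequencies beyond refpll's vco limit
+	 */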
+	ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+	ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
+
+	/* XXX: this is *not* what nvidia does.  on fermi, nvidia generally
+	 * selects, based on some unknown condition, one of the two possible
+	 * reference frequencies listed in the vbios table for mempll and
+	 * programs refpll to that frequency.
+	 *
+	 * so far, I've seen very weird values being chosen by nvidia on
+	 * kepler boards, with no indication of how or why they're chosen.
+	 */
+	refclk = freq;
+	if (ram->mode == 2)
+		refclk = fuc->mempll.refclk;
+
+	/* calculate refpll coefficients */
+	ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+			   &ram->fN1, &ram->M1, &ram->P1);
+	fuc->mempll.refclk = ret;
+	if (ret <= 0) {
+		nv_error(pfb, "unable to calc refpll\n");
+		return -EINVAL;
+	}
+
+	/* calculate mempll coefficients, if we're using it */
+	if (ram->mode == 2) {
+		/* the post-divider doesn't work: the reg accepts the values
+		 * but appears to ignore them completely.  there *is* a bit
+		 * (bit 28) that appears to divide the clock by 2 if set.
+		 */
+		fuc->mempll.min_p = 1;
+		fuc->mempll.max_p = 2;
+
+		ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+				   &ram->N2, NULL, &ram->M2, &ram->P2);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc mempll\n");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
+		if (ram_have(fuc, mr[i]))
+			ram->base.mr[i] = ram_rd32(fuc, mr[i]);
+	}
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+		ret = nouveau_sddr3_calc(&ram->base);
+		if (ret == 0)
+			ret = nve0_ram_calc_sddr3(pfb, freq);
+		break;
+	case NV_MEM_TYPE_GDDR5:
+		ret = nouveau_gddr5_calc(&ram->base);
+		if (ret == 0)
+			ret = nve0_ram_calc_gddr5(pfb, freq);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nve0_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
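+	/* unless the user enables the NvMemExec option, the generated
+	 * script is built but not actually executed on the hardware
+	 */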
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nve0_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
+
+static int
+nve0_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nve0_ram *ram   = (void *)object;
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	static const u8  train0[] = {
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	u8  ver, hdr, cnt, len, snr, ssz;
+	u32 data, save;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* run the scripts pointed at by the rammap table.  there are
+	 * actually individual pointers for each rammap entry too, but
+	 * nvidia seems to just run the last two entries' scripts early
+	 * on in its init, and never again.  we'll just run them all
+	 * once for now.
+	 *
+	 * I strongly suspect that each script is for a separate mode
+	 * (likely selected by 0x10f65c's lower bits?), and that the
+	 * binary driver skips the one that's already been set up by
+	 * the init tables.
+	 */
+	data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+	if (!data || hdr < 0x15)
+		return -EINVAL;
+
+	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
+	data = nv_ro32(bios, data + 0x10); /* guess u32... */
+	save = nv_rd32(pfb, 0x10f65c);
+	for (i = 0; i < cnt; i++) {
+		nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+		nvbios_exec(&(struct nvbios_init) {
+				.subdev = nv_subdev(pfb),
+				.bios = bios,
+				.offset = nv_ro32(bios, data), /* guess u32 */
+				.execute = 1,
+			    });
+		data += 4;
+	}
+	nv_wr32(pfb, 0x10f65c, save);
+
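+	/* for GDDR5, seed the training-pattern buffers; the alternating
+	 * and inverted values below exercise each data line in both states
+	 */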
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+
+			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+		}
+
+		for (i = 0; i < 0x100; i++) {
+			nv_wr32(pfb, 0x10f968, i);
+			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+		}
+
+		for (i = 0; i < 0x100; i++) {
+			nv_wr32(pfb, 0x10f96c, i);
+			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+	struct dcb_gpio_func func;
+	struct nve0_ram *ram;
+	int ret;
+
+	ret = nvc0_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+	case NV_MEM_TYPE_GDDR5:
+		ram->base.calc = nve0_ram_calc;
+		ram->base.prog = nve0_ram_prog;
+		ram->base.tidy = nve0_ram_tidy;
+		break;
+	default:
+		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
+		break;
+	}
+
+	/* parse bios data for both PLLs */
+	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
+	if (ret) {
+		nv_error(pfb, "mclk refpll data not found\n");
+		return ret;
+	}
+
+	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
+	if (ret) {
+		nv_error(pfb, "mclk pll data not found\n");
+		return ret;
+	}
+
+	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	if (ret == 0) {
+		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
+		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
+		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+	}
+
+	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	if (ret == 0) {
+		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
+		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
+		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+	}
+
+	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
+
+	ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
+	ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
+	ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
+	ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
+	ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
+	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+	ram->fuc.r_0x132040 = ramfuc_reg(0x132040);
+
+	ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
+	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+	ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
+	ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
+	ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
+	ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
+	ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
+	ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
+	ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
+	ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
+	ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
+	ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
+	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+	ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
+	ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
+	ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);
+
+	ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
+	ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
+	ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
+	ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
+	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+	ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
+	ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
+	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+	ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);
+
+	ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
+	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
+		ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
+		ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
+		ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
+		ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
+		ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
+		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
+		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
+		break;
+	case NV_MEM_TYPE_DDR3:
+		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
+		break;
+	default:
+		break;
+	}
+
+	ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
+	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+	ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
+	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+	ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
+	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+	ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
+	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+	ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
+	ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
+	ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
+	ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_ram_oclass = {
+	.handle = 0,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = nve0_ram_init,
+		.fini = _nouveau_ram_fini,
+	}
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
new file mode 100644
index 0000000..571077e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
@@ -0,0 +1,28 @@
+#ifndef __NVKM_FBRAM_SEQ_H__
+#define __NVKM_FBRAM_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define ram_init(s,p)       hwsq_init(&(s)->base, (p))
+#define ram_exec(s,e)       hwsq_exec(&(s)->base, (e))
+#define ram_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define ram_rd32(s,r)       hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d)     hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r)       hwsq_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d)   hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_setf(s,f,d)     hwsq_setf(&(s)->base, (f), (d))
+#define ram_wait(s,f,d)     hwsq_wait(&(s)->base, (f), (d))
+#define ram_nsec(s,n)       hwsq_nsec(&(s)->base, (n))
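+
+/* usage sketch (hypothetical caller; "fuc" and the gpioFB name are
+ * illustrative assumptions, not part of this interface):
+ *
+ *	ram_init(fuc, pfb);
+ *	if (ram_have(fuc, gpioFB))
+ *		ram_mask(fuc, gpioFB, 0x00003000, 0x00001000);
+ *	ram_nsec(fuc, 1000);
+ *	ram_exec(fuc, true);
+ */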
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
new file mode 100644
index 0000000..ebd4cd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+	int id;
+	u8 enc;
+};
+
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+	while (xlat->id >= 0) {
+		if (xlat->id == id)
+			return xlat->enc;
+		xlat++;
+	}
+	return -EINVAL;
+}
+
+static const struct ramxlat
+ramddr3_cl[] = {
+	{ 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
+	{ 11, 14 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 12, 1 }, { 13, 3 }, { 14, 5 },
+	{ -1 }
+};
+
+static const struct ramxlat
+ramddr3_wr[] = {
+	{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 14, 7 }, { 16, 0 },
+	{ -1 }
+};
+
+static const struct ramxlat
+ramddr3_cwl[] = {
+	{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 9, 4 },
+	{ -1 }
+};
+
+int
+nouveau_sddr3_calc(struct nouveau_ram *ram)
+{
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	int WL, CL, WR;
+
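+	/* a missing timing table makes this expression zero, which falls
+	 * through to the unsupported default case below
+	 */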
+	switch (!!ram->timing.data * ram->timing.version) {
+	case 0x20:
+		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	WL = ramxlat(ramddr3_cwl, WL);
+	CL = ramxlat(ramddr3_cl, CL);
+	WR = ramxlat(ramddr3_wr, WR);
+	if (WL < 0 || CL < 0 || WR < 0)
+		return -EINVAL;
+
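+	/* pack the encoded values into the DDR3 mode registers: WR into
+	 * MR0 bits 11:9, CL split across MR0 bits 6:4 and bit 2, and CWL
+	 * into MR2 bits 5:3, matching the JEDEC DDR3 MR layout
+	 */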
+	ram->mr[0] &= ~0xe74;
+	ram->mr[0] |= (WR & 0x07) << 9;
+	ram->mr[0] |= (CL & 0x0e) << 3;
+	ram->mr[0] |= (CL & 0x01) << 2;
+
+	ram->mr[2] &= ~0x038;
+	ram->mr[2] |= (WL & 0x07) << 3;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index d422acc..f572c28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -67,7 +67,7 @@
 		}
 	}
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2895c19..041fd5e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -195,7 +195,7 @@
 
 static int
 nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
-		     struct i2c_board_info *info,
+		     struct nouveau_i2c_board_info *info,
 		     bool (*match)(struct nouveau_i2c_port *,
 				   struct i2c_board_info *))
 {
@@ -208,12 +208,29 @@
 	}
 
 	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
-	for (i = 0; info[i].addr; i++) {
-		if (nv_probe_i2c(port, info[i].addr) &&
-		    (!match || match(port, &info[i]))) {
-			nv_info(i2c, "detected %s: %s\n", what, info[i].type);
+	for (i = 0; info[i].dev.addr; i++) {
+		u8 orig_udelay = 0;
+
+		if ((port->adapter.algo == &i2c_bit_algo) &&
+		    (info[i].udelay != 0)) {
+			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+			nv_debug(i2c, "using custom udelay %d instead of %d\n",
+			         info[i].udelay, algo->udelay);
+			orig_udelay = algo->udelay;
+			algo->udelay = info[i].udelay;
+		}
+
+		if (nv_probe_i2c(port, info[i].dev.addr) &&
+		    (!match || match(port, &info[i].dev))) {
+			nv_info(i2c, "detected %s: %s\n", what,
+				info[i].dev.type);
 			return i;
 		}
+
+		if (orig_udelay) {
+			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+			algo->udelay = orig_udelay;
+		}
 	}
 
 	nv_debug(i2c, "no devices found.\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index e290cfa..b4b9943 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -25,38 +25,52 @@
 #include <subdev/mc.h>
 #include <core/option.h>
 
+static inline u32
+nouveau_mc_intr_mask(struct nouveau_mc *pmc)
+{
+	u32 intr = nv_rd32(pmc, 0x000100);
+	if (intr == 0xffffffff) /* likely fallen off the bus */
+		intr = 0x00000000;
+	return intr;
+}
+
 static irqreturn_t
 nouveau_mc_intr(int irq, void *arg)
 {
 	struct nouveau_mc *pmc = arg;
-	const struct nouveau_mc_intr *map = pmc->intr_map;
-	struct nouveau_device *device = nv_device(pmc);
+	const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
+	const struct nouveau_mc_intr *map = oclass->intr;
 	struct nouveau_subdev *unit;
-	u32 stat, intr;
+	u32 intr;
 
-	intr = stat = nv_rd32(pmc, 0x000100);
-	if (intr == 0xffffffff)
-		return IRQ_NONE;
-	while (stat && map->stat) {
-		if (stat & map->stat) {
-			unit = nouveau_subdev(pmc, map->unit);
-			if (unit && unit->intr)
-				unit->intr(unit);
-			intr &= ~map->stat;
-		}
-		map++;
-	}
-
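+	/* mask off all interrupts (flushing the write with a read-back)
+	 * before sampling the status, so nothing new can arrive while we
+	 * rearm MSI and dispatch to the per-unit handlers
+	 */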
+	nv_wr32(pmc, 0x000140, 0x00000000);
+	nv_rd32(pmc, 0x000140);
+	intr = nouveau_mc_intr_mask(pmc);
 	if (pmc->use_msi)
-		nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
+		oclass->msi_rearm(pmc);
 
 	if (intr) {
-		nv_error(pmc, "unknown intr 0x%08x\n", stat);
+		u32 stat = intr = nouveau_mc_intr_mask(pmc);
+		while (map->stat) {
+			if (intr & map->stat) {
+				unit = nouveau_subdev(pmc, map->unit);
+				if (unit && unit->intr)
+					unit->intr(unit);
+				stat &= ~map->stat;
+			}
+			map++;
+		}
+
+		if (stat)
+			nv_error(pmc, "unknown intr 0x%08x\n", stat);
 	}
 
-	if (stat == IRQ_HANDLED)
-		pm_runtime_mark_last_busy(&device->pdev->dev);
-	return stat ? IRQ_HANDLED : IRQ_NONE;
+	nv_wr32(pmc, 0x000140, 0x00000001);
+	return intr ? IRQ_HANDLED : IRQ_NONE;
 }
 
 int
@@ -91,37 +101,42 @@
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass,
-		   const struct nouveau_mc_intr *intr_map,
-		   int length, void **pobject)
+		   struct nouveau_oclass *bclass, int length, void **pobject)
 {
+	const struct nouveau_mc_oclass *oclass = (void *)bclass;
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
 	int ret;
 
-	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+	ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
 				     "master", length, pobject);
 	pmc = *pobject;
 	if (ret)
 		return ret;
 
-	pmc->intr_map = intr_map;
-
 	switch (device->pdev->device & 0x0ff0) {
-	case 0x00f0: /* BR02? */
-	case 0x02e0: /* BR02? */
-		pmc->use_msi = false;
+	case 0x00f0:
+	case 0x02e0:
+		/* BR02? it's not yet clear exactly how these would be handled */
 		break;
 	default:
-		pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
-		if (pmc->use_msi) {
-			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
-			if (pmc->use_msi) {
-				nv_info(pmc, "MSI interrupts enabled\n");
-				nv_wr08(device, 0x00088068, 0xff);
-			}
+		switch (device->chipset) {
+		case 0xaa: break; /* reported broken, nv also disables it */
+		default:
+			pmc->use_msi = true;
+			break;
 		}
-		break;
+	}
+
+	pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
+	if (pmc->use_msi && oclass->msi_rearm) {
+		pmc->use_msi = pci_enable_msi(device->pdev) == 0;
+		if (pmc->use_msi) {
+			nv_info(pmc, "MSI interrupts enabled\n");
+			oclass->msi_rearm(pmc);
+		}
+	} else {
+		pmc->use_msi = false;
 	}
 
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 64aa4ed..2d787e4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -22,17 +22,14 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv04_mc_priv {
-	struct nouveau_mc base;
-};
+#include "nv04.h"
 
 const struct nouveau_mc_intr
 nv04_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
 	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00010000, NVDEV_ENGINE_DISP },
 	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
@@ -42,22 +39,6 @@
 	{}
 };
 
-static int
-nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv04_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 int
 nv04_mc_init(struct nouveau_object *object)
 {
@@ -69,13 +50,30 @@
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv04_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+int
+nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass *
+nv04_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv04_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv04_mc_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
new file mode 100644
index 0000000..b0d5c31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_MC_NV04_H__
+#define __NVKM_MC_NV04_H__
+
+#include <subdev/mc.h>
+
+struct nv04_mc_priv {
+	struct nouveau_mc base;
+};
+
+int  nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+
+extern const struct nouveau_mc_intr nv04_mc_intr[];
+int  nv04_mc_init(struct nouveau_object *);
+void nv40_mc_msi_rearm(struct nouveau_mc *);
+int  nv50_mc_init(struct nouveau_object *);
+extern const struct nouveau_mc_intr nv50_mc_intr[];
+extern const struct nouveau_mc_intr nvc0_mc_intr[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
new file mode 100644
index 0000000..5b1faec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+void
+nv40_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+	struct nv04_mc_priv *priv = (void *)pmc;
+	nv_wr08(priv, 0x088068, 0xff);
+}
+
+struct nouveau_oclass *
+nv40_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x40),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv04_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nv04_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index d989178..3bfee5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -22,32 +22,12 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv44_mc_priv {
-	struct nouveau_mc base;
-};
-
-static int
-nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv44_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
+#include "nv04.h"
 
 static int
 nv44_mc_init(struct nouveau_object *object)
 {
-	struct nv44_mc_priv *priv = (void *)object;
+	struct nv04_mc_priv *priv = (void *)object;
 	u32 tmp = nv_rd32(priv, 0x10020c);
 
 	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
@@ -60,13 +40,15 @@
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv44_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x44),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv44_mc_ctor,
+struct nouveau_oclass *
+nv44_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x44),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv44_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv04_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 2b1afe2..e8822a93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -22,13 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
+#include "nv04.h"
 
-struct nv50_mc_priv {
-	struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
 nv50_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_MPEG },
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
@@ -45,37 +41,30 @@
 	{},
 };
 
-static int
-nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
+static void
+nv50_mc_msi_rearm(struct nouveau_mc *pmc)
 {
-	struct nv50_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nouveau_device *device = nv_device(pmc);
+	pci_write_config_byte(device->pdev, 0x68, 0xff);
 }
 
 int
 nv50_mc_init(struct nouveau_object *object)
 {
-	struct nv50_mc_priv *priv = (void *)object;
+	struct nv04_mc_priv *priv = (void *)object;
 	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv50_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_mc_ctor,
+struct nouveau_oclass *
+nv50_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv50_mc_intr,
+	.msi_rearm = nv50_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
new file mode 100644
index 0000000..5f45411
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nv94_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x94),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nv50_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 0671041..f8a6f18 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -22,11 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv98_mc_priv {
-	struct nouveau_mc base;
-};
+#include "nv04.h"
 
 static const struct nouveau_mc_intr
 nv98_mc_intr[] = {
@@ -36,6 +32,7 @@
 	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84:NVA3 */
 	{ 0x00008000, NVDEV_ENGINE_BSP },
 	{ 0x00020000, NVDEV_ENGINE_VP },
+	{ 0x00040000, NVDEV_SUBDEV_PWR },	/* NVA3:NVC0 */
 	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -47,29 +44,15 @@
 	{},
 };
 
-static int
-nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv98_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nouveau_oclass
-nv98_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x98),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv98_mc_ctor,
+struct nouveau_oclass *
+nv98_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x98),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv98_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 104175c..c02b476 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -22,13 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
+#include "nv04.h"
 
-struct nvc0_mc_priv {
-	struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
 nvc0_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_PPP },
 	{ 0x00000020, NVDEV_ENGINE_COPY0 },
@@ -41,6 +37,7 @@
 	{ 0x00020000, NVDEV_ENGINE_VP },
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x01000000, NVDEV_SUBDEV_PWR },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x10000000, NVDEV_SUBDEV_BUS },
@@ -49,29 +46,22 @@
 	{},
 };
 
-static int
-nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
+static void
+nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
 {
-	struct nvc0_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv04_mc_priv *priv = (void *)pmc;
+	nv_wr32(priv, 0x088704, 0x00000000);
 }
 
-struct nouveau_oclass
-nvc0_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_mc_ctor,
+struct nouveau_oclass *
+nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nvc0_mc_intr,
+	.msi_rearm = nvc0_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
new file mode 100644
index 0000000..837e545
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0xc3),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nvc0_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
new file mode 100644
index 0000000..d4fd3bc9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+#include <subdev/timer.h>
+
+static int
+nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
+		 u32 process, u32 message, u32 data0, u32 data1)
+{
+	struct nouveau_subdev *subdev = nv_subdev(ppwr);
+	u32 addr;
+
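+	/* the host->pwr fifo is an 8-entry ring of 16-byte packets; GET/PUT
+	 * are 4-bit counters whose extra wrap bit distinguishes a full ring
+	 * from an empty one, hence the "addr ^ 8" test below
+	 */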
+	/* wait for a free slot in the fifo */
+	addr  = nv_rd32(ppwr, 0x10a4a0);
+	if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+		return -EBUSY;
+
+	/* we currently only support a single process at a time waiting
+	 * on a synchronous reply, so take the PPWR mutex and tell the
+	 * receive handler what we're waiting for
+	 */
+	if (reply) {
+		mutex_lock(&subdev->mutex);
+		ppwr->recv.message = message;
+		ppwr->recv.process = process;
+	}
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000001);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
+
+	/* write the packet */
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+				ppwr->send.base));
+	nv_wr32(ppwr, 0x10a1c4, process);
+	nv_wr32(ppwr, 0x10a1c4, message);
+	nv_wr32(ppwr, 0x10a1c4, data0);
+	nv_wr32(ppwr, 0x10a1c4, data1);
+	nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* wait for reply, if requested */
+	if (reply) {
+		wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
+		reply[0] = ppwr->recv.data[0];
+		reply[1] = ppwr->recv.data[1];
+		mutex_unlock(&subdev->mutex);
+	}
+
+	return 0;
+}
+
+static void
+nouveau_pwr_recv(struct work_struct *work)
+{
+	struct nouveau_pwr *ppwr =
+		container_of(work, struct nouveau_pwr, recv.work);
+	u32 process, message, data0, data1;
+
+	/* nothing to do if GET == PUT */
+	u32 addr =  nv_rd32(ppwr, 0x10a4cc);
+	if (addr == nv_rd32(ppwr, 0x10a4c8))
+		return;
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000002);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000002);
+
+	/* read the packet */
+	nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+				ppwr->recv.base));
+	process = nv_rd32(ppwr, 0x10a1c4);
+	message = nv_rd32(ppwr, 0x10a1c4);
+	data0   = nv_rd32(ppwr, 0x10a1c4);
+	data1   = nv_rd32(ppwr, 0x10a1c4);
+	nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* wake process if it's waiting on a synchronous reply */
+	if (ppwr->recv.process) {
+		if (process == ppwr->recv.process &&
+		    message == ppwr->recv.message) {
+			ppwr->recv.data[0] = data0;
+			ppwr->recv.data[1] = data1;
+			ppwr->recv.process = 0;
+			wake_up(&ppwr->recv.wait);
+			return;
+		}
+	}
+
+	/* right now there are no other expected responses from the engine,
+	 * so assume that any unexpected message is an error.
+	 */
+	nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		(char)((process & 0x000000ff) >>  0),
+		(char)((process & 0x0000ff00) >>  8),
+		(char)((process & 0x00ff0000) >> 16),
+		(char)((process & 0xff000000) >> 24),
+		process, message, data0, data1);
+}
+
+static void
+nouveau_pwr_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_pwr *ppwr = (void *)subdev;
+	u32 disp = nv_rd32(ppwr, 0x10a01c);
+	u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);
+
+	if (intr & 0x00000020) {
+		u32 stat = nv_rd32(ppwr, 0x10a16c);
+		if (stat & 0x80000000) {
+			nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
+				 stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
+			nv_wr32(ppwr, 0x10a16c, 0x00000000);
+			intr &= ~0x00000020;
+		}
+	}
+
+	if (intr & 0x00000040) {
+		schedule_work(&ppwr->recv.work);
+		nv_wr32(ppwr, 0x10a004, 0x00000040);
+		intr &= ~0x00000040;
+	}
+
+	if (intr & 0x00000080) {
+		nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
+						      nv_rd32(ppwr, 0x10a7a4));
+		nv_wr32(ppwr, 0x10a004, 0x00000080);
+		intr &= ~0x00000080;
+	}
+
+	if (intr) {
+		nv_error(ppwr, "intr 0x%08x\n", intr);
+		nv_wr32(ppwr, 0x10a004, intr);
+	}
+}
+
+int
+_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_pwr *ppwr = (void *)object;
+
+	nv_wr32(ppwr, 0x10a014, 0x00000060);
+	flush_work(&ppwr->recv.work);
+
+	return nouveau_subdev_fini(&ppwr->base, suspend);
+}
+
+int
+_nouveau_pwr_init(struct nouveau_object *object)
+{
+	struct nouveau_pwr *ppwr = (void *)object;
+	int ret, i;
+
+	ret = nouveau_subdev_init(&ppwr->base);
+	if (ret)
+		return ret;
+
+	nv_subdev(ppwr)->intr = nouveau_pwr_intr;
+	ppwr->message = nouveau_pwr_send;
+
+	/* prevent previous ucode from running, wait for idle, reset */
+	nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
+	nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
+	nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
+
+	/* upload data segment */
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000);
+	for (i = 0; i < ppwr->data.size / 4; i++)
+		nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
+
+	/* upload code segment */
+	nv_wr32(ppwr, 0x10a180, 0x01000000);
+	for (i = 0; i < ppwr->code.size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(ppwr, 0x10a188, i >> 6);
+		nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
+	}
+
+	/* start it running */
+	nv_wr32(ppwr, 0x10a10c, 0x00000000);
+	nv_wr32(ppwr, 0x10a104, 0x00000000);
+	nv_wr32(ppwr, 0x10a100, 0x00000002);
+
+	/* wait for valid host->pwr ring configuration */
+	if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
+		return -EBUSY;
+	ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
+	ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;
+
+	/* wait for valid pwr->host ring configuration */
+	if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
+		return -EBUSY;
+	ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
+	ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;
+
+	nv_wr32(ppwr, 0x10a010, 0x000000e0);
+	return 0;
+}
+
+int
+nouveau_pwr_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_pwr *ppwr;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
+				     "pwr", length, pobject);
+	ppwr = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
+	init_waitqueue_head(&ppwr->recv.wait);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
new file mode 100644
index 0000000..2284ecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_HOST, #host_init, #host_recv)
+#endif
+
+/******************************************************************************
+ * HOST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+// HOST (R)FIFO packet format
+.equ #fifo_process 0x00
+.equ #fifo_message 0x04
+.equ #fifo_data0   0x08
+.equ #fifo_data1   0x0c
+
+// HOST HOST->PWR queue description
+.equ #fifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #fifo_qnum 3 // log2(max number of entries in queue)
+.equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
+.equ #fifo_qmaskp (#fifo_qmaskb - 1)
+.equ #fifo_qmaskf ((#fifo_qmaskb << 1) - 1)
+.equ #fifo_qsize  (1 << (#fifo_qlen + #fifo_qnum))
+fifo_queue: .skip 128 // #fifo_qsize
+
+// HOST PWR->HOST queue description
+.equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #rfifo_qnum 3 // log2(max number of entries in queue)
+.equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
+.equ #rfifo_qmaskp (#rfifo_qmaskb - 1)
+.equ #rfifo_qmaskf ((#rfifo_qmaskb << 1) - 1)
+.equ #rfifo_qsize  (1 << (#rfifo_qlen + #rfifo_qnum))
+rfifo_queue: .skip 128 // #rfifo_qsize
+#endif
+
+/******************************************************************************
+ * HOST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// HOST->PWR comms - dequeue message(s) for process(es) from FIFO
+//
+// $r15 - current (host)
+// $r0  - zero
+host_send:
+	nv_iord($r1, NV_PPWR_FIFO_GET(0))
+	nv_iord($r2, NV_PPWR_FIFO_PUT(0))
+	cmp b32 $r1 $r2
+	bra e #host_send_done
+		// calculate address of message
+		and $r14 $r1 #fifo_qmaskp
+		shl b32 $r14 $r14 #fifo_qlen
+		add b32 $r14 #fifo_queue
+
+		// read message data, and pass to appropriate process
+		ld b32 $r11 D[$r14 + #fifo_data1]
+		ld b32 $r12 D[$r14 + #fifo_data0]
+		ld b32 $r13 D[$r14 + #fifo_message]
+		ld b32 $r14 D[$r14 + #fifo_process]
+		call(send)
+
+		// increment GET
+		add b32 $r1 0x1
+		and $r14 $r1 #fifo_qmaskf
+		nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+		bra #host_send
+	host_send_done:
+	ret
+
+// PWR->HOST comms - enqueue message for HOST to RFIFO
+//
+// $r15 - current (host)
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+host_recv:
+	// message from intr handler == HOST->PWR comms pending
+	mov $r1 (PROC_KERN & 0x0000ffff)
+	sethi $r1 (PROC_KERN & 0xffff0000)
+	cmp b32 $r14 $r1
+	bra e #host_send
+
+	// wait for space in RFIFO
+	host_recv_wait:
+	nv_iord($r1, NV_PPWR_RFIFO_GET)
+	nv_iord($r2, NV_PPWR_RFIFO_PUT)
+	xor $r1 #rfifo_qmaskb
+	cmp b32 $r1 $r2
+	bra e #host_recv_wait
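+	// note: GET/PUT run modulo 2*#rfifo_qmaskb, so the queue is full
+	// exactly when PUT equals GET with the wrap bit flipped, which is
+	// what the xor above tests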
+
+	and $r3 $r2 #rfifo_qmaskp
+	shl b32 $r3 #rfifo_qlen
+	add b32 $r3 #rfifo_queue
+
+	// enqueue message
+	st b32 D[$r3 + #fifo_data1] $r11
+	st b32 D[$r3 + #fifo_data0] $r12
+	st b32 D[$r3 + #fifo_message] $r13
+	st b32 D[$r3 + #fifo_process] $r14
+
+	add b32 $r2 0x1
+	and $r2 #rfifo_qmaskf
+	nv_iowr(NV_PPWR_RFIFO_PUT, $r2)
+
+	// notify host of pending message
+	mov $r2 NV_PPWR_INTR_TRIGGER_USER0
+	nv_iowr(NV_PPWR_INTR_TRIGGER, $r2)
+	ret
+
+// $r15 - current (host)
+// $r0  - zero
+host_init:
+	// store each fifo's base/size in H2D/D2H scratch regs
+	mov $r1 #fifo_qsize
+	shl b32 $r1 16
+	or $r1 #fifo_queue
+	nv_iowr(NV_PPWR_H2D, $r1)
+
+	mov $r1 #rfifo_qsize
+	shl b32 $r1 16
+	or $r1 #rfifo_queue
+	nv_iowr(NV_PPWR_D2H, $r1)
+
+	// enable fifo subintr for first fifo
+	mov $r1 1
+	nv_iowr(NV_PPWR_FIFO_INTR_EN, $r1)
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
new file mode 100644
index 0000000..98f1c37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_IDLE, #idle, #idle_recv)
+#endif
+
+/******************************************************************************
+ * IDLE data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * IDLE code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// messages sent to the idle process are ignored
+//
+// $r15 - current (idle)
+// $r14 - message
+// $r0  - zero
+idle_recv:
+	ret
+
+// pump each process' message queue, sleeping only when no work remains
+//
+// $r15 - current (idle)
+// $r0  - zero
+idle:
+	// set our "no interrupt has occurred during our execution" flag
+	bset $flags $p0
+
+	// count IDLE invocations for debugging purposes
+	nv_iord($r1, NV_PPWR_DSCRATCH(1))
+	add b32 $r1 1
+	nv_iowr(NV_PPWR_DSCRATCH(1), $r1)
+
+	// keep looping while there's pending messages for any process
+	idle_loop:
+	mov $r1 #proc_list_head
+	bclr $flags $p2
+	idle_proc:
+		// process the process' messages until there's none left
+		idle_proc_exec:
+			push $r1
+			mov b32 $r14 $r1
+			call(recv)
+			pop $r1
+			bra not $p1 #idle_proc_next
+			bset $flags $p2
+			bra #idle_proc_exec
+		// next process!
+		idle_proc_next:
+		add b32 $r1 #proc_size
+		cmp b32 $r1 $r15
+		bra ne #idle_proc
+	bra $p2 #idle_loop
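+	// $p2 was set above (and in send_proc) each time a message moved,
+	// so we only fall through to sleep once every queue has drained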
+
+	// sleep if no interrupts have occurred
+	sleep $p0
+	bra #idle
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
new file mode 100644
index 0000000..0a7b05f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/******************************************************************************
+ * kernel data segment
+ *****************************************************************************/
+#ifdef INCLUDE_PROC
+proc_kern:
+process(PROC_KERN, 0, 0)
+proc_list_head:
+#endif
+
+#ifdef INCLUDE_DATA
+proc_list_tail:
+time_prev: .b32 0
+time_next: .b32 0
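+// time_prev holds the interval last programmed into the watchdog timer;
+// time_next accumulates the soonest pending expiry while intr_watchdog
+// walks the process list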
+#endif
+
+/******************************************************************************
+ * kernel code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+	bra #init
+
+// read nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data (return)
+// $r0  - zero
+rd32:
+	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+	mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
+	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	rd32_wait:
+		nv_iord($r14, NV_PPWR_MMIO_CTRL)
+		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		bra nz #rd32_wait
+	nv_iord($r13, NV_PPWR_MMIO_DATA)
+	ret
+
+// write nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data
+// $r0  - zero
+wr32:
+	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+	nv_iowr(NV_PPWR_MMIO_DATA, $r13)
+	mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
+	or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
+	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+
+#ifdef NVKM_FALCON_MMIO_TRAP
+	mov $r8 NV_PPWR_INTR_TRIGGER_USER1
+	nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+	wr32_host:
+		nv_iord($r8, NV_PPWR_INTR)
+		and $r8 NV_PPWR_INTR_USER1
+		bra nz #wr32_host
+#endif
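+	// with MMIO_TRAP the write is announced to the host via USER1
+	// first, and only issued once the host has acked the interrupt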
+
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	wr32_wait:
+		nv_iord($r14, NV_PPWR_MMIO_CTRL)
+		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		bra nz #wr32_wait
+	ret
+
+// busy-wait for a period of time
+//
+// $r15 - current
+// $r14 - ns
+// $r0  - zero
+nsec:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	nsec_loop:
+		nv_iord($r9, NV_PPWR_TIMER_LOW)
+		sub b32 $r9 $r8
+		cmp b32 $r9 $r14
+		bra l #nsec_loop
+	ret
+
+// busy-wait for a masked register value, or until the timeout expires
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - mask
+// $r12 - data
+// $r11 - timeout (ns)
+// $r0  - zero
+wait:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	wait_loop:
+		nv_rd32($r10, $r14)
+		and $r10 $r13
+		cmp b32 $r10 $r12
+		bra e #wait_done
+		nv_iord($r9, NV_PPWR_TIMER_LOW)
+		sub b32 $r9 $r8
+		cmp b32 $r9 $r11
+		bra l #wait_loop
+	wait_done:
+	ret
+
+// $r15 - current (kern)
+// $r14 - process
+// $r8  - NV_PPWR_INTR
+intr_watchdog:
+	// read process' timer status, skip if not enabled
+	ld b32 $r9 D[$r14 + #proc_time]
+	cmp b32 $r9 0
+	bra z #intr_watchdog_next_proc
+
+	// subtract last timer's value from process' timer,
+	// if it's <= 0 then the timer has expired
+	ld b32 $r10 D[$r0 + #time_prev]
+	sub b32 $r9 $r10
+	bra g #intr_watchdog_next_time
+		mov $r13 KMSG_ALARM
+		call(send_proc)
+		clear b32 $r9
+		bra #intr_watchdog_next_proc
+
+	// otherwise, update the next timer's value if this
+	// process' timer is the soonest
+	intr_watchdog_next_time:
+		// ... or if there's no next timer yet
+		ld b32 $r10 D[$r0 + #time_next]
+		cmp b32 $r10 0
+		bra z #intr_watchdog_next_time_set
+
+		cmp b32 $r9 $r10
+		bra g #intr_watchdog_next_proc
+		intr_watchdog_next_time_set:
+		st b32 D[$r0 + #time_next] $r9
+
+	// update process' timer status, and advance
+	intr_watchdog_next_proc:
+	st b32 D[$r14 + #proc_time] $r9
+	add b32 $r14 #proc_size
+	cmp b32 $r14 #proc_list_tail
+	bra ne #intr_watchdog
+	ret
+
+intr:
+	push $r0
+	clear b32 $r0
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r12
+	push $r13
+	push $r14
+	push $r15
+	mov $r15 #proc_kern
+	mov $r8 $flags
+	push $r8
+
+	nv_iord($r8, NV_PPWR_DSCRATCH(0))
+	add b32 $r8 1
+	nv_iowr(NV_PPWR_DSCRATCH(0), $r8)
+
+	nv_iord($r8, NV_PPWR_INTR)
+	and $r9 $r8 NV_PPWR_INTR_WATCHDOG
+	bra z #intr_skip_watchdog
+		st b32 D[$r0 + #time_next] $r0
+		mov $r14 #proc_list_head
+		call(intr_watchdog)
+		ld b32 $r9 D[$r0 + #time_next]
+		cmp b32 $r9 0
+		bra z #intr_skip_watchdog
+			nv_iowr(NV_PPWR_WATCHDOG_TIME, $r9)
+			st b32 D[$r0 + #time_prev] $r9
+
+	intr_skip_watchdog:
+	and $r9 $r8 NV_PPWR_INTR_SUBINTR
+	bra z #intr_skip_subintr
+		nv_iord($r9, NV_PPWR_SUBINTR)
+		and $r10 $r9 NV_PPWR_SUBINTR_FIFO
+		bra z #intr_subintr_skip_fifo
+			nv_iord($r12, NV_PPWR_FIFO_INTR)
+			push $r12
+			mov $r14 (PROC_HOST & 0x0000ffff)
+			sethi $r14 (PROC_HOST & 0xffff0000)
+			mov $r13 KMSG_FIFO
+			call(send)
+			pop $r12
+			nv_iowr(NV_PPWR_FIFO_INTR, $r12)
+		intr_subintr_skip_fifo:
+		nv_iowr(NV_PPWR_SUBINTR, $r9)
+
+	intr_skip_subintr:
+	and $r9 $r8 NV_PPWR_INTR_PAUSE
+	bra z #intr_skip_pause
+		and $r10 0xffbf
+
+	intr_skip_pause:
+	and $r9 $r8 NV_PPWR_INTR_USER0
+	bra z #intr_skip_user0
+		and $r10 0xffbf
+
+	intr_skip_user0:
+	nv_iowr(NV_PPWR_INTR_ACK, $r8)
+	pop $r8
+	mov $flags $r8
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r12
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	pop $r0
+	bclr $flags $p0
+	iret
+
+// request the current process be sent a message after a timeout expires
+//
+// $r15 - current
+// $r14 - ticks
+// $r0  - zero
+timer:
+	// interrupts off to prevent racing with timer isr
+	bclr $flags ie0
+
+	// if current process already has a timer set, bail
+	ld b32 $r8 D[$r15 + #proc_time]
+	cmp b32 $r8 0
+	bra g #timer_done
+	st b32 D[$r15 + #proc_time] $r14
+
+	// halt watchdog timer temporarily and check for a pending
+	// interrupt.  if there's one already pending, we can just
+	// bail since the timer isr will queue the next soonest
+	// right after it's done
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+	nv_iord($r8, NV_PPWR_INTR)
+	and $r8 NV_PPWR_INTR_WATCHDOG
+	bra nz #timer_enable
+
+	// update the watchdog if this timer should expire first,
+	// or if there's no timeout already set
+	nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
+	cmp b32 $r14 $r0
+	bra e #timer_reset
+	cmp b32 $r14 $r8
+	bra l #timer_done
+	timer_reset:
+	nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
+	st b32 D[$r0 + #time_prev] $r14
+
+	// re-enable the watchdog timer
+	timer_enable:
+	mov $r8 1
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+
+	// interrupts back on
+	timer_done:
+	bset $flags ie0
+	ret
+
+// send message to another process
+//
+// $r15 - current
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+send_proc:
+	push $r8
+	push $r9
+	// check for space in queue
+	ld b32 $r8 D[$r14 + #proc_qget]
+	ld b32 $r9 D[$r14 + #proc_qput]
+	xor $r8 #proc_qmaskb
+	cmp b32 $r8 $r9
+	bra e #send_done
+
+	// enqueue message
+	and $r8 $r9 #proc_qmaskp
+	shl b32 $r8 $r8 #proc_qlen
+	add b32 $r8 #proc_queue
+	add b32 $r8 $r14
+
+	ld b32 $r10 D[$r15 + #proc_id]
+	st b32 D[$r8 + #msg_process] $r10
+	st b32 D[$r8 + #msg_message] $r13
+	st b32 D[$r8 + #msg_data0] $r12
+	st b32 D[$r8 + #msg_data1] $r11
+
+	// increment PUT
+	add b32 $r9 1
+	and $r9 #proc_qmaskf
+	st b32 D[$r14 + #proc_qput] $r9
+	bset $flags $p2
+	send_done:
+	pop $r9
+	pop $r8
+	ret
+
+// lookup process structure by its name
+//
+// $r15 - current
+// $r14 - process name
+// $r0  - zero
+//
+// $r14 - process
+// $p1  - success
+find:
+	push $r8
+	mov $r8 #proc_list_head
+	bset $flags $p1
+	find_loop:
+		ld b32 $r10 D[$r8 + #proc_id]
+		cmp b32 $r10 $r14
+		bra e #find_done
+		add b32 $r8 #proc_size
+		cmp b32 $r8 #proc_list_tail
+		bra ne #find_loop
+		bclr $flags $p1
+	find_done:
+	mov b32 $r14 $r8
+	pop $r8
+	ret
+
+// send message to another process
+//
+// $r15 - current
+// $r14 - process id
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+send:
+	call(find)
+	bra $p1 #send_proc
+	ret
+
+// process single message for a given process
+//
+// $r15 - current
+// $r14 - process
+// $r0  - zero
+recv:
+	ld b32 $r8 D[$r14 + #proc_qget]
+	ld b32 $r9 D[$r14 + #proc_qput]
+	bclr $flags $p1
+	cmp b32 $r8 $r9
+	bra e #recv_done
+		// dequeue message
+		and $r9 $r8 #proc_qmaskp
+		add b32 $r8 1
+		and $r8 #proc_qmaskf
+		st b32 D[$r14 + #proc_qget] $r8
+		ld b32 $r10 D[$r14 + #proc_recv]
+
+		push $r15
+		mov $r15 $flags
+		push $r15
+		mov b32 $r15 $r14
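+		// the target process becomes "current" ($r15) while its
+		// handler runs; $flags and the old $r15 are restored below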
+
+		shl b32 $r9 $r9 #proc_qlen
+		add b32 $r14 $r9
+		add b32 $r14 #proc_queue
+		ld b32 $r11 D[$r14 + #msg_data1]
+		ld b32 $r12 D[$r14 + #msg_data0]
+		ld b32 $r13 D[$r14 + #msg_message]
+		ld b32 $r14 D[$r14 + #msg_process]
+
+		// process it
+		call $r10
+		pop $r15
+		mov $flags $r15
+		bset $flags $p1
+		pop $r15
+	recv_done:
+	ret
+
+init:
+	// setup stack
+	nv_iord($r1, NV_PPWR_CAPS)
+	extr $r1 $r1 9:17
+	shl b32 $r1 8
+	mov $sp $r1
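+	// the CAPS field extracted above encodes the data segment size in
+	// 256-byte blocks, so the stack grows down from the top of DMEM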
+
+#ifdef NVKM_FALCON_MMIO_UAS
+	// enable the UAS (user address space) window so the nv_rd32/nv_wr32
+	// macros can access MMIO directly through D[]
+	mov $r1 0x0010
+	sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
+	nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
+#endif
+
+	// route all interrupts except user0/1 and pause to fuc
+	mov $r1 0x00e0
+	sethi $r1 0x00000000
+	nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
+
+	// enable watchdog and subintr intrs
+	mov $r1 NV_PPWR_INTR_EN_CLR_MASK
+	nv_iowr(NV_PPWR_INTR_EN_CLR, $r1)
+	mov $r1 NV_PPWR_INTR_EN_SET_WATCHDOG
+	or $r1 NV_PPWR_INTR_EN_SET_SUBINTR
+	nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
+
+	// enable interrupts globally
+	mov $r1 #intr
+	sethi $r1 0x00000000
+	mov $iv0 $r1
+	bset $flags ie0
+
+	// enable watchdog timer
+	mov $r1 1
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r1)
+
+	// bootstrap processes; the idle process is last, and never returns
+	mov $r15 #proc_list_head
+	init_proc:
+		ld b32 $r1 D[$r15 + #proc_init]
+		cmp b32 $r1 0
+		bra z #init_proc
+		call $r1
+		add b32 $r15 #proc_size
+		bra #init_proc
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
new file mode 100644
index 0000000..2a74ea9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define GT215 0xa3
+#define GF100 0xc0
+#define GF119 0xd9
+#define GK208 0x108
+
+#include "os.h"
+
+// IO addresses
+#define NV_PPWR_INTR_TRIGGER                                             0x0000
+#define NV_PPWR_INTR_TRIGGER_USER1                                   0x00000080
+#define NV_PPWR_INTR_TRIGGER_USER0                                   0x00000040
+#define NV_PPWR_INTR_ACK                                                 0x0004
+#define NV_PPWR_INTR_ACK_SUBINTR                                     0x00000800
+#define NV_PPWR_INTR_ACK_WATCHDOG                                    0x00000002
+#define NV_PPWR_INTR                                                     0x0008
+#define NV_PPWR_INTR_SUBINTR                                         0x00000800
+#define NV_PPWR_INTR_USER1                                           0x00000080
+#define NV_PPWR_INTR_USER0                                           0x00000040
+#define NV_PPWR_INTR_PAUSE                                           0x00000020
+#define NV_PPWR_INTR_WATCHDOG                                        0x00000002
+#define NV_PPWR_INTR_EN_SET                                              0x0010
+#define NV_PPWR_INTR_EN_SET_SUBINTR                                  0x00000800
+#define NV_PPWR_INTR_EN_SET_WATCHDOG                                 0x00000002
+#define NV_PPWR_INTR_EN_CLR                                              0x0014
+#define NV_PPWR_INTR_EN_CLR_MASK                 /* all ones; envyas quirk */ -1
+#define NV_PPWR_INTR_ROUTE                                               0x001c
+#define NV_PPWR_TIMER_LOW                                                0x002c
+#define NV_PPWR_WATCHDOG_TIME                                            0x0034
+#define NV_PPWR_WATCHDOG_ENABLE                                          0x0038
+#define NV_PPWR_CAPS                                                     0x0108
+#define NV_PPWR_UAS_CONFIG                                               0x0164
+#define NV_PPWR_UAS_CONFIG_ENABLE                                    0x00010000
+#if NVKM_PPWR_CHIPSET >= GK208
+#define NV_PPWR_DSCRATCH(i)                                   (4 * (i) + 0x0450)
+#endif
+#define NV_PPWR_FIFO_PUT(i)                                   (4 * (i) + 0x04a0)
+#define NV_PPWR_FIFO_GET(i)                                   (4 * (i) + 0x04b0)
+#define NV_PPWR_FIFO_INTR                                                0x04c0
+#define NV_PPWR_FIFO_INTR_EN                                             0x04c4
+#define NV_PPWR_RFIFO_PUT                                                0x04c8
+#define NV_PPWR_RFIFO_GET                                                0x04cc
+#define NV_PPWR_H2D                                                      0x04d0
+#define NV_PPWR_D2H                                                      0x04dc
+#if NVKM_PPWR_CHIPSET < GK208
+#define NV_PPWR_DSCRATCH(i)                                   (4 * (i) + 0x05d0)
+#endif
+#define NV_PPWR_SUBINTR                                                  0x0688
+#define NV_PPWR_SUBINTR_FIFO                                         0x00000002
+#define NV_PPWR_MMIO_ADDR                                                0x07a0
+#define NV_PPWR_MMIO_DATA                                                0x07a4
+#define NV_PPWR_MMIO_CTRL                                                0x07ac
+#define NV_PPWR_MMIO_CTRL_TRIGGER                                    0x00010000
+#define NV_PPWR_MMIO_CTRL_STATUS                                     0x00007000
+#define NV_PPWR_MMIO_CTRL_STATUS_IDLE                                0x00000000
+#define NV_PPWR_MMIO_CTRL_MASK                                       0x000000f0
+#define NV_PPWR_MMIO_CTRL_MASK_B32_0                                 0x000000f0
+#define NV_PPWR_MMIO_CTRL_OP                                         0x00000003
+#define NV_PPWR_MMIO_CTRL_OP_RD                                      0x00000001
+#define NV_PPWR_MMIO_CTRL_OP_WR                                      0x00000002
+#define NV_PPWR_OUTPUT                                                   0x07c0
+#define NV_PPWR_OUTPUT_FB_PAUSE                                      0x00000004
+#define NV_PPWR_OUTPUT_SET                                               0x07e0
+#define NV_PPWR_OUTPUT_SET_FB_PAUSE                                  0x00000004
+#define NV_PPWR_OUTPUT_CLR                                               0x07e4
+#define NV_PPWR_OUTPUT_CLR_FB_PAUSE                                  0x00000004
+
+// Inter-process message format
+.equ #msg_process 0x00 /* send() target, recv() sender */
+.equ #msg_message 0x04
+.equ #msg_data0   0x08
+.equ #msg_data1   0x0c
+
+// Kernel message IDs
+#define KMSG_FIFO  0x00000000
+#define KMSG_ALARM 0x00000001
+
+// Process message queue description
+.equ #proc_qlen 4 // log2(size of queue entry in bytes)
+.equ #proc_qnum 2 // log2(max number of entries in queue)
+.equ #proc_qmaskb (1 << #proc_qnum) // max number of entries in queue
+.equ #proc_qmaskp (#proc_qmaskb - 1)
+.equ #proc_qmaskf ((#proc_qmaskb << 1) - 1)
+.equ #proc_qsize  (1 << (#proc_qlen + #proc_qnum))
+
+// Process table entry
+.equ #proc_id    0x00
+.equ #proc_init  0x04
+.equ #proc_recv  0x08
+.equ #proc_time  0x0c
+.equ #proc_qput  0x10
+.equ #proc_qget  0x14
+.equ #proc_queue 0x18
+.equ #proc_size (0x18 + #proc_qsize)
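+// worked example: with #proc_qlen=4 and #proc_qnum=2 the queue occupies
+// 64 bytes, so #proc_size = 0x18 + 0x40 = 0x58, matching the ".skip 64"
+// padding in the process() macro below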
+
+#define process(id,init,recv) /*
+*/	.b32 id /*
+*/	.b32 init /*
+*/	.b32 recv /*
+*/	.b32 0 /*
+*/	.b32 0 /*
+*/	.b32 0 /*
+*/	.skip 64
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iord(reg,ior) /*
+*/	mov reg ior /*
+*/ 	shl b32 reg 6 /*
+*/ 	iord reg I[reg + 0x000]
+#else
+#define nv_iord(reg,ior) /*
+*/	mov reg ior /*
+*/ 	iord reg I[reg + 0x000]
+#endif
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowr(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	shl b32 $r0 6 /*
+*/ 	iowr I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#else
+#define nv_iowr(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	iowr I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#endif
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowrs(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	shl b32 $r0 6 /*
+*/ 	iowrs I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#else
+#define nv_iowrs(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	iowrs I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#endif
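+// chips defining NVKM_FALCON_UNSHIFTED_IO (GK208, see nv108.fuc) address
+// I/O space by byte offset directly; earlier falcons expect the offset
+// shifted left by 6, hence the two variants of each accessor above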
+
+#define hash #
+#define fn(a) a
+#ifndef NVKM_FALCON_PC24
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
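+// the hash/fn indirection pastes a literal '#' onto the target label:
+// call(rd32) expands to "call #rd32", or "lcall #rd32" on chips with
+// 24-bit program counters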
+
+#ifndef NVKM_FALCON_MMIO_UAS
+#define nv_rd32(reg,addr) /*
+*/	mov b32 $r14 addr /*
+*/	call(rd32) /*
+*/	mov b32 reg $r13
+#else
+#define nv_rd32(reg,addr) /*
+*/ 	sethi $r0 0x14000000 /*
+*/	or $r0 addr /*
+*/	ld b32 reg D[$r0] /*
+*/	clear b32 $r0
+#endif
+
+#if !defined(NVKM_FALCON_MMIO_UAS) || defined(NVKM_FALCON_MMIO_TRAP)
+#define nv_wr32(addr,reg) /*
+*/	push addr /*
+*/	push reg /*
+*/	pop $r13 /*
+*/	pop $r14 /*
+*/	call(wr32)
+#else
+#define nv_wr32(addr,reg) /*
+*/ 	sethi $r0 0x14000000 /*
+*/	or $r0 addr /*
+*/	st b32 D[$r0] reg /*
+*/	clear b32 $r0
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
new file mode 100644
index 0000000..d43741e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_MEMX, #memx_init, #memx_recv)
+#endif
+
+/******************************************************************************
+ * MEMX data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+.equ #memx_opcode 0
+.equ #memx_header 2
+.equ #memx_length 4
+.equ #memx_func   8
+
+#define handler(cmd,hdr,len,func) /*
+*/	.b16 MEMX_##cmd /*
+*/	.b16 hdr /*
+*/	.b16 len /*
+*/	.b16 0 /*
+*/	.b32 func
+
+memx_func_head:
+handler(ENTER , 0x0001, 0x0000, #memx_func_enter)
+memx_func_next:
+handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
+handler(WR32  , 0x0000, 0x0002, #memx_func_wr32)
+handler(WAIT  , 0x0004, 0x0000, #memx_func_wait)
+handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
+memx_func_tail:
+
+.equ #memx_func_size #memx_func_next - #memx_func_head
+.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
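+// each handler() entry is 12 bytes (#memx_func_size); memx_exec, in the
+// code segment below, scales the opcode from a packet's low 16 bits by
+// this size to index the table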
+
+memx_data_head:
+.skip 0x0800
+memx_data_tail:
+#endif
+
+/******************************************************************************
+ * MEMX code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// ENTER - assert the FB_PAUSE output and wait for it to take effect
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: bitmask of heads to wait for vblank on
+// $r3  - opcode description
+// $r0  - zero
+memx_func_enter:
+	mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
+	nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
+	memx_func_enter_wait:
+		nv_iord($r6, NV_PPWR_OUTPUT)
+		and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+		bra z #memx_func_enter_wait
+	//XXX: TODO
+	ld b32 $r6 D[$r1 + 0x00]
+	add b32 $r1 0x04
+	ret
+
+// LEAVE - deassert FB_PAUSE and wait for the framebuffer to resume
+//
+// $r15 - current (memx)
+// $r4  - packet length
+// $r3  - opcode description
+// $r0  - zero
+memx_func_leave:
+	mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
+	nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
+	memx_func_leave_wait:
+		nv_iord($r6, NV_PPWR_OUTPUT)
+		and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+		bra nz #memx_func_leave_wait
+	ret
+
+// WR32 - write a series of (addr, data) pairs to MMIO
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00*n: addr
+//	+04*n: data
+// $r3  - opcode description
+// $r0  - zero
+memx_func_wr32:
+	ld b32 $r6 D[$r1 + 0x00]
+	ld b32 $r5 D[$r1 + 0x04]
+	add b32 $r1 0x08
+	nv_wr32($r6, $r5)
+	sub b32 $r4 0x02
+	bra nz #memx_func_wr32
+	ret
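+// e.g. a WR32 packet writing two registers is five 32-bit words:
+//   ((0x0004 << 16) | MEMX_WR32), addr0, data0, addr1, data1
+// the header's high half is the payload length in words ($r4), which the
+// loop above decrements by two per write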
+
+// WAIT - poll an MMIO register until (value & mask) == data, or timeout
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: addr
+//	+04: mask
+//	+08: data
+//	+0c: timeout (ns)
+// $r3  - opcode description
+// $r0  - zero
+memx_func_wait:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	ld b32 $r14 D[$r1 + 0x00]
+	ld b32 $r13 D[$r1 + 0x04]
+	ld b32 $r12 D[$r1 + 0x08]
+	ld b32 $r11 D[$r1 + 0x0c]
+	add b32 $r1 0x10
+	call(wait)
+	ret
+
+// DELAY - busy-wait for the requested number of nanoseconds
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: time (ns)
+// $r3  - opcode description
+// $r0  - zero
+memx_func_delay:
+	ld b32 $r14 D[$r1 + 0x00]
+	add b32 $r1 0x04
+	call(nsec)
+	ret
+
+// execute a script of memx packets, then send a completion reply
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message (exec)
+// $r12 - head of script
+// $r11 - tail of script
+// $r0  - zero
+memx_exec:
+	push $r14
+	push $r13
+	mov b32 $r1 $r12
+	mov b32 $r2 $r11
+	memx_exec_next:
+		// fetch the packet header, and locate opcode info
+		ld b32 $r3 D[$r1]
+		add b32 $r1 4
+		shr b32 $r4 $r3 16
+		mulu $r3 #memx_func_size
+
+		// execute the opcode handler
+		ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
+		call $r5
+
+		// keep going, if we haven't reached the end
+		cmp b32 $r1 $r2
+		bra l #memx_exec_next
+
+	// send completion reply
+	pop $r13
+	pop $r14
+	call(send)
+	ret
+
+// report the location and size of the script data area to the sender
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+memx_info:
+	mov $r12 #memx_data_head
+	mov $r11 #memx_data_tail - #memx_data_head
+	call(send)
+	ret
+
+// dispatch incoming EXEC/INFO requests
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+memx_recv:
+	cmp b32 $r13 MEMX_MSG_EXEC
+	bra e #memx_exec
+	cmp b32 $r13 MEMX_MSG_INFO
+	bra e #memx_info
+	ret
+
+// no per-boot initialisation required
+//
+// $r15 - current (memx)
+// $r0  - zero
+memx_init:
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
new file mode 100644
index 0000000..947be53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GK208
+
+#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
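+// GK208: 24-bit program counter (lcall) and unshifted I/O addressing;
+// the UAS and MMIO-trap paths in macros.fuc remain disabled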
+
+#include "macros.fuc"
+
+.section #nv108_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nv108_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
new file mode 100644
index 0000000..9342e2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -0,0 +1,1165 @@
+uint32_t nv108_pwr_data[] = {
+/* 0x0000: proc_kern */
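+/* process ids are FourCC names stored little-endian: 0x52544e49 below
+ * reads "INTR", and 0x54534f48 at proc_list_head reads "HOST" */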
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000379,
+	0x0000032a,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000046f,
+	0x00000461,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000473,
+	0x00000471,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x00000494,
+	0x00000475,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x0000049f,
+	0x0000049d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x000003a9,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x000003c7,
+	0x00000002,
+	0x00000002,
+	0x000003df,
+	0x00040003,
+	0x00000000,
+	0x00000407,
+	0x00010004,
+	0x00000000,
+	0x00000421,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nv108_pwr_code[] = {
+	0x02910ef5,
+/* 0x0004: rd32 */
+	0xf607a040,
+	0x04bd000e,
+	0xe3f0010e,
+	0x07ac4001,
+	0xbd000ef6,
+/* 0x0019: rd32_wait */
+	0x07ac4e04,
+	0xf100eecf,
+	0xf47000e4,
+	0xa44df61b,
+	0x00ddcf07,
+/* 0x002e: wr32 */
+	0xa04000f8,
+	0x000ef607,
+	0xa44004bd,
+	0x000df607,
+	0x020e04bd,
+	0xf0f0e5f0,
+	0xac4001e3,
+	0x000ef607,
+/* 0x004e: wr32_wait */
+	0xac4e04bd,
+	0x00eecf07,
+	0x7000e4f1,
+	0xf8f61bf4,
+/* 0x005d: nsec */
+	0xcf2c0800,
+/* 0x0062: nsec_loop */
+	0x2c090088,
+	0xbb0099cf,
+	0x9ea60298,
+	0xf8f61ef4,
+/* 0x0071: wait */
+	0xcf2c0800,
+/* 0x0076: wait_loop */
+	0xeeb20088,
+	0x0000047e,
+	0xadfddab2,
+	0xf4aca604,
+	0x2c09100b,
+	0xbb0099cf,
+	0x9ba60298,
+/* 0x0093: wait_done */
+	0xf8e61ef4,
+/* 0x0095: intr_watchdog */
+	0x03e99800,
+	0xf40096b0,
+	0x0a98280b,
+	0x029abb84,
+	0x0d0e1cf4,
+	0x01de7e01,
+	0xf494bd00,
+/* 0x00b2: intr_watchdog_next_time */
+	0x0a98140e,
+	0x00a6b085,
+	0xa6080bf4,
+	0x061cf49a,
+/* 0x00c0: intr_watchdog_next_time_set */
+/* 0x00c3: intr_watchdog_next_proc */
+	0xb58509b5,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc81bf402,
+/* 0x00d2: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0x000ff0f9,
+	0xf90188fe,
+	0x04504880,
+	0xb60088cf,
+	0x50400180,
+	0x0008f604,
+	0x080804bd,
+	0xc40088cf,
+	0x0bf40289,
+	0x8500b51f,
+	0x957e580e,
+	0x09980000,
+	0x0096b085,
+	0x000d0bf4,
+	0x0009f634,
+	0x09b504bd,
+/* 0x0125: intr_skip_watchdog */
+	0x0089e484,
+	0x360bf408,
+	0xcf068849,
+	0x9ac40099,
+	0x220bf402,
+	0xcf04c04c,
+	0xc0f900cc,
+	0xf14f484e,
+	0x0d5453e3,
+	0x023f7e00,
+	0x40c0fc00,
+	0x0cf604c0,
+/* 0x0157: intr_subintr_skip_fifo */
+	0x4004bd00,
+	0x09f60688,
+/* 0x015f: intr_skip_subintr */
+	0xc404bd00,
+	0x0bf42089,
+	0xbfa4f107,
+/* 0x0169: intr_skip_pause */
+	0x4089c4ff,
+	0xf1070bf4,
+/* 0x0173: intr_skip_user0 */
+	0x00ffbfa4,
+	0x0008f604,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x0196: timer */
+	0x32f401f8,
+	0x03f89810,
+	0xf40086b0,
+	0xfeb53a1c,
+	0xf6380003,
+	0x04bd0008,
+	0x88cf0808,
+	0x0284f000,
+	0x081c1bf4,
+	0x0088cf34,
+	0x0bf4e0a6,
+	0xf4e8a608,
+/* 0x01c6: timer_reset */
+	0x3400161e,
+	0xbd000ef6,
+	0x840eb504,
+/* 0x01d0: timer_enable */
+	0x38000108,
+	0xbd0008f6,
+/* 0x01d9: timer_done */
+	0x1031f404,
+/* 0x01de: send_proc */
+	0x80f900f8,
+	0xe89890f9,
+	0x04e99805,
+	0xa60486f0,
+	0x2a0bf489,
+	0x940398c4,
+	0x80b60488,
+	0x008ebb18,
+	0xb500fa98,
+	0x8db5008a,
+	0x028cb501,
+	0xb6038bb5,
+	0x94f00190,
+	0x04e9b507,
+/* 0x0217: send_done */
+	0xfc0231f4,
+	0xf880fc90,
+/* 0x021d: find */
+	0x0880f900,
+	0x0131f458,
+/* 0x0224: find_loop */
+	0xa6008a98,
+	0x100bf4ae,
+	0xb15880b6,
+	0xf4021086,
+	0x32f4f11b,
+/* 0x0239: find_done */
+	0xfc8eb201,
+/* 0x023f: send */
+	0x7e00f880,
+	0xf400021d,
+	0x00f89b01,
+/* 0x0248: recv */
+	0x9805e898,
+	0x32f404e9,
+	0xf489a601,
+	0x89c43c0b,
+	0x0180b603,
+	0xb50784f0,
+	0xea9805e8,
+	0xfef0f902,
+	0xf0f9018f,
+	0x9994efb2,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x028f: recv_done */
+	0xf8f0fc01,
+/* 0x0291: init */
+	0x01084100,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0x00e04100,
+	0x000013f0,
+	0x0001f61c,
+	0xff0104bd,
+	0x01f61400,
+	0x0104bd00,
+	0x0015f102,
+	0xf6100008,
+	0x04bd0001,
+	0xf000d241,
+	0x10fe0013,
+	0x1031f400,
+	0x38000101,
+	0xbd0001f6,
+/* 0x02db: init_proc */
+	0x98580f04,
+	0x16b001f1,
+	0xfa0bf400,
+	0xf0b615f9,
+	0xf20ef458,
+/* 0x02ec: host_send */
+	0xcf04b041,
+	0xa0420011,
+	0x0022cf04,
+	0x0bf412a6,
+	0x071ec42e,
+	0xb704ee94,
+	0x980218e0,
+	0xec9803eb,
+	0x01ed9802,
+	0x7e00ee98,
+	0xb600023f,
+	0x1ec40110,
+	0x04b0400f,
+	0xbd0001f6,
+	0xc70ef404,
+/* 0x0328: host_send_done */
+/* 0x032a: host_recv */
+	0x494100f8,
+	0x5413f14e,
+	0xf4e1a652,
+/* 0x0336: host_recv_wait */
+	0xcc41b90b,
+	0x0011cf04,
+	0xcf04c842,
+	0x16f00022,
+	0xf412a608,
+	0x23c4ef0b,
+	0x0434b607,
+	0x029830b7,
+	0xb5033bb5,
+	0x3db5023c,
+	0x003eb501,
+	0xf00120b6,
+	0xc8400f24,
+	0x0002f604,
+	0x400204bd,
+	0x02f60000,
+	0xf804bd00,
+/* 0x0379: host_init */
+	0x00804100,
+	0xf11014b6,
+	0x40021815,
+	0x01f604d0,
+	0x4104bd00,
+	0x14b60080,
+	0x9815f110,
+	0x04dc4002,
+	0xbd0001f6,
+	0x40010104,
+	0x01f604c4,
+	0xf804bd00,
+/* 0x03a9: memx_func_enter */
+	0x40040600,
+	0x06f607e0,
+/* 0x03b3: memx_func_enter_wait */
+	0x4604bd00,
+	0x66cf07c0,
+	0x0464f000,
+	0x98f70bf4,
+	0x10b60016,
+/* 0x03c7: memx_func_leave */
+	0x0600f804,
+	0x07e44004,
+	0xbd0006f6,
+/* 0x03d1: memx_func_leave_wait */
+	0x07c04604,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x03df: memx_func_wr32 */
+	0x9800f8f7,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0x00002e7e,
+	0x140003f1,
+	0xa00506fd,
+	0xb604bd05,
+	0x1bf40242,
+/* 0x0407: memx_func_wait */
+	0x0800f8dd,
+	0x0088cf2c,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0x7e1010b6,
+	0xf8000071,
+/* 0x0421: memx_func_delay */
+	0x001e9800,
+	0x7e0410b6,
+	0xf800005d,
+/* 0x042d: memx_exec */
+	0xf9e0f900,
+	0xb2c1b2d0,
+/* 0x0435: memx_exec_next */
+	0x001398b2,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12a655f9,
+	0xfced1ef4,
+	0x7ee0fcd0,
+	0xf800023f,
+/* 0x0455: memx_info */
+	0x03544c00,
+	0x7e08004b,
+	0xf800023f,
+/* 0x0461: memx_recv */
+	0x01d6b000,
+	0xb0c90bf4,
+	0x0bf400d6,
+/* 0x046f: memx_init */
+	0xf800f8eb,
+/* 0x0471: perf_recv */
+/* 0x0473: perf_init */
+	0xf800f800,
+/* 0x0475: test_recv */
+	0x04584100,
+	0xb60011cf,
+	0x58400110,
+	0x0001f604,
+	0xe7f104bd,
+	0xe3f1d900,
+	0x967e134f,
+	0x00f80001,
+/* 0x0494: test_init */
+	0x7e08004e,
+	0xf8000196,
+/* 0x049d: idle_recv */
+/* 0x049f: idle */
+	0xf400f800,
+	0x54410031,
+	0x0011cf04,
+	0x400110b6,
+	0x01f60454,
+/* 0x04b3: idle_loop */
+	0x0104bd00,
+	0x0232f458,
+/* 0x04b8: idle_proc */
+/* 0x04b8: idle_proc_exec */
+	0x1eb210f9,
+	0x0002487e,
+	0x11f410fc,
+	0x0231f409,
+/* 0x04cb: idle_proc_next */
+	0xb6f00ef4,
+	0x1fa65810,
+	0xf4e81bf4,
+	0x28f4e002,
+	0xc60ef400,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
new file mode 100644
index 0000000..6fde0b89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GT215
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
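+// GT215-generation falcons take the default paths in macros.fuc: 16-bit
+// program counters and shifted I/O addressing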
+
+#include "macros.fuc"
+
+.section #nva3_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nva3_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
new file mode 100644
index 0000000..0fa4d7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nva3_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000430,
+	0x000003cd,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000054e,
+	0x00000540,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000552,
+	0x00000550,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x0000057b,
+	0x00000554,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000587,
+	0x00000585,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x0000046f,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000496,
+	0x00000002,
+	0x00000002,
+	0x000004b7,
+	0x00040003,
+	0x00000000,
+	0x000004df,
+	0x00010004,
+	0x00000000,
+	0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nva3_pwr_code[] = {
+	0x030d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0xf001e7f0,
+	0x07f101e3,
+	0x04b607ac,
+	0x000ed006,
+/* 0x0022: rd32_wait */
+	0xe7f104bd,
+	0xe4b607ac,
+	0x00eecf06,
+	0x7000e4f1,
+	0xf1f21bf4,
+	0xb607a4d7,
+	0xddcf06d4,
+/* 0x003f: wr32 */
+	0xf100f800,
+	0xb607a007,
+	0x0ed00604,
+	0xf104bd00,
+	0xb607a407,
+	0x0dd00604,
+	0xf004bd00,
+	0xe5f002e7,
+	0x01e3f0f0,
+	0x07ac07f1,
+	0xd00604b6,
+	0x04bd000e,
+/* 0x006c: wr32_wait */
+	0x07ace7f1,
+	0xcf06e4b6,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x007f: nsec */
+	0xf000f8f2,
+	0x84b62c87,
+	0x0088cf06,
+/* 0x0088: nsec_loop */
+	0xb62c97f0,
+	0x99cf0694,
+	0x0298bb00,
+	0xf4069eb8,
+	0x00f8f11e,
+/* 0x009c: wait */
+	0xb62c87f0,
+	0x88cf0684,
+/* 0x00a5: wait_loop */
+	0x02eeb900,
+	0xb90421f4,
+	0xadfd02da,
+	0x06acb804,
+	0xf0150bf4,
+	0x94b62c97,
+	0x0099cf06,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+	0x9800f8df,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x025421f5,
+	0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x010a: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x84b605d0,
+	0x0088cf06,
+	0xf10180b6,
+	0xb605d007,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40289c4,
+	0x0080230b,
+	0x58e7f085,
+	0x98cb21f4,
+	0x96b08509,
+	0x110bf400,
+	0xb63407f0,
+	0x09d00604,
+	0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f148,
+	0x0694b606,
+	0xc40099cf,
+	0x0bf4029a,
+	0xc0c7f12c,
+	0x06c4b604,
+	0xf900cccf,
+	0x48e7f1c0,
+	0x53e3f14f,
+	0x00d7f054,
+	0x02b921f5,
+	0x07f1c0fc,
+	0x04b604c0,
+	0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+	0x07f104bd,
+	0x04b60688,
+	0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f5: timer */
+	0x1032f401,
+	0xb003f898,
+	0x1cf40086,
+	0x03fe8051,
+	0xb63807f0,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40284f0,
+	0x87f0261b,
+	0x0684b634,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x0233: timer_reset */
+	0xf01f1ef4,
+	0x04b63407,
+	0x000ed006,
+	0x0e8004bd,
+/* 0x0241: timer_enable */
+	0x0187f084,
+	0xb63807f0,
+	0x08d00604,
+/* 0x024f: timer_done */
+	0xf404bd00,
+	0x00f81031,
+/* 0x0254: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x028e: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x0294: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x029c: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x021086b1,
+	0xf4f01bf4,
+/* 0x02b2: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02b9: send */
+	0x9421f500,
+	0x9701f402,
+/* 0x02c2: recv */
+	0xe89800f8,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x030b: recv_done */
+	0xf8f0fc01,
+/* 0x030d: init */
+	0x0817f100,
+	0x0614b601,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0xe017f100,
+	0x0013f000,
+	0xb61c07f0,
+	0x01d00604,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0604b614,
+	0xbd0001d0,
+	0x0217f004,
+	0x080015f1,
+	0xb61007f0,
+	0x01d00604,
+	0xf104bd00,
+	0xf0010a17,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x04b63807,
+	0x0001d006,
+	0xf7f004bd,
+/* 0x0371: init_proc */
+	0x01f19858,
+	0xf40016b0,
+	0x15f9fa0b,
+	0xf458f0b6,
+/* 0x0382: host_send */
+	0x17f1f20e,
+	0x14b604b0,
+	0x0011cf06,
+	0x04a027f1,
+	0xcf0624b6,
+	0x12b80022,
+	0x320bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980218,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602b9,
+	0x0f1ec401,
+	0x04b007f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x03cb: host_send_done */
+	0xf8ba0ef4,
+/* 0x03cd: host_recv */
+	0x4917f100,
+	0x5413f14e,
+	0x06e1b852,
+/* 0x03db: host_recv_wait */
+	0xf1aa0bf4,
+	0xb604cc17,
+	0x11cf0614,
+	0xc827f100,
+	0x0624b604,
+	0xf00022cf,
+	0x12b80816,
+	0xe60bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xd00604b6,
+	0x04bd0002,
+	0xf04027f0,
+	0x04b60007,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0430: host_init */
+	0x008017f1,
+	0xf11014b6,
+	0xf1021815,
+	0xb604d007,
+	0x01d00604,
+	0xf104bd00,
+	0xb6008017,
+	0x15f11014,
+	0x07f10298,
+	0x04b604dc,
+	0x0001d006,
+	0x17f004bd,
+	0xc407f101,
+	0x0604b604,
+	0xbd0001d0,
+/* 0x046f: memx_func_enter */
+	0xf000f804,
+	0x07f10467,
+	0x04b607e0,
+	0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+	0x67f104bd,
+	0x64b607c0,
+	0x0066cf06,
+	0xf40464f0,
+	0x1698f30b,
+	0x0410b600,
+/* 0x0496: memx_func_leave */
+	0x67f000f8,
+	0xe407f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+	0x9800f8f3,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xf13f21f4,
+	0xfd140003,
+	0x05800506,
+	0xb604bd00,
+	0x1bf40242,
+/* 0x04df: memx_func_wait */
+	0xf000f8dd,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f89c21,
+/* 0x04fc: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0507: memx_exec */
+	0xf900f87f,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0511: memx_exec_next */
+	0x00139802,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12b855f9,
+	0xec1ef406,
+	0xe0fcd0fc,
+	0x02b921f5,
+/* 0x0532: memx_info */
+	0xc7f100f8,
+	0xb7f10354,
+	0x21f50800,
+	0x00f802b9,
+/* 0x0540: memx_recv */
+	0xf401d6b0,
+	0xd6b0c40b,
+	0xe90bf400,
+/* 0x054e: memx_init */
+	0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+	0x00f800f8,
+/* 0x0554: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xf521f513,
+/* 0x057b: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x05a3: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02c2,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x05bd: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
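
Each generated .fuc.h above pairs a data image with a code image, and the /* 0xXXXX: label */ comments record the byte offset of every assembler symbol, so the arrays can be cross-checked by eye. For instance, the memx_func_head triples in the data image end in the code offsets of their handlers (0x046f memx_func_enter, 0x0496 memx_func_leave, 0x04b7 memx_func_wr32, 0x04df memx_func_wait, 0x04fc memx_func_delay). A trivial C check built only on those offset comments — illustrative, not part of the kernel:

	#include <assert.h>
	#include <stdint.h>

	extern uint32_t nva3_pwr_data[];   /* from the nva3 image above */

	static void check_offsets(void)
	{
		/* byte offset 0x0318 is memx_func_head; its first
		 * word is 0x00010000 per the listing */
		assert(nva3_pwr_data[0x0318 / 4] == 0x00010000);
		/* the third word of the first triple points at
		 * memx_func_enter (code offset 0x046f) */
		assert(nva3_pwr_data[0x0318 / 4 + 2] == 0x0000046f);
	}
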
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
new file mode 100644
index 0000000..eaa64da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF100
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvc0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvc0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
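
nvc0.fuc drives a three-pass build: each module fragment — kernel.fuc, host.fuc, memx.fuc, perf.fuc, test.fuc, idle.fuc — is included once under INCLUDE_PROC to emit its process-table entry, once under INCLUDE_DATA for its data segment, and once under INCLUDE_CODE for its code segment. The same X-macro-style idiom in C, for readers unfamiliar with it (the file and macro names here are hypothetical, not from the nouveau tree):

	/* module.inc -- one fragment, included three times */
	#ifdef INCLUDE_PROC
	PROC_ENTRY(example)              /* pass 1: table entry  */
	#endif
	#ifdef INCLUDE_DATA
	static int example_counter;      /* pass 2: data segment */
	#endif
	#ifdef INCLUDE_CODE
	static void example_run(void)    /* pass 3: code segment */
	{
		example_counter++;
	}
	#endif

	/* consumer: select what each inclusion contributes */
	#define PROC_ENTRY(n) /* expand to a process-table row */
	#define INCLUDE_PROC
	#include "module.inc"
	#undef INCLUDE_PROC
	#define INCLUDE_DATA
	#include "module.inc"
	#undef INCLUDE_DATA
	#define INCLUDE_CODE
	#include "module.inc"
	#undef INCLUDE_CODE
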
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
new file mode 100644
index 0000000..82c8e8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvc0_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000430,
+	0x000003cd,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000054e,
+	0x00000540,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000552,
+	0x00000550,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x0000057b,
+	0x00000554,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000587,
+	0x00000585,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x0000046f,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000496,
+	0x00000002,
+	0x00000002,
+	0x000004b7,
+	0x00040003,
+	0x00000000,
+	0x000004df,
+	0x00010004,
+	0x00000000,
+	0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nvc0_pwr_code[] = {
+	0x030d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0xf001e7f0,
+	0x07f101e3,
+	0x04b607ac,
+	0x000ed006,
+/* 0x0022: rd32_wait */
+	0xe7f104bd,
+	0xe4b607ac,
+	0x00eecf06,
+	0x7000e4f1,
+	0xf1f21bf4,
+	0xb607a4d7,
+	0xddcf06d4,
+/* 0x003f: wr32 */
+	0xf100f800,
+	0xb607a007,
+	0x0ed00604,
+	0xf104bd00,
+	0xb607a407,
+	0x0dd00604,
+	0xf004bd00,
+	0xe5f002e7,
+	0x01e3f0f0,
+	0x07ac07f1,
+	0xd00604b6,
+	0x04bd000e,
+/* 0x006c: wr32_wait */
+	0x07ace7f1,
+	0xcf06e4b6,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x007f: nsec */
+	0xf000f8f2,
+	0x84b62c87,
+	0x0088cf06,
+/* 0x0088: nsec_loop */
+	0xb62c97f0,
+	0x99cf0694,
+	0x0298bb00,
+	0xf4069eb8,
+	0x00f8f11e,
+/* 0x009c: wait */
+	0xb62c87f0,
+	0x88cf0684,
+/* 0x00a5: wait_loop */
+	0x02eeb900,
+	0xb90421f4,
+	0xadfd02da,
+	0x06acb804,
+	0xf0150bf4,
+	0x94b62c97,
+	0x0099cf06,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+	0x9800f8df,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x025421f5,
+	0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x010a: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x84b605d0,
+	0x0088cf06,
+	0xf10180b6,
+	0xb605d007,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40289c4,
+	0x0080230b,
+	0x58e7f085,
+	0x98cb21f4,
+	0x96b08509,
+	0x110bf400,
+	0xb63407f0,
+	0x09d00604,
+	0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f148,
+	0x0694b606,
+	0xc40099cf,
+	0x0bf4029a,
+	0xc0c7f12c,
+	0x06c4b604,
+	0xf900cccf,
+	0x48e7f1c0,
+	0x53e3f14f,
+	0x00d7f054,
+	0x02b921f5,
+	0x07f1c0fc,
+	0x04b604c0,
+	0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+	0x07f104bd,
+	0x04b60688,
+	0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f5: timer */
+	0x1032f401,
+	0xb003f898,
+	0x1cf40086,
+	0x03fe8051,
+	0xb63807f0,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40284f0,
+	0x87f0261b,
+	0x0684b634,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x0233: timer_reset */
+	0xf01f1ef4,
+	0x04b63407,
+	0x000ed006,
+	0x0e8004bd,
+/* 0x0241: timer_enable */
+	0x0187f084,
+	0xb63807f0,
+	0x08d00604,
+/* 0x024f: timer_done */
+	0xf404bd00,
+	0x00f81031,
+/* 0x0254: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x028e: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x0294: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x029c: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x021086b1,
+	0xf4f01bf4,
+/* 0x02b2: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02b9: send */
+	0x9421f500,
+	0x9701f402,
+/* 0x02c2: recv */
+	0xe89800f8,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x030b: recv_done */
+	0xf8f0fc01,
+/* 0x030d: init */
+	0x0817f100,
+	0x0614b601,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0xe017f100,
+	0x0013f000,
+	0xb61c07f0,
+	0x01d00604,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0604b614,
+	0xbd0001d0,
+	0x0217f004,
+	0x080015f1,
+	0xb61007f0,
+	0x01d00604,
+	0xf104bd00,
+	0xf0010a17,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x04b63807,
+	0x0001d006,
+	0xf7f004bd,
+/* 0x0371: init_proc */
+	0x01f19858,
+	0xf40016b0,
+	0x15f9fa0b,
+	0xf458f0b6,
+/* 0x0382: host_send */
+	0x17f1f20e,
+	0x14b604b0,
+	0x0011cf06,
+	0x04a027f1,
+	0xcf0624b6,
+	0x12b80022,
+	0x320bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980218,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602b9,
+	0x0f1ec401,
+	0x04b007f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x03cb: host_send_done */
+	0xf8ba0ef4,
+/* 0x03cd: host_recv */
+	0x4917f100,
+	0x5413f14e,
+	0x06e1b852,
+/* 0x03db: host_recv_wait */
+	0xf1aa0bf4,
+	0xb604cc17,
+	0x11cf0614,
+	0xc827f100,
+	0x0624b604,
+	0xf00022cf,
+	0x12b80816,
+	0xe60bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xd00604b6,
+	0x04bd0002,
+	0xf04027f0,
+	0x04b60007,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0430: host_init */
+	0x008017f1,
+	0xf11014b6,
+	0xf1021815,
+	0xb604d007,
+	0x01d00604,
+	0xf104bd00,
+	0xb6008017,
+	0x15f11014,
+	0x07f10298,
+	0x04b604dc,
+	0x0001d006,
+	0x17f004bd,
+	0xc407f101,
+	0x0604b604,
+	0xbd0001d0,
+/* 0x046f: memx_func_enter */
+	0xf000f804,
+	0x07f10467,
+	0x04b607e0,
+	0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+	0x67f104bd,
+	0x64b607c0,
+	0x0066cf06,
+	0xf40464f0,
+	0x1698f30b,
+	0x0410b600,
+/* 0x0496: memx_func_leave */
+	0x67f000f8,
+	0xe407f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+	0x9800f8f3,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xf13f21f4,
+	0xfd140003,
+	0x05800506,
+	0xb604bd00,
+	0x1bf40242,
+/* 0x04df: memx_func_wait */
+	0xf000f8dd,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f89c21,
+/* 0x04fc: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0507: memx_exec */
+	0xf900f87f,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0511: memx_exec_next */
+	0x00139802,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12b855f9,
+	0xec1ef406,
+	0xe0fcd0fc,
+	0x02b921f5,
+/* 0x0532: memx_info */
+	0xc7f100f8,
+	0xb7f10354,
+	0x21f50800,
+	0x00f802b9,
+/* 0x0540: memx_recv */
+	0xf401d6b0,
+	0xd6b0c40b,
+	0xe90bf400,
+/* 0x054e: memx_init */
+	0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+	0x00f800f8,
+/* 0x0554: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xf521f513,
+/* 0x057b: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x05a3: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02c2,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x05bd: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
new file mode 100644
index 0000000..32d65ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF119
+
+//#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvd0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvd0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
new file mode 100644
index 0000000..ce65e2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvd0_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x000003be,
+	0x00000367,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x000004c4,
+	0x000004b6,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x000004c8,
+	0x000004c6,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x000004eb,
+	0x000004ca,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x000004f7,
+	0x000004f5,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x000003f4,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000415,
+	0x00000002,
+	0x00000002,
+	0x00000430,
+	0x00040003,
+	0x00000000,
+	0x00000458,
+	0x00010004,
+	0x00000000,
+	0x00000472,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nvd0_pwr_code[] = {
+	0x02bf0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xbd000ed0,
+	0x01e7f004,
+	0xf101e3f0,
+	0xd007ac07,
+	0x04bd000e,
+/* 0x001c: rd32_wait */
+	0x07ace7f1,
+	0xf100eecf,
+	0xf47000e4,
+	0xd7f1f51b,
+	0xddcf07a4,
+/* 0x0033: wr32 */
+	0xf100f800,
+	0xd007a007,
+	0x04bd000e,
+	0x07a407f1,
+	0xbd000dd0,
+	0x02e7f004,
+	0xf0f0e5f0,
+	0x07f101e3,
+	0x0ed007ac,
+/* 0x0057: wr32_wait */
+	0xf104bd00,
+	0xcf07ace7,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x0067: nsec */
+	0xf000f8f5,
+	0x88cf2c87,
+/* 0x006d: nsec_loop */
+	0x2c97f000,
+	0xbb0099cf,
+	0x9eb80298,
+	0xf41ef406,
+/* 0x007e: wait */
+	0x87f000f8,
+	0x0088cf2c,
+/* 0x0084: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0120b,
+	0x0099cf2c,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00a5: wait_done */
+/* 0x00a7: intr_watchdog */
+	0x9800f8e2,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x020621f5,
+	0x0ef494bd,
+/* 0x00c5: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00d4: intr_watchdog_next_time_set */
+/* 0x00d7: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x00e6: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x88cf05d0,
+	0x0180b600,
+	0x05d007f1,
+	0xbd0008d0,
+	0x0887f004,
+	0xc40088cf,
+	0x0bf40289,
+	0x85008020,
+	0xf458e7f0,
+	0x0998a721,
+	0x0096b085,
+	0xf00e0bf4,
+	0x09d03407,
+	0x8004bd00,
+/* 0x013e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f13c,
+	0x0099cf06,
+	0xf4029ac4,
+	0xc7f1260b,
+	0xcccf04c0,
+	0xf1c0f900,
+	0xf14f48e7,
+	0xf05453e3,
+	0x21f500d7,
+	0xc0fc026b,
+	0x04c007f1,
+	0xbd000cd0,
+/* 0x0175: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0009d006,
+/* 0x017e: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x0188: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x0192: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0008d004,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x01b6: timer */
+	0x32f401f8,
+	0x03f89810,
+	0xf40086b0,
+	0xfe80421c,
+	0x3807f003,
+	0xbd0008d0,
+	0x0887f004,
+	0xf00088cf,
+	0x1bf40284,
+	0x3487f020,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x01eb: timer_reset */
+	0xf0191ef4,
+	0x0ed03407,
+	0x8004bd00,
+/* 0x01f6: timer_enable */
+	0x87f0840e,
+	0x3807f001,
+	0xbd0008d0,
+/* 0x0201: timer_done */
+	0x1031f404,
+/* 0x0206: send_proc */
+	0x80f900f8,
+	0xe89890f9,
+	0x04e99805,
+	0xb80486f0,
+	0x0bf40689,
+	0x0398c42a,
+	0xb6048894,
+	0x8ebb1880,
+	0x00fa9800,
+	0x80008a80,
+	0x8c80018d,
+	0x038b8002,
+	0xf00190b6,
+	0xe9800794,
+	0x0231f404,
+/* 0x0240: send_done */
+	0x80fc90fc,
+/* 0x0246: find */
+	0x80f900f8,
+	0xf45887f0,
+/* 0x024e: find_loop */
+	0x8a980131,
+	0x06aeb800,
+	0xb6100bf4,
+	0x86b15880,
+	0x1bf40210,
+	0x0132f4f0,
+/* 0x0264: find_done */
+	0xfc028eb9,
+/* 0x026b: send */
+	0xf500f880,
+	0xf4024621,
+	0x00f89701,
+/* 0x0274: recv */
+	0x9805e898,
+	0x32f404e9,
+	0x0689b801,
+	0xc43d0bf4,
+	0x80b60389,
+	0x0784f001,
+	0x9805e880,
+	0xf0f902ea,
+	0xf9018ffe,
+	0x02efb9f0,
+	0xbb049994,
+	0xe0b600e9,
+	0x03eb9818,
+	0x9802ec98,
+	0xee9801ed,
+	0xfca5f900,
+	0x00f8fef0,
+	0xfc0131f4,
+/* 0x02bd: recv_done */
+/* 0x02bf: init */
+	0xf100f8f0,
+	0xcf010817,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0x00e017f1,
+	0xf00013f0,
+	0x01d01c07,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0001d014,
+	0x17f004bd,
+	0x0015f102,
+	0x1007f008,
+	0xbd0001d0,
+	0xe617f104,
+	0x0013f000,
+	0xf40010fe,
+	0x17f01031,
+	0x3807f001,
+	0xbd0001d0,
+	0x58f7f004,
+/* 0x0314: init_proc */
+	0xb001f198,
+	0x0bf40016,
+	0xb615f9fa,
+	0x0ef458f0,
+/* 0x0325: host_send */
+	0xb017f1f2,
+	0x0011cf04,
+	0x04a027f1,
+	0xb80022cf,
+	0x0bf40612,
+	0x071ec42f,
+	0xb704ee94,
+	0x980218e0,
+	0xec9803eb,
+	0x01ed9802,
+	0xf500ee98,
+	0xb6026b21,
+	0x1ec40110,
+	0xb007f10f,
+	0x0001d004,
+	0x0ef404bd,
+/* 0x0365: host_send_done */
+/* 0x0367: host_recv */
+	0xf100f8c3,
+	0xf14e4917,
+	0xb8525413,
+	0x0bf406e1,
+/* 0x0375: host_recv_wait */
+	0xcc17f1b3,
+	0x0011cf04,
+	0x04c827f1,
+	0xf00022cf,
+	0x12b80816,
+	0xec0bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xbd0002d0,
+	0x4027f004,
+	0xd00007f0,
+	0x04bd0002,
+/* 0x03be: host_init */
+	0x17f100f8,
+	0x14b60080,
+	0x1815f110,
+	0xd007f102,
+	0x0001d004,
+	0x17f104bd,
+	0x14b60080,
+	0x9815f110,
+	0xdc07f102,
+	0x0001d004,
+	0x17f004bd,
+	0xc407f101,
+	0x0001d004,
+	0x00f804bd,
+/* 0x03f4: memx_func_enter */
+	0xf10467f0,
+	0xd007e007,
+	0x04bd0006,
+/* 0x0400: memx_func_enter_wait */
+	0x07c067f1,
+	0xf00066cf,
+	0x0bf40464,
+	0x001698f6,
+	0xf80410b6,
+/* 0x0415: memx_func_leave */
+	0x0467f000,
+	0x07e407f1,
+	0xbd0006d0,
+/* 0x0421: memx_func_leave_wait */
+	0xc067f104,
+	0x0066cf07,
+	0xf40464f0,
+	0x00f8f61b,
+/* 0x0430: memx_func_wr32 */
+	0x98001698,
+	0x10b60115,
+	0xf960f908,
+	0xfcd0fc50,
+	0x3321f4e0,
+	0x140003f1,
+	0x800506fd,
+	0x04bd0005,
+	0xf40242b6,
+	0x00f8dd1b,
+/* 0x0458: memx_func_wait */
+	0xcf2c87f0,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0x7e21f410,
+/* 0x0472: memx_func_delay */
+	0x1e9800f8,
+	0x0410b600,
+	0xf86721f4,
+/* 0x047d: memx_exec */
+	0xf9e0f900,
+	0x02c1b9d0,
+/* 0x0487: memx_exec_next */
+	0x9802b2b9,
+	0x10b60013,
+	0x10349504,
+	0x980c30f0,
+	0x55f9c835,
+	0xf40612b8,
+	0xd0fcec1e,
+	0x21f5e0fc,
+	0x00f8026b,
+/* 0x04a8: memx_info */
+	0x0354c7f1,
+	0x0800b7f1,
+	0x026b21f5,
+/* 0x04b6: memx_recv */
+	0xd6b000f8,
+	0xc40bf401,
+	0xf400d6b0,
+	0x00f8e90b,
+/* 0x04c4: memx_init */
+/* 0x04c6: perf_recv */
+	0x00f800f8,
+/* 0x04c8: perf_init */
+/* 0x04ca: test_recv */
+	0x17f100f8,
+	0x11cf05d8,
+	0x0110b600,
+	0x05d807f1,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xb621f513,
+/* 0x04eb: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801b621,
+/* 0x04f5: idle_recv */
+/* 0x04f7: idle */
+	0xf400f800,
+	0x17f10031,
+	0x11cf05d4,
+	0x0110b600,
+	0x05d407f1,
+	0xbd0001d0,
+/* 0x050d: idle_loop */
+	0x5817f004,
+/* 0x0513: idle_proc */
+/* 0x0513: idle_proc_exec */
+	0xf90232f4,
+	0x021eb910,
+	0x027421f5,
+	0x11f410fc,
+	0x0231f409,
+/* 0x0527: idle_proc_next */
+	0xb6ef0ef4,
+	0x1fb85810,
+	0xe61bf406,
+	0xf4dd02f4,
+	0x0ef40028,
+	0x000000c1,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
new file mode 100644
index 0000000..5fb0ccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -0,0 +1,27 @@
+#ifndef __NVKM_PWR_OS_H__
+#define __NVKM_PWR_OS_H__
+
+/* Process names */
+#define PROC_KERN 0x52544e49
+#define PROC_IDLE 0x454c4449
+#define PROC_HOST 0x54534f48
+#define PROC_MEMX 0x584d454d
+#define PROC_PERF 0x46524550
+#define PROC_TEST 0x54534554
+
+/* KERN: message identifiers */
+#define KMSG_FIFO   0x00000000
+#define KMSG_ALARM  0x00000001
+
+/* MEMX: message identifiers */
+#define MEMX_MSG_INFO 0
+#define MEMX_MSG_EXEC 1
+
+/* MEMX: script opcode definitions */
+#define MEMX_ENTER  0
+#define MEMX_LEAVE  1
+#define MEMX_WR32   2
+#define MEMX_WAIT   3
+#define MEMX_DELAY  4
+
+#endif
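
Two things are worth noting about these constants. The PROC_* values are four ASCII bytes packed little-endian — 0x54534f48 is "HOST", 0x584d454d is "MEMX", and so on — which is why the same words open each entry of proc_list_head in the generated .fuc.h data above (and why proc_kern begins with 0x52544e49, "INTR"). The MEMX_* opcodes 0..4 likewise index the memx_func_head table at data offset 0x0318, whose triples end in each handler's code offset. A small host-side decoder for the name tags, purely as illustration (nothing like it is required by the firmware):

	#include <stdint.h>
	#include <stdio.h>

	static void ppwr_proc_name(uint32_t tag, char out[5])
	{
		out[0] = tag & 0xff;            /* 'H' for PROC_HOST */
		out[1] = (tag >> 8) & 0xff;     /* 'O' */
		out[2] = (tag >> 16) & 0xff;    /* 'S' */
		out[3] = (tag >> 24) & 0xff;    /* 'T' */
		out[4] = '\0';
	}

	int main(void)
	{
		char name[5];
		ppwr_proc_name(0x54534f48, name);  /* PROC_HOST */
		printf("%s\n", name);              /* prints HOST */
		return 0;
	}
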
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
new file mode 100644
index 0000000..38eadf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_PERF, #perf_init, #perf_recv)
+#endif
+
+/******************************************************************************
+ * PERF data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * PERF code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// description
+//
+// $r15 - current (perf)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+perf_recv:
+	ret
+
+// description
+//
+// $r15 - current (perf)
+// $r0  - zero
+perf_init:
+	ret
+#endif
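
The register-convention comments above ($r14 sender, $r13 message, $r12/$r11 payload) are the whole inter-process message ABI; the host side mirrors it in calls such as ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0) in memx.c below. Restated as a C struct purely for readability — the firmware passes these in registers, never in memory, and no such struct exists in the tree:

	#include <stdint.h>

	struct ppwr_msg {
		uint32_t sender;   /* $r14: sending process name (PROC_*) */
		uint32_t message;  /* $r13: message id, e.g. MEMX_MSG_INFO */
		uint32_t data0;    /* $r12 */
		uint32_t data1;    /* $r11 */
	};
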
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
new file mode 100644
index 0000000..0c3a71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_TEST, #test_init, #test_recv)
+#endif
+
+/******************************************************************************
+ * TEST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * TEST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// description
+//
+// $r15 - current (test)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+test_recv:
+	nv_iord($r1, NV_PPWR_DSCRATCH(2))
+	add b32 $r1 1
+	nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
+	mov $r14 -0x2700 /* 0xd900, envyas grrr! */
+	sethi $r14 0x134f0000
+	call(timer)
+	ret
+
+// description
+//
+// $r15 - current (test)
+// $r0  - zero
+test_init:
+	mov $r14 0x800
+	call(timer)
+	ret
+#endif
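
The two-instruction constant load in test_recv looks like a workaround for an envyas immediate-encoding limit (hence the "grrr"); as I read the arithmetic:

	// mov   $r14 -0x2700     -> $r14 = 0xffffd900 (immediate is sign-extended)
	// sethi $r14 0x134f0000  -> high half replaced: $r14 = 0x134fd900
	//
	// which matches the 0xd900 low half noted in the inline comment, and is
	// the period handed to call(timer) by the self-rearming test process.
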
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
new file mode 100644
index 0000000..03de310
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -0,0 +1,121 @@
+#ifndef __NVKM_PWR_MEMX_H__
+#define __NVKM_PWR_MEMX_H__
+
+#include <subdev/pwr.h>
+#include <subdev/pwr/fuc/os.h>
+
+struct nouveau_memx {
+	struct nouveau_pwr *ppwr;
+	u32 base;
+	u32 size;
+	struct {
+		u32 mthd;
+		u32 size;
+		u32 data[64];
+	} c;
+};
+
+static void
+memx_out(struct nouveau_memx *memx)
+{
+	struct nouveau_pwr *ppwr = memx->ppwr;
+	int i;
+
+	if (memx->c.size) {
+		nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+		for (i = 0; i < memx->c.size; i++)
+			nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
+		memx->c.size = 0;
+	}
+}
+
+static void
+memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
+{
+	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
+	    (memx->c.size && memx->c.mthd != mthd))
+		memx_out(memx);
+	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
+	memx->c.size += size;
+	memx->c.mthd  = mthd;
+}
+
+int
+nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
+{
+	struct nouveau_memx *memx;
+	u32 reply[2];
+	int ret;
+
+	ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+	if (ret)
+		return ret;
+
+	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
+	if (!memx)
+		return -ENOMEM;
+	memx->ppwr = ppwr;
+	memx->base = reply[0];
+	memx->size = reply[1];
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000003);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
+	nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER);
+	nv_wr32(ppwr, 0x10a1c4, 0x00000000);
+	return 0;
+}
+
+int
+nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
+{
+	struct nouveau_memx *memx = *pmemx;
+	struct nouveau_pwr *ppwr = memx->ppwr;
+	u32 finish, reply[2];
+
+	/* flush the cache... */
+	memx_out(memx);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
+	finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* call MEMX process to execute the script, and wait for reply */
+	if (exec) {
+		ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
+				 memx->base, finish);
+	}
+
+	kfree(memx);
+	return 0;
+}
+
+void
+nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
+{
+	nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
+	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
+}
+
+void
+nouveau_memx_wait(struct nouveau_memx *memx,
+		  u32 addr, u32 mask, u32 data, u32 nsec)
+{
+	nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
+				addr, mask, data, nsec);
+	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+	memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
+{
+	nv_debug(memx->ppwr, "    DELAY = %d ns\n", nsec);
+	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
+	memx_out(memx); /* fuc can't handle multiple of these per packet */
+}
+
+#endif
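
Note that memx.c ships with header-style include guards, which suggests it is meant to be #included by the pwr code rather than built as a standalone object. A hypothetical caller of the interface it defines, to show the intended flow; the register offset and timings below are invented for illustration:

	static int
	example_memx_script(struct nouveau_pwr *ppwr)
	{
		struct nouveau_memx *memx;
		int ret;

		ret = nouveau_memx_init(ppwr, &memx);
		if (ret)
			return ret;

		/* commands are batched per-method by memx_cmd()/memx_out() */
		nouveau_memx_wr32(memx, 0x100200, 0x00000001);
		nouveau_memx_wait(memx, 0x100200, 0x00000001, 0x00000001, 1000);
		nouveau_memx_nsec(memx, 2000);

		/* flush, release the data segment, ask PROC_MEMX to run it */
		return nouveau_memx_fini(&memx, true);
	}
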
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
new file mode 100644
index 0000000..52c8541
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nv108.fuc.h"
+
+struct nv108_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv108_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nv108_pwr_code;
+	priv->base.code.size = sizeof(nv108_pwr_code);
+	priv->base.data.data = nv108_pwr_data;
+	priv->base.data.size = sizeof(nv108_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nv108_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv108_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
new file mode 100644
index 0000000..c132b7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nva3_pwr_init(struct nouveau_object *object)
+{
+	struct nva3_pwr_priv *priv = (void *)object;
+	nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
+	nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
+	return nouveau_pwr_init(&priv->base);
+}
+
+static int
+nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nva3_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nva3_pwr_code;
+	priv->base.code.size = sizeof(nva3_pwr_code);
+	priv->base.data.data = nva3_pwr_data;
+	priv->base.data.size = sizeof(nva3_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = nva3_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
new file mode 100644
index 0000000..495f685
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nvc0_pwr_code;
+	priv->base.code.size = sizeof(nvc0_pwr_code);
+	priv->base.data.data = nvc0_pwr_data;
+	priv->base.data.size = sizeof(nvc0_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
new file mode 100644
index 0000000..043aa14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvd0.fuc.h"
+
+struct nvd0_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvd0_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nvd0_pwr_code;
+	priv->base.code.size = sizeof(nvd0_pwr_code);
+	priv->base.data.data = nvd0_pwr_data;
+	priv->base.data.size = sizeof(nvd0_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f1de7a9..80e584a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -92,10 +92,11 @@
 	struct nouveau_timer *ptimer = nouveau_timer(therm);
 	struct nouveau_therm_priv *priv = (void *)therm;
 	unsigned long flags;
-	int duty;
+	bool immd = true;
+	bool poll = true;
+	int duty = -1;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	nv_debug(therm, "FAN speed check\n");
 	if (mode < 0)
 		mode = priv->mode;
 	priv->mode = mode;
@@ -106,28 +107,49 @@
 		duty = nouveau_therm_fan_get(therm);
 		if (duty < 0)
 			duty = 100;
+		poll = false;
 		break;
 	case NOUVEAU_THERM_CTRL_AUTO:
-		if (priv->fan->bios.nr_fan_trip)
+		if (priv->fan->bios.nr_fan_trip) {
 			duty = nouveau_therm_update_trip(therm);
-		else
+		} else
+		if (priv->fan->bios.linear_min_temp ||
+		    priv->fan->bios.linear_max_temp) {
 			duty = nouveau_therm_update_linear(therm);
+		} else {
+			if (priv->cstate)
+				duty = priv->cstate;
+			poll = false;
+		}
+		immd = false;
 		break;
 	case NOUVEAU_THERM_CTRL_NONE:
 	default:
 		ptimer->alarm_cancel(ptimer, &priv->alarm);
-		goto done;
+		poll = false;
 	}
 
-	nv_debug(therm, "FAN target request: %d%%\n", duty);
-	nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
-
-done:
-	if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+	if (list_empty(&priv->alarm.head) && poll)
 		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
-	else if (!list_empty(&priv->alarm.head))
-		nv_debug(therm, "therm fan alarm list is not empty\n");
 	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (duty >= 0) {
+		nv_debug(therm, "FAN target request: %d%%\n", duty);
+		nouveau_therm_fan_set(therm, immd, duty);
+	}
+}
+
+int
+nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
+{
+	struct nouveau_therm_priv *priv = (void *)ptherm;
+	if (!dir || (dir < 0 && fan < priv->cstate) ||
+		    (dir > 0 && fan > priv->cstate)) {
+		nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
+		priv->cstate = fan;
+		nouveau_therm_update(ptherm, -1);
+	}
+	return 0;
 }
 
 static void
@@ -149,14 +171,15 @@
 		"automatic"
 	};
 
-	/* The default PDAEMON ucode interferes with fan management */
+	/* The default PPWR ucode on fermi interferes with fan management */
 	if ((mode >= ARRAY_SIZE(name)) ||
-	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
+	     !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
 		return -EINVAL;
 
 	/* do not allow automatic fan management if the thermal sensor is
 	 * not available */
-	if (priv->mode == 2 && therm->temp_get(therm) < 0)
+	if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
 		return -EINVAL;
 
 	if (priv->mode == mode)
@@ -335,7 +358,7 @@
 	nouveau_therm_ic_ctor(therm);
 	nouveau_therm_fan_ctor(therm);
 
-	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
 	nouveau_therm_sensor_preinit(therm);
 	return 0;
 }
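
Read as a whole, the reworked update path above now resolves each mode like so (a condensed summary, not driver code):

	/*
	 * MANUAL: duty = current fan speed (100 if unreadable), applied
	 *         immediately, polling alarm not re-armed
	 * AUTO:   trip points take priority, then a linear min/max range,
	 *         else fall back to the cstate default and stop polling;
	 *         changes apply non-immediately (ramped via bump/slow-down)
	 * NONE:   cancel the alarm; duty stays -1, so the final
	 *         nouveau_therm_fan_set() call is skipped entirely
	 */
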
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b9..95f6129 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -185,8 +185,11 @@
 	priv->fan->bios.max_duty = 100;
 	priv->fan->bios.bump_period = 500;
 	priv->fan->bios.slow_down_period = 2000;
+/*XXX: talk to mupuf */
+#if 0
 	priv->fan->bios.linear_min_temp = 40;
 	priv->fan->bios.linear_max_temp = 85;
+#endif
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
index e601773..f69dab1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -97,6 +97,13 @@
 {
 	struct nouveau_therm_priv *tpriv = (void *)therm;
 	struct nouveau_fantog_priv *priv;
+	int ret;
+
+	if (therm->pwm_ctrl) {
+		ret = therm->pwm_ctrl(therm, func->line, false);
+		if (ret)
+			return ret;
+	}
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	tpriv->fan = &priv->base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec..13b8500 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -55,28 +55,28 @@
 	return true;
 }
 
-static struct i2c_board_info
+static struct nouveau_i2c_board_info
 nv_board_infos[] = {
-	{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
-	{ I2C_BOARD_INFO("w83781d", 0x2d) },
-	{ I2C_BOARD_INFO("adt7473", 0x2e) },
-	{ I2C_BOARD_INFO("adt7473", 0x2d) },
-	{ I2C_BOARD_INFO("adt7473", 0x2c) },
-	{ I2C_BOARD_INFO("f75375", 0x2e) },
-	{ I2C_BOARD_INFO("lm99", 0x4c) },
-	{ I2C_BOARD_INFO("lm90", 0x4c) },
-	{ I2C_BOARD_INFO("lm90", 0x4d) },
-	{ I2C_BOARD_INFO("adm1021", 0x18) },
-	{ I2C_BOARD_INFO("adm1021", 0x19) },
-	{ I2C_BOARD_INFO("adm1021", 0x1a) },
-	{ I2C_BOARD_INFO("adm1021", 0x29) },
-	{ I2C_BOARD_INFO("adm1021", 0x2a) },
-	{ I2C_BOARD_INFO("adm1021", 0x2b) },
-	{ I2C_BOARD_INFO("adm1021", 0x4c) },
-	{ I2C_BOARD_INFO("adm1021", 0x4d) },
-	{ I2C_BOARD_INFO("adm1021", 0x4e) },
-	{ I2C_BOARD_INFO("lm63", 0x18) },
-	{ I2C_BOARD_INFO("lm63", 0x4e) },
+	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
+	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2e) }, 20  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2d) }, 20  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2c) }, 20  },
+	{ { I2C_BOARD_INFO("f75375", 0x2e) }, 0  },
+	{ { I2C_BOARD_INFO("lm99", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("lm90", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("lm90", 0x4d) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x18) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x19) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x1a) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x29) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x2a) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x2b) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4d) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4e) }, 0  },
+	{ { I2C_BOARD_INFO("lm63", 0x18) }, 0  },
+	{ { I2C_BOARD_INFO("lm63", 0x4e) }, 0  },
 	{ }
 };
 
@@ -89,9 +89,9 @@
 	struct nvbios_extdev_func extdev_entry;
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
-		struct i2c_board_info board[] = {
-			{ I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
-			{ }
+		struct nouveau_i2c_board_info board[] = {
+			{ { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0 },
+			{ }
 		};
 
 		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
@@ -101,9 +101,9 @@
 	}
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
-		struct i2c_board_info board[] = {
-			{ I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
-			{ }
+		struct nouveau_i2c_board_info board[] = {
+			{ { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
+			{ }
 		};
 
 		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
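
For context on the type switch above: nouveau_i2c_board_info presumably wraps the raw Linux board info with a probe delay, along the lines of the sketch below (see subdev/i2c.h for the authoritative definition). That would make the trailing 0/20 a per-device settle delay, with the adt7473 entries getting 20, and it is also why callers elsewhere in this series now pass &info[type].dev to drm_i2c_encoder_init() instead of the array element itself.

	/* presumed shape, for illustration only */
	struct nouveau_i2c_board_info {
		struct i2c_board_info dev; /* what the i2c core consumes */
		u8 udelay;                 /* settle delay used while probing */
	};
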
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 42ba633..1d15c52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -126,7 +126,7 @@
 
 	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
 
-	intr = nv_rd32(therm, 0x20100);
+	intr = nv_rd32(therm, 0x20100) & 0x3ff;
 
 	/* THRS_4: downclock */
 	if (intr & 0x002) {
@@ -209,6 +209,19 @@
 	return nouveau_therm_preinit(&priv->base.base);
 }
 
+int
+nv84_therm_fini(struct nouveau_object *object, bool suspend)
+{
+	/* Disable PTherm IRQs */
+	nv_wr32(object, 0x20000, 0x00000000);
+
+	/* ACK all PTherm IRQs */
+	nv_wr32(object, 0x20100, 0xffffffff);
+	nv_wr32(object, 0x1100, 0x10000); /* PBUS */
+
+	return _nouveau_therm_fini(object, suspend);
+}
+
 struct nouveau_oclass
 nv84_therm_oclass = {
 	.handle = NV_SUBDEV(THERM, 0x84),
@@ -216,6 +229,6 @@
 		.ctor = nv84_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = _nouveau_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index d11a7c4..3b2c458 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -94,6 +94,6 @@
 		.ctor = nva3_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = nva3_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 54c28bd..4dd4f81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -148,6 +148,6 @@
 		.ctor = nvd0_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = nvd0_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index dd38529..96f8f95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -76,6 +76,7 @@
 	spinlock_t lock;
 	struct nouveau_therm_trip_point *last_trip;
 	int mode;
+	int cstate;
 	int suspend;
 
 	/* bios */
@@ -144,6 +145,7 @@
 int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
 int nv50_fan_pwm_clock(struct nouveau_therm *);
 int nv84_temp_get(struct nouveau_therm *therm);
+int nv84_therm_fini(struct nouveau_object *object, bool suspend);
 
 int nva3_therm_fan_sense(struct nouveau_therm *);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b80a330..cfde9eb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,8 +180,6 @@
 
 	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
 
-	nv_debug(therm, "polling the internal temperature\n");
-
 	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
 					     NOUVEAU_THERM_THRS_FANBOOST);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 57711ec..c0bdd10 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -119,16 +119,8 @@
 {
 	struct nv04_timer_priv *priv = (void *)ptimer;
 	unsigned long flags;
-
-	/* avoid deleting an entry while the alarm intr is running */
 	spin_lock_irqsave(&priv->lock, flags);
-
-	/* delete the alarm from the list */
-	list_del(&alarm->head);
-
-	/* reset the head so as list_empty returns 1 */
-	INIT_LIST_HEAD(&alarm->head);
-
+	list_del_init(&alarm->head);
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
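
The replacement leans on list_del_init() being exactly the open-coded sequence it removes; from <linux/list.h>:

	static inline void list_del_init(struct list_head *entry)
	{
		__list_del_entry(entry); /* unlink from the alarm list */
		INIT_LIST_HEAD(entry);   /* point at itself so list_empty() returns 1 */
	}
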
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
new file mode 100644
index 0000000..32794a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/vmap.h>
+#include <subdev/bios/volt.h>
+
+static int
+nouveau_volt_get(struct nouveau_volt *volt)
+{
+	if (volt->vid_get) {
+		int ret = volt->vid_get(volt), i;
+		if (ret >= 0) {
+			for (i = 0; i < volt->vid_nr; i++) {
+				if (volt->vid[i].vid == ret)
+					return volt->vid[i].uv;
+			}
+			ret = -EINVAL;
+		}
+		return ret;
+	}
+	return -ENODEV;
+}
+
+static int
+nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
+{
+	if (volt->vid_set) {
+		int i, ret = -EINVAL;
+		for (i = 0; i < volt->vid_nr; i++) {
+			if (volt->vid[i].uv == uv) {
+				ret = volt->vid_set(volt, volt->vid[i].vid);
+				nv_debug(volt, "set %duv: %d\n", uv, ret);
+				break;
+			}
+		}
+		return ret;
+	}
+	return -ENODEV;
+}
+
+static int
+nouveau_volt_map(struct nouveau_volt *volt, u8 id)
+{
+	struct nouveau_bios *bios = nouveau_bios(volt);
+	struct nvbios_vmap_entry info;
+	u8  ver, len;
+	u16 vmap;
+
+	vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+	if (vmap) {
+		if (info.link != 0xff) {
+			int ret = nouveau_volt_map(volt, info.link);
+			if (ret < 0)
+				return ret;
+			info.min += ret;
+		}
+		return info.min;
+	}
+
+	return id ? id * 10000 : -ENODEV;
+}
+
+static int
+nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+	int ret = nouveau_volt_map(volt, id);
+	if (ret >= 0) {
+		int prev = nouveau_volt_get(volt);
+		if (!condition || prev < 0 ||
+		    (condition < 0 && ret < prev) ||
+		    (condition > 0 && ret > prev)) {
+			ret = nouveau_volt_set(volt, ret);
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+int
+_nouveau_volt_init(struct nouveau_object *object)
+{
+	struct nouveau_volt *volt = (void *)object;
+	int ret;
+
+	ret = nouveau_subdev_init(&volt->base);
+	if (ret)
+		return ret;
+
+	ret = volt->get(volt);
+	if (ret < 0) {
+		if (ret != -ENODEV)
+			nv_debug(volt, "current voltage unknown\n");
+		return 0;
+	}
+
+	nv_info(volt, "GPU voltage: %duv\n", ret);
+	return 0;
+}
+
+void
+_nouveau_volt_dtor(struct nouveau_object *object)
+{
+	struct nouveau_volt *volt = (void *)object;
+	nouveau_subdev_destroy(&volt->base);
+}
+
+int
+nouveau_volt_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_volt *volt;
+	struct nvbios_volt_entry ivid;
+	struct nvbios_volt info;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+	int ret, i;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
+				     "voltage", length, pobject);
+	volt = *pobject;
+	if (ret)
+		return ret;
+
+	volt->get = nouveau_volt_get;
+	volt->set = nouveau_volt_set;
+	volt->set_id = nouveau_volt_set_id;
+
+	data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+	if (data && info.vidmask && info.base && info.step) {
+		for (i = 0; i < info.vidmask + 1; i++) {
+			if (info.base >= info.min &&
+			    info.base <= info.max) {
+				volt->vid[volt->vid_nr].uv = info.base;
+				volt->vid[volt->vid_nr].vid = i;
+				volt->vid_nr++;
+			}
+			info.base += info.step;
+		}
+		volt->vid_mask = info.vidmask;
+	} else
+	if (data && info.vidmask) {
+		for (i = 0; i < cnt; i++) {
+			data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+						      &ivid);
+			if (data) {
+				volt->vid[volt->vid_nr].uv = ivid.voltage;
+				volt->vid[volt->vid_nr].vid = ivid.vid;
+				volt->vid_nr++;
+			}
+		}
+		volt->vid_mask = info.vidmask;
+	}
+
+	if (volt->vid_nr) {
+		for (i = 0; i < volt->vid_nr; i++) {
+			nv_debug(volt, "VID %02x: %duv\n",
+				 volt->vid[i].vid, volt->vid[i].uv);
+		}
+
+		/*XXX: this is an assumption.. there probably exist boards
+		 * out there with i2c-connected voltage controllers too..
+		 */
+		ret = nouveau_voltgpio_init(volt);
+		if (ret == 0) {
+			volt->vid_get = nouveau_voltgpio_get;
+			volt->vid_set = nouveau_voltgpio_set;
+		}
+	}
+
+	return ret;
+}
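
To make the first (ranged) table form concrete, with invented numbers: base = 600000uv, step = 12500uv and vidmask = 0x1f would yield vid 0 -> 600000uv, vid 1 -> 612500uv, ... vid 31 -> 987500uv, except that steps falling outside [info.min, info.max] are skipped rather than clamped. The second form instead copies explicit (voltage, vid) pairs straight from the per-entry table.
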
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
new file mode 100644
index 0000000..755fa91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+#include <subdev/gpio.h>
+#include <subdev/bios/gpio.h>
+
+static const u8 tags[] = {
+	DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
+	DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
+};
+
+int
+nouveau_voltgpio_get(struct nouveau_volt *volt)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	u8 vid = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tags); i++) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->get(gpio, 0, tags[i], 0xff);
+			if (ret < 0)
+				return ret;
+			vid |= ret << i;
+		}
+	}
+
+	return vid;
+}
+
+int
+nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_voltgpio_init(struct nouveau_volt *volt)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	struct dcb_gpio_func func;
+	int i;
+
+	/* check we have gpio function info for each vid bit.  on some
+	 * boards (e.g. nvs295) the vid mask has more bits than there
+	 * are valid gpio functions... from traces, nvidia appears to
+	 * just touch the existing ones, so let's mask off the invalid
+	 * bits and continue with life
+	 */
+	for (i = 0; i < ARRAY_SIZE(tags); i++) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+			if (ret) {
+				if (ret != -ENOENT)
+					return ret;
+				nv_debug(volt, "VID bit %d has no GPIO\n", i);
+				volt->vid_mask &= ~(1 << i);
+			}
+		}
+	}
+
+	return 0;
+}
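
As a quick illustration of the bit packing in nouveau_voltgpio_get(), with invented readings: vid_mask = 0x07 and the VID0/VID1/VID2 GPIOs reading 1, 0, 1 gives vid = 0b101 = 5, which nouveau_volt_get() then resolves against the vid table built in base.c.
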
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
new file mode 100644
index 0000000..87d5358
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+struct nv40_volt_priv {
+	struct nouveau_volt base;
+};
+
+static int
+nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_volt_priv *priv;
+	int ret;
+
+	ret = nouveau_volt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_volt_oclass = {
+	.handle = NV_SUBDEV(VOLT, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_volt_ctor,
+		.dtor = _nouveau_volt_dtor,
+		.init = _nouveau_volt_init,
+		.fini = _nouveau_volt_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
index ea3f5b8..424a489 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -5,6 +5,7 @@
 nouveau-y += dispnv04/dfp.o
 nouveau-y += dispnv04/disp.o
 nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/overlay.o
 nouveau-y += dispnv04/tvmodesnv17.o
 nouveau-y += dispnv04/tvnv04.o
 nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462..2a15b98 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@
 	sim_data.nvclk_khz = NVClk;
 	sim_data.bpp = bpp;
 	sim_data.two_heads = nv_two_heads(dev);
-	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
-	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+	if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+	    (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
 		uint32_t type;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@
 
 	if (nv_device(drm->device)->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
-	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
-		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11..0e3270c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@
 			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
 	}
 
-	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
 	/*
 	 * Time Sequencer
 	 */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff..936a71c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,10 +490,10 @@
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
 	 */
-	if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
-	    dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
 			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
 			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
@@ -625,13 +625,15 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
 	struct nouveau_i2c_port *port = i2c->find(i2c, 2);
-	struct i2c_board_info info[] = {
+	struct nouveau_i2c_board_info info[] = {
 		{
-			.type = "sil164",
-			.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
-			.platform_data = &(struct sil164_encoder_params) {
-				SIL164_INPUT_EDGE_RISING
-			}
+			{
+				.type = "sil164",
+				.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+				.platform_data = &(struct sil164_encoder_params) {
+					SIL164_INPUT_EDGE_RISING
+				}
+			}, 0
 		},
 		{ }
 	};
@@ -646,7 +648,7 @@
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type]);
+			     &port->adapter, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4908d3f..b13ff0f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -140,6 +140,8 @@
 		func->save(encoder);
 	}
 
+	nouveau_overlay_init(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187..56a28db 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -123,11 +123,14 @@
 /* nv17_tv.c */
 int nv17_tv_create(struct drm_connector *, struct dcb_output *);
 
+/* overlay.c */
+void nouveau_overlay_init(struct drm_device *dev);
+
 static inline bool
 nv_two_heads(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
 	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +142,14 @@
 static inline bool
 nv_gf4_disp_arch(struct drm_device *dev)
 {
-	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+	return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
 }
 
 static inline bool
 nv_two_reg_pll(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
 		return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b..aca76af 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,6 +27,7 @@
 #include "hw.h"
 
 #include <subdev/bios/pll.h>
+#include <subdev/fb.h>
 #include <subdev/clock.h>
 #include <subdev/timer.h>
 
@@ -220,7 +221,7 @@
 	int ret;
 
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+	    (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +231,7 @@
 		return 400000 / mpllP;
 	} else
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+	    (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
@@ -664,6 +665,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -680,10 +682,10 @@
 		nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
 		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
 		nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -740,7 +742,7 @@
 	}
 	/* NV11 and NV20 stop at 0x52. */
 	if (nv_gf4_disp_arch(dev)) {
-		if (nv_device(drm->device)->card_type == NV_10) {
+		if (nv_device(drm->device)->card_type < NV_20) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
 			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
new file mode 100644
index 0000000..3618ac6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Based on the pre-KMS implementation in xf86-video-nouveau, written by
+ * Arthur Huillet.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_drm.h"
+
+#include "nouveau_bo.h"
+#include "nouveau_connector.h"
+#include "nouveau_display.h"
+#include "nvreg.h"
+
+
+struct nouveau_plane {
+	struct drm_plane base;
+	bool flip;
+	struct nouveau_bo *cur;
+
+	struct {
+		struct drm_property *colorkey;
+		struct drm_property *contrast;
+		struct drm_property *brightness;
+		struct drm_property *hue;
+		struct drm_property *saturation;
+		struct drm_property *iturbt_709;
+	} props;
+
+	int colorkey;
+	int contrast;
+	int brightness;
+	int hue;
+	int saturation;
+	int iturbt_709;
+};
+
+static uint32_t formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_UYVY,
+};
+
+/* Sine can be approximated with
+ * http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula
+ * sin(x degrees) ~= 4 x (180 - x) / (40500 - x (180 - x) )
+ * Note that this only works for the range [0, 180].
+ * Also note that sin(x) == -sin(x - 180)
+ */
+static inline int
+sin_mul(int degrees, int factor)
+{
+	if (degrees > 180) {
+		degrees -= 180;
+		factor *= -1;
+	}
+	return factor * 4 * degrees * (180 - degrees) /
+		(40500 - degrees * (180 - degrees));
+}
+
+/* cos(x) = sin(x + 90) */
+static inline int
+cos_mul(int degrees, int factor)
+{
+	return sin_mul((degrees + 90) % 360, factor);
+}
+
+static int
+nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		  unsigned int crtc_w, unsigned int crtc_h,
+		  uint32_t src_x, uint32_t src_y,
+		  uint32_t src_w, uint32_t src_h)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cur = nv_plane->cur;
+	bool flip = nv_plane->flip;
+	int soff = NV_PCRTC0_SIZE * nv_crtc->index;
+	int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
+	int format, ret;
+
+	/* Source parameters given in 16.16 fixed point, ignore fractional. */
+	src_x = src_x >> 16;
+	src_y = src_y >> 16;
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	format = ALIGN(src_w * 4, 0x100);
+	if (format > 0xffff)
+		return -EINVAL;
+
+	ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	nv_plane->cur = nv_fb->nvbo;
+
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+
+	nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+	nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+	nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+	nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+	nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+	nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
+	nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+	nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+		format |= NV_PVIDEO_FORMAT_PLANAR;
+	}
+	if (nv_plane->iturbt_709)
+		format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+	if (nv_plane->colorkey & (1 << 24))
+		format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+			nv_fb->nvbo->bo.offset + fb->offsets[1]);
+	}
+	nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+	nv_wr32(dev, NV_PVIDEO_STOP, 0);
+	/* TODO: wait for vblank? */
+	nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+	nv_plane->flip = !flip;
+
+	if (cur)
+		nouveau_bo_unpin(cur);
+
+	return 0;
+}
+
+static int
+nv10_disable_plane(struct drm_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	if (nv_plane->cur) {
+		nouveau_bo_unpin(nv_plane->cur);
+		nv_plane->cur = NULL;
+	}
+
+	return 0;
+}
+
+static void
+nv10_destroy_plane(struct drm_plane *plane)
+{
+	nv10_disable_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(plane);
+}
+
+static void
+nv10_set_params(struct nouveau_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->base.dev);
+	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
+	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
+		(cos_mul(plane->hue, plane->saturation) & 0xffff);
+	u32 format = 0;
+
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+	nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+
+	if (plane->cur) {
+		if (plane->iturbt_709)
+			format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+		if (plane->colorkey & (1 << 24))
+			format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+		nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+			NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
+			NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
+			format);
+	}
+}
+
+static int
+nv10_set_property(struct drm_plane *plane,
+		  struct drm_property *property,
+		  uint64_t value)
+{
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	if (property == nv_plane->props.colorkey)
+		nv_plane->colorkey = value;
+	else if (property == nv_plane->props.contrast)
+		nv_plane->contrast = value;
+	else if (property == nv_plane->props.brightness)
+		nv_plane->brightness = value;
+	else if (property == nv_plane->props.hue)
+		nv_plane->hue = value;
+	else if (property == nv_plane->props.saturation)
+		nv_plane->saturation = value;
+	else if (property == nv_plane->props.iturbt_709)
+		nv_plane->iturbt_709 = value;
+	else
+		return -EINVAL;
+
+	nv10_set_params(nv_plane);
+	return 0;
+}
+
+static const struct drm_plane_funcs nv10_plane_funcs = {
+	.update_plane = nv10_update_plane,
+	.disable_plane = nv10_disable_plane,
+	.set_property = nv10_set_property,
+	.destroy = nv10_destroy_plane,
+};
+
+static void
+nv10_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+	int ret;
+
+	if (!plane)
+		return;
+
+	ret = drm_plane_init(device, &plane->base, 3 /* both CRTCs */,
+			     &nv10_plane_funcs,
+			     formats, ARRAY_SIZE(formats), false);
+	if (ret)
+		goto err;
+
+	/* Set up the plane properties */
+	plane->props.colorkey = drm_property_create_range(
+			device, 0, "colorkey", 0, 0x01ffffff);
+	plane->props.contrast = drm_property_create_range(
+			device, 0, "contrast", 0, 8192 - 1);
+	plane->props.brightness = drm_property_create_range(
+			device, 0, "brightness", 0, 1024);
+	plane->props.hue = drm_property_create_range(
+			device, 0, "hue", 0, 359);
+	plane->props.saturation = drm_property_create_range(
+			device, 0, "saturation", 0, 8192 - 1);
+	plane->props.iturbt_709 = drm_property_create_range(
+			device, 0, "iturbt_709", 0, 1);
+	if (!plane->props.colorkey ||
+	    !plane->props.contrast ||
+	    !plane->props.brightness ||
+	    !plane->props.hue ||
+	    !plane->props.saturation ||
+	    !plane->props.iturbt_709)
+		goto cleanup;
+
+	plane->colorkey = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.colorkey, plane->colorkey);
+
+	plane->contrast = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.contrast, plane->contrast);
+
+	plane->brightness = 512;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.brightness, plane->brightness);
+
+	plane->hue = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.hue, plane->hue);
+
+	plane->saturation = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.saturation, plane->saturation);
+
+	plane->iturbt_709 = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.iturbt_709, plane->iturbt_709);
+
+	nv10_set_params(plane);
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	return;
+cleanup:
+	drm_plane_cleanup(&plane->base);
+err:
+	kfree(plane);
+	nv_error(dev, "Failed to create plane\n");
+}
+
+void
+nouveau_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+		nv10_overlay_init(device);
+}
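
A quick sanity check of the Bhaskara I approximation used above, taking the driver's default saturation of 0x1000 (plain arithmetic, and exact at this angle):

	sin_mul(30, 4096) = 4096 * 4 * 30 * (180 - 30) / (40500 - 30 * (180 - 30))
	                  = 4096 * 18000 / 36000
	                  = 2048               /* == 0.5 * 4096, and sin(30) = 0.5 */
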
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index bf13db4..cc4b208 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -37,15 +37,18 @@
 
 #include <subdev/i2c.h>
 
-static struct i2c_board_info nv04_tv_encoder_info[] = {
+static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
 	{
-		I2C_BOARD_INFO("ch7006", 0x75),
-		.platform_data = &(struct ch7006_encoder_params) {
-			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
-			0, 0, 0,
-			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
-			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
-		}
+		{
+			I2C_BOARD_INFO("ch7006", 0x75),
+			.platform_data = &(struct ch7006_encoder_params) {
+				CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+				0, 0, 0,
+				CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+				CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+			}
+		},
+		0
 	},
 	{ }
 };
@@ -229,7 +232,8 @@
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter, &nv04_tv_encoder_info[type]);
+				   &port->adapter,
+				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7..6828d81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,6 +87,7 @@
 	case NV_04:
 		return 0x006e;
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 	case NV_40:
@@ -130,7 +131,7 @@
 	if (chan->ntfy) {
 		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
-		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
 
 	if (chan->heap.block_size)
@@ -178,10 +179,10 @@
 		getparam->value = device->chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		getparam->value = dev->pci_vendor;
+		getparam->value = dev->pdev->vendor;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		getparam->value = dev->pci_device;
+		getparam->value = dev->pdev->device;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
 		if (drm_pci_device_is_agp(dev))
@@ -297,7 +298,7 @@
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	if (device->card_type < NV_C0) {
+	if (device->card_type < NV_10) {
 		init->subchan[0].handle = 0x00000000;
 		init->subchan[0].grclass = 0x0000;
 		init->subchan[1].handle = NvSw;
@@ -320,7 +321,7 @@
 			goto done;
 	}
 
-	ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
 				    &init->notifier_handle);
 	if (ret)
 		goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e1..07273a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -253,18 +253,15 @@
 
 static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 {
-	acpi_handle dhandle, nvidia_handle;
-	acpi_status status;
+	acpi_handle dhandle;
 	int retval = 0;
 
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
 	if (!dhandle)
 		return false;
 
-	status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
-	if (ACPI_FAILURE(status)) {
+	if (!acpi_has_method(dhandle, "_DSM"))
 		return false;
-	}
 
 	if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
 		retval |= NOUVEAU_DSM_HAS_MUX;
@@ -317,6 +314,16 @@
 			has_optimus = 1;
 	}
 
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
+		vga_count++;
+
+		retval = nouveau_dsm_pci_probe(pdev);
+		if (retval & NOUVEAU_DSM_HAS_MUX)
+			has_dsm |= 1;
+		if (retval & NOUVEAU_DSM_HAS_OPT)
+			has_optimus = 1;
+	}
+
 	/* find the optimus DSM or the old v1 DSM */
 	if (has_optimus == 1) {
 		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 6e7a55f..2953c4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -11,10 +11,28 @@
 static int nouveau_agpmode = -1;
 module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
+struct nouveau_agpmode_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+
+	{},
+};
+
 static unsigned long
-get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
 {
 	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
+	int agpmode = nouveau_agpmode;
+	unsigned long mode = info->mode;
 
 	/*
 	 * FW seems to be broken on nv18, it makes the card lock up
@@ -24,11 +42,27 @@
 		mode &= ~PCI_AGP_COMMAND_FW;
 
 	/*
+	 * Go through the quirks list and adjust the agpmode accordingly.
+	 */
+	while (agpmode == -1 && quirk->hostbridge_vendor) {
+		if (info->id_vendor == quirk->hostbridge_vendor &&
+		    info->id_device == quirk->hostbridge_device &&
+		    device->pdev->vendor == quirk->chip_vendor &&
+		    device->pdev->device == quirk->chip_device) {
+			agpmode = quirk->mode;
+			nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+				agpmode);
+			break;
+		}
+		++quirk;
+	}
+
+	/*
 	 * AGP mode set in the command line.
 	 */
-	if (nouveau_agpmode > 0) {
+	if (agpmode > 0) {
 		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+		int rate = agpv3 ? agpmode / 4 : agpmode;
 
 		mode = (mode & ~0x7) | (rate & 0x7);
 	}
@@ -90,7 +124,7 @@
 		if (ret)
 			return;
 
-		mode.mode  = get_agp_mode(drm, info.mode);
+		mode.mode  = get_agp_mode(drm, &info);
 		mode.mode &= ~PCI_AGP_COMMAND_FW;
 
 		ret = drm_agp_enable(dev, mode);
@@ -139,7 +173,7 @@
 	}
 
 	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, info.mode);
+	mode.mode = get_agp_mode(drm, &info);
 
 	ret = drm_agp_enable(dev, mode);
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad21..630f6e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 31;
-	bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+	bd = backlight_device_register("nv_backlight", connector->kdev, drm,
 				       &nv40_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
@@ -204,7 +204,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 100;
-	bd = backlight_device_register("nv_backlight", &connector->kdev,
+	bd = backlight_device_register("nv_backlight", connector->kdev,
 				       nv_encoder, ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e72876..4c3feaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
 	if (script == LVDS_RESET &&
-	    (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
-	     dev->pci_device == 0x0329))
+	    (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+	     dev->pdev->device == 0x0329))
 		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d..c0fde6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,12 +98,7 @@
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		if (fence) {
-			/* Mark it as pending. */
-			tile->fence = fence;
-			nouveau_fence_ref(fence);
-		}
-
+		tile->fence = nouveau_fence_ref(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -146,7 +141,7 @@
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->gem))
+	if (unlikely(nvbo->gem.filp))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -269,7 +264,8 @@
 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
 
-	if (nv_device(drm->device)->card_type == NV_10 &&
+	if ((nv_device(drm->device)->card_type == NV_10 ||
+	     nv_device(drm->device)->card_type == NV_11) &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -982,7 +978,7 @@
 		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = drm->ttm.chan;
+	struct nouveau_channel *chan = drm->ttm.chan;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
@@ -1267,7 +1263,7 @@
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
 }
 
 static int
@@ -1461,14 +1457,12 @@
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
+	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
 	struct nouveau_fence *old_fence = NULL;
 
-	if (likely(fence))
-		nouveau_fence_ref(fence);
-
 	spin_lock(&nvbo->bo.bdev->fence_lock);
 	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = fence;
+	nvbo->bo.sync_obj = new_fence;
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	nouveau_fence_unref(&old_fence);
@@ -1551,7 +1545,8 @@
 
 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
 		if (node->sg)
 			nouveau_vm_map_sg_table(vma, 0, size, node);
 		else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbb..ff17c1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@
 	u32 tile_flags;
 	struct nouveau_drm_tile *tile;
 
-	struct drm_gem_object *gem;
+	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
+	 * gem reference to it! For debugging, use gem.filp != NULL to test
+	 * whether it is valid. */
+	struct drm_gem_object gem;
 
 	/* protect by the ttm reservation lock */
 	int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e84f4c3..cc5152b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -346,22 +346,17 @@
 	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING(chan, 0x00000000);
 
-	/* allocate software object class (used for fences on <= nv05, and
-	 * to signal flip completion), bind it to a subchannel.
-	 */
-	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+	/* allocate software object class (used for fences on <= nv05) */
+	if (device->card_type < NV_10) {
 		ret = nouveau_object_new(nv_object(client), chan->handle,
-					 NvSw, nouveau_abi16_swclass(chan->drm),
-					 NULL, 0, &object);
+					 NvSw, 0x006e, NULL, 0, &object);
 		if (ret)
 			return ret;
 
 		swch = (void *)object->parent;
 		swch->flip = nouveau_flip_complete;
 		swch->flip_data = chan;
-	}
 
-	if (device->card_type < NV_C0) {
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9..1674882 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -100,6 +100,7 @@
 nouveau_connector_destroy(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	nouveau_event_ref(NULL, &nv_connector->hpd_func);
 	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
@@ -214,9 +215,10 @@
 	} else {
 		connector->doublescan_allowed = true;
 		if (nv_device(drm->device)->card_type == NV_20 ||
-		   (nv_device(drm->device)->card_type == NV_10 &&
-		    (dev->pci_device & 0x0ff0) != 0x0100 &&
-		    (dev->pci_device & 0x0ff0) != 0x0150))
+		    ((nv_device(drm->device)->card_type == NV_10 ||
+		      nv_device(drm->device)->card_type == NV_11) &&
+		     (dev->pdev->device & 0x0ff0) != 0x0100 &&
+		     (dev->pdev->device & 0x0ff0) != 0x0150))
 			/* HW is broken */
 			connector->interlace_allowed = false;
 		else
@@ -932,10 +934,9 @@
 }
 
 static int
-nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+nouveau_connector_hotplug(void *data, int index)
 {
-	struct nouveau_connector *nv_connector =
-		container_of(event, struct nouveau_connector, hpd_func);
+	struct nouveau_connector *nv_connector = data;
 	schedule_work(&nv_connector->hpd_work);
 	return NVKM_EVENT_KEEP;
 }
@@ -1007,10 +1008,16 @@
 
 		ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
 				 DCB_GPIO_UNUSED, &nv_connector->hpd);
-		nv_connector->hpd_func.func = nouveau_connector_hotplug;
 		if (ret)
 			nv_connector->hpd.func = DCB_GPIO_UNUSED;
 
+		if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
+			nouveau_event_new(gpio->events, nv_connector->hpd.line,
+					  nouveau_connector_hotplug,
+					  nv_connector,
+					 &nv_connector->hpd_func);
+		}
+
 		nv_connector->type = nv_connector->dcb[0];
 		if (drm_conntype_from_dcb(nv_connector->type) ==
 					  DRM_MODE_CONNECTOR_Unknown) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 6e399aa..264a778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -69,7 +69,7 @@
 
 	struct dcb_gpio_func hpd;
 	struct work_struct hpd_work;
-	struct nouveau_eventh hpd_func;
+	struct nouveau_eventh *hpd_func;
 
 	int dithering_mode;
 	int dithering_depth;
@@ -107,7 +107,4 @@
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
-int
-nouveau_connector_bpp(struct drm_connector *);
-
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590..7809d92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,7 +26,6 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 
 #include "nouveau_fbcon.h"
 #include "dispnv04/hw.h"
@@ -38,19 +37,92 @@
 
 #include "nouveau_fence.h"
 
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
 #include <engine/disp.h>
 
 #include <core/class.h>
 
+static int
+nouveau_display_vblank_handler(void *data, int head)
+{
+	struct nouveau_drm *drm = data;
+	drm_handle_vblank(drm->dev, head);
+	return NVKM_EVENT_KEEP;
+}
+
+int
+nouveau_display_vblank_enable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp) {
+		nouveau_event_get(disp->vblank[head]);
+		return 0;
+	}
+	return -EIO;
+}
+
+void
+nouveau_display_vblank_disable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp)
+		nouveau_event_put(disp->vblank[head]);
+}
+
+static void
+nouveau_display_vblank_fini(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	int i;
+
+	if (disp->vblank) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++)
+			nouveau_event_ref(NULL, &disp->vblank[i]);
+		kfree(disp->vblank);
+		disp->vblank = NULL;
+	}
+
+	drm_vblank_cleanup(dev);
+}
+
+static int
+nouveau_display_vblank_init(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+	int ret, i;
+
+	disp->vblank = kzalloc(dev->mode_config.num_crtc *
+			       sizeof(*disp->vblank), GFP_KERNEL);
+	if (!disp->vblank)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i,
+					nouveau_display_vblank_handler,
+					drm, &disp->vblank[i]);
+		if (ret) {
+			nouveau_display_vblank_fini(dev);
+			return ret;
+		}
+	}
+
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+	if (ret) {
+		nouveau_display_vblank_fini(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
 	if (fb->nvbo)
-		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
 
 	drm_framebuffer_cleanup(drm_fb);
 	kfree(fb);
@@ -63,7 +135,7 @@
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
-	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+	return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
 }
 
 static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -227,9 +299,7 @@
 int
 nouveau_display_init(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 	int ret;
 
@@ -243,10 +313,7 @@
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_get(gpio->events, conn->hpd.line,
-					 &conn->hpd_func);
-		}
+		if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
 	}
 
 	return ret;
@@ -255,18 +322,13 @@
 void
 nouveau_display_fini(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_put(gpio->events, conn->hpd.line,
-					 &conn->hpd_func);
-		}
+		if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
 	}
 
 	drm_kms_helper_poll_disable(dev);
@@ -336,6 +398,11 @@
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
+	if (nv_device(drm->device)->chipset < 0x11)
+		dev->mode_config.async_page_flip = false;
+	else
+		dev->mode_config.async_page_flip = true;
+
 	drm_kms_helper_poll_init(dev);
 	drm_kms_helper_poll_disable(dev);
 
@@ -352,7 +419,7 @@
 		goto disp_create_err;
 
 	if (dev->mode_config.num_crtc) {
-		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+		ret = nouveau_display_vblank_init(dev);
 		if (ret)
 			goto vblank_err;
 	}
@@ -374,7 +441,7 @@
 	struct nouveau_display *disp = nouveau_display(dev);
 
 	nouveau_backlight_exit(dev);
-	drm_vblank_cleanup(dev);
+	nouveau_display_vblank_fini(dev);
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -394,7 +461,7 @@
 
 	nouveau_display_fini(dev);
 
-	NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
+	NV_INFO(drm, "unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -492,19 +559,15 @@
 		goto fail;
 
 	/* Emit the pageflip */
-	ret = RING_SPACE(chan, 3);
+	ret = RING_SPACE(chan, 2);
 	if (ret)
 		goto fail;
 
-	if (nv_device(drm->device)->card_type < NV_C0) {
+	if (nv_device(drm->device)->card_type < NV_C0)
 		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-		OUT_RING  (chan, 0x00000000);
-		OUT_RING  (chan, 0x00000000);
-	} else {
-		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING  (chan, 0);
-		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
-	}
+	else
+		BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING  (chan, 0x00000000);
 	FIRE_RING (chan);
 
 	ret = nouveau_fence_new(chan, false, pfence);
@@ -521,22 +584,16 @@
 
 int
 nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct drm_pending_vblank_event *event,
-		       uint32_t page_flip_flags)
+		       struct drm_pending_vblank_event *event, u32 flags)
 {
+	const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 	struct nouveau_page_flip_state *s;
-	struct nouveau_channel *chan = NULL;
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_fence *fence;
-	struct ttm_validate_buffer resv[2] = {
-		{ .bo = &old_bo->bo },
-		{ .bo = &new_bo->bo },
-	};
-	struct ww_acquire_ctx ticket;
-	LIST_HEAD(res);
 	int ret;
 
 	if (!drm->channel)
@@ -546,26 +603,22 @@
 	if (!s)
 		return -ENOMEM;
 
-	/* Choose the channel the flip will be handled in */
-	spin_lock(&old_bo->bo.bdev->fence_lock);
-	fence = new_bo->bo.sync_obj;
-	if (fence)
-		chan = fence->channel;
-	if (!chan)
-		chan = drm->channel;
-	spin_unlock(&old_bo->bo.bdev->fence_lock);
+	/* synchronise rendering channel with the kernel's channel */
+	spin_lock(&new_bo->bo.bdev->fence_lock);
+	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+	spin_unlock(&new_bo->bo.bdev->fence_lock);
+	ret = nouveau_fence_sync(fence, chan);
+	if (ret)
+		return ret;
 
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
 		if (ret)
 			goto fail_free;
-
-		list_add(&resv[1].head, &res);
 	}
-	list_add(&resv[0].head, &res);
 
 	mutex_lock(&chan->cli->mutex);
-	ret = ttm_eu_reserve_buffers(&ticket, &res);
+	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
 	if (ret)
 		goto fail_unpin;
 
@@ -577,12 +630,29 @@
 
 	/* Emit a page flip */
 	if (nv_device(drm->device)->card_type >= NV_50) {
-		ret = nv50_display_flip_next(crtc, fb, chan, 0);
+		ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
 		if (ret)
 			goto fail_unreserve;
 	} else {
 		struct nv04_display *dispnv04 = nv04_display(dev);
-		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+		int head = nouveau_crtc(crtc)->index;
+
+		if (swap_interval) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				goto fail_unreserve;
+
+			BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+			OUT_RING  (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+			OUT_RING  (chan, head);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+			OUT_RING  (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+			OUT_RING  (chan, 0);
+		}
+
+		nouveau_bo_ref(new_bo, &dispnv04->image[head]);
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -593,14 +663,15 @@
 	/* Update the crtc struct and cleanup */
 	crtc->fb = fb;
 
-	ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
 	if (old_bo != new_bo)
 		nouveau_bo_unpin(old_bo);
 	nouveau_fence_unref(&fence);
 	return 0;
 
 fail_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &res);
+	ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 	mutex_unlock(&chan->cli->mutex);
 	if (old_bo != new_bo)
@@ -674,8 +745,8 @@
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
-	drm_gem_object_unreference_unlocked(bo->gem);
+	ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(&bo->gem);
 	return ret;
 }
 
@@ -688,7 +759,7 @@
 
 	gem = drm_gem_object_lookup(dev, file_priv, handle);
 	if (gem) {
-		struct nouveau_bo *bo = gem->driver_private;
+		struct nouveau_bo *bo = nouveau_gem_object(gem);
 		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
 		drm_gem_object_unreference_unlocked(gem);
 		return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 025c66f..8bc8bab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,8 @@
 	int  (*init)(struct drm_device *);
 	void (*fini)(struct drm_device *);
 
+	struct nouveau_eventh **vblank;
+
 	struct drm_property *dithering_mode;
 	struct drm_property *dithering_depth;
 	struct drm_property *underscan_property;
@@ -59,6 +61,8 @@
 int  nouveau_display_suspend(struct drm_device *dev);
 void nouveau_display_repin(struct drm_device *dev);
 void nouveau_display_resume(struct drm_device *dev);
+int  nouveau_display_vblank_enable(struct drm_device *, int);
+void nouveau_display_vblank_disable(struct drm_device *, int);
 
 int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 690d593..984004d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -51,9 +51,11 @@
 	NvSubCtxSurf2D  = 0,
 	NvSubSw		= 1,
 	NvSubImageBlit  = 2,
-	NvSub2D		= 3,
 	NvSubGdiRect    = 3,
-	NvSubCopy	= 4,
+
+	NvSub2D		= 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	NvSubCopy	= 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	FermiSw		= 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
 };
 
 /* Object handles. */
@@ -194,7 +196,6 @@
 #define NV84_SUBCHAN_UEVENT                                          0x00000020
 #define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
 #define NV10_SUBCHAN_REF_CNT                                         0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP                                       0x00000054
 #define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
 #define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
 #define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c53..7a3759f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,7 @@
 #include <engine/device.h>
 #include <engine/disp.h>
 #include <engine/fifo.h>
+#include <engine/software.h>
 
 #include <subdev/vm.h>
 
@@ -46,7 +47,8 @@
 #include "nouveau_gem.h"
 #include "nouveau_agp.h"
 #include "nouveau_vga.h"
-#include "nouveau_pm.h"
+#include "nouveau_sysfs.h"
+#include "nouveau_hwmon.h"
 #include "nouveau_acpi.h"
 #include "nouveau_bios.h"
 #include "nouveau_ioctl.h"
@@ -78,41 +80,6 @@
 
 static struct drm_driver driver;
 
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-	struct nouveau_drm *drm =
-		container_of(event, struct nouveau_drm, vblank[head]);
-	drm_handle_vblank(drm->dev, head);
-	return NVKM_EVENT_KEEP;
-}
-
-static int
-nouveau_drm_vblank_enable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-
-	if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
-		return -EIO;
-	WARN_ON_ONCE(drm->vblank[head].func);
-	drm->vblank[head].func = nouveau_drm_vblank_handler;
-	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
-	return 0;
-}
-
-static void
-nouveau_drm_vblank_disable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-	if (drm->vblank[head].func)
-		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
-	else
-		WARN_ON_ONCE(1);
-	drm->vblank[head].func = NULL;
-}
-
 static u64
 nouveau_name(struct pci_dev *pdev)
 {
@@ -177,7 +144,8 @@
 
 	/* initialise synchronisation routines */
 	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
-	else if (device->chipset   <  0x17) ret = nv10_fence_create(drm);
+	else if (device->card_type < NV_11 ||
+		 device->chipset   <  0x17) ret = nv10_fence_create(drm);
 	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
 	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
 	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
@@ -224,6 +192,32 @@
 		return;
 	}
 
+	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+				 nouveau_abi16_swclass(drm), NULL, 0, &object);
+	if (ret == 0) {
+		struct nouveau_software_chan *swch = (void *)object->parent;
+		ret = RING_SPACE(drm->channel, 2);
+		if (ret == 0) {
+			if (device->card_type < NV_C0) {
+				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+				OUT_RING  (drm->channel, NVDRM_NVSW);
+			} else
+			if (device->card_type < NV_E0) {
+				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+				OUT_RING  (drm->channel, 0x001f0000);
+			}
+		}
+		swch = (void *)object->parent;
+		swch->flip = nouveau_flip_complete;
+		swch->flip_data = drm->channel;
+	}
+
+	if (ret) {
+		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
 	if (device->card_type < NV_C0) {
 		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
 					&drm->notify);
@@ -418,8 +412,8 @@
 			goto fail_dispinit;
 	}
 
-	nouveau_pm_init(dev);
-
+	nouveau_sysfs_init(dev);
+	nouveau_hwmon_init(dev);
 	nouveau_accel_init(drm);
 	nouveau_fbcon_init(dev);
 
@@ -455,8 +449,8 @@
 	pm_runtime_get_sync(dev->dev);
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
-
-	nouveau_pm_fini(dev);
+	nouveau_hwmon_fini(dev);
+	nouveau_sysfs_fini(dev);
 
 	if (dev->mode_config.num_crtc)
 		nouveau_display_fini(dev);
@@ -496,16 +490,16 @@
 	int ret;
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "suspending display...\n");
+		NV_INFO(drm, "suspending display...\n");
 		ret = nouveau_display_suspend(dev);
 		if (ret)
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "evicting buffers...\n");
+	NV_INFO(drm, "evicting buffers...\n");
 	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 
-	NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
+	NV_INFO(drm, "waiting for kernel channels to go idle...\n");
 	if (drm->cechan) {
 		ret = nouveau_channel_idle(drm->cechan);
 		if (ret)
@@ -518,7 +512,7 @@
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "suspending client object trees...\n");
+	NV_INFO(drm, "suspending client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->suspend) {
 		if (!nouveau_fence(drm)->suspend(drm))
 			return -ENOMEM;
@@ -530,7 +524,7 @@
 			goto fail_client;
 	}
 
-	NV_SUSPEND(drm, "suspending kernel object tree...\n");
+	NV_INFO(drm, "suspending kernel object tree...\n");
 	ret = nouveau_client_fini(&drm->client.base, true);
 	if (ret)
 		goto fail_client;
@@ -544,7 +538,7 @@
 	}
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_resume(dev);
 	}
 	return ret;
@@ -563,7 +557,6 @@
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_suspend(drm_dev);
 	if (ret)
 		return ret;
@@ -571,8 +564,6 @@
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
-
 	return 0;
 }
 
@@ -582,15 +573,15 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_SUSPEND(drm, "re-enabling device...\n");
+	NV_INFO(drm, "re-enabling device...\n");
 
 	nouveau_agp_reset(drm);
 
-	NV_SUSPEND(drm, "resuming kernel object tree...\n");
+	NV_INFO(drm, "resuming kernel object tree...\n");
 	nouveau_client_init(&drm->client.base);
 	nouveau_agp_init(drm);
 
-	NV_SUSPEND(drm, "resuming client object trees...\n");
+	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
 		nouveau_fence(drm)->resume(drm);
 
@@ -599,10 +590,9 @@
 	}
 
 	nouveau_run_vbios_init(dev);
-	nouveau_pm_resume(dev);
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_repin(dev);
 	}
 
@@ -626,19 +616,15 @@
 		return ret;
 	pci_set_master(pdev);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -648,12 +634,10 @@
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
 	ret = nouveau_do_suspend(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return ret;
 }
 
@@ -663,18 +647,14 @@
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -816,8 +796,8 @@
 #endif
 
 	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = nouveau_drm_vblank_enable,
-	.disable_vblank = nouveau_drm_vblank_disable,
+	.enable_vblank = nouveau_display_vblank_enable,
+	.disable_vblank = nouveau_display_vblank_disable,
 
 	.ioctls = nouveau_ioctls,
 	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -834,7 +814,6 @@
 	.gem_prime_vmap = nouveau_gem_prime_vmap,
 	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
@@ -879,6 +858,7 @@
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -915,6 +895,7 @@
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	nv_debug_level(NORMAL);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 994fd6e..4b0fb6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -51,10 +51,12 @@
 };
 
 enum nouveau_drm_handle {
-	NVDRM_CLIENT = 0xffffffff,
-	NVDRM_DEVICE = 0xdddddddd,
-	NVDRM_PUSH   = 0xbbbb0000, /* |= client chid */
-	NVDRM_CHAN   = 0xcccc0000, /* |= client chid */
+	NVDRM_CLIENT  = 0xffffffff,
+	NVDRM_DEVICE  = 0xdddddddd,
+	NVDRM_CONTROL = 0xdddddddc,
+	NVDRM_PUSH    = 0xbbbb0000, /* |= client chid */
+	NVDRM_CHAN    = 0xcccc0000, /* |= client chid */
+	NVDRM_NVSW    = 0x55550000,
 };
 
 struct nouveau_cli {
@@ -127,10 +129,10 @@
 	struct nvbios vbios;
 	struct nouveau_display *display;
 	struct backlight_device *backlight;
-	struct nouveau_eventh vblank[4];
 
 	/* power management */
-	struct nouveau_pm *pm;
+	struct nouveau_hwmon *hwmon;
+	struct nouveau_sysfs *sysfs;
 
 	/* display power reference */
 	bool have_disp_power_ref;
@@ -154,7 +156,6 @@
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
 #define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf6..7903e0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
-		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
 	drm_fb_helper_fini(&fbcon->helper);
@@ -503,34 +503,45 @@
 	drm->fbcon = NULL;
 }
 
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+void
+nouveau_fbcon_save_disable_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
-	drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	if (drm->fbcon) {
+		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	}
 }
 
-void nouveau_fbcon_restore_accel(struct drm_device *dev)
+void
+nouveau_fbcon_restore_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	if (drm->fbcon) {
+		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	}
 }
 
-void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	console_lock();
-	if (state == 0)
-		nouveau_fbcon_save_disable_accel(dev);
-	fb_set_suspend(drm->fbcon->helper.fbdev, state);
-	if (state == 1)
-		nouveau_fbcon_restore_accel(dev);
-	console_unlock();
+	if (drm->fbcon) {
+		console_lock();
+		if (state == 0)
+			nouveau_fbcon_save_disable_accel(dev);
+		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		if (state == 1)
+			nouveau_fbcon_restore_accel(dev);
+		console_unlock();
+	}
 }
 
-void nouveau_fbcon_zfill_all(struct drm_device *dev)
+void
+nouveau_fbcon_zfill_all(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	nouveau_fbcon_zfill(dev, drm->fbcon);
+	if (drm->fbcon) {
+		nouveau_fbcon_zfill(dev, drm->fbcon);
+	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index be31499..40cf52e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -165,17 +165,11 @@
 	return !fence->channel;
 }
 
-struct nouveau_fence_uevent {
-	struct nouveau_eventh handler;
-	struct nouveau_fence_priv *priv;
-};
-
 static int
-nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+nouveau_fence_wait_uevent_handler(void *data, int index)
 {
-	struct nouveau_fence_uevent *uevent =
-		container_of(event, struct nouveau_fence_uevent, handler);
-	wake_up_all(&uevent->priv->waiting);
+	struct nouveau_fence_priv *priv = data;
+	wake_up_all(&priv->waiting);
 	return NVKM_EVENT_KEEP;
 }
 
@@ -186,13 +180,16 @@
 	struct nouveau_channel *chan = fence->channel;
 	struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
 	struct nouveau_fence_priv *priv = chan->drm->fence;
-	struct nouveau_fence_uevent uevent = {
-		.handler.func = nouveau_fence_wait_uevent_handler,
-		.priv = priv,
-	};
+	struct nouveau_eventh *handler;
 	int ret = 0;
 
-	nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+	ret = nouveau_event_new(pfifo->uevent, 0,
+				nouveau_fence_wait_uevent_handler,
+				priv, &handler);
+	if (ret)
+		return ret;
+
+	nouveau_event_get(handler);
 
 	if (fence->timeout) {
 		unsigned long timeout = fence->timeout - jiffies;
@@ -224,7 +221,7 @@
 		}
 	}
 
-	nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+	nouveau_event_ref(NULL, &handler);
 	if (unlikely(ret < 0))
 		return ret;
 
@@ -309,7 +306,8 @@
 struct nouveau_fence *
 nouveau_fence_ref(struct nouveau_fence *fence)
 {
-	kref_get(&fence->kref);
+	if (fence)
+		kref_get(&fence->kref);
 	return fence;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b712..78a27f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
-	return 0;
-}
-
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 
-	if (!nvbo)
-		return;
-	nvbo->gem = NULL;
-
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	ttm_bo_unref(&bo);
-
 	drm_gem_object_release(gem);
-	kfree(gem);
+
+	/* reset filp so nouveau_bo_del_ttm() can test for it */
+	gem->filp = NULL;
+	ttm_bo_unref(&bo);
 }
 
 int
@@ -115,8 +106,7 @@
 
 	if (mapped) {
 		spin_lock(&nvbo->bo.bdev->fence_lock);
-		if (nvbo->bo.sync_obj)
-			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	}
 
@@ -186,14 +176,15 @@
 	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
 	}
 
-	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
-	nvbo->gem->driver_private = nvbo;
+	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
 	return 0;
 }
 
@@ -250,15 +241,15 @@
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
 	if (ret == 0) {
-		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
 		if (ret)
 			drm_gem_handle_delete(file_priv, req->info.handle);
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(nvbo->gem);
+	drm_gem_object_unreference_unlocked(&nvbo->gem);
 	return ret;
 }
 
@@ -266,7 +257,7 @@
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 		       uint32_t write_domains, uint32_t valid_domains)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
@@ -317,7 +308,8 @@
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
 
-		nouveau_bo_fence(nvbo, fence);
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -327,7 +319,7 @@
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
-		drm_gem_object_unreference_unlocked(nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
@@ -376,7 +368,7 @@
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
-		nvbo = gem->driver_private;
+		nvbo = nouveau_gem_object(gem);
 		if (nvbo == res_bo) {
 			res_bo = NULL;
 			drm_gem_object_unreference_unlocked(gem);
@@ -446,8 +438,7 @@
 	int ret = 0;
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	if (nvbo->bo.sync_obj)
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	if (fence) {
@@ -478,7 +469,7 @@
 			return ret;
 		}
 
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e429..7caca05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
-	return gem ? gem->driver_private : NULL;
+	return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
 }
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
 			   uint32_t domain, uint32_t tile_mode,
 			   uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
 extern void nouveau_gem_object_close(struct drm_gem_object *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
new file mode 100644
index 0000000..38a4db5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_hwmon.h"
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int temp = therm->temp_get(therm);
+
+	if (temp < 0)
+		return temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+						  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+					 struct device_attribute *a, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+			  nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+				     struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	      therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+					 struct device_attribute *a,
+					 const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp,
+			  nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+					  struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+					      struct device_attribute *a,
+					      const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp_hyst,
+			  nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+						  nouveau_hwmon_set_max_temp,
+						  0);
+
+static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+			    char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_max_temp_hyst,
+			  nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+						nouveau_hwmon_critical_temp,
+						nouveau_hwmon_set_critical_temp,
+						0);
+
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+				     struct device_attribute *a,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_critical_temp_hyst,
+			  nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp,
+					nouveau_hwmon_set_emergency_temp,
+					0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+				      struct device_attribute *a,
+				      const char *buf,
+				      size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp_hyst,
+					nouveau_hwmon_set_emergency_temp_hyst,
+					0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+						nouveau_hwmon_show_update_rate,
+						NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+}
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
+			  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_enable(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_enable,
+			  nouveau_hwmon_set_pwm1_enable, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->fan_get(therm);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
+		       const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret = -ENODEV;
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->fan_set(therm, value);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1,
+			  nouveau_hwmon_set_pwm1, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_min(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value))
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_min,
+			  nouveau_hwmon_set_pwm1_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_max(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value))
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_max,
+			  nouveau_hwmon_set_pwm1_max, 0);
+
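+/* the default group is always registered; the others depend on hw support */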
+static struct attribute *hwmon_default_attributes[] = {
+	&sensor_dev_attr_name.dev_attr.attr,
+	&sensor_dev_attr_update_rate.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_temp_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_default_attrgroup = {
+	.attrs = hwmon_default_attributes,
+};
+static const struct attribute_group hwmon_temp_attrgroup = {
+	.attrs = hwmon_temp_attributes,
+};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+	.attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+	.attrs = hwmon_pwm_fan_attributes,
+};
+#endif
+
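+/* register the hwmon device and sysfs groups; a no-op stub without CONFIG_HWMON */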
+int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	struct nouveau_hwmon *hwmon;
+	struct device *hwmon_dev;
+	int ret = 0;
+
+	hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+	hwmon->dev = dev;
+
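+	/* without a usable thermal subdev there is nothing to expose */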
+	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+		return -ENODEV;
+
+	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+	if (IS_ERR(hwmon_dev)) {
+		ret = PTR_ERR(hwmon_dev);
+		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
+		return ret;
+	}
+	dev_set_drvdata(hwmon_dev, dev);
+
+	/* set the default attributes */
+	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
+	if (ret)
+		goto error;
+
+	/* if the card has a working thermal sensor */
+	if (therm->temp_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card has a pwm fan */
+	/*XXX: incorrect, need better detection for this, some boards have
+	 *     the gpio entries for pwm fan control even when there's no
+	 *     actual fan connected to it... therm table? */
+	if (therm->fan_get && therm->fan_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_pwm_fan_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card can read the fan rpm */
+	if (therm->fan_sense(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_fan_rpm_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	hwmon->hwmon = hwmon_dev;
+
+	return 0;
+
+error:
+	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
+	hwmon_device_unregister(hwmon_dev);
+	hwmon->hwmon = NULL;
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
+
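+	/* remove every group nouveau_hwmon_init() may have created */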
+	if (hwmon->hwmon) {
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_default_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
+
+		hwmon_device_unregister(hwmon->hwmon);
+	}
+
+	nouveau_drm(dev)->hwmon = NULL;
+	kfree(hwmon);
+#endif
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.h b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
new file mode 100644
index 0000000..62ccbb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWMON_H__
+#define __NOUVEAU_HWMON_H__
+
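+/* per-device hwmon state, reachable via nouveau_drm(dev)->hwmon */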
+struct nouveau_hwmon {
+	struct drm_device *dev;
+	struct device *hwmon;
+};
+
+static inline struct nouveau_hwmon *
+nouveau_hwmon(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->hwmon;
+}
+
+/* nouveau_hwmon.c */
+int  nouveau_hwmon_init(struct drm_device *dev);
+void nouveau_hwmon_fini(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 6976875..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_HWSQ_H__
-#define __NOUVEAU_HWSQ_H__
-
-struct hwsq_ucode {
-	u8 data[0x200];
-	union {
-		u8  *u08;
-		u16 *u16;
-		u32 *u32;
-	} ptr;
-	u16 len;
-
-	u32 reg;
-	u32 val;
-};
-
-static inline void
-hwsq_init(struct hwsq_ucode *hwsq)
-{
-	hwsq->ptr.u08 = hwsq->data;
-	hwsq->reg = 0xffffffff;
-	hwsq->val = 0xffffffff;
-}
-
-static inline void
-hwsq_fini(struct hwsq_ucode *hwsq)
-{
-	do {
-		*hwsq->ptr.u08++ = 0x7f;
-		hwsq->len = hwsq->ptr.u08 - hwsq->data;
-	} while (hwsq->len & 3);
-	hwsq->ptr.u08 = hwsq->data;
-}
-
-static inline void
-hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
-{
-	u32 shift = 0;
-	while (usec & ~3) {
-		usec >>= 2;
-		shift++;
-	}
-
-	*hwsq->ptr.u08++ = (shift << 2) | usec;
-}
-
-static inline void
-hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
-{
-	flag += 0x80;
-	if (val >= 0)
-		flag += 0x20;
-	if (val >= 1)
-		flag += 0x20;
-	*hwsq->ptr.u08++ = flag;
-}
-
-static inline void
-hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
-{
-	*hwsq->ptr.u08++ = 0x5f;
-	*hwsq->ptr.u08++ = v0;
-	*hwsq->ptr.u08++ = v1;
-}
-
-static inline void
-hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
-{
-	if (val != hwsq->val) {
-		if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
-			*hwsq->ptr.u08++ = 0x42;
-			*hwsq->ptr.u16++ = (val & 0x0000ffff);
-		} else {
-			*hwsq->ptr.u08++ = 0xe2;
-			*hwsq->ptr.u32++ = val;
-		}
-
-		hwsq->val = val;
-	}
-
-	if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
-		*hwsq->ptr.u08++ = 0x40;
-		*hwsq->ptr.u16++ = (reg & 0x0000ffff);
-	} else {
-		*hwsq->ptr.u08++ = 0xe0;
-		*hwsq->ptr.u32++ = reg;
-	}
-	hwsq->reg = reg;
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
deleted file mode 100644
index 4f6a572..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
- * Copyright 2005 Stephane Marchesin
- *
- * The Weather Channel (TM) funded Tungsten Graphics to develop the
- * initial release of the Radeon 8500 driver under the XFree86 license.
- * This notice must be preserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Skeggs <bskeggs@redhat.com>
- *    Roy Spliet <r.spliet@student.tudelft.nl>
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/fb.h>
-
-static int
-nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
-	/* XXX: I don't trust the -1's and +1's... they must come
-	 *      from somewhere! */
-	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
-		    1 << 16 |
-		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
-		    (e->tCL + 2 - (t->tCWL - 1));
-
-	t->reg[2] = 0x20200000 |
-		    ((t->tCWL - 1) << 24 |
-		     e->tRRD << 16 |
-		     e->tRCDWR << 8 |
-		     e->tRCDRD);
-
-	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2]);
-	return 0;
-}
-
-static int
-nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
-
-	if (bit_table(dev, 'P', &P))
-		return -EINVAL;
-
-	switch (min(len, (u8) 22)) {
-	case 22:
-		unk21 = e->tUNK_21;
-	case 21:
-		unk20 = e->tUNK_20;
-	case 20:
-		if (e->tCWL > 0)
-			t->tCWL = e->tCWL;
-	case 19:
-		unk18 = e->tUNK_18;
-		break;
-	}
-
-	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
-	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
-				max(unk18, (u8) 1) << 16 |
-				(e->tWTR + 2 + (t->tCWL - 1)) << 8;
-
-	t->reg[2] = ((t->tCWL - 1) << 24 |
-		    e->tRRD << 16 |
-		    e->tRCDWR << 8 |
-		    e->tRCDRD);
-
-	t->reg[4] = e->tUNK_13 << 8  | e->tUNK_13;
-
-	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
-
-	t->reg[8] = boot->reg[8] & 0xffffff00;
-
-	if (P.version == 1) {
-		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
-
-		t->reg[3] = (0x14 + e->tCL) << 24 |
-			    0x16 << 16 |
-			    (e->tCL - 1) << 8 |
-			    (e->tCL - 1);
-
-		t->reg[4] |= boot->reg[4] & 0xffff0000;
-
-		t->reg[6] = (0x33 - t->tCWL) << 16 |
-			    t->tCWL << 8 |
-			    (0x2e + e->tCL - t->tCWL);
-
-		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
-
-		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
-		if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
-			t->reg[5] |= (e->tCL + 3) << 8;
-			t->reg[6] |= (t->tCWL - 2) << 8;
-			t->reg[8] |= (e->tCL - 4);
-		} else {
-			t->reg[5] |= (e->tCL + 2) << 8;
-			t->reg[6] |= t->tCWL << 8;
-			t->reg[8] |= (e->tCL - 2);
-		}
-	} else {
-		t->reg[1] |= (5 + e->tCL - (t->tCWL));
-
-		/* XXX: 0xb? 0x30? */
-		t->reg[3] = (0x30 + e->tCL) << 24 |
-			    (boot->reg[3] & 0x00ff0000)|
-			    (0xb + e->tCL) << 8 |
-			    (e->tCL - 1);
-
-		t->reg[4] |= (unk20 << 24 | unk21 << 16);
-
-		/* XXX: +6? */
-		t->reg[5] |= (t->tCWL + 6) << 8;
-
-		t->reg[6] = (0x5a + e->tCL) << 16 |
-			    (6 - e->tCL + t->tCWL) << 8 |
-			    (0x50 + e->tCL - t->tCWL);
-
-		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
-		t->reg[7] = (tmp7_3 << 24) |
-			    ((tmp7_3 - 6 + e->tCL) << 16) |
-			    0x202;
-	}
-
-	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(drm, "         230: %08x %08x %08x %08x\n",
-		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
-	NV_DEBUG(drm, "         240: %08x\n", t->reg[8]);
-	return 0;
-}
-
-static int
-nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (e->tCWL > 0)
-		t->tCWL = e->tCWL;
-
-	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
-		     e->tRFC << 8 | e->tRC);
-
-	t->reg[1] = (boot->reg[1] & 0xff000000) |
-		    (e->tRCDWR & 0x0f) << 20 |
-		    (e->tRCDRD & 0x0f) << 14 |
-		    (t->tCWL << 7) |
-		    (e->tCL & 0x0f);
-
-	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
-		    e->tWR << 16 | e->tWTR << 8;
-
-	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
-		    (e->tUNK_21 & 0xf) << 5 |
-		    (e->tUNK_13 & 0x1f);
-
-	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
-		    (e->tRRD&0x1f) << 15;
-
-	NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(drm, "         2a0: %08x\n", t->reg[4]);
-	return 0;
-}
-
-/**
- * MR generation methods
- */
-
-static int
-nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
-		    struct nouveau_pm_tbl_entry *e, u8 len,
-		    struct nouveau_pm_memtiming *boot,
-		    struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	t->drive_strength = 0;
-	if (len < 15) {
-		t->odt = boot->odt;
-	} else {
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x100f) |
-		   (e->tCL) << 4 |
-		   (e->tWR - 1) << 9;
-	t->mr[1] = (boot->mr[1] & 0x101fbb) |
-		   (t->odt & 0x1) << 2 |
-		   (t->odt & 0x2) << 5;
-
-	NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
-	return 0;
-}
-
-static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
-	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
-
-static int
-nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
-		    struct nouveau_pm_tbl_entry *e, u8 len,
-		    struct nouveau_pm_memtiming *boot,
-		    struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u8 cl = e->tCL - 4;
-
-	t->drive_strength = 0;
-	if (len < 15) {
-		t->odt = boot->odt;
-	} else {
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (e->tCWL < 5) {
-		NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
-		return -ERANGE;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x180b) |
-		   /* CAS */
-		   (cl & 0x7) << 4 |
-		   (cl & 0x8) >> 1 |
-		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
-	t->mr[1] = (boot->mr[1] & 0x101dbb) |
-		   (t->odt & 0x1) << 2 |
-		   (t->odt & 0x2) << 5 |
-		   (t->odt & 0x4) << 7;
-	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
-	return 0;
-}
-
-static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
-	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
-	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
-
-static int
-nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (len < 15) {
-		t->drive_strength = boot->drive_strength;
-		t->odt = boot->odt;
-	} else {
-		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0xe0b) |
-		   /* CAS */
-		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
-		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
-	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
-		   (t->odt << 2) |
-		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
-	t->mr[2] = boot->mr[2];
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
-		      t->mr[0], t->mr[1], t->mr[2]);
-	return 0;
-}
-
-static int
-nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (len < 15) {
-		t->drive_strength = boot->drive_strength;
-		t->odt = boot->odt;
-	} else {
-		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
-		t->odt = e->RAM_FT1 & 0x03;
-	}
-
-	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x007) |
-		   ((e->tCL - 5) << 3) |
-		   ((e->tWR - 4) << 8);
-	t->mr[1] = (boot->mr[1] & 0x1007f0) |
-		   t->drive_strength |
-		   (t->odt << 2);
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
-	return 0;
-}
-
-int
-nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
-			struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
-	struct nouveau_pm_tbl_entry *e;
-	u8 ver, len, *ptr, *ramcfg;
-	int ret;
-
-	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
-	if (!ptr || ptr[0] == 0x00) {
-		*t = *boot;
-		return 0;
-	}
-	e = (struct nouveau_pm_tbl_entry *)ptr;
-
-	t->tCWL = boot->tCWL;
-
-	switch (device->card_type) {
-	case NV_40:
-		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	case NV_50:
-		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	case NV_C0:
-	case NV_D0:
-		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	default:
-		ret = -ENODEV;
-		break;
-	}
-
-	switch (pfb->ram->type * !ret) {
-	case NV_MEM_TYPE_GDDR3:
-		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_GDDR5:
-		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_DDR2:
-		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_DDR3:
-		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
-	if (ramcfg) {
-		int dll_off;
-
-		if (ver == 0x00)
-			dll_off = !!(ramcfg[3] & 0x04);
-		else
-			dll_off = !!(ramcfg[2] & 0x40);
-
-		switch (pfb->ram->type) {
-		case NV_MEM_TYPE_GDDR3:
-			t->mr[1] &= ~0x00000040;
-			t->mr[1] |=  0x00000040 * dll_off;
-			break;
-		default:
-			t->mr[1] &= ~0x00000001;
-			t->mr[1] |=  0x00000001 * dll_off;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-void
-nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	u32 timing_base, timing_regs, mr_base;
-	int i;
-
-	if (device->card_type >= 0xC0) {
-		timing_base = 0x10f290;
-		mr_base = 0x10f300;
-	} else {
-		timing_base = 0x100220;
-		mr_base = 0x1002c0;
-	}
-
-	t->id = -1;
-
-	switch (device->card_type) {
-	case NV_50:
-		timing_regs = 9;
-		break;
-	case NV_C0:
-	case NV_D0:
-		timing_regs = 5;
-		break;
-	case NV_30:
-	case NV_40:
-		timing_regs = 3;
-		break;
-	default:
-		timing_regs = 0;
-		return;
-	}
-	for(i = 0; i < timing_regs; i++)
-		t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
-
-	t->tCWL = 0;
-	if (device->card_type < NV_C0) {
-		t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
-	} else if (device->card_type <= NV_D0) {
-		t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
-	}
-
-	t->mr[0] = nv_rd32(device, mr_base);
-	t->mr[1] = nv_rd32(device, mr_base + 0x04);
-	t->mr[2] = nv_rd32(device, mr_base + 0x20);
-	t->mr[3] = nv_rd32(device, mr_base + 0x24);
-
-	t->odt = 0;
-	t->drive_strength = 0;
-
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_DDR3:
-		t->odt |= (t->mr[1] & 0x200) >> 7;
-	case NV_MEM_TYPE_DDR2:
-		t->odt |= (t->mr[1] & 0x04) >> 2 |
-			  (t->mr[1] & 0x40) >> 5;
-		break;
-	case NV_MEM_TYPE_GDDR3:
-	case NV_MEM_TYPE_GDDR5:
-		t->drive_strength = t->mr[1] & 0x03;
-		t->odt = (t->mr[1] & 0x0c) >> 2;
-		break;
-	default:
-		break;
-	}
-}
-
-int
-nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
-		 struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(exec->dev);
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_pm_memtiming *info = &perflvl->timing;
-	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
-	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
-	u32 mr1_dlloff;
-
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_DDR2:
-		tDLLK = 2000;
-		mr1_dlloff = 0x00000001;
-		break;
-	case NV_MEM_TYPE_DDR3:
-		tDLLK = 12000;
-		tCKSRE = 2000;
-		tXS = 1000;
-		mr1_dlloff = 0x00000001;
-		break;
-	case NV_MEM_TYPE_GDDR3:
-		tDLLK = 40000;
-		mr1_dlloff = 0x00000040;
-		break;
-	default:
-		NV_ERROR(drm, "cannot reclock unsupported memtype\n");
-		return -ENODEV;
-	}
-
-	/* fetch current MRs */
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_GDDR3:
-	case NV_MEM_TYPE_DDR3:
-		mr[2] = exec->mrg(exec, 2);
-	default:
-		mr[1] = exec->mrg(exec, 1);
-		mr[0] = exec->mrg(exec, 0);
-		break;
-	}
-
-	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh  */
-	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
-		exec->precharge(exec);
-		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
-		exec->wait(exec, tMRD);
-	}
-
-	/* enter self-refresh mode */
-	exec->precharge(exec);
-	exec->refresh(exec);
-	exec->refresh(exec);
-	exec->refresh_auto(exec, false);
-	exec->refresh_self(exec, true);
-	exec->wait(exec, tCKSRE);
-
-	/* modify input clock frequency */
-	exec->clock_set(exec);
-
-	/* exit self-refresh mode */
-	exec->wait(exec, tCKSRX);
-	exec->precharge(exec);
-	exec->refresh_self(exec, false);
-	exec->refresh_auto(exec, true);
-	exec->wait(exec, tXS);
-	exec->wait(exec, tXS);
-
-	/* update MRs */
-	if (mr[2] != info->mr[2]) {
-		exec->mrs (exec, 2, info->mr[2]);
-		exec->wait(exec, tMRD);
-	}
-
-	if (mr[1] != info->mr[1]) {
-		/* need to keep DLL off until later, at least on GDDR3 */
-		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
-		exec->wait(exec, tMRD);
-	}
-
-	if (mr[0] != info->mr[0]) {
-		exec->mrs (exec, 0, info->mr[0]);
-		exec->wait(exec, tMRD);
-	}
-
-	/* update PFB timing registers */
-	exec->timing_set(exec);
-
-	/* DLL (enable + ) reset */
-	if (!(info->mr[1] & mr1_dlloff)) {
-		if (mr[1] & mr1_dlloff) {
-			exec->mrs (exec, 1, info->mr[1]);
-			exec->wait(exec, tMRD);
-		}
-		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
-		exec->wait(exec, tMRD);
-		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
-		exec->wait(exec, tMRD);
-		exec->wait(exec, tDLLK);
-		if (pfb->ram->type == NV_MEM_TYPE_GDDR3)
-			exec->precharge(exec);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
deleted file mode 100644
index 4fe883c..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "nouveau_pm.h"
-
-static u8 *
-nouveau_perf_table(struct drm_device *dev, u8 *ver)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct bit_entry P;
-
-	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
-		u8 *perf = ROMPTR(dev, P.data[0]);
-		if (perf) {
-			*ver = perf[0];
-			return perf;
-		}
-	}
-
-	if (bios->type == NVBIOS_BMP) {
-		if (bios->data[bios->offset + 6] >= 0x25) {
-			u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
-			if (perf) {
-				*ver = perf[1];
-				return perf;
-			}
-		}
-	}
-
-	return NULL;
-}
-
-static u8 *
-nouveau_perf_entry(struct drm_device *dev, int idx,
-		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
-	u8 *perf = nouveau_perf_table(dev, ver);
-	if (perf) {
-		if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
-			*hdr = perf[3];
-			*cnt = 0;
-			*len = 0;
-			return perf + perf[0] + idx * perf[3];
-		} else
-		if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
-			*hdr = perf[3];
-			*cnt = perf[4];
-			*len = perf[5];
-			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
-		} else
-		if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
-			*hdr = perf[2];
-			*cnt = perf[4];
-			*len = perf[3];
-			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
-		}
-	}
-	return NULL;
-}
-
-u8 *
-nouveau_perf_rammap(struct drm_device *dev, u32 freq,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	u8 *perf, i = 0;
-
-	if (!bit_table(dev, 'P', &P) && P.version == 2) {
-		u8 *rammap = ROMPTR(dev, P.data[4]);
-		if (rammap) {
-			u8 *ramcfg = rammap + rammap[1];
-
-			*ver = rammap[0];
-			*hdr = rammap[2];
-			*cnt = rammap[4];
-			*len = rammap[3];
-
-			freq /= 1000;
-			for (i = 0; i < rammap[5]; i++) {
-				if (freq >= ROM16(ramcfg[0]) &&
-				    freq <= ROM16(ramcfg[2]))
-					return ramcfg;
-
-				ramcfg += *hdr + (*cnt * *len);
-			}
-		}
-
-		return NULL;
-	}
-
-	if (nv_device(drm->device)->chipset == 0x49 ||
-	    nv_device(drm->device)->chipset == 0x4b)
-		freq /= 2;
-
-	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
-		if (*ver >= 0x20 && *ver < 0x25) {
-			if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
-				break;
-		} else
-		if (*ver >= 0x25 && *ver < 0x40) {
-			if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
-				break;
-		}
-	}
-
-	if (perf) {
-		u8 *ramcfg = perf + *hdr;
-		*ver = 0x00;
-		*hdr = 0;
-		return ramcfg;
-	}
-
-	return NULL;
-}
-
-u8 *
-nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	u8 strap, hdr, cnt;
-	u8 *rammap;
-
-	strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
-	if (bios->ram_restrict_tbl_ptr)
-		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
-
-	rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
-	if (rammap && strap < cnt)
-		return rammap + hdr + (strap * *len);
-
-	return NULL;
-}
-
-u8 *
-nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct bit_entry P;
-	u8 *perf, *timing = NULL;
-	u8 i = 0, hdr, cnt;
-
-	if (bios->type == NVBIOS_BMP) {
-		while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
-						  len)) && *ver == 0x15) {
-			if (freq <= ROM32(perf[5]) * 20) {
-				*ver = 0x00;
-				*len = 14;
-				return perf + 41;
-			}
-		}
-		return NULL;
-	}
-
-	if (!bit_table(dev, 'P', &P)) {
-		if (P.version == 1)
-			timing = ROMPTR(dev, P.data[4]);
-		else
-		if (P.version == 2)
-			timing = ROMPTR(dev, P.data[8]);
-	}
-
-	if (timing && timing[0] == 0x10) {
-		u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
-		if (ramcfg && ramcfg[1] < timing[2]) {
-			*ver = timing[0];
-			*len = timing[3];
-			return timing + timing[1] + (ramcfg[1] * timing[3]);
-		}
-	}
-
-	return NULL;
-}
-
-static void
-legacy_perf_init(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	char *perf, *entry, *bmp = &bios->data[bios->offset];
-	int headerlen, use_straps;
-
-	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
-		NV_DEBUG(drm, "BMP version too old for perf\n");
-		return;
-	}
-
-	perf = ROMPTR(dev, bmp[0x73]);
-	if (!perf) {
-		NV_DEBUG(drm, "No memclock table pointer found.\n");
-		return;
-	}
-
-	switch (perf[0]) {
-	case 0x12:
-	case 0x14:
-	case 0x18:
-		use_straps = 0;
-		headerlen = 1;
-		break;
-	case 0x01:
-		use_straps = perf[1] & 1;
-		headerlen = (use_straps ? 8 : 2);
-		break;
-	default:
-		NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
-		return;
-	}
-
-	entry = perf + headerlen;
-	if (use_straps)
-		entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
-
-	sprintf(pm->perflvl[0].name, "performance_level_0");
-	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
-	pm->nr_perflvl = 1;
-}
-
-static void
-nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	u8 *vmap;
-	int id;
-
-	id = perflvl->volt_min;
-	perflvl->volt_min = 0;
-
-	/* boards using voltage table version <0x40 store the voltage
-	 * level directly in the perflvl entry as a multiple of 10mV
-	 */
-	if (drm->pm->voltage.version < 0x40) {
-		perflvl->volt_min = id * 10000;
-		perflvl->volt_max = perflvl->volt_min;
-		return;
-	}
-
-	/* on newer ones, the perflvl stores an index into yet another
-	 * vbios table containing a min/max voltage value for the perflvl
-	 */
-	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
-		NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
-			 P.version, P.length);
-		return;
-	}
-
-	vmap = ROMPTR(dev, P.data[32]);
-	if (!vmap) {
-		NV_DEBUG(drm, "volt map table pointer invalid\n");
-		return;
-	}
-
-	if (id < vmap[3]) {
-		vmap += vmap[1] + (vmap[2] * id);
-		perflvl->volt_min = ROM32(vmap[0]);
-		perflvl->volt_max = ROM32(vmap[4]);
-	}
-}
-
-void
-nouveau_perf_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nvbios *bios = &drm->vbios;
-	u8 *perf, ver, hdr, cnt, len;
-	int ret, vid, i = -1;
-
-	if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
-		legacy_perf_init(dev);
-		return;
-	}
-
-	perf = nouveau_perf_table(dev, &ver);
-
-	while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
-		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
-
-		if (perf[0] == 0xff)
-			continue;
-
-		switch (ver) {
-		case 0x12:
-		case 0x13:
-		case 0x15:
-			perflvl->fanspeed = perf[55];
-			if (hdr > 56)
-				perflvl->volt_min = perf[56];
-			perflvl->core = ROM32(perf[1]) * 10;
-			perflvl->memory = ROM32(perf[5]) * 20;
-			break;
-		case 0x21:
-		case 0x23:
-		case 0x24:
-			perflvl->fanspeed = perf[4];
-			perflvl->volt_min = perf[5];
-			perflvl->shader = ROM16(perf[6]) * 1000;
-			perflvl->core = perflvl->shader;
-			perflvl->core += (signed char)perf[8] * 1000;
-			if (nv_device(drm->device)->chipset == 0x49 ||
-			    nv_device(drm->device)->chipset == 0x4b)
-				perflvl->memory = ROM16(perf[11]) * 1000;
-			else
-				perflvl->memory = ROM16(perf[11]) * 2000;
-			break;
-		case 0x25:
-			perflvl->fanspeed = perf[4];
-			perflvl->volt_min = perf[5];
-			perflvl->core = ROM16(perf[6]) * 1000;
-			perflvl->shader = ROM16(perf[10]) * 1000;
-			perflvl->memory = ROM16(perf[12]) * 1000;
-			break;
-		case 0x30:
-			perflvl->memscript = ROM16(perf[2]);
-		case 0x35:
-			perflvl->fanspeed = perf[6];
-			perflvl->volt_min = perf[7];
-			perflvl->core = ROM16(perf[8]) * 1000;
-			perflvl->shader = ROM16(perf[10]) * 1000;
-			perflvl->memory = ROM16(perf[12]) * 1000;
-			perflvl->vdec = ROM16(perf[16]) * 1000;
-			perflvl->dom6 = ROM16(perf[20]) * 1000;
-			break;
-		case 0x40:
-#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
-			perflvl->fanspeed = 0; /*XXX*/
-			perflvl->volt_min = perf[2];
-			if (nv_device(drm->device)->card_type == NV_50) {
-				perflvl->core   = subent(0);
-				perflvl->shader = subent(1);
-				perflvl->memory = subent(2);
-				perflvl->vdec   = subent(3);
-				perflvl->unka0  = subent(4);
-			} else {
-				perflvl->hub06  = subent(0);
-				perflvl->hub01  = subent(1);
-				perflvl->copy   = subent(2);
-				perflvl->shader = subent(3);
-				perflvl->rop    = subent(4);
-				perflvl->memory = subent(5);
-				perflvl->vdec   = subent(6);
-				perflvl->daemon = subent(10);
-				perflvl->hub07  = subent(11);
-				perflvl->core   = perflvl->shader / 2;
-			}
-			break;
-		}
-
-		/* make sure vid is valid */
-		nouveau_perf_voltage(dev, perflvl);
-		if (pm->voltage.supported && perflvl->volt_min) {
-			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
-			if (vid < 0) {
-				NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
-				continue;
-			}
-		}
-
-		/* get the corresponding memory timings */
-		ret = nouveau_mem_timing_calc(dev, perflvl->memory,
-					          &perflvl->timing);
-		if (ret) {
-			NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
-			continue;
-		}
-
-		snprintf(perflvl->name, sizeof(perflvl->name),
-			 "performance_level_%d", i);
-		perflvl->id = i;
-
-		snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
-			 "%d", perflvl->id);
-		perflvl->profile.func = &nouveau_pm_static_profile_func;
-		list_add_tail(&perflvl->profile.head, &pm->profiles);
-
-
-		pm->nr_perflvl++;
-	}
-}
-
-void
-nouveau_perf_fini(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
deleted file mode 100644
index 936b442..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ /dev/null
@@ -1,1174 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#endif
-#include <linux/power_supply.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/gpio.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
-static char *nouveau_perflvl;
-module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
-static int nouveau_perflvl_wr;
-module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-
-static int
-nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-		       struct nouveau_pm_level *a, struct nouveau_pm_level *b)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	/*XXX: not on all boards, we should control based on temperature
-	 *     on recent boards..  or maybe on some other factor we don't
-	 *     know about?
-	 */
-	if (therm && therm->fan_set &&
-		a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
-		ret = therm->fan_set(therm, perflvl->fanspeed);
-		if (ret && ret != -ENODEV) {
-			NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
-		}
-	}
-
-	if (pm->voltage.supported && pm->voltage_set) {
-		if (perflvl->volt_min && b->volt_min > a->volt_min) {
-			ret = pm->voltage_set(dev, perflvl->volt_min);
-			if (ret) {
-				NV_ERROR(drm, "voltage set failed: %d\n", ret);
-				return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int
-nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	void *state;
-	int ret;
-
-	if (perflvl == pm->cur)
-		return 0;
-
-	ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
-	if (ret)
-		return ret;
-
-	state = pm->clocks_pre(dev, perflvl);
-	if (IS_ERR(state)) {
-		ret = PTR_ERR(state);
-		goto error;
-	}
-	ret = pm->clocks_set(dev, state);
-	if (ret)
-		goto error;
-
-	ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
-	if (ret)
-		return ret;
-
-	pm->cur = perflvl;
-	return 0;
-
-error:
-	/* restore the fan speed and voltage before leaving */
-	nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
-	return ret;
-}
-
-void
-nouveau_pm_trigger(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_timer *ptimer = nouveau_timer(drm->device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile = NULL;
-	struct nouveau_pm_level *perflvl = NULL;
-	int ret;
-
-	/* select power profile based on current power source */
-	if (power_supply_is_system_supplied())
-		profile = pm->profile_ac;
-	else
-		profile = pm->profile_dc;
-
-	if (profile != pm->profile) {
-		pm->profile->func->fini(pm->profile);
-		pm->profile = profile;
-		pm->profile->func->init(pm->profile);
-	}
-
-	/* select performance level based on profile */
-	perflvl = profile->func->select(profile);
-
-	/* change perflvl, if necessary */
-	if (perflvl != pm->cur) {
-		u64 time0 = ptimer->read(ptimer);
-
-		NV_INFO(drm, "setting performance level: %d", perflvl->id);
-		ret = nouveau_pm_perflvl_set(dev, perflvl);
-		if (ret)
-			NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
-
-		NV_INFO(drm, "> reclocking took %lluns\n\n",
-			     ptimer->read(ptimer) - time0);
-	}
-}
-
-static struct nouveau_pm_profile *
-profile_find(struct drm_device *dev, const char *string)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile;
-
-	list_for_each_entry(profile, &pm->profiles, head) {
-		if (!strncmp(profile->name, string, sizeof(profile->name)))
-			return profile;
-	}
-
-	return NULL;
-}
-
-static int
-nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *ac = NULL, *dc = NULL;
-	char string[16], *cur = string, *ptr;
-
-	/* safety precaution, for now */
-	if (nouveau_perflvl_wr != 7777)
-		return -EPERM;
-
-	strncpy(string, profile, sizeof(string));
-	string[sizeof(string) - 1] = 0;
-	if ((ptr = strchr(string, '\n')))
-		*ptr = '\0';
-
-	ptr = strsep(&cur, ",");
-	if (ptr)
-		ac = profile_find(dev, ptr);
-
-	ptr = strsep(&cur, ",");
-	if (ptr)
-		dc = profile_find(dev, ptr);
-	else
-		dc = ac;
-
-	if (ac == NULL || dc == NULL)
-		return -EINVAL;
-
-	pm->profile_ac = ac;
-	pm->profile_dc = dc;
-	nouveau_pm_trigger(dev);
-	return 0;
-}
-
-static void
-nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
-{
-}
-
-static struct nouveau_pm_level *
-nouveau_pm_static_select(struct nouveau_pm_profile *profile)
-{
-	return container_of(profile, struct nouveau_pm_level, profile);
-}
-
-const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
-	.destroy = nouveau_pm_static_dummy,
-	.init = nouveau_pm_static_dummy,
-	.fini = nouveau_pm_static_dummy,
-	.select = nouveau_pm_static_select,
-};
-
-static int
-nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	memset(perflvl, 0, sizeof(*perflvl));
-
-	if (pm->clocks_get) {
-		ret = pm->clocks_get(dev, perflvl);
-		if (ret)
-			return ret;
-	}
-
-	if (pm->voltage.supported && pm->voltage_get) {
-		ret = pm->voltage_get(dev);
-		if (ret > 0) {
-			perflvl->volt_min = ret;
-			perflvl->volt_max = ret;
-		}
-	}
-
-	if (therm && therm->fan_get) {
-		ret = therm->fan_get(therm);
-		if (ret >= 0)
-			perflvl->fanspeed = ret;
-	}
-
-	nouveau_mem_timing_read(dev, &perflvl->timing);
-	return 0;
-}
-
-static void
-nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
-{
-	char c[16], s[16], v[32], f[16], m[16];
-
-	c[0] = '\0';
-	if (perflvl->core)
-		snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
-
-	s[0] = '\0';
-	if (perflvl->shader)
-		snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
-
-	m[0] = '\0';
-	if (perflvl->memory)
-		snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
-
-	v[0] = '\0';
-	if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
-		snprintf(v, sizeof(v), " voltage %dmV-%dmV",
-			 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
-	} else
-	if (perflvl->volt_min) {
-		snprintf(v, sizeof(v), " voltage %dmV",
-			 perflvl->volt_min / 1000);
-	}
-
-	f[0] = '\0';
-	if (perflvl->fanspeed)
-		snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
-
-	snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl_info(struct device *d,
-			    struct device_attribute *a, char *buf)
-{
-	struct nouveau_pm_level *perflvl =
-		container_of(a, struct nouveau_pm_level, dev_attr);
-	char *ptr = buf;
-	int len = PAGE_SIZE;
-
-	snprintf(ptr, len, "%d:", perflvl->id);
-	ptr += strlen(buf);
-	len -= strlen(buf);
-
-	nouveau_pm_perflvl_info(perflvl, ptr, len);
-	return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_level cur;
-	int len = PAGE_SIZE, ret;
-	char *ptr = buf;
-
-	snprintf(ptr, len, "profile: %s, %s\nc:",
-		 pm->profile_ac->name, pm->profile_dc->name);
-	ptr += strlen(buf);
-	len -= strlen(buf);
-
-	ret = nouveau_pm_perflvl_get(dev, &cur);
-	if (ret == 0)
-		nouveau_pm_perflvl_info(&cur, ptr, len);
-	return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
-		       const char *buf, size_t count)
-{
-	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
-	int ret;
-
-	ret = nouveau_pm_profile_set(dev, buf);
-	if (ret)
-		return ret;
-	return strlen(buf);
-}
-
-static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
-		   nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
-
-static int
-nouveau_sysfs_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct device *d = &dev->pdev->dev;
-	int ret, i;
-
-	ret = device_create_file(d, &dev_attr_performance_level);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		struct nouveau_pm_level *perflvl = &pm->perflvl[i];
-
-		perflvl->dev_attr.attr.name = perflvl->name;
-		perflvl->dev_attr.attr.mode = S_IRUGO;
-		perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
-		perflvl->dev_attr.store = NULL;
-		sysfs_attr_init(&perflvl->dev_attr.attr);
-
-		ret = device_create_file(d, &perflvl->dev_attr);
-		if (ret) {
-			NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
-				 perflvl->id, i);
-			perflvl->dev_attr.attr.name = NULL;
-			nouveau_pm_fini(dev);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sysfs_fini(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct device *d = &dev->pdev->dev;
-	int i;
-
-	device_remove_file(d, &dev_attr_performance_level);
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		struct nouveau_pm_level *pl = &pm->perflvl[i];
-
-		if (!pl->dev_attr.attr.name)
-			break;
-
-		device_remove_file(d, &pl->dev_attr);
-	}
-}
-
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-static ssize_t
-nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int temp = therm->temp_get(therm);
-
-	if (temp < 0)
-		return temp;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
-}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
-						  NULL, 0);
-
-static ssize_t
-nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
-					 struct device_attribute *a, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", 100);
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
-			  nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
-
-static ssize_t
-nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
-				     struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	      therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
-					 struct device_attribute *a,
-					 const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_temp1_auto_point1_temp,
-			  nouveau_hwmon_set_temp1_auto_point1_temp, 0);
-
-static ssize_t
-nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
-					  struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
-					      struct device_attribute *a,
-					      const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_temp1_auto_point1_temp_hyst,
-			  nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
-
-static ssize_t
-nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
-						const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
-						  nouveau_hwmon_set_max_temp,
-						  0);
-
-static ssize_t
-nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
-			    char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
-						const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_max_temp_hyst,
-			  nouveau_hwmon_set_max_temp_hyst, 0);
-
-static ssize_t
-nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
-							    const char *buf,
-								size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
-						nouveau_hwmon_critical_temp,
-						nouveau_hwmon_set_critical_temp,
-						0);
-
-static ssize_t
-nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_critical_temp_hyst(struct device *d,
-				     struct device_attribute *a,
-				     const char *buf,
-				     size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_critical_temp_hyst,
-			  nouveau_hwmon_set_critical_temp_hyst, 0);
-static ssize_t
-nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
-							    const char *buf,
-								size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
-					nouveau_hwmon_emergency_temp,
-					nouveau_hwmon_set_emergency_temp,
-					0);
-
-static ssize_t
-nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
-				      struct device_attribute *a,
-				      const char *buf,
-				      size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
-					nouveau_hwmon_emergency_temp_hyst,
-					nouveau_hwmon_set_emergency_temp_hyst,
-					0);
-
-static ssize_t nouveau_hwmon_show_name(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "nouveau\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
-
-static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "1000\n");
-}
-static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
-						nouveau_hwmon_show_update_rate,
-						NULL, 0);
-
-static ssize_t
-nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
-			      char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
-}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
-			  NULL, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1_enable(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (strict_strtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
-	if (ret)
-		return ret;
-	else
-		return count;
-}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_enable,
-			  nouveau_hwmon_set_pwm1_enable, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->fan_get(therm);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
-		       const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret = -ENODEV;
-	long value;
-
-	if (nouveau_perflvl_wr != 7777)
-		return -EPERM;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->fan_set(therm, value);
-	if (ret)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1,
-			  nouveau_hwmon_set_pwm1, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1_min(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_min,
-			  nouveau_hwmon_set_pwm1_min, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1_max(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_max,
-			  nouveau_hwmon_set_pwm1_max, 0);
-
-static struct attribute *hwmon_default_attributes[] = {
-	&sensor_dev_attr_name.dev_attr.attr,
-	&sensor_dev_attr_update_rate.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_temp_attributes[] = {
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_max.dev_attr.attr,
-	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_fan_rpm_attributes[] = {
-	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_pwm_fan_attributes[] = {
-	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_pwm1_min.dev_attr.attr,
-	&sensor_dev_attr_pwm1_max.dev_attr.attr,
-	NULL
-};
-
-static const struct attribute_group hwmon_default_attrgroup = {
-	.attrs = hwmon_default_attributes,
-};
-static const struct attribute_group hwmon_temp_attrgroup = {
-	.attrs = hwmon_temp_attributes,
-};
-static const struct attribute_group hwmon_fan_rpm_attrgroup = {
-	.attrs = hwmon_fan_rpm_attributes,
-};
-static const struct attribute_group hwmon_pwm_fan_attrgroup = {
-	.attrs = hwmon_pwm_fan_attributes,
-};
-#endif
-
-static int
-nouveau_hwmon_init(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	struct device *hwmon_dev;
-	int ret = 0;
-
-	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
-		return -ENODEV;
-
-	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
-	if (IS_ERR(hwmon_dev)) {
-		ret = PTR_ERR(hwmon_dev);
-		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
-		return ret;
-	}
-	dev_set_drvdata(hwmon_dev, dev);
-
-	/* set the default attributes */
-	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
-	if (ret) {
-		if (ret)
-			goto error;
-	}
-
-	/* if the card has a working thermal sensor */
-	if (therm->temp_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
-		if (ret) {
-			if (ret)
-				goto error;
-		}
-	}
-
-	/* if the card has a pwm fan */
-	/*XXX: incorrect, need better detection for this, some boards have
-	 *     the gpio entries for pwm fan control even when there's no
-	 *     actual fan connected to it... therm table? */
-	if (therm->fan_get && therm->fan_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj,
-					 &hwmon_pwm_fan_attrgroup);
-		if (ret)
-			goto error;
-	}
-
-	/* if the card can read the fan rpm */
-	if (therm->fan_sense(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj,
-					 &hwmon_fan_rpm_attrgroup);
-		if (ret)
-			goto error;
-	}
-
-	pm->hwmon = hwmon_dev;
-
-	return 0;
-
-error:
-	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
-	hwmon_device_unregister(hwmon_dev);
-	pm->hwmon = NULL;
-	return ret;
-#else
-	pm->hwmon = NULL;
-	return 0;
-#endif
-}
-
-static void
-nouveau_hwmon_fini(struct drm_device *dev)
-{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct nouveau_pm *pm = nouveau_pm(dev);
-
-	if (pm->hwmon) {
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
-
-		hwmon_device_unregister(pm->hwmon);
-	}
-#endif
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-static int
-nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
-{
-	struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
-	struct nouveau_drm *drm = nouveau_drm(pm->dev);
-	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
-	if (strcmp(entry->device_class, "ac_adapter") == 0) {
-		bool ac = power_supply_is_system_supplied();
-
-		NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
-		nouveau_pm_trigger(pm->dev);
-	}
-
-	return NOTIFY_OK;
-}
-#endif
-
-int
-nouveau_pm_init(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm;
-	char info[256];
-	int ret, i;
-
-	pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
-	if (!pm)
-		return -ENOMEM;
-
-	pm->dev = dev;
-
-	if (device->card_type < NV_40) {
-		pm->clocks_get = nv04_pm_clocks_get;
-		pm->clocks_pre = nv04_pm_clocks_pre;
-		pm->clocks_set = nv04_pm_clocks_set;
-		if (nouveau_gpio(drm->device)) {
-			pm->voltage_get = nouveau_voltage_gpio_get;
-			pm->voltage_set = nouveau_voltage_gpio_set;
-		}
-	} else
-	if (device->card_type < NV_50) {
-		pm->clocks_get = nv40_pm_clocks_get;
-		pm->clocks_pre = nv40_pm_clocks_pre;
-		pm->clocks_set = nv40_pm_clocks_set;
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	} else
-	if (device->card_type < NV_C0) {
-		if (device->chipset <  0xa3 ||
-		    device->chipset == 0xaa ||
-		    device->chipset == 0xac) {
-			pm->clocks_get = nv50_pm_clocks_get;
-			pm->clocks_pre = nv50_pm_clocks_pre;
-			pm->clocks_set = nv50_pm_clocks_set;
-		} else {
-			pm->clocks_get = nva3_pm_clocks_get;
-			pm->clocks_pre = nva3_pm_clocks_pre;
-			pm->clocks_set = nva3_pm_clocks_set;
-		}
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	} else
-	if (device->card_type < NV_E0) {
-		pm->clocks_get = nvc0_pm_clocks_get;
-		pm->clocks_pre = nvc0_pm_clocks_pre;
-		pm->clocks_set = nvc0_pm_clocks_set;
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	}
-
-
-	/* parse aux tables from vbios */
-	nouveau_volt_init(dev);
-
-	INIT_LIST_HEAD(&pm->profiles);
-
-	/* determine current ("boot") performance level */
-	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
-	if (ret) {
-		NV_ERROR(drm, "failed to determine boot perflvl\n");
-		return ret;
-	}
-
-	strncpy(pm->boot.name, "boot", 4);
-	strncpy(pm->boot.profile.name, "boot", 4);
-	pm->boot.profile.func = &nouveau_pm_static_profile_func;
-
-	list_add(&pm->boot.profile.head, &pm->profiles);
-
-	pm->profile_ac = &pm->boot.profile;
-	pm->profile_dc = &pm->boot.profile;
-	pm->profile = &pm->boot.profile;
-	pm->cur = &pm->boot;
-
-	/* add performance levels from vbios */
-	nouveau_perf_init(dev);
-
-	/* display available performance levels */
-	NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
-		NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
-	}
-
-	nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
-	NV_INFO(drm, "c:%s", info);
-
-	/* switch performance levels now if requested */
-	if (nouveau_perflvl != NULL)
-		nouveau_pm_profile_set(dev, nouveau_perflvl);
-
-	nouveau_sysfs_init(dev);
-	nouveau_hwmon_init(dev);
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
-	register_acpi_notifier(&pm->acpi_nb);
-#endif
-
-	return 0;
-}
-
-void
-nouveau_pm_fini(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile, *tmp;
-
-	list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
-		list_del(&profile->head);
-		profile->func->destroy(profile);
-	}
-
-	if (pm->cur != &pm->boot)
-		nouveau_pm_perflvl_set(dev, &pm->boot);
-
-	nouveau_perf_fini(dev);
-	nouveau_volt_fini(dev);
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-	unregister_acpi_notifier(&pm->acpi_nb);
-#endif
-	nouveau_hwmon_fini(dev);
-	nouveau_sysfs_fini(dev);
-
-	nouveau_drm(dev)->pm = NULL;
-	kfree(pm);
-}
-
-void
-nouveau_pm_resume(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_level *perflvl;
-
-	if (!pm->cur || pm->cur == &pm->boot)
-		return;
-
-	perflvl = pm->cur;
-	pm->cur = &pm->boot;
-	nouveau_pm_perflvl_set(dev, perflvl);
-}
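
All of the hwmon callbacks removed above follow one device-attribute
pattern: recover the drm_device from drvdata, parse the user string with
kstrtol(), scale between the millidegree sysfs convention and the whole
degrees the therm subdev works in, and return the byte count on success.
One subtlety: testing only for kstrtol() == -EINVAL, as this code did,
silently accepts other parse failures such as -ERANGE.  A minimal sketch
of the stricter idiom -- example_store() is illustrative, not a function
from this patch:

	static ssize_t
	example_store(struct device *d, struct device_attribute *a,
		      const char *buf, size_t count)
	{
		struct drm_device *dev = dev_get_drvdata(d);
		struct nouveau_drm *drm = nouveau_drm(dev);
		struct nouveau_therm *therm = nouveau_therm(drm->device);
		long value;
		int ret;

		/* propagate every parse failure, not just -EINVAL */
		ret = kstrtol(buf, 10, &value);
		if (ret)
			return ret;

		/* sysfs exposes millidegrees; the subdev wants degrees */
		ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL,
				      value / 1000);
		if (ret < 0)
			return ret;

		return count;
	}
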
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
deleted file mode 100644
index 73b789c..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_PM_H__
-#define __NOUVEAU_PM_H__
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-
-struct nouveau_pm_voltage_level {
-	u32 voltage; /* microvolts */
-	u8  vid;
-};
-
-struct nouveau_pm_voltage {
-	bool supported;
-	u8 version;
-	u8 vid_mask;
-
-	struct nouveau_pm_voltage_level *level;
-	int nr_level;
-};
-
-/* Exclusive upper limits */
-#define NV_MEM_CL_DDR2_MAX 8
-#define NV_MEM_WR_DDR2_MAX 9
-#define NV_MEM_CL_DDR3_MAX 17
-#define NV_MEM_WR_DDR3_MAX 17
-#define NV_MEM_CL_GDDR3_MAX 16
-#define NV_MEM_WR_GDDR3_MAX 18
-#define NV_MEM_CL_GDDR5_MAX 21
-#define NV_MEM_WR_GDDR5_MAX 20
-
-struct nouveau_pm_memtiming {
-	int id;
-
-	u32 reg[9];
-	u32 mr[4];
-
-	u8 tCWL;
-
-	u8 odt;
-	u8 drive_strength;
-};
-
-struct nouveau_pm_tbl_header {
-	u8 version;
-	u8 header_len;
-	u8 entry_cnt;
-	u8 entry_len;
-};
-
-struct nouveau_pm_tbl_entry {
-	u8 tWR;
-	u8 tWTR;
-	u8 tCL;
-	u8 tRC;
-	u8 empty_4;
-	u8 tRFC;	/* Byte 5 */
-	u8 empty_6;
-	u8 tRAS;	/* Byte 7 */
-	u8 empty_8;
-	u8 tRP;		/* Byte 9 */
-	u8 tRCDRD;
-	u8 tRCDWR;
-	u8 tRRD;
-	u8 tUNK_13;
-	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
-	u8 empty_15;
-	u8 tUNK_16;
-	u8 empty_17;
-	u8 tUNK_18;
-	u8 tCWL;
-	u8 tUNK_20, tUNK_21;
-};
-
-struct nouveau_pm_profile;
-struct nouveau_pm_profile_func {
-	void (*destroy)(struct nouveau_pm_profile *);
-	void (*init)(struct nouveau_pm_profile *);
-	void (*fini)(struct nouveau_pm_profile *);
-	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
-};
-
-struct nouveau_pm_profile {
-	const struct nouveau_pm_profile_func *func;
-	struct list_head head;
-	char name[8];
-};
-
-#define NOUVEAU_PM_MAX_LEVEL 8
-struct nouveau_pm_level {
-	struct nouveau_pm_profile profile;
-	struct device_attribute dev_attr;
-	char name[32];
-	int id;
-
-	struct nouveau_pm_memtiming timing;
-	u32 memory;
-	u16 memscript;
-
-	u32 core;
-	u32 shader;
-	u32 rop;
-	u32 copy;
-	u32 daemon;
-	u32 vdec;
-	u32 dom6;
-	u32 unka0;	/* nva3:nvc0 */
-	u32 hub01;	/* nvc0- */
-	u32 hub06;	/* nvc0- */
-	u32 hub07;	/* nvc0- */
-
-	u32 volt_min; /* microvolts */
-	u32 volt_max;
-	u8  fanspeed;
-};
-
-struct nouveau_pm_temp_sensor_constants {
-	u16 offset_constant;
-	s16 offset_mult;
-	s16 offset_div;
-	s16 slope_mult;
-	s16 slope_div;
-};
-
-struct nouveau_pm_threshold_temp {
-	s16 critical;
-	s16 down_clock;
-};
-
-struct nouveau_pm {
-	struct drm_device *dev;
-
-	struct nouveau_pm_voltage voltage;
-	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
-	int nr_perflvl;
-	struct nouveau_pm_temp_sensor_constants sensor_constants;
-	struct nouveau_pm_threshold_temp threshold_temp;
-
-	struct nouveau_pm_profile *profile_ac;
-	struct nouveau_pm_profile *profile_dc;
-	struct nouveau_pm_profile *profile;
-	struct list_head profiles;
-
-	struct nouveau_pm_level boot;
-	struct nouveau_pm_level *cur;
-
-	struct device *hwmon;
-	struct notifier_block acpi_nb;
-
-	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
-	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-	int (*clocks_set)(struct drm_device *, void *);
-
-	int (*voltage_get)(struct drm_device *);
-	int (*voltage_set)(struct drm_device *, int voltage);
-};
-
-static inline struct nouveau_pm *
-nouveau_pm(struct drm_device *dev)
-{
-	return nouveau_drm(dev)->pm;
-}
-
-struct nouveau_mem_exec_func {
-	struct drm_device *dev;
-	void (*precharge)(struct nouveau_mem_exec_func *);
-	void (*refresh)(struct nouveau_mem_exec_func *);
-	void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
-	void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
-	void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
-	u32  (*mrg)(struct nouveau_mem_exec_func *, int mr);
-	void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
-	void (*clock_set)(struct nouveau_mem_exec_func *);
-	void (*timing_set)(struct nouveau_mem_exec_func *);
-	void *priv;
-};
-
-/* nouveau_mem.c */
-int  nouveau_mem_exec(struct nouveau_mem_exec_func *,
-		      struct nouveau_pm_level *);
-
-/* nouveau_pm.c */
-int  nouveau_pm_init(struct drm_device *dev);
-void nouveau_pm_fini(struct drm_device *dev);
-void nouveau_pm_resume(struct drm_device *dev);
-extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
-void nouveau_pm_trigger(struct drm_device *dev);
-
-/* nouveau_volt.c */
-void nouveau_volt_init(struct drm_device *);
-void nouveau_volt_fini(struct drm_device *);
-int  nouveau_volt_vid_lookup(struct drm_device *, int voltage);
-int  nouveau_volt_lvl_lookup(struct drm_device *, int vid);
-int  nouveau_voltage_gpio_get(struct drm_device *);
-int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
-
-/* nouveau_perf.c */
-void nouveau_perf_init(struct drm_device *);
-void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
-			u8 *hdr, u8 *cnt, u8 *len);
-u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-
-/* nouveau_mem.c */
-void nouveau_mem_timing_init(struct drm_device *);
-void nouveau_mem_timing_fini(struct drm_device *);
-
-/* nv04_pm.c */
-int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv04_pm_clocks_set(struct drm_device *, void *);
-
-/* nv40_pm.c */
-int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv40_pm_clocks_set(struct drm_device *, void *);
-int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nv50_pm.c */
-int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv50_pm_clocks_set(struct drm_device *, void *);
-int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nva3_pm.c */
-int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nva3_pm_clocks_set(struct drm_device *, void *);
-
-/* nvc0_pm.c */
-int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nvc0_pm_clocks_set(struct drm_device *, void *);
-
-/* nouveau_mem.c */
-int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
-			     struct nouveau_pm_memtiming *);
-void nouveau_mem_timing_read(struct drm_device *,
-			     struct nouveau_pm_memtiming *);
-
-static inline int
-nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
-	      int *N, int *fN, int *M, int *P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_clock *clk = nouveau_clock(device);
-	struct nouveau_pll_vals pv;
-	int ret;
-
-	ret = clk->pll_calc(clk, pll, freq, &pv);
-	*N = pv.N1;
-	*M = pv.M1;
-	*P = pv.log2P;
-	return ret;
-}
-
-#endif
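
The nva3_calc_pll() inline above only repacks the coefficients returned
by clk->pll_calc(); the frequency they encode follows the usual PLL
relation output = (refclk * N / M) >> log2P.  (Note the inline never
writes *fN; the fractional-N slot is left untouched.)  A worked example
with invented coefficients, not taken from any board:

	/* 27000 kHz crystal, N = 100, M = 3, log2P = 1:
	 * (27000 * 100) / 3 = 900000 kHz, >> 1 = 450000 kHz (450 MHz) */
	u32 khz = ((27000 * 100) / 3) >> 1;
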
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d..51a2cb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@
 		return ERR_PTR(ret);
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	nvbo->gem->driver_private = nvbo;
-	return nvbo->gem;
+	return &nvbo->gem;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)
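
The hunk above switches the PRIME import path from allocating a separate
GEM wrapper (drm_gem_object_alloc() plus a driver_private back-pointer)
to initialising the gem object embedded in struct nouveau_bo.  With the
embedded layout a buffer can be recovered from a gem pointer without any
private field; a sketch of the idea, in the container_of() style the
rest of the series moves to:

	static inline struct nouveau_bo *
	nouveau_gem_object(struct drm_gem_object *gem)
	{
		return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
	}

(One quirk worth noting: the error path above collapses whatever
drm_gem_object_init() returned into ERR_PTR(-ENOMEM).)
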
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
new file mode 100644
index 0000000..89201a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_sysfs.h"
+
+#include <core/object.h>
+#include <core/class.h>
+
+static inline struct drm_device *
+drm_device(struct device *d)
+{
+	return pci_get_drvdata(to_pci_dev(d));
+}
+
+#define snappendf(p,r,f,a...) do {                                             \
+	snprintf(p, r, f, ##a);                                                \
+	r -= strlen(p);                                                        \
+	p += strlen(p);                                                        \
+} while(0)
+
+static ssize_t
+nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+	struct nv_control_pstate_info info;
+	size_t cnt = PAGE_SIZE;
+	char *buf = b;
+	int ret, i;
+
+	ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < info.count + 1; i++) {
+		const s32 state = i < info.count ? i :
+			NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
+		struct nv_control_pstate_attr attr = {
+			.state = state,
+			.index = 0,
+		};
+
+		ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+			     &attr, sizeof(attr));
+		if (ret)
+			return ret;
+
+		if (i < info.count)
+			snappendf(buf, cnt, "%02x:", attr.state);
+		else
+			snappendf(buf, cnt, "--:");
+
+		attr.index = 0;
+		do {
+			attr.state = state;
+			ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+				     &attr, sizeof(attr));
+			if (ret)
+				return ret;
+
+			snappendf(buf, cnt, " %s %d", attr.name, attr.min);
+			if (attr.min != attr.max)
+				snappendf(buf, cnt, "-%d", attr.max);
+			snappendf(buf, cnt, " %s", attr.unit);
+		} while (attr.index);
+
+		if ((state >= 0 && info.pstate == state) ||
+		    (state <  0 && info.ustate < 0))
+			snappendf(buf, cnt, " *");
+		snappendf(buf, cnt, "\n");
+	}
+
+	return strlen(b);
+}
+
+static ssize_t
+nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
+			 const char *buf, size_t count)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+	struct nv_control_pstate_user args;
+	long value, ret;
+	char *tmp;
+
+	if ((tmp = strchr(buf, '\n')))
+		*tmp = '\0';
+
+	if (!strcasecmp(buf, "none"))
+		args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
+	else
+	if (!strcasecmp(buf, "auto"))
+		args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
+	else {
+		ret = kstrtol(buf, 16, &value);
+		if (ret)
+			return ret;
+		args.state = value;
+	}
+
+	ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
+		   nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
+
+void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (sysfs->ctrl) {
+		device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
+		nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
+	}
+
+	drm->sysfs = NULL;
+	kfree(sysfs);
+}
+
+int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_sysfs *sysfs;
+	int ret;
+
+	sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
+	if (!sysfs)
+		return -ENOMEM;
+
+	ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
+				 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
+	if (ret == 0)
+		device_create_file(&dev->pdev->dev, &dev_attr_pstate);
+
+	return 0;
+}
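
The new pstate file accepts "none", "auto", or a hexadecimal pstate id,
and reading it lists every performance state with its clock ranges plus
a final line for the current settings, marking the active selection with
a trailing "*".  The snappendf() helper makes the single-pass formatting
work: it advances the output pointer and shrinks the remaining budget by
whatever snprintf() actually wrote.  A small usage sketch with invented
values:

	char page[128], *p = page;
	size_t r = sizeof(page);

	snappendf(p, r, "%02x:", 0x0f);
	snappendf(p, r, " core %d-%d MHz *\n", 405, 1189);
	/* page now holds "0f: core 405-1189 MHz *\n"; p and r track
	 * the unwritten tail for the next append */
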
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
new file mode 100644
index 0000000..74b47f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -0,0 +1,19 @@
+#ifndef __NOUVEAU_SYSFS_H__
+#define __NOUVEAU_SYSFS_H__
+
+#include "nouveau_drm.h"
+
+struct nouveau_sysfs {
+	struct nouveau_object *ctrl;
+};
+
+static inline struct nouveau_sysfs *
+nouveau_sysfs(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->sysfs;
+}
+
+int  nouveau_sysfs_init(struct drm_device *);
+void nouveau_sysfs_fini(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
deleted file mode 100644
index 9976414..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
-
-static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
-static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
-
-int
-nouveau_voltage_gpio_get(struct drm_device *dev)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(device);
-	u8 vid = 0;
-	int i;
-
-	for (i = 0; i < nr_vidtag; i++) {
-		if (!(volt->vid_mask & (1 << i)))
-			continue;
-
-		vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
-	}
-
-	return nouveau_volt_lvl_lookup(dev, vid);
-}
-
-int
-nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(device);
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int vid, i;
-
-	vid = nouveau_volt_vid_lookup(dev, voltage);
-	if (vid < 0)
-		return vid;
-
-	for (i = 0; i < nr_vidtag; i++) {
-		if (!(volt->vid_mask & (1 << i)))
-			continue;
-
-		gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
-	}
-
-	return 0;
-}
-
-int
-nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int i;
-
-	for (i = 0; i < volt->nr_level; i++) {
-		if (volt->level[i].voltage == voltage)
-			return volt->level[i].vid;
-	}
-
-	return -ENOENT;
-}
-
-int
-nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int i;
-
-	for (i = 0; i < volt->nr_level; i++) {
-		if (volt->level[i].vid == vid)
-			return volt->level[i].voltage;
-	}
-
-	return -ENOENT;
-}
-
-void
-nouveau_volt_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_voltage *voltage = &pm->voltage;
-	struct nvbios *bios = &drm->vbios;
-	struct dcb_gpio_func func;
-	struct bit_entry P;
-	u8 *volt = NULL, *entry;
-	int i, headerlen, recordlen, entries, vidmask, vidshift;
-
-	if (bios->type == NVBIOS_BIT) {
-		if (bit_table(dev, 'P', &P))
-			return;
-
-		if (P.version == 1)
-			volt = ROMPTR(dev, P.data[16]);
-		else
-		if (P.version == 2)
-			volt = ROMPTR(dev, P.data[12]);
-		else {
-			NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
-		}
-	} else {
-		if (bios->data[bios->offset + 6] < 0x27) {
-			NV_DEBUG(drm, "BMP version too old for voltage\n");
-			return;
-		}
-
-		volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
-	}
-
-	if (!volt) {
-		NV_DEBUG(drm, "voltage table pointer invalid\n");
-		return;
-	}
-
-	switch (volt[0]) {
-	case 0x10:
-	case 0x11:
-	case 0x12:
-		headerlen = 5;
-		recordlen = volt[1];
-		entries   = volt[2];
-		vidshift  = 0;
-		vidmask   = volt[4];
-		break;
-	case 0x20:
-		headerlen = volt[1];
-		recordlen = volt[3];
-		entries   = volt[2];
-		vidshift  = 0; /* could be vidshift like 0x30? */
-		vidmask   = volt[5];
-		break;
-	case 0x30:
-		headerlen = volt[1];
-		recordlen = volt[2];
-		entries   = volt[3];
-		vidmask   = volt[4];
-		/* no longer certain what volt[5] is, if it's related to
-		 * the vid shift then it's definitely not a function of
-		 * how many bits are set.
-		 *
-		 * after looking at a number of nva3+ vbios images, they
-		 * all seem likely to have a static shift of 2.. lets
-		 * go with that for now until proven otherwise.
-		 */
-		vidshift  = 2;
-		break;
-	case 0x40:
-		headerlen = volt[1];
-		recordlen = volt[2];
-		entries   = volt[3]; /* not a clue what the entries are for.. */
-		vidmask   = volt[11]; /* guess.. */
-		vidshift  = 0;
-		break;
-	default:
-		NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
-		return;
-	}
-
-	/* validate vid mask */
-	voltage->vid_mask = vidmask;
-	if (!voltage->vid_mask)
-		return;
-
-	i = 0;
-	while (vidmask) {
-		if (i > nr_vidtag) {
-			NV_DEBUG(drm, "vid bit %d unknown\n", i);
-			return;
-		}
-
-		if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
-			NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
-			return;
-		}
-
-		vidmask >>= 1;
-		i++;
-	}
-
-	/* parse vbios entries into common format */
-	voltage->version = volt[0];
-	if (voltage->version < 0x40) {
-		voltage->nr_level = entries;
-		voltage->level =
-			kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
-		if (!voltage->level)
-			return;
-
-		entry = volt + headerlen;
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			voltage->level[i].voltage = entry[0] * 10000;
-			voltage->level[i].vid     = entry[1] >> vidshift;
-		}
-	} else {
-		u32 volt_uv = ROM32(volt[4]);
-		s16 step_uv = ROM16(volt[8]);
-		u8 vid;
-
-		voltage->nr_level = voltage->vid_mask + 1;
-		voltage->level = kcalloc(voltage->nr_level,
-					 sizeof(*voltage->level), GFP_KERNEL);
-		if (!voltage->level)
-			return;
-
-		for (vid = 0; vid <= voltage->vid_mask; vid++) {
-			voltage->level[vid].voltage = volt_uv;
-			voltage->level[vid].vid = vid;
-			volt_uv += step_uv;
-		}
-	}
-
-	voltage->supported = true;
-}
-
-void
-nouveau_volt_fini(struct drm_device *dev)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-
-	kfree(volt->level);
-}
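
Of the voltage-table layouts parsed above, only version 0x40 encodes a
linear rule instead of explicit entries: a base voltage at byte 4 and a
signed per-VID step at byte 8, expanded into one level per possible VID.
Worked example with invented numbers:

	u32 base_uv = 825000;	/* ROM32(volt[4]) */
	s16 step_uv = 6250;	/* ROM16(volt[8]) */
	/* vid 0x10 -> 825000 + 16 * 6250 = 925000 uV (0.925 V) */
	u32 level_uv = base_uv + 0x10 * step_uv;
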
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 77dcc9c..8fe32bb 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -255,6 +255,12 @@
 	OUT_RING(chan, NvCtxSurf2D);
 	BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
 	OUT_RING(chan, 3);
+	if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+		BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
+		OUT_RING(chan, 0);
+		OUT_RING(chan, 1);
+		OUT_RING(chan, 2);
+	}
 
 	BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
 	OUT_RING(chan, NvGdiRect);
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
deleted file mode 100644
index 27afc0e..0000000
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-int
-nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	int ret;
-
-	ret = nouveau_hw_get_clock(dev, PLL_CORE);
-	if (ret < 0)
-		return ret;
-	perflvl->core = ret;
-
-	ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
-	if (ret < 0)
-		return ret;
-	perflvl->memory = ret;
-
-	return 0;
-}
-
-struct nv04_pm_clock {
-	struct nvbios_pll pll;
-	struct nouveau_pll_vals calc;
-};
-
-struct nv04_pm_state {
-	struct nv04_pm_clock core;
-	struct nv04_pm_clock memory;
-};
-
-static int
-calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	int ret;
-
-	ret = nvbios_pll_parse(bios, id, &clk->pll);
-	if (ret)
-		return ret;
-
-	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
-	if (!ret)
-		return -EINVAL;
-
-	return 0;
-}
-
-void *
-nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nv04_pm_state *info;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
-	if (ret)
-		goto error;
-
-	if (perflvl->memory) {
-		ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
-		if (ret)
-			goto error;
-	}
-
-	return info;
-error:
-	kfree(info);
-	return ERR_PTR(ret);
-}
-
-static void
-prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	u32 reg = clk->pll.reg;
-
-	/* thank the insane nouveau_hw_setpll() interface for this */
-	if (device->card_type >= NV_40)
-		reg += 4;
-
-	pclk->pll_prog(pclk, reg, &clk->calc);
-}
-
-int
-nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_timer *ptimer = nouveau_timer(device);
-	struct nv04_pm_state *state = pre_state;
-
-	prog_pll(dev, &state->core);
-
-	if (state->memory.pll.reg) {
-		prog_pll(dev, &state->memory);
-		if (device->card_type < NV_30) {
-			if (device->card_type == NV_20)
-				nv_mask(device, 0x1002c4, 0, 1 << 20);
-
-			/* Reset the DLLs */
-			nv_mask(device, 0x1002c0, 0, 1 << 8);
-		}
-	}
-
-	nv_ofuncs(ptimer)->init(nv_object(ptimer));
-
-	kfree(state);
-	return 0;
-}
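
nv04_pm_clocks_pre()/nv04_pm_clocks_set() above show the two-phase
contract shared by all the per-generation backends deleted in this
series: pre() does the fallible work (VBIOS limits lookup, coefficient
calculation) and returns an opaque state, while set() only programs the
hardware from that state and frees it.  Roughly how the deleted
nouveau_pm core drove it (a trimmed sketch, error handling omitted):

	void *state;

	state = pm->clocks_pre(dev, perflvl);
	if (IS_ERR(state))
		return PTR_ERR(state);
	return pm->clocks_set(dev, state);	/* set() consumes state */
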
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index 625f80d..0000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-#include "dispnv04/hw.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-#include <engine/fifo.h>
-
-#define min2(a,b) ((a) < (b) ? (a) : (b))
-
-static u32
-read_pll_1(struct drm_device *dev, u32 reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, reg + 0x00);
-	int P = (ctrl & 0x00070000) >> 16;
-	int N = (ctrl & 0x0000ff00) >> 8;
-	int M = (ctrl & 0x000000ff) >> 0;
-	u32 ref = 27000, clk = 0;
-
-	if (ctrl & 0x80000000)
-		clk = ref * N / M;
-
-	return clk >> P;
-}
-
-static u32
-read_pll_2(struct drm_device *dev, u32 reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, reg + 0x00);
-	u32 coef = nv_rd32(device, reg + 0x04);
-	int N2 = (coef & 0xff000000) >> 24;
-	int M2 = (coef & 0x00ff0000) >> 16;
-	int N1 = (coef & 0x0000ff00) >> 8;
-	int M1 = (coef & 0x000000ff) >> 0;
-	int P = (ctrl & 0x00070000) >> 16;
-	u32 ref = 27000, clk = 0;
-
-	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
-		if ((ctrl & 0x40000100) == 0x40000000) {
-			if (M2)
-				clk = clk * N2 / M2;
-			else
-				clk = 0;
-		}
-	}
-
-	return clk >> P;
-}
-
-static u32
-read_clk(struct drm_device *dev, u32 src)
-{
-	switch (src) {
-	case 3:
-		return read_pll_2(dev, 0x004000);
-	case 2:
-		return read_pll_1(dev, 0x004008);
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-int
-nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, 0x00c040);
-
-	perflvl->core   = read_clk(dev, (ctrl & 0x00000003) >> 0);
-	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
-	perflvl->memory = read_pll_2(dev, 0x4020);
-	return 0;
-}
-
-struct nv40_pm_state {
-	u32 ctrl;
-	u32 npll_ctrl;
-	u32 npll_coef;
-	u32 spll;
-	u32 mpll_ctrl;
-	u32 mpll_coef;
-};
-
-static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
-	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	struct nouveau_pll_vals coef;
-	int ret;
-
-	ret = nvbios_pll_parse(bios, reg, pll);
-	if (ret)
-		return ret;
-
-	if (clk < pll->vco1.max_freq)
-		pll->vco2.max_freq = 0;
-
-	ret = pclk->pll_calc(pclk, pll, clk, &coef);
-	if (ret == 0)
-		return -ERANGE;
-
-	*N1 = coef.N1;
-	*M1 = coef.M1;
-	if (N2 && M2) {
-		if (pll->vco2.max_freq) {
-			*N2 = coef.N2;
-			*M2 = coef.M2;
-		} else {
-			*N2 = 1;
-			*M2 = 1;
-		}
-	}
-	*log2P = coef.log2P;
-	return 0;
-}
-
-void *
-nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nv40_pm_state *info;
-	struct nvbios_pll pll;
-	int N1, N2, M1, M2, log2P;
-	int ret;
-
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	/* core/geometric clock */
-	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
-			    &N1, &M1, &N2, &M2, &log2P);
-	if (ret < 0)
-		goto out;
-
-	if (N2 == M2) {
-		info->npll_ctrl = 0x80000100 | (log2P << 16);
-		info->npll_coef = (N1 << 8) | M1;
-	} else {
-		info->npll_ctrl = 0xc0000000 | (log2P << 16);
-		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
-	}
-
-	/* use the second PLL for shader/rop clock, if it differs from core */
-	if (perflvl->shader && perflvl->shader != perflvl->core) {
-		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
-				    &N1, &M1, NULL, NULL, &log2P);
-		if (ret < 0)
-			goto out;
-
-		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
-		info->ctrl = 0x00000223;
-	} else {
-		info->spll = 0x00000000;
-		info->ctrl = 0x00000333;
-	}
-
-	/* memory clock */
-	if (!perflvl->memory) {
-		info->mpll_ctrl = 0x00000000;
-		goto out;
-	}
-
-	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
-			    &N1, &M1, &N2, &M2, &log2P);
-	if (ret < 0)
-		goto out;
-
-	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
-	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
-	if (N2 == M2) {
-		info->mpll_ctrl |= 0x00000100;
-		info->mpll_coef  = (N1 << 8) | M1;
-	} else {
-		info->mpll_ctrl |= 0x40000000;
-		info->mpll_coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
-	}
-
-out:
-	if (ret < 0) {
-		kfree(info);
-		info = ERR_PTR(ret);
-	}
-	return info;
-}
-
-static bool
-nv40_pm_gr_idle(void *data)
-{
-	struct drm_device *dev = data;
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
-	    (nv_rd32(device, 0x400760) & 0x0000000f))
-		return false;
-
-	if (nv_rd32(device, 0x400700))
-		return false;
-
-	return true;
-}
-
-int
-nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fifo *pfifo = nouveau_fifo(device);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv40_pm_state *info = pre_state;
-	unsigned long flags;
-	struct bit_entry M;
-	u32 crtc_mask = 0;
-	u8 sr1[2];
-	int i, ret = -EAGAIN;
-
-	/* determine which CRTCs are active, fetch VGA_SR1 for each */
-	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
-		u32 cnt = 0;
-		do {
-			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
-				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
-				if (!(sr1[i] & 0x20))
-					crtc_mask |= (1 << i);
-				break;
-			}
-			udelay(1);
-		} while (cnt++ < 32);
-	}
-
-	/* halt and idle engines */
-	pfifo->pause(pfifo, &flags);
-
-	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
-		goto resume;
-
-	ret = 0;
-
-	/* set engine clocks */
-	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(device, 0x004004, info->npll_coef);
-	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
-	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
-	mdelay(5);
-	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
-
-	if (!info->mpll_ctrl)
-		goto resume;
-
-	/* wait for vblank start on active crtcs, disable memory access */
-	for (i = 0; i < 2; i++) {
-		if (!(crtc_mask & (1 << i)))
-			continue;
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
-	}
-
-	/* prepare ram for reclocking */
-	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
-
-	/* change the PLL of each memory partition */
-	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
-	switch (nv_device(drm->device)->chipset) {
-	case 0x40:
-	case 0x45:
-	case 0x41:
-	case 0x42:
-	case 0x47:
-		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
-		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x004048, info->mpll_coef);
-		nv_wr32(device, 0x004030, info->mpll_coef);
-	case 0x43:
-	case 0x49:
-	case 0x4b:
-		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x00403c, info->mpll_coef);
-	default:
-		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x004024, info->mpll_coef);
-		break;
-	}
-	udelay(100);
-	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
-
-	/* re-enable normal operation of memory controller */
-	nv_wr32(device, 0x1002dc, 0x00000000);
-	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
-	udelay(100);
-
-	/* execute memory reset script from vbios */
-	if (!bit_table(dev, 'M', &M))
-		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
-
-	/* make sure we're in vblank (hopefully the same one as before), and
-	 * then re-enable crtc memory access
-	 */
-	for (i = 0; i < 2; i++) {
-		if (!(crtc_mask & (1 << i)))
-			continue;
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
-	}
-
-	/* resume engines */
-resume:
-	pfifo->start(pfifo, &flags);
-	kfree(info);
-	return ret;
-}
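
read_pll_2() at the top of the deleted file decodes the two-stage NV40
PLLs: the first stage gives ref * N1 / M1, the second multiplies by
N2 / M2 when the control register selects it, and the post-divider
shifts the result right by P.  Worked example with invented
coefficients:

	/* ref 27000 kHz, N1 = 100, M1 = 4, N2 = 2, M2 = 1, P = 1:
	 * 27000 * 100 / 4 = 675000; * 2 / 1 = 1350000; >> 1 = 675000 kHz */
	u32 khz = ((27000 * 100 / 4) * 2 / 1) >> 1;
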
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
deleted file mode 100644
index 4efc33f..0000000
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ /dev/null
@@ -1,855 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-#include "nouveau_hwsq.h"
-
-#include "nv50_display.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-enum clk_src {
-	clk_src_crystal,
-	clk_src_href,
-	clk_src_hclk,
-	clk_src_hclkm3,
-	clk_src_hclkm3d2,
-	clk_src_host,
-	clk_src_nvclk,
-	clk_src_sclk,
-	clk_src_mclk,
-	clk_src_vdec,
-	clk_src_dom6
-};
-
-static u32 read_clk(struct drm_device *, enum clk_src);
-
-static u32
-read_div(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	switch (nv_device(drm->device)->chipset) {
-	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
-	case 0x84:
-	case 0x86:
-	case 0x98:
-	case 0xa0:
-		return nv_rd32(device, 0x004700);
-	case 0x92:
-	case 0x94:
-	case 0x96:
-		return nv_rd32(device, 0x004800);
-	default:
-		return 0x00000000;
-	}
-}
-
-static u32
-read_pll_src(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 coef, ref = read_clk(dev, clk_src_crystal);
-	u32 rsel = nv_rd32(device, 0x00e18c);
-	int P, N, M, id;
-
-	switch (nv_device(drm->device)->chipset) {
-	case 0x50:
-	case 0xa0:
-		switch (base) {
-		case 0x4020:
-		case 0x4028: id = !!(rsel & 0x00000004); break;
-		case 0x4008: id = !!(rsel & 0x00000008); break;
-		case 0x4030: id = 0; break;
-		default:
-			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
-			return 0;
-		}
-
-		coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
-		ref *=  (coef & 0x01000000) ? 2 : 4;
-		P    =  (coef & 0x00070000) >> 16;
-		N    = ((coef & 0x0000ff00) >> 8) + 1;
-		M    = ((coef & 0x000000ff) >> 0) + 1;
-		break;
-	case 0x84:
-	case 0x86:
-	case 0x92:
-		coef = nv_rd32(device, 0x00e81c);
-		P    = (coef & 0x00070000) >> 16;
-		N    = (coef & 0x0000ff00) >> 8;
-		M    = (coef & 0x000000ff) >> 0;
-		break;
-	case 0x94:
-	case 0x96:
-	case 0x98:
-		rsel = nv_rd32(device, 0x00c050);
-		switch (base) {
-		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
-		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
-		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
-		case 0x4030: rsel = 3; break;
-		default:
-			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
-			return 0;
-		}
-
-		switch (rsel) {
-		case 0: id = 1; break;
-		case 1: return read_clk(dev, clk_src_crystal);
-		case 2: return read_clk(dev, clk_src_href);
-		case 3: id = 0; break;
-		}
-
-		coef =  nv_rd32(device, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
-		P   += (coef & 0x00070000) >> 16;
-		N    = (coef & 0x0000ff00) >> 8;
-		M    = (coef & 0x000000ff) >> 0;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	if (M)
-		return (ref * N / M) >> P;
-	return 0;
-}
-
-static u32
-read_pll_ref(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 src, mast = nv_rd32(device, 0x00c040);
-
-	switch (base) {
-	case 0x004028:
-		src = !!(mast & 0x00200000);
-		break;
-	case 0x004020:
-		src = !!(mast & 0x00400000);
-		break;
-	case 0x004008:
-		src = !!(mast & 0x00010000);
-		break;
-	case 0x004030:
-		src = !!(mast & 0x02000000);
-		break;
-	case 0x00e810:
-		return read_clk(dev, clk_src_crystal);
-	default:
-		NV_ERROR(drm, "bad pll 0x%06x\n", base);
-		return 0;
-	}
-
-	if (src)
-		return read_clk(dev, clk_src_href);
-	return read_pll_src(dev, base);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 mast = nv_rd32(device, 0x00c040);
-	u32 ctrl = nv_rd32(device, base + 0);
-	u32 coef = nv_rd32(device, base + 4);
-	u32 ref = read_pll_ref(dev, base);
-	u32 clk = 0;
-	int N1, N2, M1, M2;
-
-	if (base == 0x004028 && (mast & 0x00100000)) {
-		/* wtf, appears to only disable post-divider on nva0 */
-		if (nv_device(drm->device)->chipset != 0xa0)
-			return read_clk(dev, clk_src_dom6);
-	}
-
-	N2 = (coef & 0xff000000) >> 24;
-	M2 = (coef & 0x00ff0000) >> 16;
-	N1 = (coef & 0x0000ff00) >> 8;
-	M1 = (coef & 0x000000ff);
-	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
-		if ((ctrl & 0x40000100) == 0x40000000) {
-			if (M2)
-				clk = clk * N2 / M2;
-			else
-				clk = 0;
-		}
-	}
-
-	return clk;
-}
-
-static u32
-read_clk(struct drm_device *dev, enum clk_src src)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 mast = nv_rd32(device, 0x00c040);
-	u32 P = 0;
-
-	switch (src) {
-	case clk_src_crystal:
-		return device->crystal;
-	case clk_src_href:
-		return 100000; /* PCIE reference clock */
-	case clk_src_hclk:
-		return read_clk(dev, clk_src_href) * 27778 / 10000;
-	case clk_src_hclkm3:
-		return read_clk(dev, clk_src_hclk) * 3;
-	case clk_src_hclkm3d2:
-		return read_clk(dev, clk_src_hclk) * 3 / 2;
-	case clk_src_host:
-		switch (mast & 0x30000000) {
-		case 0x00000000: return read_clk(dev, clk_src_href);
-		case 0x10000000: break;
-		case 0x20000000: /* !0x50 */
-		case 0x30000000: return read_clk(dev, clk_src_hclk);
-		}
-		break;
-	case clk_src_nvclk:
-		if (!(mast & 0x00100000))
-			P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
-		switch (mast & 0x00000003) {
-		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
-		case 0x00000001: return read_clk(dev, clk_src_dom6);
-		case 0x00000002: return read_pll(dev, 0x004020) >> P;
-		case 0x00000003: return read_pll(dev, 0x004028) >> P;
-		}
-		break;
-	case clk_src_sclk:
-		P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
-		switch (mast & 0x00000030) {
-		case 0x00000000:
-			if (mast & 0x00000080)
-				return read_clk(dev, clk_src_host) >> P;
-			return read_clk(dev, clk_src_crystal) >> P;
-		case 0x00000010: break;
-		case 0x00000020: return read_pll(dev, 0x004028) >> P;
-		case 0x00000030: return read_pll(dev, 0x004020) >> P;
-		}
-		break;
-	case clk_src_mclk:
-		P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(device, 0x004008) & 0x00000200) {
-			switch (mast & 0x0000c000) {
-			case 0x00000000:
-				return read_clk(dev, clk_src_crystal) >> P;
-			case 0x00008000:
-			case 0x0000c000:
-				return read_clk(dev, clk_src_href) >> P;
-			}
-		} else {
-			return read_pll(dev, 0x004008) >> P;
-		}
-		break;
-	case clk_src_vdec:
-		P = (read_div(dev) & 0x00000700) >> 8;
-		switch (nv_device(drm->device)->chipset) {
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0xa0:
-			switch (mast & 0x00000c00) {
-			case 0x00000000:
-				if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
-					return read_clk(dev, clk_src_nvclk) >> P;
-				return read_clk(dev, clk_src_crystal) >> P;
-			case 0x00000400:
-				return 0;
-			case 0x00000800:
-				if (mast & 0x01000000)
-					return read_pll(dev, 0x004028) >> P;
-				return read_pll(dev, 0x004030) >> P;
-			case 0x00000c00:
-				return read_clk(dev, clk_src_nvclk) >> P;
-			}
-			break;
-		case 0x98:
-			switch (mast & 0x00000c00) {
-			case 0x00000000:
-				return read_clk(dev, clk_src_nvclk) >> P;
-			case 0x00000400:
-				return 0;
-			case 0x00000800:
-				return read_clk(dev, clk_src_hclkm3d2) >> P;
-			case 0x00000c00:
-				return read_clk(dev, clk_src_mclk) >> P;
-			}
-			break;
-		}
-		break;
-	case clk_src_dom6:
-		switch (nv_device(drm->device)->chipset) {
-		case 0x50:
-		case 0xa0:
-			return read_pll(dev, 0x00e810) >> 2;
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0x98:
-			P = (read_div(dev) & 0x00000007) >> 0;
-			switch (mast & 0x0c000000) {
-			case 0x00000000: return read_clk(dev, clk_src_href);
-			case 0x04000000: break;
-			case 0x08000000: return read_clk(dev, clk_src_hclk);
-			case 0x0c000000:
-				return read_clk(dev, clk_src_hclkm3) >> P;
-			}
-			break;
-		default:
-			break;
-		}
-	default:
-		break;
-	}
-
-	NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
-	return 0;
-}
-
-int
-nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (nv_device(drm->device)->chipset == 0xaa ||
-	    nv_device(drm->device)->chipset == 0xac)
-		return 0;
-
-	perflvl->core   = read_clk(dev, clk_src_nvclk);
-	perflvl->shader = read_clk(dev, clk_src_sclk);
-	perflvl->memory = read_clk(dev, clk_src_mclk);
-	if (nv_device(drm->device)->chipset != 0x50) {
-		perflvl->vdec = read_clk(dev, clk_src_vdec);
-		perflvl->dom6 = read_clk(dev, clk_src_dom6);
-	}
-
-	return 0;
-}
-
-struct nv50_pm_state {
-	struct nouveau_pm_level *perflvl;
-	struct hwsq_ucode eclk_hwsq;
-	struct hwsq_ucode mclk_hwsq;
-	u32 mscript;
-	u32 mmast;
-	u32 mctrl;
-	u32 mcoef;
-};
-
-static u32
-calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
-	 u32 clk, int *N1, int *M1, int *log2P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	struct nouveau_pll_vals coef;
-	int ret;
-
-	ret = nvbios_pll_parse(bios, reg, pll);
-	if (ret)
-		return 0;
-
-	pll->vco2.max_freq = 0;
-	pll->refclk = read_pll_ref(dev, reg);
-	if (!pll->refclk)
-		return 0;
-
-	ret = pclk->pll_calc(pclk, pll, clk, &coef);
-	if (ret == 0)
-		return 0;
-
-	*N1 = coef.N1;
-	*M1 = coef.M1;
-	*log2P = coef.log2P;
-	return ret;
-}
-
-static inline u32
-calc_div(u32 src, u32 target, int *div)
-{
-	u32 clk0 = src, clk1 = src;
-	for (*div = 0; *div <= 7; (*div)++) {
-		if (clk0 <= target) {
-			clk1 = clk0 << (*div ? 1 : 0);
-			break;
-		}
-		clk0 >>= 1;
-	}
-
-	if (target - clk0 <= clk1 - target)
-		return clk0;
-	(*div)--;
-	return clk1;
-}
-
-static inline u32
-clk_same(u32 a, u32 b)
-{
-	return ((a / 1000) == (b / 1000));
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	if (nsec > 1000)
-		hwsq_usec(hwsq, (nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	if (mr <= 1)
-		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
-	if (mr <= 3)
-		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
-	return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	if (mr <= 1) {
-		if (pfb->ram->ranks > 1)
-			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
-		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
-	} else
-	if (mr <= 3) {
-		if (pfb->ram->ranks > 1)
-			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
-		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	u32 ctrl = nv_rd32(device, 0x004008);
-
-	info->mmast = nv_rd32(device, 0x00c040);
-	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
-	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
-
-	hwsq_wr32(hwsq, 0xc040, info->mmast);
-	hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
-	if (info->mctrl & 0x80000000)
-		hwsq_wr32(hwsq, 0x400c, info->mcoef);
-	hwsq_wr32(hwsq, 0x4008, info->mctrl);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nv50_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	int i;
-
-	for (i = 0; i < 9; i++) {
-		u32 reg = 0x100220 + (i * 4);
-		u32 val = nv_rd32(device, reg);
-		if (val != perflvl->timing.reg[i])
-			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
-	}
-}
-
-static int
-calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-	  struct nv50_pm_state *info)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	struct nvbios_pll pll;
-	int N, M, P;
-	int ret;
-
-	/* use pcie refclock if possible, otherwise use mpll */
-	info->mctrl  = nv_rd32(device, 0x004008);
-	info->mctrl &= ~0x81ff0200;
-	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
-		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
-	} else {
-		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
-		if (ret == 0)
-			return -EINVAL;
-
-		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
-		info->mctrl |= pll.bias_p << 19;
-		info->mcoef  = (N << 8) | M;
-	}
-
-	/* build the ucode which will reclock the memory for us */
-	hwsq_init(hwsq);
-	if (crtc_mask) {
-		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
-		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
-	}
-	if (nv_device(drm->device)->chipset >= 0x92)
-		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
-	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
-
-	ret = nouveau_mem_exec(&exec, perflvl);
-	if (ret)
-		return ret;
-
-	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
-	if (nv_device(drm->device)->chipset >= 0x92)
-		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
-	hwsq_fini(hwsq);
-	return 0;
-}
-
-void *
-nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_pm_state *info;
-	struct hwsq_ucode *hwsq;
-	struct nvbios_pll pll;
-	u32 out, mast, divs, ctrl;
-	int clk, ret = -EINVAL;
-	int N, M, P1, P2;
-
-	if (nv_device(drm->device)->chipset == 0xaa ||
-	    nv_device(drm->device)->chipset == 0xac)
-		return ERR_PTR(-ENODEV);
-
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-	info->perflvl = perflvl;
-
-	/* memory: build hwsq ucode which we'll use to reclock memory.
-	 *         use pcie refclock if possible, otherwise use mpll */
-	info->mclk_hwsq.len = 0;
-	if (perflvl->memory) {
-		ret = calc_mclk(dev, perflvl, info);
-		if (ret)
-			goto error;
-		info->mscript = perflvl->memscript;
-	}
-
-	divs = read_div(dev);
-	mast = info->mmast;
-
-	/* start building HWSQ script for engine reclocking */
-	hwsq = &info->eclk_hwsq;
-	hwsq_init(hwsq);
-	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
-
-	/* vdec/dom6: switch to "safe" clocks temporarily */
-	if (perflvl->vdec) {
-		mast &= ~0x00000c00;
-		divs &= ~0x00000700;
-	}
-
-	if (perflvl->dom6) {
-		mast &= ~0x0c000000;
-		divs &= ~0x00000007;
-	}
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* vdec: avoid modifying xpll until we know exactly how the other
-	 * clock domains work, i suspect at least some of them can also be
-	 * tied to xpll...
-	 */
-	if (perflvl->vdec) {
-		/* see how close we can get using nvclk as a source */
-		clk = calc_div(perflvl->core, perflvl->vdec, &P1);
-
-		/* see how close we can get using xpll/hclk as a source */
-		if (nv_device(drm->device)->chipset != 0x98)
-			out = read_pll(dev, 0x004030);
-		else
-			out = read_clk(dev, clk_src_hclkm3d2);
-		out = calc_div(out, perflvl->vdec, &P2);
-
-		/* select whichever gets us closest */
-		if (abs((int)perflvl->vdec - clk) <=
-		    abs((int)perflvl->vdec - out)) {
-			if (nv_device(drm->device)->chipset != 0x98)
-				mast |= 0x00000c00;
-			divs |= P1 << 8;
-		} else {
-			mast |= 0x00000800;
-			divs |= P2 << 8;
-		}
-	}
-
-	/* dom6: nfi what this is, but we're limited to various combinations
-	 * of the host clock frequency
-	 */
-	if (perflvl->dom6) {
-		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
-			mast |= 0x00000000;
-		} else
-		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
-			mast |= 0x08000000;
-		} else {
-			clk = read_clk(dev, clk_src_hclk) * 3;
-			clk = calc_div(clk, perflvl->dom6, &P1);
-
-			mast |= 0x0c000000;
-			divs |= P1;
-		}
-	}
-
-	/* vdec/dom6: complete switch to new clocks */
-	switch (nv_device(drm->device)->chipset) {
-	case 0x92:
-	case 0x94:
-	case 0x96:
-		hwsq_wr32(hwsq, 0x004800, divs);
-		break;
-	default:
-		hwsq_wr32(hwsq, 0x004700, divs);
-		break;
-	}
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* core/shader: make sure sclk/nvclk are disconnected from their
-	 * PLLs (nvclk to dom6, sclk to hclk)
-	 */
-	if (nv_device(drm->device)->chipset < 0x92)
-		mast = (mast & ~0x001000b0) | 0x00100080;
-	else
-		mast = (mast & ~0x000000b3) | 0x00000081;
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* core: for the moment at least, always use nvpll */
-	clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
-	if (clk == 0)
-		goto error;
-
-	ctrl  = nv_rd32(device, 0x004028) & ~0xc03f0100;
-	mast &= ~0x00100000;
-	mast |= 3;
-
-	hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
-	hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
-
-	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
-	 * very careful that the shader clock is at least twice the core, or
-	 * some chipsets will be very unhappy.  i expect most or all of these
-	 * cases will be handled by tying to nvclk, but it's possible there's
-	 * corners
-	 */
-	ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
-
-	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
-		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
-		hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
-	} else {
-		clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
-		if (clk == 0)
-			goto error;
-		ctrl |= 0x80000000;
-
-		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
-		hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
-		hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
-	}
-
-	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
-	hwsq_fini(hwsq);
-
-	return info;
-error:
-	kfree(info);
-	return ERR_PTR(ret);
-}
-
-static int
-prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 hwsq_data, hwsq_kick;
-	int i;
-
-	if (nv_device(drm->device)->chipset < 0x94) {
-		hwsq_data = 0x001400;
-		hwsq_kick = 0x00000003;
-	} else {
-		hwsq_data = 0x080000;
-		hwsq_kick = 0x00000001;
-	}
-	/* upload hwsq ucode */
-	nv_mask(device, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(device, 0x001304, 0x00000000);
-	if (nv_device(drm->device)->chipset >= 0x92)
-		nv_wr32(device, 0x001318, 0x00000000);
-	for (i = 0; i < hwsq->len / 4; i++)
-		nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
-	nv_mask(device, 0x001098, 0x00000018, 0x00000018);
-
-	/* launch, and wait for completion */
-	nv_wr32(device, 0x00130c, hwsq_kick);
-	if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
-		NV_ERROR(drm, "hwsq ucode exec timed out\n");
-		NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
-		for (i = 0; i < hwsq->len / 4; i++) {
-			NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
-				 nv_rd32(device, 0x001400 + (i * 4)));
-		}
-
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int
-nv50_pm_clocks_set(struct drm_device *dev, void *data)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nv50_pm_state *info = data;
-	struct bit_entry M;
-	int ret = -EBUSY;
-
-	/* halt and idle execution engines */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
-		goto resume;
-	if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
-		goto resume;
-
-	/* program memory clock, if necessary - must come before engine clock
-	 * reprogramming due to how we construct the hwsq scripts in pre()
-	 */
-#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
-	if (info->mclk_hwsq.len) {
-		/* execute some scripts that do ??? from the vbios.. */
-		if (!bit_table(dev, 'M', &M) && M.version == 1) {
-			if (M.length >= 6)
-				nouveau_bios_init_exec(dev, ROM16(M.data[5]));
-			if (M.length >= 8)
-				nouveau_bios_init_exec(dev, ROM16(M.data[7]));
-			if (M.length >= 10)
-				nouveau_bios_init_exec(dev, ROM16(M.data[9]));
-			nouveau_bios_init_exec(dev, info->mscript);
-		}
-
-		ret = prog_hwsq(dev, &info->mclk_hwsq);
-		if (ret)
-			goto resume;
-	}
-
-	/* program engine clocks */
-	ret = prog_hwsq(dev, &info->eclk_hwsq);
-
-resume:
-	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
-	kfree(info);
-	return ret;
-}
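
For review context: the read_pll() removed above decodes a two-stage PLL from its ctrl/coef registers.  A minimal standalone sketch of that arithmetic, assuming the same bitfield layout as the deleted code (the function name and types here are illustrative, not part of the driver):

	/* Illustrative only: mirrors the N1/M1/N2/M2 decode in the deleted
	 * nv50 read_pll().  'ctrl' and 'coef' would come from nv_rd32(). */
	static unsigned int
	nv50_pll_khz(unsigned int ref_khz, unsigned int ctrl, unsigned int coef)
	{
		unsigned int N2 = (coef & 0xff000000) >> 24;
		unsigned int M2 = (coef & 0x00ff0000) >> 16;
		unsigned int N1 = (coef & 0x0000ff00) >> 8;
		unsigned int M1 = (coef & 0x000000ff);
		unsigned int clk = 0;

		if ((ctrl & 0x80000000) && M1) {		/* PLL enabled */
			clk = ref_khz * N1 / M1;		/* first stage */
			if ((ctrl & 0x40000100) == 0x40000000)	/* second stage */
				clk = M2 ? clk * N2 / M2 : 0;
		}
		return clk;
	}
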
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
deleted file mode 100644
index 0d0ed59..0000000
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_clk(struct drm_device *, int, bool);
-static u32 read_pll(struct drm_device *, int, u32);
-
-static u32
-read_vco(struct drm_device *dev, int clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
-	if ((sctl & 0x00000030) != 0x00000030)
-		return read_pll(dev, 0x41, 0x00e820);
-	return read_pll(dev, 0x42, 0x00e8a0);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk, bool ignore_en)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 sctl, sdiv, sclk;
-
-	/* refclk for the 0xe8xx plls is a fixed frequency */
-	if (clk >= 0x40) {
-		if (nv_device(drm->device)->chipset == 0xaf) {
-			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(device, 0x00471c) * 1000;
-		}
-
-		return device->crystal;
-	}
-
-	sctl = nv_rd32(device, 0x4120 + (clk * 4));
-	if (!ignore_en && !(sctl & 0x00000100))
-		return 0;
-
-	switch (sctl & 0x00003000) {
-	case 0x00000000:
-		return device->crystal;
-	case 0x00002000:
-		if (sctl & 0x00000040)
-			return 108000;
-		return 100000;
-	case 0x00003000:
-		sclk = read_vco(dev, clk);
-		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
-		return (sclk * 2) / sdiv;
-	default:
-		return 0;
-	}
-}
-
-static u32
-read_pll(struct drm_device *dev, int clk, u32 pll)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, pll + 0);
-	u32 sclk = 0, P = 1, N = 1, M = 1;
-
-	if (!(ctrl & 0x00000008)) {
-		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(device, pll + 4);
-			M = (coef & 0x000000ff) >> 0;
-			N = (coef & 0x0000ff00) >> 8;
-			P = (coef & 0x003f0000) >> 16;
-
-			/* no post-divider on these.. */
-			if ((pll & 0x00ff00) == 0x00e800)
-				P = 1;
-
-			sclk = read_clk(dev, 0x00 + clk, false);
-		}
-	} else {
-		sclk = read_clk(dev, 0x10 + clk, false);
-	}
-
-	if (M * P)
-		return sclk * N / (M * P);
-	return 0;
-}
-
-struct creg {
-	u32 clk;
-	u32 pll;
-};
-
-static int
-calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll limits;
-	u32 oclk, sclk, sdiv;
-	int P, N, M, diff;
-	int ret;
-
-	reg->pll = 0;
-	reg->clk = 0;
-	if (!khz) {
-		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
-		return 0;
-	}
-
-	switch (khz) {
-	case 27000:
-		reg->clk = 0x00000100;
-		return khz;
-	case 100000:
-		reg->clk = 0x00002100;
-		return khz;
-	case 108000:
-		reg->clk = 0x00002140;
-		return khz;
-	default:
-		sclk = read_vco(dev, clk);
-		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
-		/* if the clock has a PLL attached, and we can get to within
-		 * [-2, 3) MHz of the target with a divider, we'll disable the
-		 * PLL and use the divider instead.
-		 *
-		 * divider can go as low as 2, limited here because NVIDIA
-		 * and the VBIOS on my NVA8 seem to prefer using the PLL
-		 * for 810MHz - is there a good reason?
-		 */
-		if (sdiv > 4) {
-			oclk = (sclk * 2) / sdiv;
-			diff = khz - oclk;
-			if (!pll || (diff >= -2000 && diff < 3000)) {
-				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
-				return oclk;
-			}
-		}
-
-		if (!pll) {
-			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
-			return -ERANGE;
-		}
-
-		break;
-	}
-
-	ret = nvbios_pll_parse(bios, pll, &limits);
-	if (ret)
-		return ret;
-
-	limits.refclk = read_clk(dev, clk - 0x10, true);
-	if (!limits.refclk)
-		return -EINVAL;
-
-	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
-	if (ret >= 0) {
-		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
-		reg->pll = (P << 16) | (N << 8) | M;
-	}
-
-	return ret;
-}
-
-static void
-prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	const u32 src0 = 0x004120 + (clk * 4);
-	const u32 src1 = 0x004160 + (clk * 4);
-	const u32 ctrl = pll + 0;
-	const u32 coef = pll + 4;
-
-	if (!reg->clk && !reg->pll) {
-		NV_DEBUG(drm, "no clock for %02x\n", clk);
-		return;
-	}
-
-	if (reg->pll) {
-		nv_mask(device, src0, 0x00000101, 0x00000101);
-		nv_wr32(device, coef, reg->pll);
-		nv_mask(device, ctrl, 0x00000015, 0x00000015);
-		nv_mask(device, ctrl, 0x00000010, 0x00000000);
-		nv_wait(device, ctrl, 0x00020000, 0x00020000);
-		nv_mask(device, ctrl, 0x00000010, 0x00000010);
-		nv_mask(device, ctrl, 0x00000008, 0x00000000);
-		nv_mask(device, src1, 0x00000100, 0x00000000);
-		nv_mask(device, src1, 0x00000001, 0x00000000);
-	} else {
-		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
-		nv_mask(device, ctrl, 0x00000018, 0x00000018);
-		udelay(20);
-		nv_mask(device, ctrl, 0x00000001, 0x00000000);
-		nv_mask(device, src0, 0x00000100, 0x00000000);
-		nv_mask(device, src0, 0x00000001, 0x00000000);
-	}
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct creg *reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (!reg->clk) {
-		NV_DEBUG(drm, "no clock for %02x\n", clk);
-		return;
-	}
-
-	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
-}
-
-int
-nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	perflvl->core   = read_pll(dev, 0x00, 0x4200);
-	perflvl->shader = read_pll(dev, 0x01, 0x4220);
-	perflvl->memory = read_pll(dev, 0x02, 0x4000);
-	perflvl->unka0  = read_clk(dev, 0x20, false);
-	perflvl->vdec   = read_clk(dev, 0x21, false);
-	perflvl->daemon = read_clk(dev, 0x25, false);
-	perflvl->copy   = perflvl->core;
-	return 0;
-}
-
-struct nva3_pm_state {
-	struct nouveau_pm_level *perflvl;
-
-	struct creg nclk;
-	struct creg sclk;
-	struct creg vdec;
-	struct creg unka0;
-
-	struct creg mclk;
-	u8 *rammap;
-	u8  rammap_ver;
-	u8  rammap_len;
-	u8 *ramcfg;
-	u8  ramcfg_len;
-	u32 r004018;
-	u32 r100760;
-};
-
-void *
-nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nva3_pm_state *info;
-	u8 ramcfg_cnt;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
-	if (ret < 0)
-		goto out;
-
-	info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
-					   &info->rammap_ver,
-					   &info->rammap_len,
-					   &ramcfg_cnt, &info->ramcfg_len);
-	if (info->rammap_ver != 0x10 || info->rammap_len < 5)
-		info->rammap = NULL;
-
-	info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
-					   &info->rammap_ver,
-					   &info->ramcfg_len);
-	if (info->rammap_ver != 0x10)
-		info->ramcfg = NULL;
-
-	info->perflvl = perflvl;
-out:
-	if (ret < 0) {
-		kfree(info);
-		info = ERR_PTR(ret);
-	}
-	return info;
-}
-
-static bool
-nva3_pm_grcp_idle(void *data)
-{
-	struct drm_device *dev = data;
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if (!(nv_rd32(device, 0x400304) & 0x00000001))
-		return true;
-	if (nv_rd32(device, 0x400308) == 0x0050001c)
-		return true;
-	return false;
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	volatile u32 post = nv_rd32(device, 0); (void)post;
-	udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	if (mr <= 1)
-		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
-	if (mr <= 3)
-		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
-	return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (mr <= 1) {
-		if (pfb->ram->ranks > 1)
-			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
-		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
-	} else
-	if (mr <= 3) {
-		if (pfb->ram->ranks > 1)
-			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
-		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nva3_pm_state *info = exec->priv;
-	u32 ctrl;
-
-	ctrl = nv_rd32(device, 0x004000);
-	if (!(ctrl & 0x00000008) && info->mclk.pll) {
-		nv_wr32(device, 0x004000, (ctrl |=  0x00000008));
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(device, 0x004018, 0x00001000);
-		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
-		nv_wr32(device, 0x004004, info->mclk.pll);
-		nv_wr32(device, 0x004000, (ctrl |=  0x00000001));
-		udelay(64);
-		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
-		udelay(20);
-	} else
-	if (!info->mclk.pll) {
-		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
-		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
-	}
-
-	if (info->rammap) {
-		if (info->ramcfg && (info->rammap[4] & 0x08)) {
-			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
-				      info->ramcfg[5];
-			u32 unk5a4 = ROM16(info->ramcfg[7]);
-			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
-				     (info->ramcfg[3] & 0x0f) << 16 |
-				     (info->ramcfg[9] & 0x0f) |
-				     0x80000000;
-			nv_wr32(device, 0x1005a0, unk5a0);
-			nv_wr32(device, 0x1005a4, unk5a4);
-			nv_wr32(device, 0x10f804, unk804);
-			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
-		} else {
-			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
-			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
-			nv_mask(device, 0x100760, 0x22222222, info->r100760);
-			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
-			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
-		}
-	}
-
-	if (info->mclk.pll) {
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
-		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
-	}
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nva3_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	int i;
-
-	for (i = 0; i < 9; i++)
-		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
-
-	if (info->ramcfg) {
-		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
-		nv_mask(device, 0x100200, 0x00001000, data);
-	}
-
-	if (info->ramcfg) {
-		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
-		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
-		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
-		if ( (info->ramcfg[2] & 0x20))
-			unk714 |= 0xf0000000;
-		if (!(info->ramcfg[2] & 0x04))
-			unk714 |= 0x00000010;
-		nv_wr32(device, 0x100714, unk714);
-
-		if (info->ramcfg[2] & 0x01)
-			unk71c |= 0x00000100;
-		nv_wr32(device, 0x10071c, unk71c);
-
-		if (info->ramcfg[2] & 0x02)
-			unk718 |= 0x00000100;
-		nv_wr32(device, 0x100718, unk718);
-
-		if (info->ramcfg[2] & 0x10)
-			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
-	}
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-	u32 ctrl;
-
-	/* XXX: where the fuck does 750MHz come from? */
-	if (info->perflvl->memory <= 750000) {
-		info->r004018 = 0x10000000;
-		info->r100760 = 0x22222222;
-	}
-
-	ctrl = nv_rd32(device, 0x004000);
-	if (ctrl & 0x00000008) {
-		if (info->mclk.pll) {
-			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
-			nv_wr32(device, 0x004004, info->mclk.pll);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
-			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
-			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
-			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
-		}
-	} else {
-		u32 ssel = 0x00000101;
-		if (info->mclk.clk)
-			ssel |= info->mclk.clk;
-		else
-			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-		nv_mask(device, 0x004168, 0x003f3141, ctrl);
-	}
-
-	if (info->ramcfg) {
-		if (info->ramcfg[2] & 0x10) {
-			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
-		} else {
-			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
-			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
-		}
-	}
-	if (info->rammap && !(info->rammap[4] & 0x02))
-		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
-	nv_wr32(device, 0x611200, 0x00003300);
-	if (!(info->ramcfg[2] & 0x10))
-		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
-
-	nouveau_mem_exec(&exec, info->perflvl);
-
-	nv_wr32(device, 0x611200, 0x00003330);
-	if (info->rammap && (info->rammap[4] & 0x02))
-		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
-	if (info->ramcfg) {
-		if (info->ramcfg[2] & 0x10) {
-			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
-			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
-		} else {
-			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
-		}
-	}
-
-	if (info->mclk.pll) {
-		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
-	} else {
-		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
-	}
-}
-
-int
-nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nva3_pm_state *info = pre_state;
-	int ret = -EAGAIN;
-
-	/* prevent any new grctx switches from starting */
-	nv_wr32(device, 0x400324, 0x00000000);
-	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
-	/* wait for any pending grctx switches to complete */
-	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
-		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
-		goto cleanup;
-	}
-	/* freeze PFIFO */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
-		NV_ERROR(drm, "pm: fifo didn't go idle\n");
-		goto cleanup;
-	}
-
-	prog_pll(dev, 0x00, 0x004200, &info->nclk);
-	prog_pll(dev, 0x01, 0x004220, &info->sclk);
-	prog_clk(dev, 0x20, &info->unka0);
-	prog_clk(dev, 0x21, &info->vdec);
-
-	if (info->mclk.clk || info->mclk.pll)
-		prog_mem(dev, info);
-
-	ret = 0;
-
-cleanup:
-	/* unfreeze PFIFO */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
-	/* restore ctxprog to normal */
-	nv_wr32(device, 0x400324, 0x00000000);
-	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
-	/* unblock it if necessary */
-	if (nv_rd32(device, 0x400308) == 0x0050001c)
-		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
-	kfree(info);
-	return ret;
-}
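
The calc_clk() deleted above prefers a plain divider over the PLL whenever a divider setting lands close enough to the target.  A hedged sketch of that selection logic, using the same constants as the removed code (the helper name is made up for illustration, and an extra low-frequency guard is added here that the driver itself does not need):

	/* Illustrative only: the divider search from the deleted nva3
	 * calc_clk().  sclk is the VCO frequency, khz the target; both kHz. */
	static int
	nva3_try_div(unsigned int sclk, unsigned int khz, unsigned int *out_khz)
	{
		unsigned int sdiv, oclk;
		int diff;

		if (khz <= 2999)
			return 0;			/* below divider range */

		sdiv = (sclk * 2) / (khz - 2999);
		if (sdiv > 65)
			sdiv = 65;
		if (sdiv <= 4)
			return 0;			/* target too fast for a divider */

		oclk = (sclk * 2) / sdiv;
		diff = (int)khz - (int)oclk;
		if (diff >= -2000 && diff < 3000) {	/* within [-2, 3) MHz */
			*out_khz = oclk;
			return 1;
		}
		return 0;				/* let the PLL handle it */
	}
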
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index 3b7041c..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_div(struct drm_device *, int, u32, u32);
-static u32 read_pll(struct drm_device *, u32);
-
-static u32
-read_vco(struct drm_device *dev, u32 dsrc)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssrc = nv_rd32(device, dsrc);
-	if (!(ssrc & 0x00000100))
-		return read_pll(dev, 0x00e800);
-	return read_pll(dev, 0x00e820);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 pll)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, pll + 0);
-	u32 coef = nv_rd32(device, pll + 4);
-	u32 P = (coef & 0x003f0000) >> 16;
-	u32 N = (coef & 0x0000ff00) >> 8;
-	u32 M = (coef & 0x000000ff) >> 0;
-	u32 sclk, doff;
-
-	if (!(ctrl & 0x00000001))
-		return 0;
-
-	switch (pll & 0xfff000) {
-	case 0x00e000:
-		sclk = 27000;
-		P = 1;
-		break;
-	case 0x137000:
-		doff = (pll - 0x137000) / 0x20;
-		sclk = read_div(dev, doff, 0x137120, 0x137140);
-		break;
-	case 0x132000:
-		switch (pll) {
-		case 0x132000:
-			sclk = read_pll(dev, 0x132020);
-			break;
-		case 0x132020:
-			sclk = read_div(dev, 0, 0x137320, 0x137330);
-			break;
-		default:
-			return 0;
-		}
-		break;
-	default:
-		return 0;
-	}
-
-	return sclk * N / M / P;
-}
-
-static u32
-read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(device, dctl + (doff * 4));
-
-	switch (ssrc & 0x00000003) {
-	case 0:
-		if ((ssrc & 0x00030000) != 0x00030000)
-			return 27000;
-		return 108000;
-	case 2:
-		return 100000;
-	case 3:
-		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(dev, dsrc + (doff * 4));
-			u32 sdiv = (sctl & 0x0000003f) + 2;
-			return (sclk * 2) / sdiv;
-		}
-
-		return read_vco(dev, dsrc + (doff * 4));
-	default:
-		return 0;
-	}
-}
-
-static u32
-read_mem(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssel = nv_rd32(device, 0x1373f0);
-	if (ssel & 0x00000001)
-		return read_div(dev, 0, 0x137300, 0x137310);
-	return read_pll(dev, 0x132000);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(device, 0x137100);
-	u32 sclk, sdiv;
-
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(dev, 0x137000 + (clk * 0x20));
-		else
-			sclk = read_pll(dev, 0x1370e0);
-		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
-	} else {
-		sclk = read_div(dev, clk, 0x137160, 0x1371d0);
-		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
-	}
-
-	if (sctl & 0x80000000)
-		return (sclk * 2) / sdiv;
-	return sclk;
-}
-
-int
-nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	perflvl->shader = read_clk(dev, 0x00);
-	perflvl->core   = perflvl->shader / 2;
-	perflvl->memory = read_mem(dev);
-	perflvl->rop    = read_clk(dev, 0x01);
-	perflvl->hub07  = read_clk(dev, 0x02);
-	perflvl->hub06  = read_clk(dev, 0x07);
-	perflvl->hub01  = read_clk(dev, 0x08);
-	perflvl->copy   = read_clk(dev, 0x09);
-	perflvl->daemon = read_clk(dev, 0x0c);
-	perflvl->vdec   = read_clk(dev, 0x0e);
-	return 0;
-}
-
-struct nvc0_pm_clock {
-	u32 freq;
-	u32 ssel;
-	u32 mdiv;
-	u32 dsrc;
-	u32 ddiv;
-	u32 coef;
-};
-
-struct nvc0_pm_state {
-	struct nouveau_pm_level *perflvl;
-	struct nvc0_pm_clock eng[16];
-	struct nvc0_pm_clock mem;
-};
-
-static u32
-calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
-{
-	u32 div = min((ref * 2) / freq, (u32)65);
-	if (div < 2)
-		div = 2;
-
-	*ddiv = div - 2;
-	return (ref * 2) / div;
-}
-
-static u32
-calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
-{
-	u32 sclk;
-
-	/* use one of the fixed frequencies if possible */
-	*ddiv = 0x00000000;
-	switch (freq) {
-	case  27000:
-	case 108000:
-		*dsrc = 0x00000000;
-		if (freq == 108000)
-			*dsrc |= 0x00030000;
-		return freq;
-	case 100000:
-		*dsrc = 0x00000002;
-		return freq;
-	default:
-		*dsrc = 0x00000003;
-		break;
-	}
-
-	/* otherwise, calculate the closest divider */
-	sclk = read_vco(dev, clk);
-	if (clk < 7)
-		sclk = calc_div(dev, clk, sclk, freq, ddiv);
-	return sclk;
-}
-
-static u32
-calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll limits;
-	int N, M, P, ret;
-
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
-	if (ret)
-		return 0;
-
-	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
-	if (!limits.refclk)
-		return 0;
-
-	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
-	if (ret <= 0)
-		return 0;
-
-	*coef = (P << 16) | (N << 8) | M;
-	return ret;
-}
-
-/* A (likely rather simplified and incomplete) view of the clock tree
- *
- * Key:
- *
- * S: source select
- * D: divider
- * P: pll
- * F: switch
- *
- * Engine clocks:
- *
- * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
- *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
- *
- * Not all registers exist for all clocks.  For example: clocks >= 8 don't
- * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
- * they have the divider at 1371d0, though the source selection at 137160
- * still exists.  You must use the divider at 137250 for these instead.
- *
- * Memory clock:
- *
- * TBD, read_mem() above is likely very wrong...
- *
- */
-
-static int
-calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
-{
-	u32 src0, div0, div1D, div1P = 0;
-	u32 clk0, clk1 = 0;
-
-	/* invalid clock domain */
-	if (!freq)
-		return 0;
-
-	/* first possible path, using only dividers */
-	clk0 = calc_src(dev, clk, freq, &src0, &div0);
-	clk0 = calc_div(dev, clk, clk0, freq, &div1D);
-
-	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk < 7)
-			clk1 = calc_pll(dev, clk, freq, &info->coef);
-		else
-			clk1 = read_pll(dev, 0x1370e0);
-		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
-	}
-
-	/* select the method which gets closest to target freq */
-	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
-		info->dsrc = src0;
-		if (div0) {
-			info->ddiv |= 0x80000000;
-			info->ddiv |= div0 << 8;
-			info->ddiv |= div0;
-		}
-		if (div1D) {
-			info->mdiv |= 0x80000000;
-			info->mdiv |= div1D;
-		}
-		info->ssel = 0;
-		info->freq = clk0;
-	} else {
-		if (div1P) {
-			info->mdiv |= 0x80000000;
-			info->mdiv |= div1P << 8;
-		}
-		info->ssel = (1 << clk);
-		info->freq = clk1;
-	}
-
-	return 0;
-}
-
-static int
-calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll pll;
-	int N, M, P, ret;
-	u32 ctrl;
-
-	/* mclk pll input freq comes from another pll, make sure it's on */
-	ctrl = nv_rd32(device, 0x132020);
-	if (!(ctrl & 0x00000001)) {
-		/* if not, program it to 567MHz.  nfi where this value comes
-		 * from - it looks like it's in the pll limits table for
-		 * 132000 but the binary driver ignores all my attempts to
-		 * change this value.
-		 */
-		nv_wr32(device, 0x137320, 0x00000103);
-		nv_wr32(device, 0x137330, 0x81200606);
-		nv_wait(device, 0x132020, 0x00010000, 0x00010000);
-		nv_wr32(device, 0x132024, 0x0001150f);
-		nv_mask(device, 0x132020, 0x00000001, 0x00000001);
-		nv_wait(device, 0x137390, 0x00020000, 0x00020000);
-		nv_mask(device, 0x132020, 0x00000004, 0x00000004);
-	}
-
-	/* for the moment, until the clock tree is better understood, use
-	 * pll mode for all clock frequencies
-	 */
-	ret = nvbios_pll_parse(bios, 0x132000, &pll);
-	if (ret == 0) {
-		pll.refclk = read_pll(dev, 0x132020);
-		if (pll.refclk) {
-			ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
-			if (ret > 0) {
-				info->coef = (P << 16) | (N << 8) | M;
-				return 0;
-			}
-		}
-	}
-
-	return -EINVAL;
-}
-
-void *
-nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nvc0_pm_state *info;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	/* NFI why this is still in the performance table, the ROPCs appear
-	 * to get their clock from clock 2 ("hub07", actually hub05 on this
-	 * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
-	 * are always the same freq with the binary driver even when the
-	 * performance table says they should differ.
-	 */
-	if (device->chipset == 0xd9)
-		perflvl->rop = 0;
-
-	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
-	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
-	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
-	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
-	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
-	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
-	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
-	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
-		kfree(info);
-		return ERR_PTR(ret);
-	}
-
-	if (perflvl->memory) {
-		ret = calc_mem(dev, &info->mem, perflvl->memory);
-		if (ret) {
-			kfree(info);
-			return ERR_PTR(ret);
-		}
-	}
-
-	info->perflvl = perflvl;
-	return info;
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	/* program dividers at 137160/1371d0 first */
-	if (clk < 7 && !info->ssel) {
-		nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
-	}
-
-	/* switch clock to non-pll mode */
-	nv_mask(device, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(device, 0x137100, (1 << clk), 0x00000000);
-
-	/* reprogram pll */
-	if (clk < 7) {
-		/* make sure it's disabled first... */
-		u32 base = 0x137000 + (clk * 0x20);
-		u32 ctrl = nv_rd32(device, base + 0x00);
-		if (ctrl & 0x00000001) {
-			nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
-			nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
-		}
-		/* program it to new values, if necessary */
-		if (info->ssel) {
-			nv_wr32(device, base + 0x04, info->coef);
-			nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
-			nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
-			nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
-		}
-	}
-
-	/* select pll/non-pll mode, and program final clock divider */
-	nv_mask(device, 0x137100, (1 << clk), info->ssel);
-	nv_wait(device, 0x137100, (1 << clk), info->ssel);
-	nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
-		if (mr <= 1)
-			return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
-		return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
-	} else {
-		if (mr == 0)
-			return nv_rd32(device, 0x10f300 + (mr * 4));
-		else
-		if (mr <= 7)
-			return nv_rd32(device, 0x10f32c + (mr * 4));
-		return nv_rd32(device, 0x10f34c);
-	}
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
-		if (mr <= 1) {
-			nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
-			if (pfb->ram->ranks > 1)
-				nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
-		} else
-		if (mr <= 3) {
-			nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
-			if (pfb->ram->ranks > 1)
-				nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
-		}
-	} else {
-		if      (mr ==  0) nv_wr32(device, 0x10f300 + (mr * 4), data);
-		else if (mr <=  7) nv_wr32(device, 0x10f32c + (mr * 4), data);
-		else if (mr == 15) nv_wr32(device, 0x10f34c, data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nvc0_pm_state *info = exec->priv;
-	u32 ctrl = nv_rd32(device, 0x132000);
-
-	nv_wr32(device, 0x137360, 0x00000001);
-	nv_wr32(device, 0x137370, 0x00000000);
-	nv_wr32(device, 0x137380, 0x00000000);
-	if (ctrl & 0x00000001)
-		nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
-
-	nv_wr32(device, 0x132004, info->mem.coef);
-	nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
-	nv_wait(device, 0x137390, 0x00000002, 0x00000002);
-	nv_wr32(device, 0x132018, 0x00005000);
-
-	nv_wr32(device, 0x137370, 0x00000001);
-	nv_wr32(device, 0x137380, 0x00000001);
-	nv_wr32(device, 0x137360, 0x00000000);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nvc0_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	int i;
-
-	for (i = 0; i < 5; i++)
-		nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-
-	if (device->chipset < 0xd0)
-		nv_wr32(device, 0x611200, 0x00003300);
-	else
-		nv_wr32(device, 0x62c000, 0x03030000);
-
-	nouveau_mem_exec(&exec, info->perflvl);
-
-	if (device->chipset < 0xd0)
-		nv_wr32(device, 0x611200, 0x00003330);
-	else
-		nv_wr32(device, 0x62c000, 0x03030300);
-}
-int
-nvc0_pm_clocks_set(struct drm_device *dev, void *data)
-{
-	struct nvc0_pm_state *info = data;
-	int i;
-
-	if (info->mem.coef)
-		prog_mem(dev, info);
-
-	for (i = 0; i < 16; i++) {
-		if (!info->eng[i].freq)
-			continue;
-		prog_clk(dev, i, &info->eng[i]);
-	}
-
-	kfree(info);
-	return 0;
-}
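
For the nvc0 file removed above: engine clocks run either from a divided source or from a PLL, and calc_clk() keeps whichever result lands nearer the request.  A minimal sketch of that comparison under the same "register encodes div - 2, range [2, 65]" assumption as calc_div() (helper names are illustrative):

	/* Illustrative only: mirrors calc_div() and the closest-match choice
	 * in the deleted nvc0 calc_clk().  All frequencies in kHz. */
	static unsigned int
	nvc0_div_out(unsigned int ref, unsigned int freq, unsigned int *ddiv)
	{
		unsigned int div;

		if (!freq)
			return 0;			/* invalid clock domain */

		div = (ref * 2) / freq;
		if (div < 2)
			div = 2;
		if (div > 65)
			div = 65;
		*ddiv = div - 2;			/* register field holds div - 2 */
		return (ref * 2) / div;
	}

	static int
	nvc0_use_pll(unsigned int target, unsigned int clk_div, unsigned int clk_pll)
	{
		int d_div = (int)target - (int)clk_div;
		int d_pll = (int)target - (int)clk_pll;

		if (d_div < 0) d_div = -d_div;
		if (d_pll < 0) d_pll = -d_pll;
		return d_pll < d_div;			/* strictly closer -> use PLL */
	}
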
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e7..6c220cd 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
 	depends on OMAP2_DSS
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf6678..701c4c1 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@
 	}
 
 	/* set dma mask for device */
-	/* NOTE: this is a workaround for the hwmod not initializing properly */
-	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto fail;
 
 	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d90..e7fa3cd 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@
 		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 		.gem_prime_export = omap_gem_prime_export,
 		.gem_prime_import = omap_gem_prime_import,
-		.gem_init_object = omap_gem_init_object,
 		.gem_free_object = omap_gem_free_object,
 		.gem_vm_ops = &omap_gem_vm_ops,
 		.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b7..0784769 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6eb..5aec3e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@
 	return ret;
 }
 
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL;          /* unused */
-}
-
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db1..cb85860 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Before installing handler */
@@ -272,7 +272,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -283,7 +283,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		dispc_free_irq(dev);
 	}
@@ -294,11 +294,12 @@
 int omap_drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -307,9 +308,9 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c1279..037d324 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
         select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	help
 		QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba..5e827c2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@
 		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
 				 " retrying\n");
 	}
-	drm_helper_hpd_irq_event(qdev->ddev);
+
+	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+		/* notify that the monitor configuration changed, so the
+		   display can adjust to the arbitrary resolution */
+		drm_kms_helper_hotplug_event(qdev->ddev);
+	}
 }
 
-static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+                                         unsigned *pwidth,
+                                         unsigned *pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@
 	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
 			    false);
 	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	*pwidth = head->width;
+	*pheight = head->height;
 	drm_mode_probed_add(connector, mode);
 	return 1;
 }
 
-static int qxl_add_common_modes(struct drm_connector *connector)
+static int qxl_add_common_modes(struct drm_connector *connector,
+                                unsigned pwidth,
+                                unsigned pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@
 	};
 
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-		if (common_modes[i].w < 320 || common_modes[i].h < 200)
-			continue;
-
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
 				    60, false, false, false);
-		if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+		if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
 			mode->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, mode);
 	}
@@ -720,16 +728,18 @@
 {
 	int ret = 0;
 	struct qxl_device *qdev = connector->dev->dev_private;
+	unsigned pwidth = 1024;
+	unsigned pheight = 768;
 
 	DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
 	/* TODO: what should we do here? only show the configured modes for the
 	 * device, or allow the full list, or both? */
 	if (qdev->monitors_config && qdev->monitors_config->count) {
-		ret = qxl_add_monitors_config_modes(connector);
+		ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
 		if (ret < 0)
 			return ret;
 	}
-	ret += qxl_add_common_modes(connector);
+	ret += qxl_add_common_modes(connector, pwidth, pheight);
 	return ret;
 }
 
@@ -793,7 +803,10 @@
 		     qdev->client_monitors_config->count > output->index &&
 		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+	if (!connected)
+		qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
+
 	return connected ? connector_status_connected
 			 : connector_status_disconnected;
 }
@@ -835,8 +848,21 @@
 	.destroy = qxl_enc_destroy,
 };
 
+static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
+{
+	if (qdev->hotplug_mode_update_property)
+		return 0;
+
+	qdev->hotplug_mode_update_property =
+		drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+
+	return 0;
+}
+
 static int qdev_output_init(struct drm_device *dev, int num_output)
 {
+	struct qxl_device *qdev = dev->dev_private;
 	struct qxl_output *qxl_output;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -863,6 +889,8 @@
 	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
 	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
 
+	drm_object_attach_property(&connector->base,
+				   qdev->hotplug_mode_update_property, 0);
 	drm_sysfs_connector_add(connector);
 	return 0;
 }
@@ -975,6 +1003,9 @@
 	qdev->ddev->mode_config.max_height = 8192;
 
 	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+
+	qxl_mode_create_hotplug_mode_update_property(qdev);
+
 	for (i = 0 ; i < qxl_num_crtc; ++i) {
 		qdev_crtc_init(qdev->ddev, i);
 		qdev_output_init(qdev->ddev, i);
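
The qxl display changes above cooperate in two ways: an immutable connector property, created once via drm_property_create_range(..., "hotplug_mode_update", 0, 1), advertises to userspace that a connector's mode list may change on hotplug; and when drm_helper_hpd_irq_event() reports that no connector changed connection status, a hotplug event is still sent, since the preferred mode may have changed even though connectedness did not. A condensed sketch of that fallback, using only the two helpers visible in the hunks (the wrapper name is hypothetical):

/* Sketch: hotplug fallback as in the hunk above. */
static void qxl_notify_monitors_config_changed(struct qxl_device *qdev)
{
	/* returns true iff some connector changed connection status */
	if (!drm_helper_hpd_irq_event(qdev->ddev))
		drm_kms_helper_hotplug_event(qdev->ddev);
}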
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118a..fee8748 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@
 	.debugfs_init = qxl_debugfs_init,
 	.debugfs_cleanup = qxl_debugfs_takedown,
 #endif
-	.gem_init_object = qxl_gem_object_init,
 	.gem_free_object = qxl_gem_object_free,
 	.gem_open_object = qxl_gem_object_open,
 	.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9add..7bda32f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -323,6 +323,8 @@
 	struct work_struct gc_work;
 
 	struct work_struct fb_work;
+
+	struct drm_property *hotplug_mode_update_property;
 };
 
 /* forward declaration for QXL_INFO_IO */
@@ -412,7 +414,6 @@
 				      struct qxl_surface *surf,
 				      struct qxl_bo **qobj,
 				      uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 88722f2..f437b30 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -108,7 +108,7 @@
 	u32 x1, x2, y1, y2;
 
 	/* TODO: hard coding 32 bpp */
-	int stride = qfbdev->qfb.base.pitches[0] * 4;
+	int stride = qfbdev->qfb.base.pitches[0];
 
 	x1 = qfbdev->dirty.x1;
 	x2 = qfbdev->dirty.x2;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e41..b96f0c9 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
-	/* we do nothings here */
-	return 0;
-}
-
 void qxl_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9e8da9e..e5ca498 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -120,7 +120,7 @@
 		    struct pci_dev *pdev,
 		    unsigned long flags)
 {
-	int r;
+	int r, sb;
 
 	qdev->dev = &pdev->dev;
 	qdev->ddev = ddev;
@@ -136,21 +136,39 @@
 	qdev->rom_base = pci_resource_start(pdev, 2);
 	qdev->rom_size = pci_resource_len(pdev, 2);
 	qdev->vram_base = pci_resource_start(pdev, 0);
-	qdev->surfaceram_base = pci_resource_start(pdev, 1);
-	qdev->surfaceram_size = pci_resource_len(pdev, 1);
 	qdev->io_base = pci_resource_start(pdev, 3);
 
 	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
-	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
-	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+
+	if (pci_resource_len(pdev, 4) > 0) {
+		/* 64bit surface bar present */
+		sb = 4;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+	if (qdev->surface_mapping == NULL) {
+		/* 64bit surface bar not present (or mapping failed) */
+		sb = 1;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+
+	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
 		 (unsigned long long)qdev->vram_base,
 		 (unsigned long long)pci_resource_end(pdev, 0),
 		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
 		 (int)pci_resource_len(pdev, 0) / 1024,
 		 (unsigned long long)qdev->surfaceram_base,
-		 (unsigned long long)pci_resource_end(pdev, 1),
+		 (unsigned long long)pci_resource_end(pdev, sb),
 		 (int)qdev->surfaceram_size / 1024 / 1024,
-		 (int)qdev->surfaceram_size / 1024);
+		 (int)qdev->surfaceram_size / 1024,
+		 (sb == 4) ? "64bit" : "32bit");
 
 	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
 	if (!qdev->rom) {
@@ -230,9 +248,13 @@
 	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
 		(unsigned long)qdev->surfaceram_base,
 		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
-	DRM_INFO("main mem slot %d [%lx,%x)\n",
-		qdev->main_mem_slot,
-		(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+	DRM_INFO("main mem slot %d [%lx,%x]\n",
+		 qdev->main_mem_slot,
+		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
+		 qdev->surfaces_mem_slot,
+		 (unsigned long)qdev->surfaceram_base,
+		 (unsigned long)qdev->surfaceram_size);
 
 
 	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
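
The surface-RAM setup above now prefers a 64-bit surface BAR (PCI BAR 4) when the device exposes one, falling back to the legacy 32-bit BAR 1 either when BAR 4 has zero length or when mapping it fails. The probe order in isolation (helper name hypothetical; assumes qdev->surface_mapping starts out NULL):

/* Sketch of the BAR selection above; helper name is hypothetical. */
static int qxl_pick_surface_bar(struct qxl_device *qdev, struct pci_dev *pdev)
{
	int sb = 4;			/* 64-bit surface BAR, if present */

	if (pci_resource_len(pdev, sb) > 0)
		qdev->surface_mapping =
			io_mapping_create_wc(pci_resource_start(pdev, sb),
					     pci_resource_len(pdev, sb));
	if (qdev->surface_mapping == NULL) {	/* absent, or map failed */
		sb = 1;			/* legacy 32-bit surface BAR */
		qdev->surface_mapping =
			io_mapping_create_wc(pci_resource_start(pdev, sb),
					     pci_resource_len(pdev, sb));
	}
	return sb;
}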
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d..c7e7e65 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -516,6 +516,8 @@
 		 (unsigned)qdev->vram_size / (1024 * 1024));
 	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
 		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	DRM_INFO("qxl: %uM of Surface memory size\n",
+		 (unsigned)qdev->surfaceram_size / (1024 * 1024));
 	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
 		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
 	r = qxl_ttm_debugfs_init(qdev);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index af10f85..92be50c 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1711,7 +1711,9 @@
 #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
 #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
 #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
 #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct definition for 30bpp should be 1 for 30bpp(5:4)
 #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
 #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
 #define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
@@ -2223,7 +2225,7 @@
   USHORT   usVoltageLevel;              // real voltage level
 }SET_VOLTAGE_PARAMETERS_V2;
 
-
+// used by both SetVoltageTable v1.3 and v1.4
 typedef struct	_SET_VOLTAGE_PARAMETERS_V1_3
 {
   UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2290,15 +2292,36 @@
 #define	ATOM_GET_VOLTAGE_VID                0x00
 #define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
 #define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
-// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
-#define	ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+#define ATOM_GET_VOLTAGE_SVID2              0x07        //Get SVI2 Regulator Info
 
+// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
 // for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
 #define	ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
-// undefined power state
+
 #define	ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
 #define	ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
 
+// Newly added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id 
+  ULONG    ulSCLKFreq;                  // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE        0x09        
+
+// Newly added from CI Hawaii for the EVV feature
+typedef struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+  USHORT   usVoltageLevel;                               // real voltage level in unit of mv
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
 /****************************************************************************/	
 // Structures used by TVEncoderControlTable
 /****************************************************************************/	
@@ -3864,6 +3887,8 @@
 #define PP_AC_DC_SWITCH_GPIO_PINID          60
 //from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
 #define VDDC_VRHOT_GPIO_PINID               61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID                 62
 
 typedef struct _ATOM_GPIO_PIN_LUT
 {
@@ -4169,10 +4194,10 @@
 #define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
 #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
 #define ATOM_ENCODER_CAP_RECORD_TYPE                   20
-
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
 
 //Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_BRACKET_LAYOUT_RECORD_TYPE
 
 typedef struct  _ATOM_I2C_RECORD
 {
@@ -4397,6 +4422,31 @@
   USHORT                      usReserved;
 }ATOM_CONNECTOR_REMOTE_CAP_RECORD;
 
+typedef struct  _ATOM_CONNECTOR_LAYOUT_INFO
+{
+   USHORT usConnectorObjectId;
+   UCHAR  ucConnectorType;
+   UCHAR  ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D                 1
+#define CONNECTOR_TYPE_DVI_I                 2
+#define CONNECTOR_TYPE_VGA                   3
+#define CONNECTOR_TYPE_HDMI                  4
+#define CONNECTOR_TYPE_DISPLAY_PORT          5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT     6
+
+typedef struct  _ATOM_BRACKET_LAYOUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucLength;
+  UCHAR                       ucWidth;
+  UCHAR                       ucConnNum;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
 /****************************************************************************/	
 // ASIC voltage data table
 /****************************************************************************/	
@@ -4524,8 +4574,9 @@
 #define VOLTAGE_OBJ_VR_I2C_INIT_SEQ          3        //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_PHASE_LUT                4        //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_SVID2                    7        //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
-#define	VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define	VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV                      8 
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT  0x12     //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 
 typedef struct  _VOLTAGE_LUT_ENTRY_V2
@@ -4552,6 +4603,10 @@
    VOLTAGE_LUT_ENTRY asVolI2cLut[1];        // end with 0xff
 }ATOM_I2C_VOLTAGE_OBJECT_V3;
 
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE                0
+#define VOLTAGE_DATA_TWO_BYTE                1
+
 typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
 {
    ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
@@ -4584,7 +4639,8 @@
 // 1:0 – offset trim, 
    USHORT   usLoadLine_PSI;    
 // GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
-   UCHAR    ucReserved[2];
+   UCHAR    ucSVDGpioId;     //0~31 indicate GPIO0~31
+   UCHAR    ucSVCGpioId;     //0~31 indicate GPIO0~31
    ULONG    ulReserved;
 }ATOM_SVID2_VOLTAGE_OBJECT_V3;
 
@@ -4637,6 +4693,49 @@
   USHORT usElbVDDCI_LevelArrayOffset;    // offset of 2 dimension voltage level USHORT array
 }ATOM_ASIC_PROFILING_INFO_V2_1;
 
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader; 
+  ULONG  ulEvvDerateTdp;
+  ULONG  ulEvvDerateTdc;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  ULONG  ulCACmEncodeRange;
+  ULONG  ulCACmEncodeAverage;
+  ULONG  ulCACbEncodeRange;
+  ULONG  ulCACbEncodeAverage;
+  ULONG  ulKt_bEncodeRange;
+  ULONG  ulKt_bEncodeAverage;
+  ULONG  ulKv_mEncodeRange;
+  ULONG  ulKv_mEncodeAverage;
+  ULONG  ulKv_bEncodeRange;
+  ULONG  ulKv_bEncodeAverage;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usCurrentDpm0;
+  USHORT usPowerDpm1;
+  USHORT usCurrentDpm1;
+  USHORT usPowerDpm2;
+  USHORT usCurrentDpm2;
+  USHORT usPowerDpm3;
+  USHORT usCurrentDpm3;
+  USHORT usPowerDpm4;
+  USHORT usCurrentDpm4;
+  USHORT usPowerDpm5;
+  USHORT usCurrentDpm5;
+  USHORT usPowerDpm6;
+  USHORT usCurrentDpm6;
+  USHORT usPowerDpm7;
+  USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
 typedef struct _ATOM_POWER_SOURCE_OBJECT
 {
 	UCHAR	ucPwrSrcId;													// Power source
@@ -5808,6 +5907,8 @@
 #define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
 #define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
 #define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1        0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK     0x00000200
 #define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
 
 #define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
@@ -6242,6 +6343,7 @@
 #define _128Mx32            0x53
 #define _256Mx8             0x61
 #define _256Mx16            0x62
+#define _512Mx8             0x71
 
 #define SAMSUNG             0x1
 #define INFINEON            0x2
@@ -6987,9 +7089,10 @@
   UCHAR  ucMaxDispEngineNum;
   UCHAR  ucMaxActiveDispEngineNum;
   UCHAR  ucMaxPPLLNum;
-  UCHAR  ucCoreRefClkSource;                          // value of CORE_REF_CLK_SOURCE
-  UCHAR  ucReserved[3];
-	ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alligment only
+  UCHAR  ucCoreRefClkSource;                    // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucDispCaps;
+  UCHAR  ucReserved[2];
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
 }ATOM_DISP_OUT_INFO_V3;
 
 //ucDispCaps
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d..80a2012 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@
 				if (pll != ATOM_PPLL_INVALID)
 					return pll;
 			}
-		} else {
+		} else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
 			/* use the same PPLL for all monitors with the same clock */
 			pll = radeon_get_shared_nondp_ppll(crtc);
 			if (pll != ATOM_PPLL_INVALID)
@@ -1910,6 +1910,21 @@
 	int i;
 
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->fb) {
+		int r;
+		struct radeon_framebuffer *radeon_fb;
+		struct radeon_bo *rbo;
+
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
+	}
 	/* disable the GRPH */
 	if (ASIC_IS_DCE4(rdev))
 		WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
@@ -1940,7 +1955,9 @@
 		break;
 	case ATOM_PPLL0:
 		/* disable the ppll */
-		if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
+		if ((rdev->family == CHIP_ARUBA) ||
+		    (rdev->family == CHIP_BONAIRE) ||
+		    (rdev->family == CHIP_HAWAII))
 			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
 						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
 		break;
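
The disable path above adds the usual reserve/unpin/unreserve sequence so the scanout buffer is no longer left pinned in VRAM once its CRTC is shut off. The pattern in isolation (helper name hypothetical; the call sequence matches the hunk):

/* Sketch: unpin a radeon BO.  The BO must be reserved (locked)
 * around unpin; a failed reserve is only logged, as above. */
static void radeon_unpin_fb_bo(struct radeon_bo *rbo)
{
	int r = radeon_bo_reserve(rbo, false);

	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}
	radeon_bo_unpin(rbo);
	radeon_bo_unreserve(rbo);
}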
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0088541..fb3ae07 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@
 
 	/* set the lane count on the sink */
 	tmp = dp_info->dp_lane_count;
-	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
 		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
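The drm_dp_enhanced_frame_cap() helper substituted above should be equivalent to the open-coded test it replaces, i.e. roughly:

/* Sketch: what the helper checks, per the two removed lines above. */
static inline bool dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}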
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b2..a42d615 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@
 			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 			/* enable the transmitter */
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		} else {
 			/* setup and enable the encoder and transmitter */
 			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			/* some dce3.x boards have a bug in their transmitter control table.
-			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
-			 * does the same thing and more.
-			 */
-			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-			    (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+		if (ASIC_IS_DCE4(rdev)) {
 			/* disable the transmitter */
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-		} else if (ASIC_IS_DCE4(rdev)) {
-			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		} else {
 			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 		}
@@ -2410,6 +2397,15 @@
 
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
+	/* set up the FMT blocks */
+	if (ASIC_IS_DCE8(rdev))
+		dce8_program_fmt(encoder);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_program_fmt(encoder);
+	else if (ASIC_IS_DCE3(rdev))
+		dce3_program_fmt(encoder);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_program_fmt(encoder);
 }
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a..1ed4799 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -40,6 +40,20 @@
 #define VOLTAGE_VID_OFFSET_SCALE1    625
 #define VOLTAGE_VID_OFFSET_SCALE2    100
 
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
+	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
+	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
 static const struct ci_pt_defaults defaults_bonaire_xt =
 {
 	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
@@ -187,22 +201,38 @@
 	struct ci_power_info *pi = ci_get_pi(rdev);
 
 	switch (rdev->pdev->device) {
-        case 0x6650:
-        case 0x6658:
-        case 0x665C:
-        default:
+	case 0x6650:
+	case 0x6658:
+	case 0x665C:
+	default:
 		pi->powertune_defaults = &defaults_bonaire_xt;
 		break;
-        case 0x6651:
-        case 0x665D:
+	case 0x6651:
+	case 0x665D:
 		pi->powertune_defaults = &defaults_bonaire_pro;
 		break;
-        case 0x6640:
+	case 0x6640:
 		pi->powertune_defaults = &defaults_saturn_xt;
 		break;
-        case 0x6641:
+	case 0x6641:
 		pi->powertune_defaults = &defaults_saturn_pro;
 		break;
+	case 0x67B8:
+	case 0x67B0:
+	case 0x67A0:
+	case 0x67A1:
+	case 0x67A2:
+	case 0x67A8:
+	case 0x67A9:
+	case 0x67AA:
+	case 0x67B9:
+	case 0x67BE:
+		pi->powertune_defaults = &defaults_hawaii_xt;
+		break;
+	case 0x67BA:
+	case 0x67B1:
+		pi->powertune_defaults = &defaults_hawaii_pro;
+		break;
 	}
 
 	pi->dte_tj_offset = 0;
@@ -5142,9 +5172,15 @@
 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
 
-	pi->thermal_temp_setting.temperature_low = 99500;
-	pi->thermal_temp_setting.temperature_high = 100000;
-	pi->thermal_temp_setting.temperature_shutdown = 104000;
+	if (rdev->family == CHIP_HAWAII) {
+		pi->thermal_temp_setting.temperature_low = 94500;
+		pi->thermal_temp_setting.temperature_high = 95000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	} else {
+		pi->thermal_temp_setting.temperature_low = 99500;
+		pi->thermal_temp_setting.temperature_high = 100000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	}
 
 	pi->uvd_enabled = false;
 
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 252e10a..9c745dd 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -217,6 +217,10 @@
 		ucode_start_address = BONAIRE_SMC_UCODE_START;
 		ucode_size = BONAIRE_SMC_UCODE_SIZE;
 		break;
+	case CHIP_HAWAII:
+		ucode_start_address = HAWAII_SMC_UCODE_START;
+		ucode_size = HAWAII_SMC_UCODE_SIZE;
+		break;
 	default:
 		DRM_ERROR("unknown asic in smc ucode loader\n");
 		BUG();
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc9..ae92aa0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -41,6 +41,14 @@
 MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
 MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
 MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
+MODULE_FIRMWARE("radeon/HAWAII_me.bin");
+MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
+MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
 MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
 MODULE_FIRMWARE("radeon/KAVERI_me.bin");
 MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -67,11 +75,6 @@
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
-				 struct radeon_ib *ib,
-				 uint64_t pe,
-				 uint64_t addr, unsigned count,
-				 uint32_t incr, uint32_t flags);
 static void cik_rlc_stop(struct radeon_device *rdev);
 static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
@@ -1302,6 +1305,171 @@
 	0xd80c, 0xff000ff0, 0x00000100
 };
 
+static const u32 hawaii_golden_spm_registers[] =
+{
+	0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+	0x30800, 0xffffffff, 0xe0000000,
+	0x28350, 0xffffffff, 0x3a00161a,
+	0x28354, 0xffffffff, 0x0000002e,
+	0x9a10, 0xffffffff, 0x00018208,
+	0x98f8, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+	0x3354, 0x00000333, 0x00000333,
+	0x9a10, 0x00010000, 0x00058208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x350c, 0x00810000, 0x408af000,
+	0x7030, 0x31000111, 0x00000011,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2120, 0x0000007f, 0x0000001b,
+	0x21dc, 0x00007fb6, 0x00002191,
+	0x3628, 0x0000003f, 0x0000000a,
+	0x362c, 0x0000003f, 0x0000000a,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8bf0, 0x00002001, 0x00000001,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x3e78, 0x00000001, 0x00000002,
+	0xc768, 0x00000008, 0x00000008,
+	0xc770, 0x00000f00, 0x00000800,
+	0xc774, 0x00000f00, 0x00000800,
+	0xc798, 0x00ffffff, 0x00ff7fbf,
+	0xc79c, 0x00ffffff, 0x00ff7faf,
+	0x8c00, 0x000000ff, 0x00000800,
+	0xe40, 0x00001fff, 0x00001fff,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xae00, 0x00100000, 0x000ff07c,
+	0xac14, 0x000003ff, 0x0000000f,
+	0xac10, 0xffffffff, 0x7564fdec,
+	0xac0c, 0xffffffff, 0x3120b9a8,
+	0xac08, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+	0xc420, 0xffffffff, 0xfffffffd,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c2a0, 0xffffffff, 0x00000100,
+	0x3c208, 0xffffffff, 0x00000100,
+	0x3c2c0, 0xffffffff, 0x00000100,
+	0x3c2c8, 0xffffffff, 0x00000100,
+	0x3c2c4, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00200100,
+	0x3c280, 0xffffffff, 0x00000100,
+	0x3c214, 0xffffffff, 0x06000100,
+	0x3c220, 0xffffffff, 0x00000100,
+	0x3c218, 0xffffffff, 0x06000100,
+	0x3c204, 0xffffffff, 0x00000100,
+	0x3c2e0, 0xffffffff, 0x00000100,
+	0x3c224, 0xffffffff, 0x00000100,
+	0x3c200, 0xffffffff, 0x00000100,
+	0x3c230, 0xffffffff, 0x00000100,
+	0x3c234, 0xffffffff, 0x00000100,
+	0x3c250, 0xffffffff, 0x00000100,
+	0x3c254, 0xffffffff, 0x00000100,
+	0x3c258, 0xffffffff, 0x00000100,
+	0x3c25c, 0xffffffff, 0x00000100,
+	0x3c260, 0xffffffff, 0x00000100,
+	0x3c27c, 0xffffffff, 0x00000100,
+	0x3c278, 0xffffffff, 0x00000100,
+	0x3c210, 0xffffffff, 0x06000100,
+	0x3c290, 0xffffffff, 0x00000100,
+	0x3c274, 0xffffffff, 0x00000100,
+	0x3c2b4, 0xffffffff, 0x00000100,
+	0x3c2b0, 0xffffffff, 0x00000100,
+	0x3c270, 0xffffffff, 0x00000100,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c020, 0xffffffff, 0x00010000,
+	0x3c024, 0xffffffff, 0x00030002,
+	0x3c028, 0xffffffff, 0x00040007,
+	0x3c02c, 0xffffffff, 0x00060005,
+	0x3c030, 0xffffffff, 0x00090008,
+	0x3c034, 0xffffffff, 0x00010000,
+	0x3c038, 0xffffffff, 0x00030002,
+	0x3c03c, 0xffffffff, 0x00040007,
+	0x3c040, 0xffffffff, 0x00060005,
+	0x3c044, 0xffffffff, 0x00090008,
+	0x3c048, 0xffffffff, 0x00010000,
+	0x3c04c, 0xffffffff, 0x00030002,
+	0x3c050, 0xffffffff, 0x00040007,
+	0x3c054, 0xffffffff, 0x00060005,
+	0x3c058, 0xffffffff, 0x00090008,
+	0x3c05c, 0xffffffff, 0x00010000,
+	0x3c060, 0xffffffff, 0x00030002,
+	0x3c064, 0xffffffff, 0x00040007,
+	0x3c068, 0xffffffff, 0x00060005,
+	0x3c06c, 0xffffffff, 0x00090008,
+	0x3c070, 0xffffffff, 0x00010000,
+	0x3c074, 0xffffffff, 0x00030002,
+	0x3c078, 0xffffffff, 0x00040007,
+	0x3c07c, 0xffffffff, 0x00060005,
+	0x3c080, 0xffffffff, 0x00090008,
+	0x3c084, 0xffffffff, 0x00010000,
+	0x3c088, 0xffffffff, 0x00030002,
+	0x3c08c, 0xffffffff, 0x00040007,
+	0x3c090, 0xffffffff, 0x00060005,
+	0x3c094, 0xffffffff, 0x00090008,
+	0x3c098, 0xffffffff, 0x00010000,
+	0x3c09c, 0xffffffff, 0x00030002,
+	0x3c0a0, 0xffffffff, 0x00040007,
+	0x3c0a4, 0xffffffff, 0x00060005,
+	0x3c0a8, 0xffffffff, 0x00090008,
+	0x3c0ac, 0xffffffff, 0x00010000,
+	0x3c0b0, 0xffffffff, 0x00030002,
+	0x3c0b4, 0xffffffff, 0x00040007,
+	0x3c0b8, 0xffffffff, 0x00060005,
+	0x3c0bc, 0xffffffff, 0x00090008,
+	0x3c0c0, 0xffffffff, 0x00010000,
+	0x3c0c4, 0xffffffff, 0x00030002,
+	0x3c0c8, 0xffffffff, 0x00040007,
+	0x3c0cc, 0xffffffff, 0x00060005,
+	0x3c0d0, 0xffffffff, 0x00090008,
+	0x3c0d4, 0xffffffff, 0x00010000,
+	0x3c0d8, 0xffffffff, 0x00030002,
+	0x3c0dc, 0xffffffff, 0x00040007,
+	0x3c0e0, 0xffffffff, 0x00060005,
+	0x3c0e4, 0xffffffff, 0x00090008,
+	0x3c0e8, 0xffffffff, 0x00010000,
+	0x3c0ec, 0xffffffff, 0x00030002,
+	0x3c0f0, 0xffffffff, 0x00040007,
+	0x3c0f4, 0xffffffff, 0x00060005,
+	0x3c0f8, 0xffffffff, 0x00090008,
+	0xc318, 0xffffffff, 0x00020200,
+	0x3350, 0xffffffff, 0x00000200,
+	0x15c0, 0xffffffff, 0x00000400,
+	0x55e8, 0xffffffff, 0x00000000,
+	0x2f50, 0xffffffff, 0x00000902,
+	0x3c000, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc424, 0xffffffff, 0x0020003f,
+	0x38, 0xffffffff, 0x0140001c,
+	0x3c, 0x000f0000, 0x000f0000,
+	0x220, 0xffffffff, 0xc060000c,
+	0x224, 0xc0000fff, 0x00000100,
+	0xf90, 0xffffffff, 0x00000100,
+	0xf98, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd00c, 0xff000ff0, 0x00000100,
+	0xd80c, 0xff000ff0, 0x00000100
+};
+
 static void cik_init_golden_registers(struct radeon_device *rdev)
 {
 	switch (rdev->family) {
@@ -1347,6 +1515,20 @@
 						 spectre_golden_spm_registers,
 						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
 		break;
+	case CHIP_HAWAII:
+		radeon_program_register_sequence(rdev,
+						 hawaii_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_common_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+		break;
 	default:
 		break;
 	}
@@ -1454,6 +1636,35 @@
 	{0x0000009f, 0x00b48000}
 };
 
+#define HAWAII_IO_MC_REGS_SIZE 22
+
+static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
+{
+	{0x0000007d, 0x40000000},
+	{0x0000007e, 0x40180304},
+	{0x0000007f, 0x0000ff00},
+	{0x00000081, 0x00000000},
+	{0x00000083, 0x00000800},
+	{0x00000086, 0x00000000},
+	{0x00000087, 0x00000100},
+	{0x00000088, 0x00020100},
+	{0x00000089, 0x00000000},
+	{0x0000008b, 0x00040000},
+	{0x0000008c, 0x00000100},
+	{0x0000008e, 0xff010000},
+	{0x00000090, 0xffffefff},
+	{0x00000091, 0xfff3efff},
+	{0x00000092, 0xfff3efbf},
+	{0x00000093, 0xf7ffffff},
+	{0x00000094, 0xffffff7f},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x0000009f, 0x00c79000}
+};
+
+
 /**
  * cik_srbm_select - select specific register instances
  *
@@ -1498,11 +1709,17 @@
 
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
-	default:
 		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
 		ucode_size = CIK_MC_UCODE_SIZE;
 		regs_size = BONAIRE_IO_MC_REGS_SIZE;
 		break;
+	case CHIP_HAWAII:
+		io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+		ucode_size = HAWAII_MC_UCODE_SIZE;
+		regs_size = HAWAII_IO_MC_REGS_SIZE;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1564,8 +1781,8 @@
 {
 	const char *chip_name;
 	size_t pfp_req_size, me_req_size, ce_req_size,
-		mec_req_size, rlc_req_size, mc_req_size,
-		sdma_req_size, smc_req_size;
+		mec_req_size, rlc_req_size, mc_req_size = 0,
+		sdma_req_size, smc_req_size = 0;
 	char fw_name[30];
 	int err;
 
@@ -1583,6 +1800,17 @@
 		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
 		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
 		break;
+	case CHIP_HAWAII:
+		chip_name = "HAWAII";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
+		mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+		break;
 	case CHIP_KAVERI:
 		chip_name = "KAVERI";
 		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
@@ -1763,9 +1991,227 @@
 
 	num_pipe_configs = rdev->config.cik.max_tile_pipes;
 	if (num_pipe_configs > 8)
-		num_pipe_configs = 8; /* ??? */
+		num_pipe_configs = 16;
 
-	if (num_pipe_configs == 8) {
+	if (num_pipe_configs == 16) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+				break;
+			case 1:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+				break;
+			case 2:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+				break;
+			case 3:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+				break;
+			case 4:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(split_equal_to_row_size));
+				break;
+			case 5:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 6:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+				break;
+			case 7:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 TILE_SPLIT(split_equal_to_row_size));
+				break;
+			case 8:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+				break;
+			case 9:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+				break;
+			case 10:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 11:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 12:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 13:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+				break;
+			case 14:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 16:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 17:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 27:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+				break;
+			case 28:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 29:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 30:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 1:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 2:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 3:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 4:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 5:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_4_BANK));
+				break;
+			case 6:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 8:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 9:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 10:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
+				break;
+			case 11:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 12:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_4_BANK));
+				break;
+			case 13:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 14:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+	} else if (num_pipe_configs == 8) {
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
 			case 0:
@@ -2650,7 +3096,10 @@
 		for (j = 0; j < sh_per_se; j++) {
 			cik_select_se_sh(rdev, i, j);
 			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
-			disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+			if (rdev->family == CHIP_HAWAII)
+				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
+			else
+				disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
 		}
 	}
 	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
@@ -2667,6 +3116,12 @@
 		data = 0;
 		for (j = 0; j < sh_per_se; j++) {
 			switch (enabled_rbs & 3) {
+			case 0:
+				if (j == 0)
+					data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
+				else
+					data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
+				break;
 			case 1:
 				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
 				break;
@@ -2719,6 +3174,23 @@
 		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
 		break;
+	case CHIP_HAWAII:
+		rdev->config.cik.max_shader_engines = 4;
+		rdev->config.cik.max_tile_pipes = 16;
+		rdev->config.cik.max_cu_per_sh = 11;
+		rdev->config.cik.max_sh_per_se = 1;
+		rdev->config.cik.max_backends_per_se = 4;
+		rdev->config.cik.max_texture_channel_caches = 16;
+		rdev->config.cik.max_gprs = 256;
+		rdev->config.cik.max_gs_threads = 32;
+		rdev->config.cik.max_hw_contexts = 8;
+
+		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+		break;
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
@@ -3097,6 +3569,85 @@
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 }
 
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_cpdma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.blit_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes, control;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0x1fffff)
+			cur_size_in_bytes = 0x1fffff;
+		size_in_bytes -= cur_size_in_bytes;
+		control = 0;
+		if (size_in_bytes == 0)
+			control |= PACKET3_DMA_DATA_CP_SYNC;
+		radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+		radeon_ring_write(ring, control);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
+		radeon_ring_write(ring, cur_size_in_bytes);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 /*
  * IB stuff
  */
@@ -3403,7 +3954,8 @@
 	int r;
 
 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
-	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+	if (rdev->family != CHIP_HAWAII)
+		WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
 	/* Set the write pointer delay */
 	WREG32(CP_RB_WPTR_DELAY, 0);
@@ -4740,12 +5292,17 @@
 static void cik_vm_decode_fault(struct radeon_device *rdev,
 				u32 status, u32 addr, u32 mc_client)
 {
-	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+	u32 mc_id;
 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
+	if (rdev->family == CHIP_HAWAII)
+		mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+	else
+		mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+
 	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 	       protections, vmid, addr,
 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
@@ -4834,62 +5391,6 @@
 	}
 }
 
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		/* CP */
-		while (count) {
-			ndw = 2 + count * 2;
-			if (ndw > 0x3FFE)
-				ndw = 0x3FFE;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-						    WRITE_DATA_DST_SEL(1));
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		/* DMA */
-		cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 /*
  * RLC
  * The RLC is a multi-purpose microengine that handles a
@@ -5058,6 +5559,7 @@
 
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
 	default:
 		size = BONAIRE_RLC_UCODE_SIZE;
 		break;
@@ -5556,7 +6058,7 @@
 		}
 
 		for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
-			dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+			dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
 		}
 		bo_offset += CP_ME_TABLE_SIZE;
 	}
@@ -5778,52 +6280,57 @@
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
-	buffer[count++] = 0x80000000;
-	buffer[count++] = 0x80000000;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
 
 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
 		for (ext = sect->section; ext->extent != NULL; ++ext) {
 			if (sect->id == SECT_CONTEXT) {
-				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
-				buffer[count++] = ext->reg_index - 0xa000;
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
 				for (i = 0; i < ext->reg_count; i++)
-					buffer[count++] = ext->extent[i];
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
 			} else {
 				return;
 			}
 		}
 	}
 
-	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
-	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
-		buffer[count++] = 0x16000012;
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_KAVERI:
-		buffer[count++] = 0x00000000; /* XXX */
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_KABINI:
-		buffer[count++] = 0x00000000; /* XXX */
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_HAWAII:
+		buffer[count++] = cpu_to_le32(0x3a00161a);
+		buffer[count++] = cpu_to_le32(0x0000002e);
 		break;
 	default:
-		buffer[count++] = 0x00000000;
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	}
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
-	buffer[count++] = 0;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
 }
 
 static void cik_init_pg(struct radeon_device *rdev)
@@ -7118,7 +7625,7 @@
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     CP_RB0_RPTR, CP_RB0_WPTR,
-			     RADEON_CP_PACKET2);
+			     PACKET3(PACKET3_NOP, 0x3FFF));
 	if (r)
 		return r;
 
@@ -7428,6 +7935,70 @@
 	rdev->bios = NULL;
 }
 
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+		break;
+	case 10:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+		break;
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* display watermark setup */
 /**
  * dce8_line_buffer_adjust - Set up the line buffer
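
A note on dce8_program_fmt() above: the switch boils down to picking one of three
hardware depths (0 = 18bpp for 6 bpc, 1 = 24bpp for 8 bpc, 2 = 30bpp for 10 bpc)
and then either spatial dithering or plain truncation. A minimal sketch of that
mapping, reusing the FMT_* defines added to cikd.h below (fmt_depth_bits is a
hypothetical helper name, and the per-bpc random-source tweaks are folded
together):

static u32 fmt_depth_bits(int bpc, bool dither)
{
	/* FMT depth encoding: 0 = 18bpp, 1 = 24bpp, 2 = 30bpp */
	int depth = (bpc == 6) ? 0 : (bpc == 8) ? 1 : 2;

	if (dither)	/* XXX optimal dither settings still open, as above */
		return FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
		       FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(depth);
	/* no dithering requested: just drop the low bits */
	return FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(depth);
}
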
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b628606..9c9529d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "cikd.h"
 
 /* sdma */
@@ -101,14 +102,6 @@
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
-			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
-	u32 ref_and_mask;
-
-	if (fence->ring == R600_RING_TYPE_DMA_INDEX)
-		ref_and_mask = SDMA0;
-	else
-		ref_and_mask = SDMA1;
 
 	/* write the fence */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
@@ -118,12 +111,12 @@
 	/* generate an interrupt */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 	/* flush HDP */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
-	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
-	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
-	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
-	radeon_ring_write(ring, ref_and_mask); /* MASK */
-	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+	/* We should be using the new POLL_REG_MEM special op packet here
+	 * but it causes sDMA to hang sometimes
+	 */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
 }
 
 /**
@@ -653,11 +646,12 @@
 			  uint64_t addr, unsigned count,
 			  uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -669,16 +663,10 @@
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 			ib->ptr[ib->length_dw++] = ndw;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -689,7 +677,7 @@
 			if (ndw > 0x7FFFF)
 				ndw = 0x7FFFF;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -697,7 +685,7 @@
 			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +712,10 @@
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
-	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
-			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
-	u32 ref_and_mask;
 
 	if (vm == NULL)
 		return;
 
-	if (ridx == R600_RING_TYPE_DMA_INDEX)
-		ref_and_mask = SDMA0;
-	else
-		ref_and_mask = SDMA1;
-
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	if (vm->id < 8) {
 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +750,12 @@
 	radeon_ring_write(ring, VMID(0));
 
 	/* flush HDP */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
-	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
-	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
-	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
-	radeon_ring_write(ring, ref_and_mask); /* MASK */
-	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+	/* We should be using the new POLL_REG_MEM special op packet here
+	 * but it causes sDMA to hang sometimes
+	 */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
 
 	/* flush TLB */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
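
The two hunks above swap the POLL_REG_MEM HDP flush for a plain SRBM register
write. Spelled out as an annotated sketch (reading the 0xf000 as the byte-enable
field is an interpretation, not something the patch states):

/* SRBM_WRITE is a three-dword sDMA packet: header (0xf000 = byte-enable
 * 0xf in the extra bits), register offset in dwords (hence the >> 2),
 * then the value.  Writing 0 to HDP_MEM_COHERENCY_FLUSH_CNTL kicks an
 * HDP flush without the special-op packet that sometimes hangs sDMA.
 */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
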
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a0..5964af5 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -25,8 +25,10 @@
 #define CIK_H
 
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
 
-#define CIK_RB_BITMAP_WIDTH_PER_SH  2
+#define CIK_RB_BITMAP_WIDTH_PER_SH     2
+#define HAWAII_RB_BITMAP_WIDTH_PER_SH  4
 
 /* DIDT IND registers */
 #define DIDT_SQ_CTRL0                                     0x0
@@ -499,6 +501,7 @@
 		 * bit 4: write
 		 */
 #define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		HAWAII_MEMORY_CLIENT_ID_MASK		(0x1ff << 12)
 #define		MEMORY_CLIENT_ID_SHIFT			12
 #define		MEMORY_CLIENT_RW_MASK			(1 << 24)
 #define		MEMORY_CLIENT_RW_SHIFT			24
@@ -906,6 +909,39 @@
 #define DPG_PIPE_STUTTER_CONTROL                          0x6cd4
 #       define STUTTER_ENABLE                             (1 << 0)
 
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_MODE             (1 << 1)
+#       define FMT_TRUNCATE_DEPTH(x)         ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH(x)   ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH(x)  ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 #define	GRBM_CNTL					0x8000
 #define		GRBM_READ_TIMEOUT(x)				((x) << 0)
 
@@ -1129,6 +1165,8 @@
 #              define	ADDR_SURF_P8_32x32_16x16		12
 #              define	ADDR_SURF_P8_32x32_16x32		13
 #              define	ADDR_SURF_P8_32x64_32x32		14
+#              define	ADDR_SURF_P16_32x32_8x16		16
+#              define	ADDR_SURF_P16_32x32_16x16		17
 #       define TILE_SPLIT(x)					((x) << 11)
 #              define	ADDR_SURF_TILE_SPLIT_64B		0
 #              define	ADDR_SURF_TILE_SPLIT_128B		1
@@ -1422,6 +1460,7 @@
 #       define RASTER_CONFIG_RB_MAP_1                   1
 #       define RASTER_CONFIG_RB_MAP_2                   2
 #       define RASTER_CONFIG_RB_MAP_3                   3
+#define		PKR_MAP(x)				((x) << 8)
 
 #define VGT_EVENT_INITIATOR                             0x28a90
 #       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
@@ -1714,6 +1753,68 @@
 #              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
 #              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
 #define	PACKET3_DMA_DATA				0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
+                /* 0 - DST_ADDR using DAS
+		 * 1 - GDS
+		 * 3 - DST_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
+                /* 0 - SRC_ADDR using SAS
+		 * 1 - GDS
+		 * 2 - DATA
+		 * 3 - SRC_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
+/* COMMAND */
+#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
+#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
+#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
+#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
 #define	PACKET3_AQUIRE_MEM				0x58
 #define	PACKET3_REWIND					0x59
 #define	PACKET3_LOAD_UCONFIG_REG			0x5E
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338..009f46e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@
 	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0, offset;
+
+	if (!dig->afmt->pin)
+		return;
+
+	offset = dig->afmt->pin->offset;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -113,9 +156,6 @@
 	u8 *sadb;
 	int sad_count;
 
-	/* XXX: setting this register causes hangs on some asics */
-	return;
-
 	if (!dig->afmt->pin)
 		return;
 
@@ -201,20 +241,30 @@
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
 	}
 
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a9..9702e55 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1174,23 +1174,72 @@
 
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
-	u16 ctl, v;
-	int err;
+	int readrq;
+	u16 v;
 
-	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
-	if (err)
-		return;
-
-	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
-
+	readrq = pcie_get_readrq(rdev->pdev);
+	v = ffs(readrq) - 8;
 	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
 	 * to avoid hangs or performance issues
 	 */
-	if ((v == 0) || (v == 6) || (v == 7)) {
-		ctl &= ~PCI_EXP_DEVCTL_READRQ;
-		ctl |= (2 << 12);
-		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
+	if ((v == 0) || (v == 6) || (v == 7))
+		pcie_set_readrq(rdev->pdev, 512);
+}
+
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
 	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
 }
 
 static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
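
For reference, the READRQ arithmetic above leans on the PCIe encoding of the
maximum read request size; a quick worked example (field meanings per the PCIe
DEVCTL register, values chosen for illustration):

/* The READRQ field stores log2(bytes) - 7: 128 -> 0, 256 -> 1, 512 -> 2,
 * ... 4096 -> 5; 6 and 7 are reserved.  pcie_get_readrq() returns bytes,
 * so ffs() recovers the exponent.  The values the driver rejects
 * (0 = 128 bytes, plus the reserved 6 and 7) are clamped to 512 bytes.
 */
int readrq = 512;		/* as returned by pcie_get_readrq() */
u16 v = ffs(readrq) - 8;	/* ffs(512) = 10, so v = 2 */
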
@@ -3963,7 +4012,7 @@
 		if (rdev->family >= CHIP_TAHITI) {
 			/* SI */
 			for (i = 0; i < rdev->rlc.reg_list_size; i++)
-				dst_ptr[i] = src_ptr[i];
+				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
 		} else {
 			/* ON/LN/TN */
 			/* format:
@@ -3977,10 +4026,10 @@
 				if (i < dws)
 					data |= (src_ptr[i] >> 2) << 16;
 				j = (((i - 1) * 3) / 2);
-				dst_ptr[j] = data;
+				dst_ptr[j] = cpu_to_le32(data);
 			}
 			j = ((i * 3) / 2);
-			dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4091,40 @@
 			cik_get_csb_buffer(rdev, dst_ptr);
 		} else if (rdev->family >= CHIP_TAHITI) {
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
-			dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
-			dst_ptr[2] = rdev->rlc.clear_state_size;
+			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
 			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
 		} else {
 			reg_list_hdr_blk_index = 0;
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
 			data = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[reg_list_hdr_blk_index] = data;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 			reg_list_hdr_blk_index++;
 			for (i = 0; cs_data[i].section != NULL; i++) {
 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
 					reg_num = cs_data[i].section[j].reg_count;
 					data = reg_list_mc_addr & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					data = 0x08000000 | (reg_num * 4);
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					for (k = 0; k < reg_num; k++) {
 						data = cs_data[i].section[j].extent[k];
-						dst_ptr[reg_list_blk_index + k] = data;
+						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
 					}
 					reg_list_mc_addr += reg_num * 4;
 					reg_list_blk_index += reg_num;
 				}
 			}
-			dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 57fcc4b..aa695c4 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
 extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
 extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
 extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode);
 
 /*
  * update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@
 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -67,12 +105,11 @@
 	u8 *sadb;
 	int sad_count;
 
-	/* XXX: setting this register causes hangs on some asics */
-	return;
-
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -124,8 +161,10 @@
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -142,20 +181,30 @@
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32(eld_reg_to_type[i][0], value);
 	}
 
@@ -324,8 +373,10 @@
 	if (ASIC_IS_DCE6(rdev)) {
 		dce6_afmt_select_pin(encoder);
 		dce6_afmt_write_sad_regs(encoder);
+		dce6_afmt_write_latency_fields(encoder, mode);
 	} else {
 		evergreen_hdmi_write_sad_regs(encoder);
+		dce4_afmt_write_latency_fields(encoder, mode);
 	}
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
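
The SAD loop rewrite above (the same rewrite appears in dce6_afmt.c above and
r600_hdmi.c below) fixes two pieces of lost information; a worked example with a
hypothetical ELD:

/* Suppose the ELD carries two PCM descriptors:
 *     SAD0: PCM, 2ch, freq mask 0x7f   (32 kHz .. 192 kHz)
 *     SAD1: PCM, 8ch, freq mask 0x07   (32/44.1/48 kHz)
 * The old loop matched SAD0 and broke out, advertising only 2 channels.
 * The new loop keeps the descriptor with the most channels for
 * MAX_CHANNELS/SUPPORTED_FREQUENCIES (SAD1: 8ch, 0x07) while OR-ing every
 * PCM freq mask into SUPPORTED_FREQUENCIES_STEREO (0x7f | 0x07 = 0x7f),
 * so stereo playback still sees the full rate list.
 */
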
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d296..17f9907 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
  * bit6 = 192 kHz
  */
 
+#define AZ_CHANNEL_COUNT_CONTROL                          0x5fe4
+#       define HBR_CHANNEL_COUNT(x)                       (((x) & 0x7) << 0)
+#       define COMPRESSED_CHANNEL_COUNT(x)                (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0   = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC         0x5fe8
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR             0x5fec
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+#       define DISPLAY0_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY_TYPE_NONE                   0
+#       define DISPLAY_TYPE_HDMI                   1
+#       define DISPLAY_TYPE_DP                     2
+#       define DISPLAY0_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY1_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY1_ID(x)                             (((x) & 0x3f) << 10)
+#       define DISPLAY2_TYPE(x)                           (((x) & 0x3) << 16)
+#       define DISPLAY2_ID(x)                             (((x) & 0x3f) << 18)
+#       define DISPLAY3_TYPE(x)                           (((x) & 0x3) << 24)
+#       define DISPLAY3_ID(x)                             (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+#       define DISPLAY4_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY4_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY5_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY5_ID(x)                             (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER       0x5ffc
+#       define NUMBER_OF_DISPLAY_ID(x)                    (((x) & 0x7) << 0)
+
 #define AZ_HOT_PLUG_CONTROL                               0x5e78
 #       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
 #       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
@@ -1312,6 +1350,38 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* ASYNC DMA */
 #define DMA_RB_RPTR                                       0xd008
 #define DMA_RB_WPTR                                       0xd00c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866..11aab2a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
-				   struct radeon_ib *ib,
-				   uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@
 	       block, mc_id);
 }
 
-#define R600_ENTRY_VALID   (1 << 0)
-#define R600_PTE_SYSTEM    (1 << 1)
-#define R600_PTE_SNOOPED   (1 << 2)
-#define R600_PTE_READABLE  (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
-	uint32_t r600_flags = 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
-		r600_flags |= R600_PTE_SYSTEM;
-		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-	}
-	return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		while (count) {
-			ndw = 1 + count * 2;
-			if (ndw > 0x3FFF)
-				ndw = 0x3FFF;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 /**
  * cayman_vm_flush - vm flush using the CP
  *
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e968..bdeb65e 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "nid.h"
 
 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags 
+ * @flags: hw access flags
  *
  * Update the page tables using the DMA (cayman/TN).
  */
@@ -256,11 +256,12 @@
 			    uint64_t addr, unsigned count,
 			    uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@
 			ib->ptr[ib->length_dw++] = pe;
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
+				if (flags & R600_PTE_SYSTEM) {
 					value = radeon_vm_map_gart(rdev, addr);
 					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
+				} else if (flags & R600_PTE_VALID) {
 					value = addr;
 				} else {
 					value = 0;
 				}
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -291,7 +292,7 @@
 			if (ndw > 0xFFFFE)
 				ndw = 0xFFFFE;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -299,7 +300,7 @@
 			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
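
With cayman_vm_page_flags() removed from the backends (see the ni.c hunk above),
the set_page() callbacks now receive hardware PTE bits directly; the translation
presumably happens once in the common VM code instead of per backend. A minimal
sketch of what a caller hands over, using the R600_PTE_* defines that radeon.h
gains below (the flag combination is illustrative):

/* a valid, snooped, writable system page, already in hardware encoding */
u32 flags = R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
	    R600_PTE_READABLE | R600_PTE_WRITEABLE;
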
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d713330..784983d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1434,7 +1434,7 @@
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be220..4e609e8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@
 	return 0;
 }
 
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= FMT_SPATIAL_DITHER_EN;
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec8..5dceea6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@
 	unsigned i;
 
 	kfree(parser->relocs);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 }
@@ -2391,13 +2386,12 @@
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib.length_dw = ib_chunk->length_dw;
 	*l = parser.ib.length_dw;
-	r = r600_cs_parse(&parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
+	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+		r = -EFAULT;
 		r600_cs_parser_fini(&parser, r);
 		return r;
 	}
-	r = radeon_cs_finish_pages(&parser);
+	r = r600_cs_parse(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
 		r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3..4b89262 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -24,6 +24,7 @@
  * Authors: Christian König
  */
 #include <linux/hdmi.h>
+#include <linux/gcd.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
@@ -57,35 +58,57 @@
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*	     32kHz	  44.1kHz	48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25.20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
-    {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };
 
-/*
- * calculate CTS value if it's not found in the table
- */
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
-{
-	u64 n;
-	u32 d;
 
-	if (*CTS == 0) {
-		n = (u64)clock * (u64)N * 1000ULL;
-		d = 128 * freq;
-		do_div(n, d);
-		*CTS = n;
-	}
-	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
-		  N, *CTS, freq);
+/*
+ * calculate CTS and N values if they are not found in the table
+ */
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
+{
+	int n, cts;
+	unsigned long div, mul;
+
+	/* Safe, but overly large values */
+	n = 128 * freq;
+	cts = clock * 1000;
+
+	/* Smallest valid fraction */
+	div = gcd(n, cts);
+
+	n /= div;
+	cts /= div;
+
+	/*
+	 * The optimal N is 128*freq/1000. Calculate the closest larger
+	 * value that doesn't truncate any bits.
+	 */
+	mul = ((128*freq/1000) + (n-1))/n;
+
+	n *= mul;
+	cts *= mul;
+
+	/* Check that we are in spec (not always possible) */
+	if (n < (128*freq/1500))
+		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+	if (n > (128*freq/300))
+		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+	*N = n;
+	*CTS = cts;
+
+	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+		  *N, *CTS, freq);
 }
 
 struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
@@ -93,15 +116,16 @@
 	struct radeon_hdmi_acr res;
 	u8 i;
 
-	for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
-	     r600_hdmi_predefined_acr[i].clock != 0; i++)
-		;
-	res = r600_hdmi_predefined_acr[i];
+	/* Precalculated values for common clocks */
+	for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+		if (r600_hdmi_predefined_acr[i].clock == clock)
+			return r600_hdmi_predefined_acr[i];
+	}
 
-	/* In case some CTS are missing */
-	r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
-	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
-	r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+	/* And odd clocks get manually calculated */
+	r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+	r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
 
 	return res;
 }
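
The new table rows and r600_hdmi_calc_cts() can be cross-checked by hand; for
the 25175 kHz pixel clock at 44.1 kHz audio (pure arithmetic, following the
code above):

/* n   = 128 * 44100            = 5644800
 * cts = 25175 * 1000           = 25175000
 * div = gcd(5644800, 25175000) = 200
 * n   = 5644800 / 200          = 28224
 * cts = 25175000 / 200         = 125875
 * mul = (128*44100/1000 + 28224 - 1) / 28224 = 1
 *
 * N = 28224, CTS = 125875 -- the same pair now hard-coded in
 * r600_hdmi_predefined_acr[], and it satisfies the HDMI clock-regeneration
 * relation 128 * fs = f_pixel * N / CTS:
 * 25175000 * 28224 / 125875 = 5644800 = 128 * 44100.
 */
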
@@ -313,8 +337,10 @@
 	return;
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -366,8 +392,10 @@
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -384,20 +412,30 @@
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32(eld_reg_to_type[i][0], value);
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5..ebe3872 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
 #       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
 #       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
 
+/* DCE3 FMT blocks */
+#define FMT_CONTROL                          0x6700
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6710
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x672c
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* Power management */
 #define CG_SPLL_FUNC_CNTL                                 0x600
 #       define SPLL_RESET                                (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 24f4960..b9ee992 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@
 extern int radeon_fastfb;
 extern int radeon_dpm;
 extern int radeon_aspm;
+extern int radeon_runtime_pm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@
 	/* sync_seq is protected by ring emission lock */
 	uint64_t			sync_seq[RADEON_NUM_RINGS];
 	atomic64_t			last_seq;
-	unsigned long			last_activity;
 	bool				initialized;
 };
 
@@ -832,6 +832,12 @@
 #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
 #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
 
+#define R600_PTE_VALID		(1 << 0)
+#define R600_PTE_SYSTEM		(1 << 1)
+#define R600_PTE_SNOOPED	(1 << 2)
+#define R600_PTE_READABLE	(1 << 5)
+#define R600_PTE_WRITEABLE	(1 << 6)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
@@ -967,12 +973,8 @@
 struct radeon_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	int			kpage_idx[2];
-	uint32_t		*kpage[2];
 	uint32_t		*kdata;
 	void __user		*user_ptr;
-	int			last_copied_page;
-	int			last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@
 	struct ww_acquire_ctx	ticket;
 };
 
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+	if (ibc->kdata)
+		return ibc->kdata[idx];
+	return p->ib.ptr[idx];
+}
+
 
 struct radeon_cs_packet {
 	unsigned	idx;
@@ -1675,8 +1684,6 @@
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-
-		u32 pt_ring_index;
 		void (*set_page)(struct radeon_device *rdev,
 				 struct radeon_ib *ib,
 				 uint64_t pe,
@@ -2170,6 +2177,7 @@
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
+	bool				needs_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@
 	/* clock, powergating flags */
 	u32 cg_flags;
 	u32 pg_flags;
+
+	struct dev_pm_domain vga_pm_domain;
+	bool have_disp_power_ref;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
 					     const u32 *registers,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e045..50853c0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &si_vm_set_page,
+		.set_page = &si_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &si_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2442,27 +2437,48 @@
 		}
 		break;
 	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
 		rdev->asic = &ci_asic;
 		rdev->num_crtc = 6;
 		rdev->has_uvd = true;
-		rdev->cg_flags =
-			RADEON_CG_SUPPORT_GFX_MGCG |
-			RADEON_CG_SUPPORT_GFX_MGLS |
-			/*RADEON_CG_SUPPORT_GFX_CGCG |*/
-			RADEON_CG_SUPPORT_GFX_CGLS |
-			RADEON_CG_SUPPORT_GFX_CGTS |
-			RADEON_CG_SUPPORT_GFX_CGTS_LS |
-			RADEON_CG_SUPPORT_GFX_CP_LS |
-			RADEON_CG_SUPPORT_MC_LS |
-			RADEON_CG_SUPPORT_MC_MGCG |
-			RADEON_CG_SUPPORT_SDMA_MGCG |
-			RADEON_CG_SUPPORT_SDMA_LS |
-			RADEON_CG_SUPPORT_BIF_LS |
-			RADEON_CG_SUPPORT_VCE_MGCG |
-			RADEON_CG_SUPPORT_UVD_MGCG |
-			RADEON_CG_SUPPORT_HDP_LS |
-			RADEON_CG_SUPPORT_HDP_MGCG;
-		rdev->pg_flags = 0;
+		if (rdev->family == CHIP_BONAIRE) {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		} else {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		}
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5..f2833ee 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -581,17 +581,18 @@
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe,
+			    uint64_t addr, unsigned count,
+			    uint32_t incr, uint32_t flags);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -705,6 +706,10 @@
 		 uint64_t src_offset, uint64_t dst_offset,
 		 unsigned num_gpu_pages,
 		 struct radeon_fence **fence);
+int cik_copy_cpdma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -731,11 +736,11 @@
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe,
+			  uint64_t addr, unsigned count,
+			  uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070b..6153ec1 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,6 +59,10 @@
 	u16 mux;
 } __packed;
 
+bool radeon_is_px(void) {
+	return radeon_atpx_priv.atpx_detected;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227..c155d6f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	fp2_gen_cntl = 0;
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 	}
 
@@ -536,7 +536,7 @@
 		(RADEON_CRTC_SYNC_TRISTAT |
 		 RADEON_CRTC_DISPLAY_DIS)));
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
 	}
 
@@ -554,7 +554,7 @@
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
 	}
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 	}
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 6456573..20a768a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#include <linux/pm_runtime.h>
+
 extern void
 radeon_combios_connected_scratch_regs(struct drm_connector *connector,
 				      struct drm_encoder *encoder,
@@ -411,6 +413,21 @@
 		}
 	}
 
+	if (property == rdev->mode_info.dither_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->dither != val) {
+			radeon_connector->dither = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
 	if (property == rdev->mode_info.underscan_property) {
 		/* need to find digital encoder on connector */
 		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	if (encoder) {
 		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@
 	/* check acpi lid status ??? */
 
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -750,6 +774,11 @@
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	bool dret = false;
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
@@ -790,9 +819,8 @@
 			 * detected a monitor via load.
 			 */
 			if (radeon_connector->detected_by_load)
-				return connector->status;
-			else
-				return ret;
+				ret = connector->status;
+			goto out;
 		}
 
 		if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -873,10 +906,15 @@
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
 
 	if (!radeon_connector->dac_load_detect)
 		return ret;
 
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
 		ret = connector_status_disconnected;
@@ -887,6 +925,8 @@
 	if (ret == connector_status_connected)
 		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -954,12 +994,18 @@
 	struct drm_encoder *encoder = NULL;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_mode_object *obj;
-	int i;
+	int i, r;
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret = false, broken_edid = false;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto exit;
+	}
 
 	if (radeon_connector->ddc_bus)
 		dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@
 
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
+
+exit:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1377,9 +1428,16 @@
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int r;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto out;
+	}
 
 	if (radeon_connector->edid) {
 		kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1658,12 +1720,16 @@
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
+
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+
 			if (radeon_audio != 0)
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
-							   (radeon_audio == 1) ?
-							   RADEON_AUDIO_AUTO :
-							   RADEON_AUDIO_DISABLE);
+							   RADEON_AUDIO_AUTO);
+
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
 			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1760,9 +1826,12 @@
 			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
-							   (radeon_audio == 1) ?
-							   RADEON_AUDIO_AUTO :
-							   RADEON_AUDIO_DISABLE);
+							   RADEON_AUDIO_AUTO);
+			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
 			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
@@ -1807,9 +1876,12 @@
 			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
-							   (radeon_audio == 1) ?
-							   RADEON_AUDIO_AUTO :
-							   RADEON_AUDIO_DISABLE);
+							   RADEON_AUDIO_AUTO);
+			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
 			}
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
@@ -1853,9 +1925,13 @@
 			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
-							   (radeon_audio == 1) ?
-							   RADEON_AUDIO_AUTO :
-							   RADEON_AUDIO_DISABLE);
+							   RADEON_AUDIO_AUTO);
+			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+
 			}
 			connector->interlace_allowed = true;
 			/* in theory with a DP to VGA converter... */
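Every detect() hunk above applies the same runtime-PM bracketing: take a reference before touching the hardware and drop it on every exit path, which is also why the early returns become "goto out"/"goto exit".  Reduced to a skeleton (a sketch; the probing itself is elided):

	static enum drm_connector_status
	sketch_detect(struct drm_connector *connector, bool force)
	{
		enum drm_connector_status status = connector_status_disconnected;
		int r;

		/* wake the GPU; report disconnected if that fails */
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;

		/* ... DDC probe / load detection ... */

		/* arm the autosuspend timer, then drop the reference */
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
		return status;
	}
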
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e3..26ca223 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -212,9 +212,7 @@
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].kdata = NULL;
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
 			p->chunk_relocs_idx = i;
 		}
@@ -237,25 +235,31 @@
 				return -EINVAL;
 		}
 
-		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
-		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
-			size = p->chunks[i].length_dw * sizeof(uint32_t);
-			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
-			if (p->chunks[i].kdata == NULL) {
-				return -ENOMEM;
-			}
-			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
-					       p->chunks[i].user_ptr, size)) {
-				return -EFAULT;
-			}
-			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-				p->cs_flags = p->chunks[i].kdata[0];
-				if (p->chunks[i].length_dw > 1)
-					ring = p->chunks[i].kdata[1];
-				if (p->chunks[i].length_dw > 2)
-					priority = (s32)p->chunks[i].kdata[2];
-			}
+		size = p->chunks[i].length_dw;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		p->chunks[i].user_ptr = cdata;
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+			continue;
+
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+				continue;
+		}
+
+		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		size *= sizeof(uint32_t);
+		if (p->chunks[i].kdata == NULL) {
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+			return -EFAULT;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->cs_flags = p->chunks[i].kdata[0];
+			if (p->chunks[i].length_dw > 1)
+				ring = p->chunks[i].kdata[1];
+			if (p->chunks[i].length_dw > 2)
+				priority = (s32)p->chunks[i].kdata[2];
 		}
 	}
 
@@ -278,34 +282,6 @@
 		}
 	}
 
-	/* deal with non-vm */
-	if ((p->chunk_ib_idx != -1) &&
-	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
-	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
-		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
-			DRM_ERROR("cs IB too big: %d\n",
-				  p->chunks[p->chunk_ib_idx].length_dw);
-			return -EINVAL;
-		}
-		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
-			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
-			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
-				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
-				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
-				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
-				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
-				return -ENOMEM;
-			}
-		}
-		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
-		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
-		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
-		p->chunks[p->chunk_ib_idx].last_page_index =
-			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
-	}
-
 	return 0;
 }
 
@@ -339,13 +315,8 @@
 	kfree(parser->track);
 	kfree(parser->relocs);
 	kfree(parser->relocs_ptr);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if ((parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +326,6 @@
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 			      struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	int r;
 
 	if (parser->chunk_ib_idx == -1)
@@ -364,28 +334,11 @@
 	if (parser->cs_flags & RADEON_CS_USE_VM)
 		return 0;
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	/* Copy the packet into the IB, the parser will read from the
-	 * input memory (cached) and write to the IB (which can be
-	 * uncached).
-	 */
-	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   NULL, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
 		return r;
 	}
-	r = radeon_cs_finish_pages(parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
-		return r;
-	}
 
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
@@ -423,7 +376,6 @@
 static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 				 struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
@@ -433,49 +385,13 @@
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
 
-	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
-		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
-		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
-			return -EINVAL;
-		}
-		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
-				   vm, ib_chunk->length_dw * 4);
-		if (r) {
-			DRM_ERROR("Failed to get const ib !\n");
-			return r;
-		}
-		parser->const_ib.is_const_ib = true;
-		parser->const_ib.length_dw = ib_chunk->length_dw;
-		/* Copy the packet into the IB */
-		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
-				       ib_chunk->length_dw * 4)) {
-			return -EFAULT;
-		}
+	if (parser->const_ib.length_dw) {
 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 		if (r) {
 			return r;
 		}
 	}
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
-		return -EINVAL;
-	}
-	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   vm, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
-	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
-			       ib_chunk->length_dw * 4)) {
-		return -EFAULT;
-	}
 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
@@ -527,6 +443,62 @@
 	return r;
 }
 
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_vm *vm = NULL;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM) {
+		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+		vm = &fpriv->vm;
+
+		if ((rdev->family >= CHIP_TAHITI) &&
+		    (parser->chunk_const_ib_idx != -1)) {
+			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+				return -EINVAL;
+			}
+			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+					   vm, ib_chunk->length_dw * 4);
+			if (r) {
+				DRM_ERROR("Failed to get const ib !\n");
+				return r;
+			}
+			parser->const_ib.is_const_ib = true;
+			parser->const_ib.length_dw = ib_chunk->length_dw;
+			if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+					       ib_chunk->user_ptr,
+					       ib_chunk->length_dw * 4))
+				return -EFAULT;
+		}
+
+		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+	}
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	if (ib_chunk->kdata)
+		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+	else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+		return -EFAULT;
+	return 0;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +524,15 @@
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
-	r = radeon_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS)
+
+	r = radeon_cs_ib_fill(rdev, &parser);
+	if (!r) {
+		r = radeon_cs_parser_relocs(&parser);
+		if (r && r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
+	}
+
+	if (r) {
 		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +556,6 @@
 	return r;
 }
 
-int radeon_cs_finish_pages(struct radeon_cs_parser *p)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-
-	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
-		if (i == ibc->last_page_index) {
-			size = (ibc->length_dw * 4) % PAGE_SIZE;
-			if (size == 0)
-				size = PAGE_SIZE;
-		}
-		
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       size))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
-{
-	int new_page;
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
-		false : true;
-
-	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       PAGE_SIZE)) {
-			p->parser_error = -EFAULT;
-			return 0;
-		}
-	}
-
-	if (pg_idx == ibc->last_page_index) {
-		size = (ibc->length_dw * 4) % PAGE_SIZE;
-		if (size == 0)
-			size = PAGE_SIZE;
-	}
-
-	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-	if (copy1)
-		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
-
-	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
-			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
-			       size)) {
-		p->parser_error = -EFAULT;
-		return 0;
-	}
-
-	/* copy to IB for non single case */
-	if (!copy1)
-		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
-
-	ibc->last_copied_page = pg_idx;
-	ibc->kpage_idx[new_page] = pg_idx;
-
-	return new_page;
-}
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	u32 pg_idx, pg_offset;
-	u32 idx_value = 0;
-	int new_page;
-
-	pg_idx = (idx * 4) / PAGE_SIZE;
-	pg_offset = (idx * 4) % PAGE_SIZE;
-
-	if (ibc->kpage_idx[0] == pg_idx)
-		return ibc->kpage[0][pg_offset/4];
-	if (ibc->kpage_idx[1] == pg_idx)
-		return ibc->kpage[1][pg_offset/4];
-
-	new_page = radeon_cs_update_pages(p, pg_idx);
-	if (new_page < 0) {
-		p->parser_error = new_page;
-		return 0;
-	}
-
-	idx_value = ibc->kpage[new_page][pg_offset/4];
-	return idx_value;
-}
-
 /**
  * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser:	parser structure holding parsing context.
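Taken together, the radeon_cs.c hunks replace the old demand-paged IB copying (kpage[], last_copied_page, radeon_cs_update_pages()) with a single up-front copy per chunk, which is why radeon_get_ib_value() and its page-juggling helpers can be deleted wholesale.  The core of the new scheme, condensed from the radeon_cs_parser_init() hunk above:

	size = p->chunks[i].length_dw;
	/* drm_malloc_ab() falls back to vmalloc for big allocations,
	 * so large IBs need no contiguous pages */
	p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
	if (p->chunks[i].kdata == NULL)
		return -ENOMEM;
	if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata,
			       size * sizeof(uint32_t)))
		return -EFAULT;
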
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e0..b9234c4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,9 +98,16 @@
 	"BONAIRE",
 	"KAVERI",
 	"KABINI",
+	"HAWAII",
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
 /**
  * radeon_program_register_sequence - program an array of registers.
  *
@@ -1076,7 +1083,10 @@
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
+		return;
+
 	if (state == VGA_SWITCHEROO_ON) {
 		unsigned d3_delay = dev->pdev->d3_delay;
 
@@ -1087,7 +1097,7 @@
 		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
 			dev->pdev->d3_delay = 20;
 
-		radeon_resume_kms(dev);
+		radeon_resume_kms(dev, true, true);
 
 		dev->pdev->d3_delay = d3_delay;
 
@@ -1097,7 +1107,7 @@
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		radeon_suspend_kms(dev, pmm);
+		radeon_suspend_kms(dev, true, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1147,6 +1157,7 @@
 {
 	int r, i;
 	int dma_bits;
+	bool runtime = false;
 
 	rdev->shutdown = false;
 	rdev->dev = &pdev->dev;
@@ -1293,7 +1304,14 @@
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
-	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
+
+	if (radeon_runtime_pm == 1)
+		runtime = true;
+	if ((radeon_runtime_pm == -1) && radeon_is_px())
+		runtime = true;
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+	if (runtime)
+		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
 
 	r = radeon_init(rdev);
 	if (r)
@@ -1383,7 +1401,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 {
 	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
@@ -1394,9 +1412,7 @@
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
 	}
-	if (state.event == PM_EVENT_PRETHAW) {
-		return 0;
-	}
+
 	rdev = dev->dev_private;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1471,17 @@
 	radeon_agp_suspend(rdev);
 
 	pci_save_state(dev->pdev);
-	if (state.event == PM_EVENT_SUSPEND) {
+	if (suspend) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
-	console_lock();
-	radeon_fbdev_set_suspend(rdev, 1);
-	console_unlock();
+
+	if (fbcon) {
+		console_lock();
+		radeon_fbdev_set_suspend(rdev, 1);
+		console_unlock();
+	}
 	return 0;
 }
 
@@ -1475,7 +1494,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int radeon_resume_kms(struct drm_device *dev)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1503,17 @@
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	console_lock();
-	pci_set_power_state(dev->pdev, PCI_D0);
-	pci_restore_state(dev->pdev);
-	if (pci_enable_device(dev->pdev)) {
-		console_unlock();
-		return -1;
+	if (fbcon) {
+		console_lock();
+	}
+	if (resume) {
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		if (pci_enable_device(dev->pdev)) {
+			if (fbcon)
+				console_unlock();
+			return -1;
+		}
 	}
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
@@ -1502,9 +1526,11 @@
 	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);
 
-	radeon_fbdev_set_suspend(rdev, 0);
-	console_unlock();
-
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1549,6 +1575,14 @@
 	int resched;
 
 	down_write(&rdev->exclusive_lock);
+
+	if (!rdev->needs_reset) {
+		up_write(&rdev->exclusive_lock);
+		return 0;
+	}
+
+	rdev->needs_reset = false;
+
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
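The needs_reset test added to radeon_gpu_reset() is one half of a handshake with the fence-wait rework in radeon_fence.c below: the waiter that detects a lockup raises the flag and wakes all waiters, and only the first thread through the exclusive lock performs the reset while latecomers return early.  Both halves, side by side as a sketch:

	/* waiter side (radeon_fence_wait_seq), on a detected lockup: */
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	return -EDEADLK;

	/* reset side (radeon_gpu_reset): */
	down_write(&rdev->exclusive_lock);
	if (!rdev->needs_reset) {
		/* somebody else already handled the reset */
		up_write(&rdev->exclusive_lock);
		return 0;
	}
	rdev->needs_reset = false;
	/* ... perform the actual reset ... */
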
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa05..7b25381 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
 #include "atom.h"
 #include <asm/div64.h>
 
+#include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
@@ -306,7 +307,7 @@
 	 */
 	if (update_pending &&
 	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
-							       &vpos, &hpos)) &&
+							       &vpos, &hpos, NULL, NULL)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
 	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
 		/* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@
 	return r;
 }
 
+static int
+radeon_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	bool active = false;
+	int ret;
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_crtc_helper_set_config(set);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->enabled)
+			active = true;
+
+	pm_runtime_mark_last_busy(dev->dev);
+
+	rdev = dev->dev_private;
+	/* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+	if (active && !rdev->have_disp_power_ref) {
+		rdev->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	   we got before */
+	if (!active && rdev->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		rdev->have_disp_power_ref = false;
+	}
+
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.cursor_set = radeon_crtc_cursor_set,
 	.cursor_move = radeon_crtc_cursor_move,
 	.gamma_set = radeon_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
 	.page_flip = radeon_crtc_page_flip,
 };
@@ -1178,6 +1223,12 @@
 	{ RADEON_AUDIO_AUTO, "auto" },
 };
 
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list radeon_dither_enum_list[] =
+{	{ RADEON_FMT_DITHER_DISABLE, "off" },
+	{ RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
 static int radeon_modeset_create_props(struct radeon_device *rdev)
 {
 	int sz;
@@ -1234,6 +1285,12 @@
 					 "audio",
 					 radeon_audio_enum_list, sz);
 
+	sz = ARRAY_SIZE(radeon_dither_enum_list);
+	rdev->mode_info.dither_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "dither",
+					 radeon_dither_enum_list, sz);
+
 	return 0;
 }
 
@@ -1539,12 +1596,17 @@
 }
 
 /*
- * Retrieve current video scanout position of crtc on a given gpu.
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
  *
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ *               scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ *               scanout position query. Can be NULL to skip timestamp.
  *
  * Returns vpos as a positive number while in active scanout area.
  * Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@
 
 	struct radeon_device *rdev = dev->dev_private;
 
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@
 		}
 	}
 
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
 	/* Decode into vertical and horizontal scanout position. */
 	*vpos = position & 0x1fff;
 	*hpos = (position >> 16) & 0x1fff;
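The two new ktime_t parameters let callers bound the moment the position registers were actually sampled, which DRM's vblank timestamping needs; both are optional and may be NULL.  The bracketing is deliberately tight around the register reads, as in this sketch (reg stands in for the per-ASIC position register offset):

	/* optional timestamp immediately before the query */
	if (stime)
		*stime = ktime_get();

	position = RREG32(reg);

	/* optional timestamp immediately after the query */
	if (etime)
		*etime = ktime_get();
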
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1b..1aee322 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
 #include <drm/drm_pciids.h>
 #include <linux/console.h>
 #include <linux/module.h>
-
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
 /*
  * KMS wrapper.
  * - 2.0.0 - initial interface
@@ -87,8 +88,8 @@
 				 struct drm_file *file_priv);
 void radeon_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-int radeon_resume_kms(struct drm_device *dev);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -100,14 +101,14 @@
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-				      int *vpos, int *hpos);
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -137,9 +138,11 @@
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
+bool radeon_is_px(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_is_px(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -162,6 +165,7 @@
 int radeon_fastfb = 0;
 int radeon_dpm = -1;
 int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -223,6 +227,9 @@
 MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(aspm, radeon_aspm, int, 0444);
 
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -259,6 +266,7 @@
 	return 0;
 }
 
+
 static const struct file_operations radeon_driver_old_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -353,25 +361,144 @@
 	drm_put_dev(dev);
 }
 
-static int
-radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int radeon_pmops_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_suspend_kms(dev, state);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, true, true);
 }
 
-static int
-radeon_pci_resume(struct pci_dev *pdev)
+static int radeon_pmops_resume(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_resume_kms(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, true, true);
 }
 
+static int radeon_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	drm_kms_helper_poll_disable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+	ret = radeon_suspend_kms(drm_dev, false, false);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3cold);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+	return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = radeon_resume_kms(drm_dev, false, false);
+	drm_kms_helper_poll_enable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	return 0;
+}
+
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_crtc *crtc;
+
+	if (radeon_runtime_pm == 0)
+		return -EBUSY;
+
+	/* are we PX enabled? */
+	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+		return -EBUSY;
+	}
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		if (crtc->enabled) {
+			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	return 1;
+}
+
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	long ret;
+	dev = file_priv->minor->dev;
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const struct dev_pm_ops radeon_pm_ops = {
+	.suspend = radeon_pmops_suspend,
+	.resume = radeon_pmops_resume,
+	.freeze = radeon_pmops_freeze,
+	.thaw = radeon_pmops_thaw,
+	.poweroff = radeon_pmops_freeze,
+	.restore = radeon_pmops_resume,
+	.runtime_suspend = radeon_pmops_runtime_suspend,
+	.runtime_resume = radeon_pmops_runtime_resume,
+	.runtime_idle = radeon_pmops_runtime_idle,
+};
+
 static const struct file_operations radeon_driver_kms_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = radeon_drm_ioctl,
 	.mmap = radeon_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
@@ -380,6 +507,15 @@
 #endif
 };
 
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	radeon_driver_unload_kms(dev);
+}
+
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -392,8 +528,6 @@
 	.postclose = radeon_driver_postclose_kms,
 	.lastclose = radeon_driver_lastclose_kms,
 	.unload = radeon_driver_unload_kms,
-	.suspend = radeon_suspend_kms,
-	.resume = radeon_resume_kms,
 	.get_vblank_counter = radeon_get_vblank_counter_kms,
 	.enable_vblank = radeon_enable_vblank_kms,
 	.disable_vblank = radeon_disable_vblank_kms,
@@ -408,7 +542,6 @@
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
 	.irq_handler = radeon_driver_irq_handler_kms,
 	.ioctls = radeon_ioctls_kms,
-	.gem_init_object = radeon_gem_object_init,
 	.gem_free_object = radeon_gem_object_free,
 	.gem_open_object = radeon_gem_object_open,
 	.gem_close_object = radeon_gem_object_close,
@@ -451,8 +584,8 @@
 	.id_table = pciidlist,
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
-	.suspend = radeon_pci_suspend,
-	.resume = radeon_pci_resume,
+	.driver.pm = &radeon_pm_ops,
+	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
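Dropping the legacy pci_driver .suspend/.resume hooks in favor of .driver.pm is the standard migration to dev_pm_ops, and it is what makes the distinct freeze/thaw (hibernation) and runtime_* callbacks above possible at all.  The general shape, with hypothetical foo_* callbacks standing in; radeon fills the fields out explicitly rather than using the helper macros because freeze must not put the device into D3:

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
				   foo_runtime_idle)
	};

	static struct pci_driver foo_pci_driver = {
		/* no more .suspend/.resume function pointers */
		.driver.pm = &foo_pm_ops,
	};
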
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42..543dcfa 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
 #define DRIVER_MINOR		33
 #define DRIVER_PATCHLEVEL	0
 
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg);
+
 /* The rest of the file is DEPRECATED! */
 #ifdef CONFIG_DRM_RADEON_UMS
 
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 3c82890..614ad54 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -96,6 +96,7 @@
 	CHIP_BONAIRE,
 	CHIP_KAVERI,
 	CHIP_KABINI,
+	CHIP_HAWAII,
 	CHIP_LAST,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e..281d14c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
-	if (wake) {
-		rdev->fence_drv[ring].last_activity = jiffies;
+	if (wake)
 		wake_up_all(&rdev->fence_queue);
-	}
 }
 
 /**
@@ -212,13 +210,13 @@
 }
 
 /**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
  *
  * @rdev: radeon device pointer
  * @seq: sequence number
  * @ring: ring index the fence is associated with
  *
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequence number is >= the requested
  * sequence number (all asics).
  * Returns true if the fence has signaled (current fence value
  * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@
 }
 
 /**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
  *
  * @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
  * @lock_ring: whether the ring should be locked or not
  *
- * Wait for the requested sequence number to be written (all asics).
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
  * @intr selects whether to use interruptable (true) or non-interruptable
  * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
  * Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
  */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
-				 unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+				 bool intr, bool lock_ring)
 {
-	unsigned long timeout, last_activity;
-	uint64_t seq;
-	unsigned i;
+	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
-	int r;
+	int i, r;
 
-	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
-		if (!rdev->ring[ring].ready) {
-			return -EBUSY;
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+
+		/* Save current sequence values, used to check for GPU lockups */
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+			radeon_irq_kms_sw_irq_get(rdev, i);
 		}
 
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = rdev->fence_drv[ring].last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
-		/* Save current last activity valuee, used to check for GPU lockups */
-		last_activity = rdev->fence_drv[ring].last_activity;
-
-		trace_radeon_fence_wait_begin(rdev->ddev, seq);
-		radeon_irq_kms_sw_irq_get(rdev, ring);
 		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
-                } else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
+			r = wait_event_interruptible_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
 		}
-		radeon_irq_kms_sw_irq_put(rdev, ring);
-		if (unlikely(r < 0)) {
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			radeon_irq_kms_sw_irq_put(rdev, i);
+			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+		}
+
+		if (unlikely(r < 0))
 			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, seq);
 
 		if (unlikely(!signaled)) {
+			if (rdev->needs_reset)
+				return -EDEADLK;
+
 			/* we were interrupted for some reason and fence
 			 * isn't signaled yet, resume waiting */
-			if (r) {
+			if (r)
 				continue;
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+					break;
 			}
 
-			/* check if sequence value has changed since last_activity */
-			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+			if (i != RADEON_NUM_RINGS)
 				continue;
-			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_lock(&rdev->ring_lock);
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+					break;
 			}
 
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != rdev->fence_drv[ring].last_activity) {
-				if (lock_ring) {
-					mutex_unlock(&rdev->ring_lock);
-				}
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+			if (i < RADEON_NUM_RINGS) {
 				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
-					 target_seq, seq);
+				dev_warn(rdev->dev, "GPU lockup (waiting for "
+					 "0x%016llx last fence id 0x%016llx on"
+					 " ring %d)\n",
+					 target_seq[i], last_seq[i], i);
 
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				if (lock_ring) {
+				/* remember that we need a reset */
+				rdev->needs_reset = true;
+				if (lock_ring)
 					mutex_unlock(&rdev->ring_lock);
-				}
+				wake_up_all(&rdev->fence_queue);
 				return -EDEADLK;
 			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_unlock(&rdev->ring_lock);
-			}
 		}
 	}
 	return 0;
@@ -388,6 +404,7 @@
  */
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
 	if (fence == NULL) {
@@ -395,150 +412,18 @@
 		return -EINVAL;
 	}
 
-	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
-				  fence->ring, intr, true);
-	if (r) {
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	if (r)
 		return r;
-	}
+
 	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
 	return 0;
 }
 
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
-				     u64 *target_seq, bool intr)
-{
-	unsigned long timeout, last_activity, tmp;
-	unsigned i, ring = RADEON_NUM_RINGS;
-	bool signaled;
-	int r;
-
-	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (!target_seq[i]) {
-			continue;
-		}
-
-		/* use the most recent one as indicator */
-		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
-			last_activity = rdev->fence_drv[i].last_activity;
-		}
-
-		/* For lockup detection just pick the lowest ring we are
-		 * actively waiting for
-		 */
-		if (i < ring) {
-			ring = i;
-		}
-	}
-
-	/* nothing to wait for ? */
-	if (ring == RADEON_NUM_RINGS) {
-		return -ENOENT;
-	}
-
-	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-
-		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_get(rdev, i);
-			}
-		}
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		}
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_put(rdev, i);
-			}
-		}
-		if (unlikely(r < 0)) {
-			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
-
-		if (unlikely(!signaled)) {
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r) {
-				continue;
-			}
-
-			mutex_lock(&rdev->ring_lock);
-			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
-					tmp = rdev->fence_drv[i].last_activity;
-				}
-			}
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != tmp) {
-				last_activity = tmp;
-				mutex_unlock(&rdev->ring_lock);
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
-					 target_seq[ring]);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				mutex_unlock(&rdev->ring_lock);
-				return -EDEADLK;
-			}
-			mutex_unlock(&rdev->ring_lock);
-		}
-	}
-	return 0;
-}
-
 /**
  * radeon_fence_wait_any - wait for a fence to signal on any ring
  *
@@ -557,7 +442,7 @@
 			  bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS];
-	unsigned i;
+	unsigned i, num_rings = 0;
 	int r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@
 			continue;
 		}
 
-		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
-			/* something was allready signaled */
-			return 0;
-		}
-
 		seq[i] = fences[i]->seq;
+		++num_rings;
+
+		/* test if something was already signaled */
+		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+			return 0;
 	}
 
-	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	/* nothing to wait for ? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = radeon_fence_wait_seq(rdev, seq, intr, true);
 	if (r) {
 		return r;
 	}
@@ -594,15 +483,15 @@
  */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq;
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 
-	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
-	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false, false);
 }
 
 /**
@@ -617,14 +506,18 @@
  */
 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
-	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+	if (!seq[ring])
+		return 0;
+
+	r = radeon_fence_wait_seq(rdev, seq, false, false);
 	if (r) {
-		if (r == -EDEADLK) {
+		if (r == -EDEADLK)
 			return -EDEADLK;
-		}
+
 		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
 			ring, r);
 	}
@@ -826,7 +719,6 @@
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
-	rdev->fence_drv[ring].last_activity = jiffies;
 	rdev->fence_drv[ring].initialized = false;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a..8a83b89 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	unsigned pd_size, pts_size;
-	u64 *pd_addr;
+	unsigned pd_size, pd_entries, pts_size;
+	struct radeon_ib ib;
 	int r;
 
 	if (vm == NULL) {
@@ -619,8 +619,10 @@
 		return 0;
 	}
 
-retry:
 	pd_size = radeon_vm_directory_size(rdev);
+	pd_entries = radeon_vm_num_pdes(rdev);
+
+retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
 			     &vm->page_directory, pd_size,
 			     RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@
 	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
 	/* Initially clear the page directory */
-	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
-	memset(pd_addr, 0, pd_size);
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+			  NULL, pd_entries * 2 + 64);
+	if (r) {
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return r;
+	}
 
+	ib.length_dw = 0;
+
+	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+				0, pd_entries, 0, 0);
+
+	radeon_ib_sync_to(&ib, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return r;
+	}
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
+	radeon_fence_unref(&vm->last_flush);
+
+	/* allocate page table array */
 	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
 	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
@@ -914,6 +938,26 @@
 }
 
 /**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+	uint32_t hw_flags = 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		hw_flags |= R600_PTE_SYSTEM;
+		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return hw_flags;
+}
+
+/**
  * radeon_vm_update_pdes - make sure that page directory is valid
  *
  * @rdev: radeon_device pointer
@@ -974,7 +1018,11 @@
 			if (count) {
 				radeon_asic_vm_set_page(rdev, ib, last_pde,
 							last_pt, count, incr,
-							RADEON_VM_PAGE_VALID);
+							R600_PTE_VALID);
+
+				count *= RADEON_VM_PTE_COUNT;
+				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+							count, 0, 0);
 			}
 
 			count = 1;
@@ -987,8 +1035,11 @@
 
 	if (count) {
 		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-					incr, RADEON_VM_PAGE_VALID);
+					incr, R600_PTE_VALID);
 
+		count *= RADEON_VM_PTE_COUNT;
+		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+					count, 0, 0);
 	}
 
 	return 0;
@@ -1082,7 +1133,6 @@
 			    struct radeon_bo *bo,
 			    struct ttm_mem_reg *mem)
 {
-	unsigned ridx = rdev->asic->vm.pt_ring_index;
 	struct radeon_ib ib;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,14 @@
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
+	/* reserve space for clearing new page tables */
+	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
+
 	/* update too big for an IB */
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
 	ib.length_dw = 0;
 
 	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1218,7 @@
 	}
 
 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-			      addr, bo_va->flags);
+			      addr, radeon_vm_page_flags(bo_va->flags));
 
 	radeon_ib_sync_to(&ib, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8..805c5e5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8..bdb0f93 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	ret = drm_ioctl(filp, cmd, arg);
+	ret = radeon_drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e848..ec6240b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#include <linux/pm_runtime.h>
+
 #define RADEON_WAIT_IDLE_TIMEOUT 200
 
 /**
@@ -47,8 +49,12 @@
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct radeon_device *rdev = dev->dev_private;
+	irqreturn_t ret;
 
-	return radeon_irq_process(rdev);
+	ret = radeon_irq_process(rdev);
+	if (ret == IRQ_HANDLED)
+		pm_runtime_mark_last_busy(dev->dev);
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580dd..bb87105 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
-
+#include <linux/pm_runtime.h>
 /**
  * radeon_driver_unload_kms - Main unload function for KMS.
  *
@@ -50,9 +50,14 @@
 
 	if (rdev == NULL)
 		return 0;
+
 	if (rdev->rmmio == NULL)
 		goto done_free;
+
+	pm_runtime_get_sync(dev->dev);
+
 	radeon_acpi_fini(rdev);
+
 	radeon_modeset_fini(rdev);
 	radeon_device_fini(rdev);
 
@@ -125,9 +130,20 @@
 				"Error during ACPI methods call\n");
 	}
 
+	if (radeon_runtime_pm != 0) {
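+		/* enable autosuspend with a 5 second delay */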
+		pm_runtime_use_autosuspend(dev->dev);
+		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+		pm_runtime_set_active(dev->dev);
+		pm_runtime_allow(dev->dev);
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
+	}
+
 out:
 	if (r)
 		radeon_driver_unload_kms(dev);
+
 	return r;
 }
 
@@ -191,7 +207,7 @@
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
-		*value = dev->pci_device;
+		*value = dev->pdev->device;
 		break;
 	case RADEON_INFO_NUM_GB_PIPES:
 		*value = rdev->num_gb_pipes;
@@ -475,9 +491,14 @@
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	int r;
 
 	file_priv->driver_priv = NULL;
 
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
+
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
@@ -506,6 +527,9 @@
 
 		file_priv->driver_priv = fpriv;
 	}
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7cb178a..0c7b8c6 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1056,6 +1056,26 @@
 	}
 }
 
+static void radeon_crtc_disable(struct drm_crtc *crtc)
+{
+	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->fb) {
+		int r;
+		struct radeon_framebuffer *radeon_fb;
+		struct radeon_bo *rbo;
+
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		} else {
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
+	}
+}
+
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
 	.dpms = radeon_crtc_dpms,
 	.mode_fixup = radeon_crtc_mode_fixup,
@@ -1065,6 +1085,7 @@
 	.prepare = radeon_crtc_prepare,
 	.commit = radeon_crtc_commit,
 	.load_lut = radeon_crtc_load_lut,
+	.disable = radeon_crtc_disable
 };
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512..c89971d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f..3f0dd66 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@
 	struct drm_property *underscan_vborder_property;
 	/* audio */
 	struct drm_property *audio_property;
+	/* FMT dithering */
+	struct drm_property *dither_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@
 	RADEON_AUDIO_AUTO = 2
 };
 
+enum radeon_connector_dither {
+	RADEON_FMT_DITHER_DISABLE = 0,
+	RADEON_FMT_DITHER_ENABLE = 1,
+};
+
 struct radeon_connector {
 	struct drm_connector base;
 	uint32_t connector_id;
@@ -498,6 +505,7 @@
 	struct radeon_router router;
 	struct radeon_i2c_chan *router_bus;
 	enum radeon_connector_audio audio;
+	enum radeon_connector_dither dither;
 };
 
 struct radeon_framebuffer {
@@ -758,7 +766,8 @@
 				   int x, int y);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-				      int *vpos, int *hpos);
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
 
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
@@ -850,6 +859,12 @@
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode);
 
+/* fmt blocks */
+void avivo_program_fmt(struct drm_encoder *encoder);
+void dce3_program_fmt(struct drm_encoder *encoder);
+void dce4_program_fmt(struct drm_encoder *encoder);
+void dce8_program_fmt(struct drm_encoder *encoder);
+
 /* fbdev layer */
 int radeon_fbdev_init(struct radeon_device *rdev);
 void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc..866ace0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@
 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
 		level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	} else {
-		mutex_unlock(&rdev->pm.mutex);
 		count = -EINVAL;
 		goto fail;
 	}
 	if (rdev->asic->dpm.force_performance_level) {
+		if (rdev->pm.dpm.thermal_active) {
+			count = -EINVAL;
+			goto fail;
+		}
 		ret = radeon_dpm_force_performance_level(rdev, level);
 		if (ret)
 			count = -EINVAL;
 	}
-	mutex_unlock(&rdev->pm.mutex);
 fail:
+	mutex_unlock(&rdev->pm.mutex);
+
 	return count;
 }
 
@@ -881,11 +885,12 @@
 		}
 	}
 
-	printk("switching from power state:\n");
-	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
-	printk("switching to power state:\n");
-	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
-
+	if (radeon_dpm == 1) {
+		printk("switching from power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+		printk("switching to power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+	}
 	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@
 	radeon_dpm_post_set_power_state(rdev);
 
 	if (rdev->asic->dpm.force_performance_level) {
-		if (rdev->pm.dpm.thermal_active)
+		if (rdev->pm.dpm.thermal_active) {
+			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 			/* force low perf level for thermal */
 			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
-		else
-			/* otherwise, enable auto */
-			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+			/* save the user's level */
+			rdev->pm.dpm.forced_level = level;
+		} else {
+			/* otherwise, user selected level */
+			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
+		}
 	}
 
 done:
@@ -1179,7 +1188,8 @@
 	mutex_lock(&rdev->pm.mutex);
 	radeon_dpm_init(rdev);
 	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
-	radeon_dpm_print_power_states(rdev);
+	if (radeon_dpm == 1)
+		radeon_dpm_print_power_states(rdev);
 	radeon_dpm_setup_asic(rdev);
 	ret = radeon_dpm_enable(rdev);
 	mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,24 @@
 	case CHIP_RV670:
 	case CHIP_RS780:
 	case CHIP_RS880:
+	case CHIP_CAYMAN:
+	case CHIP_ARUBA:
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 1)
+			rdev->pm.pm_method = PM_METHOD_DPM;
+		else
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		break;
 	case CHIP_RV770:
 	case CHIP_RV730:
 	case CHIP_RV710:
@@ -1256,16 +1284,11 @@
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
-	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
-	case CHIP_BONAIRE:
-	case CHIP_KABINI:
-	case CHIP_KAVERI:
 		/* DPM requires the RLC, RV770+ dGPU requires SMC */
 		if (!rdev->rlc_fw)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1296,10 @@
 			 (!(rdev->flags & RADEON_IS_IGP)) &&
 			 (!rdev->smc_fw))
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
-		else if (radeon_dpm == 1)
-			rdev->pm.pm_method = PM_METHOD_DPM;
-		else
+		else if (radeon_dpm == 0)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else
+			rdev->pm.pm_method = PM_METHOD_DPM;
 		break;
 	default:
 		/* default to profile method */
@@ -1468,7 +1491,7 @@
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
 			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
 				in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e3678..811bca6 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@
 		      __entry->fences)
 );
 
+TRACE_EVENT(radeon_vm_set_page,
+	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+		     uint32_t incr, uint32_t flags),
+	    TP_ARGS(pe, addr, count, incr, flags),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, addr)
+			     __field(u32, count)
+			     __field(u32, incr)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->addr = addr;
+			   __entry->count = count;
+			   __entry->incr = incr;
+			   __entry->flags = flags;
+			   ),
+	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+		      __entry->pe, __entry->addr, __entry->incr,
+		      __entry->flags, __entry->count)
+);
+
 DECLARE_EVENT_CLASS(radeon_fence_request,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 3385836..a77cd27 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -59,6 +59,7 @@
 #define SI_MC_UCODE_SIZE             7769
 #define OLAND_MC_UCODE_SIZE          7863
 #define CIK_MC_UCODE_SIZE            7866
+#define HAWAII_MC_UCODE_SIZE         7933
 
 /* SDMA */
 #define CIK_SDMA_UCODE_SIZE          1050
@@ -143,4 +144,7 @@
 #define BONAIRE_SMC_UCODE_START      0x20000
 #define BONAIRE_SMC_UCODE_SIZE       0x1FDEC
 
+#define HAWAII_SMC_UCODE_START       0x20000
+#define HAWAII_SMC_UCODE_SIZE        0x1FDEC
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5..373d088 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -97,6 +97,7 @@
 	case CHIP_BONAIRE:
 	case CHIP_KABINI:
 	case CHIP_KAVERI:
+	case CHIP_HAWAII:
 		fw_name = FIRMWARE_BONAIRE;
 		break;
 
@@ -240,6 +241,8 @@
 		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
 
+			radeon_uvd_note_usage(rdev);
+
 			r = radeon_uvd_get_destroy_msg(rdev,
 				R600_RING_TYPE_UVD_INDEX, handle, &fence);
 			if (r) {
@@ -620,7 +623,7 @@
 	if (r) 
 		goto err;
 
-	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 	if (r)
 		goto err;
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba80..76cc8d3 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@
 	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
 }
 
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+		else
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1447d79..1c56062 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -345,9 +345,11 @@
 		if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
 			rdev->pm.sideport_bandwidth.full)
 			max_bandwidth = rdev->pm.sideport_bandwidth;
-		read_delay_latency.full = dfixed_const(370 * 800 * 1000);
-		read_delay_latency.full = dfixed_div(read_delay_latency,
-			rdev->pm.igp_sideport_mclk);
+		read_delay_latency.full = dfixed_const(370 * 800);
+		a.full = dfixed_const(1000);
+		b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
+		read_delay_latency.full = dfixed_div(read_delay_latency, b);
+		read_delay_latency.full = dfixed_mul(read_delay_latency, a);
 	} else {
 		if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
 			rdev->pm.k8_bandwidth.full)
@@ -488,14 +490,10 @@
 		}
 		if (wm0->priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark.full;
-		if (dfixed_trunc(priority_mark02) < 0)
-			priority_mark02.full = 0;
 		if (wm0->priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark_max.full;
 		if (wm1->priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark.full;
-		if (dfixed_trunc(priority_mark12) < 0)
-			priority_mark12.full = 0;
 		if (wm1->priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark_max.full;
 		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -526,8 +524,6 @@
 		}
 		if (wm0->priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark.full;
-		if (dfixed_trunc(priority_mark02) < 0)
-			priority_mark02.full = 0;
 		if (wm0->priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark_max.full;
 		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -555,8 +551,6 @@
 		}
 		if (wm1->priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark.full;
-		if (dfixed_trunc(priority_mark12) < 0)
-			priority_mark12.full = 0;
 		if (wm1->priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark_max.full;
 		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b..5d1c316 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1155,14 +1155,10 @@
 		}
 		if (wm0->priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark.full;
-		if (dfixed_trunc(priority_mark02) < 0)
-			priority_mark02.full = 0;
 		if (wm0->priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark_max.full;
 		if (wm1->priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark.full;
-		if (dfixed_trunc(priority_mark12) < 0)
-			priority_mark12.full = 0;
 		if (wm1->priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark_max.full;
 		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1193,8 +1189,6 @@
 		}
 		if (wm0->priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark.full;
-		if (dfixed_trunc(priority_mark02) < 0)
-			priority_mark02.full = 0;
 		if (wm0->priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0->priority_mark_max.full;
 		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1222,8 +1216,6 @@
 		}
 		if (wm1->priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark.full;
-		if (dfixed_trunc(priority_mark12) < 0)
-			priority_mark12.full = 0;
 		if (wm1->priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1->priority_mark_max.full;
 		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d27..26633a0 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@
 	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
 }
 
-static u64 rv6xx_clocks_per_unit(u32 unit)
+static u32 rv6xx_clocks_per_unit(u32 unit)
 {
-	u64 tmp = 1 << (2 * unit);
+	u32 tmp = 1 << (2 * unit);
 
 	return tmp;
 }
@@ -417,7 +417,7 @@
 static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
 					u32 unscaled_count, u32 unit)
 {
-	u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
+	u32 count_per_unit = rv6xx_clocks_per_unit(unit);
 
 	return (unscaled_count + count_per_unit - 1) / count_per_unit;
 }
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cb..6a64cca 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@
 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void si_dma_vm_set_page(struct radeon_device *rdev,
-			       struct radeon_ib *ib,
-			       uint64_t pe,
-			       uint64_t addr, unsigned count,
-			       uint32_t incr, uint32_t flags);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
 					 bool enable);
 static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@
 	       block, mc_id);
 }
 
-/**
- * si_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (SI).
- */
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		while (count) {
-			ndw = 2 + count * 2;
-			if (ndw > 0x3FFE)
-				ndw = 0x3FFE;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-					WRITE_DATA_DST_SEL(1));
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		/* DMA */
-		si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
@@ -5372,52 +5312,53 @@
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
-	buffer[count++] = 0x80000000;
-	buffer[count++] = 0x80000000;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
 
 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
 		for (ext = sect->section; ext->extent != NULL; ++ext) {
 			if (sect->id == SECT_CONTEXT) {
-				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
-				buffer[count++] = ext->reg_index - 0xa000;
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
 				for (i = 0; i < ext->reg_count; i++)
-					buffer[count++] = ext->extent[i];
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
 			} else {
 				return;
 			}
 		}
 	}
 
-	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
-	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (rdev->family) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
-		buffer[count++] = 0x2a00126a;
+		buffer[count++] = cpu_to_le32(0x2a00126a);
 		break;
 	case CHIP_VERDE:
-		buffer[count++] = 0x0000124a;
+		buffer[count++] = cpu_to_le32(0x0000124a);
 		break;
 	case CHIP_OLAND:
-		buffer[count++] = 0x00000082;
+		buffer[count++] = cpu_to_le32(0x00000082);
 		break;
 	case CHIP_HAINAN:
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	default:
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	}
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
-	buffer[count++] = 0;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
 }
 
 static void si_init_pg(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d2..8e8f461 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "sid.h"
 
 u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@
 			uint64_t addr, unsigned count,
 			uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@
 			ib->ptr[ib->length_dw++] = pe;
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -110,7 +106,7 @@
 			if (ndw > 0xFFFFE)
 				ndw = 0xFFFFE;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -118,7 +114,7 @@
 			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1..0b00c79 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@
 		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
 	}
 
-	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+	/* Setting this to false forces the performance state to low if the crtcs are disabled.
+	 * This can be a problem on PowerXpress systems or when using the card for offscreen
+	 * rendering or compute with no crtcs enabled.  Set it to true for now so that
+	 * performance scales even if the displays are off.
+	 */
+	si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
 }
 
 static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@
 		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
 
 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
-		table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
 
 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
 		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea..b322acc 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
 #define		STATE3_MASK				(0x1f << 15)
 #define		STATE3_SHIFT				15
 
-#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x2808
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x28e8
 #define		TRAIN_DONE_D0      			(1 << 30)
 #define		TRAIN_DONE_D1      			(1 << 31)
 
@@ -683,6 +683,51 @@
  * bit5 = 176.4 kHz
  * bit6 = 192 kHz
  */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
 #define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL          0x54
 #       define AUDIO_ENABLED                             (1 << 31)
 
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9..d8e835a 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  Choose this option if you have an R-Car chipset.
 	  If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d1..2ee44ca 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,7 +1,9 @@
 config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
 	depends on DRM && (ARM || SUPERH)
+	select BACKLIGHT_CLASS_DEVICE
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 54bad98..562f9a4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -40,7 +40,7 @@
 static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
 {
 	if (sdev->clock)
-		clk_enable(sdev->clock);
+		clk_prepare_enable(sdev->clock);
 #if 0
 	if (sdev->meram_dev && sdev->meram_dev->pdev)
 		pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
@@ -54,7 +54,7 @@
 		pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
 #endif
 	if (sdev->clock)
-		clk_disable(sdev->clock);
+		clk_disable_unprepare(sdev->clock);
 }
 
 /* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
similarity index 86%
rename from drivers/gpu/host1x/drm/Kconfig
rename to drivers/gpu/drm/tegra/Kconfig
index 69853a4..8961ba6 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,7 +1,10 @@
 config DRM_TEGRA
 	bool "NVIDIA Tegra DRM"
+	depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
 	depends on DRM
+	select TEGRA_HOST1X
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -13,6 +16,11 @@
 
 if DRM_TEGRA
 
+config DRM_TEGRA_DEBUG
+	bool "NVIDIA Tegra DRM debug support"
+	help
+	  Say yes here to enable debugging support.
+
 config DRM_TEGRA_STAGING
 	bool "Enable HOST1X interface"
 	depends on STAGING
@@ -21,9 +29,4 @@
 
 	  If unsure, choose N.
 
-config DRM_TEGRA_DEBUG
-	bool "NVIDIA Tegra DRM debug support"
-	help
-	  Say yes here to enable debugging support.
-
 endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 0000000..edc76ab
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+	bus.o \
+	drm.o \
+	gem.o \
+	fb.o \
+	dc.o \
+	output.o \
+	rgb.o \
+	hdmi.o \
+	gr2d.o \
+	gr3d.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 0000000..565f8f7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static int drm_host1x_set_busid(struct drm_device *dev,
+				struct drm_master *master)
+{
+	const char *device = dev_name(dev->dev);
+	const char *driver = dev->driver->name;
+	const char *bus = dev->dev->bus->name;
+	int length;
+
+	master->unique_len = strlen(bus) + 1 + strlen(device);
+	master->unique_size = master->unique_len;
+
+	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+	if (!master->unique)
+		return -ENOMEM;
+
+	snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
+
+	length = strlen(driver) + 1 + master->unique_len;
+
+	dev->devname = kmalloc(length + 1, GFP_KERNEL);
+	if (!dev->devname)
+		return -ENOMEM;
+
+	snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
+
+	return 0;
+}
+
+static struct drm_bus drm_host1x_bus = {
+	.bus_type = DRIVER_BUS_HOST1X,
+	.set_busid = drm_host1x_set_busid,
+};
+
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
+{
+	struct drm_device *drm;
+	int ret;
+
+	INIT_LIST_HEAD(&driver->device_list);
+	driver->bus = &drm_host1x_bus;
+
+	drm = drm_dev_alloc(driver, &device->dev);
+	if (!drm)
+		return -ENOMEM;
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_free;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+err_free:
+	drm_dev_free(drm);
+	return ret;
+}
+
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
+{
+	struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
+
+	drm_put_dev(tegra->drm);
+}
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
similarity index 92%
rename from drivers/gpu/host1x/drm/dc.c
rename to drivers/gpu/drm/tegra/dc.c
index b1a05ad..ae1cb31e 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
  */
 
 #include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
 
-#include "host1x_client.h"
 #include "dc.h"
 #include "drm.h"
 #include "gem.h"
@@ -51,6 +47,8 @@
 	window.dst.h = crtc_h;
 	window.format = tegra_dc_format(fb->pixel_format);
 	window.bits_per_pixel = fb->bits_per_pixel;
+	window.bottom_up = tegra_fb_is_bottom_up(fb);
+	window.tiled = tegra_fb_is_tiled(fb);
 
 	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@
 
 static void tegra_plane_destroy(struct drm_plane *plane)
 {
+	struct tegra_plane *p = to_tegra_plane(plane);
+
 	tegra_plane_disable(plane);
 	drm_plane_cleanup(plane);
+	kfree(p);
 }
 
 static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@
 	for (i = 0; i < 2; i++) {
 		struct tegra_plane *plane;
 
-		plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+		plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 		if (!plane)
 			return -ENOMEM;
 
@@ -133,8 +134,10 @@
 		err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
 				     &tegra_plane_funcs, plane_formats,
 				     ARRAY_SIZE(plane_formats), false);
-		if (err < 0)
+		if (err < 0) {
+			kfree(plane);
 			return err;
+		}
 	}
 
 	return 0;
@@ -145,6 +148,7 @@
 {
 	unsigned int format = tegra_dc_format(fb->pixel_format);
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+	unsigned int h_offset = 0, v_offset = 0;
 	unsigned long value;
 
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@
 	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
 	tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
 
+	if (tegra_fb_is_tiled(fb)) {
+		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	} else {
+		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+	/* make sure bottom-up buffers are properly displayed */
+	if (tegra_fb_is_bottom_up(fb)) {
+		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+		value |= INVERT_V;
+		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+		v_offset += fb->height - 1;
+	} else {
+		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+		value &= ~INVERT_V;
+		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+	}
+
+	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
 	value = GENERAL_UPDATE | WIN_A_UPDATE;
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
@@ -255,14 +285,26 @@
 	return 0;
 }
 
+static void drm_crtc_clear(struct drm_crtc *crtc)
+{
+	memset(crtc, 0, sizeof(*crtc));
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	drm_crtc_clear(crtc);
+}
+
 static const struct drm_crtc_funcs tegra_crtc_funcs = {
 	.page_flip = tegra_dc_page_flip,
 	.set_config = drm_crtc_helper_set_config,
-	.destroy = drm_crtc_cleanup,
+	.destroy = tegra_dc_destroy,
 };
 
 static void tegra_crtc_disable(struct drm_crtc *crtc)
 {
+	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct drm_device *drm = crtc->dev;
 	struct drm_plane *plane;
 
@@ -277,6 +319,8 @@
 			}
 		}
 	}
+
+	drm_vblank_off(drm, dc->pipe);
 }
 
 static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@
 		tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
 	}
 
+	if (window->bottom_up)
+		v_offset += window->src.h - 1;
+
 	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
 	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
+	if (window->tiled) {
+		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	} else {
+		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
 	value = WIN_ENABLE;
 
 	if (yuv) {
@@ -512,6 +569,9 @@
 		value |= COLOR_EXPAND;
 	}
 
+	if (window->bottom_up)
+		value |= INVERT_V;
+
 	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
 
 	/*
@@ -1041,30 +1101,30 @@
 	return 0;
 }
 
-static int tegra_dc_drm_init(struct host1x_client *client,
-			     struct drm_device *drm)
+static int tegra_dc_init(struct host1x_client *client)
 {
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
 
-	dc->pipe = drm->mode_config.num_crtc;
+	dc->pipe = tegra->drm->mode_config.num_crtc;
 
-	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+	drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
 	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
-	err = tegra_dc_rgb_init(drm, dc);
+	err = tegra_dc_rgb_init(tegra->drm, dc);
 	if (err < 0 && err != -ENODEV) {
 		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
 		return err;
 	}
 
-	err = tegra_dc_add_planes(drm, dc);
+	err = tegra_dc_add_planes(tegra->drm, dc);
 	if (err < 0)
 		return err;
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_dc_debugfs_init(dc, drm->primary);
+		err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
 		if (err < 0)
 			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
 	}
@@ -1080,7 +1140,7 @@
 	return 0;
 }
 
-static int tegra_dc_drm_exit(struct host1x_client *client)
+static int tegra_dc_exit(struct host1x_client *client)
 {
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
@@ -1103,13 +1163,12 @@
 }
 
 static const struct host1x_client_ops dc_client_ops = {
-	.drm_init = tegra_dc_drm_init,
-	.drm_exit = tegra_dc_drm_exit,
+	.init = tegra_dc_init,
+	.exit = tegra_dc_exit,
 };
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct resource *regs;
 	struct tegra_dc *dc;
 	int err;
@@ -1153,7 +1212,7 @@
 		return err;
 	}
 
-	err = host1x_register_client(host1x, &dc->client);
+	err = host1x_client_register(&dc->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
@@ -1167,17 +1226,22 @@
 
 static int tegra_dc_remove(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_dc *dc = platform_get_drvdata(pdev);
 	int err;
 
-	err = host1x_unregister_client(host1x, &dc->client);
+	err = host1x_client_unregister(&dc->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
 			err);
 		return err;
 	}
 
+	err = tegra_dc_rgb_remove(dc);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+		return err;
+	}
+
 	clk_disable_unprepare(dc->clk);
 
 	return 0;
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
similarity index 97%
rename from drivers/gpu/host1x/drm/dc.h
rename to drivers/gpu/drm/tegra/dc.h
index 79eaec9..91bbda2 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
 #define DC_WIN_CSC_KVB				0x618
 
 #define DC_WIN_WIN_OPTIONS			0x700
+#define INVERT_V     (1 <<  2)
 #define COLOR_EXPAND (1 <<  6)
 #define CSC_ENABLE   (1 << 18)
 #define WIN_ENABLE   (1 << 30)
@@ -365,6 +366,10 @@
 #define DC_WIN_BUF_STRIDE			0x70b
 #define DC_WIN_UV_BUF_STRIDE			0x70c
 #define DC_WIN_BUFFER_ADDR_MODE			0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR		(0 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE		(1 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV	(0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV		(1 << 16)
 #define DC_WIN_DV_CONTROL			0x70e
 
 #define DC_WIN_BLEND_NOKEY			0x70f
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 0000000..28e1781
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/host1x.h>
+
+#include "drm.h"
+#include "gem.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct tegra_drm_file {
+	struct list_head contexts;
+};
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct host1x_device *device = to_host1x_device(drm->dev);
+	struct tegra_drm *tegra;
+	int err;
+
+	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	dev_set_drvdata(drm->dev, tegra);
+	mutex_init(&tegra->clients_lock);
+	INIT_LIST_HEAD(&tegra->clients);
+	drm->dev_private = tegra;
+	tegra->drm = drm;
+
+	drm_mode_config_init(drm);
+
+	err = host1x_device_init(device);
+	if (err < 0)
+		return err;
+
+	/*
+	 * We don't use the drm_irq_install() helpers provided by the DRM
+	 * core, so we need to set this manually in order to allow the
+	 * DRM_IOCTL_WAIT_VBLANK ioctl to operate correctly.
+	 */
+	drm->irq_enabled = true;
+
+	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (err < 0)
+		return err;
+
+	err = tegra_drm_fb_init(drm);
+	if (err < 0)
+		return err;
+
+	drm_kms_helper_poll_init(drm);
+
+	return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+	struct host1x_device *device = to_host1x_device(drm->dev);
+	int err;
+
+	drm_kms_helper_poll_fini(drm);
+	tegra_drm_fb_exit(drm);
+	drm_vblank_cleanup(drm);
+	drm_mode_config_cleanup(drm);
+
+	err = host1x_device_exit(device);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+	struct tegra_drm_file *fpriv;
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fpriv->contexts);
+	filp->driver_priv = fpriv;
+
+	return 0;
+}
+
+static void tegra_drm_context_free(struct tegra_drm_context *context)
+{
+	context->client->ops->close_channel(context);
+	kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+	struct tegra_drm *tegra = drm->dev_private;
+
+	tegra_fbdev_restore_mode(tegra->fbdev);
+}
+
+static struct host1x_bo *
+host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+{
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, handle);
+	if (!gem)
+		return NULL;
+
+	mutex_lock(&drm->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&drm->struct_mutex);
+
+	bo = to_tegra_bo(gem);
+	return &bo->base;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+		     struct drm_tegra_submit *args, struct drm_device *drm,
+		     struct drm_file *file)
+{
+	unsigned int num_cmdbufs = args->num_cmdbufs;
+	unsigned int num_relocs = args->num_relocs;
+	unsigned int num_waitchks = args->num_waitchks;
+	struct drm_tegra_cmdbuf __user *cmdbufs =
+		(void __user *)(uintptr_t)args->cmdbufs;
+	struct drm_tegra_reloc __user *relocs =
+		(void __user *)(uintptr_t)args->relocs;
+	struct drm_tegra_waitchk __user *waitchks =
+		(void __user *)(uintptr_t)args->waitchks;
+	struct drm_tegra_syncpt syncpt;
+	struct host1x_job *job;
+	int err;
+
+	/* we currently support exactly one syncpt_incr struct per submit */
+	if (args->num_syncpts != 1)
+		return -EINVAL;
+
+	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+			       args->num_relocs, args->num_waitchks);
+	if (!job)
+		return -ENOMEM;
+
+	job->num_relocs = args->num_relocs;
+	job->num_waitchk = args->num_waitchks;
+	job->client = (u32)args->context;
+	job->class = context->client->base.class;
+	job->serialize = true;
+
+	while (num_cmdbufs) {
+		struct drm_tegra_cmdbuf cmdbuf;
+		struct host1x_bo *bo;
+
+		err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+		if (err)
+			goto fail;
+
+		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+		if (!bo) {
+			err = -ENOENT;
+			goto fail;
+		}
+
+		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+		num_cmdbufs--;
+		cmdbufs++;
+	}
+
+	err = copy_from_user(job->relocarray, relocs,
+			     sizeof(*relocs) * num_relocs);
+	if (err)
+		goto fail;
+
+	while (num_relocs--) {
+		struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+		struct host1x_bo *cmdbuf, *target;
+
+		cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+		target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+		reloc->cmdbuf = cmdbuf;
+		reloc->target = target;
+
+		if (!reloc->target || !reloc->cmdbuf) {
+			err = -ENOENT;
+			goto fail;
+		}
+	}
+
+	err = copy_from_user(job->waitchk, waitchks,
+			     sizeof(*waitchks) * num_waitchks);
+	if (err)
+		goto fail;
+
+	err = copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+			     sizeof(syncpt));
+	if (err)
+		goto fail;
+
+	job->is_addr_reg = context->client->ops->is_addr_reg;
+	job->syncpt_incrs = syncpt.incrs;
+	job->syncpt_id = syncpt.id;
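+	/* default to a 10 second timeout; a smaller nonzero timeout from userspace overrides it */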
+	job->timeout = 10000;
+
+	if (args->timeout && args->timeout < 10000)
+		job->timeout = args->timeout;
+
+	err = host1x_job_pin(job, context->client->base.dev);
+	if (err)
+		goto fail;
+
+	err = host1x_job_submit(job);
+	if (err)
+		goto fail_submit;
+
+	args->fence = job->syncpt_end;
+
+	host1x_job_put(job);
+	return 0;
+
+fail_submit:
+	host1x_job_unpin(job);
+fail:
+	host1x_job_put(job);
+	return err;
+}
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
+{
+	return (struct tegra_drm_context *)(uintptr_t)context;
+}
+
+static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
+					struct tegra_drm_context *context)
+{
+	struct tegra_drm_context *ctx;
+
+	list_for_each_entry(ctx, &file->contexts, list)
+		if (ctx == context)
+			return true;
+
+	return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct drm_tegra_gem_create *args = data;
+	struct tegra_bo *bo;
+
+	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
+					 &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+			  struct drm_file *file)
+{
+	struct drm_tegra_gem_mmap *args = data;
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, args->handle);
+	if (!gem)
+		return -EINVAL;
+
+	bo = to_tegra_bo(gem);
+
+	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
+
+	drm_gem_object_unreference(gem);
+
+	return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_read *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	args->value = host1x_syncpt_read_min(sp);
+	return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_incr *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host1x, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	return host1x_syncpt_incr(sp);
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_wait *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host1x, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+				  &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+			      struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct tegra_drm *tegra = drm->dev_private;
+	struct drm_tegra_open_channel *args = data;
+	struct tegra_drm_context *context;
+	struct tegra_drm_client *client;
+	int err = -ENODEV;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	list_for_each_entry(client, &tegra->clients, list)
+		if (client->base.class == args->client) {
+			err = client->ops->open_channel(client, context);
+			if (err)
+				break;
+
+			list_add(&context->list, &fpriv->contexts);
+			args->context = (uintptr_t)context;
+			context->client = client;
+			return 0;
+		}
+
+	kfree(context);
+	return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+			       struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_close_channel *args = data;
+	struct tegra_drm_context *context;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -EINVAL;
+
+	list_del(&context->list);
+	tegra_drm_context_free(context);
+
+	return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_get_syncpt *args = data;
+	struct tegra_drm_context *context;
+	struct host1x_syncpt *syncpt;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	if (args->index >= context->client->base.num_syncpts)
+		return -EINVAL;
+
+	syncpt = context->client->base.syncpts[args->index];
+	args->id = host1x_syncpt_id(syncpt);
+
+	return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+			struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_submit *args = data;
+	struct tegra_drm_context *context;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	return context->client->ops->submit(context, args, drm, file);
+}
+
+static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
+				 struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_get_syncpt_base *args = data;
+	struct tegra_drm_context *context;
+	struct host1x_syncpt_base *base;
+	struct host1x_syncpt *syncpt;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	if (args->syncpt >= context->client->base.num_syncpts)
+		return -EINVAL;
+
+	syncpt = context->client->base.syncpts[args->syncpt];
+
+	base = host1x_syncpt_get_base(syncpt);
+	if (!base)
+		return -ENXIO;
+
+	args->id = host1x_syncpt_base_id(base);
+
+	return 0;
+}
+#endif
+
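+/* All driver-specific ioctls are exposed through staging only. */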
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = tegra_drm_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
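+/* Translate a DRM pipe number into the CRTC driving it, if any. */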
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+		struct tegra_dc *dc = to_tegra_dc(crtc);
+
+		if (dc->pipe == pipe)
+			return crtc;
+	}
+
+	return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	/* TODO: implement real hardware counter using syncpoints */
+	return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+	struct tegra_dc *dc;
+
+	if (!crtc)
+		return -ENODEV;
+
+	dc = to_tegra_dc(crtc);
+
+	tegra_dc_enable_vblank(dc);
+
+	return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+	if (crtc)
+		tegra_dc_disable_vblank(to_tegra_dc(crtc));
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct tegra_drm_context *context, *tmp;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+		tegra_dc_cancel_page_flip(crtc, file);
+
+	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+		tegra_drm_context_free(context);
+
+	kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)s->private;
+	struct drm_device *drm = node->minor->dev;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&drm->mode_config.fb_lock);
+
+	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+			   fb->base.id, fb->width, fb->height, fb->depth,
+			   fb->bits_per_pixel,
+			   atomic_read(&fb->refcount.refcount));
+	}
+
+	mutex_unlock(&drm->mode_config.fb_lock);
+
+	return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(tegra_debugfs_list,
+					ARRAY_SIZE(tegra_debugfs_list),
+					minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(tegra_debugfs_list,
+				 ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.load = tegra_drm_load,
+	.unload = tegra_drm_unload,
+	.open = tegra_drm_open,
+	.preclose = tegra_drm_preclose,
+	.lastclose = tegra_drm_lastclose,
+
+	.get_vblank_counter = tegra_drm_get_vblank_counter,
+	.enable_vblank = tegra_drm_enable_vblank,
+	.disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = tegra_debugfs_init,
+	.debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+	.gem_free_object = tegra_bo_free_object,
+	.gem_vm_ops = &tegra_bo_vm_ops,
+	.dumb_create = tegra_bo_dumb_create,
+	.dumb_map_offset = tegra_bo_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+
+	.ioctls = tegra_drm_ioctls,
+	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+	.fops = &tegra_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+			      struct tegra_drm_client *client)
+{
+	mutex_lock(&tegra->clients_lock);
+	list_add_tail(&client->list, &tegra->clients);
+	mutex_unlock(&tegra->clients_lock);
+
+	return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+				struct tegra_drm_client *client)
+{
+	mutex_lock(&tegra->clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&tegra->clients_lock);
+
+	return 0;
+}
+
+static int host1x_drm_probe(struct host1x_device *device)
+{
+	return drm_host1x_init(&tegra_drm_driver, device);
+}
+
+static int host1x_drm_remove(struct host1x_device *device)
+{
+	drm_host1x_exit(&tegra_drm_driver, device);
+
+	return 0;
+}
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+	{ .compatible = "nvidia,tegra20-dc", },
+	{ .compatible = "nvidia,tegra20-hdmi", },
+	{ .compatible = "nvidia,tegra20-gr2d", },
+	{ .compatible = "nvidia,tegra20-gr3d", },
+	{ .compatible = "nvidia,tegra30-dc", },
+	{ .compatible = "nvidia,tegra30-hdmi", },
+	{ .compatible = "nvidia,tegra30-gr2d", },
+	{ .compatible = "nvidia,tegra30-gr3d", },
+	{ .compatible = "nvidia,tegra114-hdmi", },
+	{ .compatible = "nvidia,tegra114-gr3d", },
+	{ /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+	.name = "drm",
+	.probe = host1x_drm_probe,
+	.remove = host1x_drm_remove,
+	.subdevs = host1x_drm_subdevs,
+};
+
+static int __init host1x_drm_init(void)
+{
+	int err;
+
+	err = host1x_driver_register(&host1x_drm_driver);
+	if (err < 0)
+		return err;
+
+	err = platform_driver_register(&tegra_dc_driver);
+	if (err < 0)
+		goto unregister_host1x;
+
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_dc;
+
+	err = platform_driver_register(&tegra_gr2d_driver);
+	if (err < 0)
+		goto unregister_hdmi;
+
+	err = platform_driver_register(&tegra_gr3d_driver);
+	if (err < 0)
+		goto unregister_gr2d;
+
+	return 0;
+
+unregister_gr2d:
+	platform_driver_unregister(&tegra_gr2d_driver);
+unregister_hdmi:
+	platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+	platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+	host1x_driver_unregister(&host1x_drm_driver);
+	return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+	platform_driver_unregister(&tegra_gr3d_driver);
+	platform_driver_unregister(&tegra_gr2d_driver);
+	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_dc_driver);
+	host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
similarity index 72%
rename from drivers/gpu/host1x/drm/drm.h
rename to drivers/gpu/drm/tegra/drm.h
index 02ce020..fdfe259 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
 #ifndef HOST1X_DRM_H
 #define HOST1X_DRM_H 1
 
+#include <uapi/drm/tegra_drm.h>
+#include <linux/host1x.h>
+
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fixed.h>
-#include <uapi/drm/tegra_drm.h>
-
-#include "host1x.h"
 
 struct tegra_fb {
 	struct drm_framebuffer base;
@@ -30,17 +30,8 @@
 	struct tegra_fb *fb;
 };
 
-struct host1x_drm {
+struct tegra_drm {
 	struct drm_device *drm;
-	struct device *dev;
-	void __iomem *regs;
-	struct clk *clk;
-	int syncpt;
-	int irq;
-
-	struct mutex drm_clients_lock;
-	struct list_head drm_clients;
-	struct list_head drm_active;
 
 	struct mutex clients_lock;
 	struct list_head clients;
@@ -48,66 +39,60 @@
 	struct tegra_fbdev *fbdev;
 };
 
-struct host1x_client;
+struct tegra_drm_client;
 
-struct host1x_drm_context {
-	struct host1x_client *client;
+struct tegra_drm_context {
+	struct tegra_drm_client *client;
 	struct host1x_channel *channel;
 	struct list_head list;
 };
 
-struct host1x_client_ops {
-	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
-	int (*drm_exit)(struct host1x_client *client);
-	int (*open_channel)(struct host1x_client *client,
-			    struct host1x_drm_context *context);
-	void (*close_channel)(struct host1x_drm_context *context);
-	int (*submit)(struct host1x_drm_context *context,
+struct tegra_drm_client_ops {
+	int (*open_channel)(struct tegra_drm_client *client,
+			    struct tegra_drm_context *context);
+	void (*close_channel)(struct tegra_drm_context *context);
+	int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+	int (*submit)(struct tegra_drm_context *context,
 		      struct drm_tegra_submit *args, struct drm_device *drm,
 		      struct drm_file *file);
 };
 
-struct host1x_drm_file {
-	struct list_head contexts;
-};
+int tegra_drm_submit(struct tegra_drm_context *context,
+		     struct drm_tegra_submit *args, struct drm_device *drm,
+		     struct drm_file *file);
 
-struct host1x_client {
-	struct host1x_drm *host1x;
-	struct device *dev;
-
-	const struct host1x_client_ops *ops;
-
-	enum host1x_class class;
-	struct host1x_channel *channel;
-
-	struct host1x_syncpt **syncpts;
-	unsigned int num_syncpts;
-
+struct tegra_drm_client {
+	struct host1x_client base;
 	struct list_head list;
+
+	const struct tegra_drm_client_ops *ops;
 };
 
-extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x_drm *host1x);
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_drm_client, base);
+}
 
-extern int host1x_register_client(struct host1x_drm *host1x,
-				  struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x_drm *host1x,
-				    struct host1x_client *client);
+extern int tegra_drm_register_client(struct tegra_drm *tegra,
+				     struct tegra_drm_client *client);
+extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
+				       struct tegra_drm_client *client);
+
+extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+extern int tegra_drm_exit(struct tegra_drm *tegra);
 
 struct tegra_output;
 
 struct tegra_dc {
 	struct host1x_client client;
-	spinlock_t lock;
-
-	struct host1x_drm *host1x;
 	struct device *dev;
+	spinlock_t lock;
 
 	struct drm_crtc base;
 	int pipe;
 
 	struct clk *clk;
-
 	void __iomem *regs;
 	int irq;
 
@@ -123,7 +108,8 @@
 	struct drm_pending_vblank_event *event;
 };
 
-static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
 {
 	return container_of(client, struct tegra_dc, client);
 }
@@ -162,6 +148,8 @@
 	unsigned int format;
 	unsigned int stride[2];
 	unsigned long base[3];
+	bool bottom_up;
+	bool tiled;
 };
 
 /* from dc.c */
@@ -249,23 +237,34 @@
 	return output ? -ENOSYS : -EINVAL;
 }
 
+/* from bus.c */
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
+
 /* from rgb.c */
 extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
 extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
 extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
 
 /* from output.c */
-extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_probe(struct tegra_output *output);
+extern int tegra_output_remove(struct tegra_output *output);
 extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
 extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct drm_driver tegra_drm_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
 
 #endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
similarity index 92%
rename from drivers/gpu/host1x/drm/fb.c
rename to drivers/gpu/drm/tegra/fb.c
index 979a3e3..490f771 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-
 #include "drm.h"
 #include "gem.h"
 
@@ -36,6 +34,26 @@
 	return fb->planes[index];
 }
 
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+		return true;
+
+	return false;
+}
+
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	if (fb->planes[0]->flags & TEGRA_BO_TILED)
+		return true;
+
+	return false;
+}
+
 static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@
 
 	size = cmd.pitches[0] * cmd.height;
 
-	bo = tegra_bo_create(drm, size);
+	bo = tegra_bo_create(drm, size, 0);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
@@ -323,10 +341,10 @@
 
 static void tegra_fb_output_poll_changed(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 
-	if (host1x->fbdev)
-		drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+	if (tegra->fbdev)
+		drm_fb_helper_hotplug_event(&tegra->fbdev->base);
 }
 
 static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@
 
 int tegra_drm_fb_init(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 	struct tegra_fbdev *fbdev;
 
 	drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@
 	if (IS_ERR(fbdev))
 		return PTR_ERR(fbdev);
 
-	host1x->fbdev = fbdev;
+	tegra->fbdev = fbdev;
 
 	return 0;
 }
 
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 
-	tegra_fbdev_free(host1x->fbdev);
+	tegra_fbdev_free(tegra->fbdev);
 }
 
 void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
similarity index 86%
rename from drivers/gpu/host1x/drm/gem.c
rename to drivers/gpu/drm/tegra/gem.c
index 59623de..28a9cbc 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/tegra_drm.h>
 
 #include "gem.h"
 
-static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
 {
 	return container_of(bo, struct tegra_bo, base);
 }
 
 static void tegra_bo_put(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 	struct drm_device *drm = obj->gem.dev;
 
 	mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@
 
 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->paddr;
 }
@@ -57,7 +50,7 @@
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->vaddr;
 }
@@ -68,7 +61,7 @@
 
 static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->vaddr + page * PAGE_SIZE;
 }
@@ -80,7 +73,7 @@
 
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 	struct drm_device *drm = obj->gem.dev;
 
 	mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@
 	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+				 unsigned long flags)
 {
 	struct tegra_bo *bo;
 	int err;
@@ -135,6 +129,12 @@
 	if (err)
 		goto err_mmap;
 
+	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+		bo->flags |= TEGRA_BO_TILED;
+
+	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+		bo->flags |= TEGRA_BO_BOTTOM_UP;
+
 	return bo;
 
 err_mmap:
@@ -149,14 +149,15 @@
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-					    struct drm_device *drm,
-					    unsigned int size,
-					    unsigned int *handle)
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned long flags,
+					     unsigned int *handle)
 {
 	struct tegra_bo *bo;
 	int ret;
 
-	bo = tegra_bo_create(drm, size);
+	bo = tegra_bo_create(drm, size, flags);
 	if (IS_ERR(bo))
 		return bo;
 
@@ -178,7 +179,6 @@
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
 	drm_gem_free_mmap_offset(gem);
-
 	drm_gem_object_release(gem);
 	tegra_bo_destroy(gem->dev, bo);
 
@@ -197,8 +197,8 @@
 	if (args->size < args->pitch * args->height)
 		args->size = args->pitch * args->height;
 
-	bo = tegra_bo_create_with_handle(file, drm, args->size,
-					    &args->handle);
+	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+					 &args->handle);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
similarity index 84%
rename from drivers/gpu/host1x/drm/gem.h
rename to drivers/gpu/drm/tegra/gem.h
index 492533a..7674000 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
 #ifndef __HOST1X_GEM_H
 #define __HOST1X_GEM_H
 
+#include <linux/host1x.h>
+
 #include <drm/drm.h>
 #include <drm/drmP.h>
 
-#include "host1x_bo.h"
+#define TEGRA_BO_TILED     (1 << 0)
+#define TEGRA_BO_BOTTOM_UP (1 << 1)
 
 struct tegra_bo {
 	struct drm_gem_object gem;
 	struct host1x_bo base;
+	unsigned long flags;
 	dma_addr_t paddr;
 	void *vaddr;
 };
@@ -38,11 +42,13 @@
 
 extern const struct host1x_bo_ops tegra_bo_ops;
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+				 unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-					    struct drm_device *drm,
-					    unsigned int size,
-					    unsigned int *handle);
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned long flags,
+					     unsigned int *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 			 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 0000000..7ec4259
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
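+/*
+ * Per-device state for the 2D engine: the host1x channel that jobs are
+ * submitted on and a bitmap marking which registers carry addresses.
+ */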
+struct gr2d {
+	struct tegra_drm_client client;
+	struct host1x_channel *channel;
+	struct clk *clk;
+
+	DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+	return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct gr2d *gr2d = to_gr2d(drm);
+
+	gr2d->channel = host1x_channel_request(client->dev);
+	if (!gr2d->channel)
+		return -ENOMEM;
+
+	client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+	if (!client->syncpts[0]) {
+		host1x_channel_free(gr2d->channel);
+		return -ENOMEM;
+	}
+
+	return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct gr2d *gr2d = to_gr2d(drm);
+	int err;
+
+	err = tegra_drm_unregister_client(tegra, drm);
+	if (err < 0)
+		return err;
+
+	host1x_syncpt_free(client->syncpts[0]);
+	host1x_channel_free(gr2d->channel);
+
+	return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+	.init = gr2d_init,
+	.exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+			     struct tegra_drm_context *context)
+{
+	struct gr2d *gr2d = to_gr2d(client);
+
+	context->channel = host1x_channel_get(gr2d->channel);
+	if (!context->channel)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+	host1x_channel_put(context->channel);
+}
+
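+/*
+ * Report whether a register takes a buffer address, so that command
+ * streams submitted from userspace can be checked and relocated.
+ */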
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+	struct gr2d *gr2d = dev_get_drvdata(dev);
+
+	switch (class) {
+	case HOST1X_CLASS_HOST1X:
+		if (offset == 0x2b)
+			return 1;
+
+		break;
+
+	case HOST1X_CLASS_GR2D:
+	case HOST1X_CLASS_GR2D_SB:
+		if (offset >= GR2D_NUM_REGS)
+			break;
+
+		if (test_bit(offset, gr2d->addr_regs))
+			return 1;
+
+		break;
+	}
+
+	return 0;
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+	.open_channel = gr2d_open_channel,
+	.close_channel = gr2d_close_channel,
+	.is_addr_reg = gr2d_is_addr_reg,
+	.submit = tegra_drm_submit,
+};
+
+static const struct of_device_id gr2d_match[] = {
+	{ .compatible = "nvidia,tegra30-gr2d" },
+	{ .compatible = "nvidia,tegra20-gr2d" },
+	{ },
+};
+
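+/* Registers that take buffer addresses; they seed the addr_regs bitmap. */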
+static const u32 gr2d_addr_regs[] = {
+	GR2D_UA_BASE_ADDR,
+	GR2D_VA_BASE_ADDR,
+	GR2D_PAT_BASE_ADDR,
+	GR2D_DSTA_BASE_ADDR,
+	GR2D_DSTB_BASE_ADDR,
+	GR2D_DSTC_BASE_ADDR,
+	GR2D_SRCA_BASE_ADDR,
+	GR2D_SRCB_BASE_ADDR,
+	GR2D_SRC_BASE_ADDR_SB,
+	GR2D_DSTA_BASE_ADDR_SB,
+	GR2D_DSTB_BASE_ADDR_SB,
+	GR2D_UA_BASE_ADDR_SB,
+	GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct host1x_syncpt **syncpts;
+	struct gr2d *gr2d;
+	unsigned int i;
+	int err;
+
+	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+	if (!gr2d)
+		return -ENOMEM;
+
+	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+	if (!syncpts)
+		return -ENOMEM;
+
+	gr2d->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(gr2d->clk)) {
+		dev_err(dev, "cannot get clock\n");
+		return PTR_ERR(gr2d->clk);
+	}
+
+	err = clk_prepare_enable(gr2d->clk);
+	if (err) {
+		dev_err(dev, "cannot turn on clock\n");
+		return err;
+	}
+
+	INIT_LIST_HEAD(&gr2d->client.base.list);
+	gr2d->client.base.ops = &gr2d_client_ops;
+	gr2d->client.base.dev = dev;
+	gr2d->client.base.class = HOST1X_CLASS_GR2D;
+	gr2d->client.base.syncpts = syncpts;
+	gr2d->client.base.num_syncpts = 1;
+
+	INIT_LIST_HEAD(&gr2d->client.list);
+	gr2d->client.ops = &gr2d_ops;
+
+	err = host1x_client_register(&gr2d->client.base);
+	if (err < 0) {
+		dev_err(dev, "failed to register host1x client: %d\n", err);
+		clk_disable_unprepare(gr2d->clk);
+		return err;
+	}
+
+	/* initialize address register map */
+	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+		set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+	platform_set_drvdata(pdev, gr2d);
+
+	return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+	struct gr2d *gr2d = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_client_unregister(&gr2d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_disable_unprepare(gr2d->clk);
+
+	return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+	.driver = {
+		.name = "tegra-gr2d",
+		.of_match_table = gr2d_match,
+	},
+	.probe = gr2d_probe,
+	.remove = gr2d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 0000000..4d7304f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR		0x1a
+#define GR2D_VA_BASE_ADDR		0x1b
+#define GR2D_PAT_BASE_ADDR		0x26
+#define GR2D_DSTA_BASE_ADDR		0x2b
+#define GR2D_DSTB_BASE_ADDR		0x2c
+#define GR2D_DSTC_BASE_ADDR		0x2d
+#define GR2D_SRCA_BASE_ADDR		0x31
+#define GR2D_SRCB_BASE_ADDR		0x32
+#define GR2D_SRC_BASE_ADDR_SB		0x48
+#define GR2D_DSTA_BASE_ADDR_SB		0x49
+#define GR2D_DSTB_BASE_ADDR_SB		0x4a
+#define GR2D_UA_BASE_ADDR_SB		0x4b
+#define GR2D_VA_BASE_ADDR_SB		0x4c
+
+#define GR2D_NUM_REGS			0x4d
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 0000000..4cec8f5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+struct gr3d {
+	struct tegra_drm_client client;
+	struct host1x_channel *channel;
+	struct clk *clk_secondary;
+	struct clk *clk;
+
+	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+	return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct gr3d *gr3d = to_gr3d(drm);
+
+	gr3d->channel = host1x_channel_request(client->dev);
+	if (!gr3d->channel)
+		return -ENOMEM;
+
+	client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+	if (!client->syncpts[0]) {
+		host1x_channel_free(gr3d->channel);
+		return -ENOMEM;
+	}
+
+	return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct gr3d *gr3d = to_gr3d(drm);
+	int err;
+
+	err = tegra_drm_unregister_client(tegra, drm);
+	if (err < 0)
+		return err;
+
+	host1x_syncpt_free(client->syncpts[0]);
+	host1x_channel_free(gr3d->channel);
+
+	return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+	.init = gr3d_init,
+	.exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+			     struct tegra_drm_context *context)
+{
+	struct gr3d *gr3d = to_gr3d(client);
+
+	context->channel = host1x_channel_get(gr3d->channel);
+	if (!context->channel)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+	host1x_channel_put(context->channel);
+}
+
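+/* As for gr2d: flag the registers that take buffer addresses. */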
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+	struct gr3d *gr3d = dev_get_drvdata(dev);
+
+	switch (class) {
+	case HOST1X_CLASS_HOST1X:
+		if (offset == 0x2b)
+			return 1;
+
+		break;
+
+	case HOST1X_CLASS_GR3D:
+		if (offset >= GR3D_NUM_REGS)
+			break;
+
+		if (test_bit(offset, gr3d->addr_regs))
+			return 1;
+
+		break;
+	}
+
+	return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+	.open_channel = gr3d_open_channel,
+	.close_channel = gr3d_close_channel,
+	.is_addr_reg = gr3d_is_addr_reg,
+	.submit = tegra_drm_submit,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+	{ .compatible = "nvidia,tegra114-gr3d" },
+	{ .compatible = "nvidia,tegra30-gr3d" },
+	{ .compatible = "nvidia,tegra20-gr3d" },
+	{ }
+};
+
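+/* Address registers of the 3D engine; they seed the addr_regs bitmap. */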
+static const u32 gr3d_addr_regs[] = {
+	GR3D_IDX_ATTRIBUTE( 0),
+	GR3D_IDX_ATTRIBUTE( 1),
+	GR3D_IDX_ATTRIBUTE( 2),
+	GR3D_IDX_ATTRIBUTE( 3),
+	GR3D_IDX_ATTRIBUTE( 4),
+	GR3D_IDX_ATTRIBUTE( 5),
+	GR3D_IDX_ATTRIBUTE( 6),
+	GR3D_IDX_ATTRIBUTE( 7),
+	GR3D_IDX_ATTRIBUTE( 8),
+	GR3D_IDX_ATTRIBUTE( 9),
+	GR3D_IDX_ATTRIBUTE(10),
+	GR3D_IDX_ATTRIBUTE(11),
+	GR3D_IDX_ATTRIBUTE(12),
+	GR3D_IDX_ATTRIBUTE(13),
+	GR3D_IDX_ATTRIBUTE(14),
+	GR3D_IDX_ATTRIBUTE(15),
+	GR3D_IDX_INDEX_BASE,
+	GR3D_QR_ZTAG_ADDR,
+	GR3D_QR_CTAG_ADDR,
+	GR3D_QR_CZ_ADDR,
+	GR3D_TEX_TEX_ADDR( 0),
+	GR3D_TEX_TEX_ADDR( 1),
+	GR3D_TEX_TEX_ADDR( 2),
+	GR3D_TEX_TEX_ADDR( 3),
+	GR3D_TEX_TEX_ADDR( 4),
+	GR3D_TEX_TEX_ADDR( 5),
+	GR3D_TEX_TEX_ADDR( 6),
+	GR3D_TEX_TEX_ADDR( 7),
+	GR3D_TEX_TEX_ADDR( 8),
+	GR3D_TEX_TEX_ADDR( 9),
+	GR3D_TEX_TEX_ADDR(10),
+	GR3D_TEX_TEX_ADDR(11),
+	GR3D_TEX_TEX_ADDR(12),
+	GR3D_TEX_TEX_ADDR(13),
+	GR3D_TEX_TEX_ADDR(14),
+	GR3D_TEX_TEX_ADDR(15),
+	GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+	GR3D_GLOBAL_SURFADDR( 0),
+	GR3D_GLOBAL_SURFADDR( 1),
+	GR3D_GLOBAL_SURFADDR( 2),
+	GR3D_GLOBAL_SURFADDR( 3),
+	GR3D_GLOBAL_SURFADDR( 4),
+	GR3D_GLOBAL_SURFADDR( 5),
+	GR3D_GLOBAL_SURFADDR( 6),
+	GR3D_GLOBAL_SURFADDR( 7),
+	GR3D_GLOBAL_SURFADDR( 8),
+	GR3D_GLOBAL_SURFADDR( 9),
+	GR3D_GLOBAL_SURFADDR(10),
+	GR3D_GLOBAL_SURFADDR(11),
+	GR3D_GLOBAL_SURFADDR(12),
+	GR3D_GLOBAL_SURFADDR(13),
+	GR3D_GLOBAL_SURFADDR(14),
+	GR3D_GLOBAL_SURFADDR(15),
+	GR3D_GLOBAL_SPILLSURFADDR,
+	GR3D_GLOBAL_SURFOVERADDR( 0),
+	GR3D_GLOBAL_SURFOVERADDR( 1),
+	GR3D_GLOBAL_SURFOVERADDR( 2),
+	GR3D_GLOBAL_SURFOVERADDR( 3),
+	GR3D_GLOBAL_SURFOVERADDR( 4),
+	GR3D_GLOBAL_SURFOVERADDR( 5),
+	GR3D_GLOBAL_SURFOVERADDR( 6),
+	GR3D_GLOBAL_SURFOVERADDR( 7),
+	GR3D_GLOBAL_SURFOVERADDR( 8),
+	GR3D_GLOBAL_SURFOVERADDR( 9),
+	GR3D_GLOBAL_SURFOVERADDR(10),
+	GR3D_GLOBAL_SURFOVERADDR(11),
+	GR3D_GLOBAL_SURFOVERADDR(12),
+	GR3D_GLOBAL_SURFOVERADDR(13),
+	GR3D_GLOBAL_SURFOVERADDR(14),
+	GR3D_GLOBAL_SURFOVERADDR(15),
+	GR3D_GLOBAL_SAMP01SURFADDR( 0),
+	GR3D_GLOBAL_SAMP01SURFADDR( 1),
+	GR3D_GLOBAL_SAMP01SURFADDR( 2),
+	GR3D_GLOBAL_SAMP01SURFADDR( 3),
+	GR3D_GLOBAL_SAMP01SURFADDR( 4),
+	GR3D_GLOBAL_SAMP01SURFADDR( 5),
+	GR3D_GLOBAL_SAMP01SURFADDR( 6),
+	GR3D_GLOBAL_SAMP01SURFADDR( 7),
+	GR3D_GLOBAL_SAMP01SURFADDR( 8),
+	GR3D_GLOBAL_SAMP01SURFADDR( 9),
+	GR3D_GLOBAL_SAMP01SURFADDR(10),
+	GR3D_GLOBAL_SAMP01SURFADDR(11),
+	GR3D_GLOBAL_SAMP01SURFADDR(12),
+	GR3D_GLOBAL_SAMP01SURFADDR(13),
+	GR3D_GLOBAL_SAMP01SURFADDR(14),
+	GR3D_GLOBAL_SAMP01SURFADDR(15),
+	GR3D_GLOBAL_SAMP23SURFADDR( 0),
+	GR3D_GLOBAL_SAMP23SURFADDR( 1),
+	GR3D_GLOBAL_SAMP23SURFADDR( 2),
+	GR3D_GLOBAL_SAMP23SURFADDR( 3),
+	GR3D_GLOBAL_SAMP23SURFADDR( 4),
+	GR3D_GLOBAL_SAMP23SURFADDR( 5),
+	GR3D_GLOBAL_SAMP23SURFADDR( 6),
+	GR3D_GLOBAL_SAMP23SURFADDR( 7),
+	GR3D_GLOBAL_SAMP23SURFADDR( 8),
+	GR3D_GLOBAL_SAMP23SURFADDR( 9),
+	GR3D_GLOBAL_SAMP23SURFADDR(10),
+	GR3D_GLOBAL_SAMP23SURFADDR(11),
+	GR3D_GLOBAL_SAMP23SURFADDR(12),
+	GR3D_GLOBAL_SAMP23SURFADDR(13),
+	GR3D_GLOBAL_SAMP23SURFADDR(14),
+	GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct host1x_syncpt **syncpts;
+	struct gr3d *gr3d;
+	unsigned int i;
+	int err;
+
+	gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+	if (!gr3d)
+		return -ENOMEM;
+
+	syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+	if (!syncpts)
+		return -ENOMEM;
+
+	gr3d->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(gr3d->clk)) {
+		dev_err(&pdev->dev, "cannot get clock\n");
+		return PTR_ERR(gr3d->clk);
+	}
+
+	if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
+		gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
+		if (IS_ERR(gr3d->clk_secondary)) {
+			dev_err(&pdev->dev, "cannot get secondary clock\n");
+			return PTR_ERR(gr3d->clk_secondary);
+		}
+	}
+
+	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to power up 3D unit\n");
+		return err;
+	}
+
+	if (gr3d->clk_secondary) {
+		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
+							gr3d->clk_secondary);
+		if (err < 0) {
+			dev_err(&pdev->dev,
+				"failed to power up secondary 3D unit\n");
+			return err;
+		}
+	}
+
+	INIT_LIST_HEAD(&gr3d->client.base.list);
+	gr3d->client.base.ops = &gr3d_client_ops;
+	gr3d->client.base.dev = &pdev->dev;
+	gr3d->client.base.class = HOST1X_CLASS_GR3D;
+	gr3d->client.base.syncpts = syncpts;
+	gr3d->client.base.num_syncpts = 1;
+
+	INIT_LIST_HEAD(&gr3d->client.list);
+	gr3d->client.ops = &gr3d_ops;
+
+	err = host1x_client_register(&gr3d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	/* initialize address register map */
+	for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+		set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+	platform_set_drvdata(pdev, gr3d);
+
+	return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+	struct gr3d *gr3d = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_client_unregister(&gr3d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	if (gr3d->clk_secondary) {
+		tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
+		clk_disable_unprepare(gr3d->clk_secondary);
+	}
+
+	tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+	clk_disable_unprepare(gr3d->clk);
+
+	return 0;
+}
+
+struct platform_driver tegra_gr3d_driver = {
+	.driver = {
+		.name = "tegra-gr3d",
+		.of_match_table = tegra_gr3d_match,
+	},
+	.probe = gr3d_probe,
+	.remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 0000000..0c30a13
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x)		(0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE		0x121
+#define GR3D_QR_ZTAG_ADDR		0x415
+#define GR3D_QR_CTAG_ADDR		0x417
+#define GR3D_QR_CZ_ADDR			0x419
+#define GR3D_TEX_TEX_ADDR(x)		(0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS	0x904
+#define GR3D_GLOBAL_SURFADDR(x)		(0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR	0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x)	(0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x)	(0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x)	(0xe60 + (x))
+
+#define GR3D_NUM_REGS			0xe88
+
+#endif
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
similarity index 83%
rename from drivers/gpu/host1x/drm/hdmi.c
rename to drivers/gpu/drm/tegra/hdmi.c
index 644d95c..0cd9bc2 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
  */
 
 #include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/gpio.h>
-#include <linux/hdmi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
 #include <linux/clk/tegra.h>
-
-#include <drm/drm_edid.h>
+#include <linux/debugfs.h>
+#include <linux/hdmi.h>
+#include <linux/regulator/consumer.h>
 
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
-#include "host1x_client.h"
+
+struct tmds_config {
+	unsigned int pclk;
+	u32 pll0;
+	u32 pll1;
+	u32 pe_current;
+	u32 drive_current;
+	u32 peak_current;
+};
+
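+/*
+ * Per-SoC HDMI parameters, selected at probe time through the .data
+ * field of the matching of_device_id entry.
+ */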
+struct tegra_hdmi_config {
+	const struct tmds_config *tmds;
+	unsigned int num_tmds;
+
+	unsigned long fuse_override_offset;
+	unsigned long fuse_override_value;
+
+	bool has_sor_io_peak_current;
+};
 
 struct tegra_hdmi {
 	struct host1x_client client;
@@ -38,6 +50,8 @@
 	struct clk *clk_parent;
 	struct clk *clk;
 
+	const struct tegra_hdmi_config *config;
+
 	unsigned int audio_source;
 	unsigned int audio_freq;
 	bool stereo;
@@ -143,15 +157,7 @@
 	{         0,     0,      0,     0 },
 };
 
-struct tmds_config {
-	unsigned int pclk;
-	u32 pll0;
-	u32 pll1;
-	u32 pe_current;
-	u32 drive_current;
-};
-
-static const struct tmds_config tegra2_tmds_config[] = {
+static const struct tmds_config tegra20_tmds_config[] = {
 	{ /* slow pixel clock modes */
 		.pclk = 27000000,
 		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@
 	},
 };
 
-static const struct tmds_config tegra3_tmds_config[] = {
+static const struct tmds_config tegra30_tmds_config[] = {
 	{ /* 480p modes */
 		.pclk = 27000000,
 		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@
 	},
 };
 
+static const struct tmds_config tegra114_tmds_config[] = {
+	{ /* 480p/576p / 25.2MHz/27MHz modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_0_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 720p / 74.25MHz modes */
+		.pclk = 74250000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+			SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_15_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 1080p / 148.5MHz modes */
+		.pclk = 148500000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+			SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_10_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 225/297MHz modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+			| SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_0_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+	},
+};
+
 static const struct tegra_hdmi_audio_config *
 tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
 {
@@ -511,7 +596,7 @@
 
 	err = hdmi_audio_infoframe_init(&frame);
 	if (err < 0) {
-		dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+		dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
 			err);
 		return;
 	}
@@ -531,7 +616,7 @@
 	 * contain 7 bytes. Including the 3 byte header only the first 10
 	 * bytes can be programmed.
 	 */
-	tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+	tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
 
 	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
 			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@
 	tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
 	tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
 
-	value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
-	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+	tegra_hdmi_writel(hdmi, tmds->drive_current,
+			  HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+	value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+	value |= hdmi->config->fuse_override_value;
+	tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+	if (hdmi->config->has_sor_io_peak_current)
+		tegra_hdmi_writel(hdmi, tmds->peak_current,
+				  HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
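+/* Inspect the attached EDID to distinguish HDMI sinks from DVI ones. */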
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+	struct edid *edid;
+
+	if (!output->connector.edid_blob_ptr)
+		return false;
+
+	edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+	return drm_detect_hdmi_monitor(edid);
 }
 
 static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@
 	struct tegra_hdmi *hdmi = to_hdmi(output);
 	struct device_node *node = hdmi->dev->of_node;
 	unsigned int pulse_start, div82, pclk;
-	const struct tmds_config *tmds;
-	unsigned int num_tmds;
 	unsigned long value;
 	int retries = 1000;
 	int err;
 
+	hdmi->dvi = !tegra_output_is_hdmi(output);
+
 	pclk = mode->clock * 1000;
 	h_sync_width = mode->hsync_end - mode->hsync_start;
 	h_back_porch = mode->htotal - mode->hsync_end;
 	h_front_porch = mode->hsync_start - mode->hdisplay;
 
-	err = regulator_enable(hdmi->vdd);
-	if (err < 0) {
-		dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
-		return err;
-	}
-
 	err = regulator_enable(hdmi->pll);
 	if (err < 0) {
 		dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@
 	tegra_hdmi_setup_stereo_infoframe(hdmi);
 
 	/* TMDS CONFIG */
-	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-		num_tmds = ARRAY_SIZE(tegra3_tmds_config);
-		tmds = tegra3_tmds_config;
-	} else {
-		num_tmds = ARRAY_SIZE(tegra2_tmds_config);
-		tmds = tegra2_tmds_config;
-	}
-
-	for (i = 0; i < num_tmds; i++) {
-		if (pclk <= tmds[i].pclk) {
-			tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+	for (i = 0; i < hdmi->config->num_tmds; i++) {
+		if (pclk <= hdmi->config->tmds[i].pclk) {
+			tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
 			break;
 		}
 	}
@@ -824,7 +915,6 @@
 	tegra_periph_reset_assert(hdmi->clk);
 	clk_disable(hdmi->clk);
 	regulator_disable(hdmi->pll);
-	regulator_disable(hdmi->vdd);
 
 	return 0;
 }
@@ -1055,6 +1145,7 @@
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+	DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
 
 #undef DUMP_REG
 
@@ -1122,24 +1213,31 @@
 	return 0;
 }
 
-static int tegra_hdmi_drm_init(struct host1x_client *client,
-			       struct drm_device *drm)
+static int tegra_hdmi_init(struct host1x_client *client)
 {
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
 	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
 	int err;
 
+	err = regulator_enable(hdmi->vdd);
+	if (err < 0) {
+		dev_err(client->dev, "failed to enable VDD regulator: %d\n",
+			err);
+		return err;
+	}
+
 	hdmi->output.type = TEGRA_OUTPUT_HDMI;
 	hdmi->output.dev = client->dev;
 	hdmi->output.ops = &hdmi_ops;
 
-	err = tegra_output_init(drm, &hdmi->output);
+	err = tegra_output_init(tegra->drm, &hdmi->output);
 	if (err < 0) {
 		dev_err(client->dev, "output setup failed: %d\n", err);
 		return err;
 	}
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+		err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
 		if (err < 0)
 			dev_err(client->dev, "debugfs setup failed: %d\n", err);
 	}
@@ -1147,7 +1245,7 @@
 	return 0;
 }
 
-static int tegra_hdmi_drm_exit(struct host1x_client *client)
+static int tegra_hdmi_exit(struct host1x_client *client)
 {
 	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
 	int err;
@@ -1171,25 +1269,63 @@
 		return err;
 	}
 
+	regulator_disable(hdmi->vdd);
+
 	return 0;
 }
 
 static const struct host1x_client_ops hdmi_client_ops = {
-	.drm_init = tegra_hdmi_drm_init,
-	.drm_exit = tegra_hdmi_drm_exit,
+	.init = tegra_hdmi_init,
+	.exit = tegra_hdmi_exit,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+	.tmds = tegra20_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+	.tmds = tegra30_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+	.tmds = tegra114_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+	{ .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+	{ .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+	{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+	{ },
 };
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	const struct of_device_id *match;
 	struct tegra_hdmi *hdmi;
 	struct resource *regs;
 	int err;
 
+	match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
+	if (!match)
+		return -ENODEV;
+
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi)
 		return -ENOMEM;
 
+	hdmi->config = match->data;
 	hdmi->dev = &pdev->dev;
 	hdmi->audio_source = AUTO;
 	hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@
 
 	hdmi->output.dev = &pdev->dev;
 
-	err = tegra_output_parse_dt(&hdmi->output);
+	err = tegra_output_probe(&hdmi->output);
 	if (err < 0)
 		return err;
 
@@ -1252,11 +1388,11 @@
 
 	hdmi->irq = err;
 
-	hdmi->client.ops = &hdmi_client_ops;
 	INIT_LIST_HEAD(&hdmi->client.list);
+	hdmi->client.ops = &hdmi_client_ops;
 	hdmi->client.dev = &pdev->dev;
 
-	err = host1x_register_client(host1x, &hdmi->client);
+	err = host1x_client_register(&hdmi->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
@@ -1270,29 +1406,28 @@
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
 	int err;
 
-	err = host1x_unregister_client(host1x, &hdmi->client);
+	err = host1x_client_unregister(&hdmi->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
 			err);
 		return err;
 	}
 
+	err = tegra_output_remove(&hdmi->output);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+		return err;
+	}
+
 	clk_unprepare(hdmi->clk_parent);
 	clk_unprepare(hdmi->clk);
 
 	return 0;
 }
 
-static struct of_device_id tegra_hdmi_of_match[] = {
-	{ .compatible = "nvidia,tegra30-hdmi", },
-	{ .compatible = "nvidia,tegra20-hdmi", },
-	{ },
-};
-
 struct platform_driver tegra_hdmi_driver = {
 	.driver = {
 		.name = "tegra-hdmi",
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
similarity index 72%
rename from drivers/gpu/host1x/drm/hdmi.h
rename to drivers/gpu/drm/tegra/hdmi.h
index 52ac36e..0aebc48 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
 #define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
 #define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
 #define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
-#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) <<  0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) <<  8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
 
 #define DRIVE_CURRENT_1_500_mA  0x00
 #define DRIVE_CURRENT_1_875_mA  0x01
@@ -299,6 +302,79 @@
 #define DRIVE_CURRENT_24_375_mA 0x3d
 #define DRIVE_CURRENT_24_750_mA 0x3e
 
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
 #define HDMI_NV_PDISP_AUDIO_DEBUG0				0x7f
 #define HDMI_NV_PDISP_AUDIO_DEBUG1				0x80
 #define HDMI_NV_PDISP_AUDIO_DEBUG2				0x81
@@ -358,6 +434,23 @@
 #define PE_CURRENT_7_0_mA 0xe
 #define PE_CURRENT_7_5_mA 0xf
 
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
 #define HDMI_NV_PDISP_KEY_CTRL					0x9a
 #define HDMI_NV_PDISP_KEY_DEBUG0				0x9b
 #define HDMI_NV_PDISP_KEY_DEBUG1				0x9c
@@ -383,4 +476,61 @@
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
 
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT		0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) <<  0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) <<  8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0		0xd2
+
 #endif /* TEGRA_HDMI_H */
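
The PEAK_CURRENT_LANEx() field macros above each take one of the
PEAK_CURRENT_* encodings and place it in the per-lane byte of the
register. As a minimal sketch (not part of the patch), composing the
value a driver would write to HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT for a
uniform 3.0 mA setting looks like this:

	/* Sketch only: pack a 3.0 mA peak-current setting for all four
	 * lanes into the register value. */
	u32 value = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
		    PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
		    PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
		    PEAK_CURRENT_LANE3(PEAK_CURRENT_3_000_mA);
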
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
similarity index 91%
rename from drivers/gpu/host1x/drm/output.c
rename to drivers/gpu/drm/tegra/output.c
index 137ae81..2cb0065 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
 #include <linux/of_gpio.h>
-#include <linux/i2c.h>
 
 #include "drm.h"
 
@@ -81,10 +79,16 @@
 	return status;
 }
 
+static void drm_connector_clear(struct drm_connector *connector)
+{
+	memset(connector, 0, sizeof(*connector));
+}
+
 static void tegra_connector_destroy(struct drm_connector *connector)
 {
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
+	drm_connector_clear(connector);
 }
 
 static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@
 	.destroy = tegra_connector_destroy,
 };
 
+static void drm_encoder_clear(struct drm_encoder *encoder)
+{
+	memset(encoder, 0, sizeof(*encoder));
+}
+
 static void tegra_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
+	drm_encoder_clear(encoder);
 }
 
 static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@
 	return IRQ_HANDLED;
 }
 
-int tegra_output_parse_dt(struct tegra_output *output)
+int tegra_output_probe(struct tegra_output *output)
 {
 	enum of_gpio_flags flags;
 	struct device_node *ddc;
@@ -181,14 +191,6 @@
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
 						   &flags);
-
-	return 0;
-}
-
-int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
-{
-	int connector, encoder, err;
-
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
@@ -202,7 +204,8 @@
 		err = gpio_to_irq(output->hpd_gpio);
 		if (err < 0) {
 			dev_err(output->dev, "gpio_to_irq(): %d\n", err);
-			goto free_hpd;
+			gpio_free(output->hpd_gpio);
+			return err;
 		}
 
 		output->hpd_irq = err;
@@ -215,12 +218,33 @@
 		if (err < 0) {
 			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
 				output->hpd_irq, err);
-			goto free_hpd;
+			gpio_free(output->hpd_gpio);
+			return err;
 		}
 
 		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
 	}
 
+	return 0;
+}
+
+int tegra_output_remove(struct tegra_output *output)
+{
+	if (gpio_is_valid(output->hpd_gpio)) {
+		free_irq(output->hpd_irq, output);
+		gpio_free(output->hpd_gpio);
+	}
+
+	if (output->ddc)
+		put_device(&output->ddc->dev);
+
+	return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+	int connector, encoder;
+
 	switch (output->type) {
 	case TEGRA_OUTPUT_RGB:
 		connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@
 	drm_connector_init(drm, &output->connector, &connector_funcs,
 			   connector);
 	drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+	output->connector.dpms = DRM_MODE_DPMS_OFF;
 
 	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
 	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@
 	output->encoder.possible_crtcs = 0x3;
 
 	return 0;
-
-free_hpd:
-	gpio_free(output->hpd_gpio);
-
-	return err;
 }
 
 int tegra_output_exit(struct tegra_output *output)
 {
-	if (gpio_is_valid(output->hpd_gpio)) {
-		free_irq(output->hpd_irq, output);
-		gpio_free(output->hpd_gpio);
-	}
-
-	if (output->ddc)
-		put_device(&output->ddc->dev);
-
 	return 0;
 }
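
Besides the rename, this splits the old parse-dt/init pair so that DT
parsing, HPD GPIO and IRQ setup live in tegra_output_probe() with
teardown in tegra_output_remove(), while tegra_output_init() is left
handling only the DRM connector and encoder objects. A hedged sketch of
the intended call order in an output driver, under that split:

	/* Sketch: resource acquisition pairs with tegra_output_remove(),
	 * DRM object setup with tegra_output_exit(). */
	err = tegra_output_probe(output);	/* DT, HPD GPIO, IRQ */
	if (err < 0)
		return err;

	err = tegra_output_init(drm, output);	/* connector + encoder */
	if (err < 0)
		tegra_output_remove(output);
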
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
similarity index 96%
rename from drivers/gpu/host1x/drm/rgb.c
rename to drivers/gpu/drm/tegra/rgb.c
index 5aa66ef..ba47ca4 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -150,7 +147,7 @@
 	rgb->output.dev = dc->dev;
 	rgb->output.of_node = np;
 
-	err = tegra_output_parse_dt(&rgb->output);
+	err = tegra_output_probe(&rgb->output);
 	if (err < 0)
 		return err;
 
@@ -177,6 +174,20 @@
 	return 0;
 }
 
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+	int err;
+
+	if (!dc->rgb)
+		return 0;
+
+	err = tegra_output_remove(dc->rgb);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
 int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
 {
 	struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d101..7c3ef79 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@
 	tristate "DRM Support for TI LCDC Display Controller"
 	depends on DRM && OF && ARM
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dd..b433b9f 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -5,10 +5,6 @@
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
-	ttm_bo_manager.o
-
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
+	ttm_bo_manager.o ttm_page_alloc_dma.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857e..8d5a646 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -429,8 +429,20 @@
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret)
+	if (!ret) {
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_add_to_lru(bo);
+		}
+
 		ww_mutex_unlock(&bo->resv->lock);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@
 	return ret;
 }
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@
 			bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 					 no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cc904d..4834c46 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -343,19 +343,25 @@
 	if (ret)
 		goto out;
 
+	/*
+	 * Single TTM move. NOP.
+	 */
 	if (old_iomap == NULL && new_iomap == NULL)
 		goto out2;
+
+	/*
+	 * Move nonexistent data. NOP.
+	 */
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
-	if (ttm->state == tt_unpopulated) {
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
 		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-		if (ret) {
-			/* if we fail here don't nuke the mm node
-			 * as the bo still owns it */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 
 	add = 0;
@@ -381,11 +387,8 @@
 						   prot);
 		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-		if (ret) {
-			/* failing here, means keep old copy as-is */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 	mb();
 out2:
@@ -403,7 +406,12 @@
 	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
-	ttm_bo_mem_put(bo, &old_copy);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
+		ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15..ac617f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	ret = ttm_bo_wait(bo, false, false, true);
+	if (likely(ret == 0))
+		goto out_unlock;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+		ret = VM_FAULT_RETRY;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_unlock;
+
+		up_read(&vma->vm_mm->mmap_sem);
+		(void) ttm_bo_wait(bo, false, true, false);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	ret = ttm_bo_wait(bo, false, true, false);
+	if (unlikely(ret != 0))
+		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+			VM_FAULT_NOPAGE;
+
+out_unlock:
+	spin_unlock(&bdev->fence_lock);
+	return ret;
+}
+
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -57,6 +102,7 @@
 	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
+	struct vm_area_struct cvma;
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -91,18 +137,11 @@
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
-	spin_lock(&bdev->fence_lock);
-	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bdev->fence_lock);
-		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTARTSYS) ?
-			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-			goto out_unlock;
-		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	if (unlikely(ret != 0)) {
+		retval = ret;
+		goto out_unlock;
+	}
 
 	ret = ttm_mem_io_lock(man, true);
 	if (unlikely(ret != 0)) {
@@ -126,26 +165,21 @@
 	}
 
 	/*
-	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
-	 * since the mmap_sem is only held in read mode. However, we
-	 * modify only the caching bits of vma->vm_page_prot and
-	 * consider those bits protected by
-	 * the bo->mutex, as we should be the only writers.
-	 * There shouldn't really be any readers of these bits except
-	 * within vm_insert_mixed()? fork?
-	 *
-	 * TODO: Add a list of vmas to the bo, and change the
-	 * vma->vm_page_prot when the object changes caching policy, with
-	 * the correct locks held.
+	 * Make a local vma copy to modify the page_prot member
+	 * and vm_flags if necessary. The vma parameter is protected
+	 * by mmap_sem in write mode.
 	 */
+	cvma = *vma;
+	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
 	if (bo->mem.bus.is_iomem) {
-		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-						vma->vm_page_prot);
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						cvma.vm_page_prot);
 	} else {
 		ttm = bo->ttm;
-		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-		    vm_get_page_prot(vma->vm_flags) :
-		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+							cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -172,7 +206,7 @@
 			pfn = page_to_pfn(page);
 		}
 
-		ret = vm_insert_mixed(vma, address, pfn);
+		ret = vm_insert_mixed(&cvma, address, pfn);
 		/*
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
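
The local cvma copy is the key trick in this hunk: the fault path only
holds mmap_sem for read, so the shared vma must not be written. The
caching bits are therefore applied to a stack copy and
vm_insert_mixed() is pointed at that copy instead. A stripped-down
sketch of the pattern:

	/* Sketch of the local-copy pattern used above. */
	struct vm_area_struct cvma = *vma;	/* private copy */

	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
					vm_get_page_prot(cvma.vm_flags));
	ret = vm_insert_mixed(&cvma, address, pfn);	/* vma untouched */
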
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957bee..fb8259f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,7 @@
  *   when freed).
  */
 
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 #define pr_fmt(fmt) "[TTM] " fmt
 
 #include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
+
+#endif
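
Together with the ttm/Makefile change above, ttm_page_alloc_dma.c is
now always compiled and its body is compiled out with the preprocessor
when neither SWIOTLB nor the Intel IOMMU is configured. Callers must
then make sure the coherent path can exist at all before using it, as
vmw_dma_select_mode() does further down in this series:

	/* Sketch: guard mirroring the #if above on the consumer side. */
	#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
		if (dev_priv->map_mode == vmw_dma_alloc_coherent)
			return -EINVAL;	/* no coherent page pool */
	#endif
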
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af1..f025286 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER

 	help
 	  This is a KMS driver for the USB displaylink video adapters.
           Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0..3ddd6cd 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@
 	.unload = udl_driver_unload,
 
 	/* gem hooks */
-	.gem_init_object = udl_gem_init_object,
 	.gem_free_object = udl_gem_free_object,
 	.gem_vm_ops = &udl_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec94..1fbf7b3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 		 uint32_t handle, uint64_t *offset);
 
-int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf6461..24ffbe9 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@
 	}
 }
 
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
 	struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87..9278891 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@
 
 	/* Linux specific until context tracking code gets ported to BSD */
 	/* Last context, perform cleanup */
-	if (dev->ctx_count == 1 && dev->dev_private) {
+	if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
 		DRM_DEBUG("Last Context\n");
 		drm_irq_uninstall(dev);
 		via_cleanup_futex(dev_priv);
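
The via change drops the stale dev->ctx_count in favour of inspecting
dev->ctxlist directly. For reference, list_is_singular() is true iff
the list holds exactly one entry; a sketch of its semantics (this
mirrors the list.h helper, renamed to keep it clearly illustrative):

	/* Sketch: exactly one element means the list is non-empty and
	 * its first and last entries coincide. */
	static inline int my_list_is_singular(const struct list_head *head)
	{
		return !list_empty(head) && (head->next == head->prev);
	}
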
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 96dc84d..7776e6f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -141,37 +141,374 @@
 };
 
 struct vmw_ttm_tt {
-	struct ttm_tt ttm;
+	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	uint64_t sg_alloc_size;
+	bool mapped;
 };
 
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+	return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+	return __sg_page_iter_next(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+	return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+	return sg_page_iter_page(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+	return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+	return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+		     unsigned long p_offset)
+{
+	viter->i = p_offset - 1;
+	viter->num_pages = vsgt->num_pages;
+	switch (vsgt->mode) {
+	case vmw_dma_phys:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_phys_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->pages = vsgt->pages;
+		break;
+	case vmw_dma_alloc_coherent:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_dma_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->addrs = vsgt->addrs;
+		break;
+	case vmw_dma_map_populate:
+	case vmw_dma_map_bind:
+		viter->next = &__vmw_piter_sg_next;
+		viter->dma_address = &__vmw_piter_sg_addr;
+		viter->page = &__vmw_piter_sg_page;
+		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+				     vsgt->sgt->orig_nents, p_offset);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+		DMA_BIDIRECTIONAL);
+	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	int ret;
+
+	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+			 DMA_BIDIRECTIONAL);
+	if (unlikely(ret == 0))
+		return -ENOMEM;
+
+	vmw_tt->sgt.nents = ret;
+
+	return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping mode and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by a non-NULL
+ * storage pointer, the function returns success immediately.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+	struct vmw_piter iter;
+	dma_addr_t old;
+	int ret = 0;
+	static size_t sgl_size;
+	static size_t sgt_size;
+
+	if (vmw_tt->mapped)
+		return 0;
+
+	vsgt->mode = dev_priv->map_mode;
+	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+	vsgt->sgt = &vmw_tt->sgt;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		if (unlikely(!sgl_size)) {
+			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+			sgt_size = ttm_round_pot(sizeof(struct sg_table));
+		}
+		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
+					   true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+						vsgt->num_pages, 0,
+						(unsigned long)
+						vsgt->num_pages << PAGE_SHIFT,
+						GFP_KERNEL);
+		if (unlikely(ret != 0))
+			goto out_sg_alloc_fail;
+
+		if (vsgt->num_pages > vmw_tt->sgt.nents) {
+			uint64_t over_alloc =
+				sgl_size * (vsgt->num_pages -
+					    vmw_tt->sgt.nents);
+
+			ttm_mem_global_free(glob, over_alloc);
+			vmw_tt->sg_alloc_size -= over_alloc;
+		}
+
+		ret = vmw_ttm_map_for_dma(vmw_tt);
+		if (unlikely(ret != 0))
+			goto out_map_fail;
+
+		break;
+	default:
+		break;
+	}
+
+	old = ~((dma_addr_t) 0);
+	vmw_tt->vsgt.num_regions = 0;
+	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+		dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+		if (cur != old + PAGE_SIZE)
+			vmw_tt->vsgt.num_regions++;
+		old = cur;
+	}
+
+	vmw_tt->mapped = true;
+	return 0;
+
+out_map_fail:
+	sg_free_table(vmw_tt->vsgt.sgt);
+	vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+	return ret;
+}
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+	if (!vmw_tt->vsgt.sgt)
+		return;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		vmw_ttm_unmap_from_dma(vmw_tt);
+		sg_free_table(vmw_tt->vsgt.sgt);
+		vmw_tt->vsgt.sgt = NULL;
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_tt->sg_alloc_size);
+		break;
+	default:
+		break;
+	}
+	vmw_tt->mapped = false;
+}
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	int ret;
+
+	ret = vmw_ttm_map_dma(vmw_be);
+	if (unlikely(ret != 0))
+		return ret;
 
 	vmw_be->gmr_id = bo_mem->start;
 
-	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
 			    ttm->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
 	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+		vmw_ttm_unmap_dma(vmw_be);
+
 	return 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
-	ttm_tt_fini(ttm);
+	vmw_ttm_unmap_dma(vmw_be);
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+	else
+		ttm_tt_fini(ttm);
 	kfree(vmw_be);
 }
 
+static int vmw_ttm_populate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+		ret = ttm_mem_global_alloc(glob, size, false, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		if (unlikely(ret != 0))
+			ttm_mem_global_free(glob, size);
+	} else
+		ret = ttm_pool_populate(ttm);
+
+	return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+						 dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ttm_mem_global_free(glob, size);
+	} else
+		ttm_pool_unpopulate(ttm);
+}
+
 static struct ttm_backend_func vmw_ttm_func = {
 	.bind = vmw_ttm_bind,
 	.unbind = vmw_ttm_unbind,
@@ -183,20 +520,28 @@
 				 struct page *dummy_read_page)
 {
 	struct vmw_ttm_tt *vmw_be;
+	int ret;
 
-	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
 	if (!vmw_be)
 		return NULL;
 
-	vmw_be->ttm.func = &vmw_ttm_func;
+	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
-		kfree(vmw_be);
-		return NULL;
-	}
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
+				      dummy_read_page);
+	else
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
+				  dummy_read_page);
+	if (unlikely(ret != 0))
+		goto out_no_init;
 
-	return &vmw_be->ttm;
+	return &vmw_be->dma_ttm.ttm;
+out_no_init:
+	kfree(vmw_be);
+	return NULL;
 }
 
 int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -332,8 +677,8 @@
 
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
-	.ttm_tt_populate = &ttm_pool_populate,
-	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.ttm_tt_populate = &vmw_ttm_populate,
+	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.evict_flags = vmw_evict_flags,
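
The vmw_piter introduced above hides whether pages come from a plain
page array, a coherent-allocation address array, or an sg_table behind
three function pointers. Following the __sg_page_iter_start()
convention, the iterator starts one step before the first page, so a
consumer loop takes this shape (it is the shape vmwgfx_gmr.c below
uses):

	struct vmw_piter iter;

	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);

		/* e.g. program addr >> PAGE_SHIFT into the device */
	}
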
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0508f93..20d5485 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
+#include <linux/dma_remapping.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
+static int vmw_force_coherent;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+
 
 static void vmw_print_capabilities(uint32_t capabilities)
 {
@@ -427,12 +438,85 @@
 	dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
+ * system.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * This function tries to determine the IOMMU setup and what actions
+ * need to be taken by the driver to make system pages visible to the
+ * device.
+ * If this function decides that DMA is not possible, it returns -EINVAL.
+ * The driver may then try to disable features of the device that require
+ * DMA.
+ */
+static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+{
+	static const char *names[vmw_dma_map_max] = {
+		[vmw_dma_phys] = "Using physical TTM page addresses.",
+		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
+		[vmw_dma_map_populate] = "Keeping DMA mappings.",
+		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
+
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_enabled) {
+		dev_priv->map_mode = vmw_dma_map_populate;
+		goto out_fixup;
+	}
+#endif
+
+	if (!(vmw_force_iommu || vmw_force_coherent)) {
+		dev_priv->map_mode = vmw_dma_phys;
+		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+		return 0;
+	}
+
+	dev_priv->map_mode = vmw_dma_map_populate;
+
+	if (dma_ops->sync_single_for_cpu)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl() == 0)
+		dev_priv->map_mode = vmw_dma_map_populate;
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU
+out_fixup:
+#endif
+	if (dev_priv->map_mode == vmw_dma_map_populate &&
+	    vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+
+#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
+	/*
+	 * No coherent page pool
+	 */
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		return -EINVAL;
+#endif
+
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
+	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+
+	return 0;
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
 	int ret;
 	uint32_t svga_id;
 	enum vmw_res_type i;
+	bool refuse_dma = false;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -481,6 +565,11 @@
 	}
 
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+	ret = vmw_dma_select_mode(dev_priv);
+	if (unlikely(ret != 0)) {
+		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		refuse_dma = true;
+	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,8 +647,9 @@
 	}
 
 	dev_priv->has_gmr = true;
-	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-			   dev_priv->max_gmr_ids) != 0) {
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 dev_priv->max_gmr_ids) != 0) {
 		DRM_INFO("No GMR memory available. "
 			 "Graphics memory resources are very limited.\n");
 		dev_priv->has_gmr = false;
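
vmw_dma_select_mode() boils down to: prefer plain physical addresses
unless an IOMMU is active or a module parameter forces otherwise, and
fail outright if coherent pages are requested but no coherent pool can
exist. The chosen mode is logged once via DRM_INFO ("DMA map mode:
..."), which is the first thing to check when debugging mapping
problems. Condensed from the function above, later entries win:

	/* Sketch of the precedence, lowest first:
	 *   default             -> vmw_dma_phys
	 *   intel_iommu_enabled -> vmw_dma_map_populate
	 *   force_dma_api       -> vmw_dma_map_populate (alloc_coherent
	 *                          if the dma_ops need CPU syncs)
	 *   restrict_iommu      -> vmw_dma_map_bind
	 *   force_coherent      -> vmw_dma_alloc_coherent
	 * alloc_coherent without SWIOTLB/INTEL_IOMMU -> -EINVAL
	 */
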
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 150ec64..e401d5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -177,6 +177,58 @@
 	struct vmw_resource_val_node *node;
 };
 
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+	vmw_dma_phys,           /* Use physical page addresses */
+	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
+	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
+	vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contiguous pages
+ */
+struct vmw_sg_table {
+	enum vmw_dma_map_mode mode;
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_table *sgt;
+	unsigned long num_regions;
+	unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_page_iter iter;
+	unsigned long i;
+	unsigned long num_pages;
+	bool (*next)(struct vmw_piter *);
+	dma_addr_t (*dma_address)(struct vmw_piter *);
+	struct page *(*page)(struct vmw_piter *);
+};
+
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
@@ -358,6 +410,11 @@
 
 	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
+
+	/*
+	 * DMA mapping stuff.
+	 */
+	enum vmw_dma_map_mode map_mode;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-			struct page *pages[],
+			const struct vmw_sg_table *vsgt,
 			unsigned long num_pages,
 			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern void vmw_piter_start(struct vmw_piter *viter,
+			    const struct vmw_sg_table *vsgt,
+			    unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+	return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns a pointer to the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+	return viter->page(viter);
+}
 
 /**
  * Command submission - vmwgfx_execbuf.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 1a0bf07..6ef0b03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -32,9 +32,11 @@
 #define VMW_PPN_SIZE (sizeof(unsigned long))
 /* A future safe maximum remap size. */
 #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
-			 struct page *pages[],
+			 struct vmw_piter *iter,
 			 unsigned long num_pages,
 			 int gmr_id)
 {
@@ -81,11 +83,13 @@
 
 		for (i = 0; i < nr; ++i) {
 			if (VMW_PPN_SIZE <= 4)
-				*cmd = page_to_pfn(*pages++);
+				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 			else
-				*((uint64_t *)cmd) = page_to_pfn(*pages++);
+				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+					PAGE_SHIFT;
 
 			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			vmw_piter_next(iter);
 		}
 
 		num_pages -= nr;
@@ -120,22 +124,56 @@
 	vmw_fifo_commit(dev_priv, define_size);
 }
 
+
+static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
+				     struct list_head *desc_pages)
+{
+	struct page *page, *next;
+	struct svga_guest_mem_descriptor *page_virtual;
+	unsigned int desc_per_page = PAGE_SIZE /
+		sizeof(struct svga_guest_mem_descriptor) - 1;
+
+	if (list_empty(desc_pages))
+		return;
+
+	list_for_each_entry_safe(page, next, desc_pages, lru) {
+		list_del_init(&page->lru);
+
+		if (likely(desc_dma != DMA_ADDR_INVALID)) {
+			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
+				       DMA_TO_DEVICE);
+		}
+
+		page_virtual = kmap_atomic(page);
+		desc_dma = (dma_addr_t)
+			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+			PAGE_SHIFT;
+		kunmap_atomic(page_virtual);
+
+		__free_page(page);
+	}
+}
+
 /**
  * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  * the number of used descriptors.
+ *
  */
 
-static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
-				     struct page *pages[],
-				     unsigned long num_pages)
+static int vmw_gmr_build_descriptors(struct device *dev,
+				     struct list_head *desc_pages,
+				     struct vmw_piter *iter,
+				     unsigned long num_pages,
+				     dma_addr_t *first_dma)
 {
-	struct page *page, *next;
+	struct page *page;
 	struct svga_guest_mem_descriptor *page_virtual = NULL;
 	struct svga_guest_mem_descriptor *desc_virtual = NULL;
 	unsigned int desc_per_page;
 	unsigned long prev_pfn;
 	unsigned long pfn;
 	int ret;
+	dma_addr_t desc_dma;
 
 	desc_per_page = PAGE_SIZE /
 	    sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +186,12 @@
 		}
 
 		list_add_tail(&page->lru, desc_pages);
-
-		/*
-		 * Point previous page terminating descriptor to this
-		 * page before unmapping it.
-		 */
-
-		if (likely(page_virtual != NULL)) {
-			desc_virtual->ppn = page_to_pfn(page);
-			kunmap_atomic(page_virtual);
-		}
-
 		page_virtual = kmap_atomic(page);
 		desc_virtual = page_virtual - 1;
 		prev_pfn = ~(0UL);
 
 		while (likely(num_pages != 0)) {
-			pfn = page_to_pfn(*pages);
+			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 
 			if (pfn != prev_pfn + 1) {
 
@@ -181,104 +208,82 @@
 			}
 			prev_pfn = pfn;
 			--num_pages;
-			++pages;
+			vmw_piter_next(iter);
 		}
 
-		(++desc_virtual)->ppn = cpu_to_le32(0);
+		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
 		desc_virtual->num_pages = cpu_to_le32(0);
+		kunmap_atomic(page_virtual);
 	}
 
-	if (likely(page_virtual != NULL))
+	desc_dma = 0;
+	list_for_each_entry_reverse(page, desc_pages, lru) {
+		page_virtual = kmap_atomic(page);
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
 		kunmap_atomic(page_virtual);
+		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
+					DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, desc_dma)))
+			goto out_err;
+	}
+	*first_dma = desc_dma;
 
 	return 0;
 out_err:
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
+	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
 	return ret;
 }
 
-static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
-{
-	struct page *page, *next;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
-}
-
 static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, struct list_head *desc_pages)
+				     int gmr_id, dma_addr_t desc_dma)
 {
-	struct page *page;
-
-	if (unlikely(list_empty(desc_pages)))
-		return;
-
-	page = list_entry(desc_pages->next, struct page, lru);
-
 	mutex_lock(&dev_priv->hw_mutex);
 
 	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
 	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
 	mb();
 
 	mutex_unlock(&dev_priv->hw_mutex);
 
 }
 
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- */
-
-static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-					unsigned long num_pages)
-{
-	unsigned long prev_pfn = ~(0UL);
-	unsigned long pfn;
-	unsigned long descriptors = 0;
-
-	while (num_pages--) {
-		pfn = page_to_pfn(*pages++);
-		if (prev_pfn + 1 != pfn)
-			++descriptors;
-		prev_pfn = pfn;
-	}
-
-	return descriptors;
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct page *pages[],
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
 	struct list_head desc_pages;
+	dma_addr_t desc_dma = 0;
+	struct device *dev = dev_priv->dev->dev;
+	struct vmw_piter data_iter;
 	int ret;
 
+	vmw_piter_start(&data_iter, vsgt, 0);
+
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
 	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 
 	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
 		return -EINVAL;
 
-	if (vmw_gmr_count_descriptors(pages, num_pages) >
-	    dev_priv->max_gmr_descriptors)
+	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&desc_pages);
 
-	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
+					num_pages, &desc_dma);
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
-	vmw_gmr_free_descriptors(&desc_pages);
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
+	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
 
 	return 0;
 }
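
Worth spelling out: the descriptor pages are now chained by DMA address
instead of PFN, and the chain is built back-to-front so each page is
dma_map_page()d only after the address of its successor has been
patched into its terminating descriptor. desc_dma therefore ends up
holding the head of the chain, which vmw_gmr_fire_descriptors() writes
to SVGA_REG_GMR_DESCRIPTOR. A sketch of the idea, where set_next_desc()
is a hypothetical stand-in for the kmap_atomic() store done above:

	dma_addr_t next = 0;	/* 0 terminates the chain */

	list_for_each_entry_reverse(page, desc_pages, lru) {
		set_next_desc(page, next);	/* hypothetical helper */
		next = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, next))
			goto out_err;
	}
	*first_dma = next;
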
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40..a51f48e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -168,7 +168,7 @@
 	fb = drm_framebuffer_lookup(dev, arg->fb_id);
 	if (!fb) {
 		DRM_ERROR("Invalid framebuffer id.\n");
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_no_fb;
 	}
 	vfb = vmw_framebuffer_to_vfb(fb);
@@ -252,7 +252,7 @@
 	fb = drm_framebuffer_lookup(dev, arg->fb_id);
 	if (!fb) {
 		DRM_ERROR("Invalid framebuffer id.\n");
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_no_fb;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c06..ecb3d86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1508,7 +1508,7 @@
 
 	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4be..252501a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,6 +32,8 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
 	struct ttm_base_object base;
 	struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@
  *                         to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptibly.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, &ticket, interruptible,
+					&val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1141,6 +1145,7 @@
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
 
 	if (likely(!res->func->may_evict))
 		return 0;
@@ -1155,7 +1160,7 @@
 
 		write_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
-			DRM_ERROR("Out of device device id entries "
+			DRM_ERROR("Out of device device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
 			write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@
 		list_del_init(&evict_res->lru_head);
 
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, true);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
@@ -1253,13 +1270,15 @@
  * @type:           The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 				    enum vmw_res_type type)
 {
 	struct list_head *lru_list = &dev_priv->res_lru[type];
 	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@
 					 lru_head));
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, false);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b..7d6bed2 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@
 
 	  If unsure, choose Y.
 
-source "drivers/gpu/host1x/drm/Kconfig"
-
 endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6..afa1e9e 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
-ccflags-y = -Idrivers/gpu/host1x
-
 host1x-y = \
+	bus.o \
 	syncpt.o \
 	dev.o \
 	intr.o \
@@ -8,13 +7,7 @@
 	channel.o \
 	job.o \
 	debug.o \
-	hw/host1x01.o
+	hw/host1x01.o \
+	hw/host1x02.o
 
-ccflags-y += -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
-
-host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
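
The Makefile now also builds bus.c, the new host1x bus implementation
below: clients (display controllers, HDMI, 2D, ...) register as
host1x_client instances, the bus collects them into subdevices of a
composite host1x_device, and the device driver's probe runs only once
every subdevice named in the DT has shown up. A hedged sketch of the
client side, assuming public host1x_client_register()-style wrappers in
front of the static helpers visible below (the wrapper name and the
exact host1x_client_ops layout are assumptions here):

	struct foo {
		struct host1x_client client;
	};

	static int foo_init(struct host1x_client *client) { return 0; }
	static int foo_exit(struct host1x_client *client) { return 0; }

	static const struct host1x_client_ops foo_client_ops = {
		.init = foo_init,	/* run from host1x_device_init() */
		.exit = foo_exit,	/* run from host1x_device_exit() */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo),
					       GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->client.dev = &pdev->dev;
		foo->client.ops = &foo_client_ops;

		/* assumed public wrapper around the registration paths
		 * shown in bus.c below */
		return host1x_client_register(&foo->client);
	}
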
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 0000000..509383f
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/host1x.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+static DEFINE_MUTEX(clients_lock);
+static LIST_HEAD(clients);
+
+static DEFINE_MUTEX(drivers_lock);
+static LIST_HEAD(drivers);
+
+static DEFINE_MUTEX(devices_lock);
+static LIST_HEAD(devices);
+
+struct host1x_subdev {
+	struct host1x_client *client;
+	struct device_node *np;
+	struct list_head list;
+};
+
+/**
+ * host1x_subdev_add() - add a new subdevice with an associated device node
+ */
+static int host1x_subdev_add(struct host1x_device *device,
+			     struct device_node *np)
+{
+	struct host1x_subdev *subdev;
+
+	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
+	if (!subdev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&subdev->list);
+	subdev->np = of_node_get(np);
+
+	mutex_lock(&device->subdevs_lock);
+	list_add_tail(&subdev->list, &device->subdevs);
+	mutex_unlock(&device->subdevs_lock);
+
+	return 0;
+}
+
+/**
+ * host1x_subdev_del() - remove subdevice
+ */
+static void host1x_subdev_del(struct host1x_subdev *subdev)
+{
+	list_del(&subdev->list);
+	of_node_put(subdev->np);
+	kfree(subdev);
+}
+
+/**
+ * host1x_device_parse_dt() - scan device tree and add matching subdevices
+ */
+static int host1x_device_parse_dt(struct host1x_device *device)
+{
+	struct device_node *np;
+	int err;
+
+	for_each_child_of_node(device->dev.parent->of_node, np) {
+		if (of_match_node(device->driver->subdevs, np) &&
+		    of_device_is_available(np)) {
+			err = host1x_subdev_add(device, np);
+			if (err < 0)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static void host1x_subdev_register(struct host1x_device *device,
+				   struct host1x_subdev *subdev,
+				   struct host1x_client *client)
+{
+	int err;
+
+	/*
+	 * Move the subdevice to the list of active (registered) subdevices
+	 * and associate it with a client. At the same time, associate the
+	 * client with its parent device.
+	 */
+	mutex_lock(&device->subdevs_lock);
+	mutex_lock(&device->clients_lock);
+	list_move_tail(&client->list, &device->clients);
+	list_move_tail(&subdev->list, &device->active);
+	client->parent = &device->dev;
+	subdev->client = client;
+	mutex_unlock(&device->clients_lock);
+	mutex_unlock(&device->subdevs_lock);
+
+	/*
+	 * When all subdevices have been registered, the composite device is
+	 * ready to be probed.
+	 */
+	if (list_empty(&device->subdevs)) {
+		err = device->driver->probe(device);
+		if (err < 0)
+			dev_err(&device->dev, "probe failed: %d\n", err);
+	}
+}
+
+static void __host1x_subdev_unregister(struct host1x_device *device,
+				       struct host1x_subdev *subdev)
+{
+	struct host1x_client *client = subdev->client;
+	int err;
+
+	/*
+	 * If all subdevices have been activated, we're about to remove the
+	 * first active subdevice, so unload the driver first.
+	 */
+	if (list_empty(&device->subdevs)) {
+		err = device->driver->remove(device);
+		if (err < 0)
+			dev_err(&device->dev, "remove failed: %d\n", err);
+	}
+
+	/*
+	 * Move the subdevice back to the list of idle subdevices and remove
+	 * it from list of clients.
+	 */
+	mutex_lock(&device->clients_lock);
+	subdev->client = NULL;
+	client->parent = NULL;
+	list_move_tail(&subdev->list, &device->subdevs);
+	/*
+	 * XXX: Perhaps don't do this here, but rather explicitly remove it
+	 * when the device is about to be deleted.
+	 *
+	 * This is somewhat complicated by the fact that this function is
+	 * used to remove the subdevice when a client is unregistered but
+	 * also when the composite device is about to be removed.
+	 */
+	list_del_init(&client->list);
+	mutex_unlock(&device->clients_lock);
+}
+
+static void host1x_subdev_unregister(struct host1x_device *device,
+				     struct host1x_subdev *subdev)
+{
+	mutex_lock(&device->subdevs_lock);
+	__host1x_subdev_unregister(device, subdev);
+	mutex_unlock(&device->subdevs_lock);
+}
+
+int host1x_device_init(struct host1x_device *device)
+{
+	struct host1x_client *client;
+	int err;
+
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry(client, &device->clients, list) {
+		if (client->ops && client->ops->init) {
+			err = client->ops->init(client);
+			if (err < 0) {
+				dev_err(&device->dev,
+					"failed to initialize %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&device->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&device->clients_lock);
+
+	return 0;
+}
+
+int host1x_device_exit(struct host1x_device *device)
+{
+	struct host1x_client *client;
+	int err;
+
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry_reverse(client, &device->clients, list) {
+		if (client->ops && client->ops->exit) {
+			err = client->ops->exit(client);
+			if (err < 0) {
+				dev_err(&device->dev,
+					"failed to cleanup %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&device->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&device->clients_lock);
+
+	return 0;
+}
+
+static int host1x_register_client(struct host1x *host1x,
+				  struct host1x_client *client)
+{
+	struct host1x_device *device;
+	struct host1x_subdev *subdev;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry(device, &host1x->devices, list) {
+		list_for_each_entry(subdev, &device->subdevs, list) {
+			if (subdev->np == client->dev->of_node) {
+				host1x_subdev_register(device, subdev, client);
+				mutex_unlock(&host1x->devices_lock);
+				return 0;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+	return -ENODEV;
+}
+
+static int host1x_unregister_client(struct host1x *host1x,
+				    struct host1x_client *client)
+{
+	struct host1x_device *device, *dt;
+	struct host1x_subdev *subdev;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry_safe(device, dt, &host1x->devices, list) {
+		list_for_each_entry(subdev, &device->active, list) {
+			if (subdev->client == client) {
+				host1x_subdev_unregister(device, subdev);
+				mutex_unlock(&host1x->devices_lock);
+				return 0;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+	return -ENODEV;
+}
+
+struct bus_type host1x_bus_type = {
+	.name = "host1x",
+};
+
+int host1x_bus_init(void)
+{
+	return bus_register(&host1x_bus_type);
+}
+
+void host1x_bus_exit(void)
+{
+	bus_unregister(&host1x_bus_type);
+}
+
+static void host1x_device_release(struct device *dev)
+{
+	struct host1x_device *device = to_host1x_device(dev);
+
+	kfree(device);
+}
+
+static int host1x_device_add(struct host1x *host1x,
+			     struct host1x_driver *driver)
+{
+	struct host1x_client *client, *tmp;
+	struct host1x_subdev *subdev;
+	struct host1x_device *device;
+	int err;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	mutex_init(&device->subdevs_lock);
+	INIT_LIST_HEAD(&device->subdevs);
+	INIT_LIST_HEAD(&device->active);
+	mutex_init(&device->clients_lock);
+	INIT_LIST_HEAD(&device->clients);
+	INIT_LIST_HEAD(&device->list);
+	device->driver = driver;
+
+	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
+	device->dev.dma_mask = &device->dev.coherent_dma_mask;
+	device->dev.release = host1x_device_release;
+	dev_set_name(&device->dev, "%s", driver->name);
+	device->dev.bus = &host1x_bus_type;
+	device->dev.parent = host1x->dev;
+
+	err = device_register(&device->dev);
+	if (err < 0)
+		return err;
+
+	err = host1x_device_parse_dt(device);
+	if (err < 0) {
+		device_unregister(&device->dev);
+		return err;
+	}
+
+	mutex_lock(&host1x->devices_lock);
+	list_add_tail(&device->list, &host1x->devices);
+	mutex_unlock(&host1x->devices_lock);
+
+	mutex_lock(&clients_lock);
+
+	list_for_each_entry_safe(client, tmp, &clients, list) {
+		list_for_each_entry(subdev, &device->subdevs, list) {
+			if (subdev->np == client->dev->of_node) {
+				host1x_subdev_register(device, subdev, client);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+
+/*
+ * Removes a device by first unregistering any subdevices and then removing
+ * the device itself from the list of devices.
+ *
+ * This function must be called with the host1x->devices_lock held.
+ */
+static void host1x_device_del(struct host1x *host1x,
+			      struct host1x_device *device)
+{
+	struct host1x_subdev *subdev, *sd;
+	struct host1x_client *client, *cl;
+
+	mutex_lock(&device->subdevs_lock);
+
+	/* unregister subdevices */
+	list_for_each_entry_safe(subdev, sd, &device->active, list) {
+		/*
+		 * host1x_subdev_unregister() will remove the client from
+		 * any lists, so we'll need to manually add it back to the
+		 * list of idle clients.
+		 *
+		 * XXX: Alternatively, perhaps don't remove the client from
+		 * any lists in host1x_subdev_unregister() and instead do
+		 * that explicitly from host1x_unregister_client()?
+		 */
+		client = subdev->client;
+
+		__host1x_subdev_unregister(device, subdev);
+
+		/* add the client to the list of idle clients */
+		mutex_lock(&clients_lock);
+		list_add_tail(&client->list, &clients);
+		mutex_unlock(&clients_lock);
+	}
+
+	/* remove subdevices */
+	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+		host1x_subdev_del(subdev);
+
+	mutex_unlock(&device->subdevs_lock);
+
+	/* move clients to idle list */
+	mutex_lock(&clients_lock);
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry_safe(client, cl, &device->clients, list)
+		list_move_tail(&client->list, &clients);
+
+	mutex_unlock(&device->clients_lock);
+	mutex_unlock(&clients_lock);
+
+	/* finally remove the device */
+	list_del_init(&device->list);
+	device_unregister(&device->dev);
+}
+
+static void host1x_attach_driver(struct host1x *host1x,
+				 struct host1x_driver *driver)
+{
+	struct host1x_device *device;
+	int err;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry(device, &host1x->devices, list) {
+		if (device->driver == driver) {
+			mutex_unlock(&host1x->devices_lock);
+			return;
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+
+	err = host1x_device_add(host1x, driver);
+	if (err < 0)
+		dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+}
+
+static void host1x_detach_driver(struct host1x *host1x,
+				 struct host1x_driver *driver)
+{
+	struct host1x_device *device, *tmp;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
+		if (device->driver == driver)
+			host1x_device_del(host1x, device);
+
+	mutex_unlock(&host1x->devices_lock);
+}
+
+int host1x_register(struct host1x *host1x)
+{
+	struct host1x_driver *driver;
+
+	mutex_lock(&devices_lock);
+	list_add_tail(&host1x->list, &devices);
+	mutex_unlock(&devices_lock);
+
+	mutex_lock(&drivers_lock);
+
+	list_for_each_entry(driver, &drivers, list)
+		host1x_attach_driver(host1x, driver);
+
+	mutex_unlock(&drivers_lock);
+
+	return 0;
+}
+
+int host1x_unregister(struct host1x *host1x)
+{
+	struct host1x_driver *driver;
+
+	mutex_lock(&drivers_lock);
+
+	list_for_each_entry(driver, &drivers, list)
+		host1x_detach_driver(host1x, driver);
+
+	mutex_unlock(&drivers_lock);
+
+	mutex_lock(&devices_lock);
+	list_del_init(&host1x->list);
+	mutex_unlock(&devices_lock);
+
+	return 0;
+}
+
+int host1x_driver_register(struct host1x_driver *driver)
+{
+	struct host1x *host1x;
+
+	INIT_LIST_HEAD(&driver->list);
+
+	mutex_lock(&drivers_lock);
+	list_add_tail(&driver->list, &drivers);
+	mutex_unlock(&drivers_lock);
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list)
+		host1x_attach_driver(host1x, driver);
+
+	mutex_unlock(&devices_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_driver_register);
+
+void host1x_driver_unregister(struct host1x_driver *driver)
+{
+	mutex_lock(&drivers_lock);
+	list_del_init(&driver->list);
+	mutex_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(host1x_driver_unregister);
+
+int host1x_client_register(struct host1x_client *client)
+{
+	struct host1x *host1x;
+	int err;
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list) {
+		err = host1x_register_client(host1x, client);
+		if (!err) {
+			mutex_unlock(&devices_lock);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&devices_lock);
+
+	mutex_lock(&clients_lock);
+	list_add_tail(&client->list, &clients);
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_client_register);
+
+int host1x_client_unregister(struct host1x_client *client)
+{
+	struct host1x_client *c;
+	struct host1x *host1x;
+	int err;
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list) {
+		err = host1x_unregister_client(host1x, client);
+		if (!err) {
+			mutex_unlock(&devices_lock);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&devices_lock);
+	mutex_lock(&clients_lock);
+
+	list_for_each_entry(c, &clients, list) {
+		if (c == client) {
+			list_del_init(&c->list);
+			break;
+		}
+	}
+
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_client_unregister);
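A hedged sketch of how a client driver sits on top of the two entry points
exported above; the example_* names, the embedding structure and the ops
wiring are illustrative assumptions, not part of this patch:

	struct example {
		struct host1x_client client;
	};

	static const struct host1x_client_ops example_client_ops = {
		.init = example_init,	/* called from host1x_device_init() */
		.exit = example_exit,	/* called from host1x_device_exit() */
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example *ex;

		ex = devm_kzalloc(&pdev->dev, sizeof(*ex), GFP_KERNEL);
		if (!ex)
			return -ENOMEM;

		ex->client.dev = &pdev->dev;
		ex->client.ops = &example_client_ops;

		/*
		 * Binds immediately when a registered host1x device lists
		 * this OF node as a subdevice; otherwise the client parks
		 * on the global idle list until host1x_register() runs.
		 */
		return host1x_client_register(&ex->client);
	}

The matching remove path would call host1x_client_unregister(&ex->client),
which either detaches the client from its active subdevice or simply drops it
from the idle list.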
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
similarity index 60%
rename from drivers/gpu/host1x/host1x_client.h
rename to drivers/gpu/host1x/bus.h
index 9b85f10..4099e99 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef HOST1X_CLIENT_H
-#define HOST1X_CLIENT_H
+#ifndef HOST1X_BUS_H
+#define HOST1X_BUS_H
 
-struct device;
-struct platform_device;
+struct host1x;
 
-#ifdef CONFIG_DRM_TEGRA
-int host1x_drm_alloc(struct platform_device *pdev);
-#else
-static inline int host1x_drm_alloc(struct platform_device *pdev)
-{
-	return 0;
-}
-#endif
+int host1x_bus_init(void);
+void host1x_bus_exit(void);
 
-void host1x_set_drm_data(struct device *dev, void *data);
-void *host1x_get_drm_data(struct device *dev);
+int host1x_register(struct host1x *host1x);
+int host1x_unregister(struct host1x *host1x);
 
 #endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172..3995255 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/host1x.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/kfifo.h>
@@ -30,7 +31,6 @@
 #include "channel.h"
 #include "dev.h"
 #include "debug.h"
-#include "host1x_bo.h"
 #include "job.h"
 
 /*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8..df767cf 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@
 /* channel list operations */
 int host1x_channel_list_init(struct host1x *host);
 
-struct host1x_channel *host1x_channel_request(struct device *dev);
-void host1x_channel_free(struct host1x_channel *channel);
-struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
-void host1x_channel_put(struct host1x_channel *channel);
-int host1x_job_submit(struct host1x_job *job);
-
 #define host1x_for_each_channel(host, channel)				\
 	list_for_each_entry(channel, &host->chlist.list, list)
 
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 4716302..80da003 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
 
+#include "bus.h"
 #include "dev.h"
 #include "intr.h"
 #include "channel.h"
 #include "debug.h"
 #include "hw/host1x01.h"
-#include "host1x_client.h"
-
-void host1x_set_drm_data(struct device *dev, void *data)
-{
-	struct host1x *host1x = dev_get_drvdata(dev);
-	host1x->drm_data = data;
-}
-
-void *host1x_get_drm_data(struct device *dev)
-{
-	struct host1x *host1x = dev_get_drvdata(dev);
-	return host1x ? host1x->drm_data : NULL;
-}
+#include "hw/host1x02.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -79,7 +68,17 @@
 	.sync_offset	= 0x3000,
 };
 
+static const struct host1x_info host1x02_info = {
+	.nb_channels = 9,
+	.nb_pts = 32,
+	.nb_mlocks = 16,
+	.nb_bases = 12,
+	.init = host1x02_init,
+	.sync_offset = 0x3000,
+};
+
 static struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
 	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
 	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
 	{ },
@@ -114,6 +113,9 @@
 	if (!host)
 		return -ENOMEM;
 
+	mutex_init(&host->devices_lock);
+	INIT_LIST_HEAD(&host->devices);
+	INIT_LIST_HEAD(&host->list);
 	host->dev = &pdev->dev;
 	host->info = id->data;
 
@@ -152,7 +154,7 @@
 	err = host1x_syncpt_init(host);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize syncpts\n");
-		return err;
+		goto fail_unprepare_disable;
 	}
 
 	err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@
 
 	host1x_debug_init(host);
 
-	host1x_drm_alloc(pdev);
+	err = host1x_register(host);
+	if (err < 0)
+		goto fail_deinit_intr;
 
 	return 0;
 
+fail_deinit_intr:
+	host1x_intr_deinit(host);
 fail_deinit_syncpt:
 	host1x_syncpt_deinit(host);
+fail_unprepare_disable:
+	clk_disable_unprepare(host->clk);
 	return err;
 }
 
-static int __exit host1x_remove(struct platform_device *pdev)
+static int host1x_remove(struct platform_device *pdev)
 {
 	struct host1x *host = platform_get_drvdata(pdev);
 
+	host1x_unregister(host);
 	host1x_intr_deinit(host);
 	host1x_syncpt_deinit(host);
 	clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@
 }
 
 static struct platform_driver tegra_host1x_driver = {
-	.probe = host1x_probe,
-	.remove = __exit_p(host1x_remove),
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "tegra-host1x",
 		.of_match_table = host1x_of_match,
 	},
+	.probe = host1x_probe,
+	.remove = host1x_remove,
 };
 
 static int __init tegra_host1x_init(void)
 {
 	int err;
 
-	err = platform_driver_register(&tegra_host1x_driver);
+	err = host1x_bus_init();
 	if (err < 0)
 		return err;
 
-#ifdef CONFIG_DRM_TEGRA
-	err = platform_driver_register(&tegra_dc_driver);
-	if (err < 0)
-		goto unregister_host1x;
-
-	err = platform_driver_register(&tegra_hdmi_driver);
-	if (err < 0)
-		goto unregister_dc;
-
-	err = platform_driver_register(&tegra_gr2d_driver);
-	if (err < 0)
-		goto unregister_hdmi;
-#endif
+	err = platform_driver_register(&tegra_host1x_driver);
+	if (err < 0) {
+		host1x_bus_exit();
+		return err;
+	}
 
 	return 0;
-
-#ifdef CONFIG_DRM_TEGRA
-unregister_hdmi:
-	platform_driver_unregister(&tegra_hdmi_driver);
-unregister_dc:
-	platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
-	platform_driver_unregister(&tegra_host1x_driver);
-	return err;
-#endif
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
-#ifdef CONFIG_DRM_TEGRA
-	platform_driver_unregister(&tegra_gr2d_driver);
-	platform_driver_unregister(&tegra_hdmi_driver);
-	platform_driver_unregister(&tegra_dc_driver);
-#endif
 	platform_driver_unregister(&tegra_host1x_driver);
+	host1x_bus_exit();
 }
 module_exit(tegra_host1x_exit);
 
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8..a61a976 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
 #include "job.h"
 
 struct host1x_syncpt;
+struct host1x_syncpt_base;
 struct host1x_channel;
 struct host1x_cdma;
 struct host1x_job;
@@ -102,6 +103,7 @@
 
 	void __iomem *regs;
 	struct host1x_syncpt *syncpt;
+	struct host1x_syncpt_base *bases;
 	struct device *dev;
 	struct clk *clk;
 
@@ -125,7 +127,10 @@
 
 	struct dentry *debugfs;
 
-	void *drm_data;
+	struct mutex devices_lock;
+	struct list_head devices;
+
+	struct list_head list;
 };
 
 void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@
 	host->debug_op->show_mlocks(host, o);
 }
 
-extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_gr2d_driver;
-
 #endif
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
deleted file mode 100644
index 8c61cee..0000000
--- a/drivers/gpu/host1x/drm/drm.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include <drm/drm.h>
-#include <drm/drmP.h>
-
-#include "host1x_client.h"
-#include "dev.h"
-#include "drm.h"
-#include "gem.h"
-#include "syncpt.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-struct host1x_drm_client {
-	struct host1x_client *client;
-	struct device_node *np;
-	struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x_drm *host1x,
-				 struct device_node *np)
-{
-	struct host1x_drm_client *client;
-
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&client->list);
-	client->np = of_node_get(np);
-
-	list_add_tail(&client->list, &host1x->drm_clients);
-
-	return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x_drm *host1x,
-				      struct host1x_drm_client *drm,
-				      struct host1x_client *client)
-{
-	mutex_lock(&host1x->drm_clients_lock);
-	list_del_init(&drm->list);
-	list_add_tail(&drm->list, &host1x->drm_active);
-	drm->client = client;
-	mutex_unlock(&host1x->drm_clients_lock);
-
-	return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x_drm *host1x,
-				    struct host1x_drm_client *client)
-{
-	mutex_lock(&host1x->drm_clients_lock);
-	list_del_init(&client->list);
-	mutex_unlock(&host1x->drm_clients_lock);
-
-	of_node_put(client->np);
-	kfree(client);
-
-	return 0;
-}
-
-static int host1x_parse_dt(struct host1x_drm *host1x)
-{
-	static const char * const compat[] = {
-		"nvidia,tegra20-dc",
-		"nvidia,tegra20-hdmi",
-		"nvidia,tegra20-gr2d",
-		"nvidia,tegra30-dc",
-		"nvidia,tegra30-hdmi",
-		"nvidia,tegra30-gr2d",
-	};
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < ARRAY_SIZE(compat); i++) {
-		struct device_node *np;
-
-		for_each_child_of_node(host1x->dev->of_node, np) {
-			if (of_device_is_compatible(np, compat[i]) &&
-			    of_device_is_available(np)) {
-				err = host1x_add_drm_client(host1x, np);
-				if (err < 0)
-					return err;
-			}
-		}
-	}
-
-	return 0;
-}
-
-int host1x_drm_alloc(struct platform_device *pdev)
-{
-	struct host1x_drm *host1x;
-	int err;
-
-	host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
-	if (!host1x)
-		return -ENOMEM;
-
-	mutex_init(&host1x->drm_clients_lock);
-	INIT_LIST_HEAD(&host1x->drm_clients);
-	INIT_LIST_HEAD(&host1x->drm_active);
-	mutex_init(&host1x->clients_lock);
-	INIT_LIST_HEAD(&host1x->clients);
-	host1x->dev = &pdev->dev;
-
-	err = host1x_parse_dt(host1x);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
-		return err;
-	}
-
-	host1x_set_drm_data(&pdev->dev, host1x);
-
-	return 0;
-}
-
-int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
-{
-	struct host1x_client *client;
-
-	mutex_lock(&host1x->clients_lock);
-
-	list_for_each_entry(client, &host1x->clients, list) {
-		if (client->ops && client->ops->drm_init) {
-			int err = client->ops->drm_init(client, drm);
-			if (err < 0) {
-				dev_err(host1x->dev,
-					"DRM setup failed for %s: %d\n",
-					dev_name(client->dev), err);
-				mutex_unlock(&host1x->clients_lock);
-				return err;
-			}
-		}
-	}
-
-	mutex_unlock(&host1x->clients_lock);
-
-	return 0;
-}
-
-int host1x_drm_exit(struct host1x_drm *host1x)
-{
-	struct platform_device *pdev = to_platform_device(host1x->dev);
-	struct host1x_client *client;
-
-	if (!host1x->drm)
-		return 0;
-
-	mutex_lock(&host1x->clients_lock);
-
-	list_for_each_entry_reverse(client, &host1x->clients, list) {
-		if (client->ops && client->ops->drm_exit) {
-			int err = client->ops->drm_exit(client);
-			if (err < 0) {
-				dev_err(host1x->dev,
-					"DRM cleanup failed for %s: %d\n",
-					dev_name(client->dev), err);
-				mutex_unlock(&host1x->clients_lock);
-				return err;
-			}
-		}
-	}
-
-	mutex_unlock(&host1x->clients_lock);
-
-	drm_platform_exit(&tegra_drm_driver, pdev);
-	host1x->drm = NULL;
-
-	return 0;
-}
-
-int host1x_register_client(struct host1x_drm *host1x,
-			   struct host1x_client *client)
-{
-	struct host1x_drm_client *drm, *tmp;
-	int err;
-
-	mutex_lock(&host1x->clients_lock);
-	list_add_tail(&client->list, &host1x->clients);
-	mutex_unlock(&host1x->clients_lock);
-
-	list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
-		if (drm->np == client->dev->of_node)
-			host1x_activate_drm_client(host1x, drm, client);
-
-	if (list_empty(&host1x->drm_clients)) {
-		struct platform_device *pdev = to_platform_device(host1x->dev);
-
-		err = drm_platform_init(&tegra_drm_driver, pdev);
-		if (err < 0) {
-			dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-int host1x_unregister_client(struct host1x_drm *host1x,
-			     struct host1x_client *client)
-{
-	struct host1x_drm_client *drm, *tmp;
-	int err;
-
-	list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
-		if (drm->client == client) {
-			err = host1x_drm_exit(host1x);
-			if (err < 0) {
-				dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
-					err);
-				return err;
-			}
-
-			host1x_remove_drm_client(host1x, drm);
-			break;
-		}
-	}
-
-	mutex_lock(&host1x->clients_lock);
-	list_del_init(&client->list);
-	mutex_unlock(&host1x->clients_lock);
-
-	return 0;
-}
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
-	struct host1x_drm *host1x;
-	int err;
-
-	host1x = host1x_get_drm_data(drm->dev);
-	drm->dev_private = host1x;
-	host1x->drm = drm;
-
-	drm_mode_config_init(drm);
-
-	err = host1x_drm_init(host1x, drm);
-	if (err < 0)
-		return err;
-
-	/*
-	 * We don't use the drm_irq_install() helpers provided by the DRM
-	 * core, so we need to set this manually in order to allow the
-	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
-	 */
-	drm->irq_enabled = 1;
-
-	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
-	if (err < 0)
-		return err;
-
-	err = tegra_drm_fb_init(drm);
-	if (err < 0)
-		return err;
-
-	drm_kms_helper_poll_init(drm);
-
-	return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
-	drm_kms_helper_poll_fini(drm);
-	tegra_drm_fb_exit(drm);
-
-	drm_mode_config_cleanup(drm);
-
-	return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
-	struct host1x_drm_file *fpriv;
-
-	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-	if (!fpriv)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&fpriv->contexts);
-	filp->driver_priv = fpriv;
-
-	return 0;
-}
-
-static void host1x_drm_context_free(struct host1x_drm_context *context)
-{
-	context->client->ops->close_channel(context);
-	kfree(context);
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
-	struct host1x_drm *host1x = drm->dev_private;
-
-	tegra_fbdev_restore_mode(host1x->fbdev);
-}
-
-#ifdef CONFIG_DRM_TEGRA_STAGING
-static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
-					 struct host1x_drm_context *context)
-{
-	struct host1x_drm_context *ctx;
-
-	list_for_each_entry(ctx, &file->contexts, list)
-		if (ctx == context)
-			return true;
-
-	return false;
-}
-
-static int tegra_gem_create(struct drm_device *drm, void *data,
-			    struct drm_file *file)
-{
-	struct drm_tegra_gem_create *args = data;
-	struct tegra_bo *bo;
-
-	bo = tegra_bo_create_with_handle(file, drm, args->size,
-					 &args->handle);
-	if (IS_ERR(bo))
-		return PTR_ERR(bo);
-
-	return 0;
-}
-
-static int tegra_gem_mmap(struct drm_device *drm, void *data,
-			  struct drm_file *file)
-{
-	struct drm_tegra_gem_mmap *args = data;
-	struct drm_gem_object *gem;
-	struct tegra_bo *bo;
-
-	gem = drm_gem_object_lookup(drm, file, args->handle);
-	if (!gem)
-		return -EINVAL;
-
-	bo = to_tegra_bo(gem);
-
-	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
-	drm_gem_object_unreference(gem);
-
-	return 0;
-}
-
-static int tegra_syncpt_read(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_read *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	args->value = host1x_syncpt_read_min(sp);
-	return 0;
-}
-
-static int tegra_syncpt_incr(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_incr *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	return host1x_syncpt_incr(sp);
-}
-
-static int tegra_syncpt_wait(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_wait *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
-				  &args->value);
-}
-
-static int tegra_open_channel(struct drm_device *drm, void *data,
-			      struct drm_file *file)
-{
-	struct drm_tegra_open_channel *args = data;
-	struct host1x_client *client;
-	struct host1x_drm_context *context;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm *host1x = drm->dev_private;
-	int err = -ENODEV;
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return -ENOMEM;
-
-	list_for_each_entry(client, &host1x->clients, list)
-		if (client->class == args->client) {
-			err = client->ops->open_channel(client, context);
-			if (err)
-				break;
-
-			context->client = client;
-			list_add(&context->list, &fpriv->contexts);
-			args->context = (uintptr_t)context;
-			return 0;
-		}
-
-	kfree(context);
-	return err;
-}
-
-static int tegra_close_channel(struct drm_device *drm, void *data,
-			       struct drm_file *file)
-{
-	struct drm_tegra_close_channel *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -EINVAL;
-
-	list_del(&context->list);
-	host1x_drm_context_free(context);
-
-	return 0;
-}
-
-static int tegra_get_syncpt(struct drm_device *drm, void *data,
-			    struct drm_file *file)
-{
-	struct drm_tegra_get_syncpt *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-	struct host1x_syncpt *syncpt;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -ENODEV;
-
-	if (args->index >= context->client->num_syncpts)
-		return -EINVAL;
-
-	syncpt = context->client->syncpts[args->index];
-	args->id = host1x_syncpt_id(syncpt);
-
-	return 0;
-}
-
-static int tegra_submit(struct drm_device *drm, void *data,
-			struct drm_file *file)
-{
-	struct drm_tegra_submit *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -ENODEV;
-
-	return context->client->ops->submit(context, args, drm, file);
-}
-#endif
-
-static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
-#ifdef CONFIG_DRM_TEGRA_STAGING
-	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
-#endif
-};
-
-static const struct file_operations tegra_drm_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.mmap = tegra_drm_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-#endif
-	.llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-		struct tegra_dc *dc = to_tegra_dc(crtc);
-
-		if (dc->pipe == pipe)
-			return crtc;
-	}
-
-	return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
-	/* TODO: implement real hardware counter using syncpoints */
-	return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-
-	if (!crtc)
-		return -ENODEV;
-
-	tegra_dc_enable_vblank(dc);
-
-	return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-
-	if (crtc)
-		tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context, *tmp;
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		tegra_dc_cancel_page_flip(crtc, file);
-
-	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
-		host1x_drm_context_free(context);
-
-	kfree(fpriv);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *)s->private;
-	struct drm_device *drm = node->minor->dev;
-	struct drm_framebuffer *fb;
-
-	mutex_lock(&drm->mode_config.fb_lock);
-
-	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
-		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
-			   fb->base.id, fb->width, fb->height, fb->depth,
-			   fb->bits_per_pixel,
-			   atomic_read(&fb->refcount.refcount));
-	}
-
-	mutex_unlock(&drm->mode_config.fb_lock);
-
-	return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
-	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
-	return drm_debugfs_create_files(tegra_debugfs_list,
-					ARRAY_SIZE(tegra_debugfs_list),
-					minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
-	drm_debugfs_remove_files(tegra_debugfs_list,
-				 ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
-	.load = tegra_drm_load,
-	.unload = tegra_drm_unload,
-	.open = tegra_drm_open,
-	.preclose = tegra_drm_preclose,
-	.lastclose = tegra_drm_lastclose,
-
-	.get_vblank_counter = tegra_drm_get_vblank_counter,
-	.enable_vblank = tegra_drm_enable_vblank,
-	.disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = tegra_debugfs_init,
-	.debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
-	.gem_free_object = tegra_bo_free_object,
-	.gem_vm_ops = &tegra_bo_vm_ops,
-	.dumb_create = tegra_bo_dumb_create,
-	.dumb_map_offset = tegra_bo_dumb_map_offset,
-	.dumb_destroy = drm_gem_dumb_destroy,
-
-	.ioctls = tegra_drm_ioctls,
-	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
-	.fops = &tegra_drm_fops,
-
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-	.patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf1..0000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * drivers/video/tegra/host/gr2d/gr2d.c
- *
- * Tegra Graphics 2D
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-
-#include "channel.h"
-#include "drm.h"
-#include "gem.h"
-#include "job.h"
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "host1x_client.h"
-#include "syncpt.h"
-
-struct gr2d {
-	struct host1x_client client;
-	struct clk *clk;
-	struct host1x_channel *channel;
-	unsigned long *addr_regs;
-};
-
-static inline struct gr2d *to_gr2d(struct host1x_client *client)
-{
-	return container_of(client, struct gr2d, client);
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
-
-static int gr2d_client_init(struct host1x_client *client,
-			    struct drm_device *drm)
-{
-	return 0;
-}
-
-static int gr2d_client_exit(struct host1x_client *client)
-{
-	return 0;
-}
-
-static int gr2d_open_channel(struct host1x_client *client,
-			     struct host1x_drm_context *context)
-{
-	struct gr2d *gr2d = to_gr2d(client);
-
-	context->channel = host1x_channel_get(gr2d->channel);
-
-	if (!context->channel)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void gr2d_close_channel(struct host1x_drm_context *context)
-{
-	host1x_channel_put(context->channel);
-}
-
-static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
-					  struct drm_file *file,
-					  u32 handle)
-{
-	struct drm_gem_object *gem;
-	struct tegra_bo *bo;
-
-	gem = drm_gem_object_lookup(drm, file, handle);
-	if (!gem)
-		return NULL;
-
-	mutex_lock(&drm->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&drm->struct_mutex);
-
-	bo = to_tegra_bo(gem);
-	return &bo->base;
-}
-
-static int gr2d_submit(struct host1x_drm_context *context,
-		       struct drm_tegra_submit *args, struct drm_device *drm,
-		       struct drm_file *file)
-{
-	struct host1x_job *job;
-	unsigned int num_cmdbufs = args->num_cmdbufs;
-	unsigned int num_relocs = args->num_relocs;
-	unsigned int num_waitchks = args->num_waitchks;
-	struct drm_tegra_cmdbuf __user *cmdbufs =
-		(void * __user)(uintptr_t)args->cmdbufs;
-	struct drm_tegra_reloc __user *relocs =
-		(void * __user)(uintptr_t)args->relocs;
-	struct drm_tegra_waitchk __user *waitchks =
-		(void * __user)(uintptr_t)args->waitchks;
-	struct drm_tegra_syncpt syncpt;
-	int err;
-
-	/* We don't yet support other than one syncpt_incr struct per submit */
-	if (args->num_syncpts != 1)
-		return -EINVAL;
-
-	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
-			       args->num_relocs, args->num_waitchks);
-	if (!job)
-		return -ENOMEM;
-
-	job->num_relocs = args->num_relocs;
-	job->num_waitchk = args->num_waitchks;
-	job->client = (u32)args->context;
-	job->class = context->client->class;
-	job->serialize = true;
-
-	while (num_cmdbufs) {
-		struct drm_tegra_cmdbuf cmdbuf;
-		struct host1x_bo *bo;
-
-		err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
-		if (err)
-			goto fail;
-
-		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
-		if (!bo) {
-			err = -ENOENT;
-			goto fail;
-		}
-
-		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
-		num_cmdbufs--;
-		cmdbufs++;
-	}
-
-	err = copy_from_user(job->relocarray, relocs,
-			     sizeof(*relocs) * num_relocs);
-	if (err)
-		goto fail;
-
-	while (num_relocs--) {
-		struct host1x_reloc *reloc = &job->relocarray[num_relocs];
-		struct host1x_bo *cmdbuf, *target;
-
-		cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
-		target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
-		reloc->cmdbuf = cmdbuf;
-		reloc->target = target;
-
-		if (!reloc->target || !reloc->cmdbuf) {
-			err = -ENOENT;
-			goto fail;
-		}
-	}
-
-	err = copy_from_user(job->waitchk, waitchks,
-			     sizeof(*waitchks) * num_waitchks);
-	if (err)
-		goto fail;
-
-	err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
-			     sizeof(syncpt));
-	if (err)
-		goto fail;
-
-	job->syncpt_id = syncpt.id;
-	job->syncpt_incrs = syncpt.incrs;
-	job->timeout = 10000;
-	job->is_addr_reg = gr2d_is_addr_reg;
-
-	if (args->timeout && args->timeout < 10000)
-		job->timeout = args->timeout;
-
-	err = host1x_job_pin(job, context->client->dev);
-	if (err)
-		goto fail;
-
-	err = host1x_job_submit(job);
-	if (err)
-		goto fail_submit;
-
-	args->fence = job->syncpt_end;
-
-	host1x_job_put(job);
-	return 0;
-
-fail_submit:
-	host1x_job_unpin(job);
-fail:
-	host1x_job_put(job);
-	return err;
-}
-
-static struct host1x_client_ops gr2d_client_ops = {
-	.drm_init = gr2d_client_init,
-	.drm_exit = gr2d_client_exit,
-	.open_channel = gr2d_open_channel,
-	.close_channel = gr2d_close_channel,
-	.submit = gr2d_submit,
-};
-
-static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
-{
-	const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
-				      0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
-	unsigned long *bitmap;
-	int i;
-
-	bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
-			      GFP_KERNEL);
-
-	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
-		u32 reg = gr2d_addr_regs[i];
-		bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
-	}
-
-	gr2d->addr_regs = bitmap;
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
-{
-	struct gr2d *gr2d = dev_get_drvdata(dev);
-
-	switch (class) {
-	case HOST1X_CLASS_HOST1X:
-		return reg == 0x2b;
-	case HOST1X_CLASS_GR2D:
-	case HOST1X_CLASS_GR2D_SB:
-		reg &= 0xff;
-		if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
-			return 1;
-	default:
-		return 0;
-	}
-}
-
-static const struct of_device_id gr2d_match[] = {
-	{ .compatible = "nvidia,tegra30-gr2d" },
-	{ .compatible = "nvidia,tegra20-gr2d" },
-	{ },
-};
-
-static int gr2d_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
-	int err;
-	struct gr2d *gr2d = NULL;
-	struct host1x_syncpt **syncpts;
-
-	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
-	if (!gr2d)
-		return -ENOMEM;
-
-	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
-	if (!syncpts)
-		return -ENOMEM;
-
-	gr2d->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(gr2d->clk)) {
-		dev_err(dev, "cannot get clock\n");
-		return PTR_ERR(gr2d->clk);
-	}
-
-	err = clk_prepare_enable(gr2d->clk);
-	if (err) {
-		dev_err(dev, "cannot turn on clock\n");
-		return err;
-	}
-
-	gr2d->channel = host1x_channel_request(dev);
-	if (!gr2d->channel)
-		return -ENOMEM;
-
-	*syncpts = host1x_syncpt_request(dev, false);
-	if (!(*syncpts)) {
-		host1x_channel_free(gr2d->channel);
-		return -ENOMEM;
-	}
-
-	gr2d->client.ops = &gr2d_client_ops;
-	gr2d->client.dev = dev;
-	gr2d->client.class = HOST1X_CLASS_GR2D;
-	gr2d->client.syncpts = syncpts;
-	gr2d->client.num_syncpts = 1;
-
-	err = host1x_register_client(host1x, &gr2d->client);
-	if (err < 0) {
-		dev_err(dev, "failed to register host1x client: %d\n", err);
-		return err;
-	}
-
-	gr2d_init_addr_reg_map(dev, gr2d);
-
-	platform_set_drvdata(pdev, gr2d);
-
-	return 0;
-}
-
-static int __exit gr2d_remove(struct platform_device *pdev)
-{
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
-	struct gr2d *gr2d = platform_get_drvdata(pdev);
-	unsigned int i;
-	int err;
-
-	err = host1x_unregister_client(host1x, &gr2d->client);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
-		return err;
-	}
-
-	for (i = 0; i < gr2d->client.num_syncpts; i++)
-		host1x_syncpt_free(gr2d->client.syncpts[i]);
-
-	host1x_channel_free(gr2d->channel);
-	clk_disable_unprepare(gr2d->clk);
-
-	return 0;
-}
-
-struct platform_driver tegra_gr2d_driver = {
-	.probe = gr2d_probe,
-	.remove = __exit_p(gr2d_remove),
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "gr2d",
-		.of_match_table = gr2d_match,
-	}
-};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e6..0000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Tegra host1x driver
- *
- * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef __LINUX_HOST1X_H
-#define __LINUX_HOST1X_H
-
-enum host1x_class {
-	HOST1X_CLASS_HOST1X	= 0x1,
-	HOST1X_CLASS_GR2D	= 0x51,
-	HOST1X_CLASS_GR2D_SB    = 0x52
-};
-
-#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10b..0000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Tegra host1x Memory Management Abstraction header
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HOST1X_BO_H
-#define _HOST1X_BO_H
-
-struct host1x_bo;
-
-struct host1x_bo_ops {
-	struct host1x_bo *(*get)(struct host1x_bo *bo);
-	void (*put)(struct host1x_bo *bo);
-	dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
-	void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
-	void *(*mmap)(struct host1x_bo *bo);
-	void (*munmap)(struct host1x_bo *bo, void *addr);
-	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
-	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
-};
-
-struct host1x_bo {
-	const struct host1x_bo_ops *ops;
-};
-
-static inline void host1x_bo_init(struct host1x_bo *bo,
-				  const struct host1x_bo_ops *ops)
-{
-	bo->ops = ops;
-}
-
-static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
-{
-	return bo->ops->get(bo);
-}
-
-static inline void host1x_bo_put(struct host1x_bo *bo)
-{
-	bo->ops->put(bo);
-}
-
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
-				       struct sg_table **sgt)
-{
-	return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
-	bo->ops->unpin(bo, sgt);
-}
-
-static inline void *host1x_bo_mmap(struct host1x_bo *bo)
-{
-	return bo->ops->mmap(bo);
-}
-
-static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
-{
-	bo->ops->munmap(bo, addr);
-}
-
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
-	return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
-				    unsigned int pagenum, void *addr)
-{
-	bo->ops->kunmap(bo, pagenum, addr);
-}
-
-#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863..0000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-y = -Idrivers/gpu/host1x
-
-host1x-hw-objs  = \
-	host1x01.o
-
-obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad5..37e2a63 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
 
 /*
  * Put the restart at the end of pushbuffer memory
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee19962..4608257 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/host1x.h>
 #include <linux/slab.h>
+
 #include <trace/events/host1x.h>
 
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
 
 #define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@
 	}
 }
 
+static inline void synchronize_syncpt_base(struct host1x_job *job)
+{
+	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+	struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+	u32 id, value;
+
+	value = host1x_syncpt_read_max(sp);
+	id = sp->base->id;
+
+	host1x_cdma_push(&job->channel->cdma,
+			 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+				HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
+}
+
 static int channel_submit(struct host1x_job *job)
 {
 	struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@
 					host1x_syncpt_read_max(sp)));
 	}
 
+	/* Synchronize base register to allow using it for relative waiting */
+	if (sp->base)
+		synchronize_syncpt_base(job);
+
 	syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
 
 	job->syncpt_end = syncval;
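To illustrate what the base synchronization above enables: once the base
register mirrors the job's starting value, a gather can wait for "N increments
since job start" rather than an absolute threshold. A sketch, assuming the
WAIT_SYNCPT_BASE accessors from hw_host1x01_uclass.h; the helper itself is
hypothetical:

	/* Sketch: wait until syncpoint "id" reaches base value + count. */
	static void example_push_relative_wait(struct host1x_cdma *cdma,
					       u32 id, u32 base, u32 count)
	{
		host1x_cdma_push(cdma,
				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
					host1x_uclass_wait_syncpt_base_r(), 1),
				 HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(id) |
				 HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(base) |
				 HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(count));
	}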
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038..640c75c 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
 
 #define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
 
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91c..859b73b 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
  */
 
 /* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
 
 /* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
 
-#include "dev.h"
+#include "../dev.h"
 
 int host1x01_init(struct host1x *host)
 {
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 0000000..e98caca
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x01.h"
+#include "host1x01_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x02_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
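Like host1x01.c, this file compiles the shared channel/CDMA/interrupt/syncpt
code once per hardware generation by including the .c sources after that
generation's register headers. A minimal sketch of the pattern, with
hypothetical names (REG_FOO, shared_impl.c, gen2.c):

	/* shared_impl.c -- written against whatever REG_FOO is in scope */
	static void write_foo(void __iomem *regs, u32 value)
	{
		writel(value, regs + REG_FOO);
	}

	/* gen2.c -- one translation unit per hardware generation */
	#define REG_FOO 0x14		/* generation-specific offset */
	#include "shared_impl.c"	/* instantiates write_foo() for gen2 */

Note that host1x02.c pulls in the host1x01 headers, so the shared code is
still built against the host1x01 register layout for Tegra114 here.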
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644
index 0000000..f748660
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X02_H
+#define HOST1X_HOST1X02_H
+
+struct host1x;
+
+int host1x02_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce1..f755359 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@
 }
 #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
 	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
 static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
 {
 	return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 0000000..e490bcd
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
+#define HOST1X_HW_HOST1X02_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+	host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+	host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+	host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+	host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+	host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+	host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+	host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+	return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+	host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+	host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+	return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+	host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+	return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+	host1x_channel_dmactrl_dmainitget()
+
+#endif
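A short usage sketch for the accessor conventions documented in this header:
<x>_r() names a register offset and <x>_<y>_v() extracts a field from a value
read back. It assumes host1x_ch_readl() as the channel read helper and a
struct host1x_channel *ch in scope:

	/* Sketch: poll until the channel's command FIFO drains. */
	u32 value = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
	while (!HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(value))
		value = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);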
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 0000000..4495401
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_SYNC_H
+#define HOST1X_HW_HOST1X02_SYNC_H
+
+#define REGISTER_STRIDE	4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+	return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+	return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+	host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+	host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+	host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+	host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+	host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+	return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+	host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+	return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+	host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+	host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+	host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+	return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+	host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+	return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+	host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+	return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+	host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+	host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+	return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+	host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+	return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+	host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+	host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+	host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+	host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+	return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+	host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+	host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+	host1x_sync_cbstat_cbclass_v(r)
+
+#endif
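
[Editorial note: the accessor scheme documented at the top of this header is compositional: _f() helpers are OR'd together into a full register value written at the _r() offset, and _v() helpers decode fields from a value read back. A minimal sketch follows, not part of the patch, using only helpers defined above; the function name is hypothetical and plain readl()/writel() on an ioremapped register window stand in for whatever MMIO wrappers the caller actually has.]

/*
 * Editorial sketch, assuming kernel context with <linux/io.h> and this
 * header in scope: compose CFPEEK_CTRL from the _f() helpers and decode
 * CBSTAT with the matching _v() helpers.
 */
static void cfpeek_sketch(void __iomem *regs, unsigned int channel)
{
	u32 ctrl, stat;

	/* enable command-FIFO peeking on one channel, starting at word 0 */
	ctrl = HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
	       HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(channel) |
	       HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(0);
	writel(ctrl, regs + HOST1X_SYNC_CFPEEK_CTRL);

	/* pull the class and offset fields back out of CBSTAT */
	stat = readl(regs + HOST1X_SYNC_CBSTAT(channel));
	pr_info("class %x, offset %x\n",
		HOST1X_SYNC_CBSTAT_CBCLASS_V(stat),
		HOST1X_SYNC_CBSTAT_CBOFFSET_V(stat));
}
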
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 0000000..a3b3c98
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_UCLASS_H
+#define HOST1X_HW_HOST1X02_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
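
[Editorial note: the same compositional convention applies to the uclass opcode operands. A hedged sketch, not part of the patch; 'mod' and 'offset' are hypothetical caller-supplied values and the function name is illustrative.]

/*
 * Editorial sketch: build an INDOFF operand for an auto-incrementing
 * indirect register write with all four byte lanes enabled, by OR'ing
 * the _f() helpers defined above.
 */
static u32 indoff_write_sketch(unsigned int mod, unsigned int offset)
{
	return HOST1X_UCLASS_INDOFF_INDBE_F(0xf) |
	       HOST1X_UCLASS_INDOFF_AUTOINC_F(1) |
	       HOST1X_UCLASS_INDOFF_INDMODID_F(mod) |
	       HOST1X_UCLASS_INDOFF_INDROFFSET_F(offset);
}
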
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef..b26dcc8 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
 #include <linux/io.h>
 #include <asm/mach/irq.h>
 
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
 
 /*
  * Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095..56e8539 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
 
 #include <linux/io.h>
 
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
 
 /*
  * Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050..de5ec33 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/host1x.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
 
 #include "channel.h"
 #include "dev.h"
-#include "host1x_bo.h"
 #include "job.h"
 #include "syncpt.h"
 
@@ -264,7 +264,7 @@
 }
 
 static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
-		       unsigned int offset)
+			unsigned int offset)
 {
 	offset *= sizeof(u32);
 
@@ -281,7 +281,7 @@
 	unsigned int num_relocs;
 	struct host1x_reloc *reloc;
 
-	struct host1x_bo *cmdbuf_id;
+	struct host1x_bo *cmdbuf;
 	unsigned int offset;
 
 	u32 words;
@@ -291,25 +291,37 @@
 	u32 count;
 };
 
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+		if (!fw->num_relocs)
+			return -EINVAL;
+
+		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+			return -EINVAL;
+
+		fw->num_relocs--;
+		fw->reloc++;
+	}
+
+	return 0;
+}
+
 static int check_mask(struct host1x_firewall *fw)
 {
 	u32 mask = fw->mask;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (mask) {
 		if (fw->words == 0)
 			return -EINVAL;
 
 		if (mask & 1) {
-			if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-				if (!fw->num_relocs)
-					return -EINVAL;
-				if (!check_reloc(fw->reloc, fw->cmdbuf_id,
-						 fw->offset))
-					return -EINVAL;
-				fw->reloc++;
-				fw->num_relocs--;
-			}
+			ret = check_register(fw, reg);
+			if (ret < 0)
+				return ret;
+
 			fw->words--;
 			fw->offset++;
 		}
@@ -324,19 +336,16 @@
 {
 	u32 count = fw->count;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, reg);
+		if (ret < 0)
+			return ret;
+
 		reg++;
 		fw->words--;
 		fw->offset++;
@@ -348,21 +357,17 @@
 
 static int check_nonincr(struct host1x_firewall *fw)
 {
-	int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
 	u32 count = fw->count;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (is_addr_reg) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, fw->reg);
+		if (ret < 0)
+			return ret;
+
 		fw->words--;
 		fw->offset++;
 		count--;
@@ -381,7 +386,7 @@
 		return 0;
 
 	fw->words = g->words;
-	fw->cmdbuf_id = g->bo;
+	fw->cmdbuf = g->bo;
 	fw->offset = 0;
 
 	while (fw->words && !err) {
@@ -436,10 +441,6 @@
 		}
 	}
 
-	/* No relocs should remain at this point */
-	if (fw->num_relocs)
-		err = -EINVAL;
-
 out:
 	return err;
 }
@@ -493,6 +494,10 @@
 		offset += g->words * sizeof(u32);
 	}
 
+	/* No relocs should remain at this point */
+	if (fw.num_relocs)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f2..33a697d 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@
 	u32 pad;
 };
 
-struct host1x_reloc {
-	struct host1x_bo *cmdbuf;
-	u32 cmdbuf_offset;
-	struct host1x_bo *target;
-	u32 target_offset;
-	u32 shift;
-	u32 pad;
-};
-
 struct host1x_waitchk {
 	struct host1x_bo *bo;
 	u32 offset;
@@ -56,105 +47,6 @@
 };
 
 /*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
-	/* When refcount goes to zero, job can be freed */
-	struct kref ref;
-
-	/* List entry */
-	struct list_head list;
-
-	/* Channel where job is submitted to */
-	struct host1x_channel *channel;
-
-	u32 client;
-
-	/* Gathers and their memory */
-	struct host1x_job_gather *gathers;
-	unsigned int num_gathers;
-
-	/* Wait checks to be processed at submit time */
-	struct host1x_waitchk *waitchk;
-	unsigned int num_waitchk;
-	u32 waitchk_mask;
-
-	/* Array of handles to be pinned & unpinned */
-	struct host1x_reloc *relocarray;
-	unsigned int num_relocs;
-	struct host1x_job_unpin_data *unpins;
-	unsigned int num_unpins;
-
-	dma_addr_t *addr_phys;
-	dma_addr_t *gather_addr_phys;
-	dma_addr_t *reloc_addr_phys;
-
-	/* Sync point id, number of increments and end related to the submit */
-	u32 syncpt_id;
-	u32 syncpt_incrs;
-	u32 syncpt_end;
-
-	/* Maximum time to wait for this job */
-	unsigned int timeout;
-
-	/* Index and number of slots used in the push buffer */
-	unsigned int first_get;
-	unsigned int num_slots;
-
-	/* Copy of gathers */
-	size_t gather_copy_size;
-	dma_addr_t gather_copy;
-	u8 *gather_copy_mapped;
-
-	/* Check if register is marked as an address reg */
-	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
-	/* Request a SETCLASS to this class */
-	u32 class;
-
-	/* Add a channel wait for previous ops to complete */
-	bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accomodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-				    u32 num_cmdbufs, u32 num_relocs,
-				    u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
-			   u32 words, u32 offset);
-
-/*
- * Increment reference going to host1x_job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement reference job, free if goes to zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Handles also patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
-/*
  * Dump contents of job to debug output.
  */
 void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b..159c479 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
 #define SYNCPT_CHECK_PERIOD (2 * HZ)
 #define MAX_STUCK_CHECK_COUNT 15
 
-static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
-						  struct device *dev,
-						  bool client_managed)
+static struct host1x_syncpt_base *
+host1x_syncpt_base_request(struct host1x *host)
+{
+	struct host1x_syncpt_base *bases = host->bases;
+	unsigned int i;
+
+	for (i = 0; i < host->info->nb_bases; i++)
+		if (!bases[i].requested)
+			break;
+
+	if (i >= host->info->nb_bases)
+		return NULL;
+
+	bases[i].requested = true;
+	return &bases[i];
+}
+
+static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
+{
+	if (base)
+		base->requested = false;
+}
+
+static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+						 struct device *dev,
+						 unsigned long flags)
 {
 	int i;
 	struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@
 	if (i >= host->info->nb_pts)
 		return NULL;
 
+	if (flags & HOST1X_SYNCPT_HAS_BASE) {
+		sp->base = host1x_syncpt_base_request(host);
+		if (!sp->base)
+			return NULL;
+	}
+
 	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
 			dev ? dev_name(dev) : NULL);
 	if (!name)
@@ -51,7 +80,11 @@
 
 	sp->dev = dev;
 	sp->name = name;
-	sp->client_managed = client_managed;
+
+	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
+		sp->client_managed = true;
+	else
+		sp->client_managed = false;
 
 	return sp;
 }
@@ -303,25 +336,35 @@
 
 int host1x_syncpt_init(struct host1x *host)
 {
+	struct host1x_syncpt_base *bases;
 	struct host1x_syncpt *syncpt;
 	int i;
 
 	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
-		GFP_KERNEL);
+			      GFP_KERNEL);
 	if (!syncpt)
 		return -ENOMEM;
 
-	for (i = 0; i < host->info->nb_pts; ++i) {
+	bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+			     GFP_KERNEL);
+	if (!bases)
+		return -ENOMEM;
+
+	for (i = 0; i < host->info->nb_pts; i++) {
 		syncpt[i].id = i;
 		syncpt[i].host = host;
 	}
 
+	for (i = 0; i < host->info->nb_bases; i++)
+		bases[i].id = i;
+
 	host->syncpt = syncpt;
+	host->bases = bases;
 
 	host1x_syncpt_restore(host);
 
 	/* Allocate sync point to use for clearing waits for expired fences */
-	host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
+	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
 	if (!host->nop_sp)
 		return -ENOMEM;
 
@@ -329,10 +372,10 @@
 }
 
 struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed)
+					    unsigned long flags)
 {
 	struct host1x *host = dev_get_drvdata(dev->parent);
-	return _host1x_syncpt_alloc(host, dev, client_managed);
+	return host1x_syncpt_alloc(host, dev, flags);
 }
 
 void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@
 	if (!sp)
 		return;
 
+	host1x_syncpt_base_free(sp->base);
 	kfree(sp->name);
+	sp->base = NULL;
 	sp->dev = NULL;
 	sp->name = NULL;
 	sp->client_managed = false;
@@ -354,6 +399,25 @@
 		kfree(sp->name);
 }
 
+/*
+ * Read max. It indicates how many operations there are in the queue,
+ * either in a channel or in a software thread.
+ */
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->min_val);
+}
+
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
 	return host->info->nb_pts;
@@ -375,3 +439,13 @@
 		return NULL;
 	return host->syncpt + id;
 }
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
+{
+	return sp ? sp->base : NULL;
+}
+
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
+{
+	return base->id;
+}
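
[Editorial note: with host1x_syncpt_request() now taking a flags word instead of the old bool, a client that wants a hardware wait base asks for one explicitly. A hedged sketch of the resulting call pattern, using only functions visible in this patch; the function name and error value are illustrative.]

/* Editorial sketch of the new flags-based allocation API. */
static int syncpt_client_sketch(struct device *dev)
{
	struct host1x_syncpt *sp;
	struct host1x_syncpt_base *base;

	/* HOST1X_SYNCPT_CLIENT_MANAGED could be OR'd in as well */
	sp = host1x_syncpt_request(dev, HOST1X_SYNCPT_HAS_BASE);
	if (!sp)
		return -ENOMEM;

	base = host1x_syncpt_get_base(sp);
	pr_info("wait base %u\n", host1x_syncpt_base_id(base));

	host1x_syncpt_free(sp);	/* also releases the base */
	return 0;
}
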
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9..9056465 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
 #define __HOST1X_SYNCPT_H
 
 #include <linux/atomic.h>
+#include <linux/host1x.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
@@ -30,6 +31,11 @@
 /* Reserved for replacing an expired wait with a NOP */
 #define HOST1X_SYNCPT_RESERVED			0
 
+struct host1x_syncpt_base {
+	unsigned int id;
+	bool requested;
+};
+
 struct host1x_syncpt {
 	int id;
 	atomic_t min_val;
@@ -39,6 +45,7 @@
 	bool client_managed;
 	struct host1x *host;
 	struct device *dev;
+	struct host1x_syncpt_base *base;
 
 	/* interrupt data */
 	struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@
 /*  Free sync point array */
 void host1x_syncpt_deinit(struct host1x *host);
 
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- * */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->min_val);
-}
-
 /* Return number of sync point supported. */
 int host1x_syncpt_nb_pts(struct host1x *host);
 
@@ -112,9 +100,6 @@
 	return (min == max);
 }
 
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
@@ -130,16 +115,9 @@
 /* Read current wait base value into shadow register and return it. */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
 
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
 
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
-			long timeout, u32 *value);
-
 /* Check if sync point id is valid. */
 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 {
@@ -149,14 +127,4 @@
 /* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
 int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
 
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
 #endif
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index c91d547..329fbb9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -242,6 +242,7 @@
 	  - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
 	    Zalman ZM-GM1
 	  - SHARKOON DarkGlider Gaming mouse
+	  - LEETGION Hellion Gaming Mouse
 
 config HOLTEK_FF
 	bool "Holtek On Line Grip force feedback support"
@@ -323,7 +324,7 @@
 
 config HID_LENOVO_TPKBD
 	tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
-	depends on USB_HID
+	depends on HID
 	select NEW_LEDS
 	select LEDS_CLASS
 	---help---
@@ -362,19 +363,20 @@
 	  - Logitech WingMan Force 3D
 	  - Logitech Formula Force EX
 	  - Logitech WingMan Formula Force GP
-	  - Logitech MOMO Force wheel
 
 	  and if you want to enable force feedback for them.
 	  Note: if you say N here, this device will still be supported, but without
 	  force feedback.
 
 config LOGIRUMBLEPAD2_FF
-	bool "Logitech RumblePad/Rumblepad 2 force feedback support"
+	bool "Logitech force feedback support (variant 2)"
 	depends on HID_LOGITECH
 	select INPUT_FF_MEMLESS
 	help
-	  Say Y here if you want to enable force feedback support for Logitech
-	  RumblePad and Rumblepad 2 devices.
+	  Say Y here if you want to enable force feedback support for:
+	  - Logitech RumblePad
+	  - Logitech Rumblepad 2
+	  - Logitech Formula Vibration Feedback Wheel
 
 config LOGIG940_FF
 	bool "Logitech Flight System G940 force feedback support"
@@ -437,6 +439,7 @@
 	  - Chunghwa panels
 	  - CVTouch panels
 	  - Cypress TrueTouch panels
+	  - Elan Microelectronics touch panels
 	  - Elo TouchSystems IntelliTouch Plus panels
 	  - GeneralTouch 'Sensing Win7-TwoFinger' panels
 	  - GoodTouch panels
@@ -453,6 +456,7 @@
 	  - Pixcir dual touch panels
 	  - Quanta panels
 	  - eGalax dual-touch panels, including the Joojoo and Wetab tablets
+	  - SiS multitouch panels
 	  - Stantum multitouch panels
 	  - Touch International Panels
 	  - Unitec Panels
@@ -614,6 +618,14 @@
 	  * Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
 	  * Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
 
+config SONY_FF
+	bool "Sony PS2/3 accessories force feedback support"
+	depends on HID_SONY
+	select INPUT_FF_MEMLESS
+	---help---
+	  Say Y here if you have a Sony PS2/3 accessory and want to enable
+	  force feedback support for it.
+
 config HID_SPEEDLINK
 	tristate "Speedlink VAD Cezanne mouse support"
 	depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a959f4a..30e4431 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -95,7 +95,7 @@
 obj-$(CONFIG_HID_ROCCAT)	+= hid-roccat.o hid-roccat-common.o \
 	hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
 	hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \
-	hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-savu.o
+	hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o
 obj-$(CONFIG_HID_SAITEK)	+= hid-saitek.o
 obj-$(CONFIG_HID_SAMSUNG)	+= hid-samsung.o
 obj-$(CONFIG_HID_SMARTJOYPLUS)	+= hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 881cf7b..4975581 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -46,6 +46,12 @@
 MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
 		"(0 = disabled, [1] = enabled)");
 
+static unsigned int swap_opt_cmd;
+module_param(swap_opt_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
+		"(For people who want to keep Windows PC keyboard muscle memory. "
+		"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+
 struct apple_sc {
 	unsigned long quirks;
 	unsigned int fn_on;
@@ -150,6 +156,14 @@
 	{ }
 };
 
+static const struct apple_key_translation swapped_option_cmd_keys[] = {
+	{ KEY_LEFTALT,	KEY_LEFTMETA },
+	{ KEY_LEFTMETA,	KEY_LEFTALT },
+	{ KEY_RIGHTALT,	KEY_RIGHTMETA },
+	{ KEY_RIGHTMETA,	KEY_RIGHTALT },
+	{ }
+};
+
 static const struct apple_key_translation *apple_find_translation(
 		const struct apple_key_translation *table, u16 from)
 {
@@ -242,6 +256,14 @@
 		}
 	}
 
+	if (swap_opt_cmd) {
+		trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+		if (trans) {
+			input_event(input, usage->type, trans->to, value);
+			return 1;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 64ab94a..a594e47 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -95,7 +95,7 @@
 		}
 	}
 
-	if (field_count < 4) {
+	if (field_count < 4 && hid->product != 0xf705) {
 		hid_err(hid, "not enough fields in the report: %d\n",
 			field_count);
 		return -ENODEV;
@@ -180,6 +180,7 @@
 
 static const struct hid_device_id ax_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705), },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ax_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e80da62..8c10f27 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1418,10 +1418,8 @@
 
 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
 		ret = hdrv->raw_event(hid, report, data, size);
-		if (ret < 0) {
-			ret = ret < 0 ? ret : 0;
+		if (ret < 0)
 			goto unlock;
-		}
 	}
 
 	ret = hid_report_raw_event(hid, type, data, size, interrupt);
@@ -1605,6 +1603,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
@@ -1716,6 +1715,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
@@ -1754,6 +1754,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1801,21 +1802,28 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
 #if IS_ENABLED(CONFIG_HID_ROCCAT)
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
 #endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2376,15 +2384,6 @@
 				hdev->type == HID_TYPE_USBNONE)
 			return true;
 		break;
-	case USB_VENDOR_ID_DWAV:
-		/* These are handled by usbtouchscreen. hdev->type is probably
-		 * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match
-		 * usbtouchscreen. */
-		if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER ||
-		     hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) &&
-		    hdev->type != HID_TYPE_USBMOUSE)
-			return true;
-		break;
 	case USB_VENDOR_ID_VELLEMAN:
 		/* These are not HID devices.  They are handled by comedi. */
 		if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index f042a6c..4e49462 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -181,7 +181,40 @@
  */
 static bool elo_broken_firmware(struct usb_device *dev)
 {
-	return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
+	struct usb_device *hub = dev->parent;
+	struct usb_device *child = NULL;
+	u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
+	u16 child_vid, child_pid;
+	int i;
+
+	if (!use_fw_quirk)
+		return false;
+	if (fw_lvl != 0x10d)
+		return false;
+
+	/* iterate sibling devices of the touch controller */
+	usb_hub_for_each_child(hub, i, child) {
+		child_vid = le16_to_cpu(child->descriptor.idVendor);
+		child_pid = le16_to_cpu(child->descriptor.idProduct);
+
+		/*
+		 * If one of the devices below is attached as a sibling of
+		 * the touch controller, then this is a newer IBM 4820
+		 * monitor that does not need the IBM-requested workaround
+		 * if the firmware level is 0x010d - aka 'M'.
+		 * No other HW can have this combination.
+		 */
+		if (child_vid == 0x04b3) {
+			switch (child_pid) {
+			case 0x4676: /* 4820 21x Video */
+			case 0x4677: /* 4820 51x Video */
+			case 0x4678: /* 4820 2Lx Video */
+			case 0x4679: /* 4820 5Lx Video */
+				return false;
+			}
+		}
+	}
+	return true;
 }
 
 static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index e696566..0caa676 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -28,6 +28,7 @@
  * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
  *   and Zalman ZM-GM1
  * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
+ * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
  */
 
 static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -40,6 +41,7 @@
 		 * 0x2fff, so they don't exceed HID_MAX_USAGES */
 		switch (hdev->product) {
 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
+		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
 			if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
 					&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
 				hid_info(hdev, "Fixing up report descriptor\n");
@@ -66,6 +68,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ }
 };
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f0296a5..7655962 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -332,6 +332,11 @@
 #define USB_VENDOR_ID_GENERAL_TOUCH	0x0dfc
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
 
 #define USB_VENDOR_ID_GLAB		0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30	0x0038
@@ -448,8 +453,9 @@
 
 #define USB_VENDOR_ID_HOLTEK_ALT		0x04d9
 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD	0xa055
-#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067	0xa067
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A	0xa04a
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067	0xa067
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072	0xa072
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081	0xa081
 
 #define USB_VENDOR_ID_IMATION		0x0718
@@ -571,6 +577,7 @@
 #define USB_DEVICE_ID_DINOVO_EDGE	0xc714
 #define USB_DEVICE_ID_DINOVO_MINI	0xc71f
 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2	0xca03
+#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL	0xca04
 
 #define USB_VENDOR_ID_LUMIO		0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH	0x0006
@@ -726,6 +733,9 @@
 #define USB_DEVICE_ID_ROCCAT_LUA	0x2c2e
 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRED	0x2c24
 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS	0x2cf6
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK	0x3138
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW	0x31ce
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO	0x3232
 #define USB_DEVICE_ID_ROCCAT_SAVU	0x2d5a
 
 #define USB_VENDOR_ID_SAITEK		0x06a3
@@ -745,6 +755,10 @@
 #define USB_VENDOR_ID_SIGMATEL		0x066F
 #define USB_DEVICE_ID_SIGMATEL_STMP3780	0x3780
 
+#define USB_VENDOR_ID_SIS2_TOUCH	0x0457
+#define USB_DEVICE_ID_SIS9200_TOUCH	0x9200
+#define USB_DEVICE_ID_SIS817_TOUCH	0x0817
+
 #define USB_VENDOR_ID_SKYCABLE			0x1223
 #define	USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07
 
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 31cf29a..2d25b6c 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -14,11 +14,9 @@
 #include <linux/module.h>
 #include <linux/sysfs.h>
 #include <linux/device.h>
-#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/leds.h>
-#include "usbhid/usbhid.h"
 
 #include "hid-ids.h"
 
@@ -41,10 +39,9 @@
 		struct hid_input *hi, struct hid_field *field,
 		struct hid_usage *usage, unsigned long **bit, int *max)
 {
-	struct usbhid_device *uhdev;
-
-	uhdev = (struct usbhid_device *) hdev->driver_data;
-	if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+	if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+		/* mark the device as pointer */
+		hid_set_drvdata(hdev, (void *)1);
 		map_key_clear(KEY_MICMUTE);
 		return 1;
 	}
@@ -339,7 +336,7 @@
 	struct tpkbd_data_pointer *data_pointer;
 	size_t name_sz = strlen(dev_name(dev)) + 16;
 	char *name_mute, *name_micmute;
-	int i, ret;
+	int i;
 
 	/* Validate required reports. */
 	for (i = 0; i < 4; i++) {
@@ -354,7 +351,9 @@
 		hid_warn(hdev, "Could not create sysfs group\n");
 	}
 
-	data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+	data_pointer = devm_kzalloc(&hdev->dev,
+				    sizeof(struct tpkbd_data_pointer),
+				    GFP_KERNEL);
 	if (data_pointer == NULL) {
 		hid_err(hdev, "Could not allocate memory for driver data\n");
 		return -ENOMEM;
@@ -364,20 +363,13 @@
 	data_pointer->sensitivity = 0xa0;
 	data_pointer->press_speed = 0x38;
 
-	name_mute = kzalloc(name_sz, GFP_KERNEL);
-	if (name_mute == NULL) {
+	name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+	name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+	if (name_mute == NULL || name_micmute == NULL) {
 		hid_err(hdev, "Could not allocate memory for led data\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-
-	name_micmute = kzalloc(name_sz, GFP_KERNEL);
-	if (name_micmute == NULL) {
-		hid_err(hdev, "Could not allocate memory for led data\n");
-		ret = -ENOMEM;
-		goto err2;
-	}
 	snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
 
 	hid_set_drvdata(hdev, data_pointer);
@@ -397,19 +389,12 @@
 	tpkbd_features_set(hdev);
 
 	return 0;
-
-err2:
-	kfree(name_mute);
-err:
-	kfree(data_pointer);
-	return ret;
 }
 
 static int tpkbd_probe(struct hid_device *hdev,
 		const struct hid_device_id *id)
 {
 	int ret;
-	struct usbhid_device *uhdev;
 
 	ret = hid_parse(hdev);
 	if (ret) {
@@ -423,9 +408,8 @@
 		goto err;
 	}
 
-	uhdev = (struct usbhid_device *) hdev->driver_data;
-
-	if (uhdev->ifnum == 1) {
+	if (hid_get_drvdata(hdev)) {
+		hid_set_drvdata(hdev, NULL);
 		ret = tpkbd_probe_tp(hdev);
 		if (ret)
 			goto err_hid;
@@ -449,17 +433,11 @@
 	led_classdev_unregister(&data_pointer->led_mute);
 
 	hid_set_drvdata(hdev, NULL);
-	kfree(data_pointer->led_micmute.name);
-	kfree(data_pointer->led_mute.name);
-	kfree(data_pointer);
 }
 
 static void tpkbd_remove(struct hid_device *hdev)
 {
-	struct usbhid_device *uhdev;
-
-	uhdev = (struct usbhid_device *) hdev->driver_data;
-	if (uhdev->ifnum == 1)
+	if (hid_get_drvdata(hdev))
 		tpkbd_remove_tp(hdev);
 
 	hid_hw_stop(hdev);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 6f12ecd..06eb45f 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -45,7 +45,9 @@
 /* Size of the original descriptors of the Driving Force (and Pro) wheels */
 #define DF_RDESC_ORIG_SIZE	130
 #define DFP_RDESC_ORIG_SIZE	97
+#define FV_RDESC_ORIG_SIZE	130
 #define MOMO_RDESC_ORIG_SIZE	87
+#define MOMO2_RDESC_ORIG_SIZE	87
 
 /* Fixed report descriptors for Logitech Driving Force (and Pro)
  * wheel controllers
@@ -170,6 +172,73 @@
 0xC0                /*  End Collection                          */
 };
 
+static __u8 fv_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),                   */
+0x09, 0x04,         /*  Usage (Joystik),                        */
+0xA1, 0x01,         /*  Collection (Application),               */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x0A,         /*          Report Size (10),               */
+0x15, 0x00,         /*          Logical Minimum (0),            */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),         */
+0x35, 0x00,         /*          Physical Minimum (0),           */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),        */
+0x09, 0x30,         /*          Usage (X),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x0C,         /*          Report Count (12),              */
+0x75, 0x01,         /*          Report Size (1),                */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x05, 0x09,         /*          Usage Page (Button),            */
+0x19, 0x01,         /*          Usage Minimum (01h),            */
+0x29, 0x0C,         /*          Usage Maximum (0Ch),            */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x02,         /*          Report Count (2),               */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x02,         /*          Usage (02h),                    */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x25, 0x07,         /*          Logical Maximum (7),            */
+0x46, 0x3B, 0x01,   /*          Physical Maximum (315),         */
+0x75, 0x04,         /*          Report Size (4),                */
+0x65, 0x14,         /*          Unit (Degrees),                 */
+0x09, 0x39,         /*          Usage (Hat Switch),             */
+0x81, 0x42,         /*          Input (Variable, Null State),   */
+0x75, 0x01,         /*          Report Size (1),                */
+0x95, 0x04,         /*          Report Count (4),               */
+0x65, 0x00,         /*          Unit,                           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x09, 0x31,         /*          Usage (Y),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x32,         /*          Usage (Z),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0xC0,               /*      End Collection,                     */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x07,         /*          Report Count (7),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x09, 0x03,         /*          Usage (03h),                    */
+0x91, 0x02,         /*          Output (Variable),              */
+0xC0,               /*      End Collection,                     */
+0xC0                /*  End Collection                          */
+};
+
 static __u8 momo_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
 0x09, 0x04,         /*  Usage (Joystik),                    */
@@ -216,6 +285,54 @@
 0xC0                /*  End Collection                      */
 };
 
+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),               */
+0x09, 0x04,         /*  Usage (Joystik),                    */
+0xA1, 0x01,         /*  Collection (Application),           */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x0A,         /*          Report Size (10),           */
+0x15, 0x00,         /*          Logical Minimum (0),        */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),     */
+0x35, 0x00,         /*          Physical Minimum (0),       */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),    */
+0x09, 0x30,         /*          Usage (X),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x0A,         /*          Report Count (10),          */
+0x75, 0x01,         /*          Report Size (1),            */
+0x25, 0x01,         /*          Logical Maximum (1),        */
+0x45, 0x01,         /*          Physical Maximum (1),       */
+0x05, 0x09,         /*          Usage Page (Button),        */
+0x19, 0x01,         /*          Usage Minimum (01h),        */
+0x29, 0x0A,         /*          Usage Maximum (0Ah),        */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x95, 0x04,         /*          Report Count (4),           */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x08,         /*          Report Size (8),            */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),      */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),     */
+0x09, 0x01,         /*          Usage (01h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0x05, 0x01,         /*          Usage Page (Desktop),       */
+0x09, 0x31,         /*          Usage (Y),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x09, 0x32,         /*          Usage (Z),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0xC0,               /*      End Collection,                 */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x09, 0x02,         /*          Usage (02h),                */
+0x95, 0x07,         /*          Report Count (7),           */
+0x91, 0x02,         /*          Output (Variable),          */
+0xC0,               /*      End Collection,                 */
+0xC0                /*  End Collection                      */
+};
+
 /*
  * Certain Logitech keyboards send in report #3 keys which are far
  * above the logical maximum described in descriptor. This extends
@@ -275,6 +392,24 @@
 		}
 		break;
 
+	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+		if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+			hid_info(hdev,
+				"fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+			rdesc = momo2_rdesc_fixed;
+			*rsize = sizeof(momo2_rdesc_fixed);
+		}
+		break;
+
+	case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+		if (*rsize == FV_RDESC_ORIG_SIZE) {
+			hid_info(hdev,
+				"fixing up Logitech Formula Vibration report descriptor\n");
+			rdesc = fv_rdesc_fixed;
+			*rsize = sizeof(fv_rdesc_fixed);
+		}
+		break;
+
 	case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
 		if (*rsize == DFP_RDESC_ORIG_SIZE) {
 			hid_info(hdev,
@@ -492,6 +627,7 @@
 		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
 		case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
 		case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+		case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
 			field->application = HID_GD_MULTIAXIS;
 			break;
 		default:
@@ -639,6 +775,8 @@
 		.driver_data = LG_NOGET | LG_FF4 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
 		.driver_data = LG_FF4 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
+		.driver_data = LG_FF2 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
 		.driver_data = LG_FF4 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 1a42eaa..0e3fb1a 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -95,7 +95,7 @@
 
 	hid_hw_request(hid, report, HID_REQ_SET_REPORT);
 
-	hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
+	hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
 	return 0;
 }
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 8782fe1..befe0e3 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -196,6 +196,21 @@
 	case FF_CONSTANT:
 		x = effect->u.ramp.start_level + 0x80;	/* 0x80 is no force */
 		CLAMP(x);
+
+		if (x == 0x80) {
+			/* De-activate force in slot-1*/
+			value[0] = 0x13;
+			value[1] = 0x00;
+			value[2] = 0x00;
+			value[3] = 0x00;
+			value[4] = 0x00;
+			value[5] = 0x00;
+			value[6] = 0x00;
+
+			hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+			return 0;
+		}
+
 		value[0] = 0x11;	/* Slot 1 */
 		value[1] = 0x08;
 		value[2] = x;
@@ -218,12 +233,70 @@
 	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
 	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
 	__s32 *value = report->field[0]->value;
+	__u32 expand_a, expand_b;
+	struct lg4ff_device_entry *entry;
+	struct lg_drv_data *drv_data;
+
+	drv_data = hid_get_drvdata(hid);
+	if (!drv_data) {
+		hid_err(hid, "Private driver data not found!\n");
+		return;
+	}
+
+	entry = drv_data->device_props;
+	if (!entry) {
+		hid_err(hid, "Device properties not found!\n");
+		return;
+	}
+
+	/* De-activate Auto-Center */
+	if (magnitude == 0) {
+		value[0] = 0xf5;
+		value[1] = 0x00;
+		value[2] = 0x00;
+		value[3] = 0x00;
+		value[4] = 0x00;
+		value[5] = 0x00;
+		value[6] = 0x00;
+
+		hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+		return;
+	}
+
+	if (magnitude <= 0xaaaa) {
+		expand_a = 0x0c * magnitude;
+		expand_b = 0x80 * magnitude;
+	} else {
+		expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
+		expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
+	}
+
+	/* Adjust for non-MOMO wheels */
+	switch (entry->product_id) {
+	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+		break;
+	default:
+		expand_a = expand_a >> 1;
+		break;
+	}
 
 	value[0] = 0xfe;
 	value[1] = 0x0d;
-	value[2] = magnitude >> 13;
-	value[3] = magnitude >> 13;
-	value[4] = magnitude >> 8;
+	value[2] = expand_a / 0xaaaa;
+	value[3] = expand_a / 0xaaaa;
+	value[4] = expand_b / 0xaaaa;
+	value[5] = 0x00;
+	value[6] = 0x00;
+
+	hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+
+	/* Activate Auto-Center */
+	value[0] = 0x14;
+	value[1] = 0x00;
+	value[2] = 0x00;
+	value[3] = 0x00;
+	value[4] = 0x00;
 	value[5] = 0x00;
 	value[6] = 0x00;
 
@@ -540,17 +613,6 @@
 	if (error)
 		return error;
 
-	/* Check if autocentering is available and
-	 * set the centering force to zero by default */
-	if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
-		if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN)	/* Formula Force EX expects different autocentering command */
-			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
-		else
-			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
-
-		dev->ff->set_autocenter(dev, 0);
-	}
-
 	/* Get private driver data */
 	drv_data = hid_get_drvdata(hid);
 	if (!drv_data) {
@@ -571,6 +633,17 @@
 	entry->max_range = lg4ff_devices[i].max_range;
 	entry->set_range = lg4ff_devices[i].set_range;
 
+	/* Check if autocentering is available and
+	 * set the centering force to zero by default */
+	if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+		if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN)	/* Formula Force EX expects different autocentering command */
+			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+		else
+			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+		dev->ff->set_autocenter(dev, 0);
+	}
+
 	/* Create sysfs interface */
 	error = device_create_file(&hid->dev, &dev_attr_range);
 	if (error)
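
[Editorial note: the new autocenter handler maps the 16-bit magnitude onto the wheel command bytes through a two-segment linear ramp. A worked check of that arithmetic follows; it is standalone user-space C, not driver code, and all names are hypothetical.]

#include <stdio.h>
#include <stdint.h>

/*
 * Editorial sketch: the two-segment ramp from
 * hid_lg4ff_set_autocenter_default() above, reproduced standalone so
 * the resulting command bytes can be checked. 'momo' selects whether
 * the halving of expand_a for non-MOMO wheels is skipped.
 */
static void autocenter_bytes(uint32_t magnitude, int momo, uint8_t out[3])
{
	uint32_t expand_a, expand_b;

	if (magnitude <= 0xaaaa) {
		expand_a = 0x0c * magnitude;
		expand_b = 0x80 * magnitude;
	} else {
		expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
		expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
	}

	if (!momo)
		expand_a >>= 1;

	out[0] = expand_a / 0xaaaa;	/* value[2] */
	out[1] = expand_a / 0xaaaa;	/* value[3] */
	out[2] = expand_b / 0xaaaa;	/* value[4] */
}

int main(void)
{
	uint8_t b[3];

	autocenter_bytes(0xffff, 1, b);	/* MOMO wheel, full strength */
	printf("%02x %02x %02x\n", b[0], b[1], b[2]);	/* -> 0f 0f ff */

	autocenter_bytes(0xffff, 0, b);	/* other wheels: gentler ramp */
	printf("%02x %02x %02x\n", b[0], b[1], b[2]);	/* -> 07 07 ff */
	return 0;
}
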
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 2e53024..a7947d8 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -542,9 +542,9 @@
 	return 0;
 }
 
-static void rdcat(char **rdesc, unsigned int *rsize, const char *data, unsigned int size)
+static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size)
 {
-	memcpy(*rdesc + *rsize, data, size);
+	memcpy(rdesc + *rsize, data, size);
 	*rsize += size;
 }
 
@@ -567,31 +567,31 @@
 	if (djdev->reports_supported & STD_KEYBOARD) {
 		dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
 			__func__, djdev->reports_supported);
-		rdcat(&rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
+		rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
 	}
 
 	if (djdev->reports_supported & STD_MOUSE) {
 		dbg_hid("%s: sending a mouse descriptor, reports_supported: "
 			"%x\n", __func__, djdev->reports_supported);
-		rdcat(&rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+		rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
 	}
 
 	if (djdev->reports_supported & MULTIMEDIA) {
 		dbg_hid("%s: sending a multimedia report descriptor: %x\n",
 			__func__, djdev->reports_supported);
-		rdcat(&rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
+		rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
 	}
 
 	if (djdev->reports_supported & POWER_KEYS) {
 		dbg_hid("%s: sending a power keys report descriptor: %x\n",
 			__func__, djdev->reports_supported);
-		rdcat(&rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
+		rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
 	}
 
 	if (djdev->reports_supported & MEDIA_CENTER) {
 		dbg_hid("%s: sending a media center report descriptor: %x\n",
 			__func__, djdev->reports_supported);
-		rdcat(&rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
+		rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
 	}
 
 	if (djdev->reports_supported & KBD_LEDS) {
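
The rdcat() change above is a cleanup, not a behaviour fix: the helper only appends bytes and advances a running length, so the extra level of indirection bought nothing. A self-contained userspace sketch of the same helper (the descriptor bytes are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Append "size" bytes of "data" to "rdesc"; bounds are the caller's
     * job, as in the driver. */
    static void rdcat(char *rdesc, unsigned int *rsize,
                      const char *data, unsigned int size)
    {
        memcpy(rdesc + *rsize, data, size);
        *rsize += size;
    }

    int main(void)
    {
        char buf[16];
        unsigned int len = 0;

        rdcat(buf, &len, "\x05\x01", 2);    /* Usage Page (Generic Desktop) */
        rdcat(buf, &len, "\x09\x06", 2);    /* Usage (Keyboard) */
        printf("descriptor length: %u\n", len);     /* 4 */
        return 0;
    }
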
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5e5fe1b..a2cedb8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -250,12 +250,12 @@
 	{ .name	= MT_CLS_GENERALTOUCH_TWOFINGERS,
 		.quirks	= MT_QUIRK_NOT_SEEN_MEANS_UP |
 			MT_QUIRK_VALID_IS_INRANGE |
-			MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+			MT_QUIRK_SLOT_IS_CONTACTID,
 		.maxcontacts = 2
 	},
 	{ .name	= MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
 		.quirks	= MT_QUIRK_NOT_SEEN_MEANS_UP |
-			MT_QUIRK_SLOT_IS_CONTACTNUMBER
+			MT_QUIRK_SLOT_IS_CONTACTID
 	},
 
 	{ .name = MT_CLS_FLATFROG,
@@ -1173,6 +1173,21 @@
 	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
 		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
 			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+	{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+	{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
 
 	/* Gametel game controller */
 	{ .driver_data = MT_CLS_NSMU,
@@ -1284,6 +1299,14 @@
 		MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
 			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
 
+	/* SiS panels */
+	{ .driver_data = MT_CLS_DEFAULT,
+		HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+		USB_DEVICE_ID_SIS9200_TOUCH) },
+	{ .driver_data = MT_CLS_DEFAULT,
+		HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+		USB_DEVICE_ID_SIS817_TOUCH) },
+
 	/* Stantum panels */
 	{ .driver_data = MT_CLS_CONFIDENCE,
 		MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index 74f7040..02e28e9 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -65,10 +65,11 @@
 EXPORT_SYMBOL_GPL(roccat_common2_send);
 
 enum roccat_common2_control_states {
-	ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+	ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0,
 	ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
 	ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
-	ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+	ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3,
+	ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4,
 };
 
 static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
@@ -88,13 +89,12 @@
 		switch (control.value) {
 		case ROCCAT_COMMON_CONTROL_STATUS_OK:
 			return 0;
-		case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+		case ROCCAT_COMMON_CONTROL_STATUS_BUSY:
 			msleep(500);
 			continue;
 		case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
-
-		case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
-			/* seems to be critical - replug necessary */
+		case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL:
+		case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW:
 			return -EINVAL;
 		default:
 			dev_err(&usb_dev->dev,
@@ -122,6 +122,59 @@
 }
 EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
 
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+		struct roccat_common2_device *dev)
+{
+	mutex_init(&dev->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_device_init_struct);
+
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+		char *buf, loff_t off, size_t count,
+		size_t real_size, uint command)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+
+	if (off >= real_size)
+		return 0;
+
+	if (off != 0 || count != real_size)
+		return -EINVAL;
+
+	mutex_lock(&roccat_dev->lock);
+	retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+	mutex_unlock(&roccat_dev->lock);
+
+	return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_read);
+
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+		void const *buf, loff_t off, size_t count,
+		size_t real_size, uint command)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+
+	if (off != 0 || count != real_size)
+		return -EINVAL;
+
+	mutex_lock(&roccat_dev->lock);
+	retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size);
+	mutex_unlock(&roccat_dev->lock);
+
+	return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_write);
+
 MODULE_AUTHOR("Stefan Achatz");
 MODULE_DESCRIPTION("USB Roccat common driver");
 MODULE_LICENSE("GPL v2");
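
Besides the new shared sysfs helpers, the renamed control states change behaviour in one spot: BUSY (formerly WAIT) still sleeps and retries, while INVALID and both CRITICAL codes now fail immediately. A standalone sketch of that control-status loop, with a stub standing in for the USB control read (all names here are ours):

    #include <stdio.h>

    enum {
        STATUS_CRITICAL     = 0,
        STATUS_OK           = 1,
        STATUS_INVALID      = 2,
        STATUS_BUSY         = 3,
        STATUS_CRITICAL_NEW = 4,
    };

    static int read_status(void)    /* stand-in for the USB control read */
    {
        static int calls;
        return calls++ ? STATUS_OK : STATUS_BUSY;
    }

    static int wait_ready(void)
    {
        for (;;) {
            switch (read_status()) {
            case STATUS_OK:
                return 0;
            case STATUS_BUSY:           /* the driver msleep()s here */
                continue;
            case STATUS_INVALID:
            case STATUS_CRITICAL:
            case STATUS_CRITICAL_NEW:
                return -1;              /* -EINVAL in the driver */
            default:
                return -1;              /* driver logs the unknown value */
            }
        }
    }

    int main(void)
    {
        printf("status: %d\n", wait_ready());   /* 0 after one retry */
        return 0;
    }
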
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index a97746a..eaa56eb 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -32,4 +32,66 @@
 int roccat_common2_send_with_status(struct usb_device *usb_dev,
 		uint command, void const *buf, uint size);
 
+struct roccat_common2_device {
+	int roccat_claimed;
+	int chrdev_minor;
+	struct mutex lock;
+};
+
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+		struct roccat_common2_device *dev);
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+		char *buf, loff_t off, size_t count,
+		size_t real_size, uint command);
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+		void const *buf, loff_t off, size_t count,
+		size_t real_size, uint command);
+
+#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
+		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+		loff_t off, size_t count) \
+{ \
+	return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
+			SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
+		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+		loff_t off, size_t count) \
+{ \
+	return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
+			SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+	.attr = { .name = #thingy, .mode = 0660 }, \
+	.size = SIZE, \
+	.read = roccat_common2_sysfs_read_ ## thingy, \
+	.write = roccat_common2_sysfs_write_ ## thingy \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+	.attr = { .name = #thingy, .mode = 0440 }, \
+	.size = SIZE, \
+	.read = roccat_common2_sysfs_read_ ## thingy, \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+	.attr = { .name = #thingy, .mode = 0220 }, \
+	.size = SIZE, \
+	.write = roccat_common2_sysfs_write_ ## thingy \
+}
+
 #endif
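
These macros stamp out one sysfs accessor pair plus a struct bin_attribute per (name, command, size) triple via token pasting, which is what lets the konepure and savu drivers below shrink so much. A userspace toy demonstrating the same pattern with stand-in types (every identifier here is illustrative; nothing is the driver's API):

    #include <stdio.h>

    struct bin_attr {
        const char *name;
        unsigned int mode, size;
        int (*read)(char *buf);
    };

    #define DEFINE_ATTR_R(thingy, COMMAND, SIZE)                    \
    static int read_ ## thingy(char *buf)                           \
    {                                                               \
        buf[0] = (COMMAND); /* would issue the USB command */       \
        return (SIZE);                                              \
    }                                                               \
    static struct bin_attr attr_ ## thingy = {                      \
        .name = #thingy, .mode = 0440, .size = (SIZE),              \
        .read = read_ ## thingy,                                    \
    }

    DEFINE_ATTR_R(info, 0x09, 0x06);

    int main(void)
    {
        char buf[8];
        int n = attr_info.read(buf);

        printf("%s: %d bytes, cmd %#x\n", attr_info.name, n, buf[0]);
        return 0;
    }
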
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index 99a605e..07de2f9 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -15,6 +15,7 @@
  * Roccat KonePure is a smaller version of the KoneXTD with fewer buttons and lights.
  */
 
+#include <linux/types.h>
 #include <linux/device.h>
 #include <linux/input.h>
 #include <linux/hid.h>
@@ -23,128 +24,50 @@
 #include <linux/hid-roccat.h>
 #include "hid-ids.h"
 #include "hid-roccat-common.h"
-#include "hid-roccat-konepure.h"
+
+enum {
+	KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
+};
+
+struct konepure_mouse_report_button {
+	uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
+	uint8_t zero;
+	uint8_t type;
+	uint8_t data1;
+	uint8_t data2;
+	uint8_t zero2;
+	uint8_t unknown[2];
+} __packed;
 
 static struct class *konepure_class;
 
-static ssize_t konepure_sysfs_read(struct file *fp, struct kobject *kobj,
-		char *buf, loff_t off, size_t count,
-		size_t real_size, uint command)
-{
-	struct device *dev =
-			container_of(kobj, struct device, kobj)->parent->parent;
-	struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
-	int retval;
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_buttons, 0x07, 0x3b);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(macro, 0x08, 0x0822);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(tcu, 0x0c, 0x04);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
 
-	if (off >= real_size)
-		return 0;
-
-	if (off != 0 || count != real_size)
-		return -EINVAL;
-
-	mutex_lock(&konepure->konepure_lock);
-	retval = roccat_common2_receive(usb_dev, command, buf, real_size);
-	mutex_unlock(&konepure->konepure_lock);
-
-	return retval ? retval : real_size;
-}
-
-static ssize_t konepure_sysfs_write(struct file *fp, struct kobject *kobj,
-		void const *buf, loff_t off, size_t count,
-		size_t real_size, uint command)
-{
-	struct device *dev =
-			container_of(kobj, struct device, kobj)->parent->parent;
-	struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
-	int retval;
-
-	if (off != 0 || count != real_size)
-		return -EINVAL;
-
-	mutex_lock(&konepure->konepure_lock);
-	retval = roccat_common2_send_with_status(usb_dev, command,
-			(void *)buf, real_size);
-	mutex_unlock(&konepure->konepure_lock);
-
-	return retval ? retval : real_size;
-}
-
-#define KONEPURE_SYSFS_W(thingy, THINGY) \
-static ssize_t konepure_sysfs_write_ ## thingy(struct file *fp, \
-		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
-		loff_t off, size_t count) \
-{ \
-	return konepure_sysfs_write(fp, kobj, buf, off, count, \
-			KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
-
-#define KONEPURE_SYSFS_R(thingy, THINGY) \
-static ssize_t konepure_sysfs_read_ ## thingy(struct file *fp, \
-		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
-		loff_t off, size_t count) \
-{ \
-	return konepure_sysfs_read(fp, kobj, buf, off, count, \
-			KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
-
-#define KONEPURE_SYSFS_RW(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY)
-
-#define KONEPURE_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-KONEPURE_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
-	.attr = { .name = #thingy, .mode = 0660 }, \
-	.size = KONEPURE_SIZE_ ## THINGY, \
-	.read = konepure_sysfs_read_ ## thingy, \
-	.write = konepure_sysfs_write_ ## thingy \
-}
-
-#define KONEPURE_BIN_ATTRIBUTE_R(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
-	.attr = { .name = #thingy, .mode = 0440 }, \
-	.size = KONEPURE_SIZE_ ## THINGY, \
-	.read = konepure_sysfs_read_ ## thingy, \
-}
-
-#define KONEPURE_BIN_ATTRIBUTE_W(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
-	.attr = { .name = #thingy, .mode = 0220 }, \
-	.size = KONEPURE_SIZE_ ## THINGY, \
-	.write = konepure_sysfs_write_ ## thingy \
-}
-
-KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE);
-KONEPURE_BIN_ATTRIBUTE_RW(info, INFO);
-KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
-KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL);
-KONEPURE_BIN_ATTRIBUTE_W(talk, TALK);
-KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO);
-KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE);
-
-static struct bin_attribute *konepure_bin_attributes[] = {
+static struct bin_attribute *konepure_bin_attrs[] = {
 	&bin_attr_actual_profile,
-	&bin_attr_info,
-	&bin_attr_sensor,
-	&bin_attr_tcu,
-	&bin_attr_profile_settings,
-	&bin_attr_profile_buttons,
 	&bin_attr_control,
+	&bin_attr_info,
 	&bin_attr_talk,
 	&bin_attr_macro,
+	&bin_attr_sensor,
+	&bin_attr_tcu,
 	&bin_attr_tcu_image,
+	&bin_attr_profile_settings,
+	&bin_attr_profile_buttons,
 	NULL,
 };
 
 static const struct attribute_group konepure_group = {
-	.bin_attrs = konepure_bin_attributes,
+	.bin_attrs = konepure_bin_attrs,
 };
 
 static const struct attribute_group *konepure_groups[] = {
@@ -152,20 +75,11 @@
 	NULL,
 };
 
-
-static int konepure_init_konepure_device_struct(struct usb_device *usb_dev,
-		struct konepure_device *konepure)
-{
-	mutex_init(&konepure->konepure_lock);
-
-	return 0;
-}
-
 static int konepure_init_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 	struct usb_device *usb_dev = interface_to_usbdev(intf);
-	struct konepure_device *konepure;
+	struct roccat_common2_device *konepure;
 	int retval;
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -181,9 +95,9 @@
 	}
 	hid_set_drvdata(hdev, konepure);
 
-	retval = konepure_init_konepure_device_struct(usb_dev, konepure);
+	retval = roccat_common2_device_init_struct(usb_dev, konepure);
 	if (retval) {
-		hid_err(hdev, "couldn't init struct konepure_device\n");
+		hid_err(hdev, "couldn't init KonePure device\n");
 		goto exit_free;
 	}
 
@@ -205,7 +119,7 @@
 static void konepure_remove_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct konepure_device *konepure;
+	struct roccat_common2_device *konepure;
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -258,7 +172,7 @@
 		struct hid_report *report, u8 *data, int size)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct konepure_device *konepure = hid_get_drvdata(hdev);
+	struct roccat_common2_device *konepure = hid_get_drvdata(hdev);
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-konepure.h b/drivers/hid/hid-roccat-konepure.h
deleted file mode 100644
index 2cd24e9..0000000
--- a/drivers/hid/hid-roccat-konepure.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef __HID_ROCCAT_KONEPURE_H
-#define __HID_ROCCAT_KONEPURE_H
-
-/*
- * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/types.h>
-
-enum {
-	KONEPURE_SIZE_ACTUAL_PROFILE = 0x03,
-	KONEPURE_SIZE_CONTROL = 0x03,
-	KONEPURE_SIZE_FIRMWARE_WRITE = 0x0402,
-	KONEPURE_SIZE_INFO = 0x06,
-	KONEPURE_SIZE_MACRO = 0x0822,
-	KONEPURE_SIZE_PROFILE_SETTINGS = 0x1f,
-	KONEPURE_SIZE_PROFILE_BUTTONS = 0x3b,
-	KONEPURE_SIZE_SENSOR = 0x06,
-	KONEPURE_SIZE_TALK = 0x10,
-	KONEPURE_SIZE_TCU = 0x04,
-	KONEPURE_SIZE_TCU_IMAGE = 0x0404,
-};
-
-enum konepure_control_requests {
-	KONEPURE_CONTROL_REQUEST_GENERAL = 0x80,
-	KONEPURE_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum konepure_commands {
-	KONEPURE_COMMAND_CONTROL = 0x04,
-	KONEPURE_COMMAND_ACTUAL_PROFILE = 0x05,
-	KONEPURE_COMMAND_PROFILE_SETTINGS = 0x06,
-	KONEPURE_COMMAND_PROFILE_BUTTONS = 0x07,
-	KONEPURE_COMMAND_MACRO = 0x08,
-	KONEPURE_COMMAND_INFO = 0x09,
-	KONEPURE_COMMAND_TCU = 0x0c,
-	KONEPURE_COMMAND_TCU_IMAGE = 0x0c,
-	KONEPURE_COMMAND_E = 0x0e,
-	KONEPURE_COMMAND_SENSOR = 0x0f,
-	KONEPURE_COMMAND_TALK = 0x10,
-	KONEPURE_COMMAND_FIRMWARE_WRITE = 0x1b,
-	KONEPURE_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
-};
-
-enum {
-	KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
-};
-
-struct konepure_mouse_report_button {
-	uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
-	uint8_t zero;
-	uint8_t type;
-	uint8_t data1;
-	uint8_t data2;
-	uint8_t zero2;
-	uint8_t unknown[2];
-} __packed;
-
-struct konepure_device {
-	int roccat_claimed;
-	int chrdev_minor;
-	struct mutex konepure_lock;
-};
-
-#endif
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 0c8e1ef..9660477 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -554,9 +554,13 @@
 		break;
 	case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
 		kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
+		break;
 	case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
 		kovaplus->actual_x_sensitivity = button_report->data1;
 		kovaplus->actual_y_sensitivity = button_report->data2;
+		break;
+	default:
+		break;
 	}
 }
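
The kovaplus hunk above fixes an unintended switch fall-through: without the break, a CPI event also overwrote both sensitivity fields with the CPI report's payload. A minimal reproduction of the pre-fix shape in plain C:

    #include <stdio.h>

    int main(void)
    {
        int cpi = 0, sens = 0, type = 1;    /* 1 = CPI event */

        switch (type) {
        case 1:
            cpi = 800;
            /* without a break here, execution falls through ... */
        case 2:
            sens = 5;   /* ... and clobbers the sensitivity */
        }
        printf("cpi=%d sens=%d\n", cpi, sens);  /* cpi=800 sens=5 */
        return 0;
    }
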
 
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
new file mode 100644
index 0000000..47cc8f3
--- /dev/null
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -0,0 +1,241 @@
+/*
+ * Roccat Ryos driver for Linux
+ *
+ * Copyright (c) 2013 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+
+enum {
+	RYOS_REPORT_NUMBER_SPECIAL = 3,
+	RYOS_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct ryos_report_special {
+	uint8_t number; /* RYOS_REPORT_NUMBER_SPECIAL */
+	uint8_t data[4];
+} __packed;
+
+static struct class *ryos_class;
+
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_function, 0x07, 0x5f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_macro, 0x08, 0x23);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_thumbster, 0x09, 0x17);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_extra, 0x0a, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_easyzone, 0x0b, 0x126);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(key_mask, 0x0c, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light, 0x0d, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x0e, 0x7d2);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(info, 0x0f, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(reset, 0x11, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(light_control, 0x13, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x16, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
+
+static struct bin_attribute *ryos_bin_attrs[] = {
+	&bin_attr_control,
+	&bin_attr_profile,
+	&bin_attr_keys_primary,
+	&bin_attr_keys_function,
+	&bin_attr_keys_macro,
+	&bin_attr_keys_thumbster,
+	&bin_attr_keys_extra,
+	&bin_attr_keys_easyzone,
+	&bin_attr_key_mask,
+	&bin_attr_light,
+	&bin_attr_macro,
+	&bin_attr_info,
+	&bin_attr_reset,
+	&bin_attr_light_control,
+	&bin_attr_talk,
+	&bin_attr_stored_lights,
+	&bin_attr_custom_lights,
+	&bin_attr_light_macro,
+	NULL,
+};
+
+static const struct attribute_group ryos_group = {
+	.bin_attrs = ryos_bin_attrs,
+};
+
+static const struct attribute_group *ryos_groups[] = {
+	&ryos_group,
+	NULL,
+};
+
+static int ryos_init_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct usb_device *usb_dev = interface_to_usbdev(intf);
+	struct roccat_common2_device *ryos;
+	int retval;
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			!= RYOS_USB_INTERFACE_PROTOCOL) {
+		hid_set_drvdata(hdev, NULL);
+		return 0;
+	}
+
+	ryos = kzalloc(sizeof(*ryos), GFP_KERNEL);
+	if (!ryos) {
+		hid_err(hdev, "can't alloc device descriptor\n");
+		return -ENOMEM;
+	}
+	hid_set_drvdata(hdev, ryos);
+
+	retval = roccat_common2_device_init_struct(usb_dev, ryos);
+	if (retval) {
+		hid_err(hdev, "couldn't init Ryos device\n");
+		goto exit_free;
+	}
+
+	retval = roccat_connect(ryos_class, hdev,
+			sizeof(struct ryos_report_special));
+	if (retval < 0) {
+		hid_err(hdev, "couldn't init char dev\n");
+	} else {
+		ryos->chrdev_minor = retval;
+		ryos->roccat_claimed = 1;
+	}
+
+	return 0;
+exit_free:
+	kfree(ryos);
+	return retval;
+}
+
+static void ryos_remove_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct roccat_common2_device *ryos;
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			!= RYOS_USB_INTERFACE_PROTOCOL)
+		return;
+
+	ryos = hid_get_drvdata(hdev);
+	if (ryos->roccat_claimed)
+		roccat_disconnect(ryos->chrdev_minor);
+	kfree(ryos);
+}
+
+static int ryos_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int retval;
+
+	retval = hid_parse(hdev);
+	if (retval) {
+		hid_err(hdev, "parse failed\n");
+		goto exit;
+	}
+
+	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (retval) {
+		hid_err(hdev, "hw start failed\n");
+		goto exit;
+	}
+
+	retval = ryos_init_specials(hdev);
+	if (retval) {
+		hid_err(hdev, "couldn't install mouse\n");
+		goto exit_stop;
+	}
+
+	return 0;
+
+exit_stop:
+	hid_hw_stop(hdev);
+exit:
+	return retval;
+}
+
+static void ryos_remove(struct hid_device *hdev)
+{
+	ryos_remove_specials(hdev);
+	hid_hw_stop(hdev);
+}
+
+static int ryos_raw_event(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct roccat_common2_device *ryos = hid_get_drvdata(hdev);
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			!= RYOS_USB_INTERFACE_PROTOCOL)
+		return 0;
+
+	if (data[0] != RYOS_REPORT_NUMBER_SPECIAL)
+		return 0;
+
+	if (ryos != NULL && ryos->roccat_claimed)
+		roccat_report_event(ryos->chrdev_minor, data);
+
+	return 0;
+}
+
+static const struct hid_device_id ryos_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(hid, ryos_devices);
+
+static struct hid_driver ryos_driver = {
+		.name = "ryos",
+		.id_table = ryos_devices,
+		.probe = ryos_probe,
+		.remove = ryos_remove,
+		.raw_event = ryos_raw_event
+};
+
+static int __init ryos_init(void)
+{
+	int retval;
+
+	ryos_class = class_create(THIS_MODULE, "ryos");
+	if (IS_ERR(ryos_class))
+		return PTR_ERR(ryos_class);
+	ryos_class->dev_groups = ryos_groups;
+
+	retval = hid_register_driver(&ryos_driver);
+	if (retval)
+		class_destroy(ryos_class);
+	return retval;
+}
+
+static void __exit ryos_exit(void)
+{
+	hid_unregister_driver(&ryos_driver);
+	class_destroy(ryos_class);
+}
+
+module_init(ryos_init);
+module_exit(ryos_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Ryos MK/Glow/Pro driver");
+MODULE_LICENSE("GPL v2");
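
Once the new driver binds, each command above surfaces as a binary sysfs attribute that must be read whole, since the common helper rejects partial reads. A userspace sketch reading the 8-byte info attribute; the device path and the meaning of byte 2 are assumptions for illustration, not documented layout:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char info[0x08];   /* count must equal the attribute size */
        int fd = open("/sys/class/ryos/ryos0/info", O_RDONLY); /* path varies */

        if (fd < 0 || read(fd, info, sizeof(info)) != sizeof(info)) {
            perror("read info");
            return 1;
        }
        printf("info byte 2: %u\n", info[2]);   /* layout assumed */
        close(fd);
        return 0;
    }
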
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 0332267..6dbf6e0 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -27,98 +27,15 @@
 
 static struct class *savu_class;
 
-static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
-		char *buf, loff_t off, size_t count,
-		size_t real_size, uint command)
-{
-	struct device *dev =
-			container_of(kobj, struct device, kobj)->parent->parent;
-	struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
-	int retval;
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(buttons, 0x7, 0x2f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
 
-	if (off >= real_size)
-		return 0;
-
-	if (off != 0 || count != real_size)
-		return -EINVAL;
-
-	mutex_lock(&savu->savu_lock);
-	retval = roccat_common2_receive(usb_dev, command, buf, real_size);
-	mutex_unlock(&savu->savu_lock);
-
-	return retval ? retval : real_size;
-}
-
-static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
-		void const *buf, loff_t off, size_t count,
-		size_t real_size, uint command)
-{
-	struct device *dev =
-			container_of(kobj, struct device, kobj)->parent->parent;
-	struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
-	int retval;
-
-	if (off != 0 || count != real_size)
-		return -EINVAL;
-
-	mutex_lock(&savu->savu_lock);
-	retval = roccat_common2_send_with_status(usb_dev, command,
-			(void *)buf, real_size);
-	mutex_unlock(&savu->savu_lock);
-
-	return retval ? retval : real_size;
-}
-
-#define SAVU_SYSFS_W(thingy, THINGY) \
-static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
-		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
-		loff_t off, size_t count) \
-{ \
-	return savu_sysfs_write(fp, kobj, buf, off, count, \
-			SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_R(thingy, THINGY) \
-static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
-		struct kobject *kobj, struct bin_attribute *attr, char *buf, \
-		loff_t off, size_t count) \
-{ \
-	return savu_sysfs_read(fp, kobj, buf, off, count, \
-			SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_RW(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY) \
-SAVU_SYSFS_R(thingy, THINGY)
-
-#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-SAVU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
-	.attr = { .name = #thingy, .mode = 0660 }, \
-	.size = SAVU_SIZE_ ## THINGY, \
-	.read = savu_sysfs_read_ ## thingy, \
-	.write = savu_sysfs_write_ ## thingy \
-}
-
-#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
-	.attr = { .name = #thingy, .mode = 0220 }, \
-	.size = SAVU_SIZE_ ## THINGY, \
-	.write = savu_sysfs_write_ ## thingy \
-}
-
-SAVU_BIN_ATTRIBUTE_W(control, CONTROL);
-SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE);
-SAVU_BIN_ATTRIBUTE_RW(general, GENERAL);
-SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS);
-SAVU_BIN_ATTRIBUTE_RW(macro, MACRO);
-SAVU_BIN_ATTRIBUTE_RW(info, INFO);
-SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-
-static struct bin_attribute *savu_bin_attributes[] = {
+static struct bin_attribute *savu_bin_attrs[] = {
 	&bin_attr_control,
 	&bin_attr_profile,
 	&bin_attr_general,
@@ -130,7 +47,7 @@
 };
 
 static const struct attribute_group savu_group = {
-	.bin_attrs = savu_bin_attributes,
+	.bin_attrs = savu_bin_attrs,
 };
 
 static const struct attribute_group *savu_groups[] = {
@@ -138,19 +55,11 @@
 	NULL,
 };
 
-static int savu_init_savu_device_struct(struct usb_device *usb_dev,
-		struct savu_device *savu)
-{
-	mutex_init(&savu->savu_lock);
-
-	return 0;
-}
-
 static int savu_init_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 	struct usb_device *usb_dev = interface_to_usbdev(intf);
-	struct savu_device *savu;
+	struct roccat_common2_device *savu;
 	int retval;
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -166,9 +75,9 @@
 	}
 	hid_set_drvdata(hdev, savu);
 
-	retval = savu_init_savu_device_struct(usb_dev, savu);
+	retval = roccat_common2_device_init_struct(usb_dev, savu);
 	if (retval) {
-		hid_err(hdev, "couldn't init struct savu_device\n");
+		hid_err(hdev, "couldn't init Savu device\n");
 		goto exit_free;
 	}
 
@@ -190,7 +99,7 @@
 static void savu_remove_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct savu_device *savu;
+	struct roccat_common2_device *savu;
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -239,7 +148,7 @@
 	hid_hw_stop(hdev);
 }
 
-static void savu_report_to_chrdev(struct savu_device const *savu,
+static void savu_report_to_chrdev(struct roccat_common2_device const *savu,
 		u8 const *data)
 {
 	struct savu_roccat_report roccat_report;
@@ -261,7 +170,7 @@
 		struct hid_report *report, u8 *data, int size)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct savu_device *savu = hid_get_drvdata(hdev);
+	struct roccat_common2_device *savu = hid_get_drvdata(hdev);
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
index 9120ba7..d23217b 100644
--- a/drivers/hid/hid-roccat-savu.h
+++ b/drivers/hid/hid-roccat-savu.h
@@ -14,31 +14,6 @@
 
 #include <linux/types.h>
 
-enum {
-	SAVU_SIZE_CONTROL = 0x03,
-	SAVU_SIZE_PROFILE = 0x03,
-	SAVU_SIZE_GENERAL = 0x10,
-	SAVU_SIZE_BUTTONS = 0x2f,
-	SAVU_SIZE_MACRO = 0x0823,
-	SAVU_SIZE_INFO = 0x08,
-	SAVU_SIZE_SENSOR = 0x04,
-};
-
-enum savu_control_requests {
-	SAVU_CONTROL_REQUEST_GENERAL = 0x80,
-	SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum savu_commands {
-	SAVU_COMMAND_CONTROL = 0x4,
-	SAVU_COMMAND_PROFILE = 0x5,
-	SAVU_COMMAND_GENERAL = 0x6,
-	SAVU_COMMAND_BUTTONS = 0x7,
-	SAVU_COMMAND_MACRO = 0x8,
-	SAVU_COMMAND_INFO = 0x9,
-	SAVU_COMMAND_SENSOR = 0xc,
-};
-
 struct savu_mouse_report_special {
 	uint8_t report_number; /* always 3 */
 	uint8_t zero;
@@ -77,11 +52,4 @@
 	uint8_t data[2];
 } __packed;
 
-struct savu_device {
-	int roccat_claimed;
-	int chrdev_minor;
-
-	struct mutex savu_lock;
-};
-
 #endif
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 88fc5ae..a184e19 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -326,7 +326,8 @@
 				field->logical == attr_usage_id) {
 				sensor_hub_fill_attr_info(info, i, report->id,
 					field->unit, field->unit_exponent,
-					field->report_size);
+					field->report_size *
+							field->report_count);
 				ret = 0;
 			} else {
 				for (j = 0; j < field->maxusage; ++j) {
@@ -338,7 +339,8 @@
 							i, report->id,
 							field->unit,
 							field->unit_exponent,
-							field->report_size);
+							field->report_size *
+							field->report_count);
 						ret = 0;
 						break;
 					}
@@ -425,9 +427,10 @@
 		hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n",
 				i, report->field[i]->usage->collection_index,
 				report->field[i]->usage->hid,
-				report->field[i]->report_size/8);
-
-		sz = report->field[i]->report_size/8;
+				(report->field[i]->report_size *
+					report->field[i]->report_count)/8);
+		sz = (report->field[i]->report_size *
+					report->field[i]->report_count)/8;
 		if (pdata->pending.status && pdata->pending.attr_usage_id ==
 				report->field[i]->usage->hid) {
 			hid_dbg(hdev, "data was pending ...\n");
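
Both sensor-hub hunks make the same correction: a field's payload is report_size bits per element times report_count elements, so any byte size derived from it must use the product. The arithmetic, standalone:

    #include <stdio.h>

    static unsigned int field_bytes(unsigned int report_size,
                                    unsigned int report_count)
    {
        return (report_size * report_count) / 8;
    }

    int main(void)
    {
        /* e.g. three 16-bit samples: 6 bytes, where the old code
         * (report_size alone) would have claimed only 2 */
        printf("%u\n", field_bytes(16, 3));
        return 0;
    }
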
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b18320d..da551d1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -419,21 +419,14 @@
  */
 static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
-	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct usb_device *dev = interface_to_usbdev(intf);
-	__u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
 	int ret;
 	char *buf = kmalloc(18, GFP_KERNEL);
 
 	if (!buf)
 		return -ENOMEM;
 
-	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-				 HID_REQ_GET_REPORT,
-				 USB_DIR_IN | USB_TYPE_CLASS |
-				 USB_RECIP_INTERFACE,
-				 (3 << 8) | 0xf2, ifnum, buf, 17,
-				 USB_CTRL_GET_TIMEOUT);
+	ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+
 	if (ret < 0)
 		hid_err(hdev, "can't set operational mode\n");
 
@@ -621,6 +614,54 @@
 	drv_data->extra = NULL;
 }
 
+#ifdef CONFIG_SONY_FF
+static int sony_play_effect(struct input_dev *dev, void *data,
+			    struct ff_effect *effect)
+{
+	unsigned char buf[] = {
+		0x01,
+		0x00, 0xff, 0x00, 0xff, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x03,
+		0xff, 0x27, 0x10, 0x00, 0x32,
+		0xff, 0x27, 0x10, 0x00, 0x32,
+		0xff, 0x27, 0x10, 0x00, 0x32,
+		0xff, 0x27, 0x10, 0x00, 0x32,
+		0x00, 0x00, 0x00, 0x00, 0x00
+	};
+	__u8 left;
+	__u8 right;
+	struct hid_device *hid = input_get_drvdata(dev);
+
+	if (effect->type != FF_RUMBLE)
+		return 0;
+
+	left = effect->u.rumble.strong_magnitude / 256;
+	right = effect->u.rumble.weak_magnitude ? 1 : 0;
+
+	buf[3] = right;
+	buf[5] = left;
+
+	return hid->hid_output_raw_report(hid, buf, sizeof(buf),
+					  HID_OUTPUT_REPORT);
+}
+
+static int sony_init_ff(struct hid_device *hdev)
+{
+	struct hid_input *hidinput = list_entry(hdev->inputs.next,
+						struct hid_input, list);
+	struct input_dev *input_dev = hidinput->input;
+
+	input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+	return input_ff_create_memless(input_dev, NULL, sony_play_effect);
+}
+
+#else
+static int sony_init_ff(struct hid_device *hdev)
+{
+	return 0;
+}
+#endif
+
 static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
 	int ret;
@@ -670,6 +711,10 @@
 	if (ret < 0)
 		goto err_stop;
 
+	ret = sony_init_ff(hdev);
+	if (ret < 0)
+		goto err_stop;
+
 	return 0;
 err_stop:
 	hid_hw_stop(hdev);
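
The new CONFIG_SONY_FF path maps the memless rumble effect onto the Sixaxis output report: the variable-speed motor takes an 8-bit level, the fixed-speed motor is only on or off. A standalone sketch of just that mapping (the helper name is ours):

    #include <stdint.h>
    #include <stdio.h>

    static void sixaxis_rumble(uint16_t strong, uint16_t weak,
                               uint8_t *left, uint8_t *right)
    {
        *left = strong / 256;   /* buf[5]: variable-speed motor */
        *right = weak ? 1 : 0;  /* buf[3]: fixed-speed motor */
    }

    int main(void)
    {
        uint8_t l, r;

        sixaxis_rumble(0x8000, 0x0001, &l, &r);
        printf("left=%u right=%u\n", l, r); /* left=128 right=1 */
        return 0;
    }
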
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 71adf9e..6b61f01 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1655,10 +1655,39 @@
 	ly = (ext[4] & 0xff) | ((ext[5] & 0x0f) << 8);
 	ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);
 
-	input_report_abs(wdata->extension.input, ABS_X, lx - 0x800);
-	input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800);
-	input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800);
-	input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800);
+	/* zero-point offsets */
+	lx -= 0x800;
+	ly = 0x800 - ly;
+	rx -= 0x800;
+	ry = 0x800 - ry;
+
+	/* Trivial automatic calibration. We don't know of any calibration
+	 * data in the EEPROM, so the first report is used to calibrate the
+	 * null position of the analog sticks. Users can retrigger the
+	 * calibration via sysfs or set it explicitly. If a value is off by
+	 * more than 500, calibration is skipped, as the stick has likely
+	 * already been moved. */
+	if (!(wdata->state.flags & WIIPROTO_FLAG_PRO_CALIB_DONE)) {
+		wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+		if (abs(lx) < 500)
+			wdata->state.calib_pro_sticks[0] = -lx;
+		if (abs(ly) < 500)
+			wdata->state.calib_pro_sticks[1] = -ly;
+		if (abs(rx) < 500)
+			wdata->state.calib_pro_sticks[2] = -rx;
+		if (abs(ry) < 500)
+			wdata->state.calib_pro_sticks[3] = -ry;
+	}
+
+	/* apply calibration data */
+	lx += wdata->state.calib_pro_sticks[0];
+	ly += wdata->state.calib_pro_sticks[1];
+	rx += wdata->state.calib_pro_sticks[2];
+	ry += wdata->state.calib_pro_sticks[3];
+
+	input_report_abs(wdata->extension.input, ABS_X, lx);
+	input_report_abs(wdata->extension.input, ABS_Y, ly);
+	input_report_abs(wdata->extension.input, ABS_RX, rx);
+	input_report_abs(wdata->extension.input, ABS_RY, ry);
 
 	input_report_key(wdata->extension.input,
 			 wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
@@ -1766,12 +1795,70 @@
 	return 0;
 }
 
+static ssize_t wiimod_pro_calib_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *out)
+{
+	struct wiimote_data *wdata = dev_to_wii(dev);
+	int r;
+
+	r = 0;
+	r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[0]);
+	r += sprintf(&out[r], "%+06hd ", wdata->state.calib_pro_sticks[1]);
+	r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[2]);
+	r += sprintf(&out[r], "%+06hd\n", wdata->state.calib_pro_sticks[3]);
+
+	return r;
+}
+
+static ssize_t wiimod_pro_calib_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct wiimote_data *wdata = dev_to_wii(dev);
+	int r;
+	s16 x1, y1, x2, y2;
+
+	if (!strncmp(buf, "scan\n", 5)) {
+		spin_lock_irq(&wdata->state.lock);
+		wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+		spin_unlock_irq(&wdata->state.lock);
+	} else {
+		r = sscanf(buf, "%hd:%hd %hd:%hd", &x1, &y1, &x2, &y2);
+		if (r != 4)
+			return -EINVAL;
+
+		spin_lock_irq(&wdata->state.lock);
+		wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+		spin_unlock_irq(&wdata->state.lock);
+
+		wdata->state.calib_pro_sticks[0] = x1;
+		wdata->state.calib_pro_sticks[1] = y1;
+		wdata->state.calib_pro_sticks[2] = x2;
+		wdata->state.calib_pro_sticks[3] = y2;
+	}
+
+	return strnlen(buf, PAGE_SIZE);
+}
+
+static DEVICE_ATTR(pro_calib, S_IRUGO|S_IWUSR|S_IWGRP, wiimod_pro_calib_show,
+		   wiimod_pro_calib_store);
+
 static int wiimod_pro_probe(const struct wiimod_ops *ops,
 			    struct wiimote_data *wdata)
 {
 	int ret, i;
+	unsigned long flags;
 
 	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+	wdata->state.calib_pro_sticks[0] = 0;
+	wdata->state.calib_pro_sticks[1] = 0;
+	wdata->state.calib_pro_sticks[2] = 0;
+	wdata->state.calib_pro_sticks[3] = 0;
+
+	spin_lock_irqsave(&wdata->state.lock, flags);
+	wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+	spin_unlock_irqrestore(&wdata->state.lock, flags);
 
 	wdata->extension.input = input_allocate_device();
 	if (!wdata->extension.input)
@@ -1786,6 +1873,13 @@
 		goto err_free;
 	}
 
+	ret = device_create_file(&wdata->hdev->dev,
+				 &dev_attr_pro_calib);
+	if (ret) {
+		hid_err(wdata->hdev, "cannot create sysfs attribute\n");
+		goto err_free;
+	}
+
 	wdata->extension.input->open = wiimod_pro_open;
 	wdata->extension.input->close = wiimod_pro_close;
 	wdata->extension.input->dev.parent = &wdata->hdev->dev;
@@ -1806,20 +1900,23 @@
 	set_bit(ABS_RX, wdata->extension.input->absbit);
 	set_bit(ABS_RY, wdata->extension.input->absbit);
 	input_set_abs_params(wdata->extension.input,
-			     ABS_X, -0x800, 0x800, 2, 4);
+			     ABS_X, -0x400, 0x400, 4, 100);
 	input_set_abs_params(wdata->extension.input,
-			     ABS_Y, -0x800, 0x800, 2, 4);
+			     ABS_Y, -0x400, 0x400, 4, 100);
 	input_set_abs_params(wdata->extension.input,
-			     ABS_RX, -0x800, 0x800, 2, 4);
+			     ABS_RX, -0x400, 0x400, 4, 100);
 	input_set_abs_params(wdata->extension.input,
-			     ABS_RY, -0x800, 0x800, 2, 4);
+			     ABS_RY, -0x400, 0x400, 4, 100);
 
 	ret = input_register_device(wdata->extension.input);
 	if (ret)
-		goto err_free;
+		goto err_file;
 
 	return 0;
 
+err_file:
+	device_remove_file(&wdata->hdev->dev,
+			   &dev_attr_pro_calib);
 err_free:
 	input_free_device(wdata->extension.input);
 	wdata->extension.input = NULL;
@@ -1837,6 +1934,8 @@
 	input_unregister_device(wdata->extension.input);
 	wdata->extension.input = NULL;
 	cancel_work_sync(&wdata->rumble_worker);
+	device_remove_file(&wdata->hdev->dev,
+			   &dev_attr_pro_calib);
 
 	spin_lock_irqsave(&wdata->state.lock, flags);
 	wiiproto_req_rumble(wdata, 0);
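
The Pro controller changes add a one-shot automatic calibration: the first report after (re)arming becomes the stick's zero offset, unless that sample is already deflected by 500 or more. A single-axis standalone sketch of the logic (names are ours):

    #include <stdio.h>
    #include <stdlib.h>

    static int calib_done, calib_offset;

    static int calibrate(int raw)
    {
        int v = raw - 0x800;        /* hardware center is 0x800 */

        if (!calib_done) {
            calib_done = 1;
            if (abs(v) < 500)       /* skip if already deflected */
                calib_offset = -v;
        }
        return v + calib_offset;
    }

    int main(void)
    {
        printf("%d\n", calibrate(0x812));   /* first report -> 0 */
        printf("%d\n", calibrate(0x900));   /* later report -> 238 */
        return 0;
    }
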
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 75db0c4..10934aa 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -46,6 +46,7 @@
 #define WIIPROTO_FLAG_DRM_LOCKED	0x8000
 #define WIIPROTO_FLAG_BUILTIN_MP	0x010000
 #define WIIPROTO_FLAG_NO_MP		0x020000
+#define WIIPROTO_FLAG_PRO_CALIB_DONE	0x040000
 
 #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
 					WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
@@ -135,6 +136,7 @@
 
 	/* calibration/cache data */
 	__u16 calib_bboard[4][3];
+	__s16 calib_pro_sticks[4];
 	__u8 cache_rumble;
 };
 
@@ -327,7 +329,7 @@
 static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
 								__u32 opt)
 {
-	INIT_COMPLETION(wdata->state.ready);
+	reinit_completion(&wdata->state.ready);
 	wdata->state.cmd = cmd;
 	wdata->state.opt = opt;
 }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c133619..ae48d18 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -455,10 +455,6 @@
 	}
 
 	list_for_each_entry(report,
-		&hid->report_enum[HID_INPUT_REPORT].report_list, list)
-		i2c_hid_init_report(report, inbuf, ihid->bufsize);
-
-	list_for_each_entry(report,
 		&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
 		i2c_hid_init_report(report, inbuf, ihid->bufsize);
 
@@ -854,10 +850,10 @@
 		0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
 		0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
 	};
-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object params[4], *obj;
+	union acpi_object params[4];
 	struct acpi_object_list input;
 	struct acpi_device *adev;
+	unsigned long long value;
 	acpi_handle handle;
 
 	handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +874,14 @@
 	params[3].package.count = 0;
 	params[3].package.elements = NULL;
 
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+	if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+								&value))) {
 		dev_err(&client->dev, "device _DSM execution failed\n");
 		return -ENODEV;
 	}
 
-	obj = (union acpi_object *)buf.pointer;
-	if (obj->type != ACPI_TYPE_INTEGER) {
-		dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
-			obj->type);
-		kfree(buf.pointer);
-		return -EINVAL;
-	}
+	pdata->hid_descriptor_address = value;
 
-	pdata->hid_descriptor_address = obj->integer.value;
-
-	kfree(buf.pointer);
 	return 0;
 }
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 3fca3be..0db9a67 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -84,6 +84,8 @@
 	{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index e0d66b9..a183e48 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -66,7 +66,7 @@
 
 	mutex_lock(&hwmon->lock);
 
-	INIT_COMPLETION(*completion);
+	reinit_completion(completion);
 
 	enable_irq(hwmon->irq);
 	hwmon->cell->enable(to_platform_device(dev));
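
The INIT_COMPLETION(*x) to reinit_completion(x) conversions in this series re-arm an already-initialized completion before the next wait. A userspace pthread analogue of that pattern, offered only as a sketch of the semantics (reset the state, never the underlying primitives); link with -lpthread:

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void reinit_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 0;        /* reset state only, keep the primitives */
        pthread_mutex_unlock(&c->lock);
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct completion c = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };

        complete(&c);           /* e.g. measurement finished */
        reinit_completion(&c);  /* ready for the next one */
        printf("done=%d\n", c.done);    /* 0 */
        return 0;
    }
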
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index cdff742..4c4c142 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -60,6 +60,11 @@
  * This driver also supports the G781 from GMT. This device is compatible
  * with the ADM1032.
  *
+ * This driver also supports the TMP451 from Texas Instruments. The device
+ * is supported in both compatibility and extended mode. It is mostly
+ * compatible with the ADT7461, except for the local temperature low-byte
+ * register and the maximum conversion rate.
+ *
  * Since the LM90 was the first chipset supported by this driver, most
  * comments will refer to this chipset, but are actually general and
  * concern all supported chipsets, unless mentioned otherwise.
@@ -89,6 +94,8 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
 
 /*
  * Addresses to scan
@@ -110,7 +117,7 @@
 	0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
 
 enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
-	max6646, w83l771, max6696, sa56004, g781 };
+	max6646, w83l771, max6696, sa56004, g781, tmp451 };
 
 /*
  * The LM90 registers
@@ -167,6 +174,9 @@
 #define LM90_DEF_CONVRATE_RVAL	6	/* Def conversion rate register value */
 #define LM90_MAX_CONVRATE_MS	16000	/* Maximum conversion rate in ms */
 
+/* TMP451 registers */
+#define TMP451_REG_R_LOCAL_TEMPL	0x15
+
 /*
  * Device flags
  */
@@ -179,6 +189,23 @@
 #define LM90_HAVE_TEMP3		(1 << 6) /* 3rd temperature sensor	*/
 #define LM90_HAVE_BROKEN_ALERT	(1 << 7) /* Broken alert		*/
 
+/* LM90 status */
+#define LM90_STATUS_LTHRM	(1 << 0) /* local THERM limit tripped */
+#define LM90_STATUS_RTHRM	(1 << 1) /* remote THERM limit tripped */
+#define LM90_STATUS_ROPEN	(1 << 2) /* remote is an open circuit */
+#define LM90_STATUS_RLOW	(1 << 3) /* remote low temp limit tripped */
+#define LM90_STATUS_RHIGH	(1 << 4) /* remote high temp limit tripped */
+#define LM90_STATUS_LLOW	(1 << 5) /* local low temp limit tripped */
+#define LM90_STATUS_LHIGH	(1 << 6) /* local high temp limit tripped */
+
+#define MAX6696_STATUS2_R2THRM	(1 << 1) /* remote2 THERM limit tripped */
+#define MAX6696_STATUS2_R2OPEN	(1 << 2) /* remote2 is an open circuit */
+#define MAX6696_STATUS2_R2LOW	(1 << 3) /* remote2 low temp limit tripped */
+#define MAX6696_STATUS2_R2HIGH	(1 << 4) /* remote2 high temp limit tripped */
+#define MAX6696_STATUS2_ROT2	(1 << 5) /* remote emergency limit tripped */
+#define MAX6696_STATUS2_R2OT2	(1 << 6) /* remote2 emergency limit tripped */
+#define MAX6696_STATUS2_LOT2	(1 << 7) /* local emergency limit tripped */
+
 /*
  * Driver data (common to all clients)
  */
@@ -205,6 +232,7 @@
 	{ "nct1008", adt7461 },
 	{ "w83l771", w83l771 },
 	{ "sa56004", sa56004 },
+	{ "tmp451", tmp451 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -278,7 +306,7 @@
 	[max6696] = {
 		.flags = LM90_HAVE_EMERGENCY
 		  | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
-		.alert_alarms = 0x187c,
+		.alert_alarms = 0x1c7c,
 		.max_convrate = 6,
 		.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
 	},
@@ -293,6 +321,43 @@
 		.max_convrate = 9,
 		.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
 	},
+	[tmp451] = {
+		.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+		  | LM90_HAVE_BROKEN_ALERT,
+		.alert_alarms = 0x7c,
+		.max_convrate = 9,
+		.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+	}
+};
+
+/*
+ * TEMP8 register index
+ */
+enum lm90_temp8_reg_index {
+	LOCAL_LOW = 0,
+	LOCAL_HIGH,
+	LOCAL_CRIT,
+	REMOTE_CRIT,
+	LOCAL_EMERG,	/* max6659 and max6695/96 */
+	REMOTE_EMERG,	/* max6659 and max6695/96 */
+	REMOTE2_CRIT,	/* max6695/96 only */
+	REMOTE2_EMERG,	/* max6695/96 only */
+	TEMP8_REG_NUM
+};
+
+/*
+ * TEMP11 register index
+ */
+enum lm90_temp11_reg_index {
+	REMOTE_TEMP = 0,
+	REMOTE_LOW,
+	REMOTE_HIGH,
+	REMOTE_OFFSET,	/* except max6646, max6657/58/59, and max6695/96 */
+	LOCAL_TEMP,
+	REMOTE2_TEMP,	/* max6695/96 only */
+	REMOTE2_LOW,	/* max6695/96 only */
+	REMOTE2_HIGH,	/* max6695/96 only */
+	TEMP11_REG_NUM
 };
 
 /*
@@ -302,6 +367,7 @@
 struct lm90_data {
 	struct device *hwmon_dev;
 	struct mutex update_lock;
+	struct regulator *regulator;
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* in jiffies */
 	int kind;
@@ -317,25 +383,8 @@
 	u8 reg_local_ext;	/* local extension register offset */
 
 	/* registers values */
-	s8 temp8[8];	/* 0: local low limit
-			 * 1: local high limit
-			 * 2: local critical limit
-			 * 3: remote critical limit
-			 * 4: local emergency limit (max6659 and max6695/96)
-			 * 5: remote emergency limit (max6659 and max6695/96)
-			 * 6: remote 2 critical limit (max6695/96 only)
-			 * 7: remote 2 emergency limit (max6695/96 only)
-			 */
-	s16 temp11[8];	/* 0: remote input
-			 * 1: remote low limit
-			 * 2: remote high limit
-			 * 3: remote offset (except max6646, max6657/58/59,
-			 *		     and max6695/96)
-			 * 4: local input
-			 * 5: remote 2 input (max6695/96 only)
-			 * 6: remote 2 low limit (max6695/96 only)
-			 * 7: remote 2 high limit (max6695/96 only)
-			 */
+	s8 temp8[TEMP8_REG_NUM];
+	s16 temp11[TEMP11_REG_NUM];
 	u8 temp_hyst;
 	u16 alarms; /* bitvector (upper 8 bits for max6695/96) */
 };
@@ -477,37 +526,42 @@
 		u8 alarms;
 
 		dev_dbg(&client->dev, "Updating lm90 data.\n");
-		lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]);
-		lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]);
-		lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]);
-		lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
+		lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
+			      &data->temp8[LOCAL_LOW]);
+		lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
+			      &data->temp8[LOCAL_HIGH]);
+		lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
+			      &data->temp8[LOCAL_CRIT]);
+		lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
+			      &data->temp8[REMOTE_CRIT]);
 		lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
 
 		if (data->reg_local_ext) {
 			lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
 				    data->reg_local_ext,
-				    &data->temp11[4]);
+				    &data->temp11[LOCAL_TEMP]);
 		} else {
 			if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
 					  &h) == 0)
-				data->temp11[4] = h << 8;
+				data->temp11[LOCAL_TEMP] = h << 8;
 		}
 		lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
-			    LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]);
+			    LM90_REG_R_REMOTE_TEMPL,
+			    &data->temp11[REMOTE_TEMP]);
 
 		if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
-			data->temp11[1] = h << 8;
+			data->temp11[REMOTE_LOW] = h << 8;
 			if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
 			 && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
 					  &l) == 0)
-				data->temp11[1] |= l;
+				data->temp11[REMOTE_LOW] |= l;
 		}
 		if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
-			data->temp11[2] = h << 8;
+			data->temp11[REMOTE_HIGH] = h << 8;
 			if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
 			 && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
 					  &l) == 0)
-				data->temp11[2] |= l;
+				data->temp11[REMOTE_HIGH] |= l;
 		}
 
 		if (data->flags & LM90_HAVE_OFFSET) {
@@ -515,13 +569,13 @@
 					  &h) == 0
 			 && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
 					  &l) == 0)
-				data->temp11[3] = (h << 8) | l;
+				data->temp11[REMOTE_OFFSET] = (h << 8) | l;
 		}
 		if (data->flags & LM90_HAVE_EMERGENCY) {
 			lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
-				      &data->temp8[4]);
+				      &data->temp8[LOCAL_EMERG]);
 			lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
-				      &data->temp8[5]);
+				      &data->temp8[REMOTE_EMERG]);
 		}
 		lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
 		data->alarms = alarms;	/* save as 16 bit value */
@@ -529,15 +583,16 @@
 		if (data->kind == max6696) {
 			lm90_select_remote_channel(client, data, 1);
 			lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
-				      &data->temp8[6]);
+				      &data->temp8[REMOTE2_CRIT]);
 			lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
-				      &data->temp8[7]);
+				      &data->temp8[REMOTE2_EMERG]);
 			lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
-				    LM90_REG_R_REMOTE_TEMPL, &data->temp11[5]);
+				    LM90_REG_R_REMOTE_TEMPL,
+				    &data->temp11[REMOTE2_TEMP]);
 			if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
-				data->temp11[6] = h << 8;
+				data->temp11[REMOTE2_LOW] = h << 8;
 			if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
-				data->temp11[7] = h << 8;
+				data->temp11[REMOTE2_HIGH] = h << 8;
 			lm90_select_remote_channel(client, data, 0);
 
 			if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
@@ -709,7 +764,7 @@
 	struct lm90_data *data = lm90_update_device(dev);
 	int temp;
 
-	if (data->kind == adt7461)
+	if (data->kind == adt7461 || data->kind == tmp451)
 		temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
 	else if (data->kind == max6646)
 		temp = temp_from_u8(data->temp8[attr->index]);
@@ -726,7 +781,7 @@
 static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
 			 const char *buf, size_t count)
 {
-	static const u8 reg[8] = {
+	static const u8 reg[TEMP8_REG_NUM] = {
 		LM90_REG_W_LOCAL_LOW,
 		LM90_REG_W_LOCAL_HIGH,
 		LM90_REG_W_LOCAL_CRIT,
@@ -753,7 +808,7 @@
 		val -= 16000;
 
 	mutex_lock(&data->update_lock);
-	if (data->kind == adt7461)
+	if (data->kind == adt7461 || data->kind == tmp451)
 		data->temp8[nr] = temp_to_u8_adt7461(data, val);
 	else if (data->kind == max6646)
 		data->temp8[nr] = temp_to_u8(val);
@@ -775,7 +830,7 @@
 	struct lm90_data *data = lm90_update_device(dev);
 	int temp;
 
-	if (data->kind == adt7461)
+	if (data->kind == adt7461 || data->kind == tmp451)
 		temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
 	else if (data->kind == max6646)
 		temp = temp_from_u16(data->temp11[attr->index]);
@@ -821,7 +876,7 @@
 		val -= 16000;
 
 	mutex_lock(&data->update_lock);
-	if (data->kind == adt7461)
+	if (data->kind == adt7461 || data->kind == tmp451)
 		data->temp11[index] = temp_to_u16_adt7461(data, val);
 	else if (data->kind == max6646)
 		data->temp11[index] = temp_to_u8(val) << 8;
@@ -850,7 +905,7 @@
 	struct lm90_data *data = lm90_update_device(dev);
 	int temp;
 
-	if (data->kind == adt7461)
+	if (data->kind == adt7461 || data->kind == tmp451)
 		temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
 	else if (data->kind == max6646)
 		temp = temp_from_u8(data->temp8[attr->index]);
@@ -878,12 +933,12 @@
 		return err;
 
 	mutex_lock(&data->update_lock);
-	if (data->kind == adt7461)
-		temp = temp_from_u8_adt7461(data, data->temp8[2]);
+	if (data->kind == adt7461 || data->kind == tmp451)
+		temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
 	else if (data->kind == max6646)
-		temp = temp_from_u8(data->temp8[2]);
+		temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
 	else
-		temp = temp_from_s8(data->temp8[2]);
+		temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
 
 	data->temp_hyst = hyst_to_reg(temp - val);
 	i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -937,25 +992,28 @@
 	return count;
 }
 
-static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, 0, 4);
-static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL,
+	0, LOCAL_TEMP);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL,
+	0, REMOTE_TEMP);
 static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 0);
+	set_temp8, LOCAL_LOW);
 static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
-	set_temp11, 0, 1);
+	set_temp11, 0, REMOTE_LOW);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 1);
+	set_temp8, LOCAL_HIGH);
 static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
-	set_temp11, 1, 2);
+	set_temp11, 1, REMOTE_HIGH);
 static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 2);
+	set_temp8, LOCAL_CRIT);
 static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 3);
+	set_temp8, REMOTE_CRIT);
 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
-	set_temphyst, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3);
+	set_temphyst, LOCAL_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL,
+	REMOTE_CRIT);
 static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
-	set_temp11, 2, 3);
+	set_temp11, 2, REMOTE_OFFSET);
 
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -1003,13 +1061,13 @@
  * Additional attributes for devices with emergency sensors
  */
 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 4);
+	set_temp8, LOCAL_EMERG);
 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 5);
+	set_temp8, REMOTE_EMERG);
 static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst,
-			  NULL, 4);
+			  NULL, LOCAL_EMERG);
 static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst,
-			  NULL, 5);
+			  NULL, REMOTE_EMERG);
 
 static struct attribute *lm90_emergency_attributes[] = {
 	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
@@ -1039,18 +1097,20 @@
 /*
  * Additional attributes for devices with 3 temperature sensors
  */
-static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, 0, 5);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL,
+	0, REMOTE2_TEMP);
 static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11,
-	set_temp11, 3, 6);
+	set_temp11, 3, REMOTE2_LOW);
 static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11,
-	set_temp11, 4, 7);
+	set_temp11, 4, REMOTE2_HIGH);
 static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 6);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, 6);
+	set_temp8, REMOTE2_CRIT);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL,
+	REMOTE2_CRIT);
 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 7);
+	set_temp8, REMOTE2_EMERG);
 static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst,
-			  NULL, 7);
+			  NULL, REMOTE2_EMERG);
 
 static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
 static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10);
@@ -1306,6 +1366,19 @@
 		 && (config1 & 0x3F) == 0x00
 		 && convrate <= 0x08)
 			name = "g781";
+	} else
+	if (address == 0x4C
+	 && man_id == 0x55) { /* Texas Instruments */
+		int local_ext;
+
+		local_ext = i2c_smbus_read_byte_data(client,
+						     TMP451_REG_R_LOCAL_TEMPL);
+
+		if (chip_id == 0x00 /* TMP451 */
+		 && (config1 & 0x1B) == 0x00
+		 && convrate <= 0x09
+		 && (local_ext & 0x0F) == 0x00)
+			name = "tmp451";
 	}
 
 	if (!name) { /* identification failed */
@@ -1367,7 +1440,7 @@
 	data->config_orig = config;
 
 	/* Check Temperature Range Select */
-	if (data->kind == adt7461) {
+	if (data->kind == adt7461 || data->kind == tmp451) {
 		if (config & 0x04)
 			data->flags |= LM90_FLAG_ADT7461_EXT;
 	}
@@ -1391,14 +1464,74 @@
 		i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
 }
 
+static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
+{
+	struct lm90_data *data = i2c_get_clientdata(client);
+	u8 st, st2 = 0;
+
+	lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+
+	if (data->kind == max6696)
+		lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+
+	*status = st | (st2 << 8);
+
+	if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0)
+		return false;
+
+	if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
+	    (st2 & MAX6696_STATUS2_LOT2))
+		dev_warn(&client->dev,
+			 "temp%d out of range, please check!\n", 1);
+	if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
+	    (st2 & MAX6696_STATUS2_ROT2))
+		dev_warn(&client->dev,
+			 "temp%d out of range, please check!\n", 2);
+	if (st & LM90_STATUS_ROPEN)
+		dev_warn(&client->dev,
+			 "temp%d diode open, please check!\n", 2);
+	if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
+		   MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
+		dev_warn(&client->dev,
+			 "temp%d out of range, please check!\n", 3);
+	if (st2 & MAX6696_STATUS2_R2OPEN)
+		dev_warn(&client->dev,
+			 "temp%d diode open, please check!\n", 3);
+
+	return true;
+}
+
+static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
+{
+	struct i2c_client *client = dev_id;
+	u16 status;
+
+	if (lm90_is_tripped(client, &status))
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
 static int lm90_probe(struct i2c_client *client,
 		      const struct i2c_device_id *id)
 {
 	struct device *dev = &client->dev;
 	struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
 	struct lm90_data *data;
+	struct regulator *regulator;
 	int err;
 
+	regulator = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(regulator))
+		return PTR_ERR(regulator);
+
+	err = regulator_enable(regulator);
+	if (err < 0) {
+		dev_err(&client->dev,
+			"Failed to enable regulator: %d\n", err);
+		return err;
+	}
+
 	data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -1406,6 +1539,8 @@
 	i2c_set_clientdata(client, data);
 	mutex_init(&data->update_lock);
 
+	data->regulator = regulator;
+
 	/* Set the device type */
 	data->kind = id->driver_data;
 	if (data->kind == adm1032) {
@@ -1467,12 +1602,26 @@
 		goto exit_remove_files;
 	}
 
+	if (client->irq) {
+		dev_dbg(dev, "IRQ: %d\n", client->irq);
+		err = devm_request_threaded_irq(dev, client->irq,
+						NULL, lm90_irq_thread,
+						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+						"lm90", client);
+		if (err < 0) {
+			dev_err(dev, "cannot request IRQ %d\n", client->irq);
+			goto exit_remove_files;
+		}
+	}
+
 	return 0;
 
 exit_remove_files:
 	lm90_remove_files(client, data);
 exit_restore:
 	lm90_restore_conf(client, data);
+	regulator_disable(data->regulator);
+
 	return err;
 }
 
@@ -1483,49 +1632,33 @@
 	hwmon_device_unregister(data->hwmon_dev);
 	lm90_remove_files(client, data);
 	lm90_restore_conf(client, data);
+	regulator_disable(data->regulator);
 
 	return 0;
 }
 
 static void lm90_alert(struct i2c_client *client, unsigned int flag)
 {
-	struct lm90_data *data = i2c_get_clientdata(client);
-	u8 config, alarms, alarms2 = 0;
+	u16 alarms;
 
-	lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
-
-	if (data->kind == max6696)
-		lm90_read_reg(client, MAX6696_REG_R_STATUS2, &alarms2);
-
-	if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
-		dev_info(&client->dev, "Everything OK\n");
-	} else {
-		if (alarms & 0x61)
-			dev_warn(&client->dev,
-				 "temp%d out of range, please check!\n", 1);
-		if (alarms & 0x1a)
-			dev_warn(&client->dev,
-				 "temp%d out of range, please check!\n", 2);
-		if (alarms & 0x04)
-			dev_warn(&client->dev,
-				 "temp%d diode open, please check!\n", 2);
-
-		if (alarms2 & 0x18)
-			dev_warn(&client->dev,
-				 "temp%d out of range, please check!\n", 3);
-
+	if (lm90_is_tripped(client, &alarms)) {
 		/*
 		 * Disable ALERT# output, because these chips don't implement
 		 * SMBus alert correctly; they should only hold the alert line
 		 * low briefly.
 		 */
+		struct lm90_data *data = i2c_get_clientdata(client);
+
 		if ((data->flags & LM90_HAVE_BROKEN_ALERT)
 		 && (alarms & data->alert_alarms)) {
+			u8 config;
 			dev_dbg(&client->dev, "Disabling ALERT#\n");
 			lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
 			i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
 						  config | 0x80);
 		}
+	} else {
+		dev_info(&client->dev, "Everything OK\n");
 	}
 }
 
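
A note on the lm90.c hunks above: the driver stops indexing data->temp8[]
and data->temp11[] with bare numbers and uses named constants instead
(data->temp11[] gets the same treatment with REMOTE_TEMP, LOCAL_TEMP, and
friends). The defining enums live in an earlier hunk of this patch;
reconstructed from the old numeric indices visible above, the temp8 one
presumably reads (a sketch, not the authoritative hunk):

	enum lm90_temp8_index {	/* order inferred from the old indices */
		LOCAL_LOW,	/* was 0 */
		LOCAL_HIGH,	/* was 1 */
		LOCAL_CRIT,	/* was 2 */
		REMOTE_CRIT,	/* was 3 */
		LOCAL_EMERG,	/* was 4 */
		REMOTE_EMERG,	/* was 5 */
		REMOTE2_CRIT,	/* was 6 */
		REMOTE2_EMERG,	/* was 7 */
		TEMP8_REG_NUM	/* sizes reg[] in set_temp8() */
	};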
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index fd05930..8edba9d 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -371,7 +371,7 @@
 	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
 		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
 
-	INIT_COMPLETION(dev->cmd_complete);
+	reinit_completion(&dev->cmd_complete);
 	dev->transfer_status = 0;
 
 	if (!dev->buf_len) {
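
All of the INIT_COMPLETION() hunks in the i2c drivers (and, further down,
the iio ones) are the same mechanical conversion: 3.13 retires the
INIT_COMPLETION() macro in favour of reinit_completion(), which takes a
pointer rather than the completion itself. The replacement is a trivial
inline in <linux/completion.h>, roughly:

	static inline void reinit_completion(struct completion *x)
	{
		x->done = 0;	/* reset the counter; the waitqueue and
				 * its lock stay initialized */
	}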
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ea4b08f..d7e8600 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -151,7 +151,7 @@
 
 	i2c_dev->msg_buf = msg->buf;
 	i2c_dev->msg_buf_remaining = msg->len;
-	INIT_COMPLETION(i2c_dev->completion);
+	reinit_completion(&i2c_dev->completion);
 
 	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
 
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 132369f..960dec6 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -323,7 +323,7 @@
 
 	davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);
 
-	INIT_COMPLETION(dev->cmd_complete);
+	reinit_completion(&dev->cmd_complete);
 	dev->cmd_err = 0;
 
 	/* Take I2C out of reset and configure it as master */
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 5888fee..e89e3e2 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -613,7 +613,7 @@
 	mutex_lock(&dev->lock);
 	pm_runtime_get_sync(dev->dev);
 
-	INIT_COMPLETION(dev->cmd_complete);
+	reinit_completion(&dev->cmd_complete);
 	dev->msgs = msgs;
 	dev->msgs_num = num;
 	dev->cmd_err = 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1672eff..0043ede 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -541,7 +541,7 @@
 		desc->dptr_high = upper_32_bits(dma_addr);
 	}
 
-	INIT_COMPLETION(priv->cmp);
+	reinit_completion(&priv->cmp);
 
 	/* Add the descriptor */
 	ismt_submit_desc(priv);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index b7c8577..3aedd86 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -505,7 +505,7 @@
 				return err;
 		}
 	} else {
-		INIT_COMPLETION(i2c->cmd_complete);
+		reinit_completion(&i2c->cmd_complete);
 		ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
 		if (ret)
 			return ret;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 9967a6f..a6a891d 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -543,7 +543,7 @@
 	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
 	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
 
-	INIT_COMPLETION(dev->cmd_complete);
+	reinit_completion(&dev->cmd_complete);
 	dev->cmd_err = 0;
 
 	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index c457cb4..e661ede 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -544,7 +544,7 @@
 	i2c_dev->msg_buf_remaining = msg->len;
 	i2c_dev->msg_err = I2C_ERR_NONE;
 	i2c_dev->msg_read = (msg->flags & I2C_M_RD);
-	INIT_COMPLETION(i2c_dev->msg_complete);
+	reinit_completion(&i2c_dev->msg_complete);
 
 	packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
 			PACKET_HEADER0_PROTOCOL_I2C |
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index c65da3d..31395fa 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -158,7 +158,7 @@
 		writew(val, i2c_dev->base + REG_CR);
 	}
 
-	INIT_COMPLETION(i2c_dev->complete);
+	reinit_completion(&i2c_dev->complete);
 
 	if (i2c_dev->mode == I2C_MODE_STANDARD)
 		tcr_val = TCR_STANDARD_MODE;
@@ -247,7 +247,7 @@
 		writew(val, i2c_dev->base + REG_CR);
 	}
 
-	INIT_COMPLETION(i2c_dev->complete);
+	reinit_completion(&i2c_dev->complete);
 
 	if (i2c_dev->mode == I2C_MODE_STANDARD)
 		tcr_val = TCR_STANDARD_MODE;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3be58f8..75ba860 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -254,10 +254,12 @@
 					client->flags & I2C_CLIENT_WAKE);
 	dev_dbg(dev, "probe\n");
 
+	acpi_dev_pm_attach(&client->dev, true);
 	status = driver->probe(client, i2c_match_id(driver->id_table, client));
 	if (status) {
 		client->driver = NULL;
 		i2c_set_clientdata(client, NULL);
+		acpi_dev_pm_detach(&client->dev, true);
 	}
 	return status;
 }
@@ -283,6 +285,7 @@
 		client->driver = NULL;
 		i2c_set_clientdata(client, NULL);
 	}
+	acpi_dev_pm_detach(&client->dev, true);
 	return status;
 }
 
@@ -1111,8 +1114,10 @@
 	if (ret < 0 || !info.addr)
 		return AE_OK;
 
+	adev->power.flags.ignore_parent = true;
 	strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
 	if (!i2c_new_device(adapter, &info)) {
+		adev->power.flags.ignore_parent = false;
 		dev_err(&adapter->dev,
 			"failed to add I2C device %s from ACPI\n",
 			dev_name(&adev->dev));
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 24214ab..de9185d 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -295,15 +295,7 @@
 	.resume		= ide_pci_resume,
 };
 
-static int __init cs5536_init(void)
-{
-	return pci_register_driver(&cs5536_pci_driver);
-}
-
-static void __exit cs5536_exit(void)
-{
-	pci_unregister_driver(&cs5536_pci_driver);
-}
+module_pci_driver(cs5536_pci_driver);
 
 MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
 MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
@@ -312,6 +304,3 @@
 
 module_param_named(msr, use_msr, int, 0644);
 MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
-
-module_init(cs5536_init);
-module_exit(cs5536_exit);
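
The cs5536 hunk folds the hand-written init/exit pair into
module_pci_driver(), the <linux/pci.h> helper that generates exactly that
boilerplate; it is a thin wrapper around module_driver():

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)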
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index bf83d7b..2db803c 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -416,8 +416,7 @@
 static void pmac_ide_apply_timings(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 
 	if (drive->dn & 1)
 		writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
@@ -434,8 +433,7 @@
 static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 
 	if (drive->dn & 1) {
 		writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
@@ -454,8 +452,7 @@
 pmac_ide_do_update_timings(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 
 	if (pmif->kind == controller_sh_ata6 ||
 	    pmif->kind == controller_un_ata6 ||
@@ -500,8 +497,7 @@
  */
 static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	const u8 pio = drive->pio_mode - XFER_PIO_0;
 	struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
 	u32 *timings, t;
@@ -781,8 +777,7 @@
 
 static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	int ret = 0;
 	u32 *timings, *timings2, tl[2];
 	u8 unit = drive->dn & 1;
@@ -919,8 +914,7 @@
 
 static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	struct device_node *np = pmif->node;
 	const char *cable = of_get_property(np, "cable-type", NULL);
 	struct device_node *root = of_find_node_by_path("/");
@@ -951,8 +945,7 @@
 static void pmac_ide_init_dev(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 
 	if (on_media_bay(pmif)) {
 		if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
@@ -1228,8 +1221,7 @@
 static int
 pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
 	int rc = 0;
 
 	if (mesg.event != mdev->ofdev.dev.power.power_state.event
@@ -1245,8 +1237,7 @@
 static int
 pmac_ide_macio_resume(struct macio_dev *mdev)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
 	int rc = 0;
 
 	if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
@@ -1318,7 +1309,6 @@
 	rc = pmac_ide_setup_device(pmif, &hw);
 	if (rc != 0) {
 		/* The interface is released to the common IDE layer */
-		pci_set_drvdata(pdev, NULL);
 		iounmap(base);
 		pci_release_regions(pdev);
 		kfree(pmif);
@@ -1365,8 +1355,7 @@
 #ifdef CONFIG_PMAC_MEDIABAY
 static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
 
 	switch(mb_state) {
 	case MB_CD:
@@ -1468,8 +1457,7 @@
 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	struct dbdma_cmd *table;
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	struct scatterlist *sg;
@@ -1546,8 +1534,7 @@
 static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
 	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
 
@@ -1572,8 +1559,7 @@
 pmac_ide_dma_start(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	volatile struct dbdma_regs __iomem *dma;
 
 	dma = pmif->dma_regs;
@@ -1590,8 +1576,7 @@
 pmac_ide_dma_end (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	u32 dstat;
 
@@ -1615,8 +1600,7 @@
 pmac_ide_dma_test_irq (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	unsigned long status, timeout;
 
@@ -1670,8 +1654,7 @@
 pmac_ide_dma_lost_irq (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	unsigned long status = readl(&dma->status);
 
@@ -1693,8 +1676,7 @@
  */
 static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
-	pmac_ide_hwif_t *pmif =
-		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 
 	/* We won't need pci_dev if we switch to generic consistent
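
Every pmac.c hunk above is the same cleanup: dev_get_drvdata() already
returns void *, which C implicitly converts to any object pointer type, so
the (pmac_ide_hwif_t *) casts were redundant:

	void *dev_get_drvdata(const struct device *dev);

	/* no cast needed when assigning from void * */
	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);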
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f116d66..3226ce9 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
 	{
 		.name = "C1-NHM",
 		.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@
 		.enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
 	{
 		.name = "C1-SNB",
 		.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@
 		.enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
 	{
 		.name = "C1-IVB",
 		.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@
 		.enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
 	{
 		.name = "C1-HSW",
 		.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@
 		.enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
 	{
 		.name = "C1E-ATM",
 		.desc = "MWAIT 0x00",
@@ -390,7 +390,7 @@
 	int hotcpu = (unsigned long)hcpu;
 	struct cpuidle_device *dev;
 
-	switch (action & 0xf) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 
 		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
@@ -490,7 +490,7 @@
 /*
  * intel_idle_probe()
  */
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
 {
 	unsigned int eax, ebx, ecx;
 	const struct x86_cpu_id *id;
@@ -558,7 +558,7 @@
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
  */
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
 {
 	int cstate;
 	struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +628,7 @@
 		int num_substates, mwait_hint, mwait_cstate, mwait_substate;
 
 		if (cpuidle_state_table[cstate].enter == NULL)
-			continue;
+			break;
 
 		if (cstate + 1 > max_cstate) {
 			printk(PREFIX "max_cstate %d reached\n", max_cstate);
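
Two details in the intel_idle hunks: the cstate tables and the probe path
gain __initdata/__init (they are only read while the driver registers its
states, so the memory can be discarded after init), and the hotplug
notifier masks the action with ~CPU_TASKS_FROZEN instead of the magic 0xf.
CPU_TASKS_FROZEN is the flag OR-ed into hotplug actions during
suspend/resume, so both spellings strip it, but the new one says so:

	#define CPU_TASKS_FROZEN	0x0010	/* from <linux/cpu.h> */

	/* CPU_ONLINE_FROZEN is CPU_ONLINE | CPU_TASKS_FROZEN, so masking
	 * it off lets one case label handle both variants */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		...
	}

The continue-to-break change is safe because each cstate table ends with a
sentinel entry whose .enter is NULL; once it is reached there is nothing
left to scan.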
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index e6fbd3e..9a4e0e3 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -188,7 +188,7 @@
 
 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
-	INIT_COMPLETION(sigma_delta->completion);
+	reinit_completion(&sigma_delta->completion);
 
 	ret = ad_sigma_delta_set_mode(sigma_delta, mode);
 	if (ret < 0)
@@ -259,7 +259,7 @@
 
 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
-	INIT_COMPLETION(sigma_delta->completion);
+	reinit_completion(&sigma_delta->completion);
 
 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
 
@@ -343,7 +343,7 @@
 {
 	struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
 
-	INIT_COMPLETION(sigma_delta->completion);
+	reinit_completion(&sigma_delta->completion);
 	wait_for_completion_timeout(&sigma_delta->completion, HZ);
 
 	if (!sigma_delta->irq_dis) {
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 54c5bab..e525aa6 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -190,7 +190,7 @@
 	struct nau7802_state *st = iio_priv(indio_dev);
 	int ret;
 
-	INIT_COMPLETION(st->value_ok);
+	reinit_completion(&st->value_ok);
 	enable_irq(st->client->irq);
 
 	nau7802_sync(st);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index dac15b9..c10eab6 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -56,7 +56,7 @@
 		ev.id = ev_code;
 		ev.timestamp = timestamp;
 
-		copied = kfifo_put(&ev_int->det_events, &ev);
+		copied = kfifo_put(&ev_int->det_events, ev);
 		if (copied != 0)
 			wake_up_locked_poll(&ev_int->wait, POLLIN);
 	}
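
The industrialio-event.c hunk is fallout from the 3.13 kfifo rework:
kfifo_put() now takes the element by value instead of by pointer, which
lets the macro type-check the element against the fifo's declared element
type. The return value is unchanged -- the number of elements stored, 0
when the fifo is full -- hence the copied != 0 test:

	copied = kfifo_put(&ev_int->det_events, ev);	/* by value now */
	if (copied != 0)
		wake_up_locked_poll(&ev_int->wait, POLLIN);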
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index dab4b41..d2172e7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1848,6 +1848,26 @@
 	return 0;
 }
 
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+	int prio;
+	struct net_device *dev;
+
+	prio = rt_tos2priority(tos);
+	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
+		vlan_dev_real_dev(ndev) : ndev;
+
+	if (dev->num_tc)
+		return netdev_get_prio_tc_map(dev, prio);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+	if (ndev->priv_flags & IFF_802_1Q_VLAN)
+		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
+			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#endif
+	return 0;
+}
+
 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 {
 	struct rdma_route *route = &id_priv->id.route;
@@ -1888,11 +1908,7 @@
 	route->path_rec->reversible = 1;
 	route->path_rec->pkey = cpu_to_be16(0xffff);
 	route->path_rec->mtu_selector = IB_SA_EQ;
-	route->path_rec->sl = netdev_get_prio_tc_map(
-			ndev->priv_flags & IFF_802_1Q_VLAN ?
-				vlan_dev_real_dev(ndev) : ndev,
-			rt_tos2priority(id_priv->tos));
-
+	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
 	route->path_rec->rate_selector = IB_SA_EQ;
 	route->path_rec->rate = iboe_get_rate(ndev);
@@ -2294,7 +2310,7 @@
 	int low, high, remaining;
 	unsigned int rover;
 
-	inet_get_local_port_range(&low, &high);
+	inet_get_local_port_range(&init_net, &low, &high);
 	remaining = (high - low) + 1;
 	rover = net_random() % remaining + low;
 retry:
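
In cma.c the open-coded ToS-to-SL computation becomes the iboe_tos_to_sl()
helper above, which also adds a fallback: when the device has no
traffic-class map (num_tc == 0) but is a VLAN, the SL is taken from the
VLAN egress QoS map. The priority code point occupies the top three bits
of the VLAN tag, hence the mask-and-shift (values from <linux/if_vlan.h>):

	#define VLAN_PRIO_MASK	0xe000	/* Priority Code Point */
	#define VLAN_PRIO_SHIFT	13

	sl = (vlan_dev_get_egress_qos_mask(ndev, prio) &
	      VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* yields 0..7 */

The inet_get_local_port_range() hunk is fallout from that function gaining
a network-namespace argument in 3.13; passing init_net keeps the old
behaviour.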
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f061264..6a0a0d2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,18 +177,18 @@
 
 	props->max_mr_size	   = ~0ull;
 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
-	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
+	props->max_qp		   = dev->dev->quotas.qp;
 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
 	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
 					 dev->dev->caps.max_rq_sg);
-	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
+	props->max_cq		   = dev->dev->quotas.cq;
 	props->max_cqe		   = dev->dev->caps.max_cqes;
-	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
+	props->max_mr		   = dev->dev->quotas.mpt;
 	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
 	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
 	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
-	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
+	props->max_srq		   = dev->dev->quotas.srq;
 	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
 	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
@@ -526,7 +526,6 @@
 	if (IS_ERR(mailbox))
 		return 0;
 
-	memset(mailbox->buf, 0, 256);
 	memcpy(mailbox->buf, props->node_desc, 64);
 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -547,8 +546,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, 256);
-
 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -879,8 +876,6 @@
 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
-	size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
-			   (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
 
 	static const u16 __mlx4_domain[] = {
 		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -905,7 +900,6 @@
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, rule_size);
 	ctrl = mailbox->buf;
 
 	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
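
The mlx4 hunks drop several memset(mailbox->buf, 0, ...) calls (and the
rule_size computation that only fed one of them). That is safe only if the
mailbox buffer is already zeroed at allocation time; presumably a
companion patch in this series moved the clearing into
mlx4_alloc_cmd_mailbox() itself, along these lines (an assumption, not
visible in this diff):

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		...
	}
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);	/* assumed new */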
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index f247fc6e..c61e2a9 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -456,13 +456,13 @@
 
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else {
 		spin_unlock(&tmp->d_lock);
 	}
+	dput(tmp);
 
 	ret = 0;
 bail:
@@ -491,6 +491,7 @@
 		goto bail;
 	}
 
+	mutex_lock(&dir->d_inode->i_mutex);
 	remove_file(dir, "counters");
 	remove_file(dir, "counter_names");
 	remove_file(dir, "portcounter_names");
@@ -505,8 +506,10 @@
 		}
 	}
 	remove_file(dir, "flash");
-	d_delete(dir);
+	mutex_unlock(&dir->d_inode->i_mutex);
 	ret = simple_rmdir(root->d_inode, dir);
+	d_delete(dir);
+	dput(dir);
 
 bail:
 	mutex_unlock(&root->d_inode->i_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 3f14009..c8d9c4a 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -51,8 +51,8 @@
  * file calls, even though this violates some
  * expectations of harmlessness.
  */
-static int qib_tune_pcie_caps(struct qib_devdata *);
-static int qib_tune_pcie_coalesce(struct qib_devdata *);
+static void qib_tune_pcie_caps(struct qib_devdata *);
+static void qib_tune_pcie_coalesce(struct qib_devdata *);
 
 /*
  * Do all the common PCIe setup and initialization.
@@ -476,30 +476,6 @@
 			"pci_enable_device failed after reset: %d\n", r);
 }
 
-/* code to adjust PCIe capabilities. */
-
-static int fld2val(int wd, int mask)
-{
-	int lsbmask;
-
-	if (!mask)
-		return 0;
-	wd &= mask;
-	lsbmask = mask ^ (mask & (mask - 1));
-	wd /= lsbmask;
-	return wd;
-}
-
-static int val2fld(int wd, int mask)
-{
-	int lsbmask;
-
-	if (!mask)
-		return 0;
-	lsbmask = mask ^ (mask & (mask - 1));
-	wd *= lsbmask;
-	return wd;
-}
 
 static int qib_pcie_coalesce;
 module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
@@ -511,7 +487,7 @@
  * of these chipsets, with some BIOS settings, and enabling it on those
  * systems may result in the system crashing, and/or data corruption.
  */
-static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
 {
 	int r;
 	struct pci_dev *parent;
@@ -519,18 +495,18 @@
 	u32 mask, bits, val;
 
 	if (!qib_pcie_coalesce)
-		return 0;
+		return;
 
 	/* Find out supported and configured values for parent (root) */
 	parent = dd->pcidev->bus->self;
 	if (parent->bus->parent) {
 		qib_devinfo(dd->pcidev, "Parent not root\n");
-		return 1;
+		return;
 	}
 	if (!pci_is_pcie(parent))
-		return 1;
+		return;
 	if (parent->vendor != 0x8086)
-		return 1;
+		return;
 
 	/*
 	 *  - bit 12: Max_rdcmp_Imt_EN: need to set to 1
@@ -563,13 +539,12 @@
 		mask = (3U << 24) | (7U << 10);
 	} else {
 		/* not one of the chipsets that we know about */
-		return 1;
+		return;
 	}
 	pci_read_config_dword(parent, 0x48, &val);
 	val &= ~mask;
 	val |= bits;
 	r = pci_write_config_dword(parent, 0x48, val);
-	return 0;
 }
 
 /*
@@ -580,55 +555,44 @@
 module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
 MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
 
-static int qib_tune_pcie_caps(struct qib_devdata *dd)
+static void qib_tune_pcie_caps(struct qib_devdata *dd)
 {
-	int ret = 1; /* Assume the worst */
 	struct pci_dev *parent;
-	u16 pcaps, pctl, ecaps, ectl;
-	int rc_sup, ep_sup;
-	int rc_cur, ep_cur;
+	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
+	u16 rc_mrrs, ep_mrrs, max_mrrs;
 
 	/* Find out supported and configured values for parent (root) */
 	parent = dd->pcidev->bus->self;
-	if (parent->bus->parent) {
+	if (!pci_is_root_bus(parent->bus)) {
 		qib_devinfo(dd->pcidev, "Parent not root\n");
-		goto bail;
+		return;
 	}
 
 	if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
-		goto bail;
-	pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps);
-	pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl);
+		return;
+
+	rc_mpss = parent->pcie_mpss;
+	rc_mps = ffs(pcie_get_mps(parent)) - 8;
 	/* Find out supported and configured values for endpoint (us) */
-	pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps);
-	pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+	ep_mpss = dd->pcidev->pcie_mpss;
+	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
 
-	ret = 0;
 	/* Find max payload supported by root, endpoint */
-	rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
-	ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
-	if (rc_sup > ep_sup)
-		rc_sup = ep_sup;
-
-	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
-	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+	if (rc_mpss > ep_mpss)
+		rc_mpss = ep_mpss;
 
 	/* If Supported greater than limit in module param, limit it */
-	if (rc_sup > (qib_pcie_caps & 7))
-		rc_sup = qib_pcie_caps & 7;
+	if (rc_mpss > (qib_pcie_caps & 7))
+		rc_mpss = qib_pcie_caps & 7;
 	/* If less than (allowed, supported), bump root payload */
-	if (rc_sup > rc_cur) {
-		rc_cur = rc_sup;
-		pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
-			val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
-		pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+	if (rc_mpss > rc_mps) {
+		rc_mps = rc_mpss;
+		pcie_set_mps(parent, 128 << rc_mps);
 	}
 	/* If less than (allowed, supported), bump endpoint payload */
-	if (rc_sup > ep_cur) {
-		ep_cur = rc_sup;
-		ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
-			val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
-		pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+	if (rc_mpss > ep_mps) {
+		ep_mps = rc_mpss;
+		pcie_set_mps(dd->pcidev, 128 << ep_mps);
 	}
 
 	/*
@@ -636,26 +600,22 @@
 	 * No field for max supported, but PCIe spec limits it to 4096,
 	 * which is code '5' (log2(4096) - 7)
 	 */
-	rc_sup = 5;
-	if (rc_sup > ((qib_pcie_caps >> 4) & 7))
-		rc_sup = (qib_pcie_caps >> 4) & 7;
-	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
-	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+	max_mrrs = 5;
+	if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
+		max_mrrs = (qib_pcie_caps >> 4) & 7;
 
-	if (rc_sup > rc_cur) {
-		rc_cur = rc_sup;
-		pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
-			val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
-		pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+	max_mrrs = 128 << max_mrrs;
+	rc_mrrs = pcie_get_readrq(parent);
+	ep_mrrs = pcie_get_readrq(dd->pcidev);
+
+	if (max_mrrs > rc_mrrs) {
+		rc_mrrs = max_mrrs;
+		pcie_set_readrq(parent, rc_mrrs);
 	}
-	if (rc_sup > ep_cur) {
-		ep_cur = rc_sup;
-		ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
-			val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
-		pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+	if (max_mrrs > ep_mrrs) {
+		ep_mrrs = max_mrrs;
+		pcie_set_readrq(dd->pcidev, ep_mrrs);
 	}
-bail:
-	return ret;
 }
 /* End of PCIe capability tuning */
 
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
index ce3fd32..02f9759 100644
--- a/drivers/infiniband/ulp/isert/Kconfig
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -1,5 +1,5 @@
 config INFINIBAND_ISERT
-	tristate "iSCSI Extentions for RDMA (iSER) target support"
+	tristate "iSCSI Extensions for RDMA (iSER) target support"
 	depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
 	---help---
-	Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics.
+	Support for iSCSI Extensions for RDMA (iSER) Target on InfiniBand fabrics.
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 38b523a..a11ff74 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -80,7 +80,7 @@
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
-	tristate "Mouse interface" if EXPERT
+	tristate "Mouse interface"
 	default y
 	help
 	  Say Y here if you want your mouse to be accessible as char devices
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index b6ded17..a06e125 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -18,6 +18,8 @@
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/input/mt.h>
@@ -369,7 +371,11 @@
 	mutex_unlock(&evdev->mutex);
 
 	evdev_detach_client(evdev, client);
-	kfree(client);
+
+	if (is_vmalloc_addr(client))
+		vfree(client);
+	else
+		kfree(client);
 
 	evdev_close_device(evdev);
 
@@ -389,12 +395,14 @@
 {
 	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
 	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
+	unsigned int size = sizeof(struct evdev_client) +
+					bufsize * sizeof(struct input_event);
 	struct evdev_client *client;
 	int error;
 
-	client = kzalloc(sizeof(struct evdev_client) +
-				bufsize * sizeof(struct input_event),
-			 GFP_KERNEL);
+	client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!client)
+		client = vzalloc(size);
 	if (!client)
 		return -ENOMEM;
 
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e75d015..846ccdd 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2052,7 +2052,7 @@
 	if (dev->hint_events_per_packet < packet_size)
 		dev->hint_events_per_packet = packet_size;
 
-	dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
+	dev->max_vals = dev->hint_events_per_packet + 2;
 	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
 	if (!dev->vals) {
 		error = -ENOMEM;
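
The input.c change is a simplification, not a behaviour change: the guard
two lines earlier has already raised hint_events_per_packet to at least
packet_size, so the max() could never select its second argument:

	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;
	/* => hint_events_per_packet >= packet_size; the max() was dead */
	dev->max_vals = dev->hint_events_per_packet + 2;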
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index c1edd39..bb174c1 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 menuconfig INPUT_KEYBOARD
-	bool "Keyboards" if EXPERT || !X86
+	bool "Keyboards"
 	default y
 	help
 	  Say Y here, and a list of supported keyboards will be displayed.
@@ -67,7 +67,7 @@
 	  module will be called atakbd.
 
 config KEYBOARD_ATKBD
-	tristate "AT keyboard" if EXPERT || !X86
+	tristate "AT keyboard"
 	default y
 	select SERIO
 	select SERIO_LIBPS2
@@ -525,7 +525,7 @@
 
 config KEYBOARD_SH_KEYSC
 	tristate "SuperH KEYSC keypad support"
-	depends on SUPERH || ARCH_SHMOBILE
+	depends on SUPERH || ARM || COMPILE_TEST
 	help
 	  Say Y here if you want to use a keypad attached to the KEYSC block
 	  on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 440ce32..2db1324 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,6 +26,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/workqueue.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/spinlock.h>
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index cd5ed9e..4e42819 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 4218143..8b1b013 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -383,7 +383,7 @@
 		.name	= DRV_NAME,
 		.owner	= THIS_MODULE,
 		.pm	= &lpc32xx_kscan_pm_ops,
-		.of_match_table = of_match_ptr(lpc32xx_kscan_match),
+		.of_match_table = lpc32xx_kscan_match,
 	}
 };
 
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index b3e3eda..b310649 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -143,8 +143,10 @@
 		return error;
 
 	error = nspire_keypad_chip_init(keypad);
-	if (error)
+	if (error) {
+		clk_disable_unprepare(keypad->clk);
 		return error;
+	}
 
 	return 0;
 }
@@ -267,7 +269,7 @@
 	.driver = {
 		.name = "nspire-keypad",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(nspire_keypad_dt_match),
+		.of_match_table = nspire_keypad_dt_match,
 	},
 	.probe = nspire_keypad_probe,
 };
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a2e758d..186138c 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 9cd20e6..8508879 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -614,7 +614,7 @@
 	unsigned int keymap_rows;
 	const struct of_device_id *match;
 
-	match = of_match_device(of_match_ptr(tegra_kbc_of_match), &pdev->dev);
+	match = of_match_device(tegra_kbc_of_match, &pdev->dev);
 
 	kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
 	if (!kbc) {
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 5f7b427..8bd24d5 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -60,8 +60,8 @@
 	struct clk			*clk;
 	struct device			*dev;
 	spinlock_t			lock;
-	u32				irq_press;
-	u32				irq_release;
+	int				irq_press;
+	int				irq_release;
 	int				rows, cols, row_shift;
 	int				debounce_ms, active_low;
 	u32				prev_keys[3];
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index aa51baa..5f4967d 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -156,7 +156,7 @@
 
 config INPUT_MAX8997_HAPTIC
 	tristate "MAXIM MAX8997 haptic controller support"
-	depends on HAVE_PWM && MFD_MAX8997
+	depends on PWM && HAVE_PWM && MFD_MAX8997
 	select INPUT_FF_MEMLESS
 	help
 	  This option enables device driver support for the haptic controller
@@ -461,7 +461,7 @@
 
 config INPUT_PWM_BEEPER
 	tristate "PWM beeper support"
-	depends on HAVE_PWM || PWM
+	depends on PWM && HAVE_PWM
 	help
 	  Say Y here to get support for PWM based beeper devices.
 
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 6189148..3a90b710 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -108,7 +108,6 @@
 	struct ad714x_chip *chip = spi_get_drvdata(spi);
 
 	ad714x_remove(chip);
-	spi_set_drvdata(spi, NULL);
 
 	return 0;
 }
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 4f77f87..b5d71d2 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -131,7 +131,6 @@
  err_free_mem:
 	input_free_polled_device(poll_dev);
 	kfree(bdev);
-	dev_set_drvdata(&pdev->dev, NULL);
 	return error;
 }
 
@@ -144,7 +143,6 @@
 	input_free_polled_device(bdev->poll_dev);
 	iounmap(bdev->reg);
 	kfree(bdev);
-	dev_set_drvdata(dev, NULL);
 
 	return 0;
 }
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index f330969..59d4dcd 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -168,7 +168,7 @@
  * I2C init/probing/exit functions
  */
 static int mma8450_probe(struct i2c_client *c,
-				   const struct i2c_device_id *id)
+			 const struct i2c_device_id *id)
 {
 	struct input_polled_dev *idev;
 	struct mma8450 *m;
@@ -204,6 +204,8 @@
 		goto err_free_mem;
 	}
 
+	i2c_set_clientdata(c, m);
+
 	return 0;
 
 err_free_mem:
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index dce0d95..6983ffb 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -383,6 +383,7 @@
 
 	pm_runtime_enable(&client->dev);
 	pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
+	i2c_set_clientdata(client, sensor);
 
 	return 0;
 
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 2ff4d1c..940566e 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -16,6 +16,7 @@
 #include <linux/input.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/slab.h>
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index fb4f8ac..83fff38 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -87,7 +87,6 @@
 
 	input_unregister_polled_device(poll_dev);
 	input_free_polled_device(poll_dev);
-	dev_set_drvdata(&pdev->dev, NULL);
 
 	return 0;
 }
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 5b1aff8..f920ba7 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -24,6 +24,7 @@
 #include <linux/gpio.h>
 #include <linux/rotary_encoder.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 0621c36..7b8b03e 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -153,7 +153,7 @@
 		.name	= "sirfsoc-pwrc",
 		.owner	= THIS_MODULE,
 		.pm	= &sirfsoc_pwrc_pm_ops,
-		.of_match_table = of_match_ptr(sirfsoc_pwrc_of_match),
+		.of_match_table = sirfsoc_pwrc_of_match,
 	}
 };
 
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a0a4bba..7728359 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -430,20 +430,30 @@
 	return retval;
 }
 
-static ssize_t uinput_inject_event(struct uinput_device *udev,
-				   const char __user *buffer, size_t count)
+static ssize_t uinput_inject_events(struct uinput_device *udev,
+				    const char __user *buffer, size_t count)
 {
 	struct input_event ev;
+	size_t bytes = 0;
 
-	if (count < input_event_size())
+	if (count != 0 && count < input_event_size())
 		return -EINVAL;
 
-	if (input_event_from_user(buffer, &ev))
-		return -EFAULT;
+	while (bytes + input_event_size() <= count) {
+		/*
+		 * Note that even if some events were fetched successfully
+		 * we are still going to return EFAULT instead of partial
+		 * count to let userspace know that it got it's buffers
+		 * all wrong.
+		 */
+		if (input_event_from_user(buffer + bytes, &ev))
+			return -EFAULT;
 
-	input_event(udev->dev, ev.type, ev.code, ev.value);
+		input_event(udev->dev, ev.type, ev.code, ev.value);
+		bytes += input_event_size();
+	}
 
-	return input_event_size();
+	return bytes;
 }
 
 static ssize_t uinput_write(struct file *file, const char __user *buffer,
@@ -460,7 +470,7 @@
 		return retval;
 
 	retval = udev->state == UIST_CREATED ?
-			uinput_inject_event(udev, buffer, count) :
+			uinput_inject_events(udev, buffer, count) :
 			uinput_setup_device(udev, buffer, count);
 
 	mutex_unlock(&udev->mutex);
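
With uinput_inject_events() in place, userspace can batch a whole event
packet into one write() on the uinput device. A minimal, hypothetical
user-side example -- a key press plus its terminating SYN_REPORT in a
single syscall, assuming fd is an already-configured uinput device:

	struct input_event evs[2] = {
		{ .type = EV_KEY, .code = KEY_A, .value = 1 },
		{ .type = EV_SYN, .code = SYN_REPORT, .value = 0 },
	};

	/* the return value counts whole events only; a bad buffer yields
	 * -EFAULT rather than a partial count */
	if (write(fd, evs, sizeof(evs)) != sizeof(evs))
		handle_error();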
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 8365847..ca7a26f 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,7 +103,6 @@
 	/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
 	{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
 		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-	{ { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT },		/* Dell XT2 */
 	{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },		/* Dell Vostro 1400 */
 	{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
 		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },				/* Toshiba Tecra A11-11L */
@@ -1793,7 +1792,7 @@
 	snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
 	dev2->phys = priv->phys;
 	dev2->name = (priv->flags & ALPS_DUALPOINT) ?
-		     "DualPoint Stick" : "PS/2 Mouse";
+		     "DualPoint Stick" : "ALPS PS/2 Device";
 	dev2->id.bustype = BUS_I8042;
 	dev2->id.vendor  = 0x0002;
 	dev2->id.product = PSMOUSE_ALPS;
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index f51765f..a5869a8 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -439,7 +439,7 @@
 			case 2: return 5;
 			default:
 				/* Invalid contact (e.g. palm). Ignore it. */
-				return -1;
+				return 0;
 		}
 	}
 
@@ -452,17 +452,10 @@
 {
 	unsigned char *packet = psmouse->packet;
 	unsigned char header_byte = packet[0];
-	int contact_cnt;
 
 	memset(report_data, 0, sizeof(struct cytp_report_data));
 
-	contact_cnt = cypress_get_finger_count(header_byte);
-
-	if (contact_cnt < 0) /* e.g. palm detect */
-		return -EINVAL;
-
-	report_data->contact_cnt = contact_cnt;
-
+	report_data->contact_cnt = cypress_get_finger_count(header_byte);
 	report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
 
 	if (report_data->contact_cnt == 1) {
@@ -535,11 +528,9 @@
 	int slots[CYTP_MAX_MT_SLOTS];
 	int n;
 
-	if (cypress_parse_packet(psmouse, cytp, &report_data))
-		return;
+	cypress_parse_packet(psmouse, cytp, &report_data);
 
 	n = report_data.contact_cnt;
-
 	if (n > CYTP_MAX_MT_SLOTS)
 		n = CYTP_MAX_MT_SLOTS;
 
@@ -605,10 +596,6 @@
 		return PSMOUSE_BAD_DATA;
 
 	contact_cnt = cypress_get_finger_count(packet[0]);
-
-	if (contact_cnt < 0)
-		return PSMOUSE_BAD_DATA;
-
 	if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
 		cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
 	else
@@ -679,15 +666,15 @@
 {
 	struct cytp_data *cytp;
 
-	cytp = (struct cytp_data *)kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
-	psmouse->private = (void *)cytp;
-	if (cytp == NULL)
+	cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+	if (!cytp)
 		return -ENOMEM;
 
-	cypress_reset(psmouse);
-
+	psmouse->private = cytp;
 	psmouse->pktsize = 8;
 
+	cypress_reset(psmouse);
+
 	if (cypress_query_hardware(psmouse)) {
 		psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
 		goto err_exit;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 1de1e5f..8541f94 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 config SERIO
-	tristate "Serial I/O support" if EXPERT || !X86
+	tristate "Serial I/O support"
 	default y
 	help
 	  Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@
 if SERIO
 
 config SERIO_I8042
-	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
+	tristate "i8042 PC Keyboard controller"
 	default y
 	depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \
 		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
@@ -170,7 +170,7 @@
 	  module will be called maceps2.
 
 config SERIO_LIBPS2
-	tristate "PS/2 driver library" if EXPERT
+	tristate "PS/2 driver library"
 	depends on SERIO_I8042 || SERIO_I8042=n
 	help
 	  Say Y here if you are using a driver for device connected
@@ -266,4 +266,14 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called olpc_apsp.
 
+config HYPERV_KEYBOARD
+	tristate "Microsoft Synthetic Keyboard driver"
+	depends on HYPERV
+	default HYPERV
+	help
+	  Select this option to enable the Hyper-V Keyboard driver.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called hyperv_keyboard.
+
 endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 12298b1..815d874 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -28,3 +28,4 @@
 obj-$(CONFIG_SERIO_ARC_PS2)	+= arc_ps2.o
 obj-$(CONFIG_SERIO_APBPS2)	+= apbps2.o
 obj-$(CONFIG_SERIO_OLPC_APSP)	+= olpc_apsp.o
+obj-$(CONFIG_HYPERV_KEYBOARD)	+= hyperv-keyboard.o
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
new file mode 100644
index 0000000..3a83c3c
--- /dev/null
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -0,0 +1,437 @@
+/*
+ *  Copyright (c) 2013, Microsoft Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/hyperv.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+/*
+ * Current version 1.0
+ *
+ */
+#define SYNTH_KBD_VERSION_MAJOR 1
+#define SYNTH_KBD_VERSION_MINOR	0
+#define SYNTH_KBD_VERSION		(SYNTH_KBD_VERSION_MINOR | \
+					 (SYNTH_KBD_VERSION_MAJOR << 16))
+
+
+/*
+ * Message types in the synthetic input protocol
+ */
+enum synth_kbd_msg_type {
+	SYNTH_KBD_PROTOCOL_REQUEST = 1,
+	SYNTH_KBD_PROTOCOL_RESPONSE = 2,
+	SYNTH_KBD_EVENT = 3,
+	SYNTH_KBD_LED_INDICATORS = 4,
+};
+
+/*
+ * Basic message structures.
+ */
+struct synth_kbd_msg_hdr {
+	__le32 type;
+};
+
+struct synth_kbd_msg {
+	struct synth_kbd_msg_hdr header;
+	char data[]; /* Enclosed message */
+};
+
+union synth_kbd_version {
+	__le32 version;
+};
+
+/*
+ * Protocol messages
+ */
+struct synth_kbd_protocol_request {
+	struct synth_kbd_msg_hdr header;
+	union synth_kbd_version version_requested;
+};
+
+#define PROTOCOL_ACCEPTED	BIT(0)
+struct synth_kbd_protocol_response {
+	struct synth_kbd_msg_hdr header;
+	__le32 proto_status;
+};
+
+#define IS_UNICODE	BIT(0)
+#define IS_BREAK	BIT(1)
+#define IS_E0		BIT(2)
+#define IS_E1		BIT(3)
+struct synth_kbd_keystroke {
+	struct synth_kbd_msg_hdr header;
+	__le16 make_code;
+	__le16 reserved0;
+	__le32 info; /* Additional information */
+};
+
+
+#define HK_MAXIMUM_MESSAGE_SIZE 256
+
+#define KBD_VSC_SEND_RING_BUFFER_SIZE		(10 * PAGE_SIZE)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE		(10 * PAGE_SIZE)
+
+#define XTKBD_EMUL0     0xe0
+#define XTKBD_EMUL1     0xe1
+#define XTKBD_RELEASE   0x80
+
+
+/*
+ * Represents a keyboard device
+ */
+struct hv_kbd_dev {
+	struct hv_device *hv_dev;
+	struct serio *hv_serio;
+	struct synth_kbd_protocol_request protocol_req;
+	struct synth_kbd_protocol_response protocol_resp;
+	/* Synchronize the request/response if needed */
+	struct completion wait_event;
+	spinlock_t lock; /* protects 'started' field */
+	bool started;
+};
+
+static void hv_kbd_on_receive(struct hv_device *hv_dev,
+			      struct synth_kbd_msg *msg, u32 msg_length)
+{
+	struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+	struct synth_kbd_keystroke *ks_msg;
+	unsigned long flags;
+	u32 msg_type = __le32_to_cpu(msg->header.type);
+	u32 info;
+	u16 scan_code;
+
+	switch (msg_type) {
+	case SYNTH_KBD_PROTOCOL_RESPONSE:
+		/*
+		 * Validate the information provided by the host.
+		 * If the host is giving us a bogus packet,
+		 * drop the packet (hoping the problem
+		 * goes away).
+		 */
+		if (msg_length < sizeof(struct synth_kbd_protocol_response)) {
+			dev_err(&hv_dev->device,
+				"Illegal protocol response packet (len: %d)\n",
+				msg_length);
+			break;
+		}
+
+		memcpy(&kbd_dev->protocol_resp, msg,
+			sizeof(struct synth_kbd_protocol_response));
+		complete(&kbd_dev->wait_event);
+		break;
+
+	case SYNTH_KBD_EVENT:
+		/*
+		 * Validate the information provided by the host.
+		 * If the host is giving us a bogus packet,
+		 * drop the packet (hoping the problem
+		 * goes away).
+		 */
+		if (msg_length < sizeof(struct synth_kbd_keystroke)) {
+			dev_err(&hv_dev->device,
+				"Illegal keyboard event packet (len: %d)\n",
+				msg_length);
+			break;
+		}
+
+		ks_msg = (struct synth_kbd_keystroke *)msg;
+		info = __le32_to_cpu(ks_msg->info);
+
+		/*
+		 * Inject the information through the serio interrupt.
+		 */
+		spin_lock_irqsave(&kbd_dev->lock, flags);
+		if (kbd_dev->started) {
+			if (info & IS_E0)
+				serio_interrupt(kbd_dev->hv_serio,
+						XTKBD_EMUL0, 0);
+
+			scan_code = __le16_to_cpu(ks_msg->make_code);
+			if (info & IS_BREAK)
+				scan_code |= XTKBD_RELEASE;
+
+			serio_interrupt(kbd_dev->hv_serio, scan_code, 0);
+		}
+		spin_unlock_irqrestore(&kbd_dev->lock, flags);
+		break;
+
+	default:
+		dev_err(&hv_dev->device,
+			"unhandled message type %d\n", msg_type);
+	}
+}
+
+static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+					  struct vmpacket_descriptor *desc,
+					  u32 bytes_recvd,
+					  u64 req_id)
+{
+	struct synth_kbd_msg *msg;
+	u32 msg_sz;
+
+	switch (desc->type) {
+	case VM_PKT_COMP:
+		break;
+
+	case VM_PKT_DATA_INBAND:
+		/*
+		 * We have a packet that has "inband" data. The API used
+		 * for retrieving the packet guarantees that the complete
+		 * packet is read. So, minimally, we should be able to
+		 * parse the payload header safely (assuming that the host
+		 * can be trusted).  Trusting the host seems to be a
+		 * reasonable assumption because in a virtualized
+		 * environment there is not a whole lot you can do if you
+		 * don't trust the host.
+		 *
+		 * Nonetheless, let us validate if the host can be trusted
+		 * (in a trivial way).  The interesting aspect of this
+		 * validation is how do you recover if we discover that the
+		 * host is not to be trusted? Simply dropping the packet, I
+		 * don't think is an appropriate recovery.  In the interest
+		 * of failing fast, it may be better to crash the guest.
+		 * For now, I will just drop the packet!
+		 */
+
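+		/* desc->offset8 is the payload offset in 8-byte units */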
+		msg_sz = bytes_recvd - (desc->offset8 << 3);
+		if (msg_sz <= sizeof(struct synth_kbd_msg_hdr)) {
+			/*
+			 * Drop the packet and hope
+			 * the problem magically goes away.
+			 */
+			dev_err(&hv_dev->device,
+				"Illegal packet (type: %d, tid: %llx, size: %d)\n",
+				desc->type, req_id, msg_sz);
+			break;
+		}
+
+		msg = (void *)desc + (desc->offset8 << 3);
+		hv_kbd_on_receive(hv_dev, msg, msg_sz);
+		break;
+
+	default:
+		dev_err(&hv_dev->device,
+			"unhandled packet type %d, tid %llx len %d\n",
+			desc->type, req_id, bytes_recvd);
+		break;
+	}
+}
+
+static void hv_kbd_on_channel_callback(void *context)
+{
+	struct hv_device *hv_dev = context;
+	void *buffer;
+	int bufferlen = 0x100; /* Start with sensible size */
+	u32 bytes_recvd;
+	u64 req_id;
+	int error;
+
+	buffer = kmalloc(bufferlen, GFP_ATOMIC);
+	if (!buffer)
+		return;
+
+	while (1) {
+		error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+					     &bytes_recvd, &req_id);
+		switch (error) {
+		case 0:
+			if (bytes_recvd == 0) {
+				kfree(buffer);
+				return;
+			}
+
+			hv_kbd_handle_received_packet(hv_dev, buffer,
+						      bytes_recvd, req_id);
+			break;
+
+		case -ENOBUFS:
+			kfree(buffer);
+			/* Handle large packet */
+			bufferlen = bytes_recvd;
+			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+			if (!buffer)
+				return;
+			break;
+		}
+	}
+}
+
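+/*
+ * Negotiate the protocol version with the host: send a
+ * SYNTH_KBD_PROTOCOL_REQUEST and wait for hv_kbd_on_receive() to
+ * deliver the response and complete 'wait_event'.
+ */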
+static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev)
+{
+	struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+	struct synth_kbd_protocol_request *request;
+	struct synth_kbd_protocol_response *response;
+	u32 proto_status;
+	int error;
+
+	request = &kbd_dev->protocol_req;
+	memset(request, 0, sizeof(struct synth_kbd_protocol_request));
+	request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST);
+	request->version_requested.version = __cpu_to_le32(SYNTH_KBD_VERSION);
+
+	error = vmbus_sendpacket(hv_dev->channel, request,
+				 sizeof(struct synth_kbd_protocol_request),
+				 (unsigned long)request,
+				 VM_PKT_DATA_INBAND,
+				 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (error)
+		return error;
+
+	if (!wait_for_completion_timeout(&kbd_dev->wait_event, 10 * HZ))
+		return -ETIMEDOUT;
+
+	response = &kbd_dev->protocol_resp;
+	proto_status = __le32_to_cpu(response->proto_status);
+	if (!(proto_status & PROTOCOL_ACCEPTED)) {
+		dev_err(&hv_dev->device,
+			"synth_kbd protocol request failed (version %d)\n",
+			SYNTH_KBD_VERSION);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int hv_kbd_start(struct serio *serio)
+{
+	struct hv_kbd_dev *kbd_dev = serio->port_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbd_dev->lock, flags);
+	kbd_dev->started = true;
+	spin_unlock_irqrestore(&kbd_dev->lock, flags);
+
+	return 0;
+}
+
+static void hv_kbd_stop(struct serio *serio)
+{
+	struct hv_kbd_dev *kbd_dev = serio->port_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbd_dev->lock, flags);
+	kbd_dev->started = false;
+	spin_unlock_irqrestore(&kbd_dev->lock, flags);
+}
+
+static int hv_kbd_probe(struct hv_device *hv_dev,
+			const struct hv_vmbus_device_id *dev_id)
+{
+	struct hv_kbd_dev *kbd_dev;
+	struct serio *hv_serio;
+	int error;
+
+	kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL);
+	hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+	if (!kbd_dev || !hv_serio) {
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	kbd_dev->hv_dev = hv_dev;
+	kbd_dev->hv_serio = hv_serio;
+	spin_lock_init(&kbd_dev->lock);
+	init_completion(&kbd_dev->wait_event);
+	hv_set_drvdata(hv_dev, kbd_dev);
+
+	hv_serio->dev.parent  = &hv_dev->device;
+	hv_serio->id.type = SERIO_8042_XL;
+	hv_serio->port_data = kbd_dev;
+	strlcpy(hv_serio->name, dev_name(&hv_dev->device),
+		sizeof(hv_serio->name));
+	strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
+		sizeof(hv_serio->phys));
+
+	hv_serio->start = hv_kbd_start;
+	hv_serio->stop = hv_kbd_stop;
+
+	error = vmbus_open(hv_dev->channel,
+			   KBD_VSC_SEND_RING_BUFFER_SIZE,
+			   KBD_VSC_RECV_RING_BUFFER_SIZE,
+			   NULL, 0,
+			   hv_kbd_on_channel_callback,
+			   hv_dev);
+	if (error)
+		goto err_free_mem;
+
+	error = hv_kbd_connect_to_vsp(hv_dev);
+	if (error)
+		goto err_close_vmbus;
+
+	serio_register_port(kbd_dev->hv_serio);
+	return 0;
+
+err_close_vmbus:
+	vmbus_close(hv_dev->channel);
+err_free_mem:
+	kfree(hv_serio);
+	kfree(kbd_dev);
+	return error;
+}
+
+static int hv_kbd_remove(struct hv_device *hv_dev)
+{
+	struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+
+	serio_unregister_port(kbd_dev->hv_serio);
+	vmbus_close(hv_dev->channel);
+	kfree(kbd_dev);
+
+	hv_set_drvdata(hv_dev, NULL);
+
+	return 0;
+}
+
+/*
+ * Keyboard GUID
+ * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
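+ * (the first three GUID fields are stored little-endian in the byte
+ * array below)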
+ */
+#define HV_KBD_GUID \
+	.guid = { \
+			0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \
+			0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \
+	}
+
+static const struct hv_vmbus_device_id id_table[] = {
+	/* Keyboard guid */
+	{ HV_KBD_GUID, },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver hv_kbd_drv = {
+	.name = KBUILD_MODNAME,
+	.id_table = id_table,
+	.probe = hv_kbd_probe,
+	.remove = hv_kbd_remove,
+};
+
+static int __init hv_kbd_init(void)
+{
+	return vmbus_driver_register(&hv_kbd_drv);
+}
+
+static void __exit hv_kbd_exit(void)
+{
+	vmbus_driver_unregister(&hv_kbd_drv);
+}
+
+MODULE_LICENSE("GPL");
+module_init(hv_kbd_init);
+module_exit(hv_kbd_exit);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5f306f7..0ec9abb 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -765,6 +765,7 @@
 	{ .id = "CPQA0D7", .driver_data = 0 },
 	{ .id = "", },
 };
+MODULE_DEVICE_TABLE(pnp, pnp_kbd_devids);
 
 static struct pnp_driver i8042_pnp_kbd_driver = {
 	.name           = "i8042 kbd",
@@ -786,6 +787,7 @@
 	{ .id = "SYN0801", .driver_data = 0 },
 	{ .id = "", },
 };
+MODULE_DEVICE_TABLE(pnp, pnp_aux_devids);
 
 static struct pnp_driver i8042_pnp_aux_driver = {
 	.name           = "i8042 aux",
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 52c9ebf..020053fa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1036,7 +1036,7 @@
 /*
  * i8042_panic_blink() will turn the keyboard LEDs on or off and is called
  * when kernel panics. Flashing LEDs is useful for users running X who may
- * not see the console and will help distingushing panics from "real"
+ * not see the console and will help distinguish panics from "real"
  * lockups.
  *
  * Note that DELAY has a limit of 10ms so we will not get stuck here
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index e53416a..867e7c3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -524,9 +524,6 @@
 
 		error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
 		                         report_id, rep_data, length, 1);
-		if (error >= 0)
-			error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
-			                         report_id, rep_data, length, 1);
 	} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
 
 	kfree(rep_data);
@@ -548,7 +545,7 @@
 			/* MT Tablet PC touch */
 			return wacom_set_device_mode(intf, 3, 4, 4);
 		}
-		else if (features->type == WACOM_24HDT) {
+		else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) {
 			return wacom_set_device_mode(intf, 18, 3, 2);
 		}
 	} else if (features->device_type == BTN_TOOL_PEN) {
@@ -719,7 +716,7 @@
 		return -ENOMEM;
 
 	if (wacom->wacom_wac.features.type >= INTUOS5S &&
-	    wacom->wacom_wac.features.type <= INTUOS5L)	{
+	    wacom->wacom_wac.features.type <= INTUOSPL) {
 		/*
 		 * Touch Ring and crop mark LED luminance may take on
 		 * one of four values:
@@ -981,14 +978,20 @@
 	case INTUOS5S:
 	case INTUOS5:
 	case INTUOS5L:
-		wacom->led.select[0] = 0;
-		wacom->led.select[1] = 0;
-		wacom->led.llv = 32;
-		wacom->led.hlv = 0;
-		wacom->led.img_lum = 0;
+	case INTUOSPS:
+	case INTUOSPM:
+	case INTUOSPL:
+		if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) {
+			wacom->led.select[0] = 0;
+			wacom->led.select[1] = 0;
+			wacom->led.llv = 32;
+			wacom->led.hlv = 0;
+			wacom->led.img_lum = 0;
 
-		error = sysfs_create_group(&wacom->intf->dev.kobj,
-					   &intuos5_led_attr_group);
+			error = sysfs_create_group(&wacom->intf->dev.kobj,
+						  &intuos5_led_attr_group);
+		} else {
+			return 0;
+		}
 		break;
 
 	default:
@@ -1024,8 +1027,12 @@
 	case INTUOS5S:
 	case INTUOS5:
 	case INTUOS5L:
-		sysfs_remove_group(&wacom->intf->dev.kobj,
-				   &intuos5_led_attr_group);
+	case INTUOSPS:
+	case INTUOSPM:
+	case INTUOSPL:
+		if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN)
+			sysfs_remove_group(&wacom->intf->dev.kobj,
+					   &intuos5_led_attr_group);
 		break;
 	}
 }
@@ -1185,34 +1192,47 @@
 		wacom_wac1->features =
 			*((struct wacom_features *)id->driver_info);
 		wacom_wac1->features.device_type = BTN_TOOL_PEN;
+		snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
+			 wacom_wac1->features.name);
 		error = wacom_register_input(wacom1);
 		if (error)
-			goto fail1;
+			goto fail;
 
 		/* Touch interface */
-		wacom_wac2->features =
-			*((struct wacom_features *)id->driver_info);
-		wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
-		wacom_wac2->features.device_type = BTN_TOOL_FINGER;
-		wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
-		error = wacom_register_input(wacom2);
-		if (error)
-			goto fail2;
+		if (wacom_wac1->features.touch_max) {
+			wacom_wac2->features =
+				*((struct wacom_features *)id->driver_info);
+			wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+			wacom_wac2->features.device_type = BTN_TOOL_FINGER;
+			wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+			if (wacom_wac2->features.touch_max)
+				snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+					 "%s (WL) Finger", wacom_wac2->features.name);
+			else
+				snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+					 "%s (WL) Pad", wacom_wac2->features.name);
+			error = wacom_register_input(wacom2);
+			if (error)
+				goto fail;
+		}
 
 		error = wacom_initialize_battery(wacom);
 		if (error)
-			goto fail3;
+			goto fail;
 	}
 
 	return;
 
-fail3:
-	input_unregister_device(wacom_wac2->input);
-	wacom_wac2->input = NULL;
-fail2:
-	input_unregister_device(wacom_wac1->input);
-	wacom_wac1->input = NULL;
-fail1:
+fail:
+	if (wacom_wac2->input) {
+		input_unregister_device(wacom_wac2->input);
+		wacom_wac2->input = NULL;
+	}
+
+	if (wacom_wac1->input) {
+		input_unregister_device(wacom_wac1->input);
+		wacom_wac1->input = NULL;
+	}
 	return;
 }
 
@@ -1302,7 +1322,7 @@
 	 * HID descriptor. If this is the touch interface (wMaxPacketSize
 	 * of WACOM_PKGLEN_BBTOUCH3), override the table values.
 	 */
-	if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+	if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
 		if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
 			features->device_type = BTN_TOOL_FINGER;
 			features->pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1329,10 +1349,12 @@
 		struct usb_device *other_dev;
 
 		/* Append the device type to the name */
-		strlcat(wacom_wac->name,
-			features->device_type == BTN_TOOL_PEN ?
-				" Pen" : " Finger",
-			sizeof(wacom_wac->name));
+		if (features->device_type != BTN_TOOL_FINGER)
+			strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX);
+		else if (features->touch_max)
+			strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX);
+		else
+			strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX);
 
 		other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
 		if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c59b797..782c253 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -427,6 +427,13 @@
 			(features->type == WACOM_21UX2))
 		return 1;
 
+	/* Range Report */
+	if ((data[1] & 0xfe) == 0x20) {
+		input_report_key(input, BTN_TOUCH, 0);
+		input_report_abs(input, ABS_PRESSURE, 0);
+		input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+	}
+
 	/* Exit report */
 	if ((data[1] & 0xfe) == 0x80) {
 		if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
@@ -477,7 +484,7 @@
 	/* general pen packet */
 	if ((data[1] & 0xb8) == 0xa0) {
 		t = (data[6] << 2) | ((data[7] >> 6) & 3);
-		if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
+		if (features->type >= INTUOS4S && features->type <= CINTIQ_HYBRID) {
 			t = (t << 1) | (data[1] & 1);
 		}
 		input_report_abs(input, ABS_PRESSURE, t);
@@ -621,14 +628,30 @@
 			} else {
 				input_report_abs(input, ABS_MISC, 0);
 			}
-		} else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+		} else if (features->type == CINTIQ_HYBRID) {
+			/*
+			 * Do not send hardware buttons under Android. They
+			 * are already sent to the system through GPIO (and
+			 * have different meaning).
+			 */
+			input_report_key(input, BTN_1, (data[4] & 0x01));
+			input_report_key(input, BTN_2, (data[4] & 0x02));
+			input_report_key(input, BTN_3, (data[4] & 0x04));
+			input_report_key(input, BTN_4, (data[4] & 0x08));
+
+			input_report_key(input, BTN_5, (data[4] & 0x10));  /* Right  */
+			input_report_key(input, BTN_6, (data[4] & 0x20));  /* Up     */
+			input_report_key(input, BTN_7, (data[4] & 0x40));  /* Left   */
+			input_report_key(input, BTN_8, (data[4] & 0x80));  /* Down   */
+			input_report_key(input, BTN_0, (data[3] & 0x01));  /* Center */
+		} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
 			int i;
 
 			/* Touch ring mode switch has no capacitive sensor */
 			input_report_key(input, BTN_0, (data[3] & 0x01));
 
 			/*
-			 * ExpressKeys on Intuos5 have a capacitive sensor in
+			 * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
 			 * addition to the mechanical switch. Switch data is
 			 * stored in data[4], capacitive data in data[5].
 			 */
@@ -716,7 +739,9 @@
 	     features->type == INTUOS4 ||
 	     features->type == INTUOS4S ||
 	     features->type == INTUOS5 ||
-	     features->type == INTUOS5S)) {
+	     features->type == INTUOS5S ||
+	     features->type == INTUOSPM ||
+	     features->type == INTUOSPS)) {
 
 		return 0;
 	}
@@ -769,8 +794,7 @@
 
 		} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
 			/* I4 mouse */
-			if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
-			    (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
+			if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
 				input_report_key(input, BTN_LEFT,   data[6] & 0x01);
 				input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
 				input_report_key(input, BTN_RIGHT,  data[6] & 0x04);
@@ -797,7 +821,8 @@
 				}
 			}
 		} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
-				features->type == INTUOS4L || features->type == INTUOS5L) &&
+				features->type == INTUOS4L || features->type == INTUOS5L ||
+				features->type == INTUOSPL) &&
 			   wacom->tool[idx] == BTN_TOOL_LENS) {
 			/* Lens cursor packets */
 			input_report_key(input, BTN_LEFT,   data[8] & 0x01);
@@ -1107,6 +1132,7 @@
 
 static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
 {
+	struct wacom_features *features = &wacom->features;
 	struct input_dev *input = wacom->input;
 	bool touch = data[1] & 0x80;
 	int slot = input_mt_get_slot_by_key(input, data[0]);
@@ -1122,14 +1148,23 @@
 	if (touch) {
 		int x = (data[2] << 4) | (data[4] >> 4);
 		int y = (data[3] << 4) | (data[4] & 0x0f);
-		int a = data[5];
+		int width, height;
 
-		// "a" is a scaled-down area which we assume is roughly
-		// circular and which can be described as: a=(pi*r^2)/C.
-		int x_res  = input_abs_get_res(input, ABS_X);
-		int y_res  = input_abs_get_res(input, ABS_Y);
-		int width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
-		int height = width * y_res / x_res;
+		if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+			width  = data[5];
+			height = data[6];
+		} else {
+			/*
+			 * "a" is a scaled-down area which we assume is
+			 * roughly circular and which can be described as:
+			 * a=(pi*r^2)/C.
+			 */
+			int a = data[5];
+			int x_res  = input_abs_get_res(input, ABS_X);
+			int y_res  = input_abs_get_res(input, ABS_Y);
+			width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+			height = width * y_res / x_res;
+		}
 
 		input_report_abs(input, ABS_MT_POSITION_X, x);
 		input_report_abs(input, ABS_MT_POSITION_Y, y);
@@ -1327,6 +1362,7 @@
 	case WACOM_22HD:
 	case WACOM_24HD:
 	case DTK:
+	case CINTIQ_HYBRID:
 		sync = wacom_intuos_irq(wacom_wac);
 		break;
 
@@ -1337,6 +1373,9 @@
 	case INTUOS5S:
 	case INTUOS5:
 	case INTUOS5L:
+	case INTUOSPS:
+	case INTUOSPM:
+	case INTUOSPL:
 		if (len == WACOM_PKGLEN_BBTOUCH3)
 			sync = wacom_bpt3_touch(wacom_wac);
 		else
@@ -1420,7 +1459,7 @@
 
 	/* these device have multiple inputs */
 	if (features->type >= WIRELESS ||
-	    (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+	    (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
 	    (features->oVid && features->oPid))
 		features->quirks |= WACOM_QUIRK_MULTI_INPUT;
 
@@ -1627,6 +1666,8 @@
 
 	case INTUOS5:
 	case INTUOS5L:
+	case INTUOSPM:
+	case INTUOSPL:
 		if (features->device_type == BTN_TOOL_PEN) {
 			__set_bit(BTN_7, input_dev->keybit);
 			__set_bit(BTN_8, input_dev->keybit);
@@ -1634,6 +1675,7 @@
 		/* fall through */
 
 	case INTUOS5S:
+	case INTUOSPS:
 		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
 
 		if (features->device_type == BTN_TOOL_PEN) {
@@ -1765,6 +1807,24 @@
 					      0, 0);
 		}
 		break;
+
+	case CINTIQ_HYBRID:
+		__set_bit(BTN_1, input_dev->keybit);
+		__set_bit(BTN_2, input_dev->keybit);
+		__set_bit(BTN_3, input_dev->keybit);
+		__set_bit(BTN_4, input_dev->keybit);
+
+		__set_bit(BTN_5, input_dev->keybit);
+		__set_bit(BTN_6, input_dev->keybit);
+		__set_bit(BTN_7, input_dev->keybit);
+		__set_bit(BTN_8, input_dev->keybit);
+		__set_bit(BTN_0, input_dev->keybit);
+
+		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+		wacom_setup_cintiq(wacom_wac);
+		break;
 	}
 	return 0;
 }
@@ -1952,6 +2012,18 @@
 static const struct wacom_features wacom_features_0x2A =
 	{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS,  44704, 27940, 2047,
 	  63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x314 =
+	{ "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS,  31496, 19685, 2047,
+	  63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  .touch_max = 16 };
+static const struct wacom_features wacom_features_0x315 =
+	{ "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS,  44704, 27940, 2047,
+	  63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  .touch_max = 16 };
+static const struct wacom_features wacom_features_0x317 =
+	{ "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS,  65024, 40640, 2047,
+	  63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  .touch_max = 16 };
 static const struct wacom_features wacom_features_0xF4 =
 	{ "Wacom Cintiq 24HD",       WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
 	  63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2131,6 +2203,13 @@
 static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4",               WACOM_PKGLEN_GRAPHIRE,  12800,  8000,  255,
 	  0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x0307 =
+	{ "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS,  59552,  33848, 2047,
+	  63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
+static const struct wacom_features wacom_features_0x0309 =
+	{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
+	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 };
 
 #define USB_DEVICE_WACOM(prod)					\
 	USB_DEVICE(USB_VENDOR_ID_WACOM, prod),			\
@@ -2259,12 +2338,17 @@
 	{ USB_DEVICE_WACOM(0x300) },
 	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
+	{ USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
+	{ USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
+	{ USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x47) },
 	{ USB_DEVICE_WACOM(0xF4) },
 	{ USB_DEVICE_WACOM(0xF8) },
 	{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
 	{ USB_DEVICE_WACOM(0xFA) },
+	{ USB_DEVICE_WACOM(0x0307) },
+	{ USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
 	{ USB_DEVICE_LENOVO(0x6004) },
 	{ }
 };
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index dfc9e08..fd23a37 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -14,6 +14,8 @@
 /* maximum packet length for USB devices */
 #define WACOM_PKGLEN_MAX	64
 
+#define WACOM_NAME_MAX		64
+
 /* packet length for individual models */
 #define WACOM_PKGLEN_PENPRTN	 7
 #define WACOM_PKGLEN_GRAPHIRE	 8
@@ -76,10 +78,14 @@
 	INTUOS5S,
 	INTUOS5,
 	INTUOS5L,
+	INTUOSPS,
+	INTUOSPM,
+	INTUOSPL,
 	WACOM_21UX2,
 	WACOM_22HD,
 	DTK,
 	WACOM_24HD,
+	CINTIQ_HYBRID,
 	CINTIQ,
 	WACOM_BEE,
 	WACOM_13HD,
@@ -126,7 +132,7 @@
 };
 
 struct wacom_wac {
-	char name[64];
+	char name[WACOM_NAME_MAX];
 	unsigned char *data;
 	int tool[2];
 	int id[2];
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e09ec67..00d1e54 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -919,4 +919,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tps6507x_ts.
 
+config TOUCHSCREEN_ZFORCE
+	tristate "Neonode zForce infrared touchscreens"
+	depends on I2C
+	depends on GPIOLIB
+	help
+	  Say Y here if you have a touchscreen using the zforce
+	  infrared technology from Neonode.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called zforce_ts.
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f5216c1..7587883 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -75,3 +75,4 @@
 obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE)	+= zylonite-wm97xx.o
 obj-$(CONFIG_TOUCHSCREEN_W90X900)	+= w90p910_ts.o
 obj-$(CONFIG_TOUCHSCREEN_TPS6507X)	+= tps6507x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_ZFORCE)	+= zforce_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index f3a174a..69834dd 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -806,7 +806,6 @@
 err_free_mem:
 	input_free_device(input_dev);
 	kfree(ts);
-	spi_set_drvdata(spi, NULL);
 	return err;
 }
 
@@ -823,7 +822,6 @@
 	kfree(ts);
 
 	dev_dbg(&spi->dev, "unregistered touchscreen\n");
-	spi_set_drvdata(spi, NULL);
 
 	return 0;
 }
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 606da5b..1a7b114 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -142,7 +142,6 @@
 	struct ad7879 *ts = spi_get_drvdata(spi);
 
 	ad7879_remove(ts);
-	spi_set_drvdata(spi, NULL);
 
 	return 0;
 }
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index d038575..42d830e 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -2113,7 +2113,6 @@
 error_request_irq:
 	if (cd->cpdata->init)
 		cd->cpdata->init(cd->cpdata, 0, dev);
-	dev_set_drvdata(dev, NULL);
 error_free_xfer:
 	kfree(cd->xfer_buf);
 error_free_cd:
@@ -2151,7 +2150,6 @@
 	free_irq(cd->irq, cd);
 	if (cd->cpdata->init)
 		cd->cpdata->init(cd->cpdata, 0, dev);
-	dev_set_drvdata(dev, NULL);
 	cyttsp4_free_si_ptrs(cd);
 	kfree(cd);
 	return 0;
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
index a71e114..b19434c 100644
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ b/drivers/input/touchscreen/cyttsp4_spi.c
@@ -171,10 +171,7 @@
 	ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq,
 			  CY_SPI_DATA_BUF_SIZE);
 
-	if (IS_ERR(ts))
-		return PTR_ERR(ts);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(ts);
 }
 
 static int cyttsp4_spi_remove(struct spi_device *spi)
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index d53e0b7..4204841 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -242,7 +242,7 @@
 	int retval;
 
 	/* wait for interrupt to set ready completion */
-	INIT_COMPLETION(ts->bl_ready);
+	reinit_completion(&ts->bl_ready);
 	ts->state = CY_BL_STATE;
 
 	enable_irq(ts->irq);
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index ef5fcb0..054d225 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -273,7 +273,7 @@
 		.name	= "egalax_ts",
 		.owner	= THIS_MODULE,
 		.pm	= &egalax_ts_pm_ops,
-		.of_match_table	= of_match_ptr(egalax_ts_dt_ids),
+		.of_match_table	= egalax_ts_dt_ids,
 	},
 	.id_table	= egalax_ts_id,
 	.probe		= egalax_ts_probe,
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 6650085..92e2243 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -186,8 +186,6 @@
 	release_region(HTCPEN_PORT_INIT, 1);
 	release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
 
-	dev_set_drvdata(dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 1740a24..2f03b2f 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -24,6 +24,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/pm_qos.h>
 #include <linux/slab.h>
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 24e625c..68beada 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -354,9 +354,16 @@
 	if (err < 0)
 		return err;
 
-	err = of_property_read_u32(node, "ti,coordiante-readouts",
+	/*
+	 * Try the new binding first. If it fails, try again with the
+	 * bogus, misspelled version.
+	 */
+	err = of_property_read_u32(node, "ti,coordinate-readouts",
 			&ts_dev->coordinate_readouts);
 	if (err < 0)
+		err = of_property_read_u32(node, "ti,coordiante-readouts",
+				&ts_dev->coordinate_readouts);
+	if (err < 0)
 		return err;
 
 	return of_property_read_u32_array(node, "ti,wire-config",
@@ -511,7 +518,7 @@
 		.name   = "TI-am335x-tsc",
 		.owner	= THIS_MODULE,
 		.pm	= TITSC_PM_OPS,
-		.of_match_table = of_match_ptr(ti_tsc_dt_ids),
+		.of_match_table = ti_tsc_dt_ids,
 	},
 };
 module_platform_driver(ti_tsc_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 7213e8b..8113533 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -678,7 +678,6 @@
 err_remove_sysfs:
 	sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
 err_clear_drvdata:
-	spi_set_drvdata(spi, NULL);
 	free_irq(spi->irq, ts);
 err_free_mem:
 	input_free_device(input_dev);
@@ -696,7 +695,6 @@
 	input_unregister_device(ts->idev);
 	kfree(ts);
 
-	spi_set_drvdata(spi, NULL);
 	return 0;
 }
 
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 721fdb3..ae4b6b9 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -146,12 +146,10 @@
 
 #define USB_DEVICE_HID_CLASS(vend, prod) \
 	.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
-		| USB_DEVICE_ID_MATCH_INT_PROTOCOL \
 		| USB_DEVICE_ID_MATCH_DEVICE, \
 	.idVendor = (vend), \
 	.idProduct = (prod), \
-	.bInterfaceClass = USB_INTERFACE_CLASS_HID, \
-	.bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE
+	.bInterfaceClass = USB_INTERFACE_CLASS_HID
 
 static const struct usb_device_id usbtouch_devices[] = {
 #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
new file mode 100644
index 0000000..75762d6
--- /dev/null
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based in parts on Nook zforce driver
+ *
+ * Copyright (C) 2010 Barnes & Noble, Inc.
+ * Author: Pieter Truter<ptruter@intrinsyc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/input/mt.h>
+#include <linux/platform_data/zforce_ts.h>
+
+#define WAIT_TIMEOUT		msecs_to_jiffies(1000)
+
+#define FRAME_START		0xee
+
+/* Offsets of the different parts of the payload the controller sends */
+#define PAYLOAD_HEADER		0
+#define PAYLOAD_LENGTH		1
+#define PAYLOAD_BODY		2
+
+/* Response offsets */
+#define RESPONSE_ID		0
+#define RESPONSE_DATA		1
+
+/* Commands */
+#define COMMAND_DEACTIVATE	0x00
+#define COMMAND_INITIALIZE	0x01
+#define COMMAND_RESOLUTION	0x02
+#define COMMAND_SETCONFIG	0x03
+#define COMMAND_DATAREQUEST	0x04
+#define COMMAND_SCANFREQ	0x08
+#define COMMAND_STATUS		0x1e
+
+/*
+ * Responses the controller sends as a result of
+ * command requests
+ */
+#define RESPONSE_DEACTIVATE	0x00
+#define RESPONSE_INITIALIZE	0x01
+#define RESPONSE_RESOLUTION	0x02
+#define RESPONSE_SETCONFIG	0x03
+#define RESPONSE_SCANFREQ	0x08
+#define RESPONSE_STATUS		0x1e
+
+/*
+ * Notifications are send by the touch controller without
+ * being requested by the driver and include for example
+ * touch indications
+ */
+#define NOTIFICATION_TOUCH		0x04
+#define NOTIFICATION_BOOTCOMPLETE	0x07
+#define NOTIFICATION_OVERRUN		0x25
+#define NOTIFICATION_PROXIMITY		0x26
+#define NOTIFICATION_INVALID_COMMAND	0xfe
+
+#define ZFORCE_REPORT_POINTS		2
+#define ZFORCE_MAX_AREA			0xff
+
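+/* Touch point states reported in the low bits of the state/id byte */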
+#define STATE_DOWN			0
+#define STATE_MOVE			1
+#define STATE_UP			2
+
+#define SETCONFIG_DUALTOUCH		(1 << 0)
+
+struct zforce_point {
+	int coord_x;
+	int coord_y;
+	int state;
+	int id;
+	int area_major;
+	int area_minor;
+	int orientation;
+	int pressure;
+	int prblty;
+};
+
+/*
+ * @client		the i2c_client
+ * @input		the input device
+ * @suspending		in the process of going to suspend (don't emit wakeup
+ *			events for commands executed to suspend the device)
+ * @suspended		device suspended
+ * @access_mutex	serialize i2c-access, to keep multipart reads together
+ * @command_done	completion to wait for the command result
+ * @command_mutex	serialize commands sent to the IC
+ * @command_waiting	the id of the command that is currently waiting
+ *			for a result
+ * @command_result	returned result of the command
+ */
+struct zforce_ts {
+	struct i2c_client	*client;
+	struct input_dev	*input;
+	const struct zforce_ts_platdata *pdata;
+	char			phys[32];
+
+	bool			suspending;
+	bool			suspended;
+	bool			boot_complete;
+
+	/* Firmware version information */
+	u16			version_major;
+	u16			version_minor;
+	u16			version_build;
+	u16			version_rev;
+
+	struct mutex		access_mutex;
+
+	struct completion	command_done;
+	struct mutex		command_mutex;
+	int			command_waiting;
+	int			command_result;
+};
+
+static int zforce_command(struct zforce_ts *ts, u8 cmd)
+{
+	struct i2c_client *client = ts->client;
+	char buf[3];
+	int ret;
+
+	dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+	buf[0] = FRAME_START;
+	buf[1] = 1; /* data size, command only */
+	buf[2] = cmd;
+
+	mutex_lock(&ts->access_mutex);
+	ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
+	mutex_unlock(&ts->access_mutex);
+	if (ret < 0) {
+		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
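+/*
+ * Send a command frame and wait for zforce_complete() to signal the
+ * matching response, or time out. Only one command may be in flight
+ * at a time, hence the trylock and -EBUSY below.
+ */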
+static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
+{
+	struct i2c_client *client = ts->client;
+	int ret;
+
+	ret = mutex_trylock(&ts->command_mutex);
+	if (!ret) {
+		dev_err(&client->dev, "already waiting for a command\n");
+		return -EBUSY;
+	}
+
+	dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
+		buf[1], buf[2]);
+
+	ts->command_waiting = buf[2];
+
+	mutex_lock(&ts->access_mutex);
+	ret = i2c_master_send(client, buf, len);
+	mutex_unlock(&ts->access_mutex);
+	if (ret < 0) {
+		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+		goto unlock;
+	}
+
+	dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
+
+	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
+		ret = -ETIME;
+		goto unlock;
+	}
+
+	ret = ts->command_result;
+
+unlock:
+	mutex_unlock(&ts->command_mutex);
+	return ret;
+}
+
+static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
+{
+	struct i2c_client *client = ts->client;
+	char buf[3];
+	int ret;
+
+	dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+	buf[0] = FRAME_START;
+	buf[1] = 1; /* data size, command only */
+	buf[2] = cmd;
+
+	ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+	if (ret < 0) {
+		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int zforce_resolution(struct zforce_ts *ts, u16 x, u16 y)
+{
+	struct i2c_client *client = ts->client;
+	char buf[7] = { FRAME_START, 5, COMMAND_RESOLUTION,
+			(x & 0xff), ((x >> 8) & 0xff),
+			(y & 0xff), ((y >> 8) & 0xff) };
+
+	dev_dbg(&client->dev, "set resolution to (%d,%d)\n", x, y);
+
+	return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_scan_frequency(struct zforce_ts *ts, u16 idle, u16 finger,
+				 u16 stylus)
+{
+	struct i2c_client *client = ts->client;
+	char buf[9] = { FRAME_START, 7, COMMAND_SCANFREQ,
+			(idle & 0xff), ((idle >> 8) & 0xff),
+			(finger & 0xff), ((finger >> 8) & 0xff),
+			(stylus & 0xff), ((stylus >> 8) & 0xff) };
+
+	dev_dbg(&client->dev, "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n",
+		idle, finger, stylus);
+
+	return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_setconfig(struct zforce_ts *ts, char b1)
+{
+	struct i2c_client *client = ts->client;
+	char buf[7] = { FRAME_START, 5, COMMAND_SETCONFIG,
+			b1, 0, 0, 0 };
+
+	dev_dbg(&client->dev, "set config to (%d)\n", b1);
+
+	return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_start(struct zforce_ts *ts)
+{
+	struct i2c_client *client = ts->client;
+	const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+	int ret;
+
+	dev_dbg(&client->dev, "starting device\n");
+
+	ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+	if (ret) {
+		dev_err(&client->dev, "Unable to initialize, %d\n", ret);
+		return ret;
+	}
+
+	ret = zforce_resolution(ts, pdata->x_max, pdata->y_max);
+	if (ret) {
+		dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
+		goto error;
+	}
+
+	ret = zforce_scan_frequency(ts, 10, 50, 50);
+	if (ret) {
+		dev_err(&client->dev, "Unable to set scan frequency, %d\n",
+			ret);
+		goto error;
+	}
+
+	if (zforce_setconfig(ts, SETCONFIG_DUALTOUCH)) {
+		dev_err(&client->dev, "Unable to set config\n");
+		goto error;
+	}
+
+	/* start sending touch events */
+	ret = zforce_command(ts, COMMAND_DATAREQUEST);
+	if (ret) {
+		dev_err(&client->dev, "Unable to request data\n");
+		goto error;
+	}
+
+	/*
+	 * Per NN, the initial calibration takes at most 200 msec.
+	 * Allow time for it to complete.
+	 */
+	msleep(200);
+
+	return 0;
+
+error:
+	zforce_command_wait(ts, COMMAND_DEACTIVATE);
+	return ret;
+}
+
+static int zforce_stop(struct zforce_ts *ts)
+{
+	struct i2c_client *client = ts->client;
+	int ret;
+
+	dev_dbg(&client->dev, "stopping device\n");
+
+	/* Deactivates touch sensing and puts the device into sleep. */
+	ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+	if (ret != 0) {
+		dev_err(&client->dev, "could not deactivate device, %d\n",
+			ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+{
+	struct i2c_client *client = ts->client;
+	const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+	struct zforce_point point;
+	int count, i, num = 0;
+
+	count = payload[0];
+	if (count > ZFORCE_REPORT_POINTS) {
+		dev_warn(&client->dev, "too many coordinates %d, expected max %d\n",
+			 count, ZFORCE_REPORT_POINTS);
+		count = ZFORCE_REPORT_POINTS;
+	}
+
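+	/*
+	 * Each point occupies 9 bytes: x (le16), y (le16), a combined
+	 * state/id byte, two size bytes, pressure and probability.
+	 */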
+	for (i = 0; i < count; i++) {
+		point.coord_x =
+			payload[9 * i + 2] << 8 | payload[9 * i + 1];
+		point.coord_y =
+			payload[9 * i + 4] << 8 | payload[9 * i + 3];
+
+		if (point.coord_x > pdata->x_max ||
+		    point.coord_y > pdata->y_max) {
+			dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+				point.coord_x, point.coord_y);
+			point.coord_x = point.coord_y = 0;
+		}
+
+		point.state = payload[9 * i + 5] & 0x03;
+		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+
+		/* determine touch major, minor and orientation */
+		point.area_major = max(payload[9 * i + 6],
+					  payload[9 * i + 7]);
+		point.area_minor = min(payload[9 * i + 6],
+					  payload[9 * i + 7]);
+		point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+
+		point.pressure = payload[9 * i + 8];
+		point.prblty = payload[9 * i + 9];
+
+		dev_dbg(&client->dev,
+			"point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
+			i, count, point.state, point.id,
+			point.pressure, point.prblty,
+			point.coord_x, point.coord_y,
+			point.area_major, point.area_minor,
+			point.orientation);
+
+		/* the zforce id starts at 1, so it needs to be decremented */
+		input_mt_slot(ts->input, point.id - 1);
+
+		input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
+						point.state != STATE_UP);
+
+		if (point.state != STATE_UP) {
+			input_report_abs(ts->input, ABS_MT_POSITION_X,
+					 point.coord_x);
+			input_report_abs(ts->input, ABS_MT_POSITION_Y,
+					 point.coord_y);
+			input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+					 point.area_major);
+			input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+					 point.area_minor);
+			input_report_abs(ts->input, ABS_MT_ORIENTATION,
+					 point.orientation);
+			num++;
+		}
+	}
+
+	input_mt_sync_frame(ts->input);
+
+	input_mt_report_finger_count(ts->input, num);
+
+	input_sync(ts->input);
+
+	return 0;
+}
+
+static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
+{
+	struct i2c_client *client = ts->client;
+	int ret;
+
+	mutex_lock(&ts->access_mutex);
+
+	/* read 2 byte message header */
+	ret = i2c_master_recv(client, buf, 2);
+	if (ret < 0) {
+		dev_err(&client->dev, "error reading header: %d\n", ret);
+		goto unlock;
+	}
+
+	if (buf[PAYLOAD_HEADER] != FRAME_START) {
+		dev_err(&client->dev, "invalid frame start: %d\n", buf[0]);
+		ret = -EIO;
+		goto unlock;
+	}
+
+	if (buf[PAYLOAD_LENGTH] == 0) {
+		dev_err(&client->dev, "invalid payload length: %d\n",
+			buf[PAYLOAD_LENGTH]);
+		ret = -EIO;
+		goto unlock;
+	}
+
+	/* read the message */
+	ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]);
+	if (ret < 0) {
+		dev_err(&client->dev, "error reading payload: %d\n", ret);
+		goto unlock;
+	}
+
+	dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n",
+		buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]);
+
+unlock:
+	mutex_unlock(&ts->access_mutex);
+	return ret;
+}
+
+static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
+{
+	struct i2c_client *client = ts->client;
+
+	if (ts->command_waiting == cmd) {
+		dev_dbg(&client->dev, "completing command 0x%x\n", cmd);
+		ts->command_result = result;
+		complete(&ts->command_done);
+	} else {
+		dev_dbg(&client->dev, "command %d not for us\n", cmd);
+	}
+}
+
+static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+{
+	struct zforce_ts *ts = dev_id;
+	struct i2c_client *client = ts->client;
+	const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+	int ret;
+	u8 payload_buffer[512];
+	u8 *payload;
+
+	/*
+	 * When suspended, emit a wakeup signal if necessary and return.
+	 * Due to the level-triggered interrupt we will get re-triggered later.
+	 */
+	if (ts->suspended) {
+		if (device_may_wakeup(&client->dev))
+			pm_wakeup_event(&client->dev, 500);
+		msleep(20);
+		return IRQ_HANDLED;
+	}
+
+	dev_dbg(&client->dev, "handling interrupt\n");
+
+	/* Don't emit wakeup events from commands run by zforce_suspend */
+	if (!ts->suspending && device_may_wakeup(&client->dev))
+		pm_stay_awake(&client->dev);
+
+	while (!gpio_get_value(pdata->gpio_int)) {
+		ret = zforce_read_packet(ts, payload_buffer);
+		if (ret < 0) {
+			dev_err(&client->dev, "could not read packet, ret: %d\n",
+				ret);
+			break;
+		}
+
+		payload = &payload_buffer[PAYLOAD_BODY];
+
+		switch (payload[RESPONSE_ID]) {
+		case NOTIFICATION_TOUCH:
+			/*
+			 * Always report touch-events received while
+			 * suspending, when being a wakeup source
+			 */
+			if (ts->suspending && device_may_wakeup(&client->dev))
+				pm_wakeup_event(&client->dev, 500);
+			zforce_touch_event(ts, &payload[RESPONSE_DATA]);
+			break;
+
+		case NOTIFICATION_BOOTCOMPLETE:
+			ts->boot_complete = payload[RESPONSE_DATA];
+			zforce_complete(ts, payload[RESPONSE_ID], 0);
+			break;
+
+		case RESPONSE_INITIALIZE:
+		case RESPONSE_DEACTIVATE:
+		case RESPONSE_SETCONFIG:
+		case RESPONSE_RESOLUTION:
+		case RESPONSE_SCANFREQ:
+			zforce_complete(ts, payload[RESPONSE_ID],
+					payload[RESPONSE_DATA]);
+			break;
+
+		case RESPONSE_STATUS:
+			/*
+			 * Version Payload Results
+			 * [2:major] [2:minor] [2:build] [2:rev]
+			 */
+			ts->version_major = (payload[RESPONSE_DATA + 1] << 8) |
+						payload[RESPONSE_DATA];
+			ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) |
+						payload[RESPONSE_DATA + 2];
+			ts->version_build = (payload[RESPONSE_DATA + 5] << 8) |
+						payload[RESPONSE_DATA + 4];
+			ts->version_rev   = (payload[RESPONSE_DATA + 7] << 8) |
+						payload[RESPONSE_DATA + 6];
+			dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n",
+				ts->version_major, ts->version_minor,
+				ts->version_build, ts->version_rev);
+
+			zforce_complete(ts, payload[RESPONSE_ID], 0);
+			break;
+
+		case NOTIFICATION_INVALID_COMMAND:
+			dev_err(&ts->client->dev, "invalid command: 0x%x\n",
+				payload[RESPONSE_DATA]);
+			break;
+
+		default:
+			dev_err(&ts->client->dev, "unrecognized response id: 0x%x\n",
+				payload[RESPONSE_ID]);
+			break;
+		}
+	}
+
+	if (!ts->suspending && device_may_wakeup(&client->dev))
+		pm_relax(&client->dev);
+
+	dev_dbg(&client->dev, "finished interrupt\n");
+
+	return IRQ_HANDLED;
+}
+
+static int zforce_input_open(struct input_dev *dev)
+{
+	struct zforce_ts *ts = input_get_drvdata(dev);
+
+	return zforce_start(ts);
+}
+
+static void zforce_input_close(struct input_dev *dev)
+{
+	struct zforce_ts *ts = input_get_drvdata(dev);
+	struct i2c_client *client = ts->client;
+	int ret;
+
+	ret = zforce_stop(ts);
+	if (ret)
+		dev_warn(&client->dev, "stopping zforce failed\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zforce_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct zforce_ts *ts = i2c_get_clientdata(client);
+	struct input_dev *input = ts->input;
+	int ret = 0;
+
+	mutex_lock(&input->mutex);
+	ts->suspending = true;
+
+	/*
+	 * When configured as a wakeup source, the device should always
+	 * wake the system; therefore start the device if necessary.
+	 */
+	if (device_may_wakeup(&client->dev)) {
+		dev_dbg(&client->dev, "suspend while being a wakeup source\n");
+
+		/* Need to start device, if not open, to be a wakeup source. */
+		if (!input->users) {
+			ret = zforce_start(ts);
+			if (ret)
+				goto unlock;
+		}
+
+		enable_irq_wake(client->irq);
+	} else if (input->users) {
+		dev_dbg(&client->dev, "suspend without being a wakeup source\n");
+
+		ret = zforce_stop(ts);
+		if (ret)
+			goto unlock;
+
+		disable_irq(client->irq);
+	}
+
+	ts->suspended = true;
+
+unlock:
+	ts->suspending = false;
+	mutex_unlock(&input->mutex);
+
+	return ret;
+}
+
+static int zforce_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct zforce_ts *ts = i2c_get_clientdata(client);
+	struct input_dev *input = ts->input;
+	int ret = 0;
+
+	mutex_lock(&input->mutex);
+
+	ts->suspended = false;
+
+	if (device_may_wakeup(&client->dev)) {
+		dev_dbg(&client->dev, "resume from being a wakeup source\n");
+
+		disable_irq_wake(client->irq);
+
+		/* need to stop device if it was not open on suspend */
+		if (!input->users) {
+			ret = zforce_stop(ts);
+			if (ret)
+				goto unlock;
+		}
+	} else if (input->users) {
+		dev_dbg(&client->dev, "resume without being a wakeup source\n");
+
+		enable_irq(client->irq);
+
+		ret = zforce_start(ts);
+		if (ret < 0)
+			goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&input->mutex);
+
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
+
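+/* devm-managed action: hold the controller in reset on driver teardown */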
+static void zforce_reset(void *data)
+{
+	struct zforce_ts *ts = data;
+
+	gpio_set_value(ts->pdata->gpio_rst, 0);
+}
+
+static int zforce_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+	struct zforce_ts *ts;
+	struct input_dev *input_dev;
+	int ret;
+
+	if (!pdata)
+		return -EINVAL;
+
+	ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
+	if (!ts)
+		return -ENOMEM;
+
+	ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
+				    "zforce_ts_int");
+	if (ret) {
+		dev_err(&client->dev, "request of gpio %d failed, %d\n",
+			pdata->gpio_int, ret);
+		return ret;
+	}
+
+	ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
+				    GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
+	if (ret) {
+		dev_err(&client->dev, "request of gpio %d failed, %d\n",
+			pdata->gpio_rst, ret);
+		return ret;
+	}
+
+	ret = devm_add_action(&client->dev, zforce_reset, ts);
+	if (ret) {
+		dev_err(&client->dev, "failed to register reset action, %d\n",
+			ret);
+		return ret;
+	}
+
+	snprintf(ts->phys, sizeof(ts->phys),
+		 "%s/input0", dev_name(&client->dev));
+
+	input_dev = devm_input_allocate_device(&client->dev);
+	if (!input_dev) {
+		dev_err(&client->dev, "could not allocate input device\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ts->access_mutex);
+	mutex_init(&ts->command_mutex);
+
+	ts->pdata = pdata;
+	ts->client = client;
+	ts->input = input_dev;
+
+	input_dev->name = "Neonode zForce touchscreen";
+	input_dev->phys = ts->phys;
+	input_dev->id.bustype = BUS_I2C;
+
+	input_dev->open = zforce_input_open;
+	input_dev->close = zforce_input_close;
+
+	__set_bit(EV_KEY, input_dev->evbit);
+	__set_bit(EV_SYN, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+
+	/* For multi touch */
+	input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+			     pdata->x_max, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+			     pdata->y_max, 0, 0);
+
+	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+			     ZFORCE_MAX_AREA, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+			     ZFORCE_MAX_AREA, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+	input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT);
+
+	input_set_drvdata(ts->input, ts);
+
+	init_completion(&ts->command_done);
+
+	/*
+	 * The zforce pulls the interrupt low when it has data ready.
+	 * After it is triggered the isr thread runs until all the available
+	 * packets have been read and the interrupt is high again.
+	 * Therefore we can trigger the interrupt anytime it is low and do
+	 * not need to limit it to the interrupt edge.
+	 */
+	ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+					zforce_interrupt,
+					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+					input_dev->name, ts);
+	if (ret) {
+		dev_err(&client->dev, "irq %d request failed\n", client->irq);
+		return ret;
+	}
+
+	i2c_set_clientdata(client, ts);
+
+	/* let the controller boot */
+	gpio_set_value(pdata->gpio_rst, 1);
+
+	ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
+	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+		dev_warn(&client->dev, "bootcomplete timed out\n");
+
+	/* need to start device to get version information */
+	ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+	if (ret) {
+		dev_err(&client->dev, "unable to initialize, %d\n", ret);
+		return ret;
+	}
+
+	/* this gets the firmware version among other information */
+	ret = zforce_command_wait(ts, COMMAND_STATUS);
+	if (ret < 0) {
+		dev_err(&client->dev, "couldn't get status, %d\n", ret);
+		zforce_stop(ts);
+		return ret;
+	}
+
+	/* stop device and put it into sleep until it is opened */
+	ret = zforce_stop(ts);
+	if (ret < 0)
+		return ret;
+
+	device_set_wakeup_capable(&client->dev, true);
+
+	ret = input_register_device(input_dev);
+	if (ret) {
+		dev_err(&client->dev, "could not register input device, %d\n",
+			ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct i2c_device_id zforce_idtable[] = {
+	{ "zforce-ts", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, zforce_idtable);
+
+static struct i2c_driver zforce_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "zforce-ts",
+		.pm	= &zforce_pm_ops,
+	},
+	.probe		= zforce_probe,
+	.id_table	= zforce_idtable,
+};
+
+module_i2c_driver(zforce_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("zForce TouchScreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c880eba..3e7fdbb 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -206,7 +206,7 @@
 config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
-	depends on (ARM && ARCH_SHMOBILE)
+	depends on ARM
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f47..5d58bf1 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 181c9ba..1abfb56 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -590,6 +590,9 @@
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
 	} else {
+		dev_err_ratelimited(smmu->dev,
+		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+		    iova, fsynr, root_cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -778,7 +781,7 @@
 #ifdef __BIG_ENDIAN
 	reg |= SCTLR_E;
 #endif
-	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -1212,7 +1215,10 @@
 
 		arm_smmu_flush_pgtable(smmu, page_address(table),
 				       ARM_SMMU_PTE_HWTABLE_SIZE);
-		pgtable_page_ctor(table);
+		if (!pgtable_page_ctor(table)) {
+			__free_page(table);
+			return -ENOMEM;
+		}
 		pmd_populate(NULL, pmd, table);
 		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
 	}
@@ -1559,9 +1565,13 @@
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+	void __iomem *cb_base;
 	int i = 0;
-	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	u32 reg;
+
+	/* Clear Global FSR */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1569,33 +1579,38 @@
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
-	/* Make sure all context banks are disabled */
-	for (i = 0; i < smmu->num_context_banks; ++i)
-		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+	/* Make sure all context banks are disabled and clear CB_FSR  */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
 	/* Enable fault reporting */
-	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
 
 	/* Disable TLB broadcasting. */
-	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (sCR0_VMIDPNE | sCR0_PTM);
 
 	/* Enable client access, but bypass when no mapping is found */
-	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
 
 	/* Disable forced broadcasting */
-	scr0 &= ~sCR0_FB;
+	reg &= ~sCR0_FB;
 
 	/* Don't upgrade barriers */
-	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1700,13 +1715,12 @@
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
 
-	/* Check that we ioremapped enough */
+	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= (smmu->pagesize << 1);
-	if (smmu->size < size)
-		dev_warn(smmu->dev,
-			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
-			 size, smmu->size);
+	if (smmu->size != size)
+		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+			"from mapped region size (0x%lx)!\n", size, smmu->size);
 
 	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
 				      ID1_NUMS2CB_MASK;
@@ -1781,15 +1795,10 @@
 	smmu->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing base address/size\n");
-		return -ENODEV;
-	}
-
+	smmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(smmu->base))
+		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
-	smmu->base = devm_request_and_ioremap(dev, res);
-	if (!smmu->base)
-		return -EADDRNOTAVAIL;
 
 	if (of_property_read_u32(dev->of_node, "#global-interrupts",
 				 &smmu->num_global_irqs)) {
@@ -1804,12 +1813,11 @@
 			smmu->num_context_irqs++;
 	}
 
-	if (num_irqs < smmu->num_global_irqs) {
-		dev_warn(dev, "found %d interrupts but expected at least %d\n",
-			 num_irqs, smmu->num_global_irqs);
-		smmu->num_global_irqs = num_irqs;
+	if (!smmu->num_context_irqs) {
+		dev_err(dev, "found %d interrupts but expected at least %d\n",
+			num_irqs, smmu->num_global_irqs + 1);
+		return -ENODEV;
 	}
-	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
 
 	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
 				  GFP_KERNEL);
@@ -1933,7 +1941,7 @@
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }
 
@@ -1981,7 +1989,7 @@
 	return platform_driver_unregister(&arm_smmu_driver);
 }
 
-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a..8b452c9 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -88,7 +88,7 @@
 			pr_warn("Device scope bus [%d] not found\n", scope->bus);
 			break;
 		}
-		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
 		if (!pdev) {
 			/* warning will be printed below */
 			break;
@@ -99,7 +99,7 @@
 	}
 	if (!pdev) {
 		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->dev, path->fn);
+			segment, scope->bus, path->device, path->function);
 		*dev = NULL;
 		return 0;
 	}
@@ -403,7 +403,7 @@
 
 	dev = pci_physfn(dev);
 
-	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+	for_each_drhd_unit(dmaru) {
 		drhd = container_of(dmaru->hdr,
 				    struct acpi_dmar_hardware_unit,
 				    header);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 15e9b57..43b9bfe 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@
 	int offset;
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+		/* Address beyond IOMMU's addressing capabilities. */
+		return NULL;
+
 	parent = domain->pgd;
 
 	while (level > 0) {
@@ -3777,11 +3781,10 @@
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
 					  struct pci_dev *pdev)
 {
-	struct device_domain_info *info;
+	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	int found = 0;
-	struct list_head *entry, *tmp;
 
 	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
 				pdev->devfn);
@@ -3789,8 +3792,7 @@
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_safe(entry, tmp, &domain->devices) {
-		info = list_entry(entry, struct device_domain_info, link);
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
 		if (info->segment == pci_domain_nr(pdev->bus) &&
 		    info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
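
The conversion above from list_for_each_safe() plus list_entry() to list_for_each_entry_safe() is mechanical; a small sketch of the idiom (structure and function names here are illustrative):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head link;
		int id;
	};

	static void drop_matching(struct list_head *head, int id)
	{
		struct item *it, *tmp;

		/* tmp caches the next entry, so it may be freed in the body */
		list_for_each_entry_safe(it, tmp, head, link) {
			if (it->id == id) {
				list_del(&it->link);
				kfree(it);
			}
		}
	}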
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673d..bab10b1 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@
 	if (disable_irq_remap)
 		return 0;
 	if (irq_remap_broken) {
-		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
-			   "This system BIOS has enabled interrupt remapping\n"
-			   "on a chipset that contains an erratum making that\n"
-			   "feature unstable.  To maintain system stability\n"
-			   "interrupt remapping is being disabled.  Please\n"
-			   "contact your BIOS vendor for an update\n");
+		printk(KERN_WARNING
+			"This system BIOS has enabled interrupt remapping\n"
+			"on a chipset that contains an erratum making that\n"
+			"feature unstable.  To maintain system stability\n"
+			"interrupt remapping is being disabled.  Please\n"
+			"contact your BIOS vendor for an update\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		disable_irq_remap = 1;
 		return 0;
 	}
@@ -686,12 +687,12 @@
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 	ir_hpet[ir_hpet_num].bus   = bus;
-	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_hpet[ir_hpet_num].iommu = iommu;
 	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
 	ir_hpet_num++;
@@ -714,13 +715,13 @@
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 
 	ir_ioapic[ir_ioapic_num].bus   = bus;
-	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_ioapic[ir_ioapic_num].iommu = iommu;
 	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
 	ir_ioapic_num++;
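
The irq_remap_broken hunk above trades WARN_TAINT() (which dumps a full backtrace) for a plain warning plus an explicit taint: a known firmware erratum needs no stack trace. A reduced sketch of that pattern:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	static void report_firmware_erratum(void)
	{
		pr_warn("firmware erratum detected; disabling the feature\n");
		/* LOCKDEP_STILL_OK: tainting here leaves lockdep state valid */
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	}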
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 0000000..bf3b317
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
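
For orientation, a generic sketch (not the actual trace/events/iommu.h) of how such a trace header is laid out, so that exactly one translation unit defining CREATE_TRACE_POINTS, like iommu-traces.c above, emits the tracepoint bodies that EXPORT_TRACEPOINT_SYMBOL_GPL() then exports:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM example

	#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_EXAMPLE_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(example_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value)
	);

	#endif /* _TRACE_EXAMPLE_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>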
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca7..e5555fc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+	trace_add_device_to_group(group->id, dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@
 	sysfs_remove_link(group->devices_kobj, device->name);
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 
+	trace_remove_device_from_group(group->id, dev);
+
 	kfree(device->name);
 	kfree(device);
 	dev->iommu_group = NULL;
@@ -680,10 +685,14 @@
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
+	int ret;
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
 
-	return domain->ops->attach_dev(domain, dev);
+	ret = domain->ops->attach_dev(domain, dev);
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
@@ -693,6 +702,7 @@
 		return;
 
 	domain->ops->detach_dev(domain, dev);
+	trace_detach_device_from_domain(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
@@ -807,17 +817,17 @@
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
 		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
+	else
+		trace_map(iova, paddr, size);
 
 	return ret;
 }
@@ -880,6 +892,7 @@
 		unmapped += unmapped_page;
 	}
 
+	trace_unmap(iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f4003d5..b6f9a51 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -95,4 +95,4 @@
 #define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)							\
-	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+	(platform_get_drvdata(to_platform_device(dev)))
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 108c0e9..dba1a9f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,7 +252,7 @@
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	pfn = __phys_to_pfn(pa);
 	if (!pfn_valid(pfn)) {
-		dev_err(gart->dev, "Invalid page: %08x\n", pa);
+		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
 		spin_unlock_irqrestore(&gart->pte_lock, flags);
 		return -EINVAL;
 	}
@@ -295,8 +295,8 @@
 
 	pa = (pte & GART_PAGE_MASK);
 	if (!pfn_valid(__phys_to_pfn(pa))) {
-		dev_err(gart->dev, "No entry for %08llx:%08x\n",
-			 (unsigned long long)iova, pa);
+		dev_err(gart->dev, "No entry for %08llx:%pa\n",
+			 (unsigned long long)iova, &pa);
 		gart_dump_table(gart);
 		return -EINVAL;
 	}
@@ -351,7 +351,6 @@
 	struct gart_device *gart;
 	struct resource *res, *res_remap;
 	void __iomem *gart_regs;
-	int err;
 	struct device *dev = &pdev->dev;
 
 	if (gart_handle)
@@ -376,8 +375,7 @@
 	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
 	if (!gart_regs) {
 		dev_err(dev, "failed to remap GART registers\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	gart->dev = &pdev->dev;
@@ -391,8 +389,7 @@
 	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
 	if (!gart->savedata) {
 		dev_err(dev, "failed to allocate context save area\n");
-		err = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	platform_set_drvdata(pdev, gart);
@@ -401,32 +398,20 @@
 	gart_handle = gart;
 	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return 0;
-
-fail:
-	if (gart_regs)
-		devm_iounmap(dev, gart_regs);
-	if (gart && gart->savedata)
-		vfree(gart->savedata);
-	devm_kfree(dev, gart);
-	return err;
 }
 
 static int tegra_gart_remove(struct platform_device *pdev)
 {
 	struct gart_device *gart = platform_get_drvdata(pdev);
-	struct device *dev = gart->dev;
 
 	writel(0, gart->regs + GART_CONFIG);
 	if (gart->savedata)
 		vfree(gart->savedata);
-	if (gart->regs)
-		devm_iounmap(dev, gart->regs);
-	devm_kfree(dev, gart);
 	gart_handle = NULL;
 	return 0;
 }
 
-const struct dev_pm_ops tegra_gart_pm_ops = {
+static const struct dev_pm_ops tegra_gart_pm_ops = {
 	.suspend	= tegra_gart_suspend,
 	.resume		= tegra_gart_resume,
 };
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e066560..605b5b4 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -731,7 +731,7 @@
 	unsigned long pfn = __phys_to_pfn(pa);
 	unsigned long flags;
 
-	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
+	dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
 
 	if (!pfn_valid(pfn))
 		return -ENOMEM;
@@ -1254,7 +1254,7 @@
 	return 0;
 }
 
-const struct dev_pm_ops tegra_smmu_pm_ops = {
+static const struct dev_pm_ops tegra_smmu_pm_ops = {
 	.suspend	= tegra_smmu_suspend,
 	.resume		= tegra_smmu_resume,
 };
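
The %08x-to-%pa conversions in the two Tegra hunks above exist because phys_addr_t is 32 or 64 bits depending on the configuration; the %pa specifier prints it at the correct width but, like all %p extensions, takes a pointer to the value. A one-function sketch:

	#include <linux/printk.h>
	#include <linux/types.h>

	static void report_bad_page(phys_addr_t pa)
	{
		pr_err("Invalid page: %pa\n", &pa);	/* note: &pa, not pa */
	}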
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e9480..9031171 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
+	raw_spin_lock(&irq_controller_lock);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
-
-	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
-	unsigned long map = 0;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -666,9 +667,151 @@
 
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 #endif
 
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send an SGI directly to the given CPU interface number
+ *
+ * @cpu_id: the ID for the destination CPU interface
+ * @irq: the IPI number to send an SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+	cpu_id = 1 << cpu_id;
+	/* this always happens on GIC0 */
+	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targeting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for SGIs we have previously sent to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+						dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+	if (!gic_dist_physaddr)
+		return 0;
+	return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+	struct resource res;
+	if (of_address_to_resource(node, 0, &res) == 0) {
+		gic_dist_physaddr = res.start;
+		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+	}
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
+#endif
+
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 				irq_hw_number_t hw)
 {
@@ -850,6 +993,8 @@
 		percpu_offset = 0;
 
 	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+	if (!gic_cnt)
+		gic_init_physaddr(node);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
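
gic_get_cpu_id() above refuses to answer when gic_cpu_map[cpu] has more than one bit set; the test it uses is the classic clear-lowest-set-bit trick, sketched here:

	#include <linux/types.h>

	static bool exactly_one_bit_set(u8 map)
	{
		/* map & (map - 1) clears the lowest set bit; the result is
		 * zero only if that was the sole bit set */
		return map && !(map & (map - 1));
	}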
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 52377b4..a2e0ed6 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -481,7 +481,7 @@
 int diva_os_register_irq(void *context, byte irq, const char *name)
 {
 	int result = request_irq(irq, diva_os_irq_wrapper,
-				 IRQF_DISABLED | IRQF_SHARED, name, context);
+				 IRQF_SHARED, name, context);
 	return (result);
 }
 
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
index 7cab5c3..e151971 100644
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ b/drivers/isdn/hardware/eicon/um_idi.c
@@ -288,9 +288,9 @@
 	cleanup_entity(e);
 	diva_os_free(0, e->os_context);
 	memset(e, 0x00, sizeof(*e));
-	diva_os_free(0, e);
 
 	DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+	diva_os_free(0, e);
 
 	return (0);
 }
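
The um_idi.c hunk above is a use-after-free fix: the entity was freed and only then had its address formatted into the debug log. The rule, as a sketch with a hypothetical helper:

	#include <linux/printk.h>
	#include <linux/slab.h>

	struct entity;

	static void remove_entity(struct entity *e, int adapter_nr)
	{
		pr_debug("A(%d) remove E:%p\n", adapter_nr, e);	/* log first */
		kfree(e);					/* free last */
	}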
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index e74df7c..53d487f 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1580,8 +1580,7 @@
 	}
 	if (!(card2 = icn_initcard(port, id2))) {
 		printk(KERN_INFO
-		       "icn: (%s) half ICN-4B, port 0x%x added\n",
-		       card2->interface.id, port);
+		       "icn: (%s) half ICN-4B, port 0x%x added\n", id2, port);
 		return 0;
 	}
 	card->doubleS0 = 1;
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca997bd..92acc81 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -336,7 +336,7 @@
 		 */
 		sc_adapter[cinst]->interrupt = irq[b];
 		if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
-				IRQF_DISABLED, interface->id,
+				0, interface->id,
 				(void *)(unsigned long) cinst))
 		{
 			kfree(sc_adapter[cinst]->channel);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index b3256ff..d0a1d8a 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -229,7 +229,7 @@
  * make a hypercall.  We hand the physical address of the virtqueue so the Host
  * knows which virtqueue we're talking about.
  */
-static void lg_notify(struct virtqueue *vq)
+static bool lg_notify(struct virtqueue *vq)
 {
 	/*
 	 * We store our virtqueue information in the "priv" pointer of the
@@ -238,6 +238,7 @@
 	struct lguest_vq_info *lvq = vq->priv;
 
 	hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
+	return true;
 }
 
 /* An extern declaration inside a C file is bad form.  Don't do it. */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 5169239..922a1ac 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -157,7 +157,7 @@
 	 * stack, then the address of this call.  This stack layout happens to
 	 * exactly match the stack layout created by an interrupt...
 	 */
-	asm volatile("pushf; lcall *lguest_entry"
+	asm volatile("pushf; lcall *%4"
 		     /*
 		      * This is how we tell GCC that %eax ("a") and %ebx ("b")
 		      * are changed by this routine.  The "=" means output.
@@ -169,7 +169,9 @@
 		      * physical address of the Guest's top-level page
 		      * directory.
 		      */
-		     : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
+		     : "0"(pages), 
+		       "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
+		       "m"(lguest_entry)
 		     /*
 		      * We tell gcc that all these registers could change,
 		      * which means we don't have to save and restore them in
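
The change above stops naming lguest_entry inside the asm template and instead passes it as a fifth operand (%4) under an "m" constraint, letting the compiler choose the addressing mode. A reduced, self-contained sketch of an "m" input operand (x86, GCC syntax; names are illustrative):

	static unsigned long value;

	static unsigned long load_via_mem_operand(void)
	{
		unsigned long ret;

		asm volatile("mov %1, %0"
			     : "=r" (ret)	/* output in any register */
			     : "m" (value));	/* input as a memory operand */
		return ret;
	}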
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30b426e..f2ccbc3 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -297,6 +297,17 @@
          Allow volume managers to mirror logical volumes, also
          needed for live data migration tools such as 'pvmove'.
 
+config DM_LOG_USERSPACE
+	tristate "Mirror userspace logging"
+	depends on DM_MIRROR && NET
+	select CONNECTOR
+	---help---
+	  The userspace logging module provides a mechanism for
+	  relaying the dm-dirty-log API to userspace.  Log designs
+	  which are more suited to userspace implementation (e.g.
+	  shared storage logs) or experimental logs can be implemented
+	  by leveraging this framework.
+
 config DM_RAID
        tristate "RAID 1/4/5/6/10 target"
        depends on BLK_DEV_DM
@@ -323,17 +334,6 @@
 	 RAID-5, RAID-6 distributes the syndromes across the drives
 	 in one of the available parity distribution methods.
 
-config DM_LOG_USERSPACE
-	tristate "Mirror userspace logging"
-	depends on DM_MIRROR && NET
-	select CONNECTOR
-	---help---
-	  The userspace logging module provides a mechanism for
-	  relaying the dm-dirty-log API to userspace.  Log designs
-	  which are more suited to userspace implementation (e.g.
-	  shared storage logs) or experimental logs can be implemented
-	  by leveraging this framework.
-
 config DM_ZERO
 	tristate "Zero target"
 	depends on BLK_DEV_DM
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index f950c9d..2638417 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -13,15 +13,8 @@
 	---help---
 	Don't select this option unless you're a developer
 
-	Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
-	bool "Extended runtime checks"
-	depends on BCACHE
-	---help---
-	Don't select this option unless you're a developer
-
-	Enables extra runtime checks which significantly affect performance
+	Enables extra debugging tools and allows expensive runtime checks
+	to be turned on.
 
 config BCACHE_CLOSURES_DEBUG
 	bool "Debug closures"
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index e45f557..2b46bf1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,13 +63,12 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/blkdev.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
 
-#define MAX_IN_FLIGHT_DISCARDS		8U
-
 /* Bucket heap / gen */
 
 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
@@ -121,75 +120,6 @@
 	mutex_unlock(&c->bucket_lock);
 }
 
-/* Discard/TRIM */
-
-struct discard {
-	struct list_head	list;
-	struct work_struct	work;
-	struct cache		*ca;
-	long			bucket;
-
-	struct bio		bio;
-	struct bio_vec		bv;
-};
-
-static void discard_finish(struct work_struct *w)
-{
-	struct discard *d = container_of(w, struct discard, work);
-	struct cache *ca = d->ca;
-	char buf[BDEVNAME_SIZE];
-
-	if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
-		pr_notice("discard error on %s, disabling",
-			 bdevname(ca->bdev, buf));
-		d->ca->discard = 0;
-	}
-
-	mutex_lock(&ca->set->bucket_lock);
-
-	fifo_push(&ca->free, d->bucket);
-	list_add(&d->list, &ca->discards);
-	atomic_dec(&ca->discards_in_flight);
-
-	mutex_unlock(&ca->set->bucket_lock);
-
-	closure_wake_up(&ca->set->bucket_wait);
-	wake_up_process(ca->alloc_thread);
-
-	closure_put(&ca->set->cl);
-}
-
-static void discard_endio(struct bio *bio, int error)
-{
-	struct discard *d = container_of(bio, struct discard, bio);
-	schedule_work(&d->work);
-}
-
-static void do_discard(struct cache *ca, long bucket)
-{
-	struct discard *d = list_first_entry(&ca->discards,
-					     struct discard, list);
-
-	list_del(&d->list);
-	d->bucket = bucket;
-
-	atomic_inc(&ca->discards_in_flight);
-	closure_get(&ca->set->cl);
-
-	bio_init(&d->bio);
-
-	d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket);
-	d->bio.bi_bdev		= ca->bdev;
-	d->bio.bi_rw		= REQ_WRITE|REQ_DISCARD;
-	d->bio.bi_max_vecs	= 1;
-	d->bio.bi_io_vec	= d->bio.bi_inline_vecs;
-	d->bio.bi_size		= bucket_bytes(ca);
-	d->bio.bi_end_io	= discard_endio;
-	bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
-	submit_bio(0, &d->bio);
-}
-
 /* Allocation */
 
 static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -280,7 +210,7 @@
 			 * multiple times when it can't do anything
 			 */
 			ca->invalidate_needs_gc = 1;
-			bch_queue_gc(ca->set);
+			wake_up_gc(ca->set);
 			return;
 		}
 
@@ -305,7 +235,7 @@
 
 		if (++checked >= ca->sb.nbuckets) {
 			ca->invalidate_needs_gc = 1;
-			bch_queue_gc(ca->set);
+			wake_up_gc(ca->set);
 			return;
 		}
 	}
@@ -330,7 +260,7 @@
 
 		if (++checked >= ca->sb.nbuckets / 2) {
 			ca->invalidate_needs_gc = 1;
-			bch_queue_gc(ca->set);
+			wake_up_gc(ca->set);
 			return;
 		}
 	}
@@ -398,16 +328,18 @@
 			else
 				break;
 
-			allocator_wait(ca, (int) fifo_free(&ca->free) >
-				       atomic_read(&ca->discards_in_flight));
-
 			if (ca->discard) {
-				allocator_wait(ca, !list_empty(&ca->discards));
-				do_discard(ca, bucket);
-			} else {
-				fifo_push(&ca->free, bucket);
-				closure_wake_up(&ca->set->bucket_wait);
+				mutex_unlock(&ca->set->bucket_lock);
+				blkdev_issue_discard(ca->bdev,
+					bucket_to_sector(ca->set, bucket),
+					ca->sb.block_size, GFP_KERNEL, 0);
+				mutex_lock(&ca->set->bucket_lock);
 			}
+
+			allocator_wait(ca, !fifo_full(&ca->free));
+
+			fifo_push(&ca->free, bucket);
+			wake_up(&ca->set->bucket_wait);
 		}
 
 		/*
@@ -433,16 +365,40 @@
 	}
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 {
-	long r = -1;
-again:
+	DEFINE_WAIT(w);
+	struct bucket *b;
+	long r;
+
+	/* fastpath */
+	if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+		fifo_pop(&ca->free, r);
+		goto out;
+	}
+
+	if (!wait)
+		return -1;
+
+	while (1) {
+		if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+			fifo_pop(&ca->free, r);
+			break;
+		}
+
+		prepare_to_wait(&ca->set->bucket_wait, &w,
+				TASK_UNINTERRUPTIBLE);
+
+		mutex_unlock(&ca->set->bucket_lock);
+		schedule();
+		mutex_lock(&ca->set->bucket_lock);
+	}
+
+	finish_wait(&ca->set->bucket_wait, &w);
+out:
 	wake_up_process(ca->alloc_thread);
 
-	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
-	    fifo_pop(&ca->free, r)) {
-		struct bucket *b = ca->buckets + r;
-#ifdef CONFIG_BCACHE_EDEBUG
+	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
 
@@ -455,36 +411,23 @@
 			BUG_ON(i == r);
 		fifo_for_each(i, &ca->unused, iter)
 			BUG_ON(i == r);
-#endif
-		BUG_ON(atomic_read(&b->pin) != 1);
-
-		SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
-		if (watermark <= WATERMARK_METADATA) {
-			SET_GC_MARK(b, GC_MARK_METADATA);
-			b->prio = BTREE_PRIO;
-		} else {
-			SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
-			b->prio = INITIAL_PRIO;
-		}
-
-		return r;
 	}
 
-	trace_bcache_alloc_fail(ca);
+	b = ca->buckets + r;
 
-	if (cl) {
-		closure_wait(&ca->set->bucket_wait, cl);
+	BUG_ON(atomic_read(&b->pin) != 1);
 
-		if (closure_blocking(cl)) {
-			mutex_unlock(&ca->set->bucket_lock);
-			closure_sync(cl);
-			mutex_lock(&ca->set->bucket_lock);
-			goto again;
-		}
+	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+	if (watermark <= WATERMARK_METADATA) {
+		SET_GC_MARK(b, GC_MARK_METADATA);
+		b->prio = BTREE_PRIO;
+	} else {
+		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		b->prio = INITIAL_PRIO;
 	}
 
-	return -1;
+	return r;
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -501,7 +444,7 @@
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-			   struct bkey *k, int n, struct closure *cl)
+			   struct bkey *k, int n, bool wait)
 {
 	int i;
 
@@ -514,7 +457,7 @@
 
 	for (i = 0; i < n; i++) {
 		struct cache *ca = c->cache_by_alloc[i];
-		long b = bch_bucket_alloc(ca, watermark, cl);
+		long b = bch_bucket_alloc(ca, watermark, wait);
 
 		if (b == -1)
 			goto err;
@@ -529,22 +472,202 @@
 	return 0;
 err:
 	bch_bucket_free(c, k);
-	__bkey_put(c, k);
+	bkey_put(c, k);
 	return -1;
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-			 struct bkey *k, int n, struct closure *cl)
+			 struct bkey *k, int n, bool wait)
 {
 	int ret;
 	mutex_lock(&c->bucket_lock);
-	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+	ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
 	mutex_unlock(&c->bucket_lock);
 	return ret;
 }
 
+/* Sector allocator */
+
+struct open_bucket {
+	struct list_head	list;
+	unsigned		last_write_point;
+	unsigned		sectors_free;
+	BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache at the
+ * same time, you'll get better cache utilization if you try to segregate their
+ * data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+					    const struct bkey *search,
+					    unsigned write_point,
+					    struct bkey *alloc)
+{
+	struct open_bucket *ret, *ret_task = NULL;
+
+	list_for_each_entry_reverse(ret, &c->data_buckets, list)
+		if (!bkey_cmp(&ret->key, search))
+			goto found;
+		else if (ret->last_write_point == write_point)
+			ret_task = ret;
+
+	ret = ret_task ?: list_first_entry(&c->data_buckets,
+					   struct open_bucket, list);
+found:
+	if (!ret->sectors_free && KEY_PTRS(alloc)) {
+		ret->sectors_free = c->sb.bucket_size;
+		bkey_copy(&ret->key, alloc);
+		bkey_init(alloc);
+	}
+
+	if (!ret->sectors_free)
+		ret = NULL;
+
+	return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If @wait is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+		       unsigned write_point, unsigned write_prio, bool wait)
+{
+	struct open_bucket *b;
+	BKEY_PADDED(key) alloc;
+	unsigned i;
+
+	/*
+	 * We might have to allocate a new bucket, which we can't do with a
+	 * spinlock held. So if we have to allocate, we drop the lock, allocate
+	 * and then retry. KEY_PTRS() indicates whether alloc points to
+	 * allocated bucket(s).
+	 */
+
+	bkey_init(&alloc.key);
+	spin_lock(&c->data_bucket_lock);
+
+	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+		unsigned watermark = write_prio
+			? WATERMARK_MOVINGGC
+			: WATERMARK_NONE;
+
+		spin_unlock(&c->data_bucket_lock);
+
+		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+			return false;
+
+		spin_lock(&c->data_bucket_lock);
+	}
+
+	/*
+	 * If we had to allocate, we might race and not need to allocate the
+	 * second time we call pick_data_bucket(). If we allocated a bucket but
+	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+	 */
+	if (KEY_PTRS(&alloc.key))
+		bkey_put(c, &alloc.key);
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		EBUG_ON(ptr_stale(c, &b->key, i));
+
+	/* Set up the pointer to the space we're allocating: */
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		k->ptr[i] = b->key.ptr[i];
+
+	sectors = min(sectors, b->sectors_free);
+
+	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+	SET_KEY_SIZE(k, sectors);
+	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+	/*
+	 * Move b to the end of the lru, and keep track of what this bucket was
+	 * last used for:
+	 */
+	list_move_tail(&b->list, &c->data_buckets);
+	bkey_copy_key(&b->key, k);
+	b->last_write_point = write_point;
+
+	b->sectors_free	-= sectors;
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++) {
+		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+		atomic_long_add(sectors,
+				&PTR_CACHE(c, &b->key, i)->sectors_written);
+	}
+
+	if (b->sectors_free < c->sb.block_size)
+		b->sectors_free = 0;
+
+	/*
+	 * k takes refcounts on the buckets it points to until it's inserted
+	 * into the btree, but if we're done with this bucket we just transfer
+	 * pick_data_bucket()'s refcount.
+	 */
+	if (b->sectors_free)
+		for (i = 0; i < KEY_PTRS(&b->key); i++)
+			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+	spin_unlock(&c->data_bucket_lock);
+	return true;
+}
+
 /* Init */
 
+void bch_open_buckets_free(struct cache_set *c)
+{
+	struct open_bucket *b;
+
+	while (!list_empty(&c->data_buckets)) {
+		b = list_first_entry(&c->data_buckets,
+				     struct open_bucket, list);
+		list_del(&b->list);
+		kfree(b);
+	}
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+	int i;
+
+	spin_lock_init(&c->data_bucket_lock);
+
+	for (i = 0; i < 6; i++) {
+		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+		if (!b)
+			return -ENOMEM;
+
+		list_add(&b->list, &c->data_buckets);
+	}
+
+	return 0;
+}
+
 int bch_cache_allocator_start(struct cache *ca)
 {
 	struct task_struct *k = kthread_run(bch_allocator_thread,
@@ -556,22 +679,8 @@
 	return 0;
 }
 
-void bch_cache_allocator_exit(struct cache *ca)
-{
-	struct discard *d;
-
-	while (!list_empty(&ca->discards)) {
-		d = list_first_entry(&ca->discards, struct discard, list);
-		cancel_work_sync(&d->work);
-		list_del(&d->list);
-		kfree(d);
-	}
-}
-
 int bch_cache_allocator_init(struct cache *ca)
 {
-	unsigned i;
-
 	/*
 	 * Reserve:
 	 * Prio/gen writes first
@@ -589,15 +698,5 @@
 	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
 		ca->watermark[WATERMARK_MOVINGGC];
 
-	for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
-		struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
-		if (!d)
-			return -ENOMEM;
-
-		d->ca = ca;
-		INIT_WORK(&d->work, discard_finish);
-		list_add(&d->list, &ca->discards);
-	}
-
 	return 0;
 }
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0f12382..4beb55a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -177,6 +177,7 @@
 
 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
+#include <linux/bcache.h>
 #include <linux/bio.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
@@ -210,168 +211,6 @@
 #define GC_MARK_METADATA	2
 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
 
-struct bkey {
-	uint64_t	high;
-	uint64_t	low;
-	uint64_t	ptr[];
-};
-
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD		8
-
-#define BKEY_PADDED(key)					\
-	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV			0
-#define BCACHE_SB_VERSION_BDEV			1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
-#define BCACHE_SB_MAX_VERSION			4
-
-#define SB_SECTOR		8
-#define SB_SIZE			4096
-#define SB_LABEL_SIZE		32
-#define SB_JOURNAL_BUCKETS	256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET	8
-
-#define BDEV_DATA_START_DEFAULT	16	/* sectors */
-
-struct cache_sb {
-	uint64_t		csum;
-	uint64_t		offset;	/* sector where this sb was written */
-	uint64_t		version;
-
-	uint8_t			magic[16];
-
-	uint8_t			uuid[16];
-	union {
-		uint8_t		set_uuid[16];
-		uint64_t	set_magic;
-	};
-	uint8_t			label[SB_LABEL_SIZE];
-
-	uint64_t		flags;
-	uint64_t		seq;
-	uint64_t		pad[8];
-
-	union {
-	struct {
-		/* Cache devices */
-		uint64_t	nbuckets;	/* device size */
-
-		uint16_t	block_size;	/* sectors */
-		uint16_t	bucket_size;	/* sectors */
-
-		uint16_t	nr_in_set;
-		uint16_t	nr_this_dev;
-	};
-	struct {
-		/* Backing devices */
-		uint64_t	data_offset;
-
-		/*
-		 * block_size from the cache device section is still used by
-		 * backing devices, so don't add anything here until we fix
-		 * things to not need it for backing devices anymore
-		 */
-	};
-	};
-
-	uint32_t		last_mount;	/* time_t */
-
-	uint16_t		first_bucket;
-	union {
-		uint16_t	njournal_buckets;
-		uint16_t	keys;
-	};
-	uint64_t		d[SB_JOURNAL_BUCKETS];	/* journal buckets */
-};
-
-BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU	0U
-#define CACHE_REPLACEMENT_FIFO	1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH	0U
-#define CACHE_MODE_WRITEBACK	1U
-#define CACHE_MODE_WRITEAROUND	2U
-#define CACHE_MODE_NONE		3U
-BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE		0U
-#define BDEV_STATE_CLEAN	1U
-#define BDEV_STATE_DIRTY	2U
-#define BDEV_STATE_STALE	3U
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_VERSION	1
-
-/*
- * This is the on disk format for btree nodes - a btree node on disk is a list
- * of these; within each set the keys are sorted
- */
-struct bset {
-	uint64_t		csum;
-	uint64_t		magic;
-	uint64_t		seq;
-	uint32_t		version;
-	uint32_t		keys;
-
-	union {
-		struct bkey	start[0];
-		uint64_t	d[0];
-	};
-};
-
-/*
- * On disk format for priorities and gens - see super.c near prio_write() for
- * more.
- */
-struct prio_set {
-	uint64_t		csum;
-	uint64_t		magic;
-	uint64_t		seq;
-	uint32_t		version;
-	uint32_t		pad;
-
-	uint64_t		next_bucket;
-
-	struct bucket_disk {
-		uint16_t	prio;
-		uint8_t		gen;
-	} __attribute((packed)) data[];
-};
-
-struct uuid_entry {
-	union {
-		struct {
-			uint8_t		uuid[16];
-			uint8_t		label[32];
-			uint32_t	first_reg;
-			uint32_t	last_reg;
-			uint32_t	invalidated;
-
-			uint32_t	flags;
-			/* Size of flash only volumes */
-			uint64_t	sectors;
-		};
-
-		uint8_t	pad[128];
-	};
-};
-
-BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
-
 #include "journal.h"
 #include "stats.h"
 struct search;
@@ -384,8 +223,6 @@
 	void			*private;
 };
 
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
 struct keybuf {
 	struct bkey		last_scanned;
 	spinlock_t		lock;
@@ -400,7 +237,7 @@
 
 	struct rb_root		keys;
 
-#define KEYBUF_NR		100
+#define KEYBUF_NR		500
 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 };
 
@@ -429,16 +266,15 @@
 
 	struct gendisk		*disk;
 
-	/* If nonzero, we're closing */
-	atomic_t		closing;
+	unsigned long		flags;
+#define BCACHE_DEV_CLOSING	0
+#define BCACHE_DEV_DETACHING	1
+#define BCACHE_DEV_UNLINK_DONE	2
 
-	/* If nonzero, we're detaching/unregistering from cache set */
-	atomic_t		detaching;
-	int			flush_done;
-
-	uint64_t		nr_stripes;
-	unsigned		stripe_size_bits;
+	unsigned		nr_stripes;
+	unsigned		stripe_size;
 	atomic_t		*stripe_sectors_dirty;
+	unsigned long		*full_dirty_stripes;
 
 	unsigned long		sectors_dirty_last;
 	long			sectors_dirty_derivative;
@@ -509,7 +345,7 @@
 
 	/* Limit number of writeback bios in flight */
 	struct semaphore	in_flight;
-	struct closure_with_timer writeback;
+	struct task_struct	*writeback_thread;
 
 	struct keybuf		writeback_keys;
 
@@ -527,8 +363,8 @@
 	unsigned		sequential_cutoff;
 	unsigned		readahead;
 
-	unsigned		sequential_merge:1;
 	unsigned		verify:1;
+	unsigned		bypass_torture_test:1;
 
 	unsigned		partial_stripes_expensive:1;
 	unsigned		writeback_metadata:1;
@@ -620,15 +456,6 @@
 
 	bool			discard; /* Get rid of? */
 
-	/*
-	 * We preallocate structs for issuing discards to buckets, and keep them
-	 * on this list when they're not in use; do_discard() issues discards
-	 * whenever there's work to do and is called by free_some_buckets() and
-	 * when a discard finishes.
-	 */
-	atomic_t		discards_in_flight;
-	struct list_head	discards;
-
 	struct journal_device	journal;
 
 	/* The rest of this all shows up in sysfs */
@@ -649,7 +476,6 @@
 
 	size_t			nkeys;
 	uint64_t		data;	/* sectors */
-	uint64_t		dirty;	/* sectors */
 	unsigned		in_use; /* percent */
 };
 
@@ -744,8 +570,8 @@
 	 * basically a lock for this that we can wait on asynchronously. The
 	 * btree_root() macro releases the lock when it returns.
 	 */
-	struct closure		*try_harder;
-	struct closure_waitlist	try_wait;
+	struct task_struct	*try_harder;
+	wait_queue_head_t	try_wait;
 	uint64_t		try_harder_start;
 
 	/*
@@ -759,7 +585,7 @@
 	 * written.
 	 */
 	atomic_t		prio_blocked;
-	struct closure_waitlist	bucket_wait;
+	wait_queue_head_t	bucket_wait;
 
 	/*
 	 * For any bio we don't skip we subtract the number of sectors from
@@ -782,7 +608,7 @@
 	struct gc_stat		gc_stats;
 	size_t			nbuckets;
 
-	struct closure_with_waitlist gc;
+	struct task_struct	*gc_thread;
 	/* Where in the btree gc currently is */
 	struct bkey		gc_done;
 
@@ -795,11 +621,10 @@
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t		sectors_to_gc;
 
-	struct closure		moving_gc;
-	struct closure_waitlist	moving_gc_wait;
+	wait_queue_head_t	moving_gc_wait;
 	struct keybuf		moving_gc_keys;
 	/* Number of moving GC bios in flight */
-	atomic_t		in_flight;
+	struct semaphore	moving_in_flight;
 
 	struct btree		*root;
 
@@ -841,22 +666,27 @@
 	unsigned		congested_read_threshold_us;
 	unsigned		congested_write_threshold_us;
 
-	spinlock_t		sort_time_lock;
 	struct time_stats	sort_time;
 	struct time_stats	btree_gc_time;
 	struct time_stats	btree_split_time;
-	spinlock_t		btree_read_time_lock;
 	struct time_stats	btree_read_time;
 	struct time_stats	try_harder_time;
 
 	atomic_long_t		cache_read_races;
 	atomic_long_t		writeback_keys_done;
 	atomic_long_t		writeback_keys_failed;
+
+	enum			{
+		ON_ERROR_UNREGISTER,
+		ON_ERROR_PANIC,
+	}			on_error;
 	unsigned		error_limit;
 	unsigned		error_decay;
+
 	unsigned short		journal_delay_ms;
 	unsigned		verify:1;
 	unsigned		key_merging_disabled:1;
+	unsigned		expensive_debug_checks:1;
 	unsigned		gc_always_rewrite:1;
 	unsigned		shrinker_disabled:1;
 	unsigned		copy_gc_enabled:1;
@@ -865,21 +695,6 @@
 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 };
 
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
-	return c->key_merging_disabled;
-#else
-	return 0;
-#endif
-}
-
-static inline bool SB_IS_BDEV(const struct cache_sb *sb)
-{
-	return sb->version == BCACHE_SB_VERSION_BDEV
-		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
 struct bbio {
 	unsigned		submit_time_us;
 	union {
@@ -933,59 +748,6 @@
 #define prio_buckets(c)					\
 	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
 
-#define JSET_MAGIC		0x245235c1a3625032ULL
-#define PSET_MAGIC		0x6750e15f87337f91ULL
-#define BSET_MAGIC		0x90135c78b99e07f5ULL
-
-#define jset_magic(c)		((c)->sb.set_magic ^ JSET_MAGIC)
-#define pset_magic(c)		((c)->sb.set_magic ^ PSET_MAGIC)
-#define bset_magic(c)		((c)->sb.set_magic ^ BSET_MAGIC)
-
-/* Bkey fields: all units are in sectors */
-
-#define KEY_FIELD(name, field, offset, size)				\
-	BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size)					\
-	static inline uint64_t name(const struct bkey *k, unsigned i)	\
-	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\
-									\
-	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
-	{								\
-		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\
-		k->ptr[i] |= v << offset;				\
-	}
-
-KEY_FIELD(KEY_PTRS,	high, 60, 3)
-KEY_FIELD(HEADER_SIZE,	high, 58, 2)
-KEY_FIELD(KEY_CSUM,	high, 56, 2)
-KEY_FIELD(KEY_PINNED,	high, 55, 1)
-KEY_FIELD(KEY_DIRTY,	high, 36, 1)
-
-KEY_FIELD(KEY_SIZE,	high, 20, 16)
-KEY_FIELD(KEY_INODE,	high, 0,  20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline uint64_t KEY_OFFSET(const struct bkey *k)
-{
-	return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
-{
-	k->low = v;
-}
-
-PTR_FIELD(PTR_DEV,		51, 12)
-PTR_FIELD(PTR_OFFSET,		8,  43)
-PTR_FIELD(PTR_GEN,		0,  8)
-
-#define PTR_CHECK_DEV		((1 << 12) - 1)
-
-#define PTR(gen, offset, dev)						\
-	((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
-
 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 {
 	return s >> c->bucket_bits;
@@ -1024,27 +786,11 @@
 
 /* Btree key macros */
 
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(dev, sector, len)						\
-((struct bkey) {							\
-	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\
-	.low = (sector)							\
-})
-
 static inline void bkey_init(struct bkey *k)
 {
-	*k = KEY(0, 0, 0);
+	*k = ZERO_KEY;
 }
 
-#define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
-#define ZERO_KEY		KEY(0, 0, 0)
-
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
@@ -1094,14 +840,6 @@
 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
 
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
-	unsigned i;
-
-	for (i = 0; i < KEY_PTRS(k); i++)
-		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
 static inline void cached_dev_put(struct cached_dev *dc)
 {
 	if (atomic_dec_and_test(&dc->count))
@@ -1173,13 +911,15 @@
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
 
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
 void bch_bucket_free(struct cache_set *, struct bkey *);
 
 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-			   struct bkey *, int, struct closure *);
+			   struct bkey *, int, bool);
 int bch_bucket_alloc_set(struct cache_set *, unsigned,
-			 struct bkey *, int, struct closure *);
+			 struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+		       unsigned, unsigned, bool);
 
 __printf(2, 3)
 bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1187,7 +927,7 @@
 void bch_prio_write(struct cache *);
 void bch_write_bdev_super(struct cached_dev *, struct closure *);
 
-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
 extern const char * const bch_cache_modes[];
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
@@ -1220,15 +960,14 @@
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
 void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);
 
 int bch_cache_allocator_start(struct cache *ca);
-void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
 
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
 void bch_btree_exit(void);
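
Much of what this hunk deletes from bcache.h (the bkey layout, superblock format, and BITMASK fields) moved to the new <linux/bcache.h> included above. For orientation, a sketch in the style of the BITMASK() accessor generator those definitions rely on; details may differ from the in-tree macro:

	#define BITMASK(name, type, field, offset, size)			\
	static inline uint64_t name(const type *k)				\
	{	return (k->field >> offset) & ~(~0ULL << size); }		\
										\
	static inline void SET_##name(type *k, uint64_t v)			\
	{									\
		k->field &= ~(~(~0ULL << size) << offset);			\
		k->field |= (v & ~(~0ULL << size)) << offset;			\
	}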
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 22d1ae7..7d388b8 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -14,22 +14,12 @@
 
 /* Keylists */
 
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
-{
-	*dest = *src;
-
-	if (src->list == src->d) {
-		size_t n = (uint64_t *) src->top - src->d;
-		dest->top = (struct bkey *) &dest->d[n];
-		dest->list = dest->d;
-	}
-}
-
 int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
 {
-	unsigned oldsize = (uint64_t *) l->top - l->list;
-	unsigned newsize = oldsize + 2 + nptrs;
-	uint64_t *new;
+	size_t oldsize = bch_keylist_nkeys(l);
+	size_t newsize = oldsize + 2 + nptrs;
+	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+	uint64_t *new_keys;
 
 	/* The journalling code doesn't handle the case where the keys to insert
 	 * is bigger than an empty write: If we just return -ENOMEM here,
@@ -45,24 +35,23 @@
 	    roundup_pow_of_two(oldsize) == newsize)
 		return 0;
 
-	new = krealloc(l->list == l->d ? NULL : l->list,
-		       sizeof(uint64_t) * newsize, GFP_NOIO);
+	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
 
-	if (!new)
+	if (!new_keys)
 		return -ENOMEM;
 
-	if (l->list == l->d)
-		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+	if (!old_keys)
+		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
 
-	l->list = new;
-	l->top = (struct bkey *) (&l->list[oldsize]);
+	l->keys_p = new_keys;
+	l->top_p = new_keys + oldsize;
 
 	return 0;
 }
 
 struct bkey *bch_keylist_pop(struct keylist *l)
 {
-	struct bkey *k = l->bottom;
+	struct bkey *k = l->keys;
 
 	if (k == l->top)
 		return NULL;
@@ -73,21 +62,20 @@
 	return l->top = k;
 }
 
+void bch_keylist_pop_front(struct keylist *l)
+{
+	l->top_p -= bkey_u64s(l->keys);
+
+	memmove(l->keys,
+		bkey_next(l->keys),
+		bch_keylist_bytes(l));
+}
+
 /* Pointer validation */
 
-bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
 {
 	unsigned i;
-	char buf[80];
-
-	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
-		goto bad;
-
-	if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
-		goto bad;
-
-	if (!KEY_SIZE(k))
-		return true;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		if (ptr_available(c, k, i)) {
@@ -98,13 +86,83 @@
 			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
 			    bucket <  ca->sb.first_bucket ||
 			    bucket >= ca->sb.nbuckets)
-				goto bad;
+				return true;
 		}
 
 	return false;
+}
+
+bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+	char buf[80];
+
+	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+		goto bad;
+
+	if (__ptr_invalid(c, k))
+		goto bad;
+
+	return false;
 bad:
 	bch_bkey_to_text(buf, sizeof(buf), k);
-	cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
+	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+	return true;
+}
+
+bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+	char buf[80];
+
+	if (!KEY_SIZE(k))
+		return true;
+
+	if (KEY_SIZE(k) > KEY_OFFSET(k))
+		goto bad;
+
+	if (__ptr_invalid(c, k))
+		goto bad;
+
+	return false;
+bad:
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
+	return true;
+}
+
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+				     unsigned ptr)
+{
+	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+	char buf[80];
+
+	if (mutex_trylock(&b->c->bucket_lock)) {
+		if (b->level) {
+			if (KEY_DIRTY(k) ||
+			    g->prio != BTREE_PRIO ||
+			    (b->c->gc_mark_valid &&
+			     GC_MARK(g) != GC_MARK_METADATA))
+				goto err;
+
+		} else {
+			if (g->prio == BTREE_PRIO)
+				goto err;
+
+			if (KEY_DIRTY(k) &&
+			    b->c->gc_mark_valid &&
+			    GC_MARK(g) != GC_MARK_DIRTY)
+				goto err;
+		}
+		mutex_unlock(&b->c->bucket_lock);
+	}
+
+	return false;
+err:
+	mutex_unlock(&b->c->bucket_lock);
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
 	return true;
 }
 
@@ -118,64 +176,29 @@
 	    bch_ptr_invalid(b, k))
 		return true;
 
-	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
-		return true;
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		if (!ptr_available(b->c, k, i))
+			return true;
 
-	for (i = 0; i < KEY_PTRS(k); i++)
-		if (ptr_available(b->c, k, i)) {
-			g = PTR_BUCKET(b->c, k, i);
-			stale = ptr_stale(b->c, k, i);
+		g = PTR_BUCKET(b->c, k, i);
+		stale = ptr_stale(b->c, k, i);
 
-			btree_bug_on(stale > 96, b,
-				     "key too stale: %i, need_gc %u",
-				     stale, b->c->need_gc);
+		btree_bug_on(stale > 96, b,
+			     "key too stale: %i, need_gc %u",
+			     stale, b->c->need_gc);
 
-			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-				     b, "stale dirty pointer");
+		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+			     b, "stale dirty pointer");
 
-			if (stale)
-				return true;
+		if (stale)
+			return true;
 
-#ifdef CONFIG_BCACHE_EDEBUG
-			if (!mutex_trylock(&b->c->bucket_lock))
-				continue;
-
-			if (b->level) {
-				if (KEY_DIRTY(k) ||
-				    g->prio != BTREE_PRIO ||
-				    (b->c->gc_mark_valid &&
-				     GC_MARK(g) != GC_MARK_METADATA))
-					goto bug;
-
-			} else {
-				if (g->prio == BTREE_PRIO)
-					goto bug;
-
-				if (KEY_DIRTY(k) &&
-				    b->c->gc_mark_valid &&
-				    GC_MARK(g) != GC_MARK_DIRTY)
-					goto bug;
-			}
-			mutex_unlock(&b->c->bucket_lock);
-#endif
-		}
+		if (expensive_debug_checks(b->c) &&
+		    ptr_bad_expensive_checks(b, k, i))
+			return true;
+	}
 
 	return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
-	mutex_unlock(&b->c->bucket_lock);
-
-	{
-		char buf[80];
-
-		bch_bkey_to_text(buf, sizeof(buf), k);
-		btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
-			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-	}
-	return true;
-#endif
 }
 
 /* Key/pointer manipulation */
@@ -458,16 +481,8 @@
 
 static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
 {
-#ifdef CONFIG_X86_64
-	asm("shrd %[shift],%[high],%[low]"
-	    : [low] "+Rm" (low)
-	    : [high] "R" (high),
-	    [shift] "ci" (shift)
-	    : "cc");
-#else
 	low >>= shift;
 	low  |= (high << 1) << (63U - shift);
-#endif
 	return low;
 }
 
@@ -686,7 +701,7 @@
 	} else
 		get_random_bytes(&i->seq, sizeof(uint64_t));
 
-	i->magic	= bset_magic(b->c);
+	i->magic	= bset_magic(&b->c->sb);
 	i->version	= 0;
 	i->keys		= 0;
 
@@ -824,16 +839,16 @@
 	} else
 		i = bset_search_write_set(b, t, search);
 
-#ifdef CONFIG_BCACHE_EDEBUG
-	BUG_ON(bset_written(b, t) &&
-	       i.l != t->data->start &&
-	       bkey_cmp(tree_to_prev_bkey(t,
-		  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
-			search) > 0);
+	if (expensive_debug_checks(b->c)) {
+		BUG_ON(bset_written(b, t) &&
+		       i.l != t->data->start &&
+		       bkey_cmp(tree_to_prev_bkey(t,
+			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+				search) > 0);
 
-	BUG_ON(i.r != end(t->data) &&
-	       bkey_cmp(i.r, search) <= 0);
-#endif
+		BUG_ON(i.r != end(t->data) &&
+		       bkey_cmp(i.r, search) <= 0);
+	}
 
 	while (likely(i.l != i.r) &&
 	       bkey_cmp(i.l, search) <= 0)
@@ -844,6 +859,13 @@
 
 /* Btree iterator */
 
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
 static inline bool btree_iter_cmp(struct btree_iter_set l,
 				  struct btree_iter_set r)
 {
@@ -867,12 +889,16 @@
 }
 
 struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
-			       struct bkey *search, struct bset_tree *start)
+				   struct bkey *search, struct bset_tree *start)
 {
 	struct bkey *ret = NULL;
 	iter->size = ARRAY_SIZE(iter->data);
 	iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+	iter->b = b;
+#endif
+
 	for (; start <= &b->sets[b->nsets]; start++) {
 		ret = bch_bset_search(b, start, search);
 		bch_btree_iter_push(iter, ret, end(start->data));
@@ -887,6 +913,8 @@
 	struct bkey *ret = NULL;
 
 	if (!btree_iter_end(iter)) {
+		bch_btree_iter_next_check(iter);
+
 		ret = iter->data->k;
 		iter->data->k = bkey_next(iter->data->k);
 
@@ -916,14 +944,6 @@
 	return ret;
 }
 
-struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
-{
-	struct btree_iter iter;
-
-	bch_btree_iter_init(b, &iter, search);
-	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-}
-
 /* Mergesort */
 
 static void sort_key_next(struct btree_iter *iter,
@@ -998,7 +1018,6 @@
 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
 
 	pr_debug("sorted %i keys", out->keys);
-	bch_check_key_order(b, out);
 }
 
 static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1029,7 +1048,7 @@
 		 * memcpy()
 		 */
 
-		out->magic	= bset_magic(b->c);
+		out->magic	= bset_magic(&b->c->sb);
 		out->seq	= b->sets[0].data->seq;
 		out->version	= b->sets[0].data->version;
 		swap(out, b->sets[0].data);
@@ -1050,24 +1069,21 @@
 	if (b->written)
 		bset_build_written_tree(b);
 
-	if (!start) {
-		spin_lock(&b->c->sort_time_lock);
+	if (!start)
 		bch_time_stats_update(&b->c->sort_time, start_time);
-		spin_unlock(&b->c->sort_time_lock);
-	}
 }
 
 void bch_btree_sort_partial(struct btree *b, unsigned start)
 {
-	size_t oldsize = 0, order = b->page_order, keys = 0;
+	size_t order = b->page_order, keys = 0;
 	struct btree_iter iter;
+	int oldsize = bch_count_data(b);
+
 	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
 
 	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
 	       (b->sets[b->nsets].size || b->nsets));
 
-	if (b->written)
-		oldsize = bch_count_data(b);
-
 	if (start) {
 		unsigned i;
@@ -1083,7 +1099,7 @@
 
 	__btree_sort(b, &iter, start, order, false);
 
-	EBUG_ON(b->written && bch_count_data(b) != oldsize);
+	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 
 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
@@ -1101,9 +1117,7 @@
 
 	btree_mergesort(b, new->sets->data, &iter, false, true);
 
-	spin_lock(&b->c->sort_time_lock);
 	bch_time_stats_update(&b->c->sort_time, start_time);
-	spin_unlock(&b->c->sort_time_lock);
 
 	bkey_copy_key(&new->key, &b->key);
 	new->sets->size = 0;
@@ -1148,16 +1162,16 @@
 /* Sysfs stuff */
 
 struct bset_stats {
+	struct btree_op op;
 	size_t nodes;
 	size_t sets_written, sets_unwritten;
 	size_t bytes_written, bytes_unwritten;
 	size_t floats, failed;
 };
 
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
-			    struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
 {
-	struct bkey *k;
+	struct bset_stats *stats = container_of(op, struct bset_stats, op);
 	unsigned i;
 
 	stats->nodes++;
@@ -1182,30 +1196,19 @@
 		}
 	}
 
-	if (b->level) {
-		struct btree_iter iter;
-
-		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
-			int ret = btree(bset_stats, k, b, op, stats);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
+	return MAP_CONTINUE;
 }
 
 int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
-	struct btree_op op;
 	struct bset_stats t;
 	int ret;
 
-	bch_btree_op_init_stack(&op);
 	memset(&t, 0, sizeof(struct bset_stats));
+	bch_btree_op_init(&t.op, -1);
 
-	ret = btree_root(bset_stats, c, &op, &t);
-	if (ret)
+	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+	if (ret < 0)
 		return ret;
 
 	return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae115a2..1d3c24f 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -148,6 +148,9 @@
 
 struct btree_iter {
 	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree *b;
+#endif
 	struct btree_iter_set {
 		struct bkey *k, *end;
 	} data[MAX_BSETS];
@@ -193,54 +196,26 @@
 		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
-static inline size_t bkey_u64s(const struct bkey *k)
-{
-	BUG_ON(KEY_CSUM(k) > 1);
-	return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
-}
-
-static inline size_t bkey_bytes(const struct bkey *k)
-{
-	return bkey_u64s(k) * sizeof(uint64_t);
-}
-
-static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
-{
-	memcpy(dest, src, bkey_bytes(src));
-}
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
-	if (!src)
-		src = &KEY(0, 0, 0);
-
-	SET_KEY_INODE(dest, KEY_INODE(src));
-	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
-	uint64_t *d = (void *) k;
-	return (struct bkey *) (d + bkey_u64s(k));
-}
-
 /* Keylists */
 
 struct keylist {
-	struct bkey		*top;
 	union {
-		uint64_t		*list;
-		struct bkey		*bottom;
+		struct bkey		*keys;
+		uint64_t		*keys_p;
+	};
+	union {
+		struct bkey		*top;
+		uint64_t		*top_p;
 	};
 
 	/* Enough room for btree_split's keys without realloc */
 #define KEYLIST_INLINE		16
-	uint64_t		d[KEYLIST_INLINE];
+	uint64_t		inline_keys[KEYLIST_INLINE];
 };
 
 static inline void bch_keylist_init(struct keylist *l)
 {
-	l->top = (void *) (l->list = l->d);
+	l->top_p = l->keys_p = l->inline_keys;
 }
 
 static inline void bch_keylist_push(struct keylist *l)
@@ -256,17 +231,32 @@
 
 static inline bool bch_keylist_empty(struct keylist *l)
 {
-	return l->top == (void *) l->list;
+	return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+	l->top = l->keys;
 }
 
 static inline void bch_keylist_free(struct keylist *l)
 {
-	if (l->list != l->d)
-		kfree(l->list);
+	if (l->keys_p != l->inline_keys)
+		kfree(l->keys_p);
 }
 
-void bch_keylist_copy(struct keylist *, struct keylist *);
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+	return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+	return bch_keylist_nkeys(l) * sizeof(uint64_t);
+}
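
The keylist rework pairs each bkey pointer with a raw uint64_t alias in a union, so the same storage can be filled key by key (keys/top) or measured and copied as a flat u64 array (keys_p/top_p). Note that bch_keylist_nkeys() therefore counts uint64_ts, not keys; with the bkey layout from the (relocated) bkey_u64s() above, a key carrying two pointers and no checksum occupies 2 + 2 = 4 u64s:

	/* Fragment; assume l is a struct keylist holding exactly one key
	 * with two pointers and no checksum (2 header + 2 pointer u64s): */
	size_t words = bch_keylist_nkeys(&l);	/* 4 uint64_ts, not 1 key */
	size_t bytes = bch_keylist_bytes(&l);	/* 4 * sizeof(uint64_t) = 32 */
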
+
 struct bkey *bch_keylist_pop(struct keylist *);
+void bch_keylist_pop_front(struct keylist *);
 int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
 
 void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@@ -287,7 +277,9 @@
 }
 
 const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+
 bool bch_ptr_bad(struct btree *, const struct bkey *);
 
 static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -311,7 +303,6 @@
 
 typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
 
-struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
 struct bkey *bch_btree_iter_next(struct btree_iter *);
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
 					struct btree *, ptr_filter_fn);
@@ -361,12 +352,30 @@
 struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
 			   const struct bkey *);
 
+/*
+ * Returns the first key that is strictly greater than search
+ */
 static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
 					   const struct bkey *search)
 {
 	return search ? __bch_bset_search(b, t, search) : t->data->start;
 }
 
+#define PRECEDING_KEY(_k)					\
+({								\
+	struct bkey *_ret = NULL;				\
+								\
+	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
+		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
+								\
+		if (!_ret->low)					\
+			_ret->high--;				\
+		_ret->low--;					\
+	}							\
+								\
+	_ret;							\
+})
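
PRECEDING_KEY() builds the key immediately before _k by treating the (high, low) pair of the new KEY() as one 128-bit integer and decrementing it with a borrow; ZERO_KEY has no predecessor, so the macro yields NULL for it. (btree_insert_key() later in this diff uses it to start an iterator at the key preceding START_KEY(k) for back merging.) The borrow logic in isolation, as a runnable userspace sketch:

	#include <assert.h>
	#include <stdint.h>

	/* Decrement a 128-bit value split into high:low, borrowing on wrap. */
	static void dec128(uint64_t *high, uint64_t *low)
	{
		if (!*low)		/* low is about to wrap */
			(*high)--;
		(*low)--;
	}

	int main(void)
	{
		uint64_t high = 1, low = 0;

		dec128(&high, &low);	/* 1:0 -> 0:0xffffffffffffffff */
		assert(high == 0 && low == UINT64_MAX);
		return 0;
	}
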
+
 bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
 void bch_btree_sort_lazy(struct btree *);
 void bch_btree_sort_into(struct btree *, struct btree *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f42fc7e..5e2765a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,12 +23,13 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 #include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/freezer.h>
 #include <linux/hash.h>
+#include <linux/kthread.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
 #include <linux/rcupdate.h>
@@ -88,15 +89,13 @@
  * Test module load/unload
  */
 
-static const char * const op_types[] = {
-	"insert", "replace"
+enum {
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
 };
 
-static const char *op_type(struct btree_op *op)
-{
-	return op_types[op->type];
-}
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
 
@@ -105,23 +104,89 @@
 #define PTR_HASH(c, k)							\
 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
 
-struct workqueue_struct *bch_gc_wq;
 static struct workqueue_struct *btree_io_wq;
 
-void bch_btree_op_init_stack(struct btree_op *op)
+static inline bool should_split(struct btree *b)
 {
-	memset(op, 0, sizeof(struct btree_op));
-	closure_init_stack(&op->cl);
-	op->lock = -1;
-	bch_keylist_init(&op->keys);
+	struct bset *i = write_block(b);
+	return b->written >= btree_blocks(b) ||
+		(b->written + __set_blocks(i, i->keys + 15, b->c)
+		 > btree_blocks(b));
 }
 
+#define insert_lock(s, b)	((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn:		function to call, which will be passed the child node
+ * @key:	key to recurse on
+ * @b:		parent btree node
+ * @op:		pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...)					\
+({									\
+	int _r, l = (b)->level - 1;					\
+	bool _w = l <= (op)->lock;					\
+	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
+	if (!IS_ERR(_child)) {						\
+		_child->parent = (b);					\
+		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
+		rw_unlock(_w, _child);					\
+	} else								\
+		_r = PTR_ERR(_child);					\
+	_r;								\
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn:		function to call, which will be passed the root node
+ * @c:		cache set
+ * @op:		pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...)					\
+({									\
+	int _r = -EINTR;						\
+	do {								\
+		struct btree *_b = (c)->root;				\
+		bool _w = insert_lock(op, _b);				\
+		rw_lock(_w, _b, _b->level);				\
+		if (_b == (c)->root &&					\
+		    _w == insert_lock(op, _b)) {			\
+			_b->parent = NULL;				\
+			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
+		}							\
+		rw_unlock(_w, _b);					\
+		bch_cannibalize_unlock(c);				\
+		if (_r == -ENOSPC) {					\
+			wait_event((c)->try_wait,			\
+				   !(c)->try_harder);			\
+			_r = -EINTR;					\
+		}							\
+	} while (_r == -EINTR);						\
+									\
+	_r;								\
+})
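
The retry protocol these macros implement is worth spelling out: a callee that discovers it holds too weak a lock raises op->lock and returns -EINTR, and btree_root() transparently restarts the traversal from the root with the stronger lock (and likewise after waiting out -ENOSPC from the cannibalize path). A sketch of a conforming callee; bch_btree_frob is hypothetical, but the pattern is the one bch_btree_insert_node() uses later in this diff:

	static int bch_btree_frob(struct btree *b, struct btree_op *op)
	{
		if (should_split(b) && op->lock <= b->c->root->level) {
			/* Need write locks further up: let btree_root()
			 * retry the whole descent with them held. */
			op->lock = b->c->root->level + 1;
			return -EINTR;
		}

		/* ... real work, with locks adequate for this depth ... */
		return 0;
	}
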
+
 /* Btree key manipulation */
 
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+void bkey_put(struct cache_set *c, struct bkey *k)
 {
-	if ((level && KEY_OFFSET(k)) || !level)
-		__bkey_put(c, k);
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i))
+			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
 }
 
 /* Btree IO */
@@ -145,6 +210,10 @@
 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+	iter->b = b;
+#endif
+
 	if (!i->seq)
 		goto err;
 
@@ -160,7 +229,7 @@
 			goto err;
 
 		err = "bad magic";
-		if (i->magic != bset_magic(b->c))
+		if (i->magic != bset_magic(&b->c->sb))
 			goto err;
 
 		err = "bad checksum";
@@ -248,10 +317,7 @@
 		goto err;
 
 	bch_btree_node_read_done(b);
-
-	spin_lock(&b->c->btree_read_time_lock);
 	bch_time_stats_update(&b->c->btree_read_time, start_time);
-	spin_unlock(&b->c->btree_read_time_lock);
 
 	return;
 err:
@@ -327,7 +393,7 @@
 	b->bio = bch_bbio_alloc(b->c);
 
 	b->bio->bi_end_io	= btree_node_write_endio;
-	b->bio->bi_private	= &b->io.cl;
+	b->bio->bi_private	= cl;
 	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
 	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
@@ -383,7 +449,7 @@
 	BUG_ON(b->written >= btree_blocks(b));
 	BUG_ON(b->written && !i->keys);
 	BUG_ON(b->sets->data->seq != i->seq);
-	bch_check_key_order(b, i);
+	bch_check_keys(b, "writing");
 
 	cancel_delayed_work(&b->work);
 
@@ -405,6 +471,15 @@
 		bch_bset_init_next(b);
 }
 
+static void bch_btree_node_write_sync(struct btree *b)
+{
+	struct closure cl;
+
+	closure_init_stack(&cl);
+	bch_btree_node_write(b, &cl);
+	closure_sync(&cl);
+}
+
 static void btree_node_write_work(struct work_struct *w)
 {
 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
@@ -416,7 +491,7 @@
 	rw_unlock(true, b);
 }
 
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
@@ -429,15 +504,15 @@
 
 	set_btree_node_dirty(b);
 
-	if (op && op->journal) {
+	if (journal_ref) {
 		if (w->journal &&
-		    journal_pin_cmp(b->c, w, op)) {
+		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
 			atomic_dec_bug(w->journal);
 			w->journal = NULL;
 		}
 
 		if (!w->journal) {
-			w->journal = op->journal;
+			w->journal = journal_ref;
 			atomic_inc(w->journal);
 		}
 	}
@@ -566,33 +641,32 @@
 	return b;
 }
 
-static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+static int mca_reap(struct btree *b, unsigned min_order, bool flush)
 {
+	struct closure cl;
+
+	closure_init_stack(&cl);
 	lockdep_assert_held(&b->c->bucket_lock);
 
 	if (!down_write_trylock(&b->lock))
 		return -ENOMEM;
 
-	if (b->page_order < min_order) {
+	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+	if (b->page_order < min_order ||
+	    (!flush &&
+	     (btree_node_dirty(b) ||
+	      atomic_read(&b->io.cl.remaining) != -1))) {
 		rw_unlock(true, b);
 		return -ENOMEM;
 	}
 
-	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+	if (btree_node_dirty(b))
+		bch_btree_node_write_sync(b);
 
-	if (cl && btree_node_dirty(b))
-		bch_btree_node_write(b, NULL);
-
-	if (cl)
-		closure_wait_event_async(&b->io.wait, cl,
-			 atomic_read(&b->io.cl.remaining) == -1);
-
-	if (btree_node_dirty(b) ||
-	    !closure_is_unlocked(&b->io.cl) ||
-	    work_pending(&b->work.work)) {
-		rw_unlock(true, b);
-		return -EAGAIN;
-	}
+	/* wait for any in flight btree write */
+	closure_wait_event(&b->io.wait, &cl,
+			   atomic_read(&b->io.cl.remaining) == -1);
 
 	return 0;
 }
@@ -633,7 +707,7 @@
 			break;
 
 		if (++i > 3 &&
-		    !mca_reap(b, NULL, 0)) {
+		    !mca_reap(b, 0, false)) {
 			mca_data_free(b);
 			rw_unlock(true, b);
 			freed++;
@@ -652,7 +726,7 @@
 		list_rotate_left(&c->btree_cache);
 
 		if (!b->accessed &&
-		    !mca_reap(b, NULL, 0)) {
+		    !mca_reap(b, 0, false)) {
 			mca_bucket_free(b);
 			mca_data_free(b);
 			rw_unlock(true, b);
@@ -723,12 +797,9 @@
 {
 	unsigned i;
 
-	/* XXX: doesn't check for errors */
-
-	closure_init_unlocked(&c->gc);
-
 	for (i = 0; i < mca_reserve(c); i++)
-		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+			return -ENOMEM;
 
 	list_splice_init(&c->btree_cache,
 			 &c->btree_cache_freeable);
@@ -775,52 +846,27 @@
 	return b;
 }
 
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
-				     int level, struct closure *cl)
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
 {
-	int ret = -ENOMEM;
-	struct btree *i;
+	struct btree *b;
 
 	trace_bcache_btree_cache_cannibalize(c);
 
-	if (!cl)
-		return ERR_PTR(-ENOMEM);
+	if (!c->try_harder) {
+		c->try_harder = current;
+		c->try_harder_start = local_clock();
+	} else if (c->try_harder != current)
+		return ERR_PTR(-ENOSPC);
 
-	/*
-	 * Trying to free up some memory - i.e. reuse some btree nodes - may
-	 * require initiating IO to flush the dirty part of the node. If we're
-	 * running under generic_make_request(), that IO will never finish and
-	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to
-	 * punt to workqueue and retry.
-	 */
-	if (current->bio_list)
-		return ERR_PTR(-EAGAIN);
+	list_for_each_entry_reverse(b, &c->btree_cache, list)
+		if (!mca_reap(b, btree_order(k), false))
+			return b;
 
-	if (c->try_harder && c->try_harder != cl) {
-		closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
-		return ERR_PTR(-EAGAIN);
-	}
+	list_for_each_entry_reverse(b, &c->btree_cache, list)
+		if (!mca_reap(b, btree_order(k), true))
+			return b;
 
-	c->try_harder = cl;
-	c->try_harder_start = local_clock();
-retry:
-	list_for_each_entry_reverse(i, &c->btree_cache, list) {
-		int r = mca_reap(i, cl, btree_order(k));
-		if (!r)
-			return i;
-		if (r != -ENOMEM)
-			ret = r;
-	}
-
-	if (ret == -EAGAIN &&
-	    closure_blocking(cl)) {
-		mutex_unlock(&c->bucket_lock);
-		closure_sync(cl);
-		mutex_lock(&c->bucket_lock);
-		goto retry;
-	}
-
-	return ERR_PTR(ret);
+	return ERR_PTR(-ENOMEM);
 }
 
 /*
@@ -829,20 +875,21 @@
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+static void bch_cannibalize_unlock(struct cache_set *c)
 {
-	if (c->try_harder == cl) {
+	if (c->try_harder == current) {
 		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
 		c->try_harder = NULL;
-		__closure_wake_up(&c->try_wait);
+		wake_up(&c->try_wait);
 	}
 }
 
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
-			       int level, struct closure *cl)
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
 {
 	struct btree *b;
 
+	BUG_ON(current->bio_list);
+
 	lockdep_assert_held(&c->bucket_lock);
 
 	if (mca_find(c, k))
@@ -852,14 +899,14 @@
 	 * the list. Check if there's any freed nodes there:
 	 */
 	list_for_each_entry(b, &c->btree_cache_freeable, list)
-		if (!mca_reap(b, NULL, btree_order(k)))
+		if (!mca_reap(b, btree_order(k), false))
 			goto out;
 
 	/* We never free struct btree itself, just the memory that holds the on
 	 * disk node. Check the freed list before allocating a new one:
 	 */
 	list_for_each_entry(b, &c->btree_cache_freed, list)
-		if (!mca_reap(b, NULL, 0)) {
+		if (!mca_reap(b, 0, false)) {
 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
 			if (!b->sets[0].data)
 				goto err;
@@ -884,6 +931,7 @@
 
 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
 	b->level	= level;
+	b->parent	= (void *) ~0UL;
 
 	mca_reinit(b);
 
@@ -892,7 +940,7 @@
 	if (b)
 		rw_unlock(true, b);
 
-	b = mca_cannibalize(c, k, level, cl);
+	b = mca_cannibalize(c, k);
 	if (!IS_ERR(b))
 		goto out;
 
@@ -903,17 +951,15 @@
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
  */
 struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
-				 int level, struct btree_op *op)
+				 int level, bool write)
 {
 	int i = 0;
-	bool write = level <= op->lock;
 	struct btree *b;
 
 	BUG_ON(level < 0);
@@ -925,7 +971,7 @@
 			return ERR_PTR(-EAGAIN);
 
 		mutex_lock(&c->bucket_lock);
-		b = mca_alloc(c, k, level, &op->cl);
+		b = mca_alloc(c, k, level);
 		mutex_unlock(&c->bucket_lock);
 
 		if (!b)
@@ -971,7 +1017,7 @@
 	struct btree *b;
 
 	mutex_lock(&c->bucket_lock);
-	b = mca_alloc(c, k, level, NULL);
+	b = mca_alloc(c, k, level);
 	mutex_unlock(&c->bucket_lock);
 
 	if (!IS_ERR_OR_NULL(b)) {
@@ -982,17 +1028,12 @@
 
 /* Btree alloc */
 
-static void btree_node_free(struct btree *b, struct btree_op *op)
+static void btree_node_free(struct btree *b)
 {
 	unsigned i;
 
 	trace_bcache_btree_node_free(b);
 
-	/*
-	 * The BUG_ON() in btree_node_get() implies that we must have a write
-	 * lock on parent to free or even invalidate a node
-	 */
-	BUG_ON(op->lock <= b->level);
 	BUG_ON(b == b->c->root);
 
 	if (btree_node_dirty(b))
@@ -1015,27 +1056,26 @@
 	mutex_unlock(&b->c->bucket_lock);
 }
 
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
-				   struct closure *cl)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
 {
 	BKEY_PADDED(key) k;
 	struct btree *b = ERR_PTR(-EAGAIN);
 
 	mutex_lock(&c->bucket_lock);
 retry:
-	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
 		goto err;
 
+	bkey_put(c, &k.key);
 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
 
-	b = mca_alloc(c, &k.key, level, cl);
+	b = mca_alloc(c, &k.key, level);
 	if (IS_ERR(b))
 		goto err_free;
 
 	if (!b) {
 		cache_bug(c,
 			"Tried to allocate bucket that was in btree cache");
-		__bkey_put(c, &k.key);
 		goto retry;
 	}
 
@@ -1048,7 +1088,6 @@
 	return b;
 err_free:
 	bch_bucket_free(c, &k.key);
-	__bkey_put(c, &k.key);
 err:
 	mutex_unlock(&c->bucket_lock);
 
@@ -1056,16 +1095,31 @@
 	return b;
 }
 
-static struct btree *btree_node_alloc_replacement(struct btree *b,
-						  struct closure *cl)
+static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
 {
-	struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+	struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
 	if (!IS_ERR_OR_NULL(n))
 		bch_btree_sort_into(b, n);
 
 	return n;
 }
 
+static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+{
+	unsigned i;
+
+	bkey_copy(k, &b->key);
+	bkey_copy_key(k, &ZERO_KEY);
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
+
+		SET_PTR_GEN(k, i, g);
+	}
+
+	atomic_inc(&b->c->prio_blocked);
+}
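
make_btree_freeing_key() emits a key whose pointers carry each bucket's generation plus one; once that key lands in the parent, the old node's pointers compare as stale and its buckets become reclaimable (prio_blocked holds off priority writes until then). Staleness itself is a wraparound-safe uint8_t comparison; the sketch below paraphrases gen_after() from bset.h (only its declaration appears in this diff, so the body here is an assumption):

	#include <assert.h>
	#include <stdint.h>

	/* How far gen a is ahead of gen b; 0 if it is not ahead at all. */
	static uint8_t gen_after(uint8_t a, uint8_t b)
	{
		uint8_t r = a - b;

		return r > 128 ? 0 : r;
	}

	int main(void)
	{
		assert(gen_after(4, 3) == 1);	/* bumped bucket gen: stale */
		assert(gen_after(0, 255) == 1);	/* still caught across wrap */
		assert(gen_after(3, 3) == 0);	/* pointer is current */
		return 0;
	}
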
+
 /* Garbage collection */
 
 uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1119,12 +1173,10 @@
 
 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
 
-static int btree_gc_mark_node(struct btree *b, unsigned *keys,
-			      struct gc_stat *gc)
+static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 {
 	uint8_t stale = 0;
-	unsigned last_dev = -1;
-	struct bcache_device *d = NULL;
+	unsigned keys = 0, good_keys = 0;
 	struct bkey *k;
 	struct btree_iter iter;
 	struct bset_tree *t;
@@ -1132,27 +1184,17 @@
 	gc->nodes++;
 
 	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
-		if (last_dev != KEY_INODE(k)) {
-			last_dev = KEY_INODE(k);
-
-			d = KEY_INODE(k) < b->c->nr_uuids
-				? b->c->devices[last_dev]
-				: NULL;
-		}
-
 		stale = max(stale, btree_mark_key(b, k));
+		keys++;
 
 		if (bch_ptr_bad(b, k))
 			continue;
 
-		*keys += bkey_u64s(k);
-
 		gc->key_bytes += bkey_u64s(k);
 		gc->nkeys++;
+		good_keys++;
 
 		gc->data += KEY_SIZE(k);
-		if (KEY_DIRTY(k))
-			gc->dirty += KEY_SIZE(k);
 	}
 
 	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1161,93 +1203,63 @@
 			     bkey_cmp(&b->key, &t->end) < 0,
 			     b, "found short btree key in gc");
 
-	return stale;
+	if (b->c->gc_always_rewrite)
+		return true;
+
+	if (stale > 10)
+		return true;
+
+	if ((keys - good_keys) * 2 > keys)
+		return true;
+
+	return false;
 }
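
btree_gc_mark_node() now folds the rewrite decision into one place: rewrite when the administrator forces it (gc_always_rewrite), when pointers have gone very stale (stale > 10), or when more than half of the keys are bad. The last test cross-multiplies to avoid integer-division truncation; checking it with made-up counts:

	#include <assert.h>

	int main(void)
	{
		unsigned keys = 7, good;

		good = 3;	/* 4 bad of 7: more than half, rewrite */
		assert((keys - good) * 2 > keys);

		good = 4;	/* 3 bad of 7: at most half, keep */
		assert(!((keys - good) * 2 > keys));
		return 0;
	}
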
 
-static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
-				    struct btree_op *op)
-{
-	/*
-	 * We block priorities from being written for the duration of garbage
-	 * collection, so we can't sleep in btree_alloc() ->
-	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
-	 * our closure.
-	 */
-	struct btree *n = btree_node_alloc_replacement(b, NULL);
-
-	if (!IS_ERR_OR_NULL(n)) {
-		swap(b, n);
-		__bkey_put(b->c, &b->key);
-
-		memcpy(k->ptr, b->key.ptr,
-		       sizeof(uint64_t) * KEY_PTRS(&b->key));
-
-		btree_node_free(n, op);
-		up_write(&n->lock);
-	}
-
-	return b;
-}
-
-/*
- * Leaving this at 2 until we've got incremental garbage collection done; it
- * could be higher (and has been tested with 4) except that garbage collection
- * could take much longer, adversely affecting latency.
- */
-#define GC_MERGE_NODES	2U
+#define GC_MERGE_NODES	4U
 
 struct gc_merge_info {
 	struct btree	*b;
-	struct bkey	*k;
 	unsigned	keys;
 };
 
-static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
-			      struct gc_stat *gc, struct gc_merge_info *r)
-{
-	unsigned nodes = 0, keys = 0, blocks;
-	int i;
+static int bch_btree_insert_node(struct btree *, struct btree_op *,
+				 struct keylist *, atomic_t *, struct bkey *);
 
-	while (nodes < GC_MERGE_NODES && r[nodes].b)
+static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+			     struct keylist *keylist, struct gc_stat *gc,
+			     struct gc_merge_info *r)
+{
+	unsigned i, nodes = 0, keys = 0, blocks;
+	struct btree *new_nodes[GC_MERGE_NODES];
+	struct closure cl;
+	struct bkey *k;
+
+	memset(new_nodes, 0, sizeof(new_nodes));
+	closure_init_stack(&cl);
+
+	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
 		keys += r[nodes++].keys;
 
 	blocks = btree_default_blocks(b->c) * 2 / 3;
 
 	if (nodes < 2 ||
 	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
-		return;
+		return 0;
 
-	for (i = nodes - 1; i >= 0; --i) {
-		if (r[i].b->written)
-			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
-
-		if (r[i].b->written)
-			return;
+	for (i = 0; i < nodes; i++) {
+		new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+		if (IS_ERR_OR_NULL(new_nodes[i]))
+			goto out_nocoalesce;
 	}
 
 	for (i = nodes - 1; i > 0; --i) {
-		struct bset *n1 = r[i].b->sets->data;
-		struct bset *n2 = r[i - 1].b->sets->data;
+		struct bset *n1 = new_nodes[i]->sets->data;
+		struct bset *n2 = new_nodes[i - 1]->sets->data;
 		struct bkey *k, *last = NULL;
 
 		keys = 0;
 
-		if (i == 1) {
-			/*
-			 * Last node we're not getting rid of - we're getting
-			 * rid of the node at r[0]. Have to try and fit all of
-			 * the remaining keys into this node; we can't ensure
-			 * they will always fit due to rounding and variable
-			 * length keys (shouldn't be possible in practice,
-			 * though)
-			 */
-			if (__set_blocks(n1, n1->keys + r->keys,
-					 b->c) > btree_blocks(r[i].b))
-				return;
-
-			keys = n2->keys;
-			last = &r->b->key;
-		} else
+		if (i > 1) {
 			for (k = n2->start;
 			     k < end(n2);
 			     k = bkey_next(k)) {
@@ -1258,20 +1270,36 @@
 				last = k;
 				keys += bkey_u64s(k);
 			}
+		} else {
+			/*
+			 * Last node we're not getting rid of - we're getting
+			 * rid of the node at r[0]. Have to try and fit all of
+			 * the remaining keys into this node; we can't ensure
+			 * they will always fit due to rounding and variable
+			 * length keys (shouldn't be possible in practice,
+			 * though)
+			 */
+			if (__set_blocks(n1, n1->keys + n2->keys,
+					 b->c) > btree_blocks(new_nodes[i]))
+				goto out_nocoalesce;
+
+			keys = n2->keys;
+			/* Take the key of the node we're getting rid of */
+			last = &r->b->key;
+		}
 
 		BUG_ON(__set_blocks(n1, n1->keys + keys,
-				    b->c) > btree_blocks(r[i].b));
+				    b->c) > btree_blocks(new_nodes[i]));
 
-		if (last) {
-			bkey_copy_key(&r[i].b->key, last);
-			bkey_copy_key(r[i].k, last);
-		}
+		if (last)
+			bkey_copy_key(&new_nodes[i]->key, last);
 
 		memcpy(end(n1),
 		       n2->start,
 		       (void *) node(n2, keys) - (void *) n2->start);
 
 		n1->keys += keys;
+		r[i].keys = n1->keys;
 
 		memmove(n2->start,
 			node(n2, keys),
@@ -1279,95 +1307,176 @@
 
 		n2->keys -= keys;
 
-		r[i].keys	= n1->keys;
-		r[i - 1].keys	= n2->keys;
+		if (bch_keylist_realloc(keylist,
+					KEY_PTRS(&new_nodes[i]->key), b->c))
+			goto out_nocoalesce;
+
+		bch_btree_node_write(new_nodes[i], &cl);
+		bch_keylist_add(keylist, &new_nodes[i]->key);
 	}
 
-	btree_node_free(r->b, op);
-	up_write(&r->b->lock);
+	for (i = 0; i < nodes; i++) {
+		if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+			goto out_nocoalesce;
+
+		make_btree_freeing_key(r[i].b, keylist->top);
+		bch_keylist_push(keylist);
+	}
+
+	/* We emptied out this node */
+	BUG_ON(new_nodes[0]->sets->data->keys);
+	btree_node_free(new_nodes[0]);
+	rw_unlock(true, new_nodes[0]);
+
+	closure_sync(&cl);
+
+	for (i = 0; i < nodes; i++) {
+		btree_node_free(r[i].b);
+		rw_unlock(true, r[i].b);
+
+		r[i].b = new_nodes[i];
+	}
+
+	bch_btree_insert_node(b, op, keylist, NULL, NULL);
+	BUG_ON(!bch_keylist_empty(keylist));
+
+	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
+	r[nodes - 1].b = ERR_PTR(-EINTR);
 
 	trace_bcache_btree_gc_coalesce(nodes);
-
 	gc->nodes--;
-	nodes--;
 
-	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
-	memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+	/* Invalidated our iterator */
+	return -EINTR;
+
+out_nocoalesce:
+	closure_sync(&cl);
+
+	while ((k = bch_keylist_pop(keylist)))
+		if (!bkey_cmp(k, &ZERO_KEY))
+			atomic_dec(&b->c->prio_blocked);
+
+	for (i = 0; i < nodes; i++)
+		if (!IS_ERR_OR_NULL(new_nodes[i])) {
+			btree_node_free(new_nodes[i]);
+			rw_unlock(true, new_nodes[i]);
+		}
+	return 0;
+}
+
+static unsigned btree_gc_count_keys(struct btree *b)
+{
+	struct bkey *k;
+	struct btree_iter iter;
+	unsigned ret = 0;
+
+	for_each_key_filter(b, k, &iter, bch_ptr_bad)
+		ret += bkey_u64s(k);
+
+	return ret;
 }
 
 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 			    struct closure *writes, struct gc_stat *gc)
 {
-	void write(struct btree *r)
-	{
-		if (!r->written)
-			bch_btree_node_write(r, &op->cl);
-		else if (btree_node_dirty(r))
-			bch_btree_node_write(r, writes);
-
-		up_write(&r->lock);
-	}
-
-	int ret = 0, stale;
 	unsigned i;
+	int ret = 0;
+	bool should_rewrite;
+	struct btree *n;
+	struct bkey *k;
+	struct keylist keys;
+	struct btree_iter iter;
 	struct gc_merge_info r[GC_MERGE_NODES];
+	struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
 
-	memset(r, 0, sizeof(r));
+	bch_keylist_init(&keys);
+	bch_btree_iter_init(b, &iter, &b->c->gc_done);
 
-	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
-		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+	for (i = 0; i < GC_MERGE_NODES; i++)
+		r[i].b = ERR_PTR(-EINTR);
 
-		if (IS_ERR(r->b)) {
-			ret = PTR_ERR(r->b);
-			break;
+	while (1) {
+		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+		if (k) {
+			r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+			if (IS_ERR(r->b)) {
+				ret = PTR_ERR(r->b);
+				break;
+			}
+
+			r->keys = btree_gc_count_keys(r->b);
+
+			ret = btree_gc_coalesce(b, op, &keys, gc, r);
+			if (ret)
+				break;
 		}
 
-		r->keys	= 0;
-		stale = btree_gc_mark_node(r->b, &r->keys, gc);
-
-		if (!b->written &&
-		    (r->b->level || stale > 10 ||
-		     b->c->gc_always_rewrite))
-			r->b = btree_gc_alloc(r->b, r->k, op);
-
-		if (r->b->level)
-			ret = btree_gc_recurse(r->b, op, writes, gc);
-
-		if (ret) {
-			write(r->b);
+		if (!last->b)
 			break;
+
+		if (!IS_ERR(last->b)) {
+			should_rewrite = btree_gc_mark_node(last->b, gc);
+			if (should_rewrite) {
+				n = btree_node_alloc_replacement(last->b,
+								 false);
+
+				if (!IS_ERR_OR_NULL(n)) {
+					bch_btree_node_write_sync(n);
+					bch_keylist_add(&keys, &n->key);
+
+					make_btree_freeing_key(last->b,
+							       keys.top);
+					bch_keylist_push(&keys);
+
+					btree_node_free(last->b);
+
+					bch_btree_insert_node(b, op, &keys,
+							      NULL, NULL);
+					BUG_ON(!bch_keylist_empty(&keys));
+
+					rw_unlock(true, last->b);
+					last->b = n;
+
+					/* Invalidated our iterator */
+					ret = -EINTR;
+					break;
+				}
+			}
+
+			if (last->b->level) {
+				ret = btree_gc_recurse(last->b, op, writes, gc);
+				if (ret)
+					break;
+			}
+
+			bkey_copy_key(&b->c->gc_done, &last->b->key);
+
+			/*
+			 * Must flush leaf nodes before gc ends, since replace
+			 * operations aren't journalled
+			 */
+			if (btree_node_dirty(last->b))
+				bch_btree_node_write(last->b, writes);
+			rw_unlock(true, last->b);
 		}
 
-		bkey_copy_key(&b->c->gc_done, r->k);
+		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
+		r->b = NULL;
 
-		if (!b->written)
-			btree_gc_coalesce(b, op, gc, r);
-
-		if (r[GC_MERGE_NODES - 1].b)
-			write(r[GC_MERGE_NODES - 1].b);
-
-		memmove(&r[1], &r[0],
-			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
-
-		/* When we've got incremental GC working, we'll want to do
-		 * if (should_resched())
-		 *	return -EAGAIN;
-		 */
-		cond_resched();
-#if 0
 		if (need_resched()) {
 			ret = -EAGAIN;
 			break;
 		}
-#endif
 	}
 
-	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
-		write(r[i].b);
+	for (i = 0; i < GC_MERGE_NODES; i++)
+		if (!IS_ERR_OR_NULL(r[i].b)) {
+			if (btree_node_dirty(r[i].b))
+				bch_btree_node_write(r[i].b, writes);
+			rw_unlock(true, r[i].b);
+		}
 
-	/* Might have freed some children, must remove their keys */
-	if (!b->written)
-		bch_btree_sort(b);
+	bch_keylist_free(&keys);
 
 	return ret;
 }
@@ -1376,28 +1485,30 @@
 			     struct closure *writes, struct gc_stat *gc)
 {
 	struct btree *n = NULL;
-	unsigned keys = 0;
-	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+	int ret = 0;
+	bool should_rewrite;
 
-	if (b->level || stale > 10)
-		n = btree_node_alloc_replacement(b, NULL);
+	should_rewrite = btree_gc_mark_node(b, gc);
+	if (should_rewrite) {
+		n = btree_node_alloc_replacement(b, false);
 
-	if (!IS_ERR_OR_NULL(n))
-		swap(b, n);
+		if (!IS_ERR_OR_NULL(n)) {
+			bch_btree_node_write_sync(n);
+			bch_btree_set_root(n);
+			btree_node_free(b);
+			rw_unlock(true, n);
 
-	if (b->level)
+			return -EINTR;
+		}
+	}
+
+	if (b->level) {
 		ret = btree_gc_recurse(b, op, writes, gc);
-
-	if (!b->written || btree_node_dirty(b)) {
-		bch_btree_node_write(b, n ? &op->cl : NULL);
+		if (ret)
+			return ret;
 	}
 
-	if (!IS_ERR_OR_NULL(n)) {
-		closure_sync(&op->cl);
-		bch_btree_set_root(b);
-		btree_node_free(n, op);
-		rw_unlock(true, b);
-	}
+	bkey_copy_key(&b->c->gc_done, &b->key);
 
 	return ret;
 }
@@ -1479,9 +1590,8 @@
 	return available;
 }
 
-static void bch_btree_gc(struct closure *cl)
+static void bch_btree_gc(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
 	int ret;
 	unsigned long available;
 	struct gc_stat stats;
@@ -1493,47 +1603,73 @@
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	btree_gc_start(c);
 
-	atomic_inc(&c->prio_blocked);
+	do {
+		ret = btree_root(gc_root, c, &op, &writes, &stats);
+		closure_sync(&writes);
 
-	ret = btree_root(gc_root, c, &op, &writes, &stats);
-	closure_sync(&op.cl);
-	closure_sync(&writes);
-
-	if (ret) {
-		pr_warn("gc failed!");
-		continue_at(cl, bch_btree_gc, bch_gc_wq);
-	}
-
-	/* Possibly wait for new UUIDs or whatever to hit disk */
-	bch_journal_meta(c, &op.cl);
-	closure_sync(&op.cl);
+		if (ret && ret != -EAGAIN)
+			pr_warn("gc failed!");
+	} while (ret);
 
 	available = bch_btree_gc_finish(c);
-
-	atomic_dec(&c->prio_blocked);
 	wake_up_allocators(c);
 
 	bch_time_stats_update(&c->btree_gc_time, start_time);
 
 	stats.key_bytes *= sizeof(uint64_t);
-	stats.dirty	<<= 9;
 	stats.data	<<= 9;
 	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
 
 	trace_bcache_gc_end(c);
 
-	continue_at(cl, bch_moving_gc, bch_gc_wq);
+	bch_moving_gc(c);
 }
 
-void bch_queue_gc(struct cache_set *c)
+static int bch_gc_thread(void *arg)
 {
-	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+	struct cache_set *c = arg;
+	struct cache *ca;
+	unsigned i;
+
+	while (1) {
+again:
+		bch_btree_gc(c);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+
+		mutex_lock(&c->bucket_lock);
+
+		for_each_cache(ca, c, i)
+			if (ca->invalidate_needs_gc) {
+				mutex_unlock(&c->bucket_lock);
+				set_current_state(TASK_RUNNING);
+				goto again;
+			}
+
+		mutex_unlock(&c->bucket_lock);
+
+		try_to_freeze();
+		schedule();
+	}
+
+	return 0;
+}
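
The gc closure becomes a dedicated kthread, and the loop follows the standard lost-wakeup-free sleep: set TASK_INTERRUPTIBLE first, re-check every wake condition, and only then schedule(). A waker that fires between the checks and schedule() merely turns schedule() into a no-op. The waker side would look like this sketch (wake_gc is hypothetical; bcache's actual wakeups are wired up elsewhere in the series):

	static void wake_gc(struct cache_set *c)
	{
		/* Safe against the race above: if bch_gc_thread is already
		 * TASK_INTERRUPTIBLE, this makes its schedule() return. */
		if (c->gc_thread)
			wake_up_process(c->gc_thread);
	}
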
+
+int bch_gc_thread_start(struct cache_set *c)
+{
+	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	if (IS_ERR(c->gc_thread))
+		return PTR_ERR(c->gc_thread);
+
+	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+	return 0;
 }
 
 /* Initial partial gc */
@@ -1541,9 +1677,9 @@
 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 				   unsigned long **seen)
 {
-	int ret;
+	int ret = 0;
 	unsigned i;
-	struct bkey *k;
+	struct bkey *k, *p = NULL;
 	struct bucket *g;
 	struct btree_iter iter;
 
@@ -1570,31 +1706,32 @@
 	}
 
 	if (b->level) {
-		k = bch_next_recurse_key(b, &ZERO_KEY);
+		bch_btree_iter_init(b, &iter, NULL);
 
-		while (k) {
-			struct bkey *p = bch_next_recurse_key(b, k);
+		do {
+			k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+			if (k)
+				btree_node_prefetch(b->c, k, b->level - 1);
+
 			if (p)
-				btree_node_prefetch(b->c, p, b->level - 1);
+				ret = btree(check_recurse, p, b, op, seen);
 
-			ret = btree(check_recurse, k, b, op, seen);
-			if (ret)
-				return ret;
-
-			k = p;
-		}
+			p = k;
+		} while (p && !ret);
 	}
 
 	return 0;
 }
 
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
 {
 	int ret = -ENOMEM;
 	unsigned i;
 	unsigned long *seen[MAX_CACHES_PER_SET];
+	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1606,7 +1743,7 @@
 		memset(seen[i], 0xFF, n);
 	}
 
-	ret = btree_root(check_recurse, c, op, seen);
+	ret = btree_root(check_recurse, c, &op, seen);
 err:
 	for (i = 0; i < MAX_CACHES_PER_SET; i++)
 		kfree(seen[i]);
@@ -1628,10 +1765,9 @@
 	bch_bset_fix_lookup_table(b, where);
 }
 
-static bool fix_overlapping_extents(struct btree *b,
-				    struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 				    struct btree_iter *iter,
-				    struct btree_op *op)
+				    struct bkey *replace_key)
 {
 	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
 	{
@@ -1659,39 +1795,38 @@
 		 * We might overlap with 0 size extents; we can't skip these
 		 * because if they're in the set we're inserting to we have to
 		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for BTREE_REPLACE
+		 * inserting. But we don't want to check them for replace
 		 * operations.
 		 */
 
-		if (op->type == BTREE_REPLACE &&
-		    KEY_SIZE(k)) {
+		if (replace_key && KEY_SIZE(k)) {
 			/*
 			 * k might have been split since we inserted/found the
 			 * key we're replacing
 			 */
 			unsigned i;
 			uint64_t offset = KEY_START(k) -
-				KEY_START(&op->replace);
+				KEY_START(replace_key);
 
 			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(&op->replace) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
 				goto check_failed;
 
 			/* We didn't find a key that we were supposed to */
 			if (KEY_START(k) > KEY_START(insert) + sectors_found)
 				goto check_failed;
 
-			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+			if (KEY_PTRS(replace_key) != KEY_PTRS(k))
 				goto check_failed;
 
 			/* skip past gen */
 			offset <<= 8;
 
-			BUG_ON(!KEY_PTRS(&op->replace));
+			BUG_ON(!KEY_PTRS(replace_key));
 
-			for (i = 0; i < KEY_PTRS(&op->replace); i++)
-				if (k->ptr[i] != op->replace.ptr[i] + offset)
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
 					goto check_failed;
 
 			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1742,6 +1877,9 @@
 		if (bkey_cmp(insert, k) < 0) {
 			bch_cut_front(insert, k);
 		} else {
+			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+				old_offset = KEY_START(insert);
+
 			if (bkey_written(b, k) &&
 			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
 				/*
@@ -1759,9 +1897,8 @@
 	}
 
 check_failed:
-	if (op->type == BTREE_REPLACE) {
+	if (replace_key) {
 		if (!sectors_found) {
-			op->insert_collision = true;
 			return true;
 		} else if (sectors_found < KEY_SIZE(insert)) {
 			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1774,7 +1911,7 @@
 }
 
 static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k)
+			     struct bkey *k, struct bkey *replace_key)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct bkey *m, *prev;
@@ -1786,22 +1923,23 @@
 
 	if (!b->level) {
 		struct btree_iter iter;
-		struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
 
 		/*
 		 * bset_search() returns the first key that is strictly greater
 		 * than the search key - but for back merging, we want to find
-		 * the first key that is greater than or equal to KEY_START(k) -
-		 * unless KEY_START(k) is 0.
+		 * the previous key.
 		 */
-		if (KEY_OFFSET(&search))
-			SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
-
 		prev = NULL;
-		m = bch_btree_iter_init(b, &iter, &search);
+		m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
 
-		if (fix_overlapping_extents(b, k, &iter, op))
+		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+			op->insert_collision = true;
 			return false;
+		}
+
+		if (KEY_DIRTY(k))
+			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+						     KEY_START(k), KEY_SIZE(k));
 
 		while (m != end(i) &&
 		       bkey_cmp(k, &START_KEY(m)) > 0)
@@ -1825,84 +1963,80 @@
 		if (m != end(i) &&
 		    bch_bkey_try_merge(b, k, m))
 			goto copy;
-	} else
+	} else {
+		BUG_ON(replace_key);
 		m = bch_bset_search(b, &b->sets[b->nsets], k);
+	}
 
 insert:	shift_keys(b, m, k);
 copy:	bkey_copy(m, k);
 merged:
-	if (KEY_DIRTY(k))
-		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-					     KEY_START(k), KEY_SIZE(k));
-
-	bch_check_keys(b, "%u for %s", status, op_type(op));
+	bch_check_keys(b, "%u for %s", status,
+		       replace_key ? "replace" : "insert");
 
 	if (b->level && !KEY_OFFSET(k))
 		btree_current_write(b)->prio_blocked++;
 
-	trace_bcache_btree_insert_key(b, k, op->type, status);
+	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
 
 	return true;
 }
 
-static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
+				  struct keylist *insert_keys,
+				  struct bkey *replace_key)
 {
 	bool ret = false;
-	struct bkey *k;
-	unsigned oldsize = bch_count_data(b);
+	int oldsize = bch_count_data(b);
 
-	while ((k = bch_keylist_pop(&op->keys))) {
-		bkey_put(b->c, k, b->level);
-		ret |= btree_insert_key(b, op, k);
+	while (!bch_keylist_empty(insert_keys)) {
+		struct bset *i = write_block(b);
+		struct bkey *k = insert_keys->keys;
+
+		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
+		    > btree_blocks(b))
+			break;
+
+		if (bkey_cmp(k, &b->key) <= 0) {
+			if (!b->level)
+				bkey_put(b->c, k);
+
+			ret |= btree_insert_key(b, op, k, replace_key);
+			bch_keylist_pop_front(insert_keys);
+		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
+			BKEY_PADDED(key) temp;
+			bkey_copy(&temp.key, insert_keys->keys);
+
+			bch_cut_back(&b->key, &temp.key);
+			bch_cut_front(&b->key, insert_keys->keys);
+
+			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			break;
+		} else {
+			break;
+		}
 	}
 
+	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
+
 	BUG_ON(bch_count_data(b) < oldsize);
 	return ret;
 }
 
-bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
-				   struct bio *bio)
+static int btree_split(struct btree *b, struct btree_op *op,
+		       struct keylist *insert_keys,
+		       struct bkey *replace_key)
 {
-	bool ret = false;
-	uint64_t btree_ptr = b->key.ptr[0];
-	unsigned long seq = b->seq;
-	BKEY_PADDED(k) tmp;
-
-	rw_unlock(false, b);
-	rw_lock(true, b, b->level);
-
-	if (b->key.ptr[0] != btree_ptr ||
-	    b->seq != seq + 1 ||
-	    should_split(b))
-		goto out;
-
-	op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
-
-	SET_KEY_PTRS(&op->replace, 1);
-	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
-
-	SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
-
-	bkey_copy(&tmp.k, &op->replace);
-
-	BUG_ON(op->type != BTREE_INSERT);
-	BUG_ON(!btree_insert_key(b, op, &tmp.k));
-	ret = true;
-out:
-	downgrade_write(&b->lock);
-	return ret;
-}
-
-static int btree_split(struct btree *b, struct btree_op *op)
-{
-	bool split, root = b == b->c->root;
+	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
 	uint64_t start_time = local_clock();
+	struct closure cl;
+	struct keylist parent_keys;
 
-	if (b->level)
-		set_closure_blocking(&op->cl);
+	closure_init_stack(&cl);
+	bch_keylist_init(&parent_keys);
 
-	n1 = btree_node_alloc_replacement(b, &op->cl);
+	n1 = btree_node_alloc_replacement(b, true);
 	if (IS_ERR(n1))
 		goto err;
 
@@ -1913,19 +2047,20 @@
 
 		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
 
-		n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+		n2 = bch_btree_node_alloc(b->c, b->level, true);
 		if (IS_ERR(n2))
 			goto err_free1;
 
-		if (root) {
-			n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+		if (!b->parent) {
+			n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
 			if (IS_ERR(n3))
 				goto err_free2;
 		}
 
-		bch_btree_insert_keys(n1, op);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 
-		/* Has to be a linear search because we don't have an auxiliary
+		/*
+		 * Has to be a linear search because we don't have an auxiliary
 		 * search tree yet
 		 */
 
@@ -1944,60 +2079,57 @@
 
 		bkey_copy_key(&n2->key, &b->key);
 
-		bch_keylist_add(&op->keys, &n2->key);
-		bch_btree_node_write(n2, &op->cl);
+		bch_keylist_add(&parent_keys, &n2->key);
+		bch_btree_node_write(n2, &cl);
 		rw_unlock(true, n2);
 	} else {
 		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
 
-		bch_btree_insert_keys(n1, op);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 	}
 
-	bch_keylist_add(&op->keys, &n1->key);
-	bch_btree_node_write(n1, &op->cl);
+	bch_keylist_add(&parent_keys, &n1->key);
+	bch_btree_node_write(n1, &cl);
 
 	if (n3) {
+		/* Depth increases, make a new root */
 		bkey_copy_key(&n3->key, &MAX_KEY);
-		bch_btree_insert_keys(n3, op);
-		bch_btree_node_write(n3, &op->cl);
+		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
+		bch_btree_node_write(n3, &cl);
 
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n3);
 		rw_unlock(true, n3);
-	} else if (root) {
-		op->keys.top = op->keys.bottom;
-		closure_sync(&op->cl);
+
+		btree_node_free(b);
+	} else if (!b->parent) {
+		/* Root filled up but didn't need to be split */
+		closure_sync(&cl);
 		bch_btree_set_root(n1);
+
+		btree_node_free(b);
 	} else {
-		unsigned i;
+		/* Split a non root node */
+		closure_sync(&cl);
+		make_btree_freeing_key(b, parent_keys.top);
+		bch_keylist_push(&parent_keys);
 
-		bkey_copy(op->keys.top, &b->key);
-		bkey_copy_key(op->keys.top, &ZERO_KEY);
+		btree_node_free(b);
 
-		for (i = 0; i < KEY_PTRS(&b->key); i++) {
-			uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
-
-			SET_PTR_GEN(op->keys.top, i, g);
-		}
-
-		bch_keylist_push(&op->keys);
-		closure_sync(&op->cl);
-		atomic_inc(&b->c->prio_blocked);
+		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
+		BUG_ON(!bch_keylist_empty(&parent_keys));
 	}
 
 	rw_unlock(true, n1);
-	btree_node_free(b, op);
 
 	bch_time_stats_update(&b->c->btree_split_time, start_time);
 
 	return 0;
 err_free2:
-	__bkey_put(n2->c, &n2->key);
-	btree_node_free(n2, op);
+	btree_node_free(n2);
 	rw_unlock(true, n2);
 err_free1:
-	__bkey_put(n1->c, &n1->key);
-	btree_node_free(n1, op);
+	btree_node_free(n1);
 	rw_unlock(true, n1);
 err:
 	if (n3 == ERR_PTR(-EAGAIN) ||
@@ -2009,116 +2141,126 @@
 	return -ENOMEM;
 }
 
-static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *stack_keys)
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+				 struct keylist *insert_keys,
+				 atomic_t *journal_ref,
+				 struct bkey *replace_key)
 {
-	if (b->level) {
-		int ret;
-		struct bkey *insert = op->keys.bottom;
-		struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
+	BUG_ON(b->level && replace_key);
 
-		if (!k) {
-			btree_bug(b, "no key to recurse on at level %i/%i",
-				  b->level, b->c->root->level);
-
-			op->keys.top = op->keys.bottom;
-			return -EIO;
+	if (should_split(b)) {
+		if (current->bio_list) {
+			op->lock = b->c->root->level + 1;
+			return -EAGAIN;
+		} else if (op->lock <= b->c->root->level) {
+			op->lock = b->c->root->level + 1;
+			return -EINTR;
+		} else {
+			/* Invalidated all iterators */
+			return btree_split(b, op, insert_keys, replace_key) ?:
+				-EINTR;
 		}
-
-		if (bkey_cmp(insert, k) > 0) {
-			unsigned i;
-
-			if (op->type == BTREE_REPLACE) {
-				__bkey_put(b->c, insert);
-				op->keys.top = op->keys.bottom;
-				op->insert_collision = true;
-				return 0;
-			}
-
-			for (i = 0; i < KEY_PTRS(insert); i++)
-				atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
-
-			bkey_copy(stack_keys->top, insert);
-
-			bch_cut_back(k, insert);
-			bch_cut_front(k, stack_keys->top);
-
-			bch_keylist_push(stack_keys);
-		}
-
-		ret = btree(insert_recurse, k, b, op, stack_keys);
-		if (ret)
-			return ret;
-	}
-
-	if (!bch_keylist_empty(&op->keys)) {
-		if (should_split(b)) {
-			if (op->lock <= b->c->root->level) {
-				BUG_ON(b->level);
-				op->lock = b->c->root->level + 1;
-				return -EINTR;
-			}
-			return btree_split(b, op);
-		}
-
+	} else {
 		BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
-		if (bch_btree_insert_keys(b, op)) {
+		if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
 			if (!b->level)
-				bch_btree_leaf_dirty(b, op);
+				bch_btree_leaf_dirty(b, journal_ref);
 			else
-				bch_btree_node_write(b, &op->cl);
+				bch_btree_node_write_sync(b);
 		}
-	}
 
-	return 0;
+		return 0;
+	}
 }
 
-int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+			       struct bkey *check_key)
 {
-	int ret = 0;
-	struct keylist stack_keys;
+	int ret = -EINTR;
+	uint64_t btree_ptr = b->key.ptr[0];
+	unsigned long seq = b->seq;
+	struct keylist insert;
+	bool upgrade = op->lock == -1;
 
-	/*
-	 * Don't want to block with the btree locked unless we have to,
-	 * otherwise we get deadlocks with try_harder and between split/gc
-	 */
-	clear_closure_blocking(&op->cl);
+	bch_keylist_init(&insert);
 
-	BUG_ON(bch_keylist_empty(&op->keys));
-	bch_keylist_copy(&stack_keys, &op->keys);
-	bch_keylist_init(&op->keys);
+	if (upgrade) {
+		rw_unlock(false, b);
+		rw_lock(true, b, b->level);
 
-	while (!bch_keylist_empty(&stack_keys) ||
-	       !bch_keylist_empty(&op->keys)) {
-		if (bch_keylist_empty(&op->keys)) {
-			bch_keylist_add(&op->keys,
-					bch_keylist_pop(&stack_keys));
-			op->lock = 0;
-		}
-
-		ret = btree_root(insert_recurse, c, op, &stack_keys);
-
-		if (ret == -EAGAIN) {
-			ret = 0;
-			closure_sync(&op->cl);
-		} else if (ret) {
-			struct bkey *k;
-
-			pr_err("error %i trying to insert key for %s",
-			       ret, op_type(op));
-
-			while ((k = bch_keylist_pop(&stack_keys) ?:
-				    bch_keylist_pop(&op->keys)))
-				bkey_put(c, k, 0);
-		}
+		if (b->key.ptr[0] != btree_ptr ||
+			 * This is the last node we're keeping - we're
+			 * getting rid of the node at r[0], so we have to try
+			 * to fit all of its remaining keys into this node;
+			 * we can't ensure they will always fit due to
+			 * rounding and variable length keys (overflow
+			 * shouldn't be possible in practice, though).
+	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
 
-	if (op->journal)
-		atomic_dec_bug(op->journal);
-	op->journal = NULL;
+	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
+
+	bch_keylist_add(&insert, check_key);
+
+	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
+
+	BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+	if (upgrade)
+		downgrade_write(&b->lock);
+	return ret;
+}
+
+struct btree_insert_op {
+	struct btree_op	op;
+	struct keylist	*keys;
+	atomic_t	*journal_ref;
+	struct bkey	*replace_key;
+};
+
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+{
+	struct btree_insert_op *op = container_of(b_op,
+					struct btree_insert_op, op);
+
+	int ret = bch_btree_insert_node(b, &op->op, op->keys,
+					op->journal_ref, op->replace_key);
+	if (ret && !bch_keylist_empty(op->keys))
+		return ret;
+	else
+		return MAP_DONE;
+}
+
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+		     atomic_t *journal_ref, struct bkey *replace_key)
+{
+	struct btree_insert_op op;
+	int ret = 0;
+
+	BUG_ON(current->bio_list);
+	BUG_ON(bch_keylist_empty(keys));
+
+	bch_btree_op_init(&op.op, 0);
+	op.keys		= keys;
+	op.journal_ref	= journal_ref;
+	op.replace_key	= replace_key;
+
+	while (!ret && !bch_keylist_empty(keys)) {
+		op.op.lock = 0;
+		ret = bch_btree_map_leaf_nodes(&op.op, c,
+					       &START_KEY(keys->keys),
+					       btree_insert_fn);
+	}
+
+	if (ret) {
+		struct bkey *k;
+
+		pr_err("error %i", ret);
+
+		while ((k = bch_keylist_pop(keys)))
+			bkey_put(c, k);
+	} else if (op.op.insert_collision)
+		ret = -ESRCH;
+
 	return ret;
 }
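
bch_btree_insert() is now a thin loop over the map machinery: btree_insert_fn() returns MAP_DONE once a leaf has consumed the whole keylist, or propagates an error/partial insert so the loop restarts the traversal from the fresh root; collisions surface to the caller as -ESRCH instead of a flag read out of the op. A hedged usage sketch - some_key and the surrounding setup are assumptions, not from this patch:

	struct keylist keys;
	BKEY_PADDED(key) some_key;	/* assume the caller filled this in */
	int ret;

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &some_key.key);

	ret = bch_btree_insert(c, &keys, NULL, NULL);	/* no journal ref,
							   not a replace op */
	if (ret == -ESRCH)
		pr_err("insert collision");
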
 
@@ -2141,132 +2283,81 @@
 	mutex_unlock(&b->c->bucket_lock);
 
 	b->c->root = b;
-	__bkey_put(b->c, &b->key);
 
 	bch_journal_meta(b->c, &cl);
 	closure_sync(&cl);
 }
 
-/* Cache lookup */
+/* Map across nodes or keys */
 
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
-				     struct bkey *k)
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+				       struct bkey *from,
+				       btree_map_nodes_fn *fn, int flags)
 {
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-	int ret = 0;
+	int ret = MAP_CONTINUE;
 
-	while (!ret &&
-	       !op->lookup_done) {
-		unsigned sectors = INT_MAX;
+	if (b->level) {
+		struct bkey *k;
+		struct btree_iter iter;
 
-		if (KEY_INODE(k) == op->inode) {
-			if (KEY_START(k) <= bio->bi_sector)
-				break;
+		bch_btree_iter_init(b, &iter, from);
 
-			sectors = min_t(uint64_t, sectors,
-					KEY_START(k) - bio->bi_sector);
+		while ((k = bch_btree_iter_next_filter(&iter, b,
+						       bch_ptr_bad))) {
+			ret = btree(map_nodes_recurse, k, b,
+				    op, from, fn, flags);
+			from = NULL;
+
+			if (ret != MAP_CONTINUE)
+				return ret;
 		}
-
-		ret = s->d->cache_miss(b, s, bio, sectors);
 	}
 
+	if (!b->level || flags == MAP_ALL_NODES)
+		ret = fn(op, b);
+
 	return ret;
 }
 
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
-				    struct bkey *k)
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
 {
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-	unsigned ptr;
-	struct bio *n;
-
-	int ret = submit_partial_cache_miss(b, op, k);
-	if (ret || op->lookup_done)
-		return ret;
-
-	/* XXX: figure out best pointer - for multiple cache devices */
-	ptr = 0;
-
-	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
-	while (!op->lookup_done &&
-	       KEY_INODE(k) == op->inode &&
-	       bio->bi_sector < KEY_OFFSET(k)) {
-		struct bkey *bio_key;
-		sector_t sector = PTR_OFFSET(k, ptr) +
-			(bio->bi_sector - KEY_START(k));
-		unsigned sectors = min_t(uint64_t, INT_MAX,
-					 KEY_OFFSET(k) - bio->bi_sector);
-
-		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (n == bio)
-			op->lookup_done = true;
-
-		bio_key = &container_of(n, struct bbio, bio)->key;
-
-		/*
-		 * The bucket we're reading from might be reused while our bio
-		 * is in flight, and we could then end up reading the wrong
-		 * data.
-		 *
-		 * We guard against this by checking (in cache_read_endio()) if
-		 * the pointer is stale again; if so, we treat it as an error
-		 * and reread from the backing device (but we don't pass that
-		 * error up anywhere).
-		 */
-
-		bch_bkey_copy_single_ptr(bio_key, k, ptr);
-		SET_PTR_OFFSET(bio_key, 0, sector);
-
-		n->bi_end_io	= bch_cache_read_endio;
-		n->bi_private	= &s->cl;
-
-		__bch_submit_bbio(n, b->c);
-	}
-
-	return 0;
+	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+				      struct bkey *from, btree_map_keys_fn *fn,
+				      int flags)
 {
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-
-	int ret = 0;
+	int ret = MAP_CONTINUE;
 	struct bkey *k;
 	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
 
-	do {
-		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-		if (!k) {
-			/*
-			 * b->key would be exactly what we want, except that
-			 * pointers to btree nodes have nonzero size - we
-			 * wouldn't go far enough
-			 */
+	bch_btree_iter_init(b, &iter, from);
 
-			ret = submit_partial_cache_miss(b, op,
-					&KEY(KEY_INODE(&b->key),
-					     KEY_OFFSET(&b->key), 0));
-			break;
-		}
+	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+		ret = !b->level
+			? fn(op, b, k)
+			: btree(map_keys_recurse, k, b, op, from, fn, flags);
+		from = NULL;
 
-		ret = b->level
-			? btree(search_recurse, k, b, op)
-			: submit_partial_cache_hit(b, op, k);
-	} while (!ret &&
-		 !op->lookup_done);
+		if (ret != MAP_CONTINUE)
+			return ret;
+	}
+
+	if (!b->level && (flags & MAP_END_KEY))
+		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+				     KEY_OFFSET(&b->key), 0));
 
 	return ret;
 }
 
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+		       struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+	return btree_root(map_keys_recurse, c, op, from, fn, flags);
+}
+
 /* Keybuf code */
 
 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2285,80 +2376,79 @@
 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
 }
 
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-				   struct keybuf *buf, struct bkey *end,
-				   keybuf_pred_fn *pred)
+struct refill {
+	struct btree_op	op;
+	unsigned	nr_found;
+	struct keybuf	*buf;
+	struct bkey	*end;
+	keybuf_pred_fn	*pred;
+};
+
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+			    struct bkey *k)
 {
-	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, &buf->last_scanned);
+	struct refill *refill = container_of(op, struct refill, op);
+	struct keybuf *buf = refill->buf;
+	int ret = MAP_CONTINUE;
 
-	while (!array_freelist_empty(&buf->freelist)) {
-		struct bkey *k = bch_btree_iter_next_filter(&iter, b,
-							    bch_ptr_bad);
-
-		if (!b->level) {
-			if (!k) {
-				buf->last_scanned = b->key;
-				break;
-			}
-
-			buf->last_scanned = *k;
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
-
-			if (pred(buf, k)) {
-				struct keybuf_key *w;
-
-				spin_lock(&buf->lock);
-
-				w = array_alloc(&buf->freelist);
-
-				w->private = NULL;
-				bkey_copy(&w->key, k);
-
-				if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
-					array_free(&buf->freelist, w);
-
-				spin_unlock(&buf->lock);
-			}
-		} else {
-			if (!k)
-				break;
-
-			btree(refill_keybuf, k, b, op, buf, end, pred);
-			/*
-			 * Might get an error here, but can't really do anything
-			 * and it'll get logged elsewhere. Just read what we
-			 * can.
-			 */
-
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
-
-			cond_resched();
-		}
+	if (bkey_cmp(k, refill->end) >= 0) {
+		ret = MAP_DONE;
+		goto out;
 	}
 
-	return 0;
+	if (!KEY_SIZE(k)) /* end key */
+		goto out;
+
+	if (refill->pred(buf, k)) {
+		struct keybuf_key *w;
+
+		spin_lock(&buf->lock);
+
+		w = array_alloc(&buf->freelist);
+		if (!w) {
+			spin_unlock(&buf->lock);
+			return MAP_DONE;
+		}
+
+		w->private = NULL;
+		bkey_copy(&w->key, k);
+
+		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+			array_free(&buf->freelist, w);
+		else
+			refill->nr_found++;
+
+		if (array_freelist_empty(&buf->freelist))
+			ret = MAP_DONE;
+
+		spin_unlock(&buf->lock);
+	}
+out:
+	buf->last_scanned = *k;
+	return ret;
 }
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 		       struct bkey *end, keybuf_pred_fn *pred)
 {
 	struct bkey start = buf->last_scanned;
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
+	struct refill refill;
 
 	cond_resched();
 
-	btree_root(refill_keybuf, c, &op, buf, end, pred);
-	closure_sync(&op.cl);
+	bch_btree_op_init(&refill.op, -1);
+	refill.nr_found	= 0;
+	refill.buf	= buf;
+	refill.end	= end;
+	refill.pred	= pred;
 
-	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
-		 RB_EMPTY_ROOT(&buf->keys) ? "no" :
-		 array_freelist_empty(&buf->freelist) ? "some" : "a few",
-		 KEY_INODE(&start), KEY_OFFSET(&start),
-		 KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+			   refill_keybuf_fn, MAP_END_KEY);
+
+	trace_bcache_keyscan(refill.nr_found,
+			     KEY_INODE(&start), KEY_OFFSET(&start),
+			     KEY_INODE(&buf->last_scanned),
+			     KEY_OFFSET(&buf->last_scanned));
 
 	spin_lock(&buf->lock);
 
@@ -2436,9 +2526,9 @@
 }
 
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
-					     struct keybuf *buf,
-					     struct bkey *end,
-					     keybuf_pred_fn *pred)
+					  struct keybuf *buf,
+					  struct bkey *end,
+					  keybuf_pred_fn *pred)
 {
 	struct keybuf_key *ret;
 
@@ -2471,14 +2561,12 @@
 {
 	if (btree_io_wq)
 		destroy_workqueue(btree_io_wq);
-	if (bch_gc_wq)
-		destroy_workqueue(bch_gc_wq);
 }
 
 int __init bch_btree_init(void)
 {
-	if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
-	    !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+	if (!btree_io_wq)
 		return -ENOMEM;
 
 	return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d37..767e755 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@
 	unsigned long		seq;
 	struct rw_semaphore	lock;
 	struct cache_set	*c;
+	struct btree		*parent;
 
 	unsigned long		flags;
 	uint16_t		written;	/* would be nice to kill */
@@ -200,12 +201,7 @@
 
 static inline void set_gc_sectors(struct cache_set *c)
 {
-	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-	return __bch_ptr_invalid(b->c, b->level, k);
+	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
 static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@
 	return __bch_btree_iter_init(b, iter, search, b->sets);
 }
 
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+	if (b->level)
+		return bch_btree_ptr_invalid(b->c, k);
+	else
+		return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
 /* Looping macros */
 
 #define for_each_cached_btree(b, c, iter)				\
@@ -234,51 +240,17 @@
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure		cl;
-	struct cache_set	*c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t		*journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio		*cache_bio;
-
-	unsigned		inode;
-
-	uint16_t		write_prio;
-
 	/* Btree level at which we start taking write locks */
 	short			lock;
 
-	/* Btree insertion type */
-	enum {
-		BTREE_INSERT,
-		BTREE_REPLACE
-	} type:8;
-
-	unsigned		csum:1;
-	unsigned		skip:1;
-	unsigned		flush_journal:1;
-
-	unsigned		insert_data_done:1;
-	unsigned		lookup_done:1;
 	unsigned		insert_collision:1;
-
-	/* Anything after this point won't get zeroed in do_bio_hook() */
-
-	/* Keys to be inserted */
-	struct keylist		keys;
-	BKEY_PADDED(replace);
 };
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
@@ -290,108 +262,71 @@
 
 static inline void rw_unlock(bool w, struct btree *b)
 {
-#ifdef CONFIG_BCACHE_EDEBUG
-	unsigned i;
-
-	if (w && b->key.ptr[0])
-		for (i = 0; i <= b->nsets; i++)
-			bch_check_key_order(b, b->sets[i].data);
-#endif
-
 	if (w)
 		b->seq++;
 	(w ? up_write : up_read)(&b->lock);
 }
 
-#define insert_lock(s, b)	((b)->level <= (s)->lock)
-
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
-
-/**
- * btree - recurse down the btree on a specified key
- * @fn:		function to call, which will be passed the child node
- * @key:	key to recurse on
- * @b:		parent btree node
- * @op:		pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...)					\
-({									\
-	int _r, l = (b)->level - 1;					\
-	bool _w = l <= (op)->lock;					\
-	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\
-	if (!IS_ERR(_b)) {						\
-		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\
-		rw_unlock(_w, _b);					\
-	} else								\
-		_r = PTR_ERR(_b);					\
-	_r;								\
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn:		function to call, which will be passed the child node
- * @c:		cache set
- * @op:		pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...)					\
-({									\
-	int _r = -EINTR;						\
-	do {								\
-		struct btree *_b = (c)->root;				\
-		bool _w = insert_lock(op, _b);				\
-		rw_lock(_w, _b, _b->level);				\
-		if (_b == (c)->root &&					\
-		    _w == insert_lock(op, _b))				\
-			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
-		rw_unlock(_w, _b);					\
-		bch_cannibalize_unlock(c, &(op)->cl);		\
-	} while (_r == -EINTR);						\
-									\
-	_r;								\
-})
-
-static inline bool should_split(struct btree *b)
-{
-	struct bset *i = write_block(b);
-	return b->written >= btree_blocks(b) ||
-		(i->seq == b->sets[0].data->seq &&
-		 b->written + __set_blocks(i, i->keys + 15, b->c)
-		 > btree_blocks(b));
-}
-
 void bch_btree_node_read(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
 void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
-				int, struct btree_op *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
-				   struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+			       struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+		     atomic_t *, struct bkey *);
 
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
-
-void bch_queue_gc(struct cache_set *);
+int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
+static inline void wake_up_gc(struct cache_set *c)
+{
+	if (c->gc_thread)
+		wake_up_process(c->gc_thread);
+}
+
+#define MAP_DONE	0
+#define MAP_CONTINUE	1
+
+#define MAP_ALL_NODES	0
+#define MAP_LEAF_NODES	1
+
+#define MAP_END_KEY	1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+			  struct bkey *, btree_map_nodes_fn *, int);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+				      struct bkey *from, btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+					   struct cache_set *c,
+					   struct bkey *from,
+					   btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+				struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+		       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-		       keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+		       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
 				  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
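
As a rough sketch of how the new map interface above is meant to be used
(assumed usage, following the struct refill pattern in btree.c; dirty_count
and count_dirty_fn are made-up names, not from the patch): the caller embeds
a struct btree_op in its own context struct, and the per-key callback returns
MAP_CONTINUE to keep walking or MAP_DONE to stop.

struct dirty_count {
	struct btree_op	op;	/* recovered by the callback via container_of() */
	uint64_t	sectors;
};

static int count_dirty_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct dirty_count *count = container_of(op, struct dirty_count, op);

	if (KEY_DIRTY(k))
		count->sectors += KEY_SIZE(k);

	return MAP_CONTINUE;	/* MAP_DONE would end the walk early */
}

	/* caller side: */
	struct dirty_count count;

	bch_btree_op_init(&count.op, -1);	/* -1: never take write locks */
	count.sectors = 0;
	bch_btree_map_keys(&count.op, c, &ZERO_KEY, count_dirty_fn, 0);
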
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba201..dfff241 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,17 +11,6 @@
 
 #include "closure.h"
 
-void closure_queue(struct closure *cl)
-{
-	struct workqueue_struct *wq = cl->wq;
-	if (wq) {
-		INIT_WORK(&cl->work, cl->work.func);
-		BUG_ON(!queue_work(wq, &cl->work));
-	} else
-		cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
 #define CL_FIELD(type, field)					\
 	case TYPE_ ## type:					\
 	return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@
 {
 	switch (cl->type) {
 		CL_FIELD(closure_with_waitlist, wait);
-		CL_FIELD(closure_with_waitlist_and_timer, wait);
-	default:
-		return NULL;
-	}
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
-	switch (cl->type) {
-		CL_FIELD(closure_with_timer, timer);
-		CL_FIELD(closure_with_waitlist_and_timer, timer);
 	default:
 		return NULL;
 	}
@@ -51,7 +29,7 @@
 	int r = flags & CLOSURE_REMAINING_MASK;
 
 	BUG_ON(flags & CLOSURE_GUARD_MASK);
-	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
 
 	/* Must deliver precisely one wakeup */
 	if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@
 
 	if (!r) {
 		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
-			/* CLOSURE_BLOCKING might be set - clear it */
 			atomic_set(&cl->remaining,
 				   CLOSURE_REMAINING_INITIALIZER);
 			closure_queue(cl);
@@ -90,13 +67,13 @@
 {
 	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
 }
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
 
 void closure_put(struct closure *cl)
 {
 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
 
 static void set_waiting(struct closure *cl, unsigned long f)
 {
@@ -133,7 +110,7 @@
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
 }
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
 
 bool closure_wait(struct closure_waitlist *list, struct closure *cl)
 {
@@ -146,7 +123,7 @@
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
 
 /**
  * closure_sync() - sleep until a closure has nothing left to wait on
@@ -169,7 +146,7 @@
 
 	__closure_end_sleep(cl);
 }
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
 
 /**
  * closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@
 			   CLOSURE_REMAINING_INITIALIZER) != -1)
 		return false;
 
-	closure_set_ret_ip(cl);
-
 	smp_mb();
+
 	cl->parent = parent;
 	if (parent)
 		closure_get(parent);
 
+	closure_set_ret_ip(cl);
 	closure_debug_create(cl);
 	return true;
 }
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
 
 void __closure_lock(struct closure *cl, struct closure *parent,
 		    struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@
 		if (closure_trylock(cl, parent))
 			return;
 
-		closure_wait_event_sync(wait_list, &wait,
-					atomic_read(&cl->remaining) == -1);
+		closure_wait_event(wait_list, &wait,
+				   atomic_read(&cl->remaining) == -1);
 	}
 }
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
-	struct closure *cl = (struct closure *) data;
-	closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
-	struct timer_list *timer = closure_timer(cl);
-
-	init_timer(timer);
-	timer->data	= (unsigned long) cl;
-	timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
-		     struct timer_list *timer)
-{
-	if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
-		return false;
-
-	BUG_ON(timer_pending(timer));
-
-	timer->expires	= jiffies + delay;
-
-	atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
-	add_timer(timer);
-	return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
-	if (del_timer(timer))
-		closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
-	if (del_timer_sync(timer))
-		closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
 
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
@@ -273,7 +204,7 @@
 	list_add(&cl->all, &closure_list);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
 
 void closure_debug_destroy(struct closure *cl)
 {
@@ -286,7 +217,7 @@
 	list_del(&cl->all);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
 
 static struct dentry *debug;
 
@@ -304,14 +235,12 @@
 			   cl, (void *) cl->ip, cl->fn, cl->parent,
 			   r & CLOSURE_REMAINING_MASK);
 
-		seq_printf(f, "%s%s%s%s%s%s\n",
+		seq_printf(f, "%s%s%s%s\n",
 			   test_bit(WORK_STRUCT_PENDING,
 				    work_data_bits(&cl->work)) ? "Q" : "",
 			   r & CLOSURE_RUNNING	? "R" : "",
-			   r & CLOSURE_BLOCKING	? "B" : "",
 			   r & CLOSURE_STACK	? "S" : "",
-			   r & CLOSURE_SLEEPING	? "Sl" : "",
-			   r & CLOSURE_TIMER	? "T" : "");
+			   r & CLOSURE_SLEEPING	? "Sl" : "");
 
 		if (r & CLOSURE_WAITING)
 			seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 0003992..9762f1b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -155,21 +155,6 @@
  * delayed_work embeds a work item and a timer_list. The important thing is, use
  * it exactly like you would a regular closure and closure_put() will magically
  * handle everything for you.
- *
- * We've got closures that embed timers, too. They're called, appropriately
- * enough:
- * struct closure_with_timer;
- *
- * This gives you access to closure_delay(). It takes a refcount for a specified
- * number of jiffies - you could then call closure_sync() (for a slightly
- * convoluted version of msleep()) or continue_at() - which gives you the same
- * effect as using a delayed work item, except you can reuse the work_struct
- * already embedded in struct closure.
- *
- * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
- * probably expect, if you happen to need the features of both. (You don't
- * really want to know how all this is implemented, but if I've done my job
- * right you shouldn't have to care).
  */
 
 struct closure;
@@ -182,16 +167,11 @@
 enum closure_type {
 	TYPE_closure				= 0,
 	TYPE_closure_with_waitlist		= 1,
-	TYPE_closure_with_timer			= 2,
-	TYPE_closure_with_waitlist_and_timer	= 3,
-	MAX_CLOSURE_TYPE			= 3,
+	MAX_CLOSURE_TYPE			= 1,
 };
 
 enum closure_state {
 	/*
-	 * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
-	 * waiting asynchronously
-	 *
 	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
 	 * the thread that owns the closure, and cleared by the thread that's
 	 * waking up the closure.
@@ -200,10 +180,6 @@
 	 * - indicates that cl->task is valid and closure_put() may wake it up.
 	 * Only set or cleared by the thread that owns the closure.
 	 *
-	 * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure
-	 * has an outstanding timer. Must be set by the thread that owns the
-	 * closure, and cleared by the timer function when the timer goes off.
-	 *
 	 * The rest are for debugging and don't affect behaviour:
 	 *
 	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -218,19 +194,17 @@
 	 * closure with this flag set
 	 */
 
-	CLOSURE_BITS_START	= (1 << 19),
-	CLOSURE_DESTRUCTOR	= (1 << 19),
-	CLOSURE_BLOCKING	= (1 << 21),
-	CLOSURE_WAITING		= (1 << 23),
-	CLOSURE_SLEEPING	= (1 << 25),
-	CLOSURE_TIMER		= (1 << 27),
+	CLOSURE_BITS_START	= (1 << 23),
+	CLOSURE_DESTRUCTOR	= (1 << 23),
+	CLOSURE_WAITING		= (1 << 25),
+	CLOSURE_SLEEPING	= (1 << 27),
 	CLOSURE_RUNNING		= (1 << 29),
 	CLOSURE_STACK		= (1 << 31),
 };
 
 #define CLOSURE_GUARD_MASK					\
-	((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING|	\
-	  CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING|	\
+	  CLOSURE_RUNNING|CLOSURE_STACK) << 1)
 
 #define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
 #define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)
@@ -268,17 +242,6 @@
 	struct closure_waitlist	wait;
 };
 
-struct closure_with_timer {
-	struct closure		cl;
-	struct timer_list	timer;
-};
-
-struct closure_with_waitlist_and_timer {
-	struct closure		cl;
-	struct closure_waitlist	wait;
-	struct timer_list	timer;
-};
-
 extern unsigned invalid_closure_type(void);
 
 #define __CLOSURE_TYPE(cl, _t)						\
@@ -289,14 +252,11 @@
 (									\
 	__CLOSURE_TYPE(cl, closure)					\
 	__CLOSURE_TYPE(cl, closure_with_waitlist)			\
-	__CLOSURE_TYPE(cl, closure_with_timer)				\
-	__CLOSURE_TYPE(cl, closure_with_waitlist_and_timer)		\
 	invalid_closure_type()						\
 )
 
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
@@ -305,12 +265,6 @@
 void __closure_lock(struct closure *cl, struct closure *parent,
 		    struct closure_waitlist *wait_list);
 
-void do_closure_timer_init(struct closure *cl);
-bool __closure_delay(struct closure *cl, unsigned long delay,
-		     struct timer_list *timer);
-void __closure_flush(struct closure *cl, struct timer_list *timer);
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 void closure_debug_init(void);
@@ -354,11 +308,6 @@
 	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
 }
 
-static inline bool closure_is_stopped(struct closure *cl)
-{
-	return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
 static inline bool closure_is_unlocked(struct closure *cl)
 {
 	return atomic_read(&cl->remaining) == -1;
@@ -367,14 +316,6 @@
 static inline void do_closure_init(struct closure *cl, struct closure *parent,
 				   bool running)
 {
-	switch (cl->type) {
-	case TYPE_closure_with_timer:
-	case TYPE_closure_with_waitlist_and_timer:
-		do_closure_timer_init(cl);
-	default:
-		break;
-	}
-
 	cl->parent = parent;
 	if (parent)
 		closure_get(parent);
@@ -429,8 +370,7 @@
 static inline void closure_init_stack(struct closure *cl)
 {
 	memset(cl, 0, sizeof(struct closure));
-	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
-		   CLOSURE_BLOCKING|CLOSURE_STACK);
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
 }
 
 /**
@@ -461,24 +401,6 @@
 #define closure_lock(cl, parent)				\
 	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
 
-/**
- * closure_delay() - delay some number of jiffies
- * @cl:		the closure that will sleep
- * @delay:	the delay in jiffies
- *
- * Takes a refcount on @cl which will be released after @delay jiffies; this may
- * be used to have a function run after a delay with continue_at(), or
- * closure_sync() may be used for a convoluted version of msleep().
- */
-#define closure_delay(cl, delay)			\
-	__closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
-
-#define closure_flush(cl)				\
-	__closure_flush(__to_internal_closure(cl), &(cl)->timer)
-
-#define closure_flush_sync(cl)				\
-	__closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
-
 static inline void __closure_end_sleep(struct closure *cl)
 {
 	__set_current_state(TASK_RUNNING);
@@ -498,40 +420,6 @@
 }
 
 /**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
-	return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
-	if (!closure_blocking(cl))
-		atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
-	if (closure_blocking(cl))
-		atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/**
  * closure_wake_up() - wake up all closures on a wait list.
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
@@ -561,63 +449,36 @@
  * refcount on our closure. If this was a stack allocated closure, that would be
  * bad.
  */
-#define __closure_wait_event(list, cl, condition, _block)		\
+#define closure_wait_event(list, cl, condition)				\
 ({									\
-	bool block = _block;						\
 	typeof(condition) ret;						\
 									\
 	while (1) {							\
 		ret = (condition);					\
 		if (ret) {						\
 			__closure_wake_up(list);			\
-			if (block)					\
-				closure_sync(cl);			\
-									\
+			closure_sync(cl);				\
 			break;						\
 		}							\
 									\
-		if (block)						\
-			__closure_start_sleep(cl);			\
+		__closure_start_sleep(cl);				\
 									\
-		if (!closure_wait(list, cl)) {				\
-			if (!block)					\
-				break;					\
-									\
+		if (!closure_wait(list, cl))				\
 			schedule();					\
-		}							\
 	}								\
 									\
 	ret;								\
 })
 
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list:	the wait list to wait on
- * @cl:		the closure that is doing the waiting
- * @condition:	a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition)				\
-	__closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition)			\
-	__closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition)			\
-	__closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+	struct workqueue_struct *wq = cl->wq;
+	if (wq) {
+		INIT_WORK(&cl->work, cl->work.func);
+		BUG_ON(!queue_work(wq, &cl->work));
+	} else
+		cl->fn(cl);
+}
 
 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 				  struct workqueue_struct *wq)
@@ -642,7 +503,7 @@
 #define continue_at_nobarrier(_cl, _fn, _wq)				\
 do {									\
 	set_closure_fn(_cl, _fn, _wq);					\
-	closure_queue(cl);						\
+	closure_queue(_cl);						\
 	return;								\
 } while (0)
 
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 88e6411..264fcfb 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,7 +8,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
@@ -77,29 +76,17 @@
 	return out - buf;
 }
 
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
-	return scnprintf(buf, size, "%zu level %i/%i",
-			 PTR_BUCKET_NR(b->c, &b->key, 0),
-			 b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
-	return bkey_cmp(k, (!b->level)
-			? &START_KEY(bkey_next(k))
-			: bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
 
 static void dump_bset(struct btree *b, struct bset *i)
 {
-	struct bkey *k;
+	struct bkey *k, *next;
 	unsigned j;
 	char buf[80];
 
-	for (k = i->start; k < end(i); k = bkey_next(k)) {
+	for (k = i->start; k < end(i); k = next) {
+		next = bkey_next(k);
+
 		bch_bkey_to_text(buf, sizeof(buf), k);
 		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
 		       (uint64_t *) k - i->d, i->keys, buf);
@@ -115,15 +102,21 @@
 
 		printk(" %s\n", bch_ptr_status(b->c, k));
 
-		if (bkey_next(k) < end(i) &&
-		    skipped_backwards(b, k))
+		if (next < end(i) &&
+		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
 			printk(KERN_ERR "Key skipped backwards\n");
 	}
 }
 
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+	unsigned i;
 
-#ifdef CONFIG_BCACHE_DEBUG
+	console_lock();
+	for (i = 0; i <= b->nsets; i++)
+		dump_bset(b, b->sets[i].data);
+	console_unlock();
+}
 
 void bch_btree_verify(struct btree *b, struct bset *new)
 {
@@ -176,66 +169,44 @@
 	mutex_unlock(&b->c->verify_lock);
 }
 
-static void data_verify_endio(struct bio *bio, int error)
-{
-	struct closure *cl = bio->bi_private;
-	closure_put(cl);
-}
-
-void bch_data_verify(struct search *s)
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	struct closure *cl = &s->cl;
 	struct bio *check;
 	struct bio_vec *bv;
 	int i;
 
-	if (!s->unaligned_bvec)
-		bio_for_each_segment(bv, s->orig_bio, i)
-			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-
-	check = bio_clone(s->orig_bio, GFP_NOIO);
+	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
 
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 
-	check->bi_rw		= READ_SYNC;
-	check->bi_private	= cl;
-	check->bi_end_io	= data_verify_endio;
+	submit_bio_wait(READ_SYNC, check);
 
-	closure_bio_submit(check, cl, &dc->disk);
-	closure_sync(cl);
+	bio_for_each_segment(bv, bio, i) {
+		void *p1 = kmap_atomic(bv->bv_page);
+		void *p2 = page_address(check->bi_io_vec[i].bv_page);
 
-	bio_for_each_segment(bv, s->orig_bio, i) {
-		void *p1 = kmap(bv->bv_page);
-		void *p2 = kmap(check->bi_io_vec[i].bv_page);
+		cache_set_err_on(memcmp(p1 + bv->bv_offset,
+					p2 + bv->bv_offset,
+					bv->bv_len),
+				 dc->disk.c,
+				 "verify failed at dev %s sector %llu",
+				 bdevname(dc->bdev, name),
+				 (uint64_t) bio->bi_sector);
 
-		if (memcmp(p1 + bv->bv_offset,
-			   p2 + bv->bv_offset,
-			   bv->bv_len))
-			printk(KERN_ERR
-			       "bcache (%s): verify failed at sector %llu\n",
-			       bdevname(dc->bdev, name),
-			       (uint64_t) s->orig_bio->bi_sector);
-
-		kunmap(bv->bv_page);
-		kunmap(check->bi_io_vec[i].bv_page);
+		kunmap_atomic(p1);
 	}
 
-	__bio_for_each_segment(bv, check, i, 0)
+	bio_for_each_segment_all(bv, check, i)
 		__free_page(bv->bv_page);
 out_put:
 	bio_put(check);
 }
 
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
 {
 	unsigned ret = 0;
 	struct btree_iter iter;
@@ -247,72 +218,60 @@
 	return ret;
 }
 
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
-				   va_list args)
-{
-	unsigned i;
-	char buf[80];
-
-	console_lock();
-
-	for (i = 0; i <= b->nsets; i++)
-		dump_bset(b, b->sets[i].data);
-
-	vprintk(fmt, args);
-
-	console_unlock();
-
-	bch_btree_to_text(buf, sizeof(buf), b);
-	panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
-			     const char *fmt, ...)
-{
-	struct bkey *k;
-
-	if (!i->keys)
-		return;
-
-	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
-		if (skipped_backwards(b, k)) {
-			va_list args;
-			va_start(args, fmt);
-
-			vdump_bucket_and_panic(b, fmt, args);
-			va_end(args);
-		}
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
 {
 	va_list args;
 	struct bkey *k, *p = NULL;
 	struct btree_iter iter;
-
-	if (b->level)
-		return;
+	const char *err;
 
 	for_each_key(b, k, &iter) {
-		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Keys out of order:\n");
-			goto bug;
-		}
+		if (!b->level) {
+			err = "Keys out of order";
+			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+				goto bug;
 
-		if (bch_ptr_invalid(b, k))
-			continue;
+			if (bch_ptr_invalid(b, k))
+				continue;
 
-		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Overlapping keys:\n");
-			goto bug;
+			err =  "Overlapping keys";
+			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+				goto bug;
+		} else {
+			if (bch_ptr_bad(b, k))
+				continue;
+
+			err = "Duplicate keys";
+			if (p && !bkey_cmp(p, k))
+				goto bug;
 		}
 		p = k;
 	}
+
+	err = "Key larger than btree node key";
+	if (p && bkey_cmp(p, &b->key) > 0)
+		goto bug;
+
 	return;
 bug:
+	bch_dump_bucket(b);
+
 	va_start(args, fmt);
-	vdump_bucket_and_panic(b, fmt, args);
+	vprintk(fmt, args);
 	va_end(args);
+
+	panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+	struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+	if (next < iter->data->end &&
+	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+		bch_dump_bucket(iter->b);
+		panic("Key skipped backwards\n");
+	}
 }
 
 #endif
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1c39b5a..2ede60e 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -4,40 +4,44 @@
 /* Btree/bkey debug printing */
 
 int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *);
-void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
-void bch_check_keys(struct btree *, const char *, ...);
-
-#define bch_check_key_order(b, i)			\
-	bch_check_key_order_msg(b, i, "keys out of order")
-#define EBUG_ON(cond)		BUG_ON(cond)
-
-#else /* EDEBUG */
-
-#define bch_count_data(b)				0
-#define bch_check_key_order(b, i)			do {} while (0)
-#define bch_check_key_order_msg(b, i, ...)		do {} while (0)
-#define bch_check_keys(b, ...)				do {} while (0)
-#define EBUG_ON(cond)					do {} while (0)
-
-#endif
 
 #ifdef CONFIG_BCACHE_DEBUG
 
 void bch_btree_verify(struct btree *, struct bset *);
-void bch_data_verify(struct search *);
+void bch_data_verify(struct cached_dev *, struct bio *);
+int __bch_count_data(struct btree *);
+void __bch_check_keys(struct btree *, const char *, ...);
+void bch_btree_iter_next_check(struct btree_iter *);
+
+#define EBUG_ON(cond)			BUG_ON(cond)
+#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
+#define key_merging_disabled(c)		((c)->key_merging_disabled)
+#define bypass_torture_test(d)		((d)->bypass_torture_test)
 
 #else /* DEBUG */
 
 static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
-static inline void bch_data_verify(struct search *s) {};
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+static inline int __bch_count_data(struct btree *b) { return -1; }
+static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#define EBUG_ON(cond)			do { if (cond); } while (0)
+#define expensive_debug_checks(c)	0
+#define key_merging_disabled(c)		0
+#define bypass_torture_test(d)		0
 
 #endif
 
+#define bch_count_data(b)						\
+	(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
+
+#define bch_check_keys(b, ...)						\
+do {									\
+	if (expensive_debug_checks((b)->c))				\
+		__bch_check_keys(b, __VA_ARGS__);			\
+} while (0)
+
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8435f81..ecdaa67 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -7,7 +7,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 
 #include <trace/events/bcache.h>
 
@@ -31,17 +30,20 @@
 }
 
 static int journal_read_bucket(struct cache *ca, struct list_head *list,
-			       struct btree_op *op, unsigned bucket_index)
+			       unsigned bucket_index)
 {
 	struct journal_device *ja = &ca->journal;
 	struct bio *bio = &ja->bio;
 
 	struct journal_replay *i;
 	struct jset *j, *data = ca->set->journal.w[0].data;
+	struct closure cl;
 	unsigned len, left, offset = 0;
 	int ret = 0;
 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
 
+	closure_init_stack(&cl);
+
 	pr_debug("reading %llu", (uint64_t) bucket);
 
 	while (offset < ca->sb.bucket_size) {
@@ -55,11 +57,11 @@
 		bio->bi_size	= len << 9;
 
 		bio->bi_end_io	= journal_read_endio;
-		bio->bi_private = &op->cl;
+		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &op->cl, ca);
-		closure_sync(&op->cl);
+		closure_bio_submit(bio, &cl, ca);
+		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
 		 * journal entries that overlap bucket boundaries; this means
@@ -72,7 +74,7 @@
 			struct list_head *where;
 			size_t blocks, bytes = set_bytes(j);
 
-			if (j->magic != jset_magic(ca->set))
+			if (j->magic != jset_magic(&ca->sb))
 				return ret;
 
 			if (bytes > left << 9)
@@ -129,12 +131,11 @@
 	return ret;
 }
 
-int bch_journal_read(struct cache_set *c, struct list_head *list,
-			struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
 {
 #define read_bucket(b)							\
 	({								\
-		int ret = journal_read_bucket(ca, list, op, b);		\
+		int ret = journal_read_bucket(ca, list, b);		\
 		__set_bit(b, bitmap);					\
 		if (ret < 0)						\
 			return ret;					\
@@ -292,8 +293,7 @@
 	}
 }
 
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
-			  struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
 	struct bkey *k;
@@ -301,31 +301,30 @@
 		list_entry(list->prev, struct journal_replay, list);
 
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+	struct keylist keylist;
+
+	bch_keylist_init(&keylist);
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
 
-		if (n != i->j.seq)
-			pr_err(
-		"journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
-		n, i->j.seq - 1, start, end);
+		cache_set_err_on(n != i->j.seq, s,
+"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+				 n, i->j.seq - 1, start, end);
 
 		for (k = i->j.start;
 		     k < end(&i->j);
 		     k = bkey_next(k)) {
 			trace_bcache_journal_replay_key(k);
 
-			bkey_copy(op->keys.top, k);
-			bch_keylist_push(&op->keys);
+			bkey_copy(keylist.top, k);
+			bch_keylist_push(&keylist);
 
-			op->journal = i->pin;
-			atomic_inc(op->journal);
-
-			ret = bch_btree_insert(op, s);
+			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
 			if (ret)
 				goto err;
 
-			BUG_ON(!bch_keylist_empty(&op->keys));
+			BUG_ON(!bch_keylist_empty(&keylist));
 			keys++;
 
 			cond_resched();
@@ -339,14 +338,13 @@
 
 	pr_info("journal replay done, %i keys in %i entries, seq %llu",
 		keys, entries, end);
-
+err:
 	while (!list_empty(list)) {
 		i = list_first_entry(list, struct journal_replay, list);
 		list_del(&i->list);
 		kfree(i);
 	}
-err:
-	closure_sync(&op->cl);
+
 	return ret;
 }
 
@@ -358,48 +356,35 @@
 	 * Try to find the btree node that references the oldest journal
 	 * entry; best is our current candidate, and is locked if non-NULL:
 	 */
-	struct btree *b, *best = NULL;
-	unsigned iter;
+	struct btree *b, *best;
+	unsigned i;
+retry:
+	best = NULL;
 
-	for_each_cached_btree(b, c, iter) {
-		if (!down_write_trylock(&b->lock))
-			continue;
-
-		if (!btree_node_dirty(b) ||
-		    !btree_current_write(b)->journal) {
-			rw_unlock(true, b);
-			continue;
+	for_each_cached_btree(b, c, i)
+		if (btree_current_write(b)->journal) {
+			if (!best)
+				best = b;
+			else if (journal_pin_cmp(c,
+					btree_current_write(best)->journal,
+					btree_current_write(b)->journal)) {
+				best = b;
+			}
 		}
 
-		if (!best)
-			best = b;
-		else if (journal_pin_cmp(c,
-					 btree_current_write(best),
-					 btree_current_write(b))) {
-			rw_unlock(true, best);
-			best = b;
-		} else
+	b = best;
+	if (b) {
+		rw_lock(true, b, b->level);
+
+		if (!btree_current_write(b)->journal) {
 			rw_unlock(true, b);
+			/* We raced */
+			goto retry;
+		}
+
+		bch_btree_node_write(b, NULL);
+		rw_unlock(true, b);
 	}
-
-	if (best)
-		goto out;
-
-	/* We can't find the best btree node, just pick the first */
-	list_for_each_entry(b, &c->btree_cache, list)
-		if (!b->level && btree_node_dirty(b)) {
-			best = b;
-			rw_lock(true, best, best->level);
-			goto found;
-		}
-
-out:
-	if (!best)
-		return;
-found:
-	if (btree_node_dirty(best))
-		bch_btree_node_write(best, NULL);
-	rw_unlock(true, best);
 }
 
 #define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
@@ -495,7 +480,7 @@
 		do_journal_discard(ca);
 
 	if (c->journal.blocks_free)
-		return;
+		goto out;
 
 	/*
 	 * Allocate:
@@ -521,7 +506,7 @@
 
 	if (n)
 		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
+out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
 }
@@ -554,32 +539,26 @@
 	struct journal_write *w = bio->bi_private;
 
 	cache_set_err_on(error, w->c, "journal io error");
-	closure_put(&w->c->journal.io.cl);
+	closure_put(&w->c->journal.io);
 }
 
 static void journal_write(struct closure *);
 
 static void journal_write_done(struct closure *cl)
 {
-	struct journal *j = container_of(cl, struct journal, io.cl);
-	struct cache_set *c = container_of(j, struct cache_set, journal);
-
+	struct journal *j = container_of(cl, struct journal, io);
 	struct journal_write *w = (j->cur == j->w)
 		? &j->w[1]
 		: &j->w[0];
 
 	__closure_wake_up(&w->wait);
-
-	if (c->journal_delay_ms)
-		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
-
-	continue_at(cl, journal_write, system_wq);
+	continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
 static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
 	struct cache *ca;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
@@ -617,7 +596,7 @@
 	for_each_cache(ca, c, i)
 		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
 
-	w->data->magic		= jset_magic(c);
+	w->data->magic		= jset_magic(&c->sb);
 	w->data->version	= BCACHE_JSET_VERSION;
 	w->data->last_seq	= last_seq(&c->journal);
 	w->data->csum		= csum_set(w->data);
@@ -660,121 +639,134 @@
 
 static void journal_write(struct closure *cl)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
 
 	spin_lock(&c->journal.lock);
 	journal_write_unlocked(cl);
 }
 
-static void __journal_try_write(struct cache_set *c, bool noflush)
+static void journal_try_write(struct cache_set *c)
 	__releases(c->journal.lock)
 {
-	struct closure *cl = &c->journal.io.cl;
+	struct closure *cl = &c->journal.io;
+	struct journal_write *w = c->journal.cur;
 
-	if (!closure_trylock(cl, &c->cl))
-		spin_unlock(&c->journal.lock);
-	else if (noflush && journal_full(&c->journal)) {
-		spin_unlock(&c->journal.lock);
-		continue_at(cl, journal_write, system_wq);
-	} else
+	w->need_write = true;
+
+	if (closure_trylock(cl, &c->cl))
 		journal_write_unlocked(cl);
+	else
+		spin_unlock(&c->journal.lock);
 }
 
-#define journal_try_write(c)	__journal_try_write(c, false)
-
-void bch_journal_meta(struct cache_set *c, struct closure *cl)
+static struct journal_write *journal_wait_for_write(struct cache_set *c,
+						    unsigned nkeys)
 {
-	struct journal_write *w;
+	size_t sectors;
+	struct closure cl;
 
-	if (CACHE_SYNC(&c->sb)) {
+	closure_init_stack(&cl);
+
+	spin_lock(&c->journal.lock);
+
+	while (1) {
+		struct journal_write *w = c->journal.cur;
+
+		sectors = __set_blocks(w->data, w->data->keys + nkeys,
+				       c) * c->sb.block_size;
+
+		if (sectors <= min_t(size_t,
+				     c->journal.blocks_free * c->sb.block_size,
+				     PAGE_SECTORS << JSET_BITS))
+			return w;
+
+		/* XXX: tracepoint */
+		if (!journal_full(&c->journal)) {
+			trace_bcache_journal_entry_full(c);
+
+			/*
+			 * XXX: If we were inserting so many keys that they
+			 * won't fit in an _empty_ journal write, we'll
+			 * deadlock. For now, handle this in
+			 * bch_keylist_realloc() - but something to think about.
+			 */
+			BUG_ON(!w->data->keys);
+
+			closure_wait(&w->wait, &cl);
+			journal_try_write(c); /* unlocks */
+		} else {
+			trace_bcache_journal_full(c);
+
+			closure_wait(&c->journal.wait, &cl);
+			journal_reclaim(c);
+			spin_unlock(&c->journal.lock);
+
+			btree_flush_write(c);
+		}
+
+		closure_sync(&cl);
 		spin_lock(&c->journal.lock);
-
-		w = c->journal.cur;
-		w->need_write = true;
-
-		if (cl)
-			BUG_ON(!closure_wait(&w->wait, cl));
-
-		closure_flush(&c->journal.io);
-		__journal_try_write(c, true);
 	}
 }
 
+static void journal_write_work(struct work_struct *work)
+{
+	struct cache_set *c = container_of(to_delayed_work(work),
+					   struct cache_set,
+					   journal.work);
+	spin_lock(&c->journal.lock);
+	journal_try_write(c);
+}
+
 /*
  * Entry point to the journalling code - bio_insert() and btree_invalidate()
  * pass bch_journal() a list of keys to be journalled, and then
  * bch_journal() hands those same keys off to btree_insert_async()
  */
 
-void bch_journal(struct closure *cl)
+atomic_t *bch_journal(struct cache_set *c,
+		      struct keylist *keys,
+		      struct closure *parent)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct cache_set *c = op->c;
 	struct journal_write *w;
-	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
+	atomic_t *ret;
 
-	if (op->type != BTREE_INSERT ||
-	    !CACHE_SYNC(&c->sb))
-		goto out;
+	if (!CACHE_SYNC(&c->sb))
+		return NULL;
 
-	/*
-	 * If we're looping because we errored, might already be waiting on
-	 * another journal write:
-	 */
-	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
-		closure_sync(cl->parent);
+	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-	spin_lock(&c->journal.lock);
+	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+	w->data->keys += bch_keylist_nkeys(keys);
 
-	if (journal_full(&c->journal)) {
-		trace_bcache_journal_full(c);
+	ret = &fifo_back(&c->journal.pin);
+	atomic_inc(ret);
 
-		closure_wait(&c->journal.wait, cl);
-
-		journal_reclaim(c);
-		spin_unlock(&c->journal.lock);
-
-		btree_flush_write(c);
-		continue_at(cl, bch_journal, bcache_wq);
-	}
-
-	w = c->journal.cur;
-	w->need_write = true;
-	b = __set_blocks(w->data, w->data->keys + n, c);
-
-	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
-	    b > c->journal.blocks_free) {
-		trace_bcache_journal_entry_full(c);
-
-		/*
-		 * XXX: If we were inserting so many keys that they won't fit in
-		 * an _empty_ journal write, we'll deadlock. For now, handle
-		 * this in bch_keylist_realloc() - but something to think about.
-		 */
-		BUG_ON(!w->data->keys);
-
-		BUG_ON(!closure_wait(&w->wait, cl));
-
-		closure_flush(&c->journal.io);
-
+	if (parent) {
+		closure_wait(&w->wait, parent);
 		journal_try_write(c);
-		continue_at(cl, bch_journal, bcache_wq);
+	} else if (!w->need_write) {
+		schedule_delayed_work(&c->journal.work,
+				      msecs_to_jiffies(c->journal_delay_ms));
+		spin_unlock(&c->journal.lock);
+	} else {
+		spin_unlock(&c->journal.lock);
 	}
 
-	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
-	w->data->keys += n;
 
-	op->journal = &fifo_back(&c->journal.pin);
-	atomic_inc(op->journal);
+	return ret;
+}
 
-	if (op->flush_journal) {
-		closure_flush(&c->journal.io);
-		closure_wait(&w->wait, cl->parent);
-	}
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+	struct keylist keys;
+	atomic_t *ref;
 
-	journal_try_write(c);
-out:
-	bch_btree_insert_async(cl);
+	bch_keylist_init(&keys);
+
+	ref = bch_journal(c, &keys, cl);
+	if (ref)
+		atomic_dec_bug(ref);
 }
 
 void bch_journal_free(struct cache_set *c)
@@ -790,6 +782,7 @@
 
 	closure_init_unlocked(&j->io);
 	spin_lock_init(&j->lock);
+	INIT_DELAYED_WORK(&j->work, journal_write_work);
 
 	c->journal_delay_ms = 100;
 
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 3d78512..a6472fd 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -75,43 +75,6 @@
  * nodes that are pinning the oldest journal entries first.
  */
 
-#define BCACHE_JSET_VERSION_UUIDv1	1
-/* Always latest UUID format */
-#define BCACHE_JSET_VERSION_UUID	1
-#define BCACHE_JSET_VERSION		1
-
-/*
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-struct jset {
-	uint64_t		csum;
-	uint64_t		magic;
-	uint64_t		seq;
-	uint32_t		version;
-	uint32_t		keys;
-
-	uint64_t		last_seq;
-
-	BKEY_PADDED(uuid_bucket);
-	BKEY_PADDED(btree_root);
-	uint16_t		btree_level;
-	uint16_t		pad[3];
-
-	uint64_t		prio_bucket[MAX_CACHES_PER_SET];
-
-	union {
-		struct bkey	start[0];
-		uint64_t	d[0];
-	};
-};
-
 /*
  * Only used for holding the journal entries we read in btree_journal_read()
  * during cache_registration
@@ -140,7 +103,8 @@
 	spinlock_t		lock;
 	/* used when waiting because the journal was full */
 	struct closure_waitlist	wait;
-	struct closure_with_timer io;
+	struct closure		io;
+	struct delayed_work	work;
 
 	/* Number of blocks free in the bucket(s) we're currently writing to */
 	unsigned		blocks_free;
@@ -188,8 +152,7 @@
 };
 
 #define journal_pin_cmp(c, l, r)				\
-	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\
-	 fifo_idx(&(c)->journal.pin, (r)->journal))
+	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
 
 #define JOURNAL_PIN	20000
 
@@ -199,15 +162,14 @@
 struct closure;
 struct cache_set;
 struct btree_op;
+struct keylist;
 
-void bch_journal(struct closure *);
+atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
 void bch_journal_next(struct journal *);
 void bch_journal_mark(struct cache_set *, struct list_head *);
 void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
-			struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
-			  struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
 
 void bch_journal_free(struct cache_set *);
 int bch_journal_alloc(struct cache_set *);
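
With the reworked interface, bch_journal() returns a pin (an atomic_t
refcount) on the journal entry the keys were written to; the caller hands it
to the btree insert and then drops it. A condensed, assumed sketch mirroring
bch_data_insert_keys() in the request.c diff below:

	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);
	/* ... fill keys ... */

	ref = bch_journal(c, &keys, NULL);	/* NULL: don't wait for the write */
	bch_btree_insert(c, &keys, ref, NULL);
	if (ref)				/* NULL when CACHE_SYNC is off */
		atomic_dec_bug(ref);		/* drop the journal pin */
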
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1a3b4f4..7c1275e 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -12,8 +12,9 @@
 #include <trace/events/bcache.h>
 
 struct moving_io {
+	struct closure		cl;
 	struct keybuf_key	*w;
-	struct search		s;
+	struct data_insert_op	op;
 	struct bbio		bio;
 };
 
@@ -38,13 +39,13 @@
 
 static void moving_io_destructor(struct closure *cl)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	kfree(io);
 }
 
 static void write_moving_finish(struct closure *cl)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct bio *bio = &io->bio.bio;
 	struct bio_vec *bv;
 	int i;
@@ -52,13 +53,12 @@
 	bio_for_each_segment_all(bv, bio, i)
 		__free_page(bv->bv_page);
 
-	if (io->s.op.insert_collision)
+	if (io->op.replace_collision)
 		trace_bcache_gc_copy_collision(&io->w->key);
 
-	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
 
-	atomic_dec_bug(&io->s.op.c->in_flight);
-	closure_wake_up(&io->s.op.c->moving_gc_wait);
+	up(&io->op.c->moving_in_flight);
 
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
@@ -66,12 +66,12 @@
 static void read_moving_endio(struct bio *bio, int error)
 {
 	struct moving_io *io = container_of(bio->bi_private,
-					    struct moving_io, s.cl);
+					    struct moving_io, cl);
 
 	if (error)
-		io->s.error = error;
+		io->op.error = error;
 
-	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -85,54 +85,53 @@
 	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
 	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
 					       PAGE_SECTORS);
-	bio->bi_private		= &io->s.cl;
+	bio->bi_private		= &io->cl;
 	bio->bi_io_vec		= bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
 static void write_moving(struct closure *cl)
 {
-	struct search *s = container_of(cl, struct search, cl);
-	struct moving_io *io = container_of(s, struct moving_io, s);
+	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	struct data_insert_op *op = &io->op;
 
-	if (!s->error) {
+	if (!op->error) {
 		moving_init(io);
 
-		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
-		s->op.lock		= -1;
-		s->op.write_prio	= 1;
-		s->op.cache_bio		= &io->bio.bio;
+		io->bio.bio.bi_sector = KEY_START(&io->w->key);
+		op->write_prio		= 1;
+		op->bio			= &io->bio.bio;
 
-		s->writeback		= KEY_DIRTY(&io->w->key);
-		s->op.csum		= KEY_CSUM(&io->w->key);
+		op->writeback		= KEY_DIRTY(&io->w->key);
+		op->csum		= KEY_CSUM(&io->w->key);
 
-		s->op.type = BTREE_REPLACE;
-		bkey_copy(&s->op.replace, &io->w->key);
+		bkey_copy(&op->replace_key, &io->w->key);
+		op->replace		= true;
 
-		closure_init(&s->op.cl, cl);
-		bch_insert_data(&s->op.cl);
+		closure_call(&op->cl, bch_data_insert, NULL, cl);
 	}
 
-	continue_at(cl, write_moving_finish, NULL);
+	continue_at(cl, write_moving_finish, system_wq);
 }
 
 static void read_moving_submit(struct closure *cl)
 {
-	struct search *s = container_of(cl, struct search, cl);
-	struct moving_io *io = container_of(s, struct moving_io, s);
+	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct bio *bio = &io->bio.bio;
 
-	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
 
-	continue_at(cl, write_moving, bch_gc_wq);
+	continue_at(cl, write_moving, system_wq);
 }
 
-static void read_moving(struct closure *cl)
+static void read_moving(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
 	struct keybuf_key *w;
 	struct moving_io *io;
 	struct bio *bio;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	/* XXX: if we error, background writeback could stall indefinitely */
 
@@ -150,8 +149,8 @@
 
 		w->private	= io;
 		io->w		= w;
-		io->s.op.inode	= KEY_INODE(&w->key);
-		io->s.op.c	= c;
+		io->op.inode	= KEY_INODE(&w->key);
+		io->op.c	= c;
 
 		moving_init(io);
 		bio = &io->bio.bio;
@@ -164,13 +163,8 @@
 
 		trace_bcache_gc_copy(&w->key);
 
-		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
-
-		if (atomic_inc_return(&c->in_flight) >= 64) {
-			closure_wait_event(&c->moving_gc_wait, cl,
-					   atomic_read(&c->in_flight) < 64);
-			continue_at(cl, read_moving, bch_gc_wq);
-		}
+		down(&c->moving_in_flight);
+		closure_call(&io->cl, read_moving_submit, NULL, &cl);
 	}
 
 	if (0) {
@@ -180,7 +174,7 @@
 		bch_keybuf_del(&c->moving_gc_keys, w);
 	}
 
-	closure_return(cl);
+	closure_sync(&cl);
 }
 
 static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -193,15 +187,14 @@
 	return GC_SECTORS_USED(heap_peek(&ca->heap));
 }
 
-void bch_moving_gc(struct closure *cl)
+void bch_moving_gc(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
 	struct cache *ca;
 	struct bucket *b;
 	unsigned i;
 
 	if (!c->copy_gc_enabled)
-		closure_return(cl);
+		return;
 
 	mutex_lock(&c->bucket_lock);
 
@@ -242,13 +235,11 @@
 
 	c->moving_gc_keys.last_scanned = ZERO_KEY;
 
-	closure_init(&c->moving_gc, cl);
-	read_moving(&c->moving_gc);
-
-	closure_return(cl);
+	read_moving(c);
 }
 
 void bch_moving_init_cache_set(struct cache_set *c)
 {
 	bch_keybuf_init(&c->moving_gc_keys);
+	sema_init(&c->moving_in_flight, 64);
 }
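
The moving-GC rework above replaces a hand-rolled throttle (an atomic in_flight
counter plus a closure waitlist, with read_moving() re-queueing itself once 64
IOs were outstanding) with a counting semaphore: read_moving() now sleeps in
down(&c->moving_in_flight) before dispatching each moving_io, and the
completion path releases the slot with up(). That is presumably also why the
continuations can move from the private bch_gc_wq to system_wq: concurrency is
bounded by the semaphore rather than by a dedicated workqueue. A minimal
sketch of the same pattern using only stock kernel primitives; all names below
are illustrative, not bcache's:

	#include <linux/semaphore.h>
	#include <linux/workqueue.h>

	static struct semaphore in_flight;

	static void throttle_init(void)
	{
		sema_init(&in_flight, 64);	/* at most 64 items in flight */
	}

	static void dispatch_one(struct work_struct *w)
	{
		down(&in_flight);		/* sleeps until a slot frees up */
		queue_work(system_wq, w);
	}

	/* call from each item's completion path */
	static void complete_one(void)
	{
		up(&in_flight);
	}
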
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2a7f0dd..fbcc851 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void check_should_skip(struct cached_dev *, struct search *);
+static void bch_data_insert_start(struct closure *);
 
 /* Cgroup interface */
 
@@ -213,221 +213,79 @@
 
 /* Insert data into cache */
 
-static void bio_invalidate(struct closure *cl)
+static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct bio *bio = op->cache_bio;
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	atomic_t *journal_ref = NULL;
+	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+	int ret;
+
+	/*
+	 * If we're looping, we might already be waiting on another
+	 * journal write - we can't wait on more than one journal write
+	 * at a time.
+	 *
+	 * XXX: this looks wrong
+	 */
+#if 0
+	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+		closure_sync(&s->cl);
+#endif
+
+	if (!op->replace)
+		journal_ref = bch_journal(op->c, &op->insert_keys,
+					  op->flush_journal ? cl : NULL);
+
+	ret = bch_btree_insert(op->c, &op->insert_keys,
+			       journal_ref, replace_key);
+	if (ret == -ESRCH) {
+		op->replace_collision = true;
+	} else if (ret) {
+		op->error		= -ENOMEM;
+		op->insert_data_done	= true;
+	}
+
+	if (journal_ref)
+		atomic_dec_bug(journal_ref);
+
+	if (!op->insert_data_done)
+		continue_at(cl, bch_data_insert_start, bcache_wq);
+
+	bch_keylist_free(&op->insert_keys);
+	closure_return(cl);
+}
+
+static void bch_data_invalidate(struct closure *cl)
+{
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
 		 bio_sectors(bio), (uint64_t) bio->bi_sector);
 
 	while (bio_sectors(bio)) {
-		unsigned len = min(bio_sectors(bio), 1U << 14);
+		unsigned sectors = min(bio_sectors(bio),
+				       1U << (KEY_SIZE_BITS - 1));
 
-		if (bch_keylist_realloc(&op->keys, 0, op->c))
+		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
 			goto out;
 
-		bio->bi_sector	+= len;
-		bio->bi_size	-= len << 9;
+		bio->bi_sector	+= sectors;
+		bio->bi_size	-= sectors << 9;
 
-		bch_keylist_add(&op->keys,
-				&KEY(op->inode, bio->bi_sector, len));
+		bch_keylist_add(&op->insert_keys,
+				&KEY(op->inode, bio->bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
 	bio_put(bio);
 out:
-	continue_at(cl, bch_journal, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, bcache_wq);
 }
 
-struct open_bucket {
-	struct list_head	list;
-	struct task_struct	*last;
-	unsigned		sectors_free;
-	BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
+static void bch_data_insert_error(struct closure *cl)
 {
-	struct open_bucket *b;
-
-	while (!list_empty(&c->data_buckets)) {
-		b = list_first_entry(&c->data_buckets,
-				     struct open_bucket, list);
-		list_del(&b->list);
-		kfree(b);
-	}
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
-	int i;
-
-	spin_lock_init(&c->data_bucket_lock);
-
-	for (i = 0; i < 6; i++) {
-		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
-		if (!b)
-			return -ENOMEM;
-
-		list_add(&b->list, &c->data_buckets);
-	}
-
-	return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The ideas is if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
-					    const struct bkey *search,
-					    struct task_struct *task,
-					    struct bkey *alloc)
-{
-	struct open_bucket *ret, *ret_task = NULL;
-
-	list_for_each_entry_reverse(ret, &c->data_buckets, list)
-		if (!bkey_cmp(&ret->key, search))
-			goto found;
-		else if (ret->last == task)
-			ret_task = ret;
-
-	ret = ret_task ?: list_first_entry(&c->data_buckets,
-					   struct open_bucket, list);
-found:
-	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
-		bkey_copy(&ret->key, alloc);
-		bkey_init(alloc);
-	}
-
-	if (!ret->sectors_free)
-		ret = NULL;
-
-	return ret;
-}
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
-			      struct search *s)
-{
-	struct cache_set *c = s->op.c;
-	struct open_bucket *b;
-	BKEY_PADDED(key) alloc;
-	struct closure cl, *w = NULL;
-	unsigned i;
-
-	if (s->writeback) {
-		closure_init_stack(&cl);
-		w = &cl;
-	}
-
-	/*
-	 * We might have to allocate a new bucket, which we can't do with a
-	 * spinlock held. So if we have to allocate, we drop the lock, allocate
-	 * and then retry. KEY_PTRS() indicates whether alloc points to
-	 * allocated bucket(s).
-	 */
-
-	bkey_init(&alloc.key);
-	spin_lock(&c->data_bucket_lock);
-
-	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
-		unsigned watermark = s->op.write_prio
-			? WATERMARK_MOVINGGC
-			: WATERMARK_NONE;
-
-		spin_unlock(&c->data_bucket_lock);
-
-		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
-			return false;
-
-		spin_lock(&c->data_bucket_lock);
-	}
-
-	/*
-	 * If we had to allocate, we might race and not need to allocate the
-	 * second time we call find_data_bucket(). If we allocated a bucket but
-	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
-	 */
-	if (KEY_PTRS(&alloc.key))
-		__bkey_put(c, &alloc.key);
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		EBUG_ON(ptr_stale(c, &b->key, i));
-
-	/* Set up the pointer to the space we're allocating: */
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		k->ptr[i] = b->key.ptr[i];
-
-	sectors = min(sectors, b->sectors_free);
-
-	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
-	SET_KEY_SIZE(k, sectors);
-	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
-
-	/*
-	 * Move b to the end of the lru, and keep track of what this bucket was
-	 * last used for:
-	 */
-	list_move_tail(&b->list, &c->data_buckets);
-	bkey_copy_key(&b->key, k);
-	b->last = s->task;
-
-	b->sectors_free	-= sectors;
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++) {
-		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
-
-		atomic_long_add(sectors,
-				&PTR_CACHE(c, &b->key, i)->sectors_written);
-	}
-
-	if (b->sectors_free < c->sb.block_size)
-		b->sectors_free = 0;
-
-	/*
-	 * k takes refcounts on the buckets it points to until it's inserted
-	 * into the btree, but if we're done with this bucket we just transfer
-	 * get_data_bucket()'s refcount.
-	 */
-	if (b->sectors_free)
-		for (i = 0; i < KEY_PTRS(&b->key); i++)
-			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
-	spin_unlock(&c->data_bucket_lock);
-	return true;
-}
-
-static void bch_insert_data_error(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +296,34 @@
 	 * from the keys we'll accomplish just that.
 	 */
 
-	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 
-	while (src != op->keys.top) {
+	while (src != op->insert_keys.top) {
 		struct bkey *n = bkey_next(src);
 
 		SET_KEY_PTRS(src, 0);
-		bkey_copy(dst, src);
+		memmove(dst, src, bkey_bytes(src));
 
 		dst = bkey_next(dst);
 		src = n;
 	}
 
-	op->keys.top = dst;
+	op->insert_keys.top = dst;
 
-	bch_journal(cl);
+	bch_data_insert_keys(cl);
 }
 
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
-		if (s->writeback)
-			s->error = error;
-		else if (s->write)
-			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+		if (op->writeback)
+			op->error = error;
+		else if (!op->replace)
+			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
@@ -474,18 +331,17 @@
 	bch_bbio_endio(op->c, bio, error, "writing data to cache");
 }
 
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio, *n;
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio, *n;
 
-	if (op->skip)
-		return bio_invalidate(cl);
+	if (op->bypass)
+		return bch_data_invalidate(cl);
 
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
-		bch_queue_gc(op->c);
+		wake_up_gc(op->c);
 	}
 
 	/*
@@ -497,29 +353,30 @@
 	do {
 		unsigned i;
 		struct bkey *k;
-		struct bio_set *split = s->d
-			? s->d->bio_split : op->c->bio_split;
+		struct bio_set *split = op->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
-		if (bch_keylist_realloc(&op->keys,
+		if (bch_keylist_realloc(&op->insert_keys,
 					1 + (op->csum ? 1 : 0),
 					op->c))
-			continue_at(cl, bch_journal, bcache_wq);
+			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
-		k = op->keys.top;
+		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
 		SET_KEY_OFFSET(k, bio->bi_sector);
 
-		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+				       op->write_point, op->write_prio,
+				       op->writeback))
 			goto err;
 
 		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
-		n->bi_end_io	= bch_insert_data_endio;
+		n->bi_end_io	= bch_data_insert_endio;
 		n->bi_private	= cl;
 
-		if (s->writeback) {
+		if (op->writeback) {
 			SET_KEY_DIRTY(k, true);
 
 			for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +389,17 @@
 			bio_csum(n, k);
 
 		trace_bcache_cache_insert(k);
-		bch_keylist_push(&op->keys);
+		bch_keylist_push(&op->insert_keys);
 
 		n->bi_rw |= REQ_WRITE;
 		bch_submit_bbio(n, op->c, k, 0);
 	} while (n != bio);
 
 	op->insert_data_done = true;
-	continue_at(cl, bch_journal, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, bcache_wq);
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
-	BUG_ON(s->writeback);
+	BUG_ON(op->writeback);
 
 	/*
 	 * But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +407,15 @@
 	 * we might be starving btree writes for gc or something.
 	 */
 
-	if (s->write) {
+	if (!op->replace) {
 		/*
 		 * Writethrough write: We can't complete the write until we've
 		 * updated the index. But we don't want to delay the write while
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->skip = true;
-		return bio_invalidate(cl);
+		op->bypass = true;
+		return bch_data_invalidate(cl);
 	} else {
 		/*
 		 * From a cache miss, we can just insert the keys for the data
@@ -567,15 +424,15 @@
 		op->insert_data_done = true;
 		bio_put(bio);
 
-		if (!bch_keylist_empty(&op->keys))
-			continue_at(cl, bch_journal, bcache_wq);
+		if (!bch_keylist_empty(&op->insert_keys))
+			continue_at(cl, bch_data_insert_keys, bcache_wq);
 		else
 			closure_return(cl);
 	}
 }
 
 /**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
  *
  * This is the starting point for any data to end up in a cache device; it could
  * be from a normal write, or a writeback write, or a write to a flash only
@@ -587,465 +444,24 @@
  * data is written it calls bch_journal, and after the keys have been added to
  * the next journal write they're inserted into the btree.
  *
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in op->bio; bi_sector is used for the key offset,
  * and op->inode is used for the key inode.
  *
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If op->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->bio and op->inode.
  */
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	bch_keylist_init(&op->keys);
-	bio_get(op->cache_bio);
-	bch_insert_data_loop(cl);
+	trace_bcache_write(op->bio, op->writeback, op->bypass);
+
+	bch_keylist_init(&op->insert_keys);
+	bio_get(op->bio);
+	bch_data_insert_start(cl);
 }
 
-void bch_btree_insert_async(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
-
-	if (bch_btree_insert(op, op->c)) {
-		s->error		= -ENOMEM;
-		op->insert_data_done	= true;
-	}
-
-	if (op->insert_data_done) {
-		bch_keylist_free(&op->keys);
-		closure_return(cl);
-	} else
-		continue_at(cl, bch_insert_data_loop, bcache_wq);
-}
-
-/* Common code for the make_request functions */
-
-static void request_endio(struct bio *bio, int error)
-{
-	struct closure *cl = bio->bi_private;
-
-	if (error) {
-		struct search *s = container_of(cl, struct search, cl);
-		s->error = error;
-		/* Only cache read errors are recoverable */
-		s->recoverable = false;
-	}
-
-	bio_put(bio);
-	closure_put(cl);
-}
-
-void bch_cache_read_endio(struct bio *bio, int error)
-{
-	struct bbio *b = container_of(bio, struct bbio, bio);
-	struct closure *cl = bio->bi_private;
-	struct search *s = container_of(cl, struct search, cl);
-
-	/*
-	 * If the bucket was reused while our bio was in flight, we might have
-	 * read the wrong data. Set s->error but not error so it doesn't get
-	 * counted against the cache device, but we'll still reread the data
-	 * from the backing device.
-	 */
-
-	if (error)
-		s->error = error;
-	else if (ptr_stale(s->op.c, &b->key, 0)) {
-		atomic_long_inc(&s->op.c->cache_read_races);
-		s->error = -EINTR;
-	}
-
-	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
-}
-
-static void bio_complete(struct search *s)
-{
-	if (s->orig_bio) {
-		int cpu, rw = bio_data_dir(s->orig_bio);
-		unsigned long duration = jiffies - s->start_time;
-
-		cpu = part_stat_lock();
-		part_round_stats(cpu, &s->d->disk->part0);
-		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
-		part_stat_unlock();
-
-		trace_bcache_request_end(s, s->orig_bio);
-		bio_endio(s->orig_bio, s->error);
-		s->orig_bio = NULL;
-	}
-}
-
-static void do_bio_hook(struct search *s)
-{
-	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
-
-	bio->bi_end_io		= request_endio;
-	bio->bi_private		= &s->cl;
-	atomic_set(&bio->bi_cnt, 3);
-}
-
-static void search_free(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	bio_complete(s);
-
-	if (s->op.cache_bio)
-		bio_put(s->op.cache_bio);
-
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
-	closure_debug_destroy(cl);
-	mempool_free(s, s->d->c->search);
-}
-
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
-{
-	struct bio_vec *bv;
-	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, op.keys));
-
-	__closure_init(&s->cl, NULL);
-
-	s->op.inode		= d->id;
-	s->op.c			= d->c;
-	s->d			= d;
-	s->op.lock		= -1;
-	s->task			= current;
-	s->orig_bio		= bio;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
-	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
-	s->recoverable		= 1;
-	s->start_time		= jiffies;
-	do_bio_hook(s);
-
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec	= bv;
-		s->unaligned_bvec	= 1;
-	}
-
-	return s;
-}
-
-static void btree_read_async(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-	int ret = btree_root(search_recurse, op->c, op);
-
-	if (ret == -EAGAIN)
-		continue_at(cl, btree_read_async, bcache_wq);
-
-	closure_return(cl);
-}
-
-/* Cached devices */
-
-static void cached_dev_bio_complete(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-
-	search_free(cl);
-	cached_dev_put(dc);
-}
-
-/* Process reads */
-
-static void cached_dev_read_complete(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-
-	if (s->op.insert_collision)
-		bch_mark_cache_miss_collision(s);
-
-	if (s->op.cache_bio) {
-		int i;
-		struct bio_vec *bv;
-
-		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
-			__free_page(bv->bv_page);
-	}
-
-	cached_dev_bio_complete(cl);
-}
-
-static void request_read_error(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	struct bio_vec *bv;
-	int i;
-
-	if (s->recoverable) {
-		/* Retry from the backing device: */
-		trace_bcache_read_retry(s->orig_bio);
-
-		s->error = 0;
-		bv = s->bio.bio.bi_io_vec;
-		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
-
-		/* XXX: invalidate cache */
-
-		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
-	}
-
-	continue_at(cl, cached_dev_read_complete, NULL);
-}
-
-static void request_read_done(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-
-	/*
-	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
-	 * contains data ready to be inserted into the cache.
-	 *
-	 * First, we copy the data we just read from cache_bio's bounce buffers
-	 * to the buffers the original bio pointed to:
-	 */
-
-	if (s->op.cache_bio) {
-		bio_reset(s->op.cache_bio);
-		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
-		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
-		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
-		bch_bio_map(s->op.cache_bio, NULL);
-
-		bio_copy_data(s->cache_miss, s->op.cache_bio);
-
-		bio_put(s->cache_miss);
-		s->cache_miss = NULL;
-	}
-
-	if (verify(dc, &s->bio.bio) && s->recoverable)
-		bch_data_verify(s);
-
-	bio_complete(s);
-
-	if (s->op.cache_bio &&
-	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
-		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
-	}
-
-	continue_at(cl, cached_dev_read_complete, NULL);
-}
-
-static void request_read_done_bh(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
-
-	if (s->error)
-		continue_at_nobarrier(cl, request_read_error, bcache_wq);
-	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
-		continue_at_nobarrier(cl, request_read_done, bcache_wq);
-	else
-		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
-}
-
-static int cached_dev_cache_miss(struct btree *b, struct search *s,
-				 struct bio *bio, unsigned sectors)
-{
-	int ret = 0;
-	unsigned reada;
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	struct bio *miss;
-
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
-
-	miss->bi_end_io		= request_endio;
-	miss->bi_private	= &s->cl;
-
-	if (s->cache_miss || s->op.skip)
-		goto out_submit;
-
-	if (miss != bio ||
-	    (bio->bi_rw & REQ_RAHEAD) ||
-	    (bio->bi_rw & REQ_META) ||
-	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
-		reada = 0;
-	else {
-		reada = min(dc->readahead >> 9,
-			    sectors - bio_sectors(miss));
-
-		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) -
-				bio_end_sector(miss);
-	}
-
-	s->cache_bio_sectors = bio_sectors(miss) + reada;
-	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
-			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
-			dc->disk.bio_split);
-
-	if (!s->op.cache_bio)
-		goto out_submit;
-
-	s->op.cache_bio->bi_sector	= miss->bi_sector;
-	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
-	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
-
-	s->op.cache_bio->bi_end_io	= request_endio;
-	s->op.cache_bio->bi_private	= &s->cl;
-
-	/* btree_search_recurse()'s btree iterator is no good anymore */
-	ret = -EINTR;
-	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
-		goto out_put;
-
-	bch_bio_map(s->op.cache_bio, NULL);
-	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
-		goto out_put;
-
-	s->cache_miss = miss;
-	bio_get(s->op.cache_bio);
-
-	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
-
-	return ret;
-out_put:
-	bio_put(s->op.cache_bio);
-	s->op.cache_bio = NULL;
-out_submit:
-	closure_bio_submit(miss, &s->cl, s->d);
-	return ret;
-}
-
-static void request_read(struct cached_dev *dc, struct search *s)
-{
-	struct closure *cl = &s->cl;
-
-	check_should_skip(dc, s);
-	closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
-	continue_at(cl, request_read_done_bh, NULL);
-}
-
-/* Process writes */
-
-static void cached_dev_write_complete(struct closure *cl)
-{
-	struct search *s = container_of(cl, struct search, cl);
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-
-	up_read_non_owner(&dc->writeback_lock);
-	cached_dev_bio_complete(cl);
-}
-
-static void request_write(struct cached_dev *dc, struct search *s)
-{
-	struct closure *cl = &s->cl;
-	struct bio *bio = &s->bio.bio;
-	struct bkey start, end;
-	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
-
-	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
-
-	check_should_skip(dc, s);
-	down_read_non_owner(&dc->writeback_lock);
-
-	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
-		s->op.skip	= false;
-		s->writeback	= true;
-	}
-
-	if (bio->bi_rw & REQ_DISCARD)
-		goto skip;
-
-	if (should_writeback(dc, s->orig_bio,
-			     cache_mode(dc, bio),
-			     s->op.skip)) {
-		s->op.skip = false;
-		s->writeback = true;
-	}
-
-	if (s->op.skip)
-		goto skip;
-
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
-
-	if (!s->writeback) {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-						   dc->disk.bio_split);
-
-		closure_bio_submit(bio, cl, s->d);
-	} else {
-		bch_writeback_add(dc);
-		s->op.cache_bio = bio;
-
-		if (bio->bi_rw & REQ_FLUSH) {
-			/* Also need to send a flush to the backing device */
-			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
-							     dc->disk.bio_split);
-
-			flush->bi_rw	= WRITE_FLUSH;
-			flush->bi_bdev	= bio->bi_bdev;
-			flush->bi_end_io = request_endio;
-			flush->bi_private = cl;
-
-			closure_bio_submit(flush, cl, s->d);
-		}
-	}
-out:
-	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
-	continue_at(cl, cached_dev_write_complete, NULL);
-skip:
-	s->op.skip = true;
-	s->op.cache_bio = s->orig_bio;
-	bio_get(s->op.cache_bio);
-
-	if ((bio->bi_rw & REQ_DISCARD) &&
-	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-		goto out;
-
-	closure_bio_submit(bio, cl, s->d);
-	goto out;
-}
-
-static void request_nodata(struct cached_dev *dc, struct search *s)
-{
-	struct closure *cl = &s->cl;
-	struct bio *bio = &s->bio.bio;
-
-	if (bio->bi_rw & REQ_DISCARD) {
-		request_write(dc, s);
-		return;
-	}
-
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
-
-	closure_bio_submit(bio, cl, s->d);
-
-	continue_at(cl, cached_dev_bio_complete, NULL);
-}
-
-/* Cached devices - read & write stuff */
+/* Congested? */
 
 unsigned bch_get_congested(struct cache_set *c)
 {
@@ -1088,14 +504,15 @@
 	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 }
 
-static void check_should_skip(struct cached_dev *dc, struct search *s)
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 {
-	struct cache_set *c = s->op.c;
-	struct bio *bio = &s->bio.bio;
+	struct cache_set *c = dc->disk.c;
 	unsigned mode = cache_mode(dc, bio);
 	unsigned sectors, congested = bch_get_congested(c);
+	struct task_struct *task = current;
+	struct io *i;
 
-	if (atomic_read(&dc->disk.detaching) ||
+	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 	    (bio->bi_rw & REQ_DISCARD))
 		goto skip;
@@ -1105,12 +522,19 @@
 	     (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector   & (c->sb.block_size - 1) ||
+	if (bio->bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
 	}
 
+	if (bypass_torture_test(dc)) {
+		if ((get_random_int() & 3) == 3)
+			goto skip;
+		else
+			goto rescale;
+	}
+
 	if (!congested && !dc->sequential_cutoff)
 		goto rescale;
 
@@ -1120,61 +544,576 @@
 	    (bio->bi_rw & REQ_SYNC))
 		goto rescale;
 
-	if (dc->sequential_merge) {
-		struct io *i;
+	spin_lock(&dc->io_lock);
 
-		spin_lock(&dc->io_lock);
+	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+		if (i->last == bio->bi_sector &&
+		    time_before(jiffies, i->jiffies))
+			goto found;
 
-		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-			if (i->last == bio->bi_sector &&
-			    time_before(jiffies, i->jiffies))
-				goto found;
+	i = list_first_entry(&dc->io_lru, struct io, lru);
 
-		i = list_first_entry(&dc->io_lru, struct io, lru);
-
-		add_sequential(s->task);
-		i->sequential = 0;
+	add_sequential(task);
+	i->sequential = 0;
 found:
-		if (i->sequential + bio->bi_size > i->sequential)
-			i->sequential	+= bio->bi_size;
+	if (i->sequential + bio->bi_size > i->sequential)
+		i->sequential	+= bio->bi_size;
 
-		i->last			 = bio_end_sector(bio);
-		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
-		s->task->sequential_io	 = i->sequential;
+	i->last			 = bio_end_sector(bio);
+	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
+	task->sequential_io	 = i->sequential;
 
-		hlist_del(&i->hash);
-		hlist_add_head(&i->hash, iohash(dc, i->last));
-		list_move_tail(&i->lru, &dc->io_lru);
+	hlist_del(&i->hash);
+	hlist_add_head(&i->hash, iohash(dc, i->last));
+	list_move_tail(&i->lru, &dc->io_lru);
 
-		spin_unlock(&dc->io_lock);
-	} else {
-		s->task->sequential_io = bio->bi_size;
+	spin_unlock(&dc->io_lock);
 
-		add_sequential(s->task);
-	}
-
-	sectors = max(s->task->sequential_io,
-		      s->task->sequential_io_avg) >> 9;
+	sectors = max(task->sequential_io,
+		      task->sequential_io_avg) >> 9;
 
 	if (dc->sequential_cutoff &&
 	    sectors >= dc->sequential_cutoff >> 9) {
-		trace_bcache_bypass_sequential(s->orig_bio);
+		trace_bcache_bypass_sequential(bio);
 		goto skip;
 	}
 
 	if (congested && sectors >= congested) {
-		trace_bcache_bypass_congested(s->orig_bio);
+		trace_bcache_bypass_congested(bio);
 		goto skip;
 	}
 
 rescale:
 	bch_rescale_priorities(c, bio_sectors(bio));
-	return;
+	return false;
 skip:
-	bch_mark_sectors_bypassed(s, bio_sectors(bio));
-	s->op.skip = true;
+	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+	return true;
 }
 
+/* Cache lookup */
+
+struct search {
+	/* Stack frame for bio_complete */
+	struct closure		cl;
+
+	struct bcache_device	*d;
+
+	struct bbio		bio;
+	struct bio		*orig_bio;
+	struct bio		*cache_miss;
+
+	unsigned		insert_bio_sectors;
+
+	unsigned		recoverable:1;
+	unsigned		unaligned_bvec:1;
+	unsigned		write:1;
+	unsigned		read_dirty_data:1;
+
+	unsigned long		start_time;
+
+	struct btree_op		op;
+	struct data_insert_op	iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+	struct closure *cl = bio->bi_private;
+	struct search *s = container_of(cl, struct search, cl);
+
+	/*
+	 * If the bucket was reused while our bio was in flight, we might have
+	 * read the wrong data. Set s->error but not error so it doesn't get
+	 * counted against the cache device, but we'll still reread the data
+	 * from the backing device.
+	 */
+
+	if (error)
+		s->iop.error = error;
+	else if (ptr_stale(s->iop.c, &b->key, 0)) {
+		atomic_long_inc(&s->iop.c->cache_read_races);
+		s->iop.error = -EINTR;
+	}
+
+	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *n, *bio = &s->bio.bio;
+	struct bkey *bio_key;
+	unsigned ptr;
+
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+		return MAP_CONTINUE;
+
+	if (KEY_INODE(k) != s->iop.inode ||
+	    KEY_START(k) > bio->bi_sector) {
+		unsigned bio_sectors = bio_sectors(bio);
+		unsigned sectors = KEY_INODE(k) == s->iop.inode
+			? min_t(uint64_t, INT_MAX,
+				KEY_START(k) - bio->bi_sector)
+			: INT_MAX;
+
+		int ret = s->d->cache_miss(b, s, bio, sectors);
+		if (ret != MAP_CONTINUE)
+			return ret;
+
+		/* if this was a complete miss we shouldn't get here */
+		BUG_ON(bio_sectors <= sectors);
+	}
+
+	if (!KEY_SIZE(k))
+		return MAP_CONTINUE;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	if (KEY_DIRTY(k))
+		s->read_dirty_data = true;
+
+	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
+				     KEY_OFFSET(k) - bio->bi_sector),
+			  GFP_NOIO, s->d->bio_split);
+
+	bio_key = &container_of(n, struct bbio, bio)->key;
+	bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+	n->bi_end_io	= bch_cache_read_endio;
+	n->bi_private	= &s->cl;
+
+	/*
+	 * The bucket we're reading from might be reused while our bio
+	 * is in flight, and we could then end up reading the wrong
+	 * data.
+	 *
+	 * We guard against this by checking (in bch_cache_read_endio()) if
+	 * the pointer is stale again; if so, we treat it as an error
+	 * and reread from the backing device (but we don't pass that
+	 * error up anywhere).
+	 */
+
+	__bch_submit_bbio(n, b->c);
+	return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, iop.cl);
+	struct bio *bio = &s->bio.bio;
+
+	int ret = bch_btree_map_keys(&s->op, s->iop.c,
+				     &KEY(s->iop.inode, bio->bi_sector, 0),
+				     cache_lookup_fn, MAP_END_KEY);
+	if (ret == -EAGAIN)
+		continue_at(cl, cache_lookup, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->iop.error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
+}
+
+static void bio_complete(struct search *s)
+{
+	if (s->orig_bio) {
+		int cpu, rw = bio_data_dir(s->orig_bio);
+		unsigned long duration = jiffies - s->start_time;
+
+		cpu = part_stat_lock();
+		part_round_stats(cpu, &s->d->disk->part0);
+		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
+		part_stat_unlock();
+
+		trace_bcache_request_end(s->d, s->orig_bio);
+		bio_endio(s->orig_bio, s->iop.error);
+		s->orig_bio = NULL;
+	}
+}
+
+static void do_bio_hook(struct search *s)
+{
+	struct bio *bio = &s->bio.bio;
+	memcpy(bio, s->orig_bio, sizeof(struct bio));
+
+	bio->bi_end_io		= request_endio;
+	bio->bi_private		= &s->cl;
+	atomic_set(&bio->bi_cnt, 3);
+}
+
+static void search_free(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	bio_complete(s);
+
+	if (s->iop.bio)
+		bio_put(s->iop.bio);
+
+	if (s->unaligned_bvec)
+		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
+
+	closure_debug_destroy(cl);
+	mempool_free(s, s->d->c->search);
+}
+
+static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+{
+	struct search *s;
+	struct bio_vec *bv;
+
+	s = mempool_alloc(d->c->search, GFP_NOIO);
+	memset(s, 0, offsetof(struct search, iop.insert_keys));
+
+	__closure_init(&s->cl, NULL);
+
+	s->iop.inode		= d->id;
+	s->iop.c		= d->c;
+	s->d			= d;
+	s->op.lock		= -1;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
+	s->orig_bio		= bio;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->recoverable		= 1;
+	s->start_time		= jiffies;
+	do_bio_hook(s);
+
+	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
+		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
+		memcpy(bv, bio_iovec(bio),
+		       sizeof(struct bio_vec) * bio_segments(bio));
+
+		s->bio.bio.bi_io_vec	= bv;
+		s->unaligned_bvec	= 1;
+	}
+
+	return s;
+}
+
+/* Cached devices */
+
+static void cached_dev_bio_complete(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	search_free(cl);
+	cached_dev_put(dc);
+}
+
+/* Process reads */
+
+static void cached_dev_cache_miss_done(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+
+	if (s->iop.replace_collision)
+		bch_mark_cache_miss_collision(s->iop.c, s->d);
+
+	if (s->iop.bio) {
+		int i;
+		struct bio_vec *bv;
+
+		bio_for_each_segment_all(bv, s->iop.bio, i)
+			__free_page(bv->bv_page);
+	}
+
+	cached_dev_bio_complete(cl);
+}
+
+static void cached_dev_read_error(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct bio *bio = &s->bio.bio;
+	struct bio_vec *bv;
+	int i;
+
+	if (s->recoverable) {
+		/* Retry from the backing device: */
+		trace_bcache_read_retry(s->orig_bio);
+
+		s->iop.error = 0;
+		bv = s->bio.bio.bi_io_vec;
+		do_bio_hook(s);
+		s->bio.bio.bi_io_vec = bv;
+
+		if (!s->unaligned_bvec)
+			bio_for_each_segment(bv, s->orig_bio, i)
+				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+		else
+			memcpy(s->bio.bio.bi_io_vec,
+			       bio_iovec(s->orig_bio),
+			       sizeof(struct bio_vec) *
+			       bio_segments(s->orig_bio));
+
+		/* XXX: invalidate cache */
+
+		closure_bio_submit(bio, cl, s->d);
+	}
+
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
+}
+
+static void cached_dev_read_done(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	/*
+	 * We had a cache miss; cache_bio now contains data ready to be inserted
+	 * into the cache.
+	 *
+	 * First, we copy the data we just read from cache_bio's bounce buffers
+	 * to the buffers the original bio pointed to:
+	 */
+
+	if (s->iop.bio) {
+		bio_reset(s->iop.bio);
+		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		bch_bio_map(s->iop.bio, NULL);
+
+		bio_copy_data(s->cache_miss, s->iop.bio);
+
+		bio_put(s->cache_miss);
+		s->cache_miss = NULL;
+	}
+
+	if (verify(dc, &s->bio.bio) && s->recoverable &&
+	    !s->unaligned_bvec && !s->read_dirty_data)
+		bch_data_verify(dc, s->orig_bio);
+
+	bio_complete(s);
+
+	if (s->iop.bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+		BUG_ON(!s->iop.replace);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+	}
+
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
+}
+
+static void cached_dev_read_done_bh(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	bch_mark_cache_accounting(s->iop.c, s->d,
+				  !s->cache_miss, s->iop.bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+
+	if (s->iop.error)
+		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+	else if (s->iop.bio || verify(dc, &s->bio.bio))
+		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
+	else
+		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
+}
+
+static int cached_dev_cache_miss(struct btree *b, struct search *s,
+				 struct bio *bio, unsigned sectors)
+{
+	int ret = MAP_CONTINUE;
+	unsigned reada = 0;
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct bio *miss, *cache_bio;
+
+	if (s->cache_miss || s->iop.bypass) {
+		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
+		goto out_submit;
+	}
+
+	if (!(bio->bi_rw & REQ_RAHEAD) &&
+	    !(bio->bi_rw & REQ_META) &&
+	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+		reada = min_t(sector_t, dc->readahead >> 9,
+			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+
+	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+
+	s->iop.replace_key = KEY(s->iop.inode,
+				 bio->bi_sector + s->insert_bio_sectors,
+				 s->insert_bio_sectors);
+
+	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+	if (ret)
+		return ret;
+
+	s->iop.replace = true;
+
+	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+
+	/* btree_search_recurse()'s btree iterator is no good anymore */
+	ret = miss == bio ? MAP_DONE : -EINTR;
+
+	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+			dc->disk.bio_split);
+	if (!cache_bio)
+		goto out_submit;
+
+	cache_bio->bi_sector	= miss->bi_sector;
+	cache_bio->bi_bdev	= miss->bi_bdev;
+	cache_bio->bi_size	= s->insert_bio_sectors << 9;
+
+	cache_bio->bi_end_io	= request_endio;
+	cache_bio->bi_private	= &s->cl;
+
+	bch_bio_map(cache_bio, NULL);
+	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+		goto out_put;
+
+	if (reada)
+		bch_mark_cache_readahead(s->iop.c, s->d);
+
+	s->cache_miss	= miss;
+	s->iop.bio	= cache_bio;
+	bio_get(cache_bio);
+	closure_bio_submit(cache_bio, &s->cl, s->d);
+
+	return ret;
+out_put:
+	bio_put(cache_bio);
+out_submit:
+	miss->bi_end_io		= request_endio;
+	miss->bi_private	= &s->cl;
+	closure_bio_submit(miss, &s->cl, s->d);
+	return ret;
+}
+
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
+{
+	struct closure *cl = &s->cl;
+
+	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+	continue_at(cl, cached_dev_read_done_bh, NULL);
+}
+
+/* Process writes */
+
+static void cached_dev_write_complete(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	up_read_non_owner(&dc->writeback_lock);
+	cached_dev_bio_complete(cl);
+}
+
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
+{
+	struct closure *cl = &s->cl;
+	struct bio *bio = &s->bio.bio;
+	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+
+	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
+
+	down_read_non_owner(&dc->writeback_lock);
+	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
+		/*
+		 * We overlap with some dirty data undergoing background
+		 * writeback, force this write to writeback
+		 */
+		s->iop.bypass = false;
+		s->iop.writeback = true;
+	}
+
+	/*
+	 * Discards aren't _required_ to do anything, so skipping if
+	 * check_overlapping returned true is ok
+	 *
+	 * But check_overlapping drops dirty keys for which io hasn't started,
+	 * so we still want to call it.
+	 */
+	if (bio->bi_rw & REQ_DISCARD)
+		s->iop.bypass = true;
+
+	if (should_writeback(dc, s->orig_bio,
+			     cache_mode(dc, bio),
+			     s->iop.bypass)) {
+		s->iop.bypass = false;
+		s->iop.writeback = true;
+	}
+
+	if (s->iop.bypass) {
+		s->iop.bio = s->orig_bio;
+		bio_get(s->iop.bio);
+
+		if (!(bio->bi_rw & REQ_DISCARD) ||
+		    blk_queue_discard(bdev_get_queue(dc->bdev)))
+			closure_bio_submit(bio, cl, s->d);
+	} else if (s->iop.writeback) {
+		bch_writeback_add(dc);
+		s->iop.bio = bio;
+
+		if (bio->bi_rw & REQ_FLUSH) {
+			/* Also need to send a flush to the backing device */
+			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
+							     dc->disk.bio_split);
+
+			flush->bi_rw	= WRITE_FLUSH;
+			flush->bi_bdev	= bio->bi_bdev;
+			flush->bi_end_io = request_endio;
+			flush->bi_private = cl;
+
+			closure_bio_submit(flush, cl, s->d);
+		}
+	} else {
+		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
+					      dc->disk.bio_split);
+
+		closure_bio_submit(bio, cl, s->d);
+	}
+
+	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+	continue_at(cl, cached_dev_write_complete, NULL);
+}
+
+static void cached_dev_nodata(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct bio *bio = &s->bio.bio;
+
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
+
+	/* If it's a flush, we send the flush to the backing device too */
+	closure_bio_submit(bio, cl, s->d);
+
+	continue_at(cl, cached_dev_bio_complete, NULL);
+}
+
+/* Cached devices - read & write stuff */
+
 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct search *s;
@@ -1192,14 +1131,24 @@
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
-		trace_bcache_request_start(s, bio);
+		trace_bcache_request_start(s->d, bio);
 
-		if (!bio_has_data(bio))
-			request_nodata(dc, s);
-		else if (rw)
-			request_write(dc, s);
-		else
-			request_read(dc, s);
+		if (!bio->bi_size) {
+			/*
+			 * can't call bch_journal_meta from under
+			 * generic_make_request
+			 */
+			continue_at_nobarrier(&s->cl,
+					      cached_dev_nodata,
+					      bcache_wq);
+		} else {
+			s->iop.bypass = check_should_bypass(dc, bio);
+
+			if (rw)
+				cached_dev_write(dc, s);
+			else
+				cached_dev_read(dc, s);
+		}
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1274,9 +1223,19 @@
 	bio_advance(bio, min(sectors << 9, bio->bi_size));
 
 	if (!bio->bi_size)
-		s->op.lookup_done = true;
+		return MAP_DONE;
 
-	return 0;
+	return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
+
+	continue_at(cl, search_free, NULL);
 }
 
 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1295,23 +1254,28 @@
 	cl = &s->cl;
 	bio = &s->bio.bio;
 
-	trace_bcache_request_start(s, bio);
+	trace_bcache_request_start(s->d, bio);
 
-	if (bio_has_data(bio) && !rw) {
-		closure_call(&s->op.cl, btree_read_async, NULL, cl);
-	} else if (bio_has_data(bio) || s->op.skip) {
-		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+	if (!bio->bi_size) {
+		/*
+		 * can't call bch_journal_meta from under
+		 * generic_make_request
+		 */
+		continue_at_nobarrier(&s->cl,
+				      flash_dev_nodata,
+				      bcache_wq);
+	} else if (rw) {
+		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
-		s->writeback	= true;
-		s->op.cache_bio	= bio;
+		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
+		s->iop.writeback	= true;
+		s->iop.bio		= bio;
 
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 	} else {
-		/* No data - probably a cache flush */
-		if (s->op.flush_journal)
-			bch_journal_meta(s->op.c, cl);
+		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
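
Two structural points in the request.c rewrite above. First,
check_should_skip() becomes check_should_bypass(): it takes only (dc, bio),
returns the decision as a bool, and the caller stores it in s->iop.bypass
instead of the helper mutating the search; the sequential_merge option is
gone, so the recent-IO hash table is consulted unconditionally. Second,
struct search is now private to this file and splits its state between a
btree_op (op) and a data_insert_op (iop). The heart of the bypass decision is
plain arithmetic on byte counts; a reduced sketch with illustrative names
(>> 9 converts bytes to 512-byte sectors):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static bool sequential_bypass(u64 seq_io, u64 seq_io_avg,
				      u64 cutoff_bytes, unsigned congested)
	{
		u64 sectors = max(seq_io, seq_io_avg) >> 9;

		if (cutoff_bytes && sectors >= cutoff_bytes >> 9)
			return true;	/* long sequential stream: bypass */

		if (congested && sectors >= congested)
			return true;	/* cache device congested: bypass */

		return false;
	}

seq_io and seq_io_avg here stand for the task's current and average
sequential-IO byte counts, matching the max(task->sequential_io,
task->sequential_io_avg) computation above.
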
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 57dc478..2cd65bf 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -3,40 +3,33 @@
 
 #include <linux/cgroup.h>
 
-struct search {
-	/* Stack frame for bio_complete */
+struct data_insert_op {
 	struct closure		cl;
+	struct cache_set	*c;
+	struct bio		*bio;
 
-	struct bcache_device	*d;
-	struct task_struct	*task;
-
-	struct bbio		bio;
-	struct bio		*orig_bio;
-	struct bio		*cache_miss;
-	unsigned		cache_bio_sectors;
-
-	unsigned		recoverable:1;
-	unsigned		unaligned_bvec:1;
-
-	unsigned		write:1;
-	unsigned		writeback:1;
-
-	/* IO error returned to s->bio */
+	unsigned		inode;
+	uint16_t		write_point;
+	uint16_t		write_prio;
 	short			error;
-	unsigned long		start_time;
 
-	/* Anything past op->keys won't get zeroed in do_bio_hook */
-	struct btree_op		op;
+	unsigned		bypass:1;
+	unsigned		writeback:1;
+	unsigned		flush_journal:1;
+	unsigned		csum:1;
+
+	unsigned		replace:1;
+	unsigned		replace_collision:1;
+
+	unsigned		insert_data_done:1;
+
+	/* Anything past this point won't get zeroed in search_alloc() */
+	struct keylist		insert_keys;
+	BKEY_PADDED(replace_key);
 };
 
-void bch_cache_read_endio(struct bio *, int);
 unsigned bch_get_congested(struct cache_set *);
-void bch_insert_data(struct closure *cl);
-void bch_btree_insert_async(struct closure *);
-void bch_cache_read_endio(struct bio *, int);
-
-void bch_open_buckets_free(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
+void bch_data_insert(struct closure *cl);
 
 void bch_cached_dev_request_init(struct cached_dev *dc);
 void bch_flash_dev_request_init(struct bcache_device *d);
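
struct search is no longer part of this header: request.h now exports only
struct data_insert_op and bch_data_insert(), so other files can describe an
insert without knowing anything about the request state machine. Note the
placement comment - search_alloc() zeroes a search only up to
iop.insert_keys, so insert_keys and replace_key must be initialized
explicitly (bch_data_insert() calls bch_keylist_init() for exactly this
reason). A sketch of how a caller drives an insert, modeled on the
flash_dev_make_request() path above; the wrapper name and the elided error
handling are my own:

	static void start_insert(struct data_insert_op *op, struct cache_set *c,
				 struct bio *bio, unsigned inode,
				 struct closure *parent)
	{
		op->c		= c;
		op->bio		= bio;
		op->inode	= inode;
		op->writeback	= true;	/* complete only once the key is inserted */

		closure_call(&op->cl, bch_data_insert, NULL, parent);
	}
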
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e7..84d0782 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -7,7 +7,6 @@
 #include "bcache.h"
 #include "stats.h"
 #include "btree.h"
-#include "request.h"
 #include "sysfs.h"
 
 /*
@@ -196,35 +195,36 @@
 			atomic_inc(&stats->cache_bypass_misses);
 }
 
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+			       bool hit, bool bypass)
 {
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
-	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+	mark_cache_stats(&c->accounting.collector, hit, bypass);
 #ifdef CONFIG_CGROUP_BCACHE
 	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
 #endif
 }
 
-void bch_mark_cache_readahead(struct search *s)
+void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
 {
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+	atomic_inc(&c->accounting.collector.cache_readaheads);
 }
 
-void bch_mark_cache_miss_collision(struct search *s)
+void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
 {
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
-	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+	atomic_inc(&c->accounting.collector.cache_miss_collisions);
 }
 
-void bch_mark_sectors_bypassed(struct search *s, int sectors)
+void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+			       int sectors)
 {
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
-	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
 }
 
 void bch_cache_accounting_init(struct cache_accounting *acc,
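
The stats helpers above lose their dependence on struct search (and with it
the request.h include): each now takes the cache_set and the device
explicitly and recovers the cached_dev via container_of(). One loose end
worth flagging: the CONFIG_CGROUP_BCACHE branch in bch_mark_cache_accounting()
still reads s->orig_bio even though no s is in scope after this change, so
that configuration presumably no longer builds as-is. The decoupled
accounting shape, reduced to essentials with illustrative names:

	#include <linux/atomic.h>

	struct hit_stats {
		atomic_t hits, misses;
	};

	/* count once against the device and once against the whole set */
	static void mark_hit(struct hit_stats *dev, struct hit_stats *set,
			     bool hit)
	{
		atomic_inc(hit ? &dev->hits : &dev->misses);
		atomic_inc(hit ? &set->hits : &set->misses);
	}
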
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index c7c7a8f..adbff14 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -38,7 +38,9 @@
 	struct cache_stats day;
 };
 
-struct search;
+struct cache_set;
+struct cached_dev;
+struct bcache_device;
 
 void bch_cache_accounting_init(struct cache_accounting *acc,
 			       struct closure *parent);
@@ -50,9 +52,10 @@
 
 void bch_cache_accounting_destroy(struct cache_accounting *acc);
 
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
-void bch_mark_cache_readahead(struct search *s);
-void bch_mark_cache_miss_collision(struct search *s);
-void bch_mark_sectors_bypassed(struct search *s, int sectors);
+void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
+			       bool, bool);
+void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
+void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
+void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
 
 #endif /* _BCACHE_STATS_H_ */
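
stats.h follows suit: the lone struct search forward declaration becomes
forward declarations of cache_set, cached_dev, and bcache_device. Because the
prototypes only pass pointers, the compiler never needs the full definitions,
which keeps stats.h includable without dragging in request.h or bcache.h. The
idiom in isolation, as a hypothetical header fragment:

	/* pointer-only API: forward declarations are enough */
	struct cache_set;
	struct cached_dev;

	void mark_sectors_bypassed(struct cache_set *, struct cached_dev *,
				   int sectors);
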
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 547c4c5..dec15cd 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
+#include <linux/idr.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
@@ -45,21 +46,13 @@
 	NULL
 };
 
-struct uuid_entry_v0 {
-	uint8_t		uuid[16];
-	uint8_t		label[32];
-	uint32_t	first_reg;
-	uint32_t	last_reg;
-	uint32_t	invalidated;
-	uint32_t	pad;
-};
-
 static struct kobject *bcache_kobj;
 struct mutex bch_register_lock;
 LIST_HEAD(bch_cache_sets);
 static LIST_HEAD(uncached_devices);
 
-static int bcache_major, bcache_minor;
+static int bcache_major;
+static DEFINE_IDA(bcache_minor);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
 
@@ -382,7 +375,7 @@
 {
 	struct bkey *k = &j->uuid_bucket;
 
-	if (__bch_ptr_invalid(c, 1, k))
+	if (bch_btree_ptr_invalid(c, k))
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
@@ -427,7 +420,7 @@
 
 	lockdep_assert_held(&bch_register_lock);
 
-	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
 		return 1;
 
 	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -435,7 +428,7 @@
 	closure_sync(&cl);
 
 	bkey_copy(&c->uuid_bucket, &k.key);
-	__bkey_put(c, &k.key);
+	bkey_put(c, &k.key);
 	return 0;
 }
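
A pattern that recurs through the surrounding super.c hunks: allocator entry
points such as bch_bucket_alloc_set() and bch_bucket_alloc() stop taking a
closure to be woken when space frees up and instead take a bool wait flag,
blocking internally when it is true. The shape of that API change, with
entirely hypothetical names:

	#include <linux/errno.h>

	struct pool;
	long try_take_slot(struct pool *p);	/* hypothetical, -1 if empty */
	void wait_for_slot(struct pool *p);	/* hypothetical, sleeps */

	static long alloc_slot(struct pool *p, bool wait)
	{
		long slot;

		while ((slot = try_take_slot(p)) < 0) {
			if (!wait)
				return -ENOSPC;	/* fail fast for non-blockers */
			wait_for_slot(p);
		}

		return slot;
	}
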
 
@@ -562,10 +555,10 @@
 		}
 
 		p->next_bucket	= ca->prio_buckets[i + 1];
-		p->magic	= pset_magic(ca);
+		p->magic	= pset_magic(&ca->sb);
 		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
 		BUG_ON(bucket == -1);
 
 		mutex_unlock(&ca->set->bucket_lock);
@@ -613,7 +606,7 @@
 			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 				pr_warn("bad csum reading priorities");
 
-			if (p->magic != pset_magic(ca))
+			if (p->magic != pset_magic(&ca->sb))
 				pr_warn("bad magic reading priorities");
 
 			bucket = p->next_bucket;
@@ -630,7 +623,7 @@
 static int open_dev(struct block_device *b, fmode_t mode)
 {
 	struct bcache_device *d = b->bd_disk->private_data;
-	if (atomic_read(&d->closing))
+	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
 		return -ENXIO;
 
 	closure_get(&d->cl);
@@ -659,20 +652,24 @@
 
 void bcache_device_stop(struct bcache_device *d)
 {
-	if (!atomic_xchg(&d->closing, 1))
+	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
 		closure_queue(&d->cl);
 }
 
 static void bcache_device_unlink(struct bcache_device *d)
 {
-	unsigned i;
-	struct cache *ca;
+	lockdep_assert_held(&bch_register_lock);
 
-	sysfs_remove_link(&d->c->kobj, d->name);
-	sysfs_remove_link(&d->kobj, "cache");
+	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+		unsigned i;
+		struct cache *ca;
 
-	for_each_cache(ca, d->c, i)
-		bd_unlink_disk_holder(ca->bdev, d->disk);
+		sysfs_remove_link(&d->c->kobj, d->name);
+		sysfs_remove_link(&d->kobj, "cache");
+
+		for_each_cache(ca, d->c, i)
+			bd_unlink_disk_holder(ca->bdev, d->disk);
+	}
 }
 
 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
@@ -696,19 +693,16 @@
 {
 	lockdep_assert_held(&bch_register_lock);
 
-	if (atomic_read(&d->detaching)) {
+	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
 		struct uuid_entry *u = d->c->uuids + d->id;
 
 		SET_UUID_FLASH_ONLY(u, 0);
 		memcpy(u->uuid, invalid_uuid, 16);
 		u->invalidated = cpu_to_le32(get_seconds());
 		bch_uuid_write(d->c);
-
-		atomic_set(&d->detaching, 0);
 	}
 
-	if (!d->flush_done)
-		bcache_device_unlink(d);
+	bcache_device_unlink(d);
 
 	d->c->devices[d->id] = NULL;
 	closure_put(&d->c->caching);
@@ -739,14 +733,20 @@
 		del_gendisk(d->disk);
 	if (d->disk && d->disk->queue)
 		blk_cleanup_queue(d->disk->queue);
-	if (d->disk)
+	if (d->disk) {
+		ida_simple_remove(&bcache_minor, d->disk->first_minor);
 		put_disk(d->disk);
+	}
 
 	bio_split_pool_free(&d->bio_split_hook);
 	if (d->unaligned_bvec)
 		mempool_destroy(d->unaligned_bvec);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
+	if (is_vmalloc_addr(d->full_dirty_stripes))
+		vfree(d->full_dirty_stripes);
+	else
+		kfree(d->full_dirty_stripes);
 	if (is_vmalloc_addr(d->stripe_sectors_dirty))
 		vfree(d->stripe_sectors_dirty);
 	else
@@ -760,15 +760,19 @@
 {
 	struct request_queue *q;
 	size_t n;
+	int minor;
 
-	if (!d->stripe_size_bits)
-		d->stripe_size_bits = 31;
+	if (!d->stripe_size)
+		d->stripe_size = 1 << 31;
 
-	d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
-		d->stripe_size_bits;
+	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
 
-	if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+	if (!d->nr_stripes ||
+	    d->nr_stripes > INT_MAX ||
+	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
+		pr_err("nr_stripes too large");
 		return -ENOMEM;
+	}
 
 	n = d->nr_stripes * sizeof(atomic_t);
 	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
@@ -777,22 +781,38 @@
 	if (!d->stripe_sectors_dirty)
 		return -ENOMEM;
 
+	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
+	d->full_dirty_stripes = n < PAGE_SIZE << 6
+		? kzalloc(n, GFP_KERNEL)
+		: vzalloc(n);
+	if (!d->full_dirty_stripes)
+		return -ENOMEM;
+
+	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
+	if (minor < 0)
+		return minor;
+
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
 	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
 				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
 	    bio_split_pool_init(&d->bio_split_hook) ||
-	    !(d->disk = alloc_disk(1)) ||
-	    !(q = blk_alloc_queue(GFP_KERNEL)))
+	    !(d->disk = alloc_disk(1))) {
+		ida_simple_remove(&bcache_minor, minor);
 		return -ENOMEM;
+	}
 
 	set_capacity(d->disk, sectors);
-	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
 
 	d->disk->major		= bcache_major;
-	d->disk->first_minor	= bcache_minor++;
+	d->disk->first_minor	= minor;
 	d->disk->fops		= &bcache_ops;
 	d->disk->private_data	= d;
 
+	q = blk_alloc_queue(GFP_KERNEL);
+	if (!q)
+		return -ENOMEM;
+
 	blk_queue_make_request(q, NULL);
 	d->disk->queue			= q;
 	q->queuedata			= d;
@@ -874,7 +894,7 @@
 	struct closure cl;
 	closure_init_stack(&cl);
 
-	BUG_ON(!atomic_read(&dc->disk.detaching));
+	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
 	BUG_ON(atomic_read(&dc->count));
 
 	mutex_lock(&bch_register_lock);
@@ -888,6 +908,8 @@
 	bcache_device_detach(&dc->disk);
 	list_move(&dc->list, &uncached_devices);
 
+	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+
 	mutex_unlock(&bch_register_lock);
 
 	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
@@ -900,10 +922,10 @@
 {
 	lockdep_assert_held(&bch_register_lock);
 
-	if (atomic_read(&dc->disk.closing))
+	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
 		return;
 
-	if (atomic_xchg(&dc->disk.detaching, 1))
+	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
 		return;
 
 	/*
@@ -1030,6 +1052,7 @@
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 
 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+	kthread_stop(dc->writeback_thread);
 
 	mutex_lock(&bch_register_lock);
 
@@ -1058,11 +1081,7 @@
 	struct bcache_device *d = &dc->disk;
 
 	mutex_lock(&bch_register_lock);
-	d->flush_done = 1;
-
-	if (d->c)
-		bcache_device_unlink(d);
-
+	bcache_device_unlink(d);
 	mutex_unlock(&bch_register_lock);
 
 	bch_cache_accounting_destroy(&dc->accounting);
@@ -1088,7 +1107,6 @@
 	spin_lock_init(&dc->io_lock);
 	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
-	dc->sequential_merge		= true;
 	dc->sequential_cutoff		= 4 << 20;
 
 	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@@ -1260,7 +1278,8 @@
 {
 	va_list args;
 
-	if (test_bit(CACHE_SET_STOPPING, &c->flags))
+	if (c->on_error != ON_ERROR_PANIC &&
+	    test_bit(CACHE_SET_STOPPING, &c->flags))
 		return false;
 
 	/* XXX: we can be called from atomic context
@@ -1275,6 +1294,9 @@
 
 	printk(", disabling caching\n");
 
+	if (c->on_error == ON_ERROR_PANIC)
+		panic("panic forced after error\n");
+
 	bch_cache_set_unregister(c);
 	return true;
 }
@@ -1339,6 +1361,9 @@
 	kobject_put(&c->internal);
 	kobject_del(&c->kobj);
 
+	if (c->gc_thread)
+		kthread_stop(c->gc_thread);
+
 	if (!IS_ERR_OR_NULL(c->root))
 		list_add(&c->root->list, &c->btree_cache);
 
@@ -1433,12 +1458,19 @@
 
 	c->sort_crit_factor = int_sqrt(c->btree_pages);
 
-	mutex_init(&c->bucket_lock);
-	mutex_init(&c->sort_lock);
-	spin_lock_init(&c->sort_time_lock);
 	closure_init_unlocked(&c->sb_write);
+	mutex_init(&c->bucket_lock);
+	init_waitqueue_head(&c->try_wait);
+	init_waitqueue_head(&c->bucket_wait);
 	closure_init_unlocked(&c->uuid_write);
-	spin_lock_init(&c->btree_read_time_lock);
+	mutex_init(&c->sort_lock);
+
+	spin_lock_init(&c->sort_time.lock);
+	spin_lock_init(&c->btree_gc_time.lock);
+	spin_lock_init(&c->btree_split_time.lock);
+	spin_lock_init(&c->btree_read_time.lock);
+	spin_lock_init(&c->try_harder_time.lock);
+
 	bch_moving_init_cache_set(c);
 
 	INIT_LIST_HEAD(&c->list);
@@ -1483,11 +1515,10 @@
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
 	struct cache *ca;
+	struct closure cl;
 	unsigned i;
 
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	closure_init_stack(&cl);
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
@@ -1498,7 +1529,7 @@
 		struct jset *j;
 
 		err = "cannot allocate memory for journal";
-		if (bch_journal_read(c, &journal, &op))
+		if (bch_journal_read(c, &journal))
 			goto err;
 
 		pr_debug("btree_journal_read() done");
@@ -1522,23 +1553,23 @@
 		k = &j->btree_root;
 
 		err = "bad btree root";
-		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+		if (bch_btree_ptr_invalid(c, k))
 			goto err;
 
 		err = "error reading btree root";
-		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+		c->root = bch_btree_node_get(c, k, j->btree_level, true);
 		if (IS_ERR_OR_NULL(c->root))
 			goto err;
 
 		list_del_init(&c->root->list);
 		rw_unlock(true, c->root);
 
-		err = uuid_read(c, j, &op.cl);
+		err = uuid_read(c, j, &cl);
 		if (err)
 			goto err;
 
 		err = "error in recovery";
-		if (bch_btree_check(c, &op))
+		if (bch_btree_check(c))
 			goto err;
 
 		bch_journal_mark(c, &journal);
@@ -1570,11 +1601,9 @@
 		if (j->version < BCACHE_JSET_VERSION_UUID)
 			__uuid_write(c);
 
-		bch_journal_replay(c, &journal, &op);
+		bch_journal_replay(c, &journal);
 	} else {
 		pr_notice("invalidating existing data");
-		/* Don't want invalidate_buckets() to queue a gc yet */
-		closure_lock(&c->gc, NULL);
 
 		for_each_cache(ca, c, i) {
 			unsigned j;
@@ -1600,15 +1629,15 @@
 
 		err = "cannot allocate new UUID bucket";
 		if (__uuid_write(c))
-			goto err_unlock_gc;
+			goto err;
 
 		err = "cannot allocate new btree root";
-		c->root = bch_btree_node_alloc(c, 0, &op.cl);
+		c->root = bch_btree_node_alloc(c, 0, true);
 		if (IS_ERR_OR_NULL(c->root))
-			goto err_unlock_gc;
+			goto err;
 
 		bkey_copy_key(&c->root->key, &MAX_KEY);
-		bch_btree_node_write(c->root, &op.cl);
+		bch_btree_node_write(c->root, &cl);
 
 		bch_btree_set_root(c->root);
 		rw_unlock(true, c->root);
@@ -1621,14 +1650,14 @@
 		SET_CACHE_SYNC(&c->sb, true);
 
 		bch_journal_next(&c->journal);
-		bch_journal_meta(c, &op.cl);
-
-		/* Unlock */
-		closure_set_stopped(&c->gc.cl);
-		closure_put(&c->gc.cl);
+		bch_journal_meta(c, &cl);
 	}
 
-	closure_sync(&op.cl);
+	err = "error starting gc thread";
+	if (bch_gc_thread_start(c))
+		goto err;
+
+	closure_sync(&cl);
 	c->sb.last_mount = get_seconds();
 	bcache_write_super(c);
 
@@ -1638,13 +1667,10 @@
 	flash_devs_run(c);
 
 	return;
-err_unlock_gc:
-	closure_set_stopped(&c->gc.cl);
-	closure_put(&c->gc.cl);
 err:
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	/* XXX: test this, it's broken */
-	bch_cache_set_error(c, err);
+	bch_cache_set_error(c, "%s", err);
 }
 
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -1725,8 +1751,6 @@
 	if (ca->set)
 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
 
-	bch_cache_allocator_exit(ca);
-
 	bio_split_pool_free(&ca->bio_split_hook);
 
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
@@ -1758,8 +1782,6 @@
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	INIT_LIST_HEAD(&ca->discards);
-
 	bio_init(&ca->journal.bio);
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -2006,7 +2028,6 @@
 static void bcache_exit(void)
 {
 	bch_debug_exit();
-	bch_writeback_exit();
 	bch_request_exit();
 	bch_btree_exit();
 	if (bcache_kobj)
@@ -2039,7 +2060,6 @@
 	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_btree_init() ||
 	    bch_request_init() ||
-	    bch_writeback_init() ||
 	    bch_debug_init(bcache_kobj))
 		goto err;
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 924dcfd..80d4c2b 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -21,6 +21,12 @@
 	NULL
 };
 
+static const char * const error_actions[] = {
+	"unregister",
+	"panic",
+	NULL
+};
+
 write_attribute(attach);
 write_attribute(detach);
 write_attribute(unregister);
@@ -66,7 +72,6 @@
 rw_attribute(congested_write_threshold_us);
 
 rw_attribute(sequential_cutoff);
-rw_attribute(sequential_merge);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
 rw_attribute(writeback_metadata);
@@ -90,11 +95,14 @@
 rw_attribute(running);
 rw_attribute(label);
 rw_attribute(readahead);
+rw_attribute(errors);
 rw_attribute(io_error_limit);
 rw_attribute(io_error_halflife);
 rw_attribute(verify);
+rw_attribute(bypass_torture_test);
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
 rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
@@ -116,6 +124,7 @@
 
 	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
 	var_printf(verify,		"%i");
+	var_printf(bypass_torture_test,	"%i");
 	var_printf(writeback_metadata,	"%i");
 	var_printf(writeback_running,	"%i");
 	var_print(writeback_delay);
@@ -150,10 +159,9 @@
 	sysfs_hprint(dirty_data,
 		     bcache_dev_sectors_dirty(&dc->disk) << 9);
 
-	sysfs_hprint(stripe_size,	(1 << dc->disk.stripe_size_bits) << 9);
+	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
 	var_printf(partial_stripes_expensive,	"%u");
 
-	var_printf(sequential_merge,	"%i");
 	var_hprint(sequential_cutoff);
 	var_hprint(readahead);
 
@@ -185,6 +193,7 @@
 
 	sysfs_strtoul(data_csum,	dc->disk.data_csum);
 	d_strtoul(verify);
+	d_strtoul(bypass_torture_test);
 	d_strtoul(writeback_metadata);
 	d_strtoul(writeback_running);
 	d_strtoul(writeback_delay);
@@ -199,7 +208,6 @@
 			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
 	d_strtoul(writeback_rate_d_smooth);
 
-	d_strtoul(sequential_merge);
 	d_strtoi_h(sequential_cutoff);
 	d_strtoi_h(readahead);
 
@@ -311,7 +319,6 @@
 	&sysfs_stripe_size,
 	&sysfs_partial_stripes_expensive,
 	&sysfs_sequential_cutoff,
-	&sysfs_sequential_merge,
 	&sysfs_clear_stats,
 	&sysfs_running,
 	&sysfs_state,
@@ -319,6 +326,7 @@
 	&sysfs_readahead,
 #ifdef CONFIG_BCACHE_DEBUG
 	&sysfs_verify,
+	&sysfs_bypass_torture_test,
 #endif
 	NULL
 };
@@ -366,7 +374,7 @@
 	}
 
 	if (attr == &sysfs_unregister) {
-		atomic_set(&d->detaching, 1);
+		set_bit(BCACHE_DEV_DETACHING, &d->flags);
 		bcache_device_stop(d);
 	}
 
@@ -481,7 +489,6 @@
 
 	sysfs_print(btree_used_percent,	btree_used(c));
 	sysfs_print(btree_nodes,	c->gc_stats.nodes);
-	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
 	sysfs_hprint(average_key_size,	average_key_size(c));
 
 	sysfs_print(cache_read_races,
@@ -492,6 +499,10 @@
 	sysfs_print(writeback_keys_failed,
 		    atomic_long_read(&c->writeback_keys_failed));
 
+	if (attr == &sysfs_errors)
+		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
+					       c->on_error);
+
 	/* See count_io_errors for why 88 */
 	sysfs_print(io_error_halflife,	c->error_decay * 88);
 	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);
@@ -506,6 +517,8 @@
 	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
 	sysfs_printf(verify,			"%i", c->verify);
 	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
+	sysfs_printf(expensive_debug_checks,
+		     "%i", c->expensive_debug_checks);
 	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
 	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
 	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
@@ -555,7 +568,7 @@
 	}
 
 	if (attr == &sysfs_trigger_gc)
-		bch_queue_gc(c);
+		wake_up_gc(c);
 
 	if (attr == &sysfs_prune_cache) {
 		struct shrink_control sc;
@@ -569,6 +582,15 @@
 	sysfs_strtoul(congested_write_threshold_us,
 		      c->congested_write_threshold_us);
 
+	if (attr == &sysfs_errors) {
+		ssize_t v = bch_read_string_list(buf, error_actions);
+
+		if (v < 0)
+			return v;
+
+		c->on_error = v;
+	}
+
 	if (attr == &sysfs_io_error_limit)
 		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
 
@@ -579,6 +601,7 @@
 	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
 	sysfs_strtoul(verify,			c->verify);
 	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
+	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
 	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
 	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
 	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);
@@ -618,8 +641,8 @@
 	&sysfs_cache_available_percent,
 
 	&sysfs_average_key_size,
-	&sysfs_dirty_data,
 
+	&sysfs_errors,
 	&sysfs_io_error_limit,
 	&sysfs_io_error_halflife,
 	&sysfs_congested,
@@ -653,6 +676,7 @@
 #ifdef CONFIG_BCACHE_DEBUG
 	&sysfs_verify,
 	&sysfs_key_merging_disabled,
+	&sysfs_expensive_debug_checks,
 #endif
 	&sysfs_gc_always_rewrite,
 	&sysfs_btree_shrinker_disabled,
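/*
 * Illustrative sketch: the new "errors" attribute round-trips through the
 * NULL-terminated error_actions[] table -- bch_read_string_list() maps the
 * written string to its index (which doubles as the enum value, e.g.
 * ON_ERROR_PANIC), and bch_snprint_string_list() prints the table with the
 * current choice marked.  A minimal stand-in for the parse side, with a
 * hypothetical name:
 */
#include <linux/string.h>

static ssize_t example_read_string_list(const char *buf,
					const char * const list[])
{
	size_t i;

	for (i = 0; list[i]; i++)
		if (sysfs_streq(buf, list[i]))
			return i;

	return -EINVAL;
}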
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index f7b6c19..adbc3df 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,6 +1,5 @@
 #include "bcache.h"
 #include "btree.h"
-#include "request.h"
 
 #include <linux/blktrace_api.h>
 #include <linux/module.h>
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 420dad5..462214e 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -168,10 +168,14 @@
 
 void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
 {
-	uint64_t now		= local_clock();
-	uint64_t duration	= time_after64(now, start_time)
+	uint64_t now, duration, last;
+
+	spin_lock(&stats->lock);
+
+	now		= local_clock();
+	duration	= time_after64(now, start_time)
 		? now - start_time : 0;
-	uint64_t last		= time_after64(now, stats->last)
+	last		= time_after64(now, stats->last)
 		? now - stats->last : 0;
 
 	stats->max_duration = max(stats->max_duration, duration);
@@ -188,6 +192,8 @@
 	}
 
 	stats->last = now ?: 1;
+
+	spin_unlock(&stats->lock);
 }
 
 /**
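/*
 * Illustrative sketch: bch_time_stats_update() is now reachable from
 * several kthreads (gc, btree split/read timing), and its max/ewma
 * updates are read-modify-write sequences, hence the new stats->lock.
 * The hazard in miniature, on a hypothetical struct:
 */
#include <linux/spinlock.h>

struct example_stats {
	spinlock_t	lock;
	u64		max_duration;
};

static void example_update(struct example_stats *s, u64 duration)
{
	spin_lock(&s->lock);
	/* Unlocked, two CPUs could both read the old max and one lose. */
	if (duration > s->max_duration)
		s->max_duration = duration;
	spin_unlock(&s->lock);
}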
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ea345c6..362c4b3 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,28 +15,18 @@
 
 struct closure;
 
-#ifdef CONFIG_BCACHE_EDEBUG
+#ifdef CONFIG_BCACHE_DEBUG
 
 #define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
 
-#else /* EDEBUG */
+#else /* DEBUG */
 
 #define atomic_dec_bug(v)	atomic_dec(v)
 #define atomic_inc_bug(v, i)	atomic_inc(v)
 
 #endif
 
-#define BITMASK(name, type, field, offset, size)		\
-static inline uint64_t name(const type *k)			\
-{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); }	\
-								\
-static inline void SET_##name(type *k, uint64_t v)		\
-{								\
-	k->field &= ~(~((uint64_t) ~0 << size) << offset);	\
-	k->field |= v << offset;				\
-}
-
 #define DECLARE_HEAP(type, name)					\
 	struct {							\
 		size_t size, used;					\
@@ -388,6 +378,7 @@
 ssize_t bch_read_string_list(const char *buf, const char * const list[]);
 
 struct time_stats {
+	spinlock_t	lock;
 	/*
 	 * all fields are in nanoseconds, averages are ewmas stored left shifted
 	 * by 8
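/*
 * Illustrative note: the BITMASK() macro removed above generated
 * getter/setter pairs for a `size`-bit value packed at `offset` inside a
 * 64-bit field.  The arithmetic it expanded to, written out as
 * hypothetical helpers:
 */
static inline u64 example_get_bits(u64 field, unsigned offset, unsigned size)
{
	return (field >> offset) & (((u64)1 << size) - 1);
}

static inline u64 example_set_bits(u64 field, unsigned offset, unsigned size,
				   u64 v)
{
	field &= ~((((u64)1 << size) - 1) << offset);
	return field | (v << offset);
}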
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ba3ee48..99053b1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
 #include "debug.h"
 #include "writeback.h"
 
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <trace/events/bcache.h>
 
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
-	struct closure		cl;
-	struct cached_dev	*dc;
-	struct bio		bio;
-};
-
 /* Rate limiting */
 
 static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
-
-	schedule_delayed_work(&dc->writeback_rate_update,
-			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static void update_writeback_rate(struct work_struct *work)
@@ -90,13 +80,16 @@
 		__update_writeback_rate(dc);
 
 	up_read(&dc->writeback_lock);
+
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
 	uint64_t ret;
 
-	if (atomic_read(&dc->disk.detaching) ||
+	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    !dc->writeback_percent)
 		return 0;
 
@@ -105,37 +98,11 @@
 	return min_t(uint64_t, ret, HZ);
 }
 
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
-	return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
-	uint64_t stripe;
-	unsigned nr_sectors = KEY_SIZE(k);
-	struct cached_dev *dc = container_of(buf, struct cached_dev,
-					     writeback_keys);
-	unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
-
-	if (!KEY_DIRTY(k))
-		return false;
-
-	stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
-	while (1) {
-		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
-		    stripe_size)
-			return false;
-
-		if (nr_sectors <= stripe_size)
-			return true;
-
-		nr_sectors -= stripe_size;
-		stripe++;
-	}
-}
+struct dirty_io {
+	struct closure		cl;
+	struct cached_dev	*dc;
+	struct bio		bio;
+};
 
 static void dirty_init(struct keybuf_key *w)
 {
@@ -153,131 +120,6 @@
 	bch_bio_map(bio, NULL);
 }
 
-static void refill_dirty(struct closure *cl)
-{
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	struct keybuf *buf = &dc->writeback_keys;
-	bool searched_from_start = false;
-	struct bkey end = MAX_KEY;
-	SET_KEY_INODE(&end, dc->disk.id);
-
-	if (!atomic_read(&dc->disk.detaching) &&
-	    !dc->writeback_running)
-		closure_return(cl);
-
-	down_write(&dc->writeback_lock);
-
-	if (!atomic_read(&dc->has_dirty)) {
-		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-		bch_write_bdev_super(dc, NULL);
-
-		up_write(&dc->writeback_lock);
-		closure_return(cl);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
-		searched_from_start = true;
-	}
-
-	if (dc->partial_stripes_expensive) {
-		uint64_t i;
-
-		for (i = 0; i < dc->disk.nr_stripes; i++)
-			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-			    1 << dc->disk.stripe_size_bits)
-				goto full_stripes;
-
-		goto normal_refill;
-full_stripes:
-		bch_refill_keybuf(dc->disk.c, buf, &end,
-				  dirty_full_stripe_pred);
-	} else {
-normal_refill:
-		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
-		/* Searched the entire btree  - delay awhile */
-
-		if (RB_EMPTY_ROOT(&buf->keys)) {
-			atomic_set(&dc->has_dirty, 0);
-			cached_dev_put(dc);
-		}
-
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-	}
-
-	up_write(&dc->writeback_lock);
-
-	bch_ratelimit_reset(&dc->writeback_rate);
-
-	/* Punt to workqueue only so we don't recurse and blow the stack */
-	continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
-	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
-		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
-	}
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
-	if (!atomic_read(&dc->has_dirty) &&
-	    !atomic_xchg(&dc->has_dirty, 1)) {
-		atomic_inc(&dc->count);
-
-		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
-			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-			/* XXX: should do this synchronously */
-			bch_write_bdev_super(dc, NULL);
-		}
-
-		bch_writeback_queue(dc);
-
-		if (dc->writeback_percent)
-			schedule_delayed_work(&dc->writeback_rate_update,
-				      dc->writeback_rate_update_seconds * HZ);
-	}
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
-				  uint64_t offset, int nr_sectors)
-{
-	struct bcache_device *d = c->devices[inode];
-	unsigned stripe_size, stripe_offset;
-	uint64_t stripe;
-
-	if (!d)
-		return;
-
-	stripe_size = 1 << d->stripe_size_bits;
-	stripe = offset >> d->stripe_size_bits;
-	stripe_offset = offset & (stripe_size - 1);
-
-	while (nr_sectors) {
-		int s = min_t(unsigned, abs(nr_sectors),
-			      stripe_size - stripe_offset);
-
-		if (nr_sectors < 0)
-			s = -s;
-
-		atomic_add(s, d->stripe_sectors_dirty + stripe);
-		nr_sectors -= s;
-		stripe_offset = 0;
-		stripe++;
-	}
-}
-
-/* Background writeback - IO loop */
-
 static void dirty_io_destructor(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +139,25 @@
 
 	/* This is kind of a dumb way of signalling errors. */
 	if (KEY_DIRTY(&w->key)) {
+		int ret;
 		unsigned i;
-		struct btree_op op;
-		bch_btree_op_init_stack(&op);
+		struct keylist keys;
 
-		op.type = BTREE_REPLACE;
-		bkey_copy(&op.replace, &w->key);
+		bch_keylist_init(&keys);
 
-		SET_KEY_DIRTY(&w->key, false);
-		bch_keylist_add(&op.keys, &w->key);
+		bkey_copy(keys.top, &w->key);
+		SET_KEY_DIRTY(keys.top, false);
+		bch_keylist_push(&keys);
 
 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-		bch_btree_insert(&op, dc->disk.c);
-		closure_sync(&op.cl);
+		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
 
-		if (op.insert_collision)
+		if (ret)
 			trace_bcache_writeback_collision(&w->key);
 
-		atomic_long_inc(op.insert_collision
+		atomic_long_inc(ret
 				? &dc->disk.c->writeback_keys_failed
 				: &dc->disk.c->writeback_keys_done);
 	}
@@ -374,30 +215,33 @@
 	continue_at(cl, write_dirty, system_wq);
 }
 
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	unsigned delay = writeback_delay(dc, 0);
+	unsigned delay = 0;
 	struct keybuf_key *w;
 	struct dirty_io *io;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	/*
 	 * XXX: if we error, background writeback just spins. Should use some
 	 * mempools.
 	 */
 
-	while (1) {
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
 			break;
 
 		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
 
-		if (delay > 0 &&
-		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50))
-			delay = schedule_timeout_uninterruptible(delay);
+		if (KEY_START(&w->key) != dc->last_read ||
+		    jiffies_to_msecs(delay) > 50)
+			while (!kthread_should_stop() && delay)
+				delay = schedule_timeout_interruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -423,7 +267,7 @@
 		trace_bcache_writeback(&w->key);
 
 		down(&dc->in_flight);
-		closure_call(&io->cl, read_dirty_submit, NULL, cl);
+		closure_call(&io->cl, read_dirty_submit, NULL, &cl);
 
 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
 	}
@@ -439,52 +283,205 @@
 	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
 	 * freed) before refilling again
 	 */
-	continue_at(cl, refill_dirty, dirty_wq);
+	closure_sync(&cl);
 }
 
-/* Init */
+/* Scan for dirty data */
 
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-					struct cached_dev *dc)
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+				  uint64_t offset, int nr_sectors)
 {
-	struct bkey *k;
-	struct btree_iter iter;
+	struct bcache_device *d = c->devices[inode];
+	unsigned stripe_offset, stripe, sectors_dirty;
 
-	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-		if (!b->level) {
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
+	if (!d)
+		return;
 
-			if (KEY_DIRTY(k))
-				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-							     KEY_START(k),
-							     KEY_SIZE(k));
-		} else {
-			btree(sectors_dirty_init, k, b, op, dc);
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
+	stripe = offset_to_stripe(d, offset);
+	stripe_offset = offset & (d->stripe_size - 1);
 
-			cond_resched();
+	while (nr_sectors) {
+		int s = min_t(unsigned, abs(nr_sectors),
+			      d->stripe_size - stripe_offset);
+
+		if (nr_sectors < 0)
+			s = -s;
+
+		if (stripe >= d->nr_stripes)
+			return;
+
+		sectors_dirty = atomic_add_return(s,
+					d->stripe_sectors_dirty + stripe);
+		if (sectors_dirty == d->stripe_size)
+			set_bit(stripe, d->full_dirty_stripes);
+		else
+			clear_bit(stripe, d->full_dirty_stripes);
+
+		nr_sectors -= s;
+		stripe_offset = 0;
+		stripe++;
+	}
+}
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+	return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	unsigned start_stripe, stripe, next_stripe;
+	bool wrapped = false;
+
+	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+	if (stripe >= dc->disk.nr_stripes)
+		stripe = 0;
+
+	start_stripe = stripe;
+
+	while (1) {
+		stripe = find_next_bit(dc->disk.full_dirty_stripes,
+				       dc->disk.nr_stripes, stripe);
+
+		if (stripe == dc->disk.nr_stripes)
+			goto next;
+
+		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+						 dc->disk.nr_stripes, stripe);
+
+		buf->last_scanned = KEY(dc->disk.id,
+					stripe * dc->disk.stripe_size, 0);
+
+		bch_refill_keybuf(dc->disk.c, buf,
+				  &KEY(dc->disk.id,
+				       next_stripe * dc->disk.stripe_size, 0),
+				  dirty_pred);
+
+		if (array_freelist_empty(&buf->freelist))
+			return;
+
+		stripe = next_stripe;
+next:
+		if (wrapped && stripe > start_stripe)
+			return;
+
+		if (stripe == dc->disk.nr_stripes) {
+			stripe = 0;
+			wrapped = true;
 		}
+	}
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+	bool searched_from_start = false;
+
+	if (dc->partial_stripes_expensive) {
+		refill_full_stripes(dc);
+		if (array_freelist_empty(&buf->freelist))
+			return false;
+	}
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+	struct cached_dev *dc = arg;
+	bool searched_full_index;
+
+	while (!kthread_should_stop()) {
+		down_write(&dc->writeback_lock);
+		if (!atomic_read(&dc->has_dirty) ||
+		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		     !dc->writeback_running)) {
+			up_write(&dc->writeback_lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (kthread_should_stop())
+				return 0;
+
+			try_to_freeze();
+			schedule();
+			continue;
+		}
+
+		searched_full_index = refill_dirty(dc);
+
+		if (searched_full_index &&
+		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		up_write(&dc->writeback_lock);
+
+		bch_ratelimit_reset(&dc->writeback_rate);
+		read_dirty(dc);
+
+		if (searched_full_index) {
+			unsigned delay = dc->writeback_delay * HZ;
+
+			while (delay &&
+			       !kthread_should_stop() &&
+			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				delay = schedule_timeout_interruptible(delay);
+		}
+	}
 
 	return 0;
 }
 
-void bch_sectors_dirty_init(struct cached_dev *dc)
-{
-	struct btree_op op;
+/* Init */
 
-	bch_btree_op_init_stack(&op);
-	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+struct sectors_dirty_init {
+	struct btree_op	op;
+	unsigned	inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+				 struct bkey *k)
+{
+	struct sectors_dirty_init *op = container_of(_op,
+						struct sectors_dirty_init, op);
+	if (KEY_INODE(k) > op->inode)
+		return MAP_DONE;
+
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+					     KEY_START(k), KEY_SIZE(k));
+
+	return MAP_CONTINUE;
 }
 
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct cached_dev *dc)
+{
+	struct sectors_dirty_init op;
+
+	bch_btree_op_init(&op.op, -1);
+	op.inode = dc->disk.id;
+
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+			   sectors_dirty_init_fn, 0);
+}
+
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	sema_init(&dc->in_flight, 64);
-	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
-
 	bch_keybuf_init(&dc->writeback_keys);
 
 	dc->writeback_metadata		= true;
@@ -498,22 +495,16 @@
 	dc->writeback_rate_p_term_inverse = 64;
 	dc->writeback_rate_d_smooth	= 8;
 
+	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+					      "bcache_writeback");
+	if (IS_ERR(dc->writeback_thread))
+		return PTR_ERR(dc->writeback_thread);
+
+	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
-	if (dirty_wq)
-		destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
-	dirty_wq = create_workqueue("bcache_writeback");
-	if (!dirty_wq)
-		return -ENOMEM;
 
 	return 0;
 }
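/*
 * Illustrative sketch: the writeback rework above retires the closure +
 * workqueue state machine (refill_dirty/read_dirty chained via
 * continue_at) in favour of one kthread per cached device.  The skeleton
 * it follows -- park in TASK_INTERRUPTIBLE when idle, woken by
 * wake_up_process() from bch_writeback_queue() -- looks like this;
 * example_has_work() is a hypothetical stand-in for the
 * has_dirty/writeback_running checks:
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static bool example_has_work(void *arg)
{
	return false;		/* placeholder predicate */
}

static int example_writeback_thread(void *arg)
{
	while (!kthread_should_stop()) {
		if (!example_has_work(arg)) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				return 0;
			schedule();	/* until wake_up_process() */
			continue;
		}
		/* ... refill the keybuf and issue writeback IO ... */
	}
	return 0;
}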
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c91f61b..c9ddcf4 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,20 +14,27 @@
 	return ret;
 }
 
-static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+static inline unsigned offset_to_stripe(struct bcache_device *d,
+					uint64_t offset)
+{
+	do_div(offset, d->stripe_size);
+	return offset;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
 					   uint64_t offset,
 					   unsigned nr_sectors)
 {
-	uint64_t stripe = offset >> d->stripe_size_bits;
+	unsigned stripe = offset_to_stripe(&dc->disk, offset);
 
 	while (1) {
-		if (atomic_read(d->stripe_sectors_dirty + stripe))
+		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
 			return true;
 
-		if (nr_sectors <= 1 << d->stripe_size_bits)
+		if (nr_sectors <= dc->disk.stripe_size)
 			return false;
 
-		nr_sectors -= 1 << d->stripe_size_bits;
+		nr_sectors -= dc->disk.stripe_size;
 		stripe++;
 	}
 }
@@ -38,12 +45,12 @@
 	unsigned in_use = dc->disk.c->gc_stats.in_use;
 
 	if (cache_mode != CACHE_MODE_WRITEBACK ||
-	    atomic_read(&dc->disk.detaching) ||
+	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    in_use > CUTOFF_WRITEBACK_SYNC)
 		return false;
 
 	if (dc->partial_stripes_expensive &&
-	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+	    bcache_dev_stripe_dirty(dc, bio->bi_sector,
 				    bio_sectors(bio)))
 		return true;
 
@@ -54,11 +61,30 @@
 		in_use <= CUTOFF_WRITEBACK;
 }
 
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+	wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+	}
+}
+
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
 
 void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
 
 #endif
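/*
 * Illustrative note: offset_to_stripe() above leans on do_div()'s
 * slightly surprising contract -- it divides the 64-bit dividend *in
 * place* and returns the remainder, which is why the function can simply
 * return `offset` afterwards.  Spelled out in a hypothetical helper:
 */
#include <asm/div64.h>

static inline unsigned example_offset_to_stripe(uint64_t offset,
						unsigned stripe_size)
{
	uint32_t rem = do_div(offset, stripe_size);

	(void)rem;		/* remainder (offset % stripe_size) unused */
	return offset;		/* offset now holds the quotient */
}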
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 1af7255..9ef0752 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -20,7 +20,13 @@
 
 #define CACHE_SUPERBLOCK_MAGIC 06142003
 #define CACHE_SUPERBLOCK_LOCATION 0
-#define CACHE_VERSION 1
+
+/*
+ * defines a range of metadata versions that this module can handle.
+ */
+#define MIN_CACHE_VERSION 1
+#define MAX_CACHE_VERSION 1
+
 #define CACHE_METADATA_CACHE_SIZE 64
 
 /*
@@ -134,6 +140,18 @@
 						      SUPERBLOCK_CSUM_XOR));
 }
 
+static int check_metadata_version(struct cache_disk_superblock *disk_super)
+{
+	uint32_t metadata_version = le32_to_cpu(disk_super->version);
+	if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
+		DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
+		      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int sb_check(struct dm_block_validator *v,
 		    struct dm_block *b,
 		    size_t sb_block_size)
@@ -164,7 +182,7 @@
 		return -EILSEQ;
 	}
 
-	return 0;
+	return check_metadata_version(disk_super);
 }
 
 static struct dm_block_validator sb_validator = {
@@ -198,7 +216,7 @@
 
 /*----------------------------------------------------------------*/
 
-static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
 {
 	int r;
 	unsigned i;
@@ -214,10 +232,10 @@
 		return r;
 
 	data_le = dm_block_data(b);
-	*result = 1;
+	*result = true;
 	for (i = 0; i < sb_block_size; i++) {
 		if (data_le[i] != zero) {
-			*result = 0;
+			*result = false;
 			break;
 		}
 	}
@@ -270,7 +288,7 @@
 	disk_super->flags = 0;
 	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
 	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
-	disk_super->version = cpu_to_le32(CACHE_VERSION);
+	disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
 	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
 	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
 	disk_super->policy_hint_size = 0;
@@ -411,7 +429,8 @@
 static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
 				     bool format_device)
 {
-	int r, unformatted;
+	int r;
+	bool unformatted = false;
 
 	r = __superblock_all_zeroes(cmd->bm, &unformatted);
 	if (r)
@@ -666,19 +685,85 @@
 	kfree(cmd);
 }
 
+/*
+ * Checks that the given cache block is either unmapped or clean.
+ */
+static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
+				   bool *result)
+{
+	int r;
+	__le64 value;
+	dm_oblock_t ob;
+	unsigned flags;
+
+	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+	if (r) {
+		DMERR("block_unmapped_or_clean failed");
+		return r;
+	}
+
+	unpack_value(value, &ob, &flags);
+	*result = !((flags & M_VALID) && (flags & M_DIRTY));
+
+	return 0;
+}
+
+static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+					dm_cblock_t begin, dm_cblock_t end,
+					bool *result)
+{
+	int r;
+	*result = true;
+
+	while (begin != end) {
+		r = block_unmapped_or_clean(cmd, begin, result);
+		if (r)
+			return r;
+
+		if (!*result) {
+			DMERR("cache block %llu is dirty",
+			      (unsigned long long) from_cblock(begin));
+			return 0;
+		}
+
+		begin = to_cblock(from_cblock(begin) + 1);
+	}
+
+	return 0;
+}
+
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
 {
 	int r;
+	bool clean;
 	__le64 null_mapping = pack_value(0, 0);
 
 	down_write(&cmd->root_lock);
 	__dm_bless_for_disk(&null_mapping);
+
+	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
+		r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
+		if (r) {
+			__dm_unbless_for_disk(&null_mapping);
+			goto out;
+		}
+
+		if (!clean) {
+			DMERR("unable to shrink cache due to dirty blocks");
+			r = -EINVAL;
+			__dm_unbless_for_disk(&null_mapping);
+			goto out;
+		}
+	}
+
 	r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
 			    from_cblock(new_cache_size),
 			    &null_mapping, &cmd->root);
 	if (!r)
 		cmd->cache_blocks = new_cache_size;
 	cmd->changed = true;
+
+out:
 	up_write(&cmd->root_lock);
 
 	return r;
@@ -1182,3 +1267,8 @@
 
 	return r;
 }
+
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+{
+	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+}
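/*
 * Illustrative usage sketch for the new query method (hypothetical
 * caller): a target can refuse an operation while dirty blocks remain,
 * mirroring the shrink guard added to dm_cache_resize() above.
 */
static int example_require_all_clean(struct dm_cache_metadata *cmd)
{
	bool clean = false;
	int r = dm_cache_metadata_all_clean(cmd, &clean);

	if (r)
		return r;

	return clean ? 0 : -EINVAL;
}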
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index f45cef2..cd906f1 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -137,6 +137,11 @@
 int dm_cache_save_hint(struct dm_cache_metadata *cmd,
 		       dm_cblock_t cblock, uint32_t hint);
 
+/*
+ * Query method.  Are all the blocks in the cache clean?
+ */
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+
 /*----------------------------------------------------------------*/
 
 #endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 0928abd..2256a1f 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -61,7 +61,12 @@
 
 static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
-	return p->remove_mapping(p, oblock);
+	p->remove_mapping(p, oblock);
+}
+
+static inline int policy_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+	return p->remove_cblock(p, cblock);
 }
 
 static inline void policy_force_mapping(struct dm_cache_policy *p,
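/*
 * Illustrative note: the two removal paths now differ in key and
 * contract -- remove_mapping() is keyed by origin block and returns void
 * (hence the `return` fixed above), while the new remove_cblock() is
 * keyed by cache block and reports -ENODATA when that cblock holds no
 * mapping.  A hypothetical caller:
 */
static void example_invalidate_cblock(struct dm_cache_policy *p,
				      dm_cblock_t cblock)
{
	int r = policy_remove_cblock(p, cblock);

	if (r == -ENODATA)
		pr_debug("cblock was not mapped\n");
}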
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 4296155..416b7b7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -26,19 +26,6 @@
 
 /*----------------------------------------------------------------*/
 
-static unsigned long *alloc_bitset(unsigned nr_entries)
-{
-	size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
-	return vzalloc(s);
-}
-
-static void free_bitset(unsigned long *bits)
-{
-	vfree(bits);
-}
-
-/*----------------------------------------------------------------*/
-
 /*
  * Large, sequential ios are probably better left on the origin device since
  * spindles tend to have good bandwidth.
@@ -151,6 +138,21 @@
 }
 
 /*
+ * Checks to see if the queue is empty.
+ * FIXME: reduce cpu usage.
+ */
+static bool queue_empty(struct queue *q)
+{
+	unsigned i;
+
+	for (i = 0; i < NR_QUEUE_LEVELS; i++)
+		if (!list_empty(q->qs + i))
+			return false;
+
+	return true;
+}
+
+/*
  * Insert an entry to the back of the given level.
  */
 static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
@@ -218,17 +220,116 @@
 	struct hlist_node hlist;
 	struct list_head list;
 	dm_oblock_t oblock;
-	dm_cblock_t cblock;	/* valid iff in_cache */
 
 	/*
 	 * FIXME: pack these better
 	 */
-	bool in_cache:1;
+	bool dirty:1;
 	unsigned hit_count;
 	unsigned generation;
 	unsigned tick;
 };
 
+/*
+ * Rather than storing the cblock in an entry, we allocate all entries in
+ * an array, and infer the cblock from the entry position.
+ *
+ * Free entries are linked together into a list.
+ */
+struct entry_pool {
+	struct entry *entries, *entries_end;
+	struct list_head free;
+	unsigned nr_allocated;
+};
+
+static int epool_init(struct entry_pool *ep, unsigned nr_entries)
+{
+	unsigned i;
+
+	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
+	if (!ep->entries)
+		return -ENOMEM;
+
+	ep->entries_end = ep->entries + nr_entries;
+
+	INIT_LIST_HEAD(&ep->free);
+	for (i = 0; i < nr_entries; i++)
+		list_add(&ep->entries[i].list, &ep->free);
+
+	ep->nr_allocated = 0;
+
+	return 0;
+}
+
+static void epool_exit(struct entry_pool *ep)
+{
+	vfree(ep->entries);
+}
+
+static struct entry *alloc_entry(struct entry_pool *ep)
+{
+	struct entry *e;
+
+	if (list_empty(&ep->free))
+		return NULL;
+
+	e = list_entry(list_pop(&ep->free), struct entry, list);
+	INIT_LIST_HEAD(&e->list);
+	INIT_HLIST_NODE(&e->hlist);
+	ep->nr_allocated++;
+
+	return e;
+}
+
+/*
+ * This assumes the cblock hasn't already been allocated.
+ */
+static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
+{
+	struct entry *e = ep->entries + from_cblock(cblock);
+	list_del(&e->list);
+
+	INIT_LIST_HEAD(&e->list);
+	INIT_HLIST_NODE(&e->hlist);
+	ep->nr_allocated++;
+
+	return e;
+}
+
+static void free_entry(struct entry_pool *ep, struct entry *e)
+{
+	BUG_ON(!ep->nr_allocated);
+	ep->nr_allocated--;
+	INIT_HLIST_NODE(&e->hlist);
+	list_add(&e->list, &ep->free);
+}
+
+/*
+ * Returns NULL if the entry is free.
+ */
+static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
+{
+	struct entry *e = ep->entries + from_cblock(cblock);
+	return !hlist_unhashed(&e->hlist) ? e : NULL;
+}
+
+static bool epool_empty(struct entry_pool *ep)
+{
+	return list_empty(&ep->free);
+}
+
+static bool in_pool(struct entry_pool *ep, struct entry *e)
+{
+	return e >= ep->entries && e < ep->entries_end;
+}
+
+static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
+{
+	return to_cblock(e - ep->entries);
+}
+
+/*----------------------------------------------------------------*/
+
 struct mq_policy {
 	struct dm_cache_policy policy;
 
@@ -238,13 +339,22 @@
 	struct io_tracker tracker;
 
 	/*
-	 * We maintain two queues of entries.  The cache proper contains
-	 * the currently active mappings.  Whereas the pre_cache tracks
-	 * blocks that are being hit frequently and potential candidates
-	 * for promotion to the cache.
+	 * Entries come from two pools, one of pre-cache entries, and one
+	 * for the cache proper.
+	 */
+	struct entry_pool pre_cache_pool;
+	struct entry_pool cache_pool;
+
+	/*
+	 * We maintain three queues of entries.  The cache proper,
+	 * consisting of a clean and dirty queue, contains the currently
+	 * active mappings.  Whereas the pre_cache tracks blocks that
+	 * are being hit frequently and potential candidates for promotion
+	 * to the cache.
 	 */
 	struct queue pre_cache;
-	struct queue cache;
+	struct queue cache_clean;
+	struct queue cache_dirty;
 
 	/*
 	 * Keeps track of time, incremented by the core.  We use this to
@@ -282,25 +392,6 @@
 	unsigned promote_threshold;
 
 	/*
-	 * We need cache_size entries for the cache, and choose to have
-	 * cache_size entries for the pre_cache too.  One motivation for
-	 * using the same size is to make the hit counts directly
-	 * comparable between pre_cache and cache.
-	 */
-	unsigned nr_entries;
-	unsigned nr_entries_allocated;
-	struct list_head free;
-
-	/*
-	 * Cache blocks may be unallocated.  We store this info in a
-	 * bitset.
-	 */
-	unsigned long *allocation_bitset;
-	unsigned nr_cblocks_allocated;
-	unsigned find_free_nr_words;
-	unsigned find_free_last_word;
-
-	/*
 	 * The hash table allows us to quickly find an entry by origin
 	 * block.  Both pre_cache and cache entries are in here.
 	 */
@@ -310,49 +401,6 @@
 };
 
 /*----------------------------------------------------------------*/
-/* Free/alloc mq cache entry structures. */
-static void takeout_queue(struct list_head *lh, struct queue *q)
-{
-	unsigned level;
-
-	for (level = 0; level < NR_QUEUE_LEVELS; level++)
-		list_splice(q->qs + level, lh);
-}
-
-static void free_entries(struct mq_policy *mq)
-{
-	struct entry *e, *tmp;
-
-	takeout_queue(&mq->free, &mq->pre_cache);
-	takeout_queue(&mq->free, &mq->cache);
-
-	list_for_each_entry_safe(e, tmp, &mq->free, list)
-		kmem_cache_free(mq_entry_cache, e);
-}
-
-static int alloc_entries(struct mq_policy *mq, unsigned elts)
-{
-	unsigned u = mq->nr_entries;
-
-	INIT_LIST_HEAD(&mq->free);
-	mq->nr_entries_allocated = 0;
-
-	while (u--) {
-		struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
-
-		if (!e) {
-			free_entries(mq);
-			return -ENOMEM;
-		}
-
-
-		list_add(&e->list, &mq->free);
-	}
-
-	return 0;
-}
-
-/*----------------------------------------------------------------*/
 
 /*
  * Simple hash table implementation.  Should replace with the standard hash
@@ -388,96 +436,14 @@
 
 /*----------------------------------------------------------------*/
 
-/*
- * Allocates a new entry structure.  The memory is allocated in one lump,
- * so we just handing it out here.  Returns NULL if all entries have
- * already been allocated.  Cannot fail otherwise.
- */
-static struct entry *alloc_entry(struct mq_policy *mq)
-{
-	struct entry *e;
-
-	if (mq->nr_entries_allocated >= mq->nr_entries) {
-		BUG_ON(!list_empty(&mq->free));
-		return NULL;
-	}
-
-	e = list_entry(list_pop(&mq->free), struct entry, list);
-	INIT_LIST_HEAD(&e->list);
-	INIT_HLIST_NODE(&e->hlist);
-
-	mq->nr_entries_allocated++;
-	return e;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Mark cache blocks allocated or not in the bitset.
- */
-static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
-	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
-	BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
-
-	set_bit(from_cblock(cblock), mq->allocation_bitset);
-	mq->nr_cblocks_allocated++;
-}
-
-static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
-	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
-	BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
-
-	clear_bit(from_cblock(cblock), mq->allocation_bitset);
-	mq->nr_cblocks_allocated--;
-}
-
 static bool any_free_cblocks(struct mq_policy *mq)
 {
-	return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+	return !epool_empty(&mq->cache_pool);
 }
 
-/*
- * Fills result out with a cache block that isn't in use, or return
- * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
- * reponsible for that.
- */
-static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
-			      dm_cblock_t *result, unsigned *last_word)
+static bool any_clean_cblocks(struct mq_policy *mq)
 {
-	int r = -ENOSPC;
-	unsigned w;
-
-	for (w = begin; w < end; w++) {
-		/*
-		 * ffz is undefined if no zero exists
-		 */
-		if (mq->allocation_bitset[w] != ~0UL) {
-			*last_word = w;
-			*result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
-			if (from_cblock(*result) < from_cblock(mq->cache_size))
-				r = 0;
-
-			break;
-		}
-	}
-
-	return r;
-}
-
-static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
-{
-	int r;
-
-	if (!any_free_cblocks(mq))
-		return -ENOSPC;
-
-	r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
-	if (r == -ENOSPC && mq->find_free_last_word)
-		r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
-
-	return r;
+	return !queue_empty(&mq->cache_clean);
 }
 
 /*----------------------------------------------------------------*/
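/*
 * Illustrative sketch: with the allocation bitmap gone, the entry_pool
 * introduced above encodes the cblock as the entry's array index, so
 * infer_cblock() is plain pointer subtraction and epool_find() can tell
 * free from allocated by whether the hlist node is hashed.  A
 * hypothetical round-trip check over the helpers defined above:
 */
static void example_epool_roundtrip(struct entry_pool *ep)
{
	struct entry *e = alloc_entry(ep);

	if (e) {
		BUG_ON(ep->entries + from_cblock(infer_cblock(ep, e)) != e);
		free_entry(ep, e);
	}
}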
@@ -496,33 +462,35 @@
 	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
 }
 
+static bool in_cache(struct mq_policy *mq, struct entry *e)
+{
+	return in_pool(&mq->cache_pool, e);
+}
+
 /*
  * Inserts the entry into the pre_cache or the cache.  Ensures the cache
- * block is marked as allocated if necc.  Inserts into the hash table.  Sets the
- * tick which records when the entry was last moved about.
+ * block is marked as allocated if necessary.  Inserts into the hash table.
+ * Sets the tick which records when the entry was last moved about.
  */
 static void push(struct mq_policy *mq, struct entry *e)
 {
 	e->tick = mq->tick;
 	hash_insert(mq, e);
 
-	if (e->in_cache) {
-		alloc_cblock(mq, e->cblock);
-		queue_push(&mq->cache, queue_level(e), &e->list);
-	} else
+	if (in_cache(mq, e))
+		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
+			   queue_level(e), &e->list);
+	else
 		queue_push(&mq->pre_cache, queue_level(e), &e->list);
 }
 
 /*
  * Removes an entry from pre_cache or cache.  Removes from the hash table.
- * Frees off the cache block if necc.
  */
 static void del(struct mq_policy *mq, struct entry *e)
 {
 	queue_remove(&e->list);
 	hash_remove(e);
-	if (e->in_cache)
-		free_cblock(mq, e->cblock);
 }
 
 /*
@@ -531,14 +499,14 @@
  */
 static struct entry *pop(struct mq_policy *mq, struct queue *q)
 {
-	struct entry *e = container_of(queue_pop(q), struct entry, list);
+	struct entry *e;
+	struct list_head *h = queue_pop(q);
 
-	if (e) {
-		hash_remove(e);
+	if (!h)
+		return NULL;
 
-		if (e->in_cache)
-			free_cblock(mq, e->cblock);
-	}
+	e = container_of(h, struct entry, list);
+	hash_remove(e);
 
 	return e;
 }
@@ -556,7 +524,8 @@
  * of the entries.
  *
  * At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries of the first level).
+ * of the entries in the cache (the first 20 entries across all levels in
+ * ascending order, giving preference to the clean entries at each level).
  *
  * We can be much cleverer than this though.  For example, each promotion
  * could bump up the threshold helping to prevent churn.  Much more to do
@@ -571,14 +540,21 @@
 	struct list_head *head;
 	struct entry *e;
 
-	if ((mq->hit_count >= mq->generation_period) &&
-	    (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
-
+	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
 		mq->hit_count = 0;
 		mq->generation++;
 
 		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
-			head = mq->cache.qs + level;
+			head = mq->cache_clean.qs + level;
+			list_for_each_entry(e, head, list) {
+				nr++;
+				total += e->hit_count;
+
+				if (++count >= MAX_TO_AVERAGE)
+					break;
+			}
+
+			head = mq->cache_dirty.qs + level;
 			list_for_each_entry(e, head, list) {
 				nr++;
 				total += e->hit_count;
@@ -631,19 +607,30 @@
  * - set the hit count to a hard coded value other than 1, eg, is it better
  *   if it goes in at level 2?
  */
-static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
 {
-	dm_cblock_t result;
-	struct entry *demoted = pop(mq, &mq->cache);
+	struct entry *demoted = pop(mq, &mq->cache_clean);
 
-	BUG_ON(!demoted);
-	result = demoted->cblock;
+	if (!demoted)
+		/*
+		 * We could get a block from mq->cache_dirty, but that
+		 * would add extra latency to the triggering bio as it
+		 * waits for the writeback.  Better to not promote this
+		 * time and hope there's a clean block next time this block
+		 * is hit.
+		 */
+		return -ENOSPC;
+
 	*oblock = demoted->oblock;
-	demoted->in_cache = false;
-	demoted->hit_count = 1;
-	push(mq, demoted);
+	free_entry(&mq->cache_pool, demoted);
 
-	return result;
+	/*
+	 * We used to put the demoted block into the pre-cache, but I think
+	 * it's simpler to just let it work its way up from zero again.
+	 * Stops blocks flickering in and out of the cache.
+	 */
+
+	return 0;
 }
 
 /*
@@ -662,17 +649,18 @@
 static unsigned adjusted_promote_threshold(struct mq_policy *mq,
 					   bool discarded_oblock, int data_dir)
 {
-	if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+	if (data_dir == READ)
+		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+
+	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
 		/*
 		 * We don't need to do any copying at all, so give this a
-		 * very low threshold.  In practice this only triggers
-		 * during initial population after a format.
+		 * very low threshold.
 		 */
 		return DISCARDED_PROMOTE_THRESHOLD;
+	}
 
-	return data_dir == READ ?
-		(mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
-		(mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
 }
 
 static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -688,34 +676,49 @@
 {
 	requeue_and_update_tick(mq, e);
 
-	if (e->in_cache) {
+	if (in_cache(mq, e)) {
 		result->op = POLICY_HIT;
-		result->cblock = e->cblock;
+		result->cblock = infer_cblock(&mq->cache_pool, e);
 	}
 
 	return 0;
 }
 
 /*
- * Moves and entry from the pre_cache to the cache.  The main work is
+ * Moves an entry from the pre_cache to the cache.  The main work is
  * finding which cache block to use.
  */
 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
 			      struct policy_result *result)
 {
-	dm_cblock_t cblock;
+	int r;
+	struct entry *new_e;
 
-	if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+	/* Ensure there's a free cblock in the cache */
+	if (epool_empty(&mq->cache_pool)) {
 		result->op = POLICY_REPLACE;
-		cblock = demote_cblock(mq, &result->old_oblock);
+		r = demote_cblock(mq, &result->old_oblock);
+		if (r) {
+			result->op = POLICY_MISS;
+			return 0;
+		}
 	} else
 		result->op = POLICY_NEW;
 
-	result->cblock = e->cblock = cblock;
+	new_e = alloc_entry(&mq->cache_pool);
+	BUG_ON(!new_e);
+
+	new_e->oblock = e->oblock;
+	new_e->dirty = false;
+	new_e->hit_count = e->hit_count;
+	new_e->generation = e->generation;
+	new_e->tick = e->tick;
 
 	del(mq, e);
-	e->in_cache = true;
-	push(mq, e);
+	free_entry(&mq->pre_cache_pool, e);
+	push(mq, new_e);
+
+	result->cblock = infer_cblock(&mq->cache_pool, new_e);
 
 	return 0;
 }
@@ -743,7 +746,7 @@
 static void insert_in_pre_cache(struct mq_policy *mq,
 				dm_oblock_t oblock)
 {
-	struct entry *e = alloc_entry(mq);
+	struct entry *e = alloc_entry(&mq->pre_cache_pool);
 
 	if (!e)
 		/*
@@ -757,7 +760,7 @@
 		return;
 	}
 
-	e->in_cache = false;
+	e->dirty = false;
 	e->oblock = oblock;
 	e->hit_count = 1;
 	e->generation = mq->generation;
@@ -767,30 +770,36 @@
 static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
 			    struct policy_result *result)
 {
+	int r;
 	struct entry *e;
-	dm_cblock_t cblock;
 
-	if (find_free_cblock(mq, &cblock) == -ENOSPC) {
-		result->op = POLICY_MISS;
-		insert_in_pre_cache(mq, oblock);
-		return;
-	}
+	if (epool_empty(&mq->cache_pool)) {
+		result->op = POLICY_REPLACE;
+		r = demote_cblock(mq, &result->old_oblock);
+		if (unlikely(r)) {
+			result->op = POLICY_MISS;
+			insert_in_pre_cache(mq, oblock);
+			return;
+		}
 
-	e = alloc_entry(mq);
-	if (unlikely(!e)) {
-		result->op = POLICY_MISS;
-		return;
+		/*
+		 * This will always succeed, since we've just demoted.
+		 */
+		e = alloc_entry(&mq->cache_pool);
+		BUG_ON(!e);
+
+	} else {
+		e = alloc_entry(&mq->cache_pool);
+		result->op = POLICY_NEW;
 	}
 
 	e->oblock = oblock;
-	e->cblock = cblock;
-	e->in_cache = true;
+	e->dirty = false;
 	e->hit_count = 1;
 	e->generation = mq->generation;
 	push(mq, e);
 
-	result->op = POLICY_NEW;
-	result->cblock = e->cblock;
+	result->cblock = infer_cblock(&mq->cache_pool, e);
 }
 
 static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
@@ -821,13 +830,16 @@
 	int r = 0;
 	struct entry *e = hash_lookup(mq, oblock);
 
-	if (e && e->in_cache)
+	if (e && in_cache(mq, e))
 		r = cache_entry_found(mq, e, result);
+
 	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
 		result->op = POLICY_MISS;
+
 	else if (e)
 		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
 					  data_dir, result);
+
 	else
 		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
 				   data_dir, result);
@@ -854,9 +866,9 @@
 {
 	struct mq_policy *mq = to_mq_policy(p);
 
-	free_bitset(mq->allocation_bitset);
 	kfree(mq->table);
-	free_entries(mq);
+	epool_exit(&mq->cache_pool);
+	epool_exit(&mq->pre_cache_pool);
 	kfree(mq);
 }
 
@@ -904,8 +916,8 @@
 		return -EWOULDBLOCK;
 
 	e = hash_lookup(mq, oblock);
-	if (e && e->in_cache) {
-		*cblock = e->cblock;
+	if (e && in_cache(mq, e)) {
+		*cblock = infer_cblock(&mq->cache_pool, e);
 		r = 0;
 	} else
 		r = -ENOENT;
@@ -915,6 +927,36 @@
 	return r;
 }
 
+static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
+{
+	struct entry *e;
+
+	e = hash_lookup(mq, oblock);
+	BUG_ON(!e || !in_cache(mq, e));
+
+	del(mq, e);
+	e->dirty = set;
+	push(mq, e);
+}
+
+static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+	struct mq_policy *mq = to_mq_policy(p);
+
+	mutex_lock(&mq->lock);
+	__mq_set_clear_dirty(mq, oblock, true);
+	mutex_unlock(&mq->lock);
+}
+
+static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+	struct mq_policy *mq = to_mq_policy(p);
+
+	mutex_lock(&mq->lock);
+	__mq_set_clear_dirty(mq, oblock, false);
+	mutex_unlock(&mq->lock);
+}
+
 static int mq_load_mapping(struct dm_cache_policy *p,
 			   dm_oblock_t oblock, dm_cblock_t cblock,
 			   uint32_t hint, bool hint_valid)
@@ -922,13 +964,9 @@
 	struct mq_policy *mq = to_mq_policy(p);
 	struct entry *e;
 
-	e = alloc_entry(mq);
-	if (!e)
-		return -ENOMEM;
-
-	e->cblock = cblock;
+	e = alloc_particular_entry(&mq->cache_pool, cblock);
 	e->oblock = oblock;
-	e->in_cache = true;
+	e->dirty = false;	/* this gets corrected in a minute */
 	e->hit_count = hint_valid ? hint : 1;
 	e->generation = mq->generation;
 	push(mq, e);
@@ -936,57 +974,126 @@
 	return 0;
 }
 
+static int mq_save_hints(struct mq_policy *mq, struct queue *q,
+			 policy_walk_fn fn, void *context)
+{
+	int r;
+	unsigned level;
+	struct entry *e;
+
+	for (level = 0; level < NR_QUEUE_LEVELS; level++)
+		list_for_each_entry(e, q->qs + level, list) {
+			r = fn(context, infer_cblock(&mq->cache_pool, e),
+			       e->oblock, e->hit_count);
+			if (r)
+				return r;
+		}
+
+	return 0;
+}
+
 static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
 			    void *context)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 	int r = 0;
-	struct entry *e;
-	unsigned level;
 
 	mutex_lock(&mq->lock);
 
-	for (level = 0; level < NR_QUEUE_LEVELS; level++)
-		list_for_each_entry(e, &mq->cache.qs[level], list) {
-			r = fn(context, e->cblock, e->oblock, e->hit_count);
-			if (r)
-				goto out;
-		}
+	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
+	if (!r)
+		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
 
-out:
 	mutex_unlock(&mq->lock);
 
 	return r;
 }
 
+static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+	struct entry *e;
+
+	e = hash_lookup(mq, oblock);
+	BUG_ON(!e || !in_cache(mq, e));
+
+	del(mq, e);
+	free_entry(&mq->cache_pool, e);
+}
+
 static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct mq_policy *mq = to_mq_policy(p);
-	struct entry *e;
 
 	mutex_lock(&mq->lock);
-
-	e = hash_lookup(mq, oblock);
-
-	BUG_ON(!e || !e->in_cache);
-
-	del(mq, e);
-	e->in_cache = false;
-	push(mq, e);
-
+	__remove_mapping(mq, oblock);
 	mutex_unlock(&mq->lock);
 }
 
-static void force_mapping(struct mq_policy *mq,
-			  dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+	struct entry *e = epool_find(&mq->cache_pool, cblock);
+
+	if (!e)
+		return -ENODATA;
+
+	del(mq, e);
+	free_entry(&mq->cache_pool, e);
+
+	return 0;
+}
+
+static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+	int r;
+	struct mq_policy *mq = to_mq_policy(p);
+
+	mutex_lock(&mq->lock);
+	r = __remove_cblock(mq, cblock);
+	mutex_unlock(&mq->lock);
+
+	return r;
+}
+
+static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
+			      dm_cblock_t *cblock)
+{
+	struct entry *e = pop(mq, &mq->cache_dirty);
+
+	if (!e)
+		return -ENODATA;
+
+	*oblock = e->oblock;
+	*cblock = infer_cblock(&mq->cache_pool, e);
+	e->dirty = false;
+	push(mq, e);
+
+	return 0;
+}
+
+static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+			     dm_cblock_t *cblock)
+{
+	int r;
+	struct mq_policy *mq = to_mq_policy(p);
+
+	mutex_lock(&mq->lock);
+	r = __mq_writeback_work(mq, oblock, cblock);
+	mutex_unlock(&mq->lock);
+
+	return r;
+}
+
+static void __force_mapping(struct mq_policy *mq,
+			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
 {
 	struct entry *e = hash_lookup(mq, current_oblock);
 
-	BUG_ON(!e || !e->in_cache);
-
-	del(mq, e);
-	e->oblock = new_oblock;
-	push(mq, e);
+	if (e && in_cache(mq, e)) {
+		del(mq, e);
+		e->oblock = new_oblock;
+		e->dirty = true;
+		push(mq, e);
+	}
 }
 
 static void mq_force_mapping(struct dm_cache_policy *p,
@@ -995,16 +1102,20 @@
 	struct mq_policy *mq = to_mq_policy(p);
 
 	mutex_lock(&mq->lock);
-	force_mapping(mq, current_oblock, new_oblock);
+	__force_mapping(mq, current_oblock, new_oblock);
 	mutex_unlock(&mq->lock);
 }
 
 static dm_cblock_t mq_residency(struct dm_cache_policy *p)
 {
+	dm_cblock_t r;
 	struct mq_policy *mq = to_mq_policy(p);
 
-	/* FIXME: lock mutex, not sure we can block here */
-	return to_cblock(mq->nr_cblocks_allocated);
+	mutex_lock(&mq->lock);
+	r = to_cblock(mq->cache_pool.nr_allocated);
+	mutex_unlock(&mq->lock);
+
+	return r;
 }
 
 static void mq_tick(struct dm_cache_policy *p)
@@ -1057,10 +1168,13 @@
 	mq->policy.destroy = mq_destroy;
 	mq->policy.map = mq_map;
 	mq->policy.lookup = mq_lookup;
+	mq->policy.set_dirty = mq_set_dirty;
+	mq->policy.clear_dirty = mq_clear_dirty;
 	mq->policy.load_mapping = mq_load_mapping;
 	mq->policy.walk_mappings = mq_walk_mappings;
 	mq->policy.remove_mapping = mq_remove_mapping;
-	mq->policy.writeback_work = NULL;
+	mq->policy.remove_cblock = mq_remove_cblock;
+	mq->policy.writeback_work = mq_writeback_work;
 	mq->policy.force_mapping = mq_force_mapping;
 	mq->policy.residency = mq_residency;
 	mq->policy.tick = mq_tick;
@@ -1072,7 +1186,6 @@
 					 sector_t origin_size,
 					 sector_t cache_block_size)
 {
-	int r;
 	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 
 	if (!mq)
@@ -1080,8 +1193,18 @@
 
 	init_policy_functions(mq);
 	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
-
 	mq->cache_size = cache_size;
+
+	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
+		DMERR("couldn't initialize pool of pre-cache entries");
+		goto bad_pre_cache_init;
+	}
+
+	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
+		DMERR("couldn't initialize pool of cache entries");
+		goto bad_cache_init;
+	}
+
 	mq->tick_protected = 0;
 	mq->tick = 0;
 	mq->hit_count = 0;
@@ -1089,38 +1212,26 @@
 	mq->promote_threshold = 0;
 	mutex_init(&mq->lock);
 	spin_lock_init(&mq->tick_lock);
-	mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
-	mq->find_free_last_word = 0;
 
 	queue_init(&mq->pre_cache);
-	queue_init(&mq->cache);
+	queue_init(&mq->cache_clean);
+	queue_init(&mq->cache_dirty);
+
 	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
 
-	mq->nr_entries = 2 * from_cblock(cache_size);
-	r = alloc_entries(mq, mq->nr_entries);
-	if (r)
-		goto bad_cache_alloc;
-
-	mq->nr_entries_allocated = 0;
-	mq->nr_cblocks_allocated = 0;
-
 	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
 	mq->hash_bits = ffs(mq->nr_buckets) - 1;
 	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
 	if (!mq->table)
 		goto bad_alloc_table;
 
-	mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
-	if (!mq->allocation_bitset)
-		goto bad_alloc_bitset;
-
 	return &mq->policy;
 
-bad_alloc_bitset:
-	kfree(mq->table);
 bad_alloc_table:
-	free_entries(mq);
-bad_cache_alloc:
+	epool_exit(&mq->cache_pool);
+bad_cache_init:
+	epool_exit(&mq->pre_cache_pool);
+bad_pre_cache_init:
 	kfree(mq);
 
 	return NULL;
@@ -1130,7 +1241,7 @@
 
 static struct dm_cache_policy_type mq_policy_type = {
 	.name = "mq",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = mq_create
@@ -1138,7 +1249,7 @@
 
 static struct dm_cache_policy_type default_policy_type = {
 	.name = "default",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = mq_create
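
The mq rework above drops the per-entry cblock field and the allocation
bitset in favour of preallocated entry pools, recovering a cache block
number from an entry's position in its pool.  A minimal sketch of the
idea behind infer_cblock(), assuming the entries sit in one contiguous
array with one slot per cblock (field names are illustrative):

	struct entry_pool {
		struct entry *entries;	/* base of the contiguous array */
		unsigned nr_allocated;
	};

	static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
	{
		/* an entry's index in the array is its cache block number */
		return to_cblock(e - ep->entries);
	}

The same property lets alloc_particular_entry() and epool_find() map a
cblock straight back to ep->entries + from_cblock(cblock).
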
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 21c03c5..d800579 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -119,13 +119,13 @@
 	type = get_policy(name);
 	if (!type) {
 		DMWARN("unknown policy type");
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	p = type->create(cache_size, origin_size, cache_block_size);
 	if (!p) {
 		put_policy(type);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	p->private = type;
 
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 33369ca..052c00a 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -135,9 +135,6 @@
 	 */
 	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
 
-	/*
-	 * oblock must be a mapped block.  Must not block.
-	 */
 	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
 	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
 
@@ -159,8 +156,24 @@
 	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
 			      dm_oblock_t new_oblock);
 
-	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+	/*
+	 * This is called via the invalidate_cblocks message.  It is
+	 * possible that the particular cblock has already been removed
+	 * due to a write io in passthrough mode, in which case this
+	 * should return -ENODATA.
+	 */
+	int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
 
+	/*
+	 * Provide a dirty block to be written back by the core target.
+	 *
+	 * Returns:
+	 *
+	 * 0 and @cblock,@oblock: block to write back provided
+	 *
+	 * -ENODATA: no dirty blocks available
+	 */
+	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
 
 	/*
 	 * How full is the cache?
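
The writeback_work contract documented above invites a simple drain loop
in the core target.  A hedged sketch, assuming a policy_writeback_work()
wrapper with the same return convention:

	static void writeback_all_dirty(struct dm_cache_policy *p)
	{
		dm_oblock_t oblock;
		dm_cblock_t cblock;

		/* -ENODATA ends the loop once no dirty blocks remain */
		while (!policy_writeback_work(p, &oblock, &cblock)) {
			/* issue a writeback copy of cblock's data to
			 * oblock on the origin device */
		}
	}
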
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2956976..9efcf10 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -61,6 +61,34 @@
 
 /*----------------------------------------------------------------*/
 
+/*
+ * There are a couple of places where we let a bio run, but want to do some
+ * work before calling its endio function.  We do this by temporarily
+ * changing the endio fn.
+ */
+struct dm_hook_info {
+	bio_end_io_t *bi_end_io;
+	void *bi_private;
+};
+
+static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
+			bio_end_io_t *bi_end_io, void *bi_private)
+{
+	h->bi_end_io = bio->bi_end_io;
+	h->bi_private = bio->bi_private;
+
+	bio->bi_end_io = bi_end_io;
+	bio->bi_private = bi_private;
+}
+
+static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
+{
+	bio->bi_end_io = h->bi_end_io;
+	bio->bi_private = h->bi_private;
+}
+
+/*----------------------------------------------------------------*/
+
 #define PRISON_CELLS 1024
 #define MIGRATION_POOL_SIZE 128
 #define COMMIT_PERIOD HZ
@@ -76,14 +104,37 @@
 /*
  * FIXME: the cache is read/write for the time being.
  */
-enum cache_mode {
+enum cache_metadata_mode {
 	CM_WRITE,		/* metadata may be changed */
 	CM_READ_ONLY,		/* metadata may not be changed */
 };
 
+enum cache_io_mode {
+	/*
+	 * Data is written to cached blocks only.  These blocks are marked
+	 * dirty.  If you lose the cache device you will lose data.
+	 * Potential performance increase for both reads and writes.
+	 */
+	CM_IO_WRITEBACK,
+
+	/*
+	 * Data is written to both cache and origin.  Blocks are never
+	 * dirty.  Potential performance benefit for reads only.
+	 */
+	CM_IO_WRITETHROUGH,
+
+	/*
+	 * A degraded mode useful for various cache coherency situations
+	 * (eg, rolling back snapshots).  Reads and writes always go to the
+	 * origin.  If a write goes to a cached oblock, then the cache
+	 * block is invalidated.
+	 */
+	CM_IO_PASSTHROUGH
+};
+
 struct cache_features {
-	enum cache_mode mode;
-	bool write_through:1;
+	enum cache_metadata_mode mode;
+	enum cache_io_mode io_mode;
 };
 
 struct cache_stats {
@@ -99,6 +150,25 @@
 	atomic_t discard_count;
 };
 
+/*
+ * Defines a range of cblocks: begin to (end - 1) are in the range; end is
+ * the one-past-the-end value.
+ */
+struct cblock_range {
+	dm_cblock_t begin;
+	dm_cblock_t end;
+};
+
+struct invalidation_request {
+	struct list_head list;
+	struct cblock_range *cblocks;
+
+	atomic_t complete;
+	int err;
+
+	wait_queue_head_t result_wait;
+};
+
 struct cache {
 	struct dm_target *ti;
 	struct dm_target_callbacks callbacks;
@@ -148,6 +218,10 @@
 	wait_queue_head_t migration_wait;
 	atomic_t nr_migrations;
 
+	wait_queue_head_t quiescing_wait;
+	atomic_t quiescing;
+	atomic_t quiescing_ack;
+
 	/*
 	 * cache_size entries, dirty if set
 	 */
@@ -186,7 +260,7 @@
 
 	bool need_tick_bio:1;
 	bool sized:1;
-	bool quiescing:1;
+	bool invalidate:1;
 	bool commit_requested:1;
 	bool loaded_mappings:1;
 	bool loaded_discards:1;
@@ -197,6 +271,12 @@
 	struct cache_features features;
 
 	struct cache_stats stats;
+
+	/*
+	 * Invalidation fields.
+	 */
+	spinlock_t invalidation_lock;
+	struct list_head invalidation_requests;
 };
 
 struct per_bio_data {
@@ -211,7 +291,7 @@
 	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
-	bio_end_io_t *saved_bi_end_io;
+	struct dm_hook_info hook_info;
 	struct dm_bio_details bio_details;
 };
 
@@ -228,6 +308,8 @@
 	bool writeback:1;
 	bool demote:1;
 	bool promote:1;
+	bool requeue_holder:1;
+	bool invalidate:1;
 
 	struct dm_bio_prison_cell *old_ocell;
 	struct dm_bio_prison_cell *new_ocell;
@@ -533,9 +615,24 @@
 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
 
+static bool writethrough_mode(struct cache_features *f)
+{
+	return f->io_mode == CM_IO_WRITETHROUGH;
+}
+
+static bool writeback_mode(struct cache_features *f)
+{
+	return f->io_mode == CM_IO_WRITEBACK;
+}
+
+static bool passthrough_mode(struct cache_features *f)
+{
+	return f->io_mode == CM_IO_PASSTHROUGH;
+}
+
 static size_t get_per_bio_data_size(struct cache *cache)
 {
-	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
 }
 
 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -605,6 +702,7 @@
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 				 dm_oblock_t oblock, dm_cblock_t cblock)
 {
+	check_if_tick_bio_needed(cache, bio);
 	remap_to_cache(cache, bio, cblock);
 	if (bio_data_dir(bio) == WRITE) {
 		set_dirty(cache, oblock, cblock);
@@ -662,7 +760,8 @@
 static void writethrough_endio(struct bio *bio, int err)
 {
 	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
-	bio->bi_end_io = pb->saved_bi_end_io;
+
+	dm_unhook_bio(&pb->hook_info, bio);
 
 	if (err) {
 		bio_endio(bio, err);
@@ -693,9 +792,8 @@
 
 	pb->cache = cache;
 	pb->cblock = cblock;
-	pb->saved_bi_end_io = bio->bi_end_io;
+	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
 	dm_bio_record(&pb->bio_details, bio);
-	bio->bi_end_io = writethrough_endio;
 
 	remap_to_origin_clear_discard(pb->cache, bio, oblock);
 }
@@ -748,8 +846,9 @@
 
 static void cleanup_migration(struct dm_cache_migration *mg)
 {
-	dec_nr_migrations(mg->cache);
+	struct cache *cache = mg->cache;
 	free_migration(mg);
+	dec_nr_migrations(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -765,13 +864,13 @@
 		DMWARN_LIMIT("demotion failed; couldn't copy block");
 		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
 
-		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
 		if (mg->promote)
-			cell_defer(cache, mg->new_ocell, 1);
+			cell_defer(cache, mg->new_ocell, true);
 	} else {
 		DMWARN_LIMIT("promotion failed; couldn't copy block");
 		policy_remove_mapping(cache->policy, mg->new_oblock);
-		cell_defer(cache, mg->new_ocell, 1);
+		cell_defer(cache, mg->new_ocell, true);
 	}
 
 	cleanup_migration(mg);
@@ -823,7 +922,7 @@
 		return;
 
 	} else if (mg->demote) {
-		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
 
 		if (mg->promote) {
 			mg->demote = false;
@@ -832,11 +931,19 @@
 			list_add_tail(&mg->list, &cache->quiesced_migrations);
 			spin_unlock_irqrestore(&cache->lock, flags);
 
-		} else
+		} else {
+			if (mg->invalidate)
+				policy_remove_mapping(cache->policy, mg->old_oblock);
 			cleanup_migration(mg);
+		}
 
 	} else {
-		cell_defer(cache, mg->new_ocell, true);
+		if (mg->requeue_holder)
+			cell_defer(cache, mg->new_ocell, true);
+		else {
+			bio_endio(mg->new_ocell->holder, 0);
+			cell_defer(cache, mg->new_ocell, false);
+		}
 		clear_dirty(cache, mg->new_oblock, mg->cblock);
 		cleanup_migration(mg);
 	}
@@ -881,8 +988,46 @@
 		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
 	}
 
-	if (r < 0)
+	if (r < 0) {
+		DMERR_LIMIT("issuing migration failed");
 		migration_failure(mg);
+	}
+}
+
+static void overwrite_endio(struct bio *bio, int err)
+{
+	struct dm_cache_migration *mg = bio->bi_private;
+	struct cache *cache = mg->cache;
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	unsigned long flags;
+
+	if (err)
+		mg->err = true;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	list_add_tail(&mg->list, &cache->completed_migrations);
+	dm_unhook_bio(&pb->hook_info, bio);
+	mg->requeue_holder = false;
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	wake_worker(cache);
+}
+
+static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
+{
+	size_t pb_data_size = get_per_bio_data_size(mg->cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
+	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+	generic_make_request(bio);
+}
+
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+	return (bio_data_dir(bio) == WRITE) &&
+		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -899,9 +1044,17 @@
 	if (mg->writeback || mg->demote)
 		avoid = !is_dirty(cache, mg->cblock) ||
 			is_discarded_oblock(cache, mg->old_oblock);
-	else
+	else {
+		struct bio *bio = mg->new_ocell->holder;
+
 		avoid = is_discarded_oblock(cache, mg->new_oblock);
 
+		if (!avoid && bio_writes_complete_block(cache, bio)) {
+			issue_overwrite(mg, bio);
+			return;
+		}
+	}
+
 	avoid ? avoid_copy(mg) : issue_copy_real(mg);
 }
 
@@ -991,6 +1144,8 @@
 	mg->writeback = false;
 	mg->demote = false;
 	mg->promote = true;
+	mg->requeue_holder = true;
+	mg->invalidate = false;
 	mg->cache = cache;
 	mg->new_oblock = oblock;
 	mg->cblock = cblock;
@@ -1012,6 +1167,8 @@
 	mg->writeback = true;
 	mg->demote = false;
 	mg->promote = false;
+	mg->requeue_holder = true;
+	mg->invalidate = false;
 	mg->cache = cache;
 	mg->old_oblock = oblock;
 	mg->cblock = cblock;
@@ -1035,6 +1192,8 @@
 	mg->writeback = false;
 	mg->demote = true;
 	mg->promote = true;
+	mg->requeue_holder = true;
+	mg->invalidate = false;
 	mg->cache = cache;
 	mg->old_oblock = old_oblock;
 	mg->new_oblock = new_oblock;
@@ -1047,6 +1206,33 @@
 	quiesce_migration(mg);
 }
 
+/*
+ * Invalidate a cache entry.  No writeback occurs; any changes in the cache
+ * block are thrown away.
+ */
+static void invalidate(struct cache *cache, struct prealloc *structs,
+		       dm_oblock_t oblock, dm_cblock_t cblock,
+		       struct dm_bio_prison_cell *cell)
+{
+	struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+	mg->err = false;
+	mg->writeback = false;
+	mg->demote = true;
+	mg->promote = false;
+	mg->requeue_holder = true;
+	mg->invalidate = true;
+	mg->cache = cache;
+	mg->old_oblock = oblock;
+	mg->cblock = cblock;
+	mg->old_ocell = cell;
+	mg->new_ocell = NULL;
+	mg->start_jiffies = jiffies;
+
+	inc_nr_migrations(cache);
+	quiesce_migration(mg);
+}
+
 /*----------------------------------------------------------------
  * bio processing
  *--------------------------------------------------------------*/
@@ -1109,13 +1295,6 @@
 	return current_volume < cache->migration_threshold;
 }
 
-static bool is_writethrough_io(struct cache *cache, struct bio *bio,
-			       dm_cblock_t cblock)
-{
-	return bio_data_dir(bio) == WRITE &&
-		cache->features.write_through && !is_dirty(cache, cblock);
-}
-
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
 {
 	atomic_inc(bio_data_dir(bio) == READ ?
@@ -1128,6 +1307,15 @@
 		   &cache->stats.read_miss : &cache->stats.write_miss);
 }
 
+static void issue_cache_bio(struct cache *cache, struct bio *bio,
+			    struct per_bio_data *pb,
+			    dm_oblock_t oblock, dm_cblock_t cblock)
+{
+	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+	remap_to_cache_dirty(cache, bio, oblock, cblock);
+	issue(cache, bio);
+}
+
 static void process_bio(struct cache *cache, struct prealloc *structs,
 			struct bio *bio)
 {
@@ -1139,7 +1327,8 @@
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
-	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+	bool passthrough = passthrough_mode(&cache->features);
+	bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
 
 	/*
 	 * Check to see if that block is currently migrating.
@@ -1160,15 +1349,39 @@
 
 	switch (lookup_result.op) {
 	case POLICY_HIT:
-		inc_hit_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+		if (passthrough) {
+			inc_miss_counter(cache, bio);
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock))
-			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
-		else
-			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+			/*
+			 * Passthrough always maps to the origin,
+			 * invalidating any cache blocks that are written
+			 * to.
+			 */
 
-		issue(cache, bio);
+			if (bio_data_dir(bio) == WRITE) {
+				atomic_inc(&cache->stats.demotion);
+				invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+				release_cell = false;
+
+			} else {
+				/* FIXME: factor out issue_origin() */
+				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+				remap_to_origin_clear_discard(cache, bio, block);
+				issue(cache, bio);
+			}
+		} else {
+			inc_hit_counter(cache, bio);
+
+			if (bio_data_dir(bio) == WRITE &&
+			    writethrough_mode(&cache->features) &&
+			    !is_dirty(cache, lookup_result.cblock)) {
+				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+				issue(cache, bio);
+			} else
+				issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+		}
+
 		break;
 
 	case POLICY_MISS:
@@ -1227,15 +1440,17 @@
 
 static int commit_if_needed(struct cache *cache)
 {
-	if (dm_cache_changed_this_transaction(cache->cmd) &&
-	    (cache->commit_requested || need_commit_due_to_time(cache))) {
+	int r = 0;
+
+	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
+	    dm_cache_changed_this_transaction(cache->cmd)) {
 		atomic_inc(&cache->stats.commit_count);
-		cache->last_commit_jiffies = jiffies;
 		cache->commit_requested = false;
-		return dm_cache_commit(cache->cmd, false);
+		r = dm_cache_commit(cache->cmd, false);
+		cache->last_commit_jiffies = jiffies;
 	}
 
-	return 0;
+	return r;
 }
 
 static void process_deferred_bios(struct cache *cache)
@@ -1344,36 +1559,88 @@
 }
 
 /*----------------------------------------------------------------
+ * Invalidations.
+ * Dropping something from the cache *without* writing back.
+ *--------------------------------------------------------------*/
+
+static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
+{
+	int r = 0;
+	uint64_t begin = from_cblock(req->cblocks->begin);
+	uint64_t end = from_cblock(req->cblocks->end);
+
+	while (begin != end) {
+		r = policy_remove_cblock(cache->policy, to_cblock(begin));
+		if (!r) {
+			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
+			if (r)
+				break;
+
+		} else if (r == -ENODATA) {
+			/* harmless, already unmapped */
+			r = 0;
+
+		} else {
+			DMERR("policy_remove_cblock failed");
+			break;
+		}
+
+		begin++;
+	}
+
+	cache->commit_requested = true;
+
+	req->err = r;
+	atomic_set(&req->complete, 1);
+
+	wake_up(&req->result_wait);
+}
+
+static void process_invalidation_requests(struct cache *cache)
+{
+	struct list_head list;
+	struct invalidation_request *req, *tmp;
+
+	INIT_LIST_HEAD(&list);
+	spin_lock(&cache->invalidation_lock);
+	list_splice_init(&cache->invalidation_requests, &list);
+	spin_unlock(&cache->invalidation_lock);
+
+	list_for_each_entry_safe (req, tmp, &list, list)
+		process_invalidation_request(cache, req);
+}
+
+/*----------------------------------------------------------------
  * Main worker loop
  *--------------------------------------------------------------*/
+static bool is_quiescing(struct cache *cache)
+{
+	return atomic_read(&cache->quiescing);
+}
+
+static void ack_quiescing(struct cache *cache)
+{
+	if (is_quiescing(cache)) {
+		atomic_inc(&cache->quiescing_ack);
+		wake_up(&cache->quiescing_wait);
+	}
+}
+
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
 static void start_quiescing(struct cache *cache)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
-	cache->quiescing = 1;
-	spin_unlock_irqrestore(&cache->lock, flags);
+	atomic_inc(&cache->quiescing);
+	wait_for_quiescing_ack(cache);
 }
 
 static void stop_quiescing(struct cache *cache)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
-	cache->quiescing = 0;
-	spin_unlock_irqrestore(&cache->lock, flags);
-}
-
-static bool is_quiescing(struct cache *cache)
-{
-	int r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
-	r = cache->quiescing;
-	spin_unlock_irqrestore(&cache->lock, flags);
-
-	return r;
+	atomic_set(&cache->quiescing, 0);
+	atomic_set(&cache->quiescing_ack, 0);
 }
 
 static void wait_for_migrations(struct cache *cache)
@@ -1412,7 +1679,8 @@
 			!bio_list_empty(&cache->deferred_writethrough_bios) ||
 			!list_empty(&cache->quiesced_migrations) ||
 			!list_empty(&cache->completed_migrations) ||
-			!list_empty(&cache->need_commit_migrations);
+			!list_empty(&cache->need_commit_migrations) ||
+			cache->invalidate;
 }
 
 static void do_worker(struct work_struct *ws)
@@ -1420,16 +1688,16 @@
 	struct cache *cache = container_of(ws, struct cache, worker);
 
 	do {
-		if (!is_quiescing(cache))
+		if (!is_quiescing(cache)) {
+			writeback_some_dirty_blocks(cache);
+			process_deferred_writethrough_bios(cache);
 			process_deferred_bios(cache);
+			process_invalidation_requests(cache);
+		}
 
 		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
 		process_migrations(cache, &cache->completed_migrations, complete_migration);
 
-		writeback_some_dirty_blocks(cache);
-
-		process_deferred_writethrough_bios(cache);
-
 		if (commit_if_needed(cache)) {
 			process_deferred_flush_bios(cache, false);
 
@@ -1442,6 +1710,9 @@
 			process_migrations(cache, &cache->need_commit_migrations,
 					   migration_success_post_commit);
 		}
+
+		ack_quiescing(cache);
+
 	} while (more_work(cache));
 }
 
@@ -1715,7 +1986,7 @@
 static void init_features(struct cache_features *cf)
 {
 	cf->mode = CM_WRITE;
-	cf->write_through = false;
+	cf->io_mode = CM_IO_WRITEBACK;
 }
 
 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
@@ -1740,10 +2011,13 @@
 		arg = dm_shift_arg(as);
 
 		if (!strcasecmp(arg, "writeback"))
-			cf->write_through = false;
+			cf->io_mode = CM_IO_WRITEBACK;
 
 		else if (!strcasecmp(arg, "writethrough"))
-			cf->write_through = true;
+			cf->io_mode = CM_IO_WRITETHROUGH;
+
+		else if (!strcasecmp(arg, "passthrough"))
+			cf->io_mode = CM_IO_PASSTHROUGH;
 
 		else {
 			*error = "Unrecognised cache feature requested";
@@ -1872,14 +2146,15 @@
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 			       char **error)
 {
-	cache->policy =	dm_cache_policy_create(ca->policy_name,
-					       cache->cache_size,
-					       cache->origin_sectors,
-					       cache->sectors_per_block);
-	if (!cache->policy) {
+	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
+							   cache->cache_size,
+							   cache->origin_sectors,
+							   cache->sectors_per_block);
+	if (IS_ERR(p)) {
 		*error = "Error creating cache's policy";
-		return -ENOMEM;
+		return PTR_ERR(p);
 	}
+	cache->policy = p;
 
 	return 0;
 }
@@ -1995,6 +2270,22 @@
 	}
 	cache->cmd = cmd;
 
+	if (passthrough_mode(&cache->features)) {
+		bool all_clean;
+
+		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
+		if (r) {
+			*error = "dm_cache_metadata_all_clean() failed";
+			goto bad;
+		}
+
+		if (!all_clean) {
+			*error = "Cannot enter passthrough mode unless all blocks are clean";
+			r = -EINVAL;
+			goto bad;
+		}
+	}
+
 	spin_lock_init(&cache->lock);
 	bio_list_init(&cache->deferred_bios);
 	bio_list_init(&cache->deferred_flush_bios);
@@ -2005,6 +2296,10 @@
 	atomic_set(&cache->nr_migrations, 0);
 	init_waitqueue_head(&cache->migration_wait);
 
+	init_waitqueue_head(&cache->quiescing_wait);
+	atomic_set(&cache->quiescing, 0);
+	atomic_set(&cache->quiescing_ack, 0);
+
 	r = -ENOMEM;
 	cache->nr_dirty = 0;
 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
@@ -2064,7 +2359,7 @@
 
 	cache->need_tick_bio = true;
 	cache->sized = false;
-	cache->quiescing = false;
+	cache->invalidate = false;
 	cache->commit_requested = false;
 	cache->loaded_mappings = false;
 	cache->loaded_discards = false;
@@ -2078,6 +2373,9 @@
 	atomic_set(&cache->stats.commit_count, 0);
 	atomic_set(&cache->stats.discard_count, 0);
 
+	spin_lock_init(&cache->invalidation_lock);
+	INIT_LIST_HEAD(&cache->invalidation_requests);
+
 	*result = cache;
 	return 0;
 
@@ -2207,17 +2505,37 @@
 		return DM_MAPIO_SUBMITTED;
 	}
 
+	r = DM_MAPIO_REMAPPED;
 	switch (lookup_result.op) {
 	case POLICY_HIT:
-		inc_hit_counter(cache, bio);
-		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+		if (passthrough_mode(&cache->features)) {
+			if (bio_data_dir(bio) == WRITE) {
+				/*
+				 * We need to invalidate this block, so
+				 * defer for the worker thread.
+				 */
+				cell_defer(cache, cell, true);
+				r = DM_MAPIO_SUBMITTED;
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock))
-			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
-		else
-			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+			} else {
+				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+				inc_miss_counter(cache, bio);
+				remap_to_origin_clear_discard(cache, bio, block);
 
-		cell_defer(cache, cell, false);
+				cell_defer(cache, cell, false);
+			}
+
+		} else {
+			inc_hit_counter(cache, bio);
+
+			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+			    !is_dirty(cache, lookup_result.cblock))
+				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+			else
+				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+			cell_defer(cache, cell, false);
+		}
 		break;
 
 	case POLICY_MISS:
@@ -2242,10 +2560,10 @@
 		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
 			    (unsigned) lookup_result.op);
 		bio_io_error(bio);
-		return DM_MAPIO_SUBMITTED;
+		r = DM_MAPIO_SUBMITTED;
 	}
 
-	return DM_MAPIO_REMAPPED;
+	return r;
 }
 
 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -2406,26 +2724,71 @@
 	return 0;
 }
 
+static dm_cblock_t get_cache_dev_size(struct cache *cache)
+{
+	sector_t size = get_dev_size(cache->cache_dev);
+	(void) sector_div(size, cache->sectors_per_block);
+	return to_cblock(size);
+}
+
+static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+{
+	if (from_cblock(new_size) > from_cblock(cache->cache_size))
+		return true;
+
+	/*
+	 * We can't drop a dirty block when shrinking the cache.
+	 */
+	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
+		new_size = to_cblock(from_cblock(new_size) + 1);
+		if (is_dirty(cache, new_size)) {
+			DMERR("unable to shrink cache; cache block %llu is dirty",
+			      (unsigned long long) from_cblock(new_size));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
+{
+	int r;
+
+	r = dm_cache_resize(cache->cmd, cache->cache_size);
+	if (r) {
+		DMERR("could not resize cache metadata");
+		return r;
+	}
+
+	cache->cache_size = new_size;
+
+	return 0;
+}
+
 static int cache_preresume(struct dm_target *ti)
 {
 	int r = 0;
 	struct cache *cache = ti->private;
-	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
-	(void) sector_div(actual_cache_size, cache->sectors_per_block);
+	dm_cblock_t csize = get_cache_dev_size(cache);
 
 	/*
 	 * Check to see if the cache has resized.
 	 */
-	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
-		cache->cache_size = to_cblock(actual_cache_size);
-
-		r = dm_cache_resize(cache->cmd, cache->cache_size);
-		if (r) {
-			DMERR("could not resize cache metadata");
+	if (!cache->sized) {
+		r = resize_cache_dev(cache, csize);
+		if (r)
 			return r;
-		}
 
 		cache->sized = true;
+
+	} else if (csize != cache->cache_size) {
+		if (!can_resize(cache, csize))
+			return -EINVAL;
+
+		r = resize_cache_dev(cache, csize);
+		if (r)
+			return r;
 	}
 
 	if (!cache->loaded_mappings) {
@@ -2518,10 +2881,19 @@
 		       (unsigned long long) from_cblock(residency),
 		       cache->nr_dirty);
 
-		if (cache->features.write_through)
+		if (writethrough_mode(&cache->features))
 			DMEMIT("1 writethrough ");
-		else
-			DMEMIT("0 ");
+
+		else if (passthrough_mode(&cache->features))
+			DMEMIT("1 passthrough ");
+
+		else if (writeback_mode(&cache->features))
+			DMEMIT("1 writeback ");
+
+		else {
+			DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
+			goto err;
+		}
 
 		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
 		if (sz < maxlen) {
@@ -2553,7 +2925,128 @@
 }
 
 /*
- * Supports <key> <value>.
+ * A cache block range can take two forms:
+ *
+ * i) A single cblock, eg. '3456'
+ * ii) A begin and end cblock with a dash between, eg. 123-234
+ */
+static int parse_cblock_range(struct cache *cache, const char *str,
+			      struct cblock_range *result)
+{
+	char dummy;
+	uint64_t b, e;
+	int r;
+
+	/*
+	 * Try and parse form (ii) first.
+	 */
+	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
+	if (r < 0)
+		return r;
+
+	if (r == 2) {
+		result->begin = to_cblock(b);
+		result->end = to_cblock(e);
+		return 0;
+	}
+
+	/*
+	 * That didn't work, try form (i).
+	 */
+	r = sscanf(str, "%llu%c", &b, &dummy);
+	if (r < 0)
+		return r;
+
+	if (r == 1) {
+		result->begin = to_cblock(b);
+		result->end = to_cblock(from_cblock(result->begin) + 1u);
+		return 0;
+	}
+
+	DMERR("invalid cblock range '%s'", str);
+	return -EINVAL;
+}
+
+static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
+{
+	uint64_t b = from_cblock(range->begin);
+	uint64_t e = from_cblock(range->end);
+	uint64_t n = from_cblock(cache->cache_size);
+
+	if (b >= n) {
+		DMERR("begin cblock out of range: %llu >= %llu", b, n);
+		return -EINVAL;
+	}
+
+	if (e > n) {
+		DMERR("end cblock out of range: %llu > %llu", e, n);
+		return -EINVAL;
+	}
+
+	if (b >= e) {
+		DMERR("invalid cblock range: %llu >= %llu", b, e);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int request_invalidation(struct cache *cache, struct cblock_range *range)
+{
+	struct invalidation_request req;
+
+	INIT_LIST_HEAD(&req.list);
+	req.cblocks = range;
+	atomic_set(&req.complete, 0);
+	req.err = 0;
+	init_waitqueue_head(&req.result_wait);
+
+	spin_lock(&cache->invalidation_lock);
+	list_add(&req.list, &cache->invalidation_requests);
+	spin_unlock(&cache->invalidation_lock);
+	wake_worker(cache);
+
+	wait_event(req.result_wait, atomic_read(&req.complete));
+	return req.err;
+}
+
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+					      const char **cblock_ranges)
+{
+	int r = 0;
+	unsigned i;
+	struct cblock_range range;
+
+	if (!passthrough_mode(&cache->features)) {
+		DMERR("cache has to be in passthrough mode for invalidation");
+		return -EPERM;
+	}
+
+	for (i = 0; i < count; i++) {
+		r = parse_cblock_range(cache, cblock_ranges[i], &range);
+		if (r)
+			break;
+
+		r = validate_cblock_range(cache, &range);
+		if (r)
+			break;
+
+		/*
+		 * Pass the begin and end cache blocks to the worker and wake it.
+		 */
+		r = request_invalidation(cache, &range);
+		if (r)
+			break;
+	}
+
+	return r;
+}
+
+/*
+ * Supports
+ *	"<key> <value>"
+ * and
+ *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
  *
  * The key migration_threshold is supported by the cache target core.
  */
@@ -2561,6 +3054,12 @@
 {
 	struct cache *cache = ti->private;
 
+	if (!argc)
+		return -EINVAL;
+
+	if (!strcasecmp(argv[0], "invalidate_cblocks"))
+		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
+
 	if (argc != 2)
 		return -EINVAL;
 
@@ -2630,7 +3129,7 @@
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 1, 1},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,
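
With the bump to 1.2.0 the invalidation machinery is driven from
userspace while the cache sits in passthrough mode.  Per the
cblock_range comment earlier, ranges are half-open, so something like
(device name hypothetical):

	dmsetup message my-cache 0 invalidate_cblocks 2345 3400-7890

drops cblock 2345 plus cblocks 3400 through 7889, and fails with -EPERM
if the cache is not in passthrough mode.
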
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0fce0bc..81b0fa6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2,6 +2,7 @@
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
  * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
  *
  * This file is released under the GPL.
  */
@@ -98,6 +99,13 @@
 	u8 *seed;
 };
 
+#define TCW_WHITENING_SIZE 16
+struct iv_tcw_private {
+	struct crypto_shash *crc32_tfm;
+	u8 *iv_seed;
+	u8 *whitening;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
@@ -139,6 +147,7 @@
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
 		struct iv_lmk_private lmk;
+		struct iv_tcw_private tcw;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
@@ -171,7 +180,8 @@
 
 	unsigned long flags;
 	unsigned int key_size;
-	unsigned int key_parts;
+	unsigned int key_parts;      /* independent parts in key buffer */
+	unsigned int key_extra_size; /* additional keys length */
 	u8 key[0];
 };
 
@@ -230,6 +240,16 @@
  *         version 3: the same as version 2 with additional IV seed
  *                   (it uses 65 keys, last key is used as IV seed)
  *
+ * tcw:  Compatible implementation of the block chaining mode used
+ *       by the TrueCrypt device encryption system (prior to version 4.1).
+ *       For more info see: http://www.truecrypt.org
+ *       It operates on full 512-byte sectors and uses CBC
+ *       with an IV derived from the initial key and the sector number.
+ *       In addition, a whitening value derived from the initial key and
+ *       the sector number (mixed using CRC32) is applied to every sector.
+ *       Note that this encryption scheme is vulnerable to watermarking attacks
+ *       and should only be used to access old compatible containers.
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -530,7 +550,7 @@
 		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
 	} sdesc;
 	struct md5_state md5state;
-	u32 buf[4];
+	__le32 buf[4];
 	int i, r;
 
 	sdesc.desc.tfm = lmk->hash_tfm;
@@ -608,6 +628,153 @@
 	return r;
 }
 
+static void crypt_iv_tcw_dtr(struct crypt_config *cc)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+	kzfree(tcw->iv_seed);
+	tcw->iv_seed = NULL;
+	kzfree(tcw->whitening);
+	tcw->whitening = NULL;
+
+	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
+		crypto_free_shash(tcw->crc32_tfm);
+	tcw->crc32_tfm = NULL;
+}
+
+static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
+		ti->error = "Wrong key size for TCW";
+		return -EINVAL;
+	}
+
+	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+	if (IS_ERR(tcw->crc32_tfm)) {
+		ti->error = "Error initializing CRC32 in TCW";
+		return PTR_ERR(tcw->crc32_tfm);
+	}
+
+	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
+	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
+	if (!tcw->iv_seed || !tcw->whitening) {
+		crypt_iv_tcw_dtr(cc);
+		ti->error = "Error allocating seed storage in TCW";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_iv_tcw_init(struct crypt_config *cc)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
+
+	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
+	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
+	       TCW_WHITENING_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_tcw_wipe(struct crypt_config *cc)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+	memset(tcw->iv_seed, 0, cc->iv_size);
+	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_tcw_whitening(struct crypt_config *cc,
+				  struct dm_crypt_request *dmreq,
+				  u8 *data)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+	u8 buf[TCW_WHITENING_SIZE];
+	struct {
+		struct shash_desc desc;
+		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
+	} sdesc;
+	int i, r;
+
+	/* xor whitening with sector number */
+	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
+	crypto_xor(buf, (u8 *)&sector, 8);
+	crypto_xor(&buf[8], (u8 *)&sector, 8);
+
+	/* calculate crc32 for every 32bit part and xor it */
+	sdesc.desc.tfm = tcw->crc32_tfm;
+	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	for (i = 0; i < 4; i++) {
+		r = crypto_shash_init(&sdesc.desc);
+		if (r)
+			goto out;
+		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+		if (r)
+			goto out;
+		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+		if (r)
+			goto out;
+	}
+	crypto_xor(&buf[0], &buf[12], 4);
+	crypto_xor(&buf[4], &buf[8], 4);
+
+	/* apply whitening (8 bytes) to whole sector */
+	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
+		crypto_xor(data + i * 8, buf, 8);
+out:
+	memset(buf, 0, sizeof(buf));
+	return r;
+}
+
+static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+	u8 *src;
+	int r = 0;
+
+	/* Remove whitening from ciphertext */
+	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+		src = kmap_atomic(sg_page(&dmreq->sg_in));
+		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
+		kunmap_atomic(src);
+	}
+
+	/* Calculate IV */
+	memcpy(iv, tcw->iv_seed, cc->iv_size);
+	crypto_xor(iv, (u8 *)&sector, 8);
+	if (cc->iv_size > 8)
+		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+
+	return r;
+}
+
+static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
+{
+	u8 *dst;
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+		return 0;
+
+	/* Apply whitening on ciphertext */
+	dst = kmap_atomic(sg_page(&dmreq->sg_out));
+	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
+	kunmap_atomic(dst);
+
+	return r;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -643,6 +810,15 @@
 	.post	   = crypt_iv_lmk_post
 };
 
+static struct crypt_iv_operations crypt_iv_tcw_ops = {
+	.ctr	   = crypt_iv_tcw_ctr,
+	.dtr	   = crypt_iv_tcw_dtr,
+	.init	   = crypt_iv_tcw_init,
+	.wipe	   = crypt_iv_tcw_wipe,
+	.generator = crypt_iv_tcw_gen,
+	.post	   = crypt_iv_tcw_post
+};
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -774,7 +950,7 @@
 		/* async */
 		case -EBUSY:
 			wait_for_completion(&ctx->restart);
-			INIT_COMPLETION(ctx->restart);
+			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
 			this_cc->req = NULL;
@@ -1274,9 +1450,12 @@
 
 static int crypt_setkey_allcpus(struct crypt_config *cc)
 {
-	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+	unsigned subkey_size;
 	int err = 0, i, r;
 
+	/* Ignore extra keys (which are used for IV etc) */
+	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+
 	for (i = 0; i < cc->tfms_count; i++) {
 		r = crypto_ablkcipher_setkey(cc->tfms[i],
 					     cc->key + (i * subkey_size),
@@ -1409,6 +1588,7 @@
 		return -EINVAL;
 	}
 	cc->key_parts = cc->tfms_count;
+	cc->key_extra_size = 0;
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
@@ -1460,13 +1640,6 @@
 		goto bad;
 	}
 
-	/* Initialize and set key */
-	ret = crypt_set_key(cc, key);
-	if (ret < 0) {
-		ti->error = "Error decoding and setting key";
-		goto bad;
-	}
-
 	/* Initialize IV */
 	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
@@ -1493,18 +1666,33 @@
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else if (strcmp(ivmode, "lmk") == 0) {
 		cc->iv_gen_ops = &crypt_iv_lmk_ops;
-		/* Version 2 and 3 is recognised according
+		/*
+		 * Versions 2 and 3 are recognised according
 		 * to length of provided multi-key string.
 		 * If present (version 3), last key is used as IV seed.
+		 * All keys (including IV seed) are always the same size.
 		 */
-		if (cc->key_size % cc->key_parts)
+		if (cc->key_size % cc->key_parts) {
 			cc->key_parts++;
+			cc->key_extra_size = cc->key_size / cc->key_parts;
+		}
+	} else if (strcmp(ivmode, "tcw") == 0) {
+		cc->iv_gen_ops = &crypt_iv_tcw_ops;
+		cc->key_parts += 2; /* IV + whitening */
+		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
 	} else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
 	}
 
+	/* Initialize and set key */
+	ret = crypt_set_key(cc, key);
+	if (ret < 0) {
+		ti->error = "Error decoding and setting key";
+		goto bad;
+	}
+
 	/* Allocate IV */
 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
 		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
@@ -1817,7 +2005,7 @@
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 12, 1},
+	.version = {1, 13, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
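
For reference, a sketch of the key layout implied by crypt_iv_tcw_ctr()
and crypt_iv_tcw_init() above, assuming the constructor's size check
holds:

	/*
	 * |<----- cipher key ----->|<-- iv_seed -->|<-- whitening -->|
	 * 0              key_offset    iv_size bytes      16 bytes
	 *
	 * key_offset = key_size - iv_size - TCW_WHITENING_SIZE, i.e.
	 * key_extra_size = iv_size + TCW_WHITENING_SIZE, so
	 * crypt_setkey_allcpus() hands only the leading bytes to the
	 * cipher itself.
	 */
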
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afe0814..5152142 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -57,7 +57,7 @@
 static struct list_head _name_buckets[NUM_BUCKETS];
 static struct list_head _uuid_buckets[NUM_BUCKETS];
 
-static void dm_hash_remove_all(int keep_open_devices);
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
 
 /*
  * Guards access to both hash tables.
@@ -86,7 +86,7 @@
 
 static void dm_hash_exit(void)
 {
-	dm_hash_remove_all(0);
+	dm_hash_remove_all(false, false, false);
 }
 
 /*-----------------------------------------------------------------
@@ -276,7 +276,7 @@
 	return table;
 }
 
-static void dm_hash_remove_all(int keep_open_devices)
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
 {
 	int i, dev_skipped;
 	struct hash_cell *hc;
@@ -293,7 +293,8 @@
 			md = hc->md;
 			dm_get(md);
 
-			if (keep_open_devices && dm_lock_for_deletion(md)) {
+			if (keep_open_devices &&
+			    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
 				dm_put(md);
 				dev_skipped++;
 				continue;
@@ -450,6 +451,11 @@
 	return md;
 }
 
+void dm_deferred_remove(void)
+{
+	dm_hash_remove_all(true, false, true);
+}
+
 /*-----------------------------------------------------------------
  * Implementation of the ioctl commands
  *---------------------------------------------------------------*/
@@ -461,7 +467,7 @@
 
 static int remove_all(struct dm_ioctl *param, size_t param_size)
 {
-	dm_hash_remove_all(1);
+	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
 	param->data_size = 0;
 	return 0;
 }
@@ -683,6 +689,9 @@
 	if (dm_suspended_md(md))
 		param->flags |= DM_SUSPEND_FLAG;
 
+	if (dm_test_deferred_remove_flag(md))
+		param->flags |= DM_DEFERRED_REMOVE;
+
 	param->dev = huge_encode_dev(disk_devt(disk));
 
 	/*
@@ -832,8 +841,13 @@
 	/*
 	 * Ensure the device is not open and nothing further can open it.
 	 */
-	r = dm_lock_for_deletion(md);
+	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
 	if (r) {
+		if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
+			up_write(&_hash_lock);
+			dm_put(md);
+			return 0;
+		}
 		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
 		up_write(&_hash_lock);
 		dm_put(md);
@@ -848,6 +862,8 @@
 		dm_table_destroy(t);
 	}
 
+	param->flags &= ~DM_DEFERRED_REMOVE;
+
 	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
 		param->flags |= DM_UEVENT_GENERATED_FLAG;
 
@@ -1469,6 +1485,14 @@
 	if (**argv != '@')
 		return 2; /* no '@' prefix, deliver to target */
 
+	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
+		if (argc != 1) {
+			DMERR("Invalid arguments for @cancel_deferred_remove");
+			return -EINVAL;
+		}
+		return dm_cancel_deferred_remove(md);
+	}
+
 	r = dm_stats_message(md, argc, argv, result, maxlen);
 	if (r < 2)
 		return r;
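
Taken together, these ioctl changes make a remove issued with the
DM_DEFERRED_REMOVE flag succeed immediately on an open device, deferring
the actual teardown to last close; a pending deferred remove can be
undone via the @cancel_deferred_remove message handled above, presumably
along the lines of:

	dmsetup message <dev> 0 @cancel_deferred_remove
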
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de570a5..6eb9dc9 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -87,6 +87,7 @@
 	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
 	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
 	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */
 
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
@@ -390,13 +391,16 @@
 	if (was_queued)
 		m->queue_size--;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path)) {
+	if (m->pg_init_required) {
+		if (!m->pg_init_in_progress)
+			queue_work(kmultipathd, &m->process_queued_ios);
+		r = DM_MAPIO_REQUEUE;
+	} else if ((pgpath && m->queue_io) ||
+		   (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		list_add_tail(&clone->queuelist, &m->queued_ios);
 		m->queue_size++;
-		if ((m->pg_init_required && !m->pg_init_in_progress) ||
-		    !m->queue_io)
+		if (!m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = DM_MAPIO_SUBMITTED;
@@ -497,7 +501,8 @@
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
+	    !m->pg_init_disabled)
 		__pg_init_all_paths(m);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -942,10 +947,20 @@
 
 static void flush_multipath_work(struct multipath *m)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&m->lock, flags);
+	m->pg_init_disabled = 1;
+	spin_unlock_irqrestore(&m->lock, flags);
+
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_work(&m->trigger_event);
+
+	spin_lock_irqsave(&m->lock, flags);
+	m->pg_init_disabled = 0;
+	spin_unlock_irqrestore(&m->lock, flags);
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -1164,7 +1179,7 @@
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (m->pg_init_count <= m->pg_init_retries)
+	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
 		m->pg_init_required = 1;
 	else
 		limit_reached = 1;
@@ -1665,6 +1680,11 @@
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	/* pg_init in progress, requeue until done */
+	if (m->pg_init_in_progress) {
+		busy = 1;
+		goto out;
+	}
 	/* Guess which priority_group will be used at next mapping time */
 	if (unlikely(!m->current_pgpath && m->next_pg))
 		pg = m->next_pg;
@@ -1714,7 +1734,7 @@
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 5, 1},
+	.version = {1, 6, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f87835..465f08c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -545,14 +545,28 @@
 
 /*
  * Used to dynamically allocate the arg array.
+ *
+ * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
  */
 static char **realloc_argv(unsigned *array_size, char **old_argv)
 {
 	char **argv;
 	unsigned new_size;
+	gfp_t gfp;
 
-	new_size = *array_size ? *array_size * 2 : 64;
-	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+	if (*array_size) {
+		new_size = *array_size * 2;
+		gfp = GFP_KERNEL;
+	} else {
+		new_size = 8;
+		gfp = GFP_NOIO;
+	}
+	argv = kmalloc(new_size * sizeof(*argv), gfp);
 	if (argv) {
 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
 		*array_size = new_size;
@@ -1548,8 +1562,11 @@
 			continue;
 
 		r = ti->type->preresume(ti);
-		if (r)
+		if (r) {
+			DMERR("%s: %s: preresume failed, error = %d",
+			      dm_device_name(t->md), ti->type->name, r);
 			return r;
+		}
 	}
 
 	for (i = 0; i < t->num_targets; i++) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3e26c7..0704c52 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -49,6 +49,11 @@
 static DEFINE_IDR(_minor_idr);
 
 static DEFINE_SPINLOCK(_minor_lock);
+
+static void do_deferred_remove(struct work_struct *w);
+
+static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+
 /*
  * For bio-based dm.
  * One of these is allocated per bio.
@@ -116,6 +121,7 @@
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_MERGE_IS_OPTIONAL 6
+#define DMF_DEFERRED_REMOVE 7
 
 /*
  * A dummy definition to make RCU happy.
@@ -299,6 +305,8 @@
 
 static void local_exit(void)
 {
+	flush_scheduled_work();
+
 	kmem_cache_destroy(_rq_tio_cache);
 	kmem_cache_destroy(_io_cache);
 	unregister_blkdev(_major, _name);
@@ -404,7 +412,10 @@
 
 	spin_lock(&_minor_lock);
 
-	atomic_dec(&md->open_count);
+	if (atomic_dec_and_test(&md->open_count) &&
+	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+		schedule_work(&deferred_remove_work);
+
 	dm_put(md);
 
 	spin_unlock(&_minor_lock);
@@ -418,14 +429,18 @@
 /*
  * Guarantees nothing is using the device before it's deleted.
  */
-int dm_lock_for_deletion(struct mapped_device *md)
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
 {
 	int r = 0;
 
 	spin_lock(&_minor_lock);
 
-	if (dm_open_count(md))
+	if (dm_open_count(md)) {
 		r = -EBUSY;
+		if (mark_deferred)
+			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
+	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
+		r = -EEXIST;
 	else
 		set_bit(DMF_DELETING, &md->flags);
 
@@ -434,6 +449,27 @@
 	return r;
 }
 
+int dm_cancel_deferred_remove(struct mapped_device *md)
+{
+	int r = 0;
+
+	spin_lock(&_minor_lock);
+
+	if (test_bit(DMF_DELETING, &md->flags))
+		r = -EBUSY;
+	else
+		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
+
+	spin_unlock(&_minor_lock);
+
+	return r;
+}
+
+static void do_deferred_remove(struct work_struct *w)
+{
+	dm_deferred_remove();
+}
+
 sector_t dm_get_size(struct mapped_device *md)
 {
 	return get_capacity(md->disk);
@@ -2894,6 +2930,11 @@
 	return test_bit(DMF_SUSPENDED, &md->flags);
 }
 
+int dm_test_deferred_remove_flag(struct mapped_device *md)
+{
+	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
+}
+
 int dm_suspended(struct dm_target *ti)
 {
 	return dm_suspended_md(dm_table_get_md(ti->table));
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1d1ad7b..c57ba55 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -129,6 +129,16 @@
 int dm_suspended_md(struct mapped_device *md);
 
 /*
+ * Test if the device is scheduled for deferred remove.
+ */
+int dm_test_deferred_remove_flag(struct mapped_device *md);
+
+/*
+ * Try to remove devices marked for deferred removal.
+ */
+void dm_deferred_remove(void);
+
+/*
  * The device-mapper can be driven through one of two interfaces;
  * ioctl or filesystem, depending which patch you have applied.
  */
@@ -158,7 +168,8 @@
 void dm_destroy(struct mapped_device *md);
 void dm_destroy_immediate(struct mapped_device *md);
 int dm_open_count(struct mapped_device *md);
-int dm_lock_for_deletion(struct mapped_device *md);
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
+int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
 sector_t dm_get_size(struct mapped_device *md);
 struct dm_stats *dm_get_stats(struct mapped_device *md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2445fec..8766eab 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -183,46 +183,6 @@
 }
 EXPORT_SYMBOL_GPL(bio_clone_mddev);
 
-void md_trim_bio(struct bio *bio, int offset, int size)
-{
-	/* 'bio' is a cloned bio which we need to trim to match
-	 * the given offset and size.
-	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
-	 */
-	int i;
-	struct bio_vec *bvec;
-	int sofar = 0;
-
-	size <<= 9;
-	if (offset == 0 && size == bio->bi_size)
-		return;
-
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
-	bio_advance(bio, offset << 9);
-
-	bio->bi_size = size;
-
-	/* avoid any complications with bi_idx being non-zero*/
-	if (bio->bi_idx) {
-		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-		bio->bi_vcnt -= bio->bi_idx;
-		bio->bi_idx = 0;
-	}
-	/* Make sure vcnt and last bv are not too big */
-	bio_for_each_segment(bvec, bio, i) {
-		if (sofar + bvec->bv_len > size)
-			bvec->bv_len = size - sofar;
-		if (bvec->bv_len == 0) {
-			bio->bi_vcnt = i;
-			break;
-		}
-		sofar += bvec->bv_len;
-	}
-}
-EXPORT_SYMBOL_GPL(md_trim_bio);
-
 /*
  * We have a system wide 'event count' that is incremented
  * on any 'interesting' event, and readers of /proc/mdstat
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b0051f2..2f5cc8a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -617,7 +617,6 @@
 				   struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   struct mddev *mddev);
-extern void md_trim_bio(struct bio *bio, int offset, int size);
 
 extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 static inline int mddev_check_plugged(struct mddev *mddev)
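
md_trim_bio() is removed because the block core now provides the same
operation as bio_trim(); a sketch of the replacement's contract,
assuming the 3.13 fs/bio.c helper this code was folded into:

	/* Trim a cloned bio to 'size' sectors starting 'offset' sectors
	 * into it; both arguments are in 512-byte sectors, the same
	 * units the removed md_trim_bio() used. */
	extern void bio_trim(struct bio *bio, int offset, int size);
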
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 172147e..af96e24 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -509,15 +509,18 @@
 static int grow_needs_more_blocks(struct resize *resize)
 {
 	int r;
+	unsigned old_nr_blocks = resize->old_nr_full_blocks;
 
 	if (resize->old_nr_entries_in_last_block > 0) {
+		old_nr_blocks++;
+
 		r = grow_extend_tail_block(resize, resize->max_entries);
 		if (r)
 			return r;
 	}
 
 	r = insert_full_ablocks(resize->info, resize->size_of_block,
-				resize->old_nr_full_blocks,
+				old_nr_blocks,
 				resize->new_nr_full_blocks,
 				resize->max_entries, resize->value,
 				&resize->root);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index e735a6d..cfbf961 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -140,26 +140,10 @@
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
-	int r;
-	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	r = sm_ll_dec(&smd->ll, b, &ev);
-	if (!r && (ev == SM_FREE)) {
-		/*
-		 * It's only free if it's also free in the last
-		 * transaction.
-		 */
-		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
-		if (r)
-			return r;
-
-		if (!old_count)
-			smd->nr_allocated_this_transaction--;
-	}
-
-	return r;
+	return sm_ll_dec(&smd->ll, b, &ev);
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index aacf6bf..af6681b1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1097,8 +1097,8 @@
 		r1_bio->read_disk = rdisk;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
-			    max_sectors);
+		bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+			 max_sectors);
 
 		r1_bio->bios[rdisk] = read_bio;
 
@@ -1266,7 +1266,7 @@
 			continue;
 
 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
 
 		if (first_clone) {
 			/* do behind I/O ?
@@ -2126,7 +2126,7 @@
 		wbio->bi_sector = r1_bio->sector;
 		wbio->bi_size = r1_bio->sectors << 9;
 
-		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+		bio_trim(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_sector += rdev->data_offset;
 		wbio->bi_bdev = rdev->bdev;
 		if (submit_bio_wait(WRITE, wbio) == 0)
@@ -2241,7 +2241,7 @@
 		}
 		r1_bio->read_disk = disk;
 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
 		r1_bio->bios[r1_bio->read_disk] = bio;
 		rdev = conf->mirrors[disk].rdev;
 		printk_ratelimited(KERN_ERR
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 73dc8a3..7c3508a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1302,8 +1302,8 @@
 		slot = r10_bio->read_slot;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
-			    max_sectors);
+		bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+			 max_sectors);
 
 		r10_bio->devs[slot].bio = read_bio;
 		r10_bio->devs[slot].rdev = rdev;
@@ -1510,8 +1510,8 @@
 		if (r10_bio->devs[i].bio) {
 			struct md_rdev *rdev = conf->mirrors[d].rdev;
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-				    max_sectors);
+			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+				 max_sectors);
 			r10_bio->devs[i].bio = mbio;
 
 			mbio->bi_sector	= (r10_bio->devs[i].addr+
@@ -1553,8 +1553,8 @@
 				rdev = conf->mirrors[d].rdev;
 			}
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-				    max_sectors);
+			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+				 max_sectors);
 			r10_bio->devs[i].repl_bio = mbio;
 
 			mbio->bi_sector	= (r10_bio->devs[i].addr +
@@ -2614,7 +2614,7 @@
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors' */
 		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+		bio_trim(wbio, sector - bio->bi_sector, sectors);
 		wbio->bi_sector = (r10_bio->devs[i].addr+
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
@@ -2687,9 +2687,7 @@
 		(unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
-	md_trim_bio(bio,
-		    r10_bio->sector - bio->bi_sector,
-		    max_sectors);
+	bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	r10_bio->devs[slot].rdev = rdev;
 	bio->bi_sector = r10_bio->devs[slot].addr
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8b9068..7f0e17a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -293,20 +293,6 @@
 		do_release_stripe(conf, sh);
 }
 
-static struct llist_node *llist_reverse_order(struct llist_node *head)
-{
-	struct llist_node *new_head = NULL;
-
-	while (head) {
-		struct llist_node *tmp = head;
-		head = head->next;
-		tmp->next = new_head;
-		new_head = tmp;
-	}
-
-	return new_head;
-}
-
 /* should hold conf->device_lock already */
 static int release_stripe_list(struct r5conf *conf)
 {
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2113ffa..b42e6b4 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -49,7 +49,7 @@
  * can't distinguish between a clean block that has been generated
  * from parity calculations, and a clean block that has been
  * successfully written to the spare ( or to parity when resyncing).
- * To distingush these states we have a stripe bit STRIPE_INSYNC that
+ * To distinguish these states we have a stripe bit STRIPE_INSYNC that
  * is set whenever a write is scheduled to the spare, or to the parity
  * disc if there is no spare.  A sync request clears this bit, and
  * when we find it set with no buffers locked, we know the sync is
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 0b4616b..c0363f1 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -206,8 +206,6 @@
 	/* TODO */
 	dvbdev->users--;
 	if (dvbdev->users == 1 && dmxdev->exit == 1) {
-		fops_put(file->f_op);
-		file->f_op = NULL;
 		mutex_unlock(&dmxdev->mutex);
 		wake_up(&dvbdev->wait_queue);
 	} else
@@ -1120,8 +1118,6 @@
 	mutex_lock(&dmxdev->mutex);
 	dmxdev->dvbdev->users--;
 	if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
-		fops_put(file->f_op);
-		file->f_op = NULL;
 		mutex_unlock(&dmxdev->mutex);
 		wake_up(&dmxdev->dvbdev->wait_queue);
 	} else
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 401ef64..983db75 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -74,22 +74,15 @@
 
 	if (dvbdev && dvbdev->fops) {
 		int err = 0;
-		const struct file_operations *old_fops;
+		const struct file_operations *new_fops;
 
-		file->private_data = dvbdev;
-		old_fops = file->f_op;
-		file->f_op = fops_get(dvbdev->fops);
-		if (file->f_op == NULL) {
-			file->f_op = old_fops;
+		new_fops = fops_get(dvbdev->fops);
+		if (!new_fops)
 			goto fail;
-		}
-		if(file->f_op->open)
+		file->private_data = dvbdev;
+		replace_fops(file, new_fops);
+		if (file->f_op->open)
 			err = file->f_op->open(inode,file);
-		if (err) {
-			fops_put(file->f_op);
-			file->f_op = fops_get(old_fops);
-		}
-		fops_put(old_fops);
 		up_read(&minor_rwsem);
 		mutex_unlock(&dvbdev_mutex);
 		return err;
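
The error unwinding collapses because replace_fops() encapsulates the
reference juggling the old code did by hand; roughly (a sketch, not the
exact VFS definition):

	/* Drop the file's current fops reference and install the new
	 * one, which the caller already pinned with fops_get(). */
	fops_put(file->f_op);
	file->f_op = new_fops;
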
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index d18be19..cbc9ee9 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -646,7 +646,7 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called upd64083.
 
-comment "Miscelaneous helper chips"
+comment "Miscellaneous helper chips"
 
 config VIDEO_THS7303
 	tristate "THS7303/53 Video Amplifier"
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 6f738d8..d45e0e3 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -178,7 +178,7 @@
 			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1),
 			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2),
 			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3));
-	v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
+	v4l2_info(sd, "adv7183: Hsync position control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
 			adv7183_read(sd, ADV7183_HS_POS_CTRL_1),
 			adv7183_read(sd, ADV7183_HS_POS_CTRL_2),
 			adv7183_read(sd, ADV7183_HS_POS_CTRL_3));
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index b76ec0e..31f40b3 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1651,7 +1651,7 @@
 	if (ret < 0)
 		goto out_err;
 
-	v4l2_info(sd, "%s: completed succesfully\n", __func__);
+	v4l2_info(sd, "%s: completed successfully\n", __func__);
 	return 0;
 
 out_err:
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 4c11059..2819165 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -422,7 +422,7 @@
 		return ret;
 	}
 
-	INIT_COMPLETION(bcap_dev->comp);
+	reinit_completion(&bcap_dev->comp);
 	bcap_dev->stop = false;
 	return 0;
 }
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 449d2fe..4993610 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -3232,13 +3232,12 @@
 		dev->iram_size = CODA7_IRAM_SIZE;
 		break;
 	}
-	dev->iram_vaddr = gen_pool_alloc(dev->iram_pool, dev->iram_size);
+	dev->iram_vaddr = (unsigned long)gen_pool_dma_alloc(dev->iram_pool,
+			dev->iram_size, (dma_addr_t *)&dev->iram_paddr);
 	if (!dev->iram_vaddr) {
 		dev_err(&pdev->dev, "unable to alloc iram\n");
 		return -ENOMEM;
 	}
-	dev->iram_paddr = gen_pool_virt_to_phys(dev->iram_pool,
-						dev->iram_vaddr);
 
 	platform_set_drvdata(pdev, dev);
 
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index df3a0ec..1c36080 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2182,9 +2182,9 @@
 	isp->pdata = pdata;
 	isp->ref_count = 0;
 
-	isp->raw_dmamask = DMA_BIT_MASK(32);
-	isp->dev->dma_mask = &isp->raw_dmamask;
-	isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, isp);
 
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cd3eff4..ce65d3a 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,7 +152,6 @@
  * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
  *                  regions.
  * @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
  * @stat_lock: Spinlock for handling statistics
  * @isp_mutex: Mutex for serializing requests to ISP.
  * @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@
 	unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
 	resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
 
-	u64 raw_dmamask;
-
 	/* ISP Obj */
 	spinlock_t stat_lock;	/* common lock for statistic drivers */
 	struct mutex isp_mutex;	/* For handling ref_count field */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 97c2c18..9cf6731 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -375,7 +375,7 @@
 	if (r)
 		return r;
 
-	INIT_COMPLETION(radio->busy);
+	reinit_completion(&radio->busy);
 
 	/* wait for the FR IRQ */
 	r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
@@ -389,7 +389,7 @@
 	if (r)
 		return r;
 
-	INIT_COMPLETION(radio->busy);
+	reinit_completion(&radio->busy);
 
 	/* wait for the POWER_ENB IRQ */
 	r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
@@ -444,7 +444,7 @@
 		goto err;
 	}
 
-	INIT_COMPLETION(radio->busy);
+	reinit_completion(&radio->busy);
 
 	r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
 	if (!r) {
@@ -805,7 +805,7 @@
 	if (level < SCHAR_MIN || level > SCHAR_MAX)
 		return -EINVAL;
 
-	INIT_COMPLETION(radio->busy);
+	reinit_completion(&radio->busy);
 	dev_dbg(radio->dev, "%s: BUSY\n", __func__);
 
 	r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
@@ -847,7 +847,7 @@
 	if (r)
 		goto out;
 
-	INIT_COMPLETION(radio->busy);
+	reinit_completion(&radio->busy);
 	dev_dbg(radio->dev, "%s: BUSY\n", __func__);
 
 	r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 5c57e5b..0bd2500 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -218,7 +218,7 @@
 		goto done;
 
 	/* wait till tune operation has completed */
-	INIT_COMPLETION(radio->completion);
+	reinit_completion(&radio->completion);
 	retval = wait_for_completion_timeout(&radio->completion,
 			msecs_to_jiffies(tune_timeout));
 	if (!retval)
@@ -341,7 +341,7 @@
 		return retval;
 
 	/* wait till tune operation has completed */
-	INIT_COMPLETION(radio->completion);
+	reinit_completion(&radio->completion);
 	retval = wait_for_completion_timeout(&radio->completion,
 			msecs_to_jiffies(seek_timeout));
 	if (!retval)
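
These hunks are part of a tree-wide rename: the INIT_COMPLETION() macro
becomes a type-safe inline. A sketch of the two definitions for context
(per 3.13's <linux/completion.h>; not part of this diff):

	#define INIT_COMPLETION(x)	((x).done = 0)	/* old macro, removed */

	static inline void reinit_completion(struct completion *x)
	{
		x->done = 0;	/* same effect, but pointer-based and type-checked */
	}
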
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 19632b1..b53626b 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -207,7 +207,7 @@
 {
 	int rc;
 
-	INIT_COMPLETION(ir->completion);
+	reinit_completion(&ir->completion);
 
 	ir->urb_out->transfer_buffer_length = size;
 	rc = usb_submit_urb(ir->urb_out, GFP_KERNEL);
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 4d13a7f..492a05a 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -5,7 +5,7 @@
  * TODO: This table is a real mess, as it merges RC codes from several
  * devices into a big table. It also has both RC-5 and NEC codes inside.
  * It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
  *
  * The table was imported from dib0700_devices.c.
  *
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index ba81d96..454ea59 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -5,7 +5,7 @@
  * TODO: This table is a real mess, as it merges RC codes from several
  * devices into a big table. It also has both RC-5 and NEC codes inside.
  * It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
  *
  * The table was imported from dib0700_devices.c.
  *
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bbf4aea..a0547db 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -253,7 +253,7 @@
 {
 	if (host->card) {
 		host->retries = cmd_retries;
-		INIT_COMPLETION(host->card->mrq_complete);
+		reinit_completion(&host->card->mrq_complete);
 		host->request(host);
 	}
 }
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 08e7023..24f2f84 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -401,10 +401,10 @@
 			sizeof(struct ms_status_register)))
 			return 0;
 
-		msb->state = MSB_RP_RECEIVE_OOB_READ;
+		msb->state = MSB_RP_RECEIVE_STATUS_REG;
 		return 0;
 
-	case MSB_RP_RECIVE_STATUS_REG:
+	case MSB_RP_RECEIVE_STATUS_REG:
 		msb->regs.status = *(struct ms_status_register *)mrq->data;
 		msb->state = MSB_RP_SEND_OOB_READ;
 		/* fallthrough */
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 96e6375..c75198d 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -223,7 +223,7 @@
 	MSB_RP_RECEIVE_INT_REQ_RESULT,
 
 	MSB_RP_SEND_READ_STATUS_REG,
-	MSB_RP_RECIVE_STATUS_REG,
+	MSB_RP_RECEIVE_STATUS_REG,
 
 	MSB_RP_SEND_OOB_READ,
 	MSB_RP_RECEIVE_OOB_READ,
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index f4176ca..fc145d2 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1023,8 +1023,8 @@
 	} else
 		attr_count = attr->count;
 
-	msb->attr_group.attrs = kzalloc((attr_count + 1)
-					* sizeof(struct attribute),
+	msb->attr_group.attrs = kcalloc(attr_count + 1,
+					sizeof(*msb->attr_group.attrs),
 					GFP_KERNEL);
 	if (!msb->attr_group.attrs) {
 		rc = -ENOMEM;
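
Two improvements are folded into this hunk:

	/* attr_group.attrs is a 'struct attribute **', so each element
	 * is a pointer; sizeof(*msb->attr_group.attrs) sizes the array
	 * correctly, where sizeof(struct attribute) over-allocated.
	 * kcalloc() also checks the count * size multiplication for
	 * overflow before handing back zeroed memory. */
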
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 1b6e913..31727bf 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -290,7 +290,7 @@
 	dbg_verbose("doing dma transfer");
 
 	dev->dma_error = 0;
-	INIT_COMPLETION(dev->dma_done);
+	reinit_completion(&dev->dma_done);
 
 	/* TODO: hidden assumption about nents being always 1 */
 	sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index b6b92d7..1b18a0d 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -105,7 +105,8 @@
 			osm_err("too many drivers registered, increase "
 				"max_drivers\n");
 			spin_unlock_irqrestore(&i2o_drivers_lock, flags);
-			return -EFAULT;
+			rc = -EFAULT;
+			goto out;
 		}
 
 	drv->context = i;
@@ -124,11 +125,14 @@
 	}
 
 	rc = driver_register(&drv->driver);
-	if (rc) {
-		if (drv->event) {
-			destroy_workqueue(drv->event_queue);
-			drv->event_queue = NULL;
-		}
+	if (rc)
+		goto out;
+
+	return 0;
+out:
+	if (drv->event_queue) {
+		destroy_workqueue(drv->event_queue);
+		drv->event_queue = NULL;
 	}
 
 	return rc;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 7ebe9ef..c9b1f64 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1247,7 +1247,7 @@
 		.name	= "88PM860x",
 		.owner	= THIS_MODULE,
 		.pm     = &pm860x_pm_ops,
-		.of_match_table	= of_match_ptr(pm860x_dt_ids),
+		.of_match_table	= pm860x_dt_ids,
 	},
 	.probe		= pm860x_probe,
 	.remove		= pm860x_remove,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 914c3d1..62a60ca 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -27,6 +27,18 @@
 	help
 	  Support for the AS3711 PMIC from AMS
 
+config MFD_AS3722
+	bool "ams AS3722 Power Management IC"
+	select MFD_CORE
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	depends on I2C && OF
+	help
+	  The ams AS3722 is a compact system PMU suitable for mobile phones,
+	  tablets, etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
+	  controllers, 11 LDOs, an RTC, automatic battery, temperature and
+	  overcurrent monitoring, GPIOs, an ADC and a watchdog.
+
 config PMIC_ADP5520
 	bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
 	depends on I2C=y
@@ -664,14 +676,14 @@
 depends on MFD_STMPE
 
 config STMPE_I2C
-	bool "STMicroelectronics STMPE I2C Inteface"
+	bool "STMicroelectronics STMPE I2C Interface"
 	depends on I2C=y
 	default y
 	help
 	  This is used to enable I2C interface of STMPE
 
 config STMPE_SPI
-	bool "STMicroelectronics STMPE SPI Inteface"
+	bool "STMicroelectronics STMPE SPI Interface"
 	depends on SPI_MASTER
 	help
 	  This is used to enable SPI interface of STMPE
@@ -1151,6 +1163,16 @@
 	  core support for the WM8994, in order to use the actual
 	  functionality of the device, other drivers must be enabled.
 
+config MFD_STW481X
+	bool "Support for ST Microelectronics STw481x"
+	depends on I2C && ARCH_NOMADIK
+	select REGMAP_I2C
+	select MFD_CORE
+	help
+	  Select this option to enable the STw481x chip driver used
+	  in various ST Microelectronics and ST-Ericsson embedded
+	  boards of the Nomadik series.
+
 endmenu
 endif
 
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 15b905c..8a28dc9 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -162,3 +162,5 @@
 obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o vexpress-sysreg.o
 obj-$(CONFIG_MFD_RETU)		+= retu-mfd.o
 obj-$(CONFIG_MFD_AS3711)	+= as3711.o
+obj-$(CONFIG_MFD_AS3722)	+= as3722.o
+obj-$(CONFIG_MFD_STW481X)	+= stw481x.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 6f68472..14d9542 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -293,7 +293,7 @@
 	unsigned long addr, val;
 	int ret;
 
-	buf_size = min(count, (sizeof(buf)-1));
+	buf_size = min(count, (size_t)(sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size)) {
 		dev_err(aat2870->dev, "Failed to copy from user\n");
 		return -EFAULT;
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 022b186..75e180c 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -540,7 +540,7 @@
 		for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
 			if (arizona->pdata.gpio_defaults[i] > 0xffff)
 				arizona->pdata.gpio_defaults[i] = 0;
-			if (arizona->pdata.gpio_defaults[i] == 0)
+			else if (arizona->pdata.gpio_defaults[i] == 0)
 				arizona->pdata.gpio_defaults[i] = 0x10000;
 		}
 	} else {
@@ -633,11 +633,11 @@
 	dev_set_drvdata(arizona->dev, arizona);
 	mutex_init(&arizona->clk_lock);
 
-	arizona_of_get_core_pdata(arizona);
-
 	if (dev_get_platdata(arizona->dev))
 		memcpy(&arizona->pdata, dev_get_platdata(arizona->dev),
 		       sizeof(arizona->pdata));
+	else
+		arizona_of_get_core_pdata(arizona);
 
 	regcache_cache_only(arizona->regmap, true);
 
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 51dbabf..beccb79 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -17,6 +17,7 @@
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 
 #include <linux/mfd/arizona/core.h>
 
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 47be7b3..1ca554b 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -17,6 +17,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
+#include <linux/of.h>
 
 #include <linux/mfd/arizona/core.h>
 
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index abd3ab7..ec684fc 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -17,6 +17,7 @@
 #include <linux/mfd/as3711.h>
 #include <linux/mfd/core.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
new file mode 100644
index 0000000..f161f2e
--- /dev/null
+++ b/drivers/mfd/as3722.c
@@ -0,0 +1,449 @@
+/*
+ * Core driver for ams AS3722 PMICs
+ *
+ * Copyright (C) 2013 AMS AG
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AS3722_DEVICE_ID	0x0C
+
+static const struct resource as3722_rtc_resource[] = {
+	{
+		.name = "as3722-rtc-alarm",
+		.start = AS3722_IRQ_RTC_ALARM,
+		.end = AS3722_IRQ_RTC_ALARM,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static const struct resource as3722_adc_resource[] = {
+	{
+		.name = "as3722-adc",
+		.start = AS3722_IRQ_ADC,
+		.end = AS3722_IRQ_ADC,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell as3722_devs[] = {
+	{
+		.name = "as3722-pinctrl",
+	},
+	{
+		.name = "as3722-regulator",
+	},
+	{
+		.name = "as3722-rtc",
+		.num_resources = ARRAY_SIZE(as3722_rtc_resource),
+		.resources = as3722_rtc_resource,
+	},
+	{
+		.name = "as3722-adc",
+		.num_resources = ARRAY_SIZE(as3722_adc_resource),
+		.resources = as3722_adc_resource,
+	},
+	{
+		.name = "as3722-power-off",
+	},
+};
+
+static const struct regmap_irq as3722_irqs[] = {
+	/* INT1 IRQs */
+	[AS3722_IRQ_LID] = {
+		.mask = AS3722_INTERRUPT_MASK1_LID,
+	},
+	[AS3722_IRQ_ACOK] = {
+		.mask = AS3722_INTERRUPT_MASK1_ACOK,
+	},
+	[AS3722_IRQ_ENABLE1] = {
+		.mask = AS3722_INTERRUPT_MASK1_ENABLE1,
+	},
+	[AS3722_IRQ_OCCUR_ALARM_SD0] = {
+		.mask = AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0,
+	},
+	[AS3722_IRQ_ONKEY_LONG_PRESS] = {
+		.mask = AS3722_INTERRUPT_MASK1_ONKEY_LONG,
+	},
+	[AS3722_IRQ_ONKEY] = {
+		.mask = AS3722_INTERRUPT_MASK1_ONKEY,
+	},
+	[AS3722_IRQ_OVTMP] = {
+		.mask = AS3722_INTERRUPT_MASK1_OVTMP,
+	},
+	[AS3722_IRQ_LOWBAT] = {
+		.mask = AS3722_INTERRUPT_MASK1_LOWBAT,
+	},
+
+	/* INT2 IRQs */
+	[AS3722_IRQ_SD0_LV] = {
+		.mask = AS3722_INTERRUPT_MASK2_SD0_LV,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_SD1_LV] = {
+		.mask = AS3722_INTERRUPT_MASK2_SD1_LV,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_SD2_LV] = {
+		.mask = AS3722_INTERRUPT_MASK2_SD2345_LV,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_PWM1_OV_PROT] = {
+		.mask = AS3722_INTERRUPT_MASK2_PWM1_OV_PROT,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_PWM2_OV_PROT] = {
+		.mask = AS3722_INTERRUPT_MASK2_PWM2_OV_PROT,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_ENABLE2] = {
+		.mask = AS3722_INTERRUPT_MASK2_ENABLE2,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_SD6_LV] = {
+		.mask = AS3722_INTERRUPT_MASK2_SD6_LV,
+		.reg_offset = 1,
+	},
+	[AS3722_IRQ_RTC_REP] = {
+		.mask = AS3722_INTERRUPT_MASK2_RTC_REP,
+		.reg_offset = 1,
+	},
+
+	/* INT3 IRQs */
+	[AS3722_IRQ_RTC_ALARM] = {
+		.mask = AS3722_INTERRUPT_MASK3_RTC_ALARM,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_GPIO1] = {
+		.mask = AS3722_INTERRUPT_MASK3_GPIO1,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_GPIO2] = {
+		.mask = AS3722_INTERRUPT_MASK3_GPIO2,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_GPIO3] = {
+		.mask = AS3722_INTERRUPT_MASK3_GPIO3,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_GPIO4] = {
+		.mask = AS3722_INTERRUPT_MASK3_GPIO4,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_GPIO5] = {
+		.mask = AS3722_INTERRUPT_MASK3_GPIO5,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_WATCHDOG] = {
+		.mask = AS3722_INTERRUPT_MASK3_WATCHDOG,
+		.reg_offset = 2,
+	},
+	[AS3722_IRQ_ENABLE3] = {
+		.mask = AS3722_INTERRUPT_MASK3_ENABLE3,
+		.reg_offset = 2,
+	},
+
+	/* INT4 IRQs */
+	[AS3722_IRQ_TEMP_SD0_SHUTDOWN] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_TEMP_SD1_SHUTDOWN] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_TEMP_SD2_SHUTDOWN] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_TEMP_SD0_ALARM] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_TEMP_SD1_ALARM] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_TEMP_SD6_ALARM] = {
+		.mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_OCCUR_ALARM_SD6] = {
+		.mask = AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6,
+		.reg_offset = 3,
+	},
+	[AS3722_IRQ_ADC] = {
+		.mask = AS3722_INTERRUPT_MASK4_ADC,
+		.reg_offset = 3,
+	},
+};
+
+static const struct regmap_irq_chip as3722_irq_chip = {
+	.name = "as3722",
+	.irqs = as3722_irqs,
+	.num_irqs = ARRAY_SIZE(as3722_irqs),
+	.num_regs = 4,
+	.status_base = AS3722_INTERRUPT_STATUS1_REG,
+	.mask_base = AS3722_INTERRUPT_MASK1_REG,
+};
+
+static int as3722_check_device_id(struct as3722 *as3722)
+{
+	u32 val;
+	int ret;
+
+	/* Check that this is actually an AS3722 */
+	ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val);
+	if (ret < 0) {
+		dev_err(as3722->dev, "ASIC_ID1 read failed: %d\n", ret);
+		return ret;
+	}
+
+	if (val != AS3722_DEVICE_ID) {
+		dev_err(as3722->dev, "Device is not AS3722, ID is 0x%x\n", val);
+		return -ENODEV;
+	}
+
+	ret = as3722_read(as3722, AS3722_ASIC_ID2_REG, &val);
+	if (ret < 0) {
+		dev_err(as3722->dev, "ASIC_ID2 read failed: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(as3722->dev, "AS3722 with revision 0x%x found\n", val);
+	return 0;
+}
+
+static int as3722_configure_pullups(struct as3722 *as3722)
+{
+	int ret;
+	u32 val = 0;
+
+	if (as3722->en_intern_int_pullup)
+		val |= AS3722_INT_PULL_UP;
+	if (as3722->en_intern_i2c_pullup)
+		val |= AS3722_I2C_PULL_UP;
+
+	ret = as3722_update_bits(as3722, AS3722_IOVOLTAGE_REG,
+			AS3722_INT_PULL_UP | AS3722_I2C_PULL_UP, val);
+	if (ret < 0)
+		dev_err(as3722->dev, "IOVOLTAGE_REG update failed: %d\n", ret);
+	return ret;
+}
+
+static const struct regmap_range as3722_readable_ranges[] = {
+	regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+	regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+	regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_REG_SEQU_MOD3_REG),
+	regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+	regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+	regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+					AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+	regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+	regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+	regmap_reg_range(AS3722_RTC_ACCESS_REG, AS3722_RTC_ACCESS_REG),
+	regmap_reg_range(AS3722_RTC_STATUS_REG, AS3722_TEMP_STATUS_REG),
+	regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC_CONFIGURATION_REG),
+	regmap_reg_range(AS3722_ASIC_ID1_REG, AS3722_ASIC_ID2_REG),
+	regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_readable_table = {
+	.yes_ranges = as3722_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(as3722_readable_ranges),
+};
+
+static const struct regmap_range as3722_writable_ranges[] = {
+	regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+	regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+	regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_GPIO_SIGNAL_OUT_REG),
+	regmap_reg_range(AS3722_REG_SEQU_MOD1_REG, AS3722_REG_SEQU_MOD3_REG),
+	regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+	regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+	regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+					AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+	regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+	regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+	regmap_reg_range(AS3722_INTERRUPT_MASK1_REG, AS3722_TEMP_STATUS_REG),
+	regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC1_CONTROL_REG),
+	regmap_reg_range(AS3722_ADC1_THRESHOLD_HI_MSB_REG,
+					AS3722_ADC_CONFIGURATION_REG),
+	regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_writable_table = {
+	.yes_ranges = as3722_writable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(as3722_writable_ranges),
+};
+
+static const struct regmap_range as3722_cacheable_ranges[] = {
+	regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_LDO11_VOLTAGE_REG),
+	regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_LDOCONTROL1_REG),
+};
+
+static const struct regmap_access_table as3722_volatile_table = {
+	.no_ranges = as3722_cacheable_ranges,
+	.n_no_ranges = ARRAY_SIZE(as3722_cacheable_ranges),
+};
+
+static const struct regmap_config as3722_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = AS3722_MAX_REGISTER,
+	.cache_type = REGCACHE_RBTREE,
+	.rd_table = &as3722_readable_table,
+	.wr_table = &as3722_writable_table,
+	.volatile_table = &as3722_volatile_table,
+};
+
+static int as3722_i2c_of_probe(struct i2c_client *i2c,
+			struct as3722 *as3722)
+{
+	struct device_node *np = i2c->dev.of_node;
+	struct irq_data *irq_data;
+
+	if (!np) {
+		dev_err(&i2c->dev, "Device Tree not found\n");
+		return -EINVAL;
+	}
+
+	irq_data = irq_get_irq_data(i2c->irq);
+	if (!irq_data) {
+		dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq);
+		return -EINVAL;
+	}
+
+	as3722->en_intern_int_pullup = of_property_read_bool(np,
+					"ams,enable-internal-int-pullup");
+	as3722->en_intern_i2c_pullup = of_property_read_bool(np,
+					"ams,enable-internal-i2c-pullup");
+	as3722->irq_flags = irqd_get_trigger_type(irq_data);
+	dev_dbg(&i2c->dev, "IRQ flags are 0x%08lx\n", as3722->irq_flags);
+	return 0;
+}
+
+static int as3722_i2c_probe(struct i2c_client *i2c,
+			const struct i2c_device_id *id)
+{
+	struct as3722 *as3722;
+	unsigned long irq_flags;
+	int ret;
+
+	as3722 = devm_kzalloc(&i2c->dev, sizeof(struct as3722), GFP_KERNEL);
+	if (!as3722)
+		return -ENOMEM;
+
+	as3722->dev = &i2c->dev;
+	as3722->chip_irq = i2c->irq;
+	i2c_set_clientdata(i2c, as3722);
+
+	ret = as3722_i2c_of_probe(i2c, as3722);
+	if (ret < 0)
+		return ret;
+
+	as3722->regmap = devm_regmap_init_i2c(i2c, &as3722_regmap_config);
+	if (IS_ERR(as3722->regmap)) {
+		ret = PTR_ERR(as3722->regmap);
+		dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = as3722_check_device_id(as3722);
+	if (ret < 0)
+		return ret;
+
+	irq_flags = as3722->irq_flags | IRQF_ONESHOT;
+	ret = regmap_add_irq_chip(as3722->regmap, as3722->chip_irq,
+			irq_flags, -1, &as3722_irq_chip,
+			&as3722->irq_data);
+	if (ret < 0) {
+		dev_err(as3722->dev, "Failed to add regmap irq: %d\n", ret);
+		return ret;
+	}
+
+	ret = as3722_configure_pullups(as3722);
+	if (ret < 0)
+		goto scrub;
+
+	ret = mfd_add_devices(&i2c->dev, -1, as3722_devs,
+			ARRAY_SIZE(as3722_devs), NULL, 0,
+			regmap_irq_get_domain(as3722->irq_data));
+	if (ret) {
+		dev_err(as3722->dev, "Failed to add MFD devices: %d\n", ret);
+		goto scrub;
+	}
+
+	dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
+	return 0;
+
+scrub:
+	regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+	return ret;
+}
+
+static int as3722_i2c_remove(struct i2c_client *i2c)
+{
+	struct as3722 *as3722 = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(as3722->dev);
+	regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+	return 0;
+}
+
+static const struct of_device_id as3722_of_match[] = {
+	{ .compatible = "ams,as3722", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, as3722_of_match);
+
+static const struct i2c_device_id as3722_i2c_id[] = {
+	{ "as3722", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
+
+static struct i2c_driver as3722_i2c_driver = {
+	.driver = {
+		.name = "as3722",
+		.owner = THIS_MODULE,
+		.of_match_table = as3722_of_match,
+	},
+	.probe = as3722_i2c_probe,
+	.remove = as3722_i2c_remove,
+	.id_table = as3722_i2c_id,
+};
+
+module_i2c_driver(as3722_i2c_driver);
+
+MODULE_DESCRIPTION("I2C support for AS3722 PMICs");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
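
For readers unfamiliar with regmap-irq, a sketch of how the core
consumes the chip description registered above (assumed 3.13 regmap-irq
behaviour, not code from this patch):

	/* For an IRQ whose table entry has reg_offset o, the core uses:
	 *   status register = status_base + o  (INTERRUPT_STATUS1_REG + o)
	 *   mask register   = mask_base   + o  (INTERRUPT_MASK1_REG + o)
	 * so AS3722_IRQ_RTC_ALARM (reg_offset 2) is serviced through the
	 * MASK3/STATUS3 registers, matching the "INT3 IRQs" grouping. */
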
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 6a9fec4..c319c4e 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -86,7 +86,11 @@
 	return 0;
 }
 
-static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+/*
+ * According to errata item 24, multiwrite mode should be avoided
+ * in order to prevent register data corruption after power-down.
+ */
+static int da9052_i2c_disable_multiwrite(struct da9052 *da9052)
 {
 	int reg_val, ret;
 
@@ -94,8 +98,8 @@
 	if (ret < 0)
 		return ret;
 
-	if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
-		reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+	if (!(reg_val & DA9052_CONTROL_B_WRITEMODE)) {
+		reg_val |= DA9052_CONTROL_B_WRITEMODE;
 		ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
 				   reg_val);
 		if (ret < 0)
@@ -154,7 +158,7 @@
 		return ret;
 	}
 
-	ret = da9052_i2c_enable_multiwrite(da9052);
+	ret = da9052_i2c_disable_multiwrite(da9052);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 7245b0c..2ed774e 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -394,16 +394,12 @@
 static int ezx_pcap_remove(struct spi_device *spi)
 {
 	struct pcap_chip *pcap = spi_get_drvdata(spi);
-	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
-	int i, adc_irq;
+	int i;
 
 	/* remove all registered subdevs */
 	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
 
 	/* cleanup ADC */
-	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
-				PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
-	devm_free_irq(&spi->dev, adc_irq, pcap);
 	mutex_lock(&pcap->adc_mutex);
 	for (i = 0; i < PCAP_ADC_MAXQ; i++)
 		kfree(pcap->adc_queue[i]);
@@ -509,8 +505,6 @@
 
 remove_subdevs:
 	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
-/* free_adc: */
-	devm_free_irq(&spi->dev, adc_irq, pcap);
 free_irqchip:
 	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
 		irq_set_chip_and_handler(i, NULL, NULL);
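
Why the deletions are safe: the ADC interrupt is requested through a
devm_* API, so the devm core releases it automatically when the SPI
device unbinds, making the explicit devm_free_irq() calls redundant.
Roughly, elsewhere in probe() (a sketch, handler name assumed):

	devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0,
			 "pcap-adc", pcap);
	/* torn down by devres on unbind, not by remove() */
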
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9483bc8..da1c656 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -53,6 +53,7 @@
  *	document number TBD : Wellsburg
  *	document number TBD : Avoton SoC
  *	document number TBD : Coleto Creek
+ *	document number TBD : Wildcat Point-LP
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -211,6 +212,7 @@
 	LPC_WBG,	/* Wellsburg */
 	LPC_AVN,	/* Avoton SoC */
 	LPC_COLETO,	/* Coleto Creek */
+	LPC_WPT_LP,	/* Wildcat Point-LP */
 };
 
 static struct lpc_ich_info lpc_chipset_info[] = {
@@ -503,6 +505,10 @@
 		.name = "Coleto Creek",
 		.iTCO_version = 2,
 	},
+	[LPC_WPT_LP] = {
+		.name = "Lynx Point_LP",
+		.iTCO_version = 2,
+	},
 };
 
 /*
@@ -721,6 +727,13 @@
 	{ PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
 	{ PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
 	{ PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
+	{ PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
+	{ PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
 	{ 0, },			/* End of list */
 };
 MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
@@ -969,7 +982,6 @@
 	if (!cell_added) {
 		dev_warn(&dev->dev, "No MFD cells added\n");
 		lpc_ich_restore_config_space(dev);
-		pci_set_drvdata(dev, NULL);
 		return -ENODEV;
 	}
 
@@ -980,7 +992,6 @@
 {
 	mfd_remove_devices(&dev->dev);
 	lpc_ich_restore_config_space(dev);
-	pci_set_drvdata(dev, NULL);
 }
 
 static struct pci_driver lpc_ich_driver = {
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 8cc6aac..fbfbf0b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -59,18 +59,21 @@
 	.name = "isch_smbus",
 	.num_resources = 1,
 	.resources = &smbus_sch_resource,
+	.ignore_resource_conflicts = true,
 };
 
 static struct mfd_cell sch_gpio_cell = {
 	.name = "sch_gpio",
 	.num_resources = 1,
 	.resources = &gpio_sch_resource,
+	.ignore_resource_conflicts = true,
 };
 
 static struct mfd_cell wdt_sch_cell = {
 	.name = "ie6xx_wdt",
 	.num_resources = 1,
 	.resources = &wdt_sch_resource,
+	.ignore_resource_conflicts = true,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 522be67..34520cb 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -31,6 +31,7 @@
 #include <linux/mfd/max77686.h>
 #include <linux/mfd/max77686-private.h>
 #include <linux/err.h>
+#include <linux/of.h>
 
 #define I2C_ADDR_RTC	(0x0C >> 1)
 
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
index 1029d01..66b58fe 100644
--- a/drivers/mfd/max77693-irq.c
+++ b/drivers/mfd/max77693-irq.c
@@ -128,7 +128,8 @@
 static const inline struct max77693_irq_data *
 irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
 {
-	return &max77693_irqs[irq];
+	struct irq_data *data = irq_get_irq_data(irq);
+	return &max77693_irqs[data->hwirq];
 }
 
 static void max77693_irq_mask(struct irq_data *data)
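
The one-line fix addresses an indexing confusion:

	/* 'irq' is a Linux virq allocated from an irq_domain, not an
	 * index into max77693_irqs[]; data->hwirq is the domain-local
	 * hardware number the table is keyed by, so indexing with the
	 * raw virq read the wrong (or an out-of-range) entry. */
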
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index c04723e..9f92463 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -28,6 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/mfd/core.h>
@@ -110,15 +111,9 @@
 			      const struct i2c_device_id *id)
 {
 	struct max77693_dev *max77693;
-	struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev);
 	u8 reg_data;
 	int ret = 0;
 
-	if (!pdata) {
-		dev_err(&i2c->dev, "No platform data found.\n");
-		return -EINVAL;
-	}
-
 	max77693 = devm_kzalloc(&i2c->dev,
 			sizeof(struct max77693_dev), GFP_KERNEL);
 	if (max77693 == NULL)
@@ -138,8 +133,6 @@
 		return ret;
 	}
 
-	max77693->wakeup = pdata->wakeup;
-
 	ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
 				&reg_data);
 	if (ret < 0) {
@@ -179,8 +172,6 @@
 	if (ret < 0)
 		goto err_mfd;
 
-	device_init_wakeup(max77693->dev, pdata->wakeup);
-
 	return ret;
 
 err_mfd:
@@ -235,11 +226,19 @@
 	.resume = max77693_resume,
 };
 
+#ifdef CONFIG_OF
+static struct of_device_id max77693_dt_match[] = {
+	{ .compatible = "maxim,max77693" },
+	{},
+};
+#endif
+
 static struct i2c_driver max77693_i2c_driver = {
 	.driver = {
 		   .name = "max77693",
 		   .owner = THIS_MODULE,
 		   .pm = &max77693_pm,
+		   .of_match_table = of_match_ptr(max77693_dt_match),
 	},
 	.probe = max77693_i2c_probe,
 	.remove = max77693_i2c_remove,
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index e9b1c93..3bbfedc 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -17,6 +17,7 @@
 #include <linux/mfd/core.h>
 #include <linux/mfd/max8907.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index de7fb80..176aa26 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -238,7 +238,7 @@
 		.name	= "max8925",
 		.owner	= THIS_MODULE,
 		.pm     = &max8925_pm_ops,
-		.of_match_table = of_match_ptr(max8925_dt_ids),
+		.of_match_table = max8925_dt_ids,
 	},
 	.probe		= max8925_probe,
 	.remove		= max8925_remove,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index cee098c..791aea3 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index f745e27..898bd33 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -78,7 +78,6 @@
 		ret = PTR_ERR(mc13xxx->regmap);
 		dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
 				ret);
-		dev_set_drvdata(&client->dev, NULL);
 		return ret;
 	}
 
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index adc8ea3..2676492 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -64,7 +64,8 @@
 EXPORT_SYMBOL(mfd_cell_disable);
 
 static int mfd_platform_add_cell(struct platform_device *pdev,
-				 const struct mfd_cell *cell)
+				 const struct mfd_cell *cell,
+				 atomic_t *usage_count)
 {
 	if (!cell)
 		return 0;
@@ -73,11 +74,12 @@
 	if (!pdev->mfd_cell)
 		return -ENOMEM;
 
+	pdev->mfd_cell->usage_count = usage_count;
 	return 0;
 }
 
 static int mfd_add_device(struct device *parent, int id,
-			  const struct mfd_cell *cell,
+			  const struct mfd_cell *cell, atomic_t *usage_count,
 			  struct resource *mem_base,
 			  int irq_base, struct irq_domain *domain)
 {
@@ -123,7 +125,7 @@
 			goto fail_alias;
 	}
 
-	ret = mfd_platform_add_cell(pdev, cell);
+	ret = mfd_platform_add_cell(pdev, cell, usage_count);
 	if (ret)
 		goto fail_alias;
 
@@ -192,12 +194,12 @@
 }
 
 int mfd_add_devices(struct device *parent, int id,
-		    struct mfd_cell *cells, int n_devs,
+		    const struct mfd_cell *cells, int n_devs,
 		    struct resource *mem_base,
 		    int irq_base, struct irq_domain *domain)
 {
 	int i;
-	int ret = 0;
+	int ret;
 	atomic_t *cnts;
 
 	/* initialize reference counting for all cells */
@@ -207,16 +209,19 @@
 
 	for (i = 0; i < n_devs; i++) {
 		atomic_set(&cnts[i], 0);
-		cells[i].usage_count = &cnts[i];
-		ret = mfd_add_device(parent, id, cells + i, mem_base,
+		ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base,
 				     irq_base, domain);
 		if (ret)
-			break;
+			goto fail;
 	}
 
-	if (ret)
-		mfd_remove_devices(parent);
+	return 0;
 
+fail:
+	if (i)
+		mfd_remove_devices(parent);
+	else
+		kfree(cnts);
 	return ret;
 }
 EXPORT_SYMBOL(mfd_add_devices);
@@ -271,8 +276,8 @@
 	for (i = 0; i < n_clones; i++) {
 		cell_entry.name = clones[i];
 		/* don't give up if a single call fails; just report error */
-		if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0,
-				   NULL))
+		if (mfd_add_device(pdev->dev.parent, -1, &cell_entry,
+				   cell_entry.usage_count, NULL, 0, NULL))
 			dev_err(dev, "failed to create platform device '%s'\n",
 					clones[i]);
 	}
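
The point of threading usage_count through separately: the counters
used to be written back into the caller's cells[] array, which forced
mfd_add_devices() to take a non-const pointer. Allocating the atomic_t
array on the side lets callers keep their cell tables in rodata, as in
this sketch (hypothetical driver shown):

	static const struct mfd_cell foo_devs[] = {
		{ .name = "foo-gpio" },
		{ .name = "foo-wdt" },
	};
	ret = mfd_add_devices(dev, -1, foo_devs, ARRAY_SIZE(foo_devs),
			      NULL, 0, NULL);
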
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29ee54d..142650f 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -328,13 +328,13 @@
 	omap_tll_enable(pdata);
 
 	if (!IS_ERR(omap->ehci_logic_fck))
-		clk_enable(omap->ehci_logic_fck);
+		clk_prepare_enable(omap->ehci_logic_fck);
 
 	for (i = 0; i < omap->nports; i++) {
 		switch (pdata->port_mode[i]) {
 		case OMAP_EHCI_PORT_MODE_HSIC:
 			if (!IS_ERR(omap->hsic60m_clk[i])) {
-				r = clk_enable(omap->hsic60m_clk[i]);
+				r = clk_prepare_enable(omap->hsic60m_clk[i]);
 				if (r) {
 					dev_err(dev,
 					 "Can't enable port %d hsic60m clk:%d\n",
@@ -343,7 +343,7 @@
 			}
 
 			if (!IS_ERR(omap->hsic480m_clk[i])) {
-				r = clk_enable(omap->hsic480m_clk[i]);
+				r = clk_prepare_enable(omap->hsic480m_clk[i]);
 				if (r) {
 					dev_err(dev,
 					 "Can't enable port %d hsic480m clk:%d\n",
@@ -354,7 +354,7 @@
 
 		case OMAP_EHCI_PORT_MODE_TLL:
 			if (!IS_ERR(omap->utmi_clk[i])) {
-				r = clk_enable(omap->utmi_clk[i]);
+				r = clk_prepare_enable(omap->utmi_clk[i]);
 				if (r) {
 					dev_err(dev,
 					 "Can't enable port %d clk : %d\n",
@@ -382,15 +382,15 @@
 		switch (pdata->port_mode[i]) {
 		case OMAP_EHCI_PORT_MODE_HSIC:
 			if (!IS_ERR(omap->hsic60m_clk[i]))
-				clk_disable(omap->hsic60m_clk[i]);
+				clk_disable_unprepare(omap->hsic60m_clk[i]);
 
 			if (!IS_ERR(omap->hsic480m_clk[i]))
-				clk_disable(omap->hsic480m_clk[i]);
+				clk_disable_unprepare(omap->hsic480m_clk[i]);
 		/* Fall through as utmi_clks were used in HSIC mode */
 
 		case OMAP_EHCI_PORT_MODE_TLL:
 			if (!IS_ERR(omap->utmi_clk[i]))
-				clk_disable(omap->utmi_clk[i]);
+				clk_disable_unprepare(omap->utmi_clk[i]);
 			break;
 		default:
 			break;
@@ -398,7 +398,7 @@
 	}
 
 	if (!IS_ERR(omap->ehci_logic_fck))
-		clk_disable(omap->ehci_logic_fck);
+		clk_disable_unprepare(omap->ehci_logic_fck);
 
 	omap_tll_disable(pdata);
 
@@ -893,7 +893,7 @@
 		.name		= (char *)usbhs_driver_name,
 		.owner		= THIS_MODULE,
 		.pm		= &usbhsomap_dev_pm_ops,
-		.of_match_table = of_match_ptr(usbhs_omap_dt_ids),
+		.of_match_table = usbhs_omap_dt_ids,
 	},
 	.remove		= usbhs_omap_remove,
 };
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index e59ac4c..0d946ae1 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -320,7 +320,7 @@
 	.driver = {
 		.name		= (char *)usbtll_driver_name,
 		.owner		= THIS_MODULE,
-		.of_match_table = of_match_ptr(usbtll_omap_dt_ids),
+		.of_match_table = usbtll_omap_dt_ids,
 	},
 	.probe		= usbtll_omap_probe,
 	.remove		= usbtll_omap_remove,
@@ -429,7 +429,7 @@
 			if (IS_ERR(tll->ch_clk[i]))
 				continue;
 
-			r = clk_enable(tll->ch_clk[i]);
+			r = clk_prepare_enable(tll->ch_clk[i]);
 			if (r) {
 				dev_err(tll_dev,
 				 "Error enabling ch %d clock: %d\n", i, r);
@@ -460,7 +460,7 @@
 	for (i = 0; i < tll->nch; i++) {
 		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
 			if (!IS_ERR(tll->ch_clk[i]))
-				clk_disable(tll->ch_clk[i]);
+				clk_disable_unprepare(tll->ch_clk[i]);
 		}
 	}
 
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 135afab..d280d78 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -368,6 +368,7 @@
 	},
 	{ },
 };
+MODULE_DEVICE_TABLE(of, of_palmas_match_tbl);
 
 static int palmas_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
@@ -402,7 +403,7 @@
 	palmas->dev = &i2c->dev;
 	palmas->irq = i2c->irq;
 
-	match = of_match_device(of_match_ptr(of_palmas_match_tbl), &i2c->dev);
+	match = of_match_device(of_palmas_match_tbl, &i2c->dev);
 
 	if (!match)
 		return -ENODATA;
@@ -421,7 +422,7 @@
 				dev_err(palmas->dev,
 					"can't attach client %d\n", i);
 				ret = -ENOMEM;
-				goto err;
+				goto err_i2c;
 			}
 			palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
 		}
@@ -432,7 +433,7 @@
 			dev_err(palmas->dev,
 				"Failed to allocate regmap %d, err: %d\n",
 				i, ret);
-			goto err;
+			goto err_i2c;
 		}
 	}
 
@@ -451,7 +452,7 @@
 			reg);
 	if (ret < 0) {
 		dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret);
-		goto err;
+		goto err_i2c;
 	}
 
 	/* Change IRQ into clear on read mode for efficiency */
@@ -465,7 +466,7 @@
 			IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip,
 			&palmas->irq_data);
 	if (ret < 0)
-		goto err;
+		goto err_i2c;
 
 no_irq:
 	slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
@@ -551,7 +552,6 @@
 		} else if (pdata->pm_off && !pm_power_off) {
 			palmas_dev = palmas;
 			pm_power_off = palmas_power_off;
-			return ret;
 		}
 	}
 
@@ -559,17 +559,31 @@
 
 err_irq:
 	regmap_del_irq_chip(palmas->irq, palmas->irq_data);
-err:
+err_i2c:
+	for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+		if (palmas->i2c_clients[i])
+			i2c_unregister_device(palmas->i2c_clients[i]);
+	}
 	return ret;
 }
 
 static int palmas_i2c_remove(struct i2c_client *i2c)
 {
 	struct palmas *palmas = i2c_get_clientdata(i2c);
+	int i;
 
-	mfd_remove_devices(palmas->dev);
 	regmap_del_irq_chip(palmas->irq, palmas->irq_data);
 
+	for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+		if (palmas->i2c_clients[i])
+			i2c_unregister_device(palmas->i2c_clients[i]);
+	}
+
+	if (palmas == palmas_dev) {
+		pm_power_off = NULL;
+		palmas_dev = NULL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index a6841f7..484fe66 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -171,11 +171,12 @@
 	drvdata = platform_get_drvdata(pdev);
 	if (drvdata)
 		pmic = drvdata->pm_chip_data;
-	if (pmic)
+	if (pmic) {
 		mfd_remove_devices(pmic->dev);
-	if (pmic->irq_chip) {
-		pm8xxx_irq_exit(pmic->irq_chip);
-		pmic->irq_chip = NULL;
+		if (pmic->irq_chip) {
+			pm8xxx_irq_exit(pmic->irq_chip);
+			pmic->irq_chip = NULL;
+		}
 	}
 
 	return 0;
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 3b835f5..573de7b 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -130,13 +130,57 @@
 {
 	int err;
 
-	err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46);
+	err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
+			PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
+			PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
+			PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 |
+			PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR);
 	if (err < 0)
 		return err;
 
 	msleep(1);
 
-	return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0);
+	err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
+			PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
+			PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
+			PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+			PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
+			PHY_PCR_RSSI_EN);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+			PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
+			PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 |
+			PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN |
+			PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
+			PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
+			PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
+			PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
+			PHY_FLD4_BER_CHK_EN);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
+			PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE);
+	if (err < 0)
+		return err;
+	err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
+			PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
+			PHY_FLD3_RXDELINK);
+	if (err < 0)
+		return err;
+	return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
+			PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
+			PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
+			PHY_TUNE_TUNED12);
 }
 
 static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index e6ae772..11e20af 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1149,7 +1149,7 @@
 	pcr->remap_addr = ioremap_nocache(base, len);
 	if (!pcr->remap_addr) {
 		ret = -ENOMEM;
-		goto free_host;
+		goto free_handle;
 	}
 
 	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1209,8 +1209,6 @@
 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
 unmap:
 	iounmap(pcr->remap_addr);
-free_host:
-	dev_set_drvdata(&pcidev->dev, NULL);
 free_handle:
 	kfree(handle);
 free_pcr:
@@ -1242,7 +1240,6 @@
 		pci_disable_msi(pcr->pci);
 	iounmap(pcr->remap_addr);
 
-	dev_set_drvdata(&pcidev->dev, NULL);
 	pci_release_regions(pcidev);
 	pci_disable_device(pcidev);
 
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index f530e4b..34c18fb 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 33f040c..c2c8c91 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1232,7 +1232,7 @@
 }
 
 
-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
+static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
 
 /* sm501_init_reg
  *
@@ -1660,7 +1660,6 @@
  err3:
 	pci_disable_device(dev);
  err2:
-	pci_set_drvdata(dev, NULL);
 	kfree(sm);
  err1:
 	return err;
@@ -1695,7 +1694,6 @@
 	release_resource(sm->regs_claim);
 	kfree(sm->regs_claim);
 
-	pci_set_drvdata(dev, NULL);
 	pci_disable_device(dev);
 }
 
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
new file mode 100644
index 0000000..1243d5c
--- /dev/null
+++ b/drivers/mfd/stw481x.c
@@ -0,0 +1,250 @@
+/*
+ * Core driver for STw4810/STw4811
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stw481x.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/*
+ * This driver can only access the non-USB portions of STw4811; the register
+ * range 0x00-0x10 dealing with USB is bound to the two special I2C pins used
+ * for USB control.
+ */
+
+/* Registers inside the power control address space */
+#define STW_PC_VCORE_SEL	0x05U
+#define STW_PC_VAUX_SEL		0x06U
+#define STW_PC_VPLL_SEL		0x07U
+
+/**
+ * stw481x_get_pctl_reg() - get a power control register
+ * @stw481x: handle to the stw481x chip
+ * @reg: power control register to fetch
+ *
+ * The power control registers are a set of one-time-programmable
+ * registers in their own register space, accessed by writing address
+ * bits to these two registers: bits 7,6,5 of PCTL_REG_LO correspond to
+ * the 3 LSBs of the address and bits 1,0 of PCTL_REG_HI correspond to
+ * the 2 MSBs of the address, forming an address space of 5 bits, i.e.
+ * the 32 registers 0x00 ... 0x1f can be reached.
+ */
+static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
+{
+	u8 msb = (reg >> 3) & 0x03;
+	u8 lsb = (reg << 5) & 0xe0;
+	unsigned int val;
+	u8 vrfy;
+	int ret;
+
+	ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb);
+	if (ret)
+		return ret;
+	ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb);
+	if (ret)
+		return ret;
+	ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val);
+	if (ret)
+		return ret;
+	vrfy = (val & 0x03) << 3;
+	ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val);
+	if (ret)
+		return ret;
+	vrfy |= ((val >> 5) & 0x07);
+	if (vrfy != reg)
+		return -EIO;
+	return (val >> 1) & 0x0f;
+}
+
+static int stw481x_startup(struct stw481x *stw481x)
+{
+	/* Voltages multiplied by 100 (the VAUX values by 10) */
+	u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
+			   130, 132, 134, 136, 138, 140, 145 };
+	u8 vpll_val[] = { 105, 120, 130, 180 };
+	u8 vaux_val[] = { 15, 18, 25, 28 };
+	u8 vcore;
+	u8 vcore_slp;
+	u8 vpll;
+	u8 vaux;
+	bool vaux_en;
+	bool it_warn;
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(stw481x->map, STW_CONF1, &val);
+	if (ret)
+		return ret;
+	vaux_en = !!(val & STW_CONF1_PDN_VAUX);
+	it_warn = !!(val & STW_CONF1_IT_WARN);
+
+	dev_info(&stw481x->client->dev, "voltages %s\n",
+		(val & STW_CONF1_V_MONITORING) ? "OK" : "LOW");
+	dev_info(&stw481x->client->dev, "MMC level shifter %s\n",
+		(val & STW_CONF1_MMC_LS_STATUS) ? "high impedance" : "ON");
+	dev_info(&stw481x->client->dev, "VMMC: %s\n",
+		(val & STW_CONF1_PDN_VMMC) ? "ON" : "disabled");
+
+	dev_info(&stw481x->client->dev, "STw481x power control registers:\n");
+
+	ret = stw481x_get_pctl_reg(stw481x, STW_PC_VCORE_SEL);
+	if (ret < 0)
+		return ret;
+	vcore = ret & 0x0f;
+
+	ret = stw481x_get_pctl_reg(stw481x, STW_PC_VAUX_SEL);
+	if (ret < 0)
+		return ret;
+	vaux = (ret >> 2) & 3;
+	vpll = (ret >> 4) & 1; /* Save bit 4 */
+
+	ret = stw481x_get_pctl_reg(stw481x, STW_PC_VPLL_SEL);
+	if (ret < 0)
+		return ret;
+	vpll |= (ret >> 1) & 2;
+
+	dev_info(&stw481x->client->dev, "VCORE: %u.%uV %s\n",
+		vcore_val[vcore] / 100, vcore_val[vcore] % 100,
+		(ret & 4) ? "ON" : "OFF");
+
+	dev_info(&stw481x->client->dev, "VPLL:  %u.%uV %s\n",
+		vpll_val[vpll] / 100, vpll_val[vpll] % 100,
+		(ret & 0x10) ? "ON" : "OFF");
+
+	dev_info(&stw481x->client->dev, "VAUX:  %u.%uV %s\n",
+		vaux_val[vaux] / 10, vaux_val[vaux] % 10,
+		vaux_en ? "ON" : "OFF");
+
+	ret = regmap_read(stw481x->map, STW_CONF2, &val);
+	if (ret)
+		return ret;
+
+	dev_info(&stw481x->client->dev, "TWARN: %s threshold, %s\n",
+		it_warn ? "below" : "above",
+		(val & STW_CONF2_MASK_TWARN) ?
+		 "enabled" : "mask through VDDOK");
+	dev_info(&stw481x->client->dev, "VMMC: %s\n",
+		(val & STW_CONF2_VMMC_EXT) ? "internal" : "external");
+	dev_info(&stw481x->client->dev, "IT WAKE UP: %s\n",
+		(val & STW_CONF2_MASK_IT_WAKE_UP) ? "enabled" : "masked");
+	dev_info(&stw481x->client->dev, "GPO1: %s\n",
+		(val & STW_CONF2_GPO1) ? "low" : "high impedance");
+	dev_info(&stw481x->client->dev, "GPO2: %s\n",
+		(val & STW_CONF2_GPO2) ? "low" : "high impedance");
+
+	ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val);
+	if (ret)
+		return ret;
+	vcore_slp = val & 0x0f;
+	dev_info(&stw481x->client->dev, "VCORE SLEEP: %u.%uV\n",
+		vcore_val[vcore_slp] / 100, vcore_val[vcore_slp] % 100);
+
+	return 0;
+}
+
+/*
+ * MFD cells - we have one cell which is selected by operation
+ * mode, and we always have a GPIO cell.
+ */
+static struct mfd_cell stw481x_cells[] = {
+	{
+		.of_compatible = "st,stw481x-vmmc",
+		.name = "stw481x-vmmc-regulator",
+		.id = -1,
+	},
+};
+
+static const struct regmap_config stw481x_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int stw481x_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct stw481x			*stw481x;
+	int ret;
+	int i;
+
+	stw481x = devm_kzalloc(&client->dev, sizeof(*stw481x), GFP_KERNEL);
+	if (!stw481x)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, stw481x);
+	stw481x->client = client;
+	stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config);
+	if (IS_ERR(stw481x->map))
+		return PTR_ERR(stw481x->map);
+
+	ret = stw481x_startup(stw481x);
+	if (ret) {
+		dev_err(&client->dev, "chip initialization failed\n");
+		return ret;
+	}
+
+	/* Set up and register the platform devices. */
+	for (i = 0; i < ARRAY_SIZE(stw481x_cells); i++) {
+		/* One state holder for all drivers; this keeps it simple */
+		stw481x_cells[i].platform_data = stw481x;
+		stw481x_cells[i].pdata_size = sizeof(*stw481x);
+	}
+
+	ret = mfd_add_devices(&client->dev, 0, stw481x_cells,
+			ARRAY_SIZE(stw481x_cells), NULL, 0, NULL);
+	if (ret)
+		return ret;
+
+	dev_info(&client->dev, "initialized STw481x device\n");
+
+	return ret;
+}
+
+static int stw481x_remove(struct i2c_client *client)
+{
+	mfd_remove_devices(&client->dev);
+	return 0;
+}
+
+/*
+ * This ID table is completely unused, as this is a pure
+ * device-tree probed driver, but it has to be here due to
+ * the structure of the I2C core.
+ */
+static const struct i2c_device_id stw481x_id[] = {
+	{ "stw481x", 0 },
+	{ },
+};
+
+static const struct of_device_id stw481x_match[] = {
+	{ .compatible = "st,stw4810", },
+	{ .compatible = "st,stw4811", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, stw481x_match);
+
+static struct i2c_driver stw481x_driver = {
+	.driver = {
+		.name	= "stw481x",
+		.of_match_table = stw481x_match,
+	},
+	.probe		= stw481x_probe,
+	.remove		= stw481x_remove,
+	.id_table	= stw481x_id,
+};
+
+module_i2c_driver(stw481x_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("STw481x PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 70f4909f..87ea51d 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -16,6 +16,19 @@
 #include <linux/mfd/core.h>
 #include <linux/mfd/tc3589x.h>
 
+/**
+ * enum tc3589x_version - indicates the TC3589x version
+ */
+enum tc3589x_version {
+	TC3589X_TC35890,
+	TC3589X_TC35892,
+	TC3589X_TC35893,
+	TC3589X_TC35894,
+	TC3589X_TC35895,
+	TC3589X_TC35896,
+	TC3589X_UNKNOWN,
+};
+
 #define TC3589x_CLKMODE_MODCTL_SLEEP		0x0
 #define TC3589x_CLKMODE_MODCTL_OPERATION	(1 << 0)
 
@@ -361,7 +374,21 @@
 	tc3589x->i2c = i2c;
 	tc3589x->pdata = pdata;
 	tc3589x->irq_base = pdata->irq_base;
-	tc3589x->num_gpio = id->driver_data;
+
+	switch (id->driver_data) {
+	case TC3589X_TC35893:
+	case TC3589X_TC35895:
+	case TC3589X_TC35896:
+		tc3589x->num_gpio = 20;
+		break;
+	case TC3589X_TC35890:
+	case TC3589X_TC35892:
+	case TC3589X_TC35894:
+	case TC3589X_UNKNOWN:
+	default:
+		tc3589x->num_gpio = 24;
+		break;
+	}
 
 	i2c_set_clientdata(i2c, tc3589x);
 
@@ -432,7 +459,13 @@
 static SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume);
 
 static const struct i2c_device_id tc3589x_id[] = {
-	{ "tc3589x", 24 },
+	{ "tc35890", TC3589X_TC35890 },
+	{ "tc35892", TC3589X_TC35892 },
+	{ "tc35893", TC3589X_TC35893 },
+	{ "tc35894", TC3589X_TC35894 },
+	{ "tc35895", TC3589X_TC35895 },
+	{ "tc35896", TC3589X_TC35896 },
+	{ "tc3589x", TC3589X_UNKNOWN },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, tc3589x_id);
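
With the expanded ID table, the I2C core hands probe() the matched entry, so id->driver_data selects the GPIO count. A sketch of how board code would pick the 20-GPIO TC35893 variant; the bus number and slave address here are made up:

	#include <linux/i2c.h>
	#include <linux/init.h>

	static struct i2c_board_info tc3589x_board __initdata = {
		I2C_BOARD_INFO("tc35893", 0x44),  /* matches "tc35893" above */
	};

	/* In machine init: i2c_register_board_info(0, &tc3589x_board, 1); */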
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 1c2b994..71e3e0c 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -445,7 +445,6 @@
 	iounmap(ssp->regs);
 	release_mem_region(ssp->res->start, resource_size(ssp->res));
 	kfree(ssp);
-	dev_set_drvdata(dev, NULL);
 	return 0;
 }
 
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index baaf5a8..88718ab 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -56,21 +56,25 @@
 
 void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
 {
-	spin_lock(&tsadc->reg_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tsadc->reg_lock, flags);
 	tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
 	tsadc->reg_se_cache |= val;
 	am335x_tsc_se_update(tsadc);
-	spin_unlock(&tsadc->reg_lock);
+	spin_unlock_irqrestore(&tsadc->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
 
 void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
 {
-	spin_lock(&tsadc->reg_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tsadc->reg_lock, flags);
 	tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
 	tsadc->reg_se_cache &= ~val;
 	am335x_tsc_se_update(tsadc);
-	spin_unlock(&tsadc->reg_lock);
+	spin_unlock_irqrestore(&tsadc->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
 
@@ -95,7 +99,7 @@
 	const __be32            *cur;
 	u32			val;
 	int			err, ctrl;
-	int			clk_value, clock_rate;
+	int			clock_rate;
 	int			tsc_wires = 0, adc_channels = 0, total_channels;
 	int			readouts = 0;
 
@@ -196,11 +200,11 @@
 	}
 	clock_rate = clk_get_rate(clk);
 	clk_put(clk);
-	clk_value = clock_rate / ADC_CLK;
+	tscadc->clk_div = clock_rate / ADC_CLK;
 
 	/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
-	clk_value = clk_value - 1;
-	tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+	tscadc->clk_div--;
+	tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div);
 
 	/* Set the control register bits */
 	ctrl = CNTRLREG_STEPCONFIGWRT |
@@ -303,6 +307,8 @@
 	tscadc_writel(tscadc_dev, REG_CTRL,
 			(restore | CNTRLREG_TSCSSENB));
 
+	tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div);
+
 	return 0;
 }
 
@@ -326,7 +332,7 @@
 		.name   = "ti_am3359-tscadc",
 		.owner	= THIS_MODULE,
 		.pm	= TSCADC_PM_OPS,
-		.of_match_table = of_match_ptr(ti_tscadc_dt_ids),
+		.of_match_table = ti_tscadc_dt_ids,
 	},
 	.probe	= ti_tscadc_probe,
 	.remove	= ti_tscadc_remove,
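
The spin_lock() to spin_lock_irqsave() conversion above matters because the SE register cache is also touched from interrupt context; plain spin_lock() can deadlock if the IRQ fires on the same CPU while the lock is held. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(demo_lock);
	static u32 demo_cache;

	/* Safe from both process and hard-IRQ context: irqsave disables
	 * local interrupts, so an IRQ handler on this CPU cannot spin on
	 * demo_lock against its own holder. */
	static void demo_set_bits(u32 val)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		demo_cache |= val;
		spin_unlock_irqrestore(&demo_lock, flags);
	}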
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index a6755ec..dbb34f9 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -678,7 +678,7 @@
 	priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
 	if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
 		dev_err(&dev->dev, "Failed to request ctl mem\n");
-		goto err_request;
+		goto err_start;
 	}
 
 	priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
@@ -828,13 +828,10 @@
 	iounmap(priv->ctl_membase);
 err_ioremap:
 	release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
-err_request:
-	pci_set_drvdata(dev, NULL);
 err_start:
 	pci_disable_device(dev);
 err_enable:
 	kfree(priv);
-	pci_set_drvdata(dev, NULL);
 	return -ENODEV;
 }
 
@@ -851,7 +848,6 @@
 
 	pci_disable_msix(dev);
 	pci_disable_device(dev);
-	pci_set_drvdata(dev, NULL);
 	kfree(priv);
 }
 
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 5ad4b77..a081b92 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps6507x.h>
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index b8f4864..b7be0b2 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -245,7 +245,7 @@
 	.driver		= {
 		.name	= "tps65217",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(tps65217_of_match),
+		.of_match_table = tps65217_of_match,
 	},
 	.id_table	= tps65217_id_table,
 	.probe		= tps65217_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index f54fe4d..ee61fd7 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -26,6 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/of.h>
 
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps6586x.h>
@@ -124,6 +125,7 @@
 	struct i2c_client	*client;
 	struct regmap		*regmap;
 
+	int			irq;
 	struct irq_chip		irq_chip;
 	struct mutex		irq_lock;
 	int			irq_base;
@@ -261,12 +263,23 @@
 	mutex_unlock(&tps6586x->irq_lock);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int tps6586x_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+	return irq_set_irq_wake(tps6586x->irq, on);
+}
+#else
+#define tps6586x_irq_set_wake NULL
+#endif
+
 static struct irq_chip tps6586x_irq_chip = {
 	.name = "tps6586x",
 	.irq_bus_lock = tps6586x_irq_lock,
 	.irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
 	.irq_disable = tps6586x_irq_disable,
 	.irq_enable = tps6586x_irq_enable,
+	.irq_set_wake = tps6586x_irq_set_wake,
 };
 
 static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
@@ -331,6 +344,8 @@
 	int new_irq_base;
 	int irq_num = ARRAY_SIZE(tps6586x_irqs);
 
+	tps6586x->irq = irq;
+
 	mutex_init(&tps6586x->irq_lock);
 	for (i = 0; i < 5; i++) {
 		tps6586x->mask_reg[i] = 0xff;
@@ -360,10 +375,8 @@
 	ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
 				   "tps6586x", tps6586x);
 
-	if (!ret) {
+	if (!ret)
 		device_init_wakeup(tps6586x->dev, 1);
-		enable_irq_wake(irq);
-	}
 
 	return ret;
 }
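
Rather than unconditionally calling enable_irq_wake() at probe, the chip now forwards .irq_set_wake to its parent interrupt, so each consumer decides about wakeup itself. Roughly, a child cell would do something like the following; client_irq is hypothetical:

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(client_irq);	/* propagates to the tps6586x
						 * parent IRQ via .irq_set_wake */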
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index d792772..c0f608e 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -25,6 +25,7 @@
 #include <linux/mfd/core.h>
 #include <linux/regmap.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 
 static struct resource rtc_resources[] = {
@@ -410,14 +411,10 @@
 	ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
 	if (!ret)
 		board_info->vmbch_threshold = prop;
-	else if (*chip_id == TPS65911)
-		dev_warn(&client->dev, "VMBCH-Threshold not specified");
 
 	ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
 	if (!ret)
 		board_info->vmbch2_threshold = prop;
-	else if (*chip_id == TPS65911)
-		dev_warn(&client->dev, "VMBCH2-Threshold not specified");
 
 	prop = of_property_read_bool(np, "ti,en-ck32k-xtal");
 	board_info->en_ck32k_xtal = prop;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index daf6694..0779d5a 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -565,13 +565,13 @@
 				      twl6040->supplies);
 	if (ret != 0) {
 		dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
-		goto regulator_get_err;
+		return ret;
 	}
 
 	ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
 	if (ret != 0) {
 		dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
-		goto regulator_get_err;
+		return ret;
 	}
 
 	twl6040->dev = &client->dev;
@@ -619,7 +619,7 @@
 					"twl6040_irq_th", twl6040);
 	if (ret) {
 		dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
-		goto thirq_err;
+		goto readyirq_err;
 	}
 
 	/* dual-access registers controlled by I2C only */
@@ -659,21 +659,14 @@
 	ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
 			      NULL, 0, NULL);
 	if (ret)
-		goto mfd_err;
+		goto readyirq_err;
 
 	return 0;
 
-mfd_err:
-	devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
-thirq_err:
-	devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
 readyirq_err:
 	regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
 gpio_err:
 	regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-regulator_get_err:
-	i2c_set_clientdata(client, NULL);
-
 	return ret;
 }
 
@@ -684,12 +677,9 @@
 	if (twl6040->power_count)
 		twl6040_power(twl6040, 0);
 
-	devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
-	devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
 	regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
 
 	mfd_remove_devices(&client->dev);
-	i2c_set_clientdata(client, NULL);
 
 	regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
 
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index d5966e6..0313f83 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -553,6 +553,7 @@
 	if (ucb->irq_base < 0) {
 		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
 			ucb->irq_base);
+		ret = ucb->irq_base;
 		goto err_irq_alloc;
 	}
 
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 802dd3c..1e9a4b2 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -903,7 +903,6 @@
 	{ 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */ 
 	{ 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */ 
 	{ 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */ 
-	{ 0x00000D50, 0x0000 },   /* R3408  - AOD wkup and trig */
 	{ 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */ 
 	{ 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */ 
 	{ 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */ 
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 3113e39..bf8b3b5 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -243,6 +243,12 @@
 EXPORT_SYMBOL_GPL(wm5110_patch);
 
 static const struct regmap_irq wm5110_aod_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+		.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+	},
+	[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+		.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+	},
 	[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
 	[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
 	[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -505,6 +511,7 @@
 	{ 0x00000293, 0x0000 },    /* R659   - Accessory Detect Mode 1 */
 	{ 0x0000029B, 0x0020 },    /* R667   - Headphone Detect 1 */
 	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
+	{ 0x000002A2, 0x0000 },    /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
 	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
@@ -592,7 +599,7 @@
 	{ 0x0000043E, 0x0080 },    /* R1086  - DAC Volume Limit 6R */
 	{ 0x0000043F, 0x0800 },    /* R1087  - Noise Gate Select 6R */
 	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
-	{ 0x00000458, 0x0001 },    /* R1112  - Noise Gate Control */
+	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
 	{ 0x00000480, 0x0040 },    /* R1152  - Class W ANC Threshold 1 */
 	{ 0x00000481, 0x0040 },    /* R1153  - Class W ANC Threshold 2 */
 	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
@@ -1204,7 +1211,6 @@
 	{ 0x00000D1B, 0xFFFF },    /* R3355  - IRQ2 Status 4 Mask */
 	{ 0x00000D1C, 0xFFFF },    /* R3356  - IRQ2 Status 5 Mask */
 	{ 0x00000D1F, 0x0000 },    /* R3359  - IRQ2 Control */
-	{ 0x00000D50, 0x0000 },    /* R3408  - AOD wkup and trig */
 	{ 0x00000D53, 0xFFFF },    /* R3411  - AOD IRQ Mask IRQ1 */
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
@@ -1440,6 +1446,7 @@
 	case ARIZONA_ACCESSORY_DETECT_MODE_1:
 	case ARIZONA_HEADPHONE_DETECT_1:
 	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_MICD_CLAMP_CONTROL:
 	case ARIZONA_MIC_DETECT_1:
 	case ARIZONA_MIC_DETECT_2:
 	case ARIZONA_MIC_DETECT_3:
@@ -2291,21 +2298,37 @@
 	case ARIZONA_DSP1_STATUS_1:
 	case ARIZONA_DSP1_STATUS_2:
 	case ARIZONA_DSP1_STATUS_3:
+	case ARIZONA_DSP1_SCRATCH_0:
+	case ARIZONA_DSP1_SCRATCH_1:
+	case ARIZONA_DSP1_SCRATCH_2:
+	case ARIZONA_DSP1_SCRATCH_3:
 	case ARIZONA_DSP2_CONTROL_1:
 	case ARIZONA_DSP2_CLOCKING_1:
 	case ARIZONA_DSP2_STATUS_1:
 	case ARIZONA_DSP2_STATUS_2:
 	case ARIZONA_DSP2_STATUS_3:
+	case ARIZONA_DSP2_SCRATCH_0:
+	case ARIZONA_DSP2_SCRATCH_1:
+	case ARIZONA_DSP2_SCRATCH_2:
+	case ARIZONA_DSP2_SCRATCH_3:
 	case ARIZONA_DSP3_CONTROL_1:
 	case ARIZONA_DSP3_CLOCKING_1:
 	case ARIZONA_DSP3_STATUS_1:
 	case ARIZONA_DSP3_STATUS_2:
 	case ARIZONA_DSP3_STATUS_3:
+	case ARIZONA_DSP3_SCRATCH_0:
+	case ARIZONA_DSP3_SCRATCH_1:
+	case ARIZONA_DSP3_SCRATCH_2:
+	case ARIZONA_DSP3_SCRATCH_3:
 	case ARIZONA_DSP4_CONTROL_1:
 	case ARIZONA_DSP4_CLOCKING_1:
 	case ARIZONA_DSP4_STATUS_1:
 	case ARIZONA_DSP4_STATUS_2:
 	case ARIZONA_DSP4_STATUS_3:
+	case ARIZONA_DSP4_SCRATCH_0:
+	case ARIZONA_DSP4_SCRATCH_1:
+	case ARIZONA_DSP4_SCRATCH_2:
+	case ARIZONA_DSP4_SCRATCH_3:
 		return true;
 	default:
 		return false;
@@ -2347,25 +2370,41 @@
 	case ARIZONA_INTERRUPT_RAW_STATUS_7:
 	case ARIZONA_INTERRUPT_RAW_STATUS_8:
 	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
 	case ARIZONA_AOD_IRQ1:
 	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
 	case ARIZONA_FX_CTRL2:
 	case ARIZONA_ASRC_STATUS:
 	case ARIZONA_DSP_STATUS:
-	case ARIZONA_DSP1_CONTROL_1:
-	case ARIZONA_DSP1_CLOCKING_1:
 	case ARIZONA_DSP1_STATUS_1:
 	case ARIZONA_DSP1_STATUS_2:
 	case ARIZONA_DSP1_STATUS_3:
+	case ARIZONA_DSP1_SCRATCH_0:
+	case ARIZONA_DSP1_SCRATCH_1:
+	case ARIZONA_DSP1_SCRATCH_2:
+	case ARIZONA_DSP1_SCRATCH_3:
 	case ARIZONA_DSP2_STATUS_1:
 	case ARIZONA_DSP2_STATUS_2:
 	case ARIZONA_DSP2_STATUS_3:
+	case ARIZONA_DSP2_SCRATCH_0:
+	case ARIZONA_DSP2_SCRATCH_1:
+	case ARIZONA_DSP2_SCRATCH_2:
+	case ARIZONA_DSP2_SCRATCH_3:
 	case ARIZONA_DSP3_STATUS_1:
 	case ARIZONA_DSP3_STATUS_2:
 	case ARIZONA_DSP3_STATUS_3:
+	case ARIZONA_DSP3_SCRATCH_0:
+	case ARIZONA_DSP3_SCRATCH_1:
+	case ARIZONA_DSP3_SCRATCH_2:
+	case ARIZONA_DSP3_SCRATCH_3:
 	case ARIZONA_DSP4_STATUS_1:
 	case ARIZONA_DSP4_STATUS_2:
 	case ARIZONA_DSP4_STATUS_3:
+	case ARIZONA_DSP4_SCRATCH_0:
+	case ARIZONA_DSP4_SCRATCH_1:
+	case ARIZONA_DSP4_SCRATCH_2:
+	case ARIZONA_DSP4_SCRATCH_3:
 		return true;
 	default:
 		return false;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e1c283e..0308275 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -33,84 +33,6 @@
 
 #include "wm8994.h"
 
-/**
- * wm8994_reg_read: Read a single WM8994 register.
- *
- * @wm8994: Device to read from.
- * @reg: Register to read.
- */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
-{
-	unsigned int val;
-	int ret;
-
-	ret = regmap_read(wm8994->regmap, reg, &val);
-
-	if (ret < 0)
-		return ret;
-	else
-		return val;
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_read);
-
-/**
- * wm8994_bulk_read: Read multiple WM8994 registers
- *
- * @wm8994: Device to read from
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to fill.  The data will be returned big endian.
- */
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
-		     int count, u16 *buf)
-{
-	return regmap_bulk_read(wm8994->regmap, reg, buf, count);
-}
-
-/**
- * wm8994_reg_write: Write a single WM8994 register.
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @val: Value to write.
- */
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
-		     unsigned short val)
-{
-	return regmap_write(wm8994->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_write);
-
-/**
- * wm8994_bulk_write: Write multiple WM8994 registers
- *
- * @wm8994: Device to write to
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to write from.  Data must be big-endian formatted.
- */
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
-		      int count, const u16 *buf)
-{
-	return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
-}
-EXPORT_SYMBOL_GPL(wm8994_bulk_write);
-
-/**
- * wm8994_set_bits: Set the value of a bitfield in a WM8994 register
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @mask: Mask of bits to set.
- * @val: Value to set (unshifted)
- */
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
-		    unsigned short mask, unsigned short val)
-{
-	return regmap_update_bits(wm8994->regmap, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_set_bits);
-
 static struct mfd_cell wm8994_regulator_devs[] = {
 	{
 		.name = "wm8994-ldo",
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e760715..a3e291d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -381,19 +381,6 @@
 	  This driver provides support for the Honeywell HMC6352 compass,
 	  providing configuration and heading data via sysfs.
 
-config EP93XX_PWM
-	tristate "EP93xx PWM support"
-	depends on ARCH_EP93XX
-	help
-	  This option enables device driver support for the PWM channels
-	  on the Cirrus EP93xx processors.  The EP9307 chip only has one
-	  PWM channel all the others have two, the second channel is an
-	  alternate function of the EGPIO14 pin.  A sysfs interface is
-	  provided to control the PWM channels.
-
-	  To compile this driver as a module, choose M here: the module will
-	  be called ep93xx_pwm.
-
 config DS1682
 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
 	depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 0b7ea3e..f45473e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,7 +33,6 @@
 obj-$(CONFIG_ISL29003)		+= isl29003.o
 obj-$(CONFIG_ISL29020)		+= isl29020.o
 obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
-obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
 obj-$(CONFIG_C2PORT)		+= c2port/
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
deleted file mode 100644
index cdb67a9..0000000
--- a/drivers/misc/ep93xx_pwm.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- *  Simple PWM driver for EP93XX
- *
- *	(c) Copyright 2009  Matthieu Crapet <mcrapet@gmail.com>
- *	(c) Copyright 2009  H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- *  EP9307 has only one channel:
- *    - PWMOUT
- *
- *  EP9301/02/12/15 have two channels:
- *    - PWMOUT
- *    - PWMOUT1 (alternate function for EGPIO14)
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/platform.h>
-
-#define EP93XX_PWMx_TERM_COUNT	0x00
-#define EP93XX_PWMx_DUTY_CYCLE	0x04
-#define EP93XX_PWMx_ENABLE	0x08
-#define EP93XX_PWMx_INVERT	0x0C
-
-#define EP93XX_PWM_MAX_COUNT	0xFFFF
-
-struct ep93xx_pwm {
-	void __iomem	*mmio_base;
-	struct clk	*clk;
-	u32		duty_percent;
-};
-
-/*
- * /sys/devices/platform/ep93xx-pwm.N
- *   /min_freq      read-only   minimum pwm output frequency
- *   /max_req       read-only   maximum pwm output frequency
- *   /freq          read-write  pwm output frequency (0 = disable output)
- *   /duty_percent  read-write  pwm duty cycle percent (1..99)
- *   /invert        read-write  invert pwm output
- */
-
-static ssize_t ep93xx_pwm_get_min_freq(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	unsigned long rate = clk_get_rate(pwm->clk);
-
-	return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1));
-}
-
-static ssize_t ep93xx_pwm_get_max_freq(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	unsigned long rate = clk_get_rate(pwm->clk);
-
-	return sprintf(buf, "%ld\n", rate / 2);
-}
-
-static ssize_t ep93xx_pwm_get_freq(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
-	if (readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) {
-		unsigned long rate = clk_get_rate(pwm->clk);
-		u16 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-
-		return sprintf(buf, "%ld\n", rate / (term + 1));
-	} else {
-		return sprintf(buf, "disabled\n");
-	}
-}
-
-static ssize_t ep93xx_pwm_set_freq(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	long val;
-	int err;
-
-	err = kstrtol(buf, 10, &val);
-	if (err)
-		return -EINVAL;
-
-	if (val == 0) {
-		writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
-	} else if (val <= (clk_get_rate(pwm->clk) / 2)) {
-		u32 term, duty;
-
-		val = (clk_get_rate(pwm->clk) / val) - 1;
-		if (val > EP93XX_PWM_MAX_COUNT)
-			val = EP93XX_PWM_MAX_COUNT;
-		if (val < 1)
-			val = 1;
-
-		term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-		duty = ((val + 1) * pwm->duty_percent / 100) - 1;
-
-		/* If pwm is running, order is important */
-		if (val > term) {
-			writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-			writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
-		} else {
-			writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
-			writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-		}
-
-		if (!readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1)
-			writel(0x1, pwm->mmio_base + EP93XX_PWMx_ENABLE);
-	} else {
-		return -EINVAL;
-	}
-
-	return count;
-}
-
-static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
-	return sprintf(buf, "%d\n", pwm->duty_percent);
-}
-
-static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	long val;
-	int err;
-
-	err = kstrtol(buf, 10, &val);
-	if (err)
-		return -EINVAL;
-
-	if (val > 0 && val < 100) {
-		u32 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-		u32 duty = ((term + 1) * val / 100) - 1;
-
-		writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
-		pwm->duty_percent = val;
-		return count;
-	}
-
-	return -EINVAL;
-}
-
-static ssize_t ep93xx_pwm_get_invert(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	int inverted = readl(pwm->mmio_base + EP93XX_PWMx_INVERT) & 0x1;
-
-	return sprintf(buf, "%d\n", inverted);
-}
-
-static ssize_t ep93xx_pwm_set_invert(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-	long val;
-	int err;
-
-	err = kstrtol(buf, 10, &val);
-	if (err)
-		return -EINVAL;
-
-	if (val == 0)
-		writel(0x0, pwm->mmio_base + EP93XX_PWMx_INVERT);
-	else if (val == 1)
-		writel(0x1, pwm->mmio_base + EP93XX_PWMx_INVERT);
-	else
-		return -EINVAL;
-
-	return count;
-}
-
-static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
-static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
-static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
-		   ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
-static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
-		   ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
-static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
-		   ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
-
-static struct attribute *ep93xx_pwm_attrs[] = {
-	&dev_attr_min_freq.attr,
-	&dev_attr_max_freq.attr,
-	&dev_attr_freq.attr,
-	&dev_attr_duty_percent.attr,
-	&dev_attr_invert.attr,
-	NULL
-};
-
-static const struct attribute_group ep93xx_pwm_sysfs_files = {
-	.attrs	= ep93xx_pwm_attrs,
-};
-
-static int ep93xx_pwm_probe(struct platform_device *pdev)
-{
-	struct ep93xx_pwm *pwm;
-	struct resource *res;
-	int ret;
-
-	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-	if (!pwm)
-		return -ENOMEM;
-
-	pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk");
-	if (IS_ERR(pwm->clk))
-		return PTR_ERR(pwm->clk);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pwm->mmio_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(pwm->mmio_base))
-		return PTR_ERR(pwm->mmio_base);
-
-	ret = ep93xx_pwm_acquire_gpio(pdev);
-	if (ret)
-		return ret;
-
-	ret = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
-	if (ret) {
-		ep93xx_pwm_release_gpio(pdev);
-		return ret;
-	}
-
-	pwm->duty_percent = 50;
-
-	/* disable pwm at startup. Avoids zero value. */
-	writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
-	writel(EP93XX_PWM_MAX_COUNT, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT);
-	writel(EP93XX_PWM_MAX_COUNT/2, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE);
-
-	clk_enable(pwm->clk);
-
-	platform_set_drvdata(pdev, pwm);
-	return 0;
-}
-
-static int ep93xx_pwm_remove(struct platform_device *pdev)
-{
-	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
-
-	writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE);
-	clk_disable(pwm->clk);
-	sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
-	ep93xx_pwm_release_gpio(pdev);
-
-	return 0;
-}
-
-static struct platform_driver ep93xx_pwm_driver = {
-	.driver		= {
-		.name	= "ep93xx-pwm",
-		.owner	= THIS_MODULE,
-	},
-	.probe		= ep93xx_pwm_probe,
-	.remove		= ep93xx_pwm_remove,
-};
-module_platform_driver(ep93xx_pwm_driver);
-
-MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
-	      "H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("EP93xx PWM driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ep93xx-pwm");
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 914cc9b..8aa42e7 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -493,7 +493,7 @@
 			ioread8(&dc->config_change), ioread8(&d->type), mvdev);
 
 		status = ioread8(&d->status);
-		INIT_COMPLETION(mvdev->reset_done);
+		reinit_completion(&mvdev->reset_done);
 		unregister_virtio_device(&mvdev->vdev);
 		mic_free_card_irq(mvdev->virtio_cookie, mvdev);
 		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index b079c65..7558d91 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -38,7 +38,7 @@
 
 #define MIC_RESET_TO (45)
 
-	INIT_COMPLETION(mdev->reset_wait);
+	reinit_completion(&mdev->reset_wait);
 	mdev->ops->reset_fw_ready(mdev);
 	mdev->ops->reset(mdev);
 
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 83907c7..96853a0 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -218,7 +218,7 @@
 
 	pr_debug("%s", __func__);
 
-	INIT_COMPLETION(kim_gdata->kim_rcvd);
+	reinit_completion(&kim_gdata->kim_rcvd);
 	if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
 		pr_err("kim: couldn't write 4 bytes");
 		return -EIO;
@@ -229,7 +229,7 @@
 		pr_err(" waiting for ver info- timed out ");
 		return -ETIMEDOUT;
 	}
-	INIT_COMPLETION(kim_gdata->kim_rcvd);
+	reinit_completion(&kim_gdata->kim_rcvd);
 	/* the positions 12 & 13 in the response buffer provide with the
 	 * chip, major & minor numbers
 	 */
@@ -362,7 +362,7 @@
 			/* reinit completion before sending for the
 			 * relevant wait
 			 */
-			INIT_COMPLETION(kim_gdata->kim_rcvd);
+			reinit_completion(&kim_gdata->kim_rcvd);
 
 			/*
 			 * Free space found in uart buffer, call st_int_write
@@ -398,7 +398,7 @@
 				release_firmware(kim_gdata->fw_entry);
 				return -ETIMEDOUT;
 			}
-			INIT_COMPLETION(kim_gdata->kim_rcvd);
+			reinit_completion(&kim_gdata->kim_rcvd);
 			break;
 		case ACTION_DELAY:	/* sleep */
 			pr_info("sleep command in scr");
@@ -474,7 +474,7 @@
 		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
 		mdelay(100);
 		/* re-initialize the completion */
-		INIT_COMPLETION(kim_gdata->ldisc_installed);
+		reinit_completion(&kim_gdata->ldisc_installed);
 		/* send notification to UIM */
 		kim_gdata->ldisc_install = 1;
 		pr_info("ldisc_install = 1");
@@ -525,7 +525,7 @@
 		kim_gdata->kim_pdev->dev.platform_data;
 	struct tty_struct	*tty = kim_gdata->core_data->tty;
 
-	INIT_COMPLETION(kim_gdata->ldisc_installed);
+	reinit_completion(&kim_gdata->ldisc_installed);
 
 	if (tty) {	/* can be called before ldisc is installed */
 		/* Flush any pending characters in the driver and discipline. */
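
These hunks are part of the tree-wide INIT_COMPLETION() to reinit_completion() rename; the new helper takes a pointer and only resets the done counter. The usual re-arm-then-wait shape, sketched with hypothetical names (resp_done is a struct completion):

	reinit_completion(&resp_done);		/* re-arm before triggering */
	send_command();				/* hypothetical trigger */
	if (!wait_for_completion_timeout(&resp_done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;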
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa9632e..357bbc5 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -196,7 +197,7 @@
 	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = *mmc_dev(host)->dma_mask;
+		limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
 	mq->queue = blk_init_queue(mmc_request_fn, lock);
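
Using the raw DMA mask as a byte limit is wrong on machines where RAM does not start at physical address 0; dma_max_pfn() translates the mask into the highest reachable page frame with the memory offset folded in. Rough numbers for a hypothetical machine with a 32-bit mask and RAM based at 0x80000000:

	u64 old_limit = 0xffffffffULL;			/* mask taken as bytes   */
	u64 new_limit = (0xfffffULL + 0x80000) << 12;	/* (mask pfn + offset)
							 * << PAGE_SHIFT        */
	/* new_limit == 0x17ffff000, so pages below that physical address
	 * no longer get bounce-buffered needlessly. */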
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c3785ed..d135c76 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -62,6 +62,7 @@
  * @signal_direction: input/out direction of bus signals can be indicated
  * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
  */
 struct variant_data {
 	unsigned int		clkreg;
@@ -76,6 +77,7 @@
 	bool			signal_direction;
 	bool			pwrreg_clkgate;
 	bool			busy_detect;
+	bool			pwrreg_nopower;
 };
 
 static struct variant_data variant_arm = {
@@ -109,6 +111,7 @@
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
 	.pwrreg_clkgate		= true,
+	.pwrreg_nopower		= true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -121,6 +124,7 @@
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
 	.pwrreg_clkgate		= true,
+	.pwrreg_nopower		= true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -135,6 +139,7 @@
 	.signal_direction	= true,
 	.pwrreg_clkgate		= true,
 	.busy_detect		= true,
+	.pwrreg_nopower		= true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -150,6 +155,7 @@
 	.signal_direction	= true,
 	.pwrreg_clkgate		= true,
 	.busy_detect		= true,
+	.pwrreg_nopower		= true,
 };
 
 static int mmci_card_busy(struct mmc_host *mmc)
@@ -189,6 +195,21 @@
 	return 0;
 }
 
+static void mmci_reg_delay(struct mmci_host *host)
+{
+	/*
+	 * According to the spec, at least three cycles of the feedback
+	 * clock (max 52 MHz) must pass between two writes to the MMCICLOCK
+	 * reg. Three MCLK cycles must pass between two MMCIPOWER reg writes.
+	 * Worst delay time during card init is at 100 kHz => 30 us.
+	 * Worst delay time when up and running is at 25 MHz => 120 ns.
+	 */
+	if (host->cclk < 25000000)
+		udelay(30);
+	else
+		ndelay(120);
+}
+
 /*
  * This must be called with host->lock held
  */
@@ -1264,6 +1285,7 @@
 
 	mmci_set_clkreg(host, ios->clock);
 	mmci_write_pwrreg(host, pwr);
+	mmci_reg_delay(host);
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
@@ -1510,23 +1532,6 @@
 		mmc->f_max = min(host->mclk, fmax);
 	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
 
-	host->pinctrl = devm_pinctrl_get(&dev->dev);
-	if (IS_ERR(host->pinctrl)) {
-		ret = PTR_ERR(host->pinctrl);
-		goto clk_disable;
-	}
-
-	host->pins_default = pinctrl_lookup_state(host->pinctrl,
-			PINCTRL_STATE_DEFAULT);
-
-	/* enable pins to be muxed in and configured */
-	if (!IS_ERR(host->pins_default)) {
-		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
-		if (ret)
-			dev_warn(&dev->dev, "could not set default pins\n");
-	} else
-		dev_warn(&dev->dev, "could not get default pinstate\n");
-
 	/* Get regulators and the supported OCR mask */
 	mmc_regulator_get_supply(mmc);
 	if (!mmc->ocr_avail)
@@ -1760,6 +1765,41 @@
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
+static void mmci_save(struct mmci_host *host)
+{
+	unsigned long flags;
+
+	if (host->variant->pwrreg_nopower) {
+		spin_lock_irqsave(&host->lock, flags);
+
+		writel(0, host->base + MMCIMASK0);
+		writel(0, host->base + MMCIDATACTRL);
+		writel(0, host->base + MMCIPOWER);
+		writel(0, host->base + MMCICLOCK);
+		mmci_reg_delay(host);
+
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+	unsigned long flags;
+
+	if (host->variant->pwrreg_nopower) {
+		spin_lock_irqsave(&host->lock, flags);
+
+		writel(host->clk_reg, host->base + MMCICLOCK);
+		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+		writel(host->pwr_reg, host->base + MMCIPOWER);
+		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+		mmci_reg_delay(host);
+
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+}
+
 static int mmci_runtime_suspend(struct device *dev)
 {
 	struct amba_device *adev = to_amba_device(dev);
@@ -1767,6 +1807,8 @@
 
 	if (mmc) {
 		struct mmci_host *host = mmc_priv(mmc);
+		pinctrl_pm_select_sleep_state(dev);
+		mmci_save(host);
 		clk_disable_unprepare(host->clk);
 	}
 
@@ -1781,6 +1823,8 @@
 	if (mmc) {
 		struct mmci_host *host = mmc_priv(mmc);
 		clk_prepare_enable(host->clk);
+		mmci_restore(host);
+		pinctrl_pm_select_default_state(dev);
 	}
 
 	return 0;
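
The two delay branches in mmci_reg_delay() fall straight out of the comment's arithmetic; a hypothetical helper makes the numbers explicit:

	static unsigned long three_cycles_ns(unsigned long cclk_hz)
	{
		return 3UL * 1000000000UL / cclk_hz;
	}
	/* three_cycles_ns(100000)   == 30000 ns -> udelay(30) at card init
	 * three_cycles_ns(25000000) == 120 ns   -> ndelay(120) when running */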
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 69080fa..168bc72 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -200,10 +200,6 @@
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
 
-	/* pinctrl handles */
-	struct pinctrl		*pinctrl;
-	struct pinctrl_state	*pins_default;
-
 #ifdef CONFIG_DMA_ENGINE
 	/* DMA stuff */
 	struct dma_chan		*dma_current;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cdd4ce0..ef19874 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -310,8 +310,9 @@
 			dma_mask = DMA_BIT_MASK(32);
 		}
 
-		dev->dma_mask = &dev->coherent_dma_mask;
-		dev->coherent_dma_mask = dma_mask;
+		err = dma_coerce_mask_and_coherent(dev, dma_mask);
+		if (err)
+			goto err_free;
 	}
 
 	if (c->slot) {
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 9279a91..7a6384b 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -27,11 +27,13 @@
 
 /* Magics */
 #define BOARD_DATA_MAGIC		0x5246504D	/* MPFR */
+#define FACTORY_MAGIC			0x59544346	/* FCTY */
 #define POT_MAGIC1			0x54544f50	/* POTT */
 #define POT_MAGIC2			0x504f		/* OP */
 #define ML_MAGIC1			0x39685a42
 #define ML_MAGIC2			0x26594131
 #define TRX_MAGIC			0x30524448
+#define SQSH_MAGIC			0x71736873	/* shsq */
 
 struct trx_header {
 	uint32_t magic;
@@ -71,7 +73,14 @@
 	/* Alloc */
 	parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
 			GFP_KERNEL);
+	if (!parts)
+		return -ENOMEM;
+
 	buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+	if (!buf) {
+		kfree(parts);
+		return -ENOMEM;
+	}
 
 	/* Parse block by block looking for magics */
 	for (offset = 0; offset <= master->size - blocksize;
@@ -110,6 +119,13 @@
 			continue;
 		}
 
+		/* Found on Huawei E970 */
+		if (buf[0x000 / 4] == FACTORY_MAGIC) {
+			bcm47xxpart_add_part(&parts[curr_part++], "factory",
+					     offset, MTD_WRITEABLE);
+			continue;
+		}
+
 		/* POT(TOP) */
 		if (buf[0x000 / 4] == POT_MAGIC1 &&
 		    (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
@@ -167,6 +183,13 @@
 			offset = rounddown(offset + trx->length, blocksize);
 			continue;
 		}
+
+		/* Squashfs on devices not using TRX */
+		if (buf[0x000 / 4] == SQSH_MAGIC) {
+			bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+					     offset, 0);
+			continue;
+		}
 	}
 
 	/* Look for NVRAM at the end of the last block. */
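
The magic comments read reversed because each constant is a little-endian load of the ASCII bytes found on flash. A userspace spot check of that claim for FACTORY_MAGIC (little-endian host assumed):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		const unsigned char raw[4] = { 'F', 'C', 'T', 'Y' };
		uint32_t magic;

		memcpy(&magic, raw, sizeof(magic));
		assert(magic == 0x59544346);	/* FACTORY_MAGIC on LE hosts */
		return 0;
	}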
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 74ab4b7..0128138 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -95,13 +95,6 @@
 	  if you want to specify device partitioning or to use a device which
 	  doesn't support the JEDEC ID instruction.
 
-config M25PXX_USE_FAST_READ
-	bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
-	depends on MTD_M25P80
-	default y
-	help
-	  This option enables FAST_READ access supported by ST M25Pxx.
-
 config MTD_SPEAR_SMI
 	tristate "SPEAR MTD NOR Support through SMI controller"
 	depends on PLAT_SPEAR
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 5cb4c04..d9fd87a 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 
 /* Info for the block device */
 struct block2mtd_dev {
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 3e1b0a0..4f091c1 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2097,7 +2097,7 @@
 	ret = -ENODEV;
 	dev_info(dev, "No supported DiskOnChip found\n");
 err_probe:
-	kfree(cascade->bch);
+	free_bch(cascade->bch);
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
 		if (cascade->floors[floor])
 			doc_release_device(cascade->floors[floor]);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6bc9618..7eda71d 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -78,7 +78,7 @@
 
 /* Define max times to check status register before we give up. */
 #define	MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */
-#define	MAX_CMD_SIZE		5
+#define	MAX_CMD_SIZE		6
 
 #define JEDEC_MFR(_jedec_id)	((_jedec_id) >> 16)
 
@@ -367,10 +367,6 @@
 	spi_message_init(&m);
 	memset(t, 0, (sizeof t));
 
-	/* NOTE:
-	 * OPCODE_FAST_READ (if available) is faster.
-	 * Should add 1 byte DUMMY_BYTE.
-	 */
 	t[0].tx_buf = flash->command;
 	t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
 	spi_message_add_tail(&t[0], &m);
@@ -388,11 +384,6 @@
 		return 1;
 	}
 
-	/* FIXME switch to OPCODE_FAST_READ.  It's required for higher
-	 * clocks; and at this writing, every chip this driver handles
-	 * supports that opcode.
-	 */
-
 	/* Set up the write data buffer. */
 	opcode = flash->read_opcode;
 	flash->command[0] = opcode;
@@ -749,16 +740,19 @@
 	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
 
 	/* EON -- en25xxx */
-	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024,  64, SECT_4K) },
-	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) },
-	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
-	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
-	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
-	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
+	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
+	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
+	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
+	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
+	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
+
+	/* ESMT */
+	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
 
 	/* Everspin */
-	{ "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
-	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
+	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
+	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
 
 	/* GigaDevice */
 	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
@@ -775,6 +769,7 @@
 	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
 	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
 	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, 0) },
+	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
 	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, 0) },
 	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
@@ -783,15 +778,16 @@
 	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
 
 	/* Micron */
-	{ "n25q064",  INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
-	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
-	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
-	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, 0) },
+	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, 0) },
+	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, 0) },
+	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K) },
+	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
 
 	/* PMC */
-	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
-	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
-	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024,  64, SECT_4K) },
+	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
+	{ "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
+	{ "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
 
 	/* Spansion -- single (large) sector size only, at least
 	 * for the chips listed here (without boot sectors).
@@ -940,12 +936,7 @@
 	struct flash_info		*info;
 	unsigned			i;
 	struct mtd_part_parser_data	ppdata;
-	struct device_node __maybe_unused *np = spi->dev.of_node;
-
-#ifdef CONFIG_MTD_OF_PARTS
-	if (!of_device_is_available(np))
-		return -ENODEV;
-#endif
+	struct device_node *np = spi->dev.of_node;
 
 	/* Platform data helps sort out which chip type we have, as
 	 * well as how this board partitions it.  If we don't have
@@ -992,15 +983,13 @@
 		}
 	}
 
-	flash = kzalloc(sizeof *flash, GFP_KERNEL);
+	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
 	if (!flash)
 		return -ENOMEM;
-	flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
-					GFP_KERNEL);
-	if (!flash->command) {
-		kfree(flash);
+
+	flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
+	if (!flash->command)
 		return -ENOMEM;
-	}
 
 	flash->spi = spi;
 	mutex_init(&flash->lock);
@@ -1062,13 +1051,14 @@
 	flash->page_size = info->page_size;
 	flash->mtd.writebufsize = flash->page_size;
 
-	flash->fast_read = false;
-	if (np && of_property_read_bool(np, "m25p,fast-read"))
+	if (np)
+		/* If we were instantiated by DT, use it */
+		flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
+	else
+		/* If we weren't instantiated by DT, default to fast-read */
 		flash->fast_read = true;
 
-#ifdef CONFIG_M25PXX_USE_FAST_READ
-	flash->fast_read = true;
-#endif
+	/* Some devices cannot do fast-read, no matter what DT tells us */
 	if (info->flags & M25P_NO_FR)
 		flash->fast_read = false;
 
@@ -1133,15 +1123,9 @@
 static int m25p_remove(struct spi_device *spi)
 {
 	struct m25p	*flash = spi_get_drvdata(spi);
-	int		status;
 
 	/* Clean up MTD stuff. */
-	status = mtd_device_unregister(&flash->mtd);
-	if (status == 0) {
-		kfree(flash->command);
-		kfree(flash);
-	}
-	return 0;
+	return mtd_device_unregister(&flash->mtd);
 }
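
The MAX_CMD_SIZE bump from 5 to 6 accounts for the fast-read dummy byte: t[0].len is m25p_cmdsz(flash) plus one when fast_read is set, and cmdsz is already 5 for parts in 4-byte addressing mode. Sizing sketch with demo names, not the driver's:

	#define DEMO_OPCODE_LEN	1	/* read opcode            */
	#define DEMO_ADDR_LEN	4	/* 4-byte addressing mode */
	#define DEMO_DUMMY_LEN	1	/* fast-read dummy byte   */
	#define DEMO_MAX_CMD	(DEMO_OPCODE_LEN + DEMO_ADDR_LEN + \
				 DEMO_DUMMY_LEN)	/* == 6 == MAX_CMD_SIZE */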
 
 
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0e8cbfe..4a47b02 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -88,8 +88,6 @@
 	uint8_t			command[4];
 	char			name[24];
 
-	unsigned		partitioned:1;
-
 	unsigned short		page_offset;	/* offset in flash address */
 	unsigned int		page_size;	/* of bytes per page */
 
@@ -881,7 +879,7 @@
 		break;
 	/* obsolete AT45DB1282 not (yet?) supported */
 	default:
-		pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev),
+		dev_info(&spi->dev, "unsupported device (%x)\n",
 				status & 0x3c);
 		status = -ENODEV;
 	}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 67823de..e1f2aeb 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -94,7 +94,7 @@
 	}
 }
 
-static int register_device(char *name, unsigned long start, unsigned long len)
+static int register_device(char *name, phys_addr_t start, size_t len)
 {
 	struct phram_mtd_list *new;
 	int ret = -ENOMEM;
@@ -141,35 +141,35 @@
 	return ret;
 }
 
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
+static int parse_num64(uint64_t *num64, char *token)
 {
-	unsigned long result = simple_strtoul(cp, endp, base);
+	size_t len;
+	int shift = 0;
+	int ret;
 
-	switch (**endp) {
-	case 'G':
-		result *= 1024;
-	case 'M':
-		result *= 1024;
-	case 'k':
-		result *= 1024;
+	len = strlen(token);
 	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
-		if ((*endp)[1] == 'i')
-			(*endp) += 2;
+	if (len > 2) {
+		if (token[len - 1] == 'i') {
+			switch (token[len - 2]) {
+			case 'G':
+				shift += 10;
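+				/* fall through */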
+			case 'M':
+				shift += 10;
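+				/* fall through */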
+			case 'k':
+				shift += 10;
+				token[len - 2] = 0;
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
 	}
-	return result;
-}
 
-static int parse_num32(uint32_t *num32, const char *token)
-{
-	char *endp;
-	unsigned long n;
+	ret = kstrtou64(token, 0, num64);
+	*num64 <<= shift;
 
-	n = ustrtoul(token, &endp, 0);
-	if (*endp)
-		return -EINVAL;
-
-	*num32 = n;
-	return 0;
+	return ret;
 }
 
 static int parse_name(char **pname, const char *token)
@@ -209,19 +209,19 @@
  * This shall contain the module parameter if any. It is of the form:
  * - phram=<device>,<address>,<size> for module case
  * - phram.phram=<device>,<address>,<size> for built-in case
- * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
  * size.
  * Example: phram.phram=rootfs,0xa0000000,512Mi
  */
-static __initdata char phram_paramline[64+12+12];
+static __initdata char phram_paramline[64 + 20 + 20];
 
 static int __init phram_setup(const char *val)
 {
-	char buf[64+12+12], *str = buf;
+	char buf[64 + 20 + 20], *str = buf;
 	char *token[3];
 	char *name;
-	uint32_t start;
-	uint32_t len;
+	uint64_t start;
+	uint64_t len;
 	int i, ret;
 
 	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -243,13 +243,13 @@
 	if (ret)
 		return ret;
 
-	ret = parse_num32(&start, token[1]);
+	ret = parse_num64(&start, token[1]);
 	if (ret) {
 		kfree(name);
 		parse_err("illegal start address\n");
 	}
 
-	ret = parse_num32(&len, token[2]);
+	ret = parse_num64(&len, token[2]);
 	if (ret) {
 		kfree(name);
 		parse_err("illegal device length\n");
@@ -257,7 +257,7 @@
 
 	ret = register_device(name, start, len);
 	if (!ret)
-		pr_info("%s device: %#x at %#x\n", name, len, start);
+		pr_info("%s device: %#llx at %#llx\n", name, len, start);
 	else
 		kfree(name);
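
The deliberate fall-throughs in parse_num64() accumulate 10 bits of shift per size prefix, so "ki" means <<10, "Mi" <<20 and "Gi" <<30, matching the 512Mi example in the parameter comment. A userspace spot check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t len = 512;

		len <<= 20;	/* "512Mi": 'M' adds 10, falls through 'k' for 10 more */
		assert(len == 536870912);	/* 0x20000000, i.e. 512 MiB */
		return 0;
	}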
 
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index a42f1f0..687bf27 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -364,7 +364,7 @@
 	if (!flash_info)
 		return -ENODEV;
 
-	flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
 	if (!flash)
 		return -ENOMEM;
 
@@ -402,11 +402,8 @@
 	ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
 					data ? data->parts : NULL,
 					data ? data->nr_parts : 0);
-	if (ret) {
-		kfree(flash);
-		spi_set_drvdata(spi, NULL);
+	if (ret)
 		return -ENODEV;
-	}
 
 	return 0;
 }
@@ -414,12 +411,8 @@
 static int sst25l_remove(struct spi_device *spi)
 {
 	struct sst25l_flash *flash = spi_get_drvdata(spi);
-	int ret;
 
-	ret = mtd_device_unregister(&flash->mtd);
-	if (ret == 0)
-		kfree(flash);
-	return ret;
+	return mtd_device_unregister(&flash->mtd);
 }
 
 static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 3af3514..b66b541 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@
 	struct INFTLrecord *inftl;
 	unsigned long temp;
 
-	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+	if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
 		return;
 	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
 	if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index d3cfe26b..2ef19aa 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -703,7 +703,7 @@
 
 #define DO_XXLOCK_LOCK		1
 #define DO_XXLOCK_UNLOCK	2
-int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
 {
 	int ret = 0;
 	struct map_info *map = mtd->priv;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index f581ac1..46d195f 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -180,7 +180,6 @@
 {
 	struct vr_nor_mtd *p = pci_get_drvdata(dev);
 
-	pci_set_drvdata(dev, NULL);
 	vr_nor_destroy_partitions(p);
 	vr_nor_destroy_mtd_setup(p);
 	vr_nor_destroy_maps(p);
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c2604f8..36da518 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -316,7 +316,6 @@
 	map->exit(dev, map);
 	kfree(map);
 
-	pci_set_drvdata(dev, NULL);
 	pci_release_regions(dev);
 }
 
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 6762716..10196f5 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -55,7 +55,7 @@
 
 static inline struct platram_info *to_platram_info(struct platform_device *dev)
 {
-	return (struct platram_info *)platform_get_drvdata(dev);
+	return platform_get_drvdata(dev);
 }
 
 /* platram_setrw
@@ -257,21 +257,7 @@
 	},
 };
 
-/* module init/exit */
-
-static int __init platram_init(void)
-{
-	printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
-	return platform_driver_register(&platram_driver);
-}
-
-static void __exit platram_exit(void)
-{
-	platform_driver_unregister(&platram_driver);
-}
-
-module_init(platram_init);
-module_exit(platram_exit);
+module_platform_driver(platram_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index c77b68c..3051c4c 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -212,7 +212,6 @@
 
 	if (!region_fail)
 		release_mem_region(SCB2_ADDR, SCB2_WINDOW);
-	pci_set_drvdata(dev, NULL);
 }
 
 static struct pci_device_id scb2_flash_pci_ids[] = {
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 2aef5dd..485ea75 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -32,6 +32,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/blktrans.h>
 #include <linux/mutex.h>
+#include <linux/major.h>
 
 
 struct mtdblk_dev {
@@ -373,7 +374,7 @@
 
 static struct mtd_blktrans_ops mtdblock_tr = {
 	.name		= "mtdblock",
-	.major		= 31,
+	.major		= MTD_BLOCK_MAJOR,
 	.part_bits	= 0,
 	.blksize 	= 512,
 	.open		= mtdblock_open,
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 92759a9..fb5dc89 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/blktrans.h>
 #include <linux/module.h>
+#include <linux/major.h>
 
 static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
 			      unsigned long block, char *buf)
@@ -70,7 +71,7 @@
 
 static struct mtd_blktrans_ops mtdblock_tr = {
 	.name		= "mtdblock",
-	.major		= 31,
+	.major		= MTD_BLOCK_MAJOR,
 	.part_bits	= 0,
 	.blksize 	= 512,
 	.readsect	= mtdblock_readsect,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 684bfa3..2147e73 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -32,6 +32,7 @@
 #include <linux/mount.h>
 #include <linux/blkpg.h>
 #include <linux/magic.h>
+#include <linux/major.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/map.h>
@@ -1099,7 +1100,7 @@
 		return (unsigned long) -EINVAL;
 
 	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
-	return ret == -EOPNOTSUPP ? -ENOSYS : ret;
+	return ret == -EOPNOTSUPP ? -ENODEV : ret;
 }
 #endif
 
@@ -1124,9 +1125,9 @@
 #endif
 		return vm_iomap_memory(vma, map->phys, map->size);
 	}
-	return -ENOSYS;
+	return -ENODEV;
 #else
-	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
+	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
 #endif
 }
 
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5e14d54..92311a5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -157,6 +157,9 @@
 	case MTD_UBIVOLUME:
 		type = "ubi";
 		break;
+	case MTD_MLCNANDFLASH:
+		type = "mlc-nand";
+		break;
 	default:
 		type = "unknown";
 	}
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 334da5f..20c02a3 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -17,6 +17,7 @@
 #include <linux/export.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 
 /*
  * compare superblocks to see if they're equivalent
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d885298..93ae6a6 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,43 +96,15 @@
 
 config MTD_NAND_OMAP_BCH
 	depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
-	tristate "Enable support for hardware BCH error correction"
+	tristate "Support hardware based BCH error correction"
 	default n
 	select BCH
-	select BCH_CONST_PARAMS
 	help
-	 Support for hardware BCH error correction.
-
-choice
-	prompt "BCH error correction capability"
-	depends on MTD_NAND_OMAP_BCH
-
-config MTD_NAND_OMAP_BCH8
-	bool "8 bits / 512 bytes (recommended)"
-	help
-	 Support correcting up to 8 bitflips per 512-byte block.
-	 This will use 13 bytes of spare area per 512 bytes of page data.
-	 This is the recommended mode, as 4-bit mode does not work
-	 on some OMAP3 revisions, due to a hardware bug.
-
-config MTD_NAND_OMAP_BCH4
-	bool "4 bits / 512 bytes"
-	help
-	 Support correcting up to 4 bitflips per 512-byte block.
-	 This will use 7 bytes of spare area per 512 bytes of page data.
-	 Note that this mode does not work on some OMAP3 revisions, due to a
-	 hardware bug. Please check your OMAP datasheet before selecting this
-	 mode.
-
-endchoice
-
-if MTD_NAND_OMAP_BCH
-config BCH_CONST_M
-	default 13
-config BCH_CONST_T
-	default 4 if MTD_NAND_OMAP_BCH4
-	default 8 if MTD_NAND_OMAP_BCH8
-endif
+	  This config enables the ELM hardware engine, which can be used to
+	  locate and correct errors when using the BCH ECC scheme. This
+	  offloads ECC error searching and correction from the CPU. However,
+	  some legacy OMAP families such as OMAP2xxx and OMAP3xxx do not have
+	  an ELM engine, so they should not enable this config symbol.
 
 config MTD_NAND_IDS
 	tristate
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index bd1ce7d..d78a97d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1062,56 +1062,28 @@
 }
 
 /*
- * Get ECC requirement in ONFI parameters, returns -1 if ONFI
- * parameters is not supported.
- * return 0 if success to get the ECC requirement.
- */
-static int get_onfi_ecc_param(struct nand_chip *chip,
-		int *ecc_bits, int *sector_size)
-{
-	*ecc_bits = *sector_size = 0;
-
-	if (chip->onfi_params.ecc_bits == 0xff)
-		/* TODO: the sector_size and ecc_bits need to be find in
-		 * extended ecc parameter, currently we don't support it.
-		 */
-		return -1;
-
-	*ecc_bits = chip->onfi_params.ecc_bits;
-
-	/* The default sector size (ecc codeword size) is 512 */
-	*sector_size = 512;
-
-	return 0;
-}
-
-/*
- * Get ecc requirement from ONFI parameters ecc requirement.
+ * Get minimum ecc requirements from NAND.
  * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
- * will set them according to ONFI ecc requirement. Otherwise, use the
+ * will set them according to minimum ecc requirement. Otherwise, use the
  * value in DTS file.
  * return 0 if success. otherwise return error code.
  */
 static int pmecc_choose_ecc(struct atmel_nand_host *host,
 		int *cap, int *sector_size)
 {
-	/* Get ECC requirement from ONFI parameters */
-	*cap = *sector_size = 0;
-	if (host->nand_chip.onfi_version) {
-		if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
-			dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+	/* Get minimum ECC requirements */
+	if (host->nand_chip.ecc_strength_ds) {
+		*cap = host->nand_chip.ecc_strength_ds;
+		*sector_size = host->nand_chip.ecc_step_ds;
+		dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
 				*cap, *sector_size);
-		else
-			dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
 	} else {
-		dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
-	}
-	if (*cap == 0 && *sector_size == 0) {
 		*cap = 2;
 		*sector_size = 512;
+		dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
 	}
 
-	/* If dts file doesn't specify then use the one in ONFI parameters */
+	/* If device tree doesn't specify, use NAND's minimum ECC parameters */
 	if (host->pmecc_corr_cap == 0) {
 		/* use the most suitable ecc bits (the nearest larger one) */
 		if (*cap <= 2)
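
The selection that begins at the end of this hunk rounds the chip's required
strength up to the nearest capability PMECC can deliver. A standalone sketch
of that rounding (the supported set {2, 4, 8, 12, 24} reflects Atmel's PMECC;
treat it as an assumption for other parts):

#include <stdio.h>

/* Round a required correction strength up to a supported PMECC
 * capability; returns -1 if the requirement cannot be met. */
static int pmecc_round_cap(int required)
{
	static const int caps[] = { 2, 4, 8, 12, 24 };
	unsigned int i;

	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		if (required <= caps[i])
			return caps[i];
	return -1;
}

int main(void)
{
	printf("need 5 bits  -> use %d\n", pmecc_round_cap(5));		/* 8 */
	printf("need 13 bits -> use %d\n", pmecc_round_cap(13));	/* 24 */
	return 0;
}
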
@@ -1449,7 +1421,6 @@
 		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
-#if defined(CONFIG_OF)
 static int atmel_of_init_port(struct atmel_nand_host *host,
 			      struct device_node *np)
 {
@@ -1457,7 +1428,7 @@
 	u32 offset[2];
 	int ecc_mode;
 	struct atmel_nand_data *board = &host->board;
-	enum of_gpio_flags flags;
+	enum of_gpio_flags flags = 0;
 
 	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
 		if (val >= 32) {
@@ -1540,13 +1511,6 @@
 
 	return 0;
 }
-#else
-static int atmel_of_init_port(struct atmel_nand_host *host,
-			      struct device_node *np)
-{
-	return -EINVAL;
-}
-#endif
 
 static int atmel_hw_nand_init_params(struct platform_device *pdev,
 					 struct atmel_nand_host *host)
@@ -2019,7 +1983,8 @@
 	mtd = &host->mtd;
 	nand_chip = &host->nand_chip;
 	host->dev = &pdev->dev;
-	if (pdev->dev.of_node) {
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		/* of_node can be parsed only when CONFIG_OF is enabled */
 		res = atmel_of_init_port(host, pdev->dev.of_node);
 		if (res)
 			goto err_nand_ioremap;
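
Replacing the #ifdef CONFIG_OF stub with IS_ENABLED() keeps both branches
visible to the compiler, so the OF path stays type-checked even in !CONFIG_OF
builds while the optimizer discards the dead branch. A userspace stand-in for
the idiom (this IS_ENABLED is a simplification of the kernel macro):

#include <stdio.h>

#define CONFIG_OF 1
#define IS_ENABLED(x) (x)	/* kernel version maps Kconfig symbols to 0/1 */

static int of_init_port(void)
{
	return 0;	/* always compiled, called only when enabled */
}

int main(void)
{
	int res = -1;

	if (IS_ENABLED(CONFIG_OF))
		res = of_init_port();	/* branch folded away when 0 */
	printf("res=%d\n", res);
	return 0;
}
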
@@ -2177,7 +2142,6 @@
 	if (host->dma_chan)
 		dma_release_channel(host->dma_chan);
 err_nand_ioremap:
-	platform_driver_unregister(&atmel_nand_nfc_driver);
 	return res;
 }
 
@@ -2207,14 +2171,12 @@
 	return 0;
 }
 
-#if defined(CONFIG_OF)
 static const struct of_device_id atmel_nand_dt_ids[] = {
 	{ .compatible = "atmel,at91rm9200-nand" },
 	{ /* sentinel */ }
 };
 
 MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
-#endif
 
 static int atmel_nand_nfc_probe(struct platform_device *pdev)
 {
@@ -2253,12 +2215,11 @@
 	return 0;
 }
 
-#if defined(CONFIG_OF)
-static struct of_device_id atmel_nand_nfc_match[] = {
+static const struct of_device_id atmel_nand_nfc_match[] = {
 	{ .compatible = "atmel,sama5d3-nfc" },
 	{ /* sentinel */ }
 };
-#endif
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
 
 static struct platform_driver atmel_nand_nfc_driver = {
 	.driver = {
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 7bae569..1074459 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -29,11 +29,9 @@
 	struct bcm47xxnflash *b47n;
 	int err = 0;
 
-	b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
-	if (!b47n) {
-		err = -ENOMEM;
-		goto out;
-	}
+	b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+	if (!b47n)
+		return -ENOMEM;
 
 	b47n->nand_chip.priv = b47n;
 	b47n->mtd.owner = THIS_MODULE;
@@ -48,22 +46,16 @@
 	}
 	if (err) {
 		pr_err("Initialization failed: %d\n", err);
-		goto err_init;
+		return err;
 	}
 
 	err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
 	if (err) {
 		pr_err("Failed to register MTD device: %d\n", err);
-		goto err_dev_reg;
+		return err;
 	}
 
 	return 0;
-
-err_dev_reg:
-err_init:
-	kfree(b47n);
-out:
-	return err;
 }
 
 static int bcm47xxnflash_remove(struct platform_device *pdev)
@@ -85,22 +77,4 @@
 	},
 };
 
-static int __init bcm47xxnflash_init(void)
-{
-	int err;
-
-	err = platform_driver_register(&bcm47xxnflash_driver);
-	if (err)
-		pr_err("Failed to register bcm47xx nand flash driver: %d\n",
-		       err);
-
-	return err;
-}
-
-static void __exit bcm47xxnflash_exit(void)
-{
-	platform_driver_unregister(&bcm47xxnflash_driver);
-}
-
-module_init(bcm47xxnflash_init);
-module_exit(bcm47xxnflash_exit);
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 2ed2bb3..370b9dd 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1394,7 +1394,7 @@
 };
 
 /* initialize driver data structures */
-void denali_drv_init(struct denali_nand_info *denali)
+static void denali_drv_init(struct denali_nand_info *denali)
 {
 	denali->idx = 0;
 
@@ -1520,7 +1520,7 @@
 	 * so just let controller do 15bit ECC for MLC and 8bit ECC for
 	 * SLC if possible.
 	 * */
-	if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
+	if (!nand_is_slc(&denali->nand) &&
 			(denali->mtd.oobsize > (denali->bbtskipbytes +
 			ECC_15BITS * (denali->mtd.writesize /
 			ECC_SECTOR_SIZE)))) {
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index e3e4662..033f177 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -119,7 +119,6 @@
 	iounmap(denali->flash_mem);
 	pci_release_regions(dev);
 	pci_disable_device(dev);
-	pci_set_drvdata(dev, NULL);
 	kfree(denali);
 }
 
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index eaa3c29..b68a495 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -38,7 +38,7 @@
 #define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
 #endif
 
-static unsigned long __initdata doc_locations[] = {
+static unsigned long doc_locations[] __initdata = {
 #if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
 #ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
 	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 548db23..1b0265e 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -44,6 +44,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/bch.h>
 #include <linux/bitrev.h>
+#include <linux/jiffies.h>
 
 /*
  * In "reliable mode" consecutive 2k pages are used in parallel (in some
@@ -269,7 +270,7 @@
 	 */
 
 	uint16_t flash_status;
-	unsigned int timeo;
+	unsigned long timeo;
 	void __iomem *docptr = doc->virtadr;
 
 	dev_dbg(doc->dev, "%s...\n", __func__);
@@ -277,22 +278,18 @@
 	/* hardware quirk requires reading twice initially */
 	flash_status = readw(docptr + DOC_FLASHCONTROL);
 
-	timeo = 1000;
+	timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
 	do {
 		cpu_relax();
 		flash_status = readb(docptr + DOC_FLASHCONTROL);
-	} while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+	} while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+		 time_before(jiffies, timeo));
 
-
-	if (!timeo) {
+	if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
 		dev_err(doc->dev, "%s: timed out!\n", __func__);
 		return NAND_STATUS_FAIL;
 	}
 
-	if (unlikely(timeo < 50))
-		dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
-			 __func__, timeo);
-
 	return 0;
 }
 
@@ -494,7 +491,7 @@
 		return status;
 	}
 
-	dev_warn(doc->dev, "unexpectd call to read_byte()\n");
+	dev_warn(doc->dev, "unexpected call to read_byte()\n");
 
 	return 0;
 }
@@ -1239,7 +1236,6 @@
 	nand->block_markbad = docg4_block_markbad;
 	nand->read_buf = docg4_read_buf;
 	nand->write_buf = docg4_write_buf16;
-	nand->scan_bbt = nand_default_bbt;
 	nand->erase_cmd = docg4_erase_block;
 	nand->ecc.read_page = docg4_read_page;
 	nand->ecc.write_page = docg4_write_page;
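
The docg4 wait loop above swaps a fixed iteration count for a jiffies
deadline and, crucially, judges success by re-reading the ready flag rather
than by whether time ran out: the device may become ready in the window
between the last poll and the deadline check. A runnable userspace analog of
the pattern (device_ready() stands in for the readb() poll):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool device_ready(void)
{
	static int polls;

	return ++polls > 3;	/* pretend it goes ready on the 4th poll */
}

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	double deadline = now_ms() + 200;	/* 200 ms, like the driver */
	bool ready;

	do {
		ready = device_ready();
	} while (!ready && now_ms() < deadline);

	/* Judge by the condition, not the clock: the device may have
	 * become ready just as the deadline expired. */
	puts(ready ? "ready" : "timed out");
	return ready ? 0 : 1;
}
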
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index e59c886..c966fc7 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -651,8 +651,6 @@
 	        chip->page_shift);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
 	        chip->phys_erase_shift);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
-	        chip->ecclayout);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
 	        chip->ecc.mode);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 2730c78..4335577 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -136,6 +136,69 @@
 	.oobfree = { {2, 6}, {136, 82} },
 };
 
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+	.eccbytes = 128,
+	.eccpos = {
+		8, 9, 10, 11, 12, 13, 14, 15,
+		16, 17, 18, 19, 20, 21, 22, 23,
+		24, 25, 26, 27, 28, 29, 30, 31,
+		32, 33, 34, 35, 36, 37, 38, 39,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63,
+		64, 65, 66, 67, 68, 69, 70, 71,
+		72, 73, 74, 75, 76, 77, 78, 79,
+		80, 81, 82, 83, 84, 85, 86, 87,
+		88, 89, 90, 91, 92, 93, 94, 95,
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127,
+		128, 129, 130, 131, 132, 133, 134, 135,
+	},
+	.oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+	.eccbytes = 256,
+	.eccpos = {
+		8, 9, 10, 11, 12, 13, 14, 15,
+		16, 17, 18, 19, 20, 21, 22, 23,
+		24, 25, 26, 27, 28, 29, 30, 31,
+		32, 33, 34, 35, 36, 37, 38, 39,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63,
+		64, 65, 66, 67, 68, 69, 70, 71,
+		72, 73, 74, 75, 76, 77, 78, 79,
+		80, 81, 82, 83, 84, 85, 86, 87,
+		88, 89, 90, 91, 92, 93, 94, 95,
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127,
+		128, 129, 130, 131, 132, 133, 134, 135,
+		136, 137, 138, 139, 140, 141, 142, 143,
+		144, 145, 146, 147, 148, 149, 150, 151,
+		152, 153, 154, 155, 156, 157, 158, 159,
+		160, 161, 162, 163, 164, 165, 166, 167,
+		168, 169, 170, 171, 172, 173, 174, 175,
+		176, 177, 178, 179, 180, 181, 182, 183,
+		184, 185, 186, 187, 188, 189, 190, 191,
+		192, 193, 194, 195, 196, 197, 198, 199,
+		200, 201, 202, 203, 204, 205, 206, 207,
+		208, 209, 210, 211, 212, 213, 214, 215,
+		216, 217, 218, 219, 220, 221, 222, 223,
+		224, 225, 226, 227, 228, 229, 230, 231,
+		232, 233, 234, 235, 236, 237, 238, 239,
+		240, 241, 242, 243, 244, 245, 246, 247,
+		248, 249, 250, 251, 252, 253, 254, 255,
+		256, 257, 258, 259, 260, 261, 262, 263,
+	},
+	.oobfree = { {2, 6}, {264, 80} },
+};
 
 /*
  * Generic flash bbt descriptors
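
The two 8KiB layouts above enumerate every ECC byte position by hand; the
tables are regular (ECC bytes start at OOB offset 8 and run consecutively),
so a small generator can reproduce or sanity-check them. This is a checking
aid, not kernel code:

#include <stdio.h>

static void print_eccpos(const char *name, int eccbytes)
{
	int i;

	printf("%s:\n", name);
	for (i = 0; i < eccbytes; i++)
		printf("%d%s", 8 + i, (i % 8 == 7) ? ",\n" : ", ");
}

int main(void)
{
	print_eccpos("oob_8192_ecc4", 128);	/* positions 8..135 */
	print_eccpos("oob_8192_ecc8", 256);	/* positions 8..263 */
	return 0;
}
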
@@ -442,20 +505,29 @@
 		if (mtd->writesize > 512) {
 			nand_fcr0 =
 				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
-				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
 
 			iowrite32be(
-				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
-				(IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
-				&ifc->ifc_nand.nand_fir0);
+				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				 (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
+				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+				 &ifc->ifc_nand.nand_fir0);
+			iowrite32be(
+				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				 (IFC_FIR_OP_RDSTAT <<
+					IFC_NAND_FIR1_OP6_SHIFT) |
+				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+				 &ifc->ifc_nand.nand_fir1);
 		} else {
 			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
 					IFC_NAND_FCR0_CMD1_SHIFT) |
 				    (NAND_CMD_SEQIN <<
-					IFC_NAND_FCR0_CMD2_SHIFT));
+					IFC_NAND_FCR0_CMD2_SHIFT) |
+				    (NAND_CMD_STATUS <<
+					IFC_NAND_FCR0_CMD3_SHIFT));
 
 			iowrite32be(
 				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -464,8 +536,13 @@
 				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
 				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
 				&ifc->ifc_nand.nand_fir0);
-			iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
-				    &ifc->ifc_nand.nand_fir1);
+			iowrite32be(
+				 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				 (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+				 (IFC_FIR_OP_RDSTAT <<
+					IFC_NAND_FIR1_OP7_SHIFT) |
+				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+				  &ifc->ifc_nand.nand_fir1);
 
 			if (column >= mtd->writesize)
 				nand_fcr0 |=
@@ -719,8 +796,6 @@
 							chip->page_shift);
 	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
 							chip->phys_erase_shift);
-	dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
-							chip->ecclayout);
 	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
 							chip->ecc.mode);
 	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
@@ -873,11 +948,25 @@
 		} else {
 			layout = &oob_4096_ecc8;
 			chip->ecc.bytes = 16;
+			chip->ecc.strength = 8;
 		}
 
 		priv->bufnum_mask = 1;
 		break;
 
+	case CSOR_NAND_PGS_8K:
+		if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+		    CSOR_NAND_ECC_MODE_4) {
+			layout = &oob_8192_ecc4;
+		} else {
+			layout = &oob_8192_ecc8;
+			chip->ecc.bytes = 16;
+			chip->ecc.strength = 8;
+		}
+
+		priv->bufnum_mask = 0;
+		break;
+
 	default:
 		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
 		return -ENODEV;
@@ -908,7 +997,6 @@
 		iounmap(priv->vbase);
 
 	ifc_nand_ctrl->chips[priv->bank] = NULL;
-	dev_set_drvdata(priv->dev, NULL);
 
 	return 0;
 }
@@ -1083,25 +1171,7 @@
 	.remove      = fsl_ifc_nand_remove,
 };
 
-static int __init fsl_ifc_nand_init(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&fsl_ifc_nand_driver);
-	if (ret)
-		printk(KERN_ERR "fsl-ifc: Failed to register platform"
-				"driver\n");
-
-	return ret;
-}
-
-static void __exit fsl_ifc_nand_exit(void)
-{
-	platform_driver_unregister(&fsl_ifc_nand_driver);
-}
-
-module_init(fsl_ifc_nand_init);
-module_exit(fsl_ifc_nand_exit);
+module_platform_driver(fsl_ifc_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 4f8857f..aaced29 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -187,6 +187,12 @@
 	/* Select BCH ECC. */
 	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
+	/*
+	 * Decouple the chip select from dma channel. We use dma0 for all
+	 * the chips.
+	 */
+	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
 	gpmi_disable_clk(this);
 	return 0;
 err_out:
@@ -1073,6 +1079,13 @@
 		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
 		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
 	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+		/*
+		 * On i.MX6, all the ready/busy pins are bound together,
+		 * so we only need to check chip 0.
+		 */
+		if (GPMI_IS_MX6Q(this))
+			chip = 0;
+
 		/* MX28 shares the same R/B register as MX6Q. */
 		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
 		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
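
BM_GPMI_CTRL1_DECOUPLE_CS is written through the register's _SET alias. On
MXS-style IP blocks each control register has +0x4/+0x8/+0xc shadows where a
write sets, clears, or toggles exactly the bits written, which avoids a racy
read-modify-write. A simulated sketch of the convention (the offsets match
the HW_GPMI_CTRL1_* defines; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define REG_VAL	0x0	/* plain read/write */
#define REG_SET	0x4	/* write-1-to-set */
#define REG_CLR	0x8	/* write-1-to-clear */
#define REG_TOG	0xc	/* write-1-to-toggle */

static uint32_t reg;	/* simulated hardware register */

static void mmio_write(unsigned int off, uint32_t v)
{
	switch (off) {
	case REG_VAL: reg = v; break;
	case REG_SET: reg |= v; break;
	case REG_CLR: reg &= ~v; break;
	case REG_TOG: reg ^= v; break;
	}
}

int main(void)
{
	const uint32_t decouple_cs = 1u << 24;	/* BP_GPMI_CTRL1_DECOUPLE_CS */

	mmio_write(REG_SET, decouple_cs);	/* set one bit, touch nothing else */
	printf("ctrl1 = %#x\n", reg);
	return 0;
}
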
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a9830ff..dabbc14 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -45,7 +45,10 @@
 	.pattern	= scan_ff_pattern
 };
 
-/*  We will use all the (page + OOB). */
+/*
+ * We may change the layout if we can get the ECC info from the datasheet;
+ * otherwise we use the whole (page + OOB) area.
+ */
 static struct nand_ecclayout gpmi_hw_ecclayout = {
 	.eccbytes = 0,
 	.eccpos = { 0, },
@@ -354,9 +357,8 @@
 
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 {
-	int chipnr = this->current_chip;
-
-	return this->dma_chans[chipnr];
+	/* We use DMA channel 0 to access all the NAND chips. */
+	return this->dma_chans[0];
 }
 
 /* Can we use the upper's buffer directly for DMA? */
@@ -392,8 +394,6 @@
 	struct gpmi_nand_data *this = param;
 	struct completion *dma_c = &this->dma_done;
 
-	complete(dma_c);
-
 	switch (this->dma_type) {
 	case DMA_FOR_COMMAND:
 		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
@@ -418,6 +418,8 @@
 	default:
 		pr_err("in wrong DMA operation.\n");
 	}
+
+	complete(dma_c);
 }
 
 int start_dma_without_bch_irq(struct gpmi_nand_data *this,
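
Moving complete() to the end of dma_irq_callback() is a classic ordering fix:
the instant complete() runs, the thread blocked in wait_for_completion() may
resume and reuse or free the DMA buffers, so dma_unmap_sg() must already have
happened. The safe shape, as a kernel-style sketch rather than the driver's
exact code:

static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;

	/* 1. Finish all the cleanup the waiter depends on ... */
	dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);

	/* 2. ... and only then wake the waiter; after this line the
	 * buffers may be reused immediately by the other thread. */
	complete(&this->dma_done);
}
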
@@ -1263,14 +1265,22 @@
 static int
 gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
 {
-	/*
-	 * The BCH will use all the (page + oob).
-	 * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
-	 * But it can not stop some ioctls such MEMWRITEOOB which uses
-	 * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
-	 * these ioctls too.
-	 */
-	return -EPERM;
+	struct nand_oobfree *of = mtd->ecclayout->oobfree;
+	int status = 0;
+
+	/* Do we have available oob area? */
+	if (!of->length)
+		return -EPERM;
+
+	if (!nand_is_slc(chip))
+		return -EPERM;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+	chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
 }
 
 static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -1568,8 +1578,6 @@
 
 static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
 {
-	int ret;
-
 	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
 	if (GPMI_IS_MX23(this))
 		this->swap_block_mark = false;
@@ -1577,12 +1585,8 @@
 		this->swap_block_mark = true;
 
 	/* Set up the medium geometry */
-	ret = gpmi_set_geometry(this);
-	if (ret)
-		return ret;
+	return gpmi_set_geometry(this);
 
-	/* NAND boot init, depends on the gpmi_set_geometry(). */
-	return nand_boot_init(this);
 }
 
 static void gpmi_nfc_exit(struct gpmi_nand_data *this)
@@ -1664,7 +1668,7 @@
 	if (ret)
 		goto err_out;
 
-	ret = nand_scan_ident(mtd, 1, NULL);
+	ret = nand_scan_ident(mtd, GPMI_IS_MX6Q(this) ? 2 : 1, NULL);
 	if (ret)
 		goto err_out;
 
@@ -1672,10 +1676,16 @@
 	if (ret)
 		goto err_out;
 
+	chip->options |= NAND_SKIP_BBTSCAN;
 	ret = nand_scan_tail(mtd);
 	if (ret)
 		goto err_out;
 
+	ret = nand_boot_init(this);
+	if (ret)
+		goto err_out;
+	chip->scan_bbt(mtd);
+
 	ppdata.of_node = this->pdev->dev.of_node;
 	ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
 	if (ret)
@@ -1691,19 +1701,19 @@
 	{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
 	{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
 	{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
-	{},
+	{}
 };
 
 static const struct of_device_id gpmi_nand_id_table[] = {
 	{
 		.compatible = "fsl,imx23-gpmi-nand",
-		.data = (void *)&gpmi_ids[IS_MX23]
+		.data = (void *)&gpmi_ids[IS_MX23],
 	}, {
 		.compatible = "fsl,imx28-gpmi-nand",
-		.data = (void *)&gpmi_ids[IS_MX28]
+		.data = (void *)&gpmi_ids[IS_MX28],
 	}, {
 		.compatible = "fsl,imx6q-gpmi-nand",
-		.data = (void *)&gpmi_ids[IS_MX6Q]
+		.data = (void *)&gpmi_ids[IS_MX6Q],
 	}, {}
 };
 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -1722,7 +1732,7 @@
 		return -ENODEV;
 	}
 
-	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
 	if (!this) {
 		pr_err("Failed to allocate per-device memory\n");
 		return -ENOMEM;
@@ -1752,7 +1762,6 @@
 	release_resources(this);
 exit_acquire_resources:
 	dev_err(this->dev, "driver registration failed: %d\n", ret);
-	kfree(this);
 
 	return ret;
 }
@@ -1763,7 +1772,6 @@
 
 	gpmi_nfc_exit(this);
 	release_resources(this);
-	kfree(this);
 	return 0;
 }
 
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
index 53397cc..82114cd 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -108,6 +108,9 @@
 #define HW_GPMI_CTRL1_CLR				0x00000068
 #define HW_GPMI_CTRL1_TOG				0x0000006c
 
+#define BP_GPMI_CTRL1_DECOUPLE_CS			24
+#define BM_GPMI_CTRL1_DECOUPLE_CS	(1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
 #define BP_GPMI_CTRL1_WRN_DLY_SEL			22
 #define BM_GPMI_CTRL1_WRN_DLY_SEL	(0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
 #define BF_GPMI_CTRL1_WRN_DLY_SEL(v)  \
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index f4dd2a8..327d96c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -905,7 +905,7 @@
 	.driver		= {
 		.name	= DRV_NAME,
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(lpc32xx_nand_match),
+		.of_match_table = lpc32xx_nand_match,
 	},
 };
 
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index add7570..23e6974 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -893,7 +893,6 @@
 
 	/* Avoid extra scan if using BBT, setup BBT support */
 	if (host->ncfg->use_bbt) {
-		chip->options |= NAND_SKIP_BBTSCAN;
 		chip->bbt_options |= NAND_BBT_USE_FLASH;
 
 		/*
@@ -915,13 +914,6 @@
 		goto err_exit3;
 	}
 
-	/* Standard layout in FLASH for bad block tables */
-	if (host->ncfg->use_bbt) {
-		if (nand_default_bbt(mtd) < 0)
-			dev_err(&pdev->dev,
-			       "Error initializing default bad block tables\n");
-	}
-
 	mtd->name = "nxp_lpc3220_slc";
 	ppdata.of_node = pdev->dev.of_node;
 	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
@@ -1023,7 +1015,7 @@
 	.driver		= {
 		.name	= LPC32XX_MODNAME,
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(lpc32xx_nand_match),
+		.of_match_table = lpc32xx_nand_match,
 	},
 };
 
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ce8242b..9dfdb06 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/completion.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_mtd.h>
 
@@ -395,7 +396,7 @@
 
 	if (useirq) {
 		if (!host->devtype_data->check_int(host)) {
-			INIT_COMPLETION(host->op_completion);
+			reinit_completion(&host->op_completion);
 			irq_control(host, 1);
 			wait_for_completion(&host->op_completion);
 		}
@@ -1507,7 +1508,7 @@
 	host->devtype_data->irq_control(host, 0);
 
 	err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
-			IRQF_DISABLED, DRIVER_NAME, host);
+			0, DRIVER_NAME, host);
 	if (err)
 		return err;
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d340b2f..bd39f7b 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2912,12 +2912,13 @@
 	/* get the info we want. */
 	ecc = (struct onfi_ext_ecc_info *)cursor;
 
-	if (ecc->codeword_size) {
-		chip->ecc_strength_ds = ecc->ecc_bits;
-		chip->ecc_step_ds = 1 << ecc->codeword_size;
+	if (!ecc->codeword_size) {
+		pr_debug("Invalid codeword size\n");
+		goto ext_out;
 	}
 
-	pr_info("ONFI extended param page detected.\n");
+	chip->ecc_strength_ds = ecc->ecc_bits;
+	chip->ecc_step_ds = 1 << ecc->codeword_size;
 	ret = 0;
 
 ext_out:
@@ -2935,29 +2936,34 @@
 	int i;
 	int val;
 
-	/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
-	if (chip->options & NAND_BUSWIDTH_16) {
-		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
-		return 0;
-	}
 	/* Try ONFI for unknown chip or LP */
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
 	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
 		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
 		return 0;
 
+	/*
+	 * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
+	 * with NAND_BUSWIDTH_16
+	 */
+	if (chip->options & NAND_BUSWIDTH_16) {
+		pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
+		return 0;
+	}
+
 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
 	for (i = 0; i < 3; i++) {
 		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
 				le16_to_cpu(p->crc)) {
-			pr_info("ONFI param page %d valid\n", i);
 			break;
 		}
 	}
 
-	if (i == 3)
+	if (i == 3) {
+		pr_err("Could not find valid ONFI parameter page; aborting\n");
 		return 0;
+	}
 
 	/* Check version */
 	val = le16_to_cpu(p->revision);
@@ -2981,11 +2987,23 @@
 	sanitize_string(p->model, sizeof(p->model));
 	if (!mtd->name)
 		mtd->name = p->model;
+
 	mtd->writesize = le32_to_cpu(p->byte_per_page);
-	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+
+	/*
+	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
+	 * (don't ask me who thought of this...). MTD assumes that these
+	 * dimensions will be power-of-2, so just truncate the remaining area.
+	 */
+	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+	mtd->erasesize *= mtd->writesize;
+
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+
+	/* See erasesize comment */
+	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+	chip->bits_per_cell = p->bits_per_cell;
 
 	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
 		*busw = NAND_BUSWIDTH_16;
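
The fls()-based expressions above round pages_per_block and blocks_per_lun
down to a power of two before erasesize and chipsize are derived, because MTD
assumes power-of-2 geometry. The trick is 1 << (fls(x) - 1), i.e. keep only
the highest set bit; a runnable check (this fls mirrors the kernel's 1-based
find-last-set semantics):

#include <stdio.h>

static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;	/* 1-based index of highest set bit, 0 if x == 0 */
}

static unsigned int round_down_pow2(unsigned int x)
{
	return x ? 1u << (fls(x) - 1) : 0;
}

int main(void)
{
	/* e.g. a chip reporting 96 pages per block is treated as 64 */
	printf("96  -> %u\n", round_down_pow2(96));	/* 64 */
	printf("128 -> %u\n", round_down_pow2(128));	/* 128 */
	return 0;
}
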
@@ -3009,10 +3027,11 @@
 
 		/* The Extended Parameter Page is supported since ONFI 2.1. */
 		if (nand_flash_detect_ext_param_page(mtd, chip, p))
-			pr_info("Failed to detect the extended param page.\n");
+			pr_warn("Failed to detect ONFI extended param page\n");
+	} else {
+		pr_warn("Could not retrieve ONFI ECC requirements\n");
 	}
 
-	pr_info("ONFI flash detected\n");
 	return 1;
 }
 
@@ -3075,6 +3094,16 @@
 	return arrlen;
 }
 
+/* Extract the bits-per-cell info from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+	int bits;
+
+	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+	bits >>= NAND_CI_CELLTYPE_SHIFT;
+	return bits + 1;
+}
+
 /*
  * Many new NAND share similar device ID codes, which represent the size of the
  * chip. The rest of the parameters must be decoded according to generic or
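
nand_get_bits_per_cell() turns the cell-type field of the 3rd ID byte into a
bit count: SLC encodes as 0 (1 bit per cell), MLC as 1 (2 bits per cell), and
so on, which is what nand_is_slc() tests against. A small demo using the mask
and shift this series relies on (treat the constant values as assumptions
outside this kernel version):

#include <stdio.h>

#define NAND_CI_CELLTYPE_MSK	0x0c	/* bits 3:2 of the 3rd ID byte */
#define NAND_CI_CELLTYPE_SHIFT	2

static int nand_get_bits_per_cell(unsigned char cellinfo)
{
	int bits = (cellinfo & NAND_CI_CELLTYPE_MSK) >> NAND_CI_CELLTYPE_SHIFT;

	return bits + 1;	/* 0 -> SLC, 1 -> MLC, ... */
}

int main(void)
{
	printf("id[2]=0x00 -> %d bit(s)/cell (SLC)\n",
	       nand_get_bits_per_cell(0x00));
	printf("id[2]=0x14 -> %d bit(s)/cell (MLC)\n",
	       nand_get_bits_per_cell(0x14));
	return 0;
}
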
@@ -3085,7 +3114,7 @@
 {
 	int extid, id_len;
 	/* The 3rd id byte holds MLC / multichip data */
-	chip->cellinfo = id_data[2];
+	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
 	/* The 4th id byte is the important one */
 	extid = id_data[3];
 
@@ -3101,8 +3130,7 @@
 	 * ID to decide what to do.
 	 */
 	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
-			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
-			id_data[5] != 0x00) {
+			!nand_is_slc(chip) && id_data[5] != 0x00) {
 		/* Calc pagesize */
 		mtd->writesize = 2048 << (extid & 0x03);
 		extid >>= 2;
@@ -3134,7 +3162,7 @@
 			(((extid >> 1) & 0x04) | (extid & 0x03));
 		*busw = 0;
 	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
-			(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+			!nand_is_slc(chip)) {
 		unsigned int tmp;
 
 		/* Calc pagesize */
@@ -3197,7 +3225,7 @@
 		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
 		 */
 		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
-				!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+				nand_is_slc(chip) &&
 				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
 				!(id_data[4] & 0x80) /* !BENAND */) {
 			mtd->oobsize = 32 * mtd->writesize >> 9;
@@ -3222,6 +3250,9 @@
 	mtd->oobsize = mtd->writesize / 32;
 	*busw = type->options & NAND_BUSWIDTH_16;
 
+	/* All legacy ID NAND are small-page, SLC */
+	chip->bits_per_cell = 1;
+
 	/*
 	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
 	 * some Spansion chips have erasesize that conflicts with size
@@ -3258,11 +3289,11 @@
 	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
 	 * AMD/Spansion, and Macronix.  All others scan only the first page.
 	 */
-	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+	if (!nand_is_slc(chip) &&
 			(maf_id == NAND_MFR_SAMSUNG ||
 			 maf_id == NAND_MFR_HYNIX))
 		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
-	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+	else if ((nand_is_slc(chip) &&
 				(maf_id == NAND_MFR_SAMSUNG ||
 				 maf_id == NAND_MFR_HYNIX ||
 				 maf_id == NAND_MFR_TOSHIBA ||
@@ -3286,7 +3317,7 @@
 		mtd->erasesize = type->erasesize;
 		mtd->oobsize = type->oobsize;
 
-		chip->cellinfo = id_data[2];
+		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
 		chip->chipsize = (uint64_t)type->chipsize << 20;
 		chip->options |= type->options;
 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
@@ -3441,11 +3472,13 @@
 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
 		chip->cmdfunc = nand_command_lp;
 
-	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
-		" %dMiB, page size: %d, OOB size: %d\n",
+	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
 		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
-		chip->onfi_version ? chip->onfi_params.model : type->name,
-		(int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
+		chip->onfi_version ? chip->onfi_params.model : type->name);
+
+	pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+		mtd->writesize, mtd->oobsize);
 
 	return type;
 }
@@ -3525,6 +3558,7 @@
 {
 	int i;
 	struct nand_chip *chip = mtd->priv;
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
 
 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
 	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
@@ -3541,19 +3575,19 @@
 	/*
 	 * If no default placement scheme is given, select an appropriate one.
 	 */
-	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
 		switch (mtd->oobsize) {
 		case 8:
-			chip->ecc.layout = &nand_oob_8;
+			ecc->layout = &nand_oob_8;
 			break;
 		case 16:
-			chip->ecc.layout = &nand_oob_16;
+			ecc->layout = &nand_oob_16;
 			break;
 		case 64:
-			chip->ecc.layout = &nand_oob_64;
+			ecc->layout = &nand_oob_64;
 			break;
 		case 128:
-			chip->ecc.layout = &nand_oob_128;
+			ecc->layout = &nand_oob_128;
 			break;
 		default:
 			pr_warn("No oob scheme defined for oobsize %d\n",
@@ -3570,64 +3604,62 @@
 	 * selected and we have 256 byte pagesize fallback to software ECC
 	 */
 
-	switch (chip->ecc.mode) {
+	switch (ecc->mode) {
 	case NAND_ECC_HW_OOB_FIRST:
 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
-		if (!chip->ecc.calculate || !chip->ecc.correct ||
-		     !chip->ecc.hwctl) {
+		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
 			pr_warn("No ECC functions supplied; "
 				   "hardware ECC not possible\n");
 			BUG();
 		}
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc_oob_first;
 
 	case NAND_ECC_HW:
 		/* Use standard hwecc read page function? */
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_hwecc;
-		if (!chip->ecc.write_page)
-			chip->ecc.write_page = nand_write_page_hwecc;
-		if (!chip->ecc.read_page_raw)
-			chip->ecc.read_page_raw = nand_read_page_raw;
-		if (!chip->ecc.write_page_raw)
-			chip->ecc.write_page_raw = nand_write_page_raw;
-		if (!chip->ecc.read_oob)
-			chip->ecc.read_oob = nand_read_oob_std;
-		if (!chip->ecc.write_oob)
-			chip->ecc.write_oob = nand_write_oob_std;
-		if (!chip->ecc.read_subpage)
-			chip->ecc.read_subpage = nand_read_subpage;
-		if (!chip->ecc.write_subpage)
-			chip->ecc.write_subpage = nand_write_subpage_hwecc;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_hwecc;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_std;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_std;
+		if (!ecc->read_subpage)
+			ecc->read_subpage = nand_read_subpage;
+		if (!ecc->write_subpage)
+			ecc->write_subpage = nand_write_subpage_hwecc;
 
 	case NAND_ECC_HW_SYNDROME:
-		if ((!chip->ecc.calculate || !chip->ecc.correct ||
-		     !chip->ecc.hwctl) &&
-		    (!chip->ecc.read_page ||
-		     chip->ecc.read_page == nand_read_page_hwecc ||
-		     !chip->ecc.write_page ||
-		     chip->ecc.write_page == nand_write_page_hwecc)) {
+		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+		    (!ecc->read_page ||
+		     ecc->read_page == nand_read_page_hwecc ||
+		     !ecc->write_page ||
+		     ecc->write_page == nand_write_page_hwecc)) {
 			pr_warn("No ECC functions supplied; "
 				   "hardware ECC not possible\n");
 			BUG();
 		}
 		/* Use standard syndrome read/write page function? */
-		if (!chip->ecc.read_page)
-			chip->ecc.read_page = nand_read_page_syndrome;
-		if (!chip->ecc.write_page)
-			chip->ecc.write_page = nand_write_page_syndrome;
-		if (!chip->ecc.read_page_raw)
-			chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
-		if (!chip->ecc.write_page_raw)
-			chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
-		if (!chip->ecc.read_oob)
-			chip->ecc.read_oob = nand_read_oob_syndrome;
-		if (!chip->ecc.write_oob)
-			chip->ecc.write_oob = nand_write_oob_syndrome;
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_syndrome;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_syndrome;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw_syndrome;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw_syndrome;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_syndrome;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_syndrome;
 
-		if (mtd->writesize >= chip->ecc.size) {
-			if (!chip->ecc.strength) {
+		if (mtd->writesize >= ecc->size) {
+			if (!ecc->strength) {
 				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
 				BUG();
 			}
@@ -3635,23 +3667,23 @@
 		}
 		pr_warn("%d byte HW ECC not possible on "
 			   "%d byte page size, fallback to SW ECC\n",
-			   chip->ecc.size, mtd->writesize);
-		chip->ecc.mode = NAND_ECC_SOFT;
+			   ecc->size, mtd->writesize);
+		ecc->mode = NAND_ECC_SOFT;
 
 	case NAND_ECC_SOFT:
-		chip->ecc.calculate = nand_calculate_ecc;
-		chip->ecc.correct = nand_correct_data;
-		chip->ecc.read_page = nand_read_page_swecc;
-		chip->ecc.read_subpage = nand_read_subpage;
-		chip->ecc.write_page = nand_write_page_swecc;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.write_oob = nand_write_oob_std;
-		if (!chip->ecc.size)
-			chip->ecc.size = 256;
-		chip->ecc.bytes = 3;
-		chip->ecc.strength = 1;
+		ecc->calculate = nand_calculate_ecc;
+		ecc->correct = nand_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+		if (!ecc->size)
+			ecc->size = 256;
+		ecc->bytes = 3;
+		ecc->strength = 1;
 		break;
 
 	case NAND_ECC_SOFT_BCH:
@@ -3659,88 +3691,83 @@
 			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
 			BUG();
 		}
-		chip->ecc.calculate = nand_bch_calculate_ecc;
-		chip->ecc.correct = nand_bch_correct_data;
-		chip->ecc.read_page = nand_read_page_swecc;
-		chip->ecc.read_subpage = nand_read_subpage;
-		chip->ecc.write_page = nand_write_page_swecc;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.write_oob = nand_write_oob_std;
+		ecc->calculate = nand_bch_calculate_ecc;
+		ecc->correct = nand_bch_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
 		/*
 		 * Board driver should supply ecc.size and ecc.bytes values to
 		 * select how many bits are correctable; see nand_bch_init()
 		 * for details. Otherwise, default to 4 bits for large page
 		 * devices.
 		 */
-		if (!chip->ecc.size && (mtd->oobsize >= 64)) {
-			chip->ecc.size = 512;
-			chip->ecc.bytes = 7;
+		if (!ecc->size && (mtd->oobsize >= 64)) {
+			ecc->size = 512;
+			ecc->bytes = 7;
 		}
-		chip->ecc.priv = nand_bch_init(mtd,
-					       chip->ecc.size,
-					       chip->ecc.bytes,
-					       &chip->ecc.layout);
-		if (!chip->ecc.priv) {
+		ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
+					       &ecc->layout);
+		if (!ecc->priv) {
 			pr_warn("BCH ECC initialization failed!\n");
 			BUG();
 		}
-		chip->ecc.strength =
-			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
+		ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
 		break;
 
 	case NAND_ECC_NONE:
 		pr_warn("NAND_ECC_NONE selected by board driver. "
 			   "This is not recommended!\n");
-		chip->ecc.read_page = nand_read_page_raw;
-		chip->ecc.write_page = nand_write_page_raw;
-		chip->ecc.read_oob = nand_read_oob_std;
-		chip->ecc.read_page_raw = nand_read_page_raw;
-		chip->ecc.write_page_raw = nand_write_page_raw;
-		chip->ecc.write_oob = nand_write_oob_std;
-		chip->ecc.size = mtd->writesize;
-		chip->ecc.bytes = 0;
-		chip->ecc.strength = 0;
+		ecc->read_page = nand_read_page_raw;
+		ecc->write_page = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->write_oob = nand_write_oob_std;
+		ecc->size = mtd->writesize;
+		ecc->bytes = 0;
+		ecc->strength = 0;
 		break;
 
 	default:
-		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
+		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
 		BUG();
 	}
 
 	/* For many systems, the standard OOB write also works for raw */
-	if (!chip->ecc.read_oob_raw)
-		chip->ecc.read_oob_raw = chip->ecc.read_oob;
-	if (!chip->ecc.write_oob_raw)
-		chip->ecc.write_oob_raw = chip->ecc.write_oob;
+	if (!ecc->read_oob_raw)
+		ecc->read_oob_raw = ecc->read_oob;
+	if (!ecc->write_oob_raw)
+		ecc->write_oob_raw = ecc->write_oob;
 
 	/*
 	 * The number of bytes available for a client to place data into
 	 * the out of band area.
 	 */
-	chip->ecc.layout->oobavail = 0;
-	for (i = 0; chip->ecc.layout->oobfree[i].length
-			&& i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
-		chip->ecc.layout->oobavail +=
-			chip->ecc.layout->oobfree[i].length;
-	mtd->oobavail = chip->ecc.layout->oobavail;
+	ecc->layout->oobavail = 0;
+	for (i = 0; ecc->layout->oobfree[i].length
+			&& i < ARRAY_SIZE(ecc->layout->oobfree); i++)
+		ecc->layout->oobavail += ecc->layout->oobfree[i].length;
+	mtd->oobavail = ecc->layout->oobavail;
 
 	/*
 	 * Set the number of read / write steps for one page depending on ECC
 	 * mode.
 	 */
-	chip->ecc.steps = mtd->writesize / chip->ecc.size;
-	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+	ecc->steps = mtd->writesize / ecc->size;
+	if (ecc->steps * ecc->size != mtd->writesize) {
 		pr_warn("Invalid ECC parameters\n");
 		BUG();
 	}
-	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+	ecc->total = ecc->steps * ecc->bytes;
 
 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
-	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
-	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
-		switch (chip->ecc.steps) {
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+		switch (ecc->steps) {
 		case 2:
 			mtd->subpage_sft = 1;
 			break;
@@ -3760,11 +3787,11 @@
 	chip->pagebuf = -1;
 
 	/* Large page NAND with SOFT_ECC should support subpage reads */
-	if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
+	if ((ecc->mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
 		chip->options |= NAND_SUBPAGE_READ;
 
 	/* Fill in remaining MTD driver data */
-	mtd->type = MTD_NANDFLASH;
+	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
 						MTD_CAP_NANDFLASH;
 	mtd->_erase = nand_erase;
@@ -3785,9 +3812,9 @@
 	mtd->writebufsize = mtd->writesize;
 
 	/* propagate ecc info to mtd_info */
-	mtd->ecclayout = chip->ecc.layout;
-	mtd->ecc_strength = chip->ecc.strength;
-	mtd->ecc_step_size = chip->ecc.size;
+	mtd->ecclayout = ecc->layout;
+	mtd->ecc_strength = ecc->strength;
+	mtd->ecc_step_size = ecc->size;
 	/*
 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index bc06196..c0615d1 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -412,25 +412,6 @@
 	}
 }
 
-/* Scan a given block full */
-static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
-			   loff_t offs, uint8_t *buf, size_t readlen,
-			   int scanlen, int numpages)
-{
-	int ret, j;
-
-	ret = scan_read_oob(mtd, buf, offs, readlen);
-	/* Ignore ECC errors when checking for BBM */
-	if (ret && !mtd_is_bitflip_or_eccerr(ret))
-		return ret;
-
-	for (j = 0; j < numpages; j++, buf += scanlen) {
-		if (check_pattern(buf, scanlen, mtd->writesize, bd))
-			return 1;
-	}
-	return 0;
-}
-
 /* Scan a given block partially */
 static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
 			   loff_t offs, uint8_t *buf, int numpages)
@@ -477,24 +458,17 @@
 	struct nand_bbt_descr *bd, int chip)
 {
 	struct nand_chip *this = mtd->priv;
-	int i, numblocks, numpages, scanlen;
+	int i, numblocks, numpages;
 	int startblock;
 	loff_t from;
-	size_t readlen;
 
 	pr_info("Scanning device for bad blocks\n");
 
-	if (bd->options & NAND_BBT_SCANALLPAGES)
-		numpages = 1 << (this->bbt_erase_shift - this->page_shift);
-	else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+	if (bd->options & NAND_BBT_SCAN2NDPAGE)
 		numpages = 2;
 	else
 		numpages = 1;
 
-	/* We need only read few bytes from the OOB area */
-	scanlen = 0;
-	readlen = bd->len;
-
 	if (chip == -1) {
 		numblocks = mtd->size >> this->bbt_erase_shift;
 		startblock = 0;
@@ -519,12 +493,7 @@
 
 		BUG_ON(bd->options & NAND_BBT_NO_OOB);
 
-		if (bd->options & NAND_BBT_SCANALLPAGES)
-			ret = scan_block_full(mtd, bd, from, buf, readlen,
-					      scanlen, numpages);
-		else
-			ret = scan_block_fast(mtd, bd, from, buf, numpages);
-
+		ret = scan_block_fast(mtd, bd, from, buf, numpages);
 		if (ret < 0)
 			return ret;
 
@@ -1392,4 +1361,3 @@
 }
 
 EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index bdc1d15..42e8a77 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -575,7 +575,7 @@
 		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
 		if (IS_ERR(cfile))
 			return PTR_ERR(cfile);
-		if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+		if (!cfile->f_op->read && !cfile->f_op->aio_read) {
 			NS_ERR("alloc_device: cache file not readable\n");
 			err = -EINVAL;
 			goto err_close;
@@ -2372,7 +2372,7 @@
 	if ((retval = init_nandsim(nsmtd)) != 0)
 		goto err_exit;
 
-	if ((retval = nand_default_bbt(nsmtd)) != 0)
+	if ((retval = chip->scan_bbt(nsmtd)) != 0)
 		goto err_exit;
 
 	if ((retval = parse_badblocks(nand, nsmtd)) != 0)
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 4ecf0e5f..f777250 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -25,10 +25,8 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-#include <linux/bch.h>
+#include <linux/mtd/nand_bch.h>
 #include <linux/platform_data/elm.h>
-#endif
 
 #include <linux/platform_data/mtd-nand-omap2.h>
 
@@ -141,6 +139,8 @@
 #define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */
 #define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */
 
+#define BADBLOCK_MARKER_LENGTH		2
+
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
 static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
 	0xac, 0x6b, 0xff, 0x99, 0x7b};
@@ -149,17 +149,6 @@
 
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks
- */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr bb_descrip_flashbased = {
-	.options = NAND_BBT_SCANALLPAGES,
-	.offs = 0,
-	.len = 1,
-	.pattern = scan_ff_pattern,
-};
-
 
 struct omap_nand_info {
 	struct nand_hw_control		controller;
@@ -182,14 +171,10 @@
 	u_char				*buf;
 	int					buf_len;
 	struct gpmc_nand_regs		reg;
-
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-	struct bch_control             *bch;
-	struct nand_ecclayout           ecclayout;
+	/* fields specific for BCHx_HW ECC scheme */
 	bool				is_elm_used;
 	struct device			*elm_dev;
 	struct device_node		*of_node;
-#endif
 };
 
 /**
@@ -1058,8 +1043,7 @@
 	}
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-
+#if defined(CONFIG_MTD_NAND_ECC_BCH) || defined(CONFIG_MTD_NAND_OMAP_BCH)
 /**
  * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
  * @mtd: MTD device structure
@@ -1140,7 +1124,9 @@
 	/* Clear ecc and enable bits */
 	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
 }
+#endif
 
+#ifdef CONFIG_MTD_NAND_ECC_BCH
 /**
  * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
  * @mtd: MTD device structure
@@ -1225,7 +1211,9 @@
 
 	return 0;
 }
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
 /**
  * omap3_calculate_ecc_bch - Generate bytes of ECC bytes
  * @mtd:	MTD device structure
@@ -1519,38 +1507,6 @@
 }
 
 /**
- * omap3_correct_data_bch - Decode received data and correct errors
- * @mtd: MTD device structure
- * @data: page data
- * @read_ecc: ecc read from nand flash
- * @calc_ecc: ecc read from HW ECC registers
- */
-static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
-				  u_char *read_ecc, u_char *calc_ecc)
-{
-	int i, count;
-	/* cannot correct more than 8 errors */
-	unsigned int errloc[8];
-	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-						   mtd);
-
-	count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
-			   errloc);
-	if (count > 0) {
-		/* correct errors */
-		for (i = 0; i < count; i++) {
-			/* correct data only, not ecc bytes */
-			if (errloc[i] < 8*512)
-				data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
-			pr_debug("corrected bitflip %u\n", errloc[i]);
-		}
-	} else if (count < 0) {
-		pr_err("ecc unrecoverable error\n");
-	}
-	return count;
-}
-
-/**
  * omap_write_page_bch - BCH ecc based write page function for entire page
  * @mtd:		mtd info structure
  * @chip:		nand chip info structure
@@ -1637,197 +1593,46 @@
 }
 
 /**
- * omap3_free_bch - Release BCH ecc resources
- * @mtd: MTD device structure
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM device tree node
+ * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16
  */
-static void omap3_free_bch(struct mtd_info *mtd)
+static int is_elm_present(struct omap_nand_info *info,
+			struct device_node *elm_node, enum bch_ecc bch_type)
 {
-	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-						   mtd);
-	if (info->bch) {
-		free_bch(info->bch);
-		info->bch = NULL;
+	struct platform_device *pdev;
+	info->is_elm_used = false;
+	/* check whether elm-id is passed via DT */
+	if (!elm_node) {
+		pr_err("nand: error: ELM DT node not found\n");
+		return -ENODEV;
 	}
-}
-
-/**
- * omap3_init_bch - Initialize BCH ECC
- * @mtd: MTD device structure
- * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
- */
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
-	int max_errors;
-	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-						   mtd);
-#ifdef CONFIG_MTD_NAND_OMAP_BCH8
-	const int hw_errors = BCH8_MAX_ERROR;
-#else
-	const int hw_errors = BCH4_MAX_ERROR;
-#endif
-	enum bch_ecc bch_type;
-	const __be32 *parp;
-	int lenp;
-	struct device_node *elm_node;
-
-	info->bch = NULL;
-
-	max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
-		BCH8_MAX_ERROR : BCH4_MAX_ERROR;
-	if (max_errors != hw_errors) {
-		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
-		       max_errors, hw_errors);
-		goto fail;
+	pdev = of_find_device_by_node(elm_node);
+	/* check whether ELM device is registered */
+	if (!pdev) {
+		pr_err("nand: error: ELM device not found\n");
+		return -ENODEV;
 	}
-
-	info->nand.ecc.size = 512;
-	info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
-	info->nand.ecc.mode = NAND_ECC_HW;
-	info->nand.ecc.strength = max_errors;
-
-	if (hw_errors == BCH8_MAX_ERROR)
-		bch_type = BCH8_ECC;
-	else
-		bch_type = BCH4_ECC;
-
-	/* Detect availability of ELM module */
-	parp = of_get_property(info->of_node, "elm_id", &lenp);
-	if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
-		pr_err("Missing elm_id property, fall back to Software BCH\n");
-		info->is_elm_used = false;
-	} else {
-		struct platform_device *pdev;
-
-		elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
-		pdev = of_find_device_by_node(elm_node);
-		info->elm_dev = &pdev->dev;
-
-		if (elm_config(info->elm_dev, bch_type) == 0)
-			info->is_elm_used = true;
-	}
-
-	if (info->is_elm_used && (mtd->writesize <= 4096)) {
-
-		if (hw_errors == BCH8_MAX_ERROR)
-			info->nand.ecc.bytes = BCH8_SIZE;
-		else
-			info->nand.ecc.bytes = BCH4_SIZE;
-
-		info->nand.ecc.correct = omap_elm_correct_data;
-		info->nand.ecc.calculate = omap3_calculate_ecc_bch;
-		info->nand.ecc.read_page = omap_read_page_bch;
-		info->nand.ecc.write_page = omap_write_page_bch;
-	} else {
-		/*
-		 * software bch library is only used to detect and
-		 * locate errors
-		 */
-		info->bch = init_bch(13, max_errors,
-				0x201b /* hw polynomial */);
-		if (!info->bch)
-			goto fail;
-
-		info->nand.ecc.correct = omap3_correct_data_bch;
-
-		/*
-		 * The number of corrected errors in an ecc block that will
-		 * trigger block scrubbing defaults to the ecc strength (4 or 8)
-		 * Set mtd->bitflip_threshold here to define a custom threshold.
-		 */
-
-		if (max_errors == 8) {
-			info->nand.ecc.bytes = 13;
-			info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
-		} else {
-			info->nand.ecc.bytes = 7;
-			info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
-		}
-	}
-
-	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+	/* ELM module available, now configure it */
+	info->elm_dev = &pdev->dev;
+	if (elm_config(info->elm_dev, bch_type))
+		return -ENODEV;
+	info->is_elm_used = true;
 	return 0;
-fail:
-	omap3_free_bch(mtd);
-	return -1;
 }
-
-/**
- * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
- * @mtd: MTD device structure
- */
-static int omap3_init_bch_tail(struct mtd_info *mtd)
-{
-	int i, steps, offset;
-	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-						   mtd);
-	struct nand_ecclayout *layout = &info->ecclayout;
-
-	/* build oob layout */
-	steps = mtd->writesize/info->nand.ecc.size;
-	layout->eccbytes = steps*info->nand.ecc.bytes;
-
-	/* do not bother creating special oob layouts for small page devices */
-	if (mtd->oobsize < 64) {
-		pr_err("BCH ecc is not supported on small page devices\n");
-		goto fail;
-	}
-
-	/* reserve 2 bytes for bad block marker */
-	if (layout->eccbytes+2 > mtd->oobsize) {
-		pr_err("no oob layout available for oobsize %d eccbytes %u\n",
-		       mtd->oobsize, layout->eccbytes);
-		goto fail;
-	}
-
-	/* ECC layout compatible with RBL for BCH8 */
-	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
-		offset = 2;
-	else
-		offset = mtd->oobsize - layout->eccbytes;
-
-	/* put ecc bytes at oob tail */
-	for (i = 0; i < layout->eccbytes; i++)
-		layout->eccpos[i] = offset + i;
-
-	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
-		layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
-	else
-		layout->oobfree[0].offset = 2;
-
-	layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
-	info->nand.ecc.layout = layout;
-
-	if (!(info->nand.options & NAND_BUSWIDTH_16))
-		info->nand.badblock_pattern = &bb_descrip_flashbased;
-	return 0;
-fail:
-	omap3_free_bch(mtd);
-	return -1;
-}
-
-#else
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
-	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
-	return -1;
-}
-static int omap3_init_bch_tail(struct mtd_info *mtd)
-{
-	return -1;
-}
-static void omap3_free_bch(struct mtd_info *mtd)
-{
-}
-#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
 
 static int omap_nand_probe(struct platform_device *pdev)
 {
 	struct omap_nand_info		*info;
 	struct omap_nand_platform_data	*pdata;
+	struct mtd_info			*mtd;
+	struct nand_chip		*nand_chip;
+	struct nand_ecclayout		*ecclayout;
 	int				err;
-	int				i, offset;
-	dma_cap_mask_t mask;
-	unsigned sig;
+	int				i;
+	dma_cap_mask_t			mask;
+	unsigned			sig;
 	struct resource			*res;
 	struct mtd_part_parser_data	ppdata = {};
 
@@ -1837,7 +1642,8 @@
 		return -ENODEV;
 	}
 
-	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+				GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -1846,47 +1652,45 @@
 	spin_lock_init(&info->controller.lock);
 	init_waitqueue_head(&info->controller.wq);
 
-	info->pdev = pdev;
-
+	info->pdev		= pdev;
 	info->gpmc_cs		= pdata->cs;
 	info->reg		= pdata->reg;
-
-	info->mtd.priv		= &info->nand;
-	info->mtd.name		= dev_name(&pdev->dev);
-	info->mtd.owner		= THIS_MODULE;
-
-	info->nand.options	= pdata->devsize;
-	info->nand.options	|= NAND_SKIP_BBTSCAN;
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
 	info->of_node		= pdata->of_node;
-#endif
+	mtd			= &info->mtd;
+	mtd->priv		= &info->nand;
+	mtd->name		= dev_name(&pdev->dev);
+	mtd->owner		= THIS_MODULE;
+	nand_chip		= &info->nand;
+	nand_chip->ecc.priv	= NULL;
+	nand_chip->options	|= NAND_SKIP_BBTSCAN;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		err = -EINVAL;
 		dev_err(&pdev->dev, "error getting memory resource\n");
-		goto out_free_info;
+		goto return_error;
 	}
 
 	info->phys_base = res->start;
 	info->mem_size = resource_size(res);
 
-	if (!request_mem_region(info->phys_base, info->mem_size,
-				pdev->dev.driver->name)) {
+	if (!devm_request_mem_region(&pdev->dev, info->phys_base,
+				info->mem_size,	pdev->dev.driver->name)) {
 		err = -EBUSY;
-		goto out_free_info;
+		goto return_error;
 	}
 
-	info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
-	if (!info->nand.IO_ADDR_R) {
+	nand_chip->IO_ADDR_R = devm_ioremap(&pdev->dev, info->phys_base,
+						info->mem_size);
+	if (!nand_chip->IO_ADDR_R) {
 		err = -ENOMEM;
-		goto out_release_mem_region;
+		goto return_error;
 	}
 
-	info->nand.controller = &info->controller;
+	nand_chip->controller = &info->controller;
 
-	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
-	info->nand.cmd_ctrl  = omap_hwcontrol;
+	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+	nand_chip->cmd_ctrl  = omap_hwcontrol;
 
 	/*
 	 * If RDY/BSY line is connected to OMAP then use the omap ready
@@ -1896,26 +1700,42 @@
 	 * device and read status register until you get a failure or success
 	 */
 	if (pdata->dev_ready) {
-		info->nand.dev_ready = omap_dev_ready;
-		info->nand.chip_delay = 0;
+		nand_chip->dev_ready = omap_dev_ready;
+		nand_chip->chip_delay = 0;
 	} else {
-		info->nand.waitfunc = omap_wait;
-		info->nand.chip_delay = 50;
+		nand_chip->waitfunc = omap_wait;
+		nand_chip->chip_delay = 50;
 	}
 
+	/* scan NAND device connected to chip controller */
+	nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		pr_err("nand device scan failed, possibly a bus-width mismatch\n");
+		err = -ENXIO;
+		goto return_error;
+	}
+
+	/* check for small page devices */
+	if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
+		pr_err("small page devices are not supported\n");
+		err = -EINVAL;
+		goto return_error;
+	}
+
+	/* re-populate low-level callbacks based on xfer modes */
 	switch (pdata->xfer_type) {
 	case NAND_OMAP_PREFETCH_POLLED:
-		info->nand.read_buf   = omap_read_buf_pref;
-		info->nand.write_buf  = omap_write_buf_pref;
+		nand_chip->read_buf   = omap_read_buf_pref;
+		nand_chip->write_buf  = omap_write_buf_pref;
 		break;
 
 	case NAND_OMAP_POLLED:
-		if (info->nand.options & NAND_BUSWIDTH_16) {
-			info->nand.read_buf   = omap_read_buf16;
-			info->nand.write_buf  = omap_write_buf16;
+		if (nand_chip->options & NAND_BUSWIDTH_16) {
+			nand_chip->read_buf   = omap_read_buf16;
+			nand_chip->write_buf  = omap_write_buf16;
 		} else {
-			info->nand.read_buf   = omap_read_buf8;
-			info->nand.write_buf  = omap_write_buf8;
+			nand_chip->read_buf   = omap_read_buf8;
+			nand_chip->write_buf  = omap_write_buf8;
 		}
 		break;
 
@@ -1927,7 +1747,7 @@
 		if (!info->dma) {
 			dev_err(&pdev->dev, "DMA engine request failed\n");
 			err = -ENXIO;
-			goto out_release_mem_region;
+			goto return_error;
 		} else {
 			struct dma_slave_config cfg;
 
@@ -1942,10 +1762,10 @@
 			if (err) {
 				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
 					err);
-				goto out_release_mem_region;
+				goto return_error;
 			}
-			info->nand.read_buf   = omap_read_buf_dma_pref;
-			info->nand.write_buf  = omap_write_buf_dma_pref;
+			nand_chip->read_buf   = omap_read_buf_dma_pref;
+			nand_chip->write_buf  = omap_write_buf_dma_pref;
 		}
 		break;
 
@@ -1954,34 +1774,36 @@
 		if (info->gpmc_irq_fifo <= 0) {
 			dev_err(&pdev->dev, "error getting fifo irq\n");
 			err = -ENODEV;
-			goto out_release_mem_region;
+			goto return_error;
 		}
-		err = request_irq(info->gpmc_irq_fifo,	omap_nand_irq,
-					IRQF_SHARED, "gpmc-nand-fifo", info);
+		err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+					omap_nand_irq, IRQF_SHARED,
+					"gpmc-nand-fifo", info);
 		if (err) {
 			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
 						info->gpmc_irq_fifo, err);
 			info->gpmc_irq_fifo = 0;
-			goto out_release_mem_region;
+			goto return_error;
 		}
 
 		info->gpmc_irq_count = platform_get_irq(pdev, 1);
 		if (info->gpmc_irq_count <= 0) {
 			dev_err(&pdev->dev, "error getting count irq\n");
 			err = -ENODEV;
-			goto out_release_mem_region;
+			goto return_error;
 		}
-		err = request_irq(info->gpmc_irq_count,	omap_nand_irq,
-					IRQF_SHARED, "gpmc-nand-count", info);
+		err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+					omap_nand_irq, IRQF_SHARED,
+					"gpmc-nand-count", info);
 		if (err) {
 			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
 						info->gpmc_irq_count, err);
 			info->gpmc_irq_count = 0;
-			goto out_release_mem_region;
+			goto return_error;
 		}
 
-		info->nand.read_buf  = omap_read_buf_irq_pref;
-		info->nand.write_buf = omap_write_buf_irq_pref;
+		nand_chip->read_buf  = omap_read_buf_irq_pref;
+		nand_chip->write_buf = omap_write_buf_irq_pref;
 
 		break;
 
@@ -1989,117 +1811,223 @@
 		dev_err(&pdev->dev,
 			"xfer_type(%d) not supported!\n", pdata->xfer_type);
 		err = -EINVAL;
-		goto out_release_mem_region;
+		goto return_error;
 	}
 
-	/* select the ecc type */
-	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
-		info->nand.ecc.mode = NAND_ECC_SOFT;
-	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
-		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
-		info->nand.ecc.bytes            = 3;
-		info->nand.ecc.size             = 512;
-		info->nand.ecc.strength         = 1;
-		info->nand.ecc.calculate        = omap_calculate_ecc;
-		info->nand.ecc.hwctl            = omap_enable_hwecc;
-		info->nand.ecc.correct          = omap_correct_data;
-		info->nand.ecc.mode             = NAND_ECC_HW;
-	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
-		err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
-		if (err) {
+	/* populate MTD interface based on ECC scheme */
+	nand_chip->ecc.layout	= &omap_oobinfo;
+	ecclayout		= &omap_oobinfo;
+	switch (pdata->ecc_opt) {
+	case OMAP_ECC_HAM1_CODE_HW:
+		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+		nand_chip->ecc.mode             = NAND_ECC_HW;
+		nand_chip->ecc.bytes            = 3;
+		nand_chip->ecc.size             = 512;
+		nand_chip->ecc.strength         = 1;
+		nand_chip->ecc.calculate        = omap_calculate_ecc;
+		nand_chip->ecc.hwctl            = omap_enable_hwecc;
+		nand_chip->ecc.correct          = omap_correct_data;
+		/* define ECC layout */
+		ecclayout->eccbytes		= nand_chip->ecc.bytes *
+							(mtd->writesize /
+							nand_chip->ecc.size);
+		if (nand_chip->options & NAND_BUSWIDTH_16)
+			ecclayout->eccpos[0]	= BADBLOCK_MARKER_LENGTH;
+		else
+			ecclayout->eccpos[0]	= 1;
+		ecclayout->oobfree->offset	= ecclayout->eccpos[0] +
+							ecclayout->eccbytes;
+		break;
+
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		nand_chip->ecc.bytes		= 7;
+		nand_chip->ecc.strength		= 4;
+		nand_chip->ecc.hwctl		= omap3_enable_hwecc_bch;
+		nand_chip->ecc.correct		= nand_bch_correct_data;
+		nand_chip->ecc.calculate	= omap3_calculate_ecc_bch4;
+		/* define ECC layout */
+		ecclayout->eccbytes		= nand_chip->ecc.bytes *
+							(mtd->writesize /
+							nand_chip->ecc.size);
+		ecclayout->eccpos[0]		= BADBLOCK_MARKER_LENGTH;
+		ecclayout->oobfree->offset	= ecclayout->eccpos[0] +
+							ecclayout->eccbytes;
+		/* software bch library is used for locating errors */
+		nand_chip->ecc.priv		= nand_bch_init(mtd,
+							nand_chip->ecc.size,
+							nand_chip->ecc.bytes,
+							&nand_chip->ecc.layout);
+		if (!nand_chip->ecc.priv) {
+			pr_err("nand: error: unable to use s/w BCH library\n");
 			err = -EINVAL;
-			goto out_release_mem_region;
 		}
-	}
+		break;
+#else
+		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+		err = -EINVAL;
+		goto return_error;
+#endif
 
-	/* DIP switches on some boards change between 8 and 16 bit
-	 * bus widths for flash.  Try the other width if the first try fails.
-	 */
-	if (nand_scan_ident(&info->mtd, 1, NULL)) {
-		info->nand.options ^= NAND_BUSWIDTH_16;
-		if (nand_scan_ident(&info->mtd, 1, NULL)) {
-			err = -ENXIO;
-			goto out_release_mem_region;
+	case OMAP_ECC_BCH4_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		/* the 8th ECC byte is kept reserved for ROM-code compatibility */
+		nand_chip->ecc.bytes		= 7 + 1;
+		nand_chip->ecc.strength		= 4;
+		nand_chip->ecc.hwctl		= omap3_enable_hwecc_bch;
+		nand_chip->ecc.correct		= omap_elm_correct_data;
+		nand_chip->ecc.calculate	= omap3_calculate_ecc_bch;
+		nand_chip->ecc.read_page	= omap_read_page_bch;
+		nand_chip->ecc.write_page	= omap_write_page_bch;
+		/* define ECC layout */
+		ecclayout->eccbytes		= nand_chip->ecc.bytes *
+							(mtd->writesize /
+							nand_chip->ecc.size);
+		ecclayout->eccpos[0]		= BADBLOCK_MARKER_LENGTH;
+		ecclayout->oobfree->offset	= ecclayout->eccpos[0] +
+							ecclayout->eccbytes;
+		/* This ECC scheme requires ELM H/W block */
+		if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
+			pr_err("nand: error: could not initialize ELM\n");
+			err = -ENODEV;
+			goto return_error;
 		}
-	}
+		break;
+#else
+		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+		err = -EINVAL;
+		goto return_error;
+#endif
 
-	/* rom code layout */
-	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
-
-		if (info->nand.options & NAND_BUSWIDTH_16)
-			offset = 2;
-		else {
-			offset = 1;
-			info->nand.badblock_pattern = &bb_descrip_flashbased;
-		}
-		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
-		for (i = 0; i < omap_oobinfo.eccbytes; i++)
-			omap_oobinfo.eccpos[i] = i+offset;
-
-		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
-		omap_oobinfo.oobfree->length = info->mtd.oobsize -
-					(offset + omap_oobinfo.eccbytes);
-
-		info->nand.ecc.layout = &omap_oobinfo;
-	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
-		/* build OOB layout for BCH ECC correction */
-		err = omap3_init_bch_tail(&info->mtd);
-		if (err) {
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		nand_chip->ecc.bytes		= 13;
+		nand_chip->ecc.strength		= 8;
+		nand_chip->ecc.hwctl		= omap3_enable_hwecc_bch;
+		nand_chip->ecc.correct		= nand_bch_correct_data;
+		nand_chip->ecc.calculate	= omap3_calculate_ecc_bch8;
+		/* define ECC layout */
+		ecclayout->eccbytes		= nand_chip->ecc.bytes *
+							(mtd->writesize /
+							nand_chip->ecc.size);
+		ecclayout->eccpos[0]		= BADBLOCK_MARKER_LENGTH;
+		ecclayout->oobfree->offset	= ecclayout->eccpos[0] +
+							ecclayout->eccbytes;
+		/* software bch library is used for locating errors */
+		nand_chip->ecc.priv		= nand_bch_init(mtd,
+							nand_chip->ecc.size,
+							nand_chip->ecc.bytes,
+							&nand_chip->ecc.layout);
+		if (!nand_chip->ecc.priv) {
+			pr_err("nand: error: unable to use s/w BCH library\n");
 			err = -EINVAL;
-			goto out_release_mem_region;
+			goto return_error;
 		}
+		break;
+#else
+		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+		err = -EINVAL;
+		goto return_error;
+#endif
+
+	case OMAP_ECC_BCH8_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		/* the 14th ECC byte is kept reserved for ROM-code compatibility */
+		nand_chip->ecc.bytes		= 13 + 1;
+		nand_chip->ecc.strength		= 8;
+		nand_chip->ecc.hwctl		= omap3_enable_hwecc_bch;
+		nand_chip->ecc.correct		= omap_elm_correct_data;
+		nand_chip->ecc.calculate	= omap3_calculate_ecc_bch;
+		nand_chip->ecc.read_page	= omap_read_page_bch;
+		nand_chip->ecc.write_page	= omap_write_page_bch;
+		/* This ECC scheme requires ELM H/W block */
+		err = is_elm_present(info, pdata->elm_of_node, BCH8_ECC);
+		if (err < 0) {
+			pr_err("nand: error: could not initialize ELM\n");
+			goto return_error;
+		}
+		/* define ECC layout */
+		ecclayout->eccbytes		= nand_chip->ecc.bytes *
+							(mtd->writesize /
+							nand_chip->ecc.size);
+		ecclayout->eccpos[0]		= BADBLOCK_MARKER_LENGTH;
+		ecclayout->oobfree->offset	= ecclayout->eccpos[0] +
+							ecclayout->eccbytes;
+		break;
+#else
+		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+		err = -EINVAL;
+		goto return_error;
+#endif
+
+	default:
+		pr_err("nand: error: invalid or unsupported ECC scheme\n");
+		err = -EINVAL;
+		goto return_error;
+	}
+
+	/* populate remaining ECC layout data */
+	ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
+							ecclayout->eccbytes);
+	for (i = 1; i < ecclayout->eccbytes; i++)
+		ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+	/* check if NAND device's OOB is enough to store ECC signatures */
+	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+		pr_err("not enough OOB bytes: required = %d, available = %d\n",
+					   ecclayout->eccbytes, mtd->oobsize);
+		err = -EINVAL;
+		goto return_error;
 	}
 
 	/* second phase scan */
-	if (nand_scan_tail(&info->mtd)) {
+	if (nand_scan_tail(mtd)) {
 		err = -ENXIO;
-		goto out_release_mem_region;
+		goto return_error;
 	}
 
 	ppdata.of_node = pdata->of_node;
-	mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
+	mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
 				  pdata->nr_parts);
 
-	platform_set_drvdata(pdev, &info->mtd);
+	platform_set_drvdata(pdev, mtd);
 
 	return 0;
 
-out_release_mem_region:
+return_error:
 	if (info->dma)
 		dma_release_channel(info->dma);
-	if (info->gpmc_irq_count > 0)
-		free_irq(info->gpmc_irq_count, info);
-	if (info->gpmc_irq_fifo > 0)
-		free_irq(info->gpmc_irq_fifo, info);
-	release_mem_region(info->phys_base, info->mem_size);
-out_free_info:
-	kfree(info);
-
+	if (nand_chip->ecc.priv) {
+		nand_bch_free(nand_chip->ecc.priv);
+		nand_chip->ecc.priv = NULL;
+	}
 	return err;
 }
 
 static int omap_nand_remove(struct platform_device *pdev)
 {
 	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct nand_chip *nand_chip = mtd->priv;
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
-	omap3_free_bch(&info->mtd);
-
+	if (nand_chip->ecc.priv) {
+		nand_bch_free(nand_chip->ecc.priv);
+		nand_chip->ecc.priv = NULL;
+	}
 	if (info->dma)
 		dma_release_channel(info->dma);
-
-	if (info->gpmc_irq_count > 0)
-		free_irq(info->gpmc_irq_count, info);
-	if (info->gpmc_irq_fifo > 0)
-		free_irq(info->gpmc_irq_fifo, info);
-
-	/* Release NAND device, its internal structures and partitions */
-	nand_release(&info->mtd);
-	iounmap(info->nand.IO_ADDR_R);
-	release_mem_region(info->phys_base, info->mem_size);
-	kfree(info);
+	nand_release(mtd);
 	return 0;
 }
 
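
The omap2 NAND conversion above leans on the managed-resource (devm_*) API:
memory, MMIO mappings and IRQs bound to &pdev->dev are released automatically
when probe fails or the device is unbound, which is why every error path in
the new omap_nand_probe() can collapse into the single return_error label. A
minimal sketch of the pattern, with hypothetical foo_* names standing in for
driver specifics:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>

struct foo_info {
	void __iomem *base;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_info *info;
	struct resource *res;
	int irq, err;

	/* freed by the driver core on probe failure or unbind */
	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	info->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!info->base)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* the IRQ is likewise released automatically */
	err = devm_request_irq(&pdev->dev, irq, foo_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), info);
	if (err)
		return err;

	platform_set_drvdata(pdev, info);
	return 0;
}
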
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c28d4e2..4cabdc9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -39,6 +39,13 @@
 #define NAND_STOP_DELAY		(2 * HZ/50)
 #define PAGE_CHUNK_SIZE		(2048)
 
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE	256
+
 /* registers and bit definitions */
 #define NDCR		(0x00) /* Control register */
 #define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
@@ -164,6 +171,7 @@
 
 	unsigned int 		buf_start;
 	unsigned int		buf_count;
+	unsigned int		buf_size;
 
 	/* DMA information */
 	int			drcmr_dat;
@@ -540,7 +548,6 @@
 	info->oob_size		= 0;
 	info->use_ecc		= 0;
 	info->use_spare		= 1;
-	info->use_dma		= (use_dma) ? 1 : 0;
 	info->is_ready		= 0;
 	info->retcode		= ERR_NONE;
 	if (info->cs != 0)
@@ -912,26 +919,20 @@
 	return 0;
 }
 
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE	PAGE_SIZE
-
 #ifdef ARCH_HAS_DMA
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
 	struct platform_device *pdev = info->pdev;
-	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
 
 	if (use_dma == 0) {
-		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
 		if (info->data_buff == NULL)
 			return -ENOMEM;
 		return 0;
 	}
 
-	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
 				&info->data_buff_phys, GFP_KERNEL);
 	if (info->data_buff == NULL) {
 		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
@@ -945,11 +946,16 @@
 				pxa3xx_nand_data_dma_irq, info);
 	if (info->data_dma_ch < 0) {
 		dev_err(&pdev->dev, "failed to request data dma\n");
-		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+		dma_free_coherent(&pdev->dev, info->buf_size,
 				info->data_buff, info->data_buff_phys);
 		return info->data_dma_ch;
 	}
 
+	/*
+	 * Now that the DMA buffer is allocated, turn on DMA
+	 * for subsequent I/O operations.
+	 */
+	info->use_dma = 1;
 	return 0;
 }
 
@@ -958,7 +964,7 @@
 	struct platform_device *pdev = info->pdev;
 	if (use_dma) {
 		pxa_free_dma(info->data_dma_ch);
-		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+		dma_free_coherent(&pdev->dev, info->buf_size,
 				  info->data_buff, info->data_buff_phys);
 	} else {
 		kfree(info->data_buff);
@@ -967,7 +973,7 @@
 #else
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
-	info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
 	if (info->data_buff == NULL)
 		return -ENOMEM;
 	return 0;
@@ -1081,7 +1087,16 @@
 	else
 		host->col_addr_cycles = 1;
 
+	/* release the initial buffer */
+	kfree(info->data_buff);
+
+	/* allocate the real data + oob buffer */
+	info->buf_size = mtd->writesize + mtd->oobsize;
+	ret = pxa3xx_nand_init_buff(info);
+	if (ret)
+		return ret;
 	info->oob_buff = info->data_buff + mtd->writesize;
+
 	if ((mtd->size >> chip->page_shift) > 65536)
 		host->row_addr_cycles = 3;
 	else
@@ -1187,15 +1202,18 @@
 	}
 	info->mmio_phys = r->start;
 
-	ret = pxa3xx_nand_init_buff(info);
-	if (ret)
+	/* Allocate a buffer to allow flash detection */
+	info->buf_size = INIT_BUFFER_SIZE;
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+	if (info->data_buff == NULL) {
+		ret = -ENOMEM;
 		goto fail_disable_clk;
+	}
 
 	/* initialize all interrupts to be disabled */
 	disable_int(info, NDSR_MASK);
 
-	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
-			  pdev->name, info);
+	ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
 		goto fail_free_buf;
@@ -1207,7 +1225,7 @@
 
 fail_free_buf:
 	free_irq(irq, info);
-	pxa3xx_nand_free_buff(info);
+	kfree(info->data_buff);
 fail_disable_clk:
 	clk_disable_unprepare(info->clk);
 	return ret;
@@ -1412,7 +1430,7 @@
 static struct platform_driver pxa3xx_nand_driver = {
 	.driver = {
 		.name	= "pxa3xx-nand",
-		.of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
+		.of_match_table = pxa3xx_nand_dt_ids,
 	},
 	.probe		= pxa3xx_nand_probe,
 	.remove		= pxa3xx_nand_remove,
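
pxa3xx_nand now allocates its buffer in two phases: a small INIT_BUFFER_SIZE
buffer suffices for the STATUS/READID/PARAM identification commands, and only
once the chip geometry is known is a buffer of mtd->writesize + mtd->oobsize
bytes allocated for real I/O. A condensed sketch of the scheme, with
hypothetical bar_* names:

#include <linux/slab.h>

#define BAR_INIT_BUFFER_SIZE	256	/* PARAM is the largest initial command */

struct bar_info {
	unsigned int buf_size;
	unsigned char *data_buff;
};

/* phase 1: just enough buffer to identify the chip */
static int bar_alloc_init_buf(struct bar_info *info)
{
	info->buf_size = BAR_INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	return info->data_buff ? 0 : -ENOMEM;
}

/* phase 2: once geometry is known, size the buffer for page + OOB */
static int bar_alloc_data_buf(struct bar_info *info,
			      unsigned int writesize, unsigned int oobsize)
{
	kfree(info->data_buff);	/* release the detection buffer */
	info->buf_size = writesize + oobsize;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	return info->data_buff ? 0 : -ENOMEM;
}
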
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 9dcf02d..325930d 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -181,7 +181,7 @@
 	/* Set dma direction */
 	dev->dma_dir = do_read;
 	dev->dma_stage = 1;
-	INIT_COMPLETION(dev->dma_done);
+	reinit_completion(&dev->dma_done);
 
 	dbg_verbose("doing dma %s ", do_read ? "read" : "write");
 
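
INIT_COMPLETION(x) is being replaced tree-wide by reinit_completion(&x),
which only resets the done counter of an already-initialized completion
instead of redoing the full initialization. A minimal usage sketch:

#include <linux/completion.h>

static DECLARE_COMPLETION(dma_done);	/* full initialization, once */

static void do_one_transfer(void)
{
	/* re-arm before each transfer; the waitqueue stays initialized */
	reinit_completion(&dma_done);
	/* start the DMA here; the IRQ handler calls complete(&dma_done) */
	wait_for_completion(&dma_done);
}
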
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 49bd9155..fe8058a 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -150,17 +150,13 @@
 	struct mtd_part_parser_data ppdata;
 
 	/* Allocate memory for the device structure (and zero it) */
-	host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
-	if (!host) {
-		printk(KERN_ERR
-		       "socrates_nand: failed to allocate device structure.\n");
+	host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
 		return -ENOMEM;
-	}
 
 	host->io_base = of_iomap(ofdev->dev.of_node, 0);
 	if (host->io_base == NULL) {
-		printk(KERN_ERR "socrates_nand: ioremap failed\n");
-		kfree(host);
+		dev_err(&ofdev->dev, "ioremap failed\n");
 		return -EIO;
 	}
 
@@ -212,9 +208,7 @@
 	nand_release(mtd);
 
 out:
-	dev_set_drvdata(&ofdev->dev, NULL);
 	iounmap(host->io_base);
-	kfree(host);
 	return res;
 }
 
@@ -228,9 +222,7 @@
 
 	nand_release(mtd);
 
-	dev_set_drvdata(&ofdev->dev, NULL);
 	iounmap(host->io_base);
-	kfree(host);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 396530d..a3747c9 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -428,8 +428,7 @@
 	/* 15 us command delay time */
 	nand_chip->chip_delay = 15;
 
-	retval = request_irq(irq, &tmio_irq,
-				IRQF_DISABLED, dev_name(&dev->dev), tmio);
+	retval = request_irq(irq, &tmio_irq, 0, dev_name(&dev->dev), tmio);
 	if (retval) {
 		dev_err(&dev->dev, "request_irq error %d\n", retval);
 		goto err_irq;
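
IRQF_DISABLED has been a no-op for years (genirq always runs handlers with
interrupts disabled), so converted drivers simply pass 0 for the flags. A
trivial sketch, wrapped in a hypothetical helper:

#include <linux/interrupt.h>
#include <linux/device.h>

static int attach_irq(struct device *dev, int irq, irq_handler_t handler,
		      void *priv)
{
	/* flags are 0 now; IRQF_DISABLED added nothing */
	return request_irq(irq, handler, 0, dev_name(dev), priv);
}
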
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index c5f4ebf..46f27de 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -50,7 +50,7 @@
 	struct NFTLrecord *nftl;
 	unsigned long temp;
 
-	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+	if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
 		return;
 	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
 	if (memcmp(mtd->name, "DiskOnChip", 10))
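
The open-coded mtd->type checks become mtd_type_is_nand(), which matches MLC
NAND (MTD_MLCNANDFLASH) in addition to MTD_NANDFLASH. The helper amounts to
roughly the following; the real one lives in include/linux/mtd/mtd.h:

#include <linux/mtd/mtd.h>

/* paraphrased for illustration */
static inline int my_mtd_type_is_nand(const struct mtd_info *mtd)
{
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}
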
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 558071b..6547c84 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -159,7 +159,7 @@
 				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
 		}
 
-		INIT_COMPLETION(c->irq_done);
+		reinit_completion(&c->irq_done);
 		if (c->gpio_irq) {
 			result = gpio_get_value(c->gpio_irq);
 			if (result == -1) {
@@ -349,7 +349,7 @@
 	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
 				 dma_dst, 0, 0);
 
-	INIT_COMPLETION(c->dma_done);
+	reinit_completion(&c->dma_done);
 	omap_start_dma(c->dma_channel);
 
 	timeout = jiffies + msecs_to_jiffies(20);
@@ -420,7 +420,7 @@
 	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
 				 dma_dst, 0, 0);
 
-	INIT_COMPLETION(c->dma_done);
+	reinit_completion(&c->dma_done);
 	omap_start_dma(c->dma_channel);
 
 	timeout = jiffies + msecs_to_jiffies(20);
@@ -499,7 +499,7 @@
 	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
 				 dma_dst, 0, 0);
 
-	INIT_COMPLETION(c->dma_done);
+	reinit_completion(&c->dma_done);
 	omap_start_dma(c->dma_channel);
 	wait_for_completion(&c->dma_done);
 
@@ -544,7 +544,7 @@
 	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
 				 dma_dst, 0, 0);
 
-	INIT_COMPLETION(c->dma_done);
+	reinit_completion(&c->dma_done);
 	omap_start_dma(c->dma_channel);
 	wait_for_completion(&c->dma_done);
 
@@ -573,28 +573,6 @@
 
 static struct platform_driver omap2_onenand_driver;
 
-static int __adjust_timing(struct device *dev, void *data)
-{
-	int ret = 0;
-	struct omap2_onenand *c;
-
-	c = dev_get_drvdata(dev);
-
-	BUG_ON(c->setup == NULL);
-
-	/* DMA is not in use so this is all that is needed */
-	/* Revisit for OMAP3! */
-	ret = c->setup(c->onenand.base, &c->freq);
-
-	return ret;
-}
-
-int omap2_onenand_rephase(void)
-{
-	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
-				      NULL, __adjust_timing);
-}
-
 static void omap2_onenand_shutdown(struct platform_device *pdev)
 {
 	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3f41f2..1de33b5 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2556,10 +2556,6 @@
 {
 	int ret;
 
-	/* Check for invalid offset */
-	if (ofs > mtd->size)
-		return -EINVAL;
-
 	onenand_get_device(mtd, FL_READING);
 	ret = onenand_block_isbad_nolock(mtd, ofs, 0);
 	onenand_release_device(mtd);
@@ -3529,7 +3525,7 @@
 {
 	struct onenand_chip *this = mtd->priv;
 	unsigned die, bdry;
-	int ret, syscfg, locked;
+	int syscfg, locked;
 
 	/* Disable ECC */
 	syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
@@ -3540,7 +3536,7 @@
 		this->wait(mtd, FL_SYNCING);
 
 		this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
-		ret = this->wait(mtd, FL_READING);
+		this->wait(mtd, FL_READING);
 
 		bdry = this->read_word(this->base + ONENAND_DATARAM);
 		if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
@@ -3550,7 +3546,7 @@
 		this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
 
 		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-		ret = this->wait(mtd, FL_RESETING);
+		this->wait(mtd, FL_RESETING);
 
 		printk(KERN_INFO "Die %d boundary: %d%s\n", die,
 		       this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3734,7 +3730,7 @@
 
 	/* Check if the boundary is locked */
 	this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
-	ret = this->wait(mtd, FL_READING);
+	this->wait(mtd, FL_READING);
 
 	thisboundary = this->read_word(this->base + ONENAND_DATARAM);
 	if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
@@ -3835,7 +3831,7 @@
 static int onenand_probe(struct mtd_info *mtd)
 {
 	struct onenand_chip *this = mtd->priv;
-	int maf_id, dev_id, ver_id;
+	int dev_id, ver_id;
 	int density;
 	int ret;
 
@@ -3843,8 +3839,7 @@
 	if (ret)
 		return ret;
 
-	/* Read manufacturer and device IDs from Register */
-	maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+	/* Device and version IDs from Register */
 	dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
 	ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
 	this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index ab2a52a..daf82ba 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -290,7 +290,7 @@
 	int cis_sector;
 
 	/* Check for small page NAND flash */
-	if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+	if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
 	    mtd->size > UINT_MAX)
 		return;
 
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 3cd3aab..6f97615 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -349,7 +349,7 @@
 		goto exit_mtddev;
 	}
 
-	if (mtd->type != MTD_NANDFLASH) {
+	if (!mtd_type_is_nand(mtd)) {
 		pr_info("this test requires NAND flash\n");
 		err = -ENODEV;
 		goto exit_nand;
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index ff35c46..2e9e2d1 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -289,7 +289,7 @@
 		return err;
 	}
 
-	if (mtd->type != MTD_NANDFLASH) {
+	if (!mtd_type_is_nand(mtd)) {
 		pr_info("this test requires NAND flash\n");
 		goto out;
 	}
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 44b96e9..ed2d3f6 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -353,7 +353,7 @@
 		return err;
 	}
 
-	if (mtd->type != MTD_NANDFLASH) {
+	if (!mtd_type_is_nand(mtd)) {
 		pr_info("this test requires NAND flash\n");
 		goto out;
 	}
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index e2c0adf..a876371 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -299,7 +299,7 @@
 		return err;
 	}
 
-	if (mtd->type != MTD_NANDFLASH) {
+	if (!mtd_type_is_nand(mtd)) {
 		pr_info("this test requires NAND flash\n");
 		goto out;
 	}
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c071d41..33bb1f2 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -900,10 +900,9 @@
 		 * number.
 		 */
 		image_seq = be32_to_cpu(ech->image_seq);
-		if (!ubi->image_seq && image_seq)
+		if (!ubi->image_seq)
 			ubi->image_seq = image_seq;
-		if (ubi->image_seq && image_seq &&
-		    ubi->image_seq != image_seq) {
+		if (image_seq && ubi->image_seq != image_seq) {
 			ubi_err("bad image sequence number %d in PEB %d, expected %d",
 				image_seq, pnum, ubi->image_seq);
 			ubi_dump_ec_hdr(ech);
@@ -1417,9 +1416,11 @@
 				ai = alloc_ai("ubi_aeb_slab_cache2");
 				if (!ai)
 					return -ENOMEM;
-			}
 
-			err = scan_all(ubi, ai, UBI_FM_MAX_START);
+				err = scan_all(ubi, ai, 0);
+			} else {
+				err = scan_all(ubi, ai, UBI_FM_MAX_START);
+			}
 		}
 	}
 #else
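
Both the attach and fastmap paths now enforce the same rule: an image_seq of
zero, as written by older UBI implementations, must never be treated as a
mismatch. The rule condensed into a sketch with hypothetical names:

#include <linux/types.h>

struct seq_state {
	u32 image_seq;	/* 0 until the first value is seen */
};

static int check_image_seq(struct seq_state *s, u32 image_seq)
{
	if (!s->image_seq)
		s->image_seq = image_seq;	/* adopt the first value seen */
	/* zero means "unset" on old images: never fail on it */
	if (image_seq && image_seq != s->image_seq)
		return -EINVAL;
	return 0;
}
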
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 315dcc6..e05dc62 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
 #include <linux/kthread.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 #include "ubi.h"
 
 /* Maximum length of the 'mtd=' parameter */
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index f5aa4b0..ead8613 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -407,6 +407,7 @@
 	 */
 	for (i = 0; i < pool_size; i++) {
 		int scrub = 0;
+		int image_seq;
 
 		pnum = be32_to_cpu(pebs[i]);
 
@@ -425,10 +426,16 @@
 		} else if (ret == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+
+		if (image_seq && (image_seq != ubi->image_seq)) {
 			ubi_err("bad image seq: 0x%x, expected: 0x%x",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
-			err = UBI_BAD_FASTMAP;
+			ret = UBI_BAD_FASTMAP;
 			goto out;
 		}
 
@@ -819,6 +826,10 @@
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 		list_move_tail(&tmp_aeb->u.list, &ai->free);
 
+	ubi_assert(list_empty(&used));
+	ubi_assert(list_empty(&eba_orphans));
+	ubi_assert(list_empty(&free));
+
 	/*
 	 * If fastmap is leaking PEBs (must not happen), raise a
 	 * fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@
 fail_bad:
 	ret = UBI_BAD_FASTMAP;
 fail:
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+
 	return ret;
 }
 
@@ -923,6 +947,8 @@
 	}
 
 	for (i = 0; i < used_blocks; i++) {
+		int image_seq;
+
 		pnum = be32_to_cpu(fmsb->block_loc[i]);
 
 		if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@
 		} else if (ret == UBI_IO_BITFLIPS)
 			fm->to_be_tortured[i] = 1;
 
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!ubi->image_seq)
-			ubi->image_seq = be32_to_cpu(ech->image_seq);
+			ubi->image_seq = image_seq;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("wrong image seq: %d instead of %d",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto free_hdr;
 		}
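
The new fail path drains its three lists with list_for_each_entry_safe(), the
iterator that keeps a lookahead pointer and therefore tolerates removal of
the current entry. The generic idiom, with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void drain(struct list_head *head)
{
	struct item *it, *tmp;

	/* "tmp" holds the next node, so freeing "it" is safe */
	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}
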
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index c95bfb1..02317c1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -599,10 +599,6 @@
 	return_unused_pool_pebs(ubi, pool);
 
 	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-		if (!ubi->free.rb_node ||
-		   (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-			break;
-
 		pool->pebs[pool->size] = __wl_get_peb(ubi);
 		if (pool->pebs[pool->size] < 0)
 			break;
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6..5a5d720 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
 
 proc-$(CONFIG_PROC_FS) += bond_procfs.o
 bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0d8f427..187b1b7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -136,41 +136,6 @@
 }
 
 /**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
-	struct slave *first_slave = bond_first_slave(bond);
-
-	return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
-	struct bonding *bond = __get_bond_by_port(port);
-	struct slave *slave = port->slave, *slave_next;
-
-	// If there's no bond for this port, or this is the last slave
-	if (bond == NULL)
-		return NULL;
-	slave_next = bond_next_slave(bond, slave);
-	if (!slave_next || bond_is_first_slave(bond, slave_next))
-		return NULL;
-
-	return &(SLAVE_AD_INFO(slave_next).port);
-}
-
-/**
  * __get_first_agg - get the first aggregator in the bond
  * @bond: the bond we're looking at
  *
@@ -190,28 +155,6 @@
 	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
-	struct slave *slave = aggregator->slave, *slave_next;
-	struct bonding *bond = bond_get_bond_by_slave(slave);
-
-	// If there's no bond for this aggregator, or this is the last slave
-	if (bond == NULL)
-		return NULL;
-	slave_next = bond_next_slave(bond, slave);
-	if (!slave_next || bond_is_first_slave(bond, slave_next))
-		return NULL;
-
-	return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
 /*
  * __agg_has_partner
  *
@@ -755,16 +698,15 @@
  */
 static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 {
-	struct aggregator *retval = NULL;
+	struct bonding *bond = aggregator->slave->bond;
+	struct list_head *iter;
+	struct slave *slave;
 
-	for (; aggregator; aggregator = __get_next_agg(aggregator)) {
-		if (aggregator->is_active) {
-			retval = aggregator;
-			break;
-		}
-	}
+	bond_for_each_slave(bond, slave, iter)
+		if (SLAVE_AD_INFO(slave).aggregator.is_active)
+			return &(SLAVE_AD_INFO(slave).aggregator);
 
-	return retval;
+	return NULL;
 }
 
 /**
@@ -1274,12 +1216,17 @@
 {
 	struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
 	struct port *last_port = NULL, *curr_port;
+	struct list_head *iter;
+	struct bonding *bond;
+	struct slave *slave;
 	int found = 0;
 
 	// if the port is already Selected, do nothing
 	if (port->sm_vars & AD_PORT_SELECTED)
 		return;
 
+	bond = __get_bond_by_port(port);
+
 	// if the port is connected to other aggregator, detach it
 	if (port->aggregator) {
 		// detach the port from its former aggregator
@@ -1320,8 +1267,8 @@
 		}
 	}
 	// search on all aggregators for a suitable aggregator for this port
-	for (aggregator = __get_first_agg(port); aggregator;
-	     aggregator = __get_next_agg(aggregator)) {
+	bond_for_each_slave(bond, slave, iter) {
+		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
 		// keep a free aggregator for later use(if needed)
 		if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@
 static void ad_agg_selection_logic(struct aggregator *agg)
 {
 	struct aggregator *best, *active, *origin;
+	struct bonding *bond = agg->slave->bond;
+	struct list_head *iter;
+	struct slave *slave;
 	struct port *port;
 
 	origin = agg;
 	active = __get_active_agg(agg);
 	best = (active && agg_device_up(active)) ? active : NULL;
 
-	do {
+	bond_for_each_slave(bond, slave, iter) {
+		agg = &(SLAVE_AD_INFO(slave).aggregator);
+
 		agg->is_active = 0;
 
 		if (agg->num_of_ports && agg_device_up(agg))
 			best = ad_agg_selection_test(best, agg);
-
-	} while ((agg = __get_next_agg(agg)));
+	}
 
 	if (best &&
 	    __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@
 			 best->lag_ports, best->slave,
 			 best->slave ? best->slave->dev->name : "NULL");
 
-		for (agg = __get_first_agg(best->lag_ports); agg;
-		     agg = __get_next_agg(agg)) {
+		bond_for_each_slave(bond, slave, iter) {
+			agg = &(SLAVE_AD_INFO(slave).aggregator);
 
 			pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
 				 agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@
 		}
 	}
 
-	if (origin->slave) {
-		struct bonding *bond;
-
-		bond = bond_get_bond_by_slave(origin->slave);
-		if (bond)
-			bond_3ad_set_carrier(bond);
-	}
+	bond_3ad_set_carrier(bond);
 }
 
 /**
@@ -1969,6 +1914,9 @@
 	struct port *port, *prev_port, *temp_port;
 	struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
 	int select_new_active_agg = 0;
+	struct bonding *bond = slave->bond;
+	struct slave *slave_iter;
+	struct list_head *iter;
 
 	// find the aggregator related to this slave
 	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@
 		// reason to search for new aggregator, and that we will find one
 		if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
 			// find new aggregator for the related port(s)
-			new_aggregator = __get_first_agg(port);
-			for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+			bond_for_each_slave(bond, slave_iter, iter) {
+				new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
 				// if the new aggregator is empty, or it is connected to our port only
 				if (!new_aggregator->lag_ports
 				    || ((new_aggregator->lag_ports == port)
 					&& !new_aggregator->lag_ports->next_port_in_aggregator))
 					break;
 			}
+			if (!slave_iter)
+				new_aggregator = NULL;
 			// if new aggregator found, copy the aggregator's parameters
 			// and connect the related lag_ports to the new aggregator
 			if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@
 				pr_info("%s: Removing an active aggregator\n",
 					slave->bond->dev->name);
 				// select new active aggregator
-				ad_agg_selection_logic(__get_first_agg(port));
+				temp_aggregator = __get_first_agg(port);
+				if (temp_aggregator)
+					ad_agg_selection_logic(temp_aggregator);
 			}
 		}
 	}
 
 	pr_debug("Unbinding port %d\n", port->actor_port_number);
 	// find the aggregator that this port is connected to
-	temp_aggregator = __get_first_agg(port);
-	for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+	bond_for_each_slave(bond, slave_iter, iter) {
+		temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
 		prev_port = NULL;
 		// search the port in the aggregator's related ports
 		for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    ad_work.work);
-	struct port *port;
 	struct aggregator *aggregator;
+	struct list_head *iter;
+	struct slave *slave;
+	struct port *port;
 
 	read_lock(&bond->lock);
 
 	//check if there are any slaves
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto re_arm;
 
 	// check whether the agg_select_timer has expired after initialization
 	if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+		slave = bond_first_slave(bond);
+		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
 		// select the active aggregator for the bond
-		if ((port = __get_first_port(bond))) {
+		if (port) {
 			if (!port->slave) {
 				pr_warning("%s: Warning: bond's first port is uninitialized\n",
 					   bond->dev->name);
@@ -2137,7 +2094,8 @@
 	}
 
 	// for each port run the state machines
-	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+	bond_for_each_slave(bond, slave, iter) {
+		port = &(SLAVE_AD_INFO(slave).port);
 		if (!port->slave) {
 			pr_warning("%s: Warning: Found an uninitialized port\n",
 				   bond->dev->name);
@@ -2382,9 +2340,12 @@
 				   struct ad_info *ad_info)
 {
 	struct aggregator *aggregator = NULL;
+	struct list_head *iter;
+	struct slave *slave;
 	struct port *port;
 
-	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
+		port = &(SLAVE_AD_INFO(slave).port);
 		if (port->aggregator && port->aggregator->is_active) {
 			aggregator = port->aggregator;
 			break;
@@ -2408,25 +2369,25 @@
 {
 	int ret;
 
-	read_lock(&bond->lock);
+	rcu_read_lock();
 	ret = __bond_3ad_get_active_agg_info(bond, ad_info);
-	read_unlock(&bond->lock);
+	rcu_read_unlock();
 
 	return ret;
 }
 
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
-	struct slave *slave, *start_at;
 	struct bonding *bond = netdev_priv(dev);
-	int slave_agg_no;
-	int slaves_in_agg;
-	int agg_id;
-	int i;
+	struct slave *slave, *first_ok_slave;
+	struct aggregator *agg;
 	struct ad_info ad_info;
+	struct list_head *iter;
+	int slaves_in_agg;
+	int slave_agg_no;
 	int res = 1;
+	int agg_id;
 
-	read_lock(&bond->lock);
 	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
 		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
 			 dev->name);
@@ -2437,20 +2398,28 @@
 	agg_id = ad_info.aggregator_id;
 
 	if (slaves_in_agg == 0) {
-		/*the aggregator is empty*/
 		pr_debug("%s: Error: active aggregator is empty\n", dev->name);
 		goto out;
 	}
 
-	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+	first_ok_slave = NULL;
 
-	bond_for_each_slave(bond, slave) {
-		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+	bond_for_each_slave_rcu(bond, slave, iter) {
+		agg = SLAVE_AD_INFO(slave).port.aggregator;
+		if (!agg || agg->aggregator_identifier != agg_id)
+			continue;
 
-		if (agg && (agg->aggregator_identifier == agg_id)) {
+		if (slave_agg_no >= 0) {
+			if (!first_ok_slave && SLAVE_IS_OK(slave))
+				first_ok_slave = slave;
 			slave_agg_no--;
-			if (slave_agg_no < 0)
-				break;
+			continue;
+		}
+
+		if (SLAVE_IS_OK(slave)) {
+			res = bond_dev_queue_xmit(bond, skb, slave->dev);
+			goto out;
 		}
 	}
 
@@ -2460,23 +2429,12 @@
 		goto out;
 	}
 
-	start_at = slave;
-
-	bond_for_each_slave_from(bond, slave, i, start_at) {
-		int slave_agg_id = 0;
-		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
-		if (agg)
-			slave_agg_id = agg->aggregator_identifier;
-
-		if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
-			res = bond_dev_queue_xmit(bond, skb, slave->dev);
-			break;
-		}
-	}
+	/* we couldn't find any suitable slave after the agg_no, so use
+	 * the first suitable one found, if any. */
+	if (first_ok_slave)
+		res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 
 out:
-	read_unlock(&bond->lock);
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
@@ -2515,11 +2473,12 @@
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
 	struct port *port = NULL;
+	struct list_head *iter;
 	struct slave *slave;
 	int lacp_fast;
 
 	lacp_fast = bond->params.lacp_fast;
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		port = &(SLAVE_AD_INFO(slave).port);
 		__get_state_machine_lock(port);
 		if (lacp_fast)
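
With __get_first_port()/__get_next_port() and their aggregator twins removed,
the 3ad code walks the slave list directly and derives each slave's embedded
port or aggregator. The resulting loop shape, assuming the bonding driver's
internal types:

static void walk_ports(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;
	struct port *port;

	bond_for_each_slave(bond, slave, iter) {
		port = &(SLAVE_AD_INFO(slave).port);
		if (!port->slave)
			continue;	/* uninitialized port, as warned above */
		/* run the per-port state machines here */
	}
}
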
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef57..0287240 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -223,13 +223,14 @@
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
 	struct slave *slave, *least_loaded;
+	struct list_head *iter;
 	long long max_gap;
 
 	least_loaded = NULL;
 	max_gap = LLONG_MIN;
 
 	/* Find the slave with the largest gap */
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (SLAVE_IS_OK(slave)) {
 			long long gap = compute_gap(slave);
 
@@ -382,30 +383,64 @@
 static struct slave *rlb_next_rx_slave(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-	struct slave *rx_slave, *slave, *start_at;
-	int i = 0;
+	struct slave *before = NULL, *rx_slave = NULL, *slave;
+	struct list_head *iter;
+	bool found = false;
 
-	if (bond_info->next_rx_slave)
-		start_at = bond_info->next_rx_slave;
-	else
-		start_at = bond_first_slave(bond);
-
-	rx_slave = NULL;
-
-	bond_for_each_slave_from(bond, slave, i, start_at) {
-		if (SLAVE_IS_OK(slave)) {
-			if (!rx_slave) {
+	bond_for_each_slave(bond, slave, iter) {
+		if (!SLAVE_IS_OK(slave))
+			continue;
+		if (!found) {
+			if (!before || before->speed < slave->speed)
+				before = slave;
+		} else {
+			if (!rx_slave || rx_slave->speed < slave->speed)
 				rx_slave = slave;
-			} else if (slave->speed > rx_slave->speed) {
-				rx_slave = slave;
-			}
 		}
+		if (slave == bond_info->rx_slave)
+			found = true;
 	}
+	/* we didn't find anything after the current slave, or we have
+	 * something better before and up to the current slave
+	 */
+	if (!rx_slave || (before && rx_slave->speed < before->speed))
+		rx_slave = before;
 
-	if (rx_slave) {
-		slave = bond_next_slave(bond, rx_slave);
-		bond_info->next_rx_slave = slave;
+	if (rx_slave)
+		bond_info->rx_slave = rx_slave;
+
+	return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+	struct slave *before = NULL, *rx_slave = NULL, *slave;
+	struct list_head *iter;
+	bool found = false;
+
+	bond_for_each_slave_rcu(bond, slave, iter) {
+		if (!SLAVE_IS_OK(slave))
+			continue;
+		if (!found) {
+			if (!before || before->speed < slave->speed)
+				before = slave;
+		} else {
+			if (!rx_slave || rx_slave->speed < slave->speed)
+				rx_slave = slave;
+		}
+		if (slave == bond_info->rx_slave)
+			found = true;
 	}
+	/* we didn't find anything after the current slave, or we have
+	 * something better before and up to the current slave
+	 */
+	if (!rx_slave || (before && rx_slave->speed < before->speed))
+		rx_slave = before;
+
+	if (rx_slave)
+		bond_info->rx_slave = rx_slave;
 
 	return rx_slave;
 }
@@ -626,12 +661,14 @@
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct arp_pkt *arp = arp_pkt(skb);
-	struct slave *assigned_slave;
+	struct slave *assigned_slave, *curr_active_slave;
 	struct rlb_client_info *client_info;
 	u32 hash_index = 0;
 
 	_lock_rx_hashtbl(bond);
 
+	curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
 	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
 	client_info = &(bond_info->rx_hashtbl[hash_index]);
 
@@ -656,14 +693,14 @@
 			 * that the new client can be assigned to this entry.
 			 */
 			if (bond->curr_active_slave &&
-			    client_info->slave != bond->curr_active_slave) {
-				client_info->slave = bond->curr_active_slave;
+			    client_info->slave != curr_active_slave) {
+				client_info->slave = curr_active_slave;
 				rlb_update_client(client_info);
 			}
 		}
 	}
 	/* assign a new slave */
-	assigned_slave = rlb_next_rx_slave(bond);
+	assigned_slave = __rlb_next_rx_slave(bond);
 
 	if (assigned_slave) {
 		if (!(client_info->assigned &&
@@ -726,7 +763,7 @@
 	/* Don't modify or load balance ARPs that do not originate locally
 	 * (e.g.,arrive via a bridge).
 	 */
-	if (!bond_slave_has_mac(bond, arp->mac_src))
+	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
 		return NULL;
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@
 
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
-	netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
 		if (upper->priv_flags & IFF_802_1Q_VLAN)
 			alb_send_lp_vid(slave, mac_addr,
 					vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-	struct slave *tmp_slave1, *free_mac_slave = NULL;
 	struct slave *has_bond_addr = bond->curr_active_slave;
+	struct slave *tmp_slave1, *free_mac_slave = NULL;
+	struct list_head *iter;
 
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		/* this is the first slave */
 		return 0;
 	}
@@ -1196,7 +1234,7 @@
 	/* The slave's address is equal to the address of the bond.
 	 * Search for a spare address in the bond for this slave.
 	 */
-	bond_for_each_slave(bond, tmp_slave1) {
+	bond_for_each_slave(bond, tmp_slave1, iter) {
 		if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
 			/* no slave has tmp_slave1's perm addr
 			 * as its curr addr
@@ -1246,15 +1284,16 @@
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-	char tmp_addr[ETH_ALEN];
-	struct slave *slave;
+	struct slave *slave, *rollback_slave;
+	struct list_head *iter;
 	struct sockaddr sa;
+	char tmp_addr[ETH_ALEN];
 	int res;
 
 	if (bond->alb_info.rlb_enabled)
 		return 0;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		/* save net_device's current hw address */
 		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 
@@ -1274,10 +1313,12 @@
 	sa.sa_family = bond->dev->type;
 
 	/* unwind from head to the slave that failed */
-	bond_for_each_slave_continue_reverse(bond, slave) {
-		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
-		dev_set_mac_address(slave->dev, &sa);
-		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+	bond_for_each_slave(bond, rollback_slave, iter) {
+		if (rollback_slave == slave)
+			break;
+		memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+		dev_set_mac_address(rollback_slave->dev, &sa);
+		memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
 	}
 
 	return res;
@@ -1337,11 +1378,6 @@
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
-	/* make sure that the curr_active_slave do not change during tx
-	 */
-	read_lock(&bond->lock);
-	read_lock(&bond->curr_slave_lock);
-
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP: {
 		const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@
 
 	if (!tx_slave) {
 		/* unbalanced or unassigned, send through primary */
-		tx_slave = bond->curr_active_slave;
+		tx_slave = rcu_dereference(bond->curr_active_slave);
 		bond_info->unbalanced_load += skb->len;
 	}
 
 	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-		if (tx_slave != bond->curr_active_slave) {
+		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
 			memcpy(eth_data->h_source,
 			       tx_slave->dev->dev_addr,
 			       ETH_ALEN);
@@ -1443,8 +1479,6 @@
 		}
 	}
 
-	read_unlock(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
@@ -1458,11 +1492,12 @@
 	struct bonding *bond = container_of(work, struct bonding,
 					    alb_work.work);
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+	struct list_head *iter;
 	struct slave *slave;
 
 	read_lock(&bond->lock);
 
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		bond_info->tx_rebalance_counter = 0;
 		bond_info->lp_counter = 0;
 		goto re_arm;
@@ -1480,7 +1515,7 @@
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave)
+		bond_for_each_slave(bond, slave, iter)
 			alb_send_learning_packets(slave, slave->dev->dev_addr);
 
 		read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@
 
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			tlb_clear_slave(bond, slave, 1);
 			if (slave == bond->curr_active_slave) {
 				SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-	if (!list_empty(&bond->slave_list))
+	if (bond_has_slaves(bond))
 		alb_change_hw_addr_on_detach(bond, slave);
 
 	tlb_clear_slave(bond, slave, 0);
 
 	if (bond->alb_info.rlb_enabled) {
-		bond->alb_info.next_rx_slave = NULL;
+		bond->alb_info.rx_slave = NULL;
 		rlb_clear_slave(bond, slave);
 	}
 }
@@ -1669,7 +1704,7 @@
 	swap_slave = bond->curr_active_slave;
 	rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-	if (!new_slave || list_empty(&bond->slave_list))
+	if (!new_slave || !bond_has_slaves(bond))
 		return;
 
 	/* set the new curr_active_slave to the bonds mac address
@@ -1692,6 +1727,23 @@
 
 	ASSERT_RTNL();
 
+	/* in TLB mode, the slave might flip down/up with the old dev_addr,
+	 * and thus filter bond->dev_addr's packets, so force bond's mac
+	 */
+	if (bond->params.mode == BOND_MODE_TLB) {
+		struct sockaddr sa;
+		u8 tmp_addr[ETH_ALEN];
+
+		memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+		memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+		sa.sa_family = bond->dev->type;
+		/* we don't care if it can't change its mac, best effort */
+		dev_set_mac_address(new_slave->dev, &sa);
+
+		memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+	}
+
 	/* curr_active_slave must be set before calling alb_swap_mac_addr */
 	if (swap_slave) {
 		/* swap mac address */
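
The alb_set_mac_address() rewrite above replaces the reverse-walking unwind
(bond_for_each_slave_continue_reverse) with a forward walk that stops at the
slave that failed. A minimal userspace sketch of that pattern, assuming a
made-up apply()/undo() pair rather than the driver's dev_set_mac_address()
calls:

	#include <stdio.h>

	#define N_SLAVES 4

	static int apply(int i) { return i == 2 ? -1 : 0; }	/* fails on slave 2 */
	static void undo(int i) { printf("undo slave %d\n", i); }

	int main(void)
	{
		int i, failed = -1;

		for (i = 0; i < N_SLAVES; i++)
			if (apply(i) < 0) {
				failed = i;
				break;
			}

		/* unwind: restart from the head, stop at the slave that failed */
		for (i = 0; failed >= 0 && i < N_SLAVES; i++) {
			if (i == failed)
				break;
			undo(i);
		}
		return failed < 0 ? 0 : 1;
	}
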
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index c5eff5d..4226044 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -154,9 +154,7 @@
 	u8			rx_ntt;	/* flag - need to transmit
 					 * to all rx clients
 					 */
-	struct slave		*next_rx_slave;/* next slave to be assigned
-						* to a new rx client for
-						*/
+	struct slave		*rx_slave; /* last slave to xmit from */
 	u8			primary_is_promisc;	   /* boolean */
 	u32			rlb_promisc_timeout_counter;/* counts primary
 							     * promiscuity time
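
The renamed rx_slave field records the last slave a receive-load-balancing
client was assigned, so __rlb_next_rx_slave() can resume the round robin
after it instead of tracking a "next" pointer. A standalone C sketch of
that resume-from-cursor idea; the array and link_up[] flags are
illustrative, not the driver's structures:

	#include <stdbool.h>
	#include <stdio.h>

	#define N 4

	static bool link_up[N] = { true, false, true, true };
	static int last = -1;	/* models bond_info->rx_slave */

	static int next_rx_slave(void)
	{
		int i, idx;

		for (i = 1; i <= N; i++) {	/* at most one full lap */
			idx = (last + i) % N;
			if (link_up[idx]) {
				last = idx;	/* remember for the next call */
				return idx;
			}
		}
		return -1;			/* no usable slave */
	}

	int main(void)
	{
		int k;

		for (k = 0; k < 5; k++)
			printf("assign rx slave %d\n", next_rx_slave());
		return 0;
	}
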
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e883bfe..4dd5ee2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -78,6 +78,8 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
+#include <net/flow_keys.h>
+#include <linux/reciprocal_div.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -110,6 +112,7 @@
 static int all_slaves_active;
 static struct bond_params bonding_defaults;
 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
+static int packets_per_slave = 1;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -159,7 +162,8 @@
 module_param(xmit_hash_policy, charp, 0);
 MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
 				   "0 for layer 2 (default), 1 for layer 3+4, "
-				   "2 for layer 2+3");
+				   "2 for layer 2+3, 3 for encap layer 2+3, "
+				   "4 for encap layer 3+4");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
@@ -181,6 +185,10 @@
 module_param(resend_igmp, int, 0);
 MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
 			      "link failure");
+module_param(packets_per_slave, int, 0);
+MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
+				    "mode; 0 for a random slave, 1 packet per "
+				    "slave (default), >1 packets per slave.");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -217,6 +225,8 @@
 {	"layer2",		BOND_XMIT_POLICY_LAYER2},
 {	"layer3+4",		BOND_XMIT_POLICY_LAYER34},
 {	"layer2+3",		BOND_XMIT_POLICY_LAYER23},
+{	"encap2+3",		BOND_XMIT_POLICY_ENCAP23},
+{	"encap3+4",		BOND_XMIT_POLICY_ENCAP34},
 {	NULL,			-1},
 };
 
@@ -332,10 +342,11 @@
 				__be16 proto, u16 vid)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave;
+	struct slave *slave, *rollback_slave;
+	struct list_head *iter;
 	int res;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		res = vlan_vid_add(slave->dev, proto, vid);
 		if (res)
 			goto unwind;
@@ -344,9 +355,13 @@
 	return 0;
 
 unwind:
-	/* unwind from the slave that failed */
-	bond_for_each_slave_continue_reverse(bond, slave)
-		vlan_vid_del(slave->dev, proto, vid);
+	/* unwind to the slave that failed */
+	bond_for_each_slave(bond, rollback_slave, iter) {
+		if (rollback_slave == slave)
+			break;
+
+		vlan_vid_del(rollback_slave->dev, proto, vid);
+	}
 
 	return res;
 }
@@ -360,9 +375,10 @@
 				 __be16 proto, u16 vid)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct list_head *iter;
 	struct slave *slave;
 
-	bond_for_each_slave(bond, slave)
+	bond_for_each_slave(bond, slave, iter)
 		vlan_vid_del(slave->dev, proto, vid);
 
 	if (bond_is_lb(bond))
@@ -382,15 +398,16 @@
  */
 static int bond_set_carrier(struct bonding *bond)
 {
+	struct list_head *iter;
 	struct slave *slave;
 
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto down;
 
 	if (bond->params.mode == BOND_MODE_8023AD)
 		return bond_3ad_set_carrier(bond);
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (slave->link == BOND_LINK_UP) {
 			if (!netif_carrier_ok(bond->dev)) {
 				netif_carrier_on(bond->dev);
@@ -522,7 +539,9 @@
  */
 static int bond_set_promiscuity(struct bonding *bond, int inc)
 {
+	struct list_head *iter;
 	int err = 0;
+
 	if (USES_PRIMARY(bond->params.mode)) {
 		/* write lock already acquired */
 		if (bond->curr_active_slave) {
@@ -532,7 +551,7 @@
 	} else {
 		struct slave *slave;
 
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			err = dev_set_promiscuity(slave->dev, inc);
 			if (err)
 				return err;
@@ -546,7 +565,9 @@
  */
 static int bond_set_allmulti(struct bonding *bond, int inc)
 {
+	struct list_head *iter;
 	int err = 0;
+
 	if (USES_PRIMARY(bond->params.mode)) {
 		/* write lock already acquired */
 		if (bond->curr_active_slave) {
@@ -556,7 +577,7 @@
 	} else {
 		struct slave *slave;
 
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			err = dev_set_allmulti(slave->dev, inc);
 			if (err)
 				return err;
@@ -774,43 +795,24 @@
 /**
  * bond_find_best_slave - select the best available slave to be the active one
  * @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
  */
 static struct slave *bond_find_best_slave(struct bonding *bond)
 {
-	struct slave *new_active, *old_active;
-	struct slave *bestslave = NULL;
+	struct slave *slave, *bestslave = NULL;
+	struct list_head *iter;
 	int mintime = bond->params.updelay;
-	int i;
 
-	new_active = bond->curr_active_slave;
+	if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+	    bond_should_change_active(bond))
+		return bond->primary_slave;
 
-	if (!new_active) { /* there were no active slaves left */
-		new_active = bond_first_slave(bond);
-		if (!new_active)
-			return NULL; /* still no slave, return NULL */
-	}
-
-	if ((bond->primary_slave) &&
-	    bond->primary_slave->link == BOND_LINK_UP &&
-	    bond_should_change_active(bond)) {
-		new_active = bond->primary_slave;
-	}
-
-	/* remember where to stop iterating over the slaves */
-	old_active = new_active;
-
-	bond_for_each_slave_from(bond, new_active, i, old_active) {
-		if (new_active->link == BOND_LINK_UP) {
-			return new_active;
-		} else if (new_active->link == BOND_LINK_BACK &&
-			   IS_UP(new_active->dev)) {
-			/* link up, but waiting for stabilization */
-			if (new_active->delay < mintime) {
-				mintime = new_active->delay;
-				bestslave = new_active;
-			}
+	bond_for_each_slave(bond, slave, iter) {
+		if (slave->link == BOND_LINK_UP)
+			return slave;
+		if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+		    slave->delay < mintime) {
+			mintime = slave->delay;
+			bestslave = slave;
 		}
 	}
 
@@ -971,35 +973,6 @@
 	}
 }
 
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
-	list_add_tail_rcu(&new_slave->list, &bond->slave_list);
-	bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
-	list_del_rcu(&slave->list);
-	bond->slave_cnt--;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline int slave_enable_netpoll(struct slave *slave)
 {
@@ -1046,9 +1019,10 @@
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct list_head *iter;
 	struct slave *slave;
 
-	bond_for_each_slave(bond, slave)
+	bond_for_each_slave(bond, slave, iter)
 		if (IS_UP(slave->dev))
 			slave_disable_netpoll(slave);
 }
@@ -1056,10 +1030,11 @@
 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
 	struct bonding *bond = netdev_priv(dev);
+	struct list_head *iter;
 	struct slave *slave;
 	int err = 0;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		err = slave_enable_netpoll(slave);
 		if (err) {
 			bond_netpoll_cleanup(dev);
@@ -1087,10 +1062,11 @@
 					   netdev_features_t features)
 {
 	struct bonding *bond = netdev_priv(dev);
+	struct list_head *iter;
 	netdev_features_t mask;
 	struct slave *slave;
 
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		/* Disable adding VLANs to empty bond. But why? --mq */
 		features |= NETIF_F_VLAN_CHALLENGED;
 		return features;
@@ -1100,7 +1076,7 @@
 	features &= ~NETIF_F_ONE_FOR_ALL;
 	features |= NETIF_F_ALL_FOR_ALL;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		features = netdev_increment_features(features,
 						     slave->dev->features,
 						     mask);
@@ -1118,16 +1094,17 @@
 {
 	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+	struct net_device *bond_dev = bond->dev;
+	struct list_head *iter;
+	struct slave *slave;
 	unsigned short max_hard_header_len = ETH_HLEN;
 	unsigned int gso_max_size = GSO_MAX_SIZE;
-	struct net_device *bond_dev = bond->dev;
 	u16 gso_max_segs = GSO_MAX_SEGS;
-	struct slave *slave;
 
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto done;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		vlan_features = netdev_increment_features(vlan_features,
 			slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
@@ -1233,15 +1210,16 @@
 }
 
 static int bond_master_upper_dev_link(struct net_device *bond_dev,
-				      struct net_device *slave_dev)
+				      struct net_device *slave_dev,
+				      struct slave *slave)
 {
 	int err;
 
-	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+	err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
 	if (err)
 		return err;
 	slave_dev->flags |= IFF_SLAVE;
-	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 	return 0;
 }
 
@@ -1250,7 +1228,7 @@
 {
 	netdev_upper_dev_unlink(slave_dev, bond_dev);
 	slave_dev->flags &= ~IFF_SLAVE;
-	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 }
 
 /* enslave device <slave> to bond device <master> */
@@ -1258,7 +1236,7 @@
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-	struct slave *new_slave = NULL;
+	struct slave *new_slave = NULL, *prev_slave;
 	struct sockaddr addr;
 	int link_reporting;
 	int res = 0, i;
@@ -1313,7 +1291,7 @@
 	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
 	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
 	 */
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		if (bond_dev->type != slave_dev->type) {
 			pr_debug("%s: change device type from %d to %d\n",
 				 bond_dev->name,
@@ -1352,7 +1330,7 @@
 	}
 
 	if (slave_ops->ndo_set_mac_address == NULL) {
-		if (list_empty(&bond->slave_list)) {
+		if (!bond_has_slaves(bond)) {
 			pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
 				   bond_dev->name);
 			bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1346,7 @@
 
 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
-	if (list_empty(&bond->slave_list) &&
+	if (!bond_has_slaves(bond) &&
 	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
 		bond_set_dev_addr(bond->dev, slave_dev);
 
@@ -1377,7 +1355,6 @@
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}
-	INIT_LIST_HEAD(&new_slave->list);
 	/*
 	 * Set the new_slave's queue_id to be zero.  Queue ID mapping
 	 * is set via sysfs or module option if desired.
@@ -1413,17 +1390,11 @@
 		}
 	}
 
-	res = bond_master_upper_dev_link(bond_dev, slave_dev);
-	if (res) {
-		pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
-		goto err_restore_mac;
-	}
-
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
 		pr_debug("Opening slave %s failed\n", slave_dev->name);
-		goto err_unset_master;
+		goto err_restore_mac;
 	}
 
 	new_slave->bond = bond;
@@ -1479,21 +1450,13 @@
 		goto err_close;
 	}
 
-	write_lock_bh(&bond->lock);
-
-	bond_attach_slave(bond, new_slave);
+	prev_slave = bond_last_slave(bond);
 
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	write_unlock_bh(&bond->lock);
-
-	bond_compute_features(bond);
-
 	bond_update_speed_duplex(new_slave);
 
-	read_lock(&bond->lock);
-
 	new_slave->last_arp_rx = jiffies -
 		(msecs_to_jiffies(bond->params.arp_interval) + 1);
 	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1517,9 @@
 		}
 	}
 
-	write_lock_bh(&bond->curr_slave_lock);
-
 	switch (bond->params.mode) {
 	case BOND_MODE_ACTIVEBACKUP:
 		bond_set_slave_inactive_flags(new_slave);
-		bond_select_active_slave(bond);
 		break;
 	case BOND_MODE_8023AD:
 		/* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1528,13 @@
 		 */
 		bond_set_slave_inactive_flags(new_slave);
 		/* if this is the first slave */
-		if (bond_first_slave(bond) == new_slave) {
+		if (!prev_slave) {
 			SLAVE_AD_INFO(new_slave).id = 1;
 			/* Initialize AD with the number of times that the AD timer is called in 1 second
 			 * can be called only after the mac address of the bond is set
 			 */
 			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
 		} else {
-			struct slave *prev_slave;
-
-			prev_slave = bond_prev_slave(bond, new_slave);
 			SLAVE_AD_INFO(new_slave).id =
 				SLAVE_AD_INFO(prev_slave).id + 1;
 		}
@@ -1588,7 +1545,6 @@
 	case BOND_MODE_ALB:
 		bond_set_active_slave(new_slave);
 		bond_set_slave_inactive_flags(new_slave);
-		bond_select_active_slave(bond);
 		break;
 	default:
 		pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1562,6 @@
 		break;
 	} /* switch(bond_mode) */
 
-	write_unlock_bh(&bond->curr_slave_lock);
-
-	bond_set_carrier(bond);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	slave_dev->npinfo = bond->dev->npinfo;
 	if (slave_dev->npinfo) {
@@ -1624,17 +1576,29 @@
 	}
 #endif
 
-	read_unlock(&bond->lock);
-
-	res = bond_create_slave_symlinks(bond_dev, slave_dev);
-	if (res)
-		goto err_detach;
-
 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
 					 new_slave);
 	if (res) {
 		pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-		goto err_dest_symlinks;
+		goto err_detach;
+	}
+
+	res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+	if (res) {
+		pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+		goto err_unregister;
+	}
+
+	bond->slave_cnt++;
+	bond_compute_features(bond);
+	bond_set_carrier(bond);
+
+	if (USES_PRIMARY(bond->params.mode)) {
+		read_lock(&bond->lock);
+		write_lock_bh(&bond->curr_slave_lock);
+		bond_select_active_slave(bond);
+		write_unlock_bh(&bond->curr_slave_lock);
+		read_unlock(&bond->lock);
 	}
 
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1610,8 @@
 	return 0;
 
 /* Undo stages on error */
-err_dest_symlinks:
-	bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+	netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
 	if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1619,6 @@
 
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
 	write_lock_bh(&bond->lock);
-	bond_detach_slave(bond, new_slave);
 	if (bond->primary_slave == new_slave)
 		bond->primary_slave = NULL;
 	if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1638,6 @@
 	slave_dev->priv_flags &= ~IFF_BONDING;
 	dev_close(slave_dev);
 
-err_unset_master:
-	bond_upper_dev_unlink(bond_dev, slave_dev);
-
 err_restore_mac:
 	if (!bond->params.fail_over_mac) {
 		/* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1656,8 @@
 	kfree(new_slave);
 
 err_undo_flags:
-	bond_compute_features(bond);
 	/* Enslave of first slave has failed and we need to fix master's mac */
-	if (list_empty(&bond->slave_list) &&
+	if (!bond_has_slaves(bond) &&
 	    ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
 		eth_hw_addr_random(bond_dev);
 
@@ -1749,6 +1708,11 @@
 	}
 
 	write_unlock_bh(&bond->lock);
+
+	/* release the slave from its bond */
+	bond->slave_cnt--;
+
+	bond_upper_dev_unlink(bond_dev, slave_dev);
 	/* unregister rx_handler early so bond_handle_frame wouldn't be called
 	 * for this slave anymore.
 	 */
@@ -1772,12 +1736,9 @@
 
 	bond->current_arp_slave = NULL;
 
-	/* release the slave from its bond */
-	bond_detach_slave(bond, slave);
-
 	if (!all && !bond->params.fail_over_mac) {
 		if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
-		    !list_empty(&bond->slave_list))
+		    bond_has_slaves(bond))
 			pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
 				   bond_dev->name, slave_dev->name,
 				   slave->perm_hwaddr,
@@ -1820,7 +1781,7 @@
 		write_lock_bh(&bond->lock);
 	}
 
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		bond_set_carrier(bond);
 		eth_hw_addr_random(bond_dev);
 
@@ -1836,7 +1797,7 @@
 	unblock_netpoll_tx();
 	synchronize_rcu();
 
-	if (list_empty(&bond->slave_list)) {
+	if (!bond_has_slaves(bond)) {
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
 		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
 	}
@@ -1848,8 +1809,6 @@
 			bond_dev->name, slave_dev->name, bond_dev->name);
 
 	/* must do this from outside any spinlocks */
-	bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
 
 	/* If the mode USES_PRIMARY, then this case was handled above by
@@ -1873,8 +1832,6 @@
 		bond_hw_addr_flush(bond_dev, slave_dev);
 	}
 
-	bond_upper_dev_unlink(bond_dev, slave_dev);
-
 	slave_disable_netpoll(slave);
 
 	/* close slave before restoring its mac address */
@@ -1913,7 +1870,7 @@
 	int ret;
 
 	ret = bond_release(bond_dev, slave_dev);
-	if (ret == 0 && list_empty(&bond->slave_list)) {
+	if (ret == 0 && !bond_has_slaves(bond)) {
 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
 		pr_info("%s: destroying bond %s.\n",
 			bond_dev->name, bond_dev->name);
@@ -1922,61 +1879,6 @@
 	return ret;
 }
 
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- *  - <slave_dev> is not found in the list.
- *  - There is not active slave now.
- *  - <slave_dev> is already active.
- *  - The link state of <slave_dev> is not BOND_LINK_UP.
- *  - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
-	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *old_active = NULL;
-	struct slave *new_active = NULL;
-	int res = 0;
-
-	if (!USES_PRIMARY(bond->params.mode))
-		return -EINVAL;
-
-	/* Verify that bond_dev is indeed the master of slave_dev */
-	if (!(slave_dev->flags & IFF_SLAVE) ||
-	    !netdev_has_upper_dev(slave_dev, bond_dev))
-		return -EINVAL;
-
-	read_lock(&bond->lock);
-
-	old_active = bond->curr_active_slave;
-	new_active = bond_get_slave_by_dev(bond, slave_dev);
-	/*
-	 * Changing to the current active: do nothing; return success.
-	 */
-	if (new_active && new_active == old_active) {
-		read_unlock(&bond->lock);
-		return 0;
-	}
-
-	if (new_active &&
-	    old_active &&
-	    new_active->link == BOND_LINK_UP &&
-	    IS_UP(new_active->dev)) {
-		block_netpoll_tx();
-		write_lock_bh(&bond->curr_slave_lock);
-		bond_change_active_slave(bond, new_active);
-		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
-	} else
-		res = -EINVAL;
-
-	read_unlock(&bond->lock);
-
-	return res;
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1896,12 @@
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct list_head *iter;
 	int i = 0, res = -ENODEV;
 	struct slave *slave;
 
 	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (i++ == (int)info->slave_id) {
 			res = 0;
 			strcpy(info->slave_name, slave->dev->name);
@@ -2019,12 +1922,13 @@
 static int bond_miimon_inspect(struct bonding *bond)
 {
 	int link_state, commit = 0;
+	struct list_head *iter;
 	struct slave *slave;
 	bool ignore_updelay;
 
 	ignore_updelay = !bond->curr_active_slave ? true : false;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		slave->new_link = BOND_LINK_NOCHANGE;
 
 		link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2022,10 @@
 
 static void bond_miimon_commit(struct bonding *bond)
 {
+	struct list_head *iter;
 	struct slave *slave;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		switch (slave->new_link) {
 		case BOND_LINK_NOCHANGE:
 			continue;
@@ -2225,7 +2130,7 @@
 
 	delay = msecs_to_jiffies(bond->params.miimon);
 
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto re_arm;
 
 	should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2179,7 @@
 		return true;
 
 	rcu_read_lock();
-	netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
 		if (ip == bond_confirm_addr(upper, 0, ip)) {
 			ret = true;
 			break;
@@ -2349,10 +2254,12 @@
 		 *
 		 * TODO: QinQ?
 		 */
-		netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+		netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+						  vlan_iter) {
 			if (!is_vlan_dev(vlan_upper))
 				continue;
-			netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+			netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+							  iter) {
 				if (upper == rt->dst.dev) {
 					vlan_id = vlan_dev_vlan_id(vlan_upper);
 					rcu_read_unlock();
@@ -2365,7 +2272,7 @@
 		 * our upper vlans, then just search for any dev that
 		 * matches, and in case it's a vlan - save the id
 		 */
-		netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+		netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
 			if (upper == rt->dst.dev) {
 				/* if it's a vlan - get its VID */
 				if (is_vlan_dev(upper))
@@ -2512,11 +2419,12 @@
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
 	struct slave *slave, *oldcurrent;
+	struct list_head *iter;
 	int do_failover = 0;
 
 	read_lock(&bond->lock);
 
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto re_arm;
 
 	oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2436,7 @@
 	 * TODO: what about up/down delay in arp mode? it wasn't here before
 	 *       so it can wait
 	 */
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		unsigned long trans_start = dev_trans_start(slave->dev);
 
 		if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2527,11 @@
 static int bond_ab_arp_inspect(struct bonding *bond)
 {
 	unsigned long trans_start, last_rx;
+	struct list_head *iter;
 	struct slave *slave;
 	int commit = 0;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		slave->new_link = BOND_LINK_NOCHANGE;
 		last_rx = slave_last_rx(bond, slave);
 
@@ -2689,9 +2598,10 @@
 static void bond_ab_arp_commit(struct bonding *bond)
 {
 	unsigned long trans_start;
+	struct list_head *iter;
 	struct slave *slave;
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		switch (slave->new_link) {
 		case BOND_LINK_NOCHANGE:
 			continue;
@@ -2762,8 +2672,9 @@
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-	struct slave *slave, *next_slave;
-	int i;
+	struct slave *slave, *before = NULL, *new_slave = NULL;
+	struct list_head *iter;
+	bool found = false;
 
 	read_lock(&bond->curr_slave_lock);
 
@@ -2793,18 +2704,12 @@
 
 	bond_set_slave_inactive_flags(bond->current_arp_slave);
 
-	/* search for next candidate */
-	next_slave = bond_next_slave(bond, bond->current_arp_slave);
-	bond_for_each_slave_from(bond, slave, i, next_slave) {
-		if (IS_UP(slave->dev)) {
-			slave->link = BOND_LINK_BACK;
-			bond_set_slave_active_flags(slave);
-			bond_arp_send_all(bond, slave);
-			slave->jiffies = jiffies;
-			bond->current_arp_slave = slave;
-			break;
-		}
+	/* pick the first up slave after current_arp_slave as the new
+	 * candidate; remember the first up slave seen before it as a
+	 * wrap-around fallback
+	 */
+	bond_for_each_slave(bond, slave, iter) {
+		if (!found && !before && IS_UP(slave->dev))
+			before = slave;
 
+		if (found && !new_slave && IS_UP(slave->dev))
+			new_slave = slave;
 		/* if the link state is up at this point, we
 		 * mark it down - this can happen if we have
 		 * simultaneous link failures and
@@ -2812,7 +2717,7 @@
 		 * one the current slave so it is still marked
 		 * up when it is actually down
 		 */
-		if (slave->link == BOND_LINK_UP) {
+		if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
 			slave->link = BOND_LINK_DOWN;
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
@@ -2822,7 +2727,22 @@
 			pr_info("%s: backup interface %s is now down.\n",
 				bond->dev->name, slave->dev->name);
 		}
+		if (slave == bond->current_arp_slave)
+			found = true;
 	}
+
+	if (!new_slave && before)
+		new_slave = before;
+
+	if (!new_slave)
+		return;
+
+	new_slave->link = BOND_LINK_BACK;
+	bond_set_slave_active_flags(new_slave);
+	bond_arp_send_all(bond, new_slave);
+	new_slave->jiffies = jiffies;
+	bond->current_arp_slave = new_slave;
 }
 
 void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2836,7 +2756,7 @@
 
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-	if (list_empty(&bond->slave_list))
+	if (!bond_has_slaves(bond))
 		goto re_arm;
 
 	should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2953,85 @@
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
 	if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-		return (data->h_dest[5] ^ data->h_source[5]) % count;
+		return data->h_dest[5] ^ data->h_source[5];
 
 	return 0;
 }
 
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+			      struct flow_keys *fk)
 {
-	const struct ethhdr *data;
+	const struct ipv6hdr *iph6;
 	const struct iphdr *iph;
-	const struct ipv6hdr *ipv6h;
-	u32 v6hash;
-	const __be32 *s, *d;
+	int noff, proto = -1;
 
-	if (skb->protocol == htons(ETH_P_IP) &&
-	    pskb_network_may_pull(skb, sizeof(*iph))) {
+	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+		return skb_flow_dissect(skb, fk);
+
+	fk->ports = 0;
+	noff = skb_network_offset(skb);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+			return false;
 		iph = ip_hdr(skb);
-		data = (struct ethhdr *)skb->data;
-		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-			(data->h_dest[5] ^ data->h_source[5])) % count;
-	} else if (skb->protocol == htons(ETH_P_IPV6) &&
-		   pskb_network_may_pull(skb, sizeof(*ipv6h))) {
-		ipv6h = ipv6_hdr(skb);
-		data = (struct ethhdr *)skb->data;
-		s = &ipv6h->saddr.s6_addr32[0];
-		d = &ipv6h->daddr.s6_addr32[0];
-		v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-		v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
-		return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
+		fk->src = iph->saddr;
+		fk->dst = iph->daddr;
+		noff += iph->ihl << 2;
+		if (!ip_is_fragment(iph))
+			proto = iph->protocol;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+			return false;
+		iph6 = ipv6_hdr(skb);
+		fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+		fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+		noff += sizeof(*iph6);
+		proto = iph6->nexthdr;
+	} else {
+		return false;
 	}
+	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+		fk->ports = skb_flow_get_ports(skb, noff, proto);
 
-	return bond_xmit_hash_policy_l2(skb, count);
+	return true;
 }
 
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device;
+ * the result is reduced modulo count before it is returned.
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 {
-	u32 layer4_xor = 0;
-	const struct iphdr *iph;
-	const struct ipv6hdr *ipv6h;
-	const __be32 *s, *d;
-	const __be16 *l4 = NULL;
-	__be16 _l4[2];
-	int noff = skb_network_offset(skb);
-	int poff;
+	struct flow_keys flow;
+	u32 hash;
 
-	if (skb->protocol == htons(ETH_P_IP) &&
-	    pskb_may_pull(skb, noff + sizeof(*iph))) {
-		iph = ip_hdr(skb);
-		poff = proto_ports_offset(iph->protocol);
+	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+	    !bond_flow_dissect(bond, skb, &flow))
+		return bond_eth_hash(skb) % count;
 
-		if (!ip_is_fragment(iph) && poff >= 0) {
-			l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
-						sizeof(_l4), &_l4);
-			if (l4)
-				layer4_xor = ntohs(l4[0] ^ l4[1]);
-		}
-		return (layer4_xor ^
-			((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-	} else if (skb->protocol == htons(ETH_P_IPV6) &&
-		   pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
-		ipv6h = ipv6_hdr(skb);
-		poff = proto_ports_offset(ipv6h->nexthdr);
-		if (poff >= 0) {
-			l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
-						sizeof(_l4), &_l4);
-			if (l4)
-				layer4_xor = ntohs(l4[0] ^ l4[1]);
-		}
-		s = &ipv6h->saddr.s6_addr32[0];
-		d = &ipv6h->daddr.s6_addr32[0];
-		layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-		layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
-			       (layer4_xor >> 8);
-		return layer4_xor % count;
-	}
+	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+		hash = bond_eth_hash(skb);
+	else
+		hash = (__force u32)flow.ports;
+	hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+	hash ^= (hash >> 16);
+	hash ^= (hash >> 8);
 
-	return bond_xmit_hash_policy_l2(skb, count);
+	return hash % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
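
The unified bond_xmit_hash() above xors the dissected addresses (plus ports
for the layer 3+4 policies, or the L2 hash for layer 2+3) into one word,
folds the upper bits down, and reduces modulo the slave count. A minimal
userspace model of that fold, with made-up flow values:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int fold_hash(uint32_t ports, uint32_t src, uint32_t dst,
				      unsigned int count)
	{
		uint32_t hash = ports ^ src ^ dst;

		hash ^= hash >> 16;
		hash ^= hash >> 8;
		return hash % count;
	}

	int main(void)
	{
		/* e.g. 10.0.0.1 -> 10.0.0.2, sport 40000, dport 80, 3 slaves */
		printf("slave %u\n", fold_hash((40000u << 16) | 80,
					       0x0a000001, 0x0a000002, 3));
		return 0;
	}
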
@@ -3155,13 +3061,14 @@
 static int bond_open(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct list_head *iter;
 	struct slave *slave;
 
 	/* reset slave->backup and slave->inactive */
 	read_lock(&bond->lock);
-	if (!list_empty(&bond->slave_list)) {
+	if (bond_has_slaves(bond)) {
 		read_lock(&bond->curr_slave_lock);
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
 				&& (slave != bond->curr_active_slave)) {
 				bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3128,13 @@
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct rtnl_link_stats64 temp;
+	struct list_head *iter;
 	struct slave *slave;
 
 	memset(stats, 0, sizeof(*stats));
 
 	read_lock_bh(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		const struct rtnl_link_stats64 *sstats =
 			dev_get_stats(slave->dev, &temp);
 
@@ -3263,6 +3171,7 @@
 
 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
 {
+	struct bonding *bond = netdev_priv(bond_dev);
 	struct net_device *slave_dev = NULL;
 	struct ifbond k_binfo;
 	struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3202,6 @@
 
 
 		if (mii->reg_num == 1) {
-			struct bonding *bond = netdev_priv(bond_dev);
 			mii->val_out = 0;
 			read_lock(&bond->lock);
 			read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3273,7 @@
 			break;
 		case BOND_CHANGE_ACTIVE_OLD:
 		case SIOCBONDCHANGEACTIVE:
-			res = bond_ioctl_change_active(bond_dev, slave_dev);
+			res = bond_option_active_slave_set(bond, slave_dev);
 			break;
 		default:
 			res = -EOPNOTSUPP;
@@ -3393,22 +3301,24 @@
 static void bond_set_rx_mode(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct list_head *iter;
 	struct slave *slave;
 
-	ASSERT_RTNL();
 
+	rcu_read_lock();
 	if (USES_PRIMARY(bond->params.mode)) {
-		slave = rtnl_dereference(bond->curr_active_slave);
+		slave = rcu_dereference(bond->curr_active_slave);
 		if (slave) {
 			dev_uc_sync(slave->dev, bond_dev);
 			dev_mc_sync(slave->dev, bond_dev);
 		}
 	} else {
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave_rcu(bond, slave, iter) {
 			dev_uc_sync_multiple(slave->dev, bond_dev);
 			dev_mc_sync_multiple(slave->dev, bond_dev);
 		}
 	}
+	rcu_read_unlock();
 }
 
 static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3381,8 @@
 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave;
+	struct slave *slave, *rollback_slave;
+	struct list_head *iter;
 	int res = 0;
 
 	pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3403,9 @@
 	 * call to the base driver.
 	 */
 
-	bond_for_each_slave(bond, slave) {
-		pr_debug("s %p s->p %p c_m %p\n",
+	bond_for_each_slave(bond, slave, iter) {
+		pr_debug("s %p c_m %p\n",
 			 slave,
-			 bond_prev_slave(bond, slave),
 			 slave->dev->netdev_ops->ndo_change_mtu);
 
 		res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3430,16 @@
 
 unwind:
 	/* unwind from head to the slave that failed */
-	bond_for_each_slave_continue_reverse(bond, slave) {
+	bond_for_each_slave(bond, rollback_slave, iter) {
 		int tmp_res;
 
-		tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+		if (rollback_slave == slave)
+			break;
+
+		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
 		if (tmp_res) {
 			pr_debug("unwind err %d dev %s\n",
-				 tmp_res, slave->dev->name);
+				 tmp_res, rollback_slave->dev->name);
 		}
 	}
 
@@ -3543,8 +3456,9 @@
 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct slave *slave, *rollback_slave;
 	struct sockaddr *sa = addr, tmp_sa;
-	struct slave *slave;
+	struct list_head *iter;
 	int res = 0;
 
 	if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3492,7 @@
 	 * call to the base driver.
 	 */
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
 		pr_debug("slave %p %s\n", slave, slave->dev->name);
 
@@ -3610,13 +3524,16 @@
 	tmp_sa.sa_family = bond_dev->type;
 
 	/* unwind from head to the slave that failed */
-	bond_for_each_slave_continue_reverse(bond, slave) {
+	bond_for_each_slave(bond, rollback_slave, iter) {
 		int tmp_res;
 
-		tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+		if (rollback_slave == slave)
+			break;
+
+		tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
 		if (tmp_res) {
 			pr_debug("unwind err %d dev %s\n",
-				 tmp_res, slave->dev->name);
+				 tmp_res, rollback_slave->dev->name);
 		}
 	}
 
@@ -3635,11 +3552,12 @@
  */
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
+	struct list_head *iter;
 	struct slave *slave;
 	int i = slave_id;
 
 	/* Here we start from the slave with slave_id */
-	bond_for_each_slave_rcu(bond, slave) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (--i < 0) {
 			if (slave_can_tx(slave)) {
 				bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3568,7 @@
 
 	/* Here we start from the first slave up to slave_id */
 	i = slave_id;
-	bond_for_each_slave_rcu(bond, slave) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (--i < 0)
 			break;
 		if (slave_can_tx(slave)) {
@@ -3662,14 +3580,44 @@
 	kfree_skb(skb);
 }
 
+/**
+ * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
+ * @bond: bonding device to use
+ *
+ * Based on the value of the bonding device's packets_per_slave parameter
+ * this function generates a slave id, which is usually used as the next
+ * slave to transmit through.
+ */
+static u32 bond_rr_gen_slave_id(struct bonding *bond)
+{
+	int packets_per_slave = bond->params.packets_per_slave;
+	u32 slave_id;
+
+	switch (packets_per_slave) {
+	case 0:
+		slave_id = prandom_u32();
+		break;
+	case 1:
+		slave_id = bond->rr_tx_counter;
+		break;
+	default:
+		slave_id = reciprocal_divide(bond->rr_tx_counter,
+					     packets_per_slave);
+		break;
+	}
+	bond->rr_tx_counter++;
+
+	return slave_id;
+}
+
 static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct iphdr *iph = ip_hdr(skb);
 	struct slave *slave;
+	u32 slave_id;
 
-	/*
-	 * Start with the curr_active_slave that joined the bond as the
+	/* Start with the curr_active_slave that joined the bond as the
 	 * default for sending IGMP traffic.  For failover purposes one
 	 * needs to maintain some consistency for the interface that will
 	 * send the join/membership reports.  The curr_active_slave found
@@ -3682,8 +3630,8 @@
 		else
 			bond_xmit_slave_id(bond, skb, 0);
 	} else {
-		bond_xmit_slave_id(bond, skb,
-				   bond->rr_tx_counter++ % bond->slave_cnt);
+		slave_id = bond_rr_gen_slave_id(bond);
+		bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
 	}
 
 	return NETDEV_TX_OK;
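
With packets_per_slave greater than 1, bond_rr_gen_slave_id() divides the
running tx counter by that value (via a precomputed reciprocal), so
round-robin keeps each slave for several consecutive packets. A
plain-division userspace sketch of the resulting schedule; the constants
are illustrative:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int packets_per_slave = 3, slave_cnt = 2;
		unsigned int rr_tx_counter;

		for (rr_tx_counter = 0; rr_tx_counter < 8; rr_tx_counter++) {
			/* the driver uses reciprocal_divide(); plain / here */
			unsigned int slave_id = rr_tx_counter / packets_per_slave;

			printf("packet %u -> slave %u\n", rr_tx_counter,
			       slave_id % slave_cnt);
		}
		return 0;
	}
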
@@ -3707,8 +3655,7 @@
 	return NETDEV_TX_OK;
 }
 
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor() , we determine the output device by using a pre-
  * determined xmit_hash_policy(), If the selected device is not enabled,
  * find the next active slave.
  */
@@ -3716,8 +3663,7 @@
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 
-	bond_xmit_slave_id(bond, skb,
-			   bond->xmit_hash_policy(skb, bond->slave_cnt));
+	bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
 
 	return NETDEV_TX_OK;
 }
@@ -3727,8 +3673,9 @@
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave = NULL;
+	struct list_head *iter;
 
-	bond_for_each_slave_rcu(bond, slave) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (bond_is_last_slave(bond, slave))
 			break;
 		if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3700,6 @@
 
 /*------------------------- Device initialization ---------------------------*/
 
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
-	switch (bond->params.xmit_policy) {
-	case BOND_XMIT_POLICY_LAYER23:
-		bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
-		break;
-	case BOND_XMIT_POLICY_LAYER34:
-		bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
-		break;
-	case BOND_XMIT_POLICY_LAYER2:
-	default:
-		bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
-		break;
-	}
-}
-
 /*
  * Lookup the slave that corresponds to a qid
  */
@@ -3777,13 +3708,14 @@
 {
 	struct slave *slave = NULL;
 	struct slave *check_slave;
+	struct list_head *iter;
 	int res = 1;
 
 	if (!skb->queue_mapping)
 		return 1;
 
 	/* Find out if any slaves have the same mapping as this skb. */
-	bond_for_each_slave_rcu(bond, check_slave) {
+	bond_for_each_slave_rcu(bond, check_slave, iter) {
 		if (check_slave->queue_id == skb->queue_mapping) {
 			slave = check_slave;
 			break;
@@ -3869,7 +3801,7 @@
 		return NETDEV_TX_BUSY;
 
 	rcu_read_lock();
-	if (!list_empty(&bond->slave_list))
+	if (bond_has_slaves(bond))
 		ret = __bond_start_xmit(skb, dev);
 	else
 		kfree_skb(skb);
@@ -3878,43 +3810,12 @@
 	return ret;
 }
 
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
-	struct net_device *bond_dev = bond->dev;
-
-	switch (mode) {
-	case BOND_MODE_ROUNDROBIN:
-		break;
-	case BOND_MODE_ACTIVEBACKUP:
-		break;
-	case BOND_MODE_XOR:
-		bond_set_xmit_hash_policy(bond);
-		break;
-	case BOND_MODE_BROADCAST:
-		break;
-	case BOND_MODE_8023AD:
-		bond_set_xmit_hash_policy(bond);
-		break;
-	case BOND_MODE_ALB:
-		/* FALLTHRU */
-	case BOND_MODE_TLB:
-		break;
-	default:
-		/* Should never happen, mode already checked */
-		pr_err("%s: Error: Unknown bonding mode %d\n",
-		       bond_dev->name, mode);
-		break;
-	}
-}
-
 static int bond_ethtool_get_settings(struct net_device *bond_dev,
 				     struct ethtool_cmd *ecmd)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	unsigned long speed = 0;
+	struct list_head *iter;
 	struct slave *slave;
 
 	ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3827,7 @@
 	 * this is an accurate maximum.
 	 */
 	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (SLAVE_IS_OK(slave)) {
 			if (slave->speed != SPEED_UNKNOWN)
 				speed += slave->speed;
@@ -3994,14 +3895,13 @@
 	free_netdev(bond_dev);
 }
 
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 
 	/* initialize rwlocks */
 	rwlock_init(&bond->lock);
 	rwlock_init(&bond->curr_slave_lock);
-	INIT_LIST_HEAD(&bond->slave_list);
 	bond->params = bonding_defaults;
 
 	/* Initialize pointers */
@@ -4011,7 +3911,6 @@
 	ether_setup(bond_dev);
 	bond_dev->netdev_ops = &bond_netdev_ops;
 	bond_dev->ethtool_ops = &bond_ethtool_ops;
-	bond_set_mode_ops(bond, bond->params.mode);
 
 	bond_dev->destructor = bond_destructor;
 
@@ -4057,12 +3956,13 @@
 static void bond_uninit(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave, *tmp_slave;
+	struct list_head *iter;
+	struct slave *slave;
 
 	bond_netpoll_cleanup(bond_dev);
 
 	/* Release the bonded slaves */
-	list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+	bond_for_each_slave(bond, slave, iter)
 		__bond_release_one(bond_dev, slave->dev, true);
 	pr_info("%s: released all slaves\n", bond_dev->name);
 
@@ -4235,6 +4135,12 @@
 		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 	}
 
+	if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
+			packets_per_slave, USHRT_MAX);
+		packets_per_slave = 1;
+	}
+
 	/* reset values for TLB/ALB */
 	if ((bond_mode == BOND_MODE_TLB) ||
 	    (bond_mode == BOND_MODE_ALB)) {
@@ -4424,7 +4330,10 @@
 	params->resend_igmp = resend_igmp;
 	params->min_links = min_links;
 	params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
-
+	if (packets_per_slave > 1)
+		params->packets_per_slave = reciprocal_value(packets_per_slave);
+	else
+		params->packets_per_slave = packets_per_slave;
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
 		params->primary[IFNAMSIZ - 1] = 0;
@@ -4495,32 +4404,11 @@
 	return 0;
 }
 
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
-	if (tb[IFLA_ADDRESS]) {
-		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
-			return -EINVAL;
-		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
-			return -EADDRNOTAVAIL;
-	}
-	return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
 {
 	return tx_queues;
 }
 
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
-	.kind			= "bond",
-	.priv_size		= sizeof(struct bonding),
-	.setup			= bond_setup,
-	.validate		= bond_validate,
-	.get_num_tx_queues	= bond_get_num_tx_queues,
-	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
-							     as for TX queues */
-};
-
 /* Create a new bond based on the specified name and bonding parameters.
  * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4495,7 @@
 	if (res)
 		goto out;
 
-	res = rtnl_link_register(&bond_link_ops);
+	res = bond_netlink_init();
 	if (res)
 		goto err_link;
 
@@ -4623,7 +4511,7 @@
 out:
 	return res;
 err:
-	rtnl_link_unregister(&bond_link_ops);
+	bond_netlink_fini();
 err_link:
 	unregister_pernet_subsys(&bond_net_ops);
 	goto out;
@@ -4636,7 +4524,7 @@
 
 	bond_destroy_debugfs();
 
-	rtnl_link_unregister(&bond_link_ops);
+	bond_netlink_fini();
 	unregister_pernet_subsys(&bond_net_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4541,3 @@
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644
index 0000000..40e7b1c
--- /dev/null
+++ b/drivers/net/bonding/bond_netlink.c
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bonding/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
+	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+			   struct nlattr *tb[], struct nlattr *data[])
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	int err;
+
+	if (data && data[IFLA_BOND_MODE]) {
+		int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+		err = bond_option_mode_set(bond, mode);
+		if (err)
+			return err;
+	}
+	if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+		int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+		struct net_device *slave_dev;
+
+		if (ifindex == 0) {
+			slave_dev = NULL;
+		} else {
+			slave_dev = __dev_get_by_index(dev_net(bond_dev),
+						       ifindex);
+			if (!slave_dev)
+				return -ENODEV;
+		}
+		err = bond_option_active_slave_set(bond, slave_dev);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+			struct nlattr *tb[], struct nlattr *data[])
+{
+	int err;
+
+	err = bond_changelink(bond_dev, tb, data);
+	if (err < 0)
+		return err;
+
+	return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_MODE */
+		nla_total_size(sizeof(u32));	/* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+			  const struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+	if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+	    (slave_dev &&
+	     nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+	.kind			= "bond",
+	.priv_size		= sizeof(struct bonding),
+	.setup			= bond_setup,
+	.maxtype		= IFLA_BOND_MAX,
+	.policy			= bond_policy,
+	.validate		= bond_validate,
+	.newlink		= bond_newlink,
+	.changelink		= bond_changelink,
+	.get_size		= bond_get_size,
+	.fill_info		= bond_fill_info,
+	.get_num_tx_queues	= bond_get_num_tx_queues,
+	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
+							     as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+	return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+	rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
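
The .get_size/.fill_info pair added here follows the usual rtnl contract:
get_size reports a worst-case attribute budget, fill_info writes the
attributes and bails out with -EMSGSIZE if they would not fit. A toy
type-length-value model of that contract in plain C; real netlink
attributes carry alignment padding and headers this omits:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct buf { uint8_t data[32]; size_t len; };

	static size_t get_size(void) { return 2 + 1 + 2 + 4; }	/* mode + active */

	static int put_attr(struct buf *b, uint8_t type, const void *v, uint8_t len)
	{
		if (b->len + 2 + len > sizeof(b->data))
			return -1;				/* ~ -EMSGSIZE */
		b->data[b->len++] = type;
		b->data[b->len++] = len;
		memcpy(b->data + b->len, v, len);
		b->len += len;
		return 0;
	}

	int main(void)
	{
		struct buf b = { .len = 0 };
		uint8_t mode = 1;				/* active-backup */
		uint32_t active_ifindex = 7;

		if (put_attr(&b, 1, &mode, sizeof(mode)) ||
		    put_attr(&b, 2, &active_ifindex, sizeof(active_ifindex)))
			return 1;
		printf("filled %zu of %zu budgeted bytes\n", b.len, get_size());
		return 0;
	}
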
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644
index 0000000..9a5223c
--- /dev/null
+++ b/drivers/net/bonding/bond_options.c
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bonding/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+	int i;
+
+	/* count the entries in the NULL-terminated mode table */
+	for (i = 0; bond_mode_tbl[i].modename; i++)
+		;
+
+	return mode >= 0 && mode < i;
+}
+
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+	if (!bond_mode_is_valid(mode)) {
+		pr_err("invalid mode value %d.\n", mode);
+		return -EINVAL;
+	}
+
+	if (bond->dev->flags & IFF_UP) {
+		pr_err("%s: unable to update mode because interface is up.\n",
+		       bond->dev->name);
+		return -EPERM;
+	}
+
+	if (bond_has_slaves(bond)) {
+		pr_err("%s: unable to update mode because bond has slaves.\n",
+			bond->dev->name);
+		return -EPERM;
+	}
+
+	if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+		pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+		       bond->dev->name, bond_mode_tbl[mode].modename);
+		return -EINVAL;
+	}
+
+	/* don't cache arp_validate between modes */
+	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+	bond->params.mode = mode;
+	return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+							 struct slave *slave)
+{
+	return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+	struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+	return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+	return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+				 struct net_device *slave_dev)
+{
+	int ret = 0;
+
+	if (slave_dev) {
+		if (!netif_is_bond_slave(slave_dev)) {
+			pr_err("Device %s is not bonding slave.\n",
+			       slave_dev->name);
+			return -EINVAL;
+		}
+
+		if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+			pr_err("%s: Device %s is not our slave.\n",
+			       bond->dev->name, slave_dev->name);
+			return -EINVAL;
+		}
+	}
+
+	if (!USES_PRIMARY(bond->params.mode)) {
+		pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+		       bond->dev->name, bond->dev->name, bond->params.mode);
+		return -EINVAL;
+	}
+
+	block_netpoll_tx();
+	read_lock(&bond->lock);
+	write_lock_bh(&bond->curr_slave_lock);
+
+	/* check to see if we are clearing active */
+	if (!slave_dev) {
+		pr_info("%s: Clearing current active slave.\n",
+			bond->dev->name);
+		rcu_assign_pointer(bond->curr_active_slave, NULL);
+		bond_select_active_slave(bond);
+	} else {
+		struct slave *old_active = bond->curr_active_slave;
+		struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+		BUG_ON(!new_active);
+
+		if (new_active == old_active) {
+			/* do nothing */
+			pr_info("%s: %s is already the current active slave.\n",
+				bond->dev->name, new_active->dev->name);
+		} else {
+			if (old_active && (new_active->link == BOND_LINK_UP) &&
+			    IS_UP(new_active->dev)) {
+				pr_info("%s: Setting %s as active slave.\n",
+					bond->dev->name, new_active->dev->name);
+				bond_change_active_slave(bond, new_active);
+			} else {
+				pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+				       bond->dev->name, new_active->dev->name,
+				       new_active->dev->name);
+				ret = -EINVAL;
+			}
+		}
+	}
+
+	write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->lock);
+	unblock_netpoll_tx();
+	return ret;
+}
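
bond_mode_is_valid() above treats bond_mode_tbl as a NULL-terminated table
and counts its entries to bound the mode value. The same idiom as a
standalone sketch, with a made-up two-entry table:

	#include <stddef.h>
	#include <stdio.h>

	struct mode_tbl { const char *modename; int mode; };

	static const struct mode_tbl tbl[] = {
		{ "balance-rr", 0 }, { "active-backup", 1 }, { NULL, -1 },
	};

	static int mode_is_valid(int mode)
	{
		int i;

		/* count entries up to the NULL terminator */
		for (i = 0; tbl[i].modename; i++)
			;
		return mode >= 0 && mode < i;
	}

	int main(void)
	{
		printf("%d %d\n", mode_is_valid(1), mode_is_valid(5));	/* 1 0 */
		return 0;
	}
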
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 20a6ee2..fb868d6 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,8 +10,9 @@
 	__acquires(&bond->lock)
 {
 	struct bonding *bond = seq->private;
-	loff_t off = 0;
+	struct list_head *iter;
 	struct slave *slave;
+	loff_t off = 0;
 
 	/* make sure the bond won't be taken away */
 	rcu_read_lock();
@@ -20,7 +21,7 @@
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	bond_for_each_slave(bond, slave)
+	bond_for_each_slave(bond, slave, iter)
 		if (++off == *pos)
 			return slave;
 
@@ -30,17 +31,25 @@
 static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct bonding *bond = seq->private;
-	struct slave *slave = v;
+	struct list_head *iter;
+	struct slave *slave;
+	bool found = false;
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return bond_first_slave(bond);
 
-	if (bond_is_last_slave(bond, slave))
+	if (bond_is_last_slave(bond, v))
 		return NULL;
-	slave = bond_next_slave(bond, slave);
 
-	return slave;
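+	/* struct slave no longer carries a list pointer; walk the list until we pass v */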
+	bond_for_each_slave(bond, slave, iter) {
+		if (found)
+			return slave;
+		if (slave == v)
+			found = true;
+	}
+
+	return NULL;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ec9b646..bc8fd36 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -40,6 +40,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/nsproxy.h>
+#include <linux/reciprocal_div.h>
 
 #include "bonding.h"
 
@@ -159,41 +160,6 @@
 	.store = bonding_store_bonds,
 };
 
-int bond_create_slave_symlinks(struct net_device *master,
-			       struct net_device *slave)
-{
-	char linkname[IFNAMSIZ+7];
-	int ret = 0;
-
-	/* first, create a link from the slave back to the master */
-	ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
-				"master");
-	if (ret)
-		return ret;
-	/* next, create a link from the master to the slave */
-	sprintf(linkname, "slave_%s", slave->name);
-	ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
-				linkname);
-
-	/* free the master link created earlier in case of error */
-	if (ret)
-		sysfs_remove_link(&(slave->dev.kobj), "master");
-
-	return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
-				 struct net_device *slave)
-{
-	char linkname[IFNAMSIZ+7];
-
-	sysfs_remove_link(&(slave->dev.kobj), "master");
-	sprintf(linkname, "slave_%s", slave->name);
-	sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
 /*
  * Show the slaves in the current bond.
  */
@@ -201,11 +167,14 @@
 				   struct device_attribute *attr, char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct list_head *iter;
 	struct slave *slave;
 	int res = 0;
 
-	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	if (!rtnl_trylock())
+		return restart_syscall();
+
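+	/* the slave list is now protected by RTNL rather than bond->lock */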
+	bond_for_each_slave(bond, slave, iter) {
 		if (res > (PAGE_SIZE - IFNAMSIZ)) {
 			/* not enough space for another interface name */
 			if ((PAGE_SIZE - res) > 10)
@@ -215,7 +184,9 @@
 		}
 		res += sprintf(buf + res, "%s ", slave->dev->name);
 	}
-	read_unlock(&bond->lock);
+
+	rtnl_unlock();
+
 	if (res)
 		buf[res-1] = '\n'; /* eat the leftover space */
 
@@ -304,50 +275,26 @@
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
-	int new_value, ret = count;
+	int new_value, ret;
 	struct bonding *bond = to_bond(d);
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	if (bond->dev->flags & IFF_UP) {
-		pr_err("unable to update mode of %s because interface is up.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
-	if (!list_empty(&bond->slave_list)) {
-		pr_err("unable to update mode of %s because it has slaves.\n",
-			bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
 	new_value = bond_parse_parm(buf, bond_mode_tbl);
 	if (new_value < 0)  {
 		pr_err("%s: Ignoring invalid mode value %.*s.\n",
 		       bond->dev->name, (int)strlen(buf) - 1, buf);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
-	if ((new_value == BOND_MODE_ALB ||
-	     new_value == BOND_MODE_TLB) &&
-	    bond->params.arp_interval) {
-		pr_err("%s: %s mode is incompatible with arp monitoring.\n",
-		       bond->dev->name, bond_mode_tbl[new_value].modename);
-		ret = -EINVAL;
-		goto out;
+	if (!rtnl_trylock())
+		return restart_syscall();
+
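+	/* state checks (IFF_UP, slaves, arp_interval) now live in bond_option_mode_set() */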
+	ret = bond_option_mode_set(bond, new_value);
+	if (!ret) {
+		pr_info("%s: setting mode to %s (%d).\n",
+			bond->dev->name, bond_mode_tbl[new_value].modename,
+			new_value);
+		ret = count;
 	}
 
-	/* don't cache arp_validate between modes */
-	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
-	bond->params.mode = new_value;
-	bond_set_mode_ops(bond, bond->params.mode);
-	pr_info("%s: setting mode to %s (%d).\n",
-		bond->dev->name, bond_mode_tbl[new_value].modename,
-		new_value);
-out:
 	rtnl_unlock();
 	return ret;
 }
@@ -383,7 +330,6 @@
 		ret = -EINVAL;
 	} else {
 		bond->params.xmit_policy = new_value;
-		bond_set_mode_ops(bond, bond->params.mode);
 		pr_info("%s: setting xmit hash policy to %s (%d).\n",
 			bond->dev->name,
 			xmit_hashtype_tbl[new_value].modename, new_value);
@@ -513,7 +459,7 @@
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (!list_empty(&bond->slave_list)) {
+	if (bond_has_slaves(bond)) {
 		pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
 		       bond->dev->name);
 		ret = -EPERM;
@@ -647,11 +593,15 @@
 					 const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
+	struct list_head *iter;
 	struct slave *slave;
 	__be32 newtarget, *targets;
 	unsigned long *targets_rx;
 	int ind, i, j, ret = -EINVAL;
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	targets = bond->params.arp_targets;
 	newtarget = in_aton(buf + 1);
 	/* look for adds */
@@ -679,7 +629,7 @@
 			 &newtarget);
 		/* not to race with bond_arp_rcv */
 		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave)
+		bond_for_each_slave(bond, slave, iter)
 			slave->target_last_arp_rx[ind] = jiffies;
 		targets[ind] = newtarget;
 		write_unlock_bh(&bond->lock);
@@ -705,7 +655,7 @@
 			&newtarget);
 
 		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			targets_rx = slave->target_last_arp_rx;
 			j = ind;
 			for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -725,6 +675,7 @@
 
 	ret = count;
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR, bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1102,6 +1053,7 @@
 				     const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
+	struct list_head *iter;
 	char ifname[IFNAMSIZ];
 	struct slave *slave;
 
@@ -1129,7 +1081,7 @@
 		goto out;
 	}
 
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
 			pr_info("%s: Setting %s as primary slave.\n",
 				bond->dev->name, slave->dev->name);
@@ -1259,13 +1211,13 @@
 					 char *buf)
 {
 	struct bonding *bond = to_bond(d);
-	struct slave *curr;
+	struct net_device *slave_dev;
 	int count = 0;
 
 	rcu_read_lock();
-	curr = rcu_dereference(bond->curr_active_slave);
-	if (USES_PRIMARY(bond->params.mode) && curr)
-		count = sprintf(buf, "%s\n", curr->dev->name);
+	slave_dev = bond_option_active_slave_get_rcu(bond);
+	if (slave_dev)
+		count = sprintf(buf, "%s\n", slave_dev->name);
 	rcu_read_unlock();
 
 	return count;
@@ -1275,80 +1227,33 @@
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	struct slave *slave, *old_active, *new_active;
+	int ret;
 	struct bonding *bond = to_bond(d);
 	char ifname[IFNAMSIZ];
+	struct net_device *dev;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	old_active = new_active = NULL;
-	block_netpoll_tx();
-	read_lock(&bond->lock);
-	write_lock_bh(&bond->curr_slave_lock);
-
-	if (!USES_PRIMARY(bond->params.mode)) {
-		pr_info("%s: Unable to change active slave; %s is in mode %d\n",
-			bond->dev->name, bond->dev->name, bond->params.mode);
-		goto out;
-	}
-
 	sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
-	/* check to see if we are clearing active */
 	if (!strlen(ifname) || buf[0] == '\n') {
-		pr_info("%s: Clearing current active slave.\n",
-			bond->dev->name);
-		rcu_assign_pointer(bond->curr_active_slave, NULL);
-		bond_select_active_slave(bond);
-		goto out;
-	}
-
-	bond_for_each_slave(bond, slave) {
-		if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-			old_active = bond->curr_active_slave;
-			new_active = slave;
-			if (new_active == old_active) {
-				/* do nothing */
-				pr_info("%s: %s is already the current"
-					" active slave.\n",
-					bond->dev->name,
-					slave->dev->name);
-				goto out;
-			} else {
-				if ((new_active) &&
-				    (old_active) &&
-				    (new_active->link == BOND_LINK_UP) &&
-				    IS_UP(new_active->dev)) {
-					pr_info("%s: Setting %s as active"
-						" slave.\n",
-						bond->dev->name,
-						slave->dev->name);
-					bond_change_active_slave(bond,
-								 new_active);
-				} else {
-					pr_info("%s: Could not set %s as"
-						" active slave; either %s is"
-						" down or the link is down.\n",
-						bond->dev->name,
-						slave->dev->name,
-						slave->dev->name);
-				}
-				goto out;
-			}
+		dev = NULL;
+	} else {
+		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+		if (!dev) {
+			ret = -ENODEV;
+			goto out;
 		}
 	}
 
-	pr_info("%s: Unable to set %.*s as active slave.\n",
-		bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
-	write_unlock_bh(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
+	ret = bond_option_active_slave_set(bond, dev);
+	if (!ret)
+		ret = count;
 
+ out:
 	rtnl_unlock();
 
-	return count;
+	return ret;
 
 }
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1484,14 +1389,14 @@
 				     char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct list_head *iter;
 	struct slave *slave;
 	int res = 0;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
 			/* not enough space for another interface_name:queue_id pair */
 			if ((PAGE_SIZE - res) > 10)
@@ -1502,9 +1407,9 @@
 		res += sprintf(buf + res, "%s:%d ",
 			       slave->dev->name, slave->queue_id);
 	}
-	read_unlock(&bond->lock);
 	if (res)
 		buf[res-1] = '\n'; /* eat the leftover space */
+
 	rtnl_unlock();
 
 	return res;
@@ -1520,6 +1425,7 @@
 {
 	struct slave *slave, *update_slave;
 	struct bonding *bond = to_bond(d);
+	struct list_head *iter;
 	u16 qid;
 	int ret = count;
 	char *delim;
@@ -1552,11 +1458,9 @@
 	if (!sdev)
 		goto err_no_cmd;
 
-	read_lock(&bond->lock);
-
 	/* Search for the slave and check for duplicate qids */
 	update_slave = NULL;
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (sdev == slave->dev)
 			/*
 			 * We don't need to check the matching
@@ -1564,23 +1468,20 @@
 			 */
 			update_slave = slave;
 		else if (qid && qid == slave->queue_id) {
-			goto err_no_cmd_unlock;
+			goto err_no_cmd;
 		}
 	}
 
 	if (!update_slave)
-		goto err_no_cmd_unlock;
+		goto err_no_cmd;
 
 	/* Actually set the qids for the slave */
 	update_slave->queue_id = qid;
 
-	read_unlock(&bond->lock);
 out:
 	rtnl_unlock();
 	return ret;
 
-err_no_cmd_unlock:
-	read_unlock(&bond->lock);
 err_no_cmd:
 	pr_info("invalid input for queue_id set for %s.\n",
 		bond->dev->name);
@@ -1610,8 +1511,12 @@
 {
 	struct bonding *bond = to_bond(d);
 	int new_value, ret = count;
+	struct list_head *iter;
 	struct slave *slave;
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no all_slaves_active value specified.\n",
 		       bond->dev->name);
@@ -1631,8 +1536,7 @@
 		goto out;
 	}
 
-	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (!bond_is_active_slave(slave)) {
 			if (new_value)
 				slave->inactive = 0;
@@ -1640,8 +1544,8 @@
 				slave->inactive = 1;
 		}
 	}
-	read_unlock(&bond->lock);
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1728,6 +1632,53 @@
 static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
 		   bonding_show_lp_interval, bonding_store_lp_interval);
 
+static ssize_t bonding_show_packets_per_slave(struct device *d,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct bonding *bond = to_bond(d);
+	int packets_per_slave = bond->params.packets_per_slave;
+
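+	/* values > 1 are stored as reciprocal_value(x); applying it again recovers x */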
+	if (packets_per_slave > 1)
+		packets_per_slave = reciprocal_value(packets_per_slave);
+
+	return sprintf(buf, "%d\n", packets_per_slave);
+}
+
+static ssize_t bonding_store_packets_per_slave(struct device *d,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct bonding *bond = to_bond(d);
+	int new_value, ret = count;
+
+	if (sscanf(buf, "%d", &new_value) != 1) {
+		pr_err("%s: no packets_per_slave value specified.\n",
+		       bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (new_value < 0 || new_value > USHRT_MAX) {
+		pr_err("%s: packets_per_slave must be between 0 and %u\n",
+		       bond->dev->name, USHRT_MAX);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (bond->params.mode != BOND_MODE_ROUNDROBIN)
+		pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
+			bond->dev->name);
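+	/* store the reciprocal so the hot xmit path can divide with a multiply */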
+	if (new_value > 1)
+		bond->params.packets_per_slave = reciprocal_value(new_value);
+	else
+		bond->params.packets_per_slave = new_value;
+out:
+	return ret;
+}
+
+static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
+		   bonding_show_packets_per_slave,
+		   bonding_store_packets_per_slave);
+
 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
 	&dev_attr_mode.attr,
@@ -1759,6 +1710,7 @@
 	&dev_attr_resend_igmp.attr,
 	&dev_attr_min_links.attr,
 	&dev_attr_lp_interval.attr,
+	&dev_attr_packets_per_slave.attr,
 	NULL,
 };
 
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd..77a07a1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -58,6 +58,11 @@
 #define TX_QUEUE_OVERRIDE(mode)				\
 			(((mode) == BOND_MODE_ACTIVEBACKUP) ||	\
 			 ((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode)			\
+		(((mode) == BOND_MODE_TLB) ||	\
+		 ((mode) == BOND_MODE_ALB))
+
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -72,63 +77,37 @@
 	res; })
 
 /* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
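+/* slaves are now kept on the netdev lower adjacency list, not a private list */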
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
 
 /* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
 #define bond_first_slave(bond) \
-	list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+	(bond_has_slaves(bond) ? \
+		netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+		NULL)
 #define bond_last_slave(bond) \
-	(list_empty(&(bond)->slave_list) ? NULL : \
-					   bond_to_slave((bond)->slave_list.prev))
+	(bond_has_slaves(bond) ? \
+		netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+		NULL)
 
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
-	(bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
-					 bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
-	(bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
-					  bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond:	the bond holding this list.
- * @pos:	current slave.
- * @cnt:	counter for max number of moves
- * @start:	starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
-	for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
-	     cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
 
 /**
  * bond_for_each_slave - iterate over all slaves
  * @bond:	the bond holding this list
  * @pos:	current slave
+ * @iter:	list_head * iterator
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave(bond, pos) \
-	list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+	netdev_for_each_lower_private((bond)->dev, pos, iter)
 
 /* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
-	list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond:	the bond holding this list
- * @pos:	slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
-	list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+	netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
@@ -177,6 +156,7 @@
 	int all_slaves_active;
 	int resend_igmp;
 	int lp_interval;
+	int packets_per_slave;
 };
 
 struct bond_parm_tbl {
@@ -188,7 +168,6 @@
 
 struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
-	struct list_head list;
 	struct bonding *bond; /* our master */
 	int    delay;
 	unsigned long jiffies;
@@ -228,7 +207,6 @@
  */
 struct bonding {
 	struct   net_device *dev; /* first - useful for panic debug */
-	struct   list_head slave_list;
 	struct   slave *curr_active_slave;
 	struct   slave *current_arp_slave;
 	struct   slave *primary_slave;
@@ -245,8 +223,7 @@
 	char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
 	struct   list_head bond_list;
-	int      (*xmit_hash_policy)(struct sk_buff *, int);
-	u16      rr_tx_counter;
+	u32      rr_tx_counter;
 	struct   ad_bond_info ad_info;
 	struct   alb_bond_info alb_info;
 	struct   bond_params params;
@@ -276,13 +253,7 @@
 static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
 						  struct net_device *slave_dev)
 {
-	struct slave *slave = NULL;
-
-	bond_for_each_slave(bond, slave)
-		if (slave->dev == slave_dev)
-			return slave;
-
-	return NULL;
+	return netdev_lower_dev_get_private(bond->dev, slave_dev);
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +265,7 @@
 
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-	return (bond->params.mode == BOND_MODE_TLB ||
-		bond->params.mode == BOND_MODE_ALB);
+	return BOND_MODE_IS_LB(bond->params.mode);
 }
 
 static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +402,18 @@
 struct bond_net;
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
 void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 void bond_mii_monitor(struct work_struct *);
 void bond_loadbalance_arp_mon(struct work_struct *);
 void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +423,14 @@
 void bond_debug_unregister(struct bonding *bond);
 void bond_debug_reregister(struct bonding *bond);
 const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
 
 struct bond_net {
 	struct net *		net;	/* Associated network namespace */
@@ -492,9 +467,24 @@
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
 					       const u8 *mac)
 {
+	struct list_head *iter;
 	struct slave *tmp;
 
-	bond_for_each_slave(bond, tmp)
+	bond_for_each_slave(bond, tmp, iter)
+		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+			return tmp;
+
+	return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+					       const u8 *mac)
+{
+	struct list_head *iter;
+	struct slave *tmp;
+
+	bond_for_each_slave_rcu(bond, tmp, iter)
 		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
 			return tmp;
 
@@ -528,4 +518,7 @@
 extern const struct bond_parm_tbl pri_reselect_tbl[];
 extern struct bond_parm_tbl ad_select_tbl[];
 
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
 #endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index b9ed128..9856086 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -686,18 +686,19 @@
 		goto err;
 
 	/* Get the CAIF configuration from virtio config space, if available */
-#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
-	((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
-			   &_var, \
-			   FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
-
 	if (vdev->config->get) {
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
-		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->tx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->rx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->tx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->rx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mtu);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mru);
 	} else {
 		cfv->tx_hr = CFV_DEF_HEADROOM;
 		cfv->rx_hr = CFV_DEF_HEADROOM;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 693d8ff..cf0f63e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1347,7 +1347,7 @@
 	priv->reg_base = addr;
 	priv->devtype_data = *devtype_data;
 	priv->clk = clk;
-	priv->pdata = pdev->dev.platform_data;
+	priv->pdata = dev_get_platdata(&pdev->dev);
 	priv->mb0_id = 0x7ff;
 
 	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a2700d2..8a0b515 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -539,7 +539,7 @@
 	struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
 	unsigned short *pdata;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(&pdev->dev, "No platform data provided!\n");
 		err = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7..bce0be5 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -160,7 +160,6 @@
 	return 0;
 
 out_free_c_can:
-	pci_set_drvdata(pdev, NULL);
 	free_c_can_dev(dev);
 out_iounmap:
 	pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@
 
 	unregister_c_can_dev(dev);
 
-	pci_set_drvdata(pdev, NULL);
 	free_c_can_dev(dev);
 
 	pci_iounmap(pdev, priv->base);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 294ced3..d66ac26 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -322,7 +322,7 @@
 	.driver = {
 		.name = KBUILD_MODNAME,
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(c_can_of_table),
+		.of_match_table = c_can_of_table,
 	},
 	.probe = c_can_plat_probe,
 	.remove = c_can_plat_remove,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 034bdd8..ad76734 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -152,7 +152,7 @@
 				   struct cc770_priv *priv)
 {
 
-	struct cc770_platform_data *pdata = pdev->dev.platform_data;
+	struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	priv->can.clock.freq = pdata->osc_freq;
 	if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@
 
 	if (pdev->dev.of_node)
 		err = cc770_get_of_node_data(pdev, priv);
-	else if (pdev->dev.platform_data)
+	else if (dev_get_platdata(&pdev->dev))
 		err = cc770_get_platform_data(pdev, priv);
 	else
 		err = -ENODEV;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1870c47..bda1888 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -645,19 +645,6 @@
 	/* We need synchronization with dev->stop() */
 	ASSERT_RTNL();
 
-	if (data[IFLA_CAN_CTRLMODE]) {
-		struct can_ctrlmode *cm;
-
-		/* Do not allow changing controller mode while running */
-		if (dev->flags & IFF_UP)
-			return -EBUSY;
-		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-		if (cm->flags & ~priv->ctrlmode_supported)
-			return -EOPNOTSUPP;
-		priv->ctrlmode &= ~cm->mask;
-		priv->ctrlmode |= cm->flags;
-	}
-
 	if (data[IFLA_CAN_BITTIMING]) {
 		struct can_bittiming bt;
 
@@ -680,6 +667,19 @@
 		}
 	}
 
+	if (data[IFLA_CAN_CTRLMODE]) {
+		struct can_ctrlmode *cm;
+
+		/* Do not allow changing controller mode while running */
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+		if (cm->flags & ~priv->ctrlmode_supported)
+			return -EOPNOTSUPP;
+		priv->ctrlmode &= ~cm->mask;
+		priv->ctrlmode |= cm->flags;
+	}
+
 	if (data[IFLA_CAN_RESTART_MS]) {
 		/* Do not allow changing restart delay while running */
 		if (dev->flags & IFF_UP)
@@ -702,17 +702,17 @@
 static size_t can_get_size(const struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	size_t size;
+	size_t size = 0;
 
-	size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-	size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
-	size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-	size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
-	size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
-	if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-		size += nla_total_size(sizeof(struct can_berr_counter));
-	if (priv->bittiming_const)	      /* IFLA_CAN_BITTIMING_CONST */
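+	/* size the attributes in the same order can_fill_info() emits them */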
+	size += nla_total_size(sizeof(struct can_bittiming));	/* IFLA_CAN_BITTIMING */
+	if (priv->bittiming_const)				/* IFLA_CAN_BITTIMING_CONST */
 		size += nla_total_size(sizeof(struct can_bittiming_const));
+	size += nla_total_size(sizeof(struct can_clock));	/* IFLA_CAN_CLOCK */
+	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_STATE */
+	size += nla_total_size(sizeof(struct can_ctrlmode));	/* IFLA_CAN_CTRLMODE */
+	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_RESTART_MS */
+	if (priv->do_get_berr_counter)				/* IFLA_CAN_BERR_COUNTER */
+		size += nla_total_size(sizeof(struct can_berr_counter));
 
 	return size;
 }
@@ -726,23 +726,20 @@
 
 	if (priv->do_get_state)
 		priv->do_get_state(dev, &state);
-	if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
-	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
-	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-	    nla_put(skb, IFLA_CAN_BITTIMING,
+	if (nla_put(skb, IFLA_CAN_BITTIMING,
 		    sizeof(priv->bittiming), &priv->bittiming) ||
-	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
-	    (priv->do_get_berr_counter &&
-	     !priv->do_get_berr_counter(dev, &bec) &&
-	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
 	    (priv->bittiming_const &&
 	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
-		     sizeof(*priv->bittiming_const), priv->bittiming_const)))
-		goto nla_put_failure;
+		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+	    (priv->do_get_berr_counter &&
+	     !priv->do_get_berr_counter(dev, &bec) &&
+	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+		return -EMSGSIZE;
 	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
 }
 
 static size_t can_get_xstats_size(const struct net_device *dev)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8f5ce74..ae08cf1 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1068,7 +1068,7 @@
 	priv->dev = dev;
 	priv->clk_ipg = clk_ipg;
 	priv->clk_per = clk_per;
-	priv->pdata = pdev->dev.platform_data;
+	priv->pdata = dev_get_platdata(&pdev->dev);
 	priv->devtype_data = devtype_data;
 
 	priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 36bd6fa1..ab5909a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1769,7 +1769,7 @@
 	struct device *dev;
 	int ret;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata)
 		return -ENXIO;
 
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index fe7dd69..08ac401 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -999,7 +999,7 @@
 {
 	struct net_device *net;
 	struct mcp251x_priv *priv;
-	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
 	int ret = -ENODEV;
 
 	if (!pdata)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 9c24d60..e98abb9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -297,8 +297,8 @@
 	struct napi_struct napi;
 };
 
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
 
 #endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c314a9..5f0e9b3 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -964,7 +964,6 @@
 		pci_disable_msi(priv->dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	pch_can_reset(priv);
 	pci_iounmap(pdev, priv->regs);
 	free_candev(priv->ndev);
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3752342..8359213 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -207,7 +207,6 @@
 	kfree(card);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static void ems_pci_card_reset(struct ems_pci_card *card)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 217585b..087b13b 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -387,7 +387,6 @@
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver kvaser_pci_driver = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6b6f0ad..065ca49 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -744,8 +744,6 @@
 	pci_iounmap(pdev, cfg_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver peak_pci_driver = {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c52c1e9..f9b4f81 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -477,7 +477,6 @@
 	kfree(card);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 /*
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8e259c54..29f9b63 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -76,7 +76,7 @@
 	struct resource *res_mem, *res_irq;
 	struct sja1000_platform_data *pdata;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(&pdev->dev, "No platform data provided!\n");
 		err = -ENODEV;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index afd7d85..35f0622 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -71,34 +71,34 @@
 	} id;
 };
 
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
 
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
 
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
 
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
-		const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+			       const char *msg);
 
 /* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
-			__iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+		    __iomem uint8_t *virt, unsigned int size, int offset);
 
 /* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
 
 /*
  * enable or disable irq
  * only called with fw.lock locked
  */
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
 
 /* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
 
 /* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
-		const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+		      ktime_t ktime);
 
 /* SOFTING DPRAM mappings */
 #define DPRAM_RX		0x0000
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 65eef1e..6cd5c01 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -768,7 +768,7 @@
 
 static int softing_pdev_probe(struct platform_device *pdev)
 {
-	const struct softing_platform_data *pdat = pdev->dev.platform_data;
+	const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
 	struct softing *card;
 	struct net_device *netdev;
 	struct softing_priv *priv;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 3a349a2..60d95b4 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -286,15 +286,6 @@
 	return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
 }
 
-static int ti_hecc_get_state(const struct net_device *ndev,
-	enum can_state *state)
-{
-	struct ti_hecc_priv *priv = netdev_priv(ndev);
-
-	*state = priv->can.state;
-	return 0;
-}
-
 static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
 {
 	struct can_bittiming *bit_timing = &priv->can.bittiming;
@@ -894,7 +885,7 @@
 	void __iomem *addr;
 	int err = -ENODEV;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(&pdev->dev, "No platform data\n");
 		goto probe_exit;
@@ -940,7 +931,6 @@
 
 	priv->can.bittiming_const = &ti_hecc_bittiming_const;
 	priv->can.do_set_mode = ti_hecc_do_set_mode;
-	priv->can.do_get_state = ti_hecc_get_state;
 	priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
 
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index b710c6b..bd8f84b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -88,10 +88,16 @@
 
 static int dummy_dev_init(struct net_device *dev)
 {
+	int i;
 	dev->dstats = alloc_percpu(struct pcpu_dstats);
 	if (!dev->dstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_dstats *dstats;
+		dstats = per_cpu_ptr(dev->dstats, i);
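+		/* the per-cpu seqcount must be initialized before its first writer */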
+		u64_stats_init(&dstats->syncp);
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c763..65b735d 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@
 
 config 3C515
 	tristate "3c515 ISA \"Fast EtherLink\""
-	depends on (ISA || EISA) && ISA_DMA_API
+	depends on ISA && ISA_DMA_API
 	---help---
 	  If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
 	  network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@
 	select MII
 	---help---
 	  This option enables driver support for a large number of 10Mbps and
-	  10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+	  10/100Mbps EISA, PCI and Cardbus 3Com network cards:
 
 	  "Vortex"    (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
 	  "Boomerang" (EtherLink XL 3c900 or 3c905)            PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6..465cc71 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ff..2923c51 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@
 #endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
 #endif
 
 
 /* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops ei_netdev_ops;
 
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
 static inline struct net_device *alloc_ei_netdev(void)
 {
 	return __alloc_ei_netdev(0);
 }
 
 /* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops eip_netdev_ops;
 
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
 static inline struct net_device *alloc_eip_netdev(void)
 {
 	return __alloc_eip_netdev(0);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001..36fa577 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@
 			for (i = 0; i < 16; i++)
 				SA_prom[i] = SA_prom[i+i];
 
-		memcpy(dev->dev_addr, SA_prom, 6);
+		memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
 	}
 
 #ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 9220108..fc14a85 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@
 	free_netdev (dev);
 err_out_free_res:
 	release_region (ioaddr, NE_IO_EXTENT);
-	pci_set_drvdata (pdev, NULL);
 	return -ENODEV;
-
 }
 
 /*
@@ -655,7 +653,6 @@
 	release_region(dev->base_addr, NE_IO_EXTENT);
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc..171d73c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@
 	return 0;
 
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	iounmap(base);
 err_out_free_res:
 	pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@
 	iounmap(np->base);
 	pci_release_regions(pdev);
 
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);			/* Will also free np!! */
 }
 
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee0..6dec86a 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@
 #endif
 };
 
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
 
 #endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b..ae33a99 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@
 #define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
 
 /* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
 #endif
 
 #endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d..d042511 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@
 		free_netdev(dev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@
 
 err_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 
 }
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca5..e07ce5f 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@
 	switch( lp->cardtype ) {
 	  case OLD_RIEBL:
 		/* No ethernet address! (Set some default address) */
-		memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+		memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
 		break;
 	  case NEW_RIEBL:
-		lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+		lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
 		break;
 	  case PAM_CARD:
 		i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b4..427c148 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@
 		aup->phy1_search_mac0 = 1;
 	} else {
 		if (is_valid_ether_addr(pd->mac)) {
-			memcpy(dev->dev_addr, pd->mac, 6);
+			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
 		} else {
 			/* Set a random MAC since platform_data did not provide a valid one. */
 			eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 1b89f1a..5739729 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@
 		}
 
 		clen = len & 1;
-		rtp = tp;
-		rfp = fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 		while (clen--) {
 			*rtp++ = *rfp++;
 		}
@@ -372,8 +372,8 @@
 		 * do the rest, if any.
 		 */
 		clen = len & 15;
-		rtp = (unsigned char *) tp;
-		rfp = (unsigned char *) fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 		while (clen--) {
 			*rtp++ = *rfp++;
 		}
@@ -403,8 +403,8 @@
 
 		clen = len & 1;
 
-		rtp = tp;
-		rfp = fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 
 		while (clen--) {
 			*rtp++ = *rfp++;
@@ -433,8 +433,8 @@
 		 * do the rest, if any.
 		 */
 		clen = len & 15;
-		rtp = (unsigned char *) tp;
-		rfp = (unsigned char *) fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 		while (clen--) {
 			*rtp++ = *rfp++;
 		}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c72843..256f590 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@
 	int i;
 
 	if (dev->irq == 0 ||
-		request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
 		return -EAGAIN;
 	}
 
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e288..38492e0 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@
 				pr_cont(" warning: CSR address invalid,\n");
 				pr_info("    using instead PROM address of");
 			}
-			memcpy(dev->dev_addr, promaddr, 6);
+			memcpy(dev->dev_addr, promaddr, ETH_ALEN);
 		}
 	}
 
@@ -2818,7 +2818,6 @@
 				    lp->init_block, lp->init_dma_addr);
 		free_netdev(dev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b76..daae0e0 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@
 	if (skb != NULL) {
 		data = skb_put(skb, ETHERMINPACKET);
 		memset(data, 0, ETHERMINPACKET);
-		memcpy(data, dev->dev_addr, 6);
-		memcpy(data+6, dev->dev_addr, 6);
+		memcpy(data, dev->dev_addr, ETH_ALEN);
+		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
 		bmac_transmit_packet(skb, dev);
 	}
 	spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 4ce8ceb..58a200d 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -211,6 +211,7 @@
 	mp = netdev_priv(dev);
 
 	mp->device = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	dev->base_addr = (u32)MACE_BASE;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d087852..b2ffad1 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -671,6 +671,7 @@
 	if (!ndev)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -725,10 +726,10 @@
 	/* Get MAC address from device tree */
 	mac_addr = of_get_mac_address(pdev->dev.of_node);
 
-	if (!mac_addr || !is_valid_ether_addr(mac_addr))
-		eth_hw_addr_random(ndev);
-	else
+	if (mac_addr)
 		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+	else
+		eth_hw_addr_random(ndev);
 
 	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b23..5aa5e81 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@
 
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	free_netdev(alx->dev);
 }
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f05565..7f9369a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092..1cda49a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
 	int i;
-	int ret = false;
+	bool ret = false;
 	u32 otp_ctrl_data;
 	u32 control;
 	u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934..1b0fe2d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@
 extern char atl1e_driver_name[];
 extern char atl1e_driver_version[];
 
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444..7a73f3a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@
 	}
 }
 
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+	if (features & NETIF_F_RXALL) {
+		/* enable RX of ALL frames */
+		*mac_ctrl_data |= MAC_CTRL_DBG;
+	} else {
+		/* disable RX of ALL frames */
+		*mac_ctrl_data &= ~MAC_CTRL_DBG;
+	}
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+	netdev_features_t features)
+{
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+	u32 mac_ctrl_data = 0;
+
+	netdev_dbg(adapter->netdev, "%s\n", __func__);
+
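+	/* quiesce interrupts while REG_MAC_CTRL is read-modified-written */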
+	atl1e_irq_disable(adapter);
+	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+	__atl1e_rx_mode(features, &mac_ctrl_data);
+	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+	atl1e_irq_enable(adapter);
+}
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@
 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		atl1e_vlan_mode(netdev, features);
 
+	if (changed & NETIF_F_RXALL)
+		atl1e_rx_mode(netdev, features);
+
 	return 0;
 }
 
@@ -1057,7 +1089,8 @@
 		value |= MAC_CTRL_PROMIS_EN;
 	if (netdev->flags & IFF_ALLMULTI)
 		value |= MAC_CTRL_MC_ALL_EN;
-
+	if (netdev->features & NETIF_F_RXALL)
+		value |= MAC_CTRL_DBG;
 	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }
 
@@ -1405,7 +1438,8 @@
 			rx_page_desc[que].rx_nxseq++;
 
 			/* error packet */
-			if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+			if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+			    !(netdev->features & NETIF_F_RXALL)) {
 				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
 					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
 					RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@
 			}
 
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-					RRS_PKT_SIZE_MASK) - 4; /* CRC */
+					RRS_PKT_SIZE_MASK);
+			if (likely(!(netdev->features & NETIF_F_RXFCS)))
+				packet_size -= 4; /* CRC */
+
 			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 			if (skb == NULL)
 				goto skip_pkt;
@@ -2245,7 +2282,8 @@
 			      NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features = netdev->hw_features | NETIF_F_LLTX |
 			   NETIF_F_HW_VLAN_CTAG_TX;
-
+	/* not enabled by default */
+	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f..2f27d4c 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
 #include "atlx.h"
 
 #ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
 #endif
 
 #define PCI_COMMAND_REGISTER	PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9..90e54d5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@
 static void b44_tx(struct b44 *bp)
 {
 	u32 cur, cons;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 	cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@
 				 skb->len,
 				 DMA_TO_DEVICE);
 		rp->skb = NULL;
+
+		bytes_compl += skb->len;
+		pkts_compl++;
+
 		dev_kfree_skb_irq(skb);
 	}
 
+	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
 	bp->tx_cons = cons;
 	if (netif_queue_stopped(bp->dev) &&
 	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@
 	if (bp->flags & B44_FLAG_REORDER_BUG)
 		br32(bp, B44_DMATX_PTR);
 
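+	/* byte queue limits: record bytes handed to the hardware ring */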
+	netdev_sent_queue(dev, skb->len);
+
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
@@ -1416,6 +1424,8 @@
 
 	val = br32(bp, B44_ENET_CTRL);
 	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+	netdev_reset_queue(bp->dev);
 }
 
 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@
 	 * valid PHY address. */
 	bp->phy_addr &= 0x1F;
 
-	memcpy(bp->dev->dev_addr, addr, 6);
+	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
 
 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
 		pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@
 		goto err_out_free_dev;
 	}
 
-	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
-	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
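
The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added to b44 above are the three touchpoints of byte queue limits (BQL). A condensed sketch of the pattern, with hypothetical helpers:

	#include <linux/netdevice.h>

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		/* ... map buffers and post the descriptor (elided) ... */
		netdev_sent_queue(dev, skb->len);	/* account queued bytes */
		return NETDEV_TX_OK;
	}

	static void foo_tx_reclaim(struct net_device *dev)
	{
		unsigned int bytes_compl = 0, pkts_compl = 0;

		/* ... per reclaimed skb: bytes_compl += skb->len; pkts_compl++; ... */
		netdev_completed_queue(dev, pkts_compl, bytes_compl);
	}

	static void foo_hw_init(struct net_device *dev)
	{
		/* Hardware ring was reset, so reset the BQL accounting too */
		netdev_reset_queue(dev);
	}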
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 9e8a3e0..e2aa09c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@
 	dma_desc->ctl0 = cpu_to_le32(ctl0);
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+	netdev_sent_queue(net_dev, skb->len);
+
 	wmb();
 
 	/* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@
 	struct device *dma_dev = bgmac->core->dma_dev;
 	int empty_slot;
 	bool freed = false;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@
 					 slot->skb->len, DMA_TO_DEVICE);
 			slot->dma_addr = 0;
 
+			bytes_compl += slot->skb->len;
+			pkts_compl++;
+
 			/* Free memory! :) */
 			dev_kfree_skb(slot->skb);
 			slot->skb = NULL;
@@ -208,6 +214,8 @@
 		freed = true;
 	}
 
+	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
 	if (freed && netif_queue_stopped(bgmac->net_dev))
 		netif_wake_queue(bgmac->net_dev);
 }
@@ -277,6 +285,26 @@
 	return 0;
 }
 
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+				    struct bgmac_dma_ring *ring, int desc_idx)
+{
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+	u32 ctl0 = 0, ctl1 = 0;
+
+	if (desc_idx == ring->num_slots - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+	/* Is there any BGMAC device that requires extension? */
+	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+	 * B43_DMA64_DCTL1_ADDREXT_MASK;
+	 */
+
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			     int weight)
 {
@@ -295,7 +323,6 @@
 		struct device *dma_dev = bgmac->core->dma_dev;
 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
 		struct sk_buff *skb = slot->skb;
-		struct sk_buff *new_skb;
 		struct bgmac_rx_header *rx;
 		u16 len, flags;
 
@@ -308,38 +335,51 @@
 		len = le16_to_cpu(rx->len);
 		flags = le16_to_cpu(rx->flags);
 
-		/* Check for poison and drop or pass the packet */
-		if (len == 0xdead && flags == 0xbeef) {
-			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
-				  ring->start);
-		} else {
+		do {
+			dma_addr_t old_dma_addr = slot->dma_addr;
+			int err;
+
+			/* Check for poison and drop or pass the packet */
+			if (len == 0xdead && flags == 0xbeef) {
+				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+					  ring->start);
+				dma_sync_single_for_device(dma_dev,
+							   slot->dma_addr,
+							   BGMAC_RX_BUF_SIZE,
+							   DMA_FROM_DEVICE);
+				break;
+			}
+
 			/* Omit CRC. */
 			len -= ETH_FCS_LEN;
 
-			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
-			if (new_skb) {
-				skb_put(new_skb, len);
-				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
-								 new_skb->data,
-								 len);
-				skb_checksum_none_assert(skb);
-				new_skb->protocol =
-					eth_type_trans(new_skb, bgmac->net_dev);
-				netif_receive_skb(new_skb);
-				handled++;
-			} else {
-				bgmac->net_dev->stats.rx_dropped++;
-				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+			/* Prepare new skb as replacement */
+			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+			if (err) {
+				/* Poison the old skb */
+				rx->len = cpu_to_le16(0xdead);
+				rx->flags = cpu_to_le16(0xbeef);
+
+				dma_sync_single_for_device(dma_dev,
+							   slot->dma_addr,
+							   BGMAC_RX_BUF_SIZE,
+							   DMA_FROM_DEVICE);
+				break;
 			}
+			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
 
-			/* Poison the old skb */
-			rx->len = cpu_to_le16(0xdead);
-			rx->flags = cpu_to_le16(0xbeef);
-		}
+			/* Unmap old skb, we'll pass it to the netif */
+			dma_unmap_single(dma_dev, old_dma_addr,
+					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
-		/* Make it back accessible to the hardware */
-		dma_sync_single_for_device(dma_dev, slot->dma_addr,
-					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+
+			skb_checksum_none_assert(skb);
+			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+			netif_receive_skb(skb);
+			handled++;
+		} while (0);
 
 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
 			ring->start = 0;
@@ -503,8 +543,6 @@
 static void bgmac_dma_init(struct bgmac *bgmac)
 {
 	struct bgmac_dma_ring *ring;
-	struct bgmac_dma_desc *dma_desc;
-	u32 ctl0, ctl1;
 	int i;
 
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -537,23 +575,8 @@
 		if (ring->unaligned)
 			bgmac_dma_rx_enable(bgmac, ring);
 
-		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
-		     j++, dma_desc++) {
-			ctl0 = ctl1 = 0;
-
-			if (j == ring->num_slots - 1)
-				ctl0 |= BGMAC_DESC_CTL0_EOT;
-			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
-			/* Is there any BGMAC device that requires extension? */
-			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
-			 * B43_DMA64_DCTL1_ADDREXT_MASK;
-			 */
-
-			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
-			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
-			dma_desc->ctl0 = cpu_to_le32(ctl0);
-			dma_desc->ctl1 = cpu_to_le32(ctl1);
-		}
+		for (j = 0; j < ring->num_slots; j++)
+			bgmac_dma_rx_setup_desc(bgmac, ring, j);
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
 			    ring->index_base +
@@ -996,6 +1019,8 @@
 	bgmac_miiconfig(bgmac);
 	bgmac_phy_init(bgmac);
 
+	netdev_reset_queue(bgmac->net_dev);
+
 	bgmac->int_status = 0;
 }
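
The rewritten bgmac receive path above replaces the per-packet copy with a buffer swap: allocate a fresh DMA buffer for the ring slot and hand the original skb to the stack. In outline (struct foo, struct foo_slot and foo_refill_slot() are hypothetical stand-ins; error paths trimmed):

	#include <linux/etherdevice.h>
	#include <linux/dma-mapping.h>

	static void foo_rx_one(struct foo *priv, struct foo_slot *slot, u16 len)
	{
		struct sk_buff *skb = slot->skb;
		dma_addr_t old_addr = slot->dma_addr;

		if (foo_refill_slot(priv, slot)) {
			/* No replacement buffer: re-arm the old one and drop */
			dma_sync_single_for_device(priv->dma_dev, old_addr,
						   FOO_RX_BUF_SIZE,
						   DMA_FROM_DEVICE);
			return;
		}

		dma_unmap_single(priv->dma_dev, old_addr, FOO_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		skb_put(skb, FOO_RX_OFFSET + len);
		skb_pull(skb, FOO_RX_OFFSET);	/* strip hardware RX header */
		skb->protocol = eth_type_trans(skb, priv->net_dev);
		netif_receive_skb(skb);		/* original buffer, no copy */
	}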
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f..d9980ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@
 	if (!skb)
 		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
-	memcpy(packet, bp->dev->dev_addr, 6);
-	memset(packet + 6, 0x0, 8);
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	memset(packet + ETH_ALEN, 0x0, 8);
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
@@ -8413,7 +8413,6 @@
 
 err_out_disable:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 err_out:
 	return rc;
@@ -8514,7 +8513,7 @@
 
 	pci_set_drvdata(pdev, dev);
 
-	memcpy(dev->dev_addr, bp->mac_addr, 6);
+	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 		NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@
 	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_free:
 	free_netdev(dev);
 	return rc;
@@ -8578,7 +8576,6 @@
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c5e375d..4e01c57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1546,6 +1546,7 @@
 #define IS_VF_FLAG			(1 << 22)
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
+#define HAS_PHYS_PORT_ID		(1 << 25)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1876,6 +1877,8 @@
 	u32 dump_preset_idx;
 	bool					stats_started;
 	struct semaphore			stats_sema;
+
+	u8					phys_port_id[ETH_ALEN];
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2232,7 +2235,7 @@
 #define BNX2X_NUM_TESTS_SF		7
 #define BNX2X_NUM_TESTS_MF		3
 #define BNX2X_NUM_TESTS(bp)		(IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-						     BNX2X_NUM_TESTS_SF)
+					     IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK		0
 #define BNX2X_MAC_LOOPBACK		1
@@ -2492,12 +2495,6 @@
 
 #define NUM_MACS	8
 
-enum bnx2x_pci_bus_speed {
-	BNX2X_PCI_LINK_SPEED_2500 = 2500,
-	BNX2X_PCI_LINK_SPEED_5000 = 5000,
-	BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);
 
 #define MCPR_SCRATCH_BASE(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 74d6486..dcafbda 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3256,14 +3256,16 @@
 	if (prot == IPPROTO_TCP)
 		rc |= XMIT_CSUM_TCP;
 
-	if (skb_is_gso_v6(skb)) {
-		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-		if (rc & XMIT_CSUM_ENC)
-			rc |= XMIT_GSO_ENC_V6;
-	} else if (skb_is_gso(skb)) {
-		rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-		if (rc & XMIT_CSUM_ENC)
-			rc |= XMIT_GSO_ENC_V4;
+	if (skb_is_gso(skb)) {
+		if (skb_is_gso_v6(skb)) {
+			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V6;
+		} else {
+			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V4;
+		}
 	}
 
 	return rc;
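
The restructured GSO classification above is more than cosmetic: skb_is_gso_v6() only inspects gso_type, which is meaningful only when gso_size is non-zero, so it must sit under the skb_is_gso() check. The guarded shape:

	if (skb_is_gso(skb)) {			/* gso_size != 0 */
		if (skb_is_gso_v6(skb))		/* gso_type & SKB_GSO_TCPV6 */
			rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP;
		else
			rc |= XMIT_GSO_V4 | XMIT_CSUM_TCP;
	}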
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e8efa1c9..32d0f14 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -639,6 +639,9 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	int regdump_len = 0;
 
+	if (IS_VF(bp))
+		return 0;
+
 	regdump_len = __bnx2x_get_regs_len(bp);
 	regdump_len *= 4;
 	regdump_len += sizeof(struct dump_header);
@@ -2864,9 +2867,16 @@
 
 	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
 
+	if (bnx2x_test_nvram(bp) != 0) {
+		if (!IS_MF(bp))
+			buf[4] = 1;
+		else
+			buf[0] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
 	if (!netif_running(dev)) {
-		DP(BNX2X_MSG_ETHTOOL,
-		   "Can't perform self-test when interface is down\n");
+		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
 		return;
 	}
 
@@ -2928,13 +2938,7 @@
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, link_up, is_serdes);
 	}
-	if (bnx2x_test_nvram(bp) != 0) {
-		if (!IS_MF(bp))
-			buf[4] = 1;
-		else
-			buf[0] = 1;
-		etest->flags |= ETH_TEST_FL_FAILED;
-	}
+
 	if (bnx2x_test_intr(bp) != 0) {
 		if (!IS_MF(bp))
 			buf[5] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 32767f6..cf1df8b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@
 		#define SHARED_HW_CFG_LED_MAC4                       0x000c0000
 		#define SHARED_HW_CFG_LED_PHY8                       0x000d0000
 		#define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+		#define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000
 
 
 	#define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 5146822..20dcc02 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3122,7 +3122,7 @@
 }
 
 static int bnx2x_bsc_read(struct link_params *params,
-			  struct bnx2x_phy *phy,
+			  struct bnx2x *bp,
 			  u8 sl_devid,
 			  u16 sl_addr,
 			  u8 lc_addr,
@@ -3131,7 +3131,6 @@
 {
 	u32 val, i;
 	int rc = 0;
-	struct bnx2x *bp = params->bp;
 
 	if (xfer_cnt > 16) {
 		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@
 			 * intended override.
 			 */
 			break;
-		} else
+		} else {
+			u32 nig_led_mode = ((params->hw_led_mode <<
+					     SHARED_HW_CFG_LED_MODE_SHIFT) ==
+					    SHARED_HW_CFG_LED_EXTPHY2) ?
+				(SHARED_HW_CFG_LED_PHY1 >>
+				 SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-			       hw_led_mode);
+			       nig_led_mode);
+		}
 
 		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 		/* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@
 			usleep_range(1000, 2000);
 			bnx2x_warpcore_power_module(params, 1);
 		}
-		rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+		rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
 				    data_array);
 	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 
@@ -10653,10 +10658,18 @@
 					 0x40);
 
 		} else {
+			/* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+			 * sources are all wired through LED1, rather than only
+			 * 10G in other modes.
+			 */
+			val = ((params->hw_led_mode <<
+				SHARED_HW_CFG_LED_MODE_SHIFT) ==
+			       SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
-					 0x80);
+					 val);
 
 			/* Tell LED3 to blink on source */
 			bnx2x_cl45_read(bp, phy,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b42f89c..e622cc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9916,7 +9916,7 @@
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
 	struct bnx2x_prev_path_list *tmp_list;
-	int rc = false;
+	bool rc = false;
 
 	if (down_trylock(&bnx2x_prev_sem))
 		return false;
@@ -11186,6 +11186,14 @@
 			bnx2x_get_cnic_mac_hwinfo(bp);
 	}
 
+	if (!BP_NOMCP(bp)) {
+		/* Read physical port identifier from shmem */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+		bp->flags |= HAS_PHYS_PORT_ID;
+	}
+
 	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 
 	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11784,7 +11792,7 @@
 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 	if (rc)
 		return rc;
-	return bnx2x_open_epilog(bp);
+	return 0;
 }
 
 /* called with rtnl_lock */
@@ -12082,6 +12090,20 @@
 	return 0;
 }
 
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+				  struct netdev_phys_port_id *ppid)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	if (!(bp->flags & HAS_PHYS_PORT_ID))
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(bp->phys_port_id);
+	memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+	return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_open		= bnx2x_open,
 	.ndo_stop		= bnx2x_close,
@@ -12111,18 +12133,15 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= bnx2x_low_latency_recv,
 #endif
+	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
 	struct device *dev = &bp->pdev->dev;
 
-	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
-		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
-			return -EIO;
-		}
-	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
 		dev_err(dev, "System does not support DMA, aborting\n");
 		return -EIO;
 	}
@@ -12274,10 +12293,13 @@
 		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 	if (!CHIP_IS_E1x(bp)) {
-		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
 		dev->hw_enc_features =
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+			NETIF_F_GSO_IPIP |
+			NETIF_F_GSO_SIT |
 			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 	}
 
@@ -12310,34 +12332,11 @@
 
 err_out_disable:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 err_out:
 	return rc;
 }
 
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
-				       enum bnx2x_pci_bus_speed *speed)
-{
-	u32 link_speed, val = 0;
-
-	pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
-	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-	link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
-	switch (link_speed) {
-	case 3:
-		*speed = BNX2X_PCI_LINK_SPEED_8000;
-		break;
-	case 2:
-		*speed = BNX2X_PCI_LINK_SPEED_5000;
-		break;
-	default:
-		*speed = BNX2X_PCI_LINK_SPEED_2500;
-	}
-}
-
 static int bnx2x_check_firmware(struct bnx2x *bp)
 {
 	const struct firmware *firmware = bp->firmware;
@@ -12694,8 +12693,8 @@
 {
 	struct net_device *dev = NULL;
 	struct bnx2x *bp;
-	int pcie_width;
-	enum bnx2x_pci_bus_speed pcie_speed;
+	enum pcie_link_width pcie_width;
+	enum pci_bus_speed pcie_speed;
 	int rc, max_non_def_sbs;
 	int rx_count, tx_count, rss_count, doorbell_size;
 	int max_cos_est;
@@ -12844,18 +12843,19 @@
 		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-
-	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-	BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
-		       pcie_width, pcie_speed);
-
-	BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+	if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+	    pcie_speed == PCI_SPEED_UNKNOWN ||
+	    pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+		BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+	else
+		BNX2X_DEV_INFO(
+		       "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
 		       board_info[ent->driver_data].name,
 		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 		       pcie_width,
-		       pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
-		       pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
-		       pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+		       pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+		       pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+		       pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
 		       "Unknown",
 		       dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
@@ -12874,7 +12874,6 @@
 		pci_release_regions(pdev);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	return rc;
 }
@@ -12957,7 +12956,6 @@
 		pci_release_regions(pdev);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 9fbeee5..32c92ab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1217,9 +1217,6 @@
 				     ETH_VLAN_FILTER_CLASSIFY, config);
 }
 
-#define list_next_entry(pos, member) \
-	list_entry((pos)->member.next, typeof(*(pos)), member)
-
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
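
The open-coded list_next_entry macro is dropped here, presumably because an identical generic helper is now provided by <linux/list.h>. Its use, for a hypothetical element type:

	#include <linux/list.h>

	struct foo_item {
		struct list_head link;
		int val;
	};

	/* Step to the following element on the same list; the caller must
	 * ensure 'item' is not the last entry before the list head.
	 */
	static struct foo_item *foo_next(struct foo_item *item)
	{
		return list_next_entry(item, link);
	}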
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5e07efb..0216d59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2816,7 +2816,7 @@
 	u8 state;
 };
 
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
 {
 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
 
@@ -3239,8 +3239,9 @@
 	pci_disable_sriov(bp->pdev);
 }
 
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
-			struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+			     struct bnx2x_virtf **vf,
+			     struct pf_vf_bulletin_content **bulletin)
 {
 	if (bp->state != BNX2X_STATE_OPEN) {
 		BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3656,29 +3657,6 @@
 	return -ENOMEM;
 }
 
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
-	/* Enable sriov via delayed work. This must be done via delayed work
-	 * because it causes the probe of the vf devices to be run, which invoke
-	 * register_netdevice which must have rtnl lock taken. As we are holding
-	 * the lock right now, that could only work if the probe would not take
-	 * the lock. However, as the probe of the vf may be called from other
-	 * contexts as well (such as passthrough to vm fails) it can't assume
-	 * the lock is being held for it. Using delayed work here allows the
-	 * probe code to simply take the lock (i.e. wait for it to be released
-	 * if it is being held). We only want to do this if the number of VFs
-	 * was set before PF driver was loaded.
-	 */
-	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
-	}
-
-	return 0;
-}
-
 void bnx2x_iov_channel_down(struct bnx2x *bp)
 {
 	int vf_idx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 059f0d4..1ff6a93 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -782,7 +782,6 @@
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
 
 #else /* CONFIG_BNX2X_SRIOV */
 
@@ -842,7 +841,6 @@
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 28757df..9199adf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -60,6 +60,30 @@
 	mutex_unlock(&bp->vf2pf_mutex);
 }
 
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+				   enum channel_tlvs req_tlv)
+{
+	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+	do {
+		if (tlv->type == req_tlv)
+			return tlv;
+
+		if (!tlv->length) {
+			BNX2X_ERR("Found TLV with length 0\n");
+			return NULL;
+		}
+
+		tlvs_list += tlv->length;
+		tlv = (struct channel_tlv *)tlvs_list;
+	} while (tlv->type != CHANNEL_TLV_LIST_END);
+
+	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+	return NULL;
+}
+
 /* list the types and lengths of the tlvs on the buffer */
 void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
 {
@@ -196,6 +220,7 @@
 	int rc = 0, attempts = 0;
 	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
 	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
 	u32 vf_id;
 	bool resources_acquired = false;
 
@@ -219,8 +244,14 @@
 	/* pf 2 vf bulletin board address */
 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
 
+	/* Request physical port identifier */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
 	/* add list termination tlv */
-	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+	bnx2x_add_tlv(bp, req,
+		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
+		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	/* output tlvs list */
@@ -287,6 +318,15 @@
 		}
 	}
 
+	/* Retrieve physical port id (if possible) */
+	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+			 bnx2x_search_tlv_list(bp, resp,
+					       CHANNEL_TLV_PHYS_PORT_ID);
+	if (phys_port_resp) {
+		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+		bp->flags |= HAS_PHYS_PORT_ID;
+	}
+
 	/* get HW info */
 	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
 	bp->link_params.chip_id = bp->common.chip_id;
@@ -983,53 +1023,59 @@
 	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 }
 
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf)
 {
 	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
-	u64 vf_addr;
-	dma_addr_t pf_addr;
 	u16 length, type;
-	int rc;
-	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
 
 	/* prepare response */
 	type = mbx->first_tlv.tl.type;
 	length = type == CHANNEL_TLV_ACQUIRE ?
 		sizeof(struct pfvf_acquire_resp_tlv) :
 		sizeof(struct pfvf_general_resp_tlv);
-	bnx2x_add_tlv(bp, resp, 0, type, length);
-	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
-	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+	dma_addr_t pf_addr;
+	u64 vf_addr;
+	int rc;
+
 	bnx2x_dp_tlv_list(bp, resp);
 	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
 	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
 
+	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
 	/* send response */
 	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
 		  mbx->first_tlv.resp_msg_offset;
 	pf_addr = mbx->msg_mapping +
 		  offsetof(struct bnx2x_vf_mbx_msg, resp);
 
-	/* copy the response body, if there is one, before the header, as the vf
-	 * is sensitive to the header being written
+	/* Copy the response buffer. The first u64 is written afterwards, as
+	 * the VF is sensitive to the header being written.
 	 */
-	if (resp->hdr.tl.length > sizeof(u64)) {
-		length = resp->hdr.tl.length - sizeof(u64);
-		vf_addr += sizeof(u64);
-		pf_addr += sizeof(u64);
-		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
-					  U64_HI(vf_addr),
-					  U64_LO(vf_addr),
-					  length/4);
-		if (rc) {
-			BNX2X_ERR("Failed to copy response body to VF %d\n",
-				  vf->abs_vfid);
-			goto mbx_error;
-		}
-		vf_addr -= sizeof(u64);
-		pf_addr -= sizeof(u64);
+	vf_addr += sizeof(u64);
+	pf_addr += sizeof(u64);
+	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+				  U64_HI(vf_addr),
+				  U64_LO(vf_addr),
+				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+	if (rc) {
+		BNX2X_ERR("Failed to copy response body to VF %d\n",
+			  vf->abs_vfid);
+		goto mbx_error;
 	}
+	vf_addr -= sizeof(u64);
+	pf_addr -= sizeof(u64);
 
 	/* ack the FW */
 	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@
 	bnx2x_vf_release(bp, vf, false); /* non blocking */
 }
 
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf)
+{
+	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+	bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+					struct bnx2x_virtf *vf,
+					void *buffer,
+					u16 *offset)
+{
+	struct vfpf_port_phys_id_resp_tlv *port_id;
+
+	if (!(bp->flags & HAS_PHYS_PORT_ID))
+		return;
+
+	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+		      sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+	port_id = (struct vfpf_port_phys_id_resp_tlv *)
+		  (((u8 *)buffer) + *offset);
+	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+	/* Leave *offset pointing at the tail of the TLV data, so the
+	 * caller can continue appending TLVs after this function returns
+	 */
+	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				      struct bnx2x_vf_mbx *mbx, int vfop_status)
 {
@@ -1067,6 +1143,7 @@
 	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
 	struct pf_vf_resc *resc = &resp->resc;
 	u8 status = bnx2x_pfvf_status_codes(vfop_status);
+	u16 length;
 
 	memset(resp, 0, sizeof(*resp));
 
@@ -1140,9 +1217,24 @@
 			resc->hw_sbs[i].sb_qid);
 	DP_CONT(BNX2X_MSG_IOV, "]\n");
 
+	/* prepare response */
+	length = sizeof(struct pfvf_acquire_resp_tlv);
+	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+	/* Handle possible VF requests for physical port identifiers.
+	 * 'length' should continue to indicate the offset of the first empty
+	 * place in the buffer (i.e., where the next TLV should be inserted)
+	 */
+	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+				  CHANNEL_TLV_PHYS_PORT_ID))
+		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
 	/* send the response */
 	vf->op_rc = vfop_status;
-	bnx2x_vf_mbx_resp(bp, vf);
+	bnx2x_vf_mbx_resp_send_msg(bp, vf);
 }
 
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@
 	/* process the VF message header */
 	mbx->first_tlv = mbx->msg->req.first_tlv;
 
+	/* Clean the response buffer to avoid falsely seeing stale TLV chains */
+	memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
 	/* dispatch the request (will prepare the response) */
 	bnx2x_vf_mbx_request(bp, vf, mbx);
 	goto mbx_done;
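
The acquire path above now appends a CHANNEL_TLV_PHYS_PORT_ID entry ahead of the list-end marker. The TLV-building discipline on this channel is to place each TLV at the running offset, advance the offset by that TLV's length, and close the chain with CHANNEL_TLV_LIST_END so that bnx2x_search_tlv_list() can walk it. Roughly (buffer and sizes illustrative):

	u16 offset = 0;

	bnx2x_add_tlv(bp, buf, offset, CHANNEL_TLV_ACQUIRE,
		      sizeof(struct vfpf_acquire_tlv));
	offset += sizeof(struct vfpf_acquire_tlv);

	/* header-only TLV: request for the physical port id */
	bnx2x_add_tlv(bp, buf, offset, CHANNEL_TLV_PHYS_PORT_ID,
		      sizeof(struct channel_tlv));
	offset += sizeof(struct channel_tlv);

	bnx2x_add_tlv(bp, buf, offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));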
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 1179fe0..208568b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -188,6 +188,12 @@
 	} resc;
 };
 
+struct vfpf_port_phys_id_resp_tlv {
+	struct channel_tlv tl;
+	u8 id[ETH_ALEN];
+	u8 padding[2];
+};
+
 #define VFPF_INIT_FLG_STATS_COALESCE	(1 << 0) /* when set the VFs queues
 						  * stats will be coalesced on
 						  * the leading RSS queue
@@ -398,6 +404,7 @@
 	CHANNEL_TLV_PF_SET_MAC,
 	CHANNEL_TLV_PF_SET_VLAN,
 	CHANNEL_TLV_UPDATE_RSS,
+	CHANNEL_TLV_PHYS_PORT_ID,
 	CHANNEL_TLV_MAX
 };
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 99394bd..f58a8b8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -393,7 +393,7 @@
 
 			csk->vlan_id = path_resp->vlan_id;
 
-			memcpy(csk->ha, path_resp->mac_addr, 6);
+			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 			if (test_bit(SK_F_IPV6, &csk->flags))
 				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 				       sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@
 	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
 		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
 
-	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
 
 	cp->cnic_ops = &cnic_bnx2x_ops;
 	cp->start_hw = cnic_start_bnx2x_hw;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0658b43..ebbfe25 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -353,8 +353,8 @@
 	atomic_t ref_count;
 };
 
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
 
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
 
 #endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 12d961c..00c5be8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			133
+#define TG3_MIN_NUM			134
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"Jul 29, 2013"
+#define DRV_MODULE_RELDATE	"Sep 16, 2013"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -337,6 +337,11 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@
 	return err;
 }
 
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+			    reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
 	u32 phy_control;
@@ -1364,7 +1375,7 @@
 
 	spin_lock_bh(&tp->lock);
 
-	if (tg3_readphy(tp, reg, &val))
+	if (__tg3_readphy(tp, mii_id, reg, &val))
 		val = -EIO;
 
 	spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@
 
 	spin_lock_bh(&tp->lock);
 
-	if (tg3_writephy(tp, reg, val))
+	if (__tg3_writephy(tp, mii_id, reg, val))
 		ret = -EIO;
 
 	spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@
 	u32 val;
 	struct phy_device *phydev;
 
-	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case PHY_ID_BCM50610:
 	case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@
 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
 		if (is_serdes)
 			tp->phy_addr += 7;
+	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+		int addr;
+
+		addr = ssb_gige_get_phyaddr(tp->pdev);
+		if (addr < 0)
+			return addr;
+		tp->phy_addr = addr;
 	} else
 		tp->phy_addr = TG3_PHY_MII_ADDR;
 
@@ -1522,7 +1540,7 @@
 	tp->mdio_bus->read     = &tg3_mdio_read;
 	tp->mdio_bus->write    = &tg3_mdio_write;
 	tp->mdio_bus->reset    = &tg3_mdio_reset;
-	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@
 		return i;
 	}
 
-	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 	if (!phydev || !phydev->drv) {
 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@
 	u32 old_tx_mode = tp->tx_mode;
 
 	if (tg3_flag(tp, USE_PHYLIB))
-		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
@@ -1989,7 +2007,7 @@
 	u8 oldflowctrl, linkmesg = 0;
 	u32 mac_mode, lcl_adv, rmt_adv;
 	struct tg3 *tp = netdev_priv(dev);
-	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 	spin_lock_bh(&tp->lock);
 
@@ -2078,7 +2096,7 @@
 	/* Bring the PHY back to a known state. */
 	tg3_bmcr_reset(tp);
 
-	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 	/* Attach the MAC to the PHY. */
 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@
 				      SUPPORTED_Asym_Pause);
 		break;
 	default:
-		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
 		return -EINVAL;
 	}
 
@@ -2123,7 +2141,7 @@
 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 		return;
 
-	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@
 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 		return;
 
-	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
-		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 	}
 }
@@ -2218,25 +2236,21 @@
 		return;
 	}
 
-	reg = MII_TG3_MISC_SHDW_WREN |
-	      MII_TG3_MISC_SHDW_SCR5_SEL |
-	      MII_TG3_MISC_SHDW_SCR5_LPED |
+	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 
-	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
 
 
-	reg = MII_TG3_MISC_SHDW_WREN |
-	      MII_TG3_MISC_SHDW_APD_SEL |
-	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
 	if (enable)
 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 
-	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
 }
 
 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@
 			struct phy_device *phydev;
 			u32 phyid, advertising;
 
-			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
@@ -6848,12 +6862,6 @@
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
-			skb = build_skb(data, frag_size);
-			if (!skb) {
-				tg3_frag_free(frag_size != 0, data);
-				goto drop_it_no_recycle;
-			}
-			skb_reserve(skb, TG3_RX_OFFSET(tp));
 			/* Ensure that the update to the data happens
 			 * after the usage of the old DMA mapping.
 			 */
@@ -6861,6 +6869,12 @@
 
 			ri->data = NULL;
 
+			skb = build_skb(data, frag_size);
+			if (!skb) {
+				tg3_frag_free(frag_size != 0, data);
+				goto drop_it_no_recycle;
+			}
+			skb_reserve(skb, TG3_RX_OFFSET(tp));
 		} else {
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
@@ -9196,10 +9210,7 @@
 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 	}
 
-	if (err)
-		return err;
-
-	return 0;
+	return err;
 }
 
 static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@
 		name = tp->dev->name;
 	else {
 		name = &tnapi->irq_lbl[0];
-		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+		if (tnapi->tx_buffers && tnapi->rx_rcb)
+			snprintf(name, IFNAMSIZ,
+				 "%s-txrx-%d", tp->dev->name, irq_num);
+		else if (tnapi->tx_buffers)
+			snprintf(name, IFNAMSIZ,
+				 "%s-tx-%d", tp->dev->name, irq_num);
+		else if (tnapi->rx_rcb)
+			snprintf(name, IFNAMSIZ,
+				 "%s-rx-%d", tp->dev->name, irq_num);
+		else
+			snprintf(name, IFNAMSIZ,
+				 "%s-%d", tp->dev->name, irq_num);
 		name[IFNAMSIZ-1] = 0;
 	}
 
@@ -11907,7 +11929,7 @@
 		struct phy_device *phydev;
 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
-		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 		return phy_ethtool_gset(phydev, cmd);
 	}
 
@@ -11974,7 +11996,7 @@
 		struct phy_device *phydev;
 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
-		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 		return phy_ethtool_sset(phydev, cmd);
 	}
 
@@ -12093,12 +12115,10 @@
 
 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
 
-	spin_lock_bh(&tp->lock);
 	if (device_may_wakeup(dp))
 		tg3_flag_set(tp, WOL_ENABLE);
 	else
 		tg3_flag_clear(tp, WOL_ENABLE);
-	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -12131,7 +12151,7 @@
 	if (tg3_flag(tp, USE_PHYLIB)) {
 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
-		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
 	} else {
 		u32 bmcr;
 
@@ -12247,7 +12267,7 @@
 		u32 newadv;
 		struct phy_device *phydev;
 
-		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
 		if (!(phydev->supported & SUPPORTED_Pause) ||
 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@
 		return -ENOMEM;
 
 	tx_data = skb_put(skb, tx_len);
-	memcpy(tx_data, tp->dev->dev_addr, 6);
-	memset(tx_data + 6, 0x0, 8);
+	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+	memset(tx_data + ETH_ALEN, 0x0, 8);
 
 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 
@@ -13683,7 +13703,7 @@
 		struct phy_device *phydev;
 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
-		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 		return phy_mii_ioctl(phydev, ifr, cmd);
 	}
 
@@ -14921,6 +14941,12 @@
 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
 						 LED_CTRL_MODE_PHY_2);
+
+			if (tg3_flag(tp, 5717_PLUS) ||
+			    tg3_asic_rev(tp) == ASIC_REV_5762)
+				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+						LED_CTRL_BLINK_RATE_MASK;
+
 			break;
 
 		case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
 			reg = TG3PCI_GEN2_PRODID_ASICREV;
 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@
 	int len;
 
 	addr = of_get_property(dp, "local-mac-address", &len);
-	if (addr && len == 6) {
-		memcpy(dev->dev_addr, addr, 6);
+	if (addr && len == ETH_ALEN) {
+		memcpy(dev->dev_addr, addr, ETH_ALEN);
 		return 0;
 	}
 	return -ENODEV;
@@ -16643,7 +16672,7 @@
 {
 	struct net_device *dev = tp->dev;
 
-	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 	return 0;
 }
 #endif
@@ -17052,10 +17081,6 @@
 
 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 
-#if 0
-	/* Unneeded, already done by tg3_get_invariants.  */
-	tg3_switch_clocks(tp);
-#endif
 
 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
 	    tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@
 			break;
 		}
 
-#if 0
-		/* validate data reached card RAM correctly. */
-		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
-			u32 val;
-			tg3_read_mem(tp, 0x2100 + (i*4), &val);
-			if (le32_to_cpu(val) != p[i]) {
-				dev_err(&tp->pdev->dev,
-					"%s: Buffer corrupted on device! "
-					"(%d != %d)\n", __func__, val, i);
-				/* ret = -ENODEV here? */
-			}
-			p[i] = 0;
-		}
-#endif
 		/* Now read it back. */
 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
 		if (ret) {
@@ -17362,8 +17373,10 @@
 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
 		if (ssb_gige_one_dma_at_once(pdev))
 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
-		if (ssb_gige_have_roboswitch(pdev))
+		if (ssb_gige_have_roboswitch(pdev)) {
+			tg3_flag_set(tp, USE_PHYLIB);
 			tg3_flag_set(tp, ROBOSWITCH);
+		}
 		if (ssb_gige_is_rgmii(pdev))
 			tg3_flag_set(tp, RGMII_MODE);
 	}
@@ -17409,9 +17422,12 @@
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
 		tg3_flag_set(tp, ENABLE_APE);
 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
 		if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@
 
 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 		struct phy_device *phydev;
-		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 		netdev_info(dev,
 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
 			    phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@
 err_out_disable_pdev:
 	if (pci_is_enabled(pdev))
 		pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
@@ -17717,7 +17732,6 @@
 		free_netdev(dev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
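
With tg3_phy_shdw_write() in place, callers of the MISC shadow register no longer OR in the write-enable bit and selector themselves; compare:

	/* before: write-enable and selector repeated at every call site */
	tg3_writephy(tp, MII_TG3_MISC_SHDW,
		     MII_TG3_MISC_SHDW_WREN | MII_TG3_MISC_SHDW_APD_SEL | reg);

	/* after: the helper supplies MII_TG3_MISC_SHDW_WREN */
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);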
 
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 7025780..5c3835a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -68,6 +68,9 @@
 #define  TG3PCI_DEVICE_TIGON3_5762	 0x1687
 #define  TG3PCI_DEVICE_TIGON3_5725	 0x1643
 #define  TG3PCI_DEVICE_TIGON3_5727	 0x16f3
+#define  TG3PCI_DEVICE_TIGON3_57764	 0x1642
+#define  TG3PCI_DEVICE_TIGON3_57767	 0x1683
+#define  TG3PCI_DEVICE_TIGON3_57787	 0x1641
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b78e69e..248bc37 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3212,7 +3212,6 @@
 	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
 	if (!bnad->bar0) {
 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
-		pci_set_drvdata(pdev, NULL);
 		return -ENOMEM;
 	}
 	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@ -3300,17 +3299,12 @@
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = true;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err)
-				goto release_regions;
-		}
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			goto release_regions;
 		*using_dac = false;
 	}
 	pci_set_master(pdev);
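
Several conversions in this series, including the bnad one above, fold paired dma_set_mask()/dma_set_coherent_mask() calls into the newer dma_set_mask_and_coherent() helper. The usual 64-bit-then-32-bit fallback, sketched with a hypothetical function name:

	#include <linux/dma-mapping.h>

	static int foo_set_dma_mask(struct device *dev)
	{
		/* Prefer a 64-bit mask, fall back to 32-bit, else give up */
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return 0;
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return 0;
		dev_err(dev, "no usable DMA configuration\n");
		return -EIO;
	}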
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index aefee77..f7e033f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -372,38 +372,37 @@
 /*
  * EXTERN PROTOTYPES
  */
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
 /* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
-				struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
 
 /* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
-		struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
-		struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+			     struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+			      struct rtnl_link_stats64 *stats);
 
 /* Debugfs */
-void	bnad_debugfs_init(struct bnad *bnad);
-void	bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
 
 /* MACROS */
 /* To set & get the stats counters */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 48f5288..4fc5c8e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1060,13 +1060,13 @@
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
 
-	netif_stop_queue(dev);
-
 	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
 		napi_disable(&priv->napi);
 
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_disable(dev);
+
 	/* Disable the MAC core */
 	xgmac_mac_disable(priv->base);
 
@@ -1370,11 +1370,8 @@
 	}
 
 	old_mtu = dev->mtu;
-	dev->mtu = new_mtu;
 
 	/* return early if the buffer sizes will not change */
-	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
-		return 0;
 	if (old_mtu == new_mtu)
 		return 0;
 
@@ -1382,8 +1379,9 @@
 	if (!netif_running(dev))
 		return 0;
 
-	/* Bring the interface down and then back up */
+	/* Bring interface down, change mtu and bring interface back up */
 	xgmac_stop(dev);
+	dev->mtu = new_mtu;
 	return xgmac_open(dev);
 }
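
The reordered xgmac_change_mtu() above illustrates the down/change/up discipline for drivers whose receive buffers are sized from the MTU: dev->mtu must be updated only after the old buffers are torn down and before new ones are allocated. A generic sketch (not the driver's exact logic; foo_stop()/foo_open() are hypothetical):

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		if (dev->mtu == new_mtu)
			return 0;

		if (!netif_running(dev)) {
			dev->mtu = new_mtu;	/* applied at next open */
			return 0;
		}

		foo_stop(dev);		/* quiesce DMA, free old-size buffers */
		dev->mtu = new_mtu;	/* change before re-allocation */
		return foo_open(dev);
	}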
 
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1..8abb46b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -324,30 +324,30 @@
 	return board_info(adap)->clock_core / 1000000;
 }
 
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
 
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
 
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
 						    unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
 		     struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
 			    int speed, int duplex, int pause);
 #endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d7048db..1d02105 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1168,7 +1168,6 @@
 	pci_release_regions(pdev);
 out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
@@ -1347,7 +1346,6 @@
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	t1_sw_reset(pdev);
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93..eb33a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -499,7 +499,7 @@
 
 static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
 {
-	memcpy(mac_addr, cmac->instance->mac_addr, 6);
+	memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
 	return 0;
 }
 
@@ -526,7 +526,7 @@
 	 */
 
 	/* Store local copy */
-	memcpy(cmac->instance->mac_addr, ma, 6);
+	memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
 
 	lo  = ((u32) ma[1] << 8) | (u32) ma[0];
 	mid = ((u32) ma[3] << 8) | (u32) ma[2];
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b650951..45d7733 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3374,7 +3374,6 @@
 	pci_release_regions(pdev);
 out_disable_device:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 out:
 	return err;
 }
@@ -3415,7 +3414,6 @@
 		kfree(adapter);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c..81029b8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -685,10 +685,6 @@
 #define V_BUSY(x) ((x) << S_BUSY)
 #define F_BUSY    V_BUSY(1U)
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define A_MC7_EXT_MODE1 0x108
 
 #define A_MC7_EXT_MODE2 0x10c
@@ -749,14 +745,6 @@
 
 #define A_MC7_CAL 0x128
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define S_CAL_FAULT    30
 #define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
 #define F_CAL_FAULT    V_CAL_FAULT(1U)
@@ -815,9 +803,6 @@
 #define V_OP(x) ((x) << S_OP)
 #define F_OP    V_OP(1U)
 
-#define F_OP    V_OP(1U)
-#define A_SF_OP 0x6dc
-
 #define A_MC7_BIST_ADDR_BEG 0x168
 
 #define A_MC7_BIST_ADDR_END 0x16c
@@ -830,8 +815,6 @@
 #define V_CONT(x) ((x) << S_CONT)
 #define F_CONT    V_CONT(1U)
 
-#define F_CONT    V_CONT(1U)
-
 #define A_MC7_INT_ENABLE 0x178
 
 #define S_AE    17
@@ -1017,8 +1000,6 @@
 #define V_NICMODE(x) ((x) << S_NICMODE)
 #define F_NICMODE    V_NICMODE(1U)
 
-#define F_NICMODE    V_NICMODE(1U)
-
 #define S_IPV6ENABLE    15
 #define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
 #define F_IPV6ENABLE    V_IPV6ENABLE(1U)
@@ -1562,27 +1543,15 @@
 #define A_ULPRX_STAG_ULIMIT 0x530
 
 #define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
 
 #define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
 
 #define A_ULPRX_PBL_LLIMIT 0x53c
 
 #define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
 
 #define A_ULPRX_TDDP_TAGMASK 0x524
 
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
 #define A_ULPTX_CONFIG 0x580
 
 #define S_CFG_CQE_SOP_MASK    1
@@ -2053,8 +2022,6 @@
 #define V_TMMODE(x) ((x) << S_TMMODE)
 #define F_TMMODE    V_TMMODE(1U)
 
-#define F_TMMODE    V_TMMODE(1U)
-
 #define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
 
 #define A_MC5_DB_FILTER_TABLE 0x710
@@ -2454,8 +2421,6 @@
 #define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
 #define F_TXACTENABLE    V_TXACTENABLE(1U)
 
-#define A_XGM_SERDES_CTRL0 0x8e0
-
 #define S_RESET3    23
 #define V_RESET3(x) ((x) << S_RESET3)
 #define F_RESET3    V_RESET3(1U)
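
The regs.h hunks above remove blocks of byte-identical duplicated defines. C permits redefining a macro only when the replacement list is identical, so the duplicates compiled cleanly but were pure clutter; had any copy ever diverged, the build would have started warning. Standalone illustration:

/* Legal: an identical redefinition, which is what regs.h carried many
 * times over. A differing body would draw "warning: 'F_BUSY' redefined". */
#define F_BUSY 0x80000000u
#define F_BUSY 0x80000000u

int main(void)
{
	return F_BUSY == 0x80000000u ? 0 : 1;
}
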
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index dfd1e36..ecd2fb3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,7 +48,6 @@
 #include <linux/vmalloc.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
-#include "t4_hw.h"
 
 #define FW_VERSION_MAJOR 1
 #define FW_VERSION_MINOR 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c73cabd..8b929ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3983,6 +3983,7 @@
 	struct net_device *event_dev;
 	int ret = NOTIFY_DONE;
 	struct bonding *bond = netdev_priv(ifa->idev->dev);
+	struct list_head *iter;
 	struct slave *slave;
 	struct pci_dev *first_pdev = NULL;
 
@@ -3995,7 +3996,7 @@
 		 * in all of them only once.
 		 */
 		read_lock(&bond->lock);
-		bond_for_each_slave(bond, slave) {
+		bond_for_each_slave(bond, slave, iter) {
 			if (!first_pdev) {
 				ret = clip_add(slave->dev, ifa, event);
 				/* If clip_add is success then only initialize
@@ -6074,7 +6075,6 @@
 	pci_disable_device(pdev);
  out_release_regions:
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
@@ -6122,7 +6122,6 @@
 		pci_disable_pcie_error_reporting(pdev);
 		pci_disable_device(pdev);
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
 	} else
 		pci_release_regions(pdev);
 }
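
The cxgb4_main.c hunk above tracks a 3.13 bonding API change: bond_for_each_slave() now walks the slave list through a caller-supplied struct list_head cursor instead of an integer index. A sketch of the updated call shape, assuming that 3.13-era macro signature and a hypothetical wrapper:

static void demo_walk_slaves(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		/* per-slave work, e.g. programming a filter on slave->dev */
	}
}
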
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 40c22e7..5f90ec5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2782,11 +2782,9 @@
 
 err_free_adapter:
 	kfree(adapter);
-	pci_set_drvdata(pdev, NULL);
 
 err_release_regions:
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	pci_clear_master(pdev);
 
 err_disable_device:
@@ -2851,7 +2849,6 @@
 		}
 		iounmap(adapter->regs);
 		kfree(adapter);
-		pci_set_drvdata(pdev, NULL);
 	}
 
 	/*
@@ -2908,7 +2905,7 @@
 #define CH_DEVICE(devid, idx) \
 	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
 
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
 	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
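
The device table above is converted to DEFINE_PCI_DEVICE_TABLE. Around 3.13 that macro expanded to little more than "const struct pci_device_id _table[]", so the conversion mostly buys constness. A sketch with placeholder IDs:

#include <linux/module.h>
#include <linux/pci.h>

/* Assumes the 3.13-era definition:
 *   #define DEFINE_PCI_DEVICE_TABLE(_table) \
 *           const struct pci_device_id _table[]
 * The vendor/device pair below is a placeholder. */
static DEFINE_PCI_DEVICE_TABLE(demo_pci_tbl) = {
	{ PCI_DEVICE(0x1425, 0x4800) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);
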
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index df296af..8475c4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1396,8 +1396,9 @@
  *	Builds an sk_buff from the given packet gather list.  Returns the
  *	sk_buff or %NULL if sk_buff allocation failed.
  */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
-				  unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+					 unsigned int skb_len,
+					 unsigned int pull_len)
 {
 	struct sk_buff *skb;
 
@@ -1443,7 +1444,7 @@
  *	Releases the pages of a packet gather list.  We do not own the last
  *	page on the list and do not free it.
  */
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
 {
 	int frag;
 
@@ -1640,7 +1641,7 @@
  *	on this queue.  If the system is under memory shortage use a fairly
  *	long delay to help recovery.
  */
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 	int budget_left = budget;
@@ -1893,7 +1894,7 @@
  * The MSI interrupt handler handles data events from SGE response queues as
  * well as error and other async events as they all use the same MSI vector.
  */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
 {
 	struct adapter *adapter = cookie;
 
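The sge.c hunks above mark file-local functions static, giving them internal linkage: the compiler may inline or discard them, and sparse stops asking "symbol 'x' was not declared. Should it be static?". Trivial standalone example:

/* demo_double is invisible outside this translation unit, so the
 * compiler can inline it freely and the symbol table stays clean. */
static int demo_double(int x)
{
	return 2 * x;
}

int main(void)
{
	return demo_double(2) == 4 ? 0 : 1;
}
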
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 7b756cf9..ff78dfa 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2309,7 +2309,6 @@
 err_out_disable_device:
 	pci_disable_device(pdev);
 err_out_free_netdev:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 
 	return err;
@@ -2338,7 +2337,6 @@
 		enic_iounmap(enic);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 		free_netdev(netdev);
 	}
 }
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a7a941b..7080ad6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1623,7 +1623,7 @@
 
 	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
 		mac_src = "platform data";
-		memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
 	}
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index eaab73c..38148b0 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2110,7 +2110,6 @@
 	iounmap(de->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 263b92c..c05b66d 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2328,7 +2328,7 @@
 	pci_disable_device (pdev);
 }
 
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 8313930..5ad9e3e 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -523,7 +523,6 @@
 err_out_disable:
 	pci_disable_device(pdev);
 err_out_free:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 
 	return err;
@@ -548,8 +547,6 @@
 					db->buf_pool_ptr, db->buf_pool_dma_ptr);
 		pci_release_regions(pdev);
 		free_netdev(dev);	/* free board information */
-
-		pci_set_drvdata(pdev, NULL);
 	}
 
 	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4e8cfa2..add05f1 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,7 +1939,6 @@
 	pci_iounmap(pdev, tp->base_addr);
 	free_netdev (dev);
 	pci_release_regions (pdev);
-	pci_set_drvdata (pdev, NULL);
 
 	/* pci_power_off (pdev, -1); */
 }
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 93845af..a5397b1 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -429,7 +429,6 @@
 err_out_disable:
 	pci_disable_device(pdev);
 err_out_free:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 
 	return err;
@@ -450,7 +449,6 @@
 				db->buf_pool_ptr, db->buf_pool_dma_ptr);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index c7b04ec..62fe512 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -468,7 +468,6 @@
 	return 0;
 
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@
 		pci_iounmap(pdev, np->base_addr);
 		free_netdev(dev);
 	}
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 9b84cb0..ab7ebac 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -289,7 +289,6 @@
 err_unmap:
 	pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-	pci_set_drvdata(pdev, NULL);
 	dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
 	dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@
 
 	unregister_netdev(dev);
 	pci_iounmap(pdev, card->ioaddr);
-	pci_set_drvdata(pdev, NULL);
 	dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
 	dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
 	free_netdev(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index afa8e3a..4fb756d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1746,7 +1746,6 @@
 		pci_release_regions (pdev);
 		pci_disable_device (pdev);
 	}
-	pci_set_drvdata (pdev, NULL);
 }
 
 static struct pci_driver rio_driver = {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index bf3bf6f..113cd79 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -703,7 +703,6 @@
 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 		np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_res:
 	pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@
 	    pci_iounmap(pdev, np->base);
 	    pci_release_regions(pdev);
 	    free_netdev(dev);
-	    pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c99dac6..f4825db 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"4.9.134.0u"
+#define DRV_VER			"4.9.224.0u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -89,7 +89,7 @@
 
 #define BE_NUM_VLANS_SUPPORTED	64
 #define BE_UMC_NUM_VLANS_SUPPORTED	15
-#define BE_MAX_EQD		96u
+#define BE_MAX_EQD		128u
 #define	BE_MAX_TX_FRAG_COUNT	30
 
 #define EVNT_Q_LEN		1024
@@ -199,8 +199,37 @@
 	u16 spurious_intr;
 	struct napi_struct napi;
 	struct be_adapter *adapter;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define BE_EQ_IDLE		0
+#define BE_EQ_NAPI		1	/* napi owns this EQ */
+#define BE_EQ_POLL		2	/* poll owns this EQ */
+#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL)
+#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */
+#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */
+#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
+#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD)
+	unsigned int state;
+	spinlock_t lock;	/* lock to serialize napi and busy-poll */
+#endif  /* CONFIG_NET_RX_BUSY_POLL */
 } ____cacheline_aligned_in_smp;
 
+struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
+	bool enable;
+	u32 min_eqd;		/* in usecs */
+	u32 max_eqd;		/* in usecs */
+	u32 prev_eqd;		/* in usecs */
+	u32 et_eqd;		/* configured val when aic is off */
+	ulong jiffies;
+	u64 rx_pkts_prev;	/* Used to calculate RX pps */
+	u64 tx_reqs_prev;	/* Used to calculate TX pps */
+};
+
+enum {
+	NAPI_POLLING,
+	BUSY_POLLING
+};
+
 struct be_mcc_obj {
 	struct be_queue_info q;
 	struct be_queue_info cq;
@@ -215,6 +244,7 @@
 	u64 tx_compl;
 	ulong tx_jiffies;
 	u32 tx_stops;
+	u32 tx_drv_drops;	/* pkts dropped by driver */
 	struct u64_stats_sync sync;
 	struct u64_stats_sync sync_compl;
 };
@@ -239,15 +269,12 @@
 struct be_rx_stats {
 	u64 rx_bytes;
 	u64 rx_pkts;
-	u64 rx_pkts_prev;
-	ulong rx_jiffies;
 	u32 rx_drops_no_skbs;	/* skb allocation errors */
 	u32 rx_drops_no_frags;	/* HW has no fetched frags */
 	u32 rx_post_fail;	/* page post alloc failures */
 	u32 rx_compl;
 	u32 rx_mcast_pkts;
 	u32 rx_compl_err;	/* completions with err set */
-	u32 rx_pps;		/* pkts per second */
 	struct u64_stats_sync sync;
 };
 
@@ -316,6 +343,11 @@
 	u32 rx_input_fifo_overflow_drop;
 	u32 pmem_fifo_overflow_drop;
 	u32 jabber_events;
+	u32 rx_roce_bytes_lsd;
+	u32 rx_roce_bytes_msd;
+	u32 rx_roce_frames;
+	u32 roce_drops_payload_len;
+	u32 roce_drops_crc;
 };
 
 struct be_vf_cfg {
@@ -405,6 +437,7 @@
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
 	struct be_drv_stats drv_stats;
+	struct be_aic_obj aic_obj[MAX_EVT_QS];
 	u16 vlans_added;
 	u8 vlan_tag[VLAN_N_VID];
 	u8 vlan_prio_bmap;	/* Available Priority BitMap */
@@ -437,7 +470,6 @@
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool stats_cmd_sent;
-	u32 if_type;
 	struct {
 		u32 size;
 		u32 total_size;
@@ -472,8 +504,8 @@
 
 #define be_physfn(adapter)		(!adapter->virtfn)
 #define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_max_vfs(adapter) && num_vfs && \
-					 be_physfn(adapter))
+#define sriov_want(adapter)             (be_physfn(adapter) &&	\
+					 (num_vfs || pci_num_vf(adapter->pdev)))
 #define for_all_vfs(adapter, vf_cfg, i)					\
 	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
 		i++, vf_cfg++)
@@ -546,6 +578,10 @@
 	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
 		i++, eqo++)
 
+#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\
+	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
+		 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+
 #define is_mcc_eqo(eqo)			(eqo->idx == 0)
 #define mcc_eqo(adapter)		(&adapter->eq_obj[0])
 
@@ -696,6 +732,115 @@
 	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+	bool status = true;
+
+	spin_lock(&eqo->lock); /* BH is already disabled */
+	if (eqo->state & BE_EQ_LOCKED) {
+		WARN_ON(eqo->state & BE_EQ_NAPI);
+		eqo->state |= BE_EQ_NAPI_YIELD;
+		status = false;
+	} else {
+		eqo->state = BE_EQ_NAPI;
+	}
+	spin_unlock(&eqo->lock);
+	return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+	spin_lock(&eqo->lock); /* BH is already disabled */
+
+	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+	eqo->state = BE_EQ_IDLE;
+
+	spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+	bool status = true;
+
+	spin_lock_bh(&eqo->lock);
+	if (eqo->state & BE_EQ_LOCKED) {
+		eqo->state |= BE_EQ_POLL_YIELD;
+		status = false;
+	} else {
+		eqo->state |= BE_EQ_POLL;
+	}
+	spin_unlock_bh(&eqo->lock);
+	return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+	spin_lock_bh(&eqo->lock);
+
+	WARN_ON(eqo->state & (BE_EQ_NAPI));
+	eqo->state = BE_EQ_IDLE;
+
+	spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+	spin_lock_init(&eqo->lock);
+	eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+	local_bh_disable();
+
+	/* It's enough to just acquire the napi lock on the eqo to stop
+	 * be_busy_poll() from processing any queues.
+	 */
+	while (!be_lock_napi(eqo))
+		mdelay(1);
+
+	local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+	return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+	return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+		  u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
+
 static inline int fw_major_num(const char *fw_ver)
 {
 	int fw_major = 0;
@@ -705,27 +850,19 @@
 	return fw_major;
 }
 
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
-		u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
 
 /*
  * internal function to initialize-cleanup roce device.
  */
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
 
 /*
  * internal function to open-close roce device during ifup-ifdown.
  */
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
 
 #endif				/* BE_H */
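
The BE_EQ_* bits added above implement a small ownership protocol so NAPI and sk_busy_loop() never service the same event queue concurrently: the first path to take the per-EQ spinlock owns the queue, and the loser records a *_YIELD bit and backs off. A caller-side sketch under that reading, using a hypothetical poll routine:

/* From the NAPI side: if busy-poll currently owns the EQ, report the
 * whole budget as consumed so NAPI gets rescheduled and retries,
 * instead of spinning on the lock. */
static int demo_napi_poll(struct be_eq_obj *eqo, int budget)
{
	int work_done = 0;

	if (!be_lock_napi(eqo))
		return budget;	/* busy-poll owns the EQ right now */

	/* ... drain RX completion queues, bounded by budget ... */

	be_unlock_napi(eqo);
	return work_done;
}
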
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c08fd32..7fb0edf 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -522,7 +522,7 @@
 	return sem & POST_STAGE_MASK;
 }
 
-int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_wait_ready(struct be_adapter *adapter)
 {
 #define SLIPORT_READY_TIMEOUT 30
 	u32 sliport_status;
@@ -1436,8 +1436,12 @@
 		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
 
-	/* version 1 of the cmd is not supported only by BE2 */
-	if (!BE2_chip(adapter))
+	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1; the rest v2 */
+	if (BE2_chip(adapter))
+		hdr->version = 0;
+	else if (BE3_chip(adapter) || lancer_chip(adapter))
 		hdr->version = 1;
+	else
+		hdr->version = 2;
 
 	be_mcc_notify(adapter);
 	adapter->stats_cmd_sent = true;
@@ -1719,11 +1723,12 @@
 /* set the EQ delay interval of an EQ to specified value
  * Uses async mcc
  */
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+		      int num)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_modify_eq_delay *req;
-	int status = 0;
+	int status = 0, i;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
@@ -1737,13 +1742,15 @@
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
 
-	req->num_eq = cpu_to_le32(1);
-	req->delay[0].eq_id = cpu_to_le32(eq_id);
-	req->delay[0].phase = 0;
-	req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+	req->num_eq = cpu_to_le32(num);
+	for (i = 0; i < num; i++) {
+		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+		req->set_eqd[i].phase = 0;
+		req->set_eqd[i].delay_multiplier =
+				cpu_to_le32(set_eqd[i].delay_multiplier);
+	}
 
 	be_mcc_notify(adapter);
-
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -3520,7 +3527,7 @@
 	struct be_cmd_enable_disable_vf *req;
 	int status;
 
-	if (!lancer_chip(adapter))
+	if (BEx_chip(adapter))
 		return 0;
 
 	spin_lock_bh(&adapter->mcc_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 108ca8a..edf3e8a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1057,14 +1057,16 @@
 } __packed;
 
 /******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+	u32 eq_id;
+	u32 phase;
+	u32 delay_multiplier;
+};
+
 struct be_cmd_req_modify_eq_delay {
 	struct be_cmd_req_hdr hdr;
 	u32 num_eq;
-	struct {
-		u32 eq_id;
-		u32 phase;
-		u32 delay_multiplier;
-	} delay[8];
+	struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
 struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@
 	u32 rsvd[4];
 };
 
+struct be_port_rxf_stats_v2 {
+	u32 rsvd0[10];
+	u32 roce_bytes_received_lsd;
+	u32 roce_bytes_received_msd;
+	u32 rsvd1[5];
+	u32 roce_frames_received;
+	u32 rx_crc_errors;
+	u32 rx_alignment_symbol_errors;
+	u32 rx_pause_frames;
+	u32 rx_priority_pause_frames;
+	u32 rx_control_frames;
+	u32 rx_in_range_errors;
+	u32 rx_out_range_errors;
+	u32 rx_frame_too_long;
+	u32 rx_address_filtered;
+	u32 rx_dropped_too_small;
+	u32 rx_dropped_too_short;
+	u32 rx_dropped_header_too_small;
+	u32 rx_dropped_tcp_length;
+	u32 rx_dropped_runt;
+	u32 rsvd2[10];
+	u32 rx_ip_checksum_errs;
+	u32 rx_tcp_checksum_errs;
+	u32 rx_udp_checksum_errs;
+	u32 rsvd3[7];
+	u32 rx_switched_unicast_packets;
+	u32 rx_switched_multicast_packets;
+	u32 rx_switched_broadcast_packets;
+	u32 rsvd4[3];
+	u32 tx_pauseframes;
+	u32 tx_priority_pauseframes;
+	u32 tx_controlframes;
+	u32 rsvd5[10];
+	u32 rxpp_fifo_overflow_drop;
+	u32 rx_input_fifo_overflow_drop;
+	u32 pmem_fifo_overflow_drop;
+	u32 jabber_events;
+	u32 rsvd6[3];
+	u32 rx_drops_payload_size;
+	u32 rx_drops_clipped_header;
+	u32 rx_drops_crc;
+	u32 roce_drops_payload_len;
+	u32 roce_drops_crc;
+	u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+	struct be_port_rxf_stats_v2 port[4];
+	u32 rsvd0[2];
+	u32 rx_drops_no_pbuf;
+	u32 rx_drops_no_txpb;
+	u32 rx_drops_no_erx_descr;
+	u32 rx_drops_no_tpre_descr;
+	u32 rsvd1[6];
+	u32 rx_drops_too_many_frags;
+	u32 rx_drops_invalid_ring;
+	u32 forwarded_packets;
+	u32 rx_drops_mtu;
+	u32 rsvd2[35];
+};
+
 struct be_hw_stats_v1 {
 	struct be_rxf_stats_v1 rxf;
 	u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@
 	struct be_hw_stats_v1 hw_stats;
 };
 
+struct be_erx_stats_v2 {
+	u32 rx_drops_no_fragments[136];     /* dwords 0 to 135 */
+	u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+	struct be_rxf_stats_v2 rxf;
+	u32 rsvd0[BE_TXP_SW_SZ];
+	struct be_erx_stats_v2 erx;
+	struct be_pmem_stats pmem;
+	u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+	struct be_cmd_resp_hdr hdr;
+	struct be_hw_stats_v2 hw_stats;
+};
+
 /************** get fat capabilites *******************/
 #define MAX_MODULES 27
 #define MAX_MODES 4
@@ -1865,137 +1951,119 @@
 	struct be_if_desc if_desc;
 };
 
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-				 bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-			u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-			int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
-			    u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
-			u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
-			struct be_queue_info *cq, struct be_queue_info *eq,
-			bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
-			struct be_queue_info *mccq,
-			struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
-			struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
-			struct be_queue_info *rxq, u16 cq_id,
-			u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-			int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
-			struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
-				    u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
-			struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-			struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-		char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
-			u16 *vtag_array, u32 num, bool untagged,
-			bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
-			u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
-			u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+			  bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+		    u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+		    u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		     u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+		     struct be_queue_info *eq, bool no_delay,
+		     int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+		       struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+		      u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+		     int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+			     u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+			       struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+		      char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+		       u32 num, bool untagged, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 			u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-			     u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
-			u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
-			u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
-			struct be_dma_mem *cmd, u32 flash_oper,
-			u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
-				   struct be_dma_mem *cmd,
-				   u32 data_size, u32 data_offset,
-				   const char *obj_name,
-				   u32 *data_written, u8 *change_status,
-				   u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+		      u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+			    u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+			    u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+			  u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+			    u32 data_size, u32 data_offset,
+			    const char *obj_name, u32 *data_written,
+			    u8 *change_status, u8 *addn_status);
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-		u32 data_size, u32 data_offset, const char *obj_name,
-		u32 *data_read, u32 *eof, u8 *addn_status);
+			   u32 data_size, u32 data_offset, const char *obj_name,
+			   u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-				int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-				struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-				u32 loopback_type, u32 pkt_size,
-				u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-			u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-				struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
-				u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
-				    u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
-				    u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-				    bool *pmac_id_active, u32 *pmac_id,
-				    u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
-				 u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
-						u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
-			  u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-				 u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-				 u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
-					  struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
-					  struct be_dma_mem *cmd,
-					  struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+			 int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+			    struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
+			 u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+			struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+			    struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+			u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+			     u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+			     u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+			     bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+			u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+			  u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+			  u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+				   struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+				   struct be_dma_mem *cmd,
+				   struct be_fat_conf_params *cfgs);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
 int be_cmd_get_func_config(struct be_adapter *adapter,
 			   struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
 			      struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
-				     u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
-			    struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+		     int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
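
With the new prototype above, be_cmd_modify_eqd() reprograms any number of event queues in one MCC command instead of one command per EQ. A sketch of the intended caller pattern, mirroring be_eqd_update() further down; new_eqd[] is a hypothetical per-EQ target delay:

/* Collect only the EQs whose delay actually changed, then issue a
 * single MODIFY_EQ_DELAY command for the whole batch. */
static void demo_update_eqd(struct be_adapter *adapter, u32 *new_eqd)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_eq_obj *eqo;
	int i, num = 0;

	for_all_evt_queues(adapter, eqo, i) {
		struct be_aic_obj *aic = &adapter->aic_obj[i];

		if (new_eqd[i] == aic->prev_eqd)
			continue;
		set_eqd[num].eq_id = eqo->q.id;
		set_eqd[num].delay_multiplier = (new_eqd[i] * 65) / 100;
		aic->prev_eqd = new_eqd[i];
		num++;
	}
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
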
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b440a1f..0833003 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -116,7 +116,12 @@
 	{DRVSTAT_INFO(rx_drops_mtu)},
 	/* Number of packets dropped due to random early drop function */
 	{DRVSTAT_INFO(eth_red_drops)},
-	{DRVSTAT_INFO(be_on_die_temperature)}
+	{DRVSTAT_INFO(be_on_die_temperature)},
+	{DRVSTAT_INFO(rx_roce_bytes_lsd)},
+	{DRVSTAT_INFO(rx_roce_bytes_msd)},
+	{DRVSTAT_INFO(rx_roce_frames)},
+	{DRVSTAT_INFO(roce_drops_payload_len)},
+	{DRVSTAT_INFO(roce_drops_crc)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -155,7 +160,9 @@
 	/* Number of times the TX queue was stopped due to lack
 	 * of spaces in the TXQ.
 	 */
-	{DRVSTAT_TX_INFO(tx_stops)}
+	{DRVSTAT_TX_INFO(tx_stops)},
+	/* Pkts dropped in the driver's transmit path */
+	{DRVSTAT_TX_INFO(tx_drv_drops)}
 };
 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
 
@@ -290,19 +297,19 @@
 			   struct ethtool_coalesce *et)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_eq_obj *eqo = &adapter->eq_obj[0];
+	struct be_aic_obj *aic = &adapter->aic_obj[0];
 
 
-	et->rx_coalesce_usecs = eqo->cur_eqd;
-	et->rx_coalesce_usecs_high = eqo->max_eqd;
-	et->rx_coalesce_usecs_low = eqo->min_eqd;
+	et->rx_coalesce_usecs = aic->prev_eqd;
+	et->rx_coalesce_usecs_high = aic->max_eqd;
+	et->rx_coalesce_usecs_low = aic->min_eqd;
 
-	et->tx_coalesce_usecs = eqo->cur_eqd;
-	et->tx_coalesce_usecs_high = eqo->max_eqd;
-	et->tx_coalesce_usecs_low = eqo->min_eqd;
+	et->tx_coalesce_usecs = aic->prev_eqd;
+	et->tx_coalesce_usecs_high = aic->max_eqd;
+	et->tx_coalesce_usecs_low = aic->min_eqd;
 
-	et->use_adaptive_rx_coalesce = eqo->enable_aic;
-	et->use_adaptive_tx_coalesce = eqo->enable_aic;
+	et->use_adaptive_rx_coalesce = aic->enable;
+	et->use_adaptive_tx_coalesce = aic->enable;
 
 	return 0;
 }
@@ -314,14 +321,17 @@
 			   struct ethtool_coalesce *et)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_aic_obj *aic = &adapter->aic_obj[0];
 	struct be_eq_obj *eqo;
 	int i;
 
 	for_all_evt_queues(adapter, eqo, i) {
-		eqo->enable_aic = et->use_adaptive_rx_coalesce;
-		eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
-		eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
-		eqo->eqd = et->rx_coalesce_usecs;
+		aic->enable = et->use_adaptive_rx_coalesce;
+		aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+		aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+		aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+		aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+		aic++;
 	}
 
 	return 0;
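
The ethtool coalesce hooks above now operate on the per-EQ be_aic_obj: rx-usecs becomes the static delay (et_eqd) used while adaptive coalescing is off, clamped into [rx-usecs-low, rx-usecs-high]. A condensed sketch of that clamping, assuming the kernel's min()/clamp() helpers:

/* Mirror of the logic in be_set_coalesce(): bound max by BE_MAX_EQD
 * (128 usecs after this series), then clamp the static delay into
 * [min_eqd, max_eqd]. */
static void demo_set_coalesce(struct be_aic_obj *aic, u32 usecs,
			      u32 usecs_low, u32 usecs_high, bool adaptive)
{
	aic->enable = adaptive;
	aic->max_eqd = min(usecs_high, BE_MAX_EQD);
	aic->min_eqd = min(usecs_low, aic->max_eqd);
	aic->et_eqd = clamp(usecs, aic->min_eqd, aic->max_eqd);
}
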
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 53ed58b..eaecaad 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -22,6 +22,7 @@
 #include <asm/div64.h>
 #include <linux/aer.h>
 #include <linux/if_bridge.h>
+#include <net/busy_poll.h>
 
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -306,10 +307,14 @@
 		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
 
 		return &cmd->hw_stats;
-	} else  {
+	} else if (BE3_chip(adapter)) {
 		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
 
 		return &cmd->hw_stats;
+	} else {
+		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
+		return &cmd->hw_stats;
 	}
 }
 
@@ -320,10 +325,14 @@
 		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 
 		return &hw_stats->erx;
-	} else {
+	} else if (BE3_chip(adapter)) {
 		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 
 		return &hw_stats->erx;
+	} else {
+		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
+		return &hw_stats->erx;
 	}
 }
 
@@ -422,6 +431,60 @@
 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 }
 
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+	struct be_port_rxf_stats_v2 *port_stats =
+					&rxf_stats->port[adapter->port_num];
+	struct be_drv_stats *drvs = &adapter->drv_stats;
+
+	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+	drvs->rx_pause_frames = port_stats->rx_pause_frames;
+	drvs->rx_crc_errors = port_stats->rx_crc_errors;
+	drvs->rx_control_frames = port_stats->rx_control_frames;
+	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+	drvs->rx_dropped_header_too_small =
+		port_stats->rx_dropped_header_too_small;
+	drvs->rx_input_fifo_overflow_drop =
+		port_stats->rx_input_fifo_overflow_drop;
+	drvs->rx_address_filtered = port_stats->rx_address_filtered;
+	drvs->rx_alignment_symbol_errors =
+		port_stats->rx_alignment_symbol_errors;
+	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+	drvs->tx_pauseframes = port_stats->tx_pauseframes;
+	drvs->tx_controlframes = port_stats->tx_controlframes;
+	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+	drvs->jabber_events = port_stats->jabber_events;
+	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	drvs->forwarded_packets = rxf_stats->forwarded_packets;
+	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+	if (be_roce_supported(adapter)) {
+		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+		drvs->rx_roce_frames = port_stats->roce_frames_received;
+		drvs->roce_drops_crc = port_stats->roce_drops_crc;
+		drvs->roce_drops_payload_len =
+			port_stats->roce_drops_payload_len;
+	}
+}
+
 static void populate_lancer_stats(struct be_adapter *adapter)
 {
 
@@ -489,7 +552,7 @@
 
 void be_parse_stats(struct be_adapter *adapter)
 {
-	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
 	struct be_rx_obj *rxo;
 	int i;
 	u32 erx_stat;
@@ -499,11 +562,13 @@
 	} else {
 		if (BE2_chip(adapter))
 			populate_be_v0_stats(adapter);
-		else
-			/* for BE3 and Skyhawk */
+		else if (BE3_chip(adapter))
+			/* for BE3 */
 			populate_be_v1_stats(adapter);
+		else
+			populate_be_v2_stats(adapter);
 
-		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+	/* erx_v2 is longer than v0 and v1; ok to use v2 for v0/v1 access */
 		for_all_rx_queues(adapter, rxo, i) {
 			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
 			populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +1000,10 @@
 	u32 start = txq->head;
 
 	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-	if (!skb)
+	if (!skb) {
+		tx_stats(txo)->tx_drv_drops++;
 		return NETDEV_TX_OK;
+	}
 
 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
@@ -965,6 +1032,7 @@
 		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
 	} else {
 		txq->head = start;
+		tx_stats(txo)->tx_drv_drops++;
 		dev_kfree_skb_any(skb);
 	}
 	return NETDEV_TX_OK;
@@ -1275,53 +1343,79 @@
 	return status;
 }
 
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+			  ulong now)
 {
-	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
-	ulong now = jiffies;
-	ulong delta = now - stats->rx_jiffies;
-	u64 pkts;
-	unsigned int start, eqd;
+	aic->rx_pkts_prev = rx_pkts;
+	aic->tx_reqs_prev = tx_pkts;
+	aic->jiffies = now;
+}
 
-	if (!eqo->enable_aic) {
-		eqd = eqo->eqd;
-		goto modify_eqd;
-	}
+static void be_eqd_update(struct be_adapter *adapter)
+{
+	struct be_set_eqd set_eqd[MAX_EVT_QS];
+	int eqd, i, num = 0, start;
+	struct be_aic_obj *aic;
+	struct be_eq_obj *eqo;
+	struct be_rx_obj *rxo;
+	struct be_tx_obj *txo;
+	u64 rx_pkts, tx_pkts;
+	ulong now;
+	u32 pps, delta;
 
-	if (eqo->idx >= adapter->num_rx_qs)
-		return;
+	for_all_evt_queues(adapter, eqo, i) {
+		aic = &adapter->aic_obj[eqo->idx];
+		if (!aic->enable) {
+			if (aic->jiffies)
+				aic->jiffies = 0;
+			eqd = aic->et_eqd;
+			goto modify_eqd;
+		}
 
-	stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+		rxo = &adapter->rx_obj[eqo->idx];
+		do {
+			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+			rx_pkts = rxo->stats.rx_pkts;
+		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
 
-	/* Wrapped around */
-	if (time_before(now, stats->rx_jiffies)) {
-		stats->rx_jiffies = now;
-		return;
-	}
+		txo = &adapter->tx_obj[eqo->idx];
+		do {
+			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+			tx_pkts = txo->stats.tx_reqs;
+		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
 
-	/* Update once a second */
-	if (delta < HZ)
-		return;
 
-	do {
-		start = u64_stats_fetch_begin_bh(&stats->sync);
-		pkts = stats->rx_pkts;
-	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+		/* Skip if counters wrapped around or on the first calculation */
+		now = jiffies;
+		if (!aic->jiffies || time_before(now, aic->jiffies) ||
+		    rx_pkts < aic->rx_pkts_prev ||
+		    tx_pkts < aic->tx_reqs_prev) {
+			be_aic_update(aic, rx_pkts, tx_pkts, now);
+			continue;
+		}
 
-	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
-	stats->rx_pkts_prev = pkts;
-	stats->rx_jiffies = now;
-	eqd = (stats->rx_pps / 110000) << 3;
-	eqd = min(eqd, eqo->max_eqd);
-	eqd = max(eqd, eqo->min_eqd);
-	if (eqd < 10)
-		eqd = 0;
+		delta = jiffies_to_msecs(now - aic->jiffies);
+		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+		eqd = (pps / 15000) << 2;
 
+		if (eqd < 8)
+			eqd = 0;
+		eqd = min_t(u32, eqd, aic->max_eqd);
+		eqd = max_t(u32, eqd, aic->min_eqd);
+
+		be_aic_update(aic, rx_pkts, tx_pkts, now);
 modify_eqd:
-	if (eqd != eqo->cur_eqd) {
-		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
-		eqo->cur_eqd = eqd;
+		if (eqd != aic->prev_eqd) {
+			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
+			set_eqd[num].eq_id = eqo->q.id;
+			aic->prev_eqd = eqd;
+			num++;
+		}
 	}
+
+	if (num)
+		be_cmd_modify_eqd(adapter, set_eqd, num);
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
@@ -1463,7 +1557,7 @@
 }
 
 /* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_rx_obj *rxo,
+static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
 				struct be_rx_compl_info *rxcp)
 {
 	struct be_adapter *adapter = rxo->adapter;
@@ -1488,7 +1582,7 @@
 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (netdev->features & NETIF_F_RXHASH)
 		skb->rxhash = rxcp->rss_hash;
-
+	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1546,6 +1640,7 @@
 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (adapter->netdev->features & NETIF_F_RXHASH)
 		skb->rxhash = rxcp->rss_hash;
+	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1726,6 +1821,8 @@
 
 	if (posted) {
 		atomic_add(posted, &rxq->used);
+		if (rxo->rx_post_starved)
+			rxo->rx_post_starved = false;
 		be_rxq_notify(adapter, rxq->id, posted);
 	} else if (atomic_read(&rxq->used) == 0) {
 		/* Let be_worker replenish when memory is available */
@@ -1928,6 +2025,7 @@
 		if (eqo->q.created) {
 			be_eq_clean(eqo);
 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+			napi_hash_del(&eqo->napi);
 			netif_napi_del(&eqo->napi);
 		}
 		be_queue_free(adapter, &eqo->q);
@@ -1938,6 +2036,7 @@
 {
 	struct be_queue_info *eq;
 	struct be_eq_obj *eqo;
+	struct be_aic_obj *aic;
 	int i, rc;
 
 	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2045,13 @@
 	for_all_evt_queues(adapter, eqo, i) {
 		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
 			       BE_NAPI_WEIGHT);
+		napi_hash_add(&eqo->napi);
+		aic = &adapter->aic_obj[i];
 		eqo->adapter = adapter;
 		eqo->tx_budget = BE_TX_BUDGET;
 		eqo->idx = i;
-		eqo->max_eqd = BE_MAX_EQD;
-		eqo->enable_aic = true;
+		aic->max_eqd = BE_MAX_EQD;
+		aic->enable = true;
 
 		eq = &eqo->q;
 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2047,6 +2148,9 @@
 		if (status)
 			return status;
 
+		u64_stats_init(&txo->stats.sync);
+		u64_stats_init(&txo->stats.sync_compl);
+
 		/* If num_evt_qs is less than num_tx_qs, then more than
 		 * one txq share an eq
 		 */
@@ -2108,6 +2212,7 @@
 		if (rc)
 			return rc;
 
+		u64_stats_init(&rxo->stats.sync);
 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
 		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
 		if (rc)
@@ -2167,7 +2272,7 @@
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
-			int budget)
+			int budget, int polling)
 {
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_queue_info *rx_cq = &rxo->cq;
@@ -2198,10 +2303,12 @@
 			goto loop_continue;
 		}
 
-		if (do_gro(rxcp))
+		/* Don't do GRO when we're busy-polling */
+		if (do_gro(rxcp) && polling != BUSY_POLLING)
 			be_rx_compl_process_gro(rxo, napi, rxcp);
 		else
-			be_rx_compl_process(rxo, rxcp);
+			be_rx_compl_process(rxo, napi, rxcp);
+
 loop_continue:
 		be_rx_stats_update(rxo, rxcp);
 	}
@@ -2209,7 +2316,11 @@
 	if (work_done) {
 		be_cq_notify(adapter, rx_cq->id, true, work_done);
 
-		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+		/* When an rx-obj gets into post_starved state, just
+		 * let be_worker do the posting.
+		 */
+		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
+		    !rxo->rx_post_starved)
 			be_post_rx_frags(rxo, GFP_ATOMIC);
 	}
 
@@ -2254,6 +2365,7 @@
 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
 	struct be_adapter *adapter = eqo->adapter;
 	int max_work = 0, work, i, num_evts;
+	struct be_rx_obj *rxo;
 	bool tx_done;
 
 	num_evts = events_get(eqo);
@@ -2266,13 +2378,18 @@
 			max_work = budget;
 	}
 
-	/* This loop will iterate twice for EQ0 in which
-	 * completions of the last RXQ (default one) are also processed
-	 * For other EQs the loop iterates only once
-	 */
-	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
-		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
-		max_work = max(work, max_work);
+	if (be_lock_napi(eqo)) {
+		/* This loop will iterate twice for EQ0 in which
+		 * completions of the last RXQ (default one) are also processed.
+		 * For other EQs the loop iterates only once.
+		 */
+		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
+			max_work = max(work, max_work);
+		}
+		be_unlock_napi(eqo);
+	} else {
+		max_work = budget;
 	}
 
 	if (is_mcc_eqo(eqo))
@@ -2288,6 +2405,28 @@
 	return max_work;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int be_busy_poll(struct napi_struct *napi)
+{
+	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+	struct be_adapter *adapter = eqo->adapter;
+	struct be_rx_obj *rxo;
+	int i, work = 0;
+
+	if (!be_lock_busy_poll(eqo))
+		return LL_FLUSH_BUSY;
+
+	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
+		if (work)
+			break;
+	}
+
+	be_unlock_busy_poll(eqo);
+	return work;
+}
+#endif
+
 void be_detect_error(struct be_adapter *adapter)
 {
 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@ -2519,9 +2658,11 @@
 
 	be_roce_dev_close(adapter);
 
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
-		for_all_evt_queues(adapter, eqo, i)
+		for_all_evt_queues(adapter, eqo, i) {
 			napi_disable(&eqo->napi);
+			be_disable_busy_poll(eqo);
+		}
 		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
 	}
 
@@ -2632,6 +2773,7 @@
 
 	for_all_evt_queues(adapter, eqo, i) {
 		napi_enable(&eqo->napi);
+		be_enable_busy_poll(eqo);
 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
 	}
 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -2937,7 +3079,8 @@
 			goto err;
 		vf_cfg->def_vid = def_vlan;
 
-		be_cmd_enable_vf(adapter, vf + 1);
+		if (!old_vfs)
+			be_cmd_enable_vf(adapter, vf + 1);
 	}
 
 	if (!old_vfs) {
@@ -2962,12 +3105,12 @@
 	struct pci_dev *pdev = adapter->pdev;
 	bool use_sriov = false;
 
-	if (BE3_chip(adapter) && be_physfn(adapter)) {
+	if (BE3_chip(adapter) && sriov_want(adapter)) {
 		int max_vfs;
 
 		max_vfs = pci_sriov_get_totalvfs(pdev);
 		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-		use_sriov = res->max_vfs && num_vfs;
+		use_sriov = res->max_vfs;
 	}
 
 	if (be_physfn(adapter))
@@ -2983,8 +3126,9 @@
 		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
 	res->max_mcast_mac = BE_MAX_MC;
 
+	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
 	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
-	    !be_physfn(adapter))
+	    !be_physfn(adapter) || (adapter->port_num > 1))
 		res->max_tx_qs = 1;
 	else
 		res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3170,6 @@
 		adapter->res = res;
 	}
 
-	/* For BE3 only check if FW suggests a different max-txqs value */
-	if (BE3_chip(adapter)) {
-		status = be_cmd_get_profile_config(adapter, &res, 0);
-		if (!status && res.max_tx_qs)
-			adapter->res.max_tx_qs =
-				min(adapter->res.max_tx_qs, res.max_tx_qs);
-	}
-
 	/* For Lancer, SH etc read per-function resource limits from FW.
 	 * GET_FUNC_CONFIG returns per function guaranteed limits.
 	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3264,7 +3400,7 @@
 		be_cmd_set_flow_control(adapter, adapter->tx_fc,
 					adapter->rx_fc);
 
-	if (be_physfn(adapter) && num_vfs) {
+	if (sriov_want(adapter)) {
 		if (be_max_vfs(adapter))
 			be_vf_setup(adapter);
 		else
@@ -3906,6 +4042,9 @@
 #endif
 	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
 	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= be_busy_poll
+#endif
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -3966,11 +4105,6 @@
 static int be_map_pci_bars(struct be_adapter *adapter)
 {
 	u8 __iomem *addr;
-	u32 sli_intf;
-
-	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
-				SLI_INTF_IF_TYPE_SHIFT;
 
 	if (BEx_chip(adapter) && be_physfn(adapter)) {
 		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
@@ -4083,9 +4217,11 @@
 		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
 	else if (BE2_chip(adapter))
 		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
-	else
-		/* BE3 and Skyhawk */
+	else if (BE3_chip(adapter))
 		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+	else
+		/* ALL non-BE ASICs */
+		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
 
 	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
 				      GFP_KERNEL);
@@ -4119,7 +4255,6 @@
 
 	pci_disable_pcie_error_reporting(pdev);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 
@@ -4268,7 +4403,6 @@
 	struct be_adapter *adapter =
 		container_of(work, struct be_adapter, work.work);
 	struct be_rx_obj *rxo;
-	struct be_eq_obj *eqo;
 	int i;
 
 	/* when interrupts are not yet enabled, just reap any pending
@@ -4293,14 +4427,14 @@
 		be_cmd_get_die_temperature(adapter);
 
 	for_all_rx_queues(adapter, rxo, i) {
-		if (rxo->rx_post_starved) {
-			rxo->rx_post_starved = false;
+		/* Replenish RX-queues starved due to memory
+		 * allocation failures.
+		 */
+		if (rxo->rx_post_starved)
 			be_post_rx_frags(rxo, GFP_KERNEL);
-		}
 	}
 
-	for_all_evt_queues(adapter, eqo, i)
-		be_eqd_update(adapter, eqo);
+	be_eqd_update(adapter);
 
 reschedule:
 	adapter->work_counter++;
@@ -4357,28 +4491,22 @@
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
-	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!status) {
-		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (status < 0) {
-			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
-			goto free_netdev;
-		}
 		netdev->features |= NETIF_F_HIGHDMA;
 	} else {
-		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-		if (!status)
-			status = dma_set_coherent_mask(&pdev->dev,
-						       DMA_BIT_MASK(32));
+		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (status) {
 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 			goto free_netdev;
 		}
 	}
 
-	status = pci_enable_pcie_error_reporting(pdev);
-	if (status)
-		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+	if (be_physfn(adapter)) {
+		status = pci_enable_pcie_error_reporting(pdev);
+		if (!status)
+			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+	}
 
 	status = be_ctrl_init(adapter);
 	if (status)
@@ -4449,7 +4577,6 @@
 	be_ctrl_cleanup(adapter);
 free_netdev:
 	free_netdev(netdev);
-	pci_set_drvdata(pdev, NULL);
 rel_reg:
 	pci_release_regions(pdev);
 disable_dev:
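
The be_busy_poll() handler added in this hunk follows the shape most NIC drivers use for CONFIG_NET_RX_BUSY_POLL: contend for a per-queue ownership lock against the NAPI softirq path, do a small fixed-budget Rx pass without re-arming interrupts, and return LL_FLUSH_BUSY when NAPI already owns the queue. A minimal sketch of that shape (my_queue, my_lock_poll() and my_clean_rx() are placeholders, not be2net symbols):

#ifdef CONFIG_NET_RX_BUSY_POLL
static int my_busy_poll(struct napi_struct *napi)
{
	struct my_queue *q = container_of(napi, struct my_queue, napi);
	int work;

	if (!my_lock_poll(q))		/* NAPI path owns the queue */
		return LL_FLUSH_BUSY;

	work = my_clean_rx(q, 4);	/* small budget, no IRQ re-arm */

	my_unlock_poll(q);
	return work;
}
#endif
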
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c706b7a..4b22a95 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -699,7 +699,6 @@
 		pci_iounmap(pdev, np->mem);
 		free_netdev(dev);
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
 	} else
 		printk(KERN_ERR "fealnx: remove for unknown device\n");
 }
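
This and the ipg/e100 hunks below drop pci_set_drvdata(pdev, NULL) from the teardown paths; since the 3.12-era driver-core change that clears drvdata whenever a driver is unbound, the explicit store is dead code. A sketch of the resulting remove path (my_remove() is a placeholder, not the fealnx function):

static void my_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* no pci_set_drvdata(pdev, NULL): the driver core
	 * clears drvdata once the driver is unbound */
}
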
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6b60582..56f2f608 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1083,7 +1083,7 @@
 
 	mac_addr = of_get_mac_address(ofdev->dev.of_node);
 	if (mac_addr)
-		memcpy(ndev->dev_addr, mac_addr, 6);
+		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
 
 	ret = fep->ops->allocate_bd(ndev);
 	if (ret)
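
Several hunks in this series (fs_enet, ucc_geth, i825xx, emac, ibmveth) replace the bare constant 6 with ETH_ALEN from <linux/if_ether.h>. The generated code is identical; the macro merely documents that the length is an Ethernet hardware address:

	#include <linux/if_ether.h>

	memcpy(ndev->dev_addr, mac_addr, 6);		/* magic number */
	memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);	/* self-documenting */
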
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ef95c3a..b14d790 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2920,7 +2920,7 @@
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	int work_done = 0, work_done_per_q = 0;
 	int i, budget_per_q = 0;
-	int has_tx_work;
+	int has_tx_work = 0;
 	unsigned long rstat_rxf;
 	int num_act_queues;
 
@@ -2935,62 +2935,51 @@
 	if (num_act_queues)
 		budget_per_q = budget/num_act_queues;
 
-	while (1) {
-		has_tx_work = 0;
-		for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-			tx_queue = priv->tx_queue[i];
-			/* run Tx cleanup to completion */
-			if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-				gfar_clean_tx_ring(tx_queue);
-				has_tx_work = 1;
-			}
+	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+		tx_queue = priv->tx_queue[i];
+		/* run Tx cleanup to completion */
+		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+			gfar_clean_tx_ring(tx_queue);
+			has_tx_work = 1;
 		}
+	}
 
-		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
-			/* skip queue if not active */
-			if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
-				continue;
+	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+		/* skip queue if not active */
+		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+			continue;
 
-			rx_queue = priv->rx_queue[i];
-			work_done_per_q =
-				gfar_clean_rx_ring(rx_queue, budget_per_q);
-			work_done += work_done_per_q;
+		rx_queue = priv->rx_queue[i];
+		work_done_per_q =
+			gfar_clean_rx_ring(rx_queue, budget_per_q);
+		work_done += work_done_per_q;
 
-			/* finished processing this queue */
-			if (work_done_per_q < budget_per_q) {
-				/* clear active queue hw indication */
-				gfar_write(&regs->rstat,
-					   RSTAT_CLEAR_RXF0 >> i);
-				rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
-				num_act_queues--;
+		/* finished processing this queue */
+		if (work_done_per_q < budget_per_q) {
+			/* clear active queue hw indication */
+			gfar_write(&regs->rstat,
+				   RSTAT_CLEAR_RXF0 >> i);
+			num_act_queues--;
 
-				if (!num_act_queues)
-					break;
-				/* recompute budget per Rx queue */
-				budget_per_q =
-					(budget - work_done) / num_act_queues;
-			}
+			if (!num_act_queues)
+				break;
 		}
+	}
 
-		if (work_done >= budget)
-			break;
+	if (!num_act_queues && !has_tx_work) {
 
-		if (!num_act_queues && !has_tx_work) {
+		napi_complete(napi);
 
-			napi_complete(napi);
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&regs->rstat, gfargrp->rstat);
 
-			/* Clear the halt bit in RSTAT */
-			gfar_write(&regs->rstat, gfargrp->rstat);
+		gfar_write(&regs->imask, IMASK_DEFAULT);
 
-			gfar_write(&regs->imask, IMASK_DEFAULT);
-
-			/* If we are coalescing interrupts, update the timer
-			 * Otherwise, clear it
-			 */
-			gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
-						  gfargrp->tx_bit_map);
-			break;
-		}
+		/* If we are coalescing interrupts, update the timer
+		 * Otherwise, clear it
+		 */
+		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+					  gfargrp->tx_bit_map);
 	}
 
 	return work_done;
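
The gfar_poll() rewrite above removes the outer while (1) loop: the handler now makes one pass over the Tx and Rx queues and lets the NAPI core reschedule it if work remains, instead of looping internally past its budget. Stripped of the per-queue bookkeeping, the single-pass structure is the canonical poll shape (my_* names are placeholders):

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done;

	my_clean_tx(priv);			/* Tx cleanup runs to completion */
	work_done = my_clean_rx(priv, budget);	/* Rx work is budget-limited */

	if (work_done < budget) {
		napi_complete(napi);
		my_enable_irqs(priv);		/* re-arm device interrupts */
	}
	return work_done;
}
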
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04112b9..114c58f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1177,21 +1177,21 @@
 	*fpr = gfar_read(&regs->rqfpr);
 }
 
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
-		int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+		   u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
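
This header cleanup, like the emac and e1000 ones below, drops the redundant 'extern' keyword from function prototypes: file-scope function declarations have external linkage by default in C, so the two forms are equivalent, and checkpatch now flags the first:

	extern int startup_gfar(struct net_device *dev);	/* redundant keyword */
	int startup_gfar(struct net_device *dev);		/* equivalent */

Note that 'extern' is kept on object declarations such as gfar_ethtool_ops, where it is still needed to make the line a declaration rather than a definition.
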
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 64b329f..5548b6d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3901,7 +3901,7 @@
 
 	mac_addr = of_get_mac_address(np);
 	if (mac_addr)
-		memcpy(dev->dev_addr, mac_addr, 6);
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
 	ugeth->ug_info = ug_info;
 	ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 6231bc0..1085257 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_FUJITSU
 	bool "Fujitsu devices"
 	default y
-	depends on ISA || PCMCIA
+	depends on PCMCIA
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 91227d0..3786009 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1098,7 +1098,7 @@
 	if (request_irq(dev->irq, hp100_interrupt,
 			lp->bus == HP100_BUS_PCI || lp->bus ==
 			HP100_BUS_EISA ? IRQF_SHARED : 0,
-			"hp100", dev)) {
+			dev->name, dev)) {
 		printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index e388161..7ce6379 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -711,7 +711,7 @@
 	i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
 	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
 	lp->sa_cmd.cmd.command = CmdSASetup;
 	i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
@@ -1155,7 +1155,7 @@
 			err = -ENODEV;
 			goto out;
 		}
-		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
+		memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);	/* YUCK! Get addr from NOVRAM */
 		dev->base_addr = MVME_I596_BASE;
 		dev->irq = (unsigned) MVME16x_IRQ_I596;
 		goto found;
@@ -1527,9 +1527,7 @@
 	if (debug >= 0)
 		i596_debug = debug;
 	dev_82596 = i82596_probe(-1);
-	if (IS_ERR(dev_82596))
-		return PTR_ERR(dev_82596);
-	return 0;
+	return PTR_ERR_OR_ZERO(dev_82596);
 }
 
 void __exit cleanup_module(void)
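
PTR_ERR_OR_ZERO(), from <linux/err.h>, folds the IS_ERR()/PTR_ERR()/return-0 tail into a single call: it returns the encoded errno when the pointer holds one, and 0 otherwise, exactly what the open-coded version above did:

	#include <linux/err.h>

	/* before */
	if (IS_ERR(dev_82596))
		return PTR_ERR(dev_82596);
	return 0;

	/* after */
	return PTR_ERR_OR_ZERO(dev_82596);
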
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index d653bac..861fa15 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -607,7 +607,7 @@
 	i596_add_cmd(dev, &dma->cf_cmd.cmd);
 
 	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
 	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
 	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
 	i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@
 		netdev_for_each_mc_addr(ha, dev) {
 			if (!cnt--)
 				break;
-			memcpy(cp, ha->addr, 6);
+			memcpy(cp, ha->addr, ETH_ALEN);
 			if (i596_debug > 1)
 				DEB(DEB_MULTI,
 				    printk(KERN_DEBUG
 					   "%s: Adding address %pM\n",
 					   dev->name, cp));
-			cp += 6;
+			cp += ETH_ALEN;
 		}
 		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
 		i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index cdf2321..ae342fd 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2678,7 +2678,7 @@
 		       np->full_name);
 		return -ENXIO;
 	}
-	memcpy(dev->ndev->dev_addr, p, 6);
+	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
 
 	/* IAHT and GAHT filter parameterization */
 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 59a92d5..9c45efe 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -29,13 +29,13 @@
 struct emac_instance;
 struct mal_instance;
 
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
 
 # define DBG_LEVEL		1
 
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 668bcee..d4f1374 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -56,15 +56,15 @@
 
 #ifdef CONFIG_IBM_EMAC_RGMII
 
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 350b709..4d5f336 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -72,13 +72,13 @@
 
 #ifdef CONFIG_IBM_EMAC_TAH
 
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 455bfb0..0959c55 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -53,15 +53,15 @@
 
 #ifdef CONFIG_IBM_EMAC_ZMII
 
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 # define zmii_init()		0
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee..952d795 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1185,7 +1185,7 @@
 		netdev_for_each_mc_addr(ha, netdev) {
 			/* add the multicast address to the filter table */
 			unsigned long mcast_addr = 0;
-			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+			memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
 			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 						   IbmVethMcastAddFilter,
 						   mcast_addr);
@@ -1370,7 +1370,7 @@
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
 	adapter->mac_addr = 0;
-	memcpy(&adapter->mac_addr, mac_addr_p, 6);
+	memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
 
 	netdev->irq = dev->irq;
 	netdev->netdev_ops = &ibmveth_netdev_ops;
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index bdf5023..25045ae 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2183,7 +2183,6 @@
 
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops ipg_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e21..cbaba44 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@
 err_out_disable_pdev:
 	pci_disable_device(pdev);
 err_out_free_dev:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	return err;
 }
@@ -3003,7 +3002,6 @@
 		free_netdev(netdev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd5..58c1472 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -325,7 +325,7 @@
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
 	netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
 #define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
 
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
 
 #endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007..e386228 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1018,19 +1018,14 @@
 	 */
 	pci_using_dac = 0;
 	if ((hw->bus_type == e1000_bus_type_pcix) &&
-	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		/* according to DMA-API-HOWTO, coherent calls will always
-		 * succeed if the set call did
-		 */
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			pr_err("No usable DMA config, aborting\n");
 			goto err_dma;
 		}
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	}
 
 	netdev->netdev_ops = &e1000_netdev_ops;
@@ -3917,8 +3912,7 @@
 			      "  next_to_watch        <%x>\n"
 			      "  jiffies              <%lx>\n"
 			      "  next_to_watch.status <%x>\n",
-				(unsigned long)((tx_ring - adapter->tx_ring) /
-					sizeof(struct e1000_tx_ring)),
+				(unsigned long)(tx_ring - adapter->tx_ring),
 				readl(hw->hw_addr + tx_ring->tdh),
 				readl(hw->hw_addr + tx_ring->tdt),
 				tx_ring->next_to_use,
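
dma_set_mask_and_coherent(), a recent addition to <linux/dma-mapping.h>, sets the streaming and coherent DMA masks together and fails unless both can be satisfied, so the paired dma_set_mask()/dma_set_coherent_mask() calls and their error handling collapse into the usual 64-then-32-bit probe, as here and in the e1000e and be2net hunks:

	/* try 64-bit DMA first, fall back to 32-bit */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		return -EIO;
	}
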
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd1..0150f7f 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -472,26 +472,25 @@
 extern char e1000e_driver_name[];
 extern const char e1000e_driver_version[];
 
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
 
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-						    struct rtnl_link_stats64
-						    *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+					     struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
 
 extern unsigned int copybreak;
 
@@ -508,8 +507,8 @@
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
@@ -536,7 +535,7 @@
 	return hw->phy.ops.write_reg_locked(hw, offset, data);
 }
 
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
 
 static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef7867..aedd573 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6553,21 +6553,15 @@
 		return err;
 
 	pci_using_dac = 0;
-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (!err)
-			pci_using_dac = 1;
+		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA configuration, aborting\n");
-				goto err_dma;
-			}
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
 		}
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb..1ca9834 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -46,7 +46,6 @@
 #include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
-#include <linux/version.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
@@ -347,9 +346,9 @@
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 
-	/* These are arrays of rings, allocated at run-time */
-	struct i40e_ring *rx_rings;
-	struct i40e_ring *tx_rings;
+	/* These are containers of ring pointers, allocated at run-time */
+	struct i40e_ring **rx_rings;
+	struct i40e_ring **tx_rings;
 
 	u16 work_limit;
 	/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +365,7 @@
 	u8  dtype;
 
 	/* List of q_vectors allocated to this VSI */
-	struct i40e_q_vector *q_vectors;
+	struct i40e_q_vector **q_vectors;
 	int num_q_vectors;
 	int base_vector;
 
@@ -422,8 +421,9 @@
 
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 
-	char name[IFNAMSIZ + 9];
 	cpumask_t affinity_mask;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -544,6 +544,7 @@
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
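
The i40e_vsi conversion above, from embedded ring arrays to arrays of ring pointers, gives each Tx/Rx queue pair its own allocation so a ring can be replaced or freed without reallocating a VSI-wide array. Readers in the hunks below therefore snapshot the pointer once under rcu_read_lock() and tolerate NULL, along these lines (my_dump_ring() is a placeholder consumer; this is a fragment, not a full function):

	struct i40e_ring *ring;

	rcu_read_lock();
	ring = ACCESS_ONCE(vsi->rx_rings[i]);	/* snapshot the pointer once */
	if (ring)
		my_dump_ring(ring);
	rcu_read_unlock();
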
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f..ef4cb1c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@
 				   size_t count, loff_t *ppos)
 {
 	struct i40e_pf *pf = filp->private_data;
-	char dump_request_buf[16];
 	bool seid_found = false;
-	int bytes_not_copied;
 	long seid = -1;
 	int buflen = 0;
 	int i, ret;
@@ -163,21 +161,12 @@
 	/* don't allow partial writes */
 	if (*ppos != 0)
 		return 0;
-	if (count >= sizeof(dump_request_buf))
-		return -ENOSPC;
-
-	bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
-	if (bytes_not_copied < 0)
-		return bytes_not_copied;
-	if (bytes_not_copied > 0)
-		count -= bytes_not_copied;
-	dump_request_buf[count] = '\0';
 
 	/* decode the SEID given to be dumped */
-	ret = kstrtol(dump_request_buf, 0, &seid);
-	if (ret < 0) {
-		dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
-			 dump_request_buf);
+	ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+	if (ret) {
+		dev_info(&pf->pdev->dev, "bad seid value\n");
 	} else if (seid == 0) {
 		seid_found = true;
 
@@ -245,26 +234,33 @@
 			memcpy(p, vsi, len);
 			p += len;
 
-			len = (sizeof(struct i40e_q_vector)
-				* vsi->num_q_vectors);
-			memcpy(p, vsi->q_vectors, len);
-			p += len;
-
-			len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
-			memcpy(p, vsi->tx_rings, len);
-			p += len;
-			memcpy(p, vsi->rx_rings, len);
-			p += len;
-
-			for (i = 0; i < vsi->num_queue_pairs; i++) {
-				len = sizeof(struct i40e_tx_buffer);
-				memcpy(p, vsi->tx_rings[i].tx_bi, len);
+			if (vsi->num_q_vectors) {
+				len = (sizeof(struct i40e_q_vector)
+					* vsi->num_q_vectors);
+				memcpy(p, vsi->q_vectors, len);
 				p += len;
 			}
-			for (i = 0; i < vsi->num_queue_pairs; i++) {
-				len = sizeof(struct i40e_rx_buffer);
-				memcpy(p, vsi->rx_rings[i].rx_bi, len);
+
+			if (vsi->num_queue_pairs) {
+				len = (sizeof(struct i40e_ring) *
+				      vsi->num_queue_pairs);
+				memcpy(p, vsi->tx_rings, len);
 				p += len;
+				memcpy(p, vsi->rx_rings, len);
+				p += len;
+			}
+
+			if (vsi->tx_rings[0]) {
+				len = sizeof(struct i40e_tx_buffer);
+				for (i = 0; i < vsi->num_queue_pairs; i++) {
+					memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+					p += len;
+				}
+				len = sizeof(struct i40e_rx_buffer);
+				for (i = 0; i < vsi->num_queue_pairs; i++) {
+					memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+					p += len;
+				}
 			}
 
 			/* macvlan filter list */
@@ -484,100 +480,104 @@
 		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
 		 vsi->tx_restart, vsi->tx_busy,
 		 vsi->rx_buf_failed, vsi->rx_page_failed);
-	if (vsi->rx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: desc = %p\n",
-				 i, vsi->rx_rings[i].desc);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-				 i, vsi->rx_rings[i].dev,
-				 vsi->rx_rings[i].netdev,
-				 vsi->rx_rings[i].rx_bi);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-				 i, vsi->rx_rings[i].state,
-				 vsi->rx_rings[i].queue_index,
-				 vsi->rx_rings[i].reg_idx);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-				 i, vsi->rx_rings[i].rx_hdr_len,
-				 vsi->rx_rings[i].rx_buf_len,
-				 vsi->rx_rings[i].dtype);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-				 i, vsi->rx_rings[i].hsplit,
-				 vsi->rx_rings[i].next_to_use,
-				 vsi->rx_rings[i].next_to_clean,
-				 vsi->rx_rings[i].ring_active);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-				 i, vsi->rx_rings[i].rx_stats.packets,
-				 vsi->rx_rings[i].rx_stats.bytes,
-				 vsi->rx_rings[i].rx_stats.non_eop_descs);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
-				 i,
-				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
-				vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-				 i, vsi->rx_rings[i].size,
-				 (long unsigned int)vsi->rx_rings[i].dma);
-			dev_info(&pf->pdev->dev,
-				 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
-				 i, vsi->rx_rings[i].vsi,
-				 vsi->rx_rings[i].q_vector);
-		}
+	rcu_read_lock();
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+		if (!rx_ring)
+			continue;
+
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: desc = %p\n",
+			 i, rx_ring->desc);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+			 i, rx_ring->dev,
+			 rx_ring->netdev,
+			 rx_ring->rx_bi);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+			 i, rx_ring->state,
+			 rx_ring->queue_index,
+			 rx_ring->reg_idx);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+			 i, rx_ring->rx_hdr_len,
+			 rx_ring->rx_buf_len,
+			 rx_ring->dtype);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i, rx_ring->hsplit,
+			 rx_ring->next_to_use,
+			 rx_ring->next_to_clean,
+			 rx_ring->ring_active);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+			 i, rx_ring->stats.packets,
+			 rx_ring->stats.bytes,
+			 rx_ring->rx_stats.non_eop_descs);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+			 i,
+			 rx_ring->rx_stats.alloc_rx_page_failed,
+			rx_ring->rx_stats.alloc_rx_buff_failed);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+			 i, rx_ring->size,
+			 (long unsigned int)rx_ring->dma);
+		dev_info(&pf->pdev->dev,
+			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
+			 i, rx_ring->vsi,
+			 rx_ring->q_vector);
 	}
-	if (vsi->tx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: desc = %p\n",
-				 i, vsi->tx_rings[i].desc);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-				 i, vsi->tx_rings[i].dev,
-				 vsi->tx_rings[i].netdev,
-				 vsi->tx_rings[i].tx_bi);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-				 i, vsi->tx_rings[i].state,
-				 vsi->tx_rings[i].queue_index,
-				 vsi->tx_rings[i].reg_idx);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: dtype = %d\n",
-				 i, vsi->tx_rings[i].dtype);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-				 i, vsi->tx_rings[i].hsplit,
-				 vsi->tx_rings[i].next_to_use,
-				 vsi->tx_rings[i].next_to_clean,
-				 vsi->tx_rings[i].ring_active);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-				 i, vsi->tx_rings[i].tx_stats.packets,
-				 vsi->tx_rings[i].tx_stats.bytes,
-				 vsi->tx_rings[i].tx_stats.restart_queue);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
-				 i,
-				 vsi->tx_rings[i].tx_stats.tx_busy,
-				 vsi->tx_rings[i].tx_stats.completed,
-				 vsi->tx_rings[i].tx_stats.tx_done_old);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-				 i, vsi->tx_rings[i].size,
-				 (long unsigned int)vsi->tx_rings[i].dma);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
-				 i, vsi->tx_rings[i].vsi,
-				 vsi->tx_rings[i].q_vector);
-			dev_info(&pf->pdev->dev,
-				 "    tx_rings[%i]: DCB tc = %d\n",
-				 i, vsi->tx_rings[i].dcb_tc);
-		}
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		if (!tx_ring)
+			continue;
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: desc = %p\n",
+			 i, tx_ring->desc);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+			 i, tx_ring->dev,
+			 tx_ring->netdev,
+			 tx_ring->tx_bi);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+			 i, tx_ring->state,
+			 tx_ring->queue_index,
+			 tx_ring->reg_idx);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: dtype = %d\n",
+			 i, tx_ring->dtype);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i, tx_ring->hsplit,
+			 tx_ring->next_to_use,
+			 tx_ring->next_to_clean,
+			 tx_ring->ring_active);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+			 i, tx_ring->stats.packets,
+			 tx_ring->stats.bytes,
+			 tx_ring->tx_stats.restart_queue);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+			 i,
+			 tx_ring->tx_stats.tx_busy,
+			 tx_ring->tx_stats.tx_done_old);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+			 i, tx_ring->size,
+			 (long unsigned int)tx_ring->dma);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
+			 i, tx_ring->vsi,
+			 tx_ring->q_vector);
+		dev_info(&pf->pdev->dev,
+			 "    tx_rings[%i]: DCB tc = %d\n",
+			 i, tx_ring->dcb_tc);
 	}
+	rcu_read_unlock();
 	dev_info(&pf->pdev->dev,
 		 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
 		 vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@
 	dev_info(&pf->pdev->dev,
 		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
 		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
-	if (vsi->q_vectors) {
-		for (i = 0; i < vsi->num_q_vectors; i++) {
-			dev_info(&pf->pdev->dev,
-				 "    q_vectors[%i]: base index = %ld\n",
-				 i, ((long int)*vsi->q_vectors[i].rx.ring-
-					(long int)*vsi->q_vectors[0].rx.ring)/
-					sizeof(struct i40e_ring));
-		}
-	}
 	dev_info(&pf->pdev->dev,
 		 "    num_q_vectors = %i, base_vector = %i\n",
 		 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@
 		return;
 	}
 	if (is_rx_ring)
-		ring = vsi->rx_rings[ring_id];
+		ring = *vsi->rx_rings[ring_id];
 	else
-		ring = vsi->tx_rings[ring_id];
+		ring = *vsi->tx_rings[ring_id];
 	if (cnt == 2) {
 		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
 			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@
 				      size_t count, loff_t *ppos)
 {
 	struct i40e_pf *pf = filp->private_data;
+	char *cmd_buf, *cmd_buf_tmp;
 	int bytes_not_copied;
 	struct i40e_vsi *vsi;
 	u8 *print_buf_start;
 	u8 *print_buf;
-	char *cmd_buf;
 	int vsi_seid;
 	int veb_seid;
 	int cnt;
@@ -1051,6 +1042,12 @@
 		count -= bytes_not_copied;
 	cmd_buf[count] = '\0';
 
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
 	print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
 	if (!print_buf_start)
 		goto command_write_done;
@@ -1157,9 +1154,9 @@
 		i40e_veb_release(pf->veb[i]);
 
 	} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
-		u8 ma[6];
-		int vlan = 0;
 		struct i40e_mac_filter *f;
+		int vlan = 0;
+		u8 ma[6];
 		int ret;
 
 		cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@
 				 ma, vlan, vsi_seid, f, ret);
 
 	} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
-		u8 ma[6];
 		int vlan = 0;
+		u8 ma[6];
 		int ret;
 
 		cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@
 				 ma, vlan, vsi_seid, ret);
 
 	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
-		int v;
-		u16 vid;
 		i40e_status ret;
+		u16 vid;
+		int v;
 
 		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
 		if (cnt != 2) {
@@ -1545,10 +1542,10 @@
 	} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
 		   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
 		struct i40e_fdir_data fd_data;
-		int ret;
 		u16 packet_len, i, j = 0;
 		char *asc_packet;
 		bool add = false;
+		int ret;
 
 		asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
 				     GFP_KERNEL);
@@ -1636,9 +1633,9 @@
 			}
 		} else if (strncmp(&cmd_buf[5],
 			   "get local", 9) == 0) {
+			u16 llen, rlen;
 			int ret, i;
 			u8 *buff;
-			u16 llen, rlen;
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1669,9 +1666,9 @@
 			kfree(buff);
 			buff = NULL;
 		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+			u16 llen, rlen;
 			int ret, i;
 			u8 *buff;
-			u16 llen, rlen;
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1747,11 +1744,13 @@
 			goto command_write_done;
 		}
 
-		/* Read at least 512 words */
-		if (buffer_len == 0)
-			buffer_len = 512;
+		/* set the max length */
+		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
 
 		bytes = 2 * buffer_len;
+
+		/* read at least 1kB, no more than 4kB */
+		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
 		buff = kzalloc(bytes, GFP_KERNEL);
 		if (!buff)
 			goto command_write_done;
@@ -1903,6 +1902,7 @@
 	struct i40e_pf *pf = filp->private_data;
 	int bytes_not_copied;
 	struct i40e_vsi *vsi;
+	char *buf_tmp;
 	int vsi_seid;
 	int i, cnt;
 
@@ -1921,6 +1921,12 @@
 		count -= bytes_not_copied;
 	i40e_dbg_netdev_ops_buf[count] = '\0';
 
+	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+	if (buf_tmp) {
+		*buf_tmp = '\0';
+		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+	}
+
 	if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
 		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
 		if (cnt != 1) {
@@ -1996,7 +2002,7 @@
 			goto netdev_ops_write_done;
 		}
 		for (i = 0; i < vsi->num_q_vectors; i++)
-			napi_schedule(&vsi->q_vectors[i].napi);
+			napi_schedule(&vsi->q_vectors[i]->napi);
 		dev_info(&pf->pdev->dev, "napi called\n");
 	} else {
 		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@
  **/
 void i40e_dbg_pf_init(struct i40e_pf *pf)
 {
-	struct dentry *pfile __attribute__((unused));
+	struct dentry *pfile;
 	const char *name = pci_name(pf->pdev);
+	const struct device *dev = &pf->pdev->dev;
 
 	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
-	if (pf->i40e_dbg_pf) {
-		pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
-					    pf, &i40e_dbg_command_fops);
-		pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-					    &i40e_dbg_dump_fops);
-		pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
-					    pf, &i40e_dbg_netdev_ops_fops);
-	} else {
-		dev_info(&pf->pdev->dev,
-			 "debugfs entry for %s failed\n", name);
-	}
+	if (!pf->i40e_dbg_pf)
+		return;
+
+	pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+				    &i40e_dbg_command_fops);
+	if (!pfile)
+		goto create_failed;
+
+	pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+				    &i40e_dbg_dump_fops);
+	if (!pfile)
+		goto create_failed;
+
+	pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+				    &i40e_dbg_netdev_ops_fops);
+	if (!pfile)
+		goto create_failed;
+
+	return;
+
+create_failed:
+	dev_info(dev, "debugfs dir/file for %s failed\n", name);
+	debugfs_remove_recursive(pf->i40e_dbg_pf);
+	return;
 }
 
 /**
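
In the dump handler above, kstrtol_from_user() replaces the stack buffer, copy_from_user(), manual NUL termination and kstrtol() sequence: it bounds-checks count, copies and terminates the user string, and parses it in one call, returning 0 on success or a negative errno. Typical usage:

	long seid;
	int ret;

	ret = kstrtol_from_user(buffer, count, 0, &seid);	/* base 0: auto-detect */
	if (ret)
		return ret;
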
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8c..1b86138 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@
 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
-	ring->rx_pending = vsi->rx_rings[0].count;
-	ring->tx_pending = vsi->tx_rings[0].count;
+	ring->rx_pending = vsi->rx_rings[0]->count;
+	ring->tx_pending = vsi->tx_rings[0]->count;
 	ring->rx_mini_pending = 0;
 	ring->rx_jumbo_pending = 0;
 }
@@ -429,8 +429,8 @@
 	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
 	/* if nothing to do return success */
-	if ((new_tx_count == vsi->tx_rings[0].count) &&
-	    (new_rx_count == vsi->rx_rings[0].count))
+	if ((new_tx_count == vsi->tx_rings[0]->count) &&
+	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
 
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@
 	if (!netif_running(vsi->netdev)) {
 		/* simple case - set for the next time the netdev is started */
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			vsi->tx_rings[i].count = new_tx_count;
-			vsi->rx_rings[i].count = new_rx_count;
+			vsi->tx_rings[i]->count = new_tx_count;
+			vsi->rx_rings[i]->count = new_rx_count;
 		}
 		goto done;
 	}
@@ -451,10 +451,10 @@
 	 */
 
 	/* alloc updated Tx resources */
-	if (new_tx_count != vsi->tx_rings[0].count) {
+	if (new_tx_count != vsi->tx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Tx descriptor count from %d to %d.\n",
-			    vsi->tx_rings[0].count, new_tx_count);
+			    vsi->tx_rings[0]->count, new_tx_count);
 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!tx_rings) {
@@ -464,7 +464,7 @@
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			tx_rings[i] = vsi->tx_rings[i];
+			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
 			if (err) {
@@ -481,10 +481,10 @@
 	}
 
 	/* alloc updated Rx resources */
-	if (new_rx_count != vsi->rx_rings[0].count) {
+	if (new_rx_count != vsi->rx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Rx descriptor count from %d to %d\n",
-			    vsi->rx_rings[0].count, new_rx_count);
+			    vsi->rx_rings[0]->count, new_rx_count);
 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!rx_rings) {
@@ -494,7 +494,7 @@
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			rx_rings[i] = vsi->rx_rings[i];
+			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err) {
@@ -517,8 +517,8 @@
 
 	if (tx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
-			vsi->tx_rings[i] = tx_rings[i];
+			i40e_free_tx_resources(vsi->tx_rings[i]);
+			*vsi->tx_rings[i] = tx_rings[i];
 		}
 		kfree(tx_rings);
 		tx_rings = NULL;
@@ -526,8 +526,8 @@
 
 	if (rx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
-			vsi->rx_rings[i] = rx_rings[i];
+			i40e_free_rx_resources(vsi->rx_rings[i]);
+			*vsi->rx_rings[i] = rx_rings[i];
 		}
 		kfree(rx_rings);
 		rx_rings = NULL;
@@ -579,6 +579,7 @@
 	char *p;
 	int j;
 	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+	unsigned int start;
 
 	i40e_update_stats(vsi);
 
@@ -587,14 +588,30 @@
 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		data[i++] = vsi->tx_rings[j].tx_stats.packets;
-		data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+	rcu_read_lock();
+	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+		struct i40e_ring *rx_ring;
+
+		if (!tx_ring)
+			continue;
+
+		/* process Tx ring statistics */
+		do {
+			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			data[i] = tx_ring->stats.packets;
+			data[i + 1] = tx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+		/* Rx ring is the 2nd half of the queue pair */
+		rx_ring = &tx_ring[1];
+		do {
+			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			data[i + 2] = rx_ring->stats.packets;
+			data[i + 3] = rx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
 	}
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		data[i++] = vsi->rx_rings[j].rx_stats.packets;
-		data[i++] = vsi->rx_rings[j].rx_stats.bytes;
-	}
+	rcu_read_unlock();
 	if (vsi == pf->vsi[pf->lan_vsi]) {
 		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
 			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
 			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@
 	}
 
 	vector = vsi->base_vector;
-	q_vector = vsi->q_vectors;
-	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa47..be15938 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -347,14 +347,53 @@
  **/
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 					     struct net_device *netdev,
-					     struct rtnl_link_stats64 *storage)
+					     struct rtnl_link_stats64 *stats)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
+	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+	int i;
 
-	*storage = *i40e_get_vsi_stats_struct(vsi);
+	rcu_read_lock();
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring, *rx_ring;
+		u64 bytes, packets;
+		unsigned int start;
 
-	return storage;
+		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		if (!tx_ring)
+			continue;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			packets = tx_ring->stats.packets;
+			bytes   = tx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+		stats->tx_packets += packets;
+		stats->tx_bytes   += bytes;
+		rx_ring = &tx_ring[1];
+
+		do {
+			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			packets = rx_ring->stats.packets;
+			bytes   = rx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+		stats->rx_packets += packets;
+		stats->rx_bytes   += bytes;
+	}
+	rcu_read_unlock();
+
+	/* following stats updated by i40e_watchdog_subtask() */
+	stats->multicast	= vsi_stats->multicast;
+	stats->tx_errors	= vsi_stats->tx_errors;
+	stats->tx_dropped	= vsi_stats->tx_dropped;
+	stats->rx_errors	= vsi_stats->rx_errors;
+	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
+	stats->rx_length_errors	= vsi_stats->rx_length_errors;
+
+	return stats;
 }
 
 /**
@@ -376,10 +415,14 @@
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
 	if (vsi->rx_rings)
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			memset(&vsi->rx_rings[i].rx_stats, 0 ,
-			       sizeof(vsi->rx_rings[i].rx_stats));
-			memset(&vsi->tx_rings[i].tx_stats, 0,
-			       sizeof(vsi->tx_rings[i].tx_stats));
+			memset(&vsi->rx_rings[i]->stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->stats));
+			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->rx_stats));
+			memset(&vsi->tx_rings[i]->stats, 0 ,
+			       sizeof(vsi->tx_rings[i]->stats));
+			memset(&vsi->tx_rings[i]->tx_stats, 0,
+			       sizeof(vsi->tx_rings[i]->tx_stats));
 		}
 	vsi->stat_offsets_loaded = false;
 }
@@ -598,7 +641,7 @@
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
 		}
 	}
@@ -652,7 +695,7 @@
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 
 			tc = ring->dcb_tc;
 			if (xoff[tc])
@@ -704,21 +747,38 @@
 	tx_restart = tx_busy = 0;
 	rx_page = 0;
 	rx_buf = 0;
+	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		struct i40e_ring *p;
+		u64 bytes, packets;
+		unsigned int start;
 
-		p = &vsi->rx_rings[q];
-		rx_b += p->rx_stats.bytes;
-		rx_p += p->rx_stats.packets;
-		rx_buf += p->rx_stats.alloc_rx_buff_failed;
-		rx_page += p->rx_stats.alloc_rx_page_failed;
+		/* locate Tx ring */
+		p = ACCESS_ONCE(vsi->tx_rings[q]);
 
-		p = &vsi->tx_rings[q];
-		tx_b += p->tx_stats.bytes;
-		tx_p += p->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&p->syncp);
+			packets = p->stats.packets;
+			bytes = p->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		tx_b += bytes;
+		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
 		tx_busy += p->tx_stats.tx_busy;
+
+		/* Rx queue is part of the same block as Tx queue */
+		p = &p[1];
+		do {
+			start = u64_stats_fetch_begin_bh(&p->syncp);
+			packets = p->stats.packets;
+			bytes = p->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		rx_b += bytes;
+		rx_p += packets;
+		rx_buf += p->rx_stats.alloc_rx_buff_failed;
+		rx_page += p->rx_stats.alloc_rx_page_failed;
 	}
+	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
 	vsi->tx_busy = tx_busy;
 	vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2004,8 +2064,8 @@
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->tx_rings[i].desc)
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
+		if (vsi->tx_rings[i]->desc)
+			i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
 /**
@@ -2023,7 +2083,7 @@
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
 	return err;
 }
 
@@ -2038,8 +2098,8 @@
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->rx_rings[i].desc)
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
+		if (vsi->rx_rings[i]->desc)
+			i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
 /**
@@ -2114,8 +2174,8 @@
 
 	/* Now associate this queue with this PCI function */
 	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
-	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-						& I40E_QTX_CTL_PF_INDX_MASK);
+	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+		    I40E_QTX_CTL_PF_INDX_MASK);
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
 	i40e_flush(hw);
 
@@ -2223,8 +2283,8 @@
 	int err = 0;
 	u16 i;
 
-	for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
-		err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2274,7 +2334,7 @@
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
 
 	return err;
 }
@@ -2298,8 +2358,8 @@
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
 		qcount = vsi->tc_config.tc_info[n].qcount;
 		for (i = qoffset; i < (qoffset + qcount); i++) {
-			struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-			struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+			struct i40e_ring *rx_ring = vsi->rx_rings[i];
+			struct i40e_ring *tx_ring = vsi->tx_rings[i];
 			rx_ring->dcb_tc = n;
 			tx_ring->dcb_tc = n;
 		}
@@ -2354,8 +2414,8 @@
 	 */
 	qp = vsi->base_queue;
 	vector = vsi->base_vector;
-	q_vector = vsi->q_vectors;
-	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@
  **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-	struct i40e_q_vector *q_vector = vsi->q_vectors;
+	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -2472,7 +2532,7 @@
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
  **/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -2500,7 +2560,7 @@
 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-	i40e_flush(hw);
+	/* skip the flush */
 }
 
 /**
@@ -2512,7 +2572,7 @@
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@
 	int vector, err;
 
 	for (vector = 0; vector < q_vectors; vector++) {
-		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
 			tx_int_idx++;
-		} else if (q_vector->rx.ring[0]) {
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
-		} else if (q_vector->tx.ring[0]) {
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
 		} else {
@@ -2611,8 +2671,8 @@
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
-		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
 	}
 
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@
 		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
+	i40e_flush(&pf->hw);
 	return 0;
 }
 
@@ -2681,14 +2742,14 @@
 
 	icr0 = rd32(hw, I40E_PFINT_ICR0);
 
-	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
-	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-		return IRQ_NONE;
-
 	val = rd32(hw, I40E_PFINT_DYN_CTL0);
 	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
 
+	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
+	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+		return IRQ_NONE;
+
 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@
 		qval = rd32(hw, I40E_QINT_TQCTL(0));
 		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
 		wr32(hw, I40E_QINT_TQCTL(0), qval);
-		i40e_flush(hw);
 
 		if (!test_bit(__I40E_DOWN, &pf->state))
-			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
 	}
 
 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@
 
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	i40e_flush(hw);
 	if (!test_bit(__I40E_DOWN, &pf->state)) {
 		i40e_service_event_schedule(pf);
 		i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-	rx_ring->q_vector = q_vector;
-	q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-	q_vector->rx.count++;
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
 
 	tx_ring->q_vector = q_vector;
-	q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+	tx_ring->next = q_vector->tx.ring;
+	q_vector->tx.ring = tx_ring;
 	q_vector->tx.count++;
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
-	q_vector->num_ringpairs++;
-	q_vector->vsi = vsi;
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
 }
 
 /**
@@ -2823,7 +2868,7 @@
 {
 	int qp_remaining = vsi->num_queue_pairs;
 	int q_vectors = vsi->num_q_vectors;
-	int qp_per_vector;
+	int num_ringpairs;
 	int v_start = 0;
 	int qp_idx = 0;
 
@@ -2831,11 +2876,21 @@
 	 * group them so there are multiple queues per vector.
 	 */
 	for (; v_start < q_vectors && qp_remaining; v_start++) {
-		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-		for (; qp_per_vector;
-		     qp_per_vector--, qp_idx++, qp_remaining--)	{
-			map_vector_to_rxq(vsi, v_start, qp_idx);
-			map_vector_to_txq(vsi, v_start, qp_idx);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+		q_vector->num_ringpairs = num_ringpairs;
+
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+
+		while (num_ringpairs--) {
+			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
+			qp_idx++;
+			qp_remaining--;
 		}
 	}
 }
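
i40e_map_vector_to_qp() now threads each Tx and Rx ring onto a per-vector singly linked list by head insertion, replacing the fixed-size ring[] array (and its I40E_MAX_RINGPAIR_PER_VECTOR cap) in the ring container. The pattern in isolation, with generic names standing in for the driver types:

struct ring { struct ring *next; };
struct container { struct ring *head; };

static void push_ring(struct container *c, struct ring *r)
{
	r->next = c->head;	/* newest ring becomes the list head */
	c->head = r;
}

#define for_each_ring(pos, c) \
	for ((pos) = (c)->head; (pos) != NULL; (pos) = (pos)->next)

Head insertion keeps the mapping O(1) per ring and removes any compile-time limit on rings per vector.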
@@ -2887,7 +2942,7 @@
 	pf->flags |= I40E_FLAG_IN_NETPOLL;
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		for (i = 0; i < vsi->num_q_vectors; i++)
-			i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
 	} else {
 		i40e_intr(pf->pdev->irq, netdev);
 	}
@@ -3073,14 +3128,14 @@
 			u16 vector = i + base;
 
 			/* free only the irqs that were actually requested */
-			if (vsi->q_vectors[i].num_ringpairs == 0)
+			if (vsi->q_vectors[i]->num_ringpairs == 0)
 				continue;
 
 			/* clear the affinity_mask in the IRQ descriptor */
 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
 					      NULL);
 			free_irq(pf->msix_entries[vector].vector,
-				 &vsi->q_vectors[i]);
+				 vsi->q_vectors[i]);
 
 			/* Tear down the interrupt queue link list
 			 *
@@ -3164,6 +3219,39 @@
 }
 
 /**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition, if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+	struct i40e_ring *ring;
+
+	if (!q_vector)
+		return;
+
+	/* disassociate q_vector from rings */
+	i40e_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
+
+	/* only VSI w/ an associated netdev is set up w/ NAPI */
+	if (vsi->netdev)
+		netif_napi_del(&q_vector->napi);
+
+	vsi->q_vectors[v_idx] = NULL;
+
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  * @vsi: the VSI being un-configured
  *
@@ -3174,24 +3262,8 @@
 {
 	int v_idx;
 
-	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-		struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-		int r_idx;
-
-		if (!q_vector)
-			continue;
-
-		/* disassociate q_vector from rings */
-		for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-			q_vector->tx.ring[r_idx]->q_vector = NULL;
-		for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-			q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-		/* only VSI w/ an associated netdev is set up w/ NAPI */
-		if (vsi->netdev)
-			netif_napi_del(&q_vector->napi);
-	}
-	kfree(vsi->q_vectors);
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		i40e_free_q_vector(vsi, v_idx);
 }
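
Freeing the vector with kfree_rcu() instead of kfree() is what makes the lockless dereferences under rcu_read_lock() elsewhere in the driver safe: the memory is not reclaimed until every reader already inside a read-side critical section has finished. A minimal sketch of the idiom, assuming an object that embeds a struct rcu_head (names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int data;
	struct rcu_head rcu;	/* used only to defer the free */
};

static void retire(struct obj __rcu **slot)
{
	struct obj *old = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);	/* unpublish before freeing */
	if (old)
		kfree_rcu(old, rcu);	/* reclaimed after a grace period */
}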
 
 /**
@@ -3241,7 +3313,7 @@
 		return;
 
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx].napi);
+		napi_enable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3256,7 +3328,7 @@
 		return;
 
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx].napi);
+		napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3703,8 +3775,11 @@
 
 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
 	    (vsi->netdev)) {
+		netdev_info(vsi->netdev, "NIC Link is Up\n");
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
+	} else if (vsi->netdev) {
+		netdev_info(vsi->netdev, "NIC Link is Down\n");
 	}
 	i40e_service_event_schedule(pf);
 
@@ -3772,8 +3847,8 @@
 	i40e_napi_disable_all(vsi);
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		i40e_clean_tx_ring(&vsi->tx_rings[i]);
-		i40e_clean_rx_ring(&vsi->rx_rings[i]);
+		i40e_clean_tx_ring(vsi->tx_rings[i]);
+		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
 }
 
@@ -4153,8 +4228,9 @@
 	if (new_link == old_link)
 		return;
 
-	netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-		    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+		netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+			    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
 
 	/* Notify the base of the switch tree connected to
 	 * the link.  Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			set_check_for_tx_hang(&vsi->tx_rings[i]);
+			set_check_for_tx_hang(vsi->tx_rings[i]);
 			if (test_bit(__I40E_HANG_CHECK_ARMED,
-				     &vsi->tx_rings[i].state))
+				     &vsi->tx_rings[i]->state))
 				armed++;
 		}
 
@@ -4537,7 +4613,8 @@
 	bool new_vsi = false;
 	int err, i;
 
-	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+			   I40E_FLAG_FDIR_ATR_ENABLED)))
 		return;
 
 	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@
 {
 	int ret = -ENODEV;
 	struct i40e_vsi *vsi;
+	int sz_vectors;
+	int sz_rings;
 	int vsi_idx;
 	int i;
 
@@ -4962,14 +5041,14 @@
 		vsi_idx = i;             /* Found one! */
 	} else {
 		ret = -ENODEV;
-		goto err_alloc_vsi;  /* out of VSI slots! */
+		goto unlock_pf;  /* out of VSI slots! */
 	}
 	pf->next_vsi = ++i;
 
 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
 	if (!vsi) {
 		ret = -ENOMEM;
-		goto err_alloc_vsi;
+		goto unlock_pf;
 	}
 	vsi->type = type;
 	vsi->back = pf;
@@ -4982,14 +5061,40 @@
 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
 	INIT_LIST_HEAD(&vsi->mac_filter_list);
 
-	i40e_set_num_rings_in_vsi(vsi);
+	ret = i40e_set_num_rings_in_vsi(vsi);
+	if (ret)
+		goto err_rings;
+
+	/* allocate memory for ring pointers */
+	sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+	vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+	if (!vsi->tx_rings) {
+		ret = -ENOMEM;
+		goto err_rings;
+	}
+	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+	/* allocate memory for q_vector pointers */
+	sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+	vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+	if (!vsi->q_vectors) {
+		ret = -ENOMEM;
+		goto err_vectors;
+	}
 
 	/* Setup default MSIX irq handler for VSI */
 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
 	pf->vsi[vsi_idx] = vsi;
 	ret = vsi_idx;
-err_alloc_vsi:
+	goto unlock_pf;
+
+err_vectors:
+	kfree(vsi->tx_rings);
+err_rings:
+	pf->next_vsi = i - 1;
+	kfree(vsi);
+unlock_pf:
 	mutex_unlock(&pf->switch_mutex);
 	return ret;
 }
@@ -5030,6 +5135,10 @@
 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+	/* free the ring and vector containers */
+	kfree(vsi->q_vectors);
+	kfree(vsi->tx_rings);
+
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi)
 		pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@
 }
 
 /**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (vsi->tx_rings[0])
+		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+			kfree_rcu(vsi->tx_rings[i], rcu);
+			vsi->tx_rings[i] = NULL;
+			vsi->rx_rings[i] = NULL;
+		}
+
+	return 0;
+}
+
+/**
  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
  * @vsi: the VSI being configured
  **/
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	int ret = 0;
 	int i;
 
-	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->rx_rings) {
-		ret = -ENOMEM;
-		goto err_alloc_rings;
-	}
-
-	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->tx_rings) {
-		ret = -ENOMEM;
-		kfree(vsi->rx_rings);
-		goto err_alloc_rings;
-	}
-
 	/* Set basic values in the rings to be used later during open() */
 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-		struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+		struct i40e_ring *tx_ring;
+		struct i40e_ring *rx_ring;
+
+		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+		if (!tx_ring)
+			goto err_out;
 
 		tx_ring->queue_index = i;
 		tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@
 		tx_ring->count = vsi->num_desc;
 		tx_ring->size = 0;
 		tx_ring->dcb_tc = 0;
+		vsi->tx_rings[i] = tx_ring;
 
+		rx_ring = &tx_ring[1];
 		rx_ring->queue_index = i;
 		rx_ring->reg_idx = vsi->base_queue + i;
 		rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@
 			set_ring_16byte_desc_enabled(rx_ring);
 		else
 			clear_ring_16byte_desc_enabled(rx_ring);
-	}
-
-err_alloc_rings:
-	return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
-	if (vsi) {
-		kfree(vsi->rx_rings);
-		kfree(vsi->tx_rings);
+		vsi->rx_rings[i] = rx_ring;
 	}
 
 	return 0;
+
+err_out:
+	i40e_vsi_clear_rings(vsi);
+	return -ENOMEM;
 }
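
Each queue pair now comes from a single kzalloc() of two ring structs, so the Rx ring sits immediately behind the Tx ring (the p = &p[1] step in the stats loop depends on this layout) and one kfree_rcu() on the Tx pointer in i40e_vsi_clear_rings() releases both. A sketch of the layout contract (the helper name is made up):

static int i40e_alloc_ring_pair(struct i40e_ring **txp,
				struct i40e_ring **rxp)
{
	/* one block, two rings: index 0 is Tx, index 1 is Rx */
	struct i40e_ring *tx = kzalloc(sizeof(*tx) * 2, GFP_KERNEL);

	if (!tx)
		return -ENOMEM;
	*txp = tx;
	*rxp = &tx[1];	/* never kfree() *rxp on its own */
	return 0;
}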
 
 /**
@@ -5249,6 +5356,38 @@
 }
 
 /**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails, we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi,
+			       i40e_napi_poll, vsi->work_limit);
+
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+	/* tie q_vector and vsi together */
+	vsi->q_vectors[v_idx] = q_vector;
+
+	return 0;
+}
+
+/**
  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
  *
@@ -5259,6 +5398,7 @@
 {
 	struct i40e_pf *pf = vsi->back;
 	int v_idx, num_q_vectors;
+	int err;
 
 	/* if not MSIX, give the one vector only to the LAN VSI */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@
 	else
 		return -EINVAL;
 
-	vsi->q_vectors = kcalloc(num_q_vectors,
-				 sizeof(struct i40e_q_vector),
-				 GFP_KERNEL);
-	if (!vsi->q_vectors)
-		return -ENOMEM;
-
 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		vsi->q_vectors[v_idx].vsi = vsi;
-		vsi->q_vectors[v_idx].v_idx = v_idx;
-		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-		if (vsi->netdev)
-			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-				       i40e_napi_poll, vsi->work_limit);
+		err = i40e_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
 	}
 
 	return 0;
+
+err_out:
+	while (v_idx--)
+		i40e_free_q_vector(vsi, v_idx);
+
+	return err;
 }
 
 /**
@@ -5297,7 +5434,8 @@
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		err = i40e_init_msix(pf);
 		if (err) {
-			pf->flags &= ~(I40E_FLAG_RSS_ENABLED	   |
+			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	   |
+					I40E_FLAG_RSS_ENABLED	   |
 					I40E_FLAG_MQ_ENABLED	   |
 					I40E_FLAG_DCB_ENABLED	   |
 					I40E_FLAG_SRIOV_ENABLED	   |
@@ -5312,14 +5450,17 @@
 
 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+		dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
 		err = pci_enable_msi(pf->pdev);
 		if (err) {
-			dev_info(&pf->pdev->dev,
-				 "MSI init failed (%d), trying legacy.\n", err);
+			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
 		}
 	}
 
+	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+		dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
 	/* track first vector for misc interrupts */
 	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
 }
@@ -5950,7 +6091,7 @@
 	int ret = -ENOENT;
 	struct i40e_pf *pf = vsi->back;
 
-	if (vsi->q_vectors) {
+	if (vsi->q_vectors[0]) {
 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
 			 vsi->seid);
 		return -EEXIST;
@@ -5972,8 +6113,9 @@
 		goto vector_setup_out;
 	}
 
-	vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
-					 vsi->num_q_vectors, vsi->idx);
+	if (vsi->num_q_vectors)
+		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+						 vsi->num_q_vectors, vsi->idx);
 	if (vsi->base_vector < 0) {
 		dev_info(&pf->pdev->dev,
 			 "failed to get q tracking for VSI %d, err=%d\n",
@@ -7062,8 +7204,10 @@
 	 */
 	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
 	pf->vsi = kzalloc(len, GFP_KERNEL);
-	if (!pf->vsi)
+	if (!pf->vsi) {
+		err = -ENOMEM;
 		goto err_switch_setup;
+	}
 
 	err = i40e_setup_pf_switch(pf);
 	if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa..f1f03bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@
 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
 }
 
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_desc;
 	struct i40e_ring *tx_ring;
+	unsigned int fpt, dcc;
 	struct i40e_vsi *vsi;
 	struct device *dev;
 	dma_addr_t dma;
@@ -64,93 +66,78 @@
 	if (!vsi)
 		return -ENOENT;
 
-	tx_ring = &vsi->tx_rings[0];
+	tx_ring = vsi->tx_rings[0];
 	dev = tx_ring->dev;
 
 	dma = dma_map_single(dev, fdir_data->raw_packet,
-				I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+			     I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma))
 		goto dma_fail;
 
 	/* grab the next descriptor */
-	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-	tx_ring->next_to_use++;
-	if (tx_ring->next_to_use == tx_ring->count)
-		tx_ring->next_to_use = 0;
+	i = tx_ring->next_to_use;
+	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+	tx_buf = &tx_ring->tx_bi[i];
 
-	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
-					     << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-					     & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
-	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
-					    << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-					    & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+	      I40E_TXD_FLTR_QW0_QINDEX_MASK;
 
-	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
-					     << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-					     & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
+
+	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
 	/* Use LAN VSI Id if not programmed by user */
 	if (fdir_data->dest_vsi == 0)
-		fdir_desc->qindex_flex_ptype_vsi |=
-					  cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
-					   << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
 	else
-		fdir_desc->qindex_flex_ptype_vsi |=
-					    cpu_to_le32((fdir_data->dest_vsi
-					    << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-					    & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+		fpt |= ((u32)fdir_data->dest_vsi <<
+			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-	fdir_desc->dtype_cmd_cntindex =
-				    cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
 	if (add)
-		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-				       I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
-					<< I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 	else
-		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-					   I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
-					   << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 
-	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
-					  << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-					  & I40E_TXD_FLTR_QW1_DEST_MASK);
+	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+	       I40E_TXD_FLTR_QW1_DEST_MASK;
 
-	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-		     (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-		      & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
 
 	if (fdir_data->cnt_index != 0) {
-		fdir_desc->dtype_cmd_cntindex |=
-				    cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
-		fdir_desc->dtype_cmd_cntindex |=
-					    cpu_to_le32((fdir_data->cnt_index
-					    << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
-					    & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+		dcc |= ((u32)fdir_data->cnt_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 	}
 
+	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
 	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
 	/* Now program a dummy descriptor */
-	tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
-	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-	tx_ring->next_to_use++;
-	if (tx_ring->next_to_use == tx_ring->count)
-		tx_ring->next_to_use = 0;
+	i = tx_ring->next_to_use;
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+
+	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
 	tx_desc->buffer_addr = cpu_to_le64(dma);
-	td_cmd = I40E_TX_DESC_CMD_EOP |
-		 I40E_TX_DESC_CMD_RS  |
-		 I40E_TX_DESC_CMD_DUMMY;
+	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
 	tx_desc->cmd_type_offset_bsz =
 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
 
-	/* Mark the data descriptor to be watched */
-	tx_buf->next_to_watch = tx_desc;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@
 	 */
 	wmb();
 
+	/* Mark the data descriptor to be watched */
+	tx_buf->next_to_watch = tx_desc;
+
 	writel(tx_ring->next_to_use, tx_ring->tail);
 	return 0;
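
Building each descriptor word in a plain u32 local (fpt, dcc) and converting once with cpu_to_le32() at the end replaces the old chain of |= on a live little-endian field, which needed a cpu_to_le32() per OR. The idiom reduced to its core, with made-up shift/mask names:

#define QW0_QINDEX_SHIFT	0
#define QW0_QINDEX_MASK		0x7ffU
#define QW0_FLEXOFF_SHIFT	16
#define QW0_FLEXOFF_MASK	0x1f0000U

static void fill_qw0(__le32 *qw0, u32 q_index, u32 flex_off)
{
	u32 fpt = 0;

	fpt |= (q_index << QW0_QINDEX_SHIFT) & QW0_QINDEX_MASK;
	fpt |= (flex_off << QW0_FLEXOFF_SHIFT) & QW0_FLEXOFF_MASK;
	*qw0 = cpu_to_le32(fpt);	/* one conversion, one store */
}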
 
@@ -188,27 +178,30 @@
 }
 
 /**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
  * @ring:      the ring that owns the buffer
  * @tx_buffer: the buffer to free
  **/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
-					  struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+					    struct i40e_tx_buffer *tx_buffer)
 {
-	if (tx_buffer->dma) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
-			dma_unmap_page(ring->dev,
-				       tx_buffer->dma,
-				       tx_buffer->length,
-				       DMA_TO_DEVICE);
-		else
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
-					 tx_buffer->dma,
-					 tx_buffer->length,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
 					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
 	}
-	tx_buffer->dma = 0;
-	tx_buffer->time_stamp = 0;
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
 }
 
 /**
@@ -217,7 +210,6 @@
  **/
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 {
-	struct i40e_tx_buffer *tx_buffer;
 	unsigned long bi_size;
 	u16 i;
 
@@ -226,13 +218,8 @@
 		return;
 
 	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer = &tx_ring->tx_bi[i];
-		i40e_unmap_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer->skb)
-			dev_kfree_skb_any(tx_buffer->skb);
-		tx_buffer->skb = NULL;
-	}
+	for (i = 0; i < tx_ring->count; i++)
+		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
 
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+
+	if (!tx_ring->netdev)
+		return;
+
+	/* cleanup Tx queue statistics */
+	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+						  tx_ring->queue_index));
 }
 
 /**
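
The netdev_tx_reset_queue() call added above is one third of the byte queue limits (BQL) contract this patch wires up: bytes are reported to the stack when queued (netdev_tx_sent_queue() in the xmit path), reported again on completion (netdev_tx_completed_queue() in the clean path), and the accounting is reset whenever the ring is emptied out from under the stack. A hedged sketch of the pairing; netdev, qidx, packets and bytes are assumed context:

static void bql_cycle(struct net_device *netdev, unsigned int qidx,
		      unsigned int packets, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);

	netdev_tx_sent_queue(txq, bytes);		/* transmit path */
	/* ... hardware completes the packets ... */
	netdev_tx_completed_queue(txq, packets, bytes);	/* clean path */
	netdev_tx_reset_queue(txq);			/* ring teardown */
}

Missing any one of the three calls makes BQL either throttle the queue forever or never throttle at all.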
@@ -300,14 +294,14 @@
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
 	    tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
 	} else {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
@@ -331,62 +325,88 @@
 
 	tx_buf = &tx_ring->tx_bi[i];
 	tx_desc = I40E_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
 
-	for (; budget; budget--) {
-		struct i40e_tx_desc *eop_desc;
-
-		eop_desc = tx_buf->next_to_watch;
+	do {
+		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
 		/* if next_to_watch is not set then there is no work pending */
 		if (!eop_desc)
 			break;
 
+		/* prevent any other reads prior to eop_desc */
+		read_barrier_depends();
+
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->cmd_type_offset_bsz &
 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
 			break;
 
-		/* count the packet as being completed */
-		tx_ring->tx_stats.completed++;
+		/* clear next_to_watch to prevent false hangs */
 		tx_buf->next_to_watch = NULL;
-		tx_buf->time_stamp = 0;
 
-		/* set memory barrier before eop_desc is verified */
-		rmb();
+		/* update the statistics for this packet */
+		total_bytes += tx_buf->bytecount;
+		total_packets += tx_buf->gso_segs;
 
-		do {
-			i40e_unmap_tx_resource(tx_ring, tx_buf);
+		/* free the skb */
+		dev_kfree_skb_any(tx_buf->skb);
 
-			/* clear dtype status */
-			tx_desc->cmd_type_offset_bsz &=
-				~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buf, dma),
+				 dma_unmap_len(tx_buf, len),
+				 DMA_TO_DEVICE);
 
-			if (likely(tx_desc == eop_desc)) {
-				eop_desc = NULL;
+		/* clear tx_buffer data */
+		tx_buf->skb = NULL;
+		dma_unmap_len_set(tx_buf, len, 0);
 
-				dev_kfree_skb_any(tx_buf->skb);
-				tx_buf->skb = NULL;
-
-				total_bytes += tx_buf->bytecount;
-				total_packets += tx_buf->gso_segs;
-			}
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
 
 			tx_buf++;
 			tx_desc++;
 			i++;
-			if (unlikely(i == tx_ring->count)) {
-				i = 0;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
 				tx_buf = tx_ring->tx_bi;
 				tx_desc = I40E_TX_DESC(tx_ring, 0);
 			}
-		} while (eop_desc);
-	}
 
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buf, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buf, dma),
+					       dma_unmap_len(tx_buf, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buf, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buf++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buf = tx_ring->tx_bi;
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+		}
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
-	tx_ring->tx_stats.bytes += total_bytes;
-	tx_ring->tx_stats.packets += total_packets;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
+
 	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
 		/* schedule immediate reset if we believe we hung */
 		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@
 		return true;
 	}
 
+	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+						      tx_ring->queue_index),
+				  total_packets, total_bytes);
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@
 	i40e_set_new_dynamic_itr(&q_vector->tx);
 	if (old_itr != q_vector->tx.itr)
 		wr32(hw, reg_addr, q_vector->tx.itr);
-
-	i40e_flush(hw);
 }
 
 /**
@@ -1042,8 +1064,10 @@
 	}
 
 	rx_ring->next_to_clean = i;
-	rx_ring->rx_stats.packets += total_rx_packets;
-	rx_ring->rx_stats.bytes += total_rx_bytes;
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
@@ -1067,27 +1091,28 @@
 	struct i40e_q_vector *q_vector =
 			       container_of(napi, struct i40e_q_vector, napi);
 	struct i40e_vsi *vsi = q_vector->vsi;
+	struct i40e_ring *ring;
 	bool clean_complete = true;
 	int budget_per_ring;
-	int i;
 
 	if (test_bit(__I40E_DOWN, &vsi->state)) {
 		napi_complete(napi);
 		return 0;
 	}
 
-	/* We attempt to distribute budget to each Rx queue fairly, but don't
-	 * allow the budget to go below 1 because that would exit polling early.
-	 * Since the actual Tx work is minimal, we can give the Tx a larger
+	/* Since the actual Tx work is minimal, we can give the Tx a larger
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
+	i40e_for_each_ring(ring, q_vector->tx)
+		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+	/* We attempt to distribute budget to each Rx queue fairly, but don't
+	 * allow the budget to go below 1 because that would exit polling early.
+	 */
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-	for (i = 0; i < q_vector->num_ringpairs; i++) {
-		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
-						    vsi->work_limit);
-		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
-						    budget_per_ring);
-	}
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete)
@@ -1117,7 +1142,8 @@
 			qval = rd32(hw, I40E_QINT_TQCTL(0));
 			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
 			wr32(hw, I40E_QINT_TQCTL(0), qval);
-			i40e_flush(hw);
+
+			i40e_irq_dynamic_enable_icr0(vsi->back);
 		}
 	}
 
@@ -1144,6 +1170,7 @@
 	struct tcphdr *th;
 	unsigned int hlen;
 	u32 flex_ptype, dtype_cmd;
+	u16 i;
 
 	/* make sure ATR is enabled */
 	if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@
 	tx_ring->atr_count = 0;
 
 	/* grab the next descriptor */
-	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-	tx_ring->next_to_use++;
-	if (tx_ring->next_to_use == tx_ring->count)
-		tx_ring->next_to_use = 0;
+	i = tx_ring->next_to_use;
+	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@
 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
 }
 
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
@@ -1276,27 +1303,6 @@
 }
 
 /**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring:  ptr to the ring to send
- * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			 u32 tx_flags, __be16 protocol)
-{
-	if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
-	    !(tx_flags & I40E_TX_FLAGS_TXSW)) {
-		if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
-			return false;
-	}
-
-	return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-/**
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
@@ -1482,15 +1488,16 @@
 			       const u32 cd_tunneling, const u32 cd_l2tag2)
 {
 	struct i40e_tx_context_desc *context_desc;
+	int i = tx_ring->next_to_use;
 
 	if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
 		return;
 
 	/* grab the next descriptor */
-	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
-	tx_ring->next_to_use++;
-	if (tx_ring->next_to_use == tx_ring->count)
-		tx_ring->next_to_use = 0;
+	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
 	/* cpu_to_le32 and assign to struct fields */
 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@
 			struct i40e_tx_buffer *first, u32 tx_flags,
 			const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
-	struct device *dev = tx_ring->dev;
-	u32 paylen = skb->len - hdr_len;
-	u16 i = tx_ring->next_to_use;
+	struct skb_frag_struct *frag;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
-	u32 buf_offset = 0;
+	u16 i = tx_ring->next_to_use;
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
 
-	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma))
-		goto dma_error;
-
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
 			 I40E_TX_FLAGS_VLAN_SHIFT;
 	}
 
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+		gso_segs = skb_shinfo(skb)->gso_segs;
+	else
+		gso_segs = 1;
+
+	/* multiply data chunks by size of headers */
+	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+	first->gso_segs = gso_segs;
+	first->skb = skb;
+	first->tx_flags = tx_flags;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
 	tx_desc = I40E_TX_DESC(tx_ring, i);
-	for (;;) {
-		while (size > I40E_MAX_DATA_PER_TXD) {
-			tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+	tx_bi = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_bi, len, size);
+		dma_unmap_addr_set(tx_bi, dma, dma);
+
+		tx_desc->buffer_addr = cpu_to_le64(dma);
+
+		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
 			tx_desc->cmd_type_offset_bsz =
 				build_ctob(td_cmd, td_offset,
 					   I40E_MAX_DATA_PER_TXD, td_tag);
 
-			buf_offset += I40E_MAX_DATA_PER_TXD;
-			size -= I40E_MAX_DATA_PER_TXD;
-
 			tx_desc++;
 			i++;
 			if (i == tx_ring->count) {
 				tx_desc = I40E_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+
+			dma += I40E_MAX_DATA_PER_TXD;
+			size -= I40E_MAX_DATA_PER_TXD;
+
+			tx_desc->buffer_addr = cpu_to_le64(dma);
 		}
 
-		tx_bi = &tx_ring->tx_bi[i];
-		tx_bi->length = buf_offset + size;
-		tx_bi->tx_flags = tx_flags;
-		tx_bi->dma = dma;
-
-		tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
-		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
-							  size, td_tag);
-
 		if (likely(!data_len))
 			break;
 
-		size = skb_frag_size(frag);
-		data_len -= size;
-		buf_offset = 0;
-		tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma))
-			goto dma_error;
+		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+							  size, td_tag);
 
 		tx_desc++;
 		i++;
@@ -1582,31 +1592,25 @@
 			i = 0;
 		}
 
-		frag++;
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-	tx_desc->cmd_type_offset_bsz |=
-		       cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+	tx_desc->cmd_type_offset_bsz =
+		build_ctob(td_cmd, td_offset, size, td_tag) |
+		cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
 
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+			     first->bytecount);
 
-	tx_ring->next_to_use = i;
-
-	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
-		gso_segs = skb_shinfo(skb)->gso_segs;
-	else
-		gso_segs = 1;
-
-	/* multiply data chunks by size of headers */
-	tx_bi->bytecount = paylen + (gso_segs * hdr_len);
-	tx_bi->gso_segs = gso_segs;
-	tx_bi->skb = skb;
-
-	/* set the timestamp and next to watch values */
+	/* set the timestamp */
 	first->time_stamp = jiffies;
-	first->next_to_watch = tx_desc;
 
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
@@ -1615,16 +1619,27 @@
 	 */
 	wmb();
 
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
 	writel(i, tx_ring->tail);
+
 	return;
 
 dma_error:
-	dev_info(dev, "TX DMA map failed\n");
+	dev_info(tx_ring->dev, "TX DMA map failed\n");
 
 	/* clear dma mappings for failed tx_bi map */
 	for (;;) {
 		tx_bi = &tx_ring->tx_bi[i];
-		i40e_unmap_tx_resource(tx_ring, tx_bi);
+		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
 		if (tx_bi == first)
 			break;
 		if (i == 0)
@@ -1632,8 +1647,6 @@
 		i--;
 	}
 
-	dev_kfree_skb_any(skb);
-
 	tx_ring->next_to_use = i;
 }
 
@@ -1758,16 +1771,16 @@
 
 	skb_tx_timestamp(skb);
 
-	/* Always offload the checksum, since it's in the data descriptor */
-	if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
-		tx_flags |= I40E_TX_FLAGS_CSUM;
-
-	/* always enable offload insertion */
+	/* always enable CRC insertion offload */
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
-	if (tx_flags & I40E_TX_FLAGS_CSUM)
+	/* Always offload the checksum, since it's in the data descriptor */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		tx_flags |= I40E_TX_FLAGS_CSUM;
+
 		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
+	}
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
 
 	/* hardware can't handle really short frames, hardware padding works
 	 * beyond this point
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722..db55d99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,23 +102,20 @@
 #define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW		(u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
 #define I40E_TX_FLAGS_VLAN_SHIFT	16
 
 struct i40e_tx_buffer {
-	struct sk_buff *skb;
-	dma_addr_t dma;
-	unsigned long time_stamp;
-	u16 length;
-	u32 tx_flags;
 	struct i40e_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
 	unsigned int bytecount;
-	u16 gso_segs;
-	u8 mapped_as_page;
+	unsigned short gso_segs;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
 };
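
DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() expand to real fields only when the architecture needs saved state to unmap (CONFIG_NEED_DMA_MAP_STATE), so the buffer info shrinks on everything else; the fields may only be touched through the accessor macros. A minimal sketch with illustrative names:

#include <linux/dma-mapping.h>

struct demo_buf {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void demo_unmap(struct device *dev, struct demo_buf *b)
{
	if (dma_unmap_len(b, len)) {
		dma_unmap_single(dev, dma_unmap_addr(b, dma),
				 dma_unmap_len(b, len), DMA_TO_DEVICE);
		dma_unmap_len_set(b, len, 0);	/* mark as unmapped */
	}
}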
 
 struct i40e_rx_buffer {
@@ -129,18 +126,18 @@
 	unsigned int page_offset;
 };
 
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
 	u64 packets;
 	u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
 	u64 restart_queue;
 	u64 tx_busy;
-	u64 completed;
 	u64 tx_done_old;
 };
 
 struct i40e_rx_queue_stats {
-	u64 packets;
-	u64 bytes;
 	u64 non_eop_descs;
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+	struct i40e_ring *next;		/* pointer to next ring in q_vector */
 	void *desc;			/* Descriptor ring memory */
 	struct device *dev;		/* Used for DMA mapping */
 	struct net_device *netdev;	/* netdev ring maps to */
@@ -219,6 +217,8 @@
 	bool ring_active;		/* is ring online or not */
 
 	/* stats structs */
+	struct i40e_queue_stats	stats;
+	struct u64_stats_sync syncp;
 	union {
 		struct i40e_tx_queue_stats tx_stats;
 		struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@
 
 	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+
+	struct rcu_head rcu;		/* to avoid race on free */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -238,9 +240,8 @@
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
 	/* array of pointers to rings */
-	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+	struct i40e_ring *ring;
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_packets;	/* total packets processed this int */
 	u16 count;
@@ -248,6 +249,10 @@
 	u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
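
A usage sketch of the new iterator, mirroring the napi poll loop in i40e_txrx.c; the wrapper function is hypothetical and assumes it lives where i40e_clean_tx_irq() is visible:

static bool i40e_clean_all_tx(struct i40e_q_vector *q_vector, int budget)
{
	struct i40e_ring *ring;
	bool clean_complete = true;

	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, budget);

	return clean_complete;
}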
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58..0759698 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@
 		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 	else
 		reg_idx = I40E_VPINT_LNKLSTN(
-			    ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+					   (pf->hw.func_caps.num_msix_vectors_vf
 					      * vf->vf_id) + (vector_id - 1));
 
 	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@
 
 	/* associate this queue with the PCI VF function */
 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
-	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 		    & I40E_QTX_CTL_PF_INDX_MASK);
 	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506..8c24377 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -28,14 +28,14 @@
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
 
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-				u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-				 u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		      u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		       u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
                                      (ID_LED_DEF1_DEF2 <<  8) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06..2e166b2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -562,11 +562,11 @@
 	u8  revision_id;
 };
 
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 #define hw_dbg(format, arg...) \
 	netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
 
 /* These functions must be implemented by drivers */
-s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b..2d91371 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -28,26 +28,24 @@
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
-			      u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
-			     u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
-				 struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-			      u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-			       u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+			    u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+			   u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE		0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83..e4cbe8e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -86,6 +86,6 @@
 
 #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
 
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
 
 #endif
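
For function declarations, extern is the default storage class, so stripping it throughout these igb headers is purely cosmetic; the compiler treats both forms identically:

extern s32 igb_update_flash_i210(struct e1000_hw *hw);	/* old style */
s32 igb_update_flash_i210(struct e1000_hw *hw);		/* same meaning */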
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e726675..c4c4fe3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -708,11 +708,6 @@
 		hw_dbg("Error committing the PHY changes\n");
 		goto out;
 	}
-	if (phy->type == e1000_phy_i210) {
-		ret_val = igb_set_master_slave_mode(hw);
-		if (ret_val)
-			return ret_val;
-	}
 
 out:
 	return ret_val;
@@ -806,6 +801,9 @@
 		hw_dbg("Error committing the PHY changes\n");
 		return ret_val;
 	}
+	ret_val = igb_set_master_slave_mode(hw);
+	if (ret_val)
+		return ret_val;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b09..5e9ed89 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -483,40 +483,38 @@
 extern char igb_driver_name[];
 extern char igb_driver_version[];
 
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-					   struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-				struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
-				unsigned char *va,
-				struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+			 struct sk_buff *skb);
 static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
 				       union e1000_adv_rx_desc *rx_desc,
 				       struct sk_buff *skb)
@@ -531,11 +529,11 @@
 	rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
-				  struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd);
 #ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
 #endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 151e00c..b0f3666 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -146,6 +146,7 @@
 	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
 	u32 status;
 
+	status = rd32(E1000_STATUS);
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
 		ecmd->supported = (SUPPORTED_10baseT_Half |
@@ -169,13 +170,22 @@
 		ecmd->transceiver = XCVR_INTERNAL;
 	} else {
 		ecmd->supported = (SUPPORTED_FIBRE |
+				   SUPPORTED_1000baseKX_Full |
 				   SUPPORTED_Autoneg |
 				   SUPPORTED_Pause);
-		ecmd->advertising = ADVERTISED_FIBRE;
-
-		if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
-			ecmd->supported |= SUPPORTED_1000baseT_Full;
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		ecmd->advertising = (ADVERTISED_FIBRE |
+				     ADVERTISED_1000baseKX_Full);
+		if (hw->mac.type == e1000_i354) {
+			if ((hw->device_id ==
+			     E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
+			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+				ecmd->supported |= SUPPORTED_2500baseX_Full;
+				ecmd->supported &=
+					~SUPPORTED_1000baseKX_Full;
+				ecmd->advertising |= ADVERTISED_2500baseX_Full;
+				ecmd->advertising &=
+					~ADVERTISED_1000baseKX_Full;
+			}
 		}
 		if (eth_flags->e100_base_fx) {
 			ecmd->supported |= SUPPORTED_100baseT_Full;
@@ -187,35 +197,29 @@
 		ecmd->port = PORT_FIBRE;
 		ecmd->transceiver = XCVR_EXTERNAL;
 	}
-
 	if (hw->mac.autoneg != 1)
 		ecmd->advertising &= ~(ADVERTISED_Pause |
 				       ADVERTISED_Asym_Pause);
 
-	if (hw->fc.requested_mode == e1000_fc_full)
+	switch (hw->fc.requested_mode) {
+	case e1000_fc_full:
 		ecmd->advertising |= ADVERTISED_Pause;
-	else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+		break;
+	case e1000_fc_rx_pause:
 		ecmd->advertising |= (ADVERTISED_Pause |
 				      ADVERTISED_Asym_Pause);
-	else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+		break;
+	case e1000_fc_tx_pause:
 		ecmd->advertising |=  ADVERTISED_Asym_Pause;
-	else
+		break;
+	default:
 		ecmd->advertising &= ~(ADVERTISED_Pause |
 				       ADVERTISED_Asym_Pause);
-
-	status = rd32(E1000_STATUS);
-
+	}
 	if (status & E1000_STATUS_LU) {
-		if (hw->mac.type == e1000_i354) {
-			if ((status & E1000_STATUS_2P5_SKU) &&
-			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
-				ecmd->supported = SUPPORTED_2500baseX_Full;
-				ecmd->advertising = ADVERTISED_2500baseX_Full;
-				ecmd->speed = SPEED_2500;
-			} else {
-				ecmd->supported = SUPPORTED_1000baseT_Full;
-				ecmd->advertising = ADVERTISED_1000baseT_Full;
-			}
+		if ((status & E1000_STATUS_2P5_SKU) &&
+		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+			ecmd->speed = SPEED_2500;
 		} else if (status & E1000_STATUS_SPEED_1000) {
 			ecmd->speed = SPEED_1000;
 		} else if (status & E1000_STATUS_SPEED_100) {
@@ -232,7 +236,6 @@
 		ecmd->speed = -1;
 		ecmd->duplex = -1;
 	}
-
 	if ((hw->phy.media_type == e1000_media_type_fiber) ||
 	    hw->mac.autoneg)
 		ecmd->autoneg = AUTONEG_ENABLE;
@@ -771,8 +774,10 @@
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
-	if (hw->mac.type == e1000_i211)
+	if ((hw->mac.type >= e1000_i210) &&
+	    !igb_get_flash_presence_i210(hw)) {
 		return -EOPNOTSUPP;
+	}
 
 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
@@ -1659,7 +1664,8 @@
 		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
 		(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 		(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-		(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+		(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+		(hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 
 			/* Enable DH89xxCC MPHY for near end loopback */
 			reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1731,8 @@
 	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
 	(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 	(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-	(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+	(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+	(hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 		u32 reg;
 
 		/* Disable near end loopback on DH89xxCC */
@@ -2877,6 +2884,88 @@
 	return 0;
 }
 
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int max_combined = 0;
+
+	switch (hw->mac.type) {
+	case e1000_i211:
+		max_combined = IGB_MAX_RX_QUEUES_I211;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+		max_combined = IGB_MAX_RX_QUEUES_82575;
+		break;
+	case e1000_i350:
+		if (!!adapter->vfs_allocated_count) {
+			max_combined = 1;
+			break;
+		}
+		/* fall through */
+	case e1000_82576:
+		if (!!adapter->vfs_allocated_count) {
+			max_combined = 2;
+			break;
+		}
+		/* fall through */
+	case e1000_82580:
+	case e1000_i354:
+	default:
+		max_combined = IGB_MAX_RX_QUEUES;
+		break;
+	}
+
+	return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+			     struct ethtool_channels *ch)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	/* Report maximum channels */
+	ch->max_combined = igb_max_channels(adapter);
+
+	/* Report info for other vector */
+	if (adapter->msix_entries) {
+		ch->max_other = NON_Q_VECTORS;
+		ch->other_count = NON_Q_VECTORS;
+	}
+
+	ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+			    struct ethtool_channels *ch)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	unsigned int count = ch->combined_count;
+
+	/* Verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* Verify other_count is valid and has not been changed */
+	if (ch->other_count != NON_Q_VECTORS)
+		return -EINVAL;
+
+	/* Verify the number of channels doesn't exceed hw limits */
+	if (count > igb_max_channels(adapter))
+		return -EINVAL;
+
+	if (count != adapter->rss_queues) {
+		adapter->rss_queues = count;
+
+		/* Hardware has to reinitialize queues and interrupts to
+		 * match the new configuration.
+		 */
+		return igb_reinit_queues(adapter);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
 	.get_settings		= igb_get_settings,
 	.set_settings		= igb_set_settings,
@@ -2913,6 +3002,8 @@
 	.get_rxfh_indir_size	= igb_get_rxfh_indir_size,
 	.get_rxfh_indir		= igb_get_rxfh_indir,
 	.set_rxfh_indir		= igb_set_rxfh_indir,
+	.get_channels		= igb_get_channels,
+	.set_channels		= igb_set_channels,
 	.begin			= igb_ethtool_begin,
 	.complete		= igb_ethtool_complete,
 };
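
The new .get_channels/.set_channels hooks are what `ethtool -l`/`-L` drive
through the SIOCETHTOOL ioctl. A minimal userspace sketch (the interface name
"eth0" and the target count of 2 are placeholders; error handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ch;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("combined %u of max %u\n",
                           ch.combined_count, ch.max_combined);

            /* igb_set_channels() rejects split rx/tx counts and any
             * change to other_count, so only combined_count moves. */
            ch.cmd = ETHTOOL_SCHANNELS;
            ch.combined_count = 2;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    perror("ETHTOOL_SCHANNELS");

            close(fd);
            return 0;
    }
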
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2..025e5f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -182,6 +182,7 @@
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
 #endif
 
 #ifdef CONFIG_PM
@@ -1223,6 +1224,9 @@
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = txr_idx;
 
+		u64_stats_init(&ring->tx_syncp);
+		u64_stats_init(&ring->tx_syncp2);
+
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
 
@@ -1256,6 +1260,8 @@
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = rxr_idx;
 
+		u64_stats_init(&ring->rx_syncp);
+
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
 	}
@@ -2034,21 +2040,15 @@
 		return err;
 
 	pci_using_dac = 0;
-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (!err)
-			pci_using_dac = 1;
+		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA configuration, aborting\n");
-				goto err_dma;
-			}
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
 		}
 	}
 
@@ -2429,7 +2429,7 @@
 }
 
 #ifdef CONFIG_PCI_IOV
-static int  igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2470,19 @@
 	int err = 0;
 	int i;
 
-	if (!adapter->msix_entries) {
+	if (!adapter->msix_entries || num_vfs > 7) {
 		err = -EPERM;
 		goto out;
 	}
-
 	if (!num_vfs)
 		goto out;
-	else if (old_vfs && old_vfs == num_vfs)
-		goto out;
-	else if (old_vfs && old_vfs != num_vfs)
-		err = igb_disable_sriov(pdev);
 
-	if (err)
-		goto out;
-
-	if (num_vfs > 7) {
-		err = -EPERM;
-		goto out;
-	}
-
-	adapter->vfs_allocated_count = num_vfs;
+	if (old_vfs) {
+		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+			 old_vfs, max_vfs);
+		adapter->vfs_allocated_count = old_vfs;
+	} else {
+		adapter->vfs_allocated_count = num_vfs;
+	}
 
 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
 				sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2496,12 @@
 		goto out;
 	}
 
-	err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
-	if (err)
-		goto err_out;
-
+	/* only call pci_enable_sriov() if no VFs are allocated already */
+	if (!old_vfs) {
+		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+		if (err)
+			goto err_out;
+	}
 	dev_info(&pdev->dev, "%d VFs allocated\n",
 		 adapter->vfs_allocated_count);
 	for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2623,7 +2617,7 @@
 		return;
 
 	pci_sriov_set_totalvfs(pdev, 7);
-	igb_enable_sriov(pdev, max_vfs);
+	igb_pci_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -5708,7 +5702,7 @@
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-	memcpy(addr, vf_mac, 6);
+	memcpy(addr, vf_mac, ETH_ALEN);
 	igb_write_mbx(hw, msgbuf, 3, vf);
 }
 
@@ -7838,4 +7832,26 @@
 		return E1000_SUCCESS;
 
 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (netif_running(netdev))
+		igb_close(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (netif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
 /* igb_main.c */
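
The same DMA-mask simplification recurs in igbvf and ixgb below:
dma_set_mask_and_coherent() sets the streaming and coherent masks in one
call, flattening the old nested fallback ladder. Distilled into a sketch
(example_dma_setup is an illustrative name, not a driver function):

    #include <linux/dma-mapping.h>

    static int example_dma_setup(struct device *dev, int *using_dac)
    {
            /* Try 64-bit DMA first; one call covers both mask types. */
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
                    *using_dac = 1;
                    return 0;
            }

            /* Fall back to 32-bit DMA; failure here is fatal for probe. */
            *using_dac = 0;
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }
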
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3..7d6a25c 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@
 extern char igbvf_driver_name[];
 extern const char igbvf_driver_version[];
 
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
 
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
 
 extern unsigned int copybreak;
 
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee..04bf22e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2343,10 +2343,9 @@
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+	if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+	    max_frame > MAX_JUMBO_FRAME_SIZE)
 		return -EINVAL;
-	}
 
 #define MAX_STD_JUMBO_FRAME_SIZE 9234
 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2638,21 +2637,15 @@
 		return err;
 
 	pci_using_dac = 0;
-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (!err)
-			pci_using_dac = 1;
+		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev, "No usable DMA "
-				        "configuration, aborting\n");
-				goto err_dma;
-			}
+			dev_err(&pdev->dev, "No usable DMA "
+			        "configuration, aborting\n");
+			goto err_dma;
 		}
 	}
 
@@ -2699,7 +2692,7 @@
 	if (ei->get_variants) {
 		err = ei->get_variants(adapter);
 		if (err)
-			goto err_ioremap;
+			goto err_get_variants;
 	}
 
 	/* setup adapter struct */
@@ -2796,6 +2789,7 @@
 	kfree(adapter->rx_ring);
 err_sw_init:
 	igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
 	iounmap(adapter->hw.hw_addr);
 err_ioremap:
 	free_netdev(netdev);
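
The reworked MTU check above does more than drop the dev_err(): max_frame is
a plain int, so a sufficiently large new_mtu would make the addition wrap
before the jumbo-size comparison. The added bound rejects such values first,
so the (possibly wrapped) sum is never consulted. In isolation:

    int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

    /* The INT_MAX term screens out any new_mtu large enough to
     * overflow the addition above, before the jumbo-frame test. */
    if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
        max_frame > MAX_JUMBO_FRAME_SIZE)
            return -EINVAL;
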
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10..955ad8c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@
 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
 		if (!ret_val) {
 			if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
-				memcpy(hw->mac.perm_addr, addr, 6);
+				memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
 			else
 				ret_val = -E1000_ERR_MAC_INIT;
 		}
@@ -314,7 +314,7 @@
 
 	memset(msgbuf, 0, 12);
 	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
-	memcpy(msg_addr, addr, 6);
+	memcpy(msg_addr, addr, ETH_ALEN);
 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
 	if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97..2224cc2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -187,21 +187,21 @@
 };
 
 /* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
 extern const char ixgb_driver_version[];
 
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
 
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
 
 
 #endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35..0bd5d72 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@
 };
 
 /* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
 
-extern void ixgb_rar_set(struct ixgb_hw *hw,
-				u8 *addr,
-				u32 index);
-
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
 
 /* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-				   u8 *mc_addr_list,
-				   u32 mc_addr_count,
-				   u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, u32 pad);
 
 /* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
-				 u32 offset,
-				 u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
 
 /* Access functions to eeprom data */
 void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236..57e390c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@
 		return err;
 
 	pci_using_dac = 0;
-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (!err)
-			pci_using_dac = 1;
+		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				pr_err("No usable DMA configuration, aborting\n");
-				goto err_dma_mask;
-			}
+			pr_err("No usable DMA configuration, aborting\n");
+			goto err_dma_mask;
 		}
 	}
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c..f38fc0a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -67,7 +67,11 @@
 #define IXGBE_MAX_TXD			   4096
 #define IXGBE_MIN_TXD			     64
 
+#if (PAGE_SIZE < 8192)
 #define IXGBE_DEFAULT_RXD		    512
+#else
+#define IXGBE_DEFAULT_RXD		    128
+#endif
 #define IXGBE_MAX_RXD			   4096
 #define IXGBE_MIN_RXD			     64
 
@@ -187,11 +191,11 @@
 struct ixgbe_queue_stats {
 	u64 packets;
 	u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 	u64 yields;
 	u64 misses;
 	u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -219,6 +223,15 @@
 	__IXGBE_RX_FCOE,
 };
 
+struct ixgbe_fwd_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct net_device *netdev;
+	struct ixgbe_adapter *real_adapter;
+	unsigned int tx_base_queue;
+	unsigned int rx_base_queue;
+	int pool;
+};
+
 #define check_for_tx_hang(ring) \
 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
@@ -236,6 +249,7 @@
 	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;	/* netdev ring belongs to */
 	struct device *dev;		/* device for DMA mapping */
+	struct ixgbe_fwd_adapter *l2_accel_priv;
 	void *desc;			/* descriptor ring memory */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
@@ -293,6 +307,12 @@
 #define IXGBE_MAX_FCOE_INDICES  8
 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS	31
+#define IXGBE_MAX_DCBMACVLANS	8
+
 struct ixgbe_ring_feature {
 	u16 limit;	/* upper limit on feature indices */
 	u16 indices;	/* current value of indices */
@@ -369,11 +389,13 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBE_QV_STATE_IDLE        0
-#define IXGBE_QV_STATE_NAPI	   1    /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL	   2    /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI	   1     /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL	   2     /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED	   4     /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD  8     /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD  16    /* poll yielded this QV */
 #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
 #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
 	spinlock_t lock;
@@ -394,18 +416,18 @@
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = true;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBE_QV_LOCKED) {
 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->tx.ring->stats.yields++;
 #endif
 	} else
 		/* we don't care if someone yielded */
 		q_vector->state = IXGBE_QV_STATE_NAPI;
-	spin_unlock(&q_vector->lock);
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -413,14 +435,15 @@
 static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = false;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
 			       IXGBE_QV_STATE_NAPI_YIELD));
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
-	spin_unlock(&q_vector->lock);
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -432,7 +455,7 @@
 	if ((q_vector->state & IXGBE_QV_LOCKED)) {
 		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
 		rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		q_vector->rx.ring->stats.yields++;
 #endif
 	} else
@@ -451,17 +474,32 @@
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
 }
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBE_QV_OWNED)
+		rc = false;
+	q_vector->state |= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+
+	return rc;
+}
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
@@ -487,10 +525,16 @@
 	return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	return true;
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 #ifdef CONFIG_IXGBE_HWMON
@@ -738,6 +782,7 @@
 #endif /*CONFIG_DEBUG_FS*/
 
 	u8 default_up;
+	unsigned long fwd_bitmask; /* Bitmask indicating in-use pools */
 };
 
 struct ixgbe_fdir_filter {
@@ -786,93 +831,89 @@
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-				   struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			       u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-					 struct ixgbe_adapter *,
-					 struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
-                                             struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-						 union ixgbe_atr_hash_dword input,
-						 union ixgbe_atr_hash_dword common,
-                                                 u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-					   union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-						 union ixgbe_atr_input *input,
-						 u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-						 union ixgbe_atr_input *input,
-						 u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-						 union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+				  struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+				      struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_hash_dword input,
+					  union ixgbe_atr_hash_dword common,
+					  u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+				    union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+					  union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 #ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
-		     struct ixgbe_tx_buffer *first,
-		     u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
-			  union ixgbe_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                              struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-				 struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+	      u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+		       struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+			  struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
-				  struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+			   struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
 #ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
 #else
 static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
 static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +925,12 @@
 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-				    struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+			     struct sk_buff *skb);
 static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
 					 union ixgbe_adv_rx_desc *rx_desc,
 					 struct sk_buff *skb)
@@ -906,13 +947,16 @@
 	rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-				    struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+			     int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
 #endif
 
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+				  struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *tx_ring);
 #endif /* _IXGBE_H_ */
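
The busy-poll rework above reads most easily as a state machine; the new
DISABLED bit is the key. The unlock paths now mask instead of assigning, so a
disable request issued while NAPI or a polling socket held the vector is not
lost. A sketch of the bit values and the masking trick:

    /* state bits: IDLE 0, NAPI 1, POLL 2, DISABLED 4,
     * NAPI_YIELD 8, POLL_YIELD 16 */
    unsigned int state = IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_DISABLED;

    /* old unlock: state = IXGBE_QV_STATE_IDLE;    (drops DISABLED!) */
    state &= IXGBE_QV_STATE_DISABLED;       /* new: DISABLED survives */

This is also what lets ixgbe_napi_disable_all() in ixgbe_main.c further down
drop the local_bh_disable()/mdelay() dance in favor of ixgbe_qv_disable()
plus usleep_range().
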
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649ab..4e7c9b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -442,7 +442,7 @@
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1129
+#define IXGBE_REGS_LEN  1139
 	return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -602,22 +602,53 @@
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
 	/* DCB */
-	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+		for (i = 0; i < 8; i++)
+			regs_buff[833 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[841 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[849 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+		for (i = 0; i < 8; i++)
+			regs_buff[857 + i] =
+				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+		break;
+	default:
+		break;
+	}
+
 	for (i = 0; i < 8; i++)
-		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+		regs_buff[865 + i] =
+		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
 	for (i = 0; i < 8; i++)
-		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+		regs_buff[873 + i] =
+		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 
 	/* Statistics */
 	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@
 
 	/* 82599 X540 specific registers  */
 	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+	/* 82599 X540 specific DCB registers  */
+	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+	for (i = 0; i < 4; i++)
+		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+					/* same as RTTQCNRM */
+	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+					/* same as RTTQCNRR */
+
+	/* X540 specific DCB registers  */
+	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1087,7 +1132,7 @@
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@
 			data[i] = 0;
 			data[i+1] = 0;
 			i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 			data[i] = 0;
 			data[i+1] = 0;
 			data[i+2] = 0;
@@ -1115,7 +1160,7 @@
 			data[i+1] = ring->stats.bytes;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
 		data[i+1] = ring->stats.misses;
 		data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_misses", i);
+			sprintf(p, "tx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_ll_cleaned", i);
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-			sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_misses", i);
+			sprintf(p, "rx_queue_%u_bp_misses", i);
 			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_ll_cleaned", i);
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
 			p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
 		}
 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
 			sprintf(p, "tx_pb_%u_pxon", i);
@@ -2212,13 +2257,13 @@
 
 #if IS_ENABLED(CONFIG_BQL)
 	/* detect ITR changes that require update of TXDCTL.WTHRESH */
-	if ((adapter->tx_itr_setting > 1) &&
+	if ((adapter->tx_itr_setting != 1) &&
 	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
 		if ((tx_itr_prev == 1) ||
-		    (tx_itr_prev > IXGBE_100K_ITR))
+		    (tx_itr_prev >= IXGBE_100K_ITR))
 			need_reset = true;
 	} else {
-		if ((tx_itr_prev > 1) &&
+		if ((tx_itr_prev != 1) &&
 		    (tx_itr_prev < IXGBE_100K_ITR))
 			need_reset = true;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e10..32e3eaa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -498,6 +498,7 @@
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
+	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
 	/* only proceed if SR-IOV is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@
 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
 	/* 64 pool mode with 2 queues per pool */
-	if ((vmdq_i > 32) || (rss_i < 4)) {
+	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 		rss_m = IXGBE_RSS_2Q_MASK;
 		rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@
 
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		ring->queue_index = txr_idx;
+		if (adapter->num_rx_pools > 1)
+			ring->queue_index =
+				txr_idx % adapter->num_rx_queues_per_pool;
+		else
+			ring->queue_index = txr_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@
 #endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		ring->queue_index = rxr_idx;
+		if (adapter->num_rx_pools > 1)
+			ring->queue_index =
+				rxr_idx % adapter->num_rx_queues_per_pool;
+		else
+			ring->queue_index = rxr_idx;
 
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
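
The queue_index remapping above keeps each pool's rings zero-based from the
stacked device's point of view. Assuming num_rx_queues_per_pool is 4
(illustrative):

    /* txr_idx:           0 1 2 3 | 4 5 6 7 | ...
     * ring->queue_index: 0 1 2 3 | 0 1 2 3 | ...
     * Each pool's netdev sees queues numbered from zero, matching the
     * counts set via netif_set_real_num_tx_queues() on the macvlan. */
    ring->queue_index = txr_idx % adapter->num_rx_queues_per_pool;
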
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd..0c55079 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
 #include <linux/ethtool.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
 #include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
@@ -132,7 +133,7 @@
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
 MODULE_PARM_DESC(max_vfs,
-		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
+		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
 #endif /* CONFIG_PCI_IOV */
 
 static unsigned int allow_unsupported_sfp;
@@ -153,7 +154,6 @@
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
 					  u32 reg, u16 *value)
 {
-	int pos = 0;
 	struct pci_dev *parent_dev;
 	struct pci_bus *parent_bus;
 
@@ -165,11 +165,10 @@
 	if (!parent_dev)
 		return -1;
 
-	pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
-	if (!pos)
+	if (!pci_is_pcie(parent_dev))
 		return -1;
 
-	pci_read_config_word(parent_dev, pos + reg, value);
+	pcie_capability_read_word(parent_dev, reg, value);
 	return 0;
 }
 
@@ -247,7 +246,7 @@
 		max_gts = 4 * width;
 		break;
 	case PCIE_SPEED_8_0GT:
-		/* 128b/130b encoding only reduces throughput by 1% */
+		/* 128b/130b encoding reduces throughput by less than 2% */
 		max_gts = 8 * width;
 		break;
 	default:
@@ -265,7 +264,7 @@
 		   width,
 		   (speed == PCIE_SPEED_2_5GT ? "20%" :
 		    speed == PCIE_SPEED_5_0GT ? "20%" :
-		    speed == PCIE_SPEED_8_0GT ? "N/a" :
+		    speed == PCIE_SPEED_8_0GT ? "<2%" :
 		    "Unknown"));
 
 	if (max_gts < expected_gts) {
@@ -872,11 +871,18 @@
 
 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
-	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+	u32 head, tail;
 
-	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
-	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+	if (ring->l2_accel_priv)
+		adapter = ring->l2_accel_priv->real_adapter;
+	else
+		adapter = netdev_priv(ring->netdev);
+
+	hw = &adapter->hw;
+	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 
 	if (head != tail)
 		return (head < tail) ?
@@ -1585,7 +1591,7 @@
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	if (ixgbe_qv_ll_polling(q_vector))
+	if (ixgbe_qv_busy_polling(q_vector))
 		netif_receive_skb(skb);
 	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 		napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2103,7 @@
 
 	ixgbe_for_each_ring(ring, q_vector->rx) {
 		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
 		if (found)
 			ring->stats.cleaned += found;
 		else
@@ -3005,7 +3011,7 @@
 		struct ixgbe_q_vector *q_vector = ring->q_vector;
 
 		if (q_vector)
-			netif_set_xps_queue(adapter->netdev,
+			netif_set_xps_queue(ring->netdev,
 					    &q_vector->affinity_mask,
 					    ring->queue_index);
 	}
@@ -3395,7 +3401,7 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	int p;
+	u16 pool;
 
 	/* PSRTYPE must be initialized in non 82598 adapters */
 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3412,9 +3418,8 @@
 	else if (rss_i > 1)
 		psrtype |= 1 << 29;
 
-	for (p = 0; p < adapter->num_rx_pools; p++)
-		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
-				psrtype);
+	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
 
 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -3683,7 +3688,11 @@
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			j = adapter->rx_ring[i]->reg_idx;
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+			if (ring->l2_accel_priv)
+				continue;
+			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl &= ~IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3713,7 +3722,11 @@
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			j = adapter->rx_ring[i]->reg_idx;
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+			if (ring->l2_accel_priv)
+				continue;
+			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl |= IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3750,7 +3763,7 @@
 	unsigned int rar_entries = hw->mac.num_rar_entries - 1;
 	int count = 0;
 
-	/* In SR-IOV mode significantly less RAR entries are available */
+	/* In SR-IOV/VMDQ modes significantly fewer RAR entries are available */
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
 
@@ -3825,14 +3838,6 @@
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			vmolr |= IXGBE_VMOLR_MPE;
-		} else {
-			/*
-			 * Write addresses to the MTA, if the attempt fails
-			 * then we should just turn on promiscuous mode so
-			 * that we can at least receive multicast traffic
-			 */
-			hw->mac.ops.update_mc_addr_list(hw, netdev);
-			vmolr |= IXGBE_VMOLR_ROMPE;
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
@@ -3849,6 +3854,13 @@
 		vmolr |= IXGBE_VMOLR_ROPE;
 	}
 
+	/* Write addresses to the MTA; if the attempt fails
+	 * then we should just turn on promiscuous mode so
+	 * that we can at least receive multicast traffic
+	 */
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+	vmolr |= IXGBE_VMOLR_ROMPE;
+
 	if (adapter->num_vfs)
 		ixgbe_restore_vf_multicasts(adapter);
 
@@ -3893,15 +3905,13 @@
 {
 	int q_idx;
 
-	local_bh_disable(); /* for ixgbe_qv_lock_napi() */
 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
 		napi_disable(&adapter->q_vector[q_idx]->napi);
-		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
 			pr_info("QV %d locked\n", q_idx);
-			mdelay(1);
+			usleep_range(1000, 20000);
 		}
 	}
-	local_bh_enable();
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4118,6 +4128,228 @@
 	spin_unlock(&adapter->fdir_perfect_lock);
 }
 
+static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
+				      struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/* No unicast promiscuous support for VMDQ devices. */
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+
+	/* clear the affected bit */
+	vmolr &= ~IXGBE_VMOLR_MPE;
+
+	if (dev->flags & IFF_ALLMULTI) {
+		vmolr |= IXGBE_VMOLR_MPE;
+	} else {
+		vmolr |= IXGBE_VMOLR_ROMPE;
+		hw->mac.ops.update_mc_addr_list(hw, dev);
+	}
+	ixgbe_write_uc_addr_list(adapter->netdev);
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+}
+
+static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+				 u8 *addr, u16 pool)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int entry;
+
+	entry = hw->mac.num_rar_entries - pool;
+	hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
+}
+
+static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
+{
+	struct ixgbe_adapter *adapter = vadapter->real_adapter;
+	int rss_i = adapter->num_rx_queues_per_pool;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 pool = vadapter->pool;
+	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+		      IXGBE_PSRTYPE_UDPHDR |
+		      IXGBE_PSRTYPE_IPV4HDR |
+		      IXGBE_PSRTYPE_L2HDR |
+		      IXGBE_PSRTYPE_IPV6HDR;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	if (rss_i > 3)
+		psrtype |= 2 << 29;
+	else if (rss_i > 1)
+		psrtype |= 1 << 29;
+
+	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
+}
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	unsigned long size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct ixgbe_rx_buffer *rx_buffer;
+
+		rx_buffer = &rx_ring->rx_buffer_info[i];
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+			if (IXGBE_CB(skb)->page_released) {
+				dma_unmap_page(dev,
+					       IXGBE_CB(skb)->dma,
+					       ixgbe_rx_bufsz(rx_ring),
+					       DMA_FROM_DEVICE);
+				IXGBE_CB(skb)->page_released = false;
+			}
+			dev_kfree_skb(skb);
+		}
+		rx_buffer->skb = NULL;
+		if (rx_buffer->dma)
+			dma_unmap_page(dev, rx_buffer->dma,
+				       ixgbe_rx_pg_size(rx_ring),
+				       DMA_FROM_DEVICE);
+		rx_buffer->dma = 0;
+		if (rx_buffer->page)
+			__free_pages(rx_buffer->page,
+				     ixgbe_rx_pg_order(rx_ring));
+		rx_buffer->page = NULL;
+	}
+
+	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
+				   struct ixgbe_ring *rx_ring)
+{
+	struct ixgbe_adapter *adapter = vadapter->real_adapter;
+	int index = rx_ring->queue_index + vadapter->rx_base_queue;
+
+	/* shutdown specific queue receive and wait for dma to settle */
+	ixgbe_disable_rx_queue(adapter, rx_ring);
+	usleep_range(10000, 20000);
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+	ixgbe_clean_rx_ring(rx_ring);
+	rx_ring->l2_accel_priv = NULL;
+}
+
+int ixgbe_fwd_ring_down(struct net_device *vdev,
+			struct ixgbe_fwd_adapter *accel)
+{
+	struct ixgbe_adapter *adapter = accel->real_adapter;
+	unsigned int rxbase = accel->rx_base_queue;
+	unsigned int txbase = accel->tx_base_queue;
+	int i;
+
+	netif_tx_stop_all_queues(vdev);
+
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
+	}
+
+	return 0;
+}
+
+static int ixgbe_fwd_ring_up(struct net_device *vdev,
+			     struct ixgbe_fwd_adapter *accel)
+{
+	struct ixgbe_adapter *adapter = accel->real_adapter;
+	unsigned int rxbase, txbase, queues;
+	int i, baseq, err = 0;
+
+	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+		return 0;
+
+	baseq = accel->pool * adapter->num_rx_queues_per_pool;
+	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+		   accel->pool, adapter->num_rx_pools,
+		   baseq, baseq + adapter->num_rx_queues_per_pool,
+		   adapter->fwd_bitmask);
+
+	accel->netdev = vdev;
+	accel->rx_base_queue = rxbase = baseq;
+	accel->tx_base_queue = txbase = baseq;
+
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+		adapter->rx_ring[rxbase + i]->netdev = vdev;
+		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
+		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+	}
+
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+		adapter->tx_ring[txbase + i]->netdev = vdev;
+		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+	}
+
+	queues = min_t(unsigned int,
+		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
+	err = netif_set_real_num_tx_queues(vdev, queues);
+	if (err)
+		goto fwd_queue_err;
+
+	err = netif_set_real_num_rx_queues(vdev, queues);
+	if (err)
+		goto fwd_queue_err;
+
+	if (is_valid_ether_addr(vdev->dev_addr))
+		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+
+	ixgbe_fwd_psrtype(accel);
+	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+	return err;
+fwd_queue_err:
+	ixgbe_fwd_ring_down(vdev, accel);
+	return err;
+}
+
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+	struct net_device *upper;
+	struct list_head *iter;
+	int err;
+
+	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+		if (netif_is_macvlan(upper)) {
+			struct macvlan_dev *dfwd = netdev_priv(upper);
+			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+			if (dfwd->fwd_priv) {
+				err = ixgbe_fwd_ring_up(upper, vadapter);
+				if (err)
+					continue;
+			}
+		}
+	}
+}
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -4169,6 +4401,7 @@
 #endif /* IXGBE_FCOE */
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
+	ixgbe_configure_dfwd(adapter);
 }
 
 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -4322,6 +4555,8 @@
 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *upper;
+	struct list_head *iter;
 	int err;
 	u32 ctrl_ext;
 
@@ -4365,6 +4600,16 @@
 	/* enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
 
+	/* enable any upper devices */
+	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+		if (netif_is_macvlan(upper)) {
+			struct macvlan_dev *vlan = netdev_priv(upper);
+
+			if (vlan->fwd_priv)
+				netif_tx_start_all_queues(upper);
+		}
+	}
+
 	/* bring the link up in the watchdog, this could race with our first
 	 * link up interrupt but shouldn't be a problem */
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4456,59 +4701,6 @@
 }
 
 /**
- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
-{
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	u16 i;
-
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
-	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
-		struct ixgbe_rx_buffer *rx_buffer;
-
-		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (rx_buffer->skb) {
-			struct sk_buff *skb = rx_buffer->skb;
-			if (IXGBE_CB(skb)->page_released) {
-				dma_unmap_page(dev,
-					       IXGBE_CB(skb)->dma,
-					       ixgbe_rx_bufsz(rx_ring),
-					       DMA_FROM_DEVICE);
-				IXGBE_CB(skb)->page_released = false;
-			}
-			dev_kfree_skb(skb);
-		}
-		rx_buffer->skb = NULL;
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       ixgbe_rx_pg_size(rx_ring),
-				       DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_pages(rx_buffer->page,
-				     ixgbe_rx_pg_order(rx_ring));
-		rx_buffer->page = NULL;
-	}
-
-	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
-	rx_ring->next_to_alloc = 0;
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-}
-
-/**
  * ixgbe_clean_tx_ring - Free Tx Buffers
  * @tx_ring: ring to be cleaned
  **/
@@ -4585,6 +4777,8 @@
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *upper;
+	struct list_head *iter;
 	u32 rxctrl;
 	int i;
 
@@ -4608,6 +4802,19 @@
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
+	/* disable any upper devices */
+	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+		if (netif_is_macvlan(upper)) {
+			struct macvlan_dev *vlan = netdev_priv(upper);
+
+			if (vlan->fwd_priv) {
+				netif_tx_stop_all_queues(upper);
+				netif_carrier_off(upper);
+				netif_tx_disable(upper);
+			}
+		}
+	}
+
 	ixgbe_irq_disable(adapter);
 
 	ixgbe_napi_disable_all(adapter);
@@ -4816,11 +5023,20 @@
 	hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
 
 #ifdef CONFIG_PCI_IOV
-	/* assign number of SR-IOV VFs */
-	if (hw->mac.type != ixgbe_mac_82598EB)
-		adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+	if (max_vfs > 0)
+		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
 
-#endif
+	/* assign number of SR-IOV VFs */
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		if (max_vfs > 63) {
+			adapter->num_vfs = 0;
+			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
+		} else {
+			adapter->num_vfs = max_vfs;
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+
 	/* enable itr by default in dynamic mode */
 	adapter->rx_itr_setting = 1;
 	adapter->tx_itr_setting = 1;
@@ -4838,6 +5054,8 @@
 		return -EIO;
 	}
 
+	/* PF holds first pool slot */
+	set_bit(0, &adapter->fwd_bitmask);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
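The fwd_bitmask set up here acts as a small pool allocator: bit 0 is reserved for the PF at init time, ixgbe_fwd_add() later claims the lowest clear bit for each macvlan, and ixgbe_fwd_del() releases it. A user-space model of that allocator, with find_first_zero_bit() replaced by a plain scan for illustration:

#include <stdio.h>

static unsigned long fwd_bitmask = 1UL;	/* bit 0: PF pool */

static int pool_alloc(void)
{
	int pool = 0;

	while (fwd_bitmask & (1UL << pool))	/* lowest clear bit */
		pool++;
	fwd_bitmask |= 1UL << pool;
	return pool;
}

static void pool_free(int pool)
{
	fwd_bitmask &= ~(1UL << pool);
}

int main(void)
{
	int a = pool_alloc(), b = pool_alloc();

	printf("pools %d and %d, mask %#lx\n", a, b, fwd_bitmask);
	pool_free(a);
	printf("after free, mask %#lx\n", fwd_bitmask);
	return 0;
}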
@@ -4867,6 +5085,8 @@
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
+	u64_stats_init(&tx_ring->syncp);
+
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -4949,6 +5169,8 @@
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
+	u64_stats_init(&rx_ring->syncp);
+
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -5143,7 +5365,7 @@
 static int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int err;
+	int err, queues;
 
 	/* disallow open during test */
 	if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -5168,16 +5390,21 @@
 		goto err_req_irq;
 
 	/* Notify the stack of the actual queue counts. */
-	err = netif_set_real_num_tx_queues(netdev,
-					   adapter->num_rx_pools > 1 ? 1 :
-					   adapter->num_tx_queues);
+	if (adapter->num_rx_pools > 1)
+		queues = adapter->num_rx_queues_per_pool;
+	else
+		queues = adapter->num_tx_queues;
+
+	err = netif_set_real_num_tx_queues(netdev, queues);
 	if (err)
 		goto err_set_queues;
 
-
-	err = netif_set_real_num_rx_queues(netdev,
-					   adapter->num_rx_pools > 1 ? 1 :
-					   adapter->num_rx_queues);
+	if (adapter->num_rx_pools > 1 &&
+	    adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
+		queues = IXGBE_MAX_L2A_QUEUES;
+	else
+		queues = adapter->num_rx_queues;
+	err = netif_set_real_num_rx_queues(netdev, queues);
 	if (err)
 		goto err_set_queues;
 
@@ -6767,8 +6994,9 @@
 	return NETDEV_TX_OK;
 }
 
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
-				    struct net_device *netdev)
+static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
+				      struct net_device *netdev,
+				      struct ixgbe_ring *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring;
@@ -6784,10 +7012,17 @@
 		skb_set_tail_pointer(skb, 17);
 	}
 
-	tx_ring = adapter->tx_ring[skb->queue_mapping];
+	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+
 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+				    struct net_device *netdev)
+{
+	return __ixgbe_xmit_frame(skb, netdev, NULL);
+}
+
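Splitting __ixgbe_xmit_frame() out this way gives one transmit body with two entry points: the normal ndo_start_xmit path passes a NULL ring and falls back to skb->queue_mapping, while the dfwd path later in this patch passes the forwarded pool's ring explicitly. A rough stand-alone model of the dispatch, with invented types:

#include <stdio.h>

struct ring { int id; };

static struct ring tx_ring[8];

/* the override wins; otherwise fall back to the skb's queue mapping */
static int xmit(int queue_mapping, struct ring *override)
{
	struct ring *r = override ? override : &tx_ring[queue_mapping];

	return r->id;
}

int main(void)
{
	struct ring fwd = { .id = 42 };
	int i;

	for (i = 0; i < 8; i++)
		tx_ring[i].id = i;
	printf("default path -> ring %d\n", xmit(3, NULL));
	printf("dfwd path    -> ring %d\n", xmit(3, &fwd));
	return 0;
}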
 /**
  * ixgbe_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
@@ -7044,6 +7279,7 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
@@ -7051,6 +7287,10 @@
 	     tc < MAX_TRAFFIC_CLASS))
 		return -EINVAL;
 
+	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
+	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
+		return -EBUSY;
+
 	/* Hardware has to reinitialize queues and interrupts to
 	 * match packet buffer alignment. Unfortunately, the
 	 * hardware is not flexible enough to do this dynamically.
@@ -7305,6 +7545,104 @@
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
 }
 
+static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
+{
+	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
+	struct ixgbe_adapter *adapter = netdev_priv(pdev);
+	unsigned int limit;
+	int pool, err;
+
+#ifdef CONFIG_RPS
+	if (vdev->num_rx_queues != vdev->num_tx_queues) {
+		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
+			    vdev->name);
+		return ERR_PTR(-EINVAL);
+	}
+#endif
+	/* Check for hardware restriction on number of rx/tx queues */
+	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
+	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
+		netdev_info(pdev,
+			    "%s: Supports RX/TX Queue counts 1, 2, and 4\n",
+			    vdev->name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
+		return ERR_PTR(-EBUSY);
+
+	fwd_adapter = kzalloc(sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+	if (!fwd_adapter)
+		return ERR_PTR(-ENOMEM);
+
+	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
+	adapter->num_rx_pools++;
+	set_bit(pool, &adapter->fwd_bitmask);
+	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+
+	/* Enable VMDq flag so device will be set in VM mode */
+	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
+	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
+
+	/* Force reinit of ring allocation with VMDQ enabled */
+	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+	if (err)
+		goto fwd_add_err;
+	fwd_adapter->pool = pool;
+	fwd_adapter->real_adapter = adapter;
+	err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+	if (err)
+		goto fwd_add_err;
+	netif_tx_start_all_queues(vdev);
+	return fwd_adapter;
+fwd_add_err:
+	/* unwind counter and free adapter struct */
+	netdev_info(pdev,
+		    "%s: dfwd hardware acceleration failed\n", vdev->name);
+	clear_bit(pool, &adapter->fwd_bitmask);
+	adapter->num_rx_pools--;
+	kfree(fwd_adapter);
+	return ERR_PTR(err);
+}
+
+static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
+{
+	struct ixgbe_fwd_adapter *fwd_adapter = priv;
+	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
+	unsigned int limit;
+
+	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
+	adapter->num_rx_pools--;
+
+	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+		   fwd_adapter->pool, adapter->num_rx_pools,
+		   fwd_adapter->rx_base_queue,
+		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
+		   adapter->fwd_bitmask);
+	kfree(fwd_adapter);
+}
+
+static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
+				  struct net_device *dev,
+				  void *priv)
+{
+	struct ixgbe_fwd_adapter *fwd_adapter = priv;
+	unsigned int queue;
+	struct ixgbe_ring *tx_ring;
+
+	queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
+	tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
+
+	return __ixgbe_xmit_frame(skb, dev, tx_ring);
+}
+
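ixgbe_fwd_xmit() relies on the macvlan's queue mapping being pool-relative: the stack picks a queue in 0..n-1 on the small vdev, and adding tx_base_queue relocates it into the pool's slice of the PF ring array. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int tx_base_queue = 8;	/* assumed pool base */
	unsigned int queue_mapping;

	for (queue_mapping = 0; queue_mapping < 4; queue_mapping++)
		printf("macvlan queue %u -> PF tx ring %u\n",
		       queue_mapping, queue_mapping + tx_base_queue);
	return 0;
}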
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -7349,6 +7687,9 @@
 	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
 	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
+	.ndo_dfwd_add_station	= ixgbe_fwd_add,
+	.ndo_dfwd_del_station	= ixgbe_fwd_del,
+	.ndo_dfwd_start_xmit	= ixgbe_fwd_xmit,
 };
 
 /**
@@ -7362,19 +7703,16 @@
  **/
 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_hw *hw = &adapter->hw;
 	struct list_head *entry;
 	int physfns = 0;
 
-	/* Some cards can not use the generic count PCIe functions method, and
-	 * so must be hardcoded to the correct value.
+	/* Some cards can not use the generic count PCIe functions method,
+	 * because they are behind a parent switch, so we hardcode these with
+	 * the correct number of functions.
 	 */
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82599_SFP_SF_QP:
-	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+	if (ixgbe_pcie_from_parent(&adapter->hw)) {
 		physfns = 4;
-		break;
-	default:
+	} else {
 		list_for_each(entry, &adapter->pdev->bus_list) {
 			struct pci_dev *pdev =
 				list_entry(entry, struct pci_dev, bus_list);
@@ -7490,19 +7828,14 @@
 	if (err)
 		return err;
 
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA configuration, aborting\n");
-				goto err_dma;
-			}
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
 		}
 		pci_using_dac = 0;
 	}
@@ -7653,7 +7986,8 @@
 			   NETIF_F_TSO |
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXHASH |
-			   NETIF_F_RXCSUM;
+			   NETIF_F_RXCSUM |
+			   NETIF_F_HW_L2FW_DOFFLOAD;
 
 	netdev->hw_features = netdev->features;
 
@@ -7759,29 +8093,6 @@
 	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
 
-	/* print bus type/speed/width info */
-	e_dev_info("(PCI Express:%s:%s) %pM\n",
-		   (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
-		    "Unknown"),
-		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
-		    "Unknown"),
-		   netdev->dev_addr);
-
-	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
-	if (err)
-		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
-	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-		           part_str);
-	else
-		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, part_str);
-
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
 	 * bandwidth due to being older generation PCIe parts. We clamp these
@@ -7797,6 +8108,19 @@
 	}
 	ixgbe_check_minimum_link(adapter, expected_gts);
 
+	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+	if (err)
+		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+			   part_str);
+	else
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, part_str);
+
+	e_dev_info("%pM\n", netdev->dev_addr);
+
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e..aae900a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -57,28 +57,28 @@
 #define IXGBE_SFF_QSFP_DEVICE_TECH	0x93
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G	0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G	0x0
-#define IXGBE_SFF_ADDRESSING_MODE	     0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE		0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE		0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING	0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE		0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE		0x2
+#define IXGBE_SFF_1GBASET_CAPABLE		0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE		0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE		0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G		0x0
+#define IXGBE_SFF_ADDRESSING_MODE		0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE		0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE		0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE	0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL	0x0
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_I2C_EEPROM_READ_MASK		0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK		0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS		0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL		0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b1..d6f0c0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -129,10 +129,6 @@
 	if (!pre_existing_vfs && !adapter->num_vfs)
 		return;
 
-	if (!pre_existing_vfs)
-		dev_warn(&adapter->pdev->dev,
-			 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
-
 	/* If there are pre-existing VFs then we have to force
 	 * use of that many - over ride any module parameter value.
 	 * This may result from the user unloading the PF driver
@@ -223,17 +219,19 @@
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Disable VMDq flag so device will be set in VM mode */
-	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+		rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	} else {
+		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
+	}
 
-	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
 	adapter->ring_feature[RING_F_RSS].limit = rss;
 
 	/* take a breather then clean up driver data */
 	msleep(100);
-
-	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 	return 0;
 }
 
@@ -298,13 +296,10 @@
 	err = ixgbe_disable_sriov(adapter);
 
 	/* Only reinit if no error and state changed */
-	if (!err && current_flags != adapter->flags) {
-		/* ixgbe_disable_sriov() doesn't clear VMDQ flag */
-		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 #ifdef CONFIG_PCI_IOV
+	if (!err && current_flags != adapter->flags)
 		ixgbe_sriov_reinit(adapter);
 #endif
-	}
 
 	return err;
 }
@@ -558,7 +553,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
-	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
 	return 0;
@@ -621,16 +616,13 @@
 
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
-	unsigned char vf_mac_addr[6];
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	unsigned int vfn = (event_mask & 0x3f);
 
 	bool enable = ((event_mask & 0x10000000U) != 0);
 
-	if (enable) {
-		eth_zero_addr(vf_mac_addr);
-		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-	}
+	if (enable)
+		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb..7c19e96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -561,6 +561,10 @@
 #define IXGBE_RTTDQSEL    0x04904
 #define IXGBE_RTTDT1C     0x04908
 #define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTQCNCR    0x08B00
+#define IXGBE_RTTQCNTG    0x04A90
+#define IXGBE_RTTBCNRD    0x0498C
+#define IXGBE_RTTQCNRR    0x0498C
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
@@ -570,6 +574,7 @@
 #define IXGBE_RTTBCNRC_RF_INT_MASK	\
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTQCNRM    0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f..24b80a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,12 +32,12 @@
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE	 384
+#define IXGBE_X540_MAX_TX_QUEUES	128
+#define IXGBE_X540_MAX_RX_QUEUES	128
+#define IXGBE_X540_RAR_ENTRIES		128
+#define IXGBE_X540_MC_TBL_SIZE		128
+#define IXGBE_X540_VFT_TBL_SIZE		128
+#define IXGBE_X540_RX_PB_SIZE		384
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12..54d9ace 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
 
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	int sizeof_stat;
-	int stat_offset;
-	int base_stat_offset;
-	int saved_reset_offset;
+	struct {
+		int sizeof_stat;
+		int stat_offset;
+		int base_stat_offset;
+		int saved_reset_offset;
+	};
 };
 
-#define IXGBEVF_STAT(m, b, r)  sizeof(((struct ixgbevf_adapter *)0)->m), \
-			    offsetof(struct ixgbevf_adapter, m),         \
-			    offsetof(struct ixgbevf_adapter, b),         \
-			    offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = -1, \
+	.saved_reset_offset = -1 \
+}
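The rewritten macros trade positional initializers for designated ones, so each table entry is self-describing, and IXGBEVF_ZSTAT marks software-only counters with a -1 base offset instead of pointing them at a dummy zero_base field. A small user-space sketch of the same table technique, with an invented adapter struct:

#include <stddef.h>
#include <stdio.h>

struct adapter { unsigned long long rx_pkts, tx_busy; };

struct stat_desc {
	const char *name;
	size_t offset;
	int base_offset;	/* -1: software-only, no hw base/reset */
};

#define SWSTAT(n, m) { .name = n, \
		       .offset = offsetof(struct adapter, m), \
		       .base_offset = -1 }

static const struct stat_desc stats[] = {
	SWSTAT("rx_packets", rx_pkts),
	SWSTAT("tx_busy", tx_busy),
};

int main(void)
{
	struct adapter a = { .rx_pkts = 100, .tx_busy = 3 };
	size_t i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		unsigned long long *p =
			(void *)((char *)&a + stats[i].offset);

		printf("%s%s = %llu\n", stats[i].name,
		       stats[i].base_offset < 0 ? " (sw)" : "", *p);
	}
	return 0;
}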
 
 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@
 				  stats.saved_reset_vfgorc)},
 	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
 				  stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
 	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
 				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
-					      zero_base)},
-	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
-						zero_base)},
-	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
-					      zero_base)},
+	{"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+	{"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
 };
 
 #define IXGBE_QUEUE_STATS_LEN 0
@@ -140,58 +156,10 @@
 
 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
 
-static char *ixgbevf_reg_names[] = {
-	"IXGBE_VFCTRL",
-	"IXGBE_VFSTATUS",
-	"IXGBE_VFLINKS",
-	"IXGBE_VFRXMEMWRAP",
-	"IXGBE_VFFRTIMER",
-	"IXGBE_VTEICR",
-	"IXGBE_VTEICS",
-	"IXGBE_VTEIMS",
-	"IXGBE_VTEIMC",
-	"IXGBE_VTEIAC",
-	"IXGBE_VTEIAM",
-	"IXGBE_VTEITR",
-	"IXGBE_VTIVAR",
-	"IXGBE_VTIVAR_MISC",
-	"IXGBE_VFRDBAL0",
-	"IXGBE_VFRDBAL1",
-	"IXGBE_VFRDBAH0",
-	"IXGBE_VFRDBAH1",
-	"IXGBE_VFRDLEN0",
-	"IXGBE_VFRDLEN1",
-	"IXGBE_VFRDH0",
-	"IXGBE_VFRDH1",
-	"IXGBE_VFRDT0",
-	"IXGBE_VFRDT1",
-	"IXGBE_VFRXDCTL0",
-	"IXGBE_VFRXDCTL1",
-	"IXGBE_VFSRRCTL0",
-	"IXGBE_VFSRRCTL1",
-	"IXGBE_VFPSRTYPE",
-	"IXGBE_VFTDBAL0",
-	"IXGBE_VFTDBAL1",
-	"IXGBE_VFTDBAH0",
-	"IXGBE_VFTDBAH1",
-	"IXGBE_VFTDLEN0",
-	"IXGBE_VFTDLEN1",
-	"IXGBE_VFTDH0",
-	"IXGBE_VFTDH1",
-	"IXGBE_VFTDT0",
-	"IXGBE_VFTDT1",
-	"IXGBE_VFTXDCTL0",
-	"IXGBE_VFTXDCTL1",
-	"IXGBE_VFTDWBAL0",
-	"IXGBE_VFTDWBAL1",
-	"IXGBE_VFTDWBAH0",
-	"IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-	return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+	return IXGBE_REGS_LEN * sizeof(u32);
 }
 
 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +232,6 @@
 		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
 	for (i = 0; i < 2; i++)
 		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }
 
 static void ixgbevf_get_drvinfo(struct net_device *netdev,
@@ -441,22 +406,50 @@
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	char *base = (char *)adapter;
 	int i;
+#ifdef BP_EXTENDED_STATS
+	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+		rx_missed += adapter->rx_ring[i].bp_misses;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+		tx_missed += adapter->tx_ring[i].bp_misses;
+	}
+
+	adapter->bp_rx_yields = rx_yields;
+	adapter->bp_rx_cleaned = rx_cleaned;
+	adapter->bp_rx_missed = rx_missed;
+
+	adapter->bp_tx_yields = tx_yields;
+	adapter->bp_tx_cleaned = tx_cleaned;
+	adapter->bp_tx_missed = tx_missed;
+#endif
 
 	ixgbevf_update_stats(adapter);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter +
-			ixgbe_gstrings_stats[i].stat_offset;
-		char *b = (char *)adapter +
-			ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = (char *)adapter +
-			ixgbe_gstrings_stats[i].saved_reset_offset;
-		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+			else
+				data[i] = *(u64 *)p;
+		} else {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+			else
+				data[i] = *(u32 *)p;
+		}
 	}
 }
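The data[i] = p - b + r arithmetic above reconstructs a monotonic counter from three pieces: the live hardware value, the snapshot taken at the last reset, and the running total saved just before that reset. A numeric illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long long p = 150;	/* live counter */
	unsigned long long b = 100;	/* value at last reset */
	unsigned long long r = 1000;	/* total saved before reset */

	/* 50 new events since reset on top of the 1000 saved */
	printf("reported stat = %llu\n", p - b + r);	/* 1050 */
	return 0;
}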
 
@@ -685,6 +678,85 @@
 	return 0;
 }
 
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+				struct ethtool_coalesce *ec)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* only valid if in constant ITR mode */
+	if (adapter->rx_itr_setting <= 1)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+	else
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	/* if in mixed tx/rx queues per vector mode, report only rx settings */
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+		return 0;
+
+	/* only valid if in constant ITR mode */
+	if (adapter->tx_itr_setting <= 1)
+		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+	else
+		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+	return 0;
+}
+
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+				struct ethtool_coalesce *ec)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_q_vector *q_vector;
+	int num_vectors, i;
+	u16 tx_itr_param, rx_itr_param;
+
+	/* don't accept tx specific changes if we've got mixed RxTx vectors */
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count &&
+	    ec->tx_coalesce_usecs)
+		return -EINVAL;
+
+	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+		return -EINVAL;
+
+	if (ec->rx_coalesce_usecs > 1)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+	if (adapter->rx_itr_setting == 1)
+		rx_itr_param = IXGBE_20K_ITR;
+	else
+		rx_itr_param = adapter->rx_itr_setting;
+
+	if (ec->tx_coalesce_usecs > 1)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+	if (adapter->tx_itr_setting == 1)
+		tx_itr_param = IXGBE_10K_ITR;
+	else
+		tx_itr_param = adapter->tx_itr_setting;
+
+	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (i = 0; i < num_vectors; i++) {
+		q_vector = adapter->q_vector[i];
+		if (q_vector->tx.count && !q_vector->rx.count)
+			/* tx only */
+			q_vector->itr = tx_itr_param;
+		else
+			/* rx only or mixed */
+			q_vector->itr = rx_itr_param;
+		ixgbevf_write_eitr(q_vector);
+	}
+
+	return 0;
+}
+
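Both coalesce handlers store user microseconds shifted left by two and shift right by two on the way back out, reserving raw settings of 0 and 1 to mean "off" and "dynamic ITR". A worked round-trip with an arbitrary value:

#include <stdio.h>

int main(void)
{
	unsigned int usecs = 100;		/* requested rx-usecs */
	unsigned int setting = usecs << 2;	/* stored itr setting */

	/* settings 0 and 1 are reserved, so real values start at 2 */
	printf("stored %u, reported back %u usecs\n",
	       setting, setting >> 2);
	return 0;
}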
 static const struct ethtool_ops ixgbevf_ethtool_ops = {
 	.get_settings           = ixgbevf_get_settings,
 	.get_drvinfo            = ixgbevf_get_drvinfo,
@@ -700,6 +772,8 @@
 	.get_sset_count         = ixgbevf_get_sset_count,
 	.get_strings            = ixgbevf_get_strings,
 	.get_ethtool_stats      = ixgbevf_get_ethtool_stats,
+	.get_coalesce           = ixgbevf_get_coalesce,
+	.set_coalesce           = ixgbevf_set_coalesce,
 };
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d98..8971e2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
 
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@
 	struct u64_stats_sync	syncp;
 	u64 hw_csum_rx_error;
 	u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+	u64 bp_yields;
+	u64 bp_misses;
+	u64 bp_cleaned;
+#endif
 
 	u16 head;
 	u16 tail;
@@ -145,7 +155,118 @@
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
 	char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE		0
+#define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+	spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	bool rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->tx.ring->bp_yields++;
+#endif
+	} else {
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBEVF_QV_STATE_NAPI;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	bool rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+				   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	bool rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->rx.ring->bp_yields++;
+#endif
+	} else {
+		/* preserve yield marks */
+		q_vector->state |= IXGBEVF_QV_STATE_POLL;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	bool rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+	return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+	bool rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_OWNED)
+		rc = false;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
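These helpers implement a small flag-word state machine under the q_vector spinlock: the NAPI and socket busy-poll contexts race for ownership, and the loser records a yield bit so the owner knows more work arrived while it held the lock. A compact user-space model of the two lock paths, with pthreads standing in for the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QV_IDLE		0
#define QV_NAPI		1
#define QV_POLL		2
#define QV_NAPI_YIELD	8
#define QV_POLL_YIELD	16
#define QV_LOCKED	(QV_NAPI | QV_POLL)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int state = QV_IDLE;

static bool lock_napi(void)
{
	bool rc = true;

	pthread_mutex_lock(&lock);
	if (state & QV_LOCKED) {
		state |= QV_NAPI_YIELD;	/* poll owns it, back off */
		rc = false;
	} else {
		state = QV_NAPI;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

static bool lock_poll(void)
{
	bool rc = true;

	pthread_mutex_lock(&lock);
	if (state & QV_LOCKED) {
		state |= QV_POLL_YIELD;	/* note we were turned away */
		rc = false;
	} else {
		state |= QV_POLL;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	printf("napi lock: %d\n", lock_napi());	/* 1: got it */
	printf("poll lock: %d\n", lock_poll());	/* 0: napi holds it */
	return 0;
}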
 
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +286,13 @@
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
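Converting IXGBE_DESC_UNUSED to an inline makes the wrap-around arithmetic type-checked and easier to step through; the formula deliberately leaves one descriptor slot unused so a full ring can be told apart from an empty one. A quick check of the math for an 8-entry ring:

#include <stdio.h>

static unsigned int desc_unused(unsigned int count,
				unsigned int ntc, unsigned int ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	printf("count 8, clean 0, use 0 -> %u free\n", desc_unused(8, 0, 0));
	printf("count 8, clean 2, use 6 -> %u free\n", desc_unused(8, 2, 6));
	printf("count 8, clean 6, use 2 -> %u free\n", desc_unused(8, 6, 2));
	return 0;
}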
 
 #define IXGBEVF_RX_DESC(R, i)	    \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -240,7 +365,6 @@
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbevf_hw_stats stats;
-	u64 zero_base;
 	/* Interrupt Throttle Rate */
 	u32 eitr_param;
 
@@ -249,6 +373,16 @@
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
+#ifdef BP_EXTENDED_STATS
+	u64 bp_rx_yields;
+	u64 bp_rx_cleaned;
+	u64 bp_rx_missed;
+
+	u64 bp_tx_yields;
+	u64 bp_tx_cleaned;
+	u64 bp_tx_missed;
+#endif
+
 	u32 link_speed;
 	bool link_up;
 
@@ -281,27 +415,25 @@
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
-				      struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
 
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
 
 #ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
 	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
 #else
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bb..92ef4cb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.11.3-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -251,7 +251,7 @@
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -300,6 +300,30 @@
 }
 
 /**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+			   struct sk_buff *skb, u8 status,
+			   union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	skb_mark_napi_id(skb, &q_vector->napi);
+
+	if (ixgbevf_qv_busy_polling(q_vector)) {
+		netif_receive_skb(skb);
+		/* exit early if we busy polled */
+		return;
+	}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
 }
 
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
-				 struct ixgbevf_ring *rx_ring,
-				 int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+				struct ixgbevf_ring *rx_ring,
+				int budget)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -473,15 +497,6 @@
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
-		/*
-		 * Work around issue of some types of VM to VM loop back
-		 * packets not getting split correctly
-		 */
-		if (staterr & IXGBE_RXD_STAT_LB) {
-			u32 header_fixup_len = skb_headlen(skb);
-			if (header_fixup_len < 14)
-				skb_push(skb, header_fixup_len);
-		}
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
 		/* Workaround hardware that can't do proper VEPA multicast
@@ -494,7 +509,7 @@
 			goto next_desc;
 		}
 
-		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -514,7 +529,7 @@
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -526,7 +541,7 @@
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
-	return !!budget;
+	return total_rx_packets;
 }
 
 /**
@@ -549,6 +564,11 @@
 	ixgbevf_for_each_ring(ring, q_vector->tx)
 		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	if (!ixgbevf_qv_lock_napi(q_vector))
+		return budget;
+#endif
+
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	if (q_vector->rx.count > 1)
@@ -558,10 +578,15 @@
 
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	ixgbevf_for_each_ring(ring, q_vector->rx)
-		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
-						       per_ring_budget);
+		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+							per_ring_budget)
+				   < per_ring_budget);
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
@@ -580,7 +605,7 @@
  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
  * @q_vector: structure containing interrupt and ring information
  */
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +621,40 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+	struct ixgbevf_q_vector *q_vector =
+			container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring  *ring;
+	int found = 0;
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		return LL_FLUSH_FAILED;
+
+	if (!ixgbevf_qv_lock_poll(q_vector))
+		return LL_FLUSH_BUSY;
+
+	ixgbevf_for_each_ring(ring, q_vector->rx) {
+		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+		if (found)
+			ring->bp_cleaned += found;
+		else
+			ring->bp_misses++;
+#endif
+		if (found)
+			break;
+	}
+
+	ixgbevf_qv_unlock_poll(q_vector);
+
+	return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -756,37 +815,12 @@
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
 	struct ixgbevf_adapter *adapter = data;
-	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 msg;
-	bool got_ack = false;
 
 	hw->mac.get_link_status = 1;
-	if (!hw->mbx.ops.check_for_ack(hw))
-		got_ack = true;
 
-	if (!hw->mbx.ops.check_for_msg(hw)) {
-		hw->mbx.ops.read(hw, &msg, 1);
-
-		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-			mod_timer(&adapter->watchdog_timer,
-				  round_jiffies(jiffies + 1));
-			adapter->link_up = false;
-		}
-
-		if (msg & IXGBE_VT_MSGTYPE_NACK)
-			dev_info(&pdev->dev,
-				 "Last Request of type %2.2x to PF Nacked\n",
-				 msg & 0xFF);
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-	}
-
-	/* checking for the ack clears the PFACK bit.  Place
-	 * it back in the v2p_mailbox cache so that anyone
-	 * polling for an ack will not miss it
-	 */
-	if (got_ack)
-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer, jiffies);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1107,6 +1141,21 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* PSRTYPE must be initialized in 82599 */
+	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+		      IXGBE_PSRTYPE_L2HDR;
+
+	if (adapter->num_rx_queues > 1)
+		psrtype |= 1 << 29;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1154,8 +1203,7 @@
 	int i, j;
 	u32 rdlen;
 
-	/* PSRTYPE must be initialized in 82599 */
-	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+	ixgbevf_setup_psrtype(adapter);
 
 	/* set_rx_buffer_len must be called before ring initialization */
 	ixgbevf_set_rx_buffer_len(adapter);
@@ -1293,6 +1341,9 @@
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
 		napi_enable(&q_vector->napi);
 	}
 }
@@ -1306,6 +1357,12 @@
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
 		napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			usleep_range(1000, 20000);
+		}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	}
 }
 
@@ -1323,31 +1380,55 @@
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
 		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 IXGBE_DESC_UNUSED(ring));
+					 ixgbevf_desc_unused(ring));
 	}
 }
 
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-						int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 int rxr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
 	int j = adapter->rx_ring[rxr].reg_idx;
-	int k;
 
-	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-			break;
-		else
-			msleep(1);
-	}
-	if (k >= IXGBE_MAX_RX_DESC_POLL) {
-		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-		       "not set within the polling period\n", rxr);
-	}
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 
-	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-				adapter->rx_ring[rxr].count - 1);
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+		       rxr);
+
+	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+				(adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
 }
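ixgbevf_disable_rx_queue() follows the usual bounded-poll pattern: clear RXDCTL.ENABLE, then re-read until the hardware acknowledges or a retry budget runs out, rather than assuming the write takes effect immediately. A generic sketch of the pattern with a faked register read:

#include <stdio.h>

static unsigned int fake_reg = 1;	/* bit 0: ENABLE still set */

/* pretend the hardware clears the bit after a couple of reads */
static unsigned int read_reg(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reg = 0;
	return fake_reg;
}

int main(void)
{
	int wait_loop = 10;

	while (--wait_loop && (read_reg() & 1))
		;	/* a real driver would delay here, not spin */

	printf("%s\n", wait_loop ? "queue disabled" : "timeout");
	return 0;
}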
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1626,6 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	ixgbevf_negotiate_api(adapter);
-
 	ixgbevf_reset_queues(adapter);
 
 	ixgbevf_configure(adapter);
@@ -1679,7 +1758,10 @@
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
-	/* disable receives */
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
 
 	netif_tx_disable(netdev);
 
@@ -1733,10 +1815,12 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	if (hw->mac.ops.reset_hw(hw))
+	if (hw->mac.ops.reset_hw(hw)) {
 		hw_dbg(hw, "PF still resetting\n");
-	else
+	} else {
 		hw->mac.ops.init_hw(hw);
+		ixgbevf_negotiate_api(adapter);
+	}
 
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1929,6 +2013,9 @@
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_add(&q_vector->napi);
+#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
@@ -1938,6 +2025,9 @@
 	while (q_idx) {
 		q_idx--;
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 		adapter->q_vector[q_idx] = NULL;
@@ -1961,6 +2051,9 @@
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
@@ -2072,6 +2165,9 @@
 	hw->mac.max_tx_queues = 2;
 	hw->mac.max_rx_queues = 2;
 
+	/* lock to protect mailbox accesses */
+	spin_lock_init(&adapter->mbx_lock);
+
 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
 		dev_info(&pdev->dev,
@@ -2082,6 +2178,7 @@
 			pr_err("init_shared_code failed: %d\n", err);
 			goto out;
 		}
+		ixgbevf_negotiate_api(adapter);
 		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 		if (err)
 			dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2194,6 @@
 		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
 	}
 
-	/* lock to protect mailbox accesses */
-	spin_lock_init(&adapter->mbx_lock);
-
 	/* Enable dynamic interrupt throttling rates */
 	adapter->rx_itr_setting = 1;
 	adapter->tx_itr_setting = 1;
@@ -2620,8 +2714,6 @@
 		}
 	}
 
-	ixgbevf_negotiate_api(adapter);
-
 	/* setup queue reg_idx and Rx queue count */
 	err = ixgbevf_setup_queues(adapter);
 	if (err)
@@ -3010,7 +3102,7 @@
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbevf_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3021,7 +3113,7 @@
 
 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }
@@ -3216,6 +3308,8 @@
 	}
 	pci_set_master(pdev);
 
+	ixgbevf_reset(adapter);
+
 	rtnl_lock();
 	err = ixgbevf_init_interrupt_scheme(adapter);
 	rtnl_unlock();
@@ -3224,8 +3318,6 @@
 		return err;
 	}
 
-	ixgbevf_reset(adapter);
-
 	if (netif_running(netdev)) {
 		err = ixgbevf_open(netdev);
 		if (err)
@@ -3293,6 +3385,9 @@
 	.ndo_tx_timeout		= ixgbevf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3326,19 +3421,14 @@
 	if (err)
 		return err;
 
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev, "No usable DMA "
-					"configuration, aborting\n");
-				goto err_dma;
-			}
+			dev_err(&pdev->dev, "No usable DMA "
+				"configuration, aborting\n");
+			goto err_dma;
 		}
 		pci_using_dac = 0;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b526..4d44d64 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@
 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 	if (addr)
-		memcpy(msg_addr, addr, 6);
+		memcpy(msg_addr, addr, ETH_ALEN);
 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
 	if (!ret_val)
@@ -275,7 +275,7 @@
 
 	memset(msgbuf, 0, sizeof(msgbuf));
 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-	memcpy(msg_addr, addr, 6);
+	memcpy(msg_addr, addr, ETH_ALEN);
 	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
 	if (!ret_val)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 23de82a..f5685c0 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -309,7 +309,7 @@
 jme_load_macaddr(struct net_device *netdev)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
-	unsigned char macaddr[6];
+	unsigned char macaddr[ETH_ALEN];
 	u32 val;
 
 	spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@
 	val = jread32(jme, JME_RXUMA_HI);
 	macaddr[4] = (val >>  0) & 0xFF;
 	macaddr[5] = (val >>  8) & 0xFF;
-	memcpy(netdev->dev_addr, macaddr, 6);
+	memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
 	spin_unlock_bh(&jme->macaddr_lock);
 }
 
@@ -3192,7 +3192,6 @@
 err_out_unmap:
 	iounmap(jme->regs);
 err_out_free_netdev:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 err_out_release_regions:
 	pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@
 
 	unregister_netdev(netdev);
 	iounmap(jme->regs);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 3efc897..58cd67c 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -28,7 +28,6 @@
 
 #define DRV_NAME	"jme"
 #define DRV_VERSION	"1.0.8"
-#define PFX		DRV_NAME ": "
 
 #define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
 #define PCI_DEVICE_ID_JMICRON_JMC260	0x0260
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index a36fa80..4a5e3b0 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1110,7 +1110,7 @@
 	lp = netdev_priv(dev);
 
 	bif->dev = dev;
-	memcpy(dev->dev_addr, bif->mac, 6);
+	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
 
 	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
 	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2c210ec..00cd36e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2513,7 +2513,7 @@
 
 	mac_addr = of_get_mac_address(pnp);
 	if (mac_addr)
-		memcpy(ppd.mac_addr, mac_addr, 6);
+		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
 
 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
 	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2696,7 +2696,7 @@
 	struct net_device *dev = mp->dev;
 
 	if (is_valid_ether_addr(pd->mac_addr))
-		memcpy(dev->dev_addr, pd->mac_addr, 6);
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 	else
 		uc_addr_get(mp, dev->dev_addr);
 
@@ -2890,6 +2890,7 @@
 					 PHY_INTERFACE_MODE_GMII);
 		if (!mp->phy)
 			err = -ENODEV;
+		phy_addr_set(mp, mp->phy->addr);
 	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
 		mp->phy = phy_scan(mp, pd->phy_addr);
 
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index e2f6626..7354960 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -4,11 +4,9 @@
  * Since the MDIO interface of Marvell network interfaces is shared
  * between all network interfaces, having a single driver allows to
  * handle concurrent accesses properly (you may have four Ethernet
- * ports, but they in fact share the same SMI interface to access the
- * MDIO bus). Moreover, this MDIO interface code is similar between
- * the mv643xx_eth driver and the mvneta driver. For now, it is only
- * used by the mvneta driver, but it could later be used by the
- * mv643xx_eth driver as well.
+ * ports, but they in fact share the same SMI interface to access
+ * the MDIO bus). This driver is currently used by the mvneta and
+ * mv643xx_eth drivers.
  *
  * Copyright (C) 2012 Marvell
  *
@@ -44,6 +42,15 @@
 #define  MVMDIO_ERR_INT_SMI_DONE	   0x00000010
 #define MVMDIO_ERR_INT_MASK		   0x0080
 
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370       (Globalscale Mirabox):   41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT		   1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN	   45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX	   55
+
 struct orion_mdio_dev {
 	struct mutex lock;
 	void __iomem *regs;
@@ -68,77 +75,68 @@
 static int orion_mdio_wait_ready(struct mii_bus *bus)
 {
 	struct orion_mdio_dev *dev = bus->priv;
-	int count;
+	unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+	unsigned long end = jiffies + timeout;
+	int timedout = 0;
 
-	if (dev->err_interrupt <= 0) {
-		count = 0;
-		while (1) {
-			if (orion_mdio_smi_is_done(dev))
-				break;
+	while (1) {
+		if (orion_mdio_smi_is_done(dev))
+			return 0;
+		else if (timedout)
+			break;
 
-			if (count > 100) {
-				dev_err(bus->parent,
-					"Timeout: SMI busy for too long\n");
-				return -ETIMEDOUT;
-			}
+		if (dev->err_interrupt <= 0) {
+			usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
+				     MVMDIO_SMI_POLL_INTERVAL_MAX);
 
-			udelay(10);
-			count++;
-		}
-	} else {
-		if (!orion_mdio_smi_is_done(dev)) {
+			if (time_is_before_jiffies(end))
+				++timedout;
+		} else {
 			wait_event_timeout(dev->smi_busy_wait,
-				orion_mdio_smi_is_done(dev),
-				msecs_to_jiffies(100));
-			if (!orion_mdio_smi_is_done(dev))
-				return -ETIMEDOUT;
-		}
+				           orion_mdio_smi_is_done(dev),
+				           timeout);
+
+			++timedout;
+		}
 	}
 
-	return 0;
+	dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+	return -ETIMEDOUT;
 }
 
 static int orion_mdio_read(struct mii_bus *bus, int mii_id,
 			   int regnum)
 {
 	struct orion_mdio_dev *dev = bus->priv;
-	int count;
 	u32 val;
 	int ret;
 
 	mutex_lock(&dev->lock);
 
 	ret = orion_mdio_wait_ready(bus);
-	if (ret < 0) {
-		mutex_unlock(&dev->lock);
-		return ret;
-	}
+	if (ret < 0)
+		goto out;
 
 	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
 		(regnum << MVMDIO_SMI_PHY_REG_SHIFT)  |
 		MVMDIO_SMI_READ_OPERATION),
 	       dev->regs);
 
-	/* Wait for the value to become available */
-	count = 0;
-	while (1) {
-		val = readl(dev->regs);
-		if (val & MVMDIO_SMI_READ_VALID)
-			break;
+	ret = orion_mdio_wait_ready(bus);
+	if (ret < 0)
+		goto out;
 
-		if (count > 100) {
-			dev_err(bus->parent, "Timeout when reading PHY\n");
-			mutex_unlock(&dev->lock);
-			return -ETIMEDOUT;
-		}
-
-		udelay(10);
-		count++;
+	val = readl(dev->regs);
+	if (!(val & MVMDIO_SMI_READ_VALID)) {
+		dev_err(bus->parent, "SMI bus read not valid\n");
+		ret = -ENODEV;
+		goto out;
 	}
 
+	ret = val & 0xFFFF;
+out:
 	mutex_unlock(&dev->lock);
-
-	return val & 0xFFFF;
+	return ret;
 }
 
 static int orion_mdio_write(struct mii_bus *bus, int mii_id,
@@ -150,10 +148,8 @@
 	mutex_lock(&dev->lock);
 
 	ret = orion_mdio_wait_ready(bus);
-	if (ret < 0) {
-		mutex_unlock(&dev->lock);
-		return ret;
-	}
+	if (ret < 0)
+		goto out;
 
 	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
 		(regnum << MVMDIO_SMI_PHY_REG_SHIFT)  |
@@ -161,9 +157,9 @@
 		(value << MVMDIO_SMI_DATA_SHIFT)),
 	       dev->regs);
 
+out:
 	mutex_unlock(&dev->lock);
-
-	return 0;
+	return ret;
 }
 
 static int orion_mdio_reset(struct mii_bus *bus)
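
The rewritten orion_mdio_wait_ready() puts the polled and interrupt-driven
paths under one 1 ms budget (MVMDIO_SMI_TIMEOUT) and sleeps with usleep_range()
instead of spinning in udelay(). A condensed sketch of the jiffies-bounded
polling idiom it uses, with smi_done() standing in for the real completion
check:

	#include <linux/delay.h>
	#include <linux/jiffies.h>

	static int example_wait_ready(void __iomem *regs)
	{
		unsigned long end = jiffies + usecs_to_jiffies(1000);	/* 1 ms budget */

		while (!smi_done(regs)) {		/* assumed completion predicate */
			if (time_is_before_jiffies(end))
				return -ETIMEDOUT;	/* budget exhausted */
			usleep_range(45, 55);		/* matches the measured 41-95 us latency */
		}
		return 0;
	}
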
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e35bac7..b8e232b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2792,6 +2792,9 @@
 
 	pp = netdev_priv(dev);
 
+	u64_stats_init(&pp->tx_stats.syncp);
+	u64_stats_init(&pp->rx_stats.syncp);
+
 	pp->weight = MVNETA_RX_POLL_WEIGHT;
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
@@ -2811,7 +2814,7 @@
 	}
 
 	dt_mac_addr = of_get_mac_address(dn);
-	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+	if (dt_mac_addr) {
 		mac_from = "device tree";
 		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
 	} else {
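
Dropping the is_valid_ether_addr() test is safe because of_get_mac_address()
already returns NULL unless the device-tree property exists, has the right
length, and holds a valid address. A sketch of the resulting DT-first fallback
chain, with eth_hw_addr_random() as an assumed last resort (mvneta itself falls
back to a hardware register first):

	#include <linux/etherdevice.h>
	#include <linux/of_net.h>

	static void example_load_mac(struct net_device *dev, struct device_node *np)
	{
		const u8 *mac = of_get_mac_address(np);	/* NULL or a valid MAC */

		if (mac)
			memcpy(dev->dev_addr, mac, ETH_ALEN);
		else
			eth_hw_addr_random(dev);	/* random locally administered MAC */
	}
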
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ecc7f7b..5978461 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4046,7 +4046,6 @@
 	pci_release_regions(pdev);
 err_out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_out:
 	return err;
 }
@@ -4090,7 +4089,6 @@
 
 	iounmap(hw->regs);
 	kfree(hw);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e09a8c6..43aa7acd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4763,6 +4763,9 @@
 	sky2->hw = hw;
 	sky2->msg_enable = netif_msg_init(debug, default_msg);
 
+	u64_stats_init(&sky2->tx_stats.syncp);
+	u64_stats_init(&sky2->rx_stats.syncp);
+
 	/* Auto speed and flow control */
 	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
 	if (hw->chip_id != CHIP_ID_YUKON_XL)
@@ -5081,7 +5084,6 @@
 err_out_disable:
 	pci_disable_device(pdev);
 err_out:
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
@@ -5124,8 +5126,6 @@
 
 	iounmap(hw->regs);
 	kfree(hw);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int sky2_suspend(struct device *dev)
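
The u64_stats_init() calls added in the mvneta and sky2 probe paths exist
because, on 32-bit SMP kernels, u64_stats_sync is backed by a seqcount and
lockdep now wants every seqcount initialized before its first use. The usual
shape of the counters it protects, as a sketch:

	#include <linux/u64_stats_sync.h>

	struct example_ring_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	static void example_stats_init(struct example_ring_stats *s)
	{
		u64_stats_init(&s->syncp);	/* a no-op except on 32-bit SMP */
	}

	static void example_stats_add(struct example_ring_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}
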
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index bb11624..1e9970d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@
 	return ret;
 }
 
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
-	return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 					    int slave, int port)
 {
@@ -1553,7 +1548,6 @@
 	struct mlx4_dev *dev = &(priv->dev);
 	int err;
 	int admin_vlan_ix = NO_INDX;
-	enum mlx4_vlan_transition vlan_trans;
 
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@
 	    vp_oper->state.link_state == vp_admin->link_state)
 		return 0;
 
-	vlan_trans = calculate_transition(vp_oper->state.default_vlan,
-					  vp_admin->default_vlan);
-
 	if (!(priv->mfunc.master.slave_state[slave].active &&
-	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
-	      vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
 		/* even if the UPDATE_QP command isn't supported, we still want
 		 * to set this VF link according to the admin directive
 		 */
@@ -1586,15 +1576,19 @@
 		return -ENOMEM;
 
 	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
-		err = __mlx4_register_vlan(&priv->dev, port,
-					   vp_admin->default_vlan,
-					   &admin_vlan_ix);
-		if (err) {
-			kfree(work);
-			mlx4_warn((&priv->dev),
-				  "No vlan resources slave %d, port %d\n",
-				  slave, port);
-			return err;
+		if (MLX4_VGT != vp_admin->default_vlan) {
+			err = __mlx4_register_vlan(&priv->dev, port,
+						   vp_admin->default_vlan,
+						   &admin_vlan_ix);
+			if (err) {
+				kfree(work);
+				mlx4_warn((&priv->dev),
+					  "No vlan resources slave %d, port %d\n",
+					  slave, port);
+				return err;
+			}
+		} else {
+			admin_vlan_ix = NO_INDX;
 		}
 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
 		mlx4_dbg((&(priv->dev)),
@@ -1687,7 +1681,7 @@
 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 		if (NO_INDX != vp_oper->vlan_idx) {
 			__mlx4_unregister_vlan(&priv->dev,
-					       port, vp_oper->vlan_idx);
+					       port, vp_oper->state.default_vlan);
 			vp_oper->vlan_idx = NO_INDX;
 		}
 		if (NO_INDX != vp_oper->mac_idx) {
@@ -1718,6 +1712,7 @@
 	if (cmd == MLX4_COMM_CMD_RESET) {
 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
 		slave_state[slave].active = false;
+		slave_state[slave].old_vlan_api = false;
 		mlx4_master_deactivate_admin_state(priv, slave);
 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
 				slave_state[slave].event_eq[i].eqn = -1;
@@ -2198,6 +2193,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
+	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
 	return mailbox;
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
@@ -2253,7 +2250,6 @@
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_vport_oper_state *vf_oper;
 	struct mlx4_vport_state *vf_admin;
 	int slave;
 
@@ -2269,7 +2265,6 @@
 		return -EINVAL;
 
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
-	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
 		vf_admin->default_vlan = MLX4_VGT;
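
mlx4_alloc_cmd_mailbox() now zeroes the mailbox buffer at allocation time,
which is what lets the cq.c, en_port.c, eq.c, fw.c and mcg.c hunks below delete
their per-caller memset()s. Centralizing the invariant in the constructor is
the standard way to keep it from being forgotten; a generic sketch (the pool
helper and names are hypothetical):

	struct example_mailbox *example_mailbox_alloc(struct example_pool *pool)
	{
		struct example_mailbox *mb = example_pool_get(pool);	/* assumed allocator */

		if (!mb)
			return ERR_PTR(-ENOMEM);
		memset(mb->buf, 0, EXAMPLE_MAILBOX_SIZE);	/* every caller sees zeroed contents */
		return mb;
	}
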
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e423..22fcbe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->cq_max_count = cpu_to_be16(count);
 	cq_context->cq_period    = cpu_to_be16(period);
 
@@ -153,8 +151,6 @@
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
 	cq_context->log_page_size   = mtt->page_shift - 12;
 	mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@
 	}
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
 	if (timestamp_en)
 		cq_context->flags  |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d504..3a098cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,23 @@
 
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-		      struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode)
+		      struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode,
+		      int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
 	int err;
 
+	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+	if (!cq) {
+		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+		if (!cq) {
+			en_err(priv, "Failed to allocate CQ structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	cq->size = entries;
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
@@ -57,17 +68,30 @@
 	cq->is_tx = mode;
 	spin_lock_init(&cq->lock);
 
+	/* Allocate HW buffers on provided NUMA node.
+	 * dev->numa_node is used in mtt range allocation flow.
+	 */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 				cq->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		return err;
+		goto err_cq;
 
 	err = mlx4_en_map_buffer(&cq->wqres.buf);
 	if (err)
-		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
-		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+		goto err_res;
 
+	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+	*pcq = cq;
+
+	return 0;
+
+err_res:
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+	kfree(cq);
+	*pcq = NULL;
 	return err;
 }
 
@@ -117,12 +141,12 @@
 		struct mlx4_en_cq *rx_cq;
 
 		cq_idx = cq_idx % priv->rx_ring_num;
-		rx_cq = &priv->rx_cq[cq_idx];
+		rx_cq = priv->rx_cq[cq_idx];
 		cq->vector = rx_cq->vector;
 	}
 
 	if (!cq->is_tx)
-		cq->size = priv->rx_ring[cq->ring].actual_size;
+		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
 	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
 	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq = *pcq;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
+	kfree(cq);
+	*pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
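
mlx4_en_create_cq()/mlx4_en_destroy_cq() now allocate and free the CQ structure
themselves, placing it on the requested NUMA node with an any-node fallback and
handing it back through a **pcq out-parameter. The allocation idiom in
isolation, with a placeholder type:

	#include <linux/slab.h>

	static struct example_obj *example_alloc(int node)
	{
		struct example_obj *obj = kzalloc_node(sizeof(*obj), GFP_KERNEL, node);

		if (!obj)	/* preferred node exhausted: take memory from any node */
			obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		return obj;	/* NULL only if both attempts fail */
	}
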
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c75098..0596f9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@
 	int err = 0;
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_cq[i].moder_cnt = priv->tx_frames;
-		priv->tx_cq[i].moder_time = priv->tx_usecs;
+		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+		priv->tx_cq[i]->moder_time = priv->tx_usecs;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -64,11 +64,11 @@
 		return 0;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_cq[i].moder_cnt = priv->rx_frames;
-		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+		priv->rx_cq[i]->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -274,16 +274,16 @@
 		}
 	}
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i].packets;
-		data[index++] = priv->tx_ring[i].bytes;
+		data[index++] = priv->tx_ring[i]->packets;
+		data[index++] = priv->tx_ring[i]->bytes;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		data[index++] = priv->rx_ring[i].packets;
-		data[index++] = priv->rx_ring[i].bytes;
+		data[index++] = priv->rx_ring[i]->packets;
+		data[index++] = priv->rx_ring[i]->bytes;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-		data[index++] = priv->rx_ring[i].yields;
-		data[index++] = priv->rx_ring[i].misses;
-		data[index++] = priv->rx_ring[i].cleaned;
+		data[index++] = priv->rx_ring[i]->yields;
+		data[index++] = priv->rx_ring[i]->misses;
+		data[index++] = priv->rx_ring[i]->cleaned;
 #endif
 	}
 	spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
 	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
-	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-					priv->rx_ring[0].size) &&
-	    tx_size == priv->tx_ring[0].size)
+	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+					priv->rx_ring[0]->size) &&
+	    tx_size == priv->tx_ring[0]->size)
 		return 0;
 
 	mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@
 	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
 	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = priv->port_up ?
-		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-	param->tx_pending = priv->tx_ring[0].size;
+		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+	param->tx_pending = priv->tx_ring[0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a071cda..0d087b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -264,6 +264,10 @@
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
+	/* Initialize time stamp mechanism */
+	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+		mlx4_en_init_timestamp(mdev);
+
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		if (!dev->caps.comp_pool) {
 			mdev->profile.prof[i].rx_ring_num =
@@ -301,10 +305,6 @@
 			mdev->pndev[i] = NULL;
 	}
 
-	/* Initialize time stamp mechanism */
-	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-		mlx4_en_init_timestamp(mdev);
-
 	return mdev;
 
 err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a..e72d8a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
@@ -102,6 +102,7 @@
 	struct list_head next;
 	struct work_struct work;
 
+	u8     ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -120,14 +121,26 @@
 
 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
 
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		return MLX4_NET_TRANS_RULE_ID_UDP;
+	case IPPROTO_TCP:
+		return MLX4_NET_TRANS_RULE_ID_TCP;
+	default:
+		return -EPROTONOSUPPORT;
+	}
+}
+
 static void mlx4_en_filter_work(struct work_struct *work)
 {
 	struct mlx4_en_filter *filter = container_of(work,
 						     struct mlx4_en_filter,
 						     work);
 	struct mlx4_en_priv *priv = filter->priv;
-	struct mlx4_spec_list spec_tcp = {
-		.id = MLX4_NET_TRANS_RULE_ID_TCP,
+	struct mlx4_spec_list spec_tcp_udp = {
+		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
 		{
 			.tcp_udp = {
 				.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
+	if (spec_tcp_udp.id < 0) {
+		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+			filter->ip_proto);
+		goto ignore;
+	}
 	list_add_tail(&spec_eth.list, &rule.list);
 	list_add_tail(&spec_ip.list, &rule.list);
-	list_add_tail(&spec_tcp.list, &rule.list);
+	list_add_tail(&spec_tcp_udp.list, &rule.list);
 
 	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
 	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@
 	if (rc)
 		en_err(priv, "Error attaching flow. err = %d\n", rc);
 
+ignore:
 	mlx4_en_filter_rfs_expire(priv);
 
 	filter->activated = 1;
@@ -206,8 +225,8 @@
 
 static struct mlx4_en_filter *
 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
-		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
-		     u32 flow_id)
+		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
+		     __be16 dst_port, u32 flow_id)
 {
 	struct mlx4_en_filter *filter = NULL;
 
@@ -221,6 +240,7 @@
 
 	filter->src_ip = src_ip;
 	filter->dst_ip = dst_ip;
+	filter->ip_proto = ip_proto;
 	filter->src_port = src_port;
 	filter->dst_port = dst_port;
 
@@ -252,7 +272,7 @@
 
 static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
-		    __be16 src_port, __be16 dst_port)
+		    u8 ip_proto, __be16 src_port, __be16 dst_port)
 {
 	struct mlx4_en_filter *filter;
 	struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@
 			     filter_chain) {
 		if (filter->src_ip == src_ip &&
 		    filter->dst_ip == dst_ip &&
+		    filter->ip_proto == ip_proto &&
 		    filter->src_port == src_port &&
 		    filter->dst_port == dst_port) {
 			ret = filter;
@@ -281,6 +302,7 @@
 	struct mlx4_en_filter *filter;
 	const struct iphdr *ip;
 	const __be16 *ports;
+	u8 ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -295,18 +317,19 @@
 	if (ip_is_fragment(ip))
 		return -EPROTONOSUPPORT;
 
+	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
+	ip_proto = ip->protocol;
 	src_ip = ip->saddr;
 	dst_ip = ip->daddr;
 	src_port = ports[0];
 	dst_port = ports[1];
 
-	if (ip->protocol != IPPROTO_TCP)
-		return -EPROTONOSUPPORT;
-
 	spin_lock_bh(&priv->filters_lock);
-	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+				     src_port, dst_port);
 	if (filter) {
 		if (filter->rxq_index == rxq_index)
 			goto out;
@@ -314,7 +337,7 @@
 		filter->rxq_index = rxq_index;
 	} else {
 		filter = mlx4_en_filter_alloc(priv, rxq_index,
-					      src_ip, dst_ip,
+					      src_ip, dst_ip, ip_proto,
 					      src_port, dst_port, flow_id);
 		if (!filter) {
 			ret = -ENOMEM;
@@ -332,8 +355,7 @@
 	return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_filter *filter, *tmp;
 	LIST_HEAD(del_list);
@@ -417,7 +439,6 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
-	int idx;
 
 	en_dbg(HW, priv, "Killing VID:%d\n", vid);
 
@@ -425,10 +446,7 @@
 
 	/* Remove VID from port VLAN filter */
 	mutex_lock(&mdev->state_lock);
-	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
-		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
-	else
-		en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
+	mlx4_unregister_vlan(mdev->dev, priv->port, vid);
 
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1223,7 +1241,7 @@
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1245,8 +1263,8 @@
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1286,7 +1304,7 @@
 
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1295,7 +1313,7 @@
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1329,8 +1347,8 @@
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1359,7 +1377,7 @@
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1482,7 +1500,7 @@
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
 
@@ -1500,7 +1518,7 @@
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1526,7 +1544,7 @@
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1542,7 +1560,7 @@
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 			i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1612,8 +1630,8 @@
 
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1622,9 +1640,9 @@
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1720,25 +1738,25 @@
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
 
 	/* Unregister Mac address for the port */
 	mlx4_en_put_qp(priv);
-	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
 		mdev->mac_removed[priv->port] = 1;
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1749,7 +1767,7 @@
 
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1787,15 +1805,15 @@
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
 
@@ -1852,17 +1870,17 @@
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
 
@@ -1877,6 +1895,7 @@
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 	int err;
+	int node;
 
 	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
@@ -1886,23 +1905,26 @@
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX))
+				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE))
+					   prof->tx_ring_size, TXBB_SIZE, node))
 			goto err;
 	}
 
 	/* Create rx Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-				      prof->rx_ring_size, i, RX))
+				      prof->rx_ring_size, i, RX, node))
 			goto err;
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-					   prof->rx_ring_size, priv->stride))
+					   prof->rx_ring_size, priv->stride,
+					   node))
 			goto err;
 	}
 
@@ -1918,6 +1940,20 @@
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -2211,13 +2247,13 @@
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			      GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;
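
In mlx4_en_alloc_resources() each ring now gets a home NUMA node derived from
the CPU it is nominally paired with, cpu_to_node(i % num_online_cpus()), so
descriptor memory lands near the core expected to process that ring. A sketch
of the loop shape, with create_ring() and the priv type standing in for the
real CQ/ring constructors:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	static int example_create_rings(struct example_priv *priv, int ring_num)
	{
		int i, err;

		for (i = 0; i < ring_num; i++) {
			int node = cpu_to_node(i % num_online_cpus());

			err = create_ring(priv, i, node);	/* assumed constructor */
			if (err)
				return err;	/* the real code also unwinds rings 0..i-1 */
		}
		return 0;
	}
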
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3317914..dae1a1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@
 		return PTR_ERR(mailbox);
 
 	filter = mailbox->buf;
-	memset(filter, 0, sizeof(*filter));
 	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
 		entry = 0;
 		for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*qport_context));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
 			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@
 	priv->port_stats.rx_chksum_good = 0;
 	priv->port_stats.rx_chksum_none = 0;
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		stats->rx_packets += priv->rx_ring[i].packets;
-		stats->rx_bytes += priv->rx_ring[i].bytes;
-		priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
-		priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+		stats->rx_packets += priv->rx_ring[i]->packets;
+		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		stats->tx_packets += priv->tx_ring[i].packets;
-		stats->tx_bytes += priv->tx_ring[i].bytes;
-		priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+		stats->tx_packets += priv->tx_ring[i]->packets;
+		stats->tx_bytes += priv->tx_ring[i]->bytes;
+		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
 	}
 
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa..07a1d0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-			ring = &priv->rx_ring[ring_ind];
+			ring = priv->rx_ring[ring_ind];
 
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
@@ -289,7 +289,7 @@
 
 reduce_rings:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 		while (ring->actual_size > new_size) {
 			ring->actual_size--;
 			ring->prod--;
@@ -319,12 +319,23 @@
 }
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring;
 	int err = -ENOMEM;
 	int tmp;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed to allocate RX ring structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->prod = 0;
 	ring->cons = 0;
 	ring->size = size;
@@ -335,17 +346,25 @@
 
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
-	ring->rx_info = vmalloc(tmp);
-	if (!ring->rx_info)
-		return -ENOMEM;
+	ring->rx_info = vmalloc_node(tmp, node);
+	if (!ring->rx_info) {
+		ring->rx_info = vmalloc(tmp);
+		if (!ring->rx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
 				 ring->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		goto err_ring;
+		goto err_info;
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
@@ -356,13 +375,18 @@
 
 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
 
+	*pring = ring;
 	return 0;
 
 err_hwq:
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
+
 	return err;
 }
 
@@ -376,12 +400,12 @@
 					DS_SIZE * priv->num_frags);
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->prod = 0;
 		ring->cons = 0;
 		ring->actual_size = 0;
-		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
 		ring->stride = stride;
 		if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@
 		goto err_buffers;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->size_mask = ring->actual_size - 1;
 		mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@
 
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
 	ring_ind = priv->rx_ring_num - 1;
 err_allocator:
 	while (ring_ind >= 0) {
-		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
 	return err;
 }
 
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			     struct mlx4_en_rx_ring **pring,
+			     u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring = *pring;
 
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 #ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv, ring);
+	mlx4_en_cleanup_filters(priv);
 #endif
 }
 
@@ -592,7 +620,7 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
-	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
 	struct mlx4_en_rx_alloc *frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
@@ -991,7 +1019,7 @@
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -1008,7 +1036,7 @@
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, -1, &context);
+				priv->rx_ring[0]->cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d..4062669 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@
 		 * since we turned the carrier off */
 		msleep(200);
 		for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-			tx_ring = &priv->tx_ring[i];
+			tx_ring = priv->tx_ring[i];
 			if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
 				goto retry_tx;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82..f54ebd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,23 @@
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
-			   u16 stride)
+			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+			   u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring;
 	int tmp;
 	int err;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed allocating TX ring\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->stride = stride;
@@ -68,22 +78,33 @@
 	inline_thold = min(inline_thold, MAX_INLINE);
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
-	ring->tx_info = vmalloc(tmp);
-	if (!ring->tx_info)
-		return -ENOMEM;
+	ring->tx_info = vmalloc_node(tmp, node);
+	if (!ring->tx_info) {
+		ring->tx_info = vmalloc(tmp);
+		if (!ring->tx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
-	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
 	if (!ring->bounce_buf) {
-		err = -ENOMEM;
-		goto err_tx;
+		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+		if (!ring->bounce_buf) {
+			err = -ENOMEM;
+			goto err_info;
+		}
 	}
 	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err) {
 		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
@@ -109,7 +130,7 @@
 	}
 	ring->qp.event = mlx4_en_sqp_event;
 
-	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
 	if (err) {
 		en_dbg(DRV, priv, "working without blueflame (%d)", err);
 		ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@
 
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 
+	*pring = ring;
 	return 0;
 
 err_map:
@@ -129,16 +151,20 @@
 err_bounce:
 	kfree(ring->bounce_buf);
 	ring->bounce_buf = NULL;
-err_tx:
+err_info:
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
 	return err;
 }
 
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_tx_ring *ring)
+			     struct mlx4_en_tx_ring **pring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring = *pring;
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	if (ring->bf_enabled)
@@ -151,6 +177,8 @@
 	ring->bounce_buf = NULL;
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 }
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
 	struct mlx4_cqe *cqe;
 	u16 index;
 	u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@
 	}
 
 	tx_ind = skb->queue_mapping;
-	ring = &priv->tx_ring[tx_ind];
+	ring = priv->tx_ring[tx_ind];
 	if (vlan_tx_tag_present(skb))
 		vlan_tag = vlan_tx_tag_get(skb);
 
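
The set_dev_node() bracketing around mlx4_alloc_hwq_res() in the CQ, RX and TX
constructors works because the DMA-coherent allocator takes its placement hint
from the allocating device's node; temporarily repointing the PCI device at
the ring's node steers the buffer there, and the node is restored immediately
afterwards. Sketched, with alloc_hw_queue() as an assumed helper that ends in
dma_alloc_coherent():

	#include <linux/device.h>
	#include <linux/pci.h>

	static int example_alloc_on_node(struct pci_dev *pdev,
					 struct example_res *res, int node)
	{
		int orig = dev_to_node(&pdev->dev);
		int err;

		set_dev_node(&pdev->dev, node);	/* dma_alloc_coherent() honors this */
		err = alloc_hw_queue(pdev, res);	/* assumed DMA-backed allocation */
		set_dev_node(&pdev->dev, orig);	/* restore the device's real node */
		return err;
	}
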
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b..c9cdb2a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@
 	if (err)
 		goto err_out_free_mtt;
 
-	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
 						MLX4_EQ_STATE_ARMED);
 	eq_context->log_eq_size	  = ilog2(eq->nent);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa..1949282 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
 	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
@@ -177,6 +175,7 @@
 				struct mlx4_cmd_mailbox *outbox,
 				struct mlx4_cmd_info *cmd)
 {
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	u8	field;
 	u32	size;
 	int	err = 0;
@@ -185,18 +184,26 @@
 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
 #define QUERY_FUNC_CAP_FMR_OFFSET		0x8
-#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x10
-#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x14
-#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x18
-#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x20
-#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x24
-#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x28
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
 
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68
+
 #define QUERY_FUNC_CAP_FMR_FLAG			0x80
 #define QUERY_FUNC_CAP_FLAG_RDMA		0x40
 #define QUERY_FUNC_CAP_FLAG_ETH			0x80
+#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
 
 /* when opcode modifier = 1 */
 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
@@ -237,8 +244,9 @@
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
 
 	} else if (vhcr->op_modifier == 0) {
-		/* enable rdma and ethernet interfaces */
-		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
+		/* enable rdma and ethernet interfaces, and new quota locations */
+		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
+			 QUERY_FUNC_CAP_FLAG_QUOTAS);
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 
 		field = dev->caps.num_ports;
@@ -250,14 +258,20 @@
 		field = 0; /* protected FMR support not available as yet */
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
 
-		size = dev->caps.num_qps;
+		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+		size = dev->caps.num_qps;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
 
-		size = dev->caps.num_srqs;
+		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+		size = dev->caps.num_srqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
 
-		size = dev->caps.num_cqs;
+		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+		size = dev->caps.num_cqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
 
 		size = dev->caps.num_eqs;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@@ -265,14 +279,19 @@
 		size = dev->caps.reserved_eqs;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 
-		size = dev->caps.num_mpts;
+		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+		size = dev->caps.num_mpts;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
 
-		size = dev->caps.num_mtts;
+		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+		size = dev->caps.num_mtts;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
 
 		size = dev->caps.num_mgms + dev->caps.num_amgms;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
 
 	} else
 		err = -EINVAL;
@@ -287,7 +306,7 @@
 	u32			*outbox;
 	u8			field, op_modifier;
 	u32			size;
-	int			err = 0;
+	int			err = 0, quotas = 0;
 
 	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
 
@@ -311,6 +330,7 @@
 			goto out;
 		}
 		func_cap->flags = field;
+		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
 
 		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 		func_cap->num_ports = field;
@@ -318,29 +338,50 @@
 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
 		func_cap->pf_context_behaviour = size;
 
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
-		func_cap->qp_quota = size & 0xFFFFFF;
+		if (quotas) {
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+			func_cap->qp_quota = size & 0xFFFFFF;
 
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
-		func_cap->srq_quota = size & 0xFFFFFF;
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+			func_cap->srq_quota = size & 0xFFFFFF;
 
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
-		func_cap->cq_quota = size & 0xFFFFFF;
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+			func_cap->cq_quota = size & 0xFFFFFF;
 
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+			func_cap->mpt_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+			func_cap->mtt_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+			func_cap->mcg_quota = size & 0xFFFFFF;
+
+		} else {
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
+			func_cap->qp_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
+			func_cap->srq_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
+			func_cap->cq_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
+			func_cap->mpt_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
+			func_cap->mtt_quota = size & 0xFFFFFF;
+
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
+			func_cap->mcg_quota = size & 0xFFFFFF;
+		}
 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
 		func_cap->max_eq = size & 0xFFFFFF;
 
 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 		func_cap->reserved_eq = size & 0xFFFFFF;
 
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
-		func_cap->mpt_quota = size & 0xFFFFFF;
-
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
-		func_cap->mtt_quota = size & 0xFFFFFF;
-
-		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
-		func_cap->mcg_quota = size & 0xFFFFFF;
 		goto out;
 	}
 
@@ -652,7 +693,7 @@
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
-		dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -924,7 +965,6 @@
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
 	pages = mailbox->buf;
 
 	for (mlx4_icm_first(icm, &iter);
@@ -1273,8 +1313,6 @@
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_HCA_IN_SIZE);
-
 	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
 
 	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1573,8 +1611,6 @@
 			return PTR_ERR(mailbox);
 		inbox = mailbox->buf;
 
-		memset(inbox, 0, INIT_PORT_IN_SIZE);
-
 		flags = 0;
 		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
 		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
@@ -1713,7 +1749,6 @@
 	u32 *outbox;
 	u32 modifier;
 	u16 token;
-	u16 type_m;
 	u16 type;
 	int err;
 	u32 num_qps;
@@ -1739,14 +1774,13 @@
 				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
 				   MLX4_CMD_NATIVE);
 		if (err) {
-			mlx4_err(dev, "Failed to retreive required operation: %d\n",
+			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
 				 err);
 			return;
 		}
 		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
 		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
 		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
-		type_m = type >> 12;
 		type &= 0xfff;
 
 		switch (type) {
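
The QUERY_FUNC_CAP rework keeps backward compatibility: a PF that tracks
per-slave quotas sets QUERY_FUNC_CAP_FLAG_QUOTAS and reports the real quotas at
the new 0x50-0x68 offsets, while still writing the device-wide totals at the
old 0x10-0x28 offsets (renamed *_DEP, for deprecated) for slaves that predate
the flag. Condensed, the slave side of the QP quota read reduces to:

	quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
	MLX4_GET(size, outbox, quotas ? QUERY_FUNC_CAP_QP_QUOTA_OFFSET		/* 0x50 */
				      : QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);	/* 0x10 */
	func_cap->qp_quota = size & 0xFFFFFF;	/* quota fields are 24 bits wide */
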
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d0264..5fbf492 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@
 	kfree(icm);
 }
 
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+				gfp_t gfp_mask, int node)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask, order);
-	if (!page)
-		return -ENOMEM;
+	page = alloc_pages_node(node, gfp_mask, order);
+	if (!page) {
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return -ENOMEM;
+	}
 
 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
 	return 0;
@@ -130,9 +134,15 @@
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
-	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-	if (!icm)
-		return NULL;
+	icm = kmalloc_node(sizeof(*icm),
+			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+			   dev->numa_node);
+	if (!icm) {
+		icm = kmalloc(sizeof(*icm),
+			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+		if (!icm)
+			return NULL;
+	}
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@
 
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc(sizeof *chunk,
-					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-			if (!chunk)
-				goto fail;
+			chunk = kmalloc_node(sizeof(*chunk),
+					     gfp_mask & ~(__GFP_HIGHMEM |
+							  __GFP_NOWARN),
+					     dev->numa_node);
+			if (!chunk) {
+				chunk = kmalloc(sizeof(*chunk),
+						gfp_mask & ~(__GFP_HIGHMEM |
+							     __GFP_NOWARN));
+				if (!chunk)
+					goto fail;
+			}
 
 			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 			chunk->npages = 0;
@@ -161,7 +178,8 @@
 						      cur_order, gfp_mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask);
+						   cur_order, gfp_mask,
+						   dev->numa_node);
 
 		if (ret) {
 			if (--cur_order < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f..5789ea2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
+#include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -561,13 +562,17 @@
 	}
 
 	dev->caps.num_ports		= func_cap.num_ports;
-	dev->caps.num_qps		= func_cap.qp_quota;
-	dev->caps.num_srqs		= func_cap.srq_quota;
-	dev->caps.num_cqs		= func_cap.cq_quota;
-	dev->caps.num_eqs               = func_cap.max_eq;
-	dev->caps.reserved_eqs          = func_cap.reserved_eq;
-	dev->caps.num_mpts		= func_cap.mpt_quota;
-	dev->caps.num_mtts		= func_cap.mtt_quota;
+	dev->quotas.qp			= func_cap.qp_quota;
+	dev->quotas.srq			= func_cap.srq_quota;
+	dev->quotas.cq			= func_cap.cq_quota;
+	dev->quotas.mpt			= func_cap.mpt_quota;
+	dev->quotas.mtt			= func_cap.mtt_quota;
+	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
+	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
+	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
+	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
+	dev->caps.num_eqs		= func_cap.max_eq;
+	dev->caps.reserved_eqs		= func_cap.reserved_eq;
 	dev->caps.num_pds               = MLX4_NUM_PDS;
 	dev->caps.num_mgms              = 0;
 	dev->caps.num_amgms             = 0;
@@ -650,6 +655,27 @@
 	return err;
 }
 
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+	int port;
+	int has_ib_port = false;
+	int has_eth_port = false;
+#define EN_DRV_NAME	"mlx4_en"
+#define IB_DRV_NAME	"mlx4_ib"
+
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+			has_ib_port = true;
+		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+			has_eth_port = true;
+	}
+
+	if (has_ib_port)
+		request_module_nowait(IB_DRV_NAME);
+	if (has_eth_port)
+		request_module_nowait(EN_DRV_NAME);
+}
+
 /*
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
@@ -681,6 +707,11 @@
 		}
 		mlx4_set_port_mask(dev);
 		err = mlx4_register_device(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to register device\n");
+			goto out;
+		}
+		mlx4_request_modules(dev);
 	}
 
 out:
@@ -2075,9 +2106,15 @@
 			"aborting.\n");
 		return err;
 	}
-	if (num_vfs > MLX4_MAX_NUM_VF) {
-		printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
-		       num_vfs, MLX4_MAX_NUM_VF);
+
+	/* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
+	 * per port, we must limit the number of VFs to 63 (since there are
+	 * 128 MACs)
+	 */
+	if (num_vfs >= MLX4_MAX_NUM_VF) {
+		dev_err(&pdev->dev,
+			"Requested more VFs (%d) than allowed (%d)\n",
+			num_vfs, MLX4_MAX_NUM_VF - 1);
 		return -EINVAL;
 	}
 
@@ -2154,6 +2191,7 @@
 	mutex_init(&priv->bf_mutex);
 
 	dev->rev_id = pdev->revision;
+	dev->numa_node = dev_to_node(&pdev->dev);
 	/* Detect if this device is a virtual function */
 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
 		/* When acting as pf, we normally skip vfs unless explicitly
@@ -2295,6 +2333,8 @@
 	if (err)
 		goto err_steer;
 
+	mlx4_init_quotas(dev);
+
 	for (port = 1; port <= dev->caps.num_ports; port++) {
 		err = mlx4_init_port_info(dev, port);
 		if (err)
@@ -2305,6 +2345,8 @@
 	if (err)
 		goto err_port;
 
+	mlx4_request_modules(dev);
+
 	mlx4_sense_init(dev);
 	mlx4_start_sense(dev);
 
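
Two of the main.c changes deserve a note. The request_module_nowait() calls let
the core driver pull in mlx4_en or mlx4_ib automatically once it knows which
port types are configured, instead of relying on userspace to load them. And
the VF bound tightens from > to >= by simple arithmetic: with 128 MAC table
entries per port and every function (PF or VF) guaranteed 2, at most
128 / 2 = 64 functions fit, i.e. the PF plus 63 VFs, so the check now rejects
num_vfs >= MLX4_MAX_NUM_VF and the maximum becomes MLX4_MAX_NUM_VF - 1 = 63.
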
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245..acf9d5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@
 		goto out_list;
 	}
 	mgm = mailbox->buf;
-	memset(mgm, 0, sizeof *mgm);
 	members_count = 0;
 	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
 		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -645,7 +644,7 @@
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
 				    enum mlx4_net_trans_promisc_mode flow_type)
 {
-	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+	if (flow_type >= MLX4_FS_MODE_NUM) {
 		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
 		return -EINVAL;
 	}
@@ -681,7 +680,7 @@
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 				  enum mlx4_net_trans_rule_id id)
 {
-	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+	if (id >= MLX4_NET_TRANS_RULE_NUM) {
 		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 		return -EINVAL;
 	}
@@ -706,7 +705,7 @@
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
 	       enum mlx4_net_trans_rule_id id)
 {
-	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+	if (id >= MLX4_NET_TRANS_RULE_NUM) {
 		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
 		return -EINVAL;
 	}
@@ -857,7 +856,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
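
The memset()s dropped in this hunk (and in mr.c, port.c and srq.c further
down) all depend on another patch in this series that makes
mlx4_alloc_cmd_mailbox() hand back a zeroed buffer, so callers may rely on:

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* mailbox->buf arrives zeroed; no per-caller memset needed */
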
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 348bb8c..e582a41 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -455,6 +455,7 @@
 	u8 last_cmd;
 	u8 init_port_mask;
 	bool active;
+	bool old_vlan_api;
 	u8 function;
 	dma_addr_t vhcr_dma;
 	u16 mtu[MLX4_MAX_PORTS + 1];
@@ -503,12 +504,28 @@
 	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
 };
 
+struct resource_allocator {
+	spinlock_t alloc_lock; /* protect quotas */
+	union {
+		int res_reserved;
+		int res_port_rsvd[MLX4_MAX_PORTS];
+	};
+	union {
+		int res_free;
+		int res_port_free[MLX4_MAX_PORTS];
+	};
+	int *quota;
+	int *allocated;
+	int *guaranteed;
+};
+
 struct mlx4_resource_tracker {
 	spinlock_t lock;
 	/* one tree per resource type */
 	struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
 	/* per-slave resource lists */
 	struct slave_list *slave_list;
+	struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
 };
 
 #define SLAVE_EVENT_EQ_SIZE	128
@@ -1111,7 +1128,7 @@
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
@@ -1252,4 +1269,6 @@
 
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
 
+void mlx4_init_quotas(struct mlx4_dev *dev);
+
 #endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e36..f3758de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@
 	u16 num_frags;
 	u16 log_rx_info;
 
-	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq *tx_cq;
-	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct mlx4_en_tx_ring **tx_ring;
+	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+	struct mlx4_en_cq **tx_cq;
+	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp drop_qp;
 	struct work_struct rx_mode_task;
 	struct work_struct watchdog_task;
@@ -626,7 +626,7 @@
 	if ((cq->state & MLX4_CQ_LOCKED)) {
 		struct net_device *dev = cq->dev;
 		struct mlx4_en_priv *priv = netdev_priv(dev);
-		struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 
 		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
 		rc = false;
@@ -704,9 +704,9 @@
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			int cq_idx);
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-			   int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_tx_ring **pring,
+			   int qpn, u32 size, u16 stride, int node);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
 			     int cq, int user_prio);
@@ -727,10 +729,10 @@
 				struct mlx4_en_tx_ring *ring);
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring,
-			   u32 size, u16 stride);
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring,
+			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride);
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@
 int mlx4_en_setup_tc(struct net_device *dev, u8 up);
 
 #ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 #endif
 
 #define MLX4_EN_NUM_SELF_TEST	5
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index f91719a..b3ee9ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@
 		goto err_table;
 	}
 	mpt_entry = mailbox->buf;
-
-	memset(mpt_entry, 0, sizeof *mpt_entry);
-
 	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
 				       MLX4_MPT_FLAG_REGION	 |
 				       mr->access);
@@ -695,8 +692,6 @@
 	}
 	mpt_entry = mailbox->buf;
 
-	memset(mpt_entry, 0, sizeof(*mpt_entry));
-
 	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
 	 * off, thus creating a memory window and not a memory region.
 	 */
@@ -755,14 +750,14 @@
 	struct mlx4_mr_table *mr_table = &priv->mr_table;
 	int err;
 
-	if (!is_power_of_2(dev->caps.num_mpts))
-		return -EINVAL;
-
 	/* Nothing to do for slaves - all MR handling is forwarded
 	 * to the master */
 	if (mlx4_is_slave(dev))
 		return 0;
 
+	if (!is_power_of_2(dev->caps.num_mpts))
+		return -EINVAL;
+
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
 			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223a..84cfb40 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_uar *uar;
@@ -186,10 +186,13 @@
 			err = -ENOMEM;
 			goto out;
 		}
-		uar = kmalloc(sizeof *uar, GFP_KERNEL);
+		uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
 		if (!uar) {
-			err = -ENOMEM;
-			goto out;
+			uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+			if (!uar) {
+				err = -ENOMEM;
+				goto out;
+			}
 		}
 		err = mlx4_uar_alloc(dev, uar);
 		if (err)
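
The node-affine attempt in mlx4_bf_alloc() is best-effort: node locality is
only a performance preference, so when kmalloc_node() fails the code retries
with a plain kmalloc() before giving up. The fallback shape in isolation:

	uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
	if (!uar)
		uar = kmalloc(sizeof(*uar), GFP_KERNEL); /* any node will do */
	if (!uar)
		return -ENOMEM;
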
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 946e0af..97d342f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -178,13 +178,24 @@
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
 	u64 out_param = 0;
-	int err;
+	int err = -EINVAL;
 
 	if (mlx4_is_mfunc(dev)) {
-		set_param_l(&out_param, port);
-		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
-				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
-				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+			err = mlx4_cmd_imm(dev, mac, &out_param,
+					   ((u32) port) << 8 | (u32) RES_MAC,
+					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		}
+		if (err == -EINVAL && mlx4_is_slave(dev)) {
+			/* retry using old REG_MAC format */
+			set_param_l(&out_param, port);
+			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+			if (!err)
+				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
+		}
 		if (err)
 			return err;
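
The reworked in_modifier packs the port number alongside the resource type;
the command wrapper on the master side unpacks it again (the `& 0xFF` and
`>> 8` in resource_tracker.c's alloc/free switches). Spelled out:

	u32 in_mod = ((u32) port) << 8 | (u32) RES_MAC; /* port in bits 15:8 */
	int type    = in_mod & 0xFF;        /* RES_MAC */
	int in_port = (in_mod >> 8) & 0xFF; /* port; 0 from old-format slaves */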
 
@@ -231,10 +242,18 @@
 	u64 out_param = 0;
 
 	if (mlx4_is_mfunc(dev)) {
-		set_param_l(&out_param, port);
-		(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
-				    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
-				    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+			(void) mlx4_cmd_imm(dev, mac, &out_param,
+					    ((u32) port) << 8 | (u32) RES_MAC,
+					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		} else {
+			/* use old unregister mac format */
+			set_param_l(&out_param, port);
+			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		}
 		return;
 	}
 	__mlx4_unregister_mac(dev, port, mac);
@@ -284,7 +303,7 @@
 	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
 	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -370,9 +389,12 @@
 	u64 out_param = 0;
 	int err;
 
+	if (vlan > 4095)
+		return -EINVAL;
+
 	if (mlx4_is_mfunc(dev)) {
-		set_param_l(&out_param, port);
-		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+		err = mlx4_cmd_imm(dev, vlan, &out_param,
+				   ((u32) port) << 8 | (u32) RES_VLAN,
 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 		if (!err)
@@ -384,23 +406,26 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
 
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
 {
 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+	int index;
+
+	mutex_lock(&table->mutex);
+	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
+		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
+		goto out;
+	}
 
 	if (index < MLX4_VLAN_REGULAR) {
 		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
-		return;
-	}
-
-	mutex_lock(&table->mutex);
-	if (!table->refs[index]) {
-		mlx4_warn(dev, "No vlan entry for index %d\n", index);
 		goto out;
 	}
+
 	if (--table->refs[index]) {
-		mlx4_dbg(dev, "Have more references for index %d,"
-			 "no need to modify vlan table\n", index);
+		mlx4_dbg(dev, "Have %d more references for index %d, "
+			 "no need to modify vlan table\n", table->refs[index],
+			 index);
 		goto out;
 	}
 	table->entries[index] = 0;
@@ -410,23 +435,19 @@
 	mutex_unlock(&table->mutex);
 }
 
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
 {
-	u64 in_param = 0;
-	int err;
+	u64 out_param = 0;
 
 	if (mlx4_is_mfunc(dev)) {
-		set_param_l(&in_param, port);
-		err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
-			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
-			       MLX4_CMD_WRAPPED);
-		if (!err)
-			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
-					index);
-
+		(void) mlx4_cmd_imm(dev, vlan, &out_param,
+				    ((u32) port) << 8 | (u32) RES_VLAN,
+				    RES_OP_RESERVE_AND_MAP,
+				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+				    MLX4_CMD_WRAPPED);
 		return;
 	}
-	__mlx4_unregister_vlan(dev, port, index);
+	__mlx4_unregister_vlan(dev, port, vlan);
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
@@ -448,8 +469,6 @@
 
 	inbuf = inmailbox->buf;
 	outbuf = outmailbox->buf;
-	memset(inbuf, 0, 256);
-	memset(outbuf, 0, 256);
 	inbuf[0] = 1;
 	inbuf[1] = 1;
 	inbuf[2] = 1;
@@ -632,8 +651,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, 256);
-
 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
 
 	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -671,8 +688,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	context->flags = SET_PORT_GEN_ALL_VALID;
 	context->mtu = cpu_to_be16(mtu);
 	context->pptx = (pptx * (!pfctx)) << 7;
@@ -706,8 +721,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	context->base_qpn = cpu_to_be32(base_qpn);
 	context->n_mac = dev->caps.log_num_macs;
 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -740,8 +753,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
 	for (i = 0; i < MLX4_NUM_UP; i += 2)
 		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
 
@@ -767,7 +778,6 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
 
 	for (i = 0; i < MLX4_NUM_TC; i++) {
 		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e891b05..2715e61 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -480,8 +480,7 @@
 	*/
 
 	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-			       (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
-			       16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
+			       (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
 			       reserved_from_top);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd68763..2f3f2bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -55,6 +55,14 @@
 	u8 port;
 };
 
+struct vlan_res {
+	struct list_head list;
+	u16 vlan;
+	int ref_count;
+	int vlan_index;
+	u8 port;
+};
+
 struct res_common {
 	struct list_head	list;
 	struct rb_node		node;
@@ -102,7 +110,14 @@
 	int			local_qpn;
 	atomic_t		ref_count;
 	u32			qpc_flags;
+	/* QP params saved before VST enforcement, for restoring on VGT */
 	u8			sched_queue;
+	__be32			param3;
+	u8			vlan_control;
+	u8			fvl_rx;
+	u8			pri_path_fl;
+	u8			vlan_index;
+	u8			feup;
 };
 
 enum res_mtt_states {
@@ -266,6 +281,7 @@
 	case RES_MPT: return "RES_MPT";
 	case RES_MTT: return "RES_MTT";
 	case RES_MAC: return  "RES_MAC";
+	case RES_VLAN: return  "RES_VLAN";
 	case RES_EQ: return "RES_EQ";
 	case RES_COUNTER: return "RES_COUNTER";
 	case RES_FS_RULE: return "RES_FS_RULE";
@@ -274,10 +290,139 @@
 	};
 }
 
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
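+
+/*
+ * Admission sketch with made-up numbers: quota = 100, guaranteed = 20,
+ * allocated = 15, count = 10.  The request ends above the guarantee, so
+ * 5 of the 10 units must come from the shared free pool; it is granted
+ * only if free - 5 > reserved, i.e. the pool can spare them without
+ * eating into other slaves' unused guarantees.
+ */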
+static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
+				      enum mlx4_resource res_type, int count,
+				      int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct resource_allocator *res_alloc =
+		&priv->mfunc.master.res_tracker.res_alloc[res_type];
+	int err = -EINVAL;
+	int allocated, free, reserved, guaranteed, from_free;
+
+	if (slave > dev->num_vfs)
+		return -EINVAL;
+
+	spin_lock(&res_alloc->alloc_lock);
+	allocated = (port > 0) ?
+		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+		res_alloc->allocated[slave];
+	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
+		res_alloc->res_free;
+	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
+		res_alloc->res_reserved;
+	guaranteed = res_alloc->guaranteed[slave];
+
+	if (allocated + count > res_alloc->quota[slave])
+		goto out;
+
+	if (allocated + count <= guaranteed) {
+		err = 0;
+	} else {
+		/* part of the request may need to come from the shared free area */
+		if (guaranteed - allocated > 0)
+			from_free = count - (guaranteed - allocated);
+		else
+			from_free = count;
+
+		if (free - from_free > reserved)
+			err = 0;
+	}
+
+	if (!err) {
+		/* grant the request */
+		if (port > 0) {
+			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+			res_alloc->res_port_free[port - 1] -= count;
+		} else {
+			res_alloc->allocated[slave] += count;
+			res_alloc->res_free -= count;
+		}
+	}
+
+out:
+	spin_unlock(&res_alloc->alloc_lock);
+	return err;
+}
+
+static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
+				    enum mlx4_resource res_type, int count,
+				    int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct resource_allocator *res_alloc =
+		&priv->mfunc.master.res_tracker.res_alloc[res_type];
+
+	if (slave > dev->num_vfs)
+		return;
+
+	spin_lock(&res_alloc->alloc_lock);
+	if (port > 0) {
+		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+		res_alloc->res_port_free[port - 1] += count;
+	} else {
+		res_alloc->allocated[slave] -= count;
+		res_alloc->res_free += count;
+	}
+
+	spin_unlock(&res_alloc->alloc_lock);
+}
+
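+/*
+ * Quota sketch: each pool is split in half.  The guaranteed half is
+ * divided evenly among the PF and all VFs; each function's quota lets
+ * it additionally draw on the whole shared half.  E.g. num_instances =
+ * 1024 and num_vfs = 3 (four functions) gives guaranteed =
+ * 1024 / (2 * 4) = 128 each (512 reserved in total) and quota =
+ * 512 + 128 = 640.
+ */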
+static inline void initialize_res_quotas(struct mlx4_dev *dev,
+					 struct resource_allocator *res_alloc,
+					 enum mlx4_resource res_type,
+					 int vf, int num_instances)
+{
+	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
+	if (vf == mlx4_master_func_num(dev)) {
+		res_alloc->res_free = num_instances;
+		if (res_type == RES_MTT) {
+			/* reserved mtts will be taken out of the PF allocation */
+			res_alloc->res_free += dev->caps.reserved_mtts;
+			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
+			res_alloc->quota[vf] += dev->caps.reserved_mtts;
+		}
+	}
+}
+
+void mlx4_init_quotas(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int pf;
+
+	/* quotas for VFs are initialized in mlx4_slave_cap */
+	if (mlx4_is_slave(dev))
+		return;
+
+	if (!mlx4_is_mfunc(dev)) {
+		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
+			mlx4_num_reserved_sqps(dev);
+		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
+		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
+		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
+		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
+		return;
+	}
+
+	pf = mlx4_master_func_num(dev);
+	dev->quotas.qp =
+		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
+	dev->quotas.cq =
+		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
+	dev->quotas.srq =
+		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
+	dev->quotas.mtt =
+		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
+	dev->quotas.mpt =
+		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+}
+
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int i;
+	int i, j;
 	int t;
 
 	priv->mfunc.master.res_tracker.slave_list =
@@ -298,8 +443,105 @@
 	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
 		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
 
+	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+		struct resource_allocator *res_alloc =
+			&priv->mfunc.master.res_tracker.res_alloc[i];
+		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+		if (i == RES_MAC || i == RES_VLAN)
+			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
+						       (dev->num_vfs + 1) * sizeof(int),
+							GFP_KERNEL);
+		else
+			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+
+		if (!res_alloc->quota || !res_alloc->guaranteed ||
+		    !res_alloc->allocated)
+			goto no_mem_err;
+
+		spin_lock_init(&res_alloc->alloc_lock);
+		for (t = 0; t < dev->num_vfs + 1; t++) {
+			switch (i) {
+			case RES_QP:
+				initialize_res_quotas(dev, res_alloc, RES_QP,
+						      t, dev->caps.num_qps -
+						      dev->caps.reserved_qps -
+						      mlx4_num_reserved_sqps(dev));
+				break;
+			case RES_CQ:
+				initialize_res_quotas(dev, res_alloc, RES_CQ,
+						      t, dev->caps.num_cqs -
+						      dev->caps.reserved_cqs);
+				break;
+			case RES_SRQ:
+				initialize_res_quotas(dev, res_alloc, RES_SRQ,
+						      t, dev->caps.num_srqs -
+						      dev->caps.reserved_srqs);
+				break;
+			case RES_MPT:
+				initialize_res_quotas(dev, res_alloc, RES_MPT,
+						      t, dev->caps.num_mpts -
+						      dev->caps.reserved_mrws);
+				break;
+			case RES_MTT:
+				initialize_res_quotas(dev, res_alloc, RES_MTT,
+						      t, dev->caps.num_mtts -
+						      dev->caps.reserved_mtts);
+				break;
+			case RES_MAC:
+				if (t == mlx4_master_func_num(dev)) {
+					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+					res_alloc->guaranteed[t] = 2;
+					for (j = 0; j < MLX4_MAX_PORTS; j++)
+						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+				} else {
+					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+					res_alloc->guaranteed[t] = 2;
+				}
+				break;
+			case RES_VLAN:
+				if (t == mlx4_master_func_num(dev)) {
+					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
+					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
+					for (j = 0; j < MLX4_MAX_PORTS; j++)
+						res_alloc->res_port_free[j] =
+							res_alloc->quota[t];
+				} else {
+					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
+					res_alloc->guaranteed[t] = 0;
+				}
+				break;
+			case RES_COUNTER:
+				res_alloc->quota[t] = dev->caps.max_counters;
+				res_alloc->guaranteed[t] = 0;
+				if (t == mlx4_master_func_num(dev))
+					res_alloc->res_free = res_alloc->quota[t];
+				break;
+			default:
+				break;
+			}
+			if (i == RES_MAC || i == RES_VLAN) {
+				for (j = 0; j < MLX4_MAX_PORTS; j++)
+					res_alloc->res_port_rsvd[j] +=
+						res_alloc->guaranteed[t];
+			} else {
+				res_alloc->res_reserved += res_alloc->guaranteed[t];
+			}
+		}
+	}
 	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
-	return 0 ;
+	return 0;
+
+no_mem_err:
+	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+	}
+	return -ENOMEM;
 }
 
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +551,28 @@
 	int i;
 
 	if (priv->mfunc.master.res_tracker.slave_list) {
-		if (type != RES_TR_FREE_STRUCTS_ONLY)
-			for (i = 0 ; i < dev->num_slaves; i++)
+		if (type != RES_TR_FREE_STRUCTS_ONLY) {
+			for (i = 0; i < dev->num_slaves; i++) {
 				if (type == RES_TR_FREE_ALL ||
 				    dev->caps.function != i)
 					mlx4_delete_all_resources_for_slave(dev, i);
+			}
+			/* free master's vlans */
+			i = dev->caps.function;
+			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+			rem_slave_vlans(dev, i);
+			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+		}
 
 		if (type != RES_TR_FREE_SLAVES_ONLY) {
+			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+			}
 			kfree(priv->mfunc.master.res_tracker.slave_list);
 			priv->mfunc.master.res_tracker.slave_list = NULL;
 		}
@@ -1229,12 +1486,19 @@
 	case RES_OP_RESERVE:
 		count = get_param_l(&in_param);
 		align = get_param_h(&in_param);
-		err = __mlx4_qp_reserve_range(dev, count, align, &base);
+		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
 		if (err)
 			return err;
 
+		err = __mlx4_qp_reserve_range(dev, count, align, &base);
+		if (err) {
+			mlx4_release_resource(dev, slave, RES_QP, count, 0);
+			return err;
+		}
+
 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
 		if (err) {
+			mlx4_release_resource(dev, slave, RES_QP, count, 0);
 			__mlx4_qp_release_range(dev, base, count);
 			return err;
 		}
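
Every allocation wrapper in this file now follows the same three-step shape
shown in the QP hunk above: grant quota, allocate the hardware resource,
record it in the tracker, releasing in reverse order on failure. A generic
skeleton, where hw_alloc()/hw_free() stand in for the per-resource helpers:

	err = mlx4_grant_resource(dev, slave, type, n, 0); /* 1: quota */
	if (err)
		return err;
	err = hw_alloc();                                  /* 2: hardware */
	if (err) {
		mlx4_release_resource(dev, slave, type, n, 0);
		return err;
	}
	err = add_res_range(dev, slave, id, n, type, 0);   /* 3: tracking */
	if (err) {
		mlx4_release_resource(dev, slave, type, n, 0);
		hw_free();
	}
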
@@ -1282,15 +1546,24 @@
 		return err;
 
 	order = get_param_l(&in_param);
+
+	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
+	if (err)
+		return err;
+
 	base = __mlx4_alloc_mtt_range(dev, order);
-	if (base == -1)
+	if (base == -1) {
+		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
 		return -ENOMEM;
+	}
 
 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
-	if (err)
+	if (err) {
+		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
 		__mlx4_free_mtt_range(dev, base, order);
-	else
+	} else {
 		set_param_l(out_param, base);
+	}
 
 	return err;
 }
@@ -1305,13 +1578,20 @@
 
 	switch (op) {
 	case RES_OP_RESERVE:
-		index = __mlx4_mpt_reserve(dev);
-		if (index == -1)
+		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
+		if (err)
 			break;
+
+		index = __mlx4_mpt_reserve(dev);
+		if (index == -1) {
+			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
+			break;
+		}
 		id = index & mpt_mask(dev);
 
 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
 		if (err) {
+			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
 			__mlx4_mpt_release(dev, index);
 			break;
 		}
@@ -1345,12 +1625,19 @@
 
 	switch (op) {
 	case RES_OP_RESERVE_AND_MAP:
-		err = __mlx4_cq_alloc_icm(dev, &cqn);
+		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
 		if (err)
 			break;
 
+		err = __mlx4_cq_alloc_icm(dev, &cqn);
+		if (err) {
+			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
+			break;
+		}
+
 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
 		if (err) {
+			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
 			__mlx4_cq_free_icm(dev, cqn);
 			break;
 		}
@@ -1373,12 +1660,19 @@
 
 	switch (op) {
 	case RES_OP_RESERVE_AND_MAP:
-		err = __mlx4_srq_alloc_icm(dev, &srqn);
+		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
 		if (err)
 			break;
 
+		err = __mlx4_srq_alloc_icm(dev, &srqn);
+		if (err) {
+			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
+			break;
+		}
+
 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
 		if (err) {
+			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
 			__mlx4_srq_free_icm(dev, srqn);
 			break;
 		}
@@ -1399,9 +1693,13 @@
 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
 	struct mac_res *res;
 
+	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
+		return -EINVAL;
 	res = kzalloc(sizeof *res, GFP_KERNEL);
-	if (!res)
+	if (!res) {
+		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
 		return -ENOMEM;
+	}
 	res->mac = mac;
 	res->port = (u8) port;
 	list_add_tail(&res->list,
@@ -1421,6 +1719,7 @@
 	list_for_each_entry_safe(res, tmp, mac_list, list) {
 		if (res->mac == mac && res->port == (u8) port) {
 			list_del(&res->list);
+			mlx4_release_resource(dev, slave, RES_MAC, 1, port);
 			kfree(res);
 			break;
 		}
@@ -1438,12 +1737,13 @@
 	list_for_each_entry_safe(res, tmp, mac_list, list) {
 		list_del(&res->list);
 		__mlx4_unregister_mac(dev, res->port, res->mac);
+		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
 		kfree(res);
 	}
 }
 
 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
-			 u64 in_param, u64 *out_param)
+			 u64 in_param, u64 *out_param, int in_port)
 {
 	int err = -EINVAL;
 	int port;
@@ -1452,7 +1752,7 @@
 	if (op != RES_OP_RESERVE_AND_MAP)
 		return err;
 
-	port = get_param_l(out_param);
+	port = !in_port ? get_param_l(out_param) : in_port;
 	mac = in_param;
 
 	err = __mlx4_register_mac(dev, port, mac);
@@ -1469,12 +1769,114 @@
 	return err;
 }
 
-static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
-			 u64 in_param, u64 *out_param)
+static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+			     int port, int vlan_index)
 {
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *vlan_list =
+		&tracker->slave_list[slave].res_list[RES_VLAN];
+	struct vlan_res *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, vlan_list, list) {
+		if (res->vlan == vlan && res->port == (u8) port) {
+			/* vlan found. update ref count */
+			++res->ref_count;
+			return 0;
+		}
+	}
+
+	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
+		return -EINVAL;
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res) {
+		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
+		return -ENOMEM;
+	}
+	res->vlan = vlan;
+	res->port = (u8) port;
+	res->vlan_index = vlan_index;
+	res->ref_count = 1;
+	list_add_tail(&res->list,
+		      &tracker->slave_list[slave].res_list[RES_VLAN]);
 	return 0;
 }
 
+static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+				int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *vlan_list =
+		&tracker->slave_list[slave].res_list[RES_VLAN];
+	struct vlan_res *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, vlan_list, list) {
+		if (res->vlan == vlan && res->port == (u8) port) {
+			if (!--res->ref_count) {
+				list_del(&res->list);
+				mlx4_release_resource(dev, slave, RES_VLAN,
+						      1, port);
+				kfree(res);
+			}
+			break;
+		}
+	}
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *vlan_list =
+		&tracker->slave_list[slave].res_list[RES_VLAN];
+	struct vlan_res *res, *tmp;
+	int i;
+
+	list_for_each_entry_safe(res, tmp, vlan_list, list) {
+		list_del(&res->list);
+		/* drop the vlan once for each reference this slave took */
+		for (i = 0; i < res->ref_count; i++)
+			__mlx4_unregister_vlan(dev, res->port, res->vlan);
+		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
+		kfree(res);
+	}
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			  u64 in_param, u64 *out_param, int in_port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	int err;
+	u16 vlan;
+	int vlan_index;
+	int port;
+
+	port = !in_port ? get_param_l(out_param) : in_port;
+
+	if (!port || op != RES_OP_RESERVE_AND_MAP)
+		return -EINVAL;
+
+	/* vlan reg/unreg was a NOP for older kernels; keep that behavior here */
+	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
+		slave_state[slave].old_vlan_api = true;
+		return 0;
+	}
+
+	vlan = (u16) in_param;
+
+	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
+	if (!err) {
+		set_param_l(out_param, (u32) vlan_index);
+		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
+		if (err)
+			__mlx4_unregister_vlan(dev, port, vlan);
+	}
+	return err;
+}
+
 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 			     u64 in_param, u64 *out_param)
 {
@@ -1484,15 +1886,23 @@
 	if (op != RES_OP_RESERVE)
 		return -EINVAL;
 
-	err = __mlx4_counter_alloc(dev, &index);
+	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
 	if (err)
 		return err;
 
+	err = __mlx4_counter_alloc(dev, &index);
+	if (err) {
+		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+		return err;
+	}
+
 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
-	if (err)
+	if (err) {
 		__mlx4_counter_free(dev, index);
-	else
+		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+	} else {
 		set_param_l(out_param, index);
+	}
 
 	return err;
 }
@@ -1528,7 +1938,7 @@
 	int err;
 	int alop = vhcr->op_modifier;
 
-	switch (vhcr->in_modifier) {
+	switch (vhcr->in_modifier & 0xFF) {
 	case RES_QP:
 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
 				   vhcr->in_param, &vhcr->out_param);
@@ -1556,12 +1966,14 @@
 
 	case RES_MAC:
 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
-				    vhcr->in_param, &vhcr->out_param);
+				    vhcr->in_param, &vhcr->out_param,
+				    (vhcr->in_modifier >> 8) & 0xFF);
 		break;
 
 	case RES_VLAN:
 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
-				    vhcr->in_param, &vhcr->out_param);
+				     vhcr->in_param, &vhcr->out_param,
+				     (vhcr->in_modifier >> 8) & 0xFF);
 		break;
 
 	case RES_COUNTER:
@@ -1597,6 +2009,7 @@
 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
 		if (err)
 			break;
+		mlx4_release_resource(dev, slave, RES_QP, count, 0);
 		__mlx4_qp_release_range(dev, base, count);
 		break;
 	case RES_OP_MAP_ICM:
@@ -1634,8 +2047,10 @@
 	base = get_param_l(&in_param);
 	order = get_param_h(&in_param);
 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
-	if (!err)
+	if (!err) {
+		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
 		__mlx4_free_mtt_range(dev, base, order);
+	}
 	return err;
 }
 
@@ -1660,6 +2075,7 @@
 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
 		if (err)
 			break;
+		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
 		__mlx4_mpt_release(dev, index);
 		break;
 	case RES_OP_MAP_ICM:
@@ -1694,6 +2110,7 @@
 		if (err)
 			break;
 
+		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
 		__mlx4_cq_free_icm(dev, cqn);
 		break;
 
@@ -1718,6 +2135,7 @@
 		if (err)
 			break;
 
+		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
 		__mlx4_srq_free_icm(dev, srqn);
 		break;
 
@@ -1730,14 +2148,14 @@
 }
 
 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
-			    u64 in_param, u64 *out_param)
+			    u64 in_param, u64 *out_param, int in_port)
 {
 	int port;
 	int err = 0;
 
 	switch (op) {
 	case RES_OP_RESERVE_AND_MAP:
-		port = get_param_l(out_param);
+		port = !in_port ? get_param_l(out_param) : in_port;
 		mac_del_from_slave(dev, slave, in_param, port);
 		__mlx4_unregister_mac(dev, port, in_param);
 		break;
@@ -1751,9 +2169,27 @@
 }
 
 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
-			    u64 in_param, u64 *out_param)
+			    u64 in_param, u64 *out_param, int port)
 {
-	return 0;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	int err = 0;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		if (slave_state[slave].old_vlan_api)
+			return 0;
+		if (!port)
+			return -EINVAL;
+		vlan_del_from_slave(dev, slave, in_param, port);
+		__mlx4_unregister_vlan(dev, port, in_param);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
 }
 
 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1771,6 +2207,7 @@
 		return err;
 
 	__mlx4_counter_free(dev, index);
+	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
 
 	return err;
 }
@@ -1803,7 +2240,7 @@
 	int err = -EINVAL;
 	int alop = vhcr->op_modifier;
 
-	switch (vhcr->in_modifier) {
+	switch (vhcr->in_modifier & 0xFF) {
 	case RES_QP:
 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
 				  vhcr->in_param);
@@ -1831,12 +2268,14 @@
 
 	case RES_MAC:
 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
-				   vhcr->in_param, &vhcr->out_param);
+				   vhcr->in_param, &vhcr->out_param,
+				   (vhcr->in_modifier >> 8) & 0xFF);
 		break;
 
 	case RES_VLAN:
 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
-				   vhcr->in_param, &vhcr->out_param);
+				    vhcr->in_param, &vhcr->out_param,
+				    (vhcr->in_modifier >> 8) & 0xFF);
 		break;
 
 	case RES_COUNTER:
@@ -2136,6 +2575,12 @@
 		return err;
 	qp->local_qpn = local_qpn;
 	qp->sched_queue = 0;
+	qp->param3 = 0;
+	qp->vlan_control = 0;
+	qp->fvl_rx = 0;
+	qp->pri_path_fl = 0;
+	qp->vlan_index = 0;
+	qp->feup = 0;
 	qp->qpc_flags = be32_to_cpu(qpc->flags);
 
 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -2862,6 +3307,12 @@
 	int qpn = vhcr->in_modifier & 0x7fffff;
 	struct res_qp *qp;
 	u8 orig_sched_queue;
+	__be32	orig_param3 = qpc->param3;
+	u8 orig_vlan_control = qpc->pri_path.vlan_control;
+	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+	u8 orig_pri_path_fl = qpc->pri_path.fl;
+	u8 orig_vlan_index = qpc->pri_path.vlan_index;
+	u8 orig_feup = qpc->pri_path.feup;
 
 	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
 	if (err)
@@ -2889,9 +3340,15 @@
 	 * essentially the QOS value provided by the VF. This will be useful
 	 * if we allow dynamic changes from VST back to VGT
 	 */
-	if (!err)
+	if (!err) {
 		qp->sched_queue = orig_sched_queue;
-
+		qp->param3 = orig_param3;
+		qp->vlan_control = orig_vlan_control;
+		qp->fvl_rx = orig_fvl_rx;
+		qp->pri_path_fl = orig_pri_path_fl;
+		qp->vlan_index = orig_vlan_index;
+		qp->feup = orig_feup;
+	}
 	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
@@ -3498,6 +3955,11 @@
 						 &tracker->res_tree[RES_QP]);
 					list_del(&qp->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					if (!valid_reserved(dev, slave, qpn)) {
+						__mlx4_qp_release_range(dev, qpn, 1);
+						mlx4_release_resource(dev, slave,
+								      RES_QP, 1, 0);
+					}
 					kfree(qp);
 					state = 0;
 					break;
@@ -3569,6 +4031,8 @@
 						 &tracker->res_tree[RES_SRQ]);
 					list_del(&srq->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					mlx4_release_resource(dev, slave,
+							      RES_SRQ, 1, 0);
 					kfree(srq);
 					state = 0;
 					break;
@@ -3635,6 +4099,8 @@
 						 &tracker->res_tree[RES_CQ]);
 					list_del(&cq->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					mlx4_release_resource(dev, slave,
+							      RES_CQ, 1, 0);
 					kfree(cq);
 					state = 0;
 					break;
@@ -3698,6 +4164,8 @@
 						 &tracker->res_tree[RES_MPT]);
 					list_del(&mpt->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					mlx4_release_resource(dev, slave,
+							      RES_MPT, 1, 0);
 					kfree(mpt);
 					state = 0;
 					break;
@@ -3767,6 +4235,8 @@
 						 &tracker->res_tree[RES_MTT]);
 					list_del(&mtt->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					mlx4_release_resource(dev, slave, RES_MTT,
+							      1 << mtt->order, 0);
 					kfree(mtt);
 					state = 0;
 					break;
@@ -3925,6 +4395,7 @@
 			list_del(&counter->com.list);
 			kfree(counter);
 			__mlx4_counter_free(dev, index);
+			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
 		}
 	}
 	spin_unlock_irq(mlx4_tlock(dev));
@@ -3964,7 +4435,7 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
-	/*VLAN*/
+	rem_slave_vlans(dev, slave);
 	rem_slave_macs(dev, slave);
 	rem_slave_fs_rule(dev, slave);
 	rem_slave_qps(dev, slave);
@@ -3991,13 +4462,20 @@
 		&tracker->slave_list[work->slave].res_list[RES_QP];
 	struct res_qp *qp;
 	struct res_qp *tmp;
-	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+	u64 qp_path_mask_vlan_ctrl =
+		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
 
 	int err;
@@ -4029,9 +4507,7 @@
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
-	upd_context->qp_context.pri_path.vlan_control = vlan_control;
-	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4049,10 +4525,35 @@
 				spin_lock_irq(mlx4_tlock(dev));
 				continue;
 			}
-			upd_context->qp_context.pri_path.sched_queue =
-				qp->sched_queue & 0xC7;
-			upd_context->qp_context.pri_path.sched_queue |=
-				((work->qos & 0x7) << 3);
+			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+			else
+				upd_context->primary_addr_path_mask =
+					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+			if (work->vlan_id == MLX4_VGT) {
+				upd_context->qp_context.param3 = qp->param3;
+				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+				upd_context->qp_context.pri_path.feup = qp->feup;
+				upd_context->qp_context.pri_path.sched_queue =
+					qp->sched_queue;
+			} else {
+				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+				upd_context->qp_context.pri_path.vlan_control = vlan_control;
+				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+				upd_context->qp_context.pri_path.fvl_rx =
+					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+				upd_context->qp_context.pri_path.fl =
+					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+				upd_context->qp_context.pri_path.feup =
+					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+				upd_context->qp_context.pri_path.sched_queue =
+					qp->sched_queue & 0xC7;
+				upd_context->qp_context.pri_path.sched_queue |=
+					((work->qos & 0x7) << 3);
+			}
 
 			err = mlx4_cmd(dev, mailbox->dma,
 				       qp->local_qpn & 0xffffff,
@@ -4081,7 +4582,7 @@
 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
 	    NO_INDX != work->orig_vlan_ix)
 		__mlx4_unregister_vlan(&work->priv->dev, work->port,
-				       work->orig_vlan_ix);
+				       work->orig_vlan_id);
 out:
 	kfree(work);
 	return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269..8fdf237 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 
@@ -188,8 +189,6 @@
 	}
 
 	srq_context = mailbox->buf;
-	memset(srq_context, 0, sizeof *srq_context);
-
 	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
 						      srq->srqn);
 	srq_context->logstride          = srq->wqe_shift - 4;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 075f4e2..c83d16d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1248,7 +1248,7 @@
 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
 	ks_wrreg16(ks, KS_MARL, w);
 
-	memcpy(ks->mac_addr, data, 6);
+	memcpy(ks->mac_addr, data, ETH_ALEN);
 
 	if (ks->enabled)
 		ks_start_rx(ks);
@@ -1651,7 +1651,7 @@
 	}
 	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
 
-	memcpy(netdev->dev_addr, ks->mac_addr, 6);
+	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
 
 	ks_set_mac(ks, netdev->dev_addr);
 
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 8ebc352..ddd252a 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7150,8 +7150,6 @@
 	struct platform_info *info = pci_get_drvdata(pdev);
 	struct dev_info *hw_priv = &info->dev_info;
 
-	pci_set_drvdata(pdev, NULL);
-
 	release_mem_region(pci_resource_start(pdev, 0),
 		pci_resource_len(pdev, 0));
 	for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@
 
 static char pcidev_name[] = "ksz884xp";
 
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
 	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index ea54d95..cbd0133 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -26,7 +26,6 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
 
 #include "moxart_ether.h"
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 149355b..68026f7 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -934,7 +934,7 @@
 
 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
 {
-	int rc = true;
+	bool rc = true;
 	spin_lock(&ss->lock);
 	if ((ss->state & SLICE_LOCKED)) {
 		WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@
 
 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
 {
-	int rc = true;
+	bool rc = true;
 	spin_lock_bh(&ss->lock);
 	if ((ss->state & SLICE_LOCKED)) {
 		ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@
 
 	/* Walk the multicast list, and add each address */
 	netdev_for_each_mc_addr(ha, dev) {
-		memcpy(data, &ha->addr, 6);
+		memcpy(data, &ha->addr, ETH_ALEN);
 		cmd.data0 = ntohl(data[0]);
 		cmd.data1 = ntohl(data[1]);
 		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@
 	}
 
 	/* change the dev structure */
-	memcpy(dev->dev_addr, sa->sa_data, 6);
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
 	return 0;
 }
 
@@ -4208,7 +4208,6 @@
 	set_fw_name(mgp, NULL, false);
 	free_netdev(netdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 	0x0008
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 7a5e295..64ec2a4 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -970,7 +970,6 @@
 
  err_ioremap:
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 
  err_pci_request_regions:
 	free_netdev(dev);
@@ -3220,7 +3219,6 @@
 	pci_release_regions (pdev);
 	iounmap(ioaddr);
 	free_netdev (dev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 4da172a..7007d21 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -264,6 +264,7 @@
 
 	lp = netdev_priv(dev);
 	lp->device = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	netdev_boot_setup_check(dev);
 
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 51b0094..9eeddbd 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8185,7 +8185,6 @@
 	free_shared_mem(sp);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 
 	return ret;
@@ -8221,7 +8220,6 @@
 	iounmap(sp->bar0);
 	iounmap(sp->bar1);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5a20eaf..f9876ea 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2072,6 +2072,10 @@
 				vdev->config.tx_steering_type;
 			vpath->fifo.ndev = vdev->ndev;
 			vpath->fifo.pdev = vdev->pdev;
+
+			u64_stats_init(&vpath->fifo.stats.syncp);
+			u64_stats_init(&vpath->ring.stats.syncp);
+
 			if (vdev->config.tx_steering_type)
 				vpath->fifo.txq =
 					netdev_get_tx_queue(vdev->ndev, i);
@@ -4739,7 +4743,6 @@
 _exit5:
 	vxge_device_unregister(hldev);
 _exit4:
-	pci_set_drvdata(pdev, NULL);
 	vxge_hw_device_terminate(hldev);
 	pci_disable_sriov(pdev);
 _exit3:
@@ -4782,7 +4785,6 @@
 		vxge_free_mac_add_list(&vdev->vpaths[i]);
 
 	vxge_device_unregister(hldev);
-	pci_set_drvdata(pdev, NULL);
 	/* Do not call pci_disable_sriov here, as it will break child devices */
 	vxge_hw_device_terminate(hldev);
 	iounmap(vdev->bar0);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 098b96d..2d045be 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5619,6 +5619,8 @@
 	spin_lock_init(&np->lock);
 	spin_lock_init(&np->hwstats_lock);
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
+	u64_stats_init(&np->swstats_rx_syncp);
+	u64_stats_init(&np->swstats_tx_syncp);
 
 	init_timer(&np->oom_kick);
 	np->oom_kick.data = (unsigned long) dev;
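
u64_stats_init() makes the seqcount behind the u64_stats helpers valid
before the first writer touches it (it is a real seqcount_init() on 32-bit
SMP and compiles away otherwise); the same fix appears in the vxge hunk
above. The reader loop it pairs with looks roughly like this; the statistic
field is illustrative:

	unsigned int start;
	u64 rx_packets;

	do {
		start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
		rx_packets = np->stat_rx_packets;
	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, start));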
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a061b93..ba3ca18 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1399,8 +1399,10 @@
 	}
 
 	if (pldat->dma_buff_base_v == 0) {
-		pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
-		pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (ret)
+			goto err_out_free_irq;
+
 		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
 
 		/* Allocate a chunk of memory for the DMA ethernet buffers
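
dma_coerce_mask_and_coherent() replaces the open-coded writes to dma_mask
and coherent_dma_mask, and unlike those writes it reports whether the
platform can actually satisfy the mask, which is why the converted call
sites now check the result:

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto err_out_free_irq; /* no usable 32-bit DMA mask */
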
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 622aa75..7dc3e9b 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1545,15 +1545,16 @@
 
 	mac = of_get_mac_address(pdev->dev.of_node);
 
-	if (mac && is_valid_ether_addr(mac))
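+	/* of_get_mac_address() returns NULL unless the address is valid */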
+	if (mac)
 		memcpy(netdev->dev_addr, mac, ETH_ALEN);
 	else
 		eth_hw_addr_random(netdev);
 
 	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (result)
+		goto err;
 
 	netif_carrier_off(netdev);
 	result = register_netdev(netdev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 6797b10..2a900307 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -653,38 +653,38 @@
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
-				       struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
-				       struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
-				       struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
-				       struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+			       struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 
 /* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
 
 /* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
-				  u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+			  u16 data);
 #endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index cac33e5..b6bdeb3 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1910,7 +1910,6 @@
 		iounmap(hmp->base);
 		free_netdev(dev);
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d28593b..07a890e 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -513,7 +513,6 @@
 err_out_unmap_tx:
         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@
 	pci_release_regions (pdev);
 
 	free_netdev (dev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 5b65356..dbaa49e 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1870,7 +1870,6 @@
 	pasemi_dma_free_chan(&mac->tx->chan);
 	pasemi_dma_free_chan(&mac->rx->chan);
 
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 }
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 32675e1..9adcdbb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.82"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@
 
 int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
 int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
-				int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
 
 extern const struct ethtool_ops netxen_nic_ethtool_ops;
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 32c7906..0c64c82 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -958,6 +958,7 @@
 #define NETXEN_PEG_HALT_STATUS2 	(NETXEN_CAM_RAM(0xac))
 #define NX_CRB_DEV_REF_COUNT		(NETXEN_CAM_RAM(0x138))
 #define NX_CRB_DEV_STATE		(NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY			(NETXEN_CAM_RAM(0x178))
 
 /* MiniDIMM related macros */
 #define NETXEN_DIMM_CAPABILITY		(NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 8375cbd..67efe75 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@
 
 	mac_req = (nx_mac_req_t *)&req.words[0];
 	mac_req->op = op;
-	memcpy(mac_req->mac_addr, addr, 6);
+	memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
 	return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 }
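The memcpy fix above swaps the magic length 6 for ETH_ALEN, the Ethernet MAC address length defined in <linux/if_ether.h>. A small illustration of the idiom; example_copy_mac is not a driver function:

	#include <linux/if_ether.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Copy a station address without a magic number. */
	static void example_copy_mac(u8 *dst, const u8 *src)
	{
		memcpy(dst, src, ETH_ALEN);	/* ETH_ALEN == 6 */
	}
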
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cbd75f9..3bec8cf 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@
 	return 0;
 }
 
+#define NETXEN_ULA_ADAPTER_KEY		(0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY	(0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+	u32 temp;
+
+	/* Print ULA info only once for an adapter */
+	if (adapter->portnum != 0)
+		return;
+
+	temp = NXRD32(adapter, NETXEN_ULA_KEY);
+	switch (temp) {
+	case NETXEN_ULA_ADAPTER_KEY:
+		dev_info(&adapter->pdev->dev, "ULA adapter");
+		break;
+	case NETXEN_NON_ULA_ADAPTER_KEY:
+		dev_info(&adapter->pdev->dev, "non ULA adapter");
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
 #ifdef CONFIG_PCIEAER
 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
 {
@@ -1561,6 +1587,8 @@
 		goto err_out_disable_msi;
 	}
 
+	netxen_read_ula_info(adapter);
+
 	err = netxen_setup_netdev(adapter, netdev);
 	if (err)
 		goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@
 	pci_release_regions(pdev);
 
 err_out_disable_pdev:
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 	return err;
 }
@@ -1661,7 +1688,6 @@
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	free_netdev(netdev);
 }
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 91a8fcd..0758b94 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@
 	pci_release_regions(pdev);
 err_out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_out:
 	return err;
 }
@@ -3939,7 +3938,6 @@
 
 	iounmap(qdev->mem_map_registers);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81bf836..631ea0a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID  "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 52
+#define QLCNIC_LINUX_VERSIONID  "5.3.52"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,8 +98,22 @@
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
 							+ MGMT_CMD_DESC_RESV)
 #define QLCNIC_MAX_TX_TIMEOUTS	2
-#define QLCNIC_MAX_TX_RINGS	8
-#define QLCNIC_MAX_SDS_RINGS	8
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING		1
+#define QLCNIC_DEF_SDS_RINGS		4
+#define QLCNIC_DEF_TX_RINGS		4
+#define QLCNIC_MAX_VNIC_TX_RINGS	4
+#define QLCNIC_MAX_VNIC_SDS_RINGS	4
+
+enum qlcnic_queue_type {
+	QLCNIC_TX_QUEUE = 1,
+	QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE	0xFF
+#define QLCNIC_DEFAULT_MODE	0x0
 
 /*
  * Following are the states of the Phantom. Phantom will set them and
@@ -533,6 +547,14 @@
 	char name[IFNAMSIZ + 12];
 } ____cacheline_internodealigned_in_smp;
 
+struct qlcnic_tx_queue_stats {
+	u64 xmit_on;
+	u64 xmit_off;
+	u64 xmit_called;
+	u64 xmit_finished;
+	u64 tx_bytes;
+};
+
 struct qlcnic_host_tx_ring {
 	int irq;
 	void __iomem *crb_intr_mask;
@@ -544,10 +566,7 @@
 	u32 sw_consumer;
 	u32 num_desc;
 
-	u64 xmit_on;
-	u64 xmit_off;
-	u64 xmit_called;
-	u64 xmit_finished;
+	struct qlcnic_tx_queue_stats tx_stats;
 
 	void __iomem *crb_cmd_producer;
 	struct cmd_desc_type0 *desc_head;
@@ -940,8 +959,6 @@
 #define QLCNIC_BEACON_EANBLE		0xC
 #define QLCNIC_BEACON_DISABLE		0xD
 
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS	4
-#define QLCNIC_DEF_NUM_TX_RINGS		4
 #define QLCNIC_MSIX_TBL_SPACE		8192
 #define QLCNIC_PCI_REG_MSIX_TBL 	0x44
 #define QLCNIC_MSIX_TBL_PGSIZE		4096
@@ -961,8 +978,7 @@
 #define __QLCNIC_SRIOV_CAPABLE		11
 #define __QLCNIC_MBX_POLL_ENABLE	12
 #define __QLCNIC_DIAG_MODE		13
-#define __QLCNIC_DCB_STATE		14
-#define __QLCNIC_DCB_IN_AEN		15
+#define __QLCNIC_MAINTENANCE_MODE	16
 
 #define QLCNIC_INTERRUPT_TEST		1
 #define QLCNIC_LOOPBACK_TEST		2
@@ -1013,7 +1029,6 @@
 	unsigned long state;
 	u32 flags;
 
-	int max_drv_tx_rings;
 	u16 num_txd;
 	u16 num_rxd;
 	u16 num_jumbo_rxd;
@@ -1021,7 +1036,13 @@
 	u16 max_jumbo_rxd;
 
 	u8 max_rds_rings;
-	u8 max_sds_rings;
+
+	u8 max_sds_rings; /* max sds rings supported by adapter */
+	u8 max_tx_rings;  /* max tx rings supported by adapter */
+
+	u8 drv_tx_rings;  /* max tx rings supported by driver */
+	u8 drv_sds_rings; /* max sds rings supported by driver */
+
 	u8 rx_csum;
 	u8 portnum;
 
@@ -1199,6 +1220,7 @@
 	u8	promisc_mode;
 	u8	offload_flags;
 	u8      pci_func;
+	u8      mac[ETH_ALEN];
 };
 
 struct qlcnic_eswitch {
@@ -1543,12 +1565,13 @@
 
 /* Functions from qlcnic_main.c */
 int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
-int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1641,19 +1664,18 @@
 static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
 					     struct net_device *netdev)
 {
-	int err, tx_q;
+	int err;
 
-	tx_q = adapter->max_drv_tx_rings;
+	netdev->num_tx_queues = adapter->drv_tx_rings;
+	netdev->real_num_tx_queues = adapter->drv_tx_rings;
 
-	netdev->num_tx_queues = tx_q;
-	netdev->real_num_tx_queues = tx_q;
-
-	err = netif_set_real_num_tx_queues(netdev, tx_q);
+	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
 	if (err)
 		dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
-			tx_q);
+			adapter->drv_tx_rings);
 	else
-		dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+		dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
+			 adapter->drv_tx_rings);
 
 	return err;
 }
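qlcnic_set_real_num_queues() now takes the configured count from the renamed drv_tx_rings field and feeds it to netif_set_real_num_tx_queues(), which caps how many Tx queues the stack actually uses at runtime (the count must stay at or below what was reserved at alloc_etherdev_mq() time). A hedged sketch of the core call, with a hypothetical helper name:

	#include <linux/netdevice.h>

	/* Trim the active Tx queue count; 'active' must not exceed what
	 * was reserved when the net_device was allocated.
	 */
	static int example_set_tx_queues(struct net_device *netdev,
					 unsigned int active)
	{
		int err;

		err = netif_set_real_num_tx_queues(netdev, active);
		if (err)
			netdev_err(netdev, "failed to set %u Tx queues\n",
				   active);
		return err;
	}
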
@@ -1695,7 +1717,7 @@
 	int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
 	void (*get_ocm_win) (struct qlcnic_hardware_context *);
 	int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
-	int (*setup_intr) (struct qlcnic_adapter *, u8, int);
+	int (*setup_intr) (struct qlcnic_adapter *);
 	int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
 			      struct qlcnic_adapter *, u32);
 	int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1766,10 +1788,9 @@
 	return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
 }
 
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
-				    u8 num_intr, int txq)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
 {
-	return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
+	return adapter->ahw->hw_ops->setup_intr(adapter);
 }
 
 static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -2005,7 +2026,7 @@
 static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
 {
 	test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
-	adapter->max_drv_tx_rings = 1;
+	adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
 }
 
 /* When operating in a multi tx mode, driver needs to write 0x1
@@ -2115,98 +2136,4 @@
 
 	return status;
 }
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->get_hw_capability)
-		return dcb->ops->get_hw_capability(adapter);
-
-	return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->free)
-		dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->attach)
-		return dcb->ops->attach(adapter);
-
-	return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->query_hw_capability)
-		return dcb->ops->query_hw_capability(adapter, buf);
-
-	return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->get_info)
-		dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->query_cee_param)
-		return dcb->ops->query_cee_param(adapter, buf, type);
-
-	return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->get_cee_cfg)
-		return dcb->ops->get_cee_cfg(adapter);
-
-	return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->register_aen)
-		dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
-					 void *msg)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->handle_aen)
-		dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (dcb && dcb->ops->init_dcbnl_ops)
-		dcb->ops->init_dcbnl_ops(adapter);
-}
 #endif				/* __QLCNIC_H_ */
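The header changes above split ring accounting into two pairs: max_tx_rings/max_sds_rings describe what the adapter (in its current operating mode) supports, while drv_tx_rings/drv_sds_rings hold what the driver actually configured. Requested counts are validated against the max_* limits before being stored; the sketch below shows the shape of such a check (the real qlcnic_validate_rings() is declared above, but this body is illustrative only):

	#include <linux/errno.h>
	#include <linux/log2.h>

	static int example_validate_rings(struct qlcnic_adapter *adapter,
					  u32 ring_cnt, int queue_type)
	{
		u32 max = (queue_type == QLCNIC_TX_QUEUE) ?
			  adapter->max_tx_rings : adapter->max_sds_rings;

		/* Ring counts must be nonzero powers of two within the
		 * adapter limit.
		 */
		if (!ring_cnt || !is_power_of_2(ring_cnt) || ring_cnt > max)
			return -EINVAL;

		return 0;
	}
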
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index ace217c..b1cb0ff 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,7 +13,6 @@
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 
-#define QLCNIC_MAX_TX_QUEUES		1
 #define RSS_HASHTYPE_IP_TCP		0x3
 #define QLC_83XX_FW_MBX_CMD		0
 
@@ -268,20 +267,18 @@
 	}
 }
 
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
 {
 	int err, i, num_msix;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-	if (!num_intr)
-		num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
-	num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
-					      num_intr));
+	num_msix = adapter->drv_sds_rings;
+
 	/* account for AEN interrupt MSI-X based interrupts */
 	num_msix += 1;
 
 	if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
-		num_msix += adapter->max_drv_tx_rings;
+		num_msix += adapter->drv_tx_rings;
 
 	err = qlcnic_enable_msix(adapter, num_msix);
 	if (err == -ENOMEM)
@@ -325,7 +322,8 @@
 
 inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
 {
-	writel(1, adapter->tgt_mask_reg);
+	if (adapter->tgt_mask_reg)
+		writel(1, adapter->tgt_mask_reg);
 }
 
 /* Enable MSI-x and INT-x interrupts */
@@ -498,8 +496,11 @@
 		num_msix = 0;
 
 	msleep(20);
-	synchronize_irq(adapter->msix_entries[num_msix].vector);
-	free_irq(adapter->msix_entries[num_msix].vector, adapter);
+
+	if (adapter->msix_entries) {
+		synchronize_irq(adapter->msix_entries[num_msix].vector);
+		free_irq(adapter->msix_entries[num_msix].vector, adapter);
+	}
 }
 
 int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
@@ -760,6 +761,9 @@
 	int cmd_type, err, opcode;
 	unsigned long timeout;
 
+	if (!mbx)
+		return -EIO;
+
 	opcode = LSW(cmd->req.arg[0]);
 	cmd_type = cmd->type;
 	err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
@@ -902,7 +906,7 @@
 			 QLCNIC_MBX_RSP(event[0]));
 		break;
 	case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
-		qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
 		break;
 	default:
 		dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -979,14 +983,14 @@
 
 	sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
 	context_id = recv_ctx->context_id;
-	num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+	num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
 	ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
 				    QLCNIC_CMD_ADD_RCV_RINGS);
 	cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
 
 	/* set up status rings, mbx 2-81 */
 	index = 2;
-	for (i = 8; i < adapter->max_sds_rings; i++) {
+	for (i = 8; i < adapter->drv_sds_rings; i++) {
 		memset(&sds_mbx, 0, sds_mbx_size);
 		sds = &recv_ctx->sds_rings[i];
 		sds->consumer = 0;
@@ -1021,7 +1025,7 @@
 	mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
 	index = 0;
 	/* status descriptor ring */
-	for (i = 8; i < adapter->max_sds_rings; i++) {
+	for (i = 8; i < adapter->drv_sds_rings; i++) {
 		sds = &recv_ctx->sds_rings[i];
 		sds->crb_sts_consumer = ahw->pci_base0 +
 					mbx_out->host_csmr[index];
@@ -1079,10 +1083,10 @@
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	num_rds = adapter->max_rds_rings;
 
-	if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
-		num_sds = adapter->max_sds_rings;
+	if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+		num_sds = adapter->drv_sds_rings;
 	else
-		num_sds = QLCNIC_MAX_RING_SETS;
+		num_sds = QLCNIC_MAX_SDS_RINGS;
 
 	sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
 	rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
@@ -1183,7 +1187,7 @@
 		sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
 	}
 
-	if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+	if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
 		err = qlcnic_83xx_add_rings(adapter);
 out:
 	qlcnic_free_mbx_args(&cmd);
@@ -1239,9 +1243,9 @@
 	mbx.size = tx->num_desc;
 	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 		if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
-			msix_vector = adapter->max_sds_rings + ring;
+			msix_vector = adapter->drv_sds_rings + ring;
 		else
-			msix_vector = adapter->max_sds_rings - 1;
+			msix_vector = adapter->drv_sds_rings - 1;
 		msix_id = ahw->intr_tbl[msix_vector].id;
 	} else {
 		msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
@@ -1264,7 +1268,8 @@
 		qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
 
 	cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
-	cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
+	cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
 	buf = &cmd.req.arg[6];
 	memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
 	/* send the mailbox command*/
@@ -1279,7 +1284,7 @@
 	tx->ctx_id = mbx_out->ctx_id;
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
-		intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+		intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
 		tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
 	}
 	dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
@@ -1290,7 +1295,7 @@
 }
 
 static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
-				      int num_sds_ring)
+				      u8 num_sds_ring)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
@@ -1306,7 +1311,7 @@
 
 	qlcnic_detach(adapter);
 
-	adapter->max_sds_rings = 1;
+	adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
 	adapter->ahw->diag_test = test;
 	adapter->ahw->linkup = 0;
 
@@ -1320,7 +1325,7 @@
 	if (ret) {
 		qlcnic_detach(adapter);
 		if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
-			adapter->max_sds_rings = num_sds_ring;
+			adapter->drv_sds_rings = num_sds_ring;
 			qlcnic_attach(adapter);
 		}
 		netif_device_attach(netdev);
@@ -1333,7 +1338,7 @@
 	}
 
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
 			qlcnic_83xx_enable_intr(adapter, sds_ring);
 		}
@@ -1354,7 +1359,7 @@
 }
 
 static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
-					int max_sds_rings)
+				      u8 drv_sds_rings)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
@@ -1362,7 +1367,7 @@
 
 	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
 			qlcnic_83xx_disable_intr(adapter, sds_ring);
 			if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -1386,7 +1391,7 @@
 		}
 	}
 	adapter->ahw->diag_test = 0;
-	adapter->max_sds_rings = max_sds_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
 
 	if (qlcnic_attach(adapter))
 		goto out;
@@ -1648,7 +1653,9 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+	u8 drv_sds_rings = adapter->drv_sds_rings;
+	u8 drv_tx_rings = adapter->drv_tx_rings;
+	int ret = 0, loop = 0;
 
 	if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
 		netdev_warn(netdev,
@@ -1670,7 +1677,7 @@
 		    mode == QLCNIC_ILB_MODE ? "internal" : "external");
 
 	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
-					 max_sds_rings);
+					 drv_sds_rings);
 	if (ret)
 		goto fail_diag_alloc;
 
@@ -1708,10 +1715,11 @@
 	qlcnic_83xx_clear_lb_mode(adapter, mode);
 
 free_diag_res:
-	qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+	qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
 
 fail_diag_alloc:
-	adapter->max_sds_rings = max_sds_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
+	adapter->drv_tx_rings = drv_tx_rings;
 	qlcnic_release_diag_lock(adapter);
 	return ret;
 }
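The loopback path now snapshots both drv_sds_rings and drv_tx_rings before dropping into diagnostic mode and restores them on every exit path, where previously only the SDS count was preserved. The save/restore shape, as a sketch with error handling elided (for simplicity it forces both counts to a single ring):

	/* Diagnostics run on one ring; remember and restore the
	 * configured counts around the test. Illustrative only.
	 */
	static int example_run_diag(struct qlcnic_adapter *adapter)
	{
		u8 sds = adapter->drv_sds_rings;
		u8 tx = adapter->drv_tx_rings;
		int ret;

		adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
		adapter->drv_tx_rings = QLCNIC_SINGLE_RING;

		ret = 0;	/* ...allocate diag resources, run test... */

		adapter->drv_sds_rings = sds;
		adapter->drv_tx_rings = tx;
		return ret;
	}
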
@@ -1722,7 +1730,7 @@
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int temp;
 
-	netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n",
+	netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
 		    ahw->extend_lb_time);
 	temp = ahw->extend_lb_time * 1000;
 	*max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
@@ -2321,19 +2329,7 @@
 			i++;
 			memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
 			i = i + 3;
-			if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-				dev_info(dev, "id = %d active = %d type = %d\n"
-					 "\tport = %d min bw = %d max bw = %d\n"
-					 "\tmac_addr =  %pM\n", pci_info->id,
-					 pci_info->active, pci_info->type,
-					 pci_info->default_port,
-					 pci_info->tx_min_bw,
-					 pci_info->tx_max_bw, pci_info->mac);
 		}
-		if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-			dev_info(dev, "Max functions = %d, active functions = %d\n",
-				 ahw->max_pci_func, ahw->act_pci_func);
-
 	} else {
 		dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
 		err = -EIO;
@@ -3061,11 +3057,14 @@
 	int status = 0;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-	/* Get port configuration info */
-	status = qlcnic_83xx_get_port_info(adapter);
-	/* Get Link Status related info */
-	config = qlcnic_83xx_test_link(adapter);
-	ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+	if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+		/* Get port configuration info */
+		status = qlcnic_83xx_get_port_info(adapter);
+		/* Get Link Status related info */
+		config = qlcnic_83xx_test_link(adapter);
+		ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+	}
+
 	/* hard code until there is a way to get it from flash */
 	ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
 
@@ -3279,12 +3278,12 @@
 	return 0;
 }
 
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
 {
 	return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
-		sizeof(adapter->ahw->ext_reg_tbl)) +
-		(ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
-		sizeof(adapter->ahw->reg_tbl));
+		sizeof(*adapter->ahw->ext_reg_tbl)) +
+		(ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+		sizeof(*adapter->ahw->reg_tbl));
 }
 
 int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
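The get_regs_len fix above corrects two operator slips in one expression: sizeof(adapter->ahw->ext_reg_tbl) measured the pointer rather than the element (sizeof(*...) gives the element size), and the second table's entry count was added to, rather than multiplied by, its element size. The correct sizing idiom, with a hypothetical table:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static const u32 example_reg_tbl[] = { 0x10, 0x14, 0x18 };

	/* Bytes needed to snapshot every register in the table:
	 * entry count times the size of one entry.
	 */
	static size_t example_regs_len(void)
	{
		return ARRAY_SIZE(example_reg_tbl) * sizeof(*example_reg_tbl);
	}
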
@@ -3305,10 +3304,11 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_cmd_args cmd;
+	u8 val, drv_sds_rings = adapter->drv_sds_rings;
+	u8 drv_tx_rings = adapter->drv_tx_rings;
 	u32 data;
 	u16 intrpt_id, id;
-	u8 val;
-	int ret, max_sds_rings = adapter->max_sds_rings;
+	int ret;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
 		netdev_info(netdev, "Device is resetting\n");
@@ -3321,7 +3321,7 @@
 	}
 
 	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
-					 max_sds_rings);
+					 drv_sds_rings);
 	if (ret)
 		goto fail_diag_irq;
 
@@ -3358,10 +3358,11 @@
 
 done:
 	qlcnic_free_mbx_args(&cmd);
-	qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+	qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
 
 fail_diag_irq:
-	adapter->max_sds_rings = max_sds_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
+	adapter->drv_tx_rings = drv_tx_rings;
 	qlcnic_release_diag_lock(adapter);
 	return ret;
 }
@@ -3381,10 +3382,21 @@
 	}
 	config = ahw->port_config;
 	if (config & QLC_83XX_CFG_STD_PAUSE) {
-		if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+		switch (MSW(config)) {
+		case QLC_83XX_TX_PAUSE:
 			pause->tx_pause = 1;
-		if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+			break;
+		case QLC_83XX_RX_PAUSE:
 			pause->rx_pause = 1;
+			break;
+		case QLC_83XX_TX_RX_PAUSE:
+		default:
+			/* Backward compatibility for existing
+			 * flash definitions
+			 */
+			pause->tx_pause = 1;
+			pause->rx_pause = 1;
+		}
 	}
 
 	if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3439,8 @@
 		ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
 		ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
 	} else if (!pause->rx_pause && !pause->tx_pause) {
-		ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+		ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+				      QLC_83XX_CFG_STD_PAUSE);
 	}
 	status = qlcnic_83xx_set_port_config(adapter);
 	if (status) {
@@ -3503,7 +3516,7 @@
 	if (err)
 		return err;
 
-	if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+	if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
 		if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
 			qlcnic_83xx_set_vnic_opmode(adapter);
 		} else {
@@ -3524,12 +3537,15 @@
 
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
 {
-	INIT_COMPLETION(mbx->completion);
+	reinit_completion(&mbx->completion);
 	set_bit(QLC_83XX_MBX_READY, &mbx->status);
 }
 
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
 {
+	if (!mbx)
+		return;
+
 	destroy_workqueue(mbx->work_q);
 	kfree(mbx);
 }
@@ -3650,6 +3666,9 @@
 {
 	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 
+	if (!mbx)
+		return;
+
 	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 	complete(&mbx->completion);
 	cancel_work_sync(&mbx->work);
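The INIT_COMPLETION(mbx->completion) call above becomes reinit_completion(&mbx->completion), following the 3.13 tree-wide move away from the macro; both simply reset the completion's done count so the object can be reused for another wait/complete cycle. A minimal reuse sketch with an illustrative context struct (assume init_completion() ran once at setup):

	#include <linux/completion.h>

	struct example_ctx {
		struct completion done;
	};

	static void example_restart(struct example_ctx *ctx)
	{
		reinit_completion(&ctx->done);	/* ->done back to 0 */
		/* ...queue work that finishes with complete(&ctx->done),
		 * then wait_for_completion(&ctx->done) as before...
		 */
	}
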
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 533e150..4cae6ca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -61,7 +61,6 @@
 #define QLC_83XX_HOST_SDS_MBX_IDX		8
 
 #define QLCNIC_HOST_RDS_MBX_IDX			88
-#define QLCNIC_MAX_RING_SETS			8
 
 /* Pause control registers */
 #define QLC_83XX_SRE_SHIM_REG		0x0D200284
@@ -183,8 +182,8 @@
 	u8	num_pci_func;
 	u8	state;
 #endif
-	u32	host_csmr[QLCNIC_MAX_RING_SETS];
-	struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+	u32	host_csmr[QLCNIC_MAX_SDS_RINGS];
+	struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
 } __packed;
 
 struct qlcnic_add_rings_mbx_out {
@@ -197,8 +196,8 @@
 	u8	sts_num;
 	u8	rcv_num;
 #endif
-	u32  host_csmr[QLCNIC_MAX_RING_SETS];
-	struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+	u32  host_csmr[QLCNIC_MAX_SDS_RINGS];
+	struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
 } __packed;
 
 /* Transmit context mailbox inbox registers
@@ -363,6 +362,9 @@
 #define QLC_83XX_LINK_EEE(data)		((data) & BIT_13)
 #define QLC_83XX_DCBX(data)			(((data) >> 28) & 7)
 #define QLC_83XX_AUTONEG(data)			((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE			0x10
+#define QLC_83XX_RX_PAUSE			0x20
+#define QLC_83XX_TX_RX_PAUSE			0x30
 #define QLC_83XX_CFG_STD_PAUSE			(1 << 5)
 #define QLC_83XX_CFG_STD_TX_PAUSE		(1 << 20)
 #define QLC_83XX_CFG_STD_RX_PAUSE		(2 << 20)
@@ -412,8 +414,6 @@
 #define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val)	(val & 0x4000)
 #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val)	(val & 0x20000)
 #define QLC_83XX_ESWITCH_CAPABILITY			BIT_23
-#define QLC_83XX_VIRTUAL_NIC_MODE			0xFF
-#define QLC_83XX_DEFAULT_MODE				0x0
 #define QLC_83XX_SRIOV_MODE				0x1
 #define QLCNIC_BRDTYPE_83XX_10G			0x0083
 
@@ -521,7 +521,7 @@
 /* 83xx functions */
 int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
 int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
 void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
 int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
 void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -626,7 +626,7 @@
 int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
 				    struct qlcnic_info *, u8);
 int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f09e787..89208e5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -636,7 +636,7 @@
 	if (adapter->portnum == 0)
 		qlcnic_set_drv_version(adapter);
 
-	qlcnic_dcb_get_info(adapter);
+	qlcnic_dcb_get_info(adapter->dcb);
 	qlcnic_83xx_idc_attach_driver(adapter);
 
 	return 0;
@@ -818,6 +818,7 @@
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_mailbox *mbx = ahw->mailbox;
 	int ret = 0;
+	u32 owner;
 	u32 val;
 
 	/* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@
 			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 			set_bit(__QLCNIC_RESETTING, &adapter->state);
 			qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+		} else {
+			owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+			if (ahw->pci_func == owner)
+				qlcnic_dump_fw(adapter);
 		}
 		return -EIO;
 	}
@@ -897,7 +902,7 @@
 		qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
 		set_bit(__QLCNIC_RESETTING, &adapter->state);
 		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
-		if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+		if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
 			qlcnic_83xx_disable_vnic_mode(adapter, 1);
 
 		if (qlcnic_check_diag_status(adapter)) {
@@ -1058,6 +1063,12 @@
 	adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
 	qlcnic_83xx_periodic_tasks(adapter);
 
+	/* Do not reschedule if firmware is in a hung state and auto
+	 * recovery is disabled
+	 */
+	if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+		return;
+
 	/* Re-schedule the function */
 	if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
 		qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2022,6 +2033,8 @@
 	ahw->max_mac_filters = nic_info.max_mac_filters;
 	ahw->max_mtu = nic_info.max_mtu;
 
+	adapter->max_tx_rings = ahw->max_tx_ques;
+	adapter->max_sds_rings = ahw->max_rx_ques;
 	/* eSwitch capability indicates vNIC mode.
 	 * vNIC and SRIOV are mutually exclusive operational modes.
 	 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2034,7 +2047,7 @@
 		return QLC_83XX_DEFAULT_OPMODE;
 
 	if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
-		return QLC_83XX_VIRTUAL_NIC_MODE;
+		return QLCNIC_VNIC_MODE;
 
 	return QLC_83XX_DEFAULT_OPMODE;
 }
@@ -2048,15 +2061,20 @@
 	if (ret == -EIO)
 		return -EIO;
 
-	if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
-		ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+	if (ret == QLCNIC_VNIC_MODE) {
+		ahw->nic_mode = QLCNIC_VNIC_MODE;
+
 		if (qlcnic_83xx_config_vnic_opmode(adapter))
 			return -EIO;
 
+		adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+		adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
 	} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
-		ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+		ahw->nic_mode = QLCNIC_DEFAULT_MODE;
 		adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+		adapter->max_sds_rings = ahw->max_rx_ques;
+		adapter->max_tx_rings = ahw->max_tx_ques;
 	} else {
 		return -EIO;
 	}
@@ -2159,13 +2177,34 @@
 	return err;
 }
 
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+	u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+	u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+	adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+	adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+	if (!adapter->ahw->msix_supported) {
+		rx_cnt = QLCNIC_SINGLE_RING;
+		tx_cnt = QLCNIC_SINGLE_RING;
+	}
+
+	/* compute and set driver Tx and SDS ring counts */
+	qlcnic_set_tx_ring_count(adapter, tx_cnt);
+	qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
 
 int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_dcb *dcb;
 	int err = 0;
 
 	ahw->msix_supported = !!qlcnic_use_msi_x;
+
+	qlcnic_83xx_init_rings(adapter);
+
 	err = qlcnic_83xx_init_mailbox_work(adapter);
 	if (err)
 		goto exit;
@@ -2178,22 +2217,26 @@
 			return err;
 	}
 
+	if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+	    qlcnic_83xx_read_flash_mfg_id(adapter)) {
+		dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+		err = -ENOTRECOVERABLE;
+		goto detach_mbx;
+	}
+
 	err = qlcnic_83xx_check_hw_status(adapter);
 	if (err)
 		goto detach_mbx;
 
-	if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
-		qlcnic_83xx_read_flash_mfg_id(adapter);
-
 	err = qlcnic_83xx_get_fw_info(adapter);
 	if (err)
 		goto detach_mbx;
 
 	err = qlcnic_83xx_idc_init(adapter);
 	if (err)
-		goto clear_fw_info;
+		goto detach_mbx;
 
-	err = qlcnic_setup_intr(adapter, 0, 0);
+	err = qlcnic_setup_intr(adapter);
 	if (err) {
 		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
 		goto disable_intr;
@@ -2215,13 +2258,16 @@
 	if (err)
 		goto disable_mbx_intr;
 
+
 	/* Perform operating mode specific initialization */
 	err = adapter->nic_ops->init_driver(adapter);
 	if (err)
 		goto disable_mbx_intr;
 
-	if (adapter->dcb && qlcnic_dcb_attach(adapter))
-		qlcnic_clear_dcb_ops(adapter);
+	dcb = adapter->dcb;
+
+	if (dcb && qlcnic_dcb_attach(dcb))
+		qlcnic_clear_dcb_ops(dcb);
 
 	/* Periodically monitor device status */
 	qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
@@ -2233,12 +2279,10 @@
 disable_intr:
 	qlcnic_teardown_intr(adapter);
 
-clear_fw_info:
-	kfree(ahw->fw_info);
-
 detach_mbx:
 	qlcnic_83xx_detach_mailbox_work(adapter);
 	qlcnic_83xx_free_mailbox(ahw->mailbox);
+	ahw->mailbox = NULL;
 exit:
 	return err;
 }
@@ -2251,7 +2295,7 @@
 	clear_bit(QLC_83XX_MBX_READY, &idc->status);
 	cancel_delayed_work_sync(&adapter->fw_work);
 
-	if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+	if (ahw->nic_mode == QLCNIC_VNIC_MODE)
 		qlcnic_83xx_disable_vnic_mode(adapter, 1);
 
 	qlcnic_83xx_idc_detach_driver(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 0248a4c..734d286 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -94,13 +94,29 @@
  **/
 static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
 {
-	int err = -EIO;
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct device *dev = &adapter->pdev->dev;
+	struct qlcnic_npar_info *npar;
+	int i, err = -EIO;
 
 	qlcnic_83xx_get_minidump_template(adapter);
+
 	if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
 		if (qlcnic_init_pci_info(adapter))
 			return err;
 
+		npar = adapter->npars;
+
+		for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+			dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+				 npar->pci_func, npar->active, npar->type,
+				 npar->phy_port, npar->min_bw, npar->max_bw,
+				 npar->mac);
+		}
+
+		dev_info(dev, "Max functions = %d, active functions = %d\n",
+			 ahw->max_pci_func, ahw->act_pci_func);
+
 		if (qlcnic_83xx_set_vnic_opmode(adapter))
 			return err;
 
@@ -115,12 +131,12 @@
 		return err;
 
 	qlcnic_83xx_config_vnic_buff_descriptors(adapter);
-	adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+	ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
 	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
 	qlcnic_83xx_enable_vnic_mode(adapter, 1);
 
-	dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
-		 adapter->ahw->fw_hal_version);
+	dev_info(dev, "HAL Version: %d, Management function\n",
+		 ahw->fw_hal_version);
 
 	return 0;
 }
@@ -240,8 +256,8 @@
 	return 0;
 }
 
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
-					     int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+					int func, int *port_id)
 {
 	struct qlcnic_info nic_info;
 	int err = 0;
@@ -257,23 +273,8 @@
 	else
 		err = -EIO;
 
-	return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
-	int id, err = 0;
-
-	err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
-	if (err)
-		return err;
-
-	if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
-		if (!qlcnic_enable_eswitch(adapter, id, 1))
-			adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
-		else
-			err = -EIO;
-	}
+	if (!err)
+		adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
 
 	return err;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 86850dd..859cb16 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -270,7 +270,7 @@
 	int err;
 
 	nrds_rings = adapter->max_rds_rings;
-	nsds_rings = adapter->max_sds_rings;
+	nsds_rings = adapter->drv_sds_rings;
 
 	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
 				   nsds_rings);
@@ -475,7 +475,7 @@
 
 	if (qlcnic_check_multi_tx(adapter) &&
 	    !adapter->ahw->diag_test) {
-		temp_nsds_rings = adapter->max_sds_rings;
+		temp_nsds_rings = adapter->drv_sds_rings;
 		index = temp_nsds_rings + ring;
 		msix_id = ahw->intr_tbl[index].id;
 		prq->msi_index = cpu_to_le16(msix_id);
@@ -512,7 +512,7 @@
 		if (qlcnic_check_multi_tx(adapter) &&
 		    !adapter->ahw->diag_test &&
 		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
-			index = adapter->max_sds_rings + ring;
+			index = adapter->drv_sds_rings + ring;
 			intr_mask = ahw->intr_tbl[index].src;
 			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
 		}
@@ -582,7 +582,7 @@
 
 	recv_ctx = adapter->recv_ctx;
 
-	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
 		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
 						   &tx_ring->hw_cons_phys_addr,
@@ -616,7 +616,7 @@
 
 	}
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 
 		addr = dma_alloc_coherent(&adapter->pdev->dev,
@@ -664,7 +664,7 @@
 	if (err)
 		goto err_out;
 
-	for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+	for (ring = 0; ring < dev->drv_tx_rings; ring++) {
 		err = qlcnic_fw_cmd_create_tx_ctx(dev,
 						  &dev->tx_ring[ring],
 						  ring);
@@ -703,7 +703,7 @@
 
 	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
 		qlcnic_fw_cmd_del_rx_ctx(adapter);
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++)
 			qlcnic_fw_cmd_del_tx_ctx(adapter,
 						 &adapter->tx_ring[ring]);
 
@@ -733,7 +733,7 @@
 
 	recv_ctx = adapter->recv_ctx;
 
-	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
 		if (tx_ring->hw_consumer != NULL) {
 			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
@@ -764,7 +764,7 @@
 		}
 	}
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 
 		if (sds_ring->desc_head != NULL) {
@@ -895,6 +895,8 @@
 		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
 		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
 		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+		adapter->max_tx_rings = npar_info->max_tx_ques;
+		adapter->max_sds_rings = npar_info->max_rx_ques;
 	}
 
 	qlcnic_free_mbx_args(&cmd);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index d62d5ce..86bca7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -57,22 +57,22 @@
 static void qlcnic_dcb_aen_work(struct work_struct *);
 static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
 
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 
 struct qlcnic_dcb_capability {
 	bool	tsa_capability;
@@ -180,7 +180,7 @@
 	.query_cee_param	= qlcnic_83xx_dcb_query_cee_param,
 	.get_cee_cfg		= qlcnic_83xx_dcb_get_cee_cfg,
 	.register_aen		= qlcnic_83xx_dcb_register_aen,
-	.handle_aen		= qlcnic_83xx_dcb_handle_aen,
+	.aen_handler		= qlcnic_83xx_dcb_aen_handler,
 };
 
 static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@
 	.get_hw_capability	= qlcnic_82xx_dcb_get_hw_capability,
 	.query_cee_param	= qlcnic_82xx_dcb_query_cee_param,
 	.get_cee_cfg		= qlcnic_82xx_dcb_get_cee_cfg,
-	.handle_aen		= qlcnic_82xx_dcb_handle_aen,
+	.aen_handler		= qlcnic_82xx_dcb_aen_handler,
 };
 
 static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@
 	return j;
 }
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
 {
-	if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
-		adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+	if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+		dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
 }
 
 static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@
 		adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
 }
 
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_dcb *dcb;
 
@@ -267,20 +267,22 @@
 	adapter->dcb = dcb;
 	dcb->adapter = adapter;
 	qlcnic_set_dcb_ops(adapter);
+	dcb->state = 0;
 
 	return 0;
 }
 
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
 {
-	struct qlcnic_dcb *dcb = adapter->dcb;
+	struct qlcnic_adapter *adapter;
 
 	if (!dcb)
 		return;
 
-	qlcnic_dcb_register_aen(adapter, 0);
+	adapter = dcb->adapter;
+	qlcnic_dcb_register_aen(dcb, 0);
 
-	while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+	while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
 		usleep_range(10000, 11000);
 
 	cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@
 	adapter->dcb = NULL;
 }
 
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
 {
-	qlcnic_dcb_get_hw_capability(adapter);
-	qlcnic_dcb_get_cee_cfg(adapter);
-	qlcnic_dcb_register_aen(adapter, 1);
+	qlcnic_dcb_get_hw_capability(dcb);
+	qlcnic_dcb_get_cee_cfg(dcb);
+	qlcnic_dcb_register_aen(dcb, 1);
 }
 
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 {
-	struct qlcnic_dcb *dcb = adapter->dcb;
 	int err = 0;
 
 	INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
 
 	dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
 	if (!dcb->wq) {
-		dev_err(&adapter->pdev->dev,
+		dev_err(&dcb->adapter->pdev->dev,
 			"DCB workqueue allocation failed. DCB will be disabled\n");
 		return -1;
 	}
@@ -331,7 +332,7 @@
 		goto out_free_cfg;
 	}
 
-	qlcnic_dcb_get_info(adapter);
+	qlcnic_dcb_get_info(dcb);
 
 	return 0;
 out_free_cfg:
@@ -345,9 +346,9 @@
 	return err;
 }
 
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
-					    char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
 {
+	struct qlcnic_adapter *adapter = dcb->adapter;
 	struct qlcnic_cmd_args cmd;
 	u32 mbx_out;
 	int err;
@@ -371,15 +372,15 @@
 	return err;
 }
 
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
 {
-	struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+	struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
 	u32 mbx_out;
 	int err;
 
 	memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
 
-	err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+	err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
 	if (err)
 		return err;
 
@@ -397,21 +398,21 @@
 	if (cap->max_num_tc > QLC_DCB_MAX_TC ||
 	    cap->max_ets_tc > cap->max_num_tc ||
 	    cap->max_pfc_tc > cap->max_num_tc) {
-		dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+		dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
 		return -EINVAL;
 	}
 
 	return err;
 }
 
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-	struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+	struct qlcnic_dcb_cfg *cfg = dcb->cfg;
 	struct qlcnic_dcb_capability *cap;
 	u32 mbx_out;
 	int err;
 
-	err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+	err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
 	if (err)
 		return err;
 
@@ -419,15 +420,16 @@
 	cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
 
 	if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-		set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+		set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
 	return err;
 }
 
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
 					   char *buf, u8 type)
 {
 	u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+	struct qlcnic_adapter *adapter = dcb->adapter;
 	struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
 	struct device *dev = &adapter->pdev->dev;
 	dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@
 		return -EINVAL;
 	}
 
-	addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
-				  GFP_KERNEL);
+	addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
 	if (addr == NULL)
 		return -ENOMEM;
 
@@ -488,72 +489,67 @@
 	qlcnic_free_mbx_args(&cmd);
 
 out_free_rsp:
-	dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+	dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
 
 	return err;
 }
 
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
 	struct qlcnic_dcb_mbx_params *mbx;
 	int err;
 
-	mbx = adapter->dcb->param;
+	mbx = dcb->param;
 	if (!mbx)
 		return 0;
 
-	err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+	err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
 					 QLC_DCB_LOCAL_PARAM_FWID);
 	if (err)
 		return err;
 
-	err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+	err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
 					 QLC_DCB_OPER_PARAM_FWID);
 	if (err)
 		return err;
 
-	err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+	err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
 					 QLC_DCB_PEER_PARAM_FWID);
 	if (err)
 		return err;
 
 	mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
 
-	qlcnic_dcb_data_cee_param_map(adapter);
+	qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
 	return err;
 }
 
 static void qlcnic_dcb_aen_work(struct work_struct *work)
 {
-	struct qlcnic_adapter *adapter;
 	struct qlcnic_dcb *dcb;
 
 	dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
-	adapter = dcb->adapter;
 
-	qlcnic_dcb_get_cee_cfg(adapter);
-	clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+	qlcnic_dcb_get_cee_cfg(dcb);
+	clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
 }
 
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-				       void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-	struct qlcnic_dcb *dcb = adapter->dcb;
-
-	if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+	if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
 		return;
 
 	queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
 
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-	struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+	struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
 	u32 mbx_out;
 	int err;
 
-	err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+	err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
 	if (err)
 		return err;
 
@@ -565,14 +561,15 @@
 		cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
 
 	if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-		set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+		set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
 	return err;
 }
 
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
 					   char *buf, u8 idx)
 {
+	struct qlcnic_adapter *adapter = dcb->adapter;
 	struct qlcnic_dcb_mbx_params mbx_out;
 	int err, i, j, k, max_app, size;
 	struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@
 	return err;
 }
 
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
-	struct qlcnic_dcb *dcb = adapter->dcb;
 	int err;
 
-	err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+	err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
 	if (err)
 		return err;
 
-	qlcnic_dcb_data_cee_param_map(adapter);
+	qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
 	return err;
 }
 
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
-					bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
 {
 	u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+	struct qlcnic_adapter *adapter = dcb->adapter;
 	struct qlcnic_cmd_args cmd;
 	int err;
 
@@ -669,19 +665,17 @@
 	return err;
 }
 
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-				       void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-	struct qlcnic_dcb *dcb = adapter->dcb;
 	u32 *val = data;
 
-	if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+	if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
 		return;
 
 	if (*val & BIT_8)
-		set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+		set_bit(QLCNIC_DCB_STATE, &dcb->state);
 	else
-		clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+		clear_bit(QLCNIC_DCB_STATE, &dcb->state);
 
 	queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
@@ -814,12 +808,12 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-	return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+	return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
 }
 
 static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
 {
-	memcpy(addr, netdev->dev_addr, netdev->addr_len);
+	memcpy(addr, netdev->perm_addr, netdev->addr_len);
 }
 
 static void
@@ -834,7 +828,7 @@
 	type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 	*prio = *pgid = *bw_per = *up_tc_map = 0;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
 	    !type->tc_param_valid)
 		return;
 
@@ -870,7 +864,7 @@
 	*bw_pct = 0;
 	type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
 	    !type->tc_param_valid)
 		return;
 
@@ -896,7 +890,7 @@
 	*setting = 0;
 	type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
 	    !type->pfc_mode_enable)
 		return;
 
@@ -915,7 +909,7 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	switch (capid) {
@@ -944,7 +938,7 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return -EINVAL;
 
 	switch (attr) {
@@ -967,7 +961,7 @@
 				.protocol = id,
 			     };
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_dcb *dcb = adapter->dcb;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
 		return 0;
 
 	return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_dcb_cee *type;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 1;
 
 	type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@
 
 	*app_count = 0;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@
 	struct qlcnic_dcb_app *app;
 	int i, j;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@
 	struct qlcnic_dcb_cee *peer;
 	u8 i, j, k, map;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@
 
 	pfc->pfc_en = 0;
 
-	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
 		return 0;
 
 	peer = &cfg->type[QLC_DCB_PEER_IDX];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index b87ce9f..c04ae0c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -8,26 +8,29 @@
 #ifndef __QLCNIC_DCBX_H
 #define __QLCNIC_DCBX_H
 
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE	0
+#define QLCNIC_DCB_AEN_MODE	1
 
 #ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
 #else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 { return 0; }
 #endif
 
+struct qlcnic_dcb;
+
 struct qlcnic_dcb_ops {
-	void (*init_dcbnl_ops) (struct qlcnic_adapter *);
-	void (*free) (struct qlcnic_adapter *);
-	int (*attach) (struct qlcnic_adapter *);
-	int (*query_hw_capability) (struct qlcnic_adapter *, char *);
-	int (*get_hw_capability) (struct qlcnic_adapter *);
-	void (*get_info) (struct qlcnic_adapter *);
-	int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
-	int (*get_cee_cfg) (struct qlcnic_adapter *);
-	int (*register_aen) (struct qlcnic_adapter *, bool);
-	void (*handle_aen) (struct qlcnic_adapter *, void *);
+	int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+	int (*get_hw_capability) (struct qlcnic_dcb *);
+	int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+	void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+	int (*register_aen) (struct qlcnic_dcb *, bool);
+	void (*aen_handler) (struct qlcnic_dcb *, void *);
+	int (*get_cee_cfg) (struct qlcnic_dcb *);
+	void (*get_info) (struct qlcnic_dcb *);
+	int (*attach) (struct qlcnic_dcb *);
+	void (*free) (struct qlcnic_dcb *);
 };
 
 struct qlcnic_dcb {
@@ -37,5 +40,85 @@
 	struct workqueue_struct		*wq;
 	struct qlcnic_dcb_ops		*ops;
 	struct qlcnic_dcb_cfg		*cfg;
+	unsigned long			state;
 };
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+	kfree(dcb);
+	dcb = NULL;	/* note: clears only this local copy, not the caller's pointer */
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->get_hw_capability)
+		return dcb->ops->get_hw_capability(dcb);
+
+	return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->free)
+		dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->attach)
+		return dcb->ops->attach(dcb);
+
+	return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+	if (dcb && dcb->ops->query_hw_capability)
+		return dcb->ops->query_hw_capability(dcb, buf);
+
+	return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->get_info)
+		dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+	if (dcb && dcb->ops->query_cee_param)
+		return dcb->ops->query_cee_param(dcb, buf, type);
+
+	return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->get_cee_cfg)
+		return dcb->ops->get_cee_cfg(dcb);
+
+	return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+	if (dcb && dcb->ops->register_aen)
+		dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+	if (dcb && dcb->ops->aen_handler)
+		dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+	if (dcb && dcb->ops->init_dcbnl_ops)
+		dcb->ops->init_dcbnl_ops(dcb);
+}
 #endif
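
A note on the wrappers above: every DCB entry point is now routed through a NULL-checked inline, so callers never test for DCB support themselves; a missing DCB context (including the CONFIG_QLCNIC_DCB=n case, where the stub qlcnic_register_dcb() allocates nothing) or an unimplemented op quietly degrades to a no-op or a zero default. A minimal user-space sketch of the same guard pattern, with purely illustrative names:

	#include <stdio.h>

	/* Hypothetical stand-ins mirroring the shape of qlcnic_dcb_ops. */
	struct demo_dcb;

	struct demo_ops {
		int (*get_hw_capability)(struct demo_dcb *dcb);
	};

	struct demo_dcb {
		const struct demo_ops *ops;
	};

	/* Same guard as the new qlcnic_dcb_* inlines: never dereference the
	 * object or the op directly, so a missing context or an
	 * unimplemented op yields a safe default instead of an oops. */
	static int demo_get_hw_capability(struct demo_dcb *dcb)
	{
		if (dcb && dcb->ops && dcb->ops->get_hw_capability)
			return dcb->ops->get_hw_capability(dcb);

		return 0;
	}

	static int real_get_hw_capability(struct demo_dcb *dcb)
	{
		(void)dcb;
		return 42;
	}

	int main(void)
	{
		const struct demo_ops ops = {
			.get_hw_capability = real_get_hw_capability,
		};
		struct demo_dcb dcb = { .ops = &ops };

		printf("with ops: %d\n", demo_get_hw_capability(&dcb)); /* 42 */
		printf("no dcb:   %d\n", demo_get_hw_capability(NULL)); /* 0  */
		return 0;
	}
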
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ff83a9f..b36c02f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -27,43 +27,36 @@
 };
 
 static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
-	{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
-		QLC_OFF(stats.xmitcalled)},
-	{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
-		QLC_OFF(stats.xmitfinished)},
-	{"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
-	{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
-	{"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
-	{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
-	{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
-	{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
-	{"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
-	{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
-	{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
 	{"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
 	{"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+	{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+	 QLC_OFF(stats.xmitcalled)},
+	{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+	 QLC_OFF(stats.xmitfinished)},
+	{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+	 QLC_OFF(stats.tx_dma_map_error)},
+	{"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+	{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+	{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+	 QLC_OFF(stats.rx_dma_map_error)},
+	{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+	{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+	{"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+	{"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+	{"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+	{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+	{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+	{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
 	{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
 	 QLC_OFF(stats.skb_alloc_failure)},
-	{"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
-	{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
-					 QLC_OFF(stats.rx_dma_map_error)},
-	{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
-					 QLC_OFF(stats.tx_dma_map_error)},
 	{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
-				QLC_OFF(stats.mac_filter_limit_overrun)},
+	 QLC_OFF(stats.mac_filter_limit_overrun)},
 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
 	 QLC_OFF(stats.spurious_intr)},
 
 };
 
 static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
-	"rx unicast frames",
-	"rx multicast frames",
-	"rx broadcast frames",
-	"rx dropped frames",
-	"rx errors",
-	"rx local frames",
-	"rx numbytes",
 	"tx unicast frames",
 	"tx multicast frames",
 	"tx broadcast frames",
@@ -71,6 +64,13 @@
 	"tx errors",
 	"tx local frames",
 	"tx numbytes",
+	"rx unicast frames",
+	"rx multicast frames",
+	"rx broadcast frames",
+	"rx dropped frames",
+	"rx errors",
+	"rx local frames",
+	"rx numbytes",
 };
 
 static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
@@ -126,13 +126,16 @@
 
 #define QLCNIC_STATS_LEN	ARRAY_SIZE(qlcnic_gstrings_stats)
 
-static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
 	"xmit_on",
 	"xmit_off",
 	"xmit_called",
 	"xmit_finished",
+	"tx_bytes",
 };
 
+#define QLCNIC_TX_STATS_LEN	ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
 static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
 	"ctx_rx_bytes",
 	"ctx_rx_pkts",
@@ -187,8 +190,8 @@
 		return -1;
 }
 
-#define QLCNIC_RING_REGS_COUNT	20
-#define QLCNIC_RING_REGS_LEN	(QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define	QLCNIC_TX_INTR_NOT_CONFIGURED	0x78563412
+
 #define QLCNIC_MAX_EEPROM_LEN   1024
 
 static const u32 diag_registers[] = {
@@ -219,7 +222,15 @@
 };
 
 #define QLCNIC_MGMT_API_VERSION	2
-#define QLCNIC_ETHTOOL_REGS_VER	3
+#define QLCNIC_ETHTOOL_REGS_VER	4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+	int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+			    (adapter->max_rds_rings * 2) +
+			    (adapter->drv_sds_rings * 3) + 5;
+	return ring_regs_cnt * sizeof(u32);
+}
 
 static int qlcnic_get_regs_len(struct net_device *dev)
 {
@@ -231,7 +242,9 @@
 	else
 		len = sizeof(ext_diag_registers) + sizeof(diag_registers);
 
-	return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+	len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+	len += qlcnic_get_ring_regs_len(adapter);
+	return len;
 }
 
 static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +506,8 @@
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_rds_ring *rds_rings;
+	struct qlcnic_host_tx_ring *tx_ring;
 	u32 *regs_buff = p;
 	int ring, i = 0;
 
@@ -512,21 +527,35 @@
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return;
 
-	regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
+	/* Marker between regs and TX ring count */
+	regs_buff[i++] = 0xFFEFCDAB;
 
-	regs_buff[i++] = 1; /* No. of tx ring */
-	regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
-	regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+	regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+		tx_ring = &adapter->tx_ring[ring];
+		regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+		regs_buff[i++] = tx_ring->sw_consumer;
+		regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+		regs_buff[i++] = tx_ring->producer;
+		if (tx_ring->crb_intr_mask)
+			regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+		else
+			regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+	}
 
-	regs_buff[i++] = 2; /* No. of rx ring */
-	regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
-	regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+	regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_rings = &recv_ctx->rds_rings[ring];
+		regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+		regs_buff[i++] = rds_rings->producer;
+	}
 
-	regs_buff[i++] = adapter->max_sds_rings;
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &(recv_ctx->sds_rings[ring]);
 		regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+		regs_buff[i++] = sds_ring->consumer;
+		regs_buff[i++] = readl(sds_ring->crb_intr_mask);
 	}
 }
 
@@ -635,46 +664,88 @@
 	return qlcnic_reset_context(adapter);
 }
 
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+				      u8 rx_ring, u8 tx_ring)
+{
+	if (rx_ring != 0) {
+		if (rx_ring > adapter->max_sds_rings) {
+			netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+				   rx_ring, adapter->max_sds_rings);
+			return -EINVAL;
+		}
+	}
+
+	if (tx_ring != 0) {
+		if (qlcnic_82xx_check(adapter) &&
+		    (tx_ring > adapter->max_tx_rings)) {
+			netdev_err(adapter->netdev,
+				   "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+				   tx_ring, adapter->max_tx_rings);
+			return -EINVAL;
+		}
+
+		if (qlcnic_83xx_check(adapter) &&
+		    (tx_ring > QLCNIC_SINGLE_RING)) {
+			netdev_err(adapter->netdev,
+				   "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
+				   tx_ring, QLCNIC_SINGLE_RING);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static void qlcnic_get_channels(struct net_device *dev,
 		struct ethtool_channels *channel)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
-	int min;
 
-	min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
-	channel->max_rx = rounddown_pow_of_two(min);
-	channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
-
-	channel->rx_count = adapter->max_sds_rings;
-	channel->tx_count = adapter->max_drv_tx_rings;
+	channel->max_rx = adapter->max_sds_rings;
+	channel->max_tx = adapter->max_tx_rings;
+	channel->rx_count = adapter->drv_sds_rings;
+	channel->tx_count = adapter->drv_tx_rings;
 }
 
 static int qlcnic_set_channels(struct net_device *dev,
-		struct ethtool_channels *channel)
+			       struct ethtool_channels *channel)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int err;
-	int txq = 0;
 
 	if (channel->other_count || channel->combined_count)
 		return -EINVAL;
 
+	err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+					 channel->tx_count);
+	if (err)
+		return err;
+
 	if (channel->rx_count) {
-		err = qlcnic_validate_max_rss(adapter, channel->rx_count);
-		if (err)
+		err = qlcnic_validate_rings(adapter, channel->rx_count,
+					    QLCNIC_RX_QUEUE);
+		if (err) {
+			netdev_err(dev, "Unable to configure %u SDS rings\n",
+				   channel->rx_count);
 			return err;
+		}
 	}
 
-	if (qlcnic_82xx_check(adapter) && channel->tx_count) {
-		err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
-		if (err)
+	if (channel->tx_count) {
+		err = qlcnic_validate_rings(adapter, channel->tx_count,
+					    QLCNIC_TX_QUEUE);
+		if (err) {
+			netdev_err(dev, "Unable to configure %u Tx rings\n",
+				   channel->tx_count);
 			return err;
-		txq = channel->tx_count;
+		}
 	}
 
-	err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
-	netdev_info(dev, "allocated 0x%x sds rings and  0x%x tx rings\n",
-		    adapter->max_sds_rings, adapter->max_drv_tx_rings);
+	err = qlcnic_setup_rings(adapter, channel->rx_count,
+				 channel->tx_count);
+	netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+		    adapter->drv_sds_rings, adapter->drv_tx_rings);
+
 	return err;
 }
 
@@ -876,7 +947,7 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_cmd_args cmd;
-	int ret, max_sds_rings = adapter->max_sds_rings;
+	int ret, drv_sds_rings = adapter->drv_sds_rings;
 
 	if (qlcnic_83xx_check(adapter))
 		return qlcnic_83xx_interrupt_test(netdev);
@@ -905,10 +976,10 @@
 	qlcnic_free_mbx_args(&cmd);
 
 free_diag_res:
-	qlcnic_diag_free_res(netdev, max_sds_rings);
+	qlcnic_diag_free_res(netdev, drv_sds_rings);
 
 clear_diag_irq:
-	adapter->max_sds_rings = max_sds_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 
 	return ret;
@@ -984,8 +1055,8 @@
 int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	int max_drv_tx_rings = adapter->max_drv_tx_rings;
-	int max_sds_rings = adapter->max_sds_rings;
+	int drv_tx_rings = adapter->drv_tx_rings;
+	int drv_sds_rings = adapter->drv_sds_rings;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int loop = 0;
@@ -1040,11 +1111,11 @@
 	qlcnic_clear_lb_mode(adapter, mode);
 
  free_res:
-	qlcnic_diag_free_res(netdev, max_sds_rings);
+	qlcnic_diag_free_res(netdev, drv_sds_rings);
 
  clear_it:
-	adapter->max_sds_rings = max_sds_rings;
-	adapter->max_drv_tx_rings = max_drv_tx_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
+	adapter->drv_tx_rings = drv_tx_rings;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	return ret;
 }
@@ -1097,11 +1168,11 @@
 		       QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
-		for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+		num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+		for (i = 0; i < adapter->drv_tx_rings; i++) {
 			for (index = 0; index < num_stats; index++) {
-				sprintf(data, "tx_ring_%d %s", i,
-					qlcnic_tx_ring_stats_strings[index]);
+				sprintf(data, "tx_queue_%d %s", i,
+					qlcnic_tx_queue_stats_strings[index]);
 				data += ETH_GSTRING_LEN;
 			}
 		}
@@ -1199,6 +1270,36 @@
 	return data;
 }
 
+static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
+
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+		tx_ring = &adapter->tx_ring[ring];
+		adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
+		adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
+		adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
+		adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
+		adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+	}
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+	struct qlcnic_host_tx_ring *tx_ring;
+
+	tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+	*data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+	return data;
+}
+
 static void qlcnic_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 *data)
 {
@@ -1206,19 +1307,20 @@
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_esw_statistics port_stats;
 	struct qlcnic_mac_statistics mac_stats;
-	int index, ret, length, size, ring;
+	int index, ret, length, size, tx_size, ring;
 	char *p;
 
-	memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
-	for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+
+	memset(data, 0, tx_size * sizeof(u64));
+	for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
 		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 			tx_ring = &adapter->tx_ring[ring];
-			*data++ = tx_ring->xmit_on;
-			*data++ = tx_ring->xmit_off;
-			*data++ = tx_ring->xmit_called;
-			*data++ = tx_ring->xmit_finished;
+			data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+			qlcnic_update_stats(adapter);
 		}
 	}
+
 	memset(data, 0, stats->n_stats * sizeof(u64));
 	length = QLCNIC_STATS_LEN;
 	for (index = 0; index < length; index++) {
@@ -1260,7 +1362,7 @@
 			  enum ethtool_phys_id_state state)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
-	int max_sds_rings = adapter->max_sds_rings;
+	int drv_sds_rings = adapter->drv_sds_rings;
 	int err = -EIO, active = 1;
 
 	if (qlcnic_83xx_check(adapter))
@@ -1318,7 +1420,7 @@
 	}
 
 	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
-		qlcnic_diag_free_res(dev, max_sds_rings);
+		qlcnic_diag_free_res(dev, drv_sds_rings);
 
 	if (!active || err)
 		clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
@@ -1659,7 +1761,6 @@
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 	bool valid_mask = false;
 	int i, ret = 0;
-	u32 state;
 
 	switch (val->flag) {
 	case QLCNIC_FORCE_FW_DUMP_KEY:
@@ -1712,9 +1813,8 @@
 
 	case QLCNIC_SET_QUIESCENT:
 	case QLCNIC_RESET_QUIESCENT:
-		state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-		if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-			netdev_info(netdev, "Device in FAILED state\n");
+		if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+			netdev_info(netdev, "Device is in non-operational state\n");
 		break;
 
 	default:
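
The ethtool register dump is no longer sized by a fixed QLCNIC_RING_REGS_LEN; qlcnic_get_regs_len() now scales with the live ring configuration (five u32s per Tx ring: hw/sw consumer, CRB/sw producer, intr mask; two per RDS ring; three per SDS ring; plus the five marker/count words the "+ 5" accounts for), which is also why QLCNIC_ETHTOOL_REGS_VER moves to 4 so parsers can tell the layouts apart. A standalone re-derivation of the arithmetic, with illustrative ring counts:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative version of qlcnic_get_ring_regs_len(): word counts
	 * per ring type as in the patch; values are made up, not read from
	 * hardware. */
	static size_t ring_regs_len(int tx_rings, int rds_rings, int sds_rings)
	{
		int cnt = tx_rings * 5 + rds_rings * 2 + sds_rings * 3 + 5;

		return (size_t)cnt * sizeof(uint32_t);
	}

	int main(void)
	{
		/* e.g. 4 Tx rings, 2 RDS rings, 4 SDS rings */
		printf("%zu bytes\n", ring_regs_len(4, 2, 4)); /* (20+4+12+5)*4 = 164 */
		return 0;
	}
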
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index b64e2be..6f7f60c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -445,7 +445,7 @@
 
 	mac_req = (struct qlcnic_mac_req *)&req.words[0];
 	mac_req->op = op;
-	memcpy(mac_req->mac_addr, addr, 6);
+	memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
 	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
 	vlan_req->vlan_id = cpu_to_le16(vlan_id);
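
Minor but worth calling out: the open-coded length 6 in the MAC copy becomes ETH_ALEN. A trivial standalone illustration (the constant is redefined locally only so the snippet builds outside the kernel tree; the address is made up):

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6 /* normally from <linux/if_ether.h> */

	int main(void)
	{
		unsigned char src[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
		unsigned char dst[ETH_ALEN];

		/* same copy as the patched hunk: a named constant, no magic 6 */
		memcpy(dst, src, ETH_ALEN);

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
		return 0;
	}
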
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 272c356..13303e7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -146,6 +146,12 @@
 #define QLCNIC_MBX_PORT_RSP_OK	0x1a
 #define QLCNIC_MBX_ASYNC_EVENT	BIT_15
 
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS		8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS	4
+#define QLCNIC_MAX_TX_RINGS		8
+#define QLCNIC_MAX_SDS_RINGS		8
+
 struct qlcnic_pci_info;
 struct qlcnic_info;
 struct qlcnic_cmd_args;
@@ -176,7 +182,7 @@
 void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
 			  struct qlcnic_cmd_args *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 66c26cf..e9c21e5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -236,7 +236,7 @@
 		spin_lock_init(&rds_ring->lock);
 	}
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		sds_ring->irq = adapter->msix_entries[ring].vector;
 		sds_ring->adapter = adapter;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 11b4bb8..0149c94 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,10 +581,7 @@
 			goto drop_packet;
 	}
 
-	if (qlcnic_check_multi_tx(adapter))
-		tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
-	else
-		tx_ring = &adapter->tx_ring[0];
+	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
 	num_txd = tx_ring->num_desc;
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -607,8 +604,7 @@
 		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 			netif_tx_start_queue(tx_ring->txq);
 		} else {
-			adapter->stats.xmit_off++;
-			tx_ring->xmit_off++;
+			tx_ring->tx_stats.xmit_off++;
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -669,9 +665,8 @@
 	if (adapter->drv_mac_learn)
 		qlcnic_send_filter(adapter, first_desc, skb);
 
-	adapter->stats.txbytes += skb->len;
-	adapter->stats.xmitcalled++;
-	tx_ring->xmit_called++;
+	tx_ring->tx_stats.tx_bytes += skb->len;
+	tx_ring->tx_stats.xmit_called++;
 
 	qlcnic_update_cmd_producer(tx_ring);
 
@@ -789,6 +784,9 @@
 	struct net_device *netdev = adapter->netdev;
 	struct qlcnic_skb_frag *frag;
 
+	if (!spin_trylock(&adapter->tx_clean_lock))
+		return 1;
+
 	sw_consumer = tx_ring->sw_consumer;
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
@@ -805,8 +803,7 @@
 					       PCI_DMA_TODEVICE);
 				frag->dma = 0ULL;
 			}
-			adapter->stats.xmitfinished++;
-			tx_ring->xmit_finished++;
+			tx_ring->tx_stats.xmit_finished++;
 			dev_kfree_skb_any(buffer->skb);
 			buffer->skb = NULL;
 		}
@@ -823,8 +820,7 @@
 		    netif_carrier_ok(netdev)) {
 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 				netif_tx_wake_queue(tx_ring->txq);
-				adapter->stats.xmit_on++;
-				tx_ring->xmit_on++;
+				tx_ring->tx_stats.xmit_on++;
 			}
 		}
 		adapter->tx_timeo_cnt = 0;
@@ -844,6 +840,7 @@
 	 */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 	done = (sw_consumer == hw_consumer);
+	spin_unlock(&adapter->tx_clean_lock);
 
 	return done;
 }
@@ -1011,7 +1008,7 @@
 		}
 		break;
 	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
-		qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
 		break;
 	default:
 		break;
@@ -1463,18 +1460,18 @@
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct qlcnic_host_tx_ring *tx_ring;
 
-	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
 		return -ENOMEM;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (qlcnic_check_multi_tx(adapter) &&
 		    !adapter->ahw->diag_test &&
-		    (adapter->max_drv_tx_rings > 1)) {
+		    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
 			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
 				       NAPI_POLL_WEIGHT);
 		} else {
-			if (ring == (adapter->max_sds_rings - 1))
+			if (ring == (adapter->drv_sds_rings - 1))
 				netif_napi_add(netdev, &sds_ring->napi,
 					       qlcnic_poll,
 					       NAPI_POLL_WEIGHT);
@@ -1491,7 +1488,7 @@
 	}
 
 	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
 				       NAPI_POLL_WEIGHT);
@@ -1508,7 +1505,7 @@
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct qlcnic_host_tx_ring *tx_ring;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		netif_napi_del(&sds_ring->napi);
 	}
@@ -1516,7 +1513,7 @@
 	qlcnic_free_sds_rings(adapter->recv_ctx);
 
 	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			netif_napi_del(&tx_ring->napi);
 		}
@@ -1535,7 +1532,7 @@
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		napi_enable(&sds_ring->napi);
 		qlcnic_enable_int(sds_ring);
@@ -1544,8 +1541,8 @@
 	if (qlcnic_check_multi_tx(adapter) &&
 	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !adapter->ahw->diag_test &&
-	    (adapter->max_drv_tx_rings > 1)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			napi_enable(&tx_ring->napi);
 			qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1563,7 +1560,7 @@
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		qlcnic_disable_int(sds_ring);
 		napi_synchronize(&sds_ring->napi);
@@ -1573,7 +1570,7 @@
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !adapter->ahw->diag_test &&
 	    qlcnic_check_multi_tx(adapter)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			qlcnic_disable_tx_int(adapter, tx_ring);
 			napi_synchronize(&tx_ring->napi);
@@ -1911,7 +1908,7 @@
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		napi_enable(&sds_ring->napi);
 		if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1920,7 +1917,7 @@
 
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			napi_enable(&tx_ring->napi);
 			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
@@ -1938,7 +1935,7 @@
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (adapter->flags & QLCNIC_MSIX_ENABLED)
 			qlcnic_83xx_disable_intr(adapter, sds_ring);
@@ -1948,7 +1945,7 @@
 
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
 			napi_synchronize(&tx_ring->napi);
@@ -1965,10 +1962,10 @@
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
-	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
 		return -ENOMEM;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
@@ -1994,7 +1991,7 @@
 
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			netif_napi_add(netdev, &tx_ring->napi,
 				       qlcnic_83xx_msix_tx_poll,
@@ -2012,7 +2009,7 @@
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct qlcnic_host_tx_ring *tx_ring;
 
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		netif_napi_del(&sds_ring->napi);
 	}
@@ -2021,7 +2018,7 @@
 
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			netif_napi_del(&tx_ring->napi);
 		}
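
The tx_clean_lock used in this file (initialized in qlcnic_main.c below) serializes Tx-buffer reclaim against device teardown: the reclaim path only trylocks and backs off when the lock is contended, while __qlcnic_down() takes the lock outright before releasing every buffer, so teardown can never race an in-flight reclaim pass. A user-space sketch of that shape, using pthread spinlocks as stand-ins (note the inverted return convention: kernel spin_trylock() returns nonzero on success, pthread_spin_trylock() returns 0):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t tx_clean_lock;

	/* Reclaim path: must not wait behind teardown, so it only tries the
	 * lock and skips the pass when it is already held. */
	static int tx_clean(void)
	{
		if (pthread_spin_trylock(&tx_clean_lock) != 0)
			return 1; /* contended: skip this pass */

		/* ... walk the ring, unmap and free completed Tx buffers ... */

		pthread_spin_unlock(&tx_clean_lock);
		return 0;
	}

	/* Teardown path: takes the lock unconditionally before releasing
	 * the buffers. */
	static void tx_down(void)
	{
		pthread_spin_lock(&tx_clean_lock);
		/* ... disable Tx queues, release all Tx buffers ... */
		pthread_spin_unlock(&tx_clean_lock);
	}

	int main(void)
	{
		pthread_spin_init(&tx_clean_lock, PTHREAD_PROCESS_PRIVATE);
		printf("reclaim pass: %s\n", tx_clean() ? "skipped" : "done");
		tx_down();
		pthread_spin_destroy(&tx_clean_lock);
		return 0;
	}
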
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d8f4897e..05c1eef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -548,36 +548,75 @@
 	.io_resume			= qlcnic_82xx_io_resume,
 };
 
-static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	int num_tx_q;
 
-	if (ahw->msix_supported &&
+	if (qlcnic_82xx_check(adapter) &&
 	    (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
-		num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
-				 num_online_cpus());
-		if (num_tx_q > 1) {
-			test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
-					 &adapter->state);
-			adapter->max_drv_tx_rings = num_tx_q;
-		}
+		test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+		return 0;
 	} else {
-		adapter->max_drv_tx_rings = 1;
+		return 1;
 	}
 }
 
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+			    int queue_type)
+{
+	int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+	if (queue_type == QLCNIC_RX_QUEUE)
+		max_rings = adapter->max_sds_rings;
+	else if (queue_type == QLCNIC_TX_QUEUE)
+		max_rings = adapter->max_tx_rings;
+
+	num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+					      max_rings));
+
+	if (ring_cnt > num_rings)
+		return num_rings;
+	else
+		return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+	/* 83xx adapter does not have max_tx_rings initialized in probe */
+	if (adapter->max_tx_rings)
+		adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+							 QLCNIC_TX_QUEUE);
+	else
+		adapter->drv_tx_rings = tx_cnt;
+
+	dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
+		 adapter->drv_tx_rings);
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+	/* 83xx adapter does not have max_sds_rings initialized in probe */
+	if (adapter->max_sds_rings)
+		adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+							  QLCNIC_RX_QUEUE);
+	else
+		adapter->drv_sds_rings = rx_cnt;
+
+	dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
+		 adapter->drv_sds_rings);
+}
+
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	int max_tx_rings, max_sds_rings, tx_vector;
+	int drv_tx_rings, drv_sds_rings, tx_vector;
 	int err = -1, i;
 
 	if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
-		max_tx_rings = 0;
+		drv_tx_rings = 0;
 		tx_vector = 0;
 	} else {
-		max_tx_rings = adapter->max_drv_tx_rings;
+		drv_tx_rings = adapter->drv_tx_rings;
 		tx_vector = 1;
 	}
 
@@ -589,7 +628,7 @@
 			return -ENOMEM;
 	}
 
-	adapter->max_sds_rings = 1;
+	adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
 	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
 
 	if (adapter->ahw->msix_supported) {
@@ -602,18 +641,18 @@
 			if (qlcnic_83xx_check(adapter)) {
 				adapter->ahw->num_msix = num_msix;
 				/* subtract mail box and tx ring vectors */
-				adapter->max_sds_rings = num_msix -
-							 max_tx_rings - 1;
+				adapter->drv_sds_rings = num_msix -
+							 drv_tx_rings - 1;
 			} else {
 				adapter->ahw->num_msix = num_msix;
 				if (qlcnic_check_multi_tx(adapter) &&
 				    !adapter->ahw->diag_test &&
-				    (adapter->max_drv_tx_rings > 1))
-					max_sds_rings = num_msix - max_tx_rings;
+				    (adapter->drv_tx_rings > 1))
+					drv_sds_rings = num_msix - drv_tx_rings;
 				else
-					max_sds_rings = num_msix;
+					drv_sds_rings = num_msix;
 
-				adapter->max_sds_rings = max_sds_rings;
+				adapter->drv_sds_rings = drv_sds_rings;
 			}
 			dev_info(&pdev->dev, "using msi-x interrupts\n");
 			return err;
@@ -624,13 +663,13 @@
 			if (qlcnic_83xx_check(adapter)) {
 				if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
 					return err;
-				err -= (max_tx_rings + 1);
+				err -= drv_tx_rings + 1;
 				num_msix = rounddown_pow_of_two(err);
-				num_msix += (max_tx_rings + 1);
+				num_msix += drv_tx_rings + 1;
 			} else {
 				num_msix = rounddown_pow_of_two(err);
 				if (qlcnic_check_multi_tx(adapter))
-					num_msix += max_tx_rings;
+					num_msix += drv_tx_rings;
 			}
 
 			if (num_msix) {
@@ -683,25 +722,14 @@
 	return err;
 }
 
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
 {
-	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int num_msix, err = 0;
 
-	if (!num_intr)
-		num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+	num_msix = adapter->drv_sds_rings;
 
-	if (ahw->msix_supported) {
-		num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
-						num_intr));
-		if (qlcnic_check_multi_tx(adapter)) {
-			if (txq)
-				adapter->max_drv_tx_rings = txq;
-			num_msix += adapter->max_drv_tx_rings;
-		}
-	} else {
-		num_msix = 1;
-	}
+	if (qlcnic_check_multi_tx(adapter))
+		num_msix += adapter->drv_tx_rings;
 
 	err = qlcnic_enable_msix(adapter, num_msix);
 	if (err == -ENOMEM)
@@ -819,7 +847,7 @@
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_pci_info *pci_info;
-	int i, ret = 0, j = 0;
+	int i, id = 0, ret = 0, j = 0;
 	u16 act_pci_func;
 	u8 pfn;
 
@@ -860,7 +888,8 @@
 			continue;
 
 		if (qlcnic_port_eswitch_cfg_capability(adapter)) {
-			if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+			if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+								 &id))
 				adapter->npars[j].eswitch_status = true;
 			else
 				continue;
@@ -875,15 +904,16 @@
 		adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
 		adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
 
+		memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
 		j++;
 	}
 
-	if (qlcnic_82xx_check(adapter)) {
+	/* Update eSwitch status for adapters without per port eSwitch
+	 * configuration capability
+	 */
+	if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
 		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
 			adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-	} else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
-		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-			qlcnic_enable_eswitch(adapter, i, 1);
 	}
 
 	kfree(pci_info);
@@ -1138,14 +1168,18 @@
 	adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
 	adapter->ahw->max_mtu = nic_info.max_mtu;
 
-	/* Disable NPAR for 83XX */
-	if (qlcnic_83xx_check(adapter))
-		return err;
-
-	if (adapter->ahw->capabilities & BIT_6)
+	if (adapter->ahw->capabilities & BIT_6) {
 		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
-	else
+		adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+		adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+		adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+		dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+	} else {
+		adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+		adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
 		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+	}
 
 	return err;
 }
@@ -1293,6 +1327,8 @@
 				"HAL Version: %d, Privileged function\n",
 				 adapter->ahw->fw_hal_version);
 		}
+	} else {
+		adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
 	}
 
 	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
@@ -1552,7 +1588,7 @@
 		if (qlcnic_82xx_check(adapter) ||
 		    (qlcnic_83xx_check(adapter) &&
 		     (adapter->flags & QLCNIC_MSIX_ENABLED))) {
-			num_sds_rings = adapter->max_sds_rings;
+			num_sds_rings = adapter->drv_sds_rings;
 			for (ring = 0; ring < num_sds_rings; ring++) {
 				sds_ring = &recv_ctx->sds_rings[ring];
 				if (qlcnic_82xx_check(adapter) &&
@@ -1586,7 +1622,7 @@
 		     (adapter->flags & QLCNIC_MSIX_ENABLED) &&
 		     !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
 			handler = qlcnic_msix_tx_intr;
-			for (ring = 0; ring < adapter->max_drv_tx_rings;
+			for (ring = 0; ring < adapter->drv_tx_rings;
 			     ring++) {
 				tx_ring = &adapter->tx_ring[ring];
 				snprintf(tx_ring->name, sizeof(tx_ring->name),
@@ -1614,7 +1650,7 @@
 		if (qlcnic_82xx_check(adapter) ||
 		    (qlcnic_83xx_check(adapter) &&
 		     (adapter->flags & QLCNIC_MSIX_ENABLED))) {
-			for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+			for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 				sds_ring = &recv_ctx->sds_rings[ring];
 				free_irq(sds_ring->irq, sds_ring);
 			}
@@ -1623,7 +1659,7 @@
 		     !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
 		    (qlcnic_82xx_check(adapter) &&
 		     qlcnic_check_multi_tx(adapter))) {
-			for (ring = 0; ring < adapter->max_drv_tx_rings;
+			for (ring = 0; ring < adapter->drv_tx_rings;
 			     ring++) {
 				tx_ring = &adapter->tx_ring[ring];
 				if (tx_ring->irq)
@@ -1677,7 +1713,7 @@
 
 	adapter->ahw->linkup = 0;
 
-	if (adapter->max_sds_rings > 1)
+	if (adapter->drv_sds_rings > 1)
 		qlcnic_config_rss(adapter, 1);
 
 	qlcnic_config_intr_coalesce(adapter);
@@ -1719,6 +1755,7 @@
 	if (qlcnic_sriov_vf_check(adapter))
 		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 	smp_mb();
+	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
 	adapter->ahw->linkup = 0;
 	netif_tx_disable(netdev);
@@ -1737,8 +1774,9 @@
 
 	qlcnic_reset_rx_buffers_list(adapter);
 
-	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++)
 		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+	spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -1814,16 +1852,16 @@
 	adapter->is_up = 0;
 }
 
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
-	int max_tx_rings = adapter->max_drv_tx_rings;
+	int drv_tx_rings = adapter->drv_tx_rings;
 	int ring;
 
 	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
 			qlcnic_disable_int(sds_ring);
 		}
@@ -1834,8 +1872,8 @@
 	qlcnic_detach(adapter);
 
 	adapter->ahw->diag_test = 0;
-	adapter->max_sds_rings = max_sds_rings;
-	adapter->max_drv_tx_rings = max_tx_rings;
+	adapter->drv_sds_rings = drv_sds_rings;
+	adapter->drv_tx_rings = drv_tx_rings;
 
 	if (qlcnic_attach(adapter))
 		goto out;
@@ -1901,10 +1939,10 @@
 
 	qlcnic_detach(adapter);
 
-	adapter->max_sds_rings = 1;
+	adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+	adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
 	adapter->ahw->diag_test = test;
 	adapter->ahw->linkup = 0;
-	adapter->max_drv_tx_rings = 1;
 
 	ret = qlcnic_attach(adapter);
 	if (ret) {
@@ -1925,7 +1963,7 @@
 	}
 
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
 			qlcnic_enable_int(sds_ring);
 		}
@@ -2072,7 +2110,7 @@
 		return err;
 	}
 
-	qlcnic_dcb_init_dcbnl_ops(adapter);
+	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
 
 	return 0;
 }
@@ -2098,7 +2136,7 @@
 	int ring;
 	struct qlcnic_host_tx_ring *tx_ring;
 
-	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
 		if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
 			vfree(tx_ring->cmd_buf_arr);
@@ -2116,14 +2154,14 @@
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_cmd_buffer *cmd_buf_arr;
 
-	tx_ring = kcalloc(adapter->max_drv_tx_rings,
+	tx_ring = kcalloc(adapter->drv_tx_rings,
 			  sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
 	if (tx_ring == NULL)
 		return -ENOMEM;
 
 	adapter->tx_ring = tx_ring;
 
-	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
 		tx_ring->num_desc = adapter->num_txd;
 		tx_ring->txq = netdev_get_tx_queue(netdev, ring);
@@ -2138,11 +2176,11 @@
 
 	if (qlcnic_83xx_check(adapter) ||
 	    (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
-		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			tx_ring->adapter = adapter;
 			if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-				index = adapter->max_sds_rings + ring;
+				index = adapter->drv_sds_rings + ring;
 				vector = adapter->msix_entries[index].vector;
 				tx_ring->irq = vector;
 			}
@@ -2166,17 +2204,6 @@
 		qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
-	return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
-	kfree(adapter->dcb);
-	adapter->dcb = NULL;
-}
-
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2185,6 +2212,7 @@
 	struct qlcnic_hardware_context *ahw;
 	int err, pci_using_dac = -1;
 	char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+	struct qlcnic_dcb *dcb;
 
 	if (pdev->is_virtfn)
 		return -ENODEV;
@@ -2271,6 +2299,7 @@
 	rwlock_init(&adapter->ahw->crb_lock);
 	mutex_init(&adapter->ahw->mem_lock);
 
+	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
 	qlcnic_register_dcb(adapter);
@@ -2285,38 +2314,51 @@
 			goto err_out_maintenance_mode;
 		}
 
-		qlcnic_get_multiq_capability(adapter);
-
-		if ((adapter->ahw->act_pci_func > 2) &&
-		    qlcnic_check_multi_tx(adapter)) {
-			adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
-			dev_info(&adapter->pdev->dev,
-				 "vNIC mode enabled, Set max TX rings = %d\n",
-				 adapter->max_drv_tx_rings);
+		/* compute and set default and max tx/sds rings */
+		if (adapter->ahw->msix_supported) {
+			if (qlcnic_check_multi_tx_capability(adapter) == 1)
+				qlcnic_set_tx_ring_count(adapter,
+							 QLCNIC_SINGLE_RING);
+			else
+				qlcnic_set_tx_ring_count(adapter,
+							 QLCNIC_DEF_TX_RINGS);
+			qlcnic_set_sds_ring_count(adapter,
+						  QLCNIC_DEF_SDS_RINGS);
+		} else {
+			qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+			qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
 		}
 
-		if (!qlcnic_check_multi_tx(adapter)) {
-			clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
-			adapter->max_drv_tx_rings = 1;
-		}
 		err = qlcnic_setup_idc_param(adapter);
 		if (err)
 			goto err_out_free_hw;
 
 		adapter->flags |= QLCNIC_NEED_FLR;
 
-		if (adapter->dcb && qlcnic_dcb_attach(adapter))
-			qlcnic_clear_dcb_ops(adapter);
+		dcb = adapter->dcb;
 
+		if (dcb && qlcnic_dcb_attach(dcb))
+			qlcnic_clear_dcb_ops(dcb);
 	} else if (qlcnic_83xx_check(adapter)) {
-		adapter->max_drv_tx_rings = 1;
 		qlcnic_83xx_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
 		err = qlcnic_83xx_init(adapter, pci_using_dac);
 		if (err) {
-			dev_err(&pdev->dev, "%s: failed\n", __func__);
-			goto err_out_free_hw;
+			switch (err) {
+			case -ENOTRECOVERABLE:
+				dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
+				dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+				goto err_out_free_hw;
+			case -ENOMEM:
+				dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+				goto err_out_free_hw;
+			default:
+				dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
+				dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+				goto err_out_maintenance_mode;
+			}
 		}
+
 		if (qlcnic_sriov_vf_check(adapter))
 			return 0;
 	} else {
@@ -2344,7 +2386,7 @@
 			 "Device does not support MSI interrupts\n");
 
 	if (qlcnic_82xx_check(adapter)) {
-		err = qlcnic_setup_intr(adapter, 0, 0);
+		err = qlcnic_setup_intr(adapter);
 		if (err) {
 			dev_err(&pdev->dev, "Failed to setup interrupt\n");
 			goto err_out_disable_msi;
@@ -2414,13 +2456,20 @@
 	pci_release_regions(pdev);
 
 err_out_disable_pdev:
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 	return err;
 
 err_out_maintenance_mode:
+	set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
 	netdev->netdev_ops = &qlcnic_netdev_failed_ops;
 	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+	ahw->port_type = QLCNIC_XGBE;
+
+	if (qlcnic_83xx_check(adapter))
+		adapter->tgt_status_reg = NULL;
+	else
+		ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
 	err = register_netdev(netdev);
 
 	if (err) {
@@ -2451,7 +2500,7 @@
 	qlcnic_cancel_idc_work(adapter);
 	ahw = adapter->ahw;
 
-	qlcnic_dcb_free(adapter);
+	qlcnic_dcb_free(adapter->dcb);
 
 	unregister_netdev(netdev);
 	qlcnic_sriov_cleanup(adapter);
@@ -2490,7 +2539,6 @@
 	pci_disable_pcie_error_reporting(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	if (adapter->qlcnic_wq) {
 		destroy_workqueue(adapter->qlcnic_wq);
@@ -2543,12 +2591,11 @@
 static int qlcnic_open(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	u32 state;
 	int err;
 
-	state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-	if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
-		netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+	if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+		netdev_err(netdev, "%s: Device is in non-operational state\n",
+			   __func__);
 
 		return -EIO;
 	}
@@ -2710,24 +2757,21 @@
 						      QLCNIC_FORCE_FW_DUMP_KEY);
 	} else {
 		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
-		if (qlcnic_82xx_check(adapter)) {
-			for (ring = 0; ring < adapter->max_drv_tx_rings;
-			     ring++) {
-				tx_ring = &adapter->tx_ring[ring];
-				dev_info(&netdev->dev, "ring=%d\n", ring);
-				dev_info(&netdev->dev, "crb_intr_mask=%d\n",
-					 readl(tx_ring->crb_intr_mask));
-				dev_info(&netdev->dev, "producer=%d\n",
-					 readl(tx_ring->crb_cmd_producer));
-				dev_info(&netdev->dev, "sw_consumer = %d\n",
-					 tx_ring->sw_consumer);
-				dev_info(&netdev->dev, "hw_consumer = %d\n",
-					 le32_to_cpu(*(tx_ring->hw_consumer)));
-				dev_info(&netdev->dev, "xmit-on=%llu\n",
-					 tx_ring->xmit_on);
-				dev_info(&netdev->dev, "xmit-off=%llu\n",
-					 tx_ring->xmit_off);
-			}
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			netdev_info(netdev, "Tx ring=%d\n", ring);
+			netdev_info(netdev,
+				    "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
+				    readl(tx_ring->crb_intr_mask),
+				    readl(tx_ring->crb_cmd_producer),
+				    tx_ring->sw_consumer,
+				    le32_to_cpu(*(tx_ring->hw_consumer)));
+			netdev_info(netdev,
+				    "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+				    tx_ring->tx_stats.xmit_finished,
+				    tx_ring->tx_stats.xmit_called,
+				    tx_ring->tx_stats.xmit_on,
+				    tx_ring->tx_stats.xmit_off);
 		}
 		adapter->ahw->reset_context = 1;
 	}
@@ -2841,7 +2885,7 @@
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
 	disable_irq(adapter->irq);
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		qlcnic_intr(adapter->irq, sds_ring);
 	}
@@ -3261,8 +3305,9 @@
 		return;
 
 	state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-	if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
-		netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+
+	if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+		netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
 			   __func__);
 		qlcnic_api_unlock(adapter);
 
@@ -3329,7 +3374,7 @@
 		return;
 	}
 attach:
-	qlcnic_dcb_get_info(adapter);
+	qlcnic_dcb_get_info(adapter->dcb);
 
 	if (netif_running(netdev)) {
 		if (qlcnic_up(adapter, netdev))
@@ -3354,6 +3399,8 @@
 static int
 qlcnic_check_health(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
 	u32 state = 0, heartbeat;
 	u32 peg_status;
 	int err = 0;
@@ -3378,7 +3425,7 @@
 		if (adapter->need_fw_reset)
 			goto detach;
 
-		if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+		if (ahw->reset_context && qlcnic_auto_fw_reset)
 			qlcnic_reset_hw_context(adapter);
 
 		return 0;
@@ -3421,6 +3468,9 @@
 
 		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
 		QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+	} else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+		   adapter->flags & QLCNIC_FW_RESET_OWNER) {
+		qlcnic_dump_fw(adapter);
 	}
 
 	return 1;
@@ -3502,7 +3552,7 @@
 	qlcnic_clr_drv_state(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-	err = qlcnic_setup_intr(adapter, 0, 0);
+	err = qlcnic_setup_intr(adapter);
 
 	if (err) {
 		kfree(adapter->msix_entries);
@@ -3647,130 +3697,90 @@
 	return err;
 }
 
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+			  int queue_type)
 {
 	struct net_device *netdev = adapter->netdev;
-	u8 max_hw = QLCNIC_MAX_TX_RINGS;
-	u32 max_allowed;
+	u8 max_hw_rings = 0;
+	char buf[8];
+	int cur_rings;
+
+	if (queue_type == QLCNIC_RX_QUEUE) {
+		max_hw_rings = adapter->max_sds_rings;
+		cur_rings = adapter->drv_sds_rings;
+		strcpy(buf, "SDS");
+	} else if (queue_type == QLCNIC_TX_QUEUE) {
+		max_hw_rings = adapter->max_tx_rings;
+		cur_rings = adapter->drv_tx_rings;
+		strcpy(buf, "Tx");
+	}
 
 	if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
-		netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+		netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
 		return -EINVAL;
 	}
 
-	if (!qlcnic_check_multi_tx(adapter)) {
-		netdev_err(netdev, "No Multi TX-Q support\n");
+	if (adapter->flags & QLCNIC_MSI_ENABLED) {
+		netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
 		return -EINVAL;
 	}
 
-	if (txq > QLCNIC_MAX_TX_RINGS) {
-		netdev_err(netdev, "Invalid ring count\n");
+	if (ring_cnt < 2) {
+		netdev_err(netdev,
+			   "%s rings value should not be lower than 2\n", buf);
 		return -EINVAL;
 	}
 
-	max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
-						 num_online_cpus()));
-	if ((txq > max_allowed) || !is_power_of_2(txq)) {
-		if (!is_power_of_2(txq))
-			netdev_err(netdev,
-				   "TX queue should be a power of 2\n");
-		if (txq > num_online_cpus())
-			netdev_err(netdev,
-				   "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
-				   num_online_cpus());
-		netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+	if (!is_power_of_2(ring_cnt)) {
+		netdev_err(netdev, "%s rings value should be a power of 2\n",
+			   buf);
+		return -EINVAL;
+	}
+
+	if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+	    !qlcnic_check_multi_tx(adapter)) {
+			netdev_err(netdev, "No Multi Tx queue support\n");
+			return -EINVAL;
+	}
+
+	if (ring_cnt > num_online_cpus()) {
+		netdev_err(netdev,
+			   "%s value[%u] should not be higher than, number of online CPUs\n",
+			   buf, num_online_cpus());
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
-int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
-				__u32 val)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
 {
 	struct net_device *netdev = adapter->netdev;
-	u8 max_hw = adapter->ahw->max_rx_ques;
-	u32 max_allowed;
-
-	if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
-		netdev_err(netdev, "No RSS support in INT-x mode\n");
-		return -EINVAL;
-	}
-
-	if (val > QLCNIC_MAX_SDS_RINGS) {
-		netdev_err(netdev, "RSS value should not be higher than %u\n",
-			   QLCNIC_MAX_SDS_RINGS);
-		return -EINVAL;
-	}
-
-	max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
-						 num_online_cpus()));
-	if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
-		if (!is_power_of_2(val))
-			netdev_err(netdev, "RSS value should be a power of 2\n");
-
-		if (val < 2)
-			netdev_err(netdev, "RSS value should not be lower than 2\n");
-
-		if (val > max_hw)
-			netdev_err(netdev,
-				   "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
-				   max_hw);
-
-		if (val > num_online_cpus())
-			netdev_err(netdev,
-				   "RSS value should not be higher than[%u], number of online CPUs in the system\n",
-				   num_online_cpus());
-
-		netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
-
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
-{
 	int err;
-	struct net_device *netdev = adapter->netdev;
-	int num_msix;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
-	if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
-	    !qlcnic_use_msi) {
-		netdev_err(netdev, "No RSS support in INT-x mode\n");
-		return -EINVAL;
-	}
-
 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
 	qlcnic_detach(adapter);
 
-	if (qlcnic_82xx_check(adapter)) {
-		if (txq != 0)
-			adapter->max_drv_tx_rings = txq;
-
-		if (qlcnic_check_multi_tx(adapter) &&
-		    (txq > adapter->max_drv_tx_rings))
-			num_msix = adapter->max_drv_tx_rings;
-		else
-			num_msix = data;
-	}
-
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_enable_mbx_poll(adapter);
 	}
 
-	netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
-
 	qlcnic_teardown_intr(adapter);
 
-	err = qlcnic_setup_intr(adapter, data, txq);
+	/* compute and set default and max tx/sds rings */
+	qlcnic_set_tx_ring_count(adapter, tx_cnt);
+	qlcnic_set_sds_ring_count(adapter, rx_cnt);
+
+	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
+	err = qlcnic_setup_intr(adapter);
 	if (err) {
 		kfree(adapter->msix_entries);
 		netdev_err(netdev, "failed to setup interrupt\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 1551360..7763962 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1187,41 +1187,38 @@
 		}
 
 		if (ops_index == ops_cnt) {
-			dev_info(&adapter->pdev->dev,
-				 "Invalid entry type %d, exiting dump\n",
+			dev_info(dev, "Skipping unknown entry opcode %d\n",
 				 entry->hdr.type);
-			goto error;
+			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+			entry_offset += entry->hdr.offset;
+			continue;
 		}
 
 		/* Collect dump for this entry */
 		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-		if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
 			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+			entry_offset += entry->hdr.offset;
+			continue;
+		}
+
 		buf_offset += entry->hdr.cap_size;
 		entry_offset += entry->hdr.offset;
 		buffer = fw_dump->data + buf_offset;
 	}
-	if (dump_size != buf_offset) {
-		dev_info(&adapter->pdev->dev,
-			 "Captured(%d) and expected size(%d) do not match\n",
-			 buf_offset, dump_size);
-		goto error;
-	} else {
-		fw_dump->clr = 1;
-		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-			 adapter->netdev->name);
-		dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
-			 adapter->netdev->name, fw_dump->size);
-		/* Send a udev event to notify availability of FW dump */
-		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-		return 0;
-	}
-error:
+
+	fw_dump->clr = 1;
+	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+	dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+		 adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+	/* Send a udev event to notify availability of FW dump */
+	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
 	if (fw_dump->use_pex_dma)
 		dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
 				  fw_dump->dma_buffer, fw_dump->phys_addr);
-	vfree(fw_dump->data);
-	return -EINVAL;
+
+	return 0;
 }
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
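The minidump hunk above turns an unknown template opcode from a fatal error into a soft skip: the entry is flagged QLCNIC_DUMP_SKIP and the walk advances by the entry's own offset field. A self-contained sketch of that walk, with a simplified entry layout and a placeholder known_opcode() in place of the real fw_dump_ops[] lookup:

    #include <stdint.h>
    #include <stdio.h>

    #define SKIP_FLAG 0x80    /* stand-in for QLCNIC_DUMP_SKIP */

    struct entry_hdr {
        uint32_t type;
        uint32_t offset;    /* byte distance to the next entry */
        uint32_t flags;
    };

    /* Placeholder for the real fw_dump_ops[] opcode lookup */
    static int known_opcode(uint32_t type)
    {
        return type != 0xdead;
    }

    int main(void)
    {
        struct entry_hdr tmpl[3] = {
            { .type = 1,      .offset = sizeof(struct entry_hdr) },
            { .type = 0xdead, .offset = sizeof(struct entry_hdr) },
            { .type = 2,      .offset = sizeof(struct entry_hdr) },
        };
        uint8_t *base = (uint8_t *)tmpl;
        uint32_t entry_offset = 0;

        for (int i = 0; i < 3; i++) {
            struct entry_hdr *e =
                (struct entry_hdr *)(base + entry_offset);

            if (!known_opcode(e->type)) {
                /* Flag the entry and move on; don't abort the dump */
                e->flags |= SKIP_FLAG;
                entry_offset += e->offset;
                continue;
            }
            printf("collected entry type %u\n", (unsigned)e->type);
            entry_offset += e->offset;
        }
        return 0;
    }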
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 392b9bd..21a4b27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -500,6 +500,7 @@
 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 				 int pci_using_dac)
 {
+	struct qlcnic_dcb *dcb;
 	int err;
 
 	INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -507,7 +508,11 @@
 		dev_warn(&adapter->pdev->dev,
 			 "Device does not support MSI interrupts\n");
 
-	err = qlcnic_setup_intr(adapter, 1, 0);
+	/* compute and set default and max tx/sds rings */
+	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+	err = qlcnic_setup_intr(adapter);
 	if (err) {
 		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
 		goto err_out_disable_msi;
@@ -533,8 +538,10 @@
 	if (err)
 		goto err_out_send_channel_term;
 
-	if (adapter->dcb && qlcnic_dcb_attach(adapter))
-		qlcnic_clear_dcb_ops(adapter);
+	dcb = adapter->dcb;
+
+	if (dcb && qlcnic_dcb_attach(dcb))
+		qlcnic_clear_dcb_ops(dcb);
 
 	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 	if (err)
@@ -1577,7 +1584,7 @@
 	if (err)
 		goto err_out_term_channel;
 
-	qlcnic_dcb_get_info(adapter);
+	qlcnic_dcb_get_info(adapter->dcb);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 019f437..1a9f8a4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -156,7 +156,7 @@
 				    const char *buf, size_t len)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	int err, max_sds_rings = adapter->max_sds_rings;
+	int err, drv_sds_rings = adapter->drv_sds_rings;
 	u16 beacon;
 	u8 h_beacon_state, b_state, b_rate;
 
@@ -211,7 +211,7 @@
 	}
 
 	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
-		qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+		qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
 
 out:
 	if (!ahw->beacon_state)
@@ -1272,7 +1272,6 @@
 void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
-	u32 state;
 
 	if (device_create_bin_file(dev, &bin_attr_port_stats))
 		dev_info(dev, "failed to create port stats sysfs entry");
@@ -1286,8 +1285,7 @@
 	if (device_create_bin_file(dev, &bin_attr_mem))
 		dev_info(dev, "failed to create mem sysfs entry\n");
 
-	state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-	if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+	if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
 		return;
 
 	if (device_create_bin_file(dev, &bin_attr_pci_config))
@@ -1313,7 +1311,6 @@
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
-	u32 state;
 
 	device_remove_bin_file(dev, &bin_attr_port_stats);
 
@@ -1323,8 +1320,7 @@
 	device_remove_bin_file(dev, &bin_attr_crb);
 	device_remove_bin_file(dev, &bin_attr_mem);
 
-	state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-	if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+	if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
 		return;
 
 	device_remove_bin_file(dev, &bin_attr_pci_config);
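The sysfs hunks replace two reads of the CRB device-state register with a test of a cached __QLCNIC_MAINTENANCE_MODE bit in adapter->state. A userspace analogue of the flag-caching pattern, with a plain unsigned long in place of the kernel's atomic bitops and an arbitrary bit number:

    #include <stdio.h>

    #define MAINTENANCE_MODE_BIT 5    /* arbitrary stand-in bit number */

    static int test_bit_ul(int nr, const unsigned long *addr)
    {
        return (int)((*addr >> nr) & 1UL);
    }

    int main(void)
    {
        unsigned long state = 0;

        state |= 1UL << MAINTENANCE_MODE_BIT;    /* decided once, at probe */

        /* Later checks are cheap and need no hardware access */
        if (test_bit_ul(MAINTENANCE_MODE_BIT, &state))
            printf("maintenance mode: skip the diag sysfs entries\n");
        return 0;
    }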
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8994337..0c9c4e8 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"v1.00.00.32"
+#define DRV_VERSION	"1.00.00.33"
 
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
@@ -2206,14 +2206,14 @@
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
 
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
-			       u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
-			u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+		 u16 q_id);
 void ql_queue_fw_error(struct ql_adapter *qdev);
 void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-		u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
-		struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+			  int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
 void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-			struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@
 /* #define QL_OB_DUMP */
 
 #ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
 #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
 #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
 #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@
 #endif
 
 #ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
 #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
 #else
 #define QL_DUMP_STAT(qdev)
 #endif
 
 #ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
 #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
 #else
 #define QL_DUMP_QDEV(qdev)
 #endif
 
 #ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
 #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
 #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@
 #endif
 
 #ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
 #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
 #else
@@ -2325,14 +2323,14 @@
 #endif
 
 #ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
 #else
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
 #endif
 
 #ifdef	QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
 #define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
 #else
 #define QL_DUMP_ALL(qdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf45..a245dc1 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@
 	}
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the MAC header length
+ * based on VLAN tags, if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+				  struct ib_mac_iocb_rsp *ib_mac_rsp,
+				  void *page, size_t *len)
+{
+	u16 *tags;
+
+	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		return;
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+		tags = (u16 *)page;
+		/* Look for stacked vlan tags in ethertype field */
+		if (tags[6] == ETH_P_8021Q &&
+		    tags[8] == ETH_P_8021Q)
+			*len += 2 * VLAN_HLEN;
+		else
+			*len += VLAN_HLEN;
+	}
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 					struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@
 	void *addr;
 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 	struct napi_struct *napi = &rx_ring->napi;
+	size_t hlen = ETH_HLEN;
 
 	skb = netdev_alloc_skb(ndev, length);
 	if (!skb) {
@@ -1540,25 +1566,28 @@
 		goto err_out;
 	}
 
+	/* Update the MAC header length */
+	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
 	/* The max framesize filter on this chip is set higher than
 	 * MTU since FCoE uses 2k frames.
 	 */
-	if (skb->len > ndev->mtu + ETH_HLEN) {
+	if (skb->len > ndev->mtu + hlen) {
 		netif_err(qdev, drv, qdev->ndev,
 			  "Segment too small, dropping.\n");
 		rx_ring->rx_dropped++;
 		goto err_out;
 	}
-	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+	memcpy(skb_put(skb, hlen), addr, hlen);
 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
 		     length);
 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-				length-ETH_HLEN);
-	skb->len += length-ETH_HLEN;
-	skb->data_len += length-ETH_HLEN;
-	skb->truesize += length-ETH_HLEN;
+				lbq_desc->p.pg_chunk.offset + hlen,
+				length - hlen);
+	skb->len += length - hlen;
+	skb->data_len += length - hlen;
+	skb->truesize += length - hlen;
 
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@
 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
 			/* Unfragmented ipv4 UDP frame. */
 			struct iphdr *iph =
-				(struct iphdr *) ((u8 *)addr + ETH_HLEN);
+				(struct iphdr *)((u8 *)addr + hlen);
 			if (!(iph->frag_off &
 				htons(IP_MF|IP_OFFSET))) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@
 	struct bq_desc *sbq_desc;
 	struct sk_buff *skb = NULL;
 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+	size_t hlen = ETH_HLEN;
 
 	/*
 	 * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@
 			skb->data_len += length;
 			skb->truesize += length;
 			length -= length;
-			__pskb_pull_tail(skb,
-				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-				VLAN_ETH_HLEN : ETH_HLEN);
+			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+					      lbq_desc->p.pg_chunk.va,
+					      &hlen);
+			__pskb_pull_tail(skb, hlen);
 		}
 	} else {
 		/*
@@ -1910,8 +1941,9 @@
 			length -= size;
 			i++;
 		}
-		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-				VLAN_ETH_HLEN : ETH_HLEN);
+		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+				      &hlen);
+		__pskb_pull_tail(skb, hlen);
 	}
 	return skb;
 }
@@ -2003,7 +2035,7 @@
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+	if (vlan_id != 0xffff)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@
 					struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 
@@ -2310,9 +2343,39 @@
 	}
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+					netdev_features_t features)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status = 0;
+
+	status = ql_adapter_down(qdev);
+	if (status) {
+		netif_err(qdev, link, qdev->ndev,
+			  "Failed to bring down the adapter\n");
+		return status;
+	}
+
+	/* update the features with the recent change */
+	ndev->features = features;
+
+	status = ql_adapter_up(qdev);
+	if (status) {
+		netif_err(qdev, link, qdev->ndev,
+			  "Failed to bring up the adapter\n");
+		return status;
+	}
+	return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
 	netdev_features_t features)
 {
+	int err;
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
 	 * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@
 	else
 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+	/* Update the behavior of vlan accel in the adapter */
+	err = qlge_update_hw_vlan_features(ndev, features);
+	if (err)
+		return err;
+
 	return features;
 }
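
qlge_update_hw_vlan_features() above applies the feature flip by cycling the adapter down and back up, since the VLAN-stripping behaviour is programmed into NIC_RCV_CFG at init time. A generic sketch of that quiesce-reconfigure-resume shape, with hypothetical dev_down()/dev_up() hooks:

    #include <stdio.h>

    struct dev { unsigned int features; int up; };

    static int dev_down(struct dev *d) { d->up = 0; return 0; }
    static int dev_up(struct dev *d)   { d->up = 1; return 0; }

    static int update_features(struct dev *d, unsigned int features)
    {
        int rc = dev_down(d);    /* quiesce traffic first */

        if (rc)
            return rc;
        d->features = features;  /* safe to reprogram while down */
        return dev_up(d);        /* re-init picks up the new behaviour */
    }

    int main(void)
    {
        struct dev d = { .features = 0, .up = 1 };

        update_features(&d, 0x1);
        printf("features=%#x up=%d\n", d.features, d.up);
        return 0;
    }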
 
@@ -3704,8 +3772,12 @@
 	ql_write32(qdev, SYS, mask | value);
 
 	/* Set the default queue, and VLAN behavior. */
-	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+	value = NIC_RCV_CFG_DFQ;
+	mask = NIC_RCV_CFG_DFQ_MASK;
+	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		value |= NIC_RCV_CFG_RV;
+		mask |= (NIC_RCV_CFG_RV << 16);
+	}
 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
 	/* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@
 		iounmap(qdev->doorbell_area);
 	vfree(qdev->mpi_coredump);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@
 
 	qdev = netdev_priv(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
-	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-		NETIF_F_TSO | NETIF_F_TSO_ECN |
-		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-	ndev->features = ndev->hw_features |
-		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+	ndev->hw_features = NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO_ECN |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_RXCSUM;
+	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
 
 	if (test_bit(QL_DMA64, &qdev->flags))
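ql_update_mac_hdr_len() in the hunks above grows the assumed MAC header by one VLAN_HLEN per 802.1Q tag (up to two, for stacked tags) whenever hardware tag stripping is disabled. A standalone sketch of the tag-counting arithmetic; unlike the driver, which compares the raw on-wire halfwords directly, this version byte-swaps with ntohs() for portability:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN    4
    #define ETH_P_8021Q 0x8100

    /* MAC header length of a frame, allowing for zero, one, or two
     * (stacked) 802.1Q tags; frame points at the destination MAC. */
    static size_t mac_hdr_len(const uint8_t *frame)
    {
        const uint16_t *words = (const uint16_t *)frame;

        if (ntohs(words[6]) != ETH_P_8021Q)  /* bytes 12-13: outer ethertype */
            return ETH_HLEN;
        if (ntohs(words[8]) == ETH_P_8021Q)  /* bytes 16-17: inner ethertype */
            return ETH_HLEN + 2 * VLAN_HLEN;
        return ETH_HLEN + VLAN_HLEN;
    }

    int main(void)
    {
        uint8_t untagged[ETH_HLEN + 4] = { 0 };

        printf("untagged header: %zu bytes\n", mac_hdr_len(untagged));
        return 0;
    }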
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index e9dc849..1e49ec5 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1231,7 +1231,6 @@
 	mdiobus_free(lp->mii_bus);
 err_out_unmap:
 	netif_napi_del(&lp->napi);
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d2e5919..f2a2128 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2052,7 +2052,6 @@
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb..da5972e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -727,7 +727,6 @@
 	pci_release_regions (pdev);
 
 	free_netdev(dev);
-	pci_set_drvdata (pdev, NULL);
 }
 
 
@@ -791,6 +790,9 @@
 
 	pci_set_master (pdev);
 
+	u64_stats_init(&tp->rx_stats.syncp);
+	u64_stats_init(&tp->tx_stats.syncp);
+
 retry:
 	/* PIO bar register comes first. */
 	bar = !use_io;
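The u64_stats_init() calls added above initialise the seqcount that guards 64-bit statistics counters on 32-bit SMP kernels. A rough userspace analogue of the reader/writer protocol, with memory ordering simplified for illustration:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        atomic_uint seq;    /* even: stable, odd: write in progress */
        uint64_t rx_bytes;
    };

    static void stats_update(struct stats *s, uint64_t n)
    {
        atomic_fetch_add(&s->seq, 1);    /* -> odd */
        s->rx_bytes += n;                /* may tear on 32-bit */
        atomic_fetch_add(&s->seq, 1);    /* -> even */
    }

    static uint64_t stats_read(struct stats *s)
    {
        unsigned int start;
        uint64_t v;

        do {    /* retry until a stable, unchanged snapshot is seen */
            start = atomic_load(&s->seq);
            v = s->rx_bytes;
        } while ((start & 1) || start != atomic_load(&s->seq));
        return v;
    }

    int main(void)
    {
        struct stats s = { .rx_bytes = 0 };

        atomic_init(&s.seq, 0);
        stats_update(&s, 1500);
        printf("rx_bytes = %llu\n", (unsigned long long)stats_read(&s));
        return 0;
    }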
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3397cee..7993875 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6811,7 +6811,6 @@
 
 	rtl_disable_msi(pdev, tp);
 	rtl8169_release_board(pdev, dev, tp->mmio_addr);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops rtl_netdev_ops = {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b57c278d..d256ce1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -483,7 +483,7 @@
 	.register_type	= SH_ETH_REG_FAST_SH4,
 
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
-	.rmcr_value	= 0x00000001,
+	.rmcr_value	= RMCR_RNC,
 
 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
 	.fdr_value	= 0x0000072f,
-	.rmcr_value	= 0x00000001,
+	.rmcr_value	= RMCR_RNC,
 
 	.irq_flags	= IRQF_SHARED,
 	.apr		= 1,
@@ -689,7 +689,7 @@
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
 	.fdr_value	= 0x0000070f,
-	.rmcr_value	= 0x00000001,
+	.rmcr_value	= RMCR_RNC,
 
 	.apr		= 1,
 	.mpr		= 1,
@@ -872,7 +872,7 @@
 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
 	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
-		memcpy(ndev->dev_addr, mac, 6);
+		memcpy(ndev->dev_addr, mac, ETH_ALEN);
 	} else {
 		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
 		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
@@ -2663,6 +2663,12 @@
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
 
+	if (!pd) {
+		dev_err(&pdev->dev, "no platform data\n");
+		ret = -EINVAL;
+		goto out_release;
+	}
+
 	/* get PHY ID */
 	mdp->phy_id = pd->phy;
 	mdp->phy_interface = pd->phy_interface;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a0db02c..f32c169 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -321,6 +321,9 @@
 #define TD_TFP	(TD_TFP1|TD_TFP0)
 
 /* RMCR */
+enum RMCR_BIT {
+	RMCR_RNC = 0x00000001,
+};
 #define DEFAULT_RMCR_VALUE	0x00000000
 
 /* ECMR */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21f9ad6..676c3c0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@
 	return rc;
 }
 
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+	unsigned int i;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+	for (i = 0; i < nic_data->n_piobufs; i++) {
+		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+			       nic_data->piobuf_handle[i]);
+		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+				  NULL, 0, NULL);
+		WARN_ON(rc);
+	}
+
+	nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+	unsigned int i;
+	size_t outlen;
+	int rc = 0;
+
+	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+	for (i = 0; i < n; i++) {
+		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			break;
+		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+			rc = -EIO;
+			break;
+		}
+		nic_data->piobuf_handle[i] =
+			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+		netif_dbg(efx, probe, efx->net_dev,
+			  "allocated PIO buffer %u handle %x\n", i,
+			  nic_data->piobuf_handle[i]);
+	}
+
+	nic_data->n_piobufs = i;
+	if (rc)
+		efx_ef10_free_piobufs(efx);
+	return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(inbuf,
+			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
+			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned int offset, index;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+	/* Link a buffer to each VI in the write-combining mapping */
+	for (index = 0; index < nic_data->n_piobufs; ++index) {
+		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+			       nic_data->piobuf_handle[index]);
+		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+			       nic_data->pio_write_vi_base + index);
+		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+				  NULL, 0, NULL);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to link VI %u to PIO buffer %u (%d)\n",
+				  nic_data->pio_write_vi_base + index, index,
+				  rc);
+			goto fail;
+		}
+		netif_dbg(efx, probe, efx->net_dev,
+			  "linked VI %u to PIO buffer %u\n",
+			  nic_data->pio_write_vi_base + index, index);
+	}
+
+	/* Link a buffer to each TX queue */
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			/* We assign the PIO buffers to queues in
+			 * reverse order to allow for the following
+			 * special case.
+			 */
+			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+				   tx_queue->channel->channel - 1) *
+				  efx_piobuf_size);
+			index = offset / ER_DZ_TX_PIOBUF_SIZE;
+			offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+			/* When the host page size is 4K, the first
+			 * host page in the WC mapping may be within
+			 * the same VI page as the last TX queue.  We
+			 * can only link one buffer to each VI.
+			 */
+			if (tx_queue->queue == nic_data->pio_write_vi_base) {
+				BUG_ON(index != 0);
+				rc = 0;
+			} else {
+				MCDI_SET_DWORD(inbuf,
+					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
+					       nic_data->piobuf_handle[index]);
+				MCDI_SET_DWORD(inbuf,
+					       LINK_PIOBUF_IN_TXQ_INSTANCE,
+					       tx_queue->queue);
+				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+						  NULL, 0, NULL);
+			}
+
+			if (rc) {
+				/* This is non-fatal; the TX path just
+				 * won't use PIO for this queue
+				 */
+				netif_err(efx, drv, efx->net_dev,
+					  "failed to link VI %u to PIO buffer %u (%d)\n",
+					  tx_queue->queue, index, rc);
+				tx_queue->piobuf = NULL;
+			} else {
+				tx_queue->piobuf =
+					nic_data->pio_write_base +
+					index * EFX_VI_PAGE_SIZE + offset;
+				tx_queue->piobuf_offset = offset;
+				netif_dbg(efx, probe, efx->net_dev,
+					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
+					  tx_queue->queue, index,
+					  tx_queue->piobuf_offset,
+					  tx_queue->piobuf);
+			}
+		}
+	}
+
+	return 0;
+
+fail:
+	while (index--) {
+		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+			       nic_data->pio_write_vi_base + index);
+		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+			     NULL, 0, NULL);
+	}
+	return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+	return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+	return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
 static void efx_ef10_remove(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@
 	/* This needs to be after efx_ptp_remove_channel() with no filters */
 	efx_ef10_rx_free_indir_table(efx);
 
+	if (nic_data->wc_membase)
+		iounmap(nic_data->wc_membase);
+
 	rc = efx_ef10_free_vis(efx);
 	WARN_ON(rc != 0);
 
+	if (!nic_data->must_restore_piobufs)
+		efx_ef10_free_piobufs(efx);
+
 	efx_mcdi_fini(efx);
 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
 	kfree(nic_data);
@@ -330,12 +511,126 @@
 	return 0;
 }
 
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-	unsigned int n_vis =
-		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	unsigned int uc_mem_map_size, wc_mem_map_size;
+	unsigned int min_vis, pio_write_vi_base, max_vis;
+	void __iomem *membase;
+	int rc;
 
-	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+	/* Try to allocate PIO buffers if wanted and if the full
+	 * number of PIO buffers would be sufficient to allocate one
+	 * copy-buffer per TX channel.  Failure is non-fatal, as there
+	 * are only a small number of PIO buffers shared between all
+	 * functions of the controller.
+	 */
+	if (efx_piobuf_size != 0 &&
+	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+	    efx->n_tx_channels) {
+		unsigned int n_piobufs =
+			DIV_ROUND_UP(efx->n_tx_channels,
+				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+		if (rc)
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to allocate PIO buffers (%d)\n", rc);
+		else
+			netif_dbg(efx, probe, efx->net_dev,
+				  "allocated %u PIO buffers\n", n_piobufs);
+	}
+#else
+	nic_data->n_piobufs = 0;
+#endif
+
+	/* PIO buffers should be mapped with write-combining enabled,
+	 * and we want to make single UC and WC mappings rather than
+	 * several of each (in fact that's the only option if host
+	 * page size is >4K).  So we may allocate some extra VIs just
+	 * for writing PIO buffers through.
+	 */
+	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+				     ER_DZ_TX_PIOBUF);
+	if (nic_data->n_piobufs) {
+		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+					       nic_data->n_piobufs) *
+					      EFX_VI_PAGE_SIZE) -
+				   uc_mem_map_size);
+		max_vis = pio_write_vi_base + nic_data->n_piobufs;
+	} else {
+		pio_write_vi_base = 0;
+		wc_mem_map_size = 0;
+		max_vis = min_vis;
+	}
+
+	/* In case the last attached driver failed to free VIs, do it now */
+	rc = efx_ef10_free_vis(efx);
+	if (rc != 0)
+		return rc;
+
+	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+	if (rc != 0)
+		return rc;
+
+	/* If we didn't get enough VIs to map all the PIO buffers, free the
+	 * PIO buffers
+	 */
+	if (nic_data->n_piobufs &&
+	    nic_data->n_allocated_vis <
+	    pio_write_vi_base + nic_data->n_piobufs) {
+		netif_dbg(efx, probe, efx->net_dev,
+			  "%u VIs are not sufficient to map %u PIO buffers\n",
+			  nic_data->n_allocated_vis, nic_data->n_piobufs);
+		efx_ef10_free_piobufs(efx);
+	}
+
+	/* Shrink the original UC mapping of the memory BAR */
+	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+	if (!membase) {
+		netif_err(efx, probe, efx->net_dev,
+			  "could not shrink memory BAR to %x\n",
+			  uc_mem_map_size);
+		return -ENOMEM;
+	}
+	iounmap(efx->membase);
+	efx->membase = membase;
+
+	/* Set up the WC mapping if needed */
+	if (wc_mem_map_size) {
+		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+						  uc_mem_map_size,
+						  wc_mem_map_size);
+		if (!nic_data->wc_membase) {
+			netif_err(efx, probe, efx->net_dev,
+				  "could not allocate WC mapping of size %x\n",
+				  wc_mem_map_size);
+			return -ENOMEM;
+		}
+		nic_data->pio_write_vi_base = pio_write_vi_base;
+		nic_data->pio_write_base =
+			nic_data->wc_membase +
+			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+			 uc_mem_map_size);
+
+		rc = efx_ef10_link_piobufs(efx);
+		if (rc)
+			efx_ef10_free_piobufs(efx);
+	}
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+		  &efx->membase_phys, efx->membase, uc_mem_map_size,
+		  nic_data->wc_membase, wc_mem_map_size);
+
+	return 0;
 }
 
 static int efx_ef10_init_nic(struct efx_nic *efx)
@@ -359,6 +654,21 @@
 		nic_data->must_realloc_vis = false;
 	}
 
+	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+		if (rc == 0) {
+			rc = efx_ef10_link_piobufs(efx);
+			if (rc)
+				efx_ef10_free_piobufs(efx);
+		}
+
+		/* Log an error on failure, but this is non-fatal */
+		if (rc)
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to restore PIO buffers (%d)\n", rc);
+		nic_data->must_restore_piobufs = false;
+	}
+
 	efx_ef10_rx_push_indir_table(efx);
 	return 0;
 }
@@ -759,6 +1069,7 @@
 	/* All our allocations have been reset */
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
+	nic_data->must_restore_piobufs = true;
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
 	/* The datapath firmware might have been changed */
@@ -2180,7 +2491,7 @@
 	return rc;
 }
 
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
 	/* no need to do anything here on EF10 */
 }
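efx_ef10_link_piobufs() above assigns PIO buffers to TX queues in reverse channel order, then splits the resulting linear offset into a buffer index and an intra-buffer offset. The arithmetic reproduced standalone; PIOBUF_SIZE and COPY_BUF_SIZE are stand-ins for ER_DZ_TX_PIOBUF_SIZE and efx_piobuf_size:

    #include <stdio.h>

    #define PIOBUF_SIZE   2048    /* stand-in for ER_DZ_TX_PIOBUF_SIZE */
    #define COPY_BUF_SIZE  256    /* stand-in for efx_piobuf_size */

    int main(void)
    {
        unsigned int n_tx_channels = 4, tx_channel_offset = 0;

        for (unsigned int ch = 0; ch < n_tx_channels; ch++) {
            /* Reverse order, as in efx_ef10_link_piobufs() */
            unsigned int off = (tx_channel_offset + n_tx_channels
                                - ch - 1) * COPY_BUF_SIZE;
            unsigned int index = off / PIOBUF_SIZE;  /* which buffer */
            unsigned int rem   = off % PIOBUF_SIZE;  /* where in it */

            printf("channel %u -> piobuf %u offset %#x\n",
                   ch, index, rem);
        }
        return 0;
    }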
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e37..207ac9a 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
 #define	ESF_DZ_TX_PIO_TYPE_WIDTH 1
 #define	ESF_DZ_TX_PIO_OPT_LBN 60
 #define	ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_PIO 1
 #define	ESF_DZ_TX_PIO_CONT_LBN 59
 #define	ESF_DZ_TX_PIO_CONT_WIDTH 1
 #define	ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 07c9bc4..2e27837 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1121,7 +1121,7 @@
 	 */
 	while (dma_mask > 0x7fffffffUL) {
 		if (dma_supported(&pci_dev->dev, dma_mask)) {
-			rc = dma_set_mask(&pci_dev->dev, dma_mask);
+			rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
 			if (rc == 0)
 				break;
 		}
@@ -1134,16 +1134,6 @@
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
-	if (rc) {
-		/* dma_set_coherent_mask() is not *allowed* to
-		 * fail with a mask that dma_set_mask() accepted,
-		 * but just in case...
-		 */
-		netif_err(efx, probe, efx->net_dev,
-			  "failed to set consistent DMA mask\n");
-		goto fail2;
-	}
 
 	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
 	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
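The efx.c hunk swaps the paired dma_set_mask()/dma_set_coherent_mask() calls for dma_set_mask_and_coherent(), which also lets the "should never fail" fallback path go away. The enclosing loop still walks the mask down a bit at a time; a standalone sketch of that walk, with a hypothetical accept() in place of the DMA API call:

    #include <stdio.h>

    /* Hypothetical stand-in for dma_set_mask_and_coherent() */
    static int accept(unsigned long long mask)
    {
        return mask <= 0x3fffffffffffULL;    /* pretend 46 bits is the cap */
    }

    int main(void)
    {
        unsigned long long dma_mask = (1ULL << 46) - 1;

        /* Walk the mask down until the platform takes it; give up
         * below 31 bits, as the loop above does. */
        while (dma_mask > 0x7fffffffULL) {
            if (accept(dma_mask))
                break;
            dma_mask >>= 1;
        }
        printf("using DMA mask %llx\n", dma_mask);
        return 0;
    }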
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5..b8235ee 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+				struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
 
 /* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
-			  unsigned int index, unsigned int n_frags,
-			  unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+		   unsigned int n_frags, unsigned int len, u16 flags);
 static inline void efx_rx_flush_packet(struct efx_channel *channel)
 {
 	if (channel->rx_pkt_n_frags)
 		__efx_rx_packet(channel);
 }
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@
 	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
 #ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-			  u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+		   u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
 static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 {
 	if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@
 static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #define efx_filter_rfs_enabled() 0
 #endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
 /* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
 
 /* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
 
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
-				   unsigned int rx_usecs, bool rx_adaptive,
-				   bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
-				   unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+			    unsigned int rx_usecs, bool rx_adaptive,
+			    bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+			    unsigned int *rx_usecs, bool *rx_adaptive);
 
 /* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
-		       size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+		size_t n_parts, size_t sizeof_part);
 static inline int efx_mtd_probe(struct efx_nic *efx)
 {
 	return efx->type->mtd_probe(efx);
 }
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
 #else
 static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@
 	efx_schedule_channel(channel);
 }
 
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf..1f529fa 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@
 	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
 	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
 	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
 	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
 	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@
 	return 0;
 }
 
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
-			    struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+				   struct ethtool_ts_info *ts_info)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507..4d3f119 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
 #define EFX_USE_QWORD_IO 1
 #endif
 
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
 #ifdef EFX_USE_QWORD_IO
 static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
 				  unsigned int reg)
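The EFX_USE_PIO gate added above keys the whole PIO path off ARCH_HAS_IOREMAP_WC: where the architecture cannot produce a write-combined mapping, the stub efx_ef10_*_piobufs() variants earlier in ef10.c compile in instead. A trivial sketch of the same compile-time gating, with USE_WC as a stand-in macro:

    #include <stdio.h>

    /* USE_WC stands in for ARCH_HAS_IOREMAP_WC; build with -DUSE_WC
     * to take the fast path. */
    #ifdef USE_WC
    static const char *tx_path(void) { return "PIO via write-combined BAR"; }
    #else
    static const char *tx_path(void) { return "DMA descriptors only"; }
    #endif

    int main(void)
    {
        printf("TX path: %s\n", tx_path());
        return 0;
    }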
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4..656a327 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@
 }
 #endif
 
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
 
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-			const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+		 size_t inlen, efx_dword_t *outbuf, size_t outlen,
+		 size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			efx_dword_t *outbuf, size_t outlen,
 			size_t *outlen_actual);
 
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-			      const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-			       efx_dword_t *outbuf, size_t outlen,
-			       size_t *outlen_actual);
-
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
 				      unsigned long cookie, int rc,
 				      efx_dword_t *outbuf,
 				      size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-			      const efx_dword_t *inbuf, size_t inlen,
-			      size_t outlen,
-			      efx_mcdi_async_completer *complete,
-			      unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+		       efx_mcdi_async_completer *complete,
+		       unsigned long cookie);
 
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
 
-extern void efx_mcdi_process_event(struct efx_channel *channel,
-				   efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 
 /* We expect that 16- and 32-bit fields in MCDI requests and responses
  * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@
 #define MCDI_EVENT_FIELD(_ev, _field)			\
 	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
-				  u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
-			     u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
-			       size_t *size_out, size_t *erase_size_out,
-			       bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
-					 const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+			   u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+			size_t *size_out, size_t *erase_size_out,
+			bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+				  int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
 
 #ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
 #else
 static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
 #endif
 
 #ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
-			     size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
-			      size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+		      size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+		       size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
 #endif
 
 #endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fe..4a2dc4c 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
 
 static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
 {
@@ -56,7 +56,7 @@
 	return sync;
 }
 
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
 
 /*
  * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@
  *
  * This function will sleep
  */
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
-			      int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
 int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Generic transmit disable support through PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
 
 /* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
 
 /* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
-				     int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+			      unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
 /* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
 
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
@@ -95,8 +94,7 @@
 u8 efx_mdio_get_pause(struct efx_nic *efx);
 
 /* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
-				    unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Set or clear flag, debouncing */
 static inline void
@@ -107,6 +105,6 @@
 }
 
 /* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
 
 #endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed1..b14a717 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
  * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ *	Only valid if @unmap_len != 0.
  */
 struct efx_tx_buffer {
 	union {
@@ -154,6 +156,7 @@
 	unsigned short flags;
 	unsigned short len;
 	unsigned short unmap_len;
+	unsigned short dma_offset;
 };
 #define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
 #define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
@@ -182,6 +185,9 @@
  * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ *	Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -209,6 +215,7 @@
  *	blocks
  * @tso_packets: Number of packets via the TSO xmit path
  * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
  * @empty_read_count: If the completion path has seen the queue as empty
  *	and the transmission path has not yet checked this, the value of
  *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +230,8 @@
 	struct efx_buffer *tsoh_page;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	void __iomem *piobuf;
+	unsigned int piobuf_offset;
 	bool initialised;
 
 	/* Members used mainly on the completion path */
@@ -238,6 +247,7 @@
 	unsigned int tso_long_headers;
 	unsigned int tso_packets;
 	unsigned int pushes;
+	unsigned int pio_packets;
 
 	/* Members shared between paths and sometimes updated */
 	unsigned int empty_read_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9826594..9c90bf5 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
 #include "bitfield.h"
 #include "efx.h"
 #include "nic.h"
+#include "ef10_regs.h"
 #include "farch_regs.h"
 #include "io.h"
 #include "workarounds.h"
@@ -166,26 +167,30 @@
 
 /* Register dump */
 
-#define REGISTER_REVISION_A	1
-#define REGISTER_REVISION_B	2
-#define REGISTER_REVISION_C	3
-#define REGISTER_REVISION_Z	3	/* latest revision */
+#define REGISTER_REVISION_FA	1
+#define REGISTER_REVISION_FB	2
+#define REGISTER_REVISION_FC	3
+#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
+#define REGISTER_REVISION_ED	4
+#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */
 
 struct efx_nic_reg {
 	u32 offset:24;
-	u32 min_revision:2, max_revision:2;
+	u32 min_revision:3, max_revision:3;
 };
 
-#define REGISTER(name, min_rev, max_rev) {				\
-	FR_ ## min_rev ## max_rev ## _ ## name,				\
-	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
+#define REGISTER(name, arch, min_rev, max_rev) {			\
+	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
+	REGISTER_REVISION_ ## arch ## min_rev,				\
+	REGISTER_REVISION_ ## arch ## max_rev				\
 }
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
 
 static const struct efx_nic_reg efx_nic_regs[] = {
 	REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@
 	REGISTER_AB(XX_TXDRV_CTL),
 	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
 	/* XX_CORE_STAT is partly RC */
+	REGISTER_DZ(BIU_HW_REV_ID),
+	REGISTER_DZ(MC_DB_LWRD),
+	REGISTER_DZ(MC_DB_HWRD),
 };
 
 struct efx_nic_reg_table {
 	u32 offset:24;
-	u32 min_revision:2, max_revision:2;
+	u32 min_revision:3, max_revision:3;
 	u32 step:6, rows:21;
 };
 
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
 	offset,								\
-	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
+	REGISTER_REVISION_ ## arch ## min_rev,				\
+	REGISTER_REVISION_ ## arch ## max_rev,				\
 	step, rows							\
 }
-#define REGISTER_TABLE(name, min_rev, max_rev)				\
+#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
 	REGISTER_TABLE_DIMENSIONS(					\
-		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
-		min_rev, max_rev,					\
-		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
-		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
+		arch, min_rev, max_rev,					\
+		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
+		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
 #define REGISTER_TABLE_BB_CZ(name)					\
-	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
 				  FR_BZ_ ## name ## _STEP,		\
 				  FR_BB_ ## name ## _ROWS),		\
-	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
 				  FR_BZ_ ## name ## _STEP,		\
 				  FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
 
 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
 	/* DRIVER is not used */
@@ -340,9 +350,9 @@
 	 * 1K entries allows for some expansion of queue count and
 	 * size before we need to change the version. */
 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
-				  A, A, 8, 1024),
+				  F, A, A, 8, 1024),
 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
-				  B, Z, 8, 1024),
+				  F, B, Z, 8, 1024),
 	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
 	REGISTER_TABLE_BB_CZ(TIMER_TBL),
 	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@
 	/* MSIX_PBA_TABLE is not mapped */
 	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
 	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
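The extra arch parameter threads a register-name prefix through the token pasting, so the same macros index both the Falcon-architecture (FR_*) and EF10 (ER_*) definitions; the revision bitfields widen from 2 to 3 bits because revision 4 (ED/EZ) no longer fits in two. Roughly, the new EF10 case expands like this (a sketch; ER_DZ_BIU_HW_REV_ID comes from the newly included ef10_regs.h):

	/*
	 * REGISTER_DZ(BIU_HW_REV_ID)
	 *   -> REGISTER(BIU_HW_REV_ID, E, D, Z)
	 *   -> { ER_DZ_BIU_HW_REV_ID,   (E ## R_ ## D ## Z ## _ ## name)
	 *        REGISTER_REVISION_ED,  (== 4)
	 *        REGISTER_REVISION_EZ } (== 4)
	 */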
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 890bbbe..11b6112 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@
 	return efx->type->revision;
 }
 
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@
 	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
 }
 
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push.  May return a false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+					 unsigned int write_count)
+{
+	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+	if (empty_read_count == 0)
+		return false;
+
+	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+	return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
  * the doorbell.  This can reduce latency when we are adding a single
  * descriptor to an empty queue, but is otherwise pointless.  Further,
@@ -80,14 +100,10 @@
 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
 					    unsigned int write_count)
 {
-	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
-	if (empty_read_count == 0)
-		return false;
+	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
 
 	tx_queue->empty_read_count = 0;
-	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
-		&& tx_queue->write_count - write_count == 1;
+	return was_empty && tx_queue->write_count - write_count == 1;
 }
 
 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
@@ -401,6 +417,12 @@
 	EF10_STAT_COUNT
 };
 
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
 /**
  * struct efx_ef10_nic_data - EF10 architecture NIC state
  * @mcdi_buf: DMA buffer for MCDI
@@ -409,6 +431,13 @@
  * @n_allocated_vis: Number of VIs allocated to this function
  * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
  * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *	reboot
  * @rx_rss_context: Firmware handle for our RSS context
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -424,6 +453,11 @@
 	unsigned int n_allocated_vis;
 	bool must_realloc_vis;
 	bool must_restore_filters;
+	unsigned int n_piobufs;
+	void __iomem *wc_membase, *pio_write_base;
+	unsigned int pio_write_vi_base;
+	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+	bool must_restore_piobufs;
 	u32 rx_rss_context;
 	u64 stats[EF10_STAT_COUNT];
 	bool workaround_35388;
@@ -475,18 +509,18 @@
 	return 1 << efx->vi_scale;
 }
 
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
 
 #else
 
@@ -512,22 +546,20 @@
 
 #endif
 
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
-				 u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-				   struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-				     bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+			    struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+			      bool spoofchk);
 
 struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
-				struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
@@ -541,7 +573,7 @@
  **************************************************************************
  */
 
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -609,58 +641,58 @@
 {
 	channel->efx->type->ev_read_ack(channel);
 }
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
 
 /* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
 
 /* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
-				   struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
-					enum efx_filter_priority priority,
-					u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
-				     enum efx_filter_priority priority,
-				     u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
-				      enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
-					  enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
-				       enum efx_filter_priority priority,
-				       u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+			    bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+				 enum efx_filter_priority priority,
+				 u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+			      enum efx_filter_priority priority, u32 filter_id,
+			      struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+			       enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+				   enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+				enum efx_filter_priority priority, u32 *buf,
+				u32 size);
 #ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
-				       struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
-					    unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+				struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+				     unsigned int index);
 #endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
 
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
 
 /* Some statistics are computed as A - B where A and B each increase
  * linearly with some hardware counter(s) and the counters are read
@@ -681,17 +713,17 @@
 }
 
 /* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
 
 /* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
@@ -703,21 +735,21 @@
 }
 
 /* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
 static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 {
 	efx->type->rx_push_indir_table(efx);
 }
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len, gfp_t gfp_flags);
@@ -728,24 +760,22 @@
 	unsigned address;
 	efx_oword_t mask;
 };
-extern int efx_farch_test_registers(struct efx_nic *efx,
-				    const struct efx_farch_register_test *regs,
-				    size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+			     const struct efx_farch_register_test *regs,
+			     size_t n_regs);
 
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
 
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-		       const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-		     const unsigned long *mask,
-		     u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			      const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			  const unsigned long *mask, u64 *stats,
+			  const void *dma_buf, bool accumulate);
 
 #define EFX_MAX_FLUSH_TIME 5000
 
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
-				     efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+			      efx_qword_t *event);
 
 #endif /* EFX_NIC_H */
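Splitting __efx_nic_tx_is_empty() out of efx_nic_may_push_tx_desc() lets the new PIO path in tx.c test for an empty queue without the side effect of clearing @empty_read_count. The test itself is unchanged: the completion path publishes its read count with EFX_EMPTY_COUNT_VALID OR'd in, and the queue is still empty iff that snapshot matches the current write count in every other bit. A hedged sketch of the comparison, assuming the flag is a single marker bit outside the counter range:

	/* Sketch: snapshot == 0 means "no valid observation yet". */
	static bool seen_empty(unsigned int snapshot, unsigned int write_count)
	{
		if (snapshot == 0)
			return false;
		/* still empty iff nothing was written since the snapshot */
		return ((snapshot ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
	}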
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb70..803bf44 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
  */
 extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
 
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 
 /****************************************************************************
  * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@
 #define QUAKE_LED_TXLINK	(0)
 #define QUAKE_LED_RXLINK	(8)
 
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
 
 /****************************************************************************
 * Transwitch CX4 retimer
@@ -44,7 +44,7 @@
 #define TXC_GPIO_DIR_INPUT	0
 #define TXC_GPIO_DIR_OUTPUT	1
 
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
 
 #endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a59672..8f09e68 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/prefetch.h>
@@ -818,44 +819,70 @@
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 	struct efx_filter_spec spec;
-	const struct iphdr *ip;
 	const __be16 *ports;
+	__be16 ether_type;
 	int nhoff;
 	int rc;
 
-	nhoff = skb_network_offset(skb);
+	/* The core RPS/RFS code has already parsed and validated
+	 * VLAN, IP and transport headers.  We assume they are in the
+	 * header area.
+	 */
 
 	if (skb->protocol == htons(ETH_P_8021Q)) {
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-				    nhoff + sizeof(struct vlan_hdr));
-		if (((const struct vlan_hdr *)skb->data + nhoff)->
-		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
-			return -EPROTONOSUPPORT;
+		const struct vlan_hdr *vh =
+			(const struct vlan_hdr *)skb->data;
 
-		/* This is IP over 802.1q VLAN.  We can't filter on the
-		 * IP 5-tuple and the vlan together, so just strip the
-		 * vlan header and filter on the IP part.
+		/* We can't filter on the IP 5-tuple and the vlan
+		 * together, so just strip the vlan header and filter
+		 * on the IP part.
 		 */
-		nhoff += sizeof(struct vlan_hdr);
-	} else if (skb->protocol != htons(ETH_P_IP)) {
-		return -EPROTONOSUPPORT;
+		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+		ether_type = vh->h_vlan_encapsulated_proto;
+		nhoff = sizeof(struct vlan_hdr);
+	} else {
+		ether_type = skb->protocol;
+		nhoff = 0;
 	}
 
-	/* RFS must validate the IP header length before calling us */
-	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-	ip = (const struct iphdr *)(skb->data + nhoff);
-	if (ip_is_fragment(ip))
+	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
-	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
 			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
 			   rxq_index);
-	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
-				      ip->daddr, ports[1], ip->saddr, ports[0]);
-	if (rc)
-		return rc;
+	spec.match_flags =
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+	spec.ether_type = ether_type;
+
+	if (ether_type == htons(ETH_P_IP)) {
+		const struct iphdr *ip =
+			(const struct iphdr *)(skb->data + nhoff);
+
+		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+		if (ip_is_fragment(ip))
+			return -EPROTONOSUPPORT;
+		spec.ip_proto = ip->protocol;
+		spec.rem_host[0] = ip->saddr;
+		spec.loc_host[0] = ip->daddr;
+		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+	} else {
+		const struct ipv6hdr *ip6 =
+			(const struct ipv6hdr *)(skb->data + nhoff);
+
+		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+				    nhoff + sizeof(*ip6) + 4);
+		spec.ip_proto = ip6->nexthdr;
+		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+		ports = (const __be16 *)(ip6 + 1);
+	}
+
+	spec.rem_port = ports[0];
+	spec.loc_port = ports[1];
 
 	rc = efx->type->filter_rfs_insert(efx, &spec);
 	if (rc < 0)
@@ -866,11 +893,18 @@
 	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
 	++channel->rfs_filters_added;
 
-	netif_info(efx, rx_status, efx->net_dev,
-		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
-		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
-		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
-		   rxq_index, flow_id, rc);
+	if (ether_type == htons(ETH_P_IP))
+		netif_info(efx, rx_status, efx->net_dev,
+			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
+			   ntohs(ports[1]), rxq_index, flow_id, rc);
+	else
+		netif_info(efx, rx_status, efx->net_dev,
+			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
+			   ntohs(ports[1]), rxq_index, flow_id, rc);
 
 	return rc;
 }
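One subtlety in the new dual-stack RFS path: the transport ports are located differently per family. IPv4 carries a variable-length header, so the code steps over 4 * ip->ihl bytes; for IPv6 it assumes the TCP/UDP header sits immediately after the fixed 40-byte header, i.e. no extension headers, matching what the core RFS code will have parsed. The two computations from the hunk above, side by side (ip, ip6, nhoff as in that code):

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); /* IPv4 */
	ports = (const __be16 *)(ip6 + 1);  /* IPv6: + sizeof(struct ipv6hdr) */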
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae..a2f4a06 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@
 	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
 };
 
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
-				   const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
-			struct efx_self_tests *tests,
-			unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+			    int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+		 unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
 
 #endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5..c49d1fb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
 #include <net/ipv6.h>
 #include <linux/if_ether.h>
 #include <linux/highmem.h>
+#include <linux/cache.h>
 #include "net_driver.h"
 #include "efx.h"
+#include "io.h"
 #include "nic.h"
 #include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->flags);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
@@ -29,8 +65,7 @@
 {
 	if (buffer->unmap_len) {
 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
-					 buffer->unmap_len);
+		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
 		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
 					 DMA_TO_DEVICE);
@@ -83,8 +118,10 @@
 	 */
 	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
 
-	/* Possibly one more per segment for the alignment workaround */
-	if (EFX_WORKAROUND_5391(efx))
+	/* Possibly one more per segment for the alignment workaround,
+	 * or for option descriptors
+	 */
+	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 		max_descs += EFX_TSO_MAX_SEGS;
 
 	/* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +182,145 @@
 	}
 }
 
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+	int used;
+	u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+				    u8 *data, int len,
+				    struct efx_short_copy_buffer *copy_buf)
+{
+	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+	memcpy_toio(*piobuf, data, block_len);
+	*piobuf += block_len;
+	len -= block_len;
+
+	if (len) {
+		data += block_len;
+		BUG_ON(copy_buf->used);
+		BUG_ON(len > sizeof(copy_buf->buf));
+		memcpy(copy_buf->buf, data, len);
+		copy_buf->used = len;
+	}
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+				       u8 *data, int len,
+				       struct efx_short_copy_buffer *copy_buf)
+{
+	if (copy_buf->used) {
+		/* if the copy buffer is partially full, fill it up and write */
+		int copy_to_buf =
+			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+		copy_buf->used += copy_to_buf;
+
+		/* if we didn't fill it up then we're done for now */
+		if (copy_buf->used < sizeof(copy_buf->buf))
+			return;
+
+		memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+		*piobuf += sizeof(copy_buf->buf);
+		data += copy_to_buf;
+		len -= copy_to_buf;
+		copy_buf->used = 0;
+	}
+
+	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+				  struct efx_short_copy_buffer *copy_buf)
+{
+	/* if there's anything in it, write the whole buffer, including junk */
+	if (copy_buf->used)
+		memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse skb structure and copy fragments into the PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+				     u8 __iomem **piobuf,
+				     struct efx_short_copy_buffer *copy_buf)
+{
+	int i;
+
+	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+				copy_buf);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+		u8 *vaddr;
+
+		vaddr = kmap_atomic(skb_frag_page(f));
+
+		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+					   skb_frag_size(f), copy_buf);
+		kunmap_atomic(vaddr);
+	}
+
+	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	struct efx_tx_buffer *buffer =
+		efx_tx_queue_get_insert_buffer(tx_queue);
+	u8 __iomem *piobuf = tx_queue->piobuf;
+
+	/* Copy to PIO buffer. Ensure the writes are padded to the end
+	 * of a cache line, as this is required for write-combining to be
+	 * effective on at least x86.
+	 */
+
+	if (skb_shinfo(skb)->nr_frags) {
+		/* The size of the copy buffer will ensure all writes
+		 * are the size of a cache line.
+		 */
+		struct efx_short_copy_buffer copy_buf;
+
+		copy_buf.used = 0;
+
+		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+					 &piobuf, &copy_buf);
+		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+	} else {
+		/* Pad the write to the size of a cache line.
+		 * We can do this because we know the skb_shared_info struct is
+		 * after the source, and the destination buffer is big enough.
+		 */
+		BUILD_BUG_ON(L1_CACHE_BYTES >
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+		memcpy_toio(tx_queue->piobuf, skb->data,
+			    ALIGN(skb->len, L1_CACHE_BYTES));
+	}
+
+	EFX_POPULATE_QWORD_5(buffer->option,
+			     ESF_DZ_TX_DESC_IS_OPT, 1,
+			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+			     ESF_DZ_TX_PIO_CONT, 0,
+			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+			     ESF_DZ_TX_PIO_BUF_ADDR,
+			     tx_queue->piobuf_offset);
+	++tx_queue->pio_packets;
+	++tx_queue->insert_count;
+	return buffer;
+}
+#endif /* EFX_USE_PIO */
+
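The PIO copy helpers above maintain one invariant: every memcpy_toio() into the mapped PIO region transfers a whole number of copy-buffer-sized blocks (one L1 cache line each). efx_memcpy_toio_aligned() writes only the aligned prefix and banks the tail in copy_buf; efx_flush_copy_buffer() then pads the final write out to a full line, junk bytes included, which is what keeps write-combining effective. In sketch form, the split performed per fragment:

	size_t whole = len & ~(L1_CACHE_BYTES - 1); /* memcpy_toio() now */
	size_t tail  = len - whole;                 /* parked in copy_buf,
						     * flushed padded later */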
 /*
  * Add a socket buffer to a TX queue
  *
@@ -167,7 +343,7 @@
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, insert_ptr;
+	unsigned int len, unmap_len = 0;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	unsigned short dma_flags;
@@ -189,6 +365,17 @@
 			return NETDEV_TX_OK;
 	}
 
+	/* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+	if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+	    efx_nic_tx_is_empty(tx_queue) &&
+	    efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+		buffer = efx_enqueue_skb_pio(tx_queue, skb);
+		dma_flags = EFX_TX_BUF_OPTION;
+		goto finish_packet;
+	}
+#endif
+
 	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
@@ -208,11 +395,7 @@
 
 		/* Add to TX queue, splitting across DMA boundaries */
 		do {
-			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-			buffer = &tx_queue->buffer[insert_ptr];
-			EFX_BUG_ON_PARANOID(buffer->flags);
-			EFX_BUG_ON_PARANOID(buffer->len);
-			EFX_BUG_ON_PARANOID(buffer->unmap_len);
+			buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 
 			dma_len = efx_max_tx_len(efx, dma_addr);
 			if (likely(dma_len >= len))
@@ -230,6 +413,7 @@
 		/* Transfer ownership of the unmapping to the final buffer */
 		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
 		buffer->unmap_len = unmap_len;
+		buffer->dma_offset = buffer->dma_addr - unmap_addr;
 		unmap_len = 0;
 
 		/* Get address and size of next fragment */
@@ -245,6 +429,7 @@
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
+finish_packet:
 	buffer->skb = skb;
 	buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
@@ -270,8 +455,7 @@
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
 	}
 
@@ -628,6 +812,9 @@
  * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
  * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ *	descriptors
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@
 	/* Output position */
 	unsigned out_len;
 	unsigned seqnum;
-	unsigned ipv4_id;
+	u16 ipv4_id;
 	unsigned packet_space;
 
 	/* Input position */
@@ -651,6 +838,8 @@
 	unsigned int tcp_off;
 	unsigned header_len;
 	unsigned int ip_base_len;
+	dma_addr_t header_dma_addr;
+	unsigned int header_unmap_len;
 };
 
 
@@ -737,23 +926,18 @@
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, insert_ptr;
+	unsigned dma_len;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
 	while (1) {
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >=
 				    efx->txq_entries);
 
-		EFX_BUG_ON_PARANOID(buffer->len);
-		EFX_BUG_ON_PARANOID(buffer->unmap_len);
-		EFX_BUG_ON_PARANOID(buffer->flags);
-
 		buffer->dma_addr = dma_addr;
 
 		dma_len = efx_max_tx_len(efx, dma_addr);
@@ -796,6 +980,7 @@
 			return -ENOMEM;
 		}
 		buffer->unmap_len = buffer->len;
+		buffer->dma_offset = 0;
 		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
 	}
 
@@ -814,19 +999,27 @@
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->ptr_mask];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
 	}
 }
 
 
 /* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+		     const struct sk_buff *skb)
 {
+	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+	struct device *dma_dev = &efx->pci_dev->dev;
+	unsigned int header_len, in_len;
+	dma_addr_t dma_addr;
+
 	st->ip_off = skb_network_header(skb) - skb->data;
 	st->tcp_off = skb_transport_header(skb) - skb->data;
-	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+	in_len = skb_headlen(skb) - header_len;
+	st->header_len = header_len;
+	st->in_len = in_len;
 	if (st->protocol == htons(ETH_P_IP)) {
 		st->ip_base_len = st->header_len - st->ip_off;
 		st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1033,34 @@
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-	st->out_len = skb->len - st->header_len;
-	st->unmap_len = 0;
-	st->dma_flags = 0;
+	st->out_len = skb->len - header_len;
+
+	if (!use_options) {
+		st->header_unmap_len = 0;
+
+		if (likely(in_len == 0)) {
+			st->dma_flags = 0;
+			st->unmap_len = 0;
+			return 0;
+		}
+
+		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+					  in_len, DMA_TO_DEVICE);
+		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+		st->dma_addr = dma_addr;
+		st->unmap_addr = dma_addr;
+		st->unmap_len = in_len;
+	} else {
+		dma_addr = dma_map_single(dma_dev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+		st->header_dma_addr = dma_addr;
+		st->header_unmap_len = skb_headlen(skb);
+		st->dma_flags = 0;
+		st->dma_addr = dma_addr + header_len;
+		st->unmap_len = 0;
+	}
+
+	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1078,6 @@
 	return -ENOMEM;
 }
 
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
-				 const struct sk_buff *skb)
-{
-	int hl = st->header_len;
-	int len = skb_headlen(skb) - hl;
-
-	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
-					len, DMA_TO_DEVICE);
-	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
-		st->unmap_len = len;
-		st->in_len = len;
-		st->dma_addr = st->unmap_addr;
-		return 0;
-	}
-	return -ENOMEM;
-}
-
 
 /**
  * tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -922,6 +1122,7 @@
 	if (st->in_len == 0) {
 		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
+		buffer->dma_offset = buffer->unmap_len - buffer->len;
 		buffer->flags |= st->dma_flags;
 		st->unmap_len = 0;
 	}
@@ -944,55 +1145,98 @@
 				struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer =
-		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-	struct tcphdr *tsoh_th;
-	unsigned ip_length;
-	u8 *header;
-	int rc;
+		efx_tx_queue_get_insert_buffer(tx_queue);
+	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+	u8 tcp_flags_clear;
 
-	/* Allocate and insert a DMA-mapped header buffer. */
-	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
-	if (!header)
-		return -ENOMEM;
-
-	tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
-	/* Copy and update the headers. */
-	memcpy(header, skb->data, st->header_len);
-
-	tsoh_th->seq = htonl(st->seqnum);
-	st->seqnum += skb_shinfo(skb)->gso_size;
-	if (st->out_len > skb_shinfo(skb)->gso_size) {
-		/* This packet will not finish the TSO burst. */
+	if (!is_last) {
 		st->packet_space = skb_shinfo(skb)->gso_size;
-		tsoh_th->fin = 0;
-		tsoh_th->psh = 0;
+		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
 	} else {
-		/* This packet will be the last in the TSO burst. */
 		st->packet_space = st->out_len;
-		tsoh_th->fin = tcp_hdr(skb)->fin;
-		tsoh_th->psh = tcp_hdr(skb)->psh;
+		tcp_flags_clear = 0x00;
 	}
-	ip_length = st->ip_base_len + st->packet_space;
 
-	if (st->protocol == htons(ETH_P_IP)) {
-		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+	if (!st->header_unmap_len) {
+		/* Allocate and insert a DMA-mapped header buffer. */
+		struct tcphdr *tsoh_th;
+		unsigned ip_length;
+		u8 *header;
+		int rc;
 
-		tsoh_iph->tot_len = htons(ip_length);
+		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+		if (!header)
+			return -ENOMEM;
 
-		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
-		tsoh_iph->id = htons(st->ipv4_id);
-		st->ipv4_id++;
+		tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+		/* Copy and update the headers. */
+		memcpy(header, skb->data, st->header_len);
+
+		tsoh_th->seq = htonl(st->seqnum);
+		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+		ip_length = st->ip_base_len + st->packet_space;
+
+		if (st->protocol == htons(ETH_P_IP)) {
+			struct iphdr *tsoh_iph =
+				(struct iphdr *)(header + st->ip_off);
+
+			tsoh_iph->tot_len = htons(ip_length);
+			tsoh_iph->id = htons(st->ipv4_id);
+		} else {
+			struct ipv6hdr *tsoh_iph =
+				(struct ipv6hdr *)(header + st->ip_off);
+
+			tsoh_iph->payload_len = htons(ip_length);
+		}
+
+		rc = efx_tso_put_header(tx_queue, buffer, header);
+		if (unlikely(rc))
+			return rc;
 	} else {
-		struct ipv6hdr *tsoh_iph =
-			(struct ipv6hdr *)(header + st->ip_off);
+		/* Send the original headers with a TSO option descriptor
+		 * in front
+		 */
+		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
 
-		tsoh_iph->payload_len = htons(ip_length);
+		buffer->flags = EFX_TX_BUF_OPTION;
+		buffer->len = 0;
+		buffer->unmap_len = 0;
+		EFX_POPULATE_QWORD_5(buffer->option,
+				     ESF_DZ_TX_DESC_IS_OPT, 1,
+				     ESF_DZ_TX_OPTION_TYPE,
+				     ESE_DZ_TX_OPTION_DESC_TSO,
+				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+		++tx_queue->insert_count;
+
+		/* We mapped the headers in tso_start().  Unmap them
+		 * when the last segment is completed.
+		 */
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+		buffer->dma_addr = st->header_dma_addr;
+		buffer->len = st->header_len;
+		if (is_last) {
+			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+			buffer->unmap_len = st->header_unmap_len;
+			buffer->dma_offset = 0;
+			/* Ensure we only unmap them once in case of a
+			 * later DMA mapping error and rollback
+			 */
+			st->header_unmap_len = 0;
+		} else {
+			buffer->flags = EFX_TX_BUF_CONT;
+			buffer->unmap_len = 0;
+		}
+		++tx_queue->insert_count;
 	}
 
-	rc = efx_tso_put_header(tx_queue, buffer, header);
-	if (unlikely(rc))
-		return rc;
+	st->seqnum += skb_shinfo(skb)->gso_size;
+
+	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
+	++st->ipv4_id;
 
 	++tx_queue->tso_packets;
 
@@ -1023,12 +1267,11 @@
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
-	tso_start(&state, skb);
+	rc = tso_start(&state, efx, skb);
+	if (rc)
+		goto mem_err;
 
-	/* Assume that skb header area contains exactly the headers, and
-	 * all payload is in the frag list.
-	 */
-	if (skb_headlen(skb) == state.header_len) {
+	if (likely(state.in_len == 0)) {
 		/* Grab the first payload fragment. */
 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
 		frag_i = 0;
@@ -1037,9 +1280,7 @@
 		if (rc)
 			goto mem_err;
 	} else {
-		rc = tso_get_head_fragment(&state, efx, skb);
-		if (rc)
-			goto mem_err;
+		/* Payload starts in the header area. */
 		frag_i = -1;
 	}
 
@@ -1091,6 +1332,11 @@
 				       state.unmap_len, DMA_TO_DEVICE);
 	}
 
+	/* Free the header DMA mapping, if using option descriptors */
+	if (state.header_unmap_len)
+		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+				 state.header_unmap_len, DMA_TO_DEVICE);
+
 	efx_enqueue_unwind(tx_queue);
 	return NETDEV_TX_OK;
 }
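Taken together, the TSO changes split into two regimes. On Falcon-architecture NICs the driver still builds a fresh DMA-coherent header per segment and patches the sequence number, flags and lengths itself. On EF10 (efx_nic_rev() >= EFX_REV_HUNT_A0), tso_start() maps the whole header area once and each segment is emitted as a TSO option descriptor followed by the unmodified headers by reference, with the hardware applying the per-segment edits; the error path therefore needs the extra header_unmap_len cleanup added above. Roughly, the descriptor sequence per segment in the option case:

	/*
	 * Sketch of one EF10 TSO segment (option-descriptor path):
	 *
	 *   option descriptor: TSO (tcp_flags, ipv4_id, seqnum)
	 *   header descriptor: dma = header_dma_addr, len = header_len
	 *   payload descr(s):  dma = st->dma_addr, len <= packet_space
	 *
	 * Only the last segment carries EFX_TX_BUF_MAP_SINGLE for the
	 * header mapping, so it is unmapped exactly once.
	 */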
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 770036b..513ed8b 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -839,7 +839,7 @@
 	dev->watchdog_timeo	= timeout;
 	dev->irq		= MACE_ETHERNET_IRQ;
 	dev->base_addr		= (unsigned long)&mace->eth;
-	memcpy(dev->dev_addr, o2meth_eaddr, 6);
+	memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
 
 	priv = netdev_priv(dev);
 	spin_lock_init(&priv->meth_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index ee18e6f..acbbe48 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1921,7 +1921,6 @@
 	cancel_work_sync(&tp->phy_task);
 	unregister_netdev(dev);
 	sis190_release_board(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver sis190_pci_driver = {
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 068fc44..753630f 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@
 	bool "SMC (SMSC)/Western Digital devices"
 	default y
 	depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
-		BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+		BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -39,7 +39,7 @@
 	select CRC32
 	select MII
 	depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
-		    MN10300 || COLDFIRE || ARM64)
+		    MN10300 || COLDFIRE || ARM64 || XTENSA)
 	---help---
 	  This is a driver for SMC's 91x series of Ethernet chipsets,
 	  including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 03b256a..8ae1f8a7 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -91,9 +91,9 @@
 
 /* These identify the driver base version and may not be removed. */
 static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
 static char version2[] =
-"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
@@ -332,9 +332,7 @@
 
 /* when built into the kernel, we only print version if device is found */
 #ifndef MODULE
-	static int printed_version;
-	if (!printed_version++)
-		printk(KERN_INFO "%s%s", version, version2);
+	pr_info_once("%s%s\n", version, version2);
 #endif
 
 	card_idx++;
@@ -423,9 +421,9 @@
 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
 
 	if (debug > 2) {
-		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
+		dev_dbg(&pdev->dev, "EEPROM contents:\n");
 		for (i = 0; i < 64; i++)
-			printk(" %4.4x%s", read_eeprom(ep, i),
+			pr_cont(" %4.4x%s", read_eeprom(ep, i),
 				   i % 16 == 15 ? "\n" : "");
 	}
 
@@ -490,10 +488,10 @@
 	if (ret < 0)
 		goto err_out_unmap_rx;
 
-	printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
-	       dev->name, pci_id_tbl[chip_idx].name,
-	       (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
-	       dev->dev_addr);
+	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
+		    pci_id_tbl[chip_idx].name,
+		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
+		    dev->dev_addr);
 
 out:
 	return ret;
@@ -703,9 +701,8 @@
 			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
 		if (dev->if_port == 1) {
 			if (debug > 1)
-				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
-					   "status %4.4x.\n",
-					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
+					    mdio_read(dev, ep->phys[0], MII_BMSR));
 		}
 	} else {
 		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
@@ -715,10 +712,10 @@
 			else if (! (mii_lpa & LPA_LPACK))
 				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 			if (debug > 1)
-				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
-					   " register read of %4.4x.\n", dev->name,
-					   ep->mii.full_duplex ? "full" : "half",
-					   ep->phys[0], mii_lpa);
+				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
+					    ep->mii.full_duplex ? "full"
+								: "half",
+					    ep->phys[0], mii_lpa);
 		}
 	}
 
@@ -738,10 +735,9 @@
 	     TxUnderrun);
 
 	if (debug > 1) {
-		printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
-		       "status %4.4x %s-duplex.\n",
-		       dev->name, ioaddr, irq, er32(GENCTL),
-		       ep->mii.full_duplex ? "full" : "half");
+		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
+			   ioaddr, irq, er32(GENCTL),
+			   ep->mii.full_duplex ? "full" : "half");
 	}
 
 	/* Set the timer to switch to check for link beat and perhaps switch
@@ -790,8 +786,8 @@
 	/* Soft reset the chip. */
 	ew32(GENCTL, 0x4001);
 
-	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
-		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
 	udelay(1);
 
 	/* This magic is documented in SMSC app note 7.15 */
@@ -827,9 +823,8 @@
 	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
 	     TxUnderrun);
 
-	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
-		   " interrupt %4.4x.\n",
-		   dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
+	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
+		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
 }
 
 static void check_media(struct net_device *dev)
@@ -846,9 +841,9 @@
 		return;
 	if (ep->mii.full_duplex != duplex) {
 		ep->mii.full_duplex = duplex;
-		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
-			   " partner capability of %4.4x.\n", dev->name,
-			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
+			    ep->mii.full_duplex ? "full" : "half",
+			    ep->phys[0], mii_lpa);
 		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 	}
 }
@@ -861,11 +856,10 @@
 	int next_tick = 5*HZ;
 
 	if (debug > 3) {
-		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
-		       dev->name, er32(TxSTAT));
-		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
-		       "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
-		       er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
+		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
+			   er32(TxSTAT));
+		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
+			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 	}
 
 	check_media(dev);
@@ -880,11 +874,11 @@
 	void __iomem *ioaddr = ep->ioaddr;
 
 	if (debug > 0) {
-		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
-		       "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
+		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
+			    er16(TxSTAT));
 		if (debug > 1) {
-			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
-				   dev->name, ep->dirty_tx, ep->cur_tx);
+			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
+				   ep->dirty_tx, ep->cur_tx);
 		}
 	}
 	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
@@ -994,9 +988,8 @@
 	ew32(COMMAND, TxQueued);
 
 	if (debug > 4)
-		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
-		       "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
-		       entry, ctrl_word, er32(TxSTAT));
+		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
+			   skb->len, entry, ctrl_word, er32(TxSTAT));
 
 	return NETDEV_TX_OK;
 }
@@ -1009,8 +1002,8 @@
 #ifndef final_version
 	/* There was a major error, log it. */
 	if (debug > 1)
-		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
-		       dev->name, status);
+		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
+			   status);
 #endif
 	stats->tx_errors++;
 	if (status & 0x1050)
@@ -1057,9 +1050,8 @@
 
 #ifndef final_version
 	if (cur_tx - dirty_tx > TX_RING_SIZE) {
-		printk(KERN_WARNING
-		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
-		       dev->name, dirty_tx, cur_tx, ep->tx_full);
+		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+			    dirty_tx, cur_tx, ep->tx_full);
 		dirty_tx += TX_RING_SIZE;
 	}
 #endif
@@ -1086,8 +1078,8 @@
 	ew32(INTSTAT, status & EpicNormalEvent);
 
 	if (debug > 4) {
-		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
-		       "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
+		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
+			   status, er32(INTSTAT));
 	}
 
 	if ((status & IntrSummary) == 0)
@@ -1125,8 +1117,8 @@
 			ew32(COMMAND, RestartTx);
 		}
 		if (status & PCIBusErr170) {
-			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
-					 dev->name, status);
+			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
+				   status);
 			epic_pause(dev);
 			epic_restart(dev);
 		}
@@ -1136,8 +1128,8 @@
 
 out:
 	if (debug > 3) {
-		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
-				   dev->name, status);
+		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
+			   status);
 	}
 
 	return IRQ_RETVAL(handled);
@@ -1151,7 +1143,7 @@
 	int work_done = 0;
 
 	if (debug > 4)
-		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
 			   ep->rx_ring[entry].rxstatus);
 
 	if (rx_work_limit > budget)
@@ -1162,16 +1154,17 @@
 		int status = ep->rx_ring[entry].rxstatus;
 
 		if (debug > 4)
-			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
+			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
+				   status);
 		if (--rx_work_limit < 0)
 			break;
 		if (status & 0x2006) {
 			if (debug > 2)
-				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
-					   dev->name, status);
+				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
+					   status);
 			if (status & 0x2000) {
-				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
-					   "multiple buffers, status %4.4x!\n", dev->name, status);
+				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
+					    status);
 				dev->stats.rx_length_errors++;
 			} else if (status & 0x0006)
 				/* Rx Frame errors are counted in hardware. */
@@ -1183,9 +1176,8 @@
 			struct sk_buff *skb;
 
 			if (pkt_len > PKT_BUF_SZ - 4) {
-				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
-					   "%d bytes.\n",
-					   dev->name, status, pkt_len);
+				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
+					   status, pkt_len);
 				pkt_len = 1514;
 			}
 			/* Check if the packet is long enough to accept without copying
@@ -1305,8 +1297,8 @@
 	napi_disable(&ep->napi);
 
 	if (debug > 1)
-		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
-		       dev->name, er32(INTSTAT));
+		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
+			   er32(INTSTAT));
 
 	del_timer_sync(&ep->timer);
 
@@ -1324,7 +1316,7 @@
 		ep->rx_ring[i].buflength = 0;
 		if (skb) {
 			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
-				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 		}
 		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1535,7 +1527,6 @@
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	/* pci_power_off(pdev, -1); */
 }
 
@@ -1588,8 +1579,7 @@
 {
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
-	printk (KERN_INFO "%s%s",
-		version, version2);
+	pr_info("%s%s\n", version, version2);
 #endif
 
 	return pci_register_driver(&epic_driver);
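
The dropped pci_set_drvdata(pdev, NULL) in the remove path reflects a parallel cleanup: the driver core now clears drvdata itself once a driver is unbound, so doing it by hand is redundant. A minimal sketch of the resulting teardown (hypothetical function name; the body mirrors the hunk above):

    static void example_remove_one(struct pci_dev *pdev)
    {
    	struct net_device *dev = pci_get_drvdata(pdev);

    	unregister_netdev(dev);
    	pci_release_regions(pdev);
    	free_netdev(dev);
    	pci_disable_device(pdev);
    	/* no pci_set_drvdata(pdev, NULL): the core clears it after remove */
    }
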
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index afe01c4..0f096a8 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -106,16 +106,16 @@
 #define POWER_DOWN		 1
 
 #if SMC_DEBUG > 0
-#define DBG(n, args...)				 \
+#define DBG(n, dev, args...)			 \
 	do {					 \
 		if (SMC_DEBUG & (n))		 \
-			printk(args);		 \
+			netdev_dbg(dev, args);	 \
 	} while (0)
 
-#define PRINTK(args...)   printk(args)
+#define PRINTK(dev, args...)   netdev_info(dev, args)
 #else
-#define DBG(n, args...)   do { } while (0)
-#define PRINTK(args...)   printk(KERN_DEBUG args)
+#define DBG(n, dev, args...)   do { } while (0)
+#define PRINTK(dev, args...)   netdev_dbg(dev, args)
 #endif
 
 #if SMC_DEBUG_PKTS > 0
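
The reworked DBG()/PRINTK() macros take the net_device as an explicit parameter so the enabled expansion can call netdev_dbg()/netdev_info() rather than a bare printk(), while the SMC_DEBUG=0 build still compiles every call site away. A standalone sketch of the pattern (the EX_ names are illustrative, not the driver's):

    #define EX_DEBUG 0				/* compile-time mask, like SMC_DEBUG */

    #if EX_DEBUG > 0
    #define EX_DBG(n, dev, args...)			\
    	do {					\
    		if (EX_DEBUG & (n))		\
    			netdev_dbg(dev, args);	\
    	} while (0)
    #else
    #define EX_DBG(n, dev, args...)	do { } while (0)
    #endif
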
@@ -130,21 +130,23 @@
 
 	for (i = 0; i < lines ; i ++) {
 		int cur;
+		printk(KERN_DEBUG);
 		for (cur = 0; cur < 8; cur++) {
 			u_char a, b;
 			a = *buf++;
 			b = *buf++;
-			printk("%02x%02x ", a, b);
+			pr_cont("%02x%02x ", a, b);
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
+	printk(KERN_DEBUG);
 	for (i = 0; i < remainder/2 ; i++) {
 		u_char a, b;
 		a = *buf++;
 		b = *buf++;
-		printk("%02x%02x ", a, b);
+		pr_cont("%02x%02x ", a, b);
 	}
-	printk("\n");
+	pr_cont("\n");
 }
 #else
 #define PRINT_PKT(x...)  do { } while (0)
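
The bare printk(KERN_DEBUG) calls added to the dump loops are intentional: an empty format opens a fresh KERN_DEBUG record, and the following pr_cont() calls append to that record, so each row of the hex dump still lands on one log line. The idiom in isolation (hypothetical helper; assumes an even word count):

    static void example_dump_row(const u_char *buf, int nwords)
    {
    	int i;

    	printk(KERN_DEBUG);		/* open a KERN_DEBUG record */
    	for (i = 0; i < nwords; i++, buf += 2)
    		pr_cont("%02x%02x ", buf[0], buf[1]);
    	pr_cont("\n");			/* terminate the line */
    }
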
@@ -176,7 +178,7 @@
 	unsigned int reg, timeout=0, resets=1, irq_cfg;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	/*	 Take out of PM setting first */
 	if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -188,7 +190,7 @@
 			reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
 		} while (--timeout && !reg);
 		if (timeout == 0) {
-			PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+			PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
 			return;
 		}
 	}
@@ -206,14 +208,14 @@
 			reg = SMC_GET_HW_CFG(lp);
 			/* If chip indicates reset timeout then try again */
 			if (reg & HW_CFG_SRST_TO_) {
-				PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+				PRINTK(dev, "chip reset timeout, retrying...\n");
 				resets++;
 				break;
 			}
 		} while (--timeout && (reg & HW_CFG_SRST_));
 	}
 	if (timeout == 0) {
-		PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+		PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
 		return;
 	}
 
@@ -223,7 +225,7 @@
 		udelay(10);
 
 	if (timeout == 0){
-		PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+		PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
 		return;
 	}
 
@@ -270,7 +272,7 @@
 	unsigned mask, cfg, cr;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -296,7 +298,7 @@
 
 	/* Turn on receiver and enable RX */
 	if (cr & MAC_CR_RXEN_)
-		DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+		DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
 
 	SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
 
@@ -327,7 +329,7 @@
 	unsigned cr;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
 
 	/* Disable IRQ's */
 	SMC_SET_INT_EN(lp, 0);
@@ -346,7 +348,8 @@
 	struct smc911x_local *lp = netdev_priv(dev);
 	unsigned int fifo_count, timeout, reg;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
+	    CARDNAME, __func__);
 	fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
 	if (fifo_count <= 4) {
 		/* Manually dump the packet data */
@@ -361,7 +364,7 @@
 			reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
 		} while (--timeout && reg);
 		if (timeout == 0) {
-			PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+			PRINTK(dev, "timeout waiting for RX fast forward\n");
 		}
 	}
 }
@@ -379,11 +382,11 @@
 	struct sk_buff *skb;
 	unsigned char *data;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
-		dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
+	    __func__);
 	status = SMC_GET_RX_STS_FIFO(lp);
-	DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
-		dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+	DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
+	    (status & 0x3fff0000) >> 16, status & 0xc000ffff);
 	pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
 	if (status & RX_STS_ES_) {
 		/* Deal with a bad packet */
@@ -403,8 +406,7 @@
 		/* Alloc a buffer with extra room for DMA alignment */
 		skb = netdev_alloc_skb(dev, pkt_len+32);
 		if (unlikely(skb == NULL)) {
-			PRINTK( "%s: Low memory, rcvd packet dropped.\n",
-				dev->name);
+			PRINTK(dev, "Low memory, rcvd packet dropped.\n");
 			dev->stats.rx_dropped++;
 			smc911x_drop_pkt(dev);
 			return;
@@ -422,8 +424,8 @@
 		/* Lower the FIFO threshold if possible */
 		fifo = SMC_GET_FIFO_INT(lp);
 		if (fifo & 0xFF) fifo--;
-		DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
-			dev->name, fifo & 0xff);
+		DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
+		    fifo & 0xff);
 		SMC_SET_FIFO_INT(lp, fifo);
 		/* Setup RX DMA */
 		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
@@ -436,7 +438,7 @@
 		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
 		SMC_PULL_DATA(lp, data, pkt_len+2+3);
 
-		DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+		DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
 		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
 		skb->protocol = eth_type_trans(skb, dev);
 		netif_rx(skb);
@@ -456,7 +458,7 @@
 	unsigned int cmdA, cmdB, len;
 	unsigned char *buf;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
 	BUG_ON(lp->pending_tx_skb == NULL);
 
 	skb = lp->pending_tx_skb;
@@ -481,12 +483,12 @@
 	/* tag is packet length so we can use this in stats update later */
 	cmdB = (skb->len  << 16) | (skb->len & 0x7FF);
 
-	DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
-		 dev->name, len, len, buf, cmdA, cmdB);
+	DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+	    len, len, buf, cmdA, cmdB);
 	SMC_SET_TX_FIFO(lp, cmdA);
 	SMC_SET_TX_FIFO(lp, cmdB);
 
-	DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+	DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
 	PRINT_PKT(buf, len <= 64 ? len : 64);
 
 	/* Send pkt via PIO or DMA */
@@ -517,20 +519,20 @@
 	unsigned int free;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
-		dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+	    __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 
 	BUG_ON(lp->pending_tx_skb != NULL);
 
 	free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
-	DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+	DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
 
 	/* Turn off the flow when running out of space in FIFO */
 	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
-		DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
-			dev->name, free);
+		DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
+		    free);
 		/* Reenable when at least 1 packet of size MTU present */
 		SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
 		lp->tx_throttle = 1;
@@ -545,8 +547,8 @@
 	 *	  End padding				 15 bytes
 	 */
 	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
-		printk("%s: No Tx free space %d < %d\n",
-			dev->name, free, skb->len);
+		netdev_warn(dev, "No Tx free space %d < %d\n",
+			    free, skb->len);
 		lp->pending_tx_skb = NULL;
 		dev->stats.tx_errors++;
 		dev->stats.tx_dropped++;
@@ -561,13 +563,13 @@
 		 * the DMA IRQ starts it
 		 */
 		if (lp->txdma_active) {
-			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
 			lp->pending_tx_skb = skb;
 			netif_stop_queue(dev);
 			spin_unlock_irqrestore(&lp->lock, flags);
 			return NETDEV_TX_OK;
 		} else {
-			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
 			lp->txdma_active = 1;
 		}
 	}
@@ -589,20 +591,19 @@
 	struct smc911x_local *lp = netdev_priv(dev);
 	unsigned int tx_status;
 
-	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
-		dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+	    __func__);
 
 	/* Collect the TX status */
 	while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
-		DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
-			dev->name,
-			(SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+		DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
+		    (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
 		tx_status = SMC_GET_TX_STS_FIFO(lp);
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes+=tx_status>>16;
-		DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
-			dev->name, (tx_status & 0xffff0000) >> 16,
-			tx_status & 0x0000ffff);
+		DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
+		    (tx_status & 0xffff0000) >> 16,
+		    tx_status & 0x0000ffff);
 		/* count Tx errors, but ignore lost carrier errors when in
 		 * full-duplex mode */
 		if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
@@ -640,8 +641,8 @@
 
 	SMC_GET_MII(lp, phyreg, phyaddr, phydata);
 
-	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
-		__func__, phyaddr, phyreg, phydata);
+	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+	    __func__, phyaddr, phyreg, phydata);
 	return phydata;
 }
 
@@ -654,8 +655,8 @@
 {
 	struct smc911x_local *lp = netdev_priv(dev);
 
-	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__func__, phyaddr, phyreg, phydata);
+	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+	    __func__, phyaddr, phyreg, phydata);
 
 	SMC_SET_MII(lp, phyreg, phyaddr, phydata);
 }
@@ -670,7 +671,7 @@
 	int phyaddr;
 	unsigned int cfg, id1, id2;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	lp->phy_type = 0;
 
@@ -731,8 +732,8 @@
 			lp->phy_type = id1 << 16 | id2;
 	}
 
-	DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
-		dev->name, id1, id2, lp->mii.phy_id);
+	DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+	    id1, id2, lp->mii.phy_id);
 }
 
 /*
@@ -745,7 +746,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int bmcr;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	/* Enter Link Disable state */
 	SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -792,7 +793,7 @@
 	unsigned long flags;
 	unsigned int reg;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	reg = SMC_GET_PMT_CTRL(lp);
@@ -851,18 +852,18 @@
 	int phyaddr = lp->mii.phy_id;
 	unsigned int bmcr, cr;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
 		/* duplex state has changed */
 		SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
 		SMC_GET_MAC_CR(lp, cr);
 		if (lp->mii.full_duplex) {
-			DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+			DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
 			bmcr |= BMCR_FULLDPLX;
 			cr |= MAC_CR_RCVOWN_;
 		} else {
-			DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+			DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
 			bmcr &= ~BMCR_FULLDPLX;
 			cr &= ~MAC_CR_RCVOWN_;
 		}
@@ -891,7 +892,7 @@
 	int status;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
 
 	/*
 	 * We should not be called if phy_type is zero.
@@ -900,7 +901,7 @@
 		return;
 
 	if (smc911x_phy_reset(dev, phyaddr)) {
-		printk("%s: PHY reset timed out\n", dev->name);
+		netdev_info(dev, "PHY reset timed out\n");
 		return;
 	}
 	spin_lock_irqsave(&lp->lock, flags);
@@ -922,7 +923,7 @@
 	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
 	SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
 	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
-		printk(KERN_INFO "Auto negotiation NOT supported\n");
+		netdev_info(dev, "Auto negotiation NOT supported\n");
 		smc911x_phy_fixed(dev);
 		goto smc911x_phy_configure_exit;
 	}
@@ -960,8 +961,8 @@
 	udelay(10);
 	SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
 
-	DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
-	DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+	DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
+	DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
 
 	/* Restart auto-negotiation process in order to advertise my caps */
 	SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -984,7 +985,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int status;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	if (lp->phy_type == 0)
 		return;
@@ -992,10 +993,10 @@
 	smc911x_phy_check_media(dev, 0);
 	/* read to clear status bits */
 	SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
-	DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
-		dev->name, status & 0xffff);
-	DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
-		dev->name, SMC_GET_AFC_CFG(lp));
+	DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
+	    status & 0xffff);
+	DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
+	    SMC_GET_AFC_CFG(lp));
 }
 
 /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
@@ -1012,7 +1013,7 @@
 	unsigned int rx_overrun=0, cr, pkts;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -1033,8 +1034,8 @@
 	do {
 		status = SMC_GET_INT(lp);
 
-		DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
-			dev->name, status, mask, status & ~mask);
+		DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+		    status, mask, status & ~mask);
 
 		status &= mask;
 		if (!status)
@@ -1066,7 +1067,7 @@
 				SMC_GET_MAC_CR(lp, cr);
 				cr &= ~MAC_CR_RXEN_;
 				SMC_SET_MAC_CR(lp, cr);
-				DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
 				dev->stats.rx_errors++;
 				dev->stats.rx_fifo_errors++;
 			}
@@ -1078,7 +1079,7 @@
 				cr &= ~MAC_CR_RXEN_;
 				SMC_SET_MAC_CR(lp, cr);
 				rx_overrun=1;
-				DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
 				dev->stats.rx_errors++;
 				dev->stats.rx_fifo_errors++;
 			}
@@ -1087,23 +1088,23 @@
 		/* Handle receive condition */
 		if ((status & INT_STS_RSFL_) || rx_overrun) {
 			unsigned int fifo;
-			DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+			DBG(SMC_DEBUG_RX, dev, "RX irq\n");
 			fifo = SMC_GET_RX_FIFO_INF(lp);
 			pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
-			DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
-				dev->name, pkts, fifo & 0xFFFF );
+			DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
+			    pkts, fifo & 0xFFFF);
 			if (pkts != 0) {
 #ifdef SMC_USE_DMA
 				unsigned int fifo;
 				if (lp->rxdma_active){
-					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
-						"%s: RX DMA active\n", dev->name);
+					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+					    "RX DMA active\n");
 					/* The DMA is already running so up the IRQ threshold */
 					fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
 					fifo |= pkts & 0xFF;
-					DBG(SMC_DEBUG_RX,
-						"%s: Setting RX stat FIFO threshold to %d\n",
-						dev->name, fifo & 0xff);
+					DBG(SMC_DEBUG_RX, dev,
+					    "Setting RX stat FIFO threshold to %d\n",
+					    fifo & 0xff);
 					SMC_SET_FIFO_INT(lp, fifo);
 				} else
 #endif
@@ -1113,7 +1114,7 @@
 		}
 		/* Handle transmit FIFO available */
 		if (status & INT_STS_TDFA_) {
-			DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+			DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
 			SMC_SET_FIFO_TDA(lp, 0xFF);
 			lp->tx_throttle = 0;
 #ifdef SMC_USE_DMA
@@ -1125,9 +1126,9 @@
 		/* Handle transmit done condition */
 #if 1
 		if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
-			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
-				"%s: Tx stat FIFO limit (%d) /GPT irq\n",
-				dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
+			    "Tx stat FIFO limit (%d) /GPT irq\n",
+			    (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
 			smc911x_tx(dev);
 			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
 			SMC_ACK_INT(lp, INT_STS_TSFL_);
@@ -1135,23 +1136,20 @@
 		}
 #else
 		if (status & INT_STS_TSFL_) {
-			DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
+			DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
 			smc911x_tx(dev);
 			SMC_ACK_INT(lp, INT_STS_TSFL_);
 		}
 
 		if (status & INT_STS_GPT_INT_) {
-			DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
-				dev->name,
-				SMC_GET_IRQ_CFG(lp),
-				SMC_GET_FIFO_INT(lp),
-				SMC_GET_RX_CFG(lp));
-			DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
-				"Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
-				dev->name,
-				(SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
-				SMC_GET_RX_FIFO_INF(lp) & 0xffff,
-				SMC_GET_RX_STS_FIFO_PEEK(lp));
+			DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+			    SMC_GET_IRQ_CFG(lp),
+			    SMC_GET_FIFO_INT(lp),
+			    SMC_GET_RX_CFG(lp));
+			DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+			    (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+			    SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+			    SMC_GET_RX_STS_FIFO_PEEK(lp));
 			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
 			SMC_ACK_INT(lp, INT_STS_GPT_INT_);
 		}
@@ -1159,7 +1157,7 @@
 
 		/* Handle PHY interrupt condition */
 		if (status & INT_STS_PHY_INT_) {
-			DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+			DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
 			smc911x_phy_interrupt(dev);
 			SMC_ACK_INT(lp, INT_STS_PHY_INT_);
 		}
@@ -1168,8 +1166,8 @@
 	/* restore mask state */
 	SMC_SET_INT_EN(lp, mask);
 
-	DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
-		dev->name, 8-timeout);
+	DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
+	    8-timeout);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
@@ -1185,9 +1183,9 @@
 	struct sk_buff *skb = lp->current_tx_skb;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
-	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
 	/* Clear the DMA interrupt sources */
 	SMC_DMA_ACK_IRQ(dev, dma);
 	BUG_ON(skb == NULL);
@@ -1198,8 +1196,8 @@
 	if (lp->pending_tx_skb != NULL)
 		smc911x_hardware_send_pkt(dev);
 	else {
-		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
-			"%s: No pending Tx packets. DMA disabled\n", dev->name);
+		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+		    "No pending Tx packets. DMA disabled\n");
 		spin_lock_irqsave(&lp->lock, flags);
 		lp->txdma_active = 0;
 		if (!lp->tx_throttle) {
@@ -1208,8 +1206,8 @@
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
-	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
-		"%s: TX DMA irq completed\n", dev->name);
+	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+	    "TX DMA irq completed\n");
 }
 static void
 smc911x_rx_dma_irq(int dma, void *data)
@@ -1221,8 +1219,8 @@
 	unsigned long flags;
 	unsigned int pkts;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
-	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
+	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
 	/* Clear the DMA interrupt sources */
 	SMC_DMA_ACK_IRQ(dev, dma);
 	dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
@@ -1242,9 +1240,9 @@
 		lp->rxdma_active = 0;
 	}
 	spin_unlock_irqrestore(&lp->lock, flags);
-	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
-		"%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
-		dev->name, pkts);
+	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+	    "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+	    pkts);
 }
 #endif	 /* SMC_USE_DMA */
 
@@ -1268,14 +1266,14 @@
 	int status, mask;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	status = SMC_GET_INT(lp);
 	mask = SMC_GET_INT_EN(lp);
 	spin_unlock_irqrestore(&lp->lock, flags);
-	DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
-		dev->name, status, mask);
+	DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
+	    status, mask);
 
 	/* Dump the current TX FIFO contents and restart */
 	mask = SMC_GET_TX_CFG(lp);
@@ -1306,7 +1304,7 @@
 	unsigned int mcr, update_multicast = 0;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	SMC_GET_MAC_CR(lp, mcr);
@@ -1314,7 +1312,7 @@
 
 	if (dev->flags & IFF_PROMISC) {
 
-		DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+		DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
 		mcr |= MAC_CR_PRMS_;
 	}
 	/*
@@ -1323,7 +1321,7 @@
 	 * checked before the table is
 	 */
 	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
-		DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+		DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
 		mcr |= MAC_CR_MCPAS_;
 	}
 
@@ -1363,8 +1361,7 @@
 		/* now, the table can be loaded into the chipset */
 		update_multicast = 1;
 	} else	 {
-		DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
-			dev->name);
+		DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
 		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
 
 		/*
@@ -1378,9 +1375,9 @@
 	spin_lock_irqsave(&lp->lock, flags);
 	SMC_SET_MAC_CR(lp, mcr);
 	if (update_multicast) {
-		DBG(SMC_DEBUG_MISC,
-			"%s: update mcast hash table 0x%08x 0x%08x\n",
-			dev->name, multicast_table[0], multicast_table[1]);
+		DBG(SMC_DEBUG_MISC, dev,
+		    "update mcast hash table 0x%08x 0x%08x\n",
+		    multicast_table[0], multicast_table[1]);
 		SMC_SET_HASHL(lp, multicast_table[0]);
 		SMC_SET_HASHH(lp, multicast_table[1]);
 	}
@@ -1398,7 +1395,7 @@
 {
 	struct smc911x_local *lp = netdev_priv(dev);
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	/* reset the hardware */
 	smc911x_reset(dev);
@@ -1425,7 +1422,7 @@
 {
 	struct smc911x_local *lp = netdev_priv(dev);
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
@@ -1459,7 +1456,7 @@
 	int ret, status;
 	unsigned long flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 	cmd->maxtxpkt = 1;
 	cmd->maxrxpkt = 1;
 
@@ -1597,16 +1594,16 @@
 	e2p_cmd = SMC_GET_E2P_CMD(lp);
 	for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
 		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
-			PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
-				dev->name, __func__);
+			PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
+			       __func__);
 			return -EFAULT;
 		}
 		mdelay(1);
 		e2p_cmd = SMC_GET_E2P_CMD(lp);
 	}
 	if (timeout == 0) {
-		PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
-			dev->name, __func__);
+		PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
+		       __func__);
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -1719,7 +1716,7 @@
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	cookie = probe_irq_on();
 
@@ -1799,13 +1796,14 @@
 	const char *version_string;
 	unsigned long irq_flags;
 
-	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 
 	/* First, see if the endian word is recognized */
 	val = SMC_GET_BYTE_TEST(lp);
-	DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+	DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
+	    CARDNAME, val);
 	if (val != 0x87654321) {
-		printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+		netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
 		retval = -ENODEV;
 		goto err_out;
 	}
@@ -1816,26 +1814,29 @@
 	 * as future revisions could be added.
 	 */
 	chip_id = SMC_GET_PN(lp);
-	DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+	DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
+	    CARDNAME, chip_id);
 	for(i=0;chip_ids[i].id != 0; i++) {
 		if (chip_ids[i].id == chip_id) break;
 	}
 	if (!chip_ids[i].id) {
-		printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+		netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
 		retval = -ENODEV;
 		goto err_out;
 	}
 	version_string = chip_ids[i].name;
 
 	revision = SMC_GET_REV(lp);
-	DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+	DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
 
 	/* At this point I'll assume that the chip is an SMC911x. */
-	DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+	DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
+	    CARDNAME, chip_ids[i].name);
 
 	/* Validate the TX FIFO size requested */
 	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
-		printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+		netdev_err(dev, "Invalid TX FIFO size requested %d\n",
+			   tx_fifo_kb);
 		retval = -EINVAL;
 		goto err_out;
 	}
@@ -1887,14 +1888,13 @@
 		case 14:/* 1920 Rx Data Fifo Size */
 			lp->afc_cfg=0x0006032F;break;
 		 default:
-			 PRINTK("%s: ERROR -- no AFC_CFG setting found",
-				dev->name);
+			 PRINTK(dev, "ERROR -- no AFC_CFG setting found");
 			 break;
 	}
 
-	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
-		"%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
-		lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
+	    "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+	    lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
 
 	spin_lock_init(&lp->lock);
 
@@ -1924,8 +1924,7 @@
 		}
 	}
 	if (dev->irq == 0) {
-		printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
-			dev->name);
+		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
 		retval = -ENODEV;
 		goto err_out;
 	}
@@ -1980,33 +1979,32 @@
 	retval = register_netdev(dev);
 	if (retval == 0) {
 		/* now, print out the card info, in a short format.. */
-		printk("%s: %s (rev %d) at %#lx IRQ %d",
-			dev->name, version_string, lp->revision,
-			dev->base_addr, dev->irq);
+		netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
+			    version_string, lp->revision,
+			    dev->base_addr, dev->irq);
 
 #ifdef SMC_USE_DMA
 		if (lp->rxdma != -1)
-			printk(" RXDMA %d ", lp->rxdma);
+			pr_cont(" RXDMA %d", lp->rxdma);
 
 		if (lp->txdma != -1)
-			printk("TXDMA %d", lp->txdma);
+			pr_cont(" TXDMA %d", lp->txdma);
 #endif
-		printk("\n");
+		pr_cont("\n");
 		if (!is_valid_ether_addr(dev->dev_addr)) {
-			printk("%s: Invalid ethernet MAC address. Please "
-					"set using ifconfig\n", dev->name);
+			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
 		} else {
 			/* Print the Ethernet address */
-			printk("%s: Ethernet addr: %pM\n",
-				dev->name, dev->dev_addr);
+			netdev_info(dev, "Ethernet addr: %pM\n",
+				    dev->dev_addr);
 		}
 
 		if (lp->phy_type == 0) {
-			PRINTK("%s: No PHY found\n", dev->name);
+			PRINTK(dev, "No PHY found\n");
 		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
-			PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+			PRINTK(dev, "LAN911x Internal PHY\n");
 		} else {
-			PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+			PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
 		}
 	}
 
@@ -2025,7 +2023,7 @@
 }
 
 /*
- * smc911x_init(void)
+ * smc911x_drv_probe(void)
  *
  *	  Output:
  *	 0 --> there is a device
@@ -2039,6 +2037,7 @@
 	void __iomem *addr;
 	int ret;
 
+	/* ndev is not valid yet, so avoid passing it in. */
 	DBG(SMC_DEBUG_FUNC, "--> %s\n",  __func__);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -2093,7 +2092,7 @@
 release_1:
 		release_mem_region(res->start, SMC911X_IO_EXTENT);
 out:
-		printk("%s: not found (%d).\n", CARDNAME, ret);
+		pr_info("%s: not found (%d).\n", CARDNAME, ret);
 	}
 #ifdef SMC_USE_DMA
 	else {
@@ -2111,7 +2110,7 @@
 	struct smc911x_local *lp = netdev_priv(ndev);
 	struct resource *res;
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
 
 	unregister_netdev(ndev);
 
@@ -2140,7 +2139,7 @@
 	struct net_device *ndev = platform_get_drvdata(dev);
 	struct smc911x_local *lp = netdev_priv(ndev);
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
 	if (ndev) {
 		if (netif_running(ndev)) {
 			netif_device_detach(ndev);
@@ -2158,7 +2157,7 @@
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
 
-	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
 	if (ndev) {
 		struct smc911x_local *lp = netdev_priv(ndev);
 
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index d51261b..9965da3 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -227,7 +227,7 @@
 #define SMC_DMA_ACK_IRQ(dev, dma)					\
 {									\
 	if (DCSR(dma) & DCSR_BUSERR) {					\
-		printk("%s: DMA %d bus error!\n", dev->name, dma);	\
+		netdev_err(dev, "DMA %d bus error!\n", dma);		\
 	}								\
 	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;		\
 }
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index afd9873..67d9fde 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -55,7 +55,7 @@
  ----------------------------------------------------------------------------*/
 
 static const char version[] =
-	"smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+	"smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)";
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -597,7 +597,7 @@
 	packet_no = inb( ioaddr + PNR_ARR + 1 );
 	if ( packet_no & 0x80 ) {
 		/* or isn't there?  BAD CHIP! */
-		printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
+		netdev_dbg(dev, CARDNAME": Memory allocation failed.\n");
 		dev_kfree_skb_any(skb);
 		lp->saved_skb = NULL;
 		netif_wake_queue(dev);
@@ -610,7 +610,7 @@
 	/* point to the beginning of the packet */
 	outw( PTR_AUTOINC , ioaddr + POINTER );
 
-   	PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+	PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length));
 #if SMC_DEBUG > 2
 	print_packet( buf, length );
 #endif
@@ -846,7 +846,6 @@
 static int __init smc_probe(struct net_device *dev, int ioaddr)
 {
 	int i, memory, retval;
-	static unsigned version_printed;
 	unsigned int bank;
 
 	const char *version_string;
@@ -913,8 +912,7 @@
 	   It might be prudent to check a listing of MAC addresses
 	   against the hardware address, or do some other tests. */
 
-	if (version_printed++ == 0)
-		printk("%s", version);
+	pr_info_once("%s\n", version);
 
 	/* fill in some of the fields */
 	dev->base_addr = ioaddr;
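
pr_info_once() replaces the hand-rolled static version_printed counter: it wraps pr_info() in a one-shot guard so the banner prints on the first call only. Note the companion change earlier in this file that drops the trailing \n from the version[] string, since the "%s\n" format now supplies it. Roughly what the helper expands to (simplified sketch, not the exact kernel macro):

    #define example_pr_info_once(fmt, ...)		\
    do {						\
    	static bool done;			\
    						\
    	if (!done) {				\
    		done = true;			\
    		pr_info(fmt, ##__VA_ARGS__);	\
    	}					\
    } while (0)
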
@@ -1003,21 +1001,21 @@
 
 	/* now, print out the card info, in a short format.. */
 
-	printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
-		version_string, revision_register & 0xF, ioaddr, dev->irq,
-		if_string, memory );
+	netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
+		    version_string, revision_register & 0xF, ioaddr, dev->irq,
+		    if_string, memory);
 	/*
 	 . Print the Ethernet address
 	*/
-	printk("ADDR: %pM\n", dev->dev_addr);
+	netdev_info(dev, "ADDR: %pM\n", dev->dev_addr);
 
 	/* Grab the IRQ */
-      	retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
-      	if (retval) {
-		printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
-			dev->irq, retval);
-  	  	goto err_out;
-      	}
+	retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
+	if (retval) {
+		netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
+			    DRV_NAME, dev->irq, retval);
+		goto err_out;
+	}
 
 	dev->netdev_ops			= &smc_netdev_ops;
 	dev->watchdog_timeo		= HZ/20;
@@ -1037,30 +1035,32 @@
 	int remainder;
 	int lines;
 
-	printk("Packet of length %d\n", length);
+	pr_debug("Packet of length %d\n", length);
 	lines = length / 16;
 	remainder = length % 16;
 
 	for ( i = 0; i < lines ; i ++ ) {
 		int cur;
 
+		printk(KERN_DEBUG);
 		for ( cur = 0; cur < 8; cur ++ ) {
 			byte a, b;
 
 			a = *(buf ++ );
 			b = *(buf ++ );
-			printk("%02x%02x ", a, b );
+			pr_cont("%02x%02x ", a, b);
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
+	printk(KERN_DEBUG);
 	for ( i = 0; i < remainder/2 ; i++ ) {
 		byte a, b;
 
 		a = *(buf ++ );
 		b = *(buf ++ );
-		printk("%02x%02x ", a, b );
+		pr_cont("%02x%02x ", a, b);
 	}
-	printk("\n");
+	pr_cont("\n");
 #endif
 }
 #endif
@@ -1127,9 +1127,8 @@
 {
 	/* If we get here, some higher level has decided we are broken.
 	   There should really be a "kick me" function call instead. */
-	printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
-		tx_done(dev) ? "IRQ conflict" :
-		"network cable problem");
+	netdev_warn(dev, CARDNAME": transmit timed out, %s?\n",
+		    tx_done(dev) ? "IRQ conflict" : "network cable problem");
 	/* "kick" the adaptor */
 	smc_reset( dev->base_addr );
 	smc_enable( dev->base_addr );
@@ -1299,8 +1298,7 @@
 	dev->stats.tx_errors++;
 	if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
 	if ( tx_status & TS_LATCOL  ) {
-		printk(KERN_DEBUG CARDNAME
-			": Late collision occurred on last xmit.\n");
+		netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n");
 		dev->stats.tx_window_errors++;
 	}
 #if 0
@@ -1308,7 +1306,7 @@
 #endif
 
 	if ( tx_status & TS_SUCCESS ) {
-		printk(CARDNAME": Successful packet caused interrupt\n");
+		netdev_info(dev, CARDNAME": Successful packet caused interrupt\n");
 	}
 	/* re-enable transmit */
 	SMC_SELECT_BANK( 0 );
@@ -1547,9 +1545,7 @@
 
 	/* copy the parameters from insmod into the device structure */
 	devSMC9194 = smc_init(-1);
-	if (IS_ERR(devSMC9194))
-		return PTR_ERR(devSMC9194);
-	return 0;
+	return PTR_ERR_OR_ZERO(devSMC9194);
 }
 
 void __exit cleanup_module(void)
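
PTR_ERR_OR_ZERO() from <linux/err.h> condenses the removed three-line tail: it evaluates to the encoded errno when handed an ERR_PTR and to 0 for a valid pointer. The call above is therefore equivalent to the code it replaces:

    if (IS_ERR(devSMC9194))
    	return PTR_ERR(devSMC9194);
    return 0;
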
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 656d2e2..8ef70d9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -740,7 +740,7 @@
 	     (smc->cardid == PRODID_PSION_NET100))) {
 		i = osi_load_firmware(link);
 		if (i) {
-			pr_err("smc91c92_cs: Failed to load firmware\n");
+			netdev_err(dev, "Failed to load firmware\n");
 			return i;
 		}
 	}
@@ -793,7 +793,7 @@
     }
 
     if (width) {
-	    pr_info("using 8-bit IO window\n");
+	    netdev_info(dev, "using 8-bit IO window\n");
 
 	    smc91c92_suspend(link);
 	    pcmcia_fixup_iowidth(link);
@@ -1036,7 +1036,7 @@
     save = inw(ioaddr + BANK_SELECT);
     for (w = 0; w < 4; w++) {
 	SMC_SELECT_BANK(w);
-	netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
+	netdev_dbg(dev, "bank %d: ", w);
 	for (i = 0; i < 14; i += 2)
 	    pr_cont(" %04x", inw(ioaddr + i));
 	pr_cont("\n");
@@ -1213,8 +1213,7 @@
     if (smc->saved_skb) {
 	/* THIS SHOULD NEVER HAPPEN. */
 	dev->stats.tx_aborted_errors++;
-	netdev_printk(KERN_DEBUG, dev,
-		      "Internal error -- sent packet while busy\n");
+	netdev_dbg(dev, "Internal error -- sent packet while busy\n");
 	return NETDEV_TX_BUSY;
     }
     smc->saved_skb = skb;
@@ -1254,7 +1253,7 @@
     }
 
     /* Otherwise defer until the Tx-space-allocated interrupt. */
-    pr_debug("%s: memory allocation deferred.\n", dev->name);
+    netdev_dbg(dev, "memory allocation deferred.\n");
     outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
     spin_unlock_irqrestore(&smc->lock, flags);
 
@@ -1317,8 +1316,8 @@
 
     SMC_SELECT_BANK(0);
     ephs = inw(ioaddr + EPH);
-    pr_debug("%s: Ethernet protocol handler interrupt, status"
-	  " %4.4x.\n", dev->name, ephs);
+    netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n",
+	       ephs);
     /* Could be a counter roll-over warning: update stats. */
     card_stats = inw(ioaddr + COUNTER);
     /* single collisions */
@@ -1357,8 +1356,8 @@
 
     ioaddr = dev->base_addr;
 
-    pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
-	  irq, ioaddr);
+    netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n",
+	       irq, ioaddr);
 
     spin_lock(&smc->lock);
     smc->watchdog = 0;
@@ -1366,8 +1365,8 @@
     if ((saved_bank & 0xff00) != 0x3300) {
 	/* The device does not exist -- the card could be off-line, or
 	   maybe it has been ejected. */
-	pr_debug("%s: SMC91c92 interrupt %d for non-existent"
-	      "/ejected device.\n", dev->name, irq);
+	netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n",
+		   irq);
 	handled = 0;
 	goto irq_done;
     }
@@ -1380,8 +1379,8 @@
 
     do { /* read the status flag, and mask it */
 	status = inw(ioaddr + INTERRUPT) & 0xff;
-	pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
-	      status, mask);
+	netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n",
+		   status, mask);
 	if ((status & mask) == 0) {
 	    if (bogus_cnt == INTR_WORK)
 		handled = 0;
@@ -1425,15 +1424,15 @@
 	    smc_eph_irq(dev);
     } while (--bogus_cnt);
 
-    pr_debug("  Restoring saved registers mask %2.2x bank %4.4x"
-	  " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+    netdev_dbg(dev, "  Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n",
+	       mask, saved_bank, saved_pointer);
 
     /* restore state register */
     outw((mask<<8), ioaddr + INTERRUPT);
     outw(saved_pointer, ioaddr + POINTER);
     SMC_SELECT_BANK(saved_bank);
 
-    pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+    netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq);
 
 irq_done:
 
@@ -1491,10 +1490,10 @@
     rx_status = inw(ioaddr + DATA_1);
     packet_length = inw(ioaddr + DATA_1) & 0x07ff;
 
-    pr_debug("%s: Receive status %4.4x length %d.\n",
-	  dev->name, rx_status, packet_length);
+    netdev_dbg(dev, "Receive status %4.4x length %d.\n",
+	       rx_status, packet_length);
 
-    if (!(rx_status & RS_ERRORS)) {		
+    if (!(rx_status & RS_ERRORS)) {
 	/* do stuff to make a new packet */
 	struct sk_buff *skb;
 	
@@ -1502,7 +1501,7 @@
 	skb = netdev_alloc_skb(dev, packet_length+2);
 	
 	if (skb == NULL) {
-	    pr_debug("%s: Low memory, packet dropped.\n", dev->name);
+	    netdev_dbg(dev, "Low memory, packet dropped.\n");
 	    dev->stats.rx_dropped++;
 	    outw(MC_RELEASE, ioaddr + MMU_CMD);
 	    return;
@@ -1643,7 +1642,7 @@
     struct smc_private *smc = netdev_priv(dev);
     int i;
 
-    pr_debug("%s: smc91c92 reset called.\n", dev->name);
+    netdev_dbg(dev, "smc91c92 reset called.\n");
 
     /* The first interaction must be a write to bring the chip out
        of sleep mode. */
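
Beyond the shorter call sites, moving smc91c92_cs from pr_debug()/pr_err() with dev->name to the netdev_<level> helpers gives every message a consistent device prefix, and netdev_dbg() (like pr_debug()) compiles out unless DEBUG is defined or is routed through dynamic debug under CONFIG_DYNAMIC_DEBUG. Illustrative sketch (hypothetical function):

    static void example_trace_irq(struct net_device *dev, int irq)
    {
    	/* no-op without -DDEBUG or a matching dyndbg rule */
    	netdev_dbg(dev, "interrupt %d\n", irq);
    }
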
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73be7f3..0c9b5d9 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -58,7 +58,7 @@
  *   22/09/04  Nicolas Pitre      big update (see commit log for details)
  */
 static const char version[] =
-	"smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n";
+	"smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";
 
 /* Debugging level */
 #ifndef SMC_DEBUG
@@ -149,16 +149,16 @@
 #define MII_DELAY		1
 
 #if SMC_DEBUG > 0
-#define DBG(n, args...)					\
+#define DBG(n, dev, args...)				\
 	do {						\
 		if (SMC_DEBUG >= (n))			\
-			printk(args);	\
+			netdev_dbg(dev, args);		\
 	} while (0)
 
-#define PRINTK(args...)   printk(args)
+#define PRINTK(dev, args...)   netdev_info(dev, args)
 #else
-#define DBG(n, args...)   do { } while(0)
-#define PRINTK(args...)   printk(KERN_DEBUG args)
+#define DBG(n, dev, args...)   do { } while (0)
+#define PRINTK(dev, args...)   netdev_dbg(dev, args)
 #endif
 
 #if SMC_DEBUG > 3
@@ -173,24 +173,26 @@
 
 	for (i = 0; i < lines ; i ++) {
 		int cur;
+		printk(KERN_DEBUG);
 		for (cur = 0; cur < 8; cur++) {
 			u_char a, b;
 			a = *buf++;
 			b = *buf++;
-			printk("%02x%02x ", a, b);
+			pr_cont("%02x%02x ", a, b);
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
+	printk(KERN_DEBUG);
 	for (i = 0; i < remainder/2 ; i++) {
 		u_char a, b;
 		a = *buf++;
 		b = *buf++;
-		printk("%02x%02x ", a, b);
+		pr_cont("%02x%02x ", a, b);
 	}
-	printk("\n");
+	pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while(0)
+#define PRINT_PKT(x...)  do { } while (0)
 #endif
 
 
@@ -226,8 +228,8 @@
 		unsigned long timeout = jiffies + 2;			\
 		while (SMC_GET_MMU_CMD(lp) & MC_BUSY) {		\
 			if (time_after(jiffies, timeout)) {		\
-				printk("%s: timeout %s line %d\n",	\
-					dev->name, __FILE__, __LINE__);	\
+				netdev_dbg(dev, "timeout %s line %d\n",	\
+					   __FILE__, __LINE__);		\
 				break;					\
 			}						\
 			cpu_relax();					\
@@ -246,7 +248,7 @@
 	unsigned int ctl, cfg;
 	struct sk_buff *pending_skb;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	/* Disable all interrupts, block TX tasklet */
 	spin_lock_irq(&lp->lock);
@@ -339,7 +341,7 @@
 	void __iomem *ioaddr = lp->base;
 	int mask;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	/* see the header file for options in TCR/RCR DEFAULT */
 	SMC_SELECT_BANK(lp, 0);
@@ -373,7 +375,7 @@
 	void __iomem *ioaddr = lp->base;
 	struct sk_buff *pending_skb;
 
-	DBG(2, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
 
 	/* no more interrupts for me */
 	spin_lock_irq(&lp->lock);
@@ -406,11 +408,11 @@
 	void __iomem *ioaddr = lp->base;
 	unsigned int packet_number, status, packet_len;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	packet_number = SMC_GET_RXFIFO(lp);
 	if (unlikely(packet_number & RXFIFO_REMPTY)) {
-		PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+		PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
 		return;
 	}
 
@@ -420,9 +422,8 @@
 	/* First two words are status and packet length */
 	SMC_GET_PKT_HDR(lp, status, packet_len);
 	packet_len &= 0x07ff;  /* mask off top bits */
-	DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
-		dev->name, packet_number, status,
-		packet_len, packet_len);
+	DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+	    packet_number, status, packet_len, packet_len);
 
 	back:
 	if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
@@ -433,8 +434,8 @@
 		}
 		if (packet_len < 6) {
 			/* bloody hardware */
-			printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
-					dev->name, packet_len, status);
+			netdev_err(dev, "fubar (rxlen %u status %x\n",
+				   packet_len, status);
 			status |= RS_TOOSHORT;
 		}
 		SMC_WAIT_MMU_BUSY(lp);
@@ -551,7 +552,7 @@
 	unsigned char *buf;
 	unsigned long flags;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
@@ -568,7 +569,7 @@
 
 	packet_no = SMC_GET_AR(lp);
 	if (unlikely(packet_no & AR_FAILED)) {
-		printk("%s: Memory allocation failed.\n", dev->name);
+		netdev_err(dev, "Memory allocation failed.\n");
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
 		smc_special_unlock(&lp->lock, flags);
@@ -581,8 +582,8 @@
 
 	buf = skb->data;
 	len = skb->len;
-	DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
-		dev->name, packet_no, len, len, buf);
+	DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+	    packet_no, len, len, buf);
 	PRINT_PKT(buf, len);
 
 	/*
@@ -637,7 +638,7 @@
 	unsigned int numPages, poll_count, status;
 	unsigned long flags;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	BUG_ON(lp->pending_tx_skb != NULL);
 
@@ -654,7 +655,7 @@
 	 */
 	numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
 	if (unlikely(numPages > 7)) {
-		printk("%s: Far too big packet error.\n", dev->name);
+		netdev_warn(dev, "Far too big packet error.\n");
 		dev->stats.tx_errors++;
 		dev->stats.tx_dropped++;
 		dev_kfree_skb(skb);
@@ -685,7 +686,7 @@
    	if (!poll_count) {
 		/* oh well, wait until the chip finds memory later */
 		netif_stop_queue(dev);
-		DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+		DBG(2, dev, "TX memory allocation deferred.\n");
 		SMC_ENABLE_INT(lp, IM_ALLOC_INT);
    	} else {
 		/*
@@ -709,12 +710,12 @@
 	void __iomem *ioaddr = lp->base;
 	unsigned int saved_packet, packet_no, tx_status, pkt_len;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	/* If the TX FIFO is empty then nothing to do */
 	packet_no = SMC_GET_TXFIFO(lp);
 	if (unlikely(packet_no & TXFIFO_TEMPTY)) {
-		PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+		PRINTK(dev, "smc_tx with nothing on FIFO.\n");
 		return;
 	}
 
@@ -725,8 +726,8 @@
 	/* read the first word (status word) from this packet */
 	SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
 	SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
-	DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
-		dev->name, tx_status, packet_no);
+	DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
+	    tx_status, packet_no);
 
 	if (!(tx_status & ES_TX_SUC))
 		dev->stats.tx_errors++;
@@ -735,14 +736,12 @@
 		dev->stats.tx_carrier_errors++;
 
 	if (tx_status & (ES_LATCOL | ES_16COL)) {
-		PRINTK("%s: %s occurred on last xmit\n", dev->name,
+		PRINTK(dev, "%s occurred on last xmit\n",
 		       (tx_status & ES_LATCOL) ?
 			"late collision" : "too many collisions");
 		dev->stats.tx_window_errors++;
 		if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
-			printk(KERN_INFO "%s: unexpectedly large number of "
-			       "bad collisions. Please check duplex "
-			       "setting.\n", dev->name);
+			netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
 		}
 	}
 
@@ -830,8 +829,8 @@
 	/* Return to idle state */
 	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
 
-	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__func__, phyaddr, phyreg, phydata);
+	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+	    __func__, phyaddr, phyreg, phydata);
 
 	SMC_SELECT_BANK(lp, 2);
 	return phydata;
@@ -857,8 +856,8 @@
 	/* Return to idle state */
 	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
 
-	DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
-		__func__, phyaddr, phyreg, phydata);
+	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+	    __func__, phyaddr, phyreg, phydata);
 
 	SMC_SELECT_BANK(lp, 2);
 }
@@ -871,7 +870,7 @@
 	struct smc_local *lp = netdev_priv(dev);
 	int phyaddr;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	lp->phy_type = 0;
 
@@ -886,8 +885,8 @@
 		id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
 		id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
 
-		DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
-			dev->name, id1, id2);
+		DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
+		    id1, id2);
 
 		/* Make sure it is a valid identifier */
 		if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
@@ -910,7 +909,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int bmcr, cfg1;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	/* Enter Link Disable state */
 	cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1044,7 +1043,7 @@
 	int my_ad_caps; /* My Advertised capabilities */
 	int status;
 
-	DBG(3, "%s:smc_program_phy()\n", dev->name);
+	DBG(3, dev, "smc_program_phy()\n");
 
 	spin_lock_irq(&lp->lock);
 
@@ -1055,7 +1054,7 @@
 		goto smc_phy_configure_exit;
 
 	if (smc_phy_reset(dev, phyaddr)) {
-		printk("%s: PHY reset timed out\n", dev->name);
+		netdev_info(dev, "PHY reset timed out\n");
 		goto smc_phy_configure_exit;
 	}
 
@@ -1082,7 +1081,7 @@
 	my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
 
 	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
-		printk(KERN_INFO "Auto negotiation NOT supported\n");
+		netdev_info(dev, "Auto negotiation NOT supported\n");
 		smc_phy_fixed(dev);
 		goto smc_phy_configure_exit;
 	}
@@ -1118,8 +1117,8 @@
 	 */
 	status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
 
-	DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
-	DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+	DBG(2, dev, "phy caps=%x\n", my_phy_caps);
+	DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
 
 	/* Restart auto-negotiation process in order to advertise my caps */
 	smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -1143,7 +1142,7 @@
 	int phyaddr = lp->mii.phy_id;
 	int phy18;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	if (lp->phy_type == 0)
 		return;
@@ -1179,8 +1178,8 @@
 			netif_carrier_on(dev);
 		}
 		if (netif_msg_link(lp))
-			printk(KERN_INFO "%s: link %s\n", dev->name,
-			       new_carrier ? "up" : "down");
+			netdev_info(dev, "link %s\n",
+				    new_carrier ? "up" : "down");
 	}
 }
 
@@ -1211,7 +1210,7 @@
 	int status, mask, timeout, card_stats;
 	int saved_pointer;
 
-	DBG(3, "%s: %s\n", dev->name, __func__);
+	DBG(3, dev, "%s\n", __func__);
 
 	spin_lock(&lp->lock);
 
@@ -1230,12 +1229,12 @@
 	do {
 		status = SMC_GET_INT(lp);
 
-		DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
-			dev->name, status, mask,
-			({ int meminfo; SMC_SELECT_BANK(lp, 0);
-			   meminfo = SMC_GET_MIR(lp);
-			   SMC_SELECT_BANK(lp, 2); meminfo; }),
-			SMC_GET_FIFO(lp));
+		DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+		    status, mask,
+		    ({ int meminfo; SMC_SELECT_BANK(lp, 0);
+		       meminfo = SMC_GET_MIR(lp);
+		       SMC_SELECT_BANK(lp, 2); meminfo; }),
+		    SMC_GET_FIFO(lp));
 
 		status &= mask;
 		if (!status)
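
The ({ ... }) arguments passed to DBG() here are GNU C statement expressions: the last statement's value becomes the value of the whole expression, which lets the argument list switch register banks, sample a register, and restore the bank inline. Reduced to its shape (using the macros visible above):

    int meminfo = ({
    	int v;

    	SMC_SELECT_BANK(lp, 0);	/* MIR lives in bank 0 */
    	v = SMC_GET_MIR(lp);
    	SMC_SELECT_BANK(lp, 2);	/* put the bank back for the ISR */
    	v;			/* result of the statement expression */
    });
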
@@ -1243,20 +1242,20 @@
 
 		if (status & IM_TX_INT) {
 			/* do this before RX as it will free memory quickly */
-			DBG(3, "%s: TX int\n", dev->name);
+			DBG(3, dev, "TX int\n");
 			smc_tx(dev);
 			SMC_ACK_INT(lp, IM_TX_INT);
 			if (THROTTLE_TX_PKTS)
 				netif_wake_queue(dev);
 		} else if (status & IM_RCV_INT) {
-			DBG(3, "%s: RX irq\n", dev->name);
+			DBG(3, dev, "RX irq\n");
 			smc_rcv(dev);
 		} else if (status & IM_ALLOC_INT) {
-			DBG(3, "%s: Allocation irq\n", dev->name);
+			DBG(3, dev, "Allocation irq\n");
 			tasklet_hi_schedule(&lp->tx_task);
 			mask &= ~IM_ALLOC_INT;
 		} else if (status & IM_TX_EMPTY_INT) {
-			DBG(3, "%s: TX empty\n", dev->name);
+			DBG(3, dev, "TX empty\n");
 			mask &= ~IM_TX_EMPTY_INT;
 
 			/* update stats */
@@ -1271,10 +1270,10 @@
 			/* multiple collisions */
 			dev->stats.collisions += card_stats & 0xF;
 		} else if (status & IM_RX_OVRN_INT) {
-			DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
-			       ({ int eph_st; SMC_SELECT_BANK(lp, 0);
-				  eph_st = SMC_GET_EPH_STATUS(lp);
-				  SMC_SELECT_BANK(lp, 2); eph_st; }));
+			DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
+			    ({ int eph_st; SMC_SELECT_BANK(lp, 0);
+			       eph_st = SMC_GET_EPH_STATUS(lp);
+			       SMC_SELECT_BANK(lp, 2); eph_st; }));
 			SMC_ACK_INT(lp, IM_RX_OVRN_INT);
 			dev->stats.rx_errors++;
 			dev->stats.rx_fifo_errors++;
@@ -1285,7 +1284,7 @@
 			smc_phy_interrupt(dev);
 		} else if (status & IM_ERCV_INT) {
 			SMC_ACK_INT(lp, IM_ERCV_INT);
-			PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+			PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
 		}
 	} while (--timeout);
 
@@ -1296,11 +1295,11 @@
 
 #ifndef CONFIG_NET_POLL_CONTROLLER
 	if (timeout == MAX_IRQ_LOOPS)
-		PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
-		       dev->name, mask);
+		PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
+		       mask);
 #endif
-	DBG(3, "%s: Interrupt done (%d loops)\n",
-	       dev->name, MAX_IRQ_LOOPS - timeout);
+	DBG(3, dev, "Interrupt done (%d loops)\n",
+	    MAX_IRQ_LOOPS - timeout);
 
 	/*
 	 * We return IRQ_HANDLED unconditionally here even if there was
@@ -1333,7 +1332,7 @@
 	void __iomem *ioaddr = lp->base;
 	int status, mask, eph_st, meminfo, fifo;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	spin_lock_irq(&lp->lock);
 	status = SMC_GET_INT(lp);
@@ -1344,9 +1343,8 @@
 	meminfo = SMC_GET_MIR(lp);
 	SMC_SELECT_BANK(lp, 2);
 	spin_unlock_irq(&lp->lock);
-	PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
-		"MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
-		dev->name, status, mask, meminfo, fifo, eph_st );
+	PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+	       status, mask, meminfo, fifo, eph_st);
 
 	smc_reset(dev);
 	smc_enable(dev);
@@ -1377,10 +1375,10 @@
 	unsigned char multicast_table[8];
 	int update_multicast = 0;
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	if (dev->flags & IFF_PROMISC) {
-		DBG(2, "%s: RCR_PRMS\n", dev->name);
+		DBG(2, dev, "RCR_PRMS\n");
 		lp->rcr_cur_mode |= RCR_PRMS;
 	}
 
@@ -1395,7 +1393,7 @@
 	 * checked before the table is
 	 */
 	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
-		DBG(2, "%s: RCR_ALMUL\n", dev->name);
+		DBG(2, dev, "RCR_ALMUL\n");
 		lp->rcr_cur_mode |= RCR_ALMUL;
 	}
 
@@ -1437,7 +1435,7 @@
 		/* now, the table can be loaded into the chipset */
 		update_multicast = 1;
 	} else  {
-		DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+		DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
 		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
 
 		/*
@@ -1470,7 +1468,7 @@
 {
 	struct smc_local *lp = netdev_priv(dev);
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	/* Setup the default Register Modes */
 	lp->tcr_cur_mode = TCR_DEFAULT;
@@ -1514,7 +1512,7 @@
 {
 	struct smc_local *lp = netdev_priv(dev);
 
-	DBG(2, "%s: %s\n", dev->name, __func__);
+	DBG(2, dev, "%s\n", __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
@@ -1694,7 +1692,7 @@
 	int i;
 	int imax;
 
-	DBG(1, "Reading %d bytes at %d(0x%x)\n",
+	DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
 		eeprom->len, eeprom->offset, eeprom->offset);
 	imax = smc_ethtool_geteeprom_len(dev);
 	for (i = 0; i < eeprom->len; i += 2) {
@@ -1706,7 +1704,7 @@
 		ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
 		if (ret != 0)
 			return ret;
-		DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+		DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
 		data[i] = (wbuf >> 8) & 0xff;
 		data[i+1] = wbuf & 0xff;
 	}
@@ -1719,8 +1717,8 @@
 	int i;
 	int imax;
 
-	DBG(1, "Writing %d bytes to %d(0x%x)\n",
-			eeprom->len, eeprom->offset, eeprom->offset);
+	DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
+	    eeprom->len, eeprom->offset, eeprom->offset);
 	imax = smc_ethtool_geteeprom_len(dev);
 	for (i = 0; i < eeprom->len; i += 2) {
 		int ret;
@@ -1729,7 +1727,7 @@
 		if (offset > imax)
 			break;
 		wbuf = (data[i] << 8) | data[i + 1];
-		DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+		DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
 		ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
 		if (ret != 0)
 			return ret;
@@ -1784,7 +1782,7 @@
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(2, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
 
 	cookie = probe_irq_on();
 
@@ -1856,21 +1854,21 @@
 		     unsigned long irq_flags)
 {
 	struct smc_local *lp = netdev_priv(dev);
-	static int version_printed = 0;
 	int retval;
 	unsigned int val, revision_register;
 	const char *version_string;
 
-	DBG(2, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
 
 	/* First, see if the high byte is 0x33 */
 	val = SMC_CURRENT_BANK(lp);
-	DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+	DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
+	    CARDNAME, val);
 	if ((val & 0xFF00) != 0x3300) {
 		if ((val & 0xFF) == 0x33) {
-			printk(KERN_WARNING
-				"%s: Detected possible byte-swapped interface"
-				" at IOADDR %p\n", CARDNAME, ioaddr);
+			netdev_warn(dev,
+				    "%s: Detected possible byte-swapped interface at IOADDR %p\n",
+				    CARDNAME, ioaddr);
 		}
 		retval = -ENODEV;
 		goto err_out;
@@ -1897,8 +1895,8 @@
 	val = SMC_GET_BASE(lp);
 	val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
 	if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
-		printk("%s: IOADDR %p doesn't match configuration (%x).\n",
-			CARDNAME, ioaddr, val);
+		netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
+			    CARDNAME, ioaddr, val);
 	}
 
 	/*
@@ -1908,21 +1906,19 @@
 	 */
 	SMC_SELECT_BANK(lp, 3);
 	revision_register = SMC_GET_REV(lp);
-	DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+	DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
 	version_string = chip_ids[ (revision_register >> 4) & 0xF];
 	if (!version_string || (revision_register & 0xff00) != 0x3300) {
 		/* I don't recognize this chip, so... */
-		printk("%s: IO %p: Unrecognized revision register 0x%04x"
-			", Contact author.\n", CARDNAME,
-			ioaddr, revision_register);
+		netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
+			    CARDNAME, ioaddr, revision_register);
 
 		retval = -ENODEV;
 		goto err_out;
 	}
 
 	/* At this point I'll assume that the chip is an SMC91x. */
-	if (version_printed++ == 0)
-		printk("%s", version);
+	pr_info_once("%s\n", version);
 
 	/* fill in some of the fields */
 	dev->base_addr = (unsigned long)ioaddr;
@@ -1940,7 +1936,7 @@
 	/*
 	 * If dev->irq is 0, then the device has to be banged on to see
 	 * what the IRQ is.
- 	 *
+	 *
 	 * This banging doesn't always detect the IRQ, for unknown reasons.
 	 * a workaround is to reset the chip and try again.
 	 *
@@ -1965,8 +1961,7 @@
 		}
 	}
 	if (dev->irq == 0) {
-		printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
-			dev->name);
+		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
 		retval = -ENODEV;
 		goto err_out;
 	}
@@ -2030,32 +2025,31 @@
 	retval = register_netdev(dev);
 	if (retval == 0) {
 		/* now, print out the card info, in a short format.. */
-		printk("%s: %s (rev %d) at %p IRQ %d",
-			dev->name, version_string, revision_register & 0x0f,
-			lp->base, dev->irq);
+		netdev_info(dev, "%s (rev %d) at %p IRQ %d",
+			    version_string, revision_register & 0x0f,
+			    lp->base, dev->irq);
 
 		if (dev->dma != (unsigned char)-1)
-			printk(" DMA %d", dev->dma);
+			pr_cont(" DMA %d", dev->dma);
 
-		printk("%s%s\n",
+		pr_cont("%s%s\n",
 			lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
 			THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
 
 		if (!is_valid_ether_addr(dev->dev_addr)) {
-			printk("%s: Invalid ethernet MAC address.  Please "
-			       "set using ifconfig\n", dev->name);
+			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
 		} else {
 			/* Print the Ethernet address */
-			printk("%s: Ethernet addr: %pM\n",
-			       dev->name, dev->dev_addr);
+			netdev_info(dev, "Ethernet addr: %pM\n",
+				    dev->dev_addr);
 		}
 
 		if (lp->phy_type == 0) {
-			PRINTK("%s: No PHY found\n", dev->name);
+			PRINTK(dev, "No PHY found\n");
 		} else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
-			PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+			PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
 		} else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
-			PRINTK("%s: PHY LAN83C180\n", dev->name);
+			PRINTK(dev, "PHY LAN83C180\n");
 		}
 	}
 
@@ -2165,7 +2159,8 @@
 			return;
 
 		if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
-			printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+			netdev_info(ndev, "%s: failed to request datacs memory region.\n",
+				    CARDNAME);
 			return;
 		}
 
@@ -2307,7 +2302,7 @@
  out_free_netdev:
 	free_netdev(ndev);
  out:
-	printk("%s: not found (%d).\n", CARDNAME, ret);
+	pr_info("%s: not found (%d).\n", CARDNAME, ret);
 
 	return ret;
 }
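The dev->name removals in this file work because the netdev_* helpers
derive their prefix from the net_device itself; the driver-local
DBG()/PRINTK() wrappers now take the device as their second argument,
presumably so they can print through the same helpers.  A minimal
sketch of the call style, not taken from this patch:

	/* prints e.g. "smc91x smc91x.0 eth0: link down"; driver, device
	 * and interface name are prepended automatically */
	netdev_warn(dev, "link down\n");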
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 98eedb9..c9d4c87 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -907,8 +907,8 @@
 	({								\
 		int __b = SMC_CURRENT_BANK(lp);			\
 		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) {	\
-			printk( "%s: bank reg screwed (0x%04x)\n",	\
-				CARDNAME, __b );			\
+			pr_err("%s: bank reg screwed (0x%04x)\n",	\
+			       CARDNAME, __b);				\
 			BUG();						\
 		}							\
 		reg<<SMC_IO_SHIFT;					\
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5fdbc26..8564f23 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2167,7 +2167,7 @@
 		udelay(1000);
 
 	if (to == 0) {
-		pr_err("Device not READY in 100ms aborting\n");
+		netdev_err(dev, "Device not READY in 100ms aborting\n");
 		return -ENODEV;
 	}
 
@@ -2502,7 +2502,7 @@
 		SMSC_TRACE(pdata, probe,
 			   "MAC Address is specified by configuration");
 	} else if (is_valid_ether_addr(pdata->config.mac)) {
-		memcpy(dev->dev_addr, pdata->config.mac, 6);
+		memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
 		SMSC_TRACE(pdata, probe,
 			   "MAC Address specified by platform data");
 	} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 5f9e79f..f433d97 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -19,6 +19,8 @@
  ***************************************************************************
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
@@ -33,7 +35,6 @@
 #include "smsc9420.h"
 
 #define DRV_NAME		"smsc9420"
-#define PFX			DRV_NAME ": "
 #define DRV_MDIONAME		"smsc9420-mdio"
 #define DRV_DESCRIPTION		"SMSC LAN9420 driver"
 #define DRV_VERSION		"1.01"
@@ -97,21 +98,6 @@
 module_param(debug, uint, 0);
 MODULE_PARM_DESC(debug, "debug level");
 
-#define smsc_dbg(TYPE, f, a...) \
-do {	if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
-		printk(KERN_DEBUG PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_info(TYPE, f, a...) \
-do {	if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
-		printk(KERN_INFO PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_warn(TYPE, f, a...) \
-do {	if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
-		printk(KERN_WARNING PFX f "\n", ## a); \
-} while (0)
-
 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
 {
 	return ioread32(pd->ioaddr + offset);
@@ -140,7 +126,7 @@
 
 	/*  confirm MII not busy */
 	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
-		smsc_warn(DRV, "MII is busy???");
+		netif_warn(pd, drv, pd->dev, "MII is busy???\n");
 		goto out;
 	}
 
@@ -159,7 +145,7 @@
 		udelay(10);
 	}
 
-	smsc_warn(DRV, "MII busy timeout!");
+	netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
 
 out:
 	spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -178,7 +164,7 @@
 
 	/* confirm MII not busy */
 	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
-		smsc_warn(DRV, "MII is busy???");
+		netif_warn(pd, drv, pd->dev, "MII is busy???\n");
 		goto out;
 	}
 
@@ -200,7 +186,7 @@
 		udelay(10);
 	}
 
-	smsc_warn(DRV, "MII busy timeout!");
+	netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
 
 out:
 	spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -222,7 +208,7 @@
 	BUG_ON(!pd);
 
 	if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
-		smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy");
+		netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__);
 		return -EIO;
 	}
 
@@ -235,7 +221,7 @@
 			return 0;
 	} while (timeout--);
 
-	smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out");
+	netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__);
 	return -EIO;
 }
 
@@ -347,9 +333,9 @@
 	int timeout = 100;
 	u32 e2cmd;
 
-	smsc_dbg(HW, "op 0x%08x", op);
+	netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op);
 	if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
-		smsc_warn(HW, "Busy at start");
+		netif_warn(pd, hw, pd->dev, "Busy at start\n");
 		return -EBUSY;
 	}
 
@@ -362,12 +348,13 @@
 	} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
 
 	if (!timeout) {
-		smsc_info(HW, "TIMED OUT");
+		netif_info(pd, hw, pd->dev, "TIMED OUT\n");
 		return -EAGAIN;
 	}
 
 	if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
-		smsc_info(HW, "Error occurred during eeprom operation");
+		netif_info(pd, hw, pd->dev,
+			   "Error occurred during eeprom operation\n");
 		return -EINVAL;
 	}
 
@@ -380,7 +367,7 @@
 	u32 op = E2P_CMD_EPC_CMD_READ_ | address;
 	int ret;
 
-	smsc_dbg(HW, "address 0x%x", address);
+	netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address);
 	ret = smsc9420_eeprom_send_cmd(pd, op);
 
 	if (!ret)
@@ -395,7 +382,7 @@
 	u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
 	int ret;
 
-	smsc_dbg(HW, "address 0x%x, data 0x%x", address, data);
+	netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data);
 	ret = smsc9420_eeprom_send_cmd(pd, op);
 
 	if (!ret) {
@@ -492,7 +479,8 @@
 	/* Check if mac address has been specified when bringing interface up */
 	if (is_valid_ether_addr(dev->dev_addr)) {
 		smsc9420_set_mac_address(dev);
-		smsc_dbg(PROBE, "MAC Address is specified by configuration");
+		netif_dbg(pd, probe, pd->dev,
+			  "MAC Address is specified by configuration\n");
 	} else {
 		/* Try reading mac address from device. if EEPROM is present
 		 * it will already have been set */
@@ -507,12 +495,14 @@
 
 		if (is_valid_ether_addr(dev->dev_addr)) {
 			/* eeprom values are valid  so use them */
-			smsc_dbg(PROBE, "Mac Address is read from EEPROM");
+			netif_dbg(pd, probe, pd->dev,
+				  "Mac Address is read from EEPROM\n");
 		} else {
 			/* eeprom values are invalid, generate random MAC */
 			eth_hw_addr_random(dev);
 			smsc9420_set_mac_address(dev);
-			smsc_dbg(PROBE, "MAC Address is set to random");
+			netif_dbg(pd, probe, pd->dev,
+				  "MAC Address is set to random\n");
 		}
 	}
 }
@@ -535,7 +525,7 @@
 	}
 
 	if (!timeout)
-		smsc_warn(IFDOWN, "TX DMAC failed to stop");
+		netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n");
 
 	/* ACK Tx DMAC stop bit */
 	smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
@@ -646,7 +636,8 @@
 	}
 
 	if (!timeout)
-		smsc_warn(IFDOWN, "RX DMAC did not stop! timeout.");
+		netif_warn(pd, ifdown, pd->dev,
+			   "RX DMAC did not stop! timeout\n");
 
 	/* ACK the Rx DMAC stop bit */
 	smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
@@ -736,7 +727,7 @@
 	smsc9420_reg_read(pd, BUS_MODE);
 	udelay(2);
 	if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
-		smsc_warn(DRV, "Software reset not cleared");
+		netif_warn(pd, drv, pd->dev, "Software reset not cleared\n");
 }
 
 static int smsc9420_stop(struct net_device *dev)
@@ -855,7 +846,7 @@
 				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 	if (pci_dma_mapping_error(pd->pdev, mapping)) {
 		dev_kfree_skb_any(skb);
-		smsc_warn(RX_ERR, "pci_map_single failed!");
+		netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
 		return -ENOMEM;
 	}
 
@@ -1004,7 +995,8 @@
 	mapping = pci_map_single(pd->pdev, skb->data,
 				 skb->len, PCI_DMA_TODEVICE);
 	if (pci_dma_mapping_error(pd->pdev, mapping)) {
-		smsc_warn(TX_ERR, "pci_map_single failed, dropping packet");
+		netif_warn(pd, tx_err, pd->dev,
+			   "pci_map_single failed, dropping packet\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1056,12 +1048,12 @@
 	u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
 
 	if (dev->flags & IFF_PROMISC) {
-		smsc_dbg(HW, "Promiscuous Mode Enabled");
+		netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n");
 		mac_cr |= MAC_CR_PRMS_;
 		mac_cr &= (~MAC_CR_MCPAS_);
 		mac_cr &= (~MAC_CR_HPFILT_);
 	} else if (dev->flags & IFF_ALLMULTI) {
-		smsc_dbg(HW, "Receive all Multicast Enabled");
+		netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n");
 		mac_cr &= (~MAC_CR_PRMS_);
 		mac_cr |= MAC_CR_MCPAS_;
 		mac_cr &= (~MAC_CR_HPFILT_);
@@ -1069,7 +1061,7 @@
 		struct netdev_hw_addr *ha;
 		u32 hash_lo = 0, hash_hi = 0;
 
-		smsc_dbg(HW, "Multicast filter enabled");
+		netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n");
 		netdev_for_each_mc_addr(ha, dev) {
 			u32 bit_num = smsc9420_hash(ha->addr);
 			u32 mask = 1 << (bit_num & 0x1F);
@@ -1087,7 +1079,7 @@
 		mac_cr &= (~MAC_CR_MCPAS_);
 		mac_cr |= MAC_CR_HPFILT_;
 	} else {
-		smsc_dbg(HW, "Receive own packets only.");
+		netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
 		smsc9420_reg_write(pd, HASHH, 0);
 		smsc9420_reg_write(pd, HASHL, 0);
 
@@ -1115,11 +1107,11 @@
 		else
 			flow = 0;
 
-		smsc_info(LINK, "rx pause %s, tx pause %s",
-			(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
-			(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+		netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n",
+			   cap & FLOW_CTRL_RX ? "enabled" : "disabled",
+			   cap & FLOW_CTRL_TX ? "enabled" : "disabled");
 	} else {
-		smsc_info(LINK, "half duplex");
+		netif_info(pd, link, pd->dev, "half duplex\n");
 		flow = 0;
 	}
 
@@ -1137,10 +1129,10 @@
 	if (phy_dev->duplex != pd->last_duplex) {
 		u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
 		if (phy_dev->duplex) {
-			smsc_dbg(LINK, "full duplex mode");
+			netif_dbg(pd, link, pd->dev, "full duplex mode\n");
 			mac_cr |= MAC_CR_FDPX_;
 		} else {
-			smsc_dbg(LINK, "half duplex mode");
+			netif_dbg(pd, link, pd->dev, "half duplex mode\n");
 			mac_cr &= ~MAC_CR_FDPX_;
 		}
 		smsc9420_reg_write(pd, MAC_CR, mac_cr);
@@ -1152,9 +1144,9 @@
 	carrier = netif_carrier_ok(dev);
 	if (carrier != pd->last_carrier) {
 		if (carrier)
-			smsc_dbg(LINK, "carrier OK");
+			netif_dbg(pd, link, pd->dev, "carrier OK\n");
 		else
-			smsc_dbg(LINK, "no carrier");
+			netif_dbg(pd, link, pd->dev, "no carrier\n");
 		pd->last_carrier = carrier;
 	}
 }
@@ -1168,24 +1160,24 @@
 
 	/* Device only supports internal PHY at address 1 */
 	if (!pd->mii_bus->phy_map[1]) {
-		pr_err("%s: no PHY found at address 1\n", dev->name);
+		netdev_err(dev, "no PHY found at address 1\n");
 		return -ENODEV;
 	}
 
 	phydev = pd->mii_bus->phy_map[1];
-	smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
-		phydev->phy_id);
+	netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n",
+		   phydev->addr, phydev->phy_id);
 
 	phydev = phy_connect(dev, dev_name(&phydev->dev),
 			     smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
 
 	if (IS_ERR(phydev)) {
-		pr_err("%s: Could not attach to PHY\n", dev->name);
+		netdev_err(dev, "Could not attach to PHY\n");
 		return PTR_ERR(phydev);
 	}
 
-	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
-		dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
 
 	/* mask with MAC supported features */
 	phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -1223,12 +1215,12 @@
 	pd->mii_bus->phy_mask = ~(1 << 1);
 
 	if (mdiobus_register(pd->mii_bus)) {
-		smsc_warn(PROBE, "Error registering mii bus");
+		netif_warn(pd, probe, pd->dev, "Error registering mii bus\n");
 		goto err_out_free_bus_2;
 	}
 
 	if (smsc9420_mii_probe(dev) < 0) {
-		smsc_warn(PROBE, "Error probing mii bus");
+		netif_warn(pd, probe, pd->dev, "Error probing mii bus\n");
 		goto err_out_unregister_bus_3;
 	}
 
@@ -1281,12 +1273,11 @@
 
 	BUG_ON(!pd->rx_ring);
 
-	pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
-		RX_RING_SIZE), GFP_KERNEL);
-	if (pd->rx_buffers == NULL) {
-		smsc_warn(IFUP, "Failed to allocated rx_buffers");
+	pd->rx_buffers = kmalloc_array(RX_RING_SIZE,
+				       sizeof(struct smsc9420_ring_info),
+				       GFP_KERNEL);
+	if (pd->rx_buffers == NULL)
 		goto out;
-	}
 
 	/* initialize the rx ring */
 	for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1301,7 +1292,8 @@
 	/* now allocate the entire ring of skbs */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		if (smsc9420_alloc_rx_buffer(pd, i)) {
-			smsc_warn(IFUP, "failed to allocate rx skb %d", i);
+			netif_warn(pd, ifup, pd->dev,
+				   "failed to allocate rx skb %d\n", i);
 			goto out_free_rx_skbs;
 		}
 	}
@@ -1310,13 +1302,14 @@
 	pd->rx_ring_tail = 0;
 
 	smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
-	smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1));
+	netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n",
+		  smsc9420_reg_read(pd, VLAN1));
 
 	if (pd->rx_csum) {
 		/* Enable RX COE */
 		u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
 		smsc9420_reg_write(pd, COE_CR, coe);
-		smsc_dbg(IFUP, "COE_CR = 0x%08x", coe);
+		netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe);
 	}
 
 	smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
@@ -1339,7 +1332,8 @@
 	int result = 0, timeout;
 
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		smsc_warn(IFUP, "dev_addr is not a valid MAC address");
+		netif_warn(pd, ifup, pd->dev,
+			   "dev_addr is not a valid MAC address\n");
 		result = -EADDRNOTAVAIL;
 		goto out_0;
 	}
@@ -1358,7 +1352,7 @@
 
 	result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
 	if (result) {
-		smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
+		netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq);
 		result = -ENODEV;
 		goto out_0;
 	}
@@ -1393,7 +1387,7 @@
 	smsc9420_pci_flush_write(pd);
 
 	/* test the IRQ connection to the ISR */
-	smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
+	netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq);
 	pd->software_irq_signal = false;
 
 	spin_lock_irqsave(&pd->int_lock, flags);
@@ -1423,30 +1417,32 @@
 	spin_unlock_irqrestore(&pd->int_lock, flags);
 
 	if (!pd->software_irq_signal) {
-		smsc_warn(IFUP, "ISR failed signaling test");
+		netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n");
 		result = -ENODEV;
 		goto out_free_irq_1;
 	}
 
-	smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
+	netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq);
 
 	result = smsc9420_alloc_tx_ring(pd);
 	if (result) {
-		smsc_warn(IFUP, "Failed to Initialize tx dma ring");
+		netif_warn(pd, ifup, pd->dev,
+			   "Failed to Initialize tx dma ring\n");
 		result = -ENOMEM;
 		goto out_free_irq_1;
 	}
 
 	result = smsc9420_alloc_rx_ring(pd);
 	if (result) {
-		smsc_warn(IFUP, "Failed to Initialize rx dma ring");
+		netif_warn(pd, ifup, pd->dev,
+			   "Failed to Initialize rx dma ring\n");
 		result = -ENOMEM;
 		goto out_free_tx_ring_2;
 	}
 
 	result = smsc9420_mii_init(dev);
 	if (result) {
-		smsc_warn(IFUP, "Failed to initialize Phy");
+		netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n");
 		result = -ENODEV;
 		goto out_free_rx_ring_3;
 	}
@@ -1547,7 +1543,8 @@
 
 	err = pci_enable_wake(pdev, 0, 0);
 	if (err)
-		smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
+		netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
+			   err);
 
 	if (netif_running(dev)) {
 		/* FIXME: gross. It looks like ancient PM relic.*/
@@ -1582,12 +1579,12 @@
 	int result = 0;
 	u32 id_rev;
 
-	printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");
+	pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
 	/* First do the PCI initialisation */
 	result = pci_enable_device(pdev);
 	if (unlikely(result)) {
-		printk(KERN_ERR "Cannot enable smsc9420\n");
+		pr_err("Cannot enable smsc9420\n");
 		goto out_0;
 	}
 
@@ -1600,24 +1597,24 @@
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
-		printk(KERN_ERR "Cannot find PCI device base address\n");
+		netdev_err(dev, "Cannot find PCI device base address\n");
 		goto out_free_netdev_2;
 	}
 
 	if ((pci_request_regions(pdev, DRV_NAME))) {
-		printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+		netdev_err(dev, "Cannot obtain PCI resources, aborting\n");
 		goto out_free_netdev_2;
 	}
 
 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		printk(KERN_ERR "No usable DMA configuration, aborting.\n");
+		netdev_err(dev, "No usable DMA configuration, aborting\n");
 		goto out_free_regions_3;
 	}
 
 	virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
 		pci_resource_len(pdev, SMSC_BAR));
 	if (!virt_addr) {
-		printk(KERN_ERR "Cannot map device registers, aborting.\n");
+		netdev_err(dev, "Cannot map device registers, aborting\n");
 		goto out_free_regions_3;
 	}
 
@@ -1646,16 +1643,17 @@
 	pd->msg_enable = smsc_debug;
 	pd->rx_csum = true;
 
-	smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr);
+	netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr);
 
 	id_rev = smsc9420_reg_read(pd, ID_REV);
 	switch (id_rev & 0xFFFF0000) {
 	case 0x94200000:
-		smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev);
+		netif_info(pd, probe, pd->dev,
+			   "LAN9420 identified, ID_REV=0x%08X\n", id_rev);
 		break;
 	default:
-		smsc_warn(PROBE, "LAN9420 NOT identified");
-		smsc_warn(PROBE, "ID_REV=0x%08X", id_rev);
+		netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n");
+		netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev);
 		goto out_free_dmadesc_5;
 	}
 
@@ -1670,7 +1668,8 @@
 
 	result = register_netdev(dev);
 	if (result) {
-		smsc_warn(PROBE, "error %i registering device", result);
+		netif_warn(pd, probe, pd->dev, "error %i registering device\n",
+			   result);
 		goto out_free_dmadesc_5;
 	}
 
@@ -1707,8 +1706,6 @@
 	if (!dev)
 		return;
 
-	pci_set_drvdata(pdev, NULL);
-
 	pd = netdev_priv(dev);
 	unregister_netdev(dev);
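The smsc_dbg/smsc_info/smsc_warn macros removed above map one-for-one
onto netif_dbg/netif_info/netif_warn: the message is still emitted only
when the matching NETIF_MSG_* bit is set in msg_enable, and the new
pr_fmt define keeps the remaining bare pr_* calls prefixed with the
module name.  Roughly (the real definition lives in
include/linux/netdevice.h):

	#define netif_warn(priv, type, dev, fmt, args...)		\
	do {								\
		if (netif_msg_##type(priv))	/* msg_enable bit test */ \
			netdev_warn(dev, fmt, ##args);			\
	} while (0)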
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7eb8bab..fc94f20 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -451,14 +451,14 @@
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
 struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
 
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
-				unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
-				unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+			 unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+			 unsigned int high, unsigned int low);
 
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
 extern const struct stmmac_chain_mode_ops chain_mode_ops;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 8e5662c..def266d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -104,14 +104,13 @@
 #define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
 #define DMA_CONTROL_FTF		0x00100000	/* Flush transmit FIFO */
 
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
-			       struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
 
 #endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 48ec001..8607488 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,8 @@
 	unsigned int mmc_rx_icmp_err_octets;
 };
 
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
 
 #endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f16a9bd..22f89ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -110,14 +110,14 @@
 
 extern int phyaddr;
 
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
 extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 023b7c2..644d80e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -138,7 +138,6 @@
 
 	stmmac_dvr_remove(ndev);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, priv->ioaddr);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
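The pci_set_drvdata(pdev, NULL) removals here and in the sun/, tehuti/,
toshiba/ and via/ hunks below rely on the driver core, which now resets
drvdata itself once a device is unbound.  A sketch of the resulting
remove path for a hypothetical driver:

	static void demo_remove(struct pci_dev *pdev)
	{
		struct net_device *ndev = pci_get_drvdata(pdev);

		unregister_netdev(ndev);
		free_netdev(ndev);
		/* no pci_set_drvdata(pdev, NULL) -- the core clears it */
	}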
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 759441b..b4d50d7 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3354,7 +3354,7 @@
 #if defined(CONFIG_SPARC)
 	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
 	if (addr != NULL) {
-		memcpy(dev_addr, addr, 6);
+		memcpy(dev_addr, addr, ETH_ALEN);
 		goto done;
 	}
 #endif
@@ -5168,7 +5168,6 @@
 
 err_out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return -ENODEV;
 }
 
@@ -5206,7 +5205,6 @@
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f28460c..388540f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9875,7 +9875,6 @@
 
 err_out_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	return err;
 }
@@ -9900,7 +9899,6 @@
 		free_netdev(dev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e62df2b..b5655b7 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2779,7 +2779,7 @@
 		return -1;
 #endif
 	}
-	memcpy(dev->dev_addr, addr, 6);
+	memcpy(dev->dev_addr, addr, ETH_ALEN);
 #else
 	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
 #endif
@@ -2806,8 +2806,6 @@
 		iounmap(gp->regs);
 		pci_release_regions(pdev);
 		free_netdev(dev);
-
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index e37b587..0dbf46f 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2675,10 +2675,10 @@
 
 		addr = of_get_property(dp, "local-mac-address", &len);
 
-		if (qfe_slot != -1 && addr && len == 6)
-			memcpy(dev->dev_addr, addr, 6);
+		if (qfe_slot != -1 && addr && len == ETH_ALEN)
+			memcpy(dev->dev_addr, addr, ETH_ALEN);
 		else
-			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+			memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 	}
 
 	hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@
 		    (addr = of_get_property(dp, "local-mac-address", &len))
 			!= NULL &&
 		    len == 6) {
-			memcpy(dev->dev_addr, addr, 6);
+			memcpy(dev->dev_addr, addr, ETH_ALEN);
 		} else {
-			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+			memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 		}
 #else
 		get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@
 	pci_release_regions(hp->happy_dev);
 
 	free_netdev(net_dev);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b072f4d..5695ae2 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -843,7 +843,7 @@
 	if (!dev)
 		return -ENOMEM;
 
-	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 
 	qe = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 571452e..dd0dd627 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2447,7 +2447,6 @@
 	iounmap(nic->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	vfree(nic);
 
 	RET();
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de71b1e..53150c2 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,11 +49,19 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called davinci_cpdma.  This is recommended.
 
+config TI_CPSW_PHY_SEL
+	boolean "TI CPSW Switch Phy sel Support"
+	depends on TI_CPSW
+	---help---
+	  This driver supports configuring the PHY mode of the PHYs
+	  connected to the CPSW.
+
 config TI_CPSW
 	tristate "TI CPSW Switch Support"
 	depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
 	select TI_DAVINCI_CPDMA
 	select TI_DAVINCI_MDIO
+	select TI_CPSW_PHY_SEL
 	---help---
 	  This driver supports TI's CPSW Ethernet Switch.
 
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c65148e..9cfaab8 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,5 +7,6 @@
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 0000000..148da9a
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII	0
+#define AM33XX_GMII_SEL_MODE_RMII	1
+#define AM33XX_GMII_SEL_MODE_RGMII	2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN	BIT(6)
+
+struct cpsw_phy_sel_priv {
+	struct device	*dev;
+	u32 __iomem	*gmii_sel;
+	bool		rmii_clock_external;
+	void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+			     phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+				 phy_interface_t phy_mode, int slave)
+{
+	u32 reg;
+	u32 mask;
+	u32 mode = 0;
+
+	reg = readl(priv->gmii_sel);
+
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_RMII:
+		mode = AM33XX_GMII_SEL_MODE_RMII;
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		break;
+
+	case PHY_INTERFACE_MODE_MII:
+	default:
+		mode = AM33XX_GMII_SEL_MODE_MII;
+		break;
+	}
+
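+	/* Two interface-mode bits per slave, plus bit (6 + slave) to take
+	 * the RMII reference clock from an external source (RMII1 -> bit 6,
+	 * RMII2 -> bit 7, per the defines above). */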
+	mask = 0x3 << (slave * 2) | BIT(slave + 6);
+	mode <<= slave * 2;
+
+	if (priv->rmii_clock_external) {
+		if (slave == 0)
+			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+		else
+			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+	}
+
+	reg &= ~mask;
+	reg |= mode;
+
+	writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+	struct device_node *node = (struct device_node *)data;
+	return dev->of_node == node &&
+		dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+	struct device_node *node;
+	struct cpsw_phy_sel_priv *priv;
+
+	node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+	if (!node) {
+		dev_err(dev, "Phy mode driver DT not found\n");
+		return;
+	}
+
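+	/* Resolve the DT node to the platform device probed by this driver
+	 * (see match() above) so its drvdata can be used. */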
+	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	priv = dev_get_drvdata(dev);
+
+	priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+	{
+		.compatible	= "ti,am3352-cpsw-phy-sel",
+		.data		= &cpsw_gmii_sel_am3352,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+	struct resource	*res;
+	const struct of_device_id *of_id;
+	struct cpsw_phy_sel_priv *priv;
+
+	of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+	if (!of_id)
+		return -EINVAL;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+		return -ENOMEM;
+	}
+
+	priv->cpsw_phy_sel = of_id->data;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+	priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->gmii_sel))
+		return PTR_ERR(priv->gmii_sel);
+
+	if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+		priv->rmii_clock_external = true;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+	.probe		= cpsw_phy_sel_probe,
+	.driver		= {
+		.name	= "cpsw-phy-sel",
+		.owner	= THIS_MODULE,
+		.of_match_table = cpsw_phy_sel_id_table,
+	},
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cc3ce55..90d41d2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,8 +367,6 @@
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
-	struct resource			*cpsw_res;
-	struct resource			*cpsw_wr_res;
 	struct napi_struct		napi;
 	struct device			*dev;
 	struct cpsw_platform_data	data;
@@ -1016,6 +1014,10 @@
 		dev_info(priv->dev, "phy found : id is : 0x%x\n",
 			 slave->phy->phy_id);
 		phy_start(slave->phy);
+
+		/* Configure GMII_SEL register */
+		cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+			     slave->slave_num);
 	}
 }
 
@@ -1705,62 +1707,55 @@
 
 	if (of_property_read_u32(node, "active_slave", &prop)) {
 		pr_err("Missing active_slave property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->active_slave = prop;
 
 	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
 		pr_err("Missing cpts_clock_mult property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->cpts_clock_mult = prop;
 
 	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
 		pr_err("Missing cpts_clock_shift property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->cpts_clock_shift = prop;
 
-	data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
-				   GFP_KERNEL);
+	data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+					* sizeof(struct cpsw_slave_data),
+					GFP_KERNEL);
 	if (!data->slave_data)
-		return -EINVAL;
+		return -ENOMEM;
 
 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
 		pr_err("Missing cpdma_channels property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->channels = prop;
 
 	if (of_property_read_u32(node, "ale_entries", &prop)) {
 		pr_err("Missing ale_entries property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->ale_entries = prop;
 
 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
 		pr_err("Missing bd_ram_size property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->bd_ram_size = prop;
 
 	if (of_property_read_u32(node, "rx_descs", &prop)) {
 		pr_err("Missing rx_descs property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->rx_descs = prop;
 
 	if (of_property_read_u32(node, "mac_control", &prop)) {
 		pr_err("Missing mac_control property in the DT.\n");
-		ret = -EINVAL;
-		goto error_ret;
+		return -EINVAL;
 	}
 	data->mac_control = prop;
 
@@ -1791,8 +1786,7 @@
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			pr_err("Missing slave[%d] phy_id property\n", i);
-			ret = -EINVAL;
-			goto error_ret;
+			return -EINVAL;
 		}
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
@@ -1822,10 +1816,6 @@
 	}
 
 	return 0;
-
-error_ret:
-	kfree(data->slave_data);
-	return ret;
 }
 
 static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1867,7 +1857,6 @@
 	priv_sl2->coal_intvl = 0;
 	priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 
-	priv_sl2->cpsw_res = priv->cpsw_res;
 	priv_sl2->regs = priv->regs;
 	priv_sl2->host_port = priv->host_port;
 	priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1911,8 +1900,8 @@
 	struct cpsw_priv		*priv;
 	struct cpdma_params		dma_params;
 	struct cpsw_ale_params		ale_params;
-	void __iomem			*ss_regs, *wr_regs;
-	struct resource			*res;
+	void __iomem			*ss_regs;
+	struct resource			*res, *ss_res;
 	u32 slave_offset, sliver_offset, slave_size;
 	int ret = 0, i, k = 0;
 
@@ -1948,7 +1937,7 @@
 	if (cpsw_probe_dt(&priv->data, pdev)) {
 		pr_err("cpsw: platform data missing\n");
 		ret = -ENODEV;
-		goto clean_ndev_ret;
+		goto clean_runtime_disable_ret;
 	}
 	data = &priv->data;
 
@@ -1962,11 +1951,12 @@
 
 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
 
-	priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
-			       GFP_KERNEL);
+	priv->slaves = devm_kzalloc(&pdev->dev,
+				    sizeof(struct cpsw_slave) * data->slaves,
+				    GFP_KERNEL);
 	if (!priv->slaves) {
-		ret = -EBUSY;
-		goto clean_ndev_ret;
+		ret = -ENOMEM;
+		goto clean_runtime_disable_ret;
 	}
 	for (i = 0; i < data->slaves; i++)
 		priv->slaves[i].slave_num = i;
@@ -1974,55 +1964,31 @@
 	priv->slaves[0].ndev = ndev;
 	priv->emac_port = 0;
 
-	priv->clk = clk_get(&pdev->dev, "fck");
+	priv->clk = devm_clk_get(&pdev->dev, "fck");
 	if (IS_ERR(priv->clk)) {
-		dev_err(&pdev->dev, "fck is not found\n");
+		dev_err(priv->dev, "fck is not found\n");
 		ret = -ENODEV;
-		goto clean_slave_ret;
+		goto clean_runtime_disable_ret;
 	}
 	priv->coal_intvl = 0;
 	priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
 
-	priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!priv->cpsw_res) {
-		dev_err(priv->dev, "error getting i/o resource\n");
-		ret = -ENOENT;
-		goto clean_clk_ret;
-	}
-	if (!request_mem_region(priv->cpsw_res->start,
-				resource_size(priv->cpsw_res), ndev->name)) {
-		dev_err(priv->dev, "failed request i/o region\n");
-		ret = -ENXIO;
-		goto clean_clk_ret;
-	}
-	ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
-	if (!ss_regs) {
-		dev_err(priv->dev, "unable to map i/o region\n");
-		goto clean_cpsw_iores_ret;
+	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+	if (IS_ERR(ss_regs)) {
+		ret = PTR_ERR(ss_regs);
+		goto clean_runtime_disable_ret;
 	}
 	priv->regs = ss_regs;
 	priv->version = __raw_readl(&priv->regs->id_ver);
 	priv->host_port = HOST_PORT_NUM;
 
-	priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!priv->cpsw_wr_res) {
-		dev_err(priv->dev, "error getting i/o resource\n");
-		ret = -ENOENT;
-		goto clean_iomap_ret;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->wr_regs)) {
+		ret = PTR_ERR(priv->wr_regs);
+		goto clean_runtime_disable_ret;
 	}
-	if (!request_mem_region(priv->cpsw_wr_res->start,
-			resource_size(priv->cpsw_wr_res), ndev->name)) {
-		dev_err(priv->dev, "failed request i/o region\n");
-		ret = -ENXIO;
-		goto clean_iomap_ret;
-	}
-	wr_regs = ioremap(priv->cpsw_wr_res->start,
-				resource_size(priv->cpsw_wr_res));
-	if (!wr_regs) {
-		dev_err(priv->dev, "unable to map i/o region\n");
-		goto clean_cpsw_wr_iores_ret;
-	}
-	priv->wr_regs = wr_regs;
 
 	memset(&dma_params, 0, sizeof(dma_params));
 	memset(&ale_params, 0, sizeof(ale_params));
@@ -2053,12 +2019,12 @@
 		slave_size           = CPSW2_SLAVE_SIZE;
 		sliver_offset        = CPSW2_SLIVER_OFFSET;
 		dma_params.desc_mem_phys =
-			(u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
 		break;
 	default:
 		dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
 		ret = -ENODEV;
-		goto clean_cpsw_wr_iores_ret;
+		goto clean_runtime_disable_ret;
 	}
 	for (i = 0; i < priv->data.slaves; i++) {
 		struct cpsw_slave *slave = &priv->slaves[i];
@@ -2086,7 +2052,7 @@
 	if (!priv->dma) {
 		dev_err(priv->dev, "error initializing dma\n");
 		ret = -ENOMEM;
-		goto clean_wr_iomap_ret;
+		goto clean_runtime_disable_ret;
 	}
 
 	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2121,8 +2087,8 @@
 
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
 		for (i = res->start; i <= res->end; i++) {
-			if (request_irq(i, cpsw_interrupt, 0,
-					dev_name(&pdev->dev), priv)) {
+			if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+					     dev_name(priv->dev), priv)) {
 				dev_err(priv->dev, "error attaching irq\n");
 				goto clean_ale_ret;
 			}
@@ -2144,7 +2110,7 @@
 	if (ret) {
 		dev_err(priv->dev, "error registering net device\n");
 		ret = -ENODEV;
-		goto clean_irq_ret;
+		goto clean_ale_ret;
 	}
 
 	if (cpts_register(&pdev->dev, priv->cpts,
@@ -2152,44 +2118,27 @@
 		dev_err(priv->dev, "error registering cpts device\n");
 
 	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
-		  priv->cpsw_res->start, ndev->irq);
+		    ss_res->start, ndev->irq);
 
 	if (priv->data.dual_emac) {
 		ret = cpsw_probe_dual_emac(pdev, priv);
 		if (ret) {
 			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-			goto clean_irq_ret;
+			goto clean_ale_ret;
 		}
 	}
 
 	return 0;
 
-clean_irq_ret:
-	for (i = 0; i < priv->num_irqs; i++)
-		free_irq(priv->irqs_table[i], priv);
 clean_ale_ret:
 	cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
 	cpdma_chan_destroy(priv->txch);
 	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
-	iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
-	release_mem_region(priv->cpsw_wr_res->start,
-			   resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
-	iounmap(priv->regs);
-clean_cpsw_iores_ret:
-	release_mem_region(priv->cpsw_res->start,
-			   resource_size(priv->cpsw_res));
-clean_clk_ret:
-	clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
 	pm_runtime_disable(&pdev->dev);
-	kfree(priv->slaves);
 clean_ndev_ret:
-	kfree(priv->data.slave_data);
 	free_netdev(priv->ndev);
 	return ret;
 }
@@ -2198,30 +2147,18 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct cpsw_priv *priv = netdev_priv(ndev);
-	int i;
 
 	if (priv->data.dual_emac)
 		unregister_netdev(cpsw_get_slave_ndev(priv, 1));
 	unregister_netdev(ndev);
 
 	cpts_unregister(priv->cpts);
-	for (i = 0; i < priv->num_irqs; i++)
-		free_irq(priv->irqs_table[i], priv);
 
 	cpsw_ale_destroy(priv->ale);
 	cpdma_chan_destroy(priv->txch);
 	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
-	iounmap(priv->regs);
-	release_mem_region(priv->cpsw_res->start,
-			   resource_size(priv->cpsw_res));
-	iounmap(priv->wr_regs);
-	release_mem_region(priv->cpsw_wr_res->start,
-			   resource_size(priv->cpsw_wr_res));
 	pm_runtime_disable(&pdev->dev);
-	clk_put(priv->clk);
-	kfree(priv->slaves);
-	kfree(priv->data.slave_data);
 	if (priv->data.dual_emac)
 		free_netdev(cpsw_get_slave_ndev(priv, 1));
 	free_netdev(ndev);
@@ -2277,7 +2214,7 @@
 		.name	 = "cpsw",
 		.owner	 = THIS_MODULE,
 		.pm	 = &cpsw_pm_ops,
-		.of_match_table = of_match_ptr(cpsw_of_mtable),
+		.of_match_table = cpsw_of_mtable,
 	},
 	.probe = cpsw_probe,
 	.remove = cpsw_remove,
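The probe rewrite above leans on the managed devm_* APIs (devm_kzalloc,
devm_clk_get, devm_ioremap_resource, devm_request_irq): anything
acquired through them is released automatically when the device is
unbound, which is what lets the clean_*_ret unwind labels collapse.  A
minimal sketch of the idiom for a hypothetical platform driver:

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);	/* nothing to unwind by hand */

		return 0;
	}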
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index eb3e101..574f49d 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -39,4 +39,6 @@
 	bool	dual_emac;	/* Enable Dual EMAC mode */
 };
 
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
 #endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fe993cd..1a581ef 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -127,8 +127,8 @@
 };
 
 #ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
 #else
 static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
@@ -138,8 +138,7 @@
 }
 #endif
 
-extern int cpts_register(struct device *dev, struct cpts *cpts,
-			 u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
 
 #endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a32ef9d..41ba974 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1852,7 +1852,7 @@
 	}
 
 	/* MAC addr and PHY mask , RMII enable info from platform_data */
-	memcpy(priv->mac_addr, pdata->mac_addr, 6);
+	memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
 	priv->phy_id = pdata->phy_id;
 	priv->rmii_en = pdata->rmii_en;
 	priv->version = pdata->version;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 591437e..62b19be 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -319,7 +319,6 @@
 
 	free_netdev(dev);
 
-	pci_set_drvdata(pdev, NULL);
 	cancel_work_sync(&priv->tlan_tqueue);
 }
 
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 13e6fff..628b736 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2230,7 +2230,7 @@
 		nz_addr |= mac[i];
 
 	if (nz_addr) {
-		memcpy(dev->dev_addr, mac, 6);
+		memcpy(dev->dev_addr, mac, ETH_ALEN);
 		dev->addr_len = 6;
 	} else {
 		eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 106be47..edb2e12 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1008,6 +1008,8 @@
 	info->egress_timer.data = (long)info;
 	info->egress_timer.function = tile_net_handle_egress_timer;
 
+	u64_stats_init(&info->stats.syncp);
+
 	priv->cpu[my_cpu] = info;
 
 	/*
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 309abb4..8505196 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,27 +359,26 @@
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
 #else
 static inline void udbg_shutdown_ps3gelic(void) {}
 #endif
 
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
 /* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
-				  struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
 
 /* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
-				  struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+			   struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
 
 #endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7..11f443d 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -320,7 +320,7 @@
 #define GELIC_WL_PRIV_SET_PSK		(SIOCIWFIRSTPRIV + 0)
 #define GELIC_WL_PRIV_GET_PSK		(SIOCIWFIRSTPRIV + 1)
 
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
 #endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5734480c1..3f4a32e 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2478,7 +2478,6 @@
 	pci_release_regions(pdev);
 out_disable_dev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return NULL;
 }
 
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 4ba2135..9b6af08 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -29,8 +29,8 @@
 
 #include <linux/sungem_phy.h>
 
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
 
 extern const struct ethtool_ops spider_net_ethtool_ops;
 
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index a971b9c..1322546 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -887,7 +887,6 @@
 	mdiobus_free(lp->mii_bus);
 	unregister_netdev(dev);
 	free_netdev(dev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index bdf697b..cce6c4b 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -987,6 +987,9 @@
 
 	rp->base = ioaddr;
 
+	u64_stats_init(&rp->tx_stats.syncp);
+	u64_stats_init(&rp->rx_stats.syncp);
+
 	/* Get chip registers into a sane state */
 	rhine_power_init(dev);
 	rhine_hw_init(dev, pioaddr);
@@ -2292,7 +2295,6 @@
 
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static void rhine_shutdown (struct pci_dev *pdev)
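The u64_stats_init() calls added above (and in tilepro.c earlier) seed
the seqcount that u64_stats_sync wraps on 32-bit SMP kernels; on 64-bit
builds the call essentially compiles away.  The pattern, sketched for a
hypothetical per-ring stats structure:

	struct demo_ring_stats {
		struct u64_stats_sync	syncp;
		u64			packets;
	};

	u64_stats_init(&stats->syncp);	/* once, before the first update */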
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4c619ea..74234a5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -31,7 +31,7 @@
 #define DRIVER_NAME "xilinx_emaclite"
 
 /* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET 	0x0		/* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET	0x0		/* Transmit Buffer */
 #define XEL_MDIOADDR_OFFSET	0x07E4		/* MDIO Address Register */
 #define XEL_MDIOWR_OFFSET	0x07E8		/* MDIO Write Data Register */
 #define XEL_MDIORD_OFFSET	0x07EC		/* MDIO Read Data Register */
@@ -63,13 +63,13 @@
 #define XEL_MDIOCTRL_MDIOEN_MASK  0x00000008	/* MDIO Enable */
 
 /* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK	0x80000000 	/* Global Enable */
+#define XEL_GIER_GIE_MASK	0x80000000	/* Global Enable */
 
 /* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK	 0x00000001 	/* Tx complete */
-#define XEL_TSR_PROGRAM_MASK	 0x00000002 	/* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK	 0x00000008 	/* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 	/* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK	 0x00000001	/* Tx complete */
+#define XEL_TSR_PROGRAM_MASK	 0x00000002	/* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK	 0x00000008	/* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000	/* Buffer is active, SW bit
 						 * only. This is not documented
 						 * in the HW spec */
 
@@ -77,21 +77,21 @@
 #define XEL_TSR_PROG_MAC_ADDR	(XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
 
 /* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK	0x00000001 	/* Rx complete */
-#define XEL_RSR_RECV_IE_MASK	0x00000008 	/* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK	0x00000001	/* Rx complete */
+#define XEL_RSR_RECV_IE_MASK	0x00000008	/* Rx interrupt enable bit */
 
 /* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK	0x0000FFFF 	/* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK	0x0000FFFF	/* Tx packet length */
 
 /* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK	0x0000FFFF 	/* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK	0x0000FFFF	/* Rx packet length */
 
-#define XEL_HEADER_OFFSET	12 		/* Offset to length field */
-#define XEL_HEADER_SHIFT	16 		/* Shift value for length */
+#define XEL_HEADER_OFFSET	12		/* Offset to length field */
+#define XEL_HEADER_SHIFT	16		/* Shift value for length */
 
 /* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE		28 	/* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET	16 	/* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE		28	/* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET	16	/* IP Length Offset */
 
 
 
@@ -1075,14 +1075,9 @@
- * This function un maps the IO region of the Emaclite device and frees the net
- * device.
+ * This function frees the net device; unmapping of the IO region is
+ * handled automatically by the devres core.
  */
-static void xemaclite_remove_ndev(struct net_device *ndev,
-				  struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
 {
 	if (ndev) {
-		struct net_local *lp = netdev_priv(ndev);
-
-		if (lp->base_addr)
-			devm_iounmap(&pdev->dev, lp->base_addr);
 		free_netdev(ndev);
 	}
 }
@@ -1177,7 +1172,7 @@
 
 	if (mac_address)
 		/* Set the MAC address. */
-		memcpy(ndev->dev_addr, mac_address, 6);
+		memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
 	else
 		dev_warn(dev, "No MAC address found\n");
 
@@ -1214,7 +1209,7 @@
 	return 0;
 
 error:
-	xemaclite_remove_ndev(ndev, ofdev);
+	xemaclite_remove_ndev(ndev);
 	return rc;
 }
 
@@ -1248,7 +1243,7 @@
 		of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
 
-	xemaclite_remove_ndev(ndev, of_dev);
+	xemaclite_remove_ndev(ndev);
 
 	return 0;
 }
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a..f839935 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -453,7 +453,7 @@
 	 */
 	* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
 	a[1] = 0 ;
-	memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+	memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
 
 	CHECK_NPP() ;
 	 /* set memory address reg for writes */
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3ca308b..bd1166b 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -469,20 +469,20 @@
 
 extern const struct fddi_addr fddi_broadcast;
 
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
 
 #endif	/* _SCMECM_ */
 
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index f5d7305..713d303 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -436,7 +436,7 @@
 	}
 	read_address(smc, NULL);
 	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
-	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
 	smt_reset_defaults(smc, 0);
 
@@ -503,7 +503,7 @@
 	 *               address.
 	 */
 	read_address(smc, NULL);
-	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
 	init_smt(smc, NULL);
 	smt_online(smc, 1);
@@ -1213,7 +1213,7 @@
 	if ((unsigned short) frame[1 + 10] != 0)
 		return;
 	SRBit = frame[1 + 6] & 0x01;
-	memcpy(&frame[1 + 6], hw_addr, 6);
+	memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
 	frame[8] |= SRBit;
 }				// CheckSourceAddress
 
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index a974727..636b65c 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -445,7 +445,7 @@
 	outb(0, FCR(dev->base_addr));  /* disable FIFOs */
 	outb(0x0d, MCR(dev->base_addr));
 	outb(0, IER(dev->base_addr));
-	if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
 			"baycom_ser_fdx", dev)) {
 		release_region(dev->base_addr, SER12_EXTENT);
 		return -EBUSY;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index e349d86..f9a8976 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -490,7 +490,7 @@
 	outb(0, FCR(dev->base_addr));  /* disable FIFOs */
 	outb(0x0d, MCR(dev->base_addr));
 	outb(0, IER(dev->base_addr));
-	if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
 			"baycom_ser12", dev)) {
 		release_region(dev->base_addr, SER12_EXTENT);       
 		return -EBUSY;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index bc1d521..4bc6ee8 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1734,7 +1734,7 @@
 			if (!Ivec[hwcfg.irq].used && hwcfg.irq)
 			{
 				if (request_irq(hwcfg.irq, scc_isr,
-						IRQF_DISABLED, "AX.25 SCC",
+						0, "AX.25 SCC",
 						(void *)(long) hwcfg.irq))
 					printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
 				else
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5af1c3e..1971411 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -888,7 +888,7 @@
 		goto out_release_base;
 	}
 	outb(0, IER(dev->base_addr));
-	if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+	if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
 		printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
 		ret = -EBUSY;
 		goto out_release_base;
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 6f10b49..2cbe1c2 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -561,7 +561,7 @@
 
 	spin_lock_irqsave(&lp->lock, flags);
 	lp->is_tx = 1;
-	INIT_COMPLETION(lp->tx_complete);
+	reinit_completion(&lp->tx_complete);
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 0632d34..c6e46d6 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -343,7 +343,7 @@
 	if (ret)
 		goto err;
 
-	INIT_COMPLETION(devrec->tx_complete);
+	reinit_completion(&devrec->tx_complete);
 
 	/* Set TXNTRIG bit of TXNCON to send packet */
 	ret = read_short_reg(devrec, REG_TXNCON, &val);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index a3bed28..c14d39b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -265,6 +265,7 @@
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
+	struct ifb_private *dp;
 	int err;
 
 	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
@@ -273,6 +274,10 @@
 	if (!dev_ifb)
 		return -ENOMEM;
 
+	dp = netdev_priv(dev_ifb);
+	u64_stats_init(&dp->rsync);
+	u64_stats_init(&dp->tsync);
+
 	dev_ifb->rtnl_link_ops = &ifb_link_ops;
 	err = register_netdevice(dev_ifb);
 	if (err < 0)
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 7bbd318..befa45f 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -627,7 +627,7 @@
 /*
  * Function ali_ircc_read_dongle_id (int index, info)
  *
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
  * once after power-on/reset. It also needs to be used whenever you suspect
  * that the user may have plugged/unplugged the IrDA Dongle.
  */
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c74f384..303c4bd 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -411,12 +411,12 @@
 
 #else
 
-	if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+	if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
 		dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
 		return -EBUSY;
 	}
 
-	if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+	if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
 		dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
 		free_irq(port->irq, dev);
 		return -EBUSY;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 31bcb98..768dfe9 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1352,7 +1352,7 @@
     return 0;
 
   rc = request_irq (self->io.irq, toshoboe_interrupt,
-                    IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+                    IRQF_SHARED, dev->name, self);
   if (rc)
   	return rc;
 
@@ -1559,7 +1559,7 @@
   self->io.fir_base = self->base;
   self->io.fir_ext = OBOE_IO_EXTENT;
   self->io.irq = pci_dev->irq;
-  self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+  self->io.irqflags = IRQF_SHARED;
 
   self->speed = self->io.speed = 9600;
   self->async = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index ceeb537..66bc03b 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1035,7 +1035,7 @@
 /*
  * Function nsc_ircc_read_dongle_id (void)
  *
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
  * once after power-on/reset. It also needs to be used whenever you suspect
  * that the user may have plugged/unplugged the IrDA Dongle.
  */
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4455425..ff45cd0 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@
 		goto err_mem_4;
 
 	platform_set_drvdata(pdev, ndev);
-	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+	err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
 	if (err) {
 		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
 		goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 89682b4..8d9ae5a 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@
 		goto err_mem_4;
 
 	platform_set_drvdata(pdev, ndev);
-	err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+	err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
 	if (err) {
 		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
 		goto err_mem_4;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 6d5b1e2..f50b9c1 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -102,28 +102,29 @@
 
 /* exported */
 
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
 
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+				    const char *name);
+int sirdev_put_instance(struct sir_dev *self);
 
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
 
 /* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
 
 /* not exported */
 
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
 
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
 
 /* inline helpers */
 
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a17d85a..ac24c27 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -137,10 +137,16 @@
 
 static int loopback_dev_init(struct net_device *dev)
 {
+	int i;
+
 	dev->lstats = alloc_percpu(struct pcpu_lstats);
 	if (!dev->lstats)
 		return -ENOMEM;
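+	/* Seed each per-CPU syncp below: u64 stats readers on 32-bit SMP
+	 * depend on an initialised seqcount, and lockdep wants the
+	 * seqcount class registered before first use.
+	 */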
 
+	for_each_possible_cpu(i) {
+		struct pcpu_lstats *lb_stats;
+		lb_stats = per_cpu_ptr(dev->lstats, i);
+		u64_stats_init(&lb_stats->syncp);
+	}
 	return 0;
 }
 
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9bf46bd..acf9379 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -297,7 +297,13 @@
 	int ret;
 	const struct macvlan_dev *vlan = netdev_priv(dev);
 
-	ret = macvlan_queue_xmit(skb, dev);
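+	/* If the lower device accepted this macvlan as an L2 forwarding
+	 * offload station, bypass the software path and transmit directly
+	 * on the lower device.
+	 */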
+	if (vlan->fwd_priv) {
+		skb->dev = vlan->lowerdev;
+		ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+	} else {
+		ret = macvlan_queue_xmit(skb, dev);
+	}
+
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 		struct macvlan_pcpu_stats *pcpu_stats;
 
@@ -347,6 +353,21 @@
 		goto hash_add;
 	}
 
+	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+		vlan->fwd_priv =
+		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+		/* If we get a NULL pointer back, or if we get an error,
+		 * then we should just fall through to the non-accelerated
+		 * path.
+		 */
+		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
+			vlan->fwd_priv = NULL;
+		} else {
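+			/* Transmits now land on the lower device's queue,
+			 * so give up the lockless-TX (LLTX) shortcut and
+			 * let the core serialise them.
+			 */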
+			dev->features &= ~NETIF_F_LLTX;
+			return 0;
+		}
+	}
+
 	err = -EBUSY;
 	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
 		goto out;
@@ -367,6 +388,11 @@
 del_unicast:
 	dev_uc_del(lowerdev, dev->dev_addr);
 out:
+	if (vlan->fwd_priv) {
+		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+							   vlan->fwd_priv);
+		vlan->fwd_priv = NULL;
+	}
 	return err;
 }
 
@@ -375,6 +401,13 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
+	if (vlan->fwd_priv) {
+		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+							   vlan->fwd_priv);
+		vlan->fwd_priv = NULL;
+		return 0;
+	}
+
 	dev_uc_unsync(lowerdev, dev);
 	dev_mc_unsync(lowerdev, dev);
 
@@ -501,6 +534,7 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct net_device *lowerdev = vlan->lowerdev;
+	int i;
 
 	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
 				  (lowerdev->state & MACVLAN_STATE_MASK);
@@ -516,6 +550,12 @@
 	if (!vlan->pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct macvlan_pcpu_stats *mvlstats;
+		mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
+		u64_stats_init(&mvlstats->syncp);
+	}
+
 	return 0;
 }
 
@@ -828,22 +868,22 @@
 		eth_hw_addr_inherit(dev, lowerdev);
 	}
 
+	port->count += 1;
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto destroy_port;
+
+	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
 	if (err)
-		goto destroy_port;
+		goto unregister_netdev;
 
-	port->count += 1;
-	err = register_netdevice(dev);
-	if (err < 0)
-		goto upper_dev_unlink;
 
 	list_add_tail_rcu(&vlan->list, &port->vlans);
 	netif_stacked_transfer_operstate(lowerdev, dev);
 
 	return 0;
 
+unregister_netdev:
+	unregister_netdevice(dev);
 destroy_port:
 	port->count -= 1;
 	if (!port->count)
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index c9a1592..ba2f5e7 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -34,6 +34,8 @@
  *
  ****************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
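+/* With pr_fmt defined, every pr_*() call in this file is automatically
+ * prefixed with "netconsole: ", so the literal prefix can be dropped
+ * from the message strings below.
+ */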
+
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -320,8 +322,8 @@
 	if (enabled < 0 || enabled > 1)
 		return -EINVAL;
 	if (enabled == nt->enabled) {
-		printk(KERN_INFO "netconsole: network logging has already %s\n",
-				nt->enabled ? "started" : "stopped");
+		pr_info("network logging has already %s\n",
+			nt->enabled ? "started" : "stopped");
 		return -EINVAL;
 	}
 
@@ -336,7 +338,7 @@
 		if (err)
 			return err;
 
-		printk(KERN_INFO "netconsole: network logging started\n");
+		pr_info("netconsole: network logging started\n");
 	} else {	/* 0 */
 		/* We need to disable the netconsole before cleaning it up
 		 * otherwise we might end up in write_msg() with
@@ -360,9 +362,8 @@
 	size_t len;
 
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -383,9 +384,8 @@
 	int rv;
 
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -402,9 +402,8 @@
 	int rv;
 
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -419,9 +418,8 @@
 			      size_t count)
 {
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -429,7 +427,7 @@
 		const char *end;
 		if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
 			if (*end && *end != '\n') {
-				printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+				pr_err("invalid IPv6 address at: <%c>\n", *end);
 				return -EINVAL;
 			}
 			nt->np.ipv6 = true;
@@ -450,9 +448,8 @@
 			       size_t count)
 {
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -460,7 +457,7 @@
 		const char *end;
 		if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
 			if (*end && *end != '\n') {
-				printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+				pr_err("invalid IPv6 address at: <%c>\n", *end);
 				return -EINVAL;
 			}
 			nt->np.ipv6 = true;
@@ -483,9 +480,8 @@
 	u8 remote_mac[ETH_ALEN];
 
 	if (nt->enabled) {
-		printk(KERN_ERR "netconsole: target (%s) is enabled, "
-				"disable to update parameters\n",
-				config_item_name(&nt->item));
+		pr_err("target (%s) is enabled, disable to update parameters\n",
+		       config_item_name(&nt->item));
 		return -EINVAL;
 	}
 
@@ -708,19 +704,20 @@
 	}
 	spin_unlock_irqrestore(&target_list_lock, flags);
 	if (stopped) {
-		printk(KERN_INFO "netconsole: network logging stopped on "
-		       "interface %s as it ", dev->name);
+		const char *msg = "had an event";
 		switch (event) {
 		case NETDEV_UNREGISTER:
-			printk(KERN_CONT "unregistered\n");
+			msg = "unregistered";
 			break;
 		case NETDEV_RELEASE:
-			printk(KERN_CONT "released slaves\n");
+			msg = "released slaves";
 			break;
 		case NETDEV_JOIN:
-			printk(KERN_CONT "is joining a master device\n");
+			msg = "is joining a master device";
 			break;
 		}
+		pr_info("network logging stopped on interface %s as it %s\n",
+			dev->name, msg);
 	}
 
 done:
@@ -806,7 +803,7 @@
 		goto undonotifier;
 
 	register_console(&netconsole);
-	printk(KERN_INFO "netconsole: network logging started\n");
+	pr_info("network logging started\n");
 
 	return err;
 
@@ -814,7 +811,7 @@
 	unregister_netdevice_notifier(&netconsole_netdev_notifier);
 
 fail:
-	printk(KERN_ERR "netconsole: cleaning up\n");
+	pr_err("cleaning up\n");
 
 	/*
 	 * Remove all targets and destroy them (only targets created
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b57ce5f..d2bb12b 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,8 +47,16 @@
 
 static int nlmon_dev_init(struct net_device *dev)
 {
+	int i;
+
 	dev->lstats = alloc_percpu(struct pcpu_lstats);
 
+	if (!dev->lstats)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct pcpu_lstats *nlmstats;
+		nlmstats = per_cpu_ptr(dev->lstats, i);
+		u64_stats_init(&nlmstats->syncp);
+	}
+
-	return dev->lstats == NULL ? -ENOMEM : 0;
+	return 0;
 }
 
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 342561a..9b5d46c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -154,6 +154,13 @@
 	  interface units of the Allwinner SoC that have an EMAC (A10,
 	  A12, A10s, etc.)
 
+config MDIO_MOXART
+        tristate "MOXA ART MDIO interface support"
+        depends on ARCH_MOXART
+        help
+          This driver supports the MDIO interface found in the network
+          interface units of the MOXA ART SoC
+
 config MDIO_BUS_MUX
 	tristate
 	depends on OF_MDIO
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 23a2ab2..9013dfa 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,3 +31,4 @@
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)	+= mdio-mux-gpio.o
 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
 obj-$(CONFIG_MDIO_SUN4I)	+= mdio-sun4i.o
+obj-$(CONFIG_MDIO_MOXART)	+= mdio-moxart.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ac22283..bc71947 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -100,6 +100,45 @@
 		wol->wolopts |= WAKE_MAGIC;
 }
 
+static int at803x_suspend(struct phy_device *phydev)
+{
+	int value;
+	int wol_enabled;
+
+	mutex_lock(&phydev->lock);
+
+	value = phy_read(phydev, AT803X_INTR_ENABLE);
+	wol_enabled = value & AT803X_WOL_ENABLE;
+
+	value = phy_read(phydev, MII_BMCR);
+
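+	/* With WoL armed the PHY must stay powered to see the wake
+	 * event, so isolate it from the MII bus rather than powering
+	 * it down.
+	 */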
+	if (wol_enabled)
+		value |= BMCR_ISOLATE;
+	else
+		value |= BMCR_PDOWN;
+
+	phy_write(phydev, MII_BMCR, value);
+
+	mutex_unlock(&phydev->lock);
+
+	return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+	int value;
+
+	mutex_lock(&phydev->lock);
+
+	value = phy_read(phydev, MII_BMCR);
+	value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+	phy_write(phydev, MII_BMCR, value);
+
+	mutex_unlock(&phydev->lock);
+
+	return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
 	int val;
@@ -161,10 +200,12 @@
 	.config_init	= at803x_config_init,
 	.set_wol	= at803x_set_wol,
 	.get_wol	= at803x_get_wol,
+	.suspend	= at803x_suspend,
+	.resume		= at803x_resume,
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
 	.driver		= {
 		.owner = THIS_MODULE,
 	},
@@ -176,10 +217,12 @@
 	.config_init	= at803x_config_init,
 	.set_wol	= at803x_set_wol,
 	.get_wol	= at803x_get_wol,
+	.suspend	= at803x_suspend,
+	.resume		= at803x_resume,
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
 	.driver		= {
 		.owner = THIS_MODULE,
 	},
@@ -191,10 +234,12 @@
 	.config_init	= at803x_config_init,
 	.set_wol	= at803x_set_wol,
 	.get_wol	= at803x_get_wol,
+	.suspend	= at803x_suspend,
+	.resume		= at803x_resume,
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
 	.driver		= {
 		.owner = THIS_MODULE,
 	},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e91477..2e3c778e 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -34,9 +34,9 @@
 #include <linux/marvell_phy.h>
 #include <linux/of.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define MII_MARVELL_PHY_PAGE		22
 
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
new file mode 100644
index 0000000..a5741cb
--- /dev/null
+++ b/drivers/net/phy/mdio-moxart.c
@@ -0,0 +1,201 @@
+/* MOXA ART Ethernet (RTL8201CP) MDIO interface driver
+ *
+ * Copyright (C) 2013 Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define REG_PHY_CTRL            0
+#define REG_PHY_WRITE_DATA      4
+
+/* REG_PHY_CTRL */
+#define MIIWR                   BIT(27) /* init write sequence (auto cleared) */
+#define MIIRD                   BIT(26) /* init read sequence (auto cleared) */
+#define REGAD_MASK              0x3e00000
+#define PHYAD_MASK              0x1f0000
+#define MIIRDATA_MASK           0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK           0xffff
+
+struct moxart_mdio_data {
+	void __iomem		*base;
+};
+
+static int moxart_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct moxart_mdio_data *data = bus->priv;
+	u32 ctrl = 0;
+	unsigned int count = 5;
+
+	dev_dbg(&bus->dev, "%s\n", __func__);
+
+	ctrl |= MIIRD | ((mii_id << 16) & PHYAD_MASK) |
+		((regnum << 21) & REGAD_MASK);
+
+	writel(ctrl, data->base + REG_PHY_CTRL);
+
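+	/* The controller clears MIIRD once the read cycle has finished;
+	 * poll for that, giving up after five 10ms intervals.
+	 */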
+	do {
+		ctrl = readl(data->base + REG_PHY_CTRL);
+
+		if (!(ctrl & MIIRD))
+			return ctrl & MIIRDATA_MASK;
+
+		mdelay(10);
+		count--;
+	} while (count > 0);
+
+	dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int moxart_mdio_write(struct mii_bus *bus, int mii_id,
+			     int regnum, u16 value)
+{
+	struct moxart_mdio_data *data = bus->priv;
+	u32 ctrl = 0;
+	unsigned int count = 5;
+
+	dev_dbg(&bus->dev, "%s\n", __func__);
+
+	ctrl |= MIIWR | ((mii_id << 16) & PHYAD_MASK) |
+		((regnum << 21) & REGAD_MASK);
+
+	value &= MIIWDATA_MASK;
+
+	writel(value, data->base + REG_PHY_WRITE_DATA);
+	writel(ctrl, data->base + REG_PHY_CTRL);
+
+	do {
+		ctrl = readl(data->base + REG_PHY_CTRL);
+
+		if (!(ctrl & MIIWR))
+			return 0;
+
+		mdelay(10);
+		count--;
+	} while (count > 0);
+
+	dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int moxart_mdio_reset(struct mii_bus *bus)
+{
+	int data, i;
+
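+	/* Soft-reset every PHY that answers on the bus by setting
+	 * BMCR_RESET; addresses that fail to read are skipped.
+	 */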
+	for (i = 0; i < PHY_MAX_ADDR; i++) {
+		data = moxart_mdio_read(bus, i, MII_BMCR);
+		if (data < 0)
+			continue;
+
+		data |= BMCR_RESET;
+		if (moxart_mdio_write(bus, i, MII_BMCR, data) < 0)
+			continue;
+	}
+
+	return 0;
+}
+
+static int moxart_mdio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mii_bus *bus;
+	struct moxart_mdio_data *data;
+	struct resource *res;
+	int ret, i;
+
+	bus = mdiobus_alloc_size(sizeof(*data));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "MOXA ART Ethernet MII";
+	bus->read = &moxart_mdio_read;
+	bus->write = &moxart_mdio_write;
+	bus->reset = &moxart_mdio_reset;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id);
+	bus->parent = &pdev->dev;
+
+	bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+			GFP_KERNEL);
+	if (!bus->irq) {
+		ret = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	/* Set PHY_IGNORE_INTERRUPT here even though it has no effect:
+	 * of_mdiobus_register() resets these entries to PHY_POLL.
+	 * Ideally the interrupt from the MAC controller could be used to
+	 * detect link state changes instead of polling, i.e. if there were
+	 * a way for a phy_driver to set PHY_HAS_INTERRUPT but have the
+	 * interrupt handled in the ethernet driver code.
+	 */
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		bus->irq[i] = PHY_IGNORE_INTERRUPT;
+
+	data = bus->priv;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->base)) {
+		ret = PTR_ERR(data->base);
+		goto err_out_free_mdiobus;
+	}
+
+	ret = of_mdiobus_register(bus, np);
+	if (ret < 0)
+		goto err_out_free_mdiobus;
+
+	platform_set_drvdata(pdev, bus);
+
+	return 0;
+
+err_out_free_mdiobus:
+	mdiobus_free(bus);
+	return ret;
+}
+
+static int moxart_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus = platform_get_drvdata(pdev);
+
+	mdiobus_unregister(bus);
+	mdiobus_free(bus);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_mdio_dt_ids[] = {
+	{ .compatible = "moxa,moxart-mdio" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids);
+
+static struct platform_driver moxart_mdio_driver = {
+	.probe = moxart_mdio_probe,
+	.remove = moxart_mdio_remove,
+	.driver = {
+		.name = "moxart-mdio",
+		.of_match_table = moxart_mdio_dt_ids,
+	},
+};
+
+module_platform_driver(moxart_mdio_driver);
+
+MODULE_DESCRIPTION("MOXA ART MDIO interface driver");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c31aad0..3ae28f4 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -287,6 +287,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= ks8737_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE,},
 }, {
 	.phy_id		= PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= ksz9021_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE, },
 }, {
 	.phy_id		= PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= ksz9021_config_intr,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE, },
 }, {
 	.phy_id		= PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE, },
 }, {
 	.phy_id		= PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 	.driver		= { .owner = THIS_MODULE, },
 } };
 
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 138de83..fa1d69a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,6 +64,18 @@
 	return err;
 }
 
+/* RTL8201CP */
+static struct phy_driver rtl8201cp_driver = {
+	.phy_id         = 0x00008201,
+	.name           = "RTL8201CP Ethernet",
+	.phy_id_mask    = 0x0000ffff,
+	.features       = PHY_BASIC_FEATURES,
+	.flags          = PHY_HAS_INTERRUPT,
+	.config_aneg    = &genphy_config_aneg,
+	.read_status    = &genphy_read_status,
+	.driver         = { .owner = THIS_MODULE,},
+};
+
 /* RTL8211B */
 static struct phy_driver rtl8211b_driver = {
 	.phy_id		= 0x001cc912,
@@ -98,6 +110,9 @@
 {
 	int ret;
 
+	ret = phy_driver_register(&rtl8201cp_driver);
+	if (ret < 0)
+		return -ENODEV;
 	ret = phy_driver_register(&rtl8211b_driver);
 	if (ret < 0)
 		return -ENODEV;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 1f7bef9..7b4ff35 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1002,7 +1002,7 @@
 		/* Any address will do - we take the first */
 		const struct in_ifaddr *ifa = in_dev->ifa_list;
 		if (ifa) {
-			memcpy(eth->h_source, dev->dev_addr, 6);
+			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
 			memset(eth->h_dest, 0xfc, 2);
 			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
 		}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 50e43e6..6574eb8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1540,6 +1540,12 @@
 	if (!team->pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct team_pcpu_stats *team_stats;
+		team_stats = per_cpu_ptr(team->pcpu_stats, i);
+		u64_stats_init(&team_stats->syncp);
+	}
+
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 829a9cd..d671fc3 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -570,7 +570,7 @@
 {
 	struct lb_priv *lb_priv = get_lb_priv(team);
 	lb_select_tx_port_func_t *func;
-	int err;
+	int i, err;
 
 	/* set default tx port selector */
 	func = lb_select_tx_port_get_func("hash");
@@ -588,6 +588,13 @@
 		goto err_alloc_pcpu_stats;
 	}
 
+	for_each_possible_cpu(i) {
+		struct lb_pcpu_stats *team_lb_stats;
+		team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+		u64_stats_init(&team_lb_stats->syncp);
+	}
+
 	INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
 
 	err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 40db312..85e4a016 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -242,6 +242,21 @@
 	    * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
 	    * Ericsson F5521gw Mobile Broadband Module
 
+config USB_NET_HUAWEI_CDC_NCM
+	tristate "Huawei NCM embedded AT channel support"
+	depends on USB_USBNET
+	select USB_WDM
+	select USB_NET_CDC_NCM
+	help
+	  This driver supports Huawei-style NCM devices that use NCM as a
+	  transport for other protocols, usually an embedded AT channel.
+	  Good examples are:
+	  * Huawei E3131
+	  * Huawei E3251
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called huawei_cdc_ncm.
+
 config USB_NET_CDC_MBIM
 	tristate "CDC MBIM support"
 	depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 8b342cf..b17b5e8 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -32,6 +32,7 @@
 obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)	+= cx82310_eth.o
 obj-$(CONFIG_USB_NET_CDC_NCM)	+= cdc_ncm.o
+obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM)	+= huawei_cdc_ncm.o
 obj-$(CONFIG_USB_VL600)		+= lg-vl600.o
 obj-$(CONFIG_USB_NET_QMI_WWAN)	+= qmi_wwan.o
 obj-$(CONFIG_USB_NET_CDC_MBIM)	+= cdc_mbim.o
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2..df507e6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -640,10 +640,10 @@
 {
 	struct catc *catc = netdev_priv(netdev);
 	struct netdev_hw_addr *ha;
-	u8 broadcast[6];
+	u8 broadcast[ETH_ALEN];
 	u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-	memset(broadcast, 0xff, 6);
+	memset(broadcast, 0xff, ETH_ALEN);
 	memset(catc->multicast, 0, 64);
 
 	catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@
 	struct usb_device *usbdev = interface_to_usbdev(intf);
 	struct net_device *netdev;
 	struct catc *catc;
-	u8 broadcast[6];
+	u8 broadcast[ETH_ALEN];
 	int i, pktsz;
 
 	if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@
 		
 		dev_dbg(dev, "Filling the multicast list.\n");
 	  
-		memset(broadcast, 0xff, 6);
+		memset(broadcast, 0xff, ETH_ALEN);
 		catc_multicast(broadcast, catc->multicast);
 		catc_multicast(netdev->dev_addr, catc->multicast);
 		catc_write_mem(catc, 0xfa80, catc->multicast, 64);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 7d78669..6358d42 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,7 +328,7 @@
 
 static struct usb_driver usbpn_driver;
 
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	static const char ifname[] = "usbpn%d";
 	const struct usb_cdc_union_desc *union_header = NULL;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 25ba7ec..c9f3281 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,8 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc-wdm.h>
 #include <linux/usb/cdc_ncm.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
 
 /* driver specific data - must match cdc_ncm usage */
 struct cdc_mbim_state {
@@ -42,13 +44,11 @@
 	if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
 		/* need autopm_get/put here to ensure the usbcore sees the new value */
 		rv = usb_autopm_get_interface(dev->intf);
-		if (rv < 0)
-			goto err;
 		dev->intf->needs_remote_wakeup = on;
-		usb_autopm_put_interface(dev->intf);
+		if (!rv)
+			usb_autopm_put_interface(dev->intf);
 	}
-err:
-	return rv;
+	return 0;
 }
 
 static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
@@ -173,7 +173,7 @@
 	}
 
 	spin_lock_bh(&ctx->mtx);
-	skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
 	spin_unlock_bh(&ctx->mtx);
 	return skb_out;
 
@@ -184,6 +184,60 @@
 	return NULL;
 }
 
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies.  The IPv6 core will not
+ * respond since IFF_NOARP is set, so we must handle them ourselves.
+ */
+static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
+{
+	struct ipv6hdr *iph = (void *)buf;
+	struct nd_msg *msg = (void *)(iph + 1);
+	struct net_device *netdev;
+	struct inet6_dev *in6_dev;
+	bool is_router;
+
+	/* we'll only respond to requests from unicast addresses to
+	 * our solicited node addresses.
+	 */
+	if (!ipv6_addr_is_solict_mult(&iph->daddr) ||
+	    !(ipv6_addr_type(&iph->saddr) & IPV6_ADDR_UNICAST))
+		return;
+
+	/* need to send the NA on the VLAN dev, if any */
+	if (tci)
+		netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
+					      tci);
+	else
+		netdev = dev->net;
+	if (!netdev)
+		return;
+
+	in6_dev = in6_dev_get(netdev);
+	if (!in6_dev)
+		return;
+	is_router = !!in6_dev->cnf.forwarding;
+	in6_dev_put(in6_dev);
+
+	/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
+	ipv6_stub->ndisc_send_na(netdev, NULL, &iph->saddr, &msg->target,
+				 is_router /* router */,
+				 true /* solicited */,
+				 false /* override */,
+				 true /* inc_opt */);
+}
+
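+/* A Neighbour Solicitation is ICMPv6 type 135 with code 0, carried
+ * directly after the IPv6 header; the buffer must be long enough for
+ * both before we look inside it.
+ */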
+static bool is_neigh_solicit(u8 *buf, size_t len)
+{
+	struct ipv6hdr *iph = (void *)buf;
+	struct nd_msg *msg = (void *)(iph + 1);
+
+	return (len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+		iph->nexthdr == IPPROTO_ICMPV6 &&
+		msg->icmph.icmp6_code == 0 &&
+		msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION);
+}
+
 static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
 {
 	__be16 proto = htons(ETH_P_802_3);
@@ -198,6 +252,8 @@
 			proto = htons(ETH_P_IP);
 			break;
 		case 0x60:
+			if (is_neigh_solicit(buf, len))
+				do_neigh_solicit(dev, buf, tci);
 			proto = htons(ETH_P_IPV6);
 			break;
 		default:
@@ -313,15 +369,13 @@
 
 static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
 {
-	int ret = 0;
+	int ret = -ENODEV;
 	struct usbnet *dev = usb_get_intfdata(intf);
 	struct cdc_mbim_state *info = (void *)&dev->data;
 	struct cdc_ncm_ctx *ctx = info->ctx;
 
-	if (ctx == NULL) {
-		ret = -1;
+	if (!ctx)
 		goto error;
-	}
 
 	/*
 	 * Both usbnet_suspend() and subdriver->suspend() MUST return 0
@@ -354,7 +408,7 @@
 	if (ret < 0)
 		goto err;
 	ret = usbnet_resume(intf);
-	if (ret < 0 && callsub && info->subdriver->suspend)
+	if (ret < 0 && callsub)
 		info->subdriver->suspend(intf, PMSG_SUSPEND);
 err:
 	return ret;
@@ -371,9 +425,18 @@
 };
 
 /* MBIM and NCM devices should not need a ZLP after NTBs with
- * dwNtbOutMaxSize length. This driver_info is for the exceptional
- * devices requiring it anyway, allowing them to be supported without
- * forcing the performance penalty on all the sane devices.
+ * dwNtbOutMaxSize length. Nevertheless, a number of devices from
+ * different vendor IDs will fail unless we send ZLPs, forcing us
+ * to make this the default.
+ *
+ * This default may cause a performance penalty for spec conforming
+ * devices wanting to take advantage of optimizations possible without
+ * ZLPs.  A whitelist is added in an attempt to avoid this for devices
+ * known to conform to the MBIM specification.
+ *
+ * All known devices supporting NCM compatibility mode are also
+ * conforming to the NCM and MBIM specifications. For this reason, the
+ * NCM subclass entry is also in the ZLP whitelist.
  */
 static const struct driver_info cdc_mbim_info_zlp = {
 	.description = "CDC MBIM",
@@ -396,17 +459,14 @@
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
-	/* Sierra Wireless MC7710 need ZLPs */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
-	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
-	},
-	/* HP hs2434 Mobile Broadband Module needs ZLPs */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
-	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
-	},
-	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	/* ZLP conformance whitelist: All Ericsson MBIM devices */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
+	/* default entry */
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+	},
 	{
 	},
 };
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 43afde8..f74786a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,8 +53,6 @@
 #include <linux/usb/cdc.h>
 #include <linux/usb/cdc_ncm.h>
 
-#define	DRIVER_VERSION				"14-Mar-2012"
-
 #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
 static bool prefer_mbim = true;
 #else
@@ -68,71 +66,67 @@
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
 static struct usb_driver cdc_ncm_driver;
 
-static void
-cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+static int cdc_ncm_setup(struct usbnet *dev)
 {
-	struct usbnet *dev = netdev_priv(net);
-
-	strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
-	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
-	strlcpy(info->fw_version, dev->driver_info->description,
-		sizeof(info->fw_version));
-	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
-{
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	struct usb_cdc_ncm_ntb_parameters ncm_parm;
 	u32 val;
 	u8 flags;
 	u8 iface_no;
 	int err;
 	int eth_hlen;
 	u16 ntb_fmt_supported;
-	u32 min_dgram_size;
-	u32 min_hdr_size;
-	struct usbnet *dev = netdev_priv(ctx->netdev);
+	__le16 max_datagram_size;
 
 	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
 	err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
 			      USB_TYPE_CLASS | USB_DIR_IN
 			      |USB_RECIP_INTERFACE,
-			      0, iface_no, &ctx->ncm_parm,
-			      sizeof(ctx->ncm_parm));
+			      0, iface_no, &ncm_parm,
+			      sizeof(ncm_parm));
 	if (err < 0) {
-		pr_debug("failed GET_NTB_PARAMETERS\n");
-		return 1;
+		dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+		return err; /* GET_NTB_PARAMETERS is required */
 	}
 
 	/* read correct set of parameters according to device mode */
-	ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
-	ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
-	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
-	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
-	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+	ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+	ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+	ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+	ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+	ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
 	/* devices prior to NCM Errata shall set this field to zero */
-	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
-	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+	ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
 
-	eth_hlen = ETH_HLEN;
-	min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-	min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
-	if (ctx->mbim_desc != NULL) {
-		flags = ctx->mbim_desc->bmNetworkCapabilities;
+	/* there are some minor differences in NCM and MBIM defaults */
+	if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+		if (!ctx->mbim_desc)
+			return -EINVAL;
 		eth_hlen = 0;
-		min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
-		min_hdr_size = 0;
-	} else if (ctx->func_desc != NULL) {
-		flags = ctx->func_desc->bmNetworkCapabilities;
+		flags = ctx->mbim_desc->bmNetworkCapabilities;
+		ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+		if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
+			ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
 	} else {
-		flags = 0;
+		if (!ctx->func_desc)
+			return -EINVAL;
+		eth_hlen = ETH_HLEN;
+		flags = ctx->func_desc->bmNetworkCapabilities;
+		ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+		if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+			ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
 	}
 
-	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
-		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
-		 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
-		 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-		 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+	/* common absolute max for NCM and MBIM */
+	if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+		ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+
+	dev_dbg(&dev->intf->dev,
+		"dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
+		ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+		ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
 
 	/* max count of tx datagrams */
 	if ((ctx->tx_max_datagrams == 0) ||
@@ -141,19 +135,19 @@
 
 	/* verify maximum size of received NTB in bytes */
 	if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
-		pr_debug("Using min receive length=%d\n",
-						USB_CDC_NCM_NTB_MIN_IN_SIZE);
+		dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
+			USB_CDC_NCM_NTB_MIN_IN_SIZE);
 		ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
 	}
 
 	if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
-		pr_debug("Using default maximum receive length=%d\n",
-						CDC_NCM_NTB_MAX_SIZE_RX);
+		dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
+			CDC_NCM_NTB_MAX_SIZE_RX);
 		ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
 	}
 
 	/* inform device about NTB input size changes */
-	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+	if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
 		__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
 
 		err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -161,16 +155,22 @@
 				       | USB_RECIP_INTERFACE,
 				       0, iface_no, &dwNtbInMaxSize, 4);
 		if (err < 0)
-			pr_debug("Setting NTB Input Size failed\n");
+			dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
 	}
 
 	/* verify maximum size of transmitted NTB in bytes */
-	if ((ctx->tx_max <
-	    (min_hdr_size + min_dgram_size)) ||
-	    (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
-		pr_debug("Using default maximum transmit length=%d\n",
-						CDC_NCM_NTB_MAX_SIZE_TX);
+	if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
+		dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+			CDC_NCM_NTB_MAX_SIZE_TX);
 		ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+
+		/* Adding a pad byte here simplifies the handling in
+		 * cdc_ncm_fill_tx_frame, by making tx_max always
+		 * represent the real skb max size.
+		 */
+		if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+			ctx->tx_max++;
+
 	}
 
 	/*
@@ -183,7 +183,7 @@
 
 	if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
 	    (val != ((-val) & val)) || (val >= ctx->tx_max)) {
-		pr_debug("Using default alignment: 4 bytes\n");
+		dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n");
 		ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
 	}
 
@@ -197,13 +197,13 @@
 
 	if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
 	    (val != ((-val) & val)) || (val >= ctx->tx_max)) {
-		pr_debug("Using default transmit modulus: 4 bytes\n");
+		dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n");
 		ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
 	}
 
 	/* verify the payload remainder */
 	if (ctx->tx_remainder >= ctx->tx_modulus) {
-		pr_debug("Using default transmit remainder: 0 bytes\n");
+		dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n");
 		ctx->tx_remainder = 0;
 	}
 
@@ -221,7 +221,7 @@
 				       USB_CDC_NCM_CRC_NOT_APPENDED,
 				       iface_no, NULL, 0);
 		if (err < 0)
-			pr_debug("Setting CRC mode off failed\n");
+			dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
 	}
 
 	/* set NTB format, if both formats are supported */
@@ -232,69 +232,43 @@
 				       USB_CDC_NCM_NTB16_FORMAT,
 				       iface_no, NULL, 0);
 		if (err < 0)
-			pr_debug("Setting NTB format to 16-bit failed\n");
+			dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
 	}
 
-	ctx->max_datagram_size = min_dgram_size;
+	/* inform the device about the selected Max Datagram Size */
+	if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+		goto out;
 
-	/* set Max Datagram Size (MTU) */
-	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
-		__le16 max_datagram_size;
-		u16 eth_max_sz;
-		if (ctx->ether_desc != NULL)
-			eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-		else if (ctx->mbim_desc != NULL)
-			eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
-		else
-			goto max_dgram_err;
-
-		err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
-				      USB_TYPE_CLASS | USB_DIR_IN
-				      | USB_RECIP_INTERFACE,
-				      0, iface_no, &max_datagram_size, 2);
-		if (err < 0) {
-			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
-				 min_dgram_size);
-		} else {
-			ctx->max_datagram_size =
-				le16_to_cpu(max_datagram_size);
-			/* Check Eth descriptor value */
-			if (ctx->max_datagram_size > eth_max_sz)
-					ctx->max_datagram_size = eth_max_sz;
-
-			if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-				ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
-
-			if (ctx->max_datagram_size < min_dgram_size)
-				ctx->max_datagram_size = min_dgram_size;
-
-			/* if value changed, update device */
-			if (ctx->max_datagram_size !=
-					le16_to_cpu(max_datagram_size)) {
-				err = usbnet_write_cmd(dev,
-						USB_CDC_SET_MAX_DATAGRAM_SIZE,
-						USB_TYPE_CLASS | USB_DIR_OUT
-						 | USB_RECIP_INTERFACE,
-						0,
-						iface_no, &max_datagram_size,
-						2);
-				if (err < 0)
-					pr_debug("SET_MAX_DGRAM_SIZE failed\n");
-			}
-		}
+	/* read current mtu value from device */
+	err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+			      0, iface_no, &max_datagram_size, 2);
+	if (err < 0) {
+		dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+		goto out;
 	}
 
-max_dgram_err:
-	if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
-		ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
+	if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+		goto out;
 
+	max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+	err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+			       USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+			       0, iface_no, &max_datagram_size, 2);
+	if (err < 0)
+		dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+	/* set MTU to max supported by the device if necessary */
+	if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
+		dev->net->mtu = ctx->max_datagram_size - eth_hlen;
 	return 0;
 }
 
 static void
-cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
 {
-	struct usb_host_endpoint *e;
+	struct usb_host_endpoint *e, *in = NULL, *out = NULL;
 	u8 ep;
 
 	for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
@@ -303,18 +277,18 @@
 		switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
 		case USB_ENDPOINT_XFER_INT:
 			if (usb_endpoint_dir_in(&e->desc)) {
-				if (ctx->status_ep == NULL)
-					ctx->status_ep = e;
+				if (!dev->status)
+					dev->status = e;
 			}
 			break;
 
 		case USB_ENDPOINT_XFER_BULK:
 			if (usb_endpoint_dir_in(&e->desc)) {
-				if (ctx->in_ep == NULL)
-					ctx->in_ep = e;
+				if (!in)
+					in = e;
 			} else {
-				if (ctx->out_ep == NULL)
-					ctx->out_ep = e;
+				if (!out)
+					out = e;
 			}
 			break;
 
@@ -322,6 +296,14 @@
 			break;
 		}
 	}
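+	/* hand the first bulk IN/OUT endpoints found to usbnet as pipes, unless they are already set */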
+	if (in && !dev->in)
+		dev->in = usb_rcvbulkpipe(dev->udev,
+					  in->desc.bEndpointAddress &
+					  USB_ENDPOINT_NUMBER_MASK);
+	if (out && !dev->out)
+		dev->out = usb_sndbulkpipe(dev->udev,
+					   out->desc.bEndpointAddress &
+					   USB_ENDPOINT_NUMBER_MASK);
 }
 
 static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
@@ -342,18 +324,9 @@
 	kfree(ctx);
 }
 
-static const struct ethtool_ops cdc_ncm_ethtool_ops = {
-	.get_drvinfo = cdc_ncm_get_drvinfo,
-	.get_link = usbnet_get_link,
-	.get_msglevel = usbnet_get_msglevel,
-	.set_msglevel = usbnet_set_msglevel,
-	.get_settings = usbnet_get_settings,
-	.set_settings = usbnet_set_settings,
-	.nway_reset = usbnet_nway_reset,
-};
-
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
 {
+	const struct usb_cdc_union_desc *union_desc = NULL;
 	struct cdc_ncm_ctx *ctx;
 	struct usb_driver *driver;
 	u8 *buf;
@@ -367,23 +340,22 @@
 
 	hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
-	ctx->bh.data = (unsigned long)ctx;
+	ctx->bh.data = (unsigned long)dev;
 	ctx->bh.func = cdc_ncm_txpath_bh;
 	atomic_set(&ctx->stop, 0);
 	spin_lock_init(&ctx->mtx);
-	ctx->netdev = dev->net;
 
 	/* store ctx pointer in device data field */
 	dev->data[0] = (unsigned long)ctx;
 
+	/* only the control interface can be successfully probed */
+	ctx->control = intf;
+
 	/* get some pointers */
 	driver = driver_of(intf);
 	buf = intf->cur_altsetting->extra;
 	len = intf->cur_altsetting->extralen;
 
-	ctx->udev = dev->udev;
-	ctx->intf = intf;
-
 	/* parse through descriptors associated with control interface */
 	while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
 
@@ -392,16 +364,18 @@
 
 		switch (buf[2]) {
 		case USB_CDC_UNION_TYPE:
-			if (buf[0] < sizeof(*(ctx->union_desc)))
+			if (buf[0] < sizeof(*union_desc))
 				break;
 
-			ctx->union_desc =
-					(const struct usb_cdc_union_desc *)buf;
-
-			ctx->control = usb_ifnum_to_if(dev->udev,
-					ctx->union_desc->bMasterInterface0);
+			union_desc = (const struct usb_cdc_union_desc *)buf;
+			/* the master must be the interface we are probing */
+			if (intf->cur_altsetting->desc.bInterfaceNumber !=
+			    union_desc->bMasterInterface0) {
+				dev_dbg(&intf->dev, "bogus CDC Union\n");
+				goto error;
+			}
 			ctx->data = usb_ifnum_to_if(dev->udev,
-					ctx->union_desc->bSlaveInterface0);
+						    union_desc->bSlaveInterface0);
 			break;
 
 		case USB_CDC_ETHERNET_TYPE:
@@ -410,13 +384,6 @@
 
 			ctx->ether_desc =
 					(const struct usb_cdc_ether_desc *)buf;
-			dev->hard_mtu =
-				le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
-			if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
-				dev->hard_mtu =	CDC_NCM_MIN_DATAGRAM_SIZE;
-			else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
-				dev->hard_mtu =	CDC_NCM_MAX_DATAGRAM_SIZE;
 			break;
 
 		case USB_CDC_NCM_TYPE:
@@ -444,69 +411,71 @@
 	}
 
 	/* some buggy devices have an IAD but no CDC Union */
-	if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
-		ctx->control = intf;
+	if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
 		ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
 		dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
 	}
 
 	/* check if we got everything */
-	if ((ctx->control == NULL) || (ctx->data == NULL) ||
-	    ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
+	if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
+		dev_dbg(&intf->dev, "CDC descriptors missing\n");
 		goto error;
+	}
 
 	/* claim data interface, if different from control */
 	if (ctx->data != ctx->control) {
 		temp = usb_driver_claim_interface(driver, ctx->data, dev);
-		if (temp)
+		if (temp) {
+			dev_dbg(&intf->dev, "failed to claim data intf\n");
 			goto error;
+		}
 	}
 
 	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
 
 	/* reset data interface */
 	temp = usb_set_interface(dev->udev, iface_no, 0);
-	if (temp)
+	if (temp) {
+		dev_dbg(&intf->dev, "set interface failed\n");
 		goto error2;
-
-	/* initialize data interface */
-	if (cdc_ncm_setup(ctx))
-		goto error2;
+	}
 
 	/* configure data interface */
 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
-	if (temp)
+	if (temp) {
+		dev_dbg(&intf->dev, "set interface failed\n");
 		goto error2;
+	}
 
-	cdc_ncm_find_endpoints(ctx, ctx->data);
-	cdc_ncm_find_endpoints(ctx, ctx->control);
-
-	if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
-	    (ctx->status_ep == NULL))
+	cdc_ncm_find_endpoints(dev, ctx->data);
+	cdc_ncm_find_endpoints(dev, ctx->control);
+	if (!dev->in || !dev->out || !dev->status) {
+		dev_dbg(&intf->dev, "failed to collect endpoints\n");
 		goto error2;
+	}
 
-	dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+	/* initialize data interface */
+	if (cdc_ncm_setup(dev))	{
+		dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+		goto error2;
+	}
 
 	usb_set_intfdata(ctx->data, dev);
 	usb_set_intfdata(ctx->control, dev);
-	usb_set_intfdata(ctx->intf, dev);
 
 	if (ctx->ether_desc) {
 		temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
-		if (temp)
+		if (temp) {
+			dev_dbg(&intf->dev, "failed to get mac address\n");
 			goto error2;
-		dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+		}
+		dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
 	}
 
-
-	dev->in = usb_rcvbulkpipe(dev->udev,
-		ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-	dev->out = usb_sndbulkpipe(dev->udev,
-		ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-	dev->status = ctx->status_ep;
+	/* usbnet uses these values for sizing tx/rx queues */
+	dev->hard_mtu = ctx->tx_max;
 	dev->rx_urb_size = ctx->rx_max;
 
-	ctx->tx_speed = ctx->rx_speed = 0;
 	return 0;
 
 error2:
@@ -517,7 +486,7 @@
 error:
 	cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
 	dev->data[0] = 0;
-	dev_info(&dev->udev->dev, "bind() failure\n");
+	dev_info(&intf->dev, "bind() failure\n");
 	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
@@ -553,7 +522,7 @@
 		ctx->control = NULL;
 	}
 
-	usb_set_intfdata(ctx->intf, NULL);
+	usb_set_intfdata(intf, NULL);
 	cdc_ncm_free(ctx);
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
@@ -662,8 +631,9 @@
 }
 
 struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 {
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
 	struct usb_cdc_ncm_nth16 *nth16;
 	struct usb_cdc_ncm_ndp16 *ndp16;
 	struct sk_buff *skb_out;
@@ -683,11 +653,11 @@
 
 	/* allocate a new OUT skb */
 	if (!skb_out) {
-		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
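+		/* no +1 needed: cdc_ncm_setup() ensures tx_max is never a multiple of wMaxPacketSize, leaving room for the short-packet byte */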
+		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
 		if (skb_out == NULL) {
 			if (skb != NULL) {
 				dev_kfree_skb_any(skb);
-				ctx->netdev->stats.tx_dropped++;
+				dev->net->stats.tx_dropped++;
 			}
 			goto exit_no_skb;
 		}
@@ -725,12 +695,12 @@
 				/* won't fit, MTU problem? */
 				dev_kfree_skb_any(skb);
 				skb = NULL;
-				ctx->netdev->stats.tx_dropped++;
+				dev->net->stats.tx_dropped++;
 			} else {
 				/* no room for skb - store for later */
 				if (ctx->tx_rem_skb != NULL) {
 					dev_kfree_skb_any(ctx->tx_rem_skb);
-					ctx->netdev->stats.tx_dropped++;
+					dev->net->stats.tx_dropped++;
 				}
 				ctx->tx_rem_skb = skb;
 				ctx->tx_rem_sign = sign;
@@ -763,7 +733,7 @@
 	if (skb != NULL) {
 		dev_kfree_skb_any(skb);
 		skb = NULL;
-		ctx->netdev->stats.tx_dropped++;
+		dev->net->stats.tx_dropped++;
 	}
 
 	ctx->tx_curr_frame_num = n;
@@ -788,19 +758,20 @@
 		/* variables will be reset at next call */
 	}
 
-	/*
-	 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
-	 * we send buffers as it is. If we get more data, it would be more
-	 * efficient for USB HS mobile device with DMA engine to receive a full
-	 * size NTB, than canceling DMA transfer and receiving a short packet.
+	/* If the collected data size is less than or equal to
+	 * CDC_NCM_MIN_TX_PKT bytes, we send the buffer as is. If we
+	 * get more data, it is more efficient for a USB HS mobile
+	 * device with a DMA engine to receive a full-size NTB than to
+	 * cancel the DMA transfer and receive a short packet.
+	 *
+	 * This optimization is pointless if we end up sending a ZLP
+	 * after full-sized NTBs.
 	 */
-	if (skb_out->len > CDC_NCM_MIN_TX_PKT)
-		/* final zero padding */
-		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
-
-	/* do we need to prevent a ZLP? */
-	if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
-	    (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+	    skb_out->len > CDC_NCM_MIN_TX_PKT)
+		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
+		       ctx->tx_max - skb_out->len);
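+	/* an NTB ending exactly on a packet boundary would otherwise need a ZLP; append one byte to force a short packet */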
+	else if ((skb_out->len % dev->maxpacket) == 0)
 		*skb_put(skb_out, 1) = 0;	/* force short packet */
 
 	/* set final frame length */
@@ -809,7 +780,7 @@
 
 	/* return skb */
 	ctx->tx_curr_skb = NULL;
-	ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
+	dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
 	return skb_out;
 
 exit_no_skb:
@@ -841,24 +812,25 @@
 
 static void cdc_ncm_txpath_bh(unsigned long param)
 {
-	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;
+	struct usbnet *dev = (struct usbnet *)param;
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
 
 	spin_lock_bh(&ctx->mtx);
 	if (ctx->tx_timer_pending != 0) {
 		ctx->tx_timer_pending--;
 		cdc_ncm_tx_timeout_start(ctx);
 		spin_unlock_bh(&ctx->mtx);
-	} else if (ctx->netdev != NULL) {
+	} else if (dev->net != NULL) {
 		spin_unlock_bh(&ctx->mtx);
-		netif_tx_lock_bh(ctx->netdev);
-		usbnet_start_xmit(NULL, ctx->netdev);
-		netif_tx_unlock_bh(ctx->netdev);
+		netif_tx_lock_bh(dev->net);
+		usbnet_start_xmit(NULL, dev->net);
+		netif_tx_unlock_bh(dev->net);
 	} else {
 		spin_unlock_bh(&ctx->mtx);
 	}
 }
 
-static struct sk_buff *
+struct sk_buff *
 cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
 	struct sk_buff *skb_out;
@@ -875,7 +847,7 @@
 		goto error;
 
 	spin_lock_bh(&ctx->mtx);
-	skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+	skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
 	spin_unlock_bh(&ctx->mtx);
 	return skb_out;
 
@@ -885,10 +857,12 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(cdc_ncm_tx_fixup);
 
 /* verify NTB header and return offset of first NDP, or negative error */
 int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
 {
+	struct usbnet *dev = netdev_priv(skb_in->dev);
 	struct usb_cdc_ncm_nth16 *nth16;
 	int len;
 	int ret = -EINVAL;
@@ -898,30 +872,33 @@
 
 	if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
 					sizeof(struct usb_cdc_ncm_ndp16))) {
-		pr_debug("frame too short\n");
+		netif_dbg(dev, rx_err, dev->net, "frame too short\n");
 		goto error;
 	}
 
 	nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
 
-	if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN) {
-		pr_debug("invalid NTH16 signature <%u>\n",
-					le32_to_cpu(nth16->dwSignature));
+	if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+		netif_dbg(dev, rx_err, dev->net,
+			  "invalid NTH16 signature <%#010x>\n",
+			  le32_to_cpu(nth16->dwSignature));
 		goto error;
 	}
 
 	len = le16_to_cpu(nth16->wBlockLength);
 	if (len > ctx->rx_max) {
-		pr_debug("unsupported NTB block length %u/%u\n", len,
-								ctx->rx_max);
+		netif_dbg(dev, rx_err, dev->net,
+			  "unsupported NTB block length %u/%u\n", len,
+			  ctx->rx_max);
 		goto error;
 	}
 
 	if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
-		(ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
-		!((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
-		pr_debug("sequence number glitch prev=%d curr=%d\n",
-				ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+	    (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+	    !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
+		netif_dbg(dev, rx_err, dev->net,
+			  "sequence number glitch prev=%d curr=%d\n",
+			  ctx->rx_seq, le16_to_cpu(nth16->wSequence));
 	}
 	ctx->rx_seq = le16_to_cpu(nth16->wSequence);
 
@@ -934,18 +911,20 @@
 /* verify NDP header and return number of datagrams, or negative error */
 int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
 {
+	struct usbnet *dev = netdev_priv(skb_in->dev);
 	struct usb_cdc_ncm_ndp16 *ndp16;
 	int ret = -EINVAL;
 
 	if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
-		pr_debug("invalid NDP offset  <%u>\n", ndpoffset);
+		netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+			  ndpoffset);
 		goto error;
 	}
 	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
 
 	if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
-		pr_debug("invalid DPT16 length <%u>\n",
-					le32_to_cpu(ndp16->dwSignature));
+		netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
+			  le16_to_cpu(ndp16->wLength));
 		goto error;
 	}
 
@@ -954,9 +933,9 @@
 					sizeof(struct usb_cdc_ncm_dpe16));
 	ret--; /* we process NDP entries except for the last one */
 
-	if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
-								skb_in->len) {
-		pr_debug("Invalid nframes = %d\n", ret);
+	if ((sizeof(struct usb_cdc_ncm_ndp16) +
+	     ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
+		netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
 		ret = -EINVAL;
 	}
 
@@ -965,7 +944,7 @@
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
 
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
 {
 	struct sk_buff *skb;
 	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -989,9 +968,10 @@
 
 	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
 
-	if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
-		pr_debug("invalid DPT16 signature <%u>\n",
-			 le32_to_cpu(ndp16->dwSignature));
+	if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+		netif_dbg(dev, rx_err, dev->net,
+			  "invalid DPT16 signature <%#010x>\n",
+			  le32_to_cpu(ndp16->dwSignature));
 		goto err_ndp;
 	}
 	dpe16 = ndp16->dpe16;
@@ -1013,9 +993,9 @@
 		/* sanity checking */
 		if (((offset + len) > skb_in->len) ||
 				(len > ctx->rx_max) || (len < ETH_HLEN)) {
-			pr_debug("invalid frame detected (ignored)"
-					"offset[%u]=%u, length=%u, skb=%p\n",
-					x, offset, len, skb_in);
+			netif_dbg(dev, rx_err, dev->net,
+				  "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+				  x, offset, len, skb_in);
 			if (!x)
 				goto err_ndp;
 			break;
@@ -1040,9 +1020,10 @@
 error:
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_fixup);
 
 static void
-cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+cdc_ncm_speed_change(struct usbnet *dev,
 		     struct usb_cdc_speed_change *data)
 {
 	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
@@ -1052,25 +1033,16 @@
 	 * Currently the USB-NET API does not support reporting the actual
 	 * device speed, so we print it here instead.
 	 */
-	if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
-		ctx->tx_speed = tx_speed;
-		ctx->rx_speed = rx_speed;
-
-		if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
-			printk(KERN_INFO KBUILD_MODNAME
-				": %s: %u mbit/s downlink "
-				"%u mbit/s uplink\n",
-				ctx->netdev->name,
-				(unsigned int)(rx_speed / 1000000U),
-				(unsigned int)(tx_speed / 1000000U));
-		} else {
-			printk(KERN_INFO KBUILD_MODNAME
-				": %s: %u kbit/s downlink "
-				"%u kbit/s uplink\n",
-				ctx->netdev->name,
-				(unsigned int)(rx_speed / 1000U),
-				(unsigned int)(tx_speed / 1000U));
-		}
+	if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+		netif_info(dev, link, dev->net,
+		       "%u mbit/s downlink %u mbit/s uplink\n",
+		       (unsigned int)(rx_speed / 1000000U),
+		       (unsigned int)(tx_speed / 1000000U));
+	} else {
+		netif_info(dev, link, dev->net,
+		       "%u kbit/s downlink %u kbit/s uplink\n",
+		       (unsigned int)(rx_speed / 1000U),
+		       (unsigned int)(tx_speed / 1000U));
 	}
 }
 
@@ -1086,7 +1058,7 @@
 
 	/* test for split data in 8-byte chunks */
 	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
-		cdc_ncm_speed_change(ctx,
+		cdc_ncm_speed_change(dev,
 		      (struct usb_cdc_speed_change *)urb->transfer_buffer);
 		return;
 	}
@@ -1101,14 +1073,10 @@
 		 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
 		 */
 		ctx->connected = le16_to_cpu(event->wValue);
-
-		printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
-			" %sconnected\n",
-			ctx->netdev->name, ctx->connected ? "" : "dis");
-
+		netif_info(dev, link, dev->net,
+			   "network connection: %sconnected\n",
+			   ctx->connected ? "" : "dis");
 		usbnet_link_change(dev, ctx->connected, 0);
-		if (!ctx->connected)
-			ctx->tx_speed = ctx->rx_speed = 0;
 		break;
 
 	case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1116,8 +1084,8 @@
 					sizeof(struct usb_cdc_speed_change)))
 			set_bit(EVENT_STS_SPLIT, &dev->flags);
 		else
-			cdc_ncm_speed_change(ctx,
-				(struct usb_cdc_speed_change *) &event[1]);
+			cdc_ncm_speed_change(dev,
+					     (struct usb_cdc_speed_change *)&event[1]);
 		break;
 
 	default:
@@ -1139,22 +1107,6 @@
 	return !ctx->connected;
 }
 
-static int
-cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
-{
-	return usbnet_probe(udev, prod);
-}
-
-static void cdc_ncm_disconnect(struct usb_interface *intf)
-{
-	struct usbnet *dev = usb_get_intfdata(intf);
-
-	if (dev == NULL)
-		return;		/* already disconnected */
-
-	usbnet_disconnect(intf);
-}
-
 static const struct driver_info cdc_ncm_info = {
 	.description = "CDC NCM",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
@@ -1234,17 +1186,6 @@
 	  .driver_info = (unsigned long)&wwan_info,
 	},
 
-	/* Huawei NCM devices disguised as vendor specific */
-	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
-	  .driver_info = (unsigned long)&wwan_info,
-	},
-	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
-	  .driver_info = (unsigned long)&wwan_info,
-	},
-	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
-	  .driver_info = (unsigned long)&wwan_info,
-	},
-
 	/* Infineon(now Intel) HSPA Modem platform */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
 		USB_CLASS_COMM,
@@ -1265,8 +1206,8 @@
 static struct usb_driver cdc_ncm_driver = {
 	.name = "cdc_ncm",
 	.id_table = cdc_devs,
-	.probe = cdc_ncm_probe,
-	.disconnect = cdc_ncm_disconnect,
+	.probe = usbnet_probe,
+	.disconnect = usbnet_disconnect,
 	.suspend = usbnet_suspend,
 	.resume = usbnet_resume,
 	.reset_resume =	usbnet_resume,
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
new file mode 100644
index 0000000..312178d
--- /dev/null
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -0,0 +1,230 @@
+/* huawei_cdc_ncm.c - handles Huawei devices using the CDC NCM protocol as
+ * transport layer.
+ * Copyright (C) 2013	 Enrico Mioso <mrkiko.rs@gmail.com>
+ *
+ *
+ * ABSTRACT:
+ * This driver handles devices resembling the CDC NCM standard, but
+ * encapsulating another protocol inside it. Examples are some Huawei 3G
+ * devices, which expose an embedded AT channel for setting up the NCM
+ * connection.
+ * This code has been heavily inspired by the cdc_mbim.c driver, which is
+ * Copyright (c) 2012  Smith Micro Software, Inc.
+ * Copyright (c) 2012  Bjørn Mork <bjorn@mork.no>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* Driver data */
+struct huawei_cdc_ncm_state {
+	struct cdc_ncm_ctx *ctx;
+	atomic_t pmcount;
+	struct usb_driver *subdriver;
+	struct usb_interface *control;
+	struct usb_interface *data;
+};
+
+static int huawei_cdc_ncm_manage_power(struct usbnet *usbnet_dev, int on)
+{
+	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+	int rv;
+
+	if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
+			(!on && atomic_dec_and_test(&drvstate->pmcount))) {
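+		/* need autopm_get/put here to ensure the usbcore sees the new value */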
+		rv = usb_autopm_get_interface(usbnet_dev->intf);
+		usbnet_dev->intf->needs_remote_wakeup = on;
+		if (!rv)
+			usb_autopm_put_interface(usbnet_dev->intf);
+	}
+	return 0;
+}
+
+static int huawei_cdc_ncm_wdm_manage_power(struct usb_interface *intf,
+					   int status)
+{
+	struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+
+	/* can be called while disconnecting */
+	if (!usbnet_dev)
+		return 0;
+
+	return huawei_cdc_ncm_manage_power(usbnet_dev, status);
+}
+
+
+static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
+			       struct usb_interface *intf)
+{
+	struct cdc_ncm_ctx *ctx;
+	struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+	int ret = -ENODEV;
+	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+
+	/* altsetting should always be 1 for NCM devices, so we hard-code
+	 * it here
+	 */
+	ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+	if (ret)
+		goto err;
+
+	ctx = drvstate->ctx;
+
+	if (usbnet_dev->status)
+		/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
+		 * decimal (0x100)"
+		 */
+		subdriver = usb_cdc_wdm_register(ctx->control,
+						 &usbnet_dev->status->desc,
+						 256, /* wMaxCommand */
+						 huawei_cdc_ncm_wdm_manage_power);
+	if (IS_ERR(subdriver)) {
+		ret = PTR_ERR(subdriver);
+		cdc_ncm_unbind(usbnet_dev, intf);
+		goto err;
+	}
+
+	/* Prevent usbnet from using the status descriptor */
+	usbnet_dev->status = NULL;
+
+	drvstate->subdriver = subdriver;
+
+err:
+	return ret;
+}
+
+static void huawei_cdc_ncm_unbind(struct usbnet *usbnet_dev,
+				  struct usb_interface *intf)
+{
+	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+	struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+	if (drvstate->subdriver && drvstate->subdriver->disconnect)
+		drvstate->subdriver->disconnect(ctx->control);
+	drvstate->subdriver = NULL;
+
+	cdc_ncm_unbind(usbnet_dev, intf);
+}
+
+static int huawei_cdc_ncm_suspend(struct usb_interface *intf,
+				  pm_message_t message)
+{
+	int ret = 0;
+	struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+	struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+	if (ctx == NULL) {
+		ret = -ENODEV;
+		goto error;
+	}
+
+	ret = usbnet_suspend(intf, message);
+	if (ret < 0)
+		goto error;
+
+	if (intf == ctx->control &&
+		drvstate->subdriver &&
+		drvstate->subdriver->suspend)
+		ret = drvstate->subdriver->suspend(intf, message);
+	if (ret < 0)
+		usbnet_resume(intf);
+
+error:
+	return ret;
+}
+
+static int huawei_cdc_ncm_resume(struct usb_interface *intf)
+{
+	int ret = 0;
+	struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+	bool callsub;
+	struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+	/* should we call subdriver's resume function? */
+	callsub =
+		(intf == ctx->control &&
+		drvstate->subdriver &&
+		drvstate->subdriver->resume);
+
+	if (callsub)
+		ret = drvstate->subdriver->resume(intf);
+	if (ret < 0)
+		goto err;
+	ret = usbnet_resume(intf);
+	if (ret < 0 && callsub)
+		drvstate->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+	return ret;
+}
+
+static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
+{
+	struct cdc_ncm_ctx *ctx;
+
+	ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
+
+	if (ctx == NULL)
+		return 1; /* disconnected */
+
+	return !ctx->connected;
+}
+
+static const struct driver_info huawei_cdc_ncm_info = {
+	.description = "Huawei CDC NCM device",
+	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+	.bind = huawei_cdc_ncm_bind,
+	.unbind = huawei_cdc_ncm_unbind,
+	.check_connect = huawei_cdc_ncm_check_connect,
+	.manage_power = huawei_cdc_ncm_manage_power,
+	.rx_fixup = cdc_ncm_rx_fixup,
+	.tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static const struct usb_device_id huawei_cdc_ncm_devs[] = {
+	/* Huawei NCM devices disguised as vendor specific */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+	  .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+	},
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+	  .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+	},
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+	  .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+	},
+
+	/* Terminating entry */
+	{
+	},
+};
+MODULE_DEVICE_TABLE(usb, huawei_cdc_ncm_devs);
+
+static struct usb_driver huawei_cdc_ncm_driver = {
+	.name = "huawei_cdc_ncm",
+	.id_table = huawei_cdc_ncm_devs,
+	.probe = usbnet_probe,
+	.disconnect = usbnet_disconnect,
+	.suspend = huawei_cdc_ncm_suspend,
+	.resume = huawei_cdc_ncm_resume,
+	.reset_resume = huawei_cdc_ncm_resume,
+	.supports_autosuspend = 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(huawei_cdc_ncm_driver);
+MODULE_AUTHOR("Enrico Mioso <mrkiko.rs@gmail.com>");
+MODULE_DESCRIPTION("USB CDC NCM host driver with encapsulated protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 818ce90..23bdd5b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -143,24 +143,28 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
 	struct qmi_wwan_state *info = (void *)&dev->data;
-	int rv = 0;
+	int rv;
 
-	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+		atomic_read(&info->pmcount), on);
 
-	if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
-		/* need autopm_get/put here to ensure the usbcore sees the new value */
+	if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+	    (!on && atomic_dec_and_test(&info->pmcount))) {
+		/* need autopm_get/put here to ensure the usbcore sees
+		 * the new value
+		 */
 		rv = usb_autopm_get_interface(dev->intf);
-		if (rv < 0)
-			goto err;
 		dev->intf->needs_remote_wakeup = on;
-		usb_autopm_put_interface(dev->intf);
+		if (!rv)
+			usb_autopm_put_interface(dev->intf);
 	}
-err:
-	return rv;
+	return 0;
 }
 
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
@@ -199,7 +203,8 @@
 	atomic_set(&info->pmcount, 0);
 
 	/* register subdriver */
-	subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+	subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+					 4096, &qmi_wwan_cdc_wdm_manage_power);
 	if (IS_ERR(subdriver)) {
 		dev_err(&info->control->dev, "subdriver registration failed\n");
 		rv = PTR_ERR(subdriver);
@@ -228,7 +233,8 @@
 	struct usb_driver *driver = driver_of(intf);
 	struct qmi_wwan_state *info = (void *)&dev->data;
 
-	BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+	BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+		      sizeof(struct qmi_wwan_state)));
 
 	/* set up initial state */
 	info->control = intf;
@@ -250,7 +256,8 @@
 				goto err;
 			}
 			if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
-				dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+				dev_dbg(&intf->dev, "CDC header len %u\n",
+					h->bLength);
 				goto err;
 			}
 			break;
@@ -260,7 +267,8 @@
 				goto err;
 			}
 			if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
-				dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+				dev_dbg(&intf->dev, "CDC union len %u\n",
+					h->bLength);
 				goto err;
 			}
 			cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +279,15 @@
 				goto err;
 			}
 			if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
-				dev_dbg(&intf->dev, "CDC ether len %u\n",  h->bLength);
+				dev_dbg(&intf->dev, "CDC ether len %u\n",
+					h->bLength);
 				goto err;
 			}
 			cdc_ether = (struct usb_cdc_ether_desc *)buf;
 			break;
 		}
 
-		/*
-		 * Remember which CDC functional descriptors we've seen.  Works
+		/* Remember which CDC functional descriptors we've seen.  Works
 		 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
 		 * (0x0f) is the highest numbered
 		 */
@@ -293,10 +301,14 @@
 
 	/* Use separate control and data interfaces if we found a CDC Union */
 	if (cdc_union) {
-		info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
-		if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
-			dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
-				cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+		info->data = usb_ifnum_to_if(dev->udev,
+					     cdc_union->bSlaveInterface0);
+		if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+		    !info->data) {
+			dev_err(&intf->dev,
+				"bogus CDC Union: master=%u, slave=%u\n",
+				cdc_union->bMasterInterface0,
+				cdc_union->bSlaveInterface0);
 			goto err;
 		}
 	}
@@ -374,8 +386,7 @@
 	struct qmi_wwan_state *info = (void *)&dev->data;
 	int ret;
 
-	/*
-	 * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+	/* Both usbnet_suspend() and subdriver->suspend() MUST return 0
 	 * in system sleep context, otherwise, the resume callback has
 	 * to recover device from previous suspend failure.
 	 */
@@ -383,7 +394,8 @@
 	if (ret < 0)
 		goto err;
 
-	if (intf == info->control && info->subdriver && info->subdriver->suspend)
+	if (intf == info->control && info->subdriver &&
+	    info->subdriver->suspend)
 		ret = info->subdriver->suspend(intf, message);
 	if (ret < 0)
 		usbnet_resume(intf);
@@ -396,14 +408,15 @@
 	struct usbnet *dev = usb_get_intfdata(intf);
 	struct qmi_wwan_state *info = (void *)&dev->data;
 	int ret = 0;
-	bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+	bool callsub = (intf == info->control && info->subdriver &&
+			info->subdriver->resume);
 
 	if (callsub)
 		ret = info->subdriver->resume(intf);
 	if (ret < 0)
 		goto err;
 	ret = usbnet_resume(intf);
-	if (ret < 0 && callsub && info->subdriver->suspend)
+	if (ret < 0 && callsub)
 		info->subdriver->suspend(intf, PMSG_SUSPEND);
 err:
 	return ret;
@@ -714,6 +727,7 @@
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
 
@@ -777,7 +791,8 @@
 };
 MODULE_DEVICE_TABLE(usb, products);
 
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+			  const struct usb_device_id *prod)
 {
 	struct usb_device_id *id = (struct usb_device_id *)prod;
 
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eee1f19..2ec2041 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -188,6 +188,11 @@
 	return tot;
 }
 
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
 static int veth_open(struct net_device *dev)
 {
 	struct veth_priv *priv = netdev_priv(dev);
@@ -230,10 +235,18 @@
 
 static int veth_dev_init(struct net_device *dev)
 {
+	int i;
+
 	dev->vstats = alloc_percpu(struct pcpu_vstats);
 	if (!dev->vstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
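+		/* syncp is a seqcount; initialize it on every possible cpu before first use */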
+		struct pcpu_vstats *veth_stats;
+		veth_stats = per_cpu_ptr(dev->vstats, i);
+		u64_stats_init(&veth_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -250,11 +263,14 @@
 	.ndo_start_xmit      = veth_xmit,
 	.ndo_change_mtu      = veth_change_mtu,
 	.ndo_get_stats64     = veth_get_stats64,
+	.ndo_set_rx_mode     = veth_set_multicast_list,
 	.ndo_set_mac_address = eth_mac_addr,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
 		       NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+		       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |	    \
+		       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO	|   \
 		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
 		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
 
@@ -273,6 +289,7 @@
 	dev->destructor = veth_dev_free;
 
 	dev->hw_features = VETH_FEATURES;
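+	/* offer the same offloads for encapsulated (tunnel) traffic */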
+	dev->hw_enc_features = VETH_FEATURES;
 }
 
 /*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bbc9cb8..cdc7c90 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -124,12 +124,14 @@
 	/* Lock for config space updates */
 	struct mutex config_lock;
 
+	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
+	 * low on memory.
+	 */
+	struct page_frag alloc_frag;
+
 	/* Is the affinity hint set for virtqueues? */
 	bool affinity_hint_set;
 
-	/* Per-cpu variable to show the mapping from CPU to virtqueue */
-	int __percpu *vq_index;
-
 	/* CPU hot plug notifier */
 	struct notifier_block nb;
 };
@@ -217,33 +219,18 @@
 	netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
-			 unsigned int offset, unsigned int *len)
-{
-	int size = min((unsigned)PAGE_SIZE - offset, *len);
-	int i = skb_shinfo(skb)->nr_frags;
-
-	__skb_fill_page_desc(skb, i, page, offset, size);
-
-	skb->data_len += size;
-	skb->len += size;
-	skb->truesize += PAGE_SIZE;
-	skb_shinfo(skb)->nr_frags++;
-	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-	*len -= size;
-}
-
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct receive_queue *rq,
-				   struct page *page, unsigned int len)
+				   struct page *page, unsigned int offset,
+				   unsigned int len, unsigned int truesize)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
-	unsigned int copy, hdr_len, offset;
+	unsigned int copy, hdr_len, hdr_padded_len;
 	char *p;
 
-	p = page_address(page);
+	p = page_address(page) + offset;
 
 	/* copy small packet so we can reuse these pages for small data */
 	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +241,17 @@
 
 	if (vi->mergeable_rx_bufs) {
 		hdr_len = sizeof hdr->mhdr;
-		offset = hdr_len;
+		hdr_padded_len = sizeof hdr->mhdr;
 	} else {
 		hdr_len = sizeof hdr->hdr;
-		offset = sizeof(struct padded_vnet_hdr);
+		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 	}
 
 	memcpy(hdr, p, hdr_len);
 
 	len -= hdr_len;
-	p += offset;
+	offset += hdr_padded_len;
+	p += hdr_padded_len;
 
 	copy = len;
 	if (copy > skb_tailroom(skb))
@@ -273,6 +261,14 @@
 	len -= copy;
 	offset += copy;
 
+	if (vi->mergeable_rx_bufs) {
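+		/* any data not copied into the skb head stays in the page as a single frag; drop our ref if everything was copied */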
+		if (len)
+			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+		else
+			put_page(page);
+		return skb;
+	}
+
 	/*
 	 * Verify that we can indeed put this data into a skb.
 	 * This is here to handle cases when the device erroneously
@@ -284,9 +280,12 @@
 		dev_kfree_skb(skb);
 		return NULL;
 	}
-
+	BUG_ON(offset >= PAGE_SIZE);
 	while (len) {
-		set_skb_frag(skb, page, offset, &len);
+		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+				frag_size, truesize);
+		len -= frag_size;
 		page = (struct page *)page->private;
 		offset = 0;
 	}
@@ -297,33 +296,59 @@
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct sk_buff *curr_skb = head_skb;
+	char *buf;
 	struct page *page;
-	int num_buf, i, len;
+	int num_buf, len, offset;
 
 	num_buf = hdr->mhdr.num_buffers;
 	while (--num_buf) {
-		i = skb_shinfo(skb)->nr_frags;
-		if (i >= MAX_SKB_FRAGS) {
-			pr_debug("%s: packet too long\n", skb->dev->name);
-			skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
-		}
-		page = virtqueue_get_buf(rq->vq, &len);
-		if (!page) {
+		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
-				 skb->dev->name, hdr->mhdr.num_buffers);
-			skb->dev->stats.rx_length_errors++;
+				 head_skb->dev->name, hdr->mhdr.num_buffers);
+			head_skb->dev->stats.rx_length_errors++;
 			return -EINVAL;
 		}
-
-		if (len > PAGE_SIZE)
-			len = PAGE_SIZE;
-
-		set_skb_frag(skb, page, 0, &len);
-
+		if (unlikely(len > MAX_PACKET_LEN)) {
+			pr_debug("%s: rx error: merge buffer too long\n",
+				 head_skb->dev->name);
+			len = MAX_PACKET_LEN;
+		}
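+		/* out of frag slots: chain a new skb via frag_list and keep filling its frags */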
+		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+			if (unlikely(!nskb)) {
+				head_skb->dev->stats.rx_dropped++;
+				return -ENOMEM;
+			}
+			if (curr_skb == head_skb)
+				skb_shinfo(curr_skb)->frag_list = nskb;
+			else
+				curr_skb->next = nskb;
+			curr_skb = nskb;
+			head_skb->truesize += nskb->truesize;
+			num_skb_frags = 0;
+		}
+		if (curr_skb != head_skb) {
+			head_skb->data_len += len;
+			head_skb->len += len;
+			head_skb->truesize += MAX_PACKET_LEN;
+		}
+		page = virt_to_head_page(buf);
+		offset = buf - (char *)page_address(page);
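+		/* if the buffer is contiguous with the last frag, merge them and drop the extra page ref */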
+		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+			put_page(page);
+			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+					     len, MAX_PACKET_LEN);
+		} else {
+			skb_add_rx_frag(curr_skb, num_skb_frags, page,
+					offset, len,
+					MAX_PACKET_LEN);
+		}
 		--rq->num;
 	}
 	return 0;
@@ -341,8 +366,10 @@
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->mergeable_rx_bufs || vi->big_packets)
+		if (vi->big_packets)
 			give_pages(rq, buf);
+		else if (vi->mergeable_rx_bufs)
+			put_page(virt_to_head_page(buf));
 		else
 			dev_kfree_skb(buf);
 		return;
@@ -352,19 +379,28 @@
 		skb = buf;
 		len -= sizeof(struct virtio_net_hdr);
 		skb_trim(skb, len);
+	} else if (vi->mergeable_rx_bufs) {
+		struct page *page = virt_to_head_page(buf);
+		skb = page_to_skb(rq, page,
+				  (char *)buf - (char *)page_address(page),
+				  len, MAX_PACKET_LEN);
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			put_page(page);
+			return;
+		}
+		if (receive_mergeable(rq, skb)) {
+			dev_kfree_skb(skb);
+			return;
+		}
 	} else {
 		page = buf;
-		skb = page_to_skb(rq, page, len);
+		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
 		if (unlikely(!skb)) {
 			dev->stats.rx_dropped++;
 			give_pages(rq, page);
 			return;
 		}
-		if (vi->mergeable_rx_bufs)
-			if (receive_mergeable(rq, skb)) {
-				dev_kfree_skb(skb);
-				return;
-			}
 	}
 
 	hdr = skb_vnet_hdr(skb);
@@ -501,18 +537,28 @@
 
 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
-	struct page *page;
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	char *buf = NULL;
 	int err;
 
-	page = get_a_page(rq, gfp);
-	if (!page)
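+	/* sleeping callers refill the driver-owned page_frag; atomic callers fall back to the per-cpu netdev frag cache */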
+	if (gfp & __GFP_WAIT) {
+		if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+					 gfp)) {
+			buf = (char *)page_address(vi->alloc_frag.page) +
+			      vi->alloc_frag.offset;
+			get_page(vi->alloc_frag.page);
+			vi->alloc_frag.offset += MAX_PACKET_LEN;
+		}
+	} else {
+		buf = netdev_alloc_frag(MAX_PACKET_LEN);
+	}
+	if (!buf)
 		return -ENOMEM;
 
-	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
-	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+	sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
 	if (err < 0)
-		give_pages(rq, page);
+		put_page(virt_to_head_page(buf));
 
 	return err;
 }
@@ -545,7 +591,8 @@
 	} while (rq->vq->num_free);
 	if (unlikely(rq->num > rq->max))
 		rq->max = rq->num;
-	virtqueue_kick(rq->vq);
+	if (unlikely(!virtqueue_kick(rq->vq)))
+		return false;
 	return !oom;
 }
 
@@ -751,7 +798,7 @@
 	err = xmit_skb(sq, skb);
 
 	/* This should not happen! */
-	if (unlikely(err)) {
+	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
 		dev->stats.tx_fifo_errors++;
 		if (net_ratelimit())
 			dev_warn(&dev->dev,
@@ -760,7 +807,6 @@
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	virtqueue_kick(sq->vq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
@@ -819,12 +865,14 @@
 	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
 	       < 0);
 
-	virtqueue_kick(vi->cvq);
+	if (unlikely(!virtqueue_kick(vi->cvq)))
+		return status == VIRTIO_NET_OK;
 
 	/* Spin for a response; the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
 	 */
-	while (!virtqueue_get_buf(vi->cvq, &tmp))
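+	/* also give up if the virtqueue is broken, instead of spinning forever */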
+	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
+	       !virtqueue_is_broken(vi->cvq))
 		cpu_relax();
 
 	return status == VIRTIO_NET_OK;
@@ -852,8 +900,13 @@
 			return -EINVAL;
 		}
 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
-		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
-				  addr->sa_data, dev->addr_len);
+		unsigned int i;
+
+		/* Naturally, this has an atomicity problem. */
+		for (i = 0; i < dev->addr_len; i++)
+			virtio_cwrite8(vdev,
+				       offsetof(struct virtio_net_config, mac) +
+				       i, addr->sa_data[i]);
 	}
 
 	eth_commit_mac_addr_change(dev, p);
@@ -1065,7 +1118,6 @@
 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 {
 	int i;
-	int cpu;
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1075,16 +1127,6 @@
 
 		vi->affinity_hint_set = false;
 	}
-
-	i = 0;
-	for_each_online_cpu(cpu) {
-		if (cpu == hcpu) {
-			*per_cpu_ptr(vi->vq_index, cpu) = -1;
-		} else {
-			*per_cpu_ptr(vi->vq_index, cpu) =
-				++i % vi->curr_queue_pairs;
-		}
-	}
 }
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1106,7 +1148,7 @@
 	for_each_online_cpu(cpu) {
 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		*per_cpu_ptr(vi->vq_index, cpu) = i;
+		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
 		i++;
 	}
 
@@ -1220,28 +1262,6 @@
 	return 0;
 }
 
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	int txq;
-	struct virtnet_info *vi = netdev_priv(dev);
-
-	if (skb_rx_queue_recorded(skb)) {
-		txq = skb_get_rx_queue(skb);
-	} else {
-		txq = *__this_cpu_ptr(vi->vq_index);
-		if (txq == -1)
-			txq = 0;
-	}
-
-	while (unlikely(txq >= dev->real_num_tx_queues))
-		txq -= dev->real_num_tx_queues;
-
-	return txq;
-}
-
 static const struct net_device_ops virtnet_netdev = {
 	.ndo_open            = virtnet_open,
 	.ndo_stop   	     = virtnet_close,
@@ -1253,7 +1273,6 @@
 	.ndo_get_stats64     = virtnet_stats,
 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-	.ndo_select_queue     = virtnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
@@ -1269,9 +1288,8 @@
 	if (!vi->config_enable)
 		goto done;
 
-	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
-			      offsetof(struct virtio_net_config, status),
-			      &v) < 0)
+	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
+				 struct virtio_net_config, status, &v) < 0)
 		goto done;
 
 	if (v & VIRTIO_NET_S_ANNOUNCE) {
@@ -1336,8 +1354,10 @@
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->mergeable_rx_bufs || vi->big_packets)
+			if (vi->big_packets)
 				give_pages(&vi->rq[i], buf);
+			else if (vi->mergeable_rx_bufs)
+				put_page(virt_to_head_page(buf));
 			else
 				dev_kfree_skb(buf);
 			--vi->rq[i].num;
@@ -1493,9 +1513,9 @@
 	u16 max_queue_pairs;
 
 	/* Find if host supports multiqueue virtio_net device */
-	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
-				offsetof(struct virtio_net_config,
-				max_virtqueue_pairs), &max_queue_pairs);
+	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
+				   struct virtio_net_config,
+				   max_virtqueue_pairs, &max_queue_pairs);
 
 	/* We need at least 2 queues */
 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
@@ -1547,9 +1567,11 @@
 	dev->vlan_features = dev->features;
 
 	/* Configuration may specify what MAC to use.  Otherwise random. */
-	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
-				  offsetof(struct virtio_net_config, mac),
-				  dev->dev_addr, dev->addr_len) < 0)
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+		virtio_cread_bytes(vdev,
+				   offsetof(struct virtio_net_config, mac),
+				   dev->dev_addr, dev->addr_len);
+	else
 		eth_hw_addr_random(dev);
 
 	/* Set up our device-specific information */
@@ -1562,9 +1584,12 @@
 	if (vi->stats == NULL)
 		goto free;
 
-	vi->vq_index = alloc_percpu(int);
-	if (vi->vq_index == NULL)
-		goto free_stats;
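+	/* initialize the per-cpu tx/rx stats seqcounts before the device is registered */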
+	for_each_possible_cpu(i) {
+		struct virtnet_stats *virtnet_stats;
+		virtnet_stats = per_cpu_ptr(vi->stats, i);
+		u64_stats_init(&virtnet_stats->tx_syncp);
+		u64_stats_init(&virtnet_stats->rx_syncp);
+	}
 
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
@@ -1592,7 +1617,7 @@
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_index;
+		goto free_stats;
 
 	netif_set_real_num_tx_queues(dev, 1);
 	netif_set_real_num_rx_queues(dev, 1);
@@ -1643,8 +1668,8 @@
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	virtnet_del_vqs(vi);
-free_index:
-	free_percpu(vi->vq_index);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1678,15 +1703,16 @@
 	unregister_netdev(vi->dev);
 
 	remove_vq_common(vi);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 
 	flush_work(&vi->config_work);
 
-	free_percpu(vi->vq_index);
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtnet_freeze(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
@@ -1777,7 +1803,7 @@
 	.probe =	virtnet_probe,
 	.remove =	virtnet_remove,
 	.config_changed = virtnet_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze =	virtnet_freeze,
 	.restore =	virtnet_restore,
 #endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a03f358..12040a3 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -410,9 +410,9 @@
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
 		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
 
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
 vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ef5b62..0358c07 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -60,10 +60,6 @@
 
 #define VXLAN_N_VID	(1u << 24)
 #define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 #define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
@@ -1884,11 +1880,19 @@
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct vxlan_sock *vs;
+	int i;
 
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *vxlan_stats;
+		vxlan_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&vxlan_stats->syncp);
+	}
+
 	spin_lock(&vn->sock_lock);
 	vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
 	if (vs) {
@@ -2087,7 +2091,7 @@
 	vxlan->age_timer.function = vxlan_cleanup;
 	vxlan->age_timer.data = (unsigned long) vxlan;
 
-	inet_get_local_port_range(&low, &high);
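+	/* the local port range is tracked per network namespace */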
+	inet_get_local_port_range(dev_net(dev), &low, &high);
 	vxlan->port_min = low;
 	vxlan->port_max = high;
 	vxlan->dst_port = htons(vxlan_port);
@@ -2180,7 +2184,7 @@
  * could be used for both IPv4 and IPv6 communications, but
  * users may set bindv6only=1.
  */
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
 {
 	struct sock *sk;
 	struct socket *sock;
@@ -2193,7 +2197,7 @@
 	rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
 	if (rc < 0) {
 		pr_debug("UDPv6 socket create failed\n");
-		return rc;
+		return ERR_PTR(rc);
 	}
 
 	/* Put in proper namespace */
@@ -2208,28 +2212,27 @@
 		pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
 			 &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
 		sk_release_kernel(sk);
-		return rc;
+		return ERR_PTR(rc);
 	}
 	/* At this point, IPv6 module should have been loaded in
 	 * sock_create_kern().
 	 */
 	BUG_ON(!ipv6_stub);
 
-	*psock = sock;
 	/* Disable multicast loopback */
 	inet_sk(sk)->mc_loop = 0;
-	return 0;
+	return sock;
 }
 
 #else
 
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
 {
-		return -EPFNOSUPPORT;
+	return ERR_PTR(-EPFNOSUPPORT);
 }
 #endif
 
-static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v4_sock(struct net *net, __be16 port)
 {
 	struct sock *sk;
 	struct socket *sock;
@@ -2244,7 +2247,7 @@
 	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 	if (rc < 0) {
 		pr_debug("UDP socket create failed\n");
-		return rc;
+		return ERR_PTR(rc);
 	}
 
 	/* Put in proper namespace */
@@ -2257,13 +2260,12 @@
 		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
 			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
 		sk_release_kernel(sk);
-		return rc;
+		return ERR_PTR(rc);
 	}
 
-	*psock = sock;
 	/* Disable multicast loopback */
 	inet_sk(sk)->mc_loop = 0;
-	return 0;
+	return sock;
 }
 
 /* Create new listen socket if needed */
@@ -2274,7 +2276,6 @@
 	struct vxlan_sock *vs;
 	struct socket *sock;
 	struct sock *sk;
-	int rc = 0;
 	unsigned int h;
 
 	vs = kmalloc(sizeof(*vs), GFP_KERNEL);
@@ -2287,12 +2288,12 @@
 	INIT_WORK(&vs->del_work, vxlan_del_work);
 
 	if (ipv6)
-		rc = create_v6_sock(net, port, &sock);
+		sock = create_v6_sock(net, port);
 	else
-		rc = create_v4_sock(net, port, &sock);
-	if (rc < 0) {
+		sock = create_v4_sock(net, port);
+	if (IS_ERR(sock)) {
 		kfree(vs);
-		return ERR_PTR(rc);
+		return ERR_CAST(sock);
 	}
 
 	vs->sock = sock;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d80e42..3d74166 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -220,7 +220,7 @@
 	/* We want a fast IRQ for this device. Actually we'd like an even faster
 	   IRQ ;) - This is one driver RtLinux is made for */
 
-	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, z8530_interrupt, 0,
 			"Hostess SV11", sv) < 0) {
 		pr_warn("IRQ %d already in use\n", irq);
 		goto err_irq;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4f774847..27860b4 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@
 	/* We want a fast IRQ for this device. Actually we'd like an even faster
 	   IRQ ;) - This is one driver RtLinux is made for */
 
-	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, z8530_interrupt, 0,
 			"SeaLevel", dev) < 0) {
 		pr_warn("IRQ %d already in use\n", irq);
 		goto err_request_irq;
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index 8f0fc2e..f57ee67 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -41,6 +41,6 @@
 
 #define X25_ASY_MAGIC 0x5303
 
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
 
 #endif	/* _LINUX_X25_ASY.H */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index f29d554..2416a9d 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -395,20 +395,19 @@
 extern u8 z8530_dead_port[];
 extern u8 z8530_hdlc_kilostream_85230[];
 extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
-					  struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
 
 
 /*
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 9f1e947..649ecad 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -256,21 +256,20 @@
 	i2400mu->rx_size_auto_shrink = 1;
 }
 
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
 
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
 
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
 
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
-				       const struct i2400m_bootrom_header *,
-				       size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
-					   struct i2400m_bootrom_header *,
-					   size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+				const struct i2400m_bootrom_header *, size_t,
+				int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+				    struct i2400m_bootrom_header *, size_t);
 #endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 79c6505..5a34e72 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -710,18 +710,18 @@
 	I2400M_BRI_MAC_REINIT = 1 << 3,
 };
 
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
 static inline
 int i2400m_is_d2h_barker(const void *buf)
 {
 	const __le32 *barker = buf;
 	return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
 }
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
 
 /* Make/grok boot-rom header commands */
 
@@ -789,32 +789,31 @@
 /*
  * Driver / device setup and internal functions
  */
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
 
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
 
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
 
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
-			  const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
-			   enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+		   int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
 enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
 #ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
 #else
 static inline int i2400m_debugfs_add(struct i2400m *i2400m)
 {
@@ -824,8 +823,8 @@
 #endif
 
 /* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
 
 extern struct attribute_group i2400m_dev_attr_group;
 
@@ -873,21 +872,21 @@
 	dev_put(i2400m->wimax_dev.net_dev);
 }
 
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
  * the bus-specific drivers.
  */
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
 
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
 
 
 /*
@@ -900,20 +899,19 @@
 	return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
-				   char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
-				 const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
-			       const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+			  size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+			size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
 
 static inline
 struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@
 	return &iface->cur_altsetting->endpoint[ep].desc;
 }
 
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
-				      enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
-	struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+					  const struct i2400m_tlv_rf_switches_status *);
 
 /*
  * Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@
 
 
 /* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
 
 
 
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e5..cfce83e 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@
 	pci_iounmap(pdev, priv->map);
 
  err_free_dev:
-	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(dev);
 
  err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe1964..edf4b57 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@
 	airo_print_info(dev->name, "Unregistering...");
 	stop_airo_card(dev, 1);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
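
[Editor's note] The adm8211 and airo deletions above lean on the driver
core, which since the v3.9 device-core changes clears the PCI drvdata
pointer itself after a failed probe or an unbind, making an explicit
pci_set_drvdata(pdev, NULL) dead code. A hedged sketch of the resulting
remove path (foo_remove, foo_priv and foo_stop_hw are hypothetical):

	static void foo_remove(struct pci_dev *pdev)
	{
		struct foo_priv *priv = pci_get_drvdata(pdev);

		foo_stop_hw(priv);
		pci_disable_device(pdev);
		/* no pci_set_drvdata(pdev, NULL): the core resets drvdata
		 * once ->remove() returns */
	}
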
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d4..c63d115 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,6 +25,23 @@
 	  Say Y, if you want to debug atheros wireless drivers.
 	  Right now only ath9k makes use of this.
 
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+	bool "Atheros dynamic user regulatory hints"
+	depends on CFG80211_CERTIFICATION_ONUS
+	default n
+	---help---
+	  Say N. This should only be enabled in countries where
+	  this feature is explicitly allowed and only on cards that
+	  specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+	bool "Atheros dynamic user regulatory testing"
+	depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+	default n
+	---help---
+	  Say N. This should only be enabled on systems
+	  undergoing certification testing.
+
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
@@ -32,5 +49,6 @@
 source "drivers/net/wireless/ath/ar5523/Kconfig"
 source "drivers/net/wireless/ath/wil6210/Kconfig"
 source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
 
 endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd..7d023b0 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,13 +5,16 @@
 obj-$(CONFIG_AR5523)		+= ar5523/
 obj-$(CONFIG_WIL6210)		+= wil6210/
 obj-$(CONFIG_ATH10K)		+= ath10k/
+obj-$(CONFIG_WCN36XX)		+= wcn36xx/
 
 obj-$(CONFIG_ATH_COMMON)	+= ath.o
 
 ath-objs :=	main.o \
 		regd.o \
 		hw.o \
-		key.o
+		key.o \
+		dfs_pattern_detector.o \
+		dfs_pri_detector.o
 
 ath-$(CONFIG_ATH_DEBUG) += debug.o
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fec..280fc3d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@
 	AR5523_DEVICE_UX(0x2001, 0x3a00),	/* Dlink / DWLAG132 */
 	AR5523_DEVICE_UG(0x2001, 0x3a02),	/* Dlink / DWLG132 */
 	AR5523_DEVICE_UX(0x2001, 0x3a04),	/* Dlink / DWLAG122 */
+	AR5523_DEVICE_UG(0x07d1, 0x3a07),	/* D-Link / WUA-2340 rev A1 */
 	AR5523_DEVICE_UG(0x1690, 0x0712),	/* Gigaset / AR5523 */
 	AR5523_DEVICE_UG(0x1690, 0x0710),	/* Gigaset / SMCWUSBTG */
 	AR5523_DEVICE_UG(0x129b, 0x160c),	/* Gigaset / USB stick 108
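
[Editor's note] One table line covers two USB IDs here: the
AR5523_DEVICE_UG/UX helpers each expand to a pair of usb_device_id
entries, the normal product ID plus the "pre-firmware" ID (PID + 1) that
the device enumerates with before the firmware upload. Roughly (a hedged
reconstruction; the authoritative definition sits earlier in ar5523.c):

	#define AR5523_DEVICE_UG(vendor, device) \
		{ USB_DEVICE((vendor), (device)) }, \
		{ USB_DEVICE((vendor), (device) + 1), \
			.driver_info = AR5523_FLAG_PRE_FIRMWARE }
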
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d..a1f0996 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
 
 void ath10k_bmi_start(struct ath10k *ar)
 {
-	ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
 	ar->bmi.done_sent = false;
 }
 
@@ -32,8 +33,10 @@
 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
 	if (ar->bmi.done_sent) {
-		ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+		ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
 		return 0;
 	}
 
@@ -46,7 +49,6 @@
 		return ret;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
 	return 0;
 }
 
@@ -59,6 +61,8 @@
 	u32 resplen = sizeof(resp.get_target_info);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("BMI Get Target Info Command disallowed\n");
 		return -EBUSY;
@@ -80,6 +84,7 @@
 
 	target_info->version = __le32_to_cpu(resp.get_target_info.version);
 	target_info->type    = __le32_to_cpu(resp.get_target_info.type);
+
 	return 0;
 }
 
@@ -92,15 +97,14 @@
 	u32 rxlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+		   address, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-		   __func__, ar, address, length);
-
 	while (length) {
 		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
 
@@ -133,15 +137,14 @@
 	u32 txlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+		   address, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-		   __func__, ar, address, length);
-
 	while (length) {
 		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
 
@@ -180,15 +183,14 @@
 	u32 resplen = sizeof(resp.execute);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+		   address, *param);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
-		   __func__, ar, address, *param);
-
 	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
 	cmd.execute.addr  = __cpu_to_le32(address);
 	cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@
 	u32 txlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+		   buffer, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
@@ -250,6 +255,9 @@
 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+		   address);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
@@ -275,6 +283,10 @@
 	u32 trailer_len = length - head_len;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI,
+		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
+		   address, buffer, length);
+
 	ret = ath10k_bmi_lz_stream_start(ar, address);
 	if (ret)
 		return ret;
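
[Editor's note] The bmi.c churn above is a logging cleanup: every message
moves from the generic ATH10K_DBG_CORE mask to a dedicated ATH10K_DBG_BMI
bit and adopts the short lowercase "bmi ..." prefix, so BMI traffic can be
enabled on its own through the driver's debug_mask module parameter.
Conceptually the filter works like this (a sketch only; the real helper is
a function in debug.c):

	/* conceptual sketch of the mask test done inside ath10k_dbg();
	 * ath10k_debug_mask is the module-wide debug_mask parameter */
	if (ath10k_debug_mask & ATH10K_DBG_BMI)
		printk(KERN_DEBUG "ath10k: bmi start\n");
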
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f..e46951b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@
 						      u32 ce_ctrl_addr,
 						      unsigned int n)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *indicator_addr;
-
-	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
-		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-		return;
-	}
-
-	/* workaround for QCA988x_1.0 HW CE */
-	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
-	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
-		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
-	} else {
-		unsigned long irq_flags;
-		local_irq_save(irq_flags);
-		iowrite32(1, indicator_addr);
-
-		/*
-		 * PCIE write waits for ACK in IPQ8K, there is no
-		 * need to read back value.
-		 */
-		(void)ioread32(indicator_addr);
-		(void)ioread32(indicator_addr); /* conservative */
-
-		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
-		iowrite32(0, indicator_addr);
-		local_irq_restore(irq_flags);
-	}
+	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
 }
 
 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 				 void *per_transfer_context,
 				 u32 buffer,
 				 unsigned int nbytes,
@@ -293,7 +264,7 @@
 				 unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ce_ring_state *src_ring = ce_state->src_ring;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	struct ce_desc *desc, *sdesc;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@
 		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
 			    __func__, nbytes, ce_state->src_sz_max);
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return ret;
 
 	if (unlikely(CE_RING_DELTA(nentries_mask,
 				   write_index, sw_index - 1) <= 0)) {
-		ret = -EIO;
+		ret = -ENOSR;
 		goto exit;
 	}
 
@@ -346,7 +319,7 @@
 	return ret;
 }
 
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_context,
 		   u32 buffer,
 		   unsigned int nbytes,
@@ -365,77 +338,26 @@
 	return ret;
 }
 
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
-				unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 {
-	unsigned int num_items = sendlist->num_items;
-	struct ce_sendlist_item *item;
-
-	item = &sendlist->item[num_items];
-	item->data = buffer;
-	item->u.nbytes = nbytes;
-	item->flags = flags;
-	sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-			    void *per_transfer_context,
-			    struct ce_sendlist *sendlist,
-			    unsigned int transfer_id)
-{
-	struct ce_ring_state *src_ring = ce_state->src_ring;
-	struct ce_sendlist_item *item;
-	struct ath10k *ar = ce_state->ar;
+	struct ath10k *ar = pipe->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int num_items = sendlist->num_items;
-	unsigned int sw_index;
-	unsigned int write_index;
-	int i, delta, ret = -ENOMEM;
+	int delta;
 
 	spin_lock_bh(&ar_pci->ce_lock);
-
-	sw_index = src_ring->sw_index;
-	write_index = src_ring->write_index;
-
-	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
-	if (delta >= num_items) {
-		/*
-		 * Handle all but the last item uniformly.
-		 */
-		for (i = 0; i < num_items - 1; i++) {
-			item = &sendlist->item[i];
-			ret = ath10k_ce_send_nolock(ce_state,
-						    CE_SENDLIST_ITEM_CTXT,
-						    (u32) item->data,
-						    item->u.nbytes, transfer_id,
-						    item->flags |
-						    CE_SEND_FLAG_GATHER);
-			if (ret)
-				ath10k_warn("CE send failed for item: %d\n", i);
-		}
-		/*
-		 * Provide valid context pointer for final item.
-		 */
-		item = &sendlist->item[i];
-		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
-					    (u32) item->data, item->u.nbytes,
-					    transfer_id, item->flags);
-		if (ret)
-			ath10k_warn("CE send failed for last item: %d\n", i);
-	}
-
+	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+			      pipe->src_ring->write_index,
+			      pipe->src_ring->sw_index - 1);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
-	return ret;
+	return delta;
 }
 
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
 			       void *per_recv_context,
 			       u32 buffer)
 {
-	struct ce_ring_state *dest_ring = ce_state->dest_ring;
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@
 	write_index = dest_ring->write_index;
 	sw_index = dest_ring->sw_index;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		goto out;
 
 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
 		struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@
 		ret = -EIO;
 	}
 	ath10k_pci_sleep(ar);
+
+out:
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
@@ -479,14 +405,14 @@
  * Guts of ath10k_ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 						void **per_transfer_contextp,
 						u32 *bufferp,
 						unsigned int *nbytesp,
 						unsigned int *transfer_idp,
 						unsigned int *flagsp)
 {
-	struct ce_ring_state *dest_ring = ce_state->dest_ring;
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int sw_index = dest_ring->sw_index;
 
@@ -535,7 +461,7 @@
 	return 0;
 }
 
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
 				  u32 *bufferp,
 				  unsigned int *nbytesp,
@@ -556,11 +482,11 @@
 	return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
 			       u32 *bufferp)
 {
-	struct ce_ring_state *dest_ring;
+	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries_mask;
 	unsigned int sw_index;
 	unsigned int write_index;
@@ -612,19 +538,20 @@
  * Guts of ath10k_ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
 						void **per_transfer_contextp,
 						u32 *bufferp,
 						unsigned int *nbytesp,
 						unsigned int *transfer_idp)
 {
-	struct ce_ring_state *src_ring = ce_state->src_ring;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
+	struct ce_desc *sdesc, *sbase;
 	unsigned int read_index;
-	int ret = -EIO;
+	int ret;
 
 	if (src_ring->hw_index == sw_index) {
 		/*
@@ -634,48 +561,54 @@
 		 * the SW has really caught up to the HW, or if the cached
 		 * value of the HW index has become stale.
 		 */
-		ath10k_pci_wake(ar);
+
+		ret = ath10k_pci_wake(ar);
+		if (ret)
+			return ret;
+
 		src_ring->hw_index =
 			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 		src_ring->hw_index &= nentries_mask;
+
 		ath10k_pci_sleep(ar);
 	}
+
 	read_index = src_ring->hw_index;
 
-	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
-		struct ce_desc *sbase = src_ring->shadow_base;
-		struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+	if ((read_index == sw_index) || (read_index == 0xffffffff))
+		return -EIO;
 
-		/* Return data from completed source descriptor */
-		*bufferp = __le32_to_cpu(sdesc->addr);
-		*nbytesp = __le16_to_cpu(sdesc->nbytes);
-		*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-						CE_DESC_FLAGS_META_DATA);
+	sbase = src_ring->shadow_base;
+	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
 
-		if (per_transfer_contextp)
-			*per_transfer_contextp =
-				src_ring->per_transfer_context[sw_index];
+	/* Return data from completed source descriptor */
+	*bufferp = __le32_to_cpu(sdesc->addr);
+	*nbytesp = __le16_to_cpu(sdesc->nbytes);
+	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+			   CE_DESC_FLAGS_META_DATA);
 
-		/* sanity */
-		src_ring->per_transfer_context[sw_index] = NULL;
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			src_ring->per_transfer_context[sw_index];
 
-		/* Update sw_index */
-		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-		src_ring->sw_index = sw_index;
-		ret = 0;
-	}
+	/* sanity */
+	src_ring->per_transfer_context[sw_index] = NULL;
 
-	return ret;
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	src_ring->sw_index = sw_index;
+
+	return 0;
 }
 
 /* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
 			       u32 *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp)
 {
-	struct ce_ring_state *src_ring;
+	struct ath10k_ce_ring *src_ring;
 	unsigned int nentries_mask;
 	unsigned int sw_index;
 	unsigned int write_index;
@@ -727,7 +660,7 @@
 	return ret;
 }
 
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
 				  u32 *bufferp,
 				  unsigned int *nbytesp,
@@ -756,53 +689,29 @@
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	u32 ctrl_addr = ce_state->ctrl_addr;
-	void *transfer_context;
-	u32 buf;
-	unsigned int nbytes;
-	unsigned int id;
-	unsigned int flags;
+	int ret;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
 	spin_lock_bh(&ar_pci->ce_lock);
 
 	/* Clear the copy-complete interrupts that will be handled here. */
 	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
 					  HOST_IS_COPY_COMPLETE_MASK);
 
-	if (ce_state->recv_cb) {
-		/*
-		 * Pop completed recv buffers and call the registered
-		 * recv callback for each
-		 */
-		while (ath10k_ce_completed_recv_next_nolock(ce_state,
-							    &transfer_context,
-							    &buf, &nbytes,
-							    &id, &flags) == 0) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			ce_state->recv_cb(ce_state, transfer_context, buf,
-					  nbytes, id, flags);
-			spin_lock_bh(&ar_pci->ce_lock);
-		}
-	}
+	spin_unlock_bh(&ar_pci->ce_lock);
 
-	if (ce_state->send_cb) {
-		/*
-		 * Pop completed send buffers and call the registered
-		 * send callback for each
-		 */
-		while (ath10k_ce_completed_send_next_nolock(ce_state,
-							    &transfer_context,
-							    &buf,
-							    &nbytes,
-							    &id) == 0) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			ce_state->send_cb(ce_state, transfer_context,
-					  buf, nbytes, id);
-			spin_lock_bh(&ar_pci->ce_lock);
-		}
-	}
+	if (ce_state->recv_cb)
+		ce_state->recv_cb(ce_state);
+
+	if (ce_state->send_cb)
+		ce_state->send_cb(ce_state);
+
+	spin_lock_bh(&ar_pci->ce_lock);
 
 	/*
 	 * Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@
 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ce_id;
+	int ce_id, ret;
 	u32 intr_summary;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
 	intr_summary = CE_INTERRUPT_SUMMARY(ar);
 
 	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@
  *
  * Called with ce_lock held.
  */
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
 						int disable_copy_compl_intr)
 {
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
+	int ret;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
 
 	if ((!disable_copy_compl_intr) &&
 	    (ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@
 void ath10k_ce_disable_interrupts(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ce_id;
+	int ce_id, ret;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
 	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
-		struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 		u32 ctrl_addr = ce_state->ctrl_addr;
 
 		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@
 	ath10k_pci_sleep(ar);
 }
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-				void (*send_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*send_cb)(struct ath10k_ce_pipe *),
 				int disable_interrupts)
 {
 	struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@
 	spin_unlock_bh(&ar_pci->ce_lock);
 }
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-				void (*recv_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id,
-						 unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*recv_cb)(struct ath10k_ce_pipe *))
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
 				   unsigned int ce_id,
-				   struct ce_state *ce_state,
+				   struct ath10k_ce_pipe *ce_state,
 				   const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_ring_state *src_ring;
+	struct ath10k_ce_ring *src_ring;
 	unsigned int nentries = attr->src_nentries;
 	unsigned int ce_nbytes;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@
 		return 0;
 	}
 
-	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
 	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
 	if (ptr == NULL)
 		return -ENOMEM;
 
-	ce_state->src_ring = (struct ce_ring_state *)ptr;
+	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
 	src_ring = ce_state->src_ring;
 
-	ptr += sizeof(struct ce_ring_state);
+	ptr += sizeof(struct ath10k_ce_ring);
 	src_ring->nentries = nentries;
 	src_ring->nentries_mask = nentries - 1;
 
-	ath10k_pci_wake(ar);
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
 	src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@
 	src_ring->write_index =
 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
 	src_ring->write_index &= src_ring->nentries_mask;
-	ath10k_pci_sleep(ar);
 
 	src_ring->per_transfer_context = (void **)ptr;
 
@@ -970,6 +877,12 @@
 				     (nentries * sizeof(struct ce_desc) +
 				      CE_DESC_RING_ALIGN),
 				     &base_addr);
+	if (!src_ring->base_addr_owner_space_unaligned) {
+		kfree(ce_state->src_ring);
+		ce_state->src_ring = NULL;
+		return -ENOMEM;
+	}
+
 	src_ring->base_addr_ce_space_unaligned = base_addr;
 
 	src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@
 	src_ring->shadow_base_unaligned =
 		kmalloc((nentries * sizeof(struct ce_desc) +
 			 CE_DESC_RING_ALIGN), GFP_KERNEL);
+	if (!src_ring->shadow_base_unaligned) {
+		pci_free_consistent(ar_pci->pdev,
+				    (nentries * sizeof(struct ce_desc) +
+				     CE_DESC_RING_ALIGN),
+				    src_ring->base_addr_owner_space,
+				    src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
+		ce_state->src_ring = NULL;
+		return -ENOMEM;
+	}
 
 	src_ring->shadow_base = PTR_ALIGN(
 			src_ring->shadow_base_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_pci_wake(ar);
 	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
 					 src_ring->base_addr_ce_space);
 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@
 	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-	ath10k_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
 
 	return 0;
 }
 
 static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 				    unsigned int ce_id,
-				    struct ce_state *ce_state,
+				    struct ath10k_ce_pipe *ce_state,
 				    const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_ring_state *dest_ring;
+	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries = attr->dest_nentries;
 	unsigned int ce_nbytes;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@
 		return 0;
 	}
 
-	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
 	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
 	if (ptr == NULL)
 		return -ENOMEM;
 
-	ce_state->dest_ring = (struct ce_ring_state *)ptr;
+	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
 	dest_ring = ce_state->dest_ring;
 
-	ptr += sizeof(struct ce_ring_state);
+	ptr += sizeof(struct ath10k_ce_ring);
 	dest_ring->nentries = nentries;
 	dest_ring->nentries_mask = nentries - 1;
 
-	ath10k_pci_wake(ar);
 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
 	dest_ring->sw_index &= dest_ring->nentries_mask;
 	dest_ring->write_index =
 		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
 	dest_ring->write_index &= dest_ring->nentries_mask;
-	ath10k_pci_sleep(ar);
 
 	dest_ring->per_transfer_context = (void **)ptr;
 
@@ -1055,6 +978,12 @@
 				     (nentries * sizeof(struct ce_desc) +
 				      CE_DESC_RING_ALIGN),
 				     &base_addr);
+	if (!dest_ring->base_addr_owner_space_unaligned) {
+		kfree(ce_state->dest_ring);
+		ce_state->dest_ring = NULL;
+		return -ENOMEM;
+	}
+
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
 
 	/*
@@ -1071,44 +1000,35 @@
 			dest_ring->base_addr_ce_space_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_pci_wake(ar);
 	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
 					  dest_ring->base_addr_ce_space);
 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
 	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-	ath10k_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
 
 	return 0;
 }
 
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
 					     unsigned int ce_id,
 					     const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = NULL;
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 
 	spin_lock_bh(&ar_pci->ce_lock);
 
-	if (!ar_pci->ce_id_to_state[ce_id]) {
-		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
-		if (ce_state == NULL) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			return NULL;
-		}
-
-		ar_pci->ce_id_to_state[ce_id] = ce_state;
-		ce_state->ar = ar;
-		ce_state->id = ce_id;
-		ce_state->ctrl_addr = ctrl_addr;
-		ce_state->state = CE_RUNNING;
-		/* Save attribute flags */
-		ce_state->attr_flags = attr->flags;
-		ce_state->src_sz_max = attr->src_sz_max;
-	}
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ctrl_addr;
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
 
 	spin_unlock_bh(&ar_pci->ce_lock);
 
@@ -1122,12 +1042,17 @@
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 				unsigned int ce_id,
 				const struct ce_attr *attr)
 {
-	struct ce_state *ce_state;
+	struct ath10k_ce_pipe *ce_state;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return NULL;
 
 	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
 	if (!ce_state) {
@@ -1136,40 +1061,38 @@
 	}
 
 	if (attr->src_nentries) {
-		if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
-			ath10k_err("Failed to initialize CE src ring for ID: %d\n",
-				   ce_id);
+		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+		if (ret) {
+			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+				   ce_id, ret);
 			ath10k_ce_deinit(ce_state);
 			return NULL;
 		}
 	}
 
 	if (attr->dest_nentries) {
-		if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
-			ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
-				   ce_id);
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+		if (ret) {
+			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+				   ce_id, ret);
 			ath10k_ce_deinit(ce_state);
 			return NULL;
 		}
 	}
 
 	/* Enable CE error interrupts */
-	ath10k_pci_wake(ar);
 	ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
 	ath10k_pci_sleep(ar);
 
 	return ce_state;
 }
 
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
 {
-	unsigned int ce_id = ce_state->id;
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	ce_state->state = CE_UNUSED;
-	ar_pci->ce_id_to_state[ce_id] = NULL;
-
 	if (ce_state->src_ring) {
 		kfree(ce_state->src_ring->shadow_base_unaligned);
 		pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@
 				    ce_state->dest_ring->base_addr_ce_space);
 		kfree(ce_state->dest_ring);
 	}
-	kfree(ce_state);
+
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
 }
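
[Editor's note] A pattern worth calling out in this ce.c rework:
ath10k_pci_wake() can now fail, so every register-touching path checks its
return value before going near the hardware and pairs the wake with
ath10k_pci_sleep() on the way out. A minimal sketch (example_ce_poke is
hypothetical):

	static int example_ce_poke(struct ath10k *ar, u32 ctrl_addr, u32 n)
	{
		int ret;

		ret = ath10k_pci_wake(ar);	/* may fail; must be checked */
		if (ret)
			return ret;

		ath10k_pci_write32(ar, ctrl_addr + SR_WR_INDEX_ADDRESS, n);
		ath10k_pci_sleep(ar);

		return 0;
	}
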
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c..15d45b5 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN	8
-#define CE_SENDLIST_ITEMS_MAX	12
 #define CE_SEND_FLAG_GATHER	0x00010000
 
 /*
@@ -36,16 +35,9 @@
  * how to use copy engines.
  */
 
-struct ce_state;
+struct ath10k_ce_pipe;
 
 
-/* Copy Engine operational state */
-enum ce_op_state {
-	CE_UNUSED,
-	CE_PAUSED,
-	CE_RUNNING,
-};
-
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@
 	__le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
 	/* Number of entries in this ring; must be power of 2 */
 	unsigned int nentries;
 	unsigned int nentries_mask;
@@ -116,49 +107,20 @@
 	void **per_transfer_context;
 };
 
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
 	struct ath10k *ar;
 	unsigned int id;
 
 	unsigned int attr_flags;
 
 	u32 ctrl_addr;
-	enum ce_op_state state;
 
-	void (*send_cb) (struct ce_state *ce_state,
-			 void *per_transfer_send_context,
-			 u32 buffer,
-			 unsigned int nbytes,
-			 unsigned int transfer_id);
-	void (*recv_cb) (struct ce_state *ce_state,
-			 void *per_transfer_recv_context,
-			 u32 buffer,
-			 unsigned int nbytes,
-			 unsigned int transfer_id,
-			 unsigned int flags);
+	void (*send_cb)(struct ath10k_ce_pipe *);
+	void (*recv_cb)(struct ath10k_ce_pipe *);
 
 	unsigned int src_sz_max;
-	struct ce_ring_state *src_ring;
-	struct ce_ring_state *dest_ring;
-};
-
-struct ce_sendlist_item {
-	/* e.g. buffer or desc list */
-	dma_addr_t data;
-	union {
-		/* simple buffer */
-		unsigned int nbytes;
-		/* Rx descriptor list */
-		unsigned int ndesc;
-	} u;
-	/* externally-specified flags; OR-ed with internal flags */
-	u32 flags;
-};
-
-struct ce_sendlist {
-	unsigned int num_items;
-	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+	struct ath10k_ce_ring *src_ring;
+	struct ath10k_ce_ring *dest_ring;
 };
 
 /* Copy Engine settable attributes */
@@ -182,7 +144,7 @@
  *
  * Implementation note: pushes 1 buffer to Source ring
  */
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_send_context,
 		   u32 buffer,
 		   unsigned int nbytes,
@@ -190,36 +152,11 @@
 		   unsigned int transfer_id,
 		   unsigned int flags);
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-				void (*send_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*send_cb)(struct ath10k_ce_pipe *),
 				int disable_interrupts);
 
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
-				u32 buffer,
-				unsigned int nbytes,
-				/* OR-ed with internal flags */
-				u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- *   ce         - which copy engine to use
- *   sendlist        - list of simple buffers to send using gather
- *   transfer_id     - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-			    void *per_transfer_send_context,
-			    struct ce_sendlist *sendlist,
-			    /* 14 bits */
-			    unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 
 /*==================Recv=======================*/
 
@@ -233,17 +170,12 @@
  *
  * Implementation note: Pushes a buffer to Dest ring.
  */
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
 			       void *per_transfer_recv_context,
 			       u32 buffer);
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-				void (*recv_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id,
-						 unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*recv_cb)(struct ath10k_ce_pipe *));
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -253,7 +185,7 @@
  * Supply data for the next completed unprocessed receive descriptor.
  * Pops buffer from Dest ring.
  */
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
 				  u32 *bufferp,
 				  unsigned int *nbytesp,
@@ -263,7 +195,7 @@
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
  */
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 			   void **per_transfer_contextp,
 			   u32 *bufferp,
 			   unsigned int *nbytesp,
@@ -272,7 +204,7 @@
 /*==================CE Engine Initialization=======================*/
 
 /* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 				unsigned int ce_id,
 				const struct ce_attr *attr);
 
@@ -282,7 +214,7 @@
  * receive buffers.  Target DMA must be stopped before using
  * this API.
  */
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
 			       u32 *bufferp);
 
@@ -291,13 +223,13 @@
  * pending sends.  Target DMA must be stopped before using
  * this API.
  */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
 			       u32 *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
 
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@
 	/* CE_ATTR_* values */
 	unsigned int flags;
 
-	/* currently not in use */
-	unsigned int priority;
-
 	/* #entries in source ring - Must be a power of 2 */
 	unsigned int src_nentries;
 
@@ -336,21 +265,8 @@
 
 	/* #entries in destination ring - Must be a power of 2 */
 	unsigned int dest_nentries;
-
-	/* Future use */
-	void *reserved;
 };
 
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef)
-
 #define SR_BA_ADDRESS		0x0000
 #define SR_SIZE_ADDRESS		0x0004
 #define DR_BA_ADDRESS		0x0008
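
[Editor's note] With the pared-down signatures above, completion draining
moves out of ce.c and into the callback itself: instead of receiving one
finished transfer per invocation, the registered hook pops everything
pending. A hedged usage sketch (example_send_done is hypothetical; the pop
helper is the ath10k_ce_completed_send_next() declared above):

	static void example_send_done(struct ath10k_ce_pipe *pipe)
	{
		void *ctx;
		u32 buf;
		unsigned int nbytes, id;

		/* drain every completed send in one pass */
		while (ath10k_ce_completed_send_next(pipe, &ctx, &buf,
						     &nbytes, &id) == 0)
			;	/* finish the transfer described by ctx */
	}

	/* registered once at pipe setup, interrupts left enabled: */
	ath10k_ce_send_cb_register(pipe, example_send_done, 0);
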
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23..1129994 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
-		.id = QCA988X_HW_1_0_VERSION,
-		.name = "qca988x hw1.0",
-		.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
-		.fw = {
-			.dir = QCA988X_HW_1_0_FW_DIR,
-			.fw = QCA988X_HW_1_0_FW_FILE,
-			.otp = QCA988X_HW_1_0_OTP_FILE,
-			.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
-		},
-	},
-	{
 		.id = QCA988X_HW_2_0_VERSION,
 		.name = "qca988x hw2.0",
 		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,33 +53,12 @@
 
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
-	ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
 
 	ar->is_target_paused = true;
 	wake_up(&ar->event_queue);
 }
 
-static int ath10k_check_fw_version(struct ath10k *ar)
-{
-	char version[32];
-
-	if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
-	    ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
-	    ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
-	    ar->fw_version_build >= SUPPORTED_FW_BUILD)
-		return 0;
-
-	snprintf(version, sizeof(version), "%u.%u.%u.%u",
-		 SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
-		 SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
-
-	ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
-		    ar->hw->wiphy->fw_version);
-	ath10k_warn("Please upgrade to version %s (or newer)\n", version);
-
-	return 0;
-}
-
 static int ath10k_init_connect_htc(struct ath10k *ar)
 {
 	int status;
@@ -112,7 +80,7 @@
 		goto timeout;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
 	return 0;
 
 timeout:
@@ -200,8 +168,7 @@
 	return fw;
 }
 
-static int ath10k_push_board_ext_data(struct ath10k *ar,
-				      const struct firmware *fw)
+static int ath10k_push_board_ext_data(struct ath10k *ar)
 {
 	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
 	u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -214,21 +181,21 @@
 		return ret;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "ath10k: Board extended Data download addr: 0x%x\n",
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot push board extended data addr 0x%x\n",
 		   board_ext_data_addr);
 
 	if (board_ext_data_addr == 0)
 		return 0;
 
-	if (fw->size != (board_data_size + board_ext_data_size)) {
+	if (ar->board_len != (board_data_size + board_ext_data_size)) {
 		ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
-			   fw->size, board_data_size, board_ext_data_size);
+			   ar->board_len, board_data_size, board_ext_data_size);
 		return -EINVAL;
 	}
 
 	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
-				      fw->data + board_data_size,
+				      ar->board_data + board_data_size,
 				      board_ext_data_size);
 	if (ret) {
 		ath10k_err("could not write board ext data (%d)\n", ret);
@@ -247,12 +214,11 @@
 
 static int ath10k_download_board_data(struct ath10k *ar)
 {
-	const struct firmware *fw = ar->board_data;
 	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
 	u32 address;
 	int ret;
 
-	ret = ath10k_push_board_ext_data(ar, fw);
+	ret = ath10k_push_board_ext_data(ar);
 	if (ret) {
 		ath10k_err("could not push board ext data (%d)\n", ret);
 		goto exit;
@@ -264,8 +230,9 @@
 		goto exit;
 	}
 
-	ret = ath10k_bmi_write_memory(ar, address, fw->data,
-				      min_t(u32, board_data_size, fw->size));
+	ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+				      min_t(u32, board_data_size,
+					    ar->board_len));
 	if (ret) {
 		ath10k_err("could not write board data (%d)\n", ret);
 		goto exit;
@@ -283,17 +250,16 @@
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-	const struct firmware *fw = ar->otp;
 	u32 address = ar->hw_params.patch_load_addr;
 	u32 exec_param;
 	int ret;
 
 	/* OTP is optional */
 
-	if (!ar->otp)
+	if (!ar->otp_data || !ar->otp_len)
 		return 0;
 
-	ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
 	if (ret) {
 		ath10k_err("could not write otp (%d)\n", ret);
 		goto exit;
@@ -312,13 +278,13 @@
 
 static int ath10k_download_fw(struct ath10k *ar)
 {
-	const struct firmware *fw = ar->firmware;
 	u32 address;
 	int ret;
 
 	address = ar->hw_params.patch_load_addr;
 
-	ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+	ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
+				       ar->firmware_len);
 	if (ret) {
 		ath10k_err("could not write fw (%d)\n", ret);
 		goto exit;
@@ -330,8 +296,8 @@
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-	if (ar->board_data && !IS_ERR(ar->board_data))
-		release_firmware(ar->board_data);
+	if (ar->board && !IS_ERR(ar->board))
+		release_firmware(ar->board);
 
 	if (ar->otp && !IS_ERR(ar->otp))
 		release_firmware(ar->otp);
@@ -339,12 +305,20 @@
 	if (ar->firmware && !IS_ERR(ar->firmware))
 		release_firmware(ar->firmware);
 
+	ar->board = NULL;
 	ar->board_data = NULL;
+	ar->board_len = 0;
+
 	ar->otp = NULL;
+	ar->otp_data = NULL;
+	ar->otp_len = 0;
+
 	ar->firmware = NULL;
+	ar->firmware_data = NULL;
+	ar->firmware_len = 0;
 }
 
-static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
 {
 	int ret = 0;
 
@@ -358,15 +332,18 @@
 		return -EINVAL;
 	}
 
-	ar->board_data = ath10k_fetch_fw_file(ar,
-					      ar->hw_params.fw.dir,
-					      ar->hw_params.fw.board);
-	if (IS_ERR(ar->board_data)) {
-		ret = PTR_ERR(ar->board_data);
+	ar->board = ath10k_fetch_fw_file(ar,
+					 ar->hw_params.fw.dir,
+					 ar->hw_params.fw.board);
+	if (IS_ERR(ar->board)) {
+		ret = PTR_ERR(ar->board);
 		ath10k_err("could not fetch board data (%d)\n", ret);
 		goto err;
 	}
 
+	ar->board_data = ar->board->data;
+	ar->board_len = ar->board->size;
+
 	ar->firmware = ath10k_fetch_fw_file(ar,
 					    ar->hw_params.fw.dir,
 					    ar->hw_params.fw.fw);
@@ -376,6 +353,9 @@
 		goto err;
 	}
 
+	ar->firmware_data = ar->firmware->data;
+	ar->firmware_len = ar->firmware->size;
+
 	/* OTP may be undefined. If so, don't fetch it at all */
 	if (ar->hw_params.fw.otp == NULL)
 		return 0;
@@ -389,6 +369,9 @@
 		goto err;
 	}
 
+	ar->otp_data = ar->otp->data;
+	ar->otp_len = ar->otp->size;
+
 	return 0;
 
 err:
@@ -396,6 +379,191 @@
 	return ret;
 }
 
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+	size_t magic_len, len, ie_len;
+	int ie_id, i, index, bit, ret;
+	struct ath10k_fw_ie *hdr;
+	const u8 *data;
+	__le32 *timestamp;
+
+	/* first fetch the firmware file (firmware-*.bin) */
+	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+	if (IS_ERR(ar->firmware)) {
+		ath10k_err("Could not fetch firmware file '%s': %ld\n",
+			   name, PTR_ERR(ar->firmware));
+		return PTR_ERR(ar->firmware);
+	}
+
+	data = ar->firmware->data;
+	len = ar->firmware->size;
+
+	/* magic also includes the null byte, check that as well */
+	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+	if (len < magic_len) {
+		ath10k_err("firmware image too small to contain magic: %zu\n",
+			   len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+		ath10k_err("Invalid firmware magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* jump over the padding */
+	magic_len = ALIGN(magic_len, 4);
+
+	len -= magic_len;
+	data += magic_len;
+
+	/* loop elements */
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data += sizeof(*hdr);
+
+		if (len < ie_len) {
+			ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+				   ie_id, len, ie_len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_FW_IE_FW_VERSION:
+			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+				break;
+
+			memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+			ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+			ath10k_dbg(ATH10K_DBG_BOOT,
+				   "found fw version %s\n",
+				    ar->hw->wiphy->fw_version);
+			break;
+		case ATH10K_FW_IE_TIMESTAMP:
+			if (ie_len != sizeof(u32))
+				break;
+
+			timestamp = (__le32 *)data;
+
+			ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+				   le32_to_cpup(timestamp));
+			break;
+		case ATH10K_FW_IE_FEATURES:
+			ath10k_dbg(ATH10K_DBG_BOOT,
+				   "found firmware features ie (%zd B)\n",
+				   ie_len);
+
+			for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+				index = i / 8;
+				bit = i % 8;
+
+				if (index == ie_len)
+					break;
+
+				if (data[index] & (1 << bit))
+					__set_bit(i, ar->fw_features);
+			}
+
+			ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+					ar->fw_features,
+					sizeof(ar->fw_features));
+			break;
+		case ATH10K_FW_IE_FW_IMAGE:
+			ath10k_dbg(ATH10K_DBG_BOOT,
+				   "found fw image ie (%zd B)\n",
+				   ie_len);
+
+			ar->firmware_data = data;
+			ar->firmware_len = ie_len;
+
+			break;
+		case ATH10K_FW_IE_OTP_IMAGE:
+			ath10k_dbg(ATH10K_DBG_BOOT,
+				   "found otp image ie (%zd B)\n",
+				   ie_len);
+
+			ar->otp_data = data;
+			ar->otp_len = ie_len;
+
+			break;
+		default:
+			ath10k_warn("Unknown FW IE: %u\n",
+				    le32_to_cpu(hdr->id));
+			break;
+		}
+
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+	if (!ar->firmware_data || !ar->firmware_len) {
+		ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
+			    name);
+		ret = -ENOMEDIUM;
+		goto err;
+	}
+
+	/* now fetch the board file */
+	if (ar->hw_params.fw.board == NULL) {
+		ath10k_err("board data file not defined");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ar->board = ath10k_fetch_fw_file(ar,
+					 ar->hw_params.fw.dir,
+					 ar->hw_params.fw.board);
+	if (IS_ERR(ar->board)) {
+		ret = PTR_ERR(ar->board);
+		ath10k_err("could not fetch board data (%d)\n", ret);
+		goto err;
+	}
+
+	ar->board_data = ar->board->data;
+	ar->board_len = ar->board->size;
+
+	return 0;
+
+err:
+	ath10k_core_free_firmware_files(ar);
+	return ret;
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+	if (ret == 0) {
+		ar->fw_api = 2;
+		goto out;
+	}
+
+	ret = ath10k_core_fetch_firmware_api_1(ar);
+	if (ret)
+		return ret;
+
+	ar->fw_api = 1;
+
+out:
+	ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+	return 0;
+}
+
 static int ath10k_init_download_firmware(struct ath10k *ar)
 {
 	int ret;
@@ -446,6 +614,13 @@
 		return ret;
 	}
 
+	/* Set the UART baud rate to 19200. */
+	ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+	if (ret) {
+		ath10k_warn("could not set the baud rate (%d)\n", ret);
+		return ret;
+	}
+
 	ath10k_info("UART prints enabled\n");
 	return 0;
 }
@@ -545,6 +720,9 @@
 	INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
 	skb_queue_head_init(&ar->offchan_tx_queue);
 
+	INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+	skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
 	init_waitqueue_head(&ar->event_queue);
 
 	INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -559,6 +737,8 @@
 
 void ath10k_core_destroy(struct ath10k *ar)
 {
+	ath10k_debug_destroy(ar);
+
 	flush_workqueue(ar->workqueue);
 	destroy_workqueue(ar->workqueue);
 
@@ -570,6 +750,8 @@
 {
 	int status;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	ath10k_bmi_start(ar);
 
 	if (ath10k_init_configure_target(ar)) {
@@ -620,10 +802,6 @@
 
 	ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
 
-	status = ath10k_check_fw_version(ar);
-	if (status)
-		goto err_disconnect_htc;
-
 	status = ath10k_wmi_cmd_init(ar);
 	if (status) {
 		ath10k_err("could not send WMI init command (%d)\n", status);
@@ -641,7 +819,12 @@
 	if (status)
 		goto err_disconnect_htc;
 
+	status = ath10k_debug_start(ar);
+	if (status)
+		goto err_disconnect_htc;
+
 	ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+	INIT_LIST_HEAD(&ar->arvifs);
 
 	return 0;
 
@@ -658,6 +841,9 @@
 
 void ath10k_core_stop(struct ath10k *ar)
 {
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_debug_stop(ar);
 	ath10k_htc_stop(&ar->htc);
 	ath10k_htt_detach(&ar->htt);
 	ath10k_wmi_detach(ar);
@@ -704,23 +890,65 @@
 		return ret;
 	}
 
+	mutex_lock(&ar->conf_mutex);
+
 	ret = ath10k_core_start(ar);
 	if (ret) {
 		ath10k_err("could not init core (%d)\n", ret);
 		ath10k_core_free_firmware_files(ar);
 		ath10k_hif_power_down(ar);
+		mutex_unlock(&ar->conf_mutex);
 		return ret;
 	}
 
 	ath10k_core_stop(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
 	ath10k_hif_power_down(ar);
 	return 0;
 }
 
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+	u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+		   ar->chip_id, hw_revision);
+
+	/* Check that we are not using hw1.0 (some of them have the same pci
+	 * id as hw2.0) before doing anything else, as ath10k crashes horribly
+	 * due to missing hw1.0 workarounds. */
+	switch (hw_revision) {
+	case QCA988X_HW_1_0_CHIP_ID_REV:
+		ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+		return -EOPNOTSUPP;
+
+	case QCA988X_HW_2_0_CHIP_ID_REV:
+		/* known hardware revision, continue normally */
+		return 0;
+
+	default:
+		ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+			    ar->chip_id);
+		return 0;
+	}
+
+	return 0;
+}
+
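MS() is the driver's mask-and-shift helper; with the SOC_CHIP_ID_REV
definitions added to hw.h further down, the extraction above is
equivalent to this sketch:

/* SOC_CHIP_ID_REV_MASK = 0x00000f00, SOC_CHIP_ID_REV_LSB = 8 */
u32 hw_revision = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;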
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
 {
 	int status;
 
+	ar->chip_id = chip_id;
+
+	status = ath10k_core_check_chip_id(ar);
+	if (status) {
+		ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+		return status;
+	}
+
 	status = ath10k_core_probe_fw(ar);
 	if (status) {
 		ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +983,7 @@
 	 * Otherwise we will fail to submit commands to FW and mac80211 will be
 	 * unhappy about callback failures. */
 	ath10k_mac_unregister(ar);
+
 	ath10k_core_free_firmware_files(ar);
 }
 EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba56..0934f76 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,27 +43,23 @@
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
 struct ath10k;
 
 struct ath10k_skb_cb {
 	dma_addr_t paddr;
 	bool is_mapped;
 	bool is_aborted;
+	u8 vdev_id;
 
 	struct {
-		u8 vdev_id;
-		u16 msdu_id;
 		u8 tid;
 		bool is_offchan;
-		bool is_conf;
-		bool discard;
-		bool no_ack;
-		u8 refcount;
-		struct sk_buff *txfrag;
-		struct sk_buff *msdu;
-	} __packed htt;
 
-	/* 4 bytes left on 64bit arch */
+		u8 frag_len;
+		u8 pad_len;
+	} __packed htt;
 } __packed;
 
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@
 	bool done_sent;
 };
 
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+	void *vaddr;
+	dma_addr_t paddr;
+	u32 len;
+	u32 req_id;
+};
+
 struct ath10k_wmi {
 	enum ath10k_htc_ep_id eid;
 	struct completion service_ready;
 	struct completion unified_ready;
-	atomic_t pending_tx_count;
-	wait_queue_head_t wq;
+	wait_queue_head_t tx_credits_wq;
+	struct wmi_cmd_map *cmd;
+	struct wmi_vdev_param_map *vdev_param;
+	struct wmi_pdev_param_map *pdev_param;
 
-	struct sk_buff_head wmi_event_list;
-	struct work_struct wmi_event_work;
+	u32 num_mem_chunks;
+	struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
 };
 
 struct ath10k_peer_stat {
@@ -198,17 +205,22 @@
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
 struct ath10k_vif {
+	struct list_head list;
+
 	u32 vdev_id;
 	enum wmi_vdev_type vdev_type;
 	enum wmi_vdev_subtype vdev_subtype;
 	u32 beacon_interval;
 	u32 dtim_period;
+	struct sk_buff *beacon;
 
 	struct ath10k *ar;
 	struct ieee80211_vif *vif;
 
+	struct work_struct wep_key_work;
 	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
-	u8 def_wep_key_index;
+	u8 def_wep_key_idx;
+	u8 def_wep_key_newidx;
 
 	u16 tx_seq_no;
 
@@ -246,6 +258,9 @@
 	u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
 
 	struct completion event_stats_compl;
+
+	unsigned long htt_stats_mask;
+	struct delayed_work htt_stats_dwork;
 };
 
 enum ath10k_state {
@@ -270,12 +285,27 @@
 	ATH10K_STATE_WEDGED,
 };
 
+enum ath10k_fw_features {
+	/* wmi_mgmt_rx_hdr contains extra RSSI information */
+	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+	/* firmware from 10X branch */
+	ATH10K_FW_FEATURE_WMI_10X = 1,
+
+	/* firmware supports management frame tx over WMI,
+	 * otherwise it goes over HTT */
+	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+	/* keep last */
+	ATH10K_FW_FEATURE_COUNT,
+};
+
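DECLARE_BITMAP() in struct ath10k below sizes the bitmap for
ATH10K_FW_FEATURE_COUNT bits, so consumers use the ordinary bitmap
accessors; a minimal sketch (the actual call sites are elsewhere in the
driver, e.g. when picking the WMI command/param maps):

bool is_10x = test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features);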
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
 	struct device *dev;
 	u8 mac_addr[ETH_ALEN];
 
+	u32 chip_id;
 	u32 target_version;
 	u8 fw_version_major;
 	u32 fw_version_minor;
@@ -288,6 +318,8 @@
 	u32 vht_cap_info;
 	u32 num_rf_chains;
 
+	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
 	struct targetdef *targetdef;
 	struct hostdef *hostdef;
 
@@ -319,9 +351,19 @@
 		} fw;
 	} hw_params;
 
-	const struct firmware *board_data;
+	const struct firmware *board;
+	const void *board_data;
+	size_t board_len;
+
 	const struct firmware *otp;
+	const void *otp_data;
+	size_t otp_len;
+
 	const struct firmware *firmware;
+	const void *firmware_data;
+	size_t firmware_len;
+
+	int fw_api;
 
 	struct {
 		struct completion started;
@@ -364,6 +406,7 @@
 	/* protects shared structure data */
 	spinlock_t data_lock;
 
+	struct list_head arvifs;
 	struct list_head peers;
 	wait_queue_head_t peer_mapping_wq;
 
@@ -372,6 +415,9 @@
 	struct completion offchan_tx_completed;
 	struct sk_buff *offchan_tx_skb;
 
+	struct work_struct wmi_mgmt_tx_work;
+	struct sk_buff_head wmi_mgmt_tx_queue;
+
 	enum ath10k_state state;
 
 	struct work_struct restart_work;
@@ -393,7 +439,7 @@
 
 int ath10k_core_start(struct ath10k *ar);
 void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
 void ath10k_core_unregister(struct ath10k *ar);
 
 #endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594..760ff22 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
 #include "core.h"
 #include "debug.h"
 
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
 static int ath10k_printk(const char *level, const char *fmt, ...)
 {
 	struct va_format vaf;
@@ -260,7 +263,6 @@
 	}
 
 	spin_unlock_bh(&ar->data_lock);
-	mutex_unlock(&ar->conf_mutex);
 	complete(&ar->debug.event_stats_compl);
 }
 
@@ -499,6 +501,144 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[50];
+
+	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+	.read = ath10k_read_chip_id,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+	u64 cookie;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->debug.htt_stats_mask == 0)
+		/* htt stats are disabled */
+		return 0;
+
+	if (ar->state != ATH10K_STATE_ON)
+		return 0;
+
+	cookie = get_jiffies_64();
+
+	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+				       cookie);
+	if (ret) {
+		ath10k_warn("failed to send htt stats request: %d\n", ret);
+		return ret;
+	}
+
+	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+	return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k,
+					 debug.htt_stats_dwork.work);
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_debug_htt_stats_req(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+					     const char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long mask;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+	if (ret)
+		return ret;
+
+	/* max 8 bit masks (for now) */
+	if (mask > 0xff)
+		return -E2BIG;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.htt_stats_mask = mask;
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		goto out;
+
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+	.read = ath10k_read_htt_stats_mask,
+	.write = ath10k_write_htt_stats_mask,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
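A write to the new htt_stats_mask file both stores the mask and re-arms
the poll: writing e.g. 0x3 requests HTT stats types 0 and 1 once per
second (ATH10K_DEBUG_HTT_STATS_INTERVAL), while writing 0 lets the
delayed work lapse, since ath10k_debug_htt_stats_req() returns early for
an empty mask. The file sits in the per-phy "ath10k" debugfs directory
created in ath10k_debug_create().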
+int ath10k_debug_start(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		/* continue normally anyway, this isn't serious */
+		ath10k_warn("failed to start htt stats polling: %d\n", ret);
+
+	return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* Must not use _sync here to avoid deadlock; the _sync cancel is
+	 * done in ath10k_debug_destroy() instead. The check for
+	 * htt_stats_mask avoids a warning from del_timer(). */
+	if (ar->debug.htt_stats_mask != 0)
+		cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +647,9 @@
 	if (!ar->debug.debugfs_phy)
 		return -ENOMEM;
 
+	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+			  ath10k_debug_htt_stats_dwork);
+
 	init_completion(&ar->debug.event_stats_compl);
 
 	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@
 	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_simulate_fw_crash);
 
+	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_chip_id);
+
+	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_htt_stats_mask);
+
 	return 0;
 }
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c..3cfe3ee 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,22 +27,26 @@
 	ATH10K_DBG_HTC		= 0x00000004,
 	ATH10K_DBG_HTT		= 0x00000008,
 	ATH10K_DBG_MAC		= 0x00000010,
-	ATH10K_DBG_CORE		= 0x00000020,
+	ATH10K_DBG_BOOT		= 0x00000020,
 	ATH10K_DBG_PCI_DUMP	= 0x00000040,
 	ATH10K_DBG_HTT_DUMP	= 0x00000080,
 	ATH10K_DBG_MGMT		= 0x00000100,
 	ATH10K_DBG_DATA		= 0x00000200,
+	ATH10K_DBG_BMI		= 0x00000400,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
 extern unsigned int ath10k_debug_mask;
 
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
 int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
 void ath10k_debug_read_service_map(struct ath10k *ar,
 				   void *service_map,
 				   size_t map_size);
@@ -50,11 +54,24 @@
 				    struct wmi_stats_event *ev);
 
 #else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
 static inline int ath10k_debug_create(struct ath10k *ar)
 {
 	return 0;
 }
 
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
 static inline void ath10k_debug_read_service_map(struct ath10k *ar,
 						 void *service_map,
 						 size_t map_size)
@@ -68,7 +85,7 @@
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
 				      const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
 		     const char *msg, const char *prefix,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329e..edae50b 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@
 	struct ath10k_htc_hdr *hdr;
 
 	hdr = (struct ath10k_htc_hdr *)skb->data;
-	memset(hdr, 0, sizeof(*hdr));
 
 	hdr->eid = ep->eid;
 	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+	hdr->flags = 0;
 
 	spin_lock_bh(&ep->htc->tx_lock);
 	hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@
 	spin_unlock_bh(&ep->htc->tx_lock);
 }
 
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
-				struct ath10k_htc_ep *ep,
-				struct sk_buff *skb,
-				u8 credits)
-{
-	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-	int ret;
-
-	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
-		   ep->eid, skb);
-
-	ath10k_htc_prepare_tx_skb(ep, skb);
-
-	ret = ath10k_skb_map(htc->ar->dev, skb);
-	if (ret)
-		goto err;
-
-	ret = ath10k_hif_send_head(htc->ar,
-				   ep->ul_pipe_id,
-				   ep->eid,
-				   skb->len,
-				   skb);
-	if (unlikely(ret))
-		goto err;
-
-	return 0;
-err:
-	ath10k_warn("HTC issue failed: %d\n", ret);
-
-	spin_lock_bh(&htc->tx_lock);
-	ep->tx_credits += credits;
-	spin_unlock_bh(&htc->tx_lock);
-
-	/* this is the simplest way to handle out-of-resources for non-credit
-	 * based endpoints. credit based endpoints can still get -ENOSR, but
-	 * this is highly unlikely as credit reservation should prevent that */
-	if (ret == -ENOSR) {
-		spin_lock_bh(&htc->tx_lock);
-		__skb_queue_head(&ep->tx_queue, skb);
-		spin_unlock_bh(&htc->tx_lock);
-
-		return ret;
-	}
-
-	skb_cb->is_aborted = true;
-	ath10k_htc_notify_tx_completion(ep, skb);
-
-	return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
-						       struct ath10k_htc_ep *ep,
-						       u8 *credits)
-{
-	struct sk_buff *skb;
-	struct ath10k_skb_cb *skb_cb;
-	int credits_required;
-	int remainder;
-	unsigned int transfer_len;
-
-	lockdep_assert_held(&htc->tx_lock);
-
-	skb = __skb_dequeue(&ep->tx_queue);
-	if (!skb)
-		return NULL;
-
-	skb_cb = ATH10K_SKB_CB(skb);
-	transfer_len = skb->len;
-
-	if (likely(transfer_len <= htc->target_credit_size)) {
-		credits_required = 1;
-	} else {
-		/* figure out how many credits this message requires */
-		credits_required = transfer_len / htc->target_credit_size;
-		remainder = transfer_len % htc->target_credit_size;
-
-		if (remainder)
-			credits_required++;
-	}
-
-	ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
-		   credits_required, ep->tx_credits);
-
-	if (ep->tx_credits < credits_required) {
-		__skb_queue_head(&ep->tx_queue, skb);
-		return NULL;
-	}
-
-	ep->tx_credits -= credits_required;
-	*credits = credits_required;
-	return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
-	struct ath10k_htc_ep *ep = container_of(work,
-					struct ath10k_htc_ep, send_work);
-	struct ath10k_htc *htc = ep->htc;
-	struct sk_buff *skb;
-	u8 credits = 0;
-	int ret;
-
-	while (true) {
-		if (ep->ul_is_polled)
-			ath10k_htc_send_complete_check(ep, 0);
-
-		spin_lock_bh(&htc->tx_lock);
-		if (ep->tx_credit_flow_enabled)
-			skb = ath10k_htc_get_skb_credit_based(htc, ep,
-							      &credits);
-		else
-			skb = __skb_dequeue(&ep->tx_queue);
-		spin_unlock_bh(&htc->tx_lock);
-
-		if (!skb)
-			break;
-
-		ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
-		if (ret == -ENOSR)
-			break;
-	}
-}
-
 int ath10k_htc_send(struct ath10k_htc *htc,
 		    enum ath10k_htc_ep_id eid,
 		    struct sk_buff *skb)
 {
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+	int credits = 0;
+	int ret;
 
 	if (htc->ar->state == ATH10K_STATE_WEDGED)
 		return -ECOMM;
@@ -254,18 +133,55 @@
 		return -ENOENT;
 	}
 
+	/* FIXME: This looks ugly, can we fix it? */
 	spin_lock_bh(&htc->tx_lock);
 	if (htc->stopped) {
 		spin_unlock_bh(&htc->tx_lock);
 		return -ESHUTDOWN;
 	}
-
-	__skb_queue_tail(&ep->tx_queue, skb);
-	skb_push(skb, sizeof(struct ath10k_htc_hdr));
 	spin_unlock_bh(&htc->tx_lock);
 
-	queue_work(htc->ar->workqueue, &ep->send_work);
+	skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+	if (ep->tx_credit_flow_enabled) {
+		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+		spin_lock_bh(&htc->tx_lock);
+		if (ep->tx_credits < credits) {
+			spin_unlock_bh(&htc->tx_lock);
+			ret = -EAGAIN;
+			goto err_pull;
+		}
+		ep->tx_credits -= credits;
+		spin_unlock_bh(&htc->tx_lock);
+	}
+
+	ath10k_htc_prepare_tx_skb(ep, skb);
+
+	ret = ath10k_skb_map(htc->ar->dev, skb);
+	if (ret)
+		goto err_credits;
+
+	ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+				   skb->len, skb);
+	if (ret)
+		goto err_unmap;
+
 	return 0;
+
+err_unmap:
+	ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+	if (ep->tx_credit_flow_enabled) {
+		spin_lock_bh(&htc->tx_lock);
+		ep->tx_credits += credits;
+		spin_unlock_bh(&htc->tx_lock);
+
+		if (ep->ep_ops.ep_tx_credits)
+			ep->ep_ops.ep_tx_credits(htc->ar);
+	}
+err_pull:
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+	return ret;
 }
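Credit flow control now happens inline in ath10k_htc_send(): the sender
reserves DIV_ROUND_UP(len, credit_size) credits up front and hands them
back on any failure. A worked sketch (the numbers are illustrative, not
taken from real hardware):

int skb_len = 3000, credit_size = 1792;
int credits = DIV_ROUND_UP(skb_len, credit_size);	/* = 2 */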
 
 static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@
 	ath10k_htc_notify_tx_completion(ep, skb);
 	/* the skb now belongs to the completion handler */
 
-	/* note: when using TX credit flow, the re-checking of queues happens
-	 * when credits flow back from the target.  in the non-TX credit case,
-	 * we recheck after the packet completes */
-	spin_lock_bh(&htc->tx_lock);
-	if (!ep->tx_credit_flow_enabled && !htc->stopped)
-		queue_work(ar->workqueue, &ep->send_work);
-	spin_unlock_bh(&htc->tx_lock);
-
 	return 0;
 }
 
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
-					 struct ath10k_htc_ep *ep)
-{
-	struct sk_buff *skb;
-	struct ath10k_skb_cb *skb_cb;
-
-	spin_lock_bh(&htc->tx_lock);
-	for (;;) {
-		skb = __skb_dequeue(&ep->tx_queue);
-		if (!skb)
-			break;
-
-		skb_cb = ATH10K_SKB_CB(skb);
-		skb_cb->is_aborted = true;
-		ath10k_htc_notify_tx_completion(ep, skb);
-	}
-	spin_unlock_bh(&htc->tx_lock);
-
-	cancel_work_sync(&ep->send_work);
-}
-
 /***********/
 /* Receive */
 /***********/
@@ -340,8 +226,11 @@
 		ep = &htc->endpoint[report->eid];
 		ep->tx_credits += report->credits;
 
-		if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
-			queue_work(htc->ar->workqueue, &ep->send_work);
+		if (ep->ep_ops.ep_tx_credits) {
+			spin_unlock_bh(&htc->tx_lock);
+			ep->ep_ops.ep_tx_credits(htc->ar);
+			spin_lock_bh(&htc->tx_lock);
+		}
 	}
 	spin_unlock_bh(&htc->tx_lock);
 }
@@ -599,10 +488,8 @@
 		ep->max_ep_message_len = 0;
 		ep->max_tx_queue_depth = 0;
 		ep->eid = i;
-		skb_queue_head_init(&ep->tx_queue);
 		ep->htc = htc;
 		ep->tx_credit_flow_enabled = true;
-		INIT_WORK(&ep->send_work, ath10k_htc_send_work);
 	}
 }
 
@@ -647,7 +534,7 @@
 	u16 credit_count;
 	u16 credit_size;
 
-	INIT_COMPLETION(htc->ctl_resp);
+	reinit_completion(&htc->ctl_resp);
 
 	status = ath10k_hif_start(htc->ar);
 	if (status) {
@@ -752,8 +639,8 @@
 	tx_alloc = ath10k_htc_get_credit_allocation(htc,
 						    conn_req->service_id);
 	if (!tx_alloc)
-		ath10k_dbg(ATH10K_DBG_HTC,
-			   "HTC Service %s does not allocate target credits\n",
+		ath10k_dbg(ATH10K_DBG_BOOT,
+			   "boot htc service %s does not allocate target credits\n",
 			   htc_service_name(conn_req->service_id));
 
 	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,17 +659,17 @@
 
 	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
 
-	req_msg = &msg->connect_service;
-	req_msg->flags = __cpu_to_le16(flags);
-	req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
 	/* Only enable credit flow control for WMI ctrl service */
 	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
 		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
 		disable_credit_flow_ctrl = true;
 	}
 
-	INIT_COMPLETION(htc->ctl_resp);
+	req_msg = &msg->connect_service;
+	req_msg->flags = __cpu_to_le16(flags);
+	req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+	reinit_completion(&htc->ctl_resp);
 
 	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
 	if (status) {
@@ -873,19 +760,19 @@
 	if (status)
 		return status;
 
-	ath10k_dbg(ATH10K_DBG_HTC,
-		   "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
 		   htc_service_name(ep->service_id), ep->ul_pipe_id,
 		   ep->dl_pipe_id, ep->eid);
 
-	ath10k_dbg(ATH10K_DBG_HTC,
-		   "EP %d UL polled: %d, DL polled: %d\n",
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot htc ep %d ul polled %d dl polled %d\n",
 		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);
 
 	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
 		ep->tx_credit_flow_enabled = false;
-		ath10k_dbg(ATH10K_DBG_HTC,
-			   "HTC service: %s eid: %d TX flow control disabled\n",
+		ath10k_dbg(ATH10K_DBG_BOOT,
+			   "boot htc service '%s' eid %d TX flow control disabled\n",
 			   htc_service_name(ep->service_id), assigned_eid);
 	}
 
@@ -945,18 +832,10 @@
  */
 void ath10k_htc_stop(struct ath10k_htc *htc)
 {
-	int i;
-	struct ath10k_htc_ep *ep;
-
 	spin_lock_bh(&htc->tx_lock);
 	htc->stopped = true;
 	spin_unlock_bh(&htc->tx_lock);
 
-	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
-		ep = &htc->endpoint[i];
-		ath10k_htc_flush_endpoint_tx(htc, ep);
-	}
-
 	ath10k_hif_stop(htc->ar);
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c7..4716d33 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@
 struct ath10k_htc_ep_ops {
 	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
 	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+	void (*ep_tx_credits)(struct ath10k *);
 };
 
 /* service connection information */
@@ -315,15 +316,11 @@
 	int ul_is_polled; /* call HIF to get tx completions */
 	int dl_is_polled; /* call HIF to fetch rx (not implemented) */
 
-	struct sk_buff_head tx_queue;
-
 	u8 seq_no; /* for debugging */
 	int tx_credits;
 	int tx_credit_size;
 	int tx_credits_per_max_message;
 	bool tx_credit_flow_enabled;
-
-	struct work_struct send_work;
 };
 
 struct ath10k_htc_svc_tx_credits {
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5..5f7eeeb 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@
 
 static int ath10k_htt_verify_version(struct ath10k_htt *htt)
 {
-	ath10k_dbg(ATH10K_DBG_HTT,
-		   "htt target version %d.%d; host version %d.%d\n",
-		    htt->target_version_major,
-		    htt->target_version_minor,
-		    HTT_CURRENT_VERSION_MAJOR,
-		    HTT_CURRENT_VERSION_MINOR);
+	ath10k_info("htt target version %d.%d\n",
+		    htt->target_version_major, htt->target_version_minor);
 
-	if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
-		ath10k_err("htt major versions are incompatible!\n");
+	if (htt->target_version_major != 2 &&
+	    htt->target_version_major != 3) {
+		ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+			   htt->target_version_major);
 		return -ENOTSUPP;
 	}
 
-	if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
-		ath10k_warn("htt minor version differ but still compatible\n");
-
 	return 0;
 }
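Accepting both major versions matters because the negotiated version now
steers the management tx path; the consumer in the htt_tx.c hunk below
reads:

use_frags = htt->target_version_major < 3 ||
	    !ieee80211_is_mgmt(hdr->frame_control);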
 
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be46..1a337e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
 #define _HTT_H_
 
 #include <linux/bug.h>
+#include <linux/interrupt.h>
 
 #include "htc.h"
 #include "rx_desc.h"
 
-#define HTT_CURRENT_VERSION_MAJOR	2
-#define HTT_CURRENT_VERSION_MINOR	1
-
 enum htt_dbg_stats_type {
 	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
 	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
@@ -45,6 +43,9 @@
 	HTT_H2T_MSG_TYPE_SYNC               = 4,
 	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
 	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+	/* This command is used for sending management frames in HTT < 3.0.
+	 * HTT >= 3.0 uses TX_FRM for everything. */
 	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
 
 	HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@
 	/* set if host-fw communication goes haywire
 	 * used to avoid further failures */
 	bool rx_confused;
+	struct tasklet_struct rx_replenish_task;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@
 #define HTT_RX_BUF_SIZE 1920
 #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
 
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
 /*
  * DMA_MAP expects the buffer to be an integral number of cache lines.
  * Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40..90d4f74 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
 #include "htt.h"
 #include "txrx.h"
 #include "debug.h"
+#include "trace.h"
 
 #include <linux/log2.h>
 
@@ -40,6 +41,10 @@
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
 static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
 {
 	int size;
@@ -177,10 +182,27 @@
 
 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
 {
-	int ret, num_to_fill;
+	int ret, num_deficit, num_to_fill;
 
+	/* Refilling the whole RX ring buffer proves to be a bad idea. The
+	 * reason is RX may take up a significant amount of CPU cycles and
+	 * starve other tasks, e.g. TX on an ethernet device while acting as
+	 * a bridge with the ath10k wlan interface. This ended up with very
+	 * poor performance once the host CPU was overwhelmed with RX on
+	 * ath10k.
+	 *
+	 * By limiting the number of refills the replenishing occurs
+	 * progressively. This in turn makes use of the fact that tasklets
+	 * are processed in FIFO order. This means actual RX processing can
+	 * starve out refilling. If there aren't enough buffers on the RX
+	 * ring the FW will not report RX until it is refilled with enough
+	 * buffers. This automatically balances load with respect to CPU
+	 * power.
+	 *
+	 * This probably comes at a cost of lower maximum throughput but
+	 * improves the average and stability. */
 	spin_lock_bh(&htt->rx_ring.lock);
-	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+	num_deficit -= num_to_fill;
 	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
 	if (ret == -ENOMEM) {
 		/*
@@ -191,6 +213,8 @@
 		 */
 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+	} else if (num_deficit > 0) {
+		tasklet_schedule(&htt->rx_replenish_task);
 	}
 	spin_unlock_bh(&htt->rx_ring.lock);
 }
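For example, a deficit of 100 buffers is now recovered over seven tasklet
rounds (6 * 16 + 4, with ATH10K_HTT_MAX_NUM_REFILL = 16) rather than in
one burst, each round queued behind whatever RX work is already pending.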
@@ -212,6 +236,7 @@
 	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
+	tasklet_kill(&htt->rx_replenish_task);
 
 	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
 		struct sk_buff *skb =
@@ -441,6 +466,12 @@
 	return msdu_chaining;
 }
 
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+	ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
 int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 {
 	dma_addr_t paddr;
@@ -501,7 +532,10 @@
 	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
 		goto err_fill_ring;
 
-	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+		     (unsigned long)htt);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
 		   htt->rx_ring.size, htt->rx_ring.fill_level);
 	return 0;
 
@@ -590,134 +624,144 @@
 	return false;
 }
 
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
-			struct htt_rx_info *info)
+struct rfc1042_hdr {
+	u8 llc_dsap;
+	u8 llc_ssap;
+	u8 llc_ctrl;
+	u8 snap_oui[3];
+	__be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	__be16 len;
+} __packed;
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+				struct htt_rx_info *info)
 {
 	struct htt_rx_desc *rxd;
-	struct sk_buff *amsdu;
 	struct sk_buff *first;
-	struct ieee80211_hdr *hdr;
 	struct sk_buff *skb = info->skb;
 	enum rx_msdu_decap_format fmt;
 	enum htt_rx_mpdu_encrypt_type enctype;
+	struct ieee80211_hdr *hdr;
+	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
 	unsigned int hdr_len;
-	int crypto_len;
 
 	rxd = (void *)skb->data - sizeof(*rxd);
-	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
-			RX_MSDU_START_INFO1_DECAP_FORMAT);
 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
 
-	/* FIXME: No idea what assumptions are safe here. Need logs */
-	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
-	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
-		ath10k_htt_rx_free_msdu_chain(skb->next);
-		skb->next = NULL;
-		return -ENOTSUPP;
-	}
+	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	memcpy(hdr_buf, hdr, hdr_len);
+	hdr = (struct ieee80211_hdr *)hdr_buf;
 
-	/* A-MSDU max is a little less than 8K */
-	amsdu = dev_alloc_skb(8*1024);
-	if (!amsdu) {
-		ath10k_warn("A-MSDU allocation failed\n");
-		ath10k_htt_rx_free_msdu_chain(skb->next);
-		skb->next = NULL;
-		return -ENOMEM;
-	}
-
-	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
-		int hdrlen;
-
-		hdr = (void *)rxd->rx_hdr_status;
-		hdrlen = ieee80211_hdrlen(hdr->frame_control);
-		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
-	}
+	/* FIXME: Hopefully this is a temporary measure.
+	 *
+	 * Reporting individual A-MSDU subframes means each reported frame
+	 * shares the same sequence number.
+	 *
+	 * mac80211 drops frames it recognizes as duplicates, i.e.
+	 * retransmission flag is set and sequence number matches sequence
+	 * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
+	 * "Duplicate detection and recovery")
+	 *
+	 * To avoid frames being dropped, clear the retransmission flag for
+	 * all received A-MSDUs.
+	 *
+	 * Worst case: actual duplicate frames will be reported but this should
+	 * still be handled gracefully by other OSI/ISO layers. */
+	hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
 
 	first = skb;
 	while (skb) {
 		void *decap_hdr;
-		int decap_len = 0;
+		int len;
 
 		rxd = (void *)skb->data - sizeof(*rxd);
 		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
-				RX_MSDU_START_INFO1_DECAP_FORMAT);
+			 RX_MSDU_START_INFO1_DECAP_FORMAT);
 		decap_hdr = (void *)rxd->rx_hdr_status;
 
+		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
+
+		/* First frame in an A-MSDU chain has more decapped data. */
 		if (skb == first) {
-			/* We receive linked A-MSDU subframe skbuffs. The
-			 * first one contains the original 802.11 header (and
-			 * possible crypto param) in the RX descriptor. The
-			 * A-MSDU subframe header follows that. Each part is
-			 * aligned to 4 byte boundary. */
-
-			hdr = (void *)amsdu->data;
-			hdr_len = ieee80211_hdrlen(hdr->frame_control);
-			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
-			decap_hdr += roundup(hdr_len, 4);
-			decap_hdr += roundup(crypto_len, 4);
+			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+					4);
+			decap_hdr += len;
 		}
 
-		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
-			/* Ethernet2 decap inserts ethernet header in place of
-			 * A-MSDU subframe header. */
-			skb_pull(skb, 6 + 6 + 2);
-
-			/* A-MSDU subframe header length */
-			decap_len += 6 + 6 + 2;
-
-			/* Ethernet2 decap also strips the LLC/SNAP so we need
-			 * to re-insert it. The LLC/SNAP follows A-MSDU
-			 * subframe header. */
-			/* FIXME: Not all LLCs are 8 bytes long */
-			decap_len += 8;
-
-			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
-		}
-
-		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
-			/* Native Wifi decap inserts regular 802.11 header
-			 * in place of A-MSDU subframe header. */
+		switch (fmt) {
+		case RX_MSDU_DECAP_RAW:
+			/* remove trailing FCS */
+			skb_trim(skb, skb->len - FCS_LEN);
+			break;
+		case RX_MSDU_DECAP_NATIVE_WIFI:
+			/* pull decapped header and copy DA */
 			hdr = (struct ieee80211_hdr *)skb->data;
-			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+			hdr_len = ieee80211_hdrlen(hdr->frame_control);
+			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+			skb_pull(skb, hdr_len);
 
-			/* A-MSDU subframe header length */
-			decap_len += 6 + 6 + 2;
+			/* push original 802.11 header */
+			hdr = (struct ieee80211_hdr *)hdr_buf;
+			hdr_len = ieee80211_hdrlen(hdr->frame_control);
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 
-			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+			/* the original header has the A-MSDU-present bit set
+			 * but we're not including the subframe header */
+			hdr = (struct ieee80211_hdr *)skb->data;
+			qos = ieee80211_get_qos_ctl(hdr);
+			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+			/* original 802.11 header has a different DA */
+			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+			break;
+		case RX_MSDU_DECAP_ETHERNET2_DIX:
+			/* strip ethernet header and insert decapped 802.11
+			 * header, amsdu subframe header and rfc1042 header */
+
+			len = 0;
+			len += sizeof(struct rfc1042_hdr);
+			len += sizeof(struct amsdu_subframe_hdr);
+
+			skb_pull(skb, sizeof(struct ethhdr));
+			memcpy(skb_push(skb, len), decap_hdr, len);
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+			break;
+		case RX_MSDU_DECAP_8023_SNAP_LLC:
+			/* insert decapped 802.11 header making a single
+			 * A-MSDU */
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+			break;
 		}
 
-		if (fmt == RX_MSDU_DECAP_RAW)
-			skb_trim(skb, skb->len - 4); /* remove FCS */
-
-		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
-
-		/* A-MSDU subframes are padded to 4bytes
-		 * but relative to first subframe, not the whole MPDU */
-		if (skb->next && ((decap_len + skb->len) & 3)) {
-			int padlen = 4 - ((decap_len + skb->len) & 3);
-			memset(skb_put(amsdu, padlen), 0, padlen);
-		}
-
+		info->skb = skb;
+		info->encrypt_type = enctype;
 		skb = skb->next;
+		info->skb->next = NULL;
+
+		ath10k_process_rx(htt->ar, info);
 	}
 
-	info->skb = amsdu;
-	info->encrypt_type = enctype;
-
-	ath10k_htt_rx_free_msdu_chain(first);
-
-	return 0;
+	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+	 * monitor interface active for sniffing purposes. */
 }
 
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
 {
 	struct sk_buff *skb = info->skb;
 	struct htt_rx_desc *rxd;
 	struct ieee80211_hdr *hdr;
 	enum rx_msdu_decap_format fmt;
 	enum htt_rx_mpdu_encrypt_type enctype;
+	int hdr_len;
+	void *rfc1042;
 
 	/* This shouldn't happen. If it does than it may be a FW bug. */
 	if (skb->next) {
@@ -731,49 +775,53 @@
 			RX_MSDU_START_INFO1_DECAP_FORMAT);
 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
 
 	switch (fmt) {
 	case RX_MSDU_DECAP_RAW:
 		/* remove trailing FCS */
-		skb_trim(skb, skb->len - 4);
+		skb_trim(skb, skb->len - FCS_LEN);
 		break;
 	case RX_MSDU_DECAP_NATIVE_WIFI:
-		/* nothing to do here */
+		/* Pull decapped header */
+		hdr = (struct ieee80211_hdr *)skb->data;
+		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		skb_pull(skb, hdr_len);
+
+		/* Push original header */
+		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 		break;
 	case RX_MSDU_DECAP_ETHERNET2_DIX:
-		/* macaddr[6] + macaddr[6] + ethertype[2] */
-		skb_pull(skb, 6 + 6 + 2);
+		/* strip ethernet header and insert decapped 802.11 header and
+		 * rfc1042 header */
+
+		rfc1042 = hdr;
+		rfc1042 += roundup(hdr_len, 4);
+		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+
+		skb_pull(skb, sizeof(struct ethhdr));
+		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+		       rfc1042, sizeof(struct rfc1042_hdr));
+		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 		break;
 	case RX_MSDU_DECAP_8023_SNAP_LLC:
-		/* macaddr[6] + macaddr[6] + len[2] */
-		/* we don't need this for non-A-MSDU */
-		skb_pull(skb, 6 + 6 + 2);
+		/* remove A-MSDU subframe header and insert
+		 * decapped 802.11 header. rfc1042 header is already there */
+
+		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 		break;
 	}
 
-	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
-		void *llc;
-		int llclen;
-
-		llclen = 8;
-		llc  = hdr;
-		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
-		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
-		skb_push(skb, llclen);
-		memcpy(skb->data, llc, llclen);
-	}
-
-	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
-		int len = ieee80211_hdrlen(hdr->frame_control);
-		skb_push(skb, len);
-		memcpy(skb->data, hdr, len);
-	}
-
 	info->skb = skb;
 	info->encrypt_type = enctype;
-	return 0;
+
+	ath10k_process_rx(htt->ar, info);
 }
 
 static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@
 	int fw_desc_len;
 	u8 *fw_desc;
 	int i, j;
-	int ret;
-	int ip_summed;
 
 	memset(&info, 0, sizeof(info));
 
@@ -921,11 +967,6 @@
 				continue;
 			}
 
-			/* The skb is not yet processed and it may be
-			 * reallocated. Since the offload is in the original
-			 * skb extract the checksum now and assign it later */
-			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
 			info.skb     = msdu_head;
 			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
 			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@
 			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
 			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
-				ret = ath10k_htt_rx_amsdu(htt, &info);
+				ath10k_htt_rx_amsdu(htt, &info);
 			else
-				ret = ath10k_htt_rx_msdu(htt, &info);
-
-			if (ret && !info.fcs_err) {
-				ath10k_warn("error processing msdus %d\n", ret);
-				dev_kfree_skb_any(info.skb);
-				continue;
-			}
-
-			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
-				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
-			info.skb->ip_summed = ip_summed;
-
-			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
-					info.skb->data, info.skb->len);
-			ath10k_process_rx(htt->ar, &info);
+				ath10k_htt_rx_msdu(htt, &info);
 		}
 	}
 
-	ath10k_htt_rx_msdu_buff_replenish(htt);
+	tasklet_schedule(&htt->rx_replenish_task);
 }
 
 static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@
 			break;
 		}
 
-		ath10k_txrx_tx_completed(htt, &tx_done);
+		ath10k_txrx_tx_unref(htt, &tx_done);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@
 		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
 			msdu_id = resp->data_tx_completion.msdus[i];
 			tx_done.msdu_id = __le16_to_cpu(msdu_id);
-			ath10k_txrx_tx_completed(htt, &tx_done);
+			ath10k_txrx_tx_unref(htt, &tx_done);
 		}
 		break;
 	}
@@ -1190,8 +1216,10 @@
 	case HTT_T2H_MSG_TYPE_TEST:
 		/* FIX THIS */
 		break;
-	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
 	case HTT_T2H_MSG_TYPE_STATS_CONF:
+		trace_ath10k_htt_stats(skb->data, skb->len);
+		break;
+	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
 	case HTT_T2H_MSG_TYPE_RX_DELBA:
 	case HTT_T2H_MSG_TYPE_RX_FLUSH:
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c254..d9335e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@
 	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
 								   pipe);
 
-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
 	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@
 
 static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 {
-	struct sk_buff *txdesc;
+	struct htt_tx_done tx_done = {0};
 	int msdu_id;
 
 	/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@
 		if (!test_bit(msdu_id, htt->used_msdu_ids))
 			continue;
 
-		txdesc = htt->pending_tx[msdu_id];
-		if (!txdesc)
-			continue;
-
 		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
 			   msdu_id);
 
-		if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
-			ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+		tx_done.discard = 1;
+		tx_done.msdu_id = msdu_id;
 
-		ATH10K_SKB_CB(txdesc)->htt.discard = true;
-		ath10k_txrx_tx_unref(htt, txdesc);
+		ath10k_txrx_tx_unref(htt, &tx_done);
 	}
 }
 
@@ -152,26 +147,7 @@
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-	struct ath10k_htt *htt = &ar->htt;
-
-	if (skb_cb->htt.is_conf) {
-		dev_kfree_skb_any(skb);
-		return;
-	}
-
-	if (skb_cb->is_aborted) {
-		skb_cb->htt.discard = true;
-
-		/* if the skbuff is aborted we need to make sure we'll free up
-		 * the tx resources, we can't simply run tx_unref() 2 times
-		 * because if htt tx completion came in earlier we'd access
-		 * unallocated memory */
-		if (skb_cb->htt.refcount > 1)
-			skb_cb->htt.refcount = 1;
-	}
-
-	ath10k_txrx_tx_unref(htt, skb);
+	dev_kfree_skb_any(skb);
 }
 
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@
 	cmd = (struct htt_cmd *)skb->data;
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
 
-	ATH10K_SKB_CB(skb)->htt.is_conf = true;
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+	struct htt_stats_req *req;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len = 0, ret;
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->stats_req);
+
+	skb = ath10k_htc_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+	req = &cmd->stats_req;
+
+	memset(req, 0, sizeof(*req));
+
+	/* currently we support at most 8-bit masks so there is no need to
+	 * worry about endianness */
+	req->upload_types[0] = mask;
+	req->reset_types[0] = mask;
+	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
 
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
+		ath10k_warn("failed to send htt stats request: %d\n", ret);
 		dev_kfree_skb_any(skb);
 		return ret;
 	}
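The 64-bit jiffies cookie is carried as two little-endian 32-bit halves
(note the 0xffffffff00000000ULL mask above is redundant with the shift);
a condensed sketch:

u64 cookie = get_jiffies_64();
__le32 lsb = cpu_to_le32(cookie & 0xffffffff);	/* cookie_lsb */
__le32 msb = cpu_to_le32(cookie >> 32);		/* cookie_msb */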
@@ -279,8 +293,6 @@
 
 #undef desc_offset
 
-	ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
 	struct device *dev = htt->ar->dev;
-	struct ath10k_skb_cb *skb_cb;
 	struct sk_buff *txdesc = NULL;
 	struct htt_cmd *cmd;
-	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	u8 vdev_id = skb_cb->vdev_id;
 	int len = 0;
 	int msdu_id = -1;
 	int res;
@@ -304,30 +316,30 @@
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
-		return res;
+		goto err;
 
 	len += sizeof(cmd->hdr);
 	len += sizeof(cmd->mgmt_tx);
 
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	if (res < 0) {
+		spin_unlock_bh(&htt->tx_lock);
+		goto err_tx_dec;
+	}
+	msdu_id = res;
+	htt->pending_tx[msdu_id] = msdu;
+	spin_unlock_bh(&htt->tx_lock);
+
 	txdesc = ath10k_htc_alloc_skb(len);
 	if (!txdesc) {
 		res = -ENOMEM;
-		goto err;
+		goto err_free_msdu_id;
 	}
 
-	spin_lock_bh(&htt->tx_lock);
-	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-	if (msdu_id < 0) {
-		spin_unlock_bh(&htt->tx_lock);
-		res = msdu_id;
-		goto err;
-	}
-	htt->pending_tx[msdu_id] = txdesc;
-	spin_unlock_bh(&htt->tx_lock);
-
 	res = ath10k_skb_map(dev, msdu);
 	if (res)
-		goto err;
+		goto err_free_txdesc;
 
 	skb_put(txdesc, len);
 	cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@
 	memcpy(cmd->mgmt_tx.hdr, msdu->data,
 	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-	/* refcount is decremented by HTC and HTT completions until it reaches
-	 * zero and is freed */
-	skb_cb = ATH10K_SKB_CB(txdesc);
-	skb_cb->htt.msdu_id = msdu_id;
-	skb_cb->htt.refcount = 2;
-	skb_cb->htt.msdu = msdu;
+	skb_cb->htt.frag_len = 0;
+	skb_cb->htt.pad_len = 0;
 
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
-		goto err;
+		goto err_unmap_msdu;
 
 	return 0;
 
-err:
+err_unmap_msdu:
 	ath10k_skb_unmap(dev, msdu);
-
-	if (txdesc)
-		dev_kfree_skb_any(txdesc);
-	if (msdu_id >= 0) {
-		spin_lock_bh(&htt->tx_lock);
-		htt->pending_tx[msdu_id] = NULL;
-		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-		spin_unlock_bh(&htt->tx_lock);
-	}
+err_free_txdesc:
+	dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	htt->pending_tx[msdu_id] = NULL;
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
 	ath10k_htt_tx_dec_pending(htt);
+err:
 	return res;
 }
 
@@ -373,13 +381,12 @@
 	struct htt_cmd *cmd;
 	struct htt_data_tx_desc_frag *tx_frags;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-	struct ath10k_skb_cb *skb_cb;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
 	struct sk_buff *txdesc = NULL;
-	struct sk_buff *txfrag = NULL;
-	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+	bool use_frags;
+	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
 	u8 tid;
-	int prefetch_len, desc_len, frag_len;
-	dma_addr_t frags_paddr;
+	int prefetch_len, desc_len;
 	int msdu_id = -1;
 	int res;
 	u8 flags0;
@@ -387,69 +394,82 @@
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
-		return res;
+		goto err;
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	if (res < 0) {
+		spin_unlock_bh(&htt->tx_lock);
+		goto err_tx_dec;
+	}
+	msdu_id = res;
+	htt->pending_tx[msdu_id] = msdu;
+	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
 	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-	frag_len = sizeof(*tx_frags) * 2;
 
 	txdesc = ath10k_htc_alloc_skb(desc_len);
 	if (!txdesc) {
 		res = -ENOMEM;
-		goto err;
+		goto err_free_msdu_id;
 	}
 
-	txfrag = dev_alloc_skb(frag_len);
-	if (!txfrag) {
-		res = -ENOMEM;
-		goto err;
-	}
+	/* Since HTT 3.0 there is no separate mgmt tx command. However, when
+	 * mgmt frames are sent via TX_FRM there is no tx fragment list; the
+	 * host driver passes the frame pointer directly instead. */
+	use_frags = htt->target_version_major < 3 ||
+		    !ieee80211_is_mgmt(hdr->frame_control);
 
 	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
 		ath10k_warn("htt alignment check failed. dropping packet.\n");
 		res = -EIO;
-		goto err;
+		goto err_free_txdesc;
 	}
 
-	spin_lock_bh(&htt->tx_lock);
-	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-	if (msdu_id < 0) {
-		spin_unlock_bh(&htt->tx_lock);
-		res = msdu_id;
-		goto err;
+	if (use_frags) {
+		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+		skb_cb->htt.pad_len = (unsigned long)msdu->data -
+				      round_down((unsigned long)msdu->data, 4);
+
+		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+	} else {
+		skb_cb->htt.frag_len = 0;
+		skb_cb->htt.pad_len = 0;
 	}
-	htt->pending_tx[msdu_id] = txdesc;
-	spin_unlock_bh(&htt->tx_lock);
 
 	res = ath10k_skb_map(dev, msdu);
 	if (res)
-		goto err;
+		goto err_pull_txfrag;
 
-	/* tx fragment list must be terminated with zero-entry */
-	skb_put(txfrag, frag_len);
-	tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
-	tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
-	tx_frags[0].len   = __cpu_to_le32(msdu->len);
-	tx_frags[1].paddr = __cpu_to_le32(0);
-	tx_frags[1].len   = __cpu_to_le32(0);
+	if (use_frags) {
+		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+					DMA_TO_DEVICE);
 
-	res = ath10k_skb_map(dev, txfrag);
-	if (res)
-		goto err;
+		/* tx fragment list must be terminated with zero-entry */
+		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+						  skb_cb->htt.frag_len +
+						  skb_cb->htt.pad_len);
+		tx_frags[0].len   = __cpu_to_le32(msdu->len -
+						  skb_cb->htt.frag_len -
+						  skb_cb->htt.pad_len);
+		tx_frags[1].paddr = __cpu_to_le32(0);
+		tx_frags[1].len   = __cpu_to_le32(0);
 
-	ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
-		   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+					   DMA_TO_DEVICE);
+	}
+
+	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
 		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
-			txfrag->data, frag_len);
 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
 			msdu->data, msdu->len);
 
 	skb_put(txdesc, desc_len);
 	cmd = (struct htt_cmd *)txdesc->data;
-	memset(cmd, 0, desc_len);
 
 	tid = ATH10K_SKB_CB(msdu)->htt.tid;
 
@@ -459,8 +479,13 @@
 	if (!ieee80211_has_protected(hdr->frame_control))
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-	flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-		     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+	if (use_frags)
+		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+	else
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
 	flags1  = 0;
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
 	cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
 	cmd->data_tx.flags0      = flags0;
 	cmd->data_tx.flags1      = __cpu_to_le16(flags1);
-	cmd->data_tx.len         = __cpu_to_le16(msdu->len);
+	cmd->data_tx.len         = __cpu_to_le16(msdu->len -
+						 skb_cb->htt.frag_len -
+						 skb_cb->htt.pad_len);
 	cmd->data_tx.id          = __cpu_to_le16(msdu_id);
-	cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
 	cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);
 
-	memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
-	/* refcount is decremented by HTC and HTT completions until it reaches
-	 * zero and is freed */
-	skb_cb = ATH10K_SKB_CB(txdesc);
-	skb_cb->htt.msdu_id = msdu_id;
-	skb_cb->htt.refcount = 2;
-	skb_cb->htt.txfrag = txfrag;
-	skb_cb->htt.msdu = msdu;
+	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
 
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
-		goto err;
+		goto err_unmap_msdu;
 
 	return 0;
-err:
-	if (txfrag)
-		ath10k_skb_unmap(dev, txfrag);
-	if (txdesc)
-		dev_kfree_skb_any(txdesc);
-	if (txfrag)
-		dev_kfree_skb_any(txfrag);
-	if (msdu_id >= 0) {
-		spin_lock_bh(&htt->tx_lock);
-		htt->pending_tx[msdu_id] = NULL;
-		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-		spin_unlock_bh(&htt->tx_lock);
-	}
-	ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
 	ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+	dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	htt->pending_tx[msdu_id] = NULL;
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+	ath10k_htt_tx_dec_pending(htt);
+err:
 	return res;
 }
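With the separate txfrag skb gone, the two-entry fragment list lives in
the msdu's own headroom and shares its single DMA mapping; the addresses
handed to the target are plain offsets (names as in the code above):

dma_addr_t frags_paddr   = skb_cb->paddr;	/* the fragment list */
dma_addr_t payload_paddr = skb_cb->paddr +
			   skb_cb->htt.frag_len +
			   skb_cb->htt.pad_len;	/* the actual frame */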
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af..8aeb46d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,28 +20,37 @@
 
 #include "targaddrs.h"
 
-/* Supported FW version */
-#define SUPPORTED_FW_MAJOR	1
-#define SUPPORTED_FW_MINOR	0
-#define SUPPORTED_FW_RELEASE	0
-#define SUPPORTED_FW_BUILD	629
-
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION		0x4000002c
-#define QCA988X_HW_1_0_FW_DIR		"ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE		"firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE		"otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE	"board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR	0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV	0x0
 
 /* QCA988X 2.0 definitions */
 #define QCA988X_HW_2_0_VERSION		0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV	0x2
 #define QCA988X_HW_2_0_FW_DIR		"ath10k/QCA988X/hw2.0"
 #define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
 #define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
+#define ATH10K_FW_API2_FILE		"firmware-2.bin"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K"
+
+struct ath10k_fw_ie {
+	__le32 id;
+	__le32 len;
+	u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+	ATH10K_FW_IE_FW_VERSION = 0,
+	ATH10K_FW_IE_TIMESTAMP = 1,
+	ATH10K_FW_IE_FEATURES = 2,
+	ATH10K_FW_IE_FW_IMAGE = 3,
+	ATH10K_FW_IE_OTP_IMAGE = 4,
+};
+
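A firmware-2.bin image is therefore the magic string (including its NUL)
followed by a sequence of 4-byte-aligned IEs. A hypothetical userspace
emitter for one IE, just to pin down the layout (htole32() standing in
for the kernel's cpu_to_le32()):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void emit_ie(FILE *f, uint32_t id, const void *data, uint32_t len)
{
	uint32_t hdr[2] = { htole32(id), htole32(len) };
	static const uint8_t pad[3];

	fwrite(hdr, sizeof(hdr), 1, f);
	fwrite(data, len, 1, f);
	fwrite(pad, (4 - (len & 3)) & 3, 1, f);	/* pad payload to 4 bytes */
}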
 /* Known pecularities:
  *  - current FW doesn't support raw rx mode (last tested v599)
  *  - current FW dumps upon raw tx mode (last tested v599)
@@ -53,6 +62,9 @@
 	ATH10K_HW_TXRX_RAW = 0,
 	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
 	ATH10K_HW_TXRX_ETHERNET = 2,
+
+	/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+	ATH10K_HW_TXRX_MGMT = 3,
 };
 
 enum ath10k_mcast2ucast_mode {
@@ -60,6 +72,7 @@
 	ATH10K_MCAST2UCAST_ENABLED = 1,
 };
 
+/* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS			8
 #define TARGET_NUM_PEER_AST			2
 #define TARGET_NUM_WDS_ENTRIES			32
@@ -75,7 +88,11 @@
 #define TARGET_RX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))
 #define TARGET_RX_TIMEOUT_LO_PRI		100
 #define TARGET_RX_TIMEOUT_HI_PRI		40
-#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_NATIVE_WIFI
+
 #define TARGET_SCAN_MAX_PENDING_REQS		4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV		3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV		3
@@ -90,6 +107,36 @@
 #define TARGET_NUM_MSDU_DESC			(1024 + 400)
 #define TARGET_MAX_FRAG_ENTRIES			0
 
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS			16
+#define TARGET_10X_NUM_PEER_AST			2
+#define TARGET_10X_NUM_WDS_ENTRIES		32
+#define TARGET_10X_DMA_BURST_SIZE		0
+#define TARGET_10X_MAC_AGGR_DELIM		0
+#define TARGET_10X_AST_SKID_LIMIT		16
+#define TARGET_10X_NUM_PEERS			(128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS		0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0
+#define TARGET_10X_NUM_PEER_KEYS		2
+#define TARGET_10X_NUM_TIDS			256
+#define TARGET_10X_TX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI		100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI		40
+#define TARGET_10X_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS	4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV	2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV	2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES	8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV		3
+#define TARGET_10X_NUM_MCAST_GROUPS		0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS	0
+#define TARGET_10X_MCAST2UCAST_MODE		ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE		1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG			0
+#define TARGET_10X_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES		0
 
 /* Number of Copy Engines supported */
 #define CE_COUNT 8
@@ -169,6 +216,10 @@
 #define SOC_LPO_CAL_ENABLE_LSB			20
 #define SOC_LPO_CAL_ENABLE_MASK			0x00100000
 
+#define SOC_CHIP_ID_ADDRESS			0x000000ec
+#define SOC_CHIP_ID_REV_LSB			8
+#define SOC_CHIP_ID_REV_MASK			0x00000f00
+
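Extracting the revision from the chip-id register is a plain mask-and-shift with the LSB/MASK pair above; a standalone sketch using a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define SOC_CHIP_ID_REV_LSB	8
#define SOC_CHIP_ID_REV_MASK	0x00000f00

int main(void)
{
	uint32_t chip_id = 0x043c0200;	/* hypothetical readl() result */
	uint32_t rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;

	printf("chip rev %u\n", rev);	/* 2, i.e. QCA988X_HW_2_0_CHIP_ID_REV */
	return 0;
}
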
 #define WLAN_RESET_CONTROL_COLD_RST_MASK	0x00000008
 #define WLAN_RESET_CONTROL_WARM_RST_MASK	0x00000004
 #define WLAN_SYSTEM_SLEEP_DISABLE_LSB		0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d..97ac8c8 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -92,7 +92,7 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	INIT_COMPLETION(ar->install_key_done);
+	reinit_completion(&ar->install_key_done);
 
 	ret = ath10k_send_key(arvif, key, cmd, macaddr);
 	if (ret)
@@ -334,25 +334,29 @@
 
 static int  ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
 {
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+
 	if (value != 0xFFFFFFFF)
 		value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
 			      ATH10K_RTS_MAX);
 
-	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-					 WMI_VDEV_PARAM_RTS_THRESHOLD,
-					 value);
+	vdev_param = ar->wmi.vdev_param->rts_threshold;
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
 static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
 {
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+
 	if (value != 0xFFFFFFFF)
 		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
 				ATH10K_FRAGMT_THRESHOLD_MIN,
 				ATH10K_FRAGMT_THRESHOLD_MAX);
 
-	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-					 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
-					 value);
+	vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -434,7 +438,7 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	INIT_COMPLETION(ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_setup_done);
 
 	arg.vdev_id = arvif->vdev_id;
 	arg.dtim_period = arvif->dtim_period;
@@ -460,6 +464,11 @@
 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
 	}
 
+	ath10k_dbg(ATH10K_DBG_MAC,
+		   "mac vdev %d start center_freq %d phymode %s\n",
+		   arg.vdev_id, arg.channel.freq,
+		   ath10k_wmi_phymode_str(arg.channel.mode));
+
 	ret = ath10k_wmi_vdev_start(ar, &arg);
 	if (ret) {
 		ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -482,7 +491,7 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	INIT_COMPLETION(ar->vdev_setup_done);
+	reinit_completion(&ar->vdev_setup_done);
 
 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
 	if (ret) {
@@ -503,13 +512,10 @@
 {
 	struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
 	struct wmi_vdev_start_request_arg arg = {};
-	enum nl80211_channel_type type;
 	int ret = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
 	arg.vdev_id = vdev_id;
 	arg.channel.freq = channel->center_freq;
 	arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -560,12 +566,9 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	/* For some reasons, ath10k_wmi_vdev_down() here couse
-	 * often ath10k_wmi_vdev_stop() to fail. Next we could
-	 * not run monitor vdev and driver reload
-	 * required. Don't see such problems we skip
-	 * ath10k_wmi_vdev_down() here.
-	 */
+	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+	if (ret)
+		ath10k_warn("Monitor vdev down failed: %d\n", ret);
 
 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
 	if (ret)
@@ -607,7 +610,7 @@
 		goto vdev_fail;
 	}
 
-	ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
 		   ar->monitor_vdev_id);
 
 	ar->monitor_present = true;
@@ -639,7 +642,7 @@
 	ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
 	ar->monitor_present = false;
 
-	ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
 		   ar->monitor_vdev_id);
 	return ret;
 }
@@ -668,13 +671,14 @@
 			    arvif->vdev_id);
 		return;
 	}
-	ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
 }
 
 static void ath10k_control_ibss(struct ath10k_vif *arvif,
 				struct ieee80211_bss_conf *info,
 				const u8 self_peer[ETH_ALEN])
 {
+	u32 vdev_param;
 	int ret = 0;
 
 	lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -708,8 +712,8 @@
 		return;
 	}
 
-	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-					WMI_VDEV_PARAM_ATIM_WINDOW,
+	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
 					ATH10K_DEFAULT_ATIM);
 	if (ret)
 		ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -719,47 +723,45 @@
 /*
  * Review this when mac80211 gains per-interface powersave support.
  */
-static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 {
-	struct ath10k_generic_iter *ar_iter = data;
-	struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
-	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_conf *conf = &ar->hw->conf;
 	enum wmi_sta_powersave_param param;
 	enum wmi_sta_ps_mode psmode;
 	int ret;
 
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
-	if (vif->type != NL80211_IFTYPE_STATION)
-		return;
+	if (arvif->vif->type != NL80211_IFTYPE_STATION)
+		return 0;
 
 	if (conf->flags & IEEE80211_CONF_PS) {
 		psmode = WMI_STA_PS_MODE_ENABLED;
 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
-		ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
-						  arvif->vdev_id,
-						  param,
+		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
 						  conf->dynamic_ps_timeout);
 		if (ret) {
 			ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
 				    arvif->vdev_id);
-			return;
+			return ret;
 		}
-
-		ar_iter->ret = ret;
 	} else {
 		psmode = WMI_STA_PS_MODE_DISABLED;
 	}
 
-	ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
-					     psmode);
-	if (ar_iter->ret)
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+		   arvif->vdev_id, psmode ? "enable" : "disable");
+
+	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+	if (ret) {
 		ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
 			    psmode, arvif->vdev_id);
-	else
-		ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
-			   psmode, arvif->vdev_id);
+		return ret;
+	}
+
+	return 0;
 }
 
 /**********************/
@@ -949,7 +951,8 @@
 	arg->peer_ht_rates.num_rates = n;
 	arg->peer_num_spatial_streams = max((n+7) / 8, 1);
 
-	ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+	ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+		   arg->addr,
 		   arg->peer_ht_rates.num_rates,
 		   arg->peer_num_spatial_streams);
 }
@@ -969,11 +972,11 @@
 		arg->peer_flags |= WMI_PEER_QOS;
 
 	if (sta->wme && sta->uapsd_queues) {
-		ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+		ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
 			   sta->uapsd_queues, sta->max_sp);
 
 		arg->peer_flags |= WMI_PEER_APSD;
-		arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+		arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
 
 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
 			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1028,14 +1031,27 @@
 				    struct wmi_peer_assoc_complete_arg *arg)
 {
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	u8 ampdu_factor;
 
 	if (!vht_cap->vht_supported)
 		return;
 
 	arg->peer_flags |= WMI_PEER_VHT;
-
 	arg->peer_vht_caps = vht_cap->cap;
 
+
+	ampdu_factor = (vht_cap->cap &
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+	 * zero in VHT IE. Using it would result in degraded throughput.
+	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+	 * it if VHT max_mpdu is smaller. */
+	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+					ampdu_factor)) - 1);
+
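The 3-bit VHT exponent encodes a maximum A-MPDU length of 2^(13 + factor) - 1 bytes, so a bogus factor of zero would cap aggregates at 8191 bytes instead of up to 1048575. A quick standalone check of the arithmetic:

#include <stdio.h>

#define IEEE80211_HT_MAX_AMPDU_FACTOR	13	/* 2^13 = 8192 byte base */

int main(void)
{
	unsigned int f;

	for (f = 0; f <= 7; f++)	/* 3-bit VHT exponent field */
		printf("factor %u -> max A-MPDU %u bytes\n", f,
		       (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + f)) - 1);
	return 0;	/* 8191 bytes at factor 0, 1048575 at factor 7 */
}
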
 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		arg->peer_flags |= WMI_PEER_80MHZ;
 
@@ -1048,7 +1064,8 @@
 	arg->peer_vht_rates.tx_mcs_set =
 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
 
-	ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
 }
 
 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1093,6 @@
 {
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
-	/* FIXME: add VHT */
-
 	switch (ar->hw->conf.chandef.chan->band) {
 	case IEEE80211_BAND_2GHZ:
 		if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1106,17 @@
 
 		break;
 	case IEEE80211_BAND_5GHZ:
-		if (sta->ht_cap.ht_supported) {
+		/*
+		 * Check VHT first.
+		 */
+		if (sta->vht_cap.vht_supported) {
+			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+				phymode = MODE_11AC_VHT80;
+			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11AC_VHT40;
+			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				phymode = MODE_11AC_VHT20;
+		} else if (sta->ht_cap.ht_supported) {
 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NA_HT40;
 			else
@@ -1105,30 +1130,32 @@
 		break;
 	}
 
+	ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+		   sta->addr, ath10k_wmi_phymode_str(phymode));
+
 	arg->peer_phymode = phymode;
 	WARN_ON(phymode == MODE_UNKNOWN);
 }
 
-static int ath10k_peer_assoc(struct ath10k *ar,
-			     struct ath10k_vif *arvif,
-			     struct ieee80211_sta *sta,
-			     struct ieee80211_bss_conf *bss_conf)
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+				     struct ath10k_vif *arvif,
+				     struct ieee80211_sta *sta,
+				     struct ieee80211_bss_conf *bss_conf,
+				     struct wmi_peer_assoc_complete_arg *arg)
 {
-	struct wmi_peer_assoc_complete_arg arg;
-
 	lockdep_assert_held(&ar->conf_mutex);
 
-	memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+	memset(arg, 0, sizeof(*arg));
 
-	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
-	ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
-	ath10k_peer_assoc_h_rates(ar, sta, &arg);
-	ath10k_peer_assoc_h_ht(ar, sta, &arg);
-	ath10k_peer_assoc_h_vht(ar, sta, &arg);
-	ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
-	ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+	ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+	ath10k_peer_assoc_h_rates(ar, sta, arg);
+	ath10k_peer_assoc_h_ht(ar, sta, arg);
+	ath10k_peer_assoc_h_vht(ar, sta, arg);
+	ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+	ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
 
-	return ath10k_wmi_peer_assoc(ar, &arg);
+	return 0;
 }
 
 /* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1138,6 +1165,7 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_peer_assoc_complete_arg peer_arg;
 	struct ieee80211_sta *ap_sta;
 	int ret;
 
@@ -1153,24 +1181,33 @@
 		return;
 	}
 
-	ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+	ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+					bss_conf, &peer_arg);
 	if (ret) {
-		ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+		ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
+			    bss_conf->bssid, ret);
 		rcu_read_unlock();
 		return;
 	}
 
 	rcu_read_unlock();
 
+	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+	if (ret) {
+		ath10k_warn("Peer assoc failed for %pM\n: %d",
+			    bss_conf->bssid, ret);
+		return;
+	}
+
+	ath10k_dbg(ATH10K_DBG_MAC,
+		   "mac vdev %d up (associated) bssid %pM aid %d\n",
+		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
 				 bss_conf->bssid);
 	if (ret)
 		ath10k_warn("VDEV: %d up failed: ret %d\n",
 			    arvif->vdev_id, ret);
-	else
-		ath10k_dbg(ATH10K_DBG_MAC,
-			   "VDEV: %d associated, BSSID: %pM, AID: %d\n",
-			   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
 }
 
 /*
@@ -1191,10 +1228,11 @@
 	 * No idea why this happens, even though VDEV-DOWN is supposed
 	 * to be analogous to link down, so just stop the VDEV.
 	 */
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+		   arvif->vdev_id);
+
+	/* FIXME: check return value */
 	ret = ath10k_vdev_stop(arvif);
-	if (!ret)
-		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
-			   arvif->vdev_id);
 
 	/*
 	 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,26 +1241,33 @@
 	 * interfaces as it expects there is no rx when no interface is
 	 * running.
 	 */
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
+
+	/* FIXME: why don't we print error if wmi call fails? */
 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
-	if (ret)
-		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
-			   arvif->vdev_id, ret);
 
-	ath10k_wmi_flush_tx(ar);
-
-	arvif->def_wep_key_index = 0;
+	arvif->def_wep_key_idx = 0;
 }
 
 static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 				struct ieee80211_sta *sta)
 {
+	struct wmi_peer_assoc_complete_arg peer_arg;
 	int ret = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+	ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
 	if (ret) {
-		ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+		ath10k_warn("WMI peer assoc prepare failed for %pM\n",
+			    sta->addr);
+		return ret;
+	}
+
+	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+	if (ret) {
+		ath10k_warn("Peer assoc failed for STA %pM\n: %d",
+			    sta->addr, ret);
 		return ret;
 	}
 
@@ -1333,8 +1378,8 @@
 				continue;
 
 			ath10k_dbg(ATH10K_DBG_WMI,
-				   "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
-				   __func__, ch - arg.channels, arg.n_channels,
+				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+				    ch - arg.channels, arg.n_channels,
 				   ch->freq, ch->max_power, ch->max_reg_power,
 				   ch->max_antenna_gain, ch->mode);
 
@@ -1391,6 +1436,33 @@
 /* TX handlers */
 /***************/
 
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		return HTT_DATA_TX_EXT_TID_MGMT;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
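For unicast QoS data the TID is simply the low bits of the first QoS-control byte, as the final return above does; a standalone sketch of the masking with a made-up QoS field:

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_QOS_CTL_TID_MASK	0x000f

int main(void)
{
	uint8_t qos_ctl[2] = { 0x26, 0x00 };	/* made-up QoS control field */
	uint8_t tid = qos_ctl[0] & IEEE80211_QOS_CTL_TID_MASK;

	printf("tid %u\n", tid);	/* 6: one of the voice TIDs */
	return 0;
}
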
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+				  struct ieee80211_tx_info *info)
+{
+	if (info->control.vif)
+		return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
+
+	if (ar->monitor_enabled)
+		return ar->monitor_vdev_id;
+
+	ath10k_warn("could not resolve vdev id\n");
+	return 0;
+}
+
 /*
  * Frames sent to the FW have to be in "Native Wifi" format.
  * Strip the QoS field from the 802.11 header.
@@ -1411,6 +1483,30 @@
 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
 }
 
+static void ath10k_tx_wep_key_work(struct work_struct *work)
+{
+	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+						wep_key_work);
+	int ret, keyidx = arvif->def_wep_key_newidx;
+
+	if (arvif->def_wep_key_idx == keyidx)
+		return;
+
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+		   arvif->vdev_id, keyidx);
+
+	ret = ath10k_wmi_vdev_set_param(arvif->ar,
+					arvif->vdev_id,
+					arvif->ar->wmi.vdev_param->def_keyid,
+					keyidx);
+	if (ret) {
+		ath10k_warn("could not update wep keyidx (%d)\n", ret);
+		return;
+	}
+
+	arvif->def_wep_key_idx = keyidx;
+}
+
 static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1419,11 +1515,6 @@
 	struct ath10k *ar = arvif->ar;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_key_conf *key = info->control.hw_key;
-	int ret;
-
-	/* TODO AP mode should be implemented */
-	if (vif->type != NL80211_IFTYPE_STATION)
-		return;
 
 	if (!ieee80211_has_protected(hdr->frame_control))
 		return;
@@ -1435,20 +1526,14 @@
 	    key->cipher != WLAN_CIPHER_SUITE_WEP104)
 		return;
 
-	if (key->keyidx == arvif->def_wep_key_index)
+	if (key->keyidx == arvif->def_wep_key_idx)
 		return;
 
-	ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
-
-	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-					WMI_VDEV_PARAM_DEF_KEYID,
-					key->keyidx);
-	if (ret) {
-		ath10k_warn("could not update wep keyidx (%d)\n", ret);
-		return;
-	}
-
-	arvif->def_wep_key_index = key->keyidx;
+	/* FIXME: Most likely a few frames will be TXed with an old key. Simply
+	 * queueing frames until the key index is updated is not an option
+	 * because the sk_buff may need further processing, e.g. offchannel tx */
+	arvif->def_wep_key_newidx = key->keyidx;
+	ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
 }
 
 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1478,19 +1563,42 @@
 static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	int ret;
+	int ret = 0;
 
-	if (ieee80211_is_mgmt(hdr->frame_control))
-		ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-	else if (ieee80211_is_nullfunc(hdr->frame_control))
+	if (ar->htt.target_version_major >= 3) {
+		/* Since HTT 3.0 there is no separate mgmt tx command */
+		ret = ath10k_htt_tx(&ar->htt, skb);
+		goto exit;
+	}
+
+	if (ieee80211_is_mgmt(hdr->frame_control)) {
+		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+			     ar->fw_features)) {
+			if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+			    ATH10K_MAX_NUM_MGMT_PENDING) {
+				ath10k_warn("wmi mgmt_tx queue limit reached\n");
+				ret = -EBUSY;
+				goto exit;
+			}
+
+			skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+			ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+		} else {
+			ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+		}
+	} else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+			     ar->fw_features) &&
+		   ieee80211_is_nullfunc(hdr->frame_control)) {
 		/* FW does not report tx status properly for NullFunc frames
 		 * unless they are sent through mgmt tx path. mac80211 sends
-		 * those frames when it detects link/beacon loss and depends on
-		 * the tx status to be correct. */
+		 * those frames when it detects link/beacon loss and depends
+		 * on the tx status to be correct. */
 		ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-	else
+	} else {
 		ret = ath10k_htt_tx(&ar->htt, skb);
+	}
 
+exit:
 	if (ret) {
 		ath10k_warn("tx failed (%d). dropping packet.\n", ret);
 		ieee80211_free_txskb(ar->hw, skb);
@@ -1534,18 +1642,19 @@
 
 		mutex_lock(&ar->conf_mutex);
 
-		ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+		ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
 			   skb);
 
 		hdr = (struct ieee80211_hdr *)skb->data;
 		peer_addr = ieee80211_get_DA(hdr);
-		vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+		vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
 
 		spin_lock_bh(&ar->data_lock);
 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
 		spin_unlock_bh(&ar->data_lock);
 
 		if (peer)
+			/* FIXME: should this use ath10k_warn()? */
 			ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
 				   peer_addr, vdev_id);
 
@@ -1557,7 +1666,7 @@
 		}
 
 		spin_lock_bh(&ar->data_lock);
-		INIT_COMPLETION(ar->offchan_tx_completed);
+		reinit_completion(&ar->offchan_tx_completed);
 		ar->offchan_tx_skb = skb;
 		spin_unlock_bh(&ar->data_lock);
 
@@ -1580,6 +1689,36 @@
 	}
 }
 
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	for (;;) {
+		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+		if (!skb)
+			break;
+
+		ieee80211_free_txskb(ar->hw, skb);
+	}
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+	struct sk_buff *skb;
+	int ret;
+
+	for (;;) {
+		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+		if (!skb)
+			break;
+
+		ret = ath10k_wmi_mgmt_tx(ar, skb);
+		if (ret)
+			ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+	}
+}
+
 /************/
 /* Scanning */
 /************/
@@ -1643,8 +1782,6 @@
 		return -EIO;
 	}
 
-	ath10k_wmi_flush_tx(ar);
-
 	ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
 	if (ret == 0)
 		ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1815,6 @@
 	if (ret)
 		return ret;
 
-	/* make sure we submit the command so the completion
-	* timeout makes sense */
-	ath10k_wmi_flush_tx(ar);
-
 	ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
 	if (ret == 0) {
 		ath10k_abort_scan(ar);
@@ -1709,16 +1842,7 @@
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ath10k *ar = hw->priv;
-	struct ath10k_vif *arvif = NULL;
-	u32 vdev_id = 0;
-	u8 tid;
-
-	if (info->control.vif) {
-		arvif = ath10k_vif_to_arvif(info->control.vif);
-		vdev_id = arvif->vdev_id;
-	} else if (ar->monitor_enabled) {
-		vdev_id = ar->monitor_vdev_id;
-	}
+	u8 tid, vdev_id;
 
 	/* We should disable CCK RATE due to P2P */
 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1726,12 +1850,8 @@
 
 	/* we must calculate tid before we apply qos workaround
 	 * as we'd lose the qos control field */
-	tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-	if (ieee80211_is_data_qos(hdr->frame_control) &&
-	    is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-	}
+	tid = ath10k_tx_h_get_tid(hdr);
+	vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
 
 	/* it makes no sense to process injected frames like that */
 	if (info->control.vif &&
@@ -1742,14 +1862,14 @@
 		ath10k_tx_h_seq_no(skb);
 	}
 
-	memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
-	ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+	ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
 	ATH10K_SKB_CB(skb)->htt.tid = tid;
 
 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
 		spin_lock_bh(&ar->data_lock);
 		ATH10K_SKB_CB(skb)->htt.is_offchan = true;
-		ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+		ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
 		spin_unlock_bh(&ar->data_lock);
 
 		ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1771,6 +1891,7 @@
 
 	del_timer_sync(&ar->scan.timeout);
 	ath10k_offchan_tx_purge(ar);
+	ath10k_mgmt_over_wmi_tx_purge(ar);
 	ath10k_peer_cleanup_all(ar);
 	ath10k_core_stop(ar);
 	ath10k_hif_power_down(ar);
@@ -1817,12 +1938,12 @@
 	else if (ar->state == ATH10K_STATE_RESTARTING)
 		ar->state = ATH10K_STATE_RESTARTED;
 
-	ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
 	if (ret)
 		ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
 			    ret);
 
-	ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
 	if (ret)
 		ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
 			    ret);
@@ -1847,32 +1968,29 @@
 	ar->state = ATH10K_STATE_OFF;
 	mutex_unlock(&ar->conf_mutex);
 
+	ath10k_mgmt_over_wmi_tx_purge(ar);
+
 	cancel_work_sync(&ar->offchan_tx_work);
+	cancel_work_sync(&ar->wmi_mgmt_tx_work);
 	cancel_work_sync(&ar->restart_work);
 }
 
-static void ath10k_config_ps(struct ath10k *ar)
+static int ath10k_config_ps(struct ath10k *ar)
 {
-	struct ath10k_generic_iter ar_iter;
+	struct ath10k_vif *arvif;
+	int ret = 0;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	/* During HW reconfiguration mac80211 reports all interfaces that were
-	 * running until reconfiguration was started. Since FW doesn't have any
-	 * vdevs at this point we must not iterate over this interface list.
-	 * This setting will be updated upon add_interface(). */
-	if (ar->state == ATH10K_STATE_RESTARTED)
-		return;
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_mac_vif_setup_ps(arvif);
+		if (ret) {
+			ath10k_warn("could not setup powersave (%d)\n", ret);
+			break;
+		}
+	}
 
-	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-	ar_iter.ar = ar;
-
-	ieee80211_iterate_active_interfaces_atomic(
-		ar->hw, IEEE80211_IFACE_ITER_NORMAL,
-		ath10k_ps_iter, &ar_iter);
-
-	if (ar_iter.ret)
-		ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+	return ret;
 }
 
 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1884,7 +2002,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-		ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+		ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
 			   conf->chandef.chan->center_freq);
 		spin_lock_bh(&ar->data_lock);
 		ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +2019,6 @@
 			ret = ath10k_monitor_destroy(ar);
 	}
 
-	ath10k_wmi_flush_tx(ar);
 	mutex_unlock(&ar->conf_mutex);
 	return ret;
 }
@@ -1922,6 +2039,7 @@
 	int ret = 0;
 	u32 value;
 	int bit;
+	u32 vdev_param;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -1930,21 +2048,22 @@
 	arvif->ar = ar;
 	arvif->vif = vif;
 
+	INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+
 	if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
 		ath10k_warn("Only one monitor interface allowed\n");
 		ret = -EBUSY;
-		goto exit;
+		goto err;
 	}
 
 	bit = ffs(ar->free_vdev_map);
 	if (bit == 0) {
 		ret = -EBUSY;
-		goto exit;
+		goto err;
 	}
 
 	arvif->vdev_id = bit - 1;
 	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
-	ar->free_vdev_map &= ~(1 << arvif->vdev_id);
 
 	if (ar->p2p)
 		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1973,32 +2092,41 @@
 		break;
 	}
 
-	ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
 
 	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
 				     arvif->vdev_subtype, vif->addr);
 	if (ret) {
 		ath10k_warn("WMI vdev create failed: ret %d\n", ret);
-		goto exit;
+		goto err;
 	}
 
-	ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
-					arvif->def_wep_key_index);
-	if (ret)
-		ath10k_warn("Failed to set default keyid: %d\n", ret);
+	ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+	list_add(&arvif->list, &ar->arvifs);
 
-	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-					WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+	vdev_param = ar->wmi.vdev_param->def_keyid;
+	ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+					arvif->def_wep_key_idx);
+	if (ret) {
+		ath10k_warn("Failed to set default keyid: %d\n", ret);
+		goto err_vdev_delete;
+	}
+
+	vdev_param = ar->wmi.vdev_param->tx_encap_type;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 					ATH10K_HW_TXRX_NATIVE_WIFI);
-	if (ret)
+	/* 10.X firmware does not support this VDEV parameter. Do not warn */
+	if (ret && ret != -EOPNOTSUPP) {
 		ath10k_warn("Failed to set TX encap: %d\n", ret);
+		goto err_vdev_delete;
+	}
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
 		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
 		if (ret) {
 			ath10k_warn("Failed to create peer for AP: %d\n", ret);
-			goto exit;
+			goto err_vdev_delete;
 		}
 	}
 
@@ -2007,39 +2135,62 @@
 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
 						  param, value);
-		if (ret)
+		if (ret) {
 			ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+			goto err_peer_delete;
+		}
 
 		param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
 						  param, value);
-		if (ret)
+		if (ret) {
 			ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+			goto err_peer_delete;
+		}
 
 		param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
 		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
 						  param, value);
-		if (ret)
+		if (ret) {
 			ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+			goto err_peer_delete;
+		}
 	}
 
 	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
-	if (ret)
+	if (ret) {
 		ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
 			    arvif->vdev_id, ret);
+		goto err_peer_delete;
+	}
 
 	ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
-	if (ret)
+	if (ret) {
 		ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
 			    arvif->vdev_id, ret);
+		goto err_peer_delete;
+	}
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
 		ar->monitor_present = true;
 
-exit:
 	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_peer_delete:
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+	ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+	list_del(&arvif->list);
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+
 	return ret;
 }
 
@@ -2052,9 +2203,17 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+	cancel_work_sync(&arvif->wep_key_work);
+
+	spin_lock_bh(&ar->data_lock);
+	if (arvif->beacon) {
+		dev_kfree_skb_any(arvif->beacon);
+		arvif->beacon = NULL;
+	}
+	spin_unlock_bh(&ar->data_lock);
 
 	ar->free_vdev_map |= 1 << (arvif->vdev_id);
+	list_del(&arvif->list);
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
 		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2064,6 +2223,9 @@
 		kfree(arvif->u.ap.noa_data);
 	}
 
+	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+		   arvif->vdev_id);
+
 	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
 	if (ret)
 		ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2267,20 @@
 
 	if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
 	    !ar->monitor_enabled) {
+		ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+			   ar->monitor_vdev_id);
+
 		ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
 		if (ret)
 			ath10k_warn("Unable to start monitor mode\n");
-		else
-			ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
 	} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
 		   ar->monitor_enabled) {
+		ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+			   ar->monitor_vdev_id);
+
 		ret = ath10k_monitor_stop(ar);
 		if (ret)
 			ath10k_warn("Unable to stop monitor mode\n");
-		else
-			ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
 	}
 
 	mutex_unlock(&ar->conf_mutex);
@@ -2130,6 +2294,7 @@
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	int ret = 0;
+	u32 vdev_param, pdev_param;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -2138,44 +2303,44 @@
 
 	if (changed & BSS_CHANGED_BEACON_INT) {
 		arvif->beacon_interval = info->beacon_int;
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-						WMI_VDEV_PARAM_BEACON_INTERVAL,
+		vdev_param = ar->wmi.vdev_param->beacon_interval;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 						arvif->beacon_interval);
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac vdev %d beacon_interval %d\n",
+			   arvif->vdev_id, arvif->beacon_interval);
+
 		if (ret)
 			ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Beacon interval: %d set for VDEV: %d\n",
-				   arvif->beacon_interval, arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_BEACON) {
-		ret = ath10k_wmi_pdev_set_param(ar,
-						WMI_PDEV_PARAM_BEACON_TX_MODE,
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "vdev %d set beacon tx mode to staggered\n",
+			   arvif->vdev_id);
+
+		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
 						WMI_BEACON_STAGGERED_MODE);
 		if (ret)
 			ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Set staggered beacon mode for VDEV: %d\n",
-				   arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_BEACON_INFO) {
 		arvif->dtim_period = info->dtim_period;
 
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-						WMI_VDEV_PARAM_DTIM_PERIOD,
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac vdev %d dtim_period %d\n",
+			   arvif->vdev_id, arvif->dtim_period);
+
+		vdev_param = ar->wmi.vdev_param->dtim_period;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 						arvif->dtim_period);
 		if (ret)
 			ath10k_warn("Failed to set dtim period for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Set dtim period: %d for VDEV: %d\n",
-				   arvif->dtim_period, arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2353,15 @@
 
 	if (changed & BSS_CHANGED_BSSID) {
 		if (!is_zero_ether_addr(info->bssid)) {
+			ath10k_dbg(ATH10K_DBG_MAC,
+				   "mac vdev %d create peer %pM\n",
+				   arvif->vdev_id, info->bssid);
+
 			ret = ath10k_peer_create(ar, arvif->vdev_id,
 						 info->bssid);
 			if (ret)
 				ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
 					    info->bssid, arvif->vdev_id);
-			else
-				ath10k_dbg(ATH10K_DBG_MAC,
-					   "Added peer: %pM for VDEV: %d\n",
-					   info->bssid, arvif->vdev_id);
-
 
 			if (vif->type == NL80211_IFTYPE_STATION) {
 				/*
@@ -2207,11 +2371,12 @@
 				memcpy(arvif->u.sta.bssid, info->bssid,
 				       ETH_ALEN);
 
+				ath10k_dbg(ATH10K_DBG_MAC,
+					   "mac vdev %d start %pM\n",
+					   arvif->vdev_id, info->bssid);
+
+				/* FIXME: check return value */
 				ret = ath10k_vdev_start(arvif);
-				if (!ret)
-					ath10k_dbg(ATH10K_DBG_MAC,
-						   "VDEV: %d started with BSSID: %pM\n",
-						   arvif->vdev_id, info->bssid);
 			}
 
 			/*
@@ -2235,16 +2400,15 @@
 		else
 			cts_prot = 0;
 
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-						WMI_VDEV_PARAM_ENABLE_RTSCTS,
+		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+			   arvif->vdev_id, cts_prot);
+
+		vdev_param = ar->wmi.vdev_param->enable_rtscts;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 						cts_prot);
 		if (ret)
 			ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Set CTS prot: %d for VDEV: %d\n",
-				   cts_prot, arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2419,15 @@
 		else
 			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
 
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-						WMI_VDEV_PARAM_SLOT_TIME,
+		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+			   arvif->vdev_id, slottime);
+
+		vdev_param = ar->wmi.vdev_param->slot_time;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 						slottime);
 		if (ret)
 			ath10k_warn("Failed to set erp slot for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Set slottime: %d for VDEV: %d\n",
-				   slottime, arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2437,16 @@
 		else
 			preamble = WMI_VDEV_PREAMBLE_LONG;
 
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-						WMI_VDEV_PARAM_PREAMBLE,
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac vdev %d preamble %dn",
+			   arvif->vdev_id, preamble);
+
+		vdev_param = ar->wmi.vdev_param->preamble;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 						preamble);
 		if (ret)
 			ath10k_warn("Failed to set preamble for VDEV: %d\n",
 				    arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Set preamble: %d for VDEV: %d\n",
-				   preamble, arvif->vdev_id);
 	}
 
 	if (changed & BSS_CHANGED_ASSOC) {
@@ -2313,8 +2476,8 @@
 		goto exit;
 	}
 
-	INIT_COMPLETION(ar->scan.started);
-	INIT_COMPLETION(ar->scan.completed);
+	reinit_completion(&ar->scan.started);
+	reinit_completion(&ar->scan.completed);
 	ar->scan.in_progress = true;
 	ar->scan.aborting = false;
 	ar->scan.is_roc = false;
@@ -2474,27 +2637,26 @@
 		/*
 		 * New station addition.
 		 */
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac vdev %d peer create %pM (new sta)\n",
+			   arvif->vdev_id, sta->addr);
+
 		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
 		if (ret)
 			ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
 				    sta->addr, arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Added peer: %pM for VDEV: %d\n",
-				   sta->addr, arvif->vdev_id);
 	} else if ((old_state == IEEE80211_STA_NONE &&
 		    new_state == IEEE80211_STA_NOTEXIST)) {
 		/*
 		 * Existing station deletion.
 		 */
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac vdev %d peer delete %pM (sta gone)\n",
+			   arvif->vdev_id, sta->addr);
 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
 		if (ret)
 			ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
 				    sta->addr, arvif->vdev_id);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Removed peer: %pM for VDEV: %d\n",
-				   sta->addr, arvif->vdev_id);
 
 		if (vif->type == NL80211_IFTYPE_STATION)
 			ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2667,13 @@
 		/*
 		 * New association.
 		 */
+		ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+			   sta->addr);
+
 		ret = ath10k_station_assoc(ar, arvif, sta);
 		if (ret)
 			ath10k_warn("Failed to associate station: %pM\n",
 				    sta->addr);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Station %pM moved to assoc state\n",
-				   sta->addr);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTH &&
 		   (vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2681,13 @@
 		/*
 		 * Disassociation.
 		 */
+		ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+			   sta->addr);
+
 		ret = ath10k_station_disassoc(ar, arvif, sta);
 		if (ret)
 			ath10k_warn("Failed to disassociate station: %pM\n",
 				    sta->addr);
-		else
-			ath10k_dbg(ATH10K_DBG_MAC,
-				   "Station %pM moved to disassociated state\n",
-				   sta->addr);
 	}
 
 	mutex_unlock(&ar->conf_mutex);
@@ -2672,9 +2832,9 @@
 		goto exit;
 	}
 
-	INIT_COMPLETION(ar->scan.started);
-	INIT_COMPLETION(ar->scan.completed);
-	INIT_COMPLETION(ar->scan.on_channel);
+	reinit_completion(&ar->scan.started);
+	reinit_completion(&ar->scan.completed);
+	reinit_completion(&ar->scan.on_channel);
 	ar->scan.in_progress = true;
 	ar->scan.aborting = false;
 	ar->scan.is_roc = true;
@@ -2732,88 +2892,51 @@
  * Both RTS and Fragmentation threshold are interface-specific
  * in ath10k, but device-specific in mac80211.
  */
-static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-	struct ath10k_generic_iter *ar_iter = data;
-	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-	u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
-
-	lockdep_assert_held(&arvif->ar->conf_mutex);
-
-	/* During HW reconfiguration mac80211 reports all interfaces that were
-	 * running until reconfiguration was started. Since FW doesn't have any
-	 * vdevs at this point we must not iterate over this interface list.
-	 * This setting will be updated upon add_interface(). */
-	if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
-		return;
-
-	ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
-	if (ar_iter->ret)
-		ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
-			    arvif->vdev_id);
-	else
-		ath10k_dbg(ATH10K_DBG_MAC,
-			   "Set RTS threshold: %d for VDEV: %d\n",
-			   rts, arvif->vdev_id);
-}
 
 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
-	struct ath10k_generic_iter ar_iter;
 	struct ath10k *ar = hw->priv;
-
-	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-	ar_iter.ar = ar;
+	struct ath10k_vif *arvif;
+	int ret = 0;
 
 	mutex_lock(&ar->conf_mutex);
-	ieee80211_iterate_active_interfaces_atomic(
-		hw, IEEE80211_IFACE_ITER_NORMAL,
-		ath10k_set_rts_iter, &ar_iter);
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+			   arvif->vdev_id, value);
+
+		ret = ath10k_mac_set_rts(arvif, value);
+		if (ret) {
+			ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+				    arvif->vdev_id, ret);
+			break;
+		}
+	}
 	mutex_unlock(&ar->conf_mutex);
 
-	return ar_iter.ret;
-}
-
-static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-	struct ath10k_generic_iter *ar_iter = data;
-	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-	u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-
-	lockdep_assert_held(&arvif->ar->conf_mutex);
-
-	/* During HW reconfiguration mac80211 reports all interfaces that were
-	 * running until reconfiguration was started. Since FW doesn't have any
-	 * vdevs at this point we must not iterate over this interface list.
-	 * This setting will be updated upon add_interface(). */
-	if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
-		return;
-
-	ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
-	if (ar_iter->ret)
-		ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
-			    arvif->vdev_id);
-	else
-		ath10k_dbg(ATH10K_DBG_MAC,
-			   "Set frag threshold: %d for VDEV: %d\n",
-			   frag, arvif->vdev_id);
+	return ret;
 }
 
 static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 {
-	struct ath10k_generic_iter ar_iter;
 	struct ath10k *ar = hw->priv;
-
-	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-	ar_iter.ar = ar;
+	struct ath10k_vif *arvif;
+	int ret = 0;
 
 	mutex_lock(&ar->conf_mutex);
-	ieee80211_iterate_active_interfaces_atomic(
-		hw, IEEE80211_IFACE_ITER_NORMAL,
-		ath10k_set_frag_iter, &ar_iter);
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+			   arvif->vdev_id, value);
+
+		ret = ath10k_mac_set_frag(arvif, value);
+		if (ret) {
+			ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+				    arvif->vdev_id, ret);
+			break;
+		}
+	}
 	mutex_unlock(&ar->conf_mutex);
 
-	return ar_iter.ret;
+	return ret;
 }
 
 static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
@@ -2836,8 +2959,7 @@
 			bool empty;
 
 			spin_lock_bh(&ar->htt.tx_lock);
-			empty = bitmap_empty(ar->htt.used_msdu_ids,
-					     ar->htt.max_num_pending_tx);
+			empty = (ar->htt.num_pending_tx == 0);
 			spin_unlock_bh(&ar->htt.tx_lock);
 
 			skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3448,10 @@
 			IEEE80211_HW_WANT_MONITOR_VIF |
 			IEEE80211_HW_AP_LINK_PS;
 
+	/* An MSDU can have an HTT TX fragment pushed in front. The additional
+	 * 4 bytes are used for padding/alignment if necessary. */
+	ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
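A quick check of the reservation above, assuming the common two-word (paddr, len) fragment descriptor layout; that layout is an assumption here, not shown in this diff:

#include <stdint.h>
#include <stdio.h>

struct htt_data_tx_desc_frag {	/* assumed layout: two 32-bit LE words */
	uint32_t paddr;
	uint32_t len;
};

int main(void)
{
	size_t headroom = sizeof(struct htt_data_tx_desc_frag) * 2 + 4;

	printf("extra_tx_headroom += %zu\n", headroom);	/* 20 bytes */
	return 0;
}
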
 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
 		ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
 
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6fce9bf..ba10219 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@
 void ath10k_reset_scan(unsigned long ptr);
 void ath10k_offchan_tx_purge(struct ath10k *ar);
 void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
 void ath10k_halt(struct ath10k *ar);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef5..9e86a81 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
-#define QCA988X_1_0_DEVICE_ID	(0xabcd)
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
-	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{0}
 };
@@ -50,9 +48,9 @@
 
 static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 					     int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
 static void ath10k_pci_device_reset(struct ath10k *ar);
 static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@
 static void ath10k_pci_stop_intr(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
-	/* host->target HTC control and raw streams */
-	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
-	/* could be moved to share CE3 */
-	/* target->host HTT + HTC control */
-	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
-	/* target->host WMI */
-	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
-	/* host->target WMI */
-	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
-	/* host->target HTT */
-	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
-		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
-	/* unused */
-	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-	/* Target autonomous hif_memcpy */
-	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-	/* ce_diag, the Diagnostic Window */
-	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 512,
+		.dest_nentries = 512,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 32,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE5: unused */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: ce_diag, the Diagnostic Window */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 2,
+		.src_sz_max = DIAG_TRANSFER_LIMIT,
+		.dest_nentries = 2,
+	},
 };
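
The table above trades positional initializers for designated ones: each value stays tied to a field name, unnamed fields default to zero, and a future reordering of struct ce_attr cannot silently shuffle meanings. A standalone illustration with a stand-in struct:

#include <stdio.h>

struct attr {	/* stand-in for struct ce_attr */
	unsigned int flags;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
};

/* positional: meaning depends on field order staying fixed */
static const struct attr ce0_old = { 0, 16, 256, 0 };

/* designated: each value is tied to its field by name */
static const struct attr ce0_new = {
	.flags		= 0,
	.src_nentries	= 16,
	.src_sz_max	= 256,
	.dest_nentries	= 0,
};

int main(void)
{
	printf("%u %u\n", ce0_old.src_sz_max, ce0_new.src_sz_max);
	return 0;
}
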
 
 /* Target firmware's Copy Engine configuration. */
 static const struct ce_pipe_config target_ce_config_wlan[] = {
-	/* host->target HTC control and raw streams */
-	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
-	/* target->host HTT + HTC control */
-	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
-	/* target->host WMI */
-	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* host->target WMI */
-	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* host->target HTT */
-	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.pipenum = 0,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 256,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.pipenum = 1,
+		.pipedir = PIPEDIR_IN,
+		.nentries = 32,
+		.nbytes_max = 512,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.pipenum = 2,
+		.pipedir = PIPEDIR_IN,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.pipenum = 3,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.pipenum = 4,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 256,
+		.nbytes_max = 256,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
 	/* NB: 50% of src nentries, since tx has 2 frags */
-	/* unused */
-	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* Reserved for target autonomous hif_memcpy */
-	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+	/* CE5: unused */
+	{
+		.pipenum = 5,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE6: Reserved for target autonomous hif_memcpy */
+	{
+		.pipenum = 6,
+		.pipedir = PIPEDIR_INOUT,
+		.nentries = 32,
+		.nbytes_max = 4096,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
 	/* CE7 used only by Host */
 };
 
@@ -114,7 +214,7 @@
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
 	unsigned int id;
 	unsigned int flags;
-	struct ce_state *ce_diag;
+	struct ath10k_ce_pipe *ce_diag;
 	/* Host buffer address in CE space */
 	u32 ce_data;
 	dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
 	unsigned int id;
 	unsigned int flags;
-	struct ce_state *ce_diag;
+	struct ath10k_ce_pipe *ce_diag;
 	void *data_buf = NULL;
 	u32 ce_data;	/* Host buffer address in CE space */
 	dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@
 		ath10k_warn("Unable to wakeup target\n");
 }
 
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@
 	atomic_inc(&ar_pci->keep_awake_count);
 
 	if (ar_pci->verified_awake)
-		return;
+		return 0;
 
 	for (;;) {
 		if (ath10k_pci_target_is_awake(ar)) {
 			ar_pci->verified_awake = true;
-			break;
+			return 0;
 		}
 
 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
-			ath10k_warn("target takes too long to wake up (awake count %d)\n",
+			ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+				    PCIE_WAKE_TIMEOUT,
 				    atomic_read(&ar_pci->keep_awake_count));
-			break;
+			return -ETIMEDOUT;
 		}
 
 		udelay(curr_delay);
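
With the void-to-int conversion above, callers can now see -ETIMEDOUT instead of a warning alone. A standalone sketch of a bounded poll loop of this shape (POSIX usleep, made-up wake condition, and the delay-growth step is an assumption, not taken from this diff):

#include <stdio.h>
#include <unistd.h>

#define WAKE_TIMEOUT_US	10000	/* stand-in for PCIE_WAKE_TIMEOUT */

static int target_is_awake(int polls)
{
	return polls > 3;	/* made-up wake condition */
}

static int wait_for_wake(void)
{
	unsigned int curr_delay = 5, tot_delay = 0;
	int polls = 0;

	for (;;) {
		if (target_is_awake(polls++))
			return 0;
		if (tot_delay > WAKE_TIMEOUT_US)
			return -1;	/* -ETIMEDOUT in the driver */
		usleep(curr_delay);
		tot_delay += curr_delay;
		if (curr_delay < 50)	/* delay growth: assumed */
			curr_delay += 5;
	}
}

int main(void)
{
	printf("wake: %d\n", wait_for_wake());
	return 0;
}
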
@@ -493,7 +594,7 @@
  * FIXME: Handle OOM properly.
  */
 static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k_pci_compl *compl = NULL;
 
@@ -511,39 +612,28 @@
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
-				    void *transfer_context,
-				    u32 ce_data,
-				    unsigned int nbytes,
-				    unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
 	struct ath10k_pci_compl *compl;
-	bool process = false;
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
 
-	do {
-		/*
-		 * For the send completion of an item in sendlist, just
-		 * increment num_sends_allowed. The upper layer callback will
-		 * be triggered when last fragment is done with send.
-		 */
-		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
-			spin_lock_bh(&pipe_info->pipe_lock);
-			pipe_info->num_sends_allowed++;
-			spin_unlock_bh(&pipe_info->pipe_lock);
-			continue;
-		}
-
+	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+					     &ce_data, &nbytes,
+					     &transfer_id) == 0) {
 		compl = get_free_compl(pipe_info);
 		if (!compl)
 			break;
 
-		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+		compl->state = ATH10K_PCI_COMPL_SEND;
 		compl->ce_state = ce_state;
 		compl->pipe_info = pipe_info;
-		compl->transfer_context = transfer_context;
+		compl->skb = transfer_context;
 		compl->nbytes = nbytes;
 		compl->transfer_id = transfer_id;
 		compl->flags = 0;
@@ -554,46 +644,36 @@
 		spin_lock_bh(&ar_pci->compl_lock);
 		list_add_tail(&compl->list, &ar_pci->compl_process);
 		spin_unlock_bh(&ar_pci->compl_lock);
-
-		process = true;
-	} while (ath10k_ce_completed_send_next(ce_state,
-							   &transfer_context,
-							   &ce_data, &nbytes,
-							   &transfer_id) == 0);
-
-	/*
-	 * If only some of the items within a sendlist have completed,
-	 * don't invoke completion processing until the entire sendlist
-	 * has been sent.
-	 */
-	if (!process)
-		return;
+	}
 
 	ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
-				    void *transfer_context, u32 ce_data,
-				    unsigned int nbytes,
-				    unsigned int transfer_id,
-				    unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
 	struct ath10k_pci_compl *compl;
 	struct sk_buff *skb;
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
 
-	do {
+	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+					     &ce_data, &nbytes, &transfer_id,
+					     &flags) == 0) {
 		compl = get_free_compl(pipe_info);
 		if (!compl)
 			break;
 
-		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+		compl->state = ATH10K_PCI_COMPL_RECV;
 		compl->ce_state = ce_state;
 		compl->pipe_info = pipe_info;
-		compl->transfer_context = transfer_context;
+		compl->skb = transfer_context;
 		compl->nbytes = nbytes;
 		compl->transfer_id = transfer_id;
 		compl->flags = flags;
@@ -608,12 +688,7 @@
 		spin_lock_bh(&ar_pci->compl_lock);
 		list_add_tail(&compl->list, &ar_pci->compl_process);
 		spin_unlock_bh(&ar_pci->compl_lock);
-
-	} while (ath10k_ce_completed_recv_next(ce_state,
-							   &transfer_context,
-							   &ce_data, &nbytes,
-							   &transfer_id,
-							   &flags) == 0);
+	}
 
 	ath10k_pci_process_ce(ar);
 }
@@ -625,15 +700,12 @@
 {
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-	struct ce_state *ce_hdl = pipe_info->ce_hdl;
-	struct ce_sendlist sendlist;
+	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
 	unsigned int len;
 	u32 flags = 0;
 	int ret;
 
-	memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
 	len = min(bytes, nbuf->len);
 	bytes -= len;
 
@@ -648,19 +720,8 @@
 			"ath10k tx: data: ",
 			nbuf->data, nbuf->len);
 
-	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
-	/* Make sure we have resources to handle this request */
-	spin_lock_bh(&pipe_info->pipe_lock);
-	if (!pipe_info->num_sends_allowed) {
-		ath10k_warn("Pipe: %d is full\n", pipe_id);
-		spin_unlock_bh(&pipe_info->pipe_lock);
-		return -ENOSR;
-	}
-	pipe_info->num_sends_allowed--;
-	spin_unlock_bh(&pipe_info->pipe_lock);
-
-	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+			     flags);
 	if (ret)
 		ath10k_warn("CE send failed: %p\n", nbuf);
 
@@ -670,14 +731,7 @@
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
-	int ret;
-
-	spin_lock_bh(&pipe_info->pipe_lock);
-	ret = pipe_info->num_sends_allowed;
-	spin_unlock_bh(&pipe_info->pipe_lock);
-
-	return ret;
+	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
 }
 
 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -764,9 +818,9 @@
 static int ath10k_pci_start_ce(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_diag = ar_pci->ce_diag;
+	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
 	const struct ce_attr *attr;
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	struct ath10k_pci_compl *compl;
 	int i, pipe_num, completions, disable_interrupts;
 
@@ -792,7 +846,6 @@
 						   ath10k_pci_ce_send_done,
 						   disable_interrupts);
 			completions += attr->src_nentries;
-			pipe_info->num_sends_allowed = attr->src_nentries - 1;
 		}
 
 		if (attr->dest_nentries) {
@@ -805,15 +858,14 @@
 			continue;
 
 		for (i = 0; i < completions; i++) {
-			compl = kmalloc(sizeof(struct ath10k_pci_compl),
-					GFP_KERNEL);
+			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
 			if (!compl) {
 				ath10k_warn("No memory for completion state\n");
 				ath10k_pci_stop_ce(ar);
 				return -ENOMEM;
 			}
 
-			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+			compl->state = ATH10K_PCI_COMPL_FREE;
 			list_add_tail(&compl->list, &pipe_info->compl_free);
 		}
 	}
@@ -840,7 +892,7 @@
 	 * their associated resources */
 	spin_lock_bh(&ar_pci->compl_lock);
 	list_for_each_entry(compl, &ar_pci->compl_process, list) {
-		skb = (struct sk_buff *)compl->transfer_context;
+		skb = compl->skb;
 		ATH10K_SKB_CB(skb)->is_aborted = true;
 	}
 	spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +902,7 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_compl *compl, *tmp;
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	struct sk_buff *netbuf;
 	int pipe_num;
 
@@ -861,7 +913,7 @@
 
 	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
 		list_del(&compl->list);
-		netbuf = (struct sk_buff *)compl->transfer_context;
+		netbuf = compl->skb;
 		dev_kfree_skb_any(netbuf);
 		kfree(compl);
 	}
@@ -912,12 +964,14 @@
 		list_del(&compl->list);
 		spin_unlock_bh(&ar_pci->compl_lock);
 
-		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+		switch (compl->state) {
+		case ATH10K_PCI_COMPL_SEND:
 			cb->tx_completion(ar,
-					  compl->transfer_context,
+					  compl->skb,
 					  compl->transfer_id);
 			send_done = 1;
-		} else {
+			break;
+		case ATH10K_PCI_COMPL_RECV:
 			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
 			if (ret) {
 				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +979,7 @@
 				break;
 			}
 
-			skb = (struct sk_buff *)compl->transfer_context;
+			skb = compl->skb;
 			nbytes = compl->nbytes;
 
 			ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +998,23 @@
 					    nbytes,
 					    skb->len + skb_tailroom(skb));
 			}
+			break;
+		case ATH10K_PCI_COMPL_FREE:
+			ath10k_warn("free completion cannot be processed\n");
+			break;
+		default:
+			ath10k_warn("invalid completion state (%d)\n",
+				    compl->state);
+			break;
 		}
 
-		compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+		compl->state = ATH10K_PCI_COMPL_FREE;
 
 		/*
 		 * Add completion back to the pipe's free list.
 		 */
 		spin_lock_bh(&compl->pipe_info->pipe_lock);
 		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-		compl->pipe_info->num_sends_allowed += send_done;
 		spin_unlock_bh(&compl->pipe_info->pipe_lock);
 	}
 
@@ -1037,12 +1098,12 @@
 						 &dl_is_polled);
 }
 
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 				   int num)
 {
 	struct ath10k *ar = pipe_info->hif_ce_state;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = pipe_info->ce_hdl;
+	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
 	struct sk_buff *skb;
 	dma_addr_t ce_data;
 	int i, ret = 0;
@@ -1097,7 +1158,7 @@
 static int ath10k_pci_post_rx(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
 	int pipe_num, ret = 0;
 
@@ -1147,11 +1208,11 @@
 	return 0;
 }
 
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	struct ce_state *ce_hdl;
+	struct ath10k_ce_pipe *ce_hdl;
 	u32 buf_sz;
 	struct sk_buff *netbuf;
 	u32 ce_data;
@@ -1179,11 +1240,11 @@
 	}
 }
 
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	struct ce_state *ce_hdl;
+	struct ath10k_ce_pipe *ce_hdl;
 	struct sk_buff *netbuf;
 	u32 ce_data;
 	unsigned int nbytes;
@@ -1206,15 +1267,14 @@
 
 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
 					  &ce_data, &nbytes, &id) == 0) {
-		if (netbuf != CE_SENDLIST_ITEM_CTXT)
-			/*
-			 * Indicate the completion to higer layer to free
-			 * the buffer
-			 */
-			ATH10K_SKB_CB(netbuf)->is_aborted = true;
-			ar_pci->msg_callbacks_current.tx_completion(ar,
-								    netbuf,
-								    id);
+		/*
+		 * Indicate the completion to the higher layer to free
+		 * the buffer
+		 */
+		ATH10K_SKB_CB(netbuf)->is_aborted = true;
+		ar_pci->msg_callbacks_current.tx_completion(ar,
+							    netbuf,
+							    id);
 	}
 }
 
@@ -1232,7 +1292,7 @@
 	int pipe_num;
 
 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
-		struct hif_ce_pipe_info *pipe_info;
+		struct ath10k_pci_pipe *pipe_info;
 
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1303,7 @@
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	int pipe_num;
 
 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1353,10 @@
 					   void *resp, u32 *resp_len)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
-	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
 	dma_addr_t req_paddr = 0;
 	dma_addr_t resp_paddr = 0;
 	struct bmi_xfer xfer = {};
@@ -1378,13 +1440,16 @@
 	return ret;
 }
 
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+
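+	/* the per-pipe callback no longer receives the completion
+	 * arguments, so pop the completed descriptor from the CE ring */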
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id))
+		return;
 
 	if (xfer->wait_for_resp)
 		return;
@@ -1392,14 +1457,17 @@
 	complete(&xfer->done);
 }
 
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id,
-				     unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id, &flags))
+		return;
 
 	if (!xfer->wait_for_resp) {
 		ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1747,7 @@
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
 	int pipe_num;
 
@@ -1895,7 +1963,7 @@
 
 static void ath10k_pci_ce_tasklet(unsigned long ptr)
 {
-	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
 	struct ath10k_pci *ar_pci = pipe->ar_pci;
 
 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2280,13 @@
 
 static void ath10k_pci_device_reset(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *mem = ar_pci->mem;
 	int i;
 	u32 val;
 
 	if (!SOC_GLOBAL_RESET_ADDRESS)
 		return;
 
-	if (!mem)
-		return;
-
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
 			       PCIE_SOC_WAKE_V_MASK);
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
 		if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2295,12 @@
 	}
 
 	/* Put Target, including PCIe, into RESET. */
-	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
 	val |= 1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
 					  RTC_STATE_COLD_RESET_MASK)
 			break;
 		msleep(1);
@@ -2245,16 +2308,16 @@
 
 	/* Pull Target, including PCIe, out of RESET. */
 	val &= ~1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
 					    RTC_STATE_COLD_RESET_MASK))
 			break;
 		msleep(1);
 	}
 
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2330,10 @@
 
 		switch (i) {
 		case ATH10K_PCI_FEATURE_MSI_X:
-			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
-			break;
-		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
 			break;
 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
 			break;
 		}
 	}
@@ -2286,7 +2346,7 @@
 	int ret = 0;
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	u32 lcr_val;
+	u32 lcr_val, chip_id;
 
 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
@@ -2298,15 +2358,12 @@
 	ar_pci->dev = &pdev->dev;
 
 	switch (pci_dev->device) {
-	case QCA988X_1_0_DEVICE_ID:
-		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
-		break;
 	case QCA988X_2_0_DEVICE_ID:
 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
 		break;
 	default:
 		ret = -ENODEV;
-		ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
 		goto err_ar_pci;
 	}
 
@@ -2322,10 +2379,6 @@
 		goto err_ar_pci;
 	}
 
-	/* Enable QCA988X_1.0 HW workarounds */
-	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
-		spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
 	ar_pci->ar = ar;
 	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
 	atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2448,20 @@
 
 	spin_lock_init(&ar_pci->ce_lock);
 
-	ar_pci->cacheline_sz = dma_get_cache_alignment();
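+	/* the chip id lives in SoC register space, so the target must be
+	 * kept awake while it is read */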
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("Failed to get chip id: %d\n", ret);
+		return ret;
+	}
 
-	ret = ath10k_core_register(ar);
+	chip_id = ath10k_pci_read32(ar,
+				    RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+	ath10k_do_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+
+	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
 		ath10k_err("could not register driver core (%d)\n", ret);
 		goto err_iomap;
@@ -2414,7 +2478,6 @@
 err_device:
 	pci_disable_device(pdev);
 err_ar:
-	pci_set_drvdata(pdev, NULL);
 	ath10k_core_destroy(ar);
 err_ar_pci:
 	/* call HIF PCI free here */
@@ -2442,7 +2505,6 @@
 
 	ath10k_core_unregister(ar);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ar_pci->mem);
 	pci_release_region(pdev, BAR_NUM);
 	pci_clear_master(pdev);
@@ -2483,9 +2545,6 @@
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb33..52fb7b9 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@
 	u32 resp_len;
 };
 
+enum ath10k_pci_compl_state {
+	ATH10K_PCI_COMPL_FREE = 0,
+	ATH10K_PCI_COMPL_SEND,
+	ATH10K_PCI_COMPL_RECV,
+};
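+
+/* A completion slot cycles FREE -> SEND/RECV -> FREE: free slots sit on
+ * the pipe's compl_free list, move to ar_pci->compl_process while
+ * pending, and are returned to compl_free once processed. */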
+
 struct ath10k_pci_compl {
 	struct list_head list;
-	int send_or_recv;
-	struct ce_state *ce_state;
-	struct hif_ce_pipe_info *pipe_info;
-	void *transfer_context;
+	enum ath10k_pci_compl_state state;
+	struct ath10k_ce_pipe *ce_state;
+	struct ath10k_pci_pipe *pipe_info;
+	struct sk_buff *skb;
 	unsigned int nbytes;
 	unsigned int transfer_id;
 	unsigned int flags;
 };
 
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
 /*
  * PCI-specific Target state
  *
@@ -152,17 +153,16 @@
 
 enum ath10k_pci_features {
 	ATH10K_PCI_FEATURE_MSI_X		= 0,
-	ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND	= 1,
-	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 2,
+	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 1,
 
 	/* keep last */
 	ATH10K_PCI_FEATURE_COUNT
 };
 
 /* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
 	/* Handle of underlying Copy Engine */
-	struct ce_state *ce_hdl;
+	struct ath10k_ce_pipe *ce_hdl;
 
 	/* Our pipe number; facilitates use of pipe_info ptrs. */
 	u8 pipe_num;
@@ -178,9 +178,6 @@
 	/* List of free CE completion slots */
 	struct list_head compl_free;
 
-	/* Limit the number of outstanding send requests. */
-	int num_sends_allowed;
-
 	struct ath10k_pci *ar_pci;
 	struct tasklet_struct intr;
 };
@@ -190,7 +187,6 @@
 	struct device *dev;
 	struct ath10k *ar;
 	void __iomem *mem;
-	int cacheline_sz;
 
 	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
 
@@ -219,7 +215,7 @@
 
 	bool compl_processing;
 
-	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
 	struct ath10k_hif_cb msg_callbacks_current;
 
@@ -227,16 +223,13 @@
 	u32 fw_indicator_address;
 
 	/* Copy Engine used for Diagnostic Accesses */
-	struct ce_state *ce_diag;
+	struct ath10k_ce_pipe *ce_diag;
 
 	/* FIXME: document what this really protects */
 	spinlock_t ce_lock;
 
 	/* Map CE id to ce_state */
-	struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
-	/* makes sure that dummy reads are atomic */
-	spinlock_t hw_v1_workaround_lock;
+	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +237,18 @@
 	return ar->hif.priv;
 }
 
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
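+/* The register helpers now take struct ath10k and resolve the mapped BAR
+ * themselves; addresses are offsets into the PCIe local register space. */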
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 {
-	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
 }
 
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 {
-	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
 }
 
 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +307,8 @@
 				      u32 value)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *addr = ar_pci->mem;
 
-	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
-		unsigned long irq_flags;
-
-		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
-		ioread32(addr+offset+4); /* 3rd read prior to write */
-		ioread32(addr+offset+4); /* 2nd read prior to write */
-		ioread32(addr+offset+4); /* 1st read prior to write */
-		iowrite32(value, addr+offset);
-
-		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
-				       irq_flags);
-	} else {
-		iowrite32(value, addr+offset);
-	}
+	iowrite32(value, ar_pci->mem + offset);
 }
 
 static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +318,17 @@
 	return ioread32(ar_pci->mem + offset);
 }
 
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
 void ath10k_do_pci_sleep(struct ath10k *ar);
 
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
 	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
-		ath10k_do_pci_wake(ar);
+		return ath10k_do_pci_wake(ar);
+
+	return 0;
 }
 
 static inline void ath10k_pci_sleep(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8..1c584c4 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+/* The decapped header (rx_hdr_status) contains the following:
+ *  a) 802.11 header
+ *  [padding to 4 bytes]
+ *  b) HW crypto parameter
+ *     - 0 bytes for no security
+ *     - 4 bytes for WEP
+ *     - 8 bytes for TKIP, AES
+ *  [padding to 4 bytes]
+ *  c) A-MSDU subframe header (14 bytes) if applicable
+ *  d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of an A-MSDU, only the first frame in the sequence contains (a)
+ * and (b). */
 enum rx_msdu_decap_format {
-	RX_MSDU_DECAP_RAW           = 0,
-	RX_MSDU_DECAP_NATIVE_WIFI   = 1,
+	RX_MSDU_DECAP_RAW = 0,
+
+	/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+	 * htt_rx_desc contains the original decapped 802.11 header. */
+	RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+	/* Payload contains an ethernet header (struct ethhdr). */
 	RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+	/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+	 * total), followed by an RFC1042 header (8 bytes). */
 	RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
 
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806b..90817dd 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-	TP_PROTO(int id, void *buf, size_t buf_len),
+	TP_PROTO(int id, void *buf, size_t buf_len, int ret),
 
-	TP_ARGS(id, buf, buf_len),
+	TP_ARGS(id, buf, buf_len, ret),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, id)
 		__field(size_t, buf_len)
 		__dynamic_array(u8, buf, buf_len)
+		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		__entry->id = id;
 		__entry->buf_len = buf_len;
+		__entry->ret = ret;
 		memcpy(__get_dynamic_array(buf), buf, buf_len);
 	),
 
 	TP_printk(
-		"id %d len %zu",
+		"id %d len %zu ret %d",
 		__entry->id,
-		__entry->buf_len
+		__entry->buf_len,
+		__entry->ret
 	)
 );
 
@@ -158,6 +161,27 @@
 	)
 );
 
+TRACE_EVENT(ath10k_htt_stats,
+	TP_PROTO(void *buf, size_t buf_len),
+
+	TP_ARGS(buf, buf_len),
+
+	TP_STRUCT__entry(
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"len %zu",
+		__entry->buf_len
+	)
+);
+
 #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
 
 /* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6fae..5ae373a 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,70 +44,15 @@
 	spin_unlock_bh(&ar->data_lock);
 }
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			  const struct htt_tx_done *tx_done)
 {
 	struct device *dev = htt->ar->dev;
 	struct ieee80211_tx_info *info;
-	struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
-	struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+	struct ath10k_skb_cb *skb_cb;
+	struct sk_buff *msdu;
 	int ret;
 
-	if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
-		return;
-
-	ATH10K_SKB_CB(txdesc)->htt.refcount--;
-
-	if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
-		return;
-
-	if (txfrag) {
-		ret = ath10k_skb_unmap(dev, txfrag);
-		if (ret)
-			ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
-		dev_kfree_skb_any(txfrag);
-	}
-
-	ret = ath10k_skb_unmap(dev, msdu);
-	if (ret)
-		ath10k_warn("data skb unmap failed (%d)\n", ret);
-
-	ath10k_report_offchan_tx(htt->ar, msdu);
-
-	info = IEEE80211_SKB_CB(msdu);
-	memset(&info->status, 0, sizeof(info->status));
-
-	if (ATH10K_SKB_CB(txdesc)->htt.discard) {
-		ieee80211_free_txskb(htt->ar->hw, msdu);
-		goto exit;
-	}
-
-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
-		info->flags |= IEEE80211_TX_STAT_ACK;
-
-	if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
-		info->flags &= ~IEEE80211_TX_STAT_ACK;
-
-	ieee80211_tx_status(htt->ar->hw, msdu);
-	/* we do not own the msdu anymore */
-
-exit:
-	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
-	ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
-	__ath10k_htt_tx_dec_pending(htt);
-	if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
-		wake_up(&htt->empty_tx_wq);
-	spin_unlock_bh(&htt->tx_lock);
-
-	dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
-			      const struct htt_tx_done *tx_done)
-{
-	struct sk_buff *txdesc;
-
 	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
 		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
 
@@ -117,12 +62,42 @@
 		return;
 	}
 
-	txdesc = htt->pending_tx[tx_done->msdu_id];
+	msdu = htt->pending_tx[tx_done->msdu_id];
+	skb_cb = ATH10K_SKB_CB(msdu);
 
-	ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
-	ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
+	ret = ath10k_skb_unmap(dev, msdu);
+	if (ret)
+		ath10k_warn("data skb unmap failed (%d)\n", ret);
 
-	ath10k_txrx_tx_unref(htt, txdesc);
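+	/* strip any HTT fragment header and padding recorded in the skb
+	 * cb, restoring the original msdu before reporting it upwards */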
+	if (skb_cb->htt.frag_len)
+		skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
+	ath10k_report_offchan_tx(htt->ar, msdu);
+
+	info = IEEE80211_SKB_CB(msdu);
+
+	if (tx_done->discard) {
+		ieee80211_free_txskb(htt->ar->hw, msdu);
+		goto exit;
+	}
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	if (tx_done->no_ack)
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+	ieee80211_tx_status(htt->ar->hw, msdu);
+	/* we do not own the msdu anymore */
+
+exit:
+	spin_lock_bh(&htt->tx_lock);
+	htt->pending_tx[tx_done->msdu_id] = NULL;
+	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+	__ath10k_htt_tx_dec_pending(htt);
+	if (htt->num_pending_tx == 0)
+		wake_up(&htt->empty_tx_wq);
+	spin_unlock_bh(&htt->tx_lock);
 }
 
 static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@
 		   status->vht_nss,
 		   status->freq,
 		   status->band);
+	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+			info->skb->data, info->skb->len);
 
 	ieee80211_rx(ar->hw, info->skb);
 }
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a..356dc9c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
 
 #include "htt.h"
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
-			      const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			  const struct htt_tx_done *tx_done);
 void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c7..ccf3597 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,29 +23,470 @@
 #include "wmi.h"
 #include "mac.h"
 
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
-	int ret;
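+/* Each firmware branch numbers its WMI commands differently, so commands
+ * are dispatched through a per-branch map (ar->wmi.cmd); entries set to
+ * WMI_CMD_UNSUPPORTED are rejected at send time. */
+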
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+	.init_cmdid = WMI_INIT_CMDID,
+	.start_scan_cmdid = WMI_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+	.network_list_offload_config_cmdid =
+				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+	.echo_cmdid = WMI_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+};
 
-	lockdep_assert_held(&ar->conf_mutex);
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+	.init_cmdid = WMI_10X_INIT_CMDID,
+	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+	.ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10X_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+};
 
-	if (ar->state == ATH10K_STATE_WEDGED) {
-		ath10k_warn("wmi flush skipped - device is wedged anyway\n");
-		return;
-	}
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_VDEV_PARAM_WDS,
+	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection =	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_VDEV_PARAM_SGI,
+	.ldpc = WMI_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+					WMI_VDEV_PARAM_UNSUPPORTED,
+};
 
-	ret = wait_event_timeout(ar->wmi.wq,
-				 atomic_read(&ar->wmi.pending_tx_count) == 0,
-				 5*HZ);
-	if (atomic_read(&ar->wmi.pending_tx_count) == 0)
-		return;
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10X_VDEV_PARAM_WDS,
+	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10X_VDEV_PARAM_SGI,
+	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10X_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
 
-	if (ret == 0)
-		ret = -ETIMEDOUT;
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dcs = WMI_PDEV_PARAM_DCS,
+	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+};
 
-	if (ret < 0)
-		ath10k_warn("wmi flush failed (%d)\n", ret);
-}
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	.dcs = WMI_10X_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
@@ -85,18 +526,14 @@
 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
 	dev_kfree_skb(skb);
-
-	if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
-		wake_up(&ar->wmi.wq);
 }
 
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
-			       enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+				      u32 cmd_id)
 {
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
 	struct wmi_cmd_hdr *cmd_hdr;
-	int status;
+	int ret;
 	u32 cmd = 0;
 
 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +544,146 @@
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
-	if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
-	    WMI_MAX_PENDING_TX_COUNT) {
-		/* avoid using up memory when FW hangs */
-		atomic_dec(&ar->wmi.pending_tx_count);
-		return -EBUSY;
-	}
-
 	memset(skb_cb, 0, sizeof(*skb_cb));
+	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
 
-	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
-
-	status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-	if (status) {
-		dev_kfree_skb_any(skb);
-		atomic_dec(&ar->wmi.pending_tx_count);
-		return status;
-	}
+	if (ret)
+		goto err_pull;
 
 	return 0;
+
+err_pull:
+	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+	return ret;
+}
+
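+/* Beacons are parked on arvif->beacon and sent opportunistically when TX
+ * credits become available; the skb is only freed once the send succeeds,
+ * so a still-pending beacon can be detected as an SWBA overrun. */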
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+	struct wmi_bcn_tx_arg arg = {0};
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->data_lock);
+
+	if (arvif->beacon == NULL)
+		return;
+
+	arg.vdev_id = arvif->vdev_id;
+	arg.tx_rate = 0;
+	arg.tx_power = 0;
+	arg.bcn = arvif->beacon->data;
+	arg.bcn_len = arvif->beacon->len;
+
+	ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+	if (ret)
+		return;
+
+	dev_kfree_skb_any(arvif->beacon);
+	arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->data_lock);
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_wmi_tx_beacons_iter,
+						   NULL);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+	/* try to send pending beacons first. they take priority */
+	ath10k_wmi_tx_beacons_nowait(ar);
+
+	wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+			       u32 cmd_id)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (cmd_id == WMI_CMD_UNSUPPORTED) {
+		ath10k_warn("wmi command %d is not supported by firmware\n",
+			    cmd_id);
+		return ret;
+	}
+
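+	/* retry while HTC reports -EAGAIN (out of TX credits); credits
+	 * are replenished via the EP TX credits callback, which wakes
+	 * tx_credits_wq */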
+	wait_event_timeout(ar->wmi.tx_credits_wq, ({
+		/* try to send pending beacons first. they take priority */
+		ath10k_wmi_tx_beacons_nowait(ar);
+
+		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+		(ret != -EAGAIN);
+	}), 3*HZ);
+
+	if (ret)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
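+/* Management frames are sent over WMI: the 802.11 frame is copied into a
+ * wmi_mgmt_tx_cmd, with the total length rounded up to a 4-byte boundary. */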
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+	int ret = 0;
+	struct wmi_mgmt_tx_cmd *cmd;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *wmi_skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int len;
+	u16 fc;
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = le16_to_cpu(hdr->frame_control);
+
+	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+		return -EINVAL;
+
+	len = sizeof(cmd->hdr) + skb->len;
+	len = round_up(len, 4);
+
+	wmi_skb = ath10k_wmi_alloc_skb(len);
+	if (!wmi_skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+
+	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+	cmd->hdr.tx_rate = 0;
+	cmd->hdr.tx_power = 0;
+	cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+
+	memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+	memcpy(cmd->buf, skb->data, skb->len);
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+		   fc & IEEE80211_FCTL_STYPE);
+
+	/* Send the management frame buffer to the target */
+	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	/* TODO: report tx status to mac80211 - temporary just ACK */
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	ieee80211_tx_status_irqsafe(ar->hw, skb);
+
+	return ret;
 }
 
 static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +873,9 @@
 
 static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+	struct wmi_mgmt_rx_event_v1 *ev_v1;
+	struct wmi_mgmt_rx_event_v2 *ev_v2;
+	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr;
 	u32 rx_status;
@@ -325,13 +885,24 @@
 	u32 rate;
 	u32 buf_len;
 	u16 fc;
+	int pull_len;
 
-	channel   = __le32_to_cpu(event->hdr.channel);
-	buf_len   = __le32_to_cpu(event->hdr.buf_len);
-	rx_status = __le32_to_cpu(event->hdr.status);
-	snr       = __le32_to_cpu(event->hdr.snr);
-	phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
-	rate	  = __le32_to_cpu(event->hdr.rate);
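+	/* firmware advertising ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX uses the
+	 * extended v2 rx header (v1 plus rssi_ctl values), so pick the
+	 * matching layout and header length to pull off below */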
+	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+		ev_hdr = &ev_v2->hdr.v1;
+		pull_len = sizeof(*ev_v2);
+	} else {
+		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+		ev_hdr = &ev_v1->hdr;
+		pull_len = sizeof(*ev_v1);
+	}
+
+	channel   = __le32_to_cpu(ev_hdr->channel);
+	buf_len   = __le32_to_cpu(ev_hdr->buf_len);
+	rx_status = __le32_to_cpu(ev_hdr->status);
+	snr       = __le32_to_cpu(ev_hdr->snr);
+	phy_mode  = __le32_to_cpu(ev_hdr->phy_mode);
+	rate	  = __le32_to_cpu(ev_hdr->rate);
 
 	memset(status, 0, sizeof(*status));
 
@@ -358,7 +929,7 @@
 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
 	status->rate_idx = get_rate_idx(rate, status->band);
 
-	skb_pull(skb, sizeof(event->hdr));
+	skb_pull(skb, pull_len);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +1305,8 @@
 	int i = -1;
 	struct wmi_bcn_info *bcn_info;
 	struct ath10k_vif *arvif;
-	struct wmi_bcn_tx_arg arg;
 	struct sk_buff *bcn;
 	int vdev_id = 0;
-	int ret;
 
 	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
 
@@ -794,17 +1363,17 @@
 		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
 		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
 
-		arg.vdev_id = arvif->vdev_id;
-		arg.tx_rate = 0;
-		arg.tx_power = 0;
-		arg.bcn = bcn->data;
-		arg.bcn_len = bcn->len;
+		spin_lock_bh(&ar->data_lock);
+		if (arvif->beacon) {
+			ath10k_warn("SWBA overrun on vdev %d\n",
+				    arvif->vdev_id);
+			dev_kfree_skb_any(arvif->beacon);
+		}
 
-		ret = ath10k_wmi_beacon_send(ar, &arg);
-		if (ret)
-			ath10k_warn("could not send beacon (%d)\n", ret);
+		arvif->beacon = bcn;
 
-		dev_kfree_skb_any(bcn);
+		ath10k_wmi_tx_beacon_nowait(arvif);
+		spin_unlock_bh(&ar->data_lock);
 	}
 }
 
@@ -919,6 +1488,55 @@
 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
 }
 
+static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+					      struct sk_buff *skb)
+{
+	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
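+/* 10.X firmware can ask the host to donate memory at SERVICE_READY
+ * time; each DMA-coherent chunk allocated here is later handed back
+ * to the target in the WMI init command. */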
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+				      u32 num_units, u32 unit_len)
+{
+	dma_addr_t paddr;
+	u32 pool_size;
+	int idx = ar->wmi.num_mem_chunks;
+
+	pool_size = num_units * round_up(unit_len, 4);
+
+	if (!pool_size)
+		return -EINVAL;
+
+	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+							   pool_size,
+							   &paddr,
+							   GFP_ATOMIC);
+	if (!ar->wmi.mem_chunks[idx].vaddr) {
+		ath10k_warn("failed to allocate memory chunk\n");
+		return -ENOMEM;
+	}
+
+	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+	ar->wmi.mem_chunks[idx].paddr = paddr;
+	ar->wmi.mem_chunks[idx].len = pool_size;
+	ar->wmi.mem_chunks[idx].req_id = req_id;
+	ar->wmi.num_mem_chunks++;
+
+	return 0;
+}
+
 static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
 					      struct sk_buff *skb)
 {
@@ -943,6 +1561,10 @@
 	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
 	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
 
+	/* only manually set fw features when not using FW IE format */
+	if (ar->fw_api == 1 && ar->fw_version_build > 636)
+		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
 	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
 		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
 			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -987,6 +1609,108 @@
 	complete(&ar->wmi.service_ready);
 }
 
+static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+						  struct sk_buff *skb)
+{
+	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+	int ret;
+	struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev)) {
+		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+			    skb->len, sizeof(*ev));
+		return;
+	}
+
+	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+	ar->fw_version_major =
+		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+	}
+
+	ar->ath_common.regulatory.current_rd =
+		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+				      sizeof(ev->wmi_service_bitmap));
+
+	if (strlen(ar->hw->wiphy->fw_version) == 0) {
+		snprintf(ar->hw->wiphy->fw_version,
+			 sizeof(ar->hw->wiphy->fw_version),
+			 "%u.%u",
+			 ar->fw_version_major,
+			 ar->fw_version_minor);
+	}
+
+	num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+
+	if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+		ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
+			    num_mem_reqs);
+		return;
+	}
+
+	if (!num_mem_reqs)
+		goto exit;
+
+	ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+		   num_mem_reqs);
+
+	for (i = 0; i < num_mem_reqs; ++i) {
+		req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+		num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+		unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+		num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+
+		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+			/* number of units to allocate is number of
+			 * peers, 1 extra for self peer on target */
+			/* this needs to stay tied to the target's
+			 * configuration, otherwise host and target can
+			 * get out of sync */
+			num_units = TARGET_10X_NUM_PEERS + 1;
+		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+			num_units = TARGET_10X_NUM_VDEVS + 1;
+
+		ath10k_dbg(ATH10K_DBG_WMI,
+			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+			   req_id,
+			   __le32_to_cpu(ev->mem_reqs[i].num_units),
+			   num_unit_info,
+			   unit_size,
+			   num_units);
+
+		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+						unit_size);
+		if (ret)
+			return;
+	}
+
+exit:
+	ath10k_dbg(ATH10K_DBG_WMI,
+		   "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+		   __le32_to_cpu(ev->sw_version),
+		   __le32_to_cpu(ev->abi_version),
+		   __le32_to_cpu(ev->phy_capability),
+		   __le32_to_cpu(ev->ht_cap_info),
+		   __le32_to_cpu(ev->vht_cap_info),
+		   __le32_to_cpu(ev->vht_supp_mcs),
+		   __le32_to_cpu(ev->sys_cap_info),
+		   __le32_to_cpu(ev->num_mem_reqs),
+		   __le32_to_cpu(ev->num_rf_chains));
+
+	complete(&ar->wmi.service_ready);
+}
+
 static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1007,7 +1731,7 @@
 	return 0;
 }
 
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_event_id id;
@@ -1126,64 +1850,158 @@
 	dev_kfree_skb(skb);
 }
 
-static void ath10k_wmi_event_work(struct work_struct *work)
+static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct ath10k *ar = container_of(work, struct ath10k,
-					 wmi.wmi_event_work);
-	struct sk_buff *skb;
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10x_event_id id;
+	u16 len;
 
-	for (;;) {
-		skb = skb_dequeue(&ar->wmi.wmi_event_list);
-		if (!skb)
-			break;
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
-		ath10k_wmi_event_process(ar, skb);
-	}
-}
-
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
-	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
-	enum wmi_event_id event_id;
-
-	event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
-
-	/* some events require to be handled ASAP
-	 * thus can't be defered to a worker thread */
-	switch (event_id) {
-	case WMI_HOST_SWBA_EVENTID:
-	case WMI_MGMT_RX_EVENTID:
-		ath10k_wmi_event_process(ar, skb);
+	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 		return;
+
+	len = skb->len;
+
+	trace_ath10k_wmi_event(id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_10X_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10X_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10X_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10X_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10X_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10X_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	case WMI_10X_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10X_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10X_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10X_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10X_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
+	case WMI_10X_PROFILE_MATCH:
+		ath10k_wmi_event_profile_match(ar, skb);
+		break;
+	case WMI_10X_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10X_PDEV_QVIT_EVENTID:
+		ath10k_wmi_event_pdev_qvit(ar, skb);
+		break;
+	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+		ath10k_wmi_event_wlan_profile_data(ar, skb);
+		break;
+	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_measurement_report(ar, skb);
+		break;
+	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_tsf_measurement_report(ar, skb);
+		break;
+	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_error_report(ar, skb);
+		break;
+	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_wmi_event_wow_wakeup_host(ar, skb);
+		break;
+	case WMI_10X_DCS_INTERFERENCE_EVENTID:
+		ath10k_wmi_event_dcs_interference(ar, skb);
+		break;
+	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+		ath10k_wmi_event_pdev_tpc_config(ar, skb);
+		break;
+	case WMI_10X_INST_RSSI_STATS_EVENTID:
+		ath10k_wmi_event_inst_rssi_stats(ar, skb);
+		break;
+	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+		ath10k_wmi_event_vdev_standby_req(ar, skb);
+		break;
+	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+		ath10k_wmi_event_vdev_resume_req(ar, skb);
+		break;
+	case WMI_10X_SERVICE_READY_EVENTID:
+		ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+		break;
+	case WMI_10X_READY_EVENTID:
+		ath10k_wmi_ready_event_rx(ar, skb);
+		break;
 	default:
+		ath10k_warn("Unknown eventid: %d\n", id);
 		break;
 	}
 
-	skb_queue_tail(&ar->wmi.wmi_event_list, skb);
-	queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+	dev_kfree_skb(skb);
+}
+
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		ath10k_wmi_10x_process_rx(ar, skb);
+	else
+		ath10k_wmi_main_process_rx(ar, skb);
 }
 
 /* WMI Initialization functions */
 int ath10k_wmi_attach(struct ath10k *ar)
 {
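+	/* choose the per-firmware command and parameter ID maps once at
+	 * attach time so the rest of the driver can reference
+	 * ar->wmi.cmd->... without caring which WMI ABI is in use */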
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+		ar->wmi.cmd = &wmi_10x_cmd_map;
+		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+	} else {
+		ar->wmi.cmd = &wmi_cmd_map;
+		ar->wmi.vdev_param = &wmi_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_pdev_param_map;
+	}
+
 	init_completion(&ar->wmi.service_ready);
 	init_completion(&ar->wmi.unified_ready);
-	init_waitqueue_head(&ar->wmi.wq);
-
-	skb_queue_head_init(&ar->wmi.wmi_event_list);
-	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+	init_waitqueue_head(&ar->wmi.tx_credits_wq);
 
 	return 0;
 }
 
 void ath10k_wmi_detach(struct ath10k *ar)
 {
-	/* HTC should've drained the packets already */
-	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
-		ath10k_warn("there are still pending packets\n");
+	int i;
 
-	cancel_work_sync(&ar->wmi.wmi_event_work);
-	skb_queue_purge(&ar->wmi.wmi_event_list);
+	/* free the host memory chunks requested by firmware */
+	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+		dma_free_coherent(ar->dev,
+				  ar->wmi.mem_chunks[i].len,
+				  ar->wmi.mem_chunks[i].vaddr,
+				  ar->wmi.mem_chunks[i].paddr);
+	}
+
+	ar->wmi.num_mem_chunks = 0;
 }
 
 int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +2016,7 @@
 	/* these fields are the same for all service endpoints */
 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
 
 	/* connect to control service */
 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1234,7 +2053,8 @@
 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
 		   rd, rd2g, rd5g, ctl2g, ctl5g);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
 }
 
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1264,7 +2084,8 @@
 		   "wmi set channel mode %d freq %d\n",
 		   arg->mode, arg->freq);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_channel_cmdid);
 }
 
 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1279,7 +2100,7 @@
 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
 	cmd->suspend_opt = WMI_PDEV_SUSPEND;
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
 }
 
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1290,15 +2111,19 @@
 	if (skb == NULL)
 		return -ENOMEM;
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
 }
 
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
-			      u32 value)
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
 {
 	struct wmi_pdev_set_param_cmd *cmd;
 	struct sk_buff *skb;
 
+	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+		ath10k_warn("pdev param %d not supported by firmware\n", id);
+		return -EOPNOTSUPP;
+	}
+
 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
 	if (!skb)
 		return -ENOMEM;
@@ -1309,15 +2134,16 @@
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
 		   id, value);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
 }
 
-int ath10k_wmi_cmd_init(struct ath10k *ar)
+static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
 {
 	struct wmi_init_cmd *cmd;
 	struct sk_buff *buf;
 	struct wmi_resource_config config = {};
-	u32 val;
+	u32 len, val;
+	int i;
 
 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1370,23 +2196,158 @@
 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
 
-	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(len);
 	if (!buf)
 		return -ENOMEM;
 
 	cmd = (struct wmi_init_cmd *)buf->data;
-	cmd->num_host_mem_chunks = 0;
+
+	if (ar->wmi.num_mem_chunks == 0) {
+		cmd->num_host_mem_chunks = 0;
+		goto out;
+	}
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+		   __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+		cmd->host_mem_chunks[i].ptr =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+		cmd->host_mem_chunks[i].size =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].len);
+		cmd->host_mem_chunks[i].req_id =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+		ath10k_dbg(ATH10K_DBG_WMI,
+			   "wmi chunk %d len %d requested, addr 0x%x\n",
+			   i,
+			   cmd->host_mem_chunks[i].size,
+			   cmd->host_mem_chunks[i].ptr);
+	}
+out:
 	memcpy(&cmd->resource_config, &config, sizeof(config));
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
-	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
 }
 
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10x *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10x config = {};
+	u32 len, val;
+	int i;
+
+	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+	config.scan_max_pending_reqs =
+		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+	config.bmiss_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_ap_profiles =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(len);
+	if (!buf)
+		return -ENOMEM;
+
+	cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+	if (ar->wmi.num_mem_chunks == 0) {
+		cmd->num_host_mem_chunks = 0;
+		goto out;
+	}
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+		   __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+		cmd->host_mem_chunks[i].ptr =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+		cmd->host_mem_chunks[i].size =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].len);
+		cmd->host_mem_chunks[i].req_id =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+		ath10k_dbg(ATH10K_DBG_WMI,
+			   "wmi chunk %d len %d requested, addr 0x%x\n",
+			   i,
+			   cmd->host_mem_chunks[i].size,
+			   cmd->host_mem_chunks[i].ptr);
+	}
+out:
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+	int ret;
+
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		ret = ath10k_wmi_10x_cmd_init(ar);
+	else
+		ret = ath10k_wmi_main_cmd_init(ar);
+
+	return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+					  const struct wmi_start_scan_arg *arg)
 {
 	int len;
 
-	len = sizeof(struct wmi_start_scan_cmd);
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		len = sizeof(struct wmi_start_scan_cmd_10x);
+	else
+		len = sizeof(struct wmi_start_scan_cmd);
 
 	if (arg->ie_len) {
 		if (!arg->ie)
@@ -1446,7 +2407,7 @@
 	int len = 0;
 	int i;
 
-	len = ath10k_wmi_start_scan_calc_len(arg);
+	len = ath10k_wmi_start_scan_calc_len(ar, arg);
 	if (len < 0)
 		return len; /* len contains error code here */
 
@@ -1478,7 +2439,14 @@
 	cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
 
 	/* TLV list starts after fields included in the struct */
-	off = sizeof(*cmd);
+	/* There's just one field that differs between the two start_scan
+	 * structures - burst_duration, which we are not using anyway, so
+	 * there is no point in splitting the code here; just shift the
+	 * buffer to fit the given FW */
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		off = sizeof(struct wmi_start_scan_cmd_10x);
+	else
+		off = sizeof(struct wmi_start_scan_cmd);
 
 	if (arg->n_channels) {
 		channels = (void *)skb->data + off;
@@ -1540,7 +2508,7 @@
 	}
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
-	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
 }
 
 void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2524,7 @@
 	arg->repeat_probe_time = 0;
 	arg->probe_spacing_time = 0;
 	arg->idle_time = 0;
-	arg->max_scan_time = 5000;
+	arg->max_scan_time = 20000;
 	arg->probe_delay = 5;
 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
 		| WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2568,7 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
 		   arg->req_id, arg->req_type, arg->u.scan_id);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
 }
 
 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@
 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
 		   vdev_id, type, subtype, macaddr);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
 }
 
 int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1643,20 +2611,20 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "WMI vdev delete id %d\n", vdev_id);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
 }
 
 static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 				const struct wmi_vdev_start_request_arg *arg,
-				enum wmi_cmd_id cmd_id)
+				u32 cmd_id)
 {
 	struct wmi_vdev_start_request_cmd *cmd;
 	struct sk_buff *skb;
 	const char *cmdname;
 	u32 flags = 0;
 
-	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
-	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
 		return -EINVAL;
 	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
 		return -EINVAL;
@@ -1665,9 +2633,9 @@
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
 		return -EINVAL;
 
-	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+	if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
 		cmdname = "start";
-	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+	else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
 		cmdname = "restart";
 	else
 		return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1718,15 +2686,17 @@
 int ath10k_wmi_vdev_start(struct ath10k *ar,
 			  const struct wmi_vdev_start_request_arg *arg)
 {
-	return ath10k_wmi_vdev_start_restart(ar, arg,
-					     WMI_VDEV_START_REQUEST_CMDID);
+	u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+
+	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
 }
 
 int ath10k_wmi_vdev_restart(struct ath10k *ar,
 		     const struct wmi_vdev_start_request_arg *arg)
 {
-	return ath10k_wmi_vdev_start_restart(ar, arg,
-					     WMI_VDEV_RESTART_REQUEST_CMDID);
+	u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+
+	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
 }
 
 int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1743,7 +2713,7 @@
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
 }
 
 int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1758,13 +2728,13 @@
 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
-	memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+	memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
 
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
 		   vdev_id, aid, bssid);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
 }
 
 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1782,15 +2752,22 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
 }
 
 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
-			      enum wmi_vdev_param param_id, u32 param_value)
+			      u32 param_id, u32 param_value)
 {
 	struct wmi_vdev_set_param_cmd *cmd;
 	struct sk_buff *skb;
 
+	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+		ath10k_dbg(ATH10K_DBG_WMI,
+			   "vdev param %d not supported by firmware\n",
+			    param_id);
+		return -EOPNOTSUPP;
+	}
+
 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
 	if (!skb)
 		return -ENOMEM;
@@ -1804,7 +2781,7 @@
 		   "wmi vdev id 0x%x set param %d value %d\n",
 		   vdev_id, param_id, param_value);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
 }
 
 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1839,7 +2816,8 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi vdev install key idx %d cipher %d len %d\n",
 		   arg->key_idx, arg->key_cipher, arg->key_len);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_install_key_cmdid);
 }
 
 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1859,7 +2837,7 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi peer create vdev_id %d peer_addr %pM\n",
 		   vdev_id, peer_addr);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
 }
 
 int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1879,7 +2857,7 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
 		   vdev_id, peer_addr);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
 }
 
 int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1900,7 +2878,7 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
 		   vdev_id, peer_addr, tid_bitmap);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
 }
 
 int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1918,13 +2896,13 @@
 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
 	cmd->param_id    = __cpu_to_le32(param_id);
 	cmd->param_value = __cpu_to_le32(param_value);
-	memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+	memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
 
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
 		   vdev_id, peer_addr, param_id, param_value);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
 }
 
 int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1945,7 +2923,8 @@
 		   "wmi set powersave id 0x%x mode %d\n",
 		   vdev_id, psmode);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_mode_cmdid);
 }
 
 int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1967,7 +2946,8 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
 		   vdev_id, param_id, value);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_param_cmdid);
 }
 
 int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1993,7 +2973,8 @@
 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
 		   vdev_id, param_id, value, mac);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
 }
 
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2046,7 +3027,7 @@
 		ci->flags            |= __cpu_to_le32(flags);
 	}
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
 }
 
 int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2105,10 +3086,11 @@
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi peer assoc vdev %d addr %pM\n",
 		   arg->vdev_id, arg->addr);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
 }
 
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+				  const struct wmi_bcn_tx_arg *arg)
 {
 	struct wmi_bcn_tx_cmd *cmd;
 	struct sk_buff *skb;
@@ -2124,7 +3106,7 @@
 	cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
 	memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+	return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
 }
 
 static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2155,7 +3137,8 @@
 	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
-	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
 }
 
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2171,7 +3154,7 @@
 	cmd->stats_id = __cpu_to_le32(stats_id);
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
 }
 
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2190,5 +3173,5 @@
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
 		   type, delay_ms);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
 }
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8..78c991a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -208,6 +208,118 @@
 	(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
 	} while (0)
 
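+/* per-firmware command ID translation table; an entry of
+ * WMI_CMD_UNSUPPORTED (0) means the running firmware does not
+ * implement that command */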
+struct wmi_cmd_map {
+	u32 init_cmdid;
+	u32 start_scan_cmdid;
+	u32 stop_scan_cmdid;
+	u32 scan_chan_list_cmdid;
+	u32 scan_sch_prio_tbl_cmdid;
+	u32 pdev_set_regdomain_cmdid;
+	u32 pdev_set_channel_cmdid;
+	u32 pdev_set_param_cmdid;
+	u32 pdev_pktlog_enable_cmdid;
+	u32 pdev_pktlog_disable_cmdid;
+	u32 pdev_set_wmm_params_cmdid;
+	u32 pdev_set_ht_cap_ie_cmdid;
+	u32 pdev_set_vht_cap_ie_cmdid;
+	u32 pdev_set_dscp_tid_map_cmdid;
+	u32 pdev_set_quiet_mode_cmdid;
+	u32 pdev_green_ap_ps_enable_cmdid;
+	u32 pdev_get_tpc_config_cmdid;
+	u32 pdev_set_base_macaddr_cmdid;
+	u32 vdev_create_cmdid;
+	u32 vdev_delete_cmdid;
+	u32 vdev_start_request_cmdid;
+	u32 vdev_restart_request_cmdid;
+	u32 vdev_up_cmdid;
+	u32 vdev_stop_cmdid;
+	u32 vdev_down_cmdid;
+	u32 vdev_set_param_cmdid;
+	u32 vdev_install_key_cmdid;
+	u32 peer_create_cmdid;
+	u32 peer_delete_cmdid;
+	u32 peer_flush_tids_cmdid;
+	u32 peer_set_param_cmdid;
+	u32 peer_assoc_cmdid;
+	u32 peer_add_wds_entry_cmdid;
+	u32 peer_remove_wds_entry_cmdid;
+	u32 peer_mcast_group_cmdid;
+	u32 bcn_tx_cmdid;
+	u32 pdev_send_bcn_cmdid;
+	u32 bcn_tmpl_cmdid;
+	u32 bcn_filter_rx_cmdid;
+	u32 prb_req_filter_rx_cmdid;
+	u32 mgmt_tx_cmdid;
+	u32 prb_tmpl_cmdid;
+	u32 addba_clear_resp_cmdid;
+	u32 addba_send_cmdid;
+	u32 addba_status_cmdid;
+	u32 delba_send_cmdid;
+	u32 addba_set_resp_cmdid;
+	u32 send_singleamsdu_cmdid;
+	u32 sta_powersave_mode_cmdid;
+	u32 sta_powersave_param_cmdid;
+	u32 sta_mimo_ps_mode_cmdid;
+	u32 pdev_dfs_enable_cmdid;
+	u32 pdev_dfs_disable_cmdid;
+	u32 roam_scan_mode;
+	u32 roam_scan_rssi_threshold;
+	u32 roam_scan_period;
+	u32 roam_scan_rssi_change_threshold;
+	u32 roam_ap_profile;
+	u32 ofl_scan_add_ap_profile;
+	u32 ofl_scan_remove_ap_profile;
+	u32 ofl_scan_period;
+	u32 p2p_dev_set_device_info;
+	u32 p2p_dev_set_discoverability;
+	u32 p2p_go_set_beacon_ie;
+	u32 p2p_go_set_probe_resp_ie;
+	u32 p2p_set_vendor_ie_data_cmdid;
+	u32 ap_ps_peer_param_cmdid;
+	u32 ap_ps_peer_uapsd_coex_cmdid;
+	u32 peer_rate_retry_sched_cmdid;
+	u32 wlan_profile_trigger_cmdid;
+	u32 wlan_profile_set_hist_intvl_cmdid;
+	u32 wlan_profile_get_profile_data_cmdid;
+	u32 wlan_profile_enable_profile_id_cmdid;
+	u32 wlan_profile_list_profile_id_cmdid;
+	u32 pdev_suspend_cmdid;
+	u32 pdev_resume_cmdid;
+	u32 add_bcn_filter_cmdid;
+	u32 rmv_bcn_filter_cmdid;
+	u32 wow_add_wake_pattern_cmdid;
+	u32 wow_del_wake_pattern_cmdid;
+	u32 wow_enable_disable_wake_event_cmdid;
+	u32 wow_enable_cmdid;
+	u32 wow_hostwakeup_from_sleep_cmdid;
+	u32 rtt_measreq_cmdid;
+	u32 rtt_tsf_cmdid;
+	u32 vdev_spectral_scan_configure_cmdid;
+	u32 vdev_spectral_scan_enable_cmdid;
+	u32 request_stats_cmdid;
+	u32 set_arp_ns_offload_cmdid;
+	u32 network_list_offload_config_cmdid;
+	u32 gtk_offload_cmdid;
+	u32 csa_offload_enable_cmdid;
+	u32 csa_offload_chanswitch_cmdid;
+	u32 chatter_set_mode_cmdid;
+	u32 peer_tid_addba_cmdid;
+	u32 peer_tid_delba_cmdid;
+	u32 sta_dtim_ps_method_cmdid;
+	u32 sta_uapsd_auto_trig_cmdid;
+	u32 sta_keepalive_cmd;
+	u32 echo_cmdid;
+	u32 pdev_utf_cmdid;
+	u32 dbglog_cfg_cmdid;
+	u32 pdev_qvit_cmdid;
+	u32 pdev_ftm_intg_cmdid;
+	u32 vdev_set_keepalive_cmdid;
+	u32 vdev_get_keepalive_cmdid;
+	u32 force_fw_hang_cmdid;
+	u32 gpio_config_cmdid;
+	u32 gpio_output_cmdid;
+};
+
 /*
  * wmi command groups.
  */
@@ -247,7 +359,9 @@
 #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
 #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
 
-/* Command IDs and commande events. */
+#define WMI_CMD_UNSUPPORTED 0
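+/* 0 is never a valid command ID (valid IDs start at 0x1), so it
+ * doubles as the "not implemented by this firmware" marker in the
+ * command maps */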
+
+/* Command IDs and command events for MAIN FW. */
 enum wmi_cmd_id {
 	WMI_INIT_CMDID = 0x1,
 
@@ -488,6 +602,217 @@
 	WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
 };
 
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+	WMI_10X_START_CMDID = 0x9000,
+	WMI_10X_END_CMDID = 0x9FFF,
+
+	/* initialize the wlan sub system */
+	WMI_10X_INIT_CMDID,
+
+	/* Scan specific commands */
+
+	WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+	WMI_10X_STOP_SCAN_CMDID,
+	WMI_10X_SCAN_CHAN_LIST_CMDID,
+	WMI_10X_ECHO_CMDID,
+
+	/* PDEV(physical device) specific commands */
+	WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10X_PDEV_SET_CHANNEL_CMDID,
+	WMI_10X_PDEV_SET_PARAM_CMDID,
+	WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+	/* VDEV(virtual device) specific commands */
+	WMI_10X_VDEV_CREATE_CMDID,
+	WMI_10X_VDEV_DELETE_CMDID,
+	WMI_10X_VDEV_START_REQUEST_CMDID,
+	WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10X_VDEV_UP_CMDID,
+	WMI_10X_VDEV_STOP_CMDID,
+	WMI_10X_VDEV_DOWN_CMDID,
+	WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10X_VDEV_SET_PARAM_CMDID,
+	WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+	/* peer specific commands */
+	WMI_10X_PEER_CREATE_CMDID,
+	WMI_10X_PEER_DELETE_CMDID,
+	WMI_10X_PEER_FLUSH_TIDS_CMDID,
+	WMI_10X_PEER_SET_PARAM_CMDID,
+	WMI_10X_PEER_ASSOC_CMDID,
+	WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+	/* beacon/management specific commands */
+
+	WMI_10X_BCN_TX_CMDID,
+	WMI_10X_BCN_PRB_TMPL_CMDID,
+	WMI_10X_BCN_FILTER_RX_CMDID,
+	WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10X_MGMT_TX_CMDID,
+
+	/* commands to directly control ba negotiation directly from host. */
+	WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10X_ADDBA_SEND_CMDID,
+	WMI_10X_ADDBA_STATUS_CMDID,
+	WMI_10X_DELBA_SEND_CMDID,
+	WMI_10X_ADDBA_SET_RESP_CMDID,
+	WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+	/* Station power save specific config */
+	WMI_10X_STA_POWERSAVE_MODE_CMDID,
+	WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+	/* set debug log config */
+	WMI_10X_DBGLOG_CFG_CMDID,
+
+	/* DFS-specific commands */
+	WMI_10X_PDEV_DFS_ENABLE_CMDID,
+	WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+	/* QVIT specific command id */
+	WMI_10X_PDEV_QVIT_CMDID,
+
+	/* Offload Scan and Roaming related  commands */
+	WMI_10X_ROAM_SCAN_MODE,
+	WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10X_ROAM_SCAN_PERIOD,
+	WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10X_ROAM_AP_PROFILE,
+	WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10X_OFL_SCAN_PERIOD,
+
+	/* P2P specific commands */
+	WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10X_P2P_GO_SET_BEACON_IE,
+	WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+	/* AP power save specific config */
+	WMI_10X_AP_PS_PEER_PARAM_CMDID,
+	WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+	/* Rate-control specific commands */
+	WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+	/* WLAN Profiling commands. */
+	WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+	/* Suspend resume command Ids */
+	WMI_10X_PDEV_SUSPEND_CMDID,
+	WMI_10X_PDEV_RESUME_CMDID,
+
+	/* Beacon filter commands */
+	WMI_10X_ADD_BCN_FILTER_CMDID,
+	WMI_10X_RMV_BCN_FILTER_CMDID,
+
+	/* WOW Specific WMI commands*/
+	WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10X_WOW_ENABLE_CMDID,
+	WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+	/* RTT measurement related cmd */
+	WMI_10X_RTT_MEASREQ_CMDID,
+	WMI_10X_RTT_TSF_CMDID,
+
+	/* transmit beacon by value */
+	WMI_10X_PDEV_SEND_BCN_CMDID,
+
+	/* F/W stats */
+	WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10X_REQUEST_STATS_CMDID,
+
+	/* GPIO Configuration */
+	WMI_10X_GPIO_CONFIG_CMDID,
+	WMI_10X_GPIO_OUTPUT_CMDID,
+
+	WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+	WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10X_READY_EVENTID,
+	WMI_10X_START_EVENTID = 0x9000,
+	WMI_10X_END_EVENTID = 0x9FFF,
+
+	/* Scan specific events */
+	WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+	WMI_10X_ECHO_EVENTID,
+	WMI_10X_DEBUG_MESG_EVENTID,
+	WMI_10X_UPDATE_STATS_EVENTID,
+
+	/* Instantaneous RSSI event */
+	WMI_10X_INST_RSSI_STATS_EVENTID,
+
+	/* VDEV specific events */
+	WMI_10X_VDEV_START_RESP_EVENTID,
+	WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10X_VDEV_RESUME_REQ_EVENTID,
+	WMI_10X_VDEV_STOPPED_EVENTID,
+
+	/* peer  specific events */
+	WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+	/* beacon/mgmt specific events */
+	WMI_10X_HOST_SWBA_EVENTID,
+	WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10X_MGMT_RX_EVENTID,
+
+	/* Channel stats event */
+	WMI_10X_CHAN_INFO_EVENTID,
+
+	/* PHY Error specific WMI event */
+	WMI_10X_PHYERR_EVENTID,
+
+	/* Roam event to trigger roaming on host */
+	WMI_10X_ROAM_EVENTID,
+
+	/* matching AP found from list of profiles */
+	WMI_10X_PROFILE_MATCH,
+
+	/* debug print message used for tracing FW code while debugging */
+	WMI_10X_DEBUG_PRINT_EVENTID,
+	/* VI specific event */
+	WMI_10X_PDEV_QVIT_EVENTID,
+	/* FW code profile data in response to profile request */
+	WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+	/* RTT related event IDs */
+	WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+	WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+	/* TPC config for the current operating channel */
+	WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+	WMI_10X_GPIO_INPUT_EVENTID,
+	WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
 enum wmi_phy_mode {
 	MODE_11A        = 0,   /* 11a Mode */
 	MODE_11G        = 1,   /* 11b/g Mode */
@@ -508,6 +833,48 @@
 	MODE_MAX        = 14
 };
 
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+	switch (mode) {
+	case MODE_11A:
+		return "11a";
+	case MODE_11G:
+		return "11g";
+	case MODE_11B:
+		return "11b";
+	case MODE_11GONLY:
+		return "11gonly";
+	case MODE_11NA_HT20:
+		return "11na-ht20";
+	case MODE_11NG_HT20:
+		return "11ng-ht20";
+	case MODE_11NA_HT40:
+		return "11na-ht40";
+	case MODE_11NG_HT40:
+		return "11ng-ht40";
+	case MODE_11AC_VHT20:
+		return "11ac-vht20";
+	case MODE_11AC_VHT40:
+		return "11ac-vht40";
+	case MODE_11AC_VHT80:
+		return "11ac-vht80";
+	case MODE_11AC_VHT20_2G:
+		return "11ac-vht20-2g";
+	case MODE_11AC_VHT40_2G:
+		return "11ac-vht40-2g";
+	case MODE_11AC_VHT80_2G:
+		return "11ac-vht80-2g";
+	case MODE_UNKNOWN:
+		/* skip */
+		break;
+
+		/* no default handler to allow compiler to check that the
+		 * enum is fully handled */
+	};
+
+	return "<unknown>";
+}
+
 #define WMI_CHAN_LIST_TAG	0x1
 #define WMI_SSID_LIST_TAG	0x2
 #define WMI_BSSID_LIST_TAG	0x3
@@ -763,13 +1130,45 @@
 	struct wlan_host_mem_req mem_reqs[1];
 } __packed;
 
-/*
- * status consists of  upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS   0x0
-#define WLAN_GET_INIT_STATUS_REASON(status)    ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+	__le32 sw_version;
+	__le32 abi_version;
+
+	/* WMI_PHY_CAPABILITY */
+	__le32 phy_capability;
+
+	/* Maximum number of frag table entries that SW will populate less 1 */
+	__le32 max_frag_entry;
+	__le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+	__le32 num_rf_chains;
+
+	/*
+	 * The following field is only valid for service type
+	 * WMI_SERVICE_11AC
+	 */
+	__le32 ht_cap_info; /* WMI HT Capability */
+	__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+	__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+
+	struct hal_reg_capabilities hal_reg_capabilities;
+
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+	/*
+	 * request to host to allocate a chunk of memory and pass it down to
+	 * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+	 * data structures. Only valid for low latency interfaces like PCIE
+	 * where FW can access this memory directly (or) by DMA.
+	 */
+	__le32 num_mem_reqs;
+
+	struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
 
 #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
 #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -978,6 +1377,192 @@
 	__le32 max_frag_entries;
 } __packed;
 
+struct wmi_resource_config_10x {
+	/* number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* number of peer nodes to support */
+	__le32 num_peers;
+
+	/* number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/*
+	 * max skid for resolving hash collisions
+	 *
+	 *   The address search table is sparse, so that if two MAC addresses
+	 *   result in the same hash value, the second of these conflicting
+	 *   entries can slide to the next index in the address search table,
+	 *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 *   specifies the upper bound on how many subsequent indices to search
+	 *   over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/*
+	 * the nominal chain mask for transmit
+	 *
+	 *   The chain mask may be modified dynamically, e.g. to operate AP
+	 *   tx with a reduced number of chains if no clients are associated.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/*
+	 * the nominal chain mask for receive
+	 *
+	 *   The chain mask may be modified dynamically, e.g. for a client
+	 *   to use a reduced number of chains for receive if the traffic to
+	 *   the client is low enough that it doesn't require downlink MIMO
+	 *   or antenna diversity.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/*
+	 * what rx reorder timeout (ms) to use for the AC
+	 *
+	 *   Each WMM access class (voice, video, best-effort, background) will
+	 *   have its own timeout value to dictate how long to wait for missing
+	 *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+	 *   already been received.
+	 *   This parameter specifies the timeout in milliseconds for each
+	 *   class.
+	 */
+	__le32 rx_timeout_pri_vi;
+	__le32 rx_timeout_pri_vo;
+	__le32 rx_timeout_pri_be;
+	__le32 rx_timeout_pri_bk;
+
+	/*
+	 * what mode the rx should decap packets to
+	 *
+	 *   MAC can decap to RAW (no decap), native wifi or Ethernet types
+	 *   THis setting also determines the default TX behavior, however TX
+	 *   This setting also determines the default TX behavior, however TX
+	 */
+	__le32 rx_decap_mode;
+
+	/* the maximum number of scan requests that can be queued */
+	__le32 scan_max_pending_reqs;
+
+	/* maximum VDEV that could use BMISS offload */
+	__le32 bmiss_offload_max_vdev;
+
+	/* maximum VDEV that could use offload roaming */
+	__le32 roam_offload_max_vdev;
+
+	/* maximum AP profiles that would push to offload roaming */
+	__le32 roam_offload_max_ap_profiles;
+
+	/*
+	 * how many groups to use for mcast->ucast conversion
+	 *
+	 *   The target's WAL maintains a table to hold information regarding
+	 *   which peers belong to a given multicast group, so that if
+	 *   multicast->unicast conversion is enabled, the target can convert
+	 *   multicast tx frames to a series of unicast tx frames, to each
+	 *   peer within the multicast group.
+	 *   This num_mcast_groups configuration parameter tells the target how
+	 *   many multicast groups to provide storage for within its multicast
+	 *   group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/*
+	 * size to alloc for the mcast membership table
+	 *
+	 *   This num_mcast_table_elems configuration parameter tells the
+	 *   target how many peer elements it needs to provide storage for in
+	 *   its multicast group membership table.
+	 *   These multicast group membership table elements are shared by the
+	 *   multicast groups stored within the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/*
+	 * whether/how to do multicast->unicast conversion
+	 *
+	 *   This configuration parameter specifies whether the target should
+	 *   perform multicast --> unicast conversion on transmit, and if so,
+	 *   what to do if it finds no entries in its multicast group
+	 *   membership table for the multicast IP address in the tx frame.
+	 *   Configuration value:
+	 *   0 -> Do not perform multicast to unicast conversion.
+	 *   1 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        drop the frame.
+	 *   2 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        transmit the frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/*
+	 * how much memory to allocate for a tx PPDU dbg log
+	 *
+	 *   This parameter controls how much memory the target will allocate
+	 *   to store a log of tx PPDU meta-information (how large the PPDU
+	 *   was, when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* how many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/*
+	 * MAC DMA burst size, e.g., For target PCI limit can be
+	 * 0 -default, 1 256B
+	 */
+	__le32 dma_burst_size;
+
+	/*
+	 * Fixed delimiters to be inserted after every MPDU to
+	 * account for interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/*
+	 *   determine whether target is responsible for detecting duplicate
+	 *   non-aggregate MPDU and timing out stale fragments.
+	 *
+	 *   A-MPDU reordering is always performed on the target.
+	 *
+	 *   0: target responsible for frag timeout and dup checking
+	 *   1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/*
+	 * Configuration for VoW :
+	 * No of Video Nodes to be supported
+	 * and Max no of descriptors for each Video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/*
+	 * Max. number of Tx fragments per MSDU
+	 *  This parameter controls the max number of Tx fragments per MSDU.
+	 *  This is sent by the target as part of the WMI_SERVICE_READY event
+	 *  and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+} __packed;
+
+
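+/* flags in wlan_host_mem_req.num_unit_info telling the host to derive
+ * the unit count from the configured number of vdevs or peers instead
+ * of using the requested value verbatim */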
+#define NUM_UNITS_IS_NUM_VDEVS   0x1
+#define NUM_UNITS_IS_NUM_PEERS   0x2
+
 /* strucutre describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
@@ -999,6 +1584,18 @@
 	struct host_memory_chunk host_mem_chunks[1];
 } __packed;
 
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+	struct wmi_resource_config_10x resource_config;
+	__le32 num_host_mem_chunks;
+
+	/*
+	 * variable number of host memory chunks.
+	 * This should be the last element in the structure
+	 */
+	struct host_memory_chunk host_mem_chunks[1];
+} __packed;
+
 /* TLV for channel list */
 struct wmi_chan_list {
 	__le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1715,88 @@
 	 */
 } __packed;
 
+/* This is the definition from 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+	/* Scan ID */
+	__le32 scan_id;
+
+	/* Scan requestor ID */
+	__le32 scan_req_id;
+
+	/* VDEV id (interface) that is requesting scan */
+	__le32 vdev_id;
+
+	/* Scan Priority, input to scan scheduler */
+	__le32 scan_priority;
+
+	/* Scan events subscription */
+	__le32 notify_scan_events;
+
+	/* dwell time in msec on active channels */
+	__le32 dwell_time_active;
+
+	/* dwell time in msec on passive channels */
+	__le32 dwell_time_passive;
+
+	/*
+	 * min time in msec on the BSS channel, only valid if at least one
+	 * VDEV is active
+	 */
+	__le32 min_rest_time;
+
+	/*
+	 * max rest time in msec on the BSS channel, only valid if at least
+	 * one VDEV is active
+	 */
+	/*
+	 * the scanner will rest on the bss channel for at least
+	 * min_rest_time. after min_rest_time the scanner will start checking
+	 * for tx/rx activity on all VDEVs. if there is no activity the
+	 * scanner will switch to off channel. if there is activity the
+	 * scanner will let the radio stay on the bss channel until
+	 * max_rest_time expires. at max_rest_time the scanner will switch
+	 * to off channel irrespective of activity. activity is determined
+	 * by the idle_time parameter.
+	 */
+	__le32 max_rest_time;
+
+	/*
+	 * time before sending next set of probe requests.
+	 * The scanner keeps repeating probe requests transmission with
+	 * period specified by repeat_probe_time.
+	 * The number of probe requests specified depends on the ssid_list
+	 * and bssid_list
+	 */
+	__le32 repeat_probe_time;
+
+	/* time in msec between 2 consecutive probe requests within a set. */
+	__le32 probe_spacing_time;
+
+	/*
+	 * data inactivity time in msec on bss channel that will be used by
+	 * scanner for measuring the inactivity.
+	 */
+	__le32 idle_time;
+
+	/* maximum time in msec allowed for scan  */
+	__le32 max_scan_time;
+
+	/*
+	 * delay in msec before sending first probe request after switching
+	 * to a channel
+	 */
+	__le32 probe_delay;
+
+	/* Scan control flags */
+	__le32 scan_ctrl_flags;
+
+	/*
+	 * TLV (tag length value) parameters follow the scan_cmd structure.
+	 * TLV can contain channel list, bssid list, ssid list and
+	 * ie. The TLV tags are defined above.
+	 */
+} __packed;
+
+
 struct wmi_ssid_arg {
 	int len;
 	const u8 *ssid;
@@ -1268,7 +1947,7 @@
  * good idea to pass all the fields in the RX status
  * descriptor up to the host.
  */
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
 	__le32 channel;
 	__le32 snr;
 	__le32 rate;
@@ -1277,8 +1956,18 @@
 	__le32 status; /* %WMI_RX_STATUS_ */
 } __packed;
 
-struct wmi_mgmt_rx_event {
-	struct wmi_mgmt_rx_hdr hdr;
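+/* the v2 header used by ext-rx capable firmware appends four rssi_ctl
+ * values to the v1 layout */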
+struct wmi_mgmt_rx_hdr_v2 {
+	struct wmi_mgmt_rx_hdr_v1 v1;
+	__le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+	struct wmi_mgmt_rx_hdr_v1 hdr;
+	u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+	struct wmi_mgmt_rx_hdr_v2 hdr;
 	u8 buf[0];
 } __packed;
 
@@ -1465,6 +2154,60 @@
 #define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500
 #define PEER_DEFAULT_STATS_UPDATE_PERIOD    500
 
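+/* pdev parameter ID translation table, analogous to wmi_cmd_map;
+ * WMI_PDEV_PARAM_UNSUPPORTED (0) entries are rejected in
+ * ath10k_wmi_pdev_set_param() before reaching the firmware */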
+struct wmi_pdev_param_map {
+	u32 tx_chain_mask;
+	u32 rx_chain_mask;
+	u32 txpower_limit2g;
+	u32 txpower_limit5g;
+	u32 txpower_scale;
+	u32 beacon_gen_mode;
+	u32 beacon_tx_mode;
+	u32 resmgr_offchan_mode;
+	u32 protection_mode;
+	u32 dynamic_bw;
+	u32 non_agg_sw_retry_th;
+	u32 agg_sw_retry_th;
+	u32 sta_kickout_th;
+	u32 ac_aggrsize_scaling;
+	u32 ltr_enable;
+	u32 ltr_ac_latency_be;
+	u32 ltr_ac_latency_bk;
+	u32 ltr_ac_latency_vi;
+	u32 ltr_ac_latency_vo;
+	u32 ltr_ac_latency_timeout;
+	u32 ltr_sleep_override;
+	u32 ltr_rx_override;
+	u32 ltr_tx_activity_timeout;
+	u32 l1ss_enable;
+	u32 dsleep_enable;
+	u32 pcielp_txbuf_flush;
+	u32 pcielp_txbuf_watermark;
+	u32 pcielp_txbuf_tmo_en;
+	u32 pcielp_txbuf_tmo_value;
+	u32 pdev_stats_update_period;
+	u32 vdev_stats_update_period;
+	u32 peer_stats_update_period;
+	u32 bcnflt_stats_update_period;
+	u32 pmf_qos;
+	u32 arp_ac_override;
+	u32 arpdhcp_ac_override;
+	u32 dcs;
+	u32 ani_enable;
+	u32 ani_poll_period;
+	u32 ani_listen_period;
+	u32 ani_ofdm_level;
+	u32 ani_cck_level;
+	u32 dyntxchain;
+	u32 proxy_sta;
+	u32 idle_ps_config;
+	u32 power_gating_sleep;
+	u32 fast_channel_reset;
+	u32 burst_dur;
+	u32 burst_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
 enum wmi_pdev_param {
 	/* TX chian mask */
 	WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1564,6 +2307,97 @@
 	WMI_PDEV_PARAM_POWER_GATING_SLEEP,
 };
 
+enum wmi_10x_pdev_param {
+	/* TX chain mask */
+	WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	/* RX chain mask */
+	WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	/* TX power limit for 2G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	/* TX power limit for 5G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	/* TX power scale */
+	WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	/* Beacon generation mode. 0: host, 1: target */
+	WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	/* Beacon transmission mode. 0: staggered, 1: bursted */
+	WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	/*
+	 * Resource manager off-channel mode.
+	 * 0: turn off off-channel mode, 1: turn on off-channel mode
+	 */
+	WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	/*
+	 * Protection mode:
+	 * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+	 */
+	WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	/* Dynamic bandwidth 0: disable 1: enable */
+	WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	/* Non-aggregate/11g sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	/* aggregate sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	/* Station kickout threshold (consecutive failures). 0 - disable */
+	WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	/* Aggregate size scaling configuration per AC */
+	WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	/* LTR enable */
+	WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	/* LTR latency for BE, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	/* LTR latency for BK, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	/* LTR latency for VI, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	/* LTR latency for VO, in us  */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	/* LTR AC latency timeout, in ms */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	/* LTR platform latency override, in us */
+	WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	/* LTR-RX override, in us */
+	WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	/* Tx activity timeout for LTR, in us */
+	WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	/* L1SS state machine enable */
+	WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	/* Deep sleep state machine enable */
+	WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	/* pdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	/* vdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	/* peer level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	/* beacon filter status update period */
+	WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	/* QoS mgmt frame protection MFP/PMF 0: disable, 1: enable */
+	WMI_10X_PDEV_PARAM_PMF_QOS,
+	/* Access category on which ARP and DHCP frames are sent */
+	WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	/* DCS configuration */
+	WMI_10X_PDEV_PARAM_DCS,
+	/* Enable/Disable ANI on target */
+	WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	/* configure the ANI polling period */
+	WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	/* configure the ANI listening period */
+	WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	/* configure OFDM immunity level */
+	WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	/* configure CCK immunity level */
+	WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	/* Enable/Disable CDD for 1x1 STAs in rate control module */
+	WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	/* Enable/Disable fast channel reset */
+	WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	/* Set bursting duration */
+	WMI_10X_PDEV_PARAM_BURST_DUR,
+	/* Set bursting enable */
+	WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
+
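The point of wmi_pdev_param_map (above) plus the two enums is indirection:
the driver addresses a parameter by map field, and each firmware flavour
fills the map with its own ID, or with WMI_PDEV_PARAM_UNSUPPORTED when it
has no equivalent (the 10.x enum above, for instance, has no separate
ARP_AC_OVERRIDE). A minimal lookup sketch, matching the u32-typed
ath10k_wmi_pdev_set_param() prototype near the end of this header; the
ar->wmi.pdev_param home for the map is an assumption:

static int example_enable_dynamic_bw(struct ath10k *ar)
{
	/* assumed map location; each fw flavour installs its own table */
	u32 id = ar->wmi.pdev_param->dynamic_bw;

	if (id == WMI_PDEV_PARAM_UNSUPPORTED)
		return -EOPNOTSUPP;	/* this firmware has no such knob */

	return ath10k_wmi_pdev_set_param(ar, id, 1);
}

This is also why the set_param prototypes later in this diff drop their enum
types for plain u32: the value passed is whichever firmware's numbering the
map selected.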
 struct wmi_pdev_set_param_cmd {
 	__le32 param_id;
 	__le32 param_value;
@@ -2088,6 +2922,61 @@
 /* Value to disable fixed rate setting */
 #define WMI_FIXED_RATE_NONE    (0xff)
 
+struct wmi_vdev_param_map {
+	u32 rts_threshold;
+	u32 fragmentation_threshold;
+	u32 beacon_interval;
+	u32 listen_interval;
+	u32 multicast_rate;
+	u32 mgmt_tx_rate;
+	u32 slot_time;
+	u32 preamble;
+	u32 swba_time;
+	u32 wmi_vdev_stats_update_period;
+	u32 wmi_vdev_pwrsave_ageout_time;
+	u32 wmi_vdev_host_swba_interval;
+	u32 dtim_period;
+	u32 wmi_vdev_oc_scheduler_air_time_limit;
+	u32 wds;
+	u32 atim_window;
+	u32 bmiss_count_max;
+	u32 bmiss_first_bcnt;
+	u32 bmiss_final_bcnt;
+	u32 feature_wmm;
+	u32 chwidth;
+	u32 chextoffset;
+	u32 disable_htprotection;
+	u32 sta_quickkickout;
+	u32 mgmt_rate;
+	u32 protection_mode;
+	u32 fixed_rate;
+	u32 sgi;
+	u32 ldpc;
+	u32 tx_stbc;
+	u32 rx_stbc;
+	u32 intra_bss_fwd;
+	u32 def_keyid;
+	u32 nss;
+	u32 bcast_data_rate;
+	u32 mcast_data_rate;
+	u32 mcast_indicate;
+	u32 dhcp_indicate;
+	u32 unknown_dest_indicate;
+	u32 ap_keepalive_min_idle_inactive_time_secs;
+	u32 ap_keepalive_max_idle_inactive_time_secs;
+	u32 ap_keepalive_max_unresponsive_time_secs;
+	u32 ap_enable_nawds;
+	u32 mcast2ucast_set;
+	u32 enable_rtscts;
+	u32 txbf;
+	u32 packet_powersave;
+	u32 drop_unencry;
+	u32 tx_encap_type;
+	u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
 /* the definition of different VDEV parameters */
 enum wmi_vdev_param {
 	/* RTS Threshold */
@@ -2219,6 +3108,121 @@
 	WMI_VDEV_PARAM_TX_ENCAP_TYPE,
 };
 
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+	/* RTS Threshold */
+	WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	/* Fragmentation threshold */
+	WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	/* beacon interval in TUs */
+	WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+	/* Listen interval in TUs */
+	WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+	/* multicast rate in Mbps */
+	WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+	/* management frame rate in Mbps */
+	WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+	/* slot time (long vs short) */
+	WMI_10X_VDEV_PARAM_SLOT_TIME,
+	/* preamble (long vs short) */
+	WMI_10X_VDEV_PARAM_PREAMBLE,
+	/* SWBA time (time before tbtt in msec) */
+	WMI_10X_VDEV_PARAM_SWBA_TIME,
+	/* time period for updating VDEV stats */
+	WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+	/* age out time in msec for frames queued for station in power save */
+	WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+	/*
+	 * Host SWBA interval (time in msec before tbtt for SWBA event
+	 * generation).
+	 */
+	WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	/* DTIM period (specified in units of num beacon intervals) */
+	WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	/*
+	 * Scheduler air time limit for this VDEV. Used by the off-channel
+	 * scheduler.
+	 */
+	WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	/* enable/disable WDS for this VDEV */
+	WMI_10X_VDEV_PARAM_WDS,
+	/* ATIM Window */
+	WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	/* BMISS max */
+	WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	/* WMM enable/disable */
+	WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	/* Channel width */
+	WMI_10X_VDEV_PARAM_CHWIDTH,
+	/* Channel Offset */
+	WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	/* Disable HT Protection */
+	WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	/* Quick STA Kickout */
+	WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	/* Rate to be used with Management frames */
+	WMI_10X_VDEV_PARAM_MGMT_RATE,
+	/* Protection Mode */
+	WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	/* Fixed rate setting */
+	WMI_10X_VDEV_PARAM_FIXED_RATE,
+	/* Short GI Enable/Disable */
+	WMI_10X_VDEV_PARAM_SGI,
+	/* Enable LDPC */
+	WMI_10X_VDEV_PARAM_LDPC,
+	/* Enable Tx STBC */
+	WMI_10X_VDEV_PARAM_TX_STBC,
+	/* Enable Rx STBC */
+	WMI_10X_VDEV_PARAM_RX_STBC,
+	/* Intra BSS forwarding  */
+	WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	/* Setting Default xmit key for Vdev */
+	WMI_10X_VDEV_PARAM_DEF_KEYID,
+	/* Number of spatial streams (NSS) */
+	WMI_10X_VDEV_PARAM_NSS,
+	/* Set the custom rate for the broadcast data frames */
+	WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	/* Set the custom rate (rate-code) for multicast data frames */
+	WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	/* Tx multicast packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	/* Tx DHCP packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	/* Enable host inspection of Tx unicast packet to unknown destination */
+	WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+	/* The minimum idle time after which the AP considers a STA inactive */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered inactive when there is no recent
+	 * TX/RX activity and no downlink frames are buffered for it. Once a
+	 * STA exceeds the maximum idle inactive time, the AP will send an
+	 * 802.11 data-null as a keep alive to verify the STA is still
+	 * associated. If the STA does not ACK the data-null, or if the
+	 * data-null is buffered and the STA does not retrieve it, the STA
+	 * will be considered unresponsive
+	 * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+	 */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered unresponsive if there is no recent
+	 * TX/RX activity and downlink frames are buffered for it. Once a STA
+	 * exceeds the maximum unresponsive time, the AP will send a
+	 * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
+	 */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+	/* Enable NAWDS: MCAST INSPECT enable, NAWDS flag set */
+	WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+	WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	/* Enable/Disable RTS-CTS */
+	WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+	WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
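The keepalive parameters above form a ladder: past min_idle the STA becomes a
probe candidate, past max_idle the AP sends the data-null probe, and past
max_unresponsive the host receives the kickout event. A sketch of programming
all three through the map (values illustrative; the ar->wmi.vdev_param
placement is assumed, as for the pdev map earlier):

static int example_set_ap_keepalive(struct ath10k *ar, u32 vdev_id)
{
	const struct wmi_vdev_param_map *map = ar->wmi.vdev_param;
	int ret;

	ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_min_idle_inactive_time_secs, 5);
	if (ret)
		return ret;

	ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_max_idle_inactive_time_secs, 10);
	if (ret)
		return ret;

	return ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_max_unresponsive_time_secs, 20);
}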
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG		0x1
 /* slot time short */
@@ -3000,7 +4004,6 @@
 
 #define WMI_MAX_EVENT 0x1000
 /* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
 #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
 
 /* By default disable power save for IBSS */
@@ -3013,7 +4016,6 @@
 void ath10k_wmi_detach(struct ath10k *ar);
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
 
 int ath10k_wmi_connect_htc_service(struct ath10k *ar);
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3022,8 +4024,7 @@
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
 				  u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
-			      u32 value);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
 int ath10k_wmi_cmd_init(struct ath10k *ar);
 int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
 void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4044,7 @@
 		       const u8 *bssid);
 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
-			      enum wmi_vdev_param param_id, u32 param_value);
+			      u32 param_id, u32 param_value);
 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
 				const struct wmi_vdev_install_key_arg *arg);
 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4067,13 @@
 			       enum wmi_ap_ps_peer_param param_id, u32 value);
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
 			      const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+				  const struct wmi_bcn_tx_arg *arg);
 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
 			const struct wmi_pdev_set_wmm_params_arg *arg);
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
 			     enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
 
 #endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e6..79bffe1 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@
 {
 	struct ath5k_hw *ah = common->priv;
 	struct platform_device *pdev = to_platform_device(ah->dev);
-	struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+	struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
 	u16 *eeprom, *eeprom_end;
 
-
-
-	bcfg = pdev->dev.platform_data;
 	eeprom = (u16 *) bcfg->radio;
 	eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
 
@@ -57,7 +54,7 @@
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
 {
 	struct platform_device *pdev = to_platform_device(ah->dev);
-	struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+	struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
 	ah->ah_mac_srev = bcfg->devid;
 	return 0;
 }
@@ -65,7 +62,7 @@
 static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
 {
 	struct platform_device *pdev = to_platform_device(ah->dev);
-	struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+	struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
 	u8 *cfg_mac;
 
 	if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@
 /*Initialization*/
 static int ath_ahb_probe(struct platform_device *pdev)
 {
-	struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+	struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
 	struct ath5k_hw *ah;
 	struct ieee80211_hw *hw;
 	struct resource *res;
@@ -96,7 +93,7 @@
 	int ret = 0;
 	u32 reg;
 
-	if (!pdev->dev.platform_data) {
+	if (!dev_get_platdata(&pdev->dev)) {
 		dev_err(&pdev->dev, "no platform data specified\n");
 		ret = -EINVAL;
 		goto err_out;
@@ -193,7 +190,7 @@
 
 static int ath_ahb_remove(struct platform_device *pdev)
 {
-	struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+	struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
 	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
 	struct ath5k_hw *ah;
 	u32 reg;
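This file's hunks are one mechanical conversion: open-coded
pdev->dev.platform_data reads become the dev_get_platdata() accessor (the
first hunk also drops a redundant re-assignment of bcfg). The accessor itself
is just a typed wrapper in include/linux/device.h, roughly:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

Using it keeps drivers off struct device internals and makes every
platform-data access greppable; the same substitution repeats in
ath9k/ahb.c below.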
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 48161ed..69f58b0 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1663,15 +1663,15 @@
 	ah->stats.tx_bytes_count += skb->len;
 	info = IEEE80211_SKB_CB(skb);
 
+	size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+	memcpy(info->status.rates, bf->rates, size);
+
 	tries[0] = info->status.rates[0].count;
 	tries[1] = info->status.rates[1].count;
 	tries[2] = info->status.rates[2].count;
 
 	ieee80211_tx_info_clear_status(info);
 
-	size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
-	memcpy(info->status.rates, bf->rates, size);
-
 	for (i = 0; i < ts->ts_final_idx; i++) {
 		struct ieee80211_tx_rate *r =
 			&info->status.rates[i];
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ce86f15..ba200b2 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -661,7 +661,7 @@
 			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
 						AR5K_SISR1_QCU_TXEOL);
 
-		/* Currently this is not much usefull since we treat
+		/* Currently this is not very useful since we treat
 		 * all queues the same way if we get a TXURN (update
 		 * tx trigger level) but we might need it later on*/
 		if (pisr & AR5K_ISR_TXURN)
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 98a8861..05debf7 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,8 +22,7 @@
 
 #define ATH6KL_MAX_IE			256
 
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
 
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 74369de..ca9ba00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,11 +50,10 @@
 };
 
 extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
 
 enum ath6kl_war {
 	ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index a2c8ff8..14cab14 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -60,7 +60,7 @@
 /* disable credit flow control on a specific service */
 #define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL          (1 << 3)
 #define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT    8
-#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK     0xFF00
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK     0xFF00U
 
 /* connect response status codes */
 #define HTC_SERVICE_SUCCESS      0
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7944c25..32f139e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,6 +84,26 @@
 	  developed. At this point enabling this option won't do anything
 	  except increase code size.
 
+config ATH9K_TX99
+	bool "Atheros ath9k TX99 testing support"
+	depends on CFG80211_CERTIFICATION_ONUS
+	default n
+	---help---
+	  Say N. This should only be enabled on systems undergoing
+	  certification testing and evaluation in a controlled environment.
+	  Enabling this will only enable TX99 support; all other modes of
+	  operation will be disabled.
+
+	  TX99 support enables Specific Absorption Rate (SAR) testing.
+	  SAR is the unit of measurement for the amount of radio frequency
+	  (RF) energy absorbed by the body when using a wireless device. The
+	  RF exposure limits used are expressed in terms of SAR, which is a
+	  measure of the electric and magnetic field strength and power
+	  density for transmitters operating at frequencies from 300 kHz to
+	  100 GHz. Regulatory bodies around the world require that wireless
+	  devices be evaluated to meet the RF exposure limits set forth in
+	  governmental SAR regulations.
+
 config ATH9K_LEGACY_RATE_CONTROL
 	bool "Atheros ath9k rate control"
 	depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 75ee9e7..6205ef5 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -14,9 +14,7 @@
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
 ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
-		dfs.o \
-		dfs_pattern_detector.o \
-		dfs_pri_detector.o
+		dfs.o
 ath9k-$(CONFIG_PM_SLEEP) += wow.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b5..2dff276 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@
 	struct platform_device *pdev = to_platform_device(sc->dev);
 	struct ath9k_platform_data *pdata;
 
-	pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
 		ath_err(common,
 			"%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@
 	struct ath_hw *ah;
 	char hw_name[64];
 
-	if (!pdev->dev.platform_data) {
+	if (!dev_get_platdata(&pdev->dev)) {
 		dev_err(&pdev->dev, "no platform data specified\n");
 		return -EINVAL;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0..d28923b7 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@
 		    aniState->cckNoiseImmunityLevel !=
 		    ATH9K_ANI_CCK_DEF_LEVEL) {
 			ath_dbg(common, ANI,
-				"Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+				"Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
 				ah->opmode,
 				chan->channel,
-				chan->channelFlags,
 				is_scanning,
 				aniState->ofdmNoiseImmunityLevel,
 				aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@
 		 * restore historical levels for this channel
 		 */
 		ath_dbg(common, ANI,
-			"Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+			"Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
 			ah->opmode,
 			chan->channel,
-			chan->channelFlags,
 			is_scanning,
 			aniState->ofdmNoiseImmunityLevel,
 			aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73..bd048cc 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@
 		}
 
 		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
-		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+		    div_ant_conf->lna1_lna2_switch_delta)
 			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
 		else
 			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@
 			ant_conf->fast_div_bias = 0x1;
 			break;
 		case 0x10: /* LNA2 A-B */
-			if ((antcomb->scan == 0) &&
-			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-				ant_conf->fast_div_bias = 0x3f;
-			} else {
-				ant_conf->fast_div_bias = 0x1;
-			}
+			ant_conf->fast_div_bias = 0x2;
 			break;
 		case 0x12: /* LNA2 LNA1 */
-			ant_conf->fast_div_bias = 0x39;
+			ant_conf->fast_div_bias = 0x3f;
 			break;
 		case 0x13: /* LNA2 A+B */
-			if ((antcomb->scan == 0) &&
-			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-				ant_conf->fast_div_bias = 0x3f;
-			} else {
-				ant_conf->fast_div_bias = 0x1;
-			}
+			ant_conf->fast_div_bias = 0x2;
 			break;
 		case 0x20: /* LNA1 A-B */
-			if ((antcomb->scan == 0) &&
-			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-				ant_conf->fast_div_bias = 0x3f;
-			} else {
-				ant_conf->fast_div_bias = 0x4;
-			}
+			ant_conf->fast_div_bias = 0x3;
 			break;
 		case 0x21: /* LNA1 LNA2 */
-			ant_conf->fast_div_bias = 0x6;
+			ant_conf->fast_div_bias = 0x3;
 			break;
 		case 0x23: /* LNA1 A+B */
-			if ((antcomb->scan == 0) &&
-			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-				ant_conf->fast_div_bias = 0x3f;
-			} else {
-				ant_conf->fast_div_bias = 0x6;
-			}
+			ant_conf->fast_div_bias = 0x3;
 			break;
 		case 0x30: /* A+B A-B */
 			ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@
 		antcomb->rssi_sub = alt_rssi_avg;
 		antcomb->scan = false;
 		if (antcomb->rssi_lna2 >
-		    (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+		    (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
 			/* use LNA2 as main LNA */
 			if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
 			    (antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 0865647..ff415e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@
 		if (AR_SREV_9287_11_OR_LATER(ah))
 			val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
 
+		val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
 		REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
 	}
 
-	REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
-		    AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
 	if (AR_SREV_9280_20_OR_LATER(ah))
 		return;
 	/*
@@ -667,14 +666,13 @@
 	if (IS_CHAN_HT40(chan)) {
 		phymode |= AR_PHY_FC_DYN2040_EN;
 
-		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-		    (chan->chanmode == CHANNEL_G_HT40PLUS))
+		if (IS_CHAN_HT40PLUS(chan))
 			phymode |= AR_PHY_FC_DYN2040_PRI_CH;
 
 	}
 	REG_WRITE(ah, AR_PHY_TURBO, phymode);
 
-	ath9k_hw_set11nmac2040(ah);
+	ath9k_hw_set11nmac2040(ah, chan);
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
@@ -692,31 +690,12 @@
 	int i, regWrites = 0;
 	u32 modesIndex, freqIndex;
 
-	switch (chan->chanmode) {
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-		modesIndex = 1;
+	if (IS_CHAN_5GHZ(chan)) {
 		freqIndex = 1;
-		break;
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		modesIndex = 2;
-		freqIndex = 1;
-		break;
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_B:
-		modesIndex = 4;
+		modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+	} else {
 		freqIndex = 2;
-		break;
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		modesIndex = 3;
-		freqIndex = 2;
-		break;
-
-	default:
-		return -EINVAL;
+		modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 	}
 
 	/*
@@ -815,8 +794,10 @@
 	if (chan == NULL)
 		return;
 
-	rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
-		? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+	if (IS_CHAN_2GHZ(chan))
+		rfMode |= AR_PHY_MODE_DYNAMIC;
+	else
+		rfMode |= AR_PHY_MODE_OFDM;
 
 	if (!AR_SREV_9280_20_OR_LATER(ah))
 		rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@
 
 	iniDef = &aniState->iniDef;
 
-	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
 		ah->hw_version.macVersion,
 		ah->hw_version.macRev,
 		ah->opmode,
-		chan->channel,
-		chan->channelFlags);
+		chan->channel);
 
 	val = REG_READ(ah, AR_PHY_SFCORR);
 	iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
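The chanmode switch collapsed above recurs in ar9002_hw.c and ar9003_phy.c
below. The new band + HT40 arithmetic reproduces the old cases exactly, and
the indices line up with the column order of the mode-dependent initval
tables (Addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20) such as
ar9485_1_1_mac_postamble further down:

	/* 5 GHz, HT20 (was CHANNEL_A, CHANNEL_A_HT20)      -> modesIndex 1 */
	/* 5 GHz, HT40 (was CHANNEL_A_HT40PLUS/MINUS)       -> modesIndex 2 */
	/* 2 GHz, HT40 (was CHANNEL_G_HT40PLUS/MINUS)       -> modesIndex 3 */
	/* 2 GHz, HT20 (was CHANNEL_G/CHANNEL_G_HT20/B)     -> modesIndex 4 */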
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f58974..cdc7400 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@
 	bool supported = false;
 	switch (ah->supp_cals & cal_type) {
 	case IQ_MISMATCH_CAL:
-		/* Run IQ Mismatch for non-CCK only */
-		if (!IS_CHAN_B(chan))
-			supported = true;
+		supported = true;
 		break;
 	case ADC_GAIN_CAL:
 	case ADC_DC_CAL:
 		/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
-		if (!IS_CHAN_B(chan) &&
-		    !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+		if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
 		      IS_CHAN_HT20(chan)))
 			supported = true;
 		break;
@@ -671,7 +668,7 @@
 
 	nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
 	if (ah->caldata)
-		nfcal_pending = ah->caldata->nfcal_pending;
+		nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
 	if (currCal && !nfcal &&
 	    (currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@
 	ar9002_hw_pa_cal(ah, true);
 
 	if (ah->caldata)
-		ah->caldata->nfcal_pending = true;
+		set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b08..5c95fd9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@
 	u32 modesIndex;
 	int i;
 
-	switch (chan->chanmode) {
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-		modesIndex = 1;
-		break;
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		modesIndex = 2;
-		break;
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_B:
-		modesIndex = 4;
-		break;
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		modesIndex = 3;
-		break;
-
-	default:
-		return;
-	}
+	if (IS_CHAN_5GHZ(chan))
+		modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+	else
+		modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa9..f087117 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@
 	if (IS_CHAN_HT40(ah->curchan))
 		nfarray[3] = sign_extend32(nf, 8);
 
-	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+	if (!(ah->rxchainmask & BIT(1)))
 		return;
 
 	nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@
 				 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
 	antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
 				  AR_PHY_9285_FAST_DIV_BIAS_S;
+	antconf->lna1_lna2_switch_delta = -1;
 	antconf->lna1_lna2_delta = -3;
 	antconf->div_group = 0;
 }
@@ -679,6 +680,26 @@
 	}
 }
 
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+	REG_SET_BIT(ah, 0x9864, 0x7f000);
+	REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+	REG_WRITE(ah, AR_CR, AR_CR_RXD);
+	REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+	REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+	REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+	REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+	REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+	REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+	REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
 void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -700,6 +721,8 @@
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
 #endif
+	ops->tx99_start = ar9002_hw_tx99_start;
+	ops->tx99_stop = ar9002_hw_tx99_stop;
 
 	ar9002_hw_set_nf_limits(ah);
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d..22934d3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@
 	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
 		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
 
-	if (caldata)
-		caldata->done_txiqcal_once = is_reusable;
+	if (caldata) {
+		if (is_reusable)
+			set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+		else
+			clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+	}
 
 	return;
 }
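From here on the ar9003 code folds the old per-calibration booleans
(done_txiqcal_once, done_txclcal_once, rtt_done, nfcal_pending) into a single
cal_flags bitmap driven by set_bit()/clear_bit()/test_bit(). That implies a
shape along these lines for the calibration data (a sketch: the flag names
are taken from this diff, their order and the rest of the struct are not
shown here):

enum ath9k_cal_flags {
	NFCAL_PENDING,		/* noise-floor cal outstanding */
	TXIQCAL_DONE,		/* TX IQ mismatch cal reusable */
	TXCLCAL_DONE,		/* TX carrier-leak cal reusable */
	SW_PKDET_DONE,		/* manual peak detect cal done */
	RTT_DONE,		/* RTT history captured */
	/* further flags elided */
};

struct ath9k_hw_cal_data {
	unsigned long cal_flags;	/* bitmap over ath9k_cal_flags;
					 * unsigned long because the
					 * bitops work on machine words */
	/* ... remaining calibration state ... */
};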
@@ -961,18 +965,44 @@
 }
 
 static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
-					 struct ath9k_channel *chan)
+					 struct ath9k_channel *chan,
+					 bool run_rtt_cal)
 {
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int i;
 
 	if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
 		return;
 
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+		return;
+
 	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
 		if (!(ah->rxchainmask & (1 << i)))
 			continue;
 		ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
 	}
+
+	if (caldata)
+		set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+		if (IS_CHAN_2GHZ(chan)) {
+			caldata->caldac[0] = REG_READ_FIELD(ah,
+						    AR_PHY_65NM_RXRF_AGC(0),
+						    AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+			caldata->caldac[1] = REG_READ_FIELD(ah,
+						    AR_PHY_65NM_RXRF_AGC(1),
+						    AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+		} else {
+			caldata->caldac[0] = REG_READ_FIELD(ah,
+						    AR_PHY_65NM_RXRF_AGC(0),
+						    AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+			caldata->caldac[1] = REG_READ_FIELD(ah,
+						    AR_PHY_65NM_RXRF_AGC(1),
+						    AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+		}
+	}
 }
 
 static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@
 	txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
 			  AR_PHY_AGC_CONTROL_CLC_SUCCESS);
 
-	if (caldata->done_txclcal_once) {
+	if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
 		for (i = 0; i < AR9300_MAX_CHAINS; i++) {
 			if (!(ah->txchainmask & (1 << i)))
 				continue;
@@ -1006,7 +1036,7 @@
 				caldata->tx_clcal[i][j] =
 					REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
 		}
-		caldata->done_txclcal_once = true;
+		set_bit(TXCLCAL_DONE, &caldata->cal_flags);
 	}
 }
 
@@ -1019,6 +1049,7 @@
 	bool is_reusable = true, status = true;
 	bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
 	bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+	u32 rx_delay = 0;
 	u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
 					  AR_PHY_AGC_CONTROL_FLTR_CAL   |
 					  AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@
 		ar9003_hw_rtt_clear_hist(ah);
 	}
 
-	if (rtt && !run_rtt_cal) {
-		agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
-		agc_supp_cals &= agc_ctrl;
-		agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
-			     AR_PHY_AGC_CONTROL_FLTR_CAL |
-			     AR_PHY_AGC_CONTROL_PKDET_CAL);
-		REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+	if (rtt) {
+		if (!run_rtt_cal) {
+			agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+			agc_supp_cals &= agc_ctrl;
+			agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+				      AR_PHY_AGC_CONTROL_FLTR_CAL |
+				      AR_PHY_AGC_CONTROL_PKDET_CAL);
+			REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+		} else {
+			if (ah->ah_flags & AH_FASTCC)
+				run_agc_cal = true;
+		}
 	}
 
 	if (ah->enabled_cals & TX_CL_CAL) {
-		if (caldata && caldata->done_txclcal_once)
+		if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
 			REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
 				    AR_PHY_CL_CAL_ENABLE);
 		else {
@@ -1076,14 +1112,14 @@
 	 * AGC calibration
 	 */
 	if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
-		if (caldata && !caldata->done_txiqcal_once)
+		if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
 			REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
 				    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
 		else
 			REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
 				    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
 		txiqcal_done = run_agc_cal = true;
-	} else if (caldata && !caldata->done_txiqcal_once) {
+	} else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
 		run_agc_cal = true;
 		sep_iq_cal = true;
 	}
@@ -1099,6 +1135,15 @@
 		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
 	}
 
+	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+		rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+		/* Disable BB_active */
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+		udelay(5);
+		REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	}
+
 	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
 		/* Calibrate the AGC */
 		REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@
 				       AR_PHY_AGC_CONTROL_CAL,
 				       0, AH_WAIT_TIMEOUT);
 
-		ar9003_hw_do_manual_peak_cal(ah, chan);
+		ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+	}
+
+	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+		REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+		udelay(5);
 	}
 
 	if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@
 
 	if (txiqcal_done)
 		ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-	else if (caldata && caldata->done_txiqcal_once)
+	else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
 		ar9003_hw_tx_iq_cal_reload(ah);
 
 	ar9003_hw_cl_cal_post_proc(ah, is_reusable);
 
 	if (run_rtt_cal && caldata) {
 		if (is_reusable) {
-			if (!ath9k_hw_rfbus_req(ah))
+			if (!ath9k_hw_rfbus_req(ah)) {
 				ath_err(ath9k_hw_common(ah),
 					"Could not stop baseband\n");
-			else
+			} else {
 				ar9003_hw_rtt_fill_hist(ah);
 
+				if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+					ar9003_hw_rtt_load_hist(ah);
+			}
+
 			ath9k_hw_rfbus_done(ah);
 		}
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f486480..1ec5235 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@
 	case EEP_CHAIN_MASK_REDUCE:
 		return (pBase->miscConfiguration >> 0x3) & 0x1;
 	case EEP_ANT_DIV_CTL1:
-		return eep->base_ext1.ant_div_control;
+		if (AR_SREV_9565(ah))
+			return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+		else
+			return eep->base_ext1.ant_div_control;
 	case EEP_ANTENNA_GAIN_5G:
 		return eep->modalHeader5G.antennaGain;
 	case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@
 	struct ar9300_base_eep_hdr *pBase;
 
 	if (!dump_base_hdr) {
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "2GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "2GHz modal Header");
 		len = ar9003_dump_modal_eeprom(buf, len, size,
 						&eep->modalHeader2G);
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "5GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "5GHz modal Header");
 		len = ar9003_dump_modal_eeprom(buf, len, size,
 						&eep->modalHeader5G);
 		goto out;
@@ -3479,8 +3482,8 @@
 	PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
 	PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
 
-	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-			ah->eeprom.ar9300_eep.macAddr);
+	len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			 ah->eeprom.ar9300_eep.macAddr);
 out:
 	if (len > size)
 		len = size;
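The snprintf-to-scnprintf swap matters because of the running len
accumulator: snprintf() returns the length the output would have had with
unlimited space, so one truncated line pushes len past size and the next
size - len argument is garbage; scnprintf() returns the bytes actually
written. The pattern in miniature (standalone sketch reusing the format
strings from this hunk):

static int example_dump(char *buf, size_t size)
{
	int len = 0;

	/*
	 * snprintf() would return the would-be length here, so a
	 * truncated line leaves len > size and the next size - len
	 * underflows.  scnprintf() returns the bytes actually stored,
	 * keeping the accumulator within the buffer.
	 */
	len += scnprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header");
	len += scnprintf(buf + len, size - len, "%20s :\n", "5GHz modal Header");

	return len;	/* always <= size */
}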
@@ -3656,9 +3659,23 @@
 		if (AR_SREV_9565(ah)) {
 			if (common->bt_ant_diversity) {
 				regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+				REG_SET_BIT(ah, AR_PHY_RESTART,
+					    AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+				/* Force WLAN LNA diversity ON */
+				REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+					    AR_BTCOEX_WL_LNADIV_FORCE_ON);
 			} else {
 				regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
 				regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+				REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+					    (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+				/* Force WLAN LNA diversity OFF */
+				REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+					    AR_BTCOEX_WL_LNADIV_FORCE_ON);
 			}
 		}
 
@@ -3669,7 +3686,8 @@
 		regval &= (~AR_FAST_DIV_ENABLE);
 		regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
 
-		if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+		if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+		    && common->bt_ant_diversity)
 			regval |= AR_FAST_DIV_ENABLE;
 
 		REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb4..0e5daa5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
 #define AR9300_PAPRD_SCALE_2		0x70000000
 #define AR9300_PAPRD_SCALE_2_S		28
 
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
 /* Delta from which to start power to pdadc table */
 /* This offset is used in both open loop and closed loop power control
  * schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb48..b07f164 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -364,6 +364,8 @@
 
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
 				ar9565_1p0_modes_fast_clock);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
 	} else {
 		/* mac */
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 				ar9462_common_rx_gain_table_2p0);
+	else if (AR_SREV_9565(ah))
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       ar9565_1p0_Common_rx_gain_table);
 	else
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 				ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd0692..7b94a6c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@
 		    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
 
 	if (caldata) {
-		caldata->done_txiqcal_once = false;
-		caldata->done_txclcal_once = false;
-		caldata->rtt_done = false;
+		clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+		clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+		clear_bit(RTT_DONE, &caldata->cal_flags);
 	}
 
 	if (!ath9k_hw_init_cal(ah, chan))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648..11f5358 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@
 	if (IS_CHAN_HT40(chan)) {
 		phymode |= AR_PHY_GC_DYN2040_EN;
 		/* Configure control (primary) channel at +-10MHz */
-		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-		    (chan->chanmode == CHANNEL_G_HT40PLUS))
+		if (IS_CHAN_HT40PLUS(chan))
 			phymode |= AR_PHY_GC_DYN2040_PRI_CH;
 
 	}
@@ -565,7 +564,7 @@
 	REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
 
 	/* Configure MAC for 20/40 operation */
-	ath9k_hw_set11nmac2040(ah);
+	ath9k_hw_set11nmac2040(ah, chan);
 
 	/* global transmit timeout (25 TUs default)*/
 	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@
 	 * MAC addr only will fail.
 	 */
 	val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
-	REG_WRITE(ah, AR_PCU_MISC_MODE2,
-		  val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
-	REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
-		    AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+	val |= AR_AGG_WEP_ENABLE_FIX |
+	       AR_AGG_WEP_ENABLE |
+	       AR_PCU_MISC_MODE2_CFP_IGNORE;
+	REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
 
 	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
 		REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,42 +681,23 @@
 {
 	int ret;
 
-	switch (chan->chanmode) {
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-		if (chan->channel <= 5350)
-			ret = 1;
-		else if ((chan->channel > 5350) && (chan->channel <= 5600))
-			ret = 3;
+	if (IS_CHAN_2GHZ(chan)) {
+		if (IS_CHAN_HT40(chan))
+			return 7;
 		else
-			ret = 5;
-		break;
-
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		if (chan->channel <= 5350)
-			ret = 2;
-		else if ((chan->channel > 5350) && (chan->channel <= 5600))
-			ret = 4;
-		else
-			ret = 6;
-		break;
-
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_B:
-		ret = 8;
-		break;
-
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		ret = 7;
-		break;
-
-	default:
-		ret = -EINVAL;
+			return 8;
 	}
 
+	if (chan->channel <= 5350)
+		ret = 1;
+	else if ((chan->channel > 5350) && (chan->channel <= 5600))
+		ret = 3;
+	else
+		ret = 5;
+
+	if (IS_CHAN_HT40(chan))
+		ret++;
+
 	return ret;
 }
 
@@ -728,28 +707,10 @@
 	unsigned int regWrites = 0, i;
 	u32 modesIndex;
 
-	switch (chan->chanmode) {
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-		modesIndex = 1;
-		break;
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		modesIndex = 2;
-		break;
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_B:
-		modesIndex = 4;
-		break;
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		modesIndex = 3;
-		break;
-
-	default:
-		return -EINVAL;
-	}
+	if (IS_CHAN_5GHZ(chan))
+		modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+	else
+		modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
 	/*
 	 * SOC, MAC, BB, RADIO initvals.
@@ -847,8 +808,10 @@
 	if (chan == NULL)
 		return;
 
-	rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
-		? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+	if (IS_CHAN_2GHZ(chan))
+		rfMode |= AR_PHY_MODE_DYNAMIC;
+	else
+		rfMode |= AR_PHY_MODE_OFDM;
 
 	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
 		rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@
 	aniState = &ah->ani;
 	iniDef = &aniState->iniDef;
 
-	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
 		ah->hw_version.macVersion,
 		ah->hw_version.macRev,
 		ah->opmode,
-		chan->channel,
-		chan->channelFlags);
+		chan->channel);
 
 	val = REG_READ(ah, AR_PHY_SFCORR);
 	iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@
 				  AR_PHY_ANT_FAST_DIV_BIAS_S;
 
 	if (AR_SREV_9330_11(ah)) {
+		antconf->lna1_lna2_switch_delta = -1;
 		antconf->lna1_lna2_delta = -9;
 		antconf->div_group = 1;
 	} else if (AR_SREV_9485(ah)) {
+		antconf->lna1_lna2_switch_delta = -1;
 		antconf->lna1_lna2_delta = -9;
 		antconf->div_group = 2;
 	} else if (AR_SREV_9565(ah)) {
-		antconf->lna1_lna2_delta = -3;
+		antconf->lna1_lna2_switch_delta = 3;
+		antconf->lna1_lna2_delta = -9;
 		antconf->div_group = 3;
 	} else {
+		antconf->lna1_lna2_switch_delta = -1;
 		antconf->lna1_lna2_delta = -3;
 		antconf->div_group = 0;
 	}
@@ -1489,17 +1455,24 @@
 	} else if (AR_SREV_9565(ah)) {
 		if (enable) {
 			REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+				    AR_ANT_DIV_ENABLE);
+			REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
 				    (1 << AR_PHY_ANT_SW_RX_PROT_S));
-			if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
-				REG_SET_BIT(ah, AR_PHY_RESTART,
-					    AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+			REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+				    AR_FAST_DIV_ENABLE);
+			REG_SET_BIT(ah, AR_PHY_RESTART,
+				    AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
 			REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
 				    AR_BTCOEX_WL_LNADIV_FORCE_ON);
 		} else {
-			REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+			REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+				    AR_ANT_DIV_ENABLE);
 			REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
 				    (1 << AR_PHY_ANT_SW_RX_PROT_S));
-			REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+			REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+				    AR_FAST_DIV_ENABLE);
+			REG_CLR_BIT(ah, AR_PHY_RESTART,
+				    AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
 			REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
 				    AR_BTCOEX_WL_LNADIV_FORCE_ON);
 
@@ -1526,28 +1499,10 @@
 	unsigned int regWrites = 0;
 	u32 modesIndex;
 
-	switch (chan->chanmode) {
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-		modesIndex = 1;
-		break;
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		modesIndex = 2;
-		break;
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_B:
-		modesIndex = 4;
-		break;
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		modesIndex = 3;
-		break;
-
-	default:
-		return -EINVAL;
-	}
+	if (IS_CHAN_5GHZ(chan))
+		modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+	else
+		modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
 	if (modesIndex == ah->modes_index) {
 		*ini_reloaded = false;
@@ -1662,6 +1617,98 @@
 	}
 }
 
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+	REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+	REG_SET_BIT(ah, 0x9864, 0x7f000);
+	REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+	REG_WRITE(ah, AR_CR, AR_CR_RXD);
+	REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+	REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+	REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+	REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+	REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+	REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+	REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+	static s16 p_pwr_array[ar9300RateSize] = { 0 };
+	unsigned int i;
+
+	if (txpower <= MAX_RATE_POWER) {
+		for (i = 0; i < ar9300RateSize; i++)
+			p_pwr_array[i] = txpower;
+	} else {
+		for (i = 0; i < ar9300RateSize; i++)
+			p_pwr_array[i] = MAX_RATE_POWER;
+	}
+
+	REG_WRITE(ah, 0xa458, 0);
+
+	REG_WRITE(ah, 0xa3c0,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24],  8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24],  0));
+	REG_WRITE(ah, 0xa3c4,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54],  24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48],  16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36],   8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+	REG_WRITE(ah, 0xa3c8,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L],  0));
+	REG_WRITE(ah, 0xa3cc,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S],   24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L],   16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S],     8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L],  0));
+	REG_WRITE(ah, 0xa3d0,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5],  24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4],  16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+	REG_WRITE(ah, 0xa3d4,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7],   8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6],   0));
+	REG_WRITE(ah, 0xa3e4,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15],  8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14],  0));
+	REG_WRITE(ah, 0xa3e8,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23],  8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22],  0));
+	REG_WRITE(ah, 0xa3d8,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+	REG_WRITE(ah, 0xa3dc,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7],   8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6],   0));
+	REG_WRITE(ah, 0xa3ec,
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15],  8) |
+		  ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14],  0));
+}
+
 void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1701,6 +1748,9 @@
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
 #endif
+	ops->tx99_start = ar9003_hw_tx99_start;
+	ops->tx99_stop = ar9003_hw_tx99_stop;
+	ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
 
 	ar9003_hw_set_nf_limits(ah);
 	ar9003_hw_set_radar_conf(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd7523..fca6243 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
 
 #define AR_PHY_CCA_NOM_VAL_9462_2GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ     -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
 #define AR_PHY_CCA_NOM_VAL_9462_5GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ     -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
 
 #define AR_PHY_CCA_NOM_VAL_9330_2GHZ          -118
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de353..9344188 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@
 	}
 }
 
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+	int agc, caldac;
+
+	if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+		return;
+
+	if ((index != 5) || (chain >= 2))
+		return;
+
+	agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			     AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+	if (!agc)
+		return;
+
+	caldac = ah->caldata->caldac[chain];
+	ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+	caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+	ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
 static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
 {
 	u32 val;
@@ -155,13 +176,16 @@
 		for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
 			ah->caldata->rtt_table[chain][i] =
 				ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+			ar9003_hw_patch_rtt(ah, i, chain);
+
 			ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 				"RTT value at idx %d, chain %d is: 0x%x\n",
 				i, chain, ah->caldata->rtt_table[chain][i]);
 		}
 	}
 
-	ah->caldata->rtt_done = true;
+	set_bit(RTT_DONE, &ah->caldata->cal_flags);
 }
 
 void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@
 	}
 
 	if (ah->caldata)
-		ah->caldata->rtt_done = false;
+		clear_bit(RTT_DONE, &ah->caldata->cal_flags);
 }
 
 bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@
 	if (!ah->caldata)
 		return false;
 
-	if (!ah->caldata->rtt_done)
+	if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+		if (IS_CHAN_2GHZ(chan)) {
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+				      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+				      ah->caldata->caldac[0]);
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+				      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+				      ah->caldata->caldac[1]);
+		} else {
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+				      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+				      ah->caldata->caldac[0]);
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+				      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+				      ah->caldata->caldac[1]);
+		}
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+			      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+			      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+	}
+
+	if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
 		return false;
 
 	ar9003_hw_rtt_enable(ah);
-	ar9003_hw_rtt_set_mask(ah, 0x10);
+
+	if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+		ar9003_hw_rtt_set_mask(ah, 0x30);
+	else
+		ar9003_hw_rtt_set_mask(ah, 0x10);
 
 	if (!ath9k_hw_rfbus_req(ah)) {
 		ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7..6f899c6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,7 +20,17 @@
 
 /* AR9485 1.1 */
 
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
 
 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes  */
@@ -34,6 +44,7 @@
 	{0x00009e00, 0x037216a0},
 	{0x00009e04, 0x00182020},
 	{0x00009e18, 0x00000000},
+	{0x00009e20, 0x000003a8},
 	{0x00009e2c, 0x00004121},
 	{0x00009e44, 0x02282324},
 	{0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@
 	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
 	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
 	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
 	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
 	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@
 	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
 	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
 	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
 	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@
 static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
 	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
 	{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@
 	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
 	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
 	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
 	{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
 	{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
 	{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,7 +373,7 @@
 	{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
 	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
 	{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
 	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
 	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -367,14 +399,14 @@
 	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
 	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
 	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
 	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -521,12 +553,109 @@
 	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
 };
 
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+	{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+	{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+	{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+	{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+	{0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+	{0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+	{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+	{0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+	{0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+	{0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+	{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
 
 static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+	{0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+	{0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+	{0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
 	{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
 	{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@
 	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
 	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
 	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
-	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
-	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
-	{0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
-	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
-	{0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
-	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
-	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
-	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
-	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
-	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
-	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+	{0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+	{0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+	{0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+	{0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+	{0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+	{0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+	{0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+	{0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+	{0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+	{0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+	{0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+	{0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+	{0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+	{0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
 	{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
 	{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
 	{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@
 	{0x00009e00, 0x03721b20},
 	{0x00009e04, 0x00082020},
 	{0x00009e18, 0x0300501e},
+	{0x00009e20, 0x000003ba},
 	{0x00009e2c, 0x00002e21},
 	{0x00009e44, 0x02182324},
 	{0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@
 	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
 	{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
 	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@
 	{0x000083d0, 0x000301ff},
 };
 
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a398, 0x00000000},
+	{0x0000a39c, 0x6f7f0301},
+	{0x0000a3a0, 0xca9228ee},
+};
 
 #endif /* INITVALS_9485_H */
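
The ar9485 changes above replace #define aliases (which pointed AR9485 at AR9300-family tables) with dedicated static const arrays, so the AR9485 values can diverge where its calibration differs. Each row pairs a register address with one value per channel mode. A minimal sketch of how such a table could be consumed — write_initvals() and ath_reg_write() are illustrative stand-ins, not the ath9k INI machinery:

    /*
     * Minimal sketch of initval-table consumption. Column layout follows
     * the table comments: 0 = Addr, 1 = 5G_HT20, 2 = 5G_HT40,
     * 3 = 2G_HT40, 4 = 2G_HT20.
     */
    #include <stdint.h>
    #include <stddef.h>

    enum ini_mode { MODE_5G_HT20 = 1, MODE_5G_HT40, MODE_2G_HT40, MODE_2G_HT20 };

    static void ath_reg_write(uint32_t addr, uint32_t val)
    {
    	/* placeholder for the real MMIO register write */
    	(void)addr; (void)val;
    }

    static void write_initvals(const uint32_t (*tbl)[5], size_t rows,
    			   enum ini_mode mode)
    {
    	size_t i;

    	for (i = 0; i < rows; i++)
    		ath_reg_write(tbl[i][0], tbl[i][mode]);
    }
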
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b0..a8c757b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@
 	{0x0000a398, 0x001f0e0f},
 	{0x0000a39c, 0x0075393f},
 	{0x0000a3a0, 0xb79f6427},
-	{0x0000a3a4, 0x00000000},
-	{0x0000a3a8, 0xaaaaaaaa},
-	{0x0000a3ac, 0x3c466478},
+	{0x0000a3a4, 0x00000011},
+	{0x0000a3a8, 0xaaaaaa6e},
+	{0x0000a3ac, 0x3c466455},
 	{0x0000a3c0, 0x20202020},
 	{0x0000a3c4, 0x22222220},
 	{0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@
 	{0x0000a404, 0x00000000},
 	{0x0000a408, 0x0e79e5c6},
 	{0x0000a40c, 0x00820820},
-	{0x0000a414, 0x1ce739ce},
+	{0x0000a414, 0x1ce739c5},
 	{0x0000a418, 0x2d001dce},
-	{0x0000a41c, 0x1ce739ce},
+	{0x0000a41c, 0x1ce739c5},
 	{0x0000a420, 0x000001ce},
-	{0x0000a424, 0x1ce739ce},
+	{0x0000a424, 0x1ce739c5},
 	{0x0000a428, 0x000001ce},
 	{0x0000a42c, 0x1ce739ce},
 	{0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@
 	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@
 	/* Addr      allmodes  */
 	{0x00004050, 0x00300300},
 	{0x0000406c, 0x00100000},
+	{0x00009e20, 0x000003b6},
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
 	{0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@
 	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 };
 
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a398, 0x00000000},
+	{0x0000a39c, 0x6f7f0301},
+	{0x0000a3a0, 0xca9228ee},
+};
+
 #endif /* INITVALS_9565_1P0_H */
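
Both initvals headers gain an identical *_baseband_core_txfir_coeff_japan_2484 table. Channel 14 (2484 MHz) is Japan-only and sits hard against the band edge, so the baseband TX filter is typically reprogrammed with tighter coefficients there. A hedged sketch of the expected use — load_ini_table() is a hypothetical helper, not the driver's actual INI writer:

    #include <stdint.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical two-column INI writer: tbl[i][0] is the register
     * address, tbl[i][1] the value for all modes. */
    static void load_ini_table(const uint32_t (*tbl)[2], size_t rows)
    {
    	size_t i;

    	for (i = 0; i < rows; i++)
    		/* write tbl[i][1] to register tbl[i][0] */ ;
    }

    static void maybe_load_japan_txfir(uint16_t channel)
    {
    	if (channel == 2484)	/* channel 14, Japan only */
    		load_ini_table(ar9565_1p0_baseband_core_txfir_coeff_japan_2484,
    			       ARRAY_SIZE(ar9565_1p0_baseband_core_txfir_coeff_japan_2484));
    }
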
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f6..e7a38d8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@
 
 struct ath_config {
 	u16 txpowlimit;
-	u8 cabqReadytime;
 };
 
 /*************************/
@@ -207,6 +206,14 @@
 	u8 baw_tracked : 1;
 };
 
+struct ath_rxbuf {
+	struct list_head list;
+	struct sk_buff *bf_mpdu;
+	void *bf_desc;
+	dma_addr_t bf_daddr;
+	dma_addr_t bf_buf_addr;
+};
+
 struct ath_buf_state {
 	u8 bf_type;
 	u8 bfs_paprd;
@@ -307,7 +314,7 @@
 	struct ath_descdma rxdma;
 	struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 
-	struct ath_buf *buf_hold;
+	struct ath_rxbuf *buf_hold;
 	struct sk_buff *frag;
 
 	u32 ampdu_ref;
@@ -459,8 +466,8 @@
 
 #define ATH_DUMP_BTCOEX(_s, _val)				\
 	do {							\
-		len += snprintf(buf + len, size - len,		\
-				"%20s : %10d\n", _s, (_val));	\
+		len += scnprintf(buf + len, size - len,		\
+				 "%20s : %10d\n", _s, (_val));	\
 	} while (0)
 
 enum bt_op_flags {
@@ -581,7 +588,6 @@
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
 
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@
 /* Main driver core */
 /********************/
 
-#define ATH9K_PCI_CUS198     0x0001
-#define ATH9K_PCI_CUS230     0x0002
-#define ATH9K_PCI_CUS217     0x0004
-#define ATH9K_PCI_WOW        0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR  0x0020
+#define ATH9K_PCI_CUS198      0x0001
+#define ATH9K_PCI_CUS230      0x0002
+#define ATH9K_PCI_CUS217      0x0004
+#define ATH9K_PCI_CUS252      0x0008
+#define ATH9K_PCI_WOW         0x0010
+#define ATH9K_PCI_BT_ANT_DIV  0x0020
+#define ATH9K_PCI_D3_L1_WAR   0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
 
 /*
  * Default cache line size, in bytes.
@@ -769,6 +778,11 @@
 	enum spectral_mode spectral_mode;
 	struct ath_spec_scan spec_config;
 
+	struct ieee80211_vif *tx99_vif;
+	struct sk_buff *tx99_skb;
+	bool tx99_state;
+	s16 tx99_power;
+
 #ifdef CONFIG_PM_SLEEP
 	atomic_t wow_got_bmiss_intr;
 	atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -877,6 +891,7 @@
  */
 enum ath_fft_sample_type {
 	ATH_FFT_SAMPLE_HT20 = 1,
+	ATH_FFT_SAMPLE_HT20_40,
 };
 
 struct fft_sample_tlv {
@@ -903,6 +918,39 @@
 	u8 data[SPECTRAL_HT20_NUM_BINS];
 } __packed;
 
+struct fft_sample_ht20_40 {
+	struct fft_sample_tlv tlv;
+
+	u8 channel_type;
+	__be16 freq;
+
+	s8 lower_rssi;
+	s8 upper_rssi;
+
+	__be64 tsf;
+
+	s8 lower_noise;
+	s8 upper_noise;
+
+	__be16 lower_max_magnitude;
+	__be16 upper_max_magnitude;
+
+	u8 lower_max_index;
+	u8 upper_max_index;
+
+	u8 lower_bitmap_weight;
+	u8 upper_bitmap_weight;
+
+	u8 max_exp;
+
+	u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+int ath9k_tx99_init(struct ath_softc *sc);
+void ath9k_tx99_deinit(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+		    struct ath_tx_control *txctl);
+
 void ath9k_tasklet(unsigned long data);
 int ath_cabq_update(struct ath_softc *);
 
@@ -924,7 +972,6 @@
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath9k_reload_chainmask_settings(struct ath_softc *sc);
 
-bool ath9k_uses_beacons(int type);
 void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
 int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
 			       enum spectral_mode spectral_mode);
@@ -952,7 +999,7 @@
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ath9k_vif_iter_data *iter_data);
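
The ATH_DUMP_BTCOEX change above, and the bulk of the debug.c diff below, convert snprintf() to scnprintf(). The distinction matters for the "len += ...; size - len" accumulation idiom: snprintf() returns the length the output would have had without truncation, so once the buffer fills, len can exceed size and the unsigned subtraction size - len wraps to a huge value. scnprintf() returns the number of bytes actually stored, keeping len bounded. A self-contained user-space demonstration, where my_scnprintf() approximates the kernel helper:

    #include <stdarg.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's scnprintf(): returns the number
     * of bytes actually stored (excluding the NUL), never more than size-1. */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
    	va_list ap;
    	int ret;

    	va_start(ap, fmt);
    	ret = vsnprintf(buf, size, fmt, ap);
    	va_end(ap);

    	if (ret < 0)
    		return 0;
    	if ((size_t)ret >= size)
    		return size ? (int)(size - 1) : 0;
    	return ret;
    }

    int main(void)
    {
    	char buf[8];
    	/* snprintf() reports the untruncated length: 11 > sizeof(buf).
    	 * Accumulating that as "len" makes "size - len" wrap. */
    	int would_be = snprintf(buf, sizeof(buf), "hello world");
    	/* scnprintf() reports what was actually stored: 7. */
    	int stored = my_scnprintf(buf, sizeof(buf), "hello world");

    	printf("%d vs %d\n", would_be, stored); /* prints "11 vs 7" */
    	return 0;
    }
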
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a..17be353 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@
 	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
 		sc->beacon.bmisscnt++;
 
+		ath9k_hw_check_nav(ah);
+
 		if (!ath9k_hw_check_alive(ah))
 			ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 
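
The beacon.c hunk adds an ath9k_hw_check_nav() call on the path where a beacon is still pending in hardware, i.e. the tasklet fired but the previous beacon never went out. A stuck NAV (virtual carrier sense) can block all transmission, so it is checked before deciding whether the chip is dead. The surrounding escalation pattern, sketched with illustrative names and threshold rather than ath9k's exact values:

    /* Sketch of the beacon-stuck escalation; threshold and callbacks are
     * illustrative assumptions. */
    #define BSTUCK_THRESHOLD 9

    struct beacon_wd {
    	unsigned int bmisscnt;
    };

    static void beacon_tasklet_check(struct beacon_wd *wd, int pending,
    				 void (*check_stuck_nav)(void),
    				 void (*queue_hw_check)(void))
    {
    	if (!pending) {
    		wd->bmisscnt = 0;		/* beacon went out */
    		return;
    	}
    	wd->bmisscnt++;
    	check_stuck_nav();			/* a wedged NAV blocks TX */
    	if (wd->bmisscnt >= BSTUCK_THRESHOLD)
    		queue_hw_check();		/* give up, reset the chip */
    }
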
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a..278365b 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,13 @@
 	return ath9k_hw_get_nf_limits(ah, chan)->nominal;
 }
 
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+			   s16 nf)
 {
 	s8 noise = ATH_DEFAULT_NOISE_FLOOR;
 
-	if (chan && chan->noisefloor) {
-		s8 delta = chan->noisefloor -
-			   ATH9K_NF_CAL_NOISE_THRESH -
+	if (nf) {
+		s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
 			   ath9k_hw_get_default_nf(ah, chan);
 		if (delta > 0)
 			noise += delta;
@@ -119,7 +119,7 @@
 			ath_dbg(common, CALIBRATE,
 				"NFmid[%d] (%d) > MAX (%d), %s\n",
 				i, h[i].privNF, limit->max,
-				(cal->nfcal_interference ?
+				(test_bit(NFCAL_INTF, &cal->cal_flags) ?
 				 "not corrected (due to interference)" :
 				 "correcting to MAX"));
 
@@ -130,7 +130,7 @@
 			 * we bypass this limit here in order to better deal
 			 * with our environment.
 			 */
-			if (!cal->nfcal_interference)
+			if (!test_bit(NFCAL_INTF, &cal->cal_flags))
 				h[i].privNF = limit->max;
 		}
 	}
@@ -141,7 +141,7 @@
 	 * Re-enable the enforcement of the NF maximum again.
 	 */
 	if (!high_nf_mid)
-		cal->nfcal_interference = false;
+		clear_bit(NFCAL_INTF, &cal->cal_flags);
 }
 
 static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ieee80211_conf *conf = &common->hw->conf;
 	struct ath9k_cal_list *currCal = ah->cal_list_curr;
 
 	if (!ah->caldata)
@@ -208,7 +207,7 @@
 		return true;
 
 	ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
-		currCal->calData->calType, conf->chandef.chan->center_freq);
+		currCal->calData->calType, ah->curchan->chan->center_freq);
 
 	ah->caldata->CalValid &= ~currCal->calData->calType;
 	currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@
 void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
 {
 	if (ah->caldata)
-		ah->caldata->nfcal_pending = true;
+		set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
 		    AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@
 	int32_t val;
 	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ieee80211_conf *conf = &common->hw->conf;
 	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
 
 	if (ah->caldata)
@@ -252,7 +250,7 @@
 		if (chainmask & (1 << i)) {
 			s16 nfval;
 
-			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+			if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
 				continue;
 
 			if (h)
@@ -314,7 +312,7 @@
 	ENABLE_REGWRITE_BUFFER(ah);
 	for (i = 0; i < NUM_NF_READINGS; i++) {
 		if (chainmask & (1 << i)) {
-			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+			if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
 				continue;
 
 			val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,10 +389,10 @@
 	}
 
 	h = caldata->nfCalHist;
-	caldata->nfcal_pending = false;
+	clear_bit(NFCAL_PENDING, &caldata->cal_flags);
 	ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
 	chan->noisefloor = h[0].privNF;
-	ah->noise = ath9k_hw_getchan_noise(ah, chan);
+	ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
 	return true;
 }
 EXPORT_SYMBOL(ath9k_hw_getnf);
@@ -408,7 +406,6 @@
 
 	ah->caldata->channel = chan->channel;
 	ah->caldata->channelFlags = chan->channelFlags;
-	ah->caldata->chanmode = chan->chanmode;
 	h = ah->caldata->nfCalHist;
 	default_nf = ath9k_hw_get_default_nf(ah, chan);
 	for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@
 	 * the baseband update the internal NF value itself, similar to
 	 * what is being done after a full reset.
 	 */
-	if (!caldata->nfcal_pending)
+	if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
 		ath9k_hw_start_nfcal(ah, true);
 	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
 		ath9k_hw_getnf(ah, ah->curchan);
 
-	caldata->nfcal_interference = true;
+	set_bit(NFCAL_INTF, &caldata->cal_flags);
 }
 EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
 
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3d70b8c..b8ed95e 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -116,7 +116,8 @@
 void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
 void ath9k_hw_reset_calibration(struct ath_hw *ah,
 				struct ath9k_cal_list *currCal);
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+			   s16 nf);
 
 
 #endif /* CALIB_H */
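
The calib.c/calib.h changes fold several independent booleans (nfcal_pending, nfcal_interference, plus the RTT/peak-detect state seen in the first hunk) into one unsigned long cal_flags word driven by set_bit()/clear_bit()/test_bit(). Those helpers are atomic, so individual flags can be flipped from different contexts without a lock, and the whole group resets with a single store. The pattern, using the flag names from the diff — the enum itself is a sketch, not the exact ath9k definition:

    #include <linux/bitops.h>

    /* Bit positions named after the flags used in the diff. */
    enum cal_flag_bits {
    	NFCAL_PENDING,
    	NFCAL_INTF,
    	SW_PKDET_DONE,
    	RTT_DONE,
    };

    struct caldata_sketch {
    	unsigned long cal_flags;
    };

    static void cal_flags_example(struct caldata_sketch *cal)
    {
    	set_bit(NFCAL_PENDING, &cal->cal_flags);	/* NF cal started */
    	if (test_bit(NFCAL_INTF, &cal->cal_flags))	/* interference seen? */
    		clear_bit(NFCAL_INTF, &cal->cal_flags);
    	cal->cal_flags = 0;				/* clear everything */
    }
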
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c2..a7e5a05 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@
 }
 EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
 
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
-	u32 chanmode = 0;
-
-	switch (chandef->chan->band) {
-	case IEEE80211_BAND_2GHZ:
-		switch (chandef->width) {
-		case NL80211_CHAN_WIDTH_20_NOHT:
-		case NL80211_CHAN_WIDTH_20:
-			chanmode = CHANNEL_G_HT20;
-			break;
-		case NL80211_CHAN_WIDTH_40:
-			if (chandef->center_freq1 > chandef->chan->center_freq)
-				chanmode = CHANNEL_G_HT40PLUS;
-			else
-				chanmode = CHANNEL_G_HT40MINUS;
-			break;
-		default:
-			break;
-		}
-		break;
-	case IEEE80211_BAND_5GHZ:
-		switch (chandef->width) {
-		case NL80211_CHAN_WIDTH_20_NOHT:
-		case NL80211_CHAN_WIDTH_20:
-			chanmode = CHANNEL_A_HT20;
-			break;
-		case NL80211_CHAN_WIDTH_40:
-			if (chandef->center_freq1 > chandef->chan->center_freq)
-				chanmode = CHANNEL_A_HT40PLUS;
-			else
-				chanmode = CHANNEL_A_HT40MINUS;
-			break;
-		default:
-			break;
-		}
-		break;
-	default:
-		break;
-	}
-
-	return chanmode;
-}
-
 /*
  * Update internal channel flags.
  */
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-			       struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+				      struct cfg80211_chan_def *chandef)
 {
-	ichan->channel = chandef->chan->center_freq;
-	ichan->chan = chandef->chan;
+	struct ieee80211_channel *chan = chandef->chan;
+	u16 flags = 0;
 
-	if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
-		ichan->chanmode = CHANNEL_G;
-		ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
-	} else {
-		ichan->chanmode = CHANNEL_A;
-		ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
-	}
+	ichan->channel = chan->center_freq;
+	ichan->chan = chan;
+
+	if (chan->band == IEEE80211_BAND_5GHZ)
+		flags |= CHANNEL_5GHZ;
 
 	switch (chandef->width) {
 	case NL80211_CHAN_WIDTH_5:
-		ichan->channelFlags |= CHANNEL_QUARTER;
+		flags |= CHANNEL_QUARTER;
 		break;
 	case NL80211_CHAN_WIDTH_10:
-		ichan->channelFlags |= CHANNEL_HALF;
+		flags |= CHANNEL_HALF;
 		break;
 	case NL80211_CHAN_WIDTH_20_NOHT:
 		break;
 	case NL80211_CHAN_WIDTH_20:
+		flags |= CHANNEL_HT;
+		break;
 	case NL80211_CHAN_WIDTH_40:
-		ichan->chanmode = ath9k_get_extchanmode(chandef);
+		if (chandef->center_freq1 > chandef->chan->center_freq)
+			flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+		else
+			flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
 		break;
 	default:
 		WARN_ON(1);
 	}
+
+	ichan->channelFlags = flags;
 }
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
 
 /*
  * Get the internal channel reference.
  */
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
-					       struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+					    struct ath_hw *ah,
+					    struct cfg80211_chan_def *chandef)
 {
-	struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+	struct ieee80211_channel *curchan = chandef->chan;
 	struct ath9k_channel *channel;
 	u8 chan_idx;
 
 	chan_idx = curchan->hw_value;
 	channel = &ah->channels[chan_idx];
-	ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+	ath9k_cmn_update_ichannel(channel, chandef);
 
 	return channel;
 }
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
 
 int ath9k_cmn_count_streams(unsigned int chainmask, int max)
 {
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcb..eb85e1b 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
 	(((x) + ((mul)/2)) / (mul))
 
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-			       struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
-					       struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+					    struct ath_hw *ah,
+					    struct cfg80211_chan_def *chandef);
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
 void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
 				  enum ath_stomp_type stomp_type);
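
The common.c change folds ath9k_cmn_update_ichannel() into the renamed lookup helper and drops the implicit read of hw->conf: callers now pass the cfg80211_chan_def they actually mean. A sketch of the call-site shape after the change — only the helper's new signature is taken from the diff, the surrounding function is an assumption:

    /* Sketch of a caller after the API change. */
    static void set_channel_sketch(struct ieee80211_hw *hw, struct ath_hw *ah,
    			       struct cfg80211_chan_def *chandef)
    {
    	struct ath9k_channel *hchan;

    	/* Old: ath9k_cmn_get_curchannel(hw, ah) read hw->conf.chandef
    	 * behind the caller's back.  New: the source is explicit. */
    	hchan = ath9k_cmn_get_channel(hw, ah, chandef);

    	/* ... program the hardware for hchan ... */
    	(void)hchan;
    }
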
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744..83a2c59 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@
 		return -ENOMEM;
 
 	if (common->disable_ani) {
-		len += snprintf(buf + len, size - len, "%s: %s\n",
-				"ANI", "DISABLED");
+		len += scnprintf(buf + len, size - len, "%s: %s\n",
+				 "ANI", "DISABLED");
 		goto exit;
 	}
 
-	len += snprintf(buf + len, size - len, "%15s: %s\n",
-			"ANI", "ENABLED");
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"ANI RESET", ah->stats.ast_ani_reset);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"SPUR UP", ah->stats.ast_ani_spurup);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"SPUR DOWN", ah->stats.ast_ani_spurup);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"MRC-CCK ON", ah->stats.ast_ani_ccklow);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"FIR-STEP UP", ah->stats.ast_ani_stepup);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
-	len += snprintf(buf + len, size - len, "%15s: %u\n",
-			"CCK ERRORS", ah->stats.ast_ani_cckerrs);
+	len += scnprintf(buf + len, size - len, "%15s: %s\n",
+			 "ANI", "ENABLED");
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "ANI RESET", ah->stats.ast_ani_reset);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "SPUR UP", ah->stats.ast_ani_spurup);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "SPUR DOWN", ah->stats.ast_ani_spurup);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "FIR-STEP UP", ah->stats.ast_ani_stepup);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+	len += scnprintf(buf + len, size - len, "%15s: %u\n",
+			 "CCK ERRORS", ah->stats.ast_ani_cckerrs);
 exit:
 	if (len > size)
 		len = size;
@@ -280,70 +280,70 @@
 		return -ENOMEM;
 
 	if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
-		len += snprintf(buf + len, size - len, "%s\n",
-				"Antenna Diversity Combining is disabled");
+		len += scnprintf(buf + len, size - len, "%s\n",
+				 "Antenna Diversity Combining is disabled");
 		goto exit;
 	}
 
 	ath9k_ps_wakeup(sc);
 	ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
-	len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
-			lna_conf_str[div_ant_conf.main_lna_conf]);
-	len += snprintf(buf + len, size - len, "Current ALT config  : %s\n",
-			lna_conf_str[div_ant_conf.alt_lna_conf]);
-	len += snprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
-			as_main->rssi_avg);
-	len += snprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
-			as_alt->rssi_avg);
+	len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+			 lna_conf_str[div_ant_conf.main_lna_conf]);
+	len += scnprintf(buf + len, size - len, "Current ALT config  : %s\n",
+			 lna_conf_str[div_ant_conf.alt_lna_conf]);
+	len += scnprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
+			 as_main->rssi_avg);
+	len += scnprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
+			 as_alt->rssi_avg);
 	ath9k_ps_restore(sc);
 
-	len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
-	len += snprintf(buf + len, size - len, "-------------------\n");
+	len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+	len += scnprintf(buf + len, size - len, "-------------------\n");
 
-	len += snprintf(buf + len, size - len, "%30s%15s\n",
-			"MAIN", "ALT");
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"TOTAL COUNT",
-			as_main->recv_cnt,
-			as_alt->recv_cnt);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1",
-			as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
-			as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA2",
-			as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
-			as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1 + LNA2",
-			as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
-			as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1 - LNA2",
-			as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
-			as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+	len += scnprintf(buf + len, size - len, "%30s%15s\n",
+			 "MAIN", "ALT");
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "TOTAL COUNT",
+			 as_main->recv_cnt,
+			 as_alt->recv_cnt);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1",
+			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA2",
+			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1 + LNA2",
+			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1 - LNA2",
+			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
 
-	len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
-	len += snprintf(buf + len, size - len, "--------------------\n");
+	len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+	len += scnprintf(buf + len, size - len, "--------------------\n");
 
-	len += snprintf(buf + len, size - len, "%30s%15s\n",
-			"MAIN", "ALT");
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1",
-			as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
-			as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA2",
-			as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
-			as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1 + LNA2",
-			as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
-			as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
-	len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-			"LNA1 - LNA2",
-			as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
-			as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+	len += scnprintf(buf + len, size - len, "%30s%15s\n",
+			 "MAIN", "ALT");
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1",
+			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA2",
+			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1 + LNA2",
+			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+			 "LNA1 - LNA2",
+			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
 
 exit:
 	if (len > size)
@@ -385,21 +385,21 @@
 		   (AR_MACMISC_MISC_OBS_BUS_1 <<
 		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));
 
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
-			"Raw DMA Debug values:\n");
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
+			 "Raw DMA Debug values:\n");
 
 	for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
 		if (i % 4 == 0)
-			len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+			len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
 
 		val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
-		len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
-				i, val[i]);
+		len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+				 i, val[i]);
 	}
 
-	len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
-			"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+	len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
+			 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
 
 	for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
 		if (i == 8) {
@@ -412,39 +412,39 @@
 			dcuBase++;
 		}
 
-		len += snprintf(buf + len, DMA_BUF_LEN - len,
-			"%2d          %2x      %1x     %2x           %2x\n",
-			i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
-			(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
-			val[2] & (0x7 << (i * 3)) >> (i * 3),
-			(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+		len += scnprintf(buf + len, DMA_BUF_LEN - len,
+			 "%2d          %2x      %1x     %2x           %2x\n",
+			 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+			 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+			 val[2] & (0x7 << (i * 3)) >> (i * 3),
+			 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
 	}
 
-	len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+	len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
 
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"qcu_stitch state:   %2x    qcu_fetch state:        %2x\n",
 		(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"qcu_complete state: %2x    dcu_complete state:     %2x\n",
 		(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"dcu_arb state:      %2x    dcu_fp state:           %2x\n",
 		(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"chan_idle_dur:     %3d    chan_idle_dur_valid:     %1d\n",
 		(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"txfifo_valid_0:      %1d    txfifo_valid_1:          %1d\n",
 		(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
 		"txfifo_dcu_num_0:   %2d    txfifo_dcu_num_1:       %2d\n",
 		(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
 
-	len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
-			REG_READ_D(ah, AR_OBS_BUS_1));
-	len += snprintf(buf + len, DMA_BUF_LEN - len,
-			"AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+	len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+			 REG_READ_D(ah, AR_OBS_BUS_1));
+	len += scnprintf(buf + len, DMA_BUF_LEN - len,
+			 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
 
 	ath9k_ps_restore(sc);
 
@@ -530,9 +530,9 @@
 
 #define PR_IS(a, s)						\
 	do {							\
-		len += snprintf(buf + len, mxlen - len,		\
-				"%21s: %10u\n", a,		\
-				sc->debug.stats.istats.s);	\
+		len += scnprintf(buf + len, mxlen - len,	\
+				 "%21s: %10u\n", a,		\
+				 sc->debug.stats.istats.s);	\
 	} while (0)
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@
 	PR_IS("GENTIMER", gen_timer);
 	PR_IS("TOTAL", total);
 
-	len += snprintf(buf + len, mxlen - len,
-			"SYNC_CAUSE stats:\n");
+	len += scnprintf(buf + len, mxlen - len,
+			 "SYNC_CAUSE stats:\n");
 
 	PR_IS("Sync-All", sync_cause_all);
 	PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@
 
 	ath_txq_lock(sc, txq);
 
-	len += snprintf(buf + len, size - len, "%s: %d ",
-			"qnum", txq->axq_qnum);
-	len += snprintf(buf + len, size - len, "%s: %2d ",
-			"qdepth", txq->axq_depth);
-	len += snprintf(buf + len, size - len, "%s: %2d ",
-			"ampdu-depth", txq->axq_ampdu_depth);
-	len += snprintf(buf + len, size - len, "%s: %3d ",
-			"pending", txq->pending_frames);
-	len += snprintf(buf + len, size - len, "%s: %d\n",
-			"stopped", txq->stopped);
+	len += scnprintf(buf + len, size - len, "%s: %d ",
+			 "qnum", txq->axq_qnum);
+	len += scnprintf(buf + len, size - len, "%s: %2d ",
+			 "qdepth", txq->axq_depth);
+	len += scnprintf(buf + len, size - len, "%s: %2d ",
+			 "ampdu-depth", txq->axq_ampdu_depth);
+	len += scnprintf(buf + len, size - len, "%s: %3d ",
+			 "pending", txq->pending_frames);
+	len += scnprintf(buf + len, size - len, "%s: %d\n",
+			 "stopped", txq->stopped);
 
 	ath_txq_unlock(sc, txq);
 	return len;
@@ -687,11 +687,11 @@
 
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		txq = sc->tx.txq_map[i];
-		len += snprintf(buf + len, size - len, "(%s):  ", qname[i]);
+		len += scnprintf(buf + len, size - len, "(%s):  ", qname[i]);
 		len += print_queue(sc, txq, buf + len, size - len);
 	}
 
-	len += snprintf(buf + len, size - len, "(CAB): ");
+	len += scnprintf(buf + len, size - len, "(CAB): ");
 	len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
 
 	if (len > size)
@@ -716,80 +716,82 @@
 	unsigned int reg;
 	u32 rxfilter;
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"BSSID: %pM\n", common->curbssid);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"BSSID-MASK: %pM\n", common->bssidmask);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "BSSID: %pM\n", common->curbssid);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "BSSID-MASK: %pM\n", common->bssidmask);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "OPMODE: %s\n",
+			 ath_opmode_to_string(sc->sc_ah->opmode));
 
 	ath9k_ps_wakeup(sc);
 	rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
 	ath9k_ps_restore(sc);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"RXFILTER: 0x%x", rxfilter);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "RXFILTER: 0x%x", rxfilter);
 
 	if (rxfilter & ATH9K_RX_FILTER_UCAST)
-		len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+		len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
 	if (rxfilter & ATH9K_RX_FILTER_MCAST)
-		len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+		len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
 	if (rxfilter & ATH9K_RX_FILTER_BCAST)
-		len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+		len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
 	if (rxfilter & ATH9K_RX_FILTER_CONTROL)
-		len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+		len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
 	if (rxfilter & ATH9K_RX_FILTER_BEACON)
-		len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+		len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
 	if (rxfilter & ATH9K_RX_FILTER_PROM)
-		len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+		len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
 	if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
-		len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+		len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
 	if (rxfilter & ATH9K_RX_FILTER_PHYERR)
-		len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+		len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
 	if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
-		len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+		len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
 	if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
-		len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+		len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
 	if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
-		len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+		len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
 	if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
-		len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+		len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
 	if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
-		len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+		len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
 	if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
-		len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+		len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
 
-	len += snprintf(buf + len, sizeof(buf) - len, "\n");
+	len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
 	reg = sc->sc_ah->imask;
 
-	len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "INTERRUPT-MASK: 0x%x", reg);
 
 	if (reg & ATH9K_INT_SWBA)
-		len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+		len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
 	if (reg & ATH9K_INT_BMISS)
-		len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+		len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
 	if (reg & ATH9K_INT_CST)
-		len += snprintf(buf + len, sizeof(buf) - len, " CST");
+		len += scnprintf(buf + len, sizeof(buf) - len, " CST");
 	if (reg & ATH9K_INT_RX)
-		len += snprintf(buf + len, sizeof(buf) - len, " RX");
+		len += scnprintf(buf + len, sizeof(buf) - len, " RX");
 	if (reg & ATH9K_INT_RXHP)
-		len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+		len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
 	if (reg & ATH9K_INT_RXLP)
-		len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+		len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
 	if (reg & ATH9K_INT_BB_WATCHDOG)
-		len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+		len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
 
-	len += snprintf(buf + len, sizeof(buf) - len, "\n");
+	len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
 	ath9k_calculate_iter_data(hw, NULL, &iter_data);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
-			" ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
-			iter_data.naps, iter_data.nstations, iter_data.nmeshes,
-			iter_data.nwds, iter_data.nadhocs,
-			sc->nvifs, sc->nbcnvifs);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+			 " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+			 iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+			 iter_data.nwds, iter_data.nadhocs,
+			 sc->nvifs, sc->nbcnvifs);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -805,27 +807,27 @@
 	char buf[512];
 	unsigned int len = 0;
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "Baseband Hang",
-			sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "Baseband Watchdog",
-			sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "Fatal HW Error",
-			sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "TX HW error",
-			sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "TX Path Hang",
-			sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "PLL RX Hang",
-			sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%17s: %2d\n", "MCI Reset",
-			sc->debug.stats.reset[RESET_TYPE_MCI]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "Baseband Hang",
+			 sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "Baseband Watchdog",
+			 sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "Fatal HW Error",
+			 sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "TX HW error",
+			 sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "TX Path Hang",
+			 sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "PLL RX Hang",
+			 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%17s: %2d\n", "MCI Reset",
+			 sc->debug.stats.reset[RESET_TYPE_MCI]);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -902,14 +904,14 @@
 			      size_t count, loff_t *ppos)
 {
 #define PHY_ERR(s, p) \
-	len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
-			sc->debug.stats.rxstats.phy_err_stats[p]);
+	len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+			 sc->debug.stats.rxstats.phy_err_stats[p]);
 
 #define RXS_ERR(s, e)					    \
 	do {						    \
-		len += snprintf(buf + len, size - len,	    \
-				"%22s : %10u\n", s,	    \
-				sc->debug.stats.rxstats.e); \
+		len += scnprintf(buf + len, size - len,	    \
+				 "%22s : %10u\n", s,	    \
+				 sc->debug.stats.rxstats.e);\
 	} while (0)
 
 	struct ath_softc *sc = file->private_data;
@@ -1048,6 +1050,9 @@
 	char buf[32];
 	ssize_t len;
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return -EOPNOTSUPP;
+
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
@@ -1439,22 +1444,22 @@
 	if (!buf)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"Channel Noise Floor : %d\n", ah->noise);
-	len += snprintf(buf + len, size - len,
-			"Chain | privNF | # Readings | NF Readings\n");
+	len += scnprintf(buf + len, size - len,
+			 "Channel Noise Floor : %d\n", ah->noise);
+	len += scnprintf(buf + len, size - len,
+			 "Chain | privNF | # Readings | NF Readings\n");
 	for (i = 0; i < NUM_NF_READINGS; i++) {
 		if (!(chainmask & (1 << i)) ||
 		    ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
 			continue;
 
 		nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
-		len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
-				i, h[i].privNF, nread);
+		len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+				 i, h[i].privNF, nread);
 		for (j = 0; j < nread; j++)
-			len += snprintf(buf + len, size - len,
-					" %d", h[i].nfCalBuffer[j]);
-		len += snprintf(buf + len, size - len, "\n");
+			len += scnprintf(buf + len, size - len,
+					 " %d", h[i].nfCalBuffer[j]);
+		len += scnprintf(buf + len, size - len, "\n");
 	}
 
 	if (len > size)
@@ -1543,8 +1548,8 @@
 		return -ENOMEM;
 
 	if (!sc->sc_ah->common.btcoex_enabled) {
-		len = snprintf(buf, size, "%s\n",
-			       "BTCOEX is disabled");
+		len = scnprintf(buf, size, "%s\n",
+				"BTCOEX is disabled");
 		goto exit;
 	}
 
@@ -1582,43 +1587,43 @@
 		return -ENOMEM;
 
 	if (!an->sta->ht_cap.ht_supported) {
-		len = snprintf(buf, size, "%s\n",
-			       "HT not supported");
+		len = scnprintf(buf, size, "%s\n",
+				"HT not supported");
 		goto exit;
 	}
 
-	len = snprintf(buf, size, "Max-AMPDU: %d\n",
-		       an->maxampdu);
-	len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
-			an->mpdudensity);
+	len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+			an->maxampdu);
+	len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+			 an->mpdudensity);
 
-	len += snprintf(buf + len, size - len,
-			"%2s%7s\n", "AC", "SCHED");
+	len += scnprintf(buf + len, size - len,
+			 "%2s%7s\n", "AC", "SCHED");
 
 	for (acno = 0, ac = &an->ac[acno];
 	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
 		txq = ac->txq;
 		ath_txq_lock(sc, txq);
-		len += snprintf(buf + len, size - len,
-				"%2d%7d\n",
-				acno, ac->sched);
+		len += scnprintf(buf + len, size - len,
+				 "%2d%7d\n",
+				 acno, ac->sched);
 		ath_txq_unlock(sc, txq);
 	}
 
-	len += snprintf(buf + len, size - len,
-			"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
-			"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
-			"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+	len += scnprintf(buf + len, size - len,
+			 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+			 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+			 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 		txq = tid->ac->txq;
 		ath_txq_lock(sc, txq);
-		len += snprintf(buf + len, size - len,
-				"%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
-				tid->tidno, tid->seq_start, tid->seq_next,
-				tid->baw_size, tid->baw_head, tid->baw_tail,
-				tid->bar_index, tid->sched, tid->paused);
+		len += scnprintf(buf + len, size - len,
+				 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+				 tid->tidno, tid->seq_start, tid->seq_next,
+				 tid->baw_size, tid->baw_head, tid->baw_tail,
+				 tid->bar_index, tid->sched, tid->paused);
 		ath_txq_unlock(sc, txq);
 	}
 exit:
@@ -1773,6 +1778,111 @@
 	}
 }
 
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[3];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->tx99_state);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	char buf[32];
+	bool start;
+	ssize_t len;
+	int r;
+
+	if (sc->nvifs > 1)
+		return -EOPNOTSUPP;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	if (strtobool(buf, &start))
+		return -EINVAL;
+
+	if (start == sc->tx99_state) {
+		if (!start)
+			return count;
+		ath_dbg(common, XMIT, "Resetting TX99\n");
+		ath9k_tx99_deinit(sc);
+	}
+
+	if (!start) {
+		ath9k_tx99_deinit(sc);
+		return count;
+	}
+
+	r = ath9k_tx99_init(sc);
+	if (r)
+		return r;
+
+	return count;
+}
+
+static const struct file_operations fops_tx99 = {
+	.read = read_file_tx99,
+	.write = write_file_tx99,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+				    char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d (%d dBm)\n",
+		      sc->tx99_power,
+		      sc->tx99_power / 2);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	int r;
+	u8 tx_power;
+
+	r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+	if (r)
+		return r;
+
+	if (tx_power > MAX_RATE_POWER)
+		return -EINVAL;
+
+	sc->tx99_power = tx_power;
+
+	ath9k_ps_wakeup(sc);
+	ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+	ath9k_ps_restore(sc);
+
+	return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+	.read = read_file_tx99_power,
+	.write = write_file_tx99_power,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath9k_init_debug(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
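
With both nodes registered (see the ath9k_init_debug() hunk below), TX99 is driven entirely from userspace: write the power level, start with "1", stop with "0". Note from write_file_tx99() above that the start is refused with EOPNOTSUPP while more than one interface is up, and that writing "1" to an already-running session resets it. A hypothetical userspace helper, assuming the usual ath9k debugfs location and phy index (both vary per system):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  /* Assumed path; substitute the right phyN for the target device. */
  #define TX99_DIR "/sys/kernel/debug/ieee80211/phy0/ath9k"

  static int write_node(const char *node, const char *val)
  {
      char path[128];
      int fd, ok;

      snprintf(path, sizeof(path), "%s/%s", TX99_DIR, node);
      fd = open(path, O_WRONLY);
      if (fd < 0)
          return -1;
      ok = write(fd, val, strlen(val)) == (ssize_t)strlen(val) ? 0 : -1;
      close(fd);
      return ok;
  }

  int main(void)
  {
      /* tx99_power counts in 0.5 dBm steps: "20" requests 10 dBm,
       * matching the "%d (%d dBm)" read format above. */
      if (write_node("tx99_power", "20") || write_node("tx99", "1")) {
          perror("tx99 start");
          return 1;
      }
      /* ... continuous transmission runs here ... */
      return write_node("tx99", "0") ? 1 : 0;
  }
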
@@ -1864,5 +1974,15 @@
 	debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_btcoex);
 #endif
+	if (config_enabled(CONFIG_ATH9K_TX99) &&
+	    AR_SREV_9300_20_OR_LATER(ah)) {
+		debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+				    sc->debug.debugfs_phy, sc,
+				    &fops_tx99);
+		debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+				    sc->debug.debugfs_phy, sc,
+				    &fops_tx99_power);
+	}
+
 	return 0;
 }
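
Nearly all of the debug.c churn above is a mechanical snprintf()-to-scnprintf() conversion, and the difference is the whole point of the series: snprintf() returns the length the output would have had without truncation, so once one line truncates, a "len += snprintf(buf + len, sizeof(buf) - len, ...)" chain pushes len past the buffer, and the next call receives an out-of-bounds pointer plus a size_t that has wrapped to a huge value. scnprintf() returns the number of characters actually stored, so len can never overrun. A userspace model of the kernel helper's contract:

  #include <stdarg.h>
  #include <stdio.h>

  /* Sketch of the kernel's scnprintf(): identical to snprintf() except
   * for the return value, which is the count actually written to buf
   * (excluding the NUL), never the would-be length. */
  static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
  {
      va_list args;
      int i;

      if (size == 0)
          return 0;
      va_start(args, fmt);
      i = vsnprintf(buf, size, fmt, args);
      va_end(args);
      return (i < 0) ? 0 : (i >= (int)size ? (int)size - 1 : i);
  }

Accumulating with the scnprintf() contract leaves len clamped at size - 1 on truncation, so the "if (len > sizeof(buf))" clamps scattered through these handlers become belt-and-braces rather than the only thing preventing an overrun.
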
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556f..d6e3fa4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@
 #define TXSTATS sc->debug.stats.txstats
 #define PR(str, elem)							\
 	do {								\
-		len += snprintf(buf + len, size - len,			\
-				"%s%13u%11u%10u%10u\n", str,		\
-				TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,	\
-				TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,	\
-				TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,	\
-				TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+		len += scnprintf(buf + len, size - len,			\
+				 "%s%13u%11u%10u%10u\n", str,		\
+				 TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+				 TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+				 TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+				 TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
 	} while(0)
 
 #define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index 3c839f0..c6fa3d5 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,7 +17,7 @@
 
 #ifndef ATH9K_DFS_H
 #define ATH9K_DFS_H
-#include "dfs_pattern_detector.h"
+#include "../dfs_pattern_detector.h"
 
 #if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 /**
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e413..90b8342 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -20,16 +20,16 @@
 
 #include "ath9k.h"
 #include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
 
-
-struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
 
 #define ATH9K_DFS_STAT(s, p) \
-	len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
-			sc->debug.stats.dfs_stats.p);
+	len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+			 sc->debug.stats.dfs_stats.p);
 #define ATH9K_DFS_POOL_STAT(s, p) \
-	len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
-			global_dfs_pool_stats.p);
+	len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+			 dfs_pool_stats.p);
 
 static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
@@ -44,12 +44,15 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len, "DFS support for "
-			"macVersion = 0x%x, macRev = 0x%x: %s\n",
-			hw_ver->macVersion, hw_ver->macRev,
-			(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+	if (sc->dfs_detector)
+		dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+	len += scnprintf(buf + len, size - len, "DFS support for "
+			 "macVersion = 0x%x, macRev = 0x%x: %s\n",
+			 hw_ver->macVersion, hw_ver->macRev,
+			 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
 					"enabled" : "disabled");
-	len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+	len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
 	ATH9K_DFS_STAT("pulse events reported   ", pulses_total);
 	ATH9K_DFS_STAT("invalid pulse events    ", pulses_no_dfs);
 	ATH9K_DFS_STAT("DFS pulses detected     ", pulses_detected);
@@ -59,11 +62,12 @@
 	ATH9K_DFS_STAT("Primary channel pulses  ", pri_phy_errors);
 	ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
 	ATH9K_DFS_STAT("Dual channel pulses     ", dc_phy_errors);
-	len += snprintf(buf + len, size - len, "Radar detector statistics "
-			"(current DFS region: %d)\n", sc->dfs_detector->region);
+	len += scnprintf(buf + len, size - len, "Radar detector statistics "
+			 "(current DFS region: %d)\n",
+			 sc->dfs_detector->region);
 	ATH9K_DFS_STAT("Pulse events processed  ", pulses_processed);
 	ATH9K_DFS_STAT("Radars detected         ", radar_detected);
-	len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+	len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
 	ATH9K_DFS_POOL_STAT("Pool references         ", pool_reference);
 	ATH9K_DFS_POOL_STAT("Pulses allocated        ", pulse_allocated);
 	ATH9K_DFS_POOL_STAT("Pulses alloc error      ", pulse_alloc_error);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index e36810a..0a7ddf4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -51,25 +51,11 @@
 	u32 radar_detected;
 };
 
-/**
- * struct ath_dfs_pool_stats - DFS Statistics for global pools
- */
-struct ath_dfs_pool_stats {
-	u32 pool_reference;
-	u32 pulse_allocated;
-	u32 pulse_alloc_error;
-	u32 pulse_used;
-	u32 pseq_allocated;
-	u32 pseq_alloc_error;
-	u32 pseq_used;
-};
 #if defined(CONFIG_ATH9K_DFS_DEBUGFS)
 
 #define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
 void ath9k_dfs_init_debug(struct ath_softc *sc);
 
-#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
-#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
 extern struct ath_dfs_pool_stats global_dfs_pool_stats;
 
 #else
@@ -77,8 +63,6 @@
 #define DFS_STAT_INC(sc, c) do { } while (0)
 static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
 
-#define DFS_POOL_STAT_INC(c) do { } while (0)
-#define DFS_POOL_STAT_DEC(c) do { } while (0)
 #endif /* CONFIG_ATH9K_DFS_DEBUGFS */
 
 #endif /* ATH9K_DFS_DEBUG_H */
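
The dfs_debug.c change above is more than a printf swap: the exported global_dfs_pool_stats is gone from the .c file, and the debugfs read now pulls a fresh snapshot from the detector instance through its get_stats() method (the stats struct itself moves to ../dfs_pattern_detector.h). Hiding the counters behind an accessor keeps them per-detector and drops the need to increment a driver-wide global. A stripped-down sketch of the accessor pattern, with illustrative names rather than the detector's real layout:

  struct pool_stats {
      unsigned int pulses_allocated;
      unsigned int pulses_used;
  };

  struct detector {
      struct pool_stats stats;
      /* Returned by value so the caller gets a consistent snapshot
       * rather than a pointer into live state. */
      struct pool_stats (*get_stats)(struct detector *d);
  };

  static struct pool_stats detector_get_stats(struct detector *d)
  {
      return d->stats;
  }
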
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b..b409171 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@
 	struct base_eep_header_4k *pBase = &eep->baseEepHeader;
 
 	if (!dump_base_hdr) {
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "2GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "2GHz modal Header");
 		len = ath9k_dump_4k_modal_eeprom(buf, len, size,
-						  &eep->modalHeader);
+						 &eep->modalHeader);
 		goto out;
 	}
 
@@ -160,8 +160,8 @@
 	PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
 	PR_EEP("TX Gain type", pBase->txGainType);
 
-	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-			pBase->macAddr);
+	len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			 pBase->macAddr);
 
 out:
 	if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3d..e1d0c21 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@
 	struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
 
 	if (!dump_base_hdr) {
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "2GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "2GHz modal Header");
 		len = ar9287_dump_modal_eeprom(buf, len, size,
 						&eep->modalHeader);
 		goto out;
@@ -157,8 +157,8 @@
 	PR_EEP("Power Table Offset", pBase->pwrTableOffset);
 	PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
-	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-			pBase->macAddr);
+	len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			 pBase->macAddr);
 
 out:
 	if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368..39107e3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@
 	struct base_eep_header *pBase = &eep->baseEepHeader;
 
 	if (!dump_base_hdr) {
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "2GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "2GHz modal Header");
 		len = ath9k_def_dump_modal_eeprom(buf, len, size,
 						   &eep->modalHeader[0]);
-		len += snprintf(buf + len, size - len,
-				"%20s :\n", "5GHz modal Header");
+		len += scnprintf(buf + len, size - len,
+				 "%20s :\n", "5GHz modal Header");
 		len = ath9k_def_dump_modal_eeprom(buf, len, size,
 						   &eep->modalHeader[1]);
 		goto out;
@@ -240,8 +240,8 @@
 	PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
 	PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
-	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-			pBase->macAddr);
+	len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			 pBase->macAddr);
 
 out:
 	if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aa..c34f212 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@
 	ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
 	ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
 
-	len += snprintf(buf + len, size - len, "BT Weights: ");
+	len += scnprintf(buf + len, size - len, "BT Weights: ");
 	for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-		len += snprintf(buf + len, size - len, "%08x ",
-				btcoex_hw->bt_weight[i]);
-	len += snprintf(buf + len, size - len, "\n");
-	len += snprintf(buf + len, size - len, "WLAN Weights: ");
+		len += scnprintf(buf + len, size - len, "%08x ",
+				 btcoex_hw->bt_weight[i]);
+	len += scnprintf(buf + len, size - len, "\n");
+	len += scnprintf(buf + len, size - len, "WLAN Weights: ");
 	for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-		len += snprintf(buf + len, size - len, "%08x ",
-				btcoex_hw->wlan_weight[i]);
-	len += snprintf(buf + len, size - len, "\n");
-	len += snprintf(buf + len, size - len, "Tx Priorities: ");
+		len += scnprintf(buf + len, size - len, "%08x ",
+				 btcoex_hw->wlan_weight[i]);
+	len += scnprintf(buf + len, size - len, "\n");
+	len += scnprintf(buf + len, size - len, "Tx Priorities: ");
 	for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
-		len += snprintf(buf + len, size - len, "%08x ",
+		len += scnprintf(buf + len, size - len, "%08x ",
 				btcoex_hw->tx_prio[i]);
 
-	len += snprintf(buf + len, size - len, "\n");
+	len += scnprintf(buf + len, size - len, "\n");
 
 	return len;
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2..fb071ee 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@
 
 	ath9k_htc_ps_restore(priv);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "RX",
-			be32_to_cpu(cmd_rsp.rx));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "RX",
+			 be32_to_cpu(cmd_rsp.rx));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "RXORN",
-			be32_to_cpu(cmd_rsp.rxorn));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "RXORN",
+			 be32_to_cpu(cmd_rsp.rxorn));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "RXEOL",
-			be32_to_cpu(cmd_rsp.rxeol));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "RXEOL",
+			 be32_to_cpu(cmd_rsp.rxeol));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "TXURN",
-			be32_to_cpu(cmd_rsp.txurn));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "TXURN",
+			 be32_to_cpu(cmd_rsp.txurn));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "TXTO",
-			be32_to_cpu(cmd_rsp.txto));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "TXTO",
+			 be32_to_cpu(cmd_rsp.txto));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "CST",
-			be32_to_cpu(cmd_rsp.cst));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "CST",
+			 be32_to_cpu(cmd_rsp.cst));
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -95,41 +95,41 @@
 
 	ath9k_htc_ps_restore(priv);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "Xretries",
-			be32_to_cpu(cmd_rsp.xretries));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "Xretries",
+			 be32_to_cpu(cmd_rsp.xretries));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "FifoErr",
-			be32_to_cpu(cmd_rsp.fifoerr));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "FifoErr",
+			 be32_to_cpu(cmd_rsp.fifoerr));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "Filtered",
-			be32_to_cpu(cmd_rsp.filtered));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "Filtered",
+			 be32_to_cpu(cmd_rsp.filtered));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "TimerExp",
-			be32_to_cpu(cmd_rsp.timer_exp));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "TimerExp",
+			 be32_to_cpu(cmd_rsp.timer_exp));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "ShortRetries",
-			be32_to_cpu(cmd_rsp.shortretries));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "ShortRetries",
+			 be32_to_cpu(cmd_rsp.shortretries));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "LongRetries",
-			be32_to_cpu(cmd_rsp.longretries));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "LongRetries",
+			 be32_to_cpu(cmd_rsp.longretries));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "QueueNull",
-			be32_to_cpu(cmd_rsp.qnull));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "QueueNull",
+			 be32_to_cpu(cmd_rsp.qnull));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "EncapFail",
-			be32_to_cpu(cmd_rsp.encap_fail));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "EncapFail",
+			 be32_to_cpu(cmd_rsp.encap_fail));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "NoBuf",
-			be32_to_cpu(cmd_rsp.nobuf));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "NoBuf",
+			 be32_to_cpu(cmd_rsp.nobuf));
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -165,17 +165,17 @@
 
 	ath9k_htc_ps_restore(priv);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "NoBuf",
-			be32_to_cpu(cmd_rsp.nobuf));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "NoBuf",
+			 be32_to_cpu(cmd_rsp.nobuf));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "HostSend",
-			be32_to_cpu(cmd_rsp.host_send));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "HostSend",
+			 be32_to_cpu(cmd_rsp.host_send));
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "HostDone",
-			be32_to_cpu(cmd_rsp.host_done));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "HostDone",
+			 be32_to_cpu(cmd_rsp.host_done));
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -197,37 +197,37 @@
 	char buf[512];
 	unsigned int len = 0;
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "Buffers queued",
-			priv->debug.tx_stats.buf_queued);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "Buffers completed",
-			priv->debug.tx_stats.buf_completed);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "SKBs queued",
-			priv->debug.tx_stats.skb_queued);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "SKBs success",
-			priv->debug.tx_stats.skb_success);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "SKBs failed",
-			priv->debug.tx_stats.skb_failed);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "CAB queued",
-			priv->debug.tx_stats.cab_queued);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "Buffers queued",
+			 priv->debug.tx_stats.buf_queued);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "Buffers completed",
+			 priv->debug.tx_stats.buf_completed);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "SKBs queued",
+			 priv->debug.tx_stats.skb_queued);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "SKBs success",
+			 priv->debug.tx_stats.skb_success);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "SKBs failed",
+			 priv->debug.tx_stats.skb_failed);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "CAB queued",
+			 priv->debug.tx_stats.cab_queued);
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "BE queued",
-			priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "BK queued",
-			priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "VI queued",
-			priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"%20s : %10u\n", "VO queued",
-			priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "BE queued",
+			 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "BK queued",
+			 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "VI queued",
+			 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "%20s : %10u\n", "VO queued",
+			 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -273,8 +273,8 @@
 			      size_t count, loff_t *ppos)
 {
 #define PHY_ERR(s, p)							\
-	len += snprintf(buf + len, size - len, "%20s : %10u\n", s,	\
-			priv->debug.rx_stats.err_phy_stats[p]);
+	len += scnprintf(buf + len, size - len, "%20s : %10u\n", s,	\
+			 priv->debug.rx_stats.err_phy_stats[p]);
 
 	struct ath9k_htc_priv *priv = file->private_data;
 	char *buf;
@@ -285,37 +285,37 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "SKBs allocated",
-			priv->debug.rx_stats.skb_allocated);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "SKBs completed",
-			priv->debug.rx_stats.skb_completed);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "SKBs Dropped",
-			priv->debug.rx_stats.skb_dropped);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "SKBs allocated",
+			 priv->debug.rx_stats.skb_allocated);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "SKBs completed",
+			 priv->debug.rx_stats.skb_completed);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "SKBs Dropped",
+			 priv->debug.rx_stats.skb_dropped);
 
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "CRC ERR",
-			priv->debug.rx_stats.err_crc);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "DECRYPT CRC ERR",
-			priv->debug.rx_stats.err_decrypt_crc);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "MIC ERR",
-			priv->debug.rx_stats.err_mic);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "PRE-DELIM CRC ERR",
-			priv->debug.rx_stats.err_pre_delim);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "POST-DELIM CRC ERR",
-			priv->debug.rx_stats.err_post_delim);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "DECRYPT BUSY ERR",
-			priv->debug.rx_stats.err_decrypt_busy);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10u\n", "TOTAL PHY ERR",
-			priv->debug.rx_stats.err_phy);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "CRC ERR",
+			 priv->debug.rx_stats.err_crc);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "DECRYPT CRC ERR",
+			 priv->debug.rx_stats.err_decrypt_crc);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "MIC ERR",
+			 priv->debug.rx_stats.err_mic);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "PRE-DELIM CRC ERR",
+			 priv->debug.rx_stats.err_pre_delim);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "POST-DELIM CRC ERR",
+			 priv->debug.rx_stats.err_post_delim);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "DECRYPT BUSY ERR",
+			 priv->debug.rx_stats.err_decrypt_busy);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10u\n", "TOTAL PHY ERR",
+			 priv->debug.rx_stats.err_phy);
 
 
 	PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@
 
 	spin_lock_bh(&priv->tx.tx_lock);
 
-	len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+	len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
 
 	len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
 			       priv->tx.tx_slot, MAX_TX_BUF_NUM);
 
-	len += snprintf(buf + len, sizeof(buf) - len, "\n");
+	len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
-	len += snprintf(buf + len, sizeof(buf) - len,
-			"Used slots     : %d\n",
-			bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+	len += scnprintf(buf + len, sizeof(buf) - len,
+			 "Used slots     : %d\n",
+			 bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
 
 	spin_unlock_bh(&priv->tx.tx_lock);
 
@@ -405,30 +405,30 @@
 	char buf[512];
 	unsigned int len = 0;
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
 
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Failed queue", skb_queue_len(&priv->tx.tx_failed));
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Failed queue", skb_queue_len(&priv->tx.tx_failed));
 
 	spin_lock_bh(&priv->tx.tx_lock);
-	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-			"Queued count", priv->tx.queued_cnt);
+	len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+			 "Queued count", priv->tx.queued_cnt);
 	spin_unlock_bh(&priv->tx.tx_lock);
 
 	if (len > sizeof(buf))
@@ -507,70 +507,70 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "Major Version",
-			pBase->version >> 12);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "Minor Version",
-			pBase->version & 0xFFF);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "Checksum",
-			pBase->checksum);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "Length",
-			pBase->length);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "RegDomain1",
-			pBase->regDmn[0]);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n", "RegDomain2",
-			pBase->regDmn[1]);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"TX Mask", pBase->txMask);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"RX Mask", pBase->rxMask);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Allow 5GHz",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Allow 2GHz",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Disable 2GHz HT20",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Disable 2GHz HT40",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Disable 5Ghz HT20",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Disable 5Ghz HT40",
-			!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Big Endian",
-			!!(pBase->eepMisc & 0x01));
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Cal Bin Major Ver",
-			(pBase->binBuildNumber >> 24) & 0xFF);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Cal Bin Minor Ver",
-			(pBase->binBuildNumber >> 16) & 0xFF);
-	len += snprintf(buf + len, size - len,
-			"%20s : %10d\n",
-			"Cal Bin Build",
-			(pBase->binBuildNumber >> 8) & 0xFF);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "Major Version",
+			 pBase->version >> 12);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "Minor Version",
+			 pBase->version & 0xFFF);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "Checksum",
+			 pBase->checksum);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "Length",
+			 pBase->length);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "RegDomain1",
+			 pBase->regDmn[0]);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n", "RegDomain2",
+			 pBase->regDmn[1]);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "TX Mask", pBase->txMask);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "RX Mask", pBase->rxMask);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Allow 5GHz",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Allow 2GHz",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Disable 2GHz HT20",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Disable 2GHz HT40",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Disable 5Ghz HT20",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Disable 5Ghz HT40",
+			 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Big Endian",
+			 !!(pBase->eepMisc & 0x01));
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Cal Bin Major Ver",
+			 (pBase->binBuildNumber >> 24) & 0xFF);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Cal Bin Minor Ver",
+			 (pBase->binBuildNumber >> 16) & 0xFF);
+	len += scnprintf(buf + len, size - len,
+			 "%20s : %10d\n",
+			 "Cal Bin Build",
+			 (pBase->binBuildNumber >> 8) & 0xFF);
 
 	/*
 	 * UB91 specific data.
@@ -579,10 +579,10 @@
 		struct base_eep_header_4k *pBase4k =
 			&priv->ah->eeprom.map4k.baseEepHeader;
 
-		len += snprintf(buf + len, size - len,
-				"%20s : %10d\n",
-				"TX Gain type",
-				pBase4k->txGainType);
+		len += scnprintf(buf + len, size - len,
+				 "%20s : %10d\n",
+				 "TX Gain type",
+				 pBase4k->txGainType);
 	}
 
 	/*
@@ -592,19 +592,19 @@
 		struct base_eep_ar9287_header *pBase9287 =
 			&priv->ah->eeprom.map9287.baseEepHeader;
 
-		len += snprintf(buf + len, size - len,
-				"%20s : %10ddB\n",
-				"Power Table Offset",
-				pBase9287->pwrTableOffset);
+		len += scnprintf(buf + len, size - len,
+				 "%20s : %10ddB\n",
+				 "Power Table Offset",
+				 pBase9287->pwrTableOffset);
 
-		len += snprintf(buf + len, size - len,
-				"%20s : %10d\n",
-				"OpenLoop Power Ctrl",
-				pBase9287->openLoopPwrCntl);
+		len += scnprintf(buf + len, size - len,
+				 "%20s : %10d\n",
+				 "OpenLoop Power Ctrl",
+				 pBase9287->openLoopPwrCntl);
 	}
 
-	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-			pBase->macAddr);
+	len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			 pBase->macAddr);
 	if (len > size)
 		len = size;
 
@@ -627,8 +627,8 @@
 {
 #define PR_EEP(_s, _val)						\
 	do {								\
-		len += snprintf(buf + len, size - len, "%20s : %10d\n",	\
-				_s, (_val));				\
+		len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+				 _s, (_val));				\
 	} while (0)
 
 	struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@
 	do {								\
 		if (pBase->opCapFlags & AR5416_OPFLAGS_11G) {		\
 			pModal = &priv->ah->eeprom.def.modalHeader[1];	\
-			len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
-					_s, (_val), "|");		\
+			len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+					 _s, (_val), "|");		\
 		}							\
 		if (pBase->opCapFlags & AR5416_OPFLAGS_11A) {		\
 			pModal = &priv->ah->eeprom.def.modalHeader[0];	\
-			len += snprintf(buf + len, size - len, "%9d\n", \
+			len += scnprintf(buf + len, size - len, "%9d\n",\
 					(_val));			\
 		}							\
 	} while (0)
@@ -729,10 +729,10 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"%31s %15s\n", "2G", "5G");
-	len += snprintf(buf + len, size - len,
-			"%32s %16s\n", "====", "====\n");
+	len += scnprintf(buf + len, size - len,
+			 "%31s %15s\n", "2G", "5G");
+	len += scnprintf(buf + len, size - len,
+			 "%32s %16s\n", "====", "====\n");
 
 	PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
 	PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@
 {
 #define PR_EEP(_s, _val)						\
 	do {								\
-		len += snprintf(buf + len, size - len, "%20s : %10d\n",	\
-				_s, (_val));				\
+		len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+				 _s, (_val));				\
 	} while (0)
 
 	struct ath9k_htc_priv *priv = file->private_data;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d442581..9a2657f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
 static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
 					      struct ath9k_channel *ichan)
 {
-	enum htc_phymode mode;
+	if (IS_CHAN_5GHZ(ichan))
+		return HTC_MODE_11NA;
 
-	mode = -EINVAL;
-
-	switch (ichan->chanmode) {
-	case CHANNEL_G:
-	case CHANNEL_G_HT20:
-	case CHANNEL_G_HT40PLUS:
-	case CHANNEL_G_HT40MINUS:
-		mode = HTC_MODE_11NG;
-		break;
-	case CHANNEL_A:
-	case CHANNEL_A_HT20:
-	case CHANNEL_A_HT40PLUS:
-	case CHANNEL_A_HT40MINUS:
-		mode = HTC_MODE_11NA;
-		break;
-	default:
-		break;
-	}
-
-	WARN_ON(mode < 0);
-
-	return mode;
+	return HTC_MODE_11NG;
 }
 
 bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
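
Beyond the simplification, the deleted body had a latent flaw the two-line version sidesteps: it stored -EINVAL in an enum htc_phymode and then asserted WARN_ON(mode < 0), but C lets the compiler choose an unsigned underlying type for an enum whose enumerators are all non-negative, in which case that comparison is constant-false and the warning could never fire. A minimal illustration of the pitfall with a generic enum:

  enum mode { MODE_A = 1, MODE_B = 2 };

  int main(void)
  {
      enum mode m = (enum mode)-1; /* wraps if the chosen type is unsigned */

      /* With an unsigned underlying type, "m < 0" is constant-false,
       * so a WARN_ON(m < 0)-style sanity check verifies nothing. */
      return m < 0 ? 1 : 0;
  }
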
@@ -926,7 +906,7 @@
 	WMI_CMD(WMI_FLUSH_RECV_CMDID);
 
 	/* setup initial channel */
-	init_channel = ath9k_cmn_get_curchannel(hw, ah);
+	init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
 	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (ret) {
@@ -1208,9 +1188,7 @@
 		ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
 			curchan->center_freq);
 
-		ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
-					  &hw->conf.chandef);
-
+		ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
 		if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
 			ath_err(common, "Unable to set channel\n");
 			ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 83f4927..4f9378d 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,22 @@
 	ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
 }
 
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+	ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+	ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+	if (ath9k_hw_ops(ah)->tx99_set_txpower)
+		ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 
 static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
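
One detail in the three new wrappers above is worth flagging: tx99_start() and tx99_stop() call through their function pointers unconditionally, whereas tx99_set_txpower() checks for NULL first, marking it as an optional op that a chip family may simply not provide. The guarded call is the usual convention for optional members of an ops vtable; a minimal sketch with invented names:

  struct widget;

  struct widget_ops {
      void (*start)(struct widget *w);            /* mandatory */
      void (*set_power)(struct widget *w, int p); /* optional  */
  };

  static inline void widget_set_power(const struct widget_ops *ops,
                                      struct widget *w, int p)
  {
      /* NULL means "this hardware has no such knob": silently skip. */
      if (ops->set_power)
          ops->set_power(w, p);
  }
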
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4..54b0415 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@
 
 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
 {
-	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_channel *chan = ah->curchan;
 	unsigned int clockrate;
 
 	/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
 	if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
 		clockrate = 117;
-	else if (!ah->curchan) /* should really check for CCK instead */
+	else if (!chan) /* should really check for CCK instead */
 		clockrate = ATH9K_CLOCK_RATE_CCK;
-	else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+	else if (IS_CHAN_2GHZ(chan))
 		clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
 	else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
 		clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
 	else
 		clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
 
-	if (conf_is_ht40(conf))
+	if (IS_CHAN_HT40(chan))
 		clockrate *= 2;
 
 	if (ah->curchan) {
-		if (IS_CHAN_HALF_RATE(ah->curchan))
+		if (IS_CHAN_HALF_RATE(chan))
 			clockrate /= 2;
-		if (IS_CHAN_QUARTER_RATE(ah->curchan))
+		if (IS_CHAN_QUARTER_RATE(chan))
 			clockrate /= 4;
 	}
 
@@ -190,10 +190,7 @@
 void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
 			  int hw_delay)
 {
-	if (IS_CHAN_B(chan))
-		hw_delay = (4 * hw_delay) / 22;
-	else
-		hw_delay /= 10;
+	hw_delay /= 10;
 
 	if (IS_CHAN_HALF_RATE(chan))
 		hw_delay *= 2;
@@ -294,8 +291,7 @@
 		return;
 	}
 
-	if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-	    (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+	if (IS_CHAN_HT40PLUS(chan)) {
 		centers->synth_center =
 			chan->channel + HT40_CHANNEL_CENTER_SHIFT;
 		extoff = 1;
@@ -549,6 +545,18 @@
 
 	ath9k_hw_ani_init(ah);
 
+	/*
+	 * EEPROM needs to be initialized before we do this.
+	 * This is required for regulatory compliance.
+	 */
+	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+		u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+		if ((regdmn & 0xF0) == CTL_FCC) {
+			ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+			ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+		}
+	}
+
 	return 0;
 }
 
@@ -1030,7 +1038,6 @@
 void ath9k_hw_init_global_settings(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ieee80211_conf *conf = &common->hw->conf;
 	const struct ath9k_channel *chan = ah->curchan;
 	int acktimeout, ctstimeout, ack_offset = 0;
 	int slottime;
@@ -1105,8 +1112,7 @@
 	 * BA frames in some implementations, but it has been found to fix ACK
 	 * timeout issues in other cases as well.
 	 */
-	if (conf->chandef.chan &&
-	    conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+	if (IS_CHAN_2GHZ(chan) &&
 	    !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
 		acktimeout += 64 - sifstime - ah->slottime;
 		ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@
 {
 	u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
 
-	if (IS_CHAN_B(chan))
-		ctl |= CTL_11B;
-	else if (IS_CHAN_G(chan))
+	if (IS_CHAN_2GHZ(chan))
 		ctl |= CTL_11G;
 	else
 		ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@
 	int r;
 
 	if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
-		u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
-		u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
-		band_switch = (cur != new);
-		mode_diff = (chan->chanmode != ah->curchan->chanmode);
+		band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+		mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
 	}
 
 	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@
 	ath9k_hw_set_clockrate(ah);
 	ath9k_hw_apply_txpower(ah, chan, false);
 
-	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
-		ath9k_hw_set_delta_slope(ah, chan);
-
+	ath9k_hw_set_delta_slope(ah, chan);
 	ath9k_hw_spur_mitigate_freq(ah, chan);
 
 	if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@
 	return true;
 }
 
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 val;
+
+	val = REG_READ(ah, AR_NAV);
+	if (val != 0xdeadbeef && val > 0x7fff) {
+		ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+		REG_WRITE(ah, AR_NAV, 0);
+	}
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
 	int count = 50;
@@ -1799,20 +1812,11 @@
 		goto fail;
 
 	/*
-	 * If cross-band fcc is not supoprted, bail out if
-	 * either channelFlags or chanmode differ.
-	 *
-	 * chanmode will be different if the HT operating mode
-	 * changes because of CSA.
+	 * If cross-band FCC is not supported, bail out if channelFlags differ.
 	 */
-	if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
-		if ((chan->channelFlags & CHANNEL_ALL) !=
-		    (ah->curchan->channelFlags & CHANNEL_ALL))
-			goto fail;
-
-		if (chan->chanmode != ah->curchan->chanmode)
-			goto fail;
-	}
+	if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+	    chan->channelFlags != ah->curchan->channelFlags)
+		goto fail;
 
 	if (!ath9k_hw_check_alive(ah))
 		goto fail;
@@ -1822,9 +1826,9 @@
 	 * re-using are present.
 	 */
 	if (AR_SREV_9462(ah) && (ah->caldata &&
-				 (!ah->caldata->done_txiqcal_once ||
-				  !ah->caldata->done_txclcal_once ||
-				  !ah->caldata->rtt_done)))
+				 (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+				  !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+				  !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
 		goto fail;
 
 	ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,15 +1878,14 @@
 
 	ah->caldata = caldata;
 	if (caldata && (chan->channel != caldata->channel ||
-			chan->channelFlags != caldata->channelFlags ||
-			chan->chanmode != caldata->chanmode)) {
+			chan->channelFlags != caldata->channelFlags)) {
 		/* Operating channel changed, reset channel calibration data */
 		memset(caldata, 0, sizeof(*caldata));
 		ath9k_init_nfcal_hist_buffer(ah, chan);
 	} else if (caldata) {
-		caldata->paprd_packet_sent = false;
+		clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
 	}
-	ah->noise = ath9k_hw_getchan_noise(ah, chan);
+	ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
 
 	if (fastcc) {
 		r = ath9k_hw_do_fastcc(ah, chan);
@@ -1964,9 +1967,7 @@
 
 	ath9k_hw_init_mfp(ah);
 
-	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
-		ath9k_hw_set_delta_slope(ah, chan);
-
+	ath9k_hw_set_delta_slope(ah, chan);
 	ath9k_hw_spur_mitigate_freq(ah, chan);
 	ah->eep_ops->set_board_values(ah, chan);
 
@@ -2017,8 +2018,8 @@
 	ath9k_hw_init_bb(ah, chan);
 
 	if (caldata) {
-		caldata->done_txiqcal_once = false;
-		caldata->done_txclcal_once = false;
+		clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+		clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
 	}
 	if (!ath9k_hw_init_cal(ah, chan))
 		return -EIO;
@@ -2943,12 +2944,11 @@
 }
 EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
 
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	u32 macmode;
 
-	if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+	if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
 		macmode = AR_2040_JOINED_RX_CLEAR;
 	else
 		macmode = 0;
@@ -3240,19 +3240,19 @@
 
 	/* chipsets >= AR9280 are single-chip */
 	if (AR_SREV_9280_20_OR_LATER(ah)) {
-		used = snprintf(hw_name, len,
-			       "Atheros AR%s Rev:%x",
-			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
-			       ah->hw_version.macRev);
+		used = scnprintf(hw_name, len,
+				 "Atheros AR%s Rev:%x",
+				 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+				 ah->hw_version.macRev);
 	}
 	else {
-		used = snprintf(hw_name, len,
-			       "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
-			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
-			       ah->hw_version.macRev,
-			       ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
-						AR_RADIO_SREV_MAJOR)),
-			       ah->hw_version.phyRev);
+		used = scnprintf(hw_name, len,
+				 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+				 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+				 ah->hw_version.macRev,
+				 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+						  & AR_RADIO_SREV_MAJOR)),
+				 ah->hw_version.phyRev);
 	}
 
 	hw_name[used] = '\0';
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b..9ea24f1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
 
 #define PR_EEP(_s, _val)						\
 	do {								\
-		len += snprintf(buf + len, size - len, "%20s : %10d\n",	\
-				_s, (_val));				\
+		len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+				 _s, (_val));				\
 	} while (0)
 
 #define SM(_v, _f)  (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@
 	ATH9K_INT_NOCARD = 0xffffffff
 };
 
-#define CHANNEL_CCK       0x00020
-#define CHANNEL_OFDM      0x00040
-#define CHANNEL_2GHZ      0x00080
-#define CHANNEL_5GHZ      0x00100
-#define CHANNEL_PASSIVE   0x00200
-#define CHANNEL_DYN       0x00400
-#define CHANNEL_HALF      0x04000
-#define CHANNEL_QUARTER   0x08000
-#define CHANNEL_HT20      0x10000
-#define CHANNEL_HT40PLUS  0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A           (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B           (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G           (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20      (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20      (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS  (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS  (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL				\
-	(CHANNEL_OFDM|				\
-	 CHANNEL_CCK|				\
-	 CHANNEL_2GHZ |				\
-	 CHANNEL_5GHZ |				\
-	 CHANNEL_HT20 |				\
-	 CHANNEL_HT40PLUS |			\
-	 CHANNEL_HT40MINUS)
-
 #define MAX_RTT_TABLE_ENTRY     6
 #define MAX_IQCAL_MEASUREMENT	8
 #define MAX_CL_TAB_ENTRY	16
 #define CL_TAB_ENTRY(reg_base)	(reg_base + (4 * j))
 
+enum ath9k_cal_flags {
+	RTT_DONE,
+	PAPRD_PACKET_SENT,
+	PAPRD_DONE,
+	NFCAL_PENDING,
+	NFCAL_INTF,
+	TXIQCAL_DONE,
+	TXCLCAL_DONE,
+	SW_PKDET_DONE,
+};
+
 struct ath9k_hw_cal_data {
 	u16 channel;
-	u32 channelFlags;
-	u32 chanmode;
+	u16 channelFlags;
+	unsigned long cal_flags;
 	int32_t CalValid;
 	int8_t iCoff;
 	int8_t qCoff;
-	bool rtt_done;
-	bool paprd_packet_sent;
-	bool paprd_done;
-	bool nfcal_pending;
-	bool nfcal_interference;
-	bool done_txiqcal_once;
-	bool done_txclcal_once;
+	u8 caldac[2];
 	u16 small_signal_gain[AR9300_MAX_CHAINS];
 	u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
 	u32 num_measures[AR9300_MAX_CHAINS];
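
The new ath9k_cal_flags enum above replaces seven scattered bool fields with bit numbers in a single unsigned long, flipped via the kernel's set_bit()/clear_bit()/test_bit() helpers, as the later hw.c and link.c hunks in this series show. Besides shrinking the struct, those bitops are atomic read-modify-writes, so calibration state can be updated from different contexts without extra locking, and the existing whole-struct memset() still clears every flag at once. A reduced sketch of the pattern (kernel context assumed; the enum is a subset of the real one):

  #include <linux/bitops.h>
  #include <linux/types.h>

  enum cal_flags {
      RTT_DONE,
      PAPRD_DONE,
      TXIQCAL_DONE,
  };

  struct cal_data {
      unsigned long cal_flags; /* one bit per enum cal_flags entry */
  };

  static void mark_paprd_done(struct cal_data *cal)
  {
      set_bit(PAPRD_DONE, &cal->cal_flags); /* atomic RMW */
  }

  static bool paprd_done(const struct cal_data *cal)
  {
      return test_bit(PAPRD_DONE, &cal->cal_flags);
  }
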
@@ -430,33 +405,34 @@
 struct ath9k_channel {
 	struct ieee80211_channel *chan;
 	u16 channel;
-	u32 channelFlags;
-	u32 chanmode;
+	u16 channelFlags;
 	s16 noisefloor;
 };
 
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
-       (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
-       (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
-       (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
-#define IS_CHAN_A_FAST_CLOCK(_ah, _c)			\
-	((((_c)->channelFlags & CHANNEL_5GHZ) != 0) &&	\
-	 ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+#define CHANNEL_5GHZ		BIT(0)
+#define CHANNEL_HALF		BIT(1)
+#define CHANNEL_QUARTER		BIT(2)
+#define CHANNEL_HT		BIT(3)
+#define CHANNEL_HT40PLUS	BIT(4)
+#define CHANNEL_HT40MINUS	BIT(5)
 
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) ||	\
-			  ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) ||	\
-			  ((_c)->chanmode == CHANNEL_A_HT40MINUS) ||	\
-			  ((_c)->chanmode == CHANNEL_G_HT40PLUS) ||	\
-			  ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c)			\
+	(IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+	(!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
 
 enum ath9k_power_mode {
 	ATH9K_PM_AWAKE = 0,
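
The replacement flags above are deliberately orthogonal: band, rate scaling and HT width each occupy their own bits, 2 GHz is just the absence of CHANNEL_5GHZ, and HT20 is derived as HT-but-not-HT40, which is what lets the old composite CHANNEL_G_HT40PLUS-style values and the parallel chanmode field disappear. A self-contained check of how the new predicates compose, with the defines copied from the hunk above:

  #include <assert.h>
  #include <stdint.h>

  #define BIT(n)            (1U << (n))
  #define CHANNEL_5GHZ      BIT(0)
  #define CHANNEL_HT        BIT(3)
  #define CHANNEL_HT40PLUS  BIT(4)
  #define CHANNEL_HT40MINUS BIT(5)

  struct chan { uint16_t channelFlags; };

  #define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
  #define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
  #define IS_CHAN_HT(_c)   ((_c)->channelFlags & CHANNEL_HT)
  #define IS_CHAN_HT40(_c) \
      (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
  #define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))

  int main(void)
  {
      struct chan g_ht20  = { CHANNEL_HT };              /* 2 GHz HT20  */
      struct chan a_ht40p = { CHANNEL_5GHZ | CHANNEL_HT |
                              CHANNEL_HT40PLUS };        /* 5 GHz HT40+ */

      assert(IS_CHAN_2GHZ(&g_ht20) && IS_CHAN_HT20(&g_ht20));
      assert(IS_CHAN_5GHZ(&a_ht40p) && IS_CHAN_HT40(&a_ht40p));
      return 0;
  }
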
@@ -558,6 +534,7 @@
 	u8 main_gaintb;
 	u8 alt_gaintb;
 	int lna1_lna2_delta;
+	int lna1_lna2_switch_delta;
 	u8 div_group;
 };
 
@@ -726,6 +703,10 @@
 	void (*spectral_scan_trigger)(struct ath_hw *ah);
 	void (*spectral_scan_wait)(struct ath_hw *ah);
 
+	void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+	void (*tx99_stop)(struct ath_hw *ah);
+	void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
 #endif
@@ -1026,10 +1007,11 @@
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 				    const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
 bool ath9k_hw_check_alive(struct ath_hw *ah);
 
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349..d8643eb 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u8 *ds;
-	struct ath_buf *bf;
 	int i, bsize, desc_len;
 
 	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@
 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
 
 	/* allocate buffers */
-	bsize = sizeof(struct ath_buf) * nbuf;
-	bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
-	if (!bf)
-		return -ENOMEM;
+	if (is_tx) {
+		struct ath_buf *bf;
 
-	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
-		bf->bf_desc = ds;
-		bf->bf_daddr = DS2PHYS(dd, ds);
+		bsize = sizeof(struct ath_buf) * nbuf;
+		bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+		if (!bf)
+			return -ENOMEM;
 
-		if (!(sc->sc_ah->caps.hw_caps &
-		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
-			/*
-			 * Skip descriptor addresses which can cause 4KB
-			 * boundary crossing (addr + length) with a 32 dword
-			 * descriptor fetch.
-			 */
-			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
-				BUG_ON((caddr_t) bf->bf_desc >=
-				       ((caddr_t) dd->dd_desc +
-					dd->dd_desc_len));
+		for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+			bf->bf_desc = ds;
+			bf->bf_daddr = DS2PHYS(dd, ds);
 
-				ds += (desc_len * ndesc);
-				bf->bf_desc = ds;
-				bf->bf_daddr = DS2PHYS(dd, ds);
+			if (!(sc->sc_ah->caps.hw_caps &
+				  ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+				/*
+				 * Skip descriptor addresses which can cause 4KB
+				 * boundary crossing (addr + length) with a 32 dword
+				 * descriptor fetch.
+				 */
+				while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+					BUG_ON((caddr_t) bf->bf_desc >=
+						   ((caddr_t) dd->dd_desc +
+						dd->dd_desc_len));
+
+					ds += (desc_len * ndesc);
+					bf->bf_desc = ds;
+					bf->bf_daddr = DS2PHYS(dd, ds);
+				}
 			}
+			list_add_tail(&bf->list, head);
 		}
-		list_add_tail(&bf->list, head);
+	} else {
+		struct ath_rxbuf *bf;
+
+		bsize = sizeof(struct ath_rxbuf) * nbuf;
+		bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+		if (!bf)
+			return -ENOMEM;
+
+		for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+			bf->bf_desc = ds;
+			bf->bf_daddr = DS2PHYS(dd, ds);
+
+			if (!(sc->sc_ah->caps.hw_caps &
+				  ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+				/*
+				 * Skip descriptor addresses which can cause 4KB
+				 * boundary crossing (addr + length) with a 32 dword
+				 * descriptor fetch.
+				 */
+				while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+					BUG_ON((caddr_t) bf->bf_desc >=
+						   ((caddr_t) dd->dd_desc +
+						dd->dd_desc_len));
+
+					ds += (desc_len * ndesc);
+					bf->bf_desc = ds;
+					bf->bf_daddr = DS2PHYS(dd, ds);
+				}
+			}
+			list_add_tail(&bf->list, head);
+		}
 	}
 	return 0;
 }
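
The descdma rework above duplicates the allocation loop because RX buffers now use struct ath_rxbuf, which no longer shares a type with TX's struct ath_buf, so each branch allocates and links its own element size. The skip logic inside both loops is untouched: ATH_DESC_4KB_BOUND_CHECK() (defined elsewhere in ath9k.h) rejects descriptor addresses where a 32-dword, i.e. 128-byte, fetch would cross a 4 KB boundary on chips lacking split-transaction support. A sketch of the equivalent test, assuming that 128-byte fetch size:

  #include <stdbool.h>
  #include <stdint.h>

  /* Offsets in the last 128 bytes of a 4 KB page (> 0xFFF - 0x80 =
   * 0xF7F) are skipped, so a full 32-dword descriptor fetch starting
   * from any accepted address never straddles the page boundary. */
  static bool desc_crosses_4k(uint32_t daddr)
  {
      return (daddr & 0xFFF) > 0xF7F;
  }
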
@@ -437,7 +471,6 @@
 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
 
-	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
 	ath_cabq_update(sc);
 
 	sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@
 	if (sc->driver_data & ATH9K_PCI_CUS217)
 		ath_info(common, "CUS217 card detected\n");
 
+	if (sc->driver_data & ATH9K_PCI_CUS252)
+		ath_info(common, "CUS252 card detected\n");
+
+	if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+		ath_info(common, "WB335 1-ANT card detected\n");
+
+	if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+		ath_info(common, "WB335 2-ANT card detected\n");
+
+	/*
+	 * Some WB335 cards do not support antenna diversity. Since
+	 * we use a hardcoded value for AR9565 instead of using the
+	 * EEPROM/OTP data, remove the combining feature from
+	 * the HW capabilities bitmap.
+	 */
+	if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+		if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+			pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+	}
+
 	if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
 		pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
 		ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -627,7 +680,9 @@
 	sc->sc_ah = ah;
 	pCap = &ah->caps;
 
-	sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
+	common = ath9k_hw_common(ah);
+	sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+	sc->tx99_power = MAX_RATE_POWER + 1;
 
 	if (!pdata) {
 		ah->ah_flags |= AH_USE_EEPROM;
@@ -641,7 +696,6 @@
 		ah->external_reset = pdata->external_reset;
 	}
 
-	common = ath9k_hw_common(ah);
 	common->ops = &ah->reg_ops;
 	common->bus_ops = bus_ops;
 	common->ah = ah;
@@ -732,6 +786,7 @@
 	ath9k_hw_deinit(ah);
 err_hw:
 	ath9k_eeprom_release(sc);
+	dev_kfree_skb_any(sc->tx99_skb);
 	return ret;
 }
 
@@ -748,7 +803,7 @@
 		chan = &sband->channels[i];
 		ah->curchan = &ah->channels[chan->hw_value];
 		cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
-		ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+		ath9k_cmn_get_channel(sc->hw, ah, &chandef);
 		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
 	}
 }
@@ -789,9 +844,9 @@
 				 BIT(NL80211_IFTYPE_P2P_GO) },
 };
 
-
 static const struct ieee80211_iface_limit if_dfs_limits[] = {
-	{ .max = 1,	.types = BIT(NL80211_IFTYPE_AP) },
+	{ .max = 1,	.types = BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_ADHOC) },
 };
 
 static const struct ieee80211_iface_combination if_comb[] = {
@@ -850,17 +905,18 @@
 
 	hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
-	hw->wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_P2P_GO) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) |
-		BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_WDS) |
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) |
-		BIT(NL80211_IFTYPE_MESH_POINT);
-
-	hw->wiphy->iface_combinations = if_comb;
-	hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+	if (!config_enabled(CONFIG_ATH9K_TX99)) {
+		hw->wiphy->interface_modes =
+			BIT(NL80211_IFTYPE_P2P_GO) |
+			BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			BIT(NL80211_IFTYPE_AP) |
+			BIT(NL80211_IFTYPE_WDS) |
+			BIT(NL80211_IFTYPE_STATION) |
+			BIT(NL80211_IFTYPE_ADHOC) |
+			BIT(NL80211_IFTYPE_MESH_POINT);
+		hw->wiphy->iface_combinations = if_comb;
+		hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+	}
 
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db..aed7e29 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,6 +28,13 @@
 	int i;
 	bool needreset = false;
 
+	if (sc->tx99_state) {
+		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+			"skip tx hung detection on tx99\n");
+		return;
+	}
+
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		txq = sc->tx.txq_map[i];
 
@@ -70,7 +77,7 @@
 	ath9k_ps_wakeup(sc);
 	is_alive = ath9k_hw_check_alive(sc->sc_ah);
 
-	if (is_alive && !AR_SREV_9300(sc->sc_ah))
+	if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
 		goto out;
 	else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
 		ath_dbg(common, RESET,
@@ -141,6 +148,9 @@
 	if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
 		return;
 
+	if (sc->tx99_state)
+		return;
+
 	ath9k_ps_wakeup(sc);
 	pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
 	ath9k_ps_restore(sc);
@@ -184,7 +194,7 @@
 	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int chain;
 
-	if (!caldata || !caldata->paprd_done) {
+	if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
 		ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
 		return;
 	}
@@ -256,7 +266,9 @@
 	int len = 1800;
 	int ret;
 
-	if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+	if (!caldata ||
+	    !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+	    test_bit(PAPRD_DONE, &caldata->cal_flags)) {
 		ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
 		return;
 	}
@@ -316,7 +328,7 @@
 	kfree_skb(skb);
 
 	if (chain_ok) {
-		caldata->paprd_done = true;
+		set_bit(PAPRD_DONE, &caldata->cal_flags);
 		ath_paprd_activate(sc);
 	}
 
@@ -343,7 +355,7 @@
 	u32 cal_interval, short_cal_interval, long_cal_interval;
 	unsigned long flags;
 
-	if (ah->caldata && ah->caldata->nfcal_interference)
+	if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
 		long_cal_interval = ATH_LONG_CALINTERVAL_INT;
 	else
 		long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +444,7 @@
 	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
 
 	if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
-		if (!ah->caldata->paprd_done) {
+		if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
 			ieee80211_queue_work(sc->hw, &sc->paprd_work);
 		} else if (!ah->paprd_table_write_done) {
 			ath9k_ps_wakeup(sc);
@@ -516,7 +528,8 @@
 
 	if (chan->noisefloor) {
 		survey->filled |= SURVEY_INFO_NOISE_DBM;
-		survey->noise = ath9k_hw_getchan_noise(ah, chan);
+		survey->noise = ath9k_hw_getchan_noise(ah, chan,
+						       chan->noisefloor);
 	}
 }
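
The paprd_done, paprd_packet_sent and nfcal_interference conversions in this
file all follow one pattern: individual bool fields in ath9k_hw_cal_data are
collapsed into a single cal_flags bitmap driven by set_bit()/test_bit().  A
minimal userspace sketch of the pattern (the real helpers in
<linux/bitops.h> are atomic where required):

	#include <stdio.h>

	enum cal_flag { PAPRD_PACKET_SENT, PAPRD_DONE, NFCAL_INTF };

	static int test_bit(int nr, const unsigned long *addr)
	{
		return (*addr >> nr) & 1UL;
	}

	static void set_bit(int nr, unsigned long *addr)
	{
		*addr |= 1UL << nr;
	}

	int main(void)
	{
		unsigned long cal_flags = 0;

		set_bit(PAPRD_PACKET_SENT, &cal_flags);
		if (test_bit(PAPRD_PACKET_SENT, &cal_flags) &&
		    !test_bit(PAPRD_DONE, &cal_flags))
			printf("PAPRD calibration still pending\n");
		return 0;
	}
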
 
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff09..6a18f9d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@
 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath9k_channel *chan = ah->curchan;
 	struct ath9k_tx_queue_info *qi;
 	u32 cwMin, chanCwMin, value;
 
@@ -387,10 +386,7 @@
 	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
 
 	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
-		if (chan && IS_CHAN_B(chan))
-			chanCwMin = INIT_CWMIN_11B;
-		else
-			chanCwMin = INIT_CWMIN;
+		chanCwMin = INIT_CWMIN;
 
 		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
 	} else
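
The loop retained in ath9k_hw_resettxqueue() rounds the contention window up
to the next value of the form 2^k - 1 (presumably what the hardware register
expects); only the 802.11b special case was dropped.  A standalone
illustration of the rounding:

	#include <stdio.h>

	static unsigned int cw_round_up(unsigned int chanCwMin)
	{
		unsigned int cwMin;

		/* same loop as the driver: 1, 3, 7, 15, ... >= chanCwMin */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
		return cwMin;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       cw_round_up(15), cw_round_up(16), cw_round_up(31));
		/* prints: 15 31 31 */
		return 0;
	}
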
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccace..e3eed81 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@
 #define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
 
 #define ATH9K_DECOMP_MASK_SIZE     128
-#define ATH9K_READY_TIME_LO_BOUND  50
-#define ATH9K_READY_TIME_HI_BOUND  96
 
 enum ath9k_pkt_type {
 	ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 709301f..74f452c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -312,17 +312,91 @@
  * by resetting the chip.  To accomplish this we must first clean up any
  * pending DMA, then restart stuff.
  */
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
-		    struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ieee80211_hw *hw = sc->hw;
+	struct ath9k_channel *hchan;
+	struct ieee80211_channel *chan = chandef->chan;
+	unsigned long flags;
+	bool offchannel;
+	int pos = chan->hw_value;
+	int old_pos = -1;
 	int r;
 
 	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
 		return -EIO;
 
-	r = ath_reset_internal(sc, hchan);
+	offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
 
-	return r;
+	if (ah->curchan)
+		old_pos = ah->curchan - &ah->channels[0];
+
+	ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+		chan->center_freq, chandef->width);
+
+	/* update survey stats for the old channel before switching */
+	spin_lock_irqsave(&common->cc_lock, flags);
+	ath_update_survey_stats(sc);
+	spin_unlock_irqrestore(&common->cc_lock, flags);
+
+	ath9k_cmn_get_channel(hw, ah, chandef);
+
+	/*
+	 * If the operating channel changes, change the survey in-use flags
+	 * along with it.
+	 * Reset the survey data for the new channel, unless we're switching
+	 * back to the operating channel from an off-channel operation.
+	 */
+	if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+		if (sc->cur_survey)
+			sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+		sc->cur_survey = &sc->survey[pos];
+
+		memset(sc->cur_survey, 0, sizeof(struct survey_info));
+		sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+	} else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+		memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+	}
+
+	hchan = &sc->sc_ah->channels[pos];
+	r = ath_reset_internal(sc, hchan);
+	if (r)
+		return r;
+
+	/*
+	 * The most recent snapshot of channel->noisefloor for the old
+	 * channel is only available after the hardware reset. Copy it to
+	 * the survey stats now.
+	 */
+	if (old_pos >= 0)
+		ath_update_survey_nf(sc, old_pos);
+
+	/*
+	 * Enable radar pulse detection if on a DFS channel. Spectral
+	 * scanning and radar detection can not be used concurrently.
+	 */
+	if (hw->conf.radar_enabled) {
+		u32 rxfilter;
+
+		/* set HW specific DFS configuration */
+		ath9k_hw_set_radar_params(ah);
+		rxfilter = ath9k_hw_getrxfilter(ah);
+		rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+				ATH9K_RX_FILTER_PHYERR;
+		ath9k_hw_setrxfilter(ah, rxfilter);
+		ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+			chan->center_freq);
+	} else {
+		/* perform spectral scan if requested. */
+		if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+			sc->spectral_mode == SPECTRAL_CHANSCAN)
+			ath9k_spectral_scan_trigger(hw);
+	}
+
+	return 0;
 }
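
One idiom worth noting in the consolidated function: old_pos is recovered
with "ah->curchan - &ah->channels[0]", relying on C pointer subtraction
between elements of the same array yielding the element index.  Standalone:

	#include <stdio.h>

	struct chan { int freq; };

	int main(void)
	{
		struct chan channels[4] = { {2412}, {2437}, {2462}, {5180} };
		struct chan *curchan = &channels[2];

		/* difference of pointers into one array = index */
		printf("old_pos = %td\n", curchan - &channels[0]); /* 2 */
		return 0;
	}
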
 
 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -372,6 +446,13 @@
 			type = RESET_TYPE_BB_WATCHDOG;
 
 		ath9k_queue_reset(sc, type);
+
+		/*
+		 * Increment the ref. counter here so that
+		 * interrupts are enabled in the reset routine.
+		 */
+		atomic_inc(&ah->intr_ref_cnt);
+		ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
 		goto out;
 	}
 
@@ -410,10 +491,9 @@
 
 	ath9k_btcoex_handle_interrupt(sc, status);
 
-out:
 	/* re-enable hardware interrupt */
 	ath9k_hw_enable_interrupts(ah);
-
+out:
 	spin_unlock(&sc->sc_pcu_lock);
 	ath9k_ps_restore(sc);
 }
@@ -594,7 +674,7 @@
 	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
 
-	init_channel = ath9k_cmn_get_curchannel(hw, ah);
+	init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
 	/* Reset SERDES registers */
 	ath9k_hw_configpcipowersave(ah, false);
@@ -797,7 +877,7 @@
 	}
 
 	if (!ah->curchan)
-		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+		ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
 	ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	ath9k_hw_phy_disable(ah);
@@ -816,7 +896,7 @@
 	ath_dbg(common, CONFIG, "Driver halt\n");
 }
 
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
 {
 	switch (type) {
 	case NL80211_IFTYPE_AP:
@@ -966,6 +1046,14 @@
 
 	mutex_lock(&sc->mutex);
 
+	if (config_enabled(CONFIG_ATH9K_TX99)) {
+		if (sc->nvifs >= 1) {
+			mutex_unlock(&sc->mutex);
+			return -EOPNOTSUPP;
+		}
+		sc->tx99_vif = vif;
+	}
+
 	ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 	sc->nvifs++;
 
@@ -994,9 +1082,15 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-	ath_dbg(common, CONFIG, "Change Interface\n");
 	mutex_lock(&sc->mutex);
 
+	if (config_enabled(CONFIG_ATH9K_TX99)) {
+		mutex_unlock(&sc->mutex);
+		return -EOPNOTSUPP;
+	}
+
+	ath_dbg(common, CONFIG, "Change Interface\n");
+
 	if (ath9k_uses_beacons(vif->type))
 		ath9k_beacon_remove_slot(sc, vif);
 
@@ -1026,6 +1120,7 @@
 	mutex_lock(&sc->mutex);
 
 	sc->nvifs--;
+	sc->tx99_vif = NULL;
 
 	if (ath9k_uses_beacons(vif->type))
 		ath9k_beacon_remove_slot(sc, vif);
@@ -1047,6 +1142,9 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return;
+
 	sc->ps_enabled = true;
 	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
 		if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1063,6 +1161,9 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return;
+
 	sc->ps_enabled = false;
 	ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
 	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1086,6 +1187,9 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	u32 rxfilter;
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return;
+
 	if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
 		ath_err(common, "spectrum analyzer not implemented on this hardware\n");
 		return;
@@ -1201,81 +1305,12 @@
 	}
 
 	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
-		struct ieee80211_channel *curchan = hw->conf.chandef.chan;
-		int pos = curchan->hw_value;
-		int old_pos = -1;
-		unsigned long flags;
-
-		if (ah->curchan)
-			old_pos = ah->curchan - &ah->channels[0];
-
-		ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
-			curchan->center_freq, hw->conf.chandef.width);
-
-		/* update survey stats for the old channel before switching */
-		spin_lock_irqsave(&common->cc_lock, flags);
-		ath_update_survey_stats(sc);
-		spin_unlock_irqrestore(&common->cc_lock, flags);
-
-		ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
-					  &conf->chandef);
-
-		/*
-		 * If the operating channel changes, change the survey in-use flags
-		 * along with it.
-		 * Reset the survey data for the new channel, unless we're switching
-		 * back to the operating channel from an off-channel operation.
-		 */
-		if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
-		    sc->cur_survey != &sc->survey[pos]) {
-
-			if (sc->cur_survey)
-				sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
-			sc->cur_survey = &sc->survey[pos];
-
-			memset(sc->cur_survey, 0, sizeof(struct survey_info));
-			sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
-		} else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
-			memset(&sc->survey[pos], 0, sizeof(struct survey_info));
-		}
-
-		if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+		if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
 			ath_err(common, "Unable to set channel\n");
 			mutex_unlock(&sc->mutex);
 			ath9k_ps_restore(sc);
 			return -EINVAL;
 		}
-
-		/*
-		 * The most recent snapshot of channel->noisefloor for the old
-		 * channel is only available after the hardware reset. Copy it to
-		 * the survey stats now.
-		 */
-		if (old_pos >= 0)
-			ath_update_survey_nf(sc, old_pos);
-
-		/*
-		 * Enable radar pulse detection if on a DFS channel. Spectral
-		 * scanning and radar detection can not be used concurrently.
-		 */
-		if (hw->conf.radar_enabled) {
-			u32 rxfilter;
-
-			/* set HW specific DFS configuration */
-			ath9k_hw_set_radar_params(ah);
-			rxfilter = ath9k_hw_getrxfilter(ah);
-			rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
-				    ATH9K_RX_FILTER_PHYERR;
-			ath9k_hw_setrxfilter(ah, rxfilter);
-			ath_dbg(common, DFS, "DFS enabled at freq %d\n",
-				curchan->center_freq);
-		} else {
-			/* perform spectral scan if requested. */
-			if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
-			    sc->spectral_mode == SPECTRAL_CHANSCAN)
-				ath9k_spectral_scan_trigger(hw);
-		}
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1734,6 +1769,9 @@
 	unsigned long flags;
 	int pos;
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return -EOPNOTSUPP;
+
 	spin_lock_irqsave(&common->cc_lock, flags);
 	if (idx == 0)
 		ath_update_survey_stats(sc);
@@ -1766,6 +1804,9 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return;
+
 	mutex_lock(&sc->mutex);
 	ah->coverage_class = coverage_class;
 
@@ -2332,6 +2373,134 @@
 	sc->csa_vif = vif;
 }
 
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	ath_drain_all_txq(sc);
+	ath_startrecv(sc);
+
+	ath9k_hw_set_interrupts(ah);
+	ath9k_hw_enable_interrupts(ah);
+
+	ieee80211_wake_queues(sc->hw);
+
+	kfree_skb(sc->tx99_skb);
+	sc->tx99_skb = NULL;
+	sc->tx99_state = false;
+
+	ath9k_hw_tx99_stop(sc->sc_ah);
+	ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+	u32 len = 1200;
+	struct ieee80211_hw *hw = sc->hw;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *tx_info;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_put(skb, len);
+
+	memset(skb->data, 0, len);
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+	hdr->duration_id = 0;
+
+	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+	tx_info = IEEE80211_SKB_CB(skb);
+	memset(tx_info, 0, sizeof(*tx_info));
+	tx_info->band = hw->conf.chandef.chan->band;
+	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+	tx_info->control.vif = sc->tx99_vif;
+
+	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+	return skb;
+}
+
+void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+	ath_reset(sc);
+
+	ath9k_ps_wakeup(sc);
+	ath9k_tx99_stop(sc);
+	ath9k_ps_restore(sc);
+}
+
+int ath9k_tx99_init(struct ath_softc *sc)
+{
+	struct ieee80211_hw *hw = sc->hw;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_tx_control txctl;
+	int r;
+
+	if (sc->sc_flags & SC_OP_INVALID) {
+		ath_err(common,
+			"driver is in invalid state unable to use TX99");
+		return -EINVAL;
+	}
+
+	sc->tx99_skb = ath9k_build_tx99_skb(sc);
+	if (!sc->tx99_skb)
+		return -ENOMEM;
+
+	memset(&txctl, 0, sizeof(txctl));
+	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+	ath_reset(sc);
+
+	ath9k_ps_wakeup(sc);
+
+	ath9k_hw_disable_interrupts(ah);
+	atomic_set(&ah->intr_ref_cnt, -1);
+	ath_drain_all_txq(sc);
+	ath_stoprecv(sc);
+
+	sc->tx99_state = true;
+
+	ieee80211_stop_queues(hw);
+
+	if (sc->tx99_power == MAX_RATE_POWER + 1)
+		sc->tx99_power = MAX_RATE_POWER;
+
+	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+	if (r) {
+		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+		return r;
+	}
+
+	ath_dbg(common, XMIT, "TX99 xmit started using %d (%d dBm)\n",
+		sc->tx99_power,
+		sc->tx99_power / 2);
+
+	/* We leave the hardware awake as it will be chugging on */
+
+	return 0;
+}
+
 struct ieee80211_ops ath9k_ops = {
 	.tx 		    = ath9k_tx,
 	.start 		    = ath9k_start,
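
The PN9Data table in ath9k_build_tx99_skb() above appears to match the
ITU-T O.153 PN9 test sequence: a 9-bit LFSR with polynomial x^9 + x^5 + 1,
seeded with all ones, output bits packed MSB-first.  A sketch that
regenerates the first bytes, offered as an illustration rather than a claim
about how the table was produced:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t state = 0x1FF;	/* 9-bit LFSR, all-ones seed */
		int i, bit;

		for (i = 0; i < 8; i++) {
			uint8_t byte = 0;

			for (bit = 0; bit < 8; bit++) {
				uint16_t fb = (state ^ (state >> 5)) & 1;

				byte = (byte << 1) | (state & 1);
				state = (state >> 1) | (fb << 8);
			}
			printf("0x%02x ", byte); /* 0xff 0x87 0xb8 0x59 ... */
		}
		printf("\n");
		return 0;
	}
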
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee2..0ac1b5f 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@
 	chan_start = wlan_chan - 10;
 	chan_end = wlan_chan + 10;
 
-	if (chan->chanmode == CHANNEL_G_HT40PLUS)
+	if (IS_CHAN_HT40PLUS(chan))
 		chan_end += 20;
-	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+	else if (IS_CHAN_HT40MINUS(chan))
 		chan_start -= 20;
 
 	/* adjust side band */
@@ -707,11 +707,11 @@
 
 	if (setchannel) {
 		struct ath9k_hw_cal_data *caldata = &sc->caldata;
-		if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+		if (IS_CHAN_HT40PLUS(ah->curchan) &&
 		    (ah->curchan->channel > caldata->channel) &&
 		    (ah->curchan->channel <= caldata->channel + 20))
 			return;
-		if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+		if (IS_CHAN_HT40MINUS(ah->curchan) &&
 		    (ah->curchan->channel < caldata->channel) &&
 		    (ah->curchan->channel >= caldata->channel - 20))
 			return;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7c..7e4c252 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -269,7 +269,200 @@
 
 	{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
 	{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
-	{ PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E  AR9565 */
+
+	/* CUS252 */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x3028),
+	  .driver_data = ATH9K_PCI_CUS252 |
+			 ATH9K_PCI_AR9565_2ANT |
+			 ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x2176),
+	  .driver_data = ATH9K_PCI_CUS252 |
+			 ATH9K_PCI_AR9565_2ANT |
+			 ATH9K_PCI_BT_ANT_DIV },
+
+	/* WB335 1-ANT */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_FOXCONN,
+			 0xE068),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x185F, /* WNC */
+			 0xA119),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0632),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x6671),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x1B9A, /* XAVI */
+			 0x2811),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x1B9A, /* XAVI */
+			 0x2812),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+	/* WB335 1-ANT / Antenna Diversity */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x3025),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x3026),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x302B),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_FOXCONN,
+			 0xE069),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x185F, /* WNC */
+			 0x3028),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0622),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0672),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0662),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x213A),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_LENOVO,
+			 0x3026),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_HP,
+			 0x18E3),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_HP,
+			 0x217F),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_DELL,
+			 0x020E),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+	/* WB335 2-ANT */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_SAMSUNG,
+			 0x411A),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_SAMSUNG,
+			 0x411B),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_SAMSUNG,
+			 0x411C),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_SAMSUNG,
+			 0x411D),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_SAMSUNG,
+			 0x411E),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+	/* WB335 2-ANT / Antenna-Diversity */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x3027),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x302C),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0642),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0652),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0612),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x2130),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x144F, /* ASKEY */
+			 0x7202),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x1B9A, /* XAVI */
+			 0x2810),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x185F, /* WNC */
+			 0x3027),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+	/* PCI-E AR9565 (WB335) */
+	{ PCI_VDEVICE(ATHEROS, 0x0036),
+	  .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
 	{ 0 }
 };
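
Note the ordering in the table above: the catch-all 0x0036 entry must come
last, because the PCI core takes the first entry whose fields all match, and
PCI_DEVICE_SUB() pins the subsystem IDs while PCI_VDEVICE() leaves them as
PCI_ANY_ID.  A reduced sketch of that first-match rule (names simplified):

	#include <stdio.h>
	#include <stddef.h>

	#define ANY_ID 0xffffffffu

	struct id { unsigned int subvendor, subdevice; unsigned long driver_data; };

	static const struct id *first_match(const struct id *tbl, size_t n,
					    unsigned int sv, unsigned int sd)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if ((tbl[i].subvendor == ANY_ID || tbl[i].subvendor == sv) &&
			    (tbl[i].subdevice == ANY_ID || tbl[i].subdevice == sd))
				return &tbl[i];
		return NULL;
	}

	int main(void)
	{
		const struct id tbl[] = {
			{ 0x11AD, 0x0632, 1 },	/* specific entry first */
			{ ANY_ID, ANY_ID, 2 },	/* catch-all must be last */
		};

		printf("%lu %lu\n",
		       first_match(tbl, 2, 0x11AD, 0x0632)->driver_data,  /* 1 */
		       first_match(tbl, 2, 0x1234, 0x5678)->driver_data); /* 2 */
		return 0;
	}
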
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51..d829bb6 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@
 		int used_mcs = 0, used_htmode = 0;
 
 		if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
-			used_mcs = snprintf(mcs, 5, "%d",
-				rc->rate_table->info[i].ratecode);
+			used_mcs = scnprintf(mcs, 5, "%d",
+					     rc->rate_table->info[i].ratecode);
 
 			if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
-				used_htmode = snprintf(htmode, 5, "HT40");
+				used_htmode = scnprintf(htmode, 5, "HT40");
 			else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
-				used_htmode = snprintf(htmode, 5, "HT20");
+				used_htmode = scnprintf(htmode, 5, "HT20");
 			else
-				used_htmode = snprintf(htmode, 5, "????");
+				used_htmode = scnprintf(htmode, 5, "????");
 		}
 
 		mcs[used_mcs] = '\0';
 		htmode[used_htmode] = '\0';
 
-		len += snprintf(buf + len, max - len,
-			"%6s %6s %3u.%d: "
-			"%10u %10u %10u %10u\n",
-			htmode,
-			mcs,
-			ratekbps / 1000,
-			(ratekbps % 1000) / 100,
-			stats->success,
-			stats->retries,
-			stats->xretries,
-			stats->per);
+		len += scnprintf(buf + len, max - len,
+				 "%6s %6s %3u.%d: "
+				 "%10u %10u %10u %10u\n",
+				 htmode,
+				 mcs,
+				 ratekbps / 1000,
+				 (ratekbps % 1000) / 100,
+				 stats->success,
+				 stats->retries,
+				 stats->xretries,
+				 stats->per);
 	}
 
 	if (len > max)
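
Context for the conversion above: snprintf() returns the length the output
would have had, so accumulating its result lets len run past max and turns
the next "max - len" size argument into garbage; scnprintf() returns the
number of bytes actually stored.  A userspace stand-in with the same
contract as the kernel helper:

	#include <stdarg.h>
	#include <stdio.h>

	static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int i;

		va_start(args, fmt);
		i = vsnprintf(buf, size, fmt, args);
		va_end(args);

		/* never report more than was actually written */
		if (i >= (int)size)
			i = size ? (int)size - 1 : 0;
		return i;
	}

	int main(void)
	{
		char buf[8];

		printf("snprintf: %d, scnprintf: %d\n",
		       snprintf(buf, sizeof(buf), "0123456789"),      /* 10 */
		       my_scnprintf(buf, sizeof(buf), "0123456789")); /* 7 */
		return 0;
	}
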
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ab9e3a8..95ddca5 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
 #include "ath9k.h"
 #include "ar9003_mac.h"
 
-#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))
 
 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
 {
@@ -35,7 +35,7 @@
  * buffer (or rx fifo). This can incorrectly acknowledge packets
  * to a sender if last desc is self-linked.
  */
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@
 	sc->rx.rxlink = &ds->ds_link;
 }
 
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
 {
 	if (sc->rx.buf_hold)
 		ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_rx_edma *rx_edma;
 	struct sk_buff *skb;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 
 	rx_edma = &sc->rx.rx_edma[qtype];
 	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
 		return false;
 
-	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
 	list_del_init(&bf->list);
 
 	skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@
 				  enum ath9k_rx_qtype qtype)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	struct ath_buf *bf, *tbf;
+	struct ath_rxbuf *bf, *tbf;
 
 	if (list_empty(&sc->rx.rxbuf)) {
 		ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@
 static void ath_rx_remove_buffer(struct ath_softc *sc,
 				 enum ath9k_rx_qtype qtype)
 {
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	struct ath_rx_edma *rx_edma;
 	struct sk_buff *skb;
 
@@ -171,7 +171,7 @@
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_hw *ah = sc->sc_ah;
 	struct sk_buff *skb;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	int error = 0, i;
 	u32 size;
 
@@ -211,7 +211,7 @@
 	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
 			       ah->caps.rx_hp_qdepth);
 
-	size = sizeof(struct ath_buf) * nbufs;
+	size = sizeof(struct ath_rxbuf) * nbufs;
 	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
 	if (!bf)
 		return -ENOMEM;
@@ -271,7 +271,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct sk_buff *skb;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	int error = 0;
 
 	spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct sk_buff *skb;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		ath_rx_edma_cleanup(sc);
@@ -375,6 +375,9 @@
 {
 	u32 rfilt;
 
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return 0;
+
 	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
 		| ATH9K_RX_FILTER_MCAST;
 
@@ -427,7 +430,7 @@
 int ath_startrecv(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	struct ath_buf *bf, *tbf;
+	struct ath_rxbuf *bf, *tbf;
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		ath_edma_start_recv(sc);
@@ -447,7 +450,7 @@
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
 
-	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
 	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
 	ath9k_hw_rxena(ah);
 
@@ -603,13 +606,13 @@
 static bool ath_edma_get_buffers(struct ath_softc *sc,
 				 enum ath9k_rx_qtype qtype,
 				 struct ath_rx_status *rs,
-				 struct ath_buf **dest)
+				 struct ath_rxbuf **dest)
 {
 	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct sk_buff *skb;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	int ret;
 
 	skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +656,11 @@
 	return true;
 }
 
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
 						struct ath_rx_status *rs,
 						enum ath9k_rx_qtype qtype)
 {
-	struct ath_buf *bf = NULL;
+	struct ath_rxbuf *bf = NULL;
 
 	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
 		if (!bf)
@@ -668,13 +671,13 @@
 	return NULL;
 }
 
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
 					   struct ath_rx_status *rs)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	int ret;
 
 	if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +685,7 @@
 		return NULL;
 	}
 
-	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
 	if (bf == sc->rx.buf_hold)
 		return NULL;
 
@@ -702,7 +705,7 @@
 	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
 	if (ret == -EINPROGRESS) {
 		struct ath_rx_status trs;
-		struct ath_buf *tbf;
+		struct ath_rxbuf *tbf;
 		struct ath_desc *tds;
 
 		memset(&trs, 0, sizeof(trs));
@@ -711,7 +714,7 @@
 			return NULL;
 		}
 
-		tbf = list_entry(bf->list.next, struct ath_buf, list);
+		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
 
 		/*
 		 * On some hardware the descriptor status words could
@@ -972,14 +975,15 @@
 {
 #ifdef CONFIG_ATH9K_DEBUGFS
 	struct ath_hw *ah = sc->sc_ah;
-	u8 bins[SPECTRAL_HT20_NUM_BINS];
-	u8 *vdata = (u8 *)hdr;
-	struct fft_sample_ht20 fft_sample;
+	u8 num_bins, *bins, *vdata = (u8 *)hdr;
+	struct fft_sample_ht20 fft_sample_20;
+	struct fft_sample_ht20_40 fft_sample_40;
+	struct fft_sample_tlv *tlv;
 	struct ath_radar_info *radar_info;
-	struct ath_ht20_mag_info *mag_info;
 	int len = rs->rs_datalen;
 	int dc_pos;
-	u16 length, max_magnitude;
+	u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+	enum nl80211_channel_type chan_type;
 
 	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
 	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@
 	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
 		return 0;
 
-	/* Variation in the data length is possible and will be fixed later.
-	 * Note that we only support HT20 for now.
-	 *
-	 * TODO: add HT20_40 support as well.
-	 */
-	if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
-	    (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
+	chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+	    (chan_type == NL80211_CHAN_HT40PLUS)) {
+		fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+		num_bins = SPECTRAL_HT20_40_NUM_BINS;
+		bins = (u8 *)fft_sample_40.data;
+	} else {
+		fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+		num_bins = SPECTRAL_HT20_NUM_BINS;
+		bins = (u8 *)fft_sample_20.data;
+	}
+
+	/* Variation in the data length is possible and will be fixed later */
+	if ((len > fft_len + 2) || (len < fft_len - 1))
 		return 1;
 
-	fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
-	length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
-	fft_sample.tlv.length = __cpu_to_be16(length);
-
-	fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
-	fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-	fft_sample.noise = ah->noise;
-
-	switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+	switch (len - fft_len) {
 	case 0:
 		/* length correct, nothing to do. */
-		memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+		memcpy(bins, vdata, num_bins);
 		break;
 	case -1:
 		/* first byte missing, duplicate it. */
-		memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+		memcpy(&bins[1], vdata, num_bins - 1);
 		bins[0] = vdata[0];
 		break;
 	case 2:
 		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
 		memcpy(bins, vdata, 30);
 		bins[30] = vdata[31];
-		memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+		memcpy(&bins[31], &vdata[33], num_bins - 31);
 		break;
 	case 1:
 		/* MAC added 2 extra bytes AND first byte is missing. */
 		bins[0] = vdata[0];
-		memcpy(&bins[0], vdata, 30);
+		memcpy(&bins[1], vdata, 30);
 		bins[31] = vdata[31];
-		memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+		memcpy(&bins[32], &vdata[33], num_bins - 32);
 		break;
 	default:
 		return 1;
@@ -1044,23 +1047,93 @@
 	/* DC value (value in the middle) is the blind spot of the spectral
 	 * sample and invalid, interpolate it.
 	 */
-	dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+	dc_pos = num_bins / 2;
 	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
 
-	/* mag data is at the end of the frame, in front of radar_info */
-	mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+	    (chan_type == NL80211_CHAN_HT40PLUS)) {
+		s8 lower_rssi, upper_rssi;
+		s16 ext_nf;
+		u8 lower_max_index, upper_max_index;
+		u8 lower_bitmap_w, upper_bitmap_w;
+		u16 lower_mag, upper_mag;
+		struct ath9k_hw_cal_data *caldata = ah->caldata;
+		struct ath_ht20_40_mag_info *mag_info;
 
-	/* copy raw bins without scaling them */
-	memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
-	fft_sample.max_exp = mag_info->max_exp & 0xf;
+		if (caldata)
+			ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+					caldata->nfCalHist[3].privNF);
+		else
+			ext_nf = ATH_DEFAULT_NOISE_FLOOR;
 
-	max_magnitude = spectral_max_magnitude(mag_info->all_bins);
-	fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
-	fft_sample.max_index = spectral_max_index(mag_info->all_bins);
-	fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
-	fft_sample.tsf = __cpu_to_be64(tsf);
+		length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+		fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+		fft_sample_40.tlv.length = __cpu_to_be16(length);
+		fft_sample_40.freq = __cpu_to_be16(freq);
+		fft_sample_40.channel_type = chan_type;
 
-	ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+		if (chan_type == NL80211_CHAN_HT40PLUS) {
+			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+
+			fft_sample_40.lower_noise = ah->noise;
+			fft_sample_40.upper_noise = ext_nf;
+		} else {
+			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+
+			fft_sample_40.lower_noise = ext_nf;
+			fft_sample_40.upper_noise = ah->noise;
+		}
+		fft_sample_40.lower_rssi = lower_rssi;
+		fft_sample_40.upper_rssi = upper_rssi;
+
+		mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+		lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+		upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+		fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+		fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+		lower_max_index = spectral_max_index(mag_info->lower_bins);
+		upper_max_index = spectral_max_index(mag_info->upper_bins);
+		fft_sample_40.lower_max_index = lower_max_index;
+		fft_sample_40.upper_max_index = upper_max_index;
+		lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+		upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+		fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+		fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+		fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+		fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+		tlv = (struct fft_sample_tlv *)&fft_sample_40;
+	} else {
+		u8 max_index, bitmap_w;
+		u16 magnitude;
+		struct ath_ht20_mag_info *mag_info;
+
+		length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+		fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+		fft_sample_20.tlv.length = __cpu_to_be16(length);
+		fft_sample_20.freq = __cpu_to_be16(freq);
+
+		fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+		fft_sample_20.noise = ah->noise;
+
+		mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+		magnitude = spectral_max_magnitude(mag_info->all_bins);
+		fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+		max_index = spectral_max_index(mag_info->all_bins);
+		fft_sample_20.max_index = max_index;
+		bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+		fft_sample_20.bitmap_weight = bitmap_w;
+		fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+		fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+		tlv = (struct fft_sample_tlv *)&fft_sample_20;
+	}
+
+	ath_debug_send_fft_sample(sc, tlv);
 	return 1;
 #else
 	return 0;
@@ -1308,7 +1381,7 @@
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
-	struct ath_buf *bf;
+	struct ath_rxbuf *bf;
 	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
 	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
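
An aside on the DC handling shared by the HT20 path and the new HT20/40
path: the middle FFT bin is the receiver's own DC spur and carries no signal
information, so it is replaced by the mean of its neighbours (num_bins being
SPECTRAL_HT20_NUM_BINS or SPECTRAL_HT20_40_NUM_BINS, depending on the
channel type):

	#include <stdio.h>

	static void interpolate_dc(unsigned char *bins, int num_bins)
	{
		int dc_pos = num_bins / 2;

		/* blind spot in the middle: average the neighbours */
		bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
	}

	int main(void)
	{
		unsigned char bins[8] = { 10, 12, 14, 16, 255, 20, 22, 24 };

		interpolate_dc(bins, 8);
		printf("dc bin -> %u\n", bins[4]); /* (16 + 20) / 2 = 18 */
		return 0;
	}
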
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da6..0db37f2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@
 struct wmi_event_swba {
 	__be64 tsf;
 	u8 beacon_pending;
-};
+} __packed;
 
 /*
  * 64 - HTC header - WMI header - 1 / txstatus
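
The __packed annotation matters because this struct describes a wire format
coming from the firmware: without it the compiler typically pads the u8
following the 64-bit TSF so the struct size rounds up to an 8-byte multiple,
and the parsing offsets would be wrong.  A quick demonstration:

	#include <stdio.h>
	#include <stdint.h>

	struct swba_padded {
		uint64_t tsf;
		uint8_t beacon_pending;
	};				/* usually sizeof == 16 */

	struct swba_packed {
		uint64_t tsf;
		uint8_t beacon_pending;
	} __attribute__((packed));	/* sizeof == 9, the wire size */

	int main(void)
	{
		printf("padded: %zu, packed: %zu\n",
		       sizeof(struct swba_padded), sizeof(struct swba_packed));
		return 0;
	}
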
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index dd30452..09cdbcd 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1241,12 +1241,13 @@
 		if (bf->bf_next)
 			info.link = bf->bf_next->bf_daddr;
 		else
-			info.link = 0;
+			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
 
 		if (!bf_first) {
 			bf_first = bf;
 
-			info.flags = ATH9K_TXDESC_INTREQ;
+			if (!sc->tx99_state)
+				info.flags = ATH9K_TXDESC_INTREQ;
 			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
 			    txq == sc->tx.uapsdq)
 				info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1704,16 +1705,9 @@
 	int qnum = sc->beacon.cabq->axq_qnum;
 
 	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
-	/*
-	 * Ensure the readytime % is within the bounds.
-	 */
-	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
-		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
-	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
-		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
 
 	qi.tqi_readyTime = (cur_conf->beacon_interval *
-			    sc->config.cabqReadytime) / 100;
+			    ATH_CABQ_READY_TIME) / 100;
 	ath_txq_update(sc, qnum, &qi);
 
 	return 0;
@@ -1948,7 +1942,7 @@
 			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
 	}
 
-	if (!edma) {
+	if (!edma || sc->tx99_state) {
 		TX_STAT_INC(txq->axq_qnum, txstart);
 		ath9k_hw_txstart(ah, txq->axq_qnum);
 	}
@@ -2027,6 +2021,9 @@
 		fi->keyix = ATH9K_TXKEYIX_INVALID;
 	fi->keytype = keytype;
 	fi->framelen = framelen;
+
+	if (!rate)
+		return;
 	fi->rtscts_rate = rate->hw_value;
 	if (short_preamble)
 		fi->rtscts_rate |= rate->hw_value_short;
@@ -2037,8 +2034,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath9k_channel *curchan = ah->curchan;
 
-	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
-	    (curchan->channelFlags & CHANNEL_5GHZ) &&
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
 	    (chainmask == 0x7) && (rate < 0x90))
 		return 0x3;
 	else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2329,7 +2325,7 @@
 	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
 
 	if (sc->sc_ah->caldata)
-		sc->sc_ah->caldata->paprd_packet_sent = true;
+		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
 
 	if (!(tx_flags & ATH_TX_ERROR))
 		/* Frame was ACKed */
@@ -2379,6 +2375,8 @@
 
 	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
 	bf->bf_buf_addr = 0;
+	if (sc->tx99_state)
+		goto skip_tx_complete;
 
 	if (bf->bf_state.bfs_paprd) {
 		if (time_after(jiffies,
@@ -2391,6 +2389,7 @@
 		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
 		ath_tx_complete(sc, skb, tx_flags, txq);
 	}
+skip_tx_complete:
 	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
 	 * accidentally reference it later.
 	 */
@@ -2749,3 +2748,46 @@
 		ath_txq_unlock(sc, txq);
 	}
 }
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+		    struct ath_tx_control *txctl)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ath_frame_info *fi = get_frame_info(skb);
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_buf *bf;
+	int padpos, padsize;
+
+	padpos = ieee80211_hdrlen(hdr->frame_control);
+	padsize = padpos & 3;
+
+	if (padsize && skb->len > padpos) {
+		if (skb_headroom(skb) < padsize) {
+			ath_dbg(common, XMIT,
+				"tx99 padding failed\n");
+		return -EINVAL;
+		}
+
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, padpos);
+	}
+
+	fi->keyix = ATH9K_TXKEYIX_INVALID;
+	fi->framelen = skb->len + FCS_LEN;
+	fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+	if (!bf) {
+		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+		return -EINVAL;
+	}
+
+	ath_set_rates(sc->tx99_vif, NULL, bf);
+
+	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+	ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 307bc0d..ca115f3 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -773,7 +773,7 @@
 	complete_all(&ar->cmd_wait);
 
 	/* This is required to prevent an early completion on _start */
-	INIT_COMPLETION(ar->cmd_wait);
+	reinit_completion(&ar->cmd_wait);
 
 	/*
 	 * Note:
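
INIT_COMPLETION() was a macro that re-armed a completion by zeroing its done
counter in place; reinit_completion() is the function replacement introduced
for 3.13 with the same semantics.  A single-threaded sketch of why the
re-arm is needed after complete_all() (try_wait stands in for
wait_for_completion()):

	#include <limits.h>
	#include <stdio.h>

	struct completion { unsigned int done; };

	static void complete_all(struct completion *x)
	{
		x->done = UINT_MAX / 2;	/* saturate: all waiters proceed */
	}

	static void reinit_completion(struct completion *x)
	{
		x->done = 0;		/* next wait really blocks again */
	}

	static int try_wait(struct completion *x)
	{
		if (!x->done)
			return 0;	/* would block */
		x->done--;
		return 1;		/* proceeds immediately */
	}

	int main(void)
	{
		struct completion cmd_wait = { 0 };

		complete_all(&cmd_wait);
		printf("after complete_all: %d\n", try_wait(&cmd_wait)); /* 1 */
		reinit_completion(&cmd_wait);
		printf("after reinit:       %d\n", try_wait(&cmd_wait)); /* 0 */
		return 0;
	}
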
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
similarity index 95%
rename from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
rename to drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c..a1a69c5 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -19,7 +19,7 @@
 
 #include "dfs_pattern_detector.h"
 #include "dfs_pri_detector.h"
-#include "ath9k.h"
+#include "ath.h"
 
 /*
  * tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@
 {
 	u32 sz, i;
 	struct channel_detector *cd;
-	struct ath_common *common = ath9k_hw_common(dpd->ah);
 
 	cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
 	if (cd == NULL)
@@ -167,7 +166,7 @@
 	return cd;
 
 fail:
-	ath_dbg(common, DFS,
+	ath_dbg(dpd->common, DFS,
 		"failed to allocate channel_detector for freq=%d\n", freq);
 	channel_detector_exit(dpd, cd);
 	return NULL;
@@ -242,7 +241,7 @@
 		struct pri_detector *pd = cd->detectors[i];
 		struct pri_sequence *ps = pd->add_pulse(pd, event);
 		if (ps != NULL) {
-			ath_dbg(ath9k_hw_common(dpd->ah), DFS,
+			ath_dbg(dpd->common, DFS,
 				"DFS: radar found on freq=%d: id=%d, pri=%d, "
 				"count=%d, count_false=%d\n",
 				event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@
 	return false;
 }
 
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+	return global_dfs_pool_stats;
+}
+
 static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
 			   enum nl80211_dfs_regions region)
 {
@@ -284,14 +289,18 @@
 	.exit		= dpd_exit,
 	.set_dfs_domain	= dpd_set_domain,
 	.add_pulse	= dpd_add_pulse,
+	.get_stats	= dpd_get_stats,
 	.region		= NL80211_DFS_UNSET,
 };
 
 struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
+dfs_pattern_detector_init(struct ath_common *common,
+			  enum nl80211_dfs_regions region)
 {
 	struct dfs_pattern_detector *dpd;
-	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+		return NULL;
 
 	dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
 	if (dpd == NULL)
@@ -300,7 +309,7 @@
 	*dpd = default_dpd;
 	INIT_LIST_HEAD(&dpd->channel_detectors);
 
-	dpd->ah = ah;
+	dpd->common = common;
 	if (dpd->set_dfs_domain(dpd, region))
 		return dpd;
 
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
similarity index 87%
rename from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
rename to drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abc..dde2652 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -22,6 +22,19 @@
 #include <linux/nl80211.h>
 
 /**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+	u32 pool_reference;
+	u32 pulse_allocated;
+	u32 pulse_alloc_error;
+	u32 pulse_used;
+	u32 pseq_allocated;
+	u32 pseq_alloc_error;
+	u32 pseq_used;
+};
+
+/**
  * struct pulse_event - describing pulses reported by PHY
  * @ts: pulse time stamp in us
  * @freq: channel frequency in MHz
@@ -77,11 +90,12 @@
 	bool (*add_pulse)(struct dfs_pattern_detector *dpd,
 			  struct pulse_event *pe);
 
+	struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
 	enum nl80211_dfs_regions region;
 	u8 num_radar_types;
 	u64 last_pulse_ts;
 	/* needed for ath_dbg() */
-	struct ath_hw *ah;
+	struct ath_common *common;
 
 	const struct radar_detector_specs *radar_spec;
 	struct list_head channel_detectors;
@@ -92,15 +106,7 @@
  * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
  * @return instance pointer on success, NULL otherwise
  */
-#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 extern struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
-#else
-static inline struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
-{
-	return NULL;
-}
-#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
-
+dfs_pattern_detector_init(struct ath_common *common,
+			  enum nl80211_dfs_regions region);
 #endif /* DFS_PATTERN_DETECTOR_H */
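
With the move out of ath9k, the detector's pool counters are exposed through
the new get_stats() op instead of ath9k's private dfs_debug hooks.  They
live in one global because the pulse/sequence pools themselves are global,
and they are returned by value so callers get a consistent snapshot.  A
reduced sketch of the shape:

	#include <stdio.h>

	struct ath_dfs_pool_stats {
		unsigned int pulse_allocated;
		unsigned int pulse_used;
	};

	/* one instance for all detectors, as in dfs_pri_detector.c */
	static struct ath_dfs_pool_stats global_dfs_pool_stats;

	struct dfs_pattern_detector {
		struct ath_dfs_pool_stats
			(*get_stats)(struct dfs_pattern_detector *dpd);
	};

	static struct ath_dfs_pool_stats
	dpd_get_stats(struct dfs_pattern_detector *dpd)
	{
		return global_dfs_pool_stats;	/* by value: a snapshot */
	}

	int main(void)
	{
		struct dfs_pattern_detector dpd = { .get_stats = dpd_get_stats };

		global_dfs_pool_stats.pulse_allocated = 3;
		printf("allocated=%u\n", dpd.get_stats(&dpd).pulse_allocated);
		return 0;
	}
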
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
similarity index 97%
rename from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
rename to drivers/net/wireless/ath/dfs_pri_detector.c
index 5ba4b6f..43b6081 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -17,10 +17,14 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-#include "ath9k.h"
+#include "ath.h"
 #include "dfs_pattern_detector.h"
 #include "dfs_pri_detector.h"
-#include "dfs_debug.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
 
 /**
  * struct pulse_elem - elements in pulse queue
@@ -392,7 +396,7 @@
 
 	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
 		pri_detector_reset(de, ts);
-		return false;
+		return NULL;
 	}
 
 	ps = pseq_handler_check_detection(de);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
similarity index 97%
rename from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
rename to drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d..79f0fff 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -19,6 +19,8 @@
 
 #include <linux/list.h>
 
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
 /**
  * struct pri_sequence - sequence of pulses matching one PRI
  * @head: list_head
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7d077c7..c00687e 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -356,14 +356,131 @@
 	return -1;
 }
 
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+				 struct ath_regulatory *reg,
+				 struct regulatory_request *request)
+{
+	u16 country_code;
+
+	if (!ath_is_world_regd(reg))
+		return -EINVAL;
+
+	country_code = ath_regd_find_country_by_name(request->alpha2);
+	if (country_code == (u16) -1)
+		return -EINVAL;
+
+	reg->current_rd = COUNTRY_ERD_FLAG;
+	reg->current_rd |= country_code;
+
+	__ath_regd_init(reg);
+
+	ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+	return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+				struct ath_regulatory *reg,
+				struct regulatory_request *request)
+{
+	if (__ath_reg_dyn_country(wiphy, reg, request))
+		return;
+
+	printk(KERN_DEBUG "ath: regdomain 0x%x "
+			  "dynamically updated by %s\n",
+	       reg->current_rd,
+	       reg_initiator_name(request->initiator));
+}
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+	if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+		return true;
+
+	switch (reg->country_code) {
+	case CTRY_UNITED_STATES:
+	case CTRY_JAPAN1:
+	case CTRY_JAPAN2:
+	case CTRY_JAPAN3:
+	case CTRY_JAPAN4:
+	case CTRY_JAPAN5:
+	case CTRY_JAPAN6:
+	case CTRY_JAPAN7:
+	case CTRY_JAPAN8:
+	case CTRY_JAPAN9:
+	case CTRY_JAPAN10:
+	case CTRY_JAPAN11:
+	case CTRY_JAPAN12:
+	case CTRY_JAPAN13:
+	case CTRY_JAPAN14:
+	case CTRY_JAPAN15:
+	case CTRY_JAPAN16:
+	case CTRY_JAPAN17:
+	case CTRY_JAPAN18:
+	case CTRY_JAPAN19:
+	case CTRY_JAPAN20:
+	case CTRY_JAPAN21:
+	case CTRY_JAPAN22:
+	case CTRY_JAPAN23:
+	case CTRY_JAPAN24:
+	case CTRY_JAPAN25:
+	case CTRY_JAPAN26:
+	case CTRY_JAPAN27:
+	case CTRY_JAPAN28:
+	case CTRY_JAPAN29:
+	case CTRY_JAPAN30:
+	case CTRY_JAPAN31:
+	case CTRY_JAPAN32:
+	case CTRY_JAPAN33:
+	case CTRY_JAPAN34:
+	case CTRY_JAPAN35:
+	case CTRY_JAPAN36:
+	case CTRY_JAPAN37:
+	case CTRY_JAPAN38:
+	case CTRY_JAPAN39:
+	case CTRY_JAPAN40:
+	case CTRY_JAPAN41:
+	case CTRY_JAPAN42:
+	case CTRY_JAPAN43:
+	case CTRY_JAPAN44:
+	case CTRY_JAPAN45:
+	case CTRY_JAPAN46:
+	case CTRY_JAPAN47:
+	case CTRY_JAPAN48:
+	case CTRY_JAPAN49:
+	case CTRY_JAPAN50:
+	case CTRY_JAPAN51:
+	case CTRY_JAPAN52:
+	case CTRY_JAPAN53:
+	case CTRY_JAPAN54:
+	case CTRY_JAPAN55:
+	case CTRY_JAPAN56:
+	case CTRY_JAPAN57:
+	case CTRY_JAPAN58:
+	case CTRY_JAPAN59:
+		return false;
+	}
+
+	return true;
+}
+
+static void ath_reg_dyn_country_user(struct wiphy *wiphy,
+				     struct ath_regulatory *reg,
+				     struct regulatory_request *request)
+{
+	if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+		return;
+	if (!dynamic_country_user_possible(reg))
+		return;
+	ath_reg_dyn_country(wiphy, reg, request);
+}
+
 void ath_reg_notifier_apply(struct wiphy *wiphy,
 			    struct regulatory_request *request,
 			    struct ath_regulatory *reg)
 {
 	struct ath_common *common = container_of(reg, struct ath_common,
 						 regulatory);
-	u16 country_code;
-
 	/* We always apply this */
 	ath_reg_apply_radar_flags(wiphy);
 
@@ -388,25 +505,12 @@
 		       sizeof(struct ath_regulatory));
 		break;
 	case NL80211_REGDOM_SET_BY_DRIVER:
+		break;
 	case NL80211_REGDOM_SET_BY_USER:
+		ath_reg_dyn_country_user(wiphy, reg, request);
 		break;
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-		if (!ath_is_world_regd(reg))
-			break;
-
-		country_code = ath_regd_find_country_by_name(request->alpha2);
-		if (country_code == (u16) -1)
-			break;
-
-		reg->current_rd = COUNTRY_ERD_FLAG;
-		reg->current_rd |= country_code;
-
-		printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
-			reg->current_rd);
-		__ath_regd_init(reg);
-
-		ath_reg_apply_world_flags(wiphy, request->initiator, reg);
-
+		ath_reg_dyn_country(wiphy, reg, request);
 		break;
 	}
 }
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 0000000..591ebae
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+	tristate "Qualcomm Atheros WCN3660/3680 support"
+	depends on MAC80211 && HAS_DMA
+	---help---
+	  This module adds support for wireless adapters based on
+	  Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+	  If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+	bool "WCN36XX debugfs support"
+	depends on WCN36XX
+	---help---
+	  Enables debugfs support
+
+	  If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 0000000..50c43b4
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y +=   main.o \
+               dxe.o \
+               txrx.o \
+               smd.o \
+               pmc.o \
+               debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 0000000..5b84f7a
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct wcn36xx *wcn = file->private_data;
+	struct wcn36xx_vif *vif_priv = NULL;
+	struct ieee80211_vif *vif = NULL;
+	char buf[3];
+
+	buf[0] = '0';	/* default when no station interface is present */
+	list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+		vif = container_of((void *)vif_priv,
+				   struct ieee80211_vif,
+				   drv_priv);
+		if (NL80211_IFTYPE_STATION == vif->type) {
+			if (vif_priv->pw_state == WCN36XX_BMPS)
+				buf[0] = '1';
+			break;
+		}
+	}
+	buf[1] = '\n';
+	buf[2] = 0x00;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct wcn36xx *wcn = file->private_data;
+	struct wcn36xx_vif *vif_priv = NULL;
+	struct ieee80211_vif *vif = NULL;
+
+	char buf[32];
+	int buf_size;
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+			vif = container_of((void *)vif_priv,
+				   struct ieee80211_vif,
+				   drv_priv);
+			if (NL80211_IFTYPE_STATION == vif->type) {
+				wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+				wcn36xx_pmc_enter_bmps_state(wcn, vif);
+			}
+		}
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+			vif = container_of((void *)vif_priv,
+				   struct ieee80211_vif,
+				   drv_priv);
+			if (NL80211_IFTYPE_STATION == vif->type)
+				wcn36xx_pmc_exit_bmps_state(wcn, vif);
+		}
+		break;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+	.open  = simple_open,
+	.read  = read_file_bool_bmps,
+	.write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct wcn36xx *wcn = file->private_data;
+	char buf[255], *tmp;
+	int buf_size;
+	u32 arg[WCN36xx_MAX_DUMP_ARGS];
+	int i;
+
+	memset(buf, 0, sizeof(buf));
+	memset(arg, 0, sizeof(arg));
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	tmp = buf;
+
+	for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+		char *begin;
+		begin = strsep(&tmp, " ");
+		if (begin == NULL)
+			break;
+
+		if (kstrtou32(begin, 0, &arg[i]) != 0)
+			break;
+	}
+
+	wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+		     arg[3], arg[4]);
+	wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+	return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+	.open  = simple_open,
+	.write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data)		\
+	do {							\
+		struct dentry *d;				\
+		d = debugfs_create_file(__stringify(name),	\
+					mode, dfs->rootdir,	\
+					priv_data, fop);	\
+		dfs->file_##name.dentry = d;			\
+		if (IS_ERR(d)) {				\
+			wcn36xx_warn("Create the debugfs entry failed");\
+			dfs->file_##name.dentry = NULL;		\
+		}						\
+	} while (0)
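+
+/* e.g. ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn) creates a "dump"
+ * debugfs file and stores its dentry in dfs->file_dump.dentry. */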
+
+
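+/*
+ * These entries land under the wiphy's debugfs directory, typically
+ * /sys/kernel/debug/ieee80211/<phy>/wcn36xx/ (the exact prefix depends
+ * on the debugfs mount point), e.g.:
+ *   echo 1 > .../wcn36xx/bmps_switcher   - push station vifs into BMPS
+ *   echo 0 > .../wcn36xx/bmps_switcher   - pull them back out of BMPS
+ */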
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+	struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+	dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+					  wcn->hw->wiphy->debugfsdir);
+	if (IS_ERR(dfs->rootdir)) {
+		wcn36xx_warn("Create the debugfs failed\n");
+		dfs->rootdir = NULL;
+	}
+
+	ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+		 &fops_wcn36xx_bmps, wcn);
+	ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+	struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+	debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 0000000..46307aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS	5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+	struct dentry *dentry;
+	u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+	struct dentry *rootdir;
+	struct wcn36xx_dfs_file file_bmps_switcher;
+	struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif	/* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 0000000..ee25786
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * We have two channels (high prio and low prio) for TX and two channels
+ * for RX. Data packets are transferred through the low-prio channels,
+ * management packets through the high-prio channels.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+	struct wcn36xx_dxe_ch *ch = is_low ?
+		&wcn->dxe_tx_l_ch :
+		&wcn->dxe_tx_h_ch;
+
+	return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+	wcn36xx_dbg(WCN36XX_DBG_DXE,
+		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+		    addr, data);
+
+	writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+	*data = readl(wcn->mmio + addr);
+
+	wcn36xx_dbg(WCN36XX_DBG_DXE,
+		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+		    addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+	int i;
+
+	for (i = 0; i < ch->desc_num && ctl; i++) {
+		next = ctl->next;
+		kfree(ctl);
+		ctl = next;
+	}
+}
+
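+/*
+ * Build a circular, singly-linked ring of desc_num control blocks: the
+ * last block's ->next points back at the head, so the head and tail
+ * pointers can simply chase each other around the ring.
+ */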
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+	int i;
+
+	for (i = 0; i < ch->desc_num; i++) {
+		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+		if (!cur_ctl)
+			goto out_fail;
+
+		cur_ctl->ctl_blk_order = i;
+		if (i == 0) {
+			ch->head_blk_ctl = cur_ctl;
+			ch->tail_blk_ctl = cur_ctl;
+		} else if (ch->desc_num - 1 == i) {
+			prev_ctl->next = cur_ctl;
+			cur_ctl->next = ch->head_blk_ctl;
+		} else {
+			prev_ctl->next = cur_ctl;
+		}
+		prev_ctl = cur_ctl;
+	}
+
+	return 0;
+
+out_fail:
+	wcn36xx_dxe_free_ctl_block(ch);
+	return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+	int ret;
+
+	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+	wcn->dxe_tx_l_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_L;
+	wcn->dxe_tx_h_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_H;
+
+	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+	/* DXE control block allocation */
+	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+	if (ret)
+		goto out_err;
+	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+	if (ret)
+		goto out_err;
+	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+	if (ret)
+		goto out_err;
+	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+	if (ret)
+		goto out_err;
+
+	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
+	ret = wcn->ctrl_ops->smsm_change_state(
+		WCN36XX_SMSM_WLAN_TX_ENABLE,
+		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+	return 0;
+
+out_err:
+	wcn36xx_err("Failed to allocate DXE control blocks\n");
+	wcn36xx_dxe_free_ctl_blks(wcn);
+	return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
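+/*
+ * Allocate the hardware descriptors in one DMA-coherent block and mirror
+ * the control-block ring in them: each descriptor's phy_next_l holds the
+ * bus address of the next descriptor, with the last one wrapping back to
+ * the head so the channel runs as a ring.
+ */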
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+	struct wcn36xx_dxe_desc *cur_dxe = NULL;
+	struct wcn36xx_dxe_desc *prev_dxe = NULL;
+	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+	size_t size;
+	int i;
+
+	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+					      GFP_KERNEL);
+	if (!wcn_ch->cpu_addr)
+		return -ENOMEM;
+
+	memset(wcn_ch->cpu_addr, 0, size);
+
+	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+	cur_ctl = wcn_ch->head_blk_ctl;
+
+	for (i = 0; i < wcn_ch->desc_num; i++) {
+		cur_ctl->desc = cur_dxe;
+		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+			i * sizeof(struct wcn36xx_dxe_desc);
+
+		switch (wcn_ch->ch_type) {
+		case WCN36XX_DXE_CH_TX_L:
+			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+			break;
+		case WCN36XX_DXE_CH_TX_H:
+			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+			break;
+		case WCN36XX_DXE_CH_RX_L:
+			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+			break;
+		case WCN36XX_DXE_CH_RX_H:
+			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+			break;
+		}
+		if (0 == i) {
+			cur_dxe->phy_next_l = 0;
+		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+			prev_dxe->phy_next_l =
+				cur_ctl->desc_phy_addr;
+		} else if (i == (wcn_ch->desc_num - 1)) {
+			prev_dxe->phy_next_l =
+				cur_ctl->desc_phy_addr;
+			cur_dxe->phy_next_l =
+				wcn_ch->head_blk_ctl->desc_phy_addr;
+		}
+		cur_ctl = cur_ctl->next;
+		prev_dxe = cur_dxe;
+		cur_dxe++;
+	}
+
+	return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+				   struct wcn36xx_dxe_mem_pool *pool)
+{
+	int i, chunk_size = pool->chunk_size;
+	dma_addr_t bd_phy_addr = pool->phy_addr;
+	void *bd_cpu_addr = pool->virt_addr;
+	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+	for (i = 0; i < ch->desc_num; i++) {
+		/* Only every second dxe needs a bd pointer,
+		 * the others will point to the skb data */
+		if (!(i & 1)) {
+			cur->bd_phy_addr = bd_phy_addr;
+			cur->bd_cpu_addr = bd_cpu_addr;
+			bd_phy_addr += chunk_size;
+			bd_cpu_addr += chunk_size;
+		} else {
+			cur->bd_phy_addr = 0;
+			cur->bd_cpu_addr = NULL;
+		}
+		cur = cur->next;
+	}
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+	int reg_data = 0;
+
+	wcn36xx_dxe_read_register(wcn,
+				  WCN36XX_DXE_INT_MASK_REG,
+				  &reg_data);
+
+	reg_data |= wcn_ch;
+
+	wcn36xx_dxe_write_register(wcn,
+				   WCN36XX_DXE_INT_MASK_REG,
+				   (int)reg_data);
+	return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+	struct wcn36xx_dxe_desc *dxe = ctl->desc;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	dxe->dst_addr_l = dma_map_single(NULL,
+					 skb_tail_pointer(skb),
+					 WCN36XX_PKT_SIZE,
+					 DMA_FROM_DEVICE);
+	ctl->skb = skb;
+
+	return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+				    struct wcn36xx_dxe_ch *wcn_ch)
+{
+	int i;
+	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+	cur_ctl = wcn_ch->head_blk_ctl;
+
+	for (i = 0; i < wcn_ch->desc_num; i++) {
+		wcn36xx_dxe_fill_skb(cur_ctl);
+		cur_ctl = cur_ctl->next;
+	}
+
+	return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+				     struct wcn36xx_dxe_ch *wcn_ch)
+{
+	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+	int i;
+
+	for (i = 0; i < wcn_ch->desc_num; i++) {
+		kfree_skb(cur->skb);
+		cur = cur->next;
+	}
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wcn->dxe_lock, flags);
+	skb = wcn->tx_ack_skb;
+	wcn->tx_ack_skb = NULL;
+	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+	if (!skb) {
+		wcn36xx_warn("Spurious TX complete indication\n");
+		return;
+	}
+
+	info = IEEE80211_SKB_CB(skb);
+
+	if (status == 1)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+	ieee80211_tx_status_irqsafe(wcn->hw, skb);
+	ieee80211_wake_queues(wcn->hw);
+}
+
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+	struct ieee80211_tx_info *info;
+	unsigned long flags;
+
+	/*
+	 * Use a do-while loop so at least one iteration runs: when the ring
+	 * is completely full, head and tail point at the same element, and a
+	 * plain while loop would make no cycles at all.
+	 */
+	do {
+		if (ctl->skb) {
+			dma_unmap_single(NULL, ctl->desc->src_addr_l,
+					 ctl->skb->len, DMA_TO_DEVICE);
+			info = IEEE80211_SKB_CB(ctl->skb);
+			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+				/* Keep frame until TX status comes */
+				ieee80211_free_txskb(wcn->hw, ctl->skb);
+			}
+			spin_lock_irqsave(&ctl->skb_lock, flags);
+			if (wcn->queues_stopped) {
+				wcn->queues_stopped = false;
+				ieee80211_wake_queues(wcn->hw);
+			}
+			spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+			ctl->skb = NULL;
+		}
+		ctl = ctl->next;
+	} while (ctl != ch->head_blk_ctl &&
+	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+	ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+	struct wcn36xx *wcn = (struct wcn36xx *)dev;
+	int int_src, int_reason;
+
+	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+		wcn36xx_dxe_read_register(wcn,
+					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+					  &int_reason);
+
+		/* TODO: Check int_reason */
+
+		wcn36xx_dxe_write_register(wcn,
+					   WCN36XX_DXE_0_INT_CLR,
+					   WCN36XX_INT_MASK_CHAN_TX_H);
+
+		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+					   WCN36XX_INT_MASK_CHAN_TX_H);
+		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+	}
+
+	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+		wcn36xx_dxe_read_register(wcn,
+					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+					  &int_reason);
+		/* TODO: Check int_reason */
+
+		wcn36xx_dxe_write_register(wcn,
+					   WCN36XX_DXE_0_INT_CLR,
+					   WCN36XX_INT_MASK_CHAN_TX_L);
+
+		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+					   WCN36XX_INT_MASK_CHAN_TX_L);
+		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+	struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+	disable_irq_nosync(wcn->rx_irq);
+	wcn36xx_dxe_rx_frame(wcn);
+	enable_irq(wcn->rx_irq);
+	return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+	int ret;
+
+	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+	if (ret) {
+		wcn36xx_err("failed to alloc tx irq\n");
+		goto out_err;
+	}
+
+	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+			  "wcn36xx_rx", wcn);
+	if (ret) {
+		wcn36xx_err("failed to alloc rx irq\n");
+		goto out_txirq;
+	}
+
+	enable_irq_wake(wcn->rx_irq);
+
+	return 0;
+
+out_txirq:
+	free_irq(wcn->tx_irq, wcn);
+out_err:
+	return ret;
+
+}
+
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+				     struct wcn36xx_dxe_ch *ch)
+{
+	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+	struct wcn36xx_dxe_desc *dxe = ctl->desc;
+	dma_addr_t  dma_addr;
+	struct sk_buff *skb;
+
+	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+		skb = ctl->skb;
+		dma_addr = dxe->dst_addr_l;
+		wcn36xx_dxe_fill_skb(ctl);
+
+		switch (ch->ch_type) {
+		case WCN36XX_DXE_CH_RX_L:
+			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+						   WCN36XX_DXE_INT_CH1_MASK);
+			break;
+		case WCN36XX_DXE_CH_RX_H:
+			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+						   WCN36XX_DXE_INT_CH3_MASK);
+			break;
+		default:
+			wcn36xx_warn("Unknown channel\n");
+		}
+
+		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+				 DMA_FROM_DEVICE);
+		wcn36xx_rx_skb(wcn, skb);
+		ctl = ctl->next;
+		dxe = ctl->desc;
+	}
+
+	ch->head_blk_ctl = ctl;
+
+	return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+	int int_src;
+
+	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+	/* RX_LOW_PRI */
+	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+					   WCN36XX_DXE_INT_CH1_MASK);
+		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+	}
+
+	/* RX_HIGH_PRI */
+	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+		/* Clean up all the INT within this channel */
+		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+					   WCN36XX_DXE_INT_CH3_MASK);
+		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+	}
+
+	if (!int_src)
+		wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+	size_t s;
+	void *cpu_addr;
+
+	/* Allocate BD headers for MGMT frames */
+
+	/* Where this comes from, ask QC */
+	wcn->mgmt_mem_pool.chunk_size =	WCN36XX_BD_CHUNK_SIZE +
+		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
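+	/* With WCN36XX_BD_CHUNK_SIZE = 128 (already 8-byte aligned) this
+	 * works out to 128 + 16 - 0 = 144 bytes per BD slot. */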
+
+	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+				      GFP_KERNEL);
+	if (!cpu_addr)
+		goto out_err;
+
+	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+	memset(cpu_addr, 0, s);
+
+	/* Allocate BD headers for DATA frames */
+
+	/* Where this comes from, ask QC */
+	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+				      GFP_KERNEL);
+	if (!cpu_addr)
+		goto out_err;
+
+	wcn->data_mem_pool.virt_addr = cpu_addr;
+	memset(cpu_addr, 0, s);
+
+	return 0;
+
+out_err:
+	wcn36xx_dxe_free_mem_pools(wcn);
+	wcn36xx_err("Failed to allocate BD mempool\n");
+	return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+	if (wcn->mgmt_mem_pool.virt_addr)
+		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+				  wcn->mgmt_mem_pool.virt_addr,
+				  wcn->mgmt_mem_pool.phy_addr);
+
+	if (wcn->data_mem_pool.virt_addr) {
+		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+				  wcn->data_mem_pool.virt_addr,
+				  wcn->data_mem_pool.phy_addr);
+	}
+}
+
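+/*
+ * Each transmitted frame takes two consecutive ring slots: the first
+ * descriptor points at the prebuilt buffer descriptor (BD) from the BD
+ * mem pool, the second at the skb payload itself. This pairing is why
+ * wcn36xx_dxe_init_tx_bd() gives only every other control block a BD
+ * pointer.
+ */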
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+			 struct wcn36xx_vif *vif_priv,
+			 struct sk_buff *skb,
+			 bool is_low)
+{
+	struct wcn36xx_dxe_ctl *ctl = NULL;
+	struct wcn36xx_dxe_desc *desc = NULL;
+	struct wcn36xx_dxe_ch *ch = NULL;
+	unsigned long flags;
+
+	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+	ctl = ch->head_blk_ctl;
+
+	spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+	/*
+	 * If the skb is not NULL, we have reached the tail of the ring and
+	 * the ring is full. Stop the queues to let mac80211 back off until
+	 * the ring has an empty slot again.
+	 */
+	if (NULL != ctl->next->skb) {
+		ieee80211_stop_queues(wcn->hw);
+		wcn->queues_stopped = true;
+		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+	ctl->skb = NULL;
+	desc = ctl->desc;
+
+	/* Set source address of the BD we send */
+	desc->src_addr_l = ctl->bd_phy_addr;
+
+	desc->dst_addr_l = ch->dxe_wq;
+	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+	desc->ctrl = ch->ctrl_bd;
+
+	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+			 (char *)desc, sizeof(*desc));
+	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
+			 sizeof(struct wcn36xx_tx_bd));
+
+	/* Set source address of the SKB we send */
+	ctl = ctl->next;
+	ctl->skb = skb;
+	desc = ctl->desc;
+	if (ctl->bd_cpu_addr) {
+		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+		return -EINVAL;
+	}
+
+	desc->src_addr_l = dma_map_single(NULL,
+					  ctl->skb->data,
+					  ctl->skb->len,
+					  DMA_TO_DEVICE);
+
+	desc->dst_addr_l = ch->dxe_wq;
+	desc->fr_len = ctl->skb->len;
+
+	/* set dxe descriptor to VALID */
+	desc->ctrl = ch->ctrl_skb;
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+			 (char *)desc, sizeof(*desc));
+	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
+			 (char *)ctl->skb->data, ctl->skb->len);
+
+	/* Move the head of the ring to the next empty descriptor */
+	ch->head_blk_ctl = ctl->next;
+
+	/*
+	 * When connected and trying to send a data frame, the chip may be in
+	 * sleep mode, and writing to the register will not wake it up.
+	 * Instead, notify the chip about the new frame through the SMSM bus.
+	 */
+	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+		wcn->ctrl_ops->smsm_change_state(
+				  0,
+				  WCN36XX_SMSM_WLAN_TX_ENABLE);
+	} else {
+		/* indicate End Of Packet and generate interrupt on descriptor
+		 * done.
+		 */
+		wcn36xx_dxe_write_register(wcn,
+			ch->reg_ctrl, ch->def_ctrl);
+	}
+
+	return 0;
+}
+
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+	int reg_data = 0, ret;
+
+	reg_data = WCN36XX_DXE_REG_RESET;
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+	/* Setting interrupt path */
+	reg_data = WCN36XX_DXE_CCU_INT;
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+	/***************************************/
+	/* Init descriptors for TX LOW channel */
+	/***************************************/
+	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+	/* Write channel head to a NEXT register */
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+	/* Program DMA destination addr for TX LOW */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+		WCN36XX_DXE_WQ_TX_L);
+
+	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+	/***************************************/
+	/* Init descriptors for TX HIGH channel */
+	/***************************************/
+	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+	/* Write channel head to a NEXT register */
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+	/* Program DMA destination addr for TX HIGH */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+		WCN36XX_DXE_WQ_TX_H);
+
+	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+	/* Enable channel interrupts */
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+	/***************************************/
+	/* Init descriptors for RX LOW channel */
+	/***************************************/
+	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+	/* For RX we need to preallocate buffers */
+	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+	/* Write channel head to a NEXT register */
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+	/* Write DMA source address */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+		WCN36XX_DXE_WQ_RX_L);
+
+	/* Program preallocated destination address */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+	/* Enable default control registers */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_REG_CTL_RX_L,
+		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+	/* Enable channel interrupts */
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+	/***************************************/
+	/* Init descriptors for RX HIGH channel */
+	/***************************************/
+	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+	/* For RX we need to preallocate buffers */
+	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+	/* Write channel head to a NEXT register */
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+	/* Write DMA source address */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+		WCN36XX_DXE_WQ_RX_H);
+
+	/* Program preallocated destination address */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+	/* Enable default control registers */
+	wcn36xx_dxe_write_register(wcn,
+		WCN36XX_DXE_REG_CTL_RX_H,
+		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+	/* Enable channel interrupts */
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+	ret = wcn36xx_dxe_request_irqs(wcn);
+	if (ret < 0)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+	free_irq(wcn->tx_irq, wcn);
+	free_irq(wcn->rx_irq, wcn);
+
+	if (wcn->tx_ack_skb) {
+		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+		wcn->tx_ack_skb = NULL;
+	}
+
+	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 0000000..c88562f
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW	= DMA0
+TX_HIGH	= DMA4
+RX_LOW	= DMA1
+RX_HIGH	= DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE			0x03000000
+#define WCN36XX_DXE_MEM_REG			0x202000
+
+#define WCN36XX_DXE_CCU_INT			0xA0011
+#define WCN36XX_DXE_REG_CCU_INT			0x200b10
+
+/* TODO: This must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L			0x328a44
+#define WCN36XX_DXE_CTRL_TX_H			0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L			0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H			0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD		0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB		0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD		0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB		0x328a4d
+
+/* TODO: This must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L			0x17
+#define WCN36XX_DXE_WQ_TX_H			0x17
+#define WCN36XX_DXE_WQ_RX_L			0xB
+#define WCN36XX_DXE_WQ_RX_H			0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
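+/* Hardware clears the VALID bit once it has consumed a descriptor; the
+ * TX reaping and RX paths poll for a cleared bit to detect completed
+ * transfers (see reap_tx_dxes() and wcn36xx_rx_handle_packets()). */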
+
+/* TODO: This must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L		0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H		0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H		0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L		0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR			(WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET		(WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR			(WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN			(WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE			(WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR			(WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG		(WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG		(WCN36XX_DXE_MEM_REG + 0x20)
+	/* #define WCN36XX_DXE_INT_CH6_MASK	0x00000040 */
+	/* #define WCN36XX_DXE_INT_CH5_MASK	0x00000020 */
+	#define WCN36XX_DXE_INT_CH4_MASK	0x00000010
+	#define WCN36XX_DXE_INT_CH3_MASK	0x00000008
+	/* #define WCN36XX_DXE_INT_CH2_MASK	0x00000004 */
+	#define WCN36XX_DXE_INT_CH1_MASK	0x00000002
+	#define WCN36XX_DXE_INT_CH0_MASK	0x00000001
+#define WCN36XX_DXE_0_INT_CLR			(WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR		(WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR		(WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR		(WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS		(WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS		(WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS		(WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS		(WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS		(WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET			0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW		0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH		0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET		0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET		0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET		0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET		0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR		0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR			0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE Descriptor address destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR		0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR		0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_LOW_OFFSET + \
+						 WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H	(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_HIGH_OFFSET + \
+						 WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L		(WCN36XX_DXE_MEM_REG + \
+						 WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE		0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY	0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L		0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L		0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H		0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H		0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE			128
+
+#define WCN36XX_PKT_SIZE			0xF20
+enum wcn36xx_dxe_ch_type {
+	WCN36XX_DXE_CH_TX_L,
+	WCN36XX_DXE_CH_TX_H,
+	WCN36XX_DXE_CH_RX_L,
+	WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+	WCN36XX_DXE_CH_DESC_NUMB_TX_L		= 128,
+	WCN36XX_DXE_CH_DESC_NUMB_TX_H		= 10,
+	WCN36XX_DXE_CH_DESC_NUMB_RX_L		= 512,
+	WCN36XX_DXE_CH_DESC_NUMB_RX_H		= 40
+};
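+
+/* wcn36xx_dxe_tx_frame() consumes two ring slots per frame, so the 128
+ * TX_L descriptors cover up to 64 in-flight data frames and the 10 TX_H
+ * descriptors cover 5 management frames. */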
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of following bits:
+ * union {
+ *	u32	valid		:1; //0 = DMA stop, 1 = DMA continue with this
+ *				    //descriptor
+ *	u32	transfer_type	:2; //0 = Host to Host space
+ *	u32	eop		:1; //End of Packet
+ *	u32	bd_handling	:1; //if transferType = Host to BMU, then 0
+ *				    // means first 128 bytes contain BD, and 1
+ *				    // means create new empty BD
+ *	u32	siq		:1; // SIQ
+ *	u32	diq		:1; // DIQ
+ *	u32	pdu_rel		:1; //0 = don't release BD and PDUs when done,
+ *				    // 1 = release them
+ *	u32	bthld_sel	:4; //BMU Threshold Select
+ *	u32	prio		:3; //Specifies the priority level to use for
+ *				    // the transfer
+ *	u32	stop_channel	:1; //1 = DMA stops processing further, channel
+ *				    //requires re-enabling after this
+ *	u32	intr		:1; //Interrupt on Descriptor Done
+ *	u32	rsvd		:1; //reserved
+ *	u32	size		:14;//14 bits used - ignored for BMU transfers,
+ *				    //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+	u32	ctrl;
+	u32	fr_len;
+
+	u32	src_addr_l;
+	u32	dst_addr_l;
+	u32	phy_next_l;
+	u32	src_addr_h;
+	u32	dst_addr_h;
+	u32	phy_next_h;
+} __packed;
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+	struct wcn36xx_dxe_ctl	*next;
+	struct wcn36xx_dxe_desc	*desc;
+	unsigned int		desc_phy_addr;
+	int			ctl_blk_order;
+	struct sk_buff		*skb;
+	spinlock_t              skb_lock;
+	void			*bd_cpu_addr;
+	dma_addr_t		bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+	enum wcn36xx_dxe_ch_type	ch_type;
+	void				*cpu_addr;
+	dma_addr_t			dma_addr;
+	enum wcn36xx_dxe_ch_desc_num	desc_num;
+	/* DXE control block ring */
+	struct wcn36xx_dxe_ctl		*head_blk_ctl;
+	struct wcn36xx_dxe_ctl		*tail_blk_ctl;
+
+	/* DXE channel specific configs */
+	u32				dxe_wq;
+	u32				ctrl_bd;
+	u32				ctrl_skb;
+	u32				reg_ctrl;
+	u32				def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+	int		chunk_size;
+	void		*virt_addr;
+	dma_addr_t	phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+			 struct wcn36xx_vif *vif_priv,
+			 struct sk_buff *skb,
+			 bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif	/* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 0000000..c02dbc6
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+  API VERSIONING INFORMATION
+
+  The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+  The MAJOR is incremented for major product/architecture changes
+      (and then MINOR/VERSION/REVISION are zeroed)
+  The MINOR is incremented for minor product/architecture changes
+      (and then VERSION/REVISION are zeroed)
+  The VERSION is incremented if a significant API change occurs
+      (and then REVISION is zeroed)
+  The REVISION is incremented if an insignificant API change occurs
+      or if a new API is added
+  All values are in the range 0..255 (i.e. they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
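+/* i.e. this header corresponds to RIVA API version 1.4.1.2 */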
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE    0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE    0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC    8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC  4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN       4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED		(1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED		(1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED		(1 << 2)
+#define PARAM_llACOEXIST_CHANGED		(1 << 3)
+#define PARAM_llBCOEXIST_CHANGED		(1 << 4)
+#define PARAM_llGCOEXIST_CHANGED		(1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED		(1 << 6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED	(1 << 7)
+#define PARAM_RIFS_MODE_CHANGED			(1 << 8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED	(1 << 9)
+#define PARAM_OBSS_MODE_CHANGED			(1 << 10)
+#define PARAM_BEACON_UPDATE_MASK \
+	(PARAM_BCN_INTERVAL_CHANGED |					\
+	 PARAM_SHORT_PREAMBLE_CHANGED |					\
+	 PARAM_SHORT_SLOT_TIME_CHANGED |				\
+	 PARAM_llACOEXIST_CHANGED |					\
+	 PARAM_llBCOEXIST_CHANGED |					\
+	 PARAM_llGCOEXIST_CHANGED |					\
+	 PARAM_HT20MHZCOEXIST_CHANGED |					\
+	 PARAM_NON_GF_DEVICES_PRESENT_CHANGED |				\
+	 PARAM_RIFS_MODE_CHANGED |					\
+	 PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED |				\
+	 PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH  64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+	/* Init/De-Init */
+	WCN36XX_HAL_START_REQ = 0,
+	WCN36XX_HAL_START_RSP = 1,
+	WCN36XX_HAL_STOP_REQ = 2,
+	WCN36XX_HAL_STOP_RSP = 3,
+
+	/* Scan */
+	WCN36XX_HAL_INIT_SCAN_REQ = 4,
+	WCN36XX_HAL_INIT_SCAN_RSP = 5,
+	WCN36XX_HAL_START_SCAN_REQ = 6,
+	WCN36XX_HAL_START_SCAN_RSP = 7,
+	WCN36XX_HAL_END_SCAN_REQ = 8,
+	WCN36XX_HAL_END_SCAN_RSP = 9,
+	WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+	WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+	/* HW STA configuration/deconfiguration */
+	WCN36XX_HAL_CONFIG_STA_REQ = 12,
+	WCN36XX_HAL_CONFIG_STA_RSP = 13,
+	WCN36XX_HAL_DELETE_STA_REQ = 14,
+	WCN36XX_HAL_DELETE_STA_RSP = 15,
+	WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+	WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+	WCN36XX_HAL_DELETE_BSS_REQ = 18,
+	WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+	/* Infra STA association */
+	WCN36XX_HAL_JOIN_REQ = 20,
+	WCN36XX_HAL_JOIN_RSP = 21,
+	WCN36XX_HAL_POST_ASSOC_REQ = 22,
+	WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+	/* Security */
+	WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+	WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+	WCN36XX_HAL_SET_STAKEY_REQ = 26,
+	WCN36XX_HAL_SET_STAKEY_RSP = 27,
+	WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+	WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+	WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+	WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+	/* Qos Related */
+	WCN36XX_HAL_ADD_TS_REQ = 32,
+	WCN36XX_HAL_ADD_TS_RSP = 33,
+	WCN36XX_HAL_DEL_TS_REQ = 34,
+	WCN36XX_HAL_DEL_TS_RSP = 35,
+	WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+	WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+	WCN36XX_HAL_ADD_BA_REQ = 38,
+	WCN36XX_HAL_ADD_BA_RSP = 39,
+	WCN36XX_HAL_DEL_BA_REQ = 40,
+	WCN36XX_HAL_DEL_BA_RSP = 41,
+
+	WCN36XX_HAL_CH_SWITCH_REQ = 42,
+	WCN36XX_HAL_CH_SWITCH_RSP = 43,
+	WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+	WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+	WCN36XX_HAL_GET_STATS_REQ = 46,
+	WCN36XX_HAL_GET_STATS_RSP = 47,
+	WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+	WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+	WCN36XX_HAL_MISSED_BEACON_IND = 50,
+	WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+	WCN36XX_HAL_MIC_FAILURE_IND = 52,
+	WCN36XX_HAL_FATAL_ERROR_IND = 53,
+	WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+	/* NV Interface */
+	WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+	WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+	WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+	WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+	WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+	WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+	WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+	WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+	WCN36XX_HAL_SEND_BEACON_REQ = 63,
+	WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+	WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+	WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+	WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+	WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+	WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+	/* PTT interface support */
+	WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+	WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+	/* BTAMP related events */
+	WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+	WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+	WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+	WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+	WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+	WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+	WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+	WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+	WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+	WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+	WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+	WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+	WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+	WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+	WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+	WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+	WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+	WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+	WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+	WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+	WCN36XX_HAL_GET_RSSI_REQ = 92,
+	WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+	WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+	WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+	WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+	WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+	WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+	WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+	WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+	WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+	WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+	WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+	WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+	WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+	WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+	WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+	WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+	WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+	WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+	WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+	WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+	WCN36XX_HAL_GET_RSSI_RSP = 113,
+	WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+	/* 11k related events */
+	WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+	WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+	/* 11R related msgs */
+	WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+	WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+	/* P2P  WLAN_FEATURE_P2P */
+	WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+	WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+	/* WLAN Dump commands */
+	WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+	WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+	/* OEM_DATA FEATURE SUPPORT */
+	WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+	WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+	/* ADD SELF STA REQ and RSP */
+	WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+	WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+	/* DEL SELF STA SUPPORT */
+	WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+	WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+	/* Coex Indication */
+	WCN36XX_HAL_COEX_IND = 129,
+
+	/* Tx Complete Indication */
+	WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+	/* Host Suspend/resume messages */
+	WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+	WCN36XX_HAL_HOST_RESUME_REQ = 132,
+	WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+	WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+	WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+	WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+	WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+	WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+	WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+	WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+	WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+	WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+	WCN36XX_HAL_RADAR_DETECT_IND = 143,
+	WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+	WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+	WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+	/* PNO messages */
+	WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+	WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+	WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+	WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+	WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+	WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+	WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+	WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+	WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+	WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+	WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+	WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+	WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+	WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+	WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+	WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+	WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+	WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+	/*
+	 * This is a temporary fix. It should be removed once the host and
+	 * Riva code are in sync.
+	 */
+	WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+	WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+	WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+	WCN36XX_HAL_TSM_STATS_REQ = 168,
+	WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+	/* wake reason indication (WOW) */
+	WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+	/* GTK offload support */
+	WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+	WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+	WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+	WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+	WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+	WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+	WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+	WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+	WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+	WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+	WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+	WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+	WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+	WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+	WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+	WCN36XX_HAL_DEL_BA_IND = 188,
+	WCN36XX_HAL_DHCP_START_IND = 189,
+	WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+	WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+	WCN36XX_HAL_MSG_VERSION0 = 0,
+	WCN36XX_HAL_MSG_VERSION1 = 1,
+	/* define as 2 bytes data */
+	WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+	WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+	DRIVER_TYPE_PRODUCTION = 0,
+	DRIVER_TYPE_MFG = 1,
+	DRIVER_TYPE_DVT = 2,
+	DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+	HAL_STOP_TYPE_SYS_RESET,
+	HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+	HAL_STOP_TYPE_RF_KILL,
+	HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+	HAL_SYS_MODE_NORMAL,
+	HAL_SYS_MODE_LEARN,
+	HAL_SYS_MODE_SCAN,
+	HAL_SYS_MODE_PROMISC,
+	HAL_SYS_MODE_SUSPEND_LINK,
+	HAL_SYS_MODE_ROAM_SCAN,
+	HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+	HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+	/* 20MHz IF bandwidth centered on IF carrier */
+	PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+	/* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+	PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+	/* 40MHz IF bandwidth centered on IF carrier */
+	PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+	/* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+	PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+	/* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+	/* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+	/* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+	/* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+	/* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+	/* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+	/* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+	PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+	PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing(SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+	/* Static SM Power Save mode */
+	WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+	/* Dynamic SM Power Save mode */
+	WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+	/* reserved */
+	WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+	/* SM Power Save disabled */
+	WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+	WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+	STA_TAURUS = 0,
+	STA_TITAN,
+	STA_POLARIS,
+	STA_11b,
+	STA_11bg,
+	STA_11a,
+	STA_11n,
+	STA_11ac,
+	STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES           4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES           8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES       3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET    16
+
+enum wcn36xx_hal_bss_type {
+	WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+	/* Added for softAP support */
+	WCN36XX_HAL_INFRA_AP_MODE,
+
+	WCN36XX_HAL_IBSS_MODE,
+
+	/* Added for BT-AMP support */
+	WCN36XX_HAL_BTAMP_STA_MODE,
+
+	/* Added for BT-AMP support */
+	WCN36XX_HAL_BTAMP_AP_MODE,
+
+	WCN36XX_HAL_AUTO_MODE,
+
+	WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+	WCN36XX_HAL_11A_NW_TYPE,
+	WCN36XX_HAL_11B_NW_TYPE,
+	WCN36XX_HAL_11G_NW_TYPE,
+	WCN36XX_HAL_11N_NW_TYPE,
+	WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX            12
+
+enum wcn36xx_hal_ht_operating_mode {
+	/* No Protection */
+	WCN36XX_HAL_HT_OP_MODE_PURE,
+
+	/* Overlap Legacy device present, protection is optional */
+	WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+	/* No legacy device, but 20 MHz HT present */
+	WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+	/* Protection is required */
+	WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+	WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+	WCN36XX_HAL_ED_NONE,
+	WCN36XX_HAL_ED_WEP40,
+	WCN36XX_HAL_ED_WEP104,
+	WCN36XX_HAL_ED_TKIP,
+	WCN36XX_HAL_ED_CCMP,
+	WCN36XX_HAL_ED_WPI,
+	WCN36XX_HAL_ED_AES_128_CMAC,
+	WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN                16
+#define WLAN_WAPI_KEY_RSC_LEN               16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH              32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS     4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+	WCN36XX_HAL_TX_ONLY,
+	WCN36XX_HAL_RX_ONLY,
+	WCN36XX_HAL_TX_RX,
+	WCN36XX_HAL_TX_DEFAULT,
+	WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+	WCN36XX_HAL_WEP_STATIC,
+	WCN36XX_HAL_WEP_DYNAMIC,
+	WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+	WCN36XX_HAL_LINK_IDLE_STATE = 0,
+	WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+	WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+	WCN36XX_HAL_LINK_AP_STATE = 3,
+	WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+	/* BT-AMP Case */
+	WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+	WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+	WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+	WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+	/* Reserved for HAL Internal Use */
+	WCN36XX_HAL_LINK_LEARN_STATE = 9,
+	WCN36XX_HAL_LINK_SCAN_STATE = 10,
+	WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+	WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+	WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+	WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+	WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+	HAL_SUMMARY_STATS_INFO = 0x00000001,
+	HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+	HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+	HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+	HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+	HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+	BTAMP_EVENT_CONNECTION_START,
+	BTAMP_EVENT_CONNECTION_STOP,
+	BTAMP_EVENT_CONNECTION_TERMINATED,
+
+	/* This and beyond are invalid values */
+	BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+	PE_SUMMARY_STATS_INFO = 0x00000001,
+	PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+	PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+	PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+	PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+	PE_PER_STA_STATS_INFO = 0x00000020,
+
+	/* This and beyond are invalid values */
+	PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID				0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA		1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA		2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE		3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN		4
+#define WCN36XX_HAL_CFG_CAL_PERIOD			5
+#define WCN36XX_HAL_CFG_CAL_CONTROL			6
+#define WCN36XX_HAL_CFG_PROXIMITY			7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY			8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME			9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU		10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD			11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT		12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT		13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD		14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO		15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE		16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO		17
+#define WCN36XX_HAL_CFG_FIXED_RATE			18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY		19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY		20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY		21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION		22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ	23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ	24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ	25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ		26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS			27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT	28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER		29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR		30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE	31
+#define WCN36XX_HAL_CFG_STATS_PERIOD			32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION		33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED		34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD			35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK			36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE			37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO			38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI			39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH		40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS			41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD		42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG	43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG	44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG	45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG	46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS	47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL		48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD		49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER		50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL			51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD		52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD		53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM			55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM	56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE		57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT	58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN		59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI		60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS	61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI			62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS	63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE		64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST	65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20			66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40			67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20			68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40			69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING	70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL	71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4		72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5			73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD	74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP		75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE		76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK	77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS	78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT	79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION		80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT		81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT		82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST		83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT		84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT		85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT		86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT		87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN		88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN	89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN	90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN		91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT		92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN		93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC		94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP		95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO		96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER		97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT			98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION	99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER	100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT	101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT		102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER	103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE		104
+#define WCN36XX_HAL_CFG_MAX_PARAMS			105
+
+/* Message definitions - All the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+	u8 revision;
+	u8 version;
+	u8 minor;
+	u8 major;
+} __packed;
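+
+/*
+ * Illustrative sketch (editor's addition, not part of the firmware
+ * interface): the four bytes above are commonly rendered as a
+ * "major.minor.version.revision" string.
+ */
+static inline int wcn36xx_hal_format_version(char *buf, size_t size,
+					     const struct wcnss_wlan_version *v)
+{
+	/* e.g. "1.2.2.14" */
+	return snprintf(buf, size, "%u.%u.%u.%u",
+			v->major, v->minor, v->version, v->revision);
+}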
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+	u8 id;
+
+	/* 0 for multicast */
+	u8 unicast;
+
+	enum ani_key_direction direction;
+
+	/* Usage is unknown */
+	u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+	/* =1 for authenticator, =0 for supplicant */
+	u8 pae_role;
+
+	u16 length;
+	u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params is defined here since it is shared by the
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+	/* STA Index */
+	u16 sta_index;
+
+	/* Encryption Type used with peer */
+	enum ani_ed_type enc_type;
+
+	/* STATIC/DYNAMIC - valid only for WEP */
+	enum ani_wep_type wep_type;
+
+	/* Default WEP key, valid only for static WEP; must be between 0 and 3. */
+	u8 def_wep_idx;
+
+	/* valid only for non-static WEP encryptions */
+	struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+	/*
+	 * Control for Replay Count: 1 = single TID based replay count on TX,
+	 * 0 = per TID based replay count on TX
+	 */
+	u8 single_tid_rc;
+
+} __packed;
+
+/* 8-byte control message header used by HAL */
+struct wcn36xx_hal_msg_header {
+	enum wcn36xx_hal_host_msg_type msg_type:16;
+	enum wcn36xx_hal_host_msg_version msg_version:16;
+	u32 len;
+} __packed;
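+
+/*
+ * Minimal sketch of filling the common header (editor's addition, under
+ * the assumption that 'len' covers the whole message including the
+ * header itself, as the length notes further below state).
+ */
+static inline void wcn36xx_hal_init_msg(struct wcn36xx_hal_msg_header *hdr,
+					enum wcn36xx_hal_host_msg_type type,
+					u32 total_len)
+{
+	hdr->msg_type = type;
+	/* WCN36XX_HAL_MSG_VERSION0 is assumed to be the default version */
+	hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+	hdr->len = total_len;
+}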
+
+/* Config format required by HAL for each CFG item */
+struct wcn36xx_hal_cfg {
+	/* Cfg Id. The Id required by HAL is exported by HAL
+	 * in a shared header file between UMAC and HAL. */
+	u16 id;
+
+	/* Length of the Cfg. This parameter is used to go to the next
+	 * cfg in the TLV format. */
+	u16 len;
+
+	/* Padding bytes for unaligned addresses */
+	u16 pad_bytes;
+
+	/* Reserved bytes used to align the cfg value address */
+	u16 reserve;
+
+	/* Following the 'len' field there should be 'len' bytes
+	 * containing the value, i.e. u8 value[len] */
+} __packed;
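+
+/*
+ * Editor's sketch of walking the TLV buffer built from these entries:
+ * each struct wcn36xx_hal_cfg is assumed to be followed by 'len' value
+ * bytes plus 'pad_bytes' of padding before the next entry.
+ */
+static inline struct wcn36xx_hal_cfg *
+wcn36xx_hal_cfg_next(struct wcn36xx_hal_cfg *cfg)
+{
+	u8 *val = (u8 *)(cfg + 1);	/* value bytes follow the header */
+
+	return (struct wcn36xx_hal_cfg *)(val + cfg->len + cfg->pad_bytes);
+}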
+
+struct wcn36xx_hal_mac_start_parameters {
+	/* Driver Type - Production or FTM etc */
+	enum driver_type type;
+
+	/* Length of the config buffer */
+	u32 len;
+
+	/* Following this there is a TLV formatted buffer of length
+	 * "len" bytes containing all config values.
+	 * The TLV is expected to be formatted like this:
+	 * 0           15            31           31+CFG_LEN-1        length-1
+	 * |   CFG_ID   |   CFG_LEN   |   CFG_BODY    |  CFG_ID  |......|
+	 */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+	/* config buffer must start in TLV format just here */
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+	/* success or failure */
+	u16 status;
+
+	/* Max number of STA supported by the device */
+	u8 stations;
+
+	/* Max number of BSS supported by the device */
+	u8 bssids;
+
+	/* API Version */
+	struct wcnss_wlan_version version;
+
+	/* CRM build information */
+	u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+	/* hardware/chipset/misc version information */
+	u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+	/* The reason for which the device is being stopped */
+	enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+	/*
+	 * Note: The length specified in these messages should be
+	 * header.len = sizeof(struct wcn36xx_hal_update_cfg_req_msg) +
+	 * config_buffer_len
+	 */
+	struct wcn36xx_hal_msg_header header;
+
+	/* Length of the config buffer. Allows UMAC to update multiple CFGs */
+	u32 len;
+
+	/*
+	 * Following this there is a TLV formatted buffer of length
+	 * "uConfigBufferLen" bytes containing all config values.
+	 * The TLV is expected to be formatted like this:
+	 * 0           15            31           31+CFG_LEN-1        length-1
+	 * |   CFG_ID   |   CFG_LEN   |   CFG_BODY    |  CFG_ID  |......|
+	 */
+
+} __packed;
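+
+/*
+ * Worked example of the length rule above (editor's addition): for a
+ * config buffer of 'config_buffer_len' bytes appended after this struct,
+ * the total on-wire length would be
+ *
+ *	header.len = sizeof(struct wcn36xx_hal_update_cfg_req_msg) +
+ *		     config_buffer_len;
+ */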
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+	u8 subType:4;
+	u8 type:2;
+	u8 protVer:2;
+
+	u8 order:1;
+	u8 wep:1;
+	u8 moreData:1;
+	u8 powerMgmt:1;
+	u8 retry:1;
+	u8 moreFrag:1;
+	u8 fromDS:1;
+	u8 toDS:1;
+
+#else
+
+	u8 protVer:2;
+	u8 type:2;
+	u8 subType:4;
+
+	u8 toDS:1;
+	u8 fromDS:1;
+	u8 moreFrag:1;
+	u8 retry:1;
+	u8 powerMgmt:1;
+	u8 moreData:1;
+	u8 wep:1;
+	u8 order:1;
+
+#endif
+
+};
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+	u8 fragNum:4;
+	u8 seqNumLo:4;
+	u8 seqNumHi:8;
+};
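+
+/*
+ * Editor's sketch: the 12-bit 802.11 sequence number is split across the
+ * two fields above and can be recombined as follows.
+ */
+static inline u16 wcn36xx_hal_seq_num(const struct wcn36xx_hal_mac_seq_ctl *sc)
+{
+	return ((u16)sc->seqNumHi << 4) | sc->seqNumLo;
+}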
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+	struct wcn36xx_hal_mac_frame_ctl fc;
+	u8 durationLo;
+	u8 durationHi;
+	u8 da[6];
+	u8 sa[6];
+	u8 bssId[6];
+	struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID               2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+	u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+	u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* LEARN - AP Role
+	   SCAN - STA Role */
+	enum wcn36xx_hal_sys_mode mode;
+
+	/* BSSID of the BSS */
+	u8 bssid[ETH_ALEN];
+
+	/* Whether BSS needs to be notified */
+	u8 notify;
+
+	/* Kind of frame to be used for notifying the BSS (Data Null, QoS
+	 * Null, or CTS to Self). Must always be a valid frame type. */
+	u8 frame_type;
+
+	/* UMAC has the option of passing the MAC frame to be used for
+	 * notifying the BSS. If non-zero, HAL will use the MAC frame
+	 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+	 * appropriate MAC frame based on frameType. */
+	u8 frame_len;
+
+	/* Following the framelength there is a MAC frame buffer if
+	 * frameLength is non-zero. */
+	struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+	/* Entry to hold number of active BSS idx's */
+	struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* LEARN - AP Role
+	   SCAN - STA Role */
+	enum wcn36xx_hal_sys_mode mode;
+
+	/* BSSID of the BSS */
+	u8 bssid[ETH_ALEN];
+
+	/* Whether BSS needs to be notified */
+	u8 notify;
+
+	/* Kind of frame to be used for notifying the BSS (Data Null, QoS
+	 * Null, or CTS to Self). Must always be a valid frame type. */
+	u8 frame_type;
+
+	/* UMAC has the option of passing the MAC frame to be used for
+	 * notifying the BSS. If non-zero, HAL will use the MAC frame
+	 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+	 * appropriate MAC frame based on frameType. */
+	u8 frame_length;
+
+	/* Following the framelength there is a MAC frame buffer if
+	 * frameLength is non-zero. */
+	struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+	/* Entry to hold number of active BSS idx's */
+	struct wcn36xx_hal_scan_entry scan_entry;
+
+	/* Single NoA usage in Scanning */
+	u8 use_noa;
+
+	/* Indicates the scan duration (in ms) */
+	u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Indicates the channel to scan */
+	u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u32 start_tsf[2];
+	u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Indicates the channel to stop scanning. Not really used, but
+	 * retained for symmetry with the "start scan" message. It can
+	 * also help with error checking if needed. */
+	u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Identifies the operational state of the AP/STA
+	 * LEARN - AP Role SCAN - STA Role */
+	enum wcn36xx_hal_sys_mode mode;
+
+	/* Operating channel to tune to. */
+	u8 oper_channel;
+
+	/* Channel Bonding state If 20/40 MHz is operational, this will
+	 * indicate the 40 MHz extension channel in combination with the
+	 * control channel */
+	enum phy_chan_bond_state cb_state;
+
+	/* BSSID of the BSS */
+	u8 bssid[ETH_ALEN];
+
+	/* Whether BSS needs to be notified */
+	u8 notify;
+
+	/* Kind of frame to be used for notifying the BSS (Data Null, QoS
+	 * Null, or CTS to Self). Must always be a valid frame type. */
+	u8 frame_type;
+
+	/* UMAC has the option of passing the MAC frame to be used for
+	 * notifying the BSS. If non-zero, HAL will use the MAC frame
+	 * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+	 * appropriate MAC frame based on frameType. */
+	u8 frame_length;
+
+	/* Following the framelength there is a MAC frame buffer if
+	 * frameLength is non-zero. */
+	struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+	/* Entry to hold number of active BSS idx's */
+	struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+	HW_RATE_INDEX_1MBPS	= 0x82,
+	HW_RATE_INDEX_2MBPS	= 0x84,
+	HW_RATE_INDEX_5_5MBPS	= 0x8B,
+	HW_RATE_INDEX_6MBPS	= 0x0C,
+	HW_RATE_INDEX_9MBPS	= 0x12,
+	HW_RATE_INDEX_11MBPS	= 0x96,
+	HW_RATE_INDEX_12MBPS	= 0x18,
+	HW_RATE_INDEX_18MBPS	= 0x24,
+	HW_RATE_INDEX_24MBPS	= 0x30,
+	HW_RATE_INDEX_36MBPS	= 0x48,
+	HW_RATE_INDEX_48MBPS	= 0x60,
+	HW_RATE_INDEX_54MBPS	= 0x6C
+};
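+
+/*
+ * Editor's note: these values appear to follow the 802.11 Supported
+ * Rates IE encoding - the rate in units of 500 kbit/s, with bit 0x80
+ * set on the DSSS (11b) rates. A sketch decoding them under that
+ * assumption:
+ */
+static inline u32 wcn36xx_hal_rate_index_to_kbps(u8 rate_index)
+{
+	/* e.g. HW_RATE_INDEX_5_5MBPS: (0x8b & 0x7f) * 500 = 5500 kbps */
+	return (rate_index & 0x7f) * 500;
+}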
+
+struct wcn36xx_hal_supported_rates {
+	/*
+	 * For Self STA Entry: this represents Self Mode.
+	 * For Peer Stations, this represents the mode of the peer.
+	 * On Station:
+	 *
+	 * --this mode is updated when PE adds the Self Entry.
+	 *
+	 * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+	 *    is used to indicate the mode of the AP.
+	 *
+	 * ON AP:
+	 *
+	 * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+	 *     for that BSS is used to indicate the self mode of the AP.
+	 *
+	 * -- OR when a station is associated, PE sends 'ADD_STA' message
+	 *    with this mode updated.
+	 */
+
+	enum sta_rate_mode op_rate_mode;
+
+	/* 11b, 11a and aniLegacyRates are IE rates which give the rate
+	 * in units of 500 kbps */
+	u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+	u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+	u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+	u16 reserved;
+
+	/* Taurus only supports 26 Titan rates (no ESF/concat rates will
+	 * be supported). The first 26 bits are reserved for those Titan
+	 * rates and the last 4 bits (bit 28-31) for Taurus; 2 bits
+	 * (bit 26-27) are reserved. */
+	/* Titan and Taurus Rates */
+	u32 enhanced_rate_bitmap;
+
+	/*
+	 * 0-76 bits used, remaining reserved
+	 * bits 0-15 and 32 should be set.
+	 */
+	u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+	/*
+	 * RX Highest Supported Data Rate defines the highest data
+	 * rate that the STA is able to receive, in units of 1 Mbps.
+	 * This value is derived from "Supported MCS Set field" inside
+	 * the HT capability element.
+	 */
+	u16 rx_highest_data_rate;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_params {
+	/* BSSID of STA */
+	u8 bssid[ETH_ALEN];
+
+	/* ASSOC ID, as assigned by UMAC */
+	u16 aid;
+
+	/* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+	u8 type;
+
+	/* Short Preamble Supported. */
+	u8 short_preamble_supported;
+
+	/* MAC Address of STA */
+	u8 mac[ETH_ALEN];
+
+	/* Listen interval of the STA */
+	u16 listen_interval;
+
+	/* Support for 11e/WMM */
+	u8 wmm_enabled;
+
+	/* 11n HT capable STA */
+	u8 ht_capable;
+
+	/* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+	u8 tx_channel_width_set;
+
+	/* RIFS mode 0 - NA, 1 - Allowed */
+	u8 rifs_mode;
+
+	/* L-SIG TXOP Protection mechanism
+	   0 - No Support, 1 - Supported
+	   SG - there is global field */
+	u8 lsig_txop_protection;
+
+	/* Max Ampdu Size supported by STA. TPE programming.
+	   0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+	u8 max_ampdu_size;
+
+	/* Max Ampdu density. Used by RA.  3 : 0~7 : 2^(11nAMPDUdensity -4) */
+	u8 max_ampdu_density;
+
+	/* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+	u8 max_amsdu_size;
+
+	/* Short GI support for 40 MHz packets */
+	u8 sgi_40mhz;
+
+	/* Short GI support for 20 MHz packets */
+	u8 sgi_20Mhz;
+
+	/* TODO move this parameter to the end for 3680 */
+	/* These rates are the intersection of peer and self capabilities. */
+	struct wcn36xx_hal_supported_rates supported_rates;
+
+	/* Robust Management Frame (RMF) enabled/disabled */
+	u8 rmf;
+
+	/* The unicast encryption type in the association */
+	u32 encrypt_type;
+
+	/* HAL should update the existing STA entry, if this flag is set. UMAC
+	   will set this flag in case of RE-ASSOC, where we want to reuse the
+	   old STA ID. 0 = Add, 1 = Update */
+	u8 action;
+
+	/* U-APSD Flags: 1b per AC.  Encoded as follows:
+	   b7 b6 b5 b4 b3 b2 b1 b0 =
+	   X  X  X  X  BE BK VI VO */
+	u8 uapsd;
+
+	/* Max SP Length */
+	u8 max_sp_len;
+
+	/* 11n Green Field preamble support
+	   0 - Not supported, 1 - Supported */
+	u8 green_field_capable;
+
+	/* MIMO Power Save mode */
+	enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+	/* Delayed BA Support */
+	u8 delayed_ba_support;
+
+	/* Max AMPDU duration, in units of 32 us */
+	u8 max_ampdu_duration;
+
+	/* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+	 * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+	 * rates. */
+	u8 dsss_cck_mode_40mhz;
+
+	/* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+	u8 sta_index;
+
+	/* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+	 * code */
+	u8 bssid_index;
+
+	u8 p2p;
+
+	/* TODO add this parameter for 3680. */
+	/* Reserved to align next field on a dword boundary */
+	/* u8 reserved; */
+} __packed;
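+
+/*
+ * Editor's sketch of the 'uapsd' encoding used above: one
+ * delivery-enabled bit per access category, VO in bit 0 up to BE in
+ * bit 3. The helper names are hypothetical, not firmware-defined.
+ */
+#define WCN36XX_HAL_UAPSD_VO	0x01	/* bit 0 */
+#define WCN36XX_HAL_UAPSD_VI	0x02	/* bit 1 */
+#define WCN36XX_HAL_UAPSD_BK	0x04	/* bit 2 */
+#define WCN36XX_HAL_UAPSD_BE	0x08	/* bit 3 */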
+
+struct wcn36xx_hal_config_sta_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+	/* BSSID of STA */
+	u8 bssid[ETH_ALEN];
+
+	/* ASSOC ID, as assigned by UMAC */
+	u16 aid;
+
+	/* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+	u8 type;
+
+	/* Short Preamble Supported. */
+	u8 short_preamble_supported;
+
+	/* MAC Address of STA */
+	u8 mac[ETH_ALEN];
+
+	/* Listen interval of the STA */
+	u16 listen_interval;
+
+	/* Support for 11e/WMM */
+	u8 wmm_enabled;
+
+	/* 11n HT capable STA */
+	u8 ht_capable;
+
+	/* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+	u8 tx_channel_width_set;
+
+	/* RIFS mode 0 - NA, 1 - Allowed */
+	u8 rifs_mode;
+
+	/* L-SIG TXOP Protection mechanism
+	   0 - No Support, 1 - Supported
+	   SG - there is global field */
+	u8 lsig_txop_protection;
+
+	/* Max Ampdu Size supported by STA. TPE programming.
+	   0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+	u8 max_ampdu_size;
+
+	/* Max Ampdu density. Used by RA.  3 : 0~7 : 2^(11nAMPDUdensity -4) */
+	u8 max_ampdu_density;
+
+	/* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+	u8 max_amsdu_size;
+
+	/* Short GI support for 40 MHz packets */
+	u8 sgi_40mhz;
+
+	/* Short GI support for 20 MHz packets */
+	u8 sgi_20Mhz;
+
+	/* Robust Management Frame (RMF) enabled/disabled */
+	u8 rmf;
+
+	/* The unicast encryption type in the association */
+	u32 encrypt_type;
+
+	/* HAL should update the existing STA entry, if this flag is set. UMAC
+	   will set this flag in case of RE-ASSOC, where we want to reuse the
+	   old STA ID. 0 = Add, 1 = Update */
+	u8 action;
+
+	/* U-APSD Flags: 1b per AC.  Encoded as follows:
+	   b7 b6 b5 b4 b3 b2 b1 b0 =
+	   X  X  X  X  BE BK VI VO */
+	u8 uapsd;
+
+	/* Max SP Length */
+	u8 max_sp_len;
+
+	/* 11n Green Field preamble support
+	   0 - Not supported, 1 - Supported */
+	u8 green_field_capable;
+
+	/* MIMO Power Save mode */
+	enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+	/* Delayed BA Support */
+	u8 delayed_ba_support;
+
+	/* Max AMPDU duration, in units of 32 us */
+	u8 max_ampdu_duration;
+
+	/* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+	 * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+	 * rates. */
+	u8 dsss_cck_mode_40mhz;
+
+	/* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+	u8 sta_index;
+
+	/* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+	 * code */
+	u8 bssid_index;
+
+	u8 p2p;
+
+	/* Reserved to align next field on a dword boundary */
+	u8 reserved;
+
+	/* These rates are the intersection of peer and self capabilities. */
+	struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+	/* success or failure */
+	u32 status;
+
+	/* Station index; valid only when the 'status' field is SUCCESS */
+	u8 sta_index;
+
+	/* BSSID Index of BSS to which the station is associated */
+	u8 bssid_index;
+
+	/* DPU Index for PTK */
+	u8 dpu_index;
+
+	/* DPU Index for GTK */
+	u8 bcast_dpu_index;
+
+	/* DPU Index for IGTK  */
+	u8 bcast_mgmt_dpu_idx;
+
+	/* PTK DPU signature */
+	u8 uc_ucast_sig;
+
+	/* GTK DPU signature */
+	u8 uc_bcast_sig;
+
+	/* IGTK DPU signature */
+	u8 uc_mgmt_sig;
+
+	u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Index of STA to delete */
+	u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Index of STA deleted */
+	u8 sta_id;
+} __packed;
+
+/* 12 bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 bytes */
+struct wcn36xx_hal_rate_set {
+	u8 num_rates;
+	u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+	u8 rsvd:1;
+	u8 aci:2;
+	u8 acm:1;
+	u8 aifsn:4;
+#else
+	u8 aifsn:4;
+	u8 acm:1;
+	u8 aci:2;
+	u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+	u8 max:4;
+	u8 min:4;
+#else
+	u8 min:4;
+	u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+	struct wcn36xx_hal_aci_aifsn aci;
+	struct wcn36xx_hal_mac_cw cw;
+	u16 txop_limit;
+} __packed;
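+
+/*
+ * Editor's sketch: the 4-bit min/max fields above look like the WMM ECW
+ * exponent encoding, where the actual contention window is 2^ECW - 1.
+ * Decoding under that assumption:
+ */
+static inline u16 wcn36xx_hal_ecw_to_cw(u8 ecw)
+{
+	return (1U << ecw) - 1;	/* e.g. ECW 4 -> CW 15 */
+}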
+
+struct wcn36xx_hal_mac_ssid {
+	u8 length;
+	u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ *  in the software system. */
+enum wcn36xx_hal_con_mode {
+	WCN36XX_HAL_STA_MODE = 0,
+
+	/* To support softAP mode. This is misleading;
+	   it means AP mode only. */
+	WCN36XX_HAL_STA_SAP_MODE = 1,
+
+	WCN36XX_HAL_P2P_CLIENT_MODE,
+	WCN36XX_HAL_P2P_GO_MODE,
+	WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+	HAL_STA = 1,
+	HAL_SAP = 2,
+
+	/* To support simultaneous STA and softAP mode, i.e. STA+AP */
+	HAL_STA_SAP = 3,
+
+	HAL_P2P_CLIENT = 4,
+	HAL_P2P_GO = 8,
+	HAL_MAX_CONCURRENCY_PERSONA = 4
+};
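+
+/* Editor's example: a STA interface running together with a P2P client
+ * would be reported as HAL_STA | HAL_P2P_CLIENT (0x5). */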
+
+struct wcn36xx_hal_config_bss_params {
+	/* BSSID */
+	u8 bssid[ETH_ALEN];
+
+	/* Self Mac Address */
+	u8 self_mac_addr[ETH_ALEN];
+
+	/* BSS type */
+	enum wcn36xx_hal_bss_type bss_type;
+
+	/* Operational Mode: AP =0, STA = 1 */
+	u8 oper_mode;
+
+	/* Network Type */
+	enum wcn36xx_hal_nw_type nw_type;
+
+	/* Used to classify PURE_11G/11G_MIXED to program MTU */
+	u8 short_slot_time_supported;
+
+	/* Co-exist with 11a STA */
+	u8 lla_coexist;
+
+	/* Co-exist with 11b STA */
+	u8 llb_coexist;
+
+	/* Co-exist with 11g STA */
+	u8 llg_coexist;
+
+	/* Coexistence with 11n STA */
+	u8 ht20_coexist;
+
+	/* Non GF coexist flag */
+	u8 lln_non_gf_coexist;
+
+	/* TXOP protection support */
+	u8 lsig_tx_op_protection_full_support;
+
+	/* RIFS mode */
+	u8 rifs_mode;
+
+	/* Beacon Interval in TU */
+	u16 beacon_interval;
+
+	/* DTIM period */
+	u8 dtim_period;
+
+	/* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+	u8 tx_channel_width_set;
+
+	/* Operating channel */
+	u8 oper_channel;
+
+	/* Extension channel for channel bonding */
+	u8 ext_channel;
+
+	/* Reserved to align next field on a dword boundary */
+	u8 reserved;
+
+	/* TODO move sta to the end for 3680 */
+	/* Context of the station being added in HW
+	 *  Add a STA entry for "itself" -
+	 *
+	 *  On AP  - Add the AP itself in an "STA context"
+	 *
+	 *  On STA - Add the AP to which this STA is joining in an
+	 *  "STA context"
+	 */
+	struct wcn36xx_hal_config_sta_params sta;
+	/* SSID of the BSS */
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* HAL should update the existing BSS entry, if this flag is set.
+	 * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+	 * Update */
+	u8 action;
+
+	/* MAC Rate Set */
+	struct wcn36xx_hal_rate_set rateset;
+
+	/* Enable/Disable HT capabilities of the BSS */
+	u8 ht;
+
+	/* Enable/Disable OBSS protection */
+	u8 obss_prot_enabled;
+
+	/* RMF enabled/disabled */
+	u8 rmf;
+
+	/* HT operating mode of the 802.11n STA */
+	enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+	/* Dual CTS Protection: 0 - Unused, 1 - Used */
+	u8 dual_cts_protection;
+
+	/* Probe Response Max retries */
+	u8 max_probe_resp_retry_limit;
+
+	/* To Enable Hidden ssid */
+	u8 hidden_ssid;
+
+	/* To Enable Disable FW Proxy Probe Resp */
+	u8 proxy_probe_resp;
+
+	/* Boolean to indicate if EDCA params are valid. UMAC might not
+	 * have valid EDCA params or might not desire to apply EDCA params
+	 * during config BSS. 0 implies not valid; non-zero implies
+	 * valid */
+	u8 edca_params_valid;
+
+	/* EDCA Parameters for Best Effort Access Category */
+	struct wcn36xx_hal_edca_param_record acbe;
+
+	/* EDCA Parameters for Background Access Category */
+	struct wcn36xx_hal_edca_param_record acbk;
+
+	/* EDCA Parameters for Video Access Category */
+	struct wcn36xx_hal_edca_param_record acvi;
+
+	/* EDCA Parameters for Voice Access Category */
+	struct wcn36xx_hal_edca_param_record acvo;
+
+	/* Ext Bss Config Msg if set */
+	u8 ext_set_sta_key_param_valid;
+
+	/* SetStaKeyParams for ext bss msg */
+	struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+	/* Persona for the BSS can be STA, AP, GO or CLIENT; value same
+	 * as enum wcn36xx_hal_con_mode */
+	u8 wcn36xx_hal_persona;
+
+	u8 spectrum_mgt_enable;
+
+	/* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+	s8 tx_mgmt_power;
+
+	/* maxTxPower has max power to be used after applying the power
+	 * constraint if any */
+	s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+	/* BSSID */
+	u8 bssid[ETH_ALEN];
+
+	/* Self Mac Address */
+	u8 self_mac_addr[ETH_ALEN];
+
+	/* BSS type */
+	enum wcn36xx_hal_bss_type bss_type;
+
+	/* Operational Mode: AP =0, STA = 1 */
+	u8 oper_mode;
+
+	/* Network Type */
+	enum wcn36xx_hal_nw_type nw_type;
+
+	/* Used to classify PURE_11G/11G_MIXED to program MTU */
+	u8 short_slot_time_supported;
+
+	/* Co-exist with 11a STA */
+	u8 lla_coexist;
+
+	/* Co-exist with 11b STA */
+	u8 llb_coexist;
+
+	/* Co-exist with 11g STA */
+	u8 llg_coexist;
+
+	/* Coexistence with 11n STA */
+	u8 ht20_coexist;
+
+	/* Non GF coexist flag */
+	u8 lln_non_gf_coexist;
+
+	/* TXOP protection support */
+	u8 lsig_tx_op_protection_full_support;
+
+	/* RIFS mode */
+	u8 rifs_mode;
+
+	/* Beacon Interval in TU */
+	u16 beacon_interval;
+
+	/* DTIM period */
+	u8 dtim_period;
+
+	/* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+	u8 tx_channel_width_set;
+
+	/* Operating channel */
+	u8 oper_channel;
+
+	/* Extension channel for channel bonding */
+	u8 ext_channel;
+
+	/* Reserved to align next field on a dword boundary */
+	u8 reserved;
+
+	/* SSID of the BSS */
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* HAL should update the existing BSS entry, if this flag is set.
+	 * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+	 * Update */
+	u8 action;
+
+	/* MAC Rate Set */
+	struct wcn36xx_hal_rate_set rateset;
+
+	/* Enable/Disable HT capabilities of the BSS */
+	u8 ht;
+
+	/* Enable/Disable OBSS protection */
+	u8 obss_prot_enabled;
+
+	/* RMF enabled/disabled */
+	u8 rmf;
+
+	/* HT operating mode of the 802.11n STA */
+	enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+	/* Dual CTS Protection: 0 - Unused, 1 - Used */
+	u8 dual_cts_protection;
+
+	/* Probe Response Max retries */
+	u8 max_probe_resp_retry_limit;
+
+	/* To Enable Hidden ssid */
+	u8 hidden_ssid;
+
+	/* To Enable Disable FW Proxy Probe Resp */
+	u8 proxy_probe_resp;
+
+	/* Boolean to indicate if EDCA params are valid. UMAC might not
+	 * have valid EDCA params or might not desire to apply EDCA params
+	 * during config BSS. 0 implies not valid; non-zero implies
+	 * valid */
+	u8 edca_params_valid;
+
+	/* EDCA Parameters for Best Effort Access Category */
+	struct wcn36xx_hal_edca_param_record acbe;
+
+	/* EDCA Parameters for Background Access Category */
+	struct wcn36xx_hal_edca_param_record acbk;
+
+	/* EDCA Parameters for Video Access Category */
+	struct wcn36xx_hal_edca_param_record acvi;
+
+	/* EDCA Parameters for Voice Access Category */
+	struct wcn36xx_hal_edca_param_record acvo;
+
+	/* Ext Bss Config Msg if set */
+	u8 ext_set_sta_key_param_valid;
+
+	/* SetStaKeyParams for ext bss msg */
+	struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+	/* Persona for the BSS can be STA, AP, GO or CLIENT; value same
+	 * as enum wcn36xx_hal_con_mode */
+	u8 wcn36xx_hal_persona;
+
+	u8 spectrum_mgt_enable;
+
+	/* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+	s8 tx_mgmt_power;
+
+	/* maxTxPower has max power to be used after applying the power
+	 * constraint if any */
+	s8 max_tx_power;
+
+	/* Context of the station being added in HW
+	 *  Add a STA entry for "itself" -
+	 *
+	 *  On AP  - Add the AP itself in an "STA context"
+	 *
+	 *  On STA - Add the AP to which this STA is joining in an
+	 *  "STA context"
+	 */
+	struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+	/* Success or Failure */
+	u32 status;
+
+	/* BSS index allocated by HAL */
+	u8 bss_index;
+
+	/* DPU descriptor index for PTK */
+	u8 dpu_desc_index;
+
+	/* PTK DPU signature */
+	u8 ucast_dpu_signature;
+
+	/* DPU descriptor index for GTK */
+	u8 bcast_dpu_desc_indx;
+
+	/* GTK DPU signature */
+	u8 bcast_dpu_signature;
+
+	/* DPU descriptor for IGTK */
+	u8 mgmt_dpu_desc_index;
+
+	/* IGTK DPU signature */
+	u8 mgmt_dpu_signature;
+
+	/* Station Index for BSS entry */
+	u8 bss_sta_index;
+
+	/* Self station index for this BSS */
+	u8 bss_self_sta_index;
+
+	/* Bcast station for buffering bcast frames in AP role */
+	u8 bss_bcast_sta_idx;
+
+	/* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+	u8 mac[ETH_ALEN];
+
+	/* HAL fills in the tx power used for mgmt frames in this field. */
+	s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSS index to be deleted */
+	u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Success or Failure */
+	u32 status;
+
+	/* BSS index that has been deleted */
+	u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Indicates the BSSID to which STA is going to associate */
+	u8 bssid[ETH_ALEN];
+
+	/* Indicates the channel to switch to. */
+	u8 channel;
+
+	/* Self STA MAC */
+	u8 self_sta_mac_addr[ETH_ALEN];
+
+	/* Local power constraint */
+	u8 local_power_constraint;
+
+	/* Secondary channel offset */
+	enum phy_chan_bond_state secondary_channel_offset;
+
+	/* link State */
+	enum wcn36xx_hal_link_state link_state;
+
+	/* Max TX power */
+	s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* HAL fills in the tx power used for mgmt frames in this field */
+	u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	struct wcn36xx_hal_config_sta_params sta_params;
+	struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct config_sta_rsp_params sta_rsp_params;
+	struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSS Index of the BSS */
+	u8 bss_idx;
+
+	/* Encryption Type used with peer */
+	enum ani_ed_type enc_type;
+
+	/* Number of keys */
+	u8 num_keys;
+
+	/* Array of keys. */
+	struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+	/* Control for Replay Count, 1= Single TID based replay count on Tx
+	 * 0 = Per TID based replay count on TX */
+	u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+	struct wcn36xx_hal_set_bss_key_req_msg Msg;
+	u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from the BSS the station is associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSS Index of the BSS */
+	u8 bss_idx;
+
+	/* Encryption Type used with peer */
+	enum ani_ed_type enc_type;
+
+	/* Key Id */
+	u8 key_id;
+
+	/* STATIC/DYNAMIC. Used when nullifying key descriptors for
+	 * static/dynamic keys */
+	enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* STA Index */
+	u16 sta_idx;
+
+	/* Encryption Type used with peer */
+	enum ani_ed_type enc_type;
+
+	/* Key Id */
+	u8 key_id;
+
+	/* Whether to invalidate the Broadcast key or Unicast key. In case
+	 * of WEP, the same key is used for both broadcast and unicast. */
+	u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 status;
+	tSirMacAddr self_mac_addr;
+	u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Channel number */
+	u8 channel_number;
+
+	/* Local power constraint */
+	u8 local_power_constraint;
+
+	/* Secondary channel offset */
+	enum phy_chan_bond_state secondary_channel_offset;
+
+	/* HAL fills in the tx power used for mgmt frames in this field. */
+	u8 tx_mgmt_power;
+
+	/* Max TX power */
+	u8 max_tx_power;
+
+	/* Self STA MAC */
+	u8 self_sta_mac_addr[ETH_ALEN];
+
+	/* VO WIFI comment: BSSID is needed to identify the session. As the
+	 * request has power constraints, this should be applied only to
+	 * that session. Since MTU timing and EDCA are sessionized, this
+	 * struct needs to be sessionized and bssid needs to be out of the
+	 * VOWifi feature flag. Very important: keep the bssId field at the
+	 * end of this msg. It is used to maintain backward compatibility by
+	 * way of ignoring it when using new host/old FW or old host/new FW,
+	 * since it is at the end of this struct.
+	 */
+	u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Status */
+	u32 status;
+
+	/* Channel number - same as in request */
+	u8 channel_number;
+
+	/* HAL fills in the tx power used for mgmt frames in this field */
+	u8 tx_mgmt_power;
+
+	/* BSSID needed to identify session - same as in request */
+	u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSS Index */
+	u16 bss_index;
+
+	/* Best Effort */
+	struct wcn36xx_hal_edca_param_record acbe;
+
+	/* Background */
+	struct wcn36xx_hal_edca_param_record acbk;
+
+	/* Video */
+	struct wcn36xx_hal_edca_param_record acvi;
+
+	/* Voice */
+	struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct dpu_stats_params {
+	/* Index of the STA to which the statistics belong */
+	u16 sta_index;
+
+	/* Encryption mode */
+	u8 enc_mode;
+
+	/* status */
+	u32 status;
+
+	/* Statistics */
+	u32 send_blocks;
+	u32 recv_blocks;
+	u32 replays;
+	u8 mic_error_cnt;
+	u32 prot_excl_cnt;
+	u16 format_err_cnt;
+	u16 un_decryptable_cnt;
+	u32 decrypt_err_cnt;
+	u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Valid STA Idx for per STA stats request */
+	u32 sta_id;
+
+	/* Categories of stats requested as specified in eHalStatsMask */
+	u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+	/* Total number of packets (per AC) that were successfully
+	 * transmitted with retries */
+	u32 retry_cnt[4];
+
+	/* The number of MSDU packets and MMPDU frames per AC that the
+	 * 802.11 station successfully transmitted after more than one
+	 * retransmission attempt */
+	u32 multiple_retry_cnt[4];
+
+	/* Total number of packets (per AC) that were successfully
+	 * transmitted (with and without retries, including multi-cast,
+	 * broadcast) */
+	u32 tx_frm_cnt[4];
+
+	/* Total number of packets that were successfully received (after
+	 * appropriate filter rules including multi-cast, broadcast) */
+	u32 rx_frm_cnt;
+
+	/* Total number of duplicate frames received successfully */
+	u32 frm_dup_cnt;
+
+	/* Total number of packets (per AC) that failed to transmit */
+	u32 fail_cnt[4];
+
+	/* Total number of RTS/CTS sequence failures for transmission of a
+	 * packet */
+	u32 rts_fail_cnt;
+
+	/* Total number of packets that failed to transmit because of no
+	 * ACK from the remote entity */
+	u32 ack_fail_cnt;
+
+	/* Total number of RTS/CTS sequence successes for transmission of
+	 * a packet */
+	u32 rts_succ_cnt;
+
+	/* The sum of the receive error count and dropped-receive-buffer
+	 * error count. HAL will provide this as a sum of (FCS error) +
+	 * (Fail get BD/PDU in HW) */
+	u32 rx_discard_cnt;
+
+	/*
+	 * The receive error count. HAL will provide the RxP FCS error
+	 * global counter. */
+	u32 rx_error_cnt;
+
+	/* The sum of the transmit-directed byte count, transmit-multicast
+	 * byte count and transmit-broadcast byte count. HAL will sum TPE
+	 * UC/MC/BCAST global counters to provide this. */
+	u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+	/* Legacy rates */
+	HAL_TX_RATE_LEGACY = 0x1,
+
+	/* HT20 rates */
+	HAL_TX_RATE_HT20 = 0x2,
+
+	/* HT40 rates */
+	HAL_TX_RATE_HT40 = 0x4,
+
+	/* Rate with Short guard interval */
+	HAL_TX_RATE_SGI = 0x8,
+
+	/* Rate with Long guard interval */
+	HAL_TX_RATE_LGI = 0x10
+};
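+
+/* Editor's note: the flags above combine, e.g. an HT40 rate with short
+ * guard interval is reported as (HAL_TX_RATE_HT40 | HAL_TX_RATE_SGI). */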
+
+struct ani_global_class_a_stats_info {
+	/* The number of MPDU frames received by the 802.11 station for
+	 * MSDU packets or MMPDU frames */
+	u32 rx_frag_cnt;
+
+	/* The number of MPDU frames received by the 802.11 station for
+	 * MSDU packets or MMPDU frames when a promiscuous packet filter
+	 * was enabled */
+	u32 promiscuous_rx_frag_cnt;
+
+	/* The receiver input sensitivity referenced to a FER of 8% at an
+	 * MPDU length of 1024 bytes at the antenna connector. Each element
+	 * of the array shall correspond to a supported rate and the order
+	 * shall be the same as the supportedRates parameter. */
+	u32 rx_input_sensitivity;
+
+	/* The maximum transmit power in dBm, up to one decimal place. For
+	 * example, if it is 10.5 dBm, the value would be 105 */
+	u32 max_pwr;
+
+	/* Number of times the receiver failed to synchronize with the
+	 * incoming signal after detecting the sync in the preamble of the
+	 * transmitted PLCP protocol data unit. */
+	u32 sync_fail_cnt;
+
+	/* Legacy transmit rate, in units of 500 kbit/sec, for the most
+	 * recently transmitted frame */
+	u32 tx_rate;
+
+	/* mcs index for HT20 and HT40 rates */
+	u32 mcs_index;
+
+	/* to differentiate between HT20 and HT40 rates; short and long
+	 * guard interval */
+	u32 tx_rate_flags;
+};
+
+struct ani_global_security_stats {
+	/* The number of unencrypted received MPDU frames that the MAC
+	 * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+	 * management information base (MIB) object is enabled */
+	u32 rx_wep_unencrypted_frm_cnt;
+
+	/* The number of received MSDU packets that the 802.11 station
+	 * discarded because of MIC failures */
+	u32 rx_mic_fail_cnt;
+
+	/* The number of encrypted MPDU frames that the 802.11 station
+	 * failed to decrypt because of a TKIP ICV error */
+	u32 tkip_icv_err;
+
+	/* The number of received MPDU frames that the 802.11 station
+	 * discarded because of an invalid AES-CCMP format */
+	u32 aes_ccmp_format_err;
+
+	/* The number of received MPDU frames that the 802.11 station
+	 * discarded because of the AES-CCMP replay protection procedure */
+	u32 aes_ccmp_replay_cnt;
+
+	/* The number of received MPDU frames that the 802.11 station
+	 * discarded because of errors detected by the AES-CCMP decryption
+	 * algorithm */
+	u32 aes_ccmp_decrpt_err;
+
+	/* The number of encrypted MPDU frames received for which a WEP
+	 * decryption key was not available on the 802.11 station */
+	u32 wep_undecryptable_cnt;
+
+	/* The number of encrypted MPDU frames that the 802.11 station
+	 * failed to decrypt because of a WEP ICV error */
+	u32 wep_icv_err;
+
+	/* The number of received encrypted packets that the 802.11 station
+	 * successfully decrypted */
+	u32 rx_decrypt_succ_cnt;
+
+	/* The number of encrypted packets that the 802.11 station failed
+	 * to decrypt */
+	u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+	struct ani_global_security_stats uc_stats;
+	struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+	/* This counter shall be incremented for a received A-MSDU frame
+	 * with the station's MAC address in the address 1 field or an
+	 * A-MSDU frame with a group address in the address 1 field */
+	u32 rx_amsdu_cnt;
+
+	/* This counter shall be incremented when the MAC receives an AMPDU
+	 * from the PHY */
+	u32 rx_ampdu_cnt;
+
+	/* This counter shall be incremented when a Frame is transmitted
+	 * only on the primary channel */
+	u32 tx_20_frm_cnt;
+
+	/* This counter shall be incremented when a Frame is received only
+	 * on the primary channel */
+	u32 rx_20_frm_cnt;
+
+	/* This counter shall be incremented by the number of MPDUs
+	 * received in the A-MPDU when an A-MPDU is received */
+	u32 rx_mpdu_in_ampdu_cnt;
+
+	/* This counter shall be incremented when an MPDU delimiter has a
+	 * CRC error when this is the first CRC error in the received AMPDU
+	 * or when the previous delimiter has been decoded correctly */
+	u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+	/* The number of MPDU frames that the 802.11 station transmitted
+	 * and acknowledged through a received 802.11 ACK frame */
+	u32 tx_frag_cnt[4];
+
+	/* This counter shall be incremented when an A-MPDU is transmitted */
+	u32 tx_ampdu_cnt;
+
+	/* This counter shall be incremented by the number of MPDUs in the AMPDU
+	 * when an A-MPDU is transmitted */
+	u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Success or Failure */
+	u32 status;
+
+	/* STA Idx */
+	u32 sta_index;
+
+	/* Categories of STATS being returned as per eHalStatsMask */
+	u32 stats_mask;
+
+	/* message type is same as the request type */
+	u16 msg_type;
+
+	/* length of the entire request, includes the pStatsBuf length too */
+	u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bssid[ETH_ALEN];
+	enum wcn36xx_hal_link_state state;
+	u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+	u16 ackPolicy:2;
+	u16 userPrio:3;
+	u16 psb:1;
+	u16 aggregation:1;
+	u16 accessPolicy:2;
+	u16 direction:2;
+	u16 tsid:4;
+	u16 trafficType:1;
+#else
+	u16 trafficType:1;
+	u16 tsid:4;
+	u16 direction:2;
+	u16 accessPolicy:2;
+	u16 aggregation:1;
+	u16 psb:1;
+	u16 userPrio:3;
+	u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+	u8 rsvd:7;
+	u8 schedule:1;
+#else
+	u8 schedule:1;
+	u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+	struct wcn36xx_hal_ts_info_tfc traffic;
+	struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+	u8 type;
+	u8 length;
+	struct wcn36xx_hal_ts_info ts_info;
+	u16 nom_msdu_size;
+	u16 max_msdu_size;
+	u32 min_svc_interval;
+	u32 max_svc_interval;
+	u32 inact_interval;
+	u32 suspend_interval;
+	u32 svc_start_time;
+	u32 min_data_rate;
+	u32 mean_data_rate;
+	u32 peak_data_rate;
+	u32 max_burst_sz;
+	u32 delay_bound;
+	u32 min_phy_rate;
+	u16 surplus_bw;
+	u16 medium_time;
+};
+
+struct add_ts_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index */
+	u16 sta_index;
+
+	/* TSPEC handle uniquely identifying a TSPEC for a STA in a BSS */
+	u16 tspec_index;
+
+	/* To program TPE with required parameters */
+	struct wcn36xx_hal_tspec_ie tspec;
+
+	/* U-APSD Flags: 1b per AC.  Encoded as follows:
+	   b7 b6 b5 b4 b3 b2 b1 b0 =
+	   X  X  X  X  BE BK VI VO */
+	u8 uapsd;
+
+	/* These parameters are for all the access categories */
+
+	/* Service Interval */
+	u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+	/* Suspend Interval */
+	u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+	/* Delay Interval */
+	u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct add_rs_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct del_ts_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index */
+	u16 sta_index;
+
+	/* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+	u16 tspec_index;
+
+	/* To look up the station id using the MAC address */
+	u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index */
+	u16 sta_index;
+
+	/* Peer MAC Address */
+	u8 mac_addr[ETH_ALEN];
+
+	/* ADDBA Action Frame dialog token
+	   HAL will not interpret this object */
+	u8 dialog_token;
+
+	/* TID for which the BA is being setup
+	   This identifies the TC or TS of interest */
+	u8 tid;
+
+	/* 0 - Delayed BA (Not supported)
+	   1 - Immediate BA */
+	u8 policy;
+
+	/* Indicates the number of buffers for this TID (baTID)
+	   NOTE - This is the requested buffer size. When this
+	   is processed by HAL and subsequently by HDD, it is
+	   possible that HDD may change this buffer size. Any
+	   change in the buffer size should be noted by PE and
+	   advertised appropriately in the ADDBA response */
+	u16 buffer_size;
+
+	/* BA timeout in TUs; 0 means no timeout will occur */
+	u16 timeout;
+
+	/* b0..b3 - Fragment Number - Always set to 0
+	   b4..b15 - Starting Sequence Number of first MSDU
+	   for which this BA is setup */
+	u16 ssn;
+
+	/* ADDBA direction
+	   1 - Originator
+	   0 - Recipient */
+	u8 direction;
+} __packed;
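+
+/*
+ * Editor's sketch of the 'ssn' packing described above: fragment number
+ * in b0..b3 (always 0), starting sequence number in b4..b15.
+ */
+static inline u16 wcn36xx_hal_pack_ba_ssn(u16 seq_num)
+{
+	return (seq_num & 0x0fff) << 4;
+}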
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Dialog token */
+	u8 dialog_token;
+
+	/* TID for which the BA session has been setup */
+	u8 ba_tid;
+
+	/* BA Buffer Size allocated for the current BA session */
+	u8 ba_buffer_size;
+
+	u8 ba_session_id;
+
+	/* Reordering Window buffer */
+	u8 win_size;
+
+	/* Station index identifying the STA */
+	u8 sta_index;
+
+	/* Starting Sequence Number */
+	u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Session Id */
+	u8 session_id;
+
+	/* Reorder Window Size */
+	u8 win_size;
+/* Old FW 1.2.2.4 does not support this */
+#ifdef FEATURE_ON_CHIP_REORDERING
+	u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Dialog token */
+	u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+	u16 ba_enable:1;
+	u16 starting_seq_num:12;
+	u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+	u8 sta_addr[ETH_ALEN];
+	struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+	u8 sta_index;
+	u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Session Id */
+	u8 session_id;
+
+	/* baCandidateCnt is followed by trigger BA
+	 * Candidate List (tTriggerBaCandidate)
+	 */
+	u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* TO SUPPORT BT-AMP */
+	u8 bssid[ETH_ALEN];
+
+	/* success or failure */
+	u32 status;
+
+	/* baCandidateCnt is followed by trigger BA
+	 * Rsp Candidate List (tTriggerRspBaCandidate)
+	 */
+	u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index */
+	u16 sta_index;
+
+	/* TID for which the BA session is being deleted */
+	u8 tid;
+
+	/* DELBA direction
+	   1 - Originator
+	   0 - Recipient */
+	u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Traffic Id */
+	u8 tid;
+
+	u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Uplink Packet Queue delay */
+	u16 uplink_pkt_queue_delay;
+
+	/* Uplink Packet Queue delay histogram */
+	u16 uplink_pkt_queue_delay_hist[4];
+
+	/* Uplink Packet Transmit delay */
+	u32 uplink_pkt_tx_delay;
+
+	/* Uplink Packet loss */
+	u16 uplink_pkt_loss;
+
+	/* Uplink Packet count */
+	u16 uplink_pkt_count;
+
+	/* Roaming count */
+	u8 roaming_count;
+
+	/* Roaming Delay */
+	u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSS index of the keys */
+	u8 bssidx;
+	u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+	/* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+	 * messages should be
+	 * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+	 * nv_img_buffer_size */
+	struct wcn36xx_hal_msg_header header;
+
+	/* Fragment sequence number of the NV Image. Note that NV Image
+	 * might not fit into one message due to size limitation of the SMD
+	 * channel FIFO. UMAC can hence choose to chop the NV blob into
+	 * multiple fragments starting with sequence number 0, 1, 2 etc.
+	 * The last fragment MUST be indicated by marking the
+	 * isLastFragment field to 1. Note that all the NV blobs would be
+	 * concatenated together by HAL without any padding bytes in
+	 * between.*/
+	u16 frag_number;
+
+	/* Is this the last fragment? When set to 1 it indicates that no
+	 * more fragments will be sent by UMAC and HAL can concatenate all
+	 * the NV blobs rcvd & proceed with the parsing. HAL would generate
+	 * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+	 * after it receives each fragment */
+	u16 last_fragment;
+
+	/* NV Image size (number of bytes) */
+	u32 nv_img_buffer_size;
+
+	/* Following the 'nv_img_buffer_size', there should be
+	 * nv_img_buffer_size bytes of NV Image i.e.
+	 * u8[nv_img_buffer_size] */
+} __packed;
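+
+/* Editorial sketch: a minimal, hypothetical helper showing how one NV
+ * download fragment could be filled in, following the header.len rule and
+ * the last_fragment semantics documented above. The helper name is
+ * illustrative, not part of the firmware interface; it assumes chunk_size
+ * bytes of NV data are appended directly after the structure. */
+static inline void wcn36xx_hal_nv_frag_fill(
+	struct wcn36xx_hal_nv_img_download_req_msg *msg,
+	u16 frag_number, u16 last_fragment, u32 chunk_size)
+{
+	msg->frag_number = frag_number;
+	/* 1 only for the final fragment, so HAL can start parsing */
+	msg->last_fragment = last_fragment;
+	msg->nv_img_buffer_size = chunk_size;
+	/* total length = fixed part + the NV bytes that follow it */
+	msg->header.len = sizeof(*msg) + chunk_size;
+}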
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Success or Failure. HAL would generate a
+	 * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+	/* Note: The length specified in tHalNvStoreInd messages should be
+	 * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+	struct wcn36xx_hal_msg_header header;
+
+	/* NV Item */
+	u32 table_id;
+
+	/* Size of NV Blob */
+	u32 nv_blob_size;
+
+	/* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+	 * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet.
+ */
+struct mic_failure_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bssid[ETH_ALEN];
+
+	/* address used to compute MIC */
+	u8 src_addr[ETH_ALEN];
+
+	/* transmitter address */
+	u8 ta_addr[ETH_ALEN];
+
+	u8 dst_addr[ETH_ALEN];
+
+	u8 multicast;
+
+	/* first byte of IV */
+	u8 iv1;
+
+	/* second byte of IV */
+	u8 key_id;
+
+	/* sequence number */
+	u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+	/* receive address */
+	u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u16 op_mode;
+	u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 status;
+};
+
+struct update_beacon_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bss_index;
+
+	/* shortPreamble mode. HAL should update all the STA rates when it
+	 * receives this message */
+	u8 short_preamble;
+
+	/* short Slot time. */
+	u8 short_slot_time;
+
+	/* Beacon Interval */
+	u16 beacon_interval;
+
+	/* Protection related */
+	u8 lla_coexist;
+	u8 llb_coexist;
+	u8 llg_coexist;
+	u8 ht20_coexist;
+	u8 lln_non_gf_coexist;
+	u8 lsig_tx_op_protection_full_support;
+	u8 rifs_mode;
+
+	u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* length of the template. */
+	u32 beacon_length;
+
+	/* Beacon data. */
+	u8 beacon[BEACON_TEMPLATE_SIZE];
+
+	u8 bssid[ETH_ALEN];
+
+	/* TIM IE offset from the beginning of the template. */
+	u32 tim_ie_offset;
+
+	/* P2P IE offset from the beginning of the template */
+	u16 p2p_ie_offset;
+} __packed;
+
+struct send_beacon_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bssid[ETH_ALEN];
+	u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Link Parameters */
+	u8 bssid[ETH_ALEN];
+
+	/* success or failure */
+	u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* channel number on which the RADAR was detected */
+	u8 channel_number;
+
+	/* RADAR pulse width in microseconds */
+	u16 radar_pulse_width;
+
+	/* Number of RADAR pulses */
+	u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 sta[ETH_ALEN];
+	u8 dialog_token;
+	u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+	u32 probe_resp_template_len;
+	u32 proxy_probe_req_valid_ie_bmap[8];
+	u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u16 aid;
+	u16 sta_id;
+
+	/* TO SUPPORT BT-AMP */
+	u8 bssid[ETH_ALEN];
+
+	/* HAL copies bssid from the sta table. */
+	u8 addr2[ETH_ALEN];
+
+	/* To unify the keepalive / unknown A2 / tim-based disassociation */
+	u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+	struct wcn36xx_hal_msg_header header;
+	u8 aid;
+	u8 sta_index;
+	u8 bss_index;
+	u8 reason_code;
+	u32 status;
+};
+
+struct bt_amp_event_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index. originates from HAL */
+	u8 sta_id;
+
+	/* TID for which the transmit queue is being flushed */
+	u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index. originates from HAL */
+	u8 sta_id;
+
+	/* TID for which the transmit queue is being flushed */
+	u8 tid;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+	struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+	struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bss_index;
+
+	/* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+	u64 tbtt;
+#endif
+	u8 dtim_count;
+
+	/* DTIM period given to HAL during association may not be valid, if
+	 * association is based on ProbeRsp instead of beacon. */
+	u8 dtim_period;
+
+	/* For CCX and 11R Roaming */
+	u32 rssi_filter_period;
+
+	u32 num_beacon_per_rssi_average;
+	u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 send_data_null;
+	u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The beacon filter request message (struct
+ * wcn36xx_hal_add_bcn_filter_req_msg below) is followed by ie_num
+ * instances of the following structure
+ */
+struct beacon_filter_ie {
+	u8 element_id;
+	u8 check_ie_presence;
+	u8 offset;
+	u8 value;
+	u8 bitmask;
+	u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u16 capability_info;
+	u16 capability_mask;
+	u16 beacon_interval;
+	u16 ie_num;
+	u8 bss_index;
+	u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 ie_Count;
+	u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD                  0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD         1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD                         2
+#define WCN36XX_HAL_IPV6_ADDR_LEN                           16
+#define WCN36XX_HAL_OFFLOAD_DISABLE                         0
+#define WCN36XX_HAL_OFFLOAD_ENABLE                          1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE             0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE	\
+	(WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+	u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+	u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+	/* Only support 2 possible Network Advertisement IPv6 address */
+	u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+	u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+	u8 self_addr[ETH_ALEN];
+	u8 src_ipv6_addr_valid:1;
+	u8 target_ipv6_addr1_valid:1;
+	u8 target_ipv6_addr2_valid:1;
+	u8 reserved1:5;
+
+	/* make it DWORD aligned */
+	u8 reserved2;
+
+	/* slot index for this offload */
+	u32 slot_index;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+	u8 offload_Type;
+
+	/* enable or disable */
+	u8 enable;
+
+	union {
+		u8 host_ipv4_addr[4];
+		u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+	} u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_host_offload_req host_offload_params;
+	struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
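+
+/* Editorial sketch: enabling IPv4 ARP reply offload with the definitions
+ * above. The helper name is hypothetical; only fields declared in
+ * struct wcn36xx_hal_host_offload_req are touched. */
+static inline void wcn36xx_hal_arp_offload_fill(
+	struct wcn36xx_hal_host_offload_req *req, const u8 ipv4_addr[4])
+{
+	req->offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+	req->enable = WCN36XX_HAL_OFFLOAD_ENABLE;
+	memcpy(req->u.host_ipv4_addr, ipv4_addr,
+	       sizeof(req->u.host_ipv4_addr));
+}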
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT              1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP     2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE   0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE    1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD	 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 packet_type;
+	u32 time_period;
+	u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+	u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+	u8 dest_addr[ETH_ALEN];
+	u8 bss_index;
+} __packed;
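+
+/* Editorial sketch: configuring a 30 second NULL-frame keep-alive with the
+ * definitions above. The helper name is hypothetical; the IPv4 addresses
+ * are presumably only needed for the unsolicited ARP response type and are
+ * left untouched here. */
+static inline void wcn36xx_hal_keep_alive_null_fill(
+	struct wcn36xx_hal_keep_alive_req_msg *msg, u8 bss_index)
+{
+	msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+	msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD; /* seconds */
+	msg->bss_index = bss_index;
+}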
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	s8 threshold1:8;
+	s8 threshold2:8;
+	s8 threshold3:8;
+	u8 thres1_pos_notify:1;
+	u8 thres1_neg_notify:1;
+	u8 thres2_pos_notify:1;
+	u8 thres2_neg_notify:1;
+	u8 thres3_pos_notify:1;
+	u8 thres3_neg_notify:1;
+	u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bk_delivery:1;
+	u8 be_delivery:1;
+	u8 vi_delivery:1;
+	u8 vo_delivery:1;
+	u8 bk_trigger:1;
+	u8 be_trigger:1;
+	u8 vi_trigger:1;
+	u8 vo_trigger:1;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Pattern ID */
+	u8 id;
+
+	/* Pattern byte offset from beginning of the 802.11 packet to start
+	 * of the wake-up pattern */
+	u8 byte_Offset;
+
+	/* Non-Zero Pattern size */
+	u8 size;
+
+	/* Pattern */
+	u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+	/* Non-zero pattern mask size */
+	u8 mask_size;
+
+	/* Pattern mask */
+	u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+	/* Extra pattern */
+	u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+	/* Extra pattern mask */
+	u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+	u8 bss_index;
+};
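+
+/* Editorial sketch: one plausible reading of how a pattern/mask pair from
+ * the request above is matched, with each mask byte ANDed against both
+ * pattern and frame, starting byte_Offset bytes into the 802.11 packet.
+ * Matching is actually done in firmware; this hypothetical helper only
+ * illustrates the field layout. */
+static inline bool wcn36xx_hal_wow_pattern_match(
+	const struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg *req,
+	const u8 *frame, u32 frame_len)
+{
+	u32 i;
+
+	if (req->byte_Offset + req->size > frame_len)
+		return false;
+
+	for (i = 0; i < req->size; i++) {
+		u8 mask = i < req->mask_size ? req->mask[i] : 0xff;
+
+		if ((frame[req->byte_Offset + i] & mask) !=
+		    (req->pattern[i] & mask))
+			return false;
+	}
+	return true;
+}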
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Pattern ID of the wakeup pattern to be deleted */
+	u8 id;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Enables/disables magic packet filtering */
+	u8 magic_packet_enable;
+
+	/* Magic pattern */
+	u8 magic_pattern[ETH_ALEN];
+
+	/* Enables/disables packet pattern filtering in firmware. Enabling
+	 * this flag enables broadcast pattern matching in Firmware. If
+	 * unicast pattern matching is also desired,
+	 * ucUcastPatternFilteringEnable flag must be set to true as well
+	 */
+	u8 pattern_filtering_enable;
+
+	/* Enables/disables unicast packet pattern filtering. This flag
+	 * specifies whether we want to do pattern match on unicast packets
+	 * as well and not just broadcast packets. This flag has no effect
+	 * if the ucPatternFilteringEnable (main controlling flag) is set
+	 * to false
+	 */
+	u8 ucast_pattern_filtering_enable;
+
+	/* This configuration is valid only when magicPktEnable=1. It
+	 * requests hardware to wake up when it receives the Channel Switch
+	 * Action Frame.
+	 */
+	u8 wow_channel_switch_receive;
+
+	/* This configuration is valid only when magicPktEnable=1. It
+	 * requests hardware to wake up when it receives the
+	 * Deauthentication Frame.
+	 */
+	u8 wow_deauth_receive;
+
+	/* This configuration is valid only when magicPktEnable=1. It
+	 * requests hardware to wake up when it receives the Disassociation
+	 * Frame.
+	 */
+	u8 wow_disassoc_receive;
+
+	/* This configuration is valid only when magicPktEnable=1. It
+	 * requests hardware to wake up when it has missed consecutive
+	 * beacons. This is a hardware register configuration (NOT a
+	 * firmware configuration).
+	 */
+	u8 wow_max_missed_beacons;
+
+	/* This configuration is valid only when magicPktEnable=1. This is
+	 * a timeout value in units of microsec. It requests hardware to
+	 * unconditionally wake up after it has stayed in WoWLAN mode for
+	 * some time. Set 0 to disable this feature.
+	 */
+	u8 wow_max_sleep;
+
+	/* This configuration directs the WoW packet filtering to look for
+	 * EAP-ID requests embedded in EAPOL frames and use this as a wake
+	 * source.
+	 */
+	u8 wow_eap_id_request_enable;
+
+	/* This configuration directs the WoW packet filtering to look for
+	 * EAPOL-4WAY requests and use this as a wake source.
+	 */
+	u8 wow_eapol_4way_enable;
+
+	/* This configuration allows a host wakeup on a network scan
+	 * offload match.
+	 */
+	u8 wow_net_scan_offload_match;
+
+	/* This configuration allows a host wakeup on any GTK rekeying
+	 * error.
+	 */
+	u8 wow_gtk_rekey_error;
+
+	/* This configuration allows a host wakeup on BSS connection loss.
+	 */
+	u8 wow_bss_connection_loss;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+	struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Valid STA Idx for per STA stats request */
+	u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* STA index */
+	u8 sta_idx;
+
+	/* Access Category */
+	u8 ac;
+
+	/* User Priority */
+	u8 up;
+
+	/* Service Interval */
+	u32 service_interval;
+
+	/* Suspend Interval */
+	u32 suspend_interval;
+
+	/* Delay Interval */
+	u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 set_mcst_bcst_filter_setting;
+	u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 rssi_thres1_pos_cross:1;
+	u32 rssi_thres1_neg_cross:1;
+	u32 rssi_thres2_pos_cross:1;
+	u32 rssi_thres2_neg_cross:1;
+	u32 rssi_thres3_pos_cross:1;
+	u32 rssi_thres3_neg_cross:1;
+	u32 avg_rssi:8;
+	u32 reserved:18;
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+	s8 rssi;
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 sta_id;
+	s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct set_max_tx_pwr_req {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSSID is needed to identify which session issued this request.
+	 * As the request has power constraints, this should be applied
+	 * only to that session */
+	u8 bssid[ETH_ALEN];
+
+	u8 self_addr[ETH_ALEN];
+
+	/* In request, power == MaxTx power to be used. */
+	u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* power == tx power used for management frames */
+	u8 power;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* TX Power in milli watts */
+	u32 tx_power;
+
+	u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* TX Power in milli watts */
+	u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 opp_ps;
+	u32 ct_window;
+	u8 count;
+	u32 duration;
+	u32 interval;
+	u32 single_noa_duration;
+	u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 self_addr[ETH_ALEN];
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Self STA Index */
+	u8 self_sta_index;
+
+	/* DPU Index (IGTK, PTK, GTK all same) */
+	u8 dpu_index;
+
+	/* DPU Signature */
+	u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Station Index */
+	u16 sta_idx;
+
+	/* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+	 * This will carry the bitmap with the bit positions representing
+	 * different ACs */
+	u16 tspec_index;
+
+	/* Tspec info per AC To program TPE with required parameters */
+	struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+	/* U-APSD Flags: 1b per AC.  Encoded as follows:
+	   b7 b6 b5 b4 b3 b2 b1 b0 =
+	   X  X  X  X  BE BK VI VO */
+	u8 uapsd;
+
+	/* These parameters are for all the access categories */
+
+	/* Service Interval */
+	u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+	/* Suspend Interval */
+	u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+	/* Delay Interval */
+	u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status0;
+
+	/* FIXME PRIMA for future use for 11R */
+	u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 arg1;
+	u32 arg2;
+	u32 arg3;
+	u32 arg4;
+	u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* Length of the response message */
+	u32 rsp_length;
+
+	/* FIXME: Currently considering that the response will be less than
+	 * 100 bytes */
+	u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Coex Indication Type */
+	u32 type;
+
+	/* Coex Indication Data */
+	u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Tx Complete Indication Success or Failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 configured_mcst_bcst_filter_setting;
+	u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 dot11_exclude_unencrypted;
+	u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 index;
+	u8 opp_ps_flag;
+	u16 ctwin;
+
+	u16 noa1_interval_count;
+	u16 bss_index;
+	u32 noa1_duration;
+	u32 noa1_interval;
+	u32 noa1_starttime;
+
+	u16 noa2_interval_count;
+	u16 reserved2;
+	u32 noa2_duration;
+	u32 noa2_interval;
+	u32 noa2_start_time;
+
+	u32 status;
+};
+
+struct noa_start_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 status;
+	u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u16 sta_idx;
+
+	/* Peer MAC Address, whose BA session has timed out */
+	u8 peer_addr[ETH_ALEN];
+
+	/* TID for which a BA session timeout is being triggered */
+	u8 ba_tid;
+
+	/* DELBA direction
+	 * 1 - Originator
+	 * 0 - Recipient
+	 */
+	u8 direction;
+
+	u32 reason_code;
+
+	/* TO SUPPORT BT-AMP */
+	u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS  26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX  60
+
+/* Maximum number of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS  16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS    10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE     450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+	PNO_MODE_IMMEDIATE,
+	PNO_MODE_ON_SUSPEND,
+	PNO_MODE_ON_RESUME,
+	PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+	AUTH_TYPE_ANY = 0,
+	AUTH_TYPE_OPEN_SYSTEM = 1,
+
+	/* Upper layer authentication types */
+	AUTH_TYPE_WPA = 2,
+	AUTH_TYPE_WPA_PSK = 3,
+
+	AUTH_TYPE_RSN = 4,
+	AUTH_TYPE_RSN_PSK = 5,
+	AUTH_TYPE_FT_RSN = 6,
+	AUTH_TYPE_FT_RSN_PSK = 7,
+	AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+	AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+	AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+	ED_ANY = 0,
+	ED_NONE = 1,
+	ED_WEP = 2,
+	ED_TKIP = 3,
+	ED_CCMP = 4,
+	ED_WPI = 5,
+
+	ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast  type */
+enum ssid_bcast_type {
+	BCAST_UNKNOWN = 0,
+	BCAST_NORMAL = 1,
+	BCAST_HIDDEN = 2,
+
+	BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description for which PNO will have to look for */
+struct network_type {
+	/* SSID of the BSS */
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* Authentication type for the network */
+	enum auth_type authentication;
+
+	/* Encryption type for the network */
+	enum ed_type encryption;
+
+	/* Indicates the channels on which the network can be found;
+	 * 0 - all channels */
+	u8 channel_count;
+	u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+	/* Indicates the RSSI threshold for the network to be considered */
+	u8 rssi_threshold;
+};
+
+struct scan_timer {
+	/* How long it should wait */
+	u32 value;
+
+	/* How many times it should repeat that wait; value 0 - keep using
+	 * this timer until PNO is disabled */
+	u32 repeat;
+
+	/* e.g: 2 3 4 0 - it will wait 2s between consecutive scans for 3
+	 * times - after that it will wait 4s between consecutive scans
+	 * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+	/* set to 0 if you wish for PNO to use its default telescopic timer */
+	u8 count;
+
+	/* A set value represents the amount of time that PNO will wait
+	 * between two consecutive scan procedures. If a uniform timer that
+	 * always fires at the exact same interval is desired, a single
+	 * value is to be set. If there is a desire for a more complex,
+	 * telescopic-like timer, multiple values can be set - once PNO
+	 * reaches the end of the array it will continue scanning at
+	 * intervals given by the last value */
+	struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
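+
+/* Editorial sketch: the telescopic timer example from the comment on
+ * struct scan_timer above - wait 2s between scans three times, then 4s
+ * until PNO is disabled. The helper name is hypothetical. */
+static inline void wcn36xx_hal_scan_timers_telescopic_example(
+	struct scan_timers_type *timers)
+{
+	timers->count = 2;
+	timers->values[0].value = 2;	/* wait 2s between scans... */
+	timers->values[0].repeat = 3;	/* ...three times */
+	timers->values[1].value = 4;	/* then 4s between scans... */
+	timers->values[1].repeat = 0;	/* ...until PNO is disabled */
+}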
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Enable PNO */
+	u32 enable;
+
+	/* Immediate,  On Suspend,   On Resume */
+	enum pno_mode mode;
+
+	/* Number of networks sent for PNO */
+	u32 networks_count;
+
+	/* The networks that PNO needs to look for */
+	struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+	/* The scan timers required for PNO */
+	struct scan_timers_type scan_timers;
+
+	/* Probe template for 2.4GHz band */
+	u16 band_24g_probe_size;
+	u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+	/* Probe template for 5GHz band */
+	u16 band_5g_probe_size;
+	u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* The network description for which PNO will have to look for */
+struct network_type_new {
+	/* SSID of the BSS */
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* Authentication type for the network */
+	enum auth_type authentication;
+
+	/* Encryption type for the network */
+	enum ed_type encryption;
+
+	/* SSID broadcast type, normal, hidden or unknown */
+	enum ssid_bcast_type bcast_network_type;
+
+	/* Indicates the channels on which the network can be found;
+	 * 0 - all channels */
+	u8 channel_count;
+	u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+	/* Indicates the RSSI threshold for the network to be considered */
+	u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Enable PNO */
+	u32 enable;
+
+	/* Immediate,  On Suspend,   On Resume */
+	enum pno_mode mode;
+
+	/* Number of networks sent for PNO */
+	u32 networks_count;
+
+	/* The networks that PNO needs to look for */
+	struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+	/* The scan timers required for PNO */
+	struct scan_timers_type scan_timers;
+
+	/* Probe template for 2.4GHz band */
+	u16 band_24g_probe_size;
+	u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+	/* Probe template for 5GHz band */
+	u16 band_5g_probe_size;
+	u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+	struct wcn36xx_hal_msg_header header;
+
+	/* status of the request - just to indicate that PNO has
+	 * acknowledged the request and will start scanning */
+	u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* Network that was found with the highest RSSI */
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* Indicates the RSSI */
+	u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+	struct wcn36xx_hal_msg_header header;
+
+	/* RSSI Threshold */
+	u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+	struct wcn36xx_hal_msg_header header;
+
+	/* status of the request */
+	u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* Host setting for 11d */
+	u8 dot11d_enabled;
+
+	/* Lets PNO know that host has determined the regulatory domain */
+	u8 dot11d_resolved;
+
+	/* Channels on which PNO is allowed to scan */
+	u8 channel_count;
+	u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+	/* Minimum channel time */
+	u16 active_min_ch_time;
+
+	/* Maximum channel time */
+	u16 active_max_ch_time;
+
+	/* Minimum channel time */
+	u16 passive_min_ch_time;
+
+	/* Maximum channel time */
+	u16 passive_max_ch_time;
+
+	/* Cb State */
+	enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* Host setting for 11d */
+	u8 dot11d_enabled;
+
+	/* Lets PNO know that host has determined the regulatory domain */
+	u8 dot11d_resolved;
+
+	/* Channels on which PNO is allowed to scan */
+	u8 channel_count;
+	u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+	/* Minimum channel time */
+	u16 active_min_ch_time;
+
+	/* Maximum channel time */
+	u16 active_max_ch_time;
+
+	/* Minimum channel time */
+	u16 passive_min_ch_time;
+
+	/* Maximum channel time */
+	u16 passive_max_ch_time;
+
+	/* Cb State */
+	enum phy_chan_bond_state state;
+};
+
+/* Update scan params response - sent from PNO to host after the update
+ * scan params request has been processed */
+struct wcn36xx_hal_update_scan_params_resp {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* status of the request */
+	u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* 0: disable, 1:enable */
+	u8 tx_per_tracking_enable;
+
+	/* Check period, unit is sec. */
+	u8 tx_per_tracking_period;
+
+	/* (Failed TX packets)/(Total TX packets) ratio, in units of 10%. */
+	u8 tx_per_tracking_ratio;
+
+	/* A watermark of check number; once the tx packet count exceeds
+	 * this number, we do the check, default is 5 */
+	u32 tx_per_tracking_watermark;
+};
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+struct tx_per_hit_ind_msg {
+	struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define    WCN36XX_HAL_PROTOCOL_DATA_LEN                  8
+#define    WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS        240
+#define    WCN36XX_HAL_MAX_NUM_FILTERS                   20
+#define    WCN36XX_HAL_MAX_CMP_PER_FILTER                10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+	HAL_RCV_FILTER_TYPE_INVALID,
+	HAL_RCV_FILTER_TYPE_FILTER_PKT,
+	HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+	HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+	HAL_FILTER_PROTO_TYPE_INVALID,
+	HAL_FILTER_PROTO_TYPE_MAC,
+	HAL_FILTER_PROTO_TYPE_ARP,
+	HAL_FILTER_PROTO_TYPE_IPV4,
+	HAL_FILTER_PROTO_TYPE_IPV6,
+	HAL_FILTER_PROTO_TYPE_UDP,
+	HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+	HAL_FILTER_CMP_TYPE_INVALID,
+	HAL_FILTER_CMP_TYPE_EQUAL,
+	HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+	HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+	HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+	u8 protocol_layer;
+	u8 cmp_flag;
+
+	/* Length of the data to compare */
+	u16 data_length;
+
+	/* from start of the respective frame header */
+	u8 data_offset;
+
+	/* Reserved field */
+	u8 reserved;
+
+	/* Data to compare */
+	u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+	/* Mask to be applied on the received packet data before compare */
+	u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+	u8 id;
+	u8 type;
+	u8 params_count;
+	u32 coleasce_time;
+	u8 bss_index;
+	struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 id;
+	u8 type;
+	u8 params_count;
+	u32 coalesce_time;
+	struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+	/* from start of the respective frame header */
+	u8 data_offset;
+
+	u32 mc_addr_count;
+	u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+	u8 id;
+	u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Success or Failure */
+	u32 status;
+
+	u32 match_count;
+	struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+		matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+	/* only valid for response message */
+	u32 status;
+	u8 id;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+	u32 status;
+	u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/*  Ignore DTIM */
+	u32 ignore_dtim;
+
+	/* DTIM Period */
+	u32 dtim_period;
+
+	/* Listen Interval */
+	u32 listen_interval;
+
+	/* Broadcast Multicast Filter  */
+	u32 bcast_mcast_filter;
+
+	/* Beacon Early Termination */
+	u32 enable_bet;
+
+	/* Beacon Early Termination Interval */
+	u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* status of the request */
+	u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum place_holder_in_cap_bitmap {
+	MCC = 0,
+	P2P = 1,
+	DOT11AC = 2,
+	SLM_SESSIONIZATION = 3,
+	DOT11AC_OPMODE = 4,
+	SAP32STA = 5,
+	TDLS = 6,
+	P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+	WLANACTIVE_OFFLOAD = 8,
+	BEACON_OFFLOAD = 9,
+	SCAN_OFFLOAD = 10,
+	ROAM_OFFLOAD = 11,
+	BCN_MISS_OFFLOAD = 12,
+	STA_POWERSAVE = 13,
+	STA_ADVANCED_PWRSAVE = 14,
+	AP_UAPSD = 15,
+	AP_DFS = 16,
+	BLOCKACK = 17,
+	PHY_ERR = 18,
+	BCN_FILTER = 19,
+	RTT = 20,
+	RATECTRL = 21,
+	WOW = 22,
+	MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+	struct wcn36xx_hal_msg_header header;
+
+	u32 feat_caps[4];
+} __packed;
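+
+/* Editorial sketch: feat_caps[4] is a 128-bit bitmap indexed by enum
+ * place_holder_in_cap_bitmap, so a capability maps to word cap / 32,
+ * bit cap % 32. The helper names are hypothetical. */
+static inline void wcn36xx_hal_set_feat_cap(u32 *bitmap,
+					    enum place_holder_in_cap_bitmap cap)
+{
+	bitmap[cap / 32] |= 1u << (cap % 32);
+}
+
+static inline int wcn36xx_hal_get_feat_cap(const u32 *bitmap,
+					   enum place_holder_in_cap_bitmap cap)
+{
+	return !!(bitmap[cap / 32] & (1u << (cap % 32)));
+}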
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+	WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+	/* rekey detected, but not handled */
+	WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+	/* MIC check error on M1 */
+	WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+	/* decryption error on M1  */
+	WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+	/* M1 replay detected */
+	WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+	/* missing GTK key descriptor in M1 */
+	WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+	/* missing iGTK key descriptor in M1 */
+	WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+	/* key installation error */
+	WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+	/* iGTK key installation error */
+	WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+	/* GTK rekey M2 response TX error */
+	WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+	/* non-specific general error */
+	WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+	WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+	/* magic packet match */
+	WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+	/* host defined pattern match */
+	WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+	/* EAP-ID frame detected */
+	WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+	/* start of EAPOL 4-way handshake detected */
+	WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+	/* network scan offload match */
+	WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+	/* GTK rekey status wakeup (see status) */
+	WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+	/* BSS connection lost */
+	WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+  Wake Packet which is saved at tWakeReasonParams.DataStart
+  This data is sent for any wake reasons that involve a packet-based wakeup:
+
+  WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+  WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+  WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+  WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+  WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+
+  The information is provided to the host for auditing and debug purposes
+
+*/
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+	struct wcn36xx_hal_msg_header header;
+
+	/* see tWakeReasonType */
+	u32 reason;
+
+	/* argument specific to the reason type */
+	u32 reason_arg;
+
+	/* length of optional data stored in this message, in case HAL
+	 * truncates the data (i.e. data packets) this length will be less
+	 * than the actual length */
+	u32 stored_data_len;
+
+	/* actual length of data */
+	u32 actual_data_len;
+
+	/* variable length start of data (length == storedDataLen) see
+	 * specific wake type */
+	u8 data_start[1];
+
+	u32 bss_index:8;
+	u32 reserved:24;
+};
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG  0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* optional flags */
+	u32 flags;
+
+	/* Key confirmation key */
+	u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+	/* key encryption key */
+	u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+	/* replay counter */
+	u64 key_replay_counter;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+	struct wcn36xx_hal_msg_header header;
+	u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+
+	/* last rekey status when the rekey was offloaded */
+	u32 last_rekey_status;
+
+	/* current replay counter value */
+	u64 key_replay_counter;
+
+	/* total rekey attempts */
+	u32 total_rekey_count;
+
+	/* successful GTK rekeys */
+	u32 gtk_rekey_count;
+
+	/* successful iGTK rekeys */
+	u32 igtk_rekey_count;
+
+	u8 bss_index;
+};
+
+struct dhcp_info {
+	/* Indicates the device mode associated with the DHCP activity */
+	u8 device_mode;
+
+	u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+};
+
+/*
+ *   Thermal Mitigation mode of operation.
+ *
+ *  WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - Based on disabling AMPDU
+ *  aggregation
+ *
+ *  WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - Based on disabling AMPDU
+ *  aggregation and reducing transmit power
+ *
+ *  WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+	HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+	HAL_THERMAL_MITIGATION_MODE_0,
+	HAL_THERMAL_MITIGATION_MODE_1,
+	HAL_THERMAL_MITIGATION_MODE_2,
+	HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/*
+ *   Thermal Mitigation level.
+ * Note the levels are incremental, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+	HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+	HAL_THERMAL_MITIGATION_LEVEL_0,
+	HAL_THERMAL_MITIGATION_LEVEL_1,
+	HAL_THERMAL_MITIGATION_LEVEL_2,
+	HAL_THERMAL_MITIGATION_LEVEL_3,
+	HAL_THERMAL_MITIGATION_LEVEL_4,
+	HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Thermal Mitigation Operation Mode */
+	enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+	/* Thermal Mitigation Level */
+	enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
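+
+/* Editorial sketch: since the levels are incremental (see above),
+ * requesting HAL_THERMAL_MITIGATION_LEVEL_2 in mode 0 implies the level 0
+ * and level 1 actions as well. The helper name is hypothetical. */
+static inline void wcn36xx_hal_thermal_req_fill(
+	struct set_thermal_mitigation_req_msg *msg)
+{
+	msg->mode = HAL_THERMAL_MITIGATION_MODE_0;	/* AMPDU aggregation off */
+	msg->level = HAL_THERMAL_MITIGATION_LEVEL_2;
+}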
+
+struct set_thermal_mitigation_resp {
+
+	struct wcn36xx_hal_msg_header header;
+
+	/* status of the request */
+	u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+	struct wcn36xx_hal_msg_header header;
+
+	/* Duration over which this stats was collected */
+	u32 duration;
+
+	/* Per STA Stats */
+
+	/* TX stats */
+	u32 tx_bytes_pushed;
+	u32 tx_packets_pushed;
+
+	/* RX stats */
+	u32 rx_bytes_rcvd;
+	u32 rx_packets_rcvd;
+	u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 0000000..7839b31
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+	.band = IEEE80211_BAND_2GHZ, \
+	.center_freq = (_freq), \
+	.hw_value = (_idx), \
+	.max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+	.band = IEEE80211_BAND_5GHZ, \
+	.center_freq = (_freq), \
+	.hw_value = (_idx), \
+	.max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+	CHAN2G(2412, 1), /* Channel 1 */
+	CHAN2G(2417, 2), /* Channel 2 */
+	CHAN2G(2422, 3), /* Channel 3 */
+	CHAN2G(2427, 4), /* Channel 4 */
+	CHAN2G(2432, 5), /* Channel 5 */
+	CHAN2G(2437, 6), /* Channel 6 */
+	CHAN2G(2442, 7), /* Channel 7 */
+	CHAN2G(2447, 8), /* Channel 8 */
+	CHAN2G(2452, 9), /* Channel 9 */
+	CHAN2G(2457, 10), /* Channel 10 */
+	CHAN2G(2462, 11), /* Channel 11 */
+	CHAN2G(2467, 12), /* Channel 12 */
+	CHAN2G(2472, 13), /* Channel 13 */
+	CHAN2G(2484, 14)  /* Channel 14 */
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+	CHAN5G(5180, 36),
+	CHAN5G(5200, 40),
+	CHAN5G(5220, 44),
+	CHAN5G(5240, 48),
+	CHAN5G(5260, 52),
+	CHAN5G(5280, 56),
+	CHAN5G(5300, 60),
+	CHAN5G(5320, 64),
+	CHAN5G(5500, 100),
+	CHAN5G(5520, 104),
+	CHAN5G(5540, 108),
+	CHAN5G(5560, 112),
+	CHAN5G(5580, 116),
+	CHAN5G(5600, 120),
+	CHAN5G(5620, 124),
+	CHAN5G(5640, 128),
+	CHAN5G(5660, 132),
+	CHAN5G(5700, 140),
+	CHAN5G(5745, 149),
+	CHAN5G(5765, 153),
+	CHAN5G(5785, 157),
+	CHAN5G(5805, 161),
+	CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+	.bitrate        = (_bitrate),                   \
+	.flags          = (_flags),                     \
+	.hw_value       = (_hw_rate),                   \
+	.hw_value_short = (_hw_rate)  \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+	RATE(10, HW_RATE_INDEX_1MBPS, 0),
+	RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATE(60, HW_RATE_INDEX_6MBPS, 0),
+	RATE(90, HW_RATE_INDEX_9MBPS, 0),
+	RATE(120, HW_RATE_INDEX_12MBPS, 0),
+	RATE(180, HW_RATE_INDEX_18MBPS, 0),
+	RATE(240, HW_RATE_INDEX_24MBPS, 0),
+	RATE(360, HW_RATE_INDEX_36MBPS, 0),
+	RATE(480, HW_RATE_INDEX_48MBPS, 0),
+	RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+	RATE(60, HW_RATE_INDEX_6MBPS, 0),
+	RATE(90, HW_RATE_INDEX_9MBPS, 0),
+	RATE(120, HW_RATE_INDEX_12MBPS, 0),
+	RATE(180, HW_RATE_INDEX_18MBPS, 0),
+	RATE(240, HW_RATE_INDEX_24MBPS, 0),
+	RATE(360, HW_RATE_INDEX_36MBPS, 0),
+	RATE(480, HW_RATE_INDEX_48MBPS, 0),
+	RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+	.channels	= wcn_2ghz_channels,
+	.n_channels	= ARRAY_SIZE(wcn_2ghz_channels),
+	.bitrates	= wcn_2ghz_rates,
+	.n_bitrates	= ARRAY_SIZE(wcn_2ghz_rates),
+	.ht_cap		= {
+		.cap =	IEEE80211_HT_CAP_GRN_FLD |
+			IEEE80211_HT_CAP_SGI_20 |
+			IEEE80211_HT_CAP_DSSSCCK40 |
+			IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+		.ht_supported = true,
+		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+		.mcs = {
+			.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+			.rx_highest = cpu_to_le16(72),
+			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		}
+	}
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+	.channels	= wcn_5ghz_channels,
+	.n_channels	= ARRAY_SIZE(wcn_5ghz_channels),
+	.bitrates	= wcn_5ghz_rates,
+	.n_bitrates	= ARRAY_SIZE(wcn_5ghz_rates),
+	.ht_cap		= {
+		.cap =	IEEE80211_HT_CAP_GRN_FLD |
+			IEEE80211_HT_CAP_SGI_20 |
+			IEEE80211_HT_CAP_DSSSCCK40 |
+			IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+			IEEE80211_HT_CAP_SGI_40 |
+			IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+		.ht_supported = true,
+		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+		.mcs = {
+			.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+			.rx_highest = cpu_to_le16(72),
+			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		}
+	}
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+			       struct wcn36xx_sta *sta_priv)
+{
+	return NL80211_IFTYPE_STATION == vif->type ?
+	       sta_priv->bss_sta_index :
+	       sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+	struct wcn36xx *wcn = hw->priv;
+	int ret;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+	/* SMD initialization */
+	ret = wcn36xx_smd_open(wcn);
+	if (ret) {
+		wcn36xx_err("Failed to open smd channel: %d\n", ret);
+		goto out_err;
+	}
+
+	/* Allocate memory pools for Mgmt BD headers and Data BD headers */
+	ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+	if (ret) {
+		wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+		goto out_smd_close;
+	}
+
+	ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+	if (ret) {
+		wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+		goto out_free_dxe_pool;
+	}
+
+	wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+	if (!wcn->hal_buf) {
+		wcn36xx_err("Failed to allocate smd buf\n");
+		ret = -ENOMEM;
+		goto out_free_dxe_ctl;
+	}
+
+	ret = wcn36xx_smd_load_nv(wcn);
+	if (ret) {
+		wcn36xx_err("Failed to push NV to chip\n");
+		goto out_free_smd_buf;
+	}
+
+	ret = wcn36xx_smd_start(wcn);
+	if (ret) {
+		wcn36xx_err("Failed to start chip\n");
+		goto out_free_smd_buf;
+	}
+
+	/* DMA channel initialization */
+	ret = wcn36xx_dxe_init(wcn);
+	if (ret) {
+		wcn36xx_err("DXE init failed\n");
+		goto out_smd_stop;
+	}
+
+	wcn36xx_debugfs_init(wcn);
+
+	if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+		ret = wcn36xx_smd_feature_caps_exchange(wcn);
+		if (ret)
+			wcn36xx_warn("Exchange feature caps failed\n");
+	}
+	INIT_LIST_HEAD(&wcn->vif_list);
+	return 0;
+
+out_smd_stop:
+	wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+	kfree(wcn->hal_buf);
+out_free_dxe_pool:
+	wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+	wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+	wcn36xx_smd_close(wcn);
+out_err:
+	return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+	wcn36xx_debugfs_exit(wcn);
+	wcn36xx_smd_stop(wcn);
+	wcn36xx_dxe_deinit(wcn);
+	wcn36xx_smd_close(wcn);
+
+	wcn36xx_dxe_free_mem_pools(wcn);
+	wcn36xx_dxe_free_ctl_blks(wcn);
+
+	kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct ieee80211_vif *vif = NULL;
+	struct wcn36xx_vif *tmp;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		int ch = WCN36XX_HW_CHANNEL(wcn);
+		wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+			    ch);
+		list_for_each_entry(tmp, &wcn->vif_list, list) {
+			vif = container_of((void *)tmp,
+					   struct ieee80211_vif,
+					   drv_priv);
+			wcn36xx_smd_switch_channel(wcn, vif, ch);
+		}
+	}
+
+	return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+				     unsigned int changed,
+				     unsigned int *total, u64 multicast)
+{
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+	*total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_sta *sta_priv = NULL;
+
+	if (control->sta)
+		sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+	if (wcn36xx_start_tx(wcn, sta_priv, skb))
+		ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct ieee80211_key_conf *key_conf)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_sta *sta_priv = vif_priv->sta;
+	int ret = 0;
+	u8 key[WLAN_MAX_KEY_LEN];
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+		    cmd, key_conf->cipher, key_conf->keyidx,
+		    key_conf->keylen, key_conf->flags);
+	wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+			 key_conf->key,
+			 key_conf->keylen);
+
+	switch (key_conf->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+		break;
+	default:
+		wcn36xx_err("Unsupported key type 0x%x\n",
+			      key_conf->cipher);
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	switch (cmd) {
+	case SET_KEY:
+		if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+			/*
+			 * Supplicant is sending key in the wrong order:
+			 * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
+			 * but HW expects it to be in the order as described in
+			 * IEEE 802.11 spec (see chapter 11.7) like this:
+			 * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
+			 */
+			memcpy(key, key_conf->key, 16);
+			memcpy(key + 16, key_conf->key + 24, 8);
+			memcpy(key + 24, key_conf->key + 16, 8);
+		} else {
+			memcpy(key, key_conf->key, key_conf->keylen);
+		}
+
+		if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+			sta_priv->is_data_encrypted = true;
+			/* Reconfigure bss with encrypt_type */
+			if (NL80211_IFTYPE_STATION == vif->type)
+				wcn36xx_smd_config_bss(wcn,
+						       vif,
+						       sta,
+						       sta->addr,
+						       true);
+
+			wcn36xx_smd_set_stakey(wcn,
+				vif_priv->encrypt_type,
+				key_conf->keyidx,
+				key_conf->keylen,
+				key,
+				get_sta_index(vif, sta_priv));
+		} else {
+			wcn36xx_smd_set_bsskey(wcn,
+				vif_priv->encrypt_type,
+				key_conf->keyidx,
+				key_conf->keylen,
+				key);
+			if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+			    (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+				sta_priv->is_data_encrypted = true;
+				wcn36xx_smd_set_stakey(wcn,
+					vif_priv->encrypt_type,
+					key_conf->keyidx,
+					key_conf->keylen,
+					key,
+					get_sta_index(vif, sta_priv));
+			}
+		}
+		break;
+	case DISABLE_KEY:
+		if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+			wcn36xx_smd_remove_bsskey(wcn,
+				vif_priv->encrypt_type,
+				key_conf->keyidx);
+		} else {
+			sta_priv->is_data_encrypted = false;
+			/* do not remove key if disassociated */
+			if (sta_priv->aid)
+				wcn36xx_smd_remove_stakey(wcn,
+					vif_priv->encrypt_type,
+					key_conf->keyidx,
+					get_sta_index(vif, sta_priv));
+		}
+		break;
+	default:
+		wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+	wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_smd_end_scan(wcn);
+	wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+					 enum ieee80211_band band)
+{
+	int i, size;
+	u16 *rates_table;
+	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+	u32 rates = sta->supp_rates[band];
+
+	memset(&sta_priv->supported_rates, 0,
+		sizeof(sta_priv->supported_rates));
+	sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+	size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+	rates_table = sta_priv->supported_rates.dsss_rates;
+	if (band == IEEE80211_BAND_2GHZ) {
+		for (i = 0; i < size; i++) {
+			if (rates & 0x01) {
+				rates_table[i] = wcn_2ghz_rates[i].hw_value;
+				rates = rates >> 1;
+			}
+		}
+	}
+
+	size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+	rates_table = sta_priv->supported_rates.ofdm_rates;
+	for (i = 0; i < size; i++) {
+		if (rates & 0x01) {
+			rates_table[i] = wcn_5ghz_rates[i].hw_value;
+			rates = rates >> 1;
+		}
+	}
+
+	if (sta->ht_cap.ht_supported) {
+		BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+			sizeof(sta_priv->supported_rates.supported_mcs_set));
+		memcpy(sta_priv->supported_rates.supported_mcs_set,
+		       sta->ht_cap.mcs.rx_mask,
+		       sizeof(sta->ht_cap.mcs.rx_mask));
+	}
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+	u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+		HW_RATE_INDEX_6MBPS,
+		HW_RATE_INDEX_9MBPS,
+		HW_RATE_INDEX_12MBPS,
+		HW_RATE_INDEX_18MBPS,
+		HW_RATE_INDEX_24MBPS,
+		HW_RATE_INDEX_36MBPS,
+		HW_RATE_INDEX_48MBPS,
+		HW_RATE_INDEX_54MBPS
+	};
+	u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+		HW_RATE_INDEX_1MBPS,
+		HW_RATE_INDEX_2MBPS,
+		HW_RATE_INDEX_5_5MBPS,
+		HW_RATE_INDEX_11MBPS
+	};
+
+	rates->op_rate_mode = STA_11n;
+	memcpy(rates->dsss_rates, dsss_rates,
+		sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+	memcpy(rates->ofdm_rates, ofdm_rates,
+		sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
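+	/* MCS 0-7 only, i.e. a single HT spatial stream */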
+	rates->supported_mcs_set[0] = 0xFF;
+}
+
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_bss_conf *bss_conf,
+				     u32 changed)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct sk_buff *skb = NULL;
+	u16 tim_off, tim_len;
+	enum wcn36xx_hal_link_state link_state;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+		    vif, changed);
+
+	if (changed & BSS_CHANGED_BEACON_INFO) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC,
+			    "mac bss changed dtim period %d\n",
+			    bss_conf->dtim_period);
+
+		vif_priv->dtim_period = bss_conf->dtim_period;
+	}
+
+	if (changed & BSS_CHANGED_PS) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC,
+			    "mac bss PS set %d\n",
+			    bss_conf->ps);
+		if (bss_conf->ps) {
+			wcn36xx_pmc_enter_bmps_state(wcn, vif);
+		} else {
+			wcn36xx_pmc_exit_bmps_state(wcn, vif);
+		}
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+			    bss_conf->bssid);
+
+		if (!is_zero_ether_addr(bss_conf->bssid)) {
+			vif_priv->is_joining = true;
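+			/*
+			 * 0xff means "no BSS slot assigned yet"; the real
+			 * index is filled in from the config_bss response.
+			 */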
+			vif_priv->bss_index = 0xff;
+			wcn36xx_smd_join(wcn, bss_conf->bssid,
+					 vif->addr, WCN36XX_HW_CHANNEL(wcn));
+			wcn36xx_smd_config_bss(wcn, vif, NULL,
+					       bss_conf->bssid, false);
+		} else {
+			vif_priv->is_joining = false;
+			wcn36xx_smd_delete_bss(wcn, vif);
+		}
+	}
+
+	if (changed & BSS_CHANGED_SSID) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC,
+			    "mac bss changed ssid\n");
+		wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+				 bss_conf->ssid, bss_conf->ssid_len);
+
+		vif_priv->ssid.length = bss_conf->ssid_len;
+		memcpy(&vif_priv->ssid.ssid,
+		       bss_conf->ssid,
+		       bss_conf->ssid_len);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		vif_priv->is_joining = false;
+		if (bss_conf->assoc) {
+			struct ieee80211_sta *sta;
+			struct wcn36xx_sta *sta_priv;
+
+			wcn36xx_dbg(WCN36XX_DBG_MAC,
+				    "mac assoc bss %pM vif %pM AID=%d\n",
+				     bss_conf->bssid,
+				     vif->addr,
+				     bss_conf->aid);
+
+			rcu_read_lock();
+			sta = ieee80211_find_sta(vif, bss_conf->bssid);
+			if (!sta) {
+				wcn36xx_err("sta %pM is not found\n",
+					      bss_conf->bssid);
+				rcu_read_unlock();
+				goto out;
+			}
+			sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+			wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+			wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+				vif->addr,
+				WCN36XX_HAL_LINK_POSTASSOC_STATE);
+			wcn36xx_smd_config_bss(wcn, vif, sta,
+					       bss_conf->bssid,
+					       true);
+			sta_priv->aid = bss_conf->aid;
+			/*
+			 * config_sta must be called from here, because this
+			 * is the first place where the AID is available.
+			 */
+			wcn36xx_smd_config_sta(wcn, vif, sta);
+			rcu_read_unlock();
+		} else {
+			wcn36xx_dbg(WCN36XX_DBG_MAC,
+				    "disassociated bss %pM vif %pM AID=%d\n",
+				    bss_conf->bssid,
+				    vif->addr,
+				    bss_conf->aid);
+			wcn36xx_smd_set_link_st(wcn,
+						bss_conf->bssid,
+						vif->addr,
+						WCN36XX_HAL_LINK_IDLE_STATE);
+		}
+	}
+
+	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+		skb = ieee80211_proberesp_get(hw, vif);
+		if (!skb) {
+			wcn36xx_err("failed to alloc probe response skb\n");
+			goto out;
+		}
+
+		wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+		dev_kfree_skb(skb);
+	}
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		wcn36xx_dbg(WCN36XX_DBG_MAC,
+			    "mac bss changed beacon enabled %d\n",
+			    bss_conf->enable_beacon);
+
+		if (bss_conf->enable_beacon) {
+			vif_priv->bss_index = 0xff;
+			wcn36xx_smd_config_bss(wcn, vif, NULL,
+					       vif->addr, false);
+			skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+						       &tim_len);
+			if (!skb) {
+				wcn36xx_err("failed to alloc beacon skb\n");
+				goto out;
+			}
+			wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+			dev_kfree_skb(skb);
+
+			if (vif->type == NL80211_IFTYPE_ADHOC ||
+			    vif->type == NL80211_IFTYPE_MESH_POINT)
+				link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+			else
+				link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+			wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+						link_state);
+		} else {
+			wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+						WCN36XX_HAL_LINK_IDLE_STATE);
+			wcn36xx_smd_delete_bss(wcn, vif);
+		}
+	}
+out:
+	return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+	wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+	return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+	list_del(&vif_priv->list);
+	wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+		    vif, vif->type);
+
+	if (!(NL80211_IFTYPE_STATION == vif->type ||
+	      NL80211_IFTYPE_AP == vif->type ||
+	      NL80211_IFTYPE_ADHOC == vif->type ||
+	      NL80211_IFTYPE_MESH_POINT == vif->type)) {
+		wcn36xx_warn("Unsupported interface type requested: %d\n",
+			     vif->type);
+		return -EOPNOTSUPP;
+	}
+
+	list_add(&vif_priv->list, &wcn->vif_list);
+	wcn36xx_smd_add_sta_self(wcn, vif);
+
+	return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+		    vif, sta->addr);
+
+	vif_priv->sta = sta_priv;
+	sta_priv->vif = vif_priv;
+	/*
+	 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
+	 * at this stage AID is not available yet.
+	 */
+	if (NL80211_IFTYPE_STATION != vif->type) {
+		wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+		sta_priv->aid = sta->aid;
+		wcn36xx_smd_config_sta(wcn, vif, sta);
+	}
+	return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+		    vif, sta->addr, sta_priv->sta_index);
+
+	wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+	vif_priv->sta = NULL;
+	sta_priv->vif = NULL;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+	flush_workqueue(wcn->hal_ind_wq);
+	wcn36xx_smd_set_power_params(wcn, true);
+	return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+	flush_workqueue(wcn->hal_ind_wq);
+	wcn36xx_smd_set_power_params(wcn, false);
+	return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+		    struct ieee80211_vif *vif,
+		    enum ieee80211_ampdu_mlme_action action,
+		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+		    u8 buf_size)
+{
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_sta *sta_priv = NULL;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+		    action, tid);
+
+	sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		sta_priv->tid = tid;
+		wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+			get_sta_index(vif, sta_priv));
+		wcn36xx_smd_add_ba(wcn);
+		wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+		ieee80211_start_tx_ba_session(sta, tid, 0);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+		break;
+	case IEEE80211_AMPDU_TX_START:
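+		/*
+		 * Nothing to set up in the firmware at this point; just let
+		 * mac80211 proceed with the TX BA negotiation.
+		 */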
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+			get_sta_index(vif, sta_priv));
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	default:
+		wcn36xx_err("Unknown AMPDU action\n");
+	}
+
+	return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+	.start			= wcn36xx_start,
+	.stop			= wcn36xx_stop,
+	.add_interface		= wcn36xx_add_interface,
+	.remove_interface	= wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+	.suspend		= wcn36xx_suspend,
+	.resume			= wcn36xx_resume,
+#endif
+	.config			= wcn36xx_config,
+	.configure_filter	= wcn36xx_configure_filter,
+	.tx			= wcn36xx_tx,
+	.set_key		= wcn36xx_set_key,
+	.sw_scan_start		= wcn36xx_sw_scan_start,
+	.sw_scan_complete	= wcn36xx_sw_scan_complete,
+	.bss_info_changed	= wcn36xx_bss_info_changed,
+	.set_rts_threshold	= wcn36xx_set_rts_threshold,
+	.sta_add		= wcn36xx_sta_add,
+	.sta_remove		= wcn36xx_sta_remove,
+	.ampdu_action		= wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+	int ret = 0;
+
+	static const u32 cipher_suites[] = {
+		WLAN_CIPHER_SUITE_WEP40,
+		WLAN_CIPHER_SUITE_WEP104,
+		WLAN_CIPHER_SUITE_TKIP,
+		WLAN_CIPHER_SUITE_CCMP,
+	};
+
+	wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+		IEEE80211_HW_HAS_RATE_CONTROL |
+		IEEE80211_HW_SUPPORTS_PS |
+		IEEE80211_HW_CONNECTION_MONITOR |
+		IEEE80211_HW_AMPDU_AGGREGATION |
+		IEEE80211_HW_TIMING_BEACON_ONLY;
+
+	wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_ADHOC) |
+		BIT(NL80211_IFTYPE_MESH_POINT);
+
+	wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+	wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+	wcn->hw->wiphy->cipher_suites = cipher_suites;
+	wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+	wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+	wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+	wcn->hw->max_listen_interval = 200;
+
+	wcn->hw->queues = 4;
+
+	SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+	wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+	wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+	return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+					  struct platform_device *pdev)
+{
+	struct resource *res;
+	/* Set TX IRQ */
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   "wcnss_wlantx_irq");
+	if (!res) {
+		wcn36xx_err("failed to get tx_irq\n");
+		return -ENOENT;
+	}
+	wcn->tx_irq = res->start;
+
+	/* Set RX IRQ */
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   "wcnss_wlanrx_irq");
+	if (!res) {
+		wcn36xx_err("failed to get rx_irq\n");
+		return -ENOENT;
+	}
+	wcn->rx_irq = res->start;
+
+	/* Map the memory */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "wcnss_mmio");
+	if (!res) {
+		wcn36xx_err("failed to get mmio\n");
+		return -ENOENT;
+	}
+	wcn->mmio = ioremap(res->start, resource_size(res));
+	if (!wcn->mmio) {
+		wcn36xx_err("failed to map io memory\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw;
+	struct wcn36xx *wcn;
+	int ret;
+	u8 addr[ETH_ALEN];
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+	hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+	if (!hw) {
+		wcn36xx_err("failed to alloc hw\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	platform_set_drvdata(pdev, hw);
+	wcn = hw->priv;
+	wcn->hw = hw;
+	wcn->dev = &pdev->dev;
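+	/*
+	 * ctrl_ops come from the platform glue and abstract the SMD channel
+	 * used to talk to the WCNSS firmware.
+	 */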
+	wcn->ctrl_ops = pdev->dev.platform_data;
+
+	mutex_init(&wcn->hal_mutex);
+
+	if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+		wcn36xx_info("mac address: %pM\n", addr);
+		SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+	}
+
+	ret = wcn36xx_platform_get_resources(wcn, pdev);
+	if (ret)
+		goto out_free_hw;
+
+	wcn36xx_init_ieee80211(wcn);
+	ret = ieee80211_register_hw(wcn->hw);
+	if (ret)
+		goto out_unmap;
+
+	return 0;
+
+out_unmap:
+	iounmap(wcn->mmio);
+out_free_hw:
+	ieee80211_free_hw(hw);
+out_err:
+	return ret;
+}
+
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct wcn36xx *wcn = hw->priv;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+	mutex_destroy(&wcn->hal_mutex);
+
+	ieee80211_unregister_hw(hw);
+	iounmap(wcn->mmio);
+	ieee80211_free_hw(hw);
+
+	return 0;
+}
+
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+	{
+		.name = "wcn36xx",
+		.driver_data = 0
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+	.probe		= wcn36xx_probe,
+	.remove		= wcn36xx_remove,
+	.driver		= {
+		.name	= "wcn36xx",
+		.owner	= THIS_MODULE,
+	},
+	.id_table	= wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+	return platform_driver_register(&wcn36xx_driver);
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+	platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 0000000..28b515c
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+				 struct ieee80211_vif *vif)
+{
+	int ret = 0;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	/* TODO: Make sure the TX chain clean */
+	ret = wcn36xx_smd_enter_bmps(wcn, vif);
+	if (!ret) {
+		wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+		vif_priv->pw_state = WCN36XX_BMPS;
+	} else {
+		/*
+		 * One reason the HW may refuse to enter BMPS is that the
+		 * driver tried to enter BMPS before the first beacon was
+		 * received, right after authentication completed.
+		 */
+		wcn36xx_err("Cannot enter BMPS!\n");
+	}
+	return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+				struct ieee80211_vif *vif)
+{
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+	if (WCN36XX_BMPS != vif_priv->pw_state) {
+		wcn36xx_err("Not in BMPS mode, nothing to exit!\n");
+		return -EINVAL;
+	}
+	wcn36xx_smd_exit_bmps(wcn, vif);
+	vif_priv->pw_state = WCN36XX_FULL_POWER;
+	return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+					  struct ieee80211_vif *vif)
+{
+	wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+	return wcn36xx_smd_keep_alive_req(wcn, vif,
+					  WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 0000000..f72ed68
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+	WCN36XX_FULL_POWER,
+	WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+				 struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+				struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+					  struct ieee80211_vif *vif);
+#endif	/* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 0000000..f8c3a10
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
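+/* Append a single u32 configuration value to the HAL buffer in TLV form. */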
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+	struct wcn36xx_hal_cfg *entry;
+	u32 *val;
+
+	if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+		wcn36xx_err("Not enough room for TLV entry\n");
+		return -ENOMEM;
+	}
+
+	entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+	entry->id = id;
+	entry->len = sizeof(u32);
+	entry->pad_bytes = 0;
+	entry->reserve = 0;
+
+	val = (u32 *) (entry + 1);
+	*val = value;
+
+	*len += sizeof(*entry) + sizeof(u32);
+
+	return 0;
+}
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+		struct ieee80211_sta *sta,
+		struct wcn36xx_hal_config_bss_params *bss_params)
+{
+	if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+		bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+	else if (sta && sta->ht_cap.ht_supported)
+		bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+	else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+		bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+	else
+		bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+	return caps & flag ? 1 : 0;
+}
+
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta,
+		struct wcn36xx_hal_config_bss_params *bss_params)
+{
+	if (sta && sta->ht_cap.ht_supported) {
+		unsigned long caps = sta->ht_cap.cap;
+		bss_params->ht = sta->ht_cap.ht_supported;
+		bss_params->tx_channel_width_set = is_cap_supported(caps,
+			IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+		bss_params->lsig_tx_op_protection_full_support =
+			is_cap_supported(caps,
+					 IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+		bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+		bss_params->lln_non_gf_coexist =
+			!!(vif->bss_conf.ht_operation_mode &
+			   IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+		/* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+		bss_params->dual_cts_protection = 0;
+		/* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+		bss_params->ht20_coexist = 0;
+	}
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+		struct wcn36xx_hal_config_sta_params *sta_params)
+{
+	if (sta->ht_cap.ht_supported) {
+		unsigned long caps = sta->ht_cap.cap;
+		sta_params->ht_capable = sta->ht_cap.ht_supported;
+		sta_params->tx_channel_width_set = is_cap_supported(caps,
+			IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+		sta_params->lsig_txop_protection = is_cap_supported(caps,
+			IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+		sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+		sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+		sta_params->max_amsdu_size = is_cap_supported(caps,
+			IEEE80211_HT_CAP_MAX_AMSDU);
+		sta_params->sgi_20Mhz = is_cap_supported(caps,
+			IEEE80211_HT_CAP_SGI_20);
+		sta_params->sgi_40mhz = is_cap_supported(caps,
+			IEEE80211_HT_CAP_SGI_40);
+		sta_params->green_field_capable = is_cap_supported(caps,
+			IEEE80211_HT_CAP_GRN_FLD);
+		sta_params->delayed_ba_support = is_cap_supported(caps,
+			IEEE80211_HT_CAP_DELAY_BA);
+		sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+			IEEE80211_HT_CAP_DSSSCCK40);
+	}
+}
+
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+		struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta,
+		struct wcn36xx_hal_config_sta_params *sta_params)
+{
+	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_sta *priv_sta = NULL;
+
+	if (vif->type == NL80211_IFTYPE_ADHOC ||
+	    vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT) {
+		sta_params->type = 1;
+		sta_params->sta_index = 0xFF;
+	} else {
+		sta_params->type = 0;
+		sta_params->sta_index = 1;
+	}
+
+	sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+	/*
+	 * In STA mode ieee80211_sta contains the bssid and ieee80211_vif
+	 * contains our mac address.  In AP mode we are the bssid, so vif
+	 * contains the bssid and ieee80211_sta contains the peer's mac.
+	 */
+	if (NL80211_IFTYPE_STATION == vif->type)
+		memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+	else
+		memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+	sta_params->encrypt_type = priv_vif->encrypt_type;
+	sta_params->short_preamble_supported =
+		!(WCN36XX_FLAGS(wcn) &
+		  IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+	sta_params->rifs_mode = 0;
+	sta_params->rmf = 0;
+	sta_params->action = 0;
+	sta_params->uapsd = 0;
+	sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+	sta_params->max_ampdu_duration = 0;
+	sta_params->bssid_index = priv_vif->bss_index;
+	sta_params->p2p = 0;
+
+	if (sta) {
+		priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+		if (NL80211_IFTYPE_STATION == vif->type)
+			memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+		else
+			memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+		sta_params->wmm_enabled = sta->wme;
+		sta_params->max_sp_len = sta->max_sp;
+		sta_params->aid = priv_sta->aid;
+		wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+		memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+			sizeof(priv_sta->supported_rates));
+	} else {
+		wcn36xx_set_default_rates(&sta_params->supported_rates);
+	}
+}
+
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+	int ret = 0;
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
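+	/*
+	 * The SMD response handler copies the firmware's reply into hal_buf
+	 * and then completes hal_rsp_compl.
+	 */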
+	init_completion(&wcn->hal_rsp_compl);
+	ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+	if (ret) {
+		wcn36xx_err("HAL TX failed\n");
+		goto out;
+	}
+	if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+		msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+		wcn36xx_err("Timeout while waiting for SMD response\n");
+		ret = -ETIME;
+		goto out;
+	}
+out:
+	return ret;
+}
+
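+/*
+ * Every HAL request starts with a common header carrying the message type,
+ * version and total length; these helpers fill in the header and stage the
+ * message in the shared hal_buf.
+ */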
+#define INIT_HAL_MSG(msg_body, type) \
+	do {								\
+		memset(&msg_body, 0, sizeof(msg_body));			\
+		msg_body.header.msg_type = type;			\
+		msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+		msg_body.header.len = sizeof(msg_body);			\
+	} while (0)							\
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+	do {							\
+		memset(send_buf, 0, msg_body.header.len);	\
+		memcpy(send_buf, &msg_body, sizeof(msg_body));	\
+	} while (0)						\
+
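+/* Most HAL responses carry nothing but a status word; check it. */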
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+	struct wcn36xx_fw_msg_status_rsp *rsp;
+
+	if (len < sizeof(struct wcn36xx_hal_msg_header) +
+	    sizeof(struct wcn36xx_fw_msg_status_rsp))
+		return -EIO;
+
+	rsp = (struct wcn36xx_fw_msg_status_rsp *)
+		(buf + sizeof(struct wcn36xx_hal_msg_header));
+
+	if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+		return rsp->status;
+
+	return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+	const struct firmware *nv;
+	struct nv_data *nv_d;
+	struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+	int fw_bytes_left;
+	int ret;
+	u16 fm_offset = 0;
+
+	ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+	if (ret) {
+		wcn36xx_err("Failed to load nv file %s: %d\n",
+			      WLAN_NV_FILE, ret);
+		goto out_free_nv;
+	}
+
+	nv_d = (struct nv_data *)nv->data;
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+	msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+	msg_body.frag_number = 0;
+	/* hal_buf must be protected by hal_mutex */
+	mutex_lock(&wcn->hal_mutex);
+
+	do {
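+		/*
+		 * The first 4 bytes of the NV blob are the nv_data header
+		 * (the is_valid word), not part of the image itself.
+		 */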
+		fw_bytes_left = nv->size - fm_offset - 4;
+		if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+			msg_body.last_fragment = 0;
+			msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+		} else {
+			msg_body.last_fragment = 1;
+			msg_body.nv_img_buffer_size = fw_bytes_left;
+
+			/* Do not forget to update the overall message length */
+			msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+		}
+
+		/* Add load NV request message header */
+		memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+		/* Add NV body itself */
+		memcpy(wcn->hal_buf + sizeof(msg_body),
+		       &nv_d->table + fm_offset,
+		       msg_body.nv_img_buffer_size);
+
+		ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+		if (ret)
+			goto out_unlock;
+		ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+						   wcn->hal_rsp_len);
+		if (ret) {
+			wcn36xx_err("hal_load_nv response failed err=%d\n",
+				    ret);
+			goto out_unlock;
+		}
+		msg_body.frag_number++;
+		fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+	} while (msg_body.last_fragment != 1);
+
+out_unlock:
+	mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+	release_firmware(nv);
+
+	return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+	struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+	if (len < sizeof(*rsp))
+		return -EIO;
+
+	rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+	if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+		return -EIO;
+
+	memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+	       WCN36XX_HAL_VERSION_LENGTH);
+	memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+	       WCN36XX_HAL_VERSION_LENGTH);
+
+	/* null terminate the strings, just in case */
+	wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+	wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+	wcn->fw_revision = rsp->start_rsp_params.version.revision;
+	wcn->fw_version = rsp->start_rsp_params.version.version;
+	wcn->fw_minor = rsp->start_rsp_params.version.minor;
+	wcn->fw_major = rsp->start_rsp_params.version.major;
+
+	wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+		     wcn->wlan_version, wcn->crm_version);
+
+	wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+		     wcn->fw_major, wcn->fw_minor,
+		     wcn->fw_version, wcn->fw_revision,
+		     rsp->start_rsp_params.stations,
+		     rsp->start_rsp_params.bssids);
+
+	return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_mac_start_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+	msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+	msg_body.params.len = 0;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+		    msg_body.params.type);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_start failed\n");
+		goto out;
+	}
+
+	ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_start response failed err=%d\n", ret);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_mac_stop_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+	msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_stop failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_stop response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+	struct wcn36xx_hal_init_scan_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+	msg_body.mode = mode;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_init_scan failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_start_scan_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+	msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+		    msg_body.scan_channel);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_start_scan failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_end_scan_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+	msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+		    msg_body.scan_channel);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_end_scan failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+			    enum wcn36xx_hal_sys_mode mode)
+{
+	struct wcn36xx_hal_finish_scan_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+	msg_body.mode = mode;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+		    msg_body.mode);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_finish_scan failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+	struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+	int ret = 0;
+
+	ret = wcn36xx_smd_rsp_status_check(buf, len);
+	if (ret)
+		return ret;
+	rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+		    rsp->channel_number, rsp->status);
+	return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+			       struct ieee80211_vif *vif, int ch)
+{
+	struct wcn36xx_hal_switch_channel_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+	msg_body.channel_number = (u8)ch;
+	msg_body.tx_mgmt_power = 0xbf;
+	msg_body.max_tx_power = 0xbf;
+	memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_switch_channel failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+	struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+	rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+	/* Remove the PNO version bit */
+	rsp->status &= ~WCN36XX_FW_MSG_PNO_VERSION_MASK;
+
+	if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+		wcn36xx_warn("error response from update scan\n");
+		return rsp->status;
+	}
+
+	return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_update_scan_params_req msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
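+	/* Fixed channel count and dwell times; these are not yet derived
+	 * from the current channel list or regulatory settings. */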
+	msg_body.dot11d_enabled	= 0;
+	msg_body.dot11d_resolved = 0;
+	msg_body.channel_count = 26;
+	msg_body.active_min_ch_time = 60;
+	msg_body.active_max_ch_time = 120;
+	msg_body.passive_min_ch_time = 60;
+	msg_body.passive_max_ch_time = 110;
+	msg_body.state = 0;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal update scan params channel_count %d\n",
+		    msg_body.channel_count);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_update_scan_params failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+						 wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+					struct ieee80211_vif *vif,
+					void *buf,
+					size_t len)
+{
+	struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+	if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+		wcn36xx_warn("hal add sta self failure: %d\n",
+			     rsp->status);
+		return rsp->status;
+	}
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+		    rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+	priv_vif->self_sta_index = rsp->self_sta_index;
+	priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+	return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+	struct wcn36xx_hal_add_sta_self_req msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+	memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal add sta self self_addr %pM status %d\n",
+		    msg_body.self_addr, msg_body.status);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_add_sta_self failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+					   vif,
+					   wcn->hal_buf,
+					   wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+	struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+	memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_delete_sta_self failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+	struct wcn36xx_hal_delete_sta_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+	msg_body.sta_index = sta_index;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal delete sta sta_index %d\n",
+		    msg_body.sta_index);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_delete_sta failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+	struct wcn36xx_hal_join_rsp_msg *rsp;
+
+	if (wcn36xx_smd_rsp_status_check(buf, len))
+		return -EIO;
+
+	rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal rsp join status %d tx_mgmt_power %d\n",
+		    rsp->status, rsp->tx_mgmt_power);
+
+	return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+	struct wcn36xx_hal_join_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+	memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+	memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+	msg_body.channel = ch;
+
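+	/* Mirror the current HT40 configuration so the firmware tunes the
+	 * matching secondary channel. */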
+	if (conf_is_ht40_minus(&wcn->hw->conf))
+		msg_body.secondary_channel_offset =
+			PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+	else if (conf_is_ht40_plus(&wcn->hw->conf))
+		msg_body.secondary_channel_offset =
+			PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+	else
+		msg_body.secondary_channel_offset =
+			PHY_SINGLE_CHANNEL_CENTERED;
+
+	msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+	msg_body.max_tx_power = 0xbf;
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+		    msg_body.bssid, msg_body.self_sta_mac_addr,
+		    msg_body.channel, msg_body.link_state);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_join failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_join response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+			    const u8 *sta_mac,
+			    enum wcn36xx_hal_link_state state)
+{
+	struct wcn36xx_hal_set_link_state_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+	memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+	memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+	msg_body.state = state;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+		    msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_set_link_st failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+			const struct wcn36xx_hal_config_sta_params *orig,
+			struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+	/* convert orig to v1 format */
+	memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+	memcpy(&v1->mac, orig->mac, ETH_ALEN);
+	v1->aid = orig->aid;
+	v1->type = orig->type;
+	v1->listen_interval = orig->listen_interval;
+	v1->ht_capable = orig->ht_capable;
+
+	v1->max_ampdu_size = orig->max_ampdu_size;
+	v1->max_ampdu_density = orig->max_ampdu_density;
+	v1->sgi_40mhz = orig->sgi_40mhz;
+	v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+	memcpy(&v1->supported_rates, &orig->supported_rates,
+	       sizeof(orig->supported_rates));
+	v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+				      struct ieee80211_sta *sta,
+				      void *buf,
+				      size_t len)
+{
+	struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+	struct config_sta_rsp_params *params;
+	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+	params = &rsp->params;
+
+	if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+		wcn36xx_warn("hal config sta response failure: %d\n",
+			     params->status);
+		return -EIO;
+	}
+
+	sta_priv->sta_index = params->sta_index;
+	sta_priv->dpu_desc_index = params->dpu_index;
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+		    params->status, params->sta_index, params->bssid_index,
+		    params->p2p);
+
+	return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+		     const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+	struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+	struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+	wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+				      &msg_body.sta_params);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+		    sta->action, sta->sta_index, sta->bssid_index,
+		    sta->bssid, sta->type, sta->mac, sta->aid);
+
+	return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta)
+{
+	struct wcn36xx_hal_config_sta_req_msg msg;
+	struct wcn36xx_hal_config_sta_params *sta_params;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+	sta_params = &msg.sta_params;
+
+	wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
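+	/*
+	 * Only firmware 1.2.2.24 understands the current layout of the STA
+	 * parameters; every other version expects the older v1 structure.
+	 */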
+	if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+		ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+	} else {
+		PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+		wcn36xx_dbg(WCN36XX_DBG_HAL,
+			    "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+			    sta_params->action, sta_params->sta_index,
+			    sta_params->bssid_index, sta_params->bssid,
+			    sta_params->type, sta_params->mac, sta_params->aid);
+
+		ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+	}
+	if (ret) {
+		wcn36xx_err("Sending hal_config_sta failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_config_sta_rsp(wcn,
+					 sta,
+					 wcn->hal_buf,
+					 wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+			const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+	struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+	struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+	struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+	/* convert orig to v1 */
+	memcpy(&msg_body.bss_params.bssid,
+	       &orig->bss_params.bssid, ETH_ALEN);
+	memcpy(&msg_body.bss_params.self_mac_addr,
+	       &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+	msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+	msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+	msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+	msg_body.bss_params.short_slot_time_supported =
+		orig->bss_params.short_slot_time_supported;
+	msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+	msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+	msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+	msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+	msg_body.bss_params.lln_non_gf_coexist =
+		orig->bss_params.lln_non_gf_coexist;
+
+	msg_body.bss_params.lsig_tx_op_protection_full_support =
+		orig->bss_params.lsig_tx_op_protection_full_support;
+	msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+	msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+	msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+	msg_body.bss_params.tx_channel_width_set =
+		orig->bss_params.tx_channel_width_set;
+	msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+	msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+	msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+	memcpy(&msg_body.bss_params.ssid,
+	       &orig->bss_params.ssid,
+	       sizeof(orig->bss_params.ssid));
+
+	msg_body.bss_params.action = orig->bss_params.action;
+	msg_body.bss_params.rateset = orig->bss_params.rateset;
+	msg_body.bss_params.ht = orig->bss_params.ht;
+	msg_body.bss_params.obss_prot_enabled =
+		orig->bss_params.obss_prot_enabled;
+	msg_body.bss_params.rmf = orig->bss_params.rmf;
+	msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+	msg_body.bss_params.dual_cts_protection =
+		orig->bss_params.dual_cts_protection;
+
+	msg_body.bss_params.max_probe_resp_retry_limit =
+		orig->bss_params.max_probe_resp_retry_limit;
+	msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+	msg_body.bss_params.proxy_probe_resp =
+		orig->bss_params.proxy_probe_resp;
+	msg_body.bss_params.edca_params_valid =
+		orig->bss_params.edca_params_valid;
+
+	memcpy(&msg_body.bss_params.acbe,
+	       &orig->bss_params.acbe,
+	       sizeof(orig->bss_params.acbe));
+	memcpy(&msg_body.bss_params.acbk,
+	       &orig->bss_params.acbk,
+	       sizeof(orig->bss_params.acbk));
+	memcpy(&msg_body.bss_params.acvi,
+	       &orig->bss_params.acvi,
+	       sizeof(orig->bss_params.acvi));
+	memcpy(&msg_body.bss_params.acvo,
+	       &orig->bss_params.acvo,
+	       sizeof(orig->bss_params.acvo));
+
+	msg_body.bss_params.ext_set_sta_key_param_valid =
+		orig->bss_params.ext_set_sta_key_param_valid;
+
+	memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+	       &orig->bss_params.ext_set_sta_key_param,
+	       sizeof(orig->bss_params.ext_set_sta_key_param));
+
+	msg_body.bss_params.wcn36xx_hal_persona =
+		orig->bss_params.wcn36xx_hal_persona;
+	msg_body.bss_params.spectrum_mgt_enable =
+		orig->bss_params.spectrum_mgt_enable;
+	msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+	msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+	wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+				      &msg_body.bss_params.sta);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+		    bss->bssid, bss->self_mac_addr, bss->bss_type,
+		    bss->oper_mode, bss->nw_type);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+		    sta->bssid, sta->action, sta->sta_index,
+		    sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+	return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+				      struct ieee80211_vif *vif,
+				      void *buf,
+				      size_t len)
+{
+	struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+	struct wcn36xx_hal_config_bss_rsp_params *params;
+	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+	params = &rsp->bss_rsp_params;
+
+	if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+		wcn36xx_warn("hal config bss response failure: %d\n",
+			     params->status);
+		return -EIO;
+	}
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+		    " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+		    " power %d ucast_dpu_signature %d\n",
+		    params->status, params->bss_index, params->dpu_desc_index,
+		    params->bss_sta_index, params->bss_self_sta_index,
+		    params->bss_bcast_sta_idx, params->mac,
+		    params->tx_mgmt_power, params->ucast_dpu_signature);
+
+	priv_vif->bss_index = params->bss_index;
+
+	if (priv_vif->sta) {
+		priv_vif->sta->bss_sta_index = params->bss_sta_index;
+		priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+	}
+
+	priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+	return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta, const u8 *bssid,
+			   bool update)
+{
+	struct wcn36xx_hal_config_bss_req_msg msg;
+	struct wcn36xx_hal_config_bss_params *bss;
+	struct wcn36xx_hal_config_sta_params *sta_params;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+	bss = &msg.bss_params;
+	sta_params = &bss->sta;
+
+	WARN_ON(is_zero_ether_addr(bssid));
+
+	memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+	memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+		/* STA */
+		bss->oper_mode = 1;
+		bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+	} else if (vif->type == NL80211_IFTYPE_AP) {
+		bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+		/* AP */
+		bss->oper_mode = 0;
+		bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+	} else if (vif->type == NL80211_IFTYPE_ADHOC ||
+		   vif->type == NL80211_IFTYPE_MESH_POINT) {
+		bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+		/* STA */
+		bss->oper_mode = 1;
+	} else {
+		wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+	}
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+	else
+		bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+	bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+	bss->lla_coexist = 0;
+	bss->llb_coexist = 0;
+	bss->llg_coexist = 0;
+	bss->rifs_mode = 0;
+	bss->beacon_interval = vif->bss_conf.beacon_int;
+	bss->dtim_period = vif_priv->dtim_period;
+
+	wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+	bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+	if (conf_is_ht40_minus(&wcn->hw->conf))
+		bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+	else if (conf_is_ht40_plus(&wcn->hw->conf))
+		bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+	else
+		bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+	bss->reserved = 0;
+	wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+	/* vif_priv->ssid is only valid in AP and IBSS mode */
+	bss->ssid.length = vif_priv->ssid.length;
+	memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+	bss->obss_prot_enabled = 0;
+	bss->rmf = 0;
+	bss->max_probe_resp_retry_limit = 0;
+	bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+	bss->proxy_probe_resp = 0;
+	bss->edca_params_valid = 0;
+
+	/* FIXME: set acbe, acbk, acvi and acvo */
+
+	bss->ext_set_sta_key_param_valid = 0;
+
+	/* FIXME: set ext_set_sta_key_param */
+
+	bss->spectrum_mgt_enable = 0;
+	bss->tx_mgmt_power = 0;
+	bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+	bss->action = update;
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+		    bss->bssid, bss->self_mac_addr, bss->bss_type,
+		    bss->oper_mode, bss->nw_type);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+		    sta_params->bssid, sta_params->action,
+		    sta_params->sta_index, sta_params->bssid_index,
+		    sta_params->aid, sta_params->type,
+		    sta_params->mac);
+
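+	/* Same firmware version split as in wcn36xx_smd_config_sta() */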
+	if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+		ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+	} else {
+		PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+		ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+	}
+	if (ret) {
+		wcn36xx_err("Sending hal_config_bss failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_config_bss_rsp(wcn,
+					 vif,
+					 wcn->hal_buf,
+					 wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+	struct wcn36xx_hal_delete_bss_req_msg msg_body;
+	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+	msg_body.bss_index = priv_vif->bss_index;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_delete_bss failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			    struct sk_buff *skb_beacon, u16 tim_off,
+			    u16 p2p_off)
+{
+	struct wcn36xx_hal_send_beacon_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+	/* TODO: find out why the firmware needs 6 extra bytes here */
+	msg_body.beacon_length = skb_beacon->len + 6;
+
+	if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+		memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+		memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+		       skb_beacon->len);
+	} else {
+		wcn36xx_err("Beacon is too big: beacon size=%d\n",
+			    msg_body.beacon_length);
+		ret = -ENOMEM;
+		goto out;
+	}
+	memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+	/* TODO: find out why the TIM IE offset needs 4 extra bytes */
+	msg_body.tim_ie_offset = tim_off + 4;
+	msg_body.p2p_ie_offset = p2p_off;
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal send beacon beacon_length %d\n",
+		    msg_body.beacon_length);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_send_beacon failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+				      struct ieee80211_vif *vif,
+				      struct sk_buff *skb)
+{
+	struct wcn36xx_hal_send_probe_resp_req_msg msg;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+	if (skb->len > BEACON_TEMPLATE_SIZE) {
+		wcn36xx_warn("probe response template is too big: %d\n",
+			     skb->len);
+		ret = -E2BIG;
+		goto out;
+	}
+
+	msg.probe_resp_template_len = skb->len;
+	memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+	memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal update probe rsp len %d bssid %pM\n",
+		    msg.probe_resp_template_len, msg.bssid);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+			   enum ani_ed_type enc_type,
+			   u8 keyidx,
+			   u8 keylen,
+			   u8 *key,
+			   u8 sta_index)
+{
+	struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+	msg_body.set_sta_key_params.sta_index = sta_index;
+	msg_body.set_sta_key_params.enc_type = enc_type;
+
+	msg_body.set_sta_key_params.key[0].id = keyidx;
+	msg_body.set_sta_key_params.key[0].unicast = 1;
+	msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+	msg_body.set_sta_key_params.key[0].pae_role = 0;
+	msg_body.set_sta_key_params.key[0].length = keylen;
+	memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+	msg_body.set_sta_key_params.single_tid_rc = 1;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_set_stakey failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+			   enum ani_ed_type enc_type,
+			   u8 keyidx,
+			   u8 keylen,
+			   u8 *key)
+{
+	struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+	msg_body.bss_idx = 0;
+	msg_body.enc_type = enc_type;
+	msg_body.num_keys = 1;
+	msg_body.keys[0].id = keyidx;
+	msg_body.keys[0].unicast = 0;
+	msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+	msg_body.keys[0].pae_role = 0;
+	msg_body.keys[0].length = keylen;
+	memcpy(msg_body.keys[0].key, key, keylen);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_set_bsskey failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+			      enum ani_ed_type enc_type,
+			      u8 keyidx,
+			      u8 sta_index)
+{
+	struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+	msg_body.sta_idx = sta_index;
+	msg_body.enc_type = enc_type;
+	msg_body.key_id = keyidx;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_remove_stakey failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+			      enum ani_ed_type enc_type,
+			      u8 keyidx)
+{
+	struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+	msg_body.bss_idx = 0;
+	msg_body.enc_type = enc_type;
+	msg_body.key_id = keyidx;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_remove_bsskey failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+	struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+	msg_body.bss_index = vif_priv->bss_index;
+	msg_body.tbtt = vif->bss_conf.sync_tsf;
+	msg_body.dtim_period = vif_priv->dtim_period;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_enter_bmps failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+	struct wcn36xx_hal_exit_bmps_req_msg msg_body;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+	msg_body.bss_index = vif_priv->bss_index;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_exit_bmps failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+	struct wcn36xx_hal_set_power_params_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+	/*
+	 * When the host is down, ignore every second DTIM
+	 */
+	if (ignore_dtim) {
+		msg_body.ignore_dtim = 1;
+		msg_body.dtim_period = 2;
+	}
+	msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_set_power_params failed\n");
+		goto out;
+	}
+
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+/* Note: this function must be called after association, otherwise the
+ * request is invalid
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+			       struct ieee80211_vif *vif,
+			       int packet_type)
+{
+	struct wcn36xx_hal_keep_alive_req_msg msg_body;
+	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+	if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+		msg_body.bss_index = vif_priv->bss_index;
+		msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+		msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+	} else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+		/* TODO: the ARP response packet type is also supported */
+	} else {
+		wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_exit_bmps failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+			     u32 arg3, u32 arg4, u32 arg5)
+{
+	struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+	msg_body.arg1 = arg1;
+	msg_body.arg2 = arg2;
+	msg_body.arg3 = arg3;
+	msg_body.arg4 = arg4;
+	msg_body.arg5 = arg5;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_dump_cmd failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static inline void set_feat_caps(u32 *bitmap,
+				 enum place_holder_in_cap_bitmap cap)
+{
+	int arr_idx, bit_idx;
+
+	if (cap < 0 || cap > 127) {
+		wcn36xx_warn("error cap idx %d\n", cap);
+		return;
+	}
+
+	arr_idx = cap / 32;
+	bit_idx = cap % 32;
+	bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+				enum place_holder_in_cap_bitmap cap)
+{
+	int arr_idx, bit_idx;
+	int ret = 0;
+
+	if (cap < 0 || cap > 127) {
+		wcn36xx_warn("error cap idx %d\n", cap);
+		return -EINVAL;
+	}
+
+	arr_idx = cap / 32;
+	bit_idx = cap % 32;
+	ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+	return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+				enum place_holder_in_cap_bitmap cap)
+{
+	int arr_idx, bit_idx;
+
+	if (cap < 0 || cap > 127) {
+		wcn36xx_warn("error cap idx %d\n", cap);
+		return;
+	}
+
+	arr_idx = cap / 32;
+	bit_idx = cap % 32;
+	bitmap[arr_idx] &= ~(1 << bit_idx);
+}
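+
+/*
+ * Worked example (illustrative only): capability 42 maps to
+ * arr_idx = 42 / 32 = 1 and bit_idx = 42 % 32 = 10, so set_feat_caps()
+ * does bitmap[1] |= (1 << 10) and get_feat_caps() tests that same bit.
+ */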
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_feat_caps_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+	set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+		struct ieee80211_sta *sta,
+		u16 tid,
+		u16 *ssn,
+		u8 direction,
+		u8 sta_index)
+{
+	struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+	msg_body.sta_index = sta_index;
+	memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+	msg_body.dialog_token = 0x10;
+	msg_body.tid = tid;
+
+	/* Immediate BA because Delayed BA is not supported */
+	msg_body.policy = 1;
+	msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+	msg_body.timeout = 0;
+	if (ssn)
+		msg_body.ssn = *ssn;
+	msg_body.direction = direction;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_add_ba_session failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_add_ba_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+	msg_body.session_id = 0;
+	msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_add_ba failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+	struct wcn36xx_hal_del_ba_req_msg msg_body;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+	msg_body.sta_index = sta_index;
+	msg_body.tid = tid;
+	msg_body.direction = 0;
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_del_ba failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+	struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+	struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+	msg_body.session_id = 0;
+	msg_body.candidate_cnt = 1;
+	msg_body.header.len += sizeof(*candidate);
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+		(wcn->hal_buf + sizeof(msg_body));
+	candidate->sta_index = sta_index;
+	candidate->tid_bitmap = 1;
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_trigger_ba failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+	struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+	if (len != sizeof(*rsp)) {
+		wcn36xx_warn("Bad TX complete indication\n");
+		return -EIO;
+	}
+
+	wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+	return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+					 void *buf,
+					 size_t len)
+{
+	struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+	struct ieee80211_vif *vif = NULL;
+	struct wcn36xx_vif *tmp;
+
+	/* Old FW does not have bss index */
+	if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+		list_for_each_entry(tmp, &wcn->vif_list, list) {
+			wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+				    tmp->bss_index);
+			vif = container_of((void *)tmp,
+						 struct ieee80211_vif,
+						 drv_priv);
+			ieee80211_connection_loss(vif);
+		}
+		return 0;
+	}
+
+	if (len != sizeof(*rsp)) {
+		wcn36xx_warn("Corrupted missed beacon indication\n");
+		return -EIO;
+	}
+
+	list_for_each_entry(tmp, &wcn->vif_list, list) {
+		if (tmp->bss_index == rsp->bss_index) {
+			wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+				    rsp->bss_index);
+			vif = container_of((void *)tmp,
+						 struct ieee80211_vif,
+						 drv_priv);
+			ieee80211_connection_loss(vif);
+			return 0;
+		}
+	}
+
+	wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+	return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+					      void *buf,
+					      size_t len)
+{
+	struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+	struct wcn36xx_vif *tmp;
+	struct ieee80211_sta *sta = NULL;
+
+	if (len != sizeof(*rsp)) {
+		wcn36xx_warn("Corrupted delete sta indication\n");
+		return -EIO;
+	}
+
+	list_for_each_entry(tmp, &wcn->vif_list, list) {
+		if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+			sta = container_of((void *)tmp->sta,
+					   struct ieee80211_sta,
+					   drv_priv);
+			wcn36xx_dbg(WCN36XX_DBG_HAL,
+				    "delete station indication %pM index %d\n",
+				    rsp->addr2,
+				    rsp->sta_id);
+			ieee80211_report_low_ack(sta, 0);
+			return 0;
+		}
+	}
+
+	wcn36xx_warn("STA with addr %pM and index %d not found\n",
+		     rsp->addr2,
+		     rsp->sta_id);
+	return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+	struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+	size_t len;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+	len = msg_body.header.len;
+
+	put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+	body->header.len = len;
+	body->len = len - sizeof(*body);
+
+	ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_update_cfg failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+	struct wcn36xx_hal_msg_header *msg_header = buf;
+	struct wcn36xx_hal_ind_msg *msg_ind;
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+	switch (msg_header->msg_type) {
+	case WCN36XX_HAL_START_RSP:
+	case WCN36XX_HAL_CONFIG_STA_RSP:
+	case WCN36XX_HAL_CONFIG_BSS_RSP:
+	case WCN36XX_HAL_ADD_STA_SELF_RSP:
+	case WCN36XX_HAL_STOP_RSP:
+	case WCN36XX_HAL_DEL_STA_SELF_RSP:
+	case WCN36XX_HAL_DELETE_STA_RSP:
+	case WCN36XX_HAL_INIT_SCAN_RSP:
+	case WCN36XX_HAL_START_SCAN_RSP:
+	case WCN36XX_HAL_END_SCAN_RSP:
+	case WCN36XX_HAL_FINISH_SCAN_RSP:
+	case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+	case WCN36XX_HAL_DELETE_BSS_RSP:
+	case WCN36XX_HAL_SEND_BEACON_RSP:
+	case WCN36XX_HAL_SET_LINK_ST_RSP:
+	case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+	case WCN36XX_HAL_SET_BSSKEY_RSP:
+	case WCN36XX_HAL_SET_STAKEY_RSP:
+	case WCN36XX_HAL_RMV_STAKEY_RSP:
+	case WCN36XX_HAL_RMV_BSSKEY_RSP:
+	case WCN36XX_HAL_ENTER_BMPS_RSP:
+	case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+	case WCN36XX_HAL_EXIT_BMPS_RSP:
+	case WCN36XX_HAL_KEEP_ALIVE_RSP:
+	case WCN36XX_HAL_DUMP_COMMAND_RSP:
+	case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+	case WCN36XX_HAL_ADD_BA_RSP:
+	case WCN36XX_HAL_DEL_BA_RSP:
+	case WCN36XX_HAL_TRIGGER_BA_RSP:
+	case WCN36XX_HAL_UPDATE_CFG_RSP:
+	case WCN36XX_HAL_JOIN_RSP:
+	case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+	case WCN36XX_HAL_CH_SWITCH_RSP:
+	case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+		memcpy(wcn->hal_buf, buf, len);
+		wcn->hal_rsp_len = len;
+		complete(&wcn->hal_rsp_compl);
+		break;
+
+	case WCN36XX_HAL_OTA_TX_COMPL_IND:
+	case WCN36XX_HAL_MISSED_BEACON_IND:
+	case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+		mutex_lock(&wcn->hal_ind_mutex);
+		msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+		if (!msg_ind) {
+			wcn36xx_err("failed to allocate an indication message\n");
+			mutex_unlock(&wcn->hal_ind_mutex);
+			return;
+		}
+		msg_ind->msg_len = len;
+		msg_ind->msg = kmalloc(len, GFP_KERNEL);
+		if (!msg_ind->msg) {
+			wcn36xx_err("failed to allocate an indication message\n");
+			kfree(msg_ind);
+			mutex_unlock(&wcn->hal_ind_mutex);
+			return;
+		}
+		memcpy(msg_ind->msg, buf, len);
+		list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+		queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+		wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+		mutex_unlock(&wcn->hal_ind_mutex);
+		break;
+	default:
+		wcn36xx_err("SMD_EVENT (%d) not supported\n",
+			      msg_header->msg_type);
+	}
+}
+
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+	struct wcn36xx *wcn =
+		container_of(work, struct wcn36xx, hal_ind_work);
+	struct wcn36xx_hal_msg_header *msg_header;
+	struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+	mutex_lock(&wcn->hal_ind_mutex);
+
+	hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+				       struct wcn36xx_hal_ind_msg,
+				       list);
+
+	msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+	switch (msg_header->msg_type) {
+	case WCN36XX_HAL_OTA_TX_COMPL_IND:
+		wcn36xx_smd_tx_compl_ind(wcn,
+					 hal_ind_msg->msg,
+					 hal_ind_msg->msg_len);
+		break;
+	case WCN36XX_HAL_MISSED_BEACON_IND:
+		wcn36xx_smd_missed_beacon_ind(wcn,
+					      hal_ind_msg->msg,
+					      hal_ind_msg->msg_len);
+		break;
+	case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+		wcn36xx_smd_delete_sta_context_ind(wcn,
+						   hal_ind_msg->msg,
+						   hal_ind_msg->msg_len);
+		break;
+	default:
+		wcn36xx_err("SMD_EVENT (%d) not supported\n",
+			      msg_header->msg_type);
+	}
+	list_del(&hal_ind_msg->list);
+	kfree(hal_ind_msg->msg);
+	kfree(hal_ind_msg);
+	mutex_unlock(&wcn->hal_ind_mutex);
+}
+
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+	int ret = 0;
+
+	wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+	if (!wcn->hal_ind_wq) {
+		wcn36xx_err("failed to allocate wq\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+	INIT_LIST_HEAD(&wcn->hal_ind_queue);
+	mutex_init(&wcn->hal_ind_mutex);
+
+	ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+	if (ret) {
+		wcn36xx_err("failed to open control channel\n");
+		goto free_wq;
+	}
+
+	return ret;
+
+free_wq:
+	destroy_workqueue(wcn->hal_ind_wq);
+out:
+	return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+	wcn->ctrl_ops->close();
+	destroy_workqueue(wcn->hal_ind_wq);
+	mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 0000000..e7c3901
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE			3072
+
+#define WCN36XX_HAL_BUF_SIZE				4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE			0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY		0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK			0x8000
+
+enum wcn36xx_fw_msg_result {
+	WCN36XX_FW_MSG_RESULT_SUCCESS			= 0,
+	WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC		= 1,
+
+	WCN36XX_FW_MSG_RESULT_MEM_FAIL			= 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+	u32	status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+	struct list_head list;
+	u8 *msg;
+	size_t msg_len;
+};
+
+struct wcn36xx;
+
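+/*
+ * Note on the request helpers below (illustrative summary): each one
+ * serializes on wcn->hal_mutex and follows the same basic flow:
+ *
+ *	INIT_HAL_MSG(msg_body, WCN36XX_HAL_..._REQ);
+ *	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+ *	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ *	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ */
+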
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+			    enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+			    const u8 *sta_mac,
+			    enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta, const u8 *bssid,
+			   bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			    struct sk_buff *skb_beacon, u16 tim_off,
+			    u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+			       struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+				      struct ieee80211_vif *vif,
+				      struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+			   enum ani_ed_type enc_type,
+			   u8 keyidx,
+			   u8 keylen,
+			   u8 *key,
+			   u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+			   enum ani_ed_type enc_type,
+			   u8 keyidx,
+			   u8 keylen,
+			   u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+			      enum ani_ed_type enc_type,
+			      u8 keyidx,
+			      u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+			      enum ani_ed_type enc_type,
+			      u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+			       struct ieee80211_vif *vif,
+			       int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+			     u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+		struct ieee80211_sta *sta,
+		u16 tid,
+		u16 *ssn,
+		u8 direction,
+		u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif	/* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 0000000..b2b60e3
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+	return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
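+
+/*
+ * Worked example (illustrative only): if the top byte of phy_stat0 is
+ * 0x28 (40), get_rssi0() returns 100 - 40 = 60 and the signal reported
+ * to mac80211 below becomes -60 dBm.
+ */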
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+	struct ieee80211_rx_status status;
+	struct ieee80211_hdr *hdr;
+	struct wcn36xx_rx_bd *bd;
+	u16 fc, sn;
+
+	/*
+	 * All fields must be 0, otherwise it can lead to
+	 * unexpected consequences.
+	 */
+	memset(&status, 0, sizeof(status));
+
+	bd = (struct wcn36xx_rx_bd *)skb->data;
+	buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+	wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+			 "BD   <<< ", (char *)bd,
+			 sizeof(struct wcn36xx_rx_bd));
+
+	skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+	skb_pull(skb, bd->pdu.mpdu_header_off);
+
+	status.mactime = 10;
+	status.freq = WCN36XX_CENTER_FREQ(wcn);
+	status.band = WCN36XX_BAND(wcn);
+	status.signal = -get_rssi0(bd);
+	status.antenna = 1;
+	status.rate_idx = 1;
+	status.flag = 0;
+	status.rx_flags = 0;
+	status.flag |= RX_FLAG_IV_STRIPPED |
+		       RX_FLAG_MMIC_STRIPPED |
+		       RX_FLAG_DECRYPTED;
+
+	wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
+		    status.flag,  status.vendor_radiotap_len);
+
+	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = __le16_to_cpu(hdr->frame_control);
+	sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+	if (ieee80211_is_beacon(hdr->frame_control)) {
+		wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+			    skb, skb->len, fc, sn);
+		wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+				 (char *)skb->data, skb->len);
+	} else {
+		wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+			    skb, skb->len, fc, sn);
+		wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+				 (char *)skb->data, skb->len);
+	}
+
+	ieee80211_rx_irqsafe(wcn->hw, skb);
+
+	return 0;
+}
+
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+			       u32 mpdu_header_len,
+			       u32 len,
+			       u16 tid)
+{
+	bd->pdu.mpdu_header_len = mpdu_header_len;
+	bd->pdu.mpdu_header_off = sizeof(*bd);
+	bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+		bd->pdu.mpdu_header_off;
+	bd->pdu.mpdu_len = len;
+	bd->pdu.tid = tid;
+}
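+
+/*
+ * Worked example (illustrative only, assuming no padding in the BD): for
+ * a QoS data frame mpdu_header_len = sizeof(struct ieee80211_qos_hdr) = 26,
+ * the header starts right after the BD (mpdu_header_off = sizeof(*bd))
+ * and the payload at mpdu_data_off = mpdu_header_off + 26.
+ */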
+
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+						  u8 *addr)
+{
+	struct wcn36xx_vif *vif_priv = NULL;
+	struct ieee80211_vif *vif = NULL;
+
+	list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+		vif = container_of((void *)vif_priv,
+				   struct ieee80211_vif,
+				   drv_priv);
+		if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+			return vif_priv;
+	}
+	wcn36xx_warn("vif %pM not found\n", addr);
+	return NULL;
+}
+
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+				struct wcn36xx *wcn,
+				struct wcn36xx_vif **vif_priv,
+				struct wcn36xx_sta *sta_priv,
+				struct ieee80211_hdr *hdr,
+				bool bcast)
+{
+	struct ieee80211_vif *vif = NULL;
+	struct wcn36xx_vif *__vif_priv = NULL;
+
+	bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+	/*
+	 * For non-unicast frames mac80211 does not set the sta pointer, so use
+	 * self_sta_index instead.
+	 */
+	if (sta_priv) {
+		__vif_priv = sta_priv->vif;
+		vif = container_of((void *)__vif_priv,
+				   struct ieee80211_vif,
+				   drv_priv);
+
+		if (vif->type == NL80211_IFTYPE_STATION) {
+			bd->sta_index = sta_priv->bss_sta_index;
+			bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+		} else if (vif->type == NL80211_IFTYPE_AP ||
+			   vif->type == NL80211_IFTYPE_ADHOC ||
+			   vif->type == NL80211_IFTYPE_MESH_POINT) {
+			bd->sta_index = sta_priv->sta_index;
+			bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+		}
+	} else {
+		__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+		bd->sta_index = __vif_priv->self_sta_index;
+		bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+	}
+
+	bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+	if (ieee80211_is_nullfunc(hdr->frame_control) ||
+	   (sta_priv && !sta_priv->is_data_encrypted))
+		bd->dpu_ne = 1;
+
+	if (bcast) {
+		bd->ub = 1;
+		bd->ack_policy = 1;
+	}
+	*vif_priv = __vif_priv;
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+				struct wcn36xx *wcn,
+				struct wcn36xx_vif **vif_priv,
+				struct ieee80211_hdr *hdr,
+				bool bcast)
+{
+	struct wcn36xx_vif *__vif_priv =
+		get_vif_by_addr(wcn, hdr->addr2);
+	bd->sta_index = __vif_priv->self_sta_index;
+	bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+	bd->dpu_ne = 1;
+
+	/* default rate for unicast */
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+			WCN36XX_BD_RATE_CTRL :
+			WCN36XX_BD_RATE_MGMT;
+	else if (ieee80211_is_ctl(hdr->frame_control))
+		bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+	else
+		wcn36xx_warn("frame control type unknown\n");
+
+	/*
+	 * In the joining state, trick the hardware into sending the probe
+	 * request as unicast even if the address is broadcast.
+	 */
+	if (__vif_priv->is_joining &&
+	    ieee80211_is_probe_req(hdr->frame_control))
+		bcast = false;
+
+	if (bcast) {
+		/* broadcast */
+		bd->ub = 1;
+		/* No ack needed for non-unicast frames */
+		bd->ack_policy = 1;
+		bd->queue_id = WCN36XX_TX_B_WQ_ID;
+	} else {
+		bd->queue_id = WCN36XX_TX_U_WQ_ID;
+	}
+	*vif_priv = __vif_priv;
+}
+
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+		     struct wcn36xx_sta *sta_priv,
+		     struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct wcn36xx_vif *vif_priv = NULL;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	unsigned long flags;
+	bool is_low = ieee80211_is_data(hdr->frame_control);
+	bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+		is_multicast_ether_addr(hdr->addr1);
+	struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+	if (!bd) {
+		/*
+		 * TX DXE descriptors are used in pairs: one for the BD and
+		 * one for the actual frame. The BD DXE has a preallocated
+		 * buffer while the skb one does not. If this isn't true,
+		 * something is really weird. TODO: recover from this situation
+		 */
+
+		wcn36xx_err("bd address may not be NULL for BD DXE\n");
+		return -EINVAL;
+	}
+
+	memset(bd, 0, sizeof(*bd));
+
+	wcn36xx_dbg(WCN36XX_DBG_TX,
+		    "tx skb %p len %d fc %04x sn %d %s %s\n",
+		    skb, skb->len, __le16_to_cpu(hdr->frame_control),
+		    IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+		    is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+	bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+	bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+	if (bd->tx_comp) {
+		wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+		spin_lock_irqsave(&wcn->dxe_lock, flags);
+		if (wcn->tx_ack_skb) {
+			spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+			wcn36xx_warn("tx_ack_skb already set\n");
+			return -EINVAL;
+		}
+
+		wcn->tx_ack_skb = skb;
+		spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+		/* Only one at a time is supported by fw. Stop the TX queues
+		 * until the ack status gets back.
+		 *
+		 * TODO: Add watchdog in case FW does not answer
+		 */
+		ieee80211_stop_queues(wcn->hw);
+	}
+
+	/* Data frames are served first */
+	if (is_low) {
+		wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+		wcn36xx_set_tx_pdu(bd,
+			   ieee80211_is_data_qos(hdr->frame_control) ?
+			   sizeof(struct ieee80211_qos_hdr) :
+			   sizeof(struct ieee80211_hdr_3addr),
+			   skb->len, sta_priv ? sta_priv->tid : 0);
+	} else {
+		/* MGMT and CTRL frames are handled here */
+		wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+		wcn36xx_set_tx_pdu(bd,
+			   ieee80211_is_data_qos(hdr->frame_control) ?
+			   sizeof(struct ieee80211_qos_hdr) :
+			   sizeof(struct ieee80211_hdr_3addr),
+			   skb->len, WCN36XX_TID);
+	}
+
+	buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+	bd->tx_bd_sign = 0xbdbdbdbd;
+
+	return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 0000000..bbfbcf8
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN	24
+#define WCN36XX_BMU_WQ_TX		25
+#define WCN36XX_TID			7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID		0xA
+#define WCN36XX_TX_U_WQ_ID		0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
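+
+/*
+ * Illustrative mapping (see wcn36xx_set_tx_data/_mgmt in txrx.c): data
+ * frames use WCN36XX_BD_RATE_DATA, management frames WCN36XX_BD_RATE_MGMT
+ * (or WCN36XX_BD_RATE_CTRL on 5 GHz) and control frames
+ * WCN36XX_BD_RATE_CTRL.
+ */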
+
+struct wcn36xx_pdu {
+	u32	dpu_fb:8;
+	u32	adu_fb:8;
+	u32	pdu_id:16;
+
+	/* 0x04*/
+	u32	tail_pdu_idx:16;
+	u32	head_pdu_idx:16;
+
+	/* 0x08*/
+	u32	pdu_count:7;
+	u32	mpdu_data_off:9;
+	u32	mpdu_header_off:8;
+	u32	mpdu_header_len:8;
+
+	/* 0x0c*/
+	u32	reserved4:8;
+	u32	tid:4;
+	u32	reserved3:4;
+	u32	mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+	u32	bdt:2;
+	u32	ft:1;
+	u32	dpu_ne:1;
+	u32	rx_key_id:3;
+	u32	ub:1;
+	u32	rmf:1;
+	u32	uma_bypass:1;
+	u32	csr11:1;
+	u32	reserved0:1;
+	u32	scan_learn:1;
+	u32	rx_ch:4;
+	u32	rtsf:1;
+	u32	bsf:1;
+	u32	a2hf:1;
+	u32	st_auf:1;
+	u32	dpu_sign:3;
+	u32	dpu_rf:8;
+
+	struct wcn36xx_pdu pdu;
+
+	/* 0x14*/
+	u32	addr3:8;
+	u32	addr2:8;
+	u32	addr1:8;
+	u32	dpu_desc_idx:8;
+
+	/* 0x18*/
+	u32	rxp_flags:23;
+	u32	rate_id:9;
+
+	u32	phy_stat0;
+	u32	phy_stat1;
+
+	/* 0x24 */
+	u32	rx_times;
+
+	u32	pmi_cmd[6];
+
+	/* 0x40 */
+	u32	reserved7:4;
+	u32	reorder_slot_id:6;
+	u32	reorder_fwd_id:6;
+	u32	reserved6:12;
+	u32	reorder_code:4;
+
+	/* 0x44 */
+	u32	exp_seq_num:12;
+	u32	cur_seq_num:12;
+	u32	fr_type_subtype:8;
+
+	/* 0x48 */
+	u32	msdu_size:16;
+	u32	sub_fr_id:4;
+	u32	proc_order:4;
+	u32	reserved9:4;
+	u32	aef:1;
+	u32	lsf:1;
+	u32	esf:1;
+	u32	asf:1;
+};
+
+struct wcn36xx_tx_bd {
+	u32	bdt:2;
+	u32	ft:1;
+	u32	dpu_ne:1;
+	u32	fw_tx_comp:1;
+	u32	tx_comp:1;
+	u32	reserved1:1;
+	u32	ub:1;
+	u32	rmf:1;
+	u32	reserved0:12;
+	u32	dpu_sign:3;
+	u32	dpu_rf:8;
+
+	struct wcn36xx_pdu pdu;
+
+	/* 0x14*/
+	u32	reserved5:7;
+	u32	queue_id:5;
+	u32	bd_rate:2;
+	u32	ack_policy:2;
+	u32	sta_index:8;
+	u32	dpu_desc_idx:8;
+
+	u32	tx_bd_sign;
+	u32	reserved6;
+	u32	dxe_start_time;
+	u32	dxe_end_time;
+
+	/*u32	tcp_udp_start_off:10;
+	u32	header_cks:16;
+	u32	reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+		     struct wcn36xx_sta *sta_priv,
+		     struct sk_buff *skb);
+
+#endif	/* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 0000000..58b6383
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE               "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+	WCN36XX_DBG_DXE		= 0x00000001,
+	WCN36XX_DBG_DXE_DUMP	= 0x00000002,
+	WCN36XX_DBG_SMD		= 0x00000004,
+	WCN36XX_DBG_SMD_DUMP	= 0x00000008,
+	WCN36XX_DBG_RX		= 0x00000010,
+	WCN36XX_DBG_RX_DUMP	= 0x00000020,
+	WCN36XX_DBG_TX		= 0x00000040,
+	WCN36XX_DBG_TX_DUMP	= 0x00000080,
+	WCN36XX_DBG_HAL		= 0x00000100,
+	WCN36XX_DBG_HAL_DUMP	= 0x00000200,
+	WCN36XX_DBG_MAC		= 0x00000400,
+	WCN36XX_DBG_BEACON	= 0x00000800,
+	WCN36XX_DBG_BEACON_DUMP	= 0x00001000,
+	WCN36XX_DBG_PMC		= 0x00002000,
+	WCN36XX_DBG_PMC_DUMP	= 0x00004000,
+	WCN36XX_DBG_ANY		= 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...)				\
+	printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+
+#define wcn36xx_warn(fmt, arg...)				\
+	printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...)		\
+	printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do {			\
+	if (wcn36xx_dbg_mask & mask)					\
+		printk(KERN_DEBUG pr_fmt(fmt), ##arg);	\
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do {	\
+	if (wcn36xx_dbg_mask & mask)					\
+		print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str),	\
+			       DUMP_PREFIX_OFFSET, 32, 1,	\
+			       buf, len, false);		\
+} while (0)
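+
+/*
+ * Usage sketch (illustrative only): debug categories can be OR'ed, e.g.
+ * with wcn36xx_dbg_mask = WCN36XX_DBG_HAL | WCN36XX_DBG_SMD a call such
+ * as wcn36xx_dbg(WCN36XX_DBG_HAL, "bss_index %d\n", idx) prints, while
+ * WCN36XX_DBG_TX messages stay silent.
+ */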
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+	int i;
+	for (i = 0; i < len; i++)
+		buf[i] = cpu_to_be32(buf[i]);
+}
+
+struct nv_data {
+	int	is_valid;
+	u8	table;
+};
+
+/* Interface for platform control path
+ *
+ * @open: hook must be called when wcn36xx wants to open control channel.
+ * @tx: sends a buffer.
+ */
+struct wcn36xx_platform_ctrl_ops {
+	int (*open)(void *drv_priv, void *rsp_cb);
+	void (*close)(void);
+	int (*tx)(char *buf, size_t len);
+	int (*get_hw_mac)(u8 *addr);
+	int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
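+
+/*
+ * A platform backend would be wired up roughly like this (sketch only;
+ * the my_smd_* callbacks are hypothetical and not part of this driver):
+ *
+ *	static struct wcn36xx_platform_ctrl_ops my_smd_ops = {
+ *		.open			= my_smd_open,
+ *		.close			= my_smd_close,
+ *		.tx			= my_smd_tx,
+ *		.get_hw_mac		= my_smd_get_hw_mac,
+ *		.smsm_change_state	= my_smd_smsm_change_state,
+ *	};
+ */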
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: bss_index is initially set to 0xFF. bss_index is received from
+ * HW after first config_bss call and must be used in delete_bss and
+ * enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+	struct list_head list;
+	struct wcn36xx_sta *sta;
+	u8 dtim_period;
+	enum ani_ed_type encrypt_type;
+	bool is_joining;
+	struct wcn36xx_hal_mac_ssid ssid;
+
+	/* Power management */
+	enum wcn36xx_power_state pw_state;
+
+	u8 bss_index;
+	u8 ucast_dpu_signature;
+	/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+	u8 self_sta_index;
+	u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ * used in both SMD channel and TX BD. See the table below for when it is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * |		  |	STA	|	AP	|
+ * |______________|_____________|_______________|
+ * |    TX BD     |bss_sta_index|   sta_index   |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index|   sta_index	|
+ * |______________|_____________|_______________|
+ * |smd_delete_sta|  sta_index  |   sta_index	|
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+	struct wcn36xx_vif *vif;
+	u16 aid;
+	u16 tid;
+	u8 sta_index;
+	u8 dpu_desc_index;
+	u8 bss_sta_index;
+	u8 bss_dpu_desc_index;
+	bool is_data_encrypted;
+	/* Rates */
+	struct wcn36xx_hal_supported_rates supported_rates;
+};
+
+struct wcn36xx_dxe_ch;
+
+struct wcn36xx {
+	struct ieee80211_hw	*hw;
+	struct device		*dev;
+	struct list_head	vif_list;
+
+	u8			fw_revision;
+	u8			fw_version;
+	u8			fw_minor;
+	u8			fw_major;
+
+	/* extra byte for the NULL termination */
+	u8			crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+	u8			wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+	/* IRQs */
+	int			tx_irq;
+	int			rx_irq;
+	void __iomem		*mmio;
+
+	struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+	/*
+	 * hal_buf must be protected with hal_mutex to guarantee
+	 * that all messages are sent one after another
+	 */
+	u8			*hal_buf;
+	size_t			hal_rsp_len;
+	struct mutex		hal_mutex;
+	struct completion	hal_rsp_compl;
+	struct workqueue_struct	*hal_ind_wq;
+	struct work_struct	hal_ind_work;
+	struct mutex		hal_ind_mutex;
+	struct list_head	hal_ind_queue;
+
+	/* DXE channels */
+	struct wcn36xx_dxe_ch	dxe_tx_l_ch;	/* TX low */
+	struct wcn36xx_dxe_ch	dxe_tx_h_ch;	/* TX high */
+	struct wcn36xx_dxe_ch	dxe_rx_l_ch;	/* RX low */
+	struct wcn36xx_dxe_ch	dxe_rx_h_ch;	/* RX high */
+
+	/* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+	spinlock_t	dxe_lock;
+	bool                    queues_stopped;
+
+	/* Memory pools */
+	struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+	struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+	struct sk_buff		*tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+	/* Debug file system entry */
+	struct wcn36xx_dfs_entry    dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+					 u8 major,
+					 u8 minor,
+					 u8 version,
+					 u8 revision)
+{
+	return (wcn->fw_major == major &&
+		wcn->fw_minor == minor &&
+		wcn->fw_version == version &&
+		wcn->fw_revision == revision);
+}
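+
+/*
+ * Example (illustrative): smd.c calls wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)
+ * to detect firmware 1.2.2.24, which uses the current config_bss layout;
+ * any other version goes through the _v1 variant.
+ */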
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif	/* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 61c302a..5b34076 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -316,8 +316,8 @@
 	}
 	conn.channel = ch - 1;
 
-	memcpy(conn.bssid, bss->bssid, 6);
-	memcpy(conn.dst_mac, bss->bssid, 6);
+	memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+	memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
 	/*
 	 * FW don't support scan after connection attempt
 	 */
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 0a2844c..fd30cdd 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -250,7 +250,7 @@
 
 	/* init after reset */
 	wil->pending_connect_cid = -1;
-	INIT_COMPLETION(wil->wmi_ready);
+	reinit_completion(&wil->wmi_ready);
 
 	/* TODO: release MAC reset */
 	wil6210_enable_irq(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7a..eeceab3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@
 	pci_iounmap(pdev, wil->csr);
 	pci_release_region(pdev, 0);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index b827d51..0d950f2 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -844,18 +844,18 @@
 	if (priv->wep_is_on)
 		frame_ctl |= IEEE80211_FCTL_PROTECTED;
 	if (priv->operating_mode == IW_MODE_ADHOC) {
-		skb_copy_from_linear_data(skb, &header.addr1, 6);
-		memcpy(&header.addr2, dev->dev_addr, 6);
-		memcpy(&header.addr3, priv->BSSID, 6);
+		skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+		memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+		memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
 	} else {
 		frame_ctl |= IEEE80211_FCTL_TODS;
-		memcpy(&header.addr1, priv->CurrentBSSID, 6);
-		memcpy(&header.addr2, dev->dev_addr, 6);
-		skb_copy_from_linear_data(skb, &header.addr3, 6);
+		memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+		memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+		skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
 	}
 
 	if (priv->use_wpa)
-		memcpy(&header.addr4, SNAP_RFC1024, 6);
+		memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
 
 	header.frame_control = cpu_to_le16(frame_ctl);
 	/* Copy the wireless header into the card */
@@ -929,11 +929,11 @@
 		}
 	}
 
-	memcpy(skbp, header->addr1, 6); /* destination address */
+	memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
 	if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-		memcpy(&skbp[6], header->addr3, 6);
+		memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
 	else
-		memcpy(&skbp[6], header->addr2, 6); /* source address */
+		memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
 
 	skb->protocol = eth_type_trans(skb, priv->dev);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@
 			 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
 			 u8 frag_no, int more_frags)
 {
-	u8 mac4[6];
-	u8 source[6];
+	u8 mac4[ETH_ALEN];
+	u8 source[ETH_ALEN];
 	struct sk_buff *skb;
 
 	if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-		memcpy(source, header->addr3, 6);
+		memcpy(source, header->addr3, ETH_ALEN);
 	else
-		memcpy(source, header->addr2, 6);
+		memcpy(source, header->addr2, ETH_ALEN);
 
 	rx_packet_loc += 24; /* skip header */
 
@@ -984,9 +984,9 @@
 		msdu_size -= 4;
 
 	if (frag_no == 0) { /* first fragment */
-		atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
-		msdu_size -= 6;
-		rx_packet_loc += 6;
+		atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+		msdu_size -= ETH_ALEN;
+		rx_packet_loc += ETH_ALEN;
 
 		if (priv->do_rx_crc)
 			crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@
 		priv->frag_seq = seq_no;
 		priv->frag_no = 1;
 		priv->frag_len = msdu_size;
-		memcpy(priv->frag_source, source, 6);
-		memcpy(&priv->rx_buf[6], source, 6);
-		memcpy(priv->rx_buf, header->addr1, 6);
+		memcpy(priv->frag_source, source, ETH_ALEN);
+		memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+		memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
 
 		atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
 
@@ -1006,13 +1006,13 @@
 			atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
 			if ((crc ^ 0xffffffff) != netcrc) {
 				priv->dev->stats.rx_crc_errors++;
-				memset(priv->frag_source, 0xff, 6);
+				memset(priv->frag_source, 0xff, ETH_ALEN);
 			}
 		}
 
 	} else if (priv->frag_no == frag_no &&
 		   priv->frag_seq == seq_no &&
-		   memcmp(priv->frag_source, source, 6) == 0) {
+		   memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
 
 		atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
 				   rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@
 			atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
 			if ((crc ^ 0xffffffff) != netcrc) {
 				priv->dev->stats.rx_crc_errors++;
-				memset(priv->frag_source, 0xff, 6);
+				memset(priv->frag_source, 0xff, ETH_ALEN);
 				more_frags = 1; /* don't send broken assembly */
 			}
 		}
@@ -1033,7 +1033,7 @@
 		priv->frag_no++;
 
 		if (!more_frags) { /* last one */
-			memset(priv->frag_source, 0xff, 6);
+			memset(priv->frag_source, 0xff, ETH_ALEN);
 			if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
 				priv->dev->stats.rx_dropped++;
 			} else {
@@ -1129,7 +1129,7 @@
 			atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
 			/* we use the same buffer for frag reassembly and control packets */
-			memset(priv->frag_source, 0xff, 6);
+			memset(priv->frag_source, 0xff, ETH_ALEN);
 
 			if (priv->do_rx_crc) {
 				/* last 4 octets is crc */
@@ -1557,7 +1557,7 @@
 	priv->last_qual = jiffies;
 	priv->last_beacon_timestamp = 0;
 	memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-	memset(priv->BSSID, 0, 6);
+	memset(priv->BSSID, 0, ETH_ALEN);
 	priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
 	priv->station_was_associated = 0;
 
@@ -1718,7 +1718,7 @@
 			 char *extra)
 {
 	struct atmel_private *priv = netdev_priv(dev);
-	memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+	memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
 	awrq->sa_family = ARPHRD_ETHER;
 
 	return 0;
@@ -2356,7 +2356,7 @@
 	for (i = 0; i < priv->BSS_list_entries; i++) {
 		iwe.cmd = SIOCGIWAP;
 		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-		memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+		memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
 		current_ev = iwe_stream_add_event(info, current_ev,
 						  extra + IW_SCAN_MAX_DATA,
 						  &iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@
 static void atmel_scan(struct atmel_private *priv, int specific_ssid)
 {
 	struct {
-		u8 BSSID[6];
+		u8 BSSID[ETH_ALEN];
 		u8 SSID[MAX_SSID_LENGTH];
 		u8 scan_type;
 		u8 channel;
@@ -2771,7 +2771,7 @@
 		u8 SSID_size;
 	} cmd;
 
-	memset(cmd.BSSID, 0xff, 6);
+	memset(cmd.BSSID, 0xff, ETH_ALEN);
 
 	if (priv->fast_scan) {
 		cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@
 
 	cmd.SSID_size = priv->SSID_size;
 	memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-	memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+	memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
 	cmd.channel = (priv->channel & 0x7f);
 	cmd.BSS_type = type;
 	cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@
 
 	cmd.SSID_size = priv->SSID_size;
 	memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-	memcpy(cmd.BSSID, priv->BSSID, 6);
+	memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
 	cmd.BSS_type = type;
 	cmd.channel = (priv->channel & 0x7f);
 
@@ -2883,9 +2883,9 @@
 	header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
 	header.duration_id = cpu_to_le16(0x8000);
 	header.seq_ctrl = 0;
-	memcpy(header.addr1, priv->CurrentBSSID, 6);
-	memcpy(header.addr2, priv->dev->dev_addr, 6);
-	memcpy(header.addr3, priv->CurrentBSSID, 6);
+	memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+	memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+	memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
 	if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
 		/* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@
 	struct ass_req_format {
 		__le16 capability;
 		__le16 listen_interval;
-		u8 ap[6]; /* nothing after here directly accessible */
+		u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
 		u8 ssid_el_id;
 		u8 ssid_len;
 		u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@
 	header.duration_id = cpu_to_le16(0x8000);
 	header.seq_ctrl = 0;
 
-	memcpy(header.addr1, priv->CurrentBSSID, 6);
-	memcpy(header.addr2, priv->dev->dev_addr, 6);
-	memcpy(header.addr3, priv->CurrentBSSID, 6);
+	memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+	memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+	memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
 	body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
 	if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@
 
 	/* current AP address - only in reassoc frame */
 	if (is_reassoc) {
-		memcpy(body.ap, priv->CurrentBSSID, 6);
+		memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
 		ssid_el_p = &body.ssid_el_id;
 		bodysize = 18 + priv->SSID_size;
 	} else {
@@ -3021,7 +3021,7 @@
 	int i, index;
 
 	for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
-		if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+		if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
 			index = i;
 
 	/* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@
 		if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
 			return;
 		index = priv->BSS_list_entries++;
-		memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+		memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
 		priv->BSSinfo[index].RSSI = rssi;
 	} else {
 		if (rssi > priv->BSSinfo[index].RSSI)
@@ -3212,7 +3212,7 @@
 	if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
 	    status != WLAN_STATUS_ASSOC_DENIED_RATES &&
 	    status != WLAN_STATUS_CAPS_UNSUPPORTED &&
-	    priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+	    priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
 		mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
 		priv->ReAssociationRequestRetryCnt++;
 		send_association_request(priv, 1);
@@ -3235,7 +3235,7 @@
 {
 	struct bss_info *bss =  &priv->BSSinfo[bss_index];
 
-	memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+	memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
 	memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
 
 	/* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@
 				0x00, 0x04, 0x25, 0x00, 0x00, 0x00
 			};
 			printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
-			memcpy(dev->dev_addr, default_mac, 6);
+			memcpy(dev->dev_addr, default_mac, ETH_ALEN);
 		}
 	}
 
@@ -3819,7 +3819,7 @@
 
 	struct { /* NB this is matched to the hardware, don't change. */
 		u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
-		u8 receiver_address[6];
+		u8 receiver_address[ETH_ALEN];
 		u8 wep_is_on;
 		u8 default_key; /* 0..3 */
 		u8 group_key;
@@ -3837,7 +3837,7 @@
 
 	mib.wep_is_on = priv->wep_is_on;
 	mib.exclude_unencrypted = priv->exclude_unencrypted;
-	memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+	memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
 
 	/* zero all the keys before adding in valid ones. */
 	memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
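
    All of the atmel.c hunks above are one mechanical substitution: the magic
    number 6 becomes ETH_ALEN, the MAC-address length constant from
    <linux/if_ether.h>. A minimal illustration of the idiom (standalone
    sketch, not taken from the driver; copy_mac is a hypothetical helper):

        #include <linux/if_ether.h>     /* defines ETH_ALEN (6) */
        #include <string.h>

        /* Self-documenting: the length is tied to its meaning, not a bare 6. */
        static void copy_mac(unsigned char dst[ETH_ALEN],
                             const unsigned char src[ETH_ALEN])
        {
                memcpy(dst, src, ETH_ALEN);
        }
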
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c51d2dc..1d7982a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1065,12 +1065,9 @@
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = dma_set_mask(dev->dev->dma_dev, mask);
-		if (!err) {
-			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-			if (!err)
-				break;
-		}
+		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+		if (!err)
+			break;
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = true;
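
    b43 here, and b43legacy below, replace the back-to-back dma_set_mask() /
    dma_set_coherent_mask() pair with dma_set_mask_and_coherent(), which sets
    the streaming and coherent masks together and fails as a unit. The
    surrounding fallback loop then reduces to (condensed sketch of the hunk
    above, not the literal driver code):

        #include <linux/dma-mapping.h>

        /* Request the widest DMA mask the platform accepts; any device
         * that can address 64 bits can also address 32. */
        static int set_widest_dma_mask(struct device *dev)
        {
                u64 mask = DMA_BIT_MASK(64);
                int err;

                while ((err = dma_set_mask_and_coherent(dev, mask)) != 0) {
                        if (mask != DMA_BIT_MASK(64))
                                break;          /* 32-bit failed too */
                        mask = DMA_BIT_MASK(32);
                }
                return err;
        }
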
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 7c970d3..05ee7f1 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -164,7 +164,8 @@
 		}
 		en_addr = en_addrs[override][i];
 
-		val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+		if (e)
+			val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
 
 		if (off) {
 			b43_phy_mask(dev, en_addr, ~en_mask);
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a..4ae63f4 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -278,7 +278,7 @@
 	else
 		txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
 	txhdr->mac_frame_ctl = wlhdr->frame_control;
-	memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+	memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
 	/* Calculate duration for fallback rate */
 	if ((rate_fb == rate) ||
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 42eb26c..b2ed179 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -806,12 +806,9 @@
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = dma_set_mask(dev->dev->dma_dev, mask);
-		if (!err) {
-			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-			if (!err)
-				break;
-		}
+		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+		if (!err)
+			break;
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = true;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 849a28c..86588c9 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -215,7 +215,7 @@
 	rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
 
 	txhdr->mac_frame_ctl = wlhdr->frame_control;
-	memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+	memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
 	/* Calculate duration for fallback rate */
 	if ((rate_fb->hw_value == rate) ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e13b1a6..3e10b80 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -26,7 +26,6 @@
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
 
 #include <defs.h>
@@ -239,7 +238,9 @@
 		func_num = SDIO_FUNC_1;
 		reg_size = 4;
 
-		brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+		ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+		if (ret)
+			goto done;
 	}
 
 	do {
@@ -255,6 +256,7 @@
 						       func_num, addr, data, 4);
 	} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
 
+done:
 	if (ret != 0)
 		brcmf_err("failed with %d\n", ret);
 
@@ -315,8 +317,36 @@
 		*ret = retval;
 }
 
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+			     bool write, u32 addr, struct sk_buff *pkt)
+{
+	unsigned int req_sz;
+
+	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+	if (brcmf_pm_resume_error(sdiodev))
+		return -EIO;
+
+	/* Single skb use the standard mmc interface */
+	req_sz = pkt->len + 3;
+	req_sz &= (uint)~3;
+
+	if (write)
+		return sdio_memcpy_toio(sdiodev->func[fn], addr,
+					((u8 *)(pkt->data)),
+					req_sz);
+	else if (fn == 1)
+		return sdio_memcpy_fromio(sdiodev->func[fn],
+					  ((u8 *)(pkt->data)),
+					  addr, req_sz);
+	else
+		/* function 2 read is FIFO operation */
+		return sdio_readsb(sdiodev->func[fn],
+				   ((u8 *)(pkt->data)), addr,
+				   req_sz);
+}
+
 /**
- * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * brcmf_sdio_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
  * @fn: SDIO function number
  * @write: direction flag
@@ -327,12 +357,13 @@
  * stack for block data access. It assumes that the skb passed down by the
  * caller has already been padded and aligned.
  */
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
-			     bool write, u32 addr, struct sk_buff_head *pktlist)
+static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+				bool write, u32 addr,
+				struct sk_buff_head *pktlist)
 {
 	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
-	unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
-	unsigned short max_seg_sz, seg_sz;
+	unsigned int max_req_sz, orig_offset, dst_offset;
+	unsigned short max_seg_cnt, seg_sz;
 	unsigned char *pkt_data, *orig_data, *dst_data;
 	struct sk_buff *pkt_next = NULL, *local_pkt_next;
 	struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@
 	struct mmc_data mmc_dat;
 	struct sg_table st;
 	struct scatterlist *sgl;
-	struct mmc_host *host;
 	int ret = 0;
 
 	if (!pktlist->qlen)
@@ -351,27 +381,6 @@
 	if (brcmf_pm_resume_error(sdiodev))
 		return -EIO;
 
-	/* Single skb use the standard mmc interface */
-	if (pktlist->qlen == 1) {
-		pkt_next = pktlist->next;
-		req_sz = pkt_next->len + 3;
-		req_sz &= (uint)~3;
-
-		if (write)
-			return sdio_memcpy_toio(sdiodev->func[fn], addr,
-						((u8 *)(pkt_next->data)),
-						req_sz);
-		else if (fn == 1)
-			return sdio_memcpy_fromio(sdiodev->func[fn],
-						  ((u8 *)(pkt_next->data)),
-						  addr, req_sz);
-		else
-			/* function 2 read is FIFO operation */
-			return sdio_readsb(sdiodev->func[fn],
-					   ((u8 *)(pkt_next->data)), addr,
-					   req_sz);
-	}
-
 	target_list = pktlist;
 	/* for host with broken sg support, prepare a page aligned list */
 	__skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@
 		target_list = &local_list;
 	}
 
-	host = sdiodev->func[fn]->card->host;
 	func_blk_sz = sdiodev->func[fn]->cur_blksize;
-	/* Blocks per command is limited by host count, host transfer
-	 * size and the maximum for IO_RW_EXTENDED of 511 blocks.
-	 */
-	max_blks = min_t(unsigned int, host->max_blk_count, 511u);
-	max_req_sz = min_t(unsigned int, host->max_req_size,
-			   max_blks * func_blk_sz);
-	max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
-	max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+	max_req_sz = sdiodev->max_request_size;
+	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+			    target_list->qlen);
 	seg_sz = target_list->qlen;
 	pkt_offset = 0;
 	pkt_next = target_list->next;
 
-	if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+	if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
 		ret = -ENOMEM;
 		goto exit;
 	}
 
+	memset(&mmc_req, 0, sizeof(struct mmc_request));
+	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+	memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+	mmc_dat.sg = st.sgl;
+	mmc_dat.blksz = func_blk_sz;
+	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
+	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
+	mmc_cmd.arg |= 1<<27;			/* block mode */
+	/* for function 1 the addr will be incremented */
+	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+	mmc_req.cmd = &mmc_cmd;
+	mmc_req.data = &mmc_dat;
+
 	while (seg_sz) {
 		req_sz = 0;
 		sg_cnt = 0;
-		memset(&mmc_req, 0, sizeof(struct mmc_request));
-		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
-		memset(&mmc_dat, 0, sizeof(struct mmc_data));
 		sgl = st.sgl;
 		/* prep sg table */
 		while (pkt_next != (struct sk_buff *)target_list) {
 			pkt_data = pkt_next->data + pkt_offset;
 			sg_data_sz = pkt_next->len - pkt_offset;
-			if (sg_data_sz > host->max_seg_size)
-				sg_data_sz = host->max_seg_size;
+			if (sg_data_sz > sdiodev->max_segment_size)
+				sg_data_sz = sdiodev->max_segment_size;
 			if (sg_data_sz > max_req_sz - req_sz)
 				sg_data_sz = max_req_sz - req_sz;
 
@@ -444,7 +461,7 @@
 				pkt_next = pkt_next->next;
 			}
 
-			if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
 				break;
 		}
 		seg_sz -= sg_cnt;
@@ -455,27 +472,17 @@
 			ret = -ENOTBLK;
 			goto exit;
 		}
-		mmc_dat.sg = st.sgl;
+
 		mmc_dat.sg_len = sg_cnt;
-		mmc_dat.blksz = func_blk_sz;
 		mmc_dat.blocks = req_sz / func_blk_sz;
-		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
-		mmc_cmd.opcode = SD_IO_RW_EXTENDED;
-		mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
-		mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
-		mmc_cmd.arg |= 1<<27;			/* block mode */
-		/* incrementing addr for function 1 */
-		mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
 		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
 		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
-		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
-		mmc_req.cmd = &mmc_cmd;
-		mmc_req.data = &mmc_dat;
+		/* incrementing addr for function 1 */
 		if (fn == 1)
 			addr += req_sz;
 
 		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
-		mmc_wait_for_req(host, &mmc_req);
+		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
 
 		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
 		if (ret != 0) {
@@ -546,7 +553,6 @@
 {
 	uint width;
 	int err = 0;
-	struct sk_buff_head pkt_list;
 
 	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
 		  fn, addr, pkt->len);
@@ -556,19 +562,17 @@
 	if (err)
 		goto done;
 
-	skb_queue_head_init(&pkt_list);
-	skb_queue_tail(&pkt_list, pkt);
-	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
-	skb_dequeue_tail(&pkt_list);
+	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
 
 done:
 	return err;
 }
 
 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			    uint flags, struct sk_buff_head *pktq)
+			    uint flags, struct sk_buff_head *pktq, uint totlen)
 {
-	uint incr_fix;
+	struct sk_buff *glom_skb;
+	struct sk_buff *skb;
 	uint width;
 	int err = 0;
 
@@ -580,8 +584,22 @@
 	if (err)
 		goto done;
 
-	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
-	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
+	if (pktq->qlen == 1)
+		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+	else if (!sdiodev->sg_support) {
+		glom_skb = brcmu_pkt_buf_get_skb(totlen);
+		if (!glom_skb)
+			return -ENOMEM;
+		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+		if (err)
+			goto done;
+
+		skb_queue_walk(pktq, skb) {
+			memcpy(skb->data, glom_skb->data, skb->len);
+			skb_pull(glom_skb, skb->len);
+		}
+	} else
+		err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
 
 done:
 	return err;
@@ -592,7 +610,7 @@
 		      uint flags, u8 *buf, uint nbytes)
 {
 	struct sk_buff *mypkt;
-	struct sk_buff_head pktq;
+	uint width;
 	int err;
 
 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@
 	}
 
 	memcpy(mypkt->data, buf, nbytes);
-	__skb_queue_head_init(&pktq);
-	__skb_queue_tail(&pktq, mypkt);
-	err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
-	__skb_dequeue_tail(&pktq);
+
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+
+	if (!err)
+		err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
 
 	brcmu_pkt_buf_free_skb(mypkt);
 	return err;
@@ -617,16 +637,26 @@
 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 		      uint flags, struct sk_buff_head *pktq)
 {
+	struct sk_buff *skb;
 	uint width;
-	int err = 0;
+	int err;
 
 	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
 		  fn, addr, pktq->qlen);
 
 	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	brcmf_sdio_addrprep(sdiodev, width, &addr);
+	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+	if (err)
+		return err;
 
-	err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
+	if (pktq->qlen == 1 || !sdiodev->sg_support)
+		skb_queue_walk(pktq, skb) {
+			err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+			if (err)
+				break;
+		}
+	else
+		err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
 
 	return err;
 }
@@ -639,7 +669,6 @@
 	struct sk_buff *pkt;
 	u32 sdaddr;
 	uint dsize;
-	struct sk_buff_head pkt_list;
 
 	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
 	pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@
 		return -EIO;
 	}
 	pkt->priority = 0;
-	skb_queue_head_init(&pkt_list);
 
 	/* Determine initial transfer parameters */
 	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@
 		skb_put(pkt, dsize);
 		if (write)
 			memcpy(pkt->data, data, dsize);
-		skb_queue_tail(&pkt_list, pkt);
 		bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
-					     sdaddr, &pkt_list);
-		skb_dequeue_tail(&pkt_list);
+					     sdaddr, pkt);
 		if (bcmerror) {
 			brcmf_err("membytes transfer failed\n");
 			break;
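
    The new single-skb brcmf_sdio_buffrw() above rounds the SDIO transfer
    length up to a 4-byte boundary with (pkt->len + 3) & ~3, the standard
    power-of-two round-up. Worked through (illustrative values):

        /* (len + 3) & ~3 rounds up to the next multiple of 4:
         *   13 -> (16 & ~3) = 16
         *   16 -> (19 & ~3) = 16
         *   17 -> (20 & ~3) = 20
         */
        req_sz = (pkt->len + 3) & (uint)~3;
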
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3462b7..905704e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -21,6 +21,7 @@
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/sched.h>	/* request_irq() */
@@ -34,6 +35,7 @@
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include "sdio_host.h"
+#include "sdio_chip.h"
 #include "dhd_dbg.h"
 #include "dhd_bus.h"
 
@@ -41,13 +43,6 @@
 
 #define DMA_ALIGN_MASK	0x03
 
-#define SDIO_DEVICE_ID_BROADCOM_43143	43143
-#define SDIO_DEVICE_ID_BROADCOM_43241	0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334	0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335	0x4335
-
 #define SDIO_FUNC1_BLOCKSIZE		64
 #define SDIO_FUNC2_BLOCKSIZE		512
 
@@ -58,7 +53,8 @@
 	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
 	{ /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -320,6 +316,8 @@
 	int err;
 	struct brcmf_sdio_dev *sdiodev;
 	struct brcmf_bus *bus_if;
+	struct mmc_host *host;
+	uint max_blocks;
 
 	brcmf_dbg(SDIO, "Enter\n");
 	brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -366,6 +364,20 @@
 		brcmf_err("F2 error, probe failed %d...\n", err);
 		goto fail;
 	}
+
+	/*
+	 * determine host related variables after brcmf_sdio_probe()
+	 * as func->cur_blksize is properly set and F2 init has been
+	 * completed successfully.
+	 */
+	host = func->card->host;
+	sdiodev->sg_support = host->max_segs > 1;
+	max_blocks = min_t(uint, host->max_blk_count, 511u);
+	sdiodev->max_request_size = min_t(uint, host->max_req_size,
+					  max_blocks * func->cur_blksize);
+	sdiodev->max_segment_count = min_t(uint, host->max_segs,
+					   SG_MAX_SINGLE_ALLOC);
+	sdiodev->max_segment_size = host->max_seg_size;
 	brcmf_dbg(SDIO, "F2 init completed...\n");
 	return 0;
 
@@ -466,7 +478,7 @@
 {
 	brcmf_dbg(SDIO, "Enter\n");
 
-	brcmfmac_sdio_pdata = pdev->dev.platform_data;
+	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
 
 	if (brcmfmac_sdio_pdata->power_on)
 		brcmfmac_sdio_pdata->power_on();
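
    The probe path now captures the host controller's transfer limits once,
    so brcmf_sdio_sglist_rw() no longer rediscovers them on every transfer.
    The 511 cap is a property of CMD53 (IO_RW_EXTENDED), whose block-count
    field is 9 bits wide (2^9 - 1 = 511). Condensed from the hunk above:

        host = func->card->host;
        /* CMD53 carries at most 511 blocks, so clamp the host's limits: */
        max_blocks = min_t(uint, host->max_blk_count, 511u);
        sdiodev->max_request_size = min_t(uint, host->max_req_size,
                                          max_blocks * func->cur_blksize);
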
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e64..899a2ad 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
 #define	WLC_PHY_TYPE_LCN	8
 #define	WLC_PHY_TYPE_NULL	0xf
 
-#define BRCMF_EVENTING_MASK_LEN	16
-
 #define TOE_TX_CSUM_OL		0x00000001
 #define TOE_RX_CSUM_OL		0x00000002
 
@@ -632,29 +630,29 @@
 	u8 *reorder;
 };
 
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 /* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
 /* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-				       uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-				    void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			       void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			     void *buf, uint len);
 
 /* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-			       struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+			struct sk_buff *rxp);
 
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
-				     s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+			      char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
-			     bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+		      bool success);
 
 #endif				/* _BRCMF_H_ */
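
    The prototype churn in this header, and in dhd_bus.h, dhd_proto.h,
    sdio_chip.h, sdio_host.h and the brcmsmac headers below, is one
    mechanical cleanup: 'extern' is already the default linkage for function
    declarations, so dropping it and re-wrapping the lines changes nothing
    semantically. For instance, these two declarations are identical to the
    compiler:

        extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
        int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
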
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 74156f8..a6eb09e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -132,35 +132,34 @@
  * interface functions from common layer
  */
 
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
-			 struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+		      int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
 /* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
-			     bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
-extern void brcmf_sdio_register(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_register(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
 #endif
 
 #endif				/* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 40e7f85..64e9cff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@
 	}
 }
 
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
 {
-	struct sk_buff *skb, *pnext;
 	struct brcmf_if *ifp;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@
 	u8 ifidx;
 	int ret;
 
-	brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
-		  skb_queue_len(skb_list));
+	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
-	skb_queue_walk_safe(skb_list, skb, pnext) {
-		skb_unlink(skb, skb_list);
+	/* process and remove protocol-specific header */
+	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+	ifp = drvr->iflist[ifidx];
 
-		/* process and remove protocol-specific header */
-		ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
-		ifp = drvr->iflist[ifidx];
-
-		if (ret || !ifp || !ifp->ndev) {
-			if ((ret != -ENODATA) && ifp)
-				ifp->stats.rx_errors++;
-			brcmu_pkt_buf_free_skb(skb);
-			continue;
-		}
-
-		rd = (struct brcmf_skb_reorder_data *)skb->cb;
-		if (rd->reorder)
-			brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
-		else
-			brcmf_netif_rx(ifp, skb);
+	if (ret || !ifp || !ifp->ndev) {
+		if ((ret != -ENODATA) && ifp)
+			ifp->stats.rx_errors++;
+		brcmu_pkt_buf_free_skb(skb);
+		return;
 	}
+
+	rd = (struct brcmf_skb_reorder_data *)skb->cb;
+	if (rd->reorder)
+		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+	else
+		brcmf_netif_rx(ifp, skb);
 }
 
 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
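
    With brcmf_rx_frame() now taking a single skb, bus code that built a
    one-element sk_buff_head purely to satisfy the old list interface (see
    the dhd_sdio.c and usb.c hunks below) drops that boilerplate:

        /* old interface: wrap one packet in a throwaway queue */
        skb_queue_head_init(&pktlist);
        skb_queue_tail(&pktlist, pkt);
        brcmf_rx_frames(bus->sdiodev->dev, &pktlist);

        /* new interface: hand the packet over directly */
        brcmf_rx_frame(bus->sdiodev->dev, pkt);
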
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index ef91798..53c6e71 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -22,21 +22,21 @@
  */
 
 /* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
 
 /* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
 
 /* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
 
 /* Add any protocol-specific data header.
  * Caller must reserve prot_hdrlen prepend space.
  */
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
-				struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+			 struct sk_buff *txp);
 
 /* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
 
 #endif				/* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5..b02953c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@
 /* Flags for SDH calls */
 #define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
-#define BRCMF_SDIO_FW_NAME	"brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME	"brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
 #define BRCMF_IDLE_IMMEDIATE	(-1)	/* Enter idle immediately */
 #define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
 					 * when idle
@@ -454,9 +449,6 @@
 	struct work_struct datawork;
 	atomic_t dpc_tskcnt;
 
-	const struct firmware *firmware;
-	u32 fw_ptr;
-
 	bool txoff;		/* Transmit flow-controlled */
 	struct brcmf_sdio_count sdcnt;
 	bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@
 	BRCMF_SDIO_FT_SUB,
 };
 
+#define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+	u32 chipid;
+	u32 revmsk;
+	const char *bin;
+	const char *nv;
+};
+
+enum brcmf_firmware_type {
+	BRCMF_FIRMWARE_BIN,
+	BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+	{ BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+	{ BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+	{ BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+	{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+	{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+	{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+						  enum brcmf_firmware_type type)
+{
+	const struct firmware *fw;
+	const char *name;
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+		if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+		    brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+			switch (type) {
+			case BRCMF_FIRMWARE_BIN:
+				name = brcmf_fwname_data[i].bin;
+				break;
+			case BRCMF_FIRMWARE_NVRAM:
+				name = brcmf_fwname_data[i].nv;
+				break;
+			default:
+				brcmf_err("invalid firmware type (%d)\n", type);
+				return NULL;
+			}
+			goto found;
+		}
+	}
+	brcmf_err("Unknown chipid %d [%d]\n",
+		  bus->ci->chip, bus->ci->chiprev);
+	return NULL;
+
+found:
+	err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+	if ((err) || (!fw)) {
+		brcmf_err("fail to request firmware %s (%d)\n", name, err);
+		return NULL;
+	}
+
+	return fw;
+}
+
 static void pkt_align(struct sk_buff *p, int len, int align)
 {
 	uint datalign;
@@ -1061,6 +1147,8 @@
 	u8 rx_seq, fc, tx_seq_max;
 	u32 swheader;
 
+	trace_brcmf_sdpcm_hdr(false, header);
+
 	/* hw header */
 	len = get_unaligned_le16(header);
 	checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1183,6 +1271,7 @@
 		     SDPCM_DOFFSET_MASK;
 	*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
 	*(((__le32 *)header) + 2) = 0;
+	trace_brcmf_sdpcm_hdr(true, header);
 }
 
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1303,7 +1392,7 @@
 		sdio_claim_host(bus->sdiodev->func[1]);
 		errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
 				bus->sdiodev->sbwad,
-				SDIO_FUNC_2, F2SYNC, &bus->glom);
+				SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
 		sdio_release_host(bus->sdiodev->func[1]);
 		bus->sdcnt.f2rxdata++;
 
@@ -1406,13 +1495,12 @@
 					   bus->glom.qlen, pfirst, pfirst->data,
 					   pfirst->len, pfirst->next,
 					   pfirst->prev);
+			skb_unlink(pfirst, &bus->glom);
+			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+			bus->sdcnt.rxglompkts++;
 		}
-		/* sent any remaining packets up */
-		if (bus->glom.qlen)
-			brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
 
 		bus->sdcnt.rxglomframes++;
-		bus->sdcnt.rxglompkts += bus->glom.qlen;
 	}
 	return num;
 }
@@ -1557,7 +1645,6 @@
 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 {
 	struct sk_buff *pkt;		/* Packet for event or data frames */
-	struct sk_buff_head pktlist;	/* needed for bus interface */
 	u16 pad;		/* Number of pad bytes to read */
 	uint rxleft = 0;	/* Remaining number of frames allowed */
 	int ret;		/* Return code from calls */
@@ -1759,9 +1846,7 @@
 			continue;
 		}
 
-		skb_queue_head_init(&pktlist);
-		skb_queue_tail(&pktlist, pkt);
-		brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+		brcmf_rx_frame(bus->sdiodev->dev, pkt);
 	}
 
 	rxcount = maxframes - rxleft;
@@ -1786,10 +1871,65 @@
 	return;
 }
 
+/**
+ * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
+ * bus layer usage.
+ */
 /* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG		0x10000
+#define ALIGN_SKB_FLAG		0x8000
 /* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK	0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+				    struct sk_buff_head *pktq,
+				    struct sk_buff *pkt, uint chan)
+{
+	struct sk_buff *pkt_pad;
+	u16 tail_pad, tail_chop, sg_align;
+	unsigned int blksize;
+	u8 *dat_buf;
+	int ntail;
+
+	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	sg_align = 4;
+	if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+		sg_align = sdiodev->pdata->sd_sgentry_align;
+	/* sg entry alignment should be a divisor of block size */
+	WARN_ON(blksize % sg_align);
+
+	/* Check tail padding */
+	pkt_pad = NULL;
+	tail_chop = pkt->len % sg_align;
+	tail_pad = sg_align - tail_chop;
+	tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+		if (pkt_pad == NULL)
+			return -ENOMEM;
+		memcpy(pkt_pad->data,
+		       pkt->data + pkt->len - tail_chop,
+		       tail_chop);
+		*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+		skb_trim(pkt, pkt->len - tail_chop);
+		__skb_queue_after(pktq, pkt, pkt_pad);
+	} else {
+		ntail = pkt->data_len + tail_pad -
+			(pkt->end - pkt->tail);
+		if (skb_cloned(pkt) || ntail > 0)
+			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+				return -ENOMEM;
+		if (skb_linearize(pkt))
+			return -ENOMEM;
+		dat_buf = (u8 *)(pkt->data);
+		__skb_put(pkt, tail_pad);
+	}
+
+	if (pkt_pad)
+		return pkt->len + tail_chop;
+	else
+		return pkt->len - tail_pad;
+}
+
 /**
  * brcmf_sdio_txpkt_prep - packet preparation for transmit
  * @bus: brcmf_sdio structure pointer
@@ -1806,24 +1946,16 @@
 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 		      uint chan)
 {
-	u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
-	int ntail;
-	struct sk_buff *pkt_next, *pkt_new;
+	u16 head_pad, head_align;
+	struct sk_buff *pkt_next;
 	u8 *dat_buf;
-	unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	int err;
 	struct brcmf_sdio_hdrinfo hd_info = {0};
 
 	/* SDIO ADMA requires at least 32 bit alignment */
 	head_align = 4;
-	sg_align = 4;
-	if (bus->sdiodev->pdata) {
-		head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
-			     bus->sdiodev->pdata->sd_head_align : 4;
-		sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
-			   bus->sdiodev->pdata->sd_sgentry_align : 4;
-	}
-	/* sg entry alignment should be a divisor of block size */
-	WARN_ON(blksize % sg_align);
+	if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+		head_align = bus->sdiodev->pdata->sd_head_align;
 
 	pkt_next = pktq->next;
 	dat_buf = (u8 *)(pkt_next->data);
@@ -1842,40 +1974,20 @@
 		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
 	}
 
-	/* Check tail padding */
-	pkt_new = NULL;
-	tail_chop = pkt_next->len % sg_align;
-	tail_pad = sg_align - tail_chop;
-	tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
-	if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
-		pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
-		if (pkt_new == NULL)
-			return -ENOMEM;
-		memcpy(pkt_new->data,
-		       pkt_next->data + pkt_next->len - tail_chop,
-		       tail_chop);
-		*(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
-		skb_trim(pkt_next, pkt_next->len - tail_chop);
-		__skb_queue_after(pktq, pkt_next, pkt_new);
+	if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+		err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+					       pkt_next, chan);
+		if (err < 0)
+			return err;
+		hd_info.len = (u16)err;
 	} else {
-		ntail = pkt_next->data_len + tail_pad -
-			(pkt_next->end - pkt_next->tail);
-		if (skb_cloned(pkt_next) || ntail > 0)
-			if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
-				return -ENOMEM;
-		if (skb_linearize(pkt_next))
-			return -ENOMEM;
-		dat_buf = (u8 *)(pkt_next->data);
-		__skb_put(pkt_next, tail_pad);
+		hd_info.len = pkt_next->len;
 	}
 
-	/* Now prep the header */
-	if (pkt_new)
-		hd_info.len = pkt_next->len + tail_chop;
-	else
-		hd_info.len = pkt_next->len - tail_pad;
 	hd_info.channel = chan;
 	hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+	/* Now fill the header */
 	brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
 
 	if (BRCMF_BYTES_ON() &&
@@ -1908,8 +2020,8 @@
 
 	skb_queue_walk_safe(pktq, pkt_next, tmp) {
 		dummy_flags = *(u32 *)(pkt_next->cb);
-		if (dummy_flags & DUMMY_SKB_FLAG) {
-			chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+		if (dummy_flags & ALIGN_SKB_FLAG) {
+			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
 			if (chop_len) {
 				pkt_prev = pkt_next->prev;
 				memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3149,43 @@
 	return true;
 }
 
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
-	if (bus->firmware->size < bus->fw_ptr + len)
-		len = bus->firmware->size - bus->fw_ptr;
-
-	memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
-	bus->fw_ptr += len;
-	return len;
-}
-
 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 {
+	const struct firmware *fw;
+	int err;
 	int offset;
-	uint len;
-	u8 *memblock = NULL, *memptr;
-	int ret;
-	u8 idx;
+	int address;
+	int len;
 
-	brcmf_dbg(INFO, "Enter\n");
+	fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+	if (fw == NULL)
+		return -ENOENT;
 
-	ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
-			       &bus->sdiodev->func[2]->dev);
-	if (ret) {
-		brcmf_err("Fail to request firmware %d\n", ret);
-		return ret;
-	}
-	bus->fw_ptr = 0;
+	if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+	    BRCMF_MAX_CORENUM)
+		memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
 
-	memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
-	if (memblock == NULL) {
-		ret = -ENOMEM;
-		goto err;
-	}
-	if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
-		memptr += (BRCMF_SDALIGN -
-			   ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
-	offset = bus->ci->rambase;
-
-	/* Download image */
-	len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
-	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
-	if (BRCMF_MAX_CORENUM != idx)
-		memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
-	while (len) {
-		ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
-		if (ret) {
+	err = 0;
+	offset = 0;
+	address = bus->ci->rambase;
+	while (offset < fw->size) {
+		len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+		      fw->size - offset;
+		err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+				       (u8 *)&fw->data[offset], len);
+		if (err) {
 			brcmf_err("error %d on writing %d membytes at 0x%08x\n",
-				  ret, MEMBLOCK, offset);
-			goto err;
+				  err, len, address);
+			goto failure;
 		}
-
-		offset += MEMBLOCK;
-		len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+		offset += len;
+		address += len;
 	}
 
-err:
-	kfree(memblock);
+failure:
+	release_firmware(fw);
 
-	release_firmware(bus->firmware);
-	bus->fw_ptr = 0;
-
-	return ret;
+	return err;
 }
 
 /*
@@ -3111,7 +3197,8 @@
  * by two NULs.
 */
 
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+				    const struct firmware *nv)
 {
 	char *varbuf;
 	char *dp;
@@ -3120,12 +3207,12 @@
 	int ret = 0;
 	uint buf_len, n, len;
 
-	len = bus->firmware->size;
+	len = nv->size;
 	varbuf = vmalloc(len);
 	if (!varbuf)
 		return -ENOMEM;
 
-	memcpy(varbuf, bus->firmware->data, len);
+	memcpy(varbuf, nv->data, len);
 	dp = varbuf;
 
 	findNewline = false;
@@ -3177,18 +3264,16 @@
 
 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
+	const struct firmware *nv;
 	int ret;
 
-	ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
-			       &bus->sdiodev->func[2]->dev);
-	if (ret) {
-		brcmf_err("Fail to request nvram %d\n", ret);
-		return ret;
-	}
+	nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+	if (nv == NULL)
+		return -ENOENT;
 
-	ret = brcmf_process_nvram_vars(bus);
+	ret = brcmf_process_nvram_vars(bus, nv);
 
-	release_firmware(bus->firmware);
+	release_firmware(nv);
 
 	return ret;
 }
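
    brcmf_sdbrcm_get_fw() keys its table on (chipid, revision mask): revmsk
    has bit N set if chip revision N uses that image, which is how BCM43241
    splits between the B0 image (0x0000001F, revisions 0-4) and the B4 image
    (0xFFFFFFE0, revisions 5 and up). The match, reduced to its essence
    (illustrative helper, not in the patch):

        /* Does a table row cover this exact chip and revision? */
        static bool fw_entry_matches(const struct brcmf_firmware_names *e,
                                     u32 chip, u32 chiprev)
        {
                return e->chipid == chip && (e->revmsk & BIT(chiprev));
        }
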
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214..14bc24d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@
 	BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
 	BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
-	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+	BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
 
 #define BRCMF_ENUM_DEF(id, val) \
 	BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@
 };
 #undef BRCMF_ENUM_DEF
 
+#define BRCMF_EVENTING_MASK_LEN		DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
 /* flags field values in struct brcmf_event_msg */
 #define BRCMF_EVENT_MSG_LINK		0x01
 #define BRCMF_EVENT_MSG_FLUSHTXQ	0x02
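
    Adding PSTA_PRIMARY_INTF_IND (id 128) is why BRCMF_EVENTING_MASK_LEN
    moved here from dhd.h and is now derived: a fixed 16-byte mask only
    covers event bits 0-127. With BRCMF_E_LAST one past the highest id
    (129, assuming the usual enum terminator), DIV_ROUND_UP sizes the
    bitmask automatically:

        /* DIV_ROUND_UP(n, d) == (n + d - 1) / d, so
         * DIV_ROUND_UP(129, 8) == 17 bytes == bits 0..135, covering id 128. */
        #define BRCMF_EVENTING_MASK_LEN  DIV_ROUND_UP(BRCMF_E_LAST, 8)
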
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140..d0cd0bf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@
 /**
  * struct brcmf_skbuff_cb - control buffer associated with skbuff.
  *
+ * @bus_flags: 2 bytes reserved for bus specific parameters
  * @if_flags: holds interface index and packet related flags.
  * @htod: host to device packet identifier (used in PKTTAG tlv).
  * @state: transmit state of the packet.
@@ -177,6 +178,7 @@
  * provides 48 bytes of storage so this structure should not exceed that.
  */
 struct brcmf_skbuff_cb {
+	u16 bus_flags;
 	u16 if_flags;
 	u32 htod;
 	enum brcmf_fws_skb_state state;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d7a9745..5b5b952 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1148,7 +1148,7 @@
 
 	pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
 
-	INIT_COMPLETION(afx_hdl->act_frm_scan);
+	reinit_completion(&afx_hdl->act_frm_scan);
 	set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
 	afx_hdl->is_active = true;
 	afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
@@ -1501,7 +1501,7 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	INIT_COMPLETION(p2p->send_af_done);
+	reinit_completion(&p2p->send_af_done);
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
 
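
    INIT_COMPLETION() was renamed tree-wide to reinit_completion() in this
    cycle; both re-arm an already-initialised completion so it can be waited
    on again. The reuse pattern, sketched generically rather than with the
    p2p structures:

        static DECLARE_COMPLETION(done);

        reinit_completion(&done);       /* was: INIT_COMPLETION(done) */
        /* ... start the work that will eventually call complete(&done) ... */
        wait_for_completion(&done);
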
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177..2096a14 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/netdevice.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/ssb/ssb_regs.h>
 #include <linux/bcma/bcma.h>
 
@@ -136,6 +137,8 @@
 	u8 idx;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+	if (idx == BRCMF_MAX_CORENUM)
+		return false;
 
 	regdata = brcmf_sdio_regrl(sdiodev,
 				   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@
 	bool ret;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+	if (idx == BRCMF_MAX_CORENUM)
+		return false;
 
 	regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
 				   NULL);
@@ -261,6 +266,8 @@
 	u32 regdata;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+	if (idx == BRCMF_MAX_CORENUM)
+		return;
 
 	/* if core is already in reset, just return */
 	regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@
 	u8 idx;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+	if (idx == BRCMF_MAX_CORENUM)
+		return;
 
 	/*
 	 * Must do the disable sequence first to work for
@@ -368,6 +377,8 @@
 	u32 regdata;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+	if (idx == BRCMF_MAX_CORENUM)
+		return;
 
 	/* must disable first to work for arbitrary current core state */
 	brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@
 				   NULL);
 	ci->chip = regdata & CID_ID_MASK;
 	ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+	    ci->chiprev >= 2)
+		ci->chip = BCM4339_CHIP_ID;
 	ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 
 	brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@
 		ci->ramsize = 0xc0000;
 		ci->rambase = 0x180000;
 		break;
+	case BCM4339_CHIP_ID:
+		ci->c_inf[0].wrapbase = 0x18100000;
+		ci->c_inf[0].cib = 0x2e084411;
+		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+		ci->c_inf[1].base = 0x18005000;
+		ci->c_inf[1].wrapbase = 0x18105000;
+		ci->c_inf[1].cib = 0x15004211;
+		ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+		ci->c_inf[2].base = 0x18002000;
+		ci->c_inf[2].wrapbase = 0x18102000;
+		ci->c_inf[2].cib = 0x04084411;
+		ci->ramsize = 0xc0000;
+		ci->rambase = 0x180000;
+		break;
 	default:
 		brcmf_err("chipid 0x%x is not supported\n", ci->chip);
 		return -ENODEV;
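
    brcmf_sdio_chip_getinfidx() returns BRCMF_MAX_CORENUM when the requested
    core does not exist; the guards added throughout this file stop callers
    from using that sentinel to index ci->c_inf[], which would read past the
    array. The shape of the check:

        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
        if (idx == BRCMF_MAX_CORENUM)   /* core absent on this chip */
                return;                 /* never touch ci->c_inf[idx] */
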
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f..507c61c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
 
 #define BRCMF_MAX_CORENUM	6
 
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143		43143
+#define SDIO_DEVICE_ID_BROADCOM_43241		0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329		0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330		0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334		0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339	0x4335
+
 struct chip_core_info {
 	u16 id;
 	u16 rev;
@@ -215,17 +223,16 @@
 	u16 PAD[0x80];
 };
 
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-				  struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-					      struct chip_info *ci,
-					      u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-					   struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-					  struct chip_info *ci, char *nvram_dat,
-					  uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+			   struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+				       struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+				    struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+				   struct chip_info *ci, char *nvram_dat,
+				   uint nvram_sz);
 
 #endif		/* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 2b5407f..fc0d4f0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -178,21 +178,25 @@
 	bool irq_en;			/* irq enable flags */
 	spinlock_t irq_en_lock;
 	bool irq_wake;			/* irq wake enable flags */
+	bool sg_support;
+	uint max_request_size;
+	ushort max_segment_count;
+	uint max_segment_size;
 };
 
 /* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-			     u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-			     u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-				   void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+		      int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+		      int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+			    void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -206,22 +210,17 @@
  * Returns 0 or error code.
  * NOTE: Async operation is not currently supported.
  */
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			  uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			  uint flags, u8 *buf, uint nbytes);
 
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			  uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			  uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			    uint flags, struct sk_buff_head *pktq, uint totlen);
 
 /* Flags bits */
 
@@ -237,46 +236,43 @@
  *   nbytes:   number of bytes to transfer to/from buf
  * Returns 0 or error code.
  */
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
-			       u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
-			    u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+			u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+		     u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
 
 /* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
 
 /* attach, return handler on success, NULL if failed.
  *  The handler shall be provided by all subsequent calls. No local cache
  *  cfghdl points to the starting address of pci device mapped memory
  */
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
 
 /* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
-				    uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+			     uint addr, u8 *byte);
 
 /* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
-			 uint rw, uint fnc, uint addr,
-			 u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+			     uint addr, u32 *word, uint nbyte);
 
 /* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
-				    bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
 
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
-				 wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+			  wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif				/* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index bc29171..3c67529 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -78,13 +78,15 @@
 	TP_ARGS(data, len),
 	TP_STRUCT__entry(
 		__field(unsigned long, len)
+		__field(unsigned long, addr)
 		__dynamic_array(u8, hdata, len)
 	),
 	TP_fast_assign(
 		__entry->len = len;
+		__entry->addr = (unsigned long)data;
 		memcpy(__get_dynamic_array(hdata), data, len);
 	),
-	TP_printk("hexdump [length=%lu]", __entry->len)
+	TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
 );
 
 TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@
 	TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
 );
 
+TRACE_EVENT(brcmf_sdpcm_hdr,
+	TP_PROTO(bool tx, void *data),
+	TP_ARGS(tx, data),
+	TP_STRUCT__entry(
+		__field(u8, tx)
+		__field(u16, len)
+		__array(u8, hdr, 12)
+	),
+	TP_fast_assign(
+		memcpy(__entry->hdr, data, 12);
+		__entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+		__entry->tx = tx ? 1 : 0;
+	),
+	TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+		  __entry->len, __entry->hdr[4])
+);
+
 #ifdef CONFIG_BRCM_TRACING
 
 #undef TRACE_INCLUDE_PATH
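
    The new brcmf_sdpcm_hdr event snapshots the first 12 bytes of the SDPCM
    header (emitted from the rx parse and tx pack paths in dhd_sdio.c above)
    and decodes the hardware length field little-endian, matching the
    get_unaligned_le16() read in the parser:

        /* hdr[0] is the low byte, hdr[1] the high byte of the frame length,
         * e.g. hdr = {0x34, 0x12, ...}  ->  len == 0x1234 (4660). */
        __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
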
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index f4aea47..422f44c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@
 	struct brcmf_usbreq  *req = (struct brcmf_usbreq *)urb->context;
 	struct brcmf_usbdev_info *devinfo = req->devinfo;
 	struct sk_buff *skb;
-	struct sk_buff_head skbq;
 
 	brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
 	brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@
 	}
 
 	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
-		skb_queue_head_init(&skbq);
-		skb_queue_tail(&skbq, skb);
 		skb_put(skb, urb->actual_length);
-		brcmf_rx_frames(devinfo->dev, &skbq);
+		brcmf_rx_frame(devinfo->dev, skb);
 		brcmf_usb_rx_refill(devinfo, req);
 	} else {
 		brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index a8a267b..2d08c15 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -172,19 +172,19 @@
 
 
 /* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
 
 /* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
 
 static inline u32 ai_get_cccaps(struct si_pub *sih)
 {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 73d01e5..03bdcf2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -37,17 +37,17 @@
 	u16 dma_len;
 };
 
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
-					struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
-				   struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+				 struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+			    struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
 
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
-				 struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+			      struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
 
 #endif				/* _BRCM_AMPDU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea388..a3d487a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
@@ -17,13 +17,11 @@
 #ifndef _BRCM_ANTSEL_H_
 #define _BRCM_ANTSEL_H_
 
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
-				  bool sel,
-				  u8 id, u8 fbid, u8 *antcfg,
-				  u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+			       u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
 
 #endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 006483a..39dd3a5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -32,20 +32,16 @@
 
 #define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
 
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
 
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
 
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
-				      u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
 
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
-				   u16 chanspec,
-				   struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
-				     u16 chanspec,
-				     u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+				struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+				  u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
 
 #endif				/* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 4090032..198053d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -88,26 +88,26 @@
 };
 
 /* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
-				bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+			 bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
 
 /* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
-				      void (*fn) (void *arg), void *arg,
-				      const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+				     void (*fn) (void *arg), void *arg,
+				     const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
 
 #endif				/* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0e..8138f1c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1906,14 +1906,14 @@
 
 	/* If macaddr exists, use it (Sromrev4, CIS, ...). */
 	if (!is_zero_ether_addr(sprom->il0mac)) {
-		memcpy(etheraddr, sprom->il0mac, 6);
+		memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
 		return;
 	}
 
 	if (wlc_hw->_nbands > 1)
-		memcpy(etheraddr, sprom->et1mac, 6);
+		memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
 	else
-		memcpy(etheraddr, sprom->il0mac, 6);
+		memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
 }
 
 /* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@
 		return true;
 	if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
 		return true;
-	if (device == BCM4313_D11N2G_ID)
+	if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
 		return true;
 	if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
 		return true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index b5d7a38..c4d135c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -616,66 +616,54 @@
 	struct brcms_bss_info *current_bss;
 };
 
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
-			   struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
-		   uint *blocks);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+			   uint *blocks);
 
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
-				uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
-					     u32 rspec,
-					     bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
-				      u32 rts_rate,
-				      u32 frame_rate,
-				      u8 rts_preamble_type,
-				      u8 frame_preamble_type, uint frame_len,
-				      bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
-			       struct ieee80211_sta *sta,
-			       void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
-					  u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
-				     u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
-				  u16 chanspec,
-				  bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
-			      u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
-			u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
-					u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
-					  u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
-				       int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
-				   uint offset, const void *buf, int len,
-				   u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
-				     void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
-				    u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+			       bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+			       u32 rts_rate, u32 frame_rate,
+			       u8 rts_preamble_type, u8 frame_preamble_type,
+			       uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+			    struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+			  bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+		 int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+				     u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+				       u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+				int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+			   const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+			     void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
 
 #endif				/* _BRCM_MAIN_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e34a71e..4d3734f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -179,121 +179,106 @@
 };
 
 
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-					    struct bcma_device *d11core,
-					    int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+				     struct bcma_device *d11core, int bandtype,
+				     struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
 
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
-				   u16 *phyrev, u16 *radioid,
-				   u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+			    u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
 
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
 
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
-				 u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
-				       u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
 
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
-				struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
 
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
 
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
 
 
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
 
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
-						 bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
-					  struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
-					 uint band);
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+					  bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+				   struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
 
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
-				      u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
-					      uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
-					    uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
-				      struct txpwr_limits *,
-				      u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
-			       bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
-			       bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
-				       struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
-					bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+			       u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+				       u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+				     s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+			       u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+				struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
 
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
-				   u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
-				  u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
-				  u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
-				 u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
 
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
 
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
 
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
 
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
-					struct tx_power *power, uint channel);
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+				 struct tx_power *power, uint channel);
 
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
-				      u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
-				      bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
 
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
 
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
 
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
 
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
-					     u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+				      u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
 #endif                          /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 1dc767c..4960f7d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -910,113 +910,103 @@
 	u8 do_init_g;
 };
 
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
 
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
-			  u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
 
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
 
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
 
-extern void wlc_phy_read_table(struct brcms_phy *pi,
-			       const struct phytbl_info *ptbl_info,
-			       u16 tblAddr, u16 tblDataHi,
-			       u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
-				const struct phytbl_info *ptbl_info,
-				u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
-			       uint tbl_offset, u16 tblAddr, u16 tblDataHi,
-			       u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+void wlc_phy_read_table(struct brcms_phy *pi,
+			const struct phytbl_info *ptbl_info,
+			u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+			 const struct phytbl_info *ptbl_info,
+			 u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+			u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
 
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
 
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
 
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
-					     struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
-				    const struct radio_regs *radioregs,
-				    u16 core_offset);
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+				      struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+			     const struct radio_regs *radioregs,
+			     u16 core_offset);
 
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
 
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
-					s32 *eps_imag);
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
 
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
 
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
 
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
 
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
 
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
 
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
-				      u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
-					u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
-					      u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
 
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
 
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
 
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
-				     u16 max_val, bool iqcalmode);
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+			      bool iqcalmode);
 
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
-					       u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
-					    u8 rate_mcs_end,
-					    u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
-					    u8 rate_ofdm_start,
-					    u8 rate_ofdm_end,
-					    u8 rate_mcs_start);
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+					u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+				     u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+				     u8 rate_ofdm_end, u8 rate_mcs_start);
 
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
 
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TBL_ID_PAPDCOMPDELTATBL	0x18
 #define LCNPHY_TX_POWER_TABLE_SIZE	128
@@ -1030,26 +1020,24 @@
 
 #define LCNPHY_TX_PWR_CTRL_TEMPBASED	0xE001
 
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
-				   const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
-				  struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
-				      u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
-				s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+			    const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+			       u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
 
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
 
 #define NPHY_MAX_HPVGA1_INDEX		10
 #define NPHY_DEF_HPVGA1_INDEXLIMIT	7
@@ -1060,9 +1048,8 @@
 	u32 q_pwr;
 };
 
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
-					       bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 
 #define wlc_phy_write_table_nphy(pi, pti) \
 	wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@
 #define wlc_nphy_table_data_write(pi, w, v) \
 	wlc_phy_table_data_write((pi), (w), (v))
 
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
-				    u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
-				     u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+			     void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+			      const void *);
 
 #define	PHY_IPA(pi) \
 	((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@
 	if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
 		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
 
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
 
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
 
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
 
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
 
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
-				   u16 num_samps, u8 wait_time,
-				   u8 wait_for_crs);
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+			    u16 num_samps, u8 wait_time, u8 wait_for_crs);
 
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
-				      struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+			       struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
 
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
-					 u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
 
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
 
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
-				   struct nphy_txgains target_gain,
-				   bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
-				 struct nphy_txgains target_gain,
-				 u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
-				     s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
-				  s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
-					s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
-				u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
-				     u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+			    struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+			  u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+			      s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+			   s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+				 bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+			 u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+			      u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
 
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
-				     struct d11rxhdr *rxh);
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
 
 #define NPHY_TESTPATTERN_BPHY_EVM   0
 #define NPHY_TESTPATTERN_BPHY_RFCS  1
 
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
 
 void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
 				s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
-				  u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
 
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
 #endif				/* _BRCM_PHY_INT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 2c5b66b..dd87747 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -124,56 +124,49 @@
 
 struct brcms_phy;
 
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
-						 struct brcms_info *wl,
-						 struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+					  struct brcms_info *wl,
+					  struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
 
 /* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
-					    void (*fn) (struct brcms_phy *pi),
-					    void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
-			       u32 macintmask);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+				     void (*fn)(struct brcms_phy *pi),
+				     void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
 
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
-				 u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
-			   u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
-			     u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
-						      physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
-							physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
-					  int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
-					 u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
-				  void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
-				const void *buf, int, u32);
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+		    int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+				   int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+			   int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+			 int, u32);
 
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
-				       u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
 
 #endif				/* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 20e2012..a014bbc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -20,7 +20,7 @@
 
 #include "types.h"
 
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index d36ea5e..4da38cb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -266,83 +266,76 @@
 };
 
 /* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
-	       bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+				    struct bcma_device *core, uint unit,
+				    bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
 
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
 
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
-				     struct sk_buff *sdu,
-				     struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
-				   int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
-				  int match_reg_offset,
-				  const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
-			      const struct ieee80211_tx_queue_params *arg,
-			      bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
-			    struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
-					 u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
-				   const char *name, struct brcms_info *hdl,
-				   int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
-				     struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+			      struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+			   const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+			   const struct ieee80211_tx_queue_params *arg,
+			   bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+			 u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+				  u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+			    struct brcms_info *hdl,
+			    int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+			      struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
 				 struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
-					struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
 				    s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
-					u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
-			     const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
-				   struct sk_buff *beacon, u16 tim_offset,
-				   u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
-				       struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
-			     size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+		      u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+			    u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+				struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
 
 #endif				/* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index 980d578..5bb88b7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -216,34 +216,30 @@
 
 /* sanitize and sort a rateset, preserving the basic bit(s); validate the
  * rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
-				       const struct brcms_c_rateset *hw_rs,
-				       bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+					    const struct brcms_c_rateset *hw_rs,
+					    bool check_brate, u8 txstreams);
 /* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
-			     struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+			  struct brcms_c_rateset *dst);
 
 /* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
 
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
-	struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
-	bool mcsallow);
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+			    struct brcms_c_rateset *dst, bool basic_only,
+			    u8 rates, uint xmask, bool mcsallow);
 
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
-			const struct brcms_c_rateset *rs_hw, uint phy_type,
-			int bandtype, bool cck_only, uint rate_mask,
-			bool mcsallow, u8 bw, u8 txstreams);
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+			     const struct brcms_c_rateset *rs_hw, uint phy_type,
+			     int bandtype, bool cck_only, uint rate_mask,
+			     bool mcsallow, u8 bw, u8 txstreams);
 
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
+s16 brcms_c_rate_legacy_phyctl(uint rate);
 
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
-				      u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
-					  u8 bw);
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
 
 #endif				/* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 19f6580..ba94930 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -19,24 +19,19 @@
 
 #include "types.h"
 
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
 
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
-					u16 *ss_algo_channel,
-					u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
-			     struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
-			       bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
-				      u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
-					u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+				     u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
 
 #endif				/* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
index 18750a8..c87dd89 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -43,16 +43,14 @@
 	u32 *bcm43xx_bomminor;
 };
 
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
 
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
 
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
-				unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
-				 unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int  brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+			  unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int brcms_check_firmwares(struct brcms_info *wl);
 
 #endif	/* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245..84113ea 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
 #define BCM4331_CHIP_ID		0x4331
 #define BCM4334_CHIP_ID		0x4334
 #define BCM4335_CHIP_ID		0x4335
+#define BCM4339_CHIP_ID		0x4339
 
 #endif				/* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 92623f0..8660a2c 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -140,6 +140,6 @@
 	void (*decchspec)(struct brcmu_chan *ch);
 };
 
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
 
 #endif	/* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 898cacb..8ba445b 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -114,31 +114,29 @@
 	return skb_peek_tail(&pq->q[prec].skblist);
 }
 
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
-				 struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
-				      struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
-					     bool (*match_fn)(struct sk_buff *p,
-							      void *arg),
-					     void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+				     struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+				      bool (*match_fn)(struct sk_buff *p,
+						       void *arg),
+				      void *arg);
 
 /* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
 
 /* Empty the queue at a particular precedence level */
 /* callback function fn(pkt, arg) returns true if pkt belongs to the interface */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
-	bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+		       bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* operations on a set of precedences in packet queue */
 
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
-	int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -167,11 +165,11 @@
 	return pq->len == 0;
 }
 
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
-		bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+		      bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
 /* ip address */
@@ -204,13 +202,13 @@
 /* externs */
 /* format/print */
 #ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif				/* DEBUG */
 
 #ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
 void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
 #else
 __printf(3, 4)
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 755a0c8..40078f5 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -365,7 +365,7 @@
 static int cw1200_spi_probe(struct spi_device *func)
 {
 	const struct cw1200_platform_data_spi *plat_data =
-		func->dev.platform_data;
+		dev_get_platdata(&func->dev);
 	struct hwbus_priv *self;
 	int status;
 
@@ -443,7 +443,7 @@
 		}
 		kfree(self);
 	}
-	cw1200_spi_off(func->dev.platform_data);
+	cw1200_spi_off(dev_get_platdata(&func->dev));
 
 	return 0;
 }
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 970a48b..de7c4ff 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -217,7 +217,7 @@
 		}
 	}
 
-	memcpy(req.bssid, selected->bssid, 6);
+	memcpy(req.bssid, selected->bssid, ETH_ALEN);
 	req.channel = selected->chid;
 	spin_unlock_irqrestore(&local->lock, flags);
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1..81903e3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2698,7 +2698,7 @@
 /* data's copy of the eeprom data                                 */
 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
 {
-	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
 }
 
 static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@
 	pci_release_regions(pdev);
       out_pci_disable_device:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
       out_free_libipw:
 	free_libipw(priv->net_dev, 0);
       out:
@@ -11966,7 +11965,6 @@
 	iounmap(priv->hw_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	/* wiphy_unregister needs to be here, before free_libipw */
 	wiphy_unregister(priv->ieee->wdev.wiphy);
 	kfree(priv->ieee->a_band.channels);
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 6eede52..5ce2f59 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -950,66 +950,55 @@
 }
 
 /* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
-extern void libipw_networks_age(struct libipw_device *ieee,
-				   unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
 
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
 
 /* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
-			       struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
 
 /* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
-		     struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
-			struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+		   struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+	      struct libipw_rx_stats *rx_stats);
 /* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
-			     struct libipw_hdr_4addr *header,
-			     struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+		   struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
 
 /* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
-						     *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
-			     const struct libipw_geo *geo);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
 
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
-				      u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
-				      u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
-				      u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
-							     libipw_device
-							     *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
-				      u8 channel);
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+						u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
 
 /* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
-				      struct iw_request_info *info,
-				      union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
-				      struct iw_request_info *info,
-				      union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+			 struct iw_request_info *info, union iwreq_data *wrqu,
+			 char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+			 struct iw_request_info *info, union iwreq_data *wrqu,
+			 char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra);
 
 static inline void libipw_increment_scans(struct libipw_device *ieee)
 {
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07..dea3b50 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@
 out_pci_release_regions:
 	pci_release_regions(pdev);
 out_pci_disable_device:
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 out_ieee80211_free_hw:
 	ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@
 	iounmap(il->hw_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	il_free_channel_map(il);
 	il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9a8703d..00030d4 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -189,15 +189,14 @@
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
-					     struct ieee80211_hdr *hdr,
-					     int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
-				     char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+				      struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+			      bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
 
 /******************************************************************************
  *
@@ -215,39 +214,36 @@
  * il3945_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
-					   struct il_tx_queue *txq,
-					   dma_addr_t addr, u16 len, u8 reset,
-					   u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
-					     struct il3945_frame *frame,
-					     u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				    dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+				      struct il3945_frame *frame, u8 rate);
 void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
 				 struct ieee80211_tx_info *info,
 				 struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
 void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
 
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
 
 /**
  * il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@
  * not yet been merged into a single common layer for managing the
  * station tables.
  */
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
 
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
 
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
 
 /* scanning */
 int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5..3982ab7 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@
 out_iounmap:
 	iounmap(il->hw_base);
 out_pci_release_regions:
-	pci_set_drvdata(pdev, NULL);
 	pci_release_regions(pdev);
 out_pci_disable_device:
 	pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@
 	iounmap(il->hw_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	il4965_uninit_drv(il);
 
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1b15b0b..337dfcf 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -272,7 +272,7 @@
 	((t) < IL_TX_POWER_TEMPERATURE_MIN || \
 	 (t) > IL_TX_POWER_TEMPERATURE_MAX)
 
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
 /********************* END TEMPERATURE ***************************************/
 
 /********************* START TXPOWER *****************************************/
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 83f8ed8..ad123d6 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -858,9 +858,9 @@
  * il4965_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
 extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
 static inline int
 il_queue_used(const struct il_queue *q, int i)
 {
@@ -1727,7 +1727,7 @@
 void il_free_txq_mem(struct il_priv *il);
 
 #ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
 #else
 static inline void
 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@
 /*****************************************************
 * TX
 ******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
 /*****************************************************
  * TX power
  ****************************************************/
@@ -1931,10 +1931,10 @@
 	return il_is_ready(il);
 }
 
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
 
 int il_apm_init(struct il_priv *il);
 
@@ -1968,15 +1968,15 @@
 
 irqreturn_t il_isr(int irq, void *data);
 
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@
  * The specific throughput table used is based on the type of network
  * the associated with, including A, B, G, and G w/ TGG protection
  */
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
 
 /* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-				u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-				u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+			 u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+			 u8 sta_id);
 
 /**
  * il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@
  * ieee80211_register_hw
  *
  */
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
 
 /**
  * il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
 
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
 
 extern u32 il_debug_level;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index f2a86ff..23d5f02 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -397,7 +397,7 @@
 	return cpu_to_le32(flags|(u32)rate);
 }
 
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a79fdd1..7434d9e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -270,7 +270,7 @@
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  *
  ****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
 extern const u8 iwl_bcast_addr[ETH_ALEN];
 
 #define IWL_OPERATION_MODE_AUTO     0
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 5d83cab..26fc550 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -407,8 +407,8 @@
 
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
-			     struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+		      u8 sta_id);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@
  * ieee80211_register_hw
  *
  */
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
 
 #endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da442b8..1fef524 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -433,27 +433,19 @@
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+	txq_id = info->hw_queue;
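+	/* mac80211 pre-selected this queue; the special cases below override it */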
+
 	if (is_agg)
 		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
 	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
 		/*
-		 * Send this frame after DTIM -- there's a special queue
-		 * reserved for this for contexts that support AP mode.
-		 */
-		txq_id = ctx->mcast_queue;
-
-		/*
 		 * The microcode will clear the more data
 		 * bit in the last frame it transmits.
 		 */
 		hdr->frame_control |=
 			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-		txq_id = IWL_AUX_QUEUE;
-	else
-		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+	}
 
-	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
 	WARN_ON_ONCE(is_agg &&
 		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 86270b6..6363794 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -330,15 +330,14 @@
 	enum iwl_ucode_type old_type;
 	static const u8 alive_cmd[] = { REPLY_ALIVE };
 
+	fw = iwl_get_ucode_image(priv, ucode_type);
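+	/* look up the image before touching cur_ucode so a missing image
+	 * leaves the previous ucode state intact */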
+	if (WARN_ON(!fw))
+		return -EINVAL;
+
 	old_type = priv->cur_ucode;
 	priv->cur_ucode = ucode_type;
-	fw = iwl_get_ucode_image(priv, ucode_type);
-
 	priv->ucode_loaded = false;
 
-	if (!fw)
-		return -EINVAL;
-
 	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
 				   alive_cmd, ARRAY_SIZE(alive_cmd),
 				   iwl_alive_fn, &alive_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 76e14c0..85879db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION	0xffff /* meaningless */
 #define IWL3160_NVM_VERSION		0x709
 #define IWL3160_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL7265_NVM_VERSION		0x0a1d
+#define IWL7265_TX_POWER_VERSION	0xffff /* meaningless */
 
 #define IWL7260_FW_PRE "iwlwifi-7260-"
 #define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
 static const struct iwl_base_params iwl7000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@
 	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
 };
 
+const struct iwl_cfg iwl7265_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 7265",
+	.fw_name_pre = IWL7265_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7265_NVM_VERSION,
+	.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b03c25e..18f232e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -293,6 +293,7 @@
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a276af4..54a4fdc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -394,6 +394,38 @@
 #define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
 
+/* SECURE boot registers */
+#define CSR_SECURE_BOOT_CONFIG_ADDR	(0x100)
+enum secure_boot_config_reg {
+	CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP	= 0x00000001,
+	CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ	= 0x00000002,
+};
+
+#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR	(0x100)
+#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR	(0x100)
+enum secure_boot_status_reg {
+	CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS		= 0x00000003,
+	CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED	= 0x00000002,
+	CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS		= 0x00000004,
+	CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL		= 0x00000008,
+	CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL	= 0x00000010,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR	(0x100)
+enum secure_load_status_reg {
+	CSR_CPU_STATUS_LOADING_STARTED			= 0x00000001,
+	CSR_CPU_STATUS_LOADING_COMPLETED		= 0x00000002,
+	CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED		= 0x000000F8,
+	CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK		= 0x0000FF00,
+};
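+/* the NUM_OF_* entries above are multi-bit field masks, not single flags */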
+
+#define CSR_SECURE_INSPECTOR_CODE_ADDR	(0x100)
+#define CSR_SECURE_INSPECTOR_DATA_ADDR	(0x100)
+
+#define CSR_SECURE_TIME_OUT	(100)
+
+#define FH_TCSR_0_REG0 (0x1D00)
+
 /*
  * HBUS (Host-side Bus)
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 99e1da3..ff57002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -483,6 +483,7 @@
 	const u8 *tlv_data;
 	char buildstr[25];
 	u32 build;
+	int num_of_cpus;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@
 				goto invalid_tlv_len;
 			drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
 			break;
+		case IWL_UCODE_TLV_SECURE_SEC_RT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+					    tlv_len);
+			drv->fw.mvm_fw = true;
+			drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
+			break;
+		case IWL_UCODE_TLV_SECURE_SEC_INIT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+					    tlv_len);
+			drv->fw.mvm_fw = true;
+			drv->fw.img[IWL_UCODE_INIT].is_secure = true;
+			break;
+		case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+					    tlv_len);
+			drv->fw.mvm_fw = true;
+			drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
+			break;
+		case IWL_UCODE_TLV_NUM_OF_CPU:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			num_of_cpus =
+				le32_to_cpup((__le32 *)tlv_data);
+
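+			/* a single-CPU image is the default and needs no flag */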
+			if (num_of_cpus == 2) {
+				drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+					true;
+				drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+					true;
+				drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+					true;
+			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+				IWL_ERR(drv, "Driver supports up to 2 CPUs\n");
+				return -EINVAL;
+			}
+			break;
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8b6c6fd..6c6c35c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -121,6 +121,10 @@
 	IWL_UCODE_TLV_SEC_WOWLAN	= 21,
 	IWL_UCODE_TLV_DEF_CALIB		= 22,
 	IWL_UCODE_TLV_PHY_SKU		= 23,
+	IWL_UCODE_TLV_SECURE_SEC_RT	= 24,
+	IWL_UCODE_TLV_SECURE_SEC_INIT	= 25,
+	IWL_UCODE_TLV_SECURE_SEC_WOWLAN	= 26,
+	IWL_UCODE_TLV_NUM_OF_CPU	= 27,
 };
 
 struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index a122368..75db0871 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -75,11 +75,23 @@
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
  * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16-entry blacklist (instead of 64) in the
+ *	scan offload profile config command.
  * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
  * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *	(rather than two) IPv6 addresses
  * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ *	from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping the
+ *	connection when going back to D0
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
+ *	containing CAM (Continuous Active Mode) indication.
  */
 enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
@@ -87,11 +99,21 @@
 	IWL_UCODE_TLV_FLAGS_MFP			= BIT(2),
 	IWL_UCODE_TLV_FLAGS_P2P			= BIT(3),
 	IWL_UCODE_TLV_FLAGS_DW_BC_TABLE		= BIT(4),
-	IWL_UCODE_TLV_FLAGS_UAPSD		= BIT(6),
+	IWL_UCODE_TLV_FLAGS_NEWBT_COEX		= BIT(5),
+	IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT	= BIT(6),
+	IWL_UCODE_TLV_FLAGS_SHORT_BL		= BIT(7),
 	IWL_UCODE_TLV_FLAGS_RX_ENERGY_API	= BIT(8),
 	IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2	= BIT(9),
 	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS	= BIT(10),
 	IWL_UCODE_TLV_FLAGS_BF_UPDATED		= BIT(11),
+	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID	= BIT(12),
+	IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API	= BIT(14),
+	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL	= BIT(15),
+	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE	= BIT(16),
+	IWL_UCODE_TLV_FLAGS_SCHED_SCAN		= BIT(17),
+	IWL_UCODE_TLV_FLAGS_STA_KEY_CMD		= BIT(19),
+	IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD	= BIT(20),
+	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -133,7 +155,8 @@
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 4
+#define IWL_UCODE_SECTION_MAX 6
+#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU	(IWL_UCODE_SECTION_MAX/2)
 
 struct iwl_ucode_capabilities {
 	u32 max_probe_length;
@@ -150,6 +173,8 @@
 
 struct fw_img {
 	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+	bool is_secure;
+	bool is_dual_cpus;
 };
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index dfa4d2e..ad8e19a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -34,7 +34,6 @@
 #include "iwl-csr.h"
 #include "iwl-debug.h"
 #include "iwl-fh.h"
-#include "iwl-csr.h"
 
 #define IWL_POLL_INTERVAL 10	/* microseconds */
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75..a70c7b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS		(0x00000800)
 
+#define APMG_RTC_INT_STT_RFKILL		(0x10000000)
+
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 80b4750..143292b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -344,7 +344,7 @@
 	u8 cmd_queue;
 	u8 cmd_fifo;
 	const u8 *no_reclaim_cmds;
-	int n_no_reclaim_cmds;
+	unsigned int n_no_reclaim_cmds;
 
 	bool rx_buf_size_8k;
 	bool bc_table_dword;
@@ -601,7 +601,7 @@
 {
 	int ret;
 
-	if (trans->state != IWL_TRANS_FW_ALIVE) {
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 		return -EIO;
 	}
@@ -640,8 +640,8 @@
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			       struct iwl_device_cmd *dev_cmd, int queue)
 {
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
 	return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -649,16 +649,16 @@
 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 				     int ssn, struct sk_buff_head *skbs)
 {
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
 {
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
 	trans->ops->txq_disable(trans, queue);
 }
@@ -669,8 +669,8 @@
 {
 	might_sleep();
 
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
 	trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
 				 frame_limit, ssn);
@@ -685,8 +685,8 @@
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 {
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
 	return trans->ops->wait_tx_queue_empty(trans);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b..5d066cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -98,126 +98,258 @@
 
 #undef EVENT_PRIO_ANT
 
-/* BT Antenna Coupling Threshold (dB) */
-#define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)
-#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD	(3)
-
 #define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD	(-62)
 #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
-#define BT_REDUCED_TX_POWER_BIT			BIT(7)
-
-static inline bool is_loose_coex(void)
-{
-	return iwlwifi_mod_params.ant_coupling >
-		IWL_BT_ANTENNA_COUPLING_THRESHOLD;
-}
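+/* BT antenna coupling threshold (dB) */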
+#define BT_ANTENNA_COUPLING_THRESHOLD		(30)
 
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 {
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+		return 0;
+
 	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
 				    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
 				    &iwl_bt_prio_tbl);
 }
 
-static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
-{
-	struct iwl_bt_coex_prot_env_cmd env_cmd;
-	int ret;
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
+	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
+	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
+};
 
-	env_cmd.action = action;
-	env_cmd.type = type;
-	ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
-				   sizeof(env_cmd), &env_cmd);
-	if (ret)
-		IWL_ERR(mvm, "failed to send BT env command\n");
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
+	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
+	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
+};
+
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+	cpu_to_le32(0xf0f0f0f0),
+	cpu_to_le32(0xc0c0c0c0),
+	cpu_to_le32(0xfcfcfcfc),
+	cpu_to_le32(0xff00ff00),
+};
+
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+	{
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+	},
+	{
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+	},
+	{
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x40000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x44000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0xf0005000),
+	},
+};
+
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+	{
+		/* Tight */
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaeaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xcc00ff28),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0xcc00aaaa),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0xc0004000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xf0005000),
+	},
+	{
+		/* Loose */
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xcc00ff28),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0xcc00aaaa),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0x00000000),
+		cpu_to_le32(0xf0005000),
+		cpu_to_le32(0xf0005000),
+	},
+	{
+		/* Tx Tx disabled */
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xaaaaaaaa),
+		cpu_to_le32(0xcc00ff28),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0xcc00aaaa),
+		cpu_to_le32(0x0000aaaa),
+		cpu_to_le32(0xC0004000),
+		cpu_to_le32(0xC0004000),
+		cpu_to_le32(0xF0005000),
+		cpu_to_le32(0xF0005000),
+	},
+};
+
+/* 20MHz / 40MHz below / 40MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+	/* dummy entry for channel 0 */
+	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+	{
+		cpu_to_le64(0x0000001FFFULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x00007FFFFFULL),
+	},
+	{
+		cpu_to_le64(0x000000FFFFULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x0003FFFFFFULL),
+	},
+	{
+		cpu_to_le64(0x000003FFFCULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x000FFFFFFCULL),
+	},
+	{
+		cpu_to_le64(0x00001FFFE0ULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x007FFFFFE0ULL),
+	},
+	{
+		cpu_to_le64(0x00007FFF80ULL),
+		cpu_to_le64(0x00007FFFFFULL),
+		cpu_to_le64(0x01FFFFFF80ULL),
+	},
+	{
+		cpu_to_le64(0x0003FFFC00ULL),
+		cpu_to_le64(0x0003FFFFFFULL),
+		cpu_to_le64(0x0FFFFFFC00ULL),
+	},
+	{
+		cpu_to_le64(0x000FFFF000ULL),
+		cpu_to_le64(0x000FFFFFFCULL),
+		cpu_to_le64(0x3FFFFFF000ULL),
+	},
+	{
+		cpu_to_le64(0x007FFF8000ULL),
+		cpu_to_le64(0x007FFFFFE0ULL),
+		cpu_to_le64(0xFFFFFF8000ULL),
+	},
+	{
+		cpu_to_le64(0x01FFFE0000ULL),
+		cpu_to_le64(0x01FFFFFF80ULL),
+		cpu_to_le64(0xFFFFFE0000ULL),
+	},
+	{
+		cpu_to_le64(0x0FFFF00000ULL),
+		cpu_to_le64(0x0FFFFFFC00ULL),
+		cpu_to_le64(0x0ULL),
+	},
+	{
+		cpu_to_le64(0x3FFFC00000ULL),
+		cpu_to_le64(0x3FFFFFF000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFFFE000000ULL),
+		cpu_to_le64(0xFFFFFF8000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFFF8000000ULL),
+		cpu_to_le64(0xFFFFFE0000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFE00000000ULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x0)
+	},
+};
+
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+	cpu_to_le32(0x22002200),
+	cpu_to_le32(0x33113311),
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum iwl_bt_coex_lut_type ret;
+	u16 phy_ctx_id;
+
+	/*
+	 * Checking that we hold mvm->mutex is a good idea, but the rate
+	 * control can't acquire the mutex since it runs in the Tx path.
+	 * So this is racy in that case, but in the worst case the AMPDU
+	 * size limit will be wrong for a short time, which is not a big
+	 * issue.
+	 */
+
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+	if (!chanctx_conf ||
+	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+		rcu_read_unlock();
+		return BT_COEX_LOOSE_LUT;
+	}
+
+	ret = BT_COEX_TX_DIS_LUT;
+
+	if (mvm->cfg->bt_shared_single_ant) {
+		rcu_read_unlock();
+		return ret;
+	}
+
+	phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+	if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+		ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+	else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+		ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+	/* else - default = TX TX disallowed */
+
+	rcu_read_unlock();
+
 	return ret;
 }
 
-enum iwl_bt_kill_msk {
-	BT_KILL_MSK_DEFAULT,
-	BT_KILL_MSK_SCO_HID_A2DP,
-	BT_KILL_MSK_REDUCED_TXPOW,
-	BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
-	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
-	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
-};
-
-static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
-	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
-	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
-};
-
-#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
-
-/* Tight Coex */
-static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaeaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xcc00ff28),
-	cpu_to_le32(0x0000aaaa),
-	cpu_to_le32(0xcc00aaaa),
-	cpu_to_le32(0x0000aaaa),
-	cpu_to_le32(0xc0004000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0xf0005000),
-	cpu_to_le32(0xf0005000),
-};
-
-/* Loose Coex */
-static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xcc00ff28),
-	cpu_to_le32(0x0000aaaa),
-	cpu_to_le32(0xcc00aaaa),
-	cpu_to_le32(0x0000aaaa),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0xf0005000),
-	cpu_to_le32(0xf0005000),
-};
-
-/* Full concurrency */
-static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0xaaaaaaaa),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x00000000),
-};
-
-/* single shared antenna */
-static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
-	cpu_to_le32(0x40000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x44000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x40000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0x44000000),
-	cpu_to_le32(0x00000000),
-	cpu_to_le32(0xC0004000),
-	cpu_to_le32(0xF0005000),
-	cpu_to_le32(0xC0004000),
-	cpu_to_le32(0xF0005000),
-};
-
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
 	struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@
 		.flags = CMD_SYNC,
 	};
 	int ret;
+	u32 flags;
 
-	/* go to CALIB state in internal BT-Coex state machine */
-	ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
-			      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-	if (ret)
-		return ret;
-
-	ret  = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
-			       BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-	if (ret)
-		return ret;
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+		return 0;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
 	if (!bt_cmd)
@@ -246,40 +371,52 @@
 	cmd.data[0] = bt_cmd;
 
 	bt_cmd->max_kill = 5;
-	bt_cmd->bt3_time_t7_value = 1;
-	bt_cmd->bt3_prio_sample_time = 2;
-	bt_cmd->bt3_timer_t2_value = 0xc;
+	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
+	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
+	bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
+	bt_cmd->bt4_tx_rx_max_freq0 = 15,
 
-	bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
+	flags = iwlwifi_mod_params.bt_coex_active ?
 			BT_COEX_NW : BT_COEX_DISABLE;
-	bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+	flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
+	bt_cmd->flags = cpu_to_le32(flags);
 
-	bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+	bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
 					    BT_VALID_BT_PRIO_BOOST |
 					    BT_VALID_MAX_KILL |
 					    BT_VALID_3W_TMRS |
 					    BT_VALID_KILL_ACK |
 					    BT_VALID_KILL_CTS |
 					    BT_VALID_REDUCED_TX_POWER |
-					    BT_VALID_LUT);
+					    BT_VALID_LUT |
+					    BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+					    BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+					    BT_VALID_MULTI_PRIO_LUT |
+					    BT_VALID_CORUN_LUT_20 |
+					    BT_VALID_CORUN_LUT_40 |
+					    BT_VALID_ANT_ISOLATION |
+					    BT_VALID_ANT_ISOLATION_THRS |
+					    BT_VALID_TXTX_DELTA_FREQ_THRS |
+					    BT_VALID_TXRX_MAX_FREQ_0);
 
 	if (mvm->cfg->bt_shared_single_ant)
-		memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
-		       sizeof(iwl_single_shared_ant_lookup));
-	else if (is_loose_coex())
-		memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
-		       sizeof(iwl_tight_lookup));
+		memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+		       sizeof(iwl_single_shared_ant));
 	else
-		memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
-		       sizeof(iwl_tight_lookup));
+		memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+		       sizeof(iwl_combined_lookup));
 
-	bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+	memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+	       sizeof(iwl_bt_prio_boost));
+	memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+	       sizeof(iwl_bt_mprio_lut));
 	bt_cmd->kill_ack_msk =
 		cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
 	bt_cmd->kill_cts_msk =
 		cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
 
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 
@@ -334,13 +471,17 @@
 	if (!bt_cmd)
 		return -ENOMEM;
 	cmd.data[0] = bt_cmd;
+	bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
 
 	bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
 	bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
-	bt_cmd->valid_bit_msk =
-		cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+	bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+					     BT_VALID_KILL_ACK |
+					     BT_VALID_KILL_CTS);
 
-	IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
+	IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+		       iwl_bt_ack_kill_msk[bt_kill_msk],
+		       iwl_bt_cts_kill_msk[bt_kill_msk]);
 
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 
@@ -364,12 +505,16 @@
 	struct iwl_mvm_sta *mvmsta;
 	int ret;
 
-	/* This can happen if the station has been removed right now */
 	if (sta_id == IWL_MVM_STATION_COUNT)
 		return 0;
 
 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 					lockdep_is_held(&mvm->mutex));
+
+	/* This can happen if the station has been removed right now */
+	if (IS_ERR_OR_NULL(sta))
+		return 0;
+
 	mvmsta = (void *)sta->drv_priv;
 
 	/* nothing to do */
@@ -380,8 +525,10 @@
 	if (!bt_cmd)
 		return -ENOMEM;
 	cmd.data[0] = bt_cmd;
+	bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
 
-	bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+	bt_cmd->valid_bit_msk =
+		cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
 	bt_cmd->bt_reduced_tx_power = sta_id;
 
 	if (enable)
@@ -403,8 +550,25 @@
 	struct iwl_mvm *mvm;
 	u32 num_bss_ifaces;
 	bool reduced_tx_power;
+	struct ieee80211_chanctx_conf *primary;
+	struct ieee80211_chanctx_conf *secondary;
 };
 
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       bool enable, int rssi)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->bf_data.last_bt_coex_event = rssi;
+	mvmvif->bf_data.bt_coex_max_thold =
+		enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+	mvmvif->bf_data.bt_coex_min_thold =
+		enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
 static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 				      struct ieee80211_vif *vif)
 {
@@ -413,65 +577,94 @@
 	struct iwl_mvm *mvm = data->mvm;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	enum ieee80211_smps_mode smps_mode;
-	enum ieee80211_band band;
 	int ave_rssi;
 
 	lockdep_assert_held(&mvm->mutex);
-	if (vif->type != NL80211_IFTYPE_STATION)
-		return;
 
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-	if (chanctx_conf && chanctx_conf->def.chan)
-		band = chanctx_conf->def.chan->band;
-	else
-		band = -1;
-	rcu_read_unlock();
+	if (vif->type != NL80211_IFTYPE_STATION &&
+	    vif->type != NL80211_IFTYPE_AP)
+		return;
 
 	smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	/* non associated BSSes aren't to be considered */
-	if (!vif->bss_conf.assoc)
-		return;
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
-	if (band != IEEE80211_BAND_2GHZ) {
-		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-				    smps_mode);
+	/* If channel context is invalid or not on 2.4GHz .. */
+	if ((!chanctx_conf ||
+	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+		/* ... and it is an associated STATION, relax constraints */
+		if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
+			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+					    smps_mode);
+		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
 	}
 
-	if (data->notif->bt_status)
+	/* SoftAP / GO will always be primary */
+	if (vif->type == NL80211_IFTYPE_AP) {
+		if (!mvmvif->ap_ibss_active)
+			return;
+
+		/* the Ack / Cts kill mask must be default if AP / GO */
+		data->reduced_tx_power = false;
+
+		if (chanctx_conf == data->primary)
+			return;
+
+		/* downgrade the current primary no matter what its type is */
+		data->secondary = data->primary;
+		data->primary = chanctx_conf;
+		return;
+	}
+
+	data->num_bss_ifaces++;
+
+	/* we are now a STA / P2P Client, and take associated ones only */
+	if (!vif->bss_conf.assoc)
+		return;
+
+	/* STA / P2P Client, try to be primary if first vif */
+	if (!data->primary || data->primary == chanctx_conf)
+		data->primary = chanctx_conf;
+	else if (!data->secondary)
+		/* if secondary is not NULL, it might be a GO */
+		data->secondary = chanctx_conf;
+
+	if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
+		smps_mode = IEEE80211_SMPS_STATIC;
+	else if (le32_to_cpu(data->notif->bt_activity_grading) >=
+		 BT_LOW_TRAFFIC)
 		smps_mode = IEEE80211_SMPS_DYNAMIC;
 
-	if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
-		smps_mode = IEEE80211_SMPS_STATIC;
-
 	IWL_DEBUG_COEX(data->mvm,
-		       "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+		       "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
 		       mvmvif->id,  data->notif->bt_status,
-		       data->notif->bt_traffic_load, smps_mode);
+		       data->notif->bt_activity_grading, smps_mode);
 
 	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
 
 	/* don't reduce the Tx power if in loose scheme */
-	if (is_loose_coex())
+	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+	    mvm->cfg->bt_shared_single_ant) {
+		data->reduced_tx_power = false;
+		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
+	}
 
-	data->num_bss_ifaces++;
-
-	/* reduced Txpower only if there are open BT connections, so ...*/
-	if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
+	/* reduced Txpower only if BT is on, so ...*/
+	if (!data->notif->bt_status) {
 		/* ... cancel reduced Tx power ... */
 		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
 			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
 		data->reduced_tx_power = false;
 
 		/* ... and there is no need to get reports on RSSI any more. */
-		ieee80211_disable_rssi_reports(vif);
+		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
 	}
 
-	ave_rssi = ieee80211_ave_rssi(vif);
+	/* try to get the avg rssi from fw */
+	ave_rssi = mvmvif->bf_data.ave_beacon_signal;
 
 	/* if the RSSI isn't valid, fake it is very low */
 	if (!ave_rssi)
@@ -499,8 +692,7 @@
 	}
 
 	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
-	ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
-				      BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
+	iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
 }
 
 static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +702,72 @@
 		.notif = &mvm->last_bt_notif,
 		.reduced_tx_power = true,
 	};
+	struct iwl_bt_coex_ci_cmd cmd = {};
+	u8 ci_bw_idx;
 
+	rcu_read_lock();
 	ieee80211_iterate_active_interfaces_atomic(
 					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 					iwl_mvm_bt_notif_iterator, &data);
 
+	if (data.primary) {
+		struct ieee80211_chanctx_conf *chan = data.primary;
+		if (WARN_ON(!chan->def.chan)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+			ci_bw_idx = 0;
+			cmd.co_run_bw_primary = 0;
+		} else {
+			cmd.co_run_bw_primary = 1;
+			if (chan->def.center_freq1 >
+			    chan->def.chan->center_freq)
+				ci_bw_idx = 2;
+			else
+				ci_bw_idx = 1;
+		}
+
+		cmd.bt_primary_ci =
+			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+		cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+	}
+
+	if (data.secondary) {
+		struct ieee80211_chanctx_conf *chan = data.secondary;
+		if (WARN_ON(!data.secondary->def.chan)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+			ci_bw_idx = 0;
+			cmd.co_run_bw_secondary = 0;
+		} else {
+			cmd.co_run_bw_secondary = 1;
+			if (chan->def.center_freq1 >
+			    chan->def.chan->center_freq)
+				ci_bw_idx = 2;
+			else
+				ci_bw_idx = 1;
+		}
+
+		cmd.bt_secondary_ci =
+			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+		cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+	}
+
+	rcu_read_unlock();
+
+	/* Don't spam the fw with the same command over and over */
+	if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+					 sizeof(cmd), &cmd))
+			IWL_ERR(mvm, "Failed to send BT_CI cmd");
+		memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+	}
+
 	/*
 	 * If there are no BSS / P2P client interfaces, reduced Tx Power is
 	 * irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +789,18 @@
 
 
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-	IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+	IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+		       notif->bt_status ? "ON" : "OFF");
 	IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-	IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+		       le32_to_cpu(notif->primary_ch_lut));
+	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+		       le32_to_cpu(notif->secondary_ch_lut));
+	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+		       le32_to_cpu(notif->bt_activity_grading));
 	IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
 		       notif->bt_agg_traffic_load);
-	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 
 	/* remember this notification for future use: rssi fluctuations */
 	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +824,18 @@
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	/* If channel context is invalid or not on 2.4GHz - don't count it */
+	if (!chanctx_conf ||
+	    chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
 	if (vif->type != NL80211_IFTYPE_STATION ||
 	    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
 		return;
@@ -594,15 +865,15 @@
 	};
 	int ret;
 
-	mutex_lock(&mvm->mutex);
+	lockdep_assert_held(&mvm->mutex);
 
 	/* Rssi update while not associated ?! */
 	if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
-		goto out_unlock;
+		return;
 
-	/* No open connection - reports should be disabled */
-	if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
-		goto out_unlock;
+	/* No BT - reports should be disabled */
+	if (!mvm->last_bt_notif.bt_status)
+		return;
 
 	IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
 		       rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +882,8 @@
 	 * Check if rssi is good enough for reduced Tx power, but not in loose
 	 * scheme.
 	 */
-	if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
+	if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+	    iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
 		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
 						  false);
 	else
@@ -633,12 +905,52 @@
 
 	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-
- out_unlock:
-	mutex_unlock(&mvm->mutex);
 }
 
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT	(1200)
+
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta)
 {
+	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	enum iwl_bt_coex_lut_type lut_type;
+
+	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+	    BT_LOW_TRAFFIC)
+		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+	if (lut_type == BT_COEX_LOOSE_LUT)
+		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+	/* tight coex, high bt traffic, reduce AGG time limit */
+	return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+	    BT_HIGH_TRAFFIC)
+		return true;
+
+	/*
+	 * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+	 * already killed.
+	 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+	 * Tx.
+	 */
+	return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
+}
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+		return;
+
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 2bf29f7..4b6d670 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -70,7 +70,9 @@
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS	20
-#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS	20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS	8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS	30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS	20
 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT	50
 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT	50
 #define IWL_MVM_PS_SNOOZE_INTERVAL		25
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 417639f..6f45966 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -67,6 +67,7 @@
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
+#include <net/addrconf.h>
 #include "iwl-modparams.h"
 #include "fw-api.h"
 #include "mvm.h"
@@ -381,14 +382,74 @@
 	union {
 		struct iwl_proto_offload_cmd_v1 v1;
 		struct iwl_proto_offload_cmd_v2 v2;
+		struct iwl_proto_offload_cmd_v3_small v3s;
+		struct iwl_proto_offload_cmd_v3_large v3l;
 	} cmd = {};
+	struct iwl_host_cmd hcmd = {
+		.id = PROT_OFFLOAD_CONFIG_CMD,
+		.flags = CMD_SYNC,
+		.data[0] = &cmd,
+		.dataflags[0] = IWL_HCMD_DFL_DUP,
+	};
 	struct iwl_proto_offload_cmd_common *common;
 	u32 enabled = 0, size;
+	u32 capa_flags = mvm->fw->ucode_capa.flags;
 #if IS_ENABLED(CONFIG_IPV6)
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int i;
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+		struct iwl_ns_config *nsc;
+		struct iwl_targ_addr *addrs;
+		int n_nsc, n_addrs;
+		int c;
+
+		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+			nsc = cmd.v3s.ns_config;
+			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+			addrs = cmd.v3s.targ_addrs;
+			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+		} else {
+			nsc = cmd.v3l.ns_config;
+			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+			addrs = cmd.v3l.targ_addrs;
+			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+		}
+
+		if (mvmvif->num_target_ipv6_addrs)
+			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+		/*
+		 * For each address we have (and that will fit) fill a target
+		 * address struct and combine for NS offload structs with the
+		 * solicited node addresses.
+		 */
+		for (i = 0, c = 0;
+		     i < mvmvif->num_target_ipv6_addrs &&
+		     i < n_addrs && c < n_nsc; i++) {
+			struct in6_addr solicited_addr;
+			int j;
+
+			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+						  &solicited_addr);
+			for (j = 0; j < c; j++)
+				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+						  &solicited_addr) == 0)
+					break;
+			if (j == c)
+				c++;
+			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+			addrs[i].config_num = cpu_to_le32(j);
+			nsc[j].dest_ipv6_addr = solicited_addr;
+			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+		}
+
+		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+			cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+		else
+			cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
 		if (mvmvif->num_target_ipv6_addrs) {
 			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
 			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@
 	}
 #endif
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+		common = &cmd.v3s.common;
+		size = sizeof(cmd.v3s);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+		common = &cmd.v3l.common;
+		size = sizeof(cmd.v3l);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
 		common = &cmd.v2.common;
 		size = sizeof(cmd.v2);
 	} else {
@@ -438,8 +505,8 @@
 
 	common->enabled = cpu_to_le32(enabled);
 
-	return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
-				    size, &cmd);
+	hcmd.len[0] = size;
+	return iwl_mvm_send_cmd(mvm, &hcmd);
 }
 
 enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@
 	return 0;
 }
 
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+	};
+	struct iwl_host_cmd cmd = {
+		.id = NON_QOS_TX_COUNTER_CMD,
+		.flags = CMD_SYNC | CMD_WANT_SKB,
+	};
+	int err;
+	u32 size;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+		cmd.data[0] = &query_cmd;
+		cmd.len[0] = sizeof(query_cmd);
+	}
+
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	if (err)
+		return err;
+
+	size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+	size -= sizeof(cmd.resp_pkt->hdr);
+	if (size < sizeof(__le16)) {
+		err = -EINVAL;
+	} else {
+		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+		/* new API returns next, not last-used seqno */
+		if (mvm->fw->ucode_capa.flags &
+				IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+			err -= 0x10;
+	}
+
+	iwl_free_resp(&cmd);
+	return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+		.value = cpu_to_le16(mvmvif->seqno),
+	};
+
+	/* return if called during restart, not resume from D3 */
+	if (!mvmvif->seqno_valid)
+		return;
+
+	mvmvif->seqno_valid = false;
+
+	if (!(mvm->fw->ucode_capa.flags &
+			IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
+		return;
+
+	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+				 sizeof(query_cmd), &query_cmd))
+		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 			     struct cfg80211_wowlan *wowlan,
 			     bool test)
@@ -829,7 +964,6 @@
 	};
 	int ret, i;
 	int len __maybe_unused;
-	u16 seq;
 	u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
 
 	if (!wowlan) {
@@ -872,26 +1006,15 @@
 
 	mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
 
-	/*
-	 * The D3 firmware still hardcodes the AP station ID for the
-	 * BSS we're associated with as 0. Store the real STA ID here
-	 * and assign 0. When we leave this function, we'll restore
-	 * the original value for the resume code.
-	 */
-	old_ap_sta_id = mvm_ap_sta->sta_id;
-	mvm_ap_sta->sta_id = 0;
-	mvmvif->ap_sta_id = 0;
-
 	/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
 
 	wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
 
-	/*
-	 * We know the last used seqno, and the uCode expects to know that
-	 * one, it will increment before TX.
-	 */
-	seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
-	wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+	/* Query the last used seqno and set it */
+	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+	if (ret < 0)
+		goto out_noreset;
+	wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
 
 	/*
 	 * For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@
 	 * increment after using the value (i.e. store the next value to use).
 	 */
 	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-		seq = mvm_ap_sta->tid_data[i].seq_number;
+		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
 		seq -= 0x10;
 		wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
 	}
@@ -945,6 +1068,16 @@
 	iwl_trans_stop_device(mvm->trans);
 
 	/*
+	 * The D3 firmware still hardcodes the AP station ID for the
+	 * BSS we're associated with as 0. Store the real STA ID here
+	 * and assign 0. When we leave this function, we'll restore
+	 * the original value for the resume code.
+	 */
+	old_ap_sta_id = mvm_ap_sta->sta_id;
+	mvm_ap_sta->sta_id = 0;
+	mvmvif->ap_sta_id = 0;
+
+	/*
 	 * Set the HW restart bit -- this is mostly true as we're
 	 * going to load new firmware and reprogram that, though
 	 * the reprogramming is going to be manual to avoid adding
@@ -1059,6 +1192,10 @@
 	if (ret)
 		goto out;
 
+	ret = iwl_mvm_power_update_device_mode(mvm);
+	if (ret)
+		goto out;
+
 	ret = iwl_mvm_power_update_mode(mvm, vif);
 	if (ret)
 		goto out;
@@ -1109,16 +1246,26 @@
 	return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+	u16 pattern_number;
+	u16 qos_seq_ctr[8];
+	u32 wakeup_reasons;
+	u32 wake_packet_length;
+	u32 wake_packet_bufsize;
+	const u8 *wake_packet;
+};
+
 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif,
-					  struct iwl_wowlan_status *status)
+					  struct iwl_wowlan_status_data *status)
 {
 	struct sk_buff *pkt = NULL;
 	struct cfg80211_wowlan_wakeup wakeup = {
 		.pattern_idx = -1,
 	};
 	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
-	u32 reasons = le32_to_cpu(status->wakeup_reasons);
+	u32 reasons = status->wakeup_reasons;
 
 	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
 		wakeup_report = NULL;
@@ -1130,7 +1277,7 @@
 
 	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
 		wakeup.pattern_idx =
-			le16_to_cpu(status->pattern_number);
+			status->pattern_number;
 
 	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
 		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@
 		wakeup.tcp_match = true;
 
 	if (status->wake_packet_bufsize) {
-		int pktsize = le32_to_cpu(status->wake_packet_bufsize);
-		int pktlen = le32_to_cpu(status->wake_packet_length);
+		int pktsize = status->wake_packet_bufsize;
+		int pktlen = status->wake_packet_length;
 		const u8 *pktdata = status->wake_packet;
 		struct ieee80211_hdr *hdr = (void *)pktdata;
 		int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@
 	kfree_skb(pkt);
 }
 
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+				  struct ieee80211_key_seq *seq)
+{
+	u64 pn;
+
+	pn = le64_to_cpu(sc->pn);
+	seq->ccmp.pn[0] = pn >> 40;
+	seq->ccmp.pn[1] = pn >> 32;
+	seq->ccmp.pn[2] = pn >> 24;
+	seq->ccmp.pn[3] = pn >> 16;
+	seq->ccmp.pn[4] = pn >> 8;
+	seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+				   struct ieee80211_key_seq *seq)
+{
+	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
+				   struct ieee80211_key_conf *key)
+{
+	int tid;
+
+	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+		struct ieee80211_key_seq seq = {};
+
+		iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+		ieee80211_set_key_rx_seq(key, tid, &seq);
+	}
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+				    struct ieee80211_key_conf *key)
+{
+	int tid;
+
+	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+		struct ieee80211_key_seq seq = {};
+
+		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+		ieee80211_set_key_rx_seq(key, tid, &seq);
+	}
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+				   struct iwl_wowlan_status_v6 *status)
+{
+	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+	struct iwl_wowlan_status_v6 *status;
+	void *last_gtk;
+	u32 cipher;
+	bool find_phase, unhandled_cipher;
+	int num_keys;
+};
+
+static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   struct ieee80211_key_conf *key,
+				   void *_data)
+{
+	struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+	if (data->unhandled_cipher)
+		return;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		/* ignore WEP completely, nothing to do */
+		return;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* we support these */
+		break;
+	default:
+		/* everything else (even CMAC for MFP) - disconnect from AP */
+		data->unhandled_cipher = true;
+		return;
+	}
+
+	data->num_keys++;
+
+	/*
+	 * pairwise key - update sequence counters only;
+	 * note that this assumes no TDLS sessions are active
+	 */
+	if (sta) {
+		struct ieee80211_key_seq seq = {};
+		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+		if (data->find_phase)
+			return;
+
+		switch (key->cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+			iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
+			iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+			break;
+		}
+		ieee80211_set_key_tx_seq(key, &seq);
+
+		/* that's it for this key */
+		return;
+	}
+
+	if (data->find_phase) {
+		data->last_gtk = key;
+		data->cipher = key->cipher;
+		return;
+	}
+
+	if (data->status->num_of_gtk_rekeys)
+		ieee80211_remove_key(key);
+	else if (data->last_gtk == key)
+		iwl_mvm_set_key_rx_seq(key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_wowlan_status_v6 *status)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+		.status = status,
+	};
+
+	if (!status || !vif->bss_conf.bssid)
+		return false;
+
+	/* find last GTK that we used initially, if any */
+	gtkdata.find_phase = true;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_gtks, &gtkdata);
+	/* not trying to keep connections with MFP/unhandled ciphers */
+	if (gtkdata.unhandled_cipher)
+		return false;
+	if (!gtkdata.num_keys)
+		return true;
+	if (!gtkdata.last_gtk)
+		return false;
+
+	/*
+	 * invalidate all other GTKs that might still exist and update
+	 * the one that we used
+	 */
+	gtkdata.find_phase = false;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_gtks, &gtkdata);
+
+	if (status->num_of_gtk_rekeys) {
+		struct ieee80211_key_conf *key;
+		struct {
+			struct ieee80211_key_conf conf;
+			u8 key[32];
+		} conf = {
+			.conf.cipher = gtkdata.cipher,
+			.conf.keyidx = status->gtk.key_index,
+		};
+
+		switch (gtkdata.cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+			memcpy(conf.conf.key, status->gtk.decrypt_key,
+			       WLAN_KEY_LEN_CCMP);
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+			/* leave TX MIC key zeroed, we don't use it anyway */
+			memcpy(conf.conf.key +
+			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+			       status->gtk.tkip_mic_key, 8);
+			break;
+		}
+
+		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+		if (IS_ERR(key))
+			return false;
+		iwl_mvm_set_key_rx_seq(key, status);
+	}
+
+	if (status->num_of_gtk_rekeys) {
+		__be64 replay_ctr =
+			cpu_to_be64(le64_to_cpu(status->replay_ctr));
+		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+					   (void *)&replay_ctr, GFP_KERNEL);
+	}
+
+	mvmvif->seqno_valid = true;
+	/* +0x10 because the set API expects next-to-use, not last-used */
+	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+	return true;
+}
+
 /* releases the MVM mutex */
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif)
 {
 	u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@
 		.id = WOWLAN_GET_STATUSES,
 		.flags = CMD_SYNC | CMD_WANT_SKB,
 	};
-	struct iwl_wowlan_status *status;
-	int ret, len;
+	struct iwl_wowlan_status_data status;
+	struct iwl_wowlan_status_v6 *status_v6;
+	int ret, len, status_size, i;
+	bool keep;
+	struct ieee80211_sta *ap_sta;
+	struct iwl_mvm_sta *mvm_ap_sta;
 
 	iwl_trans_read_mem_bytes(mvm->trans, base,
 				 &err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@
 	if (!cmd.resp_pkt)
 		goto out_unlock;
 
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+		status_size = sizeof(struct iwl_wowlan_status_v6);
+	else
+		status_size = sizeof(struct iwl_wowlan_status_v4);
+
 	len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+	if (len - sizeof(struct iwl_cmd_header) < status_size) {
 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
 		goto out_free_resp;
 	}
 
-	status = (void *)cmd.resp_pkt->data;
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+		status_v6 = (void *)cmd.resp_pkt->data;
+
+		status.pattern_number = le16_to_cpu(status_v6->pattern_number);
+		for (i = 0; i < 8; i++)
+			status.qos_seq_ctr[i] =
+				le16_to_cpu(status_v6->qos_seq_ctr[i]);
+		status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
+		status.wake_packet_length =
+			le32_to_cpu(status_v6->wake_packet_length);
+		status.wake_packet_bufsize =
+			le32_to_cpu(status_v6->wake_packet_bufsize);
+		status.wake_packet = status_v6->wake_packet;
+	} else {
+		struct iwl_wowlan_status_v4 *status_v4;
+		status_v6 = NULL;
+		status_v4 = (void *)cmd.resp_pkt->data;
+
+		status.pattern_number = le16_to_cpu(status_v4->pattern_number);
+		for (i = 0; i < 8; i++)
+			status.qos_seq_ctr[i] =
+				le16_to_cpu(status_v4->qos_seq_ctr[i]);
+		status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
+		status.wake_packet_length =
+			le32_to_cpu(status_v4->wake_packet_length);
+		status.wake_packet_bufsize =
+			le32_to_cpu(status_v4->wake_packet_bufsize);
+		status.wake_packet = status_v4->wake_packet;
+	}
 
 	if (len - sizeof(struct iwl_cmd_header) !=
-	    sizeof(*status) +
-	    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+	    status_size + ALIGN(status.wake_packet_bufsize, 4)) {
 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
 		goto out_free_resp;
 	}
 
+	/* still at hard-coded place 0 for D3 image */
+	ap_sta = rcu_dereference_protected(
+			mvm->fw_id_to_mac_id[0],
+			lockdep_is_held(&mvm->mutex));
+	if (IS_ERR_OR_NULL(ap_sta))
+		goto out_free_resp;
+
+	mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		u16 seq = status.qos_seq_ctr[i];
+		/* firmware stores last-used value, we store next value */
+		seq += 0x10;
+		mvm_ap_sta->tid_data[i].seq_number = seq;
+	}
+
 	/* now we have all the data we need, unlock to avoid mac80211 issues */
 	mutex_unlock(&mvm->mutex);
 
-	iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+	keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+
 	iwl_free_resp(&cmd);
-	return;
+	return keep;
 
  out_free_resp:
 	iwl_free_resp(&cmd);
  out_unlock:
 	mutex_unlock(&mvm->mutex);
+	return false;
 }
 
 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@
 #endif
 }
 
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	/* skip the one we keep connection on */
+	if (data == vif)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		ieee80211_resume_disconnect(vif);
+}
+
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
 	struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@
 	struct ieee80211_vif *vif = NULL;
 	int ret;
 	enum iwl_d3_status d3_status;
+	bool keep = false;
 
 	mutex_lock(&mvm->mutex);
 
@@ -1368,7 +1803,7 @@
 	/* query SRAM first in case we want event logging */
 	iwl_mvm_read_d3_sram(mvm);
 
-	iwl_mvm_query_wakeup_reasons(mvm, vif);
+	keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
 	/* has unlocked the mutex, so skip that */
 	goto out;
 
@@ -1376,8 +1811,10 @@
 	mutex_unlock(&mvm->mutex);
 
  out:
-	if (!test && vif)
-		ieee80211_resume_disconnect(vif);
+	if (!test)
+		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+			IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 	/* return 1 to reconfigure the device */
 	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index aac81b8..9864d71 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -246,58 +246,56 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
-						const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+						char __user *user_buf,
 						size_t count, loff_t *ppos)
 {
 	struct iwl_mvm *mvm = file->private_data;
-	char buf[8] = {};
-	int allow;
+	char buf[64];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+			 mvm->disable_power_off);
+	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+			 mvm->disable_power_off_d3);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+						 const char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[64] = {};
+	int ret;
+	int val;
 
 	if (!mvm->ucode_loaded)
 		return -EIO;
 
-	if (copy_from_user(buf, user_buf, sizeof(buf)))
+	count = min_t(size_t, count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;
 
-	if (sscanf(buf, "%d", &allow) != 1)
+	if (!strncmp("disable_power_off_d0=", buf, 21)) {
+		if (sscanf(buf + 21, "%d", &val) != 1)
+			return -EINVAL;
+		mvm->disable_power_off = val;
+	} else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+		if (sscanf(buf + 21, "%d", &val) != 1)
+			return -EINVAL;
+		mvm->disable_power_off_d3 = val;
+	} else {
 		return -EINVAL;
+	}
 
-	IWL_DEBUG_POWER(mvm, "%s device power down\n",
-			allow ? "allow" : "prevent");
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_power_update_device_mode(mvm);
+	mutex_unlock(&mvm->mutex);
 
-	/*
-	 * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
-	 */
-
-	return count;
-}
-
-static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
-						   const char __user *user_buf,
-						   size_t count, loff_t *ppos)
-{
-	struct iwl_mvm *mvm = file->private_data;
-	char buf[8] = {};
-	int allow;
-
-	if (copy_from_user(buf, user_buf, sizeof(buf)))
-		return -EFAULT;
-
-	if (sscanf(buf, "%d", &allow) != 1)
-		return -EINVAL;
-
-	IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
-			allow ? "allow" : "prevent");
-
-	/*
-	 * TODO: When WoWLAN FW alive notification happens, driver will send
-	 * REPLY_DEBUG_CMD setting power_down_allow flag according to
-	 * mvm->prevent_power_down_d3
-	 */
-	mvm->prevent_power_down_d3 = !allow;
-
-	return count;
+	return ret ?: count;
 }
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -344,6 +342,7 @@
 	case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
 		IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
 		dbgfs_pm->disable_power_off = val;
+		break;
 	case MVM_DEBUGFS_PM_LPRX_ENA:
 		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
 		dbgfs_pm->lprx_ena = val;
@@ -371,7 +370,8 @@
 	int val;
 	int ret;
 
-	if (copy_from_user(buf, user_buf, sizeof(buf)))
+	count = min_t(size_t, count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;
 
 	if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +394,9 @@
 		if (sscanf(buf + 16, "%d", &val) != 1)
 			return -EINVAL;
 		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-	} else if (!strncmp("disable_power_off=", buf, 18)) {
+	} else if (!strncmp("disable_power_off=", buf, 18) &&
+		   !(mvm->fw->ucode_capa.flags &
+		     IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
 		if (sscanf(buf + 18, "%d", &val) != 1)
 			return -EINVAL;
 		param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +583,21 @@
 	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
 	pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
-					 notif->bt_status);
+			 notif->bt_status);
 	pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
-					 notif->bt_open_conn);
+			 notif->bt_open_conn);
 	pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
-					 notif->bt_traffic_load);
+			 notif->bt_traffic_load);
 	pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
-					 notif->bt_agg_traffic_load);
+			 notif->bt_agg_traffic_load);
 	pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-					 notif->bt_ci_compliance);
+			 notif->bt_ci_compliance);
+	pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+			 le32_to_cpu(notif->primary_ch_lut));
+	pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+			 le32_to_cpu(notif->secondary_ch_lut));
+	pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
+			 le32_to_cpu(notif->bt_activity_grading));
 
 	mutex_unlock(&mvm->mutex);
 
@@ -600,6 +608,38 @@
 }
 #undef BT_MBOX_PRINT
 
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+	char buf[256];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
+	pos += scnprintf(buf+pos, bufsz-pos,
+		       "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
+		       le64_to_cpu(cmd->bt_primary_ci),
+		       !!cmd->co_run_bw_primary);
+	pos += scnprintf(buf+pos, bufsz-pos,
+		       "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
+		       le64_to_cpu(cmd->bt_secondary_ci),
+		       !!cmd->co_run_bw_secondary);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+	pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+			 iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+	pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+			 iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 #define PRINT_STATS_LE32(_str, _val)					\
 			 pos += scnprintf(buf + pos, bufsz - pos,	\
 					  fmt_table, _str,		\
@@ -615,9 +655,11 @@
 	int pos = 0;
 	char *buf;
 	int ret;
-	int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
-		    sizeof(struct mvm_statistics_rx_non_phy) * 10 +
-		    sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+	/* 43 is the size of each data line, 33 is the size of each header */
+	size_t bufsz =
+		((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
+		(4 * 33) + 1;
+
 	struct mvm_statistics_rx_phy *ofdm;
 	struct mvm_statistics_rx_phy *cck;
 	struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +754,7 @@
 	PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
 	PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
 	PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+	PRINT_STATS_LE32("mac_id", general->mac_id);
 	PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
 
 	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +800,59 @@
 	return count;
 }
 
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+				char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	int pos = 0;
+	char buf[32];
+	const size_t bufsz = sizeof(buf);
+
+	/* print which antennas were set for the scan command by the user */
+	pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+	if (mvm->scan_rx_ant & ANT_A)
+		pos += scnprintf(buf + pos, bufsz - pos, "A");
+	if (mvm->scan_rx_ant & ANT_B)
+		pos += scnprintf(buf + pos, bufsz - pos, "B");
+	if (mvm->scan_rx_ant & ANT_C)
+		pos += scnprintf(buf + pos, bufsz - pos, "C");
+	pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+				 const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[8];
+	int buf_size;
+	u8 scan_rx_ant;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+
+	/* get the argument from the user and check if it is valid */
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+		return -EINVAL;
+	if (scan_rx_ant > ANT_ABC)
+		return -EINVAL;
+	if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+		return -EINVAL;
+
+	/* change the rx antennas for scan command */
+	mvm->scan_rx_ant = scan_rx_ant;
+
+	return count;
+}
+
+
 static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
 				enum iwl_dbgfs_bf_mask param, int value)
 {
@@ -968,7 +1064,8 @@
 	char buf[8] = {};
 	int store;
 
-	if (copy_from_user(buf, user_buf, sizeof(buf)))
+	count = min_t(size_t, count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;
 
 	if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1160,12 @@
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
 MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+
 #ifdef CONFIG_PM_SLEEP
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
 #endif
@@ -1087,10 +1186,14 @@
 	MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
-	MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
-	MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
+		MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+				     S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
+			     S_IWUSR | S_IRUSR);
 #ifdef CONFIG_PM_SLEEP
 	MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 05c61d6..4ea5e24 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -82,6 +82,8 @@
  * @BT_USE_DEFAULTS:
  * @BT_SYNC_2_BT_DISABLE:
  * @BT_COEX_CORUNNING_TBL_EN:
+ *
+ * The COEX_MODE must be set for each command. Even if it is not changed.
  */
 enum iwl_bt_coex_flags {
 	BT_CH_PRIMARY_EN		= BIT(0),
@@ -95,14 +97,16 @@
 	BT_COEX_NW			= 0x3 << BT_COEX_MODE_POS,
 	BT_USE_DEFAULTS			= BIT(6),
 	BT_SYNC_2_BT_DISABLE		= BIT(7),
-	/*
-	 * For future use - when the flags will be enlarged
-	 * BT_COEX_CORUNNING_TBL_EN	= BIT(8),
-	 */
+	BT_COEX_CORUNNING_TBL_EN	= BIT(8),
+	BT_COEX_MPLUT_TBL_EN		= BIT(9),
+	/* Bit 10 is reserved */
+	BT_COEX_WF_PRIO_BOOST_CHECK_EN	= BIT(11),
 };
 
 /*
  * indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * discarded by the firmware
  */
 enum iwl_bt_coex_valid_bit_msk {
 	BT_VALID_ENABLE			= BIT(0),
@@ -121,11 +125,8 @@
 	BT_VALID_CORUN_LUT_40		= BIT(13),
 	BT_VALID_ANT_ISOLATION		= BIT(14),
 	BT_VALID_ANT_ISOLATION_THRS	= BIT(15),
-	/*
-	 * For future use - when the valid flags will be enlarged
-	 * BT_VALID_TXTX_DELTA_FREQ_THRS	= BIT(16),
-	 * BT_VALID_TXRX_MAX_FREQ_0	= BIT(17),
-	 */
+	BT_VALID_TXTX_DELTA_FREQ_THRS	= BIT(16),
+	BT_VALID_TXRX_MAX_FREQ_0	= BIT(17),
 };
 
 /**
@@ -142,48 +143,88 @@
 	BT_REDUCED_TX_POWER_DATA	= BIT(1),
 };
 
+enum iwl_bt_coex_lut_type {
+	BT_COEX_TIGHT_LUT = 0,
+	BT_COEX_LOOSE_LUT,
+	BT_COEX_TX_DIS_LUT,
+
+	BT_COEX_MAX_LUT,
+};
+
 #define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
 
 /**
  * struct iwl_bt_coex_cmd - bt coex configuration command
  * @flags:&enum iwl_bt_coex_flags
- * @lead_time:
  * @max_kill:
- * @bt3_time_t7_value:
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @bt3_prio_sample_time:
- * @bt3_timer_t2_value:
- * @bt4_reaction_time:
- * @decision_lut[12]:
  * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- * @bt_prio_boost: values for PTA boost register
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
  * @wifi_tx_prio_boost: SW boost of wifi tx priority
  * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
  *
  * The structure is used for the BT_COEX command.
  */
 struct iwl_bt_coex_cmd {
-	u8 flags;
-	u8 lead_time;
+	__le32 flags;
 	u8 max_kill;
-	u8 bt3_time_t7_value;
+	u8 bt_reduced_tx_power;
+	u8 reserved[2];
+
+	u8 bt4_antenna_isolation;
+	u8 bt4_antenna_isolation_thr;
+	u8 bt4_tx_tx_delta_freq_thr;
+	u8 bt4_tx_rx_max_freq0;
+
+	__le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+	__le32 wifi_tx_prio_boost;
+	__le32 wifi_rx_prio_boost;
 	__le32 kill_ack_msk;
 	__le32 kill_cts_msk;
-	u8 bt3_prio_sample_time;
-	u8 bt3_timer_t2_value;
-	__le16 bt4_reaction_time;
-	__le32 decision_lut[BT_COEX_LUT_SIZE];
-	u8 bt_reduced_tx_power;
-	u8 reserved;
-	__le16 valid_bit_msk;
-	__le32 bt_prio_boost;
-	u8 reserved2;
-	u8 wifi_tx_prio_boost;
-	__le16 wifi_rx_prio_boost;
+
+	__le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+	__le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+	__le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+	__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+	__le32 valid_bit_msk;
 } __packed; /* BT_COEX_CMD_API_S_VER_3 */
 
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+	__le64 bt_primary_ci;
+	__le64 bt_secondary_ci;
+
+	u8 co_run_bw_primary;
+	u8 co_run_bw_secondary;
+	u8 primary_ch_phy_id;
+	u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
 #define BT_MBOX(n_dw, _msg, _pos, _nbits)	\
 	BT_MBOX##n_dw##_##_msg##_POS = (_pos),	\
 	BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@
 	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
 	>> BT_MBOX##_num##_##_field##_POS)
 
+enum iwl_bt_activity_grading {
+	BT_OFF			= 0,
+	BT_ON_NO_CONNECTION	= 1,
+	BT_LOW_TRAFFIC		= 2,
+	BT_HIGH_TRAFFIC		= 3,
+};
+
 /**
  * struct iwl_bt_coex_profile_notif - notification about BT coex
  * @mbox_msg: message from BT to WiFi
- * @:bt_status: 0 - off, 1 - on
- * @:bt_open_conn: number of BT connections open
- * @:bt_traffic_load: load of BT traffic
- * @:bt_agg_traffic_load: aggregated load of BT traffic
- * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
  */
 struct iwl_bt_coex_profile_notif {
 	__le32 mbox_msg[4];
+	__le32 msg_idx;
 	u8 bt_status;
 	u8 bt_open_conn;
 	u8 bt_traffic_load;
 	u8 bt_agg_traffic_load;
 	u8 bt_ci_compliance;
 	u8 reserved[3];
+
+	__le32 primary_ch_lut;
+	__le32 secondary_ch_lut;
+	__le32 bt_activity_grading;
 } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
 
 enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@
 	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
 } __packed;
 
-enum iwl_bt_coex_env_action {
-	BT_COEX_ENV_CLOSE	 = 0,
-	BT_COEX_ENV_OPEN	 = 1,
-}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
-
-/**
- * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
- * @action: enum %iwl_bt_coex_env_action
- * @type: enum %iwl_bt_coex_prio_table_event
- */
-struct iwl_bt_coex_prot_env_cmd {
-	u8 action; /* 0 = closed, 1 = open */
-	u8 type; /* 0 .. 15 */
-	u8 reserved[2];
-} __packed;
-
 #endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index df72fcdf..4e7dd8c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -100,7 +100,12 @@
 
 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1	2
 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2	6
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX	6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L	12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S	4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX	12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L	4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S	2
 
 /**
  * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@
 	u8 reserved2[3];
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
 
+struct iwl_ns_config {
+	struct in6_addr source_ipv6_addr;
+	struct in6_addr dest_ipv6_addr;
+	u8 target_mac_addr[ETH_ALEN];
+	__le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+	struct in6_addr addr;
+	__le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @target_ipv6_addr: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+	struct iwl_proto_offload_cmd_common common;
+	__le32 num_valid_ipv6_addrs;
+	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+	struct iwl_proto_offload_cmd_common common;
+	__le32 num_valid_ipv6_addrs;
+	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
 
 /*
  * WOWLAN_PATTERNS
@@ -293,7 +335,7 @@
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET		= BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v4 {
 	__le64 replay_ctr;
 	__le16 pattern_number;
 	__le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@
 	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
 } __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
 
+struct iwl_wowlan_gtk_status {
+	u8 key_index;
+	u8 reserved[3];
+	u8 decrypt_key[16];
+	u8 tkip_mic_key[8];
+	struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed;
+
+struct iwl_wowlan_status_v6 {
+	struct iwl_wowlan_gtk_status gtk;
+	__le64 replay_ctr;
+	__le16 pattern_number;
+	__le16 non_qos_seq_ctr;
+	__le16 qos_seq_ctr[8];
+	__le32 wakeup_reasons;
+	__le32 num_of_gtk_rekeys;
+	__le32 transmitted_ndps;
+	__le32 received_beacons;
+	__le32 wake_packet_length;
+	__le32 wake_packet_bufsize;
+	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
 #define IWL_WOWLAN_TCP_MAX_PACKET_LEN		64
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN	128
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS	2048
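
With two v3 layouts sharing one API version, the driver has to pick a size
when building the command. A minimal sketch, under the assumption that the
choice is driven by how many NS configurations are needed (the actual
selection criterion is not shown in this hunk):

	static size_t iwl_proto_offload_cmd_len(int n_ns_configs)
	{
		if (n_ns_configs <= IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S)
			return sizeof(struct iwl_proto_offload_cmd_v3_small);
		return sizeof(struct iwl_proto_offload_cmd_v3_large);
	}
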
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 98b1feb..39c3148 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -170,12 +170,14 @@
  * @beacon_tsf: beacon transmit time in TSF
  * @bi: beacon interval in TU
  * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
  */
 struct iwl_mac_data_ibss {
 	__le32 beacon_time;
 	__le64 beacon_tsf;
 	__le32 bi;
 	__le32 bi_reciprocal;
+	__le32 beacon_template;
 } __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
 
 /**
@@ -372,4 +374,13 @@
 	return 0xFFFFFFFF / v;
 }
 
+#define IWL_NONQOS_SEQ_GET	0x1
+#define IWL_NONQOS_SEQ_SET	0x2
+struct iwl_nonqos_seq_query_cmd {
+	__le32 get_set_flag;
+	__le32 mac_id_n_color;
+	__le16 value;
+	__le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
 #endif /* __fw_api_mac_h__ */
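
The bi_reciprocal field (2^32 / bi) lets the firmware turn a division by the
beacon interval into a multiply-and-shift. A sketch of the arithmetic this
enables (hypothetical helper; the reciprocal itself is the 0xFFFFFFFF / v
computation shown in the hunk above):

	/* how many beacon intervals fit in elapsed_tu, without dividing */
	static u32 intervals_elapsed(u32 elapsed_tu, u32 bi_reciprocal)
	{
		return ((u64)elapsed_tu * bi_reciprocal) >> 32;
	}

For bi = 100 TU, bi_reciprocal = 0xFFFFFFFF / 100 = 42949672, and
350 TU * 42949672 >> 32 = 3 full intervals.
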
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 8e7ab41..5cb93ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -132,6 +132,33 @@
 } __packed;
 
 /**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' - allow power saving by turning
+ *	off the receiver and transmitter. '0' - do not allow. This flag should
+ *	always be set to '1' unless one needs to disable actual power down for
+ *	debug purposes.
+ * @DEVICE_POWER_FLAGS_CAM_MSK: '1' - CAM (Continuous Active Mode) is set,
+ *	meaning that power management is disabled. '0' - power management is
+ *	enabled and one of the power schemes is applied.
+ */
+enum iwl_device_power_flags {
+	DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK	= BIT(0),
+	DEVICE_POWER_FLAGS_CAM_MSK		= BIT(13),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags:	Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
+	__le16 flags;
+	__le16 reserved;
+} __packed;
+
+/**
  * struct iwl_mac_power_cmd - New power command containing uAPSD support
  * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
  * @id_and_color:	MAC context identifier
@@ -290,7 +317,7 @@
 #define IWL_BF_ESCAPE_TIMER_MIN 0
 
 #define IWL_BA_ESCAPE_TIMER_DEFAULT 6
-#define IWL_BA_ESCAPE_TIMER_D3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
 #define IWL_BA_ESCAPE_TIMER_MAX 1024
 #define IWL_BA_ESCAPE_TIMER_MIN 0
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index fdd33bc..538f1c7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -68,6 +68,7 @@
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
  */
 enum {
 	IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@
 	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
 	IWL_RATE_6M_INDEX,
 	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+	IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+	IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+	IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
 	IWL_RATE_9M_INDEX,
 	IWL_RATE_12M_INDEX,
+	IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
 	IWL_RATE_18M_INDEX,
+	IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
 	IWL_RATE_24M_INDEX,
+	IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
 	IWL_RATE_36M_INDEX,
+	IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
 	IWL_RATE_48M_INDEX,
+	IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
 	IWL_RATE_54M_INDEX,
+	IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
 	IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
 	IWL_RATE_60M_INDEX,
-	IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+	IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+	IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+	IWL_RATE_MCS_8_INDEX,
+	IWL_RATE_MCS_9_INDEX,
+	IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
 	IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
-	IWL_RATE_COUNT,
+	IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
 };
 
 #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@
 	IWL_RATE_2M_PLCP  = 20,
 	IWL_RATE_5M_PLCP  = 55,
 	IWL_RATE_11M_PLCP = 110,
+	IWL_RATE_INVM_PLCP = -1,
 };
 
 /*
@@ -164,6 +179,8 @@
  * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
  */
 #define RATE_HT_MCS_RATE_CODE_MSK	0x7
+#define RATE_HT_MCS_NSS_POS             3
+#define RATE_HT_MCS_NSS_MSK             (3 << RATE_HT_MCS_NSS_POS)
 
 /* Bit 10: (1) Use Green Field preamble */
 #define RATE_HT_MCS_GF_POS		10
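
The new NSS mask makes it possible to pull the number of spatial streams
straight out of an HT rate_n_flags word. A sketch (checking RATE_MCS_HT_MSK
is left to the caller; bits 3-4 carry NSS - 1):

	static u8 iwl_ht_nss(u32 rate_n_flags)
	{
		return ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
			RATE_HT_MCS_NSS_POS) + 1;
	}
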
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 83cb9b9..c3782b4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -356,6 +356,7 @@
 /* scan offload */
 #define IWL_MAX_SCAN_CHANNELS		40
 #define IWL_SCAN_MAX_BLACKLIST_LEN	64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN	16
 #define IWL_SCAN_MAX_PROFILES		11
 #define SCAN_OFFLOAD_PROBE_REQ_SIZE	512
 
@@ -368,6 +369,12 @@
 #define IWL_FULL_SCAN_MULTIPLIER 5
 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3
 
+enum scan_framework_client {
+	SCAN_CLIENT_SCHED_SCAN		= BIT(0),
+	SCAN_CLIENT_NETDETECT		= BIT(1),
+	SCAN_CLIENT_ASSET_TRACKING	= BIT(2),
+};
+
 /**
  * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
  * @scan_flags:		see enum iwl_scan_flags
@@ -449,11 +456,12 @@
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:		MAC address to filter out
  * @reported_rssi:	AP rssi reported to the host
+ * @client_bitmap: clients that ignore this entry - enum scan_framework_client
  */
 struct iwl_scan_offload_blacklist {
 	u8 ssid[ETH_ALEN];
 	u8 reported_rssi;
-	u8 reserved;
+	u8 client_bitmap;
 } __packed;
 
 enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@
  * @auth_alg:		authentication algorithm to match - bitmap
  * @network_type:	enum iwl_scan_offload_network_type
  * @band_selection:	enum iwl_scan_offload_band_selection
+ * @client_bitmap:	clients waiting for match - enum scan_framework_client
  */
 struct iwl_scan_offload_profile {
 	u8 ssid_index;
@@ -482,7 +491,8 @@
 	u8 auth_alg;
 	u8 network_type;
 	u8 band_selection;
-	u8 reserved[3];
+	u8 client_bitmap;
+	u8 reserved[2];
 } __packed;
 
 /**
@@ -491,13 +501,18 @@
  * @profiles:		profiles to search for match
  * @blacklist_len:	length of blacklist
  * @num_profiles:	num of profiles in the list
+ * @match_notify:	clients waiting for match found notification
+ * @pass_match:		clients waiting for the results
+ * @active_clients:	active clients bitmap - enum scan_framework_client
  */
 struct iwl_scan_offload_profile_cfg {
-	struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
 	struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
 	u8 blacklist_len;
 	u8 num_profiles;
-	u8 reserved[2];
+	u8 match_notify;
+	u8 pass_match;
+	u8 active_clients;
+	u8 reserved[3];
 } __packed;
 
 /**
@@ -560,4 +575,15 @@
 	u8 reserved;
 } __packed;
 
+/**
+ * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap:	SSIDs indexes found in this iteration
+ * @client_bitmap:	clients that are active and wait for this notification
+ */
+struct iwl_sched_scan_results {
+	__le16 ssid_bitmap;
+	u8 client_bitmap;
+	u8 reserved;
+};
+
 #endif
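
A sketch of how the client bitmap gates delivery of a match notification
(hypothetical consumer; the flags are the scan_framework_client bits defined
above):

	static bool iwl_match_is_for_sched_scan(
				const struct iwl_sched_scan_results *results)
	{
		return results->client_bitmap & SCAN_CLIENT_SCHED_SCAN;
	}
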
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index a30691a..4aca593 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -247,7 +247,7 @@
 } __packed;
 
 /**
- * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
  * @add_modify: 1: modify existing, 0: add new station
  * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@
  * ADD_STA sets up the table entry for one station, either creating a new
  * entry, or modifying a pre-existing one.
  */
-struct iwl_mvm_add_sta_cmd {
+struct iwl_mvm_add_sta_cmd_v5 {
 	u8 add_modify;
 	u8 unicast_tx_key_id;
 	u8 multicast_tx_key_id;
@@ -313,6 +313,57 @@
 } __packed; /* ADD_STA_CMD_API_S_VER_5 */
 
 /**
+ * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5 except that it
+ * excludes all the fields related to security key installation
+ * (those now travel in the separate ADD_STA_KEY command).
+ */
+struct iwl_mvm_add_sta_cmd_v6 {
+	u8 add_modify;
+	u8 reserved1;
+	__le16 tid_disable_tx;
+	__le32 mac_id_n_color;
+	u8 addr[ETH_ALEN];	/* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+	__le16 reserved2;
+	u8 sta_id;
+	u8 modify_mask;
+	__le16 reserved3;
+	__le32 station_flags;
+	__le32 station_flags_msk;
+	u8 add_immediate_ba_tid;
+	u8 remove_immediate_ba_tid;
+	__le16 add_immediate_ba_ssn;
+	__le16 sleep_tx_count;
+	__le16 sleep_state_flags;
+	__le16 assoc_id;
+	__le16 beamform_flags;
+	__le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwl_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd {
+	u8 sta_id;
+	u8 key_offset;
+	__le16 key_flags;
+	u8 key[16];
+	u8 key2[16];
+	u8 rx_secur_seq_cnt[16];
+	u8 tkip_rx_tsc_byte2;
+	u8 reserved;
+	__le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
  * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
  * @ADD_STA_SUCCESS: operation was executed successfully
  * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
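
Since v6 drops the key material, the driver must size REPLY_ADD_STA according
to the API the firmware speaks and install keys via the new ADD_STA_KEY
command. A sketch of the size selection (the capability test is hypothetical;
this patch only introduces the two layouts):

	static u16 iwl_mvm_add_sta_cmd_size(bool fw_has_sta_key_cmd)
	{
		return fw_has_sta_key_cmd ?
			sizeof(struct iwl_mvm_add_sta_cmd_v6) :
			sizeof(struct iwl_mvm_add_sta_cmd_v5);
	}
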
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 66264cc..bad5a55 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -72,17 +72,17 @@
 #include "fw-api-d3.h"
 #include "fw-api-bt-coex.h"
 
-/* queue and FIFO numbers by usage */
+/* maximum number of Tx queues on any platform */
+#define IWL_MVM_MAX_QUEUES	20
+
+/* Tx queue numbers */
 enum {
 	IWL_MVM_OFFCHANNEL_QUEUE = 8,
 	IWL_MVM_CMD_QUEUE = 9,
-	IWL_MVM_AUX_QUEUE = 15,
-	IWL_MVM_FIRST_AGG_QUEUE = 16,
-	IWL_MVM_NUM_QUEUES = 20,
-	IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
-	IWL_MVM_CMD_FIFO = 7
 };
 
+#define IWL_MVM_CMD_FIFO	7
+
 #define IWL_MVM_STATION_COUNT	16
 
 /* commands */
@@ -97,6 +97,7 @@
 	DBG_CFG = 0x9,
 
 	/* station table */
+	ADD_STA_KEY = 0x17,
 	ADD_STA = 0x18,
 	REMOVE_STA = 0x19,
 
@@ -114,6 +115,7 @@
 	TIME_EVENT_NOTIFICATION = 0x2a,
 	BINDING_CONTEXT_CMD = 0x2b,
 	TIME_QUOTA_CMD = 0x2c,
+	NON_QOS_TX_COUNTER_CMD = 0x2d,
 
 	LQ_CMD = 0x4e,
 
@@ -130,6 +132,7 @@
 	SCAN_OFFLOAD_COMPLETE = 0x6D,
 	SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
 	SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+	MATCH_FOUND_NOTIFICATION = 0xd9,
 
 	/* Phy */
 	PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@
 	BT_COEX_PRIO_TABLE = 0xcc,
 	BT_COEX_PROT_ENV = 0xcd,
 	BT_PROFILE_NOTIFICATION = 0xce,
+	BT_COEX_CI = 0x5d,
 
 	REPLY_BEACON_FILTERING_CMD = 0xd2,
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c76299a..70e5297 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -151,13 +151,11 @@
 	enum iwl_ucode_type old_type = mvm->cur_ucode;
 	static const u8 alive_cmd[] = { MVM_ALIVE };
 
-	mvm->cur_ucode = ucode_type;
 	fw = iwl_get_ucode_image(mvm, ucode_type);
-
-	mvm->ucode_loaded = false;
-
-	if (!fw)
+	if (WARN_ON(!fw))
 		return -EINVAL;
+	mvm->cur_ucode = ucode_type;
+	mvm->ucode_loaded = false;
 
 	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
 				   alive_cmd, ARRAY_SIZE(alive_cmd),
@@ -199,7 +197,7 @@
 	 */
 
 	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-		if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
 			mvm->queue_to_mac80211[i] = i;
 		else
 			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +241,7 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (mvm->init_ucode_run)
+	if (mvm->init_ucode_complete)
 		return 0;
 
 	iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +262,7 @@
 	if (ret)
 		goto error;
 
+	/* Read the NVM only at driver load time, no need to do this twice */
 	if (read_nvm) {
 		/* Read nvm */
 		ret = iwl_nvm_init(mvm);
@@ -273,6 +272,10 @@
 		}
 	}
 
+	/* In case we read the NVM from external file, load it to the NIC */
+	if (iwlwifi_mod_params.nvm_file)
+		iwl_mvm_load_nvm_to_nic(mvm);
+
 	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
 	WARN_ON(ret);
 
@@ -310,7 +313,7 @@
 	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
 			MVM_UCODE_CALIB_TIMEOUT);
 	if (!ret)
-		mvm->init_ucode_run = true;
+		mvm->init_ucode_complete = true;
 	goto out;
 
 error:
@@ -353,8 +356,12 @@
 	if (ret)
 		return ret;
 
-	/* If we were in RFKILL during module loading, load init ucode now */
-	if (!mvm->init_ucode_run) {
+	/*
+	 * If we haven't completed the run of the init ucode during
+	 * module loading, load init ucode now
+	 * (for example, if we were in RFKILL)
+	 */
+	if (!mvm->init_ucode_complete) {
 		ret = iwl_run_init_mvm_ucode(mvm, false);
 		if (ret && !iwlmvm_mod_params.init_dbg) {
 			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +431,10 @@
 			goto error;
 	}
 
+	ret = iwl_mvm_power_update_device_mode(mvm);
+	if (ret)
+		goto error;
+
 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 	return 0;
  error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5fe23a5..f41f9b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -80,7 +80,7 @@
 	struct ieee80211_vif *vif;
 	unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
 	unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
-	unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+	unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
 	enum iwl_tsf_id preferred_tsf;
 	bool found_vif;
 };
@@ -218,7 +218,7 @@
 		.preferred_tsf = NUM_TSF_IDS,
 		.used_hw_queues = {
 			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-			BIT(IWL_MVM_AUX_QUEUE) |
+			BIT(mvm->aux_queue) |
 			BIT(IWL_MVM_CMD_QUEUE)
 		},
 		.found_vif = false,
@@ -242,9 +242,17 @@
 	 * that we should share it with another interface.
 	 */
 
-	/* Currently, MAC ID 0 should be used only for the managed vif */
-	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+	/* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+	switch (vif->type) {
+	case NL80211_IFTYPE_ADHOC:
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (!vif->p2p)
+			break;
+		/* fall through */
+	default:
 		__clear_bit(0, data.available_mac_ids);
+	}
 
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@
 	/* Find available queues, and allocate them to the ACs */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		u8 queue = find_first_zero_bit(data.used_hw_queues,
-					       IWL_MVM_FIRST_AGG_QUEUE);
+					       mvm->first_agg_queue);
 
-		if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+		if (queue >= mvm->first_agg_queue) {
 			IWL_ERR(mvm, "Failed to allocate queue\n");
 			ret = -EIO;
 			goto exit_fail;
@@ -317,9 +325,9 @@
 	/* Allocate the CAB queue for softAP and GO interfaces */
 	if (vif->type == NL80211_IFTYPE_AP) {
 		u8 queue = find_first_zero_bit(data.used_hw_queues,
-					       IWL_MVM_FIRST_AGG_QUEUE);
+					       mvm->first_agg_queue);
 
-		if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+		if (queue >= mvm->first_agg_queue) {
 			IWL_ERR(mvm, "Failed to allocate cab queue\n");
 			ret = -EIO;
 			goto exit_fail;
@@ -559,8 +567,12 @@
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
 	/* Don't use cts to self as the fw doesn't support it currently. */
-	if (vif->bss_conf.use_cts_prot)
+	if (vif->bss_conf.use_cts_prot) {
 		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+		if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+			cmd->protection_flags |=
+				cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+	}
 
 	/*
 	 * I think that we should enable these 2 flags regardless the HT PROT
@@ -707,8 +719,35 @@
 	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
 				       MAC_FILTER_IN_CONTROL_AND_MGMT |
 				       MAC_FILTER_IN_BEACON |
+				       MAC_FILTER_IN_PROBE_REQUEST |
+				       MAC_FILTER_IN_CRC32);
+	mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     u32 action)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_ctx_cmd cmd = {};
+
+	WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
 				       MAC_FILTER_IN_PROBE_REQUEST);
 
+	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+	cmd.ibss.bi_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+	/* TODO: Assumes that the beacon id == mac context id */
+	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
@@ -721,7 +760,8 @@
 	struct iwl_mvm_go_iterator_data *data = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-	if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+	    mvmvif->ap_ibss_active)
 		data->go_active = true;
 }
 
@@ -833,9 +873,10 @@
 		cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
 
 	/* Set up TX beacon command fields */
-	iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
-				 beacon->data,
-				 beacon_skb_len);
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+					 beacon->data,
+					 beacon_skb_len);
 
 	/* Submit command */
 	cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +889,15 @@
 	return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
-/* The beacon template for the AP/GO context has changed and needs update */
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif)
 {
 	struct sk_buff *beacon;
 	int ret;
 
-	WARN_ON(vif->type != NL80211_IFTYPE_AP);
+	WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		vif->type != NL80211_IFTYPE_ADHOC);
 
 	beacon = ieee80211_beacon_get(mvm->hw, vif);
 	if (!beacon)
@@ -1018,6 +1060,8 @@
 		return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
 	case NL80211_IFTYPE_P2P_DEVICE:
 		return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+	case NL80211_IFTYPE_ADHOC:
+		return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
 	default:
 		break;
 	}
@@ -1038,6 +1082,9 @@
 	if (ret)
 		return ret;
 
+	/* will only do anything at resume from D3 time */
+	iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
 	mvmvif->uploaded = true;
 	return 0;
 }
@@ -1077,6 +1124,10 @@
 	}
 
 	mvmvif->uploaded = false;
+
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9833cdf..74bc2c8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -77,6 +77,7 @@
 #include "iwl-eeprom-parse.h"
 #include "fw-api-scan.h"
 #include "iwl-phy-db.h"
+#include "testmode.h"
 
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
 	{
@@ -138,6 +139,14 @@
 	}
 }
 
+static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+	/* we create the 802.11 header and SSID element */
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
+		return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
+	return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+}
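
For example, with max_probe_length = 512 this yields 512 - 24 - 2 = 486 bytes
of IE space when the firmware inserts the SSID itself (NO_BASIC_SSID), versus
512 - 24 - 34 = 454 bytes when the driver must reserve a maximum-length SSID
element (2-byte IE header plus 32-byte SSID) after the 24-byte 802.11 header.
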
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
 	struct ieee80211_hw *hw = mvm->hw;
@@ -155,10 +164,9 @@
 		    IEEE80211_HW_TIMING_BEACON_ONLY |
 		    IEEE80211_HW_CONNECTION_MONITOR |
 		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-		    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
-		    IEEE80211_HW_SUPPORTS_UAPSD;
+		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
-	hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+	hw->queues = mvm->first_agg_queue;
 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
 	hw->rate_control_algorithm = "iwl-mvm-rs";
 
@@ -171,6 +179,12 @@
 	    !iwlwifi_mod_params.sw_crypto)
 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+		hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+		hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+	}
+
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 	hw->chanctx_data_size = sizeof(u16);
@@ -181,6 +195,10 @@
 		BIT(NL80211_IFTYPE_P2P_GO) |
 		BIT(NL80211_IFTYPE_P2P_DEVICE);
 
+	/* IBSS has bugs in older versions */
+	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
 	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
 			    WIPHY_FLAG_IBSS_RSN;
@@ -191,8 +209,6 @@
 
 	hw->wiphy->max_remain_on_channel_duration = 10000;
 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-	hw->uapsd_queues = IWL_UAPSD_AC_INFO;
-	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
 	/* Extract MAC address */
 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -212,9 +228,8 @@
 
 	iwl_mvm_reset_phy_ctxts(mvm);
 
-	/* we create the 802.11 header and a max-length SSID element */
-	hw->wiphy->max_scan_ie_len =
-		mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
 
 	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +246,15 @@
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+		/* we create the 802.11 header and zero length SSID IE. */
+		hw->wiphy->max_sched_scan_ie_len =
+					SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	}
+
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
 			       NL80211_FEATURE_P2P_GO_OPPPS;
 
@@ -548,7 +572,8 @@
 	 * In short: there's not much we can do at this point, other than
 	 * allocating resources :)
 	 */
-	if (vif->type == NL80211_IFTYPE_AP) {
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
 		u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
 		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
 					       qmask);
@@ -698,7 +723,14 @@
 	 * For AP/GO interfaces, the teardown of the resources allocated to the
 	 * interface is handled as part of the stop_ap flow.
 	 */
-	if (vif->type == NL80211_IFTYPE_AP) {
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+		if (vif == mvm->noa_vif) {
+			mvm->noa_vif = NULL;
+			mvm->noa_duration = 0;
+		}
+#endif
 		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
 		goto out_release;
 	}
@@ -796,6 +828,27 @@
 				return;
 			}
 			iwl_mvm_configure_mcast_filter(mvm, vif);
+
+			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+				     &mvm->status)) {
+				/*
+				 * If we're restarting then the firmware will
+				 * obviously have lost synchronisation with
+				 * the AP. It will attempt to synchronise by
+				 * itself, but we can make it more reliable by
+				 * scheduling a session protection time event.
+				 *
+				 * The firmware needs to receive a beacon to
+				 * catch up with synchronisation, use 110% of
+				 * the beacon interval.
+				 *
+				 * Set a large maximum delay to allow for more
+				 * than a single interface.
+				 */
+				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+				iwl_mvm_protect_session(mvm, vif, dur, dur,
+							5 * dur);
+			}
 		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
 			/* remove AP station now that the MAC is unassoc */
 			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -811,7 +864,8 @@
 		/* reset rssi values */
 		mvmvif->bf_data.ave_beacon_signal = 0;
 
-		if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+		if (!(mvm->fw->ucode_capa.flags &
+					IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
 			/* Workaround for FW bug, otherwise FW disables device
 			 * power save upon disassociation
 			 */
@@ -819,7 +873,7 @@
 			if (ret)
 				IWL_ERR(mvm, "failed to update power mode\n");
 		}
-		iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+		iwl_mvm_bt_coex_vif_change(mvm);
 	} else if (changes & BSS_CHANGED_BEACON_INFO) {
 		/*
 		 * We received a beacon _after_ association so
@@ -848,7 +902,8 @@
 	}
 }
 
-static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +926,7 @@
 	if (ret)
 		goto out_remove;
 
-	mvmvif->ap_active = true;
+	mvmvif->ap_ibss_active = true;
 
 	/* Send the bcast station. At this stage the TBTT and DTIM time events
 	 * are added and applied to the scheduler */
@@ -883,10 +938,12 @@
 	if (ret)
 		goto out_rm_bcast;
 
-	/* Need to update the P2P Device MAC */
+	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
 	if (vif->p2p && mvm->p2p_device_vif)
 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
 
+	iwl_mvm_bt_coex_vif_change(mvm);
+
 	mutex_unlock(&mvm->mutex);
 	return 0;
 
@@ -901,7 +958,8 @@
 	return ret;
 }
 
-static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +968,11 @@
 
 	mutex_lock(&mvm->mutex);
 
-	mvmvif->ap_active = false;
+	mvmvif->ap_ibss_active = false;
 
-	/* Need to update the P2P Device MAC */
+	iwl_mvm_bt_coex_vif_change(mvm);
+
+	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
 	if (vif->p2p && mvm->p2p_device_vif)
 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
 
@@ -924,10 +984,11 @@
 	mutex_unlock(&mvm->mutex);
 }
 
-static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					struct ieee80211_bss_conf *bss_conf,
-					u32 changes)
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_bss_conf *bss_conf,
+				 u32 changes)
 {
 	/* Need to send a new beacon template to the FW */
 	if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1011,8 @@
 		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+	case NL80211_IFTYPE_ADHOC:
+		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
 		break;
 	default:
 		/* shouldn't happen */
@@ -1163,7 +1225,54 @@
 
 	mutex_lock(&mvm->mutex);
 	/* Try really hard to protect the session and hear a beacon */
-	iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct cfg80211_sched_scan_request *req,
+					struct ieee80211_sched_scan_ies *ies)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
+		IWL_DEBUG_SCAN(mvm,
+			       "SCHED SCAN request during internal scan - abort\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	mvm->scan_status = IWL_MVM_SCAN_SCHED;
+
+	ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+	if (ret)
+		goto err;
+
+	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+	if (ret)
+		goto err;
+
+	ret = iwl_mvm_sched_scan_start(mvm, req);
+	if (!ret)
+		goto out;
+err:
+	mvm->scan_status = IWL_MVM_SCAN_NONE;
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_sched_scan_stop(mvm);
 	mutex_unlock(&mvm->mutex);
 }
 
@@ -1207,8 +1316,13 @@
 
 	switch (cmd) {
 	case SET_KEY:
-		if (vif->type == NL80211_IFTYPE_AP && !sta) {
-			/* GTK on AP interface is a TX-only key, return 0 */
+		if ((vif->type == NL80211_IFTYPE_ADHOC ||
+		     vif->type == NL80211_IFTYPE_AP) && !sta) {
+			/*
+			 * GTK on AP interface is a TX-only key, return 0;
+			 * on IBSS they're per-station and because we're lazy
+			 * we don't support them for RX, so do the same.
+			 */
 			ret = 0;
 			key->hw_key_idx = STA_KEY_IDX_INVALID;
 			break;
@@ -1252,6 +1366,9 @@
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+		return;
+
 	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
 }
 
@@ -1445,6 +1562,7 @@
 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
 				 ctx->rx_chains_static,
 				 ctx->rx_chains_dynamic);
+	iwl_mvm_bt_coex_vif_change(mvm);
 	mutex_unlock(&mvm->mutex);
 }
 
@@ -1464,14 +1582,14 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
 		/*
 		 * The AP binding flow is handled as part of the start_ap flow
-		 * (in bss_info_changed).
+		 * (in bss_info_changed), similarly for IBSS.
 		 */
 		ret = 0;
 		goto out_unlock;
 	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_MONITOR:
 		break;
 	default:
@@ -1517,10 +1635,10 @@
 
 	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
-	if (vif->type == NL80211_IFTYPE_AP)
-		goto out_unlock;
-
 	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		goto out_unlock;
 	case NL80211_IFTYPE_MONITOR:
 		mvmvif->monitor_active = false;
 		iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1668,72 @@
 	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
 }
 
-static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
 				      struct ieee80211_vif *vif,
-				      enum ieee80211_rssi_event rssi_event)
+				      void *data, int len)
+{
+	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+	int err;
+	u32 noa_duration;
+
+	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+	if (err)
+		return err;
+
+	if (!tb[IWL_MVM_TM_ATTR_CMD])
+		return -EINVAL;
+
+	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+	case IWL_MVM_TM_CMD_SET_NOA:
+		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+		    !vif->bss_conf.enable_beacon ||
+		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+			return -EINVAL;
+
+		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+		if (noa_duration >= vif->bss_conf.beacon_int)
+			return -EINVAL;
+
+		mvm->noa_duration = noa_duration;
+		mvm->noa_vif = vif;
+
+		return iwl_mvm_update_quotas(mvm, NULL);
+	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+		/* must be associated client vif - ignore authorized */
+		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+			return -EINVAL;
+
+		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+			return iwl_mvm_enable_beacon_filter(mvm, vif);
+		return iwl_mvm_disable_beacon_filter(mvm, vif);
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    void *data, int len)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int err;
 
-	iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
+	mutex_lock(&mvm->mutex);
+	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+	mutex_unlock(&mvm->mutex);
+
+	return err;
 }
+#endif
 
 struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
@@ -1578,23 +1754,27 @@
 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
 	.conf_tx = iwl_mvm_mac_conf_tx,
 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
+	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
 	.set_key = iwl_mvm_mac_set_key,
 	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
 	.remain_on_channel = iwl_mvm_roc,
 	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
-	.rssi_callback = iwl_mvm_mac_rssi_callback,
-
 	.add_chanctx = iwl_mvm_add_chanctx,
 	.remove_chanctx = iwl_mvm_remove_chanctx,
 	.change_chanctx = iwl_mvm_change_chanctx,
 	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
 	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
 
-	.start_ap = iwl_mvm_start_ap,
-	.stop_ap = iwl_mvm_stop_ap,
+	.start_ap = iwl_mvm_start_ap_ibss,
+	.stop_ap = iwl_mvm_stop_ap_ibss,
+	.join_ibss = iwl_mvm_start_ap_ibss,
+	.leave_ibss = iwl_mvm_stop_ap_ibss,
 
 	.set_tim = iwl_mvm_set_tim,
 
+	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
 #ifdef CONFIG_PM_SLEEP
 	/* look at d3.c */
 	.suspend = iwl_mvm_suspend,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b038927..fed21ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -73,7 +73,6 @@
 #include "iwl-trans.h"
 #include "iwl-notif-wait.h"
 #include "iwl-eeprom-parse.h"
-#include "iwl-trans.h"
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
@@ -162,6 +161,7 @@
 struct iwl_mvm_power_ops {
 	int (*power_update_mode)(struct iwl_mvm *mvm,
 				 struct ieee80211_vif *vif);
+	int (*power_update_device_mode)(struct iwl_mvm *mvm);
 	int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +241,18 @@
 * @last_beacon_signal: last beacon rssi signal in dbm
 * @ave_beacon_signal: average beacon signal
 * @last_cqm_event: rssi of the last cqm event
+* @bt_coex_min_thold: minimum threshold for BT coex
+* @bt_coex_max_thold: maximum threshold for BT coex
+* @last_bt_coex_event: rssi of the last BT coex event
 */
 struct iwl_mvm_vif_bf_data {
 	bool bf_enabled;
 	bool ba_enabled;
 	s8 ave_beacon_signal;
 	s8 last_cqm_event;
+	s8 bt_coex_min_thold;
+	s8 bt_coex_max_thold;
+	s8 last_bt_coex_event;
 };
 
 /**
@@ -255,8 +261,8 @@
  * @color: to solve races upon MAC addition and removal
  * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
  * @uploaded: indicates the MAC context has been added to the device
- * @ap_active: indicates that ap context is configured, and that the interface
- *  should get quota etc.
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ *	should get quota etc.
  * @monitor_active: indicates that monitor context is configured, and that the
  * interface should get quota etc.
  * @queue_params: QoS params for this MAC
@@ -272,7 +278,7 @@
 	u8 ap_sta_id;
 
 	bool uploaded;
-	bool ap_active;
+	bool ap_ibss_active;
 	bool monitor_active;
 	struct iwl_mvm_vif_bf_data bf_data;
 
@@ -306,6 +312,9 @@
 
 	int tx_key_idx;
 
+	bool seqno_valid;
+	u16 seqno;
+
 #if IS_ENABLED(CONFIG_IPV6)
 	/* IPv6 addresses for WoWLAN */
 	struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +342,7 @@
 enum iwl_scan_status {
 	IWL_MVM_SCAN_NONE,
 	IWL_MVM_SCAN_OS,
+	IWL_MVM_SCAN_SCHED,
 };
 
 /**
@@ -434,7 +444,7 @@
 
 	enum iwl_ucode_type cur_ucode;
 	bool ucode_loaded;
-	bool init_ucode_run;
+	bool init_ucode_complete;
 	u32 error_event_table;
 	u32 log_event_table;
 
@@ -470,6 +480,9 @@
 	enum iwl_scan_status scan_status;
 	struct iwl_scan_cmd *scan_cmd;
 
+	/* rx chain antennas set through debugfs for the scan command */
+	u8 scan_rx_ant;
+
 	/* Internal station */
 	struct iwl_mvm_int_sta aux_sta;
 
@@ -479,7 +492,8 @@
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	struct dentry *debugfs_dir;
 	u32 dbgfs_sram_offset, dbgfs_sram_len;
-	bool prevent_power_down_d3;
+	bool disable_power_off;
+	bool disable_power_off_d3;
 #endif
 
 	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +537,23 @@
 	/* BT-Coex */
 	u8 bt_kill_msk;
 	struct iwl_bt_coex_profile_notif last_bt_notif;
+	struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
 
 	/* Thermal Throttling and CTkill */
 	struct iwl_mvm_tt_mgmt thermal_throttle;
 	s32 temperature;	/* Celsius */
 
 	const struct iwl_mvm_power_ops *pm_ops;
+
+#ifdef CONFIG_NL80211_TESTMODE
+	u32 noa_duration;
+	struct ieee80211_vif *noa_vif;
+#endif
+
+	/* Tx queues */
+	u8 aux_queue;
+	u8 first_agg_queue;
+	u8 last_agg_queue;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -570,6 +595,9 @@
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
 					enum ieee80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+			       enum ieee80211_band band,
+			       struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
 void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +636,7 @@
 
 /* NVM */
 int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
 
 int iwl_mvm_up(struct iwl_mvm *mvm);
 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +711,23 @@
 			     struct iwl_device_cmd *cmd);
 void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 
+/* Scheduled scan */
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+					   struct iwl_rx_cmd_buffer *rxb,
+					   struct iwl_device_cmd *cmd);
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif,
+			      struct cfg80211_sched_scan_request *req,
+			      struct ieee80211_sched_scan_ies *ies);
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+				       struct cfg80211_sched_scan_request *req);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct cfg80211_sched_scan_request *req);
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb,
+				  struct iwl_device_cmd *cmd);
+
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +766,13 @@
 	return mvm->pm_ops->power_disable(mvm, vif);
 }
 
+static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+{
+	if (mvm->pm_ops->power_update_device_mode)
+		return mvm->pm_ops->power_update_device_mode(mvm);
+	return 0;
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
 					    struct ieee80211_vif *vif,
@@ -745,6 +798,15 @@
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
 
 /* BT Coex */
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +816,20 @@
 			     struct iwl_device_cmd *cmd);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+	BT_KILL_MSK_DEFAULT,
+	BT_KILL_MSK_SCO_HID_A2DP,
+	BT_KILL_MSK_REDUCED_TXPOW,
+	BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index edb94ea..2beffd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 6000
+#define IWL_MAX_NVM_SECTION_SIZE 7000
 
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@
 #define MAX_NVM_FILE_LEN	16384
 
 /*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
  * HOW TO CREATE THE NVM FILE FORMAT:
  * ------------------------------
  * 1. create hex file, format:
@@ -277,20 +279,23 @@
  *
  * 4. save as "iNVM_xxx.bin" under /lib/firmware
  */
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 {
-	int ret, section_id, section_size;
+	int ret, section_size;
+	u16 section_id;
 	const struct firmware *fw_entry;
 	const struct {
 		__le16 word1;
 		__le16 word2;
 		u8 data[];
 	} *file_sec;
-	const u8 *eof;
+	const u8 *eof, *temp;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
 
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
 	/*
 	 * Obtain NVM image via request_firmware. Since we already used
 	 * request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@
 			break;
 		}
 
-		ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
-					    section_size);
-		if (ret < 0) {
-			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+		/* validate the ID first so a bogus section can't leak temp */
+		if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+			IWL_ERR(mvm, "Invalid NVM section ID\n");
+			ret = -EINVAL;
+			break;
+		}
+		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+		if (!temp) {
+			ret = -ENOMEM;
+			break;
+		}
+		mvm->nvm_sections[section_id].data = temp;
+		mvm->nvm_sections[section_id].length = section_size;
 
 		/* advance to the next section */
 		file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@
 	return ret;
 }
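
The two words in front of each file section encode its size and ID via the
macros above: word1 = 0x0003 gives a section size of 8 * 3 = 24 bytes, and
word2 = 0x2000 gives section ID 0x2000 >> 12 = 2.
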
 
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+	int i, ret;
+	u16 section_id;
+	struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+	for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+		section_id = nvm_to_read[i];
+		ret = iwl_nvm_write_section(mvm, section_id,
+					    sections[section_id].data,
+					    sections[section_id].length);
+		if (ret < 0) {
+			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+			break;
+		}
+	}
+	return ret;
+}
+
 int iwl_nvm_init(struct iwl_mvm *mvm)
 {
 	int ret, i, section;
@@ -385,36 +418,36 @@
 	/* load external NVM if configured */
 	if (iwlwifi_mod_params.nvm_file) {
 		/* move to External NVM flow */
-		ret = iwl_mvm_load_external_nvm(mvm);
+		ret = iwl_mvm_read_external_nvm(mvm);
 		if (ret)
 			return ret;
-	}
+	} else {
+		/* Read From FW NVM */
+		IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
 
-	/* Read From FW NVM */
-	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
-	/* TODO: find correct NVM max size for a section */
-	nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
-			     GFP_KERNEL);
-	if (!nvm_buffer)
-		return -ENOMEM;
-	for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
-		section = nvm_to_read[i];
-		/* we override the constness for initial read */
-		ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
-		if (ret < 0)
-			break;
-		temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
-		if (!temp) {
-			ret = -ENOMEM;
-			break;
+		/* TODO: find correct NVM max size for a section */
+		nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+				     GFP_KERNEL);
+		if (!nvm_buffer)
+			return -ENOMEM;
+		for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+			section = nvm_to_read[i];
+			/* we override the constness for initial read */
+			ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+			if (ret < 0)
+				break;
+			temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+			if (!temp) {
+				ret = -ENOMEM;
+				break;
+			}
+			mvm->nvm_sections[section].data = temp;
+			mvm->nvm_sections[section].length = ret;
 		}
-		mvm->nvm_sections[section].data = temp;
-		mvm->nvm_sections[section].length = ret;
+		kfree(nvm_buffer);
+		if (ret < 0)
+			return ret;
 	}
-	kfree(nvm_buffer);
-	if (ret < 0)
-		return ret;
 
 	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
 	if (!mvm->nvm_data)
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2fcc8ef..d86083c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -224,6 +224,10 @@
 
 	RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
 	RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+		   iwl_mvm_rx_scan_offload_complete_notif, false),
+	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+		   false),
 
 	RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
 	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@
 	CMD(TIME_EVENT_NOTIFICATION),
 	CMD(BINDING_CONTEXT_CMD),
 	CMD(TIME_QUOTA_CMD),
+	CMD(NON_QOS_TX_COUNTER_CMD),
 	CMD(RADIO_VERSION_NOTIFICATION),
 	CMD(SCAN_REQUEST_CMD),
 	CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
 	CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
 	CMD(REMOVE_STA),
 	CMD(LQ_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
+	CMD(MATCH_FOUND_NOTIFICATION),
 	CMD(SCAN_OFFLOAD_REQUEST_CMD),
 	CMD(SCAN_OFFLOAD_ABORT_CMD),
 	CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@
 	CMD(REPLY_BEACON_FILTERING_CMD),
 	CMD(REPLY_THERMAL_MNG_BACKOFF),
 	CMD(MAC_PM_POWER_TABLE),
+	CMD(BT_COEX_CI),
 };
 #undef CMD
 
@@ -344,6 +352,14 @@
 
 	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
+	mvm->aux_queue = 15;
+	mvm->first_agg_queue = 16;
+	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+	if (mvm->cfg->base_params->num_of_queues == 16) {
+		mvm->aux_queue = 11;
+		mvm->first_agg_queue = 12;
+	}
+
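
On the default 20-queue layout this puts the aux station on queue 15 with
aggregation queues 16-19; 16-queue platforms shift to aux = 11 and
aggregation queues 12-15, while the offchannel (8) and command (9) queues
stay fixed in both layouts.
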
 	mutex_init(&mvm->mutex);
 	spin_lock_init(&mvm->async_handlers_lock);
 	INIT_LIST_HEAD(&mvm->time_event_list);
@@ -401,24 +417,32 @@
 	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
 		 mvm->cfg->name, mvm->trans->hw_rev);
 
-	err = iwl_trans_start_hw(mvm->trans);
-	if (err)
-		goto out_free;
-
 	iwl_mvm_tt_initialize(mvm);
 
-	mutex_lock(&mvm->mutex);
-	err = iwl_run_init_mvm_ucode(mvm, true);
-	mutex_unlock(&mvm->mutex);
-	/* returns 0 if successful, 1 if success but in rfkill */
-	if (err < 0 && !iwlmvm_mod_params.init_dbg) {
-		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
-		goto out_free;
-	}
+	/*
+	 * If the NVM exists in an external file,
+	 * there is no need to unnecessarily power up the NIC at driver load
+	 */
+	if (iwlwifi_mod_params.nvm_file) {
+		iwl_nvm_init(mvm);
+	} else {
+		err = iwl_trans_start_hw(mvm->trans);
+		if (err)
+			goto out_free;
 
-	/* Stop the hw after the ALIVE and NVM has been read */
-	if (!iwlmvm_mod_params.init_dbg)
-		iwl_trans_stop_hw(mvm->trans, false);
+		mutex_lock(&mvm->mutex);
+		err = iwl_run_init_mvm_ucode(mvm, true);
+		mutex_unlock(&mvm->mutex);
+		/* returns 0 if successful, 1 if success but in rfkill */
+		if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+			goto out_free;
+		}
+
+		/* Stop the hw after the ALIVE and NVM has been read */
+		if (!iwlmvm_mod_params.init_dbg)
+			iwl_trans_stop_hw(mvm->trans, false);
+	}
 
 	scan_size = sizeof(struct iwl_scan_cmd) +
 		mvm->fw->ucode_capa.max_probe_length +
@@ -435,7 +459,7 @@
 	if (err)
 		goto out_unregister;
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
 		mvm->pm_ops = &pm_mac_ops;
 	else
 		mvm->pm_ops = &pm_legacy_ops;
@@ -449,7 +473,8 @@
  out_free:
 	iwl_phy_db_free(mvm->phy_db);
 	kfree(mvm->scan_cmd);
-	iwl_trans_stop_hw(trans, true);
+	if (!iwlwifi_mod_params.nvm_file)
+		iwl_trans_stop_hw(trans, true);
 	ieee80211_free_hw(mvm->hw);
 	return NULL;
 }
@@ -715,6 +740,9 @@
 		case IWL_MVM_SCAN_OS:
 			ieee80211_scan_completed(mvm->hw, true);
 			break;
+		case IWL_MVM_SCAN_SCHED:
+			ieee80211_sched_scan_stopped(mvm->hw);
+			break;
 		}
 
 		if (mvm->restart_fw > 0)
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d58e393..550824a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -300,11 +300,6 @@
 	}
 
 	if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
-		cmd->rx_data_timeout_uapsd =
-			cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
-		cmd->tx_data_timeout_uapsd =
-			cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-
 		if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
 					    BIT(IEEE80211_AC_VI) |
 					    BIT(IEEE80211_AC_BE) |
@@ -319,10 +314,31 @@
 		}
 
 		cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
-		cmd->heavy_tx_thld_packets =
-			IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
-		cmd->heavy_rx_thld_packets =
-			IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+
+		if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+		    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+			cmd->rx_data_timeout_uapsd =
+				cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+			cmd->tx_data_timeout_uapsd =
+				cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+		} else {
+			cmd->rx_data_timeout_uapsd =
+				cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+			cmd->tx_data_timeout_uapsd =
+				cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+		}
+
+		if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+			cmd->heavy_tx_thld_packets =
+				IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+			cmd->heavy_rx_thld_packets =
+				IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+		} else {
+			cmd->heavy_tx_thld_packets =
+				IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+			cmd->heavy_rx_thld_packets =
+				IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+		}
 		cmd->heavy_tx_thld_percentage =
 			IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
 		cmd->heavy_rx_thld_percentage =
@@ -430,6 +446,32 @@
 				    sizeof(cmd), &cmd);
 }
 
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+	struct iwl_device_power_cmd cmd = {
+		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+	};
+
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+		return 0;
+
+	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
+	    mvm->disable_power_off)
+		cmd.flags &=
+			cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+	IWL_DEBUG_POWER(mvm,
+			"Sending device power command with flags = 0x%X\n",
+			cmd.flags);
+
+	return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+				    &cmd);
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 					struct ieee80211_vif *vif, char *buf,
@@ -440,10 +482,11 @@
 
 	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
 
-	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-			 (cmd.flags &
-			 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-			 0 : 1);
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+		pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+				 (cmd.flags &
+				 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+				 0 : 1);
 	pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
 			 iwlmvm_mod_params.power_scheme);
 	pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -609,6 +652,7 @@
 
 const struct iwl_mvm_power_ops pm_mac_ops = {
 	.power_update_mode = iwl_mvm_power_mac_update_mode,
+	.power_update_device_mode = iwl_mvm_power_update_device,
 	.power_disable = iwl_mvm_power_mac_disable,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 5c6ae16..17e2bc8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -110,7 +110,8 @@
 			data->n_interfaces[id]++;
 		break;
 	case NL80211_IFTYPE_AP:
-		if (mvmvif->ap_active)
+	case NL80211_IFTYPE_ADHOC:
+		if (mvmvif->ap_ibss_active)
 			data->n_interfaces[id]++;
 		break;
 	case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
-	case NL80211_IFTYPE_ADHOC:
-		if (vif->bss_conf.ibss_joined)
-			data->n_interfaces[id]++;
-		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
 	}
 }
 
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+					 struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+	struct iwl_mvm_vif *mvmvif;
+	int i, phy_id = -1, beacon_int = 0;
+
+	if (!mvm->noa_duration || !mvm->noa_vif)
+		return;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+	if (!mvmvif->ap_ibss_active)
+		return;
+
+	phy_id = mvmvif->phy_ctxt->id;
+	beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+		u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+		u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+		if (id != phy_id)
+			continue;
+
+		quota *= (beacon_int - mvm->noa_duration);
+		quota /= beacon_int;
+
+		cmd->quotas[i].quota = cpu_to_le32(quota);
+	}
+#endif
+}
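
The NoA adjustment above is plain proportional scaling: with beacon_int =
100 TU and noa_duration = 25 TU, a binding quota of 64 becomes
64 * (100 - 25) / 100 = 48, surrendering exactly the fraction of airtime
reserved for the absence period.
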
+
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
 	struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@
 	/* Give the remainder of the session to the first binding */
 	le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
 
+	iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
 	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
 				   sizeof(cmd), &cmd);
 	if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 4ffaa3f..a0b4cc8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -82,13 +82,24 @@
 	[ANT_ABC] = ANT_ABC,
 };
 
-#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)		       \
-	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
-				    IWL_RATE_SISO_##s##M_PLCP, \
-				    IWL_RATE_MIMO2_##s##M_PLCP,\
-				    IWL_RATE_##rp##M_INDEX,    \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)			      \
+	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,	      \
+				    IWL_RATE_HT_SISO_MCS_##s##_PLCP,  \
+				    IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+				    IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+				    IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+				    IWL_RATE_##rp##M_INDEX,	      \
 				    IWL_RATE_##rn##M_INDEX }
 
+#define IWL_DECLARE_MCS_RATE(s)						  \
+	[IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP,		  \
+				       IWL_RATE_HT_SISO_MCS_##s##_PLCP,	  \
+				       IWL_RATE_HT_MIMO2_MCS_##s##_PLCP,  \
+				       IWL_RATE_VHT_SISO_MCS_##s##_PLCP,  \
+				       IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+				       IWL_RATE_INVM_INDEX,	          \
+				       IWL_RATE_INVM_INDEX }
+
 /*
  * Parameter order:
  *   rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@
 	IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
 	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
 	IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
-	IWL_DECLARE_RATE_INFO(6, 6, 5, 11),      /*  6mbps */
-	IWL_DECLARE_RATE_INFO(9, 6, 6, 11),      /*  9mbps */
-	IWL_DECLARE_RATE_INFO(12, 12, 11, 18),   /* 12mbps */
-	IWL_DECLARE_RATE_INFO(18, 18, 12, 24),   /* 18mbps */
-	IWL_DECLARE_RATE_INFO(24, 24, 18, 36),   /* 24mbps */
-	IWL_DECLARE_RATE_INFO(36, 36, 24, 48),   /* 36mbps */
-	IWL_DECLARE_RATE_INFO(48, 48, 36, 54),   /* 48mbps */
-	IWL_DECLARE_RATE_INFO(54, 54, 48, INV),  /* 54mbps */
-	IWL_DECLARE_RATE_INFO(60, 60, 48, INV),  /* 60mbps */
-	/* FIXME:RS:          ^^    should be INV (legacy) */
+	IWL_DECLARE_RATE_INFO(6, 0, 5, 11),      /*  6mbps ; MCS 0 */
+	IWL_DECLARE_RATE_INFO(9, INV, 6, 11),    /*  9mbps */
+	IWL_DECLARE_RATE_INFO(12, 1, 11, 18),    /* 12mbps ; MCS 1 */
+	IWL_DECLARE_RATE_INFO(18, 2, 12, 24),    /* 18mbps ; MCS 2 */
+	IWL_DECLARE_RATE_INFO(24, 3, 18, 36),    /* 24mbps ; MCS 3 */
+	IWL_DECLARE_RATE_INFO(36, 4, 24, 48),    /* 36mbps ; MCS 4 */
+	IWL_DECLARE_RATE_INFO(48, 5, 36, 54),    /* 48mbps ; MCS 5 */
+	IWL_DECLARE_RATE_INFO(54, 6, 48, INV),   /* 54mbps ; MCS 6 */
+	IWL_DECLARE_MCS_RATE(7),                 /* MCS 7 */
+	IWL_DECLARE_MCS_RATE(8),                 /* MCS 8 */
+	IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
 };
 
 static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@
 {
 	int idx = 0;
 
-	/* HT rate format */
 	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		idx = rs_extract_rate(rate_n_flags);
+		idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+		idx += IWL_RATE_MCS_0_INDEX;
 
-		WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
-		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
-			idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
-		idx += IWL_FIRST_OFDM_RATE;
-		/* skip 9M not supported in ht*/
+		/* skip 9M, not supported in HT */
 		if (idx >= IWL_RATE_9M_INDEX)
 			idx += 1;
-		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+		if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
 			return idx;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		idx += IWL_RATE_MCS_0_INDEX;
 
-	/* legacy rate format, search for match in table */
+		/* skip 9M, not supported in VHT */
+		if (idx >= IWL_RATE_9M_INDEX)
+			idx++;
+		if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+			return idx;
 	} else {
+		/* legacy rate format, search for match in table */
+
+		u8 legacy_rate = rs_extract_rate(rate_n_flags);
 		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
-			if (iwl_rates[idx].plcp ==
-					rs_extract_rate(rate_n_flags))
+			if (iwl_rates[idx].plcp == legacy_rate)
 				return idx;
 	}
 
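The new mapping depends on the MCS rates being contiguous from IWL_RATE_MCS_0_INDEX in iwl_rates[], with one hole: the legacy 9 Mbps entry has no HT/VHT counterpart, so any index landing on or past it is bumped by one. A sketch of the computation, following the table layout above:

	/* Sketch; mcs is the MCS field extracted from rate_n_flags. */
	int idx = mcs + IWL_RATE_MCS_0_INDEX;	/* e.g. MCS 3 */

	if (idx >= IWL_RATE_9M_INDEX)	/* hop over the legacy 9 Mbps hole */
		idx += 1;		/* MCS 3 thus lands on 24 Mbps */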
@@ -155,6 +171,7 @@
 				   struct ieee80211_sta *sta,
 				   struct iwl_lq_sta *lq_sta);
 static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 
@@ -180,35 +197,52 @@
  */
 
 static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
-	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
 };
 
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
-	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
-	{0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
-	{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
+	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
+	{0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
+	{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
 };
 
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
-	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-	{0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
-	{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
+	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
+	{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
+	{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
+	{0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
+	{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+	{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
 };
 
 static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
-	{0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
-	{0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
-	{0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
+	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
+	{0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+	{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
 };
 
 static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
-	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-	{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
-	{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+	{0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
+	{0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
+	{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+	{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
+	{0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
+	{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+	{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
 };
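Each table is indexed first by the (aggregation, SGI) combination described in the comment above and then by rate. The driver picks the row with an if/else chain further down (see the rs_set_expected_tpt_table() hunk); written as arithmetic, purely as a reading aid, the selection is:

	/* Illustration only: row 0 = NGI, 1 = SGI, 2 = AGG+NGI, 3 = AGG+SGI */
	int row = (lq_sta->is_agg ? 2 : 0) + (tbl->is_SGI ? 1 : 0);

	tbl->expected_tpt = ht_tbl_pointer[row];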
 
 /* mbps, mcs */
@@ -263,7 +297,7 @@
 		       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
 
 	if (lq_sta->dbg_fixed_rate) {
-		rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+		rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
 		iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
 	}
 }
@@ -275,17 +309,6 @@
 {
 	int ret = -EAGAIN;
 
-	/*
-	 * Don't create TX aggregation sessions when in high
-	 * BT traffic, as they would just be disrupted by BT.
-	 */
-	if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
-		IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
-			       BT_MBOX_MSG(&mvm->last_bt_notif,
-					   3, TRAFFIC_LOAD));
-		return ret;
-	}
-
 	IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
 		     sta->addr, tid);
 	ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@
  */
 /* FIXME:RS:remove this function and put the flags statically in the table */
 static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
-				 struct iwl_scale_tbl_info *tbl,
-				 int index, u8 use_green)
+				 struct iwl_scale_tbl_info *tbl, int index)
 {
 	u32 rate_n_flags = 0;
 
+	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+			 RATE_MCS_ANT_ABC_MSK);
+
 	if (is_legacy(tbl->lq_type)) {
-		rate_n_flags = iwl_rates[index].plcp;
+		rate_n_flags |= iwl_rates[index].plcp;
 		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
 			rate_n_flags |= RATE_MCS_CCK_MSK;
-	} else if (is_Ht(tbl->lq_type)) {
-		if (index > IWL_LAST_OFDM_RATE) {
-			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
-			index = IWL_LAST_OFDM_RATE;
-		}
-		rate_n_flags = RATE_MCS_HT_MSK;
+		return rate_n_flags;
+	}
 
-		if (is_siso(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_siso;
-		else if (is_mimo2(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_mimo2;
+	if (is_ht(tbl->lq_type)) {
+		if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
+			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+			index = IWL_LAST_HT_RATE;
+		}
+		rate_n_flags |= RATE_MCS_HT_MSK;
+
+		if (is_ht_siso(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_ht_siso;
+		else if (is_ht_mimo2(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_ht_mimo2;
 		else
 			WARN_ON_ONCE(1);
+	} else if (is_vht(tbl->lq_type)) {
+		if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+			IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+			index = IWL_LAST_VHT_RATE;
+		}
+		rate_n_flags |= RATE_MCS_VHT_MSK;
+		if (is_vht_siso(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_vht_siso;
+		else if (is_vht_mimo2(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_vht_mimo2;
+		else
+			WARN_ON_ONCE(1);
+
 	} else {
 		IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
 	}
 
-	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
-						     RATE_MCS_ANT_ABC_MSK);
+	rate_n_flags |= tbl->bw;
+	if (tbl->is_SGI)
+		rate_n_flags |= RATE_MCS_SGI_MSK;
 
-	if (is_Ht(tbl->lq_type)) {
-		if (tbl->is_ht40)
-			rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
-		if (tbl->is_SGI)
-			rate_n_flags |= RATE_MCS_SGI_MSK;
-
-		if (use_green) {
-			rate_n_flags |= RATE_HT_MCS_GF_MSK;
-			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
-				rate_n_flags &= ~RATE_MCS_SGI_MSK;
-				IWL_ERR(mvm, "GF was set with SGI:SISO\n");
-			}
-		}
-	}
 	return rate_n_flags;
 }
 
@@ -473,7 +501,7 @@
 {
 	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
 	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
-	u8 mcs;
+	u8 nss;
 
 	memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
 	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@
 		return -EINVAL;
 	}
 	tbl->is_SGI = 0;	/* default legacy setup */
-	tbl->is_ht40 = 0;
+	tbl->bw = 0;
 	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
 	tbl->lq_type = LQ_NONE;
 	tbl->max_search = IWL_MAX_SEARCH;
 
-	/* legacy rate format */
-	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+	/* Legacy */
+	if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
+	    !(rate_n_flags & RATE_MCS_VHT_MSK)) {
 		if (num_of_ant == 1) {
 			if (band == IEEE80211_BAND_5GHZ)
-				tbl->lq_type = LQ_A;
+				tbl->lq_type = LQ_LEGACY_A;
 			else
-				tbl->lq_type = LQ_G;
+				tbl->lq_type = LQ_LEGACY_G;
 		}
-	/* HT rate format */
-	} else {
-		if (rate_n_flags & RATE_MCS_SGI_MSK)
-			tbl->is_SGI = 1;
 
-		if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
-			tbl->is_ht40 = 1;
+		return 0;
+	}
 
-		mcs = rs_extract_rate(rate_n_flags);
+	/* HT or VHT */
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		tbl->is_SGI = 1;
 
-		/* SISO */
-		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
-			if (num_of_ant == 1)
-				tbl->lq_type = LQ_SISO; /*else NONE*/
-		/* MIMO2 */
-		} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
-			if (num_of_ant == 2)
-				tbl->lq_type = LQ_MIMO2;
+	tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+		       RATE_HT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			tbl->lq_type = LQ_HT_SISO;
+			WARN_ON_ONCE(num_of_ant != 1);
+		} else if (nss == 2) {
+			tbl->lq_type = LQ_HT_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
 		} else {
-			WARN_ON_ONCE(num_of_ant == 3);
+			WARN_ON_ONCE(1);
+		}
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+		       RATE_VHT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			tbl->lq_type = LQ_VHT_SISO;
+			WARN_ON_ONCE(num_of_ant != 1);
+		} else if (nss == 2) {
+			tbl->lq_type = LQ_VHT_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
+		} else {
+			WARN_ON_ONCE(1);
 		}
 	}
+
+	WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
+	WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
+		     !is_vht(tbl->lq_type));
+
 	return 0;
 }
 
@@ -550,22 +599,6 @@
 }
 
 /**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool rs_use_green(struct ieee80211_sta *sta)
-{
-	/*
-	 * There's a bug somewhere in this code that causes the
-	 * scaling to get stuck because GF+SGI can't be combined
-	 * in SISO rates. Until we find that bug, disable GF, it
-	 * has only limited benefit and we still interoperate with
-	 * GF APs since we can always receive GF transmissions.
-	 */
-	return false;
-}
-
-/**
  * rs_get_supported_rates - get the available rates
  *
  * if management frame or broadcast frame only return
@@ -576,16 +609,15 @@
 				  struct ieee80211_hdr *hdr,
 				  enum iwl_table_type rate_type)
 {
-	if (is_legacy(rate_type)) {
+	if (is_legacy(rate_type))
 		return lq_sta->active_legacy_rate;
-	} else {
-		if (is_siso(rate_type))
-			return lq_sta->active_siso_rate;
-		else {
-			WARN_ON_ONCE(!is_mimo2(rate_type));
-			return lq_sta->active_mimo2_rate;
-		}
-	}
+	else if (is_siso(rate_type))
+		return lq_sta->active_siso_rate;
+	else if (is_mimo2(rate_type))
+		return lq_sta->active_mimo2_rate;
+
+	WARN_ON_ONCE(1);
+	return 0;
 }
 
 static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@
 	u16 rate_mask;
 	u16 high_low;
 	u8 switch_to_legacy = 0;
-	u8 is_green = lq_sta->is_green;
 	struct iwl_mvm *mvm = lq_sta->drv;
 
 	/* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@
 		switch_to_legacy = 1;
 		scale_index = rs_ht_to_legacy[scale_index];
 		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			tbl->lq_type = LQ_A;
+			tbl->lq_type = LQ_LEGACY_A;
 		else
-			tbl->lq_type = LQ_G;
+			tbl->lq_type = LQ_LEGACY_G;
 
 		if (num_of_ant(tbl->ant_type) > 1)
 			tbl->ant_type =
 			    first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
 
-		tbl->is_ht40 = 0;
+		tbl->bw = 0;
 		tbl->is_SGI = 0;
 		tbl->max_search = IWL_MAX_SEARCH;
 	}
@@ -701,7 +732,7 @@
 		low = scale_index;
 
 out:
-	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
 }
 
 /*
@@ -714,6 +745,18 @@
 		(a->is_SGI == b->is_SGI);
 }
 
+static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+{
+	if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+		return RATE_MCS_CHAN_WIDTH_40;
+	else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+		return RATE_MCS_CHAN_WIDTH_80;
+	else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+		return RATE_MCS_CHAN_WIDTH_160;
+
+	return RATE_MCS_CHAN_WIDTH_20;
+}
+
 /*
  * mac80211 sends us Tx status
  */
@@ -783,16 +826,23 @@
 		 */
 		if (info->band == IEEE80211_BAND_2GHZ)
 			mac_index += IWL_FIRST_OFDM_RATE;
+	} else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
+		mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
+		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+			mac_index++;
 	}
+
 	/* Here we actually compare this rate to the latest LQ command */
 	if ((mac_index < 0) ||
 	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-	    (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+	    (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
 	    (tbl_type.ant_type != info->status.antenna) ||
 	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
-				!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+	     !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+	    (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+	     !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
 	    (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
-				!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+	     !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
 	    (rs_index != mac_index)) {
 		IWL_DEBUG_RATE(mvm,
 			       "initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@
 	s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
 
 	/* Check for invalid LQ type */
-	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
+			 !(is_vht(tbl->lq_type)))) {
 		tbl->expected_tpt = expected_tpt_legacy;
 		return;
 	}
@@ -958,18 +1009,40 @@
 		return;
 	}
 
+	ht_tbl_pointer = expected_tpt_mimo2_20MHz;
 	/* Choose among many HT tables depending on number of streams
-	 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+	 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
 	 * status */
-	if (is_siso(tbl->lq_type) && !tbl->is_ht40)
-		ht_tbl_pointer = expected_tpt_siso20MHz;
-	else if (is_siso(tbl->lq_type))
-		ht_tbl_pointer = expected_tpt_siso40MHz;
-	else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
-		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-	else {
-		WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
-		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+	if (is_siso(tbl->lq_type)) {
+		switch (tbl->bw) {
+		case RATE_MCS_CHAN_WIDTH_20:
+			ht_tbl_pointer = expected_tpt_siso_20MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_40:
+			ht_tbl_pointer = expected_tpt_siso_40MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_80:
+			ht_tbl_pointer = expected_tpt_siso_80MHz;
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+	} else if (is_mimo2(tbl->lq_type)) {
+		switch (tbl->bw) {
+		case RATE_MCS_CHAN_WIDTH_20:
+			ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_40:
+			ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_80:
+			ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+	} else {
+		WARN_ON_ONCE(1);
 	}
 
 	if (!tbl->is_SGI && !lq_sta->is_agg)		/* Normal */
@@ -1084,9 +1157,47 @@
 	return new_rate;
 }
 
-static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+/* Move to the next action and wrap around to the first action in case
+ * we're at the last action. Assumes actions start at 0.
+ */
+static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
+				       u8 last_action)
 {
-	return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+	BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
+	BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
+	BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
+
+	tbl->action = (tbl->action + 1) % (last_action + 1);
+}
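With the duplicated ANTENNA1/ANTENNA2 actions collapsed into one (see the rs.h hunk below), advancing becomes a plain modular increment, and callers detect a full cycle by comparing against the action they started from. For example, with last_action == 2 the walk is 0 -> 1 -> 2 -> 0:

	/* Illustration: three actions, starting from 0 */
	u8 action = 0, last_action = 2;
	int i;

	for (i = 0; i < 4; i++)
		action = (action + 1) % (last_action + 1);	/* 1, 2, 0, 1 */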
+
+static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
+			       struct ieee80211_sta *sta)
+{
+	if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+		tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+	else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+		tbl->bw = RATE_MCS_CHAN_WIDTH_40;
+	else
+		tbl->bw = RATE_MCS_CHAN_WIDTH_20;
+}
+
+static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
+			   struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+	if (is_ht20(tbl) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_20))
+		return true;
+	if (is_ht40(tbl) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_40))
+		return true;
+	if (is_ht80(tbl) && (vht_cap->cap &
+			     IEEE80211_VHT_CAP_SHORT_GI_80))
+		return true;
+
+	return false;
 }
 
 /*
@@ -1099,7 +1210,6 @@
 {
 	u16 rate_mask;
 	s32 rate;
-	s8 is_green = lq_sta->is_green;
 
 	if (!sta->ht_cap.ht_supported)
 		return -1;
@@ -1113,16 +1223,12 @@
 
 	IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
 
-	tbl->lq_type = LQ_MIMO2;
+	tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
 	tbl->action = 0;
 	tbl->max_search = IWL_MAX_SEARCH;
 	rate_mask = lq_sta->active_mimo2_rate;
 
-	if (iwl_is_ht40_tx_allowed(sta))
-		tbl->is_ht40 = 1;
-	else
-		tbl->is_ht40 = 0;
-
+	rs_set_bw_from_sta(tbl, sta);
 	rs_set_expected_tpt_table(lq_sta, tbl);
 
 	rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@
 			       rate, rate_mask);
 		return -1;
 	}
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
 
-	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-		       tbl->current_rate, is_green);
+	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+		       tbl->current_rate);
 	return 0;
 }
 
@@ -1150,7 +1256,6 @@
 			     struct iwl_scale_tbl_info *tbl, int index)
 {
 	u16 rate_mask;
-	u8 is_green = lq_sta->is_green;
 	s32 rate;
 
 	if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@
 
 	IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
 
-	tbl->lq_type = LQ_SISO;
+	tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
 	tbl->action = 0;
 	tbl->max_search = IWL_MAX_SEARCH;
 	rate_mask = lq_sta->active_siso_rate;
 
-	if (iwl_is_ht40_tx_allowed(sta))
-		tbl->is_ht40 = 1;
-	else
-		tbl->is_ht40 = 0;
-
-	if (is_green)
-		tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
+	rs_set_bw_from_sta(tbl, sta);
 	rs_set_expected_tpt_table(lq_sta, tbl);
 	rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
 
@@ -1181,9 +1279,9 @@
 			       rate, rate_mask);
 		return -1;
 	}
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-		       tbl->current_rate, is_green);
+	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
+	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+		       tbl->current_rate);
 	return 0;
 }
 
@@ -1211,14 +1309,10 @@
 	while (1) {
 		lq_sta->action_counter++;
 		switch (tbl->action) {
-		case IWL_LEGACY_SWITCH_ANTENNA1:
-		case IWL_LEGACY_SWITCH_ANTENNA2:
+		case IWL_LEGACY_SWITCH_ANTENNA:
 			IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
 
-			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
-			     tx_chains_num <= 1) ||
-			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
-			     tx_chains_num <= 2))
+			if (tx_chains_num <= 1)
 				break;
 
 			/* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@
 		default:
 			WARN_ON_ONCE(1);
 		}
-		tbl->action++;
-		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+		rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
 
 		if (tbl->action == start_action)
 			break;
@@ -1285,9 +1377,7 @@
 
 out:
 	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+	rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
 	return 0;
@@ -1300,12 +1390,10 @@
 				 struct iwl_lq_sta *lq_sta,
 				 struct ieee80211_sta *sta, int index)
 {
-	u8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct iwl_scale_tbl_info *search_tbl =
 				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
 	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
@@ -1314,40 +1402,17 @@
 	u8 update_search_tbl_counter = 0;
 	int ret;
 
-	switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-		/* nothing */
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-		/* avoid antenna B unless MIMO */
-		if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-			tbl->action = IWL_SISO_SWITCH_MIMO2;
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-		/* avoid antenna B and MIMO */
-		valid_tx_ant =
-			first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-		if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
-			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-		break;
-	default:
-		IWL_ERR(mvm, "Invalid BT load %d",
-			BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-		break;
-	}
+	if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
+	    !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+		tbl->action = IWL_SISO_SWITCH_ANTENNA;
 
 	start_action = tbl->action;
 	while (1) {
 		lq_sta->action_counter++;
 		switch (tbl->action) {
-		case IWL_SISO_SWITCH_ANTENNA1:
-		case IWL_SISO_SWITCH_ANTENNA2:
+		case IWL_SISO_SWITCH_ANTENNA:
 			IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
-			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
-			     tx_chains_num <= 1) ||
-			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
-			     tx_chains_num <= 2))
+			if (tx_chains_num <= 1)
 				break;
 
 			if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@
 				goto out;
 			break;
 		case IWL_SISO_SWITCH_GI:
-			if (!tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_20))
-				break;
-			if (tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_40))
+			if (!rs_sgi_allowed(tbl, sta))
 				break;
 
 			IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
 
 			memcpy(search_tbl, tbl, sz);
-			if (is_green) {
-				if (!tbl->is_SGI)
-					break;
-				else
-					IWL_ERR(mvm,
-						"SGI was set in GF+SISO\n");
-			}
 			search_tbl->is_SGI = !tbl->is_SGI;
 			rs_set_expected_tpt_table(lq_sta, search_tbl);
 			if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@
 					break;
 			}
 			search_tbl->current_rate =
-				rate_n_flags_from_tbl(mvm, search_tbl,
-						      index, is_green);
+				rate_n_flags_from_tbl(mvm, search_tbl, index);
 			update_search_tbl_counter = 1;
 			goto out;
 		default:
 			WARN_ON_ONCE(1);
 		}
-		tbl->action++;
-		if (tbl->action > IWL_SISO_SWITCH_GI)
-			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+		rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
 
 		if (tbl->action == start_action)
 			break;
@@ -1424,9 +1475,7 @@
 
  out:
 	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_SISO_SWITCH_GI)
-		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+	rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
 
@@ -1440,63 +1489,20 @@
 				 struct iwl_lq_sta *lq_sta,
 				 struct ieee80211_sta *sta, int index)
 {
-	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct iwl_scale_tbl_info *search_tbl =
 				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
 	u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	u8 tx_chains_num = num_of_ant(valid_tx_ant);
 	u8 update_search_tbl_counter = 0;
 	int ret;
 
-	switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-		/* nothing */
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-		/* avoid antenna B and MIMO */
-		if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
-			tbl->action = IWL_MIMO2_SWITCH_SISO_A;
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-		/* avoid antenna B unless MIMO */
-		if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
-			tbl->action = IWL_MIMO2_SWITCH_SISO_A;
-		break;
-	default:
-		IWL_ERR(mvm, "Invalid BT load %d",
-			BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-		break;
-	}
-
 	start_action = tbl->action;
 	while (1) {
 		lq_sta->action_counter++;
 		switch (tbl->action) {
-		case IWL_MIMO2_SWITCH_ANTENNA1:
-		case IWL_MIMO2_SWITCH_ANTENNA2:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
-
-			if (tx_chains_num <= 2)
-				break;
-
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			memcpy(search_tbl, tbl, sz);
-			if (rs_toggle_antenna(valid_tx_ant,
-					      &search_tbl->current_rate,
-					      search_tbl)) {
-				update_search_tbl_counter = 1;
-				goto out;
-			}
-			break;
 		case IWL_MIMO2_SWITCH_SISO_A:
 		case IWL_MIMO2_SWITCH_SISO_B:
 			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@
 			break;
 
 		case IWL_MIMO2_SWITCH_GI:
-			if (!tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_20))
-				break;
-			if (tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_40))
+			if (!rs_sgi_allowed(tbl, sta))
 				break;
 
 			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@
 					break;
 			}
 			search_tbl->current_rate =
-				rate_n_flags_from_tbl(mvm, search_tbl,
-						      index, is_green);
+				rate_n_flags_from_tbl(mvm, search_tbl, index);
 			update_search_tbl_counter = 1;
 			goto out;
 		default:
 			WARN_ON_ONCE(1);
 		}
-		tbl->action++;
-		if (tbl->action > IWL_MIMO2_SWITCH_GI)
-			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+		rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
 
 		if (tbl->action == start_action)
 			break;
@@ -1564,9 +1563,7 @@
 	return 0;
  out:
 	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_MIMO2_SWITCH_GI)
-		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+	rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
 
@@ -1660,15 +1657,16 @@
  * setup rate table in uCode
  */
 static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+			       struct ieee80211_sta *sta,
 			       struct iwl_lq_sta *lq_sta,
 			       struct iwl_scale_tbl_info *tbl,
-			       int index, u8 is_green)
+			       int index)
 {
 	u32 rate;
 
 	/* Update uCode's rate table. */
-	rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
-	rs_fill_link_cmd(mvm, lq_sta, rate);
+	rate = rate_n_flags_from_tbl(mvm, tbl, index);
+	rs_fill_link_cmd(mvm, sta, lq_sta, rate);
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
 }
 
@@ -1712,7 +1710,6 @@
 	u8 update_lq = 0;
 	struct iwl_scale_tbl_info *tbl, *tbl1;
 	u16 rate_scale_index_msk = 0;
-	u8 is_green = 0;
 	u8 active_tbl = 0;
 	u8 done_search = 0;
 	u16 high_low;
@@ -1754,11 +1751,6 @@
 		active_tbl = 1 - lq_sta->active_tbl;
 
 	tbl = &(lq_sta->lq_info[active_tbl]);
-	if (is_legacy(tbl->lq_type))
-		lq_sta->is_green = 0;
-	else
-		lq_sta->is_green = rs_use_green(sta);
-	is_green = lq_sta->is_green;
 
 	/* current tx rate */
 	index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@
 			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 			/* get "active" rate info */
 			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
-			rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+			rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
 		}
 		return;
 	}
@@ -1978,24 +1970,24 @@
 	     (current_tpt > (100 * tbl->expected_tpt[low]))))
 		scale_action = 0;
 
-	if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+	if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
 	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
 		if (lq_sta->last_bt_traffic >
-		    BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+		    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
 			/*
 			 * don't set scale_action, don't want to scale up if
 			 * the rate scale doesn't otherwise think that is a
 			 * good idea.
 			 */
 		} else if (lq_sta->last_bt_traffic <=
-			   BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+			   le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
 			scale_action = -1;
 		}
 	}
 	lq_sta->last_bt_traffic =
-		BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+		le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-	if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+	if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
 	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
 		/* search for a new modulation */
 		rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@
 lq_update:
 	/* Replace uCode's rate table for the destination station. */
 	if (update_lq)
-		rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+		rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
 
 	rs_stay_in_table(lq_sta, false);
 
@@ -2071,7 +2063,7 @@
 			IWL_DEBUG_RATE(mvm,
 				       "Switch current  mcs: %X index: %d\n",
 				       tbl->current_rate, index);
-			rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+			rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
 			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
 		} else {
 			done_search = 1;
@@ -2113,7 +2105,7 @@
 	}
 
 out:
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
 	lq_sta->last_txrate_idx = index;
 }
 
@@ -2140,7 +2132,6 @@
 	int rate_idx;
 	int i;
 	u32 rate;
-	u8 use_green = rs_use_green(sta);
 	u8 active_tbl = 0;
 	u8 valid_tx_ant;
 
@@ -2172,10 +2163,10 @@
 	if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
 		rs_toggle_antenna(valid_tx_ant, &rate, tbl);
 
-	rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+	rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
 	tbl->current_rate = rate;
 	rs_set_expected_tpt_table(lq_sta, tbl);
-	rs_fill_link_cmd(NULL, lq_sta, rate);
+	rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
 	/* TODO restore station should remember the lq cmd */
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
 }
@@ -2190,7 +2181,6 @@
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_lq_sta *lq_sta = mvm_sta;
-	int rate_idx;
 
 	IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
 
@@ -2215,36 +2205,9 @@
 	if (rate_control_send_low(sta, mvm_sta, txrc))
 		return;
 
-	rate_idx  = lq_sta->last_txrate_idx;
+	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+				  info->band, &info->control.rates[0]);
 
-	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
-		rate_idx -= IWL_FIRST_OFDM_RATE;
-		/* 6M and 9M shared same MCS index */
-		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
-		WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
-			     IWL_RATE_MIMO3_6M_PLCP);
-		if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-		    IWL_RATE_MIMO2_6M_PLCP)
-			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
-		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
-			info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
-			info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-		if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
-			info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
-	} else {
-		/* Check for invalid rates */
-		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-		    ((sband->band == IEEE80211_BAND_5GHZ) &&
-		     (rate_idx < IWL_FIRST_OFDM_RATE)))
-			rate_idx = rate_lowest_index(sband, sta);
-		/* On valid 5 GHz rate, adjust index */
-		else if (sband->band == IEEE80211_BAND_5GHZ)
-			rate_idx -= IWL_FIRST_OFDM_RATE;
-		info->control.rates[0].flags = 0;
-	}
-	info->control.rates[0].idx = rate_idx;
 	info->control.rates[0].count = 1;
 }
 
@@ -2261,6 +2224,24 @@
 	return &sta_priv->lq_sta;
 }
 
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+				       int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		(0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+		return IWL_RATE_MCS_7_INDEX;
+	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+		return IWL_RATE_MCS_8_INDEX;
+	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+		return IWL_RATE_MCS_9_INDEX;
+
+	WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+	return -1;
+}
+
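The VHT RX MCS map packs two bits per spatial stream: 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported. The helper shifts the pair for the requested NSS down and masks it. Decoding a hypothetical map of 0xfffa by hand:

	/* Worked example; the map value is hypothetical. */
	u16 map = 0xfffa;		/* 1111 1111 1111 1010 in binary */
	u16 nss1 = (map >> 0) & 0x3;	/* 0b10 = 2 -> MCS 0-9 */
	u16 nss2 = (map >> 2) & 0x3;	/* 0b10 = 2 -> MCS 0-9 */
	u16 nss3 = (map >> 4) & 0x3;	/* 0b11 = 3 -> not supported */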
 /*
  * Called after adding a new station to initialize rate scaling
  */
@@ -2270,6 +2251,7 @@
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 	struct iwl_mvm_sta *sta_priv;
 	struct iwl_lq_sta *lq_sta;
 	struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@
 
 	lq_sta->max_rate_idx = -1;
 	lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
-	lq_sta->is_green = rs_use_green(sta);
 	lq_sta->band = sband->band;
 	/*
 	 * active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@
 	for_each_set_bit(i, &supp, BITS_PER_LONG)
 		lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
 
-	/*
-	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
-	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
-	 */
-	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
-	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
-	lq_sta->active_siso_rate &= ~((u16)0x2);
-	lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+	/* TODO: should probably account for rx_highest for both HT/VHT */
+	if (!vht_cap || !vht_cap->vht_supported) {
+		/* active_siso_rate mask includes 9 MBits (bit 5),
+		 * and CCK (bits 0-3), supp_rates[] does not;
+		 * shift to convert format, force 9 MBits off.
+		 */
+		lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+		lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+		lq_sta->active_siso_rate &= ~((u16)0x2);
+		lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
 
-	/* Same here */
-	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
-	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
-	lq_sta->active_mimo2_rate &= ~((u16)0x2);
-	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+		/* Same here */
+		lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+		lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+		lq_sta->active_mimo2_rate &= ~((u16)0x2);
+		lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+		lq_sta->is_vht = false;
+	} else {
+		int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+		if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+			for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+				if (i == IWL_RATE_9M_INDEX)
+					continue;
+
+				lq_sta->active_siso_rate |= BIT(i);
+			}
+		}
+
+		highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+		if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+			for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+				if (i == IWL_RATE_9M_INDEX)
+					continue;
+
+				lq_sta->active_mimo2_rate |= BIT(i);
+			}
+		}
+
+		/* TODO: avoid MCS9 in 20MHz, which isn't valid for 11ac */
+		lq_sta->is_vht = true;
+	}
 
 	IWL_DEBUG_RATE(mvm,
-		       "SISO-RATE=%X MIMO2-RATE=%X\n",
+		       "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
 		       lq_sta->active_siso_rate,
-		       lq_sta->active_mimo2_rate);
+		       lq_sta->active_mimo2_rate,
+		       lq_sta->is_vht);
 
 	/* These values will be overridden later */
 	lq_sta->lq.single_stream_ant_msk =
@@ -2358,6 +2368,7 @@
 }
 
 static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta, u32 new_rate)
 {
 	struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@
 		rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
 					 &rate_idx);
 
-
 		/* Indicate to uCode which entries might be MIMO.
 		 * If initial rate was MIMO, this will finally end up
 		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@
 		}
 
 		/* Don't allow HT rates after next pass.
-		 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+		 * rs_get_lower_rate() will change type to LQ_LEGACY_A
+		 * or LQ_LEGACY_G.
+		 */
 		use_ht_possible = 0;
 
 		/* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@
 	lq_cmd->agg_time_limit =
 		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
 
-	/*
-	 * overwrite if needed, pass aggregation time limit
-	 * to uCode in uSec - This is racy - but heh, at least it helps...
-	 */
-	if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
-		lq_cmd->agg_time_limit = cpu_to_le16(1200);
+	if (sta)
+		lq_cmd->agg_time_limit =
+			cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@
 	    (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
 	    (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
 	desc += sprintf(buff+desc, "lq type %s\n",
-	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
-	if (is_Ht(tbl->lq_type)) {
+			(is_legacy(tbl->lq_type)) ? "legacy" :
+			is_vht(tbl->lq_type) ? "VHT" : "HT");
+	if (is_ht(tbl->lq_type)) {
 		desc += sprintf(buff+desc, " %s",
 		   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
 		   desc += sprintf(buff+desc, " %s",
-		   (tbl->is_ht40) ? "40MHz" : "20MHz");
-		   desc += sprintf(buff+desc, " %s %s %s\n",
+				   (is_ht20(tbl)) ? "20MHz" :
+				   (is_ht40(tbl)) ? "40MHz" :
+				   (is_ht80(tbl)) ? "80MHz" : "BAD BW");
+		   desc += sprintf(buff+desc, " %s %s\n",
 				   (tbl->is_SGI) ? "SGI" : "",
-		   (lq_sta->is_green) ? "GF enabled" : "",
-		   (lq_sta->is_agg) ? "AGG on" : "");
+				   (lq_sta->is_agg) ? "AGG on" : "");
 	}
 	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
 			lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@
 	int desc = 0;
 	int i, j;
 	ssize_t ret;
-
+	struct iwl_scale_tbl_info *tbl;
 	struct iwl_lq_sta *lq_sta = file->private_data;
 
 	buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@
 		return -ENOMEM;
 
 	for (i = 0; i < LQ_SIZE; i++) {
+		tbl = &(lq_sta->lq_info[i]);
 		desc += sprintf(buff+desc,
-				"%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+				"%s type=%d SGI=%d BW=%s DUP=0\n"
 				"rate=0x%X\n",
 				lq_sta->active_tbl == i ? "*" : "x",
-				lq_sta->lq_info[i].lq_type,
-				lq_sta->lq_info[i].is_SGI,
-				lq_sta->lq_info[i].is_ht40,
-				lq_sta->is_green,
-				lq_sta->lq_info[i].current_rate);
+				tbl->lq_type,
+				tbl->is_SGI,
+				is_ht20(tbl) ? "20MHz" :
+				is_ht40(tbl) ? "40MHz" :
+				is_ht80(tbl) ? "80MHz" : "ERR",
+				tbl->current_rate);
 		for (j = 0; j < IWL_RATE_COUNT; j++) {
 			desc += sprintf(buff+desc,
 				"counter=%d success=%d %%=%d\n",
-				lq_sta->lq_info[i].win[j].counter,
-				lq_sta->lq_info[i].win[j].success_counter,
-				lq_sta->lq_info[i].win[j].success_ratio);
+				tbl->win[j].counter,
+				tbl->win[j].success_counter,
+				tbl->win[j].success_ratio);
 		}
 	}
 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 335cf16..5d5344f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -35,9 +35,11 @@
 #include "iwl-trans.h"
 
 struct iwl_rs_rate_info {
-	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
-	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
-	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+	u8 plcp;	  /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+	u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_ht_mimo2; /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+	u8 plcp_vht_siso;
+	u8 plcp_vht_mimo2;
 	u8 prev_rs;      /* previous rate used in rs algo */
 	u8 next_rs;      /* next rate used in rs algo */
 };
@@ -83,35 +85,52 @@
 #define	IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
 
 
-/* uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for HT/VHT bit rates */
 enum {
-	IWL_RATE_SISO_6M_PLCP = 0,
-	IWL_RATE_SISO_12M_PLCP = 1,
-	IWL_RATE_SISO_18M_PLCP = 2,
-	IWL_RATE_SISO_24M_PLCP = 3,
-	IWL_RATE_SISO_36M_PLCP = 4,
-	IWL_RATE_SISO_48M_PLCP = 5,
-	IWL_RATE_SISO_54M_PLCP = 6,
-	IWL_RATE_SISO_60M_PLCP = 7,
-	IWL_RATE_MIMO2_6M_PLCP  = 0x8,
-	IWL_RATE_MIMO2_12M_PLCP = 0x9,
-	IWL_RATE_MIMO2_18M_PLCP = 0xa,
-	IWL_RATE_MIMO2_24M_PLCP = 0xb,
-	IWL_RATE_MIMO2_36M_PLCP = 0xc,
-	IWL_RATE_MIMO2_48M_PLCP = 0xd,
-	IWL_RATE_MIMO2_54M_PLCP = 0xe,
-	IWL_RATE_MIMO2_60M_PLCP = 0xf,
-	IWL_RATE_MIMO3_6M_PLCP  = 0x10,
-	IWL_RATE_MIMO3_12M_PLCP = 0x11,
-	IWL_RATE_MIMO3_18M_PLCP = 0x12,
-	IWL_RATE_MIMO3_24M_PLCP = 0x13,
-	IWL_RATE_MIMO3_36M_PLCP = 0x14,
-	IWL_RATE_MIMO3_48M_PLCP = 0x15,
-	IWL_RATE_MIMO3_54M_PLCP = 0x16,
-	IWL_RATE_MIMO3_60M_PLCP = 0x17,
-	IWL_RATE_SISO_INVM_PLCP,
-	IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-	IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+	IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+	IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+	IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+	IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+	IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+	IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+	IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+	IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+	IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+	IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+	IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+	IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+	IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+	IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+	IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+	IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+	IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+	IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+	IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+	IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+	IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+	IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+	IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+	IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+	IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+	IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+	IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+	IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+	IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+	IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+	IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+	IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+	IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+	IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+	IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+	IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+	IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
 };
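Note that the HT and VHT SISO PLCP values intentionally overlap (MCS 0-7 are 0-7 in both formats), while the MIMO2 ranges differ (0x8-0xF for HT, 0x10-0x19 for VHT); the rate format is told apart by RATE_MCS_HT_MSK / RATE_MCS_VHT_MSK in rate_n_flags, never by the PLCP value alone. A compile-time illustration of the overlap:

	/* Illustration only: SISO MCS 3 has the same PLCP in both formats. */
	BUILD_BUG_ON(IWL_RATE_HT_SISO_MCS_3_PLCP !=
		     IWL_RATE_VHT_SISO_MCS_3_PLCP);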
 
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
@@ -139,25 +158,33 @@
 #define IWL_RATE_DECREASE_TH		1920	/*  15% */
 
 /* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1      0
-#define IWL_LEGACY_SWITCH_ANTENNA2      1
-#define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2         3
+enum {
+	IWL_LEGACY_SWITCH_ANTENNA,
+	IWL_LEGACY_SWITCH_SISO,
+	IWL_LEGACY_SWITCH_MIMO2,
+	IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
+	IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
+};
 
 /* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1        0
-#define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2           2
-#define IWL_SISO_SWITCH_GI              3
+enum {
+	IWL_SISO_SWITCH_ANTENNA,
+	IWL_SISO_SWITCH_MIMO2,
+	IWL_SISO_SWITCH_GI,
+	IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
+	IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
+};
 
 /* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1       0
-#define IWL_MIMO2_SWITCH_ANTENNA2       1
-#define IWL_MIMO2_SWITCH_SISO_A         2
-#define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_GI             4
+enum {
+	IWL_MIMO2_SWITCH_SISO_A,
+	IWL_MIMO2_SWITCH_SISO_B,
+	IWL_MIMO2_SWITCH_GI,
+	IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
+	IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
+};
 
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
 
 #define IWL_ACTION_LIMIT		3	/* # possible actions */
 
@@ -188,20 +215,31 @@
 
 enum iwl_table_type {
 	LQ_NONE,
-	LQ_G,		/* legacy types */
-	LQ_A,
-	LQ_SISO,	/* high-throughput types */
-	LQ_MIMO2,
+	LQ_LEGACY_G,	/* legacy types */
+	LQ_LEGACY_A,
+	LQ_HT_SISO,	/* HT types */
+	LQ_HT_MIMO2,
+	LQ_VHT_SISO,    /* VHT types */
+	LQ_VHT_MIMO2,
 	LQ_MAX,
 };
 
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) is_mimo2(tbl)
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
+#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
+#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
+#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
+#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
+#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
+#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
+#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
+#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
+#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
+
+#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
 
 #define IWL_MAX_MCS_DISPLAY_SIZE	12
 
@@ -232,7 +270,7 @@
 	enum iwl_table_type lq_type;
 	u8 ant_type;
 	u8 is_SGI;	/* 1 = short guard interval */
-	u8 is_ht40;	/* 1 = 40 MHz channel width */
+	u32 bw;	        /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
 	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
 	u8 max_search;	/* maximum number of tables we can search */
 	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@
 	u64 flush_timer;	/* time staying in mode before new search */
 
 	u8 action_counter;	/* # mode-switch actions tried */
-	u8 is_green;
+	bool is_vht;
 	enum ieee80211_band band;
 
 	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -314,9 +352,8 @@
 }
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
-				 struct ieee80211_sta *sta,
-				 enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  enum ieee80211_band band);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +365,7 @@
  * ieee80211_register_hw
  *
  */
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +373,7 @@
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 2a8cb5a..3a1f398 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -300,10 +300,14 @@
 		return 0;
 	}
 
+	/*
+	 * Keep packets with CRC errors (and with overrun) for monitor mode
+	 * (otherwise the firmware discards them) but mark them as bad.
+	 */
 	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
 	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
 		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
-		return 0;
+		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 	}
 
 	/* This will be used in several places later */
@@ -422,6 +426,27 @@
 
 	mvmvif->bf_data.ave_beacon_signal = sig;
 
+	/* BT Coex */
+	if (mvmvif->bf_data.bt_coex_min_thold !=
+	    mvmvif->bf_data.bt_coex_max_thold) {
+		last_event = mvmvif->bf_data.last_bt_coex_event;
+		if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+		    (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+		     last_event == 0)) {
+			mvmvif->bf_data.last_bt_coex_event = sig;
+			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+				     sig);
+			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+		} else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+			   (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+			    last_event == 0)) {
+			mvmvif->bf_data.last_bt_coex_event = sig;
+			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+				     sig);
+			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+		}
+	}
+
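The two thresholds form a hysteresis band: once the averaged beacon signal crosses bt_coex_max_thold a single RSSI_EVENT_HIGH fires, and nothing more is reported until it drops below bt_coex_min_thold (and vice versa), so a signal hovering near one threshold cannot cause an event storm. With hypothetical thresholds of min = -80 dBm and max = -60 dBm, a reading of -55 fires HIGH once, later readings of -62 or -70 fire nothing, and only a drop to -82 fires LOW. The trigger conditions, restated as a sketch:

	/* Sketch of the two triggers; sig and last_event as in the code above. */
	bool high = sig > max_thold &&
		    (last_event <= min_thold || last_event == 0);
	bool low  = sig < min_thold &&
		    (last_event >= max_thold || last_event == 0);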
 	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
 		return;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 621fb71..dff7592 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -74,8 +74,12 @@
 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 {
 	u16 rx_chain;
-	u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+	u8 rx_ant;
 
+	if (mvm->scan_rx_ant != ANT_NONE)
+		rx_ant = mvm->scan_rx_ant;
+	else
+		rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
 	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
 	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
 	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@
 
 static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
 {
-	if (vif->bss_conf.assoc)
-		return cpu_to_le32(vif->bss_conf.beacon_int);
-	else
+	if (!vif->bss_conf.assoc)
 		return 0;
+
+	return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
 }
 
 static inline __le32
@@ -133,11 +137,12 @@
  * request.
  */
 static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
-				    struct cfg80211_scan_request *req)
+				    struct cfg80211_scan_request *req,
+				    int first)
 {
 	int fw_idx, req_idx;
 
-	for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+	for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
 	     req_idx--, fw_idx++) {
 		cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
 		cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
@@ -153,9 +158,9 @@
  * just to notify that this scan is active and not passive.
  * In order to notify the FW of the number of SSIDs we wish to scan (including
  * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). The first SSID is already
- * included in the probe template, so we need to set only req->n_ssids - 1 bits
- * in addition to the first bit.
+ * one for each SSID, and set the active bit (first). If the first SSID is
+ * already included in the probe template, we need to set only
+ * req->n_ssids - 1 bits in addition to the first bit.
  */
 static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
 {
@@ -170,7 +175,8 @@
 }
 
 static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
-				       struct cfg80211_scan_request *req)
+				       struct cfg80211_scan_request *req,
+				       bool basic_ssid)
 {
 	u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
 	u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@
 	struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
 		(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
 	int i;
+	int type = BIT(req->n_ssids) - 1;
+
+	if (!basic_ssid)
+		type |= BIT(req->n_ssids);
 
 	for (i = 0; i < cmd->channel_count; i++) {
 		chan->channel = cpu_to_le16(req->channels[i]->hw_value);
-		chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
+		chan->type = cpu_to_le32(type);
 		if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
 			chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
 		chan->active_dwell = cpu_to_le16(active_dwell);
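The type field carries one bit per SSID to probe for: BIT(n_ssids) - 1 sets the n_ssids low bits. When the firmware lacks the implicit template SSID (IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID), one extra bit is added for it. For n_ssids = 2:

	/* Illustration for n_ssids = 2 */
	int type = BIT(2) - 1;	/* 0b011: first SSID lives in the probe template */
	if (!basic_ssid)
		type |= BIT(2);	/* 0b111: the template SSID must be scanned too */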
@@ -268,6 +278,8 @@
 	u32 status;
 	int ssid_len = 0;
 	u8 *ssid = NULL;
+	bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+			   IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
 
 	lockdep_assert_held(&mvm->mutex);
 	BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@
 	if (req->n_ssids > 0) {
 		cmd->passive2active = cpu_to_le16(1);
 		cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
-		ssid = req->ssids[0].ssid;
-		ssid_len = req->ssids[0].ssid_len;
+		if (basic_ssid) {
+			ssid = req->ssids[0].ssid;
+			ssid_len = req->ssids[0].ssid_len;
+		}
 	} else {
 		cmd->passive2active = 0;
 		cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
 	}
 
-	iwl_mvm_scan_fill_ssids(cmd, req);
+	iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
 
 	cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
 	cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@
 			    req->ie, req->ie_len,
 			    mvm->fw->ucode_capa.max_probe_length));
 
-	iwl_mvm_scan_fill_channels(cmd, req);
+	iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
 
 	cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
 		le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@
 	return 0;
 }
 
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb,
+				  struct iwl_device_cmd *cmd)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+	if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+		ieee80211_sched_scan_results(mvm->hw);
+	}
+
+	return 0;
+}
+
 static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
 				     struct iwl_rx_packet *pkt, void *data)
 {
@@ -447,3 +476,406 @@
 out_remove_notif:
 	iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
 }
+
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+					   struct iwl_rx_cmd_buffer *rxb,
+					   struct iwl_device_cmd *cmd)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+
+	IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+		       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+		       "completed" : "aborted");
+
+	mvm->scan_status = IWL_MVM_SCAN_NONE;
+	ieee80211_sched_scan_stopped(mvm->hw);
+
+	return 0;
+}
+
+static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct ieee80211_sched_scan_ies *ies,
+					  enum ieee80211_band band,
+					  struct iwl_tx_cmd *cmd,
+					  u8 *data)
+{
+	u16 cmd_len;
+
+	cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+	cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+	cmd->sta_id = mvm->aux_sta.sta_id;
+
+	cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
+
+	cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
+					 vif->addr,
+					 1, NULL, 0,
+					 ies->ie[band], ies->len[band],
+					 SCAN_OFFLOAD_PROBE_REQ_SIZE);
+	cmd->len = cpu_to_le16(cmd_len);
+}
+
+static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
+			       struct ieee80211_vif *vif,
+			       struct cfg80211_sched_scan_request *req,
+			       struct iwl_scan_offload_cmd *scan)
+{
+	scan->channel_count =
+		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+	scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+	scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+	scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
+	scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
+	scan->max_out_time = cpu_to_le32(200 * 1024);
+	scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+	scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+					  MAC_FILTER_IN_BEACON);
+	scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
+	scan->rep_count = cpu_to_le32(1);
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+	int i;
+
+	for (i = 0; i < PROBE_OPTION_MAX; i++) {
+		if (!ssid_list[i].len)
+			break;
+		if (ssid_list[i].len == ssid_len &&
+		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+			return i;
+	}
+	return -1;
+}
+
+static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
+					struct iwl_scan_offload_cmd *scan,
+					u32 *ssid_bitmap)
+{
+	int i, j;
+	int index;
+
+	/*
+	 * Copy SSIDs from the match list.
+	 * iwl_mvm_config_sched_scan_profiles() relies on the order of these
+	 * SSIDs when it configures the match list.
+	 */
+	for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+		scan->direct_scan[i].id = WLAN_EID_SSID;
+		scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+		memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+		       scan->direct_scan[i].len);
+	}
+
+	/* add SSIDs from scan SSID list */
+	*ssid_bitmap = 0;
+	for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
+		index = iwl_ssid_exist(req->ssids[j].ssid,
+				       req->ssids[j].ssid_len,
+				       scan->direct_scan);
+		if (index < 0) {
+			if (!req->ssids[j].ssid_len)
+				continue;
+			scan->direct_scan[i].id = WLAN_EID_SSID;
+			scan->direct_scan[i].len = req->ssids[j].ssid_len;
+			memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
+			       scan->direct_scan[i].len);
+			*ssid_bitmap |= BIT(i + 1);
+			i++;
+		} else {
+			*ssid_bitmap |= BIT(index + 1);
+		}
+	}
+}
+
+static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
+				  struct cfg80211_sched_scan_request *req,
+				  struct iwl_scan_channel_cfg *channels,
+				  enum ieee80211_band band,
+				  int *head, int *tail,
+				  u32 ssid_bitmap)
+{
+	struct ieee80211_supported_band *s_band;
+	int n_probes = req->n_ssids;
+	int n_channels = req->n_channels;
+	u8 active_dwell, passive_dwell;
+	int i, j, index = 0;
+	bool partial;
+
+	/*
+	 * We have to configure all supported channels, even if we don't want
+	 * to scan on them, but channels must be sent in the order in which we
+	 * want them scanned. So add the requested channels to the head of the
+	 * list and all remaining supported channels to the tail.
+	 */
+	active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
+	passive_dwell = iwl_mvm_get_passive_dwell(band);
+	s_band = &mvm->nvm_data->bands[band];
+
+	for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
+		partial = false;
+		for (j = 0; j < n_channels; j++)
+			if (s_band->channels[i].center_freq ==
+						req->channels[j]->center_freq) {
+				index = *head;
+				(*head)++;
+				/*
+				 * Channels that came with the request are
+				 * part of the partial scan.
+				 */
+				partial = true;
+				break;
+			}
+		if (!partial) {
+			index = *tail;
+			(*tail)--;
+		}
+		channels->channel_number[index] =
+			cpu_to_le16(ieee80211_frequency_to_channel(
+					s_band->channels[i].center_freq));
+		channels->dwell_time[index][0] = active_dwell;
+		channels->dwell_time[index][1] = passive_dwell;
+
+		channels->iter_count[index] = cpu_to_le16(1);
+		channels->iter_interval[index] = 0;
+
+		if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+			channels->type[index] |=
+				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
+
+		channels->type[index] |=
+				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
+		if (partial)
+			channels->type[index] |=
+				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+
+		if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+			channels->type[index] |=
+				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
+
+		/* scan for all SSIDs from req->ssids */
+		channels->type[index] |= cpu_to_le32(ssid_bitmap);
+	}
+}
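
The head/tail bookkeeping above is a two-pointer placement into one shared array; a minimal sketch of the idea (illustrative names, not driver code):

/* Requested channels fill the array from the front, the remaining
 * supported channels from the back, so the fw visits the requested
 * ones first.  head and tail are inclusive indices.
 */
static int example_place_channel(int *head, int *tail, int requested)
{
	return requested ? (*head)++ : (*tail)--;
}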
+
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif,
+			      struct cfg80211_sched_scan_request *req,
+			      struct ieee80211_sched_scan_ies *ies)
+{
+	int supported_bands = 0;
+	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
+	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+	int head = 0;
+	int tail = band_2ghz + band_5ghz - 1;
+	u32 ssid_bitmap;
+	int cmd_len;
+	int ret;
+
+	struct iwl_scan_offload_cfg *scan_cfg;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_CONFIG_CMD,
+		.flags = CMD_SYNC,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (band_2ghz)
+		supported_bands++;
+	if (band_5ghz)
+		supported_bands++;
+
+	cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+				supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+	scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
+	if (!scan_cfg)
+		return -ENOMEM;
+
+	iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+	scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
+
+	iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+	/* build tx frames for supported bands */
+	if (band_2ghz) {
+		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+					      IEEE80211_BAND_2GHZ,
+					      &scan_cfg->scan_cmd.tx_cmd[0],
+					      scan_cfg->data);
+		iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+				      IEEE80211_BAND_2GHZ, &head, &tail,
+				      ssid_bitmap);
+	}
+	if (band_5ghz) {
+		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+					      IEEE80211_BAND_5GHZ,
+					      &scan_cfg->scan_cmd.tx_cmd[1],
+					      scan_cfg->data +
+						SCAN_OFFLOAD_PROBE_REQ_SIZE);
+		iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+				      IEEE80211_BAND_5GHZ, &head, &tail,
+				      ssid_bitmap);
+	}
+
+	cmd.data[0] = scan_cfg;
+	cmd.len[0] = cmd_len;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(scan_cfg);
+	return ret;
+}
+
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+				       struct cfg80211_sched_scan_request *req)
+{
+	struct iwl_scan_offload_profile *profile;
+	struct iwl_scan_offload_profile_cfg *profile_cfg;
+	struct iwl_scan_offload_blacklist *blacklist;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+		.flags = CMD_SYNC,
+		.len[1] = sizeof(*profile_cfg),
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int blacklist_len;
+	int i;
+	int ret;
+
+	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+		return -EIO;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+	else
+		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+	if (!blacklist)
+		return -ENOMEM;
+
+	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+	if (!profile_cfg) {
+		ret = -ENOMEM;
+		goto free_blacklist;
+	}
+
+	cmd.data[0] = blacklist;
+	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+	cmd.data[1] = profile_cfg;
+
+	/* No blacklist configuration */
+
+	profile_cfg->num_profiles = req->n_match_sets;
+	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+	for (i = 0; i < req->n_match_sets; i++) {
+		profile = &profile_cfg->profiles[i];
+		profile->ssid_index = i;
+		/* Support any cipher and auth algorithm */
+		profile->unicast_cipher = 0xff;
+		profile->auth_alg = 0xff;
+		profile->network_type = IWL_NETWORK_TYPE_ANY;
+		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+	}
+
+	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(profile_cfg);
+free_blacklist:
+	kfree(blacklist);
+
+	return ret;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct cfg80211_sched_scan_request *req)
+{
+	struct iwl_scan_offload_req scan_req = {
+		.watchdog = IWL_SCHED_SCAN_WATCHDOG,
+
+		.schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
+		.schedule_line[0].delay = req->interval / 1000,
+		.schedule_line[0].full_scan_mul = 1,
+
+		.schedule_line[1].iterations = 0xff,
+		.schedule_line[1].delay = req->interval / 1000,
+		.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
+	};
+
+	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+		IWL_DEBUG_SCAN(mvm,
+			       "Sending scheduled scan with filtering, filter len %d\n",
+			       req->n_match_sets);
+		scan_req.flags |=
+				cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
+	} else {
+		IWL_DEBUG_SCAN(mvm,
+			       "Sending Scheduled scan without filtering\n");
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+				    sizeof(scan_req), &scan_req);
+}
+
+static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+{
+	int ret;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_ABORT_CMD,
+		.flags = CMD_SYNC,
+	};
+	u32 status;
+
+	/* Exit immediately with an error when the device is not ready to
+	 * receive the scan abort command or is not currently performing a
+	 * scheduled scan */
+	if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+		return -EIO;
+
+	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+	if (ret)
+		return ret;
+
+	if (status != CAN_ABORT_STATUS) {
+		/*
+		 * The scan abort will return 1 for success or
+		 * 2 for "failure".  A failure condition can be
+		 * due to simply not being in an active scan which
+		 * can occur if we send the scan abort before the
+		 * microcode has notified us that a scan is completed.
+		 */
+		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+		ret = -EIO;
+	}
+
+	return ret;
+}
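
iwl_mvm_send_sched_scan_abort() checks two independent failure layers; the pattern, reduced to a sketch (send_cmd() is a hypothetical stand-in for the transport call):

static int example_abort(unsigned int expected_ok)
{
	unsigned int status;
	int ret = send_cmd(&status);	/* hypothetical transport call */

	if (ret)
		return ret;		/* host/transport failure */
	if (status != expected_ok)
		return -EIO;		/* fw had no matching scan to abort */
	return 0;
}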
+
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
+		IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+		return;
+	}
+
+	ret = iwl_mvm_send_sched_scan_abort(mvm);
+	if (ret)
+		IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+	else
+		IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 44add29..3299523 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,6 +66,115 @@
 #include "sta.h"
 #include "rs.h"
 
+static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+					 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+	memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+	cmd_v5->add_modify = cmd_v6->add_modify;
+	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+	memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
+	cmd_v5->sta_id = cmd_v6->sta_id;
+	cmd_v5->modify_mask = cmd_v6->modify_mask;
+	cmd_v5->station_flags = cmd_v6->station_flags;
+	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+	cmd_v5->assoc_id = cmd_v6->assoc_id;
+	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+static void
+iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
+				      struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
+				      u32 mac_id_n_color)
+{
+	memset(sta_cmd, 0, sizeof(*sta_cmd));
+
+	sta_cmd->sta_id = key_cmd->sta_id;
+	sta_cmd->add_modify = STA_MODE_MODIFY;
+	sta_cmd->modify_mask = STA_MODIFY_KEY;
+	sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
+
+	sta_cmd->key.key_offset = key_cmd->key_offset;
+	sta_cmd->key.key_flags = key_cmd->key_flags;
+	memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
+	sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
+	memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
+	       sizeof(sta_cmd->key.tkip_rx_ttak));
+}
+
+static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
+					   struct iwl_mvm_add_sta_cmd_v6 *cmd,
+					   int *status)
+{
+	struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+		return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
+						   cmd, status);
+
+	iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
+					   &cmd_v5, status);
+}
+
+static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
+				    struct iwl_mvm_add_sta_cmd_v6 *cmd)
+{
+	struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+		return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
+					    sizeof(*cmd), cmd);
+
+	iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+	return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
+				    &cmd_v5);
+}
+
+static int
+iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
+				    struct iwl_mvm_add_sta_key_cmd *cmd,
+				    u32 mac_id_n_color,
+				    int *status)
+{
+	struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+		return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
+						   sizeof(*cmd), cmd, status);
+
+	iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
+					   &sta_cmd, status);
+}
+
+static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
+					u32 flags,
+					struct iwl_mvm_add_sta_key_cmd *cmd,
+					u32 mac_id_n_color)
+{
+	struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+		return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
+					    sizeof(*cmd), cmd);
+
+	iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+	return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
+				    &sta_cmd);
+}
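
The four wrappers above share one capability-gated pattern: send the new v6 layout when the firmware advertises IWL_UCODE_TLV_FLAGS_STA_KEY_CMD, otherwise translate field by field into the legacy v5 layout. A sketch of the shape (the struct names and send_pdu() are hypothetical):

static int example_send_sta(int fw_has_v6, const struct cmd_v6 *v6)
{
	struct cmd_v5 v5;

	if (fw_has_v6)
		return send_pdu(v6, sizeof(*v6));

	convert_v6_to_v5(v6, &v5);	/* explicit per-field copy */
	return send_pdu(&v5, sizeof(v5));
}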
+
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
 {
 	int sta_id;
@@ -87,7 +196,7 @@
 			   bool update)
 {
 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd add_sta_cmd;
+	struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
 	int ret;
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@
 		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
 	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
-					  &add_sta_cmd, &status);
+	ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
 	if (ret)
 		return ret;
 
@@ -229,8 +337,12 @@
 		if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 			mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 
-	/* for HW restart - need to reset the seq_number etc... */
-	memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+	/* for HW restart - reset everything but the sequence number */
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		u16 seq = mvm_sta->tid_data[i].seq_number;
+		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+		mvm_sta->tid_data[i].seq_number = seq;
+	}
 
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
 	if (ret)
@@ -256,7 +368,7 @@
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		      bool drain)
 {
-	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_add_sta_cmd_v6 cmd = {};
 	int ret;
 	u32 status;
 
@@ -269,8 +381,7 @@
 	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
 	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-					  &cmd, &status);
+	ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
 	if (ret)
 		return ret;
 
@@ -469,13 +580,13 @@
 				      const u8 *addr,
 				      u16 mac_id, u16 color)
 {
-	struct iwl_mvm_add_sta_cmd cmd;
+	struct iwl_mvm_add_sta_cmd_v6 cmd;
 	int ret;
 	u32 status;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+	memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
 	cmd.sta_id = sta->sta_id;
 	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
 							     color));
@@ -485,8 +596,7 @@
 	if (addr)
 		memcpy(cmd.addr, addr, ETH_ALEN);
 
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-					  &cmd, &status);
+	ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
 	if (ret)
 		return ret;
 
@@ -534,10 +644,14 @@
 			   struct iwl_mvm_int_sta *bsta)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	static const u8 *baddr = _baddr;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		baddr = vif->bss_conf.bssid;
+
 	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -ENOSPC;
 
@@ -614,7 +728,7 @@
 		       int tid, u16 ssn, bool start)
 {
 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_add_sta_cmd_v6 cmd = {};
 	int ret;
 	u32 status;
 
@@ -638,8 +752,7 @@
 				  STA_MODIFY_REMOVE_BA_TID;
 
 	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-					  &cmd, &status);
+	ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
 	if (ret)
 		return ret;
 
@@ -674,7 +787,7 @@
 			      int tid, u8 queue, bool start)
 {
 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_add_sta_cmd_v6 cmd = {};
 	int ret;
 	u32 status;
 
@@ -696,8 +809,7 @@
 	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
 	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-					  &cmd, &status);
+	ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
 	if (ret)
 		return ret;
 
@@ -743,13 +855,13 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
-	     txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+	for (txq_id = mvm->first_agg_queue;
+	     txq_id <= mvm->last_agg_queue; txq_id++)
 		if (mvm->queue_to_mac80211[txq_id] ==
 		    IWL_INVALID_MAC80211_QUEUE)
 			break;
 
-	if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+	if (txq_id > mvm->last_agg_queue) {
 		IWL_ERR(mvm, "Failed to allocate agg queue\n");
 		return -EIO;
 	}
@@ -987,10 +1099,11 @@
 				u32 cmd_flags)
 {
 	__le16 key_flags;
-	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_add_sta_key_cmd cmd = {};
 	int ret, status;
 	u16 keyidx;
 	int i;
+	u32 mac_id_n_color = mvm_sta->mac_id_n_color;
 
 	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
 		 STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
 		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
-		cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
 		for (i = 0; i < 5; i++)
-			cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-		memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+		memcpy(cmd.key, keyconf->key, keyconf->keylen);
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
-		memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+		memcpy(cmd.key, keyconf->key, keyconf->keylen);
 		break;
 	default:
 		WARN_ON(1);
@@ -1017,20 +1130,18 @@
 	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-	cmd.key.key_offset = keyconf->hw_key_idx;
-	cmd.key.key_flags = key_flags;
-	cmd.add_modify = STA_MODE_MODIFY;
-	cmd.modify_mask = STA_MODIFY_KEY;
+	cmd.key_offset = keyconf->hw_key_idx;
+	cmd.key_flags = key_flags;
 	cmd.sta_id = sta_id;
 
 	status = ADD_STA_SUCCESS;
 	if (cmd_flags == CMD_SYNC)
-		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-						  &cmd, &status);
+		ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+							  mac_id_n_color,
+							  &status);
 	else
-		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
-					   sizeof(cmd), &cmd);
+		ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
+						   mac_id_n_color);
 
 	switch (status) {
 	case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@
 			   struct ieee80211_key_conf *keyconf)
 {
 	struct iwl_mvm_sta *mvm_sta;
-	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_add_sta_key_cmd cmd = {};
 	__le16 key_flags;
 	int ret, status;
 	u8 sta_id;
@@ -1252,17 +1363,14 @@
 	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-	cmd.key.key_flags = key_flags;
-	cmd.key.key_offset = keyconf->hw_key_idx;
+	cmd.key_flags = key_flags;
+	cmd.key_offset = keyconf->hw_key_idx;
 	cmd.sta_id = sta_id;
 
-	cmd.modify_mask = STA_MODIFY_KEY;
-	cmd.add_modify = STA_MODE_MODIFY;
-
 	status = ADD_STA_SUCCESS;
-	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-					  &cmd, &status);
+	ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+						  mvm_sta->mac_id_n_color,
+						  &status);
 
 	switch (status) {
 	case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@
 				struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd cmd = {
+	struct iwl_mvm_add_sta_cmd_v6 cmd = {
 		.add_modify = STA_MODE_MODIFY,
 		.sta_id = mvmsta->sta_id,
 		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@
 	};
 	int ret;
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+	ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
 	if (ret)
 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1331,7 +1439,7 @@
 		(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
 			STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
 	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd cmd = {
+	struct iwl_mvm_add_sta_cmd_v6 cmd = {
 		.add_modify = STA_MODE_MODIFY,
 		.sta_id = mvmsta->sta_id,
 		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@
 	int ret;
 
 	/* TODO: somehow the fw doesn't seem to take PS_POLL into account */
-	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+	ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
 	if (ret)
 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 94b265e..4dfc359 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -293,10 +293,6 @@
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;
 
-#ifdef CONFIG_PM_SLEEP
-	u16 last_seq_ctl;
-#endif
-
 	/* Temporary, until the new TLC will control the Tx protection */
 	s8 tx_protection;
 	bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644
index 0000000..eb74391
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+	IWL_MVM_TM_ATTR_UNSPEC,
+	IWL_MVM_TM_ATTR_CMD,
+	IWL_MVM_TM_ATTR_NOA_DURATION,
+	IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+	/* keep last */
+	NUM_IWL_MVM_TM_ATTRS,
+	IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+	IWL_MVM_TM_CMD_SET_NOA,
+	IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
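
These attributes travel inside NL80211_ATTR_TESTDATA; a handler would typically declare a netlink policy and parse them along these lines (a sketch of assumed usage, not the driver's actual table):

static const struct nla_policy example_tm_policy[NUM_IWL_MVM_TM_ATTRS] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};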
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 76a3c17..33cf56f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -387,7 +387,8 @@
 
 void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 			     struct ieee80211_vif *vif,
-			     u32 duration, u32 min_duration)
+			     u32 duration, u32 min_duration,
+			     u32 max_delay)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@
 		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
 
 	time_cmd.max_frags = TE_V2_FRAG_NONE;
-	time_cmd.max_delay = cpu_to_le32(500);
+	time_cmd.max_delay = cpu_to_le32(max_delay);
 	/* TODO: why do we need to interval = bi if it is not periodic? */
 	time_cmd.interval = cpu_to_le32(1);
 	time_cmd.duration = cpu_to_le32(duration);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index f86c510..d9c8d6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -123,6 +123,7 @@
  * @duration: the duration of the session in TU.
  * @min_duration: will start a new session if the current session will end
  *	in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
  *
  * This function can be used to start a session protection which means that the
  * fw will stay on the channel for %duration TU. This function
@@ -133,7 +134,8 @@
  */
 void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 			     struct ieee80211_vif *vif,
-			     u32 duration, u32 min_duration);
+			     u32 duration, u32 min_duration,
+			     u32 max_delay);
 
 /**
  * iwl_mvm_stop_session_protection - cancel the session protection.
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e05440d..43d97c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -417,7 +417,7 @@
 
 	spin_unlock(&mvmsta->lock);
 
-	if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+	if (txq_id < mvm->first_agg_queue)
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -511,16 +511,10 @@
 }
 #endif /* CONFIG_IWLWIFI_DEBUG */
 
-/**
- * translate ucode response to mac80211 tx status control values
- */
-static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
-					 struct ieee80211_tx_info *info)
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+			       enum ieee80211_band band,
+			       struct ieee80211_tx_rate *r)
 {
-	struct ieee80211_tx_rate *r = &info->status.rates[0];
-
-	info->status.antenna =
-		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
 		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
 	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@
 		r->flags |= IEEE80211_TX_RC_VHT_MCS;
 	} else {
 		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
-							     info->band);
+							     band);
 	}
 }
 
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+					struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+	info->status.antenna =
+		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 				     struct iwl_rx_packet *pkt)
 {
@@ -602,11 +609,11 @@
 		}
 
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
-					     info);
+		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+					    info);
 
 		/* Single frame failure in an AMPDU queue => send BAR */
-		if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
+		if (txq_id >= mvm->first_agg_queue &&
 		    !(info->flags & IEEE80211_TX_STAT_ACK))
 			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
@@ -619,7 +626,7 @@
 		ieee80211_tx_status_ni(mvm->hw, skb);
 	}
 
-	if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
+	if (txq_id >= mvm->first_agg_queue) {
 		/* If this is an aggregation queue, we use the ssn since:
 		 * ssn = wifi seq_num % 256.
 		 * The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@
 			iwl_mvm_check_ratid_empty(mvm, sta, tid);
 			spin_unlock_bh(&mvmsta->lock);
 		}
-
-#ifdef CONFIG_PM_SLEEP
-		mvmsta->last_seq_ctl = seq_ctl;
-#endif
 	} else {
 		sta = NULL;
 		mvmsta = NULL;
@@ -681,7 +684,7 @@
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
 	 */
-	if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+	if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
 	    atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
 		if (mvmsta) {
 			/*
@@ -777,7 +780,7 @@
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	struct ieee80211_sta *sta;
 
-	if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
+	if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
 		return;
 
 	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@
 			info->flags |= IEEE80211_TX_STAT_AMPDU;
 			info->status.ampdu_ack_len = ba_notif->txed_2_done;
 			info->status.ampdu_len = ba_notif->txed;
-			iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
-						     info);
+			iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
+						    info);
 		}
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a9c3574..ed69e9b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -466,7 +466,7 @@
 	ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
 	len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-	buf = kzalloc(len, GFP_KERNEL);
+	buf = kzalloc(len, GFP_ATOMIC);
 	if (!buf)
 		return;
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 26108a1..941c0c8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -268,7 +268,7 @@
 #endif /* CONFIG_IWLDVM */
 
 #if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
 	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
@@ -350,6 +350,9 @@
 	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
 	{0}
@@ -391,7 +394,6 @@
 	iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
 	iwl_trans_pcie_free(iwl_trans);
-	pci_set_drvdata(pdev, NULL);
 	return ret;
 }
 
@@ -402,8 +404,6 @@
 
 	iwl_drv_stop(trans_pcie->drv);
 	iwl_trans_pcie_free(trans);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index c3f904d..5d9337b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -220,6 +220,9 @@
 	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
+	/* Clear the interrupt in APMG if the NIC is in RFKILL */
+	iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+
 	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 out:
@@ -443,22 +446,138 @@
 	return ret;
 }
 
+static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+{
+	int shift_param;
+	u32 address;
+	int ret = 0;
+
+	if (cpu == 1) {
+		shift_param = 0;
+		address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+	} else {
+		shift_param = 16;
+		address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+	}
+
+	/* set CPU to started */
+	iwl_trans_set_bits_mask(trans,
+				CSR_UCODE_LOAD_STATUS_ADDR,
+				CSR_CPU_STATUS_LOADING_STARTED << shift_param,
+				1);
+
+	/* set last complete descriptor number */
+	iwl_trans_set_bits_mask(trans,
+				CSR_UCODE_LOAD_STATUS_ADDR,
+				CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
+				<< shift_param,
+				1);
+
+	/* set last loaded block */
+	iwl_trans_set_bits_mask(trans,
+				CSR_UCODE_LOAD_STATUS_ADDR,
+				CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
+				<< shift_param,
+				1);
+
+	/* image loading complete */
+	iwl_trans_set_bits_mask(trans,
+				CSR_UCODE_LOAD_STATUS_ADDR,
+				CSR_CPU_STATUS_LOADING_COMPLETED
+				<< shift_param,
+				1);
+
+	/* set FH_TCSR_0_REG  */
+	iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+
+	/* verify image verification started  */
+	ret = iwl_poll_bit(trans, address,
+			   CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+			   CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+			   CSR_SECURE_TIME_OUT);
+	if (ret < 0) {
+		IWL_ERR(trans, "secure boot process didn't start\n");
+		return ret;
+	}
+
+	/* wait for image verification to complete  */
+	ret = iwl_poll_bit(trans, address,
+			   CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+			   CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+			   CSR_SECURE_TIME_OUT);
+
+	if (ret < 0) {
+		IWL_ERR(trans, "Time out on secure boot process\n");
+		return ret;
+	}
+
+	return 0;
+}
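
iwl_pcie_secure_set() relies on iwl_poll_bit(), which waits until a masked register read matches or the timeout expires. A freestanding sketch of that polling contract (read_reg() and the 10 usec granularity are assumptions):

/* Returns the time waited on success, negative on timeout. */
static int example_poll_bit(unsigned int (*read_reg)(void),
			    unsigned int mask, int timeout_us)
{
	int waited = 0;

	while (waited < timeout_us) {
		if ((read_reg() & mask) == mask)
			return waited;
		udelay(10);
		waited += 10;
	}
	return -ETIMEDOUT;
}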
+
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 				const struct fw_img *image)
 {
 	int i, ret = 0;
 
-	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+	IWL_DEBUG_FW(trans,
+		     "working with %s image\n",
+		     image->is_secure ? "Secured" : "Non-secured");
+	IWL_DEBUG_FW(trans,
+		     "working with %s CPU\n",
+		     image->is_dual_cpus ? "Dual" : "Single");
+
+	/* configure the ucode to be ready to get the secured image */
+	if (image->is_secure) {
+		/* set secure boot inspector addresses */
+		iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
+		iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
+
+		/* release CPU1 reset if secure inspector image burned in OTP */
+		iwl_write32(trans, CSR_RESET, 0);
+	}
+
+	/* load to FW the binary sections of CPU1 */
+	IWL_DEBUG_INFO(trans, "Loading CPU1\n");
+	for (i = 0;
+	     i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+	     i++) {
 		if (!image->sec[i].data)
 			break;
-
 		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
 		if (ret)
 			return ret;
 	}
 
-	/* Remove all resets to allow NIC to operate */
-	iwl_write32(trans, CSR_RESET, 0);
+	/* configure the ucode to start secure process on CPU1 */
+	if (image->is_secure) {
+		/* config CPU1 to start secure protocol */
+		ret = iwl_pcie_secure_set(trans, 1);
+		if (ret)
+			return ret;
+	} else {
+		/* Remove all resets to allow NIC to operate */
+		iwl_write32(trans, CSR_RESET, 0);
+	}
+
+	if (image->is_dual_cpus) {
+		/* load to FW the binary sections of CPU2 */
+		IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
+		for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+			i < IWL_UCODE_SECTION_MAX; i++) {
+			if (!image->sec[i].data)
+				break;
+			ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+			if (ret)
+				return ret;
+		}
+
+		if (image->is_secure) {
+			/* set CPU2 for secure protocol */
+			ret = iwl_pcie_secure_set(trans, 2);
+			if (ret)
+				return ret;
+		}
+	}
 
 	return 0;
 }
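
The loader above splits the ucode section array at IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU: sections before the marker belong to CPU1, sections after it to CPU2, and either half stops at the first empty slot. A sketch of the split loop (struct section and load() are illustrative):

static int example_load_cpu(const struct section *sec, int first, int last,
			    int (*load)(const struct section *))
{
	int i, ret;

	for (i = first; i < last; i++) {
		if (!sec[i].data)	/* empty slot ends this CPU's image */
			break;
		ret = load(&sec[i]);
		if (ret)
			return ret;
	}
	return 0;
}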
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 1424335..059c5ac 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1465,7 +1465,8 @@
 	spin_unlock_bh(&txq->lock);
 }
 
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
+#define COMMAND_POKE_TIMEOUT	(HZ / 10)
 
 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
 				    struct iwl_host_cmd *cmd)
@@ -1493,16 +1494,16 @@
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int cmd_idx;
 	int ret;
+	int timeout = HOST_COMPLETE_TIMEOUT;
 
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
 		       get_cmd_string(trans_pcie, cmd->id));
 
-	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
-				     &trans_pcie->status))) {
-		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			get_cmd_string(trans_pcie, cmd->id));
+	if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
+				  &trans_pcie->status),
+		 "Command %s: a command is already active!\n",
+		 get_cmd_string(trans_pcie, cmd->id)))
 		return -EIO;
-	}
 
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
 		       get_cmd_string(trans_pcie, cmd->id));
@@ -1517,10 +1518,29 @@
 		return ret;
 	}
 
-	ret = wait_event_timeout(trans_pcie->wait_command_queue,
-				 !test_bit(STATUS_HCMD_ACTIVE,
-					   &trans_pcie->status),
-				 HOST_COMPLETE_TIMEOUT);
+	while (timeout > 0) {
+		unsigned long flags;
+
+		timeout -= COMMAND_POKE_TIMEOUT;
+		ret = wait_event_timeout(trans_pcie->wait_command_queue,
+					 !test_bit(STATUS_HCMD_ACTIVE,
+						   &trans_pcie->status),
+					 COMMAND_POKE_TIMEOUT);
+		if (ret)
+			break;
+		/* poke the device - it may have lost the command */
+		if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+			iwl_trans_release_nic_access(trans, &flags);
+			IWL_DEBUG_INFO(trans,
+				       "Tried to wake NIC for command %s\n",
+				       get_cmd_string(trans_pcie, cmd->id));
+		} else {
+			IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+				get_cmd_string(trans_pcie, cmd->id));
+			break;
+		}
+	}
+
 	if (!ret) {
 		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			struct iwl_txq *txq =
@@ -1541,6 +1561,9 @@
 				       "Clearing HCMD_ACTIVE for command %s\n",
 				       get_cmd_string(trans_pcie, cmd->id));
 			ret = -ETIMEDOUT;
+
+			iwl_op_mode_nic_error(trans->op_mode);
+
 			goto cancel;
 		}
 	}
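
The sync-command path above replaces one long wait with short slices, "poking" the NIC between slices so a command lost while the device slept still completes or fails fast. The loop reduced to its essentials (wait_done() and poke_nic() are hypothetical helpers):

static int example_wait_cmd(int total_jiffies, int slice)
{
	while (total_jiffies > 0) {
		if (wait_done(slice))	/* true once the cmd completed */
			return 0;
		if (!poke_nic())	/* grab/release NIC access to wake it */
			break;
		total_jiffies -= slice;
	}
	return -ETIMEDOUT;
}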
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index c0f9e7e..51b92b5 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -53,6 +53,11 @@
 
 	/* Firmware found! */
 	lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+	if (priv->helper_fw) {
+		release_firmware(priv->helper_fw);
+		priv->helper_fw = NULL;
+	}
+	release_firmware(firmware);
 }
 
 static void helper_firmware_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index c94dd68..ef8c98e 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -754,14 +754,14 @@
 	if (ret == 0 && (card->model != MODEL_8305))
 		ret = if_cs_prog_real(card, mainfw);
 	if (ret)
-		goto out;
+		return;
 
 	/* Now actually get the IRQ */
 	ret = request_irq(card->p_dev->irq, if_cs_interrupt,
 		IRQF_SHARED, DRV_NAME, card);
 	if (ret) {
 		pr_err("error in request_irq\n");
-		goto out;
+		return;
 	}
 
 	/*
@@ -777,10 +777,6 @@
 		pr_err("could not activate card\n");
 		free_irq(card->p_dev->irq, card);
 	}
-
-out:
-	release_firmware(helper);
-	release_firmware(mainfw);
 }
 
 
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 4557833..991238a 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -708,20 +708,16 @@
 
 	ret = if_sdio_prog_helper(card, helper);
 	if (ret)
-		goto out;
+		return;
 
 	lbs_deb_sdio("Helper firmware loaded\n");
 
 	ret = if_sdio_prog_real(card, mainfw);
 	if (ret)
-		goto out;
+		return;
 
 	lbs_deb_sdio("Firmware loaded\n");
 	if_sdio_finish_power_on(card);
-
-out:
-	release_firmware(helper);
-	release_firmware(mainfw);
 }
 
 static int if_sdio_prog_firmware(struct if_sdio_card *card)
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574..8366915 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1094,11 +1094,7 @@
 		goto out;
 
 out:
-	release_firmware(helper);
-	release_firmware(mainfw);
-
 	lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
-
 	return err;
 }
 
@@ -1128,7 +1124,7 @@
 {
 	struct if_spi_card *card;
 	struct lbs_private *priv = NULL;
-	struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+	struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
 	int err = 0;
 
 	lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 2798077..dff08a2 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -844,7 +844,7 @@
 	cardp->fw = fw;
 	if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
 		ret = -EINVAL;
-		goto release_fw;
+		goto done;
 	}
 
 	/* Cancel any pending usb business */
@@ -861,7 +861,7 @@
 	if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
 		lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
 		ret = -EIO;
-		goto release_fw;
+		goto done;
 	}
 
 	cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@
 		usb_kill_urb(cardp->tx_urb);
 		if (if_usb_submit_rx_urb(cardp) < 0)
 			ret = -EIO;
-		goto release_fw;
+		goto done;
 	} else if (cardp->bootcmdresp <= 0) {
 		if (--reset_count >= 0) {
 			if_usb_reset_device(cardp);
 			goto restart;
 		}
 		ret = -EIO;
-		goto release_fw;
+		goto done;
 	}
 
 	i = 0;
@@ -921,14 +921,14 @@
 
 		pr_info("FW download failure, time = %d ms\n", i * 100);
 		ret = -EIO;
-		goto release_fw;
+		goto done;
 	}
 
 	cardp->priv->fw_ready = 1;
 	if_usb_submit_rx_urb(cardp);
 
 	if (lbs_start_card(priv))
-		goto release_fw;
+		goto done;
 
 	if_usb_setup_firmware(priv);
 
@@ -939,11 +939,8 @@
 	if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
 		priv->ehs_remove_supported = false;
 
- release_fw:
-	release_firmware(cardp->fw);
-	cardp->fw = NULL;
-
  done:
+	cardp->fw = NULL;
 	lbs_deb_leave(LBS_DEB_USB);
 }
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2cd3f54..de0df86 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -167,6 +167,7 @@
 	u32 magic;
 	u8 bssid[ETH_ALEN];
 	bool assoc;
+	bool bcn_en;
 	u16 aid;
 };
 
@@ -1170,6 +1171,16 @@
 	*total_flags = data->rx_filter;
 }
 
+static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	unsigned int *count = data;
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+	if (vp->bcn_en)
+		(*count)++;
+}
+
 static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
 					    struct ieee80211_vif *vif,
 					    struct ieee80211_bss_conf *info,
@@ -1180,7 +1191,8 @@
 
 	hwsim_check_magic(vif);
 
-	wiphy_debug(hw->wiphy, "%s(changed=0x%x)\n", __func__, changed);
+	wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+		    __func__, changed, vif->addr);
 
 	if (changed & BSS_CHANGED_BSSID) {
 		wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
@@ -1202,6 +1214,7 @@
 
 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
 		wiphy_debug(hw->wiphy, "  BCN EN: %d\n", info->enable_beacon);
+		vp->bcn_en = info->enable_beacon;
 		if (data->started &&
 		    !hrtimer_is_queued(&data->beacon_timer.timer) &&
 		    info->enable_beacon) {
@@ -1215,8 +1228,16 @@
 			tasklet_hrtimer_start(&data->beacon_timer,
 					      ns_to_ktime(until_tbtt * 1000),
 					      HRTIMER_MODE_REL);
-		} else if (!info->enable_beacon)
-			tasklet_hrtimer_cancel(&data->beacon_timer);
+		} else if (!info->enable_beacon) {
+			unsigned int count = 0;
+			ieee80211_iterate_active_interfaces(
+				data->hw, IEEE80211_IFACE_ITER_NORMAL,
+				mac80211_hwsim_bcn_en_iter, &count);
+			wiphy_debug(hw->wiphy, "  beaconing vifs remaining: %u",
+				    count);
+			if (count == 0)
+				tasklet_hrtimer_cancel(&data->beacon_timer);
+		}
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index a6c46f3..e47f4e3 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@
 	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
 	unsigned long cmd_flags;
 	unsigned long scan_pending_q_flags;
-	uint16_t cancel_scan_cmd = false;
+	bool cancel_scan_cmd = false;
 
 	if ((adapter->curr_cmd) &&
 	    (adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 37f873b..4e4686e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@
 	int ret = 0;
 	struct ieee_types_assoc_rsp *assoc_rsp;
 	struct mwifiex_bssdescriptor *bss_desc;
-	u8 enable_data = true;
+	bool enable_data = true;
 	u16 cap_info, status_code;
 
 	assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index c2b91f5..9d7c9d3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -882,7 +882,9 @@
 	adapter->cmd_wait_q.status = 0;
 	adapter->scan_wait_q_woken = false;
 
-	adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+	adapter->workqueue =
+		alloc_workqueue("MWIFIEX_WORK_QUEUE",
+				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
 	if (!adapter->workqueue)
 		goto err_kmalloc;
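
For context on the switch above: WQ_HIGHPRI queues work to a high-priority worker pool, WQ_MEM_RECLAIM guarantees forward progress under memory pressure via a dedicated rescuer thread, WQ_UNBOUND unpins work from the submitting CPU, and the trailing 1 limits the queue to one in-flight work item. The call in isolation:

static struct workqueue_struct *example_create_wq(void)
{
	return alloc_workqueue("example_wq",
			       WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
}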
 
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee..33fa943 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (pdev) {
-		card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+		card = pci_get_drvdata(pdev);
 		if (!card || !card->adapter) {
 			pr_err("Card or adapter structure is not valid\n");
 			return 0;
@@ -128,7 +128,7 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (pdev) {
-		card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+		card = pci_get_drvdata(pdev);
 		if (!card || !card->adapter) {
 			pr_err("Card or adapter structure is not valid\n");
 			return 0;
@@ -2037,7 +2037,7 @@
 		goto exit;
 	}
 
-	card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+	card = pci_get_drvdata(pdev);
 	if (!card || !card->adapter) {
 		pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
 			 card ? card->adapter : NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b5..7d66018 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -327,7 +327,7 @@
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
-	u16 hs_activate = false;
+	bool hs_activate = false;
 
 	if (!hscfg_param)
 		/* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 95fa359..5dd0ccc 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@
 {
 	u8 *curr = (u8 *) &resp->params.get_wmm_status;
 	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
-	int valid = true;
+	bool valid = true;
 
 	struct mwifiex_ie_types_data *tlv_hdr;
 	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 644d6e0..0f129d4 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -83,11 +83,10 @@
 }
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
-					struct sk_buff *skb);
+				 struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
-			      struct mwifiex_ra_list_tbl *ra,
-			      int tid);
+			      struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@
 			    struct mwifiex_ra_list_tbl *ra_list, int tid);
 
 u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
-					     const struct sk_buff *skb);
+				     const struct sk_buff *skb);
 void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
 
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
-						 u8 **assoc_buf,
-						 struct ieee_types_wmm_parameter
-						 *wmmie,
-						 struct ieee80211_ht_cap
-						 *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+					u8 **assoc_buf,
+					struct ieee_types_wmm_parameter *wmmie,
+					struct ieee80211_ht_cap *htcap);
 
 void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
-					struct ieee_types_wmm_parameter
-					*wmm_ie);
+					struct ieee_types_wmm_parameter *wmm_ie);
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
-				      const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+			       const struct host_cmd_ds_command *resp);
 
 #endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd..b953ad6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@
 	if (priv->sram != NULL)
 		pci_iounmap(pdev, priv->sram);
 
-	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
 
 err_free_reg:
@@ -6147,7 +6146,6 @@
 unmap:
 	pci_iounmap(pdev, priv->regs);
 	pci_iounmap(pdev, priv->sram);
-	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 3bb936b..eebd2be 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -182,23 +182,20 @@
 /* Exported prototypes                                              */
 /********************************************************************/
 
-extern struct orinoco_private *alloc_orinocodev(
-	int sizeof_card, struct device *device,
-	int (*hard_reset)(struct orinoco_private *),
-	int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
-			  unsigned long base_addr,
-			  unsigned int irq,
-			  const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+					 int (*hard_reset)(struct orinoco_private *),
+					 int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+		   unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
 
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
 
 int orinoco_process_xmit_skb(struct sk_buff *skb,
 			     struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6..ffb2469 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@
 	free_irq(pdev->irq, priv);
 
  fail_irq:
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 
  fail_alloc:
@@ -265,7 +264,6 @@
 
 	orinoco_if_del(priv);
 	free_irq(pdev->irq, priv);
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
 	pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14..5ae1191 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@
 	free_irq(pdev->irq, priv);
 
  fail_irq:
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 
  fail_alloc:
@@ -205,7 +204,6 @@
 
 	orinoco_if_del(priv);
 	free_irq(pdev->irq, priv);
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
 	pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd..bbd36d1 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@
 	free_irq(pdev->irq, priv);
 
  fail_irq:
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 
  fail_alloc:
@@ -301,7 +300,6 @@
 
 	orinoco_if_del(priv);
 	free_irq(pdev->irq, priv);
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
 	pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeee..04b08de 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@
 	free_irq(pdev->irq, priv);
 
  fail_irq:
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 
  fail_alloc:
@@ -195,7 +194,6 @@
 
 	orinoco_if_del(priv);
 	free_irq(pdev->irq, priv);
-	pci_set_drvdata(pdev, NULL);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
 	pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8..f9a07b0 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@
 	iounmap(priv->map);
 
  err_free_dev:
-	pci_set_drvdata(pdev, NULL);
 	p54_free_common(dev);
 
  err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f2..de15171 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@
 	gpio_direction_input(p54spi_gpio_irq);
 
 	ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
-			  p54spi_interrupt, IRQF_DISABLED, "p54spi",
+			  p54spi_interrupt, 0, "p54spi",
 			  priv->spi);
 	if (ret < 0) {
 		dev_err(&priv->spi->dev, "request_irq() failed");
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1c22b81..8863a6c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -183,7 +183,7 @@
 	data = r.ptr;
 
 	/* copy this MAC to the bss */
-	memcpy(bss.address, data, 6);
+	memcpy(bss.address, data, ETH_ALEN);
 	kfree(data);
 
 	/* now ask for the corresponding bss */
@@ -531,7 +531,7 @@
 		return -EINVAL;
 
 	/* prepare the structure for the set object */
-	memcpy(&bssid[0], awrq->sa_data, 6);
+	memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
 
 	/* set the bssid -- does this make sense when in AP mode? */
 	rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@
 	int rvalue;
 
 	rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
-	memcpy(awrq->sa_data, r.ptr, 6);
+	memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
 	awrq->sa_family = ARPHRD_ETHER;
 	kfree(r.ptr);
 
@@ -582,7 +582,7 @@
 	size_t wpa_ie_len;
 
 	/* The first entry must be the MAC address */
-	memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+	memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
 	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
 	iwe.cmd = SIOCGIWAP;
 	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@
 			      &((struct sockaddr *) addr)->sa_data);
 	if (!ret)
 		memcpy(priv->ndev->dev_addr,
-		       &((struct sockaddr *) addr)->sa_data, 6);
+		       &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5970ff6..41a16d3 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -837,7 +837,7 @@
 	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
 	ndev->addr_len = ETH_ALEN;
 	/* Get a non-zero dummy MAC address for nameif. Jean II */
-	memcpy(ndev->dev_addr, dummy_mac, 6);
+	memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
 
 	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
 
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index a01606b..056af38 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -682,7 +682,7 @@
 				     isl_oid[GEN_OID_MACADDRESS].size, &res);
 
 	if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
-		memcpy(priv->ndev->dev_addr, res->data, 6);
+		memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
 	else
 		ret = -EIO;
 	if (res)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9..006b8bc 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -58,11 +58,11 @@
 
 config RT2800PCI
 	tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
-	depends on PCI || SOC_RT288X || SOC_RT305X
+	depends on PCI
 	select RT2800_LIB
+	select RT2800_LIB_MMIO
 	select RT2X00_LIB_MMIO
-	select RT2X00_LIB_PCI if PCI
-	select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
+	select RT2X00_LIB_PCI
 	select RT2X00_LIB_FIRMWARE
 	select RT2X00_LIB_CRYPTO
 	select CRC_CCITT
@@ -199,9 +199,30 @@
 
 endif
 
+config RT2800SOC
+	tristate "Ralink WiSoC support"
+	depends on SOC_RT288X || SOC_RT305X
+	select RT2X00_LIB_SOC
+	select RT2X00_LIB_MMIO
+	select RT2X00_LIB_CRYPTO
+	select RT2X00_LIB_FIRMWARE
+	select RT2800_LIB
+	select RT2800_LIB_MMIO
+	---help---
+	  This adds support for Ralink WiSoC devices.
+	  Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
+
+	  When compiled as a module, this driver will be called rt2800soc.
+
 config RT2800_LIB
 	tristate
 
+config RT2800_LIB_MMIO
+	tristate
+	select RT2X00_LIB_MMIO
+	select RT2800_LIB
+
 config RT2X00_LIB_MMIO
 	tristate
 
@@ -219,6 +240,7 @@
 
 config RT2X00_LIB
 	tristate
+	select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
 	boolean
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f069d8b..24a6601 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_RT2X00_LIB_SOC)		+= rt2x00soc.o
 obj-$(CONFIG_RT2X00_LIB_USB)		+= rt2x00usb.o
 obj-$(CONFIG_RT2800_LIB)		+= rt2800lib.o
+obj-$(CONFIG_RT2800_LIB_MMIO)		+= rt2800mmio.o
 obj-$(CONFIG_RT2400PCI)			+= rt2400pci.o
 obj-$(CONFIG_RT2500PCI)			+= rt2500pci.o
 obj-$(CONFIG_RT61PCI)			+= rt61pci.o
@@ -21,3 +22,4 @@
 obj-$(CONFIG_RT2500USB)			+= rt2500usb.o
 obj-$(CONFIG_RT73USB)			+= rt73usb.o
 obj-$(CONFIG_RT2800USB)			+= rt2800usb.o
+obj-$(CONFIG_RT2800SOC)			+= rt2800soc.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3d53a09..38ed9a3 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1261,7 +1261,7 @@
 	 */
 	rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
 	rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
-	rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+	rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
 	    entry->queue->rt2x00dev->rssi_offset;
 	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
 
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5e..aab6b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
  * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
  * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
  * RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
  * RF5360 2.4G 1T1R
  * RF5370 2.4G 1T1R
  * RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
 #define RF3322				0x000c
 #define RF3053				0x000d
 #define RF5592				0x000f
+#define RF3070				0x3070
 #define RF3290				0x3290
 #define RF5360				0x5360
 #define RF5370				0x5370
@@ -122,7 +124,7 @@
 /*
  * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identify the MAC version number.
  */
-#define MAC_CSR0_3290				0x0000
+#define MAC_CSR0_3290			0x0000
 
 /*
  * E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
 /*
  * COEX_CFG_0
  */
-#define COEX_CFG0			0x0040
+#define COEX_CFG0		0x0040
 #define COEX_CFG_ANT		FIELD32(0xff000000)
 /*
  * COEX_CFG_1
  */
-#define COEX_CFG1			0x0044
+#define COEX_CFG1		0x0044
 
 /*
  * COEX_CFG_2
  */
-#define COEX_CFG2			0x0048
+#define COEX_CFG2		0x0048
 #define BT_COEX_CFG1		FIELD32(0xff000000)
 #define BT_COEX_CFG0		FIELD32(0x00ff0000)
 #define WL_COEX_CFG1		FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
 #define PLL_RESERVED_INPUT2	FIELD32(0x0000ff00)
 #define PLL_CONTROL		FIELD32(0x00070000)
 #define PLL_LPF_R1		FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL	FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL	FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL		FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL		FIELD32(0x00c00000)
 #define PLL_CP_CURRENT_CTRL	FIELD32(0x03000000)
 #define PLL_PFD_DELAY_CTRL	FIELD32(0x0c000000)
 #define PLL_LOCK_CTRL		FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@
  */
 #define RFCSR6_R1			FIELD8(0x03)
 #define RFCSR6_R2			FIELD8(0x40)
-#define RFCSR6_TXDIV		FIELD8(0x0c)
+#define RFCSR6_TXDIV			FIELD8(0x0c)
 /* bits for RF3053 */
 #define RFCSR6_VCO_IC			FIELD8(0xc0)
 
@@ -2202,13 +2204,13 @@
  * RFCSR 12:
  */
 #define RFCSR12_TX_POWER		FIELD8(0x1f)
-#define RFCSR12_DR0				FIELD8(0xe0)
+#define RFCSR12_DR0			FIELD8(0xe0)
 
 /*
  * RFCSR 13:
  */
 #define RFCSR13_TX_POWER		FIELD8(0x1f)
-#define RFCSR13_DR0				FIELD8(0xe0)
+#define RFCSR13_DR0			FIELD8(0xe0)
 
 /*
  * RFCSR 15:
@@ -2226,7 +2228,7 @@
 #define RFCSR17_TXMIXER_GAIN		FIELD8(0x07)
 #define RFCSR17_TX_LO1_EN		FIELD8(0x08)
 #define RFCSR17_R			FIELD8(0x20)
-#define RFCSR17_CODE                   FIELD8(0x7f)
+#define RFCSR17_CODE			FIELD8(0x7f)
 
 /* RFCSR 18 */
 #define RFCSR18_XO_TUNE_BYPASS		FIELD8(0x40)
@@ -2449,7 +2451,7 @@
  */
 #define EEPROM_NIC_CONF0_RXPATH		FIELD16(0x000f)
 #define EEPROM_NIC_CONF0_TXPATH		FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE		FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE	FIELD16(0x0f00)
 
 /*
  * EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@
  * DAC_TEST: 0: disable, 1: enable
  */
 #define EEPROM_NIC_CONF1_HW_RADIO		FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC		FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G		FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G		FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC	FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G	FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G	FIELD16(0x0008)
 #define EEPROM_NIC_CONF1_CARDBUS_ACCEL		FIELD16(0x0010)
 #define EEPROM_NIC_CONF1_BW40M_SB_2G		FIELD16(0x0020)
 #define EEPROM_NIC_CONF1_BW40M_SB_5G		FIELD16(0x0040)
 #define EEPROM_NIC_CONF1_WPS_PBC		FIELD16(0x0080)
 #define EEPROM_NIC_CONF1_BW40M_2G		FIELD16(0x0100)
 #define EEPROM_NIC_CONF1_BW40M_5G		FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA		FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA	FIELD16(0x400)
 #define EEPROM_NIC_CONF1_ANT_DIVERSITY		FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC		FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC	FIELD16(0x2000)
 #define EEPROM_NIC_CONF1_BT_COEXIST		FIELD16(0x4000)
 #define EEPROM_NIC_CONF1_DAC_TEST		FIELD16(0x8000)
 
@@ -2521,9 +2523,9 @@
  * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
  * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
  */
-#define EEPROM_NIC_CONF2_RX_STREAM		FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM		FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL		FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM	FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM	FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL	FIELD16(0x0600)
 
 /*
  * EEPROM LNA
@@ -2790,7 +2792,7 @@
 #define MCU_CURRENT			0x36
 #define MCU_LED				0x50
 #define MCU_LED_STRENGTH		0x51
-#define MCU_LED_AG_CONF		0x52
+#define MCU_LED_AG_CONF			0x52
 #define MCU_LED_ACT_CONF		0x53
 #define MCU_LED_LED_POLARITY		0x54
 #define MCU_RADAR			0x60
@@ -2799,7 +2801,7 @@
 #define MCU_FREQ_OFFSET			0x74
 #define MCU_BBP_SIGNAL			0x80
 #define MCU_POWER_SAVE			0x83
-#define MCU_BAND_SELECT		0x91
+#define MCU_BAND_SELECT			0x91
 
 /*
  * MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656..c5738f1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@
 	[EEPROM_LNA]			= 0x0026,
 	[EEPROM_EXT_LNA2]		= 0x0027,
 	[EEPROM_RSSI_BG]		= 0x0028,
-	[EEPROM_TXPOWER_DELTA]		= 0x0028, /* Overlaps with RSSI_BG */
 	[EEPROM_RSSI_BG2]		= 0x0029,
-	[EEPROM_TXMIXER_GAIN_BG]	= 0x0029, /* Overlaps with RSSI_BG2 */
 	[EEPROM_RSSI_A]			= 0x002a,
 	[EEPROM_RSSI_A2]		= 0x002b,
-	[EEPROM_TXMIXER_GAIN_A]		= 0x002b, /* Overlaps with RSSI_A2 */
 	[EEPROM_TXPOWER_BG1]		= 0x0030,
 	[EEPROM_TXPOWER_BG2]		= 0x0037,
 	[EEPROM_EXT_TXPOWER_BG3]	= 0x003e,
@@ -1783,7 +1780,7 @@
 	rt2800_bbp_read(rt2x00dev, 3, &r3);
 
 	if (rt2x00_rt(rt2x00dev, RT3572) &&
-	    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+	    rt2x00_has_cap_bt_coexist(rt2x00dev))
 		rt2800_config_3572bt_ant(rt2x00dev);
 
 	/*
@@ -1795,7 +1792,7 @@
 		break;
 	case 2:
 		if (rt2x00_rt(rt2x00dev, RT3572) &&
-		    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+		    rt2x00_has_cap_bt_coexist(rt2x00dev))
 			rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
 		else
 			rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@
 		break;
 	case 2:
 		if (rt2x00_rt(rt2x00dev, RT3572) &&
-		    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+		    rt2x00_has_cap_bt_coexist(rt2x00dev)) {
 			rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
 			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
 				rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@
 			  rt2x00dev->default_ant.tx_chain_num <= 2);
 	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
-	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
-	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
-	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-	msleep(1);
-	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
-	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
 	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
 	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
 	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@
 	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
 	rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
 	rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
-	if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
 		if (rf->channel <= 14) {
 			rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
 			rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@
 	if (rf->channel <= 14) {
 		int idx = rf->channel-1;
 
-		if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
 			if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
 				/* r55/r59 value array of channel 1~14 */
 				static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@
 	case RF3322:
 		rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
 		break;
+	case RF3070:
 	case RF5360:
 	case RF5370:
 	case RF5372:
@@ -3166,7 +3157,8 @@
 		rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
 	}
 
-	if (rt2x00_rf(rt2x00dev, RF3290) ||
+	if (rt2x00_rf(rt2x00dev, RF3070) ||
+	    rt2x00_rf(rt2x00dev, RF3290) ||
 	    rt2x00_rf(rt2x00dev, RF3322) ||
 	    rt2x00_rf(rt2x00dev, RF5360) ||
 	    rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@
 	if (rf->channel <= 14) {
 		if (!rt2x00_rt(rt2x00dev, RT5390) &&
 		    !rt2x00_rt(rt2x00dev, RT5392)) {
-			if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
-				     &rt2x00dev->cap_flags)) {
+			if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
 				rt2800_bbp_write(rt2x00dev, 82, 0x62);
 				rt2800_bbp_write(rt2x00dev, 75, 0x46);
 			} else {
@@ -3244,7 +3235,7 @@
 		if (rt2x00_rt(rt2x00dev, RT3593))
 			rt2800_bbp_write(rt2x00dev, 83, 0x9a);
 
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_external_lna_a(rt2x00dev))
 			rt2800_bbp_write(rt2x00dev, 75, 0x46);
 		else
 			rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@
 		/* Turn on primary PAs */
 		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
 				   rf->channel > 14);
-		if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_bt_coexist(rt2x00dev))
 			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
 		else
 			rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@
 
 	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
 
-	if (rt2x00_rt(rt2x00dev, RT3572))
+	if (rt2x00_rt(rt2x00dev, RT3572)) {
 		rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
 
-	if (rt2x00_rt(rt2x00dev, RT3593)) {
-		if (rt2x00_is_usb(rt2x00dev)) {
-			rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+		/* AGC init */
+		if (rf->channel <= 14)
+			reg = 0x1c + (2 * rt2x00dev->lna_gain);
+		else
+			reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
 
-			/* Band selection. GPIO #8 controls all paths */
+		rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+	}
+
+	if (rt2x00_rt(rt2x00dev, RT3593)) {
+		rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+
+		/* Band selection */
+		if (rt2x00_is_usb(rt2x00dev) ||
+		    rt2x00_is_pcie(rt2x00dev)) {
+			/* GPIO #8 controls all paths */
 			rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
 			if (rf->channel <= 14)
 				rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
 			else
 				rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+		}
 
+		/* LNA PE control. */
+		if (rt2x00_is_usb(rt2x00dev)) {
+			/* GPIO #4 controls PE0 and PE1,
+			 * GPIO #7 controls PE2
+			 */
 			rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
 			rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
 
-			/* LNA PE control.
-			* GPIO #4 controls PE0 and PE1,
-			* GPIO #7 controls PE2
-			*/
 			rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
 			rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
-			rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+		} else if (rt2x00_is_pcie(rt2x00dev)) {
+			/* GPIO #4 controls PE0, PE1 and PE2 */
+			rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+			rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
 		}
 
+		rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
 		/* AGC init */
 		if (rf->channel <= 14)
 			reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@
 {
 	int delta;
 
-	if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_power_limit(rt2x00dev))
 		return 0;
 
 	/*
@@ -3594,7 +3602,7 @@
 	if (rt2x00_rt(rt2x00dev, RT3593))
 		return min_t(u8, txpower, 0xc);
 
-	if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_power_limit(rt2x00dev)) {
 		/*
 		 * Check if the eirp txpower exceeds txpower_limit.
 		 * We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@
 		rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 		break;
 	case RF3053:
+	case RF3070:
 	case RF3290:
 	case RF5360:
 	case RF5370:
@@ -4405,6 +4414,7 @@
 		    rt2x00_rt(rt2x00dev, RT3290) ||
 		    rt2x00_rt(rt2x00dev, RT3390) ||
 		    rt2x00_rt(rt2x00dev, RT3572) ||
+		    rt2x00_rt(rt2x00dev, RT3593) ||
 		    rt2x00_rt(rt2x00dev, RT5390) ||
 		    rt2x00_rt(rt2x00dev, RT5392) ||
 		    rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@
 		else
 			vgc = 0x2e + rt2x00dev->lna_gain;
 	} else { /* 5GHZ band */
-		if (rt2x00_rt(rt2x00dev, RT3572))
-			vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+		if (rt2x00_rt(rt2x00dev, RT3593))
+			vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
 		else if (rt2x00_rt(rt2x00dev, RT5592))
 			vgc = 0x24 + (2 * rt2x00dev->lna_gain);
 		else {
@@ -4431,11 +4441,17 @@
 				  struct link_qual *qual, u8 vgc_level)
 {
 	if (qual->vgc_level != vgc_level) {
-		if (rt2x00_rt(rt2x00dev, RT5592)) {
+		if (rt2x00_rt(rt2x00dev, RT3572) ||
+		    rt2x00_rt(rt2x00dev, RT3593)) {
+			rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+						       vgc_level);
+		} else if (rt2x00_rt(rt2x00dev, RT5592)) {
 			rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
 			rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
-		} else
+		} else {
 			rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+		}
+
 		qual->vgc_level = vgc_level;
 		qual->vgc_level_reg = vgc_level;
 	}
@@ -4454,17 +4470,35 @@
 
 	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
 		return;
-	/*
-	 * When RSSI is better then -80 increase VGC level with 0x10, except
-	 * for rt5592 chip.
+
+	/* When RSSI is better than a certain threshold, increase VGC
+	 * with a chip specific value in order to improve the balance
+	 * between sensitivity and noise isolation.
 	 */
 
 	vgc = rt2800_get_default_vgc(rt2x00dev);
 
-	if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
-		vgc += 0x20;
-	else if (qual->rssi > -80)
-		vgc += 0x10;
+	switch (rt2x00dev->chip.rt) {
+	case RT3572:
+	case RT3593:
+		if (qual->rssi > -65) {
+			if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+				vgc += 0x20;
+			else
+				vgc += 0x10;
+		}
+		break;
+
+	case RT5592:
+		if (qual->rssi > -65)
+			vgc += 0x20;
+		break;
+
+	default:
+		if (qual->rssi > -80)
+			vgc += 0x10;
+		break;
+	}
 
 	rt2800_set_vgc(rt2x00dev, qual, vgc);
 }
@@ -5489,7 +5523,7 @@
 	ant = (div_mode == 3) ? 1 : 0;
 
 	/* check if this is a Bluetooth combo card */
-	if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
 		u32 reg;
 
 		rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@
 	    rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
 	    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
 	    rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-		if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+		if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
 			rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
 	}
 
@@ -5985,7 +6019,7 @@
 	rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
 	rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
 	rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
-	rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+	rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
 	rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
 
 	if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@
 	rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
 	rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
 
-	rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+	rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
 	rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
 	rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
 	rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@
 	rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
 	rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
 	rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
-	rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+	rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
 
 	rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
 	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@
 	rt2800_rf_init_calibration(rt2x00dev, 2);
 
 	rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
-	rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
 	rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
 	rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
 	rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@
 	u16 word;
 
 	/*
-	 * Initialize all registers.
+	 * Initialize MAC registers.
 	 */
 	if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
 		     rt2800_init_registers(rt2x00dev)))
 		return -EIO;
 
+	/*
+	 * Wait for BBP/RF to wake up.
+	 */
 	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
 		return -EIO;
 
 	/*
-	 * Send signal to firmware during boot time.
+	 * Send a signal at boot time to initialize the firmware.
 	 */
 	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
 	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@
 	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
 	msleep(1);
 
+	/*
+	 * Make sure BBP is up and running.
+	 */
 	if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
 		return -EIO;
 
+	/*
+	 * Initialize BBP/RF registers.
+	 */
 	rt2800_init_bbp(rt2x00dev);
 	rt2800_init_rfcsr(rt2x00dev);
 
@@ -7021,6 +7063,7 @@
 	case RF3022:
 	case RF3052:
 	case RF3053:
+	case RF3070:
 	case RF3290:
 	case RF3320:
 	case RF3322:
@@ -7203,7 +7246,7 @@
 
 /*
  * RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
  */
 static const struct rf_channel rf_vals_3x[] = {
 	{1,  241, 2, 2 },
@@ -7399,72 +7442,6 @@
 	{196, 83, 0, 12, 1},
 };
 
-static const struct rf_channel rf_vals_3053[] = {
-	/* Channel, N, R, K */
-	{1, 241, 2, 2},
-	{2, 241, 2, 7},
-	{3, 242, 2, 2},
-	{4, 242, 2, 7},
-	{5, 243, 2, 2},
-	{6, 243, 2, 7},
-	{7, 244, 2, 2},
-	{8, 244, 2, 7},
-	{9, 245, 2, 2},
-	{10, 245, 2, 7},
-	{11, 246, 2, 2},
-	{12, 246, 2, 7},
-	{13, 247, 2, 2},
-	{14, 248, 2, 4},
-
-	{36, 0x56, 0, 4},
-	{38, 0x56, 0, 6},
-	{40, 0x56, 0, 8},
-	{44, 0x57, 0, 0},
-	{46, 0x57, 0, 2},
-	{48, 0x57, 0, 4},
-	{52, 0x57, 0, 8},
-	{54, 0x57, 0, 10},
-	{56, 0x58, 0, 0},
-	{60, 0x58, 0, 4},
-	{62, 0x58, 0, 6},
-	{64, 0x58, 0, 8},
-
-	{100, 0x5B, 0, 8},
-	{102, 0x5B, 0, 10},
-	{104, 0x5C, 0, 0},
-	{108, 0x5C, 0, 4},
-	{110, 0x5C, 0, 6},
-	{112, 0x5C, 0, 8},
-
-	/* NOTE: Channel 114 has been removed intentionally.
-	 * The EEPROM contains no TX power values for that,
-	 * and it is disabled in the vendor driver as well.
-	 */
-
-	{116, 0x5D, 0, 0},
-	{118, 0x5D, 0, 2},
-	{120, 0x5D, 0, 4},
-	{124, 0x5D, 0, 8},
-	{126, 0x5D, 0, 10},
-	{128, 0x5E, 0, 0},
-	{132, 0x5E, 0, 4},
-	{134, 0x5E, 0, 6},
-	{136, 0x5E, 0, 8},
-	{140, 0x5F, 0, 0},
-
-	{149, 0x5F, 0, 9},
-	{151, 0x5F, 0, 11},
-	{153, 0x60, 0, 1},
-	{157, 0x60, 0, 5},
-	{159, 0x60, 0, 7},
-	{161, 0x60, 0, 9},
-	{165, 0x61, 0, 1},
-	{167, 0x61, 0, 3},
-	{169, 0x61, 0, 5},
-	{171, 0x61, 0, 7},
-	{173, 0x61, 0, 9},
-};
-
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7473,7 +7450,6 @@
 	char *default_power2;
 	char *default_power3;
 	unsigned int i;
-	u16 eeprom;
 	u32 reg;
 
 	/*
@@ -7522,48 +7498,48 @@
 	rt2x00dev->hw->max_report_rates = 7;
 	rt2x00dev->hw->max_rate_tries = 1;
 
-	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-
 	/*
 	 * Initialize hw_mode information.
 	 */
-	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
 
-	if (rt2x00_rf(rt2x00dev, RF2820) ||
-	    rt2x00_rf(rt2x00dev, RF2720)) {
+	switch (rt2x00dev->chip.rf) {
+	case RF2720:
+	case RF2820:
 		spec->num_channels = 14;
 		spec->channels = rf_vals;
-	} else if (rt2x00_rf(rt2x00dev, RF2850) ||
-		   rt2x00_rf(rt2x00dev, RF2750)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
+
+	case RF2750:
+	case RF2850:
 		spec->num_channels = ARRAY_SIZE(rf_vals);
 		spec->channels = rf_vals;
-	} else if (rt2x00_rf(rt2x00dev, RF3020) ||
-		   rt2x00_rf(rt2x00dev, RF2020) ||
-		   rt2x00_rf(rt2x00dev, RF3021) ||
-		   rt2x00_rf(rt2x00dev, RF3022) ||
-		   rt2x00_rf(rt2x00dev, RF3290) ||
-		   rt2x00_rf(rt2x00dev, RF3320) ||
-		   rt2x00_rf(rt2x00dev, RF3322) ||
-		   rt2x00_rf(rt2x00dev, RF5360) ||
-		   rt2x00_rf(rt2x00dev, RF5370) ||
-		   rt2x00_rf(rt2x00dev, RF5372) ||
-		   rt2x00_rf(rt2x00dev, RF5390) ||
-		   rt2x00_rf(rt2x00dev, RF5392)) {
+		break;
+
+	case RF2020:
+	case RF3020:
+	case RF3021:
+	case RF3022:
+	case RF3070:
+	case RF3290:
+	case RF3320:
+	case RF3322:
+	case RF5360:
+	case RF5370:
+	case RF5372:
+	case RF5390:
+	case RF5392:
 		spec->num_channels = 14;
 		spec->channels = rf_vals_3x;
-	} else if (rt2x00_rf(rt2x00dev, RF3052)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
+
+	case RF3052:
+	case RF3053:
 		spec->num_channels = ARRAY_SIZE(rf_vals_3x);
 		spec->channels = rf_vals_3x;
-	} else if (rt2x00_rf(rt2x00dev, RF3053)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
-		spec->num_channels = ARRAY_SIZE(rf_vals_3053);
-		spec->channels = rf_vals_3053;
-	} else if (rt2x00_rf(rt2x00dev, RF5592)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
 
+	case RF5592:
 		rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
 		if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
 			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7572,11 +7548,16 @@
 			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
 			spec->channels = rf_vals_5592_xtal20;
 		}
+		break;
 	}
 
 	if (WARN_ON_ONCE(!spec->channels))
 		return -ENODEV;
 
+	spec->supported_bands = SUPPORT_BAND_2GHZ;
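+	/*
+	 * rf_vals tables that list more than the 14 2.4GHz channels
+	 * also cover the 5GHz band.
+	 */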
+	if (spec->num_channels > 14)
+		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
 	/*
 	 * Initialize HT information.
 	 */
@@ -7591,22 +7572,21 @@
 	    IEEE80211_HT_CAP_SGI_20 |
 	    IEEE80211_HT_CAP_SGI_40;
 
-	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+	if (rt2x00dev->default_ant.tx_chain_num >= 2)
 		spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
 
-	spec->ht.cap |=
-	    rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
-		IEEE80211_HT_CAP_RX_STBC_SHIFT;
+	spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+			IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
 	spec->ht.ampdu_factor = 3;
 	spec->ht.ampdu_density = 4;
 	spec->ht.mcs.tx_params =
 	    IEEE80211_HT_MCS_TX_DEFINED |
 	    IEEE80211_HT_MCS_TX_RX_DIFF |
-	    ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
-		IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	    ((rt2x00dev->default_ant.tx_chain_num - 1) <<
+	     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
 
-	switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+	switch (rt2x00dev->default_ant.rx_chain_num) {
 	case 3:
 		spec->ht.mcs.rx_mask[2] = 0xff;
 	case 2:
@@ -7671,6 +7651,7 @@
 	case RF3320:
 	case RF3052:
 	case RF3053:
+	case RF3070:
 	case RF3290:
 	case RF5360:
 	case RF5370:
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644
index 0000000..a8cc736
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -0,0 +1,873 @@
+/*	Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *	<http://rt2x00.serialmonkey.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the
+ *	Free Software Foundation, Inc.,
+ *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*	Module: rt2800mmio
+ *	Abstract: rt2800 MMIO device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/*
+ * TX descriptor initialization
+ */
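+/* On MMIO devices the TXWI is stored at the start of the skb data. */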
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
+{
+	return (__le32 *) entry->skb->data;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
+
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+			      struct txentry_desc *txdesc)
+{
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+	__le32 *txd = entry_priv->desc;
+	u32 word;
+	const unsigned int txwi_size = entry->queue->winfo_size;
+
+	/*
+	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+	 * must contain a TXWI structure + 802.11 header + padding + 802.11
+	 * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI,
+	 * and SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
+	 * data. This means that LAST_SEC0 is always 0.
+	 */
+
+	/*
+	 * Initialize TX descriptor
+	 */
+	word = 0;
+	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+	rt2x00_desc_write(txd, 0, word);
+
+	word = 0;
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_BURST,
+			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+	rt2x00_desc_write(txd, 1, word);
+
+	word = 0;
+	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+			   skbdesc->skb_dma + txwi_size);
+	rt2x00_desc_write(txd, 2, word);
+
+	word = 0;
+	rt2x00_set_field32(&word, TXD_W3_WIV,
+			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+	rt2x00_desc_write(txd, 3, word);
+
+	/*
+	 * Register descriptor details in skb frame descriptor.
+	 */
+	skbdesc->desc = txd;
+	skbdesc->desc_len = TXD_DESC_SIZE;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
+
+/*
+ * RX control handlers
+ */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+			    struct rxdone_entry_desc *rxdesc)
+{
+	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+	__le32 *rxd = entry_priv->desc;
+	u32 word;
+
+	rt2x00_desc_read(rxd, 3, &word);
+
+	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
+		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+	/*
+	 * Unfortunately we don't know the cipher type used during
+	 * decryption. This prevents us from providing correct
+	 * statistics through debugfs.
+	 */
+	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
+
+	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
+		/*
+		 * Hardware has stripped IV/EIV data from 802.11 frame during
+		 * decryption. Unfortunately the descriptor doesn't contain
+		 * any fields with the EIV/IV data either, so they can't
+		 * be restored by rt2x00lib.
+		 */
+		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+		/*
+		 * The hardware has already checked the Michael Mic and has
+		 * stripped it from the frame. Signal this to mac80211.
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+			rxdesc->flags |= RX_FLAG_DECRYPTED;
+		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+	}
+
+	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
+		rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
+		rxdesc->dev_flags |= RXDONE_L2PAD;
+
+	/*
+	 * Process the RXWI structure that is at the start of the buffer.
+	 */
+	rt2800_process_rxwi(entry, rxdesc);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
+
+/*
+ * Interrupt functions.
+ */
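+/*
+ * Wake the device from powersave: pushing a PS configuration with an
+ * empty flags field (IEEE80211_CONF_PS not set) forces the device out
+ * of sleep.
+ */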
+static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+	struct ieee80211_conf conf = { .flags = 0 };
+	struct rt2x00lib_conf libconf = { .conf = &conf };
+
+	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
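+/*
+ * Check whether the WCID from the tx status report matches the WCID
+ * stored in the TXWI of the given entry.
+ */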
+static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+	__le32 *txwi;
+	u32 word;
+	int wcid, tx_wcid;
+
+	wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+	txwi = rt2800_drv_get_txwi(entry);
+	rt2x00_desc_read(txwi, 1, &word);
+	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+	return (tx_wcid == wcid);
+}
+
+static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+	u32 status = *(u32 *)data;
+
+	/*
+	 * rt2800pci hardware might reorder frames when exchanging traffic
+	 * with multiple BA enabled STAs.
+	 *
+	 * For example, a tx queue
+	 *    [ STA1 | STA2 | STA1 | STA2 ]
+	 * can result in tx status reports
+	 *    [ STA1 | STA1 | STA2 | STA2 ]
+	 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+	 *
+	 * To mitigate this effect, associate the tx status to the first frame
+	 * in the tx queue with a matching wcid.
+	 */
+	if (rt2800mmio_txdone_entry_check(entry, status) &&
+	    !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		/*
+		 * Got a matching frame, associate the tx status with
+		 * the frame
+		 */
+		entry->status = status;
+		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+		return true;
+	}
+
+	/* Check the next frame */
+	return false;
+}
+
+static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
+{
+	u32 status = *(u32 *)data;
+
+	/*
+	 * Find the first frame without tx status and assign this status
+	 * to it, regardless of whether it matches or not.
+	 */
+	if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		/*
+		 * Got a frame, associate the tx status with it
+		 */
+		entry->status = status;
+		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+		return true;
+	}
+
+	/* Check the next frame */
+	return false;
+}
+
+static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
+					      void *data)
+{
+	if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		rt2800_txdone_entry(entry, entry->status,
+				    rt2800mmio_get_txwi(entry));
+		return false;
+	}
+
+	/* No more frames to release */
+	return true;
+}
+
+static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+	u32 status;
+	u8 qid;
+	int max_tx_done = 16;
+
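+	/*
+	 * Process at most 16 tx status reports per invocation and
+	 * return true when the budget was exhausted, so the caller can
+	 * reschedule the tasklet.
+	 */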
+	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
+		qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
+		if (unlikely(qid >= QID_RX)) {
+			/*
+			 * Unknown queue, this shouldn't happen. Just drop
+			 * this tx status.
+			 */
+			rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
+				    qid);
+			break;
+		}
+
+		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+		if (unlikely(queue == NULL)) {
+			/*
+			 * The queue is NULL, this shouldn't happen. Stop
+			 * processing here and drop the tx status
+			 */
+			rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
+				    qid);
+			break;
+		}
+
+		if (unlikely(rt2x00queue_empty(queue))) {
+			/*
+			 * The queue is empty. Stop processing here
+			 * and drop the tx status.
+			 */
+			rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+				    qid);
+			break;
+		}
+
+		/*
+		 * Let's associate this tx status with the first
+		 * matching frame.
+		 */
+		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+						Q_INDEX, &status,
+						rt2800mmio_txdone_find_entry)) {
+			/*
+			 * We cannot match the tx status to any frame, so just
+			 * use the first one.
+			 */
+			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+							Q_INDEX, &status,
+							rt2800mmio_txdone_match_first)) {
+				rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
+					    qid);
+				break;
+			}
+		}
+
+		/*
+		 * Release all frames with a valid tx status.
+		 */
+		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+					   Q_INDEX, NULL,
+					   rt2800mmio_txdone_release_entries);
+
+		if (--max_tx_done == 0)
+			break;
+	}
+
+	return !max_tx_done;
+}
+
+static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+					       struct rt2x00_field32 irq_field)
+{
+	u32 reg;
+
+	/*
+	 * Enable a single interrupt. The interrupt mask register
+	 * access needs locking.
+	 */
+	spin_lock_irq(&rt2x00dev->irqmask_lock);
+	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+	rt2x00_set_field32(&reg, irq_field, 1);
+	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+	spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+	if (rt2800mmio_txdone(rt2x00dev))
+		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+	/*
+	 * No need to enable the tx status interrupt here as we always
+	 * leave it enabled to minimize the possibility of a tx status
+	 * register overflow. See comment in interrupt handler.
+	 */
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
+
+void rt2800mmio_pretbtt_tasklet(unsigned long data)
+{
+	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+	rt2x00lib_pretbtt(rt2x00dev);
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
+
+void rt2800mmio_tbtt_tasklet(unsigned long data)
+{
+	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+	u32 reg;
+
+	rt2x00lib_beacondone(rt2x00dev);
+
+	if (rt2x00dev->intf_ap_count) {
+		/*
+		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
+		 * causing beacon skew and, as a result, problems with some
+		 * powersaving clients over time. Shorten the beacon
+		 * interval every 64 beacons by 64us to mitigate this effect.
+		 */
+		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+					   (rt2x00dev->beacon_int * 16) - 1);
+			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+					   (rt2x00dev->beacon_int * 16));
+			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+		}
+		drv_data->tbtt_tick++;
+		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+	}
+
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
+
+void rt2800mmio_rxdone_tasklet(unsigned long data)
+{
+	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+	if (rt2x00mmio_rxdone(rt2x00dev))
+		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
+
+void rt2800mmio_autowake_tasklet(unsigned long data)
+{
+	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+	rt2800mmio_wakeup(rt2x00dev);
+	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		rt2800mmio_enable_interrupt(rt2x00dev,
+					    INT_MASK_CSR_AUTO_WAKEUP);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
+
+static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+	u32 status;
+	int i;
+
+	/*
+	 * The TX_FIFO_STATUS interrupt needs special care: we must
+	 * read TX_STA_FIFO immediately, otherwise the register can
+	 * overflow and we would lose status reports.
+	 *
+	 * Hence, read the TX_STA_FIFO register and copy all tx status
+	 * reports into a kernel FIFO which is handled in the txstatus
+	 * tasklet. We use a tasklet to process the tx status reports
+	 * because we can schedule the tasklet multiple times (when the
+	 * interrupt fires again during tx status processing).
+	 *
+	 * Furthermore we don't disable the TX_FIFO_STATUS
+	 * interrupt here but leave it enabled so that the TX_STA_FIFO
+	 * can also be read while the tx status tasklet gets executed.
+	 *
+	 * Since we have only one producer and one consumer we don't
+	 * need to lock the kfifo.
+	 */
+	for (i = 0; i < rt2x00dev->tx->limit; i++) {
+		rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+
+		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
+			break;
+
+		if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
+			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
+			break;
+		}
+	}
+
+	/* Schedule the tasklet for processing the tx status. */
+	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+}
+
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
+{
+	struct rt2x00_dev *rt2x00dev = dev_instance;
+	u32 reg, mask;
+
+	/* Read status and ACK all interrupts */
+	rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+	if (!reg)
+		return IRQ_NONE;
+
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		return IRQ_HANDLED;
+
+	/*
+	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+	 * for interrupts and interrupt masks we can just use the value of
+	 * INT_SOURCE_CSR to create the interrupt mask.
+	 */
+	mask = ~reg;
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+		rt2800mmio_txstatus_interrupt(rt2x00dev);
+		/*
+		 * Never disable the TX_FIFO_STATUS interrupt.
+		 */
+		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+	}
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+		tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+	/*
+	 * Disable all interrupts for which a tasklet was scheduled right
+	 * now; the tasklets will re-enable the appropriate interrupts.
+	 */
+	spin_lock(&rt2x00dev->irqmask_lock);
+	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+	reg &= mask;
+	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+	spin_unlock(&rt2x00dev->irqmask_lock);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
+
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+			   enum dev_state state)
+{
+	u32 reg;
+	unsigned long flags;
+
+	/*
+	 * When interrupts are being enabled, first clear the interrupt
+	 * source register to ensure a clean state.
+	 */
+	if (state == STATE_RADIO_IRQ_ON) {
+		rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+	}
+
+	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+	reg = 0;
+	if (state == STATE_RADIO_IRQ_ON) {
+		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+	}
+	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+	if (state == STATE_RADIO_IRQ_OFF) {
+		/*
+		 * Wait for possibly running tasklets to finish.
+		 */
+		tasklet_kill(&rt2x00dev->txstatus_tasklet);
+		tasklet_kill(&rt2x00dev->rxdone_tasklet);
+		tasklet_kill(&rt2x00dev->autowake_tasklet);
+		tasklet_kill(&rt2x00dev->tbtt_tasklet);
+		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
+
+/*
+ * Queue handlers.
+ */
+void rt2800mmio_start_queue(struct data_queue *queue)
+{
+	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+	u32 reg;
+
+	switch (queue->qid) {
+	case QID_RX:
+		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+		break;
+	case QID_BEACON:
+		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+
+void rt2800mmio_kick_queue(struct data_queue *queue)
+{
+	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+	struct queue_entry *entry;
+
+	switch (queue->qid) {
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		entry = rt2x00queue_get_entry(queue, Q_INDEX);
+		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
+					  entry->entry_idx);
+		break;
+	case QID_MGMT:
+		entry = rt2x00queue_get_entry(queue, Q_INDEX);
+		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
+					  entry->entry_idx);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+
+void rt2800mmio_stop_queue(struct data_queue *queue)
+{
+	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+	u32 reg;
+
+	switch (queue->qid) {
+	case QID_RX:
+		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+		break;
+	case QID_BEACON:
+		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+		/*
+		 * Wait for any current invocation to finish. The tasklets
+		 * won't be scheduled again afterwards since we disabled
+		 * the TBTT and PRE TBTT timers.
+		 */
+		tasklet_kill(&rt2x00dev->tbtt_tasklet);
+		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
+
+void rt2800mmio_queue_init(struct data_queue *queue)
+{
+	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+	unsigned short txwi_size, rxwi_size;
+
+	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
+	switch (queue->qid) {
+	case QID_RX:
+		queue->limit = 128;
+		queue->data_size = AGGREGATION_SIZE;
+		queue->desc_size = RXD_DESC_SIZE;
+		queue->winfo_size = rxwi_size;
+		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+		break;
+
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		queue->limit = 64;
+		queue->data_size = AGGREGATION_SIZE;
+		queue->desc_size = TXD_DESC_SIZE;
+		queue->winfo_size = txwi_size;
+		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+		break;
+
+	case QID_BEACON:
+		queue->limit = 8;
+		queue->data_size = 0; /* No DMA required for beacons */
+		queue->desc_size = TXD_DESC_SIZE;
+		queue->winfo_size = txwi_size;
+		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+		break;
+
+	case QID_ATIM:
+		/* fallthrough */
+	default:
+		BUG();
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
+
+/*
+ * Initialization functions.
+ */
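+/*
+ * Both RX and TX entries are considered pending as long as their
+ * DMA_DONE bit is not set.
+ */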
+bool rt2800mmio_get_entry_state(struct queue_entry *entry)
+{
+	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+	} else {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
+
+void rt2800mmio_clear_entry(struct queue_entry *entry)
+{
+	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 0, &word);
+		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+		rt2x00_desc_write(entry_priv->desc, 0, word);
+
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+
+		/*
+		 * Set RX IDX in register to inform hardware that we have
+		 * handled this entry and it is available for reuse again.
+		 */
+		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+					  entry->entry_idx);
+	} else {
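+		/*
+		 * Mark the TX descriptor as done so that it is treated
+		 * as free and is not (re)sent by the hardware.
+		 */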
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+	}
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
+
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct queue_entry_priv_mmio *entry_priv;
+
+	/*
+	 * Initialize registers.
+	 */
+	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
+				  entry_priv->desc_dma);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
+				  rt2x00dev->tx[0].limit);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
+				  entry_priv->desc_dma);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
+				  rt2x00dev->tx[1].limit);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
+				  entry_priv->desc_dma);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
+				  rt2x00dev->tx[2].limit);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
+				  entry_priv->desc_dma);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
+				  rt2x00dev->tx[3].limit);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
+	entry_priv = rt2x00dev->rx->entries[0].priv_data;
+	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
+				  entry_priv->desc_dma);
+	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
+				  rt2x00dev->rx[0].limit);
+	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+				  rt2x00dev->rx[0].limit - 1);
+	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+	rt2800_disable_wpdma(rt2x00dev);
+
+	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
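The four TX register blocks above differ only in the queue index. With the TX_BASE_PTR()/TX_MAX_CNT()/TX_CTX_IDX()/TX_DTX_IDX() macros from rt2800mmio.h (added below), they could equally be written as a loop; a sketch, not what the patch actually does:

	struct queue_entry_priv_mmio *entry_priv;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		entry_priv = rt2x00dev->tx[i].entries[0].priv_data;
		rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR(i),
					  entry_priv->desc_dma);
		rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT(i),
					  rt2x00dev->tx[i].limit);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(i), 0);
		rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX(i), 0);
	}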
+
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	/*
+	 * Reset DMA indexes
+	 */
+	rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	if (rt2x00_is_pcie(rt2x00dev) &&
+	    (rt2x00_rt(rt2x00dev, RT3090) ||
+	     rt2x00_rt(rt2x00dev, RT3390) ||
+	     rt2x00_rt(rt2x00dev, RT3572) ||
+	     rt2x00_rt(rt2x00dev, RT3593) ||
+	     rt2x00_rt(rt2x00dev, RT5390) ||
+	     rt2x00_rt(rt2x00dev, RT5392) ||
+	     rt2x00_rt(rt2x00dev, RT5592))) {
+		rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
+	}
+
+	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	reg = 0;
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
+
+/*
+ * Device state switch handlers.
+ */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	/* Wait for DMA, ignore the error until the queues are initialized. */
+	rt2800_wait_wpdma_ready(rt2x00dev);
+
+	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
+		return -EIO;
+
+	return rt2800_enable_radio(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2800 MMIO library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644
index 0000000..6a10de3
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -0,0 +1,165 @@
+/*	Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *	<http://rt2x00.serialmonkey.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the
+ *	Free Software Foundation, Inc.,
+ *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*	Module: rt2800mmio
+ *	Abstract: forward declarations for the rt2800mmio module.
+ */
+
+#ifndef RT2800MMIO_H
+#define RT2800MMIO_H
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET	0x10
+#define TX_BASE_PTR(__x)	(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x)		(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x)		(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x)		(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE			(4 * sizeof(__le32))
+#define RXD_DESC_SIZE			(4 * sizeof(__le32))
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
+#define TXD_W1_BURST			FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ *       0:MGMT, 1:HCCA, 2:EDCA
+ */
+#define TXD_W3_WIV			FIELD32(0x01000000)
+#define TXD_W3_QSEL			FIELD32(0x06000000)
+#define TXD_W3_TCO			FIELD32(0x20000000)
+#define TXD_W3_UCO			FIELD32(0x40000000)
+#define TXD_W3_ICO			FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1			FIELD32(0x00003fff)
+#define RXD_W1_SDL0			FIELD32(0x3fff0000)
+#define RXD_W1_LS0			FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame has been decrypted by the hardware.
+ */
+#define RXD_W3_BA			FIELD32(0x00000001)
+#define RXD_W3_DATA			FIELD32(0x00000002)
+#define RXD_W3_NULLDATA			FIELD32(0x00000004)
+#define RXD_W3_FRAG			FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME		FIELD32(0x00000010)
+#define RXD_W3_MULTICAST		FIELD32(0x00000020)
+#define RXD_W3_BROADCAST		FIELD32(0x00000040)
+#define RXD_W3_MY_BSS			FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR		FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR		FIELD32(0x00000600)
+#define RXD_W3_AMSDU			FIELD32(0x00000800)
+#define RXD_W3_HTC			FIELD32(0x00001000)
+#define RXD_W3_RSSI			FIELD32(0x00002000)
+#define RXD_W3_L2PAD			FIELD32(0x00004000)
+#define RXD_W3_AMPDU			FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED		FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL		FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI		FIELD32(0x00040000)
+
+/* TX descriptor initialization */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+			      struct txentry_desc *txdesc);
+
+/* RX control handlers */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+			    struct rxdone_entry_desc *rxdesc);
+
+/* Interrupt functions */
+void rt2800mmio_txstatus_tasklet(unsigned long data);
+void rt2800mmio_pretbtt_tasklet(unsigned long data);
+void rt2800mmio_tbtt_tasklet(unsigned long data);
+void rt2800mmio_rxdone_tasklet(unsigned long data);
+void rt2800mmio_autowake_tasklet(unsigned long data);
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+			   enum dev_state state);
+
+/* Queue handlers */
+void rt2800mmio_start_queue(struct data_queue *queue);
+void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_stop_queue(struct data_queue *queue);
+void rt2800mmio_queue_init(struct data_queue *queue);
+
+/* Initialization functions */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry);
+void rt2800mmio_clear_entry(struct queue_entry *entry);
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
+
+/* Device state switch handlers. */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2800MMIO_H */
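The queue register offset macros at the top of this header encode the hardware layout: each TX queue's register bank sits TX_QUEUE_REG_OFFSET (0x10) bytes above the previous one, so per-queue registers can be addressed by qid. The kick-queue path (visible in the rt2800pci.c hunks below) does exactly that:

	entry = rt2x00queue_get_entry(queue, Q_INDEX);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
				  entry->entry_idx);
	/* TX_CTX_IDX(2), say, resolves to TX_CTX_IDX0 + 2 * 0x10 */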
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index f8f2abb..b504455 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -37,14 +37,13 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <linux/eeprom_93cx6.h>
 
 #include "rt2x00.h"
 #include "rt2x00mmio.h"
 #include "rt2x00pci.h"
-#include "rt2x00soc.h"
 #include "rt2800lib.h"
+#include "rt2800mmio.h"
 #include "rt2800.h"
 #include "rt2800pci.h"
 
@@ -90,27 +89,6 @@
 	rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
 }
 
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
-	void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
-
-	if (!base_addr)
-		return -ENOMEM;
-
-	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
-
-	iounmap(base_addr);
-	return 0;
-}
-#else
-static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
-	return -ENOMEM;
-}
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
 static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
 {
 	struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@
 {
 	return rt2800_read_eeprom_efuse(rt2x00dev);
 }
-#else
-static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
-{
-	return 0;
-}
-
-static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
-{
-	return -EOPNOTSUPP;
-}
-#endif /* CONFIG_PCI */
-
-/*
- * Queue handlers.
- */
-static void rt2800pci_start_queue(struct data_queue *queue)
-{
-	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-	u32 reg;
-
-	switch (queue->qid) {
-	case QID_RX:
-		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
-		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-		break;
-	case QID_BEACON:
-		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
-		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-		break;
-	default:
-		break;
-	}
-}
-
-static void rt2800pci_kick_queue(struct data_queue *queue)
-{
-	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-	struct queue_entry *entry;
-
-	switch (queue->qid) {
-	case QID_AC_VO:
-	case QID_AC_VI:
-	case QID_AC_BE:
-	case QID_AC_BK:
-		entry = rt2x00queue_get_entry(queue, Q_INDEX);
-		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
-					  entry->entry_idx);
-		break;
-	case QID_MGMT:
-		entry = rt2x00queue_get_entry(queue, Q_INDEX);
-		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
-					  entry->entry_idx);
-		break;
-	default:
-		break;
-	}
-}
-
-static void rt2800pci_stop_queue(struct data_queue *queue)
-{
-	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-	u32 reg;
-
-	switch (queue->qid) {
-	case QID_RX:
-		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
-		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-		break;
-	case QID_BEACON:
-		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
-		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
-		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
-		/*
-		 * Wait for current invocation to finish. The tasklet
-		 * won't be scheduled anymore afterwards since we disabled
-		 * the TBTT and PRE TBTT timer.
-		 */
-		tasklet_kill(&rt2x00dev->tbtt_tasklet);
-		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-
-		break;
-	default:
-		break;
-	}
-}
 
 /*
  * Firmware functions
@@ -332,217 +204,13 @@
 }
 
 /*
- * Initialization functions.
- */
-static bool rt2800pci_get_entry_state(struct queue_entry *entry)
-{
-	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-	u32 word;
-
-	if (entry->queue->qid == QID_RX) {
-		rt2x00_desc_read(entry_priv->desc, 1, &word);
-
-		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
-	} else {
-		rt2x00_desc_read(entry_priv->desc, 1, &word);
-
-		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
-	}
-}
-
-static void rt2800pci_clear_entry(struct queue_entry *entry)
-{
-	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	u32 word;
-
-	if (entry->queue->qid == QID_RX) {
-		rt2x00_desc_read(entry_priv->desc, 0, &word);
-		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
-		rt2x00_desc_write(entry_priv->desc, 0, word);
-
-		rt2x00_desc_read(entry_priv->desc, 1, &word);
-		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
-		rt2x00_desc_write(entry_priv->desc, 1, word);
-
-		/*
-		 * Set RX IDX in register to inform hardware that we have
-		 * handled this entry and it is available for reuse again.
-		 */
-		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
-					  entry->entry_idx);
-	} else {
-		rt2x00_desc_read(entry_priv->desc, 1, &word);
-		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
-		rt2x00_desc_write(entry_priv->desc, 1, word);
-	}
-}
-
-static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
-{
-	struct queue_entry_priv_mmio *entry_priv;
-
-	/*
-	 * Initialize registers.
-	 */
-	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
-				  entry_priv->desc_dma);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
-				  rt2x00dev->tx[0].limit);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
-
-	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
-				  entry_priv->desc_dma);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
-				  rt2x00dev->tx[1].limit);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
-
-	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
-				  entry_priv->desc_dma);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
-				  rt2x00dev->tx[2].limit);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
-
-	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
-				  entry_priv->desc_dma);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
-				  rt2x00dev->tx[3].limit);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
-
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
-
-	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
-	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
-
-	entry_priv = rt2x00dev->rx->entries[0].priv_data;
-	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
-				  entry_priv->desc_dma);
-	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
-				  rt2x00dev->rx[0].limit);
-	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
-				  rt2x00dev->rx[0].limit - 1);
-	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
-
-	rt2800_disable_wpdma(rt2x00dev);
-
-	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
-
-	return 0;
-}
-
-/*
  * Device state switch handlers.
  */
-static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
-				 enum dev_state state)
-{
-	u32 reg;
-	unsigned long flags;
-
-	/*
-	 * When interrupts are being enabled, the interrupt registers
-	 * should clear the register to assure a clean state.
-	 */
-	if (state == STATE_RADIO_IRQ_ON) {
-		rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
-		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-	}
-
-	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
-	reg = 0;
-	if (state == STATE_RADIO_IRQ_ON) {
-		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
-		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
-		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
-		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
-	}
-	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
-
-	if (state == STATE_RADIO_IRQ_OFF) {
-		/*
-		 * Wait for possibly running tasklets to finish.
-		 */
-		tasklet_kill(&rt2x00dev->txstatus_tasklet);
-		tasklet_kill(&rt2x00dev->rxdone_tasklet);
-		tasklet_kill(&rt2x00dev->autowake_tasklet);
-		tasklet_kill(&rt2x00dev->tbtt_tasklet);
-		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-	}
-}
-
-static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-
-	/*
-	 * Reset DMA indexes
-	 */
-	rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
-	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
-	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
-	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
-	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
-	if (rt2x00_is_pcie(rt2x00dev) &&
-	    (rt2x00_rt(rt2x00dev, RT3090) ||
-	     rt2x00_rt(rt2x00dev, RT3390) ||
-	     rt2x00_rt(rt2x00dev, RT3572) ||
-	     rt2x00_rt(rt2x00dev, RT3593) ||
-	     rt2x00_rt(rt2x00dev, RT5390) ||
-	     rt2x00_rt(rt2x00dev, RT5392) ||
-	     rt2x00_rt(rt2x00dev, RT5592))) {
-		rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
-		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
-		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
-		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
-	}
-
-	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
-	reg = 0;
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
-	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
-	return 0;
-}
-
 static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
 
-	/* Wait for DMA, ignore error until we initialize queues. */
-	rt2800_wait_wpdma_ready(rt2x00dev);
-
-	if (unlikely(rt2800pci_init_queues(rt2x00dev)))
-		return -EIO;
-
-	retval = rt2800_enable_radio(rt2x00dev);
+	retval = rt2800mmio_enable_radio(rt2x00dev);
 	if (retval)
 		return retval;
 
@@ -559,15 +227,6 @@
 	return retval;
 }
 
-static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
-{
-	if (rt2x00_is_soc(rt2x00dev)) {
-		rt2800_disable_radio(rt2x00dev);
-		rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
-		rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
-	}
-}
-
 static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
 			       enum dev_state state)
 {
@@ -601,12 +260,11 @@
 		 * After the radio has been disabled, the device should
 		 * be put to sleep for powersaving.
 		 */
-		rt2800pci_disable_radio(rt2x00dev);
 		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
 		break;
 	case STATE_RADIO_IRQ_ON:
 	case STATE_RADIO_IRQ_OFF:
-		rt2800pci_toggle_irq(rt2x00dev, state);
+		rt2800mmio_toggle_irq(rt2x00dev, state);
 		break;
 	case STATE_DEEP_SLEEP:
 	case STATE_SLEEP:
@@ -627,479 +285,13 @@
 }
 
 /*
- * TX descriptor initialization
- */
-static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
-{
-	return (__le32 *) entry->skb->data;
-}
-
-static void rt2800pci_write_tx_desc(struct queue_entry *entry,
-				    struct txentry_desc *txdesc)
-{
-	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-	__le32 *txd = entry_priv->desc;
-	u32 word;
-	const unsigned int txwi_size = entry->queue->winfo_size;
-
-	/*
-	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
-	 * must contains a TXWI structure + 802.11 header + padding + 802.11
-	 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
-	 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
-	 * data. It means that LAST_SEC0 is always 0.
-	 */
-
-	/*
-	 * Initialize TX descriptor
-	 */
-	word = 0;
-	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
-	rt2x00_desc_write(txd, 0, word);
-
-	word = 0;
-	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
-	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
-			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W1_BURST,
-			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
-	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
-	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
-	rt2x00_desc_write(txd, 1, word);
-
-	word = 0;
-	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
-			   skbdesc->skb_dma + txwi_size);
-	rt2x00_desc_write(txd, 2, word);
-
-	word = 0;
-	rt2x00_set_field32(&word, TXD_W3_WIV,
-			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
-	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
-	rt2x00_desc_write(txd, 3, word);
-
-	/*
-	 * Register descriptor details in skb frame descriptor.
-	 */
-	skbdesc->desc = txd;
-	skbdesc->desc_len = TXD_DESC_SIZE;
-}
-
-/*
- * RX control handlers
- */
-static void rt2800pci_fill_rxdone(struct queue_entry *entry,
-				  struct rxdone_entry_desc *rxdesc)
-{
-	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-	__le32 *rxd = entry_priv->desc;
-	u32 word;
-
-	rt2x00_desc_read(rxd, 3, &word);
-
-	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
-		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
-
-	/*
-	 * Unfortunately we don't know the cipher type used during
-	 * decryption. This prevents us from correct providing
-	 * correct statistics through debugfs.
-	 */
-	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
-
-	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
-		/*
-		 * Hardware has stripped IV/EIV data from 802.11 frame during
-		 * decryption. Unfortunately the descriptor doesn't contain
-		 * any fields with the EIV/IV data either, so they can't
-		 * be restored by rt2x00lib.
-		 */
-		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-
-		/*
-		 * The hardware has already checked the Michael Mic and has
-		 * stripped it from the frame. Signal this to mac80211.
-		 */
-		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
-		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
-			rxdesc->flags |= RX_FLAG_DECRYPTED;
-		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
-			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
-	}
-
-	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
-		rxdesc->dev_flags |= RXDONE_MY_BSS;
-
-	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
-		rxdesc->dev_flags |= RXDONE_L2PAD;
-
-	/*
-	 * Process the RXWI structure that is at the start of the buffer.
-	 */
-	rt2800_process_rxwi(entry, rxdesc);
-}
-
-/*
- * Interrupt functions.
- */
-static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
-{
-	struct ieee80211_conf conf = { .flags = 0 };
-	struct rt2x00lib_conf libconf = { .conf = &conf };
-
-	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
-}
-
-static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
-	__le32 *txwi;
-	u32 word;
-	int wcid, tx_wcid;
-
-	wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
-	txwi = rt2800_drv_get_txwi(entry);
-	rt2x00_desc_read(txwi, 1, &word);
-	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
-	return (tx_wcid == wcid);
-}
-
-static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
-{
-	u32 status = *(u32 *)data;
-
-	/*
-	 * rt2800pci hardware might reorder frames when exchanging traffic
-	 * with multiple BA enabled STAs.
-	 *
-	 * For example, a tx queue
-	 *    [ STA1 | STA2 | STA1 | STA2 ]
-	 * can result in tx status reports
-	 *    [ STA1 | STA1 | STA2 | STA2 ]
-	 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
-	 *
-	 * To mitigate this effect, associate the tx status to the first frame
-	 * in the tx queue with a matching wcid.
-	 */
-	if (rt2800pci_txdone_entry_check(entry, status) &&
-	    !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-		/*
-		 * Got a matching frame, associate the tx status with
-		 * the frame
-		 */
-		entry->status = status;
-		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-		return true;
-	}
-
-	/* Check the next frame */
-	return false;
-}
-
-static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
-{
-	u32 status = *(u32 *)data;
-
-	/*
-	 * Find the first frame without tx status and assign this status to it
-	 * regardless if it matches or not.
-	 */
-	if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-		/*
-		 * Got a matching frame, associate the tx status with
-		 * the frame
-		 */
-		entry->status = status;
-		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-		return true;
-	}
-
-	/* Check the next frame */
-	return false;
-}
-static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
-					     void *data)
-{
-	if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-		rt2800_txdone_entry(entry, entry->status,
-				    rt2800pci_get_txwi(entry));
-		return false;
-	}
-
-	/* No more frames to release */
-	return true;
-}
-
-static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
-{
-	struct data_queue *queue;
-	u32 status;
-	u8 qid;
-	int max_tx_done = 16;
-
-	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
-		qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
-		if (unlikely(qid >= QID_RX)) {
-			/*
-			 * Unknown queue, this shouldn't happen. Just drop
-			 * this tx status.
-			 */
-			rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
-				    qid);
-			break;
-		}
-
-		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-		if (unlikely(queue == NULL)) {
-			/*
-			 * The queue is NULL, this shouldn't happen. Stop
-			 * processing here and drop the tx status
-			 */
-			rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
-				    qid);
-			break;
-		}
-
-		if (unlikely(rt2x00queue_empty(queue))) {
-			/*
-			 * The queue is empty. Stop processing here
-			 * and drop the tx status.
-			 */
-			rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
-				    qid);
-			break;
-		}
-
-		/*
-		 * Let's associate this tx status with the first
-		 * matching frame.
-		 */
-		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-						Q_INDEX, &status,
-						rt2800pci_txdone_find_entry)) {
-			/*
-			 * We cannot match the tx status to any frame, so just
-			 * use the first one.
-			 */
-			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-							Q_INDEX, &status,
-							rt2800pci_txdone_match_first)) {
-				rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
-					    qid);
-				break;
-			}
-		}
-
-		/*
-		 * Release all frames with a valid tx status.
-		 */
-		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-					   Q_INDEX, NULL,
-					   rt2800pci_txdone_release_entries);
-
-		if (--max_tx_done == 0)
-			break;
-	}
-
-	return !max_tx_done;
-}
-
-static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
-					      struct rt2x00_field32 irq_field)
-{
-	u32 reg;
-
-	/*
-	 * Enable a single interrupt. The interrupt mask register
-	 * access needs locking.
-	 */
-	spin_lock_irq(&rt2x00dev->irqmask_lock);
-	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-	rt2x00_set_field32(&reg, irq_field, 1);
-	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-	spin_unlock_irq(&rt2x00dev->irqmask_lock);
-}
-
-static void rt2800pci_txstatus_tasklet(unsigned long data)
-{
-	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-	if (rt2800pci_txdone(rt2x00dev))
-		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
-	/*
-	 * No need to enable the tx status interrupt here as we always
-	 * leave it enabled to minimize the possibility of a tx status
-	 * register overflow. See comment in interrupt handler.
-	 */
-}
-
-static void rt2800pci_pretbtt_tasklet(unsigned long data)
-{
-	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-	rt2x00lib_pretbtt(rt2x00dev);
-	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
-}
-
-static void rt2800pci_tbtt_tasklet(unsigned long data)
-{
-	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
-	u32 reg;
-
-	rt2x00lib_beacondone(rt2x00dev);
-
-	if (rt2x00dev->intf_ap_count) {
-		/*
-		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
-		 * causing beacon skew and as a result causing problems with
-		 * some powersaving clients over time. Shorten the beacon
-		 * interval every 64 beacons by 64us to mitigate this effect.
-		 */
-		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
-			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
-					   (rt2x00dev->beacon_int * 16) - 1);
-			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
-			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
-					   (rt2x00dev->beacon_int * 16));
-			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-		}
-		drv_data->tbtt_tick++;
-		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
-	}
-
-	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
-}
-
-static void rt2800pci_rxdone_tasklet(unsigned long data)
-{
-	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-	if (rt2x00mmio_rxdone(rt2x00dev))
-		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
-}
-
-static void rt2800pci_autowake_tasklet(unsigned long data)
-{
-	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-	rt2800pci_wakeup(rt2x00dev);
-	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
-}
-
-static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
-{
-	u32 status;
-	int i;
-
-	/*
-	 * The TX_FIFO_STATUS interrupt needs special care. We should
-	 * read TX_STA_FIFO but we should do it immediately as otherwise
-	 * the register can overflow and we would lose status reports.
-	 *
-	 * Hence, read the TX_STA_FIFO register and copy all tx status
-	 * reports into a kernel FIFO which is handled in the txstatus
-	 * tasklet. We use a tasklet to process the tx status reports
-	 * because we can schedule the tasklet multiple times (when the
-	 * interrupt fires again during tx status processing).
-	 *
-	 * Furthermore we don't disable the TX_FIFO_STATUS
-	 * interrupt here but leave it enabled so that the TX_STA_FIFO
-	 * can also be read while the tx status tasklet gets executed.
-	 *
-	 * Since we have only one producer and one consumer we don't
-	 * need to lock the kfifo.
-	 */
-	for (i = 0; i < rt2x00dev->tx->limit; i++) {
-		rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
-
-		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
-			break;
-
-		if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
-			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
-			break;
-		}
-	}
-
-	/* Schedule the tasklet for processing the tx status. */
-	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-}
-
-static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
-{
-	struct rt2x00_dev *rt2x00dev = dev_instance;
-	u32 reg, mask;
-
-	/* Read status and ACK all interrupts */
-	rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
-	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
-	if (!reg)
-		return IRQ_NONE;
-
-	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-		return IRQ_HANDLED;
-
-	/*
-	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
-	 * for interrupts and interrupt masks we can just use the value of
-	 * INT_SOURCE_CSR to create the interrupt mask.
-	 */
-	mask = ~reg;
-
-	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
-		rt2800pci_txstatus_interrupt(rt2x00dev);
-		/*
-		 * Never disable the TX_FIFO_STATUS interrupt.
-		 */
-		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-	}
-
-	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
-		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
-
-	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
-		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
-
-	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
-		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-
-	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
-		tasklet_schedule(&rt2x00dev->autowake_tasklet);
-
-	/*
-	 * Disable all interrupts for which a tasklet was scheduled right now,
-	 * the tasklet will reenable the appropriate interrupts.
-	 */
-	spin_lock(&rt2x00dev->irqmask_lock);
-	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-	reg &= mask;
-	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-	spin_unlock(&rt2x00dev->irqmask_lock);
-
-	return IRQ_HANDLED;
-}
-
-/*
  * Device probe functions.
  */
 static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
 
-	if (rt2x00_is_soc(rt2x00dev))
-		retval = rt2800pci_read_eeprom_soc(rt2x00dev);
-	else if (rt2800pci_efuse_detect(rt2x00dev))
+	if (rt2800pci_efuse_detect(rt2x00dev))
 		retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
 	else
 		retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@
 	.read_eeprom		= rt2800pci_read_eeprom,
 	.hwcrypt_disabled	= rt2800pci_hwcrypt_disabled,
 	.drv_write_firmware	= rt2800pci_write_firmware,
-	.drv_init_registers	= rt2800pci_init_registers,
-	.drv_get_txwi		= rt2800pci_get_txwi,
+	.drv_init_registers	= rt2800mmio_init_registers,
+	.drv_get_txwi		= rt2800mmio_get_txwi,
 };
 
 static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
-	.irq_handler		= rt2800pci_interrupt,
-	.txstatus_tasklet	= rt2800pci_txstatus_tasklet,
-	.pretbtt_tasklet	= rt2800pci_pretbtt_tasklet,
-	.tbtt_tasklet		= rt2800pci_tbtt_tasklet,
-	.rxdone_tasklet		= rt2800pci_rxdone_tasklet,
-	.autowake_tasklet	= rt2800pci_autowake_tasklet,
+	.irq_handler		= rt2800mmio_interrupt,
+	.txstatus_tasklet	= rt2800mmio_txstatus_tasklet,
+	.pretbtt_tasklet	= rt2800mmio_pretbtt_tasklet,
+	.tbtt_tasklet		= rt2800mmio_tbtt_tasklet,
+	.rxdone_tasklet		= rt2800mmio_rxdone_tasklet,
+	.autowake_tasklet	= rt2800mmio_autowake_tasklet,
 	.probe_hw		= rt2800_probe_hw,
 	.get_firmware_name	= rt2800pci_get_firmware_name,
 	.check_firmware		= rt2800_check_firmware,
 	.load_firmware		= rt2800_load_firmware,
 	.initialize		= rt2x00mmio_initialize,
 	.uninitialize		= rt2x00mmio_uninitialize,
-	.get_entry_state	= rt2800pci_get_entry_state,
-	.clear_entry		= rt2800pci_clear_entry,
+	.get_entry_state	= rt2800mmio_get_entry_state,
+	.clear_entry		= rt2800mmio_clear_entry,
 	.set_device_state	= rt2800pci_set_device_state,
 	.rfkill_poll		= rt2800_rfkill_poll,
 	.link_stats		= rt2800_link_stats,
@@ -1171,15 +363,15 @@
 	.link_tuner		= rt2800_link_tuner,
 	.gain_calibration	= rt2800_gain_calibration,
 	.vco_calibration	= rt2800_vco_calibration,
-	.start_queue		= rt2800pci_start_queue,
-	.kick_queue		= rt2800pci_kick_queue,
-	.stop_queue		= rt2800pci_stop_queue,
+	.start_queue		= rt2800mmio_start_queue,
+	.kick_queue		= rt2800mmio_kick_queue,
+	.stop_queue		= rt2800mmio_stop_queue,
 	.flush_queue		= rt2x00mmio_flush_queue,
-	.write_tx_desc		= rt2800pci_write_tx_desc,
+	.write_tx_desc		= rt2800mmio_write_tx_desc,
 	.write_tx_data		= rt2800_write_tx_data,
 	.write_beacon		= rt2800_write_beacon,
 	.clear_beacon		= rt2800_clear_beacon,
-	.fill_rxdone		= rt2800pci_fill_rxdone,
+	.fill_rxdone		= rt2800mmio_fill_rxdone,
 	.config_shared_key	= rt2800_config_shared_key,
 	.config_pairwise_key	= rt2800_config_pairwise_key,
 	.config_filter		= rt2800_config_filter,
@@ -1191,49 +383,6 @@
 	.sta_remove		= rt2800_sta_remove,
 };
 
-static void rt2800pci_queue_init(struct data_queue *queue)
-{
-	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-	unsigned short txwi_size, rxwi_size;
-
-	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
-
-	switch (queue->qid) {
-	case QID_RX:
-		queue->limit = 128;
-		queue->data_size = AGGREGATION_SIZE;
-		queue->desc_size = RXD_DESC_SIZE;
-		queue->winfo_size = rxwi_size;
-		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-		break;
-
-	case QID_AC_VO:
-	case QID_AC_VI:
-	case QID_AC_BE:
-	case QID_AC_BK:
-		queue->limit = 64;
-		queue->data_size = AGGREGATION_SIZE;
-		queue->desc_size = TXD_DESC_SIZE;
-		queue->winfo_size = txwi_size;
-		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-		break;
-
-	case QID_BEACON:
-		queue->limit = 8;
-		queue->data_size = 0; /* No DMA required for beacons */
-		queue->desc_size = TXD_DESC_SIZE;
-		queue->winfo_size = txwi_size;
-		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-		break;
-
-	case QID_ATIM:
-		/* fallthrough */
-	default:
-		BUG();
-		break;
-	}
-}
-
 static const struct rt2x00_ops rt2800pci_ops = {
 	.name			= KBUILD_MODNAME,
 	.drv_data_size		= sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@
 	.eeprom_size		= EEPROM_SIZE,
 	.rf_size		= RF_SIZE,
 	.tx_queues		= NUM_TX_QUEUES,
-	.queue_init		= rt2800pci_queue_init,
+	.queue_init		= rt2800mmio_queue_init,
 	.lib			= &rt2800pci_rt2x00_ops,
 	.drv			= &rt2800pci_rt2800_ops,
 	.hw			= &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@
 /*
  * RT2800pci module information.
  */
-#ifdef CONFIG_PCI
 static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
 	{ PCI_DEVICE(0x1814, 0x0601) },
 	{ PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@
 #endif
 	{ 0, }
 };
-#endif /* CONFIG_PCI */
 
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_PCI
 MODULE_FIRMWARE(FIRMWARE_RT2860);
 MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_PCI */
 MODULE_LICENSE("GPL");
 
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800soc_probe(struct platform_device *pdev)
-{
-	return rt2x00soc_probe(pdev, &rt2800pci_ops);
-}
-
-static struct platform_driver rt2800soc_driver = {
-	.driver		= {
-		.name		= "rt2800_wmac",
-		.owner		= THIS_MODULE,
-		.mod_name	= KBUILD_MODNAME,
-	},
-	.probe		= rt2800soc_probe,
-	.remove		= rt2x00soc_remove,
-	.suspend	= rt2x00soc_suspend,
-	.resume		= rt2x00soc_resume,
-};
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
 static int rt2800pci_probe(struct pci_dev *pci_dev,
 			   const struct pci_device_id *id)
 {
@@ -1344,39 +469,5 @@
 	.suspend	= rt2x00pci_suspend,
 	.resume		= rt2x00pci_resume,
 };
-#endif /* CONFIG_PCI */
 
-static int __init rt2800pci_init(void)
-{
-	int ret = 0;
-
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-	ret = platform_driver_register(&rt2800soc_driver);
-	if (ret)
-		return ret;
-#endif
-#ifdef CONFIG_PCI
-	ret = pci_register_driver(&rt2800pci_driver);
-	if (ret) {
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-		platform_driver_unregister(&rt2800soc_driver);
-#endif
-		return ret;
-	}
-#endif
-
-	return ret;
-}
-
-static void __exit rt2800pci_exit(void)
-{
-#ifdef CONFIG_PCI
-	pci_unregister_driver(&rt2800pci_driver);
-#endif
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-	platform_driver_unregister(&rt2800soc_driver);
-#endif
-}
-
-module_init(rt2800pci_init);
-module_exit(rt2800pci_exit);
+module_pci_driver(rt2800pci_driver);
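module_pci_driver() replaces the hand-rolled init/exit pair deleted above, now that the SoC half has moved to rt2800soc.c. For reference, the macro generates roughly the following boilerplate (a sketch of the include/linux/pci.h helper, not code from this patch):

	static int __init rt2800pci_driver_init(void)
	{
		return pci_register_driver(&rt2800pci_driver);
	}
	module_init(rt2800pci_driver_init);

	static void __exit rt2800pci_driver_exit(void)
	{
		pci_unregister_driver(&rt2800pci_driver);
	}
	module_exit(rt2800pci_driver_exit);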
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index ab22a08..a81c9ee 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,107 +35,10 @@
 #define RT2800PCI_H
 
 /*
- * Queue register offset macros
- */
-#define TX_QUEUE_REG_OFFSET		0x10
-#define TX_BASE_PTR(__x)		(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_MAX_CNT(__x)			(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_CTX_IDX(__x)			(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_DTX_IDX(__x)			(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-
-/*
  * 8051 firmware image.
  */
 #define FIRMWARE_RT2860			"rt2860.bin"
 #define FIRMWARE_RT3290			"rt3290.bin"
 #define FIRMWARE_IMAGE_BASE		0x2000
 
-/*
- * DMA descriptor defines.
- */
-#define TXD_DESC_SIZE			(4 * sizeof(__le32))
-#define RXD_DESC_SIZE			(4 * sizeof(__le32))
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
-#define TXD_W1_BURST			FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE			FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- *       0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV			FIELD32(0x01000000)
-#define TXD_W3_QSEL			FIELD32(0x06000000)
-#define TXD_W3_TCO			FIELD32(0x20000000)
-#define TXD_W3_UCO			FIELD32(0x40000000)
-#define TXD_W3_ICO			FIELD32(0x80000000)
-
-/*
- * RX descriptor format for RX Ring.
- */
-
-/*
- * Word0
- */
-#define RXD_W0_SDP0			FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define RXD_W1_SDL1			FIELD32(0x00003fff)
-#define RXD_W1_SDL0			FIELD32(0x3fff0000)
-#define RXD_W1_LS0			FIELD32(0x40000000)
-#define RXD_W1_DMA_DONE			FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define RXD_W2_SDP1			FIELD32(0xffffffff)
-
-/*
- * Word3
- * AMSDU: RX with 802.3 header, not 802.11 header.
- * DECRYPTED: This frame is being decrypted.
- */
-#define RXD_W3_BA			FIELD32(0x00000001)
-#define RXD_W3_DATA			FIELD32(0x00000002)
-#define RXD_W3_NULLDATA			FIELD32(0x00000004)
-#define RXD_W3_FRAG			FIELD32(0x00000008)
-#define RXD_W3_UNICAST_TO_ME		FIELD32(0x00000010)
-#define RXD_W3_MULTICAST		FIELD32(0x00000020)
-#define RXD_W3_BROADCAST		FIELD32(0x00000040)
-#define RXD_W3_MY_BSS			FIELD32(0x00000080)
-#define RXD_W3_CRC_ERROR		FIELD32(0x00000100)
-#define RXD_W3_CIPHER_ERROR		FIELD32(0x00000600)
-#define RXD_W3_AMSDU			FIELD32(0x00000800)
-#define RXD_W3_HTC			FIELD32(0x00001000)
-#define RXD_W3_RSSI			FIELD32(0x00002000)
-#define RXD_W3_L2PAD			FIELD32(0x00004000)
-#define RXD_W3_AMPDU			FIELD32(0x00008000)
-#define RXD_W3_DECRYPTED		FIELD32(0x00010000)
-#define RXD_W3_PLCP_SIGNAL		FIELD32(0x00020000)
-#define RXD_W3_PLCP_RSSI		FIELD32(0x00040000)
-
 #endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644
index 0000000..1359227
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -0,0 +1,263 @@
+/*	Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *	<http://rt2x00.serialmonkey.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the
+ *	Free Software Foundation, Inc.,
+ *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*	Module: rt2800soc
+ *	Abstract: rt2800 WiSoC specific routines.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2x00soc.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/* Allow hardware encryption to be disabled. */
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+	return modparam_nohwcrypt;
+}
+
+static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	rt2800_disable_radio(rt2x00dev);
+	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+	rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+}
+
+static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
+				      enum dev_state state)
+{
+	int retval = 0;
+
+	switch (state) {
+	case STATE_RADIO_ON:
+		retval = rt2800mmio_enable_radio(rt2x00dev);
+		break;
+
+	case STATE_RADIO_OFF:
+		rt2800soc_disable_radio(rt2x00dev);
+		break;
+
+	case STATE_RADIO_IRQ_ON:
+	case STATE_RADIO_IRQ_OFF:
+		rt2800mmio_toggle_irq(rt2x00dev, state);
+		break;
+
+	case STATE_DEEP_SLEEP:
+	case STATE_SLEEP:
+	case STATE_STANDBY:
+	case STATE_AWAKE:
+		/* These states are not supported, but don't report an error */
+		retval = 0;
+		break;
+
+	default:
+		retval = -ENOTSUPP;
+		break;
+	}
+
+	if (unlikely(retval))
+		rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
+			   state, retval);
+
+	return retval;
+}
+
+static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+
+	if (!base_addr)
+		return -ENOMEM;
+
+	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+	iounmap(base_addr);
+	return 0;
+}
+
+/* Firmware functions */
+static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+	WARN_ON_ONCE(1);
+	return NULL;
+}
+
+static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
+				   const u8 *data, const size_t len)
+{
+	WARN_ON_ONCE(1);
+	return 0;
+}
+
+static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
+				    const u8 *data, const size_t len)
+{
+	WARN_ON_ONCE(1);
+	return 0;
+}
+
+static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
+				    const u8 *data, const size_t len)
+{
+	WARN_ON_ONCE(1);
+	return 0;
+}
+
+static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+	.tx			= rt2x00mac_tx,
+	.start			= rt2x00mac_start,
+	.stop			= rt2x00mac_stop,
+	.add_interface		= rt2x00mac_add_interface,
+	.remove_interface	= rt2x00mac_remove_interface,
+	.config			= rt2x00mac_config,
+	.configure_filter	= rt2x00mac_configure_filter,
+	.set_key		= rt2x00mac_set_key,
+	.sw_scan_start		= rt2x00mac_sw_scan_start,
+	.sw_scan_complete	= rt2x00mac_sw_scan_complete,
+	.get_stats		= rt2x00mac_get_stats,
+	.get_tkip_seq		= rt2800_get_tkip_seq,
+	.set_rts_threshold	= rt2800_set_rts_threshold,
+	.sta_add		= rt2x00mac_sta_add,
+	.sta_remove		= rt2x00mac_sta_remove,
+	.bss_info_changed	= rt2x00mac_bss_info_changed,
+	.conf_tx		= rt2800_conf_tx,
+	.get_tsf		= rt2800_get_tsf,
+	.rfkill_poll		= rt2x00mac_rfkill_poll,
+	.ampdu_action		= rt2800_ampdu_action,
+	.flush			= rt2x00mac_flush,
+	.get_survey		= rt2800_get_survey,
+	.get_ringparam		= rt2x00mac_get_ringparam,
+	.tx_frames_pending	= rt2x00mac_tx_frames_pending,
+};
+
+static const struct rt2800_ops rt2800soc_rt2800_ops = {
+	.register_read		= rt2x00mmio_register_read,
+	.register_read_lock	= rt2x00mmio_register_read, /* same for SoCs */
+	.register_write		= rt2x00mmio_register_write,
+	.register_write_lock	= rt2x00mmio_register_write, /* same for SoCs */
+	.register_multiread	= rt2x00mmio_register_multiread,
+	.register_multiwrite	= rt2x00mmio_register_multiwrite,
+	.regbusy_read		= rt2x00mmio_regbusy_read,
+	.read_eeprom		= rt2800soc_read_eeprom,
+	.hwcrypt_disabled	= rt2800soc_hwcrypt_disabled,
+	.drv_write_firmware	= rt2800soc_write_firmware,
+	.drv_init_registers	= rt2800mmio_init_registers,
+	.drv_get_txwi		= rt2800mmio_get_txwi,
+};
+
+static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
+	.irq_handler		= rt2800mmio_interrupt,
+	.txstatus_tasklet	= rt2800mmio_txstatus_tasklet,
+	.pretbtt_tasklet	= rt2800mmio_pretbtt_tasklet,
+	.tbtt_tasklet		= rt2800mmio_tbtt_tasklet,
+	.rxdone_tasklet		= rt2800mmio_rxdone_tasklet,
+	.autowake_tasklet	= rt2800mmio_autowake_tasklet,
+	.probe_hw		= rt2800_probe_hw,
+	.get_firmware_name	= rt2800soc_get_firmware_name,
+	.check_firmware		= rt2800soc_check_firmware,
+	.load_firmware		= rt2800soc_load_firmware,
+	.initialize		= rt2x00mmio_initialize,
+	.uninitialize		= rt2x00mmio_uninitialize,
+	.get_entry_state	= rt2800mmio_get_entry_state,
+	.clear_entry		= rt2800mmio_clear_entry,
+	.set_device_state	= rt2800soc_set_device_state,
+	.rfkill_poll		= rt2800_rfkill_poll,
+	.link_stats		= rt2800_link_stats,
+	.reset_tuner		= rt2800_reset_tuner,
+	.link_tuner		= rt2800_link_tuner,
+	.gain_calibration	= rt2800_gain_calibration,
+	.vco_calibration	= rt2800_vco_calibration,
+	.start_queue		= rt2800mmio_start_queue,
+	.kick_queue		= rt2800mmio_kick_queue,
+	.stop_queue		= rt2800mmio_stop_queue,
+	.flush_queue		= rt2x00mmio_flush_queue,
+	.write_tx_desc		= rt2800mmio_write_tx_desc,
+	.write_tx_data		= rt2800_write_tx_data,
+	.write_beacon		= rt2800_write_beacon,
+	.clear_beacon		= rt2800_clear_beacon,
+	.fill_rxdone		= rt2800mmio_fill_rxdone,
+	.config_shared_key	= rt2800_config_shared_key,
+	.config_pairwise_key	= rt2800_config_pairwise_key,
+	.config_filter		= rt2800_config_filter,
+	.config_intf		= rt2800_config_intf,
+	.config_erp		= rt2800_config_erp,
+	.config_ant		= rt2800_config_ant,
+	.config			= rt2800_config,
+	.sta_add		= rt2800_sta_add,
+	.sta_remove		= rt2800_sta_remove,
+};
+
+static const struct rt2x00_ops rt2800soc_ops = {
+	.name			= KBUILD_MODNAME,
+	.drv_data_size		= sizeof(struct rt2800_drv_data),
+	.max_ap_intf		= 8,
+	.eeprom_size		= EEPROM_SIZE,
+	.rf_size		= RF_SIZE,
+	.tx_queues		= NUM_TX_QUEUES,
+	.queue_init		= rt2800mmio_queue_init,
+	.lib			= &rt2800soc_rt2x00_ops,
+	.drv			= &rt2800soc_rt2800_ops,
+	.hw			= &rt2800soc_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+	.debugfs		= &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+	return rt2x00soc_probe(pdev, &rt2800soc_ops);
+}
+
+static struct platform_driver rt2800soc_driver = {
+	.driver		= {
+		.name		= "rt2800_wmac",
+		.owner		= THIS_MODULE,
+		.mod_name	= KBUILD_MODNAME,
+	},
+	.probe		= rt2800soc_probe,
+	.remove		= rt2x00soc_remove,
+	.suspend	= rt2x00soc_suspend,
+	.resume		= rt2x00soc_resume,
+};
+
+module_platform_driver(rt2800soc_driver);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9..a81ceb6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -148,6 +148,8 @@
 	return false;
 }
 
+#define TXSTATUS_READ_INTERVAL 1000000	/* 1 ms, in nanoseconds */
+
 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
 						 int urb_status, u32 tx_status)
 {
@@ -162,7 +164,7 @@
 
 	valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
 	if (valid) {
-		if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
+		if (!kfifo_put(&rt2x00dev->txstatus_fifo, tx_status))
 			rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
 
 		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
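The dropped '&' is not cosmetic: since the 3.13 kfifo rework, kfifo_put() takes the element by value rather than by pointer, while kfifo_get() keeps its pointer out-parameter. A minimal self-contained sketch of the updated API (all names hypothetical):

	#include <linux/kfifo.h>

	static DECLARE_KFIFO(demo_fifo, u32, 16);	/* size must be a power of two */

	static void demo(u32 status)
	{
		u32 out;

		if (!kfifo_put(&demo_fifo, status))	/* by value since 3.13 */
			pr_warn("demo fifo overrun\n");

		if (kfifo_get(&demo_fifo, &out))	/* out-param is still a pointer */
			pr_info("status %08x\n", out);
	}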
@@ -176,8 +178,9 @@
 		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 
 	if (rt2800usb_txstatus_pending(rt2x00dev)) {
-		/* Read register after 250 us */
-		hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+		/* Read register after 1 ms */
+		hrtimer_start(&rt2x00dev->txstatus_timer,
+			      ktime_set(0, TXSTATUS_READ_INTERVAL),
 			      HRTIMER_MODE_REL);
 		return false;
 	}
@@ -202,8 +205,9 @@
 	if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
 		return;
 
-	/* Read TX_STA_FIFO register after 500 us */
-	hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+	/* Read TX_STA_FIFO register after 2 ms */
+	hrtimer_start(&rt2x00dev->txstatus_timer,
+		      ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
 		      HRTIMER_MODE_REL);
 }
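TXSTATUS_READ_INTERVAL is in nanoseconds, so the two call sites now poll TX_STA_FIFO after 1 ms and 2 ms instead of the old 250/500 us. The pattern in isolation, assuming a hypothetical demo_timer that was set up with hrtimer_init() elsewhere:

	#include <linux/hrtimer.h>

	static struct hrtimer demo_timer;	/* hypothetical; initialized at probe */

	static void demo_arm(void)
	{
		/* One-shot, relative: fires TXSTATUS_READ_INTERVAL ns from now. */
		hrtimer_start(&demo_timer,
			      ktime_set(0, TXSTATUS_READ_INTERVAL),
			      HRTIMER_MODE_REL);
	}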
 
@@ -1176,6 +1180,8 @@
 	/* Linksys */
 	{ USB_DEVICE(0x13b1, 0x002f) },
 	{ USB_DEVICE(0x1737, 0x0079) },
+	/* Logitec */
+	{ USB_DEVICE(0x0789, 0x0170) },
 	/* Ralink */
 	{ USB_DEVICE(0x148f, 0x3572) },
 	/* Sitecom */
@@ -1199,6 +1205,8 @@
 	{ USB_DEVICE(0x050d, 0x1103) },
 	/* Cameo */
 	{ USB_DEVICE(0x148f, 0xf301) },
+	/* D-Link */
+	{ USB_DEVICE(0x2001, 0x3c1f) },
 	/* Edimax */
 	{ USB_DEVICE(0x7392, 0x7733) },
 	/* Hawking */
@@ -1212,6 +1220,7 @@
 	{ USB_DEVICE(0x0789, 0x016b) },
 	/* NETGEAR */
 	{ USB_DEVICE(0x0846, 0x9012) },
+	{ USB_DEVICE(0x0846, 0x9013) },
 	{ USB_DEVICE(0x0846, 0x9019) },
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1229,7 @@
 	/* Sitecom */
 	{ USB_DEVICE(0x0df6, 0x0067) },
 	{ USB_DEVICE(0x0df6, 0x006a) },
+	{ USB_DEVICE(0x0df6, 0x006e) },
 	/* ZyXEL */
 	{ USB_DEVICE(0x0586, 0x3421) },
 #endif
@@ -1236,6 +1246,9 @@
 	{ USB_DEVICE(0x2001, 0x3c1c) },
 	{ USB_DEVICE(0x2001, 0x3c1d) },
 	{ USB_DEVICE(0x2001, 0x3c1e) },
+	{ USB_DEVICE(0x2001, 0x3c20) },
+	{ USB_DEVICE(0x2001, 0x3c22) },
+	{ USB_DEVICE(0x2001, 0x3c23) },
 	/* LG innotek */
 	{ USB_DEVICE(0x043e, 0x7a22) },
 	{ USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1271,17 @@
 	{ USB_DEVICE(0x043e, 0x7a32) },
 	/* AVM GmbH */
 	{ USB_DEVICE(0x057c, 0x8501) },
-	/* D-Link DWA-160-B2 */
+	/* Buffalo */
+	{ USB_DEVICE(0x0411, 0x0241) },
+	/* D-Link */
 	{ USB_DEVICE(0x2001, 0x3c1a) },
+	{ USB_DEVICE(0x2001, 0x3c21) },
 	/* Proware */
 	{ USB_DEVICE(0x043e, 0x7a13) },
 	/* Ralink */
 	{ USB_DEVICE(0x148f, 0x5572) },
+	/* TRENDnet */
+	{ USB_DEVICE(0x20f4, 0x724a) },
 #endif
 #ifdef CONFIG_RT2800USB_UNKNOWN
 	/*
@@ -1333,6 +1351,7 @@
 	{ USB_DEVICE(0x1d4d, 0x0010) },
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0xab24) },
+	{ USB_DEVICE(0x2019, 0xab29) },
 	/* Qcom */
 	{ USB_DEVICE(0x18e8, 0x6259) },
 	/* RadioShack */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572..e4ba2ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
 #include <linux/input-polldev.h>
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
+#include <linux/average.h>
 
 #include <net/mac80211.h>
 
@@ -138,17 +139,6 @@
 #define SHORT_EIFS		( SIFS + SHORT_DIFS + \
 				  GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
 
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
-	int avg;
-	int avg_weight;
-};
-
 enum rt2x00_chip_intf {
 	RT2X00_CHIP_INTF_PCI,
 	RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@
 	 * Similar to the avg_rssi in the link_qual structure
 	 * this value is updated by using the walking average.
 	 */
-	struct avg_val rssi_ant;
+	struct ewma rssi_ant;
 };
 
 /*
@@ -326,7 +316,7 @@
 	/*
 	 * Currently active average RSSI value
 	 */
-	struct avg_val avg_rssi;
+	struct ewma avg_rssi;
 
 	/*
 	 * Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@
 	return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
 }
 
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+		    enum rt2x00_capability_flags cap_flag)
+{
+	return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
 /**
  * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
  * @entry: Pointer to &struct queue_entry
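The block of rt2x00_has_cap_*() helpers added above wraps the raw test_bit() calls behind intention-revealing names, so callers no longer repeat the bit constant and the &rt2x00dev->cap_flags expression at every site (the follow-up hunks convert those sites). A minimal standalone sketch of the same pattern, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capability bits, mirroring enum rt2x00_capability_flags. */
enum cap_flags { CAP_HW_CRYPTO = 0, CAP_LINK_TUNING = 1 };

struct dev { unsigned long cap_flags; };

static inline bool has_cap(const struct dev *d, enum cap_flags f)
{
	return d->cap_flags & (1UL << f);	/* stands in for test_bit() */
}

static inline bool has_cap_hw_crypto(const struct dev *d)
{
	return has_cap(d, CAP_HW_CRYPTO);	/* one helper per capability */
}

int main(void)
{
	struct dev d = { .cap_flags = 1UL << CAP_HW_CRYPTO };
	printf("hw crypto: %d\n", has_cap_hw_crypto(&d));
	return 0;
}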
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7f..3db0d99 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
 
-	if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+	if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
 		return;
 
 	__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@
 	struct ieee80211_key_conf *key = tx_info->control.hw_key;
 	unsigned int overhead = 0;
 
-	if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+	if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
 		return overhead;
 
 	/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f6..7f7baae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@
 				intf, &rt2x00debug_fop_queue_stats);
 
 #ifdef CONFIG_RT2X00_LIB_CRYPTO
-	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_hw_crypto(rt2x00dev))
 		intf->crypto_stats_entry =
 		    debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
 					intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9..080b1fc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@
 	rt2x00queue_start_queues(rt2x00dev);
 	rt2x00link_start_tuner(rt2x00dev);
 	rt2x00link_start_agc(rt2x00dev);
-	if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
 		rt2x00link_start_vcocal(rt2x00dev);
 
 	/*
@@ -113,7 +113,7 @@
 	 * Stop all queues
 	 */
 	rt2x00link_stop_agc(rt2x00dev);
-	if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
 		rt2x00link_stop_vcocal(rt2x00dev);
 	rt2x00link_stop_tuner(rt2x00dev);
 	rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@
 	 * here as they will fetch the next beacon directly prior to
 	 * transmission.
 	 */
-	if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
 		return;
 
 	/* fetch next beacon */
@@ -358,7 +358,7 @@
 	 * mac80211 will expect the same data to be present in the
 	 * frame as it was passed to us.
 	 */
-	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_hw_crypto(rt2x00dev))
 		rt2x00crypto_tx_insert_iv(entry->skb, header_length);
 
 	/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab..c2b3b66 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
  */
 #define DEFAULT_RSSI		-128
 
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- *        avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values this results
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates this and will move towards
- * the new values correctly allowing a effective link tuning,
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES	8
-#define AVG_FACTOR	1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
-	struct avg_val __new; \
-	__new.avg_weight = \
-	    (__avg).avg_weight  ? \
-		((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
-		  ((__val) * (AVG_FACTOR))) / \
-		 (AVG_SAMPLES)) : \
-		((__val) * (AVG_FACTOR)); \
-	__new.avg = __new.avg_weight / (AVG_FACTOR); \
-	__new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR	1024
+#define RT2X00_EWMA_WEIGHT	8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+	unsigned long avg;
+
+	avg = ewma_read(ewma);
+	if (avg)
+		return -avg;
+
+	return DEFAULT_RSSI;
+}
 
 static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
 {
 	struct link_ant *ant = &rt2x00dev->link.ant;
 
-	if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
-		return ant->rssi_ant.avg;
+	if (rt2x00dev->link.qual.rx_success)
+		return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
 	return DEFAULT_RSSI;
 }
 
@@ -100,8 +78,8 @@
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-	rt2x00dev->link.ant.rssi_ant.avg = 0;
-	rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+	ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+		  RT2X00_EWMA_WEIGHT);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@
 	/*
 	 * Update global RSSI
 	 */
-	link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+	ewma_add(&link->avg_rssi, -rxdesc->rssi);
 
 	/*
 	 * Update antenna RSSI
 	 */
-	ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+	ewma_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@
 	 */
 	rt2x00dev->link.count = 0;
 	memset(qual, 0, sizeof(*qual));
+	ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+		  RT2X00_EWMA_WEIGHT);
 
 	/*
 	 * Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@
 	 * collect the RSSI data we could use this. Otherwise we
 	 * must fallback to the default RSSI value.
 	 */
-	if (!link->avg_rssi.avg || !qual->rx_success)
+	if (!qual->rx_success)
 		qual->rssi = DEFAULT_RSSI;
 	else
-		qual->rssi = link->avg_rssi.avg;
+		qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
 
 	/*
 	 * Check if link tuning is supported by the hardware, some hardware
 	 * do not support link tuning at all, while other devices can disable
 	 * the feature from the EEPROM.
 	 */
-	if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_link_tuning(rt2x00dev))
 		rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
 
 	/*
@@ -513,7 +493,7 @@
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
 {
 	INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
-	if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
 		INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
 	INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
 	INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
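The driver-local avg_val walking average is replaced throughout by the kernel's generic EWMA helpers from <linux/average.h>. Because struct ewma stores unsigned values, the code feeds it -rssi (RSSI being negative dBm) and negates on read; a zero read means "no samples yet" and falls back to DEFAULT_RSSI, as rt2x00link_get_avg_rssi() above shows. A standalone sketch of the same arithmetic, with lib/average.c's power-of-two shifts reduced to plain multiply/divide:

#include <stdio.h>

#define EWMA_FACTOR	1024	/* fixed-point scale, as RT2X00_EWMA_FACTOR */
#define EWMA_WEIGHT	8	/* smoothing weight, as RT2X00_EWMA_WEIGHT  */
#define DEFAULT_RSSI	(-128)

struct ewma { unsigned long internal; };	/* average scaled by FACTOR */

static void ewma_add(struct ewma *e, unsigned long val)
{
	e->internal = e->internal ?
		(e->internal * (EWMA_WEIGHT - 1) + val * EWMA_FACTOR) / EWMA_WEIGHT :
		val * EWMA_FACTOR;	/* first sample seeds the average */
}

static unsigned long ewma_read(const struct ewma *e)
{
	return e->internal / EWMA_FACTOR;
}

int main(void)
{
	struct ewma rssi = { 0 };
	int samples[] = { -55, -60, -70, -70 };

	/* struct ewma is unsigned, so store -rssi and negate on read. */
	for (int i = 0; i < 4; i++)
		ewma_add(&rssi, -samples[i]);

	long avg = ewma_read(&rssi);
	printf("avg rssi: %ld dBm\n", avg ? -avg : DEFAULT_RSSI);
	return 0;
}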
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802..7c15785 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -382,11 +382,11 @@
 	 * of different types, but has no a separate filter for PS Poll frames,
 	 * FIF_CONTROL flag implies FIF_PSPOLL.
 	 */
-	if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
 		if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
 			*total_flags |= FIF_CONTROL | FIF_PSPOLL;
 	}
-	if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
 		if (*total_flags & FIF_CONTROL)
 			*total_flags |= FIF_PSPOLL;
 	}
@@ -469,7 +469,7 @@
 	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
 		return 0;
 
-	if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+	if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
 		return -EOPNOTSUPP;
 
 	/*
@@ -754,6 +754,9 @@
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct data_queue *queue;
 
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		return;
+
 	tx_queue_for_each(rt2x00dev, queue)
 		rt2x00queue_flush_queue(queue, drop);
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e52..25da20e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -119,7 +119,7 @@
 	rt2x00dev->ops = ops;
 	rt2x00dev->hw = hw;
 	rt2x00dev->irq = pci_dev->irq;
-	rt2x00dev->name = pci_name(pci_dev);
+	rt2x00dev->name = ops->name;
 
 	if (pci_is_pcie(pci_dev))
 		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b..50590b1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@
 	 * at least 8 bytes available in headroom for IV/EIV
 	 * and 8 bytes for ICV data as tailroom.
 	 */
-	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
 		head_size += 8;
 		tail_size += 8;
 	}
@@ -1033,38 +1033,21 @@
 
 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 {
-	bool started;
 	bool tx_queue =
 		(queue->qid == QID_AC_VO) ||
 		(queue->qid == QID_AC_VI) ||
 		(queue->qid == QID_AC_BE) ||
 		(queue->qid == QID_AC_BK);
 
-	mutex_lock(&queue->status_lock);
 
 	/*
-	 * If the queue has been started, we must stop it temporarily
-	 * to prevent any new frames to be queued on the device. If
-	 * we are not dropping the pending frames, the queue must
-	 * only be stopped in the software and not the hardware,
-	 * otherwise the queue will never become empty on its own.
+	 * If we are not supposed to drop any pending
+	 * frames, this means we must force a start (=kick)
+	 * to the queue to make sure the hardware will
+	 * start transmitting.
 	 */
-	started = test_bit(QUEUE_STARTED, &queue->flags);
-	if (started) {
-		/*
-		 * Pause the queue
-		 */
-		rt2x00queue_pause_queue(queue);
-
-		/*
-		 * If we are not supposed to drop any pending
-		 * frames, this means we must force a start (=kick)
-		 * to the queue to make sure the hardware will
-		 * start transmitting.
-		 */
-		if (!drop && tx_queue)
-			queue->rt2x00dev->ops->lib->kick_queue(queue);
-	}
+	if (!drop && tx_queue)
+		queue->rt2x00dev->ops->lib->kick_queue(queue);
 
 	/*
 	 * Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@
 	if (unlikely(!rt2x00queue_empty(queue)))
 		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
 			    queue->qid);
-
-	/*
-	 * Restore the queue to the previous status
-	 */
-	if (started)
-		rt2x00queue_unpause_queue(queue);
-
-	mutex_unlock(&queue->status_lock);
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 8828987..4e12162 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@
 	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
 		    queue->qid);
 
+	rt2x00queue_stop_queue(queue);
 	rt2x00queue_flush_queue(queue, true);
+	rt2x00queue_start_queue(queue);
 }
 
 static int rt2x00usb_dma_timeout(struct data_queue *queue)
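With the pause/unpause logic removed from rt2x00queue_flush_queue() in the previous file, the flush itself only kicks and drains; keeping new frames away from the hardware is now the caller's job, which is why the USB DMA-timeout path above brackets the dropping flush with an explicit stop/start. A kernel-context sketch of the resulting contract (the queue functions are the driver's own; the wrapper is illustrative):

/* Illustrative wrapper: callers now bracket a dropping flush with an
 * explicit stop/start instead of relying on the flush to pause. */
static void reset_tx_queue(struct data_queue *queue)
{
	rt2x00queue_stop_queue(queue);		/* no new frames reach the HW */
	rt2x00queue_flush_queue(queue, true);	/* drop whatever is pending   */
	rt2x00queue_start_queue(queue);		/* resume normal operation    */
}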
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddf..a5b69cb 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@
 
 	rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
 	rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-			  !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+			  !rt2x00_has_cap_frame_type(rt2x00dev));
 
 	/*
 	 * Configure the RX antenna.
@@ -813,10 +813,10 @@
 
 	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
 		sel = antenna_sel_a;
-		lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+		lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
 	} else {
 		sel = antenna_sel_bg;
-		lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+		lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@
 	else if (rt2x00_rf(rt2x00dev, RF2527))
 		rt61pci_config_antenna_2x(rt2x00dev, ant);
 	else if (rt2x00_rf(rt2x00dev, RF2529)) {
-		if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_double_antenna(rt2x00dev))
 			rt61pci_config_antenna_2x(rt2x00dev, ant);
 		else
 			rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@
 	short lna_gain = 0;
 
 	if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
 			lna_gain += 14;
 
 		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
 		lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
 	} else {
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_external_lna_a(rt2x00dev))
 			lna_gain += 14;
 
 		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@
 	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
 		low_bound = 0x28;
 		up_bound = 0x48;
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
 			low_bound += 0x10;
 			up_bound += 0x10;
 		}
 	} else {
 		low_bound = 0x20;
 		up_bound = 0x40;
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
 			low_bound += 0x10;
 			up_bound += 0x10;
 		}
@@ -2578,7 +2578,7 @@
 	 * eeprom word.
 	 */
 	if (rt2x00_rf(rt2x00dev, RF2529) &&
-	    !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+	    !rt2x00_has_cap_double_antenna(rt2x00dev)) {
 		rt2x00dev->default_ant.rx =
 		    ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
 		rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
 
-	if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
 		spec->num_channels = 14;
 		spec->channels = rf_vals_noseq;
 	} else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e..1baf9c8 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@
 	switch (ant->rx) {
 	case ANTENNA_HW_DIVERSITY:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
-		temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
-		       && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+		temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+		       (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
 		break;
 	case ANTENNA_A:
@@ -636,7 +636,7 @@
 
 	rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
 	rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-			  !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+			  !rt2x00_has_cap_frame_type(rt2x00dev));
 
 	/*
 	 * Configure the RX antenna.
@@ -709,10 +709,10 @@
 
 	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
 		sel = antenna_sel_a;
-		lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+		lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
 	} else {
 		sel = antenna_sel_bg;
-		lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+		lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@
 	short lna_gain = 0;
 
 	if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
 			lna_gain += 14;
 
 		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@
 		low_bound = 0x28;
 		up_bound = 0x48;
 
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
 			low_bound += 0x10;
 			up_bound += 0x10;
 		}
@@ -946,7 +946,7 @@
 			up_bound = 0x1c;
 		}
 
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
 			low_bound += 0x14;
 			up_bound += 0x10;
 		}
@@ -1661,7 +1661,7 @@
 	}
 
 	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
-		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+		if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
 			if (lna == 3 || lna == 2)
 				offset += 10;
 		} else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b2..a91506b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@
 	iounmap(priv->map);
 
  err_free_dev:
-	pci_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(dev);
 
  err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a..9a78e3d 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1613,6 +1613,35 @@
 }
 EXPORT_SYMBOL(rtl_send_smps_action);
 
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	enum io_type iotype;
+
+	if (!is_hal_stop(rtlhal)) {
+		switch (operation) {
+		case SCAN_OPT_BACKUP:
+			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *)&iotype);
+			break;
+		case SCAN_OPT_RESTORE:
+			iotype = IO_CMD_RESUME_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *)&iotype);
+			break;
+		default:
+			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "Unknown Scan Backup operation.\n");
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
 /* There seem to be issues in mac80211 regarding when del ba frames can be
  * received. As a work around, we make a fake del_ba if we receive a ba_req;
  * however, rx_agg was opened to let mac80211 release some ba related
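rtl_phy_scan_operation_backup() consolidates the identical per-chip scan backup/restore handlers (deleted from rtl8188ee and rtl8192c further down) into the shared base module; each chip's ops table then simply points at the common helper. A fragment mirroring the sw.c hunks below, with the surrounding fields elided:

/* Illustrative: per-chip ops tables share one library callback
 * instead of carrying identical per-chip copies. */
static struct rtl_hal_ops rtl8188ee_hal_ops = {
	/* ... */
	.scan_operation_backup = rtl_phy_scan_operation_backup,
	/* ... */
};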
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe09..0cd0742 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
 void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@
 bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
 struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
 				u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35e0008..0105e6c 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -41,12 +41,12 @@
 #define	CAM_CONFIG_USEDK				1
 #define	CAM_CONFIG_NO_USEDK				0
 
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-			u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
-			u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+			 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+			 u32 ul_default_key, u8 *key_content);
 int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-			u32 ul_key_id);
+			     u32 ul_key_id);
 void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 733b7ce..210ce7c 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -115,7 +115,7 @@
 	mutex_lock(&rtlpriv->locks.conf_mutex);
 
 	mac->link_state = MAC80211_NOLINK;
-	memset(mac->bssid, 0, 6);
+	memset(mac->bssid, 0, ETH_ALEN);
 	mac->vendor = PEER_UNKNOWN;
 
 	/*reset sec info */
@@ -280,7 +280,7 @@
 	mac->p2p = 0;
 	mac->vif = NULL;
 	mac->link_state = MAC80211_NOLINK;
-	memset(mac->bssid, 0, 6);
+	memset(mac->bssid, 0, ETH_ALEN);
 	mac->vendor = PEER_UNKNOWN;
 	mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
 	rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@
 			mac->link_state = MAC80211_LINKED;
 			mac->cnt_after_linked = 0;
 			mac->assoc_id = bss_conf->aid;
-			memcpy(mac->bssid, bss_conf->bssid, 6);
+			memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
 
 			if (rtlpriv->cfg->ops->linked_set_reg)
 				rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@
 			if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
 				rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
 			mac->link_state = MAC80211_NOLINK;
-			memset(mac->bssid, 0, 6);
+			memset(mac->bssid, 0, ETH_ALEN);
 			mac->vendor = PEER_UNKNOWN;
 
 			if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@
 			 bss_conf->bssid);
 
 		mac->vendor = PEER_UNKNOWN;
-		memcpy(mac->bssid, bss_conf->bssid, 6);
+		memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
 		rtlpriv->cfg->ops->set_network_type(hw, vif->type);
 
 		rcu_read_lock();
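The memset/memcpy changes in core.c replace the magic length 6 with ETH_ALEN, making the intent (a MAC address) explicit and greppable. A trivial standalone illustration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* <linux/if_ether.h> in kernel code */

int main(void)
{
	unsigned char bssid[ETH_ALEN] = { 0 };
	const unsigned char ap[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	memcpy(bssid, ap, ETH_ALEN);	/* associate: copy the AP's address */
	memset(bssid, 0, ETH_ALEN);	/* disassociate: clear it again     */
	printf("cleared %zu bytes\n", sizeof(bssid));
	return 0;
}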
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed..ae13fb9 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1203,20 +1203,18 @@
 
 static u16 efuse_get_current_size(struct ieee80211_hw *hw)
 {
-	int continual = true;
 	u16 efuse_addr = 0;
 	u8 hworden;
 	u8 efuse_data, word_cnts;
 
-	while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
-	       && (efuse_addr < EFUSE_MAX_SIZE)) {
-		if (efuse_data != 0xFF) {
-			hworden = efuse_data & 0x0F;
-			word_cnts = efuse_calculate_word_cnts(hworden);
-			efuse_addr = efuse_addr + (word_cnts * 2) + 1;
-		} else {
-			continual = false;
-		}
+	while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+	       efuse_addr < EFUSE_MAX_SIZE) {
+		if (efuse_data == 0xFF)
+			break;
+
+		hworden = efuse_data & 0x0F;
+		word_cnts = efuse_calculate_word_cnts(hworden);
+		efuse_addr = efuse_addr + (word_cnts * 2) + 1;
 	}
 
 	return efuse_addr;
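The efuse_get_current_size() rewrite above drops the 'continual' flag in favour of a plain break when the 0xFF terminator is hit, flattening one level of nesting without changing behaviour. A standalone sketch of the same loop shape (the skip arithmetic is simplified here relative to the driver's word-count decode):

#include <stdio.h>

/* Illustrative scan over a byte map: 0xFF now exits via break
 * instead of clearing a loop-control flag. */
static unsigned find_used(const unsigned char *map, unsigned max)
{
	unsigned addr = 0;

	while (addr < max) {
		if (map[addr] == 0xFF)
			break;			/* unused space: stop scanning */
		addr += (map[addr] & 0x0F) + 1;	/* skip header + payload */
	}
	return addr;
}

int main(void)
{
	unsigned char map[8] = { 0x02, 0, 0, 0x01, 0, 0xFF, 0xFF, 0xFF };
	printf("used: %u bytes\n", find_used(map, sizeof(map)));
	return 0;
}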
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 395a326..1663b3a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -104,20 +104,19 @@
 	u8 tx_power_g[14];
 };
 
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
-		       u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
-			      u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
-			       u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+		       u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+			u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839..0f49444 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@
 
 	struct rtl_stats stats = {
 		.signal = 0,
-		.noise = -98,
 		.rate = 0,
 	};
 	int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@
 fail1:
 	if (hw)
 		ieee80211_free_hw(hw);
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 
 	return err;
@@ -2064,8 +2062,6 @@
 
 	rtl_pci_disable_aspm(hw);
 
-	pci_set_drvdata(pdev, NULL);
-
 	ieee80211_free_hw(hw);
 }
 EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3..e06971b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@
 		} else {
 			rtlhal->fw_clk_change_in_progress = false;
 			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+			break;
 		}
 	}
 
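The one-line fix above adds the missing break on the path where no firmware clock change is in progress; without it the while loop would re-take the lock and spin forever once the wait condition cleared. A standalone sketch of the control flow, with the locking reduced to comments and the wait condition faked with a countdown:

#include <stdio.h>

int main(void)
{
	int in_progress = 3;

	while (1) {
		/* spin_lock_bh(&fw_ps_lock); */
		if (in_progress > 0) {
			/* spin_unlock_bh(); udelay(100); then retry */
			in_progress--;
		} else {
			/* done: record state, drop the lock ... */
			/* spin_unlock_bh(); */
			break;	/* ... and leave the loop (the fix) */
		}
	}
	printf("loop exited cleanly\n");
	return 0;
}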
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c04..d67f9c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@
 					   &bw40_pwr[0], channel);
 }
 
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	enum io_type iotype;
-
-	if (!is_hal_stop(rtlhal)) {
-		switch (operation) {
-		case SCAN_OPT_BACKUP:
-			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		case SCAN_OPT_RESTORE:
-			iotype = IO_CMD_RESUME_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		default:
-			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-				 "Unknown Scan Backup operation.\n");
-			break;
-		}
-	}
-}
-
 void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d..89f0f1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -200,37 +200,35 @@
 	CGCS_RX_SW_ANTDIV		= 0x05,
 };
 
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
-				   u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
-				  u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
-				   enum radio_path rfpath, u32 regaddr,
-				   u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
-				  enum radio_path rfpath, u32 regaddr,
-				  u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
-					 long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
-					     u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
-				   enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+			    u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+			   u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+			    enum radio_path rfpath, u32 regaddr,
+			    u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+			   enum radio_path rfpath, u32 regaddr,
+			   u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+				  long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+			    enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
 void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 					  enum radio_path rfpath);
 bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
-					  enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+				   enum rf_pwrstate rfpwr_state);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693..347af1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -244,7 +245,7 @@
 	.set_bw_mode = rtl88e_phy_set_bw_mode,
 	.switch_channel = rtl88e_phy_sw_chnl,
 	.dm_watchdog = rtl88e_dm_watchdog,
-	.scan_operation_backup = rtl88e_phy_scan_operation_backup,
+	.scan_operation_backup = rtl_phy_scan_operation_backup,
 	.set_rf_power_state = rtl88e_phy_set_rf_power_state,
 	.led_control = rtl88ee_led_control,
 	.set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a8..aece6c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@
 
 	/*rx_status->qual = status->signal; */
 	rx_status->signal = status->recvsignalpower + 10;
-	/*rx_status->noise = -status->noise; */
 	if (status->packet_report_type == TX_REPORT2) {
 		status->macid_valid_entry[0] =
 			 GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a2..e9caa5d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@
 
 static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
 {
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
-	u8 h2c_parameter[3] = { 0 };
-
-	return;
-
-	if (tmpentry_max_pwdb != 0) {
-		rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
-	} else {
-		rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
-	}
-
-	if (tmpentry_min_pwdb != 0xff) {
-		rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
-	} else {
-		rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
-	}
-
-	h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
-	h2c_parameter[0] = 0;
-
-	rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
 }
 
 void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@
 	s8 cck_index = 0;
 	int i;
 	bool is2t = IS_92C_SERIAL(rtlhal->version);
-	s8 txpwr_level[2] = {0, 0};
+	s8 txpwr_level[3] = {0, 0, 0};
 	u8 ofdm_min_index = 6, rf;
 
 	rtlpriv->dm.txpower_trackinginit = true;
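Bumping txpwr_level from two to three slots sizes the scratch array for every RF path the tracking code can index (the matching ofdm_index fix for rtl8192de appears later in this series), turning a stack out-of-bounds write into an in-bounds store. A minimal illustration of the idiom, with the path count as an assumed constant:

#include <stdio.h>

#define MAX_RF_PATHS 3	/* assumed upper bound; size buffers to it once */

int main(void)
{
	signed char txpwr_level[MAX_RF_PATHS] = { 0 };

	for (int path = 0; path < MAX_RF_PATHS; path++)	/* no OOB on path 2 */
		txpwr_level[path] = (signed char)(10 + path);

	printf("%d %d %d\n", txpwr_level[0], txpwr_level[1], txpwr_level[2]);
	return 0;
}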
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e535..0c0e782 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@
 }
 EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
 
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	enum io_type iotype;
-
-	if (!is_hal_stop(rtlhal)) {
-		switch (operation) {
-		case SCAN_OPT_BACKUP:
-			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-
-			break;
-		case SCAN_OPT_RESTORE:
-			iotype = IO_CMD_RESUME_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		default:
-			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-				 "Unknown Scan Backup operation\n");
-			break;
-		}
-	}
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
 			    enum nl80211_channel_type ch_type)
 {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d6..e79dabe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)		false
 #define HIGHPOWER_RADIOA_ARRAYLEN	22
 
-#define IQK_ADDA_REG_NUM		16
 #define MAX_TOLERANCE			5
-#define	IQK_DELAY_TIME			1
 
 #define	APK_BB_REG_NUM			5
 #define	APK_AFE_REG_NUM			16
@@ -205,8 +203,6 @@
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
 bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
 					  long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
-					     u8 operation);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
 				   enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb..fa24de4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@
 #define IS_VENDOR_UMC_A_CUT(version)	((IS_CHIP_VENDOR_UMC(version)) ? \
 	((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
 #define IS_CHIP_VER_B(version)  ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version)	((IS_CHIP_VENDOR_UMC(version)) ? \
-	((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
 #define IS_92C_SERIAL(version)  ((version & CHIP_92C_BITMASK) ? true : false)
 #define IS_CHIP_VENDOR_UMC(version)		\
 	((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b70..94486cc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)		false
 #define HIGHPOWER_RADIOA_ARRAYLEN	22
 
-#define IQK_ADDA_REG_NUM		16
 #define MAX_TOLERANCE			5
-#define	IQK_DELAY_TIME			1
 
 #define	APK_BB_REG_NUM			5
 #define	APK_AFE_REG_NUM			16
@@ -188,36 +186,29 @@
 };
 
 bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
-				   u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-				  u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
-				   enum radio_path rfpath, u32 regaddr,
-				   u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
-				   enum radio_path rfpath, u32 regaddr,
-				   u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+			   u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+			    u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+			    u32 regaddr, u32 bitmask, u32 data);
 bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
 bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-						 enum radio_path rfpath);
+					  enum radio_path rfpath);
 void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
-					 long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
 bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
 					  long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
-					     u8 operation);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-				   enum nl80211_channel_type ch_type);
+			    enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
 u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
 void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
-					 u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
 void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
 void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
 void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@
 bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 					  enum radio_path rfpath);
 bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
-					      u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+				       u32 rfpath);
 bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
-					  enum rf_pwrstate rfpwr_state);
+				    enum rf_pwrstate rfpwr_state);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
 void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-				      enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+			       u32 offset);
 u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-					 enum radio_path rfpath, u32 offset);
+				  enum radio_path rfpath, u32 offset);
 u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
 void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-					enum radio_path rfpath, u32 offset,
-					u32 data);
+				 enum radio_path rfpath, u32 offset, u32 data);
 void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-					   enum radio_path rfpath, u32 offset,
-					   u32 data);
+				    enum radio_path rfpath, u32 offset,
+				    u32 data);
 void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-						   u32 regaddr, u32 bitmask,
-						   u32 data);
+					    u32 regaddr, u32 bitmask, u32 data);
 bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
 void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
 bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef7..8922ecb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
 #define	EEPROM_DEFAULT_TXPOWERLEVEL		0x22
 #define	EEPROM_DEFAULT_HT40_2SDIFF		0x0
 #define EEPROM_DEFAULT_HT20_DIFF		2
-#define	EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF	0x3
 #define EEPROM_DEFAULT_HT40_PWRMAXOFFSET	0
 #define EEPROM_DEFAULT_HT20_PWRMAXOFFSET	0
 
@@ -639,17 +638,8 @@
 
 #define	EEPROM_TXPWR_GROUP			0x6F
 
-#define EEPROM_TSSI_A				0x76
-#define EEPROM_TSSI_B				0x77
-#define EEPROM_THERMAL_METER			0x78
-
 #define EEPROM_CHANNELPLAN			0x75
 
-#define RF_OPTION1				0x79
-#define RF_OPTION2				0x7A
-#define RF_OPTION3				0x7B
-#define RF_OPTION4				0x7C
-
 #define	STOPBECON				BIT(6)
 #define	STOPHIGHT				BIT(5)
 #define	STOPMGT					BIT(4)
@@ -689,13 +679,6 @@
 #define RSV_CTRL				0x001C
 #define RD_CTRL					0x0524
 
-#define REG_USB_INFO				0xFE17
-#define REG_USB_SPECIAL_OPTION			0xFE55
-
-#define REG_USB_DMA_AGG_TO			0xFE5B
-#define REG_USB_AGG_TO				0xFE5C
-#define REG_USB_AGG_TH				0xFE5D
-
 #define REG_USB_VID				0xFE60
 #define REG_USB_PID				0xFE62
 #define REG_USB_OPTIONAL			0xFE64
@@ -1196,9 +1179,6 @@
 #define POLLING_LLT_THRESHOLD			20
 #define POLLING_READY_TIMEOUT_COUNT		1000
 
-#define	MAX_MSS_DENSITY_2T			0x13
-#define	MAX_MSS_DENSITY_1T			0x0A
-
 #define EPROM_CMD_OPERATING_MODE_MASK	((1<<7)|(1<<6))
 #define EPROM_CMD_CONFIG			0x3
 #define EPROM_CMD_LOAD				1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 6c8d56e..d8fe68b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,11 +34,10 @@
 #define RF6052_MAX_REG			0x3F
 #define RF6052_MAX_PATH			2
 
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-					     u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-					       u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-						u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					 u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 1420356..b790320 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -219,7 +220,7 @@
 	.set_bw_mode = rtl92c_phy_set_bw_mode,
 	.switch_channel = rtl92c_phy_sw_chnl,
 	.dm_watchdog = rtl92c_dm_watchdog,
-	.scan_operation_backup = rtl92c_phy_scan_operation_backup,
+	.scan_operation_backup = rtl_phy_scan_operation_backup,
 	.set_rf_power_state = rtl92c_phy_set_rf_power_state,
 	.led_control = rtl92ce_led_control,
 	.set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b4..52abf0a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@
 
 	/*rx_status->qual = stats->signal; */
 	rx_status->signal = stats->recvsignalpower + 10;
-	/*rx_status->noise = -stats->noise; */
 
 	return true;
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587..3936853 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
 #include "../usb.h"
 #include "../ps.h"
 #include "../cam.h"
+#include "../stats.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -738,16 +739,6 @@
 	return ret_val;
 }
 
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
-				     u8 signal_strength_index)
-{
-	long signal_power;
-
-	signal_power = (long)((signal_strength_index + 1) >> 1);
-	signal_power -= 95;
-	return signal_power;
-}
-
 static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
 		long currsig)
 {
@@ -913,180 +904,6 @@
 			  (hw, total_rssi /= rf_rx_num));
 }
 
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
-		struct rtl_stats *pstats)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_phy *rtlphy = &(rtlpriv->phy);
-	u8 rfpath;
-	u32 last_rssi, tmpval;
-
-	if (pstats->packet_toself || pstats->packet_beacon) {
-		rtlpriv->stats.rssi_calculate_cnt++;
-		if (rtlpriv->stats.ui_rssi.total_num++ >=
-		    PHY_RSSI_SLID_WIN_MAX) {
-			rtlpriv->stats.ui_rssi.total_num =
-			    PHY_RSSI_SLID_WIN_MAX;
-			last_rssi =
-			    rtlpriv->stats.ui_rssi.elements[rtlpriv->
-							   stats.ui_rssi.index];
-			rtlpriv->stats.ui_rssi.total_val -= last_rssi;
-		}
-		rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
-		rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
-					index++] = pstats->signalstrength;
-		if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
-			rtlpriv->stats.ui_rssi.index = 0;
-		tmpval = rtlpriv->stats.ui_rssi.total_val /
-		    rtlpriv->stats.ui_rssi.total_num;
-		rtlpriv->stats.signal_strength =
-		    _rtl92c_translate_todbm(hw, (u8) tmpval);
-		pstats->rssi = rtlpriv->stats.signal_strength;
-	}
-	if (!pstats->is_cck && pstats->packet_toself) {
-		for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
-		     rfpath++) {
-			if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
-				continue;
-			if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
-				rtlpriv->stats.rx_rssi_percentage[rfpath] =
-				    pstats->rx_mimo_signalstrength[rfpath];
-			}
-			if (pstats->rx_mimo_signalstrength[rfpath] >
-			    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
-				rtlpriv->stats.rx_rssi_percentage[rfpath] =
-				    ((rtlpriv->stats.
-				      rx_rssi_percentage[rfpath] *
-				      (RX_SMOOTH_FACTOR - 1)) +
-				     (pstats->rx_mimo_signalstrength[rfpath])) /
-				    (RX_SMOOTH_FACTOR);
-
-				rtlpriv->stats.rx_rssi_percentage[rfpath] =
-				    rtlpriv->stats.rx_rssi_percentage[rfpath] +
-				    1;
-			} else {
-				rtlpriv->stats.rx_rssi_percentage[rfpath] =
-				    ((rtlpriv->stats.
-				      rx_rssi_percentage[rfpath] *
-				      (RX_SMOOTH_FACTOR - 1)) +
-				     (pstats->rx_mimo_signalstrength[rfpath])) /
-				    (RX_SMOOTH_FACTOR);
-			}
-		}
-	}
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
-					       struct rtl_stats *pstats)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	int weighting = 0;
-
-	if (rtlpriv->stats.recv_signal_power == 0)
-		rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
-	if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
-		weighting = 5;
-	else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
-		weighting = (-5);
-	rtlpriv->stats.recv_signal_power =
-	    (rtlpriv->stats.recv_signal_power * 5 +
-	     pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
-		struct rtl_stats *pstats)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	long undec_sm_pwdb = 0;
-
-	if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-		return;
-	} else {
-		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
-	}
-	if (pstats->packet_toself || pstats->packet_beacon) {
-		if (undec_sm_pwdb < 0)
-			undec_sm_pwdb = pstats->rx_pwdb_all;
-		if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
-			undec_sm_pwdb = (((undec_sm_pwdb) *
-			      (RX_SMOOTH_FACTOR - 1)) +
-			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-			undec_sm_pwdb += 1;
-		} else {
-			undec_sm_pwdb = (((undec_sm_pwdb) *
-			      (RX_SMOOTH_FACTOR - 1)) +
-			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-		}
-		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
-		_rtl92c_update_rxsignalstatistics(hw, pstats);
-	}
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
-					     struct rtl_stats *pstats)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u32 last_evm = 0, n_stream, tmpval;
-
-	if (pstats->signalquality != 0) {
-		if (pstats->packet_toself || pstats->packet_beacon) {
-			if (rtlpriv->stats.LINK_Q.total_num++ >=
-			    PHY_LINKQUALITY_SLID_WIN_MAX) {
-				rtlpriv->stats.LINK_Q.total_num =
-				    PHY_LINKQUALITY_SLID_WIN_MAX;
-				last_evm =
-				    rtlpriv->stats.LINK_Q.elements
-				    [rtlpriv->stats.LINK_Q.index];
-				rtlpriv->stats.LINK_Q.total_val -=
-				    last_evm;
-			}
-			rtlpriv->stats.LINK_Q.total_val +=
-			    pstats->signalquality;
-			rtlpriv->stats.LINK_Q.elements
-			   [rtlpriv->stats.LINK_Q.index++] =
-			    pstats->signalquality;
-			if (rtlpriv->stats.LINK_Q.index >=
-			    PHY_LINKQUALITY_SLID_WIN_MAX)
-				rtlpriv->stats.LINK_Q.index = 0;
-			tmpval = rtlpriv->stats.LINK_Q.total_val /
-			    rtlpriv->stats.LINK_Q.total_num;
-			rtlpriv->stats.signal_quality = tmpval;
-			rtlpriv->stats.last_sigstrength_inpercent = tmpval;
-			for (n_stream = 0; n_stream < 2;
-			     n_stream++) {
-				if (pstats->RX_SIGQ[n_stream] != -1) {
-					if (!rtlpriv->stats.RX_EVM[n_stream]) {
-						rtlpriv->stats.RX_EVM[n_stream]
-						 = pstats->RX_SIGQ[n_stream];
-					}
-					rtlpriv->stats.RX_EVM[n_stream] =
-					    ((rtlpriv->stats.RX_EVM
-					    [n_stream] *
-					    (RX_SMOOTH_FACTOR - 1)) +
-					    (pstats->RX_SIGQ
-					    [n_stream] * 1)) /
-					    (RX_SMOOTH_FACTOR);
-				}
-			}
-		}
-	} else {
-		;
-	}
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
-				     u8 *buffer,
-				     struct rtl_stats *pcurrent_stats)
-{
-	if (!pcurrent_stats->packet_matchbssid &&
-	    !pcurrent_stats->packet_beacon)
-		return;
-	_rtl92c_process_ui_rssi(hw, pcurrent_stats);
-	_rtl92c_process_pwdb(hw, pcurrent_stats);
-	_rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
 void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
 					       struct sk_buff *skb,
 					       struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@
 	_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
 				   packet_matchbssid, packet_toself,
 				   packet_beacon);
-	_rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+	rtl_process_phyinfo(hw, tmp_buf, pstats);
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 090fd33..11b439d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -34,15 +34,14 @@
 #define RF6052_MAX_REG			0x3F
 #define RF6052_MAX_PATH			2
 
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-					    u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-					      u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-					       u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+				       u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					u8 *ppowerlevel, u8 channel);
 bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
 bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
-					  enum radio_path rfpath);
+					   enum radio_path rfpath);
 void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
 					u8 *ppowerlevel);
 void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd5985..9936de7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
 #include "../core.h"
 #include "../usb.h"
 #include "../efuse.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -117,7 +118,7 @@
 	.set_bw_mode = rtl92c_phy_set_bw_mode,
 	.switch_channel = rtl92c_phy_sw_chnl,
 	.dm_watchdog = rtl92c_dm_watchdog,
-	.scan_operation_backup = rtl92c_phy_scan_operation_backup,
+	.scan_operation_backup = rtl_phy_scan_operation_backup,
 	.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
 	.led_control = rtl92cu_led_control,
 	.enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 5a060e5..25e50ff 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -350,7 +350,6 @@
 	}
 	/*rx_status->qual = stats->signal; */
 	rx_status->signal = stats->rssi + 10;
-	/*rx_status->noise = -stats->noise; */
 	return true;
 }
 
@@ -365,7 +364,6 @@
 	u8 *rxdesc;
 	struct rtl_stats stats = {
 		.signal = 0,
-		.noise = -98,
 		.rate = 0,
 	};
 	struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a..7908e1c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@
 	bool internal_pa = false;
 	long ele_a = 0, ele_d, temp_cck, val_x, value32;
 	long val_y, ele_c = 0;
-	u8 ofdm_index[2];
+	u8 ofdm_index[3];
 	s8 cck_index = 0;
-	u8 ofdm_index_old[2] = {0, 0};
+	u8 ofdm_index_old[3] = {0, 0, 0};
 	s8 cck_index_old = 0;
 	u8 index;
 	int i;
@@ -1118,6 +1118,10 @@
 				 val_x, val_y, ele_a, ele_c, ele_d,
 				 val_x, val_y);
 
+			if (cck_index >= CCK_TABLE_SIZE)
+				cck_index = CCK_TABLE_SIZE - 1;
+			if (cck_index < 0)
+				cck_index = 0;
 			if (rtlhal->current_bandtype == BAND_ON_2_4G) {
 				/* Adjust CCK according to IQK result */
 				if (!rtlpriv->dm.cck_inch14) {
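
The dm.c hunk does two things: it widens ofdm_index[]/ofdm_index_old[] from two entries to three, keeping every slot the tracking code can write in bounds, and it clamps cck_index into the valid range before it is used to index the CCK swing table. The explicit pair of checks added above is the open-coded form of a standard kernel helper; an equivalent one-liner, as a sketch rather than what the patch uses:

#include <linux/kernel.h>	/* clamp_val() */

/* same effect as the two explicit range checks added above */
cck_index = clamp_val(cck_index, 0, CCK_TABLE_SIZE - 1);
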
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6d..c4a7db9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@
  * mac80211 will send pkt when scan */
 void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
 {
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	rtl92d_dm_init_edca_turbo(hw);
-	return;
-	switch (aci) {
-	case AC1_BK:
-		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
-		break;
-	case AC0_BE:
-		break;
-	case AC2_VI:
-		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
-		break;
-	case AC3_VO:
-		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
-		break;
-	default:
-		RT_ASSERT(false, "invalid aci: %d !\n", aci);
-		break;
-	}
 }
 
 void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index 7c9f7a2..1bc7b1a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -55,10 +55,9 @@
 		     u8 *p_macaddr, bool is_group, u8 enc_algo,
 		     bool is_wepkey, bool clear_all);
 
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-				    u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-				  u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+			     u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
 void rtl92de_suspend(struct ieee80211_hw *hw);
 void rtl92de_resume(struct ieee80211_hw *hw);
 void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5..13196cc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@
 	rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
 }
 
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	enum io_type iotype;
-
-	if (!is_hal_stop(rtlhal)) {
-		switch (operation) {
-		case SCAN_OPT_BACKUP:
-			rtlhal->current_bandtypebackup =
-						 rtlhal->current_bandtype;
-			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		case SCAN_OPT_RESTORE:
-			iotype = IO_CMD_RESUME_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		default:
-			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-				 "Unknown Scan Backup operation\n");
-			break;
-		}
-	}
-}
-
 void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
 			    enum nl80211_channel_type ch_type)
 {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952..48d5c68 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)			false
 #define HIGHPOWER_RADIOA_ARRAYLEN		22
 
-#define IQK_ADDA_REG_NUM			16
 #define MAX_TOLERANCE				5
-#define	IQK_DELAY_TIME				1
 
 #define	APK_BB_REG_NUM				5
 #define	APK_AFE_REG_NUM				16
@@ -127,34 +125,32 @@
 			*flag);
 }
 
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
-				   u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
-				  u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
-				   enum radio_path rfpath, u32 regaddr,
-				   u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
-				  enum radio_path rfpath, u32 regaddr,
-				  u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-						 enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
-					     u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
-				   enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+			    u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+			   u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+			    enum radio_path rfpath, u32 regaddr,
+			    u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+			   enum radio_path rfpath, u32 regaddr,
+			   u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+					  enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+			    enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
 bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 					  enum rf_content content,
 					  enum radio_path rfpath);
 bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
-					  enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+				   enum rf_pwrstate rfpwr_state);
 
 void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
 void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@
 				       unsigned long *flag);
 u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
 void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 0fe1a48..7303d12 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -30,15 +30,13 @@
 #ifndef __RTL92D_RF_H__
 #define __RTL92D_RF_H__
 
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-					    u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-					      u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-					       u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
-					    bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+				       u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04b..edab5a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -236,7 +237,7 @@
 	.set_bw_mode = rtl92d_phy_set_bw_mode,
 	.switch_channel = rtl92d_phy_sw_chnl,
 	.dm_watchdog = rtl92d_dm_watchdog,
-	.scan_operation_backup = rtl92d_phy_scan_operation_backup,
+	.scan_operation_backup = rtl_phy_scan_operation_backup,
 	.set_rf_power_state = rtl92d_phy_set_rf_power_state,
 	.led_control = rtl92de_led_control,
 	.set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718..945ddec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -526,7 +526,6 @@
 	}
 	/*rx_status->qual = stats->signal; */
 	rx_status->signal = stats->rssi + 10;
-	/*rx_status->noise = -stats->noise; */
 	return true;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181..c81c835 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
 #define	EXT_IMEM_CODE_DONE			BIT(2)
 #define	IMEM_CHK_RPT				BIT(1)
 #define	IMEM_CODE_DONE				BIT(0)
-#define	IMEM_CODE_DONE				BIT(0)
-#define	IMEM_CHK_RPT				BIT(1)
 #define	EMEM_CODE_DONE				BIT(2)
 #define	EMEM_CHK_RPT				BIT(3)
-#define	DMEM_CODE_DONE				BIT(4)
 #define	IMEM_RDY				BIT(5)
-#define	BASECHG					BIT(6)
-#define	FWRDY					BIT(7)
 #define	LOAD_FW_READY				(IMEM_CODE_DONE | \
 						IMEM_CHK_RPT | \
 						EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c709511..222d2e7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -330,7 +330,6 @@
 
 	/*rx_status->qual = stats->signal; */
 	rx_status->signal = stats->rssi + 10;
-	/*rx_status->noise = -stats->noise; */
 
 	return true;
 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18..5d318a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@
 	return pwrout_dbm;
 }
 
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	enum io_type iotype;
-
-	if (!is_hal_stop(rtlhal)) {
-		switch (operation) {
-		case SCAN_OPT_BACKUP:
-			iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-
-			break;
-		case SCAN_OPT_RESTORE:
-			iotype = IO_CMD_RESUME_DM_BY_SCAN;
-			rtlpriv->cfg->ops->set_hw_reg(hw,
-						      HW_VAR_IO_CMD,
-						      (u8 *)&iotype);
-			break;
-		default:
-			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-				 "Unknown Scan Backup operation.\n");
-			break;
-		}
-	}
-}
-
 void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
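
This is another byte-for-byte copy of the scan backup/restore helper removed in this series (the rtl8192de one is deleted above); every driver now points its .scan_operation_backup op at the shared rtl_phy_scan_operation_backup() in the core. Reconstructed from the copies deleted here (the 92de copy's current_bandtypebackup bookkeeping aside), the shared helper's shape is:

/* Reconstructed from the per-driver copies deleted in this series;
 * the actual shared helper lives in rtlwifi core code not shown in
 * this diff. */
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	enum io_type iotype;

	if (is_hal_stop(rtlhal))
		return;

	switch (operation) {
	case SCAN_OPT_BACKUP:
		/* pause dynamic mechanisms while scanning off-channel */
		iotype = IO_CMD_PAUSE_DM_BY_SCAN;
		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
					      (u8 *)&iotype);
		break;
	case SCAN_OPT_RESTORE:
		iotype = IO_CMD_RESUME_DM_BY_SCAN;
		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
					      (u8 *)&iotype);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Unknown Scan Backup operation\n");
		break;
	}
}
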
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eb..007ebdb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -183,42 +183,40 @@
 	u32 mcs_original_offset[4][16];
 };
 
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
-				      u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-				     u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
-				      enum radio_path rfpath, u32 regaddr,
-				      u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
-				     enum radio_path rfpath, u32 regaddr,
-				     u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-						 enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
-					    long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
-					    u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
-					     long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
-						u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
-				      enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+			       u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+			      u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+			       enum radio_path rfpath, u32 regaddr,
+			       u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+			      enum radio_path rfpath, u32 regaddr,
+			      u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+					  enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+				     long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+				     u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+				      long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+			       enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
 void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 					     enum radio_path rfpath);
 bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
-					     enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+				      enum rf_pwrstate rfpwr_state);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index d0f9dd7..57f1933 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -32,12 +32,11 @@
 
 #define RF6052_MAX_TX_PWR		0x3F
 
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-					    u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-					      u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-					       u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					  u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					   u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2ef..62b204f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
 
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -220,7 +221,7 @@
 	.set_bw_mode = rtl8723ae_phy_set_bw_mode,
 	.switch_channel = rtl8723ae_phy_sw_chnl,
 	.dm_watchdog = rtl8723ae_dm_watchdog,
-	.scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+	.scan_operation_backup = rtl_phy_scan_operation_backup,
 	.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
 	.led_control = rtl8723ae_led_control,
 	.set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1..50b7be3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@
 
 	/*rx_status->qual = status->signal; */
 	rx_status->signal = status->recvsignalpower + 10;
-	/*rx_status->noise = -status->noise; */
 
 	return true;
 }
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778c..6e2b5c5 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@
 	struct ieee80211_rx_status rx_status = {0};
 	struct rtl_stats stats = {
 		.signal = 0,
-		.noise = -98,
 		.rate = 0,
 	};
 
@@ -498,7 +497,6 @@
 	struct ieee80211_rx_status rx_status = {0};
 	struct rtl_stats stats = {
 		.signal = 0,
-		.noise = -98,
 		.rate = 0,
 	};
 
@@ -582,12 +580,15 @@
 static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
 					unsigned int len)
 {
+#if NET_IP_ALIGN != 0
 	unsigned int padding = 0;
+#endif
 
 	/* make function no-op when possible */
 	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
 		return 0;
 
+#if NET_IP_ALIGN != 0
 	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
 	/* TODO: deduplicate common code, define helper function instead? */
 
@@ -608,6 +609,7 @@
 		padding ^= NET_IP_ALIGN;
 
 	return padding;
+#endif
 }
 
 #define __RADIO_TAP_SIZE_RSV	32
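
The padding helper above already returns early when NET_IP_ALIGN is 0, but on such architectures the compiler still sees a set-but-unused `padding` and a dead computation after the early return; wrapping both in `#if NET_IP_ALIGN != 0` compiles them out entirely. The same pattern in isolation, where MIN_HDR_LEN and the one-line alignment computation are illustrative stand-ins:

#include <linux/skbuff.h>	/* NET_IP_ALIGN */

#define MIN_HDR_LEN	24	/* illustrative */

static unsigned int rx_padding(const unsigned char *data, unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding;
#endif
	/* constant-folds to an unconditional return when NET_IP_ALIGN == 0 */
	if (NET_IP_ALIGN == 0 || len < MIN_HDR_LEN)
		return 0;

#if NET_IP_ALIGN != 0
	/* illustrative computation; the real one is in the hunk above */
	padding = (unsigned long)(data + MIN_HDR_LEN) & (NET_IP_ALIGN - 1);
	return padding;
#endif
}
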
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 7032587..d224dc3 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -192,8 +192,6 @@
 (IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
 #define	IS_HARDWARE_TYPE_8723(rtlhal)			\
 (IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal)			\
-	(rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
 
 #define RX_HAL_IS_CCK_RATE(_pdesc)\
 	(_pdesc->rxmcs == DESC92_RATE1M ||		\
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6fe..1342f81 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@
 	struct wl1251 *wl;
 	int ret;
 
-	pdata = spi->dev.platform_data;
+	pdata = dev_get_platdata(&spi->dev);
 	if (!pdata) {
 		wl1251_error("no platform data");
 		return -ENODEV;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060..2c3bd1b 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@
 #define CHIP_ID_1271_PG10	           (0x4030101)
 #define CHIP_ID_1271_PG20	           (0x4030111)
 
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
 
 #define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
 
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da..be7129b 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -333,11 +333,11 @@
 		.always                        = 0,
 	},
 	.fwlog = {
-		.mode                         = WL12XX_FWLOG_ON_DEMAND,
+		.mode                         = WL12XX_FWLOG_CONTINUOUS,
 		.mem_blocks                   = 2,
 		.severity                     = 0,
 		.timestamp                    = WL12XX_FWLOG_TIMESTAMP_DISABLED,
-		.output                       = WL12XX_FWLOG_OUTPUT_HOST,
+		.output                       = WL12XX_FWLOG_OUTPUT_DBG_PINS,
 		.threshold                    = 0,
 	},
 	.rate = {
@@ -717,6 +717,9 @@
 		goto out;
 	}
 
+	wl->fw_mem_block_size = 256;
+	wl->fwlog_end = 0x2000000;
+
 	/* common settings */
 	wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
 	wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
@@ -1262,9 +1265,10 @@
 		BA_SESSION_RX_CONSTRAINT_EVENT_ID |
 		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
 		INACTIVE_STA_EVENT_ID |
-		MAX_TX_RETRY_EVENT_ID |
 		CHANNEL_SWITCH_COMPLETE_EVENT_ID;
 
+	wl->ap_event_mask = MAX_TX_RETRY_EVENT_ID;
+
 	ret = wlcore_boot_run_firmware(wl);
 	if (ret < 0)
 		goto out;
@@ -1648,6 +1652,11 @@
 	return true;
 }
 
+static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+	return hwaddr << 5;
+}
+
 static int wl12xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl12xx_ops = {
@@ -1684,6 +1693,7 @@
 	.channel_switch		= wl12xx_cmd_channel_switch,
 	.pre_pkt_send		= NULL,
 	.set_peer_cap		= wl12xx_set_peer_cap,
+	.convert_hwaddr		= wl12xx_convert_hwaddr,
 	.lnk_high_prio		= wl12xx_lnk_high_prio,
 	.lnk_low_prio		= wl12xx_lnk_low_prio,
 };
@@ -1704,7 +1714,7 @@
 static int wl12xx_setup(struct wl1271 *wl)
 {
 	struct wl12xx_priv *priv = wl->priv;
-	struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
 
 	wl->rtable = wl12xx_rtable;
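
Two per-chip parameters become generic wl->fields here: fw_mem_block_size/fwlog_end (256 bytes and 0x2000000 for 12xx; the wl18xx hunk below sets 272 and 0x40000000), and ap_event_mask, which pulls the AP-only MAX_TX_RETRY event out of the always-on mask. The AP events are then toggled as AP interfaces come and go; the pairing, as wired up in the wlcore init.c and main.c hunks later in this section:

/* first AP interface starting (wlcore/init.c): */
wl->event_mask |= wl->ap_event_mask;
ret = wl1271_event_unmask(wl);
if (ret < 0)
	return ret;

/* last AP interface stopping (wlcore/main.c): */
wl->event_mask &= ~wl->ap_event_mask;
wl1271_event_unmask(wl);
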
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb8..ec37b16 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -456,11 +456,11 @@
 		.always                        = 0,
 	},
 	.fwlog = {
-		.mode                         = WL12XX_FWLOG_ON_DEMAND,
+		.mode                         = WL12XX_FWLOG_CONTINUOUS,
 		.mem_blocks                   = 2,
 		.severity                     = 0,
 		.timestamp                    = WL12XX_FWLOG_TIMESTAMP_DISABLED,
-		.output                       = WL12XX_FWLOG_OUTPUT_HOST,
+		.output                       = WL12XX_FWLOG_OUTPUT_DBG_PINS,
 		.threshold                    = 0,
 	},
 	.rate = {
@@ -505,7 +505,7 @@
 
 static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
 	.ht = {
-		.mode				= HT_MODE_DEFAULT,
+		.mode				= HT_MODE_WIDE,
 	},
 	.phy = {
 		.phy_standalone			= 0x00,
@@ -516,7 +516,7 @@
 		.auto_detect			= 0x00,
 		.dedicated_fem			= FEM_NONE,
 		.low_band_component		= COMPONENT_3_WAY_SWITCH,
-		.low_band_component_type	= 0x04,
+		.low_band_component_type	= 0x05,
 		.high_band_component		= COMPONENT_2_WAY_SWITCH,
 		.high_band_component_type	= 0x09,
 		.tcxo_ldo_voltage		= 0x00,
@@ -556,15 +556,15 @@
 		.per_chan_pwr_limit_arr_11p	= { 0xff, 0xff, 0xff, 0xff,
 						    0xff, 0xff, 0xff },
 		.psat				= 0,
-		.low_power_val			= 0x08,
-		.med_power_val			= 0x12,
-		.high_power_val			= 0x18,
-		.low_power_val_2nd		= 0x05,
-		.med_power_val_2nd		= 0x0a,
-		.high_power_val_2nd		= 0x14,
 		.external_pa_dc2dc		= 0,
 		.number_of_assembled_ant2_4	= 2,
 		.number_of_assembled_ant5	= 1,
+		.low_power_val			= 0xff,
+		.med_power_val			= 0xff,
+		.high_power_val			= 0xff,
+		.low_power_val_2nd		= 0xff,
+		.med_power_val_2nd		= 0xff,
+		.high_power_val_2nd		= 0xff,
 		.tx_rf_margin			= 1,
 	},
 };
@@ -623,6 +623,18 @@
 	[REG_RAW_FW_STATUS_ADDR]	= WL18XX_FW_STATUS_ADDR,
 };
 
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+	[CLOCK_CONFIG_16_2_M]	= { 8,  121, 0, 0, false },
+	[CLOCK_CONFIG_16_368_M]	= { 8,  120, 0, 0, false },
+	[CLOCK_CONFIG_16_8_M]	= { 8,  117, 0, 0, false },
+	[CLOCK_CONFIG_19_2_M]	= { 10, 128, 0, 0, false },
+	[CLOCK_CONFIG_26_M]	= { 11, 104, 0, 0, false },
+	[CLOCK_CONFIG_32_736_M]	= { 8,  120, 0, 0, false },
+	[CLOCK_CONFIG_33_6_M]	= { 8,  117, 0, 0, false },
+	[CLOCK_CONFIG_38_468_M]	= { 10, 128, 0, 0, false },
+	[CLOCK_CONFIG_52_M]	= { 11, 104, 0, 0, false },
+};
+
 static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
 	[CLOCK_CONFIG_16_2_M]	= { 7,  104,  801, 4,  true },
 	[CLOCK_CONFIG_16_368_M]	= { 9,  132, 3751, 4,  true },
@@ -674,6 +686,9 @@
 		goto out;
 	}
 
+	wl->fw_mem_block_size = 272;
+	wl->fwlog_end = 0x40000000;
+
 	wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
 	wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
 	wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
@@ -704,6 +719,23 @@
 		     wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
 		     wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
 
+	/* coex PLL configuration */
+	ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+				   wl18xx_clk_table_coex[clk_freq].n);
+	if (ret < 0)
+		goto out;
+
+	ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+				   wl18xx_clk_table_coex[clk_freq].m);
+	if (ret < 0)
+		goto out;
+
+	/* bypass the swallowing logic */
+	ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+				   PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+	if (ret < 0)
+		goto out;
+
 	ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
 				   wl18xx_clk_table[clk_freq].n);
 	if (ret < 0)
@@ -745,6 +777,30 @@
 					   PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
 	}
 
+	/* choose WCS PLL */
+	ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+				   PLLSH_WL_PLL_SEL_WCS_PLL);
+	if (ret < 0)
+		goto out;
+
+	/* enable both PLLs */
+	ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+	if (ret < 0)
+		goto out;
+
+	udelay(1000);
+
+	/* disable coex PLL */
+	ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+	if (ret < 0)
+		goto out;
+
+	/* reset the swallowing logic */
+	ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+				   PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+	if (ret < 0)
+		goto out;
+
 out:
 	return ret;
 }
@@ -935,10 +991,11 @@
 		BA_SESSION_RX_CONSTRAINT_EVENT_ID |
 		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
 		INACTIVE_STA_EVENT_ID |
-		MAX_TX_FAILURE_EVENT_ID |
 		CHANNEL_SWITCH_COMPLETE_EVENT_ID |
 		DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
 
+	wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
+
 	ret = wlcore_boot_run_firmware(wl);
 	if (ret < 0)
 		goto out;
@@ -1175,16 +1232,48 @@
 	}
 }
 
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+	switch (rdl_num) {
+	case RDL_1_HP:
+		return "183xH";
+	case RDL_2_SP:
+		return "183x or 180x";
+	case RDL_3_HP:
+		return "187xH";
+	case RDL_4_SP:
+		return "187x";
+	case RDL_5_SP:
+		return "RDL11 - Not Supported";
+	case RDL_6_SP:
+		return "180xD";
+	case RDL_7_SP:
+		return "RDL13 - Not Supported (1893Q)";
+	case RDL_8_SP:
+		return "18xxQ";
+	case RDL_NONE:
+		return "UNTRIMMED";
+	default:
+		return "UNKNOWN";
+	}
+}
+
 static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
 {
 	u32 fuse;
-	s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+	s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
 	int ret;
 
 	ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
 	if (ret < 0)
 		goto out;
 
+	ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+	if (ret < 0)
+		goto out;
+
+	package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
 	ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
 	if (ret < 0)
 		goto out;
@@ -1192,7 +1281,7 @@
 	pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
 	rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
 
-	if (rom <= 0xE)
+	if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
 		metal = (fuse & WL18XX_METAL_VER_MASK) >>
 			WL18XX_METAL_VER_OFFSET;
 	else
@@ -1204,11 +1293,9 @@
 		goto out;
 
 	rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
-	if (rdl_ver > RDL_MAX)
-		rdl_ver = RDL_NONE;
 
-	wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
-		    rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+	wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+		    wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
 
 	if (ver)
 		*ver = pg_ver;
@@ -1521,6 +1608,11 @@
 	return lnk->allocated_pkts < thold;
 }
 
+static u32 wl18xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+	return hwaddr & ~0x80000000;
+}
+
 static int wl18xx_setup(struct wl1271 *wl);
 
 static struct wlcore_ops wl18xx_ops = {
@@ -1558,6 +1650,7 @@
 	.pre_pkt_send	= wl18xx_pre_pkt_send,
 	.sta_rc_update	= wl18xx_sta_rc_update,
 	.set_peer_cap	= wl18xx_set_peer_cap,
+	.convert_hwaddr = wl18xx_convert_hwaddr,
 	.lnk_high_prio	= wl18xx_lnk_high_prio,
 	.lnk_low_prio	= wl18xx_lnk_low_prio,
 };
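
The 18xx boot path gains a second PLL table (wl18xx_clk_table_coex) and a bring-up sequence that programs the coex PLL, selects the WCS PLL, briefly enables both, then parks the coex PLL again; every step is a top-register write checked with the usual `if (ret < 0) goto out` idiom. (The 1 ms udelay() between enabling and disabling the PLLs is a busy-wait; usleep_range() is the usual choice where the caller may sleep.) A table-driven variant would compress the repetition; a sketch only, assuming wl18xx_top_reg_write() keeps its (wl, addr, val) signature, and not what the driver does:

struct top_reg_val {
	int reg;
	u16 val;
};

static int wl18xx_top_reg_write_seq(struct wl1271 *wl,
				    const struct top_reg_val *seq, size_t n)
{
	size_t i;
	int ret = 0;

	for (i = 0; i < n; i++) {
		ret = wl18xx_top_reg_write(wl, seq[i].reg, seq[i].val);
		if (ret < 0)
			break;	/* callers treat any failure as fatal */
	}
	return ret;
}
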
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8ba..a433a75 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
 #define PLATFORM_DETECTION		0xA0E3E0
 #define OCS_EN				0xA02080
 #define PRIMARY_CLK_DETECT		0xA020A6
+#define PLLSH_COEX_PLL_N		0xA02384
+#define PLLSH_COEX_PLL_M		0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN	0xA0238E
+#define PLLSH_WL_PLL_SEL		0xA02398
+
 #define PLLSH_WCS_PLL_N			0xA02362
 #define PLLSH_WCS_PLL_M			0xA02360
 #define PLLSH_WCS_PLL_Q_FACTOR_CFG_1	0xA02364
@@ -128,19 +133,30 @@
 #define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK	0xFFFF
 #define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK	0x000F
 
+#define PLLSH_WL_PLL_EN_VAL1		0x7
+#define PLLSH_WL_PLL_EN_VAL2		0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1	0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2	0x11
+
 #define PLLSH_WCS_PLL_SWALLOW_EN_VAL1	0x1
 #define PLLSH_WCS_PLL_SWALLOW_EN_VAL2	0x12
 
+#define PLLSH_WL_PLL_SEL_WCS_PLL	0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL	0x1
+
 #define WL18XX_REG_FUSE_DATA_1_3	0xA0260C
 #define WL18XX_PG_VER_MASK		0x70
 #define WL18XX_PG_VER_OFFSET		4
-#define WL18XX_ROM_VER_MASK		0x3
-#define WL18XX_ROM_VER_OFFSET		0
+#define WL18XX_ROM_VER_MASK		0x3e00
+#define WL18XX_ROM_VER_OFFSET		9
 #define WL18XX_METAL_VER_MASK		0xC
 #define WL18XX_METAL_VER_OFFSET		2
 #define WL18XX_NEW_METAL_VER_MASK	0x180
 #define WL18XX_NEW_METAL_VER_OFFSET	7
 
+#define WL18XX_PACKAGE_TYPE_OFFSET	13
+#define WL18XX_PACKAGE_TYPE_WSP		0
+
 #define WL18XX_REG_FUSE_DATA_2_3	0xA02614
 #define WL18XX_RDL_VER_MASK		0x1f00
 #define WL18XX_RDL_VER_OFFSET		8
@@ -201,24 +217,21 @@
 	NUM_BOARD_TYPES,
 };
 
-enum {
+enum wl18xx_rdl_num {
 	RDL_NONE	= 0,
 	RDL_1_HP	= 1,
 	RDL_2_SP	= 2,
 	RDL_3_HP	= 3,
 	RDL_4_SP	= 4,
+	RDL_5_SP	= 0x11,
+	RDL_6_SP	= 0x12,
+	RDL_7_SP	= 0x13,
+	RDL_8_SP	= 0x14,
 
 	_RDL_LAST,
 	RDL_MAX = _RDL_LAST - 1,
 };
 
-static const char * const rdl_names[] = {
-	[RDL_NONE]	= "",
-	[RDL_1_HP]	= "1853 SISO",
-	[RDL_2_SP]	= "1857 MIMO",
-	[RDL_3_HP]	= "1893 SISO",
-	[RDL_4_SP]	= "1897 MIMO",
-};
 
 /* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
 #define WL18XX_PHY_FPGA_SPARE_1		0x8093CA40
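
The fuse layout changes here (the ROM version grows to five bits at offset 9, and a package-type bit appears at offset 13 of FUSE_DATA_2_3) are exactly what the wl18xx_get_pg_ver() hunk above decodes. A worked example of the mask/shift convention, using made-up fuse words rather than real silicon values:

/* hypothetical fuse readings, for illustration only */
u32 fuse1 = 0x00002e50;	/* WL18XX_REG_FUSE_DATA_1_3 */
u32 fuse2 = 0x00002100;	/* WL18XX_REG_FUSE_DATA_2_3 */

u8 pg_ver = (fuse1 & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
				/* (0x2e50 & 0x0070) >> 4 = 5 */
u8 rom    = (fuse1 & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
				/* (0x2e50 & 0x3e00) >> 9 = 0x17 */
u8 pkg    = (fuse2 >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
				/* bit 13 of 0x2100 -> 1, i.e. not WSP */
u8 rdl    = (fuse2 & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
				/* (0x2100 & 0x1f00) >> 8 = 1 -> RDL_1_HP */
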
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 7a970cd..ec83675 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -162,7 +162,8 @@
 
 	wl1271_debug(DEBUG_ACX, "acx mem map");
 
-	ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len);
+	ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map,
+				     sizeof(struct acx_header), len);
 	if (ret < 0)
 		return ret;
 
@@ -722,6 +723,7 @@
 	wl1271_debug(DEBUG_ACX, "acx statistics");
 
 	ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
+				     sizeof(struct acx_header),
 				     wl->stats.fw_stats_len);
 	if (ret < 0) {
 		wl1271_warning("acx statistics failed: %d", ret);
@@ -1470,8 +1472,8 @@
 
 	tsf_info->role_id = wlvif->role_id;
 
-	ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO,
-				     tsf_info, sizeof(*tsf_info));
+	ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info,
+				sizeof(struct acx_header), sizeof(*tsf_info));
 	if (ret < 0) {
 		wl1271_warning("acx tsf info interrogate failed");
 		goto out;
@@ -1752,7 +1754,7 @@
 
 	acx->role_id = wlvif->role_id;
 	ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL,
-				     acx, sizeof(*acx));
+				     acx, sizeof(*acx), sizeof(*acx));
 	if (ret	< 0) {
 		wl1271_warning("acx roaming statistics failed: %d", ret);
 		ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e0607..34d9dff 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,7 +60,8 @@
 	u16 status;
 	u16 poll_count = 0;
 
-	if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+	if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
+		    id != CMD_STOP_FWLOGGER))
 		return -EIO;
 
 	cmd = buf;
@@ -845,7 +846,8 @@
  * @buf: buffer for the response, including all headers, must work with dma
  * @len: length of buf
  */
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+			   size_t cmd_len, size_t res_len)
 {
 	struct acx_header *acx = buf;
 	int ret;
@@ -854,10 +856,10 @@
 
 	acx->id = cpu_to_le16(id);
 
-	/* payload length, does not include any headers */
-	acx->len = cpu_to_le16(len - sizeof(*acx));
+	/* response payload length, does not include any headers */
+	acx->len = cpu_to_le16(res_len - sizeof(*acx));
 
-	ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
+	ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, cmd_len, res_len);
 	if (ret < 0)
 		wl1271_error("INTERROGATE command failed");
 
@@ -1126,6 +1128,8 @@
 	u16 template_id_2_4 = wl->scan_templ_id_2_4;
 	u16 template_id_5 = wl->scan_templ_id_5;
 
+	wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
 	skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
 				     ie_len);
 	if (!skb) {
@@ -1135,8 +1139,6 @@
 	if (ie_len)
 		memcpy(skb_put(skb, ie_len), ie, ie_len);
 
-	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
 	if (sched_scan &&
 	    (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
 		template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1174,7 @@
 	if (!skb)
 		goto out;
 
-	wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+	wl1271_debug(DEBUG_SCAN, "set ap probe request template");
 
 	rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
 	if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1609,43 @@
 
 static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
 {
-	int idx = -1;
-
+	/*
+	 * map the given band/channel to the respective predefined
+	 * bit expected by the fw
+	 */
 	switch (band) {
-	case IEEE80211_BAND_5GHZ:
-		if (ch >= 8 && ch <= 16)
-			idx = ((ch-8)/4 + 18);
-		else if (ch >= 34 && ch <= 64)
-			idx = ((ch-34)/2 + 3 + 18);
-		else if (ch >= 100 && ch <= 140)
-			idx = ((ch-100)/4 + 15 + 18);
-		else if (ch >= 149 && ch <= 165)
-			idx = ((ch-149)/4 + 26 + 18);
-		else
-			idx = -1;
-		break;
 	case IEEE80211_BAND_2GHZ:
+		/* channels 1..14 are mapped to 0..13 */
 		if (ch >= 1 && ch <= 14)
-			idx = ch - 1;
-		else
-			idx = -1;
+			return ch - 1;
+		break;
+	case IEEE80211_BAND_5GHZ:
+		switch (ch) {
+		case 8 ... 16:
+			/* channels 8,12,16 are mapped to 18,19,20 */
+			return 18 + (ch-8)/4;
+		case 34 ... 48:
+			/* channels 34,36..48 are mapped to 21..28 */
+			return 21 + (ch-34)/2;
+		case 52 ... 64:
+			/* channels 52,56..64 are mapped to 29..32 */
+			return 29 + (ch-52)/4;
+		case 100 ... 140:
+			/* channels 100,104..140 are mapped to 33..43 */
+			return 33 + (ch-100)/4;
+		case 149 ... 165:
+			/* channels 149,153..165 are mapped to 44..48 */
+			return 44 + (ch-149)/4;
+		default:
+			break;
+		}
 		break;
 	default:
-		wl1271_error("get reg conf ch idx - unknown band: %d",
-			     (int)band);
+		break;
 	}
 
-	return idx;
+	wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+	return -1;
 }
 
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1658,7 @@
 
 	ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
 
-	if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+	if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
 		set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
 }
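
The rewrite turns the mapping into explicit case ranges with the bit layout documented inline. It also fixes two real defects visible above: the old single 34–64 formula kept the 2-channel stride past channel 48, pushing channels 52–64 to bits 30/32/34/36 and colliding with the 100+ block (which starts at bit 33); and the `> 0` test in wlcore_set_pending_regdomain_ch() silently dropped bit 0 (2.4 GHz channel 1), so it now reads `>= 0`. A few worked values of the new mapping:

/* worked examples of the band/channel -> bit mapping above:
 *   2.4 GHz ch 1   ->  1 - 1              = bit 0  (now accepted by >= 0)
 *   2.4 GHz ch 11  -> 11 - 1              = bit 10
 *   5 GHz   ch 36  -> 21 + (36 - 34)/2    = bit 22
 *   5 GHz   ch 104 -> 33 + (104 - 100)/4  = bit 34
 *   5 GHz   ch 165 -> 44 + (165 - 149)/4  = bit 48
 */
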
 
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index fd34123..323d4a8 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -45,7 +45,8 @@
 		     enum ieee80211_band band, int channel);
 int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+			   size_t cmd_len, size_t res_len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
 				  size_t len, unsigned long valid_rets);
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 2b96ff8..40995c4 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1274,6 +1274,9 @@
 	u8 always;
 } __packed;
 
+#define CONF_FWLOG_MIN_MEM_BLOCKS	2
+#define CONF_FWLOG_MAX_MEM_BLOCKS	16
+
 struct conf_fwlog {
 	/* Continuous or on-demand */
 	u8 mode;
@@ -1281,7 +1284,7 @@
 	/*
 	 * Number of memory blocks dedicated for the FW logger
 	 *
-	 * Range: 1-3, or 0 to disable the FW logger
+	 * Range: 2-16, or 0 to disable the FW logger
 	 */
 	u8 mem_blocks;
 
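The new 2–16 range matches the CONF_FWLOG_MIN/MAX_MEM_BLOCKS bounds above, which the fwlog_mem_blocks module parameter added to wlcore/main.c later in this section is validated against (e.g. `modprobe wlcore fwlog_mem_blocks=4`, assuming wlcore is built as a module); out-of-range values log an error and keep the per-chip default.
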
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index e17630c..89893c7 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -437,6 +437,7 @@
 	int res = 0;
 	ssize_t ret;
 	char *buf;
+	struct wl12xx_vif *wlvif;
 
 #define DRIVER_STATE_BUF_LEN 1024
 
@@ -450,12 +451,28 @@
 	(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
 			  #x " = " fmt "\n", wl->x))
 
+#define DRIVER_STATE_PRINT_GENERIC(x, fmt, args...)   \
+	(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
+			  #x " = " fmt "\n", args))
+
 #define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
 #define DRIVER_STATE_PRINT_INT(x)  DRIVER_STATE_PRINT(x, "%d")
 #define DRIVER_STATE_PRINT_STR(x)  DRIVER_STATE_PRINT(x, "%s")
 #define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx")
 #define DRIVER_STATE_PRINT_HEX(x)  DRIVER_STATE_PRINT(x, "0x%x")
 
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+			continue;
+
+		DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+					   wlvif->p2p ? "P2P-CL" : "STA");
+	}
+
+	wl12xx_for_each_wlvif_ap(wl, wlvif)
+		DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+					   wlvif->p2p ? "P2P-GO" : "AP");
+
 	DRIVER_STATE_PRINT_INT(tx_blocks_available);
 	DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
 	DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
@@ -474,7 +491,6 @@
 	DRIVER_STATE_PRINT_INT(tx_blocks_freed);
 	DRIVER_STATE_PRINT_INT(rx_counter);
 	DRIVER_STATE_PRINT_INT(state);
-	DRIVER_STATE_PRINT_INT(channel);
 	DRIVER_STATE_PRINT_INT(band);
 	DRIVER_STATE_PRINT_INT(power_level);
 	DRIVER_STATE_PRINT_INT(sg_enabled);
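
DRIVER_STATE_PRINT_GENERIC exists because the new per-vif channel lines print computed values rather than a plain wl->field, which the older macros cannot express. Like them it leans on scnprintf(), which returns the number of bytes actually written (unlike snprintf(), which returns what would have fit), so `res` can keep serving as the running offset without ever overrunning the 1024-byte buffer. Expanded, one AP line is just:

res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,
		 "channel = %d (%s)\n",
		 wlvif->channel, wlvif->p2p ? "P2P-GO" : "AP");
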
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 67f6168..8d3b349 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -266,6 +266,7 @@
 {
 	int ret;
 
+	wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
 	ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
 	if (ret < 0)
 		return ret;
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 7fd260c..51f8d63 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -222,6 +222,15 @@
 	return 0;
 }
 
+static inline u32
+wlcore_hw_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+	if (!wl->ops->convert_hwaddr)
+		BUG_ON(1);
+
+	return wl->ops->convert_hwaddr(wl, hwaddr);
+}
+
 static inline bool
 wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
 			struct wl1271_link *lnk)
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5c6f11e..7699f9d 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -571,6 +571,12 @@
 		ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
 		if (ret < 0)
 			return ret;
+
+		/* unmask ap events */
+		wl->event_mask |= wl->ap_event_mask;
+		ret = wl1271_event_unmask(wl);
+		if (ret < 0)
+			return ret;
 	/* first STA, no APs */
 	} else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index af7d9f9..07e3d6a 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -165,8 +165,8 @@
 	int physical;
 	int addr;
 
-	/* Addresses are stored internally as addresses to 32 bytes blocks */
-	addr = hwaddr << 5;
+	/* Convert from FW internal address which is chip arch dependent */
+	addr = wl->ops->convert_hwaddr(wl, hwaddr);
 
 	physical = wlcore_translate_addr(wl, addr);
 
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f9..0368b9c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -44,6 +44,7 @@
 #define WL1271_BOOT_RETRIES 3
 
 static char *fwlog_param;
+static int fwlog_mem_blocks = -1;
 static int bug_on_recovery = -1;
 static int no_recovery     = -1;
 
@@ -291,6 +292,18 @@
 {
 	/* Adjust settings according to optional module parameters */
 
+	/* Firmware Logger params */
+	if (fwlog_mem_blocks != -1) {
+		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
+		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
+			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
+		} else {
+			wl1271_error(
+				"Illegal fwlog_mem_blocks=%d using default %d",
+				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
+		}
+	}
+
 	if (fwlog_param) {
 		if (!strcmp(fwlog_param, "continuous")) {
 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -780,6 +793,7 @@
 	if (wl->state == WLCORE_STATE_ON) {
 		wl->state = WLCORE_STATE_RESTARTING;
 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+		wl1271_ps_elp_wakeup(wl);
 		wlcore_disable_interrupts_nosync(wl);
 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
 	}
@@ -787,19 +801,10 @@
 
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
 {
-	size_t len = 0;
-
-	/* The FW log is a length-value list, find where the log end */
-	while (len < maxlen) {
-		if (memblock[len] == 0)
-			break;
-		if (len + memblock[len] + 1 > maxlen)
-			break;
-		len += memblock[len] + 1;
-	}
+	size_t len;
 
 	/* Make sure we have enough room */
-	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
+	len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
 
 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -808,10 +813,9 @@
 	return len;
 }
 
-#define WLCORE_FW_LOG_END 0x2000000
-
 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
 {
+	struct wlcore_partition_set part, old_part;
 	u32 addr;
 	u32 offset;
 	u32 end_of_log;
@@ -824,7 +828,7 @@
 
 	wl1271_info("Reading FW panic log");
 
-	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
+	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
 	if (!block)
 		return;
 
@@ -850,17 +854,31 @@
 
 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
-		end_of_log = WLCORE_FW_LOG_END;
+		end_of_log = wl->fwlog_end;
 	} else {
 		offset = sizeof(addr);
 		end_of_log = addr;
 	}
 
+	old_part = wl->curr_part;
+	memset(&part, 0, sizeof(part));
+
 	/* Traverse the memory blocks linked list */
 	do {
-		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
-		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
-					 false);
+		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
+		part.mem.size  = PAGE_SIZE;
+
+		ret = wlcore_set_partition(wl, &part);
+		if (ret < 0) {
+			wl1271_error("%s: set_partition start=0x%X size=%d",
+				__func__, part.mem.start, part.mem.size);
+			goto out;
+		}
+
+		memset(block, 0, wl->fw_mem_block_size);
+		ret = wlcore_read_hwaddr(wl, addr, block,
+					wl->fw_mem_block_size, false);
+
 		if (ret < 0)
 			goto out;
 
@@ -871,8 +889,9 @@
 		 * on demand mode and is equal to 0x2000000 in continuous mode.
 		 */
 		addr = le32_to_cpup((__le32 *)block);
+
 		if (!wl12xx_copy_fwlog(wl, block + offset,
-				       WL12XX_HW_BLOCK_SIZE - offset))
+					wl->fw_mem_block_size - offset))
 			break;
 	} while (addr && (addr != end_of_log));
 
@@ -880,6 +899,7 @@
 
 out:
 	kfree(block);
+	wlcore_set_partition(wl, &old_part);
 }
 
 static void wlcore_print_recovery(struct wl1271 *wl)
@@ -924,7 +944,8 @@
 		goto out_unlock;
 
 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
-		wl12xx_read_fwlog_panic(wl);
+		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
+			wl12xx_read_fwlog_panic(wl);
 		wlcore_print_recovery(wl);
 	}
 
@@ -1062,7 +1083,8 @@
 	static const char* const PLT_MODE[] = {
 		"PLT_OFF",
 		"PLT_ON",
-		"PLT_FEM_DETECT"
+		"PLT_FEM_DETECT",
+		"PLT_CHIP_AWAKE"
 	};
 
 	int ret;
@@ -1088,9 +1110,11 @@
 		if (ret < 0)
 			goto power_off;
 
-		ret = wl->ops->plt_init(wl);
-		if (ret < 0)
-			goto power_off;
+		if (plt_mode != PLT_CHIP_AWAKE) {
+			ret = wl->ops->plt_init(wl);
+			if (ret < 0)
+				goto power_off;
+		}
 
 		wl->state = WLCORE_STATE_ON;
 		wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -1925,8 +1949,10 @@
 
 	/*
 	 * FW channels must be re-calibrated after recovery,
-	 * clear the last Reg-Domain channel configuration.
+	 * save current Reg-Domain channel configuration and clear it.
 	 */
+	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
+	       sizeof(wl->reg_ch_conf_pending));
 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
 }
 
@@ -2008,6 +2034,47 @@
 	mutex_unlock(&wl->mutex);
 }
 
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wl1271 *wl;
+	struct wl12xx_vif *wlvif;
+	unsigned long time_spare;
+	int ret;
+
+	dwork = container_of(work, struct delayed_work, work);
+	wlvif = container_of(dwork, struct wl12xx_vif,
+			     pending_auth_complete_work);
+	wl = wlvif->wl;
+
+	mutex_lock(&wl->mutex);
+
+	if (unlikely(wl->state != WLCORE_STATE_ON))
+		goto out;
+
+	/*
+	 * Make sure a second really passed since the last auth reply. Maybe
+	 * a second auth reply arrived while we were stuck on the mutex.
+	 * Check for a little less than the timeout to protect from scheduler
+	 * irregularities.
+	 */
+	time_spare = jiffies +
+			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	/* cancel the ROC if active */
+	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+}
+
 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
 {
 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2226,8 @@
 			  wlcore_channel_switch_work);
 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
 			  wlcore_connection_loss_work);
+	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+			  wlcore_pending_auth_complete_work);
 	INIT_LIST_HEAD(&wlvif->list);
 
 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2445,11 @@
 	int ret = 0;
 	u8 role_type;
 
+	if (wl->plt) {
+		wl1271_error("Adding Interface not allowed while in PLT mode");
+		return -EBUSY;
+	}
+
 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
 
@@ -2572,6 +2646,12 @@
 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
 		goto unlock;
 
+	if (wl->ap_count == 0 && is_ap) {
+		/* mask ap events */
+		wl->event_mask &= ~wl->ap_event_mask;
+		wl1271_event_unmask(wl);
+	}
+
 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
 		/* Configure for power according to debugfs */
@@ -2590,6 +2670,7 @@
 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
+	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
 
 	mutex_lock(&wl->mutex);
 }
@@ -2875,6 +2956,25 @@
 	wlvif->rate_set = wlvif->basic_rate_set;
 }
 
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				   bool idle)
+{
+	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+	if (idle == cur_idle)
+		return;
+
+	if (idle) {
+		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+	} else {
+		/* The current firmware only supports sched_scan in idle */
+		if (wl->sched_vif == wlvif)
+			wl->ops->sched_scan_stop(wl, wlvif);
+
+		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+	}
+}
+
 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			     struct ieee80211_conf *conf, u32 changed)
 {
@@ -3969,6 +4069,13 @@
 			}
 		} else {
 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+				/*
+				 * AP might be in ROC in case we have just
+				 * sent auth reply. handle it.
+				 */
+				if (test_bit(wlvif->role_id, wl->roc_map))
+					wl12xx_croc(wl, wlvif->role_id);
+
 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
 				if (ret < 0)
 					goto out;
@@ -4120,6 +4227,9 @@
 		do_join = true;
 	}
 
+	if (changed & BSS_CHANGED_IDLE && !is_ibss)
+		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
 	if (changed & BSS_CHANGED_CQM) {
 		bool enable = false;
 		if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4766,49 @@
 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
 }
 
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
-				     struct wl12xx_vif *wlvif,
-				     struct wl1271_station *wl_sta,
-				     bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      struct wl1271_station *wl_sta, bool in_conn)
 {
-	if (in_connection) {
-		if (WARN_ON(wl_sta->in_connection))
+	if (in_conn) {
+		if (WARN_ON(wl_sta && wl_sta->in_connection))
 			return;
-		wl_sta->in_connection = true;
-		if (!wlvif->inconn_count++)
+
+		if (!wlvif->ap_pending_auth_reply &&
+		    !wlvif->inconn_count)
 			wlcore_roc_if_possible(wl, wlvif);
+
+		if (wl_sta) {
+			wl_sta->in_connection = true;
+			wlvif->inconn_count++;
+		} else {
+			wlvif->ap_pending_auth_reply = true;
+		}
 	} else {
-		if (!wl_sta->in_connection)
+		if (wl_sta && !wl_sta->in_connection)
 			return;
 
-		wl_sta->in_connection = false;
-		wlvif->inconn_count--;
-		if (WARN_ON(wlvif->inconn_count < 0))
+		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
 			return;
 
-		if (!wlvif->inconn_count)
-			if (test_bit(wlvif->role_id, wl->roc_map))
-				wl12xx_croc(wl, wlvif->role_id);
+		if (WARN_ON(wl_sta && !wlvif->inconn_count))
+			return;
+
+		if (wl_sta) {
+			wl_sta->in_connection = false;
+			wlvif->inconn_count--;
+		} else {
+			wlvif->ap_pending_auth_reply = false;
+		}
+
+		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+		    test_bit(wlvif->role_id, wl->roc_map))
+			wl12xx_croc(wl, wlvif->role_id);
 	}
 }
 
@@ -5313,10 +5443,7 @@
 
 /* 5 GHz band channels for WL1273 */
 static struct ieee80211_channel wl1271_channels_5ghz[] = {
-	{ .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
-	{ .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
-	{ .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +6023,20 @@
 };
 #endif
 
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+	return IRQ_WAKE_THREAD;
+}
+
 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
 	struct wl1271 *wl = context;
 	struct platform_device *pdev = wl->pdev;
-	struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
 	unsigned long irqflags;
 	int ret;
+	irq_handler_t hardirq_fn = NULL;
 
 	if (fw) {
 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6065,14 @@
 	wl->platform_quirks = pdata->platform_quirks;
 	wl->if_ops = pdev_data->if_ops;
 
-	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
 		irqflags = IRQF_TRIGGER_RISING;
-	else
+		hardirq_fn = wlcore_hardirq;
+	} else {
 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+	}
 
-	ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
 				   irqflags, pdev->name, wl);
 	if (ret < 0) {
 		wl1271_error("request_irq() failed: %d", ret);
@@ -6046,6 +6181,9 @@
 MODULE_PARM_DESC(fwlog,
 		 "FW logger options: continuous, ondemand, dbgpins or disable");
 
+module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
+
 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
 
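The interrupt change near the end of the main.c diff above deserves a note: with a NULL hard handler, request_threaded_irq() requires IRQF_ONESHOT, which keeps the line masked until the thread finishes. That is safe for the level-triggered (IRQF_TRIGGER_HIGH) case, but an edge-triggered line that fires while masked simply loses the edge. Hence the minimal hard handler for the EDGE_IRQ quirk, which leaves the line unmasked and just kicks the thread:

static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	/* no device access here; all work is deferred to the thread */
	return IRQ_WAKE_THREAD;
}

...
ret = request_threaded_irq(wl->irq, wlcore_hardirq, wlcore_irq,
			   IRQF_TRIGGER_RISING, pdev->name, wl);
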
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d4..26bfc36 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@
 	struct wl12xx_vif *wlvif;
 	u32 timeout;
 
+	/* We do not enter elp sleep in PLT mode */
+	if (wl->plt)
+		return;
+
 	if (wl->sleep_auth != WL1271_PSM_ELP)
 		return;
 
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101..7ed8620 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -92,9 +92,31 @@
 static void wlcore_started_vifs_iter(void *data, u8 *mac,
 				     struct ieee80211_vif *vif)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	bool active = false;
 	int *count = (int *)data;
 
-	if (!vif->bss_conf.idle)
+	/*
+	 * Count active interfaces according to interface type.
+	 * Checking only bss_conf.idle is insufficient in some cases;
+	 * e.g. a station in p2p_find should not count as an active
+	 * interface.
+	 */
+	switch (wlvif->bss_type) {
+	case BSS_TYPE_STA_BSS:
+		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+			active = true;
+		break;
+
+	case BSS_TYPE_AP_BSS:
+		if (wlvif->wl->active_sta_count > 0)
+			active = true;
+		break;
+
+	default:
+		break;
+	}
+
+	if (active)
 		(*count)++;
 }
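
mac80211 invokes an iterator like the one above once per interface, with the void *data cookie shared across calls; counting matches into it is the usual shape. A hedged sketch of how such an iterator is driven (count_active_iter is illustrative, and the flag value assumes the 3.12-era iterate API):

#include <net/mac80211.h>

static void count_active_iter(void *data, u8 *mac,
			      struct ieee80211_vif *vif)
{
	int *count = data;

	/* classify vif here; bump the shared counter on a match */
	(*count)++;
}

static int count_started_vifs(struct ieee80211_hw *hw)
{
	int count = 0;

	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    count_active_iter, &count);
	return count;
}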
 
@@ -174,17 +196,7 @@
 		    /* if radar is set, we ignore the passive flag */
 		    (radar ||
 		     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
-			wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
-				     req_channels[i]->band,
-				     req_channels[i]->center_freq);
-			wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
-				     req_channels[i]->hw_value,
-				     req_channels[i]->flags);
-			wl1271_debug(DEBUG_SCAN, "max_power %d",
-				     req_channels[i]->max_power);
-			wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
-				     min_dwell_time_active,
-				     max_dwell_time_active);
 
 			if (flags & IEEE80211_CHAN_RADAR) {
 				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +234,17 @@
 					     *n_pactive_ch);
 			}
 
+			wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+				     req_channels[i]->center_freq,
+				     req_channels[i]->hw_value,
+				     req_channels[i]->flags,
+				     req_channels[i]->max_power,
+				     min_dwell_time_active,
+				     max_dwell_time_active,
+				     flags & IEEE80211_CHAN_RADAR ?
+					", DFS" : "",
+				     flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+					", PASSIVE" : "");
 			j++;
 		}
 	}
@@ -364,7 +387,7 @@
 	struct cfg80211_ssid *ssids = req->ssids;
 	int ret = 0, type, i, j, n_match_ssids = 0;
 
-	wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+	wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
 
 	/* count the match sets that contain SSIDs */
 	for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +465,6 @@
 		}
 	}
 
-	wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
 	ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
 			      sizeof(*cmd), 0);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98..b2c018d 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@
 	if (!pdev_data)
 		goto out;
 
-	pdev_data->pdata = spi->dev.platform_data;
+	pdev_data->pdata = dev_get_platdata(&spi->dev);
 	if (!pdev_data->pdata) {
 		dev_err(&spi->dev, "no platform data\n");
 		ret = -ENODEV;
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f..ddad58f 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -179,7 +179,8 @@
 		goto out_sleep;
 	}
 
-	ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
+	ret = wl1271_cmd_interrogate(wl, ie_id, cmd,
+				     sizeof(struct acx_header), sizeof(*cmd));
 	if (ret < 0) {
 		wl1271_warning("testmode cmd interrogate failed: %d", ret);
 		goto out_free;
@@ -297,7 +298,8 @@
 		ret = wl1271_plt_stop(wl);
 		break;
 	case PLT_ON:
-		ret = wl1271_plt_start(wl, PLT_ON);
+	case PLT_CHIP_AWAKE:
+		ret = wl1271_plt_start(wl, val);
 		break;
 	case PLT_FEM_DETECT:
 		ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +363,7 @@
 {
 	struct wl1271 *wl = hw->priv;
 	struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+	u32 nla_cmd;
 	int err;
 
 	err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +373,14 @@
 	if (!tb[WL1271_TM_ATTR_CMD_ID])
 		return -EINVAL;
 
-	switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+	nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+	/* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */
+	if (wl->plt_mode == PLT_CHIP_AWAKE &&
+	    nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+		return -EOPNOTSUPP;
+
+	switch (nla_cmd) {
 	case WL1271_TM_CMD_TEST:
 		return wl1271_tm_cmd_test(wl, tb);
 	case WL1271_TM_CMD_INTERROGATE:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe6..87cd707 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@
 EXPORT_SYMBOL(wl1271_free_tx_id);
 
 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+						 struct wl12xx_vif *wlvif,
 						 struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 
+	hdr = (struct ieee80211_hdr *)(skb->data +
+				       sizeof(struct wl1271_tx_hw_descr));
+	if (!ieee80211_is_auth(hdr->frame_control))
+		return;
+
 	/*
 	 * add the station to the known list before transmitting the
 	 * authentication response. this way it won't get de-authed by FW
 	 * when transmitting too soon.
 	 */
-	hdr = (struct ieee80211_hdr *)(skb->data +
-				       sizeof(struct wl1271_tx_hw_descr));
-	if (ieee80211_is_auth(hdr->frame_control))
-		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+	wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+	/*
+	 * ROC for 1 second on the AP channel for completing the connection.
+	 * Note the ROC will be continued by the update_sta_state callbacks
+	 * once the station reaches the associated state.
+	 */
+	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+	wlvif->pending_auth_reply_time = jiffies;
+	cancel_delayed_work(&wlvif->pending_auth_complete_work);
+	ieee80211_queue_delayed_work(wl->hw,
+				&wlvif->pending_auth_complete_work,
+				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
 }
 
 static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@
 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
 			 (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-		if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
 			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
 			if (ret < 0)
 				return ret;
@@ -404,7 +419,7 @@
 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
 	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		wl1271_tx_ap_update_inconnection_sta(wl, skb);
+		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
 		wl1271_tx_regulate_link(wl, wlvif, hlid);
 	}
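
The cancel-then-queue pair above is the standard way to re-arm a one-shot timeout from a hot path: any instance already queued is cancelled, so the WLCORE_PEND_AUTH_ROC_TIMEOUT window restarts at the most recent auth frame. A minimal sketch of the re-arm idiom (struct my_vif and my_timeout_fn are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_TIMEOUT_MS 1000

struct my_vif {
	struct delayed_work timeout_work;
	unsigned long armed_at;
};

static void my_timeout_fn(struct work_struct *work)
{
	struct my_vif *vif = container_of(to_delayed_work(work),
					  struct my_vif, timeout_work);

	/* window expired: release whatever it was protecting */
	(void)vif;
}

static void my_rearm_timeout(struct my_vif *vif)
{
	vif->armed_at = jiffies;

	/* restart the window: cancel any pending instance first */
	cancel_delayed_work(&vif->timeout_work);
	schedule_delayed_work(&vif->timeout_work,
			      msecs_to_jiffies(MY_TIMEOUT_MS));
}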
 
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4ac..35489c3 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
 /* Used for management frames and dummy packets */
 #define WL1271_TID_MGMT 7
 
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT     1000
+
 struct wl127x_tx_mem {
 	/*
 	 * Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979..06efc12 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -110,6 +110,7 @@
 			    struct ieee80211_sta_ht_cap *ht_cap,
 			    bool allow_ht_operation,
 			    u32 rate_set, u8 hlid);
+	u32 (*convert_hwaddr)(struct wl1271 *wl, u32 hwaddr);
 	bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
 			      struct wl1271_link *lnk);
 	bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
@@ -290,6 +291,12 @@
 	/* Number of valid bytes in the FW log buffer */
 	ssize_t fwlog_size;
 
+	/* FW log end marker */
+	u32 fwlog_end;
+
+	/* FW memory block size */
+	u32 fw_mem_block_size;
+
 	/* Sysfs FW log entry readers wait queue */
 	wait_queue_head_t fwlog_waitq;
 
@@ -307,6 +314,8 @@
 
 	/* The mbox event mask */
 	u32 event_mask;
+	/* events to unmask only when ap interface is up */
+	u32 ap_event_mask;
 
 	/* Mailbox pointers */
 	u32 mbox_size;
@@ -481,6 +490,8 @@
 		   struct ieee80211_sta *sta,
 		   struct ieee80211_key_conf *key_conf);
 void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      struct wl1271_station *wl_sta, bool in_conn);
 
 static inline void
 wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e1464..ce7261c 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@
 	WLVIF_FLAG_CS_PROGRESS,
 	WLVIF_FLAG_AP_PROBE_RESP_SET,
 	WLVIF_FLAG_IN_USE,
+	WLVIF_FLAG_ACTIVE,
 };
 
 struct wl12xx_vif;
@@ -307,6 +308,7 @@
 	PLT_OFF = 0,
 	PLT_ON = 1,
 	PLT_FEM_DETECT = 2,
+	PLT_CHIP_AWAKE = 3
 };
 
 struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@
 	 */
 	int hw_queue_base;
 
+	/* do we have a pending auth reply? (and ROC) */
+	bool ap_pending_auth_reply;
+
+	/* time when we sent the pending auth reply */
+	unsigned long pending_auth_reply_time;
+
+	/* work for canceling ROC after pending auth reply */
+	struct delayed_work pending_auth_complete_work;
+
 	/*
 	 * This struct must be last!
 	 * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -539,6 +550,4 @@
 #define HW_HT_RATES_OFFSET	16
 #define HW_MIMO_RATES_OFFSET	24
 
-#define WL12XX_HW_BLOCK_SIZE	256
-
 #endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 7ef0b4a..84d94f5 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1619,7 +1619,7 @@
 	atomic_set(&intr->read_regs_enabled, 1);
 	intr->read_regs.req = req;
 	intr->read_regs.req_count = count;
-	INIT_COMPLETION(intr->read_regs.completion);
+	reinit_completion(&intr->read_regs.completion);
 	spin_unlock_irq(&intr->lock);
 }
 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 400fea1..08ae01b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@
 struct xenvif_rx_meta {
 	int id;
 	int size;
+	int gso_type;
 	int gso_size;
 };
 
+#define GSO_BIT(type) \
+	(1 << XEN_NETIF_GSO_TYPE_ ## type)
+
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
@@ -150,10 +154,12 @@
 	u8               fe_dev_addr[6];
 
 	/* Frontend feature information. */
+	int gso_mask;
+	int gso_prefix_mask;
+
 	u8 can_sg:1;
-	u8 gso:1;
-	u8 gso_prefix:1;
-	u8 csum:1;
+	u8 ip_csum:1;
+	u8 ipv6_csum:1;
 
 	/* Internal feature information. */
 	u8 can_queue:1;	    /* can queue packets for receiver? */
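
GSO_BIT() above manufactures a mask bit from a type name by token pasting, so GSO_BIT(TCPV4) expands to (1 << XEN_NETIF_GSO_TYPE_TCPV4). A standalone demonstration; the constant values restate xen/interface/io/netif.h and should be treated as assumptions here:

#include <stdio.h>

#define XEN_NETIF_GSO_TYPE_NONE  0
#define XEN_NETIF_GSO_TYPE_TCPV4 1
#define XEN_NETIF_GSO_TYPE_TCPV6 2

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

int main(void)
{
	int gso_mask = GSO_BIT(TCPV4) | GSO_BIT(TCPV6);

	printf("mask 0x%x\n", gso_mask);	/* 0x6 */

	/* the negotiation test used in xenvif_fix_features() */
	if (~gso_mask & GSO_BIT(TCPV4))
		printf("TCPV4 GSO not granted\n");
	return 0;
}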
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 459935a..b78ee10 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,10 +214,14 @@
 
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (!vif->gso && !vif->gso_prefix)
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
-	if (!vif->csum)
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+		features &= ~NETIF_F_TSO6;
+	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
+	if (!vif->ipv6_csum)
+		features &= ~NETIF_F_IPV6_CSUM;
 
 	return features;
 }
@@ -306,7 +310,7 @@
 	vif->domid  = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
-	vif->csum = 1;
+	vif->ip_csum = 1;
 	vif->dev = dev;
 
 	vif->credit_bytes = vif->remaining_credit = ~0UL;
@@ -315,8 +319,10 @@
 	vif->credit_window_start = get_jiffies_64();
 
 	dev->netdev_ops	= &xenvif_netdev_ops;
-	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
-	dev->features = dev->hw_features;
+	dev->hw_features = NETIF_F_SG |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO6;
+	dev->features = dev->hw_features | NETIF_F_RXCSUM;
 	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 900da4b..919b650 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
  */
-#define PKT_PROT_LEN    (ETH_HLEN + \
-			 VLAN_HLEN + \
-			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
-			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
@@ -145,7 +142,7 @@
 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso || vif->gso_prefix)
+	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
 	return max;
@@ -317,6 +314,7 @@
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
 	meta = npo->meta + npo->meta_prod++;
+	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
 	meta->id = req->id;
@@ -339,6 +337,7 @@
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
+	int gso_type;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@
 		}
 
 		/* Leave a gap for the GSO descriptor. */
-		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		else
+			gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+		if (*head && ((1 << gso_type) & vif->gso_mask))
 			vif->rx.req_cons++;
 
 		*head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@
 	unsigned char *data;
 	int head = 1;
 	int old_meta_prod;
+	int gso_type;
+	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else {
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+		gso_size = 0;
+	}
+
 	/* Set up a GSO prefix descriptor, if necessary */
-	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -443,10 +463,13 @@
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 	meta = npo->meta + npo->meta_prod++;
 
-	if (!vif->gso_prefix)
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-	else
+	if ((1 << gso_type) & vif->gso_mask) {
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
+	} else {
+		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
+	}
 
 	meta->size = 0;
 	meta->id = req->id;
@@ -592,7 +615,8 @@
 
 		vif = netdev_priv(skb->dev);
 
-		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
 						 vif->rx.rsp_prod_pvt++);
 
@@ -629,7 +653,8 @@
 					vif->meta[npo.meta_cons].size,
 					flags);
 
-		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_mask) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
+			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
 
@@ -1101,15 +1126,20 @@
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	switch (gso->u.gso.type) {
+	case XEN_NETIF_GSO_TYPE_TCPV4:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+		break;
+	case XEN_NETIF_GSO_TYPE_TCPV6:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+		break;
+	default:
 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 		xenvif_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,14 +1148,214 @@
 	return 0;
 }
 
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+	if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+		/* If we need to pullup then pullup to the max, so we
+		 * won't need to do it again.
+		 */
+		int target = min_t(int, skb->len, MAX_TCP_HEADER);
+		__pskb_pull_tail(skb, target - skb_headlen(skb));
+	}
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+			     int recalculate_partial_csum)
+{
+	struct iphdr *iph = (void *)skb->data;
+	unsigned int header_size;
+	unsigned int off;
+	int err = -EPROTO;
+
+	off = sizeof(struct iphdr);
+
+	header_size = skb->network_header + off + MAX_IPOPTLEN;
+	maybe_pull_tail(skb, header_size);
+
+	off = iph->ihl * 4;
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = tcp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct tcphdr);
+			maybe_pull_tail(skb, header_size);
+
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - off,
+							 IPPROTO_TCP, 0);
+		}
+		break;
+	case IPPROTO_UDP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = udp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct udphdr);
+			maybe_pull_tail(skb, header_size);
+
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - off,
+							 IPPROTO_UDP, 0);
+		}
+		break;
+	default:
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Attempting to checksum a non-TCP/UDP packet, "
+				   "dropping a protocol %d packet\n",
+				   iph->protocol);
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
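
The value planted in the checksum field above, ~csum_tcpudp_magic(), is the un-complemented one's-complement sum of just the pseudo header, so whoever completes the CHECKSUM_PARTIAL skb only folds in the transport header and payload. A self-contained sketch of that seed over the IPv4 pseudo header (plain C, RFC 1071 folding):

#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over the IPv4 pseudo header (RFC 793/768). */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;
	sum += len;

	while (sum >> 16)		/* fold carries (RFC 1071) */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

int main(void)
{
	/* 10.0.0.1 -> 10.0.0.2, TCP (6), 40 bytes of header+payload */
	uint16_t seed = pseudo_hdr_sum(0x0a000001, 0x0a000002, 6, 40);

	/* this is the value that ends up in tcph->check */
	printf("seed 0x%04x\n", seed);
	return 0;
}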
+
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+			       int recalculate_partial_csum)
+{
+	int err = -EPROTO;
+	struct ipv6hdr *ipv6h = (void *)skb->data;
+	u8 nexthdr;
+	unsigned int header_size;
+	unsigned int off;
+	bool fragment;
+	bool done;
+
+	fragment = false;
+	done = false;
+
+	off = sizeof(struct ipv6hdr);
+
+	header_size = skb->network_header + off;
+	maybe_pull_tail(skb, header_size);
+
+	nexthdr = ipv6h->nexthdr;
+
+	while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+	       !done) {
+		switch (nexthdr) {
+		case IPPROTO_DSTOPTS:
+		case IPPROTO_HOPOPTS:
+		case IPPROTO_ROUTING: {
+			struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct ipv6_opt_hdr);
+			maybe_pull_tail(skb, header_size);
+
+			nexthdr = hp->nexthdr;
+			off += ipv6_optlen(hp);
+			break;
+		}
+		case IPPROTO_AH: {
+			struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct ip_auth_hdr);
+			maybe_pull_tail(skb, header_size);
+
+			nexthdr = hp->nexthdr;
+			off += (hp->hdrlen+2)<<2;
+			break;
+		}
+		case IPPROTO_FRAGMENT:
+			fragment = true;
+			/* fall through */
+		default:
+			done = true;
+			break;
+		}
+	}
+
+	if (!done) {
+		if (net_ratelimit())
+			netdev_err(vif->dev, "Failed to parse packet header\n");
+		goto out;
+	}
+
+	if (fragment) {
+		if (net_ratelimit())
+			netdev_err(vif->dev, "Packet is a fragment!\n");
+		goto out;
+	}
+
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = tcp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct tcphdr);
+			maybe_pull_tail(skb, header_size);
+
+			tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+						       &ipv6h->daddr,
+						       skb->len - off,
+						       IPPROTO_TCP, 0);
+		}
+		break;
+	case IPPROTO_UDP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = udp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct udphdr);
+			maybe_pull_tail(skb, header_size);
+
+			udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+						       &ipv6h->daddr,
+						       skb->len - off,
+						       IPPROTO_UDP, 0);
+		}
+		break;
+	default:
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Attempting to checksum a non-TCP/UDP packet, "
+				   "dropping a protocol %d packet\n",
+				   nexthdr);
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
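
Unlike IPv4, the L4 offset in IPv6 is only known after the extension-header walk above: each header names the next protocol and encodes its own length. A standalone sketch of the walk over a synthetic packet (ipv6_optlen() semantics restated as an assumption: (hdrlen + 1) * 8 bytes for options headers):

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP   0
#define NEXTHDR_TCP   6
#define NEXTHDR_DEST 60

int main(void)
{
	/* 40-byte IPv6 header, then hop-by-hop, dest opts, then TCP */
	uint8_t pkt[128] = {
		[40] = NEXTHDR_DEST, [41] = 0, /* hop-by-hop: 8 bytes */
		[48] = NEXTHDR_TCP,  [49] = 0, /* dest opts: 8 bytes */
	};
	unsigned int off = 40;
	uint8_t nexthdr = NEXTHDR_HOP;

	while (nexthdr != NEXTHDR_TCP) {
		uint8_t hdrlen = pkt[off + 1];

		nexthdr = pkt[off];
		off += (hdrlen + 1) * 8;	/* ipv6_optlen() */
	}

	printf("TCP header at offset %u\n", off);	/* 56 */
	return 0;
}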
+
 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
 {
-	struct iphdr *iph;
 	int err = -EPROTO;
 	int recalculate_partial_csum = 0;
 
-	/*
-	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 	 * recalculate the partial checksum.
@@ -1140,46 +1370,11 @@
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
+	if (skb->protocol == htons(ETH_P_IP))
+		err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
 
-	iph = (void *)skb->data;
-	switch (iph->protocol) {
-	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct tcphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_TCP, 0);
-		}
-		break;
-	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct udphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_UDP, 0);
-		}
-		break;
-	default:
-		if (net_ratelimit())
-			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
-				   iph->protocol);
-		goto out;
-	}
-
-	err = 0;
-
-out:
 	return err;
 }
 
@@ -1428,12 +1623,7 @@
 
 		xenvif_fill_frags(vif, skb);
 
-		/*
-		 * If the initial fragment was < PKT_PROT_LEN then
-		 * pull through some bytes from the other fragments to
-		 * increase the linear region to PKT_PROT_LEN bytes.
-		 */
-		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
 			int target = min_t(int, skb->len, PKT_PROT_LEN);
 			__pskb_pull_tail(skb, target - skb_headlen(skb));
 		}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1b08d87..f035899 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -105,6 +105,22 @@
 			goto abort_transaction;
 		}
 
+		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+				    "%d", sg);
+		if (err) {
+			message = "writing feature-gso-tcpv6";
+			goto abort_transaction;
+		}
+
+		/* We support partial checksum setup for IPv6 packets */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-ipv6-csum-offload",
+				    "%d", 1);
+		if (err) {
+			message = "writing feature-ipv6-csum-offload";
+			goto abort_transaction;
+		}
+
 		/* We support rx-copy path. */
 		err = xenbus_printf(xbt, dev->nodename,
 				    "feature-rx-copy", "%d", 1);
@@ -561,20 +577,50 @@
 		val = 0;
 	vif->can_sg = !!val;
 
+	vif->gso_mask = 0;
+	vif->gso_prefix_mask = 0;
+
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
 		val = 0;
-	vif->gso = !!val;
+	if (val)
+		vif->gso_mask |= GSO_BIT(TCPV4);
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
 			 "%d", &val) < 0)
 		val = 0;
-	vif->gso_prefix = !!val;
+	if (val)
+		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+			 "%d", &val) < 0)
+		val = 0;
+	if (val)
+		vif->gso_mask |= GSO_BIT(TCPV6);
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+			 "%d", &val) < 0)
+		val = 0;
+	if (val)
+		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+	if (vif->gso_mask & vif->gso_prefix_mask) {
+		xenbus_dev_fatal(dev, err,
+				 "%s: gso and gso prefix flags are not "
+				 "mutually exclusive",
+				 dev->otherend);
+		return -EOPNOTSUPP;
+	}
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
 		val = 0;
-	vif->csum = !val;
+	vif->ip_csum = !val;
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+			 "%d", &val) < 0)
+		val = 0;
+	vif->ipv6_csum = !!val;
 
 	/* Map the shared frame, irq etc. */
 	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
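
Every feature key above is read with the same optional semantics: a missing key must behave as 0, which is why each failed xenbus_scanf() resets val before use. A hedged helper capturing the pattern (read_otherend_feature is illustrative, not part of the driver):

#include <xen/xenbus.h>

/* Read an optional boolean feature key; absent means disabled. */
static int read_otherend_feature(struct xenbus_device *dev,
				 const char *feature)
{
	int val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, feature, "%d", &val) < 0)
		val = 0;

	return !!val;
}

With such a helper each mask update would reduce to a one-liner, though the driver above deliberately open-codes every read.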
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf..d85e669 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -952,7 +952,7 @@
 		u64_stats_update_end(&stats->syncp);
 
 		/* Pass it up. */
-		netif_receive_skb(skb);
+		napi_gro_receive(&np->napi, skb);
 	}
 
 	return packets_dropped;
@@ -1051,6 +1051,8 @@
 	if (work_done < budget) {
 		int more_to_do = 0;
 
+		napi_gro_flush(napi, false);
+
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
@@ -1338,6 +1340,12 @@
 	if (np->stats == NULL)
 		goto exit;
 
+	for_each_possible_cpu(i) {
+		struct netfront_stats *xen_nf_stats;
+		xen_nf_stats = per_cpu_ptr(np->stats, i);
+		u64_stats_init(&xen_nf_stats->syncp);
+	}
+
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	np->tx_skb_freelist = 0;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
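
Moving netfront from netif_receive_skb() to napi_gro_receive() lets the stack coalesce back-to-back TCP segments, and the napi_gro_flush() before completion pushes up anything still held when the poll ends. A hedged skeleton of a GRO-aware poll loop (my_rx_one() is an assumed helper that dequeues one received skb):

#include <linux/netdevice.h>

struct sk_buff *my_rx_one(struct napi_struct *napi);	/* assumed */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = my_rx_one(napi))) {
		napi_gro_receive(napi, skb);	/* may merge into a flow */
		work_done++;
	}

	if (work_done < budget) {
		napi_gro_flush(napi, false);	/* push held skbs up */
		napi_complete(napi);
	}

	return work_done;
}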
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index b0b64cc..c1fb206 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -46,6 +46,16 @@
 
 	  If unsure, say N.
 
+config NFC_PORT100
+	tristate "Sony NFC Port-100 Series USB device support"
+	depends on USB
+	depends on NFC_DIGITAL
+	help
+	  This adds support for Sony Port-100 chip-based USB devices such as
+	  the RC-S380 dongle.
+
+	  If unsure, say N.
+
 source "drivers/nfc/pn544/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index be7636a..c715fe8 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -8,5 +8,6 @@
 obj-$(CONFIG_NFC_WILINK)	+= nfcwilink.o
 obj-$(CONFIG_NFC_MEI_PHY)	+= mei_phy.o
 obj-$(CONFIG_NFC_SIM)		+= nfcsim.o
+obj-$(CONFIG_NFC_PORT100)	+= port100.o
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 606bf55..85f9009 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/nfc.h>
@@ -60,13 +62,13 @@
 
 	r = mei_cl_enable_device(phy->device);
 	if (r < 0) {
-		pr_err("MEI_PHY: Could not enable device\n");
+		pr_err("Could not enable device\n");
 		return r;
 	}
 
 	r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
 	if (r) {
-		pr_err("MEY_PHY: Event cb registration failed\n");
+		pr_err("Event cb registration failed\n");
 		mei_cl_disable_device(phy->device);
 		phy->powered = 0;
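
Defining pr_fmt() before the includes is what lets the messages above drop their hand-written "MEI_PHY:" prefixes: printk.h expands every pr_*() format through pr_fmt(). A standalone mimic of the mechanism:

#include <stdio.h>

#define KBUILD_MODNAME "mei_phy"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* userspace stand-in for the kernel's pr_err() */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("Could not enable device\n"); /* "mei_phy: Could not ..." */
	return 0;
}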
 
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 1010894..696e346 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
@@ -95,12 +97,8 @@
 		crc = crc ^ skb->data[i];
 
 	if (crc != skb->data[skb->len-1]) {
-		pr_err(MICROREAD_I2C_DRIVER_NAME
-		       ": CRC error 0x%x != 0x%x\n",
-		       crc, skb->data[skb->len-1]);
-
-		pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
-
+		pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
+		pr_info("%s: BAD CRC\n", __func__);
 		return -EPERM;
 	}
 
@@ -160,18 +158,15 @@
 	u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
 	struct i2c_client *client = phy->i2c_dev;
 
-	pr_debug("%s\n", __func__);
-
 	r = i2c_master_recv(client, &len, 1);
 	if (r != 1) {
-		dev_err(&client->dev, "cannot read len byte\n");
+		nfc_err(&client->dev, "cannot read len byte\n");
 		return -EREMOTEIO;
 	}
 
 	if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
 	    (len > MICROREAD_I2C_LLC_MAX_SIZE)) {
-		dev_err(&client->dev, "invalid len byte\n");
-		pr_err("invalid len byte\n");
+		nfc_err(&client->dev, "invalid len byte\n");
 		r = -EBADMSG;
 		goto flush;
 	}
@@ -228,7 +223,6 @@
 	}
 
 	client = phy->i2c_dev;
-	dev_dbg(&client->dev, "IRQ\n");
 
 	if (phy->hard_fault != 0)
 		return IRQ_HANDLED;
@@ -263,20 +257,18 @@
 		dev_get_platdata(&client->dev);
 	int r;
 
-	dev_dbg(&client->dev, "client %p", client);
+	dev_dbg(&client->dev, "client %p\n", client);
 
 	if (!pdata) {
-		dev_err(&client->dev, "client %p: missing platform data",
+		nfc_err(&client->dev, "client %p: missing platform data\n",
 			client);
 		return -EINVAL;
 	}
 
 	phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
 			   GFP_KERNEL);
-	if (!phy) {
-		dev_err(&client->dev, "Can't allocate microread phy");
+	if (!phy)
 		return -ENOMEM;
-	}
 
 	i2c_set_clientdata(client, phy);
 	phy->i2c_dev = client;
@@ -285,7 +277,7 @@
 				 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 				 MICROREAD_I2C_DRIVER_NAME, phy);
 	if (r) {
-		dev_err(&client->dev, "Unable to register IRQ handler");
+		nfc_err(&client->dev, "Unable to register IRQ handler\n");
 		return r;
 	}
 
@@ -296,7 +288,7 @@
 	if (r < 0)
 		goto err_irq;
 
-	dev_info(&client->dev, "Probed");
+	nfc_info(&client->dev, "Probed");
 
 	return 0;
 
@@ -310,8 +302,6 @@
 {
 	struct microread_i2c_phy *phy = i2c_get_clientdata(client);
 
-	dev_dbg(&client->dev, "%s\n", __func__);
-
 	microread_remove(phy->hdev);
 
 	free_irq(client->irq, phy);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index cdf1bc5..72fafec 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/nfc.h>
@@ -59,8 +61,6 @@
 {
 	struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
 
-	pr_info("Removing microread\n");
-
 	microread_remove(phy->hdev);
 
 	nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index cdb9f6d..970ded6 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -546,7 +548,7 @@
 	kfree_skb(skb);
 
 	if (r)
-		pr_err("Failed to handle discovered target err=%d", r);
+		pr_err("Failed to handle discovered target err=%d\n", r);
 }
 
 static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
@@ -656,7 +658,6 @@
 
 	info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
 	if (!info) {
-		pr_err("Cannot allocate memory for microread_info.\n");
 		r = -ENOMEM;
 		goto err_info_alloc;
 	}
@@ -686,7 +687,7 @@
 					     MICROREAD_CMD_TAILROOM,
 					     phy_payload);
 	if (!info->hdev) {
-		pr_err("Cannot allocate nfc hdev.\n");
+		pr_err("Cannot allocate nfc hdev\n");
 		r = -ENOMEM;
 		goto err_alloc_hdev;
 	}
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 9a53f13..93111fa 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -19,10 +19,10 @@
 #include <linux/nfc.h>
 #include <net/nfc/nfc.h>
 
-#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \
+#define DEV_ERR(_dev, fmt, args...) nfc_err(&_dev->nfc_dev->dev, \
 						"%s: " fmt, __func__, ## args)
 
-#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \
+#define DEV_DBG(_dev, fmt, args...) dev_dbg(&_dev->nfc_dev->dev, \
 						"%s: " fmt, __func__, ## args)
 
 #define NFCSIM_VERSION "0.1"
@@ -64,7 +64,7 @@
 
 static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
 {
-	DEV_DBG(dev, "shutdown=%d", shutdown);
+	DEV_DBG(dev, "shutdown=%d\n", shutdown);
 
 	mutex_lock(&dev->lock);
 
@@ -84,7 +84,7 @@
 {
 	struct nfc_target nfc_tgt;
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 
 	memset(&nfc_tgt, 0, sizeof(struct nfc_target));
 
@@ -98,7 +98,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 
 	mutex_lock(&dev->lock);
 
@@ -113,7 +113,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 
 	mutex_lock(&dev->lock);
 
@@ -143,7 +143,7 @@
 
 	remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
 	if (!remote_gb) {
-		DEV_ERR(peer, "Can't get remote general bytes");
+		DEV_ERR(peer, "Can't get remote general bytes\n");
 
 		mutex_unlock(&peer->lock);
 		return -EINVAL;
@@ -155,7 +155,7 @@
 
 	rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
 	if (rc) {
-		DEV_ERR(dev, "Can't set remote general bytes");
+		DEV_ERR(dev, "Can't set remote general bytes\n");
 		mutex_unlock(&dev->lock);
 		return rc;
 	}
@@ -172,7 +172,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 
 	nfcsim_cleanup_dev(dev, 0);
 
@@ -188,7 +188,7 @@
 	mutex_lock(&dev->lock);
 
 	if (dev->polling_mode != NFCSIM_POLL_NONE) {
-		DEV_ERR(dev, "Already in polling mode");
+		DEV_ERR(dev, "Already in polling mode\n");
 		rc = -EBUSY;
 		goto exit;
 	}
@@ -200,7 +200,7 @@
 		dev->polling_mode |= NFCSIM_POLL_TARGET;
 
 	if (dev->polling_mode == NFCSIM_POLL_NONE) {
-		DEV_ERR(dev, "Unsupported polling mode");
+		DEV_ERR(dev, "Unsupported polling mode\n");
 		rc = -EINVAL;
 		goto exit;
 	}
@@ -210,7 +210,7 @@
 
 	queue_delayed_work(wq, &dev->poll_work, 0);
 
-	DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols,
+	DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X\n", im_protocols,
 		tm_protocols);
 
 	rc = 0;
@@ -224,7 +224,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "Stop poll");
+	DEV_DBG(dev, "Stop poll\n");
 
 	mutex_lock(&dev->lock);
 
@@ -240,7 +240,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 
 	return -ENOTSUPP;
 }
@@ -250,7 +250,7 @@
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-	DEV_DBG(dev, "");
+	DEV_DBG(dev, "\n");
 }
 
 static void nfcsim_wq_recv(struct work_struct *work)
@@ -267,7 +267,7 @@
 
 	if (dev->initiator) {
 		if (!dev->cb) {
-			DEV_ERR(dev, "Null recv callback");
+			DEV_ERR(dev, "Null recv callback\n");
 			dev_kfree_skb(dev->clone_skb);
 			goto exit;
 		}
@@ -310,7 +310,7 @@
 	peer->clone_skb = skb_clone(skb, GFP_KERNEL);
 
 	if (!peer->clone_skb) {
-		DEV_ERR(dev, "skb_clone failed");
+		DEV_ERR(dev, "skb_clone failed\n");
 		mutex_unlock(&peer->lock);
 		err = -ENOMEM;
 		goto exit;
@@ -397,13 +397,13 @@
 	nfcsim_set_polling_mode(dev);
 
 	if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
-		DEV_DBG(dev, "Not polling");
+		DEV_DBG(dev, "Not polling\n");
 		goto unlock;
 	}
 
-	DEV_DBG(dev, "Polling as %s",
+	DEV_DBG(dev, "Polling as %s\n",
 		dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
 		"initiator" : "target");
 
 	if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
 		goto sched_work;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 59f95d8..7130864 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -146,13 +146,11 @@
 	unsigned long comp_ret;
 	int rc;
 
-	nfc_dev_dbg(&drv->pdev->dev, "get_bts_file_name entry");
-
 	skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd),
 					GFP_KERNEL);
 	if (!skb) {
-		nfc_dev_err(&drv->pdev->dev,
-				"no memory for nci_vs_nfcc_info_cmd");
+		nfc_err(&drv->pdev->dev,
+			"no memory for nci_vs_nfcc_info_cmd\n");
 		return -ENOMEM;
 	}
 
@@ -170,21 +168,19 @@
 
 	comp_ret = wait_for_completion_timeout(&drv->completed,
 				msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
-	nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
-			comp_ret);
+	dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+		comp_ret);
 	if (comp_ret == 0) {
-		nfc_dev_err(&drv->pdev->dev,
-				"timeout on wait_for_completion_timeout");
+		nfc_err(&drv->pdev->dev,
+			"timeout on wait_for_completion_timeout\n");
 		return -ETIMEDOUT;
 	}
 
-	nfc_dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d",
-			drv->nfcc_info.plen,
-			drv->nfcc_info.status);
+	dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d\n",
+		drv->nfcc_info.plen, drv->nfcc_info.status);
 
 	if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) {
-		nfc_dev_err(&drv->pdev->dev,
-				"invalid nci_vs_nfcc_info_rsp");
+		nfc_err(&drv->pdev->dev, "invalid nci_vs_nfcc_info_rsp\n");
 		return -EINVAL;
 	}
 
@@ -195,7 +191,7 @@
 			drv->nfcc_info.sw_ver_z,
 			drv->nfcc_info.patch_id);
 
-	nfc_dev_info(&drv->pdev->dev, "nfcwilink FW file name: %s", file_name);
+	nfc_info(&drv->pdev->dev, "nfcwilink FW file name: %s\n", file_name);
 
 	return 0;
 }
@@ -207,15 +203,13 @@
 	unsigned long comp_ret;
 	int rc;
 
-	nfc_dev_dbg(&drv->pdev->dev, "send_bts_cmd entry");
-
 	/* verify valid cmd for the NFC channel */
 	if ((len <= sizeof(struct nfcwilink_hdr)) ||
 		(len > BTS_FILE_CMD_MAX_LEN) ||
 		(hdr->chnl != NFCWILINK_CHNL) ||
 		(hdr->opcode != NFCWILINK_OPCODE)) {
-		nfc_dev_err(&drv->pdev->dev,
-			"ignoring invalid bts cmd, len %d, chnl %d, opcode %d",
+		nfc_err(&drv->pdev->dev,
+			"ignoring invalid bts cmd, len %d, chnl %d, opcode %d\n",
 			len, hdr->chnl, hdr->opcode);
 		return 0;
 	}
@@ -226,7 +220,7 @@
 
 	skb = nfcwilink_skb_alloc(len, GFP_KERNEL);
 	if (!skb) {
-		nfc_dev_err(&drv->pdev->dev, "no memory for bts cmd");
+		nfc_err(&drv->pdev->dev, "no memory for bts cmd\n");
 		return -ENOMEM;
 	}
 
@@ -238,11 +232,11 @@
 
 	comp_ret = wait_for_completion_timeout(&drv->completed,
 				msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
-	nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
-			comp_ret);
+	dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+		comp_ret);
 	if (comp_ret == 0) {
-		nfc_dev_err(&drv->pdev->dev,
-				"timeout on wait_for_completion_timeout");
+		nfc_err(&drv->pdev->dev,
+			"timeout on wait_for_completion_timeout\n");
 		return -ETIMEDOUT;
 	}
 
@@ -257,8 +251,6 @@
 	__u8 *ptr;
 	int len, rc;
 
-	nfc_dev_dbg(&drv->pdev->dev, "download_fw entry");
-
 	set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
 
 	rc = nfcwilink_get_bts_file_name(drv, file_name);
@@ -267,7 +259,7 @@
 
 	rc = request_firmware(&fw, file_name, &drv->pdev->dev);
 	if (rc) {
-		nfc_dev_err(&drv->pdev->dev, "request_firmware failed %d", rc);
+		nfc_err(&drv->pdev->dev, "request_firmware failed %d\n", rc);
 
 		/* if the file is not found, don't exit with failure */
 		if (rc == -ENOENT)
@@ -280,14 +272,14 @@
 	ptr = (__u8 *)fw->data;
 
 	if ((len == 0) || (ptr == NULL)) {
-		nfc_dev_dbg(&drv->pdev->dev,
-				"request_firmware returned size %d", len);
+		dev_dbg(&drv->pdev->dev,
+			"request_firmware returned size %d\n", len);
 		goto release_fw;
 	}
 
 	if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) !=
 			BTS_FILE_HDR_MAGIC) {
-		nfc_dev_err(&drv->pdev->dev, "wrong bts magic number");
+		nfc_err(&drv->pdev->dev, "wrong bts magic number\n");
 		rc = -EINVAL;
 		goto release_fw;
 	}
@@ -302,8 +294,8 @@
 		action_len =
 			__le16_to_cpu(((struct bts_file_action *)ptr)->len);
 
-		nfc_dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d",
-				action_type, action_len);
+		dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d\n",
+			action_type, action_len);
 
 		switch (action_type) {
 		case BTS_FILE_ACTION_TYPE_SEND_CMD:
@@ -333,8 +325,6 @@
 {
 	struct nfcwilink *drv = priv_data;
 
-	nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
-
 	/* store ST registration status */
 	drv->st_register_cb_status = data;
 
@@ -356,7 +346,7 @@
 		return -EFAULT;
 	}
 
-	nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+	dev_dbg(&drv->pdev->dev, "receive entry, len %d\n", skb->len);
 
 	/* strip the ST header
 	(apart for the chnl byte, which is not received in the hdr) */
@@ -370,7 +360,7 @@
 	/* Forward skb to NCI core layer */
 	rc = nci_recv_frame(drv->ndev, skb);
 	if (rc < 0) {
-		nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+		nfc_err(&drv->pdev->dev, "nci_recv_frame failed %d\n", rc);
 		return rc;
 	}
 
@@ -396,8 +386,6 @@
 	unsigned long comp_ret;
 	int rc;
 
-	nfc_dev_dbg(&drv->pdev->dev, "open entry");
-
 	if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
 		rc = -EBUSY;
 		goto exit;
@@ -415,9 +403,9 @@
 			&drv->completed,
 			msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
 
-			nfc_dev_dbg(&drv->pdev->dev,
-			"wait_for_completion_timeout returned %ld",
-			comp_ret);
+			dev_dbg(&drv->pdev->dev,
+				"wait_for_completion_timeout returned %ld\n",
+				comp_ret);
 
 			if (comp_ret == 0) {
 				/* timeout */
@@ -425,13 +413,12 @@
 				goto clear_exit;
 			} else if (drv->st_register_cb_status != 0) {
 				rc = drv->st_register_cb_status;
-				nfc_dev_err(&drv->pdev->dev,
-				"st_register_cb failed %d", rc);
+				nfc_err(&drv->pdev->dev,
+					"st_register_cb failed %d\n", rc);
 				goto clear_exit;
 			}
 		} else {
-			nfc_dev_err(&drv->pdev->dev,
-				"st_register failed %d", rc);
+			nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc);
 			goto clear_exit;
 		}
 	}
@@ -441,8 +428,8 @@
 	drv->st_write = nfcwilink_proto.write;
 
 	if (nfcwilink_download_fw(drv)) {
-		nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d",
-				rc);
+		nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n",
+			rc);
 		/* open should succeed, even if the FW download failed */
 	}
 
@@ -460,14 +447,12 @@
 	struct nfcwilink *drv = nci_get_drvdata(ndev);
 	int rc;
 
-	nfc_dev_dbg(&drv->pdev->dev, "close entry");
-
 	if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
 		return 0;
 
 	rc = st_unregister(&nfcwilink_proto);
 	if (rc)
-		nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+		nfc_err(&drv->pdev->dev, "st_unregister failed %d\n", rc);
 
 	drv->st_write = NULL;
 
@@ -480,7 +465,7 @@
 	struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
 	long len;
 
-	nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+	dev_dbg(&drv->pdev->dev, "send entry, len %d\n", skb->len);
 
 	if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) {
 		kfree_skb(skb);
@@ -498,7 +483,7 @@
 	len = drv->st_write(skb);
 	if (len < 0) {
 		kfree_skb(skb);
-		nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+		nfc_err(&drv->pdev->dev, "st_write failed %ld\n", len);
 		return -EFAULT;
 	}
 
@@ -517,8 +502,6 @@
 	int rc;
 	__u32 protocols;
 
-	nfc_dev_dbg(&pdev->dev, "probe entry");
-
 	drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL);
 	if (!drv) {
 		rc = -ENOMEM;
@@ -538,7 +521,7 @@
 					NFCWILINK_HDR_LEN,
 					0);
 	if (!drv->ndev) {
-		nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+		nfc_err(&pdev->dev, "nci_allocate_device failed\n");
 		rc = -ENOMEM;
 		goto exit;
 	}
@@ -548,7 +531,7 @@
 
 	rc = nci_register_device(drv->ndev);
 	if (rc < 0) {
-		nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+		nfc_err(&pdev->dev, "nci_register_device failed %d\n", rc);
 		goto free_dev_exit;
 	}
 
@@ -568,8 +551,6 @@
 	struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
 	struct nci_dev *ndev;
 
-	nfc_dev_dbg(&pdev->dev, "remove entry");
-
 	if (!drv)
 		return -EFAULT;
 
@@ -578,8 +559,6 @@
 	nci_unregister_device(ndev);
 	nci_free_device(ndev);
 
-	dev_set_drvdata(&pdev->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 5df730b..2daf04c 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -150,6 +150,7 @@
 #define PN533_CMD_TG_INIT_AS_TARGET 0x8c
 #define PN533_CMD_TG_GET_DATA 0x86
 #define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
 #define PN533_CMD_UNDEF 0xff
 
 #define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -373,6 +374,8 @@
 	struct delayed_work poll_work;
 	struct work_struct mi_rx_work;
 	struct work_struct mi_tx_work;
+	struct work_struct mi_tm_rx_work;
+	struct work_struct mi_tm_tx_work;
 	struct work_struct tg_work;
 	struct work_struct rf_work;
 
@@ -387,6 +390,7 @@
 	struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
 	u8 poll_mod_count;
 	u8 poll_mod_curr;
+	u8 poll_dep;
 	u32 poll_protocols;
 	u32 listen_protocols;
 	struct timer_list listen_timer;
@@ -722,32 +726,32 @@
 		break; /* success */
 	case -ECONNRESET:
 	case -ENOENT:
-		nfc_dev_dbg(&dev->interface->dev,
-			    "The urb has been canceled (status %d)",
-			    urb->status);
+		dev_dbg(&dev->interface->dev,
+			"The urb has been canceled (status %d)\n",
+			urb->status);
 		goto sched_wq;
 	case -ESHUTDOWN:
 	default:
-		nfc_dev_err(&dev->interface->dev,
-			    "Urb failure (status %d)", urb->status);
+		nfc_err(&dev->interface->dev,
+			"Urb failure (status %d)\n", urb->status);
 		goto sched_wq;
 	}
 
 	in_frame = dev->in_urb->transfer_buffer;
 
-	nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
+	dev_dbg(&dev->interface->dev, "Received a frame\n");
 	print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
 			     dev->ops->rx_frame_size(in_frame), false);
 
 	if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
-		nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
+		nfc_err(&dev->interface->dev, "Received an invalid frame\n");
 		cmd->status = -EIO;
 		goto sched_wq;
 	}
 
 	if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
-		nfc_dev_err(&dev->interface->dev,
-			    "It it not the response to the last command");
+		nfc_err(&dev->interface->dev,
+			"It it not the response to the last command\n");
 		cmd->status = -EIO;
 		goto sched_wq;
 	}
@@ -777,29 +781,29 @@
 		break; /* success */
 	case -ECONNRESET:
 	case -ENOENT:
-		nfc_dev_dbg(&dev->interface->dev,
-			    "The urb has been stopped (status %d)",
-			    urb->status);
+		dev_dbg(&dev->interface->dev,
+			"The urb has been stopped (status %d)\n",
+			urb->status);
 		goto sched_wq;
 	case -ESHUTDOWN:
 	default:
-		nfc_dev_err(&dev->interface->dev,
-			    "Urb failure (status %d)", urb->status);
+		nfc_err(&dev->interface->dev,
+			"Urb failure (status %d)\n", urb->status);
 		goto sched_wq;
 	}
 
 	in_frame = dev->in_urb->transfer_buffer;
 
 	if (!pn533_std_rx_frame_is_ack(in_frame)) {
-		nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
+		nfc_err(&dev->interface->dev, "Received an invalid ack\n");
 		cmd->status = -EIO;
 		goto sched_wq;
 	}
 
 	rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "usb_submit_urb failed with result %d", rc);
+		nfc_err(&dev->interface->dev,
+			"usb_submit_urb failed with result %d\n", rc);
 		cmd->status = rc;
 		goto sched_wq;
 	}
@@ -823,8 +827,6 @@
 	/* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
 	dev->out_urb->transfer_buffer = ack;
 	dev->out_urb->transfer_buffer_length = sizeof(ack);
 	rc = usb_submit_urb(dev->out_urb, flags);
@@ -927,7 +929,7 @@
 	struct pn533_cmd *cmd;
 	int rc = 0;
 
-	nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
+	dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd)
@@ -954,8 +956,8 @@
 		goto unlock;
 	}
 
-	nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
-		    cmd_code);
+	dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
+		__func__, cmd_code);
 
 	INIT_LIST_HEAD(&cmd->queue);
 	list_add_tail(&cmd->queue, &dev->cmd_queue);
@@ -1168,14 +1170,14 @@
 		break; /* success */
 	case -ECONNRESET:
 	case -ENOENT:
-		nfc_dev_dbg(&dev->interface->dev,
-			    "The urb has been stopped (status %d)",
-			    urb->status);
+		dev_dbg(&dev->interface->dev,
+			"The urb has been stopped (status %d)\n",
+			urb->status);
 		break;
 	case -ESHUTDOWN:
 	default:
-		nfc_dev_err(&dev->interface->dev,
-			    "Urb failure (status %d)", urb->status);
+		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+			urb->status);
 	}
 }
 
@@ -1452,8 +1454,8 @@
 	struct nfc_target nfc_tgt;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
-		    dev->poll_mod_curr);
+	dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
+		__func__, dev->poll_mod_curr);
 
 	if (tg != 1)
 		return -EPROTO;
@@ -1475,8 +1477,8 @@
 		rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
 		break;
 	default:
-		nfc_dev_err(&dev->interface->dev,
-			    "Unknown current poll modulation");
+		nfc_err(&dev->interface->dev,
+			"Unknown current poll modulation\n");
 		return -EPROTO;
 	}
 
@@ -1484,14 +1486,14 @@
 		return rc;
 
 	if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
-		nfc_dev_dbg(&dev->interface->dev,
-			    "The Tg found doesn't have the desired protocol");
+		dev_dbg(&dev->interface->dev,
+			"The Tg found doesn't have the desired protocol\n");
 		return -EAGAIN;
 	}
 
-	nfc_dev_dbg(&dev->interface->dev,
-		    "Target found - supported protocols: 0x%x",
-		    nfc_tgt.supported_protocols);
+	dev_dbg(&dev->interface->dev,
+		"Target found - supported protocols: 0x%x\n",
+		nfc_tgt.supported_protocols);
 
 	dev->tgt_available_prots = nfc_tgt.supported_protocols;
 
@@ -1548,7 +1550,8 @@
 	u8 nbtg, tg, *tgdata;
 	int rc, tgdata_len;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	/* Enable DEP polling for the next poll run */
+	dev->poll_dep = 1;
 
 	nbtg = resp->data[0];
 	tg = resp->data[1];
@@ -1624,37 +1627,130 @@
 
 #define PN533_CMD_DATAEXCH_HEAD_LEN 1
 #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static void pn533_wq_tm_mi_recv(struct work_struct *work);
+static struct sk_buff *pn533_build_response(struct pn533 *dev);
+
 static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
 				      struct sk_buff *resp)
 {
-	u8 status;
+	struct sk_buff *skb;
+	u8 status, ret, mi;
+	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-	if (IS_ERR(resp))
+	if (IS_ERR(resp)) {
+		skb_queue_purge(&dev->resp_q);
 		return PTR_ERR(resp);
-
-	status = resp->data[0];
-	skb_pull(resp, sizeof(status));
-
-	if (status != 0) {
-		nfc_tm_deactivated(dev->nfc_dev);
-		dev->tgt_mode = 0;
-		dev_kfree_skb(resp);
-		return 0;
 	}
 
-	return nfc_tm_data_received(dev->nfc_dev, resp);
+	status = resp->data[0];
+
+	ret = status & PN533_CMD_RET_MASK;
+	mi = status & PN533_CMD_MI_MASK;
+
+	skb_pull(resp, sizeof(status));
+
+	if (ret != PN533_CMD_RET_SUCCESS) {
+		rc = -EIO;
+		goto error;
+	}
+
+	skb_queue_tail(&dev->resp_q, resp);
+
+	if (mi) {
+		queue_work(dev->wq, &dev->mi_tm_rx_work);
+		return -EINPROGRESS;
+	}
+
+	skb = pn533_build_response(dev);
+	if (!skb) {
+		rc = -EIO;
+		goto error;
+	}
+
+	return nfc_tm_data_received(dev->nfc_dev, skb);
+
+error:
+	nfc_tm_deactivated(dev->nfc_dev);
+	dev->tgt_mode = 0;
+	skb_queue_purge(&dev->resp_q);
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, 0);
+	if (!skb)
+		return;
+
+	rc = pn533_send_cmd_direct_async(dev,
+					PN533_CMD_TG_GET_DATA,
+					skb,
+					pn533_tm_get_data_complete,
+					NULL);
+
+	if (rc < 0)
+		dev_kfree_skb(skb);
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+				  struct sk_buff *resp);
+static void pn533_wq_tm_mi_send(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+	/* Grab the first skb in the queue */
+	skb = skb_dequeue(&dev->fragment_skb);
+	if (skb == NULL) {	/* No more data */
+		/* Reset the queue for future use */
+		skb_queue_head_init(&dev->fragment_skb);
+		goto error;
+	}
+
+	/* last entry - remove MI bit */
+	if (skb_queue_len(&dev->fragment_skb) == 0) {
+		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
+					skb, pn533_tm_send_complete, NULL);
+	} else {
+		rc = pn533_send_cmd_direct_async(dev,
+					PN533_CMD_TG_SET_META_DATA,
+					skb, pn533_tm_send_complete, NULL);
+	}
+
+	if (rc == 0) /* success */
+		return;
+
+	dev_err(&dev->interface->dev,
+		"Error %d when trying to perform set meta data_exchange", rc);
+
+	dev_kfree_skb(skb);
+
+error:
+	pn533_send_ack(dev, GFP_KERNEL);
+	queue_work(dev->wq, &dev->cmd_work);
 }
 
 static void pn533_wq_tg_get_data(struct work_struct *work)
 {
 	struct pn533 *dev = container_of(work, struct pn533, tg_work);
-
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb = pn533_alloc_skb(dev, 0);
 	if (!skb)
@@ -1676,7 +1772,7 @@
 	size_t gb_len;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (resp->len < ATR_REQ_GB_OFFSET + 1)
 		return -EINVAL;
@@ -1684,8 +1780,8 @@
 	mode = resp->data[0];
 	cmd = &resp->data[1];
 
-	nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
-		    mode, resp->len);
+	dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+		mode, resp->len);
 
 	if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
 	    PN533_INIT_TARGET_RESP_ACTIVE)
@@ -1700,8 +1796,8 @@
 	rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
 			      comm_mode, gb, gb_len);
 	if (rc < 0) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Error when signaling target activation");
+		nfc_err(&dev->interface->dev,
+			"Error when signaling target activation\n");
 		return rc;
 	}
 
@@ -1715,7 +1811,7 @@
 {
 	struct pn533 *dev = (struct pn533 *)data;
 
-	nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+	dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
 
 	dev->cancel_listen = 1;
 
@@ -1730,13 +1826,12 @@
 {
 	int rc = 0;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (IS_ERR(resp)) {
 		rc = PTR_ERR(resp);
 
-		nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
-			    __func__, rc);
+		nfc_err(&dev->interface->dev, "RF setting error %d", rc);
 
 		return rc;
 	}
@@ -1754,7 +1849,7 @@
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb = pn533_alloc_skb(dev, 2);
 	if (!skb)
@@ -1767,25 +1862,136 @@
 				  pn533_rf_complete, NULL);
 	if (rc < 0) {
 		dev_kfree_skb(skb);
-		nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+		nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
 	}
 
 	return;
 }
 
+static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+				   struct sk_buff *resp)
+{
+	struct pn533_cmd_jump_dep_response *rsp;
+	struct nfc_target nfc_target;
+	u8 target_gt_len;
+	int rc;
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+	rc = rsp->status & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS) {
+		/* No target found, turn the radio off */
+		queue_work(dev->wq, &dev->rf_work);
+
+		dev_kfree_skb(resp);
+		return 0;
+	}
+
+	dev_dbg(&dev->interface->dev, "Creating new target\n");
+
+	nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+	nfc_target.nfcid1_len = 10;
+	memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+	rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+	if (rc)
+		goto error;
+
+	dev->tgt_available_prots = 0;
+	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+	/* ATR_RES general bytes are located at offset 17 */
+	target_gt_len = resp->len - 17;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+					  rsp->gt, target_gt_len);
+	if (!rc) {
+		rc = nfc_dep_link_is_up(dev->nfc_dev,
+					dev->nfc_dev->targets[0].idx,
+					0, NFC_RF_INITIATOR);
+
+		if (!rc)
+			pn533_poll_reset_mod_list(dev);
+	}
+error:
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+#define PASSIVE_DATA_LEN 5
+static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct sk_buff *skb;
+	int rc, skb_len;
+	u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
+	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+	if (!dev->gb) {
+		dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+
+		if (!dev->gb || !dev->gb_len) {
+			/* No local general bytes: abort the DEP poll */
+			dev->poll_dep = 0;
+			queue_work(dev->wq, &dev->rf_work);
+			return 0;
+		}
+	}
+
+	skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
+	skb_len += PASSIVE_DATA_LEN;
+
+	/* NFCID3 */
+	skb_len += NFC_NFCID3_MAXSIZE;
+	nfcid3[0] = 0x1;
+	nfcid3[1] = 0xfe;
+	get_random_bytes(nfcid3 + 2, 6);
+
+	skb = pn533_alloc_skb(dev, skb_len);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = 0x01;  /* Active */
+	*skb_put(skb, 1) = 0x02;  /* 424 kbps */
+
+	next = skb_put(skb, 1);  /* Next */
+	*next = 0;
+
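+	/* Each bit set in Next flags an optional field appended below:
+	 * 0x01 passive initiator data, 0x02 NFCID3i, 0x04 general bytes (Gi).
+	 */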
+	/* Copy passive data */
+	memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+	*next |= 1;
+
+	/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+	memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+	       NFC_NFCID3_MAXSIZE);
+	*next |= 2;
+
+	memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+	*next |= 4; /* We have some Gi */
+
+	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+				  pn533_poll_dep_complete, NULL);
+
+	if (rc < 0)
+		dev_kfree_skb(skb);
+
+	return rc;
+}
+
 static int pn533_poll_complete(struct pn533 *dev, void *arg,
 			       struct sk_buff *resp)
 {
 	struct pn533_poll_modulations *cur_mod;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (IS_ERR(resp)) {
 		rc = PTR_ERR(resp);
 
-		nfc_dev_err(&dev->interface->dev, "%s  Poll complete error %d",
-			    __func__, rc);
+		nfc_err(&dev->interface->dev, "%s Poll complete error %d\n",
+			__func__, rc);
 
 		if (rc == -ENOENT) {
 			if (dev->poll_mod_count != 0)
@@ -1793,8 +1999,8 @@
 			else
 				goto stop_poll;
 		} else if (rc < 0) {
-			nfc_dev_err(&dev->interface->dev,
-				    "Error %d when running poll", rc);
+			nfc_err(&dev->interface->dev,
+				"Error %d when running poll\n", rc);
 			goto stop_poll;
 		}
 	}
@@ -1813,7 +2019,7 @@
 		goto done;
 
 	if (!dev->poll_mod_count) {
-		nfc_dev_dbg(&dev->interface->dev, "Polling has been stopped.");
+		dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
 		goto done;
 	}
 
@@ -1826,7 +2032,7 @@
 	return rc;
 
 stop_poll:
-	nfc_dev_err(&dev->interface->dev, "Polling operation has been stopped");
+	nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
 
 	pn533_poll_reset_mod_list(dev);
 	dev->poll_protocols = 0;
@@ -1856,8 +2062,13 @@
 
 	mod = dev->poll_mod_active[dev->poll_mod_curr];
 
-	nfc_dev_dbg(&dev->interface->dev, "%s mod len %d\n",
-		    __func__, mod->len);
+	dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+		__func__, mod->len);
+
+	if (dev->poll_dep) {
+		dev->poll_dep = 0;
+		return pn533_poll_dep(dev->nfc_dev);
+	}
 
 	if (mod->len == 0) {  /* Listen mode */
 		cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
@@ -1868,7 +2079,7 @@
 	}
 
 	if (!skb) {
-		nfc_dev_err(&dev->interface->dev, "Failed to allocate skb.");
+		nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
 		return -ENOMEM;
 	}
 
@@ -1876,7 +2087,7 @@
 				  NULL);
 	if (rc < 0) {
 		dev_kfree_skb(skb);
-		nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+		nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
 	}
 
 	return rc;
@@ -1890,9 +2101,9 @@
 
 	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
 
-	nfc_dev_dbg(&dev->interface->dev,
-		    "%s cancel_listen %d modulation len %d",
-		    __func__, dev->cancel_listen, cur_mod->len);
+	dev_dbg(&dev->interface->dev,
+		"%s cancel_listen %d modulation len %d\n",
+		__func__, dev->cancel_listen, cur_mod->len);
 
 	if (dev->cancel_listen == 1) {
 		dev->cancel_listen = 0;
@@ -1913,21 +2124,23 @@
 			    u32 im_protocols, u32 tm_protocols)
 {
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct pn533_poll_modulations *cur_mod;
 	u8 rand_mod;
+	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev,
-		    "%s: im protocols 0x%x tm protocols 0x%x",
-		    __func__, im_protocols, tm_protocols);
+	dev_dbg(&dev->interface->dev,
+		"%s: im protocols 0x%x tm protocols 0x%x\n",
+		__func__, im_protocols, tm_protocols);
 
 	if (dev->tgt_active_prot) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Cannot poll with a target already activated");
+		nfc_err(&dev->interface->dev,
+			"Cannot poll with a target already activated\n");
 		return -EBUSY;
 	}
 
 	if (dev->tgt_mode) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Cannot poll while already being activated");
+		nfc_err(&dev->interface->dev,
+			"Cannot poll while already being activated\n");
 		return -EBUSY;
 	}
 
@@ -1946,20 +2159,26 @@
 	rand_mod %= dev->poll_mod_count;
 	dev->poll_mod_curr = rand_mod;
 
-	return pn533_send_poll_frame(dev);
+	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+	rc = pn533_send_poll_frame(dev);
+
+	/* Start listen timer */
+	if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
+		mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+
+	return rc;
 }
 
 static void pn533_stop_poll(struct nfc_dev *nfc_dev)
 {
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
 	del_timer(&dev->listen_timer);
 
 	if (!dev->poll_mod_count) {
-		nfc_dev_dbg(&dev->interface->dev,
-			    "Polling operation was not running");
+		dev_dbg(&dev->interface->dev,
+			"Polling operation was not running\n");
 		return;
 	}
 
@@ -1973,11 +2192,10 @@
 	struct pn533_cmd_activate_response *rsp;
 	u16 gt_len;
 	int rc;
-
 	struct sk_buff *skb;
 	struct sk_buff *resp;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
 	if (!skb)
@@ -1993,8 +2211,8 @@
 	rsp = (struct pn533_cmd_activate_response *)resp->data;
 	rc = rsp->status & PN533_CMD_RET_MASK;
 	if (rc != PN533_CMD_RET_SUCCESS) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Target activation failed (error 0x%x)", rc);
+		nfc_err(&dev->interface->dev,
+			"Target activation failed (error 0x%x)\n", rc);
 		dev_kfree_skb(resp);
 		return -EIO;
 	}
@@ -2013,39 +2231,38 @@
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
-		    protocol);
+	dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
 
 	if (dev->poll_mod_count) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Cannot activate while polling");
+		nfc_err(&dev->interface->dev,
+			"Cannot activate while polling\n");
 		return -EBUSY;
 	}
 
 	if (dev->tgt_active_prot) {
-		nfc_dev_err(&dev->interface->dev,
-			    "There is already an active target");
+		nfc_err(&dev->interface->dev,
+			"There is already an active target\n");
 		return -EBUSY;
 	}
 
 	if (!dev->tgt_available_prots) {
-		nfc_dev_err(&dev->interface->dev,
-			    "There is no available target to activate");
+		nfc_err(&dev->interface->dev,
+			"There is no available target to activate\n");
 		return -EINVAL;
 	}
 
 	if (!(dev->tgt_available_prots & (1 << protocol))) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Target doesn't support requested proto %u",
-			    protocol);
+		nfc_err(&dev->interface->dev,
+			"Target doesn't support requested proto %u\n",
+			protocol);
 		return -EINVAL;
 	}
 
 	if (protocol == NFC_PROTO_NFC_DEP) {
 		rc = pn533_activate_target_nfcdep(dev);
 		if (rc) {
-			nfc_dev_err(&dev->interface->dev,
-				    "Activating target with DEP failed %d", rc);
+			nfc_err(&dev->interface->dev,
+				"Activating target with DEP failed %d\n", rc);
 			return rc;
 		}
 	}
@@ -2060,16 +2277,14 @@
 				    struct nfc_target *target)
 {
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
 	struct sk_buff *skb;
 	struct sk_buff *resp;
-
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (!dev->tgt_active_prot) {
-		nfc_dev_err(&dev->interface->dev, "There is no active target");
+		nfc_err(&dev->interface->dev, "There is no active target\n");
 		return;
 	}
 
@@ -2088,8 +2303,8 @@
 
 	rc = resp->data[0] & PN533_CMD_RET_MASK;
 	if (rc != PN533_CMD_RET_SUCCESS)
-		nfc_dev_err(&dev->interface->dev,
-			    "Error 0x%x when releasing the target", rc);
+		nfc_err(&dev->interface->dev,
+			"Error 0x%x when releasing the target\n", rc);
 
 	dev_kfree_skb(resp);
 	return;
@@ -2111,8 +2326,8 @@
 
 	if (dev->tgt_available_prots &&
 	    !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
-		nfc_dev_err(&dev->interface->dev,
-			    "The target does not support DEP");
+		nfc_err(&dev->interface->dev,
+			"The target does not support DEP\n");
 		rc =  -EINVAL;
 		goto error;
 	}
@@ -2121,15 +2336,15 @@
 
 	rc = rsp->status & PN533_CMD_RET_MASK;
 	if (rc != PN533_CMD_RET_SUCCESS) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Bringing DEP link up failed (error 0x%x)", rc);
+		nfc_err(&dev->interface->dev,
+			"Bringing DEP link up failed (error 0x%x)\n", rc);
 		goto error;
 	}
 
 	if (!dev->tgt_available_prots) {
 		struct nfc_target nfc_target;
 
-		nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+		dev_dbg(&dev->interface->dev, "Creating new target\n");
 
 		nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
 		nfc_target.nfcid1_len = 10;
@@ -2158,7 +2373,6 @@
 }
 
 static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
-#define PASSIVE_DATA_LEN 5
 static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
 			     u8 comm_mode, u8 *gb, size_t gb_len)
 {
@@ -2166,20 +2380,19 @@
 	struct sk_buff *skb;
 	int rc, skb_len;
 	u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
-
 	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (dev->poll_mod_count) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Cannot bring the DEP link up while polling");
+		nfc_err(&dev->interface->dev,
+			"Cannot bring the DEP link up while polling\n");
 		return -EBUSY;
 	}
 
 	if (dev->tgt_active_prot) {
-		nfc_dev_err(&dev->interface->dev,
-			    "There is already an active target");
+		nfc_err(&dev->interface->dev,
+			"There is already an active target\n");
 		return -EBUSY;
 	}
 
@@ -2249,7 +2462,7 @@
 {
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	pn533_poll_reset_mod_list(dev);
 
@@ -2274,7 +2487,7 @@
 	struct sk_buff *skb, *tmp, *t;
 	unsigned int skb_len = 0, tmp_len = 0;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (skb_queue_empty(&dev->resp_q))
 		return NULL;
@@ -2287,8 +2500,8 @@
 	skb_queue_walk_safe(&dev->resp_q, tmp, t)
 		skb_len += tmp->len;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
-		    __func__, skb_len);
+	dev_dbg(&dev->interface->dev, "%s total length %d\n",
+		__func__, skb_len);
 
 	skb = alloc_skb(skb_len, GFP_KERNEL);
 	if (skb == NULL)
@@ -2315,7 +2528,7 @@
 	int rc = 0;
 	u8 status, ret, mi;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (IS_ERR(resp)) {
 		rc = PTR_ERR(resp);
@@ -2329,8 +2542,8 @@
 	skb_pull(resp, sizeof(status));
 
 	if (ret != PN533_CMD_RET_SUCCESS) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Exchanging data failed (error 0x%x)", ret);
+		nfc_err(&dev->interface->dev,
+			"Exchanging data failed (error 0x%x)\n", ret);
 		rc = -EIO;
 		goto error;
 	}
@@ -2388,14 +2601,17 @@
 			break;
 		}
 
-		/* Reserve the TG/MI byte */
-		skb_reserve(frag, 1);
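+		/* The TG/MI addressing byte is only prepended in initiator
+		 * mode; target mode frames carry none.
+		 */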
+		if (!dev->tgt_mode) {
+			/* Reserve the TG/MI byte */
+			skb_reserve(frag, 1);
 
-		/* MI + TG */
-		if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
-			*skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
-		else
-			*skb_push(frag, sizeof(u8)) =  1; /* TG */
+			/* MI + TG */
+			if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
+				*skb_push(frag, sizeof(u8)) =
+							(PN533_CMD_MI_MASK | 1);
+			else
+				*skb_push(frag, sizeof(u8)) = 1; /* TG */
+		}
 
 		memcpy(skb_put(frag, frag_size), skb->data, frag_size);
 
@@ -2420,11 +2636,11 @@
 	struct pn533_data_exchange_arg *arg = NULL;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (!dev->tgt_active_prot) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Can't exchange data if there is no active target");
+		nfc_err(&dev->interface->dev,
+			"Can't exchange data if there is no active target\n");
 		rc = -EINVAL;
 		goto error;
 	}
@@ -2487,13 +2703,18 @@
 {
 	u8 status;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	if (IS_ERR(resp))
 		return PTR_ERR(resp);
 
 	status = resp->data[0];
 
+	/* Prepare for the next round */
+	if (skb_queue_len(&dev->fragment_skb) > 0) {
+		queue_work(dev->wq, &dev->mi_tm_tx_work);
+		return -EINPROGRESS;
+	}
 	dev_kfree_skb(resp);
 
 	if (status != 0) {
@@ -2514,19 +2735,34 @@
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
+	/* Split into multiple chunks if the size is too big */
 	if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Data length greater than the max allowed: %d",
-			    PN533_CMD_DATAEXCH_DATA_MAXLEN);
-		return -ENOSYS;
+		rc = pn533_fill_fragment_skbs(dev, skb);
+		if (rc <= 0)
+			goto error;
+
+		/* get the first skb */
+		skb = skb_dequeue(&dev->fragment_skb);
+		if (!skb) {
+			rc = -EIO;
+			goto error;
+		}
+
+		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
+						pn533_tm_send_complete, NULL);
+	} else {
+		/* Send the skb */
+		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+						pn533_tm_send_complete, NULL);
 	}
 
-	rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
-				   pn533_tm_send_complete, NULL);
-	if (rc < 0)
+error:
+	if (rc < 0) {
 		dev_kfree_skb(skb);
+		skb_queue_purge(&dev->fragment_skb);
+	}
 
 	return rc;
 }
@@ -2534,11 +2770,10 @@
 static void pn533_wq_mi_recv(struct work_struct *work)
 {
 	struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
-
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
 	if (!skb)
@@ -2570,8 +2805,8 @@
 	if (rc == 0) /* success */
 		return;
 
-	nfc_dev_err(&dev->interface->dev,
-		    "Error %d when trying to perform data_exchange", rc);
+	nfc_err(&dev->interface->dev,
+		"Error %d when trying to perform data_exchange\n", rc);
 
 	dev_kfree_skb(skb);
 	kfree(dev->cmd_complete_mi_arg);
@@ -2587,7 +2822,7 @@
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	/* Grab the first skb in the queue */
 	skb = skb_dequeue(&dev->fragment_skb);
@@ -2625,8 +2860,8 @@
 	if (rc == 0) /* success */
 		return;
 
-	nfc_dev_err(&dev->interface->dev,
-		    "Error %d when trying to perform data_exchange", rc);
+	nfc_err(&dev->interface->dev,
+		"Error %d when trying to perform data_exchange\n", rc);
 
 	dev_kfree_skb(skb);
 	kfree(dev->cmd_complete_dep_arg);
@@ -2641,10 +2876,9 @@
 {
 	struct sk_buff *skb;
 	struct sk_buff *resp;
-
 	int skb_len;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
 
@@ -2691,7 +2925,7 @@
 	struct sk_buff *skb;
 	struct sk_buff *resp;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	skb = pn533_alloc_skb(dev, sizeof(u8));
 	if (!skb)
@@ -2717,7 +2951,7 @@
 {
 	struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
 
-	nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
+	dev_dbg(&urb->dev->dev, "%s\n", __func__);
 
 	print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
 		       urb->transfer_buffer, urb->transfer_buffer_length,
@@ -2737,7 +2971,7 @@
 	void *cntx;
 	struct pn533_acr122_poweron_rdr_arg arg;
 
-	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
 	init_completion(&arg.done);
 	cntx = dev->in_urb->context;  /* backup context */
@@ -2755,16 +2989,15 @@
 
 	rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Reader power on cmd error %d", rc);
+		nfc_err(&dev->interface->dev,
+			"Reader power on cmd error %d\n", rc);
 		return rc;
 	}
 
 	rc =  usb_submit_urb(dev->in_urb, GFP_KERNEL);
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Can't submit for reader power on cmd response %d",
-			    rc);
+		nfc_err(&dev->interface->dev,
+			"Can't submit reader poweron cmd response %d\n", rc);
 		return rc;
 	}
 
@@ -2785,20 +3018,19 @@
 	rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
 				     (u8 *)&rf_field, 1);
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Error on setting RF field");
+		nfc_err(&dev->interface->dev, "Error on setting RF field\n");
 		return rc;
 	}
 
 	return rc;
 }
 
-int pn533_dev_up(struct nfc_dev *nfc_dev)
+static int pn533_dev_up(struct nfc_dev *nfc_dev)
 {
 	return pn533_rf_field(nfc_dev, 1);
 }
 
-int pn533_dev_down(struct nfc_dev *nfc_dev)
+static int pn533_dev_down(struct nfc_dev *nfc_dev)
 {
 	return pn533_rf_field(nfc_dev, 0);
 }
@@ -2839,16 +3071,16 @@
 		break;
 
 	default:
-		nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
-			    dev->device_type);
+		nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+			dev->device_type);
 		return -EINVAL;
 	}
 
 	rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
 				     (u8 *)&max_retries, sizeof(max_retries));
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Error on setting MAX_RETRIES config");
+		nfc_err(&dev->interface->dev,
+			"Error on setting MAX_RETRIES config\n");
 		return rc;
 	}
 
@@ -2856,8 +3088,7 @@
 	rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
 				     (u8 *)&timing, sizeof(timing));
 	if (rc) {
-		nfc_dev_err(&dev->interface->dev,
-			    "Error on setting RF timings");
+		nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
 		return rc;
 	}
 
@@ -2871,8 +3102,8 @@
 		rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
 					     pasori_cfg, 3);
 		if (rc) {
-			nfc_dev_err(&dev->interface->dev,
-				    "Error while settings PASORI config");
+			nfc_err(&dev->interface->dev,
+				"Error while settings PASORI config\n");
 			return rc;
 		}
 
@@ -2917,8 +3148,8 @@
 	}
 
 	if (!in_endpoint || !out_endpoint) {
-		nfc_dev_err(&interface->dev,
-			    "Could not find bulk-in or bulk-out endpoint");
+		nfc_err(&interface->dev,
+			"Could not find bulk-in or bulk-out endpoint\n");
 		rc = -ENODEV;
 		goto error;
 	}
@@ -2941,6 +3172,8 @@
 	INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
 	INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
 	INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+	INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+	INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
 	INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
 	INIT_WORK(&dev->rf_work, pn533_wq_rf);
 	dev->wq = alloc_ordered_workqueue("pn533", 0);
@@ -2978,16 +3211,15 @@
 
 		rc = pn533_acr122_poweron_rdr(dev);
 		if (rc < 0) {
-			nfc_dev_err(&dev->interface->dev,
-				    "Couldn't poweron the reader (error %d)",
-				    rc);
+			nfc_err(&dev->interface->dev,
+				"Couldn't poweron the reader (error %d)\n", rc);
 			goto destroy_wq;
 		}
 		break;
 
 	default:
-		nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
-			    dev->device_type);
+		nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+			dev->device_type);
 		rc = -EINVAL;
 		goto destroy_wq;
 	}
@@ -2997,9 +3229,9 @@
 	if (rc < 0)
 		goto destroy_wq;
 
-	nfc_dev_info(&dev->interface->dev,
-		     "NXP PN5%02X firmware ver %d.%d now attached",
-		     fw_ver.ic, fw_ver.ver, fw_ver.rev);
+	nfc_info(&dev->interface->dev,
+		 "NXP PN5%02X firmware ver %d.%d now attached\n",
+		 fw_ver.ic, fw_ver.ver, fw_ver.rev);
 
 
 	dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
@@ -3070,7 +3302,7 @@
 	usb_free_urb(dev->out_urb);
 	kfree(dev);
 
-	nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
+	nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
 }
 
 static struct usb_driver pn533_driver = {
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 01e27d4..b158ee1 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/crc-ccitt.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
@@ -151,8 +153,7 @@
 	char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
 	int count = sizeof(rset_cmd);
 
-	pr_info(DRIVER_DESC ": %s\n", __func__);
-	dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+	nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
 
 	/* Disable fw download */
 	gpio_set_value(phy->gpio_fw, 0);
@@ -173,7 +174,7 @@
 			dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
 			ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
 			if (ret == count) {
-				dev_info(&phy->i2c_dev->dev,
+				nfc_info(&phy->i2c_dev->dev,
 					 "nfc_en polarity : active %s\n",
 					 (polarity == 0 ? "low" : "high"));
 				goto out;
@@ -181,7 +182,7 @@
 		}
 	}
 
-	dev_err(&phy->i2c_dev->dev,
+	nfc_err(&phy->i2c_dev->dev,
 		"Could not detect nfc_en polarity, fallback to active high\n");
 
 out:
@@ -201,7 +202,7 @@
 {
 	struct pn544_i2c_phy *phy = phy_id;
 
-	pr_info(DRIVER_DESC ": %s\n", __func__);
+	pr_info("%s\n", __func__);
 
 	pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
 
@@ -214,8 +215,6 @@
 {
 	struct pn544_i2c_phy *phy = phy_id;
 
-	pr_info(DRIVER_DESC ": %s\n", __func__);
-
 	gpio_set_value(phy->gpio_fw, 0);
 	gpio_set_value(phy->gpio_en, !phy->en_polarity);
 	usleep_range(10000, 15000);
@@ -298,11 +297,9 @@
 	crc = ~crc;
 
 	if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
-		pr_err(PN544_HCI_I2C_DRIVER_NAME
-		       ": CRC error 0x%x != 0x%x 0x%x\n",
+		pr_err("CRC error 0x%x != 0x%x 0x%x\n",
 		       crc, buf[len - 1], buf[len - 2]);
-
-		pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+		pr_info("%s: BAD CRC\n", __func__);
 		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
 			       16, 2, buf, buflen, false);
 		return -EPERM;
@@ -328,13 +325,13 @@
 
 	r = i2c_master_recv(client, &len, 1);
 	if (r != 1) {
-		dev_err(&client->dev, "cannot read len byte\n");
+		nfc_err(&client->dev, "cannot read len byte\n");
 		return -EREMOTEIO;
 	}
 
 	if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
 	    (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
-		dev_err(&client->dev, "invalid len byte\n");
+		nfc_err(&client->dev, "invalid len byte\n");
 		r = -EBADMSG;
 		goto flush;
 	}
@@ -386,7 +383,7 @@
 
 	r = i2c_master_recv(client, (char *) &response, sizeof(response));
 	if (r != sizeof(response)) {
-		dev_err(&client->dev, "cannot read fw status\n");
+		nfc_err(&client->dev, "cannot read fw status\n");
 		return -EIO;
 	}
 
@@ -478,8 +475,7 @@
 {
 	struct pn544_i2c_phy *phy = phy_id;
 
-	pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
-		firmware_name);
+	pr_info("Starting Firmware Download (%s)\n", firmware_name);
 
 	strcpy(phy->firmware_name, firmware_name);
 
@@ -493,7 +489,7 @@
 static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
 					   int result)
 {
-	pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+	pr_info("Firmware Download Complete, result=%d\n", result);
 
 	pn544_hci_i2c_disable(phy);
 
@@ -694,14 +690,14 @@
 	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+		nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
 		return -ENODEV;
 	}
 
 	phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
 			   GFP_KERNEL);
 	if (!phy) {
-		dev_err(&client->dev,
+		nfc_err(&client->dev,
 			"Cannot allocate memory for pn544 i2c phy.\n");
 		return -ENOMEM;
 	}
@@ -714,18 +710,18 @@
 
 	pdata = client->dev.platform_data;
 	if (pdata == NULL) {
-		dev_err(&client->dev, "No platform data\n");
+		nfc_err(&client->dev, "No platform data\n");
 		return -EINVAL;
 	}
 
 	if (pdata->request_resources == NULL) {
-		dev_err(&client->dev, "request_resources() missing\n");
+		nfc_err(&client->dev, "request_resources() missing\n");
 		return -EINVAL;
 	}
 
 	r = pdata->request_resources(client);
 	if (r) {
-		dev_err(&client->dev, "Cannot get platform resources\n");
+		nfc_err(&client->dev, "Cannot get platform resources\n");
 		return r;
 	}
 
@@ -739,7 +735,7 @@
 				 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 				 PN544_HCI_I2C_DRIVER_NAME, phy);
 	if (r < 0) {
-		dev_err(&client->dev, "Unable to register IRQ handler\n");
+		nfc_err(&client->dev, "Unable to register IRQ handler\n");
 		goto err_rti;
 	}
 
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 078e62f..74cfa0a 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -41,6 +43,7 @@
 
 /* Proprietary commands */
 #define PN544_WRITE		0x3f
+#define PN544_TEST_SWP		0x21
 
 /* Proprietary gates, events, commands and registers */
 
@@ -81,14 +84,17 @@
 #define PN544_PL_NFCT_DEACTIVATED		0x09
 
 #define PN544_SWP_MGMT_GATE			0xA0
+#define PN544_SWP_DEFAULT_MODE			0x01
 
 #define PN544_NFC_WI_MGMT_GATE			0xA1
+#define PN544_NFC_ESE_DEFAULT_MODE		0x01
 
 #define PN544_HCI_EVT_SND_DATA			0x01
 #define PN544_HCI_EVT_ACTIVATED			0x02
 #define PN544_HCI_EVT_DEACTIVATED		0x03
 #define PN544_HCI_EVT_RCV_DATA			0x04
 #define PN544_HCI_EVT_CONTINUE_MI		0x05
+#define PN544_HCI_EVT_SWITCH_MODE		0x03
 
 #define PN544_HCI_CMD_ATTREQUEST		0x12
 #define PN544_HCI_CMD_CONTINUE_ACTIVATION	0x13
@@ -187,13 +193,6 @@
 
 		{{0x9e, 0xb4}, 0x00},
 
-		{{0x9e, 0xd9}, 0xff},
-		{{0x9e, 0xda}, 0xff},
-		{{0x9e, 0xdb}, 0x23},
-		{{0x9e, 0xdc}, 0x21},
-		{{0x9e, 0xdd}, 0x22},
-		{{0x9e, 0xde}, 0x24},
-
 		{{0x9c, 0x01}, 0x08},
 
 		{{0x9e, 0xaa}, 0x01},
@@ -394,7 +393,7 @@
 	if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
 		hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
 							&hdev->gb_len);
-		pr_debug("generate local bytes %p", hdev->gb);
+	pr_debug("generated local general bytes %p\n", hdev->gb);
 		if (hdev->gb == NULL || hdev->gb_len == 0) {
 			im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
 			tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
@@ -696,7 +695,7 @@
 static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
 				   struct nfc_target *target)
 {
-	pr_debug("supported protocol %d", target->supported_protocols);
+	pr_debug("supported protocol %d\n", target->supported_protocols);
 	if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
 					NFC_PROTO_ISO14443_B_MASK)) {
 		return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
@@ -733,7 +732,7 @@
 	struct sk_buff *rgb_skb = NULL;
 	int r;
 
-	pr_debug("hci event %d", event);
+	pr_debug("hci event %d\n", event);
 	switch (event) {
 	case PN544_HCI_EVT_ACTIVATED:
 		if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) {
@@ -764,7 +763,7 @@
 		}
 
 		if (skb->data[0] != 0) {
-			pr_debug("data0 %d", skb->data[0]);
+			pr_debug("data0 %d\n", skb->data[0]);
 			r = -EPROTO;
 			goto exit;
 		}
@@ -792,6 +791,108 @@
 	return info->fw_download(info->phy_id, firmware_name);
 }
 
+static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+	u32 se_idx = 0;
+	u8 ese_mode = 0x01; /* Default mode */
+	struct sk_buff *res_skb;
+	int r;
+
+	r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_TEST_SWP,
+			     NULL, 0, &res_skb);
+
+	if (r == 0) {
+		if (res_skb->len == 2 && res_skb->data[0] == 0x00)
+			nfc_add_se(hdev->ndev, se_idx++, NFC_SE_UICC);
+
+		kfree_skb(res_skb);
+	}
+
+	r = nfc_hci_send_event(hdev, PN544_NFC_WI_MGMT_GATE,
+				PN544_HCI_EVT_SWITCH_MODE,
+				&ese_mode, 1);
+	if (r == 0)
+		nfc_add_se(hdev->ndev, se_idx++, NFC_SE_EMBEDDED);
+
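+	/* Succeed (return 0) only if at least one secure element was found */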
+	return !se_idx;
+}
+
+#define PN544_SE_MODE_OFF	0x00
+#define PN544_SE_MODE_ON	0x01
+static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+	struct nfc_se *se;
+	u8 enable = PN544_SE_MODE_ON;
+	static struct uicc_gatelist {
+		u8 head;
+		u8 adr[2];
+		u8 value;
+	} uicc_gatelist[] = {
+		{0x00, {0x9e, 0xd9}, 0x23},
+		{0x00, {0x9e, 0xda}, 0x21},
+		{0x00, {0x9e, 0xdb}, 0x22},
+		{0x00, {0x9e, 0xdc}, 0x24},
+	};
+	struct uicc_gatelist *p = uicc_gatelist;
+	int count = ARRAY_SIZE(uicc_gatelist);
+	struct sk_buff *res_skb;
+	int r;
+
+	se = nfc_find_se(hdev->ndev, se_idx);
+
+	switch (se->type) {
+	case NFC_SE_UICC:
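+		/* Write each (address, value) pair with the proprietary
+		 * WRITE command and check that the device echoes the value
+		 * back.
+		 */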
+		while (count--) {
+			r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE,
+					PN544_WRITE, (u8 *)p, 4, &res_skb);
+			if (r < 0)
+				return r;
+
+			if (res_skb->len != 1) {
+				kfree_skb(res_skb);
+				return -EPROTO;
+			}
+
+			if (res_skb->data[0] != p->value) {
+				kfree_skb(res_skb);
+				return -EIO;
+			}
+
+			kfree_skb(res_skb);
+
+			p++;
+		}
+
+		return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+			      PN544_SWP_DEFAULT_MODE, &enable, 1);
+	case NFC_SE_EMBEDDED:
+		return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+			      PN544_NFC_ESE_DEFAULT_MODE, &enable, 1);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+	struct nfc_se *se;
+	u8 disable = PN544_SE_MODE_OFF;
+
+	se = nfc_find_se(hdev->ndev, se_idx);
+
+	switch (se->type) {
+	case NFC_SE_UICC:
+		return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+			      PN544_SWP_DEFAULT_MODE, &disable, 1);
+	case NFC_SE_EMBEDDED:
+		return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+			      PN544_NFC_ESE_DEFAULT_MODE, &disable, 1);
+	default:
+		return -EINVAL;
+	}
+}
+
 static struct nfc_hci_ops pn544_hci_ops = {
 	.open = pn544_hci_open,
 	.close = pn544_hci_close,
@@ -807,6 +908,9 @@
 	.check_presence = pn544_hci_check_presence,
 	.event_received = pn544_hci_event_received,
 	.fw_download = pn544_hci_fw_download,
+	.discover_se = pn544_hci_discover_se,
+	.enable_se = pn544_hci_enable_se,
+	.disable_se = pn544_hci_disable_se,
 };
 
 int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
@@ -820,7 +924,6 @@
 
 	info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
 	if (!info) {
-		pr_err("Cannot allocate memory for pn544_hci_info.\n");
 		r = -ENOMEM;
 		goto err_info_alloc;
 	}
@@ -853,7 +956,7 @@
 					     phy_headroom + PN544_CMDS_HEADROOM,
 					     phy_tailroom, phy_payload);
 	if (!info->hdev) {
-		pr_err("Cannot allocate nfc hdev.\n");
+		pr_err("Cannot allocate nfc hdev\n");
 		r = -ENOMEM;
 		goto err_alloc_hdev;
 	}
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
new file mode 100644
index 0000000..8a0571e
--- /dev/null
+++ b/drivers/nfc/port100.c
@@ -0,0 +1,1529 @@
+/*
+ * Sony NFC Port-100 Series driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Partly based on/inspired by Stephen Tiedemann's nfcpy
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <net/nfc/digital.h>
+
+#define VERSION "0.1"
+
+#define SONY_VENDOR_ID    0x054c
+#define RCS380_PRODUCT_ID 0x06c1
+
+#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK    | \
+			   NFC_PROTO_MIFARE_MASK   | \
+			   NFC_PROTO_FELICA_MASK   | \
+			   NFC_PROTO_NFC_DEP_MASK)
+
+#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
+			      NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+/* Standard port100 frame definitions */
+#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
+				  + 2) /* data[0] CC, data[1] SCC */
+#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */
+
+#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))
+
+/*
+ * Max extended frame payload len, excluding CC and SCC
+ * which are already in PORT100_FRAME_HEADER_LEN.
+ */
+#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001
+
+#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
+				    Postamble (1) */
+static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
+	0x00, 0x00, 0xff, 0x00, 0xff, 0x00
+};
+
+#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
+#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])
+
+/* start of frame */
+#define PORT100_FRAME_SOF	0x00FF
+#define PORT100_FRAME_EXT	0xFFFF
+#define PORT100_FRAME_ACK	0x00FF
+
+/* Port-100 command: in or out */
+#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
+#define PORT100_FRAME_DIR_OUT 0xD6
+#define PORT100_FRAME_DIR_IN  0xD7
+
+/* Port-100 sub-command */
+#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */
+
+#define PORT100_CMD_GET_FIRMWARE_VERSION 0x20
+#define PORT100_CMD_GET_COMMAND_TYPE     0x28
+#define PORT100_CMD_SET_COMMAND_TYPE     0x2A
+
+#define PORT100_CMD_IN_SET_RF       0x00
+#define PORT100_CMD_IN_SET_PROTOCOL 0x02
+#define PORT100_CMD_IN_COMM_RF      0x04
+
+#define PORT100_CMD_TG_SET_RF       0x40
+#define PORT100_CMD_TG_SET_PROTOCOL 0x42
+#define PORT100_CMD_TG_SET_RF_OFF   0x46
+#define PORT100_CMD_TG_COMM_RF      0x48
+
+#define PORT100_CMD_SWITCH_RF       0x06
+
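+/* A response frame carries the request code plus one, e.g. the answer to
+ * PORT100_CMD_GET_FIRMWARE_VERSION (0x20) comes back with code 0x21.
+ */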
+#define PORT100_CMD_RESPONSE(cmd) ((cmd) + 1)
+
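+/* Bit n of the mask returned by PORT100_CMD_GET_COMMAND_TYPE is set when
+ * the device supports command type n.
+ */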
+#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
+	((mask) & (0x01 << (cmd_type)))
+#define PORT100_CMD_TYPE_0	0
+#define PORT100_CMD_TYPE_1	1
+
+#define PORT100_CMD_STATUS_OK      0x00
+#define PORT100_CMD_STATUS_TIMEOUT 0x80
+
+#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK 0x01
+#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK      0x02
+
+struct port100;
+
+typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
+					      struct sk_buff *resp);
+
+/**
+ * Setting sets structure for in_set_rf command
+ *
+ * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
+ *              This table contains multiple RF setting sets required for RF
+ *              communication.
+ *
+ * @in_*_comm_type: These fields set the communication type to be used.
+ */
+struct port100_in_rf_setting {
+	u8 in_send_set_number;
+	u8 in_send_comm_type;
+	u8 in_recv_set_number;
+	u8 in_recv_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_IN_212F 0x01
+#define PORT100_COMM_TYPE_IN_424F 0x02
+#define PORT100_COMM_TYPE_IN_106A 0x03
+
+static const struct port100_in_rf_setting in_rf_settings[] = {
+	[NFC_DIGITAL_RF_TECH_212F] = {
+		.in_send_set_number = 1,
+		.in_send_comm_type  = PORT100_COMM_TYPE_IN_212F,
+		.in_recv_set_number = 15,
+		.in_recv_comm_type  = PORT100_COMM_TYPE_IN_212F,
+	},
+	[NFC_DIGITAL_RF_TECH_424F] = {
+		.in_send_set_number = 1,
+		.in_send_comm_type  = PORT100_COMM_TYPE_IN_424F,
+		.in_recv_set_number = 15,
+		.in_recv_comm_type  = PORT100_COMM_TYPE_IN_424F,
+	},
+	[NFC_DIGITAL_RF_TECH_106A] = {
+		.in_send_set_number = 2,
+		.in_send_comm_type  = PORT100_COMM_TYPE_IN_106A,
+		.in_recv_set_number = 15,
+		.in_recv_comm_type  = PORT100_COMM_TYPE_IN_106A,
+	},
+};
+
+/**
+ * Setting sets structure for tg_set_rf command
+ *
+ * @tg_set_number: Represents the entry index in the port-100 RF Base Table.
+ *                 This table contains multiple RF setting sets required for RF
+ *                 communication. This field is used for both send and receive
+ *                 settings.
+ *
+ * @tg_comm_type: Sets the communication type to be used to send and receive
+ *                data.
+ */
+struct port100_tg_rf_setting {
+	u8 tg_set_number;
+	u8 tg_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_TG_106A 0x0B
+#define PORT100_COMM_TYPE_TG_212F 0x0C
+#define PORT100_COMM_TYPE_TG_424F 0x0D
+
+static const struct port100_tg_rf_setting tg_rf_settings[] = {
+	[NFC_DIGITAL_RF_TECH_106A] = {
+		.tg_set_number = 8,
+		.tg_comm_type = PORT100_COMM_TYPE_TG_106A,
+	},
+	[NFC_DIGITAL_RF_TECH_212F] = {
+		.tg_set_number = 8,
+		.tg_comm_type = PORT100_COMM_TYPE_TG_212F,
+	},
+	[NFC_DIGITAL_RF_TECH_424F] = {
+		.tg_set_number = 8,
+		.tg_comm_type = PORT100_COMM_TYPE_TG_424F,
+	},
+};
+
+#define PORT100_IN_PROT_INITIAL_GUARD_TIME      0x00
+#define PORT100_IN_PROT_ADD_CRC                 0x01
+#define PORT100_IN_PROT_CHECK_CRC               0x02
+#define PORT100_IN_PROT_MULTI_CARD              0x03
+#define PORT100_IN_PROT_ADD_PARITY              0x04
+#define PORT100_IN_PROT_CHECK_PARITY            0x05
+#define PORT100_IN_PROT_BITWISE_AC_RECV_MODE    0x06
+#define PORT100_IN_PROT_VALID_BIT_NUMBER        0x07
+#define PORT100_IN_PROT_CRYPTO1                 0x08
+#define PORT100_IN_PROT_ADD_SOF                 0x09
+#define PORT100_IN_PROT_CHECK_SOF               0x0A
+#define PORT100_IN_PROT_ADD_EOF                 0x0B
+#define PORT100_IN_PROT_CHECK_EOF               0x0C
+#define PORT100_IN_PROT_DEAF_TIME               0x0E
+#define PORT100_IN_PROT_CRM                     0x0F
+#define PORT100_IN_PROT_CRM_MIN_LEN             0x10
+#define PORT100_IN_PROT_T1_TAG_FRAME            0x11
+#define PORT100_IN_PROT_RFCA                    0x12
+#define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13
+#define PORT100_IN_PROT_END                     0x14
+
+#define PORT100_IN_MAX_NUM_PROTOCOLS            19
+
+#define PORT100_TG_PROT_TU           0x00
+#define PORT100_TG_PROT_RF_OFF       0x01
+#define PORT100_TG_PROT_CRM          0x02
+#define PORT100_TG_PROT_END          0x03
+
+#define PORT100_TG_MAX_NUM_PROTOCOLS 3
+
+struct port100_protocol {
+	u8 number;
+	u8 value;
+} __packed;
+
+static struct port100_protocol
+in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
+	[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+		{ PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+		{ PORT100_IN_PROT_ADD_CRC,                 0 },
+		{ PORT100_IN_PROT_CHECK_CRC,               0 },
+		{ PORT100_IN_PROT_MULTI_CARD,              0 },
+		{ PORT100_IN_PROT_ADD_PARITY,              0 },
+		{ PORT100_IN_PROT_CHECK_PARITY,            1 },
+		{ PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+		{ PORT100_IN_PROT_VALID_BIT_NUMBER,        7 },
+		{ PORT100_IN_PROT_CRYPTO1,                 0 },
+		{ PORT100_IN_PROT_ADD_SOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_SOF,               0 },
+		{ PORT100_IN_PROT_ADD_EOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_EOF,               0 },
+		{ PORT100_IN_PROT_DEAF_TIME,               4 },
+		{ PORT100_IN_PROT_CRM,                     0 },
+		{ PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+		{ PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+		{ PORT100_IN_PROT_RFCA,                    0 },
+		{ PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+		{ PORT100_IN_PROT_END,                     0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+		{ PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+		{ PORT100_IN_PROT_ADD_CRC,                 0 },
+		{ PORT100_IN_PROT_CHECK_CRC,               0 },
+		{ PORT100_IN_PROT_MULTI_CARD,              0 },
+		{ PORT100_IN_PROT_ADD_PARITY,              1 },
+		{ PORT100_IN_PROT_CHECK_PARITY,            1 },
+		{ PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+		{ PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+		{ PORT100_IN_PROT_CRYPTO1,                 0 },
+		{ PORT100_IN_PROT_ADD_SOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_SOF,               0 },
+		{ PORT100_IN_PROT_ADD_EOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_EOF,               0 },
+		{ PORT100_IN_PROT_DEAF_TIME,               4 },
+		{ PORT100_IN_PROT_CRM,                     0 },
+		{ PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+		{ PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+		{ PORT100_IN_PROT_RFCA,                    0 },
+		{ PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+		{ PORT100_IN_PROT_END,                     0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+		{ PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+		{ PORT100_IN_PROT_ADD_CRC,                 1 },
+		{ PORT100_IN_PROT_CHECK_CRC,               1 },
+		{ PORT100_IN_PROT_MULTI_CARD,              0 },
+		{ PORT100_IN_PROT_ADD_PARITY,              1 },
+		{ PORT100_IN_PROT_CHECK_PARITY,            1 },
+		{ PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+		{ PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+		{ PORT100_IN_PROT_CRYPTO1,                 0 },
+		{ PORT100_IN_PROT_ADD_SOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_SOF,               0 },
+		{ PORT100_IN_PROT_ADD_EOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_EOF,               0 },
+		{ PORT100_IN_PROT_DEAF_TIME,               4 },
+		{ PORT100_IN_PROT_CRM,                     0 },
+		{ PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+		{ PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+		{ PORT100_IN_PROT_RFCA,                    0 },
+		{ PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+		{ PORT100_IN_PROT_END,                     0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+		/* nfc_digital_framing_nfca_short */
+		{ PORT100_IN_PROT_ADD_CRC,          2 },
+		{ PORT100_IN_PROT_CHECK_CRC,        2 },
+		{ PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+		{ PORT100_IN_PROT_T1_TAG_FRAME,     2 },
+		{ PORT100_IN_PROT_END,              0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+		/* nfc_digital_framing_nfca_standard */
+		{ PORT100_IN_PROT_ADD_CRC,   1 },
+		{ PORT100_IN_PROT_CHECK_CRC, 0 },
+		{ PORT100_IN_PROT_END,       0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+		/* nfc_digital_framing_nfca_standard */
+		{ PORT100_IN_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF] = {
+		{ PORT100_IN_PROT_INITIAL_GUARD_TIME,     18 },
+		{ PORT100_IN_PROT_ADD_CRC,                 1 },
+		{ PORT100_IN_PROT_CHECK_CRC,               1 },
+		{ PORT100_IN_PROT_MULTI_CARD,              0 },
+		{ PORT100_IN_PROT_ADD_PARITY,              0 },
+		{ PORT100_IN_PROT_CHECK_PARITY,            0 },
+		{ PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+		{ PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+		{ PORT100_IN_PROT_CRYPTO1,                 0 },
+		{ PORT100_IN_PROT_ADD_SOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_SOF,               0 },
+		{ PORT100_IN_PROT_ADD_EOF,                 0 },
+		{ PORT100_IN_PROT_CHECK_EOF,               0 },
+		{ PORT100_IN_PROT_DEAF_TIME,               4 },
+		{ PORT100_IN_PROT_CRM,                     0 },
+		{ PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+		{ PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+		{ PORT100_IN_PROT_RFCA,                    0 },
+		{ PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+		{ PORT100_IN_PROT_END,                     0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+		/* nfc_digital_framing_nfcf */
+		{ PORT100_IN_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+		/* nfc_digital_framing_nfcf */
+		{ PORT100_IN_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+		{ PORT100_IN_PROT_END, 0 },
+	},
+};
+
+static struct port100_protocol
+tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
+	[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+		{ PORT100_TG_PROT_TU,     1 },
+		{ PORT100_TG_PROT_RF_OFF, 0 },
+		{ PORT100_TG_PROT_CRM,    7 },
+		{ PORT100_TG_PROT_END,    0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+		{ PORT100_TG_PROT_END, 0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+		{ PORT100_TG_PROT_TU,     1 },
+		{ PORT100_TG_PROT_RF_OFF, 0 },
+		{ PORT100_TG_PROT_CRM,    7 },
+		{ PORT100_TG_PROT_END,    0 },
+	},
+	[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+		{ PORT100_TG_PROT_RF_OFF, 1 },
+		{ PORT100_TG_PROT_END,    0 },
+	},
+};
+
+struct port100 {
+	struct nfc_digital_dev *nfc_digital_dev;
+
+	int skb_headroom;
+	int skb_tailroom;
+
+	struct usb_device *udev;
+	struct usb_interface *interface;
+
+	struct urb *out_urb;
+	struct urb *in_urb;
+
+	struct work_struct cmd_complete_work;
+
+	u8 cmd_type;
+
+	/* The digital stack serializes commands to be sent. There is no need
+	 * for any queuing/locking mechanism at driver level.
+	 */
+	struct port100_cmd *cmd;
+};
+
+struct port100_cmd {
+	u8 code;
+	int status;
+	struct sk_buff *req;
+	struct sk_buff *resp;
+	int resp_len;
+	port100_send_async_complete_t  complete_cb;
+	void *complete_cb_context;
+};
+
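+/* On-wire layout of a standard extended frame: preamble (1 byte),
+ * SoF 0x00FF (2), extended marker 0xFFFF (2), little-endian payload
+ * length (2), length checksum (1), then the payload (data[0] is the CC
+ * direction byte, data[1] the SCC sub-command) followed by the data
+ * checksum and postamble (PORT100_FRAME_TAIL_LEN).
+ */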
+struct port100_frame {
+	u8 preamble;
+	__be16 start_frame;
+	__be16 extended_frame;
+	__le16 datalen;
+	u8 datalen_checksum;
+	u8 data[];
+} __packed;
+
+struct port100_ack_frame {
+	u8 preamble;
+	__be16 start_frame;
+	__be16 ack_frame;
+	u8 postamble;
+} __packed;
+
+struct port100_cb_arg {
+	nfc_digital_cmd_complete_t complete_cb;
+	void *complete_arg;
+	u8 mdaa;
+};
+
+struct port100_tg_comm_rf_cmd {
+	__le16 guard_time;
+	__le16 send_timeout;
+	u8 mdaa;
+	u8 nfca_param[6];
+	u8 nfcf_param[18];
+	u8 mf_halted;
+	u8 arae_flag;
+	__le16 recv_timeout;
+	u8 data[];
+} __packed;
+
+struct port100_tg_comm_rf_res {
+	u8 comm_type;
+	u8 ar_status;
+	u8 target_activated;
+	__le32 status;
+	u8 data[];
+} __packed;
+
+/* The rule: value + checksum = 0 */
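+/* Example: for value 0x0102 the byte sum is 0x01 + 0x02 = 0x03, so the
+ * checksum is 0xfd and (0x03 + 0xfd) & 0xff == 0.
+ */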
+static inline u8 port100_checksum(u16 value)
+{
+	return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 port100_data_checksum(u8 *data, int datalen)
+{
+	u8 sum = 0;
+	int i;
+
+	for (i = 0; i < datalen; i++)
+		sum += data[i];
+
+	return port100_checksum(sum);
+}
+
+static void port100_tx_frame_init(void *_frame, u8 cmd_code)
+{
+	struct port100_frame *frame = _frame;
+
+	frame->preamble = 0;
+	frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF);
+	frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT);
+	PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT;
+	PORT100_FRAME_CMD(frame) = cmd_code;
+	frame->datalen = cpu_to_le16(2);
+}
+
+static void port100_tx_frame_finish(void *_frame)
+{
+	struct port100_frame *frame = _frame;
+
+	frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen));
+
+	PORT100_FRAME_CHECKSUM(frame) =
+		port100_data_checksum(frame->data, le16_to_cpu(frame->datalen));
+
+	PORT100_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static void port100_tx_update_payload_len(void *_frame, int len)
+{
+	struct port100_frame *frame = _frame;
+
+	frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+}
+
+static bool port100_rx_frame_is_valid(void *_frame)
+{
+	u8 checksum;
+	struct port100_frame *frame = _frame;
+
+	if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
+	    frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
+		return false;
+
+	checksum = port100_checksum(le16_to_cpu(frame->datalen));
+	if (checksum != frame->datalen_checksum)
+		return false;
+
+	checksum = port100_data_checksum(frame->data,
+					 le16_to_cpu(frame->datalen));
+	if (checksum != PORT100_FRAME_CHECKSUM(frame))
+		return false;
+
+	return true;
+}
+
+static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+{
+	return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
+		frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
+}
+
+static inline int port100_rx_frame_size(void *frame)
+{
+	struct port100_frame *f = frame;
+
+	return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
+	       PORT100_FRAME_TAIL_LEN;
+}
+
+static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+{
+	struct port100_frame *f = frame;
+
+	return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
+}
+
+static void port100_recv_response(struct urb *urb)
+{
+	struct port100 *dev = urb->context;
+	struct port100_cmd *cmd = dev->cmd;
+	u8 *in_frame;
+
+	cmd->status = urb->status;
+
+	switch (urb->status) {
+	case 0:
+		break; /* success */
+	case -ECONNRESET:
+	case -ENOENT:
+		nfc_err(&dev->interface->dev,
+			"The urb has been canceled (status %d)\n", urb->status);
+		goto sched_wq;
+	case -ESHUTDOWN:
+	default:
+		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+			urb->status);
+		goto sched_wq;
+	}
+
+	in_frame = dev->in_urb->transfer_buffer;
+
+	if (!port100_rx_frame_is_valid(in_frame)) {
+		nfc_err(&dev->interface->dev, "Received an invalid frame\n");
+		cmd->status = -EIO;
+		goto sched_wq;
+	}
+
+	print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
+			     port100_rx_frame_size(in_frame), false);
+
+	if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
+		nfc_err(&dev->interface->dev,
+			"It's not the response to the last command\n");
+		cmd->status = -EIO;
+		goto sched_wq;
+	}
+
+sched_wq:
+	schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+{
+	dev->in_urb->complete = port100_recv_response;
+
+	return usb_submit_urb(dev->in_urb, flags);
+}
+
+static void port100_recv_ack(struct urb *urb)
+{
+	struct port100 *dev = urb->context;
+	struct port100_cmd *cmd = dev->cmd;
+	struct port100_ack_frame *in_frame;
+	int rc;
+
+	cmd->status = urb->status;
+
+	switch (urb->status) {
+	case 0:
+		break; /* success */
+	case -ECONNRESET:
+	case -ENOENT:
+		nfc_err(&dev->interface->dev,
+			"The urb has been stopped (status %d)\n", urb->status);
+		goto sched_wq;
+	case -ESHUTDOWN:
+	default:
+		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+			urb->status);
+		goto sched_wq;
+	}
+
+	in_frame = dev->in_urb->transfer_buffer;
+
+	if (!port100_rx_frame_is_ack(in_frame)) {
+		nfc_err(&dev->interface->dev, "Received an invalid ack\n");
+		cmd->status = -EIO;
+		goto sched_wq;
+	}
+
+	rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
+	if (rc) {
+		nfc_err(&dev->interface->dev,
+			"usb_submit_urb failed with result %d\n", rc);
+		cmd->status = rc;
+		goto sched_wq;
+	}
+
+	return;
+
+sched_wq:
+	schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+{
+	dev->in_urb->complete = port100_recv_ack;
+
+	return usb_submit_urb(dev->in_urb, flags);
+}
+
+static int port100_send_ack(struct port100 *dev)
+{
+	int rc;
+
+	dev->out_urb->transfer_buffer = ack_frame;
+	dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
+	rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+
+	return rc;
+}
+
+static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
+				    struct sk_buff *in, int in_len)
+{
+	int rc;
+
+	dev->out_urb->transfer_buffer = out->data;
+	dev->out_urb->transfer_buffer_length = out->len;
+
+	dev->in_urb->transfer_buffer = in->data;
+	dev->in_urb->transfer_buffer_length = in_len;
+
+	print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1,
+			     out->data, out->len, false);
+
+	rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
+	if (rc)
+		goto error;
+
+	return 0;
+
+error:
+	usb_unlink_urb(dev->out_urb);
+	return rc;
+}
+
+static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code,
+				    struct sk_buff *skb)
+{
+	/* payload is already there, just update datalen */
+	int payload_len = skb->len;
+
+	skb_push(skb, PORT100_FRAME_HEADER_LEN);
+	skb_put(skb, PORT100_FRAME_TAIL_LEN);
+
+	port100_tx_frame_init(skb->data, cmd_code);
+	port100_tx_update_payload_len(skb->data, payload_len);
+	port100_tx_frame_finish(skb->data);
+}
+
+static void port100_send_async_complete(struct port100 *dev)
+{
+	struct port100_cmd *cmd = dev->cmd;
+	int status = cmd->status;
+
+	struct sk_buff *req = cmd->req;
+	struct sk_buff *resp = cmd->resp;
+
+	dev_kfree_skb(req);
+
+	dev->cmd = NULL;
+
+	if (status < 0) {
+		cmd->complete_cb(dev, cmd->complete_cb_context,
+				 ERR_PTR(status));
+		dev_kfree_skb(resp);
+		goto done;
+	}
+
+	skb_put(resp, port100_rx_frame_size(resp->data));
+	skb_pull(resp, PORT100_FRAME_HEADER_LEN);
+	skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN);
+
+	cmd->complete_cb(dev, cmd->complete_cb_context, resp);
+
+done:
+	kfree(cmd);
+}
+
+static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code,
+				struct sk_buff *req,
+				port100_send_async_complete_t complete_cb,
+				void *complete_cb_context)
+{
+	struct port100_cmd *cmd;
+	struct sk_buff *resp;
+	int rc;
+	int  resp_len = PORT100_FRAME_HEADER_LEN +
+			PORT100_FRAME_MAX_PAYLOAD_LEN +
+			PORT100_FRAME_TAIL_LEN;
+
+	resp = alloc_skb(resp_len, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		dev_kfree_skb(resp);
+		return -ENOMEM;
+	}
+
+	cmd->code = cmd_code;
+	cmd->req = req;
+	cmd->resp = resp;
+	cmd->resp_len = resp_len;
+	cmd->complete_cb = complete_cb;
+	cmd->complete_cb_context = complete_cb_context;
+
+	port100_build_cmd_frame(dev, cmd_code, req);
+
+	dev->cmd = cmd;
+
+	rc = port100_send_frame_async(dev, req, resp, resp_len);
+	if (rc) {
+		kfree(cmd);
+		dev_kfree_skb(resp);
+		dev->cmd = NULL;
+	}
+
+	return rc;
+}
+
+struct port100_sync_cmd_response {
+	struct sk_buff *resp;
+	struct completion done;
+};
+
+static void port100_wq_cmd_complete(struct work_struct *work)
+{
+	struct port100 *dev = container_of(work, struct port100,
+					   cmd_complete_work);
+
+	port100_send_async_complete(dev);
+}
+
+static void port100_send_sync_complete(struct port100 *dev, void *_arg,
+				      struct sk_buff *resp)
+{
+	struct port100_sync_cmd_response *arg = _arg;
+
+	arg->resp = resp;
+	complete(&arg->done);
+}
+
+static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code,
+					     struct sk_buff *req)
+{
+	int rc;
+	struct port100_sync_cmd_response arg;
+
+	init_completion(&arg.done);
+
+	rc = port100_send_cmd_async(dev, cmd_code, req,
+				    port100_send_sync_complete, &arg);
+	if (rc) {
+		dev_kfree_skb(req);
+		return ERR_PTR(rc);
+	}
+
+	wait_for_completion(&arg.done);
+
+	return arg.resp;
+}
+
+static void port100_send_complete(struct urb *urb)
+{
+	struct port100 *dev = urb->context;
+
+	switch (urb->status) {
+	case 0:
+		break; /* success */
+	case -ECONNRESET:
+	case -ENOENT:
+		nfc_err(&dev->interface->dev,
+			"The urb has been stopped (status %d)\n", urb->status);
+		break;
+	case -ESHUTDOWN:
+	default:
+		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+			urb->status);
+	}
+}
+
+static void port100_abort_cmd(struct nfc_digital_dev *ddev)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+
+	/* An ack will cancel the last issued command */
+	port100_send_ack(dev);
+
+	/* cancel the urb request */
+	usb_kill_urb(dev->in_urb);
+}
+
+static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size,
+			GFP_KERNEL);
+	if (skb)
+		skb_reserve(skb, dev->skb_headroom);
+
+	return skb;
+}
+
+static int port100_set_command_type(struct port100 *dev, u8 command_type)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int rc;
+
+	skb = port100_alloc_skb(dev, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, sizeof(u8)) = command_type;
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rc = resp->data[0];
+
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static u64 port100_get_command_type_mask(struct port100 *dev)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	u64 mask;
+
+	skb = port100_alloc_skb(dev, 0);
+	if (!skb)
+		return 0;	/* the u64 mask cannot carry an errno */
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
+	if (IS_ERR(resp))
+		return 0;
+
+	if (resp->len < 8)
+		mask = 0;
+	else
+		mask = be64_to_cpu(*(__be64 *)resp->data);
+
+	dev_kfree_skb(resp);
+
+	return mask;
+}
+
+static u16 port100_get_firmware_version(struct port100 *dev)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	u16 fw_ver;
+
+	skb = port100_alloc_skb(dev, 0);
+	if (!skb)
+		return 0;
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION,
+				     skb);
+	if (IS_ERR(resp))
+		return 0;
+
+	fw_ver = le16_to_cpu(*(__le16 *)resp->data);
+
+	dev_kfree_skb(resp);
+
+	return fw_ver;
+}
+
+static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct sk_buff *skb, *resp;
+
+	skb = port100_alloc_skb(dev, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = on ? 1 : 0;
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	dev_kfree_skb(resp);
+
+	return 0;
+}
+
+static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int rc;
+
+	if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+		return -EINVAL;
+
+	skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting));
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, sizeof(struct port100_in_rf_setting)),
+	       &in_rf_settings[rf],
+	       sizeof(struct port100_in_rf_setting));
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rc = resp->data[0];
+
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct port100_protocol *protocols;
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int num_protocols;
+	size_t size;
+	int rc;
+
+	if (param >= NFC_DIGITAL_FRAMING_LAST)
+		return -EINVAL;
+
+	protocols = in_protocols[param];
+
+	num_protocols = 0;
+	while (protocols[num_protocols].number != PORT100_IN_PROT_END)
+		num_protocols++;
+
+	if (!num_protocols)
+		return 0;
+
+	size = sizeof(struct port100_protocol) * num_protocols;
+
+	skb = port100_alloc_skb(dev, size);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, size), protocols, size);
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rc = resp->data[0];
+
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+				   int param)
+{
+	if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+		return port100_in_set_rf(ddev, param);
+
+	if (type == NFC_DIGITAL_CONFIG_FRAMING)
+		return port100_in_set_framing(ddev, param);
+
+	return -EINVAL;
+}
+
+static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
+				       struct sk_buff *resp)
+{
+	struct port100_cb_arg *cb_arg = arg;
+	nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+	u32 status;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		goto exit;
+	}
+
+	if (resp->len < 4) {
+		nfc_err(&dev->interface->dev,
+			"Invalid packet length received.\n");
+		rc = -EIO;
+		goto error;
+	}
+
+	status = le32_to_cpu(*(__le32 *)resp->data);
+
+	skb_pull(resp, sizeof(u32));
+
+	if (status == PORT100_CMD_STATUS_TIMEOUT) {
+		rc = -ETIMEDOUT;
+		goto error;
+	}
+
+	if (status != PORT100_CMD_STATUS_OK) {
+		nfc_err(&dev->interface->dev,
+			"in_comm_rf failed with status 0x%08x\n", status);
+		rc = -EIO;
+		goto error;
+	}
+
+	/* Remove collision bits byte */
+	skb_pull(resp, 1);
+
+	goto exit;
+
+error:
+	kfree_skb(resp);
+	resp = ERR_PTR(rc);
+
+exit:
+	cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+	kfree(cb_arg);
+}
+
+static int port100_in_send_cmd(struct nfc_digital_dev *ddev,
+			       struct sk_buff *skb, u16 _timeout,
+			       nfc_digital_cmd_complete_t cb, void *arg)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct port100_cb_arg *cb_arg;
+	__le16 timeout;
+
+	cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+	if (!cb_arg)
+		return -ENOMEM;
+
+	cb_arg->complete_cb = cb;
+	cb_arg->complete_arg = arg;
+
+	timeout = cpu_to_le16(_timeout * 10);
+
+	memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16));
+
+	return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb,
+				      port100_in_comm_rf_complete, cb_arg);
+}
+
+static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int rc;
+
+	if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+		return -EINVAL;
+
+	skb = port100_alloc_skb(dev, sizeof(struct port100_tg_rf_setting));
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, sizeof(struct port100_tg_rf_setting)),
+	       &tg_rf_settings[rf],
+	       sizeof(struct port100_tg_rf_setting));
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rc = resp->data[0];
+
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct port100_protocol *protocols;
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int rc;
+	int num_protocols;
+	size_t size;
+
+	if (param >= NFC_DIGITAL_FRAMING_LAST)
+		return -EINVAL;
+
+	protocols = tg_protocols[param];
+
+	num_protocols = 0;
+	while (protocols[num_protocols].number != PORT100_TG_PROT_END)
+		num_protocols++;
+
+	if (!num_protocols)
+		return 0;
+
+	size = sizeof(struct port100_protocol) * num_protocols;
+
+	skb = port100_alloc_skb(dev, size);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, size), protocols, size);
+
+	resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rc = resp->data[0];
+
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type,
+				   int param)
+{
+	if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+		return port100_tg_set_rf(ddev, param);
+
+	if (type == NFC_DIGITAL_CONFIG_FRAMING)
+		return port100_tg_set_framing(ddev, param);
+
+	return -EINVAL;
+}
+
+static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
+{
+	u8 mask;
+
+	switch (dev->cmd_type) {
+	case PORT100_CMD_TYPE_0:
+		mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK;
+		break;
+	case PORT100_CMD_TYPE_1:
+		mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK |
+		       PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
+		break;
+	default:
+		nfc_err(&dev->interface->dev, "Unknonwn command type.\n");
+		return false;
+	}
+
+	return ((tgt_activated & mask) == mask);
+}
+
+static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
+					struct sk_buff *resp)
+{
+	u32 status;
+	struct port100_cb_arg *cb_arg = arg;
+	nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+	struct port100_tg_comm_rf_res *hdr;
+
+	if (IS_ERR(resp))
+		goto exit;
+
+	hdr = (struct port100_tg_comm_rf_res *)resp->data;
+
+	status = le32_to_cpu(hdr->status);
+
+	if (cb_arg->mdaa &&
+	    !port100_tg_target_activated(dev, hdr->target_activated)) {
+		kfree_skb(resp);
+		resp = ERR_PTR(-ETIMEDOUT);
+
+		goto exit;
+	}
+
+	skb_pull(resp, sizeof(struct port100_tg_comm_rf_res));
+
+	if (status != PORT100_CMD_STATUS_OK) {
+		kfree_skb(resp);
+
+		if (status == PORT100_CMD_STATUS_TIMEOUT)
+			resp = ERR_PTR(-ETIMEDOUT);
+		else
+			resp = ERR_PTR(-EIO);
+	}
+
+exit:
+	cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+	kfree(cb_arg);
+}
+
+static int port100_tg_send_cmd(struct nfc_digital_dev *ddev,
+			       struct sk_buff *skb, u16 timeout,
+			       nfc_digital_cmd_complete_t cb, void *arg)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct port100_tg_comm_rf_cmd *hdr;
+	struct port100_cb_arg *cb_arg;
+
+	cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+	if (!cb_arg)
+		return -ENOMEM;
+
+	cb_arg->complete_cb = cb;
+	cb_arg->complete_arg = arg;
+
+	skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+
+	hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+	memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+	hdr->guard_time = cpu_to_le16(500);
+	hdr->send_timeout = cpu_to_le16(0xFFFF);
+	hdr->recv_timeout = cpu_to_le16(timeout);
+
+	return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+				      port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
+			       struct digital_tg_mdaa_params *params,
+			       u16 timeout,
+			       nfc_digital_cmd_complete_t cb, void *arg)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct port100_tg_comm_rf_cmd *hdr;
+	struct port100_cb_arg *cb_arg;
+	struct sk_buff *skb;
+	int rc;
+
+	rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+				     NFC_DIGITAL_RF_TECH_106A);
+	if (rc)
+		return rc;
+
+	rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+	if (rc)
+		return rc;
+
+	cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+	if (!cb_arg)
+		return -ENOMEM;
+
+	cb_arg->complete_cb = cb;
+	cb_arg->complete_arg = arg;
+	cb_arg->mdaa = 1;
+
+	skb = port100_alloc_skb(dev, 0);
+	if (!skb) {
+		kfree(cb_arg);
+		return -ENOMEM;
+	}
+
+	skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+	hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+	memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+
+	hdr->guard_time = 0;
+	hdr->send_timeout = cpu_to_le16(0xFFFF);
+	hdr->mdaa = 1;
+	hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF;
+	hdr->nfca_param[1] = params->sens_res & 0xFF;
+	memcpy(hdr->nfca_param + 2, params->nfcid1, 3);
+	hdr->nfca_param[5] = params->sel_res;
+	memcpy(hdr->nfcf_param, params->nfcid2, 8);
+	hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF;
+	hdr->nfcf_param[17] = params->sc & 0xFF;
+	hdr->recv_timeout = cpu_to_le16(timeout);
+
+	return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+				      port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
+			  nfc_digital_cmd_complete_t cb, void *arg)
+{
+	struct port100 *dev = nfc_digital_get_drvdata(ddev);
+	struct sk_buff *skb;
+
+	skb = port100_alloc_skb(dev, 0);
+	if (!skb)
+		return -ENOMEM;
+
+	return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
+}
+
+static struct nfc_digital_ops port100_digital_ops = {
+	.in_configure_hw = port100_in_configure_hw,
+	.in_send_cmd = port100_in_send_cmd,
+
+	.tg_listen_mdaa = port100_listen_mdaa,
+	.tg_listen = port100_listen,
+	.tg_configure_hw = port100_tg_configure_hw,
+	.tg_send_cmd = port100_tg_send_cmd,
+
+	.switch_rf = port100_switch_rf,
+	.abort_cmd = port100_abort_cmd,
+};
+
+static const struct usb_device_id port100_table[] = {
+	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE,
+	  .idVendor		= SONY_VENDOR_ID,
+	  .idProduct		= RCS380_PRODUCT_ID,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(usb, port100_table);
+
+static int port100_probe(struct usb_interface *interface,
+			 const struct usb_device_id *id)
+{
+	struct port100 *dev;
+	int rc;
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	int in_endpoint;
+	int out_endpoint;
+	u16 fw_version;
+	u64 cmd_type_mask;
+	int i;
+
+	dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->udev = usb_get_dev(interface_to_usbdev(interface));
+	dev->interface = interface;
+	usb_set_intfdata(interface, dev);
+
+	in_endpoint = out_endpoint = 0;
+	iface_desc = interface->cur_altsetting;
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+			in_endpoint = endpoint->bEndpointAddress;
+
+		if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+			out_endpoint = endpoint->bEndpointAddress;
+	}
+
+	if (!in_endpoint || !out_endpoint) {
+		nfc_err(&interface->dev,
+			"Could not find bulk-in or bulk-out endpoint\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+	dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+	if (!dev->in_urb || !dev->out_urb) {
+		nfc_err(&interface->dev, "Could not allocate USB URBs\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	usb_fill_bulk_urb(dev->in_urb, dev->udev,
+			  usb_rcvbulkpipe(dev->udev, in_endpoint),
+			  NULL, 0, NULL, dev);
+	usb_fill_bulk_urb(dev->out_urb, dev->udev,
+			  usb_sndbulkpipe(dev->udev, out_endpoint),
+			  NULL, 0, port100_send_complete, dev);
+
+	dev->skb_headroom = PORT100_FRAME_HEADER_LEN +
+			    PORT100_COMM_RF_HEAD_MAX_LEN;
+	dev->skb_tailroom = PORT100_FRAME_TAIL_LEN;
+
+	INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete);
+
+	/* The first thing to do with the Port-100 is to set the command
+	 * type to be used. If supported, we use command type 1, otherwise
+	 * type 0.
+	 */
+	cmd_type_mask = port100_get_command_type_mask(dev);
+	if (!cmd_type_mask) {
+		nfc_err(&interface->dev,
+			"Could not get supported command types.\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1))
+		dev->cmd_type = PORT100_CMD_TYPE_1;
+	else
+		dev->cmd_type = PORT100_CMD_TYPE_0;
+
+	rc = port100_set_command_type(dev, dev->cmd_type);
+	if (rc) {
+		nfc_err(&interface->dev,
+			"The device does not support command type %u.\n",
+			dev->cmd_type);
+		goto error;
+	}
+
+	fw_version = port100_get_firmware_version(dev);
+	if (!fw_version)
+		nfc_err(&interface->dev,
+			"Could not get device firmware version.\n");
+
+	nfc_info(&interface->dev,
+		 "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
+		 (fw_version & 0xFF00) >> 8, fw_version & 0xFF);
+
+	dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops,
+							   PORT100_PROTOCOLS,
+							   PORT100_CAPABILITIES,
+							   dev->skb_headroom,
+							   dev->skb_tailroom);
+	if (!dev->nfc_digital_dev) {
+		nfc_err(&interface->dev,
+			"Could not allocate nfc_digital_dev.\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev);
+	nfc_digital_set_drvdata(dev->nfc_digital_dev, dev);
+
+	rc = nfc_digital_register_device(dev->nfc_digital_dev);
+	if (rc) {
+		nfc_err(&interface->dev,
+			"Could not register digital device.\n");
+		goto free_nfc_dev;
+	}
+
+	return 0;
+
+free_nfc_dev:
+	nfc_digital_free_device(dev->nfc_digital_dev);
+
+error:
+	usb_free_urb(dev->in_urb);
+	usb_free_urb(dev->out_urb);
+	usb_put_dev(dev->udev);
+
+	return rc;
+}
+
+static void port100_disconnect(struct usb_interface *interface)
+{
+	struct port100 *dev;
+
+	dev = usb_get_intfdata(interface);
+	usb_set_intfdata(interface, NULL);
+
+	nfc_digital_unregister_device(dev->nfc_digital_dev);
+	nfc_digital_free_device(dev->nfc_digital_dev);
+
+	usb_kill_urb(dev->in_urb);
+	usb_kill_urb(dev->out_urb);
+
+	usb_free_urb(dev->in_urb);
+	usb_free_urb(dev->out_urb);
+
+	kfree(dev->cmd);
+
+	nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected");
+}
+
+static struct usb_driver port100_driver = {
+	.name =		"port100",
+	.probe =	port100_probe,
+	.disconnect =	port100_disconnect,
+	.id_table =	port100_table,
+};
+
+module_usb_driver(port100_driver);
+
+MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index fce088e..404d1da 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -282,9 +282,6 @@
 	else
 		of_device_make_bus_id(&dev->dev);
 
-	/* setup amba-specific device info */
-	dev->dma_mask = ~0;
-
 	/* Allow the HW Peripheral ID to be overridden */
 	prop = of_get_property(node, "arm,primecell-periphid", NULL);
 	if (prop)
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index dc82ef0..2872ece 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -31,15 +31,18 @@
 
 	  If unsure, say Y.
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+	bool
+	help
+	  Select this config option from the architecture Kconfig if
+	  the architecture might have PC parallel port hardware.
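+
+# Architectures opt in from their own Kconfig, along these lines
+# (illustrative placement only; each port carries its own select):
+#
+#	config X86
+#		select ARCH_MIGHT_HAVE_PC_PARPORT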
+
 if PARPORT
 
 config PARPORT_PC
 	tristate "PC-style hardware"
-	depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
-		(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
-		!XTENSA && !CRIS
-
-	---help---
+	depends on ARCH_MIGHT_HAVE_PC_PARPORT
+	help
 	  You should say Y here if you have a PC-style parallel port. All
 	  IBM PC compatible computers and some Alphas have PC-style
 	  parallel ports.  PA-RISC owners should only say Y here if they
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index d471627..c864f82 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1331,7 +1331,7 @@
 			break;
 
 		/* Initialize mutex used to take interrupts into account */
-		INIT_COMPLETION(priv->irq_complete);
+		reinit_completion(&priv->irq_complete);
 
 		/* Enable serviceIntr */
 		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
@@ -1446,7 +1446,7 @@
 	priv->irq_mode = PARPORT_IP32_IRQ_HERE;
 
 	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
-	INIT_COMPLETION(priv->irq_complete);
+	reinit_completion(&priv->irq_complete);
 	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
 
 	nfault_timeout = min((unsigned long)physport->cad->timeout,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e128..9637615 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2004,6 +2004,7 @@
 	struct resource	*ECR_res = NULL;
 	struct resource	*EPP_res = NULL;
 	struct platform_device *pdev = NULL;
+	int ret;
 
 	if (!dev) {
 		/* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@
 			return NULL;
 		dev = &pdev->dev;
 
-		dev->coherent_dma_mask = DMA_BIT_MASK(24);
-		dev->dma_mask = &dev->coherent_dma_mask;
+		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+		if (ret) {
+			dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+			dma = PARPORT_DMA_NONE;
+		}
 	}
 
 	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 43186fe..47d46c6 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -15,8 +15,22 @@
 	select PCIEPORTBUS
 	select PCIE_DW
 
+config PCI_IMX6
+	bool "Freescale i.MX6 PCIe controller"
+	depends on SOC_IMX6Q
+	select PCIEPORTBUS
+	select PCIE_DW
+
 config PCI_TEGRA
 	bool "NVIDIA Tegra PCIe controller"
 	depends on ARCH_TEGRA
 
+config PCI_RCAR_GEN2
+	bool "Renesas R-Car Gen2 Internal PCI controller"
+	depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST)
+	help
+	  Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+	  There are 3 internal PCI controllers available with a single
+	  built-in EHCI/OHCI host controller present on each one.
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index c9a997b..13fb333 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,6 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096b..24beed3 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@
 #define PCIE_IRQ_SPECIAL		0x008
 #define PCIE_IRQ_EN_PULSE		0x00c
 #define PCIE_IRQ_EN_LEVEL		0x010
+#define IRQ_MSI_ENABLE			(0x1 << 2)
 #define PCIE_IRQ_EN_SPECIAL		0x014
 #define PCIE_PWR_RESET			0x018
 #define PCIE_CORE_RESET			0x01c
@@ -77,18 +78,28 @@
 #define PCIE_PHY_PLL_BIAS		0x00c
 #define PCIE_PHY_DCC_FEEDBACK		0x014
 #define PCIE_PHY_PLL_DIV_1		0x05c
+#define PCIE_PHY_COMMON_POWER		0x064
+#define PCIE_PHY_COMMON_PD_CMN		(0x1 << 3)
 #define PCIE_PHY_TRSV0_EMP_LVL		0x084
 #define PCIE_PHY_TRSV0_DRV_LVL		0x088
 #define PCIE_PHY_TRSV0_RXCDR		0x0ac
+#define PCIE_PHY_TRSV0_POWER		0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV		(0x1 << 7)
 #define PCIE_PHY_TRSV0_LVCC		0x0dc
 #define PCIE_PHY_TRSV1_EMP_LVL		0x144
 #define PCIE_PHY_TRSV1_RXCDR		0x16c
+#define PCIE_PHY_TRSV1_POWER		0x184
+#define PCIE_PHY_TRSV1_PD_TSV		(0x1 << 7)
 #define PCIE_PHY_TRSV1_LVCC		0x19c
 #define PCIE_PHY_TRSV2_EMP_LVL		0x204
 #define PCIE_PHY_TRSV2_RXCDR		0x22c
+#define PCIE_PHY_TRSV2_POWER		0x244
+#define PCIE_PHY_TRSV2_PD_TSV		(0x1 << 7)
 #define PCIE_PHY_TRSV2_LVCC		0x25c
 #define PCIE_PHY_TRSV3_EMP_LVL		0x2c4
 #define PCIE_PHY_TRSV3_RXCDR		0x2ec
+#define PCIE_PHY_TRSV3_POWER		0x304
+#define PCIE_PHY_TRSV3_PD_TSV		(0x1 << 7)
 #define PCIE_PHY_TRSV3_LVCC		0x31c
 
 static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
 }
 
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+	u32 val;
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+	val &= ~PCIE_PHY_COMMON_PD_CMN;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+	val &= ~PCIE_PHY_TRSV0_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+	val &= ~PCIE_PHY_TRSV1_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+	val &= ~PCIE_PHY_TRSV2_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+	val &= ~PCIE_PHY_TRSV3_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+	u32 val;
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+	val |= PCIE_PHY_COMMON_PD_CMN;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+	val |= PCIE_PHY_TRSV0_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+	val |= PCIE_PHY_TRSV1_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+	val |= PCIE_PHY_TRSV2_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+	val |= PCIE_PHY_TRSV3_PD_TSV;
+	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
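+/*
+ * Aside: the four TRSV banks above differ only in their register
+ * offset, and every PD bit is bit 7. A table-driven variant (a
+ * sketch, not what this driver does) would fold both helpers into
+ * one loop:
+ *
+ *	static const u32 trsv_power_regs[] = {
+ *		PCIE_PHY_TRSV0_POWER, PCIE_PHY_TRSV1_POWER,
+ *		PCIE_PHY_TRSV2_POWER, PCIE_PHY_TRSV3_POWER,
+ *	};
+ *
+ *	for (i = 0; i < ARRAY_SIZE(trsv_power_regs); i++) {
+ *		val = exynos_phy_readl(exynos_pcie, trsv_power_regs[i]);
+ *		if (power_on)
+ *			val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ *		else
+ *			val |= PCIE_PHY_TRSV0_PD_TSV;
+ *		exynos_phy_writel(exynos_pcie, val, trsv_power_regs[i]);
+ *	}
+ */
+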
 static void exynos_pcie_init_phy(struct pcie_port *pp)
 {
 	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@
 	/* de-assert phy reset */
 	exynos_pcie_deassert_phy_reset(pp);
 
+	/* power on phy */
+	exynos_pcie_power_on_phy(pp);
+
 	/* initialize phy */
 	exynos_pcie_init_phy(pp);
 
@@ -302,6 +368,9 @@
 						       PCIE_PHY_PLL_LOCKED);
 				dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
 			}
+			/* power off phy */
+			exynos_pcie_power_off_phy(pp);
+
 			dev_err(pp->dev, "PCIe Link Fail\n");
 			return -EINVAL;
 		}
@@ -342,9 +411,36 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+	struct pcie_port *pp = arg;
+
+	dw_handle_msi_irq(pp);
+
+	return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+	u32 val;
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+	dw_pcie_msi_init(pp);
+
+	/* enable MSI interrupt */
+	val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+	val |= IRQ_MSI_ENABLE;
+	exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+	return;
+}
+
 static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
 {
 	exynos_pcie_enable_irq_pulse(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		exynos_pcie_msi_init(pp);
+
 	return;
 }
 
@@ -430,6 +526,22 @@
 		return ret;
 	}
 
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0) {
+			dev_err(&pdev->dev, "failed to get msi irq\n");
+			return -ENODEV;
+		}
+
+		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+					exynos_pcie_msi_irq_handler,
+					IRQF_SHARED, "exynos-pcie", pp);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to request msi irq\n");
+			return ret;
+		}
+	}
+
 	pp->root_bus_nr = -1;
 	pp->ops = &exynos_pcie_host_ops;
 
@@ -487,18 +599,24 @@
 
 	elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
-	if (IS_ERR(exynos_pcie->elbi_base))
-		return PTR_ERR(exynos_pcie->elbi_base);
+	if (IS_ERR(exynos_pcie->elbi_base)) {
+		ret = PTR_ERR(exynos_pcie->elbi_base);
+		goto fail_bus_clk;
+	}
 
 	phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
-	if (IS_ERR(exynos_pcie->phy_base))
-		return PTR_ERR(exynos_pcie->phy_base);
+	if (IS_ERR(exynos_pcie->phy_base)) {
+		ret = PTR_ERR(exynos_pcie->phy_base);
+		goto fail_bus_clk;
+	}
 
 	block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
 	exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
-	if (IS_ERR(exynos_pcie->block_base))
-		return PTR_ERR(exynos_pcie->block_base);
+	if (IS_ERR(exynos_pcie->block_base)) {
+		ret = PTR_ERR(exynos_pcie->block_base);
+		goto fail_bus_clk;
+	}
 
 	ret = add_pcie_port(pp, pdev);
 	if (ret < 0)
@@ -535,7 +653,7 @@
 	.driver = {
 		.name	= "exynos-pcie",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(exynos_pcie_of_match),
+		.of_match_table = exynos_pcie_of_match,
 	},
 };
 
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 0000000..bd70af8
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,568 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ *		http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+	int			reset_gpio;
+	int			power_on_gpio;
+	int			wake_up_gpio;
+	int			disable_gpio;
+	struct clk		*lvds_gate;
+	struct clk		*sata_ref_100m;
+	struct clk		*pcie_ref_125m;
+	struct clk		*pcie_axi;
+	struct pcie_port	pp;
+	struct regmap		*iomuxc_gpr;
+	void __iomem		*mem_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+	u32 val;
+	u32 max_iterations = 10;
+	u32 wait_counter = 0;
+
+	do {
+		val = readl(dbi_base + PCIE_PHY_STAT);
+		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+		wait_counter++;
+
+		if (val == exp_val)
+			return 0;
+
+		udelay(1);
+	} while (wait_counter < max_iterations);
+
+	return -ETIMEDOUT;
+}
+
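+/*
+ * Latch an address into the PHY control interface: write it to the
+ * data field, pulse the capture-address strobe, and walk the ack
+ * bit through a full assert/deassert handshake.
+ */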
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+	u32 val;
+	int ret;
+
+	val = addr << PCIE_PHY_CTRL_DATA_LOC;
+	writel(val, dbi_base + PCIE_PHY_CTRL);
+
+	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+	writel(val, dbi_base + PCIE_PHY_CTRL);
+
+	ret = pcie_phy_poll_ack(dbi_base, 1);
+	if (ret)
+		return ret;
+
+	val = addr << PCIE_PHY_CTRL_DATA_LOC;
+	writel(val, dbi_base + PCIE_PHY_CTRL);
+
+	ret = pcie_phy_poll_ack(dbi_base, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+	u32 val, phy_ctl;
+	int ret;
+
+	ret = pcie_phy_wait_ack(dbi_base, addr);
+	if (ret)
+		return ret;
+
+	/* assert Read signal */
+	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+	ret = pcie_phy_poll_ack(dbi_base, 1);
+	if (ret)
+		return ret;
+
+	val = readl(dbi_base + PCIE_PHY_STAT);
+	*data = val & 0xffff;
+
+	/* deassert Read signal */
+	writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+	ret = pcie_phy_poll_ack(dbi_base, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+	u32 var;
+	int ret;
+
+	/* write addr */
+	/* cap addr */
+	ret = pcie_phy_wait_ack(dbi_base, addr);
+	if (ret)
+		return ret;
+
+	var = data << PCIE_PHY_CTRL_DATA_LOC;
+	writel(var, dbi_base + PCIE_PHY_CTRL);
+
+	/* capture data */
+	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+	writel(var, dbi_base + PCIE_PHY_CTRL);
+
+	ret = pcie_phy_poll_ack(dbi_base, 1);
+	if (ret)
+		return ret;
+
+	/* deassert cap data */
+	var = data << PCIE_PHY_CTRL_DATA_LOC;
+	writel(var, dbi_base + PCIE_PHY_CTRL);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(dbi_base, 0);
+	if (ret)
+		return ret;
+
+	/* assert wr signal */
+	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+	writel(var, dbi_base + PCIE_PHY_CTRL);
+
+	/* wait for ack */
+	ret = pcie_phy_poll_ack(dbi_base, 1);
+	if (ret)
+		return ret;
+
+	/* deassert wr signal */
+	var = data << PCIE_PHY_CTRL_DATA_LOC;
+	writel(var, dbi_base + PCIE_PHY_CTRL);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(dbi_base, 0);
+	if (ret)
+		return ret;
+
+	writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+	return 0;
+}
+
+/*  Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+		unsigned int fsr, struct pt_regs *regs)
+{
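+	/*
+	 * Returning 0 reports the fault as handled, so imprecise
+	 * external aborts raised by config accesses to absent
+	 * devices are ignored rather than treated as fatal.
+	 */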
+	return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+	gpio_set_value(imx6_pcie->reset_gpio, 0);
+	msleep(100);
+	gpio_set_value(imx6_pcie->reset_gpio, 1);
+
+	return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	int ret;
+
+	if (gpio_is_valid(imx6_pcie->power_on_gpio))
+		gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+	ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+	if (ret) {
+		dev_err(pp->dev, "unable to enable sata_ref_100m\n");
+		goto err_sata_ref;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+	if (ret) {
+		dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
+		goto err_pcie_ref;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+	if (ret) {
+		dev_err(pp->dev, "unable to enable lvds_gate\n");
+		goto err_lvds_gate;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+	if (ret) {
+		dev_err(pp->dev, "unable to enable pcie_axi\n");
+		goto err_pcie_axi;
+	}
+
+	/* allow the clocks to stabilize */
+	usleep_range(200, 500);
+
+	return 0;
+
+err_pcie_axi:
+	clk_disable_unprepare(imx6_pcie->lvds_gate);
+err_lvds_gate:
+	clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
+err_pcie_ref:
+	clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+err_sata_ref:
+	return ret;
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+	/* configure constant input signal to the pcie ctrl and phy */
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+			IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+			IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+			IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+			IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+			IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+			IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+	int count = 0;
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+	imx6_pcie_assert_core_reset(pp);
+
+	imx6_pcie_init_phy(pp);
+
+	imx6_pcie_deassert_core_reset(pp);
+
+	dw_pcie_setup_rc(pp);
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+	while (!dw_pcie_link_up(pp)) {
+		usleep_range(100, 1000);
+		count++;
+		if (count >= 200) {
+			dev_err(pp->dev, "phy link never came up\n");
+			dev_dbg(pp->dev,
+				"DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+				readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+				readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+			break;
+		}
+	}
+
+	return;
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+	u32 rc, ltssm, rx_valid, temp;
+
+	/* link is debug bit 36, debug register 1 starts at bit 32 */
+	rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+	if (rc)
+		return -EAGAIN;
+
+	/*
+	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
+	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
+	 * to gen2 is stuck
+	 */
+	pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+	ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+
+	if (rx_valid & 0x01)
+		return 0;
+
+	if (ltssm != 0x0d)
+		return 0;
+
+	dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+
+	pcie_phy_read(pp->dbi_base,
+		PHY_RX_OVRD_IN_LO, &temp);
+	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
+		| PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(pp->dbi_base,
+		PHY_RX_OVRD_IN_LO, temp);
+
+	usleep_range(2000, 3000);
+
+	pcie_phy_read(pp->dbi_base,
+		PHY_RX_OVRD_IN_LO, &temp);
+	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
+		| PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(pp->dbi_base,
+		PHY_RX_OVRD_IN_LO, temp);
+
+	return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+	.link_up = imx6_pcie_link_up,
+	.host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct pcie_port *pp,
+			struct platform_device *pdev)
+{
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return -ENODEV;
+	}
+
+	pp->root_bus_nr = -1;
+	pp->ops = &imx6_pcie_host_ops;
+
+	spin_lock_init(&pp->conf_lock);
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie;
+	struct pcie_port *pp;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *dbi_base;
+	int ret;
+
+	imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+	if (!imx6_pcie)
+		return -ENOMEM;
+
+	pp = &imx6_pcie->pp;
+	pp->dev = &pdev->dev;
+
+	/* Added for PCI abort handling */
+	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+		"imprecise external abort");
+
+	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!dbi_base) {
+		dev_err(&pdev->dev, "dbi_base memory resource not found\n");
+		return -ENODEV;
+	}
+
+	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+	if (IS_ERR(pp->dbi_base)) {
+		ret = PTR_ERR(pp->dbi_base);
+		goto err;
+	}
+
+	/* Fetch GPIOs */
+	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
+		dev_err(&pdev->dev, "no reset-gpio defined\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	ret = devm_gpio_request_one(&pdev->dev,
+				imx6_pcie->reset_gpio,
+				GPIOF_OUT_INIT_LOW,
+				"PCIe reset");
+	if (ret) {
+		dev_err(&pdev->dev, "unable to get reset gpio\n");
+		goto err;
+	}
+
+	imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
+	if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev,
+					imx6_pcie->power_on_gpio,
+					GPIOF_OUT_INIT_LOW,
+					"PCIe power enable");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get power-on gpio\n");
+			goto err;
+		}
+	}
+
+	imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
+	if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev,
+					imx6_pcie->wake_up_gpio,
+					GPIOF_IN,
+					"PCIe wake up");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+			goto err;
+		}
+	}
+
+	imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
+	if (gpio_is_valid(imx6_pcie->disable_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev,
+					imx6_pcie->disable_gpio,
+					GPIOF_OUT_INIT_HIGH,
+					"PCIe disable endpoint");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+			goto err;
+		}
+	}
+
+	/* Fetch clocks */
+	imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
+	if (IS_ERR(imx6_pcie->lvds_gate)) {
+		dev_err(&pdev->dev,
+			"lvds_gate clock select missing or invalid\n");
+		ret = PTR_ERR(imx6_pcie->lvds_gate);
+		goto err;
+	}
+
+	imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+	if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+		dev_err(&pdev->dev,
+			"sata_ref_100m clock source missing or invalid\n");
+		ret = PTR_ERR(imx6_pcie->sata_ref_100m);
+		goto err;
+	}
+
+	imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+	if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+		dev_err(&pdev->dev,
+			"pcie_ref_125m clock source missing or invalid\n");
+		ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
+		goto err;
+	}
+
+	imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+	if (IS_ERR(imx6_pcie->pcie_axi)) {
+		dev_err(&pdev->dev,
+			"pcie_axi clock source missing or invalid\n");
+		ret = PTR_ERR(imx6_pcie->pcie_axi);
+		goto err;
+	}
+
+	/* Grab GPR config register range */
+	imx6_pcie->iomuxc_gpr =
+		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+		ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
+		goto err;
+	}
+
+	ret = imx6_add_pcie_port(pp, pdev);
+	if (ret < 0)
+		goto err;
+
+	platform_set_drvdata(pdev, imx6_pcie);
+	return 0;
+
+err:
+	return ret;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+	{ .compatible = "fsl,imx6q-pcie", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+	.driver = {
+		.name	= "imx6q-pcie",
+		.owner	= THIS_MODULE,
+		.of_match_table = imx6_pcie_of_match,
+	},
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+fs_initcall(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
new file mode 100644
index 0000000..cbaa5c4
--- /dev/null
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -0,0 +1,333 @@
+/*
+ *  pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET	0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0		0x0
+#define RCAR_PCIAHB_PREFETCH4		0x1
+#define RCAR_PCIAHB_PREFETCH8		0x2
+#define RCAR_PCIAHB_PREFETCH16		0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM		(3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG		(5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST		(1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE		(1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_A			(1 << 16)
+#define RCAR_PCI_INT_B			(1 << 17)
+#define RCAR_PCI_INT_PME		(1 << 19)
+
+#define RCAR_AHB_BUS_CTR_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS	(1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST	(1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR	(1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ	(1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR	(1 << 17)
+#define RCAR_AHB_BUS_MODE		(RCAR_AHB_BUS_MMODE_HTRANS |	\
+					RCAR_AHB_BUS_MMODE_BYTE_BURST |	\
+					RCAR_AHB_BUS_MMODE_WR_INCR |	\
+					RCAR_AHB_BUS_MMODE_HBUS_REQ |	\
+					RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG			(RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST		(1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK		(1 << 1)
+#define RCAR_USBCTR_PLL_RST		(1 << 2)
+#define RCAR_USBCTR_DIRPD		(1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN	(1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M	(0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M	(1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G	(2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G	(3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK	(3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0	(1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1	(1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE	(1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+/* Number of internal PCI controllers */
+#define RCAR_PCI_NR_CONTROLLERS		3
+
+struct rcar_pci_priv {
+	void __iomem *reg;
+	struct resource io_res;
+	struct resource mem_res;
+	struct resource *cfg_res;
+	int irq;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+				       int where)
+{
+	struct pci_sys_data *sys = bus->sysdata;
+	struct rcar_pci_priv *priv = sys->private_data;
+	int slot, val;
+
+	if (sys->busnr != bus->number || PCI_FUNC(devfn))
+		return NULL;
+
+	/* Only one EHCI/OHCI device built-in */
+	slot = PCI_SLOT(devfn);
+	if (slot > 2)
+		return NULL;
+
+	val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+		     RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+	iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+	return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *val)
+{
+	void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+	if (!reg)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	switch (size) {
+	case 1:
+		*val = ioread8(reg);
+		break;
+	case 2:
+		*val = ioread16(reg);
+		break;
+	default:
+		*val = ioread32(reg);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 val)
+{
+	void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+	if (!reg)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	switch (size) {
+	case 1:
+		iowrite8(val, reg);
+		break;
+	case 2:
+		iowrite16(val, reg);
+		break;
+	default:
+		iowrite32(val, reg);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI interrupt mapping */
+static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct pci_sys_data *sys = dev->bus->sysdata;
+	struct rcar_pci_priv *priv = sys->private_data;
+
+	return priv->irq;
+}
+
+/* PCI host controller setup */
+static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+	struct rcar_pci_priv *priv = sys->private_data;
+	void __iomem *reg = priv->reg;
+	u32 val;
+
+	val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+	pr_info("PCI: bus%u revision %x\n", sys->busnr, val);
+
+	/* Disable Direct Power Down State and assert reset */
+	val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+	val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+	iowrite32(val, reg + RCAR_USBCTR_REG);
+	udelay(4);
+
+	/* De-assert reset and set PCIAHB window1 size to 1GB */
+	val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+		 RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+	iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG);
+
+	/* Configure AHB master and slave modes */
+	iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+	/* Configure PCI arbiter */
+	val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+	val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+	       RCAR_PCI_ARBITER_PCIBP_MODE;
+	iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+	/* PCI-AHB mapping: 0x40000000-0x80000000 */
+	iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
+		  reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+	/* AHB-PCI mapping: OHCI/EHCI registers */
+	val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+	iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+	/* Enable AHB-PCI bridge PCI configuration access */
+	iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+		  reg + RCAR_AHBPCI_WIN1_CTR_REG);
+	/* Set PCI-AHB Window1 address */
+	iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+		  reg + PCI_BASE_ADDRESS_1);
+	/* Set AHB-PCI bridge PCI communication area address */
+	val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+	iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+	val = ioread32(reg + PCI_COMMAND);
+	val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+	       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+	iowrite32(val, reg + PCI_COMMAND);
+
+	/* Enable PCI interrupts */
+	iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+		  reg + RCAR_PCI_INT_ENABLE_REG);
+
+	/* Add PCI resources */
+	pci_add_resource(&sys->resources, &priv->io_res);
+	pci_add_resource(&sys->resources, &priv->mem_res);
+
+	return 1;	/* positive return: success, scan this bus */
+}
+
+static struct pci_ops rcar_pci_ops = {
+	.read	= rcar_pci_read_config,
+	.write	= rcar_pci_write_config,
+};
+
+static struct hw_pci rcar_hw_pci __initdata = {
+	.map_irq	= rcar_pci_map_irq,
+	.ops		= &rcar_pci_ops,
+	.setup		= rcar_pci_setup,
+};
+
+static int rcar_pci_count __initdata;
+
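+/*
+ * Register one more controller with the shared hw_pci descriptor,
+ * growing the private_data pointer array in chunks of
+ * RCAR_PCI_NR_CONTROLLERS when it runs out of slots.
+ */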
+static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv)
+{
+	void **private_data;
+	int count;
+
+	if (rcar_hw_pci.nr_controllers < rcar_pci_count)
+		goto add_priv;
+
+	/* (Re)allocate private data pointer array if needed */
+	count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS;
+	private_data = kzalloc(count * sizeof(void *), GFP_KERNEL);
+	if (!private_data)
+		return -ENOMEM;
+
+	rcar_pci_count = count;
+	if (rcar_hw_pci.private_data) {
+		memcpy(private_data, rcar_hw_pci.private_data,
+		       rcar_hw_pci.nr_controllers * sizeof(void *));
+		kfree(rcar_hw_pci.private_data);
+	}
+
+	rcar_hw_pci.private_data = private_data;
+
+add_priv:
+	/* Add private data pointer to the array */
+	rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv;
+	return 0;
+}
+
+static int __init rcar_pci_probe(struct platform_device *pdev)
+{
+	struct resource *cfg_res, *mem_res;
+	struct rcar_pci_priv *priv;
+	void __iomem *reg;
+
+	cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem_res || !mem_res->start)
+		return -ENODEV;
+
+	priv = devm_kzalloc(&pdev->dev,
+			    sizeof(struct rcar_pci_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->mem_res = *mem_res;
+	/*
+	 * The controller does not support/use port I/O,
+	 * so setup a dummy port I/O region here.
+	 */
+	priv->io_res.start = priv->mem_res.start;
+	priv->io_res.end = priv->mem_res.end;
+	priv->io_res.flags = IORESOURCE_IO;
+
+	priv->cfg_res = cfg_res;
+
+	priv->irq = platform_get_irq(pdev, 0);
+	priv->reg = reg;
+
+	return rcar_pci_add_controller(priv);
+}
+
+static struct platform_driver rcar_pci_driver = {
+	.driver = {
+		.name = "pci-rcar-gen2",
+	},
+};
+
+static int __init rcar_pci_init(void)
+{
+	int retval;
+
+	retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe);
+	if (!retval)
+		pci_common_init(&rcar_hw_pci);
+
+	/* Private data pointer array is not needed any more */
+	kfree(rcar_hw_pci.private_data);
+	rcar_hw_pci.private_data = NULL;
+
+	return retval;
+}
+
+subsys_initcall(rcar_pci_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2e9888a..7c4f38d 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -408,7 +408,7 @@
 
 	list_for_each_entry(bus, &pcie->busses, list)
 		if (bus->nr == busnr)
-			return bus->area->addr;
+			return (void __iomem *)bus->area->addr;
 
 	bus = tegra_pcie_bus_alloc(pcie, busnr);
 	if (IS_ERR(bus))
@@ -416,7 +416,7 @@
 
 	list_add_tail(&bus->list, &pcie->busses);
 
-	return bus->area->addr;
+	return (void __iomem *)bus->area->addr;
 }
 
 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac..1e1fea4 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -11,8 +11,11 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
@@ -64,7 +67,7 @@
 
 static struct hw_pci dw_pci;
 
-unsigned long global_io_offset;
+static unsigned long global_io_offset;
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
@@ -115,8 +118,8 @@
 		writel(val, pp->dbi_base + reg);
 }
 
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
-				u32 *val)
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 *val)
 {
 	int ret;
 
@@ -128,8 +131,8 @@
 	return ret;
 }
 
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
-				u32 val)
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 val)
 {
 	int ret;
 
@@ -142,6 +145,205 @@
 	return ret;
 }
 
+static struct irq_chip dw_msi_irq_chip = {
+	.name = "PCI-MSI",
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+	unsigned long val = 0;	/* read as a u32 below; keep upper bits clear */
+	int i, pos, irq;
+
+	for (i = 0; i < MAX_MSI_CTRLS; i++) {
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+				(u32 *)&val);
+		if (val) {
+			pos = 0;
+			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+				irq = irq_find_mapping(pp->irq_domain,
+						i * 32 + pos);
+				generic_handle_irq(irq);
+				pos++;
+			}
+		}
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+	}
+}
+
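+/*
+ * Reserve a page whose physical address acts as the MSI target:
+ * endpoint MSI writes to it are caught by the controller and
+ * surface in PCIE_MSI_INTR0_STATUS instead of reaching memory.
+ */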
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+	/* program the msi_data */
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+			virt_to_phys((void *)pp->msi_data));
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+	int flag = 1;
+
+	do {
+		pos = find_next_zero_bit(pp->msi_irq_in_use,
+				MAX_MSI_IRQS, pos);
+		/* If you have reached the end, stop searching. */
+		if (pos == MAX_MSI_IRQS)
+			return -ENOSPC;
+		/*
+		 * Check if this position is at the correct offset. nvec is
+		 * always a power of two, so pos0 must be aligned to nvec
+		 * (e.g. for msgvec = 4, valid positions are 0, 4, 8, ...).
+		 */
+		if (pos % msgvec)
+			pos += msgvec - (pos % msgvec);
+		else
+			flag = 0;
+	} while (flag);
+
+	*pos0 = pos;
+	return 0;
+}
+
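+/*
+ * Grab a naturally aligned run of no_irqs free vectors from the
+ * bitmap, mark them in use and enable the matching bits in the
+ * controller's MSI enable registers. Returns the Linux irq number
+ * of the first vector, or -ENOSPC when the bitmap is exhausted.
+ */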
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+	int res, bit, irq, pos0, pos1, i;
+	u32 val;
+	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+	if (!pp) {
+		BUG();
+		return -EINVAL;
+	}
+
+	pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+			MAX_MSI_IRQS);
+	if (pos0 % no_irqs) {
+		if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+			goto no_valid_irq;
+	}
+	if (no_irqs > 1) {
+		pos1 = find_next_bit(pp->msi_irq_in_use,
+				MAX_MSI_IRQS, pos0);
+		/* there must be nvec number of consecutive free bits */
+		while ((pos1 - pos0) < no_irqs) {
+			if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+				goto no_valid_irq;
+			pos1 = find_next_bit(pp->msi_irq_in_use,
+					MAX_MSI_IRQS, pos0);
+		}
+	}
+
+	irq = irq_find_mapping(pp->irq_domain, pos0);
+	if (!irq)
+		goto no_valid_irq;
+
+	i = 0;
+	while (i < no_irqs) {
+		set_bit(pos0 + i, pp->msi_irq_in_use);
+		irq_alloc_descs((irq + i), (irq + i), 1, 0);
+		irq_set_msi_desc(irq + i, desc);
+		/*Enable corresponding interrupt in MSI interrupt controller */
+		res = ((pos0 + i) / 32) * 12;
+		bit = (pos0 + i) % 32;
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+		val |= 1 << bit;
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+		i++;
+	}
+
+	*pos = pos0;
+	return irq;
+
+no_valid_irq:
+	*pos = pos0;
+	return -ENOSPC;
+}
+
+static void clear_irq(unsigned int irq)
+{
+	int res, bit, val, pos;
+	struct irq_desc *desc;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	struct irq_data *data = irq_get_irq_data(irq);
+
+	/* get the port structure */
+	desc = irq_to_desc(irq);
+	msi = irq_desc_get_msi_desc(desc);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	if (!pp) {
+		BUG();
+		return;
+	}
+
+	pos = data->hwirq;
+
+	irq_free_desc(irq);
+
+	clear_bit(pos, pp->msi_irq_in_use);
+
+	/* Disable corresponding interrupt on MSI interrupt controller */
+	res = (pos / 32) * 12;
+	bit = pos % 32;
+	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+	val &= ~(1 << bit);
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+			struct msi_desc *desc)
+{
+	int irq, pos, msgvec;
+	u16 msg_ctr;
+	struct msi_msg msg;
+	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+	if (!pp) {
+		BUG();
+		return -EINVAL;
+	}
+
+	pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+				&msg_ctr);
+	msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;
+	if (msgvec == 0)
+		msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+	if (msgvec > 5)
+		msgvec = 0;
+
+	irq = assign_irq((1 << msgvec), desc, &pos);
+	if (irq < 0)
+		return irq;
+
+	msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+	msg_ctr |= msgvec << 4;
+	pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+				msg_ctr);
+	desc->msi_attrib.multiple = msgvec;
+
+	msg.address_lo = virt_to_phys((void *)pp->msi_data);
+	msg.address_hi = 0x0;
+	msg.data = pos;
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+	clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+	.setup_irq = dw_msi_setup_irq,
+	.teardown_irq = dw_msi_teardown_irq,
+};
+
 int dw_pcie_link_up(struct pcie_port *pp)
 {
 	if (pp->ops->link_up)
@@ -150,12 +352,27 @@
 		return 0;
 }
 
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = dw_pcie_msi_map,
+};
+
 int __init dw_pcie_host_init(struct pcie_port *pp)
 {
 	struct device_node *np = pp->dev->of_node;
 	struct of_pci_range range;
 	struct of_pci_range_parser parser;
 	u32 val;
+	int i;
 
 	if (of_pci_range_parser_init(&parser, np)) {
 		dev_err(pp->dev, "missing ranges property\n");
@@ -223,6 +440,19 @@
 		return -EINVAL;
 	}
 
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+					MAX_MSI_IRQS, &msi_domain_ops,
+					&dw_pcie_msi_chip);
+		if (!pp->irq_domain) {
+			dev_err(pp->dev, "irq domain init failed\n");
+			return -ENXIO;
+		}
+
+		for (i = 0; i < MAX_MSI_IRQS; i++)
+			irq_create_mapping(pp->irq_domain, i);
+	}
+
 	if (pp->ops->host_init)
 		pp->ops->host_init(pp);
 
@@ -438,7 +668,7 @@
 	.write = dw_pcie_wr_conf,
 };
 
-int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct pcie_port *pp;
 
@@ -461,7 +691,7 @@
 	return 1;
 }
 
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 {
 	struct pci_bus *bus;
 	struct pcie_port *pp = sys_to_pcie(sys);
@@ -478,17 +708,28 @@
 	return bus;
 }
 
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
 
 	return pp->irq;
 }
 
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+		dw_pcie_msi_chip.dev = pp->dev;
+		bus->msi = &dw_pcie_msi_chip;
+	}
+}
+
 static struct hw_pci dw_pci = {
 	.setup		= dw_pcie_setup,
 	.scan		= dw_pcie_scan_bus,
 	.map_irq	= dw_pcie_map_irq,
+	.add_bus	= dw_pcie_add_bus,
 };
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 133820f..c15379b 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -11,6 +11,9 @@
  * published by the Free Software Foundation.
  */
 
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
 struct pcie_port_info {
 	u32		cfg0_size;
 	u32		cfg1_size;
@@ -20,6 +23,14 @@
 	phys_addr_t	mem_bus_addr;
 };
 
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep
+ * it at 32 for now; we will probably never need more. If we do,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS			32
+#define MAX_MSI_CTRLS			(MAX_MSI_IRQS / 32)
+
 struct pcie_port {
 	struct device		*dev;
 	u8			root_bus_nr;
@@ -38,6 +49,10 @@
 	int			irq;
 	u32			lanes;
 	struct pcie_host_ops	*ops;
+	int			msi_irq;
+	struct irq_domain	*irq_domain;
+	unsigned long		msi_data;
+	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
 };
 
 struct pcie_host_ops {
@@ -51,15 +66,12 @@
 	void (*host_init)(struct pcie_port *pp);
 };
 
-extern unsigned long global_io_offset;
-
 int cfg_read(void __iomem *addr, int where, int size, u32 *val);
 int cfg_write(void __iomem *addr, int where, int size, u32 val);
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
 int dw_pcie_link_up(struct pcie_port *pp);
 void dw_pcie_setup_rc(struct pcie_port *pp);
 int dw_pcie_host_init(struct pcie_port *pp);
-int dw_pcie_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* _PCIE_DESIGNWARE_H */
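
The assign_irq()/find_valid_pos0() pair above performs a first-fit search for a
naturally aligned block of free MSI vectors: multi-message MSI grants a device
1 << msgvec consecutive vectors whose starting index must be a multiple of the
block size. A minimal standalone sketch of that alignment search (names are
illustrative, not from the patch):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	/* Find a free, nvec-aligned starting bit, as find_valid_pos0() does. */
	static int find_aligned_free_pos(const unsigned long *map, int nbits,
					 int nvec)
	{
		int pos = 0;

		for (;;) {
			pos = find_next_zero_bit(map, nbits, pos);
			if (pos >= nbits)
				return -ENOSPC;		/* bitmap exhausted */
			if (pos % nvec == 0)
				return pos;		/* free and aligned */
			pos += nvec - (pos % nvec);	/* round up to next block */
		}
	}

assign_irq() still has to verify with find_next_bit() that the whole block, not
just its first bit, is free before setting the bits.
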
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2a47e82..1ce8ee0 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,7 +338,7 @@
 	acpi_handle chandle, handle;
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-	flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+	flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
 	if (!flags) {
 		err("Invalid flags %u specified!\n", flags);
 		return -EINVAL;
@@ -411,13 +411,10 @@
 static int pcihp_is_ejectable(acpi_handle handle)
 {
 	acpi_status status;
-	acpi_handle tmp;
 	unsigned long long removable;
-	status = acpi_get_handle(handle, "_ADR", &tmp);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(handle, "_ADR"))
 		return 0;
-	status = acpi_get_handle(handle, "_EJ0", &tmp);
-	if (ACPI_SUCCESS(status))
+	if (acpi_has_method(handle, "_EJ0"))
 		return 1;
 	status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
 	if (ACPI_SUCCESS(status) && removable)
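
The acpi_has_method() conversion above recurs throughout this series: when only
the existence of an ACPI method matters, it replaces the old idiom of calling
acpi_get_handle() with a throwaway handle. A sketch of the pattern:

	#include <linux/acpi.h>

	static int is_ejectable(acpi_handle handle)
	{
		/*
		 * old idiom:
		 *	acpi_handle tmp;
		 *	if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
		 */
		return acpi_has_method(handle, "_EJ0");
	}
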
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index f4e0289..26100f5 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -39,16 +39,6 @@
 #include <linux/mutex.h>
 #include <linux/pci_hotplug.h>
 
-#define dbg(format, arg...)					\
-	do {							\
-		if (acpiphp_debug)				\
-			printk(KERN_DEBUG "%s: " format,	\
-				MY_NAME , ## arg);		\
-	} while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
 struct acpiphp_context;
 struct acpiphp_bridge;
 struct acpiphp_slot;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index bf2203e..8650d39 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -31,6 +31,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -43,12 +45,9 @@
 #include <linux/smp.h>
 #include "acpiphp.h"
 
-#define MY_NAME	"acpiphp"
-
 /* name size which is used for entries in pcihpfs */
 #define SLOT_NAME_SIZE  21              /* {_SUN} */
 
-bool acpiphp_debug;
 bool acpiphp_disabled;
 
 /* local variables */
@@ -61,9 +60,7 @@
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(disable, "disable acpiphp driver");
-module_param_named(debug, acpiphp_debug, bool, 0644);
 module_param_named(disable, acpiphp_disabled, bool, 0444);
 
 /* export the attention callback registration methods */
@@ -139,7 +136,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	/* enable the specified slot */
 	return acpiphp_enable_slot(slot->acpi_slot);
@@ -156,7 +153,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	/* disable the specified slot */
 	return acpiphp_disable_and_eject_slot(slot->acpi_slot);
@@ -176,8 +173,9 @@
  {
 	int retval = -ENODEV;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
- 
+	pr_debug("%s - physical_slot = %s\n", __func__,
+		hotplug_slot_name(hotplug_slot));
+
 	if (attention_info && try_module_get(attention_info->owner)) {
 		retval = attention_info->set_attn(hotplug_slot, status);
 		module_put(attention_info->owner);
@@ -199,7 +197,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_power_status(slot->acpi_slot);
 
@@ -221,7 +219,8 @@
 {
 	int retval = -EINVAL;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+	pr_debug("%s - physical_slot = %s\n", __func__,
+		hotplug_slot_name(hotplug_slot));
 
 	if (attention_info && try_module_get(attention_info->owner)) {
 		retval = attention_info->get_attn(hotplug_slot, value);
@@ -244,7 +243,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_latch_status(slot->acpi_slot);
 
@@ -264,7 +263,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	*value = acpiphp_get_adapter_status(slot->acpi_slot);
 
@@ -279,7 +278,7 @@
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+	pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
 	kfree(slot->hotplug_slot);
 	kfree(slot);
@@ -322,11 +321,11 @@
 	if (retval == -EBUSY)
 		goto error_hpslot;
 	if (retval) {
-		err("pci_hp_register failed with error %d\n", retval);
+		pr_err("pci_hp_register failed with error %d\n", retval);
 		goto error_hpslot;
  	}
 
-	info("Slot [%s] registered\n", slot_name(slot));
+	pr_info("Slot [%s] registered\n", slot_name(slot));
 
 	return 0;
 error_hpslot:
@@ -343,17 +342,17 @@
 	struct slot *slot = acpiphp_slot->slot;
 	int retval = 0;
 
-	info("Slot [%s] unregistered\n", slot_name(slot));
+	pr_info("Slot [%s] unregistered\n", slot_name(slot));
 
 	retval = pci_hp_deregister(slot->hotplug_slot);
 	if (retval)
-		err("pci_hp_deregister failed with error %d\n", retval);
+		pr_err("pci_hp_deregister failed with error %d\n", retval);
 }
 
 
 void __init acpiphp_init(void)
 {
-	info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
 		acpiphp_disabled ? ", disabled by user; please report a bug"
 				 : "");
 }
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1ea7523..5b4e9eb 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -39,6 +39,8 @@
 *    bus. It loses the refcount when the driver unloads.
  */
 
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 
@@ -58,8 +60,6 @@
 static DEFINE_MUTEX(bridge_mutex);
 static DEFINE_MUTEX(acpiphp_context_lock);
 
-#define MY_NAME "acpiphp_glue"
-
 static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
@@ -335,7 +335,7 @@
 		if (ACPI_FAILURE(status))
 			sun = bridge->nr_slots;
 
-		dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+		pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
 		    sun, pci_domain_nr(pbus), pbus->number, device);
 
 		retval = acpiphp_register_hotplug_slot(slot, sun);
@@ -343,10 +343,10 @@
 			slot->slot = NULL;
 			bridge->nr_slots--;
 			if (retval == -EBUSY)
-				warn("Slot %llu already registered by another "
+				pr_warn("Slot %llu already registered by another "
 					"hotplug driver\n", sun);
 			else
-				warn("acpiphp_register_hotplug_slot failed "
+				pr_warn("acpiphp_register_hotplug_slot failed "
 					"(err code = 0x%x)\n", retval);
 		}
 		/* Even if the slot registration fails, we can still use it. */
@@ -369,7 +369,7 @@
 		if (register_hotplug_dock_device(handle,
 			&acpiphp_dock_ops, context,
 			acpiphp_dock_init, acpiphp_dock_release))
-			dbg("failed to register dock device\n");
+			pr_debug("failed to register dock device\n");
 	}
 
 	/* install notify handler */
@@ -427,7 +427,7 @@
 							ACPI_SYSTEM_NOTIFY,
 							handle_hotplug_event);
 				if (ACPI_FAILURE(status))
-					err("failed to remove notify handler\n");
+					pr_err("failed to remove notify handler\n");
 			}
 		}
 		if (slot->slot)
@@ -826,8 +826,9 @@
 	switch (type) {
 	case ACPI_NOTIFY_BUS_CHECK:
 		/* bus re-enumerate */
-		dbg("%s: Bus check notify on %s\n", __func__, objname);
-		dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+		pr_debug("%s: Bus check notify on %s\n", __func__, objname);
+		pr_debug("%s: re-enumerating slots under %s\n",
+			 __func__, objname);
 		if (bridge) {
 			acpiphp_check_bridge(bridge);
 		} else {
@@ -841,7 +842,7 @@
 
 	case ACPI_NOTIFY_DEVICE_CHECK:
 		/* device check */
-		dbg("%s: Device check notify on %s\n", __func__, objname);
+		pr_debug("%s: Device check notify on %s\n", __func__, objname);
 		if (bridge) {
 			acpiphp_check_bridge(bridge);
 		} else {
@@ -862,7 +863,7 @@
 
 	case ACPI_NOTIFY_EJECT_REQUEST:
 		/* request device eject */
-		dbg("%s: Device eject notify on %s\n", __func__, objname);
+		pr_debug("%s: Device eject notify on %s\n", __func__, objname);
 		acpiphp_disable_and_eject_slot(func->slot);
 		break;
 	}
@@ -871,21 +872,17 @@
 		put_bridge(bridge);
 }
 
-static void hotplug_event_work(struct work_struct *work)
+static void hotplug_event_work(void *data, u32 type)
 {
-	struct acpiphp_context *context;
-	struct acpi_hp_work *hp_work;
+	struct acpiphp_context *context = data;
+	acpi_handle handle = context->handle;
 
-	hp_work = container_of(work, struct acpi_hp_work, work);
-	context = hp_work->context;
 	acpi_scan_lock_acquire();
 
-	hotplug_event(hp_work->handle, hp_work->type, context);
+	hotplug_event(handle, type, context);
 
 	acpi_scan_lock_release();
-	acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type,
-				  ACPI_OST_SC_SUCCESS, NULL);
-	kfree(hp_work); /* allocated in handle_hotplug_event() */
+	acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
 	put_bridge(context->func.parent);
 }
 
@@ -936,10 +933,10 @@
 
 	mutex_lock(&acpiphp_context_lock);
 	context = acpiphp_get_context(handle);
-	if (context) {
+	if (context && !WARN_ON(context->handle != handle)) {
 		get_bridge(context->func.parent);
 		acpiphp_put_context(context);
-		alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+		acpi_hotplug_execute(hotplug_event_work, context, type);
 		mutex_unlock(&acpiphp_context_lock);
 		return;
 	}
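
acpi_hotplug_execute() takes over the queueing that alloc_acpi_hp_work()
open-coded: as the new hotplug_event_work() shows, the callback runs from the
ACPI hotplug workqueue with the opaque data pointer and the event type, so no
work item has to be allocated and freed by the driver. An illustrative sketch
(the context structure and names are hypothetical):

	#include <linux/acpi.h>

	struct my_context {		/* hypothetical per-device context */
		acpi_handle handle;
	};

	static void my_hotplug_work(void *data, u32 type)
	{
		struct my_context *ctx = data;

		/* ... perform the bus check / eject indicated by type ... */
	}

	static void my_notify(acpi_handle handle, u32 type, void *data)
	{
		/* defer; the notify context is no place for bus rescans */
		acpi_hotplug_execute(my_hotplug_work, data, type);
	}
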
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f5786c..0d64c41 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -43,23 +45,11 @@
 #define DRIVER_AUTHOR	"Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
 #define DRIVER_DESC	"ACPI Hot Plug PCI Controller Driver IBM extension"
 
-static bool debug;
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...)				\
-do {							\
-	if (debug)					\
-		printk(KERN_DEBUG "%s: " format,	\
-				MY_NAME , ## arg);	\
-} while (0)
 
 #define FOUND_APCI 0x61504349
 /* these are the names for the IBM ACPI pseudo-device */
@@ -189,7 +179,7 @@
 
 	ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
 
-	dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+	pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
 			ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
 			(status ? 1 : 0));
 
@@ -202,10 +192,10 @@
 
 	stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
 	if (ACPI_FAILURE(stat)) {
-		err("APLS evaluation failed:  0x%08x\n", stat);
+		pr_err("APLS evaluation failed:  0x%08x\n", stat);
 		return -ENODEV;
 	} else if (!rc) {
-		err("APLS method failed:  0x%08llx\n", rc);
+		pr_err("APLS method failed:  0x%08llx\n", rc);
 		return -ERANGE;
 	}
 	return 0;
@@ -234,7 +224,7 @@
 	else
 		*status = 0;
 
-	dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+	pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
 			ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
 			*status);
 
@@ -266,10 +256,10 @@
 	u8 subevent = event & 0xf0;
 	struct notification *note = context;
 
-	dbg("%s: Received notification %02x\n", __func__, event);
+	pr_debug("%s: Received notification %02x\n", __func__, event);
 
 	if (subevent == 0x80) {
-		dbg("%s: generationg bus event\n", __func__);
+		pr_debug("%s: generationg bus event\n", __func__);
 		acpi_bus_generate_netlink_event(note->device->pnp.device_class,
 						  dev_name(&note->device->dev),
 						  note->event, detail);
@@ -301,7 +291,7 @@
 
 	status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
-		err("%s:  APCI evaluation failed\n", __func__);
+		pr_err("%s:  APCI evaluation failed\n", __func__);
 		return -ENODEV;
 	}
 
@@ -309,13 +299,13 @@
 	if (!(package) ||
 			(package->type != ACPI_TYPE_PACKAGE) ||
 			!(package->package.elements)) {
-		err("%s:  Invalid APCI object\n", __func__);
+		pr_err("%s:  Invalid APCI object\n", __func__);
 		goto read_table_done;
 	}
 
 	for(size = 0, i = 0; i < package->package.count; i++) {
 		if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
-			err("%s:  Invalid APCI element %d\n", __func__, i);
+			pr_err("%s:  Invalid APCI element %d\n", __func__, i);
 			goto read_table_done;
 		}
 		size += package->package.elements[i].buffer.length;
@@ -325,7 +315,7 @@
 		goto read_table_done;
 
 	lbuf = kzalloc(size, GFP_KERNEL);
-	dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+	pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
 			__func__, package->package.count, size, lbuf);
 
 	if (lbuf) {
@@ -370,8 +360,8 @@
 {
 	int bytes_read = -EINVAL;
 	char *table = NULL;
-	
-	dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+	pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
 
 	if (pos == 0) {
 		bytes_read = ibm_get_table_from_acpi(&table);
@@ -403,7 +393,7 @@
 
 	status = acpi_get_object_info(handle, &info);
 	if (ACPI_FAILURE(status)) {
-		err("%s:  Failed to get device information status=0x%x\n",
+		pr_err("%s:  Failed to get device information status=0x%x\n",
 			__func__, status);
 		return retval;
 	}
@@ -411,7 +401,7 @@
 	if (info->current_status && (info->valid & ACPI_VALID_HID) &&
 			(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
 			 !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
-		dbg("found hardware: %s, handle: %p\n",
+		pr_debug("found hardware: %s, handle: %p\n",
 			info->hardware_id.string, handle);
 		*phandle = handle;
 		/* returning non-zero causes the search to stop
@@ -432,18 +422,18 @@
 	struct acpi_device *device;
 	struct kobject *sysdir = &pci_slots_kset->kobj;
 
-	dbg("%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 			ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
 			&ibm_acpi_handle, NULL) != FOUND_APCI) {
-		err("%s: acpi_walk_namespace failed\n", __func__);
+		pr_err("%s: acpi_walk_namespace failed\n", __func__);
 		retval = -ENODEV;
 		goto init_return;
 	}
-	dbg("%s: found IBM aPCI device\n", __func__);
+	pr_debug("%s: found IBM aPCI device\n", __func__);
 	if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
-		err("%s: acpi_bus_get_device failed\n", __func__);
+		pr_err("%s: acpi_bus_get_device failed\n", __func__);
 		retval = -ENODEV;
 		goto init_return;
 	}
@@ -457,7 +447,7 @@
 			ACPI_DEVICE_NOTIFY, ibm_handle_events,
 			&ibm_note);
 	if (ACPI_FAILURE(status)) {
-		err("%s: Failed to register notification handler\n",
+		pr_err("%s: Failed to register notification handler\n",
 				__func__);
 		retval = -EBUSY;
 		goto init_cleanup;
@@ -479,17 +469,17 @@
 	acpi_status status;
 	struct kobject *sysdir = &pci_slots_kset->kobj;
 
-	dbg("%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	if (acpiphp_unregister_attention(&ibm_attention_info))
-		err("%s: attention info deregistration failed", __func__);
+		pr_err("%s: attention info deregistration failed", __func__);
 
 	status = acpi_remove_notify_handler(
 			   ibm_acpi_handle,
 			   ACPI_DEVICE_NOTIFY,
 			   ibm_handle_events);
 	if (ACPI_FAILURE(status))
-		err("%s: Notification handler removal failed\n", __func__);
+		pr_err("%s: Notification handler removal failed\n", __func__);
 	/* remove the /sys entries */
 	sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
 }
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e260f20..d876e4b 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -191,7 +191,7 @@
 #include <linux/pci-acpi.h>
 static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
-	u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+	u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
 	return acpi_get_hp_hw_control_from_firmware(dev, flags);
 }
 #else
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d5f90d6..5e63645 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -185,7 +185,7 @@
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
@@ -199,9 +199,14 @@
 	return mask_bits;
 }
 
+__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	return default_msi_mask_irq(desc, mask, flag);
+}
+
 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-	desc->masked = __msi_mask_irq(desc, mask, flag);
+	desc->masked = arch_msi_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -211,7 +216,7 @@
  * file.  This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
+u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
@@ -224,9 +229,14 @@
 	return mask_bits;
 }
 
+__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	return default_msix_mask_irq(desc, flag);
+}
+
 static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
-	desc->masked = __msix_mask_irq(desc, flag);
+	desc->masked = arch_msix_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -831,7 +841,7 @@
 	int status, maxvec;
 	u16 msgctl;
 
-	if (!dev->msi_cap)
+	if (!dev->msi_cap || dev->current_state != PCI_D0)
 		return -EINVAL;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -862,7 +872,7 @@
 	int ret, nvec;
 	u16 msgctl;
 
-	if (!dev->msi_cap)
+	if (!dev->msi_cap || dev->current_state != PCI_D0)
 		return -EINVAL;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -902,7 +912,7 @@
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
 	mask = msi_capable_mask(ctrl);
 	/* Keep cached state to be restored */
-	__msi_mask_irq(desc, mask, ~mask);
+	arch_msi_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -955,7 +965,7 @@
 	int status, nr_entries;
 	int i, j;
 
-	if (!entries || !dev->msix_cap)
+	if (!entries || !dev->msix_cap || dev->current_state != PCI_D0)
 		return -EINVAL;
 
 	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
@@ -998,7 +1008,7 @@
 	/* Return the device with MSI-X masked as initial states */
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		/* Keep cached states to be restored */
-		__msix_mask_irq(entry, 1);
+		arch_msix_mask_irq(entry, 1);
 	}
 
 	msix_set_enable(dev, 0);
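
Renaming __msi_mask_irq() to default_msi_mask_irq() and wrapping it in a __weak
arch_msi_mask_irq() gives architectures (and, as the designware driver in this
series shows, platform MSI chips) a link-time override point while the generic
behaviour stays the default. The mechanism in isolation, with illustrative
names and bodies:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* core code: used unless a strong definition exists elsewhere */
	u32 __weak arch_apply_mask(u32 masked, u32 mask, u32 flag)
	{
		return (masked & ~mask) | flag;	/* stand-in default */
	}

	/* arch/platform code, in a separate object file: this strong
	 * definition silently replaces the weak one at link time */
	u32 arch_apply_mask(u32 masked, u32 mask, u32 flag)
	{
		return masked;			/* e.g. hardware cannot mask */
	}
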
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b0299e6..dfd1f59 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -181,7 +181,6 @@
 static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
 	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
-	acpi_handle tmp;
 	static const u8 state_conv[] = {
 		[PCI_D0] = ACPI_STATE_D0,
 		[PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@
 	int error = -EINVAL;
 
 	/* If the ACPI device has _EJ0, ignore the device */
-	if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+	if (!handle || acpi_has_method(handle, "_EJ0"))
 		return -ENODEV;
 
 	switch (state) {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 38f3c01..4548535 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -267,11 +267,19 @@
 	pm_runtime_get_sync(dev);
 	pci_dev->driver = pci_drv;
 	rc = pci_drv->probe(pci_dev, ddi->id);
-	if (rc) {
+	if (!rc)
+		return rc;
+	if (rc < 0) {
 		pci_dev->driver = NULL;
 		pm_runtime_put_sync(dev);
+		return rc;
 	}
-	return rc;
+	/*
+	 * Probe functions should return < 0 for failure and 0 for success;
+	 * treat values > 0 as success, but warn.
+	 */
+	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+	return 0;
 }
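
local_pci_probe() now normalizes driver return values: negative means failure,
zero means success, and a positive value is tolerated as success but warned
about. A hypothetical probe following the contract:

	#include <linux/pci.h>

	static int mydrv_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
	{
		int err = pci_enable_device(pdev);

		if (err)
			return err;	/* negative errno: failure */

		return 0;		/* success; never return > 0 */
	}
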
 
 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
@@ -602,18 +610,10 @@
 	return error;
 }
 
-static void pci_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
 
 #else /* !CONFIG_PM_SLEEP */
 
 #define pci_pm_prepare	NULL
-#define pci_pm_complete	NULL
 
 #endif /* !CONFIG_PM_SLEEP */
 
@@ -1124,9 +1124,8 @@
 
 #ifdef CONFIG_PM
 
-const struct dev_pm_ops pci_dev_pm_ops = {
+static const struct dev_pm_ops pci_dev_pm_ops = {
 	.prepare = pci_pm_prepare,
-	.complete = pci_pm_complete,
 	.suspend = pci_pm_suspend,
 	.resume = pci_pm_resume,
 	.freeze = pci_pm_freeze,
@@ -1319,7 +1318,7 @@
 	.probe		= pci_device_probe,
 	.remove		= pci_device_remove,
 	.shutdown	= pci_device_shutdown,
-	.dev_attrs	= pci_dev_attrs,
+	.dev_groups	= pci_dev_groups,
 	.bus_groups	= pci_bus_groups,
 	.drv_groups	= pci_drv_groups,
 	.pm		= PCI_PM_OPS_PTR,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index d8eb880bd..2aaa83c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -42,7 +42,8 @@
 									\
 	pdev = to_pci_dev (dev);					\
 	return sprintf (buf, format_string, pdev->field);		\
-}
+}									\
+static DEVICE_ATTR_RO(field)
 
 pci_config_attr(vendor, "0x%04x\n");
 pci_config_attr(device, "0x%04x\n");
@@ -73,28 +74,12 @@
 
 	return count;
 }
+static DEVICE_ATTR_RW(broken_parity_status);
 
-static ssize_t local_cpus_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{		
-	const struct cpumask *mask;
-	int len;
-
-#ifdef CONFIG_NUMA
-	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
-					  cpumask_of_node(dev_to_node(dev));
-#else
-	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
-#endif
-	len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
-	buf[len++] = '\n';
-	buf[len] = '\0';
-	return len;
-}
-
-
-static ssize_t local_cpulist_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+static ssize_t pci_dev_show_local_cpu(struct device *dev,
+		int type,
+		struct device_attribute *attr,
+		char *buf)
 {
 	const struct cpumask *mask;
 	int len;
@@ -105,12 +90,29 @@
 #else
 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
 #endif
-	len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+	len = type ?
+		cpumask_scnprintf(buf, PAGE_SIZE-2, mask) :
+		cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
 }
 
+static ssize_t local_cpus_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return pci_dev_show_local_cpu(dev, 1, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpus);
+
+static ssize_t local_cpulist_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return pci_dev_show_local_cpu(dev, 0, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpulist);
+
 /*
  * PCI Bus Class Devices
  */
@@ -170,6 +172,7 @@
 	}
 	return (str - buf);
 }
+static DEVICE_ATTR_RO(resource);
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -181,10 +184,11 @@
 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
 		       (u8)(pci_dev->class));
 }
+static DEVICE_ATTR_RO(modalias);
 
-static ssize_t is_enabled_store(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t count)
+static ssize_t enabled_store(struct device *dev,
+			     struct device_attribute *attr, const char *buf,
+			     size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	unsigned long val;
@@ -208,14 +212,15 @@
 	return result < 0 ? result : count;
 }
 
-static ssize_t is_enabled_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t enabled_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct pci_dev *pdev;
 
 	pdev = to_pci_dev (dev);
 	return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
 }
+static DEVICE_ATTR_RW(enabled);
 
 #ifdef CONFIG_NUMA
 static ssize_t
@@ -223,6 +228,7 @@
 {
 	return sprintf (buf, "%d\n", dev->numa_node);
 }
+static DEVICE_ATTR_RO(numa_node);
 #endif
 
 static ssize_t
@@ -232,6 +238,7 @@
 
 	return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
 }
+static DEVICE_ATTR_RO(dma_mask_bits);
 
 static ssize_t
 consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
@@ -239,6 +246,7 @@
 {
 	return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
 }
+static DEVICE_ATTR_RO(consistent_dma_mask_bits);
 
 static ssize_t
 msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -283,6 +291,7 @@
 
 	return count;
 }
+static DEVICE_ATTR_RW(msi_bus);
 
 static DEFINE_MUTEX(pci_remove_rescan_mutex);
 static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
@@ -304,7 +313,7 @@
 }
 static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
 
-struct attribute *pci_bus_attrs[] = {
+static struct attribute *pci_bus_attrs[] = {
 	&bus_attr_rescan.attr,
 	NULL,
 };
@@ -335,8 +344,9 @@
 	}
 	return count;
 }
-struct device_attribute dev_rescan_attr = __ATTR(rescan, (S_IWUSR|S_IWGRP),
-						 NULL, dev_rescan_store);
+static struct device_attribute dev_rescan_attr = __ATTR(rescan,
+							(S_IWUSR|S_IWGRP),
+							NULL, dev_rescan_store);
 
 static void remove_callback(struct device *dev)
 {
@@ -366,8 +376,9 @@
 		count = ret;
 	return count;
 }
-struct device_attribute dev_remove_attr = __ATTR(remove, (S_IWUSR|S_IWGRP),
-						 NULL, remove_store);
+static struct device_attribute dev_remove_attr = __ATTR(remove,
+							(S_IWUSR|S_IWGRP),
+							NULL, remove_store);
 
 static ssize_t
 dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
@@ -414,6 +425,7 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 	return sprintf (buf, "%u\n", pdev->d3cold_allowed);
 }
+static DEVICE_ATTR_RW(d3cold_allowed);
 #endif
 
 #ifdef CONFIG_PCI_IOV
@@ -499,30 +511,38 @@
 		       sriov_numvfs_show, sriov_numvfs_store);
 #endif /* CONFIG_PCI_IOV */
 
-struct device_attribute pci_dev_attrs[] = {
-	__ATTR_RO(resource),
-	__ATTR_RO(vendor),
-	__ATTR_RO(device),
-	__ATTR_RO(subsystem_vendor),
-	__ATTR_RO(subsystem_device),
-	__ATTR_RO(class),
-	__ATTR_RO(irq),
-	__ATTR_RO(local_cpus),
-	__ATTR_RO(local_cpulist),
-	__ATTR_RO(modalias),
+static struct attribute *pci_dev_attrs[] = {
+	&dev_attr_resource.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_device.attr,
+	&dev_attr_subsystem_vendor.attr,
+	&dev_attr_subsystem_device.attr,
+	&dev_attr_class.attr,
+	&dev_attr_irq.attr,
+	&dev_attr_local_cpus.attr,
+	&dev_attr_local_cpulist.attr,
+	&dev_attr_modalias.attr,
 #ifdef CONFIG_NUMA
-	__ATTR_RO(numa_node),
+	&dev_attr_numa_node.attr,
 #endif
-	__ATTR_RO(dma_mask_bits),
-	__ATTR_RO(consistent_dma_mask_bits),
-	__ATTR(enable, 0600, is_enabled_show, is_enabled_store),
-	__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
-		broken_parity_status_show,broken_parity_status_store),
-	__ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
+	&dev_attr_dma_mask_bits.attr,
+	&dev_attr_consistent_dma_mask_bits.attr,
+	&dev_attr_enabled.attr,
+	&dev_attr_broken_parity_status.attr,
+	&dev_attr_msi_bus.attr,
 #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
-	__ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store),
+	&dev_attr_d3cold_allowed.attr,
 #endif
-	__ATTR_NULL,
+	NULL,
+};
+
+static const struct attribute_group pci_dev_group = {
+	.attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+	&pci_dev_group,
+	NULL,
 };
 
 static struct attribute *pcibus_attrs[] = {
@@ -554,7 +574,7 @@
 		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
 		   IORESOURCE_ROM_SHADOW));
 }
-struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
 
 static ssize_t
 pci_read_config(struct file *filp, struct kobject *kobj,
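
The sysfs churn above is mechanical: each show/store pair gains a
DEVICE_ATTR_RO()/DEVICE_ATTR_RW() declaration, and the legacy dev_attrs array
becomes an attribute_group exported through dev_groups. The shape of the
conversion, with hypothetical "demo" names:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%s\n", dev_name(dev));
	}
	static DEVICE_ATTR_RO(demo);	/* generates dev_attr_demo */

	static struct attribute *demo_attrs[] = {
		&dev_attr_demo.attr,
		NULL,
	};

	static const struct attribute_group demo_group = {
		.attrs = demo_attrs,
	};

	const struct attribute_group *demo_groups[] = {
		&demo_group,
		NULL,
	};
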
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index bdd64b1..b127fbda 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1148,18 +1148,16 @@
 
 static void pci_enable_bridge(struct pci_dev *dev)
 {
+	struct pci_dev *bridge;
 	int retval;
 
-	if (!dev)
-		return;
-
-	pci_enable_bridge(dev->bus->self);
+	bridge = pci_upstream_bridge(dev);
+	if (bridge)
+		pci_enable_bridge(bridge);
 
 	if (pci_is_enabled(dev)) {
-		if (!dev->is_busmaster) {
-			dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
+		if (!dev->is_busmaster)
 			pci_set_master(dev);
-		}
 		return;
 	}
 
@@ -1172,6 +1170,7 @@
 
 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
 {
+	struct pci_dev *bridge;
 	int err;
 	int i, bars = 0;
 
@@ -1190,7 +1189,9 @@
 	if (atomic_inc_return(&dev->enable_cnt) > 1)
 		return 0;		/* already enabled */
 
-	pci_enable_bridge(dev->bus->self);
+	bridge = pci_upstream_bridge(dev);
+	if (bridge)
+		pci_enable_bridge(bridge);
 
 	/* only skip sriov related */
 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
@@ -1644,8 +1645,10 @@
 		if (enable) {
 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
 					  GFP_KERNEL);
-			if (!pme_dev)
-				goto out;
+			if (!pme_dev) {
+				dev_warn(&dev->dev, "can't enable PME#\n");
+				return;
+			}
 			pme_dev->dev = dev;
 			mutex_lock(&pci_pme_list_mutex);
 			list_add(&pme_dev->list, &pci_pme_list);
@@ -1666,7 +1669,6 @@
 		}
 	}
 
-out:
 	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
 }
 
@@ -2860,7 +2862,7 @@
 		lat = pcibios_max_latency;
 	else
 		return;
-	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
@@ -3978,6 +3980,7 @@
 
 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
 }
+EXPORT_SYMBOL(pcie_get_mps);
 
 /**
  * pcie_set_mps - set PCI Express maximum payload size
@@ -4002,6 +4005,7 @@
 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 						  PCI_EXP_DEVCTL_PAYLOAD, v);
 }
+EXPORT_SYMBOL(pcie_set_mps);
 
 /**
  * pcie_get_minimum_link - determine minimum link settings of a PCI device
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 607be58..9c91ecc 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -153,7 +153,7 @@
 	return (dev->no_d1d2 || parent_dstates);
 
 }
-extern struct device_attribute pci_dev_attrs[];
+extern const struct attribute_group *pci_dev_groups[];
 extern const struct attribute_group *pcibus_groups[];
 extern struct device_type pci_dev_type;
 extern const struct attribute_group *pci_bus_groups[];
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 85ca36f..6b3a958 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -574,7 +574,7 @@
 	};
 
 	spin_lock_irqsave(&aer_recover_ring_lock, flags);
-	if (kfifo_put(&aer_recover_ring, &entry))
+	if (kfifo_put(&aer_recover_ring, entry))
 		schedule_work(&aer_recover_work);
 	else
 		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
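
The one-character change above tracks a kfifo API change: kfifo_put() now takes
the element by value rather than by pointer. Illustrative usage:

	#include <linux/kfifo.h>
	#include <linux/printk.h>

	static DEFINE_KFIFO(numbers, int, 16);

	static void push(int n)
	{
		if (!kfifo_put(&numbers, n))	/* was: kfifo_put(&numbers, &n) */
			pr_err("fifo full, dropping %d\n", n);
	}
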
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 31063ac..08d131f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -260,13 +260,14 @@
 	if (pcie_ports_disabled)
 		return 0;
 
-	err = pcie_port_platform_notify(dev, &cap_mask);
-	if (!pcie_ports_auto) {
-		cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
-				| PCIE_PORT_SERVICE_VC;
-		if (pci_aer_available())
-			cap_mask |= PCIE_PORT_SERVICE_AER;
-	} else if (err) {
+	cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+			| PCIE_PORT_SERVICE_VC;
+	if (pci_aer_available())
+		cap_mask |= PCIE_PORT_SERVICE_AER;
+
+	if (pcie_ports_auto) {
+		err = pcie_port_platform_notify(dev, &cap_mask);
+		if (err)
 			return 0;
 	}
 
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7ef0f86..5e14f5a 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -641,8 +641,7 @@
 		return;
 	}
 
-	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
-	if (pos) {
+	if (pci_is_pcie(bridge)) {
 		u32 linkcap;
 		u16 linksta;
 
@@ -984,7 +983,6 @@
 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (!pos)
 		return;
-	pdev->is_pcie = 1;
 	pdev->pcie_cap = pos;
 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
 	pdev->pcie_flags_reg = reg16;
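
With pdev->is_pcie gone, pci_is_pcie(), which tests the cached pcie_cap offset,
is the one way to ask the question, and open-coded
pci_find_capability(dev, PCI_CAP_ID_EXP) checks like the one removed above
become redundant. A sketch:

	#include <linux/pci.h>

	static void report_link(struct pci_dev *dev)
	{
		u16 lnksta;

		if (!pci_is_pcie(dev))	/* conventional PCI: no link registers */
			return;
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
		dev_info(&dev->dev, "link status %#06x\n", lnksta);
	}
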
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f6c31fa..9149045 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2955,6 +2955,29 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
 /*
+ * PCI devices on Intel chips can skip the 10 ms delay before
+ * entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+	dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
+/*
  * Some devices may pass our check in pci_intx_mask_supported if
  * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
  * support this feature.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index bc26d79..4ce83b2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -982,7 +982,7 @@
 	}
 
 	min_align = calculate_mem_align(aligns, max_order);
-	min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
+	min_align = max(min_align, window_alignment(bus, b_res->flags));
 	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
 	if (children_add_size > add_size)
 		add_size = children_add_size;
@@ -1136,7 +1136,7 @@
 	}
 
 	/* The root bus? */
-	if (!bus->self)
+	if (pci_is_root_bus(bus))
 		return;
 
 	switch (bus->self->class >> 8) {
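
pci_is_root_bus() states the intent of the old !bus->self test directly: a root
bus has no upstream P2P bridge, so there are no bridge windows to size.
Trivially:

	#include <linux/pci.h>

	static bool has_bridge_windows(struct pci_bus *bus)
	{
		/* a root bus has no bus->self bridge device to configure */
		return !pci_is_root_bus(bus);
	}
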
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8eea2ef..605a9be 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -289,7 +289,7 @@
 static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
 				   enum vga_switcheroo_state state)
 {
-	INIT_COMPLETION(gmux_data->powerchange_done);
+	reinit_completion(&gmux_data->powerchange_done);
 
 	if (state == VGA_SWITCHEROO_ON) {
 		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
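
INIT_COMPLETION() is being retired tree-wide in favour of reinit_completion(),
which re-arms an already initialized completion before reuse. The idiom,
assuming a handler elsewhere calls complete():

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(op_done);

	static int do_request(void)
	{
		reinit_completion(&op_done);	/* was: INIT_COMPLETION(op_done) */

		/* ... start the operation; its handler calls complete(&op_done) ... */

		return wait_for_completion_timeout(&op_done, HZ) ? 0 : -ETIMEDOUT;
	}
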
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a6afd41..aefcc32 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -190,16 +190,10 @@
  */
 static int write_acpi_int(acpi_handle handle, const char *method, int val)
 {
-	struct acpi_object_list params;
-	union acpi_object in_obj;
 	acpi_status status;
 
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = val;
+	status = acpi_execute_simple_method(handle, (char *)method, val);
 
-	status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
 	return (status == AE_OK ? 0 : -1);
 }
 
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 52b8a97..9d30d69 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -219,8 +219,7 @@
 	{ .type = ACPI_TYPE_INTEGER }
 	};
 	struct acpi_object_list arg_list = { 4, &params[0] };
-	struct acpi_buffer output;
-	union acpi_object out_obj;
+	unsigned long long value;
 	acpi_handle handle = NULL;
 
 	status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@
 	params[2].integer.value = arg1;
 	params[3].integer.value = arg2;
 
-	output.length = sizeof(out_obj);
-	output.pointer = &out_obj;
-
-	status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+	status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
 	if (ACPI_FAILURE(status)) {
 		vdbg_printk(FUJLAPTOP_DBG_WARN,
 			"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@
 		return -ENODEV;
 	}
 
-	if (out_obj.type != ACPI_TYPE_INTEGER) {
-		vdbg_printk(FUJLAPTOP_DBG_WARN,
-			"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
-			"return an integer\n",
-			cmd, arg0, arg1, arg2);
-		return -ENODEV;
-	}
-
 	vdbg_printk(FUJLAPTOP_DBG_TRACE,
 		"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
-			cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
-	return out_obj.integer.value;
+			cmd, arg0, arg1, arg2, (int)value);
+	return value;
 }
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@
 static int set_lcd_level(int level)
 {
 	acpi_status status = AE_OK;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list arg_list = { 1, &arg0 };
 	acpi_handle handle = NULL;
 
 	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@
 		return -ENODEV;
 	}
 
-	arg0.integer.value = level;
 
-	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+	status = acpi_execute_simple_method(handle, NULL, level);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -345,8 +330,6 @@
 static int set_lcd_level_alt(int level)
 {
 	acpi_status status = AE_OK;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list arg_list = { 1, &arg0 };
 	acpi_handle handle = NULL;
 
 	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@
 		return -ENODEV;
 	}
 
-	arg0.integer.value = level;
-
-	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+	status = acpi_execute_simple_method(handle, NULL, level);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -586,11 +567,10 @@
 
 static void dmi_check_cb_common(const struct dmi_system_id *id)
 {
-	acpi_handle handle;
 	pr_info("Identified laptop model '%s'\n", id->ident);
 	if (use_alt_lcd_levels == -1) {
-		if (ACPI_SUCCESS(acpi_get_handle(NULL,
-				"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+		if (acpi_has_method(NULL,
+				"\\_SB.PCI0.LPCB.FJEX.SBL2"))
 			use_alt_lcd_levels = 1;
 		else
 			use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@
 
 static int acpi_fujitsu_add(struct acpi_device *device)
 {
-	acpi_handle handle;
 	int result = 0;
 	int state = 0;
 	struct input_dev *input;
@@ -702,8 +681,7 @@
 
 	fujitsu->dev = device;
 
-	if (ACPI_SUCCESS
-	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+	if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
 		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
 		if (ACPI_FAILURE
 		    (acpi_evaluate_object
@@ -803,7 +781,6 @@
 
 static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 {
-	acpi_handle handle;
 	int result = 0;
 	int state = 0;
 	struct input_dev *input;
@@ -866,8 +843,7 @@
 
 	fujitsu_hotkey->dev = device;
 
-	if (ACPI_SUCCESS
-	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+	if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
 		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
 		if (ACPI_FAILURE
 		    (acpi_evaluate_object
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 89c4519..6788acc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -72,8 +72,15 @@
 	VPCCMD_W_BL_POWER = 0x33,
 };
 
+struct ideapad_rfk_priv {
+	int dev;
+	struct ideapad_private *priv;
+};
+
 struct ideapad_private {
+	struct acpi_device *adev;
 	struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+	struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
 	struct platform_device *platform_device;
 	struct input_dev *inputdev;
 	struct backlight_device *blightdev;
@@ -81,8 +88,6 @@
 	unsigned long cfg;
 };
 
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
 static bool no_bt_rfkill;
 module_param(no_bt_rfkill, bool, 0444);
 MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@
  */
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
+	struct ideapad_private *priv = s->private;
 	unsigned long value;
 
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+	if (!priv)
+		return -EINVAL;
+
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
 		seq_printf(s, "Backlight max:\t%lu\n", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
 		seq_printf(s, "Backlight now:\t%lu\n", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
 		seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
 	seq_printf(s, "=====================\n");
 
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
 		seq_printf(s, "Radio status:\t%s(%lu)\n",
 			   value ? "On" : "Off", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
 		seq_printf(s, "Wifi status:\t%s(%lu)\n",
 			   value ? "On" : "Off", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
 		seq_printf(s, "BT status:\t%s(%lu)\n",
 			   value ? "On" : "Off", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
 		seq_printf(s, "3G status:\t%s(%lu)\n",
 			   value ? "On" : "Off", value);
 	seq_printf(s, "=====================\n");
 
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
 		seq_printf(s, "Touchpad status:%s(%lu)\n",
 			   value ? "On" : "Off", value);
-	if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
 		seq_printf(s, "Camera status:\t%s(%lu)\n",
 			   value ? "On" : "Off", value);
 
@@ -236,7 +245,7 @@
 
 static int debugfs_status_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, debugfs_status_show, NULL);
+	return single_open(file, debugfs_status_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@
 
 static int debugfs_cfg_show(struct seq_file *s, void *data)
 {
-	if (!ideapad_priv) {
+	struct ideapad_private *priv = s->private;
+
+	if (!priv) {
 		seq_printf(s, "cfg: N/A\n");
 	} else {
 		seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
-			   ideapad_priv->cfg);
-		if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+			   priv->cfg);
+		if (test_bit(CFG_BT_BIT, &priv->cfg))
 			seq_printf(s, "Bluetooth ");
-		if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+		if (test_bit(CFG_3G_BIT, &priv->cfg))
 			seq_printf(s, "3G ");
-		if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+		if (test_bit(CFG_WIFI_BIT, &priv->cfg))
 			seq_printf(s, "Wireless ");
-		if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+		if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
 			seq_printf(s, "Camera ");
 		seq_printf(s, "\nGraphic: ");
-		switch ((ideapad_priv->cfg)&0x700) {
+		switch ((priv->cfg) & 0x700) {
 		case 0x100:
 			seq_printf(s, "Intel");
 			break;
@@ -287,7 +298,7 @@
 
 static int debugfs_cfg_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, debugfs_cfg_show, NULL);
+	return single_open(file, debugfs_cfg_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@
 		goto errout;
 	}
 
-	node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+	node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
 				   &debugfs_cfg_fops);
 	if (!node) {
 		pr_err("failed to create cfg in debugfs");
 		goto errout;
 	}
 
-	node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+	node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
 				   &debugfs_status_fops);
 	if (!node) {
 		pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@
 				char *buf)
 {
 	unsigned long result;
+	struct ideapad_private *priv = dev_get_drvdata(dev);
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
 		return sprintf(buf, "-1\n");
 	return sprintf(buf, "%lu\n", result);
 }
@@ -353,12 +365,13 @@
 				 const char *buf, size_t count)
 {
 	int ret, state;
+	struct ideapad_private *priv = dev_get_drvdata(dev);
 
 	if (!count)
 		return 0;
 	if (sscanf(buf, "%i", &state) != 1)
 		return -EINVAL;
-	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+	ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
 	if (ret < 0)
 		return -EIO;
 	return count;
@@ -371,8 +384,9 @@
 				char *buf)
 {
 	unsigned long result;
+	struct ideapad_private *priv = dev_get_drvdata(dev);
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
 		return sprintf(buf, "-1\n");
 	return sprintf(buf, "%lu\n", result);
 }
@@ -382,6 +396,7 @@
 				 const char *buf, size_t count)
 {
 	int ret, state;
+	struct ideapad_private *priv = dev_get_drvdata(dev);
 
 	if (!count)
 		return 0;
@@ -389,7 +404,7 @@
 		return -EINVAL;
 	if (state < 0 || state > 4 || state == 3)
 		return -EINVAL;
-	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+	ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
 	if (ret < 0)
 		return -EIO;
 	return count;
@@ -415,7 +430,8 @@
 		supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
 	else if (attr == &dev_attr_fan_mode.attr) {
 		unsigned long value;
-		supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+		supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+					  &value);
 	} else
 		supported = true;
 
@@ -445,9 +461,9 @@
 
 static int ideapad_rfk_set(void *data, bool blocked)
 {
-	unsigned long opcode = (unsigned long)data;
+	struct ideapad_rfk_priv *priv = data;
 
-	return write_ec_cmd(ideapad_handle, opcode, !blocked);
+	return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@
 	unsigned long hw_blocked;
 	int i;
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
 		return;
 	hw_blocked = !hw_blocked;
 
@@ -468,27 +484,30 @@
 			rfkill_set_hw_state(priv->rfk[i], hw_blocked);
 }
 
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
 {
-	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 	int ret;
 	unsigned long sw_blocked;
 
 	if (no_bt_rfkill &&
 	    (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
 		/* Force to enable bluetooth when no_bt_rfkill=1 */
-		write_ec_cmd(ideapad_handle,
+		write_ec_cmd(priv->adev->handle,
 			     ideapad_rfk_data[dev].opcode, 1);
 		return 0;
 	}
+	priv->rfk_priv[dev].dev = dev;
+	priv->rfk_priv[dev].priv = priv;
 
-	priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
-				      ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
-				      (void *)(long)dev);
+	priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+				      &priv->platform_device->dev,
+				      ideapad_rfk_data[dev].type,
+				      &ideapad_rfk_ops,
+				      &priv->rfk_priv[dev]);
 	if (!priv->rfk[dev])
 		return -ENOMEM;
 
-	if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+	if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
 			 &sw_blocked)) {
 		rfkill_init_sw_state(priv->rfk[dev], 0);
 	} else {
@@ -504,10 +523,8 @@
 	return 0;
 }
 
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
 {
-	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
 	if (!priv->rfk[dev])
 		return;
 
@@ -518,37 +535,16 @@
 /*
  * Platform device
  */
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
 {
-	int result;
-
-	priv->platform_device = platform_device_alloc("ideapad", -1);
-	if (!priv->platform_device)
-		return -ENOMEM;
-	platform_set_drvdata(priv->platform_device, priv);
-
-	result = platform_device_add(priv->platform_device);
-	if (result)
-		goto fail_platform_device;
-
-	result = sysfs_create_group(&priv->platform_device->dev.kobj,
+	return sysfs_create_group(&priv->platform_device->dev.kobj,
 				    &ideapad_attribute_group);
-	if (result)
-		goto fail_sysfs;
-	return 0;
-
-fail_sysfs:
-	platform_device_del(priv->platform_device);
-fail_platform_device:
-	platform_device_put(priv->platform_device);
-	return result;
 }
 
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
 {
 	sysfs_remove_group(&priv->platform_device->dev.kobj,
 			   &ideapad_attribute_group);
-	platform_device_unregister(priv->platform_device);
 }
 
 /*
@@ -623,7 +619,7 @@
 {
 	unsigned long long_pressed;
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
 		return;
 	if (long_pressed)
 		ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@
 {
 	unsigned long bit, value;
 
-	read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+	read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
 
 	for (bit = 0; bit < 16; bit++) {
 		if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@
  */
 static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
 {
+	struct ideapad_private *priv = bl_get_data(blightdev);
 	unsigned long now;
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+	if (!priv)
+		return -EINVAL;
+
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
 		return -EIO;
 	return now;
 }
 
 static int ideapad_backlight_update_status(struct backlight_device *blightdev)
 {
-	if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+	struct ideapad_private *priv = bl_get_data(blightdev);
+
+	if (!priv)
+		return -EINVAL;
+
+	if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
 			 blightdev->props.brightness))
 		return -EIO;
-	if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+	if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
 			 blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
 		return -EIO;
 
@@ -692,11 +697,11 @@
 	struct backlight_properties props;
 	unsigned long max, now, power;
 
-	if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
 		return -EIO;
-	if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
 		return -EIO;
-	if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
 		return -EIO;
 
 	memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@
 
 	if (!blightdev)
 		return;
-	if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+	if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
 		return;
 	blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
 }
@@ -745,7 +750,7 @@
 
 	/* if we control brightness via acpi video driver */
 	if (priv->blightdev == NULL) {
-		read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+		read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
 		return;
 	}
 
@@ -755,19 +760,12 @@
 /*
  * module init/exit
  */
-static const struct acpi_device_id ideapad_device_ids[] = {
-	{ "VPC2004", 0},
-	{ "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
-	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 	unsigned long value;
 
 	/* Without reading from EC touchpad LED doesn't switch state */
-	if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+	if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
 		/* Some IdeaPads don't really turn off touchpad - they only
 		 * switch the LED state. We (de)activate KBC AUX port to turn
 		 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,86 +777,9 @@
 	}
 }
 
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
 {
-	int ret, i;
-	int cfg;
-	struct ideapad_private *priv;
-
-	if (read_method_int(adevice->handle, "_CFG", &cfg))
-		return -ENODEV;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	dev_set_drvdata(&adevice->dev, priv);
-	ideapad_priv = priv;
-	ideapad_handle = adevice->handle;
-	priv->cfg = cfg;
-
-	ret = ideapad_platform_init(priv);
-	if (ret)
-		goto platform_failed;
-
-	ret = ideapad_debugfs_init(priv);
-	if (ret)
-		goto debugfs_failed;
-
-	ret = ideapad_input_init(priv);
-	if (ret)
-		goto input_failed;
-
-	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
-		if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
-			ideapad_register_rfkill(adevice, i);
-		else
-			priv->rfk[i] = NULL;
-	}
-	ideapad_sync_rfk_state(priv);
-	ideapad_sync_touchpad_state(adevice);
-
-	if (!acpi_video_backlight_support()) {
-		ret = ideapad_backlight_init(priv);
-		if (ret && ret != -ENODEV)
-			goto backlight_failed;
-	}
-
-	return 0;
-
-backlight_failed:
-	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-		ideapad_unregister_rfkill(adevice, i);
-	ideapad_input_exit(priv);
-input_failed:
-	ideapad_debugfs_exit(priv);
-debugfs_failed:
-	ideapad_platform_exit(priv);
-platform_failed:
-	kfree(priv);
-	return ret;
-}
-
-static int ideapad_acpi_remove(struct acpi_device *adevice)
-{
-	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-	int i;
-
-	ideapad_backlight_exit(priv);
-	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-		ideapad_unregister_rfkill(adevice, i);
-	ideapad_input_exit(priv);
-	ideapad_debugfs_exit(priv);
-	ideapad_platform_exit(priv);
-	dev_set_drvdata(&adevice->dev, NULL);
-	kfree(priv);
-
-	return 0;
-}
-
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
-{
-	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-	acpi_handle handle = adevice->handle;
+	struct ideapad_private *priv = data;
 	unsigned long vpc1, vpc2, vpc_bit;
 
 	if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
@@ -880,7 +801,7 @@
 				ideapad_input_report(priv, vpc_bit);
 				break;
 			case 5:
-				ideapad_sync_touchpad_state(adevice);
+				ideapad_sync_touchpad_state(priv);
 				break;
 			case 4:
 				ideapad_backlight_notify_brightness(priv);
@@ -901,26 +822,129 @@
 	}
 }
 
-static int ideapad_acpi_resume(struct device *device)
+static int ideapad_acpi_add(struct platform_device *pdev)
 {
-	ideapad_sync_rfk_state(ideapad_priv);
-	ideapad_sync_touchpad_state(to_acpi_device(device));
+	int ret, i;
+	int cfg;
+	struct ideapad_private *priv;
+	struct acpi_device *adev;
+
+	ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+	if (ret)
+		return -ENODEV;
+
+	if (read_method_int(adev->handle, "_CFG", &cfg))
+		return -ENODEV;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, priv);
+	priv->cfg = cfg;
+	priv->adev = adev;
+	priv->platform_device = pdev;
+
+	ret = ideapad_sysfs_init(priv);
+	if (ret)
+		goto sysfs_failed;
+
+	ret = ideapad_debugfs_init(priv);
+	if (ret)
+		goto debugfs_failed;
+
+	ret = ideapad_input_init(priv);
+	if (ret)
+		goto input_failed;
+
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
+		if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
+			ideapad_register_rfkill(priv, i);
+		else
+			priv->rfk[i] = NULL;
+	}
+	ideapad_sync_rfk_state(priv);
+	ideapad_sync_touchpad_state(priv);
+
+	if (!acpi_video_backlight_support()) {
+		ret = ideapad_backlight_init(priv);
+		if (ret && ret != -ENODEV)
+			goto backlight_failed;
+	}
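+	/* as a platform driver we must install the ACPI notify handler
+	 * ourselves; the ACPI core no longer does it for us.
+	 */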
+	ret = acpi_install_notify_handler(adev->handle,
+		ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+	if (ret)
+		goto notification_failed;
+
+	return 0;
+notification_failed:
+	ideapad_backlight_exit(priv);
+backlight_failed:
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+		ideapad_unregister_rfkill(priv, i);
+	ideapad_input_exit(priv);
+input_failed:
+	ideapad_debugfs_exit(priv);
+debugfs_failed:
+	ideapad_sysfs_exit(priv);
+sysfs_failed:
+	kfree(priv);
+	return ret;
+}
+
+static int ideapad_acpi_remove(struct platform_device *pdev)
+{
+	struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
+	int i;
+
+	acpi_remove_notify_handler(priv->adev->handle,
+		ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
+	ideapad_backlight_exit(priv);
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+		ideapad_unregister_rfkill(priv, i);
+	ideapad_input_exit(priv);
+	ideapad_debugfs_exit(priv);
+	ideapad_sysfs_exit(priv);
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(priv);
+
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
+{
+	struct ideapad_private *priv;
+
+	if (!device)
+		return -EINVAL;
+	priv = dev_get_drvdata(device);
+
+	ideapad_sync_rfk_state(priv);
+	ideapad_sync_touchpad_state(priv);
+	return 0;
+}
+#endif
+
 static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
 
-static struct acpi_driver ideapad_acpi_driver = {
-	.name = "ideapad_acpi",
-	.class = "IdeaPad",
-	.ids = ideapad_device_ids,
-	.ops.add = ideapad_acpi_add,
-	.ops.remove = ideapad_acpi_remove,
-	.ops.notify = ideapad_acpi_notify,
-	.drv.pm = &ideapad_pm,
-	.owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+	{ "VPC2004", 0},
+	{ "", 0},
 };
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+	.probe = ideapad_acpi_add,
+	.remove = ideapad_acpi_remove,
+	.driver = {
+		.name   = "ideapad_acpi",
+		.owner  = THIS_MODULE,
+		.pm     = &ideapad_pm,
+		.acpi_match_table = ACPI_PTR(ideapad_device_ids),
+	},
+};
+
+module_platform_driver(ideapad_acpi_driver);
 
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 MODULE_DESCRIPTION("IdeaPad ACPI Extras");
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 41b740c..a2083a9 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -29,24 +29,16 @@
 				       char *buf)
 {
 	struct acpi_device *acpi;
-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *result;
+	unsigned long long value;
 	acpi_status status;
 
 	acpi = to_acpi_device(dev);
 
-	status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+	status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
 	if (!ACPI_SUCCESS(status))
 		return -EINVAL;
 
-	result = output.pointer;
-
-	if (result->type != ACPI_TYPE_INTEGER) {
-		kfree(result);
-		return -EINVAL;
-	}
-
-	return sprintf(buf, "%lld\n", result->integer.value);
+	return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@
 					const char *buf, size_t count)
 {
 	struct acpi_device *acpi;
-	struct acpi_object_list input;
-	union acpi_object param;
 	acpi_status status;
 	unsigned long value;
 	int error;
@@ -67,13 +57,7 @@
 	if (error)
 		return error;
 
-	param.type = ACPI_TYPE_INTEGER;
-	param.integer.value = value;
-
-	input.count = 1;
-	input.pointer = &param;
-
-	status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+	status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
 
 	if (!ACPI_SUCCESS(status))
 		return -EINVAL;
@@ -91,24 +75,16 @@
 				     struct device_attribute *attr, char *buf)
 {
 	struct acpi_device *acpi;
-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *result;
+	unsigned long long value;
 	acpi_status status;
 
 	acpi = to_acpi_device(dev);
 
-	status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+	status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
 	if (!ACPI_SUCCESS(status))
 		return -EINVAL;
 
-	result = output.pointer;
-
-	if (result->type != ACPI_TYPE_INTEGER) {
-		kfree(result);
-		return -EINVAL;
-	}
-
-	return sprintf(buf, "%lld\n", result->integer.value);
+	return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@
 				      const char *buf, size_t count)
 {
 	struct acpi_device *acpi;
-	struct acpi_object_list input;
-	union acpi_object param;
 	acpi_status status;
 	unsigned long value;
 	int error;
@@ -129,13 +103,7 @@
 	if (error)
 		return error;
 
-	param.type = ACPI_TYPE_INTEGER;
-	param.integer.value = value;
-
-	input.count = 1;
-	input.pointer = &param;
-
-	status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+	status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
 
 	if (!ACPI_SUCCESS(status))
 		return -EINVAL;
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 52259dc..1838400 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -25,37 +25,18 @@
 
 static int smartconnect_acpi_init(struct acpi_device *acpi)
 {
-	struct acpi_object_list input;
-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *result;
-	union acpi_object param;
+	unsigned long long value;
 	acpi_status status;
 
-	status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+	status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
 	if (!ACPI_SUCCESS(status))
 		return -EINVAL;
 
-	result = output.pointer;
-
-	if (result->type != ACPI_TYPE_INTEGER) {
-		kfree(result);
-		return -EINVAL;
-	}
-
-	if (result->integer.value & 0x1) {
-		param.type = ACPI_TYPE_INTEGER;
-		param.integer.value = 0;
-
-		input.count = 1;
-		input.pointer = &param;
-
+	if (value & 0x1) {
 		dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
-		status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
-					      NULL);
+		status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
 	}
 
-	kfree(result);
-
 	return 0;
 }
 
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index d6cfc15..11244f8 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -156,19 +156,15 @@
 static int intel_menlow_memory_add(struct acpi_device *device)
 {
 	int result = -ENODEV;
-	acpi_status status = AE_OK;
-	acpi_handle dummy;
 	struct thermal_cooling_device *cdev;
 
 	if (!device)
 		return -EINVAL;
 
-	status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
 		goto end;
 
-	status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
 		goto end;
 
 	cdev = thermal_cooling_device_register("Memory controller", device,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 13ec195..47caab0 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1508,7 +1508,6 @@
 static int sony_nc_resume(struct device *dev)
 {
 	struct sony_nc_value *item;
-	acpi_handle handle;
 
 	for (item = sony_nc_values; item->name; item++) {
 		int ret;
@@ -1523,15 +1522,13 @@
 		}
 	}
 
-	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-					 &handle))) {
+	if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
 		int arg = 1;
 		if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
 			dprintk("ECON Method failed\n");
 	}
 
-	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-					 &handle)))
+	if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
 		sony_nc_function_resume();
 
 	return 0;
@@ -2682,7 +2679,6 @@
 
 static void sony_nc_backlight_setup(void)
 {
-	acpi_handle unused;
 	int max_brightness = 0;
 	const struct backlight_ops *ops = NULL;
 	struct backlight_properties props;
@@ -2717,8 +2713,7 @@
 		sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
 		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
-	} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
-						&unused))) {
+	} else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
 		ops = &sony_backlight_ops;
 		max_brightness = SONY_MAX_BRIGHTNESS - 1;
 
@@ -2750,7 +2745,6 @@
 {
 	acpi_status status;
 	int result = 0;
-	acpi_handle handle;
 	struct sony_nc_value *item;
 
 	pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2790,15 +2784,13 @@
 		goto outplatform;
 	}
 
-	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-					 &handle))) {
+	if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
 		int arg = 1;
 		if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
 			dprintk("ECON Method failed\n");
 	}
 
-	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-					 &handle))) {
+	if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
 		dprintk("Doing SNC setup\n");
 		/* retrieve the available handles */
 		result = sony_nc_handles_setup(sony_pf_device);
@@ -2821,9 +2813,8 @@
 
 		/* find the available acpiget as described in the DSDT */
 		for (; item->acpiget && *item->acpiget; ++item->acpiget) {
-			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-							 *item->acpiget,
-							 &handle))) {
+			if (acpi_has_method(sony_nc_acpi_handle,
+							*item->acpiget)) {
 				dprintk("Found %s getter: %s\n",
 						item->name, *item->acpiget);
 				item->devattr.attr.mode |= S_IRUGO;
@@ -2833,9 +2824,8 @@
 
 		/* find the available acpiset as described in the DSDT */
 		for (; item->acpiset && *item->acpiset; ++item->acpiset) {
-			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-							 *item->acpiset,
-							 &handle))) {
+			if (acpi_has_method(sony_nc_acpi_handle,
+							*item->acpiset)) {
 				dprintk("Found %s setter: %s\n",
 						item->name, *item->acpiset);
 				item->devattr.attr.mode |= S_IWUSR;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0b7efb2..05e046a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -701,6 +701,14 @@
 static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
 			u32 level, void *context, void **return_value)
 {
+	struct acpi_device *dev;
+
+	if (!strcmp(context, "video")) {
+		if (acpi_bus_get_device(handle, &dev))
+			return AE_OK;
+		if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+			return AE_OK;
+	}
+
 	*(acpi_handle *)return_value = handle;
 
 	return AE_CTRL_TERMINATE;
@@ -713,10 +721,10 @@
 	acpi_status status;
 	acpi_handle device_found;
 
-	BUG_ON(!name || !hid || !handle);
+	BUG_ON(!name || !handle);
 	vdbg_printk(TPACPI_DBG_INIT,
 			"trying to locate ACPI handle for %s, using HID %s\n",
-			name, hid);
+			name, hid ? hid : "NULL");
 
 	memset(&device_found, 0, sizeof(device_found));
 	status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6091,19 +6099,28 @@
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
+	struct acpi_device *device, *child;
 	int rc;
 
-	if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+	if (acpi_bus_get_device(handle, &device))
+		return 0;
+
+	rc = 0;
+	list_for_each_entry(child, &device->children, node) {
+		acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+							  NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			continue;
+
 		obj = (union acpi_object *)buffer.pointer;
 		if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
 			pr_err("Unknown _BCL data, please report this to %s\n",
-			       TPACPI_MAIL);
+				TPACPI_MAIL);
 			rc = 0;
 		} else {
 			rc = obj->package.count;
 		}
-	} else {
-		return 0;
+		break;
 	}
 
 	kfree(buffer.pointer);
@@ -6119,7 +6136,7 @@
 	acpi_handle video_device;
 	int bcl_levels = 0;
 
-	tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+	tpacpi_acpi_handle_locate("video", NULL, &video_device);
 	if (video_device)
 		bcl_levels = tpacpi_query_bcl_levels(video_device);
 
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 4ab618c..67897c8 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -80,13 +80,9 @@
 static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
 {
 	acpi_status status;
-	union acpi_object fncx_params[1] = {
-		{ .type = ACPI_TYPE_INTEGER }
-	};
-	struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
 
-	fncx_params[0].integer.value = state ? 0x86 : 0x87;
-	status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+	status = acpi_execute_simple_method(device->handle, "FNCX",
+						state ? 0x86 : 0x87);
 	if (ACPI_FAILURE(status)) {
 		pr_err("Unable to switch FNCX notifications\n");
 		return -ENODEV;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index eb3467e..0cfadb65 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -191,16 +191,9 @@
 
 static int write_acpi_int(const char *methodName, int val)
 {
-	struct acpi_object_list params;
-	union acpi_object in_objs[1];
 	acpi_status status;
 
-	params.count = ARRAY_SIZE(in_objs);
-	params.pointer = in_objs;
-	in_objs[0].type = ACPI_TYPE_INTEGER;
-	in_objs[0].integer.value = val;
-
-	status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+	status = acpi_execute_simple_method(NULL, (char *)methodName, val);
 	return (status == AE_OK) ? 0 : -EIO;
 }
 
@@ -947,21 +940,17 @@
  */
 static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
 {
-	struct acpi_buffer buf;
-	union acpi_object out_obj;
+	unsigned long long value;
 	acpi_status status;
 
-	buf.pointer = &out_obj;
-	buf.length = sizeof(out_obj);
-
-	status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
-				      NULL, &buf);
-	if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+	status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+				      NULL, &value);
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI INFO method execution failed\n");
 		return -EIO;
 	}
 
-	return out_obj.integer.value;
+	return value;
 }
 
 static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@
 static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
 {
 	acpi_status status;
-	acpi_handle ec_handle, handle;
+	acpi_handle ec_handle;
 	int error;
 	u32 hci_result;
 
@@ -1008,10 +997,7 @@
 	 */
 	status = AE_ERROR;
 	ec_handle = ec_get_handle();
-	if (ec_handle)
-		status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
-	if (ACPI_SUCCESS(status)) {
+	if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
 		INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
 
 		error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@
 	 * Determine hotkey query interface. Prefer using the INFO
 	 * method when it is available.
 	 */
-	status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
-	if (ACPI_SUCCESS(status)) {
+	if (acpi_has_method(dev->acpi_dev->handle, "INFO")) {
 		dev->info_supported = 1;
 	} else {
 		hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
 		if (hci_result == HCI_SUCCESS)
 			dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@
 
 static const char *find_hci_method(acpi_handle handle)
 {
-	acpi_status status;
-	acpi_handle hci_handle;
-
-	status = acpi_get_handle(handle, "GHCI", &hci_handle);
-	if (ACPI_SUCCESS(status))
+	if (acpi_has_method(handle, "GHCI"))
 		return "GHCI";
 
-	status = acpi_get_handle(handle, "SPFC", &hci_handle);
-	if (ACPI_SUCCESS(status))
+	if (acpi_has_method(handle, "SPFC"))
 		return "SPFC";
 
 	return NULL;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 601ea95..62e8c22 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -252,8 +252,6 @@
 {
 	struct guid_block *block = NULL;
 	char method[5];
-	struct acpi_object_list input;
-	union acpi_object params[1];
 	acpi_status status;
 	acpi_handle handle;
 
@@ -263,13 +261,9 @@
 	if (!block)
 		return AE_NOT_EXIST;
 
-	input.count = 1;
-	input.pointer = params;
-	params[0].type = ACPI_TYPE_INTEGER;
-	params[0].integer.value = enable;
 
 	snprintf(method, 5, "WE%02X", block->notify_id);
-	status = acpi_evaluate_object(handle, method, &input, NULL);
+	status = acpi_execute_simple_method(handle, method, enable);
 
 	if (status != AE_OK && status != AE_NOT_FOUND)
 		return status;
@@ -353,10 +347,10 @@
 {
 	struct guid_block *block = NULL;
 	struct wmi_block *wblock = NULL;
-	acpi_handle handle, wc_handle;
+	acpi_handle handle;
 	acpi_status status, wc_status = AE_ERROR;
-	struct acpi_object_list input, wc_input;
-	union acpi_object wc_params[1], wq_params[1];
+	struct acpi_object_list input;
+	union acpi_object wq_params[1];
 	char method[5];
 	char wc_method[5] = "WC";
 
@@ -386,11 +380,6 @@
 	 * enable collection.
 	 */
 	if (block->flags & ACPI_WMI_EXPENSIVE) {
-		wc_input.count = 1;
-		wc_input.pointer = wc_params;
-		wc_params[0].type = ACPI_TYPE_INTEGER;
-		wc_params[0].integer.value = 1;
-
 		strncat(wc_method, block->object_id, 2);
 
 		/*
@@ -398,10 +387,9 @@
 		 * expensive, but have no corresponding WCxx method. So we
 		 * should not fail if this happens.
 		 */
-		wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
-		if (ACPI_SUCCESS(wc_status))
-			wc_status = acpi_evaluate_object(handle, wc_method,
-				&wc_input, NULL);
+		if (acpi_has_method(handle, wc_method))
+			wc_status = acpi_execute_simple_method(handle,
+								wc_method, 1);
 	}
 
 	strcpy(method, "WQ");
@@ -414,9 +402,7 @@
 	 * the WQxx method failed - we should disable collection anyway.
 	 */
 	if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
-		wc_params[0].integer.value = 0;
-		status = acpi_evaluate_object(handle,
-		wc_method, &wc_input, NULL);
+		status = acpi_execute_simple_method(handle, wc_method, 0);
 	}
 
 	return status;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 34049b0..747826d 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -239,8 +239,6 @@
 
 static int __init pnpacpi_add_device(struct acpi_device *device)
 {
-	acpi_handle temp = NULL;
-	acpi_status status;
 	struct pnp_dev *dev;
 	char *pnpid;
 	struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@
 	 * If a PnPacpi device is not present, the device
 	 * driver should not be loaded.
 	 */
-	status = acpi_get_handle(device->handle, "_CRS", &temp);
-	if (ACPI_FAILURE(status))
+	if (!acpi_has_method(device->handle, "_CRS"))
 		return 0;
 
 	pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@
 	dev->data = device;
 	/* .enabled means the device can decode the resources */
 	dev->active = device->status.enabled;
-	status = acpi_get_handle(device->handle, "_SRS", &temp);
-	if (ACPI_SUCCESS(status))
+	if (acpi_has_method(device->handle, "_SRS"))
 		dev->capabilities |= PNP_CONFIGURABLE;
 	dev->capabilities |= PNP_READ;
 	if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
 		dev->capabilities |= PNP_WRITE;
 	if (device->flags.removable)
 		dev->capabilities |= PNP_REMOVABLE;
-	status = acpi_get_handle(device->handle, "_DIS", &temp);
-	if (ACPI_SUCCESS(status))
+	if (acpi_has_method(device->handle, "_DIS"))
 		dev->capabilities |= PNP_DISABLE;
 
 	if (strlen(acpi_device_name(device)))
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 7549707..3cb4178 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -574,8 +574,8 @@
 	}
 
 	/* Return and WFI */
-	INIT_COMPLETION(di->ab8500_fg_started);
-	INIT_COMPLETION(di->ab8500_fg_complete);
+	reinit_completion(&di->ab8500_fg_started);
+	reinit_completion(&di->ab8500_fg_complete);
 	enable_irq(di->irq);
 
 	/* Note: cc_lock is still locked */
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index d9686aa..6c8931d 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -73,7 +73,7 @@
 
 	mutex_lock(&battery->lock);
 
-	INIT_COMPLETION(battery->read_completion);
+	reinit_completion(&battery->read_completion);
 
 	enable_irq(battery->irq);
 	battery->cell->enable(battery->pdev);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644
index 0000000..a7c81b5
--- /dev/null
+++ b/drivers/powercap/Kconfig
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+	bool "Generic powercap sysfs driver"
+	help
+	  The power capping sysfs interface allows kernel subsystems to expose power
+	  capping settings to user space in a consistent way.  Usually, it consists
+	  of multiple control types that determine which settings may be exposed and
+	  power zones representing parts of the system that can be subject to power
+	  capping.
+
+	  If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+	tristate "Intel RAPL Support"
+	depends on X86
+	default n
+	---help---
+	  This enables support for the Intel Running Average Power Limit (RAPL)
+	  technology which allows power limits to be enforced and monitored on
+	  modern Intel processors (Sandy Bridge and later).
+
+	  In RAPL, the platform level settings are divided into domains for
+	  fine grained control. These domains include processor package, DRAM
+	  controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+	  1), etc.
+
+endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644
index 0000000..0a21ef3
--- /dev/null
+++ b/drivers/powercap/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP)	+= powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644
index 0000000..2a786c5
--- /dev/null
+++ b/drivers/powercap/intel_rapl.c
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK      0xffffffff
+
+#define POWER_LIMIT1_MASK       0x7FFF
+#define POWER_LIMIT1_ENABLE     BIT(15)
+#define POWER_LIMIT1_CLAMP      BIT(16)
+
+#define POWER_LIMIT2_MASK       (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE     BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP      BIT_ULL(48)
+#define POWER_PACKAGE_LOCK      BIT_ULL(63)
+#define POWER_PP_LOCK           BIT(31)
+
+#define TIME_WINDOW1_MASK       (0x7FULL<<17)
+#define TIME_WINDOW2_MASK       (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET	0
+#define POWER_UNIT_MASK		0x0F
+
+#define ENERGY_UNIT_OFFSET	0x08
+#define ENERGY_UNIT_MASK	0x1F00
+
+#define TIME_UNIT_OFFSET	0x10
+#define TIME_UNIT_MASK		0xF0000
+
+#define POWER_INFO_MAX_MASK     (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK     (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK     (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK     0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK         0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED       BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY         BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE     (1000000)
+#define ENERGY_UNIT_SCALE    (1000000)
+#define TIME_UNIT_SCALE      (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
+
+enum unit_type {
+	ARBITRARY_UNIT, /* no translation */
+	POWER_UNIT,
+	ENERGY_UNIT,
+	TIME_UNIT,
+};
+
+enum rapl_domain_type {
+	RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+	RAPL_DOMAIN_PP0, /* core power plane */
+	RAPL_DOMAIN_PP1, /* graphics uncore */
+	RAPL_DOMAIN_DRAM,/* DRAM control_type */
+	RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+	RAPL_DOMAIN_MSR_LIMIT,
+	RAPL_DOMAIN_MSR_STATUS,
+	RAPL_DOMAIN_MSR_PERF,
+	RAPL_DOMAIN_MSR_POLICY,
+	RAPL_DOMAIN_MSR_INFO,
+	RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+	ENERGY_COUNTER,
+	POWER_LIMIT1,
+	POWER_LIMIT2,
+	FW_LOCK,
+
+	PL1_ENABLE,  /* power limit 1, aka long term */
+	PL1_CLAMP,   /* allow frequency to go below OS request */
+	PL2_ENABLE,  /* power limit 2, aka short term, instantaneous */
+	PL2_CLAMP,
+
+	TIME_WINDOW1, /* long term */
+	TIME_WINDOW2, /* short term */
+	THERMAL_SPEC_POWER,
+	MAX_POWER,
+
+	MIN_POWER,
+	MAX_TIME_WINDOW,
+	THROTTLED_TIME,
+	PRIORITY_LEVEL,
+
+	/* below are not raw primitive data */
+	AVERAGE_POWER,
+	NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc. */
+struct rapl_domain_data {
+	u64 primitives[NR_RAPL_PRIMITIVES];
+	unsigned long timestamp;
+};
+
+
+#define	DOMAIN_STATE_INACTIVE           BIT(0)
+#define	DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED        BIT(2)
+
+#define NR_POWER_LIMITS (2)
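+/* at most two limits per domain: PL1 (long term) and PL2 (short term) */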
+struct rapl_power_limit {
+	struct powercap_zone_constraint *constraint;
+	int prim_id; /* primitive ID used to enable */
+	struct rapl_domain *domain;
+	const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+	const char *name;
+	enum rapl_domain_type id;
+	int msrs[RAPL_DOMAIN_MSR_MAX];
+	struct powercap_zone power_zone;
+	struct rapl_domain_data rdd;
+	struct rapl_power_limit rpl[NR_POWER_LIMITS];
+	u64 attr_map; /* track capabilities */
+	unsigned int state;
+	int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+	container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+	unsigned int id; /* physical package/socket id */
+	unsigned int nr_domains;
+	unsigned long domain_map; /* bit map of active domains */
+	unsigned int power_unit_divisor;
+	unsigned int energy_unit_divisor;
+	unsigned int time_unit_divisor;
+	struct rapl_domain *domains; /* array of domains, sized at runtime */
+	struct powercap_zone *power_zone; /* keep track of parent zone */
+	int nr_cpus; /* active cpus on the package; topology info is lost during
+		      * cpu hotplug, so we have to track it ourselves.
+		      */
+	unsigned long power_limit_irq; /* keep track of package power limit
+					* notify interrupt enable status.
+					*/
+	struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED   BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* Per domain data. Used to describe individual knobs such that the access
+ * functions can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+	const char *name;
+	u64 mask;
+	int shift;
+	enum rapl_domain_msr_id id;
+	enum unit_type unit;
+	u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {	\
+		.name = #p,			\
+		.mask = m,			\
+		.shift = s,			\
+		.id = i,			\
+		.unit = u,			\
+		.flag = f			\
+	}
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+			enum rapl_primitives prim,
+			bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+			enum rapl_primitives prim,
+			unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+			int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+	"package",
+	"core",
+	"uncore",
+	"dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+	struct rapl_package *rp;
+
+	list_for_each_entry(rp, &rapl_packages, plist) {
+		if (rp->id == id)
+			return rp;
+	}
+
+	return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		if (topology_physical_package_id(i) == package_id)
+			return i;
+	}
+	/* all CPUs on this package are offline */
+
+	return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+	struct rapl_package *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+		kfree(p->domains);
+		list_del(&p->plist);
+		kfree(p);
+	}
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+	struct rapl_domain *rd;
+	u64 energy_now;
+
+	/* prevent CPU hotplug, make sure the RAPL domain does not go
+	 * away while reading the counter.
+	 */
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+
+	if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+		*energy_raw = energy_now;
+		put_online_cpus();
+
+		return 0;
+	}
+	put_online_cpus();
+
+	return -EIO;
+}
+
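+/* the energy counter wraps at the raw 32-bit range; report that range
+ * translated to microjoules so user space can account for rollover.
+ */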
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+	*energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+	return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+	struct rapl_package *rp;
+
+	/* package zone is the last zone of a package; we can free
+	 * memory here since all children have been unregistered.
+	 */
+	if (rd->id == RAPL_DOMAIN_PACKAGE) {
+		rp = find_package_by_id(rd->package_id);
+		if (!rp) {
+			dev_warn(&power_zone->dev, "no package id %s\n",
+				rd->name);
+			return -ENODEV;
+		}
+		kfree(rd);
+		rp->domains = NULL;
+	}
+
+	return 0;
+}
+
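+/* count the power limits populated for this domain; rapl_init_domains()
+ * names only the limits a domain supports, so the first unnamed slot
+ * marks the end.
+ */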
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+	int i;
+
+	for (i = 0; i < NR_POWER_LIMITS; i++) {
+		if (rd->rpl[i].name == NULL)
+			break;
+	}
+
+	return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+	int nr_powerlimit;
+
+	if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+		return -EACCES;
+	get_online_cpus();
+	nr_powerlimit = find_nr_power_limit(rd);
+	/* here we activate/deactivate the hardware for power limiting */
+	rapl_write_data_raw(rd, PL1_ENABLE, mode);
+	/* always enable clamp such that p-state can go below OS requested
+	 * range; power capping takes priority over guaranteed frequency.
+	 */
+	rapl_write_data_raw(rd, PL1_CLAMP, mode);
+	/* some domains have pl2 */
+	if (nr_powerlimit > 1) {
+		rapl_write_data_raw(rd, PL2_ENABLE, mode);
+		rapl_write_data_raw(rd, PL2_CLAMP, mode);
+	}
+	put_online_cpus();
+
+	return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+	u64 val;
+
+	if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+		*mode = false;
+		return 0;
+	}
+	get_online_cpus();
+	if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+		put_online_cpus();
+		return -EIO;
+	}
+	*mode = val;
+	put_online_cpus();
+
+	return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+	/* RAPL_DOMAIN_PACKAGE */
+	{
+		.get_energy_uj = get_energy_counter,
+		.get_max_energy_range_uj = get_max_energy_counter,
+		.release = release_zone,
+		.set_enable = set_domain_enable,
+		.get_enable = get_domain_enable,
+	},
+	/* RAPL_DOMAIN_PP0 */
+	{
+		.get_energy_uj = get_energy_counter,
+		.get_max_energy_range_uj = get_max_energy_counter,
+		.release = release_zone,
+		.set_enable = set_domain_enable,
+		.get_enable = get_domain_enable,
+	},
+	/* RAPL_DOMAIN_PP1 */
+	{
+		.get_energy_uj = get_energy_counter,
+		.get_max_energy_range_uj = get_max_energy_counter,
+		.release = release_zone,
+		.set_enable = set_domain_enable,
+		.get_enable = get_domain_enable,
+	},
+	/* RAPL_DOMAIN_DRAM */
+	{
+		.get_energy_uj = get_energy_counter,
+		.get_max_energy_range_uj = get_max_energy_counter,
+		.release = release_zone,
+		.set_enable = set_domain_enable,
+		.get_enable = get_domain_enable,
+	},
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+			u64 power_limit)
+{
+	struct rapl_domain *rd;
+	struct rapl_package *rp;
+	int ret = 0;
+
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+	rp = find_package_by_id(rd->package_id);
+	if (!rp) {
+		ret = -ENODEV;
+		goto set_exit;
+	}
+
+	if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+		dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+			rd->name);
+		ret = -EACCES;
+		goto set_exit;
+	}
+
+	switch (rd->rpl[id].prim_id) {
+	case PL1_ENABLE:
+		rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+		break;
+	case PL2_ENABLE:
+		rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	if (!ret)
+		package_power_limit_irq_save(rd->package_id);
+set_exit:
+	put_online_cpus();
+	return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+					u64 *data)
+{
+	struct rapl_domain *rd;
+	u64 val;
+	int prim;
+	int ret = 0;
+
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+	switch (rd->rpl[id].prim_id) {
+	case PL1_ENABLE:
+		prim = POWER_LIMIT1;
+		break;
+	case PL2_ENABLE:
+		prim = POWER_LIMIT2;
+		break;
+	default:
+		put_online_cpus();
+		return -EINVAL;
+	}
+	if (rapl_read_data_raw(rd, prim, true, &val))
+		ret = -EIO;
+	else
+		*data = val;
+
+	put_online_cpus();
+
+	return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+								u64 window)
+{
+	struct rapl_domain *rd;
+	int ret = 0;
+
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+	switch (rd->rpl[id].prim_id) {
+	case PL1_ENABLE:
+		rapl_write_data_raw(rd, TIME_WINDOW1, window);
+		break;
+	case PL2_ENABLE:
+		rapl_write_data_raw(rd, TIME_WINDOW2, window);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	put_online_cpus();
+	return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+	struct rapl_domain *rd;
+	u64 val;
+	int ret = 0;
+
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+	switch (rd->rpl[id].prim_id) {
+	case PL1_ENABLE:
+		ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+		break;
+	case PL2_ENABLE:
+		ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+		break;
+	default:
+		put_online_cpus();
+		return -EINVAL;
+	}
+	if (!ret)
+		*data = val;
+	put_online_cpus();
+
+	return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+	struct rapl_power_limit *rpl;
+	struct rapl_domain *rd;
+
+	rd = power_zone_to_rapl_domain(power_zone);
+	rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+	return rpl->name;
+}
+
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+					u64 *data)
+{
+	struct rapl_domain *rd;
+	u64 val;
+	int prim;
+	int ret = 0;
+
+	get_online_cpus();
+	rd = power_zone_to_rapl_domain(power_zone);
+	switch (rd->rpl[id].prim_id) {
+	case PL1_ENABLE:
+		prim = THERMAL_SPEC_POWER;
+		break;
+	case PL2_ENABLE:
+		prim = MAX_POWER;
+		break;
+	default:
+		put_online_cpus();
+		return -EINVAL;
+	}
+	if (rapl_read_data_raw(rd, prim, true, &val))
+		ret = -EIO;
+	else
+		*data = val;
+
+	put_online_cpus();
+
+	return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+	.set_power_limit_uw = set_power_limit,
+	.get_power_limit_uw = get_current_power_limit,
+	.set_time_window_us = set_time_window,
+	.get_time_window_us = get_time_window,
+	.get_max_power_uw = get_max_power,
+	.get_name = get_constraint_name,
+};
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+	int i;
+	struct rapl_domain *rd = rp->domains;
+
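+	/* walk the detected domain bitmap; each set bit fills the next slot
+	 * of the pre-sized domains array.
+	 */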
+	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+		unsigned int mask = rp->domain_map & (1 << i);
+		switch (mask) {
+		case BIT(RAPL_DOMAIN_PACKAGE):
+			rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+			rd->id = RAPL_DOMAIN_PACKAGE;
+			rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+			rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+			rd->msrs[2] = MSR_PKG_PERF_STATUS;
+			rd->msrs[3] = 0;
+			rd->msrs[4] = MSR_PKG_POWER_INFO;
+			rd->rpl[0].prim_id = PL1_ENABLE;
+			rd->rpl[0].name = pl1_name;
+			rd->rpl[1].prim_id = PL2_ENABLE;
+			rd->rpl[1].name = pl2_name;
+			break;
+		case BIT(RAPL_DOMAIN_PP0):
+			rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+			rd->id = RAPL_DOMAIN_PP0;
+			rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+			rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+			rd->msrs[2] = 0;
+			rd->msrs[3] = MSR_PP0_POLICY;
+			rd->msrs[4] = 0;
+			rd->rpl[0].prim_id = PL1_ENABLE;
+			rd->rpl[0].name = pl1_name;
+			break;
+		case BIT(RAPL_DOMAIN_PP1):
+			rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+			rd->id = RAPL_DOMAIN_PP1;
+			rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+			rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+			rd->msrs[2] = 0;
+			rd->msrs[3] = MSR_PP1_POLICY;
+			rd->msrs[4] = 0;
+			rd->rpl[0].prim_id = PL1_ENABLE;
+			rd->rpl[0].name = pl1_name;
+			break;
+		case BIT(RAPL_DOMAIN_DRAM):
+			rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+			rd->id = RAPL_DOMAIN_DRAM;
+			rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+			rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+			rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+			rd->msrs[3] = 0;
+			rd->msrs[4] = MSR_DRAM_POWER_INFO;
+			rd->rpl[0].prim_id = PL1_ENABLE;
+			rd->rpl[0].name = pl1_name;
+			break;
+		}
+		if (mask) {
+			rd->package_id = rp->id;
+			rd++;
+		}
+	}
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+			int to_raw)
+{
+	u64 divisor = 1;
+	int scale = 1; /* scale to user friendly data without floating point */
+	u64 f, y; /* fraction and exp. used for time unit */
+	struct rapl_package *rp;
+
+	rp = find_package_by_id(package);
+	if (!rp)
+		return value;
+
+	switch (type) {
+	case POWER_UNIT:
+		divisor = rp->power_unit_divisor;
+		scale = POWER_UNIT_SCALE;
+		break;
+	case ENERGY_UNIT:
+		scale = ENERGY_UNIT_SCALE;
+		divisor = rp->energy_unit_divisor;
+		break;
+	case TIME_UNIT:
+		divisor = rp->time_unit_divisor;
+		scale = TIME_UNIT_SCALE;
+		/* special processing: time is encoded as 2^Y*(1+F/4) raw time
+		 * units; refer to the Intel Software Developer's Manual,
+		 * Vol. 3a, CH 14.7.4.
+		 */
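+		/* e.g. y=1, f=1 with the typical time unit divisor of 1024
+		 * decodes to 2^1 * (1 + 1/4) / 1024 s, roughly 2.4 ms.
+		 */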
+		if (!to_raw) {
+			f = (value & 0x60) >> 5;
+			y = value & 0x1f;
+			value = (1 << y) * (4 + f) * scale / 4;
+			return div64_u64(value, divisor);
+		} else {
+			do_div(value, scale);
+			value *= divisor;
+			y = ilog2(value);
+			f = div64_u64(4 * (value - (1 << y)), 1 << y);
+			value = (y & 0x1f) | ((f & 0x3) << 5);
+			return value;
+		}
+		break;
+	case ARBITRARY_UNIT:
+	default:
+		return value;
+	}
+
+	if (to_raw)
+		return div64_u64(value * divisor, scale);
+	else
+		return div64_u64(value * scale, divisor);
+}
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+	/* name, mask, shift, msr index, unit, flag */
+	PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+				RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+				RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+	PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+				RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+	PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+				RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+				RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+				RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+				RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+				RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+	PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+				RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+	PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+				RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+	PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+				0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+	PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+				RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+	PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+				RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+	PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+				RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+	PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+				RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+	PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+				RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+	/* non-hardware */
+	PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+				RAPL_PRIMITIVE_DERIVED),
+	{NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * If the xlate flag is set, return translated data based on data units, i.e.
+ * time, energy, and power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use primitive info to allow writing consolidated access
+ * functions.
+ * A given primitive is extracted from its MSR using the mask and shift. Unit
+ * conversion is pre-assigned based on the RAPL unit MSRs read at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * |                           xxxxx (mask)                   |
+ * |                                |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
+static int rapl_read_data_raw(struct rapl_domain *rd,
+			enum rapl_primitives prim,
+			bool xlate, u64 *data)
+{
+	u64 value, final;
+	u32 msr;
+	struct rapl_primitive_info *rp = &rpi[prim];
+	int cpu;
+
+	if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+		return -EINVAL;
+
+	msr = rd->msrs[rp->id];
+	if (!msr)
+		return -EINVAL;
+	/* use physical package id to look up active cpus */
+	cpu = find_active_cpu_on_package(rd->package_id);
+	if (cpu < 0)
+		return cpu;
+
+	/* special-case package domain, which uses a different bit */
+	if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+		rp->mask = POWER_PACKAGE_LOCK;
+		rp->shift = 63;
+	}
+	/* non-hardware data are collected by the polling thread */
+	if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+		*data = rd->rdd.primitives[prim];
+		return 0;
+	}
+
+	if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+		pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+		return -EIO;
+	}
+
+	final = value & rp->mask;
+	final = final >> rp->shift;
+	if (xlate)
+		*data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+	else
+		*data = final;
+
+	return 0;
+}
+
+/* Similar use of primitive info in the read counterpart */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+			enum rapl_primitives prim,
+			unsigned long long value)
+{
+	u64 msr_val;
+	u32 msr;
+	struct rapl_primitive_info *rp = &rpi[prim];
+	int cpu;
+
+	cpu = find_active_cpu_on_package(rd->package_id);
+	if (cpu < 0)
+		return cpu;
+	msr = rd->msrs[rp->id];
+	if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+		dev_dbg(&rd->power_zone.dev,
+			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
+		return -EIO;
+	}
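+	/* read-modify-write: convert to raw units, then update only the
+	 * field selected by this primitive's mask and shift.
+	 */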
+	value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+	msr_val &= ~rp->mask;
+	msr_val |= value << rp->shift;
+	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+		dev_dbg(&rd->power_zone.dev,
+			"failed to write msr 0x%x on cpu %d\n", msr, cpu);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+	u64 msr_val;
+	u32 value;
+
+	if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+		pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+			MSR_RAPL_POWER_UNIT, cpu);
+		return -ENODEV;
+	}
+
+	/* Raw RAPL data stored in MSRs are in hardware-defined scales. We need to
+	 * convert them into standard units based on the divisors reported in
+	 * the RAPL unit MSRs.
+	 * i.e.
+	 * energy unit: 1/energy_unit_divisor Joules
+	 * power unit: 1/power_unit_divisor Watts
+	 * time unit: 1/time_unit_divisor Seconds
+	 */
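+	/* e.g. an energy unit field of 16 yields a divisor of 2^16, so one
+	 * raw energy count is 1/65536 J, about 15.3 uJ.
+	 */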
+	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+	rp->energy_unit_divisor = 1 << value;
+
+	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+	rp->power_unit_divisor = 1 << value;
+
+	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+	rp->time_unit_divisor = 1 << value;
+
+	pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+		rp->id,
+		rp->energy_unit_divisor,
+		rp->time_unit_divisor,
+		rp->power_unit_divisor);
+
+	return 0;
+}
+
+/* REVISIT:
+ * When package power limit is set artificially low by RAPL, LVT
+ * thermal interrupt for package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+	u32 l, h = 0;
+	int cpu;
+	struct rapl_package *rp;
+
+	rp = find_package_by_id(package_id);
+	if (!rp)
+		return;
+
+	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+		return;
+
+	cpu = find_active_cpu_on_package(package_id);
+	if (cpu < 0)
+		return;
+	/* save the state of PLN irq mask bit before disabling it */
+	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+	}
+	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+	u32 l, h;
+	int cpu;
+	struct rapl_package *rp;
+
+	rp = find_package_by_id(package_id);
+	if (!rp)
+		return;
+
+	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+		return;
+
+	cpu = find_active_cpu_on_package(package_id);
+	if (cpu < 0)
+		return;
+
+	/* irq enable state not saved, nothing to restore */
+	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+		return;
+	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+		l |= PACKAGE_THERM_INT_PLN_ENABLE;
+	else
+		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
+	{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+	{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
+	{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+	/* TODO: Add more CPU IDs after testing */
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* read once for all raw primitive data for all packages, domains */
+static void rapl_update_domain_data(void)
+{
+	int dmn, prim;
+	u64 val;
+	struct rapl_package *rp;
+
+	list_for_each_entry(rp, &rapl_packages, plist) {
+		for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+			pr_debug("update package %d domain %s data\n", rp->id,
+				rp->domains[dmn].name);
+			/* exclude non-raw primitives */
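+			/* a non-ARBITRARY unit type doubles as the xlate flag */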
+			for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+				if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+								rpi[prim].unit,
+								&val))
+					rp->domains[dmn].rdd.primitives[prim] =
+									val;
+		}
+	}
+}
+
+static int rapl_unregister_powercap(void)
+{
+	struct rapl_package *rp;
+	struct rapl_domain *rd, *rd_package = NULL;
+
+	/* unregister all active rapl packages from the powercap layer,
+	 * hotplug lock held
+	 */
+	list_for_each_entry(rp, &rapl_packages, plist) {
+		package_power_limit_irq_restore(rp->id);
+
+		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+		     rd++) {
+			pr_debug("remove package, undo power limit on %d: %s\n",
+				rp->id, rd->name);
+			rapl_write_data_raw(rd, PL1_ENABLE, 0);
+			rapl_write_data_raw(rd, PL2_ENABLE, 0);
+			rapl_write_data_raw(rd, PL1_CLAMP, 0);
+			rapl_write_data_raw(rd, PL2_CLAMP, 0);
+			if (rd->id == RAPL_DOMAIN_PACKAGE) {
+				rd_package = rd;
+				continue;
+			}
+			powercap_unregister_zone(control_type, &rd->power_zone);
+		}
+		/* do the package zone last */
+		if (rd_package)
+			powercap_unregister_zone(control_type,
+						&rd_package->power_zone);
+	}
+	powercap_unregister_control_type(control_type);
+
+	return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+	struct rapl_domain *rd;
+	int ret = 0;
+	char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
+	struct powercap_zone *power_zone = NULL;
+	int nr_pl;
+
+	/* first we register package domain as the parent zone*/
+	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+		if (rd->id == RAPL_DOMAIN_PACKAGE) {
+			nr_pl = find_nr_power_limit(rd);
+			pr_debug("register socket %d package domain %s\n",
+				rp->id, rd->name);
+			memset(dev_name, 0, sizeof(dev_name));
+			snprintf(dev_name, sizeof(dev_name), "%s-%d",
+				rd->name, rp->id);
+			power_zone = powercap_register_zone(&rd->power_zone,
+							control_type,
+							dev_name, NULL,
+							&zone_ops[rd->id],
+							nr_pl,
+							&constraint_ops);
+			if (IS_ERR(power_zone)) {
+				pr_debug("failed to register package, %d\n",
+					rp->id);
+				ret = PTR_ERR(power_zone);
+				goto exit_package;
+			}
+			/* track parent zone in per package/socket data */
+			rp->power_zone = power_zone;
+			/* done, only one package domain per socket */
+			break;
+		}
+	}
+	if (!power_zone) {
+		pr_err("no package domain found, unknown topology!\n");
+		ret = -ENODEV;
+		goto exit_package;
+	}
+	/* now register the other domains as children of the socket/package */
+	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+		if (rd->id == RAPL_DOMAIN_PACKAGE)
+			continue;
+		/* number of power limits per domain varies */
+		nr_pl = find_nr_power_limit(rd);
+		power_zone = powercap_register_zone(&rd->power_zone,
+						control_type, rd->name,
+						rp->power_zone,
+						&zone_ops[rd->id], nr_pl,
+						&constraint_ops);
+
+		if (IS_ERR(power_zone)) {
+			pr_debug("failed to register power_zone, %d:%s:%s\n",
+				rp->id, rd->name, dev_name);
+			ret = PTR_ERR(power_zone);
+			goto err_cleanup;
+		}
+	}
+
+exit_package:
+	return ret;
+err_cleanup:
+	/* clean up previously initialized domains within the package if we
+	 * failed after the first domain setup.
+	 */
+	while (--rd >= rp->domains) {
+		pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+		powercap_unregister_zone(control_type, &rd->power_zone);
+	}
+
+	return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+	struct rapl_domain *rd;
+	struct rapl_package *rp;
+	int ret = 0;
+
+	control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+	if (IS_ERR(control_type)) {
+		pr_debug("failed to register powercap control_type.\n");
+		return PTR_ERR(control_type);
+	}
+	/* read the initial data */
+	rapl_update_domain_data();
+	list_for_each_entry(rp, &rapl_packages, plist)
+		if (rapl_package_register_powercap(rp))
+			goto err_cleanup_package;
+	return ret;
+
+err_cleanup_package:
+	/* clean up previously initialized packages */
+	list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+		     rd++) {
+			pr_debug("unregister zone/package %d, %s domain\n",
+				rp->id, rd->name);
+			powercap_unregister_zone(control_type, &rd->power_zone);
+		}
+	}
+
+	return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+	unsigned msr;
+	u64 val1, val2 = 0;
+	int retry = 0;
+
+	switch (domain) {
+	case RAPL_DOMAIN_PACKAGE:
+		msr = MSR_PKG_ENERGY_STATUS;
+		break;
+	case RAPL_DOMAIN_PP0:
+		msr = MSR_PP0_ENERGY_STATUS;
+		break;
+	case RAPL_DOMAIN_PP1:
+		msr = MSR_PP1_ENERGY_STATUS;
+		break;
+	case RAPL_DOMAIN_DRAM:
+		msr = MSR_DRAM_ENERGY_STATUS;
+		break;
+	default:
+		pr_err("invalid domain id %d\n", domain);
+		return -EINVAL;
+	}
+	if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+		return -ENODEV;
+
+	/* energy counters roll slowly on some domains */
+	while (++retry < 10) {
+		usleep_range(10000, 15000);
+		rdmsrl_safe_on_cpu(cpu, msr, &val2);
+		if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+			return 0;
+	}
+	/* if energy counter does not change, report as bad domain */
+	pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+		rapl_domain_names[domain], val1, val2);
+
+	return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU; the caller must
+ * ensure the CPU belongs to the targeted package and that CPU hotplug is
+ * disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+	int i;
+	int ret = 0;
+	struct rapl_domain *rd;
+	u64 locked;
+
+	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+		/* use physical package id to read counters */
+		if (!rapl_check_domain(cpu, i))
+			rp->domain_map |= 1 << i;
+	}
+	rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+	if (!rp->nr_domains) {
+		pr_err("no valid rapl domains found in package %d\n", rp->id);
+		ret = -ENODEV;
+		goto done;
+	}
+	pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+	rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+			GFP_KERNEL);
+	if (!rp->domains) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	rapl_init_domains(rp);
+
+	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+		/* check if the domain is locked by BIOS */
+		if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+			pr_info("RAPL package %d domain %s locked by BIOS\n",
+				rp->id, rd->name);
+			rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+		}
+	}
+
+done:
+	return ret;
+}
+
+static bool is_package_new(int package)
+{
+	struct rapl_package *rp;
+
+	/* The caller prevents CPU hotplug, so no packages are added or
+	 * deleted while we traverse the package list; no locking needed.
+	 */
+	list_for_each_entry(rp, &rapl_packages, plist)
+		if (package == rp->id)
+			return false;
+
+	return true;
+}
+
+/* The RAPL interface is a two-level hierarchy: package level and domain
+ * level. We first detect the number of packages, then the domains of each
+ * package. We have to consider the possibility of CPUs going online/offline
+ * due to hotplug and other scenarios.
+ */
+static int rapl_detect_topology(void)
+{
+	int i;
+	int phy_package_id;
+	struct rapl_package *new_package, *rp;
+
+	for_each_online_cpu(i) {
+		phy_package_id = topology_physical_package_id(i);
+		if (is_package_new(phy_package_id)) {
+			new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+			if (!new_package) {
+				rapl_cleanup_data();
+				return -ENOMEM;
+			}
+			/* add the new package to the list */
+			new_package->id = phy_package_id;
+			new_package->nr_cpus = 1;
+
+			/* check if the package contains valid domains */
+			if (rapl_detect_domains(new_package, i) ||
+				rapl_check_unit(new_package, i)) {
+				kfree(new_package->domains);
+				kfree(new_package);
+				/* free up the packages already initialized */
+				rapl_cleanup_data();
+				return -ENODEV;
+			}
+			INIT_LIST_HEAD(&new_package->plist);
+			list_add(&new_package->plist, &rapl_packages);
+		} else {
+			rp = find_package_by_id(phy_package_id);
+			if (rp)
+				++rp->nr_cpus;
+		}
+	}
+
+	return 0;
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+	struct rapl_domain *rd, *rd_package = NULL;
+
+	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+		if (rd->id == RAPL_DOMAIN_PACKAGE) {
+			rd_package = rd;
+			continue;
+		}
+		pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+		powercap_unregister_zone(control_type, &rd->power_zone);
+	}
+	/* do the parent zone last; guard against a missing package domain */
+	if (rd_package)
+		powercap_unregister_zone(control_type,
+					 &rd_package->power_zone);
+	list_del(&rp->plist);
+	kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+	int ret = 0;
+	int phy_package_id;
+	struct rapl_package *rp;
+
+	phy_package_id = topology_physical_package_id(cpu);
+	rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+	if (!rp)
+		return -ENOMEM;
+
+	/* add the new package to the list */
+	rp->id = phy_package_id;
+	rp->nr_cpus = 1;
+	/* check if the package contains valid domains */
+	if (rapl_detect_domains(rp, cpu) ||
+		rapl_check_unit(rp, cpu)) {
+		ret = -ENODEV;
+		goto err_free_package;
+	}
+	ret = rapl_package_register_powercap(rp);
+	if (!ret) {
+		INIT_LIST_HEAD(&rp->plist);
+		list_add(&rp->plist, &rapl_packages);
+		return 0;
+	}
+
+err_free_package:
+	kfree(rp->domains);
+	kfree(rp);
+
+	return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU goes online as the first CPU of the physical package
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
+static int rapl_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	int phy_package_id;
+	struct rapl_package *rp;
+
+	phy_package_id = topology_physical_package_id(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		rp = find_package_by_id(phy_package_id);
+		if (rp)
+			++rp->nr_cpus;
+		else
+			rapl_add_package(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		rp = find_package_by_id(phy_package_id);
+		if (!rp)
+			break;
+		if (--rp->nr_cpus == 0)
+			rapl_remove_package(rp);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+	.notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+	int ret = 0;
+
+	if (!x86_match_cpu(rapl_ids)) {
+		pr_err("driver does not support CPU family %d model %d\n",
+			boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+		return -ENODEV;
+	}
+	/* prevent CPU hotplug during detection */
+	get_online_cpus();
+	ret = rapl_detect_topology();
+	if (ret)
+		goto done;
+
+	if (rapl_register_powercap()) {
+		rapl_cleanup_data();
+		ret = -ENODEV;
+		goto done;
+	}
+	register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+	put_online_cpus();
+
+	return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+	get_online_cpus();
+	unregister_hotcpu_notifier(&rapl_cpu_notifier);
+	rapl_unregister_powercap();
+	rapl_cleanup_data();
+	put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644
index 0000000..8d0fe43
--- /dev/null
+++ b/drivers/powercap/powercap_sys.c
@@ -0,0 +1,685 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+			container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr)		\
+static ssize_t _attr##_show(struct device *dev, \
+					struct device_attribute *dev_attr,\
+					char *buf) \
+{ \
+	u64 value; \
+	ssize_t len = -EINVAL; \
+	struct powercap_zone *power_zone = to_powercap_zone(dev); \
+	\
+	if (power_zone->ops->get_##_attr) { \
+		if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+			len = sprintf(buf, "%lld\n", value); \
+	} \
+	\
+	return len; \
+}
+
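+/* As an example, define_power_zone_show(energy_uj) expands to an
+ * energy_uj_show() helper; DEVICE_ATTR_RW(energy_uj) below then binds it
+ * to the sysfs "energy_uj" attribute.
+ */
+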
+/* The only meaningful input is 0 (reset), others are silently ignored */
+#define define_power_zone_store(_attr)		\
+static ssize_t _attr##_store(struct device *dev,\
+				struct device_attribute *dev_attr, \
+				const char *buf, size_t count) \
+{ \
+	int err; \
+	struct powercap_zone *power_zone = to_powercap_zone(dev); \
+	u64 value; \
+	\
+	err = kstrtoull(buf, 10, &value); \
+	if (err) \
+		return -EINVAL; \
+	if (value) \
+		return count; \
+	if (power_zone->ops->reset_##_attr) { \
+		if (!power_zone->ops->reset_##_attr(power_zone)) \
+			return count; \
+	} \
+	\
+	return -EINVAL; \
+}
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+				struct device_attribute *dev_attr,\
+				char *buf) \
+{ \
+	u64 value; \
+	ssize_t len = -ENODATA; \
+	struct powercap_zone *power_zone = to_powercap_zone(dev); \
+	int id; \
+	struct powercap_zone_constraint *pconst;\
+	\
+	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+		return -EINVAL; \
+	if (id >= power_zone->const_id_cnt)	\
+		return -EINVAL; \
+	pconst = &power_zone->constraints[id]; \
+	if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+		if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+			len = sprintf(buf, "%lld\n", value); \
+	} \
+	\
+	return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+				struct device_attribute *dev_attr, \
+				const char *buf, size_t count) \
+{ \
+	int err; \
+	u64 value; \
+	struct powercap_zone *power_zone = to_powercap_zone(dev); \
+	int id; \
+	struct powercap_zone_constraint *pconst;\
+	\
+	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+		return -EINVAL; \
+	if (id >= power_zone->const_id_cnt)	\
+		return -EINVAL; \
+	pconst = &power_zone->constraints[id]; \
+	err = kstrtoull(buf, 10, &value); \
+	if (err) \
+		return -EINVAL; \
+	if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+		if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+			return count; \
+	} \
+	\
+	return -ENODATA; \
+}
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
+
+/* Power zone constraint attributes callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+	struct device_attribute power_limit_attr;
+	struct device_attribute time_window_attr;
+	struct device_attribute max_power_attr;
+	struct device_attribute min_power_attr;
+	struct device_attribute max_time_window_attr;
+	struct device_attribute min_time_window_attr;
+	struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+				constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN	30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+				struct device_attribute *dev_attr,
+				char *buf)
+{
+	const char *name;
+	struct powercap_zone *power_zone = to_powercap_zone(dev);
+	int id;
+	ssize_t len = -ENODATA;
+	struct powercap_zone_constraint *pconst;
+
+	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+		return -EINVAL;
+	if (id >= power_zone->const_id_cnt)
+		return -EINVAL;
+	pconst = &power_zone->constraints[id];
+
+	if (pconst && pconst->ops && pconst->ops->get_name) {
+		name = pconst->ops->get_name(power_zone, id);
+		if (name) {
+			snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+								"%s\n", name);
+			buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+			len = strlen(buf);
+		}
+	}
+
+	return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+				int mode,
+				struct device_attribute *dev_attr,
+				ssize_t (*show)(struct device *,
+					struct device_attribute *, char *),
+				ssize_t (*store)(struct device *,
+					struct device_attribute *,
+				const char *, size_t)
+				)
+{
+
+	dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+								id, name);
+	if (!dev_attr->attr.name)
+		return -ENOMEM;
+	dev_attr->attr.mode = mode;
+	dev_attr->show = show;
+	dev_attr->store = store;
+
+	return 0;
+}
+
+static void free_constraint_attributes(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+		kfree(constraint_attrs[i].power_limit_attr.attr.name);
+		kfree(constraint_attrs[i].time_window_attr.attr.name);
+		kfree(constraint_attrs[i].name_attr.attr.name);
+		kfree(constraint_attrs[i].max_power_attr.attr.name);
+		kfree(constraint_attrs[i].min_power_attr.attr.name);
+		kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+		kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+	}
+}
+
+static int seed_constraint_attributes(void)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+		ret = create_constraint_attribute(i, "power_limit_uw",
+					S_IWUSR | S_IRUGO,
+					&constraint_attrs[i].power_limit_attr,
+					show_constraint_power_limit_uw,
+					store_constraint_power_limit_uw);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "time_window_us",
+					S_IWUSR | S_IRUGO,
+					&constraint_attrs[i].time_window_attr,
+					show_constraint_time_window_us,
+					store_constraint_time_window_us);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "name", S_IRUGO,
+				&constraint_attrs[i].name_attr,
+				show_constraint_name,
+				NULL);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+				&constraint_attrs[i].max_power_attr,
+				show_constraint_max_power_uw,
+				NULL);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+				&constraint_attrs[i].min_power_attr,
+				show_constraint_min_power_uw,
+				NULL);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "max_time_window_us",
+				S_IRUGO,
+				&constraint_attrs[i].max_time_window_attr,
+				show_constraint_max_time_window_us,
+				NULL);
+		if (ret)
+			goto err_alloc;
+		ret = create_constraint_attribute(i, "min_time_window_us",
+				S_IRUGO,
+				&constraint_attrs[i].min_time_window_attr,
+				show_constraint_min_time_window_us,
+				NULL);
+		if (ret)
+			goto err_alloc;
+
+	}
+
+	return 0;
+
+err_alloc:
+	free_constraint_attributes();
+
+	return ret;
+}
+
+static int create_constraints(struct powercap_zone *power_zone,
+				int nr_constraints,
+				struct powercap_zone_constraint_ops *const_ops)
+{
+	int i;
+	int ret = 0;
+	int count;
+	struct powercap_zone_constraint *pconst;
+
+	if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+					!const_ops->set_power_limit_uw ||
+					!const_ops->get_time_window_us ||
+					!const_ops->set_time_window_us)
+		return -EINVAL;
+
+	count = power_zone->zone_attr_count;
+	for (i = 0; i < nr_constraints; ++i) {
+		pconst = &power_zone->constraints[i];
+		pconst->ops = const_ops;
+		pconst->id = power_zone->const_id_cnt;
+		power_zone->const_id_cnt++;
+		power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].power_limit_attr.attr;
+		power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].time_window_attr.attr;
+		if (pconst->ops->get_name)
+			power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].name_attr.attr;
+		if (pconst->ops->get_max_power_uw)
+			power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].max_power_attr.attr;
+		if (pconst->ops->get_min_power_uw)
+			power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].min_power_attr.attr;
+		if (pconst->ops->get_max_time_window_us)
+			power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].max_time_window_attr.attr;
+		if (pconst->ops->get_min_time_window_us)
+			power_zone->zone_dev_attrs[count++] =
+				&constraint_attrs[i].min_time_window_attr.attr;
+	}
+	power_zone->zone_attr_count = count;
+
+	return ret;
+}
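+
+/* The attributes seeded above surface as per-constraint sysfs files such
+ * as constraint_0_power_limit_uw and constraint_0_time_window_us; the
+ * show/store helpers recover the constraint id from the attribute name
+ * via sscanf("constraint_%d_").
+ */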
+
+static bool control_type_valid(void *control_type)
+{
+	struct powercap_control_type *pos = NULL;
+	bool found = false;
+
+	mutex_lock(&powercap_cntrl_list_lock);
+
+	list_for_each_entry(pos, &powercap_cntrl_list, node) {
+		if (pos == control_type) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&powercap_cntrl_list_lock);
+
+	return found;
+}
+
+static ssize_t name_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+	return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Create zone and attributes in sysfs */
+static void create_power_zone_common_attributes(
+					struct powercap_zone *power_zone)
+{
+	int count = 0;
+
+	power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+	if (power_zone->ops->get_max_energy_range_uj)
+		power_zone->zone_dev_attrs[count++] =
+					&dev_attr_max_energy_range_uj.attr;
+	if (power_zone->ops->get_energy_uj)
+		power_zone->zone_dev_attrs[count++] =
+					&dev_attr_energy_uj.attr;
+	if (power_zone->ops->get_power_uw)
+		power_zone->zone_dev_attrs[count++] =
+					&dev_attr_power_uw.attr;
+	if (power_zone->ops->get_max_power_range_uw)
+		power_zone->zone_dev_attrs[count++] =
+					&dev_attr_max_power_range_uw.attr;
+	power_zone->zone_dev_attrs[count] = NULL;
+	power_zone->zone_attr_count = count;
+}
+
+static void powercap_release(struct device *dev)
+{
+	bool allocated;
+
+	if (dev->parent) {
+		struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+		/* Store flag as the release() may free memory */
+		allocated = power_zone->allocated;
+		/* Remove id from parent idr struct */
+		idr_remove(power_zone->parent_idr, power_zone->id);
+		/* Destroy idrs allocated for this zone */
+		idr_destroy(&power_zone->idr);
+		kfree(power_zone->name);
+		kfree(power_zone->zone_dev_attrs);
+		kfree(power_zone->constraints);
+		if (power_zone->ops->release)
+			power_zone->ops->release(power_zone);
+		if (allocated)
+			kfree(power_zone);
+	} else {
+		struct powercap_control_type *control_type =
+						to_powercap_control_type(dev);
+
+		/* Store flag as the release() may free memory */
+		allocated = control_type->allocated;
+		idr_destroy(&control_type->idr);
+		mutex_destroy(&control_type->lock);
+		if (control_type->ops && control_type->ops->release)
+			control_type->ops->release(control_type);
+		if (allocated)
+			kfree(control_type);
+	}
+}
+
+static ssize_t enabled_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	bool mode = true;
+
+	/* Default is enabled */
+	if (dev->parent) {
+		struct powercap_zone *power_zone = to_powercap_zone(dev);
+		if (power_zone->ops->get_enable)
+			if (power_zone->ops->get_enable(power_zone, &mode))
+				mode = false;
+	} else {
+		struct powercap_control_type *control_type =
+						to_powercap_control_type(dev);
+		if (control_type->ops && control_type->ops->get_enable)
+			if (control_type->ops->get_enable(control_type, &mode))
+				mode = false;
+	}
+
+	return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,  size_t len)
+{
+	bool mode;
+
+	if (strtobool(buf, &mode))
+		return -EINVAL;
+	if (dev->parent) {
+		struct powercap_zone *power_zone = to_powercap_zone(dev);
+		if (power_zone->ops->set_enable)
+			if (!power_zone->ops->set_enable(power_zone, mode))
+				return len;
+	} else {
+		struct powercap_control_type *control_type =
+						to_powercap_control_type(dev);
+		if (control_type->ops && control_type->ops->set_enable)
+			if (!control_type->ops->set_enable(control_type, mode))
+				return len;
+	}
+
+	return -ENOSYS;
+}
+
+static DEVICE_ATTR_RW(enabled);
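+
+/* The "enabled" attribute is shared: on a control-type device (which has
+ * no parent) it toggles the whole control type, while on a zone device it
+ * toggles only that zone.
+ */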
+
+static struct attribute *powercap_attrs[] = {
+	&dev_attr_enabled.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(powercap);
+
+static struct class powercap_class = {
+	.name = "powercap",
+	.dev_release = powercap_release,
+	.dev_groups = powercap_groups,
+};
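+
+/* Illustrative sysfs layout (assuming a control type named "intel-rapl"
+ * with one package zone and one sub-zone):
+ *
+ *	/sys/class/powercap/intel-rapl/
+ *	/sys/class/powercap/intel-rapl/intel-rapl:0/		(package)
+ *	/sys/class/powercap/intel-rapl/intel-rapl:0/intel-rapl:0:0/
+ *
+ * Zone directory names come from dev_set_name("%s:%x", parent, id) in
+ * powercap_register_zone() below.
+ */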
+
+struct powercap_zone *powercap_register_zone(
+				struct powercap_zone *power_zone,
+				struct powercap_control_type *control_type,
+				const char *name,
+				struct powercap_zone *parent,
+				const struct powercap_zone_ops *ops,
+				int nr_constraints,
+				struct powercap_zone_constraint_ops *const_ops)
+{
+	int result;
+	int nr_attrs;
+
+	if (!name || !control_type || !ops ||
+			nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+			(!ops->get_energy_uj && !ops->get_power_uw) ||
+			!control_type_valid(control_type))
+		return ERR_PTR(-EINVAL);
+
+	if (power_zone) {
+		if (!ops->release)
+			return ERR_PTR(-EINVAL);
+		memset(power_zone, 0, sizeof(*power_zone));
+	} else {
+		power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+		if (!power_zone)
+			return ERR_PTR(-ENOMEM);
+		power_zone->allocated = true;
+	}
+	power_zone->ops = ops;
+	power_zone->control_type_inst = control_type;
+	if (!parent) {
+		power_zone->dev.parent = &control_type->dev;
+		power_zone->parent_idr = &control_type->idr;
+	} else {
+		power_zone->dev.parent = &parent->dev;
+		power_zone->parent_idr = &parent->idr;
+	}
+	power_zone->dev.class = &powercap_class;
+
+	mutex_lock(&control_type->lock);
+	/* Using idr to get the unique id */
+	result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+	if (result < 0)
+		goto err_idr_alloc;
+
+	power_zone->id = result;
+	idr_init(&power_zone->idr);
+	power_zone->name = kstrdup(name, GFP_KERNEL);
+	if (!power_zone->name) {
+		result = -ENOMEM;
+		goto err_name_alloc;
+	}
+	dev_set_name(&power_zone->dev, "%s:%x",
+					dev_name(power_zone->dev.parent),
+					power_zone->id);
+	power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+					 nr_constraints, GFP_KERNEL);
+	if (!power_zone->constraints) {
+		result = -ENOMEM;
+		goto err_const_alloc;
+	}
+
+	nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+						POWERCAP_ZONE_MAX_ATTRS + 1;
+	power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+						nr_attrs, GFP_KERNEL);
+	if (!power_zone->zone_dev_attrs) {
+		result = -ENOMEM;
+		goto err_attr_alloc;
+	}
+	create_power_zone_common_attributes(power_zone);
+	result = create_constraints(power_zone, nr_constraints, const_ops);
+	if (result)
+		goto err_dev_ret;
+
+	power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+	power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+	power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+	power_zone->dev_attr_groups[1] = NULL;
+	power_zone->dev.groups = power_zone->dev_attr_groups;
+	result = device_register(&power_zone->dev);
+	if (result)
+		goto err_dev_ret;
+
+	control_type->nr_zones++;
+	mutex_unlock(&control_type->lock);
+
+	return power_zone;
+
+err_dev_ret:
+	kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+	kfree(power_zone->constraints);
+err_const_alloc:
+	kfree(power_zone->name);
+err_name_alloc:
+	idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+	if (power_zone->allocated)
+		kfree(power_zone);
+	mutex_unlock(&control_type->lock);
+
+	return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
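+
+/* Typical client usage (a sketch only; the zone and ops names here are
+ * hypothetical):
+ *
+ *	zone = powercap_register_zone(&my_zone, ct, "package-0", NULL,
+ *				      &my_zone_ops, 2, &my_constraint_ops);
+ *	if (IS_ERR(zone))
+ *		return PTR_ERR(zone);
+ */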
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+				struct powercap_zone *power_zone)
+{
+	if (!power_zone || !control_type)
+		return -EINVAL;
+
+	mutex_lock(&control_type->lock);
+	control_type->nr_zones--;
+	mutex_unlock(&control_type->lock);
+
+	device_unregister(&power_zone->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
+struct powercap_control_type *powercap_register_control_type(
+				struct powercap_control_type *control_type,
+				const char *name,
+				const struct powercap_control_type_ops *ops)
+{
+	int result;
+
+	if (!name)
+		return ERR_PTR(-EINVAL);
+	if (control_type) {
+		if (!ops || !ops->release)
+			return ERR_PTR(-EINVAL);
+		memset(control_type, 0, sizeof(*control_type));
+	} else {
+		control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+		if (!control_type)
+			return ERR_PTR(-ENOMEM);
+		control_type->allocated = true;
+	}
+	mutex_init(&control_type->lock);
+	control_type->ops = ops;
+	INIT_LIST_HEAD(&control_type->node);
+	control_type->dev.class = &powercap_class;
+	dev_set_name(&control_type->dev, "%s", name);
+	result = device_register(&control_type->dev);
+	if (result) {
+		if (control_type->allocated)
+			kfree(control_type);
+		return ERR_PTR(result);
+	}
+	idr_init(&control_type->idr);
+
+	mutex_lock(&powercap_cntrl_list_lock);
+	list_add_tail(&control_type->node, &powercap_cntrl_list);
+	mutex_unlock(&powercap_cntrl_list_lock);
+
+	return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+	struct powercap_control_type *pos = NULL;
+
+	if (control_type->nr_zones) {
+		dev_err(&control_type->dev, "Zones of this type are still not freed\n");
+		return -EINVAL;
+	}
+	mutex_lock(&powercap_cntrl_list_lock);
+	list_for_each_entry(pos, &powercap_cntrl_list, node) {
+		if (pos == control_type) {
+			list_del(&control_type->node);
+			mutex_unlock(&powercap_cntrl_list_lock);
+			device_unregister(&control_type->dev);
+			return 0;
+		}
+	}
+	mutex_unlock(&powercap_cntrl_list_lock);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
+
+static int __init powercap_init(void)
+{
+	int result = 0;
+
+	result = seed_constraint_attributes();
+	if (result)
+		return result;
+
+	result = class_register(&powercap_class);
+
+	return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 9966124..f41bacf 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -201,7 +201,7 @@
 	.driver		= {
 		.name	= PPS_GPIO_NAME,
 		.owner	= THIS_MODULE,
-		.of_match_table	= of_match_ptr(pps_gpio_dt_ids),
+		.of_match_table	= pps_gpio_dt_ids,
 	},
 };
 
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 75840b5..eece329 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -62,6 +62,15 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-bfin.
 
+config PWM_EP93XX
+	tristate "Cirrus Logic EP93xx PWM support"
+	depends on ARCH_EP93XX
+	help
+	  Generic PWM framework driver for Cirrus Logic EP93xx.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called pwm-ep93xx.
+
 config PWM_IMX
 	tristate "i.MX PWM support"
 	depends on ARCH_MXC
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 77a8c18..8b754e4 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_PWM_AB8500)	+= pwm-ab8500.o
 obj-$(CONFIG_PWM_ATMEL_TCB)	+= pwm-atmel-tcb.o
 obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
+obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_IMX)		+= pwm-imx.o
 obj-$(CONFIG_PWM_JZ4740)	+= pwm-jz4740.o
 obj-$(CONFIG_PWM_LPC32XX)	+= pwm-lpc32xx.o
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index ba6ce01..f3dcd02 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -249,6 +249,8 @@
 		}
 	}
 
+	cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS);
+
 	__raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
 
 	if (index == 0)
@@ -305,7 +307,7 @@
 		i = slowclk;
 		rate = 32768;
 		min = div_u64(NSEC_PER_SEC, rate);
-		max = min << 16;
+		max = min << tc->tcb_config->counter_width;
 
 		/* If period is too big return ERANGE error */
 		if (max < period_ns)
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
new file mode 100644
index 0000000..33aa446
--- /dev/null
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -0,0 +1,230 @@
+/*
+ * PWM framework driver for Cirrus Logic EP93xx
+ *
+ * Copyright (c) 2009        Matthieu Crapet <mcrapet@gmail.com>
+ * Copyright (c) 2009, 2013  H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * EP9301/02 have only one channel:
+ *   platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14)
+ *
+ * EP9307 has only one channel:
+ *   platform device ep93xx-pwm.0 - PWMOUT
+ *
+ * EP9312/15 have two channels:
+ *   platform device ep93xx-pwm.0 - PWMOUT
+ *   platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+
+#include <asm/div64.h>
+
+#include <mach/platform.h>	/* for ep93xx_pwm_{acquire,release}_gpio() */
+
+#define EP93XX_PWMx_TERM_COUNT	0x00
+#define EP93XX_PWMx_DUTY_CYCLE	0x04
+#define EP93XX_PWMx_ENABLE	0x08
+#define EP93XX_PWMx_INVERT	0x0c
+
+struct ep93xx_pwm {
+	void __iomem *base;
+	struct clk *clk;
+	struct pwm_chip chip;
+};
+
+static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
+{
+	return container_of(chip, struct ep93xx_pwm, chip);
+}
+
+static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct platform_device *pdev = to_platform_device(chip->dev);
+
+	return ep93xx_pwm_acquire_gpio(pdev);
+}
+
+static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct platform_device *pdev = to_platform_device(chip->dev);
+
+	ep93xx_pwm_release_gpio(pdev);
+}
+
+static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+			     int duty_ns, int period_ns)
+{
+	struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+	void __iomem *base = ep93xx_pwm->base;
+	unsigned long long c;
+	unsigned long period_cycles;
+	unsigned long duty_cycles;
+	unsigned long term;
+	int ret = 0;
+
+	/*
+	 * The clock needs to be enabled to access the PWM registers.
+	 * Configuration can be changed at any time.
+	 */
+	if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+		ret = clk_enable(ep93xx_pwm->clk);
+		if (ret)
+			return ret;
+	}
+
+	c = clk_get_rate(ep93xx_pwm->clk);
+	c *= period_ns;
+	do_div(c, 1000000000);
+	period_cycles = c;
+
+	c = period_cycles;
+	c *= duty_ns;
+	do_div(c, period_ns);
+	duty_cycles = c;
+
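+	/*
+	 * Worked example (assuming, say, a 14.7456 MHz PWM clock): a 1 ms
+	 * period gives period_cycles = 14745600ULL * 1000000 / 10^9 = 14745,
+	 * which fits the 16-bit TERM_COUNT register checked below.
+	 */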
+	if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
+		term = readw(base + EP93XX_PWMx_TERM_COUNT);
+
+		/* Order is important if PWM is running */
+		if (period_cycles > term) {
+			writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+			writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+		} else {
+			writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+			writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (!test_bit(PWMF_ENABLED, &pwm->flags))
+		clk_disable(ep93xx_pwm->clk);
+
+	return ret;
+}
+
+static int ep93xx_pwm_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+			       enum pwm_polarity polarity)
+{
+	struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+	int ret;
+
+	/*
+	 * The clock needs to be enabled to access the PWM registers.
+	 * Polarity can only be changed when the PWM is disabled.
+	 */
+	ret = clk_enable(ep93xx_pwm->clk);
+	if (ret)
+		return ret;
+
+	if (polarity == PWM_POLARITY_INVERSED)
+		writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+	else
+		writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+
+	clk_disable(ep93xx_pwm->clk);
+
+	return 0;
+}
+
+static int ep93xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+	int ret;
+
+	ret = clk_enable(ep93xx_pwm->clk);
+	if (ret)
+		return ret;
+
+	writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+
+	return 0;
+}
+
+static void ep93xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+
+	writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+	clk_disable(ep93xx_pwm->clk);
+}
+
+static const struct pwm_ops ep93xx_pwm_ops = {
+	.request = ep93xx_pwm_request,
+	.free = ep93xx_pwm_free,
+	.config = ep93xx_pwm_config,
+	.set_polarity = ep93xx_pwm_polarity,
+	.enable = ep93xx_pwm_enable,
+	.disable = ep93xx_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+static int ep93xx_pwm_probe(struct platform_device *pdev)
+{
+	struct ep93xx_pwm *ep93xx_pwm;
+	struct resource *res;
+	int ret;
+
+	ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
+	if (!ep93xx_pwm)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ep93xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ep93xx_pwm->base))
+		return PTR_ERR(ep93xx_pwm->base);
+
+	ep93xx_pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk");
+	if (IS_ERR(ep93xx_pwm->clk))
+		return PTR_ERR(ep93xx_pwm->clk);
+
+	ep93xx_pwm->chip.dev = &pdev->dev;
+	ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
+	ep93xx_pwm->chip.base = -1;
+	ep93xx_pwm->chip.npwm = 1;
+
+	ret = pwmchip_add(&ep93xx_pwm->chip);
+	if (ret < 0)
+		return ret;
+
+	platform_set_drvdata(pdev, ep93xx_pwm);
+	return 0;
+}
+
+static int ep93xx_pwm_remove(struct platform_device *pdev)
+{
+	struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&ep93xx_pwm->chip);
+}
+
+static struct platform_driver ep93xx_pwm_driver = {
+	.driver = {
+		.name = "ep93xx-pwm",
+	},
+	.probe = ep93xx_pwm_probe,
+	.remove = ep93xx_pwm_remove,
+};
+module_platform_driver(ep93xx_pwm_driver);
+
+MODULE_DESCRIPTION("Cirrus Logic EP93xx PWM driver");
+MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
+	      "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_ALIAS("platform:ep93xx-pwm");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 2b7c4f8..cc47733 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -16,6 +16,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/pwm.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 
 /* i.MX1 and i.MX21 share the same PWM function block: */
@@ -296,7 +297,7 @@
 	.driver		= {
 		.name	= "imx-pwm",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(imx_pwm_dt_ids),
+		.of_match_table = imx_pwm_dt_ids,
 	},
 	.probe		= imx_pwm_probe,
 	.remove		= imx_pwm_remove,
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index efac99e0..9dc0f9d 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -169,7 +169,7 @@
 	.driver = {
 		.name = "lpc32xx-pwm",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(lpc32xx_pwm_dt_ids),
+		.of_match_table = lpc32xx_pwm_dt_ids,
 	},
 	.probe = lpc32xx_pwm_probe,
 	.remove = lpc32xx_pwm_remove,
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index c2c5a4f..9475bc7 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -189,7 +189,7 @@
 	.driver = {
 		.name = "mxs-pwm",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(mxs_pwm_dt_ids),
+		.of_match_table = mxs_pwm_dt_ids,
 	},
 	.probe = mxs_pwm_probe,
 	.remove = mxs_pwm_remove,
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index fcc8b9a..b59639e 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/slab.h>
@@ -224,8 +225,8 @@
 
 static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-	pwm_set_chip_data(pwm, NULL);
 	devm_kfree(chip->dev, pwm_get_chip_data(pwm));
+	pwm_set_chip_data(pwm, NULL);
 }
 
 static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index c2e2e58..4e5c3d1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -26,7 +26,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
 #include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
 
 #include "pwm-tipwmss.h"
 
@@ -208,11 +207,6 @@
 	struct clk *clk;
 	struct ecap_pwm_chip *pc;
 	u16 status;
-	struct pinctrl *pinctrl;
-
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl))
-		dev_warn(&pdev->dev, "unable to select pin group\n");
 
 	pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
 	if (!pc) {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 084f552..a4d8f51 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -26,7 +26,6 @@
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
 #include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
 
 #include "pwm-tipwmss.h"
 
@@ -439,11 +438,6 @@
 	struct clk *clk;
 	struct ehrpwm_pwm_chip *pc;
 	u16 status;
-	struct pinctrl *pinctrl;
-
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl))
-		dev_warn(&pdev->dev, "unable to select pin group\n");
 
 	pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
 	if (!pc) {
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 29d1bba..b964470 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/i2c/twl.h>
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index eef9105..b99a50e 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/i2c/twl.h>
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 23f8d1c..a00132e 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -906,7 +906,7 @@
 		}
 		ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
 		if (ret < 0) {
-			dev_err(mfd->dev, "Error in settting sr register\n");
+			dev_err(mfd->dev, "Error in setting sr register\n");
 			return ret;
 		}
 	}
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index b09c75c..a34b506 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -30,7 +30,7 @@
 #include "remoteproc_internal.h"
 
 /* kick the remote processor, and let it know which virtqueue to poke at */
-static void rproc_virtio_notify(struct virtqueue *vq)
+static bool rproc_virtio_notify(struct virtqueue *vq)
 {
 	struct rproc_vring *rvring = vq->priv;
 	struct rproc *rproc = rvring->rvdev->rproc;
@@ -39,6 +39,7 @@
 	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
 
 	rproc->ops->kick(rproc, notifyid);
+	return true;
 }
 
 /**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9654aa3..15f166a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -153,6 +153,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-88pm80x.
 
+config RTC_DRV_AS3722
+	tristate "ams AS3722 RTC driver"
+	depends on MFD_AS3722
+	help
+	  If you say yes here you get support for the RTC of ams AS3722 PMIC
+	  chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-as3722.
+
 config RTC_DRV_DS1307
 	tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
 	help
@@ -497,6 +507,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-rv3029c2.
 
+config RTC_DRV_S5M
+	tristate "Samsung S5M series"
+	depends on MFD_SEC_CORE
+	help
+	  If you say yes here you will get support for the
+	  RTC of Samsung S5M PMIC series.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-s5m.
+
 endif # I2C
 
 comment "SPI RTC drivers"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2dff3d2..27b4bd8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_RTC_DRV_88PM80X)	+= rtc-88pm80x.o
 obj-$(CONFIG_RTC_DRV_AB3100)	+= rtc-ab3100.o
 obj-$(CONFIG_RTC_DRV_AB8500)	+= rtc-ab8500.o
+obj-$(CONFIG_RTC_DRV_AS3722)	+= rtc-as3722.o
 obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
 obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
 obj-$(CONFIG_RTC_DRV_AT91SAM9)	+= rtc-at91sam9.o
@@ -107,6 +108,7 @@
 obj-$(CONFIG_RTC_DRV_RX8581)	+= rtc-rx8581.o
 obj-$(CONFIG_RTC_DRV_S35390A)	+= rtc-s35390a.o
 obj-$(CONFIG_RTC_DRV_S3C)	+= rtc-s3c.o
+obj-$(CONFIG_RTC_DRV_S5M)	+= rtc-s5m.o
 obj-$(CONFIG_RTC_DRV_SA1100)	+= rtc-sa1100.o
 obj-$(CONFIG_RTC_DRV_SH)	+= rtc-sh.o
 obj-$(CONFIG_RTC_DRV_SNVS)	+= rtc-snvs.o
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 354c937..0916089 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -251,14 +251,15 @@
 static int pm80x_rtc_probe(struct platform_device *pdev)
 {
 	struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
-	struct pm80x_platform_data *pm80x_pdata;
+	struct pm80x_platform_data *pm80x_pdata =
+				dev_get_platdata(pdev->dev.parent);
 	struct pm80x_rtc_pdata *pdata = NULL;
 	struct pm80x_rtc_info *info;
 	struct rtc_time tm;
 	unsigned long ticks = 0;
 	int ret;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (pdata == NULL)
 		dev_warn(&pdev->dev, "No platform data!\n");
 
@@ -326,8 +327,7 @@
 	regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
 			   PM800_RTC1_USE_XO);
 
-	if (pdev->dev.parent->platform_data) {
-		pm80x_pdata = pdev->dev.parent->platform_data;
+	if (pm80x_pdata) {
 		pdata = pm80x_pdata->rtc;
 		if (pdata)
 			info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 4e30c85..81650484 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -316,7 +316,7 @@
 	unsigned long ticks = 0;
 	int ret;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 
 	info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info),
 			    GFP_KERNEL);
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
new file mode 100644
index 0000000..9cfa817
--- /dev/null
+++ b/drivers/rtc/rtc-as3722.c
@@ -0,0 +1,275 @@
+/*
+ * rtc-as3722.c - Real Time Clock driver for ams AS3722 PMICs
+ *
+ * Copyright (C) 2013 ams AG
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bcd.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/as3722.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+#define AS3722_RTC_START_YEAR	2000
+
+struct as3722_rtc {
+	struct rtc_device	*rtc;
+	struct device		*dev;
+	struct as3722		*as3722;
+	int			alarm_irq;
+	bool			irq_enable;
+};
+
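+/* The AS3722 keeps time as packed BCD with a year offset of 2000: e.g.
+ * 2013 (tm_year == 113) is written as bin2bcd(113 - 100) == 0x13 into the
+ * year register.
+ */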
+static void as3722_time_to_reg(u8 *rbuff, struct rtc_time *tm)
+{
+	rbuff[0] = bin2bcd(tm->tm_sec);
+	rbuff[1] = bin2bcd(tm->tm_min);
+	rbuff[2] = bin2bcd(tm->tm_hour);
+	rbuff[3] = bin2bcd(tm->tm_mday);
+	rbuff[4] = bin2bcd(tm->tm_mon);
+	rbuff[5] = bin2bcd(tm->tm_year - (AS3722_RTC_START_YEAR - 1900));
+}
+
+static void as3722_reg_to_time(u8 *rbuff, struct rtc_time *tm)
+{
+	tm->tm_sec = bcd2bin(rbuff[0] & 0x7F);
+	tm->tm_min = bcd2bin(rbuff[1] & 0x7F);
+	tm->tm_hour = bcd2bin(rbuff[2] & 0x3F);
+	tm->tm_mday = bcd2bin(rbuff[3] & 0x3F);
+	tm->tm_mon = bcd2bin(rbuff[4] & 0x1F);
+	tm->tm_year = (AS3722_RTC_START_YEAR - 1900) + bcd2bin(rbuff[5] & 0x7F);
+}
+
+static int as3722_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+	struct as3722 *as3722 = as3722_rtc->as3722;
+	u8 as_time_array[6];
+	int ret;
+
+	ret = as3722_block_read(as3722, AS3722_RTC_SECOND_REG,
+			6, as_time_array);
+	if (ret < 0) {
+		dev_err(dev, "RTC_SECOND reg block read failed %d\n", ret);
+		return ret;
+	}
+	as3722_reg_to_time(as_time_array, tm);
+	return 0;
+}
+
+static int as3722_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+	struct as3722 *as3722 = as3722_rtc->as3722;
+	u8 as_time_array[6];
+	int ret;
+
+	if (tm->tm_year < (AS3722_RTC_START_YEAR - 1900))
+		return -EINVAL;
+
+	as3722_time_to_reg(as_time_array, tm);
+	ret = as3722_block_write(as3722, AS3722_RTC_SECOND_REG, 6,
+			as_time_array);
+	if (ret < 0)
+		dev_err(dev, "RTC_SECOND reg block write failed %d\n", ret);
+	return ret;
+}
+
+static int as3722_rtc_alarm_irq_enable(struct device *dev,
+		unsigned int enabled)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+	if (enabled && !as3722_rtc->irq_enable) {
+		enable_irq(as3722_rtc->alarm_irq);
+		as3722_rtc->irq_enable = true;
+	} else if (!enabled && as3722_rtc->irq_enable)  {
+		disable_irq(as3722_rtc->alarm_irq);
+		as3722_rtc->irq_enable = false;
+	}
+	return 0;
+}
+
+static int as3722_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+	struct as3722 *as3722 = as3722_rtc->as3722;
+	u8 as_time_array[6];
+	int ret;
+
+	ret = as3722_block_read(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
+			as_time_array);
+	if (ret < 0) {
+		dev_err(dev, "RTC_ALARM_SECOND block read failed %d\n", ret);
+		return ret;
+	}
+
+	as3722_reg_to_time(as_time_array, &alrm->time);
+	return 0;
+}
+
+static int as3722_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+	struct as3722 *as3722 = as3722_rtc->as3722;
+	u8 as_time_array[6];
+	int ret;
+
+	if (alrm->time.tm_year < (AS3722_RTC_START_YEAR - 1900))
+		return -EINVAL;
+
+	ret = as3722_rtc_alarm_irq_enable(dev, 0);
+	if (ret < 0) {
+		dev_err(dev, "Disable RTC alarm failed\n");
+		return ret;
+	}
+
+	as3722_time_to_reg(as_time_array, &alrm->time);
+	ret = as3722_block_write(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
+			as_time_array);
+	if (ret < 0) {
+		dev_err(dev, "RTC_ALARM_SECOND block write failed %d\n", ret);
+		return ret;
+	}
+
+	if (alrm->enabled)
+		ret = as3722_rtc_alarm_irq_enable(dev, alrm->enabled);
+	return ret;
+}
+
+static irqreturn_t as3722_alarm_irq(int irq, void *data)
+{
+	struct as3722_rtc *as3722_rtc = data;
+
+	rtc_update_irq(as3722_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+	return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops as3722_rtc_ops = {
+	.read_time = as3722_rtc_read_time,
+	.set_time = as3722_rtc_set_time,
+	.read_alarm = as3722_rtc_read_alarm,
+	.set_alarm = as3722_rtc_set_alarm,
+	.alarm_irq_enable = as3722_rtc_alarm_irq_enable,
+};
+
+static int as3722_rtc_probe(struct platform_device *pdev)
+{
+	struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
+	struct as3722_rtc *as3722_rtc;
+	int ret;
+
+	as3722_rtc = devm_kzalloc(&pdev->dev, sizeof(*as3722_rtc), GFP_KERNEL);
+	if (!as3722_rtc)
+		return -ENOMEM;
+
+	as3722_rtc->as3722 = as3722;
+	as3722_rtc->dev = &pdev->dev;
+	platform_set_drvdata(pdev, as3722_rtc);
+
+	/* Enable the RTC to make sure it is running. */
+	ret = as3722_update_bits(as3722, AS3722_RTC_CONTROL_REG,
+			AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN,
+			AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "RTC_CONTROL reg write failed: %d\n", ret);
+		return ret;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev,
+				&as3722_rtc_ops, THIS_MODULE);
+	if (IS_ERR(as3722_rtc->rtc)) {
+		ret = PTR_ERR(as3722_rtc->rtc);
+		dev_err(&pdev->dev, "RTC register failed: %d\n", ret);
+		return ret;
+	}
+
+	as3722_rtc->alarm_irq = platform_get_irq(pdev, 0);
+	dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
+
+	ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL,
+			as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
+			"rtc-alarm", as3722_rtc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
+				as3722_rtc->alarm_irq, ret);
+		goto scrub;
+	}
+	disable_irq(as3722_rtc->alarm_irq);
+	return 0;
+scrub:
+	rtc_device_unregister(as3722_rtc->rtc);
+	return ret;
+}
+
+static int as3722_rtc_remove(struct platform_device *pdev)
+{
+	struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev);
+
+	free_irq(as3722_rtc->alarm_irq, as3722_rtc);
+	rtc_device_unregister(as3722_rtc->rtc);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int as3722_rtc_suspend(struct device *dev)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(as3722_rtc->alarm_irq);
+
+	return 0;
+}
+
+static int as3722_rtc_resume(struct device *dev)
+{
+	struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(as3722_rtc->alarm_irq);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops as3722_rtc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(as3722_rtc_suspend, as3722_rtc_resume)
+};
+
+static struct platform_driver as3722_rtc_driver = {
+	.probe = as3722_rtc_probe,
+	.remove = as3722_rtc_remove,
+	.driver = {
+		.name = "as3722-rtc",
+		.pm = &as3722_rtc_pm_ops,
+	},
+};
+module_platform_driver(as3722_rtc_driver);
+
+MODULE_DESCRIPTION("RTC driver for AS3722 PMICs");
+MODULE_ALIAS("platform:as3722-rtc");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 7418926..8b2cd8a 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -376,7 +376,8 @@
 		return -ENXIO;
 	}
 
-	at91_rtc_regs = ioremap(regs->start, resource_size(regs));
+	at91_rtc_regs = devm_ioremap(&pdev->dev, regs->start,
+				     resource_size(regs));
 	if (!at91_rtc_regs) {
 		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
 		return -ENOMEM;
@@ -390,12 +391,12 @@
 					AT91_RTC_SECEV | AT91_RTC_TIMEV |
 					AT91_RTC_CALEV);
 
-	ret = request_irq(irq, at91_rtc_interrupt,
+	ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
 				IRQF_SHARED,
 				"at91_rtc", pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
-		goto err_unmap;
+		return ret;
 	}
 
 	/* cpu init code should really have flagged this device as
@@ -404,23 +405,14 @@
 	if (!device_can_wakeup(&pdev->dev))
 		device_init_wakeup(&pdev->dev, 1);
 
-	rtc = rtc_device_register(pdev->name, &pdev->dev,
+	rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
 				&at91_rtc_ops, THIS_MODULE);
-	if (IS_ERR(rtc)) {
-		ret = PTR_ERR(rtc);
-		goto err_free_irq;
-	}
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
 	platform_set_drvdata(pdev, rtc);
 
 	dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
 	return 0;
-
-err_free_irq:
-	free_irq(irq, pdev);
-err_unmap:
-	iounmap(at91_rtc_regs);
-
-	return ret;
 }
 
 /*
@@ -428,16 +420,10 @@
  */
 static int __exit at91_rtc_remove(struct platform_device *pdev)
 {
-	struct rtc_device *rtc = platform_get_drvdata(pdev);
-
 	/* Disable all interrupts */
 	at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
 					AT91_RTC_SECEV | AT91_RTC_TIMEV |
 					AT91_RTC_CALEV);
-	free_irq(irq, pdev);
-
-	rtc_device_unregister(rtc);
-	iounmap(at91_rtc_regs);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 24e733c..f148762 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -595,7 +595,7 @@
 static int INITSECTION
 cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
 {
-	struct cmos_rtc_board_info	*info = dev->platform_data;
+	struct cmos_rtc_board_info	*info = dev_get_platdata(dev);
 	int				retval = 0;
 	unsigned char			rtc_control;
 	unsigned			address_space;
@@ -789,7 +789,6 @@
 	cmos->iomem = NULL;
 
 	cmos->dev = NULL;
-	dev_set_drvdata(dev, NULL);
 }
 
 #ifdef	CONFIG_PM
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index e00642b..48cb2ac3 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -278,7 +278,7 @@
 		return -ENOMEM;
 
 	rtc->da9055 = dev_get_drvdata(pdev->dev.parent);
-	pdata = rtc->da9055->dev->platform_data;
+	pdata = dev_get_platdata(rtc->da9055->dev);
 	platform_set_drvdata(pdev, rtc);
 
 	ret = da9055_rtc_device_init(rtc->da9055, pdata);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index dd6170a..80f3237 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -606,7 +606,7 @@
 	struct ds1305			*ds1305;
 	int				status;
 	u8				addr, value;
-	struct ds1305_platform_data	*pdata = spi->dev.platform_data;
+	struct ds1305_platform_data	*pdata = dev_get_platdata(&spi->dev);
 	bool				write_ctrl = false;
 
 	/* Sanity check board setup data.  This may be hooked up
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index ca18fd1..4e75345 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -670,9 +670,9 @@
 	int			tmp;
 	const struct chip_desc	*chip = &chips[id->driver_data];
 	struct i2c_adapter	*adapter = to_i2c_adapter(client->dev.parent);
-	int			want_irq = false;
+	bool			want_irq = false;
 	unsigned char		*buf;
-	struct ds1307_platform_data *pdata = client->dev.platform_data;
+	struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
 	static const int	bbsqi_bitpos[] = {
 		[ds_1337] = 0,
 		[ds_1339] = DS1339_BIT_BBSQI,
@@ -956,7 +956,7 @@
 					GFP_KERNEL);
 		if (!ds1307->nvram) {
 			err = -ENOMEM;
-			goto exit;
+			goto err_irq;
 		}
 		ds1307->nvram->attr.name = "nvram";
 		ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
@@ -967,13 +967,15 @@
 		ds1307->nvram_offset = chip->nvram_offset;
 		err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram);
 		if (err)
-			goto exit;
+			goto err_irq;
 		set_bit(HAS_NVRAM, &ds1307->flags);
 		dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size);
 	}
 
 	return 0;
 
+err_irq:
+	free_irq(client->irq, client);
 exit:
 	return err;
 }
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 2ca5a23..fc209dc 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -224,7 +224,7 @@
 
 static int rtc_probe(struct platform_device *pdev)
 {
-	struct ds2404_platform_data *pdata = pdev->dev.platform_data;
+	struct ds2404_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct ds2404 *chip;
 	int retval = -EBUSY;
 
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 580e7b5..5e4f5dc 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -42,7 +42,7 @@
 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
 				unsigned short *delete)
 {
-	struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+	struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
 	unsigned long comp;
 
 	comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
@@ -60,7 +60,7 @@
 
 static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+	struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
 	unsigned long time;
 
 	 time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
@@ -71,7 +71,7 @@
 
 static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
 {
-	struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+	struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
 
 	__raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
 	return 0;
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 45560ff..965a9da 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -209,7 +209,7 @@
 		platform_get_drvdata(to_platform_device(dev));
 	int ret;
 
-	INIT_COMPLETION(time_state->comp_last_time);
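+	/* Re-arm the completion before kicking off a fresh report request. */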
+	reinit_completion(&time_state->comp_last_time);
 	/* get a report with all values through requesting one value */
 	sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
 			HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
@@ -236,7 +236,7 @@
 static int hid_time_probe(struct platform_device *pdev)
 {
 	int ret = 0;
-	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
 	struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
 		sizeof(struct hid_time_state), GFP_KERNEL);
 
@@ -281,11 +281,18 @@
 		goto err_open;
 	}
 
+	/*
+	 * Enable HID input processing early in order to be able to read the
+	 * clock already in devm_rtc_device_register().
+	 */
+	hid_device_io_start(hsdev->hdev);
+
 	time_state->rtc = devm_rtc_device_register(&pdev->dev,
 					"hid-sensor-time", &hid_time_rtc_ops,
 					THIS_MODULE);
 
 	if (IS_ERR_OR_NULL(time_state->rtc)) {
+		hid_device_io_stop(hsdev->hdev);
 		ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
 		time_state->rtc = NULL;
 		dev_err(&pdev->dev, "rtc device register failed!\n");
@@ -303,7 +310,7 @@
 
 static int hid_time_remove(struct platform_device *pdev)
 {
-	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
 
 	sensor_hub_device_close(hsdev);
 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index c016ad8..c3c549d 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -144,11 +144,7 @@
 static int
 isl1208_i2c_get_sr(struct i2c_client *client)
 {
-	int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
-	if (sr < 0)
-		return -EIO;
-
-	return sr;
+	return i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
 }
 
 static int
@@ -647,10 +643,11 @@
 		 "chip found, driver version " DRV_VERSION "\n");
 
 	if (client->irq > 0) {
-		rc = request_threaded_irq(client->irq, NULL,
-					  isl1208_rtc_interrupt,
-					  IRQF_SHARED,
-					  isl1208_driver.driver.name, client);
+		rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+					       isl1208_rtc_interrupt,
+					       IRQF_SHARED,
+					       isl1208_driver.driver.name,
+					       client);
 		if (!rc) {
 			device_init_wakeup(&client->dev, 1);
 			enable_irq_wake(client->irq);
@@ -662,20 +659,18 @@
 		}
 	}
 
-	rtc = rtc_device_register(isl1208_driver.driver.name,
-				  &client->dev, &isl1208_rtc_ops,
+	rtc = devm_rtc_device_register(&client->dev, isl1208_driver.driver.name,
+				  &isl1208_rtc_ops,
 				  THIS_MODULE);
-	if (IS_ERR(rtc)) {
-		rc = PTR_ERR(rtc);
-		goto exit_free_irq;
-	}
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
 
 	i2c_set_clientdata(client, rtc);
 
 	rc = isl1208_i2c_get_sr(client);
 	if (rc < 0) {
 		dev_err(&client->dev, "reading status failed\n");
-		goto exit_unregister;
+		return rc;
 	}
 
 	if (rc & ISL1208_REG_SR_RTCF)
@@ -684,28 +679,15 @@
 
 	rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
 	if (rc)
-		goto exit_unregister;
+		return rc;
 
 	return 0;
-
-exit_unregister:
-	rtc_device_unregister(rtc);
-exit_free_irq:
-	if (client->irq)
-		free_irq(client->irq, client);
-
-	return rc;
 }
 
 static int
 isl1208_remove(struct i2c_client *client)
 {
-	struct rtc_device *rtc = i2c_get_clientdata(client);
-
 	sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
-	rtc_device_unregister(rtc);
-	if (client->irq)
-		free_irq(client->irq, client);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index fcb0329..11880c1 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -68,7 +68,7 @@
 static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
 	u8 val;
@@ -111,7 +111,7 @@
 static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
 	u8 val = 0;
@@ -158,7 +158,7 @@
 static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	struct rtc_time *tm = &alrm->time;
 	unsigned long flags;
@@ -205,7 +205,7 @@
 static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	struct rtc_time *tm = &alrm->time;
 	u8 mday, hour, min, sec;
@@ -266,7 +266,7 @@
 static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
 
@@ -283,7 +283,7 @@
 static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
 	u8 val;
@@ -304,7 +304,7 @@
 {
 	struct device *dev = (struct device *)dev_id;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	u8 event;
 
@@ -340,7 +340,7 @@
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	ssize_t cnt = 0;
 	unsigned long flags;
@@ -360,7 +360,7 @@
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	ssize_t cnt = 0;
 	unsigned long flags;
@@ -385,7 +385,7 @@
 
 static int m48t59_rtc_probe(struct platform_device *pdev)
 {
-	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct m48t59_private *m48t59 = NULL;
 	struct resource *res;
 	int ret = -ENOMEM;
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 2d30314..32f64c9 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -46,7 +46,7 @@
 {
 	unsigned char reg;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t86_ops *ops = pdev->dev.platform_data;
+	struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
 
 	reg = ops->readbyte(M48T86_REG_B);
 
@@ -84,7 +84,7 @@
 {
 	unsigned char reg;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t86_ops *ops = pdev->dev.platform_data;
+	struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
 
 	reg = ops->readbyte(M48T86_REG_B);
 
@@ -123,7 +123,7 @@
 {
 	unsigned char reg;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct m48t86_ops *ops = pdev->dev.platform_data;
+	struct m48t86_ops *ops = dev_get_platdata(&pdev->dev);
 
 	reg = ops->readbyte(M48T86_REG_B);
 
@@ -147,7 +147,7 @@
 static int m48t86_rtc_probe(struct platform_device *dev)
 {
 	unsigned char reg;
-	struct m48t86_ops *ops = dev->dev.platform_data;
+	struct m48t86_ops *ops = dev_get_platdata(&dev->dev);
 	struct rtc_device *rtc;
 
 	rtc = devm_rtc_device_register(&dev->dev, "m48t86",
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 55969b1..4804985 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -164,14 +164,7 @@
 
 static int max6900_i2c_clear_write_protect(struct i2c_client *client)
 {
-	int rc;
-	rc = i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
-	if (rc < 0) {
-		dev_err(&client->dev, "%s: control register write failed\n",
-			__func__);
-		return -EIO;
-	}
-	return 0;
+	return i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
 }
 
 static int
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 315209d9..e2436d1 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -380,7 +380,6 @@
 cleanup1:
 	rtc_device_unregister(mrst_rtc.rtc);
 cleanup0:
-	dev_set_drvdata(dev, NULL);
 	mrst_rtc.dev = NULL;
 	release_mem_region(iomem->start, resource_size(iomem));
 	dev_err(dev, "rtc-mrst: unable to initialise\n");
@@ -412,7 +411,6 @@
 	mrst->iomem = NULL;
 
 	mrst->dev = NULL;
-	dev_set_drvdata(dev, NULL);
 }
 
 #ifdef	CONFIG_PM
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c7d97ee..26de5f8 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -553,7 +553,7 @@
 		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 		.pm	= &omap_rtc_pm_ops,
-		.of_match_table = of_match_ptr(omap_rtc_of_match),
+		.of_match_table = omap_rtc_of_match,
 	},
 	.id_table	= omap_rtc_devtype,
 };
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 1725b50..d1953bb 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -327,7 +327,7 @@
 
 static int pcf2123_remove(struct spi_device *spi)
 {
-	struct pcf2123_plat_data *pdata = spi->dev.platform_data;
+	struct pcf2123_plat_data *pdata = dev_get_platdata(&spi->dev);
 	int i;
 
 	if (pdata) {
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 22bacdb..f85a1a9 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -106,7 +106,7 @@
 	if (ret)
 		goto err_req;
 
-	rtc = kmalloc(sizeof(*rtc), GFP_KERNEL);
+	rtc = devm_kzalloc(&dev->dev, sizeof(*rtc), GFP_KERNEL);
 	if (!rtc) {
 		ret = -ENOMEM;
 		goto err_rtc;
@@ -115,7 +115,7 @@
 	rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!rtc->base) {
 		ret = -ENOMEM;
-		goto err_map;
+		goto err_rtc;
 	}
 
 	__raw_writel(0, rtc->base + RTC_CR);
@@ -141,8 +141,6 @@
 	free_irq(dev->irq[0], rtc);
  err_irq:
 	iounmap(rtc->base);
- err_map:
-	kfree(rtc);
  err_rtc:
 	amba_release_regions(dev);
  err_req:
@@ -153,14 +151,11 @@
 {
 	struct pl030_rtc *rtc = amba_get_drvdata(dev);
 
-	amba_set_drvdata(dev, NULL);
-
 	writel(0, rtc->base + RTC_CR);
 
 	free_irq(dev->irq[0], rtc);
 	rtc_device_unregister(rtc->rtc);
 	iounmap(rtc->base);
-	kfree(rtc);
 	amba_release_regions(dev);
 
 	return 0;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e3b2571..99181fff 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -305,7 +305,6 @@
 {
 	struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
 
-	amba_set_drvdata(adev, NULL);
 	free_irq(adev->irq[0], ldata);
 	rtc_device_unregister(ldata->rtc);
 	iounmap(ldata->base);
@@ -391,7 +390,6 @@
 	rtc_device_unregister(ldata->rtc);
 out_no_rtc:
 	iounmap(ldata->base);
-	amba_set_drvdata(adev, NULL);
 out_no_remap:
 	kfree(ldata);
 out:
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 402732c..1ecfe3b 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -53,11 +53,11 @@
 }
 
 /* Update control registers */
-static void puv3_rtc_setaie(int to)
+static void puv3_rtc_setaie(struct device *dev, int to)
 {
 	unsigned int tmp;
 
-	pr_debug("%s: aie=%d\n", __func__, to);
+	dev_dbg(dev, "%s: aie=%d\n", __func__, to);
 
 	tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
 
@@ -71,7 +71,7 @@
 {
 	unsigned int tmp;
 
-	pr_debug("%s: pie=%d\n", __func__, enabled);
+	dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
 
 	spin_lock_irq(&puv3_rtc_pie_lock);
 	tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -90,7 +90,7 @@
 {
 	rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
 
-	pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+	dev_dbg(dev, "read time %02x.%02x.%02x %02x/%02x/%02x\n",
 		 rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
 		 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
 
@@ -101,7 +101,7 @@
 {
 	unsigned long rtc_count = 0;
 
-	pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+	dev_dbg(dev, "set time %02d.%02d.%02d %02d/%02d/%02d\n",
 		 tm->tm_year, tm->tm_mon, tm->tm_mday,
 		 tm->tm_hour, tm->tm_min, tm->tm_sec);
 
@@ -119,7 +119,7 @@
 
 	alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
 
-	pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+	dev_dbg(dev, "read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
 		 alrm->enabled,
 		 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
 		 alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -132,7 +132,7 @@
 	struct rtc_time *tm = &alrm->time;
 	unsigned long rtcalarm_count = 0;
 
-	pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+	dev_dbg(dev, "puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
 		 alrm->enabled,
 		 tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
 		 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
@@ -140,7 +140,7 @@
 	rtc_tm_to_time(tm, &rtcalarm_count);
 	writel(rtcalarm_count, RTC_RTAR);
 
-	puv3_rtc_setaie(alrm->enabled);
+	puv3_rtc_setaie(&dev->dev, alrm->enabled);
 
 	if (alrm->enabled)
 		enable_irq_wake(puv3_rtc_alarmno);
@@ -227,7 +227,7 @@
 	rtc_device_unregister(rtc);
 
 	puv3_rtc_setpie(&dev->dev, 0);
-	puv3_rtc_setaie(0);
+	puv3_rtc_setaie(&dev->dev, 0);
 
 	release_resource(puv3_rtc_mem);
 	kfree(puv3_rtc_mem);
@@ -241,7 +241,7 @@
 	struct resource *res;
 	int ret;
 
-	pr_debug("%s: probe=%p\n", __func__, pdev);
+	dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
 
 	/* find the IRQs */
 	puv3_rtc_tickno = platform_get_irq(pdev, 1);
@@ -256,7 +256,7 @@
 		return -ENOENT;
 	}
 
-	pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n",
+	dev_dbg(&pdev->dev, "PKUnity_rtc: tick irq %d, alarm irq %d\n",
 		 puv3_rtc_tickno, puv3_rtc_alarmno);
 
 	/* get the memory region */
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index f7a90a1..090a101 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -64,7 +64,7 @@
 rs5c348_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+	struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
 	u8 txbuf[5+7], *txp;
 	int ret;
 
@@ -100,7 +100,7 @@
 rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+	struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
 	u8 txbuf[5], rxbuf[7];
 	int ret;
 
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
new file mode 100644
index 0000000..b7fd02b
--- /dev/null
+++ b/drivers/rtc/rtc-s5m.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *	http://www.samsung.com
+ *
+ *  Copyright (C) 2013 Google, Inc
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/bcd.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/rtc.h>
+
+struct s5m_rtc_info {
+	struct device *dev;
+	struct sec_pmic_dev *s5m87xx;
+	struct regmap *rtc;
+	struct rtc_device *rtc_dev;
+	int irq;
+	int device_type;
+	int rtc_24hr_mode;
+	bool wtsr_smpl;
+};
+
+static void s5m8767_data_to_tm(u8 *data, struct rtc_time *tm,
+			       int rtc_24hr_mode)
+{
+	tm->tm_sec = data[RTC_SEC] & 0x7f;
+	tm->tm_min = data[RTC_MIN] & 0x7f;
+	if (rtc_24hr_mode) {
+		tm->tm_hour = data[RTC_HOUR] & 0x1f;
+	} else {
+		tm->tm_hour = data[RTC_HOUR] & 0x0f;
+		if (data[RTC_HOUR] & HOUR_PM_MASK)
+			tm->tm_hour += 12;
+	}
+
+	tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f);
+	tm->tm_mday = data[RTC_DATE] & 0x1f;
+	tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+	tm->tm_year = (data[RTC_YEAR1] & 0x7f) + 100;
+	tm->tm_yday = 0;
+	tm->tm_isdst = 0;
+}
+
+static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+	data[RTC_SEC] = tm->tm_sec;
+	data[RTC_MIN] = tm->tm_min;
+
+	if (tm->tm_hour >= 12)
+		data[RTC_HOUR] = tm->tm_hour | HOUR_PM_MASK;
+	else
+		data[RTC_HOUR] = tm->tm_hour & ~HOUR_PM_MASK;
+
+	data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+	data[RTC_DATE] = tm->tm_mday;
+	data[RTC_MONTH] = tm->tm_mon + 1;
+	data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+
+	if (tm->tm_year < 100) {
+		pr_err("s5m8767 RTC cannot handle the year %d.\n",
+		       1900 + tm->tm_year);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
+{
+	int ret;
+	unsigned int data;
+
+	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	if (ret < 0) {
+		dev_err(info->dev, "failed to read update reg(%d)\n", ret);
+		return ret;
+	}
+
+	data |= RTC_TIME_EN_MASK;
+	data |= RTC_UDR_MASK;
+
+	ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+	if (ret < 0) {
+		dev_err(info->dev, "failed to write update reg(%d)\n", ret);
+		return ret;
+	}
+
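+	/* UDR self-clears once the hardware has latched the new time. */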
+	do {
+		ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	} while ((data & RTC_UDR_MASK) && !ret);
+
+	return ret;
+}
+
+static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
+{
+	int ret;
+	unsigned int data;
+
+	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	if (ret < 0) {
+		dev_err(info->dev, "%s: fail to read update reg(%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	data &= ~RTC_TIME_EN_MASK;
+	data |= RTC_UDR_MASK;
+
+	ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+	if (ret < 0) {
+		dev_err(info->dev, "%s: fail to write update reg(%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	do {
+		ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+	} while ((data & RTC_UDR_MASK) && !ret);
+
+	return ret;
+}
+
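+/* Unlike the S5M8767, the S5M8763 keeps calendar values in BCD. */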
+static void s5m8763_data_to_tm(u8 *data, struct rtc_time *tm)
+{
+	tm->tm_sec = bcd2bin(data[RTC_SEC]);
+	tm->tm_min = bcd2bin(data[RTC_MIN]);
+
+	if (data[RTC_HOUR] & HOUR_12) {
+		tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x1f);
+		if (data[RTC_HOUR] & HOUR_PM)
+			tm->tm_hour += 12;
+	} else {
+		tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
+	}
+
+	tm->tm_wday = data[RTC_WEEKDAY] & 0x07;
+	tm->tm_mday = bcd2bin(data[RTC_DATE]);
+	tm->tm_mon = bcd2bin(data[RTC_MONTH]);
+	tm->tm_year = bcd2bin(data[RTC_YEAR1]) + bcd2bin(data[RTC_YEAR2]) * 100;
+	tm->tm_year -= 1900;
+}
+
+static void s5m8763_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+	data[RTC_SEC] = bin2bcd(tm->tm_sec);
+	data[RTC_MIN] = bin2bcd(tm->tm_min);
+	data[RTC_HOUR] = bin2bcd(tm->tm_hour);
+	data[RTC_WEEKDAY] = tm->tm_wday;
+	data[RTC_DATE] = bin2bcd(tm->tm_mday);
+	data[RTC_MONTH] = bin2bcd(tm->tm_mon);
+	data[RTC_YEAR1] = bin2bcd(tm->tm_year % 100);
+	data[RTC_YEAR2] = bin2bcd((tm->tm_year + 1900) / 100);
+}
+
+static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	u8 data[8];
+	int ret;
+
+	ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	switch (info->device_type) {
+	case S5M8763X:
+		s5m8763_data_to_tm(data, tm);
+		break;
+
+	case S5M8767X:
+		s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+		1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+	return rtc_valid_tm(tm);
+}
+
+static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	u8 data[8];
+	int ret = 0;
+
+	switch (info->device_type) {
+	case S5M8763X:
+		s5m8763_tm_to_data(tm, data);
+		break;
+	case S5M8767X:
+		ret = s5m8767_tm_to_data(tm, data);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+		1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+	ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	ret = s5m8767_rtc_set_time_reg(info);
+
+	return ret;
+}
+
+static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	u8 data[8];
+	unsigned int val;
+	int ret, i;
+
+	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	switch (info->device_type) {
+	case S5M8763X:
+		s5m8763_data_to_tm(data, &alrm->time);
+		ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val);
+		if (ret < 0)
+			return ret;
+
+		alrm->enabled = !!val;
+
+		ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+		if (ret < 0)
+			return ret;
+
+		break;
+
+	case S5M8767X:
+		s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+		dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+			1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
+			alrm->time.tm_mday, alrm->time.tm_hour,
+			alrm->time.tm_min, alrm->time.tm_sec,
+			alrm->time.tm_wday);
+
+		alrm->enabled = 0;
+		for (i = 0; i < 7; i++) {
+			if (data[i] & ALARM_ENABLE_MASK) {
+				alrm->enabled = 1;
+				break;
+			}
+		}
+
+		alrm->pending = 0;
+		ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+		if (ret < 0)
+			return ret;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (val & ALARM0_STATUS)
+		alrm->pending = 1;
+	else
+		alrm->pending = 0;
+
+	return 0;
+}
+
+static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
+{
+	u8 data[8];
+	int ret, i;
+	struct rtc_time tm;
+
+	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
+	dev_dbg(info->dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+		1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday);
+
+	switch (info->device_type) {
+	case S5M8763X:
+		ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0);
+		break;
+
+	case S5M8767X:
+		for (i = 0; i < 7; i++)
+			data[i] &= ~ALARM_ENABLE_MASK;
+
+		ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+		if (ret < 0)
+			return ret;
+
+		ret = s5m8767_rtc_set_alarm_reg(info);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
+{
+	int ret;
+	u8 data[8];
+	u8 alarm0_conf;
+	struct rtc_time tm;
+
+	ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
+	dev_dbg(info->dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+		1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday);
+
+	switch (info->device_type) {
+	case S5M8763X:
+		alarm0_conf = 0x77;
+		ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf);
+		break;
+
+	case S5M8767X:
+		data[RTC_SEC] |= ALARM_ENABLE_MASK;
+		data[RTC_MIN] |= ALARM_ENABLE_MASK;
+		data[RTC_HOUR] |= ALARM_ENABLE_MASK;
+		data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+		if (data[RTC_DATE] & 0x1f)
+			data[RTC_DATE] |= ALARM_ENABLE_MASK;
+		if (data[RTC_MONTH] & 0xf)
+			data[RTC_MONTH] |= ALARM_ENABLE_MASK;
+		if (data[RTC_YEAR1] & 0x7f)
+			data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
+
+		ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+		if (ret < 0)
+			return ret;
+		ret = s5m8767_rtc_set_alarm_reg(info);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+	u8 data[8];
+	int ret;
+
+	switch (info->device_type) {
+	case S5M8763X:
+		s5m8763_tm_to_data(&alrm->time, data);
+		break;
+
+	case S5M8767X:
+		s5m8767_tm_to_data(&alrm->time, data);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+		1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
+		alrm->time.tm_mday, alrm->time.tm_hour, alrm->time.tm_min,
+		alrm->time.tm_sec, alrm->time.tm_wday);
+
+	ret = s5m_rtc_stop_alarm(info);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+	if (ret < 0)
+		return ret;
+
+	ret = s5m8767_rtc_set_alarm_reg(info);
+	if (ret < 0)
+		return ret;
+
+	if (alrm->enabled)
+		ret = s5m_rtc_start_alarm(info);
+
+	return ret;
+}
+
+static int s5m_rtc_alarm_irq_enable(struct device *dev,
+				    unsigned int enabled)
+{
+	struct s5m_rtc_info *info = dev_get_drvdata(dev);
+
+	if (enabled)
+		return s5m_rtc_start_alarm(info);
+	else
+		return s5m_rtc_stop_alarm(info);
+}
+
+static irqreturn_t s5m_rtc_alarm_irq(int irq, void *data)
+{
+	struct s5m_rtc_info *info = data;
+
+	rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+	return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops s5m_rtc_ops = {
+	.read_time = s5m_rtc_read_time,
+	.set_time = s5m_rtc_set_time,
+	.read_alarm = s5m_rtc_read_alarm,
+	.set_alarm = s5m_rtc_set_alarm,
+	.alarm_irq_enable = s5m_rtc_alarm_irq_enable,
+};
+
+static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
+{
+	int ret;
+	ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+				 WTSR_ENABLE_MASK,
+				 enable ? WTSR_ENABLE_MASK : 0);
+	if (ret < 0)
+		dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+			__func__, ret);
+}
+
+static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
+{
+	int ret;
+	ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+				 SMPL_ENABLE_MASK,
+				 enable ? SMPL_ENABLE_MASK : 0);
+	if (ret < 0)
+		dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+			__func__, ret);
+}
+
+static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
+{
+	u8 data[2];
+	unsigned int tp_read;
+	int ret;
+	struct rtc_time tm;
+
+	ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read);
+	if (ret < 0) {
+		dev_err(info->dev, "%s: fail to read control reg(%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* Set RTC control register: binary mode, 24-hour mode */
+	data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+	data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+	info->rtc_24hr_mode = 1;
+	ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2);
+	if (ret < 0) {
+		dev_err(info->dev, "%s: fail to write control reg(%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* On first boot, set the RTC time to 2012-01-01 00:00:00 (Sunday) */
+	if ((tp_read & RTC_TCON_MASK) == 0) {
+		dev_dbg(info->dev, "rtc init\n");
+		tm.tm_sec = 0;
+		tm.tm_min = 0;
+		tm.tm_hour = 0;
+		tm.tm_wday = 0;
+		tm.tm_mday = 1;
+		tm.tm_mon = 0;
+		tm.tm_year = 112;
+		tm.tm_yday = 0;
+		tm.tm_isdst = 0;
+		ret = s5m_rtc_set_time(info->dev, &tm);
+	}
+
+	ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON,
+				 RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
+	if (ret < 0)
+		dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int s5m_rtc_probe(struct platform_device *pdev)
+{
+	struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
+	struct sec_platform_data *pdata = s5m87xx->pdata;
+	struct s5m_rtc_info *info;
+	int ret;
+
+	if (!pdata) {
+		dev_err(pdev->dev.parent, "Platform data not supplied\n");
+		return -ENODEV;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = &pdev->dev;
+	info->s5m87xx = s5m87xx;
+	info->rtc = s5m87xx->rtc;
+	info->device_type = s5m87xx->device_type;
+	info->wtsr_smpl = s5m87xx->wtsr_smpl;
+
+	switch (pdata->device_type) {
+	case S5M8763X:
+		info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0;
+		break;
+
+	case S5M8767X:
+		info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1;
+		break;
+
+	default:
+		dev_err(&pdev->dev, "Unsupported device type %d\n",
+			pdata->device_type);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	ret = s5m8767_rtc_init_reg(info);
+	if (ret)
+		return ret;
+
+	if (info->wtsr_smpl) {
+		s5m_rtc_enable_wtsr(info, true);
+		s5m_rtc_enable_smpl(info, true);
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
+						 &s5m_rtc_ops, THIS_MODULE);
+
+	if (IS_ERR(info->rtc_dev))
+		return PTR_ERR(info->rtc_dev);
+
+	ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+					s5m_rtc_alarm_irq, 0, "rtc-alarm0",
+					info);
+	if (ret < 0)
+		dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+			info->irq, ret);
+
+	return ret;
+}
+
+static void s5m_rtc_shutdown(struct platform_device *pdev)
+{
+	struct s5m_rtc_info *info = platform_get_drvdata(pdev);
+	int i;
+	unsigned int val = 0;
+	if (info->wtsr_smpl) {
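+		/* WTSR disable may not latch at once; retry a few times. */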
+		for (i = 0; i < 3; i++) {
+			s5m_rtc_enable_wtsr(info, false);
+			regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val);
+			pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
+			if (val & WTSR_ENABLE_MASK)
+				pr_emerg("%s: failed to disable WTSR\n",
+					 __func__);
+			else {
+				pr_info("%s: WTSR disabled\n",
+					__func__);
+				break;
+			}
+		}
+	}
+	/* Disable SMPL on power-off */
+	s5m_rtc_enable_smpl(info, false);
+}
+
+static const struct platform_device_id s5m_rtc_id[] = {
+	{ "s5m-rtc", 0 },
+	{ },
+};
+
+static struct platform_driver s5m_rtc_driver = {
+	.driver		= {
+		.name	= "s5m-rtc",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= s5m_rtc_probe,
+	.shutdown	= s5m_rtc_shutdown,
+	.id_table	= s5m_rtc_id,
+};
+
+module_platform_driver(s5m_rtc_driver);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5M RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s5m-rtc");
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6d87e26..d0d2b04 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -649,8 +649,9 @@
 	clk_enable(rtc->clk);
 
 	rtc->capabilities = RTC_DEF_CAPABILITIES;
-	if (pdev->dev.platform_data) {
-		struct sh_rtc_platform_info *pinfo = pdev->dev.platform_data;
+	if (dev_get_platdata(&pdev->dev)) {
+		struct sh_rtc_platform_info *pinfo =
+			dev_get_platdata(&pdev->dev);
 
 		/*
 		 * Some CPUs have special capabilities in addition to the
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 63460cf..3eb3642 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -59,7 +59,7 @@
 	unsigned long rtc_alarm, rtc_count;
 	struct sirfsoc_rtc_drv *rtcdrv;
 
-	rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+	rtcdrv = dev_get_drvdata(dev);
 
 	local_irq_disable();
 
@@ -94,7 +94,7 @@
 {
 	unsigned long rtc_status_reg, rtc_alarm;
 	struct sirfsoc_rtc_drv *rtcdrv;
-	rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+	rtcdrv = dev_get_drvdata(dev);
 
 	if (alrm->enabled) {
 		rtc_tm_to_time(&(alrm->time), &rtc_alarm);
@@ -157,7 +157,7 @@
 {
 	unsigned long tmp_rtc = 0;
 	struct sirfsoc_rtc_drv *rtcdrv;
-	rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+	rtcdrv = dev_get_drvdata(dev);
 	/*
 	 * This patch is taken from WinCE - Need to validate this for
 	 * correctness. To work around sirfsoc RTC counter double sync logic
@@ -178,7 +178,7 @@
 {
 	unsigned long rtc_time;
 	struct sirfsoc_rtc_drv *rtcdrv;
-	rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+	rtcdrv = dev_get_drvdata(dev);
 
 	rtc_tm_to_time(tm, &rtc_time);
 
@@ -274,7 +274,7 @@
 	err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base);
 	if (err) {
 		dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n");
-		goto error;
+		return err;
 	}
 
 	platform_set_drvdata(pdev, rtcdrv);
@@ -290,7 +290,7 @@
 	rtc_div = ((32768 / RTC_HZ) / 2) - 1;
 	sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
 
-	rtcdrv->rtc = rtc_device_register(pdev->name, &(pdev->dev),
+	rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
 			&sirfsoc_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtcdrv->rtc)) {
 		err = PTR_ERR(rtcdrv->rtc);
@@ -322,24 +322,15 @@
 			rtcdrv);
 	if (err) {
 		dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n");
-		goto error;
+		return err;
 	}
 
 	return 0;
-
-error:
-	if (rtcdrv->rtc)
-		rtc_device_unregister(rtcdrv->rtc);
-
-	return err;
 }
 
 static int sirfsoc_rtc_remove(struct platform_device *pdev)
 {
-	struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
-
 	device_init_wakeup(&pdev->dev, 0);
-	rtc_device_unregister(rtcdrv->rtc);
 
 	return 0;
 }
@@ -373,7 +364,7 @@
 {
 	u32 tmp;
 	struct sirfsoc_rtc_drv *rtcdrv;
-	rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev);
+	rtcdrv = dev_get_drvdata(dev);
 
 	/*
 	 * if resuming from a snapshot and the RTC power was lost,
@@ -467,7 +458,7 @@
 #ifdef CONFIG_PM
 		.pm = &sirfsoc_rtc_pm_ops,
 #endif
-		.of_match_table = of_match_ptr(sirfsoc_rtc_of_match),
+		.of_match_table = sirfsoc_rtc_of_match,
 	},
 	.probe = sirfsoc_rtc_probe,
 	.remove = sirfsoc_rtc_remove,
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 316a342..fa384fe 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -329,7 +329,7 @@
 		.name	= "snvs_rtc",
 		.owner	= THIS_MODULE,
 		.pm	= &snvs_rtc_pm_ops,
-		.of_match_table = of_match_ptr(snvs_dt_ids),
+		.of_match_table = snvs_dt_ids,
 	},
 	.probe		= snvs_rtc_probe,
 };
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 2601953..ea96492 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -343,7 +343,7 @@
 		.name	= "stmp3xxx-rtc",
 		.owner	= THIS_MODULE,
 		.pm	= &stmp3xxx_rtc_pm_ops,
-		.of_match_table = of_match_ptr(rtc_dt_ids),
+		.of_match_table = rtc_dt_ids,
 	},
 };
 
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index a9caf04..7af0020 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -22,7 +22,6 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/tps65910.h>
 
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index d07d898..25222cd 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -303,7 +303,7 @@
 
 static int rtc_probe(struct platform_device *pdev)
 {
-	struct v3020_platform_data *pdata = pdev->dev.platform_data;
+	struct v3020_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct v3020 *chip;
 	int retval = -EBUSY;
 	int i;
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 54e104e..aabc22c 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -27,11 +28,10 @@
 #include <linux/rtc.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <linux/uaccess.h>
 #include <linux/log2.h>
 
 #include <asm/div64.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
 
 MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
 MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index c2d6331..df2ef3e 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -228,7 +228,7 @@
 	vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
 	if (vt8500_rtc->irq_alarm < 0) {
 		dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
-		return -ENXIO;
+		return vt8500_rtc->irq_alarm;
 	}
 
 	vt8500_rtc->res = devm_request_mem_region(&pdev->dev,
@@ -296,7 +296,7 @@
 	.driver		= {
 		.name	= "vt8500-rtc",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(wmt_dt_ids),
+		.of_match_table = wmt_dt_ids,
 	},
 };
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 244f77f..f302efa9 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2979,12 +2979,12 @@
 
 	elevator_exit(block->request_queue->elevator);
 	block->request_queue->elevator = NULL;
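+	/*
+	 * Take sysfs_lock around elevator re-initialisation to serialise
+	 * against elevator switches done via sysfs.
+	 */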
+	mutex_lock(&block->request_queue->sysfs_lock);
 	rc = elevator_init(block->request_queue, "deadline");
-	if (rc) {
+	if (rc)
 		blk_cleanup_queue(block->request_queue);
-		return rc;
-	}
-	return 0;
+	mutex_unlock(&block->request_queue->sysfs_lock);
+	return rc;
 }
 
 /*
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index af2166f..1abd0db 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -166,11 +166,15 @@
  * make a hypercall.  We hand the address  of the virtqueue so the Host
  * knows which virtqueue we're talking about.
  */
-static void kvm_notify(struct virtqueue *vq)
+static bool kvm_notify(struct virtqueue *vq)
 {
+	long rc;
 	struct kvm_vqconfig *config = vq->priv;
 
-	kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
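+	/* Report notify failure to the virtio core instead of ignoring it. */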
+	rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+	if (rc < 0)
+		return false;
+	return true;
 }
 
 /*
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 779dc51..d629717 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -162,7 +162,7 @@
 	return __rc;
 }
 
-static void virtio_ccw_kvm_notify(struct virtqueue *vq)
+static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
 {
 	struct virtio_ccw_vq_info *info = vq->priv;
 	struct virtio_ccw_device *vcdev;
@@ -171,6 +171,9 @@
 	vcdev = to_vc_device(info->vq->vdev);
 	ccw_device_get_schid(vcdev->cdev, &schid);
 	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
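+	/* A negative cookie indicates the host rejected the notification. */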
+	if (info->cookie < 0)
+		return false;
+	return true;
 }
 
 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d7b66a2..eb4e1f8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4451,7 +4451,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_snmp_ureq *ureq;
-	int req_len;
+	unsigned int req_len;
 	struct qeth_arp_query_info qinfo = {0, };
 	int rc = 0;
 
@@ -4467,6 +4467,10 @@
 	/* skip 4 bytes (data_len struct member) to get req_len */
 	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
 		return -EFAULT;
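+	/* Reject lengths that cannot fit in a single IPA command buffer. */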
+	if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
+		       sizeof(struct qeth_ipacmd_hdr) -
+		       sizeof(struct qeth_ipacmd_setadpparms_hdr)))
+		return -EINVAL;
 	ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
 	if (IS_ERR(ureq)) {
 		QETH_CARD_TEXT(card, 2, "snmpnome");
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 132a905..0ca6448 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -344,7 +344,7 @@
 
 /**
  * zfcp_dbf_san_req - trace event for issued SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
  * @fsf_req: request containing issued CT data
  * @d_id: destination ID
  */
@@ -361,7 +361,7 @@
 
 /**
  * zfcp_dbf_san_res - trace event for received SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
  * @fsf_req: request containing issued CT data
  */
 void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
@@ -377,7 +377,7 @@
 
 /**
  * zfcp_dbf_san_in_els - trace event for incoming ELS
- * @tag: indentifier for event
+ * @tag: identifier for event
  * @fsf_req: request containing issued CT data
  */
 void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 757eb07..972f817 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -26,8 +26,8 @@
 
 */
 
-#define blogic_drvr_version		"2.1.16"
-#define blogic_drvr_date		"18 July 2002"
+#define blogic_drvr_version		"2.1.17"
+#define blogic_drvr_date		"12 September 2013"
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -311,12 +311,14 @@
   caller.
 */
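+/*
+ * The dma_unmap argument is zero only when the sense buffer was never
+ * successfully DMA-mapped, in which case it must not be unmapped here.
+ */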
 
-static void blogic_dealloc_ccb(struct blogic_ccb *ccb)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 {
 	struct blogic_adapter *adapter = ccb->adapter;
 
-	scsi_dma_unmap(ccb->command);
-	pci_unmap_single(adapter->pci_device, ccb->sensedata,
+	if (ccb->command != NULL)
+		scsi_dma_unmap(ccb->command);
+	if (dma_unmap)
+		pci_unmap_single(adapter->pci_device, ccb->sensedata,
 			 ccb->sense_datalen, PCI_DMA_FROMDEVICE);
 
 	ccb->command = NULL;
@@ -2762,8 +2764,8 @@
 			/*
 			   Place CCB back on the Host Adapter's free list.
 			 */
-			blogic_dealloc_ccb(ccb);
-#if 0				/* this needs to be redone different for new EH */
+			blogic_dealloc_ccb(ccb, 1);
+#if 0			/* this needs to be redone differently for the new EH */
 			/*
 			   Bus Device Reset CCBs have the command field
 			   non-NULL only when a Bus Device Reset was requested
@@ -2791,7 +2793,7 @@
 				if (ccb->status == BLOGIC_CCB_RESET &&
 						ccb->tgt_id == tgt_id) {
 					command = ccb->command;
-					blogic_dealloc_ccb(ccb);
+					blogic_dealloc_ccb(ccb, 1);
 					adapter->active_cmds[tgt_id]--;
 					command->result = DID_RESET << 16;
 					command->scsi_done(command);
@@ -2862,7 +2864,7 @@
 			/*
 			   Place CCB back on the Host Adapter's free list.
 			 */
-			blogic_dealloc_ccb(ccb);
+			blogic_dealloc_ccb(ccb, 1);
 			/*
 			   Call the SCSI Command Completion Routine.
 			 */
@@ -3034,6 +3036,7 @@
 	int buflen = scsi_bufflen(command);
 	int count;
 	struct blogic_ccb *ccb;
+	dma_addr_t sense_buf;
 
 	/*
 	   SCSI REQUEST_SENSE commands will be executed automatically by the
@@ -3179,10 +3182,17 @@
 	}
 	memcpy(ccb->cdb, cdb, cdblen);
 	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
-	ccb->sensedata = pci_map_single(adapter->pci_device,
+	ccb->command = command;
+	sense_buf = pci_map_single(adapter->pci_device,
 				command->sense_buffer, ccb->sense_datalen,
 				PCI_DMA_FROMDEVICE);
-	ccb->command = command;
+	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+		blogic_err("DMA mapping for sense data buffer failed\n",
+				adapter);
+		blogic_dealloc_ccb(ccb, 0);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	ccb->sensedata = sense_buf;
 	command->scsi_done = comp_cb;
 	if (blogic_multimaster_type(adapter)) {
 		/*
@@ -3203,7 +3213,7 @@
 			if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
 						ccb)) {
 				blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
-				blogic_dealloc_ccb(ccb);
+				blogic_dealloc_ccb(ccb, 1);
 				command->result = DID_ERROR << 16;
 				command->scsi_done(command);
 			}
@@ -3337,7 +3347,7 @@
 
 	for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
 		if (ccb->status == BLOGIC_CCB_ACTIVE)
-			blogic_dealloc_ccb(ccb);
+			blogic_dealloc_ccb(ccb, 1);
 	/*
 	 * Wait a few seconds between the Host Adapter Hard Reset which
 	 * initiates a SCSI Bus Reset and issuing any SCSI Commands.  Some
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index c67e401..d814588 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2511,8 +2511,8 @@
 	struct asc_board *boardp = shost_priv(s);
 
 	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
-	printk(" host_busy %u, host_no %d, last_reset %d,\n",
-	       s->host_busy, s->host_no, (unsigned)s->last_reset);
+	printk(" host_busy %u, host_no %d,\n",
+	       s->host_busy, s->host_no);
 
 	printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
 	       (ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3345,8 +3345,8 @@
 		shost->host_no);
 
 	seq_printf(m,
-		   " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n",
-		   shost->host_busy, shost->last_reset, shost->max_id,
+		   " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
+		   shost->host_busy, shost->max_id,
 		   shost->max_lun, shost->max_channel);
 
 	seq_printf(m,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 33c52bc..97fd450 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1035,7 +1035,6 @@
 	pci_release_regions(pdev);
 	scsi_host_put(host);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static void arcmsr_shutdown(struct pci_dev *pdev)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 15a629d..a795d81 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3144,8 +3144,6 @@
 	atp870u_free_tables(pshost);
 	printk(KERN_INFO "scsi_host_put : %p\n",pshost);
 	scsi_host_put(pshost);
-	printk(KERN_INFO "pci_set_drvdata : %p\n",pdev);
-	pci_set_drvdata(pdev, NULL);	
 }
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 777e7c0..2e28f6c 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -128,7 +128,7 @@
 
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout		120000 /* 5s timeout */
+#define mcc_timeout		120000 /* 12s timeout */
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)				\
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index e66aa7c..3338391 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -17,9 +17,9 @@
 
 #include <scsi/iscsi_proto.h>
 
+#include "be_main.h"
 #include "be.h"
 #include "be_mgmt.h"
-#include "be_main.h"
 
 int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
 {
@@ -158,8 +158,10 @@
 	struct be_cmd_resp_hdr *ioctl_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-	if (beiscsi_error(phba))
+	if (beiscsi_error(phba)) {
+		free_mcc_tag(&phba->ctrl, tag);
 		return -EIO;
+	}
 
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
@@ -173,7 +175,11 @@
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Completion timed out\n");
-		rc = -EAGAIN;
+		rc = -EBUSY;
+
+		/* decrement the mccq used count */
+		atomic_dec(&phba->ctrl.mcc_obj.q.used);
+
 		goto release_mcc_tag;
 	} else
 		rc = 0;
@@ -208,10 +214,18 @@
 
 		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
 			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
-			if (ioctl_resp_hdr->response_length)
-				goto release_mcc_tag;
+			beiscsi_log(phba, KERN_WARNING,
+				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+				    BEISCSI_LOG_CONFIG,
+				    "BC_%d : Insufficient Buffer Error "
+				    "Resp_Len : %d Actual_Resp_Len : %d\n",
+				    ioctl_resp_hdr->response_length,
+				    ioctl_resp_hdr->actual_resp_len);
+
+			rc = -EAGAIN;
+			goto release_mcc_tag;
 		}
-		rc = -EAGAIN;
+		rc = -EIO;
 	}
 
 release_mcc_tag:
@@ -363,7 +377,7 @@
 	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
 		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
 		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
-		phba->state = BE_ADAPTER_UP;
+		phba->state = BE_ADAPTER_LINK_UP;
 
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -486,33 +500,47 @@
  **/
 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT	4000	/* 4sec */
 	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	uint32_t wait = 0;
+	unsigned long timeout;
+	bool read_flag = false;
+	int ret = 0, i;
 	u32 ready;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
+
+	if (beiscsi_error(phba))
+		return -EIO;
+
+	timeout = jiffies + (HZ * 110);
 
 	do {
-
-		if (beiscsi_error(phba))
-			return -EIO;
-
-		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
-		if (ready)
-			break;
-
-		if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-				    "BC_%d : FW Timed Out\n");
-			phba->fw_timeout = true;
-			beiscsi_ue_detect(phba);
-			return -EBUSY;
+		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+			if (ready) {
+				read_flag = true;
+				break;
+			}
+			mdelay(1);
 		}
 
-		mdelay(1);
-		wait++;
-	} while (true);
-	return 0;
+		if (!read_flag) {
+			wait_event_timeout(rdybit_check_q,
+					  (read_flag != true),
+					   HZ * 5);
+		}
+	} while ((time_before(jiffies, timeout)) && !read_flag);
+
+	if (!read_flag) {
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BC_%d : FW Timed Out\n");
+		phba->fw_timeout = true;
+		beiscsi_ue_detect(phba);
+		ret = -EBUSY;
+	}
+
+	return ret;
 }
 
 /*
@@ -699,7 +727,7 @@
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	struct be_mcc_wrb *wrb;
 
-	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	WARN_ON(atomic_read(&mccq->used) >= mccq->len);
 	wrb = queue_head_node(mccq);
 	memset(wrb, 0, sizeof(*wrb));
 	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -1009,10 +1037,29 @@
 	return status;
 }
 
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @length: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: ULP to bind the queue to
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are posted
+ * on this queue by the FW
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ *
+ **/
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size)
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_defq_create_req *req = embedded_payload(wrb);
@@ -1030,6 +1077,11 @@
 			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
 
 	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
@@ -1067,22 +1119,53 @@
 
 	status = be_mbox_notify(ctrl);
 	if (!status) {
+		struct be_ring *defq_ring;
 		struct be_defq_create_resp *resp = embedded_payload(wrb);
 
 		dq->id = le16_to_cpu(resp->id);
 		dq->created = true;
+		if (is_header)
+			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+		else
+			defq_ring = &phba->phwi_ctrlr->
+				    default_pdu_data[ulp_num];
+
+		defq_ring->id = dq->id;
+
+		if (!phba->fw_config.dual_ulp_aware) {
+			defq_ring->ulp_num = BEISCSI_ULP0;
+			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+		} else {
+			defq_ring->ulp_num = resp->ulp_num;
+			defq_ring->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
 
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+			struct be_dma_mem *q_mem,
+			struct be_queue_info *wrbq,
+			struct hwi_wrb_context *pwrb_context,
+			uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_wrbq_create_req *req = embedded_payload(wrb);
 	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	int status;
 
 	spin_lock(&ctrl->mbox_lock);
@@ -1093,17 +1176,78 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
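+	/* on DUA-aware FW, bind this WRBQ to the requested ULP */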
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
+
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
 	status = be_mbox_notify(ctrl);
 	if (!status) {
 		wrbq->id = le16_to_cpu(resp->cid);
 		wrbq->created = true;
+
+		pwrb_context->cid = wrbq->id;
+		if (!phba->fw_config.dual_ulp_aware) {
+			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+			pwrb_context->ulp_num = BEISCSI_ULP0;
+		} else {
+			pwrb_context->ulp_num = resp->ulp_num;
+			pwrb_context->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
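+/**
+ * be_cmd_iscsi_post_template_hdr()- Post template header pages
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the template header pages
+ *
+ * Post the iSCSI template header buffer pages to the FW.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/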
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				    struct be_dma_mem *q_mem)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_post_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+			   sizeof(*req));
+
+	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
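+/**
+ * be_cmd_iscsi_remove_template_hdr()- Remove template header pages
+ * @ctrl: ptr to ctrl_info
+ *
+ * Ask the FW to release the posted iSCSI template header pages.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/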
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_remove_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+			   sizeof(*req));
+
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem,
 				u32 page_offset, u32 num_pages)
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 9907308..627ebbe 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -40,6 +40,7 @@
 	u32 tag1;		/* dword 3 */
 	u32 rsvd;		/* dword 4 */
 	union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE  220
 		u8 embedded_payload[236];	/* used by embedded cmds */
 		struct be_sge sgl[19];	/* used by non-embedded cmds */
 	} payload;
@@ -162,6 +163,8 @@
 #define OPCODE_COMMON_CQ_CREATE				12
 #define OPCODE_COMMON_EQ_CREATE				13
 #define OPCODE_COMMON_MCC_CREATE			21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS	24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS	25
 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_MODIFY_EQ_DELAY			41
@@ -217,6 +220,10 @@
 	u32 hi;
 };
 
+struct virt_addr {
+	u32 lo;
+	u32 hi;
+};
+
 /**************************
  * BE Command definitions *
  **************************/
@@ -722,7 +729,13 @@
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size);
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				    struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
 
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem, u32 page_offset,
@@ -731,7 +744,9 @@
 int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
 
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq);
+		       struct be_queue_info *wrbq,
+		       struct hwi_wrb_context *pwrb_context,
+		       uint8_t ulp_num);
 
 bool is_link_state_evt(u32 trailer);
 
@@ -776,7 +791,9 @@
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT	0	/* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT	1	/* Byte 3 - Bit 1 */
+	u8 dua_feature;
 	struct be_default_pdu_context context;
 	struct phys_addr pages[8];
 } __packed;
@@ -784,6 +801,27 @@
 struct be_defq_create_resp {
 	struct be_cmd_req_hdr hdr;
 	u16 id;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI	0x1
+	u16 type;
+	struct phys_addr scratch_pa;
+	struct virt_addr scratch_va;
+	struct virt_addr pages_va;
+	struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 type;
 	u16 rsvd0;
 } __packed;
 
@@ -800,14 +838,18 @@
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+	u8 dua_feature;
 	struct phys_addr pages[8];
 } __packed;
 
 struct be_wrbq_create_resp {
 	struct be_cmd_resp_hdr resp_hdr;
 	u16 cid;
-	u16 rsvd0;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
 } __packed;
 
 #define SOL_CID_MASK		0x0000FFC0
@@ -1002,6 +1044,7 @@
 } __packed;
 
 struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE	0x10
 	u32 ulp_mode;
 	u32 etx_base;
 	u32 etx_count;
@@ -1017,14 +1060,26 @@
 	u32 icd_count;
 };
 
+struct be_ulp_chain_icd {
+	u32 chain_base;
+	u32 chain_count;
+};
+
 struct be_fw_cfg {
 	struct be_cmd_req_hdr hdr;
 	u32 be_config_number;
 	u32 asic_revision;
 	u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE	0x10
+#define BEISCSI_FUNC_DUA_MODE	0x800
 	u32 function_mode;
 	struct be_ulp_fw_cfg ulp[2];
 	u32 function_caps;
+	u32 cqid_base;
+	u32 cqid_count;
+	u32 eqid_base;
+	u32 eqid_count;
+	struct be_ulp_chain_icd chain_icd[2];
 } __packed;
 
 struct be_cmd_get_all_if_id_req {
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ef36be00..ffadbee 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -58,10 +58,15 @@
 	}
 	beiscsi_ep = ep->dd_data;
 	phba = beiscsi_ep->phba;
-	shost = phba->shost;
 
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_session_create\n");
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : PCI_ERROR Recovery\n");
+		return NULL;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_session_create\n");
+	}
 
 	if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@
 		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
 	}
 
+	shost = phba->shost;
 	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
 					  shost, cmds_max,
 					  sizeof(*beiscsi_sess),
@@ -194,6 +200,8 @@
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct iscsi_endpoint *ep;
 
@@ -214,9 +222,13 @@
 		return -EEXIST;
 	}
 
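+	/* look up the WRB context to get this CID's doorbell offset */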
+	pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+						beiscsi_ep->ep_cid)];
+
 	beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
 	beiscsi_conn->ep = beiscsi_ep;
 	beiscsi_ep->conn = beiscsi_conn;
+	beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
 
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 		    "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -265,13 +277,17 @@
 
 void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
 {
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
 
-	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
 		beiscsi_create_ipv4_iface(phba);
+		kfree(if_info);
+	}
 
-	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
 		beiscsi_create_ipv6_iface(phba);
+		kfree(if_info);
+	}
 }
 
 void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -467,6 +483,12 @@
 	uint32_t rm_len = dt_len;
 	int ret = 0 ;
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	}
+
 	nla_for_each_attr(attrib, data, dt_len, rm_len) {
 		iface_param = nla_data(attrib);
 
@@ -512,59 +534,60 @@
 		struct iscsi_iface *iface, int param,
 		char *buf)
 {
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
 	int len, ip_type = BE2_IPV4;
 
-	memset(&if_info, 0, sizeof(if_info));
-
 	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
 		ip_type = BE2_IPV6;
 
 	len = mgmt_get_if_info(phba, ip_type, &if_info);
-	if (len)
+	if (len) {
+		kfree(if_info);
 		return len;
+	}
 
 	switch (param) {
 	case ISCSI_NET_PARAM_IPV4_ADDR:
-		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+		len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
 		break;
 	case ISCSI_NET_PARAM_IPV6_ADDR:
-		len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+		len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
 		break;
 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-		if (!if_info.dhcp_state)
+		if (!if_info->dhcp_state)
 			len = sprintf(buf, "static\n");
 		else
 			len = sprintf(buf, "dhcp\n");
 		break;
 	case ISCSI_NET_PARAM_IPV4_SUBNET:
-		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+		len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
 		break;
 	case ISCSI_NET_PARAM_VLAN_ENABLED:
 		len = sprintf(buf, "%s\n",
-			     (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+			     (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			     ? "Disabled\n" : "Enabled\n");
 		break;
 	case ISCSI_NET_PARAM_VLAN_ID:
-		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			return -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				     (if_info.vlan_priority &
+				     (if_info->vlan_priority &
 				     ISCSI_MAX_VLAN_ID));
 		break;
 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
-		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			return -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				     ((if_info.vlan_priority >> 13) &
+				     ((if_info->vlan_priority >> 13) &
 				     ISCSI_MAX_VLAN_PRIORITY));
 		break;
 	default:
 		WARN_ON(1);
 	}
 
+	kfree(if_info);
 	return len;
 }
 
@@ -577,6 +600,12 @@
 	struct be_cmd_get_def_gateway_resp gateway;
 	int len = -ENOSYS;
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	}
+
 	switch (param) {
 	case ISCSI_NET_PARAM_IPV4_ADDR:
 	case ISCSI_NET_PARAM_IPV4_SUBNET:
@@ -672,8 +701,7 @@
 			session->max_burst = 262144;
 		break;
 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		if ((conn->max_xmit_dlength > 65536) ||
-		    (conn->max_xmit_dlength == 0))
+		if (conn->max_xmit_dlength > 65536)
 			conn->max_xmit_dlength = 65536;
 	default:
 		return 0;
@@ -727,7 +755,7 @@
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	struct iscsi_cls_host *ihost = shost->shost_data;
 
-	ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+	ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
 		ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
 
@@ -795,9 +823,16 @@
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	int status = 0;
 
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_get_host_param,"
-		    " param= %d\n", param);
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_get_host_param,"
+			    " param = %d\n", param);
+	}
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_HWADDRESS:
@@ -840,7 +875,7 @@
 	struct be_cmd_get_nic_conf_resp resp;
 	int rc;
 
-	if (strlen(phba->mac_address))
+	if (phba->mac_addr_set)
 		return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 
 	memset(&resp, 0, sizeof(resp));
@@ -848,6 +883,7 @@
 	if (rc)
 		return rc;
 
+	phba->mac_addr_set = true;
 	memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
 	return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 }
@@ -923,6 +959,10 @@
 		      session->max_r2t);
 	AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
 		      (conn->exp_statsn - 1));
+	AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+		      max_recv_data_segment_length, params,
+		      conn->max_recv_dlength);
+
 }
 
 /**
@@ -935,10 +975,19 @@
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct beiscsi_offload_params params;
+	struct beiscsi_hba *phba;
 
-	beiscsi_log(beiscsi_conn->phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_conn_start\n");
+	phba = beiscsi_conn->phba;
+
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	} else {
+		beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+			    BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_conn_start\n");
+	}
 
 	memset(&params, 0, sizeof(struct beiscsi_offload_params));
 	beiscsi_ep = beiscsi_conn->ep;
@@ -960,15 +1009,31 @@
  */
 static int beiscsi_get_cid(struct beiscsi_hba *phba)
 {
-	unsigned short cid = 0xFFFF;
+	unsigned short cid = 0xFFFF, cid_from_ulp;
+	struct ulp_cid_info *cid_info = NULL;
+	uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
 
-	if (!phba->avlbl_cids)
-		return cid;
+	/* Find the ULP which has more CID available */
+	cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+			  BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+	cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+			  BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+	cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+			BEISCSI_ULP0 : BEISCSI_ULP1;
 
-	cid = phba->cid_array[phba->cid_alloc++];
-	if (phba->cid_alloc == phba->params.cxns_per_ctrl)
-		phba->cid_alloc = 0;
-	phba->avlbl_cids--;
+	if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+		cid_info = phba->cid_array_info[cid_from_ulp];
+		if (!cid_info->avlbl_cids)
+			return cid;
+
+		cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+		if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+					   phba, cid_from_ulp))
+			cid_info->cid_alloc = 0;
+
+		cid_info->avlbl_cids--;
+	}
 	return cid;
 }
 
@@ -979,10 +1044,22 @@
  */
 static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 {
-	phba->avlbl_cids++;
-	phba->cid_array[phba->cid_free++] = cid;
-	if (phba->cid_free == phba->params.cxns_per_ctrl)
-		phba->cid_free = 0;
+	uint16_t cid_post_ulp;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
+	struct ulp_cid_info *cid_info = NULL;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+	cid_post_ulp = pwrb_context->ulp_num;
+
+	cid_info = phba->cid_array_info[cid_post_ulp];
+	cid_info->avlbl_cids++;
+
+	cid_info->cid_array[cid_info->cid_free++] = cid;
+	if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+		cid_info->cid_free = 0;
 }
 
 /**
@@ -1135,7 +1212,12 @@
 		return ERR_PTR(ret);
 	}
 
-	if (phba->state != BE_ADAPTER_UP) {
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		ret = -EBUSY;
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return ERR_PTR(ret);
+	} else if (phba->state & BE_ADAPTER_LINK_DOWN) {
 		ret = -EBUSY;
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 			    "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1260,6 +1342,12 @@
 		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
 	}
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : PCI_ERROR Recovery\n");
+		goto free_ep;
+	}
+
 	tag = mgmt_invalidate_connection(phba, beiscsi_ep,
 					  beiscsi_ep->ep_cid,
 					  mgmt_invalidate_flag,
@@ -1272,6 +1360,7 @@
 
 	beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
 	beiscsi_free_ep(beiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a1f5ac7..1f37505 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -149,18 +149,25 @@
 		"\t\t\t\tMiscellaneous Events	: 0x04\n"
 		"\t\t\t\tError Handling		: 0x08\n"
 		"\t\t\t\tIO Path Events		: 0x10\n"
-		"\t\t\t\tConfiguration Path	: 0x20\n");
+		"\t\t\t\tConfiguration Path	: 0x20\n"
+		"\t\t\t\tiSCSI Protocol		: 0x40\n");
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+	     beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+	     beiscsi_free_session_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
 	&dev_attr_beiscsi_log_enable,
 	&dev_attr_beiscsi_drvr_ver,
 	&dev_attr_beiscsi_adapter_family,
 	&dev_attr_beiscsi_fw_ver,
-	&dev_attr_beiscsi_active_cid_count,
+	&dev_attr_beiscsi_active_session_count,
+	&dev_attr_beiscsi_free_session_count,
+	&dev_attr_beiscsi_phys_port,
 	NULL,
 };
 
@@ -239,6 +246,11 @@
 		return SUCCESS;
 	}
 	spin_unlock_bh(&session->lock);
+	/* Invalidate WRB Posted for this Task */
+	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+		      aborted_io_task->pwrb_handle->pwrb,
+		      1);
+
 	conn = aborted_task->conn;
 	beiscsi_conn = conn->dd_data;
 	phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@
 		if (sc->device->lun != abrt_task->sc->device->lun)
 			continue;
 
+		/* Invalidate WRB Posted for this Task */
+		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+			      abrt_io_task->pwrb_handle->pwrb,
+			      1);
+
 		inv_tbl->cid = cid;
 		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
 		num_invalidate++;
@@ -699,30 +716,85 @@
 	return status;
 }
 
+/**
+ * beiscsi_get_params()- Set the config parameters
+ * @phba: ptr to device priv structure
+ **/
 static void beiscsi_get_params(struct beiscsi_hba *phba)
 {
-	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
-				    - (phba->fw_config.iscsi_cid_count
-				    + BE2_TMFS
-				    + BE2_NOPOUT_REQ));
-	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
+	uint32_t total_cid_count = 0;
+	uint32_t total_icd_count = 0;
+	uint8_t ulp_num = 0;
+
+	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		uint32_t align_mask = 0;
+		uint32_t icd_post_per_page = 0;
+		uint32_t icd_count_unavailable = 0;
+		uint32_t icd_start = 0, icd_count = 0;
+		uint32_t icd_start_align = 0, icd_count_align = 0;
+
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+			/* Get ICD count that can be posted on each page */
+			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+					     sizeof(struct iscsi_sge)));
+			align_mask = (icd_post_per_page - 1);
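+			/* assumes icd_post_per_page is a power of 2 */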
+
+			/* Align icd_start to the per-page posting boundary */
+			if (icd_start % icd_post_per_page) {
+				icd_start_align = ((icd_start +
+						    icd_post_per_page) &
+						    ~(align_mask));
+				phba->fw_config.
+					iscsi_icd_start[ulp_num] =
+					icd_start_align;
+			}
+
+			icd_count_align = (icd_count & ~align_mask);
+
+			/* ICD discarded in the process of alignment */
+			if (icd_start_align)
+				icd_count_unavailable = ((icd_start_align -
+							  icd_start) +
+							 (icd_count -
+							  icd_count_align));
+
+			/* Updated ICD count available */
+			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+					icd_count_unavailable);
+
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					"BM_%d : Aligned ICD values\n"
+					"\t ICD Start : %d\n"
+					"\t ICD Count : %d\n"
+					"\t ICD Discarded : %d\n",
+					phba->fw_config.
+					iscsi_icd_start[ulp_num],
+					phba->fw_config.
+					iscsi_icd_count[ulp_num],
+					icd_count_unavailable);
+			break;
+		}
+	}
+
+	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+	phba->params.ios_per_ctrl = (total_icd_count -
+				    (total_cid_count +
+				     BE2_TMFS + BE2_NOPOUT_REQ));
+	phba->params.cxns_per_ctrl = total_cid_count;
+	phba->params.asyncpdus_per_ctrl = total_cid_count;
+	phba->params.icds_per_ctrl = total_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
 	phba->params.eq_timer = 64;
-	phba->params.num_eq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
-				    + BE2_TMFS) / 512) + 1) * 512;
-	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
-				? 1024 : phba->params.num_eq_entries;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : phba->params.num_eq_entries=%d\n",
-		    phba->params.num_eq_entries);
-	phba->params.num_cq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
-				    + BE2_TMFS) / 512) + 1) * 512;
+	phba->params.num_eq_entries = 1024;
+	phba->params.num_cq_entries = 1024;
 	phba->params.wrbs_per_cxn = 256;
 }
 
@@ -1613,8 +1685,8 @@
 
 	WARN_ON(!pasync_handle);
 
-	pasync_handle->cri =
-			BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 	pasync_handle->is_header = is_header;
 	pasync_handle->buffer_len = dpl;
 	*pcq_index = index;
@@ -1674,18 +1746,13 @@
 }
 
 static void hwi_free_async_msg(struct beiscsi_hba *phba,
-				       unsigned int cri)
+			       struct hwi_async_pdu_context *pasync_ctx,
+			       unsigned int cri)
 {
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle, *tmp_handle;
 	struct list_head *plist;
 
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
-
 	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
-
 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
 		list_del(&pasync_handle->link);
 
@@ -1720,7 +1787,7 @@
 }
 
 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
-				   unsigned int is_header)
+				    unsigned int is_header, uint8_t ulp_num)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@
 	struct list_head *pfree_link, *pbusy_list;
 	struct phys_addr *pasync_sge;
 	unsigned int ring_id, num_entries;
-	unsigned int host_write_num;
+	unsigned int host_write_num, doorbell_offset;
 	unsigned int writables;
 	unsigned int i = 0;
 	u32 doorbell = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
 	num_entries = pasync_ctx->num_entries;
 
 	if (is_header) {
@@ -1742,13 +1809,17 @@
 				pasync_ctx->async_header.free_entries);
 		pfree_link = pasync_ctx->async_header.free_list.next;
 		host_write_num = pasync_ctx->async_header.host_write_ptr;
-		ring_id = phwi_ctrlr->default_pdu_hdr.id;
+		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+				  doorbell_offset;
 	} else {
 		writables = min(pasync_ctx->async_data.writables,
 				pasync_ctx->async_data.free_entries);
 		pfree_link = pasync_ctx->async_data.free_list.next;
 		host_write_num = pasync_ctx->async_data.host_write_ptr;
-		ring_id = phwi_ctrlr->default_pdu_data.id;
+		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+				  doorbell_offset;
 	}
 
 	writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@
 		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
 					<< DB_DEF_PDU_CQPROC_SHIFT;
 
-		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
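+		/* ring the DEFQ's per-ULP doorbell offset */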
+		iowrite32(doorbell, phba->db_va + doorbell_offset);
 	}
 }
 
@@ -1808,9 +1879,13 @@
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle = NULL;
 	unsigned int cq_index = -1;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     cri_index));
 
 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
 					     pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@
 		hwi_update_async_writables(phba, pasync_ctx,
 					   pasync_handle->is_header, cq_index);
 
-	hwi_free_async_msg(phba, pasync_handle->cri);
-	hwi_post_async_buffers(phba, pasync_handle->is_header);
+	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+	hwi_post_async_buffers(phba, pasync_handle->is_header,
+			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+			       cri_index));
 }
 
 static unsigned int
@@ -1859,7 +1936,7 @@
 					    phdr, hdr_len, pfirst_buffer,
 					    offset);
 
-	hwi_free_async_msg(phba, cri);
+	hwi_free_async_msg(phba, pasync_ctx, cri);
 	return 0;
 }
 
@@ -1875,13 +1952,16 @@
 	struct pdu_base *ppdu;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     BE_GET_CRI_FROM_CID(beiscsi_conn->
+				 beiscsi_conn_cid)));
 
 	list_del(&pasync_handle->link);
 	if (pasync_handle->is_header) {
 		pasync_ctx->async_header.busy_entries--;
 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-			hwi_free_async_msg(phba, cri);
+			hwi_free_async_msg(phba, pasync_ctx, cri);
 			BUG();
 		}
 
@@ -1936,9 +2016,14 @@
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle = NULL;
 	unsigned int cq_index = -1;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     cri_index));
+
 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
 					     pdpdu_cqe, &cq_index);
 
@@ -1947,7 +2032,9 @@
 					   pasync_handle->is_header, cq_index);
 
 	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
-	hwi_post_async_buffers(phba, pasync_handle->is_header);
+	hwi_post_async_buffers(phba, pasync_handle->is_header,
+			       BEISCSI_GET_ULP_FROM_CRI(
+			       phwi_ctrlr, cri_index));
 }
 
 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@
 				    "BM_%d : Received %s[%d] on CID : %d\n",
 				    cqe_desc[code], code, cid);
 
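+			/* serialize work on the shared default PDU ring */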
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
 					     (struct i_t_dpdu_cqe *)sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case UNSOL_DATA_NOTIFY:
 			beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@
 				    "BM_%d : Received %s[%d] on CID : %d\n",
 				    cqe_desc[code], code, cid);
 
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
 					     (struct i_t_dpdu_cqe *)sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_INVALIDATE_INDEX_NOTIFY:
 		case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@
 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
 				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
 				    cqe_desc[code], code, cid);
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
 					     (struct i_t_dpdu_cqe *) sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
 		case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@
 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
 }
 
+/**
+ * beiscsi_find_mem_req()- Find the memory required
+ * @phba: ptr to HBA struct
+ **/
 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 {
+	uint8_t mem_descr_index, ulp_num;
 	unsigned int num_cq_pages, num_async_pdu_buf_pages;
 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
 
 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
 				      sizeof(struct sol_cqe));
-	num_async_pdu_buf_pages =
-			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-				       phba->params.defpdu_hdr_sz);
-	num_async_pdu_buf_sgl_pages =
-			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-				       sizeof(struct phys_addr));
-	num_async_pdu_data_pages =
-			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-				       phba->params.defpdu_data_sz);
-	num_async_pdu_data_sgl_pages =
-			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-				       sizeof(struct phys_addr));
 
 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
 
@@ -2517,24 +2603,79 @@
 		phba->params.icds_per_ctrl;
 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
-		num_async_pdu_buf_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
-		num_async_pdu_data_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
-		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
-		num_async_pdu_data_sgl_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
-		phba->params.asyncpdus_per_ctrl *
-		sizeof(struct async_pdu_handle);
-	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
-		phba->params.asyncpdus_per_ctrl *
-		sizeof(struct async_pdu_handle);
-	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
-		sizeof(struct hwi_async_pdu_context) +
-		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+			num_async_pdu_buf_sgl_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       sizeof(struct phys_addr));
+
+			num_async_pdu_buf_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       phba->params.defpdu_hdr_sz);
+
+			num_async_pdu_data_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       phba->params.defpdu_data_sz);
+
+			num_async_pdu_data_sgl_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       sizeof(struct phys_addr));
+
+			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_buf_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_data_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_buf_sgl_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_data_sgl_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct async_pdu_handle);
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct async_pdu_handle);
+
+			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  sizeof(struct hwi_async_pdu_context) +
+					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct hwi_async_entry));
+		}
+	}
 }
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
@@ -2576,6 +2717,12 @@
 
 	mem_descr = phba->init_mem;
 	for (i = 0; i < SE_MEM_MAX; i++) {
+		if (!phba->mem_req[i]) {
+			mem_descr->mem_array = NULL;
+			mem_descr++;
+			continue;
+		}
+
 		j = 0;
 		mem_arr = mem_arr_orig;
 		alloc_size = phba->mem_req[i];
@@ -2697,7 +2844,7 @@
 	/* Allocate memory for WRBQ */
 	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
 	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
-				     phba->fw_config.iscsi_cid_count,
+				     phba->params.cxns_per_ctrl,
 				     GFP_KERNEL);
 	if (!phwi_ctxt->be_wrbq) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@
 
 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
+	uint8_t ulp_num;
 	struct hwi_controller *phwi_ctrlr;
 	struct hba_parameters *p = &phba->params;
 	struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@
 	unsigned int index, idx, num_per_mem, num_async_data;
 	struct be_mem_descriptor *mem_descr;
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
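+	/* set up one async PDU context per supported ULP */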
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET));
+
+			phwi_ctrlr = phba->phwi_ctrlr;
+			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+				(struct hwi_async_pdu_context *)
+				 mem_descr->mem_array[0].virtual_address;
+
+			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+			memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+			pasync_ctx->async_entry =
+					(struct hwi_async_entry *)
+					((unsigned long)pasync_ctx +
+					sizeof(struct hwi_async_pdu_context));
+
+			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+						  ulp_num);
+			pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+				(ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
+
+			pasync_ctx->async_header.va_base =
 				mem_descr->mem_array[0].virtual_address;
-	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
-	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
-	pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
-					  phba->fw_config.iscsi_cid_count,
-					  GFP_KERNEL);
-	if (!pasync_ctx->async_entry) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
-		return -ENOMEM;
-	}
+			pasync_ctx->async_header.pa_base.u.a64.address =
+				mem_descr->mem_array[0].
+				bus_address.u.a64.address;
 
-	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
-	pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			pasync_ctx->async_header.ring_base =
+				mem_descr->mem_array[0].virtual_address;
 
-	pasync_ctx->async_header.va_base =
-			mem_descr->mem_array[0].virtual_address;
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	pasync_ctx->async_header.pa_base.u.a64.address =
-			mem_descr->mem_array[0].bus_address.u.a64.address;
+			pasync_ctx->async_header.handle_base =
+				mem_descr->mem_array[0].virtual_address;
+			pasync_ctx->async_header.writables = 0;
+			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	pasync_ctx->async_header.ring_base =
-			mem_descr->mem_array[0].virtual_address;
+			pasync_ctx->async_data.ring_base =
+				mem_descr->mem_array[0].virtual_address;
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (!mem_descr->mem_array[0].virtual_address)
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	pasync_ctx->async_header.handle_base =
-			mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_header.writables = 0;
-	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+			pasync_ctx->async_data.handle_base =
+				mem_descr->mem_array[0].virtual_address;
+			pasync_ctx->async_data.writables = 0;
+			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
 
+			pasync_header_h =
+				(struct async_pdu_handle *)
+				pasync_ctx->async_header.handle_base;
+			pasync_data_h =
+				(struct async_pdu_handle *)
+				pasync_ctx->async_data.handle_base;
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_RING;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_DATA_RING va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	pasync_ctx->async_data.ring_base =
-			mem_descr->mem_array[0].virtual_address;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
-	if (!mem_descr->mem_array[0].virtual_address)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	pasync_ctx->async_data.handle_base =
-			mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_data.writables = 0;
-	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
-
-	pasync_header_h =
-		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
-	pasync_data_h =
-		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	idx = 0;
-	pasync_ctx->async_data.va_base =
-			mem_descr->mem_array[idx].virtual_address;
-	pasync_ctx->async_data.pa_base.u.a64.address =
-			mem_descr->mem_array[idx].bus_address.u.a64.address;
-
-	num_async_data = ((mem_descr->mem_array[idx].size) /
-				phba->params.defpdu_data_sz);
-	num_per_mem = 0;
-
-	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
-		pasync_header_h->cri = -1;
-		pasync_header_h->index = (char)index;
-		INIT_LIST_HEAD(&pasync_header_h->link);
-		pasync_header_h->pbuffer =
-			(void *)((unsigned long)
-			(pasync_ctx->async_header.va_base) +
-			(p->defpdu_hdr_sz * index));
-
-		pasync_header_h->pa.u.a64.address =
-			pasync_ctx->async_header.pa_base.u.a64.address +
-			(p->defpdu_hdr_sz * index);
-
-		list_add_tail(&pasync_header_h->link,
-				&pasync_ctx->async_header.free_list);
-		pasync_header_h++;
-		pasync_ctx->async_header.free_entries++;
-		pasync_ctx->async_header.writables++;
-
-		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
-		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-			       header_busy_list);
-		pasync_data_h->cri = -1;
-		pasync_data_h->index = (char)index;
-		INIT_LIST_HEAD(&pasync_data_h->link);
-
-		if (!num_async_data) {
-			num_per_mem = 0;
-			idx++;
+			idx = 0;
 			pasync_ctx->async_data.va_base =
 				mem_descr->mem_array[idx].virtual_address;
 			pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2943,32 +3086,83 @@
 
 			num_async_data = ((mem_descr->mem_array[idx].size) /
 					phba->params.defpdu_data_sz);
+			num_per_mem = 0;
+
+			for (index = 0;	index < BEISCSI_GET_CID_COUNT
+					(phba, ulp_num); index++) {
+				pasync_header_h->cri = -1;
+				pasync_header_h->index = (char)index;
+				INIT_LIST_HEAD(&pasync_header_h->link);
+				pasync_header_h->pbuffer =
+					(void *)((unsigned long)
+						 (pasync_ctx->
+						  async_header.va_base) +
+						 (p->defpdu_hdr_sz * index));
+
+				pasync_header_h->pa.u.a64.address =
+					pasync_ctx->async_header.pa_base.u.a64.
+					address + (p->defpdu_hdr_sz * index);
+
+				list_add_tail(&pasync_header_h->link,
+					      &pasync_ctx->async_header.
+					      free_list);
+				pasync_header_h++;
+				pasync_ctx->async_header.free_entries++;
+				pasync_ctx->async_header.writables++;
+
+				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+					       wait_queue.list);
+				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+					       header_busy_list);
+				pasync_data_h->cri = -1;
+				pasync_data_h->index = (char)index;
+				INIT_LIST_HEAD(&pasync_data_h->link);
+
+				if (!num_async_data) {
+					num_per_mem = 0;
+					idx++;
+					pasync_ctx->async_data.va_base =
+						mem_descr->mem_array[idx].
+						virtual_address;
+					pasync_ctx->async_data.pa_base.u.
+						a64.address =
+						mem_descr->mem_array[idx].
+						bus_address.u.a64.address;
+					num_async_data =
+						((mem_descr->mem_array[idx].
+						  size) /
+						 phba->params.defpdu_data_sz);
+				}
+				pasync_data_h->pbuffer =
+					(void *)((unsigned long)
+					(pasync_ctx->async_data.va_base) +
+					(p->defpdu_data_sz * num_per_mem));
+
+				pasync_data_h->pa.u.a64.address =
+					pasync_ctx->async_data.pa_base.u.a64.
+					address + (p->defpdu_data_sz *
+					num_per_mem);
+				num_per_mem++;
+				num_async_data--;
+
+				list_add_tail(&pasync_data_h->link,
+					      &pasync_ctx->async_data.
+					      free_list);
+				pasync_data_h++;
+				pasync_ctx->async_data.free_entries++;
+				pasync_ctx->async_data.writables++;
+
+				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+					       data_busy_list);
+			}
+
+			pasync_ctx->async_header.host_write_ptr = 0;
+			pasync_ctx->async_header.ep_read_ptr = -1;
+			pasync_ctx->async_data.host_write_ptr = 0;
+			pasync_ctx->async_data.ep_read_ptr = -1;
 		}
-		pasync_data_h->pbuffer =
-			(void *)((unsigned long)
-			(pasync_ctx->async_data.va_base) +
-			(p->defpdu_data_sz * num_per_mem));
-
-		pasync_data_h->pa.u.a64.address =
-		    pasync_ctx->async_data.pa_base.u.a64.address +
-		    (p->defpdu_data_sz * num_per_mem);
-		num_per_mem++;
-		num_async_data--;
-
-		list_add_tail(&pasync_data_h->link,
-			      &pasync_ctx->async_data.free_list);
-		pasync_data_h++;
-		pasync_ctx->async_data.free_entries++;
-		pasync_ctx->async_data.writables++;
-
-		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
 	}
 
-	pasync_ctx->async_header.host_write_ptr = 0;
-	pasync_ctx->async_header.ep_read_ptr = -1;
-	pasync_ctx->async_data.host_write_ptr = 0;
-	pasync_ctx->async_data.ep_read_ptr = -1;
-
 	return 0;
 }
 
@@ -3164,7 +3358,7 @@
 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
 		       struct hwi_context_memory *phwi_context,
 		       struct hwi_controller *phwi_ctrlr,
-		       unsigned int def_pdu_ring_sz)
+		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
 {
 	unsigned int idx;
 	int ret;
@@ -3174,36 +3368,42 @@
 	void *dq_vaddress;
 
 	idx = 0;
-	dq = &phwi_context->be_def_hdrq;
+	dq = &phwi_context->be_def_hdrq[ulp_num];
 	cq = &phwi_context->be_cq[0];
 	mem = &dq->dma_mem;
 	mem_descr = phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+		    (ulp_num * MEM_DESCR_OFFSET);
 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
 			    sizeof(struct phys_addr),
 			    sizeof(struct phys_addr), dq_vaddress);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
+			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
+			    ulp_num);
+
 		return ret;
 	}
 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
 				  bus_address.u.a64.address;
 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
 					      def_pdu_ring_sz,
-					      phba->params.defpdu_hdr_sz);
+					      phba->params.defpdu_hdr_sz,
+					      BEISCSI_DEFQ_HDR, ulp_num);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+			    ulp_num);
+
 		return ret;
 	}
-	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : iscsi def pdu id is %d\n",
-		    phwi_context->be_def_hdrq.id);
 
-	hwi_post_async_buffers(phba, 1);
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+		    ulp_num,
+		    phwi_context->be_def_hdrq[ulp_num].id);
+	hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
 	return 0;
 }
 
@@ -3211,7 +3411,7 @@
 beiscsi_create_def_data(struct beiscsi_hba *phba,
 			struct hwi_context_memory *phwi_context,
 			struct hwi_controller *phwi_ctrlr,
-			unsigned int def_pdu_ring_sz)
+			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
 {
 	unsigned int idx;
 	int ret;
@@ -3221,40 +3421,83 @@
 	void *dq_vaddress;
 
 	idx = 0;
-	dataq = &phwi_context->be_def_dataq;
+	dataq = &phwi_context->be_def_dataq[ulp_num];
 	cq = &phwi_context->be_cq[0];
 	mem = &dataq->dma_mem;
 	mem_descr = phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_RING;
+	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+		    (ulp_num * MEM_DESCR_OFFSET);
 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
 			    sizeof(struct phys_addr),
 			    sizeof(struct phys_addr), dq_vaddress);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
+			    "BM_%d : be_fill_queue Failed for DEF PDU "
+			    "DATA on ULP : %d\n",
+			    ulp_num);
+
 		return ret;
 	}
 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
 				  bus_address.u.a64.address;
 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
 					      def_pdu_ring_sz,
-					      phba->params.defpdu_data_sz);
+					      phba->params.defpdu_data_sz,
+					      BEISCSI_DEFQ_DATA, ulp_num);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d be_cmd_create_default_pdu_queue"
-			    " Failed for DEF PDU DATA\n");
+			    " Failed for DEF PDU DATA on ULP : %d\n",
+			    ulp_num);
 		return ret;
 	}
-	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : iscsi def data id is %d\n",
-		    phwi_context->be_def_dataq.id);
 
-	hwi_post_async_buffers(phba, 0);
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+		    "BM_%d : iscsi def data id on ULP : %d is %d\n",
+		    ulp_num,
+		    phwi_context->be_def_dataq[ulp_num].id);
 
+	hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+		    "BM_%d : DEFAULT PDU DATA RING CREATED "
+		    "on ULP : %d\n", ulp_num);
+
+	return 0;
+}
+
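+/**
+ * beiscsi_post_template_hdr()- Post template header pages
+ * @phba: ptr to HBA struct
+ *
+ * Post the template header buffer pages for every supported ULP.
+ **/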
+static int
+beiscsi_post_template_hdr(struct beiscsi_hba *phba)
+{
+	struct be_mem_descriptor *mem_descr;
+	struct mem_array *pm_arr;
+	struct be_dma_mem sgl;
+	int status, ulp_num;
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
+				    (ulp_num * MEM_DESCR_OFFSET);
+			pm_arr = mem_descr->mem_array;
+
+			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+			status = be_cmd_iscsi_post_template_hdr(
+				 &phba->ctrl, &sgl);
+
+			if (status != 0) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Post Template HDR Failed for"
+					    " ULP_%d\n", ulp_num);
+				return status;
+			}
+
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+				    "BM_%d : Template HDR Pages Posted for"
+				    " ULP_%d\n", ulp_num);
+		}
+	}
 	return 0;
 }
 
@@ -3265,14 +3508,18 @@
 	struct mem_array *pm_arr;
 	unsigned int page_offset, i;
 	struct be_dma_mem sgl;
-	int status;
+	int status, ulp_num = 0;
 
 	mem_descr = phba->init_mem;
 	mem_descr += HWI_MEM_SGE;
 	pm_arr = mem_descr->mem_array;
 
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+			break;
+
 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
-			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
+			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
 	for (i = 0; i < mem_descr->num_elements; i++) {
 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
@@ -3324,13 +3571,15 @@
 {
 	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
 	u64 pa_addr_lo;
-	unsigned int idx, num, i;
+	unsigned int idx, num, i, ulp_num;
 	struct mem_array *pwrb_arr;
 	void *wrb_vaddr;
 	struct be_dma_mem sgl;
 	struct be_mem_descriptor *mem_descr;
 	struct hwi_wrb_context *pwrb_context;
 	int status;
+	uint8_t ulp_count = 0, ulp_base_num = 0;
+	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
 
 	idx = 0;
 	mem_descr = phba->init_mem;
@@ -3374,14 +3623,37 @@
 			num_wrb_rings--;
 		}
 	}
+
+	/* Get the ULP Count */
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+			ulp_count++;
+			ulp_base_num = ulp_num;
+			cid_count_ulp[ulp_num] =
+				BEISCSI_GET_CID_COUNT(phba, ulp_num);
+		}
+
 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
 		wrb_mem_index = 0;
 		offset = 0;
 		size = 0;
 
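+		/* round-robin connections across the CID-backed ULPs */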
+		if (ulp_count > 1) {
+			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
+
+			if (!cid_count_ulp[ulp_base_num])
+				ulp_base_num = (ulp_base_num + 1) %
+						BEISCSI_ULP_COUNT;
+
+			cid_count_ulp[ulp_base_num]--;
+		}
+
 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
-					    &phwi_context->be_wrbq[i]);
+					    &phwi_context->be_wrbq[i],
+					    &phwi_ctrlr->wrb_context[i],
+					    ulp_base_num);
 		if (status != 0) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 				    "BM_%d : wrbq create failed.");
@@ -3389,7 +3661,6 @@
 			return status;
 		}
 		pwrb_context = &phwi_ctrlr->wrb_context[i];
-		pwrb_context->cid = phwi_context->be_wrbq[i].id;
 		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
 	}
 	kfree(pwrb_arr);
@@ -3433,10 +3704,13 @@
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
 	struct hwi_async_pdu_context *pasync_ctx;
-	int i, eq_num;
+	int i, eq_num, ulp_num;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	be_cmd_iscsi_remove_template_hdr(ctrl);
+
 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
 		q = &phwi_context->be_wrbq[i];
 		if (q->created)
@@ -3445,13 +3719,20 @@
 	kfree(phwi_context->be_wrbq);
 	free_wrb_handles(phba);
 
-	q = &phwi_context->be_def_hdrq;
-	if (q->created)
-		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
-	q = &phwi_context->be_def_dataq;
-	if (q->created)
-		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+			q = &phwi_context->be_def_hdrq[ulp_num];
+			if (q->created)
+				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+			q = &phwi_context->be_def_dataq[ulp_num];
+			if (q->created)
+				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+		}
+	}
 
 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
 
@@ -3470,9 +3751,6 @@
 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
 	}
 	be_mcc_queues_destroy(phba);
-
-	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
-	kfree(pasync_ctx->async_entry);
 	be_cmd_fw_uninit(ctrl);
 }
 
@@ -3538,8 +3816,19 @@
 				  BEISCSI_MAX_NUM_CPUS : num_cpus;
 		break;
 	case BE_GEN4:
-		phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
-				  OC_SKH_MAX_NUM_CPUS : num_cpus;
+		/*
+		 * If eqid_count == 1, fall back to
+		 * the INTx mechanism.
+		 */
+		if (phba->fw_config.eqid_count == 1) {
+			enable_msix = 0;
+			phba->num_cpus = 1;
+			return;
+		}
+
+		phba->num_cpus =
+			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
+			(phba->fw_config.eqid_count - 1) : num_cpus;
 		break;
 	default:
 		phba->num_cpus = 1;
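
On the newer generation the firmware now reports the EQ count granted to the function, so the driver clamps its vector count to eqid_count - 1 (keeping one EQ back, presumably for the MCC path) and falls back to INTx when only a single EQ is available. A hedged sketch of that clamp as a standalone function:

#include <stdio.h>

/* Model of the vector-count decision: one EQ is held back,
 * and a single EQ forces legacy INTx. */
static int pick_num_cpus(int num_cpus, int eqid_count, int *use_msix)
{
	if (eqid_count == 1) {		/* nothing left for I/O EQs */
		*use_msix = 0;
		return 1;
	}
	*use_msix = 1;
	return num_cpus > eqid_count - 1 ? eqid_count - 1 : num_cpus;
}

int main(void)
{
	int msix;

	printf("%d\n", pick_num_cpus(8, 5, &msix));	/* 4, MSI-X */
	printf("%d\n", pick_num_cpus(2, 1, &msix));	/* 1, INTx  */
	return 0;
}
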
@@ -3552,10 +3841,8 @@
 	struct hwi_context_memory *phwi_context;
 	unsigned int def_pdu_ring_sz;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	int status;
+	int status, ulp_num;
 
-	def_pdu_ring_sz =
-		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 	phwi_context->max_eqd = 0;
@@ -3588,20 +3875,35 @@
 		goto error;
 	}
 
-	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
-					def_pdu_ring_sz);
-	if (status != 0) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Default Header not created\n");
-		goto error;
-	}
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
-	status = beiscsi_create_def_data(phba, phwi_context,
-					 phwi_ctrlr, def_pdu_ring_sz);
-	if (status != 0) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Default Data not created\n");
-		goto error;
+			def_pdu_ring_sz =
+				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+				sizeof(struct phys_addr);
+
+			status = beiscsi_create_def_hdr(phba, phwi_context,
+							phwi_ctrlr,
+							def_pdu_ring_sz,
+							ulp_num);
+			if (status != 0) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Default Header not created for ULP : %d\n",
+					    ulp_num);
+				goto error;
+			}
+
+			status = beiscsi_create_def_data(phba, phwi_context,
+							 phwi_ctrlr,
+							 def_pdu_ring_sz,
+							 ulp_num);
+			if (status != 0) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Default Data not created for ULP : %d\n",
+					    ulp_num);
+				goto error;
+			}
+		}
 	}
 
 	status = beiscsi_post_pages(phba);
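
Default PDU header and data rings are now created per supported ULP, each sized from that ULP's own CID count instead of one asyncpdus_per_ctrl figure. A small model of the sizing computation — the CID counts and support mask are example values, with an 8-byte stand-in for the ring entry:

#include <stdio.h>
#include <stdint.h>

struct phys_addr { uint32_t lo, hi; };	/* 8-byte ring entry */

int main(void)
{
	unsigned int cid_count[2] = { 64, 128 }; /* per-ULP CID counts */
	unsigned long supported = 0x3;		 /* both ULPs loaded */
	int ulp;

	for (ulp = 0; ulp < 2; ulp++) {
		if (!(supported & (1UL << ulp)))
			continue;
		printf("ULP%d def PDU ring: %zu bytes\n", ulp,
		       cid_count[ulp] * sizeof(struct phys_addr));
	}
	return 0;
}
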
@@ -3611,6 +3913,12 @@
 		goto error;
 	}
 
+	status = beiscsi_post_template_hdr(phba);
+	if (status != 0) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Template HDR Posting for CXN Failed\n");
+	}
+
 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
 	if (status != 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -3618,6 +3926,26 @@
 		goto error;
 	}
 
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		uint16_t async_arr_idx = 0;
+
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+			uint16_t cri = 0;
+			struct hwi_async_pdu_context *pasync_ctx;
+
+			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
+				     phwi_ctrlr, ulp_num);
+			for (cri = 0; cri <
+			     phba->params.cxns_per_ctrl; cri++) {
+				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
+					       (phwi_ctrlr, cri))
+					pasync_ctx->cid_to_async_cri_map[
+					phwi_ctrlr->wrb_context[cri].cid] =
+					async_arr_idx++;
+			}
+		}
+	}
+
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 		    "BM_%d : hwi_init_port success\n");
 	return 0;
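
The post-setup loop above builds a dense per-ULP index: every CID owned by a ULP maps to the next free slot in that ULP's async PDU context. A userspace model of the compaction, with made-up connection-to-ULP bindings:

#include <stdio.h>

#define CXNS 6

int main(void)
{
	/* which ULP each connection (CRI) was bound to */
	int ulp_of_cri[CXNS] = { 0, 1, 0, 1, 1, 0 };
	int cid_of_cri[CXNS] = { 10, 11, 12, 13, 14, 15 };
	int cid_to_async_cri[32] = { 0 };
	int ulp, cri, next;

	for (ulp = 0; ulp < 2; ulp++) {
		next = 0;	/* dense index within this ULP's context */
		for (cri = 0; cri < CXNS; cri++)
			if (ulp_of_cri[cri] == ulp)
				cid_to_async_cri[cid_of_cri[cri]] = next++;
	}
	for (cri = 0; cri < CXNS; cri++)
		printf("cid %d -> async cri %d (ULP%d)\n", cid_of_cri[cri],
		       cid_to_async_cri[cid_of_cri[cri]], ulp_of_cri[cri]);
	return 0;
}
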
@@ -3682,6 +4010,7 @@
 			  (unsigned long)mem_descr->mem_array[j - 1].
 			  bus_address.u.a64.address);
 		}
+
 		kfree(mem_descr->mem_array);
 		mem_descr++;
 	}
@@ -3721,6 +4050,7 @@
 	struct sgl_handle *psgl_handle;
 	struct iscsi_sge *pfrag;
 	unsigned int arr_index, i, idx;
+	unsigned int ulp_icd_start, ulp_num = 0;
 
 	phba->io_sgl_hndl_avbl = 0;
 	phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4117,12 @@
 		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
 		    mem_descr_sg->num_elements);
 
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+			break;
+
+	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
 	arr_index = 0;
 	idx = 0;
 	while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4141,7 @@
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
 			pfrag += phba->params.num_sge_per_io;
-			psgl_handle->sgl_index =
-				phba->fw_config.iscsi_icd_start + arr_index++;
+			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
 		}
 		idx++;
 	}
@@ -3819,15 +4154,46 @@
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-	int i;
+	int ret;
+	uint16_t i, ulp_num;
+	struct ulp_cid_info *ptr_cid_info = NULL;
 
-	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
-				  GFP_KERNEL);
-	if (!phba->cid_array) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Failed to allocate memory in "
-			    "hba_setup_cid_tbls\n");
-		return -ENOMEM;
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+					       GFP_KERNEL);
+
+			if (!ptr_cid_info) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Failed to allocate memory "
+					    "for ULP_CID_INFO for ULP : %d\n",
+					    ulp_num);
+				ret = -ENOMEM;
+				goto free_memory;
+			}
+
+			/* Allocate memory for CID array */
+			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+						  BEISCSI_GET_CID_COUNT(phba,
+						  ulp_num), GFP_KERNEL);
+			if (!ptr_cid_info->cid_array) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Failed to allocate memory "
+					    "for CID_ARRAY for ULP : %d\n",
+					    ulp_num);
+				kfree(ptr_cid_info);
+				ptr_cid_info = NULL;
+				ret = -ENOMEM;
+				goto free_memory;
+			}
+			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+						   phba, ulp_num);
+
+			/* Save the cid_info_array ptr */
+			phba->cid_array_info[ulp_num] = ptr_cid_info;
+		}
 	}
 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
 				 phba->params.cxns_per_ctrl, GFP_KERNEL);
@@ -3835,9 +4201,9 @@
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Failed to allocate memory in "
 			    "hba_setup_cid_tbls\n");
-		kfree(phba->cid_array);
-		phba->cid_array = NULL;
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_memory;
 	}
 
 	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4213,44 @@
 			    "BM_%d : Failed to allocate memory in"
 			    "hba_setup_cid_tbls\n");
 
-		kfree(phba->cid_array);
 		kfree(phba->ep_array);
-		phba->cid_array = NULL;
 		phba->ep_array = NULL;
-		return -ENOMEM;
+		ret = -ENOMEM;
 	}
 
-	for (i = 0; i < phba->params.cxns_per_ctrl; i++)
-		phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
 
-	phba->avlbl_cids = phba->params.cxns_per_ctrl;
+		ptr_cid_info = phba->cid_array_info[ulp_num];
+		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
+			phba->phwi_ctrlr->wrb_context[i].cid;
+	}
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = phba->cid_array_info[ulp_num];
+
+			ptr_cid_info->cid_alloc = 0;
+			ptr_cid_info->cid_free = 0;
+		}
+	}
 	return 0;
+
+free_memory:
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = phba->cid_array_info[ulp_num];
+
+			if (ptr_cid_info) {
+				kfree(ptr_cid_info->cid_array);
+				kfree(ptr_cid_info);
+				phba->cid_array_info[ulp_num] = NULL;
+			}
+		}
+	}
+
+	return ret;
 }
 
 static void hwi_enable_intr(struct beiscsi_hba *phba)
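
hba_setup_cid_tbls above replaces the single flat CID array with one ulp_cid_info pool per supported ULP, and routes every failure through a free_memory label that unwinds whatever was allocated so far. A simplified userspace sketch of that allocate-then-unwind shape — the types and counts are illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define ULP_COUNT 2

struct cid_info { unsigned short *cid_array; int count; };

int main(void)
{
	struct cid_info *info[ULP_COUNT] = { NULL };
	int cid_count[ULP_COUNT] = { 64, 128 };
	int ulp, ret = 0;

	for (ulp = 0; ulp < ULP_COUNT; ulp++) {
		info[ulp] = calloc(1, sizeof(*info[ulp]));
		if (!info[ulp])
			goto free_memory;
		info[ulp]->cid_array = calloc(cid_count[ulp],
					      sizeof(unsigned short));
		if (!info[ulp]->cid_array)
			goto free_memory;
		info[ulp]->count = cid_count[ulp];
	}
	printf("pools ready\n");
	/* ... normal-path teardown elided in this sketch ... */
	return 0;

free_memory:	/* unwind everything allocated before the failure */
	ret = -1;
	for (ulp = 0; ulp < ULP_COUNT; ulp++) {
		if (info[ulp]) {
			free(info[ulp]->cid_array);
			free(info[ulp]);
			info[ulp] = NULL;
		}
	}
	return ret;
}
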
@@ -4113,20 +4505,39 @@
 
 static void beiscsi_clean_port(struct beiscsi_hba *phba)
 {
-	int mgmt_status;
+	int mgmt_status, ulp_num;
+	struct ulp_cid_info *ptr_cid_info = NULL;
 
-	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
-	if (mgmt_status)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : mgmt_epfw_cleanup FAILED\n");
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
+			if (mgmt_status)
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : mgmt_epfw_cleanup FAILED"
+					    " for ULP_%d\n", ulp_num);
+		}
+	}
 
 	hwi_purge_eq(phba);
 	hwi_cleanup(phba);
 	kfree(phba->io_sgl_hndl_base);
 	kfree(phba->eh_sgl_hndl_base);
-	kfree(phba->cid_array);
 	kfree(phba->ep_array);
 	kfree(phba->conn_table);
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = phba->cid_array_info[ulp_num];
+
+			if (ptr_cid_info) {
+				kfree(ptr_cid_info->cid_array);
+				kfree(ptr_cid_info);
+				phba->cid_array_info[ulp_num] = NULL;
+			}
+		}
+	}
 }
 
 /**
@@ -4255,8 +4666,8 @@
 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 }
 
 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4481,7 +4892,8 @@
 		     DB_DEF_PDU_WRB_INDEX_MASK) <<
 		     DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
 
@@ -4536,7 +4948,8 @@
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
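
Each TX doorbell write in these hunks drops the fixed DB_TXULP0_OFFSET for a per-connection doorbell_offset cached at connection setup, since a CID can now belong to either ULP's register set. A sketch of the pattern with a fake BAR and invented offsets (the userspace helper stands in for iowrite32):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_bar[64];	/* stands in for the mapped doorbell BAR */

static void iowrite32_model(uint32_t val, uint32_t *addr) { *addr = val; }

struct conn { uint32_t doorbell_offset; };	/* cached per connection */

static void ring_tx_doorbell(struct conn *c, uint32_t doorbell)
{
	/* offset was selected once, when the CID was bound to a ULP */
	iowrite32_model(doorbell, fake_bar + c->doorbell_offset / 4);
}

int main(void)
{
	struct conn c0 = { .doorbell_offset = 0x14 };	/* e.g. ULP0 set */
	struct conn c1 = { .doorbell_offset = 0x1c };	/* e.g. ULP1 set */

	ring_tx_doorbell(&c0, 0x1);
	ring_tx_doorbell(&c1, 0x2);
	printf("%x %x\n", fake_bar[5], fake_bar[7]);
	return 0;
}
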
 
@@ -4638,7 +5051,8 @@
 	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
 
@@ -4663,8 +5077,12 @@
 		struct beiscsi_hba *phba = NULL;
 
 		phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
-			    "BM_%d : scsi_dma_map Failed\n");
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+			    "BM_%d : scsi_dma_map Failed "
+			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+			    io_task->libiscsi_itt, scsi_bufflen(sc));
 
 		return num_sg;
 	}
@@ -4769,10 +5187,12 @@
 /*
  * beiscsi_quiesce()- Cleanup Driver resources
  * @phba: Instance Priv structure
+ * @unload_state: Clean or EEH unload state
  *
  * Free the OS and HW resources held by the driver
  **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba)
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+		uint32_t unload_state)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
@@ -4785,28 +5205,37 @@
 	if (phba->msix_enabled) {
 		for (i = 0; i <= phba->num_cpus; i++) {
 			msix_vec = phba->msix_entries[i].vector;
+			synchronize_irq(msix_vec);
 			free_irq(msix_vec, &phwi_context->be_eq[i]);
 			kfree(phba->msi_name[i]);
 		}
 	} else
-		if (phba->pcidev->irq)
+		if (phba->pcidev->irq) {
+			synchronize_irq(phba->pcidev->irq);
 			free_irq(phba->pcidev->irq, phba);
+		}
 	pci_disable_msix(phba->pcidev);
-	destroy_workqueue(phba->wq);
+
 	if (blk_iopoll_enabled)
 		for (i = 0; i < phba->num_cpus; i++) {
 			pbe_eq = &phwi_context->be_eq[i];
 			blk_iopoll_disable(&pbe_eq->iopoll);
 		}
 
-	beiscsi_clean_port(phba);
-	beiscsi_free_mem(phba);
+	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+		destroy_workqueue(phba->wq);
+		beiscsi_clean_port(phba);
+		beiscsi_free_mem(phba);
 
-	beiscsi_unmap_pci_function(phba);
-	pci_free_consistent(phba->pcidev,
-			    phba->ctrl.mbox_mem_alloced.size,
-			    phba->ctrl.mbox_mem_alloced.va,
-			    phba->ctrl.mbox_mem_alloced.dma);
+		beiscsi_unmap_pci_function(phba);
+		pci_free_consistent(phba->pcidev,
+				    phba->ctrl.mbox_mem_alloced.size,
+				    phba->ctrl.mbox_mem_alloced.va,
+				    phba->ctrl.mbox_mem_alloced.dma);
+	} else {
+		hwi_purge_eq(phba);
+		hwi_cleanup(phba);
+	}
 
 	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
 }
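
beiscsi_quiesce now branches on an unload_state: both paths quiesce IRQs and iopoll, but only a clean unload frees host memory and unmaps the PCI function, while the EEH path only purges and cleans the hardware rings so a later slot reset can rebuild them. A condensed model of the split:

#include <stdio.h>

enum { CLEAN_UNLOAD = 1, EEH_UNLOAD = 2 };

static void quiesce(int unload_state)
{
	printf("sync + free irqs, disable iopoll\n");	/* common part */

	if (unload_state == CLEAN_UNLOAD) {
		printf("destroy wq, free port memory, unmap PCI\n");
	} else {
		/* EEH: leave host allocations in place for eeh_resume */
		printf("purge EQs, clean hw rings only\n");
	}
}

int main(void)
{
	quiesce(CLEAN_UNLOAD);
	quiesce(EEH_UNLOAD);
	return 0;
}
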
@@ -4823,11 +5252,13 @@
 	}
 
 	beiscsi_destroy_def_ifaces(phba);
-	beiscsi_quiesce(phba);
+	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	iscsi_boot_destroy_kset(phba->boot_kset);
 	iscsi_host_remove(phba->shost);
 	pci_dev_put(phba->pcidev);
 	iscsi_host_free(phba->shost);
+	pci_disable_pcie_error_reporting(pcidev);
+	pci_set_drvdata(pcidev, NULL);
 	pci_disable_device(pcidev);
 }
 
@@ -4842,7 +5273,7 @@
 		return;
 	}
 
-	beiscsi_quiesce(phba);
+	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	pci_disable_device(pcidev);
 }
 
@@ -4880,6 +5311,167 @@
 			      msecs_to_jiffies(1000));
 }
 
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+		pci_channel_state_t state)
+{
+	struct beiscsi_hba *phba = NULL;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+	phba->state |= BE_ADAPTER_PCI_ERR;
+
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : EEH error detected\n");
+
+	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+	if (state == pci_channel_io_perm_failure) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH : State PERM Failure\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_disable_device(pdev);
+
+	/* The error could cause the FW to trigger a flash debug dump.
+	 * Resetting the card while the flash dump is in progress
+	 * can cause it not to recover; wait for it to finish.
+	 * Wait only for the first function, as this is needed only
+	 * once per adapter.
+	 */
+	if (pdev->devfn == 0)
+		ssleep(30);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+	struct beiscsi_hba *phba = NULL;
+	int status = 0;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : EEH Reset\n");
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	pci_set_master(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/* Wait for the CHIP Reset to complete */
+	status = be_chk_reset_complete(phba);
+	if (!status) {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH Reset Completed\n");
+	} else {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH Reset Completion Failure\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+	int ret = 0, i;
+	struct be_eq_obj *pbe_eq;
+	struct beiscsi_hba *phba = NULL;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+	pci_save_state(pdev);
+
+	if (enable_msix)
+		find_num_cpus(phba);
+	else
+		phba->num_cpus = 1;
+
+	if (enable_msix) {
+		beiscsi_msix_enable(phba);
+		if (!phba->msix_enabled)
+			phba->num_cpus = 1;
+	}
+
+	ret = beiscsi_cmd_reset_function(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Reset Failed\n");
+		goto ret_err;
+	}
+
+	ret = be_chk_reset_complete(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Failed to get out of reset.\n");
+		goto ret_err;
+	}
+
+	beiscsi_get_params(phba);
+	phba->shost->max_id = phba->params.cxns_per_ctrl;
+	phba->shost->can_queue = phba->params.ios_per_ctrl;
+	ret = hwi_init_controller(phba);
+
+	for (i = 0; i < MAX_MCC_CMD; i++) {
+		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+		phba->ctrl.mcc_tag[i] = i + 1;
+		phba->ctrl.mcc_numtag[i + 1] = 0;
+		phba->ctrl.mcc_tag_available++;
+	}
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	if (blk_iopoll_enabled) {
+		for (i = 0; i < phba->num_cpus; i++) {
+			pbe_eq = &phwi_context->be_eq[i];
+			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+					be_iopoll);
+			blk_iopoll_enable(&pbe_eq->iopoll);
+		}
+
+		i = (phba->msix_enabled) ? i : 0;
+		/* Work item for MCC handling */
+		pbe_eq = &phwi_context->be_eq[i];
+		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+	} else {
+		if (phba->msix_enabled) {
+			for (i = 0; i <= phba->num_cpus; i++) {
+				pbe_eq = &phwi_context->be_eq[i];
+				INIT_WORK(&pbe_eq->work_cqs,
+					  beiscsi_process_all_cqs);
+			}
+		} else {
+			pbe_eq = &phwi_context->be_eq[0];
+			INIT_WORK(&pbe_eq->work_cqs,
+				  beiscsi_process_all_cqs);
+		}
+	}
+
+	ret = beiscsi_init_irqs(phba);
+	if (ret < 0) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : beiscsi_eeh_resume - "
+			    "Failed to beiscsi_init_irqs\n");
+		goto ret_err;
+	}
+
+	hwi_enable_intr(phba);
+	phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+	return;
+ret_err:
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : AER EEH Resume Failed\n");
+}
+
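
The three callbacks above follow the usual PCI error-recovery contract: error_detected quiesces and votes NEED_RESET (or DISCONNECT on a permanent failure), slot_reset re-enables the device and verifies the chip reset completed, and resume rebuilds queues and IRQs before clearing the error state. A condensed model of the flow:

#include <stdio.h>

enum ers { NEED_RESET, DISCONNECT, RECOVERED };

static enum ers error_detected(int perm_failure)
{
	printf("quiesce (EEH unload)\n");
	if (perm_failure)
		return DISCONNECT;	/* the link is gone for good */
	printf("disable device, wait out flash dump on fn0\n");
	return NEED_RESET;
}

static enum ers slot_reset(int chip_reset_ok)
{
	printf("enable device, restore saved state\n");
	return chip_reset_ok ? RECOVERED : DISCONNECT;
}

static void resume(void)
{
	printf("reset function, rebuild EQs/CQs, re-request IRQs\n");
	printf("clear PCI_ERR adapter state\n");
}

int main(void)
{
	if (error_detected(0) == NEED_RESET && slot_reset(1) == RECOVERED)
		resume();
	return 0;
}
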
 static int beiscsi_dev_probe(struct pci_dev *pcidev,
 			     const struct pci_device_id *id)
 {
@@ -4887,7 +5479,7 @@
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
 	struct be_eq_obj *pbe_eq;
-	int ret, i;
+	int ret = 0, i;
 
 	ret = beiscsi_enable_pci(pcidev);
 	if (ret < 0) {
@@ -4903,10 +5495,20 @@
 		goto disable_pci;
 	}
 
+	/* Enable EEH reporting */
+	ret = pci_enable_pcie_error_reporting(pcidev);
+	if (ret)
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : PCIe Error Reporting "
+			    "Enabling Failed\n");
+
+	pci_save_state(pcidev);
+
 	/* Initialize Driver configuration Paramters */
 	beiscsi_hba_attrs_init(phba);
 
 	phba->fw_timeout = false;
+	phba->mac_addr_set = false;
 
 
 	switch (pcidev->device) {
@@ -4929,6 +5531,38 @@
 		phba->generation = 0;
 	}
 
+	ret = be_ctrl_init(phba, pcidev);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : beiscsi_dev_probe-"
+			    "Failed in be_ctrl_init\n");
+		goto hba_free;
+	}
+
+	ret = beiscsi_cmd_reset_function(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Reset Failed\n");
+		goto hba_free;
+	}
+	ret = be_chk_reset_complete(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Failed to get out of reset.\n");
+		goto hba_free;
+	}
+
+	spin_lock_init(&phba->io_sgl_lock);
+	spin_lock_init(&phba->mgmt_sgl_lock);
+	spin_lock_init(&phba->isr_lock);
+	spin_lock_init(&phba->async_pdu_lock);
+	ret = mgmt_get_fw_config(&phba->ctrl, phba);
+	if (ret != 0) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Error getting fw config\n");
+		goto free_port;
+	}
+
 	if (enable_msix)
 		find_num_cpus(phba);
 	else
@@ -4943,38 +5577,8 @@
 		if (!phba->msix_enabled)
 			phba->num_cpus = 1;
 	}
-	ret = be_ctrl_init(phba, pcidev);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : beiscsi_dev_probe-"
-			    "Failed in be_ctrl_init\n");
-		goto hba_free;
-	}
 
-	ret = beiscsi_cmd_reset_function(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Reset Failed. Aborting Crashdump\n");
-		goto hba_free;
-	}
-	ret = be_chk_reset_complete(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Failed to get out of reset."
-			    "Aborting Crashdump\n");
-		goto hba_free;
-	}
-
-	spin_lock_init(&phba->io_sgl_lock);
-	spin_lock_init(&phba->mgmt_sgl_lock);
-	spin_lock_init(&phba->isr_lock);
-	ret = mgmt_get_fw_config(&phba->ctrl, phba);
-	if (ret != 0) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Error getting fw config\n");
-		goto free_port;
-	}
-	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
+	phba->shost->max_id = phba->params.cxns_per_ctrl;
 	beiscsi_get_params(phba);
 	phba->shost->can_queue = phba->params.ios_per_ctrl;
 	ret = beiscsi_init_port(phba);
@@ -4985,7 +5589,7 @@
 		goto free_port;
 	}
 
-	for (i = 0; i < MAX_MCC_CMD ; i++) {
+	for (i = 0; i < MAX_MCC_CMD; i++) {
 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
 		phba->ctrl.mcc_tag[i] = i + 1;
 		phba->ctrl.mcc_numtag[i + 1] = 0;
@@ -5089,6 +5693,12 @@
 	return ret;
 }
 
+static struct pci_error_handlers beiscsi_eeh_handlers = {
+	.error_detected = beiscsi_eeh_err_detected,
+	.slot_reset = beiscsi_eeh_reset,
+	.resume = beiscsi_eeh_resume,
+};
+
 struct iscsi_transport beiscsi_iscsi_transport = {
 	.owner = THIS_MODULE,
 	.name = DRV_NAME,
@@ -5127,7 +5737,8 @@
 	.probe = beiscsi_dev_probe,
 	.remove = beiscsi_remove,
 	.shutdown = beiscsi_shutdown,
-	.id_table = beiscsi_pci_id_table
+	.id_table = beiscsi_pci_id_table,
+	.err_handler = &beiscsi_eeh_handlers
 };
 
 
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 2c06ef3..31fa27b 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -26,6 +26,7 @@
 #include <linux/in.h>
 #include <linux/ctype.h>
 #include <linux/module.h>
+#include <linux/aer.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -34,9 +35,8 @@
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
-#include "be.h"
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.0.467.0"
+#define BUILD_STR		"10.0.659.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -66,7 +66,6 @@
 
 #define MAX_CPUS		64
 #define BEISCSI_MAX_NUM_CPUS	7
-#define OC_SKH_MAX_NUM_CPUS	31
 
 #define BEISCSI_VER_STRLEN 32
 
@@ -74,6 +73,7 @@
 
 #define BEISCSI_CMD_PER_LUN	128	/* scsi_host->cmd_per_lun */
 #define BEISCSI_MAX_SECTORS	2048	/* scsi_host->max_sectors */
+#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
 
 #define BEISCSI_MAX_CMD_LEN	16	/* scsi_host->max_cmd_len */
 #define BEISCSI_NUM_MAX_LUN	256	/* scsi_host->max_lun */
@@ -97,14 +97,19 @@
 
 #define INVALID_SESS_HANDLE	0xFFFFFFFF
 
-#define BE_ADAPTER_UP		0x00000000
-#define BE_ADAPTER_LINK_DOWN	0x00000001
+#define BE_ADAPTER_LINK_UP	0x001
+#define BE_ADAPTER_LINK_DOWN	0x002
+#define BE_ADAPTER_PCI_ERR	0x004
+
+#define BEISCSI_CLEAN_UNLOAD	0x01
+#define BEISCSI_EEH_UNLOAD	0x02
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have atleast 8 of them by default
  */
 
-#define HWI_GET_ASYNC_PDU_CTX(phwi)	(phwi->phwi_ctxt->pasync_ctx)
+#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num)	\
+	(phwi->phwi_ctxt->pasync_ctx[ulp_num])
 
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET	0xfc
@@ -149,29 +154,41 @@
 #define DB_CQ_REARM_SHIFT		(29)	/* bit 29 */
 
 #define GET_HWI_CONTROLLER_WS(pc)	(pc->phwi_ctrlr)
-#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
-		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
-#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
-		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
+#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
+#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
 
 #define PAGES_REQUIRED(x) \
 	((x < PAGE_SIZE) ? 1 :  ((x + PAGE_SIZE - 1) / PAGE_SIZE))
 
 #define BEISCSI_MSI_NAME 20 /* size of msi_name string */
 
+#define MEM_DESCR_OFFSET 8
+#define BEISCSI_DEFQ_HDR 1
+#define BEISCSI_DEFQ_DATA 0
 enum be_mem_enum {
 	HWI_MEM_ADDN_CONTEXT,
 	HWI_MEM_WRB,
 	HWI_MEM_WRBH,
 	HWI_MEM_SGLH,
 	HWI_MEM_SGE,
-	HWI_MEM_ASYNC_HEADER_BUF,	/* 5 */
-	HWI_MEM_ASYNC_DATA_BUF,
-	HWI_MEM_ASYNC_HEADER_RING,
-	HWI_MEM_ASYNC_DATA_RING,
-	HWI_MEM_ASYNC_HEADER_HANDLE,
-	HWI_MEM_ASYNC_DATA_HANDLE,	/* 10 */
-	HWI_MEM_ASYNC_PDU_CONTEXT,
+	HWI_MEM_TEMPLATE_HDR_ULP0,
+	HWI_MEM_ASYNC_HEADER_BUF_ULP0,	/* 6 */
+	HWI_MEM_ASYNC_DATA_BUF_ULP0,
+	HWI_MEM_ASYNC_HEADER_RING_ULP0,
+	HWI_MEM_ASYNC_DATA_RING_ULP0,
+	HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
+	HWI_MEM_ASYNC_DATA_HANDLE_ULP0,	/* 11 */
+	HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
+	HWI_MEM_TEMPLATE_HDR_ULP1,
+	HWI_MEM_ASYNC_HEADER_BUF_ULP1,	/* 14 */
+	HWI_MEM_ASYNC_DATA_BUF_ULP1,
+	HWI_MEM_ASYNC_HEADER_RING_ULP1,
+	HWI_MEM_ASYNC_DATA_RING_ULP1,
+	HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
+	HWI_MEM_ASYNC_DATA_HANDLE_ULP1,	/* 19 */
+	HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
 	ISCSI_MEM_GLOBAL_HEADER,
 	SE_MEM_MAX
 };
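
With the rings turned into per-ULP arrays, the queue-ID accessor macros gain a ulp_num parameter. A tiny compilable model of the pattern — the structure layout and IDs below are invented:

#include <stdio.h>

#define ULP_COUNT 2

struct ring { int id; };
struct hwi_ctrlr_model {
	struct ring default_pdu_hdr[ULP_COUNT];
	struct ring default_pdu_data[ULP_COUNT];
};

/* accessors take the ULP now that the rings are per-ULP arrays */
#define DEF_HDRQ_ID(pc, ulp)  ((pc)->default_pdu_hdr[ulp].id)
#define DEF_BUFQ_ID(pc, ulp)  ((pc)->default_pdu_data[ulp].id)

int main(void)
{
	struct hwi_ctrlr_model c = {
		.default_pdu_hdr  = { { 10 }, { 20 } },
		.default_pdu_data = { { 11 }, { 21 } },
	};

	printf("%d %d\n", DEF_HDRQ_ID(&c, 0), DEF_BUFQ_ID(&c, 1));
	return 0;
}
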
@@ -266,9 +283,49 @@
 	unsigned short cid;
 } __packed;
 
+#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
+	(phwi_ctrlr->wrb_context[cri].ulp_num)
+struct hwi_wrb_context {
+	struct list_head wrb_handle_list;
+	struct list_head wrb_handle_drvr_list;
+	struct wrb_handle **pwrb_handle_base;
+	struct wrb_handle **pwrb_handle_basestd;
+	struct iscsi_wrb *plast_wrb;
+	unsigned short alloc_index;
+	unsigned short free_index;
+	unsigned short wrb_handles_available;
+	unsigned short cid;
+	uint8_t ulp_num;	/* ULP to which the CID is bound */
+	uint16_t register_set;
+	uint16_t doorbell_format;
+	uint32_t doorbell_offset;
+};
+
+struct ulp_cid_info {
+	unsigned short *cid_array;
+	unsigned short avlbl_cids;
+	unsigned short cid_alloc;
+	unsigned short cid_free;
+};
+
+#include "be.h"
 #define chip_be2(phba)      (phba->generation == BE_GEN2)
 #define chip_be3_r(phba)    (phba->generation == BE_GEN3)
 #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
+
+#define BEISCSI_ULP0    0
+#define BEISCSI_ULP1    1
+#define BEISCSI_ULP_COUNT   2
+#define BEISCSI_ULP0_LOADED 0x01
+#define BEISCSI_ULP1_LOADED 0x02
+
+#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
+	(((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
+#define BEISCSI_ULP0_AVLBL_CID(phba) \
+	BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
+#define BEISCSI_ULP1_AVLBL_CID(phba) \
+	BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
+
 struct beiscsi_hba {
 	struct hba_parameters params;
 	struct hwi_controller *phwi_ctrlr;
@@ -303,17 +360,15 @@
 	spinlock_t io_sgl_lock;
 	spinlock_t mgmt_sgl_lock;
 	spinlock_t isr_lock;
+	spinlock_t async_pdu_lock;
 	unsigned int age;
-	unsigned short avlbl_cids;
-	unsigned short cid_alloc;
-	unsigned short cid_free;
 	struct list_head hba_queue;
 #define BE_MAX_SESSION 2048
 #define BE_SET_CID_TO_CRI(cri_index, cid) \
 			  (phba->cid_to_cri_map[cid] = cri_index)
 #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
 	unsigned short cid_to_cri_map[BE_MAX_SESSION];
-	unsigned short *cid_array;
+	struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
 	struct iscsi_endpoint **ep_array;
 	struct beiscsi_conn **conn_table;
 	struct iscsi_boot_kset *boot_kset;
@@ -325,20 +380,21 @@
 		 * group together since they are used most frequently
 		 * for cid to cri conversion
 		 */
-		unsigned int iscsi_cid_start;
 		unsigned int phys_port;
+		unsigned int eqid_count;
+		unsigned int cqid_count;
+		unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
+#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
+		(phba->fw_config.iscsi_cid_count[ulp_num])
+		unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
+		unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
+		unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
+		unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
+		unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
 
-		unsigned int isr_offset;
-		unsigned int iscsi_icd_start;
-		unsigned int iscsi_cid_count;
-		unsigned int iscsi_icd_count;
-		unsigned int pci_function;
-
-		unsigned short cid_alloc;
-		unsigned short cid_free;
-		unsigned short avlbl_cids;
 		unsigned short iscsi_features;
-		spinlock_t cid_lock;
+		uint16_t dual_ulp_aware;
+		unsigned long ulp_supported;
 	} fw_config;
 
 	unsigned int state;
@@ -346,6 +402,7 @@
 	bool ue_detected;
 	struct delayed_work beiscsi_hw_check_task;
 
+	bool mac_addr_set;
 	u8 mac_address[ETH_ALEN];
 	char fw_ver_str[BEISCSI_VER_STRLEN];
 	char wq_name[20];
@@ -374,6 +431,7 @@
 	struct iscsi_conn *conn;
 	struct beiscsi_hba *phba;
 	u32 exp_statsn;
+	u32 doorbell_offset;
 	u32 beiscsi_conn_cid;
 	struct beiscsi_endpoint *ep;
 	unsigned short login_in_progress;
@@ -474,7 +532,7 @@
 };
 
 struct beiscsi_offload_params {
-	u32 dw[5];
+	u32 dw[6];
 };
 
 #define OFFLD_PARAMS_ERL	0x00000003
@@ -504,6 +562,7 @@
 	u8 max_r2t[16];
 	u8 pad[8];
 	u8 exp_statsn[32];
+	u8 max_recv_data_segment_length[32];
 };
 
 /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
@@ -567,7 +626,8 @@
 
 	unsigned int buffer_size;
 	unsigned int num_entries;
-
+#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
+	unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
 	/**
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
@@ -885,30 +945,32 @@
 	u8 first_burst_length[24]; /* DWORD 3 */
 	u8 rsvd3[8]; /* DOWRD 3 */
 	u8 max_r2t[16]; /* DWORD 4 */
-	u8 rsvd4[10];   /* DWORD 4 */
+	u8 rsvd4;       /* DWORD 4 */
 	u8 hde;         /* DWORD 4 */
 	u8 dde;         /* DWORD 4 */
 	u8 erl[2];      /* DWORD 4 */
+	u8 rsvd5[6];    /* DWORD 4 */
 	u8 imd;         /* DWORD 4 */
 	u8 ir2t;        /* DWORD 4 */
+	u8 rsvd6[3];    /* DWORD 4 */
 	u8 stat_sn[32];     /* DWORD 5 */
-	u8 rsvd5[32];   /* DWORD 6 */
-	u8 rsvd6[32];   /* DWORD 7 */
+	u8 rsvd7[32];   /* DWORD 6 */
+	u8 rsvd8[32];   /* DWORD 7 */
 	u8 max_recv_dataseg_len[24];    /* DWORD 8 */
-	u8 rsvd7[8]; /* DWORD 8 */
-	u8 rsvd8[32];   /* DWORD 9 */
-	u8 rsvd9[32];   /* DWORD 10 */
+	u8 rsvd9[8]; /* DWORD 8 */
+	u8 rsvd10[32];   /* DWORD 9 */
+	u8 rsvd11[32];   /* DWORD 10 */
 	u8 max_cxns[16]; /* DWORD 11 */
-	u8 rsvd10[11]; /* DWORD  11*/
+	u8 rsvd12[11]; /* DWORD  11*/
 	u8 invld; /* DWORD 11 */
-	u8 rsvd11;/* DWORD 11*/
+	u8 rsvd13;/* DWORD 11*/
 	u8 dmsg; /* DWORD 11 */
 	u8 data_seq_inorder; /* DWORD 11 */
 	u8 pdu_seq_inorder; /* DWORD 11 */
-	u8 rsvd12[32]; /*DWORD 12 */
-	u8 rsvd13[32]; /* DWORD 13 */
-	u8 rsvd14[32]; /* DWORD 14 */
-	u8 rsvd15[32]; /* DWORD 15 */
+	u8 rsvd14[32]; /*DWORD 12 */
+	u8 rsvd15[32]; /* DWORD 13 */
+	u8 rsvd16[32]; /* DWORD 14 */
+	u8 rsvd17[32]; /* DWORD 15 */
 } __packed;
 
 
@@ -919,6 +981,10 @@
 	u32 cidx;		/* consumer index */
 	u32 pidx;		/* producer index -- not used by most rings */
 	u32 item_size;		/* size in bytes of one object */
+	u8 ulp_num;	/* ULP to which the CID is bound */
+	u16 register_set;
+	u16 doorbell_format;
+	u32 doorbell_offset;
 
 	void *va;		/* The virtual address of the ring.  This
 				 * should be last to allow 32 & 64 bit debugger
@@ -926,18 +992,6 @@
 				 */
 };
 
-struct hwi_wrb_context {
-	struct list_head wrb_handle_list;
-	struct list_head wrb_handle_drvr_list;
-	struct wrb_handle **pwrb_handle_base;
-	struct wrb_handle **pwrb_handle_basestd;
-	struct iscsi_wrb *plast_wrb;
-	unsigned short alloc_index;
-	unsigned short free_index;
-	unsigned short wrb_handles_available;
-	unsigned short cid;
-};
-
 struct hwi_controller {
 	struct list_head io_sgl_list;
 	struct list_head eh_sgl_list;
@@ -946,8 +1000,8 @@
 
 	struct hwi_wrb_context *wrb_context;
 	struct mcc_wrb *pmcc_wrb_base;
-	struct be_ring default_pdu_hdr;
-	struct be_ring default_pdu_data;
+	struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
+	struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
 	struct hwi_context_memory *phwi_ctxt;
 };
 
@@ -978,11 +1032,10 @@
 	struct be_eq_obj be_eq[MAX_CPUS];
 	struct be_queue_info be_cq[MAX_CPUS - 1];
 
-	struct be_queue_info be_def_hdrq;
-	struct be_queue_info be_def_dataq;
-
 	struct be_queue_info *be_wrbq;
-	struct hwi_async_pdu_context *pasync_ctx;
+	struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
+	struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
+	struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
 };
 
 /* Logging related definitions */
@@ -992,6 +1045,7 @@
 #define BEISCSI_LOG_EH		0x0008	/* Error Handler */
 #define BEISCSI_LOG_IO		0x0010	/* IO Code Path */
 #define BEISCSI_LOG_CONFIG	0x0020	/* CONFIG Code Path */
+#define BEISCSI_LOG_ISCSI	0x0040	/* SCSI/iSCSI Protocol related Logs */
 
 #define beiscsi_log(phba, level, mask, fmt, arg...) \
 do { \
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 245a959..b2fcac7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -278,6 +278,18 @@
 	return tag;
 }
 
+/**
+ * mgmt_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/
 int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
 				struct beiscsi_hba *phba)
 {
@@ -291,31 +303,79 @@
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+			   EMBED_MBX_MAX_PAYLOAD_SIZE);
 	status = be_mbox_notify(ctrl);
 	if (!status) {
+		uint8_t ulp_num = 0;
 		struct be_fw_cfg *pfw_cfg;
 		pfw_cfg = req;
-		phba->fw_config.phys_port = pfw_cfg->phys_port;
-		phba->fw_config.iscsi_icd_start =
-					pfw_cfg->ulp[0].icd_base;
-		phba->fw_config.iscsi_icd_count =
-					pfw_cfg->ulp[0].icd_count;
-		phba->fw_config.iscsi_cid_start =
-					pfw_cfg->ulp[0].sq_base;
-		phba->fw_config.iscsi_cid_count =
-					pfw_cfg->ulp[0].sq_count;
-		if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
-			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-				    "BG_%d : FW reported MAX CXNS as %d\t"
-				    "Max Supported = %d.\n",
-				    phba->fw_config.iscsi_cid_count,
-				    BE2_MAX_SESSIONS);
-			phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+
+		if (!is_chip_be2_be3r(phba)) {
+			phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+			phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+
+			beiscsi_log(phba, KERN_INFO,
+				    BEISCSI_LOG_INIT,
+				    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+				    phba->fw_config.eqid_count,
+				    phba->fw_config.cqid_count);
 		}
+
+		for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+			if (pfw_cfg->ulp[ulp_num].ulp_mode &
+			    BEISCSI_ULP_ISCSI_INI_MODE)
+				set_bit(ulp_num,
+				&phba->fw_config.ulp_supported);
+
+		phba->fw_config.phys_port = pfw_cfg->phys_port;
+		for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+			if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+				phba->fw_config.iscsi_cid_start[ulp_num] =
+					pfw_cfg->ulp[ulp_num].sq_base;
+				phba->fw_config.iscsi_cid_count[ulp_num] =
+					pfw_cfg->ulp[ulp_num].sq_count;
+
+				phba->fw_config.iscsi_icd_start[ulp_num] =
+					pfw_cfg->ulp[ulp_num].icd_base;
+				phba->fw_config.iscsi_icd_count[ulp_num] =
+					pfw_cfg->ulp[ulp_num].icd_count;
+
+				phba->fw_config.iscsi_chain_start[ulp_num] =
+					pfw_cfg->chain_icd[ulp_num].chain_base;
+				phba->fw_config.iscsi_chain_count[ulp_num] =
+					pfw_cfg->chain_icd[ulp_num].chain_count;
+
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BG_%d : Function loaded on ULP : %d\n"
+					    "\tiscsi_cid_count : %d\n"
+					    "\tiscsi_cid_start : %d\n"
+					    "\tiscsi_icd_count : %d\n"
+					    "\tiscsi_icd_start : %d\n",
+					    ulp_num,
+					    phba->fw_config.
+					    iscsi_cid_count[ulp_num],
+					    phba->fw_config.
+					    iscsi_cid_start[ulp_num],
+					    phba->fw_config.
+					    iscsi_icd_count[ulp_num],
+					    phba->fw_config.
+					    iscsi_icd_start[ulp_num]);
+			}
+		}
+
+		phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+						  BEISCSI_FUNC_DUA_MODE);
+
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BG_%d : DUA Mode : 0x%x\n",
+			    phba->fw_config.dual_ulp_aware);
+
 	} else {
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BG_%d : Failed in mgmt_get_fw_config\n");
+		status = -EINVAL;
 	}
 
 	spin_unlock(&ctrl->mbox_lock);
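
mgmt_get_fw_config now records which ULPs run iSCSI initiator mode in a bitmap and fills the per-ULP CID/ICD ranges only for the set bits. A userspace model using plain bit operations in place of set_bit()/test_bit() — the flag value and counts are invented for the model:

#include <stdio.h>

#define ULP_COUNT 2
#define INI_MODE  0x10	/* invented initiator-mode flag for the model */

struct ulp_cfg { unsigned mode, sq_base, sq_count; };

int main(void)
{
	struct ulp_cfg fw[ULP_COUNT] = {
		{ INI_MODE, 0,  64 },	/* ULP0 runs the initiator */
		{ 0,        64, 64 },	/* ULP1 owned by another protocol */
	};
	unsigned long supported = 0;
	unsigned cid_start[ULP_COUNT], cid_count[ULP_COUNT];
	int ulp;

	for (ulp = 0; ulp < ULP_COUNT; ulp++)
		if (fw[ulp].mode & INI_MODE)
			supported |= 1UL << ulp;

	for (ulp = 0; ulp < ULP_COUNT; ulp++) {
		if (!(supported & (1UL << ulp)))
			continue;
		cid_start[ulp] = fw[ulp].sq_base;
		cid_count[ulp] = fw[ulp].sq_count;
		printf("ULP%d: cid %u..%u\n", ulp, cid_start[ulp],
		       cid_start[ulp] + cid_count[ulp] - 1);
	}
	return 0;
}
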
@@ -448,7 +508,16 @@
 	return tag;
 }
 
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
+/**
+ * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp_num: ULP number.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@@ -462,9 +531,9 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
 
-	req->chute = chute;
-	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
-	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
+	req->chute = (1 << ulp_num);
+	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
+	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
 
 	status =  be_mcc_notify_wait(phba);
 	if (status)
@@ -585,6 +654,16 @@
 	return tag;
 }
 
+/**
+ * mgmt_open_connection()- Establish a TCP CXN
+ * @dst_addr: Destination Address
+ * @beiscsi_ep: ptr to device endpoint struct
+ * @nonemb_cmd: ptr to memory allocated for command
+ *
+ * return
+ *	Success: Tag number of the MBX Command issued
+ *	Failure: Error code
+ **/
 int mgmt_open_connection(struct beiscsi_hba *phba,
 			 struct sockaddr *dst_addr,
 			 struct beiscsi_endpoint *beiscsi_ep,
@@ -602,14 +681,17 @@
 	struct phys_addr template_address = { 0, 0 };
 	struct phys_addr *ptemplate_address;
 	unsigned int tag = 0;
-	unsigned int i;
+	unsigned int i, ulp_num;
 	unsigned short cid = beiscsi_ep->ep_cid;
 	struct be_sge *sge;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
-	def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
-	def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
+
+	ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
+
+	def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
+	def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
 
 	ptemplate_address = &template_address;
 	ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
@@ -748,11 +830,14 @@
 
 	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
 	if (rc) {
+		/* Check if the IOCTL needs to be re-issued */
+		if (rc == -EAGAIN)
+			return rc;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
 
-		rc = -EIO;
 		goto free_cmd;
 	}
 
@@ -861,7 +946,7 @@
 		uint32_t boot_proto)
 {
 	struct be_cmd_get_def_gateway_resp gtway_addr_set;
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
 	struct be_cmd_set_dhcp_req *dhcpreq;
 	struct be_cmd_rel_dhcp_req *reldhcp;
 	struct be_dma_mem nonemb_cmd;
@@ -872,16 +957,17 @@
 	if (mgmt_get_all_if_id(phba))
 		return -EIO;
 
-	memset(&if_info, 0, sizeof(if_info));
 	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
 		BE2_IPV6 : BE2_IPV4 ;
 
 	rc = mgmt_get_if_info(phba, ip_type, &if_info);
-	if (rc)
+	if (rc) {
+		kfree(if_info);
 		return rc;
+	}
 
 	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-		if (if_info.dhcp_state) {
+		if (if_info->dhcp_state) {
 			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 				    "BG_%d : DHCP Already Enabled\n");
 			return 0;
@@ -894,9 +980,9 @@
 				IP_V6_LEN : IP_V4_LEN;
 
 	} else {
-		if (if_info.dhcp_state) {
+		if (if_info->dhcp_state) {
 
-			memset(&if_info, 0, sizeof(if_info));
+			memset(if_info, 0, sizeof(*if_info));
 			rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
 				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
 				sizeof(*reldhcp));
@@ -919,8 +1005,8 @@
 	}
 
 	/* Delete the Static IP Set */
-	if (if_info.ip_addr.addr[0]) {
-		rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+	if (if_info->ip_addr.addr[0]) {
+		rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
 					   IP_ACTION_DEL);
 		if (rc)
 			return rc;
@@ -966,7 +1052,7 @@
 
 		return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
 	} else {
-		return mgmt_static_ip_modify(phba, &if_info, ip_param,
+		return mgmt_static_ip_modify(phba, if_info, ip_param,
 					     subnet_param, IP_ACTION_ADD);
 	}
 
@@ -1031,27 +1117,64 @@
 }
 
 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_if_info_resp *if_info)
+		     struct be_cmd_get_if_info_resp **if_info)
 {
 	struct be_cmd_get_if_info_req *req;
 	struct be_dma_mem nonemb_cmd;
+	uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
 	int rc;
 
 	if (mgmt_get_all_if_id(phba))
 		return -EIO;
 
-	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-				 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
-				 sizeof(*if_info));
-	if (rc)
-		return rc;
+	do {
+		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+					 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+					 ioctl_size);
+		if (rc)
+			return rc;
 
-	req = nonemb_cmd.va;
-	req->interface_hndl = phba->interface_handle;
-	req->ip_type = ip_type;
+		req = nonemb_cmd.va;
+		req->interface_hndl = phba->interface_handle;
+		req->ip_type = ip_type;
 
-	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
-				    sizeof(*if_info));
+		/* Allocate memory for if_info */
+		*if_info = kzalloc(ioctl_size, GFP_KERNEL);
+		if (!*if_info) {
+			beiscsi_log(phba, KERN_ERR,
+				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+				    "BG_%d : Memory Allocation Failure\n");
+
+			/* Free the DMA memory for the IOCTL issuing */
+			pci_free_consistent(phba->ctrl.pdev,
+					    nonemb_cmd.size,
+					    nonemb_cmd.va,
+					    nonemb_cmd.dma);
+			return -ENOMEM;
+		}
+
+		rc =  mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
+					   ioctl_size);
+
+		/* Check if the error is because of Insufficient_Buffer */
+		if (rc == -EAGAIN) {
+
+			/* Get the new memory size */
+			ioctl_size = ((struct be_cmd_resp_hdr *)
+				      nonemb_cmd.va)->actual_resp_len;
+			ioctl_size += sizeof(struct be_cmd_req_hdr);
+
+			/* Free the previous allocated DMA memory */
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va,
+					    nonemb_cmd.dma);
+
+			/* Free the virtual memory */
+			kfree(*if_info);
+		} else
+			break;
+	} while (true);
+	return rc;
 }
 
 int mgmt_get_nic_conf(struct beiscsi_hba *phba,
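
mgmt_get_if_info above becomes a grow-and-retry loop: if the firmware reports the response buffer was too small (surfaced as -EAGAIN by the completion path), both buffers are freed and the command is reissued with the actual_resp_len the firmware asked for. A standalone model of the pattern with a fake firmware call:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Model firmware call: needs 100 bytes; reports the size it wanted. */
static int fw_query(char *buf, size_t size, size_t *actual)
{
	const char *payload = "iface-info";

	*actual = 100;
	if (size < *actual)
		return -11;	/* -EAGAIN: retry with *actual bytes */
	memcpy(buf, payload, strlen(payload) + 1);
	return 0;
}

int main(void)
{
	size_t size = 32, actual;
	char *buf;
	int rc;

	do {
		buf = malloc(size);
		if (!buf)
			return 1;
		rc = fw_query(buf, size, &actual);
		if (rc == -11) {	/* too small: grow and go again */
			free(buf);
			size = actual;
		} else
			break;
	} while (1);

	printf("rc=%d size=%zu buf=%s\n", rc, size, buf);
	free(buf);
	return 0;
}
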
@@ -1281,7 +1404,7 @@
 }
 
 /**
- * beiscsi_active_cid_disp()- Display Sessions Active
+ * beiscsi_active_session_disp()- Display Sessions Active
  * @dev: ptr to device not used.
  * @attr: device attribute, not used.
  * @buf: contains formatted text Session Count
@@ -1290,14 +1413,56 @@
  * size of the formatted string
  **/
 ssize_t
-beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-		       (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+			total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num,
+					(total_cids - avlbl_cids));
+		} else
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num, 0);
+	}
+
+	return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Available Sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint16_t ulp_num, len = 0;
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num,
+					BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+		else
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num, 0);
+	}
+
+	return len;
 }
 
 /**
@@ -1338,6 +1503,25 @@
 	}
 }
 
+/**
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+			phba->fw_config.phys_port);
+}
 
 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
 			     struct wrb_handle *pwrb_handle,
@@ -1411,10 +1595,6 @@
 
 	memset(pwrb, 0, sizeof(*pwrb));
 
-	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
-		      max_burst_length, pwrb, params->dw[offsetof
-		      (struct amap_beiscsi_offload_params,
-		      max_burst_length) / 32]);
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
 		      max_burst_length, pwrb, params->dw[offsetof
 		      (struct amap_beiscsi_offload_params,
@@ -1436,7 +1616,9 @@
 		      params->dw[offsetof(struct amap_beiscsi_offload_params,
 		      first_burst_length) / 32]);
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
-		      max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+		      max_recv_dataseg_len, pwrb,
+		      params->dw[offsetof(struct amap_beiscsi_offload_params,
+		      max_recv_data_segment_length) / 32]);
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
 		      max_cxns, pwrb, BEISCSI_MAX_CXNS);
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 04af7e7..01b8c97 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -294,7 +294,7 @@
 		      struct be_cmd_get_nic_conf_resp *mac);
 
 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_if_info_resp *if_info);
+		     struct be_cmd_get_if_info_resp **if_info);
 
 int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
 		     struct be_cmd_get_def_gateway_resp *gateway);
@@ -315,12 +315,19 @@
 ssize_t beiscsi_fw_ver_disp(struct device *dev,
 			     struct device_attribute *attr, char *buf);
 
-ssize_t beiscsi_active_cid_disp(struct device *dev,
-				 struct device_attribute *attr, char *buf);
+ssize_t beiscsi_active_session_disp(struct device *dev,
+				     struct device_attribute *attr, char *buf);
 
 ssize_t beiscsi_adap_family_disp(struct device *dev,
 				  struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_free_session_disp(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_phys_port_disp(struct device *dev,
+				struct device_attribute *attr, char *buf);
+
 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
 			     struct wrb_handle *pwrb_handle,
 			     struct be_mem_descriptor *mem_descr);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index f8ca7be..fc80a32 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -766,49 +766,20 @@
 	bfad->pcidev = pdev;
 
 	/* Adjust PCIe Maximum Read Request Size */
-	if (pcie_max_read_reqsz > 0) {
-		int pcie_cap_reg;
-		u16 pcie_dev_ctl;
-		u16 mask = 0xffff;
-
-		switch (pcie_max_read_reqsz) {
-		case 128:
-			mask = 0x0;
-			break;
-		case 256:
-			mask = 0x1000;
-			break;
-		case 512:
-			mask = 0x2000;
-			break;
-		case 1024:
-			mask = 0x3000;
-			break;
-		case 2048:
-			mask = 0x4000;
-			break;
-		case 4096:
-			mask = 0x5000;
-			break;
-		default:
-			break;
-		}
-
-		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-		if (mask != 0xffff && pcie_cap_reg) {
-			pcie_cap_reg += 0x08;
-			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
-			if ((pcie_dev_ctl & 0x7000) != mask) {
-				printk(KERN_WARNING "BFA[%s]: "
+	if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+		if (pcie_max_read_reqsz >= 128 &&
+		    pcie_max_read_reqsz <= 4096 &&
+		    is_power_of_2(pcie_max_read_reqsz)) {
+			int max_rq = pcie_get_readrq(pdev);
+			printk(KERN_WARNING "BFA[%s]: "
 				"pcie_max_read_request_size is %d, "
-				"reset to %d\n", bfad->pci_name,
-				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+				"reset to %d\n", bfad->pci_name, max_rq,
 				pcie_max_read_reqsz);
-
-				pcie_dev_ctl &= ~0x7000;
-				pci_write_config_word(pdev, pcie_cap_reg,
-						pcie_dev_ctl | mask);
-			}
+			pcie_set_readrq(pdev, pcie_max_read_reqsz);
+		} else {
+			printk(KERN_WARNING "BFA[%s]: invalid "
+			       "pcie_max_read_request_size %d ignored\n",
+			       bfad->pci_name, pcie_max_read_reqsz);
 		}
 	}
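
The bfad hunk swaps hand-rolled config-space masking for the PCI core helpers: validate that the module parameter is a power of two within 128–4096 bytes, then let pcie_set_readrq() program the MRRS field. A sketch of just the validation, with the power-of-two test written out since is_power_of_2() is a kernel helper:

#include <stdio.h>

/* Accept only the MRRS values PCIe defines: 128..4096, power of two. */
static int valid_mrrs(int v)
{
	return v >= 128 && v <= 4096 && (v & (v - 1)) == 0;
}

int main(void)
{
	int vals[] = { 128, 512, 4096, 300, 8192 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%4d -> %s\n", vals[i],
		       valid_mrrs(vals[i]) ? "pcie_set_readrq()" : "ignored");
	return 0;
}
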
 
@@ -833,7 +804,6 @@
 	/* Disable PCIE Advanced Error Recovery (AER) */
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 bfa_status_t
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d7ca930..1ebf3fb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.14"
+#define BNX2FC_VERSION		"2.4.1"
 
 #define PFX			"bnx2fc: "
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 69ac554..9b94850 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Mar 08, 2013"
+#define DRV_MODULE_RELDATE	"Sep 17, 2013"
 
 
 static char version[] =
@@ -542,8 +542,7 @@
 	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
 	if (vn_port) {
 		port = lport_priv(vn_port);
-		if (compare_ether_addr(port->data_src_addr, dest_mac)
-		    != 0) {
+		if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
 			BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
 			put_cpu();
 			kfree_skb(skb);
@@ -1381,6 +1380,7 @@
 		return NULL;
 	}
 	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	ctlr->cdev = ctlr_dev;
 	interface = fcoe_ctlr_priv(ctlr);
 	dev_hold(netdev);
 	kref_init(&interface->kref);
@@ -2004,6 +2004,24 @@
 		set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+	if (interface->enabled == true) {
+		if (!ctlr->lp) {
+			pr_err(PFX "__bnx2fc_disable: lport not found\n");
+			return -ENODEV;
+		} else {
+			interface->enabled = false;
+			fcoe_ctlr_link_down(ctlr);
+			fcoe_clean_pending_queue(ctlr->lp);
+		}
+	}
+	return 0;
+}
+
 /**
  * Deperecated: Use bnx2fc_enabled()
  */
@@ -2018,20 +2036,34 @@
 
 	interface = bnx2fc_interface_lookup(netdev);
 	ctlr = bnx2fc_to_ctlr(interface);
-	if (!interface || !ctlr->lp) {
-		rc = -ENODEV;
-		printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
-	} else {
-		interface->enabled = false;
-		fcoe_ctlr_link_down(ctlr);
-		fcoe_clean_pending_queue(ctlr->lp);
-	}
 
+	if (!interface) {
+		rc = -ENODEV;
+		pr_err(PFX "bnx2fc_disable: interface not found\n");
+	} else {
+		rc = __bnx2fc_disable(ctlr);
+	}
 	mutex_unlock(&bnx2fc_dev_lock);
 	rtnl_unlock();
 	return rc;
 }
 
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+	if (interface->enabled == false) {
+		if (!ctlr->lp) {
+			pr_err(PFX "__bnx2fc_enable: lport not found\n");
+			return -ENODEV;
+		} else if (!bnx2fc_link_ok(ctlr->lp)) {
+			fcoe_ctlr_link_up(ctlr);
+			interface->enabled = true;
+		}
+	}
+	return 0;
+}
+
 /**
  * Deprecated: Use bnx2fc_enabled()
  */
@@ -2046,12 +2078,11 @@
 
 	interface = bnx2fc_interface_lookup(netdev);
 	ctlr = bnx2fc_to_ctlr(interface);
-	if (!interface || !ctlr->lp) {
+	if (!interface) {
 		rc = -ENODEV;
-		printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-	} else if (!bnx2fc_link_ok(ctlr->lp)) {
-		fcoe_ctlr_link_up(ctlr);
-		interface->enabled = true;
+		pr_err(PFX "bnx2fc_enable: interface not found\n");
+	} else {
+		rc = __bnx2fc_enable(ctlr);
 	}
 
 	mutex_unlock(&bnx2fc_dev_lock);
@@ -2072,14 +2103,12 @@
 static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
 {
 	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
-	struct fc_lport *lport = ctlr->lp;
-	struct net_device *netdev = bnx2fc_netdev(lport);
 
 	switch (cdev->enabled) {
 	case FCOE_CTLR_ENABLED:
-		return bnx2fc_enable(netdev);
+		return __bnx2fc_enable(ctlr);
 	case FCOE_CTLR_DISABLED:
-		return bnx2fc_disable(netdev);
+		return __bnx2fc_disable(ctlr);
 	case FCOE_CTLR_UNUSED:
 	default:
 		return -ENOTSUPP;
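
The bnx2fc refactor introduces __bnx2fc_enable()/__bnx2fc_disable(), which assume rtnl_lock and bnx2fc_dev_lock are already held, so bnx2fc_ctlr_enabled() can act on the fcoe_ctlr it already has instead of looping back through a netdev lookup. A minimal model of the locked-wrapper / lock-assumed-helper convention, with a pthread mutex standing in for the kernel locks:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int enabled;

/* Caller must hold dev_lock (the double-underscore convention). */
static int __disable_ctlr(void)
{
	if (enabled) {
		enabled = 0;
		printf("link down, flush pending queue\n");
	}
	return 0;
}

/* Entry point that owns the locking. */
static int disable_ctlr(void)
{
	int rc;

	pthread_mutex_lock(&dev_lock);
	rc = __disable_ctlr();
	pthread_mutex_unlock(&dev_lock);
	return rc;
}

int main(void)
{
	enabled = 1;
	return disable_ctlr();
}
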
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 575142e..ed88089 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1246,6 +1246,12 @@
 			kref_put(&io_req->refcount,
 				 bnx2fc_cmd_release); /* drop timer hold */
 		rc = bnx2fc_expl_logo(lport, io_req);
+		/* This only occurs when a task abort was requested while ABTS
+		   is in progress.  Setting the IO_CLEANUP flag will skip the
+		   RRQ process in the case when the fw generated SCSI_CMD cmpl
+		   was a result of the ABTS request rather than the CLEANUP
+		   request */
+		set_bit(BNX2FC_FLAG_IO_CLEANUP,	&io_req->req_flags);
 		goto out;
 	}
 
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5be718c..e4cf23d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -126,7 +126,7 @@
 
 /**
  * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
- * @ep:		endpoint (transport indentifier) structure
+ * @ep:		endpoint (transport identifier) structure
  * @action:	action, ARM or DISARM. For now only ARM_CQE is used
  *
  * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt
@@ -756,7 +756,7 @@
 /**
  * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
  * @hba:	adapter structure pointer
- * @ep:		endpoint (transport indentifier) structure
+ * @ep:		endpoint (transport identifier) structure
  *
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
  * 	iscsi connection context clean-up process
@@ -791,7 +791,7 @@
 /**
  * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
  * @hba: 		adapter structure pointer
- * @ep: 		endpoint (transport indentifier) structure
+ * @ep: 		endpoint (transport identifier) structure
  *
  * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
@@ -851,7 +851,7 @@
 /**
  * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
  * @hba: 		adapter structure pointer
- * @ep: 		endpoint (transport indentifier) structure
+ * @ep: 		endpoint (transport identifier) structure
  *
  * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
@@ -920,7 +920,7 @@
  * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
  *
  * @hba: 		adapter structure pointer
- * @ep: 		endpoint (transport indentifier) structure
+ * @ep: 		endpoint (transport identifier) structure
  *
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
@@ -939,7 +939,7 @@
 
 /**
  * setup_qp_page_tables - iscsi QP page table setup function
- * @ep:		endpoint (transport indentifier) structure
+ * @ep:		endpoint (transport identifier) structure
  *
  * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
  * 	64-bit address in big endian format. Whereas 10G/sec (57710) requires
@@ -1046,7 +1046,7 @@
 /**
  * bnx2i_alloc_qp_resc - allocates required resources for QP.
  * @hba:	adapter structure pointer
- * @ep:		endpoint (transport indentifier) structure
+ * @ep:		endpoint (transport identifier) structure
  *
  * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
  *	memory for SQ/RQ/CQ and page tables. EP structure elements such
@@ -1191,7 +1191,7 @@
 /**
  * bnx2i_free_qp_resc - free memory resources held by QP
  * @hba:	adapter structure pointer
- * @ep:	endpoint (transport indentifier) structure
+ * @ep:	endpoint (transport identifier) structure
  *
  * Free QP resources - SQ/RQ/CQ memory and page tables.
  */
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fabeb88..854dad7 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -596,7 +596,7 @@
 /**
  * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
  * @hba:	pointer to adapter instance
- * @ep:		pointer to endpoint (transport indentifier) structure
+ * @ep:		pointer to endpoint (transport identifier) structure
  *
  * EP destroy queue manager
  */
@@ -613,7 +613,7 @@
  * bnx2i_ep_destroy_list_del - add an entry to EP destroy list
  *
  * @hba: 		pointer to adapter instance
- * @ep: 		pointer to endpoint (transport indentifier) structure
+ * @ep: 		pointer to endpoint (transport identifier) structure
  *
  * EP destroy queue manager
  */
@@ -630,7 +630,7 @@
 /**
  * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
  * @hba:	pointer to adapter instance
- * @ep:		pointer to endpoint (transport indentifier) structure
+ * @ep:		pointer to endpoint (transport identifier) structure
  *
  * pending conn offload completion queue manager
  */
@@ -646,7 +646,7 @@
 /**
  * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list
  * @hba: 		pointer to adapter instance
- * @ep: 		pointer to endpoint (transport indentifier) structure
+ * @ep: 		pointer to endpoint (transport identifier) structure
  *
  * pending conn offload completion queue manager
  */
@@ -721,7 +721,7 @@
 /**
  * bnx2i_ep_active_list_add - add an entry to ep active list
  * @hba:	pointer to adapter instance
- * @ep:		pointer to endpoint (transport indentifier) structure
+ * @ep:		pointer to endpoint (transport identifier) structure
  *
  * current active conn queue manager
  */
@@ -737,7 +737,7 @@
 /**
  * bnx2i_ep_active_list_del - deletes an entry to ep active list
  * @hba:	pointer to adapter instance
- * @ep:		pointer to endpoint (transport indentifier) structure
+ * @ep:		pointer to endpoint (transport identifier) structure
  *
  * current active conn queue manager
  */
@@ -1695,7 +1695,7 @@
 /**
  * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
  * @hba:	pointer to adapter instance
- * @ep:		endpoint (transport indentifier) structure
+ * @ep:		endpoint (transport identifier) structure
  *
  * destroys cm_sock structure and on chip iscsi context
  */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 0eb35b9..0eaec47 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -852,22 +852,6 @@
 	return 0;
 }
 
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
-	uint16_t val;
-	int pcie_cap;
-
-	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
-		pci_read_config_word(hw->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL2, &val);
-		val &= 0xfff0;
-		val |= range ;
-		pci_write_config_word(hw->pdev,
-				      pcie_cap + PCI_EXP_DEVCTL2, val);
-	}
-}
-
 /*****************************************************************************/
 /* HW State machine assists                                                  */
 /*****************************************************************************/
@@ -2069,8 +2053,10 @@
 		goto out;
 	}
 
-	/* Set pci completion timeout value to 4 seconds. */
-	csio_set_pcie_completion_timeout(hw, 0xd);
+	/* Set PCIe completion timeout to 4 seconds */
+	if (pci_is_pcie(hw->pdev))
+		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 
 	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
 
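
The hunk above drops the driver's open-coded capability walk in favor of the PCI core helper. A minimal sketch of the pattern (pdev is a placeholder for any struct pci_dev):

	/* pcie_capability_clear_and_set_word() locates the PCIe capability,
	 * then does the read/merge/write in one call, so drivers no longer
	 * walk the capability list by hand.
	 */
	if (pci_is_pcie(pdev))
		pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT,	/* clear bits 3:0 */
				0xd);				/* then set range D */
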
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 00346fe..1aafc33 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1010,7 +1010,6 @@
 	csio_hw_stop(hw);
 	spin_unlock_irq(&hw->lock);
 	csio_lnodes_unblock_request(hw);
-	pci_set_drvdata(hw->pdev, NULL);
 	csio_lnodes_exit(hw, 0);
 	csio_hw_free(hw);
 err_pci_exit:
@@ -1044,7 +1043,6 @@
 
 	csio_lnodes_exit(hw, 0);
 	csio_hw_free(hw);
-	pci_set_drvdata(pdev, NULL);
 	csio_pci_exit(pdev, &bars);
 }
 
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 694e13c..83d9bf6 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -308,6 +308,8 @@
 	struct timer_list waiting_timer;
 	struct timer_list selto_timer;
 
+	unsigned long last_reset;
+
 	u16 srb_count;
 
 	u8 sel_timeout;
@@ -860,9 +862,9 @@
 	init_timer(&acb->waiting_timer);
 	acb->waiting_timer.function = waiting_timeout;
 	acb->waiting_timer.data = (unsigned long) acb;
-	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
+	if (time_before(jiffies + to, acb->last_reset - HZ / 2))
 		acb->waiting_timer.expires =
-		    acb->scsi_host->last_reset - HZ / 2 + 1;
+		    acb->last_reset - HZ / 2 + 1;
 	else
 		acb->waiting_timer.expires = jiffies + to + 1;
 	add_timer(&acb->waiting_timer);
@@ -1319,7 +1321,7 @@
 	udelay(500);
 
 	/* We may be in serious trouble. Wait some seconds */
-	acb->scsi_host->last_reset =
+	acb->last_reset =
 	    jiffies + 3 * HZ / 2 +
 	    HZ * acb->eeprom.delay_time;
 
@@ -1462,9 +1464,9 @@
 	acb->selto_timer.function = selection_timeout_missed;
 	acb->selto_timer.data = (unsigned long) acb;
 	if (time_before
-	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
+	    (jiffies + HZ, acb->last_reset + HZ / 2))
 		acb->selto_timer.expires =
-		    acb->scsi_host->last_reset + HZ / 2 + 1;
+		    acb->last_reset + HZ / 2 + 1;
 	else
 		acb->selto_timer.expires = jiffies + HZ + 1;
 	add_timer(&acb->selto_timer);
@@ -1535,7 +1537,7 @@
 	}
 	/* Allow starting of SCSI commands half a second before we allow the mid-level
 	 * to queue them again after a reset */
-	if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
+	if (time_before(jiffies, acb->last_reset - HZ / 2)) {
 		dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
 		return 1;
 	}
@@ -3031,7 +3033,7 @@
 		dprintkl(KERN_ERR, "disconnect: No such device\n");
 		udelay(500);
 		/* Suspend queue for a while */
-		acb->scsi_host->last_reset =
+		acb->last_reset =
 		    jiffies + HZ / 2 +
 		    HZ * acb->eeprom.delay_time;
 		clear_fifo(acb, "disconnectEx");
@@ -3053,7 +3055,7 @@
 		waiting_process_next(acb);
 	} else if (srb->state & SRB_ABORT_SENT) {
 		dcb->flag &= ~ABORT_DEV_;
-		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
+		acb->last_reset = jiffies + HZ / 2 + 1;
 		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
 		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
 		waiting_process_next(acb);
@@ -3649,7 +3651,7 @@
 	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
 	udelay(500);
 	/* Maybe we locked up the bus? Then lets wait even longer ... */
-	acb->scsi_host->last_reset =
+	acb->last_reset =
 	    jiffies + 5 * HZ / 2 +
 	    HZ * acb->eeprom.delay_time;
 
@@ -4426,7 +4428,7 @@
 	host->dma_channel = -1;
 	host->unique_id = acb->io_port_base;
 	host->irq = acb->irq_level;
-	host->last_reset = jiffies;
+	acb->last_reset = jiffies;
 
 	host->max_id = 16;
 	if (host->max_id - 1 == eeprom->scsi_id)
@@ -4484,7 +4486,7 @@
 		/*spin_unlock_irq (&io_request_lock); */
 		udelay(500);
 
-		acb->scsi_host->last_reset =
+		acb->last_reset =
 		    jiffies + HZ / 2 +
 		    HZ * acb->eeprom.delay_time;
 
@@ -4859,7 +4861,6 @@
 	adapter_uninit(acb);
 	pci_disable_device(dev);
 	scsi_host_put(scsi_host);
-	pci_set_drvdata(dev, NULL);
 }
 
 
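
All of the dc395x conversions above keep the same wrap-safe jiffies arithmetic, just against the driver-private acb->last_reset instead of the removed Scsi_Host field. A condensed sketch of the idiom (the helper name is invented):

	#include <linux/jiffies.h>

	/* time_before() compares jiffies values safely across counter
	 * wraparound, unlike a plain '<'.
	 */
	static bool reset_holdoff_active(unsigned long last_reset)
	{
		return time_before(jiffies, last_reset - HZ / 2);
	}
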
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 68adb89..5248c88 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -481,6 +481,11 @@
 			 * Power On, Reset, or Bus Device Reset, just retry.
 			 */
 			return ADD_TO_MLQUEUE;
+		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
+			/*
+			 * Device internal reset
+			 */
+			return ADD_TO_MLQUEUE;
 		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
 			/*
 			 * Mode Parameters Changed
@@ -517,12 +522,13 @@
 /*
  * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
  * @sdev: the device to be evaluated.
+ * @wait_for_transition: if nonzero, wait up to ALUA_FAILOVER_TIMEOUT seconds for the device to exit the transitioning state
  *
  * Evaluate the Target Port Group State.
  * Returns SCSI_DH_DEV_OFFLINED if the path is
  * found to be unusable.
  */
-static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
 {
 	struct scsi_sense_hdr sense_hdr;
 	int len, k, off, valid_states = 0;
@@ -594,7 +600,7 @@
 	else
 		h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
 
-	if (orig_transition_tmo != h->transition_tmo) {
+	if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
 		sdev_printk(KERN_INFO, sdev,
 			    "%s: transition timeout set to %d seconds\n",
 			    ALUA_DH_NAME, h->transition_tmo);
@@ -632,14 +638,19 @@
 
 	switch (h->state) {
 	case TPGS_STATE_TRANSITIONING:
-		if (time_before(jiffies, expiry)) {
-			/* State transition, retry */
-			interval += 2000;
-			msleep(interval);
-			goto retry;
+		if (wait_for_transition) {
+			if (time_before(jiffies, expiry)) {
+				/* State transition, retry */
+				interval += 2000;
+				msleep(interval);
+				goto retry;
+			}
+			err = SCSI_DH_RETRY;
+		} else {
+			err = SCSI_DH_OK;
 		}
+
 		/* Transitioning time exceeded, set port to standby */
-		err = SCSI_DH_RETRY;
 		h->state = TPGS_STATE_STANDBY;
 		break;
 	case TPGS_STATE_OFFLINE:
@@ -673,7 +684,7 @@
 	if (err != SCSI_DH_OK)
 		goto out;
 
-	err = alua_rtpg(sdev, h);
+	err = alua_rtpg(sdev, h, 0);
 	if (err != SCSI_DH_OK)
 		goto out;
 
@@ -733,7 +744,7 @@
 	int err = SCSI_DH_OK;
 	int stpg = 0;
 
-	err = alua_rtpg(sdev, h);
+	err = alua_rtpg(sdev, h, 1);
 	if (err != SCSI_DH_OK)
 		goto out;
 
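
A hedged sketch of the unit-attention triage this hunk extends; the helper is hypothetical, the ASC/ASCQ values are the ones handled above:

	/* Transient unit attentions are requeued rather than failed */
	static bool ua_should_requeue(const struct scsi_sense_hdr *h)
	{
		if (h->asc == 0x29)			/* POR/reset family, incl. 0x29/0x04 */
			return true;
		if (h->asc == 0x2a && h->ascq == 0x01)	/* mode parameters changed */
			return true;
		return false;
	}
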
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 69c915a..4b9cf93 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -786,6 +786,7 @@
 	{"IBM", "1742"},
 	{"IBM", "1745"},
 	{"IBM", "1746"},
+	{"IBM", "1813"},
 	{"IBM", "1814"},
 	{"IBM", "1815"},
 	{"IBM", "1818"},
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 19e1b42..c0ae8fa 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -448,19 +448,8 @@
 	}
 
 	rmb();
-	/*
-	 * TODO: I need to block here if I am processing ioctl cmds
-	 * but if the outstanding cmds all finish before the ioctl,
-	 * the scsi-core will not know to start sending cmds to me again.
-	 * I need to a way to restart the scsi-cores queues or should I block
-	 * calling scsi_done on the outstanding cmds instead
-	 * for now we don't set the IOCTL state
-	 */
-	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
-		pHba->host->last_reset = jiffies;
-		pHba->host->resetting = 1;
-		return 1;
-	}
+	if ((pHba->state) & DPTI_STATE_RESET)
+		return SCSI_MLQUEUE_HOST_BUSY;
 
 	// TODO if the cmd->device if offline then I may need to issue a bus rescan
 	// followed by a get_lct to see if the device is there anymore
@@ -1811,21 +1800,23 @@
 	}
 
 	do {
-		if(pHba->host)
+		/*
+		 * Stop any new commands from entering the
+		 * controller while processing the ioctl
+		 */
+		if (pHba->host) {
+			scsi_block_requests(pHba->host);
 			spin_lock_irqsave(pHba->host->host_lock, flags);
-		// This state stops any new commands from enterring the
-		// controller while processing the ioctl
-//		pHba->state |= DPTI_STATE_IOCTL;
-//		We can't set this now - The scsi subsystem sets host_blocked and
-//		the queue empties and stops.  We need a way to restart the queue
+		}
 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
 		if (rcode != 0)
 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
 					rcode, reply);
-//		pHba->state &= ~DPTI_STATE_IOCTL;
-		if(pHba->host)
+		if (pHba->host) {
 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
-	} while(rcode == -ETIMEDOUT);  
+			scsi_unblock_requests(pHba->host);
+		}
+	} while (rcode == -ETIMEDOUT);
 
 	if(rcode){
 		goto cleanup;
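
The retry loop above replaces the abandoned DPTI_STATE_IOCTL scheme with the midlayer's own gate. The shape of that bracket, reduced to its essentials (shost stands in for any struct Scsi_Host pointer):

	/* scsi_block_requests() stops the midlayer from queueing new commands
	 * to this host; every exit path must pair it with
	 * scsi_unblock_requests() or the queue stays frozen.
	 */
	scsi_block_requests(shost);
	/* ... post the passthru message and wait for completion ... */
	scsi_unblock_requests(shost);
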
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index beded71..aeb0461 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -202,7 +202,6 @@
 
 // HBA state flags
 #define DPTI_STATE_RESET	(0x01)
-#define DPTI_STATE_IOCTL	(0x02)
 
 typedef struct _adpt_hba {
 	struct _adpt_hba *next;
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 0838e26..3fd305d 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -799,47 +799,47 @@
 	struct esas2r_target *targetdb_end;
 	unsigned char *regs;
 	unsigned char *data_window;
-	u32 volatile flags;
-	#define AF_PORT_CHANGE      (u32)(0x00000001)
-	#define AF_CHPRST_NEEDED    (u32)(0x00000004)
-	#define AF_CHPRST_PENDING   (u32)(0x00000008)
-	#define AF_CHPRST_DETECTED  (u32)(0x00000010)
-	#define AF_BUSRST_NEEDED    (u32)(0x00000020)
-	#define AF_BUSRST_PENDING   (u32)(0x00000040)
-	#define AF_BUSRST_DETECTED  (u32)(0x00000080)
-	#define AF_DISABLED         (u32)(0x00000100)
-	#define AF_FLASH_LOCK       (u32)(0x00000200)
-	#define AF_OS_RESET         (u32)(0x00002000)
-	#define AF_FLASHING         (u32)(0x00004000)
-	#define AF_POWER_MGT        (u32)(0x00008000)
-	#define AF_NVR_VALID        (u32)(0x00010000)
-	#define AF_DEGRADED_MODE    (u32)(0x00020000)
-	#define AF_DISC_PENDING     (u32)(0x00040000)
-	#define AF_TASKLET_SCHEDULED    (u32)(0x00080000)
-	#define AF_HEARTBEAT        (u32)(0x00200000)
-	#define AF_HEARTBEAT_ENB    (u32)(0x00400000)
-	#define AF_NOT_PRESENT      (u32)(0x00800000)
-	#define AF_CHPRST_STARTED   (u32)(0x01000000)
-	#define AF_FIRST_INIT       (u32)(0x02000000)
-	#define AF_POWER_DOWN       (u32)(0x04000000)
-	#define AF_DISC_IN_PROG     (u32)(0x08000000)
-	#define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
-	#define AF_LEGACY_SGE_MODE  (u32)(0x20000000)
-	#define AF_DISC_POLLED      (u32)(0x40000000)
-	u32 volatile flags2;
-	#define AF2_SERIAL_FLASH    (u32)(0x00000001)
-	#define AF2_DEV_SCAN        (u32)(0x00000002)
-	#define AF2_DEV_CNT_OK      (u32)(0x00000004)
-	#define AF2_COREDUMP_AVAIL  (u32)(0x00000008)
-	#define AF2_COREDUMP_SAVED  (u32)(0x00000010)
-	#define AF2_VDA_POWER_DOWN  (u32)(0x00000100)
-	#define AF2_THUNDERLINK     (u32)(0x00000200)
-	#define AF2_THUNDERBOLT     (u32)(0x00000400)
-	#define AF2_INIT_DONE       (u32)(0x00000800)
-	#define AF2_INT_PENDING     (u32)(0x00001000)
-	#define AF2_TIMER_TICK      (u32)(0x00002000)
-	#define AF2_IRQ_CLAIMED     (u32)(0x00004000)
-	#define AF2_MSI_ENABLED     (u32)(0x00008000)
+	long flags;
+	#define AF_PORT_CHANGE      0
+	#define AF_CHPRST_NEEDED    1
+	#define AF_CHPRST_PENDING   2
+	#define AF_CHPRST_DETECTED  3
+	#define AF_BUSRST_NEEDED    4
+	#define AF_BUSRST_PENDING   5
+	#define AF_BUSRST_DETECTED  6
+	#define AF_DISABLED         7
+	#define AF_FLASH_LOCK       8
+	#define AF_OS_RESET         9
+	#define AF_FLASHING         10
+	#define AF_POWER_MGT        11
+	#define AF_NVR_VALID        12
+	#define AF_DEGRADED_MODE    13
+	#define AF_DISC_PENDING     14
+	#define AF_TASKLET_SCHEDULED    15
+	#define AF_HEARTBEAT        16
+	#define AF_HEARTBEAT_ENB    17
+	#define AF_NOT_PRESENT      18
+	#define AF_CHPRST_STARTED   19
+	#define AF_FIRST_INIT       20
+	#define AF_POWER_DOWN       21
+	#define AF_DISC_IN_PROG     22
+	#define AF_COMM_LIST_TOGGLE 23
+	#define AF_LEGACY_SGE_MODE  24
+	#define AF_DISC_POLLED      25
+	long flags2;
+	#define AF2_SERIAL_FLASH    0
+	#define AF2_DEV_SCAN        1
+	#define AF2_DEV_CNT_OK      2
+	#define AF2_COREDUMP_AVAIL  3
+	#define AF2_COREDUMP_SAVED  4
+	#define AF2_VDA_POWER_DOWN  5
+	#define AF2_THUNDERLINK     6
+	#define AF2_THUNDERBOLT     7
+	#define AF2_INIT_DONE       8
+	#define AF2_INT_PENDING     9
+	#define AF2_TIMER_TICK      10
+	#define AF2_IRQ_CLAIMED     11
+	#define AF2_MSI_ENABLED     12
 	atomic_t disable_cnt;
 	atomic_t dis_ints_cnt;
 	u32 int_stat;
@@ -1150,16 +1150,6 @@
 			   int data_sz);
 
 /* Inline functions */
-static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
-{
-	return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
-}
-
-static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
-{
-	return test_and_clear_bit(ilog2(bits),
-				  (volatile unsigned long *)flags);
-}
 
 /* Allocate a chip scatter/gather list entry */
 static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
@@ -1217,7 +1207,6 @@
 					  struct esas2r_adapter *a)
 {
 	union atto_vda_req *vrq = rq->vrq;
-	u32 handle;
 
 	INIT_LIST_HEAD(&rq->sg_table_head);
 	rq->data_buf = (void *)(vrq + 1);
@@ -1253,11 +1242,9 @@
 
 	/*
 	 * add a reference number to the handle to make it unique (until it
-	 * wraps of course) while preserving the upper word
+	 * wraps of course) while preserving the least significant word
 	 */
-
-	handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
-	vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
+	vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
 
 	/*
 	 * the following formats a SCSI request.  the caller can override as
@@ -1303,10 +1290,13 @@
 
 static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
 {
-	return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
-			    | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
-			    | AF_PORT_CHANGE))
-	       ? true : false;
+
+	return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
+	       test_bit(AF_BUSRST_DETECTED, &a->flags) ||
+	       test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+	       test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+	       test_bit(AF_PORT_CHANGE, &a->flags);
+
 }
 
 /*
@@ -1345,24 +1335,24 @@
 static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
 {
 	/* make sure we don't schedule twice */
-	if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
-	      ilog2(AF_TASKLET_SCHEDULED)))
+	if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
 		tasklet_hi_schedule(&a->tasklet);
 }
 
 static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
 {
-	if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
-	    && (a->nvram->options2 & SASNVR2_HEARTBEAT))
-		esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
+	if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+	    (a->nvram->options2 & SASNVR2_HEARTBEAT))
+		set_bit(AF_HEARTBEAT_ENB, &a->flags);
 	else
-		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+		clear_bit(AF_HEARTBEAT_ENB, &a->flags);
 }
 
 static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
 {
-	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
-	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+	clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+	clear_bit(AF_HEARTBEAT, &a->flags);
 }
 
 /* Set the initial state for resetting the adapter on the next pass through
@@ -1372,9 +1362,9 @@
 {
 	esas2r_disable_heartbeat(a);
 
-	esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
-	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
-	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+	set_bit(AF_CHPRST_NEEDED, &a->flags);
+	set_bit(AF_CHPRST_PENDING, &a->flags);
+	set_bit(AF_DISC_PENDING, &a->flags);
 }
 
 /* See if an interrupt is pending on the adapter. */
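
The flag rework above is mechanical but worth spelling out: the AF_*/AF2_* values change from bit masks to bit numbers, and every access goes through the kernel's atomic bitops, retiring the driver's racy esas2r_lock_set_flags()/esas2r_lock_clear_flags() wrappers. A minimal sketch of the pattern (structure and names invented for illustration):

	#include <linux/bitops.h>

	struct demo {
		unsigned long flags;
	#define DEMO_BUSY	0	/* a bit number now, not (u32)0x00000001 */
	};

	static void demo_use(struct demo *d)
	{
		set_bit(DEMO_BUSY, &d->flags);		/* atomic read-modify-write */
		if (test_bit(DEMO_BUSY, &d->flags))
			clear_bit(DEMO_BUSY, &d->flags);
		if (!test_and_set_bit(DEMO_BUSY, &d->flags))
			;	/* first setter wins: the schedule-once idiom
				 * used by esas2r_schedule_tasklet() above */
	}
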
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
index dec6c33..1c079f4 100644
--- a/drivers/scsi/esas2r/esas2r_disc.c
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -86,9 +86,9 @@
 
 	esas2r_trace_enter();
 
-	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
-	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
-	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
+	clear_bit(AF_DISC_IN_PROG, &a->flags);
+	clear_bit(AF2_DEV_SCAN, &a->flags2);
+	clear_bit(AF2_DEV_CNT_OK, &a->flags2);
 
 	a->disc_start_time = jiffies_to_msecs(jiffies);
 	a->disc_wait_time = nvr->dev_wait_time * 1000;
@@ -107,7 +107,8 @@
 
 	a->general_req.interrupt_cx = NULL;
 
-	if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
+	if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+	    test_bit(AF_POWER_MGT, &a->flags)) {
 		if (a->prev_dev_cnt == 0) {
 			/* Don't bother waiting if there is nothing to wait
 			 * for.
@@ -212,9 +213,7 @@
 			|| a->disc_wait_cnt == 0)) {
 			/* After three seconds of waiting, schedule a scan. */
 			if (time >= 3000
-			    && !(esas2r_lock_set_flags(&a->flags2,
-						       AF2_DEV_SCAN) &
-				 ilog2(AF2_DEV_SCAN))) {
+			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 				spin_lock_irqsave(&a->mem_lock, flags);
 				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 				spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -228,18 +227,14 @@
 		 * We are done waiting...we think.  Adjust the wait time to
 		 * consume events after the count is met.
 		 */
-		if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
-		      & ilog2(AF2_DEV_CNT_OK)))
+		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
 			a->disc_wait_time = time + 3000;
 
 		/* If we haven't done a full scan yet, do it now. */
-		if (!(esas2r_lock_set_flags(&a->flags2,
-					    AF2_DEV_SCAN) &
-		      ilog2(AF2_DEV_SCAN))) {
+		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 			spin_lock_irqsave(&a->mem_lock, flags);
 			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 			spin_unlock_irqrestore(&a->mem_lock, flags);
-
 			esas2r_trace_exit();
 			return;
 		}
@@ -253,9 +248,7 @@
 			return;
 		}
 	} else {
-		if (!(esas2r_lock_set_flags(&a->flags2,
-					    AF2_DEV_SCAN) &
-		      ilog2(AF2_DEV_SCAN))) {
+		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 			spin_lock_irqsave(&a->mem_lock, flags);
 			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 			spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -265,8 +258,8 @@
 	/* We want to stop waiting for devices. */
 	a->disc_wait_time = 0;
 
-	if ((a->flags & AF_DISC_POLLED)
-	    && (a->flags & AF_DISC_IN_PROG)) {
+	if (test_bit(AF_DISC_POLLED, &a->flags) &&
+	    test_bit(AF_DISC_IN_PROG, &a->flags)) {
 		/*
 		 * Polled discovery is still pending so continue the active
 		 * discovery until it is done.  At that point, we will stop
@@ -280,14 +273,14 @@
 		 * driven; i.e. There is no transition.
 		 */
 		esas2r_disc_fix_curr_requests(a);
-		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+		clear_bit(AF_DISC_PENDING, &a->flags);
 
 		/*
 		 * We have deferred target state changes until now because we
 		 * don't want to report any removals (due to the first arrival)
 		 * until the device wait time expires.
 		 */
-		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+		set_bit(AF_PORT_CHANGE, &a->flags);
 	}
 
 	esas2r_trace_exit();
@@ -308,7 +301,8 @@
 	 * Don't start discovery before or during polled discovery.  if we did,
 	 * we would have a deadlock if we are in the ISR already.
 	 */
-	if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
+	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+	    !test_bit(AF_DISC_POLLED, &a->flags))
 		esas2r_disc_start_port(a);
 
 	esas2r_trace_exit();
@@ -322,7 +316,7 @@
 
 	esas2r_trace_enter();
 
-	if (a->flags & AF_DISC_IN_PROG) {
+	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
 		esas2r_trace_exit();
 
 		return false;
@@ -330,7 +324,7 @@
 
 	/* If there is a discovery waiting, process it. */
 	if (dc->disc_evt) {
-		if ((a->flags & AF_DISC_POLLED)
+		if (test_bit(AF_DISC_POLLED, &a->flags)
 		    && a->disc_wait_time == 0) {
 			/*
 			 * We are doing polled discovery, but we no longer want
@@ -347,7 +341,7 @@
 
 		esas2r_hdebug("disc done");
 
-		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+		set_bit(AF_PORT_CHANGE, &a->flags);
 
 		esas2r_trace_exit();
 
@@ -356,10 +350,10 @@
 
 	/* Handle the discovery context */
 	esas2r_trace("disc_evt: %d", dc->disc_evt);
-	esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
+	set_bit(AF_DISC_IN_PROG, &a->flags);
 	dc->flags = 0;
 
-	if (a->flags & AF_DISC_POLLED)
+	if (test_bit(AF_DISC_POLLED, &a->flags))
 		dc->flags |= DCF_POLLED;
 
 	rq->interrupt_cx = dc;
@@ -379,7 +373,7 @@
 	}
 
 	/* Continue interrupt driven discovery */
-	if (!(a->flags & AF_DISC_POLLED))
+	if (!test_bit(AF_DISC_POLLED, &a->flags))
 		ret = esas2r_disc_continue(a, rq);
 	else
 		ret = true;
@@ -453,10 +447,10 @@
 	/* Discovery is done...for now. */
 	rq->interrupt_cx = NULL;
 
-	if (!(a->flags & AF_DISC_PENDING))
+	if (!test_bit(AF_DISC_PENDING, &a->flags))
 		esas2r_disc_fix_curr_requests(a);
 
-	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+	clear_bit(AF_DISC_IN_PROG, &a->flags);
 
 	/* Start the next discovery. */
 	return esas2r_disc_start_port(a);
@@ -480,7 +474,8 @@
 
 	spin_lock_irqsave(&a->queue_lock, flags);
 
-	if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
+	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+	    !test_bit(AF_FLASHING, &a->flags))
 		esas2r_disc_local_start_request(a, rq);
 	else
 		list_add_tail(&rq->req_list, &a->defer_list);
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 2ec3c23..b7dc59f 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -231,7 +231,7 @@
 	 * RS_PENDING, FM API tasks will continue.
 	 */
 	rq->req_stat = RS_PENDING;
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		/* not suppported for now */;
 	else
 		build_flash_msg(a, rq);
@@ -315,7 +315,7 @@
 		memset(fc->scratch, 0, FM_BUF_SZ);
 
 	esas2r_enable_heartbeat(a);
-	esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+	clear_bit(AF_FLASH_LOCK, &a->flags);
 	return false;
 }
 
@@ -526,7 +526,7 @@
 			 * The download is complete.  If in degraded mode,
 			 * attempt a chip reset.
 			 */
-			if (a->flags & AF_DEGRADED_MODE)
+			if (test_bit(AF_DEGRADED_MODE, &a->flags))
 				esas2r_local_reset_adapter(a);
 
 			a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
@@ -890,7 +890,7 @@
 		}
 	}
 
-	if (a->flags & AF_DEGRADED_MODE) {
+	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
 		fs->status = ATTO_STS_DEGRADED;
 		return false;
 	}
@@ -945,8 +945,12 @@
 
 	/* Now wait for the firmware to process it */
 	starttime = jiffies_to_msecs(jiffies);
-	timeout = a->flags &
-		  (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
+
+	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+	    test_bit(AF_DISC_PENDING, &a->flags))
+		timeout = 40000;
+	else
+		timeout = 5000;
 
 	while (true) {
 		intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
@@ -1008,7 +1012,7 @@
 		u32 offset;
 		u32 iatvr;
 
-		if (a->flags2 & AF2_SERIAL_FLASH)
+		if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
 			iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
 		else
 			iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
@@ -1236,9 +1240,9 @@
 	if (rq->req_stat != RS_PENDING) {
 		/* update the NVRAM state */
 		if (rq->req_stat == RS_SUCCESS)
-			esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+			set_bit(AF_NVR_VALID, &a->flags);
 		else
-			esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+			clear_bit(AF_NVR_VALID, &a->flags);
 
 		esas2r_enable_heartbeat(a);
 
@@ -1258,7 +1262,7 @@
 	u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
 	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return false;
 
 	if (down_interruptible(&a->nvram_semaphore))
@@ -1302,7 +1306,7 @@
 			       FLS_OFFSET_NVR,
 			       sizeof(struct esas2r_sas_nvram));
 
-	if (a->flags & AF_LEGACY_SGE_MODE) {
+	if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
 
 		vrq->data.sge[0].length =
 			cpu_to_le32(SGE_LAST |
@@ -1337,7 +1341,7 @@
 	} else if (n->version > SASNVR_VERSION) {
 		esas2r_hdebug("invalid NVRAM version");
 	} else {
-		esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+		set_bit(AF_NVR_VALID, &a->flags);
 		rslt = true;
 	}
 
@@ -1359,7 +1363,7 @@
 	struct esas2r_sas_nvram *n = a->nvram;
 	u32 time = jiffies_to_msecs(jiffies);
 
-	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+	clear_bit(AF_NVR_VALID, &a->flags);
 	*n = default_sas_nvram;
 	n->sas_addr[3] |= 0x0F;
 	n->sas_addr[4] = HIBYTE(LOWORD(time));
@@ -1389,7 +1393,7 @@
 	u8 j;
 	struct esas2r_component_header *ch;
 
-	if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
+	if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
 		/* flag was already set */
 		fi->status = FI_STAT_BUSY;
 		return false;
@@ -1413,7 +1417,7 @@
 		return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
 	}
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
 
 	switch (fi->action) {
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index da1869d..b9750e2 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -216,7 +216,7 @@
 			goto use_legacy_interrupts;
 		}
 		a->intr_mode = INTR_MODE_MSI;
-		esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+		set_bit(AF2_MSI_ENABLED, &a->flags2);
 		break;
 
 
@@ -252,7 +252,7 @@
 		return;
 	}
 
-	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
+	set_bit(AF2_IRQ_CLAIMED, &a->flags2);
 	esas2r_log(ESAS2R_LOG_INFO,
 		   "claimed IRQ %d flags: 0x%lx",
 		   a->pcid->irq, flags);
@@ -380,10 +380,10 @@
 	/* interrupts will be disabled until we are done with init */
 	atomic_inc(&a->dis_ints_cnt);
 	atomic_inc(&a->disable_cnt);
-	a->flags |= AF_CHPRST_PENDING
-		    | AF_DISC_PENDING
-		    | AF_FIRST_INIT
-		    | AF_LEGACY_SGE_MODE;
+	set_bit(AF_CHPRST_PENDING, &a->flags);
+	set_bit(AF_DISC_PENDING, &a->flags);
+	set_bit(AF_FIRST_INIT, &a->flags);
+	set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
 	a->init_msg = ESAS2R_INIT_MSG_START;
 	a->max_vdareq_size = 128;
@@ -440,11 +440,11 @@
 
 	esas2r_claim_interrupts(a);
 
-	if (a->flags2 & AF2_IRQ_CLAIMED)
+	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
 		esas2r_enable_chip_interrupts(a);
 
-	esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
-	if (!(a->flags & AF_DEGRADED_MODE))
+	set_bit(AF2_INIT_DONE, &a->flags2);
+	if (!test_bit(AF_DEGRADED_MODE, &a->flags))
 		esas2r_kickoff_timer(a);
 	esas2r_debug("esas2r_init_adapter done for %p (%d)",
 		     a, a->disable_cnt);
@@ -457,8 +457,8 @@
 {
 	struct esas2r_mem_desc *memdesc, *next;
 
-	if ((a->flags2 & AF2_INIT_DONE)
-	    &&  (!(a->flags & AF_DEGRADED_MODE))) {
+	if ((test_bit(AF2_INIT_DONE, &a->flags2))
+	    &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
 		if (!power_management) {
 			del_timer_sync(&a->timer);
 			tasklet_kill(&a->tasklet);
@@ -508,19 +508,19 @@
 	}
 
 	/* Clean up interrupts */
-	if (a->flags2 & AF2_IRQ_CLAIMED) {
+	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
 		esas2r_log_dev(ESAS2R_LOG_INFO,
 			       &(a->pcid->dev),
 			       "free_irq(%d) called", a->pcid->irq);
 
 		free_irq(a->pcid->irq, a);
 		esas2r_debug("IRQ released");
-		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
+		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
 	}
 
-	if (a->flags2 & AF2_MSI_ENABLED) {
+	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
 		pci_disable_msi(a->pcid);
-		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
+		clear_bit(AF2_MSI_ENABLED, &a->flags2);
 		esas2r_debug("MSI disabled");
 	}
 
@@ -641,12 +641,10 @@
 		pci_set_drvdata(a->pcid, NULL);
 		esas2r_adapters[i] = NULL;
 
-		if (a->flags2 & AF2_INIT_DONE) {
-			esas2r_lock_clear_flags(&a->flags2,
-						AF2_INIT_DONE);
+		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
+			clear_bit(AF2_INIT_DONE, &a->flags2);
 
-			esas2r_lock_set_flags(&a->flags,
-					      AF_DEGRADED_MODE);
+			set_bit(AF_DEGRADED_MODE, &a->flags);
 
 			esas2r_log_dev(ESAS2R_LOG_INFO,
 				       &(a->host->shost_gendev),
@@ -759,7 +757,7 @@
 
 	esas2r_claim_interrupts(a);
 
-	if (a->flags2 & AF2_IRQ_CLAIMED) {
+	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
 		/*
 		 * Now that system interrupt(s) are claimed, we can enable
 		 * chip interrupts.
@@ -781,7 +779,7 @@
 
 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
 {
-	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+	set_bit(AF_DEGRADED_MODE, &a->flags);
 	esas2r_log(ESAS2R_LOG_CRIT,
 		   "setting adapter to degraded mode: %s\n", error_str);
 	return false;
@@ -809,7 +807,7 @@
 	int pcie_cap_reg;
 
 	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (0xffff & pcie_cap_reg) {
+	if (pcie_cap_reg) {
 		u16 devcontrol;
 
 		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -896,7 +894,7 @@
 	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
 		a->flags2 |= AF2_THUNDERBOLT;
 
-	if (a->flags2 & AF2_THUNDERBOLT)
+	if (test_bit(AF2_THUNDERBOLT, &a->flags2))
 		a->flags2 |= AF2_SERIAL_FLASH;
 
 	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
@@ -956,14 +954,14 @@
 	a->outbound_copy = (u32 volatile *)high;
 	high += sizeof(u32);
 
-	if (!(a->flags & AF_NVR_VALID))
+	if (!test_bit(AF_NVR_VALID, &a->flags))
 		esas2r_nvram_set_defaults(a);
 
 	/* update the caller's uncached memory area pointer */
 	*uncached_area = (void *)high;
 
 	/* initialize the allocated memory */
-	if (a->flags & AF_FIRST_INIT) {
+	if (test_bit(AF_FIRST_INIT, &a->flags)) {
 		memset(a->req_table, 0,
 		       (num_requests + num_ae_requests +
 			1) * sizeof(struct esas2r_request *));
@@ -1019,7 +1017,7 @@
 	 * if the chip reset detected flag is set, we can bypass a bunch of
 	 * stuff.
 	 */
-	if (a->flags & AF_CHPRST_DETECTED)
+	if (test_bit(AF_CHPRST_DETECTED, &a->flags))
 		goto skip_chip_reset;
 
 	/*
@@ -1057,14 +1055,12 @@
 						    doorbell);
 
 			if (ver == DRBL_FW_VER_0) {
-				esas2r_lock_set_flags(&a->flags,
-						      AF_LEGACY_SGE_MODE);
+				set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
 				a->max_vdareq_size = 128;
 				a->build_sgl = esas2r_build_sg_list_sge;
 			} else if (ver == DRBL_FW_VER_1) {
-				esas2r_lock_clear_flags(&a->flags,
-							AF_LEGACY_SGE_MODE);
+				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
 				a->max_vdareq_size = 1024;
 				a->build_sgl = esas2r_build_sg_list_prd;
@@ -1139,7 +1135,7 @@
 	*a->outbound_copy =
 		a->last_write =
 			a->last_read = a->list_size - 1;
-	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
 	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
 				    a->last_write);
 	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
@@ -1204,9 +1200,9 @@
 	 */
 	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
 	if (doorbell & DRBL_POWER_DOWN)
-		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
 	else
-		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
 
 	/*
 	 * enable assertion of outbound queue and doorbell interrupts in the
@@ -1239,8 +1235,8 @@
 				     0,
 				     NULL);
 		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
-		ci->sgl_page_size = sgl_page_size;
-		ci->epoch_time = now.tv_sec;
+		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
+		ci->epoch_time = cpu_to_le32(now.tv_sec);
 		rq->flags |= RF_FAILURE_OK;
 		a->init_msg = ESAS2R_INIT_MSG_INIT;
 		break;
@@ -1250,12 +1246,15 @@
 		if (rq->req_stat == RS_SUCCESS) {
 			u32 major;
 			u32 minor;
+			u16 fw_release;
 
 			a->fw_version = le16_to_cpu(
 				rq->func_rsp.cfg_rsp.vda_version);
 			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
-			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
-			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+			fw_release = le16_to_cpu(
+				rq->func_rsp.cfg_rsp.fw_release);
+			major = LOBYTE(fw_release);
+			minor = HIBYTE(fw_release);
 			a->fw_version += (major << 16) + (minor << 24);
 		} else {
 			esas2r_hdebug("FAILED");
@@ -1266,9 +1265,8 @@
 		 * unsupported config requests correctly.
 		 */
 
-		if ((a->flags2 & AF2_THUNDERBOLT)
-		    || (be32_to_cpu(a->fw_version) >
-			be32_to_cpu(0x47020052))) {
+		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
+		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
 			esas2r_hdebug("CFG get init");
 			esas2r_build_cfg_req(a,
 					     rq,
@@ -1361,10 +1359,10 @@
 	struct esas2r_request *rq;
 	u32 i;
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		goto exit;
 
-	if (!(a->flags & AF_NVR_VALID)) {
+	if (!test_bit(AF_NVR_VALID, &a->flags)) {
 		if (!esas2r_nvram_read_direct(a))
 			esas2r_log(ESAS2R_LOG_WARN,
 				   "invalid/missing NVRAM parameters");
@@ -1376,8 +1374,8 @@
 	}
 
 	/* The firmware is ready. */
-	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
-	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+	clear_bit(AF_DEGRADED_MODE, &a->flags);
+	clear_bit(AF_CHPRST_PENDING, &a->flags);
 
 	/* Post all the async event requests */
 	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
@@ -1398,8 +1396,8 @@
 
 	esas2r_hdebug("firmware revision: %s", a->fw_rev);
 
-	if ((a->flags & AF_CHPRST_DETECTED)
-	    && (a->flags & AF_FIRST_INIT)) {
+	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
+	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
 		esas2r_enable_chip_interrupts(a);
 		return true;
 	}
@@ -1423,18 +1421,18 @@
 		 * Block Tasklets from getting scheduled and indicate this is
 		 * polled discovery.
 		 */
-		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
-		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
+		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
+		set_bit(AF_DISC_POLLED, &a->flags);
 
 		/*
 		 * Temporarily bring the disable count to zero to enable
 		 * deferred processing.  Note that the count is already zero
 		 * after the first initialization.
 		 */
-		if (a->flags & AF_FIRST_INIT)
+		if (test_bit(AF_FIRST_INIT, &a->flags))
 			atomic_dec(&a->disable_cnt);
 
-		while (a->flags & AF_DISC_PENDING) {
+		while (test_bit(AF_DISC_PENDING, &a->flags)) {
 			schedule_timeout_interruptible(msecs_to_jiffies(100));
 
 			/*
@@ -1453,7 +1451,7 @@
 			 * we have to make sure the timer tick processes the
 			 * doorbell indicating the firmware is ready.
 			 */
-			if (!(a->flags & AF_CHPRST_PENDING))
+			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
 				esas2r_disc_check_for_work(a);
 
 			/* Simulate a timer tick. */
@@ -1473,11 +1471,11 @@
 
 		}
 
-		if (a->flags & AF_FIRST_INIT)
+		if (test_bit(AF_FIRST_INIT, &a->flags))
 			atomic_inc(&a->disable_cnt);
 
-		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
-		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+		clear_bit(AF_DISC_POLLED, &a->flags);
+		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
 	}
 
 
@@ -1504,26 +1502,26 @@
 	 * need to get done before we exit.
 	 */
 
-	if ((a->flags & AF_CHPRST_DETECTED)
-	    && (a->flags & AF_FIRST_INIT)) {
+	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
+	    test_bit(AF_FIRST_INIT, &a->flags)) {
 		/*
 		 * Reinitialization was performed during the first
 		 * initialization.  Only clear the chip reset flag so the
 		 * original device polling is not cancelled.
 		 */
 		if (!rslt)
-			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+			clear_bit(AF_CHPRST_PENDING, &a->flags);
 	} else {
 		/* First initialization or a subsequent re-init is complete. */
 		if (!rslt) {
-			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
-			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+			clear_bit(AF_CHPRST_PENDING, &a->flags);
+			clear_bit(AF_DISC_PENDING, &a->flags);
 		}
 
 
 		/* Enable deferred processing after the first initialization. */
-		if (a->flags & AF_FIRST_INIT) {
-			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
+		if (test_bit(AF_FIRST_INIT, &a->flags)) {
+			clear_bit(AF_FIRST_INIT, &a->flags);
 
 			if (atomic_dec_return(&a->disable_cnt) == 0)
 				esas2r_do_deferred_processes(a);
@@ -1535,7 +1533,7 @@
 
 void esas2r_reset_adapter(struct esas2r_adapter *a)
 {
-	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+	set_bit(AF_OS_RESET, &a->flags);
 	esas2r_local_reset_adapter(a);
 	esas2r_schedule_tasklet(a);
 }
@@ -1550,17 +1548,17 @@
 	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
 	 * to not overwrite a previous crash that was saved.
 	 */
-	if ((a->flags2 & AF2_COREDUMP_AVAIL)
-	    && !(a->flags2 & AF2_COREDUMP_SAVED)) {
+	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
+	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
 		esas2r_read_mem_block(a,
 				      a->fw_coredump_buff,
 				      MW_DATA_ADDR_SRAM + 0x80000,
 				      ESAS2R_FWCOREDUMP_SZ);
 
-		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
+		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
 	}
 
-	esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
 
 	/* Reset the chip */
 	if (a->pcid->revision == MVR_FREY_B2)
@@ -1606,10 +1604,10 @@
  */
 void esas2r_power_down(struct esas2r_adapter *a)
 {
-	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
-	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
+	set_bit(AF_POWER_MGT, &a->flags);
+	set_bit(AF_POWER_DOWN, &a->flags);
 
-	if (!(a->flags & AF_DEGRADED_MODE)) {
+	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
 		u32 starttime;
 		u32 doorbell;
 
@@ -1649,14 +1647,14 @@
 		 * For versions of firmware that support it tell them the driver
 		 * is powering down.
 		 */
-		if (a->flags2 & AF2_VDA_POWER_DOWN)
+		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
 			esas2r_power_down_notify_firmware(a);
 	}
 
 	/* Suspend I/O processing. */
-	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
-	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
-	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+	set_bit(AF_OS_RESET, &a->flags);
+	set_bit(AF_DISC_PENDING, &a->flags);
+	set_bit(AF_CHPRST_PENDING, &a->flags);
 
 	esas2r_process_adapter_reset(a);
 
@@ -1673,9 +1671,9 @@
 {
 	bool ret;
 
-	esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
+	clear_bit(AF_POWER_DOWN, &a->flags);
 	esas2r_init_pci_cfg_space(a);
-	esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
+	set_bit(AF_FIRST_INIT, &a->flags);
 	atomic_inc(&a->disable_cnt);
 
 	/* reinitialize the adapter */
@@ -1687,17 +1685,17 @@
 	esas2r_send_reset_ae(a, true);
 
 	/* clear this flag after initialization. */
-	esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
+	clear_bit(AF_POWER_MGT, &a->flags);
 	return ret;
 }
 
 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
 {
-	if (a->flags & AF_NOT_PRESENT)
+	if (test_bit(AF_NOT_PRESENT, &a->flags))
 		return false;
 
 	if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
-		esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
+		set_bit(AF_NOT_PRESENT, &a->flags);
 
 		return false;
 	}
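
Two of the esas2r_init.c hunks above are endianness fixes rather than flag conversions: sgl_page_size and epoch_time gain cpu_to_le32(), and fw_release is byte-swapped before its bytes are sliced out. The latter pattern, sketched (rsp and its __le16 field layout are assumed):

	u16 fw_release = le16_to_cpu(rsp->fw_release);	/* __le16 on the wire */
	u32 major = fw_release & 0xff;	/* LOBYTE: wrong byte on BE without the swap */
	u32 minor = fw_release >> 8;	/* HIBYTE */
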
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
index c2d4ff5..f16d6bc 100644
--- a/drivers/scsi/esas2r/esas2r_int.c
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -96,7 +96,7 @@
 	if (!esas2r_adapter_interrupt_pending(a))
 		return IRQ_NONE;
 
-	esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
+	set_bit(AF2_INT_PENDING, &a->flags2);
 	esas2r_schedule_tasklet(a);
 
 	return IRQ_HANDLED;
@@ -317,9 +317,10 @@
 	 *  = 2 - can start any request
 	 */
 
-	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
+	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+	    test_bit(AF_FLASHING, &a->flags))
 		startreqs = 0;
-	else if (a->flags & AF_DISC_PENDING)
+	else if (test_bit(AF_DISC_PENDING, &a->flags))
 		startreqs = 1;
 
 	atomic_inc(&a->disable_cnt);
@@ -367,7 +368,7 @@
 				 * Flashing could have been set by last local
 				 * start
 				 */
-				if (a->flags & AF_FLASHING)
+				if (test_bit(AF_FLASHING, &a->flags))
 					break;
 			}
 		}
@@ -404,7 +405,7 @@
 
 		dc->disc_evt = 0;
 
-		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+		clear_bit(AF_DISC_IN_PROG, &a->flags);
 	}
 
 	/*
@@ -425,7 +426,7 @@
 		a->last_write =
 			a->last_read = a->list_size - 1;
 
-	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
 
 	/* Kill all the requests on the active list */
 	list_for_each(element, &a->defer_list) {
@@ -470,7 +471,7 @@
 	if (atomic_read(&a->disable_cnt) == 0)
 		esas2r_do_deferred_processes(a);
 
-	esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
+	clear_bit(AF_OS_RESET, &a->flags);
 
 	esas2r_trace_exit();
 }
@@ -478,10 +479,10 @@
 static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
 {
 
-	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
-	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
-	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
-	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+	clear_bit(AF_CHPRST_NEEDED, &a->flags);
+	clear_bit(AF_BUSRST_NEEDED, &a->flags);
+	clear_bit(AF_BUSRST_DETECTED, &a->flags);
+	clear_bit(AF_BUSRST_PENDING, &a->flags);
 	/*
 	 * Make sure we don't get attempt more than 3 resets
 	 * when the uptime between resets does not exceed one
@@ -507,10 +508,10 @@
 		 * prevent the heartbeat from trying to recover.
 		 */
 
-		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
-		esas2r_lock_set_flags(&a->flags, AF_DISABLED);
-		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
-		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+		set_bit(AF_DEGRADED_MODE, &a->flags);
+		set_bit(AF_DISABLED, &a->flags);
+		clear_bit(AF_CHPRST_PENDING, &a->flags);
+		clear_bit(AF_DISC_PENDING, &a->flags);
 
 		esas2r_disable_chip_interrupts(a);
 		a->int_mask = 0;
@@ -519,18 +520,17 @@
 		esas2r_log(ESAS2R_LOG_CRIT,
 			   "Adapter disabled because of hardware failure");
 	} else {
-		u32 flags =
-			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
+		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
 
-		if (!(flags & AF_CHPRST_STARTED))
+		if (!alrdyrst)
 			/*
 			 * Only disable interrupts if this is
 			 * the first reset attempt.
 			 */
 			esas2r_disable_chip_interrupts(a);
 
-		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
-		    !(flags & AF_CHPRST_STARTED)) {
+		if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
 			/*
 			 * Don't reset the chip on the first
 			 * deferred power up attempt.
@@ -543,10 +543,10 @@
 		/* Kick off the reinitialization */
 		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
 		a->chip_init_time = jiffies_to_msecs(jiffies);
-		if (!(a->flags & AF_POWER_MGT)) {
+		if (!test_bit(AF_POWER_MGT, &a->flags)) {
 			esas2r_process_adapter_reset(a);
 
-			if (!(flags & AF_CHPRST_STARTED)) {
+			if (!alrdyrst) {
 				/* Remove devices now that I/O is cleaned up. */
 				a->prev_dev_cnt =
 					esas2r_targ_db_get_tgt_cnt(a);
@@ -560,38 +560,37 @@
 
 static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
 {
-	while (a->flags & AF_CHPRST_DETECTED) {
+	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
 		/*
 		 * Balance the enable in esas2r_initadapter_hw.
 		 * Esas2r_power_down already took care of it for power
 		 * management.
 		 */
-		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
-							AF_POWER_MGT))
+		if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+		    !test_bit(AF_POWER_MGT, &a->flags))
 			esas2r_disable_chip_interrupts(a);
 
 		/* Reinitialize the chip. */
 		esas2r_check_adapter(a);
 		esas2r_init_adapter_hw(a, 0);
 
-		if (a->flags & AF_CHPRST_NEEDED)
+		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
 			break;
 
-		if (a->flags & AF_POWER_MGT) {
+		if (test_bit(AF_POWER_MGT, &a->flags)) {
 			/* Recovery from power management. */
-			if (a->flags & AF_FIRST_INIT) {
+			if (test_bit(AF_FIRST_INIT, &a->flags)) {
 				/* Chip reset during normal power up */
 				esas2r_log(ESAS2R_LOG_CRIT,
 					   "The firmware was reset during a normal power-up sequence");
 			} else {
 				/* Deferred power up complete. */
-				esas2r_lock_clear_flags(&a->flags,
-							AF_POWER_MGT);
+				clear_bit(AF_POWER_MGT, &a->flags);
 				esas2r_send_reset_ae(a, true);
 			}
 		} else {
 			/* Recovery from online chip reset. */
-			if (a->flags & AF_FIRST_INIT) {
+			if (test_bit(AF_FIRST_INIT, &a->flags)) {
 				/* Chip reset during driver load */
 			} else {
 				/* Chip reset after driver load */
@@ -602,14 +601,14 @@
 				   "Recovering from a chip reset while the chip was online");
 		}
 
-		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
+		clear_bit(AF_CHPRST_STARTED, &a->flags);
 		esas2r_enable_chip_interrupts(a);
 
 		/*
 		 * Clear this flag last!  this indicates that the chip has been
 		 * reset already during initialization.
 		 */
-		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
+		clear_bit(AF_CHPRST_DETECTED, &a->flags);
 	}
 }
 
@@ -617,26 +616,28 @@
 /* Perform deferred tasks when chip interrupts are disabled */
 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
 {
-	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
-		if (a->flags & AF_CHPRST_NEEDED)
+
+	if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+	    test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
 			esas2r_chip_rst_needed_during_tasklet(a);
 
 		esas2r_handle_chip_rst_during_tasklet(a);
 	}
 
-	if (a->flags & AF_BUSRST_NEEDED) {
+	if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
 		esas2r_hdebug("hard resetting bus");
 
-		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+		clear_bit(AF_BUSRST_NEEDED, &a->flags);
 
-		if (a->flags & AF_FLASHING)
-			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+		if (test_bit(AF_FLASHING, &a->flags))
+			set_bit(AF_BUSRST_DETECTED, &a->flags);
 		else
 			esas2r_write_register_dword(a, MU_DOORBELL_IN,
 						    DRBL_RESET_BUS);
 	}
 
-	if (a->flags & AF_BUSRST_DETECTED) {
+	if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
 		esas2r_process_bus_reset(a);
 
 		esas2r_log_dev(ESAS2R_LOG_WARN,
@@ -645,14 +646,14 @@
 
 		scsi_report_bus_reset(a->host, 0);
 
-		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
-		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+		clear_bit(AF_BUSRST_DETECTED, &a->flags);
+		clear_bit(AF_BUSRST_PENDING, &a->flags);
 
 		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
 	}
 
-	if (a->flags & AF_PORT_CHANGE) {
-		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
+	if (test_bit(AF_PORT_CHANGE, &a->flags)) {
+		clear_bit(AF_PORT_CHANGE, &a->flags);
 
 		esas2r_targ_db_report_changes(a);
 	}
@@ -672,10 +673,10 @@
 	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
 
 	if (doorbell & DRBL_RESET_BUS)
-		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+		set_bit(AF_BUSRST_DETECTED, &a->flags);
 
 	if (doorbell & DRBL_FORCE_INT)
-		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+		clear_bit(AF_HEARTBEAT, &a->flags);
 
 	if (doorbell & DRBL_PANIC_REASON_MASK) {
 		esas2r_hdebug("*** Firmware Panic ***");
@@ -683,7 +684,7 @@
 	}
 
 	if (doorbell & DRBL_FW_RESET) {
-		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+		set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
 		esas2r_local_reset_adapter(a);
 	}
 
@@ -918,7 +919,7 @@
 {
 	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
 	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
-		esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
+		clear_bit(AF_FLASHING, &a->flags);
 
 	/* See if we setup a callback to do special processing */
 
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
index 324e262..a8df916 100644
--- a/drivers/scsi/esas2r/esas2r_io.c
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -49,7 +49,8 @@
 	struct esas2r_request *startrq = rq;
 	unsigned long flags;
 
-	if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
+	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+		     test_bit(AF_POWER_DOWN, &a->flags))) {
 		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
 			rq->req_stat = RS_SEL2;
 		else
@@ -69,8 +70,8 @@
 			 * Note that if AF_DISC_PENDING is set than this will
 			 * go on the defer queue.
 			 */
-			if (unlikely(t->target_state != TS_PRESENT
-				     && !(a->flags & AF_DISC_PENDING)))
+			if (unlikely(t->target_state != TS_PRESENT &&
+				     !test_bit(AF_DISC_PENDING, &a->flags)))
 				rq->req_stat = RS_SEL;
 		}
 	}
@@ -91,8 +92,9 @@
 	spin_lock_irqsave(&a->queue_lock, flags);
 
 	if (likely(list_empty(&a->defer_list) &&
-		   !(a->flags &
-		     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
+		   !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+		   !test_bit(AF_FLASHING, &a->flags) &&
+		   !test_bit(AF_DISC_PENDING, &a->flags)))
 		esas2r_local_start_request(a, startrq);
 	else
 		list_add_tail(&startrq->req_list, &a->defer_list);
@@ -124,7 +126,7 @@
 
 	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
 		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
-		esas2r_lock_set_flags(&a->flags, AF_FLASHING);
+		set_bit(AF_FLASHING, &a->flags);
 
 	list_add_tail(&rq->req_list, &a->active_list);
 	esas2r_start_vda_request(a, rq);
@@ -147,11 +149,10 @@
 	if (a->last_write >= a->list_size) {
 		a->last_write = 0;
 		/* update the toggle bit */
-		if (a->flags & AF_COMM_LIST_TOGGLE)
-			esas2r_lock_clear_flags(&a->flags,
-						AF_COMM_LIST_TOGGLE);
+		if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+			clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
 		else
-			esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+			set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
 	}
 
 	element =
@@ -169,7 +170,7 @@
 	/* Update the write pointer */
 	dw = a->last_write;
 
-	if (a->flags & AF_COMM_LIST_TOGGLE)
+	if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
 		dw |= MU_ILW_TOGGLE;
 
 	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
@@ -687,18 +688,14 @@
 			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
 						    doorbell);
 			if (ver == DRBL_FW_VER_0) {
-				esas2r_lock_set_flags(&a->flags,
-						      AF_CHPRST_DETECTED);
-				esas2r_lock_set_flags(&a->flags,
-						      AF_LEGACY_SGE_MODE);
+				set_bit(AF_CHPRST_DETECTED, &a->flags);
+				set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
 				a->max_vdareq_size = 128;
 				a->build_sgl = esas2r_build_sg_list_sge;
 			} else if (ver == DRBL_FW_VER_1) {
-				esas2r_lock_set_flags(&a->flags,
-						      AF_CHPRST_DETECTED);
-				esas2r_lock_clear_flags(&a->flags,
-							AF_LEGACY_SGE_MODE);
+				set_bit(AF_CHPRST_DETECTED, &a->flags);
+				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
 				a->max_vdareq_size = 1024;
 				a->build_sgl = esas2r_build_sg_list_prd;
@@ -719,28 +716,27 @@
 	a->last_tick_time = currtime;
 
 	/* count down the uptime */
-	if (a->chip_uptime
-	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+	if (a->chip_uptime &&
+	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+	    !test_bit(AF_DISC_PENDING, &a->flags)) {
 		if (deltatime >= a->chip_uptime)
 			a->chip_uptime = 0;
 		else
 			a->chip_uptime -= deltatime;
 	}
 
-	if (a->flags & AF_CHPRST_PENDING) {
-		if (!(a->flags & AF_CHPRST_NEEDED)
-		    && !(a->flags & AF_CHPRST_DETECTED))
+	if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+		if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+		    !test_bit(AF_CHPRST_DETECTED, &a->flags))
 			esas2r_handle_pending_reset(a, currtime);
 	} else {
-		if (a->flags & AF_DISC_PENDING)
+		if (test_bit(AF_DISC_PENDING, &a->flags))
 			esas2r_disc_check_complete(a);
-
-		if (a->flags & AF_HEARTBEAT_ENB) {
-			if (a->flags & AF_HEARTBEAT) {
+		if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+			if (test_bit(AF_HEARTBEAT, &a->flags)) {
 				if ((currtime - a->heartbeat_time) >=
 				    ESAS2R_HEARTBEAT_TIME) {
-					esas2r_lock_clear_flags(&a->flags,
-								AF_HEARTBEAT);
+					clear_bit(AF_HEARTBEAT, &a->flags);
 					esas2r_hdebug("heartbeat failed");
 					esas2r_log(ESAS2R_LOG_CRIT,
 						   "heartbeat failed");
@@ -748,7 +744,7 @@
 					esas2r_local_reset_adapter(a);
 				}
 			} else {
-				esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
+				set_bit(AF_HEARTBEAT, &a->flags);
 				a->heartbeat_time = currtime;
 				esas2r_force_interrupt(a);
 			}
@@ -812,7 +808,7 @@
 	rqaux->vrq->scsi.flags |=
 		cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
 
-	if (a->flags & AF_FLASHING) {
+	if (test_bit(AF_FLASHING, &a->flags)) {
 		/* Assume success.  if there are active requests, return busy */
 		rqaux->req_stat = RS_SUCCESS;
 
@@ -831,7 +827,7 @@
 
 	spin_unlock_irqrestore(&a->queue_lock, flags);
 
-	if (!(a->flags & AF_FLASHING))
+	if (!test_bit(AF_FLASHING, &a->flags))
 		esas2r_start_request(a, rqaux);
 
 	esas2r_comp_list_drain(a, &comp_list);
@@ -848,11 +844,12 @@
 {
 	esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
 
-	if (!(a->flags & AF_DEGRADED_MODE)
-	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
-		esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
-		esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
-		esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+	if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+	    !test_bit(AF_DISC_PENDING, &a->flags)) {
+		set_bit(AF_BUSRST_NEEDED, &a->flags);
+		set_bit(AF_BUSRST_PENDING, &a->flags);
+		set_bit(AF_OS_RESET, &a->flags);
 
 		esas2r_schedule_tasklet(a);
 	}
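
The esas2r hunks above all make the same conversion: the driver's homegrown
esas2r_lock_set_flags()/esas2r_lock_clear_flags() helpers and open-coded
"a->flags & AF_X" mask tests become the kernel's atomic bitops (set_bit(),
clear_bit(), test_bit()). Note that the bitops take a bit number rather than
a mask, so the AF_* definitions must switch from mask values to bit numbers
(handled in the driver's header, outside these hunks). A minimal userspace
sketch of the equivalence, using compiler __atomic builtins as stand-ins for
the kernel helpers (every name ending in _demo is illustrative, not from the
driver):

#include <stdio.h>

/* Stand-ins for the kernel's set_bit()/clear_bit()/test_bit(), which
 * operate atomically on unsigned long bitmaps. */
static void set_bit_demo(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit_demo(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int test_bit_demo(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	unsigned long flags = 0;
	enum { AF_DEMO_PENDING = 3 };	/* hypothetical flag bit number */

	/* Old style: flags |= mask under a driver spinlock; new style:
	 * one lock-free atomic read-modify-write per bit. */
	set_bit_demo(AF_DEMO_PENDING, &flags);
	printf("pending=%d\n", test_bit_demo(AF_DEMO_PENDING, &flags));
	clear_bit_demo(AF_DEMO_PENDING, &flags);
	printf("pending=%d\n", test_bit_demo(AF_DEMO_PENDING, &flags));
	return 0;
}

The win is that a single-bit update no longer needs the adapter spinlock,
and a test_bit() reader is just an atomic load.
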
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index e5b0902..d89a027 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -347,7 +347,7 @@
 {
 	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return false;
 
 	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
@@ -463,7 +463,7 @@
 		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
 		gcc->bios_build_rev = LOWORD(a->flash_ver);
 
-		if (a->flags2 & AF2_THUNDERLINK)
+		if (test_bit(AF2_THUNDERLINK, &a->flags2))
 			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
 					   | CSMI_CNTLRF_SATA_HBA;
 		else
@@ -485,7 +485,7 @@
 	{
 		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
 
-		if (a->flags & AF_DEGRADED_MODE)
+		if (test_bit(AF_DEGRADED_MODE, &a->flags))
 			gcs->status = CSMI_CNTLR_STS_FAILED;
 		else
 			gcs->status = CSMI_CNTLR_STS_GOOD;
@@ -819,10 +819,10 @@
 
 		gai->adap_type = ATTO_GAI_AT_ESASRAID2;
 
-		if (a->flags2 & AF2_THUNDERLINK)
+		if (test_bit(AF2_THUNDERLINK, &a->flags2))
 			gai->adap_type = ATTO_GAI_AT_TLSASHBA;
 
-		if (a->flags & AF_DEGRADED_MODE)
+		if (test_bit(AF_DEGRADED_MODE, &a->flags))
 			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
 
 		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
@@ -938,7 +938,7 @@
 				u32 total_len = ESAS2R_FWCOREDUMP_SZ;
 
 				/* Size is zero if a core dump isn't present */
-				if (!(a->flags2 & AF2_COREDUMP_SAVED))
+				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
 					total_len = 0;
 
 				if (len > total_len)
@@ -960,8 +960,7 @@
 				memset(a->fw_coredump_buff, 0,
 				       ESAS2R_FWCOREDUMP_SZ);
 
-				esas2r_lock_clear_flags(&a->flags2,
-							AF2_COREDUMP_SAVED);
+				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
 			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
 				hi->status = ATTO_STS_UNSUPPORTED;
 				break;
@@ -973,7 +972,7 @@
 			trc->total_length = ESAS2R_FWCOREDUMP_SZ;
 
 			/* Return zero length buffer if core dump not present */
-			if (!(a->flags2 & AF2_COREDUMP_SAVED))
+			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
 				trc->total_length = 0;
 		} else {
 			hi->status = ATTO_STS_UNSUPPORTED;
@@ -1048,6 +1047,7 @@
 		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
 
+
 		if (!esas2r_build_sg_list(a, rq, sgc)) {
 			hi->status = ATTO_STS_OUT_OF_RSRC;
 			break;
@@ -1139,15 +1139,15 @@
 			break;
 		}
 
-		if (a->flags & AF_CHPRST_NEEDED)
+		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
 			ac->adap_state = ATTO_AC_AS_RST_SCHED;
-		else if (a->flags & AF_CHPRST_PENDING)
+		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
 			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
-		else if (a->flags & AF_DISC_PENDING)
+		else if (test_bit(AF_DISC_PENDING, &a->flags))
 			ac->adap_state = ATTO_AC_AS_RST_DISC;
-		else if (a->flags & AF_DISABLED)
+		else if (test_bit(AF_DISABLED, &a->flags))
 			ac->adap_state = ATTO_AC_AS_DISABLED;
-		else if (a->flags & AF_DEGRADED_MODE)
+		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
 			ac->adap_state = ATTO_AC_AS_DEGRADED;
 		else
 			ac->adap_state = ATTO_AC_AS_OK;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 4abf127..f37f3e3 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -889,7 +889,7 @@
 	/* Assume success, if it fails we will fix the result later. */
 	cmd->result = DID_OK << 16;
 
-	if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
 		cmd->result = DID_NO_CONNECT << 16;
 		cmd->scsi_done(cmd);
 		return 0;
@@ -1050,7 +1050,7 @@
 
 	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
 
-	if (a->flags & AF_DEGRADED_MODE) {
+	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
 		cmd->result = DID_ABORT << 16;
 
 		scsi_set_resid(cmd, 0);
@@ -1131,7 +1131,7 @@
 	struct esas2r_adapter *a =
 		(struct esas2r_adapter *)cmd->device->host->hostdata;
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return FAILED;
 
 	if (host_reset)
@@ -1141,14 +1141,14 @@
 
 	/* above call sets the AF_OS_RESET flag.  wait for it to clear. */
 
-	while (a->flags & AF_OS_RESET) {
+	while (test_bit(AF_OS_RESET, &a->flags)) {
 		msleep(10);
 
-		if (a->flags & AF_DEGRADED_MODE)
+		if (test_bit(AF_DEGRADED_MODE, &a->flags))
 			return FAILED;
 	}
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return FAILED;
 
 	return SUCCESS;
@@ -1176,7 +1176,7 @@
 	u8 task_management_status = RS_PENDING;
 	bool completed;
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return FAILED;
 
 retry:
@@ -1229,7 +1229,7 @@
 			msleep(10);
 	}
 
-	if (a->flags & AF_DEGRADED_MODE)
+	if (test_bit(AF_DEGRADED_MODE, &a->flags))
 		return FAILED;
 
 	if (task_management_status == RS_BUSY) {
@@ -1666,13 +1666,13 @@
 {
 	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
 
-	if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
-		esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
+	if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+		clear_bit(AF2_TIMER_TICK, &a->flags2);
 		esas2r_timer_tick(a);
 	}
 
-	if (likely(a->flags2 & AF2_INT_PENDING)) {
-		esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
+	if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+		clear_bit(AF2_INT_PENDING, &a->flags2);
 		esas2r_adapter_interrupt(a);
 	}
 
@@ -1680,12 +1680,12 @@
 		esas2r_do_tasklet_tasks(a);
 
 	if (esas2r_is_tasklet_pending(a)
-	    || (a->flags2 & AF2_INT_PENDING)
-	    || (a->flags2 & AF2_TIMER_TICK)) {
-		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+	    || (test_bit(AF2_INT_PENDING, &a->flags2))
+	    || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
 		esas2r_schedule_tasklet(a);
 	} else {
-		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
 	}
 }
 
@@ -1707,7 +1707,7 @@
 {
 	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
 
-	esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
+	set_bit(AF2_TIMER_TICK, &a->flags2);
 
 	esas2r_schedule_tasklet(a);
 
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
index e540a2f..bf45bea 100644
--- a/drivers/scsi/esas2r/esas2r_targdb.c
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -86,7 +86,7 @@
 
 	esas2r_trace_enter();
 
-	if (a->flags & AF_DISC_PENDING) {
+	if (test_bit(AF_DISC_PENDING, &a->flags)) {
 		esas2r_trace_exit();
 		return;
 	}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
index fd13928..30028e5 100644
--- a/drivers/scsi/esas2r/esas2r_vda.c
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -84,7 +84,7 @@
 		return false;
 	}
 
-	if (a->flags & AF_DEGRADED_MODE) {
+	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
 		vi->status = ATTO_STS_DEGRADED;
 		return false;
 	}
@@ -310,9 +310,9 @@
 				le32_to_cpu(rsp->vda_version);
 			cfg->data.init.fw_build = rsp->fw_build;
 
-			snprintf(buf, sizeof(buf), "%1d.%02d",
-				(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
-				(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+			snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
+				 (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+				 (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
 
 			memcpy(&cfg->data.init.fw_release, buf,
 			       sizeof(cfg->data.init.fw_release));
@@ -389,7 +389,7 @@
 	vrq->length = cpu_to_le32(length);
 
 	if (vrq->length) {
-		if (a->flags & AF_LEGACY_SGE_MODE) {
+		if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
 			vrq->sg_list_offset = (u8)offsetof(
 				struct atto_vda_mgmt_req, sge);
 
@@ -427,7 +427,7 @@
 
 	vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
 
-	if (a->flags & AF_LEGACY_SGE_MODE) {
+	if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
 		vrq->sg_list_offset =
 			(u8)offsetof(struct atto_vda_ae_req, sge);
 		vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 07453bb..f317000 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -408,6 +408,7 @@
 	}
 
 	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	ctlr->cdev = ctlr_dev;
 	fcoe = fcoe_ctlr_priv(ctlr);
 
 	dev_hold(netdev);
@@ -1440,22 +1441,28 @@
 	ctlr = fcoe_to_ctlr(fcoe);
 	lport = ctlr->lp;
 	if (unlikely(!lport)) {
-		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
+		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");
 		goto err2;
 	}
 	if (!lport->link_up)
 		goto err2;
 
-	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
-			"data:%p tail:%p end:%p sum:%d dev:%s",
+	FCOE_NETDEV_DBG(netdev,
+			"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
 			skb->len, skb->data_len, skb->head, skb->data,
 			skb_tail_pointer(skb), skb_end_pointer(skb),
 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
 
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+
+	if (skb == NULL)
+		return NET_RX_DROP;
+
 	eh = eth_hdr(skb);
 
 	if (is_fip_mode(ctlr) &&
-	    compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
+	    !ether_addr_equal(eh->h_source, ctlr->dest_addr)) {
 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
 				eh->h_source);
 		goto err;
@@ -1540,13 +1547,13 @@
 		wake_up_process(fps->thread);
 	spin_unlock(&fps->fcoe_rx_list.lock);
 
-	return 0;
+	return NET_RX_SUCCESS;
 err:
 	per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
 	put_cpu();
 err2:
 	kfree_skb(skb);
-	return -1;
+	return NET_RX_DROP;
 }
 
 /**
@@ -1788,13 +1795,13 @@
 	lport = fr->fr_dev;
 	if (unlikely(!lport)) {
 		if (skb->destructor != fcoe_percpu_flush_done)
-			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
+			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
 		kfree_skb(skb);
 		return;
 	}
 
-	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
-			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+	FCOE_NETDEV_DBG(skb->dev,
+			"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
 			skb->len, skb->data_len,
 			skb->head, skb->data, skb_tail_pointer(skb),
 			skb_end_pointer(skb), skb->csum,
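
These fcoe changes are part of a tree-wide conversion from
compare_ether_addr() to ether_addr_equal(). The two helpers have opposite
senses -- the old one returns nonzero when the addresses differ, the new one
returns true when they match -- which is why each converted call site gains
or loses a "!". A userspace sketch of the semantics (the _demo suffixes mark
stand-ins; the real helpers live in include/linux/etherdevice.h):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Old helper: 0 when equal, nonzero when different (memcmp-like). */
static int compare_ether_addr_demo(const unsigned char *a,
				   const unsigned char *b)
{
	return memcmp(a, b, 6) != 0;
}

/* New helper: true when equal.  The inverted sense is why
 * "if (compare_ether_addr(a, b))" becomes
 * "if (!ether_addr_equal(a, b))" in the hunks above and below. */
static bool ether_addr_equal_demo(const unsigned char *a,
				  const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

int main(void)
{
	unsigned char x[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char y[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	printf("differ: old=%d new=%d\n",
	       compare_ether_addr_demo(x, y),
	       (int)ether_addr_equal_demo(x, y));
	return 0;
}
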
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 203415e..34a1b1f 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,74 +160,113 @@
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+/**
+ * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
+ * @new: The newly discovered FCF
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
-	struct fcoe_fcf_device temp, *fcf_dev;
-	int rc = 0;
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_fcf_device *temp, *fcf_dev;
+	int rc = -ENOMEM;
 
 	LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
 			new->fabric_name, new->fcf_mac);
 
-	mutex_lock(&ctlr_dev->lock);
-
-	temp.fabric_name = new->fabric_name;
-	temp.switch_name = new->switch_name;
-	temp.fc_map = new->fc_map;
-	temp.vfid = new->vfid;
-	memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
-	temp.priority = new->pri;
-	temp.fka_period = new->fka_period;
-	temp.selected = 0; /* default to unselected */
-
-	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
-	if (unlikely(!fcf_dev)) {
-		rc = -ENOMEM;
+	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+	if (!temp)
 		goto out;
-	}
+
+	temp->fabric_name = new->fabric_name;
+	temp->switch_name = new->switch_name;
+	temp->fc_map = new->fc_map;
+	temp->vfid = new->vfid;
+	memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+	temp->priority = new->pri;
+	temp->fka_period = new->fka_period;
+	temp->selected = 0; /* default to unselected */
 
 	/*
-	 * The fcoe_sysfs layer can return a CONNECTED fcf that
-	 * has a priv (fcf was never deleted) or a CONNECTED fcf
-	 * that doesn't have a priv (fcf was deleted). However,
-	 * libfcoe will always delete FCFs before trying to add
-	 * them. This is ensured because both recv_adv and
-	 * age_fcfs are protected by the the fcoe_ctlr's mutex.
-	 * This means that we should never get a FCF with a
-	 * non-NULL priv pointer.
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device.
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+	 * don't want to make sysfs changes.
 	 */
-	BUG_ON(fcf_dev->priv);
 
-	fcf_dev->priv = new;
-	new->fcf_dev = fcf_dev;
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	if (ctlr_dev) {
+		mutex_lock(&ctlr_dev->lock);
+		fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+		if (unlikely(!fcf_dev)) {
+			rc = -ENOMEM;
+			mutex_unlock(&ctlr_dev->lock);
+			goto out;
+		}
+
+		/*
+		 * The fcoe_sysfs layer can return a CONNECTED fcf that
+		 * has a priv (fcf was never deleted) or a CONNECTED fcf
+		 * that doesn't have a priv (fcf was deleted). However,
+		 * libfcoe will always delete FCFs before trying to add
+		 * them. This is ensured because both recv_adv and
+		 * age_fcfs are protected by the fcoe_ctlr's mutex.
+		 * This means that we should never get a FCF with a
+		 * non-NULL priv pointer.
+		 */
+		BUG_ON(fcf_dev->priv);
+
+		fcf_dev->priv = new;
+		new->fcf_dev = fcf_dev;
+		mutex_unlock(&ctlr_dev->lock);
+	}
 
 	list_add(&new->list, &fip->fcfs);
 	fip->fcf_count++;
+	rc = 0;
 
 out:
-	mutex_unlock(&ctlr_dev->lock);
+	kfree(temp);
 	return rc;
 }
 
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_ctlr_device *cdev;
 	struct fcoe_fcf_device *fcf_dev;
 
 	list_del(&new->list);
 	fip->fcf_count--;
 
-	mutex_lock(&ctlr_dev->lock);
-
-	fcf_dev = fcoe_fcf_to_fcf_dev(new);
-	WARN_ON(!fcf_dev);
-	new->fcf_dev = NULL;
-	fcoe_fcf_device_delete(fcf_dev);
-	kfree(new);
-
-	mutex_unlock(&ctlr_dev->lock);
+	/*
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
+	 * or a fcoe_fcf_device.
+	 *
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+	 * but we don't want to make sysfs changes.
+	 */
+	cdev = fcoe_ctlr_to_ctlr_dev(fip);
+	if (cdev) {
+		mutex_lock(&cdev->lock);
+		fcf_dev = fcoe_fcf_to_fcf_dev(new);
+		WARN_ON(!fcf_dev);
+		new->fcf_dev = NULL;
+		fcoe_fcf_device_delete(fcf_dev);
+		kfree(new);
+		mutex_unlock(&cdev->lock);
+	}
 }
 
 /**
@@ -300,7 +339,7 @@
 	spin_unlock_bh(&fip->ctlr_lock);
 	sel = fip->sel_fcf;
 
-	if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
+	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
 		goto unlock;
 	if (!is_zero_ether_addr(fip->dest_addr)) {
 		printk(KERN_NOTICE "libfcoe: host%d: "
@@ -1000,7 +1039,7 @@
 		if (fcf->switch_name == new.switch_name &&
 		    fcf->fabric_name == new.fabric_name &&
 		    fcf->fc_map == new.fc_map &&
-		    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
+		    ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
 			found = 1;
 			break;
 		}
@@ -1340,7 +1379,7 @@
 			mp = (struct fip_mac_desc *)desc;
 			if (dlen < sizeof(*mp))
 				goto err;
-			if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
+			if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))
 				goto err;
 			desc_mask &= ~BIT(FIP_DT_MAC);
 			break;
@@ -1418,8 +1457,8 @@
 			 * 'port_id' is already validated, check MAC address and
 			 * wwpn
 			 */
-			if (compare_ether_addr(fip->get_src_addr(vn_port),
-						vp->fd_mac) != 0 ||
+			if (!ether_addr_equal(fip->get_src_addr(vn_port),
+					      vp->fd_mac) ||
 				get_unaligned_be64(&vp->fd_wwpn) !=
 							vn_port->wwpn)
 				continue;
@@ -1453,6 +1492,9 @@
  */
 void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
 	skb_queue_tail(&fip->fip_recv_list, skb);
 	schedule_work(&fip->recv_work);
 }
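
The skb_share_check() calls added here and in fcoe.c follow the standard
receive-path rule: an skb that may be shared must be unshared before it is
queued or modified. The helper clones a shared skb and releases the caller's
reference to the original; on clone failure it returns NULL with that
reference already dropped, so the caller only reports the drop. A minimal
kernel-style sketch (my_rx is a hypothetical handler, assuming
<linux/skbuff.h> and <linux/netdevice.h>):

/* Hypothetical rx handler showing the canonical skb_share_check()
 * pattern: clone a possibly-shared skb before touching it.  On clone
 * failure nothing needs freeing -- skb_share_check() has already
 * released our reference. */
static int my_rx(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* From here the skb is exclusively ours to modify or queue. */
	return NET_RX_SUCCESS;
}
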
@@ -1479,12 +1521,12 @@
 		goto drop;
 	eh = eth_hdr(skb);
 	if (fip->mode == FIP_MODE_VN2VN) {
-		if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
-		    compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) &&
-		    compare_ether_addr(eh->h_dest, fcoe_all_p2p))
+		if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+		    !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
+		    !ether_addr_equal(eh->h_dest, fcoe_all_p2p))
 			goto drop;
-	} else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
-		   compare_ether_addr(eh->h_dest, fcoe_all_enode))
+	} else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+		   !ether_addr_equal(eh->h_dest, fcoe_all_enode))
 		goto drop;
 	fiph = (struct fip_header *)skb->data;
 	op = ntohs(fiph->fip_op);
@@ -1856,7 +1898,7 @@
 		 * address_mode flag to use FC_OUI-based Ethernet DA.
 		 * Otherwise we use the FCoE gateway addr
 		 */
-		if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+		if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
 			fcoe_ctlr_map_dest(fip);
 		} else {
 			memcpy(fip->dest_addr, sa, ETH_ALEN);
@@ -2825,8 +2867,8 @@
  * disabled, so that should ensure that this routine is only called
  * when nothing is happening.
  */
-void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
-			enum fip_state fip_mode)
+static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+			       enum fip_state fip_mode)
 {
 	void *priv;
 
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 1f4f22f..045c4e1 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -300,29 +300,29 @@
 
 	switch (ctlr->enabled) {
 	case FCOE_CTLR_ENABLED:
-		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
 		return -EBUSY;
 	case FCOE_CTLR_DISABLED:
 		if (!ctlr->f->set_fcoe_ctlr_mode) {
 			LIBFCOE_SYSFS_DBG(ctlr,
-					  "Mode change not supported by LLD.");
+					  "Mode change not supported by LLD.\n");
 			return -ENOTSUPP;
 		}
 
 		ctlr->mode = fcoe_parse_mode(mode);
 		if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
-			LIBFCOE_SYSFS_DBG(ctlr,
-					  "Unknown mode %s provided.", buf);
+			LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
+					  buf);
 			return -EINVAL;
 		}
 
 		ctlr->f->set_fcoe_ctlr_mode(ctlr);
-		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
 
 		return count;
 	case FCOE_CTLR_UNUSED:
 	default:
-		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
 		return -ENOTSUPP;
 	};
 }
@@ -657,7 +657,7 @@
 	if (new->switch_name == old->switch_name &&
 	    new->fabric_name == old->fabric_name &&
 	    new->fc_map == old->fc_map &&
-	    compare_ether_addr(new->mac, old->mac) == 0)
+	    ether_addr_equal(new->mac, old->mac))
 		return 1;
 	return 0;
 }
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e4dd3d7..528d43b 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,6 +27,7 @@
 #include "fnic_io.h"
 #include "fnic_res.h"
 #include "fnic_trace.h"
+#include "fnic_stats.h"
 #include "vnic_dev.h"
 #include "vnic_wq.h"
 #include "vnic_rq.h"
@@ -38,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.5.0.23"
+#define DRV_VERSION		"1.5.0.45"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -232,6 +233,13 @@
 	unsigned int wq_count;
 	unsigned int cq_count;
 
+	struct dentry *fnic_stats_debugfs_host;
+	struct dentry *fnic_stats_debugfs_file;
+	struct dentry *fnic_reset_debugfs_file;
+	unsigned int reset_stats;
+	atomic64_t io_cmpl_skip;
+	struct fnic_stats fnic_stats;
+
 	u32 vlan_hw_insert:1;	        /* let hw insert the tag */
 	u32 in_remove:1;                /* fnic device in removal */
 	u32 stop_rx_link_events:1;      /* stop proc. rx frames, link events */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index cbcb012..b6073f8 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -23,6 +23,58 @@
 static struct dentry *fnic_trace_debugfs_root;
 static struct dentry *fnic_trace_debugfs_file;
 static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+	int rc = -1;
+	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+	if (!fnic_trace_debugfs_root) {
+		printk(KERN_DEBUG "Cannot create debugfs root\n");
+		return rc;
+	}
+
+	if (!fnic_trace_debugfs_root) {
+		printk(KERN_DEBUG
+			"fnic root directory doesn't exist in debugfs\n");
+		return rc;
+	}
+
+	fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+						fnic_trace_debugfs_root);
+	if (!fnic_stats_debugfs_root) {
+		printk(KERN_DEBUG "Cannot create Statistics directory\n");
+		return rc;
+	}
+
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+	debugfs_remove(fnic_stats_debugfs_root);
+	fnic_stats_debugfs_root = NULL;
+
+	debugfs_remove(fnic_trace_debugfs_root);
+	fnic_trace_debugfs_root = NULL;
+}
 
 /*
  * fnic_trace_ctrl_open - Open the trace_enable file
@@ -241,16 +293,16 @@
  * Description:
  * When Debugfs is configured this routine sets up the fnic debugfs
  * file system. If not already created, this routine will create the
- * fnic directory. It will create file trace to log fnic trace buffer
- * output into debugfs and it will also create file trace_enable to
- * control enable/disable of trace logging into trace buffer.
+ * trace file to log fnic trace buffer output into debugfs, and it
+ * will also create the trace_enable file to control enabling and
+ * disabling of trace logging into the trace buffer.
  */
 int fnic_trace_debugfs_init(void)
 {
 	int rc = -1;
-	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
 	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG "Cannot create debugfs root\n");
+		printk(KERN_DEBUG
+			"FNIC Debugfs root directory doesn't exist\n");
 		return rc;
 	}
 	fnic_trace_enable = debugfs_create_file("tracing_enable",
@@ -259,8 +311,8 @@
 					  NULL, &fnic_trace_ctrl_fops);
 
 	if (!fnic_trace_enable) {
-		printk(KERN_DEBUG "Cannot create trace_enable file"
-				  " under debugfs");
+		printk(KERN_DEBUG
+			"Cannot create trace_enable file under debugfs\n");
 		return rc;
 	}
 
@@ -271,7 +323,8 @@
 						  &fnic_trace_debugfs_fops);
 
 	if (!fnic_trace_debugfs_file) {
-		printk(KERN_DEBUG "Cannot create trace file under debugfs");
+		printk(KERN_DEBUG
+			"Cannot create trace file under debugfs\n");
 		return rc;
 	}
 	rc = 0;
@@ -295,8 +348,323 @@
 		debugfs_remove(fnic_trace_enable);
 		fnic_trace_enable = NULL;
 	}
-	if (fnic_trace_debugfs_root) {
-		debugfs_remove(fnic_trace_debugfs_root);
-		fnic_trace_debugfs_root = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens the debugfs file reset_stats and stores the
+ * i_private data in the debug structure, to be retrieved later by the
+ * other file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+	struct stats_debug_info *debug;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->i_private = inode->i_private;
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the reset_stats variable and stores
+ * it in the local @buf. It will start reading the file at @ppos and
+ * copy up to @cnt bytes of data from @buf to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+					char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	char buf[64];
+	int len;
+
+	len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from the user buffer @ubuf to the local
+ * buffer @buf and resets the fnic's cumulative stats.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+					const char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	struct fnic_stats *stats = &fnic->fnic_stats;
+	u64 *io_stats_p = (u64 *)&stats->io_stats;
+	u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	fnic->reset_stats = val;
+
+	if (fnic->reset_stats) {
+		/* Skip variable is used to avoid descrepancies to Num IOs
+		 * and IO Completions stats. Skip incrementing No IO Compls
+		 * for pending active IOs after reset stats
+		 */
+		atomic64_set(&fnic->io_cmpl_skip,
+			atomic64_read(&stats->io_stats.active_ios));
+		memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+		memset(&stats->term_stats, 0,
+			sizeof(struct terminate_stats));
+		memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+		memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+		memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+		memset(io_stats_p+1, 0,
+			sizeof(struct io_path_stats) - sizeof(u64));
+		memset(fw_stats_p+1, 0,
+			sizeof(struct fw_stats) - sizeof(u64));
 	}
+
+	(*ppos)++;
+	return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	kfree(debug);
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for specific host
+ * and get fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens the debugfs stats file of a specific host and
+ * prints the fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+					struct file *file)
+{
+	struct fnic *fnic = inode->i_private;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	struct stats_debug_info *debug;
+	int buf_size = 2 * PAGE_SIZE;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->debug_buffer = vmalloc(buf_size);
+	if (!debug->debug_buffer) {
+		kfree(debug);
+		return -ENOMEM;
+	}
+
+	debug->buf_size = buf_size;
+	memset((void *)debug->debug_buffer, 0, buf_size);
+	debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+					char __user *ubuf,
+					size_t nbytes,
+					loff_t *pos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	int rc = 0;
+	rc = simple_read_from_buffer(ubuf, nbytes, pos,
+					debug->debug_buffer,
+					debug->buffer_len);
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	vfree(debug->debug_buffer);
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_stats_debugfs_open,
+	.read = fnic_stats_debugfs_read,
+	.release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_reset_stats_open,
+	.read = fnic_reset_stats_read,
+	.write = fnic_reset_stats_write,
+	.release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_init - Initialize stats struct and create stats file per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the per-fnic stats
+ * files. It will create the stats and reset_stats files under the
+ * statistics/host# directory to log per-fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+	int rc = -1;
+	char name[16];
+
+	snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+	if (!fnic_stats_debugfs_root) {
+		printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+		return rc;
+	}
+	fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+						fnic_stats_debugfs_root);
+	if (!fnic->fnic_stats_debugfs_host) {
+		printk(KERN_DEBUG "Cannot create host directory\n");
+		return rc;
+	}
+
+	fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_stats_debugfs_fops);
+	if (!fnic->fnic_stats_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create host stats file\n");
+		return rc;
+	}
+
+	fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_reset_debugfs_fops);
+	if (!fnic->fnic_reset_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create reset_stats file\n");
+		return rc;
+	}
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+	if (!fnic)
+		return;
+
+	debugfs_remove(fnic->fnic_stats_debugfs_file);
+	fnic->fnic_stats_debugfs_file = NULL;
+
+	debugfs_remove(fnic->fnic_reset_debugfs_file);
+	fnic->fnic_reset_debugfs_file = NULL;
+
+	debugfs_remove(fnic->fnic_stats_debugfs_host);
+	fnic->fnic_stats_debugfs_host = NULL;
 }
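
The new stats file follows a common debugfs idiom: open() snapshots the data
into a private buffer, read() serves it through simple_read_from_buffer(),
and release() frees the snapshot. A stripped-down, self-contained sketch of
that idiom (everything prefixed demo_ is hypothetical, not fnic code):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static char demo_buf[] = "hello from debugfs\n";
static struct dentry *demo_dir;

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	/* Copies at most count bytes starting at *ppos and advances
	 * *ppos, so successive reads walk the buffer and then hit EOF. */
	return simple_read_from_buffer(ubuf, count, ppos,
				       demo_buf, sizeof(demo_buf) - 1);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read  = demo_read,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;
	debugfs_create_file("stats", S_IRUGO, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Removing the directory dentry tears down the whole subtree. */
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
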
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 006fa92..1671325 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -302,6 +302,7 @@
 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
 {
 	struct fcoe_ctlr *fip = &fnic->ctlr;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	struct sk_buff *skb;
 	char *eth_fr;
 	int fr_len;
@@ -337,6 +338,7 @@
 	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
 	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
 	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
 
 	skb_put(skb, sizeof(*vlan));
 	skb->protocol = htons(ETH_P_FIP);
@@ -354,6 +356,7 @@
 	struct fcoe_ctlr *fip = &fnic->ctlr;
 	struct fip_header *fiph;
 	struct fip_desc *desc;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	u16 vid;
 	size_t rlen;
 	size_t dlen;
@@ -402,6 +405,7 @@
 	/* any VLAN descriptors present ? */
 	if (list_empty(&fnic->vlans)) {
 		/* retry from timer */
+		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
 		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
 			  "No VLAN descriptors in FIP VLAN response\n");
 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
@@ -533,6 +537,7 @@
 void fnic_handle_fip_frame(struct work_struct *work)
 {
 	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	unsigned long flags;
 	struct sk_buff *skb;
 	struct ethhdr *eh;
@@ -567,6 +572,8 @@
 			 * fcf's & restart from scratch
 			 */
 			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+				atomic64_inc(
+					&fnic_stats->vlan_stats.flogi_rejects);
 				shost_printk(KERN_INFO, fnic->lport->host,
 					  "Trigger a Link down - VLAN Disc\n");
 				fcoe_ctlr_link_down(&fnic->ctlr);
@@ -651,13 +658,13 @@
 
 	if (is_zero_ether_addr(new))
 		new = ctl;
-	if (!compare_ether_addr(data, new))
+	if (ether_addr_equal(data, new))
 		return;
 	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
-	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
+	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
 		vnic_dev_del_addr(fnic->vdev, data);
 	memcpy(data, new, ETH_ALEN);
-	if (compare_ether_addr(new, ctl))
+	if (!ether_addr_equal(new, ctl))
 		vnic_dev_add_addr(fnic->vdev, new);
 }
 
@@ -753,6 +760,7 @@
 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
 	struct sk_buff *skb;
 	struct fc_frame *fp;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	unsigned int eth_hdrs_stripped;
 	u8 type, color, eop, sop, ingress_port, vlan_stripped;
 	u8 fcoe = 0, fcoe_sof, fcoe_eof;
@@ -803,6 +811,7 @@
 		eth_hdrs_stripped = 0;
 		skb_trim(skb, bytes_written);
 		if (!fcs_ok) {
+			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
 			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 				     "fcs error.  dropping packet.\n");
 			goto drop;
@@ -818,6 +827,7 @@
 	}
 
 	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 			     "fnic rq_cmpl fcoe x%x fcsok x%x"
 			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
@@ -1205,6 +1215,7 @@
 {
 	unsigned long flags;
 	struct fcoe_vlan *vlan;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	u64 sol_time;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
@@ -1273,6 +1284,7 @@
 			vlan->state = FIP_VLAN_SENT; /* sent now */
 		}
 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
 		vlan->sol_count++;
 		sol_time = jiffies + msecs_to_jiffies
 					(FCOE_CTLR_START_DELAY);
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 5c1f223..7d9b54a 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -37,6 +37,9 @@
 	if (!pba)
 		return IRQ_NONE;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	if (pba & (1 << FNIC_INTX_NOTIFY)) {
 		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
 		fnic_handle_link_event(fnic);
@@ -66,6 +69,9 @@
 	struct fnic *fnic = data;
 	unsigned long work_done = 0;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
 	work_done += fnic_wq_cmpl_handler(fnic, -1);
 	work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -83,6 +89,9 @@
 	struct fnic *fnic = data;
 	unsigned long rq_work_done = 0;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
 				 rq_work_done,
@@ -97,6 +106,9 @@
 	struct fnic *fnic = data;
 	unsigned long wq_work_done = 0;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
 				 wq_work_done,
@@ -110,6 +122,9 @@
 	struct fnic *fnic = data;
 	unsigned long wq_copy_work_done = 0;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
 				 wq_copy_work_done,
@@ -122,6 +137,9 @@
 {
 	struct fnic *fnic = data;
 
+	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
 	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
 	fnic_log_q_error(fnic);
 	fnic_handle_link_event(fnic);
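
Every fnic interrupt handler now stamps misc_stats.last_isr_time and bumps
misc_stats.isr_count on entry. Since the same two lines are repeated in six
handlers, a small helper would keep them in step; a possible sketch
(fnic_note_isr is the editor's name, not the driver's):

/* Hypothetical helper (not in the driver) factoring out the two
 * bookkeeping lines repeated at the top of each ISR above; assumes
 * the fnic.h / fnic_stats.h definitions from this patch. */
static inline void fnic_note_isr(struct fnic *fnic)
{
	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
}
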
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index bbf81ea..33e4ec2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,6 +556,13 @@
 
 	host->transportt = fnic_fc_transport;
 
+	err = fnic_stats_debugfs_init(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+				"Failed to initialize debugfs for stats\n");
+		fnic_stats_debugfs_remove(fnic);
+	}
+
 	/* Setup PCI resources */
 	pci_set_drvdata(pdev, fnic);
 
@@ -917,6 +924,7 @@
 err_out_disable_device:
 	pci_disable_device(pdev);
 err_out_free_hba:
+	fnic_stats_debugfs_remove(fnic);
 	scsi_host_put(lp->host);
 err_out:
 	return err;
@@ -969,6 +977,7 @@
 
 	fcoe_ctlr_destroy(&fnic->ctlr);
 	fc_lport_destroy(lp);
+	fnic_stats_debugfs_remove(fnic);
 
 	/*
 	 * This stops the fnic device, masks all interrupts. Completed
@@ -996,7 +1005,6 @@
 	fnic_iounmap(fnic);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	scsi_host_put(lp->host);
 }
 
@@ -1014,6 +1022,14 @@
 
 	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
+	/* Create debugfs entries for fnic */
+	err = fnic_debugfs_init();
+	if (err < 0) {
+		printk(KERN_ERR PFX "Failed to create fnic directory "
+				"for tracing and stats logging\n");
+		fnic_debugfs_terminate();
+	}
+
 	/* Allocate memory for trace buffer */
 	err = fnic_trace_buf_init();
 	if (err < 0) {
@@ -1102,6 +1118,7 @@
 	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 err_create_fnic_sgl_slab_dflt:
 	fnic_trace_free();
+	fnic_debugfs_terminate();
 	return err;
 }
 
@@ -1118,6 +1135,7 @@
 	kmem_cache_destroy(fnic_io_req_cache);
 	fc_release_transport(fnic_fc_transport);
 	fnic_trace_free();
+	fnic_debugfs_terminate();
 }
 
 module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d014aae..0521436 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -226,15 +226,23 @@
 
 	if (!vnic_wq_copy_desc_avail(wq))
 		ret = -EAGAIN;
-	else
+	else {
 		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+				atomic64_read(
+				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
+	}
 
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 
-	if (!ret)
+	if (!ret) {
+		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Issued fw reset\n");
-	else {
+	} else {
 		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Failed to issue fw reset\n");
@@ -291,6 +299,12 @@
 			      fc_id, fnic->ctlr.map_dest, gw_mac);
 	}
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 flogi_reg_ioreq_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	return ret;
@@ -310,6 +324,7 @@
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct host_sg_desc *desc;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	u8 pri_tag = 0;
 	unsigned int i;
 	unsigned long intr_flags;
@@ -358,6 +373,7 @@
 		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
+		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -386,6 +402,12 @@
 					 rport->maxframe_size, rp->r_a_tov,
 					 rp->e_d_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 	return 0;
 }
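
The active_fw_reqs/max_fw_reqs sequence above (repeated at several queueing
sites later in this file) tracks a high-water mark with a separate
atomic64_read() and atomic64_set(), so two racing CPUs can record a max
slightly below the true peak. For a statistics counter that is usually
acceptable; if exactness mattered, a cmpxchg loop would close the window. A
sketch of the race-free variant (atomic64_track_max is a hypothetical helper,
not in the driver; assumes <linux/atomic.h>):

/* Race-free "track the high-water mark" update: retry the cmpxchg
 * until either our value is installed or another CPU has stored a
 * larger one.  The read-then-set sequence used above can miss a
 * concurrent peak; this variant cannot. */
static inline void atomic64_track_max(atomic64_t *max, s64 val)
{
	s64 old = atomic64_read(max);

	while (val > old) {
		s64 prev = atomic64_cmpxchg(max, old, val);

		if (prev == old)
			break;
		old = prev;
	}
}
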
@@ -401,6 +423,7 @@
 	struct fc_rport *rport;
 	struct fnic_io_req *io_req = NULL;
 	struct fnic *fnic = lport_priv(lp);
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	struct vnic_wq_copy *wq;
 	int ret;
 	u64 cmd_trace;
@@ -414,6 +437,7 @@
 	rport = starget_to_rport(scsi_target(sc->device));
 	ret = fc_remote_port_chkready(rport);
 	if (ret) {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		sc->result = ret;
 		done(sc);
 		return 0;
@@ -436,6 +460,7 @@
 	/* Get a new io_req for this SCSI IO */
 	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 		ret = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -462,6 +487,7 @@
 			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
 				      GFP_ATOMIC);
 		if (!io_req->sgl_list) {
+			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 			ret = SCSI_MLQUEUE_HOST_BUSY;
 			scsi_dma_unmap(sc);
 			mempool_free(io_req, fnic->io_req_pool);
@@ -509,6 +535,13 @@
 			mempool_free(io_req, fnic->io_req_pool);
 		}
 	} else {
+		atomic64_inc(&fnic_stats->io_stats.active_ios);
+		atomic64_inc(&fnic_stats->io_stats.num_ios);
+		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
+			atomic64_set(&fnic_stats->io_stats.max_active_ios,
+			     atomic64_read(&fnic_stats->io_stats.active_ios));
+
 		/* REVISIT: Use per IO lock in the final code */
 		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
 	}
@@ -542,12 +575,18 @@
 	struct fcpio_tag tag;
 	int ret = 0;
 	unsigned long flags;
+	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 
+	atomic64_inc(&reset_stats->fw_reset_completions);
+
 	/* Clean up all outstanding io requests */
 	fnic_cleanup_io(fnic, SCSI_NO_TAG);
 
+	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
 	/* fnic should be in FC_TRANS_ETH_MODE */
@@ -571,6 +610,7 @@
 			 * reset the firmware. Free the cached flogi
 			 */
 			fnic->state = FNIC_IN_FC_MODE;
+			atomic64_inc(&reset_stats->fw_reset_failures);
 			ret = -1;
 		}
 	} else {
@@ -578,6 +618,7 @@
 			      fnic->lport->host,
 			      "Unexpected state %s while processing"
 			      " reset cmpl\n", fnic_state_to_str(fnic->state));
+		atomic64_inc(&reset_stats->fw_reset_failures);
 		ret = -1;
 	}
 
@@ -701,10 +742,14 @@
 	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
+	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
 	if (is_ack_index_in_range(wq, request_out)) {
 		fnic->fw_ack_index[0] = request_out;
 		fnic->fw_ack_recd[0] = 1;
-	}
+	} else
+		atomic64_inc(
+			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	FNIC_TRACE(fnic_fcpio_ack_handler,
 		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -726,6 +771,7 @@
 	struct fcpio_icmnd_cmpl *icmnd_cmpl;
 	struct fnic_io_req *io_req;
 	struct scsi_cmnd *sc;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	unsigned long flags;
 	spinlock_t *io_lock;
 	u64 cmd_trace;
@@ -746,6 +792,7 @@
 	sc = scsi_host_find_tag(fnic->lport->host, id);
 	WARN_ON_ONCE(!sc);
 	if (!sc) {
+		atomic64_inc(&fnic_stats->io_stats.sc_null);
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "icmnd_cmpl sc is null - "
 			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -766,6 +813,7 @@
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 		shost_printk(KERN_ERR, fnic->lport->host,
@@ -824,31 +872,54 @@
 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 			xfer_len -= icmnd_cmpl->residual;
 
+		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
 		break;
 
 	case FCPIO_TIMEOUT:          /* request was timed out */
+		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
 		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_ABORTED:          /* request was aborted */
+		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
 		scsi_set_resid(sc, icmnd_cmpl->residual);
 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
+		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
 		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
 		break;
+
+	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
+		atomic64_inc(&fnic_stats->io_stats.io_not_found);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
+		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_FW_ERR:           /* request was terminated due to fw error */
+		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
 	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
 	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
 	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
-	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
-	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
-	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
-	case FCPIO_FW_ERR:           /* request was terminated due fw error */
 	default:
 		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
 			     fnic_fcpio_status_to_str(hdr_status));
@@ -856,6 +927,11 @@
 		break;
 	}
 
+	if (hdr_status != FCPIO_SUCCESS) {
+		atomic64_inc(&fnic_stats->io_stats.io_failures);
+		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+			     fnic_fcpio_status_to_str(hdr_status));
+	}
 	/* Break link with the SCSI command */
 	CMD_SP(sc) = NULL;
 	CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -889,6 +965,12 @@
 	} else
 		fnic->lport->host_stats.fcp_control_requests++;
 
+	atomic64_dec(&fnic_stats->io_stats.active_ios);
+	if (atomic64_read(&fnic->io_cmpl_skip))
+		atomic64_dec(&fnic->io_cmpl_skip);
+	else
+		atomic64_inc(&fnic_stats->io_stats.io_completions);
+
 	/* Call SCSI completion function to complete the IO */
 	if (sc->scsi_done)
 		sc->scsi_done(sc);
@@ -906,6 +988,10 @@
 	u32 id;
 	struct scsi_cmnd *sc;
 	struct fnic_io_req *io_req;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	unsigned long flags;
 	spinlock_t *io_lock;
 	unsigned long start_time;
@@ -923,6 +1009,7 @@
 	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
 	WARN_ON_ONCE(!sc);
 	if (!sc) {
+		atomic64_inc(&fnic_stats->io_stats.sc_null);
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
 			  fnic_fcpio_status_to_str(hdr_status), id);
@@ -933,6 +1020,7 @@
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
 		shost_printk(KERN_ERR, fnic->lport->host,
@@ -957,6 +1045,31 @@
 		spin_unlock_irqrestore(io_lock, flags);
 	} else if (id & FNIC_TAG_ABORT) {
 		/* Completion of abort cmd */
+		switch (hdr_status) {
+		case FCPIO_SUCCESS:
+			break;
+		case FCPIO_TIMEOUT:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_fw_timeouts);
+			else
+				atomic64_inc(
+					&term_stats->terminate_fw_timeouts);
+			break;
+		case FCPIO_IO_NOT_FOUND:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_io_not_found);
+			else
+				atomic64_inc(
+					&term_stats->terminate_io_not_found);
+			break;
+		default:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_failures);
+			else
+				atomic64_inc(
+					&term_stats->terminate_failures);
+			break;
+		}
 		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
 			/* This is a late completion. Ignore it */
 			spin_unlock_irqrestore(io_lock, flags);
@@ -964,6 +1077,16 @@
 		}
 		CMD_ABTS_STATUS(sc) = hdr_status;
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+		atomic64_dec(&fnic_stats->io_stats.active_ios);
+		if (atomic64_read(&fnic->io_cmpl_skip))
+			atomic64_dec(&fnic->io_cmpl_skip);
+		else
+			atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "abts cmpl recd. id %d status %s\n",
 			      (int)(id & FNIC_TAG_MASK),
@@ -1067,6 +1190,18 @@
 	struct fnic *fnic = vnic_dev_priv(vdev);
 
 	switch (desc->hdr.type) {
+	case FCPIO_ICMND_CMPL: /* fw completed a command */
+	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset) */
+	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+	case FCPIO_RESET_CMPL: /* fw completed reset */
+		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+		break;
+	default:
+		break;
+	}
+
+	switch (desc->hdr.type) {
 	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
 		fnic_fcpio_ack_handler(fnic, cq_index, desc);
 		break;
@@ -1126,6 +1261,7 @@
 	struct scsi_cmnd *sc;
 	spinlock_t *io_lock;
 	unsigned long start_time = 0;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 
 	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
 		if (i == exclude_id)
@@ -1179,6 +1315,11 @@
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
 			      " DID_TRANSPORT_DISRUPTED\n");
 
+		if (atomic64_read(&fnic->io_cmpl_skip))
+			atomic64_dec(&fnic->io_cmpl_skip);
+		else
+			atomic64_inc(&fnic_stats->io_stats.io_completions);
+
 		/* Complete the command to SCSI */
 		if (sc->scsi_done) {
 			FNIC_TRACE(fnic_cleanup_io,
@@ -1262,6 +1403,7 @@
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 	struct Scsi_Host *host = fnic->lport->host;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	unsigned long flags;
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -1283,12 +1425,19 @@
 		atomic_dec(&fnic->in_flight);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_queue_abort_io_req: failure: no descriptors\n");
+		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
 		return 1;
 	}
 	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
 				     0, task_req, tag, fc_lun, io_req->port_id,
 				     fnic->config.ra_tov, fnic->config.ed_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	atomic_dec(&fnic->in_flight);
 
@@ -1299,10 +1448,13 @@
 {
 	int tag;
 	int abt_tag;
+	int term_cnt = 0;
 	struct fnic_io_req *io_req;
 	spinlock_t *io_lock;
 	unsigned long flags;
 	struct scsi_cmnd *sc;
+	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
 	struct scsi_lun fc_lun;
 	enum fnic_ioreq_state old_ioreq_state;
 
@@ -1366,6 +1518,7 @@
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
 		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+			atomic64_inc(&reset_stats->device_reset_terminates);
 			abt_tag = (tag | FNIC_TAG_DEV_RST);
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1402,8 +1555,12 @@
 			else
 				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 			spin_unlock_irqrestore(io_lock, flags);
+			atomic64_inc(&term_stats->terminates);
+			term_cnt++;
 		}
 	}
+	if (term_cnt > atomic64_read(&term_stats->max_terminates))
+		atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1411,6 +1568,7 @@
 {
 	int tag;
 	int abt_tag;
+	int term_cnt = 0;
 	struct fnic_io_req *io_req;
 	spinlock_t *io_lock;
 	unsigned long flags;
@@ -1420,6 +1578,8 @@
 	struct fc_lport *lport;
 	struct fnic *fnic;
 	struct fc_rport *cmd_rport;
+	struct reset_stats *reset_stats;
+	struct terminate_stats *term_stats;
 	enum fnic_ioreq_state old_ioreq_state;
 
 	if (!rport) {
@@ -1448,6 +1608,9 @@
 	if (fnic->in_remove)
 		return;
 
+	reset_stats = &fnic->fnic_stats.reset_stats;
+	term_stats = &fnic->fnic_stats.term_stats;
+
 	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
 		io_lock = fnic_io_lock_tag(fnic, tag);
@@ -1504,6 +1667,7 @@
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
 		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+			atomic64_inc(&reset_stats->device_reset_terminates);
 			abt_tag = (tag | FNIC_TAG_DEV_RST);
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1540,8 +1704,12 @@
 			else
 				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 			spin_unlock_irqrestore(io_lock, flags);
+			atomic64_inc(&term_stats->terminates);
+			term_cnt++;
 		}
 	}
+	if (term_cnt > atomic64_read(&term_stats->max_terminates))
+		atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1562,6 +1730,9 @@
 	int ret = SUCCESS;
 	u32 task_req = 0;
 	struct scsi_lun fc_lun;
+	struct fnic_stats *fnic_stats;
+	struct abort_stats *abts_stats;
+	struct terminate_stats *term_stats;
 	int tag;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
@@ -1572,6 +1743,10 @@
 	lp = shost_priv(sc->device->host);
 
 	fnic = lport_priv(lp);
+	fnic_stats = &fnic->fnic_stats;
+	abts_stats = &fnic->fnic_stats.abts_stats;
+	term_stats = &fnic->fnic_stats.term_stats;
+
 	rport = starget_to_rport(scsi_target(sc->device));
 	tag = sc->request->tag;
 	FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1630,8 +1805,10 @@
 	 */
 	if (fc_remote_port_chkready(rport) == 0)
 		task_req = FCPIO_ITMF_ABT_TASK;
-	else
+	else {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		task_req = FCPIO_ITMF_ABT_TASK_TERM;
+	}
 
 	/* Now queue the abort command to firmware */
 	int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1646,10 +1823,13 @@
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
-	if (task_req == FCPIO_ITMF_ABT_TASK)
+	if (task_req == FCPIO_ITMF_ABT_TASK) {
 		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
-	else
+		atomic64_inc(&fnic_stats->abts_stats.aborts);
+	} else {
 		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+		atomic64_inc(&fnic_stats->term_stats.terminates);
+	}
 
 	/*
 	 * We queued an abort IO, wait for its completion.
@@ -1667,6 +1847,7 @@
 
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
 		ret = FAILED;
@@ -1677,6 +1858,15 @@
 	/* fw did not complete abort, timed out */
 	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
+		if (task_req == FCPIO_ITMF_ABT_TASK) {
+			FNIC_SCSI_DBG(KERN_INFO,
+				fnic->lport->host, "Abort Driver Timeout\n");
+			atomic64_inc(&abts_stats->abort_drv_timeouts);
+		} else {
+			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+				"Terminate Driver Timeout\n");
+			atomic64_inc(&term_stats->terminate_drv_timeouts);
+		}
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
@@ -1721,6 +1911,7 @@
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 	struct Scsi_Host *host = fnic->lport->host;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	struct scsi_lun fc_lun;
 	int ret = 0;
 	unsigned long intr_flags;
@@ -1742,6 +1933,7 @@
 	if (!vnic_wq_copy_desc_avail(wq)) {
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			  "queue_dr_io_req failure - no descriptors\n");
+		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
 		ret = -EAGAIN;
 		goto lr_io_req_end;
 	}
@@ -1754,6 +1946,12 @@
 				     fc_lun.scsi_lun, io_req->port_id,
 				     fnic->config.ra_tov, fnic->config.ed_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 lr_io_req_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 	atomic_dec(&fnic->in_flight);
@@ -1988,6 +2186,8 @@
 	unsigned long flags;
 	unsigned long start_time = 0;
 	struct scsi_lun fc_lun;
+	struct fnic_stats *fnic_stats;
+	struct reset_stats *reset_stats;
 	int tag = 0;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
@@ -1999,6 +2199,10 @@
 	lp = shost_priv(sc->device->host);
 
 	fnic = lport_priv(lp);
+	fnic_stats = &fnic->fnic_stats;
+	reset_stats = &fnic->fnic_stats.reset_stats;
+
+	atomic64_inc(&reset_stats->device_resets);
 
 	rport = starget_to_rport(scsi_target(sc->device));
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2009,8 +2213,10 @@
 		goto fnic_device_reset_end;
 
 	/* Check if remote port up */
-	if (fc_remote_port_chkready(rport))
+	if (fc_remote_port_chkready(rport)) {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		goto fnic_device_reset_end;
+	}
 
 	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
 	/* Allocate tag if not present */
@@ -2086,6 +2292,7 @@
 	 * gets cleaned up during higher levels of EH
 	 */
 	if (status == FCPIO_INVALID_CODE) {
+		atomic64_inc(&reset_stats->device_reset_timeouts);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Device reset timed out\n");
 		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@@ -2199,6 +2406,10 @@
 		      "Returning from device reset %s\n",
 		      (ret == SUCCESS) ?
 		      "SUCCESS" : "FAILED");
+
+	if (ret == FAILED)
+		atomic64_inc(&reset_stats->device_reset_failures);
+
 	return ret;
 }
 
@@ -2207,26 +2418,34 @@
 {
 	struct fc_lport *lp;
 	struct fnic *fnic;
-	int ret = SUCCESS;
+	int ret = 0;
+	struct reset_stats *reset_stats;
 
 	lp = shost_priv(shost);
 	fnic = lport_priv(lp);
+	reset_stats = &fnic->fnic_stats.reset_stats;
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "fnic_reset called\n");
 
+	atomic64_inc(&reset_stats->fnic_resets);
+
 	/*
 	 * Reset local port, this will clean up libFC exchanges,
 	 * reset remote port sessions, and if link is up, begin flogi
 	 */
-	if (lp->tt.lport_reset(lp))
-		ret = FAILED;
+	ret = lp->tt.lport_reset(lp);
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Returning from fnic reset %s\n",
-		      (ret == SUCCESS) ?
+		      (ret == 0) ?
 		      "SUCCESS" : "FAILED");
 
+	if (ret == 0)
+		atomic64_inc(&reset_stats->fnic_reset_completions);
+	else
+		atomic64_inc(&reset_stats->fnic_reset_failures);
+
 	return ret;
 }
 
@@ -2251,7 +2470,7 @@
 	 * scsi-ml tries to send a TUR to every device if host reset is
 	 * successful, so before returning to scsi, fabric should be up
 	 */
-	ret = fnic_reset(shost);
+	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
 	if (ret == SUCCESS) {
 		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
 		ret = FAILED;
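The abort and device-reset queueing paths above both increment fw_stats.active_fw_reqs and then refresh fw_stats.max_fw_reqs with an unlocked read-then-set. The two atomic64 operations are not one unit, so a concurrent completion can leave the recorded maximum slightly stale; for debug counters that is an accepted trade-off against taking a lock on the hot path. A minimal userspace sketch of the same watermark idiom, using C11 atomics in place of the kernel's atomic64_t (all names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_int_least64_t active_reqs;	/* like active_fw_reqs */
static atomic_int_least64_t max_reqs;		/* like max_fw_reqs */

static void track_request_start(void)
{
	int64_t cur = atomic_fetch_add(&active_reqs, 1) + 1;

	/* Racy read-then-set, mirroring the driver: good enough for stats. */
	if (cur > atomic_load(&max_reqs))
		atomic_store(&max_reqs, cur);
}

static void track_request_done(void)
{
	atomic_fetch_sub(&active_reqs, 1);
}

int main(void)
{
	track_request_start();
	track_request_start();
	track_request_done();
	printf("active=%lld max=%lld\n",
	       (long long)atomic_load(&active_reqs),
	       (long long)atomic_load(&max_reqs));
	return 0;
}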
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
new file mode 100644
index 0000000..540cceb8
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_STATS_H_
+#define _FNIC_STATS_H_
+struct io_path_stats {
+	atomic64_t active_ios;
+	atomic64_t max_active_ios;
+	atomic64_t io_completions;
+	atomic64_t io_failures;
+	atomic64_t ioreq_null;
+	atomic64_t alloc_failures;
+	atomic64_t sc_null;
+	atomic64_t io_not_found;
+	atomic64_t num_ios;
+};
+
+struct abort_stats {
+	atomic64_t aborts;
+	atomic64_t abort_failures;
+	atomic64_t abort_drv_timeouts;
+	atomic64_t abort_fw_timeouts;
+	atomic64_t abort_io_not_found;
+};
+
+struct terminate_stats {
+	atomic64_t terminates;
+	atomic64_t max_terminates;
+	atomic64_t terminate_drv_timeouts;
+	atomic64_t terminate_fw_timeouts;
+	atomic64_t terminate_io_not_found;
+	atomic64_t terminate_failures;
+};
+
+struct reset_stats {
+	atomic64_t device_resets;
+	atomic64_t device_reset_failures;
+	atomic64_t device_reset_aborts;
+	atomic64_t device_reset_timeouts;
+	atomic64_t device_reset_terminates;
+	atomic64_t fw_resets;
+	atomic64_t fw_reset_completions;
+	atomic64_t fw_reset_failures;
+	atomic64_t fnic_resets;
+	atomic64_t fnic_reset_completions;
+	atomic64_t fnic_reset_failures;
+};
+
+struct fw_stats {
+	atomic64_t active_fw_reqs;
+	atomic64_t max_fw_reqs;
+	atomic64_t fw_out_of_resources;
+	atomic64_t io_fw_errs;
+};
+
+struct vlan_stats {
+	atomic64_t vlan_disc_reqs;
+	atomic64_t resp_withno_vlanID;
+	atomic64_t sol_expiry_count;
+	atomic64_t flogi_rejects;
+};
+
+struct misc_stats {
+	u64 last_isr_time;
+	u64 last_ack_time;
+	atomic64_t isr_count;
+	atomic64_t max_cq_entries;
+	atomic64_t ack_index_out_of_range;
+	atomic64_t data_count_mismatch;
+	atomic64_t fcpio_timeout;
+	atomic64_t fcpio_aborted;
+	atomic64_t sgl_invalid;
+	atomic64_t mss_invalid;
+	atomic64_t abts_cpwq_alloc_failures;
+	atomic64_t devrst_cpwq_alloc_failures;
+	atomic64_t io_cpwq_alloc_failures;
+	atomic64_t no_icmnd_itmf_cmpls;
+	atomic64_t queue_fulls;
+	atomic64_t rport_not_ready;
+	atomic64_t frame_errors;
+};
+
+struct fnic_stats {
+	struct io_path_stats io_stats;
+	struct abort_stats abts_stats;
+	struct terminate_stats term_stats;
+	struct reset_stats reset_stats;
+	struct fw_stats fw_stats;
+	struct vlan_stats vlan_stats;
+	struct misc_stats misc_stats;
+};
+
+struct stats_debug_info {
+	char *debug_buffer;
+	void *i_private;
+	int buf_size;
+	int buffer_len;
+};
+
+int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
+int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_remove(struct fnic *);
+#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 23a60e3..e002e71 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -189,6 +189,191 @@
 }
 
 /*
+ * fnic_get_stats_data - Copy fnic stats to a memory buffer
+ * @debug: pointer to the stats_debug_info buffer to fill
+ * @stats: pointer to the fnic_stats structure to report
+ *
+ * Description:
+ * This routine gathers the fnic stats debugfs data from the fnic_stats struct
+ * and dumps it to stats_debug_info.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into
+ * stats_debug_info
+ */
+int fnic_get_stats_data(struct stats_debug_info *debug,
+			struct fnic_stats *stats)
+{
+	int len = 0;
+	int buf_size = debug->buf_size;
+	struct timespec val1, val2;
+
+	len = snprintf(debug->debug_buffer + len, buf_size - len,
+		  "------------------------------------------\n"
+		  "\t\tIO Statistics\n"
+		  "------------------------------------------\n");
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
+		  "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
+		  "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
+		  "Number of Memory alloc Failures: %lld\n"
+		  "Number of IOREQ Null: %lld\n"
+		  "Number of SCSI cmd pointer Null: %lld\n",
+		  (u64)atomic64_read(&stats->io_stats.active_ios),
+		  (u64)atomic64_read(&stats->io_stats.max_active_ios),
+		  (u64)atomic64_read(&stats->io_stats.num_ios),
+		  (u64)atomic64_read(&stats->io_stats.io_completions),
+		  (u64)atomic64_read(&stats->io_stats.io_failures),
+		  (u64)atomic64_read(&stats->io_stats.io_not_found),
+		  (u64)atomic64_read(&stats->io_stats.alloc_failures),
+		  (u64)atomic64_read(&stats->io_stats.ioreq_null),
+		  (u64)atomic64_read(&stats->io_stats.sc_null));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tAbort Statistics\n"
+		  "------------------------------------------\n");
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Aborts: %lld\n"
+		  "Number of Abort Failures: %lld\n"
+		  "Number of Abort Driver Timeouts: %lld\n"
+		  "Number of Abort FW Timeouts: %lld\n"
+		  "Number of Abort IO NOT Found: %lld\n",
+		  (u64)atomic64_read(&stats->abts_stats.aborts),
+		  (u64)atomic64_read(&stats->abts_stats.abort_failures),
+		  (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
+		  (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
+		  (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tTerminate Statistics\n"
+		  "------------------------------------------\n");
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Terminates: %lld\n"
+		  "Maximum Terminates: %lld\n"
+		  "Number of Terminate Driver Timeouts: %lld\n"
+		  "Number of Terminate FW Timeouts: %lld\n"
+		  "Number of Terminate IO NOT Found: %lld\n"
+		  "Number of Terminate Failures: %lld\n",
+		  (u64)atomic64_read(&stats->term_stats.terminates),
+		  (u64)atomic64_read(&stats->term_stats.max_terminates),
+		  (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
+		  (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
+		  (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
+		  (u64)atomic64_read(&stats->term_stats.terminate_failures));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tReset Statistics\n"
+		  "------------------------------------------\n");
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Device Resets: %lld\n"
+		  "Number of Device Reset Failures: %lld\n"
+		  "Number of Device Reset Aborts: %lld\n"
+		  "Number of Device Reset Timeouts: %lld\n"
+		  "Number of Device Reset Terminates: %lld\n"
+		  "Number of FW Resets: %lld\n"
+		  "Number of FW Reset Completions: %lld\n"
+		  "Number of FW Reset Failures: %lld\n"
+		  "Number of Fnic Reset: %lld\n"
+		  "Number of Fnic Reset Completions: %lld\n"
+		  "Number of Fnic Reset Failures: %lld\n",
+		  (u64)atomic64_read(&stats->reset_stats.device_resets),
+		  (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
+		  (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
+		  (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
+		  (u64)atomic64_read(
+			  &stats->reset_stats.device_reset_terminates),
+		  (u64)atomic64_read(&stats->reset_stats.fw_resets),
+		  (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
+		  (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
+		  (u64)atomic64_read(&stats->reset_stats.fnic_resets),
+		  (u64)atomic64_read(
+			  &stats->reset_stats.fnic_reset_completions),
+		  (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tFirmware Statistics\n"
+		  "------------------------------------------\n");
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Active FW Requests %lld\n"
+		  "Maximum FW Requests: %lld\n"
+		  "Number of FW out of resources: %lld\n"
+		  "Number of FW IO errors: %lld\n",
+		  (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
+		  (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
+		  (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
+		  (u64)atomic64_read(&stats->fw_stats.io_fw_errs));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tVlan Discovery Statistics\n"
+		  "------------------------------------------\n");
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Number of Vlan Discovery Requests Sent %lld\n"
+		  "Vlan Response Received with no FCF VLAN ID: %lld\n"
+		  "No solicitations recvd after vlan set, expiry count: %lld\n"
+		  "Flogi rejects count: %lld\n",
+		  (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
+		  (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
+		  (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
+		  (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "\n------------------------------------------\n"
+		  "\t\tOther Important Statistics\n"
+		  "------------------------------------------\n");
+
+	jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
+	jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+		  "Last ISR time: %llu (%8lu.%8lu)\n"
+		  "Last ACK time: %llu (%8lu.%8lu)\n"
+		  "Number of ISRs: %lld\n"
+		  "Maximum CQ Entries: %lld\n"
+		  "Number of ACK index out of range: %lld\n"
+		  "Number of data count mismatch: %lld\n"
+		  "Number of FCPIO Timeouts: %lld\n"
+		  "Number of FCPIO Aborted: %lld\n"
+		  "Number of SGL Invalid: %lld\n"
+		  "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
+		  "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
+		  "Number of Copy WQ Alloc Failures for IOs: %lld\n"
+		  "Number of no icmnd itmf Completions: %lld\n"
+		  "Number of QUEUE Fulls: %lld\n"
+		  "Number of rport not ready: %lld\n"
+		  "Number of receive frame errors: %lld\n",
+		  (u64)stats->misc_stats.last_isr_time,
+		  val1.tv_sec, val1.tv_nsec,
+		  (u64)stats->misc_stats.last_ack_time,
+		  val2.tv_sec, val2.tv_nsec,
+		  (u64)atomic64_read(&stats->misc_stats.isr_count),
+		  (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
+		  (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
+		  (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
+		  (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
+		  (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
+		  (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
+		  (u64)atomic64_read(
+			  &stats->misc_stats.abts_cpwq_alloc_failures),
+		  (u64)atomic64_read(
+			  &stats->misc_stats.devrst_cpwq_alloc_failures),
+		  (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
+		  (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+		  (u64)atomic64_read(&stats->misc_stats.queue_fulls),
+		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
+		  (u64)atomic64_read(&stats->misc_stats.frame_errors));
+
+	return len;
+
+}
+
+/*
  * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
  *
  * Description:
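fnic_get_stats_data() above accumulates its report with len += snprintf(debug->debug_buffer + len, buf_size - len, ...). That pattern is safe only while the buffer stays large enough: C99 snprintf() returns the length it would have written, so once output truncates, len can exceed buf_size and the next buf_size - len wraps around when taken as a size_t. The kernel's scnprintf() sidesteps this by returning the number of bytes actually stored. A userspace sketch of a clamped append in the same spirit (the append() helper is hypothetical, not part of the driver):

#include <stdarg.h>
#include <stdio.h>

static int append(char *buf, size_t size, int len, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (len < 0 || (size_t)len >= size)
		return len;			/* buffer already full */

	va_start(ap, fmt);
	n = vsnprintf(buf + len, size - len, fmt, ap);
	va_end(ap);

	if (n < 0)
		return len;
	if ((size_t)(len + n) >= size)
		return (int)(size - 1);		/* clamp at truncation */
	return len + n;
}

int main(void)
{
	char buf[16];
	int len = 0;

	len = append(buf, sizeof(buf), len, "0123456789");
	len = append(buf, sizeof(buf), len, "abcdefghij");	/* truncates */
	printf("len=%d buf=\"%s\"\n", len, buf);	/* len=15, not 20 */
	return 0;
}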
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index cef42b4..d412f2e 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -84,7 +84,8 @@
 int fnic_get_trace_data(fnic_dbgfs_t *);
 int fnic_trace_buf_init(void);
 void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
 int fnic_trace_debugfs_init(void);
 void fnic_trace_debugfs_terminate(void);
-
 #endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 6d55b4e..ee4fa40 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -594,8 +594,6 @@
 {
 	gdth_ha_str *ha = pci_get_drvdata(pdev);
 
-	pci_set_drvdata(pdev, NULL);
-
 	list_del(&ha->list);
 	gdth_remove_one(ha);
 
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index df0c3c7..f334859 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -316,6 +316,12 @@
 	kfree(shost);
 }
 
+static unsigned int shost_eh_deadline;
+
+module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(eh_deadline,
+		 "SCSI EH timeout in seconds (should be between 1 and 2^32-1)");
+
 static struct device_type scsi_host_type = {
 	.name =		"scsi_host",
 	.release =	scsi_host_dev_release,
@@ -388,6 +394,7 @@
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
+	shost->eh_deadline = shost_eh_deadline * HZ;
 
 	if (sht->supported_mode == MODE_UNKNOWN)
 		/* means we didn't set it ... default to INITIATOR */
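shost->eh_deadline is computed once per host at allocation time, by multiplying the module parameter (in seconds) by HZ, so later writes to the parameter only affect hosts allocated afterwards. Assuming the usual build in which hosts.c is linked into scsi_mod (an assumption about the build, not something this hunk states), the parameter would be set as, for example, scsi_mod.eh_deadline=30 on the kernel command line, or at runtime through /sys/module/scsi_mod/parameters/eh_deadline.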
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 891c86b..22f6432 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -100,7 +100,6 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
@@ -5018,7 +5017,6 @@
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	kfree(h);
 }
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9e2588a..add6d15 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -116,6 +116,7 @@
 	struct iscsi_conn *conn = sk->sk_user_data;
 
 	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
 	    !atomic_read(&sk->sk_rmem_alloc)) {
 		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
 		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 5879929..1b3a094 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/log2.h>
 
 #include <scsi/fc/fc_fc2.h>
 
@@ -303,10 +304,7 @@
 		fr_eof(fp) = FC_EOF_N;
 	}
 
-	/*
-	 * Initialize remainig fh fields
-	 * from fc_fill_fc_hdr
-	 */
+	/* Initialize remaining fh fields from fc_fill_fc_hdr */
 	fh->fh_ox_id = htons(ep->oxid);
 	fh->fh_rx_id = htons(ep->rxid);
 	fh->fh_seq_id = ep->seq.id;
@@ -362,9 +360,10 @@
 
 	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
 
-	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-			       msecs_to_jiffies(timer_msec)))
-		fc_exch_hold(ep);		/* hold for timer */
+	fc_exch_hold(ep);		/* hold for timer */
+	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+				msecs_to_jiffies(timer_msec)))
+		fc_exch_release(ep);
 }
 
 /**
@@ -382,6 +381,8 @@
 /**
  * fc_exch_done_locked() - Complete an exchange with the exchange lock held
  * @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static int fc_exch_done_locked(struct fc_exch *ep)
 {
@@ -393,7 +394,6 @@
 	 * ep, and in that case we only clear the resp and set it as
 	 * complete, so it can be reused by the timer to send the rrq.
 	 */
-	ep->resp = NULL;
 	if (ep->state & FC_EX_DONE)
 		return rc;
 	ep->esb_stat |= ESB_ST_COMPLETE;
@@ -464,15 +464,21 @@
 }
 
 static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
-		       struct fc_frame *fp)
+			      struct fc_frame *fp)
 {
 	struct fc_exch *ep;
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
-	int error;
+	int error = -ENXIO;
 	u32 f_ctl;
 	u8 fh_type = fh->fh_type;
 
 	ep = fc_seq_exch(sp);
+
+	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+		fc_frame_free(fp);
+		goto out;
+	}
+
 	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
@@ -515,6 +521,9 @@
  * @lport: The local port that the exchange will be sent on
  * @sp:	   The sequence to be sent
  * @fp:	   The frame to be sent on the exchange
+ *
+ * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+ * or indirectly by calling libfc_function_template.frame_send().
  */
 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
 		       struct fc_frame *fp)
@@ -581,6 +590,8 @@
 
 /*
  * Set the response handler for the exchange associated with a sequence.
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_seq_set_resp(struct fc_seq *sp,
 			    void (*resp)(struct fc_seq *, struct fc_frame *,
@@ -588,8 +599,18 @@
 			    void *arg)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
+	DEFINE_WAIT(wait);
 
 	spin_lock_bh(&ep->ex_lock);
+	while (ep->resp_active && ep->resp_task != current) {
+		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock_bh(&ep->ex_lock);
+
+		schedule();
+
+		spin_lock_bh(&ep->ex_lock);
+	}
+	finish_wait(&ep->resp_wq, &wait);
 	ep->resp = resp;
 	ep->arg = arg;
 	spin_unlock_bh(&ep->ex_lock);
@@ -622,27 +643,31 @@
 	if (!sp)
 		return -ENOMEM;
 
-	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
 	if (timer_msec)
 		fc_exch_timer_set_locked(ep, timer_msec);
 
-	/*
-	 * If not logged into the fabric, don't send ABTS but leave
-	 * sequence active until next timeout.
-	 */
-	if (!ep->sid)
-		return 0;
-
-	/*
-	 * Send an abort for the sequence that timed out.
-	 */
-	fp = fc_frame_alloc(ep->lp, 0);
-	if (fp) {
-		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
-			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-		error = fc_seq_send_locked(ep->lp, sp, fp);
-	} else
-		error = -ENOBUFS;
+	if (ep->sid) {
+		/*
+		 * Send an abort for the sequence that timed out.
+		 */
+		fp = fc_frame_alloc(ep->lp, 0);
+		if (fp) {
+			ep->esb_stat |= ESB_ST_SEQ_INIT;
+			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+				       FC_TYPE_BLS, FC_FC_END_SEQ |
+				       FC_FC_SEQ_INIT, 0);
+			error = fc_seq_send_locked(ep->lp, sp, fp);
+		} else {
+			error = -ENOBUFS;
+		}
+	} else {
+		/*
+		 * If not logged into the fabric, don't send ABTS but leave
+		 * sequence active until next timeout.
+		 */
+		error = 0;
+	}
+	ep->esb_stat |= ESB_ST_ABNORMAL;
 	return error;
 }
 
@@ -669,6 +694,61 @@
 }
 
 /**
+ * fc_invoke_resp() - invoke ep->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization has finished (this means the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that neither of
+ * these two variables changes while ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp(), it is guaranteed that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+			   struct fc_frame *fp)
+{
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *arg;
+	bool res = false;
+
+	spin_lock_bh(&ep->ex_lock);
+	ep->resp_active++;
+	if (ep->resp_task != current)
+		ep->resp_task = !ep->resp_task ? current : NULL;
+	resp = ep->resp;
+	arg = ep->arg;
+	spin_unlock_bh(&ep->ex_lock);
+
+	if (resp) {
+		resp(sp, fp, arg);
+		res = true;
+	} else if (!IS_ERR(fp)) {
+		fc_frame_free(fp);
+	}
+
+	spin_lock_bh(&ep->ex_lock);
+	if (--ep->resp_active == 0)
+		ep->resp_task = NULL;
+	spin_unlock_bh(&ep->ex_lock);
+
+	if (ep->resp_active == 0)
+		wake_up(&ep->resp_wq);
+
+	return res;
+}
+
+/**
  * fc_exch_timeout() - Handle exchange timer expiration
  * @work: The work_struct identifying the exchange that timed out
  */
@@ -677,8 +757,6 @@
 	struct fc_exch *ep = container_of(work, struct fc_exch,
 					  timeout_work.work);
 	struct fc_seq *sp = &ep->seq;
-	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-	void *arg;
 	u32 e_stat;
 	int rc = 1;
 
@@ -696,16 +774,13 @@
 			fc_exch_rrq(ep);
 		goto done;
 	} else {
-		resp = ep->resp;
-		arg = ep->arg;
-		ep->resp = NULL;
 		if (e_stat & ESB_ST_ABNORMAL)
 			rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
 			fc_exch_delete(ep);
-		if (resp)
-			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+		fc_seq_set_resp(sp, NULL, ep->arg);
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
 		goto done;
 	}
@@ -792,6 +867,8 @@
 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
 	ep->rxid = FC_XID_UNKNOWN;
 	ep->class = mp->class;
+	ep->resp_active = 0;
+	init_waitqueue_head(&ep->resp_wq);
 	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
 out:
 	return ep;
@@ -838,8 +915,10 @@
 		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
 		spin_lock_bh(&pool->lock);
 		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
-		if (ep && ep->xid == xid)
+		if (ep) {
+			WARN_ON(ep->xid != xid);
 			fc_exch_hold(ep);
+		}
 		spin_unlock_bh(&pool->lock);
 	}
 	return ep;
@@ -850,6 +929,8 @@
  * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
  *		    the memory allocated for the related objects may be freed.
  * @sp: The sequence that has completed
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_exch_done(struct fc_seq *sp)
 {
@@ -859,6 +940,8 @@
 	spin_lock_bh(&ep->ex_lock);
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
+
+	fc_seq_set_resp(sp, NULL, ep->arg);
 	if (!rc)
 		fc_exch_delete(ep);
 }
@@ -987,6 +1070,7 @@
 		}
 	}
 
+	spin_lock_bh(&ep->ex_lock);
 	/*
 	 * At this point, we have the exchange held.
 	 * Find or create the sequence.
@@ -1014,11 +1098,11 @@
 				 * sending RSP, hence write request on other
 				 * end never finishes.
 				 */
-				spin_lock_bh(&ep->ex_lock);
 				sp->ssb_stat |= SSB_ST_RESP;
 				sp->id = fh->fh_seq_id;
-				spin_unlock_bh(&ep->ex_lock);
 			} else {
+				spin_unlock_bh(&ep->ex_lock);
+
 				/* sequence/exch should exist */
 				reject = FC_RJT_SEQ_ID;
 				goto rel;
@@ -1029,6 +1113,7 @@
 
 	if (f_ctl & FC_FC_SEQ_INIT)
 		ep->esb_stat |= ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
 
 	fr_seq(fp) = sp;
 out:
@@ -1291,21 +1376,23 @@
 
 	if (!ep)
 		goto reject;
+
+	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+	if (!fp)
+		goto free;
+
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
 		spin_unlock_bh(&ep->ex_lock);
+
+		fc_frame_free(fp);
 		goto reject;
 	}
-	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+		ep->esb_stat |= ESB_ST_REC_QUAL;
 		fc_exch_hold(ep);		/* hold for REC_QUAL */
-	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
-	fc_exch_timer_set_locked(ep, ep->r_a_tov);
-
-	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
-	if (!fp) {
-		spin_unlock_bh(&ep->ex_lock);
-		goto free;
 	}
+	fc_exch_timer_set_locked(ep, ep->r_a_tov);
 	fh = fc_frame_header_get(fp);
 	ap = fc_frame_payload_get(fp, sizeof(*ap));
 	memset(ap, 0, sizeof(*ap));
@@ -1319,14 +1406,16 @@
 	}
 	sp = fc_seq_start_next_locked(sp);
 	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+	ep->esb_stat |= ESB_ST_ABNORMAL;
 	spin_unlock_bh(&ep->ex_lock);
+
+free:
 	fc_frame_free(rx_fp);
 	return;
 
 reject:
 	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
-free:
-	fc_frame_free(rx_fp);
+	goto free;
 }
 
 /**
@@ -1416,9 +1505,7 @@
 		 * If new exch resp handler is valid then call that
 		 * first.
 		 */
-		if (ep->resp)
-			ep->resp(sp, fp, ep->arg);
-		else
+		if (!fc_invoke_resp(ep, sp, fp))
 			lport->tt.lport_recv(lport, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
@@ -1442,8 +1529,6 @@
 	struct fc_exch *ep;
 	enum fc_sof sof;
 	u32 f_ctl;
-	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-	void *ex_resp_arg;
 	int rc;
 
 	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
@@ -1478,19 +1563,19 @@
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
 	fr_seq(fp) = sp;
+
+	spin_lock_bh(&ep->ex_lock);
 	if (f_ctl & FC_FC_SEQ_INIT)
 		ep->esb_stat |= ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
 
 	if (fc_sof_needs_ack(sof))
 		fc_seq_send_ack(sp, fp);
-	resp = ep->resp;
-	ex_resp_arg = ep->arg;
 
 	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
 	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
 	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
 		spin_lock_bh(&ep->ex_lock);
-		resp = ep->resp;
 		rc = fc_exch_done_locked(ep);
 		WARN_ON(fc_seq_exch(sp) != ep);
 		spin_unlock_bh(&ep->ex_lock);
@@ -1511,10 +1596,8 @@
 	 * If new exch resp handler is valid then call that
 	 * first.
 	 */
-	if (resp)
-		resp(sp, fp, ex_resp_arg);
-	else
-		fc_frame_free(fp);
+	fc_invoke_resp(ep, sp, fp);
+
 	fc_exch_release(ep);
 	return;
 rel:
@@ -1553,8 +1636,6 @@
  */
 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 {
-	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-	void *ex_resp_arg;
 	struct fc_frame_header *fh;
 	struct fc_ba_acc *ap;
 	struct fc_seq *sp;
@@ -1599,9 +1680,6 @@
 		break;
 	}
 
-	resp = ep->resp;
-	ex_resp_arg = ep->arg;
-
 	/* do we need to do some other checks here. Can we reuse more of
 	 * fc_exch_recv_seq_resp
 	 */
@@ -1613,17 +1691,14 @@
 	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
 		rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
+
+	fc_exch_hold(ep);
 	if (!rc)
 		fc_exch_delete(ep);
-
-	if (resp)
-		resp(sp, fp, ex_resp_arg);
-	else
-		fc_frame_free(fp);
-
+	fc_invoke_resp(ep, sp, fp);
 	if (has_rec)
 		fc_exch_timer_set(ep, ep->r_a_tov);
-
+	fc_exch_release(ep);
 }
 
 /**
@@ -1662,7 +1737,7 @@
 			break;
 		default:
 			if (ep)
-				FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
 					    fh->fh_r_ctl,
 					    fc_exch_rctl_name(fh->fh_r_ctl));
 			break;
@@ -1745,32 +1820,33 @@
 /**
  * fc_exch_reset() - Reset an exchange
  * @ep: The exchange to be reset
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_exch_reset(struct fc_exch *ep)
 {
 	struct fc_seq *sp;
-	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
-	void *arg;
 	int rc = 1;
 
 	spin_lock_bh(&ep->ex_lock);
 	fc_exch_abort_locked(ep, 0);
 	ep->state |= FC_EX_RST_CLEANUP;
 	fc_exch_timer_cancel(ep);
-	resp = ep->resp;
-	ep->resp = NULL;
 	if (ep->esb_stat & ESB_ST_REC_QUAL)
 		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
 	ep->esb_stat &= ~ESB_ST_REC_QUAL;
-	arg = ep->arg;
 	sp = &ep->seq;
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
+
+	fc_exch_hold(ep);
+
 	if (!rc)
 		fc_exch_delete(ep);
 
-	if (resp)
-		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+	fc_seq_set_resp(sp, NULL, ep->arg);
+	fc_exch_release(ep);
 }
 
 /**
@@ -1956,13 +2032,13 @@
 
 	switch (op) {
 	case ELS_LS_RJT:
-		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
+		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
 		/* fall through */
 	case ELS_LS_ACC:
 		goto cleanup;
 	default:
-		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
-			    "for RRQ", op);
+		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+			    op);
 		return;
 	}
 
@@ -2533,13 +2609,8 @@
 	 * cpu on which exchange originated by simple bitwise
 	 * AND operation between fc_cpu_mask and exchange id.
 	 */
-	fc_cpu_mask = 1;
-	fc_cpu_order = 0;
-	while (fc_cpu_mask < nr_cpu_ids) {
-		fc_cpu_mask <<= 1;
-		fc_cpu_order++;
-	}
-	fc_cpu_mask--;
+	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+	fc_cpu_mask = (1 << fc_cpu_order) - 1;
 
 	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
 	if (!fc_exch_workqueue)
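The rewritten mask setup is equivalent to the loop it replaces: nr_cpu_ids is rounded up to a power of two, fc_cpu_order is that power's log2, and the low fc_cpu_order bits of an exchange ID select the per-CPU exchange pool. A standalone sketch with plain-C stand-ins for the kernel's roundup_pow_of_two()/ilog2(), shown only to make the arithmetic concrete:

#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int nr_cpu_ids = 6;	/* example: six possible CPUs */
	unsigned int fc_cpu_order = ilog2_u(roundup_pow_of_two_u(nr_cpu_ids));
	unsigned int fc_cpu_mask = (1 << fc_cpu_order) - 1;

	/* 6 -> rounded to 8 -> order 3, mask 0x7; xid & 7 picks the pool */
	printf("order=%u mask=0x%x pool(xid=0x1234)=%u\n",
	       fc_cpu_order, fc_cpu_mask, 0x1234 & fc_cpu_mask);
	return 0;
}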
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5fd0f1f..1d7e76e 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -902,7 +902,8 @@
 	/*
 	 * Check for missing or extra data frames.
 	 */
-	if (unlikely(fsp->xfer_len != expected_len)) {
+	if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
+		     fsp->xfer_len != expected_len)) {
 		if (fsp->xfer_len < expected_len) {
 			/*
 			 * Some data may be queued locally,
@@ -955,12 +956,11 @@
 		 * Test for transport underrun, independent of response
 		 * underrun status.
 		 */
-		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+		if (fsp->cdb_status == SAM_STAT_GOOD &&
+		    fsp->xfer_len < fsp->data_len && !fsp->io_status &&
 		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
-		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
 			fsp->status_code = FC_DATA_UNDRUN;
-			fsp->io_status = 0;
-		}
 	}
 
 	seq = fsp->seq_ptr;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index f04d15c..e01a298 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -516,7 +516,7 @@
  * @lport: The local port receiving the LOGO
  * @fp:	   The LOGO request frame
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1088,7 +1088,7 @@
 {
 	unsigned long delay = 0;
 	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
-		     PTR_ERR(fp), fc_lport_state(lport),
+		     IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
 		     lport->retry_count);
 
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index c710d90..589ff9a 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1705,7 +1705,7 @@
  * @rdata: The remote port that sent the PRLI request
  * @rx_fp: The PRLI request frame
  *
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
@@ -1824,7 +1824,7 @@
  * @rdata: The remote port that sent the PRLO request
  * @rx_fp: The PRLO request frame
  *
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
@@ -1895,7 +1895,7 @@
  * @lport: The local port that received the LOGO request
  * @fp:	   The LOGO request frame
  *
- * Locking Note: The rport lock is exected to be held before calling
+ * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
 static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b92aec9..82134d20 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2629,7 +2629,7 @@
 				rspiocbq,
 				(phba->fc_ratov * 2)
 				+ LPFC_DRVR_TIMEOUT);
-	if (iocb_stat) {
+	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
 		ret_val = -EIO;
 		goto err_get_xri_exit;
 	}
@@ -3204,8 +3204,9 @@
 					     rspiocbq, (phba->fc_ratov * 2) +
 					     LPFC_DRVR_TIMEOUT);
 
-	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
-					   (rsp->ulpStatus != IOCB_SUCCESS))) {
+	if ((iocb_stat != IOCB_SUCCESS) ||
+	    ((phba->sli_rev < LPFC_SLI_REV4) &&
+	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
 				"3126 Failed loopback test issue iocb: "
 				"iocb_stat:x%x\n", iocb_stat);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 02e8cd9..da61d8d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -280,7 +280,7 @@
 		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
 		kfree(buf_ptr);
-		ctiocb->context1 = NULL;
+		ctiocb->context3 = NULL;
 	}
 	lpfc_sli_release_iocbq(phba, ctiocb);
 	return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7801601..883ea2d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4171,8 +4171,6 @@
 	NLP_INT_NODE_ACT(ndlp);
 	atomic_set(&ndlp->cmd_pending, 0);
 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
-	if (vport->phba->sli_rev == LPFC_SLI_REV4)
-		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 }
 
 struct lpfc_nodelist *
@@ -4217,6 +4215,9 @@
 	lpfc_initialize_node(vport, ndlp, did);
 
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 
 	if (state != NLP_STE_UNUSED_NODE)
 		lpfc_nlp_set_state(vport, ndlp, state);
@@ -5617,6 +5618,9 @@
 
 	lpfc_initialize_node(vport, ndlp, did);
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 		"node init:       did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 086c3f2..5464b11 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3439,7 +3439,8 @@
 #define els_req64_hopcnt_SHIFT      24
 #define els_req64_hopcnt_MASK       0x000000ff
 #define els_req64_hopcnt_WORD       word13
-	uint32_t reserved[2];
+	uint32_t word14;
+	uint32_t max_response_payload_len;
 };
 
 struct xmit_els_rsp64_wqe {
@@ -3554,7 +3555,8 @@
 	uint32_t relative_offset;
 	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
-	uint32_t rsvd_12_15[4];
+	uint32_t rsvd_12_14[3];
+	uint32_t max_response_payload_len;
 };
 
 struct create_xri_wqe {
@@ -3584,7 +3586,13 @@
 
 struct fcp_iwrite64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t payload_offset_len;
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
 	uint32_t total_xfer_len;
 	uint32_t initial_xfer_len;
 	struct wqe_common wqe_com;     /* words 6-11 */
@@ -3594,7 +3602,13 @@
 
 struct fcp_iread64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t payload_offset_len;   /* word 3 */
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
 	uint32_t total_xfer_len;       /* word 4 */
 	uint32_t rsrvd5;               /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
@@ -3604,7 +3618,13 @@
 
 struct fcp_icmnd64_wqe {
 	struct ulp_bde64 bde;          /* words 0-2 */
-	uint32_t rsrvd3;               /* word 3 */
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
 	uint32_t rsrvd4;               /* word 4 */
 	uint32_t rsrvd5;               /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
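The new cmd_buff_len/payload_offset_len definitions follow the SHIFT/MASK/WORD triplet convention used throughout this header: each field records which word it lives in and where, and generic mask-and-shift accessors move values in and out (the bf_set() calls in the lpfc_sli.c hunks below rely on exactly these triplets). A self-contained illustration of the idiom; FIELD_SET()/FIELD_GET() below are illustrative stand-ins, not lpfc's actual accessor macros:

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write of a sub-field, in the style of the triplets above. */
#define FIELD_SET(word, shift, mask, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))
#define FIELD_GET(word, shift, mask) \
	(((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t word3 = 0;

	/* payload_offset_len occupies bits 0-15, cmd_buff_len bits 16-31 */
	FIELD_SET(word3, 0, 0xffff, 0x0123);	/* payload offset/len */
	FIELD_SET(word3, 16, 0xffff, 0);	/* cmd_buff_len = 0 */

	printf("word3=0x%08x payload_offset_len=0x%x cmd_buff_len=0x%x\n",
	       (unsigned)word3,
	       (unsigned)FIELD_GET(word3, 0, 0xffff),
	       (unsigned)FIELD_GET(word3, 16, 0xffff));
	return 0;
}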
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 647f5bf..68c94cc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4545,7 +4545,7 @@
 	pci_save_state(pdev);
 
 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
-	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+	if (pci_is_pcie(pdev))
 		pdev->needs_freset = 1;
 
 	return 0;
@@ -4581,8 +4581,6 @@
 	/* Release PCI resource and disable PCI device */
 	pci_release_selected_regions(pdev, bars);
 	pci_disable_device(pdev);
-	/* Null out PCI private reference to driver */
-	pci_set_drvdata(pdev, NULL);
 
 	return;
 }
@@ -9429,7 +9427,6 @@
 	/* Disable interrupt */
 	lpfc_sli_disable_intr(phba);
 
-	pci_set_drvdata(pdev, NULL);
 	scsi_host_put(shost);
 
 	/*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c913e8c..b2ede05 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1012,14 +1012,6 @@
 			break;
 		}
 
-		/* Allocate iotag for psb->cur_iocbq. */
-		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
-		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-				psb->data, psb->dma_handle);
-			kfree(psb);
-			break;
-		}
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
@@ -1028,6 +1020,19 @@
 			kfree(psb);
 			break;
 		}
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+				psb->data, psb->dma_handle);
+			kfree(psb);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"3368 Failed to allocated IOTAG for"
+					" XRI:0x%x\n", lxri);
+			lpfc_sli4_free_xri(phba, lxri);
+			break;
+		}
 		psb->cur_iocbq.sli4_lxritag = lxri;
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
@@ -4485,9 +4490,7 @@
 		piocb->ulpContext =
 		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 	}
-	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
-		piocb->ulpFCP2Rcvy = 1;
-	}
+	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
 
 	/* ulpTimeout is only one byte */
@@ -4981,6 +4984,73 @@
 	}
 }
 
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
+ *
+ * Return code:
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t rsp_info;
+	uint32_t rsp_len;
+	uint8_t  rsp_info_code;
+	int ret = FAILED;
+
+	if (fcprsp == NULL)
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0703 fcp_rsp is missing\n");
+	else {
+		rsp_info = fcprsp->rspStatus2;
+		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+		rsp_info_code = fcprsp->rspInfo3;
+
+		lpfc_printf_vlog(vport, KERN_INFO,
+				 LOG_FCP,
+				 "0706 fcp_rsp valid 0x%x,"
+				 " rsp len=%d code 0x%x\n",
+				 rsp_info,
+				 rsp_len, rsp_info_code);
+
+		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+			switch (rsp_info_code) {
+			case RSP_NO_FAILURE:
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0715 Task Mgmt No Failure\n");
+				ret = SUCCESS;
+				break;
+			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0716 Task Mgmt Target "
+						"reject\n");
+				break;
+			case RSP_TM_NOT_COMPLETED: /* TM failed */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0717 Task Mgmt Target "
+						"failed TM\n");
+				break;
+			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0718 Task Mgmt to invalid "
+						"LUN\n");
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
 /**
  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
  * @vport: The virtual port for which this call is being executed.
@@ -5042,12 +5112,8 @@
 
 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
-	if (status != IOCB_SUCCESS) {
-		if (status == IOCB_TIMEDOUT) {
-			ret = TIMEOUT_ERROR;
-		} else
-			ret = FAILED;
-		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+	if ((status != IOCB_SUCCESS) ||
+	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
 			 "iocb_flag x%x\n",
@@ -5055,9 +5121,21 @@
 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
 			 iocbqrsp->iocb.un.ulpWord[4],
 			 iocbq->iocb_flag);
-	} else if (status == IOCB_BUSY)
-		ret = FAILED;
-	else
+		/* if status == IOCB_SUCCESS here, ulpStatus must be != IOSTAT_SUCCESS */
+		if (status == IOCB_SUCCESS) {
+			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+				/* Something in the FCP_RSP was invalid.
+				 * Check conditions */
+				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+			else
+				ret = FAILED;
+		} else if (status == IOCB_TIMEDOUT) {
+			ret = TIMEOUT_ERROR;
+		} else {
+			ret = FAILED;
+		}
+		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+	} else
 		ret = SUCCESS;
 
 	lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -5181,7 +5259,7 @@
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
-	int status, ret = SUCCESS;
+	int status;
 
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5222,9 +5300,11 @@
 	 * So, continue on.
 	 * We will report success if all the i/o aborts successfully.
 	 */
-	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+	if (status == SUCCESS)
+		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
 						LPFC_CTX_LUN);
-	return ret;
+
+	return status;
 }
 
 /**
@@ -5248,7 +5328,7 @@
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
-	int status, ret = SUCCESS;
+	int status;
 
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5289,9 +5369,10 @@
 	 * So, continue on.
 	 * We will report success if all the i/o aborts successfully.
 	 */
-	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+	if (status == SUCCESS)
+		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
 					  LPFC_CTX_TGT);
-	return ret;
+	return status;
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index b1d9f7fc..852ff7d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -73,6 +73,7 @@
 #define RSP_RO_MISMATCH_ERR  0x03
 #define RSP_TM_NOT_SUPPORTED 0x04	/* Task mgmt function not supported */
 #define RSP_TM_NOT_COMPLETED 0x05	/* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU    0x09	/* Task mgmt function to invalid LU */
 
 	uint32_t rspInfoRsvd;	/* FCP_RSP_INFO bytes 4-7 (reserved) */
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 612f489..8f580fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -71,6 +71,8 @@
 				       int);
 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
 			uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -6566,6 +6568,108 @@
 	return;
 }
 
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ *                                    are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+	uint32_t idx;
+	struct lpfc_queue *mcq;
+	struct lpfc_mcqe *mcqe;
+	bool pending_completions = false;
+
+	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+		return false;
+
+	/* Check for completions on mailbox completion queue */
+
+	mcq = phba->sli4_hba.mbx_cq;
+	idx = mcq->hba_index;
+	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+			pending_completions = true;
+			break;
+		}
+		idx = (idx + 1) % mcq->entry_count;
+		if (mcq->hba_index == idx)
+			break;
+	}
+	return pending_completions;
+
+}
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ *					      that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+	uint32_t eqidx;
+	struct lpfc_queue *fpeq = NULL;
+	struct lpfc_eqe *eqe;
+	bool mbox_pending;
+
+	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+		return false;
+
+	/* Find the eq associated with the mcq */
+
+	if (phba->sli4_hba.hba_eq)
+		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+			    phba->sli4_hba.mbx_cq->assoc_qid) {
+				fpeq = phba->sli4_hba.hba_eq[eqidx];
+				break;
+			}
+	if (!fpeq)
+		return false;
+
+	/* Turn off interrupts from this EQ */
+
+	lpfc_sli4_eq_clr_intr(fpeq);
+
+	/* Check to see if a mbox completion is pending */
+
+	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+	/*
+	 * If a mbox completion is pending, process all the events on EQ
+	 * associated with the mbox completion queue (this could include
+	 * mailbox commands, async events, els commands, receive queue data
+	 * and fcp commands)
+	 */
+
+	if (mbox_pending)
+		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+			fpeq->EQ_processed++;
+		}
+
+	/* Always clear and re-arm the EQ */
+
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	return mbox_pending;
+
+}
 
 /**
  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6583,6 +6687,10 @@
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
+	/* If the mailbox completed, process the completion and return */
+	if (lpfc_sli4_process_missed_mbox_completions(phba))
+		return;
+
 	/* Check the pmbox pointer first.  There is a race condition
 	 * between the mbox timeout handler getting executed in the
 	 * worklist and the mailbox actually completing. When this
@@ -7077,6 +7185,10 @@
 						1000) + jiffies;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Make sure the mailbox is really active */
+	if (timeout)
+		lpfc_sli4_process_missed_mbox_completions(phba);
+
 	/* Wait for the outstanding mailbox command to complete */
 	while (phba->sli.mbox_active) {
 		/* Check active mailbox complete status every 2ms */
@@ -8076,6 +8188,7 @@
 		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
 		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
 		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+		wqe->els_req.max_response_payload_len = total_len - xmit_len;
 		break;
 	case CMD_XMIT_SEQUENCE64_CX:
 		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8120,8 +8233,10 @@
 		command_type = FCP_COMMAND_DATA_OUT;
 		/* word3 iocb=iotag wqe=payload_offset_len */
 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
-		wqe->fcp_iwrite.payload_offset_len =
-			xmit_len + sizeof(struct fcp_rsp);
+		bf_set(payload_offset_len, &wqe->fcp_iwrite,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+		       0);
 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
 		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8139,8 +8254,10 @@
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
-		wqe->fcp_iread.payload_offset_len =
-			xmit_len + sizeof(struct fcp_rsp);
+		bf_set(payload_offset_len, &wqe->fcp_iread,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_iread,
+		       0);
 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
 		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8156,8 +8273,13 @@
 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		break;
 	case CMD_FCP_ICMND64_CR:
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		bf_set(payload_offset_len, &wqe->fcp_icmd,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_icmd,
+		       0);
 		/* word3 iocb=IO_TAG wqe=reserved */
-		wqe->fcp_icmd.rsrvd3 = 0;
 		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8203,6 +8325,7 @@
 		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
 		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
 		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
 		command_type = OTHER_COMMAND;
 		break;
 	case CMD_XMIT_ELS_RSP64_CX:
@@ -10073,6 +10196,11 @@
 		if (iocb_completed) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 					"0331 IOCB wake signaled\n");
+			/* Note: we are not indicating if the IOCB has a
+			 * success status or not - that is for the caller
+			 * to check. IOCB_SUCCESS means only that the command
+			 * was sent and completed, not that it completed
+			 * successfully.
+			 */
 		} else if (timeleft == 0) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0338 IOCB wait timeout error - no "
@@ -11074,8 +11202,11 @@
 			      struct lpfc_iocbq *pIocbOut,
 			      struct lpfc_wcqe_complete *wcqe)
 {
+	int numBdes, i;
 	unsigned long iflags;
-	uint32_t status;
+	uint32_t status, max_response;
+	struct lpfc_dmabuf *dmabuf;
+	struct ulp_bde64 *bpl, bde;
 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -11092,7 +11223,36 @@
 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
 	else {
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+		switch (pIocbOut->iocb.ulpCommand) {
+		case CMD_ELS_REQUEST64_CR:
+			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+			bpl  = (struct ulp_bde64 *)dmabuf->virt;
+			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+			max_response = bde.tus.f.bdeSize;
+			break;
+		case CMD_GEN_REQUEST64_CR:
+			max_response = 0;
+			if (!pIocbOut->context3)
+				break;
+			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+					sizeof(struct ulp_bde64);
+			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+			bpl = (struct ulp_bde64 *)dmabuf->virt;
+			for (i = 0; i < numBdes; i++) {
+				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+					max_response += bde.tus.f.bdeSize;
+			}
+			break;
+		default:
+			max_response = wcqe->total_data_placed;
+			break;
+		}
+		if (max_response < wcqe->total_data_placed)
+			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+		else
+			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+				wcqe->total_data_placed;
 	}
 
 	/* Convert BG errors for completion status */
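
The new switch above computes the largest response the posted buffer
list can actually hold and clamps the hardware-reported
total_data_placed against it, so a bogus completion length cannot
inflate bdeSize past the real buffer capacity. The clamp in isolation,
with illustrative descriptor sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified buffer-descriptor entry: one length per buffer. */
    struct bde { uint32_t size; };

    /* Sum the capacity of the response buffers in the list. */
    static uint32_t max_response_len(const struct bde *bpl, int nbdes)
    {
            uint32_t max = 0;
            int i;

            for (i = 0; i < nbdes; i++)
                    max += bpl[i].size;
            return max;
    }

    int main(void)
    {
            struct bde bpl[] = { { 1024 }, { 1024 } };
            uint32_t reported = 16384;      /* bogus hardware length */
            uint32_t max = max_response_len(bpl, 2);
            uint32_t len = reported < max ? reported : max;

            printf("clamped %u -> %u\n", reported, len);
            return 0;
    }
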
@@ -15098,6 +15258,7 @@
 	uint16_t max_rpi, rpi_limit;
 	uint16_t rpi_remaining, lrpi = 0;
 	struct lpfc_rpi_hdr *rpi_hdr;
+	unsigned long iflag;
 
 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
 	rpi_limit = phba->sli4_hba.next_rpi;
@@ -15106,7 +15267,7 @@
 	 * Fetch the next logical rpi.  Because this index is logical,
 	 * the  driver starts at 0 each time.
 	 */
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflag);
 	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
 	if (rpi >= rpi_limit)
 		rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15122,7 +15283,7 @@
 	 */
 	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
 	    (phba->sli4_hba.rpi_count >= max_rpi)) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return rpi;
 	}
 
@@ -15131,7 +15292,7 @@
 	 * extents.
 	 */
 	if (!phba->sli4_hba.rpi_hdrs_in_use) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return rpi;
 	}
 
@@ -15142,7 +15303,7 @@
 	 * how many are supported max by the device.
 	 */
 	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
 		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
 		if (!rpi_hdr) {
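
The spin_lock_irq() to spin_lock_irqsave() conversion in
lpfc_sli4_alloc_rpi() matters because the _irq variants re-enable
interrupts unconditionally on unlock, which is only safe when no caller
ever arrives with interrupts already disabled. A minimal kernel-style
sketch of the save/restore idiom (the lock and critical section are
placeholders):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void safe_from_any_context(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&example_lock, flags);
            /* interrupt state is exactly what the caller had */
    }
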
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 85120b7..298c8cd 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -673,6 +673,7 @@
 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
 int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
 int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f58f183..e3094c4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.42"
+#define LPFC_DRIVER_VERSION "8.3.43"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 515c962..d1a4b82 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -534,7 +534,6 @@
 	return 0;
 
 out_cmm_unreg:
-	pci_set_drvdata(pdev, NULL);
 	megaraid_cmm_unregister(adapter);
 out_fini_mbox:
 	megaraid_fini_mbox(adapter);
@@ -594,11 +593,6 @@
 	// detach from the IO sub-system
 	megaraid_io_detach(adapter);
 
-	// reset the device state in the PCI structure. We check this
-	// condition when we enter here. If the device state is NULL,
-	// that would mean the device has already been removed
-	pci_set_drvdata(pdev, NULL);
-
 	// Unregister from common management module
 	//
 	// FIXME: this must return success or failure for conditions if there
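
The dropped pci_set_drvdata(pdev, NULL) calls here, and the matching
removals in megaraid_sas, mvsas, mvumi, pm8001 and pmcraid below, are
redundant because the driver core clears the per-device driver data
once a device is unbound. A hedged kernel-style sketch; struct adapter
and teardown() are hypothetical driver pieces:

    #include <linux/pci.h>

    struct adapter;                                 /* hypothetical state */
    static void teardown(struct adapter *ad);       /* hypothetical */

    static void example_remove(struct pci_dev *pdev)
    {
            struct adapter *ad = pci_get_drvdata(pdev);

            teardown(ad);
            /* no pci_set_drvdata(pdev, NULL): the core clears it */
    }
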
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0c73ba4..e9e543c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1531,6 +1531,7 @@
 	struct megasas_register_set __iomem *reg_set;
 	u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
+	struct megasas_pd_list          local_pd_list[MEGASAS_MAX_PD];
 	u8     ld_ids[MEGASAS_MAX_LD_IDS];
 	s8 init_id;
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3020921..0a743a5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3194,19 +3194,21 @@
 	     (le32_to_cpu(ci->count) <
 		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
 
-		memset(instance->pd_list, 0,
+		memset(instance->local_pd_list, 0,
 			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
 
 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
 
-			instance->pd_list[pd_addr->deviceId].tid	=
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
 				le16_to_cpu(pd_addr->deviceId);
-			instance->pd_list[pd_addr->deviceId].driveType	=
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
 							pd_addr->scsiDevType;
-			instance->pd_list[pd_addr->deviceId].driveState	=
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
 							MR_PD_STATE_SYSTEM;
 			pd_addr++;
 		}
+		memcpy(instance->pd_list, instance->local_pd_list,
+			sizeof(instance->pd_list));
 	}
 
 	pci_free_consistent(instance->pdev,
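
The hunk above rebuilds the physical-device list in local_pd_list and
publishes it into pd_list with a single memcpy(), instead of zeroing
the live list and repopulating it in place while other code paths may
be reading it. The staging-then-publish pattern in isolation:

    #include <stdio.h>
    #include <string.h>

    #define NDEV 4
    struct pd { int tid; int state; };

    static struct pd live[NDEV];            /* what readers look at */

    static void rescan(const int *ids, int n)
    {
            struct pd staging[NDEV];        /* built off to the side */
            int i;

            memset(staging, 0, sizeof(staging));
            for (i = 0; i < n; i++) {
                    staging[ids[i]].tid = ids[i];
                    staging[ids[i]].state = 1;
            }
            /*
             * Publish in one copy. memcpy() is not atomic per entry;
             * the point is to eliminate the long window in which the
             * live table sat fully zeroed during the rebuild.
             */
            memcpy(live, staging, sizeof(live));
    }

    int main(void)
    {
            int ids[] = { 0, 2 };

            rescan(ids, 2);
            printf("pd2 state=%d\n", live[2].state);
            return 0;
    }
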
@@ -3998,7 +4000,7 @@
 		 * values
 		 */
 		if ((prev_aen.members.class <= curr_aen.members.class) &&
-		    !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
+		    !((prev_aen.members.locale & curr_aen.members.locale) ^
 		      curr_aen.members.locale)) {
 			/*
 			 * Previously issued event registration includes
@@ -4006,7 +4008,7 @@
 			 */
 			return 0;
 		} else {
-			curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
+			curr_aen.members.locale |= prev_aen.members.locale;
 
 			if (prev_aen.members.class < curr_aen.members.class)
 				curr_aen.members.class = prev_aen.members.class;
@@ -4097,7 +4099,7 @@
 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
 
 	return megasas_register_aen(instance,
-			le32_to_cpu(eli.newest_seq_num) + 1,
+			eli.newest_seq_num + 1,
 			class_locale.word);
 }
 
@@ -4449,7 +4451,6 @@
 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 	megasas_mgmt_info.max_index--;
 
-	pci_set_drvdata(pdev, NULL);
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
 		for (i = 0 ; i < instance->msix_vectors; i++)
@@ -4805,8 +4806,6 @@
 		}
 	}
 
-	pci_set_drvdata(instance->pdev, NULL);
-
 	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
@@ -4848,8 +4847,6 @@
 				instance->evt_detail, instance->evt_detail_h);
 	scsi_host_put(host);
 
-	pci_set_drvdata(pdev, NULL);
-
 	pci_disable_device(pdev);
 
 	return;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7b7381d..5ff978b 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,7 +657,6 @@
 	tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
 #endif
 
-	pci_set_drvdata(pdev, NULL);
 	sas_unregister_ha(sha);
 	sas_remove_host(mvi->shost);
 	scsi_remove_host(mvi->shost);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6b1b4e9..6c1f223 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1411,7 +1411,7 @@
 
 		if (res) {
 			del_timer(&task->slow_task->timer);
-			mv_printk("executing internel task failed:%d\n", res);
+			mv_printk("executing internal task failed:%d\n", res);
 			goto ex_err;
 		}
 
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index c3601b5..edbee8d 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2583,7 +2583,6 @@
 	return 0;
 
 fail_io_attach:
-	pci_set_drvdata(pdev, NULL);
 	mhba->instancet->disable_intr(mhba);
 	free_irq(mhba->pdev->irq, mhba);
 fail_init_irq:
@@ -2618,7 +2617,6 @@
 	free_irq(mhba->pdev->irq, mhba);
 	mvumi_release_fw(mhba);
 	scsi_host_put(host);
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 	dev_dbg(&pdev->dev, "driver is removed!\n");
 }
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5982a58..7d014b1 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1615,7 +1615,7 @@
 	spinlock_t	smp_lock;	/* Lock for SMP threading       */
 
 	/*----------------------------------------------------------------
-	**	Chip and controller indentification.
+	**	Chip and controller identification.
 	**----------------------------------------------------------------
 	*/
 	int		unit;		/* Unit number			*/
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d99f41c..a04b4ff 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -309,6 +309,117 @@
 }
 static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
 /**
+ * pm8001_ctl_ib_queue_log_show - Inbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	int offset;
+	char *str = buf;
+	int start = 0;
+#define IB_MEMMAP(c)		\
+		(*(u32 *)((u8 *)pm8001_ha->		\
+		memoryMap.region[IB].virt_ptr +		\
+		pm8001_ha->evtlog_ib_offset + (c)))
+
+	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+		str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+		start = start + 4;
+	}
+	pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
+	if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+		&& (pm8001_ha->chip_id != chip_8001))
+		pm8001_ha->evtlog_ib_offset = 0;
+	if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+		&& (pm8001_ha->chip_id == chip_8001))
+		pm8001_ha->evtlog_ib_offset = 0;
+
+	return str - buf;
+}
+
+static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
+/**
+ * pm8001_ctl_ob_queue_log_show - Outbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	int offset;
+	char *str = buf;
+	int start = 0;
+#define OB_MEMMAP(c)		\
+		(*(u32 *)((u8 *)pm8001_ha->		\
+		memoryMap.region[OB].virt_ptr +		\
+		pm8001_ha->evtlog_ob_offset + (c)))
+
+	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+		str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+		start = start + 4;
+	}
+	pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
+	if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+			&& (pm8001_ha->chip_id != chip_8001))
+		pm8001_ha->evtlog_ob_offset = 0;
+	if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+			&& (pm8001_ha->chip_id == chip_8001))
+		pm8001_ha->evtlog_ob_offset = 0;
+
+	return str - buf;
+}
+static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
+/**
+ * pm8001_ctl_bios_version_show - BIOS version display
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	char *str = buf;
+	void *virt_addr;
+	int bios_index;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct pm8001_ioctl_payload payload;
+
+	pm8001_ha->nvmd_completion = &completion;
+	payload.minor_function = 7;
+	payload.offset = 0;
+	payload.length = 4096;
+	payload.func_specific = kzalloc(4096, GFP_KERNEL);
+	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+	wait_for_completion(&completion);
+	virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+	for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
+		bios_index++)
+		str += sprintf(str, "%c",
+			*((u8 *)virt_addr + bios_index));
+	return str - buf;
+}
+static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
+/**
  * pm8001_ctl_aap_log_show - IOP event log
  * @cdev: pointer to embedded class device
  * @buf: the buffer returned
@@ -344,6 +455,43 @@
 }
 static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
 
+/**
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	u32 count;
+
+	count = pm80xx_get_fatal_dump(cdev, attr, buf);
+	return count;
+}
+
+static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+
+
+/**
+ * pm8001_ctl_gsm_log_show - GSM dump collection
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	u32 count;
+
+	count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
+	return count;
+}
+
+static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
+
 #define FLASH_CMD_NONE      0x00
 #define FLASH_CMD_UPDATE    0x01
 #define FLASH_CMD_SET_NVMD    0x02
@@ -603,12 +751,17 @@
 	&dev_attr_update_fw,
 	&dev_attr_aap_log,
 	&dev_attr_iop_log,
+	&dev_attr_fatal_log,
+	&dev_attr_gsm_log,
 	&dev_attr_max_out_io,
 	&dev_attr_max_devices,
 	&dev_attr_max_sg_list,
 	&dev_attr_sas_spec_support,
 	&dev_attr_logging_level,
 	&dev_attr_host_sas_address,
+	&dev_attr_bios_version,
+	&dev_attr_ib_log,
+	&dev_attr_ob_log,
 	NULL,
 };
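
The new ib_log/ob_log attributes page through the queue event logs:
each read returns IB_OB_READ_TIMES dwords and advances a persistent
per-host offset by SYSFS_OFFSET, wrapping at the queue size, so
repeated reads walk the whole log. A toy model of that cursor
behaviour (WINDOW and QSIZE are illustrative, not the driver's
constants):

    #include <stdio.h>

    #define WINDOW  4       /* dwords returned per read */
    #define QSIZE   16      /* total dwords in the log */

    static unsigned int log_buf[QSIZE];
    static unsigned int offset;     /* persists across reads */

    /* Each call returns the next window and advances the cursor. */
    static void show_window(void)
    {
            unsigned int i;

            for (i = 0; i < WINDOW; i++)
                    printf("0x%08x\n", log_buf[offset + i]);

            offset += WINDOW;
            if (offset % QSIZE == 0)  /* wrap, like evtlog_ib_offset */
                    offset = 0;
    }

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < QSIZE; i++)
                    log_buf[i] = i;
            show_window();  /* dwords 0..3 */
            show_window();  /* dwords 4..7 */
            return 0;
    }
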
 
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 63ad4aa..d0d43a2 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,6 +45,8 @@
 #define HEADER_LEN			28
 #define SIZE_OFFSET			16
 
+#define BIOSOFFSET			56
+#define BIOS_OFFSET_LIMIT		61
 
 #define FLASH_OK                        0x000000
 #define FAIL_OPEN_BIOS_FILE             0x000100
@@ -53,5 +55,9 @@
 #define FAIL_OUT_MEMORY                 0x000c00
 #define FLASH_IN_PROGRESS               0x001000
 
+#define IB_OB_READ_TIMES                256
+#define SYSFS_OFFSET                    1024
+#define PM80XX_IB_OB_QUEUE_SIZE         (32 * 1024)
+#define PM8001_IB_OB_QUEUE_SIZE         (16 * 1024)
 #endif /* PM8001_CTL_H_INCLUDED */
 
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 479c5a7..74a4bb9 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -46,7 +46,10 @@
 	chip_8008,
 	chip_8009,
 	chip_8018,
-	chip_8019
+	chip_8019,
+	chip_8074,
+	chip_8076,
+	chip_8077
 };
 
 enum phy_speed {
@@ -99,7 +102,8 @@
 	NVMD,	    /* NVM device */
 	DEV_MEM,    /* memory for devices */
 	CCB_MEM,    /* memory for command control block */
-	FW_FLASH    /* memory for fw flash update */
+	FW_FLASH,    /* memory for fw flash update */
+	FORENSIC_MEM  /* memory for fw forensic data */
 };
 #define	PM8001_EVENT_LOG_SIZE	 (128 * 1024)
 
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4a21957..f16ece9 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1868,6 +1868,13 @@
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 	ts = &t->task_status;
+	/* Print sas address of IO failed device */
+	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+		(status != IO_UNDERFLOW))
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SAS Address of IO Failure Drive:"
+			"%016llx", SAS_ADDR(t->dev->sas_addr)));
+
 	switch (status) {
 	case IO_SUCCESS:
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2276,6 +2283,11 @@
 	u32 param;
 	u32 status;
 	u32 tag;
+	int i, j;
+	u8 sata_addr_low[4];
+	u32 temp_sata_addr_low;
+	u8 sata_addr_hi[4];
+	u32 temp_sata_addr_hi;
 	struct sata_completion_resp *psataPayload;
 	struct task_status_struct *ts;
 	struct ata_task_resp *resp ;
@@ -2325,7 +2337,46 @@
 			pm8001_printk("ts null\n"));
 		return;
 	}
-
+	/* Print sas address of IO failed device */
+	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+		(status != IO_UNDERFLOW)) {
+		if (!((t->dev->parent) &&
+			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+			for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+				sata_addr_low[i] = pm8001_ha->sas_addr[j];
+			for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+				sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+			memcpy(&temp_sata_addr_low, sata_addr_low,
+				sizeof(sata_addr_low));
+			memcpy(&temp_sata_addr_hi, sata_addr_hi,
+				sizeof(sata_addr_hi));
+			temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+						|((temp_sata_addr_hi << 8) &
+						0xff0000) |
+						((temp_sata_addr_hi >> 8)
+						& 0xff00) |
+						((temp_sata_addr_hi << 24) &
+						0xff000000));
+			temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+						& 0xff) |
+						((temp_sata_addr_low << 8)
+						& 0xff0000) |
+						((temp_sata_addr_low >> 8)
+						& 0xff00) |
+						((temp_sata_addr_low << 24)
+						& 0xff000000)) +
+						pm8001_dev->attached_phy +
+						0x10);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SAS Address of IO Failure Drive:"
+				"%08x%08x", temp_sata_addr_hi,
+					temp_sata_addr_low));
+		} else {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SAS Address of IO Failure Drive:"
+				"%016llx", SAS_ADDR(t->dev->sas_addr)));
+		}
+	}
 	switch (status) {
 	case IO_SUCCESS:
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
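
The long shift/mask expressions above are open-coded 32-bit byte
swaps: for a directly attached drive the driver reconstructs the
drive's SAS address from the HBA's address halves and folds
attached_phy + 0x10 into the low half. The same arithmetic with an
explicit swap helper (the address bytes are invented; on a
little-endian host this mirrors the hunk):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same operation as the shift/mask chains: reverse four bytes. */
    static uint32_t bswap32(uint32_t v)
    {
            return ((v >> 24) & 0xff) | ((v << 8) & 0xff0000) |
                   ((v >> 8) & 0xff00) | ((v << 24) & 0xff000000u);
    }

    int main(void)
    {
            /* Hypothetical 8-byte HBA SAS address, MSB first. */
            uint8_t sas_addr[8] = { 0x50, 0x01, 0x04, 0xf0,
                                    0x00, 0x00, 0x00, 0x3f };
            unsigned attached_phy = 2;
            uint32_t hi, lo;

            memcpy(&hi, &sas_addr[0], 4);
            memcpy(&lo, &sas_addr[4], 4);

            /* Derive the attached drive's address from the HBA's. */
            hi = bswap32(hi);
            lo = bswap32(lo) + attached_phy + 0x10;

            printf("drive SAS address: %08x%08x\n",
                   (unsigned)hi, (unsigned)lo);
            return 0;
    }
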
@@ -3087,8 +3138,8 @@
 	struct pm8001_device *pm8001_dev = ccb->device;
 	u32 status = le32_to_cpu(pPayload->status);
 	u32 device_id = le32_to_cpu(pPayload->device_id);
-	u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS;
-	u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS;
+	u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+	u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
 		"from 0x%x to 0x%x status = 0x%x!\n",
 		device_id, pds, nds, status));
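
The pds/nds one-liner is a classic field-extraction fix: OR-ing a
value with a mask sets every mask bit unconditionally, while AND
extracts the field. Demonstrated with illustrative masks (not the
firmware's actual layout):

    #include <stdio.h>

    #define PDS_BITS 0x0f   /* illustrative field masks */
    #define NDS_BITS 0xf0

    int main(void)
    {
            unsigned int pds_nds = 0x21;    /* nds=2, pds=1 */

            /* Wrong: OR forces the mask bits on (0x21 | 0x0f = 0x2f). */
            printf("or : 0x%02x\n", pds_nds | PDS_BITS);

            /* Right: AND extracts the field (0x21 & 0x0f = 0x01). */
            printf("and: 0x%02x\n", pds_nds & PDS_BITS);
            printf("nds: 0x%02x\n", (pds_nds & NDS_BITS) >> 4);
            return 0;
    }
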
@@ -4700,6 +4751,8 @@
 	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
 	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
 	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+	if (pm8001_ha->chip_id != chip_8001)
+		sspTMCmd.ds_ads_m = 0x08;
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
 	return ret;
@@ -4778,6 +4831,16 @@
 		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
 		break;
 	}
+	case IOP_RDUMP: {
+		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
+		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+		nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
+		nvmd_req.resp_addr_hi =
+		cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+		nvmd_req.resp_addr_lo =
+		cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+		break;
+	}
 	default:
 		break;
 	}
@@ -4938,6 +5001,89 @@
 	return rc;
 }
 
+ssize_t
+pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
+{
+	u32 value, rem, offset = 0, bar = 0;
+	u32 index, work_offset, dw_length;
+	u32 shift_value, gsm_base, gsm_dump_offset;
+	char *direct_data;
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	direct_data = buf;
+	gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
+
+	/* check max is 1 Mbytes */
+	if ((length > 0x100000) || (gsm_dump_offset & 3) ||
+		((gsm_dump_offset + length) > 0x1000000))
+			return 1;
+
+	if (pm8001_ha->chip_id == chip_8001)
+		bar = 2;
+	else
+		bar = 1;
+
+	work_offset = gsm_dump_offset & 0xFFFF0000;
+	offset = gsm_dump_offset & 0x0000FFFF;
+	gsm_dump_offset = work_offset;
+	/* adjust length to dword boundary */
+	rem = length & 3;
+	dw_length = length >> 2;
+
+	for (index = 0; index < dw_length; index++) {
+		if ((work_offset + offset) & 0xFFFF0000) {
+			if (pm8001_ha->chip_id == chip_8001)
+				shift_value = ((gsm_dump_offset + offset) &
+						SHIFT_REG_64K_MASK);
+			else
+				shift_value = (((gsm_dump_offset + offset) &
+						SHIFT_REG_64K_MASK) >>
+						SHIFT_REG_BIT_SHIFT);
+
+			if (pm8001_ha->chip_id == chip_8001) {
+				gsm_base = GSM_BASE;
+				if (-1 == pm8001_bar4_shift(pm8001_ha,
+						(gsm_base + shift_value)))
+					return 1;
+			} else {
+				gsm_base = 0;
+				if (-1 == pm80xx_bar4_shift(pm8001_ha,
+						(gsm_base + shift_value)))
+					return 1;
+			}
+			gsm_dump_offset = (gsm_dump_offset + offset) &
+						0xFFFF0000;
+			work_offset = 0;
+			offset = offset & 0x0000FFFF;
+		}
+		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+						0x0000FFFF);
+		direct_data += sprintf(direct_data, "%08x ", value);
+		offset += 4;
+	}
+	if (rem != 0) {
+		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+						0x0000FFFF);
+		/* xfr for non_dw */
+		direct_data += sprintf(direct_data, "%08x ", value);
+	}
+	/* Shift back to BAR4 original address */
+	if (pm8001_ha->chip_id == chip_8001) {
+		if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+			return 1;
+	} else {
+		if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
+			return 1;
+	}
+	pm8001_ha->fatal_forensic_shift_offset += 1024;
+
+	if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
+		pm8001_ha->fatal_forensic_shift_offset = 0;
+	return direct_data - buf;
+}
+
 int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
 	struct pm8001_device *pm8001_dev, u32 state)
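
pm8001_get_gsm_dump() above reads up to 1 MB of GSM space through a
BAR that exposes only 64 KB at a time, reprogramming a shift register
whenever the requested offset leaves the current window. A
self-contained model of that windowing, with an in-memory array
standing in for device space:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define WIN (64 * 1024)                 /* bytes visible per window */

    static uint8_t device_mem[4 * WIN];     /* pretend GSM space */
    static uint32_t window_base;            /* the "shift register" */

    /* Read a dword through the window at the current base. */
    static uint32_t win_read32(uint32_t off_in_window)
    {
            uint32_t v;

            memcpy(&v, &device_mem[window_base + off_in_window],
                   sizeof(v));
            return v;
    }

    static uint32_t read_anywhere(uint32_t addr)
    {
            /* Re-point the window when the address falls outside. */
            if (addr < window_base || addr >= window_base + WIN)
                    window_base = addr & ~(uint32_t)(WIN - 1);
            return win_read32(addr - window_base);
    }

    int main(void)
    {
            device_mem[3 * WIN] = 0xab;
            /* Shifts the window, then reads: low byte is 0xab on a
             * little-endian host. */
            printf("0x%08x\n", read_anywhere(3 * WIN));
            return 0;
    }
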
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d7c1e20..6d91e24 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -1027,5 +1027,8 @@
 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06
 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07
 
+#define GSM_BASE					0x4F0000
+#define SHIFT_REG_64K_MASK				0xffff0000
+#define SHIFT_REG_BIT_SHIFT				8
 #endif
 
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f7c1896..34f5f5f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -54,6 +54,9 @@
 	[chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
 	[chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
 	[chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
+	[chip_8074] = {0,  8, &pm8001_80xx_dispatch,},
+	[chip_8076] = {0,  16, &pm8001_80xx_dispatch,},
+	[chip_8077] = {0,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -344,6 +347,10 @@
 	/* Memory region for fw flash */
 	pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
 
+	pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
+	pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
+	pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
+	pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
 	for (i = 0; i < USI_MAX_MEMCNT; i++) {
 		if (pm8001_mem_alloc(pm8001_ha->pdev,
 			&pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -664,6 +671,31 @@
 #endif
 }
 
+/**
+ * pm8001_get_phy_settings_info - read phy setting values
+ * @pm8001_ha: our hba
+ */
+void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+{
+
+#ifdef PM8001_READ_VPD
+	/*OPTION ROM FLASH read for the SPC cards */
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct pm8001_ioctl_payload payload;
+
+	pm8001_ha->nvmd_completion = &completion;
+	/* SAS ADDRESS read from flash / EEPROM */
+	payload.minor_function = 6;
+	payload.offset = 0;
+	payload.length = 4096;
+	payload.func_specific = kzalloc(4096, GFP_KERNEL);
+	/* Read phy setting values from flash */
+	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+	wait_for_completion(&completion);
+	pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+#endif
+}
+
 #ifdef PM8001_USE_MSIX
 /**
  * pm8001_setup_msix - enable MSI-X interrupt
@@ -844,6 +876,10 @@
 	}
 
 	pm8001_init_sas_add(pm8001_ha);
+	/* phy setting support for motherboard controller */
+	if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
+		pdev->subsystem_vendor != 0)
+		pm8001_get_phy_settings_info(pm8001_ha);
 	pm8001_post_sas_ha_init(shost, chip);
 	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
 	if (rc)
@@ -873,7 +909,6 @@
 	struct pm8001_hba_info *pm8001_ha;
 	int i;
 	pm8001_ha = sha->lldd_ha;
-	pci_set_drvdata(pdev, NULL);
 	sas_unregister_ha(sha);
 	sas_remove_host(pm8001_ha->shost);
 	list_del(&pm8001_ha->list);
@@ -1037,6 +1072,12 @@
 	{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
 	{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
 	{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
 	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
 		PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
 	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
@@ -1057,6 +1098,24 @@
 		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
 	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
 		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
 	{} /* terminate list */
 };
 
@@ -1108,8 +1167,11 @@
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
+MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
 MODULE_DESCRIPTION(
-		"PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
+		"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+		"SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index a85d73d..f4eb18e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -447,7 +447,6 @@
 			break;
 		case SAS_PROTOCOL_SATA:
 		case SAS_PROTOCOL_STP:
-		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
 			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
 			break;
 		default:
@@ -704,6 +703,8 @@
 	int res, retry;
 	struct sas_task *task = NULL;
 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	DECLARE_COMPLETION_ONSTACK(completion_setstate);
 
 	for (retry = 0; retry < 3; retry++) {
 		task = sas_alloc_slow_task(GFP_KERNEL);
@@ -729,6 +730,12 @@
 			goto ex_err;
 		}
 		wait_for_completion(&task->slow_task->completion);
+		if (pm8001_ha->chip_id != chip_8001) {
+			pm8001_dev->setds_completion = &completion_setstate;
+				PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+					pm8001_dev, 0x01);
+			wait_for_completion(&completion_setstate);
+		}
 		res = -TMF_RESP_FUNC_FAILED;
 		/* Even TMF timed out, return direct. */
 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 5708194..6037d47 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -104,6 +104,9 @@
 
 
 #define DEV_IS_EXPANDER(type)	((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+#define IS_SPCV_12G(dev)	((dev->device == 0X8074)		\
+				|| (dev->device == 0X8076)		\
+				|| (dev->device == 0X8077))
 
 #define PM8001_NAME_LENGTH		32/* generic length of strings */
 extern struct list_head hba_list;
@@ -129,6 +132,61 @@
 	u8	*func_specific;
 };
 
+#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
+#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
+#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET            0x00     /* HNFBUFL */
+#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET            0x04     /* HNFBUFH */
+#define MPI_FATAL_EDUMP_TABLE_LENGTH               0x08     /* HNFBLEN */
+#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE            0x0C     /* FDDHSHK */
+#define MPI_FATAL_EDUMP_TABLE_STATUS               0x10     /* FDDTSTAT */
+#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN            0x14     /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_HANDSHAKE_RDY              0x1
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY             0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD                 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED           0x1
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE      0x3
+#define TYPE_GSM_SPACE        1
+#define TYPE_QUEUE            2
+#define TYPE_FATAL            3
+#define TYPE_NON_FATAL        4
+#define TYPE_INBOUND          1
+#define TYPE_OUTBOUND         2
+struct forensic_data {
+	u32  data_type;
+	union {
+		struct {
+			u32  direct_len;
+			u32  direct_offset;
+			void  *direct_data;
+		} gsm_buf;
+		struct {
+			u16  queue_type;
+			u16  queue_index;
+			u32  direct_len;
+			void  *direct_data;
+		} queue_buf;
+		struct {
+			u32  direct_len;
+			u32  direct_offset;
+			u32  read_len;
+			void  *direct_data;
+		} data_buf;
+	};
+};
+
+/* bit31-26 - mask bar */
+#define SCRATCH_PAD0_BAR_MASK                    0xFC000000
+/* bit25-0  - offset mask */
+#define SCRATCH_PAD0_OFFSET_MASK                 0x03FFFFFF
+/* if AAP error state */
+#define SCRATCH_PAD0_AAPERR_MASK                 0xFFFFFFFF
+/* Inbound doorbell bit7 */
+#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP	 0x80
+/* Inbound doorbell bit7 SPCV */
+#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO  0x80
+#define MAIN_MERRDCTO_MERRDCES		         0xA0/* DWORD 0x28) */
+
 struct pm8001_dispatch {
 	char *name;
 	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -343,6 +401,7 @@
 	u32			phy_attr_table_offset;
 	u32			port_recovery_timer;
 	u32			interrupt_reassertion_delay;
+	u32			fatal_n_non_fatal_dump;	        /* 0x28 */
 	} pm80xx_tbl;
 };
 
@@ -417,6 +476,13 @@
 	struct pm8001_hba_memspace io_mem[6];
 	struct mpi_mem_req	memoryMap;
 	struct encrypt		encrypt_info; /* support encryption */
+	struct forensic_data	forensic_info;
+	u32			fatal_bar_loc;
+	u32			forensic_last_offset;
+	u32			fatal_forensic_shift_offset;
+	u32			forensic_fatal_step;
+	u32			evtlog_ib_offset;
+	u32			evtlog_ob_offset;
 	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/
 	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/
 	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/
@@ -425,6 +491,7 @@
 	void __iomem	*pspa_q_tbl_addr;
 			/*MPI SAS PHY attributes Queue Config Table Addr*/
 	void __iomem	*ivt_tbl_addr; /*MPI IVT Table Addr */
+	void __iomem	*fatal_tbl_addr; /*MPI IVT Table Addr */
 	union main_cfg_table	main_cfg_tbl;
 	union general_status_table	gs_tbl;
 	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
@@ -629,7 +696,12 @@
 int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
 
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
-
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+	u32 length, u8 *buf);
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+		struct device_attribute *attr, char *buf);
+ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
 /* ctl shared API */
 extern struct device_attribute *pm8001_host_attrs[];
 
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9f91030..8987b17 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -45,6 +45,228 @@
 
 #define SMP_DIRECT 1
 #define SMP_INDIRECT 2
+
+
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+{
+	u32 reg_val;
+	unsigned long start;
+	pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
+	/* confirm the setting is written */
+	start = jiffies + HZ; /* 1 sec */
+	do {
+		reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
+	} while ((reg_val != shift_value) && time_before(jiffies, start));
+	if (reg_val != shift_value) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+			" = 0x%x\n", reg_val));
+		return -1;
+	}
+	return 0;
+}
+
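
pm80xx_bar4_shift() above writes the shift register and then polls the
readback for up to a second before trusting that the posted write took
effect. The write-then-verify idiom as a kernel-style sketch;
reg_write(), reg_read() and SHIFT_REG are hypothetical stand-ins for
the MMIO accessors and register offset:

    #include <linux/jiffies.h>
    #include <linux/errno.h>

    #define SHIFT_REG 0x1010                        /* hypothetical offset */
    static void reg_write(u32 reg, u32 val);        /* hypothetical */
    static u32 reg_read(u32 reg);                   /* hypothetical */

    static int set_shift(u32 value)
    {
            unsigned long deadline = jiffies + HZ;  /* 1 second budget */
            u32 readback;

            reg_write(SHIFT_REG, value);            /* posted write */
            do {
                    readback = reg_read(SHIFT_REG);
            } while (readback != value && time_before(jiffies, deadline));

            return readback == value ? 0 : -ETIMEDOUT;
    }
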
+void pm80xx_pci_mem_copy(struct pm8001_hba_info  *pm8001_ha, u32 soffset,
+				void *destination,
+				u32 dw_count, u32 bus_base_number)
+{
+	u32 index, value, offset;
+	u32 *destination1;
+	destination1 = (u32 *)destination;
+
+	for (index = 0; index < dw_count; index += 4, destination1++) {
+		offset = (soffset + index / 4);
+		if (offset < (64 * 1024)) {
+			value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+			*destination1 =  cpu_to_le32(value);
+		}
+	}
+	return;
+}
+
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
+	u32 status = 1;
+	u32 accum_len, reg_val, index, *temp;
+	unsigned long start;
+	u8 *direct_data;
+	char *fatal_error_data = buf;
+
+	pm8001_ha->forensic_info.data_buf.direct_data = buf;
+	if (pm8001_ha->chip_id == chip_8001) {
+		pm8001_ha->forensic_info.data_buf.direct_data +=
+			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+			"Not supported for SPC controller");
+		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+			(char *)buf;
+	}
+	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+		direct_data = (u8 *)fatal_error_data;
+		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
+		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+		pm8001_ha->forensic_info.data_buf.read_len = 0;
+
+		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+	}
+
+	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+		/* start to get data */
+		/* Program the MEMBASE II Shifting Register with 0x00.*/
+		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+				pm8001_ha->fatal_forensic_shift_offset);
+		pm8001_ha->forensic_last_offset = 0;
+		pm8001_ha->forensic_fatal_step = 0;
+		pm8001_ha->fatal_bar_loc = 0;
+	}
+	/* Read until accum_len is retrieved */
+	accum_len = pm8001_mr32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+	PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
+						accum_len));
+	if (accum_len == 0xFFFFFFFF) {
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Possible PCI issue 0x%x not expected\n",
+				accum_len));
+		return status;
+	}
+	if (accum_len == 0 || accum_len >= 0x100000) {
+		pm8001_ha->forensic_info.data_buf.direct_data +=
+			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 0xFFFFFFFF);
+		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+			(char *)buf;
+	}
+	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+	if (pm8001_ha->forensic_fatal_step == 0) {
+moreData:
+		if (pm8001_ha->forensic_info.data_buf.direct_data) {
+			/* Data is in bar, copy to host memory */
+			pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
+			 pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+				pm8001_ha->forensic_info.data_buf.direct_len ,
+					1);
+		}
+		pm8001_ha->fatal_bar_loc +=
+			pm8001_ha->forensic_info.data_buf.direct_len;
+		pm8001_ha->forensic_info.data_buf.direct_offset +=
+			pm8001_ha->forensic_info.data_buf.direct_len;
+		pm8001_ha->forensic_last_offset	+=
+			pm8001_ha->forensic_info.data_buf.direct_len;
+		pm8001_ha->forensic_info.data_buf.read_len =
+			pm8001_ha->forensic_info.data_buf.direct_len;
+
+		if (pm8001_ha->forensic_last_offset  >= accum_len) {
+			pm8001_ha->forensic_info.data_buf.direct_data +=
+			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 3);
+			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+				pm8001_ha->forensic_info.data_buf.direct_data +=
+					sprintf(pm8001_ha->
+					 forensic_info.data_buf.direct_data,
+						"%08x ", *(temp + index));
+			}
+
+			pm8001_ha->fatal_bar_loc = 0;
+			pm8001_ha->forensic_fatal_step = 1;
+			pm8001_ha->fatal_forensic_shift_offset = 0;
+			pm8001_ha->forensic_last_offset	= 0;
+			status = 0;
+			return (char *)pm8001_ha->
+				forensic_info.data_buf.direct_data -
+				(char *)buf;
+		}
+		if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
+			pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(pm8001_ha->
+					forensic_info.data_buf.direct_data,
+					"%08x ", 2);
+			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+				pm8001_ha->forensic_info.data_buf.direct_data +=
+					sprintf(pm8001_ha->
+					forensic_info.data_buf.direct_data,
+					"%08x ", *(temp + index));
+			}
+			status = 0;
+			return (char *)pm8001_ha->
+				forensic_info.data_buf.direct_data -
+				(char *)buf;
+		}
+
+		/* Increment the MEMBASE II Shifting Register value by 0x100.*/
+		pm8001_ha->forensic_info.data_buf.direct_data +=
+			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 2);
+		for (index = 0; index < 256; index++) {
+			pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(pm8001_ha->
+					forensic_info.data_buf.direct_data,
+						"%08x ", *(temp + index));
+		}
+		pm8001_ha->fatal_forensic_shift_offset += 0x100;
+		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+			pm8001_ha->fatal_forensic_shift_offset);
+		pm8001_ha->fatal_bar_loc = 0;
+		status = 0;
+		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+			(char *)buf;
+	}
+	if (pm8001_ha->forensic_fatal_step == 1) {
+		pm8001_ha->fatal_forensic_shift_offset = 0;
+		/* Read 64K of the debug data. */
+		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+			pm8001_ha->fatal_forensic_shift_offset);
+		pm8001_mw32(fatal_table_address,
+			MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+				MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+		/* Poll FDDHSHK  until clear  */
+		start = jiffies + (2 * HZ); /* 2 sec */
+
+		do {
+			reg_val = pm8001_mr32(fatal_table_address,
+					MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
+		} while ((reg_val) && time_before(jiffies, start));
+
+		if (reg_val != 0) {
+			PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+			" = 0x%x\n", reg_val));
+			return -1;
+		}
+
+		/* Read the next 64K of the debug data. */
+		pm8001_ha->forensic_fatal_step = 0;
+		if (pm8001_mr32(fatal_table_address,
+			MPI_FATAL_EDUMP_TABLE_STATUS) !=
+				MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+			pm8001_mw32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
+			goto moreData;
+		} else {
+			pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(pm8001_ha->
+					forensic_info.data_buf.direct_data,
+						"%08x ", 4);
+			pm8001_ha->forensic_info.data_buf.direct_len = 0;
+			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+			pm8001_ha->forensic_info.data_buf.read_len = 0;
+			status = 0;
+		}
+	}
+
+	return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+		(char *)buf;
+}
+
 /**
  * read_main_config_table - read the configure table and save it.
  * @pm8001_ha: our hba card information
@@ -430,7 +652,11 @@
 	table is updated */
 	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
 	/* wait until Inbound DoorBell Clear Register toggled */
-	max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+	if (IS_SPCV_12G(pm8001_ha->pdev)) {
+		max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+	} else {
+		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+	}
 	do {
 		udelay(1);
 		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -579,6 +805,9 @@
 	pm8001_ha->pspa_q_tbl_addr =
 		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
 					0xFFFFFF);
+	pm8001_ha->fatal_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
+					0xFFFFFF);
 
 	PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("GST OFFSET 0x%x\n",
@@ -913,7 +1142,11 @@
 	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
 
 	/* wait until Inbound DoorBell Clear Register toggled */
-	max_wait_count = 2 * 1000 * 1000;	/* 2 sec for spcv/ve */
+	if (IS_SPCV_12G(pm8001_ha->pdev)) {
+		max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+	} else {
+		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+	}
 	do {
 		udelay(1);
 		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -959,6 +1192,7 @@
 {
 	u32 regval;
 	u32 bootloader_state;
+	u32 ibutton0, ibutton1;
 
 	/* Check if MPI is in ready state to reset */
 	if (mpi_uninit_check(pm8001_ha) != 0) {
@@ -1017,7 +1251,27 @@
 	if (-1 == check_fw_ready(pm8001_ha)) {
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Firmware is not ready!\n"));
-		return -EBUSY;
+		/* check iButton feature support for motherboard controller */
+		if (pm8001_ha->pdev->subsystem_vendor !=
+			PCI_VENDOR_ID_ADAPTEC2 &&
+			pm8001_ha->pdev->subsystem_vendor != 0) {
+			ibutton0 = pm8001_cr32(pm8001_ha, 0,
+					MSGU_HOST_SCRATCH_PAD_6);
+			ibutton1 = pm8001_cr32(pm8001_ha, 0,
+					MSGU_HOST_SCRATCH_PAD_7);
+			if (!ibutton0 && !ibutton1) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("iButton Feature is"
+					" not Available!!!\n"));
+				return -EBUSY;
+			}
+			if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("CRC Check for iButton"
+					" Feature Failed!!!\n"));
+				return -EBUSY;
+			}
+		}
 	}
 	PM8001_INIT_DBG(pm8001_ha,
 		pm8001_printk("SPCv soft reset Complete\n"));
@@ -1268,6 +1522,13 @@
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 	ts = &t->task_status;
+	/* Print sas address of IO failed device */
+	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+		(status != IO_UNDERFLOW))
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SAS Address of IO Failure Drive"
+			":%016llx", SAS_ADDR(t->dev->sas_addr)));
+
 	switch (status) {
 	case IO_SUCCESS:
 		PM8001_IO_DBG(pm8001_ha,
@@ -1691,6 +1952,10 @@
 	u32 param;
 	u32 status;
 	u32 tag;
+	int i, j;
+	u8 sata_addr_low[4];
+	u32 temp_sata_addr_low, temp_sata_addr_hi;
+	u8 sata_addr_hi[4];
 	struct sata_completion_resp *psataPayload;
 	struct task_status_struct *ts;
 	struct ata_task_resp *resp ;
@@ -1740,7 +2005,47 @@
 			pm8001_printk("ts null\n"));
 		return;
 	}
+	/* Print sas address of IO failed device */
+	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+		(status != IO_UNDERFLOW)) {
+		if (!((t->dev->parent) &&
+			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+			for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+				sata_addr_low[i] = pm8001_ha->sas_addr[j];
+			for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+				sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+			memcpy(&temp_sata_addr_low, sata_addr_low,
+				sizeof(sata_addr_low));
+			memcpy(&temp_sata_addr_hi, sata_addr_hi,
+				sizeof(sata_addr_hi));
+			temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+						|((temp_sata_addr_hi << 8) &
+						0xff0000) |
+						((temp_sata_addr_hi >> 8)
+						& 0xff00) |
+						((temp_sata_addr_hi << 24) &
+						0xff000000));
+			temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+						& 0xff) |
+						((temp_sata_addr_low << 8)
+						& 0xff0000) |
+						((temp_sata_addr_low >> 8)
+						& 0xff00) |
+						((temp_sata_addr_low << 24)
+						& 0xff000000)) +
+						pm8001_dev->attached_phy +
+						0x10);
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SAS Address of IO Failure Drive:"
+				"%08x%08x", temp_sata_addr_hi,
+					temp_sata_addr_low));
 
+		} else {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("SAS Address of IO Failure Drive:"
+				"%016llx", SAS_ADDR(t->dev->sas_addr)));
+		}
+	}
 	switch (status) {
 	case IO_SUCCESS:
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3103,9 +3408,27 @@
 static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
 			void *piomb)
 {
-	PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" pm80xx_addition_functionality\n"));
+	u8 page_code;
+	struct set_phy_profile_resp *pPayload =
+		(struct set_phy_profile_resp *)(piomb + 4);
+	u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
+	u32 status = le32_to_cpu(pPayload->status);
 
+	page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
+	if (status) {
+		/* status is FAILED */
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("PhyProfile command failed  with status "
+			"0x%08X \n", status));
+		return -1;
+	} else {
+		if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Invalid page code 0x%X\n",
+					page_code));
+			return -1;
+		}
+	}
 	return 0;
 }
 
@@ -3484,8 +3807,6 @@
 	else
 		pm8001_ha->smp_exp_mode = SMP_INDIRECT;
 
-	/* DIRECT MODE support only in spcv/ve */
-	pm8001_ha->smp_exp_mode = SMP_DIRECT;
 
 	tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
 	preq_dma_addr = (char *)phys_to_virt(tmp_addr);
@@ -3501,7 +3822,7 @@
 		/* exclude top 4 bytes for SMP req header */
 		smp_cmd.long_smp_req.long_req_addr =
 			cpu_to_le64((u64)sg_dma_address
-				(&task->smp_task.smp_req) - 4);
+				(&task->smp_task.smp_req) + 4);
 		/* exclude 4 bytes for SMP req header and CRC */
 		smp_cmd.long_smp_req.long_req_size =
 			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
@@ -3604,10 +3925,10 @@
 	struct ssp_ini_io_start_req ssp_cmd;
 	u32 tag = ccb->ccb_tag;
 	int ret;
-	u64 phys_addr;
+	u64 phys_addr, start_addr, end_addr;
+	u32 end_addr_high, end_addr_low;
 	struct inbound_queue_table *circularQ;
-	static u32 inb;
-	static u32 outb;
+	u32 q_index;
 	u32 opc = OPC_INB_SSPINIIOSTART;
 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
 	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -3626,7 +3947,8 @@
 	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
 	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
 		       task->ssp_task.cmd->cmd_len);
-	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
 	/* Check if encryption is set */
 	if (pm8001_ha->chip->encrypt &&
@@ -3658,6 +3980,30 @@
 				cpu_to_le32(upper_32_bits(dma_addr));
 			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
 			ssp_cmd.enc_esgl = 0;
+			/* Check 4G Boundary */
+			start_addr = cpu_to_le64(dma_addr);
+			end_addr = (start_addr + ssp_cmd.enc_len) - 1;
+			end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+			end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+			if (end_addr_high != ssp_cmd.enc_addr_high) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("The sg list address "
+					"start_addr=0x%016llx data_len=0x%x "
+					"end_addr_high=0x%08x end_addr_low="
+					"0x%08x has crossed 4G boundary\n",
+						start_addr, ssp_cmd.enc_len,
+						end_addr_high, end_addr_low));
+				pm8001_chip_make_sg(task->scatter, 1,
+					ccb->buf_prd);
+				phys_addr = ccb->ccb_dma_handle +
+					offsetof(struct pm8001_ccb_info,
+						buf_prd[0]);
+				ssp_cmd.enc_addr_low =
+					cpu_to_le32(lower_32_bits(phys_addr));
+				ssp_cmd.enc_addr_high =
+					cpu_to_le32(upper_32_bits(phys_addr));
+				ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+			}
 		} else if (task->num_scatter == 0) {
 			ssp_cmd.enc_addr_low = 0;
 			ssp_cmd.enc_addr_high = 0;
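
The boundary check added above compares the upper 32 bits of the first
and last byte addresses of the transfer; when they differ, the buffer
straddles a 4 GiB line that the single direct-address descriptor
cannot express, so the command falls back to an external SG list (the
esgl bit). The test in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* True if [addr, addr + len) straddles a 4 GiB boundary. */
    static int crosses_4g(uint64_t addr, uint32_t len)
    {
            uint64_t end = addr + len - 1;

            return (uint32_t)(addr >> 32) != (uint32_t)(end >> 32);
    }

    int main(void)
    {
            /* 0xFFFFF000 + 0x2000 runs past the first 4 GiB line. */
            printf("%d\n", crosses_4g(0xFFFFF000ULL, 0x2000));  /* 1 */
            printf("%d\n", crosses_4g(0x100000000ULL, 0x2000)); /* 0 */
            return 0;
    }
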
@@ -3674,7 +4020,7 @@
 	} else {
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
 			"Sending Normal SAS command 0x%x inb q %x\n",
-			task->ssp_task.cmd->cmnd[0], inb));
+			task->ssp_task.cmd->cmnd[0], q_index));
 		/* fill in PRD (scatter/gather) table, if any */
 		if (task->num_scatter > 1) {
 			pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@@ -3693,6 +4039,30 @@
 				cpu_to_le32(upper_32_bits(dma_addr));
 			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
 			ssp_cmd.esgl = 0;
+			/* Check 4G Boundary */
+			start_addr = cpu_to_le64(dma_addr);
+			end_addr = (start_addr + ssp_cmd.len) - 1;
+			end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+			end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+			if (end_addr_high != ssp_cmd.addr_high) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("The sg list address "
+					"start_addr=0x%016llx data_len=0x%x "
+					"end_addr_high=0x%08x end_addr_low="
+					"0x%08x has crossed 4G boundary\n",
+						 start_addr, ssp_cmd.len,
+						 end_addr_high, end_addr_low));
+				pm8001_chip_make_sg(task->scatter, 1,
+					ccb->buf_prd);
+				phys_addr = ccb->ccb_dma_handle +
+					offsetof(struct pm8001_ccb_info,
+						 buf_prd[0]);
+				ssp_cmd.addr_low =
+					cpu_to_le32(lower_32_bits(phys_addr));
+				ssp_cmd.addr_high =
+					cpu_to_le32(upper_32_bits(phys_addr));
+				ssp_cmd.esgl = cpu_to_le32(1<<31);
+			}
 		} else if (task->num_scatter == 0) {
 			ssp_cmd.addr_low = 0;
 			ssp_cmd.addr_high = 0;
@@ -3700,11 +4070,9 @@
 			ssp_cmd.esgl = 0;
 		}
 	}
-	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
-
-	/* rotate the outb queue */
-	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
-
+	q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+						&ssp_cmd, q_index);
 	return ret;
 }
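
Replacing the static inb/outb counters, which were shared and
unsynchronized across CPUs, with a q_index derived from the device id
makes queue selection deterministic and keeps each device's commands
on a fixed queue pair. The selection in isolation (queue counts are
illustrative):

    #include <stdio.h>

    #define MAX_INB_NUM 8   /* illustrative queue count */

    /* Deterministic per-device queue choice, as in the hunk above. */
    static unsigned int q_for_dev(unsigned int dev_id)
    {
            return (dev_id & 0x00ffffff) % MAX_INB_NUM;
    }

    int main(void)
    {
            printf("dev 5  -> q %u\n", q_for_dev(5));
            printf("dev 13 -> q %u\n", q_for_dev(13));
            return 0;
    }
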
 
@@ -3716,18 +4084,19 @@
 	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
 	u32 tag = ccb->ccb_tag;
 	int ret;
-	static u32 inb;
-	static u32 outb;
+	u32 q_index;
 	struct sata_start_req sata_cmd;
 	u32 hdr_tag, ncg_tag = 0;
-	u64 phys_addr;
+	u64 phys_addr, start_addr, end_addr;
+	u32 end_addr_high, end_addr_low;
 	u32 ATAP = 0x0;
 	u32 dir;
 	struct inbound_queue_table *circularQ;
 	unsigned long flags;
 	u32 opc = OPC_INB_SATA_HOST_OPSTART;
 	memset(&sata_cmd, 0, sizeof(sata_cmd));
-	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
 	if (task->data_dir == PCI_DMA_NONE) {
 		ATAP = 0x04; /* no data*/
@@ -3788,6 +4157,31 @@
 			sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
 			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
 			sata_cmd.enc_esgl = 0;
+			/* Check 4G Boundary */
+			start_addr = cpu_to_le64(dma_addr);
+			end_addr = (start_addr + sata_cmd.enc_len) - 1;
+			end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+			end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+			if (end_addr_high != sata_cmd.enc_addr_high) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("The sg list address "
+					"start_addr=0x%016llx data_len=0x%x "
+					"end_addr_high=0x%08x end_addr_low"
+					"=0x%08x has crossed 4G boundary\n",
+						start_addr, sata_cmd.enc_len,
+						end_addr_high, end_addr_low));
+				pm8001_chip_make_sg(task->scatter, 1,
+					ccb->buf_prd);
+				phys_addr = ccb->ccb_dma_handle +
+						offsetof(struct pm8001_ccb_info,
+						buf_prd[0]);
+				sata_cmd.enc_addr_low =
+					lower_32_bits(phys_addr);
+				sata_cmd.enc_addr_high =
+					upper_32_bits(phys_addr);
+				sata_cmd.enc_esgl =
+					cpu_to_le32(1 << 31);
+			}
 		} else if (task->num_scatter == 0) {
 			sata_cmd.enc_addr_low = 0;
 			sata_cmd.enc_addr_high = 0;
@@ -3808,7 +4202,7 @@
 	} else {
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
 			"Sending Normal SATA command 0x%x inb %x\n",
-			sata_cmd.sata_fis.command, inb));
+			sata_cmd.sata_fis.command, q_index));
 		/* dad (bit 0-1) is 0 */
 		sata_cmd.ncqtag_atap_dir_m_dad =
 			cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -3829,6 +4223,30 @@
 			sata_cmd.addr_high = upper_32_bits(dma_addr);
 			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
 			sata_cmd.esgl = 0;
+			/* Check 4G Boundary */
+			start_addr = cpu_to_le64(dma_addr);
+			end_addr = (start_addr + sata_cmd.len) - 1;
+			end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+			end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+			if (end_addr_high != sata_cmd.addr_high) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("The sg list address "
+					"start_addr=0x%016llx data_len=0x%x"
+					"end_addr_high=0x%08x end_addr_low="
+					"0x%08x has crossed 4G boundary\n",
+						start_addr, sata_cmd.len,
+						end_addr_high, end_addr_low));
+				pm8001_chip_make_sg(task->scatter, 1,
+					ccb->buf_prd);
+				phys_addr = ccb->ccb_dma_handle +
+					offsetof(struct pm8001_ccb_info,
+					buf_prd[0]);
+				sata_cmd.addr_low =
+					lower_32_bits(phys_addr);
+				sata_cmd.addr_high =
+					upper_32_bits(phys_addr);
+				sata_cmd.esgl = cpu_to_le32(1 << 31);
+			}
 		} else if (task->num_scatter == 0) {
 			sata_cmd.addr_low = 0;
 			sata_cmd.addr_high = 0;
@@ -3905,12 +4323,9 @@
 			}
 		}
 	}
-
+	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
 	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-						&sata_cmd, outb++);
-
-	/* rotate the outb queue */
-	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+						&sata_cmd, q_index);
 	return ret;
 }
 
@@ -3941,9 +4356,16 @@
 	 ** [14]	0b disable spin up hold; 1b enable spin up hold
 	 ** [15] 0b no change in current PHY analog setup; 1b enable using SPAST
 	 */
-	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
-			LINKMODE_AUTO | LINKRATE_15 |
-			LINKRATE_30 | LINKRATE_60 | phy_id);
+	if (!IS_SPCV_12G(pm8001_ha->pdev))
+		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+				LINKMODE_AUTO | LINKRATE_15 |
+				LINKRATE_30 | LINKRATE_60 | phy_id);
+	else
+		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+				LINKMODE_AUTO | LINKRATE_15 |
+				LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+				phy_id);
+
 	/* SSC Disable and SAS Analog ST configuration */
 	/**
 	payload.ase_sh_lm_slr_phyid =
@@ -4102,6 +4524,45 @@
 	return IRQ_HANDLED;
 }
 
+void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+	u32 operation, u32 phyid, u32 length, u32 *buf)
+{
+	u32 tag , i, j = 0;
+	int rc;
+	struct set_phy_profile_req payload;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SET_PHY_PROFILE;
+
+	memset(&payload, 0, sizeof(payload));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
+	payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("phy profile command for phy %x, length is %d\n",
+			payload.ppc_phyid, length));
+	for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
+		payload.reserved[j] =  cpu_to_le32(*((u32 *)buf + i));
+		j++;
+	}
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+	u32 length, u8 *buf)
+{
+	u32 page_code, i;
+
+	page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+		mpi_set_phy_profile_req(pm8001_ha,
+			SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
+		length = length + PHY_DWORD_LENGTH;
+	}
+	PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+}
 const struct pm8001_dispatch pm8001_80xx_dispatch = {
 	.name			= "pmc80xx",
 	.chip_init		= pm80xx_chip_init,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 2b760ba..c86816b 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -168,6 +168,11 @@
 #define LINKRATE_15			(0x01 << 8)
 #define LINKRATE_30			(0x02 << 8)
 #define LINKRATE_60			(0x06 << 8)
+#define LINKRATE_120			(0x08 << 8)
+
+/* phy_profile */
+#define SAS_PHY_ANALOG_SETTINGS_PAGE	0x04
+#define PHY_DWORD_LENGTH		0xC
 
 /* Thermal related */
 #define	THERMAL_ENABLE			0x1
@@ -1223,10 +1228,10 @@
 
 /* MSGU CONFIGURATION TABLE*/
 
-#define SPCv_MSGU_CFG_TABLE_UPDATE		0x01
-#define SPCv_MSGU_CFG_TABLE_RESET		0x02
-#define SPCv_MSGU_CFG_TABLE_FREEZE		0x04
-#define SPCv_MSGU_CFG_TABLE_UNFREEZE		0x08
+#define SPCv_MSGU_CFG_TABLE_UPDATE		0x001
+#define SPCv_MSGU_CFG_TABLE_RESET		0x002
+#define SPCv_MSGU_CFG_TABLE_FREEZE		0x004
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE		0x008
 #define MSGU_IBDB_SET				0x00
 #define MSGU_HOST_INT_STATUS			0x08
 #define MSGU_HOST_INT_MASK			0x0C
@@ -1520,4 +1525,6 @@
 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06
 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07
 
+
+#define MEMBASE_II_SHIFT_REGISTER       0x1010
 #endif
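
The pm80xx hunks above guard each direct-mode SATA transfer against crossing a 4 GiB boundary: when the upper 32 bits of the first and last byte of the buffer differ, the driver logs the window and falls back to a one-entry external scatter/gather list kept in the CCB (the enc_esgl/esgl bit 31). A minimal standalone sketch of the check itself; crosses_4g() and the sample addresses are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* True when [addr, addr + len) straddles a 4 GiB boundary, i.e. the
     * upper 32 bits of the first and last byte differ.  Hardware that
     * latches only the low 32 bits mid-transfer would wrap otherwise. */
    static bool crosses_4g(uint64_t addr, uint32_t len)
    {
            uint64_t end = addr + len - 1;

            return (addr >> 32) != (end >> 32);
    }

    int main(void)
    {
            /* 8 KiB ending exactly at 4 GiB - 1: no crossing */
            printf("%d\n", crosses_4g(0xFFFFE000ULL, 0x2000));  /* 0 */
            /* one byte later the tail spills past 4 GiB: fall back */
            printf("%d\n", crosses_4g(0xFFFFE001ULL, 0x2000));  /* 1 */
            return 0;
    }
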
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 1eb7b028..e43db77 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -6049,7 +6049,6 @@
 
 out_disable_device:
 	atomic_dec(&pmcraid_adapter_count);
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 	return -ENODEV;
 }
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 62ee713..30d20e7 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -507,7 +507,7 @@
 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
 	/* PCIe -- adjust Maximum Read Request Size (2048). */
-	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+	if (pci_is_pcie(ha->pdev))
 		pcie_set_readrq(ha->pdev, 2048);
 
 	ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@
 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
 	struct qla_hw_data *ha = vha->hw;
-	int pcie_reg;
 
-	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
-	if (pcie_reg) {
+	if (pci_is_pcie(ha->pdev)) {
 		strcpy(str, "PCIe iSA");
 		return str;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9f01bbb..52be35e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -494,18 +494,14 @@
 	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t pci_bus;
-	int pcie_reg;
 
-	pcie_reg = pci_pcie_cap(ha->pdev);
-	if (pcie_reg) {
+	if (pci_is_pcie(ha->pdev)) {
 		char lwstr[6];
-		uint16_t pcie_lstat, lspeed, lwidth;
+		uint32_t lstat, lspeed, lwidth;
 
-		pcie_reg += PCI_EXP_LNKCAP;
-		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
-		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
-		lwidth = (pcie_lstat &
-		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
+		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 
 		strcpy(str, "PCIe (");
 		switch (lspeed) {
@@ -3183,7 +3179,6 @@
 	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static void
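
The qla2xxx hunks swap open-coded capability probing (pci_find_capability()/pci_pcie_cap() plus manual config-space offset arithmetic) for the core helpers pci_is_pcie() and pcie_capability_read_dword(), and decode Link Capabilities with the named PCI_EXP_LNKCAP_SLS/PCI_EXP_LNKCAP_MLW masks instead of raw BIT_* math. A sketch of just the decode step; the register value is invented and the mask constants mirror the uapi definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define LNKCAP_SLS  0x0000000fu   /* supported link speeds */
    #define LNKCAP_MLW  0x000003f0u   /* maximum link width */

    int main(void)
    {
            uint32_t lnkcap = 0x00477482;   /* sample: 5.0 GT/s, x8 */
            unsigned speed = lnkcap & LNKCAP_SLS;          /* -> 2 */
            unsigned width = (lnkcap & LNKCAP_MLW) >> 4;   /* -> 8 */

            printf("speed field %u, link width x%u\n", speed, width);
            return 0;
    }
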
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 41327d4..084d1fd 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -306,6 +306,7 @@
 struct qla_ddb_index {
 	struct list_head list;
 	uint16_t fw_ddb_idx;
+	uint16_t flash_ddb_idx;
 	struct dev_db_entry fw_ddb;
 	uint8_t flash_isid[6];
 };
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 51d1a70..1243e59 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -539,6 +539,10 @@
 #define ENABLE_INTERNAL_LOOPBACK	0x04
 #define ENABLE_EXTERNAL_LOOPBACK	0x08
 
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE	0
+#define QL4_PARAM_ENABLE	1
+
 /*************************************************************************/
 
 /* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index e6f2a26..5cef252 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -83,6 +83,8 @@
 		uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
 int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
 			   char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+		     uint16_t idx, int bidi);
 
 void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
 void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 8503ad6..655b7bb 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -82,3 +82,15 @@
 	__qla4xxx_disable_intrs(ha);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+	int type;
+
+	if (chap_entry->flags & BIT_7)
+		type = LOCAL_CHAP;
+	else
+		type = BIDI_CHAP;
+
+	return type;
+}
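
Note that qla4xxx_get_chap_type() keys the entry type off BIT_7 of the flags word; the BIT_6 -> BIT_7 fix in ql4_mbx.c below brings the unidirectional-entry check into line, so both readers of the flash CHAP table agree on which flag marks a local (unidirectional) entry.
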
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 62d4208..22cbd00 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1530,13 +1530,26 @@
 	return ret;
 }
 
-static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
-			    char *password, uint16_t idx, int bidi)
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+		     uint16_t idx, int bidi)
 {
 	int ret = 0;
 	int rval = QLA_ERROR;
 	uint32_t offset = 0;
 	struct ql4_chap_table *chap_table;
+	uint32_t chap_size = 0;
 	dma_addr_t chap_dma;
 
 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
@@ -1554,7 +1567,20 @@
 	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
 	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
 	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
-	offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
+
+	if (is_qla40XX(ha)) {
+		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+		offset = FLASH_CHAP_OFFSET;
+	} else {
+		/* A single region contains CHAP info for both ports and is
+		 * divided in half, one half per port.
+		 */
+		chap_size = ha->hw.flt_chap_size / 2;
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		if (ha->port_num == 1)
+			offset += chap_size;
+	}
+
+	offset += (idx * sizeof(struct ql4_chap_table));
 	rval = qla4xxx_set_flash(ha, chap_dma, offset,
 				sizeof(struct ql4_chap_table),
 				FLASH_OPT_RMW_COMMIT);
@@ -1611,7 +1637,7 @@
 		goto exit_unlock_uni_chap;
 	}
 
-	if (!(chap_table->flags & BIT_6)) {
+	if (!(chap_table->flags & BIT_7)) {
 		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
 		rval = QLA_ERROR;
 		goto exit_unlock_uni_chap;
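
The new offset arithmetic in qla4xxx_set_chap() mirrors the flash layout: on 40XX parts CHAP entries live at a fixed FLASH_CHAP_OFFSET, while on newer parts a single FLT region holds both ports' entries, split in half, with port 1 starting at the midpoint. A toy model of that computation; every size and base below is a sample value, not the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t entry_size  = 1024;        /* per-entry size, sample */
            const uint32_t region_size = 128 * 1024;  /* flt_chap_size, sample */
            const uint32_t region_base = 0x40000;     /* raw flash base, sample */
            uint32_t half = region_size / 2;
            uint32_t idx = 5;                         /* CHAP index to place */
            int port;

            for (port = 0; port < 2; port++) {
                    uint32_t off = region_base
                                 + (port == 1 ? half : 0)
                                 + idx * entry_size;
                    printf("port %d, entry %u -> flash offset 0x%x\n",
                           port, idx, off);
            }
            return 0;
    }
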
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f8a0a26..a28d5e6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -149,6 +149,8 @@
 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
 				 uint32_t *num_entries, char *buf);
 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data,
+				  int len);
 
 /*
  * SCSI host template entry points
@@ -252,6 +254,7 @@
 	.send_ping		= qla4xxx_send_ping,
 	.get_chap		= qla4xxx_get_chap_list,
 	.delete_chap		= qla4xxx_delete_chap,
+	.set_chap		= qla4xxx_set_chap_entry,
 	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
 	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
 	.new_flashnode		= qla4xxx_sysfs_ddb_add,
@@ -508,6 +511,95 @@
 	return 0;
 }
 
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+				     int16_t chap_index,
+				     struct ql4_chap_table **chap_entry)
+{
+	int rval = QLA_ERROR;
+	int max_chap_entries;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+		rval = QLA_ERROR;
+		goto exit_get_chap;
+	}
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_index >= max_chap_entries) {
+		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+		rval = QLA_ERROR;
+		goto exit_get_chap;
+	}
+
+	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+	if ((*chap_entry)->cookie !=
+	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+		rval = QLA_ERROR;
+		*chap_entry = NULL;
+	} else {
+		rval = QLA_SUCCESS;
+	}
+
+exit_get_chap:
+	return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free chap index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free chap index available in the chap table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+					uint16_t *chap_index)
+{
+	int i, rval;
+	int free_index = -1;
+	int max_chap_entries = 0;
+	struct ql4_chap_table *chap_table;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+						sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+		rval = QLA_ERROR;
+		goto exit_find_chap;
+	}
+
+	for (i = 0; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+		if ((chap_table->cookie !=
+		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+		   (i > MAX_RESRV_CHAP_IDX)) {
+			free_index = i;
+			break;
+		}
+	}
+
+	if (free_index != -1) {
+		*chap_index = free_index;
+		rval = QLA_SUCCESS;
+	} else {
+		rval = QLA_ERROR;
+	}
+
+exit_find_chap:
+	return rval;
+}
+
 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
 				  uint32_t *num_entries, char *buf)
 {
@@ -691,6 +783,111 @@
 	return ret;
 }
 
+/**
+ * qla4xxx_set_chap_entry - Make chap entry with given information
+ * @shost: pointer to host
+ * @data: chap info - credentials, index and type to make chap entry
+ * @len: length of data
+ *
+ * Add or update chap entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_chap_rec chap_rec;
+	struct ql4_chap_table *chap_entry = NULL;
+	struct iscsi_param_info *param_info;
+	struct nlattr *attr;
+	int max_chap_entries = 0;
+	int type;
+	int rem = len;
+	int rc = 0;
+
+	memset(&chap_rec, 0, sizeof(chap_rec));
+
+	nla_for_each_attr(attr, data, len, rem) {
+		param_info = nla_data(attr);
+
+		switch (param_info->param) {
+		case ISCSI_CHAP_PARAM_INDEX:
+			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+			break;
+		case ISCSI_CHAP_PARAM_CHAP_TYPE:
+			chap_rec.chap_type = param_info->value[0];
+			break;
+		case ISCSI_CHAP_PARAM_USERNAME:
+			memcpy(chap_rec.username, param_info->value,
+			       param_info->len);
+			break;
+		case ISCSI_CHAP_PARAM_PASSWORD:
+			memcpy(chap_rec.password, param_info->value,
+			       param_info->len);
+			break;
+		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+			chap_rec.password_length = param_info->value[0];
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha,
+				   "%s: No such sysfs attribute\n", __func__);
+			rc = -ENOSYS;
+			goto exit_set_chap;
+		}
+	}
+
+	if (chap_rec.chap_type == CHAP_TYPE_IN)
+		type = BIDI_CHAP;
+	else
+		type = LOCAL_CHAP;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	mutex_lock(&ha->chap_sem);
+	if (chap_rec.chap_tbl_idx < max_chap_entries) {
+		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+					       &chap_entry);
+		if (!rc) {
+			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
+				ql4_printk(KERN_INFO, ha,
+					   "Type mismatch for CHAP entry %d\n",
+					   chap_rec.chap_tbl_idx);
+				rc = -EINVAL;
+				goto exit_unlock_chap;
+			}
+
+			/* If chap index is in use then don't modify it */
+			rc = qla4xxx_is_chap_active(shost,
+						    chap_rec.chap_tbl_idx);
+			if (rc) {
+				ql4_printk(KERN_INFO, ha,
+					   "CHAP entry %d is in use\n",
+					   chap_rec.chap_tbl_idx);
+				rc = -EBUSY;
+				goto exit_unlock_chap;
+			}
+		}
+	} else {
+		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+		if (rc) {
+			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+			rc = -EBUSY;
+			goto exit_unlock_chap;
+		}
+	}
+
+	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+			      chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+	mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+	return rc;
+}
+
 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
 				   enum iscsi_param_type param_type,
 				   int param, char *buf)
@@ -1455,9 +1652,12 @@
 	struct iscsi_session *sess = cls_sess->dd_data;
 	struct ddb_entry *ddb_entry = sess->dd_data;
 	struct scsi_qla_host *ha = ddb_entry->ha;
+	struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+	struct ql4_chap_table chap_tbl;
 	int rval, len;
 	uint16_t idx;
 
+	memset(&chap_tbl, 0, sizeof(chap_tbl));
 	switch (param) {
 	case ISCSI_PARAM_CHAP_IN_IDX:
 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
@@ -1469,14 +1669,46 @@
 			len = sprintf(buf, "%hu\n", idx);
 		break;
 	case ISCSI_PARAM_CHAP_OUT_IDX:
-		rval = qla4xxx_get_chap_index(ha, sess->username,
-					      sess->password, LOCAL_CHAP,
-					      &idx);
+		if (ddb_entry->ddb_type == FLASH_DDB) {
+			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+				idx = ddb_entry->chap_tbl_idx;
+				rval = QLA_SUCCESS;
+			} else {
+				rval = QLA_ERROR;
+			}
+		} else {
+			rval = qla4xxx_get_chap_index(ha, sess->username,
+						      sess->password,
+						      LOCAL_CHAP, &idx);
+		}
 		if (rval)
 			len = sprintf(buf, "\n");
 		else
 			len = sprintf(buf, "%hu\n", idx);
 		break;
+	case ISCSI_PARAM_USERNAME:
+	case ISCSI_PARAM_PASSWORD:
+		/* First, populate session username and password for FLASH DDB,
+		 * if not already done. This happens when session login fails
+		 * for a FLASH DDB.
+		 */
+		if (ddb_entry->ddb_type == FLASH_DDB &&
+		    ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+		    !sess->username && !sess->password) {
+			idx = ddb_entry->chap_tbl_idx;
+			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+							    chap_tbl.secret,
+							    idx);
+			if (!rval) {
+				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+						(char *)chap_tbl.name,
+						strlen((char *)chap_tbl.name));
+				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+						(char *)chap_tbl.secret,
+						chap_tbl.secret_len);
+			}
+		}
+		/* allow fall-through */
 	default:
 		return iscsi_session_get_param(cls_sess, param, buf);
 	}
@@ -2373,11 +2605,6 @@
 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
 
 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
-	if (ddb_link < MAX_DDB_ENTRIES)
-		sess->discovery_parent_idx = ddb_link;
-	else
-		sess->discovery_parent_idx = DDB_NO_LINK;
-
 	if (ddb_link == DDB_ISNS)
 		disc_parent = ISCSI_DISC_PARENT_ISNS;
 	else if (ddb_link == DDB_NO_LINK)
@@ -2402,6 +2629,7 @@
 	int buflen = 0;
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
+	struct ql4_chap_table chap_tbl;
 	struct iscsi_conn *conn;
 	char ip_addr[DDB_IPADDR_LEN];
 	uint16_t options = 0;
@@ -2409,6 +2637,7 @@
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	conn = cls_conn->dd_data;
+	memset(&chap_tbl, 0, sizeof(chap_tbl));
 
 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
 
@@ -2435,6 +2664,19 @@
 			(char *)fw_ddb_entry->iscsi_name, buflen);
 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
 			(char *)ha->name_string, buflen);
+
+	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+						   chap_tbl.secret,
+						   ddb_entry->chap_tbl_idx)) {
+			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+					(char *)chap_tbl.name,
+					strlen((char *)chap_tbl.name));
+			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+					(char *)chap_tbl.secret,
+					chap_tbl.secret_len);
+		}
+	}
 }
 
 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -4937,7 +5179,8 @@
 }
 
 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
-				     struct dev_db_entry *fw_ddb_entry)
+				     struct dev_db_entry *fw_ddb_entry,
+				     uint32_t *index)
 {
 	struct ddb_entry *ddb_entry;
 	struct ql4_tuple_ddb *fw_tddb = NULL;
@@ -4971,6 +5214,8 @@
 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
 			ret = QLA_SUCCESS; /* found */
+			if (index != NULL)
+				*index = idx;
 			goto exit_check;
 		}
 	}
@@ -5206,6 +5451,7 @@
 	ddb_entry->ha = ha;
 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+	ddb_entry->chap_tbl_idx = INVALID_ENTRY;
 
 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
 	atomic_set(&ddb_entry->relogin_timer, 0);
@@ -5267,6 +5513,87 @@
 	} while (time_after(wtime, jiffies));
 }
 
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+				  struct dev_db_entry *flash_ddb_entry)
+{
+	uint16_t options = 0;
+	size_t ip_len = IP_ADDR_LEN;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE)
+		ip_len = IPv6_ADDR_LEN;
+
+	if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+		return QLA_ERROR;
+
+	if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+		   sizeof(fw_ddb_entry->isid)))
+		return QLA_ERROR;
+
+	if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+		   sizeof(fw_ddb_entry->port)))
+		return QLA_ERROR;
+
+	return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry,
+				     uint32_t fw_idx, uint32_t *flash_index)
+{
+	struct dev_db_entry *flash_ddb_entry;
+	dma_addr_t flash_ddb_entry_dma;
+	uint32_t idx = 0;
+	int max_ddbs;
+	int ret = QLA_ERROR, status;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				    MAX_DEV_DB_ENTRIES;
+
+	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+					 &flash_ddb_entry_dma);
+	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "Out of memory\n");
+		goto exit_find_st_idx;
+	}
+
+	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+					  flash_ddb_entry_dma, fw_idx);
+	if (status == QLA_SUCCESS) {
+		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+		if (status == QLA_SUCCESS) {
+			*flash_index = fw_idx;
+			ret = QLA_SUCCESS;
+			goto exit_find_st_idx;
+		}
+	}
+
+	for (idx = 0; idx < max_ddbs; idx++) {
+		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+						  flash_ddb_entry_dma, idx);
+		if (status == QLA_ERROR)
+			continue;
+
+		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+		if (status == QLA_SUCCESS) {
+			*flash_index = idx;
+			ret = QLA_SUCCESS;
+			goto exit_find_st_idx;
+		}
+	}
+
+	if (idx == max_ddbs)
+		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+			   fw_idx);
+
+exit_find_st_idx:
+	if (flash_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+			      flash_ddb_entry_dma);
+
+	return ret;
+}
+
 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
 				  struct list_head *list_st)
 {
@@ -5278,6 +5605,7 @@
 	int ret;
 	uint32_t idx = 0, next_idx = 0;
 	uint32_t state = 0, conn_err = 0;
+	uint32_t flash_index = -1;
 	uint16_t conn_id = 0;
 
 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5310,6 +5638,19 @@
 		if (!st_ddb_idx)
 			break;
 
+		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+						&flash_index);
+		if (ret == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha,
+				   "No flash entry for ST at idx [%d]\n", idx);
+			st_ddb_idx->flash_ddb_idx = idx;
+		} else {
+			ql4_printk(KERN_INFO, ha,
+				   "ST at idx [%d] is stored at flash [%d]\n",
+				   idx, flash_index);
+			st_ddb_idx->flash_ddb_idx = flash_index;
+		}
+
 		st_ddb_idx->fw_ddb_idx = idx;
 
 		list_add_tail(&st_ddb_idx->list, list_st);
@@ -5354,6 +5695,28 @@
 	}
 }
 
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+					 struct ddb_entry *ddb_entry,
+					 struct dev_db_entry *fw_ddb_entry)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	uint32_t max_ddbs = 0;
+	uint16_t ddb_link = -1;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				    MAX_DEV_DB_ENTRIES;
+
+	cls_sess = ddb_entry->sess;
+	sess = cls_sess->dd_data;
+
+	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+	if (ddb_link < max_ddbs)
+		sess->discovery_parent_idx = ddb_link;
+	else
+		sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
 				   struct dev_db_entry *fw_ddb_entry,
 				   int is_reset, uint16_t idx)
@@ -5418,6 +5781,7 @@
 
 	/* Update sess/conn params */
 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
 
 	if (is_reset == RESET_ADAPTER) {
 		iscsi_block_session(cls_sess);
@@ -5434,17 +5798,43 @@
 	return ret;
 }
 
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+				       struct list_head *list_ddb,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+	uint16_t ddb_link;
+
+	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		if (ddb_idx->fw_ddb_idx == ddb_link) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Updating NT parent idx from [%d] to [%d]\n",
+					  ddb_link, ddb_idx->flash_ddb_idx));
+			fw_ddb_entry->ddb_link =
+					    cpu_to_le16(ddb_idx->flash_ddb_idx);
+			return;
+		}
+	}
+}
+
 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
-				  struct list_head *list_nt, int is_reset)
+				  struct list_head *list_nt,
+				  struct list_head *list_st,
+				  int is_reset)
 {
 	struct dev_db_entry *fw_ddb_entry;
+	struct ddb_entry *ddb_entry = NULL;
 	dma_addr_t fw_ddb_dma;
 	int max_ddbs;
 	int fw_idx_size;
 	int ret;
 	uint32_t idx = 0, next_idx = 0;
 	uint32_t state = 0, conn_err = 0;
+	uint32_t ddb_idx = -1;
 	uint16_t conn_id = 0;
+	uint16_t ddb_link = -1;
 	struct qla_ddb_index  *nt_ddb_idx;
 
 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5471,12 +5861,18 @@
 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
 			goto continue_next_nt;
 
+		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+		if (ddb_link < max_ddbs)
+			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
-		    state == DDB_DS_SESSION_FAILED))
+		    state == DDB_DS_SESSION_FAILED) &&
+		    (is_reset == INIT_ADAPTER))
 			goto continue_next_nt;
 
 		DEBUG2(ql4_printk(KERN_INFO, ha,
 				  "Adding  DDB to session = 0x%x\n", idx));
+
 		if (is_reset == INIT_ADAPTER) {
 			nt_ddb_idx = vmalloc(fw_idx_size);
 			if (!nt_ddb_idx)
@@ -5506,9 +5902,17 @@
 
 			list_add_tail(&nt_ddb_idx->list, list_nt);
 		} else if (is_reset == RESET_ADAPTER) {
-			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
-								QLA_SUCCESS)
+			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+							&ddb_idx);
+			if (ret == QLA_SUCCESS) {
+				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+								       ddb_idx);
+				if (ddb_entry != NULL)
+					qla4xxx_update_sess_disc_idx(ha,
+								     ddb_entry,
+								  fw_ddb_entry);
 				goto continue_next_nt;
+			}
 		}
 
 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
@@ -5526,7 +5930,8 @@
 }
 
 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
-				      struct list_head *list_nt)
+				      struct list_head *list_nt,
+				      uint16_t target_id)
 {
 	struct dev_db_entry *fw_ddb_entry;
 	dma_addr_t fw_ddb_dma;
@@ -5571,13 +5976,16 @@
 
 		nt_ddb_idx->fw_ddb_idx = idx;
 
-		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
 		if (ret == QLA_SUCCESS) {
 			/* free nt_ddb_idx and do not add to list_nt */
 			vfree(nt_ddb_idx);
 			goto continue_next_new_nt;
 		}
 
+		if (target_id < max_ddbs)
+			fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
 		list_add_tail(&nt_ddb_idx->list, list_nt);
 
 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
@@ -5894,7 +6302,8 @@
 }
 
 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
-				struct dev_db_entry *fw_ddb_entry)
+				struct dev_db_entry *fw_ddb_entry,
+				uint16_t target_id)
 {
 	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
 	struct list_head list_nt;
@@ -5919,7 +6328,7 @@
 	if (ret == QLA_ERROR)
 		goto exit_login_st;
 
-	qla4xxx_build_new_nt_list(ha, &list_nt);
+	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
 
 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
 		list_del_init(&ddb_idx->list);
@@ -5946,7 +6355,7 @@
 {
 	int ret = QLA_ERROR;
 
-	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
 	if (ret != QLA_SUCCESS)
 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
 					      idx);
@@ -6001,7 +6410,8 @@
 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
 
 	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
-		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+					   fnode_sess->target_id);
 	else
 		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
 					   fnode_sess->target_id);
@@ -6522,10 +6932,13 @@
 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
 	struct scsi_qla_host *ha = to_qla_host(shost);
 	struct iscsi_flashnode_param_info *fnode_param;
+	struct ql4_chap_table chap_tbl;
 	struct nlattr *attr;
+	uint16_t chap_out_idx = INVALID_ENTRY;
 	int rc = QLA_ERROR;
 	uint32_t rem = len;
 
+	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
 	nla_for_each_attr(attr, data, len, rem) {
 		fnode_param = nla_data(attr);
 
@@ -6567,6 +6980,10 @@
 			break;
 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
 			fnode_sess->chap_auth_en = fnode_param->value[0];
+			/* Invalidate chap index if chap auth is disabled */
+			if (!fnode_sess->chap_auth_en)
+				fnode_sess->chap_out_idx = INVALID_ENTRY;
+
 			break;
 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
 			fnode_conn->snack_req_en = fnode_param->value[0];
@@ -6705,6 +7122,17 @@
 			fnode_conn->exp_statsn =
 						*(uint32_t *)fnode_param->value;
 			break;
+		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+			chap_out_idx = *(uint16_t *)fnode_param->value;
+			if (!qla4xxx_get_uni_chap_at_index(ha,
+							   chap_tbl.name,
+							   chap_tbl.secret,
+							   chap_out_idx)) {
+				fnode_sess->chap_out_idx = chap_out_idx;
+				/* Enable chap auth if chap index is valid */
+				fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+			}
+			break;
 		default:
 			ql4_printk(KERN_ERR, ha,
 				   "%s: No such sysfs attribute\n", __func__);
@@ -6926,11 +7354,10 @@
 		schedule_timeout_uninterruptible(HZ / 10);
 	} while (time_after(wtime, jiffies));
 
-	/* Free up the sendtargets list */
+
+	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
+
 	qla4xxx_free_ddb_list(&list_st);
-
-	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
-
 	qla4xxx_free_ddb_list(&list_nt);
 
 	qla4xxx_free_ddb_index(ha);
@@ -7400,7 +7827,6 @@
 
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 /**
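
qla4xxx_set_chap_entry() above consumes the new ISCSI_UEVENT_SET_CHAP payload by iterating netlink attributes with nla_for_each_attr() and switching on param_info->param. Underneath, that iteration is a TLV scan over 4-byte-aligned records. A self-contained model of the loop; the record layout follows struct nlattr, a little-endian host is assumed, and all types and payloads are invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv { uint16_t len; uint16_t type; };   /* struct nlattr shape */

    static void walk(const unsigned char *buf, int rem)
    {
            while (rem >= (int)sizeof(struct tlv)) {
                    struct tlv h;

                    memcpy(&h, buf, sizeof(h));
                    if (h.len < sizeof(h) || h.len > rem)
                            break;                       /* malformed: stop */
                    printf("attr type %u, payload %u bytes\n",
                           (unsigned)h.type,
                           (unsigned)(h.len - sizeof(h)));
                    buf += (h.len + 3) & ~3;             /* NLA_ALIGN step */
                    rem -= (h.len + 3) & ~3;
            }
    }

    int main(void)
    {
            unsigned char msg[] = {
                    6, 0, 1, 0, 0x2a, 0x00, 0, 0,   /* type 1, 2-byte payload */
                    5, 0, 2, 0, 0x01, 0,    0, 0,   /* type 2, 1-byte payload */
            };

            walk(msg, sizeof(msg));
            return 0;
    }
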
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index eaa808e..fe0bcb1 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -78,11 +78,6 @@
  * Definitions and constants.
  */
 
-#define MIN_RESET_DELAY (2*HZ)
-
-/* Do not call reset on error if we just did a reset within 15 sec. */
-#define MIN_RESET_PERIOD (15*HZ)
-
 /*
  * Note - the initial logging level can be set here to log events at boot time.
  * After the system is up, you may enable logging via the /proc interface.
@@ -658,7 +653,6 @@
 int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
-	unsigned long timeout;
 	int rtn = 0;
 
 	atomic_inc(&cmd->device->iorequest_cnt);
@@ -704,28 +698,6 @@
 			       (cmd->device->lun << 5 & 0xe0);
 	}
 
-	/*
-	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
-	 * we can avoid the drive not being ready.
-	 */
-	timeout = host->last_reset + MIN_RESET_DELAY;
-
-	if (host->resetting && time_before(jiffies, timeout)) {
-		int ticks_remaining = timeout - jiffies;
-		/*
-		 * NOTE: This may be executed from within an interrupt
-		 * handler!  This is bad, but for now, it'll do.  The irq
-		 * level of the interrupt handler has been masked out by the
-		 * platform dependent interrupt handling code already, so the
-		 * sti() here will not cause another call to the SCSI host's
-		 * interrupt handler (assuming there is one irq-level per
-		 * host).
-		 */
-		while (--ticks_remaining >= 0)
-			mdelay(1 + 999 / HZ);
-		host->resetting = 0;
-	}
-
 	scsi_log_send(cmd);
 
 	/*
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01c0ffa..80b8b10 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -169,7 +169,7 @@
 static int scsi_debug_dsense = DEF_D_SENSE;
 static int scsi_debug_every_nth = DEF_EVERY_NTH;
 static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static int scsi_debug_guard = DEF_GUARD;
+static unsigned int scsi_debug_guard = DEF_GUARD;
 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
 static int scsi_debug_max_luns = DEF_MAX_LUNS;
 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
@@ -293,6 +293,20 @@
 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
 			           0, 0, 0x0, 0x0};
 
+static void *fake_store(unsigned long long lba)
+{
+	lba = do_div(lba, sdebug_store_sectors);
+
+	return fake_storep + lba * scsi_debug_sector_size;
+}
+
+static struct sd_dif_tuple *dif_store(sector_t sector)
+{
+	sector = do_div(sector, sdebug_store_sectors);
+
+	return dif_storep + sector;
+}
+
 static int sdebug_add_adapter(void);
 static void sdebug_remove_adapter(void);
 
@@ -1731,25 +1745,22 @@
 	return ret;
 }
 
-static u16 dif_compute_csum(const void *buf, int len)
+static __be16 dif_compute_csum(const void *buf, int len)
 {
-	u16 csum;
+	__be16 csum;
 
-	switch (scsi_debug_guard) {
-	case 1:
-		csum = ip_compute_csum(buf, len);
-		break;
-	case 0:
+	if (scsi_debug_guard)
+		csum = (__force __be16)ip_compute_csum(buf, len);
+	else
 		csum = cpu_to_be16(crc_t10dif(buf, len));
-		break;
-	}
+
 	return csum;
 }
 
 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 		      sector_t sector, u32 ei_lba)
 {
-	u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
 
 	if (sdt->guard_tag != csum) {
 		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -1775,31 +1786,62 @@
 	return 0;
 }
 
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
-			    unsigned int sectors, u32 ei_lba)
+static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+			  unsigned int sectors, bool read)
 {
 	unsigned int i, resid;
 	struct scatterlist *psgl;
+	void *paddr;
+	const void *dif_store_end = dif_storep + sdebug_store_sectors;
+
+	/* Bytes of protection data to copy into sgl */
+	resid = sectors * sizeof(*dif_storep);
+
+	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
+		int len = min(psgl->length, resid);
+		void *start = dif_store(sector);
+		int rest = 0;
+
+		if (dif_store_end < start + len)
+			rest = start + len - dif_store_end;
+
+		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+
+		if (read)
+			memcpy(paddr, start, len - rest);
+		else
+			memcpy(start, paddr, len - rest);
+
+		if (rest) {
+			if (read)
+				memcpy(paddr + len - rest, dif_storep, rest);
+			else
+				memcpy(dif_storep, paddr + len - rest, rest);
+		}
+
+		sector += len / sizeof(*dif_storep);
+		resid -= len;
+		kunmap_atomic(paddr);
+	}
+}
+
+static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+			    unsigned int sectors, u32 ei_lba)
+{
+	unsigned int i;
 	struct sd_dif_tuple *sdt;
 	sector_t sector;
-	sector_t tmp_sec = start_sec;
-	void *paddr;
 
-	start_sec = do_div(tmp_sec, sdebug_store_sectors);
-
-	sdt = dif_storep + start_sec;
-
-	for (i = 0 ; i < sectors ; i++) {
+	for (i = 0; i < sectors; i++) {
 		int ret;
 
-		if (sdt[i].app_tag == 0xffff)
+		sector = start_sec + i;
+		sdt = dif_store(sector);
+
+		if (sdt->app_tag == cpu_to_be16(0xffff))
 			continue;
 
-		sector = start_sec + i;
-
-		ret = dif_verify(&sdt[i],
-				 fake_storep + sector * scsi_debug_sector_size,
-				 sector, ei_lba);
+		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
 		if (ret) {
 			dif_errors++;
 			return ret;
@@ -1808,26 +1850,7 @@
 		ei_lba++;
 	}
 
-	/* Bytes of protection data to copy into sgl */
-	resid = sectors * sizeof(*dif_storep);
-	sector = start_sec;
-
-	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
-		int len = min(psgl->length, resid);
-
-		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
-		memcpy(paddr, dif_storep + sector, len);
-
-		sector += len / sizeof(*dif_storep);
-		if (sector >= sdebug_store_sectors) {
-			/* Force wrap */
-			tmp_sec = sector;
-			sector = do_div(tmp_sec, sdebug_store_sectors);
-		}
-		resid -= len;
-		kunmap_atomic(paddr);
-	}
-
+	dif_copy_prot(SCpnt, start_sec, sectors, true);
 	dix_reads++;
 
 	return 0;
@@ -1910,15 +1933,12 @@
 {
 	int i, j, ret;
 	struct sd_dif_tuple *sdt;
-	struct scatterlist *dsgl = scsi_sglist(SCpnt);
+	struct scatterlist *dsgl;
 	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
 	void *daddr, *paddr;
-	sector_t tmp_sec = start_sec;
-	sector_t sector;
+	sector_t sector = start_sec;
 	int ppage_offset;
 
-	sector = do_div(tmp_sec, sdebug_store_sectors);
-
 	BUG_ON(scsi_sg_count(SCpnt) == 0);
 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
 
@@ -1946,25 +1966,13 @@
 
 			sdt = paddr + ppage_offset;
 
-			ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
+			ret = dif_verify(sdt, daddr + j, sector, ei_lba);
 			if (ret) {
 				dump_sector(daddr + j, scsi_debug_sector_size);
 				goto out;
 			}
 
-			/* Would be great to copy this in bigger
-			 * chunks.  However, for the sake of
-			 * correctness we need to verify each sector
-			 * before writing it to "stable" storage
-			 */
-			memcpy(dif_storep + sector, sdt, sizeof(*sdt));
-
 			sector++;
-
-			if (sector == sdebug_store_sectors)
-				sector = 0;	/* Force wrap */
-
-			start_sec++;
 			ei_lba++;
 			ppage_offset += sizeof(struct sd_dif_tuple);
 		}
@@ -1973,6 +1981,7 @@
 		kunmap_atomic(daddr);
 	}
 
+	dif_copy_prot(SCpnt, start_sec, sectors, false);
 	dix_writes++;
 
 	return 0;
@@ -2742,7 +2751,7 @@
 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@@ -3172,7 +3181,7 @@
 
 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
 }
 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
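
The scsi_debug rework funnels all backing-store addressing through fake_store()/dif_store(), which reduce a sector number modulo sdebug_store_sectors, and through dif_copy_prot(), which moves protection tuples in at most two memcpy() calls when a run wraps past the end of the ring. A standalone model of that wrap-around copy; the ring and tuple sizes are toy values, not the driver's:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define STORE_SECTORS 8
    #define TUPLE_SZ      8    /* stands in for the protection tuple size */

    static unsigned char store[STORE_SECTORS * TUPLE_SZ];

    static void copy_prot(unsigned char *dst, uint64_t sector, unsigned sectors)
    {
            unsigned start = sector % STORE_SECTORS;   /* the do_div() step */
            unsigned len = sectors * TUPLE_SZ;
            unsigned rest = 0;

            if (start + sectors > STORE_SECTORS)       /* spills past the end? */
                    rest = (start + sectors - STORE_SECTORS) * TUPLE_SZ;

            memcpy(dst, store + start * TUPLE_SZ, len - rest);
            if (rest)                                  /* wrapped tail, from 0 */
                    memcpy(dst + len - rest, store, rest);
    }

    int main(void)
    {
            unsigned char out[4 * TUPLE_SZ];
            unsigned i;

            for (i = 0; i < sizeof(store); i++)
                    store[i] = (unsigned char)i;
            copy_prot(out, 6, 4);               /* sectors 6,7 then wrap to 0,1 */
            printf("%u\n", out[2 * TUPLE_SZ]);  /* first wrapped byte: 0 */
            return 0;
    }
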
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 83e591b..e8bee9f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -87,6 +87,18 @@
 }
 EXPORT_SYMBOL_GPL(scsi_schedule_eh);
 
+static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
+{
+	if (!shost->last_reset || !shost->eh_deadline)
+		return 0;
+
+	if (time_before(jiffies,
+			shost->last_reset + shost->eh_deadline))
+		return 0;
+
+	return 1;
+}
+
 /**
  * scsi_eh_scmd_add - add scsi cmd to error handling.
  * @scmd:	scmd to run eh on.
@@ -109,6 +121,9 @@
 		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
 			goto out_unlock;
 
+	if (shost->eh_deadline && !shost->last_reset)
+		shost->last_reset = jiffies;
+
 	ret = 1;
 	scmd->eh_eflags |= eh_flag;
 	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
@@ -138,6 +153,9 @@
 	trace_scsi_dispatch_cmd_timeout(scmd);
 	scsi_log_completion(scmd, TIMEOUT_ERROR);
 
+	if (host->eh_deadline && !host->last_reset)
+		host->last_reset = jiffies;
+
 	if (host->transportt->eh_timed_out)
 		rtn = host->transportt->eh_timed_out(scmd);
 	else if (host->hostt->eh_timed_out)
@@ -990,13 +1008,26 @@
 		      struct list_head *done_q)
 {
 	struct scsi_cmnd *scmd, *next;
+	struct Scsi_Host *shost;
 	int rtn;
+	unsigned long flags;
 
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
 		    SCSI_SENSE_VALID(scmd))
 			continue;
 
+		shost = scmd->device->host;
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			break;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
 						  "%s: requesting sense\n",
 						  current->comm));
@@ -1082,11 +1113,28 @@
 	struct scsi_cmnd *scmd, *next;
 	struct scsi_device *sdev;
 	int finish_cmds;
+	unsigned long flags;
 
 	while (!list_empty(cmd_list)) {
 		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
 		sdev = scmd->device;
 
+		if (!try_stu) {
+			spin_lock_irqsave(sdev->host->host_lock, flags);
+			if (scsi_host_eh_past_deadline(sdev->host)) {
+				/* Push items back onto work_q */
+				list_splice_init(cmd_list, work_q);
+				spin_unlock_irqrestore(sdev->host->host_lock,
+						       flags);
+				SCSI_LOG_ERROR_RECOVERY(3,
+					shost_printk(KERN_INFO, sdev->host,
+						     "skip %s, past eh deadline\n",
+						     __func__));
+				break;
+			}
+			spin_unlock_irqrestore(sdev->host->host_lock, flags);
+		}
+
 		finish_cmds = !scsi_device_online(scmd->device) ||
 			(try_stu && !scsi_eh_try_stu(scmd) &&
 			 !scsi_eh_tur(scmd)) ||
@@ -1122,26 +1170,42 @@
 	struct scsi_cmnd *scmd, *next;
 	LIST_HEAD(check_list);
 	int rtn;
+	struct Scsi_Host *shost;
+	unsigned long flags;
 
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
 			continue;
+		shost = scmd->device->host;
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			list_splice_init(&check_list, work_q);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			return list_empty(work_q);
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
 						  "0x%p\n", current->comm,
 						  scmd));
-		rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
-		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
-			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
-			if (rtn == FAST_IO_FAIL)
-				scsi_eh_finish_cmd(scmd, done_q);
-			else
-				list_move_tail(&scmd->eh_entry, &check_list);
-		} else
+		rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+		if (rtn == FAILED) {
 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
 							  " cmd failed:"
 							  "0x%p\n",
 							  current->comm,
 							  scmd));
+			list_splice_init(&check_list, work_q);
+			return list_empty(work_q);
+		}
+		scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
+		if (rtn == FAST_IO_FAIL)
+			scsi_eh_finish_cmd(scmd, done_q);
+		else
+			list_move_tail(&scmd->eh_entry, &check_list);
 	}
 
 	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
@@ -1187,8 +1251,19 @@
 {
 	struct scsi_cmnd *scmd, *stu_scmd, *next;
 	struct scsi_device *sdev;
+	unsigned long flags;
 
 	shost_for_each_device(sdev, shost) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			break;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
 		stu_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry)
 			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@@ -1241,9 +1316,20 @@
 {
 	struct scsi_cmnd *scmd, *bdr_scmd, *next;
 	struct scsi_device *sdev;
+	unsigned long flags;
 	int rtn;
 
 	shost_for_each_device(sdev, shost) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			break;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
 		bdr_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry)
 			if (scmd->device == sdev) {
@@ -1303,6 +1389,21 @@
 		struct scsi_cmnd *next, *scmd;
 		int rtn;
 		unsigned int id;
+		unsigned long flags;
+
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			/* push back on work queue for further processing */
+			list_splice_init(&check_list, work_q);
+			list_splice_init(&tmp_list, work_q);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			return list_empty(work_q);
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
 
 		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
 		id = scmd_id(scmd);
@@ -1347,6 +1448,7 @@
 	LIST_HEAD(check_list);
 	unsigned int channel;
 	int rtn;
+	unsigned long flags;
 
 	/*
 	 * we really want to loop over the various channels, and do this on
@@ -1356,6 +1458,18 @@
 	 */
 
 	for (channel = 0; channel <= shost->max_channel; channel++) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_eh_past_deadline(shost)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			list_splice_init(&check_list, work_q);
+			SCSI_LOG_ERROR_RECOVERY(3,
+				shost_printk(KERN_INFO, shost,
+					    "skip %s, past eh deadline\n",
+					     __func__));
+			return list_empty(work_q);
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
 		chan_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry) {
 			if (channel == scmd_channel(scmd)) {
@@ -1755,8 +1869,9 @@
 	 * will be requests for character device operations, and also for
 	 * ioctls to queued block devices.
 	 */
-	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
-					  __func__));
+	SCSI_LOG_ERROR_RECOVERY(3,
+		printk("scsi_eh_%d waking up host to restart\n",
+		       shost->host_no));
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1883,6 +1998,10 @@
 		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
 			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
 
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (shost->eh_deadline)
+		shost->last_reset = 0;
+	spin_unlock_irqrestore(shost->host_lock, flags);
 	scsi_eh_flush_done_q(&eh_done_q);
 }
 
@@ -1909,7 +2028,7 @@
 		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
 		    shost->host_failed != shost->host_busy) {
 			SCSI_LOG_ERROR_RECOVERY(1,
-				printk("Error handler scsi_eh_%d sleeping\n",
+				printk("scsi_eh_%d: sleeping\n",
 					shost->host_no));
 			schedule();
 			continue;
@@ -1917,8 +2036,9 @@
 
 		__set_current_state(TASK_RUNNING);
 		SCSI_LOG_ERROR_RECOVERY(1,
-			printk("Error handler scsi_eh_%d waking up\n",
-				shost->host_no));
+			printk("scsi_eh_%d: waking up %d/%d/%d\n",
+			       shost->host_no, shost->host_eh_scheduled,
+			       shost->host_failed, shost->host_busy));
 
 		/*
 		 * We have a host that is failing for some reason.  Figure out
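
Every scsi_error.c hunk applies the same pattern: take the host lock, ask scsi_host_eh_past_deadline() whether jiffies has passed last_reset + eh_deadline, and if so splice any partially built lists back onto work_q and return, letting recovery escalate instead of grinding through per-device steps. The helper's wrap-safe arithmetic is easy to model; time_before() below is reproduced from its kernel definition and the tick values are invented:

    #include <stdio.h>

    #define time_before(a, b)  ((long)((a) - (b)) < 0)

    /* 0 for either field disables the deadline, as in the helper above */
    static int past_deadline(unsigned long now, unsigned long last_reset,
                             unsigned long deadline)
    {
            if (!last_reset || !deadline)
                    return 0;
            if (time_before(now, last_reset + deadline))
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", past_deadline(1000, 100, 500));  /* 1: past 600 */
            printf("%d\n", past_deadline(1000, 900, 500));  /* 0: until 1400 */
            /* counter has wrapped past ULONG_MAX; still answers correctly */
            printf("%d\n", past_deadline(5, (unsigned long)-10, 100)); /* 0 */
            return 0;
    }
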
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1549b7..7bd7f0d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@
 
 	host_dev = scsi_get_device(shost);
 	if (host_dev && host_dev->dma_mask)
-		bounce_limit = *host_dev->dma_mask;
+		bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
 
 	return bounce_limit;
 }
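
The one-line scsi_lib.c change stops treating the raw DMA mask as a byte address limit. On platforms where DMA-capable memory starts at a physical offset (the ARM dma_pfn_offset case this helper targets), dma_max_pfn() reports the highest reachable page frame with that offset applied, so shifting it by PAGE_SHIFT yields a correct bounce limit where *host_dev->dma_mask alone would undershoot.
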
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 4c5aabe..af4c050 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -54,7 +54,8 @@
 		/*
 		 * All the high-level SCSI drivers that implement runtime
 		 * PM treat runtime suspend, system suspend, and system
-		 * hibernate identically.
+		 * hibernate nearly identically. In all cases the requirements
+		 * for runtime suspension are stricter.
 		 */
 		if (pm_runtime_suspended(dev))
 			return 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 40c6394..8ff62c2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -281,6 +281,42 @@
 
 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
 
+static ssize_t
+show_shost_eh_deadline(struct device *dev,
+		      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	return sprintf(buf, "%d\n", shost->eh_deadline / HZ);
+}
+
+static ssize_t
+store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	int ret = -EINVAL;
+	int deadline;
+	unsigned long flags;
+
+	if (shost->transportt && shost->transportt->eh_strategy_handler)
+		return ret;
+
+	if (sscanf(buf, "%d\n", &deadline) == 1) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (scsi_host_in_recovery(shost))
+			ret = -EBUSY;
+		else {
+			shost->eh_deadline = deadline * HZ;
+			ret = count;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
+
 shost_rd_attr(unique_id, "%u\n");
 shost_rd_attr(host_busy, "%hu\n");
 shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -308,6 +344,7 @@
 	&dev_attr_prot_capabilities.attr,
 	&dev_attr_prot_guard_type.attr,
 	&dev_attr_host_reset.attr,
+	&dev_attr_eh_deadline.attr,
 	NULL
 };
 
@@ -529,6 +566,7 @@
  */
 sdev_rd_attr (device_blocked, "%d\n");
 sdev_rd_attr (queue_depth, "%d\n");
+sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
 sdev_rd_attr (vendor, "%.8s\n");
@@ -750,6 +788,7 @@
 	&dev_attr_device_blocked.attr,
 	&dev_attr_type.attr,
 	&dev_attr_scsi_level.attr,
+	&dev_attr_device_busy.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_model.attr,
 	&dev_attr_rev.attr,
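
The new eh_deadline attribute gives userspace a recovery budget per host: writing, say, 30 to /sys/class/scsi_host/host0/eh_deadline caps error handling at thirty seconds (stored internally as deadline * HZ). The store handler refuses the write with -EINVAL when the transport supplies its own eh_strategy_handler and with -EBUSY while the host is already in recovery; the companion device_busy attribute simply exposes the existing per-device busy count read-only.
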
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e4a989f..63a6ca4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2744,6 +2744,28 @@
 	return err;
 }
 
+static int iscsi_set_chap(struct iscsi_transport *transport,
+			  struct iscsi_uevent *ev, uint32_t len)
+{
+	char *data = (char *)ev + sizeof(*ev);
+	struct Scsi_Host *shost;
+	int err = 0;
+
+	if (!transport->set_chap)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.set_path.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.set_path.host_no);
+		return -ENODEV;
+	}
+
+	err = transport->set_chap(shost, data, len);
+	scsi_host_put(shost);
+	return err;
+}
+
 static int iscsi_delete_chap(struct iscsi_transport *transport,
 			     struct iscsi_uevent *ev)
 {
@@ -3234,6 +3256,10 @@
 	case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
 		err = iscsi_logout_flashnode_sid(transport, ev);
 		break;
+	case ISCSI_UEVENT_SET_CHAP:
+		err = iscsi_set_chap(transport, ev,
+				     nlmsg_attrlen(nlh, sizeof(*ev)));
+		break;
 	default:
 		err = -ENOSYS;
 		break;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5693f6d7..e6c4bff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,8 @@
 static int  sd_probe(struct device *);
 static int  sd_remove(struct device *);
 static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *);
+static int sd_suspend_system(struct device *);
+static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
 static int sd_done(struct scsi_cmnd *);
@@ -484,11 +485,11 @@
 };
 
 static const struct dev_pm_ops sd_pm_ops = {
-	.suspend		= sd_suspend,
+	.suspend		= sd_suspend_system,
 	.resume			= sd_resume,
-	.poweroff		= sd_suspend,
+	.poweroff		= sd_suspend_system,
 	.restore		= sd_resume,
-	.runtime_suspend	= sd_suspend,
+	.runtime_suspend	= sd_suspend_runtime,
 	.runtime_resume		= sd_resume,
 };
 
@@ -829,7 +830,7 @@
 
 static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
 {
-	rq->timeout = SD_FLUSH_TIMEOUT;
+	rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
 	rq->retries = SD_MAX_RETRIES;
 	rq->cmd[0] = SYNCHRONIZE_CACHE;
 	rq->cmd_len = 10;
@@ -1002,7 +1003,7 @@
 		SCpnt->cmnd[0] = READ_6;
 		SCpnt->sc_data_direction = DMA_FROM_DEVICE;
 	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
+		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
 		goto out;
 	}
 
@@ -1433,12 +1434,13 @@
 {
 	int retries, res;
 	struct scsi_device *sdp = sdkp->device;
+	const int timeout = sdp->request_queue->rq_timeout
+		* SD_FLUSH_TIMEOUT_MULTIPLIER;
 	struct scsi_sense_hdr sshdr;
 
 	if (!scsi_device_online(sdp))
 		return -ENODEV;
 
-
 	for (retries = 3; retries > 0; --retries) {
 		unsigned char cmd[10] = { 0 };
 
@@ -1448,20 +1450,39 @@
 		 * flush everything.
 		 */
 		res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
-					     &sshdr, SD_FLUSH_TIMEOUT,
-					     SD_MAX_RETRIES, NULL, REQ_PM);
+					     &sshdr, timeout, SD_MAX_RETRIES,
+					     NULL, REQ_PM);
 		if (res == 0)
 			break;
 	}
 
 	if (res) {
 		sd_print_result(sdkp, res);
+
 		if (driver_byte(res) & DRIVER_SENSE)
 			sd_print_sense_hdr(sdkp, &sshdr);
-	}
+		/* we need to evaluate the error return */
+		if (scsi_sense_valid(&sshdr) &&
+		    sshdr.asc == 0x3a) /* 0x3a is medium not present */
+			/* this is no error here */
+			return 0;
 
-	if (res)
-		return -EIO;
+		switch (host_byte(res)) {
+		/* ignore errors due to racing a disconnection */
+		case DID_BAD_TARGET:
+		case DID_NO_CONNECT:
+			return 0;
+		/* signal the upper layer it might try again */
+		case DID_BUS_BUSY:
+		case DID_IMM_RETRY:
+		case DID_REQUEUE:
+		case DID_SOFT_ERROR:
+			return -EBUSY;
+		default:
+			return -EIO;
+		}
+	}
 	return 0;
 }
 
@@ -2639,13 +2660,16 @@
 	struct scsi_device *sdev = sdkp->device;
 
 	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+		/* too large values might cause issues with arcmsr */
+		int vpd_buf_len = 64;
+
 		sdev->no_report_opcodes = 1;
 
 		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
 		 * CODES is unsupported and the device has an ATA
 		 * Information VPD page (SAT).
 		 */
-		if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+		if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
 			sdev->no_write_same = 1;
 	}
 
@@ -3058,9 +3082,17 @@
 		sd_print_result(sdkp, res);
 		if (driver_byte(res) & DRIVER_SENSE)
 			sd_print_sense_hdr(sdkp, &sshdr);
+		if (scsi_sense_valid(&sshdr) &&
+		    sshdr.asc == 0x3a) /* 0x3a is medium not present */
+			res = 0;
 	}
 
-	return res;
+	/* SCSI error codes must not go to the generic layer */
+	if (res)
+		return -EIO;
+
+	return 0;
 }
 
 /*
@@ -3078,7 +3110,7 @@
 	if (pm_runtime_suspended(dev))
 		goto exit;
 
-	if (sdkp->WCE) {
+	if (sdkp->WCE && sdkp->media_present) {
 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
 		sd_sync_cache(sdkp);
 	}
@@ -3092,7 +3124,7 @@
 	scsi_disk_put(sdkp);
 }
 
-static int sd_suspend(struct device *dev)
+static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 {
 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
 	int ret = 0;
@@ -3100,16 +3132,23 @@
 	if (!sdkp)
 		return 0;	/* this can happen */
 
-	if (sdkp->WCE) {
+	if (sdkp->WCE && sdkp->media_present) {
 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
 		ret = sd_sync_cache(sdkp);
-		if (ret)
+		if (ret) {
+			/* ignore OFFLINE device */
+			if (ret == -ENODEV)
+				ret = 0;
 			goto done;
+		}
 	}
 
 	if (sdkp->device->manage_start_stop) {
 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+		/* an error is not worth aborting a system sleep */
 		ret = sd_start_stop_device(sdkp, 0);
+		if (ignore_stop_errors)
+			ret = 0;
 	}
 
 done:
@@ -3117,6 +3156,16 @@
 	return ret;
 }
 
+static int sd_suspend_system(struct device *dev)
+{
+	return sd_suspend_common(dev, true);
+}
+
+static int sd_suspend_runtime(struct device *dev)
+{
+	return sd_suspend_common(dev, false);
+}
+
 static int sd_resume(struct device *dev)
 {
 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 7a049de..26895ff 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -13,7 +13,11 @@
  */
 #define SD_TIMEOUT		(30 * HZ)
 #define SD_MOD_TIMEOUT		(75 * HZ)
-#define SD_FLUSH_TIMEOUT	(60 * HZ)
+/*
+ * The flush timeout is the standard request timeout (user modifiable
+ * via sysfs, initially SD_TIMEOUT) scaled by this multiplier.
+ */
+#define SD_FLUSH_TIMEOUT_MULTIPLIER	2
 #define SD_WRITE_SAME_TIMEOUT	(120 * HZ)
 
 /*
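
Worked through with the defaults, the new scheme is behavior-preserving: the request queue timeout starts at SD_TIMEOUT (30 * HZ), so a multiplier of 2 reproduces the old fixed 60 * HZ flush timeout in both scsi_setup_flush_cmnd() and sd_sync_cache(), while a user who enlarges the block-layer timeout through sysfs now gets a proportionally longer flush window instead of a stale constant.
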
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 325c31c..1aa4bef 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1790,8 +1790,6 @@
 
 	scsi_remove_host(hba->host);
 
-	pci_set_drvdata(pdev, NULL);
-
 	stex_hba_stop(hba);
 
 	stex_hba_free(hba);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index b80bf70..8053695 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -174,7 +174,7 @@
  */
 struct sym_shcb {
 	/*
-	 *  Chip and controller indentification.
+	 *  Chip and controller identification.
 	 */
 	int		unit;
 	char		inst_name[16];
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9327f5f..b006cf7 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -521,7 +521,7 @@
 	pACB->SelConn++;
 	return 1;
     }
-    if (time_before (jiffies, pACB->pScsiHost->last_reset))
+    if (time_before (jiffies, pACB->last_reset))
     {
 	DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
 	return 1;
@@ -1863,7 +1863,7 @@
     /* delay half a second */
     udelay (1000);
     DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
-    pACB->pScsiHost->last_reset = jiffies + 5*HZ/2
+    pACB->last_reset = jiffies + 5*HZ/2
 		    + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
     pACB->Connected = 0;
 
@@ -2048,9 +2048,9 @@
 
 	dc390_ResetDevParam(pACB);
 	mdelay(1);
-	pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 
+	pACB->last_reset = jiffies + 3*HZ/2
 		+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
-    
+
 	DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
 	DC390_read8(INT_Status);		/* Reset Pending INT */
 
@@ -2383,7 +2383,7 @@
 	if (pACB->Gmode2 & RST_SCSI_BUS) {
 		dc390_ResetSCSIBus(pACB);
 		udelay(1000);
-		shost->last_reset = jiffies + HZ/2 +
+		pACB->last_reset = jiffies + HZ/2 +
 			HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
 	}
 
@@ -2455,8 +2455,8 @@
 	shost->irq = pdev->irq;
 	shost->base = io_port;
 	shost->unique_id = io_port;
-	shost->last_reset = jiffies;
-	
+
+	pACB->last_reset = jiffies;
 	pACB->pScsiHost = shost;
 	pACB->IOPortBase = (u16) io_port;
 	pACB->IRQLevel = pdev->irq;
@@ -2553,7 +2553,6 @@
 
 	pci_disable_device(dev);
 	scsi_host_put(scsi_host);
-	pci_set_drvdata(dev, NULL);
 }
 
 static struct pci_device_id tmscsim_pci_tbl[] = {
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index 77adc54..3d1bb4a 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -143,6 +143,7 @@
 
 struct pci_dev	*pdev;
 
+unsigned long   last_reset;
 unsigned long	Cmds;
 u32		SelLost;
 u32		SelConn;
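struct Scsi_Host no longer carries a last_reset field, so tmscsim keeps its own copy in the adapter control block; the guard logic is otherwise unchanged. The pattern, assuming the pACB naming used above (time_before() is from <linux/jiffies.h>):

	/* refuse commands until the post-reset settle time has elapsed */
	if (time_before(jiffies, pACB->last_reset))
		return 1;	/* tell the midlayer to retry later */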
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index a823cf4..8b95312 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -132,7 +132,6 @@
 	pm_runtime_forbid(&pdev->dev);
 	pm_runtime_get_noresume(&pdev->dev);
 	ufshcd_remove(hba);
-	pci_set_drvdata(pdev, NULL);
 }
 
 /**
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 74b88ef..c3173dc 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -224,6 +224,9 @@
 		virtqueue_disable_cb(vq);
 		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 			fn(vscsi, buf);
+
+		if (unlikely(virtqueue_is_broken(vq)))
+			break;
 	} while (!virtqueue_enable_cb(vq));
 	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 }
@@ -710,19 +713,15 @@
 #define virtscsi_config_get(vdev, fld) \
 	({ \
 		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
-		vdev->config->get(vdev, \
-				  offsetof(struct virtio_scsi_config, fld), \
-				  &__val, sizeof(__val)); \
+		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
 		__val; \
 	})
 
 #define virtscsi_config_set(vdev, fld, val) \
-	(void)({ \
+	do { \
 		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
-		vdev->config->set(vdev, \
-				  offsetof(struct virtio_scsi_config, fld), \
-				  &__val, sizeof(__val)); \
-	})
+		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+	} while (0)
 
 static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 {
@@ -954,7 +953,7 @@
 	scsi_host_put(shost);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtscsi_freeze(struct virtio_device *vdev)
 {
 	virtscsi_remove_vqs(vdev);
@@ -988,7 +987,7 @@
 	.id_table = id_table,
 	.probe = virtscsi_probe,
 	.scan = virtscsi_scan,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze = virtscsi_freeze,
 	.restore = virtscsi_restore,
 #endif
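Three separate virtio-scsi fixes land here: the event loop bails out if the virtqueue is broken, config space is accessed through virtio_cread()/virtio_cwrite() (which derive offset, size and endianness from the struct layout), and freeze/restore are compiled only under CONFIG_PM_SLEEP, since plain CONFIG_PM is also set on runtime-PM-only kernels. A sketch of a typical accessor use (virtio_cread() is from <linux/virtio_config.h>):

static u32 example_read_cmd_per_lun(struct virtio_device *vdev)
{
	u32 cmd_per_lun;

	/* offset and width are computed from struct virtio_scsi_config */
	virtio_cread(vdev, struct virtio_scsi_config, cmd_per_lun,
		     &cmd_per_lun);
	return cmd_per_lun;
}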
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 3bfaa66..b9755ec 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1405,7 +1405,6 @@
 out_free_host:
 	scsi_host_put(host);
 out_disable_device:
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 
 	return error;
@@ -1445,7 +1444,6 @@
 
 	scsi_host_put(host);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 }
 
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 4c332143..3ed666f 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -217,7 +217,7 @@
 		cs |= spi->chip_select;
 	}
 
-	INIT_COMPLETION(bs->done);
+	reinit_completion(&bs->done);
 	bs->tx_buf = tfr->tx_buf;
 	bs->rx_buf = tfr->rx_buf;
 	bs->len = tfr->len;
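This hunk opens a long tree-wide rename that runs through the SPI, staging, tty and USB changes below: the INIT_COMPLETION() macro becomes a reinit_completion() inline. The helper only resets the done counter, roughly (<linux/completion.h>):

static inline void reinit_completion(struct completion *x)
{
	x->done = 0;
}

Unlike init_completion(), it must only be applied to a completion that was fully initialised earlier; resetting it at the top of each transfer, as these drivers do, is the intended use.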
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index e2a5a42..6f03d7e 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -105,7 +105,7 @@
 
 		gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
 
-		INIT_COMPLETION(hw->done);
+		reinit_completion(&hw->done);
 
 		hw->count = 0;
 		hw->len = xfer->len;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index dd72445..50b2d88 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -554,7 +554,7 @@
 	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
 	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
 
-	INIT_COMPLETION(dspi->done);
+	reinit_completion(&dspi->done);
 
 	if (spicfg->io_type == SPI_IO_TYPE_INTR)
 		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 32200d4..80d8f40 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -232,7 +232,7 @@
 	mpc8xxx_spi->tx = t->tx_buf;
 	mpc8xxx_spi->rx = t->rx_buf;
 
-	INIT_COMPLETION(mpc8xxx_spi->done);
+	reinit_completion(&mpc8xxx_spi->done);
 
 	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
 	if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 2129fcd..119f7af 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -339,7 +339,7 @@
 	mpc8xxx_spi->tx = t->tx_buf;
 	mpc8xxx_spi->rx = t->rx_buf;
 
-	INIT_COMPLETION(mpc8xxx_spi->done);
+	reinit_completion(&mpc8xxx_spi->done);
 
 	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
 		ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 58d5ee0..9602bbd 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -167,7 +167,7 @@
 			}
 
 			/* have the ISR trigger when the TX FIFO is empty */
-			INIT_COMPLETION(mps->txisrdone);
+			reinit_completion(&mps->txisrdone);
 			out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
 			out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
 			wait_for_completion(&mps->txisrdone);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index de33305..73afb56 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -202,7 +202,7 @@
 	if (!dma_xfer)
 		return -ENOMEM;
 
-	INIT_COMPLETION(spi->c);
+	reinit_completion(&spi->c);
 
 	/* Chip select was already programmed into CTRL0 */
 	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9e2020d..4c4b0a1 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -890,7 +890,7 @@
 	unsigned long flags;
 	int use_dma;
 
-	INIT_COMPLETION(sdd->xfer_completion);
+	reinit_completion(&sdd->xfer_completion);
 
 	/* Only BPW and Speed may change across transfers */
 	bpw = xfer->bits_per_word;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2a95435..c74298c 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -465,7 +465,7 @@
 	ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
 
 	/* start by setting frame bit */
-	INIT_COMPLETION(p->done);
+	reinit_completion(&p->done);
 	ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
 	if (ret) {
 		dev_err(&p->pdev->dev, "failed to start hardware\n");
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 592b4af..ed5e501 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -305,8 +305,8 @@
 	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
 	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
 	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
-	INIT_COMPLETION(sspi->rx_done);
-	INIT_COMPLETION(sspi->tx_done);
+	reinit_completion(&sspi->rx_done);
+	reinit_completion(&sspi->tx_done);
 
 	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
 
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 9146bb3..aaecfb3 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -451,7 +451,7 @@
 
 static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->tx_dma_complete);
+	reinit_completion(&tspi->tx_dma_complete);
 	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
 				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
@@ -470,7 +470,7 @@
 
 static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->rx_dma_complete);
+	reinit_completion(&tspi->rx_dma_complete);
 	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
 				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
@@ -844,7 +844,7 @@
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		unsigned long cmd1;
 
-		INIT_COMPLETION(tspi->xfer_completion);
+		reinit_completion(&tspi->xfer_completion);
 
 		cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
 
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 79be8ce..4dc8e81 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -339,7 +339,7 @@
 	msg->actual_length = 0;
 	single_xfer = list_is_singular(&msg->transfers);
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		INIT_COMPLETION(tsd->xfer_completion);
+		reinit_completion(&tsd->xfer_completion);
 		ret = tegra_sflash_start_transfer_one(spi, xfer,
 					is_first_msg, single_xfer);
 		if (ret < 0) {
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index af0a678..e66715b 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -462,7 +462,7 @@
 
 static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->tx_dma_complete);
+	reinit_completion(&tspi->tx_dma_complete);
 	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
 				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
@@ -481,7 +481,7 @@
 
 static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->rx_dma_complete);
+	reinit_completion(&tspi->rx_dma_complete);
 	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
 				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
@@ -836,7 +836,7 @@
 	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
 	int ret;
 
-	INIT_COMPLETION(tspi->xfer_completion);
+	reinit_completion(&tspi->xfer_completion);
 	ret = tegra_slink_start_transfer_one(spi, xfer);
 	if (ret < 0) {
 		dev_err(tspi->dev,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index ec3a83f..6d4ce46 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -258,7 +258,7 @@
 	xspi->tx_ptr = t->tx_buf;
 	xspi->rx_ptr = t->rx_buf;
 	xspi->remaining_bytes = t->len;
-	INIT_COMPLETION(xspi->done);
+	reinit_completion(&xspi->done);
 
 
 	/* Enable the transmit empty interrupt, which we use to determine
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8d05acc..8d85ddc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -245,15 +245,27 @@
 static int spi_drv_probe(struct device *dev)
 {
 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
+	struct spi_device		*spi = to_spi_device(dev);
+	int ret;
 
-	return sdrv->probe(to_spi_device(dev));
+	acpi_dev_pm_attach(&spi->dev, true);
+	ret = sdrv->probe(spi);
+	if (ret)
+		acpi_dev_pm_detach(&spi->dev, true);
+
+	return ret;
 }
 
 static int spi_drv_remove(struct device *dev)
 {
 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
+	struct spi_device		*spi = to_spi_device(dev);
+	int ret;
 
-	return sdrv->remove(to_spi_device(dev));
+	ret = sdrv->remove(spi);
+	acpi_dev_pm_detach(&spi->dev, true);
+
+	return ret;
 }
 
 static void spi_drv_shutdown(struct device *dev)
@@ -559,7 +571,7 @@
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);
 
-		INIT_COMPLETION(master->xfer_completion);
+		reinit_completion(&master->xfer_completion);
 
 		ret = master->transfer_one(master, msg->spi, xfer);
 		if (ret < 0) {
@@ -1145,8 +1157,10 @@
 		return AE_OK;
 	}
 
+	adev->power.flags.ignore_parent = true;
 	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
 	if (spi_add_device(spi)) {
+		adev->power.flags.ignore_parent = false;
 		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
 			dev_name(&adev->dev));
 		spi_dev_put(spi);
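The SPI core now brackets driver probe/remove with acpi_dev_pm_attach()/acpi_dev_pm_detach(), giving ACPI-enumerated SPI devices ACPI power management without per-driver code, and sets power.flags.ignore_parent during enumeration so the ACPI core does not chain the device's power state to the controller's. The attach/detach pairing generalises to any bus, roughly (to_example_drv() is an illustrative accessor; acpi_dev_pm_attach() is a no-op without ACPI):

static int example_bus_probe(struct device *dev)
{
	int ret;

	acpi_dev_pm_attach(dev, true);		/* power up via ACPI first */
	ret = to_example_drv(dev->driver)->probe(dev);
	if (ret)
		acpi_dev_pm_detach(dev, true);	/* unwind on failure */
	return ret;
}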
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 2dfb06a..1e9da40 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -86,9 +86,6 @@
 static int translated_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg)
 {
-	if (!file->f_op)
-		return -ENOTTY;
-
 	if (file->f_op->unlocked_ioctl)
 		return file->f_op->unlocked_ioctl(file, cmd, arg);
 
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 76ae6e2..83ca105 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -100,8 +100,9 @@
 	 */
 	if (!dev->dev.dma_mask)
 		dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-	if (!dev->dev.coherent_dma_mask)
-		dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		return retval;
 
 	irq = platform_get_irq(dev, 0);
 	if (irq < 0) {
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index d9446c4..ab8b29d 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -3601,17 +3601,10 @@
 		goto err_out;
 	}
 
-	/* Let's set up the PORT LOGIC Register.  First we need to know what
-	 * the max_payload_size is
-	 */
-	if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
-		dev_err(&pdev->dev,
-		    "Could not read PCI config space for Max Payload Size\n");
-		goto err_out;
-	}
+	/* Let's set up the PORT LOGIC Register. */
 
 	/* Program the Ack/Nak latency and replay timers */
-	max_payload &= 0x07;
+	max_payload = pdev->pcie_mpss;
 
 	if (max_payload < 2) {
 		static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -3641,8 +3634,7 @@
 	}
 
 	/* Change the max read size to 2k */
-	if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
-				PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
+	if (pcie_set_readrq(pdev, 2048)) {
 		dev_err(&pdev->dev,
 			"Couldn't change PCI config space for Max read size\n");
 		goto err_out;
@@ -4791,21 +4783,8 @@
 	pci_set_master(pdev);
 
 	/* Check the DMA addressing support of this device */
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-		if (rc < 0) {
-			dev_err(&pdev->dev,
-			  "Unable to obtain 64 bit DMA for consistent allocations\n");
-			goto err_release_res;
-		}
-	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-		if (rc < 0) {
-			dev_err(&pdev->dev,
-			  "Unable to obtain 32 bit DMA for consistent allocations\n");
-			goto err_release_res;
-		}
-	} else {
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 		dev_err(&pdev->dev, "No usable DMA addressing method\n");
 		rc = -EIO;
 		goto err_release_res;
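et131x, like dt3155, bcma-hcd and ssb-hcd later in this series, folds the dma_set_mask()/dma_set_coherent_mask() pair into dma_set_mask_and_coherent(). The helper is roughly this (<linux/dma-mapping.h>); note the coherent mask is only updated when the streaming mask is accepted, which keeps the 64-then-32-bit fallback chain above correct:

static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}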
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 9221a74..93c7299 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -42,7 +42,7 @@
 
 /**
  * struct ad7606_chip_info - chip specifc information
- * @name:		indentification string for chip
+ * @name:		identification string for chip
  * @int_vref_mv:	the internal reference voltage
  * @channels:		channel specification
  * @num_channels:	number of channels
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index aeae76b..e2dd783 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -783,7 +783,7 @@
 	if (!ret)
 		return -EBUSY;
 
-	INIT_COMPLETION(lradc->completion);
+	reinit_completion(&lradc->completion);
 
 	/*
 	 * No buffered operation in progress, map the channel and trigger it.
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f..5032ff7 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
 config DRM_IMX
 	tristate "DRM Support for Freescale i.MX"
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select VIDEOMODE_HELPERS
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4483d47..51aa977 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -407,14 +407,14 @@
 
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *      just specific driver own one instead because
 	 *      drm framework supports only one irq handler and
 	 *      drivers can well take care of their interrupts
 	 */
-	drm->irq_enabled = 1;
+	drm->irq_enabled = true;
 
 	drm_mode_config_init(drm);
 	imx_drm_mode_config_init(drm);
@@ -434,11 +434,11 @@
 		goto err_init;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	imxdrm->drm->vblank_disable_allowed = 1;
+	imxdrm->drm->vblank_disable_allowed = true;
 
 	if (!imx_drm_device_get())
 		ret = -EINVAL;
@@ -815,6 +815,12 @@
 
 static int imx_drm_platform_probe(struct platform_device *pdev)
 {
+	int ret;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	imx_drm_device->dev = &pdev->dev;
 
 	return drm_platform_init(&imx_drm_driver, pdev);
@@ -857,8 +863,6 @@
 		goto err_pdev;
 	}
 
-	imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
 	ret = platform_driver_register(&imx_drm_pdrv);
 	if (ret)
 		goto err_pdrv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 670a56a..ce6ba98 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -407,7 +407,9 @@
 	if (!pdata)
 		return -EINVAL;
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
 	if (!ipu_crtc)
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 359c6c1..eefdb8d 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -105,8 +105,8 @@
 #define ll_vfs_unlink(inode,entry,mnt)	  vfs_unlink(inode,entry)
 #define ll_vfs_mknod(dir,entry,mnt,mode,dev)    vfs_mknod(dir,entry,mode,dev)
 #define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
-#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
-		vfs_rename(old,old_dir,new,new_dir)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1,delegated_inode) \
+		vfs_rename(old,old_dir,new,new_dir,delegated_inode)
 
 #define cfs_bio_io_error(a,b)   bio_io_error((a))
 #define cfs_bio_endio(a,b,c)    bio_endio((a),(c))
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
index 2870487..35aefa2 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
@@ -165,7 +165,7 @@
 #define LUSTRE_EHOSTUNREACH	113	/* No route to host */
 #define LUSTRE_EALREADY		114	/* Operation already in progress */
 #define LUSTRE_EINPROGRESS	115	/* Operation now in progress */
-#define LUSTRE_ESTALE		116	/* Stale NFS file handle */
+#define LUSTRE_ESTALE		116	/* Stale file handle */
 #define LUSTRE_EUCLEAN		117	/* Structure needs cleaning */
 #define LUSTRE_ENOTNAM		118	/* Not a XENIX named type file */
 #define LUSTRE_ENAVAIL		119	/* No XENIX semaphores available */
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 34815b5..90bbdae 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -83,7 +83,7 @@
 }
 
 
-/* called from iget5_locked->find_inode() under inode_lock spinlock */
+/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
 static int ll_test_inode(struct inode *inode, void *opaque)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 1ef06fe..09474e7 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -224,7 +224,7 @@
 		GOTO(put_old, err = PTR_ERR(dchild_new));
 
 	err = ll_vfs_rename(dir->d_inode, dchild_old, mnt,
-			    dir->d_inode, dchild_new, mnt);
+			    dir->d_inode, dchild_new, mnt, NULL);
 
 	dput(dchild_new);
 put_old:
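vfs_rename() gained a delegated_inode output parameter as part of the NFS delegation series; lustre's compat macro just forwards it. Passing NULL, as here, means the caller is not prepared to wait for a delegation to be broken; a caller that is would use the retry shape from fs/namei.c, roughly (break_deleg_wait() is from <linux/fs.h>):

	struct inode *delegated = NULL;
	int err;

retry:
	err = vfs_rename(old_dir, old_dentry, new_dir, new_dentry,
			 &delegated);
	if (delegated) {
		int werr = break_deleg_wait(&delegated);
		if (!werr)
			goto retry;	/* delegation gone, rename again */
		err = werr;
	}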
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 90d6ac4..081407b 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -901,10 +901,7 @@
 	int err;
 	struct dt3155_priv *pd;
 
-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-	if (err)
-		return -ENODEV;
-	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (err)
 		return -ENODEV;
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
index 3335941..7f2f247 100644
--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
@@ -87,7 +87,7 @@
 	if (mutex_lock_interruptible(&p2m_dev->mutex))
 		return -EINTR;
 
-	INIT_COMPLETION(p2m_dev->completion);
+	reinit_completion(&p2m_dev->completion);
 	p2m_dev->error = 0;
 
 	if (desc_cnt > 1 && solo_dev->type != SOLO_DEV_6110 && desc_mode) {
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 44f24fa..36523ed 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -430,11 +430,6 @@
 int ATOMIC_INC_RETURN(ATOMIC_T *v);
 int ATOMIC_DEC_RETURN(ATOMIC_T *v);
 
-/* File operation APIs, just for linux now */
-int rtw_is_file_readable(char *path);
-int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz);
-int rtw_store_to_file(char *path, u8 __user *buf, u32 sz);
-
 struct rtw_netdev_priv_indicator {
 	void *priv;
 	u32 sizeof_priv;
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 5a9e9e4..a1ae727 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -356,214 +356,6 @@
 	return atomic_dec_return(v);
 }
 
-/* Open a file with the specific @param path, @param flag, @param mode
- * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
- * @param path the path of the file to open
- * @param flag file operation flags, please refer to linux document
- * @param mode please refer to linux document
- * @return Linux specific error code
- */
-static int openfile(struct file **fpp, char *path, int flag, int mode)
-{
-	struct file *fp;
-
-	fp = filp_open(path, flag, mode);
-	if (IS_ERR(fp)) {
-		*fpp = NULL;
-		return PTR_ERR(fp);
-	} else {
-		*fpp = fp;
-		return 0;
-	}
-}
-
-/* Close the file with the specific @param fp
- * @param fp the pointer of struct file to close
- * @return always 0
- */
-static int closefile(struct file *fp)
-{
-	filp_close(fp, NULL);
-	return 0;
-}
-
-static int readfile(struct file *fp, char __user *buf, int len)
-{
-	int rlen = 0, sum = 0;
-
-	if (!fp->f_op || !fp->f_op->read)
-		return -EPERM;
-
-	while (sum < len) {
-		rlen = fp->f_op->read(fp, buf+sum, len-sum, &fp->f_pos);
-		if (rlen > 0)
-			sum += rlen;
-		else if (0 != rlen)
-			return rlen;
-		else
-			break;
-	}
-	return  sum;
-}
-
-static int writefile(struct file *fp, char __user *buf, int len)
-{
-	int wlen = 0, sum = 0;
-
-	if (!fp->f_op || !fp->f_op->write)
-		return -EPERM;
-
-	while (sum < len) {
-		wlen = fp->f_op->write(fp, buf+sum, len-sum, &fp->f_pos);
-		if (wlen > 0)
-			sum += wlen;
-		else if (0 != wlen)
-			return wlen;
-		else
-			break;
-	}
-	return sum;
-}
-
-/* Test if the specifi @param path is a file and readable
- * @param path the path of the file to test
- * @return Linux specific error code
- */
-static int isfilereadable(char *path)
-{
-	struct file *fp;
-	int ret = 0;
-	mm_segment_t oldfs;
-	char __user buf;
-
-	fp = filp_open(path, O_RDONLY, 0);
-	if (IS_ERR(fp)) {
-		ret = PTR_ERR(fp);
-	} else {
-		oldfs = get_fs(); set_fs(get_ds());
-
-		if (1 != readfile(fp, &buf, 1))
-			ret = PTR_ERR(fp);
-
-		set_fs(oldfs);
-		filp_close(fp, NULL);
-	}
-	return ret;
-}
-
-/* Open the file with @param path and retrive the file content into
- * memory starting from @param buf for @param sz at most
- * @param path the path of the file to open and read
- * @param buf the starting address of the buffer to store file content
- * @param sz how many bytes to read at most
- * @return the byte we've read, or Linux specific error code
- */
-static int retrievefromfile(char *path, u8 __user *buf, u32 sz)
-{
-	int ret = -1;
-	mm_segment_t oldfs;
-	struct file *fp;
-
-	if (path && buf) {
-		ret = openfile(&fp, path, O_RDONLY, 0);
-		if (0 == ret) {
-			DBG_88E("%s openfile path:%s fp =%p\n", __func__,
-				path, fp);
-
-			oldfs = get_fs(); set_fs(get_ds());
-			ret = readfile(fp, buf, sz);
-			set_fs(oldfs);
-			closefile(fp);
-
-			DBG_88E("%s readfile, ret:%d\n", __func__, ret);
-
-		} else {
-			DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__,
-				path, ret);
-		}
-	} else {
-		DBG_88E("%s NULL pointer\n", __func__);
-		ret =  -EINVAL;
-	}
-	return ret;
-}
-
-/*
-* Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
-* @param path the path of the file to open and write
-* @param buf the starting address of the data to write into file
-* @param sz how many bytes to write at most
-* @return the byte we've written, or Linux specific error code
-*/
-static int storetofile(char *path, u8 __user *buf, u32 sz)
-{
-	int ret = 0;
-	mm_segment_t oldfs;
-	struct file *fp;
-
-	if (path && buf) {
-		ret = openfile(&fp, path, O_CREAT|O_WRONLY, 0666);
-		if (0 == ret) {
-			DBG_88E("%s openfile path:%s fp =%p\n", __func__, path, fp);
-
-			oldfs = get_fs(); set_fs(get_ds());
-			ret = writefile(fp, buf, sz);
-			set_fs(oldfs);
-			closefile(fp);
-
-			DBG_88E("%s writefile, ret:%d\n", __func__, ret);
-
-		} else {
-			DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__, path, ret);
-		}
-	} else {
-		DBG_88E("%s NULL pointer\n", __func__);
-		ret =  -EINVAL;
-	}
-	return ret;
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return true or false
-*/
-int rtw_is_file_readable(char *path)
-{
-	if (isfilereadable(path) == 0)
-		return true;
-	else
-		return false;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read
-*/
-int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz)
-{
-	int ret = retrievefromfile(path, buf, sz);
-
-	return ret >= 0 ? ret : 0;
-}
-
-/*
- * Open the file with @param path and wirte @param sz byte of data
- * starting from @param buf into the file
- * @param path the path of the file to open and write
- * @param buf the starting address of the data to write into file
- * @param sz how many bytes to write at most
- * @return the byte we've written
- */
-int rtw_store_to_file(char *path, u8 __user *buf, u32 sz)
-{
-	int ret = storetofile(path, buf, sz);
-	return ret >= 0 ? ret : 0;
-}
-
 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
 						    void *old_priv)
 {
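The whole private file-I/O layer, filp_open() plus read/write loops under set_fs(get_ds()), is deleted as unused; kernel drivers that want external data are expected to go through the firmware loader instead. A minimal sketch of that interface (the blob name is purely illustrative):

#include <linux/firmware.h>

static int example_load_blob(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* "example/blob.bin" is a placeholder, not a real firmware file */
	err = request_firmware(&fw, "example/blob.bin", dev);
	if (err)
		return err;

	/* parse fw->data, fw->size here */

	release_firmware(fw);
	return 0;
}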
diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c
index 7bb550a..743ff09 100644
--- a/drivers/staging/tidspbridge/core/sync.c
+++ b/drivers/staging/tidspbridge/core/sync.c
@@ -72,7 +72,7 @@
 	spin_lock_bh(&sync_lock);
 	for (i = 0; i < count; i++) {
 		if (completion_done(&events[i]->comp)) {
-			INIT_COMPLETION(events[i]->comp);
+			reinit_completion(&events[i]->comp);
 			*index = i;
 			spin_unlock_bh(&sync_lock);
 			status = 0;
@@ -92,7 +92,7 @@
 	spin_lock_bh(&sync_lock);
 	for (i = 0; i < count; i++) {
 		if (completion_done(&events[i]->comp)) {
-			INIT_COMPLETION(events[i]->comp);
+			reinit_completion(&events[i]->comp);
 			*index = i;
 			status = 0;
 		}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h
index 58a0d5c..fc19b97 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/sync.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/sync.h
@@ -59,7 +59,7 @@
 
 static inline void sync_reset_event(struct sync_object *event)
 {
-	INIT_COMPLETION(event->comp);
+	reinit_completion(&event->comp);
 	event->multi_comp = NULL;
 }
 
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 6d04eb4..1aa4a3f 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -332,7 +332,7 @@
 	struct dev_object *dev;
 	struct cfg_devnode *dev_node;
 	if (atomic_read(&bridge_cref)) {
-		INIT_COMPLETION(bridge_comp);
+		reinit_completion(&bridge_comp);
 		while (!wait_for_completion_timeout(&bridge_comp,
 						msecs_to_jiffies(REC_TIMEOUT)))
 			pr_info("%s:%d handle(s) still opened\n",
@@ -348,7 +348,7 @@
 
 void bridge_recover_schedule(void)
 {
-	INIT_COMPLETION(bridge_open_comp);
+	reinit_completion(&bridge_open_comp);
 	recover = true;
 	queue_work(bridge_rec_queue, &bridge_recovery_work);
 }
@@ -389,7 +389,7 @@
 #ifdef CONFIG_TIDSPBRIDGE_RECOVERY
 	bridge_rec_queue = create_workqueue("bridge_rec_queue");
 	INIT_WORK(&bridge_recovery_work, bridge_recover);
-	INIT_COMPLETION(bridge_comp);
+	reinit_completion(&bridge_comp);
 #endif
 
 #ifdef CONFIG_PM
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index dbfc390..f35a1f7 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -56,7 +56,7 @@
 	select THERMAL_GOV_USER_SPACE
 	help
 	  Select this if you want to let the user space manage the
-	  lpatform thermals.
+	  platform thermals.
 
 endchoice
 
@@ -69,6 +69,7 @@
 	bool "Step_wise thermal governor"
 	help
 	  Enable this to manage platform thermals using a simple linear
+	  governor.
 
 config THERMAL_GOV_USER_SPACE
 	bool "User_space thermal governor"
@@ -78,7 +79,6 @@
 config CPU_THERMAL
 	bool "generic cpu cooling support"
 	depends on CPU_FREQ
-	select CPU_FREQ_TABLE
 	help
 	  This implements the generic cpu cooling mechanism through frequency
 	  reduction. An ACPI version of this already exists
@@ -117,14 +117,14 @@
 	depends on OF
 	help
 	  Enable this to plug the SPEAr thermal sensor driver into the Linux
-	  thermal framework
+	  thermal framework.
 
 config RCAR_THERMAL
 	tristate "Renesas R-Car thermal driver"
 	depends on ARCH_SHMOBILE
 	help
 	  Enable this to plug the R-Car thermal sensor driver into the Linux
-	  thermal framework
+	  thermal framework.
 
 config KIRKWOOD_THERMAL
 	tristate "Temperature sensor on Marvell Kirkwood SoCs"
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index d179028..02a46f2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -469,10 +469,10 @@
 
 	cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
 						   &cpufreq_cooling_ops);
-	if (!cool_dev) {
+	if (IS_ERR(cool_dev)) {
 		release_idr(&cpufreq_idr, cpufreq_dev->id);
 		kfree(cpufreq_dev);
-		return ERR_PTR(-EINVAL);
+		return cool_dev;
 	}
 	cpufreq_dev->cool_dev = cool_dev;
 	cpufreq_dev->cpufreq_state = 0;
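thermal_cooling_device_register() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the old !cool_dev test could not fire; the fix also forwards the real error instead of a blanket -EINVAL. The caller-side idiom (a sketch):

	struct thermal_cooling_device *cdev;

	cdev = thermal_cooling_device_register("example-cooling", priv, &ops);
	if (IS_ERR(cdev))
		return ERR_CAST(cdev);	/* or PTR_ERR() when returning int */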
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index b40b37c..8f181b3 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,6 +675,11 @@
 	{ X86_VENDOR_INTEL, 6, 0x2e},
 	{ X86_VENDOR_INTEL, 6, 0x2f},
 	{ X86_VENDOR_INTEL, 6, 0x3a},
+	{ X86_VENDOR_INTEL, 6, 0x3c},
+	{ X86_VENDOR_INTEL, 6, 0x3e},
+	{ X86_VENDOR_INTEL, 6, 0x3f},
+	{ X86_VENDOR_INTEL, 6, 0x45},
+	{ X86_VENDOR_INTEL, 6, 0x46},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -758,21 +763,39 @@
 	/* probe cpu features and ids here */
 	retval = powerclamp_probe();
 	if (retval)
-		return retval;
+		goto exit_free;
+
 	/* set default limit, maybe adjusted during runtime based on feedback */
 	window_size = 2;
 	register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
 	powerclamp_thread = alloc_percpu(struct task_struct *);
+	if (!powerclamp_thread) {
+		retval = -ENOMEM;
+		goto exit_unregister;
+	}
+
 	cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
 						&powerclamp_cooling_ops);
-	if (IS_ERR(cooling_dev))
-		return -ENODEV;
+	if (IS_ERR(cooling_dev)) {
+		retval = -ENODEV;
+		goto exit_free_thread;
+	}
 
 	if (!duration)
 		duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
 	powerclamp_create_debug_files();
 
 	return 0;
+
+exit_free_thread:
+	free_percpu(powerclamp_thread);
+exit_unregister:
+	unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+	kfree(cpu_clamping_mask);
+	return retval;
 }
 module_init(powerclamp_init);
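powerclamp_init() previously leaked cpu_clamping_mask, the hotplug notifier and the per-CPU thread pointers on failure; the fix introduces the usual inverse-order unwind. The idiom, reduced to its skeleton (the acquire/release names are illustrative):

static int __init example_init(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto undo_a;

	err = acquire_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:			/* labels unwind in reverse acquisition order */
	release_b();
undo_a:
	release_a();
	return err;
}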
 
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4962a6a..03a5671 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -247,10 +247,11 @@
 		if (!pos->tzp && !pos->ops->bind)
 			continue;
 
-		if (!pos->tzp && pos->ops->bind) {
+		if (pos->ops->bind) {
 			ret = pos->ops->bind(pos, cdev);
 			if (ret)
 				print_bind_err_msg(pos, cdev, ret);
+			continue;
 		}
 
 		tzp = pos->tzp;
@@ -282,8 +283,8 @@
 
 	mutex_lock(&thermal_list_lock);
 
-	/* If there is no platform data, try to use ops->bind */
-	if (!tzp && tz->ops->bind) {
+	/* If there is ops->bind, try to use ops->bind */
+	if (tz->ops->bind) {
 		list_for_each_entry(pos, &thermal_cdev_list, node) {
 			ret = tz->ops->bind(tz, pos);
 			if (ret)
@@ -1038,7 +1039,8 @@
 		     sizeof("thermal_zone") - 1)) {
 		tz = to_thermal_zone(dev);
 		kfree(tz);
-	} else {
+	} else if (!strncmp(dev_name(dev), "cooling_device",
+			sizeof("cooling_device") - 1)) {
 		cdev = to_cooling_device(dev);
 		kfree(cdev);
 	}
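Two thermal core fixes here: device release now distinguishes zones from cooling devices by name prefix before freeing (previously anything that was not a thermal zone was freed as a cooling device), and ops->bind takes precedence whenever the zone provides it, platform data or not. A zone-supplied bind callback typically forwards to thermal_zone_bind_cooling_device(); a sketch, assuming trip 0 and an unrestricted state range:

static int example_tz_bind(struct thermal_zone_device *tz,
			   struct thermal_cooling_device *cdev)
{
	return thermal_zone_bind_cooling_device(tz, 0, cdev,
						THERMAL_NO_LIMIT,
						THERMAL_NO_LIMIT);
}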
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index c193af6..636c9ba 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -183,7 +183,7 @@
 {
 	int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
 	if (rc < 0)
-		return 0;
+		return rc;
 
 	return len;
 }
@@ -642,7 +642,22 @@
 
 void xen_raw_console_write(const char *str)
 {
-	dom0_write_console(0, str, strlen(str));
+	ssize_t len = strlen(str);
+	int rc = 0;
+
+	if (xen_domain()) {
+		rc = dom0_write_console(0, str, len);
+#ifdef CONFIG_X86
+		if (rc == -ENOSYS && xen_hvm_domain())
+			goto outb_print;
+
+	} else if (xen_cpuid_base()) {
+		int i;
+outb_print:
+		for (i = 0; i < len; i++)
+			outb(str[i], 0xe9);
+#endif
+	}
 }
 
 void xen_raw_printk(const char *fmt, ...)
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 0e888621..7332e2c 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -495,7 +495,7 @@
 	count = dport->xmit_cnt;
 	/* xmit buffer no longer empty? */
 	if (count)
-		INIT_COMPLETION(dport->xmit_empty);
+		reinit_completion(&dport->xmit_empty);
 	mutex_unlock(&dport->xmit_lock);
 
 	if (total) {
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index f519da9..96c4a19 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -158,14 +158,12 @@
 	if (pdata->sram_pool) {
 		gdev->sram_pool = pdata->sram_pool;
 		gdev->sram_vaddr =
-			gen_pool_alloc(gdev->sram_pool, sram_pool_sz);
+			(unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
+					sram_pool_sz, &gdev->sram_paddr);
 		if (!gdev->sram_vaddr) {
 			dev_err(&dev->dev, "Could not allocate SRAM pool\n");
 			goto out_free;
 		}
-		gdev->sram_paddr =
-			gen_pool_virt_to_phys(gdev->sram_pool,
-					      gdev->sram_vaddr);
 	}
 
 	gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
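uio_pruss switches to gen_pool_dma_alloc(), which folds the allocation and the virt-to-phys lookup into one call and returns a proper void * instead of an unsigned long. Its implementation is roughly (lib/genalloc.c):

void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}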
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index aa49162..892cc96 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -344,7 +344,7 @@
 		/* it could happen that we reinitialize this completion, while
 		 * somebody was waiting for that completion.  The timeout and
 		 * while loop handle such cases, but this might be improved */
-		INIT_COMPLETION(c67x00->endpoint_disable);
+		reinit_completion(&c67x00->endpoint_disable);
 		c67x00_sched_kick(c67x00);
 		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
 
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 023d3cb..bb5d976 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -115,10 +115,9 @@
 
 	pdata.phy = data->phy;
 
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto err_clk;
 
 	if (data->usbmisc_data) {
 		ret = imx_usbmisc_init(data->usbmisc_data);
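This and many platform drivers below replace the open-coded "point dma_mask at coherent_dma_mask, then default both to 32 bit" dance with dma_coerce_mask_and_coherent(). The helper is roughly (<linux/dma-mapping.h>); the "coerce" in the name flags that it forcibly aliases the two masks, which is only legitimate for platform devices that never had a firmware-provided mask:

static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}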
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index db535b0..fed7f68 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@
 	bool "Enable USB persist by default"
 	default y
 	help
-	  Say N here if you don't want USB power session persistance
+	  Say N here if you don't want USB power session persistence
 	  enabled by default.  If you say N it will make suspended USB
 	  devices that lose power get reenumerated as if they had been
 	  unplugged, causing any mounted filesystems to be lost.  The
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 3bdfbf8..ea337a7 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -29,27 +29,19 @@
 
 static int usb_open(struct inode *inode, struct file *file)
 {
-	int minor = iminor(inode);
-	const struct file_operations *c;
 	int err = -ENODEV;
-	const struct file_operations *old_fops, *new_fops = NULL;
+	const struct file_operations *new_fops;
 
 	down_read(&minor_rwsem);
-	c = usb_minors[minor];
+	new_fops = fops_get(usb_minors[iminor(inode)]);
 
-	if (!c || !(new_fops = fops_get(c)))
+	if (!new_fops)
 		goto done;
 
-	old_fops = file->f_op;
-	file->f_op = new_fops;
+	replace_fops(file, new_fops);
 	/* Curiouser and curiouser... NULL ->open() as "no device" ? */
 	if (file->f_op->open)
 		err = file->f_op->open(inode, file);
-	if (err) {
-		fops_put(file->f_op);
-		file->f_op = fops_get(old_fops);
-	}
-	fops_put(old_fops);
  done:
 	up_read(&minor_rwsem);
 	return err;
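usb_open() now uses fops_get() on the minor table entry plus replace_fops() instead of juggling old_fops/new_fops by hand; replace_fops() transfers ownership of the reference, so the error path no longer has anything to restore. The helper is roughly (<linux/fs.h>):

#define replace_fops(f, fops) \
	do {	\
		struct file *__file = (f); \
		fops_put(__file->f_op); \
		BUG_ON(!(__file->f_op = (fops))); \
	} while (0)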
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f2e88a..8b20c70 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -119,10 +119,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we move to full device tree support this will vanish off.
 	 */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
-	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto err1;
 
 	platform_set_drvdata(pdev, exynos);
 
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 44cf775..774e8b8 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -373,7 +373,7 @@
 	if (req->buf == NULL)
 		req->buf = (void *)0xDEADBABE;
 
-	INIT_COMPLETION(ffs->ep0req_completion);
+	reinit_completion(&ffs->ep0req_completion);
 
 	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
 	if (unlikely(ret < 0))
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 67128be..6a2a65a 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3078,7 +3078,9 @@
 		 udc->isp1301_i2c_client->addr);
 
 	pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto resource_fail;
 
 	udc->board = &lpc32xx_usbddata;
 
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index df13d42..205f4a3 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -227,8 +227,7 @@
 
 	/* TODO: Probably need checks here; is the core connected? */
 
-	if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-	    dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
 		return -EOPNOTSUPP;
 
 	usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index f417526..284f841 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -96,10 +96,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail_create_hcd;
 
 	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
 	if (!hcd) {
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 016352e..e97c198 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -84,10 +84,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we move to full device tree support this will vanish off.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
 
 	exynos_setup_vbus_gpio(pdev);
 
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e..4c528b2 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -116,8 +116,10 @@
 	 * We can DMA from anywhere. But the descriptors must be in
 	 * the lower 4GB.
 	 */
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
 	if (!hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 78b01fa..6fa82d6 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -104,7 +104,7 @@
 	struct resource	*res;
 	struct usb_hcd	*hcd;
 	void __iomem *regs;
-	int ret = -ENODEV;
+	int ret;
 	int irq;
 	int i;
 	struct omap_hcd	*omap;
@@ -144,11 +144,11 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
-	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
+	ret = -ENODEV;
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
 			dev_name(dev));
 	if (!hcd) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index d1dfb9d..2ba7673 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -180,10 +180,9 @@
 	 * set. Since shared usb code relies on it, set it here for
 	 * now. Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		goto err1;
 
 	if (!request_mem_region(res->start, resource_size(res),
 				ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f6b790c..7f30b71 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -78,7 +78,7 @@
 	struct resource *res_mem;
 	struct usb_ehci_pdata *pdata;
 	int irq;
-	int err = -ENOMEM;
+	int err;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -89,10 +89,10 @@
 	 */
 	if (!dev_get_platdata(&dev->dev))
 		dev->dev.platform_data = &ehci_platform_defaults;
-	if (!dev->dev.dma_mask)
-		dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-	if (!dev->dev.coherent_dma_mask)
-		dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+	err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
 
 	pdata = dev_get_platdata(&dev->dev);
 
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1cf0adb..ee6f9ff 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -81,10 +81,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail;
 
 	usbh_clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e6d8e26..b9fd039 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -362,10 +362,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
 
 	hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
 					dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 476b5a5..418444e 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -469,7 +469,7 @@
 static int ohci_at91_of_init(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	int i, gpio;
+	int i, gpio, ret;
 	enum of_gpio_flags flags;
 	struct at91_usbh_data	*pdata;
 	u32 ports;
@@ -481,10 +481,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index a87baed..91ec9b2 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -71,10 +71,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we move to full device tree support this will vanish off.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
 
 	hcd = usb_create_hcd(&exynos_ohci_hc_driver,
 				&pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 9ab7e24..e99db8a 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -181,8 +181,9 @@
 		return -EPROBE_DEFER;
 	}
 
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto fail_disable;
 
 	dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
 	if (usb_disabled()) {
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 342dc7e..6c16dce 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -127,8 +127,9 @@
 	}
 
 	/* Ohci is a 32-bit device. */
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
 	if (!hcd)
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 408d06a..2145741 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -65,7 +65,7 @@
 	struct usb_hcd		*hcd = NULL;
 	void __iomem		*regs = NULL;
 	struct resource		*res;
-	int			ret = -ENODEV;
+	int			ret;
 	int			irq;
 
 	if (usb_disabled())
@@ -99,11 +99,11 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
-	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto err_io;
 
+	ret = -ENODEV;
 	hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
 			dev_name(dev));
 	if (!hcd) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index deea5d1..e89ac4d 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -298,6 +298,7 @@
 	struct device_node *np = pdev->dev.of_node;
 	struct pxaohci_platform_data *pdata;
 	u32 tmp;
+	int ret;
 
 	if (!np)
 		return 0;
@@ -306,10 +307,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 17b2a7d..aa9e127 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -185,6 +185,12 @@
 	if (usb_disabled())
 		return -ENODEV;
 
+	/*
+	 * We don't call dma_set_mask_and_coherent() here because the
+	 * DMA mask has already been appropriately set up by the core
+	 * SA-1111 bus code (which includes bug workarounds).
+	 */
+
 	hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
 	if (!hcd)
 		return -ENOMEM;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 31ff3fc..6b02107 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -56,10 +56,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail;
 
 	usbh_clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 74af2c6..0196f76 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -163,8 +163,7 @@
 
 	/* TODO: Probably need checks here; is the core connected? */
 
-	if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-	    dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
 		return -EOPNOTSUPP;
 
 	usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index ded842b..3003fef 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -75,10 +75,9 @@
 	 * Since shared usb code relies on it, set it here for now.
 	 * Once we have dma capability bindings this can go away.
 	 */
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-	if (!pdev->dev.coherent_dma_mask)
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
 			pdev->name);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 84657e0..439c951 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -455,7 +455,7 @@
 		return -1;
 	}
 	mos_parport->msg_pending = true;   /* synch usb call pending */
-	INIT_COMPLETION(mos_parport->syncmsg_compl);
+	reinit_completion(&mos_parport->syncmsg_compl);
 	spin_unlock(&release_lock);
 
 	mutex_lock(&mos_parport->serial->disc_mutex);
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index fa44fbe..552258c 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -478,11 +478,10 @@
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO
-		"fb%d: %s frame buffer device\n", fb_info.node,	fb_info.fix.id);
-	printk(KERN_INFO
-		"fb%d: %dx%dx%d at 0x%08lx\n", fb_info.node,
-		mc68x328fb_default.xres_virtual, mc68x328fb_default.yres_virtual,
+	fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
+	fb_info(&fb_info, "%dx%dx%d at 0x%08lx\n",
+		mc68x328fb_default.xres_virtual,
+		mc68x328fb_default.yres_virtual,
 		1 << mc68x328fb_default.bits_per_pixel, videomemory);
 
 	return 0;
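68328fb is the first of many framebuffer drivers in this series converted from open-coded printk("fb%d: ...") calls to the fb_<level>() helpers, which prepend the node number automatically. They are thin wrappers, roughly (<linux/fb.h>):

#define fb_err(fb_info, fmt, ...)					\
	pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)

#define fb_info(fb_info, fmt, ...)					\
	pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)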
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 84b685f..4f2e1b3 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -19,10 +19,10 @@
 
 source "drivers/gpu/vga/Kconfig"
 
-source "drivers/gpu/drm/Kconfig"
-
 source "drivers/gpu/host1x/Kconfig"
 
+source "drivers/gpu/drm/Kconfig"
+
 config VGASTATE
        tristate
        default n
@@ -996,6 +996,8 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_MODE_HELPERS
+	select VIDEOMODE_HELPERS
 	help
 	  This enables support for the AT91/AT32 LCD Controller.
 
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 7e8346e..a305cae 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -949,9 +949,7 @@
 		 * the page.
 		 */
 		page = virt_to_page(virtual_start);
-		ClearPageReserved(page);
-		init_page_count(page);
-		free_page(virtual_start);
+		__free_reserved_page(page);
 
 		virtual_start += PAGE_SIZE;
 		mb_freed += PAGE_SIZE / 1024;
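
__free_reserved_page() folds the three-step release of a reserved page into one call; a sketch of its effect, matching the lines it replaces:

    static inline void sketch_free_reserved_page(struct page *page)
    {
            ClearPageReserved(page);        /* page may enter the allocator */
            init_page_count(page);          /* reference count back to 1 */
            __free_page(page);              /* hand it to the buddy allocator */
    }

Note the helper takes a struct page rather than a virtual address, which is why the old free_page(virtual_start) call disappears along with the two lines before it.
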
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7..14d6b37 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -10,6 +10,7 @@
  *
  *  ARM PrimeCell PL110 Color LCD Controller
  */
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -544,13 +545,17 @@
 
 static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
 {
-	struct clcd_board *board = dev->dev.platform_data;
+	struct clcd_board *board = dev_get_platdata(&dev->dev);
 	struct clcd_fb *fb;
 	int ret;
 
 	if (!board)
 		return -EINVAL;
 
+	ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto out;
+
 	ret = amba_request_regions(dev, NULL);
 	if (ret) {
 		printk(KERN_ERR "CLCD: unable to reserve regs region\n");
@@ -594,8 +599,6 @@
 {
 	struct clcd_fb *fb = amba_get_drvdata(dev);
 
-	amba_set_drvdata(dev, NULL);
-
 	clcdfb_disable(fb);
 	unregister_framebuffer(&fb->fb);
 	if (fb->fb.cmap.len)
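
dev_get_platdata() is the accessor now preferred over reaching into dev->platform_data directly; it is a trivial inline in include/linux/device.h:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }

The same mechanical conversion recurs throughout the backlight hunks further down.
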
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index a6780ee..0dac36c 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3742,13 +3742,12 @@
 	if (err)
 		goto unset_drvdata;
 
-	printk("fb%d: %s frame buffer device, using %dK of video memory\n",
-	       info->node, info->fix.id, info->fix.smem_len>>10);
+	fb_info(info, "%s frame buffer device, using %dK of video memory\n",
+		info->fix.id, info->fix.smem_len>>10);
 
 	return 0;
 
 unset_drvdata:
-	dev_set_drvdata(&pdev->dev, NULL);
 	fb_dealloc_cmap(&info->cmap);
 free_irq:
 	free_irq(IRQ_AMIGA_COPPER, info->par);
@@ -3768,7 +3767,6 @@
 	struct fb_info *info = dev_get_drvdata(&pdev->dev);
 
 	unregister_framebuffer(info);
-	dev_set_drvdata(&pdev->dev, NULL);
 	fb_dealloc_cmap(&info->cmap);
 	free_irq(IRQ_AMIGA_COPPER, info->par);
 	custom.dmacon = DMAF_ALL | DMAF_MASTER;
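
The dev_set_drvdata(dev, NULL) and pci_set_drvdata(dev, NULL) deletions in these remove and error paths are safe because the driver core now clears drvdata itself whenever a device is unbound or probe fails; conceptually:

    /* sketch of the core's unbind path (drivers/base/dd.c) */
    static void sketch_release_driver(struct device *dev)
    {
            /* ... invoke the driver's remove() callback ... */
            dev_set_drvdata(dev, NULL);     /* core wipes stale driver data */
    }
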
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index e43401a..1b0b233 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -556,9 +556,8 @@
 			goto err1;
 		}
 	}
-	printk(KERN_INFO
-	       "fb%d: Arc frame buffer device, using %dK of video memory\n",
-	       info->node, videomemorysize >> 10);
+	fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
+		videomemorysize >> 10);
 
 	/* this inits the lcd but doesn't clear dirty pixels */
 	for (i = 0; i < num_cols * num_rows; i++) {
@@ -572,8 +571,7 @@
 	/* if we were told to splash the screen, we just clear it */
 	if (!nosplash) {
 		for (i = 0; i < num_cols * num_rows; i++) {
-			printk(KERN_INFO "fb%d: splashing lcd %d\n",
-				info->node, i);
+			fb_info(info, "splashing lcd %d\n", i);
 			ks108_set_start_line(par, i, 0);
 			ks108_clear_lcd(par, i);
 		}
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 94a51f1..a6b29bd 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -137,8 +137,7 @@
 
 	if ((map->width != 8) || (map->height != 16) ||
 	    (map->depth != 1) || (map->length != 256)) {
-	    	printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
-		       "height %d, depth %d, length %d\n", info->node,
+		fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
 		       map->width, map->height, map->depth, map->length);
 		return;
 	}
@@ -517,7 +516,7 @@
 
 	int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+		fb_err(info, "cannot set requested pixclock, keeping old value\n");
 		return;
 	}
 
@@ -584,7 +583,7 @@
 	rv = svga_match_format (arkfb_formats, var, NULL);
 	if (rv < 0)
 	{
-		printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+		fb_err(info, "unsupported mode requested\n");
 		return rv;
 	}
 
@@ -604,14 +603,15 @@
 	mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
 	if (mem > info->screen_size)
 	{
-		printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+		fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+		       mem >> 10, (unsigned int) (info->screen_size >> 10));
 		return -EINVAL;
 	}
 
 	rv = svga_check_timings (&ark_timing_regs, var, info->node);
 	if (rv < 0)
 	{
-		printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+		fb_err(info, "invalid timings requested\n");
 		return rv;
 	}
 
@@ -693,7 +693,7 @@
 	vga_wseq(par->state.vgabase, 0x18, regval);
 
 	/* Set the offset register */
-	pr_debug("fb%d: offset register       : %d\n", info->node, offset_value);
+	fb_dbg(info, "offset register       : %d\n", offset_value);
 	svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);
 
 	/* fix for hi-res textmode */
@@ -716,7 +716,7 @@
 	/* Set mode-specific register values */
 	switch (mode) {
 	case 0:
-		pr_debug("fb%d: text mode\n", info->node);
+		fb_dbg(info, "text mode\n");
 		svga_set_textmode_vga_regs(par->state.vgabase);
 
 		vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -725,7 +725,7 @@
 
 		break;
 	case 1:
-		pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor\n");
 		vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
 
 		vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -733,44 +733,44 @@
 		dac_set_mode(par->dac, DAC_PSEUDO8_8);
 		break;
 	case 2:
-		pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor, planar\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
 		svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
 		dac_set_mode(par->dac, DAC_PSEUDO8_8);
 		break;
 	case 3:
-		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+		fb_dbg(info, "8 bit pseudocolor\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */
 
 		if (info->var.pixclock > 20000) {
-			pr_debug("fb%d: not using multiplex\n", info->node);
+			fb_dbg(info, "not using multiplex\n");
 			svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
 			dac_set_mode(par->dac, DAC_PSEUDO8_8);
 		} else {
-			pr_debug("fb%d: using multiplex\n", info->node);
+			fb_dbg(info, "using multiplex\n");
 			svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
 			dac_set_mode(par->dac, DAC_PSEUDO8_16);
 			hdiv = 2;
 		}
 		break;
 	case 4:
-		pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+		fb_dbg(info, "5/5/5 truecolor\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
 		svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
 		dac_set_mode(par->dac, DAC_RGB1555_16);
 		break;
 	case 5:
-		pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+		fb_dbg(info, "5/6/5 truecolor\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
 		svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
 		dac_set_mode(par->dac, DAC_RGB0565_16);
 		break;
 	case 6:
-		pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+		fb_dbg(info, "8/8/8 truecolor\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
 		svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -779,7 +779,7 @@
 		hdiv = 2;
 		break;
 	case 7:
-		pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+		fb_dbg(info, "8/8/8/8 truecolor\n");
 
 		vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
 		svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -787,7 +787,7 @@
 		hmul = 2;
 		break;
 	default:
-		printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+		fb_err(info, "unsupported mode - bug\n");
 		return -EINVAL;
 	}
 
@@ -879,19 +879,19 @@
 
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
-		pr_debug("fb%d: unblank\n", info->node);
+		fb_dbg(info, "unblank\n");
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
 		svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
 		break;
 	case FB_BLANK_NORMAL:
-		pr_debug("fb%d: blank\n", info->node);
+		fb_dbg(info, "blank\n");
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
 		break;
 	case FB_BLANK_POWERDOWN:
 	case FB_BLANK_HSYNC_SUSPEND:
 	case FB_BLANK_VSYNC_SUSPEND:
-		pr_debug("fb%d: sync down\n", info->node);
+		fb_dbg(info, "sync down\n");
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
 		break;
@@ -1048,12 +1048,12 @@
 
 	rc = register_framebuffer(info);
 	if (rc < 0) {
-		dev_err(info->device, "cannot register framebugger\n");
+		dev_err(info->device, "cannot register framebuffer\n");
 		goto err_reg_fb;
 	}
 
-	printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
-		 pci_name(dev), info->fix.smem_len >> 20);
+	fb_info(info, "%s on %s, %d MB RAM\n",
+		info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
 
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
@@ -1108,7 +1108,6 @@
 		pci_release_regions(dev);
 /*		pci_disable_device(dev); */
 
-		pci_set_drvdata(dev, NULL);
 		framebuffer_release(info);
 	}
 }
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index d5a37d6..d611f1a 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -527,8 +527,8 @@
 		return err;
 	}
 
-	printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n",
-		p->node, p->fix.smem_len / 1024);
+	fb_info(p, "Asiliant 69000 frame buffer (%dK RAM detected)\n",
+		p->fix.smem_len / 1024);
 
 	writeb(0xff, mmio_base + 0x78c);
 	chips_hw_init(p);
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 64e41f5..e21d1f5 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -3246,11 +3246,8 @@
 		return -EINVAL;
 	}
 
-	// FIXME: mode needs setting!
-	//printk("fb%d: %s frame buffer device, using %dK of video memory\n",
-	//       fb_info.node, fb_info.mode->name, screen_len>>10);
-	printk("fb%d: frame buffer device, using %dK of video memory\n",
-	       fb_info.node, screen_len >> 10);
+	fb_info(&fb_info, "frame buffer device, using %dK of video memory\n",
+		screen_len >> 10);
 
 	/* TODO: This driver cannot be unloaded yet */
 	return 0;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 088511a..8521051 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -20,12 +20,55 @@
 #include <linux/gfp.h>
 #include <linux/module.h>
 #include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
 
 #include <mach/cpu.h>
 #include <asm/gpio.h>
 
 #include <video/atmel_lcdc.h>
 
+struct atmel_lcdfb_config {
+	bool have_alt_pixclock;
+	bool have_hozval;
+	bool have_intensity_bit;
+};
+
+/* LCD Controller info data structure (driver private, kept in info->par) */
+struct atmel_lcdfb_info {
+	spinlock_t		lock;
+	struct fb_info		*info;
+	void __iomem		*mmio;
+	int			irq_base;
+	struct work_struct	task;
+
+	unsigned int		smem_len;
+	struct platform_device	*pdev;
+	struct clk		*bus_clk;
+	struct clk		*lcdc_clk;
+
+	struct backlight_device	*backlight;
+	u8			bl_power;
+	u8			saved_lcdcon;
+
+	u32			pseudo_palette[16];
+	bool			have_intensity_bit;
+
+	struct atmel_lcdfb_pdata pdata;
+
+	struct atmel_lcdfb_config *config;
+};
+
+struct atmel_lcdfb_power_ctrl_gpio {
+	int gpio;
+	int active_low;
+
+	struct list_head list;
+};
+
 #define lcdc_readl(sinfo, reg)		__raw_readl((sinfo)->mmio+(reg))
 #define lcdc_writel(sinfo, reg, val)	__raw_writel((val), (sinfo)->mmio+(reg))
 
@@ -34,12 +77,6 @@
 #define ATMEL_LCDC_DMA_BURST_LEN	8	/* words */
 #define ATMEL_LCDC_FIFO_SIZE		512	/* words */
 
-struct atmel_lcdfb_config {
-	bool have_alt_pixclock;
-	bool have_hozval;
-	bool have_intensity_bit;
-};
-
 static struct atmel_lcdfb_config at91sam9261_config = {
 	.have_hozval		= true,
 	.have_intensity_bit	= true,
@@ -248,18 +285,27 @@
 
 static void init_contrast(struct atmel_lcdfb_info *sinfo)
 {
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
 	/* contrast pwm can be 'inverted' */
-	if (sinfo->lcdcon_pol_negative)
+	if (pdata->lcdcon_pol_negative)
 			contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
 
 	/* have some default contrast/backlight settings */
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
 
-	if (sinfo->lcdcon_is_backlight)
+	if (pdata->lcdcon_is_backlight)
 		init_backlight(sinfo);
 }
 
+static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int on)
+{
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+	if (pdata->atmel_lcdfb_power_control)
+		pdata->atmel_lcdfb_power_control(pdata, on);
+}
 
 static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
 	.type		= FB_TYPE_PACKED_PIXELS,
@@ -299,9 +345,11 @@
 
 static void atmel_lcdfb_stop_nowait(struct atmel_lcdfb_info *sinfo)
 {
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
 	/* Turn off the LCD controller and the DMA controller */
 	lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
-			sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
+			pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
 
 	/* Wait for the LCDC core to become idle */
 	while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
@@ -321,9 +369,11 @@
 
 static void atmel_lcdfb_start(struct atmel_lcdfb_info *sinfo)
 {
-	lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+	lcdc_writel(sinfo, ATMEL_LCDC_DMACON, pdata->default_dmacon);
 	lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
-		(sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
+		(pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
 		| ATMEL_LCDC_PWR);
 }
 
@@ -424,6 +474,7 @@
 {
 	struct device *dev = info->device;
 	struct atmel_lcdfb_info *sinfo = info->par;
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
 	unsigned long clk_value_khz;
 
 	clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
@@ -510,7 +561,7 @@
 		else
 			var->green.length = 6;
 
-		if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+		if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
 			/* RGB:5X5 mode */
 			var->red.offset = var->green.length + 5;
 			var->blue.offset = 0;
@@ -527,7 +578,7 @@
 		var->transp.length = 8;
 		/* fall through */
 	case 24:
-		if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+		if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
 			/* RGB:888 mode */
 			var->red.offset = 16;
 			var->blue.offset = 0;
@@ -576,6 +627,7 @@
 static int atmel_lcdfb_set_par(struct fb_info *info)
 {
 	struct atmel_lcdfb_info *sinfo = info->par;
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
 	unsigned long hozval_linesz;
 	unsigned long value;
 	unsigned long clk_value_khz;
@@ -637,7 +689,7 @@
 
 
 	/* Initialize control register 2 */
-	value = sinfo->default_lcdcon2;
+	value = pdata->default_lcdcon2;
 
 	if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
 		value |= ATMEL_LCDC_INVLINE_INVERTED;
@@ -741,6 +793,7 @@
 			     unsigned int transp, struct fb_info *info)
 {
 	struct atmel_lcdfb_info *sinfo = info->par;
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
 	unsigned int val;
 	u32 *pal;
 	int ret = 1;
@@ -777,8 +830,7 @@
 				 */
 			} else {
 				/* new style BGR:565 / RGB:565 */
-				if (sinfo->lcd_wiring_mode ==
-				    ATMEL_LCDC_WIRING_RGB) {
+				if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
 					val  = ((blue >> 11) & 0x001f);
 					val |= ((red  >>  0) & 0xf800);
 				} else {
@@ -912,16 +964,187 @@
 	clk_disable_unprepare(sinfo->lcdc_clk);
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_lcdfb_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9261-lcdc" , .data = &at91sam9261_config, },
+	{ .compatible = "atmel,at91sam9263-lcdc" , .data = &at91sam9263_config, },
+	{ .compatible = "atmel,at91sam9g10-lcdc" , .data = &at91sam9g10_config, },
+	{ .compatible = "atmel,at91sam9g45-lcdc" , .data = &at91sam9g45_config, },
+	{ .compatible = "atmel,at91sam9g45es-lcdc" , .data = &at91sam9g45es_config, },
+	{ .compatible = "atmel,at91sam9rl-lcdc" , .data = &at91sam9rl_config, },
+	{ .compatible = "atmel,at32ap-lcdc" , .data = &at32ap_config, },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_lcdfb_dt_ids);
+
+static const char *atmel_lcdfb_wiring_modes[] = {
+	[ATMEL_LCDC_WIRING_BGR]	= "BRG",
+	[ATMEL_LCDC_WIRING_RGB]	= "RGB",
+};
+
+static int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+{
+	const char *mode;
+	int err, i;
+
+	err = of_property_read_string(np, "atmel,lcd-wiring-mode", &mode);
+	if (err < 0)
+		return ATMEL_LCDC_WIRING_BGR;
+
+	for (i = 0; i < ARRAY_SIZE(atmel_lcdfb_wiring_modes); i++)
+		if (!strcasecmp(mode, atmel_lcdfb_wiring_modes[i]))
+			return i;
+
+	return -ENODEV;
+}
+
+static void atmel_lcdfb_power_control_gpio(struct atmel_lcdfb_pdata *pdata, int on)
+{
+	struct atmel_lcdfb_power_ctrl_gpio *og;
+
+	list_for_each_entry(og, &pdata->pwr_gpios, list)
+		gpio_set_value(og->gpio, on);
+}
+
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+	struct fb_info *info = sinfo->info;
+	struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+	struct fb_var_screeninfo *var = &info->var;
+	struct device *dev = &sinfo->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *display_np;
+	struct device_node *timings_np;
+	struct display_timings *timings;
+	enum of_gpio_flags flags;
+	struct atmel_lcdfb_power_ctrl_gpio *og;
+	bool is_gpio_power = false;
+	int ret = -ENOENT;
+	int i, gpio;
+
+	sinfo->config = (struct atmel_lcdfb_config *)
+		of_match_device(atmel_lcdfb_dt_ids, dev)->data;
+
+	display_np = of_parse_phandle(np, "display", 0);
+	if (!display_np) {
+		dev_err(dev, "failed to find display phandle\n");
+		return -ENOENT;
+	}
+
+	ret = of_property_read_u32(display_np, "bits-per-pixel", &var->bits_per_pixel);
+	if (ret < 0) {
+		dev_err(dev, "failed to get property bits-per-pixel\n");
+		goto put_display_node;
+	}
+
+	ret = of_property_read_u32(display_np, "atmel,guard-time", &pdata->guard_time);
+	if (ret < 0) {
+		dev_err(dev, "failed to get property atmel,guard-time\n");
+		goto put_display_node;
+	}
+
+	ret = of_property_read_u32(display_np, "atmel,lcdcon2", &pdata->default_lcdcon2);
+	if (ret < 0) {
+		dev_err(dev, "failed to get property atmel,lcdcon2\n");
+		goto put_display_node;
+	}
+
+	ret = of_property_read_u32(display_np, "atmel,dmacon", &pdata->default_dmacon);
+	if (ret < 0) {
+		dev_err(dev, "failed to get property atmel,dmacon\n");
+		goto put_display_node;
+	}
+
+	ret = -ENOMEM;
+	for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
+		gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
+					       i, &flags);
+		if (gpio < 0)
+			continue;
+
+		og = devm_kzalloc(dev, sizeof(*og), GFP_KERNEL);
+		if (!og)
+			goto put_display_node;
+
+		og->gpio = gpio;
+		og->active_low = flags & OF_GPIO_ACTIVE_LOW;
+		is_gpio_power = true;
+		ret = devm_gpio_request(dev, gpio, "lcd-power-control-gpio");
+		if (ret) {
+			dev_err(dev, "request gpio %d failed\n", gpio);
+			goto put_display_node;
+		}
+
+		ret = gpio_direction_output(gpio, og->active_low);
+		if (ret) {
+			dev_err(dev, "set direction output gpio %d failed\n", gpio);
+			goto put_display_node;
+		}
+	}
+
+	if (is_gpio_power)
+		pdata->atmel_lcdfb_power_control = atmel_lcdfb_power_control_gpio;
+
+	ret = atmel_lcdfb_get_of_wiring_modes(display_np);
+	if (ret < 0) {
+		dev_err(dev, "invalid atmel,lcd-wiring-mode\n");
+		goto put_display_node;
+	}
+	pdata->lcd_wiring_mode = ret;
+
+	pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+
+	timings = of_get_display_timings(display_np);
+	if (!timings) {
+		dev_err(dev, "failed to get display timings\n");
+		goto put_display_node;
+	}
+
+	timings_np = of_find_node_by_name(display_np, "display-timings");
+	if (!timings_np) {
+		dev_err(dev, "failed to find display-timings node\n");
+		goto put_display_node;
+	}
+
+	for (i = 0; i < of_get_child_count(timings_np); i++) {
+		struct videomode vm;
+		struct fb_videomode fb_vm;
+
+		ret = videomode_from_timings(timings, &vm, i);
+		if (ret < 0)
+			goto put_timings_node;
+		ret = fb_videomode_from_videomode(&vm, &fb_vm);
+		if (ret < 0)
+			goto put_timings_node;
+
+		fb_add_videomode(&fb_vm, &info->modelist);
+	}
+
+	return 0;
+
+put_timings_node:
+	of_node_put(timings_np);
+put_display_node:
+	of_node_put(display_np);
+	return ret;
+}
+#else
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+	return 0;
+}
+#endif
 
 static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct fb_info *info;
 	struct atmel_lcdfb_info *sinfo;
-	struct atmel_lcdfb_info *pdata_sinfo;
-	struct fb_videomode fbmode;
+	struct atmel_lcdfb_pdata *pdata = NULL;
 	struct resource *regs = NULL;
 	struct resource *map = NULL;
+	struct fb_modelist *modelist;
 	int ret;
 
 	dev_dbg(dev, "%s BEGIN\n", __func__);
@@ -934,26 +1157,35 @@
 	}
 
 	sinfo = info->par;
+	sinfo->pdev = pdev;
+	sinfo->info = info;
 
-	if (dev->platform_data) {
-		pdata_sinfo = (struct atmel_lcdfb_info *)dev->platform_data;
-		sinfo->default_bpp = pdata_sinfo->default_bpp;
-		sinfo->default_dmacon = pdata_sinfo->default_dmacon;
-		sinfo->default_lcdcon2 = pdata_sinfo->default_lcdcon2;
-		sinfo->default_monspecs = pdata_sinfo->default_monspecs;
-		sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
-		sinfo->guard_time = pdata_sinfo->guard_time;
-		sinfo->smem_len = pdata_sinfo->smem_len;
-		sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
-		sinfo->lcdcon_pol_negative = pdata_sinfo->lcdcon_pol_negative;
-		sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
+	INIT_LIST_HEAD(&info->modelist);
+
+	if (pdev->dev.of_node) {
+		ret = atmel_lcdfb_of_init(sinfo);
+		if (ret)
+			goto free_info;
+	} else if (dev_get_platdata(dev)) {
+		struct fb_monspecs *monspecs;
+		int i;
+
+		pdata = dev_get_platdata(dev);
+		monspecs = pdata->default_monspecs;
+		sinfo->pdata = *pdata;
+
+		for (i = 0; i < monspecs->modedb_len; i++)
+			fb_add_videomode(&monspecs->modedb[i], &info->modelist);
+
+		sinfo->config = atmel_lcdfb_get_config(pdev);
+
+		info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+		memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
 	} else {
 		dev_err(dev, "cannot get default configuration\n");
 		goto free_info;
 	}
-	sinfo->info = info;
-	sinfo->pdev = pdev;
-	sinfo->config = atmel_lcdfb_get_config(pdev);
+
 	if (!sinfo->config)
 		goto free_info;
 
@@ -962,7 +1194,6 @@
 	info->pseudo_palette = sinfo->pseudo_palette;
 	info->fbops = &atmel_lcdfb_ops;
 
-	memcpy(&info->monspecs, sinfo->default_monspecs, sizeof(info->monspecs));
 	info->fix = atmel_lcdfb_fix;
 
 	/* Enable LCDC Clocks */
@@ -978,14 +1209,11 @@
 	}
 	atmel_lcdfb_start_clock(sinfo);
 
-	ret = fb_find_mode(&info->var, info, NULL, info->monspecs.modedb,
-			info->monspecs.modedb_len, info->monspecs.modedb,
-			sinfo->default_bpp);
-	if (!ret) {
-		dev_err(dev, "no suitable video mode found\n");
-		goto stop_clk;
-	}
+	modelist = list_first_entry(&info->modelist,
+			struct fb_modelist, list);
+	fb_videomode_to_var(&info->var, &modelist->mode);
 
+	atmel_lcdfb_check_var(&info->var, info);
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!regs) {
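
Both probe paths now fill info->modelist up front - the DT path by running each display-timings child through the common videomode helpers, the platform-data path from monspecs->modedb - so fb_find_mode() and the late fb_set_var() can go. The DT conversion pipeline, with names from the hunks above:

    struct videomode vm;            /* generic timing representation */
    struct fb_videomode fb_vm;      /* fbdev's native representation */

    ret = videomode_from_timings(timings, &vm, i);          /* entry i */
    if (!ret)
            ret = fb_videomode_from_videomode(&vm, &fb_vm); /* convert units */
    if (!ret)
            fb_add_videomode(&fb_vm, &info->modelist);

Probe then adopts the first modelist entry and relies on atmel_lcdfb_check_var() to fix up the colour bitfields for the chosen depth.
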
@@ -1069,18 +1297,6 @@
 		goto unregister_irqs;
 	}
 
-	/*
-	 * This makes sure that our colour bitfield
-	 * descriptors are correctly initialised.
-	 */
-	atmel_lcdfb_check_var(&info->var, info);
-
-	ret = fb_set_var(info, &info->var);
-	if (ret) {
-		dev_warn(dev, "unable to set display parameters\n");
-		goto free_cmap;
-	}
-
 	dev_set_drvdata(dev, info);
 
 	/*
@@ -1092,13 +1308,8 @@
 		goto reset_drvdata;
 	}
 
-	/* add selected videomode to modelist */
-	fb_var_to_videomode(&fbmode, &info->var);
-	fb_add_videomode(&fbmode, &info->modelist);
-
 	/* Power up the LCDC screen */
-	if (sinfo->atmel_lcdfb_power_control)
-		sinfo->atmel_lcdfb_power_control(1);
+	atmel_lcdfb_power_control(sinfo, 1);
 
 	dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %d\n",
 		       info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
@@ -1107,7 +1318,6 @@
 
 reset_drvdata:
 	dev_set_drvdata(dev, NULL);
-free_cmap:
 	fb_dealloc_cmap(&info->cmap);
 unregister_irqs:
 	cancel_work_sync(&sinfo->task);
@@ -1143,15 +1353,16 @@
 	struct device *dev = &pdev->dev;
 	struct fb_info *info = dev_get_drvdata(dev);
 	struct atmel_lcdfb_info *sinfo;
+	struct atmel_lcdfb_pdata *pdata;
 
 	if (!info || !info->par)
 		return 0;
 	sinfo = info->par;
+	pdata = &sinfo->pdata;
 
 	cancel_work_sync(&sinfo->task);
 	exit_backlight(sinfo);
-	if (sinfo->atmel_lcdfb_power_control)
-		sinfo->atmel_lcdfb_power_control(0);
+	atmel_lcdfb_power_control(sinfo, 0);
 	unregister_framebuffer(info);
 	atmel_lcdfb_stop_clock(sinfo);
 	clk_put(sinfo->lcdc_clk);
@@ -1167,7 +1378,6 @@
 		atmel_lcdfb_free_video_memory(sinfo);
 	}
 
-	dev_set_drvdata(dev, NULL);
 	framebuffer_release(info);
 
 	return 0;
@@ -1188,9 +1398,7 @@
 
 	sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
-	if (sinfo->atmel_lcdfb_power_control)
-		sinfo->atmel_lcdfb_power_control(0);
-
+	atmel_lcdfb_power_control(sinfo, 0);
 	atmel_lcdfb_stop(sinfo);
 	atmel_lcdfb_stop_clock(sinfo);
 
@@ -1204,8 +1412,7 @@
 
 	atmel_lcdfb_start_clock(sinfo);
 	atmel_lcdfb_start(sinfo);
-	if (sinfo->atmel_lcdfb_power_control)
-		sinfo->atmel_lcdfb_power_control(1);
+	atmel_lcdfb_power_control(sinfo, 1);
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
 
 	/* Enable FIFO & DMA errors */
@@ -1228,6 +1435,7 @@
 	.driver		= {
 		.name	= "atmel_lcdfb",
 		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(atmel_lcdfb_dt_ids),
 	},
 };
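
of_match_ptr() in the driver structure evaluates to the table when CONFIG_OF is set and to NULL otherwise, so non-DT builds carry no dead reference. Fetching the per-SoC configuration then looks roughly like the opening of atmel_lcdfb_of_init() above, written defensively:

    const struct of_device_id *match;

    match = of_match_device(atmel_lcdfb_dt_ids, &pdev->dev);
    if (match)
            sinfo->config = (struct atmel_lcdfb_config *)match->data;

(The hunk above dereferences the of_match_device() result directly, which assumes probe only runs for a matched node; that holds for DT probing but is an unchecked assumption.)
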
 
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index a4dfe8c..12ca031 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -413,7 +413,6 @@
 	int blitter_may_be_busy;
 	int fifo_slots;                 /* free slots in FIFO (64 max) */
 
-	int	pm_reg;
 	int crt_on, lcd_on;
 	struct pci_dev *pdev;
 	struct fb_info *next;
@@ -2016,7 +2015,6 @@
 
 	aty128_init_engine(par);
 
-	par->pm_reg = pdev->pm_cap;
 	par->pdev = pdev;
 	par->asleep = 0;
 	par->lock_blank = 0;
@@ -2029,8 +2027,8 @@
 	if (register_framebuffer(info) < 0)
 		return 0;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device on %s\n",
-	       info->node, info->fix.id, video_card);
+	fb_info(info, "%s frame buffer device on %s\n",
+		info->fix.id, video_card);
 
 	return 1;	/* success! */
 }
@@ -2397,7 +2395,7 @@
 	u32	pmgt;
 	struct pci_dev *pdev = par->pdev;
 
-	if (!par->pm_reg)
+	if (!par->pdev->pm_cap)
 		return;
 		
 	/* Set the chip into the appropriate suspend mode (we use D2,
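
The pm_reg removals here and in radeonfb below work because the PCI core caches the power-management capability offset in pci_dev->pm_cap at enumeration time (pci_pm_init()), so drivers no longer need to look it up or keep a private copy:

    u16 pmcsr;

    if (!pdev->pm_cap)
            return;         /* this function has no PM capability */
    pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
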
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 9b0f12c..28fafbf 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1848,7 +1848,6 @@
 
 			return aty_waitforvblank(par, crtc);
 		}
-		break;
 
 #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
 	case ATYIO_CLKR:
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 1e30b2b..26d80a4 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -819,11 +819,6 @@
 	if (v.xres_virtual < v.xres)
 		v.xres = v.xres_virtual;
 
-	if (v.xoffset < 0)
-                v.xoffset = 0;
-        if (v.yoffset < 0)
-                v.yoffset = 0;
-         
         if (v.xoffset > v.xres_virtual - v.xres)
                 v.xoffset = v.xres_virtual - v.xres - 1;
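
The dropped xoffset/yoffset checks were dead code: both fields are __u32 in struct fb_var_screeninfo, so the "< 0" comparisons could never be true and only served to trigger compiler warnings.
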
                         
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index f7091ec..46a12f1 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1427,6 +1427,8 @@
 	mdelay( 15);
 }
 
+#if defined(CONFIG_PM)
+#if defined(CONFIG_X86) || defined(CONFIG_PPC_PMAC)
 static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
 {
 	u32 tmp, tmp2;
@@ -1939,9 +1941,10 @@
 	 */
 	radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
 }
+#endif
 
 #ifdef CONFIG_PPC_OF
-
+#ifdef CONFIG_PPC_PMAC
 static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
 {
 	OUTREG(MC_CNTL, rinfo->save_regs[46]);
@@ -2202,6 +2205,8 @@
 	radeon_pm_restore_pixel_pll(rinfo);
 	radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
 }
+#endif
+#endif
 
 #if 0 /* Not ready yet */
 static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
@@ -2515,13 +2520,13 @@
 
 	for (;;) {
 		pci_read_config_word(rinfo->pdev,
-				     rinfo->pm_reg+PCI_PM_CTRL,
+				     rinfo->pdev->pm_cap + PCI_PM_CTRL,
 				     &pwr_cmd);
-		if (pwr_cmd & 2)
+		if (pwr_cmd & state)
 			break;
-		pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2;
+		pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | state;
 		pci_write_config_word(rinfo->pdev,
-				      rinfo->pm_reg+PCI_PM_CTRL,
+				      rinfo->pdev->pm_cap + PCI_PM_CTRL,
 				      pwr_cmd);
 		msleep(500);
 	}
@@ -2532,7 +2537,7 @@
 {
 	u32 tmp;
 
-	if (!rinfo->pm_reg)
+	if (!rinfo->pdev->pm_cap)
 		return;
 
 	/* Set the chip into appropriate suspend mode (we use D2,
@@ -2804,9 +2809,6 @@
 
 void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep)
 {
-	/* Find PM registers in config space if any*/
-	rinfo->pm_reg = rinfo->pdev->pm_cap;
-
 	/* Enable/Disable dynamic clocks: TODO add sysfs access */
 	if (rinfo->family == CHIP_FAMILY_RS480)
 		rinfo->dynclk = -1;
@@ -2830,7 +2832,7 @@
 	 * reason. --BenH
 	 */
 	if (machine_is(powermac) && rinfo->of_node) {
-		if (rinfo->is_mobility && rinfo->pm_reg &&
+		if (rinfo->is_mobility && rinfo->pdev->pm_cap &&
 		    rinfo->family <= CHIP_FAMILY_RV250)
 			rinfo->pm_mode |= radeon_pm_d2;
 
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7351e66..cb84604 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -342,7 +342,6 @@
 
 	int			mtrr_hdl;
 
-	int			pm_reg;
 	u32			save_regs[100];
 	int			asleep;
 	int			lock_blank;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 22ad8524..372d4ae 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -564,7 +564,7 @@
 	if (!dev)
 		return -ENODEV;
 
-	fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
+	fbdev = platform_get_drvdata(dev);
 
 #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
 	au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
@@ -636,19 +636,7 @@
 	.suspend	= au1100fb_drv_suspend,
         .resume		= au1100fb_drv_resume,
 };
-
-static int __init au1100fb_load(void)
-{
-	return platform_driver_register(&au1100fb_driver);
-}
-
-static void __exit au1100fb_unload(void)
-{
-	platform_driver_unregister(&au1100fb_driver);
-}
-
-module_init(au1100fb_load);
-module_exit(au1100fb_unload);
+module_platform_driver(au1100fb_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
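
module_platform_driver() generates exactly the init/exit boilerplate it replaces (au1200fb below gets the same conversion); expanded, it is roughly:

    static int __init au1100fb_driver_init(void)
    {
            return platform_driver_register(&au1100fb_driver);
    }
    module_init(au1100fb_driver_init);

    static void __exit au1100fb_driver_exit(void)
    {
            platform_driver_unregister(&au1100fb_driver);
    }
    module_exit(au1100fb_driver_exit);
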
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 1d02897..4cfba78 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1853,21 +1853,7 @@
 	.probe		= au1200fb_drv_probe,
 	.remove		= au1200fb_drv_remove,
 };
-
-/*-------------------------------------------------------------------------*/
-
-static int __init au1200fb_init(void)
-{
-	return platform_driver_register(&au1200fb_driver);
-}
-
-static void __exit au1200fb_cleanup(void)
-{
-	platform_driver_unregister(&au1200fb_driver);
-}
-
-module_init(au1200fb_init);
-module_exit(au1200fb_cleanup);
+module_platform_driver(au1200fb_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 2cd6350..7db5234 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -196,7 +196,7 @@
 static int pm860x_backlight_probe(struct platform_device *pdev)
 {
 	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
-	struct pm860x_backlight_pdata *pdata = pdev->dev.platform_data;
+	struct pm860x_backlight_pdata *pdata = dev_get_platdata(&pdev->dev);
 	struct pm860x_backlight_data *data;
 	struct backlight_device *bl;
 	struct resource *res;
@@ -243,7 +243,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
-	bl = backlight_device_register(name, &pdev->dev, data,
+	bl = devm_backlight_device_register(&pdev->dev, name, &pdev->dev, data,
 					&pm860x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
@@ -256,21 +256,10 @@
 	/* read current backlight */
 	ret = pm860x_backlight_get_brightness(bl);
 	if (ret < 0)
-		goto out_brt;
+		return ret;
 
 	backlight_update_status(bl);
 	return 0;
-out_brt:
-	backlight_device_unregister(bl);
-	return ret;
-}
-
-static int pm860x_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bl);
-	return 0;
 }
 
 static struct platform_driver pm860x_backlight_driver = {
@@ -279,7 +268,6 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= pm860x_backlight_probe,
-	.remove		= pm860x_backlight_remove,
 };
 
 module_platform_driver(pm860x_backlight_driver);
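
This driver sets the template for the long run of backlight conversions that follows: devm_backlight_device_register() ties the backlight's lifetime to the owning device, so the error-path unregister calls and most remove() callbacks simply disappear. The calling pattern:

    bl = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
                                        data, &pm860x_backlight_ops, &props);
    if (IS_ERR(bl))
            return PTR_ERR(bl);     /* nothing to unwind, devres cleans up */

One behavioural note: devres runs the unregister after remove() returns, so drivers that must actively switch the panel off on unbind keep an explicit remove() that zeroes brightness first, as several hunks below do.
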
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index d4a7a35..5a3eb2e 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -368,12 +368,12 @@
 	  If you have a AnalogicTech AAT2870 say Y to enable the
 	  backlight driver.
 
-config BACKLIGHT_LM3630
-	tristate "Backlight Driver for LM3630"
+config BACKLIGHT_LM3630A
+	tristate "Backlight Driver for LM3630A"
 	depends on BACKLIGHT_CLASS_DEVICE && I2C
 	select REGMAP_I2C
 	help
-	  This supports TI LM3630 Backlight Driver
+	  This supports TI LM3630A Backlight Driver
 
 config BACKLIGHT_LM3639
 	tristate "Backlight Driver for LM3639"
@@ -388,8 +388,8 @@
 	tristate "Backlight driver for TI LP855X"
 	depends on BACKLIGHT_CLASS_DEVICE && I2C
 	help
-	  This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
-	  backlight driver.
+	  This supports TI LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
+	  LP8557 backlight driver.
 
 config BACKLIGHT_LP8788
 	tristate "Backlight driver for TI LP8788 MFD"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 38e1bab..bb82002 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -37,7 +37,7 @@
 obj-$(CONFIG_BACKLIGHT_HP680)		+= hp680_bl.o
 obj-$(CONFIG_BACKLIGHT_HP700)		+= jornada720_bl.o
 obj-$(CONFIG_BACKLIGHT_LM3533)		+= lm3533_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3630)		+= lm3630_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3630A)		+= lm3630a_bl.o
 obj-$(CONFIG_BACKLIGHT_LM3639)		+= lm3639_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO)		+= locomolcd.o
 obj-$(CONFIG_BACKLIGHT_LP855X)		+= lp855x_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index c6fc668..ee0c0a9 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -127,7 +127,7 @@
 
 static int aat2870_bl_probe(struct platform_device *pdev)
 {
-	struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+	struct aat2870_bl_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct aat2870_bl_driver_data *aat2870_bl;
 	struct backlight_device *bd;
 	struct backlight_properties props;
@@ -158,8 +158,9 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 
 	props.type = BACKLIGHT_RAW;
-	bd = backlight_device_register("aat2870-backlight", &pdev->dev,
-				       aat2870_bl, &aat2870_bl_ops, &props);
+	bd = devm_backlight_device_register(&pdev->dev, "aat2870-backlight",
+					&pdev->dev, aat2870_bl, &aat2870_bl_ops,
+					&props);
 	if (IS_ERR(bd)) {
 		dev_err(&pdev->dev,
 			"Failed allocate memory for backlight device\n");
@@ -194,13 +195,11 @@
 	ret = aat2870_bl_update_status(bd);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to initialize\n");
-		goto out_bl_dev_unregister;
+		return ret;
 	}
 
 	return 0;
 
-out_bl_dev_unregister:
-	backlight_device_unregister(bd);
 out:
 	return ret;
 }
@@ -214,8 +213,6 @@
 	bd->props.brightness = 0;
 	backlight_update_status(bd);
 
-	backlight_device_unregister(bd);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index c84701b..f37097a 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -297,7 +297,7 @@
 		return -ENOMEM;
 
 	data->master = pdev->dev.parent;
-	data->pdata = pdev->dev.platform_data;
+	data->pdata = dev_get_platdata(&pdev->dev);
 
 	if (data->pdata  == NULL) {
 		dev_err(&pdev->dev, "missing platform data\n");
@@ -312,8 +312,9 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = ADP5020_MAX_BRIGHTNESS;
-	bl = backlight_device_register(pdev->name, data->master, data,
-				       &adp5520_bl_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+					data->master, data, &adp5520_bl_ops,
+					&props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -326,7 +327,7 @@
 
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register sysfs\n");
-		backlight_device_unregister(bl);
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, bl);
@@ -347,8 +348,6 @@
 		sysfs_remove_group(&bl->dev.kobj,
 				&adp5520_bl_attr_group);
 
-	backlight_device_unregister(bl);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 75b10f8..9d65671 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -216,7 +216,7 @@
 static int adp8860_led_probe(struct i2c_client *client)
 {
 	struct adp8860_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	struct adp8860_bl *data = i2c_get_clientdata(client);
 	struct adp8860_led *led, *led_dat;
 	struct led_info *cur_led;
@@ -300,7 +300,7 @@
 static int adp8860_led_remove(struct i2c_client *client)
 {
 	struct adp8860_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	struct adp8860_bl *data = i2c_get_clientdata(client);
 	int i;
 
@@ -658,7 +658,7 @@
 	struct backlight_device *bl;
 	struct adp8860_bl *data;
 	struct adp8860_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	struct backlight_properties props;
 	uint8_t reg_val;
 	int ret;
@@ -711,8 +711,9 @@
 
 	mutex_init(&data->lock);
 
-	bl = backlight_device_register(dev_driver_string(&client->dev),
-			&client->dev, data, &adp8860_bl_ops, &props);
+	bl = devm_backlight_device_register(&client->dev,
+				dev_driver_string(&client->dev),
+				&client->dev, data, &adp8860_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&client->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -728,7 +729,7 @@
 
 	if (ret) {
 		dev_err(&client->dev, "failed to register sysfs\n");
-		goto out1;
+		return ret;
 	}
 
 	ret = adp8860_bl_setup(bl);
@@ -751,8 +752,6 @@
 	if (data->en_ambl_sens)
 		sysfs_remove_group(&data->bl->dev.kobj,
 			&adp8860_bl_attr_group);
-out1:
-	backlight_device_unregister(bl);
 
 	return ret;
 }
@@ -770,8 +769,6 @@
 		sysfs_remove_group(&data->bl->dev.kobj,
 			&adp8860_bl_attr_group);
 
-	backlight_device_unregister(data->bl);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 90049d7..6370720 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -238,7 +238,7 @@
 static int adp8870_led_probe(struct i2c_client *client)
 {
 	struct adp8870_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	struct adp8870_bl *data = i2c_get_clientdata(client);
 	struct adp8870_led *led, *led_dat;
 	struct led_info *cur_led;
@@ -325,7 +325,7 @@
 static int adp8870_led_remove(struct i2c_client *client)
 {
 	struct adp8870_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	struct adp8870_bl *data = i2c_get_clientdata(client);
 	int i;
 
@@ -848,7 +848,7 @@
 	struct backlight_device *bl;
 	struct adp8870_bl *data;
 	struct adp8870_backlight_platform_data *pdata =
-		client->dev.platform_data;
+		dev_get_platdata(&client->dev);
 	uint8_t reg_val;
 	int ret;
 
@@ -888,8 +888,9 @@
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
-	bl = backlight_device_register(dev_driver_string(&client->dev),
-			&client->dev, data, &adp8870_bl_ops, &props);
+	bl = devm_backlight_device_register(&client->dev,
+				dev_driver_string(&client->dev),
+				&client->dev, data, &adp8870_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&client->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -902,7 +903,7 @@
 			&adp8870_bl_attr_group);
 		if (ret) {
 			dev_err(&client->dev, "failed to register sysfs\n");
-			goto out1;
+			return ret;
 		}
 	}
 
@@ -925,8 +926,6 @@
 	if (data->pdata->en_ambl_sens)
 		sysfs_remove_group(&data->bl->dev.kobj,
 			&adp8870_bl_attr_group);
-out1:
-	backlight_device_unregister(bl);
 
 	return ret;
 }
@@ -944,8 +943,6 @@
 		sysfs_remove_group(&data->bl->dev.kobj,
 			&adp8870_bl_attr_group);
 
-	backlight_device_unregister(data->bl);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 319fef6..d8952c4 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -471,14 +471,14 @@
 	lcd->spi = spi;
 	lcd->dev = &spi->dev;
 
-	lcd->lcd_pd = spi->dev.platform_data;
+	lcd->lcd_pd = dev_get_platdata(&spi->dev);
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL\n");
 		return -EINVAL;
 	}
 
-	ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
-		&ams369fg06_lcd_ops);
+	ld = devm_lcd_device_register(&spi->dev, "ams369fg06", &spi->dev, lcd,
+					&ams369fg06_lcd_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -488,12 +488,11 @@
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
 
-	bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd,
-		&ams369fg06_backlight_ops, &props);
-	if (IS_ERR(bd)) {
-		ret =  PTR_ERR(bd);
-		goto out_lcd_unregister;
-	}
+	bd = devm_backlight_device_register(&spi->dev, "ams369fg06-bl",
+					&spi->dev, lcd,
+					&ams369fg06_backlight_ops, &props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
 
 	bd->props.brightness = DEFAULT_BRIGHTNESS;
 	lcd->bd = bd;
@@ -516,10 +515,6 @@
 	dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
 
 	return 0;
-
-out_lcd_unregister:
-	lcd_device_unregister(ld);
-	return ret;
 }
 
 static int ams369fg06_remove(struct spi_device *spi)
@@ -527,9 +522,6 @@
 	struct ams369fg06 *lcd = spi_get_drvdata(spi);
 
 	ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
-	backlight_device_unregister(lcd->bd);
-	lcd_device_unregister(lcd->ld);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 123887cd..bb1fc45 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -240,7 +240,8 @@
 	/* max tuning I = 31uA for voltage- and 38250uA for current-feedback */
 	props.max_brightness = max_brightness;
 
-	bl = backlight_device_register(su->type == AS3711_BL_SU1 ?
+	bl = devm_backlight_device_register(&pdev->dev,
+				       su->type == AS3711_BL_SU1 ?
 				       "as3711-su1" : "as3711-su2",
 				       &pdev->dev, su,
 				       &as3711_bl_ops, &props);
@@ -432,8 +433,7 @@
 		case AS3711_SU2_LX_SD4:
 			break;
 		default:
-			ret = -EINVAL;
-			goto esu2;
+			return -EINVAL;
 		}
 
 		switch (pdata->su2_feedback) {
@@ -447,8 +447,7 @@
 			max_brightness = min(pdata->su2_max_uA / 150, 255);
 			break;
 		default:
-			ret = -EINVAL;
-			goto esu2;
+			return -EINVAL;
 		}
 
 		ret = as3711_bl_init_su2(supply);
@@ -457,26 +456,12 @@
 
 		ret = as3711_bl_register(pdev, max_brightness, su);
 		if (ret < 0)
-			goto esu2;
+			return ret;
 	}
 
 	platform_set_drvdata(pdev, supply);
 
 	return 0;
-
-esu2:
-	backlight_device_unregister(supply->su1.bl);
-	return ret;
-}
-
-static int as3711_backlight_remove(struct platform_device *pdev)
-{
-	struct as3711_bl_supply *supply = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(supply->su1.bl);
-	backlight_device_unregister(supply->su2.bl);
-
-	return 0;
 }
 
 static struct platform_driver as3711_backlight_driver = {
@@ -485,7 +470,6 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= as3711_backlight_probe,
-	.remove		= as3711_backlight_remove,
 };
 
 module_platform_driver(as3711_backlight_driver);
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index f7447f7..261b1a4 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/fb.h>
-#include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/backlight.h>
 #include <linux/atmel_pwm.h>
@@ -27,6 +26,14 @@
 	int					gpio_on;
 };
 
+static void atmel_pwm_bl_set_gpio_on(struct atmel_pwm_bl *pwmbl, int on)
+{
+	if (!gpio_is_valid(pwmbl->gpio_on))
+		return;
+
+	gpio_set_value(pwmbl->gpio_on, on ^ pwmbl->pdata->on_active_low);
+}
+
 static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
 {
 	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
@@ -49,19 +56,13 @@
 		pwm_duty = pwmbl->pdata->pwm_duty_min;
 
 	if (!intensity) {
-		if (pwmbl->gpio_on != -1) {
-			gpio_set_value(pwmbl->gpio_on,
-					0 ^ pwmbl->pdata->on_active_low);
-		}
+		atmel_pwm_bl_set_gpio_on(pwmbl, 0);
 		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
 		pwm_channel_disable(&pwmbl->pwmc);
 	} else {
 		pwm_channel_enable(&pwmbl->pwmc);
 		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
-		if (pwmbl->gpio_on != -1) {
-			gpio_set_value(pwmbl->gpio_on,
-					1 ^ pwmbl->pdata->on_active_low);
-		}
+		atmel_pwm_bl_set_gpio_on(pwmbl, 1);
 	}
 
 	return 0;
@@ -70,17 +71,16 @@
 static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
 {
 	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
-	u8 intensity;
+	u32 cdty;
+	u32 intensity;
 
-	if (pwmbl->pdata->pwm_active_low) {
-		intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
-			pwmbl->pdata->pwm_duty_min;
-	} else {
-		intensity = pwmbl->pdata->pwm_duty_max -
-			pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
-	}
+	cdty = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
+	if (pwmbl->pdata->pwm_active_low)
+		intensity = cdty - pwmbl->pdata->pwm_duty_min;
+	else
+		intensity = pwmbl->pdata->pwm_duty_max - cdty;
 
-	return intensity;
+	return intensity & 0xffff;
 }
 
 static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
@@ -124,46 +124,40 @@
 	const struct atmel_pwm_bl_platform_data *pdata;
 	struct backlight_device *bldev;
 	struct atmel_pwm_bl *pwmbl;
+	unsigned long flags;
 	int retval;
 
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata)
+		return -ENODEV;
+
+	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
+			pdata->pwm_duty_min > pdata->pwm_duty_max ||
+			pdata->pwm_frequency == 0)
+		return -EINVAL;
+
 	pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl),
 				GFP_KERNEL);
 	if (!pwmbl)
 		return -ENOMEM;
 
 	pwmbl->pdev = pdev;
-
-	pdata = pdev->dev.platform_data;
-	if (!pdata) {
-		retval = -ENODEV;
-		goto err_free_mem;
-	}
-
-	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
-			pdata->pwm_duty_min > pdata->pwm_duty_max ||
-			pdata->pwm_frequency == 0) {
-		retval = -EINVAL;
-		goto err_free_mem;
-	}
-
 	pwmbl->pdata = pdata;
 	pwmbl->gpio_on = pdata->gpio_on;
 
 	retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
 	if (retval)
-		goto err_free_mem;
+		return retval;
 
-	if (pwmbl->gpio_on != -1) {
-		retval = devm_gpio_request(&pdev->dev, pwmbl->gpio_on,
-					"gpio_atmel_pwm_bl");
-		if (retval) {
-			pwmbl->gpio_on = -1;
-			goto err_free_pwm;
-		}
-
+	if (gpio_is_valid(pwmbl->gpio_on)) {
 		/* Turn display off by default. */
-		retval = gpio_direction_output(pwmbl->gpio_on,
-				0 ^ pdata->on_active_low);
+		if (pdata->on_active_low)
+			flags = GPIOF_OUT_INIT_HIGH;
+		else
+			flags = GPIOF_OUT_INIT_LOW;
+
+		retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on,
+						flags, "gpio_atmel_pwm_bl");
 		if (retval)
 			goto err_free_pwm;
 	}
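
Two cleanups meet in this hunk: gpio_is_valid() replaces the fragile "gpio != -1" convention (any negative value means "no GPIO"), and devm_gpio_request_one() collapses the request and the initial direction/level into one managed call:

    /* request as output, driven to the display-off level */
    retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on,
                                   pdata->on_active_low ? GPIOF_OUT_INIT_HIGH
                                                        : GPIOF_OUT_INIT_LOW,
                                   "gpio_atmel_pwm_bl");
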
@@ -171,8 +165,9 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
-	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
-					  &atmel_pwm_bl_ops, &props);
+	bldev = devm_backlight_device_register(&pdev->dev, "atmel-pwm-bl",
+					&pdev->dev, pwmbl, &atmel_pwm_bl_ops,
+					&props);
 	if (IS_ERR(bldev)) {
 		retval = PTR_ERR(bldev);
 		goto err_free_pwm;
@@ -188,17 +183,15 @@
 
 	retval = atmel_pwm_bl_init_pwm(pwmbl);
 	if (retval)
-		goto err_free_bl_dev;
+		goto err_free_pwm;
 
 	atmel_pwm_bl_set_intensity(bldev);
 
 	return 0;
 
-err_free_bl_dev:
-	backlight_device_unregister(bldev);
 err_free_pwm:
 	pwm_channel_free(&pwmbl->pwmc);
-err_free_mem:
+
 	return retval;
 }
 
@@ -206,11 +199,9 @@
 {
 	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
 
-	if (pwmbl->gpio_on != -1)
-		gpio_set_value(pwmbl->gpio_on, 0);
+	atmel_pwm_bl_set_gpio_on(pwmbl, 0);
 	pwm_channel_disable(&pwmbl->pwmc);
 	pwm_channel_free(&pwmbl->pwmc);
-	backlight_device_unregister(pwmbl->bldev);
 
 	return 0;
 }
@@ -229,3 +220,4 @@
 MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
 MODULE_DESCRIPTION("Atmel PWM backlight driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel-pwm-bl");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 94a403a..5d05555 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -21,6 +21,9 @@
 #include <asm/backlight.h>
 #endif
 
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
 static const char *const backlight_types[] = {
 	[BACKLIGHT_RAW] = "raw",
 	[BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@
 	mutex_unlock(&pmac_backlight_mutex);
 #endif
 
+	mutex_lock(&backlight_dev_list_mutex);
+	list_add(&new_bd->entry, &backlight_dev_list);
+	mutex_unlock(&backlight_dev_list_mutex);
+
 	return new_bd;
 }
 EXPORT_SYMBOL(backlight_device_register);
 
+bool backlight_device_registered(enum backlight_type type)
+{
+	bool found = false;
+	struct backlight_device *bd;
+
+	mutex_lock(&backlight_dev_list_mutex);
+	list_for_each_entry(bd, &backlight_dev_list, entry) {
+		if (bd->props.type == type) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&backlight_dev_list_mutex);
+
+	return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
 /**
  * backlight_device_unregister - unregisters a backlight device object.
  * @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@
 	if (!bd)
 		return;
 
+	mutex_lock(&backlight_dev_list_mutex);
+	list_del(&bd->entry);
+	mutex_unlock(&backlight_dev_list_mutex);
+
 #ifdef CONFIG_PMAC_BACKLIGHT
 	mutex_lock(&pmac_backlight_mutex);
 	if (pmac_backlight == bd)
@@ -499,6 +528,8 @@
 
 	backlight_class->dev_groups = bl_device_groups;
 	backlight_class->pm = &backlight_class_dev_pm_ops;
+	INIT_LIST_HEAD(&backlight_dev_list);
+	mutex_init(&backlight_dev_list_mutex);
 	return 0;
 }
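
backlight_device_registered() is new API on top of the list added above: it reports whether any backlight of a given type has been registered. The intended consumer is platform or ACPI code deciding whether a native backlight already exists before offering a vendor one; a hedged usage sketch:

    /* e.g. skip a vendor backlight when the GPU driver provides one */
    if (backlight_device_registered(BACKLIGHT_RAW))
            return 0;
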
 
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 15e3294..16dd9bc 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -128,7 +128,7 @@
 static int bd6107_probe(struct i2c_client *client,
 			  const struct i2c_device_id *id)
 {
-	struct bd6107_platform_data *pdata = client->dev.platform_data;
+	struct bd6107_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct backlight_device *backlight;
 	struct backlight_properties props;
 	struct bd6107 *bd;
@@ -166,7 +166,8 @@
 	props.brightness = clamp_t(unsigned int, pdata->def_value, 0,
 				   props.max_brightness);
 
-	backlight = backlight_device_register(dev_name(&client->dev),
+	backlight = devm_backlight_device_register(&client->dev,
+					      dev_name(&client->dev),
 					      &bd->client->dev, bd,
 					      &bd6107_backlight_ops, &props);
 	if (IS_ERR(backlight)) {
@@ -186,7 +187,6 @@
 
 	backlight->props.brightness = 0;
 	backlight_update_status(backlight);
-	backlight_device_unregister(backlight);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index c97867a..db8db5f 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -533,7 +533,7 @@
 static int corgi_lcd_probe(struct spi_device *spi)
 {
 	struct backlight_properties props;
-	struct corgi_lcd_platform_data *pdata = spi->dev.platform_data;
+	struct corgi_lcd_platform_data *pdata = dev_get_platdata(&spi->dev);
 	struct corgi_lcd *lcd;
 	int ret = 0;
 
@@ -550,8 +550,8 @@
 
 	lcd->spi_dev = spi;
 
-	lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
-					lcd, &corgi_lcd_ops);
+	lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "corgi_lcd",
+						&spi->dev, lcd, &corgi_lcd_ops);
 	if (IS_ERR(lcd->lcd_dev))
 		return PTR_ERR(lcd->lcd_dev);
 
@@ -561,18 +561,18 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = pdata->max_intensity;
-	lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
-						&corgi_bl_ops, &props);
-	if (IS_ERR(lcd->bl_dev)) {
-		ret = PTR_ERR(lcd->bl_dev);
-		goto err_unregister_lcd;
-	}
+	lcd->bl_dev = devm_backlight_device_register(&spi->dev, "corgi_bl",
+						&spi->dev, lcd, &corgi_bl_ops,
+						&props);
+	if (IS_ERR(lcd->bl_dev))
+		return PTR_ERR(lcd->bl_dev);
+
 	lcd->bl_dev->props.brightness = pdata->default_intensity;
 	lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
 
 	ret = setup_gpio_backlight(lcd, pdata);
 	if (ret)
-		goto err_unregister_bl;
+		return ret;
 
 	lcd->kick_battery = pdata->kick_battery;
 
@@ -583,12 +583,6 @@
 	lcd->limit_mask = pdata->limit_mask;
 	the_corgi_lcd = lcd;
 	return 0;
-
-err_unregister_bl:
-	backlight_device_unregister(lcd->bl_dev);
-err_unregister_lcd:
-	lcd_device_unregister(lcd->lcd_dev);
-	return ret;
 }
 
 static int corgi_lcd_remove(struct spi_device *spi)
@@ -598,11 +592,7 @@
 	lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
 	lcd->bl_dev->props.brightness = 0;
 	backlight_update_status(lcd->bl_dev);
-	backlight_device_unregister(lcd->bl_dev);
-
 	corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
-	lcd_device_unregister(lcd->lcd_dev);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 37bae80..f3fed9e 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -195,16 +195,17 @@
 
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
-	bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
-					&cr_backlight_ops, &props);
+	bdp = devm_backlight_device_register(&pdev->dev, "cr-backlight",
+					&pdev->dev, NULL, &cr_backlight_ops,
+					&props);
 	if (IS_ERR(bdp)) {
 		pci_dev_put(lpc_dev);
 		return PTR_ERR(bdp);
 	}
 
-	ldp = lcd_device_register("cr-lcd", &pdev->dev, NULL, &cr_lcd_ops);
+	ldp = devm_lcd_device_register(&pdev->dev, "cr-lcd", &pdev->dev, NULL,
+					&cr_lcd_ops);
 	if (IS_ERR(ldp)) {
-		backlight_device_unregister(bdp);
 		pci_dev_put(lpc_dev);
 		return PTR_ERR(ldp);
 	}
@@ -215,8 +216,6 @@
 
 	crp = devm_kzalloc(&pdev->dev, sizeof(*crp), GFP_KERNEL);
 	if (!crp) {
-		lcd_device_unregister(ldp);
-		backlight_device_unregister(bdp);
 		pci_dev_put(lpc_dev);
 		return -ENOMEM;
 	}
@@ -241,8 +240,6 @@
 	crp->cr_backlight_device->props.max_brightness = 0;
 	cr_backlight_set_intensity(crp->cr_backlight_device);
 	cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_POWERDOWN);
-	backlight_device_unregister(crp->cr_backlight_device);
-	lcd_device_unregister(crp->cr_lcd_device);
 	pci_dev_put(lpc_dev);
 
 	return 0;
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 67cadd3..12c5d84 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -109,7 +109,7 @@
 
 static int da903x_backlight_probe(struct platform_device *pdev)
 {
-	struct da9034_backlight_pdata *pdata = pdev->dev.platform_data;
+	struct da9034_backlight_pdata *pdata = dev_get_platdata(&pdev->dev);
 	struct da903x_backlight_data *data;
 	struct backlight_device *bl;
 	struct backlight_properties props;
@@ -144,8 +144,9 @@
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = max_brightness;
-	bl = backlight_device_register(pdev->name, data->da903x_dev, data,
-				       &da903x_backlight_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+					data->da903x_dev, data,
+					&da903x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -158,21 +159,12 @@
 	return 0;
 }
 
-static int da903x_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bl);
-	return 0;
-}
-
 static struct platform_driver da903x_backlight_driver = {
 	.driver		= {
 		.name	= "da903x-backlight",
 		.owner	= THIS_MODULE,
 	},
 	.probe		= da903x_backlight_probe,
-	.remove		= da903x_backlight_remove,
 };
 
 module_platform_driver(da903x_backlight_driver);
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 842da5a..20d55be 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -125,8 +125,9 @@
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = DA9052_MAX_BRIGHTNESS;
 
-	bl = backlight_device_register(pdev->name, wleds->da9052->dev, wleds,
-				       &da9052_backlight_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+					wleds->da9052->dev, wleds,
+					&da9052_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "Failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -147,7 +148,6 @@
 	wleds->brightness = 0;
 	wleds->state = DA9052_WLEDS_OFF;
 	da9052_adjust_wled_brightness(wleds);
-	backlight_device_unregister(bl);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 018368b..0d1f633 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -92,8 +92,8 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = EP93XX_MAX_BRIGHT;
-	bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
-				       &ep93xxbl_ops, &props);
+	bl = devm_backlight_device_register(&dev->dev, dev->name, &dev->dev,
+					ep93xxbl, &ep93xxbl_ops, &props);
 	if (IS_ERR(bl))
 		return PTR_ERR(bl);
 
@@ -106,14 +106,6 @@
 	return 0;
 }
 
-static int ep93xxbl_remove(struct platform_device *dev)
-{
-	struct backlight_device *bl = platform_get_drvdata(dev);
-
-	backlight_device_unregister(bl);
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int ep93xxbl_suspend(struct device *dev)
 {
@@ -140,7 +132,6 @@
 		.pm	= &ep93xxbl_pm_ops,
 	},
 	.probe		= ep93xxbl_probe,
-	.remove		= ep93xxbl_remove,
 };
 
 module_platform_driver(ep93xxbl_driver);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 19e393b..5d8d652 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -79,7 +79,7 @@
 static int genericbl_probe(struct platform_device *pdev)
 {
 	struct backlight_properties props;
-	struct generic_bl_info *machinfo = pdev->dev.platform_data;
+	struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev);
 	const char *name = "generic-bl";
 	struct backlight_device *bd;
 
@@ -93,8 +93,8 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = machinfo->max_intensity;
-	bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
-				       &props);
+	bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
+					NULL, &genericbl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
@@ -118,8 +118,6 @@
 	bd->props.brightness = 0;
 	backlight_update_status(bd);
 
-	backlight_device_unregister(bd);
-
 	dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n");
 	return 0;
 }
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 5fa217f..81fb127 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -62,7 +62,8 @@
 
 static int gpio_backlight_probe(struct platform_device *pdev)
 {
-	struct gpio_backlight_platform_data *pdata = pdev->dev.platform_data;
+	struct gpio_backlight_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
 	struct backlight_properties props;
 	struct backlight_device *bl;
 	struct gpio_backlight *gbl;
@@ -94,8 +95,9 @@
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 1;
-	bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, gbl,
-				       &gpio_backlight_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
+					&pdev->dev, gbl, &gpio_backlight_ops,
+					&props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -108,21 +110,12 @@
 	return 0;
 }
 
-static int gpio_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bl);
-	return 0;
-}
-
 static struct platform_driver gpio_backlight_driver = {
 	.driver		= {
 		.name		= "gpio-backlight",
 		.owner		= THIS_MODULE,
 	},
 	.probe		= gpio_backlight_probe,
-	.remove		= gpio_backlight_remove,
 };
 
 module_platform_driver(gpio_backlight_driver);
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index c7af8c4..985e854 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -648,7 +648,8 @@
 		lcd->use_im_pins = 0;
 	}
 
-	lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
+	lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
+					&hx8357_ops);
 	if (IS_ERR(lcdev)) {
 		ret = PTR_ERR(lcdev);
 		return ret;
@@ -660,32 +661,19 @@
 	ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
 	if (ret) {
 		dev_err(&spi->dev, "Couldn't initialize panel\n");
-		goto init_error;
+		return ret;
 	}
 
 	dev_info(&spi->dev, "Panel probed\n");
 
 	return 0;
-
-init_error:
-	lcd_device_unregister(lcdev);
-	return ret;
-}
-
-static int hx8357_remove(struct spi_device *spi)
-{
-	struct lcd_device *lcdev = spi_get_drvdata(spi);
-
-	lcd_device_unregister(lcdev);
-	return 0;
 }
 
 static struct spi_driver hx8357_driver = {
 	.probe  = hx8357_probe,
-	.remove = hx8357_remove,
 	.driver = {
 		.name = "hx8357",
-		.of_match_table = of_match_ptr(hx8357_dt_ids),
+		.of_match_table = hx8357_dt_ids,
 	},
 };
 
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index d9f65c2..73464e4 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -513,8 +513,8 @@
 
 	ili->power = FB_BLANK_POWERDOWN;
 
-	lcd = lcd_device_register("ili922xlcd", &spi->dev, ili,
-				  &ili922x_ops);
+	lcd = devm_lcd_device_register(&spi->dev, "ili922xlcd", &spi->dev, ili,
+					&ili922x_ops);
 	if (IS_ERR(lcd)) {
 		dev_err(&spi->dev, "cannot register LCD\n");
 		return PTR_ERR(lcd);
@@ -530,10 +530,7 @@
 
 static int ili922x_remove(struct spi_device *spi)
 {
-	struct ili922x *ili = spi_get_drvdata(spi);
-
 	ili922x_poweroff(spi);
-	lcd_device_unregister(ili->ld);
 	return 0;
 }
 
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index f8be90c..e2b8b40 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -198,7 +198,7 @@
 int ili9320_probe_spi(struct spi_device *spi,
 				struct ili9320_client *client)
 {
-	struct ili9320_platdata *cfg = spi->dev.platform_data;
+	struct ili9320_platdata *cfg = dev_get_platdata(&spi->dev);
 	struct device *dev = &spi->dev;
 	struct ili9320 *ili;
 	struct lcd_device *lcd;
@@ -235,7 +235,8 @@
 
 	ili9320_setup_spi(ili, spi);
 
-	lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
+	lcd = devm_lcd_device_register(&spi->dev, "ili9320", dev, ili,
+					&ili9320_ops);
 	if (IS_ERR(lcd)) {
 		dev_err(dev, "failed to register lcd device\n");
 		return PTR_ERR(lcd);
@@ -248,24 +249,16 @@
 	ret = ili9320_power(ili, FB_BLANK_UNBLANK);
 	if (ret != 0) {
 		dev_err(dev, "failed to set lcd power state\n");
-		goto err_unregister;
+		return ret;
 	}
 
 	return 0;
-
- err_unregister:
-	lcd_device_unregister(lcd);
-
-	return ret;
 }
 EXPORT_SYMBOL_GPL(ili9320_probe_spi);
 
 int ili9320_remove(struct ili9320 *ili)
 {
 	ili9320_power(ili, FB_BLANK_POWERDOWN);
-
-	lcd_device_unregister(ili->lcd);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ili9320_remove);
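
Note the asymmetry the conversion leaves in ili922x and ili9320: their
remove() paths still run, but only to power the panel down; the lcd_device
itself is now released by devres after ->remove() returns. Sketched with
hypothetical names:

	static int example_lcd_remove(struct spi_device *spi)
	{
		example_poweroff(spi);	/* hardware quiesce only */
		return 0;		/* no lcd_device_unregister() needed */
	}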
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index bca6ccc..7592cc2 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -141,7 +141,7 @@
 static int kb3886bl_probe(struct platform_device *pdev)
 {
 	struct backlight_properties props;
-	struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
+	struct kb3886bl_machinfo *machinfo = dev_get_platdata(&pdev->dev);
 
 	bl_machinfo = machinfo;
 	if (!machinfo->limit_mask)
@@ -150,10 +150,10 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = machinfo->max_intensity;
-	kb3886_backlight_device = backlight_device_register("kb3886-bl",
-							    &pdev->dev, NULL,
-							    &kb3886bl_ops,
-							    &props);
+	kb3886_backlight_device = devm_backlight_device_register(&pdev->dev,
+							"kb3886-bl", &pdev->dev,
+							NULL, &kb3886bl_ops,
+							&props);
 	if (IS_ERR(kb3886_backlight_device))
 		return PTR_ERR(kb3886_backlight_device);
 
@@ -166,18 +166,8 @@
 	return 0;
 }
 
-static int kb3886bl_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bd = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bd);
-
-	return 0;
-}
-
 static struct platform_driver kb3886bl_driver = {
 	.probe		= kb3886bl_probe,
-	.remove		= kb3886bl_remove,
 	.driver		= {
 		.name	= "kb3886-bl",
 		.pm	= &kb3886bl_pm_ops,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index a35a38c..b5fc13b 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -48,7 +48,7 @@
 
 static void l4f00242t03_lcd_init(struct spi_device *spi)
 {
-	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
 	struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
 	const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
 	int ret;
@@ -88,7 +88,7 @@
 
 static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
 {
-	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
 	struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
 
 	dev_dbg(&spi->dev, "Powering down LCD\n");
@@ -171,7 +171,7 @@
 static int l4f00242t03_probe(struct spi_device *spi)
 {
 	struct l4f00242t03_priv *priv;
-	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
 	int ret;
 
 	if (pdata == NULL) {
@@ -244,7 +244,6 @@
 
 	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(priv->ld);
-	spi_set_drvdata(spi, NULL);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 1e0a309..506a6c2 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -702,7 +702,7 @@
 	lcd->spi = spi;
 	lcd->dev = &spi->dev;
 
-	lcd->lcd_pd = spi->dev.platform_data;
+	lcd->lcd_pd = dev_get_platdata(&spi->dev);
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL.\n");
 		return -EINVAL;
@@ -716,7 +716,8 @@
 		return ret;
 	}
 
-	ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
+	ld = devm_lcd_device_register(&spi->dev, "ld9040", &spi->dev, lcd,
+					&ld9040_lcd_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -726,12 +727,10 @@
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
 
-	bd = backlight_device_register("ld9040-bl", &spi->dev,
-		lcd, &ld9040_backlight_ops, &props);
-	if (IS_ERR(bd)) {
-		ret = PTR_ERR(bd);
-		goto out_unregister_lcd;
-	}
+	bd = devm_backlight_device_register(&spi->dev, "ld9040-bl", &spi->dev,
+					lcd, &ld9040_backlight_ops, &props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
 
 	bd->props.brightness = MAX_BRIGHTNESS;
 	lcd->bd = bd;
@@ -757,11 +756,6 @@
 
 	dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
 	return 0;
-
-out_unregister_lcd:
-	lcd_device_unregister(lcd->ld);
-
-	return ret;
 }
 
 static int ld9040_remove(struct spi_device *spi)
@@ -769,9 +763,6 @@
 	struct ld9040 *lcd = spi_get_drvdata(spi);
 
 	ld9040_power(lcd, FB_BLANK_POWERDOWN);
-	backlight_device_unregister(lcd->bd);
-	lcd_device_unregister(lcd->ld);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
index 038d9c8..c5e586d 100644
--- a/drivers/video/backlight/ld9040_gamma.h
+++ b/drivers/video/backlight/ld9040_gamma.h
@@ -169,7 +169,9 @@
 
 struct ld9040_gamma {
 	unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
-} gamma_table = {
+};
+
+static struct ld9040_gamma gamma_table = {
 	.gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
 	.gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
 	.gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
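
The ld9040_gamma.h hunk above fixes a classic header pitfall: the struct
type and a non-static object were fused into one declaration, so every
translation unit including the header would emit its own external
definition of gamma_table, a multiple-definition link error waiting to
happen once a second includer appears (and sparse warns even with one).
Splitting the type from a static instance keeps the header safe to include.
A stripped-down illustration, not the driver's actual tables:

	/* before: a header-level external definition per includer */
	struct gamma { int level; } table = { 50 };

	/* after: only the type is shared; the instance has internal linkage */
	struct gamma { int level; };
	static struct gamma table = { .level = 50 };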
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index 1d1dbfb..187d1c2 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -284,7 +284,7 @@
 	if (!lm3533)
 		return -EINVAL;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data\n");
 		return -EINVAL;
@@ -313,8 +313,9 @@
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
 	props.brightness = pdata->default_brightness;
-	bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
-						&lm3533_bl_ops, &props);
+	bd = devm_backlight_device_register(&pdev->dev, pdata->name,
+					pdev->dev.parent, bl, &lm3533_bl_ops,
+					&props);
 	if (IS_ERR(bd)) {
 		dev_err(&pdev->dev, "failed to register backlight device\n");
 		return PTR_ERR(bd);
@@ -328,7 +329,7 @@
 	ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to create sysfs attributes\n");
-		goto err_unregister;
+		return ret;
 	}
 
 	backlight_update_status(bd);
@@ -345,8 +346,6 @@
 
 err_sysfs_remove:
 	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
-err_unregister:
-	backlight_device_unregister(bd);
 
 	return ret;
 }
@@ -363,7 +362,6 @@
 
 	lm3533_ctrlbank_disable(&bl->cb);
 	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
-	backlight_device_unregister(bd);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/lm3630_bl.c b/drivers/video/backlight/lm3630_bl.c
deleted file mode 100644
index 76a62e9..0000000
--- a/drivers/video/backlight/lm3630_bl.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
-* Simple driver for Texas Instruments LM3630 Backlight driver chip
-* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
-*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/backlight.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/regmap.h>
-#include <linux/platform_data/lm3630_bl.h>
-
-#define REG_CTRL	0x00
-#define REG_CONFIG	0x01
-#define REG_BRT_A	0x03
-#define REG_BRT_B	0x04
-#define REG_INT_STATUS	0x09
-#define REG_INT_EN	0x0A
-#define REG_FAULT	0x0B
-#define REG_PWM_OUTLOW	0x12
-#define REG_PWM_OUTHIGH	0x13
-#define REG_MAX		0x1F
-
-#define INT_DEBOUNCE_MSEC	10
-
-enum lm3630_leds {
-	BLED_ALL = 0,
-	BLED_1,
-	BLED_2
-};
-
-static const char * const bled_name[] = {
-	[BLED_ALL] = "lm3630_bled",	/*Bank1 controls all string */
-	[BLED_1] = "lm3630_bled1",	/*Bank1 controls bled1 */
-	[BLED_2] = "lm3630_bled2",	/*Bank1 or 2 controls bled2 */
-};
-
-struct lm3630_chip_data {
-	struct device *dev;
-	struct delayed_work work;
-	int irq;
-	struct workqueue_struct *irqthread;
-	struct lm3630_platform_data *pdata;
-	struct backlight_device *bled1;
-	struct backlight_device *bled2;
-	struct regmap *regmap;
-};
-
-/* initialize chip */
-static int lm3630_chip_init(struct lm3630_chip_data *pchip)
-{
-	int ret;
-	unsigned int reg_val;
-	struct lm3630_platform_data *pdata = pchip->pdata;
-
-	/*pwm control */
-	reg_val = ((pdata->pwm_active & 0x01) << 2) | (pdata->pwm_ctrl & 0x03);
-	ret = regmap_update_bits(pchip->regmap, REG_CONFIG, 0x07, reg_val);
-	if (ret < 0)
-		goto out;
-
-	/* bank control */
-	reg_val = ((pdata->bank_b_ctrl & 0x01) << 1) |
-			(pdata->bank_a_ctrl & 0x07);
-	ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x07, reg_val);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-	if (ret < 0)
-		goto out;
-
-	/* set initial brightness */
-	if (pdata->bank_a_ctrl != BANK_A_CTRL_DISABLE) {
-		ret = regmap_write(pchip->regmap,
-				   REG_BRT_A, pdata->init_brt_led1);
-		if (ret < 0)
-			goto out;
-	}
-
-	if (pdata->bank_b_ctrl != BANK_B_CTRL_DISABLE) {
-		ret = regmap_write(pchip->regmap,
-				   REG_BRT_B, pdata->init_brt_led2);
-		if (ret < 0)
-			goto out;
-	}
-	return ret;
-
-out:
-	dev_err(pchip->dev, "i2c failed to access register\n");
-	return ret;
-}
-
-/* interrupt handling */
-static void lm3630_delayed_func(struct work_struct *work)
-{
-	int ret;
-	unsigned int reg_val;
-	struct lm3630_chip_data *pchip;
-
-	pchip = container_of(work, struct lm3630_chip_data, work.work);
-
-	ret = regmap_read(pchip->regmap, REG_INT_STATUS, &reg_val);
-	if (ret < 0) {
-		dev_err(pchip->dev,
-			"i2c failed to access REG_INT_STATUS Register\n");
-		return;
-	}
-
-	dev_info(pchip->dev, "REG_INT_STATUS Register is 0x%x\n", reg_val);
-}
-
-static irqreturn_t lm3630_isr_func(int irq, void *chip)
-{
-	int ret;
-	struct lm3630_chip_data *pchip = chip;
-	unsigned long delay = msecs_to_jiffies(INT_DEBOUNCE_MSEC);
-
-	queue_delayed_work(pchip->irqthread, &pchip->work, delay);
-
-	ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-	if (ret < 0)
-		goto out;
-
-	return IRQ_HANDLED;
-out:
-	dev_err(pchip->dev, "i2c failed to access register\n");
-	return IRQ_HANDLED;
-}
-
-static int lm3630_intr_config(struct lm3630_chip_data *pchip)
-{
-	INIT_DELAYED_WORK(&pchip->work, lm3630_delayed_func);
-	pchip->irqthread = create_singlethread_workqueue("lm3630-irqthd");
-	if (!pchip->irqthread) {
-		dev_err(pchip->dev, "create irq thread fail...\n");
-		return -1;
-	}
-	if (request_threaded_irq
-	    (pchip->irq, NULL, lm3630_isr_func,
-	     IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lm3630_irq", pchip)) {
-		dev_err(pchip->dev, "request threaded irq fail..\n");
-		return -1;
-	}
-	return 0;
-}
-
-static bool
-set_intensity(struct backlight_device *bl, struct lm3630_chip_data *pchip)
-{
-	if (!pchip->pdata->pwm_set_intensity)
-		return false;
-	pchip->pdata->pwm_set_intensity(bl->props.brightness - 1,
-					pchip->pdata->pwm_period);
-	return true;
-}
-
-/* update and get brightness */
-static int lm3630_bank_a_update_status(struct backlight_device *bl)
-{
-	int ret;
-	struct lm3630_chip_data *pchip = bl_get_data(bl);
-	enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
-	/* brightness 0 means disable */
-	if (!bl->props.brightness) {
-		ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x04, 0x00);
-		if (ret < 0)
-			goto out;
-		return bl->props.brightness;
-	}
-
-	/* pwm control */
-	if (pwm_ctrl == PWM_CTRL_BANK_A || pwm_ctrl == PWM_CTRL_BANK_ALL) {
-		if (!set_intensity(bl, pchip))
-			dev_err(pchip->dev, "No pwm control func. in plat-data\n");
-	} else {
-
-		/* i2c control */
-		ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-		if (ret < 0)
-			goto out;
-		mdelay(1);
-		ret = regmap_write(pchip->regmap,
-				   REG_BRT_A, bl->props.brightness - 1);
-		if (ret < 0)
-			goto out;
-	}
-	return bl->props.brightness;
-out:
-	dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
-	return bl->props.brightness;
-}
-
-static int lm3630_bank_a_get_brightness(struct backlight_device *bl)
-{
-	unsigned int reg_val;
-	int brightness, ret;
-	struct lm3630_chip_data *pchip = bl_get_data(bl);
-	enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
-	if (pwm_ctrl == PWM_CTRL_BANK_A || pwm_ctrl == PWM_CTRL_BANK_ALL) {
-		ret = regmap_read(pchip->regmap, REG_PWM_OUTHIGH, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = reg_val & 0x01;
-		ret = regmap_read(pchip->regmap, REG_PWM_OUTLOW, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = ((brightness << 8) | reg_val) + 1;
-	} else {
-		ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-		if (ret < 0)
-			goto out;
-		mdelay(1);
-		ret = regmap_read(pchip->regmap, REG_BRT_A, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = reg_val + 1;
-	}
-	bl->props.brightness = brightness;
-	return bl->props.brightness;
-out:
-	dev_err(pchip->dev, "i2c failed to access register\n");
-	return 0;
-}
-
-static const struct backlight_ops lm3630_bank_a_ops = {
-	.options = BL_CORE_SUSPENDRESUME,
-	.update_status = lm3630_bank_a_update_status,
-	.get_brightness = lm3630_bank_a_get_brightness,
-};
-
-static int lm3630_bank_b_update_status(struct backlight_device *bl)
-{
-	int ret;
-	struct lm3630_chip_data *pchip = bl_get_data(bl);
-	enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
-	if (pwm_ctrl == PWM_CTRL_BANK_B || pwm_ctrl == PWM_CTRL_BANK_ALL) {
-		if (!set_intensity(bl, pchip))
-			dev_err(pchip->dev,
-				"no pwm control func. in plat-data\n");
-	} else {
-		ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-		if (ret < 0)
-			goto out;
-		mdelay(1);
-		ret = regmap_write(pchip->regmap,
-				   REG_BRT_B, bl->props.brightness - 1);
-	}
-	return bl->props.brightness;
-out:
-	dev_err(pchip->dev, "i2c failed to access register\n");
-	return bl->props.brightness;
-}
-
-static int lm3630_bank_b_get_brightness(struct backlight_device *bl)
-{
-	unsigned int reg_val;
-	int brightness, ret;
-	struct lm3630_chip_data *pchip = bl_get_data(bl);
-	enum lm3630_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
-
-	if (pwm_ctrl == PWM_CTRL_BANK_B || pwm_ctrl == PWM_CTRL_BANK_ALL) {
-		ret = regmap_read(pchip->regmap, REG_PWM_OUTHIGH, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = reg_val & 0x01;
-		ret = regmap_read(pchip->regmap, REG_PWM_OUTLOW, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = ((brightness << 8) | reg_val) + 1;
-	} else {
-		ret = regmap_update_bits(pchip->regmap, REG_CTRL, 0x80, 0x00);
-		if (ret < 0)
-			goto out;
-		mdelay(1);
-		ret = regmap_read(pchip->regmap, REG_BRT_B, &reg_val);
-		if (ret < 0)
-			goto out;
-		brightness = reg_val + 1;
-	}
-	bl->props.brightness = brightness;
-
-	return bl->props.brightness;
-out:
-	dev_err(pchip->dev, "i2c failed to access register\n");
-	return bl->props.brightness;
-}
-
-static const struct backlight_ops lm3630_bank_b_ops = {
-	.options = BL_CORE_SUSPENDRESUME,
-	.update_status = lm3630_bank_b_update_status,
-	.get_brightness = lm3630_bank_b_get_brightness,
-};
-
-static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
-				     enum lm3630_leds ledno)
-{
-	const char *name = bled_name[ledno];
-	struct backlight_properties props;
-	struct lm3630_platform_data *pdata = pchip->pdata;
-
-	props.type = BACKLIGHT_RAW;
-	switch (ledno) {
-	case BLED_1:
-	case BLED_ALL:
-		props.brightness = pdata->init_brt_led1;
-		props.max_brightness = pdata->max_brt_led1;
-		pchip->bled1 =
-		    backlight_device_register(name, pchip->dev, pchip,
-					      &lm3630_bank_a_ops, &props);
-		if (IS_ERR(pchip->bled1))
-			return PTR_ERR(pchip->bled1);
-		break;
-	case BLED_2:
-		props.brightness = pdata->init_brt_led2;
-		props.max_brightness = pdata->max_brt_led2;
-		pchip->bled2 =
-		    backlight_device_register(name, pchip->dev, pchip,
-					      &lm3630_bank_b_ops, &props);
-		if (IS_ERR(pchip->bled2))
-			return PTR_ERR(pchip->bled2);
-		break;
-	}
-	return 0;
-}
-
-static void lm3630_backlight_unregister(struct lm3630_chip_data *pchip)
-{
-	if (pchip->bled1)
-		backlight_device_unregister(pchip->bled1);
-	if (pchip->bled2)
-		backlight_device_unregister(pchip->bled2);
-}
-
-static const struct regmap_config lm3630_regmap = {
-	.reg_bits = 8,
-	.val_bits = 8,
-	.max_register = REG_MAX,
-};
-
-static int lm3630_probe(struct i2c_client *client,
-				  const struct i2c_device_id *id)
-{
-	struct lm3630_platform_data *pdata = client->dev.platform_data;
-	struct lm3630_chip_data *pchip;
-	int ret;
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "fail : i2c functionality check...\n");
-		return -EOPNOTSUPP;
-	}
-
-	if (pdata == NULL) {
-		dev_err(&client->dev, "fail : no platform data.\n");
-		return -ENODATA;
-	}
-
-	pchip = devm_kzalloc(&client->dev, sizeof(struct lm3630_chip_data),
-			     GFP_KERNEL);
-	if (!pchip)
-		return -ENOMEM;
-	pchip->pdata = pdata;
-	pchip->dev = &client->dev;
-
-	pchip->regmap = devm_regmap_init_i2c(client, &lm3630_regmap);
-	if (IS_ERR(pchip->regmap)) {
-		ret = PTR_ERR(pchip->regmap);
-		dev_err(&client->dev, "fail : allocate register map: %d\n",
-			ret);
-		return ret;
-	}
-	i2c_set_clientdata(client, pchip);
-
-	/* chip initialize */
-	ret = lm3630_chip_init(pchip);
-	if (ret < 0) {
-		dev_err(&client->dev, "fail : init chip\n");
-		goto err_chip_init;
-	}
-
-	switch (pdata->bank_a_ctrl) {
-	case BANK_A_CTRL_ALL:
-		ret = lm3630_backlight_register(pchip, BLED_ALL);
-		pdata->bank_b_ctrl = BANK_B_CTRL_DISABLE;
-		break;
-	case BANK_A_CTRL_LED1:
-		ret = lm3630_backlight_register(pchip, BLED_1);
-		break;
-	case BANK_A_CTRL_LED2:
-		ret = lm3630_backlight_register(pchip, BLED_2);
-		pdata->bank_b_ctrl = BANK_B_CTRL_DISABLE;
-		break;
-	default:
-		break;
-	}
-
-	if (ret < 0)
-		goto err_bl_reg;
-
-	if (pdata->bank_b_ctrl && pchip->bled2 == NULL) {
-		ret = lm3630_backlight_register(pchip, BLED_2);
-		if (ret < 0)
-			goto err_bl_reg;
-	}
-
-	/* interrupt enable  : irq 0 is not allowed for lm3630 */
-	pchip->irq = client->irq;
-	if (pchip->irq)
-		lm3630_intr_config(pchip);
-
-	dev_info(&client->dev, "LM3630 backlight register OK.\n");
-	return 0;
-
-err_bl_reg:
-	dev_err(&client->dev, "fail : backlight register.\n");
-	lm3630_backlight_unregister(pchip);
-err_chip_init:
-	return ret;
-}
-
-static int lm3630_remove(struct i2c_client *client)
-{
-	int ret;
-	struct lm3630_chip_data *pchip = i2c_get_clientdata(client);
-
-	ret = regmap_write(pchip->regmap, REG_BRT_A, 0);
-	if (ret < 0)
-		dev_err(pchip->dev, "i2c failed to access register\n");
-
-	ret = regmap_write(pchip->regmap, REG_BRT_B, 0);
-	if (ret < 0)
-		dev_err(pchip->dev, "i2c failed to access register\n");
-
-	lm3630_backlight_unregister(pchip);
-	if (pchip->irq) {
-		free_irq(pchip->irq, pchip);
-		flush_workqueue(pchip->irqthread);
-		destroy_workqueue(pchip->irqthread);
-	}
-	return 0;
-}
-
-static const struct i2c_device_id lm3630_id[] = {
-	{LM3630_NAME, 0},
-	{}
-};
-
-MODULE_DEVICE_TABLE(i2c, lm3630_id);
-
-static struct i2c_driver lm3630_i2c_driver = {
-	.driver = {
-		   .name = LM3630_NAME,
-		   },
-	.probe = lm3630_probe,
-	.remove = lm3630_remove,
-	.id_table = lm3630_id,
-};
-
-module_i2c_driver(lm3630_i2c_driver);
-
-MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3630");
-MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
-MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
-MODULE_LICENSE("GPL v2");
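
The lm3630 driver removed above is superseded by the lm3630a driver added
next. Among the cleanups, the rewrite folds the scattered regmap_read()
calls with goto-based error handling into small value-or-errno helpers;
the idiom, sketched with illustrative names:

	static int example_read(struct regmap *map, unsigned int reg)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(map, reg, &val);
		if (ret < 0)
			return ret;	/* negative errno on bus failure */
		return val & 0xFF;	/* otherwise the 8-bit register value */
	}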
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
new file mode 100644
index 0000000..35fe482
--- /dev/null
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -0,0 +1,483 @@
+/*
+* Simple driver for Texas Instruments LM3630A Backlight driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/pwm.h>
+#include <linux/platform_data/lm3630a_bl.h>
+
+#define REG_CTRL	0x00
+#define REG_BOOST	0x02
+#define REG_CONFIG	0x01
+#define REG_BRT_A	0x03
+#define REG_BRT_B	0x04
+#define REG_I_A		0x05
+#define REG_I_B		0x06
+#define REG_INT_STATUS	0x09
+#define REG_INT_EN	0x0A
+#define REG_FAULT	0x0B
+#define REG_PWM_OUTLOW	0x12
+#define REG_PWM_OUTHIGH	0x13
+#define REG_MAX		0x1F
+
+#define INT_DEBOUNCE_MSEC	10
+struct lm3630a_chip {
+	struct device *dev;
+	struct delayed_work work;
+
+	int irq;
+	struct workqueue_struct *irqthread;
+	struct lm3630a_platform_data *pdata;
+	struct backlight_device *bleda;
+	struct backlight_device *bledb;
+	struct regmap *regmap;
+	struct pwm_device *pwmd;
+};
+
+/* i2c access */
+static int lm3630a_read(struct lm3630a_chip *pchip, unsigned int reg)
+{
+	int rval;
+	unsigned int reg_val;
+
+	rval = regmap_read(pchip->regmap, reg, &reg_val);
+	if (rval < 0)
+		return rval;
+	return reg_val & 0xFF;
+}
+
+static int lm3630a_write(struct lm3630a_chip *pchip,
+			 unsigned int reg, unsigned int data)
+{
+	return regmap_write(pchip->regmap, reg, data);
+}
+
+static int lm3630a_update(struct lm3630a_chip *pchip,
+			  unsigned int reg, unsigned int mask,
+			  unsigned int data)
+{
+	return regmap_update_bits(pchip->regmap, reg, mask, data);
+}
+
+/* initialize chip */
+static int lm3630a_chip_init(struct lm3630a_chip *pchip)
+{
+	int rval;
+	struct lm3630a_platform_data *pdata = pchip->pdata;
+
+	usleep_range(1000, 2000);
+	/* set Filter Strength Register */
+	rval = lm3630a_write(pchip, 0x50, 0x03);
+	/* set Config. register */
+	rval |= lm3630a_update(pchip, REG_CONFIG, 0x07, pdata->pwm_ctrl);
+	/* set boost control */
+	rval |= lm3630a_write(pchip, REG_BOOST, 0x38);
+	/* set current A */
+	rval |= lm3630a_update(pchip, REG_I_A, 0x1F, 0x1F);
+	/* set current B */
+	rval |= lm3630a_write(pchip, REG_I_B, 0x1F);
+	/* set control */
+	rval |= lm3630a_update(pchip, REG_CTRL, 0x14, pdata->leda_ctrl);
+	rval |= lm3630a_update(pchip, REG_CTRL, 0x0B, pdata->ledb_ctrl);
+	usleep_range(1000, 2000);
+	/* set brightness A and B */
+	rval |= lm3630a_write(pchip, REG_BRT_A, pdata->leda_init_brt);
+	rval |= lm3630a_write(pchip, REG_BRT_B, pdata->ledb_init_brt);
+
+	if (rval < 0)
+		dev_err(pchip->dev, "i2c failed to access register\n");
+	return rval;
+}
+
+/* interrupt handling */
+static void lm3630a_delayed_func(struct work_struct *work)
+{
+	int rval;
+	struct lm3630a_chip *pchip;
+
+	pchip = container_of(work, struct lm3630a_chip, work.work);
+
+	rval = lm3630a_read(pchip, REG_INT_STATUS);
+	if (rval < 0) {
+		dev_err(pchip->dev,
+			"i2c failed to access REG_INT_STATUS Register\n");
+		return;
+	}
+
+	dev_info(pchip->dev, "REG_INT_STATUS Register is 0x%x\n", rval);
+}
+
+static irqreturn_t lm3630a_isr_func(int irq, void *chip)
+{
+	int rval;
+	struct lm3630a_chip *pchip = chip;
+	unsigned long delay = msecs_to_jiffies(INT_DEBOUNCE_MSEC);
+
+	queue_delayed_work(pchip->irqthread, &pchip->work, delay);
+
+	rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+	if (rval < 0) {
+		dev_err(pchip->dev, "i2c failed to access register\n");
+		return IRQ_NONE;
+	}
+	return IRQ_HANDLED;
+}
+
+static int lm3630a_intr_config(struct lm3630a_chip *pchip)
+{
+	int rval;
+
+	rval = lm3630a_write(pchip, REG_INT_EN, 0x87);
+	if (rval < 0)
+		return rval;
+
+	INIT_DELAYED_WORK(&pchip->work, lm3630a_delayed_func);
+	pchip->irqthread = create_singlethread_workqueue("lm3630a-irqthd");
+	if (!pchip->irqthread) {
+		dev_err(pchip->dev, "create irq thread fail\n");
+		return -ENOMEM;
+	}
+	if (request_threaded_irq
+	    (pchip->irq, NULL, lm3630a_isr_func,
+	     IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lm3630a_irq", pchip)) {
+		dev_err(pchip->dev, "request threaded irq fail\n");
+		destroy_workqueue(pchip->irqthread);
+		return -ENOMEM;
+	}
+	return rval;
+}
+
+static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
+{
+	unsigned int period = pwm_get_period(pchip->pwmd);
+	unsigned int duty = br * period / br_max;
+
+	pwm_config(pchip->pwmd, duty, period);
+	if (duty)
+		pwm_enable(pchip->pwmd);
+	else
+		pwm_disable(pchip->pwmd);
+}
+
+/* update and get brightness */
+static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+{
+	int ret;
+	struct lm3630a_chip *pchip = bl_get_data(bl);
+	enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+	/* pwm control */
+	if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
+		lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+				 bl->props.max_brightness);
+		return bl->props.brightness;
+	}
+
+	/* disable sleep */
+	ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+	if (ret < 0)
+		goto out_i2c_err;
+	usleep_range(1000, 2000);
+	/* minimum brightness is 0x04 */
+	ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness);
+	if (bl->props.brightness < 0x4)
+		ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0);
+	else
+		ret |= lm3630a_update(pchip, REG_CTRL,
+				      LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
+	if (ret < 0)
+		goto out_i2c_err;
+	return bl->props.brightness;
+
+out_i2c_err:
+	dev_err(pchip->dev, "i2c failed to access\n");
+	return bl->props.brightness;
+}
+
+static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+{
+	int brightness, rval;
+	struct lm3630a_chip *pchip = bl_get_data(bl);
+	enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+	if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
+		rval = lm3630a_read(pchip, REG_PWM_OUTHIGH);
+		if (rval < 0)
+			goto out_i2c_err;
+		brightness = (rval & 0x01) << 8;
+		rval = lm3630a_read(pchip, REG_PWM_OUTLOW);
+		if (rval < 0)
+			goto out_i2c_err;
+		brightness |= rval;
+		goto out;
+	}
+
+	/* disable sleep */
+	rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+	if (rval < 0)
+		goto out_i2c_err;
+	usleep_range(1000, 2000);
+	rval = lm3630a_read(pchip, REG_BRT_A);
+	if (rval < 0)
+		goto out_i2c_err;
+	brightness = rval;
+
+out:
+	bl->props.brightness = brightness;
+	return bl->props.brightness;
+out_i2c_err:
+	dev_err(pchip->dev, "i2c failed to access register\n");
+	return 0;
+}
+
+static const struct backlight_ops lm3630a_bank_a_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.update_status = lm3630a_bank_a_update_status,
+	.get_brightness = lm3630a_bank_a_get_brightness,
+};
+
+/* update and get brightness */
+static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+{
+	int ret;
+	struct lm3630a_chip *pchip = bl_get_data(bl);
+	enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+	/* pwm control */
+	if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
+		lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+				 bl->props.max_brightness);
+		return bl->props.brightness;
+	}
+
+	/* disable sleep */
+	ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+	if (ret < 0)
+		goto out_i2c_err;
+	usleep_range(1000, 2000);
+	/* minimum brightness is 0x04 */
+	ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness);
+	if (bl->props.brightness < 0x4)
+		ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0);
+	else
+		ret |= lm3630a_update(pchip, REG_CTRL,
+				      LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
+	if (ret < 0)
+		goto out_i2c_err;
+	return bl->props.brightness;
+
+out_i2c_err:
+	dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
+	return bl->props.brightness;
+}
+
+static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+{
+	int brightness, rval;
+	struct lm3630a_chip *pchip = bl_get_data(bl);
+	enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+
+	if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
+		rval = lm3630a_read(pchip, REG_PWM_OUTHIGH);
+		if (rval < 0)
+			goto out_i2c_err;
+		brightness = (rval & 0x01) << 8;
+		rval = lm3630a_read(pchip, REG_PWM_OUTLOW);
+		if (rval < 0)
+			goto out_i2c_err;
+		brightness |= rval;
+		goto out;
+	}
+
+	/* disable sleep */
+	rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
+	if (rval < 0)
+		goto out_i2c_err;
+	usleep_range(1000, 2000);
+	rval = lm3630a_read(pchip, REG_BRT_B);
+	if (rval < 0)
+		goto out_i2c_err;
+	brightness = rval;
+
+out:
+	bl->props.brightness = brightness;
+	return bl->props.brightness;
+out_i2c_err:
+	dev_err(pchip->dev, "i2c failed to access register\n");
+	return 0;
+}
+
+static const struct backlight_ops lm3630a_bank_b_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.update_status = lm3630a_bank_b_update_status,
+	.get_brightness = lm3630a_bank_b_get_brightness,
+};
+
+static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
+{
+	struct backlight_properties props;
+	struct lm3630a_platform_data *pdata = pchip->pdata;
+
+	props.type = BACKLIGHT_RAW;
+	if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
+		props.brightness = pdata->leda_init_brt;
+		props.max_brightness = pdata->leda_max_brt;
+		pchip->bleda =
+		    devm_backlight_device_register(pchip->dev, "lm3630a_leda",
+						   pchip->dev, pchip,
+						   &lm3630a_bank_a_ops, &props);
+		if (IS_ERR(pchip->bleda))
+			return PTR_ERR(pchip->bleda);
+	}
+
+	if ((pdata->ledb_ctrl != LM3630A_LEDB_DISABLE) &&
+	    (pdata->ledb_ctrl != LM3630A_LEDB_ON_A)) {
+		props.brightness = pdata->ledb_init_brt;
+		props.max_brightness = pdata->ledb_max_brt;
+		pchip->bledb =
+		    devm_backlight_device_register(pchip->dev, "lm3630a_ledb",
+						   pchip->dev, pchip,
+						   &lm3630a_bank_b_ops, &props);
+		if (IS_ERR(pchip->bledb))
+			return PTR_ERR(pchip->bledb);
+	}
+	return 0;
+}
+
+static const struct regmap_config lm3630a_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = REG_MAX,
+};
+
+static int lm3630a_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct lm3630a_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct lm3630a_chip *pchip;
+	int rval;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "fail : i2c functionality check\n");
+		return -EOPNOTSUPP;
+	}
+
+	pchip = devm_kzalloc(&client->dev, sizeof(struct lm3630a_chip),
+			     GFP_KERNEL);
+	if (!pchip)
+		return -ENOMEM;
+	pchip->dev = &client->dev;
+
+	pchip->regmap = devm_regmap_init_i2c(client, &lm3630a_regmap);
+	if (IS_ERR(pchip->regmap)) {
+		rval = PTR_ERR(pchip->regmap);
+		dev_err(&client->dev, "fail : allocate reg. map: %d\n", rval);
+		return rval;
+	}
+
+	i2c_set_clientdata(client, pchip);
+	if (pdata == NULL) {
+		pdata = devm_kzalloc(pchip->dev,
+				     sizeof(struct lm3630a_platform_data),
+				     GFP_KERNEL);
+		if (pdata == NULL)
+			return -ENOMEM;
+		/* default values */
+		pdata->leda_ctrl = LM3630A_LEDA_ENABLE;
+		pdata->ledb_ctrl = LM3630A_LEDB_ENABLE;
+		pdata->leda_max_brt = LM3630A_MAX_BRIGHTNESS;
+		pdata->ledb_max_brt = LM3630A_MAX_BRIGHTNESS;
+		pdata->leda_init_brt = LM3630A_MAX_BRIGHTNESS;
+		pdata->ledb_init_brt = LM3630A_MAX_BRIGHTNESS;
+	}
+	pchip->pdata = pdata;
+
+	/* chip initialize */
+	rval = lm3630a_chip_init(pchip);
+	if (rval < 0) {
+		dev_err(&client->dev, "fail : init chip\n");
+		return rval;
+	}
+	/* backlight register */
+	rval = lm3630a_backlight_register(pchip);
+	if (rval < 0) {
+		dev_err(&client->dev, "fail : backlight register.\n");
+		return rval;
+	}
+	/* pwm */
+	if (pdata->pwm_ctrl != LM3630A_PWM_DISABLE) {
+		pchip->pwmd = devm_pwm_get(pchip->dev, "lm3630a-pwm");
+		if (IS_ERR(pchip->pwmd)) {
+			dev_err(&client->dev, "fail : get pwm device\n");
+			return PTR_ERR(pchip->pwmd);
+		}
+		pchip->pwmd->period = pdata->pwm_period;
+	}
+
+	/* interrupt enable: irq 0 is not allowed */
+	pchip->irq = client->irq;
+	if (pchip->irq) {
+		rval = lm3630a_intr_config(pchip);
+		if (rval < 0)
+			return rval;
+	}
+	dev_info(&client->dev, "LM3630A backlight register OK.\n");
+	return 0;
+}
+
+static int lm3630a_remove(struct i2c_client *client)
+{
+	int rval;
+	struct lm3630a_chip *pchip = i2c_get_clientdata(client);
+
+	rval = lm3630a_write(pchip, REG_BRT_A, 0);
+	if (rval < 0)
+		dev_err(pchip->dev, "i2c failed to access register\n");
+
+	rval = lm3630a_write(pchip, REG_BRT_B, 0);
+	if (rval < 0)
+		dev_err(pchip->dev, "i2c failed to access register\n");
+
+	if (pchip->irq) {
+		free_irq(pchip->irq, pchip);
+		flush_workqueue(pchip->irqthread);
+		destroy_workqueue(pchip->irqthread);
+	}
+	return 0;
+}
+
+static const struct i2c_device_id lm3630a_id[] = {
+	{LM3630A_NAME, 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+
+static struct i2c_driver lm3630a_i2c_driver = {
+	.driver = {
+		   .name = LM3630A_NAME,
+		   },
+	.probe = lm3630a_probe,
+	.remove = lm3630a_remove,
+	.id_table = lm3630a_id,
+};
+
+module_i2c_driver(lm3630a_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3630A");
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("LDD MLP <ldd-mlp@list.ti.com>");
+MODULE_LICENSE("GPL v2");
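
lm3630a_pwm_ctrl() above maps brightness linearly onto the PWM period:
duty = br * period / br_max. With a 50000ns period, br_max = 255 and
br = 128, that gives 128 * 50000 / 255 = 25098ns, just over half the
period; br = 0 produces a zero duty and the PWM is disabled outright.
One caveat: the multiplication is done in unsigned int, so a very long
period could overflow. A hypothetical hardening, not in the driver as
merged, would widen the arithmetic first:

	u64 duty = (u64)br * pwm_get_period(pchip->pwmd);

	do_div(duty, br_max);	/* 64-bit divide helper, asm/div64.h */
	pwm_config(pchip->pwmd, duty, pwm_get_period(pchip->pwmd));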
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 053964d..6fd60ad 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -76,10 +76,13 @@
 		goto out;
 
 	/* output pins config. */
-	if (!pdata->init_brt_led)
-		reg_val = pdata->fled_pins | pdata->bled_pins;
-	else
-		reg_val = pdata->fled_pins | pdata->bled_pins | 0x01;
+	if (!pdata->init_brt_led) {
+		reg_val = pdata->fled_pins;
+		reg_val |= pdata->bled_pins;
+	} else {
+		reg_val = pdata->fled_pins;
+		reg_val |= pdata->bled_pins | 0x01;
+	}
 
 	ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x79, reg_val);
 	if (ret < 0)
@@ -304,7 +307,7 @@
 {
 	int ret;
 	struct lm3639_chip_data *pchip;
-	struct lm3639_platform_data *pdata = client->dev.platform_data;
+	struct lm3639_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct backlight_properties props;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4eec472..de88325 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -128,7 +128,7 @@
 {
 	struct lms283gf05_state *st = lcd_get_data(ld);
 	struct spi_device *spi = st->spi;
-	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+	struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
 
 	if (power <= FB_BLANK_NORMAL) {
 		if (pdata)
@@ -153,7 +153,7 @@
 static int lms283gf05_probe(struct spi_device *spi)
 {
 	struct lms283gf05_state *st;
-	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+	struct lms283gf05_pdata *pdata = dev_get_platdata(&spi->dev);
 	struct lcd_device *ld;
 	int ret = 0;
 
@@ -173,7 +173,8 @@
 		return -ENOMEM;
 	}
 
-	ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
+	ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st,
+					&lms_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -190,22 +191,12 @@
 	return 0;
 }
 
-static int lms283gf05_remove(struct spi_device *spi)
-{
-	struct lms283gf05_state *st = spi_get_drvdata(spi);
-
-	lcd_device_unregister(st->ld);
-
-	return 0;
-}
-
 static struct spi_driver lms283gf05_driver = {
 	.driver = {
 		.name	= "lms283gf05",
 		.owner	= THIS_MODULE,
 	},
 	.probe		= lms283gf05_probe,
-	.remove		= lms283gf05_remove,
 };
 
 module_spi_driver(lms283gf05_driver);
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index cf01b9a..77258b7 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -344,14 +344,14 @@
 	lcd->spi = spi;
 	lcd->dev = &spi->dev;
 
-	lcd->lcd_pd = spi->dev.platform_data;
+	lcd->lcd_pd = dev_get_platdata(&spi->dev);
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL\n");
 		return -EINVAL;
 	}
 
-	ld = lcd_device_register("lms501kf03", &spi->dev, lcd,
-				&lms501kf03_lcd_ops);
+	ld = devm_lcd_device_register(&spi->dev, "lms501kf03", &spi->dev, lcd,
+					&lms501kf03_lcd_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -382,8 +382,6 @@
 	struct lms501kf03 *lcd = spi_get_drvdata(spi);
 
 	lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
-	lcd_device_unregister(lcd->ld);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index c0b41f13..cae80d5 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -26,13 +26,15 @@
 #define LP8556_EPROM_START		0xA0
 #define LP8556_EPROM_END		0xAF
 
-/* LP8557 Registers */
+/* LP8555/7 Registers */
 #define LP8557_BL_CMD			0x00
 #define LP8557_BL_MASK			0x01
 #define LP8557_BL_ON			0x01
 #define LP8557_BL_OFF			0x00
 #define LP8557_BRIGHTNESS_CTRL		0x04
 #define LP8557_CONFIG			0x10
+#define LP8555_EPROM_START		0x10
+#define LP8555_EPROM_END		0x7A
 #define LP8557_EPROM_START		0x10
 #define LP8557_EPROM_END		0x1E
 
@@ -111,6 +113,10 @@
 		start = LP8556_EPROM_START;
 		end = LP8556_EPROM_END;
 		break;
+	case LP8555:
+		start = LP8555_EPROM_START;
+		end = LP8555_EPROM_END;
+		break;
 	case LP8557:
 		start = LP8557_EPROM_START;
 		end = LP8557_EPROM_END;
@@ -165,9 +171,14 @@
 	struct lp855x_platform_data *pd = lp->pdata;
 
 	switch (lp->chip_id) {
-	case LP8550 ... LP8556:
+	case LP8550:
+	case LP8551:
+	case LP8552:
+	case LP8553:
+	case LP8556:
 		lp->cfg = &lp855x_dev_cfg;
 		break;
+	case LP8555:
 	case LP8557:
 		lp->cfg = &lp8557_dev_cfg;
 		break;
@@ -289,7 +300,7 @@
 
 	props.brightness = pdata->initial_brightness;
 
-	bl = backlight_device_register(name, lp->dev, lp,
+	bl = devm_backlight_device_register(lp->dev, name, lp->dev, lp,
 				       &lp855x_bl_ops, &props);
 	if (IS_ERR(bl))
 		return PTR_ERR(bl);
@@ -299,12 +310,6 @@
 	return 0;
 }
 
-static void lp855x_backlight_unregister(struct lp855x *lp)
-{
-	if (lp->bl)
-		backlight_device_unregister(lp->bl);
-}
-
 static ssize_t lp855x_get_chip_id(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -394,7 +399,7 @@
 static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
 	struct lp855x *lp;
-	struct lp855x_platform_data *pdata = cl->dev.platform_data;
+	struct lp855x_platform_data *pdata = dev_get_platdata(&cl->dev);
 	struct device_node *node = cl->dev.of_node;
 	int ret;
 
@@ -403,7 +408,7 @@
 		if (ret < 0)
 			return ret;
 
-		pdata = cl->dev.platform_data;
+		pdata = dev_get_platdata(&cl->dev);
 	}
 
 	if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
@@ -428,29 +433,24 @@
 	ret = lp855x_configure(lp);
 	if (ret) {
 		dev_err(lp->dev, "device config err: %d", ret);
-		goto err_dev;
+		return ret;
 	}
 
 	ret = lp855x_backlight_register(lp);
 	if (ret) {
 		dev_err(lp->dev,
 			"failed to register backlight. err: %d\n", ret);
-		goto err_dev;
+		return ret;
 	}
 
 	ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
 	if (ret) {
 		dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
-		goto err_sysfs;
+		return ret;
 	}
 
 	backlight_update_status(lp->bl);
 	return 0;
-
-err_sysfs:
-	lp855x_backlight_unregister(lp);
-err_dev:
-	return ret;
 }
 
 static int lp855x_remove(struct i2c_client *cl)
@@ -460,7 +460,6 @@
 	lp->bl->props.brightness = 0;
 	backlight_update_status(lp->bl);
 	sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
-	lp855x_backlight_unregister(lp);
 
 	return 0;
 }
@@ -470,6 +469,7 @@
 	{ .compatible = "ti,lp8551", },
 	{ .compatible = "ti,lp8552", },
 	{ .compatible = "ti,lp8553", },
+	{ .compatible = "ti,lp8555", },
 	{ .compatible = "ti,lp8556", },
 	{ .compatible = "ti,lp8557", },
 	{ }
@@ -481,6 +481,7 @@
 	{"lp8551", LP8551},
 	{"lp8552", LP8552},
 	{"lp8553", LP8553},
+	{"lp8555", LP8555},
 	{"lp8556", LP8556},
 	{"lp8557", LP8557},
 	{ }
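
The lp855x hunks above are subtler than they look: LP8555 lands between
LP8553 and LP8556 in the chip-id enum, so the old GCC case range
"case LP8550 ... LP8556:" would silently have routed the new chip to the
generic register config instead of the LP8557-style one it needs. Spelling
the cases out makes the routing explicit; roughly (enum layout inferred
from the id tables above):

	enum lp855x_chip_id {
		LP8550, LP8551, LP8552, LP8553, LP8555, LP8556, LP8557,
	};

	switch (lp->chip_id) {
	case LP8550:
	case LP8551:
	case LP8552:
	case LP8553:
	case LP8556:
		lp->cfg = &lp855x_dev_cfg;	/* direct-control family */
		break;
	case LP8555:
	case LP8557:
		lp->cfg = &lp8557_dev_cfg;	/* command-register family */
		break;
	default:
		return -EINVAL;
	}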
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index 980855e..e49905d 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -52,7 +52,7 @@
 	struct pwm_device *pwm;
 };
 
-struct lp8788_bl_config default_bl_config = {
+static struct lp8788_bl_config default_bl_config = {
 	.bl_mode    = LP8788_BL_REGISTER_ONLY,
 	.dim_mode   = LP8788_DIM_EXPONENTIAL,
 	.full_scale = LP8788_FULLSCALE_1900uA,
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index ed1b392..383f550 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -242,7 +242,8 @@
 	if (!lcd->buffer)
 		return -ENOMEM;
 
-	ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
+	ld = devm_lcd_device_register(&spi->dev, "ltv350qv", &spi->dev, lcd,
+					&ltv_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -250,15 +251,11 @@
 
 	ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
 	if (ret)
-		goto out_unregister;
+		return ret;
 
 	spi_set_drvdata(spi, lcd);
 
 	return 0;
-
-out_unregister:
-	lcd_device_unregister(ld);
-	return ret;
 }
 
 static int ltv350qv_remove(struct spi_device *spi)
@@ -266,8 +263,6 @@
 	struct ltv350qv *lcd = spi_get_drvdata(spi);
 
 	ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
-	lcd_device_unregister(lcd->ld);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 498fd73..1802b2d 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -93,7 +93,7 @@
 static int lv5207lp_probe(struct i2c_client *client,
 			  const struct i2c_device_id *id)
 {
-	struct lv5207lp_platform_data *pdata = client->dev.platform_data;
+	struct lv5207lp_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct backlight_device *backlight;
 	struct backlight_properties props;
 	struct lv5207lp *lv;
@@ -124,9 +124,9 @@
 	props.brightness = clamp_t(unsigned int, pdata->def_value, 0,
 				   props.max_brightness);
 
-	backlight = backlight_device_register(dev_name(&client->dev),
-					      &lv->client->dev, lv,
-					      &lv5207lp_backlight_ops, &props);
+	backlight = devm_backlight_device_register(&client->dev,
+				dev_name(&client->dev), &lv->client->dev,
+				lv, &lv5207lp_backlight_ops, &props);
 	if (IS_ERR(backlight)) {
 		dev_err(&client->dev, "failed to register backlight\n");
 		return PTR_ERR(backlight);
@@ -144,7 +144,6 @@
 
 	backlight->props.brightness = 0;
 	backlight_update_status(backlight);
-	backlight_device_unregister(backlight);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 886e797..66fa08c 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -163,7 +163,8 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
-	bl = backlight_device_register("max8925-backlight", &pdev->dev, data,
+	bl = devm_backlight_device_register(&pdev->dev, "max8925-backlight",
+					&pdev->dev, data,
 					&max8925_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
@@ -188,20 +189,9 @@
 	}
 	ret = max8925_set_bits(chip->i2c, data->reg_mode_cntl, 0xfe, value);
 	if (ret < 0)
-		goto out_brt;
+		return ret;
 	backlight_update_status(bl);
 	return 0;
-out_brt:
-	backlight_device_unregister(bl);
-	return ret;
-}
-
-static int max8925_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bl);
-	return 0;
 }
 
 static struct platform_driver max8925_backlight_driver = {
@@ -210,7 +200,6 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= max8925_backlight_probe,
-	.remove		= max8925_backlight_remove,
 };
 
 module_platform_driver(max8925_backlight_driver);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 812e22e..ac11a46 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -133,7 +133,7 @@
 	struct backlight_properties props;
 	struct backlight_device *dev;
 	struct omap_backlight *bl;
-	struct omap_backlight_config *pdata = pdev->dev.platform_data;
+	struct omap_backlight_config *pdata = dev_get_platdata(&pdev->dev);
 
 	if (!pdata)
 		return -ENXIO;
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 633b0a2..2098c5d 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -120,8 +120,8 @@
 	memset(&props, 0, sizeof(props));
 	props.max_brightness = MAX_USER_VALUE;
 	props.type = BACKLIGHT_RAW;
-	bl = backlight_device_register(pdev->name, &pdev->dev,
-			NULL, &pandora_backlight_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, pdev->name, &pdev->dev,
+					NULL, &pandora_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -145,20 +145,12 @@
 	return 0;
 }
 
-static int pandora_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-	backlight_device_unregister(bl);
-	return 0;
-}
-
 static struct platform_driver pandora_backlight_driver = {
 	.driver		= {
 		.name	= "pandora-backlight",
 		.owner	= THIS_MODULE,
 	},
 	.probe		= pandora_backlight_probe,
-	.remove		= pandora_backlight_remove,
 };
 
 module_platform_driver(pandora_backlight_driver);
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 6ed76be..b95d3b0 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -103,7 +103,7 @@
 {
 	struct pcf50633_bl *pcf_bl;
 	struct device *parent = pdev->dev.parent;
-	struct pcf50633_platform_data *pcf50633_data = parent->platform_data;
+	struct pcf50633_platform_data *pcf50633_data = dev_get_platdata(parent);
 	struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data;
 	struct backlight_properties bl_props;
 
@@ -126,7 +126,8 @@
 
 	pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent);
 
-	pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl,
+	pcf_bl->bl = devm_backlight_device_register(&pdev->dev, pdev->name,
+						&pdev->dev, pcf_bl,
 						&pcf50633_bl_ops, &bl_props);
 
 	if (IS_ERR(pcf_bl->bl))
@@ -147,18 +148,8 @@
 	return 0;
 }
 
-static int pcf50633_bl_remove(struct platform_device *pdev)
-{
-	struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(pcf_bl->bl);
-
-	return 0;
-}
-
 static struct platform_driver pcf50633_bl_driver = {
 	.probe =	pcf50633_bl_probe,
-	.remove =	pcf50633_bl_remove,
 	.driver = {
 		.name = "pcf50633-backlight",
 	},
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 0568367..d01884d 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -80,7 +80,7 @@
 	struct device *dev = &pdev->dev;
 	int err;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(dev, "no platform data supplied\n");
 		return -EINVAL;
@@ -101,30 +101,17 @@
 
 	plcd->us = dev;
 	plcd->pdata = pdata;
-	plcd->lcd = lcd_device_register(dev_name(dev), dev,
-					plcd, &platform_lcd_ops);
+	plcd->lcd = devm_lcd_device_register(&pdev->dev, dev_name(dev), dev,
+						plcd, &platform_lcd_ops);
 	if (IS_ERR(plcd->lcd)) {
 		dev_err(dev, "cannot register lcd device\n");
-		err = PTR_ERR(plcd->lcd);
-		goto err;
+		return PTR_ERR(plcd->lcd);
 	}
 
 	platform_set_drvdata(pdev, plcd);
 	platform_lcd_set_power(plcd->lcd, FB_BLANK_NORMAL);
 
 	return 0;
-
- err:
-	return err;
-}
-
-static int platform_lcd_remove(struct platform_device *pdev)
-{
-	struct platform_lcd *plcd = platform_get_drvdata(pdev);
-
-	lcd_device_unregister(plcd->lcd);
-
-	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -168,7 +155,6 @@
 		.of_match_table = of_match_ptr(platform_lcd_of_match),
 	},
 	.probe		= platform_lcd_probe,
-	.remove		= platform_lcd_remove,
 };
 
 module_platform_driver(platform_lcd_driver);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 1fea627..fb80d68 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -19,6 +21,7 @@
 #include <linux/err.h>
 #include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 
 struct pwm_bl_data {
@@ -27,6 +30,11 @@
 	unsigned int		period;
 	unsigned int		lth_brightness;
 	unsigned int		*levels;
+	bool			enabled;
+	struct regulator	*power_supply;
+	int			enable_gpio;
+	unsigned long		enable_gpio_flags;
+	unsigned int		scale;
 	int			(*notify)(struct device *,
 					  int brightness);
 	void			(*notify_after)(struct device *,
@@ -35,11 +43,65 @@
 	void			(*exit)(struct device *);
 };
 
+static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
+{
+	int err;
+
+	if (pb->enabled)
+		return;
+
+	err = regulator_enable(pb->power_supply);
+	if (err < 0)
+		dev_err(pb->dev, "failed to enable power supply\n");
+
+	if (gpio_is_valid(pb->enable_gpio)) {
+		if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+			gpio_set_value(pb->enable_gpio, 0);
+		else
+			gpio_set_value(pb->enable_gpio, 1);
+	}
+
+	pwm_enable(pb->pwm);
+	pb->enabled = true;
+}
+
+static void pwm_backlight_power_off(struct pwm_bl_data *pb)
+{
+	if (!pb->enabled)
+		return;
+
+	pwm_config(pb->pwm, 0, pb->period);
+	pwm_disable(pb->pwm);
+
+	if (gpio_is_valid(pb->enable_gpio)) {
+		if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+			gpio_set_value(pb->enable_gpio, 1);
+		else
+			gpio_set_value(pb->enable_gpio, 0);
+	}
+
+	regulator_disable(pb->power_supply);
+	pb->enabled = false;
+}
+
+static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
+{
+	unsigned int lth = pb->lth_brightness;
+	int duty_cycle;
+
+	if (pb->levels)
+		duty_cycle = pb->levels[brightness];
+	else
+		duty_cycle = brightness;
+
+	return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+}
+
 static int pwm_backlight_update_status(struct backlight_device *bl)
 {
 	struct pwm_bl_data *pb = bl_get_data(bl);
 	int brightness = bl->props.brightness;
-	int max = bl->props.max_brightness;
+	int duty_cycle;
 
 	if (bl->props.power != FB_BLANK_UNBLANK ||
 	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
@@ -49,24 +111,12 @@
 	if (pb->notify)
 		brightness = pb->notify(pb->dev, brightness);
 
-	if (brightness == 0) {
-		pwm_config(pb->pwm, 0, pb->period);
-		pwm_disable(pb->pwm);
-	} else {
-		int duty_cycle;
-
-		if (pb->levels) {
-			duty_cycle = pb->levels[brightness];
-			max = pb->levels[max];
-		} else {
-			duty_cycle = brightness;
-		}
-
-		duty_cycle = pb->lth_brightness +
-		     (duty_cycle * (pb->period - pb->lth_brightness) / max);
+	if (brightness > 0) {
+		duty_cycle = compute_duty_cycle(pb, brightness);
 		pwm_config(pb->pwm, duty_cycle, pb->period);
-		pwm_enable(pb->pwm);
-	}
+		pwm_backlight_power_on(pb, brightness);
+	} else
+		pwm_backlight_power_off(pb);
 
 	if (pb->notify_after)
 		pb->notify_after(pb->dev, brightness);
@@ -98,6 +148,7 @@
 				  struct platform_pwm_backlight_data *data)
 {
 	struct device_node *node = dev->of_node;
+	enum of_gpio_flags flags;
 	struct property *prop;
 	int length;
 	u32 value;
@@ -138,11 +189,13 @@
 		data->max_brightness--;
 	}
 
-	/*
-	 * TODO: Most users of this driver use a number of GPIOs to control
-	 *       backlight power. Support for specifying these needs to be
-	 *       added.
-	 */
+	data->enable_gpio = of_get_named_gpio_flags(node, "enable-gpios", 0,
+						    &flags);
+	if (data->enable_gpio == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	if (gpio_is_valid(data->enable_gpio) && (flags & OF_GPIO_ACTIVE_LOW))
+		data->enable_gpio_flags |= PWM_BACKLIGHT_GPIO_ACTIVE_LOW;
 
 	return 0;
 }
@@ -163,12 +216,11 @@
 
 static int pwm_backlight_probe(struct platform_device *pdev)
 {
-	struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
+	struct platform_pwm_backlight_data *data = dev_get_platdata(&pdev->dev);
 	struct platform_pwm_backlight_data defdata;
 	struct backlight_properties props;
 	struct backlight_device *bl;
 	struct pwm_bl_data *pb;
-	unsigned int max;
 	int ret;
 
 	if (!data) {
@@ -195,16 +247,46 @@
 	}
 
 	if (data->levels) {
-		max = data->levels[data->max_brightness];
+		unsigned int i;
+
+		for (i = 0; i <= data->max_brightness; i++)
+			if (data->levels[i] > pb->scale)
+				pb->scale = data->levels[i];
+
 		pb->levels = data->levels;
 	} else
-		max = data->max_brightness;
+		pb->scale = data->max_brightness;
 
+	pb->enable_gpio = data->enable_gpio;
+	pb->enable_gpio_flags = data->enable_gpio_flags;
 	pb->notify = data->notify;
 	pb->notify_after = data->notify_after;
 	pb->check_fb = data->check_fb;
 	pb->exit = data->exit;
 	pb->dev = &pdev->dev;
+	pb->enabled = false;
+
+	if (gpio_is_valid(pb->enable_gpio)) {
+		unsigned long flags;
+
+		if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
+			flags = GPIOF_OUT_INIT_HIGH;
+		else
+			flags = GPIOF_OUT_INIT_LOW;
+
+		ret = gpio_request_one(pb->enable_gpio, flags, "enable");
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n",
+				pb->enable_gpio, ret);
+			goto err_alloc;
+		}
+	}
+
+	pb->power_supply = devm_regulator_get(&pdev->dev, "power");
+	if (IS_ERR(pb->power_supply)) {
+		ret = PTR_ERR(pb->power_supply);
+		goto err_gpio;
+	}
 
 	pb->pwm = devm_pwm_get(&pdev->dev, NULL);
 	if (IS_ERR(pb->pwm)) {
@@ -214,7 +296,7 @@
 		if (IS_ERR(pb->pwm)) {
 			dev_err(&pdev->dev, "unable to request legacy PWM\n");
 			ret = PTR_ERR(pb->pwm);
-			goto err_alloc;
+			goto err_gpio;
 		}
 	}
 
@@ -229,7 +311,7 @@
 		pwm_set_period(pb->pwm, data->pwm_period_ns);
 
 	pb->period = pwm_get_period(pb->pwm);
-	pb->lth_brightness = data->lth_brightness * (pb->period / max);
+	pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
 
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
@@ -239,7 +321,7 @@
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		ret = PTR_ERR(bl);
-		goto err_alloc;
+		goto err_gpio;
 	}
 
 	if (data->dft_brightness > data->max_brightness) {
@@ -255,6 +337,9 @@
 	platform_set_drvdata(pdev, bl);
 	return 0;
 
+err_gpio:
+	if (gpio_is_valid(pb->enable_gpio))
+		gpio_free(pb->enable_gpio);
 err_alloc:
 	if (data->exit)
 		data->exit(&pdev->dev);
@@ -267,10 +352,11 @@
 	struct pwm_bl_data *pb = bl_get_data(bl);
 
 	backlight_device_unregister(bl);
-	pwm_config(pb->pwm, 0, pb->period);
-	pwm_disable(pb->pwm);
+	pwm_backlight_power_off(pb);
+
 	if (pb->exit)
 		pb->exit(&pdev->dev);
+
 	return 0;
 }
 
@@ -282,10 +368,12 @@
 
 	if (pb->notify)
 		pb->notify(pb->dev, 0);
-	pwm_config(pb->pwm, 0, pb->period);
-	pwm_disable(pb->pwm);
+
+	pwm_backlight_power_off(pb);
+
 	if (pb->notify_after)
 		pb->notify_after(pb->dev, 0);
+
 	return 0;
 }
 
@@ -294,12 +382,19 @@
 	struct backlight_device *bl = dev_get_drvdata(dev);
 
 	backlight_update_status(bl);
+
 	return 0;
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
-			 pwm_backlight_resume);
+static const struct dev_pm_ops pwm_backlight_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend = pwm_backlight_suspend,
+	.resume = pwm_backlight_resume,
+	.poweroff = pwm_backlight_suspend,
+	.restore = pwm_backlight_resume,
+#endif
+};
 
 static struct platform_driver pwm_backlight_driver = {
 	.driver		= {
@@ -317,4 +412,3 @@
 MODULE_DESCRIPTION("PWM based Backlight Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pwm-backlight");
-
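
The new compute_duty_cycle() helper scales against pb->scale, the largest entry in the levels[] table, where the old code assumed levels[max_brightness] was the largest; tables that do not increase monotonically now still map into [0, period]. A standalone sketch of the arithmetic, with made-up numbers:

	/* Userspace sketch of the compute_duty_cycle() scaling (all values assumed). */
	#include <stdio.h>

	int main(void)
	{
		unsigned int levels[] = { 0, 4, 16, 64, 255 };
		unsigned int scale = 255;	/* largest entry in levels[] */
		unsigned int period = 1000000;	/* PWM period, ns */
		unsigned int lth = 0;		/* low-threshold offset */
		unsigned int b = 2;		/* brightness index */

		/* same formula as the driver: lth + levels[b] * (period - lth) / scale */
		printf("%u ns\n", (levels[b] * (period - lth) / scale) + lth);
		return 0;	/* prints 62745 ns */
	}
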
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index b37bb18..510a1bc 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -735,13 +735,14 @@
 	lcd->spi = spi;
 	lcd->dev = &spi->dev;
 
-	lcd->lcd_pd = spi->dev.platform_data;
+	lcd->lcd_pd = dev_get_platdata(&spi->dev);
 	if (!lcd->lcd_pd) {
 		dev_err(&spi->dev, "platform data is NULL.\n");
 		return -EINVAL;
 	}
 
-	ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
+	ld = devm_lcd_device_register(&spi->dev, "s6e63m0", &spi->dev, lcd,
+				&s6e63m0_lcd_ops);
 	if (IS_ERR(ld))
 		return PTR_ERR(ld);
 
@@ -751,12 +752,11 @@
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
 
-	bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd,
-		&s6e63m0_backlight_ops, &props);
-	if (IS_ERR(bd)) {
-		ret =  PTR_ERR(bd);
-		goto out_lcd_unregister;
-	}
+	bd = devm_backlight_device_register(&spi->dev, "s6e63m0bl-bl",
+					&spi->dev, lcd, &s6e63m0_backlight_ops,
+					&props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
 
 	bd->props.brightness = MAX_BRIGHTNESS;
 	lcd->bd = bd;
@@ -798,10 +798,6 @@
 	dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
 
 	return 0;
-
-out_lcd_unregister:
-	lcd_device_unregister(ld);
-	return ret;
 }
 
 static int s6e63m0_remove(struct spi_device *spi)
@@ -811,8 +807,6 @@
 	s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
 	device_remove_file(&spi->dev, &dev_attr_gamma_table);
 	device_remove_file(&spi->dev, &dev_attr_gamma_mode);
-	backlight_device_unregister(lcd->bd);
-	lcd_device_unregister(lcd->ld);
 
 	return 0;
 }
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 18cdf46..908016f 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -338,7 +338,7 @@
 	enum tdo24m_model model;
 	int err;
 
-	pdata = spi->dev.platform_data;
+	pdata = dev_get_platdata(&spi->dev);
 	if (pdata)
 		model = pdata->model;
 	else
@@ -385,21 +385,17 @@
 		return -EINVAL;
 	}
 
-	lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
-					lcd, &tdo24m_ops);
+	lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "tdo24m", &spi->dev,
+						lcd, &tdo24m_ops);
 	if (IS_ERR(lcd->lcd_dev))
 		return PTR_ERR(lcd->lcd_dev);
 
 	spi_set_drvdata(spi, lcd);
 	err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
 	if (err)
-		goto out_unregister;
+		return err;
 
 	return 0;
-
-out_unregister:
-	lcd_device_unregister(lcd->lcd_dev);
-	return err;
 }
 
 static int tdo24m_remove(struct spi_device *spi)
@@ -407,8 +403,6 @@
 	struct tdo24m *lcd = spi_get_drvdata(spi);
 
 	tdo24m_power(lcd, FB_BLANK_POWERDOWN);
-	lcd_device_unregister(lcd->lcd_dev);
-
 	return 0;
 }
 
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 9df66ac..b8db933 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -38,7 +38,7 @@
 
 static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
 {
-	struct spi_device *spi = data->i2c->dev.platform_data;
+	struct spi_device *spi = dev_get_platdata(&data->i2c->dev);
 
 	i2c_smbus_write_byte_data(data->i2c, DAC_CH1, data->comadj);
 
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index bf08157..be5d636 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -198,7 +198,7 @@
 	ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
 				GPIOF_OUT_INIT_LOW, "tg #pwr");
 	if (ret < 0)
-		goto err_gpio_tg;
+		return ret;
 
 	mdelay(60);
 
@@ -219,8 +219,6 @@
 
 err_register:
 	tosa_lcd_tg_off(data);
-err_gpio_tg:
-	spi_set_drvdata(spi, NULL);
 	return ret;
 }
 
@@ -235,8 +233,6 @@
 
 	tosa_lcd_tg_off(data);
 
-	spi_set_drvdata(spi, NULL);
-
 	return 0;
 }
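
The spi_set_drvdata(spi, NULL) deletions here, like the dev_set_drvdata()/pci_set_drvdata() deletions in the fbdev drivers below, all rely on the same core change: since around v3.6 the driver core clears drvdata itself once no driver is bound, so remove paths and probe error paths no longer need to do it by hand. A sketch, with an invented example_remove():

	static int example_remove(struct platform_device *pdev)
	{
		struct fb_info *info = platform_get_drvdata(pdev);

		unregister_framebuffer(info);
		framebuffer_release(info);
		/* no *_set_drvdata(..., NULL): the core resets it after remove */
		return 0;
	}
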
 
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 0578231..cbba37e 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -287,12 +287,11 @@
 		if (IS_ERR(pdata))
 			return PTR_ERR(pdata);
 	} else {
-		if (!pdev->dev.platform_data) {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (!pdata) {
 			dev_err(&pdev->dev, "no platform data provided\n");
 			return -EINVAL;
 		}
-
-		pdata = pdev->dev.platform_data;
 	}
 
 	tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
@@ -314,7 +313,7 @@
 	bl_props.type = BACKLIGHT_RAW;
 	bl_props.max_brightness = 100;
 
-	tps65217_bl->bl = backlight_device_register(pdev->name,
+	tps65217_bl->bl = devm_backlight_device_register(&pdev->dev, pdev->name,
 						tps65217_bl->dev, tps65217_bl,
 						&tps65217_bl_ops, &bl_props);
 	if (IS_ERR(tps65217_bl->bl)) {
@@ -330,18 +329,8 @@
 	return 0;
 }
 
-static int tps65217_bl_remove(struct platform_device *pdev)
-{
-	struct tps65217_bl *tps65217_bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(tps65217_bl->bl);
-
-	return 0;
-}
-
 static struct platform_driver tps65217_bl_driver = {
 	.probe		= tps65217_bl_probe,
-	.remove		= tps65217_bl_remove,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "tps65217-bl",
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 9e5517a..8b9455e 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -123,7 +123,7 @@
 static int wm831x_backlight_probe(struct platform_device *pdev)
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-	struct wm831x_pdata *wm831x_pdata;
+	struct wm831x_pdata *wm831x_pdata = dev_get_platdata(pdev->dev.parent);
 	struct wm831x_backlight_pdata *pdata;
 	struct wm831x_backlight_data *data;
 	struct backlight_device *bl;
@@ -131,12 +131,10 @@
 	int ret, i, max_isel, isink_reg, dcdc_cfg;
 
 	/* We need platform data */
-	if (pdev->dev.parent->platform_data) {
-		wm831x_pdata = pdev->dev.parent->platform_data;
+	if (wm831x_pdata)
 		pdata = wm831x_pdata->backlight;
-	} else {
+	else
 		pdata = NULL;
-	}
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "No platform data supplied\n");
@@ -197,8 +195,8 @@
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = max_isel;
-	bl = backlight_device_register("wm831x", &pdev->dev, data,
-				       &wm831x_backlight_ops, &props);
+	bl = devm_backlight_device_register(&pdev->dev, "wm831x", &pdev->dev,
+					data, &wm831x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		return PTR_ERR(bl);
@@ -216,21 +214,12 @@
 	return 0;
 }
 
-static int wm831x_backlight_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bl = platform_get_drvdata(pdev);
-
-	backlight_device_unregister(bl);
-	return 0;
-}
-
 static struct platform_driver wm831x_backlight_driver = {
 	.driver		= {
 		.name	= "wm831x-backlight",
 		.owner	= THIS_MODULE,
 	},
 	.probe		= wm831x_backlight_probe,
-	.remove		= wm831x_backlight_remove,
 };
 
 module_platform_driver(wm831x_backlight_driver);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 87f288b..42b8f9d 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -761,19 +761,7 @@
 		   .owner = THIS_MODULE,
 		   },
 };
-
-static int __init bfin_bf54x_driver_init(void)
-{
-	return platform_driver_register(&bfin_bf54x_driver);
-}
-
-static void __exit bfin_bf54x_driver_cleanup(void)
-{
-	platform_driver_unregister(&bfin_bf54x_driver);
-}
+module_platform_driver(bfin_bf54x_driver);
 
 MODULE_DESCRIPTION("Blackfin BF54x TFT LCD Driver");
 MODULE_LICENSE("GPL");
-
-module_init(bfin_bf54x_driver_init);
-module_exit(bfin_bf54x_driver_cleanup);
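
module_platform_driver() is shorthand for exactly the boilerplate being deleted here and in the drivers that follow; via module_driver() it expands to roughly:

	static int __init bfin_bf54x_driver_init(void)
	{
		return platform_driver_register(&bfin_bf54x_driver);
	}
	module_init(bfin_bf54x_driver_init);

	static void __exit bfin_bf54x_driver_exit(void)
	{
		platform_driver_unregister(&bfin_bf54x_driver);
	}
	module_exit(bfin_bf54x_driver_exit);
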
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 48c0c4e..b5cf130 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -664,19 +664,7 @@
 		   .owner = THIS_MODULE,
 		   },
 };
-
-static int __init bfin_t350mcqb_driver_init(void)
-{
-	return platform_driver_register(&bfin_t350mcqb_driver);
-}
-
-static void __exit bfin_t350mcqb_driver_cleanup(void)
-{
-	platform_driver_unregister(&bfin_t350mcqb_driver);
-}
+module_platform_driver(bfin_t350mcqb_driver);
 
 MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
 MODULE_LICENSE("GPL");
-
-module_init(bfin_t350mcqb_driver_init);
-module_exit(bfin_t350mcqb_driver_cleanup);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index b09701c..8556264 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1167,9 +1167,8 @@
 	if (retval < 0)
 		goto err_unreg_fb;
 
-	printk(KERN_INFO
-	       "fb%d: Broadsheet frame buffer, using %dK of video memory\n",
-	       info->node, videomemorysize >> 10);
+	fb_info(info, "Broadsheet frame buffer, using %dK of video memory\n",
+		videomemorysize >> 10);
 
 
 	return 0;
@@ -1217,19 +1216,7 @@
 		.name	= "broadsheetfb",
 	},
 };
-
-static int __init broadsheetfb_init(void)
-{
-	return platform_driver_register(&broadsheetfb_driver);
-}
-
-static void __exit broadsheetfb_exit(void)
-{
-	platform_driver_unregister(&broadsheetfb_driver);
-}
-
-module_init(broadsheetfb_init);
-module_exit(broadsheetfb_exit);
+module_platform_driver(broadsheetfb_driver);
 
 MODULE_DESCRIPTION("fbdev driver for Broadsheet controller");
 MODULE_AUTHOR("Jaya Kumar");
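
The printk-to-fb_info() conversions in this driver and many below use the fb_<level> logging macros added to include/linux/fb.h in this cycle; they stamp the "fb%d: " prefix automatically, so each call site drops its hand-rolled info->node argument. They are defined along these lines:

	#define fb_err(fb_info, fmt, ...)				\
		pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
	#define fb_warn(fb_info, fmt, ...)				\
		pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
	#define fb_info(fb_info, fmt, ...)				\
		pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
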
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 60017fc..bc123d6 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -363,8 +363,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index 153dd65..65f7c15 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -585,8 +585,7 @@
 	if (ret < 0)
 		goto err_dealloc_cmap;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-			info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	*rinfo = info;
 	return 0;
@@ -746,7 +745,6 @@
 	iounmap(hw->v_regs);
 	release_mem_region(fix.mmio_start, fix.mmio_len);
 
-	pci_set_drvdata(dev, NULL);
 	pci_disable_device(dev);
 	kfree(hw);
 }
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index baed57d..a2bb276 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -181,7 +181,7 @@
 			}
 			shift += bpp;
 			shift &= (32 - 1);
-			if (!l) { l = 8; s++; };
+			if (!l) { l = 8; s++; }
 		}
 
 		/* write trailing bits */
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index ed3b8891..c79745b 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -330,7 +330,7 @@
 		default:
 			ret = -ENOSYS;
 			break;
-		};
+		}
 		if (!ret) {
 			sbus_writeb(cur_mode, &regs->mcr);
 			par->mode = mode;
@@ -343,7 +343,7 @@
 					  FBTYPE_MDICOLOR, 8,
 					  info->fix.smem_len);
 		break;
-	};
+	}
 
 	return ret;
 }
@@ -583,8 +583,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 9f63507..64a89d5 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -446,8 +446,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 3545dec..70781fe 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -624,7 +624,7 @@
 	default:
 		cg6_cpu_name = "i386";
 		break;
-	};
+	}
 	if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
 		if (info->fix.smem_len <= 0x100000)
 			cg6_card_name = "TGX";
@@ -839,8 +839,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 97db3ba..5aab9b9 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -595,11 +595,6 @@
 		return -EINVAL;
 	}
 
-	if (var->xoffset < 0)
-		var->xoffset = 0;
-	if (var->yoffset < 0)
-		var->yoffset = 0;
-
 	/* truncate xoffset and yoffset to maximum if too high */
 	if (var->xoffset > var->xres_virtual - var->xres)
 		var->xoffset = var->xres_virtual - var->xres - 1;
@@ -2159,7 +2154,6 @@
 	if (!ret)
 		return 0;
 
-	pci_set_drvdata(pdev, NULL);
 	iounmap(info->screen_base);
 err_release_legacy:
 	if (release_io_ports)
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index a903149..d5533f4 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -368,8 +368,7 @@
 
 	lcd_clear(info);
 
-	printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
-		info->node);
+	fb_info(info, "Cobalt server LCD frame buffer device\n");
 
 	return 0;
 }
@@ -395,19 +394,7 @@
 		.owner	= THIS_MODULE,
 	},
 };
-
-static int __init cobalt_lcdfb_init(void)
-{
-	return platform_driver_register(&cobalt_lcdfb_driver);
-}
-
-static void __exit cobalt_lcdfb_exit(void)
-{
-	platform_driver_unregister(&cobalt_lcdfb_driver);
-}
-
-module_init(cobalt_lcdfb_init);
-module_exit(cobalt_lcdfb_exit);
+module_platform_driver(cobalt_lcdfb_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Yoichi Yuasa");
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 67b77b4..fdadef9 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -471,8 +471,8 @@
 	/* Register with fbdev layer */
 	if (register_framebuffer(&p->info) < 0)
 		return -ENXIO;
-	
-	printk(KERN_INFO "fb%d: control display adapter\n", p->info.node);	
+
+	fb_info(&p->info, "control display adapter\n");
 
 	return 0;
 }
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 1c446bc..b0a950f 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1810,11 +1810,6 @@
 		iounmap(cfb->region);
 		cyberpro_free_fb_info(cfb);
 
-		/*
-		 * Ensure that the driver data is no longer
-		 * valid.
-		 */
-		pci_set_drvdata(dev, NULL);
 		if (cfb == int_cfb_info)
 			int_cfb_info = NULL;
 
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index e030e17..a1d74dd 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -129,7 +129,6 @@
 
 #define LCD_NUM_BUFFERS	2
 
-#define WSI_TIMEOUT	50
 #define PALETTE_SIZE	256
 
 #define	CLK_MIN_DIV	2
@@ -1314,7 +1313,7 @@
 
 static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
 {
-	struct da8xx_lcdc_platform_data *fb_pdata = dev->dev.platform_data;
+	struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&dev->dev);
 	struct fb_videomode *lcdc_info;
 	int i;
 
@@ -1336,7 +1335,7 @@
 static int fb_probe(struct platform_device *device)
 {
 	struct da8xx_lcdc_platform_data *fb_pdata =
-						device->dev.platform_data;
+						dev_get_platdata(&device->dev);
 	static struct resource *lcdc_regs;
 	struct lcd_ctrl_config *lcd_cfg;
 	struct fb_videomode *lcdc_info;
@@ -1548,7 +1547,7 @@
 }
 
 #ifdef CONFIG_PM
-struct lcdc_context {
+static struct lcdc_context {
 	u32 clk_enable;
 	u32 ctrl;
 	u32 dma_ctrl;
@@ -1663,19 +1662,7 @@
 		   .owner = THIS_MODULE,
 		   },
 };
-
-static int __init da8xx_fb_init(void)
-{
-	return platform_driver_register(&da8xx_fb_driver);
-}
-
-static void __exit da8xx_fb_cleanup(void)
-{
-	platform_driver_unregister(&da8xx_fb_driver);
-}
-
-module_init(da8xx_fb_init);
-module_exit(da8xx_fb_cleanup);
+module_platform_driver(da8xx_fb_driver);
 
 MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx");
 MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 7f9ff75..cd7c0df 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -108,8 +108,8 @@
 			if (!*this_opt) continue;
 
 			for (i = 0; i < M_UNKNOWN; i++) {
-				if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
-				    efifb_dmi_list[i].base != 0) {
+				if (efifb_dmi_list[i].base != 0 &&
+				    !strcmp(this_opt, efifb_dmi_list[i].optname)) {
 					screen_info.lfb_base = efifb_dmi_list[i].base;
 					screen_info.lfb_linelength = efifb_dmi_list[i].stride;
 					screen_info.lfb_width = efifb_dmi_list[i].width;
@@ -322,8 +322,7 @@
 		printk(KERN_ERR "efifb: cannot register framebuffer\n");
 		goto err_fb_dealoc;
 	}
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-		info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 
 err_fb_dealoc:
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 28a837d..35a0f53 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -487,7 +487,7 @@
 
 static int ep93xxfb_probe(struct platform_device *pdev)
 {
-	struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
+	struct ep93xxfb_mach_info *mach_info = dev_get_platdata(&pdev->dev);
 	struct fb_info *info;
 	struct ep93xx_fbi *fbi;
 	struct resource *res;
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 520fc9b..85edabf 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -220,7 +220,7 @@
 	case MIPI_DSI_DCS_LONG_WRITE:
 	{
 		unsigned int size, payload = 0;
-		INIT_COMPLETION(dsim_wr_comp);
+		reinit_completion(&dsim_wr_comp);
 
 		size = data_size * 4;
 
@@ -356,7 +356,7 @@
 	msleep(20);
 
 	mutex_lock(&dsim->lock);
-	INIT_COMPLETION(dsim_rd_comp);
+	reinit_completion(&dsim_rd_comp);
 	exynos_mipi_dsi_rd_tx_header(dsim,
 		MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size);
 
@@ -376,6 +376,7 @@
 			"data id %x is not supported current DSI spec.\n",
 			data_id);
 
+		mutex_unlock(&dsim->lock);
 		return -EINVAL;
 	}
 
@@ -667,7 +668,7 @@
 	default:
 		dev_info(dsim->dev, "data lane is invalid.\n");
 		return -EINVAL;
-	};
+	}
 
 	exynos_mipi_dsi_sw_reset(dsim);
 	exynos_mipi_dsi_func_reset(dsim);
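
Two independent fixes in this file: INIT_COMPLETION(x) was replaced tree-wide by reinit_completion(&x) in this cycle (note it now takes a pointer), and the unsupported-data-id error path gained the mutex_unlock() it had been leaking. A sketch of the reuse pattern, with an invented start_hw_transfer():

	static DECLARE_COMPLETION(xfer_done);
	static DEFINE_MUTEX(xfer_lock);

	static int do_transfer(void)
	{
		int ret = 0;

		mutex_lock(&xfer_lock);
		reinit_completion(&xfer_done);	/* re-arm before every use */
		start_hw_transfer();		/* assumed: kicks off the hardware */

		if (!wait_for_completion_timeout(&xfer_done,
						 msecs_to_jiffies(250)))
			ret = -ETIMEDOUT;

		mutex_unlock(&xfer_lock);	/* every exit path drops the lock */
		return ret;
	}
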
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
index 27fc956..6db9ebd 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fb-puv3.c
@@ -713,9 +713,8 @@
 	platform_set_drvdata(dev, info);
 	platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
 
-	printk(KERN_INFO
-	       "fb%d: Virtual frame buffer device, using %dM of video memory\n",
-	       info->node, UNIFB_MEMSIZE >> 20);
+	fb_info(info, "Virtual frame buffer device, using %dM of video memory\n",
+		UNIFB_MEMSIZE >> 20);
 	return 0;
 err2:
 	fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index dacaf74..010d191 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1108,14 +1108,16 @@
 	case FBIOPUT_VSCREENINFO:
 		if (copy_from_user(&var, argp, sizeof(var)))
 			return -EFAULT;
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		console_lock();
+		if (!lock_fb_info(info)) {
+			console_unlock();
+			return -ENODEV;
+		}
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_set_var(info, &var);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		console_unlock();
 		unlock_fb_info(info);
+		console_unlock();
 		if (!ret && copy_to_user(argp, &var, sizeof(var)))
 			ret = -EFAULT;
 		break;
@@ -1144,12 +1146,14 @@
 	case FBIOPAN_DISPLAY:
 		if (copy_from_user(&var, argp, sizeof(var)))
 			return -EFAULT;
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		console_lock();
+		if (!lock_fb_info(info)) {
+			console_unlock();
+			return -ENODEV;
+		}
 		ret = fb_pan_display(info, &var);
-		console_unlock();
 		unlock_fb_info(info);
+		console_unlock();
 		if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
 			return -EFAULT;
 		break;
@@ -1184,23 +1188,27 @@
 			break;
 		}
 		event.data = &con2fb;
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		console_lock();
+		if (!lock_fb_info(info)) {
+			console_unlock();
+			return -ENODEV;
+		}
 		event.info = info;
 		ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
-		console_unlock();
 		unlock_fb_info(info);
+		console_unlock();
 		break;
 	case FBIOBLANK:
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		console_lock();
+		if (!lock_fb_info(info)) {
+			console_unlock();
+			return -ENODEV;
+		}
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_blank(info, arg);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		console_unlock();
 		unlock_fb_info(info);
+		console_unlock();
 		break;
 	default:
 		if (!lock_fb_info(info))
@@ -1660,12 +1668,15 @@
 	registered_fb[i] = fb_info;
 
 	event.info = fb_info;
-	if (!lock_fb_info(fb_info))
-		return -ENODEV;
 	console_lock();
+	if (!lock_fb_info(fb_info)) {
+		console_unlock();
+		return -ENODEV;
+	}
+
 	fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
-	console_unlock();
 	unlock_fb_info(fb_info);
+	console_unlock();
 	return 0;
 }
 
@@ -1678,13 +1689,16 @@
 	if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
 		return -EINVAL;
 
-	if (!lock_fb_info(fb_info))
-		return -ENODEV;
 	console_lock();
+	if (!lock_fb_info(fb_info)) {
+		console_unlock();
+		return -ENODEV;
+	}
+
 	event.info = fb_info;
 	ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
-	console_unlock();
 	unlock_fb_info(fb_info);
+	console_unlock();
 
 	if (ret)
 		return -EINVAL;
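
Every fbmem.c hunk above (and the fbsysfs.c hunks below) enforces one lock-ordering rule: take console_lock() before lock_fb_info(), and release in reverse. Previously these paths took the fb_info lock first, while fbcon code already running under console_lock takes them the other way round, an AB-BA inversion that could deadlock. The distilled idiom:

	console_lock();
	if (!lock_fb_info(info)) {
		console_unlock();	/* never bail out with console_lock held */
		return -ENODEV;
	}

	/* ... operate on the framebuffer ... */

	unlock_fb_info(info);
	console_unlock();		/* release in reverse acquisition order */
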
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index ef476b0..53444ac 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,9 +177,12 @@
 	if (i * sizeof(struct fb_videomode) != count)
 		return -EINVAL;
 
-	if (!lock_fb_info(fb_info))
-		return -ENODEV;
 	console_lock();
+	if (!lock_fb_info(fb_info)) {
+		console_unlock();
+		return -ENODEV;
+	}
+
 	list_splice(&fb_info->modelist, &old_list);
 	fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
 				 &fb_info->modelist);
@@ -189,8 +192,8 @@
 	} else
 		fb_destroy_modelist(&old_list);
 
-	console_unlock();
 	unlock_fb_info(fb_info);
+	console_unlock();
 
 	return 0;
 }
@@ -404,12 +407,16 @@
 
 	state = simple_strtoul(buf, &last, 0);
 
-	if (!lock_fb_info(fb_info))
-		return -ENODEV;
 	console_lock();
+	if (!lock_fb_info(fb_info)) {
+		console_unlock();
+		return -ENODEV;
+	}
+
 	fb_set_suspend(fb_info, (int)state);
-	console_unlock();
+
 	unlock_fb_info(fb_info);
+	console_unlock();
 
 	return count;
 }
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 6d27447..4c4ffa6 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -1035,8 +1035,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index c99c967..e69d47a 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -289,7 +289,7 @@
 		zorro_release_device(z);
 		return -EINVAL;
 	}
-	printk("fb%d: %s frame buffer device\n", info->node, fb_fix.id);
+	fb_info(info, "%s frame buffer device\n", fb_fix.id);
 	return 0;
 }
 
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index b047ec5..e8758b9 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1104,7 +1104,7 @@
 
 		fsl_diu_load_cursor_image(info, image, bg, fg,
 			cursor->image.width, cursor->image.height);
-	};
+	}
 
 	/*
 	 * Show or hide the cursor.  The cursor data is always stored in the
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index ceab370..4c7cb36 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1236,9 +1236,9 @@
 	platform_set_drvdata(p_dev, info);
 	gbefb_create_sysfs(&p_dev->dev);
 
-	printk(KERN_INFO "fb%d: %s rev %d @ 0x%08x using %dkB memory\n",
-	       info->node, info->fix.id, gbe_revision, (unsigned) GBE_BASE,
-	       gbe_mem_size >> 10);
+	fb_info(info, "%s rev %d @ 0x%08x using %dkB memory\n",
+		info->fix.id, gbe_revision, (unsigned)GBE_BASE,
+		gbe_mem_size >> 10);
 
 	return 0;
 
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index ebbaada..2794ba1 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -357,7 +357,7 @@
 		goto err;
 	}
 	pci_set_drvdata(pdev, info);
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 
   err:
@@ -399,7 +399,6 @@
 	release_mem_region(gx1_gx_base() + 0x8300, 0x100);
 
 	fb_dealloc_cmap(&info->cmap);
-	pci_set_drvdata(pdev, NULL);
 
 	framebuffer_release(info);
 }
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 19f0c1ad..1790f14 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -423,7 +423,7 @@
 		goto err;
 	}
 	pci_set_drvdata(pdev, info);
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 
   err:
@@ -471,7 +471,6 @@
 	pci_release_region(pdev, 1);
 
 	fb_dealloc_cmap(&info->cmap);
-	pci_set_drvdata(pdev, NULL);
 
 	framebuffer_release(info);
 }
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 4dd7b55..9e1d19d 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -555,8 +555,7 @@
 		goto err;
 	}
 	pci_set_drvdata(pdev, info);
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-		info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
@@ -606,7 +605,6 @@
 	pci_release_region(pdev, 3);
 
 	fb_dealloc_cmap(&info->cmap);
-	pci_set_drvdata(pdev, NULL);
 	framebuffer_release(info);
 }
 
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index 861109e..c078701 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -496,7 +496,6 @@
 	return 0;
 
 free_mem:
-	dev_set_drvdata(&dev->dev, NULL);
 	if (grvga_fix_addr)
 		iounmap((void *)virtual_start);
 	else
@@ -530,7 +529,6 @@
 			kfree((void *)info->screen_base);
 
 		framebuffer_release(info);
-		dev_set_drvdata(&device->dev, NULL);
 	}
 
 	return 0;
@@ -557,19 +555,7 @@
 	.remove		= grvga_remove,
 };
 
-
-static int __init grvga_init(void)
-{
-	return platform_driver_register(&grvga_driver);
-}
-
-static void __exit grvga_exit(void)
-{
-	platform_driver_unregister(&grvga_driver);
-}
-
-module_init(grvga_init);
-module_exit(grvga_exit);
+module_platform_driver(grvga_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Aeroflex Gaisler");
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index c35663f..135d78a 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -698,8 +698,7 @@
 		dev_err(&pdev->dev, "gxt4500: cannot register framebuffer\n");
 		goto err_free_cmap;
 	}
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 59d2318..f64120e 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -261,9 +261,8 @@
 		goto err_fbreg;
 	platform_set_drvdata(dev, info);
 
-	printk(KERN_INFO
-	       "fb%d: Hecuba frame buffer device, using %dK of video memory\n",
-	       info->node, videomemorysize >> 10);
+	fb_info(info, "Hecuba frame buffer device, using %dK of video memory\n",
+		videomemorysize >> 10);
 
 	/* this inits the dpy */
 	retval = par->board->init(par);
@@ -305,19 +304,7 @@
 		.name	= "hecubafb",
 	},
 };
-
-static int __init hecubafb_init(void)
-{
-	return platform_driver_register(&hecubafb_driver);
-}
-
-static void __exit hecubafb_exit(void)
-{
-	platform_driver_unregister(&hecubafb_driver);
-}
-
-module_init(hecubafb_init);
-module_exit(hecubafb_exit);
+module_platform_driver(hecubafb_driver);
 
 MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller");
 MODULE_AUTHOR("Jaya Kumar");
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 1e9e2d8..5ff9fe2 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -586,8 +586,7 @@
 		return -EINVAL;
 	}
 
-        printk(KERN_INFO "fb%d: %s frame buffer device\n",
-               info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	platform_set_drvdata(pdev, info);
 	return 0;
 }
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index c2414d6..a648d51 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -405,8 +405,7 @@
 
 	platform_set_drvdata(dev, info);
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index b802f93..a1b7e5f 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -298,8 +298,7 @@
 	if (ret < 0)
 		goto dealloc_cmap;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       fb_info.node, fb_info.fix.id);
+	fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
 
 	return 0;
 
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 8ac99b8..130708f 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -575,6 +575,10 @@
 	return 0;
 }
 
+static int hvfb_blank(int blank, struct fb_info *info)
+{
+	return 1;	/* get fb_blank to set the colormap to all black */
+}
 
 static struct fb_ops hvfb_ops = {
 	.owner = THIS_MODULE,
@@ -584,6 +588,7 @@
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
+	.fb_blank = hvfb_blank,
 };
 
 
@@ -795,12 +800,21 @@
 }
 
 
+static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
+	{
+		.vendor      = PCI_VENDOR_ID_MICROSOFT,
+		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
+	},
+	{ /* end of list */ }
+};
+
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Synthetic Video Device GUID */
 	{HV_SYNTHVID_GUID},
 	{}
 };
 
+MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
 MODULE_DEVICE_TABLE(vmbus, id_table);
 
 static struct hv_driver hvfb_drv = {
@@ -810,14 +824,43 @@
 	.remove = hvfb_remove,
 };
 
+static int hvfb_pci_stub_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *ent)
+{
+	return 0;
+}
+
+static void hvfb_pci_stub_remove(struct pci_dev *pdev)
+{
+}
+
+static struct pci_driver hvfb_pci_stub_driver = {
+	.name =		KBUILD_MODNAME,
+	.id_table =	pci_stub_id_table,
+	.probe =	hvfb_pci_stub_probe,
+	.remove =	hvfb_pci_stub_remove,
+};
 
 static int __init hvfb_drv_init(void)
 {
-	return vmbus_driver_register(&hvfb_drv);
+	int ret;
+
+	ret = vmbus_driver_register(&hvfb_drv);
+	if (ret != 0)
+		return ret;
+
+	ret = pci_register_driver(&hvfb_pci_stub_driver);
+	if (ret != 0) {
+		vmbus_driver_unregister(&hvfb_drv);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit hvfb_drv_exit(void)
 {
+	pci_unregister_driver(&hvfb_pci_stub_driver);
 	vmbus_driver_unregister(&hvfb_drv);
 }
 
diff --git a/drivers/video/i740fb.c b/drivers/video/i740fb.c
index 6c48388..ca7c9df 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/i740fb.c
@@ -203,8 +203,7 @@
 
 	mutex_lock(&(par->open_lock));
 	if (par->ref_count == 0) {
-		printk(KERN_ERR "fb%d: release called with zero refcount\n",
-			info->node);
+		fb_err(info, "release called with zero refcount\n");
 		mutex_unlock(&(par->open_lock));
 		return -EINVAL;
 	}
@@ -1067,7 +1066,7 @@
 	par->has_sgram = !((tmp & DRAM_RAS_TIMING) ||
 			   (tmp & DRAM_RAS_PRECHARGE));
 
-	printk(KERN_INFO "fb%d: Intel740 on %s, %ld KB %s\n", info->node,
+	fb_info(info, "Intel740 on %s, %ld KB %s\n",
 		pci_name(dev), info->screen_size >> 10,
 		par->has_sgram ? "SGRAM" : "SDRAM");
 
@@ -1143,8 +1142,7 @@
 		goto err_reg_framebuffer;
 	}
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-		info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	pci_set_drvdata(dev, info);
 #ifdef CONFIG_MTRR
 	if (mtrr) {
@@ -1194,7 +1192,6 @@
 		pci_iounmap(dev, info->screen_base);
 		pci_release_regions(dev);
 /*		pci_disable_device(dev); */
-		pci_set_drvdata(dev, NULL);
 		framebuffer_release(info);
 	}
 }
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 4ce3438..038192a 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2129,7 +2129,6 @@
 
 	unregister_framebuffer(info);  
 	i810fb_release_resource(info, par);
-	pci_set_drvdata(dev, NULL);
 	printk("cleanup_module:  unloaded i810 framebuffer device\n");
 }                                                	
 
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 79cbfa7..486f188 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -360,9 +360,8 @@
 	if (register_framebuffer(info) < 0)
 		return 0;
 
-	printk("fb%d: %s frame buffer device at 0x%08lx [%dMB VRAM]\n",
-	       info->node, info->fix.id, 
-	       par->frame_buffer_phys, info->fix.smem_len >> 20);
+	fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
+		info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
 
 	iga_blank_border(par); 
 	return 1;
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index d5220cc..aae10ce 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -1461,8 +1461,8 @@
 	}
 
 	tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
-	printk("fb%u: %s frame buffer; %uMB vram; chip version %u\n",
-		info->node, info->fix.id, info->fix.smem_len >> 20, tmp);
+	fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
+		info->fix.id, info->fix.smem_len >> 20, tmp);
 }
 
 static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 38733ac..44ee678 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -755,7 +755,7 @@
 
 static int imxfb_init_fbinfo(struct platform_device *pdev)
 {
-	struct imx_fb_platform_data *pdata = pdev->dev.platform_data;
+	struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct fb_info *info = dev_get_drvdata(&pdev->dev);
 	struct imxfb_info *fbi = info->par;
 	struct device_node *np;
@@ -877,7 +877,7 @@
 	if (!res)
 		return -ENODEV;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 
 	info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
 	if (!info)
@@ -1066,7 +1066,7 @@
 #endif
 	unregister_framebuffer(info);
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (pdata && pdata->exit)
 		pdata->exit(fbi->pdev);
 
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 8209e46..b847d530 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -931,8 +931,6 @@
 		return;
 
 	cleanup(dinfo);
-
-	pci_set_drvdata(pdev, NULL);
 }
 
 /***************************************************************
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 2c49112..87790e9 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -99,9 +99,9 @@
 #define JZ_LCD_CTRL_BPP_15_16		0x4
 #define JZ_LCD_CTRL_BPP_18_24		0x5
 
-#define JZ_LCD_CMD_SOF_IRQ BIT(15)
-#define JZ_LCD_CMD_EOF_IRQ BIT(16)
-#define JZ_LCD_CMD_ENABLE_PAL BIT(12)
+#define JZ_LCD_CMD_SOF_IRQ BIT(31)
+#define JZ_LCD_CMD_EOF_IRQ BIT(30)
+#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
 
 #define JZ_LCD_SYNC_MASK 0x3ff
 
@@ -471,7 +471,7 @@
 	writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
 
 	if (!jzfb->is_enabled)
-		clk_disable(jzfb->ldclk);
+		clk_disable_unprepare(jzfb->ldclk);
 
 	mutex_unlock(&jzfb->lock);
 
@@ -485,7 +485,7 @@
 {
 	uint32_t ctrl;
 
-	clk_enable(jzfb->ldclk);
+	clk_prepare_enable(jzfb->ldclk);
 
 	jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
 	jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
@@ -514,7 +514,7 @@
 	jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
 	jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
 
-	clk_disable(jzfb->ldclk);
+	clk_disable_unprepare(jzfb->ldclk);
 }
 
 static int jzfb_blank(int blank_mode, struct fb_info *info)
@@ -693,7 +693,7 @@
 
 	fb_alloc_cmap(&fb->cmap, 256, 0);
 
-	clk_enable(jzfb->ldclk);
+	clk_prepare_enable(jzfb->ldclk);
 	jzfb->is_enabled = 1;
 
 	writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
@@ -763,7 +763,7 @@
 static int jzfb_resume(struct device *dev)
 {
 	struct jzfb *jzfb = dev_get_drvdata(dev);
-	clk_enable(jzfb->ldclk);
+	clk_prepare_enable(jzfb->ldclk);
 
 	mutex_lock(&jzfb->lock);
 	if (jzfb->is_enabled)
@@ -798,18 +798,7 @@
 		.pm = JZFB_PM_OPS,
 	},
 };
-
-static int __init jzfb_init(void)
-{
-	return platform_driver_register(&jzfb_driver);
-}
-module_init(jzfb_init);
-
-static void __exit jzfb_exit(void)
-{
-	platform_driver_unregister(&jzfb_driver);
-}
-module_exit(jzfb_exit);
+module_platform_driver(jzfb_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
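
Besides fixing the LCD_CMD bit positions (SOF/EOF/PAL live at bits 31/30/28, not 15/16/12), the jz4740 hunks convert bare clk_enable()/clk_disable() to the prepare-aware pairs the common clock framework requires: clk_prepare() may sleep and must precede clk_enable(), which stays callable from atomic context. The combined helper is essentially:

	static inline int clk_prepare_enable(struct clk *clk)
	{
		int ret;

		ret = clk_prepare(clk);		/* may sleep */
		if (ret)
			return ret;

		ret = clk_enable(clk);		/* atomic-safe */
		if (ret)
			clk_unprepare(clk);

		return ret;
	}
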
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 6157f74..50c8574 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -623,7 +623,6 @@
 				"command instead.\n");
 			return -EINVAL;
 		}
-		break;
 	case KYRO_IOCTL_UVSTRIDE:
 		if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
 			return -EFAULT;
@@ -736,10 +735,10 @@
 	if (register_framebuffer(info) < 0)
 		goto out_unmap;
 
-	printk("fb%d: %s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
-	       info->node, info->fix.id, info->var.xres,
-	       info->var.yres, info->var.bits_per_pixel, size >> 10,
-	       (unsigned long)info->fix.smem_len >> 10);
+	fb_info(info, "%s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
+		info->fix.id,
+		info->var.xres, info->var.yres, info->var.bits_per_pixel,
+		size >> 10, (unsigned long)info->fix.smem_len >> 10);
 
 	pci_set_drvdata(pdev, info);
 
@@ -779,7 +778,6 @@
 #endif
 
 	unregister_framebuffer(info);
-	pci_set_drvdata(pdev, NULL);
 	framebuffer_release(info);
 }
 
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index b17f500..2c7f7d4 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -469,7 +469,7 @@
 
 		default:
 			continue;
-		};
+		}
 		sbus_writel(0x5800 + j, &lx_krn->krn_type);
 		sbus_writel(wi->wi_values[0], &lx_krn->krn_value);
 	}
@@ -648,8 +648,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index fe01add..5bd2eb8 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -913,8 +913,7 @@
 	if (err)
 		goto fail_dealloc;
 
-	pr_info("fb%d: %s frame buffer device\n",
-	        fb_info.node, fb_info.fix.id);
+	fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
 
 	return 0;
 
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index 1717623..a01147f 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -494,7 +494,7 @@
 			if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
 				break;
 			udelay(10);
-		};
+		}
 
 		CRITEND
 
@@ -639,7 +639,7 @@
 		if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
 			break;
 		udelay(10);
-	};
+	}
 	if (!clk)
 		printk(KERN_ERR "matroxfb: Pixel PLL%c not locked after usual time\n", (reg-M1064_XPIXPLLAM-2)/4 + 'A');
 	selClk = inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 9a44cec..195ad7c 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -473,7 +473,7 @@
 		if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
 			break;
 		udelay(10);
-	};
+	}
 	if (!tmout)
 		printk(KERN_ERR "matroxfb: Temporary pixel PLL not locked after 5 secs\n");
 
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 2456529..87c64ff 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1893,14 +1893,12 @@
 	if (register_framebuffer(&minfo->fbcon) < 0) {
 		goto failVideoIO;
 	}
-	printk("fb%d: %s frame buffer device\n",
-	       minfo->fbcon.node, minfo->fbcon.fix.id);
+	fb_info(&minfo->fbcon, "%s frame buffer device\n", minfo->fbcon.fix.id);
 
 	/* there is no console on this fb... but we have to initialize hardware
 	 * until someone tells me what is proper thing to do */
 	if (!minfo->initialized) {
-		printk(KERN_INFO "fb%d: initializing hardware\n",
-		       minfo->fbcon.node);
+		fb_info(&minfo->fbcon, "initializing hardware\n");
 		/* We have to use FB_ACTIVATE_FORCE, as we had to put vesafb_defined to the fbcon.var
 		 * already before, so register_framebuffer works correctly. */
 		vesafb_defined.activate |= FB_ACTIVATE_FORCE;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index fd28974..ee41a0f 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1295,19 +1295,7 @@
 	.id_table	= maven_id,
 };
 
-static int __init matroxfb_maven_init(void)
-{
-	return i2c_add_driver(&maven_driver);
-}
-
-static void __exit matroxfb_maven_exit(void)
-{
-	i2c_del_driver(&maven_driver);
-}
-
+module_i2c_driver(maven_driver);
 MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>");
 MODULE_DESCRIPTION("Matrox G200/G400 Matrox MGA-TVO driver");
 MODULE_LICENSE("GPL");
-module_init(matroxfb_maven_init);
-module_exit(matroxfb_maven_exit);
-/* we do not have __setup() yet */
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 91c59c9..0cd4c33 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -781,7 +781,6 @@
 irqdisp:
 	irq_dispose_mapping(par->irq);
 fbrel:
-	dev_set_drvdata(dev, NULL);
 	framebuffer_release(info);
 	return ret;
 }
@@ -814,7 +813,6 @@
 	iounmap(par->mmio_base);
 	iounmap(par->fb_base);
 
-	dev_set_drvdata(&ofdev->dev, NULL);
 	release_mem_region(par->res->start, res_size);
 	framebuffer_release(fbi);
 	return 0;
@@ -1157,7 +1155,6 @@
 
 	device_remove_file(&pdev->dev, &dev_attr_dispregs);
 
-	pci_set_drvdata(pdev, NULL);
 	unregister_framebuffer(fbi);
 	fb_dealloc_cmap(&fbi->cmap);
 
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 0c1a874..f0a5392 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -890,7 +890,7 @@
 
 	dev_dbg(&dev->dev, "mbxfb_probe\n");
 
-	pdata = dev->dev.platform_data;
+	pdata = dev_get_platdata(&dev->dev);
 	if (!pdata) {
 		dev_err(&dev->dev, "platform data is required\n");
 		return -EINVAL;
@@ -976,7 +976,7 @@
 
 	platform_set_drvdata(dev, fbi);
 
-	printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node);
+	fb_info(fbi, "mbx frame buffer device\n");
 
 	if (mfbi->platform_probe)
 		mfbi->platform_probe(fbi);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index f30150d..195cc2d 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -690,7 +690,8 @@
 		goto err_csum_table;
 	}
 
-	if (board->setup_irq(info))
+	retval = board->setup_irq(info);
+	if (retval)
 		goto err_csum_table;
 
 	retval = metronome_init_regs(par);
@@ -769,23 +770,11 @@
 		.name	= "metronomefb",
 	},
 };
-
-static int __init metronomefb_init(void)
-{
-	return platform_driver_register(&metronomefb_driver);
-}
-
-static void __exit metronomefb_exit(void)
-{
-	platform_driver_unregister(&metronomefb_driver);
-}
+module_platform_driver(metronomefb_driver);
 
 module_param(user_wfm_size, uint, 0);
 MODULE_PARM_DESC(user_wfm_size, "Set custom waveform size");
 
-module_init(metronomefb_init);
-module_exit(metronomefb_exit);
-
 MODULE_DESCRIPTION("fbdev driver for Metronome controller");
 MODULE_AUTHOR("Jaya Kumar");
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/mmp/fb/mmpfb.c
index 4ab95b8..7ab31eb 100644
--- a/drivers/video/mmp/fb/mmpfb.c
+++ b/drivers/video/mmp/fb/mmpfb.c
@@ -392,12 +392,29 @@
 	return 0;
 }
 
+static void mmpfb_set_win(struct fb_info *info)
+{
+	struct mmpfb_info *fbi = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct mmp_win win;
+	u32 stride;
+
+	memset(&win, 0, sizeof(win));
+	win.xsrc = win.xdst = fbi->mode.xres;
+	win.ysrc = win.ydst = fbi->mode.yres;
+	win.pix_fmt = fbi->pix_fmt;
+	stride = pixfmt_to_stride(win.pix_fmt);
+	win.pitch[0] = var->xres_virtual * stride;
+	win.pitch[1] = win.pitch[2] =
+		(stride == 1) ? (var->xres_virtual >> 1) : 0;
+	mmp_overlay_set_win(fbi->overlay, &win);
+}
+
 static int mmpfb_set_par(struct fb_info *info)
 {
 	struct mmpfb_info *fbi = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 	struct mmp_addr addr;
-	struct mmp_win win;
 	struct mmp_mode mode;
 	int ret;
 
@@ -409,11 +426,8 @@
 	fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt);
 	mmp_path_set_mode(fbi->path, &mode);
 
-	memset(&win, 0, sizeof(win));
-	win.xsrc = win.xdst = fbi->mode.xres;
-	win.ysrc = win.ydst = fbi->mode.yres;
-	win.pix_fmt = fbi->pix_fmt;
-	mmp_overlay_set_win(fbi->overlay, &win);
+	/* set window related info */
+	mmpfb_set_win(info);
 
 	/* set address always */
 	memset(&addr, 0, sizeof(addr));
@@ -427,16 +441,12 @@
 static void mmpfb_power(struct mmpfb_info *fbi, int power)
 {
 	struct mmp_addr addr;
-	struct mmp_win win;
 	struct fb_var_screeninfo *var = &fbi->fb_info->var;
 
 	/* for power on, always set address/window again */
 	if (power) {
-		memset(&win, 0, sizeof(win));
-		win.xsrc = win.xdst = fbi->mode.xres;
-		win.ysrc = win.ydst = fbi->mode.yres;
-		win.pix_fmt = fbi->pix_fmt;
-		mmp_overlay_set_win(fbi->overlay, &win);
+		/* set window related info */
+		mmpfb_set_win(fbi->fb_info);
 
 		/* set address always */
 		memset(&addr, 0, sizeof(addr));
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 6ac7552..8621a9f 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -53,15 +53,14 @@
 		tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
 		if (tmp & isr)
 			writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
-	} while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
+	} while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
 
 	return IRQ_HANDLED;
 }
 
 static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
 {
-	u32 link_config = path_to_path_plat(overlay->path)->link_config;
-	u32 rbswap, uvswap = 0, yuvswap = 0,
+	u32 rbswap = 0, uvswap = 0, yuvswap = 0,
 		csc_en = 0, val = 0,
 		vid = overlay_is_vid(overlay);
 
@@ -71,27 +70,23 @@
 	case PIXFMT_RGB888PACK:
 	case PIXFMT_RGB888UNPACK:
 	case PIXFMT_RGBA888:
-		rbswap = !(link_config & 0x1);
+		rbswap = 1;
 		break;
 	case PIXFMT_VYUY:
 	case PIXFMT_YVU422P:
 	case PIXFMT_YVU420P:
-		rbswap = link_config & 0x1;
 		uvswap = 1;
 		break;
 	case PIXFMT_YUYV:
-		rbswap = link_config & 0x1;
 		yuvswap = 1;
 		break;
 	default:
-		rbswap = link_config & 0x1;
 		break;
 	}
 
 	switch (pix_fmt) {
 	case PIXFMT_RGB565:
 	case PIXFMT_BGR565:
-		val = 0;
 		break;
 	case PIXFMT_RGB1555:
 	case PIXFMT_BGR1555:
@@ -147,17 +142,27 @@
 static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
 {
 	struct lcd_regs *regs = path_regs(overlay->path);
-	u32 pitch;
 
 	/* assert win supported */
 	memcpy(&overlay->win, win, sizeof(struct mmp_win));
 
 	mutex_lock(&overlay->access_ok);
-	pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
-	writel_relaxed(pitch, &regs->g_pitch);
-	writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
-	writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
-	writel_relaxed(0, &regs->g_start);
+
+	if (overlay_is_vid(overlay)) {
+		writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
+		writel_relaxed(win->pitch[2] << 16 |
+				win->pitch[1], &regs->v_pitch_uv);
+
+		writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
+		writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
+		writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
+	} else {
+		writel_relaxed(win->pitch[0], &regs->g_pitch);
+
+		writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
+		writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
+		writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
+	}
 
 	dmafetch_set_fmt(overlay);
 	mutex_unlock(&overlay->access_ok);
@@ -239,7 +244,13 @@
 
 	/* FIXME: assert addr supported */
 	memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
-	writel(addr->phys[0], &regs->g_0);
+
+	if (overlay_is_vid(overlay)) {
+		writel_relaxed(addr->phys[0], &regs->v_y0);
+		writel_relaxed(addr->phys[1], &regs->v_u0);
+		writel_relaxed(addr->phys[2], &regs->v_v0);
+	} else
+		writel_relaxed(addr->phys[0], &regs->g_0);
 
 	return overlay->addr.phys[0];
 }
@@ -248,7 +259,8 @@
 {
 	struct lcd_regs *regs = path_regs(path);
 	u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
-		link_config = path_to_path_plat(path)->link_config;
+		link_config = path_to_path_plat(path)->link_config,
+		dsi_rbswap = path_to_path_plat(path)->dsi_rbswap;
 
 	/* FIXME: assert videomode supported */
 	memcpy(&path->mode, mode, sizeof(struct mmp_mode));
@@ -263,6 +275,12 @@
 	tmp |= CFG_DUMB_ENA(1);
 	writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
 
+	/* interface rb_swap setting */
+	tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
+		(~(CFG_INTFRBSWAP_MASK));
+	tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
+	writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
+
 	writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
 	writel_relaxed((mode->left_margin << 16) | mode->right_margin,
 		&regs->screen_h_porch);
@@ -370,20 +388,12 @@
 	 * bus arbiter for faster read if not tv path;
 	 * 2.enable horizontal smooth filter;
 	 */
-	if (PATH_PN == path->id) {
-		mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
-			| CFG_ARBFAST_ENA(1);
-		tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
-		tmp |= mask;
-		writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
-	} else if (PATH_TV == path->id) {
-		mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
-			| CFG_ARBFAST_ENA(1);
-		tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
-		tmp &= ~mask;
-		tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
-		writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
-	}
+	mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
+	tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+	tmp |= mask;
+	if (PATH_TV == path->id)
+		tmp &= ~CFG_ARBFAST_ENA(1);
+	writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
 }
 
 static int path_init(struct mmphw_path_plat *path_plat,
@@ -419,6 +429,7 @@
 	path_plat->path = path;
 	path_plat->path_config = config->path_config;
 	path_plat->link_config = config->link_config;
+	path_plat->dsi_rbswap = config->dsi_rbswap;
 	path_set_default(path);
 
 	kfree(path_info);
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/mmp/hw/mmp_ctrl.h
index edd2002..53301cf 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/mmp/hw/mmp_ctrl.h
@@ -163,6 +163,8 @@
 
 #define LCD_SCLK(path) ((PATH_PN == path->id) ? LCD_CFG_SCLK_DIV :\
 	((PATH_TV == path->id) ? LCD_TCLK_DIV : LCD_PN2_SCLK_DIV))
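+/*
+ * Select the register holding the interface rb_swap bits for a path:
+ * id 0 -> LCD_TOP_CTRL, odd id -> LCD_TVIF_CTRL, other -> PN2_IOPAD_CONTROL.
+ */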
+#define intf_rbswap_ctrl(id)	((id) ? (((id) & 1) ? LCD_TVIF_CTRL : \
+				PN2_IOPAD_CONTROL) : LCD_TOP_CTRL)
 
 /* dither configure */
 #ifdef CONFIG_CPU_PXA988
@@ -615,6 +617,8 @@
 #define LCD_SPU_DUMB_CTRL			0x01B8
 #define	 CFG_DUMBMODE(mode)			((mode)<<28)
 #define	 CFG_DUMBMODE_MASK			0xF0000000
+#define	 CFG_INTFRBSWAP(mode)			((mode)<<24)
+#define	 CFG_INTFRBSWAP_MASK			0x0F000000
 #define	 CFG_LCDGPIO_O(data)			((data)<<20)
 #define	 CFG_LCDGPIO_O_MASK			0x0FF00000
 #define	 CFG_LCDGPIO_ENA(gpio)			((gpio)<<12)
@@ -1427,6 +1431,7 @@
 	struct mmp_path *path;
 	u32 path_config;
 	u32 link_config;
+	u32 dsi_rbswap;
 };
 
 /* mmp ctrl describes mmp controller related info */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cfdb380..804f874 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1354,7 +1354,7 @@
 static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
 {
 	struct device *dev = mx3fb->dev;
-	struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
+	struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
 	const char *name = mx3fb_pdata->name;
 	unsigned int irq;
 	struct fb_info *fbi;
@@ -1462,7 +1462,7 @@
 		return false;
 
 	dev = rq->mx3fb->dev;
-	mx3fb_pdata = dev->platform_data;
+	mx3fb_pdata = dev_get_platdata(dev);
 
 	return rq->id == chan->chan_id &&
 		mx3fb_pdata->dma_dev == chan->device->dev;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index c172a52..44f99a6 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2106,8 +2106,7 @@
 	if (err < 0)
 		goto err_reg_fb;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	/*
 	 * Our driver data
@@ -2148,12 +2147,6 @@
 		fb_destroy_modedb(info->monspecs.modedb);
 		neo_unmap_mmio(info);
 		neo_free_fb_info(info);
-
-		/*
-		 * Ensure that the driver data is no longer
-		 * valid.
-		 */
-		pci_set_drvdata(dev, NULL);
 	}
 }
 
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 796e511..478f980 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -91,7 +91,7 @@
 			       struct fb_info *info)
 {
 	struct nuc900fb_info *fbi = info->par;
-	struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+	struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
 	struct nuc900fb_display *display = NULL;
 	struct nuc900fb_display *default_display = mach_info->displays +
 						   mach_info->default_display;
@@ -358,7 +358,7 @@
 static int nuc900fb_init_registers(struct fb_info *info)
 {
 	struct nuc900fb_info *fbi = info->par;
-	struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+	struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
 	void __iomem *regs = fbi->io;
 
 	/*reset the display engine*/
@@ -512,7 +512,7 @@
 	int size;
 
 	dev_dbg(&pdev->dev, "devinit\n");
-	mach_info = pdev->dev.platform_data;
+	mach_info = dev_get_platdata(&pdev->dev);
 	if (mach_info == NULL) {
 		dev_err(&pdev->dev,
 			"no platform data for lcd, cannot attach\n");
@@ -647,8 +647,7 @@
 		goto free_cpufreq;
 	}
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-		fbinfo->node, fbinfo->fix.id);
+	fb_info(fbinfo, "%s frame buffer device\n", fbinfo->fix.id);
 
 	return 0;
 
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index ed20a98..81c80ac 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -1300,7 +1300,7 @@
 					break;
 				default:
 					break;
-				};
+				}
 
 				NV_WR32(par->PGRAPH, 0x0b38, 0x2ffff800);
 				NV_WR32(par->PGRAPH, 0x0b3c, 0x00006000);
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 0c4f343..9dbea22 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -515,8 +515,7 @@
 	if (register_framebuffer(info) < 0)
 		goto out_err;
 
-	printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
-	       info->node, full_name);
+	fb_info(info, "Open Firmware frame buffer device on %s\n", full_name);
 	return;
 
 out_err:
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index f349ee6..a4ee65b 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -947,7 +947,7 @@
 	hwa742.extif = fbdev->ext_if;
 	hwa742.int_ctrl = fbdev->int_ctrl;
 
-	omapfb_conf = fbdev->dev->platform_data;
+	omapfb_conf = dev_get_platdata(fbdev->dev);
 
 	hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");
 
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index d40612c..e4fc6d9b 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -1602,7 +1602,7 @@
 	char name[17];
 	int i;
 
-	conf = fbdev->dev->platform_data;
+	conf = dev_get_platdata(fbdev->dev);
 
 	fbdev->ctrl = NULL;
 
@@ -1674,7 +1674,7 @@
 		goto cleanup;
 	}
 
-	if (pdev->dev.platform_data == NULL) {
+	if (dev_get_platdata(&pdev->dev) == NULL) {
 		dev_err(&pdev->dev, "missing platform data\n");
 		r = -ENOENT;
 		goto cleanup;
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 10b25e7..e6cfc38 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -57,6 +57,12 @@
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
+config DISPLAY_PANEL_TPO_TD028TTEC1
+        tristate "TPO TD028TTEC1 LCD Panel"
+        depends on SPI
+        help
+          LCD panel used in Openmoko.
+
 config DISPLAY_PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
         depends on SPI
diff --git a/drivers/video/omap2/displays-new/Makefile b/drivers/video/omap2/displays-new/Makefile
index 5aeb11b..0323a8a 100644
--- a/drivers/video/omap2/displays-new/Makefile
+++ b/drivers/video/omap2/displays-new/Makefile
@@ -8,5 +8,6 @@
 obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
 obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
 obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index 63d88ee..b6c5090 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -262,6 +262,9 @@
 
 	in = omap_dss_find_output(pdata->source);
 	if (in == NULL) {
+		if (ddata->i2c_adapter)
+			i2c_put_adapter(ddata->i2c_adapter);
+
 		dev_err(&pdev->dev, "Failed to find video source\n");
 		return -EPROBE_DEFER;
 	}
@@ -313,6 +316,10 @@
 
 err_reg:
 	omap_dss_put_device(ddata->in);
+
+	if (ddata->i2c_adapter)
+		i2c_put_adapter(ddata->i2c_adapter);
+
 	return r;
 }
 
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
index 798ef20..d5c936c 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
@@ -69,7 +69,7 @@
 	dst->src = dssdev;
 	dssdev->dst = dst;
 
-	INIT_COMPLETION(ddata->hpd_completion);
+	reinit_completion(&ddata->hpd_completion);
 
 	gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
 	/* DC-DC converter needs at max 300us to get to 90% of 5V */
diff --git a/drivers/video/omap2/displays-new/panel-dsi-cm.c b/drivers/video/omap2/displays-new/panel-dsi-cm.c
index aaaea64..b7baafe 100644
--- a/drivers/video/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/omap2/displays-new/panel-dsi-cm.c
@@ -599,7 +599,7 @@
 	if (r) {
 		dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n");
 		goto err0;
-	};
+	}
 
 	r = in->ops.dsi->set_config(in, &dsi_config);
 	if (r) {
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
new file mode 100644
index 0000000..9a08908
--- /dev/null
+++ b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -0,0 +1,480 @@
+/*
+ * Toppoly TD028TTEC1 panel support
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Neo 1973 code (jbt6k74.c):
+ * Copyright (C) 2006-2007 by OpenMoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ *
+ * Ported and adapted from Neo 1973 U-Boot by:
+ * H. Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+	struct omap_dss_device dssdev;
+	struct omap_dss_device *in;
+
+	int data_lines;
+
+	struct omap_video_timings videomode;
+
+	struct spi_device *spi_dev;
+};
+
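+/*
+ * Fixed 480x640 portrait mode; with these porches the 22.153 MHz pixel
+ * clock gives a refresh rate of roughly 66 Hz.
+ */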
+static struct omap_video_timings td028ttec1_panel_timings = {
+	.x_res		= 480,
+	.y_res		= 640,
+	.pixel_clock	= 22153,
+	.hfp		= 24,
+	.hsw		= 8,
+	.hbp		= 8,
+	.vfp		= 4,
+	.vsw		= 2,
+	.vbp		= 2,
+
+	.vsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
+	.hsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
+
+	.data_pclk_edge	= OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+	.de_level	= OMAPDSS_SIG_ACTIVE_HIGH,
+	.sync_pclk_edge	= OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define JBT_COMMAND	0x000
+#define JBT_DATA	0x100
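+
+/*
+ * The panel is driven with 9-bit SPI words: bit 8 distinguishes commands
+ * (JBT_COMMAND) from data bytes (JBT_DATA), bits 7:0 carry the payload;
+ * see spi->bits_per_word = 9 in td028ttec1_panel_probe().
+ */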
+
+static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
+{
+	int rc;
+	u16 tx_buf = JBT_COMMAND | reg;
+
+	rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
+			1*sizeof(u16));
+	if (rc != 0)
+		dev_err(&ddata->spi_dev->dev,
+			"jbt_ret_write_0 spi_write ret %d\n", rc);
+
+	return rc;
+}
+
+static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
+{
+	int rc;
+	u16 tx_buf[2];
+
+	tx_buf[0] = JBT_COMMAND | reg;
+	tx_buf[1] = JBT_DATA | data;
+	rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+			2*sizeof(u16));
+	if (rc != 0)
+		dev_err(&ddata->spi_dev->dev,
+			"jbt_reg_write_1 spi_write ret %d\n", rc);
+
+	return rc;
+}
+
+static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
+{
+	int rc;
+	u16 tx_buf[3];
+
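+	/* the 16-bit value is sent as two data words, high byte first */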
+	tx_buf[0] = JBT_COMMAND | reg;
+	tx_buf[1] = JBT_DATA | (data >> 8);
+	tx_buf[2] = JBT_DATA | (data & 0xff);
+
+	rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+			3*sizeof(u16));
+
+	if (rc != 0)
+		dev_err(&ddata->spi_dev->dev,
+			"jbt_reg_write_2 spi_write ret %d\n", rc);
+
+	return rc;
+}
+
+enum jbt_register {
+	JBT_REG_SLEEP_IN		= 0x10,
+	JBT_REG_SLEEP_OUT		= 0x11,
+
+	JBT_REG_DISPLAY_OFF		= 0x28,
+	JBT_REG_DISPLAY_ON		= 0x29,
+
+	JBT_REG_RGB_FORMAT		= 0x3a,
+	JBT_REG_QUAD_RATE		= 0x3b,
+
+	JBT_REG_POWER_ON_OFF		= 0xb0,
+	JBT_REG_BOOSTER_OP		= 0xb1,
+	JBT_REG_BOOSTER_MODE		= 0xb2,
+	JBT_REG_BOOSTER_FREQ		= 0xb3,
+	JBT_REG_OPAMP_SYSCLK		= 0xb4,
+	JBT_REG_VSC_VOLTAGE		= 0xb5,
+	JBT_REG_VCOM_VOLTAGE		= 0xb6,
+	JBT_REG_EXT_DISPL		= 0xb7,
+	JBT_REG_OUTPUT_CONTROL		= 0xb8,
+	JBT_REG_DCCLK_DCEV		= 0xb9,
+	JBT_REG_DISPLAY_MODE1		= 0xba,
+	JBT_REG_DISPLAY_MODE2		= 0xbb,
+	JBT_REG_DISPLAY_MODE		= 0xbc,
+	JBT_REG_ASW_SLEW		= 0xbd,
+	JBT_REG_DUMMY_DISPLAY		= 0xbe,
+	JBT_REG_DRIVE_SYSTEM		= 0xbf,
+
+	JBT_REG_SLEEP_OUT_FR_A		= 0xc0,
+	JBT_REG_SLEEP_OUT_FR_B		= 0xc1,
+	JBT_REG_SLEEP_OUT_FR_C		= 0xc2,
+	JBT_REG_SLEEP_IN_LCCNT_D	= 0xc3,
+	JBT_REG_SLEEP_IN_LCCNT_E	= 0xc4,
+	JBT_REG_SLEEP_IN_LCCNT_F	= 0xc5,
+	JBT_REG_SLEEP_IN_LCCNT_G	= 0xc6,
+
+	JBT_REG_GAMMA1_FINE_1		= 0xc7,
+	JBT_REG_GAMMA1_FINE_2		= 0xc8,
+	JBT_REG_GAMMA1_INCLINATION	= 0xc9,
+	JBT_REG_GAMMA1_BLUE_OFFSET	= 0xca,
+
+	JBT_REG_BLANK_CONTROL		= 0xcf,
+	JBT_REG_BLANK_TH_TV		= 0xd0,
+	JBT_REG_CKV_ON_OFF		= 0xd1,
+	JBT_REG_CKV_1_2			= 0xd2,
+	JBT_REG_OEV_TIMING		= 0xd3,
+	JBT_REG_ASW_TIMING_1		= 0xd4,
+	JBT_REG_ASW_TIMING_2		= 0xd5,
+
+	JBT_REG_HCLOCK_VGA		= 0xec,
+	JBT_REG_HCLOCK_QVGA		= 0xed,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+	int r;
+
+	if (omapdss_device_is_connected(dssdev))
+		return 0;
+
+	r = in->ops.dpi->connect(in, dssdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+
+	if (!omapdss_device_is_connected(dssdev))
+		return;
+
+	in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+	int r;
+
+	if (!omapdss_device_is_connected(dssdev))
+		return -ENODEV;
+
+	if (omapdss_device_is_enabled(dssdev))
+		return 0;
+
+	in->ops.dpi->set_data_lines(in, ddata->data_lines);
+	in->ops.dpi->set_timings(in, &ddata->videomode);
+
+	r = in->ops.dpi->enable(in);
+	if (r)
+		return r;
+
+	dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
+		dssdev->state);
+
+	/* send command 0x00 three times */
+	r |= jbt_ret_write_0(ddata, 0x00);
+	usleep_range(1000, 2000);
+	r |= jbt_ret_write_0(ddata, 0x00);
+	usleep_range(1000, 2000);
+	r |= jbt_ret_write_0(ddata, 0x00);
+	usleep_range(1000, 2000);
+
+	if (r) {
+		dev_warn(dssdev->dev, "transfer error\n");
+		goto transfer_err;
+	}
+
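+	/* errors from the writes below accumulate in r and are reported as -EIO */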
+	/* deep standby out */
+	r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
+
+	/* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
+	r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
+
+	/* Quad mode off */
+	r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
+
+	/* AVDD on, XVDD on */
+	r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
+
+	/* Output control */
+	r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
+
+	/* Sleep mode off */
+	r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
+
+	/* at this point the panel shows roughly 50% grey */
+
+	/* initialize register set */
+	r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
+	r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
+	r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
+	r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
+	r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
+	r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
+	r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
+	r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11); /* duplicate write kept from the original init sequence */
+	r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
+	r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
+	r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
+	r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
+	r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
+	/*
+	 * the default of 0x02 in JBT_REG_ASW_SLEW requires a 72 Hz refresh
+	 * rate to avoid red / blue flicker
+	 */
+	r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
+	r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
+
+	r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
+	r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
+	r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
+	r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
+	r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
+	r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
+	r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
+
+	r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
+	r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
+	r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
+	r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
+
+	r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
+	r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
+	r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
+
+	r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
+	r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
+
+	r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
+	r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
+	r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
+
+	r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+transfer_err:
+
+	return r ? -EIO : 0;
+}
+
+static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+
+	if (!omapdss_device_is_enabled(dssdev))
+		return;
+
+	dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
+
+	jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
+	jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
+	jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
+	jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
+
+	in->ops.dpi->disable(in);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+
+	ddata->videomode = *timings;
+	dssdev->panel.timings = *timings;
+
+	in->ops.dpi->set_timings(in, timings);
+}
+
+static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+	*timings = ddata->videomode;
+}
+
+static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	struct panel_drv_data *ddata = to_panel_data(dssdev);
+	struct omap_dss_device *in = ddata->in;
+
+	return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver td028ttec1_ops = {
+	.connect	= td028ttec1_panel_connect,
+	.disconnect	= td028ttec1_panel_disconnect,
+
+	.enable		= td028ttec1_panel_enable,
+	.disable	= td028ttec1_panel_disable,
+
+	.set_timings	= td028ttec1_panel_set_timings,
+	.get_timings	= td028ttec1_panel_get_timings,
+	.check_timings	= td028ttec1_panel_check_timings,
+};
+
+static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
+{
+	const struct panel_tpo_td028ttec1_platform_data *pdata;
+	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+	struct omap_dss_device *dssdev, *in;
+
+	pdata = dev_get_platdata(&spi->dev);
+
+	in = omap_dss_find_output(pdata->source);
+	if (in == NULL) {
+		dev_err(&spi->dev, "failed to find video source '%s'\n",
+				pdata->source);
+		return -EPROBE_DEFER;
+	}
+
+	ddata->in = in;
+
+	ddata->data_lines = pdata->data_lines;
+
+	dssdev = &ddata->dssdev;
+	dssdev->name = pdata->name;
+
+	return 0;
+}
+
+static int td028ttec1_panel_probe(struct spi_device *spi)
+{
+	struct panel_drv_data *ddata;
+	struct omap_dss_device *dssdev;
+	int r;
+
+	dev_dbg(&spi->dev, "%s\n", __func__);
+
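+	/* 9-bit frames: bit 8 is the command/data flag (JBT_COMMAND/JBT_DATA) */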
+	spi->bits_per_word = 9;
+	spi->mode = SPI_MODE_3;
+
+	r = spi_setup(spi);
+	if (r < 0) {
+		dev_err(&spi->dev, "spi_setup failed: %d\n", r);
+		return r;
+	}
+
+	ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+	if (ddata == NULL)
+		return -ENOMEM;
+
+	dev_set_drvdata(&spi->dev, ddata);
+
+	ddata->spi_dev = spi;
+
+	if (dev_get_platdata(&spi->dev)) {
+		r = td028ttec1_panel_probe_pdata(spi);
+		if (r)
+			return r;
+	} else {
+		return -ENODEV;
+	}
+
+	ddata->videomode = td028ttec1_panel_timings;
+
+	dssdev = &ddata->dssdev;
+	dssdev->dev = &spi->dev;
+	dssdev->driver = &td028ttec1_ops;
+	dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+	dssdev->owner = THIS_MODULE;
+	dssdev->panel.timings = ddata->videomode;
+	dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+	r = omapdss_register_display(dssdev);
+	if (r) {
+		dev_err(&spi->dev, "Failed to register panel\n");
+		goto err_reg;
+	}
+
+	return 0;
+
+err_reg:
+	omap_dss_put_device(ddata->in);
+	return r;
+}
+
+static int td028ttec1_panel_remove(struct spi_device *spi)
+{
+	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+	struct omap_dss_device *dssdev = &ddata->dssdev;
+	struct omap_dss_device *in = ddata->in;
+
+	dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
+
+	omapdss_unregister_display(dssdev);
+
+	td028ttec1_panel_disable(dssdev);
+	td028ttec1_panel_disconnect(dssdev);
+
+	omap_dss_put_device(in);
+
+	return 0;
+}
+
+static struct spi_driver td028ttec1_spi_driver = {
+	.probe		= td028ttec1_panel_probe,
+	.remove		= td028ttec1_panel_remove,
+
+	.driver         = {
+		.name   = "panel-tpo-td028ttec1",
+		.owner  = THIS_MODULE,
+	},
+};
+
+module_spi_driver(td028ttec1_spi_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 94832eb..d3aa91b 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -10,5 +10,6 @@
 omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
 omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
 omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o ti_hdmi_4xxx_ip.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi_common.o hdmi_wp.o hdmi_pll.o \
+	hdmi_phy.o hdmi4_core.o
 ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 60d3958..ffa45c8 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -266,7 +266,7 @@
 	venc_init_platform_driver,
 #endif
 #ifdef CONFIG_OMAP4_DSS_HDMI
-	hdmi_init_platform_driver,
+	hdmi4_init_platform_driver,
 #endif
 };
 
@@ -287,7 +287,7 @@
 	venc_uninit_platform_driver,
 #endif
 #ifdef CONFIG_OMAP4_DSS_HDMI
-	hdmi_uninit_platform_driver,
+	hdmi4_uninit_platform_driver,
 #endif
 };
 
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 4779750..4ec59ca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2352,7 +2352,7 @@
 {
 	enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
 	bool five_taps = true;
-	bool fieldmode = 0;
+	bool fieldmode = false;
 	u16 in_height = oi->height;
 	u16 in_width = oi->width;
 	bool ilace = timings->interlace;
@@ -2365,7 +2365,7 @@
 	out_height = oi->out_height == 0 ? oi->height : oi->out_height;
 
 	if (ilace && oi->height == out_height)
-		fieldmode = 1;
+		fieldmode = true;
 
 	if (ilace) {
 		if (fieldmode)
@@ -2396,7 +2396,7 @@
 		bool mem_to_mem)
 {
 	bool five_taps = true;
-	bool fieldmode = 0;
+	bool fieldmode = false;
 	int r, cconv = 0;
 	unsigned offset0, offset1;
 	s32 row_inc;
@@ -2417,7 +2417,7 @@
 	out_height = out_height == 0 ? height : out_height;
 
 	if (ilace && height == out_height)
-		fieldmode = 1;
+		fieldmode = true;
 
 	if (ilace) {
 		if (fieldmode)
@@ -2918,7 +2918,7 @@
 		break;
 	default:
 		BUG();
-	};
+	}
 
 	l = dispc_read_reg(DISPC_POL_FREQ(channel));
 	l |= FLD_VAL(onoff, 17, 17);
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index fafe7c9..669a81f 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -266,7 +266,7 @@
 		OMAPDSS_SIG_ACTIVE_LOW;
 	ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
 		OMAPDSS_SIG_ACTIVE_HIGH :
-		OMAPDSS_SIG_ACTIVE_HIGH;
+		OMAPDSS_SIG_ACTIVE_LOW;
 	ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
 		OMAPDSS_DRIVE_SIG_RISING_EDGE :
 		OMAPDSS_DRIVE_SIG_FALLING_EDGE;
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index a598b58..6056b27 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -312,7 +312,7 @@
 	struct dsi_isr_tables isr_tables_copy;
 
 	int update_channel;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
 	unsigned update_bytes;
 #endif
 
@@ -334,7 +334,7 @@
 
 	u32		errors;
 	spinlock_t	errors_lock;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
 	ktime_t perf_setup_time;
 	ktime_t perf_start_time;
 #endif
@@ -373,7 +373,7 @@
 	struct completion *completion;
 };
 
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
 static bool dsi_perf;
 module_param(dsi_perf, bool, 0644);
 #endif
@@ -497,7 +497,7 @@
 	}
 }
 
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
 static void dsi_perf_mark_setup(struct platform_device *dsidev)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4066,7 +4066,7 @@
 		default:
 			r = -EINVAL;
 			goto err_pix_fmt;
-		};
+		}
 
 		dsi_if_enable(dsidev, false);
 		dsi_vc_enable(dsidev, channel, false);
@@ -4277,7 +4277,7 @@
 	dw = dsi->timings.x_res;
 	dh = dsi->timings.y_res;
 
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
 	dsi->update_bytes = dw * dh *
 		dsi_get_pixel_size(dsi->pix_fmt) / 8;
 #endif
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index e172531..f538e86 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -427,8 +427,8 @@
 void venc_uninit_platform_driver(void) __exit;
 
 /* HDMI */
-int hdmi_init_platform_driver(void) __init;
-void hdmi_uninit_platform_driver(void) __exit;
+int hdmi4_init_platform_driver(void) __init;
+void hdmi4_uninit_platform_driver(void) __exit;
 
 /* RFBI */
 int rfbi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b9cfebb..f8fd6db 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -789,50 +789,6 @@
 	.burst_size_unit = 16,
 };
 
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-/* HDMI OMAP4 Functions*/
-static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
-
-	.video_configure	=	ti_hdmi_4xxx_basic_configure,
-	.phy_enable		=	ti_hdmi_4xxx_phy_enable,
-	.phy_disable		=	ti_hdmi_4xxx_phy_disable,
-	.read_edid		=	ti_hdmi_4xxx_read_edid,
-	.pll_enable		=	ti_hdmi_4xxx_pll_enable,
-	.pll_disable		=	ti_hdmi_4xxx_pll_disable,
-	.video_enable		=	ti_hdmi_4xxx_wp_video_start,
-	.video_disable		=	ti_hdmi_4xxx_wp_video_stop,
-	.dump_wrapper		=	ti_hdmi_4xxx_wp_dump,
-	.dump_core		=	ti_hdmi_4xxx_core_dump,
-	.dump_pll		=	ti_hdmi_4xxx_pll_dump,
-	.dump_phy		=	ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-	.audio_enable		=       ti_hdmi_4xxx_wp_audio_enable,
-	.audio_disable		=       ti_hdmi_4xxx_wp_audio_disable,
-	.audio_start		=       ti_hdmi_4xxx_audio_start,
-	.audio_stop		=       ti_hdmi_4xxx_audio_stop,
-	.audio_config		=	ti_hdmi_4xxx_audio_config,
-	.audio_get_dma_port	=	ti_hdmi_4xxx_audio_get_dma_port,
-#endif
-
-};
-
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
-		enum omapdss_version version)
-{
-	switch (version) {
-	case OMAPDSS_VER_OMAP4430_ES1:
-	case OMAPDSS_VER_OMAP4430_ES2:
-	case OMAPDSS_VER_OMAP4:
-		ip_data->ops = &omap4_hdmi_functions;
-		break;
-	default:
-		ip_data->ops = NULL;
-	}
-
-	WARN_ON(ip_data->ops == NULL);
-}
-#endif
-
 /* Functions returning values related to a DSS feature */
 int dss_feat_get_num_mgrs(void)
 {
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 489b9be..10b0556 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,10 +20,6 @@
 #ifndef __OMAP2_DSS_FEATURES_H
 #define __OMAP2_DSS_FEATURES_H
 
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-#include "ti_hdmi.h"
-#endif
-
 #define MAX_DSS_MANAGERS	4
 #define MAX_DSS_OVERLAYS	4
 #define MAX_DSS_LCD_MANAGERS	3
@@ -117,8 +113,4 @@
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
 void dss_features_init(enum omapdss_version version);
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
-		enum omapdss_version version);
-#endif
 #endif
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
deleted file mode 100644
index 82a9640..0000000
--- a/drivers/video/omap2/dss/hdmi.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * hdmi.c
- *
- * HDMI interface DSS driver setting for TI's OMAP4 family of processor.
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Yong Zhi
- *	Mythri pk <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "HDMI"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-
-#include "ti_hdmi.h"
-#include "dss.h"
-#include "dss_features.h"
-
-#define HDMI_WP			0x0
-#define HDMI_CORE_SYS		0x400
-#define HDMI_CORE_AV		0x900
-#define HDMI_PLLCTRL		0x200
-#define HDMI_PHY		0x300
-
-/* HDMI EDID Length move this */
-#define HDMI_EDID_MAX_LENGTH			256
-#define EDID_TIMING_DESCRIPTOR_SIZE		0x12
-#define EDID_DESCRIPTOR_BLOCK0_ADDRESS		0x36
-#define EDID_DESCRIPTOR_BLOCK1_ADDRESS		0x80
-#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR	4
-#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR	4
-
-#define HDMI_DEFAULT_REGN 16
-#define HDMI_DEFAULT_REGM2 1
-
-static struct {
-	struct mutex lock;
-	struct platform_device *pdev;
-
-	struct hdmi_ip_data ip_data;
-
-	struct clk *sys_clk;
-	struct regulator *vdda_hdmi_dac_reg;
-
-	bool core_enabled;
-
-	struct omap_dss_device output;
-} hdmi;
-
-/*
- * Logic for the below structure :
- * user enters the CEA or VESA timings by specifying the HDMI/DVI code.
- * There is a correspondence between CEA/VESA timing and code, please
- * refer to section 6.3 in HDMI 1.3 specification for timing code.
- *
- * In the below structure, cea_vesa_timings corresponds to all OMAP4
- * supported CEA and VESA timing values.code_cea corresponds to the CEA
- * code, It is used to get the timing from cea_vesa_timing array.Similarly
- * with code_vesa. Code_index is used for back mapping, that is once EDID
- * is read from the TV, EDID is parsed to find the timing values and then
- * map it to corresponding CEA or VESA index.
- */
-
-static const struct hdmi_config cea_timings[] = {
-	{
-		{ 640, 480, 25200, 96, 16, 48, 2, 10, 33,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 1, HDMI_HDMI },
-	},
-	{
-		{ 720, 480, 27027, 62, 16, 60, 6, 9, 30,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 2, HDMI_HDMI },
-	},
-	{
-		{ 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 4, HDMI_HDMI },
-	},
-	{
-		{ 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			true, },
-		{ 5, HDMI_HDMI },
-	},
-	{
-		{ 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			true, },
-		{ 6, HDMI_HDMI },
-	},
-	{
-		{ 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 16, HDMI_HDMI },
-	},
-	{
-		{ 720, 576, 27000, 64, 12, 68, 5, 5, 39,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 17, HDMI_HDMI },
-	},
-	{
-		{ 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 19, HDMI_HDMI },
-	},
-	{
-		{ 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			true, },
-		{ 20, HDMI_HDMI },
-	},
-	{
-		{ 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			true, },
-		{ 21, HDMI_HDMI },
-	},
-	{
-		{ 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 29, HDMI_HDMI },
-	},
-	{
-		{ 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 31, HDMI_HDMI },
-	},
-	{
-		{ 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 32, HDMI_HDMI },
-	},
-	{
-		{ 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 35, HDMI_HDMI },
-	},
-	{
-		{ 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 37, HDMI_HDMI },
-	},
-};
-
-static const struct hdmi_config vesa_timings[] = {
-/* VESA From Here */
-	{
-		{ 640, 480, 25175, 96, 16, 48, 2, 11, 31,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 4, HDMI_DVI },
-	},
-	{
-		{ 800, 600, 40000, 128, 40, 88, 4, 1, 23,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 9, HDMI_DVI },
-	},
-	{
-		{ 848, 480, 33750, 112, 16, 112, 8, 6, 23,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0xE, HDMI_DVI },
-	},
-	{
-		{ 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x17, HDMI_DVI },
-	},
-	{
-		{ 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x1C, HDMI_DVI },
-	},
-	{
-		{ 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x27, HDMI_DVI },
-	},
-	{
-		{ 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x20, HDMI_DVI },
-	},
-	{
-		{ 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x23, HDMI_DVI },
-	},
-	{
-		{ 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x10, HDMI_DVI },
-	},
-	{
-		{ 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x2A, HDMI_DVI },
-	},
-	{
-		{ 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x2F, HDMI_DVI },
-	},
-	{
-		{ 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
-			false, },
-		{ 0x3A, HDMI_DVI },
-	},
-	{
-		{ 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x51, HDMI_DVI },
-	},
-	{
-		{ 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x52, HDMI_DVI },
-	},
-	{
-		{ 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x16, HDMI_DVI },
-	},
-	{
-		{ 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x29, HDMI_DVI },
-	},
-	{
-		{ 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x39, HDMI_DVI },
-	},
-	{
-		{ 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x1B, HDMI_DVI },
-	},
-	{
-		{ 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
-			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x55, HDMI_DVI },
-	},
-	{
-		{ 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
-			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
-			false, },
-		{ 0x44, HDMI_DVI },
-	},
-};
-
-static int hdmi_runtime_get(void)
-{
-	int r;
-
-	DSSDBG("hdmi_runtime_get\n");
-
-	r = pm_runtime_get_sync(&hdmi.pdev->dev);
-	WARN_ON(r < 0);
-	if (r < 0)
-		return r;
-
-	return 0;
-}
-
-static void hdmi_runtime_put(void)
-{
-	int r;
-
-	DSSDBG("hdmi_runtime_put\n");
-
-	r = pm_runtime_put_sync(&hdmi.pdev->dev);
-	WARN_ON(r < 0 && r != -ENOSYS);
-}
-
-static int hdmi_init_regulator(void)
-{
-	struct regulator *reg;
-
-	if (hdmi.vdda_hdmi_dac_reg != NULL)
-		return 0;
-
-	reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
-
-	/* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
-	if (IS_ERR(reg))
-		reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
-
-	if (IS_ERR(reg)) {
-		DSSERR("can't get VDDA_HDMI_DAC regulator\n");
-		return PTR_ERR(reg);
-	}
-
-	hdmi.vdda_hdmi_dac_reg = reg;
-
-	return 0;
-}
-
-static const struct hdmi_config *hdmi_find_timing(
-					const struct hdmi_config *timings_arr,
-					int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		if (timings_arr[i].cm.code == hdmi.ip_data.cfg.cm.code)
-			return &timings_arr[i];
-	}
-	return NULL;
-}
-
-static const struct hdmi_config *hdmi_get_timings(void)
-{
-       const struct hdmi_config *arr;
-       int len;
-
-       if (hdmi.ip_data.cfg.cm.mode == HDMI_DVI) {
-               arr = vesa_timings;
-               len = ARRAY_SIZE(vesa_timings);
-       } else {
-               arr = cea_timings;
-               len = ARRAY_SIZE(cea_timings);
-       }
-
-       return hdmi_find_timing(arr, len);
-}
-
-static bool hdmi_timings_compare(struct omap_video_timings *timing1,
-				const struct omap_video_timings *timing2)
-{
-	int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
-
-	if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
-			DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
-		(timing2->x_res == timing1->x_res) &&
-		(timing2->y_res == timing1->y_res)) {
-
-		timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
-		timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
-		timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
-		timing1_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
-
-		DSSDBG("timing1_hsync = %d timing1_vsync = %d"\
-			"timing2_hsync = %d timing2_vsync = %d\n",
-			timing1_hsync, timing1_vsync,
-			timing2_hsync, timing2_vsync);
-
-		if ((timing1_hsync == timing2_hsync) &&
-			(timing1_vsync == timing2_vsync)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
-{
-	int i;
-	struct hdmi_cm cm = {-1};
-	DSSDBG("hdmi_get_code\n");
-
-	for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
-		if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
-			cm = cea_timings[i].cm;
-			goto end;
-		}
-	}
-	for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
-		if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
-			cm = vesa_timings[i].cm;
-			goto end;
-		}
-	}
-
-end:	return cm;
-
-}
-
-static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
-		struct hdmi_pll_info *pi)
-{
-	unsigned long clkin, refclk;
-	u32 mf;
-
-	clkin = clk_get_rate(hdmi.sys_clk) / 10000;
-	/*
-	 * Input clock is predivided by N + 1
-	 * out put of which is reference clk
-	 */
-
-	pi->regn = HDMI_DEFAULT_REGN;
-
-	refclk = clkin / pi->regn;
-
-	pi->regm2 = HDMI_DEFAULT_REGM2;
-
-	/*
-	 * multiplier is pixel_clk/ref_clk
-	 * Multiplying by 100 to avoid fractional part removal
-	 */
-	pi->regm = phy * pi->regm2 / refclk;
-
-	/*
-	 * fractional multiplier is remainder of the difference between
-	 * multiplier and actual phy(required pixel clock thus should be
-	 * multiplied by 2^18(262144) divided by the reference clock
-	 */
-	mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
-	pi->regmf = pi->regm2 * mf / refclk;
-
-	/*
-	 * Dcofreq should be set to 1 if required pixel clock
-	 * is greater than 1000MHz
-	 */
-	pi->dcofreq = phy > 1000 * 100;
-	pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
-
-	/* Set the reference clock to sysclk reference */
-	pi->refsel = HDMI_REFSEL_SYSCLK;
-
-	DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
-	DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
-}
-
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
-	if (r)
-		return r;
-
-	r = hdmi_runtime_get();
-	if (r)
-		goto err_runtime_get;
-
-	/* Make selection of HDMI in DSS */
-	dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
-
-	hdmi.core_enabled = true;
-
-	return 0;
-
-err_runtime_get:
-	regulator_disable(hdmi.vdda_hdmi_dac_reg);
-
-	return r;
-}
-
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
-{
-	hdmi.core_enabled = false;
-
-	hdmi_runtime_put();
-	regulator_disable(hdmi.vdda_hdmi_dac_reg);
-}
-
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
-{
-	int r;
-	struct omap_video_timings *p;
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
-	unsigned long phy;
-
-	r = hdmi_power_on_core(dssdev);
-	if (r)
-		return r;
-
-	dss_mgr_disable(mgr);
-
-	p = &hdmi.ip_data.cfg.timings;
-
-	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
-
-	phy = p->pixel_clock;
-
-	hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
-
-	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-
-	/* config the PLL and PHY hdmi_set_pll_pwrfirst */
-	r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
-	if (r) {
-		DSSDBG("Failed to lock PLL\n");
-		goto err_pll_enable;
-	}
-
-	r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data);
-	if (r) {
-		DSSDBG("Failed to start PHY\n");
-		goto err_phy_enable;
-	}
-
-	hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
-
-	/* bypass TV gamma table */
-	dispc_enable_gamma_table(0);
-
-	/* tv size */
-	dss_mgr_set_timings(mgr, p);
-
-	r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
-	if (r)
-		goto err_vid_enable;
-
-	r = dss_mgr_enable(mgr);
-	if (r)
-		goto err_mgr_enable;
-
-	return 0;
-
-err_mgr_enable:
-	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-err_vid_enable:
-	hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
-err_phy_enable:
-	hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-err_pll_enable:
-	hdmi_power_off_core(dssdev);
-	return -EIO;
-}
-
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
-{
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
-
-	dss_mgr_disable(mgr);
-
-	hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-	hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
-	hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-
-	hdmi_power_off_core(dssdev);
-}
-
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
-					struct omap_video_timings *timings)
-{
-	struct hdmi_cm cm;
-
-	cm = hdmi_get_code(timings);
-	if (cm.code == -1) {
-		return -EINVAL;
-	}
-
-	return 0;
-
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	struct hdmi_cm cm;
-	const struct hdmi_config *t;
-
-	mutex_lock(&hdmi.lock);
-
-	cm = hdmi_get_code(timings);
-	hdmi.ip_data.cfg.cm = cm;
-
-	t = hdmi_get_timings();
-	if (t != NULL) {
-		hdmi.ip_data.cfg = *t;
-
-		dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
-	}
-
-	mutex_unlock(&hdmi.lock);
-}
-
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	const struct hdmi_config *cfg;
-
-	cfg = hdmi_get_timings();
-	if (cfg == NULL)
-		cfg = &vesa_timings[0];
-
-	memcpy(timings, &cfg->timings, sizeof(cfg->timings));
-}
-
-static void hdmi_dump_regs(struct seq_file *s)
-{
-	mutex_lock(&hdmi.lock);
-
-	if (hdmi_runtime_get()) {
-		mutex_unlock(&hdmi.lock);
-		return;
-	}
-
-	hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
-	hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
-	hdmi.ip_data.ops->dump_phy(&hdmi.ip_data, s);
-	hdmi.ip_data.ops->dump_core(&hdmi.ip_data, s);
-
-	hdmi_runtime_put();
-	mutex_unlock(&hdmi.lock);
-}
-
-static int read_edid(u8 *buf, int len)
-{
-	int r;
-
-	mutex_lock(&hdmi.lock);
-
-	r = hdmi_runtime_get();
-	BUG_ON(r);
-
-	r = hdmi.ip_data.ops->read_edid(&hdmi.ip_data, buf, len);
-
-	hdmi_runtime_put();
-	mutex_unlock(&hdmi.lock);
-
-	return r;
-}
-
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
-{
-	struct omap_dss_device *out = &hdmi.output;
-	int r = 0;
-
-	DSSDBG("ENTER hdmi_display_enable\n");
-
-	mutex_lock(&hdmi.lock);
-
-	if (out == NULL || out->manager == NULL) {
-		DSSERR("failed to enable display: no output/manager\n");
-		r = -ENODEV;
-		goto err0;
-	}
-
-	r = hdmi_power_on_full(dssdev);
-	if (r) {
-		DSSERR("failed to power on device\n");
-		goto err0;
-	}
-
-	mutex_unlock(&hdmi.lock);
-	return 0;
-
-err0:
-	mutex_unlock(&hdmi.lock);
-	return r;
-}
-
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
-{
-	DSSDBG("Enter hdmi_display_disable\n");
-
-	mutex_lock(&hdmi.lock);
-
-	hdmi_power_off_full(dssdev);
-
-	mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	DSSDBG("ENTER omapdss_hdmi_core_enable\n");
-
-	mutex_lock(&hdmi.lock);
-
-	r = hdmi_power_on_core(dssdev);
-	if (r) {
-		DSSERR("failed to power on device\n");
-		goto err0;
-	}
-
-	mutex_unlock(&hdmi.lock);
-	return 0;
-
-err0:
-	mutex_unlock(&hdmi.lock);
-	return r;
-}
-
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
-{
-	DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
-	mutex_lock(&hdmi.lock);
-
-	hdmi_power_off_core(dssdev);
-
-	mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_get_clocks(struct platform_device *pdev)
-{
-	struct clk *clk;
-
-	clk = devm_clk_get(&pdev->dev, "sys_clk");
-	if (IS_ERR(clk)) {
-		DSSERR("can't get sys_clk\n");
-		return PTR_ERR(clk);
-	}
-
-	hdmi.sys_clk = clk;
-
-	return 0;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
-{
-	u32 deep_color;
-	bool deep_color_correct = false;
-	u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
-
-	if (n == NULL || cts == NULL)
-		return -EINVAL;
-
-	/* TODO: When implemented, query deep color mode here. */
-	deep_color = 100;
-
-	/*
-	 * When using deep color, the default N value (as in the HDMI
-	 * specification) yields to an non-integer CTS. Hence, we
-	 * modify it while keeping the restrictions described in
-	 * section 7.2.1 of the HDMI 1.4a specification.
-	 */
-	switch (sample_freq) {
-	case 32000:
-	case 48000:
-	case 96000:
-	case 192000:
-		if (deep_color == 125)
-			if (pclk == 27027 || pclk == 74250)
-				deep_color_correct = true;
-		if (deep_color == 150)
-			if (pclk == 27027)
-				deep_color_correct = true;
-		break;
-	case 44100:
-	case 88200:
-	case 176400:
-		if (deep_color == 125)
-			if (pclk == 27027)
-				deep_color_correct = true;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (deep_color_correct) {
-		switch (sample_freq) {
-		case 32000:
-			*n = 8192;
-			break;
-		case 44100:
-			*n = 12544;
-			break;
-		case 48000:
-			*n = 8192;
-			break;
-		case 88200:
-			*n = 25088;
-			break;
-		case 96000:
-			*n = 16384;
-			break;
-		case 176400:
-			*n = 50176;
-			break;
-		case 192000:
-			*n = 32768;
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else {
-		switch (sample_freq) {
-		case 32000:
-			*n = 4096;
-			break;
-		case 44100:
-			*n = 6272;
-			break;
-		case 48000:
-			*n = 6144;
-			break;
-		case 88200:
-			*n = 12544;
-			break;
-		case 96000:
-			*n = 12288;
-			break;
-		case 176400:
-			*n = 25088;
-			break;
-		case 192000:
-			*n = 24576;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-	/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
-	*cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
-
-	return 0;
-}
-
-static bool hdmi_mode_has_audio(void)
-{
-	if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
-		return true;
-	else
-		return false;
-}
-
-#endif
-
-static int hdmi_connect(struct omap_dss_device *dssdev,
-		struct omap_dss_device *dst)
-{
-	struct omap_overlay_manager *mgr;
-	int r;
-
-	dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
-
-	r = hdmi_init_regulator();
-	if (r)
-		return r;
-
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
-	if (r)
-		return r;
-
-	r = omapdss_output_set_device(dssdev, dst);
-	if (r) {
-		DSSERR("failed to connect output to new device: %s\n",
-				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
-		return r;
-	}
-
-	return 0;
-}
-
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
-		struct omap_dss_device *dst)
-{
-	WARN_ON(dst != dssdev->dst);
-
-	if (dst != dssdev->dst)
-		return;
-
-	omapdss_output_unset_device(dssdev);
-
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
-}
-
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
-		u8 *edid, int len)
-{
-	bool need_enable;
-	int r;
-
-	need_enable = hdmi.core_enabled == false;
-
-	if (need_enable) {
-		r = hdmi_core_enable(dssdev);
-		if (r)
-			return r;
-	}
-
-	r = read_edid(edid, len);
-
-	if (need_enable)
-		hdmi_core_disable(dssdev);
-
-	return r;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	mutex_lock(&hdmi.lock);
-
-	if (!hdmi_mode_has_audio()) {
-		r = -EPERM;
-		goto err;
-	}
-
-
-	r = hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
-	if (r)
-		goto err;
-
-	mutex_unlock(&hdmi.lock);
-	return 0;
-
-err:
-	mutex_unlock(&hdmi.lock);
-	return r;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-	hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
-	return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-	hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
-	bool r;
-
-	mutex_lock(&hdmi.lock);
-
-	r = hdmi_mode_has_audio();
-
-	mutex_unlock(&hdmi.lock);
-	return r;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
-		struct omap_dss_audio *audio)
-{
-	int r;
-
-	mutex_lock(&hdmi.lock);
-
-	if (!hdmi_mode_has_audio()) {
-		r = -EPERM;
-		goto err;
-	}
-
-	r = hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
-	if (r)
-		goto err;
-
-	mutex_unlock(&hdmi.lock);
-	return 0;
-
-err:
-	mutex_unlock(&hdmi.lock);
-	return r;
-}
-#else
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
-	return -EPERM;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
-	return -EPERM;
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
-	return false;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
-		struct omap_dss_audio *audio)
-{
-	return -EPERM;
-}
-#endif
-
-static const struct omapdss_hdmi_ops hdmi_ops = {
-	.connect		= hdmi_connect,
-	.disconnect		= hdmi_disconnect,
-
-	.enable			= hdmi_display_enable,
-	.disable		= hdmi_display_disable,
-
-	.check_timings		= hdmi_display_check_timing,
-	.set_timings		= hdmi_display_set_timing,
-	.get_timings		= hdmi_display_get_timings,
-
-	.read_edid		= hdmi_read_edid,
-
-	.audio_enable		= hdmi_audio_enable,
-	.audio_disable		= hdmi_audio_disable,
-	.audio_start		= hdmi_audio_start,
-	.audio_stop		= hdmi_audio_stop,
-	.audio_supported	= hdmi_audio_supported,
-	.audio_config		= hdmi_audio_config,
-};
-
-static void hdmi_init_output(struct platform_device *pdev)
-{
-	struct omap_dss_device *out = &hdmi.output;
-
-	out->dev = &pdev->dev;
-	out->id = OMAP_DSS_OUTPUT_HDMI;
-	out->output_type = OMAP_DISPLAY_TYPE_HDMI;
-	out->name = "hdmi.0";
-	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->ops.hdmi = &hdmi_ops;
-	out->owner = THIS_MODULE;
-
-	omapdss_register_output(out);
-}
-
-static void __exit hdmi_uninit_output(struct platform_device *pdev)
-{
-	struct omap_dss_device *out = &hdmi.output;
-
-	omapdss_unregister_output(out);
-}
-
-/* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int r;
-
-	hdmi.pdev = pdev;
-
-	mutex_init(&hdmi.lock);
-	mutex_init(&hdmi.ip_data.lock);
-
-	res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
-
-	/* Base address taken from platform */
-	hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hdmi.ip_data.base_wp))
-		return PTR_ERR(hdmi.ip_data.base_wp);
-
-	hdmi.ip_data.irq = platform_get_irq(pdev, 0);
-	if (hdmi.ip_data.irq < 0) {
-		DSSERR("platform_get_irq failed\n");
-		return -ENODEV;
-	}
-
-	r = hdmi_get_clocks(pdev);
-	if (r) {
-		DSSERR("can't get clocks\n");
-		return r;
-	}
-
-	pm_runtime_enable(&pdev->dev);
-
-	hdmi.ip_data.core_sys_offset = HDMI_CORE_SYS;
-	hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
-	hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
-	hdmi.ip_data.phy_offset = HDMI_PHY;
-
-	hdmi_init_output(pdev);
-
-	dss_debugfs_create_file("hdmi", hdmi_dump_regs);
-
-	return 0;
-}
-
-static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
-{
-	hdmi_uninit_output(pdev);
-
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
-	clk_disable_unprepare(hdmi.sys_clk);
-
-	dispc_runtime_put();
-
-	return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-	int r;
-
-	r = dispc_runtime_get();
-	if (r < 0)
-		return r;
-
-	clk_prepare_enable(hdmi.sys_clk);
-
-	return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-	.runtime_suspend = hdmi_runtime_suspend,
-	.runtime_resume = hdmi_runtime_resume,
-};
-
-static struct platform_driver omapdss_hdmihw_driver = {
-	.probe		= omapdss_hdmihw_probe,
-	.remove         = __exit_p(omapdss_hdmihw_remove),
-	.driver         = {
-		.name   = "omapdss_hdmi",
-		.owner  = THIS_MODULE,
-		.pm	= &hdmi_pm_ops,
-	},
-};
-
-int __init hdmi_init_platform_driver(void)
-{
-	return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void __exit hdmi_uninit_platform_driver(void)
-{
-	platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100644
index 0000000..b049376
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,444 @@
+/*
+ * HDMI driver definition for TI OMAP4 Processor.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI_H
+#define _HDMI_H
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+
+/* HDMI Wrapper */
+
+#define HDMI_WP_REVISION			0x0
+#define HDMI_WP_SYSCONFIG			0x10
+#define HDMI_WP_IRQSTATUS_RAW			0x24
+#define HDMI_WP_IRQSTATUS			0x28
+#define HDMI_WP_IRQENABLE_SET			0x2C
+#define HDMI_WP_IRQENABLE_CLR			0x30
+#define HDMI_WP_IRQWAKEEN			0x34
+#define HDMI_WP_PWR_CTRL			0x40
+#define HDMI_WP_DEBOUNCE			0x44
+#define HDMI_WP_VIDEO_CFG			0x50
+#define HDMI_WP_VIDEO_SIZE			0x60
+#define HDMI_WP_VIDEO_TIMING_H			0x68
+#define HDMI_WP_VIDEO_TIMING_V			0x6C
+#define HDMI_WP_WP_CLK				0x70
+#define HDMI_WP_AUDIO_CFG			0x80
+#define HDMI_WP_AUDIO_CFG2			0x84
+#define HDMI_WP_AUDIO_CTRL			0x88
+#define HDMI_WP_AUDIO_DATA			0x8C
+
+/* HDMI WP IRQ flags */
+
+#define HDMI_IRQ_OCP_TIMEOUT			(1 << 4)
+#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW		(1 << 8)
+#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW		(1 << 9)
+#define HDMI_IRQ_AUDIO_FIFO_SAMPLE_REQ		(1 << 10)
+#define HDMI_IRQ_VIDEO_VSYNC			(1 << 16)
+#define HDMI_IRQ_VIDEO_FRAME_DONE		(1 << 17)
+#define HDMI_IRQ_PHY_LINE5V_ASSERT		(1 << 24)
+#define HDMI_IRQ_LINK_CONNECT			(1 << 25)
+#define HDMI_IRQ_LINK_DISCONNECT		(1 << 26)
+#define HDMI_IRQ_PLL_LOCK			(1 << 29)
+#define HDMI_IRQ_PLL_UNLOCK			(1 << 30)
+#define HDMI_IRQ_PLL_RECAL			(1 << 31)
+
+/* HDMI PLL */
+
+#define PLLCTRL_PLL_CONTROL			0x0
+#define PLLCTRL_PLL_STATUS			0x4
+#define PLLCTRL_PLL_GO				0x8
+#define PLLCTRL_CFG1				0xC
+#define PLLCTRL_CFG2				0x10
+#define PLLCTRL_CFG3				0x14
+#define PLLCTRL_SSC_CFG1			0x18
+#define PLLCTRL_SSC_CFG2			0x1C
+#define PLLCTRL_CFG4				0x20
+
+/* HDMI PHY */
+
+#define HDMI_TXPHY_TX_CTRL			0x0
+#define HDMI_TXPHY_DIGITAL_CTRL			0x4
+#define HDMI_TXPHY_POWER_CTRL			0x8
+#define HDMI_TXPHY_PAD_CFG_CTRL			0xC
+
+enum hdmi_pll_pwr {
+	HDMI_PLLPWRCMD_ALLOFF = 0,
+	HDMI_PLLPWRCMD_PLLONLY = 1,
+	HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+	HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_phy_pwr {
+	HDMI_PHYPWRCMD_OFF = 0,
+	HDMI_PHYPWRCMD_LDOON = 1,
+	HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_core_hdmi_dvi {
+	HDMI_DVI = 0,
+	HDMI_HDMI = 1
+};
+
+enum hdmi_clk_refsel {
+	HDMI_REFSEL_PCLK = 0,
+	HDMI_REFSEL_REF1 = 1,
+	HDMI_REFSEL_REF2 = 2,
+	HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_packing_mode {
+	HDMI_PACK_10b_RGB_YUV444 = 0,
+	HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+	HDMI_PACK_20b_YUV422 = 2,
+	HDMI_PACK_ALREADYPACKED = 7
+};
+
+enum hdmi_stereo_channels {
+	HDMI_AUDIO_STEREO_NOCHANNELS = 0,
+	HDMI_AUDIO_STEREO_ONECHANNEL = 1,
+	HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
+	HDMI_AUDIO_STEREO_THREECHANNELS = 3,
+	HDMI_AUDIO_STEREO_FOURCHANNELS = 4
+};
+
+enum hdmi_audio_type {
+	HDMI_AUDIO_TYPE_LPCM = 0,
+	HDMI_AUDIO_TYPE_IEC = 1
+};
+
+enum hdmi_audio_justify {
+	HDMI_AUDIO_JUSTIFY_LEFT = 0,
+	HDMI_AUDIO_JUSTIFY_RIGHT = 1
+};
+
+enum hdmi_audio_sample_order {
+	HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
+	HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
+};
+
+enum hdmi_audio_samples_perword {
+	HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
+	HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
+};
+
+enum hdmi_audio_sample_size {
+	HDMI_AUDIO_SAMPLE_16BITS = 0,
+	HDMI_AUDIO_SAMPLE_24BITS = 1
+};
+
+enum hdmi_audio_transf_mode {
+	HDMI_AUDIO_TRANSF_DMA = 0,
+	HDMI_AUDIO_TRANSF_IRQ = 1
+};
+
+enum hdmi_audio_blk_strt_end_sig {
+	HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
+	HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
+};
+
+enum hdmi_core_audio_layout {
+	HDMI_AUDIO_LAYOUT_2CH = 0,
+	HDMI_AUDIO_LAYOUT_8CH = 1
+};
+
+enum hdmi_core_cts_mode {
+	HDMI_AUDIO_CTS_MODE_HW = 0,
+	HDMI_AUDIO_CTS_MODE_SW = 1
+};
+
+enum hdmi_audio_mclk_mode {
+	HDMI_AUDIO_MCLK_128FS = 0,
+	HDMI_AUDIO_MCLK_256FS = 1,
+	HDMI_AUDIO_MCLK_384FS = 2,
+	HDMI_AUDIO_MCLK_512FS = 3,
+	HDMI_AUDIO_MCLK_768FS = 4,
+	HDMI_AUDIO_MCLK_1024FS = 5,
+	HDMI_AUDIO_MCLK_1152FS = 6,
+	HDMI_AUDIO_MCLK_192FS = 7
+};
+
+/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
+enum hdmi_core_infoframe {
+	HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+	HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+	HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON =  1,
+	HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+	HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+	HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+	HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+	HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+	HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+	HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+	HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+	HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+	HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+	HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+	HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+	HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+	HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+	HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+	HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+	HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+	HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+	HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+	HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+	HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+	HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+	HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+	HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+	HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+	HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+	HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+	HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+	HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+	HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+	HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+	HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+	HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+	HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+	HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+	HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+	HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+	HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+	HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
+};
+
+struct hdmi_cm {
+	int	code;
+	int	mode;
+};
+
+struct hdmi_video_format {
+	enum hdmi_packing_mode	packing_mode;
+	u32			y_res;	/* lines per panel */
+	u32			x_res;	/* pixels per line */
+};
+
+struct hdmi_config {
+	struct omap_video_timings timings;
+	struct hdmi_cm cm;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+	u16 regn;
+	u16 regm;
+	u32 regmf;
+	u16 regm2;
+	u16 regsd;
+	u16 dcofreq;
+	enum hdmi_clk_refsel refsel;
+};
+
+struct hdmi_audio_format {
+	enum hdmi_stereo_channels		stereo_channels;
+	u8					active_chnnls_msk;
+	enum hdmi_audio_type			type;
+	enum hdmi_audio_justify			justification;
+	enum hdmi_audio_sample_order		sample_order;
+	enum hdmi_audio_samples_perword		samples_per_word;
+	enum hdmi_audio_sample_size		sample_size;
+	enum hdmi_audio_blk_strt_end_sig	en_sig_blk_strt_end;
+};
+
+struct hdmi_audio_dma {
+	u8				transfer_size;
+	u8				block_size;
+	enum hdmi_audio_transf_mode	mode;
+	u16				fifo_threshold;
+};
+
+struct hdmi_core_audio_i2s_config {
+	u8 in_length_bits;
+	u8 justification;
+	u8 sck_edge_mode;
+	u8 vbit;
+	u8 direction;
+	u8 shift;
+	u8 active_sds;
+};
+
+struct hdmi_core_audio_config {
+	struct hdmi_core_audio_i2s_config	i2s_cfg;
+	struct snd_aes_iec958			*iec60958_cfg;
+	bool					fs_override;
+	u32					n;
+	u32					cts;
+	u32					aud_par_busclk;
+	enum hdmi_core_audio_layout		layout;
+	enum hdmi_core_cts_mode			cts_mode;
+	bool					use_mclk;
+	enum hdmi_audio_mclk_mode		mclk_mode;
+	bool					en_acr_pkt;
+	bool					en_dsd_audio;
+	bool					en_parallel_aud_input;
+	bool					en_spdif;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_avi {
+	/* Y0, Y1 rgb,yCbCr */
+	u8	db1_format;
+	/* A0  Active information Present */
+	u8	db1_active_info;
+	/* B0, B1 Bar info data valid */
+	u8	db1_bar_info_dv;
+	/* S0, S1 scan information */
+	u8	db1_scan_info;
+	/* C0, C1 colorimetry */
+	u8	db2_colorimetry;
+	/* M0, M1 Aspect ratio (4:3, 16:9) */
+	u8	db2_aspect_ratio;
+	/* R0...R3 Active format aspect ratio */
+	u8	db2_active_fmt_ar;
+	/* ITC IT content. */
+	u8	db3_itc;
+	/* EC0, EC1, EC2 Extended colorimetry */
+	u8	db3_ec;
+	/* Q1, Q0 Quantization range */
+	u8	db3_q_range;
+	/* SC1, SC0 Non-uniform picture scaling */
+	u8	db3_nup_scaling;
+	/* VIC0..6 Video format identification */
+	u8	db4_videocode;
+	/* PR0..PR3 Pixel repetition factor */
+	u8	db5_pixel_repeat;
+	/* Line number end of top bar */
+	u16	db6_7_line_eoftop;
+	/* Line number start of bottom bar */
+	u16	db8_9_line_sofbottom;
+	/* Pixel number end of left bar */
+	u16	db10_11_pixel_eofleft;
+	/* Pixel number start of right bar */
+	u16	db12_13_pixel_sofright;
+};
+
+struct hdmi_wp_data {
+	void __iomem *base;
+};
+
+struct hdmi_pll_data {
+	void __iomem *base;
+
+	struct hdmi_pll_info info;
+};
+
+struct hdmi_phy_data {
+	void __iomem *base;
+
+	int irq;
+};
+
+struct hdmi_core_data {
+	void __iomem *base;
+
+	struct hdmi_core_infoframe_avi avi_cfg;
+};
+
+static inline void hdmi_write_reg(void __iomem *base_addr, const u16 idx,
+		u32 val)
+{
+	__raw_writel(val, base_addr + idx);
+}
+
+static inline u32 hdmi_read_reg(void __iomem *base_addr, const u16 idx)
+{
+	return __raw_readl(base_addr + idx);
+}
+
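+/*
+ * Read-modify-write helpers: REG_FLD_MOD updates the bitfield between MSB
+ * 'start' and LSB 'end' of the register at idx, REG_GET extracts it.
+ */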
+#define REG_FLD_MOD(base, idx, val, start, end) \
+	hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
+							val, start, end))
+#define REG_GET(base, idx, start, end) \
+	FLD_GET(hdmi_read_reg(base, idx), start, end)
+
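+/*
+ * Busy-wait until bits [b2:b1] of the register at idx read back as val.
+ * Returns val once seen, or !val after roughly 10 ms of polling, so callers
+ * compare the result against the value they waited for.
+ */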
+static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
+		const u16 idx, int b2, int b1, u32 val)
+{
+	u32 t = 0;
+	while (val != REG_GET(base_addr, idx, b2, b1)) {
+		udelay(1);
+		if (t++ > 10000)
+			return !val;
+	}
+	return val;
+}
+
+/* HDMI wrapper funcs */
+int hdmi_wp_video_start(struct hdmi_wp_data *wp);
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp);
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s);
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp);
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus);
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask);
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask);
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val);
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+		struct hdmi_video_format *video_fmt);
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+		struct omap_video_timings *timings);
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+		struct omap_video_timings *timings);
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+		struct omap_video_timings *timings, struct hdmi_config *param);
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+
+/* HDMI PLL funcs */
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy);
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll);
+
+/* HDMI PHY funcs */
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+		struct hdmi_config *cfg);
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp);
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
+
+/* HDMI common funcs */
+const struct hdmi_config *hdmi_default_timing(void);
+const struct hdmi_config *hdmi_get_timings(int mode, int code);
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts);
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable);
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable);
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+		struct hdmi_audio_format *aud_fmt);
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+		struct hdmi_audio_dma *aud_dma);
+static inline bool hdmi_mode_has_audio(int mode)
+{
+	return mode == HDMI_HDMI;
+}
+#endif
+#endif
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
new file mode 100644
index 0000000..e140096
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -0,0 +1,696 @@
+/*
+ * HDMI interface DSS driver for TI's OMAP4 family of SoCs.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ *	Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <video/omapdss.h>
+
+#include "hdmi4_core.h"
+#include "dss.h"
+#include "dss_features.h"
+
+static struct {
+	struct mutex lock;
+	struct platform_device *pdev;
+
+	struct hdmi_wp_data	wp;
+	struct hdmi_pll_data	pll;
+	struct hdmi_phy_data	phy;
+	struct hdmi_core_data	core;
+
+	struct hdmi_config cfg;
+
+	struct clk *sys_clk;
+	struct regulator *vdda_hdmi_dac_reg;
+
+	bool core_enabled;
+
+	struct omap_dss_device output;
+} hdmi;
+
+static int hdmi_runtime_get(void)
+{
+	int r;
+
+	DSSDBG("hdmi_runtime_get\n");
+
+	r = pm_runtime_get_sync(&hdmi.pdev->dev);
+	WARN_ON(r < 0);
+	if (r < 0)
+		return r;
+
+	return 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+	int r;
+
+	DSSDBG("hdmi_runtime_put\n");
+
+	r = pm_runtime_put_sync(&hdmi.pdev->dev);
+	WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static int hdmi_init_regulator(void)
+{
+	struct regulator *reg;
+
+	if (hdmi.vdda_hdmi_dac_reg != NULL)
+		return 0;
+
+	reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+
+	/* DT HACK: try VDAC to make omapdss work on OMAP4 SDP/Panda */
+	if (IS_ERR(reg))
+		reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
+	if (IS_ERR(reg)) {
+		DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+		return PTR_ERR(reg);
+	}
+
+	hdmi.vdda_hdmi_dac_reg = reg;
+
+	return 0;
+}
+
+static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
+	if (r)
+		return r;
+
+	r = hdmi_runtime_get();
+	if (r)
+		goto err_runtime_get;
+
+	/* Select HDMI in the DSS HDMI/VENC clock mux */
+	dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+	hdmi.core_enabled = true;
+
+	return 0;
+
+err_runtime_get:
+	regulator_disable(hdmi.vdda_hdmi_dac_reg);
+
+	return r;
+}
+
+static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+{
+	hdmi.core_enabled = false;
+
+	hdmi_runtime_put();
+	regulator_disable(hdmi.vdda_hdmi_dac_reg);
+}
+
+static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+{
+	int r;
+	struct omap_video_timings *p;
+	struct omap_overlay_manager *mgr = hdmi.output.manager;
+	unsigned long phy;
+
+	r = hdmi_power_on_core(dssdev);
+	if (r)
+		return r;
+
+	dss_mgr_disable(mgr);
+
+	p = &hdmi.cfg.timings;
+
+	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+
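+	/* omapdss timings carry the pixel clock in kHz; the PHY runs at that rate */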
+	phy = p->pixel_clock;
+
+	hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
+
+	hdmi_wp_video_stop(&hdmi.wp);
+
+	/* configure the PLL and PHY first */
+	r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
+	if (r) {
+		DSSDBG("Failed to lock PLL\n");
+		goto err_pll_enable;
+	}
+
+	r = hdmi_phy_enable(&hdmi.phy, &hdmi.wp, &hdmi.cfg);
+	if (r) {
+		DSSDBG("Failed to start PHY\n");
+		goto err_phy_enable;
+	}
+
+	hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+
+	/* bypass TV gamma table */
+	dispc_enable_gamma_table(0);
+
+	/* tv size */
+	dss_mgr_set_timings(mgr, p);
+
+	r = hdmi_wp_video_start(&hdmi.wp);
+	if (r)
+		goto err_vid_enable;
+
+	r = dss_mgr_enable(mgr);
+	if (r)
+		goto err_mgr_enable;
+
+	return 0;
+
+err_mgr_enable:
+	hdmi_wp_video_stop(&hdmi.wp);
+err_vid_enable:
+	hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+err_phy_enable:
+	hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+err_pll_enable:
+	hdmi_power_off_core(dssdev);
+	return -EIO;
+}
+
+static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+{
+	struct omap_overlay_manager *mgr = hdmi.output.manager;
+
+	dss_mgr_disable(mgr);
+
+	hdmi_wp_video_stop(&hdmi.wp);
+	hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+	hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+
+	hdmi_power_off_core(dssdev);
+}
+
+static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
+					struct omap_video_timings *timings)
+{
+	struct hdmi_cm cm;
+
+	cm = hdmi_get_code(timings);
+	if (cm.code == -1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	struct hdmi_cm cm;
+	const struct hdmi_config *t;
+
+	mutex_lock(&hdmi.lock);
+
+	cm = hdmi_get_code(timings);
+	hdmi.cfg.cm = cm;
+
+	t = hdmi_get_timings(cm.mode, cm.code);
+	if (t != NULL) {
+		hdmi.cfg = *t;
+
+		dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+	}
+
+	mutex_unlock(&hdmi.lock);
+}
+
+static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	const struct hdmi_config *cfg;
+	struct hdmi_cm cm = hdmi.cfg.cm;
+
+	cfg = hdmi_get_timings(cm.mode, cm.code);
+	if (cfg == NULL)
+		cfg = hdmi_default_timing();
+
+	memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+}
+
+static void hdmi_dump_regs(struct seq_file *s)
+{
+	mutex_lock(&hdmi.lock);
+
+	if (hdmi_runtime_get()) {
+		mutex_unlock(&hdmi.lock);
+		return;
+	}
+
+	hdmi_wp_dump(&hdmi.wp, s);
+	hdmi_pll_dump(&hdmi.pll, s);
+	hdmi_phy_dump(&hdmi.phy, s);
+	hdmi4_core_dump(&hdmi.core, s);
+
+	hdmi_runtime_put();
+	mutex_unlock(&hdmi.lock);
+}
+
+static int read_edid(u8 *buf, int len)
+{
+	int r;
+
+	mutex_lock(&hdmi.lock);
+
+	r = hdmi_runtime_get();
+	BUG_ON(r);
+
+	r = hdmi4_read_edid(&hdmi.core, buf, len);
+
+	hdmi_runtime_put();
+	mutex_unlock(&hdmi.lock);
+
+	return r;
+}
+
+static int hdmi_display_enable(struct omap_dss_device *dssdev)
+{
+	struct omap_dss_device *out = &hdmi.output;
+	int r = 0;
+
+	DSSDBG("ENTER hdmi_display_enable\n");
+
+	mutex_lock(&hdmi.lock);
+
+	if (out == NULL || out->manager == NULL) {
+		DSSERR("failed to enable display: no output/manager\n");
+		r = -ENODEV;
+		goto err0;
+	}
+
+	r = hdmi_power_on_full(dssdev);
+	if (r) {
+		DSSERR("failed to power on device\n");
+		goto err0;
+	}
+
+	mutex_unlock(&hdmi.lock);
+	return 0;
+
+err0:
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static void hdmi_display_disable(struct omap_dss_device *dssdev)
+{
+	DSSDBG("Enter hdmi_display_disable\n");
+
+	mutex_lock(&hdmi.lock);
+
+	hdmi_power_off_full(dssdev);
+
+	mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_core_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+
+	mutex_lock(&hdmi.lock);
+
+	r = hdmi_power_on_core(dssdev);
+	if (r) {
+		DSSERR("failed to power on device\n");
+		goto err0;
+	}
+
+	mutex_unlock(&hdmi.lock);
+	return 0;
+
+err0:
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static void hdmi_core_disable(struct omap_dss_device *dssdev)
+{
+	DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+	mutex_lock(&hdmi.lock);
+
+	hdmi_power_off_core(dssdev);
+
+	mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+	struct clk *clk;
+
+	clk = devm_clk_get(&pdev->dev, "sys_clk");
+	if (IS_ERR(clk)) {
+		DSSERR("can't get sys_clk\n");
+		return PTR_ERR(clk);
+	}
+
+	hdmi.sys_clk = clk;
+
+	return 0;
+}
+
+static int hdmi_connect(struct omap_dss_device *dssdev,
+		struct omap_dss_device *dst)
+{
+	struct omap_overlay_manager *mgr;
+	int r;
+
+	r = hdmi_init_regulator();
+	if (r)
+		return r;
+
+	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+	if (!mgr)
+		return -ENODEV;
+
+	r = dss_mgr_connect(mgr, dssdev);
+	if (r)
+		return r;
+
+	r = omapdss_output_set_device(dssdev, dst);
+	if (r) {
+		DSSERR("failed to connect output to new device: %s\n",
+				dst->name);
+		dss_mgr_disconnect(mgr, dssdev);
+		return r;
+	}
+
+	return 0;
+}
+
+static void hdmi_disconnect(struct omap_dss_device *dssdev,
+		struct omap_dss_device *dst)
+{
+	WARN_ON(dst != dssdev->dst);
+
+	if (dst != dssdev->dst)
+		return;
+
+	omapdss_output_unset_device(dssdev);
+
+	if (dssdev->manager)
+		dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static int hdmi_read_edid(struct omap_dss_device *dssdev,
+		u8 *edid, int len)
+{
+	bool need_enable;
+	int r;
+
+	need_enable = !hdmi.core_enabled;
+
+	if (need_enable) {
+		r = hdmi_core_enable(dssdev);
+		if (r)
+			return r;
+	}
+
+	r = read_edid(edid, len);
+
+	if (need_enable)
+		hdmi_core_disable(dssdev);
+
+	return r;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	mutex_lock(&hdmi.lock);
+
+	if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+		r = -EPERM;
+		goto err;
+	}
+
+	r = hdmi_wp_audio_enable(&hdmi.wp, true);
+	if (r)
+		goto err;
+
+	mutex_unlock(&hdmi.lock);
+	return 0;
+
+err:
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+	hdmi_wp_audio_enable(&hdmi.wp, false);
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+	return hdmi4_audio_start(&hdmi.core, &hdmi.wp);
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+	hdmi4_audio_stop(&hdmi.core, &hdmi.wp);
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+	bool r;
+
+	mutex_lock(&hdmi.lock);
+
+	r = hdmi_mode_has_audio(hdmi.cfg.cm.mode);
+
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+		struct omap_dss_audio *audio)
+{
+	int r;
+	u32 pclk = hdmi.cfg.timings.pixel_clock;
+
+	mutex_lock(&hdmi.lock);
+
+	if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+		r = -EPERM;
+		goto err;
+	}
+
+	r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
+	if (r)
+		goto err;
+
+	mutex_unlock(&hdmi.lock);
+	return 0;
+
+err:
+	mutex_unlock(&hdmi.lock);
+	return r;
+}
+#else
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+	return -EPERM;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+	return -EPERM;
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+	return false;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+		struct omap_dss_audio *audio)
+{
+	return -EPERM;
+}
+#endif
+
+static const struct omapdss_hdmi_ops hdmi_ops = {
+	.connect		= hdmi_connect,
+	.disconnect		= hdmi_disconnect,
+
+	.enable			= hdmi_display_enable,
+	.disable		= hdmi_display_disable,
+
+	.check_timings		= hdmi_display_check_timing,
+	.set_timings		= hdmi_display_set_timing,
+	.get_timings		= hdmi_display_get_timings,
+
+	.read_edid		= hdmi_read_edid,
+
+	.audio_enable		= hdmi_audio_enable,
+	.audio_disable		= hdmi_audio_disable,
+	.audio_start		= hdmi_audio_start,
+	.audio_stop		= hdmi_audio_stop,
+	.audio_supported	= hdmi_audio_supported,
+	.audio_config		= hdmi_audio_config,
+};
+
+static void hdmi_init_output(struct platform_device *pdev)
+{
+	struct omap_dss_device *out = &hdmi.output;
+
+	out->dev = &pdev->dev;
+	out->id = OMAP_DSS_OUTPUT_HDMI;
+	out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+	out->name = "hdmi.0";
+	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+	out->ops.hdmi = &hdmi_ops;
+	out->owner = THIS_MODULE;
+
+	omapdss_register_output(out);
+}
+
+static void __exit hdmi_uninit_output(struct platform_device *pdev)
+{
+	struct omap_dss_device *out = &hdmi.output;
+
+	omapdss_unregister_output(out);
+}
+
+/* HDMI HW IP initialisation */
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
+{
+	int r;
+
+	hdmi.pdev = pdev;
+
+	mutex_init(&hdmi.lock);
+
+	r = hdmi_wp_init(pdev, &hdmi.wp);
+	if (r)
+		return r;
+
+	r = hdmi_pll_init(pdev, &hdmi.pll);
+	if (r)
+		return r;
+
+	r = hdmi_phy_init(pdev, &hdmi.phy);
+	if (r)
+		return r;
+
+	r = hdmi4_core_init(pdev, &hdmi.core);
+	if (r)
+		return r;
+
+	r = hdmi_get_clocks(pdev);
+	if (r) {
+		DSSERR("can't get clocks\n");
+		return r;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	hdmi_init_output(pdev);
+
+	dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+	return 0;
+}
+
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
+{
+	hdmi_uninit_output(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+	clk_disable_unprepare(hdmi.sys_clk);
+
+	dispc_runtime_put();
+
+	return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+	int r;
+
+	r = dispc_runtime_get();
+	if (r < 0)
+		return r;
+
+	clk_prepare_enable(hdmi.sys_clk);
+
+	return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+	.runtime_suspend = hdmi_runtime_suspend,
+	.runtime_resume = hdmi_runtime_resume,
+};
+
+static struct platform_driver omapdss_hdmihw_driver = {
+	.probe		= omapdss_hdmihw_probe,
+	.remove         = __exit_p(omapdss_hdmihw_remove),
+	.driver         = {
+		.name   = "omapdss_hdmi",
+		.owner  = THIS_MODULE,
+		.pm	= &hdmi_pm_ops,
+	},
+};
+
+int __init hdmi4_init_platform_driver(void)
+{
+	return platform_driver_register(&omapdss_hdmihw_driver);
+}
+
+void __exit hdmi4_uninit_platform_driver(void)
+{
+	platform_driver_unregister(&omapdss_hdmihw_driver);
+}
diff --git a/drivers/video/omap2/dss/hdmi4_core.c b/drivers/video/omap2/dss/hdmi4_core.c
new file mode 100644
index 0000000..5dd5e54
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi4_core.c
@@ -0,0 +1,1034 @@
+/*
+ * hdmi4_core.c
+ *
+ * HDMI IP driver library for the TI81xx, TI38xx and TI OMAP4 SoCs
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ *	Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
+
+#include "hdmi4_core.h"
+#include "dss_features.h"
+
+#define HDMI_CORE_AV		0x500
+
+static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
+{
+	return core->base + HDMI_CORE_AV;
+}
+
+static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+{
+	void __iomem *base = core->base;
+
+	/* Turn on CLK for DDC */
+	REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+	/* IN_PROG */
+	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 1) {
+		/* Abort transaction */
+		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xf, 3, 0);
+		/* IN_PROG */
+		if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+					4, 4, 0) != 0) {
+			DSSERR("Timeout aborting DDC transaction\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Clk SCL Devices */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+	/* HDMI_CORE_DDC_STATUS_IN_PROG */
+	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+				4, 4, 0) != 0) {
+		DSSERR("Timeout starting SCL clock\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Clear FIFO */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+	/* HDMI_CORE_DDC_STATUS_IN_PROG */
+	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+				4, 4, 0) != 0) {
+		DSSERR("Timeout clearing DDC fifo\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
+		u8 *pedid, int ext)
+{
+	void __iomem *base = core->base;
+	u32 i;
+	char checksum;
+	u32 offset = 0;
+
+	/* HDMI_CORE_DDC_STATUS_IN_PROG */
+	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
+				4, 4, 0) != 0) {
+		DSSERR("Timeout waiting DDC to be ready\n");
+		return -ETIMEDOUT;
+	}
+
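+	/*
+	 * E-DDC addresses EDID in 256-byte segments, each holding two 128-byte
+	 * blocks; odd-numbered blocks start at offset 0x80 of their segment.
+	 */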
+	if (ext % 2 != 0)
+		offset = 0x80;
+
+	/* Load Segment Address Register */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+
+	/* Load Slave Address Register */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+	/* Load Offset Address Register */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+
+	/* Load Byte Count */
+	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
+
+	/* Set DDC_CMD */
+	if (ext)
+		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
+	else
+		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+	/* HDMI_CORE_DDC_STATUS_BUS_LOW */
+	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
+		pr_err("I2C Bus Low?\n");
+		return -EIO;
+	}
+	/* HDMI_CORE_DDC_STATUS_NO_ACK */
+	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
+		pr_err("I2C No Ack\n");
+		return -EIO;
+	}
+
+	for (i = 0; i < 0x80; ++i) {
+		int t;
+
+		/* IN_PROG */
+		if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 0) {
+			DSSERR("operation stopped when reading edid\n");
+			return -EIO;
+		}
+
+		t = 0;
+		/* FIFO_EMPTY */
+		while (REG_GET(base, HDMI_CORE_DDC_STATUS, 2, 2) == 1) {
+			if (t++ > 10000) {
+				DSSERR("timeout reading edid\n");
+				return -ETIMEDOUT;
+			}
+			udelay(1);
+		}
+
+		pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
+	}
+
+	checksum = 0;
+	for (i = 0; i < 0x80; ++i)
+		checksum += pedid[i];
+
+	if (checksum != 0) {
+		pr_err("E-EDID checksum failed!!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
+{
+	int r, l;
+
+	if (len < 128)
+		return -EINVAL;
+
+	r = hdmi_core_ddc_init(core);
+	if (r)
+		return r;
+
+	r = hdmi_core_ddc_edid(core, edid, 0);
+	if (r)
+		return r;
+
+	l = 128;
+
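+	/*
+	 * edid[0x7e] is the base block's extension flag: the number of
+	 * 128-byte extension blocks that follow. Read at most one of them.
+	 */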
+	if (len >= 128 * 2 && edid[0x7e] > 0) {
+		r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
+		if (r)
+			return r;
+		l += 128;
+	}
+
+	return l;
+}
+
+static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
+			struct hdmi_core_infoframe_avi *avi_cfg,
+			struct hdmi_core_packet_enable_repeat *repeat_cfg)
+{
+	pr_debug("Enter hdmi_core_init\n");
+
+	/* video core */
+	video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
+	video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
+	video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
+	video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
+	video_cfg->hdmi_dvi = HDMI_DVI;
+	video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
+
+	/* info frame */
+	avi_cfg->db1_format = 0;
+	avi_cfg->db1_active_info = 0;
+	avi_cfg->db1_bar_info_dv = 0;
+	avi_cfg->db1_scan_info = 0;
+	avi_cfg->db2_colorimetry = 0;
+	avi_cfg->db2_aspect_ratio = 0;
+	avi_cfg->db2_active_fmt_ar = 0;
+	avi_cfg->db3_itc = 0;
+	avi_cfg->db3_ec = 0;
+	avi_cfg->db3_q_range = 0;
+	avi_cfg->db3_nup_scaling = 0;
+	avi_cfg->db4_videocode = 0;
+	avi_cfg->db5_pixel_repeat = 0;
+	avi_cfg->db6_7_line_eoftop = 0;
+	avi_cfg->db8_9_line_sofbottom = 0;
+	avi_cfg->db10_11_pixel_eofleft = 0;
+	avi_cfg->db12_13_pixel_sofright = 0;
+
+	/* packet enable and repeat */
+	repeat_cfg->audio_pkt = 0;
+	repeat_cfg->audio_pkt_repeat = 0;
+	repeat_cfg->avi_infoframe = 0;
+	repeat_cfg->avi_infoframe_repeat = 0;
+	repeat_cfg->gen_cntrl_pkt = 0;
+	repeat_cfg->gen_cntrl_pkt_repeat = 0;
+	repeat_cfg->generic_pkt = 0;
+	repeat_cfg->generic_pkt_repeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
+{
+	pr_debug("Enter hdmi_core_powerdown_disable\n");
+	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_release(struct hdmi_core_data *core)
+{
+	pr_debug("Enter hdmi_core_swreset_release\n");
+	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
+{
+	pr_debug("Enter hdmi_core_swreset_assert\n");
+	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+}
+
+/* HDMI_CORE_VIDEO_CONFIG */
+static void hdmi_core_video_config(struct hdmi_core_data *core,
+				struct hdmi_core_video_config *cfg)
+{
+	u32 r = 0;
+	void __iomem *core_sys_base = core->base;
+	void __iomem *core_av_base = hdmi_av_base(core);
+
+	/* sys_ctrl1 default configuration not tunable */
+	r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1);
+	r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+	r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+	r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS, 2, 2);
+	r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE, 1, 1);
+	hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1, r);
+
+	REG_FLD_MOD(core_sys_base,
+			HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
+
+	/* Vid_Mode */
+	r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE);
+
+	/* dither truncation configuration */
+	if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
+		r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
+		r = FLD_MOD(r, 1, 5, 5);
+	} else {
+		r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
+		r = FLD_MOD(r, 0, 5, 5);
+	}
+	hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
+
+	/* HDMI_Ctrl */
+	r = hdmi_read_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL);
+	r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
+	r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
+	r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
+	hdmi_write_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL, r);
+
+	/* TMDS_CTRL */
+	REG_FLD_MOD(core_sys_base,
+			HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
+}
+
+static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core)
+{
+	u32 val;
+	char sum = 0, checksum = 0;
+	void __iomem *av_base = hdmi_av_base(core);
+	struct hdmi_core_infoframe_avi info_avi = core->avi_cfg;
+
+	sum += 0x82 + 0x002 + 0x00D;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_VERS, 0x002);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+	val = (info_avi.db1_format << 5) |
+		(info_avi.db1_active_info << 4) |
+		(info_avi.db1_bar_info_dv << 2) |
+		(info_avi.db1_scan_info);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(0), val);
+	sum += val;
+
+	val = (info_avi.db2_colorimetry << 6) |
+		(info_avi.db2_aspect_ratio << 4) |
+		(info_avi.db2_active_fmt_ar);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(1), val);
+	sum += val;
+
+	val = (info_avi.db3_itc << 7) |
+		(info_avi.db3_ec << 4) |
+		(info_avi.db3_q_range << 2) |
+		(info_avi.db3_nup_scaling);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(2), val);
+	sum += val;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(3),
+					info_avi.db4_videocode);
+	sum += info_avi.db4_videocode;
+
+	val = info_avi.db5_pixel_repeat;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(4), val);
+	sum += val;
+
+	val = info_avi.db6_7_line_eoftop & 0x00FF;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(5), val);
+	sum += val;
+
+	val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(6), val);
+	sum += val;
+
+	val = info_avi.db8_9_line_sofbottom & 0x00FF;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(7), val);
+	sum += val;
+
+	val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(8), val);
+	sum += val;
+
+	val = info_avi.db10_11_pixel_eofleft & 0x00FF;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(9), val);
+	sum += val;
+
+	val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(10), val);
+	sum += val;
+
+	val = info_avi.db12_13_pixel_sofright & 0x00FF;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(11), val);
+	sum += val;
+
+	val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(12), val);
+	sum += val;
+
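+	/*
+	 * Per CEA-861, the checksum byte makes the infoframe header plus
+	 * payload sum to zero modulo 256.
+	 */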
+	checksum = 0x100 - sum;
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
+}
+
+static void hdmi_core_av_packet_config(struct hdmi_core_data *core,
+		struct hdmi_core_packet_enable_repeat repeat_cfg)
+{
+	/* enable/repeat the infoframe */
+	hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL1,
+		(repeat_cfg.audio_pkt << 5) |
+		(repeat_cfg.audio_pkt_repeat << 4) |
+		(repeat_cfg.avi_infoframe << 1) |
+		(repeat_cfg.avi_infoframe_repeat));
+
+	/* enable/repeat the packet */
+	hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL2,
+		(repeat_cfg.gen_cntrl_pkt << 3) |
+		(repeat_cfg.gen_cntrl_pkt_repeat << 2) |
+		(repeat_cfg.generic_pkt << 1) |
+		(repeat_cfg.generic_pkt_repeat));
+}
+
+void hdmi4_configure(struct hdmi_core_data *core,
+	struct hdmi_wp_data *wp, struct hdmi_config *cfg)
+{
+	/* HDMI */
+	struct omap_video_timings video_timing;
+	struct hdmi_video_format video_format;
+	/* HDMI core */
+	struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg;
+	struct hdmi_core_video_config v_core_cfg;
+	struct hdmi_core_packet_enable_repeat repeat_cfg;
+
+	hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
+
+	hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+
+	hdmi_wp_video_config_timing(wp, &video_timing);
+
+	/* video config */
+	video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+	hdmi_wp_video_config_format(wp, &video_format);
+
+	hdmi_wp_video_config_interface(wp, &video_timing);
+
+	/*
+	 * configure core video part
+	 * set software reset in the core
+	 */
+	hdmi_core_swreset_assert(core);
+
+	/* power down off */
+	hdmi_core_powerdown_disable(core);
+
+	v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
+	v_core_cfg.hdmi_dvi = cfg->cm.mode;
+
+	hdmi_core_video_config(core, &v_core_cfg);
+
+	/* release software reset in the core */
+	hdmi_core_swreset_release(core);
+
+	/*
+	 * configure packet
+	 * info frame video see doc CEA861-D page 65
+	 */
+	avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+	avi_cfg->db1_active_info =
+			HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+	avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+	avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+	avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+	avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+	avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+	avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+	avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+	avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+	avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+	avi_cfg->db4_videocode = cfg->cm.code;
+	avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+	avi_cfg->db6_7_line_eoftop = 0;
+	avi_cfg->db8_9_line_sofbottom = 0;
+	avi_cfg->db10_11_pixel_eofleft = 0;
+	avi_cfg->db12_13_pixel_sofright = 0;
+
+	hdmi_core_aux_infoframe_avi_config(core);
+
+	/* enable/repeat the infoframe */
+	repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+	repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+	/* wakeup */
+	repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+	repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+	hdmi_core_av_packet_config(core, repeat_cfg);
+}
+
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
+{
+	int i;
+
+#define CORE_REG(i, name) name(i)
+#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
+		hdmi_read_reg(core->base, r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+		hdmi_read_reg(hdmi_av_base(core), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+		(i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
+		hdmi_read_reg(hdmi_av_base(core), CORE_REG(i, r)))
+
+	DUMPCORE(HDMI_CORE_SYS_VND_IDL);
+	DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
+	DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
+	DUMPCORE(HDMI_CORE_SYS_DEV_REV);
+	DUMPCORE(HDMI_CORE_SYS_SRST);
+	DUMPCORE(HDMI_CORE_SYS_SYS_CTRL1);
+	DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+	DUMPCORE(HDMI_CORE_SYS_SYS_CTRL3);
+	DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+	DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+	DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+	DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+	DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+	DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+	DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
+	DUMPCORE(HDMI_CORE_SYS_HRES_L);
+	DUMPCORE(HDMI_CORE_SYS_HRES_H);
+	DUMPCORE(HDMI_CORE_SYS_VRES_L);
+	DUMPCORE(HDMI_CORE_SYS_VRES_H);
+	DUMPCORE(HDMI_CORE_SYS_IADJUST);
+	DUMPCORE(HDMI_CORE_SYS_POLDETECT);
+	DUMPCORE(HDMI_CORE_SYS_HWIDTH1);
+	DUMPCORE(HDMI_CORE_SYS_HWIDTH2);
+	DUMPCORE(HDMI_CORE_SYS_VWIDTH);
+	DUMPCORE(HDMI_CORE_SYS_VID_CTRL);
+	DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
+	DUMPCORE(HDMI_CORE_SYS_VID_MODE);
+	DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
+	DUMPCORE(HDMI_CORE_SYS_VID_BLANK3);
+	DUMPCORE(HDMI_CORE_SYS_DC_HEADER);
+	DUMPCORE(HDMI_CORE_SYS_VID_DITHER);
+	DUMPCORE(HDMI_CORE_SYS_RGB2XVYCC_CT);
+	DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_LOW);
+	DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_UP);
+	DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_LOW);
+	DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_UP);
+	DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_LOW);
+	DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_UP);
+	DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_LOW);
+	DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_UP);
+	DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
+	DUMPCORE(HDMI_CORE_SYS_INTR1);
+	DUMPCORE(HDMI_CORE_SYS_INTR2);
+	DUMPCORE(HDMI_CORE_SYS_INTR3);
+	DUMPCORE(HDMI_CORE_SYS_INTR4);
+	DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK1);
+	DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK2);
+	DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK3);
+	DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK4);
+	DUMPCORE(HDMI_CORE_SYS_INTR_CTRL);
+	DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
+
+	DUMPCORE(HDMI_CORE_DDC_ADDR);
+	DUMPCORE(HDMI_CORE_DDC_SEGM);
+	DUMPCORE(HDMI_CORE_DDC_OFFSET);
+	DUMPCORE(HDMI_CORE_DDC_COUNT1);
+	DUMPCORE(HDMI_CORE_DDC_COUNT2);
+	DUMPCORE(HDMI_CORE_DDC_STATUS);
+	DUMPCORE(HDMI_CORE_DDC_CMD);
+	DUMPCORE(HDMI_CORE_DDC_DATA);
+
+	DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+	DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+	DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+	DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+	DUMPCOREAV(HDMI_CORE_AV_ASRC);
+	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+	DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+	DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+	DUMPCOREAV(HDMI_CORE_AV_DPD);
+	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
+
+	for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
+
+	for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
+
+	for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+	DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
+
+	for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
+
+	for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
+
+	for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
+		DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+	DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void hdmi_core_audio_config(struct hdmi_core_data *core,
+					struct hdmi_core_audio_config *cfg)
+{
+	u32 r;
+	void __iomem *av_base = hdmi_av_base(core);
+
+	/*
+	 * Parameters for generation of Audio Clock Recovery packets
+	 */
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
+
+	if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
+		REG_FLD_MOD(av_base,
+				HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
+		REG_FLD_MOD(av_base,
+				HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
+	} else {
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
+				cfg->aud_par_busclk, 7, 0);
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
+				(cfg->aud_par_busclk >> 8), 7, 0);
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
+				(cfg->aud_par_busclk >> 16), 7, 0);
+	}
+
+	/* Set ACR clock divisor */
+	REG_FLD_MOD(av_base,
+			HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+
+	r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
+	/*
+	 * Use TMDS clock for ACR packets. For devices that use
+	 * the MCLK, this is the first part of the MCLK initialization.
+	 */
+	r = FLD_MOD(r, 0, 2, 2);
+
+	r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
+	r = FLD_MOD(r, cfg->cts_mode, 0, 0);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_ACR_CTRL, r);
+
+	/* For devices using MCLK, this completes its initialization. */
+	if (cfg->use_mclk)
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_ACR_CTRL, 1, 2, 2);
+
+	/* Override of SPDIF sample frequency with value in I2S_CHST4 */
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
+						cfg->fs_override, 1, 1);
+
+	/*
+	 * Set IEC-60958-3 channel status word. It is passed to the IP
+	 * just as it is received. The user of the driver is responsible
+	 * for its contents.
+	 */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+		       cfg->iec60958_cfg->status[0]);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+		       cfg->iec60958_cfg->status[1]);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+		       cfg->iec60958_cfg->status[2]);
+	/* yes, this is correct: status[3] goes to CHST4 register */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+		       cfg->iec60958_cfg->status[3]);
+	/* yes, this is correct: status[4] goes to CHST5 register */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+		       cfg->iec60958_cfg->status[4]);
+
+	/* set I2S parameters */
+	r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
+	r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
+	r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
+	r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
+	r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
+	r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
+
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
+			cfg->i2s_cfg.in_length_bits, 3, 0);
+
+	/* Audio channels and mode parameters */
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
+	r = hdmi_read_reg(av_base, HDMI_CORE_AV_AUD_MODE);
+	r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
+	r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
+	r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
+	r = FLD_MOD(r, cfg->en_spdif, 1, 1);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+	/* Audio channel mappings */
+	/* TODO: Make channel mapping dynamic. For now, map channels
+	 * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
+	 * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+	 */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+	REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
+}
+
+static void hdmi_core_audio_infoframe_cfg(struct hdmi_core_data *core,
+		struct snd_cea_861_aud_if *info_aud)
+{
+	u8 sum = 0, checksum = 0;
+	void __iomem *av_base = hdmi_av_base(core);
+
+	/*
+	 * Set audio info frame type, version and length as
+	 * described in HDMI 1.4a Section 8.2.2 specification.
+	 * Checksum calculation is defined in Section 5.3.5.
+	 */
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_TYPE, 0x84);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_VERS, 0x01);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
+	sum += 0x84 + 0x001 + 0x00a;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+		       info_aud->db1_ct_cc);
+	sum += info_aud->db1_ct_cc;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+		       info_aud->db2_sf_ss);
+	sum += info_aud->db2_sf_ss;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+	sum += info_aud->db3;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+	sum += info_aud->db4_ca;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+		       info_aud->db5_dminh_lsv);
+	sum += info_aud->db5_dminh_lsv;
+
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
+	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
+
+	checksum = 0x100 - sum;
+	hdmi_write_reg(av_base,
+					HDMI_CORE_AV_AUDIO_CHSUM, checksum);
+
+	/*
+	 * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
+	 * is available.
+	 */
+}
+
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+		struct omap_dss_audio *audio, u32 pclk)
+{
+	struct hdmi_audio_format audio_format;
+	struct hdmi_audio_dma audio_dma;
+	struct hdmi_core_audio_config acore;
+	int err, channel_count;
+	u32 n, cts;
+	unsigned int fs_nr;
+	bool word_length_16b = false;
+
+	if (!audio || !audio->iec || !audio->cea || !core)
+		return -EINVAL;
+
+	acore.iec60958_cfg = audio->iec;
+	/*
+	 * In the IEC-60958 status word, check if the audio sample word length
+	 * is 16-bit as several optimizations can be performed in such case.
+	 */
+	if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+		if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+			word_length_16b = true;
+
+	/* I2S configuration. See the Philips I2S specification */
+	if (word_length_16b)
+		acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+	else
+		acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+	/*
+	 * The I2S input word length is twice the length given in the IEC-60958
+	 * status word. If the word size is greater than
+	 * 20 bits, increment by one.
+	 */
+	acore.i2s_cfg.in_length_bits = audio->iec->status[4]
+		& IEC958_AES4_CON_WORDLEN;
+	if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+		acore.i2s_cfg.in_length_bits++;
+	acore.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+	acore.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+	acore.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+	acore.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+	/* convert sample frequency to a number */
+	switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+	case IEC958_AES3_CON_FS_32000:
+		fs_nr = 32000;
+		break;
+	case IEC958_AES3_CON_FS_44100:
+		fs_nr = 44100;
+		break;
+	case IEC958_AES3_CON_FS_48000:
+		fs_nr = 48000;
+		break;
+	case IEC958_AES3_CON_FS_88200:
+		fs_nr = 88200;
+		break;
+	case IEC958_AES3_CON_FS_96000:
+		fs_nr = 96000;
+		break;
+	case IEC958_AES3_CON_FS_176400:
+		fs_nr = 176400;
+		break;
+	case IEC958_AES3_CON_FS_192000:
+		fs_nr = 192000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+	if (err)
+		return err;
+
+	/* Audio clock regeneration settings */
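+	/* The sink regenerates the audio clock using 128 * fs = f_TMDS * N / CTS. */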
+	acore.n = n;
+	acore.cts = cts;
+	if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+		acore.aud_par_busclk = 0;
+		acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+		acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+	} else {
+		acore.aud_par_busclk = (((128 * 31) - 1) << 8);
+		acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+		acore.use_mclk = true;
+	}
+
+	if (acore.use_mclk)
+		acore.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+	/* Audio channels settings */
+	channel_count = (audio->cea->db1_ct_cc &
+			 CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
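+	/* Enable one bit per active channel, i.e. mask = (1 << channel_count) - 1. */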
+	switch (channel_count) {
+	case 2:
+		audio_format.active_chnnls_msk = 0x03;
+		break;
+	case 3:
+		audio_format.active_chnnls_msk = 0x07;
+		break;
+	case 4:
+		audio_format.active_chnnls_msk = 0x0f;
+		break;
+	case 5:
+		audio_format.active_chnnls_msk = 0x1f;
+		break;
+	case 6:
+		audio_format.active_chnnls_msk = 0x3f;
+		break;
+	case 7:
+		audio_format.active_chnnls_msk = 0x7f;
+		break;
+	case 8:
+		audio_format.active_chnnls_msk = 0xff;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * The HDMI IP needs all four stereo channels enabled when
+	 * transmitting more than two audio channels.
+	 */
+	if (channel_count == 2) {
+		audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+		acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+		acore.layout = HDMI_AUDIO_LAYOUT_2CH;
+	} else {
+		audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+		acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+				HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+				HDMI_AUDIO_I2S_SD3_EN;
+		acore.layout = HDMI_AUDIO_LAYOUT_8CH;
+	}
+
+	acore.en_spdif = false;
+	/* use sample frequency from channel status word */
+	acore.fs_override = true;
+	/* enable ACR packets */
+	acore.en_acr_pkt = true;
+	/* disable direct streaming digital audio */
+	acore.en_dsd_audio = false;
+	/* use parallel audio interface */
+	acore.en_parallel_aud_input = true;
+
+	/* DMA settings */
+	if (word_length_16b)
+		audio_dma.transfer_size = 0x10;
+	else
+		audio_dma.transfer_size = 0x20;
+	audio_dma.block_size = 0xC0;
+	audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+	audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+	/* audio FIFO format settings */
+	if (word_length_16b) {
+		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+		audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+		audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+	} else {
+		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+		audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+		audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+	}
+	audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+	audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+	/* disable start/stop signals of IEC 60958 blocks */
+	audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+	/* configure DMA and audio FIFO format */
+	hdmi_wp_audio_config_dma(wp, &audio_dma);
+	hdmi_wp_audio_config_format(wp, &audio_format);
+
+	/* configure the core */
+	hdmi_core_audio_config(core, &acore);
+
+	/* configure CEA 861 audio infoframe */
+	hdmi_core_audio_infoframe_cfg(core, audio->cea);
+
+	return 0;
+}
+
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
+{
+	REG_FLD_MOD(hdmi_av_base(core),
+		    HDMI_CORE_AV_AUD_MODE, true, 0, 0);
+
+	hdmi_wp_audio_core_req_enable(wp, true);
+
+	return 0;
+}
+
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
+{
+	REG_FLD_MOD(hdmi_av_base(core),
+		    HDMI_CORE_AV_AUD_MODE, false, 0, 0);
+
+	hdmi_wp_audio_core_req_enable(wp, false);
+}
+
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size)
+{
+	if (!offset || !size)
+		return -EINVAL;
+	*offset = HDMI_WP_AUDIO_DATA;
+	*size = 4;
+	return 0;
+}
+
+#endif
+
+#define CORE_OFFSET		0x400
+#define CORE_SIZE		0xc00
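+
+/*
+ * In the legacy (un-split) memory map the HDMI sub-blocks sit at fixed
+ * offsets from the wrapper base: WP at 0x000, PLL at 0x200, PHY at 0x300
+ * and the core at 0x400. The offset/size pairs here and in hdmi_pll.c,
+ * hdmi_phy.c and hdmi_wp.c mirror that layout for the fallback path below.
+ */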
+
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
+{
+	struct resource *res;
+	struct resource temp_res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_core");
+	if (!res) {
+		DSSDBG("can't get CORE mem resource by name\n");
+		/*
+		 * If hwmod/DT doesn't provide the memory resource split into
+		 * HDMI sub-blocks by name, fall back to the platform's first
+		 * resource. This code will be removed once the driver can get
+		 * the mem resources by name.
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			DSSERR("can't get CORE mem resource\n");
+			return -EINVAL;
+		}
+
+		temp_res.start = res->start + CORE_OFFSET;
+		temp_res.end = temp_res.start + CORE_SIZE - 1;
+		res = &temp_res;
+	}
+
+	core->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!core->base) {
+		DSSERR("can't ioremap CORE\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi4_core.h b/drivers/video/omap2/dss/hdmi4_core.h
new file mode 100644
index 0000000..bb64689
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi4_core.h
@@ -0,0 +1,276 @@
+/*
+ * HDMI header definition for OMAP4 HDMI core IP
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI4_CORE_H_
+#define _HDMI4_CORE_H_
+
+#include "hdmi.h"
+
+/* OMAP4 HDMI IP Core System */
+
+#define HDMI_CORE_SYS_VND_IDL			0x0
+#define HDMI_CORE_SYS_DEV_IDL			0x8
+#define HDMI_CORE_SYS_DEV_IDH			0xC
+#define HDMI_CORE_SYS_DEV_REV			0x10
+#define HDMI_CORE_SYS_SRST			0x14
+#define HDMI_CORE_SYS_SYS_CTRL1			0x20
+#define HDMI_CORE_SYS_SYS_STAT			0x24
+#define HDMI_CORE_SYS_SYS_CTRL3			0x28
+#define HDMI_CORE_SYS_DCTL			0x34
+#define HDMI_CORE_SYS_DE_DLY			0xC8
+#define HDMI_CORE_SYS_DE_CTRL			0xCC
+#define HDMI_CORE_SYS_DE_TOP			0xD0
+#define HDMI_CORE_SYS_DE_CNTL			0xD8
+#define HDMI_CORE_SYS_DE_CNTH			0xDC
+#define HDMI_CORE_SYS_DE_LINL			0xE0
+#define HDMI_CORE_SYS_DE_LINH_1			0xE4
+#define HDMI_CORE_SYS_HRES_L			0xE8
+#define HDMI_CORE_SYS_HRES_H			0xEC
+#define HDMI_CORE_SYS_VRES_L			0xF0
+#define HDMI_CORE_SYS_VRES_H			0xF4
+#define HDMI_CORE_SYS_IADJUST			0xF8
+#define HDMI_CORE_SYS_POLDETECT			0xFC
+#define HDMI_CORE_SYS_HWIDTH1			0x110
+#define HDMI_CORE_SYS_HWIDTH2			0x114
+#define HDMI_CORE_SYS_VWIDTH			0x11C
+#define HDMI_CORE_SYS_VID_CTRL			0x120
+#define HDMI_CORE_SYS_VID_ACEN			0x124
+#define HDMI_CORE_SYS_VID_MODE			0x128
+#define HDMI_CORE_SYS_VID_BLANK1		0x12C
+#define HDMI_CORE_SYS_VID_BLANK2		0x130
+#define HDMI_CORE_SYS_VID_BLANK3		0x134
+#define HDMI_CORE_SYS_DC_HEADER			0x138
+#define HDMI_CORE_SYS_VID_DITHER		0x13C
+#define HDMI_CORE_SYS_RGB2XVYCC_CT		0x140
+#define HDMI_CORE_SYS_R2Y_COEFF_LOW		0x144
+#define HDMI_CORE_SYS_R2Y_COEFF_UP		0x148
+#define HDMI_CORE_SYS_G2Y_COEFF_LOW		0x14C
+#define HDMI_CORE_SYS_G2Y_COEFF_UP		0x150
+#define HDMI_CORE_SYS_B2Y_COEFF_LOW		0x154
+#define HDMI_CORE_SYS_B2Y_COEFF_UP		0x158
+#define HDMI_CORE_SYS_R2CB_COEFF_LOW		0x15C
+#define HDMI_CORE_SYS_R2CB_COEFF_UP		0x160
+#define HDMI_CORE_SYS_G2CB_COEFF_LOW		0x164
+#define HDMI_CORE_SYS_G2CB_COEFF_UP		0x168
+#define HDMI_CORE_SYS_B2CB_COEFF_LOW		0x16C
+#define HDMI_CORE_SYS_B2CB_COEFF_UP		0x170
+#define HDMI_CORE_SYS_R2CR_COEFF_LOW		0x174
+#define HDMI_CORE_SYS_R2CR_COEFF_UP		0x178
+#define HDMI_CORE_SYS_G2CR_COEFF_LOW		0x17C
+#define HDMI_CORE_SYS_G2CR_COEFF_UP		0x180
+#define HDMI_CORE_SYS_B2CR_COEFF_LOW		0x184
+#define HDMI_CORE_SYS_B2CR_COEFF_UP		0x188
+#define HDMI_CORE_SYS_RGB_OFFSET_LOW		0x18C
+#define HDMI_CORE_SYS_RGB_OFFSET_UP		0x190
+#define HDMI_CORE_SYS_Y_OFFSET_LOW		0x194
+#define HDMI_CORE_SYS_Y_OFFSET_UP		0x198
+#define HDMI_CORE_SYS_CBCR_OFFSET_LOW		0x19C
+#define HDMI_CORE_SYS_CBCR_OFFSET_UP		0x1A0
+#define HDMI_CORE_SYS_INTR_STATE		0x1C0
+#define HDMI_CORE_SYS_INTR1			0x1C4
+#define HDMI_CORE_SYS_INTR2			0x1C8
+#define HDMI_CORE_SYS_INTR3			0x1CC
+#define HDMI_CORE_SYS_INTR4			0x1D0
+#define HDMI_CORE_SYS_INTR_UNMASK1		0x1D4
+#define HDMI_CORE_SYS_INTR_UNMASK2		0x1D8
+#define HDMI_CORE_SYS_INTR_UNMASK3		0x1DC
+#define HDMI_CORE_SYS_INTR_UNMASK4		0x1E0
+#define HDMI_CORE_SYS_INTR_CTRL			0x1E4
+#define HDMI_CORE_SYS_TMDS_CTRL			0x208
+
+/* value definitions for HDMI_CORE_SYS_SYS_CTRL1 fields */
+#define HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC	0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC	0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS	0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE	0x1
+
+/* HDMI DDC E-DID */
+#define HDMI_CORE_DDC_ADDR			0x3B4
+#define HDMI_CORE_DDC_SEGM			0x3B8
+#define HDMI_CORE_DDC_OFFSET			0x3BC
+#define HDMI_CORE_DDC_COUNT1			0x3C0
+#define HDMI_CORE_DDC_COUNT2			0x3C4
+#define HDMI_CORE_DDC_STATUS			0x3C8
+#define HDMI_CORE_DDC_CMD			0x3CC
+#define HDMI_CORE_DDC_DATA			0x3D0
+
+/* HDMI IP Core Audio Video */
+
+#define HDMI_CORE_AV_ACR_CTRL			0x4
+#define HDMI_CORE_AV_FREQ_SVAL			0x8
+#define HDMI_CORE_AV_N_SVAL1			0xC
+#define HDMI_CORE_AV_N_SVAL2			0x10
+#define HDMI_CORE_AV_N_SVAL3			0x14
+#define HDMI_CORE_AV_CTS_SVAL1			0x18
+#define HDMI_CORE_AV_CTS_SVAL2			0x1C
+#define HDMI_CORE_AV_CTS_SVAL3			0x20
+#define HDMI_CORE_AV_CTS_HVAL1			0x24
+#define HDMI_CORE_AV_CTS_HVAL2			0x28
+#define HDMI_CORE_AV_CTS_HVAL3			0x2C
+#define HDMI_CORE_AV_AUD_MODE			0x50
+#define HDMI_CORE_AV_SPDIF_CTRL			0x54
+#define HDMI_CORE_AV_HW_SPDIF_FS		0x60
+#define HDMI_CORE_AV_SWAP_I2S			0x64
+#define HDMI_CORE_AV_SPDIF_ERTH			0x6C
+#define HDMI_CORE_AV_I2S_IN_MAP			0x70
+#define HDMI_CORE_AV_I2S_IN_CTRL		0x74
+#define HDMI_CORE_AV_I2S_CHST0			0x78
+#define HDMI_CORE_AV_I2S_CHST1			0x7C
+#define HDMI_CORE_AV_I2S_CHST2			0x80
+#define HDMI_CORE_AV_I2S_CHST4			0x84
+#define HDMI_CORE_AV_I2S_CHST5			0x88
+#define HDMI_CORE_AV_ASRC			0x8C
+#define HDMI_CORE_AV_I2S_IN_LEN			0x90
+#define HDMI_CORE_AV_HDMI_CTRL			0xBC
+#define HDMI_CORE_AV_AUDO_TXSTAT		0xC0
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1		0xCC
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2		0xD0
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3		0xD4
+#define HDMI_CORE_AV_TEST_TXCTRL		0xF0
+#define HDMI_CORE_AV_DPD			0xF4
+#define HDMI_CORE_AV_PB_CTRL1			0xF8
+#define HDMI_CORE_AV_PB_CTRL2			0xFC
+#define HDMI_CORE_AV_AVI_TYPE			0x100
+#define HDMI_CORE_AV_AVI_VERS			0x104
+#define HDMI_CORE_AV_AVI_LEN			0x108
+#define HDMI_CORE_AV_AVI_CHSUM			0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n)		((n) * 4 + 0x110)
+#define HDMI_CORE_AV_SPD_TYPE			0x180
+#define HDMI_CORE_AV_SPD_VERS			0x184
+#define HDMI_CORE_AV_SPD_LEN			0x188
+#define HDMI_CORE_AV_SPD_CHSUM			0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n)		((n) * 4 + 0x190)
+#define HDMI_CORE_AV_AUDIO_TYPE			0x200
+#define HDMI_CORE_AV_AUDIO_VERS			0x204
+#define HDMI_CORE_AV_AUDIO_LEN			0x208
+#define HDMI_CORE_AV_AUDIO_CHSUM		0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n)		((n) * 4 + 0x210)
+#define HDMI_CORE_AV_MPEG_TYPE			0x280
+#define HDMI_CORE_AV_MPEG_VERS			0x284
+#define HDMI_CORE_AV_MPEG_LEN			0x288
+#define HDMI_CORE_AV_MPEG_CHSUM			0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n)		((n) * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n)		((n) * 4 + 0x300)
+#define HDMI_CORE_AV_CP_BYTE1			0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n)		((n) * 4 + 0x380)
+#define HDMI_CORE_AV_CEC_ADDR_ID		0x3FC
+
+#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE		0x4
+#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE		0x4
+#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE		0x4
+#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE		0x4
+
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS		15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS		27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS		10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS		27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS		31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS		31
+
+enum hdmi_core_inputbus_width {
+	HDMI_INPUT_8BIT = 0,
+	HDMI_INPUT_10BIT = 1,
+	HDMI_INPUT_12BIT = 2
+};
+
+enum hdmi_core_dither_trunc {
+	HDMI_OUTPUTTRUNCATION_8BIT = 0,
+	HDMI_OUTPUTTRUNCATION_10BIT = 1,
+	HDMI_OUTPUTTRUNCATION_12BIT = 2,
+	HDMI_OUTPUTDITHER_8BIT = 3,
+	HDMI_OUTPUTDITHER_10BIT = 4,
+	HDMI_OUTPUTDITHER_12BIT = 5
+};
+
+enum hdmi_core_deepcolor_ed {
+	HDMI_DEEPCOLORPACKECTDISABLE = 0,
+	HDMI_DEEPCOLORPACKECTENABLE = 1
+};
+
+enum hdmi_core_packet_mode {
+	HDMI_PACKETMODERESERVEDVALUE = 0,
+	HDMI_PACKETMODE24BITPERPIXEL = 4,
+	HDMI_PACKETMODE30BITPERPIXEL = 5,
+	HDMI_PACKETMODE36BITPERPIXEL = 6,
+	HDMI_PACKETMODE48BITPERPIXEL = 7
+};
+
+enum hdmi_core_tclkselclkmult {
+	HDMI_FPLL05IDCK = 0,
+	HDMI_FPLL10IDCK = 1,
+	HDMI_FPLL20IDCK = 2,
+	HDMI_FPLL40IDCK = 3
+};
+
+enum hdmi_core_packet_ctrl {
+	HDMI_PACKETENABLE = 1,
+	HDMI_PACKETDISABLE = 0,
+	HDMI_PACKETREPEATON = 1,
+	HDMI_PACKETREPEATOFF = 0
+};
+
+enum hdmi_audio_i2s_config {
+	HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
+	HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
+	HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
+	HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
+	HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
+	HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
+	HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
+	HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
+	HDMI_AUDIO_I2S_SD0_EN = 1,
+	HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
+	HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
+	HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
+};
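+
+/*
+ * The SD*_EN entries above are bit flags and may be OR'd together (see
+ * hdmi4_core.c, which enables SD0..SD3 for the 8-channel layout); the
+ * remaining values are mutually exclusive one-bit settings.
+ */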
+
+struct hdmi_core_video_config {
+	enum hdmi_core_inputbus_width	ip_bus_width;
+	enum hdmi_core_dither_trunc	op_dither_truc;
+	enum hdmi_core_deepcolor_ed	deep_color_pkt;
+	enum hdmi_core_packet_mode	pkt_mode;
+	enum hdmi_core_hdmi_dvi		hdmi_dvi;
+	enum hdmi_core_tclkselclkmult	tclk_sel_clkmult;
+};
+
+struct hdmi_core_packet_enable_repeat {
+	u32	audio_pkt;
+	u32	audio_pkt_repeat;
+	u32	avi_infoframe;
+	u32	avi_infoframe_repeat;
+	u32	gen_cntrl_pkt;
+	u32	gen_cntrl_pkt_repeat;
+	u32	generic_pkt;
+	u32	generic_pkt_repeat;
+};
+
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+		struct hdmi_config *cfg);
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+		struct omap_dss_audio *audio, u32 pclk);
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size);
+#endif
+
+#endif
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
new file mode 100644
index 0000000..5586aaa
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -0,0 +1,423 @@
+/*
+ * Logic for the below structure:
+ * The user enters the CEA or VESA timings by specifying the HDMI/DVI code.
+ * There is a correspondence between CEA/VESA timing and code; please
+ * refer to section 6.3 in the HDMI 1.3 specification for the timing code.
+ *
+ * In the below structure, cea_vesa_timings corresponds to all the OMAP4
+ * supported CEA and VESA timing values. code_cea corresponds to the CEA
+ * code and is used to get the timing from the cea_vesa_timing array.
+ * Similarly with code_vesa. code_index is used for back mapping: once the
+ * EDID is read from the TV, it is parsed to find the timing values, which
+ * are then mapped to the corresponding CEA or VESA index.
+ */
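+
+/*
+ * For example, CEA code 16 below is 1080p60 (1920x1080 at a 148500 kHz
+ * pixel clock), so hdmi_get_timings(HDMI_HDMI, 16) returns that entry,
+ * while hdmi_get_code() on a matching set of timings maps back to
+ * { 16, HDMI_HDMI }.
+ */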
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <video/omapdss.h>
+
+#include "hdmi.h"
+
+static const struct hdmi_config cea_timings[] = {
+	{
+		{ 640, 480, 25200, 96, 16, 48, 2, 10, 33,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 1, HDMI_HDMI },
+	},
+	{
+		{ 720, 480, 27027, 62, 16, 60, 6, 9, 30,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 2, HDMI_HDMI },
+	},
+	{
+		{ 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 4, HDMI_HDMI },
+	},
+	{
+		{ 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			true, },
+		{ 5, HDMI_HDMI },
+	},
+	{
+		{ 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			true, },
+		{ 6, HDMI_HDMI },
+	},
+	{
+		{ 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 16, HDMI_HDMI },
+	},
+	{
+		{ 720, 576, 27000, 64, 12, 68, 5, 5, 39,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 17, HDMI_HDMI },
+	},
+	{
+		{ 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 19, HDMI_HDMI },
+	},
+	{
+		{ 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			true, },
+		{ 20, HDMI_HDMI },
+	},
+	{
+		{ 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			true, },
+		{ 21, HDMI_HDMI },
+	},
+	{
+		{ 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 29, HDMI_HDMI },
+	},
+	{
+		{ 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 31, HDMI_HDMI },
+	},
+	{
+		{ 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 32, HDMI_HDMI },
+	},
+	{
+		{ 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 35, HDMI_HDMI },
+	},
+	{
+		{ 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 37, HDMI_HDMI },
+	},
+};
+
+static const struct hdmi_config vesa_timings[] = {
+/* VESA timings from here on */
+	{
+		{ 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 4, HDMI_DVI },
+	},
+	{
+		{ 800, 600, 40000, 128, 40, 88, 4, 1, 23,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 9, HDMI_DVI },
+	},
+	{
+		{ 848, 480, 33750, 112, 16, 112, 8, 6, 23,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0xE, HDMI_DVI },
+	},
+	{
+		{ 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x17, HDMI_DVI },
+	},
+	{
+		{ 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x1C, HDMI_DVI },
+	},
+	{
+		{ 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x27, HDMI_DVI },
+	},
+	{
+		{ 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x20, HDMI_DVI },
+	},
+	{
+		{ 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x23, HDMI_DVI },
+	},
+	{
+		{ 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x10, HDMI_DVI },
+	},
+	{
+		{ 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x2A, HDMI_DVI },
+	},
+	{
+		{ 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x2F, HDMI_DVI },
+	},
+	{
+		{ 1680, 1050, 146250, 176, 104, 280, 6, 3, 30,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+			false, },
+		{ 0x3A, HDMI_DVI },
+	},
+	{
+		{ 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x51, HDMI_DVI },
+	},
+	{
+		{ 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x52, HDMI_DVI },
+	},
+	{
+		{ 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x16, HDMI_DVI },
+	},
+	{
+		{ 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x29, HDMI_DVI },
+	},
+	{
+		{ 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x39, HDMI_DVI },
+	},
+	{
+		{ 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x1B, HDMI_DVI },
+	},
+	{
+		{ 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+			OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x55, HDMI_DVI },
+	},
+	{
+		{ 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+			OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+			false, },
+		{ 0x44, HDMI_DVI },
+	},
+};
+
+const struct hdmi_config *hdmi_default_timing(void)
+{
+	return &vesa_timings[0];
+}
+
+static const struct hdmi_config *hdmi_find_timing(int code,
+			const struct hdmi_config *timings_arr, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (timings_arr[i].cm.code == code)
+			return &timings_arr[i];
+	}
+
+	return NULL;
+}
+
+const struct hdmi_config *hdmi_get_timings(int mode, int code)
+{
+	const struct hdmi_config *arr;
+	int len;
+
+	if (mode == HDMI_DVI) {
+		arr = vesa_timings;
+		len = ARRAY_SIZE(vesa_timings);
+	} else {
+		arr = cea_timings;
+		len = ARRAY_SIZE(cea_timings);
+	}
+
+	return hdmi_find_timing(code, arr, len);
+}
+
+static bool hdmi_timings_compare(struct omap_video_timings *timing1,
+			const struct omap_video_timings *timing2)
+{
+	int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
+
+	if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
+			DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
+		(timing2->x_res == timing1->x_res) &&
+		(timing2->y_res == timing1->y_res)) {
+
+		timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
+		timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
+		timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
+		timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp;
+
+		DSSDBG("timing1_hsync = %d timing1_vsync = %d "
+			"timing2_hsync = %d timing2_vsync = %d\n",
+			timing1_hsync, timing1_vsync,
+			timing2_hsync, timing2_vsync);
+
+		if ((timing1_hsync == timing2_hsync) &&
+			(timing1_vsync == timing2_vsync)) {
+			return true;
+		}
+	}
+	return false;
+}
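+
+/*
+ * Rounding the pixel clocks to MHz makes the compare tolerant of the
+ * 1000/1001 broadcast rates: e.g. 74250 and 74176 kHz both round to 74,
+ * so 720p60 and 720p59.94 timings match the same CEA entry as long as
+ * their blanking totals agree.
+ */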
+
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
+{
+	int i;
+	struct hdmi_cm cm = { .code = -1 };
+
+	DSSDBG("hdmi_get_code\n");
+
+	for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
+		if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
+			cm = cea_timings[i].cm;
+			goto end;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
+		if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
+			cm = vesa_timings[i].cm;
+			goto end;
+		}
+	}
+
+end:
+	return cm;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
+{
+	u32 deep_color;
+	bool deep_color_correct = false;
+
+	if (n == NULL || cts == NULL)
+		return -EINVAL;
+
+	/* TODO: When implemented, query deep color mode here. */
+	deep_color = 100;
+
+	/*
+	 * When using deep color, the default N value (as in the HDMI
+	 * specification) yields a non-integer CTS. Hence, we modify it
+	 * while keeping the restrictions described in section 7.2.1 of
+	 * the HDMI 1.4a specification.
+	 */
+	switch (sample_freq) {
+	case 32000:
+	case 48000:
+	case 96000:
+	case 192000:
+		if (deep_color == 125)
+			if (pclk == 27027 || pclk == 74250)
+				deep_color_correct = true;
+		if (deep_color == 150)
+			if (pclk == 27027)
+				deep_color_correct = true;
+		break;
+	case 44100:
+	case 88200:
+	case 176400:
+		if (deep_color == 125)
+			if (pclk == 27027)
+				deep_color_correct = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (deep_color_correct) {
+		switch (sample_freq) {
+		case 32000:
+			*n = 8192;
+			break;
+		case 44100:
+			*n = 12544;
+			break;
+		case 48000:
+			*n = 8192;
+			break;
+		case 88200:
+			*n = 25088;
+			break;
+		case 96000:
+			*n = 16384;
+			break;
+		case 176400:
+			*n = 50176;
+			break;
+		case 192000:
+			*n = 32768;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (sample_freq) {
+		case 32000:
+			*n = 4096;
+			break;
+		case 44100:
+			*n = 6272;
+			break;
+		case 48000:
+			*n = 6144;
+			break;
+		case 88200:
+			*n = 12544;
+			break;
+		case 96000:
+			*n = 12288;
+			break;
+		case 176400:
+			*n = 25088;
+			break;
+		case 192000:
+			*n = 24576;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	/* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+	*cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+
+	return 0;
+}
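+
+/*
+ * Worked example: for 1080p60 (pclk = 148500) at 48 kHz with no deep
+ * color (deep_color = 100), *n = 6144 and the formula above gives
+ * *cts = 148500 * (6144 / 128) * 100 / (48000 / 10) = 148500, matching
+ * the recommended N/CTS pair in the HDMI specification.
+ */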
+#endif
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/omap2/dss/hdmi_phy.c
new file mode 100644
index 0000000..45acb99
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_phy.c
@@ -0,0 +1,160 @@
+/*
+ * HDMI PHY
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
+{
+#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
+		hdmi_read_reg(phy->base, r))
+
+	DUMPPHY(HDMI_TXPHY_TX_CTRL);
+	DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
+	DUMPPHY(HDMI_TXPHY_POWER_CTRL);
+	DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *data)
+{
+	struct hdmi_wp_data *wp = data;
+	u32 irqstatus;
+
+	irqstatus = hdmi_wp_get_irqstatus(wp);
+	hdmi_wp_set_irqstatus(wp, irqstatus);
+
+	if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
+			irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+		/*
+		 * If we get both connect and disconnect interrupts at the same
+		 * time, turn off the PHY, clear interrupts, and restart, which
+		 * raises connect interrupt if a cable is connected, or nothing
+		 * if cable is not connected.
+		 */
+		hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+
+		hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
+				HDMI_IRQ_LINK_DISCONNECT);
+
+		hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+	} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
+		hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
+	} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+		hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+			struct hdmi_config *cfg)
+{
+	u16 r = 0;
+	u32 irqstatus;
+
+	hdmi_wp_clear_irqenable(wp, 0xffffffff);
+
+	irqstatus = hdmi_wp_get_irqstatus(wp);
+	hdmi_wp_set_irqstatus(wp, irqstatus);
+
+	r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+	if (r)
+		return r;
+
+	/*
+	 * Read address 0 to complete the SCP reset; this dummy access
+	 * makes sure the reset is actually done.
+	 */
+	hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL);
+
+	/*
+	 * Write to phy address 0 to configure the clock: select HFBITCLK
+	 * by writing the HDMI_TXPHY_TX_CONTROL_FREQOUT field.
+	 */
+	REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+	/* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+	hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+	/* Setup max LDO voltage */
+	REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+	/* Write to phy address 3 to change the polarity control */
+	REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+	r = request_threaded_irq(phy->irq, NULL, hdmi_irq_handler,
+				IRQF_ONESHOT, "OMAP HDMI", wp);
+	if (r) {
+		DSSERR("HDMI IRQ request failed\n");
+		hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+		return r;
+	}
+
+	hdmi_wp_set_irqenable(wp,
+		HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
+
+	return 0;
+}
+
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp)
+{
+	free_irq(phy->irq, wp);
+
+	hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+}
+
+#define PHY_OFFSET	0x300
+#define PHY_SIZE	0x100
+
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
+{
+	struct resource *res;
+	struct resource temp_res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_txphy");
+	if (!res) {
+		DSSDBG("can't get PHY mem resource by name\n");
+		/*
+		 * If hwmod/DT doesn't provide the memory resource split into
+		 * HDMI sub-blocks by name, fall back to the platform's first
+		 * resource. This code will be removed once the driver can get
+		 * the mem resources by name.
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			DSSERR("can't get PHY mem resource\n");
+			return -EINVAL;
+		}
+
+		temp_res.start = res->start + PHY_OFFSET;
+		temp_res.end = temp_res.start + PHY_SIZE - 1;
+		res = &temp_res;
+	}
+
+	phy->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!phy->base) {
+		DSSERR("can't ioremap TX PHY\n");
+		return -ENOMEM;
+	}
+
+	phy->irq = platform_get_irq(pdev, 0);
+	if (phy->irq < 0) {
+		DSSERR("platform_get_irq failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/omap2/dss/hdmi_pll.c
new file mode 100644
index 0000000..d3e6e78
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_pll.c
@@ -0,0 +1,230 @@
+/*
+ * HDMI PLL
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+#define HDMI_DEFAULT_REGN 16
+#define HDMI_DEFAULT_REGM2 1
+
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
+{
+#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
+		hdmi_read_reg(pll->base, r))
+
+	DUMPPLL(PLLCTRL_PLL_CONTROL);
+	DUMPPLL(PLLCTRL_PLL_STATUS);
+	DUMPPLL(PLLCTRL_PLL_GO);
+	DUMPPLL(PLLCTRL_CFG1);
+	DUMPPLL(PLLCTRL_CFG2);
+	DUMPPLL(PLLCTRL_CFG3);
+	DUMPPLL(PLLCTRL_SSC_CFG1);
+	DUMPPLL(PLLCTRL_SSC_CFG2);
+	DUMPPLL(PLLCTRL_CFG4);
+}
+
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy)
+{
+	struct hdmi_pll_info *pi = &pll->info;
+	unsigned long refclk;
+	u32 mf;
+
+	/* use our funky units of 10 kHz */
+	clkin /= 10000;
+
+	/*
+	 * The input clock is pre-divided by N + 1 (i.e. by regn; the
+	 * hardware field is programmed as regn - 1), the output of which
+	 * is the reference clock.
+	 */
+
+	pi->regn = HDMI_DEFAULT_REGN;
+
+	refclk = clkin / pi->regn;
+
+	pi->regm2 = HDMI_DEFAULT_REGM2;
+
+	/*
+	 * The multiplier is pixel_clk / ref_clk; both are in 10 kHz
+	 * units (scaled up by 100), which avoids losing the fractional
+	 * part to integer division.
+	 */
+	pi->regm = phy * pi->regm2 / refclk;
+
+	/*
+	 * The fractional multiplier is the remainder, i.e. the difference
+	 * between the required phy clock and what the integer multiplier
+	 * yields, scaled by 2^18 (262144) and divided by the reference
+	 * clock.
+	 */
+	mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
+	pi->regmf = pi->regm2 * mf / refclk;
+
+	/*
+	 * dcofreq should be set to 1 if the required pixel clock is
+	 * greater than 1000 MHz.
+	 */
+	pi->dcofreq = phy > 1000 * 100;
+	pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
+
+	/* Set the reference clock to sysclk reference */
+	pi->refsel = HDMI_REFSEL_SYSCLK;
+
+	DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
+	DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
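+
+/*
+ * Worked example (assuming, say, a 38.4 MHz sys clock): clkin becomes
+ * 3840 in 10 kHz units and refclk = 3840 / 16 = 240. For 1080p60,
+ * phy = 14850, so regm = 14850 / 240 = 61, and the remainder gives
+ * regmf = 210 * 262144 / 240 = 229376; with regm2 = 1 the PLL output
+ * is 240 * (61 + 229376 / 262144) = 14850, i.e. 148.5 MHz.
+ */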
+
+static int hdmi_pll_config(struct hdmi_pll_data *pll)
+{
+	u32 r;
+	struct hdmi_pll_info *fmt = &pll->info;
+
+	/* the PLL start always uses manual mode */
+	REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+	r = hdmi_read_reg(pll->base, PLLCTRL_CFG1);
+	r = FLD_MOD(r, fmt->regm, 20, 9);	/* CFG1_PLL_REGM */
+	r = FLD_MOD(r, fmt->regn - 1, 8, 1);	/* CFG1_PLL_REGN */
+	hdmi_write_reg(pll->base, PLLCTRL_CFG1, r);
+
+	r = hdmi_read_reg(pll->base, PLLCTRL_CFG2);
+
+	r = FLD_MOD(r, 0x0, 12, 12);	/* PLL_HIGHFREQ divide by 2 */
+	r = FLD_MOD(r, 0x1, 13, 13);	/* PLL_REFEN */
+	r = FLD_MOD(r, 0x0, 14, 14);	/* PHY_CLKINEN de-assert during locking */
+	r = FLD_MOD(r, fmt->refsel, 22, 21);	/* REFSEL */
+
+	if (fmt->dcofreq) {
+		/* divider programming for frequencies beyond 1000 MHz */
+		REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+		r = FLD_MOD(r, 0x4, 3, 1);	/* 1000MHz and 2000MHz */
+	} else {
+		r = FLD_MOD(r, 0x2, 3, 1);	/* 500MHz and 1000MHz */
+	}
+
+	hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
+
+	r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
+	r = FLD_MOD(r, fmt->regm2, 24, 18);
+	r = FLD_MOD(r, fmt->regmf, 17, 0);
+	hdmi_write_reg(pll->base, PLLCTRL_CFG4, r);
+
+	/* go now */
+	REG_FLD_MOD(pll->base, PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+	/* wait for bit change */
+	if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
+			0, 0, 1) != 1) {
+		pr_err("PLL GO bit not set\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Wait till the lock bit is set in PLL status */
+	if (hdmi_wait_for_bit_change(pll->base,
+			PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+		pr_err("cannot lock PLL\n");
+		pr_err("CFG1 0x%x\n",
+			hdmi_read_reg(pll->base, PLLCTRL_CFG1));
+		pr_err("CFG2 0x%x\n",
+			hdmi_read_reg(pll->base, PLLCTRL_CFG2));
+		pr_err("CFG4 0x%x\n",
+			hdmi_read_reg(pll->base, PLLCTRL_CFG4));
+		return -ETIMEDOUT;
+	}
+
+	pr_debug("PLL locked!\n");
+
+	return 0;
+}
+
+static int hdmi_pll_reset(struct hdmi_pll_data *pll)
+{
+	/* SYSRESET is controlled by the power FSM */
+	REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+	/* while the status bit reads 0x0, the reset is still in progress */
+	if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
+			!= 1) {
+		pr_err("Failed to sysreset PLL\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+	u16 r = 0;
+
+	r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+	if (r)
+		return r;
+
+	r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+	if (r)
+		return r;
+
+	r = hdmi_pll_reset(pll);
+	if (r)
+		return r;
+
+	r = hdmi_pll_config(pll);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+	hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+}
+
+#define PLL_OFFSET	0x200
+#define PLL_SIZE	0x100
+
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
+{
+	struct resource *res;
+	struct resource temp_res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_pllctrl");
+	if (!res) {
+		DSSDBG("can't get PLL mem resource by name\n");
+		/*
+		 * If hwmod/DT doesn't provide the memory resource split into
+		 * HDMI sub-blocks by name, fall back to the platform's first
+		 * resource. This code will be removed once the driver can get
+		 * the mem resources by name.
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			DSSERR("can't get PLL mem resource\n");
+			return -EINVAL;
+		}
+
+		temp_res.start = res->start + PLL_OFFSET;
+		temp_res.end = temp_res.start + PLL_SIZE - 1;
+		res = &temp_res;
+	}
+
+	pll->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!pll->base) {
+		DSSERR("can't ioremap PLLCTRL\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
new file mode 100644
index 0000000..8151d89
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -0,0 +1,271 @@
+/*
+ * HDMI wrapper
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(wp->base, r))
+
+	DUMPREG(HDMI_WP_REVISION);
+	DUMPREG(HDMI_WP_SYSCONFIG);
+	DUMPREG(HDMI_WP_IRQSTATUS_RAW);
+	DUMPREG(HDMI_WP_IRQSTATUS);
+	DUMPREG(HDMI_WP_IRQENABLE_SET);
+	DUMPREG(HDMI_WP_IRQENABLE_CLR);
+	DUMPREG(HDMI_WP_IRQWAKEEN);
+	DUMPREG(HDMI_WP_PWR_CTRL);
+	DUMPREG(HDMI_WP_DEBOUNCE);
+	DUMPREG(HDMI_WP_VIDEO_CFG);
+	DUMPREG(HDMI_WP_VIDEO_SIZE);
+	DUMPREG(HDMI_WP_VIDEO_TIMING_H);
+	DUMPREG(HDMI_WP_VIDEO_TIMING_V);
+	DUMPREG(HDMI_WP_WP_CLK);
+	DUMPREG(HDMI_WP_AUDIO_CFG);
+	DUMPREG(HDMI_WP_AUDIO_CFG2);
+	DUMPREG(HDMI_WP_AUDIO_CTRL);
+	DUMPREG(HDMI_WP_AUDIO_DATA);
+}
+
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp)
+{
+	return hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus)
+{
+	hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, irqstatus);
+	/* flush posted write */
+	hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+	hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_SET, mask);
+}
+
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+	hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_CLR, mask);
+}
+
+/* PHY_PWR_CMD */
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
+{
+	/* Return if already in the requested state */
+	if (REG_GET(wp->base, HDMI_WP_PWR_CTRL, 5, 4) == val)
+		return 0;
+
+	/* Command for power control of HDMI PHY */
+	REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 7, 6);
+
+	/* Status of the power control of HDMI PHY */
+	if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
+			!= val) {
+		pr_err("Failed to set PHY power mode to %d\n", val);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
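+
+/*
+ * The usual sequence, as driven from hdmi_phy.c, is OFF -> LDOON on
+ * enable, then LDOON -> TXON once the hot-plug interrupt reports a
+ * connected cable; a disconnect drops back to LDOON, and disable goes
+ * to OFF.
+ */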
+
+/* PLL_PWR_CMD */
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
+{
+	/* Command for power control of HDMI PLL */
+	REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 3, 2);
+
+	/* wait till PLL_PWR_STATUS is set */
+	if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
+			!= val) {
+		pr_err("Failed to set PLL_PWR_STATUS\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int hdmi_wp_video_start(struct hdmi_wp_data *wp)
+{
+	REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, true, 31, 31);
+
+	return 0;
+}
+
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
+{
+	REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, false, 31, 31);
+}
+
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+		struct hdmi_video_format *video_fmt)
+{
+	u32 l = 0;
+
+	REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, video_fmt->packing_mode,
+		10, 8);
+
+	l |= FLD_VAL(video_fmt->y_res, 31, 16);
+	l |= FLD_VAL(video_fmt->x_res, 15, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l);
+}
+
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+		struct omap_video_timings *timings)
+{
+	u32 r;
+	bool vsync_pol, hsync_pol;
+
+	pr_debug("Enter hdmi_wp_video_config_interface\n");
+
+	vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+	hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+
+	r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
+	r = FLD_MOD(r, vsync_pol, 7, 7);
+	r = FLD_MOD(r, hsync_pol, 6, 6);
+	r = FLD_MOD(r, timings->interlace, 3, 3);
+	r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
+	hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
+}
+
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+		struct omap_video_timings *timings)
+{
+	u32 timing_h = 0;
+	u32 timing_v = 0;
+
+	pr_debug("Enter hdmi_wp_video_config_timing\n");
+
+	timing_h |= FLD_VAL(timings->hbp, 31, 20);
+	timing_h |= FLD_VAL(timings->hfp, 19, 8);
+	timing_h |= FLD_VAL(timings->hsw, 7, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+	timing_v |= FLD_VAL(timings->vbp, 31, 20);
+	timing_v |= FLD_VAL(timings->vfp, 19, 8);
+	timing_v |= FLD_VAL(timings->vsw, 7, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
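+
+/*
+ * E.g. for the 1080p60 entry in hdmi_common.c (hbp = 148, hfp = 88,
+ * hsw = 44), timing_h above packs to (148 << 20) | (88 << 8) | 44.
+ */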
+
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+		struct omap_video_timings *timings, struct hdmi_config *param)
+{
+	pr_debug("Enter hdmi_wp_video_init_format\n");
+
+	video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+	video_fmt->y_res = param->timings.y_res;
+	video_fmt->x_res = param->timings.x_res;
+
+	timings->hbp = param->timings.hbp;
+	timings->hfp = param->timings.hfp;
+	timings->hsw = param->timings.hsw;
+	timings->vbp = param->timings.vbp;
+	timings->vfp = param->timings.vfp;
+	timings->vsw = param->timings.vsw;
+	timings->vsync_level = param->timings.vsync_level;
+	timings->hsync_level = param->timings.hsync_level;
+	timings->interlace = param->timings.interlace;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+		struct hdmi_audio_format *aud_fmt)
+{
+	u32 r;
+
+	DSSDBG("Enter hdmi_wp_audio_config_format\n");
+
+	r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
+	r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+	r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+	r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
+	r = FLD_MOD(r, aud_fmt->type, 4, 4);
+	r = FLD_MOD(r, aud_fmt->justification, 3, 3);
+	r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
+	r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
+	r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG, r);
+}
+
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+		struct hdmi_audio_dma *aud_dma)
+{
+	u32 r;
+
+	DSSDBG("Enter hdmi_wp_audio_config_dma\n");
+
+	r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG2);
+	r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
+	r = FLD_MOD(r, aud_dma->block_size, 7, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG2, r);
+
+	r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CTRL);
+	r = FLD_MOD(r, aud_dma->mode, 9, 9);
+	r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
+	hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CTRL, r);
+}
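+
+/*
+ * hdmi4_core.c, for instance, programs 16-bit LPCM through this helper
+ * with transfer_size = 0x10, block_size = 0xc0, DMA mode and a FIFO
+ * threshold of 0x20 samples.
+ */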
+
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable)
+{
+	REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+
+	return 0;
+}
+
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
+{
+	REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+
+	return 0;
+}
+#endif
+
+#define WP_SIZE	0x200
+
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
+{
+	struct resource *res;
+	struct resource temp_res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_wp");
+	if (!res) {
+		DSSDBG("can't get WP mem resource by name\n");
+		/*
+		 * If hwmod/DT doesn't provide the memory resource split into
+		 * HDMI sub-blocks by name, fall back to the platform's first
+		 * resource. This code will be removed once the driver can get
+		 * the mem resources by name.
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			DSSERR("can't get WP mem resource\n");
+			return -EINVAL;
+		}
+
+		temp_res.start = res->start;
+		temp_res.end = temp_res.start + WP_SIZE - 1;
+		res = &temp_res;
+	}
+
+	wp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!wp->base) {
+		DSSERR("can't ioremap HDMI WP\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
deleted file mode 100644
index 45215f4..0000000
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * ti_hdmi.h
- *
- * HDMI driver definition for TI OMAP4, DM81xx, DM38xx  Processor.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _TI_HDMI_H
-#define _TI_HDMI_H
-
-struct hdmi_ip_data;
-
-enum hdmi_pll_pwr {
-	HDMI_PLLPWRCMD_ALLOFF = 0,
-	HDMI_PLLPWRCMD_PLLONLY = 1,
-	HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
-	HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
-};
-
-enum hdmi_core_hdmi_dvi {
-	HDMI_DVI = 0,
-	HDMI_HDMI = 1
-};
-
-enum hdmi_clk_refsel {
-	HDMI_REFSEL_PCLK = 0,
-	HDMI_REFSEL_REF1 = 1,
-	HDMI_REFSEL_REF2 = 2,
-	HDMI_REFSEL_SYSCLK = 3
-};
-
-struct hdmi_cm {
-	int	code;
-	int	mode;
-};
-
-struct hdmi_config {
-	struct omap_video_timings timings;
-	struct hdmi_cm cm;
-};
-
-/* HDMI PLL structure */
-struct hdmi_pll_info {
-	u16 regn;
-	u16 regm;
-	u32 regmf;
-	u16 regm2;
-	u16 regsd;
-	u16 dcofreq;
-	enum hdmi_clk_refsel refsel;
-};
-
-struct ti_hdmi_ip_ops {
-
-	void (*video_configure)(struct hdmi_ip_data *ip_data);
-
-	int (*phy_enable)(struct hdmi_ip_data *ip_data);
-
-	void (*phy_disable)(struct hdmi_ip_data *ip_data);
-
-	int (*read_edid)(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-
-	int (*pll_enable)(struct hdmi_ip_data *ip_data);
-
-	void (*pll_disable)(struct hdmi_ip_data *ip_data);
-
-	int (*video_enable)(struct hdmi_ip_data *ip_data);
-
-	void (*video_disable)(struct hdmi_ip_data *ip_data);
-
-	void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-	void (*dump_core)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-	void (*dump_pll)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-	void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-	int (*audio_enable)(struct hdmi_ip_data *ip_data);
-
-	void (*audio_disable)(struct hdmi_ip_data *ip_data);
-
-	int (*audio_start)(struct hdmi_ip_data *ip_data);
-
-	void (*audio_stop)(struct hdmi_ip_data *ip_data);
-
-	int (*audio_config)(struct hdmi_ip_data *ip_data,
-		struct omap_dss_audio *audio);
-
-	int (*audio_get_dma_port)(u32 *offset, u32 *size);
-#endif
-
-};
-
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_avi {
-	/* Y0, Y1 rgb,yCbCr */
-	u8	db1_format;
-	/* A0  Active information Present */
-	u8	db1_active_info;
-	/* B0, B1 Bar info data valid */
-	u8	db1_bar_info_dv;
-	/* S0, S1 scan information */
-	u8	db1_scan_info;
-	/* C0, C1 colorimetry */
-	u8	db2_colorimetry;
-	/* M0, M1 Aspect ratio (4:3, 16:9) */
-	u8	db2_aspect_ratio;
-	/* R0...R3 Active format aspect ratio */
-	u8	db2_active_fmt_ar;
-	/* ITC IT content. */
-	u8	db3_itc;
-	/* EC0, EC1, EC2 Extended colorimetry */
-	u8	db3_ec;
-	/* Q1, Q0 Quantization range */
-	u8	db3_q_range;
-	/* SC1, SC0 Non-uniform picture scaling */
-	u8	db3_nup_scaling;
-	/* VIC0..6 Video format identification */
-	u8	db4_videocode;
-	/* PR0..PR3 Pixel repetition factor */
-	u8	db5_pixel_repeat;
-	/* Line number end of top bar */
-	u16	db6_7_line_eoftop;
-	/* Line number start of bottom bar */
-	u16	db8_9_line_sofbottom;
-	/* Pixel number end of left bar */
-	u16	db10_11_pixel_eofleft;
-	/* Pixel number start of right bar */
-	u16	db12_13_pixel_sofright;
-};
-
-struct hdmi_ip_data {
-	void __iomem	*base_wp;	/* HDMI wrapper */
-	unsigned long	core_sys_offset;
-	unsigned long	core_av_offset;
-	unsigned long	pll_offset;
-	unsigned long	phy_offset;
-	int		irq;
-	const struct ti_hdmi_ip_ops *ops;
-	struct hdmi_config cfg;
-	struct hdmi_pll_info pll_data;
-	struct hdmi_core_infoframe_avi avi_cfg;
-
-	/* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
-	struct mutex lock;
-};
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
-		struct omap_dss_audio *audio);
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size);
-#endif
-#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
deleted file mode 100644
index 3dfe009..0000000
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ /dev/null
@@ -1,1427 +0,0 @@
-/*
- * ti_hdmi_4xxx_ip.c
- *
- * HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Yong Zhi
- *	Mythri pk <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/seq_file.h>
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-#include <sound/asound.h>
-#include <sound/asoundef.h>
-#endif
-
-#include "ti_hdmi_4xxx_ip.h"
-#include "dss.h"
-#include "dss_features.h"
-
-#define HDMI_IRQ_LINK_CONNECT		(1 << 25)
-#define HDMI_IRQ_LINK_DISCONNECT	(1 << 26)
-
-static inline void hdmi_write_reg(void __iomem *base_addr,
-				const u16 idx, u32 val)
-{
-	__raw_writel(val, base_addr + idx);
-}
-
-static inline u32 hdmi_read_reg(void __iomem *base_addr,
-				const u16 idx)
-{
-	return __raw_readl(base_addr + idx);
-}
-
-static inline void __iomem *hdmi_wp_base(struct hdmi_ip_data *ip_data)
-{
-	return ip_data->base_wp;
-}
-
-static inline void __iomem *hdmi_phy_base(struct hdmi_ip_data *ip_data)
-{
-	return ip_data->base_wp + ip_data->phy_offset;
-}
-
-static inline void __iomem *hdmi_pll_base(struct hdmi_ip_data *ip_data)
-{
-	return ip_data->base_wp + ip_data->pll_offset;
-}
-
-static inline void __iomem *hdmi_av_base(struct hdmi_ip_data *ip_data)
-{
-	return ip_data->base_wp + ip_data->core_av_offset;
-}
-
-static inline void __iomem *hdmi_core_sys_base(struct hdmi_ip_data *ip_data)
-{
-	return ip_data->base_wp + ip_data->core_sys_offset;
-}
-
-static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
-				const u16 idx,
-				int b2, int b1, u32 val)
-{
-	u32 t = 0;
-	while (val != REG_GET(base_addr, idx, b2, b1)) {
-		udelay(1);
-		if (t++ > 10000)
-			return !val;
-	}
-	return val;
-}
-
-static int hdmi_pll_init(struct hdmi_ip_data *ip_data)
-{
-	u32 r;
-	void __iomem *pll_base = hdmi_pll_base(ip_data);
-	struct hdmi_pll_info *fmt = &ip_data->pll_data;
-
-	/* PLL start always use manual mode */
-	REG_FLD_MOD(pll_base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
-	r = hdmi_read_reg(pll_base, PLLCTRL_CFG1);
-	r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
-	r = FLD_MOD(r, fmt->regn - 1, 8, 1);  /* CFG1_PLL_REGN */
-
-	hdmi_write_reg(pll_base, PLLCTRL_CFG1, r);
-
-	r = hdmi_read_reg(pll_base, PLLCTRL_CFG2);
-
-	r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
-	r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
-	r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
-	r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
-
-	if (fmt->dcofreq) {
-		/* divider programming for frequency beyond 1000Mhz */
-		REG_FLD_MOD(pll_base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
-		r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
-	} else {
-		r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
-	}
-
-	hdmi_write_reg(pll_base, PLLCTRL_CFG2, r);
-
-	r = hdmi_read_reg(pll_base, PLLCTRL_CFG4);
-	r = FLD_MOD(r, fmt->regm2, 24, 18);
-	r = FLD_MOD(r, fmt->regmf, 17, 0);
-
-	hdmi_write_reg(pll_base, PLLCTRL_CFG4, r);
-
-	/* go now */
-	REG_FLD_MOD(pll_base, PLLCTRL_PLL_GO, 0x1, 0, 0);
-
-	/* wait for bit change */
-	if (hdmi_wait_for_bit_change(pll_base, PLLCTRL_PLL_GO,
-							0, 0, 1) != 1) {
-		pr_err("PLL GO bit not set\n");
-		return -ETIMEDOUT;
-	}
-
-	/* Wait till the lock bit is set in PLL status */
-	if (hdmi_wait_for_bit_change(pll_base,
-				PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
-		pr_err("cannot lock PLL\n");
-		pr_err("CFG1 0x%x\n",
-			hdmi_read_reg(pll_base, PLLCTRL_CFG1));
-		pr_err("CFG2 0x%x\n",
-			hdmi_read_reg(pll_base, PLLCTRL_CFG2));
-		pr_err("CFG4 0x%x\n",
-			hdmi_read_reg(pll_base, PLLCTRL_CFG4));
-		return -ETIMEDOUT;
-	}
-
-	pr_debug("PLL locked!\n");
-
-	return 0;
-}
-
-/* PHY_PWR_CMD */
-static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val)
-{
-	/* Return if already the state */
-	if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val)
-		return 0;
-
-	/* Command for power control of HDMI PHY */
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
-
-	/* Status of the power control of HDMI PHY */
-	if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
-				HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
-		pr_err("Failed to set PHY power mode to %d\n", val);
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-/* PLL_PWR_CMD */
-static int hdmi_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val)
-{
-	/* Command for power control of HDMI PLL */
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 3, 2);
-
-	/* wait till PHY_PWR_STATUS is set */
-	if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL,
-						1, 0, val) != val) {
-		pr_err("Failed to set PLL_PWR_STATUS\n");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-static int hdmi_pll_reset(struct hdmi_ip_data *ip_data)
-{
-	/* SYSRESET  controlled by power FSM */
-	REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
-
-	/* READ 0x0 reset is in progress */
-	if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
-				PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
-		pr_err("Failed to sysreset PLL\n");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data)
-{
-	u16 r = 0;
-
-	r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
-	if (r)
-		return r;
-
-	r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
-	if (r)
-		return r;
-
-	r = hdmi_pll_reset(ip_data);
-	if (r)
-		return r;
-
-	r = hdmi_pll_init(ip_data);
-	if (r)
-		return r;
-
-	return 0;
-}
-
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
-{
-	hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
-}
-
-static irqreturn_t hdmi_irq_handler(int irq, void *data)
-{
-	struct hdmi_ip_data *ip_data = data;
-	void __iomem *wp_base = hdmi_wp_base(ip_data);
-	u32 irqstatus;
-
-	irqstatus = hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-	hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS, irqstatus);
-	/* flush posted write */
-	hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
-	if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
-			irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
-		/*
-		 * If we get both connect and disconnect interrupts at the same
-		 * time, turn off the PHY, clear interrupts, and restart, which
-		 * raises connect interrupt if a cable is connected, or nothing
-		 * if cable is not connected.
-		 */
-		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-
-		hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS,
-			HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-		/* flush posted write */
-		hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
-		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
-	} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
-		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
-	} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
-		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
-	}
-
-	return IRQ_HANDLED;
-}
-
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
-{
-	u16 r = 0;
-	void __iomem *phy_base = hdmi_phy_base(ip_data);
-
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_CLR,
-			0xffffffff);
-
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQSTATUS,
-			HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
-	r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
-	if (r)
-		return r;
-
-	/*
-	 * Read address 0 in order to get the SCP reset done completed
-	 * Dummy access performed to make sure reset is done
-	 */
-	hdmi_read_reg(phy_base, HDMI_TXPHY_TX_CTRL);
-
-	/*
-	 * Write to phy address 0 to configure the clock
-	 * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
-	 */
-	REG_FLD_MOD(phy_base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
-
-	/* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
-	hdmi_write_reg(phy_base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
-
-	/* Setup max LDO voltage */
-	REG_FLD_MOD(phy_base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
-
-	/* Write to phy address 3 to change the polarity control */
-	REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
-
-	r = request_threaded_irq(ip_data->irq, NULL, hdmi_irq_handler,
-				 IRQF_ONESHOT, "OMAP HDMI", ip_data);
-	if (r) {
-		DSSERR("HDMI IRQ request failed\n");
-		hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-		return r;
-	}
-
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET,
-			HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
-	return 0;
-}
-
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
-{
-	free_irq(ip_data->irq, ip_data);
-
-	hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-}
-
-static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
-{
-	void __iomem *base = hdmi_core_sys_base(ip_data);
-
-	/* Turn on CLK for DDC */
-	REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
-
-	/* IN_PROG */
-	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 1) {
-		/* Abort transaction */
-		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xf, 3, 0);
-		/* IN_PROG */
-		if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
-					4, 4, 0) != 0) {
-			DSSERR("Timeout aborting DDC transaction\n");
-			return -ETIMEDOUT;
-		}
-	}
-
-	/* Clk SCL Devices */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xA, 3, 0);
-
-	/* HDMI_CORE_DDC_STATUS_IN_PROG */
-	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
-				4, 4, 0) != 0) {
-		DSSERR("Timeout starting SCL clock\n");
-		return -ETIMEDOUT;
-	}
-
-	/* Clear FIFO */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x9, 3, 0);
-
-	/* HDMI_CORE_DDC_STATUS_IN_PROG */
-	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
-				4, 4, 0) != 0) {
-		DSSERR("Timeout clearing DDC fifo\n");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
-		u8 *pedid, int ext)
-{
-	void __iomem *base = hdmi_core_sys_base(ip_data);
-	u32 i;
-	char checksum;
-	u32 offset = 0;
-
-	/* HDMI_CORE_DDC_STATUS_IN_PROG */
-	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
-				4, 4, 0) != 0) {
-		DSSERR("Timeout waiting DDC to be ready\n");
-		return -ETIMEDOUT;
-	}
-
-	if (ext % 2 != 0)
-		offset = 0x80;
-
-	/* Load Segment Address Register */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
-
-	/* Load Slave Address Register */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
-
-	/* Load Offset Address Register */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
-
-	/* Load Byte Count */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
-	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
-
-	/* Set DDC_CMD */
-	if (ext)
-		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
-	else
-		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
-
-	/* HDMI_CORE_DDC_STATUS_BUS_LOW */
-	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
-		pr_err("I2C Bus Low?\n");
-		return -EIO;
-	}
-	/* HDMI_CORE_DDC_STATUS_NO_ACK */
-	if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
-		pr_err("I2C No Ack\n");
-		return -EIO;
-	}
-
-	for (i = 0; i < 0x80; ++i) {
-		int t;
-
-		/* IN_PROG */
-		if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 0) {
-			DSSERR("operation stopped when reading edid\n");
-			return -EIO;
-		}
-
-		t = 0;
-		/* FIFO_EMPTY */
-		while (REG_GET(base, HDMI_CORE_DDC_STATUS, 2, 2) == 1) {
-			if (t++ > 10000) {
-				DSSERR("timeout reading edid\n");
-				return -ETIMEDOUT;
-			}
-			udelay(1);
-		}
-
-		pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
-	}
-
-	checksum = 0;
-	for (i = 0; i < 0x80; ++i)
-		checksum += pedid[i];
-
-	if (checksum != 0) {
-		pr_err("E-EDID checksum failed!!\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
-				u8 *edid, int len)
-{
-	int r, l;
-
-	if (len < 128)
-		return -EINVAL;
-
-	r = hdmi_core_ddc_init(ip_data);
-	if (r)
-		return r;
-
-	r = hdmi_core_ddc_edid(ip_data, edid, 0);
-	if (r)
-		return r;
-
-	l = 128;
-
-	if (len >= 128 * 2 && edid[0x7e] > 0) {
-		r = hdmi_core_ddc_edid(ip_data, edid + 0x80, 1);
-		if (r)
-			return r;
-		l += 128;
-	}
-
-	return l;
-}
-
-static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
-			struct hdmi_core_infoframe_avi *avi_cfg,
-			struct hdmi_core_packet_enable_repeat *repeat_cfg)
-{
-	pr_debug("Enter hdmi_core_init\n");
-
-	/* video core */
-	video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
-	video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
-	video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
-	video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
-	video_cfg->hdmi_dvi = HDMI_DVI;
-	video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
-
-	/* info frame */
-	avi_cfg->db1_format = 0;
-	avi_cfg->db1_active_info = 0;
-	avi_cfg->db1_bar_info_dv = 0;
-	avi_cfg->db1_scan_info = 0;
-	avi_cfg->db2_colorimetry = 0;
-	avi_cfg->db2_aspect_ratio = 0;
-	avi_cfg->db2_active_fmt_ar = 0;
-	avi_cfg->db3_itc = 0;
-	avi_cfg->db3_ec = 0;
-	avi_cfg->db3_q_range = 0;
-	avi_cfg->db3_nup_scaling = 0;
-	avi_cfg->db4_videocode = 0;
-	avi_cfg->db5_pixel_repeat = 0;
-	avi_cfg->db6_7_line_eoftop = 0;
-	avi_cfg->db8_9_line_sofbottom = 0;
-	avi_cfg->db10_11_pixel_eofleft = 0;
-	avi_cfg->db12_13_pixel_sofright = 0;
-
-	/* packet enable and repeat */
-	repeat_cfg->audio_pkt = 0;
-	repeat_cfg->audio_pkt_repeat = 0;
-	repeat_cfg->avi_infoframe = 0;
-	repeat_cfg->avi_infoframe_repeat = 0;
-	repeat_cfg->gen_cntrl_pkt = 0;
-	repeat_cfg->gen_cntrl_pkt_repeat = 0;
-	repeat_cfg->generic_pkt = 0;
-	repeat_cfg->generic_pkt_repeat = 0;
-}
-
-static void hdmi_core_powerdown_disable(struct hdmi_ip_data *ip_data)
-{
-	pr_debug("Enter hdmi_core_powerdown_disable\n");
-	REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_release(struct hdmi_ip_data *ip_data)
-{
-	pr_debug("Enter hdmi_core_swreset_release\n");
-	REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_assert(struct hdmi_ip_data *ip_data)
-{
-	pr_debug("Enter hdmi_core_swreset_assert\n");
-	REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x1, 0, 0);
-}
-
-/* HDMI_CORE_VIDEO_CONFIG */
-static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
-				struct hdmi_core_video_config *cfg)
-{
-	u32 r = 0;
-	void __iomem *core_sys_base = hdmi_core_sys_base(ip_data);
-
-	/* sys_ctrl1 default configuration not tunable */
-	r = hdmi_read_reg(core_sys_base, HDMI_CORE_CTRL1);
-	r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
-	r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
-	r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
-	r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
-	hdmi_write_reg(core_sys_base, HDMI_CORE_CTRL1, r);
-
-	REG_FLD_MOD(core_sys_base,
-			HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
-
-	/* Vid_Mode */
-	r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE);
-
-	/* dither truncation configuration */
-	if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
-		r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
-		r = FLD_MOD(r, 1, 5, 5);
-	} else {
-		r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
-		r = FLD_MOD(r, 0, 5, 5);
-	}
-	hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
-
-	/* HDMI_Ctrl */
-	r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL);
-	r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
-	r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
-	r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
-	hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL, r);
-
-	/* TMDS_CTRL */
-	REG_FLD_MOD(core_sys_base,
-			HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
-}
-
-static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data)
-{
-	u32 val;
-	char sum = 0, checksum = 0;
-	void __iomem *av_base = hdmi_av_base(ip_data);
-	struct hdmi_core_infoframe_avi info_avi = ip_data->avi_cfg;
-
-	sum += 0x82 + 0x002 + 0x00D;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_VERS, 0x002);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_LEN, 0x00D);
-
-	val = (info_avi.db1_format << 5) |
-		(info_avi.db1_active_info << 4) |
-		(info_avi.db1_bar_info_dv << 2) |
-		(info_avi.db1_scan_info);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(0), val);
-	sum += val;
-
-	val = (info_avi.db2_colorimetry << 6) |
-		(info_avi.db2_aspect_ratio << 4) |
-		(info_avi.db2_active_fmt_ar);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(1), val);
-	sum += val;
-
-	val = (info_avi.db3_itc << 7) |
-		(info_avi.db3_ec << 4) |
-		(info_avi.db3_q_range << 2) |
-		(info_avi.db3_nup_scaling);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(2), val);
-	sum += val;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(3),
-					info_avi.db4_videocode);
-	sum += info_avi.db4_videocode;
-
-	val = info_avi.db5_pixel_repeat;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(4), val);
-	sum += val;
-
-	val = info_avi.db6_7_line_eoftop & 0x00FF;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(5), val);
-	sum += val;
-
-	val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(6), val);
-	sum += val;
-
-	val = info_avi.db8_9_line_sofbottom & 0x00FF;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(7), val);
-	sum += val;
-
-	val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(8), val);
-	sum += val;
-
-	val = info_avi.db10_11_pixel_eofleft & 0x00FF;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(9), val);
-	sum += val;
-
-	val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(10), val);
-	sum += val;
-
-	val = info_avi.db12_13_pixel_sofright & 0x00FF;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(11), val);
-	sum += val;
-
-	val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(12), val);
-	sum += val;
-
-	checksum = 0x100 - sum;
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
-}
-
-static void hdmi_core_av_packet_config(struct hdmi_ip_data *ip_data,
-		struct hdmi_core_packet_enable_repeat repeat_cfg)
-{
-	/* enable/repeat the infoframe */
-	hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL1,
-		(repeat_cfg.audio_pkt << 5) |
-		(repeat_cfg.audio_pkt_repeat << 4) |
-		(repeat_cfg.avi_infoframe << 1) |
-		(repeat_cfg.avi_infoframe_repeat));
-
-	/* enable/repeat the packet */
-	hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL2,
-		(repeat_cfg.gen_cntrl_pkt << 3) |
-		(repeat_cfg.gen_cntrl_pkt_repeat << 2) |
-		(repeat_cfg.generic_pkt << 1) |
-		(repeat_cfg.generic_pkt_repeat));
-}
-
-static void hdmi_wp_init(struct omap_video_timings *timings,
-			struct hdmi_video_format *video_fmt)
-{
-	pr_debug("Enter hdmi_wp_init\n");
-
-	timings->hbp = 0;
-	timings->hfp = 0;
-	timings->hsw = 0;
-	timings->vbp = 0;
-	timings->vfp = 0;
-	timings->vsw = 0;
-
-	video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
-	video_fmt->y_res = 0;
-	video_fmt->x_res = 0;
-
-}
-
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
-	return 0;
-}
-
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
-}
-
-static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
-	struct omap_video_timings *timings, struct hdmi_config *param)
-{
-	pr_debug("Enter hdmi_wp_video_init_format\n");
-
-	video_fmt->y_res = param->timings.y_res;
-	video_fmt->x_res = param->timings.x_res;
-
-	timings->hbp = param->timings.hbp;
-	timings->hfp = param->timings.hfp;
-	timings->hsw = param->timings.hsw;
-	timings->vbp = param->timings.vbp;
-	timings->vfp = param->timings.vfp;
-	timings->vsw = param->timings.vsw;
-}
-
-static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
-		struct hdmi_video_format *video_fmt)
-{
-	u32 l = 0;
-
-	REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG,
-			video_fmt->packing_mode, 10, 8);
-
-	l |= FLD_VAL(video_fmt->y_res, 31, 16);
-	l |= FLD_VAL(video_fmt->x_res, 15, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_SIZE, l);
-}
-
-static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data)
-{
-	u32 r;
-	bool vsync_pol, hsync_pol;
-	pr_debug("Enter hdmi_wp_video_config_interface\n");
-
-	vsync_pol = ip_data->cfg.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
-	hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
-
-	r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
-	r = FLD_MOD(r, vsync_pol, 7, 7);
-	r = FLD_MOD(r, hsync_pol, 6, 6);
-	r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3);
-	r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
-}
-
-static void hdmi_wp_video_config_timing(struct hdmi_ip_data *ip_data,
-		struct omap_video_timings *timings)
-{
-	u32 timing_h = 0;
-	u32 timing_v = 0;
-
-	pr_debug("Enter hdmi_wp_video_config_timing\n");
-
-	timing_h |= FLD_VAL(timings->hbp, 31, 20);
-	timing_h |= FLD_VAL(timings->hfp, 19, 8);
-	timing_h |= FLD_VAL(timings->hsw, 7, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_H, timing_h);
-
-	timing_v |= FLD_VAL(timings->vbp, 31, 20);
-	timing_v |= FLD_VAL(timings->vfp, 19, 8);
-	timing_v |= FLD_VAL(timings->vsw, 7, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_V, timing_v);
-}
-
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
-{
-	/* HDMI */
-	struct omap_video_timings video_timing;
-	struct hdmi_video_format video_format;
-	/* HDMI core */
-	struct hdmi_core_infoframe_avi *avi_cfg = &ip_data->avi_cfg;
-	struct hdmi_core_video_config v_core_cfg;
-	struct hdmi_core_packet_enable_repeat repeat_cfg;
-	struct hdmi_config *cfg = &ip_data->cfg;
-
-	hdmi_wp_init(&video_timing, &video_format);
-
-	hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
-
-	hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
-
-	hdmi_wp_video_config_timing(ip_data, &video_timing);
-
-	/* video config */
-	video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
-
-	hdmi_wp_video_config_format(ip_data, &video_format);
-
-	hdmi_wp_video_config_interface(ip_data);
-
-	/*
-	 * configure core video part
-	 * set software reset in the core
-	 */
-	hdmi_core_swreset_assert(ip_data);
-
-	/* power down off */
-	hdmi_core_powerdown_disable(ip_data);
-
-	v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
-	v_core_cfg.hdmi_dvi = cfg->cm.mode;
-
-	hdmi_core_video_config(ip_data, &v_core_cfg);
-
-	/* release software reset in the core */
-	hdmi_core_swreset_release(ip_data);
-
-	/*
-	 * configure the AVI infoframe packet;
-	 * see the CEA-861-D specification, page 65
-	 */
-	avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
-	avi_cfg->db1_active_info =
-			HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
-	avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
-	avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
-	avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
-	avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
-	avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
-	avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
-	avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
-	avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
-	avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
-	avi_cfg->db4_videocode = cfg->cm.code;
-	avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
-	avi_cfg->db6_7_line_eoftop = 0;
-	avi_cfg->db8_9_line_sofbottom = 0;
-	avi_cfg->db10_11_pixel_eofleft = 0;
-	avi_cfg->db12_13_pixel_sofright = 0;
-
-	hdmi_core_aux_infoframe_avi_config(ip_data);
-
-	/* enable/repeat the infoframe */
-	repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
-	repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
-	/* wakeup */
-	repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
-	repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
-	hdmi_core_av_packet_config(ip_data, repeat_cfg);
-}
-
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_wp_base(ip_data), r))
-
-	DUMPREG(HDMI_WP_REVISION);
-	DUMPREG(HDMI_WP_SYSCONFIG);
-	DUMPREG(HDMI_WP_IRQSTATUS_RAW);
-	DUMPREG(HDMI_WP_IRQSTATUS);
-	DUMPREG(HDMI_WP_PWR_CTRL);
-	DUMPREG(HDMI_WP_IRQENABLE_SET);
-	DUMPREG(HDMI_WP_VIDEO_CFG);
-	DUMPREG(HDMI_WP_VIDEO_SIZE);
-	DUMPREG(HDMI_WP_VIDEO_TIMING_H);
-	DUMPREG(HDMI_WP_VIDEO_TIMING_V);
-	DUMPREG(HDMI_WP_WP_CLK);
-	DUMPREG(HDMI_WP_AUDIO_CFG);
-	DUMPREG(HDMI_WP_AUDIO_CFG2);
-	DUMPREG(HDMI_WP_AUDIO_CTRL);
-	DUMPREG(HDMI_WP_AUDIO_DATA);
-}
-
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_pll_base(ip_data), r))
-
-	DUMPPLL(PLLCTRL_PLL_CONTROL);
-	DUMPPLL(PLLCTRL_PLL_STATUS);
-	DUMPPLL(PLLCTRL_PLL_GO);
-	DUMPPLL(PLLCTRL_CFG1);
-	DUMPPLL(PLLCTRL_CFG2);
-	DUMPPLL(PLLCTRL_CFG3);
-	DUMPPLL(PLLCTRL_CFG4);
-}
-
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-	int i;
-
-#define CORE_REG(i, name) name(i)
-#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
-#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_av_base(ip_data), r))
-#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
-		(i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
-		hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
-
-	DUMPCORE(HDMI_CORE_SYS_VND_IDL);
-	DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
-	DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
-	DUMPCORE(HDMI_CORE_SYS_DEV_REV);
-	DUMPCORE(HDMI_CORE_SYS_SRST);
-	DUMPCORE(HDMI_CORE_CTRL1);
-	DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
-	DUMPCORE(HDMI_CORE_SYS_DE_DLY);
-	DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
-	DUMPCORE(HDMI_CORE_SYS_DE_TOP);
-	DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
-	DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
-	DUMPCORE(HDMI_CORE_SYS_DE_LINL);
-	DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
-	DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
-	DUMPCORE(HDMI_CORE_SYS_VID_MODE);
-	DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
-	DUMPCORE(HDMI_CORE_SYS_INTR1);
-	DUMPCORE(HDMI_CORE_SYS_INTR2);
-	DUMPCORE(HDMI_CORE_SYS_INTR3);
-	DUMPCORE(HDMI_CORE_SYS_INTR4);
-	DUMPCORE(HDMI_CORE_SYS_UMASK1);
-	DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
-
-	DUMPCORE(HDMI_CORE_DDC_ADDR);
-	DUMPCORE(HDMI_CORE_DDC_SEGM);
-	DUMPCORE(HDMI_CORE_DDC_OFFSET);
-	DUMPCORE(HDMI_CORE_DDC_COUNT1);
-	DUMPCORE(HDMI_CORE_DDC_COUNT2);
-	DUMPCORE(HDMI_CORE_DDC_STATUS);
-	DUMPCORE(HDMI_CORE_DDC_CMD);
-	DUMPCORE(HDMI_CORE_DDC_DATA);
-
-	DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
-	DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
-	DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
-	DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
-	DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
-	DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
-	DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
-	DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
-	DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
-	DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
-	DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
-	DUMPCOREAV(HDMI_CORE_AV_ASRC);
-	DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
-	DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
-	DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
-	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
-	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
-	DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
-	DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
-	DUMPCOREAV(HDMI_CORE_AV_DPD);
-	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
-	DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
-	DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
-	DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
-	DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
-	DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
-
-	for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
-
-	DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
-	DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
-	DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
-	DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
-
-	for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
-
-	DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
-	DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
-	DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
-	DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
-
-	for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
-
-	DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
-	DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
-	DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
-	DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
-
-	for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
-
-	for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
-
-	DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
-
-	for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
-		DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
-
-	DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
-}
-
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
-		hdmi_read_reg(hdmi_phy_base(ip_data), r))
-
-	DUMPPHY(HDMI_TXPHY_TX_CTRL);
-	DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
-	DUMPPHY(HDMI_TXPHY_POWER_CTRL);
-	DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
-					struct hdmi_audio_format *aud_fmt)
-{
-	u32 r;
-
-	DSSDBG("Enter hdmi_wp_audio_config_format\n");
-
-	r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG);
-	r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
-	r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
-	r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
-	r = FLD_MOD(r, aud_fmt->type, 4, 4);
-	r = FLD_MOD(r, aud_fmt->justification, 3, 3);
-	r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
-	r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
-	r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
-}
-
-static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
-					struct hdmi_audio_dma *aud_dma)
-{
-	u32 r;
-
-	DSSDBG("Enter hdmi_wp_audio_config_dma\n");
-
-	r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2);
-	r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
-	r = FLD_MOD(r, aud_dma->block_size, 7, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2, r);
-
-	r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
-	r = FLD_MOD(r, aud_dma->mode, 9, 9);
-	r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
-	hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
-}
-
-static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
-					struct hdmi_core_audio_config *cfg)
-{
-	u32 r;
-	void __iomem *av_base = hdmi_av_base(ip_data);
-
-	/*
-	 * Parameters for generation of Audio Clock Recovery packets
-	 */
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
-
-	if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
-		REG_FLD_MOD(av_base, HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
-		REG_FLD_MOD(av_base,
-				HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
-		REG_FLD_MOD(av_base,
-				HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
-	} else {
-		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
-				cfg->aud_par_busclk, 7, 0);
-		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
-				(cfg->aud_par_busclk >> 8), 7, 0);
-		REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
-				(cfg->aud_par_busclk >> 16), 7, 0);
-	}
-
-	/* Set ACR clock divisor */
-	REG_FLD_MOD(av_base,
-			HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
-
-	r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
-	/*
-	 * Use TMDS clock for ACR packets. For devices that use
-	 * the MCLK, this is the first part of the MCLK initialization.
-	 */
-	r = FLD_MOD(r, 0, 2, 2);
-
-	r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
-	r = FLD_MOD(r, cfg->cts_mode, 0, 0);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_ACR_CTRL, r);
-
-	/* For devices using MCLK, this completes its initialization. */
-	if (cfg->use_mclk)
-		REG_FLD_MOD(av_base, HDMI_CORE_AV_ACR_CTRL, 1, 2, 2);
-
-	/* Override of SPDIF sample frequency with value in I2S_CHST4 */
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
-						cfg->fs_override, 1, 1);
-
-	/*
-	 * Set IEC-60958-3 channel status word. It is passed to the IP
-	 * just as it is received. The user of the driver is responsible
-	 * for its contents.
-	 */
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
-		       cfg->iec60958_cfg->status[0]);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
-		       cfg->iec60958_cfg->status[1]);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
-		       cfg->iec60958_cfg->status[2]);
-	/* yes, this is correct: status[3] goes to CHST4 register */
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
-		       cfg->iec60958_cfg->status[3]);
-	/* yes, this is correct: status[4] goes to CHST5 register */
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
-		       cfg->iec60958_cfg->status[4]);
-
-	/* set I2S parameters */
-	r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
-	r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
-	r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
-	r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
-	r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
-	r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
-
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
-			cfg->i2s_cfg.in_length_bits, 3, 0);
-
-	/* Audio channels and mode parameters */
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
-	r = hdmi_read_reg(av_base, HDMI_CORE_AV_AUD_MODE);
-	r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
-	r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
-	r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
-	r = FLD_MOD(r, cfg->en_spdif, 1, 1);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
-
-	/* Audio channel mappings */
-	/* TODO: Make channel mapping dynamic. For now, map channels
-	 * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
-	 * HDMI speaker order is different. See CEA-861 Section 6.6.2.
-	 */
-	hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
-	REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
-}
-
-static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
-		struct snd_cea_861_aud_if *info_aud)
-{
-	u8 sum = 0, checksum = 0;
-	void __iomem *av_base = hdmi_av_base(ip_data);
-
-	/*
-	 * Set audio info frame type, version and length as
-	 * described in Section 8.2.2 of the HDMI 1.4a specification.
-	 * Checksum calculation is defined in Section 5.3.5.
-	 */
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_TYPE, 0x84);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_VERS, 0x01);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
-	sum += 0x84 + 0x001 + 0x00a;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
-		       info_aud->db1_ct_cc);
-	sum += info_aud->db1_ct_cc;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
-		       info_aud->db2_sf_ss);
-	sum += info_aud->db2_sf_ss;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
-	sum += info_aud->db3;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
-	sum += info_aud->db4_ca;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
-		       info_aud->db5_dminh_lsv);
-	sum += info_aud->db5_dminh_lsv;
-
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
-	hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
-
-	checksum = 0x100 - sum;
-	hdmi_write_reg(av_base,
-					HDMI_CORE_AV_AUDIO_CHSUM, checksum);
-
-	/*
-	 * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
-	 * is available.
-	 */
-}
-
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
-		struct omap_dss_audio *audio)
-{
-	struct hdmi_audio_format audio_format;
-	struct hdmi_audio_dma audio_dma;
-	struct hdmi_core_audio_config core;
-	int err, n, cts, channel_count;
-	unsigned int fs_nr;
-	bool word_length_16b = false;
-
-	if (!audio || !audio->iec || !audio->cea || !ip_data)
-		return -EINVAL;
-
-	core.iec60958_cfg = audio->iec;
-	/*
-	 * In the IEC-60958 status word, check if the audio sample word length
-	 * is 16-bit as several optimizations can be performed in such case.
-	 */
-	if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
-		if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
-			word_length_16b = true;
-
-	/* I2S configuration. See the Philips I2S specification */
-	if (word_length_16b)
-		core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-	else
-		core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-	/*
-	 * The I2S input word length is twice the length given in the IEC-60958
-	 * status word. If the word size is greater than
-	 * 20 bits, increment by one.
-	 */
-	core.i2s_cfg.in_length_bits = audio->iec->status[4]
-		& IEC958_AES4_CON_WORDLEN;
-	if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
-		core.i2s_cfg.in_length_bits++;
-	core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
-	core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
-	core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
-	core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
-
-	/* convert sample frequency to a number */
-	switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
-	case IEC958_AES3_CON_FS_32000:
-		fs_nr = 32000;
-		break;
-	case IEC958_AES3_CON_FS_44100:
-		fs_nr = 44100;
-		break;
-	case IEC958_AES3_CON_FS_48000:
-		fs_nr = 48000;
-		break;
-	case IEC958_AES3_CON_FS_88200:
-		fs_nr = 88200;
-		break;
-	case IEC958_AES3_CON_FS_96000:
-		fs_nr = 96000;
-		break;
-	case IEC958_AES3_CON_FS_176400:
-		fs_nr = 176400;
-		break;
-	case IEC958_AES3_CON_FS_192000:
-		fs_nr = 192000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	err = hdmi_compute_acr(fs_nr, &n, &cts);
-
-	/* Audio clock regeneration settings */
-	core.n = n;
-	core.cts = cts;
-	if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
-		core.aud_par_busclk = 0;
-		core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
-		core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
-	} else {
-		core.aud_par_busclk = (((128 * 31) - 1) << 8);
-		core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
-		core.use_mclk = true;
-	}
-
-	if (core.use_mclk)
-		core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
-
-	/* Audio channels settings */
-	channel_count = (audio->cea->db1_ct_cc &
-			 CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
-
-	switch (channel_count) {
-	case 2:
-		audio_format.active_chnnls_msk = 0x03;
-		break;
-	case 3:
-		audio_format.active_chnnls_msk = 0x07;
-		break;
-	case 4:
-		audio_format.active_chnnls_msk = 0x0f;
-		break;
-	case 5:
-		audio_format.active_chnnls_msk = 0x1f;
-		break;
-	case 6:
-		audio_format.active_chnnls_msk = 0x3f;
-		break;
-	case 7:
-		audio_format.active_chnnls_msk = 0x7f;
-		break;
-	case 8:
-		audio_format.active_chnnls_msk = 0xff;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/*
-	 * the HDMI IP needs to enable four stereo channels when transmitting
-	 * more than 2 audio channels
-	 */
-	if (channel_count == 2) {
-		audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
-		core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-		core.layout = HDMI_AUDIO_LAYOUT_2CH;
-	} else {
-		audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
-		core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
-				HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
-				HDMI_AUDIO_I2S_SD3_EN;
-		core.layout = HDMI_AUDIO_LAYOUT_8CH;
-	}
-
-	core.en_spdif = false;
-	/* use sample frequency from channel status word */
-	core.fs_override = true;
-	/* enable ACR packets */
-	core.en_acr_pkt = true;
-	/* disable direct streaming digital audio */
-	core.en_dsd_audio = false;
-	/* use parallel audio interface */
-	core.en_parallel_aud_input = true;
-
-	/* DMA settings */
-	if (word_length_16b)
-		audio_dma.transfer_size = 0x10;
-	else
-		audio_dma.transfer_size = 0x20;
-	audio_dma.block_size = 0xC0;
-	audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
-	audio_dma.fifo_threshold = 0x20; /* in number of samples */
-
-	/* audio FIFO format settings */
-	if (word_length_16b) {
-		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
-		audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
-		audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-	} else {
-		audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
-		audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
-		audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-	}
-	audio_format.type = HDMI_AUDIO_TYPE_LPCM;
-	audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
-	/* disable start/stop signals of IEC 60958 blocks */
-	audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
-
-	/* configure DMA and audio FIFO format */
-	ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
-	ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
-
-	/* configure the core */
-	ti_hdmi_4xxx_core_audio_config(ip_data, &core);
-
-	/* configure CEA 861 audio infoframe */
-	ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
-
-	return 0;
-}
-
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_wp_base(ip_data),
-		    HDMI_WP_AUDIO_CTRL, true, 31, 31);
-	return 0;
-}
-
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_wp_base(ip_data),
-		    HDMI_WP_AUDIO_CTRL, false, 31, 31);
-}
-
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_av_base(ip_data),
-		    HDMI_CORE_AV_AUD_MODE, true, 0, 0);
-	REG_FLD_MOD(hdmi_wp_base(ip_data),
-		    HDMI_WP_AUDIO_CTRL, true, 30, 30);
-	return 0;
-}
-
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
-{
-	REG_FLD_MOD(hdmi_av_base(ip_data),
-		    HDMI_CORE_AV_AUD_MODE, false, 0, 0);
-	REG_FLD_MOD(hdmi_wp_base(ip_data),
-		    HDMI_WP_AUDIO_CTRL, false, 30, 30);
-}
-
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
-{
-	if (!offset || !size)
-		return -EINVAL;
-	*offset = HDMI_WP_AUDIO_DATA;
-	*size = 4;
-	return 0;
-}
-#endif
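
For reference, the checksum test in the just-removed hdmi_core_ddc_edid()
implements the standard EDID rule: every 128-byte block, base or extension,
must sum to zero modulo 256. A minimal self-contained sketch of that check
(hypothetical helper name, not part of the driver):

	#include <stdint.h>
	#include <stddef.h>

	/* Return 0 if a 128-byte EDID block is valid, -1 otherwise. */
	static int edid_block_valid(const uint8_t *block)
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i < 128; i++)
			sum += block[i];	/* u8 arithmetic wraps mod 256 */

		return sum == 0 ? 0 : -1;
	}

An unsigned byte accumulator also sidesteps the implementation-defined
signedness of the plain "char checksum" used in the original loop.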
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
deleted file mode 100644
index 6ef2f92..0000000
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * ti_hdmi_4xxx_ip.h
- *
- * HDMI header definitions for DM81xx, DM38xx, TI OMAP4 and similar processors.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HDMI_TI_4xxx_H_
-#define _HDMI_TI_4xxx_H_
-
-#include <linux/string.h>
-#include <video/omapdss.h>
-#include "ti_hdmi.h"
-
-/* HDMI Wrapper */
-
-#define HDMI_WP_REVISION			0x0
-#define HDMI_WP_SYSCONFIG			0x10
-#define HDMI_WP_IRQSTATUS_RAW			0x24
-#define HDMI_WP_IRQSTATUS			0x28
-#define HDMI_WP_PWR_CTRL			0x40
-#define HDMI_WP_IRQENABLE_SET			0x2C
-#define HDMI_WP_IRQENABLE_CLR			0x30
-#define HDMI_WP_VIDEO_CFG			0x50
-#define HDMI_WP_VIDEO_SIZE			0x60
-#define HDMI_WP_VIDEO_TIMING_H			0x68
-#define HDMI_WP_VIDEO_TIMING_V			0x6C
-#define HDMI_WP_WP_CLK				0x70
-#define HDMI_WP_AUDIO_CFG			0x80
-#define HDMI_WP_AUDIO_CFG2			0x84
-#define HDMI_WP_AUDIO_CTRL			0x88
-#define HDMI_WP_AUDIO_DATA			0x8C
-
-/* HDMI IP Core System */
-
-#define HDMI_CORE_SYS_VND_IDL			0x0
-#define HDMI_CORE_SYS_DEV_IDL			0x8
-#define HDMI_CORE_SYS_DEV_IDH			0xC
-#define HDMI_CORE_SYS_DEV_REV			0x10
-#define HDMI_CORE_SYS_SRST			0x14
-#define HDMI_CORE_CTRL1				0x20
-#define HDMI_CORE_SYS_SYS_STAT			0x24
-#define HDMI_CORE_SYS_DE_DLY			0xC8
-#define HDMI_CORE_SYS_DE_CTRL			0xCC
-#define HDMI_CORE_SYS_DE_TOP			0xD0
-#define HDMI_CORE_SYS_DE_CNTL			0xD8
-#define HDMI_CORE_SYS_DE_CNTH			0xDC
-#define HDMI_CORE_SYS_DE_LINL			0xE0
-#define HDMI_CORE_SYS_DE_LINH_1			0xE4
-#define HDMI_CORE_SYS_VID_ACEN			0x124
-#define HDMI_CORE_SYS_VID_MODE			0x128
-#define HDMI_CORE_SYS_INTR_STATE		0x1C0
-#define HDMI_CORE_SYS_INTR1			0x1C4
-#define HDMI_CORE_SYS_INTR2			0x1C8
-#define HDMI_CORE_SYS_INTR3			0x1CC
-#define HDMI_CORE_SYS_INTR4			0x1D0
-#define HDMI_CORE_SYS_UMASK1			0x1D4
-#define HDMI_CORE_SYS_TMDS_CTRL			0x208
-
-#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC	0x1
-#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC	0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS	0x1
-#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE	0x1
-
-/* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_ADDR			0x3B4
-#define HDMI_CORE_DDC_SEGM			0x3B8
-#define HDMI_CORE_DDC_OFFSET			0x3BC
-#define HDMI_CORE_DDC_COUNT1			0x3C0
-#define HDMI_CORE_DDC_COUNT2			0x3C4
-#define HDMI_CORE_DDC_STATUS			0x3C8
-#define HDMI_CORE_DDC_CMD			0x3CC
-#define HDMI_CORE_DDC_DATA			0x3D0
-
-/* HDMI IP Core Audio Video */
-
-#define HDMI_CORE_AV_ACR_CTRL			0x4
-#define HDMI_CORE_AV_FREQ_SVAL			0x8
-#define HDMI_CORE_AV_N_SVAL1			0xC
-#define HDMI_CORE_AV_N_SVAL2			0x10
-#define HDMI_CORE_AV_N_SVAL3			0x14
-#define HDMI_CORE_AV_CTS_SVAL1			0x18
-#define HDMI_CORE_AV_CTS_SVAL2			0x1C
-#define HDMI_CORE_AV_CTS_SVAL3			0x20
-#define HDMI_CORE_AV_CTS_HVAL1			0x24
-#define HDMI_CORE_AV_CTS_HVAL2			0x28
-#define HDMI_CORE_AV_CTS_HVAL3			0x2C
-#define HDMI_CORE_AV_AUD_MODE			0x50
-#define HDMI_CORE_AV_SPDIF_CTRL			0x54
-#define HDMI_CORE_AV_HW_SPDIF_FS		0x60
-#define HDMI_CORE_AV_SWAP_I2S			0x64
-#define HDMI_CORE_AV_SPDIF_ERTH			0x6C
-#define HDMI_CORE_AV_I2S_IN_MAP			0x70
-#define HDMI_CORE_AV_I2S_IN_CTRL		0x74
-#define HDMI_CORE_AV_I2S_CHST0			0x78
-#define HDMI_CORE_AV_I2S_CHST1			0x7C
-#define HDMI_CORE_AV_I2S_CHST2			0x80
-#define HDMI_CORE_AV_I2S_CHST4			0x84
-#define HDMI_CORE_AV_I2S_CHST5			0x88
-#define HDMI_CORE_AV_ASRC			0x8C
-#define HDMI_CORE_AV_I2S_IN_LEN			0x90
-#define HDMI_CORE_AV_HDMI_CTRL			0xBC
-#define HDMI_CORE_AV_AUDO_TXSTAT		0xC0
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1		0xCC
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2		0xD0
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3		0xD4
-#define HDMI_CORE_AV_TEST_TXCTRL		0xF0
-#define HDMI_CORE_AV_DPD			0xF4
-#define HDMI_CORE_AV_PB_CTRL1			0xF8
-#define HDMI_CORE_AV_PB_CTRL2			0xFC
-#define HDMI_CORE_AV_AVI_TYPE			0x100
-#define HDMI_CORE_AV_AVI_VERS			0x104
-#define HDMI_CORE_AV_AVI_LEN			0x108
-#define HDMI_CORE_AV_AVI_CHSUM			0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n)		(n * 4 + 0x110)
-#define HDMI_CORE_AV_SPD_TYPE			0x180
-#define HDMI_CORE_AV_SPD_VERS			0x184
-#define HDMI_CORE_AV_SPD_LEN			0x188
-#define HDMI_CORE_AV_SPD_CHSUM			0x18C
-#define HDMI_CORE_AV_SPD_DBYTE(n)		(n * 4 + 0x190)
-#define HDMI_CORE_AV_AUDIO_TYPE			0x200
-#define HDMI_CORE_AV_AUDIO_VERS			0x204
-#define HDMI_CORE_AV_AUDIO_LEN			0x208
-#define HDMI_CORE_AV_AUDIO_CHSUM		0x20C
-#define HDMI_CORE_AV_AUD_DBYTE(n)		(n * 4 + 0x210)
-#define HDMI_CORE_AV_MPEG_TYPE			0x280
-#define HDMI_CORE_AV_MPEG_VERS			0x284
-#define HDMI_CORE_AV_MPEG_LEN			0x288
-#define HDMI_CORE_AV_MPEG_CHSUM			0x28C
-#define HDMI_CORE_AV_MPEG_DBYTE(n)		(n * 4 + 0x290)
-#define HDMI_CORE_AV_GEN_DBYTE(n)		(n * 4 + 0x300)
-#define HDMI_CORE_AV_CP_BYTE1			0x37C
-#define HDMI_CORE_AV_GEN2_DBYTE(n)		(n * 4 + 0x380)
-#define HDMI_CORE_AV_CEC_ADDR_ID		0x3FC
-
-#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE		0x4
-#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE		0x4
-#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE		0x4
-#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE		0x4
-
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS		15
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS		27
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS		10
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS		27
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS		31
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS		31
-
-/* PLL */
-
-#define PLLCTRL_PLL_CONTROL			0x0
-#define PLLCTRL_PLL_STATUS			0x4
-#define PLLCTRL_PLL_GO				0x8
-#define PLLCTRL_CFG1				0xC
-#define PLLCTRL_CFG2				0x10
-#define PLLCTRL_CFG3				0x14
-#define PLLCTRL_CFG4				0x20
-
-/* HDMI PHY */
-
-#define HDMI_TXPHY_TX_CTRL			0x0
-#define HDMI_TXPHY_DIGITAL_CTRL			0x4
-#define HDMI_TXPHY_POWER_CTRL			0x8
-#define HDMI_TXPHY_PAD_CFG_CTRL			0xC
-
-#define REG_FLD_MOD(base, idx, val, start, end) \
-	hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
-							val, start, end))
-#define REG_GET(base, idx, start, end) \
-	FLD_GET(hdmi_read_reg(base, idx), start, end)
-
-enum hdmi_phy_pwr {
-	HDMI_PHYPWRCMD_OFF = 0,
-	HDMI_PHYPWRCMD_LDOON = 1,
-	HDMI_PHYPWRCMD_TXON = 2
-};
-
-enum hdmi_core_inputbus_width {
-	HDMI_INPUT_8BIT = 0,
-	HDMI_INPUT_10BIT = 1,
-	HDMI_INPUT_12BIT = 2
-};
-
-enum hdmi_core_dither_trunc {
-	HDMI_OUTPUTTRUNCATION_8BIT = 0,
-	HDMI_OUTPUTTRUNCATION_10BIT = 1,
-	HDMI_OUTPUTTRUNCATION_12BIT = 2,
-	HDMI_OUTPUTDITHER_8BIT = 3,
-	HDMI_OUTPUTDITHER_10BIT = 4,
-	HDMI_OUTPUTDITHER_12BIT = 5
-};
-
-enum hdmi_core_deepcolor_ed {
-	HDMI_DEEPCOLORPACKECTDISABLE = 0,
-	HDMI_DEEPCOLORPACKECTENABLE = 1
-};
-
-enum hdmi_core_packet_mode {
-	HDMI_PACKETMODERESERVEDVALUE = 0,
-	HDMI_PACKETMODE24BITPERPIXEL = 4,
-	HDMI_PACKETMODE30BITPERPIXEL = 5,
-	HDMI_PACKETMODE36BITPERPIXEL = 6,
-	HDMI_PACKETMODE48BITPERPIXEL = 7
-};
-
-enum hdmi_core_tclkselclkmult {
-	HDMI_FPLL05IDCK = 0,
-	HDMI_FPLL10IDCK = 1,
-	HDMI_FPLL20IDCK = 2,
-	HDMI_FPLL40IDCK = 3
-};
-
-enum hdmi_core_packet_ctrl {
-	HDMI_PACKETENABLE = 1,
-	HDMI_PACKETDISABLE = 0,
-	HDMI_PACKETREPEATON = 1,
-	HDMI_PACKETREPEATOFF = 0
-};
-
-/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
-enum hdmi_core_infoframe {
-	HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
-	HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
-	HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
-	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
-	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON =  1,
-	HDMI_INFOFRAME_AVI_DB1B_NO = 0,
-	HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
-	HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
-	HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
-	HDMI_INFOFRAME_AVI_DB1S_0 = 0,
-	HDMI_INFOFRAME_AVI_DB1S_1 = 1,
-	HDMI_INFOFRAME_AVI_DB1S_2 = 2,
-	HDMI_INFOFRAME_AVI_DB2C_NO = 0,
-	HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
-	HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
-	HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
-	HDMI_INFOFRAME_AVI_DB2M_NO = 0,
-	HDMI_INFOFRAME_AVI_DB2M_43 = 1,
-	HDMI_INFOFRAME_AVI_DB2M_169 = 2,
-	HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
-	HDMI_INFOFRAME_AVI_DB2R_43 = 9,
-	HDMI_INFOFRAME_AVI_DB2R_169 = 10,
-	HDMI_INFOFRAME_AVI_DB2R_149 = 11,
-	HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
-	HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
-	HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
-	HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
-	HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
-	HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
-	HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
-	HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
-	HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
-	HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
-	HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
-	HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
-	HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
-	HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
-	HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
-	HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
-	HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
-	HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
-	HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
-	HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
-	HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-};
-
-enum hdmi_packing_mode {
-	HDMI_PACK_10b_RGB_YUV444 = 0,
-	HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
-	HDMI_PACK_20b_YUV422 = 2,
-	HDMI_PACK_ALREADYPACKED = 7
-};
-
-enum hdmi_core_audio_layout {
-	HDMI_AUDIO_LAYOUT_2CH = 0,
-	HDMI_AUDIO_LAYOUT_8CH = 1
-};
-
-enum hdmi_core_cts_mode {
-	HDMI_AUDIO_CTS_MODE_HW = 0,
-	HDMI_AUDIO_CTS_MODE_SW = 1
-};
-
-enum hdmi_stereo_channels {
-	HDMI_AUDIO_STEREO_NOCHANNELS = 0,
-	HDMI_AUDIO_STEREO_ONECHANNEL = 1,
-	HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
-	HDMI_AUDIO_STEREO_THREECHANNELS = 3,
-	HDMI_AUDIO_STEREO_FOURCHANNELS = 4
-};
-
-enum hdmi_audio_type {
-	HDMI_AUDIO_TYPE_LPCM = 0,
-	HDMI_AUDIO_TYPE_IEC = 1
-};
-
-enum hdmi_audio_justify {
-	HDMI_AUDIO_JUSTIFY_LEFT = 0,
-	HDMI_AUDIO_JUSTIFY_RIGHT = 1
-};
-
-enum hdmi_audio_sample_order {
-	HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
-	HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
-};
-
-enum hdmi_audio_samples_perword {
-	HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
-	HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
-};
-
-enum hdmi_audio_sample_size {
-	HDMI_AUDIO_SAMPLE_16BITS = 0,
-	HDMI_AUDIO_SAMPLE_24BITS = 1
-};
-
-enum hdmi_audio_transf_mode {
-	HDMI_AUDIO_TRANSF_DMA = 0,
-	HDMI_AUDIO_TRANSF_IRQ = 1
-};
-
-enum hdmi_audio_blk_strt_end_sig {
-	HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
-	HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
-};
-
-enum hdmi_audio_i2s_config {
-	HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
-	HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
-	HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
-	HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
-	HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
-	HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
-	HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
-	HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
-	HDMI_AUDIO_I2S_SD0_EN = 1,
-	HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
-	HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
-	HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
-};
-
-enum hdmi_audio_mclk_mode {
-	HDMI_AUDIO_MCLK_128FS = 0,
-	HDMI_AUDIO_MCLK_256FS = 1,
-	HDMI_AUDIO_MCLK_384FS = 2,
-	HDMI_AUDIO_MCLK_512FS = 3,
-	HDMI_AUDIO_MCLK_768FS = 4,
-	HDMI_AUDIO_MCLK_1024FS = 5,
-	HDMI_AUDIO_MCLK_1152FS = 6,
-	HDMI_AUDIO_MCLK_192FS = 7
-};
-
-struct hdmi_core_video_config {
-	enum hdmi_core_inputbus_width	ip_bus_width;
-	enum hdmi_core_dither_trunc	op_dither_truc;
-	enum hdmi_core_deepcolor_ed	deep_color_pkt;
-	enum hdmi_core_packet_mode	pkt_mode;
-	enum hdmi_core_hdmi_dvi		hdmi_dvi;
-	enum hdmi_core_tclkselclkmult	tclk_sel_clkmult;
-};
-
-struct hdmi_core_packet_enable_repeat {
-	u32	audio_pkt;
-	u32	audio_pkt_repeat;
-	u32	avi_infoframe;
-	u32	avi_infoframe_repeat;
-	u32	gen_cntrl_pkt;
-	u32	gen_cntrl_pkt_repeat;
-	u32	generic_pkt;
-	u32	generic_pkt_repeat;
-};
-
-struct hdmi_video_format {
-	enum hdmi_packing_mode	packing_mode;
-	u32			y_res;	/* Lines per panel */
-	u32			x_res;	/* Pixels per line */
-};
-
-struct hdmi_audio_format {
-	enum hdmi_stereo_channels		stereo_channels;
-	u8					active_chnnls_msk;
-	enum hdmi_audio_type			type;
-	enum hdmi_audio_justify			justification;
-	enum hdmi_audio_sample_order		sample_order;
-	enum hdmi_audio_samples_perword		samples_per_word;
-	enum hdmi_audio_sample_size		sample_size;
-	enum hdmi_audio_blk_strt_end_sig	en_sig_blk_strt_end;
-};
-
-struct hdmi_audio_dma {
-	u8				transfer_size;
-	u8				block_size;
-	enum hdmi_audio_transf_mode	mode;
-	u16				fifo_threshold;
-};
-
-struct hdmi_core_audio_i2s_config {
-	u8 in_length_bits;
-	u8 justification;
-	u8 sck_edge_mode;
-	u8 vbit;
-	u8 direction;
-	u8 shift;
-	u8 active_sds;
-};
-
-struct hdmi_core_audio_config {
-	struct hdmi_core_audio_i2s_config	i2s_cfg;
-	struct snd_aes_iec958			*iec60958_cfg;
-	bool					fs_override;
-	u32					n;
-	u32					cts;
-	u32					aud_par_busclk;
-	enum hdmi_core_audio_layout		layout;
-	enum hdmi_core_cts_mode			cts_mode;
-	bool					use_mclk;
-	enum hdmi_audio_mclk_mode		mclk_mode;
-	bool					en_acr_pkt;
-	bool					en_dsd_audio;
-	bool					en_parallel_aud_input;
-	bool					en_spdif;
-};
-
-#endif
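
The REG_FLD_MOD()/REG_GET() macros above build on the omapdss bitfield
helpers FLD_MOD, FLD_GET and FLD_VAL, which are defined elsewhere in the DSS
headers. A reconstruction of how such start/end-bit helpers are typically
written (a sketch consistent with the usage above, not a verbatim copy):

	/* Mask covering bits [end..start]; e.g. FLD_MASK(7, 4) == 0xf0 */
	#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
	/* Place val into bits [end..start] */
	#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
	/* Extract bits [end..start] from val */
	#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
	/* Replace bits [end..start] of orig with val */
	#define FLD_MOD(orig, val, start, end) \
		(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

With these in place, REG_FLD_MOD(base, idx, val, start, end) is a
read-modify-write of a single register field, which is why the driver can
update a two-bit field with, say, REG_FLD_MOD(phy_base, HDMI_TXPHY_TX_CTRL,
0x1, 31, 30).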
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 4b23af6..367cea8 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -339,8 +339,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 3d86bac..4c92995 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -403,7 +403,7 @@
 	if (rc < 0)
 		return rc;
 
-	printk(KERN_INFO "fb%d: Apple Platinum frame buffer device\n", info->node);
+	fb_info(info, "Apple Platinum frame buffer device\n");
 
 	return 0;
 }
@@ -639,7 +639,6 @@
 		iounmap(pinfo->frame_buffer);
 		iounmap(pinfo->platinum_regs);
 		iounmap(pinfo->cmap_regs);
-		dev_set_drvdata(&odev->dev, NULL);
 		framebuffer_release(info);
 	}
 
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 81354ee..3b85b64 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1694,8 +1694,8 @@
 	if (retval < 0)
 		goto err_exit_all;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device, memory = %dK.\n",
-	       info->node, info->fix.id, pm2fb_fix.smem_len / 1024);
+	fb_info(info, "%s frame buffer device, memory = %dK\n",
+		info->fix.id, pm2fb_fix.smem_len / 1024);
 
 	/*
 	 * Our driver data
@@ -1744,7 +1744,6 @@
 	iounmap(par->v_regs);
 	release_mem_region(fix->mmio_start, fix->mmio_len);
 
-	pci_set_drvdata(pdev, NULL);
 	fb_dealloc_cmap(&info->cmap);
 	kfree(info->pixmap.addr);
 	framebuffer_release(info);
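
The printk()-to-fb_info()/fb_err() conversions in platinumfb and pm2fb above,
and in most of the diffs below, target the fb_* message helpers from
include/linux/fb.h, which prepend the "fb%d: " prefix from the fb_info node
automatically. A sketch of the pattern (close to, though not guaranteed to
be, a verbatim copy of the header):

	#define fb_err(fb_info, fmt, ...)				\
		pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
	#define fb_notice(fb_info, fmt, ...)				\
		pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
	#define fb_info(fb_info, fmt, ...)				\
		pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
	#define fb_dbg(fb_info, fmt, ...)				\
		pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)

So printk(KERN_INFO "fb%d: foo\n", info->node) collapses to
fb_info(info, "foo\n") with identical console output.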
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 7718faa..4bf3273 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1445,8 +1445,7 @@
 		retval = -EINVAL;
 		goto err_exit_all;
 	}
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-	   info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	pci_set_drvdata(dev, info);
 	return 0;
 
@@ -1489,7 +1488,6 @@
 		iounmap(par->v_regs);
 		release_mem_region(fix->mmio_start, fix->mmio_len);
 
-		pci_set_drvdata(dev, NULL);
 		kfree(info->pixmap.addr);
 		framebuffer_release(info);
 	}
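
The dev_set_drvdata(..., NULL) and pci_set_drvdata(..., NULL) deletions in
p9100, platinumfb, pm2fb and pm3fb above are all the same cleanup: the driver
core now clears drvdata itself when a device is unbound, so resetting it by
hand in the remove path is redundant. A hedged sketch of the resulting
remove() shape (hypothetical driver name):

	#include <linux/fb.h>
	#include <linux/platform_device.h>

	static int examplefb_remove(struct platform_device *op)
	{
		struct fb_info *info = dev_get_drvdata(&op->dev);

		unregister_framebuffer(info);
		framebuffer_release(info);

		/* No dev_set_drvdata(&op->dev, NULL) here; the core
		 * resets drvdata once the device is unbound. */
		return 0;
	}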
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index d1e46ce..914a52b 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -212,8 +212,8 @@
 
 	get_device(dev);
 
-	pr_info("fb%d: %s frame buffer device at %s\n",
-		info->node, info->fix.id, dev_name(dev));
+	fb_info(info, "%s frame buffer device at %s\n",
+		info->fix.id, dev_name(dev));
 
 	return 0;
 
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 0e13174..0822b6f 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -328,11 +328,10 @@
 	snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
 		 par->osc1 / 1000, par->osc1 % 1000);
 
-	pr_info("fb%d: %s frame buffer device at %s\n",
-		info->node, info->fix.id, dev_name(dev));
-	pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
-		info->node, freq0, par->osc1 ? freq1 : "disabled",
-		par->osc1 != 0);
+	fb_info(info, "%s frame buffer device at %s\n",
+		info->fix.id, dev_name(dev));
+	fb_info(info, "Osc0: %s, Osc1: %s, Osc%u selected\n",
+		freq0, par->osc1 ? freq1 : "disabled", par->osc1 != 0);
 
 	return 0;
 
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index df07860..167cfff 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -817,24 +817,25 @@
 
 	rev = fb_readl(par->mmio_base + 0x04);
 
-	printk("fb%d: %s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
-	       fb_info->node, fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
-	       modememused >> 10, (unsigned long)(fb_info->fix.smem_len >> 10));
-	printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
-	       fb_info->node, fb_info->var.xres, fb_info->var.yres,
-	       fb_info->var.bits_per_pixel,
-	       get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
-	       (char *)pvr2_get_param(cables, NULL, cable_type, 3),
-	       (char *)pvr2_get_param(outputs, NULL, video_output, 3));
+	fb_info(fb_info, "%s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
+		fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
+		modememused >> 10,
+		(unsigned long)(fb_info->fix.smem_len >> 10));
+	fb_info(fb_info, "Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
+		fb_info->var.xres, fb_info->var.yres,
+		fb_info->var.bits_per_pixel,
+		get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
+		(char *)pvr2_get_param(cables, NULL, cable_type, 3),
+		(char *)pvr2_get_param(outputs, NULL, video_output, 3));
 
 #ifdef CONFIG_SH_STORE_QUEUES
-	printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
+	fb_notice(fb_info, "registering with SQ API\n");
 
 	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
 			      fb_info->fix.id, PAGE_SHARED);
 
-	printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
-	       fb_info->node, pvr2fb_map);
+	fb_notice(fb_info, "Mapped video memory to SQ addr 0x%lx\n",
+		  pvr2fb_map);
 #endif
 
 	return 0;
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index aa9bd1f..c95b9e4 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -364,7 +364,7 @@
 static void set_dumb_panel_control(struct fb_info *info)
 {
 	struct pxa168fb_info *fbi = info->par;
-	struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
+	struct pxa168fb_mach_info *mi = dev_get_platdata(fbi->dev);
 	u32 x;
 
 	/*
@@ -407,7 +407,7 @@
 	u32 x;
 	struct pxa168fb_mach_info *mi;
 
-	mi = fbi->dev->platform_data;
+	mi = dev_get_platdata(fbi->dev);
 
 	/*
 	 * Set additional mode info.
@@ -609,7 +609,7 @@
 	struct clk *clk;
 	int irq, ret;
 
-	mi = pdev->dev.platform_data;
+	mi = dev_get_platdata(&pdev->dev);
 	if (mi == NULL) {
 		dev_err(&pdev->dev, "no platform data defined\n");
 		return -EINVAL;
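
The dev->platform_data dereferences replaced in pxa168fb here, and in pxafb,
s1d13xxxfb, s3c-fb and s3c2410fb below, all switch to the dev_get_platdata()
accessor from <linux/device.h>, essentially a typed wrapper around the raw
field:

	static inline void *dev_get_platdata(const struct device *dev)
	{
		return dev->platform_data;
	}

Going through the accessor keeps drivers insulated from any future change in
how the driver core stores platform data.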
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index eca2de4..1ecd9ce 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -457,7 +457,7 @@
 static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
 	struct pxafb_info *fbi = (struct pxafb_info *)info;
-	struct pxafb_mach_info *inf = fbi->dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
 	int err;
 
 	if (inf->fixed_modes) {
@@ -1230,7 +1230,7 @@
 static void setup_smart_timing(struct pxafb_info *fbi,
 				struct fb_var_screeninfo *var)
 {
-	struct pxafb_mach_info *inf = fbi->dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
 	struct pxafb_mode_info *mode = &inf->modes[0];
 	unsigned long lclk = clk_get_rate(fbi->clk);
 	unsigned t1, t2, t3, t4;
@@ -1258,14 +1258,14 @@
 static int pxafb_smart_thread(void *arg)
 {
 	struct pxafb_info *fbi = arg;
-	struct pxafb_mach_info *inf = fbi->dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
 
 	if (!inf->smart_update) {
 		pr_err("%s: not properly initialized, thread terminated\n",
 				__func__);
 		return -EINVAL;
 	}
-	inf = fbi->dev->platform_data;
+	inf = dev_get_platdata(fbi->dev);
 
 	pr_debug("%s(): task starting\n", __func__);
 
@@ -1793,7 +1793,7 @@
 {
 	struct pxafb_info *fbi;
 	void *addr;
-	struct pxafb_mach_info *inf = dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(dev);
 
 	/* Alloc the pxafb_info and pseudo_palette in one step */
 	fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
@@ -1855,7 +1855,7 @@
 #ifdef CONFIG_FB_PXA_PARAMETERS
 static int parse_opt_mode(struct device *dev, const char *this_opt)
 {
-	struct pxafb_mach_info *inf = dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(dev);
 
 	const char *name = this_opt+5;
 	unsigned int namelen = strlen(name);
@@ -1914,7 +1914,7 @@
 
 static int parse_opt(struct device *dev, char *this_opt)
 {
-	struct pxafb_mach_info *inf = dev->platform_data;
+	struct pxafb_mach_info *inf = dev_get_platdata(dev);
 	struct pxafb_mode_info *mode = &inf->modes[0];
 	char s[64];
 
@@ -2102,7 +2102,7 @@
 
 	dev_dbg(&dev->dev, "pxafb_probe\n");
 
-	inf = dev->dev.platform_data;
+	inf = dev_get_platdata(&dev->dev);
 	ret = -ENOMEM;
 	fbi = NULL;
 	if (!inf)
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index d44c735..7487f76 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -119,8 +119,7 @@
 		return -EINVAL;
 	}
 
-        printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n",
-	       info->node);
+	fb_info(info, "Q40 frame buffer alive and kicking !\n");
 	return 0;
 }
 
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 9536715..a5514ac 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1185,11 +1185,6 @@
 	if (rivafb_do_maximize(info, var, nom, den) < 0)
 		return -EINVAL;
 
-	if (var->xoffset < 0)
-		var->xoffset = 0;
-	if (var->yoffset < 0)
-		var->yoffset = 0;
-
 	/* truncate xoffset and yoffset to maximum if too high */
 	if (var->xoffset > var->xres_virtual - var->xres)
 		var->xoffset = var->xres_virtual - var->xres - 1;
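
The deleted xoffset/yoffset checks in rivafb were dead code: both fields are
unsigned (__u32) in struct fb_var_screeninfo, so the "< 0" comparisons could
never be true, and compilers flag them under -Wtype-limits. Abridged field
declarations from <linux/fb.h> for reference:

	struct fb_var_screeninfo {
		__u32 xres;		/* visible resolution */
		__u32 yres;
		__u32 xres_virtual;	/* virtual resolution */
		__u32 yres_virtual;
		__u32 xoffset;		/* offset from virtual to visible */
		__u32 yoffset;
		/* ... */
	};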
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 05c2dc3..83433cb 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -777,8 +777,8 @@
 	printk(KERN_INFO "Epson S1D13XXX FB Driver\n");
 
 	/* enable platform-dependent hardware glue, if any */
-	if (pdev->dev.platform_data)
-		pdata = pdev->dev.platform_data;
+	if (dev_get_platdata(&pdev->dev))
+		pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata && pdata->platform_init_video)
 		pdata->platform_init_video();
@@ -901,8 +901,7 @@
 		goto bail;
 	}
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
@@ -923,8 +922,8 @@
 	lcd_enable(s1dfb, 0);
 	crt_enable(s1dfb, 0);
 
-	if (dev->dev.platform_data)
-		pdata = dev->dev.platform_data;
+	if (dev_get_platdata(&dev->dev))
+		pdata = dev_get_platdata(&dev->dev);
 
 #if 0
 	if (!s1dfb->disp_save)
@@ -973,8 +972,8 @@
 	while ((s1d13xxxfb_readreg(s1dfb, S1DREG_PS_STATUS) & 0x01))
 		udelay(10);
 
-	if (dev->dev.platform_data)
-		pdata = dev->dev.platform_data;
+	if (dev_get_platdata(&dev->dev))
+		pdata = dev_get_platdata(&dev->dev);
 
 	if (s1dfb->regs_save) {
 		/* will write RO regs, *should* get away with it :) */
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 2e7991c..62acae2 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1378,7 +1378,7 @@
 		return -EINVAL;
 	}
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (!pd) {
 		dev_err(dev, "no platform data specified\n");
 		return -EINVAL;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 21a32adb..81af5a6 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -123,7 +123,7 @@
 			       struct fb_info *info)
 {
 	struct s3c2410fb_info *fbi = info->par;
-	struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+	struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
 	struct s3c2410fb_display *display = NULL;
 	struct s3c2410fb_display *default_display = mach_info->displays +
 						    mach_info->default_display;
@@ -686,7 +686,7 @@
 static int s3c2410fb_init_registers(struct fb_info *info)
 {
 	struct s3c2410fb_info *fbi = info->par;
-	struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+	struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
 	unsigned long flags;
 	void __iomem *regs = fbi->io;
 	void __iomem *tpal;
@@ -833,7 +833,7 @@
 	int size;
 	u32 lcdcon1;
 
-	mach_info = pdev->dev.platform_data;
+	mach_info = dev_get_platdata(&pdev->dev);
 	if (mach_info == NULL) {
 		dev_err(&pdev->dev,
 			"no platform data for lcd, cannot attach\n");
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d838ba8..968b299 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -306,8 +306,8 @@
 
 	if ((map->width != 8) || (map->height != 16) ||
 	    (map->depth != 1) || (map->length != 256)) {
-	    	printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
-			info->node, map->width, map->height, map->depth, map->length);
+		fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+		       map->width, map->height, map->depth, map->length);
 		return;
 	}
 
@@ -476,7 +476,7 @@
 	rv = svga_compute_pll((par->chip == CHIP_365_TRIO3D) ? &s3_trio3d_pll : &s3_pll,
 			      1000000000 / pixclock, &m, &n, &r, info->node);
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+		fb_err(info, "cannot set requested pixclock, keeping old value\n");
 		return;
 	}
 
@@ -569,7 +569,7 @@
 		rv = -EINVAL;
 
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+		fb_err(info, "unsupported mode requested\n");
 		return rv;
 	}
 
@@ -587,22 +587,21 @@
 	/* Check whether have enough memory */
 	mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
 	if (mem > info->screen_size) {
-		printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n",
-			info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+		fb_err(info, "not enough framebuffer memory (%d kB requested , %u kB available)\n",
+		       mem >> 10, (unsigned int) (info->screen_size >> 10));
 		return -EINVAL;
 	}
 
 	rv = svga_check_timings (&s3_timing_regs, var, info->node);
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+		fb_err(info, "invalid timings requested\n");
 		return rv;
 	}
 
 	rv = svga_compute_pll(&s3_pll, PICOS2KHZ(var->pixclock), &m, &n, &r,
 				info->node);
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: invalid pixclock value requested\n",
-			info->node);
+		fb_err(info, "invalid pixclock value requested\n");
 		return rv;
 	}
 
@@ -686,7 +685,7 @@
 
 
 	/* Set the offset register */
-	pr_debug("fb%d: offset register       : %d\n", info->node, offset_value);
+	fb_dbg(info, "offset register       : %d\n", offset_value);
 	svga_wcrt_multi(par->state.vgabase, s3_offset_regs, offset_value);
 
 	if (par->chip != CHIP_357_VIRGE_GX2 &&
@@ -769,7 +768,7 @@
 	/* Set mode-specific register values */
 	switch (mode) {
 	case 0:
-		pr_debug("fb%d: text mode\n", info->node);
+		fb_dbg(info, "text mode\n");
 		svga_set_textmode_vga_regs(par->state.vgabase);
 
 		/* Set additional registers like in 8-bit mode */
@@ -780,12 +779,12 @@
 		svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
 
 		if (fasttext) {
-			pr_debug("fb%d: high speed text mode set\n", info->node);
+			fb_dbg(info, "high speed text mode set\n");
 			svga_wcrt_mask(par->state.vgabase, 0x31, 0x40, 0x40);
 		}
 		break;
 	case 1:
-		pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor\n");
 		vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
 
 		/* Set additional registers like in 8-bit mode */
@@ -796,7 +795,7 @@
 		svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
 		break;
 	case 2:
-		pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor, planar\n");
 
 		/* Set additional registers like in 8-bit mode */
 		svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -806,7 +805,7 @@
 		svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
 		break;
 	case 3:
-		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+		fb_dbg(info, "8 bit pseudocolor\n");
 		svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
 		if (info->var.pixclock > 20000 ||
 		    par->chip == CHIP_357_VIRGE_GX2 ||
@@ -822,7 +821,7 @@
 		}
 		break;
 	case 4:
-		pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+		fb_dbg(info, "5/5/5 truecolor\n");
 		if (par->chip == CHIP_988_VIRGE_VX) {
 			if (info->var.pixclock > 20000)
 				svga_wcrt_mask(par->state.vgabase, 0x67, 0x20, 0xF0);
@@ -850,7 +849,7 @@
 		}
 		break;
 	case 5:
-		pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+		fb_dbg(info, "5/6/5 truecolor\n");
 		if (par->chip == CHIP_988_VIRGE_VX) {
 			if (info->var.pixclock > 20000)
 				svga_wcrt_mask(par->state.vgabase, 0x67, 0x40, 0xF0);
@@ -879,16 +878,16 @@
 		break;
 	case 6:
 		/* VIRGE VX case */
-		pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+		fb_dbg(info, "8/8/8 truecolor\n");
 		svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
 		break;
 	case 7:
-		pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+		fb_dbg(info, "8/8/8/8 truecolor\n");
 		svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30);
 		svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
 		break;
 	default:
-		printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+		fb_err(info, "unsupported mode - bug\n");
 		return -EINVAL;
 	}
 
@@ -991,27 +990,27 @@
 
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
-		pr_debug("fb%d: unblank\n", info->node);
+		fb_dbg(info, "unblank\n");
 		svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
 		break;
 	case FB_BLANK_NORMAL:
-		pr_debug("fb%d: blank\n", info->node);
+		fb_dbg(info, "blank\n");
 		svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
-		pr_debug("fb%d: hsync\n", info->node);
+		fb_dbg(info, "hsync\n");
 		svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
-		pr_debug("fb%d: vsync\n", info->node);
+		fb_dbg(info, "vsync\n");
 		svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_POWERDOWN:
-		pr_debug("fb%d: sync down\n", info->node);
+		fb_dbg(info, "sync down\n");
 		svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
@@ -1352,13 +1351,16 @@
 		goto err_reg_fb;
 	}
 
-	printk(KERN_INFO "fb%d: %s on %s, %d MB RAM, %d MHz MCLK\n", info->node, info->fix.id,
-		 pci_name(dev), info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
+	fb_info(info, "%s on %s, %d MB RAM, %d MHz MCLK\n",
+		info->fix.id, pci_name(dev),
+		info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
 
 	if (par->chip == CHIP_UNKNOWN)
-		printk(KERN_INFO "fb%d: unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
-			info->node, vga_rcrt(par->state.vgabase, 0x2d), vga_rcrt(par->state.vgabase, 0x2e),
-			vga_rcrt(par->state.vgabase, 0x2f), vga_rcrt(par->state.vgabase, 0x30));
+		fb_info(info, "unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
+			vga_rcrt(par->state.vgabase, 0x2d),
+			vga_rcrt(par->state.vgabase, 0x2e),
+			vga_rcrt(par->state.vgabase, 0x2f),
+			vga_rcrt(par->state.vgabase, 0x30));
 
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
@@ -1424,7 +1426,6 @@
 		pci_release_regions(dev);
 /*		pci_disable_device(dev); */
 
-		pci_set_drvdata(dev, NULL);
 		framebuffer_release(info);
 	}
 }
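
The pci_set_drvdata(dev, NULL) deletions here and in the savagefb, sis, tdfxfb, tridentfb and vt8623fb hunks below all rest on the same guarantee: the driver core now clears drvdata itself whenever a driver is unbound, so resetting it by hand in every remove path is redundant. A remove callback can simply end at its last real teardown step; a sketch with a hypothetical driver:

```c
static void example_remove(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);

	unregister_framebuffer(info);
	fb_dealloc_cmap(&info->cmap);
	framebuffer_release(info);
	/* No pci_set_drvdata(pdev, NULL): the driver core resets
	 * drvdata once the driver is unbound. */
}
```
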
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index de76da0..580c444e 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -1116,7 +1116,7 @@
 
 static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
 {
-	struct sa1100fb_mach_info *inf = dev->platform_data;
+	struct sa1100fb_mach_info *inf = dev_get_platdata(dev);
 	struct sa1100fb_info *fbi;
 	unsigned i;
 
@@ -1201,7 +1201,7 @@
 	struct resource *res;
 	int ret, irq;
 
-	if (!pdev->dev.platform_data) {
+	if (!dev_get_platdata(&pdev->dev)) {
 		dev_err(&pdev->dev, "no platform LCD data\n");
 		return -EINVAL;
 	}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 741b239..4dbf45f 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2362,12 +2362,6 @@
 		kfree(info->pixmap.addr);
 		pci_release_regions(dev);
 		framebuffer_release(info);
-
-		/*
-		 * Ensure that the driver data is no longer
-		 * valid.
-		 */
-		pci_set_drvdata(dev, NULL);
 	}
 }
 
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 296afae..a350209 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -186,7 +186,7 @@
 	}
 	default:
 		return -EINVAL;
-	};
+	}
 }
 EXPORT_SYMBOL(sbusfb_ioctl_helper);
 
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index a9ac3ce..bc74d04 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -803,8 +803,8 @@
 
 	platform_set_drvdata(dev, info);
 
-	printk(KERN_INFO "fb%d: SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",      
-		info->node, sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
+	fb_info(info, "SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
+		sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
 	return 0;
 
 fail_register_framebuffer:
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index bfe4728..9a33ee0 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -498,7 +498,7 @@
 static void sh_hdmi_audio_config(struct sh_hdmi *hdmi)
 {
 	u8 data;
-	struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+	struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
 
 	/*
 	 * [7:4] L/R data swap control
@@ -815,7 +815,7 @@
 		unsigned long *hdmi_rate, unsigned long *parent_rate)
 {
 	unsigned long target = PICOS2KHZ(mode->pixclock) * 1000, rate_error;
-	struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+	struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
 
 	*hdmi_rate = clk_round_rate(hdmi->hdmi_clk, target);
 	if ((long)*hdmi_rate < 0)
@@ -1271,7 +1271,7 @@
 
 static int __init sh_hdmi_probe(struct platform_device *pdev)
 {
-	struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
+	struct sh_mobile_hdmi_info *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	struct resource *htop1_res;
 	int irq = platform_get_irq(pdev, 0), ret;
@@ -1290,7 +1290,7 @@
 		}
 	}
 
-	hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi) {
 		dev_err(&pdev->dev, "Cannot allocate device data\n");
 		return -ENOMEM;
@@ -1304,7 +1304,7 @@
 	if (IS_ERR(hdmi->hdmi_clk)) {
 		ret = PTR_ERR(hdmi->hdmi_clk);
 		dev_err(&pdev->dev, "Unable to get clock: %d\n", ret);
-		goto egetclk;
+		return ret;
 	}
 
 	/* select register access functions */
@@ -1326,7 +1326,7 @@
 		goto erate;
 	}
 
-	ret = clk_enable(hdmi->hdmi_clk);
+	ret = clk_prepare_enable(hdmi->hdmi_clk);
 	if (ret < 0) {
 		dev_err(hdmi->dev, "Cannot enable clock: %d\n", ret);
 		goto erate;
@@ -1404,11 +1404,9 @@
 emap:
 	release_mem_region(res->start, resource_size(res));
 ereqreg:
-	clk_disable(hdmi->hdmi_clk);
+	clk_disable_unprepare(hdmi->hdmi_clk);
 erate:
 	clk_put(hdmi->hdmi_clk);
-egetclk:
-	kfree(hdmi);
 
 	return ret;
 }
@@ -1427,13 +1425,12 @@
 	cancel_delayed_work_sync(&hdmi->edid_work);
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	clk_disable(hdmi->hdmi_clk);
+	clk_disable_unprepare(hdmi->hdmi_clk);
 	clk_put(hdmi->hdmi_clk);
 	if (hdmi->htop1)
 		iounmap(hdmi->htop1);
 	iounmap(hdmi->base);
 	release_mem_region(res->start, resource_size(res));
-	kfree(hdmi);
 
 	return 0;
 }
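
Two independent cleanups meet in sh_mobile_hdmi: the private data moves to a devm_kzalloc() allocation, which the device core frees automatically on probe failure and after remove (hence the dropped kfree() calls and the now-unused egetclk label), and the clock calls gain the prepare/unprepare half required by the common clock framework. A condensed probe sketch with a hypothetical priv structure:

```c
#include <linux/clk.h>
#include <linux/platform_device.h>

struct example_priv {		/* hypothetical driver data */
	struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	int ret;

	/* Freed automatically on probe failure and after remove, so
	 * no kfree() is needed in any error path. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* prepare + enable in one call; pairs with
	 * clk_disable_unprepare() on the teardown side. */
	ret = clk_prepare_enable(priv->clk);
	if (ret < 0)
		clk_put(priv->clk);
	return ret;
}
```
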
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0264704..ab85ad6 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -344,7 +344,7 @@
 {
 	if (atomic_inc_and_test(&priv->hw_usecnt)) {
 		if (priv->dot_clk)
-			clk_enable(priv->dot_clk);
+			clk_prepare_enable(priv->dot_clk);
 		pm_runtime_get_sync(priv->dev);
 		if (priv->meram_dev && priv->meram_dev->pdev)
 			pm_runtime_get_sync(&priv->meram_dev->pdev->dev);
@@ -358,7 +358,7 @@
 			pm_runtime_put_sync(&priv->meram_dev->pdev->dev);
 		pm_runtime_put(priv->dev);
 		if (priv->dot_clk)
-			clk_disable(priv->dot_clk);
+			clk_disable_unprepare(priv->dot_clk);
 	}
 }
 
@@ -574,8 +574,9 @@
 	switch (event) {
 	case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
 		/* HDMI plug in */
+		console_lock();
 		if (lock_fb_info(info)) {
-			console_lock();
 
 			ch->display.width = monspec->max_x * 10;
 			ch->display.height = monspec->max_y * 10;
@@ -594,19 +595,19 @@
 				fb_set_suspend(info, 0);
 			}
 
-			console_unlock();
 			unlock_fb_info(info);
 		}
+		console_unlock();
 		break;
 
 	case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
 		/* HDMI disconnect */
+		console_lock();
 		if (lock_fb_info(info)) {
-			console_lock();
 			fb_set_suspend(info, 1);
-			console_unlock();
 			unlock_fb_info(info);
 		}
+		console_unlock();
 		break;
 
 	case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
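
The sh_mobile_lcdcfb reordering is a lock-hierarchy fix: fbcon and the fbdev notifier paths take console_lock() before the per-device fb_info lock, so acquiring them here in the opposite order could deadlock against a concurrent console operation. Both event handlers now follow the established nesting:

```c
/* Sketch of the required nesting: console lock strictly outside the
 * fb_info lock. */
console_lock();
if (lock_fb_info(info)) {
	fb_set_suspend(info, 1);
	unlock_fb_info(info);
}
console_unlock();
```
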
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
index 8d78106..210f3a0 100644
--- a/drivers/video/simplefb.c
+++ b/drivers/video/simplefb.c
@@ -66,8 +66,15 @@
 	return 0;
 }
 
+static void simplefb_destroy(struct fb_info *info)
+{
+	if (info->screen_base)
+		iounmap(info->screen_base);
+}
+
 static struct fb_ops simplefb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_destroy	= simplefb_destroy,
 	.fb_setcolreg	= simplefb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
@@ -132,7 +139,7 @@
 static int simplefb_parse_pd(struct platform_device *pdev,
 			     struct simplefb_params *params)
 {
-	struct simplefb_platform_data *pd = pdev->dev.platform_data;
+	struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
 	int i;
 
 	params->width = pd->width;
@@ -167,7 +174,7 @@
 		return -ENODEV;
 
 	ret = -ENODEV;
-	if (pdev->dev.platform_data)
+	if (dev_get_platdata(&pdev->dev))
 		ret = simplefb_parse_pd(pdev, &params);
 	else if (pdev->dev.of_node)
 		ret = simplefb_parse_dt(pdev, &params);
@@ -212,17 +219,26 @@
 
 	info->fbops = &simplefb_ops;
 	info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;
-	info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
-					 info->fix.smem_len);
+	info->screen_base = ioremap_wc(info->fix.smem_start,
+				       info->fix.smem_len);
 	if (!info->screen_base) {
 		framebuffer_release(info);
 		return -ENODEV;
 	}
 	info->pseudo_palette = (void *)(info + 1);
 
+	dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
+			     info->fix.smem_start, info->fix.smem_len,
+			     info->screen_base);
+	dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n",
+			     params.format->name,
+			     info->var.xres, info->var.yres,
+			     info->var.bits_per_pixel, info->fix.line_length);
+
 	ret = register_framebuffer(info);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+		iounmap(info->screen_base);
 		framebuffer_release(info);
 		return ret;
 	}
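
simplefb trades its devm-managed mapping for a bare ioremap_wc(): the write-combined mapping speeds up framebuffer fills, but it must now be torn down by hand, which is what the new .fb_destroy hook and the iounmap() on the register_framebuffer() failure path provide. Note that fb_destroy runs when the last reference to the fb_info is dropped, not at driver unbind, so the mapping stays valid for as long as userspace holds the device open. An illustrative sketch of the hook's role:

```c
static void example_destroy(struct fb_info *info)
{
	/* Invoked by the fbdev core on the final put of the fb_info,
	 * which may happen well after the platform device is gone. */
	if (info->screen_base)
		iounmap(info->screen_base);
}
```
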
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 977e279..22ad028 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5994,7 +5994,6 @@
 	if(!ivideo->sisvga_enabled) {
 		if(pci_enable_device(pdev)) {
 			if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
-			pci_set_drvdata(pdev, NULL);
 			framebuffer_release(sis_fb_info);
 			return -EIO;
 		}
@@ -6211,7 +6210,6 @@
 			pci_dev_put(ivideo->lpcdev);
 		if(ivideo->nbridge)
 			pci_dev_put(ivideo->nbridge);
-		pci_set_drvdata(pdev, NULL);
 		if(!ivideo->sisvga_enabled)
 			pci_disable_device(pdev);
 		framebuffer_release(sis_fb_info);
@@ -6480,8 +6478,8 @@
 									"disabled");
 
 
-		printk(KERN_INFO "fb%d: %s frame buffer device version %d.%d.%d\n",
-			sis_fb_info->node, ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
+		fb_info(sis_fb_info, "%s frame buffer device version %d.%d.%d\n",
+			ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
 
 		printk(KERN_INFO "sisfb: Copyright (C) 2001-2005 Thomas Winischhofer\n");
 
@@ -6523,8 +6521,6 @@
 		mtrr_del(ivideo->mtrr, ivideo->video_base, ivideo->video_size);
 #endif
 
-	pci_set_drvdata(pdev, NULL);
-
 	/* If device was disabled when starting, disable
 	 * it when quitting.
 	 */
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 2d4694c..fefde7c 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -824,8 +824,7 @@
 	fb_dealloc_cmap(&info->cmap);
 	return -EINVAL;
     }
-    printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-	   info->fix.id);
+    fb_info(info, "%s frame buffer device\n", info->fix.id);
     pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */
     return 0;
 }
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index e188ada..d513ed6 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1147,7 +1147,7 @@
 
 	fb_destroy_modelist(&info->modelist);
 
-	dev->info = 0;
+	dev->info = NULL;
 
 	/* Assume info structure is freed after this point */
 	framebuffer_release(info);
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
index 44967c8..f4daa59 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -569,7 +569,7 @@
 	.id_table = ssd1307fb_i2c_id,
 	.driver = {
 		.name = "ssd1307fb",
-		.of_match_table = of_match_ptr(ssd1307fb_of_match),
+		.of_match_table = ssd1307fb_of_match,
 		.owner = THIS_MODULE,
 	},
 };
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 9c00026..f0cb279 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -706,10 +706,10 @@
 	fbiinit0 = sst_read (FBIINIT0);
 	if (par->vgapass) {
 		sst_write(FBIINIT0, fbiinit0 & ~DIS_VGA_PASSTHROUGH);
-		printk(KERN_INFO "fb%d: Enabling VGA pass-through\n", info->node );
+		fb_info(info, "Enabling VGA pass-through\n");
 	} else {
 		sst_write(FBIINIT0, fbiinit0 | DIS_VGA_PASSTHROUGH);
-		printk(KERN_INFO "fb%d: Disabling VGA pass-through\n", info->node );
+		fb_info(info, "Disabling VGA pass-through\n");
 	}
 	pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
 }
@@ -1437,8 +1437,8 @@
 		printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
 
 
-	printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n",
-	       info->node, fix->id, info->screen_base);
+	fb_info(info, "%s frame buffer device at 0x%p\n",
+		fix->id, info->screen_base);
 
 	return 0;
 
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 019a1fe..cfe8a2f 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1283,9 +1283,7 @@
 
 	sti->info = info; /* save for unregister_framebuffer() */
 
-	printk(KERN_INFO 
-	    "fb%d: %s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
-		fb->info.node, 
+	fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
 		fix->id,
 		var->xres, 
 		var->yres,
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index cc6f48b..58241b4 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -186,8 +186,6 @@
 
         framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 33df9ec..9e01322 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -198,8 +198,8 @@
 
 	if ((map->width != 8) || (map->height != 16) ||
 	    (map->depth != 1) || (map->length != 256)) {
-	    	printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
-			info->node, map->width, map->height, map->depth, map->length);
+		fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+		       map->width, map->height, map->depth, map->length);
 		return;
 	}
 
diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c
index 186c6f6..a4d05b1 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/sysimgblt.c
@@ -152,7 +152,7 @@
 			}
 			shift += bpp;
 			shift &= (32 - 1);
-			if (!l) { l = 8; s++; };
+			if (!l) { l = 8; s++; }
 		}
 
 		/* write trailing bits */
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index c000852..7fb2d69 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -232,7 +232,7 @@
 
 	case FB_BLANK_POWERDOWN: /* Poweroff */
 		break;
-	};
+	}
 
 	sbus_writel(val, &thc->thc_misc);
 
@@ -434,7 +434,7 @@
 		default:
 			j = i;
 			break;
-		};
+		}
 		par->mmap_map[i].poff = op->resource[j].start;
 	}
 
@@ -498,8 +498,6 @@
 
 	framebuffer_release(info);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 64bc28b..f761fe3 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1646,7 +1646,6 @@
 			   pci_resource_len(pdev, 1));
 	release_mem_region(pci_resource_start(pdev, 0),
 			   pci_resource_len(pdev, 0));
-	pci_set_drvdata(pdev, NULL);
 	fb_dealloc_cmap(&info->cmap);
 	framebuffer_release(info);
 }
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index c9c8e5a..f28674f 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1671,8 +1671,8 @@
 	if (tga_bus_tc)
 		pr_info("tgafb: SFB+ detected, rev=0x%02x\n",
 			par->tga_chip_rev);
-	pr_info("fb%d: %s frame buffer device at 0x%lx\n",
-		info->node, info->fix.id, (long)bar0_start);
+	fb_info(info, "%s frame buffer device at 0x%lx\n",
+		info->fix.id, (long)bar0_start);
 
 	return 0;
 
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index deb8733..7fb4e32 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,7 +250,7 @@
  */
 static int tmiofb_hw_stop(struct platform_device *dev)
 {
-	struct tmio_fb_data *data = dev->dev.platform_data;
+	struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct tmiofb_par *par = info->par;
 
@@ -311,7 +311,7 @@
  */
 static void tmiofb_hw_mode(struct platform_device *dev)
 {
-	struct tmio_fb_data *data = dev->dev.platform_data;
+	struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct fb_videomode *mode = info->mode;
 	struct tmiofb_par *par = info->par;
@@ -557,7 +557,7 @@
 static struct fb_videomode *
 tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
 {
-	struct tmio_fb_data *data = info->device->platform_data;
+	struct tmio_fb_data *data = dev_get_platdata(info->device);
 	struct fb_videomode *best = NULL;
 	int i;
 
@@ -577,7 +577,7 @@
 {
 
 	struct fb_videomode *mode;
-	struct tmio_fb_data *data = info->device->platform_data;
+	struct tmio_fb_data *data = dev_get_platdata(info->device);
 
 	mode = tmiofb_find_mode(info, var);
 	if (!mode || var->bits_per_pixel > 16)
@@ -678,7 +678,7 @@
 static int tmiofb_probe(struct platform_device *dev)
 {
 	const struct mfd_cell *cell = mfd_get_cell(dev);
-	struct tmio_fb_data *data = dev->dev.platform_data;
+	struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
 	struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
 	struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -781,8 +781,7 @@
 	if (retval < 0)
 		goto err_register_framebuffer;
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-				info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	return 0;
 
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index ab57d38..7ed9a22 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1553,7 +1553,6 @@
 	iounmap(info->screen_base);
 	release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
 	release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
-	pci_set_drvdata(dev, NULL);
 	kfree(info->pixmap.addr);
 	fb_dealloc_cmap(&info->cmap);
 	framebuffer_release(info);
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index d2e5bc3..025f14e 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1166,7 +1166,7 @@
 	int new_len;
 	unsigned char *old_fb = info->screen_base;
 	unsigned char *new_fb;
-	unsigned char *new_back = 0;
+	unsigned char *new_back = NULL;
 
 	pr_warn("Reallocating framebuffer. Addresses will change!\n");
 
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7aec6f3..256fba7 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -233,8 +233,7 @@
 static void uvesafb_free(struct uvesafb_ktask *task)
 {
 	if (task) {
-		if (task->done)
-			kfree(task->done);
+		kfree(task->done);
 		kfree(task);
 	}
 }
@@ -1332,8 +1331,8 @@
 				FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
 	info->fix.line_length = mode->bytes_per_scan_line;
 
-out:	if (crtc != NULL)
-		kfree(crtc);
+out:
+	kfree(crtc);
 	uvesafb_free(task);
 
 	return err;
@@ -1771,13 +1770,11 @@
 			"using %dk, total %dk\n", info->fix.smem_start,
 			info->screen_base, info->fix.smem_len/1024,
 			par->vbe_ib.total_memory * 64);
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-			info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 
 	err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
 	if (err != 0)
-		printk(KERN_WARNING "fb%d: failed to register attributes\n",
-			info->node);
+		fb_warn(info, "failed to register attributes\n");
 
 	return 0;
 
@@ -1793,8 +1790,7 @@
 	fb_destroy_modedb(info->monspecs.modedb);
 	fb_dealloc_cmap(&info->cmap);
 out:
-	if (par->vbe_modes)
-		kfree(par->vbe_modes);
+	kfree(par->vbe_modes);
 
 	framebuffer_release(info);
 	return err;
@@ -1817,12 +1813,9 @@
 		fb_dealloc_cmap(&info->cmap);
 
 		if (par) {
-			if (par->vbe_modes)
-				kfree(par->vbe_modes);
-			if (par->vbe_state_orig)
-				kfree(par->vbe_state_orig);
-			if (par->vbe_state_saved)
-				kfree(par->vbe_state_saved);
+			kfree(par->vbe_modes);
+			kfree(par->vbe_state_orig);
+			kfree(par->vbe_state_saved);
 		}
 
 		framebuffer_release(info);
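
The uvesafb cleanups all rely on the same guarantee: kfree(NULL) is defined to be a no-op, so every `if (p) kfree(p);` guard is redundant (checkpatch flags the pattern). The unguarded form is equivalent and shorter; uvesafb_par is the driver's own private structure:

```c
static void example_free(struct uvesafb_par *par)
{
	/* kfree() ignores NULL pointers, so no guards are needed. */
	kfree(par->vbe_modes);
	kfree(par->vbe_state_orig);
	kfree(par->vbe_state_saved);
}
```
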
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index 3f5a041..e287ebc 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -392,7 +392,7 @@
 	if ((err = register_framebuffer(&p->info)) != 0)
 		goto out_cmap_free;
 
-	printk(KERN_INFO "fb%d: valkyrie frame buffer device\n", p->info.node);
+	fb_info(&p->info, "valkyrie frame buffer device\n");
 	return 0;
 
  out_cmap_free:
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index bd83233..1c7da3b 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -489,8 +489,7 @@
 		fb_dealloc_cmap(&info->cmap);
 		goto err;
 	}
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 err:
 	if (info->screen_base)
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index ee5985e..70a897b 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -390,9 +390,8 @@
 			   struct fb_info *info)
 {
 	if (var->vmode & FB_VMODE_YWRAP) {
-		if (var->yoffset < 0
-		    || var->yoffset >= info->var.yres_virtual
-		    || var->xoffset)
+		if (var->yoffset >= info->var.yres_virtual ||
+		    var->xoffset)
 			return -EINVAL;
 	} else {
 		if (var->xoffset + info->var.xres > info->var.xres_virtual ||
@@ -527,9 +526,8 @@
 		goto err2;
 	platform_set_drvdata(dev, info);
 
-	printk(KERN_INFO
-	       "fb%d: Virtual frame buffer device, using %ldK of video memory\n",
-	       info->node, videomemorysize >> 10);
+	fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
+		videomemorysize >> 10);
 	return 0;
 err2:
 	fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 2827333..283d335 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1377,8 +1377,7 @@
 		goto err_check_var;
 	}
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n",
-	       info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	platform_set_drvdata(dev, info);
 
 	return 0;
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 897484903..b30e5a4 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -365,7 +365,7 @@
 	if (!fb_mem_virt) {
 		pr_err("%s: Failed to allocate framebuffer\n", __func__);
 		return -ENOMEM;
-	};
+	}
 
 	fbi->fb.fix.smem_start	= fb_mem_phys;
 	fbi->fb.fix.smem_len	= fb_mem_len;
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index e9557fa..8bc6e09 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -266,7 +266,7 @@
 
 	rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
 	if (rv < 0) {
-		printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+		fb_err(info, "cannot set requested pixclock, keeping old value\n");
 		return;
 	}
 
@@ -335,7 +335,7 @@
 	rv = svga_match_format (vt8623fb_formats, var, NULL);
 	if (rv < 0)
 	{
-		printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+		fb_err(info, "unsupported mode requested\n");
 		return rv;
 	}
 
@@ -354,21 +354,23 @@
 	mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
 	if (mem > info->screen_size)
 	{
-		printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+		fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+		       mem >> 10, (unsigned int) (info->screen_size >> 10));
 		return -EINVAL;
 	}
 
 	/* Text mode is limited to 256 kB of memory */
 	if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
 	{
-		printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
+		fb_err(info, "text framebuffer size too large (%d kB requested, 256 kB possible)\n",
+		       mem >> 10);
 		return -EINVAL;
 	}
 
 	rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
 	if (rv < 0)
 	{
-		printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+		fb_err(info, "invalid timings requested\n");
 		return rv;
 	}
 
@@ -474,32 +476,32 @@
 	mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
 	switch (mode) {
 	case 0:
-		pr_debug("fb%d: text mode\n", info->node);
+		fb_dbg(info, "text mode\n");
 		svga_set_textmode_vga_regs(par->state.vgabase);
 		svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
 		svga_wcrt_mask(par->state.vgabase, 0x11, 0x60, 0x70);
 		break;
 	case 1:
-		pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor\n");
 		vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
 		svga_wseq_mask(par->state.vgabase, 0x15, 0x20, 0xFE);
 		svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
 		break;
 	case 2:
-		pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+		fb_dbg(info, "4 bit pseudocolor, planar\n");
 		svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
 		svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
 		break;
 	case 3:
-		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+		fb_dbg(info, "8 bit pseudocolor\n");
 		svga_wseq_mask(par->state.vgabase, 0x15, 0x22, 0xFE);
 		break;
 	case 4:
-		pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+		fb_dbg(info, "5/6/5 truecolor\n");
 		svga_wseq_mask(par->state.vgabase, 0x15, 0xB6, 0xFE);
 		break;
 	case 5:
-		pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+		fb_dbg(info, "8/8/8 truecolor\n");
 		svga_wseq_mask(par->state.vgabase, 0x15, 0xAE, 0xFE);
 		break;
 	default:
@@ -584,27 +586,27 @@
 
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
-		pr_debug("fb%d: unblank\n", info->node);
+		fb_dbg(info, "unblank\n");
 		svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
 		break;
 	case FB_BLANK_NORMAL:
-		pr_debug("fb%d: blank\n", info->node);
+		fb_dbg(info, "blank\n");
 		svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
-		pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
+		fb_dbg(info, "DPMS standby (hsync off)\n");
 		svga_wcrt_mask(par->state.vgabase, 0x36, 0x10, 0x30);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
-		pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
+		fb_dbg(info, "DPMS suspend (vsync off)\n");
 		svga_wcrt_mask(par->state.vgabase, 0x36, 0x20, 0x30);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
 	case FB_BLANK_POWERDOWN:
-		pr_debug("fb%d: DPMS off (no sync)\n", info->node);
+		fb_dbg(info, "DPMS off (no sync)\n");
 		svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
 		svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
 		break;
@@ -769,12 +771,12 @@
 
 	rc = register_framebuffer(info);
 	if (rc < 0) {
-		dev_err(info->device, "cannot register framebugger\n");
+		dev_err(info->device, "cannot register framebuffer\n");
 		goto err_reg_fb;
 	}
 
-	printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
-		 pci_name(dev), info->fix.smem_len >> 20);
+	fb_info(info, "%s on %s, %d MB RAM\n",
+		info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
 
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
@@ -829,7 +831,6 @@
 		pci_release_regions(dev);
 /*		pci_disable_device(dev); */
 
-		pci_set_drvdata(dev, NULL);
 		framebuffer_release(info);
 	}
 }
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 7a299e95..10951c8 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -680,7 +680,7 @@
 	par = info->par;
 	platform_set_drvdata(pdev, info);
 
-	inf = pdev->dev.platform_data;
+	inf = dev_get_platdata(&pdev->dev);
 	par->chip_id = chip_id;
 	par->mach = inf;
 	par->fastpll_mode = 0;
@@ -761,10 +761,9 @@
 	err |= device_create_file(&pdev->dev, &dev_attr_flip);
 
 	if (err != 0)
-		printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
-				info->node, err);
+		fb_warn(info, "failed to register attributes (%d)\n", err);
 
-	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 out:
 	if (info) {
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 3072f30..537d199 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -372,14 +372,12 @@
 	}
 
 	ret = device_create_file(&pdev->dev, &dev_attr_contrast);
-	if (ret < 0) {
-		printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
-			fbi->fb.node, ret);
-	}
+	if (ret < 0)
+		fb_warn(&fbi->fb, "failed to register attributes (%d)\n", ret);
 
-	printk(KERN_INFO "fb%d: %s frame buffer at 0x%lx-0x%lx\n",
-	       fbi->fb.node, fbi->fb.fix.id, fbi->fb.fix.smem_start,
-	       fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
+	fb_info(&fbi->fb, "%s frame buffer at 0x%lx-0x%lx\n",
+		fbi->fb.fix.id, fbi->fb.fix.smem_start,
+		fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
 
 	return 0;
 }
@@ -411,7 +409,7 @@
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= DRIVER_NAME,
-		.of_match_table = of_match_ptr(wmt_dt_ids),
+		.of_match_table = wmt_dt_ids,
 	},
 };
 
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index 4aaeb18..b0a9f34 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -169,13 +169,13 @@
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "wmt_ge_rops",
-		.of_match_table = of_match_ptr(wmt_dt_ids),
+		.of_match_table = wmt_dt_ids,
 	},
 };
 
 module_platform_driver(wmt_ge_rops_driver);
 
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
 MODULE_DESCRIPTION("Accelerators for raster operations using "
 		   "WonderMedia Graphics Engine");
 MODULE_LICENSE("GPL v2");
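
Dropping of_match_ptr() in ssd1307fb, wm8505fb and wmt_ge_rops is deliberate: the macro compiles to NULL when CONFIG_OF is unset, which would leave the match table defined but unreferenced, and these drivers are device-tree-only in any case. For reference, the macro from include/linux/of.h:

```c
#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif
```
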
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 84c664e..6ff1a91 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -260,10 +260,9 @@
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(drvdata->regs)) {
-			rc = PTR_ERR(drvdata->regs);
-			goto err_region;
-		}
+		if (IS_ERR(drvdata->regs))
+			return PTR_ERR(drvdata->regs);
+
 		drvdata->regs_phys = res->start;
 	}
 
@@ -279,11 +278,7 @@
 
 	if (!drvdata->fb_virt) {
 		dev_err(dev, "Could not allocate frame buffer memory\n");
-		rc = -ENOMEM;
-		if (drvdata->flags & BUS_ACCESS_FLAG)
-			goto err_fbmem;
-		else
-			goto err_region;
+		return -ENOMEM;
 	}
 
 	/* Clear (turn to black) the framebuffer */
@@ -363,14 +358,6 @@
 	/* Turn off the display */
 	xilinx_fb_out32(drvdata, REG_CTRL, 0);
 
-err_fbmem:
-	if (drvdata->flags & BUS_ACCESS_FLAG)
-		devm_iounmap(dev, drvdata->regs);
-
-err_region:
-	kfree(drvdata);
-	dev_set_drvdata(dev, NULL);
-
 	return rc;
 }
 
@@ -395,17 +382,12 @@
 	/* Turn off the display */
 	xilinx_fb_out32(drvdata, REG_CTRL, 0);
 
-	/* Release the resources, as allocated based on interface */
-	if (drvdata->flags & BUS_ACCESS_FLAG)
-		devm_iounmap(dev, drvdata->regs);
 #ifdef CONFIG_PPC_DCR
-	else
+	/* Release the resources, as allocated based on interface */
+	if (!(drvdata->flags & BUS_ACCESS_FLAG))
 		dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
 #endif
 
-	kfree(drvdata);
-	dev_set_drvdata(dev, NULL);
-
 	return 0;
 }
 
@@ -413,7 +395,7 @@
  * OF bus binding
  */
 
-static int xilinxfb_of_probe(struct platform_device *op)
+static int xilinxfb_of_probe(struct platform_device *pdev)
 {
 	const u32 *prop;
 	u32 tft_access = 0;
@@ -425,17 +407,15 @@
 	pdata = xilinx_fb_default_pdata;
 
 	/* Allocate the driver data region */
-	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-	if (!drvdata) {
-		dev_err(&op->dev, "Couldn't allocate device private record\n");
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
 		return -ENOMEM;
-	}
 
 	/*
 	 * To check whether the core is connected directly to DCR or BUS
 	 * interface and initialize the tft_access accordingly.
 	 */
-	of_property_read_u32(op->dev.of_node, "xlnx,dcr-splb-slave-if",
+	of_property_read_u32(pdev->dev.of_node, "xlnx,dcr-splb-slave-if",
 			     &tft_access);
 
 	/*
@@ -448,40 +428,39 @@
 #ifdef CONFIG_PPC_DCR
 	else {
 		int start;
-		start = dcr_resource_start(op->dev.of_node, 0);
-		drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
-		drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
+		start = dcr_resource_start(pdev->dev.of_node, 0);
+		drvdata->dcr_len = dcr_resource_len(pdev->dev.of_node, 0);
+		drvdata->dcr_host = dcr_map(pdev->dev.of_node, start, drvdata->dcr_len);
 		if (!DCR_MAP_OK(drvdata->dcr_host)) {
-			dev_err(&op->dev, "invalid DCR address\n");
-			kfree(drvdata);
+			dev_err(&pdev->dev, "invalid DCR address\n");
 			return -ENODEV;
 		}
 	}
 #endif
 
-	prop = of_get_property(op->dev.of_node, "phys-size", &size);
+	prop = of_get_property(pdev->dev.of_node, "phys-size", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.screen_width_mm = prop[0];
 		pdata.screen_height_mm = prop[1];
 	}
 
-	prop = of_get_property(op->dev.of_node, "resolution", &size);
+	prop = of_get_property(pdev->dev.of_node, "resolution", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.xres = prop[0];
 		pdata.yres = prop[1];
 	}
 
-	prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
+	prop = of_get_property(pdev->dev.of_node, "virtual-resolution", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.xvirt = prop[0];
 		pdata.yvirt = prop[1];
 	}
 
-	if (of_find_property(op->dev.of_node, "rotate-display", NULL))
+	if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
 		pdata.rotate_screen = 1;
 
-	dev_set_drvdata(&op->dev, drvdata);
-	return xilinxfb_assign(op, drvdata, &pdata);
+	dev_set_drvdata(&pdev->dev, drvdata);
+	return xilinxfb_assign(pdev, drvdata, &pdata);
 }
 
 static int xilinxfb_of_remove(struct platform_device *op)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f572c0..c444654 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -275,9 +275,8 @@
 	__le32 v;
 	s64 target;
 
-	vb->vdev->config->get(vb->vdev,
-			      offsetof(struct virtio_balloon_config, num_pages),
-			      &v, sizeof(v));
+	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
 	target = le32_to_cpu(v);
 	return target - vb->num_pages;
 }
@@ -286,9 +285,8 @@
 {
 	__le32 actual = cpu_to_le32(vb->num_pages);
 
-	vb->vdev->config->set(vb->vdev,
-			      offsetof(struct virtio_balloon_config, actual),
-			      &actual, sizeof(actual));
+	virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+		      &actual);
 }
 
 static int balloon(void *_vballoon)
@@ -513,7 +511,7 @@
 	kfree(vb);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtballoon_freeze(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
@@ -556,7 +554,7 @@
 	.probe =	virtballoon_probe,
 	.remove =	virtballoon_remove,
 	.config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 	.freeze	=	virtballoon_freeze,
 	.restore =	virtballoon_restore,
 #endif
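
The balloon conversion swaps hand-written offsetof() arithmetic for the typed virtio_cread()/virtio_cwrite() helpers, which compute the offset from the named structure field and type-check the destination pointer against it at compile time. Read side, as used in towards_target() above:

```c
#include <linux/virtio_config.h>

static u32 example_read_target(struct virtio_device *vdev)
{
	__le32 v;

	/* Expands to a config->get() at
	 * offsetof(struct virtio_balloon_config, num_pages), with the
	 * size and type of *(&v) checked against the field. */
	virtio_cread(vdev, struct virtio_balloon_config, num_pages, &v);
	return le32_to_cpu(v);
}
```
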
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 1ba0d68..c600ccf 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -219,13 +219,14 @@
 /* Transport interface */
 
 /* the notify function used when creating a virt queue */
-static void vm_notify(struct virtqueue *vq)
+static bool vm_notify(struct virtqueue *vq)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 
 	/* We write the queue's selector into the notification register to
 	 * signal the other end */
 	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	return true;
 }
 
 /* Notify all virtqueues on an interrupt. */
@@ -470,7 +471,7 @@
 
 	/* Check magic value */
 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
-	if (memcmp(&magic, "virt", 4) != 0) {
+	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
 		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
 		return -ENODEV;
 	}
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 98917fc..a37c699 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -197,13 +197,14 @@
 }
 
 /* the notify function used when creating a virt queue */
-static void vp_notify(struct virtqueue *vq)
+static bool vp_notify(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	/* we write the queue's selector into the notification register to
 	 * signal the other end */
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+	return true;
 }
 
 /* Handle a configuration change: Tell driver if it wants to know. */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6b4a4db..28b5338 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -81,7 +81,7 @@
 	u16 last_used_idx;
 
 	/* How to notify other side. FIXME: commonalize hcalls! */
-	void (*notify)(struct virtqueue *vq);
+	bool (*notify)(struct virtqueue *vq);
 
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
@@ -173,6 +173,8 @@
 	head = vq->free_head;
 	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
 	vq->vring.desc[head].addr = virt_to_phys(desc);
+	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+	kmemleak_ignore(desc);
 	vq->vring.desc[head].len = i * sizeof(struct vring_desc);
 
 	/* Update free pointer */
@@ -428,13 +430,22 @@
  * @vq: the struct virtqueue
  *
  * This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
  */
-void virtqueue_notify(struct virtqueue *_vq)
+bool virtqueue_notify(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
+	if (unlikely(vq->broken))
+		return false;
+
 	/* Prod other side to tell it about changes. */
-	vq->notify(_vq);
+	if (!vq->notify(_vq)) {
+		vq->broken = true;
+		return false;
+	}
+	return true;
 }
 EXPORT_SYMBOL_GPL(virtqueue_notify);
 
@@ -447,11 +458,14 @@
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
  */
-void virtqueue_kick(struct virtqueue *vq)
+bool virtqueue_kick(struct virtqueue *vq)
 {
 	if (virtqueue_kick_prepare(vq))
-		virtqueue_notify(vq);
+		return virtqueue_notify(vq);
+	return true;
 }
 EXPORT_SYMBOL_GPL(virtqueue_kick);
 
@@ -742,7 +756,7 @@
 				      struct virtio_device *vdev,
 				      bool weak_barriers,
 				      void *pages,
-				      void (*notify)(struct virtqueue *),
+				      bool (*notify)(struct virtqueue *),
 				      void (*callback)(struct virtqueue *),
 				      const char *name)
 {
@@ -837,4 +851,12 @@
 }
 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
 
+bool virtqueue_is_broken(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	return vq->broken;
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
 MODULE_LICENSE("GPL");
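
With the notify callbacks returning bool, a failed host notification now marks the ring broken and the failure propagates up through virtqueue_notify() and virtqueue_kick(), so drivers can tell a dead transport from a merely slow one. A minimal caller-side sketch:

```c
#include <linux/virtio.h>

static void example_submit(struct virtqueue *vq, struct scatterlist *sg)
{
	if (virtqueue_add_outbuf(vq, sg, 1, sg, GFP_ATOMIC) < 0)
		return;

	/* Returns false if the notify hook failed; the queue is then
	 * flagged broken and virtqueue_is_broken() reports it. */
	if (!virtqueue_kick(vq))
		WARN_ON(!virtqueue_is_broken(vq));
}
```
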
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 41613f9..02df3b1 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -255,17 +255,17 @@
 static void ds1wm_up(struct ds1wm_data *ds1wm_data)
 {
 	int divisor;
-	struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data;
+	struct device *dev = &ds1wm_data->pdev->dev;
+	struct ds1wm_driver_data *plat = dev_get_platdata(dev);
 
 	if (ds1wm_data->cell->enable)
 		ds1wm_data->cell->enable(ds1wm_data->pdev);
 
 	divisor = ds1wm_find_divisor(plat->clock_rate);
-	dev_dbg(&ds1wm_data->pdev->dev,
-		"found divisor 0x%x for clock %d\n", divisor, plat->clock_rate);
+	dev_dbg(dev, "found divisor 0x%x for clock %d\n",
+		divisor, plat->clock_rate);
 	if (divisor == 0) {
-		dev_err(&ds1wm_data->pdev->dev,
-			"no suitable divisor for %dHz clock\n",
+		dev_err(dev, "no suitable divisor for %dHz clock\n",
 			plat->clock_rate);
 		return;
 	}
@@ -481,7 +481,7 @@
 	ds1wm_data->cell = mfd_get_cell(pdev);
 	if (!ds1wm_data->cell)
 		return -ENODEV;
-	plat = pdev->dev.platform_data;
+	plat = dev_get_platdata(&pdev->dev);
 	if (!plat)
 		return -ENODEV;
 
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 264ad1c..e36b18b 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -56,7 +56,7 @@
 
 static int w1_gpio_probe_dt(struct platform_device *pdev)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	int gpio;
 
@@ -92,7 +92,7 @@
 		}
 	}
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "No configuration data\n");
@@ -154,7 +154,7 @@
 static int w1_gpio_remove(struct platform_device *pdev)
 {
 	struct w1_bus_master *master = platform_get_drvdata(pdev);
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(0);
@@ -171,7 +171,7 @@
 
 static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(0);
@@ -181,7 +181,7 @@
 
 static int w1_gpio_resume(struct platform_device *pdev)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(1);
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index fa932c2..66efa96 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -709,7 +709,7 @@
 
 	sl->owner = THIS_MODULE;
 	sl->master = dev;
-	set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+	set_bit(W1_SLAVE_ACTIVE, &sl->flags);
 
 	memset(&msg, 0, sizeof(msg));
 	memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
@@ -866,7 +866,7 @@
 
 	sl = w1_slave_search_device(dev, tmp);
 	if (sl) {
-		set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+		set_bit(W1_SLAVE_ACTIVE, &sl->flags);
 	} else {
 		if (rn && tmp->crc == w1_calc_crc8((u8 *)&rn_le, 7))
 			w1_attach_slave_device(dev, tmp);
@@ -984,14 +984,14 @@
 	struct w1_slave *sl, *sln;
 
 	list_for_each_entry(sl, &dev->slist, w1_slave_entry)
-		clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
+		clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
 
 	w1_search_devices(dev, search_type, cb);
 
 	list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
-		if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl)
+		if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl)
 			w1_slave_detach(sl);
-		else if (test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags))
+		else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
 			sl->ttl = dev->slave_ttl;
 	}
 
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 45908e5..ca8081a 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -67,8 +67,8 @@
 	struct w1_reg_num	reg_num;
 	atomic_t		refcnt;
 	u8			rom[9];
-	u32			flags;
 	int			ttl;
+	unsigned long		flags;
 
 	struct w1_master	*master;
 	struct w1_family	*family;
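
The w1 type change is the real fix behind the cast removals above: the set_bit()/clear_bit()/test_bit() helpers operate on unsigned long words, so pointing them at a casted u32 field makes the read-modify-write cover eight bytes on 64-bit kernels: it can clobber the neighbouring field on little-endian and address the wrong bytes entirely on big-endian. Widening the field makes the atomics well-defined:

```c
struct example_slave {
	unsigned long flags;	/* was u32: too narrow for set_bit() */
};

static void example_mark_active(struct example_slave *sl)
{
	set_bit(0, &sl->flags);	/* no cast, no out-of-bounds access */
}
```
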
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 23eae5c..c794ea1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@
 
 config SWIOTLB_XEN
 	def_bool y
-	depends on PCI && X86
 	select SWIOTLB
 
 config XEN_TMEM
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b232908..55ea73f7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -596,7 +596,7 @@
 	}
 }
 
-static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+static int balloon_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -616,7 +616,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+static struct notifier_block balloon_cpu_notifier = {
 	.notifier_call	= balloon_cpu_notify,
 };
 
@@ -641,7 +641,7 @@
 
 	balloon_stats.current_pages = xen_pv_domain()
 		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
-		: max_pfn;
+		: get_num_physpages();
 	balloon_stats.target_pages  = balloon_stats.current_pages;
 	balloon_stats.balloon_low   = 0;
 	balloon_stats.balloon_high  = 0;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8b3a69a..5de2063 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -305,7 +305,7 @@
 	if (rc < 0)
 		goto err;
 
-	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
 				       u->name, evtchn);
 	if (rc < 0)
 		goto err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298..62ccf54 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
 #include <xen/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -898,8 +899,16 @@
 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
 						&map_ops[i].status, __func__);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			if (map_ops[i].status)
+				continue;
+			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@
 	if (ret)
 		return ret;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+					INVALID_P2M_ENTRY);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
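
For auto-translated guests (ARM, PVH) the hypervisor performs the address translation, but the kernel-side lookup consulted by helpers such as the swiotlb-xen code still has to learn about each grant mapping; the new loops record the bus address of every successful map operation and invalidate the slot again on unmap. On classic x86 PV these calls degenerate to sanity checks, hence the "basically a nop" comments. The per-entry pattern, with pfn and mfn standing in for the frame numbers:

```c
/* Sketch: record the mapping, then invalidate it on unmap. */
set_phys_to_machine(pfn, mfn);			/* grant mapped */
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);	/* grant unmapped */
```
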
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 18fff88..d15f6e8 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,6 +26,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "../pci/pci.h"
+#include <asm/pci_x86.h>
 
 static bool __read_mostly pci_seg_supported = true;
 
@@ -192,3 +193,47 @@
 }
 
 arch_initcall(register_xen_pci_notifier);
+
+#ifdef CONFIG_PCI_MMCONFIG
+static int __init xen_mcfg_late(void)
+{
+	struct pci_mmcfg_region *cfg;
+	int rc;
+
+	if (!xen_initial_domain())
+		return 0;
+
+	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
+		return 0;
+
+	if (list_empty(&pci_mmcfg_list))
+		return 0;
+
+	/* Check whether they are in the right area. */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		struct physdev_pci_mmcfg_reserved r;
+
+		r.address = cfg->address;
+		r.segment = cfg->segment;
+		r.start_bus = cfg->start_bus;
+		r.end_bus = cfg->end_bus;
+		r.flags = XEN_PCI_MMCFG_RESERVED;
+
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
+		switch (rc) {
+		case 0:
+		case -ENOSYS:
+			continue;
+
+		default:
+			pr_warn("Failed to report MMCONFIG reservation"
+				" state for %s to hypervisor"
+				" (%d)\n",
+				cfg->name, rc);
+		}
+	}
+	return 0;
+}
+/*
+ * Needs to be done after acpi_init which are subsys_initcall.
+ */
+subsys_initcall_sync(xen_mcfg_late);
+#endif
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 99db9e1..2f3528e 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -84,7 +84,7 @@
 static int xen_allocate_irq(struct pci_dev *pdev)
 {
 	return request_irq(pdev->irq, do_hvm_evtchn_intr,
-			IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
 			"xen-platform-pci", pdev);
 }
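
The IRQF_DISABLED removals in evtchn and platform-pci change nothing at runtime: since genirq started running every handler with interrupts disabled (v2.6.35), the flag has been a documented no-op kept only for compatibility, and it was being phased out of the tree. The trimmed request is equivalent:

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_request(struct pci_dev *pdev, irq_handler_t handler)
{
	/* IRQF_DISABLED is ignored by genirq, so omitting it requests
	 * exactly the same handler semantics. */
	return request_irq(pdev->irq, handler,
			   IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
			   "xen-platform-pci", pdev);
}
```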
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1b2277c..a224bc7 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -42,12 +42,31 @@
 #include <xen/page.h>
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
+
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+					    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
+
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+	return dma_mask;
+}
+#endif
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -56,17 +75,17 @@
 
 static u64 start_dma_addr;
 
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
 	return phys_to_machine(XPADDR(paddr)).maddr;
 }
 
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
 	return machine_to_phys(XMADDR(baddr)).paddr;
 }
 
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
 {
 	return xen_phys_to_bus(virt_to_phys(address));
 }
@@ -89,7 +108,7 @@
 	return 1;
 }
 
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long pfn = PFN_DOWN(p);
 	unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +145,8 @@
 {
 	int i, rc;
 	int dma_bits;
+	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -135,9 +156,9 @@
 
 		do {
 			rc = xen_create_contiguous_region(
-				(unsigned long)buf + (i << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT),
 				get_order(slabs << IO_TLB_SHIFT),
-				dma_bits);
+				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < max_dma_bits);
 		if (rc)
 			return rc;
@@ -263,7 +284,6 @@
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -278,8 +298,12 @@
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremapped virtual address for
+	 * which virt_to_phys doesn't return the corresponding physical
+	 * address. In fact on ARM virt_to_phys only works for kernel
+	 * direct-mapped RAM. See also the comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -287,18 +311,21 @@
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point dma_handle is the physical address, next we are
+	 * going to set it to the machine address.
+	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+	 * to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
-						 fls64(dma_mask)) != 0) {
-			free_pages(vstart, order);
+		if (xen_create_contiguous_region(phys, order,
+						 fls64(dma_mask), dma_handle) != 0) {
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
-		*dma_handle = virt_to_machine(ret).maddr;
 	}
 	memset(ret, 0, size);
 	return ret;
@@ -319,13 +346,15 @@
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* Do not use virt_to_phys(vaddr): on ARM it would not return the
+	 * correct physical address for this buffer. */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
@@ -352,16 +381,25 @@
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+		/* we are not interested in the dma_addr returned by
+		 * xen_dma_map_page, only in the potential cache flushes executed
+		 * by the function. */
+		xen_dma_map_page(dev, page, offset, size, dir, attrs);
 		return dev_addr;
+	}
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+					map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -384,12 +422,15 @@
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir)
+			     size_t size, enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
+	xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +453,7 @@
 			    size_t size, enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	xen_unmap_single(hwdev, dev_addr, size, dir);
+	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -435,11 +476,15 @@
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (target == SYNC_FOR_CPU)
+		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr)) {
+	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
+
+	if (target == SYNC_FOR_DEVICE)
+		xen_dma_sync_single_for_device(hwdev, paddr, size, dir);
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
@@ -502,16 +547,26 @@
 								 sg->length,
 								 dir);
 			if (map == SWIOTLB_MAP_ERROR) {
+				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
-				return DMA_ERROR_CODE;
+				return 0;
 			}
 			sg->dma_address = xen_phys_to_bus(map);
-		} else
+		} else {
+			/* we are not interested in the dma_addr returned by
+			 * xen_dma_map_page, only in the potential cache flushes executed
+			 * by the function. */
+			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+						paddr & ~PAGE_MASK,
+						sg->length,
+						dir,
+						attrs);
 			sg->dma_address = dev_addr;
+		}
 		sg_dma_len(sg) = sg->length;
 	}
 	return nelems;
@@ -533,7 +588,7 @@
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +648,15 @@
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
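
The new xen_swiotlb_set_dma_mask() simply revalidates the requested
mask against the bounce buffer via xen_swiotlb_dma_supported(). On a
configuration whose dma_map_ops wires .set_dma_mask to this helper
(as the Xen/ARM series does), a driver's usual mask negotiation is
what ends up exercising it; sketch only, names hypothetical:

    /* try 64-bit DMA first, fall back to 32-bit */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
            return -EIO;	/* neither mask covers the swiotlb */
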
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
index 40cc54c..2f96754 100644
--- a/fs/9p/cache.h
+++ b/fs/9p/cache.h
@@ -101,6 +101,18 @@
 
 #else /* CONFIG_9P_FSCACHE */
 
+static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
+{
+}
+
 static inline int v9fs_fscache_release_page(struct page *page,
 					    gfp_t gfp) {
 	return 1;
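
The hunk above completes a standard kernel pattern: when the config
option is disabled, the header provides empty static inline stubs so
every caller can drop its #ifdef, as the follow-up hunks in
vfs_file.c and vfs_inode*.c below do. The generic shape, with
hypothetical names:

    #ifdef CONFIG_FOO_CACHE
    void foo_cache_get_cookie(struct inode *inode);
    #else
    static inline void foo_cache_get_cookie(struct inode *inode)
    {
    }
    #endif

    /* callers now say foo_cache_get_cookie(inode) unconditionally;
     * the stub compiles away when CONFIG_FOO_CACHE is off */
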
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index aa5ecf4..a0df3e7 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -105,10 +105,8 @@
 		v9inode->writeback_fid = (void *) fid;
 	}
 	mutex_unlock(&v9inode->v_mutex);
-#ifdef CONFIG_9P_FSCACHE
 	if (v9ses->cache)
 		v9fs_cache_inode_set_cookie(inode, file);
-#endif
 	return 0;
 out_error:
 	p9_client_clunk(file->private_data);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 94de6d1..4e65aa9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -448,9 +448,7 @@
 	clear_inode(inode);
 	filemap_fdatawrite(inode->i_mapping);
 
-#ifdef CONFIG_9P_FSCACHE
 	v9fs_cache_inode_put_cookie(inode);
-#endif
 	/* clunk the fid stashed in writeback_fid */
 	if (v9inode->writeback_fid) {
 		p9_client_clunk(v9inode->writeback_fid);
@@ -531,9 +529,7 @@
 		goto error;
 
 	v9fs_stat2inode(st, inode, sb);
-#ifdef CONFIG_9P_FSCACHE
 	v9fs_cache_inode_get_cookie(inode);
-#endif
 	unlock_new_inode(inode);
 	return inode;
 error:
@@ -905,10 +901,8 @@
 		goto error;
 
 	file->private_data = fid;
-#ifdef CONFIG_9P_FSCACHE
 	if (v9ses->cache)
 		v9fs_cache_inode_set_cookie(dentry->d_inode, file);
-#endif
 
 	*opened |= FILE_CREATED;
 out:
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index a7c4814..4c10edec2 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -141,9 +141,7 @@
 		goto error;
 
 	v9fs_stat2inode_dotl(st, inode);
-#ifdef CONFIG_9P_FSCACHE
 	v9fs_cache_inode_get_cookie(inode);
-#endif
 	retval = v9fs_get_acl(inode, fid);
 	if (retval)
 		goto error;
@@ -355,10 +353,8 @@
 	if (err)
 		goto err_clunk_old_fid;
 	file->private_data = ofid;
-#ifdef CONFIG_9P_FSCACHE
 	if (v9ses->cache)
 		v9fs_cache_inode_set_cookie(inode, file);
-#endif
 	*opened |= FILE_CREATED;
 out:
 	v9fs_put_acl(dacl, pacl);
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 585adaf..c770337 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -43,9 +43,12 @@
  * ADFS file system superblock data in memory
  */
 struct adfs_sb_info {
-	struct adfs_discmap *s_map;	/* bh list containing map		 */
-	struct adfs_dir_ops *s_dir;	/* directory operations			 */
-
+	union { struct {
+		struct adfs_discmap *s_map;	/* bh list containing map	 */
+		struct adfs_dir_ops *s_dir;	/* directory operations		 */
+		};
+		struct rcu_head rcu;		/* used only at shutdown time	 */
+	};
 	kuid_t		s_uid;		/* owner uid				 */
 	kgid_t		s_gid;		/* owner gid				 */
 	umode_t		s_owner_mask;	/* ADFS owner perm -> unix perm		 */
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 0ff4bae..7b3003c 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -123,8 +123,7 @@
 	for (i = 0; i < asb->s_map_size; i++)
 		brelse(asb->s_map[i].dm_bh);
 	kfree(asb->s_map);
-	kfree(asb);
-	sb->s_fs_info = NULL;
+	kfree_rcu(asb, rcu);
 }
 
 static int adfs_show_options(struct seq_file *seq, struct dentry *root)
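
kfree_rcu() defers the actual kfree() until an RCU grace period has
elapsed, so lockless readers that may still dereference sb->s_fs_info
never see freed memory; the rcu_head added to adfs_sb_info above is
the bookkeeping it needs. The generic form (hypothetical names):

    struct my_sb_info {
            /* ... payload fields ... */
            struct rcu_head rcu;	/* used only at teardown */
    };

    static void my_put_super(struct super_block *sb)
    {
            struct my_sb_info *sbi = sb->s_fs_info;

            /* free only after any in-flight RCU readers are done */
            kfree_rcu(sbi, rcu);
    }
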
diff --git a/fs/aio.c b/fs/aio.c
index 067e3d3..823efcb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,10 +36,10 @@
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
 #include <linux/compat.h>
-#include <linux/anon_inodes.h>
 #include <linux/migrate.h>
 #include <linux/ramfs.h>
 #include <linux/percpu-refcount.h>
+#include <linux/mount.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -152,12 +152,67 @@
 static struct kmem_cache	*kiocb_cachep;
 static struct kmem_cache	*kioctx_cachep;
 
+static struct vfsmount *aio_mnt;
+
+static const struct file_operations aio_ring_fops;
+static const struct address_space_operations aio_ctx_aops;
+
+static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
+{
+	struct qstr this = QSTR_INIT("[aio]", 5);
+	struct file *file;
+	struct path path;
+	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
+	if (IS_ERR(inode))
+		return ERR_CAST(inode);
+
+	inode->i_mapping->a_ops = &aio_ctx_aops;
+	inode->i_mapping->private_data = ctx;
+	inode->i_size = PAGE_SIZE * nr_pages;
+
+	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
+	if (!path.dentry) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	path.mnt = mntget(aio_mnt);
+
+	d_instantiate(path.dentry, inode);
+	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
+	if (IS_ERR(file)) {
+		path_put(&path);
+		return file;
+	}
+
+	file->f_flags = O_RDWR;
+	file->private_data = ctx;
+	return file;
+}
+
+static struct dentry *aio_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	static const struct dentry_operations ops = {
+		.d_dname	= simple_dname,
+	};
+	return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1);
+}
+
 /* aio_setup
  *	Creates the slab caches used by the aio routines, panic on
  *	failure as this is done early during the boot sequence.
  */
 static int __init aio_setup(void)
 {
+	static struct file_system_type aio_fs = {
+		.name		= "aio",
+		.mount		= aio_mount,
+		.kill_sb	= kill_anon_super,
+	};
+	aio_mnt = kern_mount(&aio_fs);
+	if (IS_ERR(aio_mnt))
+		panic("Failed to create aio fs mount.");
+
 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
@@ -283,16 +338,12 @@
 	if (nr_pages < 0)
 		return -EINVAL;
 
-	file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
+	file = aio_private_file(ctx, nr_pages);
 	if (IS_ERR(file)) {
 		ctx->aio_ring_file = NULL;
 		return -EAGAIN;
 	}
 
-	file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
-	file->f_inode->i_mapping->private_data = ctx;
-	file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;
-
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(file->f_inode->i_mapping,
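
aio now carries its own kernel-internal pseudo filesystem so that each
kioctx gets a private inode, and therefore a private address_space,
instead of sharing the single anon_inodes inode. The
mount_pseudo()/kern_mount() boilerplate is reusable; a stripped-down
sketch with hypothetical names:

    static struct dentry *myfs_mount(struct file_system_type *fs_type,
                                     int flags, const char *dev_name,
                                     void *data)
    {
            return mount_pseudo(fs_type, "myfs:", NULL, NULL, 0x4d594653);
    }

    static struct file_system_type myfs_type = {
            .name		= "myfs",
            .mount		= myfs_mount,
            .kill_sb	= kill_anon_super,
    };

    /* at init: */
    struct vfsmount *mnt = kern_mount(&myfs_type);
    /* alloc_anon_inode(mnt->mnt_sb) then yields per-instance inodes */
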
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 85c9618..2408473 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -24,7 +24,6 @@
 
 static struct vfsmount *anon_inode_mnt __read_mostly;
 static struct inode *anon_inode_inode;
-static const struct file_operations anon_inode_fops;
 
 /*
  * anon_inodefs_dname() is called from d_path().
@@ -39,51 +38,6 @@
 	.d_dname	= anon_inodefs_dname,
 };
 
-/*
- * nop .set_page_dirty method so that people can use .page_mkwrite on
- * anon inodes.
- */
-static int anon_set_page_dirty(struct page *page)
-{
-	return 0;
-};
-
-static const struct address_space_operations anon_aops = {
-	.set_page_dirty = anon_set_page_dirty,
-};
-
-/*
- * A single inode exists for all anon_inode files. Contrary to pipes,
- * anon_inode inodes have no associated per-instance data, so we need
- * only allocate one of them.
- */
-static struct inode *anon_inode_mkinode(struct super_block *s)
-{
-	struct inode *inode = new_inode_pseudo(s);
-
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-
-	inode->i_ino = get_next_ino();
-	inode->i_fop = &anon_inode_fops;
-
-	inode->i_mapping->a_ops = &anon_aops;
-
-	/*
-	 * Mark the inode dirty from the very beginning,
-	 * that way it will never be moved to the dirty
-	 * list because mark_inode_dirty() will think
-	 * that it already _is_ on the dirty list.
-	 */
-	inode->i_state = I_DIRTY;
-	inode->i_mode = S_IRUSR | S_IWUSR;
-	inode->i_uid = current_fsuid();
-	inode->i_gid = current_fsgid();
-	inode->i_flags |= S_PRIVATE;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	return inode;
-}
-
 static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
@@ -92,7 +46,7 @@
 			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
 	if (!IS_ERR(root)) {
 		struct super_block *s = root->d_sb;
-		anon_inode_inode = anon_inode_mkinode(s);
+		anon_inode_inode = alloc_anon_inode(s);
 		if (IS_ERR(anon_inode_inode)) {
 			dput(root);
 			deactivate_locked_super(s);
@@ -109,72 +63,6 @@
 };
 
 /**
- * anon_inode_getfile_private - creates a new file instance by hooking it up to an
- *                      anonymous inode, and a dentry that describe the "class"
- *                      of the file
- *
- * @name:    [in]    name of the "class" of the new file
- * @fops:    [in]    file operations for the new file
- * @priv:    [in]    private data for the new file (will be file's private_data)
- * @flags:   [in]    flags
- *
- *
- * Similar to anon_inode_getfile, but each file holds a single inode.
- *
- */
-struct file *anon_inode_getfile_private(const char *name,
-					const struct file_operations *fops,
-					void *priv, int flags)
-{
-	struct qstr this;
-	struct path path;
-	struct file *file;
-	struct inode *inode;
-
-	if (fops->owner && !try_module_get(fops->owner))
-		return ERR_PTR(-ENOENT);
-
-	inode = anon_inode_mkinode(anon_inode_mnt->mnt_sb);
-	if (IS_ERR(inode)) {
-		file = ERR_PTR(-ENOMEM);
-		goto err_module;
-	}
-
-	/*
-	 * Link the inode to a directory entry by creating a unique name
-	 * using the inode sequence number.
-	 */
-	file = ERR_PTR(-ENOMEM);
-	this.name = name;
-	this.len = strlen(name);
-	this.hash = 0;
-	path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
-	if (!path.dentry)
-		goto err_module;
-
-	path.mnt = mntget(anon_inode_mnt);
-
-	d_instantiate(path.dentry, inode);
-
-	file = alloc_file(&path, OPEN_FMODE(flags), fops);
-	if (IS_ERR(file))
-		goto err_dput;
-
-	file->f_mapping = inode->i_mapping;
-	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
-	file->private_data = priv;
-
-	return file;
-
-err_dput:
-	path_put(&path);
-err_module:
-	module_put(fops->owner);
-	return file;
-}
-EXPORT_SYMBOL_GPL(anon_inode_getfile_private);
-
-/**
  * anon_inode_getfile - creates a new file instance by hooking it up to an
  *                      anonymous inode, and a dentry that describe the "class"
  *                      of the file
diff --git a/fs/attr.c b/fs/attr.c
index 1449adb..267968d 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -167,7 +167,27 @@
 }
 EXPORT_SYMBOL(setattr_copy);
 
-int notify_change(struct dentry * dentry, struct iattr * attr)
+/**
+ * notify_change - modify attributes of a filesystem object
+ * @dentry:	object affected
+ * @attr:	new attributes
+ * @delegated_inode: returns inode, if the inode is delegated
+ *
+ * The caller must hold the i_mutex on the affected object.
+ *
+ * If notify_change discovers a delegation in need of breaking,
+ * it returns -EWOULDBLOCK and stores a reference to the inode in
+ * delegated_inode.  The caller should then break the delegation and
+ * retry.  Because breaking a delegation may take a long time, the
+ * caller should drop the i_mutex before doing so.
+ *
+ * Alternatively, a caller may pass NULL for delegated_inode.  This may
+ * be appropriate for callers that expect the underlying filesystem not
+ * to be NFS exported.  Also, passing NULL is fine for callers holding
+ * the file open for write, as there can be no conflicting delegation in
+ * that case.
+ */
+int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode)
 {
 	struct inode *inode = dentry->d_inode;
 	umode_t mode = inode->i_mode;
@@ -243,6 +263,9 @@
 	error = security_inode_setattr(dentry, attr);
 	if (error)
 		return error;
+	error = try_break_deleg(inode, delegated_inode);
+	if (error)
+		return error;
 
 	if (inode->i_op->setattr)
 		error = inode->i_op->setattr(dentry, attr);
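
The kernel-doc above implies a retry loop on the caller's side. This
is roughly what converted call sites in the same series look like
(sketch only; break_deleg_wait() is the companion helper that waits
for the lease break with i_mutex dropped):

    struct inode *delegated_inode = NULL;
    int error;

    retry:
            mutex_lock(&inode->i_mutex);
            error = notify_change(dentry, &newattrs, &delegated_inode);
            mutex_unlock(&inode->i_mutex);
            if (delegated_inode) {
                    error = break_deleg_wait(&delegated_inode);
                    if (!error)
                            goto retry;
            }
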
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 3f1128b..4218e26 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -122,6 +122,7 @@
 	spinlock_t lookup_lock;
 	struct list_head active_list;
 	struct list_head expiring_list;
+	struct rcu_head rcu;
 };
 
 static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
@@ -271,7 +272,7 @@
 
 static inline int autofs_prepare_pipe(struct file *pipe)
 {
-	if (!pipe->f_op || !pipe->f_op->write)
+	if (!pipe->f_op->write)
 		return -EINVAL;
 	if (!S_ISFIFO(file_inode(pipe)->i_mode))
 		return -EINVAL;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 0f00da3..1818ce7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -658,12 +658,6 @@
 			goto out;
 		}
 
-		if (!fp->f_op) {
-			err = -ENOTTY;
-			fput(fp);
-			goto out;
-		}
-
 		sbi = autofs_dev_ioctl_sbi(fp);
 		if (!sbi || sbi->magic != AUTOFS_SBI_MAGIC) {
 			err = -EINVAL;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index b104726..3b9cc9b 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -56,18 +56,13 @@
 	 * just call kill_anon_super when we are called from
 	 * deactivate_super.
 	 */
-	if (!sbi)
-		goto out_kill_sb;
+	if (sbi) /* Free wait queues, close pipe */
+		autofs4_catatonic_mode(sbi);
 
-	/* Free wait queues, close pipe */
-	autofs4_catatonic_mode(sbi);
-
-	sb->s_fs_info = NULL;
-	kfree(sbi);
-
-out_kill_sb:
 	DPRINTK("shutting down");
 	kill_litter_super(sb);
+	if (sbi)
+		kfree_rcu(sbi, rcu);
 }
 
 static int autofs4_show_options(struct seq_file *m, struct dentry *root)
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e9c75e2..daa15d6 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -42,7 +42,7 @@
 static int befs_init_inodecache(void);
 static void befs_destroy_inodecache(void);
 static void *befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *, void *);
+static void *befs_fast_follow_link(struct dentry *, struct nameidata *);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
 			char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -79,10 +79,15 @@
 	.bmap		= befs_bmap,
 };
 
+static const struct inode_operations befs_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= befs_fast_follow_link,
+};
+
 static const struct inode_operations befs_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.follow_link	= befs_follow_link,
-	.put_link	= befs_put_link,
+	.put_link	= kfree_put_link,
 };
 
 /* 
@@ -411,7 +416,10 @@
 		inode->i_op = &befs_dir_inode_operations;
 		inode->i_fop = &befs_dir_operations;
 	} else if (S_ISLNK(inode->i_mode)) {
-		inode->i_op = &befs_symlink_inode_operations;
+		if (befs_ino->i_flags & BEFS_LONG_SYMLINK)
+			inode->i_op = &befs_symlink_inode_operations;
+		else
+			inode->i_op = &befs_fast_symlink_inode_operations;
 	} else {
 		befs_error(sb, "Inode %lu is not a regular file, "
 			   "directory or symlink. THAT IS WRONG! BeFS has no "
@@ -477,47 +485,40 @@
 static void *
 befs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
+	struct super_block *sb = dentry->d_sb;
 	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+	befs_data_stream *data = &befs_ino->i_data.ds;
+	befs_off_t len = data->size;
 	char *link;
 
-	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-		struct super_block *sb = dentry->d_sb;
-		befs_data_stream *data = &befs_ino->i_data.ds;
-		befs_off_t len = data->size;
+	if (len == 0) {
+		befs_error(sb, "Long symlink with illegal length");
+		link = ERR_PTR(-EIO);
+	} else {
+		befs_debug(sb, "Follow long symlink");
 
-		if (len == 0) {
-			befs_error(sb, "Long symlink with illegal length");
+		link = kmalloc(len, GFP_NOFS);
+		if (!link) {
+			link = ERR_PTR(-ENOMEM);
+		} else if (befs_read_lsymlink(sb, data, link, len) != len) {
+			kfree(link);
+			befs_error(sb, "Failed to read entire long symlink");
 			link = ERR_PTR(-EIO);
 		} else {
-			befs_debug(sb, "Follow long symlink");
-
-			link = kmalloc(len, GFP_NOFS);
-			if (!link) {
-				link = ERR_PTR(-ENOMEM);
-			} else if (befs_read_lsymlink(sb, data, link, len) != len) {
-				kfree(link);
-				befs_error(sb, "Failed to read entire long symlink");
-				link = ERR_PTR(-EIO);
-			} else {
-				link[len - 1] = '\0';
-			}
+			link[len - 1] = '\0';
 		}
-	} else {
-		link = befs_ino->i_data.symlink;
 	}
-
 	nd_set_link(nd, link);
 	return NULL;
 }
 
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+
+static void *
+befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
-	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-		char *link = nd_get_link(nd);
-		if (!IS_ERR(link))
-			kfree(link);
-	}
+	nd_set_link(nd, befs_ino->i_data.symlink);
+	return NULL;
 }
 
 /*
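
Splitting fast and long symlinks into two inode_operations removes a
branch from every ->follow_link() call and lets the long case reuse
the generic kfree_put_link() helper to free the kmalloc()ed target.
The shape of the split, generically (hypothetical names):

    static const struct inode_operations my_symlink_iops = {
            .readlink	= generic_readlink,
            .follow_link	= my_follow_link,	/* kmalloc()s target */
            .put_link	= kfree_put_link,	/* generic kfree()   */
    };

    static const struct inode_operations my_fast_symlink_iops = {
            .readlink	= generic_readlink,
            .follow_link	= my_fast_follow_link,	/* embedded target */
    };

    /* chosen once, at iget time: */
    inode->i_op = is_long ? &my_symlink_iops : &my_fast_symlink_iops;
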
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 89dec7f..ca0ba15 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -45,7 +45,6 @@
  */
 static int aout_core_dump(struct coredump_params *cprm)
 {
-	struct file *file = cprm->file;
 	mm_segment_t fs;
 	int has_dumped = 0;
 	void __user *dump_start;
@@ -85,10 +84,10 @@
 
 	set_fs(KERNEL_DS);
 /* struct user */
-	if (!dump_write(file, &dump, sizeof(dump)))
+	if (!dump_emit(cprm, &dump, sizeof(dump)))
 		goto end_coredump;
 /* Now dump all of the user data.  Include malloced stuff as well */
-	if (!dump_seek(cprm->file, PAGE_SIZE - sizeof(dump)))
+	if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump)))
 		goto end_coredump;
 /* now we start writing out the user space info */
 	set_fs(USER_DS);
@@ -96,14 +95,14 @@
 	if (dump.u_dsize != 0) {
 		dump_start = START_DATA(dump);
 		dump_size = dump.u_dsize << PAGE_SHIFT;
-		if (!dump_write(file, dump_start, dump_size))
+		if (!dump_emit(cprm, dump_start, dump_size))
 			goto end_coredump;
 	}
 /* Now prepare to dump the stack area */
 	if (dump.u_ssize != 0) {
 		dump_start = START_STACK(dump);
 		dump_size = dump.u_ssize << PAGE_SHIFT;
-		if (!dump_write(file, dump_start, dump_size))
+		if (!dump_emit(cprm, dump_start, dump_size))
 			goto end_coredump;
 	}
 end_coredump:
@@ -221,7 +220,7 @@
 	 * Requires a mmap handler. This prevents people from using a.out
 	 * as part of an exploit attack against /proc-related vulnerabilities.
 	 */
-	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
+	if (!bprm->file->f_op->mmap)
 		return -ENOEXEC;
 
 	fd_offset = N_TXTOFF(ex);
@@ -374,7 +373,7 @@
 	 * Requires a mmap handler. This prevents people from using a.out
 	 * as part of an exploit attack against /proc-related vulnerabilities.
 	 */
-	if (!file->f_op || !file->f_op->mmap)
+	if (!file->f_op->mmap)
 		goto out;
 
 	if (N_FLAGS(ex))
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 4c94a79..571a423 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -406,7 +406,7 @@
 		goto out;
 	if (!elf_check_arch(interp_elf_ex))
 		goto out;
-	if (!interpreter->f_op || !interpreter->f_op->mmap)
+	if (!interpreter->f_op->mmap)
 		goto out;
 
 	/*
@@ -607,7 +607,7 @@
 		goto out;
 	if (!elf_check_arch(&loc->elf_ex))
 		goto out;
-	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
+	if (!bprm->file->f_op->mmap)
 		goto out;
 
 	/* Now read in all of the header information */
@@ -1028,7 +1028,7 @@
 
 	/* First of all, some simple consistency checks */
 	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
-	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
+	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
 		goto out;
 
 	/* Now read in all of the header information */
@@ -1225,35 +1225,17 @@
 	return sz;
 }
 
-#define DUMP_WRITE(addr, nr, foffset)	\
-	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)
-
-static int alignfile(struct file *file, loff_t *foffset)
-{
-	static const char buf[4] = { 0, };
-	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
-	return 1;
-}
-
-static int writenote(struct memelfnote *men, struct file *file,
-			loff_t *foffset)
+static int writenote(struct memelfnote *men, struct coredump_params *cprm)
 {
 	struct elf_note en;
 	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
-	DUMP_WRITE(&en, sizeof(en), foffset);
-	DUMP_WRITE(men->name, en.n_namesz, foffset);
-	if (!alignfile(file, foffset))
-		return 0;
-	DUMP_WRITE(men->data, men->datasz, foffset);
-	if (!alignfile(file, foffset))
-		return 0;
-
-	return 1;
+	return dump_emit(cprm, &en, sizeof(en)) &&
+	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
+	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
 }
-#undef DUMP_WRITE
 
 static void fill_elf_header(struct elfhdr *elf, int segs,
 			    u16 machine, u32 flags)
@@ -1392,7 +1374,7 @@
 }
 
 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
-		siginfo_t *siginfo)
+		const siginfo_t *siginfo)
 {
 	mm_segment_t old_fs = get_fs();
 	set_fs(KERNEL_DS);
@@ -1599,7 +1581,7 @@
 
 static int fill_note_info(struct elfhdr *elf, int phdrs,
 			  struct elf_note_info *info,
-			  siginfo_t *siginfo, struct pt_regs *regs)
+			  const siginfo_t *siginfo, struct pt_regs *regs)
 {
 	struct task_struct *dump_task = current;
 	const struct user_regset_view *view = task_user_regset_view(dump_task);
@@ -1702,7 +1684,7 @@
  * process-wide notes are interleaved after the first thread-specific note.
  */
 static int write_note_info(struct elf_note_info *info,
-			   struct file *file, loff_t *foffset)
+			   struct coredump_params *cprm)
 {
 	bool first = 1;
 	struct elf_thread_core_info *t = info->thread;
@@ -1710,22 +1692,22 @@
 	do {
 		int i;
 
-		if (!writenote(&t->notes[0], file, foffset))
+		if (!writenote(&t->notes[0], cprm))
 			return 0;
 
-		if (first && !writenote(&info->psinfo, file, foffset))
+		if (first && !writenote(&info->psinfo, cprm))
 			return 0;
-		if (first && !writenote(&info->signote, file, foffset))
+		if (first && !writenote(&info->signote, cprm))
 			return 0;
-		if (first && !writenote(&info->auxv, file, foffset))
+		if (first && !writenote(&info->auxv, cprm))
 			return 0;
 		if (first && info->files.data &&
-				!writenote(&info->files, file, foffset))
+				!writenote(&info->files, cprm))
 			return 0;
 
 		for (i = 1; i < info->thread_notes; ++i)
 			if (t->notes[i].data &&
-			    !writenote(&t->notes[i], file, foffset))
+			    !writenote(&t->notes[i], cprm))
 				return 0;
 
 		first = 0;
@@ -1848,34 +1830,31 @@
 
 static int fill_note_info(struct elfhdr *elf, int phdrs,
 			  struct elf_note_info *info,
-			  siginfo_t *siginfo, struct pt_regs *regs)
+			  const siginfo_t *siginfo, struct pt_regs *regs)
 {
 	struct list_head *t;
+	struct core_thread *ct;
+	struct elf_thread_status *ets;
 
 	if (!elf_note_info_init(info))
 		return 0;
 
-	if (siginfo->si_signo) {
-		struct core_thread *ct;
-		struct elf_thread_status *ets;
+	for (ct = current->mm->core_state->dumper.next;
+					ct; ct = ct->next) {
+		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
+		if (!ets)
+			return 0;
 
-		for (ct = current->mm->core_state->dumper.next;
-						ct; ct = ct->next) {
-			ets = kzalloc(sizeof(*ets), GFP_KERNEL);
-			if (!ets)
-				return 0;
+		ets->thread = ct->task;
+		list_add(&ets->list, &info->thread_list);
+	}
 
-			ets->thread = ct->task;
-			list_add(&ets->list, &info->thread_list);
-		}
+	list_for_each(t, &info->thread_list) {
+		int sz;
 
-		list_for_each(t, &info->thread_list) {
-			int sz;
-
-			ets = list_entry(t, struct elf_thread_status, list);
-			sz = elf_dump_thread_status(siginfo->si_signo, ets);
-			info->thread_status_size += sz;
-		}
+		ets = list_entry(t, struct elf_thread_status, list);
+		sz = elf_dump_thread_status(siginfo->si_signo, ets);
+		info->thread_status_size += sz;
 	}
 	/* now collect the dump for the current */
 	memset(info->prstatus, 0, sizeof(*info->prstatus));
@@ -1935,13 +1914,13 @@
 }
 
 static int write_note_info(struct elf_note_info *info,
-			   struct file *file, loff_t *foffset)
+			   struct coredump_params *cprm)
 {
 	int i;
 	struct list_head *t;
 
 	for (i = 0; i < info->numnote; i++)
-		if (!writenote(info->notes + i, file, foffset))
+		if (!writenote(info->notes + i, cprm))
 			return 0;
 
 	/* write out the thread status notes section */
@@ -1950,7 +1929,7 @@
 				list_entry(t, struct elf_thread_status, list);
 
 		for (i = 0; i < tmp->num_notes; i++)
-			if (!writenote(&tmp->notes[i], file, foffset))
+			if (!writenote(&tmp->notes[i], cprm))
 				return 0;
 	}
 
@@ -2046,10 +2025,9 @@
 	int has_dumped = 0;
 	mm_segment_t fs;
 	int segs;
-	size_t size = 0;
 	struct vm_area_struct *vma, *gate_vma;
 	struct elfhdr *elf = NULL;
-	loff_t offset = 0, dataoff, foffset;
+	loff_t offset = 0, dataoff;
 	struct elf_note_info info = { };
 	struct elf_phdr *phdr4note = NULL;
 	struct elf_shdr *shdr4extnum = NULL;
@@ -2105,7 +2083,6 @@
 
 	offset += sizeof(*elf);				/* Elf header */
 	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
-	foffset = offset;
 
 	/* Write notes phdr entry */
 	{
@@ -2136,13 +2113,10 @@
 
 	offset = dataoff;
 
-	size += sizeof(*elf);
-	if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+	if (!dump_emit(cprm, elf, sizeof(*elf)))
 		goto end_coredump;
 
-	size += sizeof(*phdr4note);
-	if (size > cprm->limit
-	    || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
 		goto end_coredump;
 
 	/* Write program headers for segments dump */
@@ -2164,24 +2138,22 @@
 			phdr.p_flags |= PF_X;
 		phdr.p_align = ELF_EXEC_PAGESIZE;
 
-		size += sizeof(phdr);
-		if (size > cprm->limit
-		    || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
 			goto end_coredump;
 	}
 
-	if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+	if (!elf_core_write_extra_phdrs(cprm, offset))
 		goto end_coredump;
 
  	/* write out the notes section */
-	if (!write_note_info(&info, cprm->file, &foffset))
+	if (!write_note_info(&info, cprm))
 		goto end_coredump;
 
-	if (elf_coredump_extra_notes_write(cprm->file, &foffset))
+	if (elf_coredump_extra_notes_write(cprm))
 		goto end_coredump;
 
 	/* Align to page */
-	if (!dump_seek(cprm->file, dataoff - foffset))
+	if (!dump_skip(cprm, dataoff - cprm->written))
 		goto end_coredump;
 
 	for (vma = first_vma(current, gate_vma); vma != NULL;
@@ -2198,26 +2170,21 @@
 			page = get_dump_page(addr);
 			if (page) {
 				void *kaddr = kmap(page);
-				stop = ((size += PAGE_SIZE) > cprm->limit) ||
-					!dump_write(cprm->file, kaddr,
-						    PAGE_SIZE);
+				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
 				kunmap(page);
 				page_cache_release(page);
 			} else
-				stop = !dump_seek(cprm->file, PAGE_SIZE);
+				stop = !dump_skip(cprm, PAGE_SIZE);
 			if (stop)
 				goto end_coredump;
 		}
 	}
 
-	if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+	if (!elf_core_write_extra_data(cprm))
 		goto end_coredump;
 
 	if (e_phnum == PN_XNUM) {
-		size += sizeof(*shdr4extnum);
-		if (size > cprm->limit
-		    || !dump_write(cprm->file, shdr4extnum,
-				   sizeof(*shdr4extnum)))
+		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
 			goto end_coredump;
 	}
 
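
The whole conversion in this file follows one rule: dump_emit(),
dump_skip() and dump_align() update cprm->written and enforce
cprm->limit internally, returning a boolean, so the hand-rolled
size/limit/foffset bookkeeping disappears. A hedged sketch of the
contract (hdr and pad are placeholders):

    /* old style: caller tracked size and offset by hand
     *	size += sizeof(hdr);
     *	if (size > cprm->limit ||
     *	    !dump_write(cprm->file, &hdr, sizeof(hdr)))
     *		goto fail;
     */

    /* new style: the helpers account and limit-check themselves */
    if (!dump_emit(cprm, &hdr, sizeof(hdr)) ||
        !dump_align(cprm, 4) ||
        !dump_skip(cprm, pad))
            goto fail;
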
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c166f32..fe2a643 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -111,7 +111,7 @@
 		return 0;
 	if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr))
 		return 0;
-	if (!file->f_op || !file->f_op->mmap)
+	if (!file->f_op->mmap)
 		return 0;
 	return 1;
 }
@@ -1267,35 +1267,17 @@
 
 /* #define DEBUG */
 
-#define DUMP_WRITE(addr, nr, foffset)	\
-	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)
-
-static int alignfile(struct file *file, loff_t *foffset)
-{
-	static const char buf[4] = { 0, };
-	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
-	return 1;
-}
-
-static int writenote(struct memelfnote *men, struct file *file,
-			loff_t *foffset)
+static int writenote(struct memelfnote *men, struct coredump_params *cprm)
 {
 	struct elf_note en;
 	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
-	DUMP_WRITE(&en, sizeof(en), foffset);
-	DUMP_WRITE(men->name, en.n_namesz, foffset);
-	if (!alignfile(file, foffset))
-		return 0;
-	DUMP_WRITE(men->data, men->datasz, foffset);
-	if (!alignfile(file, foffset))
-		return 0;
-
-	return 1;
+	return dump_emit(cprm, &en, sizeof(en)) &&
+		dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
+		dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
 }
-#undef DUMP_WRITE
 
 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
 {
@@ -1500,66 +1482,40 @@
 /*
  * dump the segments for an MMU process
  */
-#ifdef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, size_t *size,
-			   unsigned long *limit, unsigned long mm_flags)
+static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
 {
 	struct vm_area_struct *vma;
-	int err = 0;
 
 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
 		unsigned long addr;
 
-		if (!maydump(vma, mm_flags))
+		if (!maydump(vma, cprm->mm_flags))
 			continue;
 
+#ifdef CONFIG_MMU
 		for (addr = vma->vm_start; addr < vma->vm_end;
 							addr += PAGE_SIZE) {
+			bool res;
 			struct page *page = get_dump_page(addr);
 			if (page) {
 				void *kaddr = kmap(page);
-				*size += PAGE_SIZE;
-				if (*size > *limit)
-					err = -EFBIG;
-				else if (!dump_write(file, kaddr, PAGE_SIZE))
-					err = -EIO;
+				res = dump_emit(cprm, kaddr, PAGE_SIZE);
 				kunmap(page);
 				page_cache_release(page);
-			} else if (!dump_seek(file, PAGE_SIZE))
-				err = -EFBIG;
-			if (err)
-				goto out;
+			} else {
+				res = dump_skip(cprm, PAGE_SIZE);
+			}
+			if (!res)
+				return false;
 		}
-	}
-out:
-	return err;
-}
-#endif
-
-/*
- * dump the segments for a NOMMU process
- */
-#ifndef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, size_t *size,
-			   unsigned long *limit, unsigned long mm_flags)
-{
-	struct vm_area_struct *vma;
-
-	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
-		if (!maydump(vma, mm_flags))
-			continue;
-
-		if ((*size += PAGE_SIZE) > *limit)
-			return -EFBIG;
-
-		if (!dump_write(file, (void *) vma->vm_start,
+#else
+		if (!dump_emit(cprm, (void *) vma->vm_start,
 				vma->vm_end - vma->vm_start))
-			return -EIO;
-	}
-
-	return 0;
-}
+			return false;
 #endif
+	}
+	return true;
+}
 
 static size_t elf_core_vma_data_size(unsigned long mm_flags)
 {
@@ -1585,11 +1541,10 @@
 	int has_dumped = 0;
 	mm_segment_t fs;
 	int segs;
-	size_t size = 0;
 	int i;
 	struct vm_area_struct *vma;
 	struct elfhdr *elf = NULL;
-	loff_t offset = 0, dataoff, foffset;
+	loff_t offset = 0, dataoff;
 	int numnote;
 	struct memelfnote *notes = NULL;
 	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
@@ -1606,6 +1561,8 @@
 	struct elf_shdr *shdr4extnum = NULL;
 	Elf_Half e_phnum;
 	elf_addr_t e_shoff;
+	struct core_thread *ct;
+	struct elf_thread_status *tmp;
 
 	/*
 	 * We no longer stop all VM operations.
@@ -1641,28 +1598,23 @@
 		goto cleanup;
 #endif
 
-	if (cprm->siginfo->si_signo) {
-		struct core_thread *ct;
+	for (ct = current->mm->core_state->dumper.next;
+					ct; ct = ct->next) {
+		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+		if (!tmp)
+			goto cleanup;
+
+		tmp->thread = ct->task;
+		list_add(&tmp->list, &thread_list);
+	}
+
+	list_for_each(t, &thread_list) {
 		struct elf_thread_status *tmp;
+		int sz;
 
-		for (ct = current->mm->core_state->dumper.next;
-						ct; ct = ct->next) {
-			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
-			if (!tmp)
-				goto cleanup;
-
-			tmp->thread = ct->task;
-			list_add(&tmp->list, &thread_list);
-		}
-
-		list_for_each(t, &thread_list) {
-			struct elf_thread_status *tmp;
-			int sz;
-
-			tmp = list_entry(t, struct elf_thread_status, list);
-			sz = elf_dump_thread_status(cprm->siginfo->si_signo, tmp);
-			thread_status_size += sz;
-		}
+		tmp = list_entry(t, struct elf_thread_status, list);
+		sz = elf_dump_thread_status(cprm->siginfo->si_signo, tmp);
+		thread_status_size += sz;
 	}
 
 	/* now collect the dump for the current */
@@ -1720,7 +1672,6 @@
 
 	offset += sizeof(*elf);				/* Elf header */
 	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
-	foffset = offset;
 
 	/* Write notes phdr entry */
 	{
@@ -1755,13 +1706,10 @@
 
 	offset = dataoff;
 
-	size += sizeof(*elf);
-	if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+	if (!dump_emit(cprm, elf, sizeof(*elf)))
 		goto end_coredump;
 
-	size += sizeof(*phdr4note);
-	if (size > cprm->limit
-	    || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
 		goto end_coredump;
 
 	/* write program headers for segments dump */
@@ -1785,18 +1733,16 @@
 			phdr.p_flags |= PF_X;
 		phdr.p_align = ELF_EXEC_PAGESIZE;
 
-		size += sizeof(phdr);
-		if (size > cprm->limit
-		    || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
 			goto end_coredump;
 	}
 
-	if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+	if (!elf_core_write_extra_phdrs(cprm, offset))
 		goto end_coredump;
 
  	/* write out the notes section */
 	for (i = 0; i < numnote; i++)
-		if (!writenote(notes + i, cprm->file, &foffset))
+		if (!writenote(notes + i, cprm))
 			goto end_coredump;
 
 	/* write out the thread status notes section */
@@ -1805,25 +1751,21 @@
 				list_entry(t, struct elf_thread_status, list);
 
 		for (i = 0; i < tmp->num_notes; i++)
-			if (!writenote(&tmp->notes[i], cprm->file, &foffset))
+			if (!writenote(&tmp->notes[i], cprm))
 				goto end_coredump;
 	}
 
-	if (!dump_seek(cprm->file, dataoff - foffset))
+	if (!dump_skip(cprm, dataoff - cprm->written))
 		goto end_coredump;
 
-	if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit,
-				    cprm->mm_flags) < 0)
+	if (!elf_fdpic_dump_segments(cprm))
 		goto end_coredump;
 
-	if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+	if (!elf_core_write_extra_data(cprm))
 		goto end_coredump;
 
 	if (e_phnum == PN_XNUM) {
-		size += sizeof(*shdr4extnum);
-		if (size > cprm->limit
-		    || !dump_write(cprm->file, shdr4extnum,
-				   sizeof(*shdr4extnum)))
+		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
 			goto end_coredump;
 	}
 
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 037a3e2..f37b08c 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -38,7 +38,7 @@
 	/* First of all, some simple consistency checks */
 	if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
 		(!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
-		(!bprm->file->f_op || !bprm->file->f_op->mmap)) {
+		!bprm->file->f_op->mmap) {
 			return -ENOEXEC;
 	}
 
diff --git a/fs/bio.c b/fs/bio.c
index ea5035d..2bdb4e2 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1805,6 +1805,52 @@
 EXPORT_SYMBOL(bio_split);
 
 /**
+ * bio_trim - trim a bio
+ * @bio:	bio to trim
+ * @offset:	number of sectors to trim from the front of @bio
+ * @size:	size we want to trim @bio to, in sectors
+ */
+void bio_trim(struct bio *bio, int offset, int size)
+{
+	/* 'bio' is a cloned bio which we need to trim to match
+	 * the given offset and size.
+	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
+	 */
+	int i;
+	struct bio_vec *bvec;
+	int sofar = 0;
+
+	size <<= 9;
+	if (offset == 0 && size == bio->bi_size)
+		return;
+
+	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
+	bio_advance(bio, offset << 9);
+
+	bio->bi_size = size;
+
+	/* avoid any complications with bi_idx being non-zero */
+	if (bio->bi_idx) {
+		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
+			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
+		bio->bi_vcnt -= bio->bi_idx;
+		bio->bi_idx = 0;
+	}
+	/* Make sure vcnt and last bv are not too big */
+	bio_for_each_segment(bvec, bio, i) {
+		if (sofar + bvec->bv_len > size)
+			bvec->bv_len = size - sofar;
+		if (bvec->bv_len == 0) {
+			bio->bi_vcnt = i;
+			break;
+		}
+		sofar += bvec->bv_len;
+	}
+}
+EXPORT_SYMBOL_GPL(bio_trim);
+
+/**
  *      bio_sector_offset - Find hardware sector offset in bio
  *      @bio:           bio to inspect
  *      @index:         bio_vec index
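
bio_trim() exists so stacking drivers (md/raid is the motivating user)
can clone a bio and narrow the clone to the sub-range they actually
forward. A usage sketch, offsets hypothetical:

    /* forward only 'sectors' sectors, starting 'offset' sectors into
     * the original request */
    struct bio *split = bio_clone(bio, GFP_NOIO);
    if (!split)
            return -ENOMEM;
    bio_trim(split, offset, sectors);
    generic_make_request(split);
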
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 398cbd5..f9d5094 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -59,7 +59,8 @@
 	help
 	  This will run some basic sanity tests on the free space cache
 	  code to make sure it is acting as it should.  These are mostly
-	  regression tests and are only really interesting to btrfs devlopers.
+	  regression tests and are only really interesting to btrfs
+	  developers.
 
 	  If unsure, say N.
 
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index a91a6a3..1a44e42 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -14,4 +14,6 @@
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
 
-btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o
+btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
+	tests/extent-buffer-tests.o tests/btrfs-tests.o \
+	tests/extent-io-tests.o tests/inode-tests.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index e15d2b0..0890c83 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -229,7 +229,7 @@
 		if (ret > 0) {
 			/* we need an acl */
 			ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
-		} else {
+		} else if (ret < 0) {
 			cache_no_acl(inode);
 		}
 	} else {
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 08cc08f..8aec751 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -262,7 +262,7 @@
 	struct btrfs_work *work = NULL;
 	struct list_head *cur = NULL;
 
-	if(!list_empty(prio_head))
+	if (!list_empty(prio_head))
 		cur = prio_head->next;
 
 	smp_mb();
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0552a59..3775947 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -185,6 +185,9 @@
 {
 	struct __prelim_ref *ref;
 
+	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
+		return 0;
+
 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 	if (!ref)
 		return -ENOMEM;
@@ -323,8 +326,7 @@
 
 	eb = path->nodes[level];
 	while (!eb) {
-		if (!level) {
-			WARN_ON(1);
+		if (WARN_ON(!level)) {
 			ret = 1;
 			goto out;
 		}
@@ -1619,7 +1621,7 @@
 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
 
-		item = btrfs_item_nr(eb, slot);
+		item = btrfs_item_nr(slot);
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
 
 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 71f074e..ac0b39d 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -19,6 +19,7 @@
 #ifndef __BTRFS_I__
 #define __BTRFS_I__
 
+#include <linux/hash.h>
 #include "extent_map.h"
 #include "extent_io.h"
 #include "ordered-data.h"
@@ -179,6 +180,25 @@
 	return container_of(inode, struct btrfs_inode, vfs_inode);
 }
 
+static inline unsigned long btrfs_inode_hash(u64 objectid,
+					     const struct btrfs_root *root)
+{
+	u64 h = objectid ^ (root->objectid * GOLDEN_RATIO_PRIME);
+
+#if BITS_PER_LONG == 32
+	h = (h >> 32) ^ (h & 0xffffffff);
+#endif
+
+	return (unsigned long)h;
+}
+
+static inline void btrfs_insert_inode_hash(struct inode *inode)
+{
+	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);
+
+	__insert_inode_hash(inode, h);
+}
+
 static inline u64 btrfs_ino(struct inode *inode)
 {
 	u64 ino = BTRFS_I(inode)->location.objectid;
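
btrfs inode numbers are only unique within a subvolume, so the hash
above mixes in the root's objectid (scaled by GOLDEN_RATIO_PRIME for
dispersion) before folding to a long; equal inode numbers in different
subvolumes then land in different icache buckets. Sketch of the lookup
side, helper names hypothetical:

    /* the (objectid, root) pair, not i_ino alone, identifies the
     * inode; the hash feeds the icache bucket selection */
    inode = iget5_locked(sb, btrfs_inode_hash(objectid, root),
                         my_test_actor, my_init_locked, &args);
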
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 1c47be1..e0aab44 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1038,7 +1038,7 @@
 						     disk_item_offset,
 						     sizeof(struct btrfs_item));
 			item_offset = btrfs_stack_item_offset(&disk_item);
-			item_size = btrfs_stack_item_offset(&disk_item);
+			item_size = btrfs_stack_item_size(&disk_item);
 			disk_key = &disk_item.key;
 			type = btrfs_disk_key_type(disk_key);
 
@@ -1900,7 +1900,9 @@
 							       dev_state,
 							       dev_bytenr);
 			}
-			if (block->logical_bytenr != bytenr) {
+			if (block->logical_bytenr != bytenr &&
+			    !(!block->is_metadata &&
+			      block->logical_bytenr == 0))
 				printk(KERN_INFO
 				       "Written block @%llu (%s/%llu/%d)"
 				       " found in hash table, %c,"
@@ -1910,15 +1912,14 @@
 				       block->mirror_num,
 				       btrfsic_get_block_type(state, block),
 				       block->logical_bytenr);
-				block->logical_bytenr = bytenr;
-			} else if (state->print_mask &
-				   BTRFSIC_PRINT_MASK_VERBOSE)
+			else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				printk(KERN_INFO
 				       "Written block @%llu (%s/%llu/%d)"
 				       " found in hash table, %c.\n",
 				       bytenr, dev_state->name, dev_bytenr,
 				       block->mirror_num,
 				       btrfsic_get_block_type(state, block));
+			block->logical_bytenr = bytenr;
 		} else {
 			if (num_pages * PAGE_CACHE_SIZE <
 			    state->datablock_size) {
@@ -2463,10 +2464,8 @@
 		}
 	}
 
-	if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) {
-		WARN_ON(1);
+	if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)))
 		btrfsic_dump_tree(state);
-	}
 
 	return 0;
 }
@@ -2906,7 +2905,7 @@
 		btrfsic_release_block_ctx(&block_ctx);
 	}
 
-	if (!match) {
+	if (WARN_ON(!match)) {
 		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
 		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
 		       " phys_bytenr=%llu)!\n",
@@ -2923,7 +2922,6 @@
 			       bytenr, block_ctx.dev->name,
 			       block_ctx.dev_bytenr, mirror_num);
 		}
-		WARN_ON(1);
 	}
 }
 
diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h
deleted file mode 100644
index 7c4503e..0000000
--- a/fs/btrfs/compat.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _COMPAT_H_
-#define _COMPAT_H_
-
-#define btrfs_drop_nlink(inode) drop_nlink(inode)
-#define btrfs_inc_nlink(inode)	inc_nlink(inode)
-
-#endif /* _COMPAT_H_ */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 6aad98c..1499b27 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -32,7 +32,6 @@
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
 #include <linux/slab.h>
-#include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -360,7 +359,7 @@
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
-	if(!bio) {
+	if (!bio) {
 		kfree(cb);
 		return -ENOMEM;
 	}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 61b5bcd..316136b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -274,7 +274,7 @@
 	else
 		btrfs_set_header_owner(cow, new_root_objectid);
 
-	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(cow),
+	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
 			    BTRFS_FSID_SIZE);
 
 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
@@ -996,7 +996,7 @@
 	else
 		btrfs_set_header_owner(cow, root->root_key.objectid);
 
-	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(cow),
+	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
 			    BTRFS_FSID_SIZE);
 
 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
@@ -1285,11 +1285,10 @@
 		free_extent_buffer(eb_root);
 		blocksize = btrfs_level_size(root, old_root->level);
 		old = read_tree_block(root, logical, blocksize, 0);
-		if (!old || !extent_buffer_uptodate(old)) {
+		if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
 			free_extent_buffer(old);
 			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
 				logical);
-			WARN_ON(1);
 		} else {
 			eb = btrfs_clone_extent_buffer(old);
 			free_extent_buffer(old);
@@ -2758,7 +2757,7 @@
 	int level;
 	int lowest_unlock = 1;
 	u8 lowest_level = 0;
-	int prev_cmp;
+	int prev_cmp = -1;
 
 	lowest_level = p->lowest_level;
 	WARN_ON(p->nodes[0] != NULL);
@@ -2769,7 +2768,6 @@
 	}
 
 again:
-	prev_cmp = -1;
 	b = get_old_root(root, time_seq);
 	level = btrfs_header_level(b);
 	p->locks[level] = BTRFS_READ_LOCK;
@@ -2787,6 +2785,11 @@
 		 */
 		btrfs_unlock_up_safe(p, level + 1);
 
+		/*
+		 * Since we can unwind eb's we want to do a real search every
+		 * time.
+		 */
+		prev_cmp = -1;
 		ret = key_search(b, key, level, &prev_cmp, &slot);
 
 		if (level != 0) {
@@ -3148,7 +3151,7 @@
 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(c, root->root_key.objectid);
 
-	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(c),
+	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
 			    BTRFS_FSID_SIZE);
 
 	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
@@ -3287,7 +3290,7 @@
 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(split, root->root_key.objectid);
 	write_extent_buffer(split, root->fs_info->fsid,
-			    btrfs_header_fsid(split), BTRFS_FSID_SIZE);
+			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
 	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
 			    btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
@@ -3337,8 +3340,8 @@
 	if (!nr)
 		return 0;
 	btrfs_init_map_token(&token);
-	start_item = btrfs_item_nr(l, start);
-	end_item = btrfs_item_nr(l, end);
+	start_item = btrfs_item_nr(start);
+	end_item = btrfs_item_nr(end);
 	data_len = btrfs_token_item_offset(l, start_item, &token) +
 		btrfs_token_item_size(l, start_item, &token);
 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
@@ -3406,7 +3409,7 @@
 	slot = path->slots[1];
 	i = left_nritems - 1;
 	while (i >= nr) {
-		item = btrfs_item_nr(left, i);
+		item = btrfs_item_nr(i);
 
 		if (!empty && push_items > 0) {
 			if (path->slots[0] > i)
@@ -3470,7 +3473,7 @@
 	btrfs_set_header_nritems(right, right_nritems);
 	push_space = BTRFS_LEAF_DATA_SIZE(root);
 	for (i = 0; i < right_nritems; i++) {
-		item = btrfs_item_nr(right, i);
+		item = btrfs_item_nr(i);
 		push_space -= btrfs_token_item_size(right, item, &token);
 		btrfs_set_token_item_offset(right, item, push_space, &token);
 	}
@@ -3612,7 +3615,7 @@
 		nr = min(right_nritems - 1, max_slot);
 
 	for (i = 0; i < nr; i++) {
-		item = btrfs_item_nr(right, i);
+		item = btrfs_item_nr(i);
 
 		if (!empty && push_items > 0) {
 			if (path->slots[0] < i)
@@ -3639,8 +3642,7 @@
 		ret = 1;
 		goto out;
 	}
-	if (!empty && push_items == btrfs_header_nritems(right))
-		WARN_ON(1);
+	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
 
 	/* push data from right to left */
 	copy_extent_buffer(left, right,
@@ -3663,7 +3665,7 @@
 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
 		u32 ioff;
 
-		item = btrfs_item_nr(left, i);
+		item = btrfs_item_nr(i);
 
 		ioff = btrfs_token_item_offset(left, item, &token);
 		btrfs_set_token_item_offset(left, item,
@@ -3694,7 +3696,7 @@
 	btrfs_set_header_nritems(right, right_nritems);
 	push_space = BTRFS_LEAF_DATA_SIZE(root);
 	for (i = 0; i < right_nritems; i++) {
-		item = btrfs_item_nr(right, i);
+		item = btrfs_item_nr(i);
 
 		push_space = push_space - btrfs_token_item_size(right,
 								item, &token);
@@ -3835,7 +3837,7 @@
 		      btrfs_item_end_nr(l, mid);
 
 	for (i = 0; i < nritems; i++) {
-		struct btrfs_item *item = btrfs_item_nr(right, i);
+		struct btrfs_item *item = btrfs_item_nr(i);
 		u32 ioff;
 
 		ioff = btrfs_token_item_offset(right, item, &token);
@@ -4016,7 +4018,7 @@
 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
 					if (data_size && !tried_avoid_double)
 						goto push_for_double;
-					split = 2 ;
+					split = 2;
 				}
 			}
 		}
@@ -4042,7 +4044,7 @@
 	btrfs_set_header_owner(right, root->root_key.objectid);
 	btrfs_set_header_level(right, 0);
 	write_extent_buffer(right, root->fs_info->fsid,
-			    btrfs_header_fsid(right), BTRFS_FSID_SIZE);
+			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
 
 	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
 			    btrfs_header_chunk_tree_uuid(right),
@@ -4177,7 +4179,7 @@
 
 	btrfs_set_path_blocking(path);
 
-	item = btrfs_item_nr(leaf, path->slots[0]);
+	item = btrfs_item_nr(path->slots[0]);
 	orig_offset = btrfs_item_offset(leaf, item);
 	item_size = btrfs_item_size(leaf, item);
 
@@ -4200,7 +4202,7 @@
 	btrfs_cpu_key_to_disk(&disk_key, new_key);
 	btrfs_set_item_key(leaf, &disk_key, slot);
 
-	new_item = btrfs_item_nr(leaf, slot);
+	new_item = btrfs_item_nr(slot);
 
 	btrfs_set_item_offset(leaf, new_item, orig_offset);
 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
@@ -4339,7 +4341,7 @@
 	/* first correct the data pointers */
 	for (i = slot; i < nritems; i++) {
 		u32 ioff;
-		item = btrfs_item_nr(leaf, i);
+		item = btrfs_item_nr(i);
 
 		ioff = btrfs_token_item_offset(leaf, item, &token);
 		btrfs_set_token_item_offset(leaf, item,
@@ -4387,7 +4389,7 @@
 			fixup_low_keys(root, path, &disk_key, 1);
 	}
 
-	item = btrfs_item_nr(leaf, slot);
+	item = btrfs_item_nr(slot);
 	btrfs_set_item_size(leaf, item, new_size);
 	btrfs_mark_buffer_dirty(leaf);
 
@@ -4441,7 +4443,7 @@
 	/* first correct the data pointers */
 	for (i = slot; i < nritems; i++) {
 		u32 ioff;
-		item = btrfs_item_nr(leaf, i);
+		item = btrfs_item_nr(i);
 
 		ioff = btrfs_token_item_offset(leaf, item, &token);
 		btrfs_set_token_item_offset(leaf, item,
@@ -4455,7 +4457,7 @@
 
 	data_end = old_data;
 	old_size = btrfs_item_size_nr(leaf, slot);
-	item = btrfs_item_nr(leaf, slot);
+	item = btrfs_item_nr(slot);
 	btrfs_set_item_size(leaf, item, old_size + data_size);
 	btrfs_mark_buffer_dirty(leaf);
 
@@ -4514,7 +4516,7 @@
 		for (i = slot; i < nritems; i++) {
 			u32 ioff;
 
-			item = btrfs_item_nr(leaf, i);
+			item = btrfs_item_nr(i);
 			ioff = btrfs_token_item_offset(leaf, item, &token);
 			btrfs_set_token_item_offset(leaf, item,
 						    ioff - total_data, &token);
@@ -4535,7 +4537,7 @@
 	for (i = 0; i < nr; i++) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
 		btrfs_set_item_key(leaf, &disk_key, slot + i);
-		item = btrfs_item_nr(leaf, slot + i);
+		item = btrfs_item_nr(slot + i);
 		btrfs_set_token_item_offset(leaf, item,
 					    data_end - data_size[i], &token);
 		data_end -= data_size[i];
@@ -4730,7 +4732,7 @@
 		for (i = slot + nr; i < nritems; i++) {
 			u32 ioff;
 
-			item = btrfs_item_nr(leaf, i);
+			item = btrfs_item_nr(i);
 			ioff = btrfs_token_item_offset(leaf, item, &token);
 			btrfs_set_token_item_offset(leaf, item,
 						    ioff + dsize, &token);
@@ -4823,14 +4825,18 @@
 
 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
 
-	if (key.offset > 0)
+	if (key.offset > 0) {
 		key.offset--;
-	else if (key.type > 0)
+	} else if (key.type > 0) {
 		key.type--;
-	else if (key.objectid > 0)
+		key.offset = (u64)-1;
+	} else if (key.objectid > 0) {
 		key.objectid--;
-	else
+		key.type = (u8)-1;
+		key.offset = (u64)-1;
+	} else {
 		return 1;
+	}
 
 	btrfs_release_path(path);
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
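
The key-stepping fix above is easiest to read as borrow arithmetic: a btrfs key sorts as the tuple (objectid, type, offset), most significant field first, so computing the previous key must saturate every lower-order field whenever a higher-order one is decremented -- which the old code forgot, skipping everything between (objectid, type, 0) and the true predecessor. A minimal userspace sketch of the corrected logic; struct key is a hypothetical stand-in for struct btrfs_key:

	#include <stdint.h>
	#include <stdio.h>

	struct key {
		uint64_t objectid;
		uint8_t  type;
		uint64_t offset;
	};

	/* Compute the immediate predecessor of *k in the tuple ordering.
	 * Returns 1 if *k is already the smallest possible key. */
	static int key_predecessor(struct key *k)
	{
		if (k->offset > 0) {
			k->offset--;
		} else if (k->type > 0) {
			k->type--;
			k->offset = (uint64_t)-1;	/* borrow: lower field wraps to max */
		} else if (k->objectid > 0) {
			k->objectid--;
			k->type = (uint8_t)-1;
			k->offset = (uint64_t)-1;
		} else {
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct key k = { .objectid = 5, .type = 0, .offset = 0 };

		key_predecessor(&k);	/* -> (4, 0xff, 0xffffffffffffffff) */
		printf("%llu %u %llu\n", (unsigned long long)k.objectid,
		       k.type, (unsigned long long)k.offset);
		return 0;
	}
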
@@ -4866,7 +4872,6 @@
  * was nothing in the tree that matched the search criteria.
  */
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
-			 struct btrfs_key *max_key,
 			 struct btrfs_path *path,
 			 u64 min_trans)
 {
@@ -4911,10 +4916,8 @@
 		 * If it is too old, skip to the next one.
 		 */
 		while (slot < nritems) {
-			u64 blockptr;
 			u64 gen;
 
-			blockptr = btrfs_node_blockptr(cur, slot);
 			gen = btrfs_node_ptr_generation(cur, slot);
 			if (gen < min_trans) {
 				slot++;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0506f40..f9aeb27 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -47,6 +47,12 @@
 extern struct kmem_cache *btrfs_free_space_cachep;
 struct btrfs_ordered_sum;
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+#define STATIC noinline
+#else
+#define STATIC static noinline
+#endif
+
 #define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
 
 #define BTRFS_MAX_MIRRORS 3
@@ -1580,7 +1586,6 @@
 	atomic_t scrubs_paused;
 	atomic_t scrub_cancel_req;
 	wait_queue_head_t scrub_pause_wait;
-	struct rw_semaphore scrub_super_lock;
 	int scrub_workers_refcnt;
 	struct btrfs_workers scrub_workers;
 	struct btrfs_workers scrub_wr_completion_workers;
@@ -1724,7 +1729,9 @@
 	int ref_cows;
 	int track_dirty;
 	int in_radix;
-
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+	int dummy_root;
+#endif
 	u64 defrag_trans_start;
 	struct btrfs_key defrag_progress;
 	struct btrfs_key defrag_max;
@@ -2461,8 +2468,7 @@
 		sizeof(struct btrfs_item) * nr;
 }
 
-static inline struct btrfs_item *btrfs_item_nr(struct extent_buffer *eb,
-					       int nr)
+static inline struct btrfs_item *btrfs_item_nr(int nr)
 {
 	return (struct btrfs_item *)btrfs_item_nr_offset(nr);
 }
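
The signature change works because an item header lives at an offset determined solely by its slot number -- a leaf is a fixed-size header followed by an array of fixed-size struct btrfs_item entries -- so the extent_buffer argument was never used. A sketch of the idea; the struct layout and the 101-byte header are illustrative, not the exact on-disk format:

	#include <stddef.h>
	#include <stdio.h>

	struct item { unsigned int offset, size; };
	struct leaf { char header[101]; struct item items[]; };

	/* The "item pointer" btrfs_item_nr() returns is really just this
	 * offset; accessors that touch memory still take the buffer. */
	static unsigned long item_nr_offset(int nr)
	{
		return offsetof(struct leaf, items) + sizeof(struct item) * nr;
	}

	int main(void)
	{
		printf("item 3 lives at byte %lu of every leaf\n",
		       item_nr_offset(3));
		return 0;
	}
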
@@ -2475,30 +2481,30 @@
 
 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
 {
-	return btrfs_item_end(eb, btrfs_item_nr(eb, nr));
+	return btrfs_item_end(eb, btrfs_item_nr(nr));
 }
 
 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
 {
-	return btrfs_item_offset(eb, btrfs_item_nr(eb, nr));
+	return btrfs_item_offset(eb, btrfs_item_nr(nr));
 }
 
 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
 {
-	return btrfs_item_size(eb, btrfs_item_nr(eb, nr));
+	return btrfs_item_size(eb, btrfs_item_nr(nr));
 }
 
 static inline void btrfs_item_key(struct extent_buffer *eb,
 			   struct btrfs_disk_key *disk_key, int nr)
 {
-	struct btrfs_item *item = btrfs_item_nr(eb, nr);
+	struct btrfs_item *item = btrfs_item_nr(nr);
 	read_eb_member(eb, item, struct btrfs_item, key, disk_key);
 }
 
 static inline void btrfs_set_item_key(struct extent_buffer *eb,
 			       struct btrfs_disk_key *disk_key, int nr)
 {
-	struct btrfs_item *item = btrfs_item_nr(eb, nr);
+	struct btrfs_item *item = btrfs_item_nr(nr);
 	write_eb_member(eb, item, struct btrfs_item, key, disk_key);
 }
 
@@ -2666,7 +2672,7 @@
 	btrfs_set_header_flags(eb, flags);
 }
 
-static inline unsigned long btrfs_header_fsid(struct extent_buffer *eb)
+static inline unsigned long btrfs_header_fsid(void)
 {
 	return offsetof(struct btrfs_header, fsid);
 }
@@ -3105,11 +3111,6 @@
 	((unsigned long)(btrfs_leaf_data(leaf) + \
 	btrfs_item_offset_nr(leaf, slot)))
 
-static inline struct dentry *fdentry(struct file *file)
-{
-	return file->f_path.dentry;
-}
-
 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 {
 	return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
@@ -3308,7 +3309,6 @@
 			struct btrfs_key *key, int lowest_level,
 			u64 min_trans);
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
-			 struct btrfs_key *max_key,
 			 struct btrfs_path *path,
 			 u64 min_trans);
 enum btrfs_compare_tree_result {
@@ -3675,8 +3675,7 @@
 			       u32 min_type);
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info,
-				    int delay_iput);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
@@ -3944,9 +3943,7 @@
 		    u64 end, struct btrfs_scrub_progress *progress,
 		    int readonly, int is_dev_replace);
 void btrfs_scrub_pause(struct btrfs_root *root);
-void btrfs_scrub_pause_super(struct btrfs_root *root);
 void btrfs_scrub_continue(struct btrfs_root *root);
-void btrfs_scrub_continue_super(struct btrfs_root *root);
 int btrfs_scrub_cancel(struct btrfs_fs_info *info);
 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
 			   struct btrfs_device *dev);
@@ -4028,5 +4025,9 @@
 	return signal_pending(current);
 }
 
+/* Sanity test specific functions */
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_destroy_inode(struct inode *inode);
+#endif
 
 #endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index cbd9523..8d292fb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -108,8 +108,8 @@
 			return node;
 		}
 		btrfs_inode->delayed_node = node;
-		atomic_inc(&node->refs);	/* can be accessed */
-		atomic_inc(&node->refs);	/* cached in the inode */
+		/* can be accessed and cached in the inode */
+		atomic_add(2, &node->refs);
 		spin_unlock(&root->inode_lock);
 		return node;
 	}
@@ -138,8 +138,8 @@
 		return ERR_PTR(-ENOMEM);
 	btrfs_init_delayed_node(node, root, ino);
 
-	atomic_inc(&node->refs);	/* cached in the btrfs inode */
-	atomic_inc(&node->refs);	/* can be accessed */
+	/* cached in the btrfs inode and can be accessed */
+	atomic_add(2, &node->refs);
 
 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 	if (ret) {
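
Folding the two reference bumps into one atomic_add() is a small but real win: the resulting count is identical, yet only one bus-locked read-modify-write is issued. A userspace equivalent with C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int refs = 0;

		atomic_fetch_add(&refs, 2);	/* was: two separate atomic +1s */
		printf("refs = %d\n", atomic_load(&refs));
		return 0;
	}
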
@@ -649,14 +649,13 @@
 			goto out;
 
 		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
-		if (!ret)
+		if (!WARN_ON(ret))
 			goto out;
 
 		/*
 		 * Ok this is a problem, let's just steal from the global rsv
 		 * since this really shouldn't happen that often.
 		 */
-		WARN_ON(1);
 		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
 					      dst_rsv, num_bytes);
 		goto out;
@@ -771,13 +770,13 @@
 	 */
 	btrfs_set_path_blocking(path);
 
-	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
+	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 	if (!keys) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
+	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 	if (!data_size) {
 		ret = -ENOMEM;
 		goto error;
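
kmalloc_array() differs from an open-coded kmalloc(n * size) in exactly one respect: it fails the allocation when the multiplication would overflow, instead of silently returning an undersized buffer that later writes would overrun. A userspace sketch of that guard:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void *alloc_array(size_t n, size_t size)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* n * size would wrap around */
		return malloc(n * size);
	}

	int main(void)
	{
		/* (SIZE_MAX/2 + 2) * 2 wraps to 2, so the bare product would
		 * quietly allocate two bytes; the guarded version refuses. */
		void *p = alloc_array(SIZE_MAX / 2 + 2, 2);

		printf("overflowing request -> %p\n", p);
		free(p);
		return 0;
	}
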
@@ -1174,8 +1173,10 @@
 	mutex_unlock(&delayed_node->mutex);
 
 	path = btrfs_alloc_path();
-	if (!path)
+	if (!path) {
+		btrfs_release_delayed_node(delayed_node);
 		return -ENOMEM;
+	}
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
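
The hunk above plugs a reference leak: the function has already taken a reference on the delayed node by this point, so the -ENOMEM early return must drop it. A compact userspace illustration of the discipline being restored; all names are illustrative, not the real btrfs API:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { int refs; };

	static void release_node(struct node *n)
	{
		if (--n->refs == 0)
			free(n);
	}

	static int update_delayed_inode(struct node *node)
	{
		void *path = malloc(64);	/* stands in for btrfs_alloc_path() */

		if (!path) {
			release_node(node);	/* the drop the patch adds */
			return -ENOMEM;
		}
		/* ... would use path and node here ... */
		free(path);
		release_node(node);
		return 0;
	}

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->refs = 1;	/* reference handed to update_delayed_inode() */
		printf("ret = %d\n", update_delayed_inode(n));
		return 0;
	}
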
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 9efb94e..342f9fd 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -26,7 +26,6 @@
 #include <linux/kthread.h>
 #include <linux/math64.h>
 #include <asm/div64.h>
-#include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"
 #include "disk-io.h"
@@ -38,7 +37,6 @@
 #include "rcu-string.h"
 #include "dev-replace.h"
 
-static u64 btrfs_get_seconds_since_1970(void);
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret);
 static void btrfs_dev_replace_update_device_in_mapping_tree(
@@ -296,13 +294,6 @@
 		dev_replace->cursor_left_last_write_of_item;
 }
 
-static u64 btrfs_get_seconds_since_1970(void)
-{
-	struct timespec t = CURRENT_TIME_SEC;
-
-	return t.tv_sec;
-}
-
 int btrfs_dev_replace_start(struct btrfs_root *root,
 			    struct btrfs_ioctl_dev_replace_args *args)
 {
@@ -390,7 +381,7 @@
 	 * go to the tgtdev as well (refer to btrfs_map_block()).
 	 */
 	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
-	dev_replace->time_started = btrfs_get_seconds_since_1970();
+	dev_replace->time_started = get_seconds();
 	dev_replace->cursor_left = 0;
 	dev_replace->committed_cursor_left = 0;
 	dev_replace->cursor_left_last_write_of_item = 0;
@@ -400,7 +391,7 @@
 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
 	btrfs_dev_replace_unlock(dev_replace);
 
-	btrfs_wait_all_ordered_extents(root->fs_info);
+	btrfs_wait_ordered_roots(root->fs_info, -1);
 
 	/* force writing the updated state information to disk */
 	trans = btrfs_start_transaction(root, 0);
@@ -470,12 +461,12 @@
 	 * flush all outstanding I/O and inode extent mappings before the
 	 * copy operation is declared as being finished
 	 */
-	ret = btrfs_start_all_delalloc_inodes(root->fs_info, 0);
+	ret = btrfs_start_delalloc_roots(root->fs_info, 0);
 	if (ret) {
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_all_ordered_extents(root->fs_info);
+	btrfs_wait_ordered_roots(root->fs_info, -1);
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
@@ -493,7 +484,7 @@
 			  : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
 	dev_replace->tgtdev = NULL;
 	dev_replace->srcdev = NULL;
-	dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+	dev_replace->time_stopped = get_seconds();
 	dev_replace->item_needs_writeback = 1;
 
 	if (scrub_ret) {
@@ -650,6 +641,9 @@
 	u64 result;
 	int ret;
 
+	if (fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
 	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
 	btrfs_dev_replace_lock(dev_replace);
 	switch (dev_replace->replace_state) {
@@ -668,7 +662,7 @@
 		break;
 	}
 	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
-	dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+	dev_replace->time_stopped = get_seconds();
 	dev_replace->item_needs_writeback = 1;
 	btrfs_dev_replace_unlock(dev_replace);
 	btrfs_scrub_cancel(fs_info);
@@ -703,7 +697,7 @@
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
 		dev_replace->replace_state =
 			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
-		dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+		dev_replace->time_stopped = get_seconds();
 		dev_replace->item_needs_writeback = 1;
 		pr_info("btrfs: suspending dev_replace for unmount\n");
 		break;
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 79e594e..c031ea3 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -58,7 +58,7 @@
 		return ERR_PTR(ret);
 	WARN_ON(ret > 0);
 	leaf = path->nodes[0];
-	item = btrfs_item_nr(leaf, path->slots[0]);
+	item = btrfs_item_nr(path->slots[0]);
 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 	BUG_ON(data_size > btrfs_item_size(leaf, item));
 	ptr += btrfs_item_size(leaf, item) - data_size;
@@ -474,8 +474,10 @@
 	}
 
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
-	if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) {
-		printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n",
+	if ((btrfs_dir_data_len(leaf, dir_item) +
+	     btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
+		printk(KERN_CRIT "btrfs: invalid dir item name + data len: %u + %u\n",
+		       (unsigned)btrfs_dir_name_len(leaf, dir_item),
 		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
 	}
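
The old check bounded only the data length, so an oversized name could still push the item past what a leaf holds; it is the sum of the two that must fit. A sketch of the tightened check, where 3072 is only an assumed example for BTRFS_MAX_XATTR_SIZE(root), which really derives from the leaf size:

	#include <stdio.h>

	#define MAX_XATTR_SIZE 3072	/* assumed example value */

	static int dir_item_len_ok(unsigned int name_len, unsigned int data_len)
	{
		/* Old check: data_len <= MAX_XATTR_SIZE. A 3000-byte name
		 * with 100 bytes of data passed it but overflowed the item. */
		return name_len + data_len <= MAX_XATTR_SIZE;
	}

	int main(void)
	{
		printf("%d\n", dir_item_len_ok(3000, 100));	/* 0: now rejected */
		return 0;
	}
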
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62176ad8..4c4ed0b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -33,7 +33,6 @@
 #include <linux/uuid.h>
 #include <linux/semaphore.h>
 #include <asm/unaligned.h>
-#include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -64,7 +63,6 @@
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 				      struct btrfs_root *root);
-static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 					struct extent_io_tree *dirty_pages,
@@ -477,14 +475,8 @@
 	if (page != eb->pages[0])
 		return 0;
 	found_start = btrfs_header_bytenr(eb);
-	if (found_start != start) {
-		WARN_ON(1);
+	if (WARN_ON(found_start != start || !PageUptodate(page)))
 		return 0;
-	}
-	if (!PageUptodate(page)) {
-		WARN_ON(1);
-		return 0;
-	}
 	csum_tree_block(root, eb, 0);
 	return 0;
 }
@@ -496,7 +488,7 @@
 	u8 fsid[BTRFS_UUID_SIZE];
 	int ret = 1;
 
-	read_extent_buffer(eb, fsid, btrfs_header_fsid(eb), BTRFS_FSID_SIZE);
+	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
 	while (fs_devices) {
 		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
 			ret = 0;
@@ -1105,8 +1097,7 @@
 {
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
-	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				bytenr, blocksize);
+	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, bytenr);
 	return eb;
 }
 
@@ -1229,14 +1220,18 @@
 	atomic_set(&root->refs, 1);
 	root->log_transid = 0;
 	root->last_log_commit = 0;
-	extent_io_tree_init(&root->dirty_log_pages,
-			     fs_info->btree_inode->i_mapping);
+	if (fs_info)
+		extent_io_tree_init(&root->dirty_log_pages,
+				     fs_info->btree_inode->i_mapping);
 
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
 	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
-	root->defrag_trans_start = fs_info->generation;
+	if (fs_info)
+		root->defrag_trans_start = fs_info->generation;
+	else
+		root->defrag_trans_start = 0;
 	init_completion(&root->kobj_unregister);
 	root->defrag_running = 0;
 	root->root_key.objectid = objectid;
@@ -1253,6 +1248,22 @@
 	return root;
 }
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+/* Should only be used by the testing infrastructure */
+struct btrfs_root *btrfs_alloc_dummy_root(void)
+{
+	struct btrfs_root *root;
+
+	root = btrfs_alloc_root(NULL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+	__setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
+	root->dummy_root = 1;
+
+	return root;
+}
+#endif
+
 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				     struct btrfs_fs_info *fs_info,
 				     u64 objectid)
@@ -1292,7 +1303,7 @@
 	btrfs_set_header_owner(leaf, objectid);
 	root->node = leaf;
 
-	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(leaf),
+	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
 			    BTRFS_FSID_SIZE);
 	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
 			    btrfs_header_chunk_tree_uuid(leaf),
@@ -1379,7 +1390,7 @@
 	root->node = leaf;
 
 	write_extent_buffer(root->node, root->fs_info->fsid,
-			    btrfs_header_fsid(root->node), BTRFS_FSID_SIZE);
+			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
 	btrfs_mark_buffer_dirty(root->node);
 	btrfs_tree_unlock(root->node);
 	return root;
@@ -1780,6 +1791,9 @@
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
+		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
+				      &root->fs_info->fs_state)))
+			btrfs_cleanup_transaction(root);
 		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
@@ -2013,50 +2027,28 @@
 	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
 }
 
+static void free_root_extent_buffers(struct btrfs_root *root)
+{
+	if (root) {
+		free_extent_buffer(root->node);
+		free_extent_buffer(root->commit_root);
+		root->node = NULL;
+		root->commit_root = NULL;
+	}
+}
+
 /* helper to cleanup tree roots */
 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 {
-	free_extent_buffer(info->tree_root->node);
-	free_extent_buffer(info->tree_root->commit_root);
-	info->tree_root->node = NULL;
-	info->tree_root->commit_root = NULL;
+	free_root_extent_buffers(info->tree_root);
 
-	if (info->dev_root) {
-		free_extent_buffer(info->dev_root->node);
-		free_extent_buffer(info->dev_root->commit_root);
-		info->dev_root->node = NULL;
-		info->dev_root->commit_root = NULL;
-	}
-	if (info->extent_root) {
-		free_extent_buffer(info->extent_root->node);
-		free_extent_buffer(info->extent_root->commit_root);
-		info->extent_root->node = NULL;
-		info->extent_root->commit_root = NULL;
-	}
-	if (info->csum_root) {
-		free_extent_buffer(info->csum_root->node);
-		free_extent_buffer(info->csum_root->commit_root);
-		info->csum_root->node = NULL;
-		info->csum_root->commit_root = NULL;
-	}
-	if (info->quota_root) {
-		free_extent_buffer(info->quota_root->node);
-		free_extent_buffer(info->quota_root->commit_root);
-		info->quota_root->node = NULL;
-		info->quota_root->commit_root = NULL;
-	}
-	if (info->uuid_root) {
-		free_extent_buffer(info->uuid_root->node);
-		free_extent_buffer(info->uuid_root->commit_root);
-		info->uuid_root->node = NULL;
-		info->uuid_root->commit_root = NULL;
-	}
-	if (chunk_root) {
-		free_extent_buffer(info->chunk_root->node);
-		free_extent_buffer(info->chunk_root->commit_root);
-		info->chunk_root->node = NULL;
-		info->chunk_root->commit_root = NULL;
-	}
+	free_root_extent_buffers(info->dev_root);
+	free_root_extent_buffers(info->extent_root);
+	free_root_extent_buffers(info->csum_root);
+	free_root_extent_buffers(info->quota_root);
+	free_root_extent_buffers(info->uuid_root);
+	if (chunk_root)
+		free_root_extent_buffers(info->chunk_root);
 }
 
 static void del_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2230,7 +2222,6 @@
 	atomic_set(&fs_info->scrubs_paused, 0);
 	atomic_set(&fs_info->scrub_cancel_req, 0);
 	init_waitqueue_head(&fs_info->scrub_pause_wait);
-	init_rwsem(&fs_info->scrub_super_lock);
 	fs_info->scrub_workers_refcnt = 0;
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	fs_info->check_integrity_print_mask = 0;
@@ -2272,7 +2263,7 @@
 	       sizeof(struct btrfs_key));
 	set_bit(BTRFS_INODE_DUMMY,
 		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
-	insert_inode_hash(fs_info->btree_inode);
+	btrfs_insert_inode_hash(fs_info->btree_inode);
 
 	spin_lock_init(&fs_info->block_group_cache_lock);
 	fs_info->block_group_cache_tree = RB_ROOT;
@@ -2670,6 +2661,7 @@
 
 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
 	tree_root->commit_root = btrfs_root_node(tree_root);
+	btrfs_set_root_refs(&tree_root->root_item, 1);
 
 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3448,10 +3440,7 @@
 int write_ctree_super(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *root, int max_mirrors)
 {
-	int ret;
-
-	ret = write_all_supers(root, max_mirrors);
-	return ret;
+	return write_all_supers(root, max_mirrors);
 }
 
 /* Drop a fs root from the radix tree and free it. */
@@ -3614,12 +3603,12 @@
 		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
+	del_fs_roots(fs_info);
+
 	btrfs_free_block_groups(fs_info);
 
 	btrfs_stop_all_workers(fs_info);
 
-	del_fs_roots(fs_info);
-
 	free_root_pointers(fs_info, 1);
 
 	iput(fs_info->btree_inode);
@@ -3669,10 +3658,20 @@
 
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
-	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+	struct btrfs_root *root;
 	u64 transid = btrfs_header_generation(buf);
 	int was_dirty;
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+	/*
+	 * This is a fast path so only do this check if we have sanity tests
+	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
+	 * outside of the sanity tests.
+	 */
+	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
+		return;
+#endif
+	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
 	btrfs_assert_tree_locked(buf);
 	if (transid != root->fs_info->generation)
 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
@@ -3802,7 +3801,8 @@
 	while (!list_empty(&splice)) {
 		root = list_first_entry(&splice, struct btrfs_root,
 					ordered_root);
-		list_del_init(&root->ordered_root);
+		list_move_tail(&root->ordered_root,
+			       &fs_info->ordered_roots);
 
 		btrfs_destroy_ordered_extents(root);
 
@@ -3880,24 +3880,6 @@
 	return ret;
 }
 
-static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
-{
-	struct btrfs_pending_snapshot *snapshot;
-	struct list_head splice;
-
-	INIT_LIST_HEAD(&splice);
-
-	list_splice_init(&t->pending_snapshots, &splice);
-
-	while (!list_empty(&splice)) {
-		snapshot = list_entry(splice.next,
-				      struct btrfs_pending_snapshot,
-				      list);
-		snapshot->error = -ECANCELED;
-		list_del_init(&snapshot->list);
-	}
-}
-
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 {
 	struct btrfs_inode *btrfs_inode;
@@ -4027,15 +4009,13 @@
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 				   struct btrfs_root *root)
 {
+	btrfs_destroy_ordered_operations(cur_trans, root);
+
 	btrfs_destroy_delayed_refs(cur_trans, root);
-	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
-				cur_trans->dirty_pages.dirty_bytes);
 
 	cur_trans->state = TRANS_STATE_COMMIT_START;
 	wake_up(&root->fs_info->transaction_blocked_wait);
 
-	btrfs_evict_pending_snapshots(cur_trans);
-
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
 	wake_up(&root->fs_info->transaction_wait);
 
@@ -4059,63 +4039,51 @@
 static int btrfs_cleanup_transaction(struct btrfs_root *root)
 {
 	struct btrfs_transaction *t;
-	LIST_HEAD(list);
 
 	mutex_lock(&root->fs_info->transaction_kthread_mutex);
 
 	spin_lock(&root->fs_info->trans_lock);
-	list_splice_init(&root->fs_info->trans_list, &list);
-	root->fs_info->running_transaction = NULL;
-	spin_unlock(&root->fs_info->trans_lock);
+	while (!list_empty(&root->fs_info->trans_list)) {
+		t = list_first_entry(&root->fs_info->trans_list,
+				     struct btrfs_transaction, list);
+		if (t->state >= TRANS_STATE_COMMIT_START) {
+			atomic_inc(&t->use_count);
+			spin_unlock(&root->fs_info->trans_lock);
+			btrfs_wait_for_commit(root, t->transid);
+			btrfs_put_transaction(t);
+			spin_lock(&root->fs_info->trans_lock);
+			continue;
+		}
+		if (t == root->fs_info->running_transaction) {
+			t->state = TRANS_STATE_COMMIT_DOING;
+			spin_unlock(&root->fs_info->trans_lock);
+			/*
+			 * We wait for 0 num_writers since we don't hold a trans
+			 * handle open currently for this transaction.
+			 */
+			wait_event(t->writer_wait,
+				   atomic_read(&t->num_writers) == 0);
+		} else {
+			spin_unlock(&root->fs_info->trans_lock);
+		}
+		btrfs_cleanup_one_transaction(t, root);
 
-	while (!list_empty(&list)) {
-		t = list_entry(list.next, struct btrfs_transaction, list);
-
-		btrfs_destroy_ordered_operations(t, root);
-
-		btrfs_destroy_all_ordered_extents(root->fs_info);
-
-		btrfs_destroy_delayed_refs(t, root);
-
-		/*
-		 *  FIXME: cleanup wait for commit
-		 *  We needn't acquire the lock here, because we are during
-		 *  the umount, there is no other task which will change it.
-		 */
-		t->state = TRANS_STATE_COMMIT_START;
-		smp_mb();
-		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
-			wake_up(&root->fs_info->transaction_blocked_wait);
-
-		btrfs_evict_pending_snapshots(t);
-
-		t->state = TRANS_STATE_UNBLOCKED;
-		smp_mb();
-		if (waitqueue_active(&root->fs_info->transaction_wait))
-			wake_up(&root->fs_info->transaction_wait);
-
-		btrfs_destroy_delayed_inodes(root);
-		btrfs_assert_delayed_root_empty(root);
-
-		btrfs_destroy_all_delalloc_inodes(root->fs_info);
-
-		btrfs_destroy_marked_extents(root, &t->dirty_pages,
-					     EXTENT_DIRTY);
-
-		btrfs_destroy_pinned_extent(root,
-					    root->fs_info->pinned_extents);
-
-		t->state = TRANS_STATE_COMPLETED;
-		smp_mb();
-		if (waitqueue_active(&t->commit_wait))
-			wake_up(&t->commit_wait);
-
-		atomic_set(&t->use_count, 0);
+		spin_lock(&root->fs_info->trans_lock);
+		if (t == root->fs_info->running_transaction)
+			root->fs_info->running_transaction = NULL;
 		list_del_init(&t->list);
-		memset(t, 0, sizeof(*t));
-		kmem_cache_free(btrfs_transaction_cachep, t);
-	}
+		spin_unlock(&root->fs_info->trans_lock);
 
+		btrfs_put_transaction(t);
+		trace_btrfs_transaction_commit(root);
+		spin_lock(&root->fs_info->trans_lock);
+	}
+	spin_unlock(&root->fs_info->trans_lock);
+	btrfs_destroy_all_ordered_extents(root->fs_info);
+	btrfs_destroy_delayed_inodes(root);
+	btrfs_assert_delayed_root_empty(root);
+	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
+	btrfs_destroy_all_delalloc_inodes(root->fs_info);
 	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
 	return 0;
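
The rewritten loop replaces the old splice-and-iterate scheme with the standard drain pattern: pop the list head under the lock, drop the lock around anything that can block, then re-take it and re-read the head rather than trusting a stale snapshot. A userspace sketch with a pthread mutex; in the real code the committer unlinks a committing transaction before the wait returns:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct txn { struct txn *next; int committing; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct txn *head;

	static void wait_for_commit(struct txn *t) { (void)t; /* may sleep */ }
	static void cleanup_one(struct txn *t) { free(t); }

	static void drain_transactions(void)
	{
		pthread_mutex_lock(&lock);
		while (head) {
			struct txn *t = head;

			if (t->committing) {
				pthread_mutex_unlock(&lock);
				wait_for_commit(t);	/* blocking: lock dropped */
				pthread_mutex_lock(&lock);
				continue;	/* head may have changed; re-check */
			}
			head = t->next;			/* unlink under the lock */
			pthread_mutex_unlock(&lock);
			cleanup_one(t);			/* blocking work, lockless */
			pthread_mutex_lock(&lock);
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		head = calloc(1, sizeof(*head));
		drain_transactions();
		printf("drained\n");
		return 0;
	}
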
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 5ce2a7d..53059df 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -86,6 +86,10 @@
 				 struct btrfs_root *root);
 void btrfs_free_fs_root(struct btrfs_root *root);
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct btrfs_root *btrfs_alloc_dummy_root(void);
+#endif
+
 /*
  * This function is used to grab the root and avoid it being freed while
  * we access it. But it doesn't ensure that the tree is not dropped.
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 4b86916..41422a3 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -5,7 +5,6 @@
 #include "btrfs_inode.h"
 #include "print-tree.h"
 #include "export.h"
-#include "compat.h"
 
 #define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
 						 parent_objectid) / 4)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d58bef1..45d98d0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/percpu_counter.h>
-#include "compat.h"
 #include "hash.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -1551,9 +1550,8 @@
 	if (ret && !insert) {
 		err = -ENOENT;
 		goto out;
-	} else if (ret) {
+	} else if (WARN_ON(ret)) {
 		err = -EIO;
-		WARN_ON(1);
 		goto out;
 	}
 
@@ -1979,7 +1977,6 @@
 	struct btrfs_extent_item *item;
 	u64 refs;
 	int ret;
-	int err = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -1992,14 +1989,9 @@
 					   path, bytenr, num_bytes, parent,
 					   root_objectid, owner, offset,
 					   refs_to_add, extent_op);
-	if (ret == 0)
+	if (ret != -EAGAIN)
 		goto out;
 
-	if (ret != -EAGAIN) {
-		err = ret;
-		goto out;
-	}
-
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
 	refs = btrfs_extent_refs(leaf, item);
@@ -2021,7 +2013,7 @@
 		btrfs_abort_transaction(trans, root, ret);
 out:
 	btrfs_free_path(path);
-	return err;
+	return ret;
 }
 
 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
@@ -2137,15 +2129,28 @@
 	}
 	if (ret > 0) {
 		if (metadata) {
-			btrfs_release_path(path);
-			metadata = 0;
+			if (path->slots[0] > 0) {
+				path->slots[0]--;
+				btrfs_item_key_to_cpu(path->nodes[0], &key,
+						      path->slots[0]);
+				if (key.objectid == node->bytenr &&
+				    key.type == BTRFS_EXTENT_ITEM_KEY &&
+				    key.offset == node->num_bytes)
+					ret = 0;
+			}
+			if (ret > 0) {
+				btrfs_release_path(path);
+				metadata = 0;
 
-			key.offset = node->num_bytes;
-			key.type = BTRFS_EXTENT_ITEM_KEY;
-			goto again;
+				key.objectid = node->bytenr;
+				key.offset = node->num_bytes;
+				key.type = BTRFS_EXTENT_ITEM_KEY;
+				goto again;
+			}
+		} else {
+			err = -EIO;
+			goto out;
 		}
-		err = -EIO;
-		goto out;
 	}
 
 	leaf = path->nodes[0];
@@ -2234,8 +2239,12 @@
 {
 	int ret = 0;
 
-	if (trans->aborted)
+	if (trans->aborted) {
+		if (insert_reserved)
+			btrfs_pin_extent(root, node->bytenr,
+					 node->num_bytes, 1);
 		return 0;
+	}
 
 	if (btrfs_delayed_ref_is_head(node)) {
 		struct btrfs_delayed_ref_head *head;
@@ -2411,6 +2420,14 @@
 				btrfs_free_delayed_extent_op(extent_op);
 
 				if (ret) {
+					/*
+					 * Need to reset must_insert_reserved if
+					 * there was an error so the abort stuff
+					 * can clean up the reserved space
+					 * properly.
+					 */
+					if (must_insert_reserved)
+						locked_ref->must_insert_reserved = 1;
 					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
 					spin_lock(&delayed_refs->lock);
 					btrfs_delayed_ref_unlock(locked_ref);
@@ -3197,8 +3214,7 @@
 		if (ret)
 			goto out_put;
 
-		ret = btrfs_truncate_free_space_cache(root, trans, path,
-						      inode);
+		ret = btrfs_truncate_free_space_cache(root, trans, inode);
 		if (ret)
 			goto out_put;
 	}
@@ -3318,10 +3334,9 @@
 		last = cache->key.objectid + cache->key.offset;
 
 		err = write_one_cache_group(trans, root, path, cache);
+		btrfs_put_block_group(cache);
 		if (err) /* File system offline */
 			goto out;
-
-		btrfs_put_block_group(cache);
 	}
 
 	while (1) {
@@ -3605,10 +3620,9 @@
 	/* make sure bytes are sectorsize aligned */
 	bytes = ALIGN(bytes, root->sectorsize);
 
-	if (root == root->fs_info->tree_root ||
-	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
-		alloc_chunk = 0;
+	if (btrfs_is_free_space_inode(inode)) {
 		committed = 1;
+		ASSERT(current->journal_info);
 	}
 
 	data_sinfo = fs_info->data_sinfo;
@@ -3636,6 +3650,16 @@
 			spin_unlock(&data_sinfo->lock);
 alloc:
 			alloc_target = btrfs_get_alloc_profile(root, 1);
+			/*
+			 * It is ugly that we don't call nolock join
+			 * transaction for the free space inode case here.
+			 * But it is safe because we only do the data space
+			 * reservation for the free space cache in the
+			 * transaction context; the common join transaction
+			 * just increases the counter of the current
+			 * transaction handle and doesn't try to acquire the
+			 * trans_lock of the fs.
+			 */
 			trans = btrfs_join_transaction(root);
 			if (IS_ERR(trans))
 				return PTR_ERR(trans);
@@ -3681,6 +3705,9 @@
 			goto again;
 		}
 
+		trace_btrfs_space_reservation(root->fs_info,
+					      "space_info:enospc",
+					      data_sinfo->flags, bytes, 1);
 		return -ENOSPC;
 	}
 	data_sinfo->bytes_may_use += bytes;
@@ -3989,12 +4016,26 @@
 		 * the filesystem is readonly(all dirty pages are written to
 		 * the disk).
 		 */
-		btrfs_start_all_delalloc_inodes(root->fs_info, 0);
+		btrfs_start_delalloc_roots(root->fs_info, 0);
 		if (!current->journal_info)
-			btrfs_wait_all_ordered_extents(root->fs_info);
+			btrfs_wait_ordered_roots(root->fs_info, -1);
 	}
 }
 
+static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
+{
+	u64 bytes;
+	int nr;
+
+	bytes = btrfs_calc_trans_metadata_size(root, 1);
+	nr = (int)div64_u64(to_reclaim, bytes);
+	if (!nr)
+		nr = 1;
+	return nr;
+}
+
+#define EXTENT_SIZE_PER_ITEM	(256 * 1024)
+
 /*
  * shrink metadata reservation for delalloc
  */
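
With calc_reclaim_items_nr() the flusher sizes its work in whole metadata items and only then converts back to bytes, at 256 KiB of dirty delalloc per item (EXTENT_SIZE_PER_ITEM). A worked example with an assumed per-item reservation; the real figure comes from btrfs_calc_trans_metadata_size() and depends on the node size:

	#include <stdint.h>
	#include <stdio.h>

	#define EXTENT_SIZE_PER_ITEM (256 * 1024)

	int main(void)
	{
		uint64_t per_item_bytes = 64 * 1024;	/* assumed reservation */
		uint64_t to_reclaim = 1024 * 1024;	/* want 1 MiB back */
		int nr = (int)(to_reclaim / per_item_bytes);

		if (!nr)
			nr = 1;
		/* Flush nr items' worth of delalloc, not raw bytes: */
		printf("flush %d items => %d bytes of delalloc\n",
		       nr, nr * EXTENT_SIZE_PER_ITEM);
		return 0;
	}
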
@@ -4007,24 +4048,30 @@
 	u64 delalloc_bytes;
 	u64 max_reclaim;
 	long time_left;
-	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
-	int loops = 0;
+	unsigned long nr_pages;
+	int loops;
+	int items;
 	enum btrfs_reserve_flush_enum flush;
 
+	/* Calc the number of pages we need to flush for space reservation */
+	items = calc_reclaim_items_nr(root, to_reclaim);
+	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
+
 	trans = (struct btrfs_trans_handle *)current->journal_info;
 	block_rsv = &root->fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
 
-	smp_mb();
 	delalloc_bytes = percpu_counter_sum_positive(
 						&root->fs_info->delalloc_bytes);
 	if (delalloc_bytes == 0) {
 		if (trans)
 			return;
-		btrfs_wait_all_ordered_extents(root->fs_info);
+		if (wait_ordered)
+			btrfs_wait_ordered_roots(root->fs_info, items);
 		return;
 	}
 
+	loops = 0;
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
@@ -4033,9 +4080,19 @@
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.
 		 */
-		wait_event(root->fs_info->async_submit_wait,
-			   !atomic_read(&root->fs_info->async_delalloc_pages));
+		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
+		if (!max_reclaim)
+			goto skip_async;
 
+		if (max_reclaim <= nr_pages)
+			max_reclaim = 0;
+		else
+			max_reclaim -= nr_pages;
+
+		wait_event(root->fs_info->async_submit_wait,
+			   atomic_read(&root->fs_info->async_delalloc_pages) <=
+			   (int)max_reclaim);
+skip_async:
 		if (!trans)
 			flush = BTRFS_RESERVE_FLUSH_ALL;
 		else
@@ -4049,13 +4106,12 @@
 
 		loops++;
 		if (wait_ordered && !trans) {
-			btrfs_wait_all_ordered_extents(root->fs_info);
+			btrfs_wait_ordered_roots(root->fs_info, items);
 		} else {
 			time_left = schedule_timeout_killable(1);
 			if (time_left)
 				break;
 		}
-		smp_mb();
 		delalloc_bytes = percpu_counter_sum_positive(
 						&root->fs_info->delalloc_bytes);
 	}
@@ -4140,16 +4196,11 @@
 	switch (state) {
 	case FLUSH_DELAYED_ITEMS_NR:
 	case FLUSH_DELAYED_ITEMS:
-		if (state == FLUSH_DELAYED_ITEMS_NR) {
-			u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
-
-			nr = (int)div64_u64(num_bytes, bytes);
-			if (!nr)
-				nr = 1;
-			nr *= 2;
-		} else {
+		if (state == FLUSH_DELAYED_ITEMS_NR)
+			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
+		else
 			nr = -1;
-		}
+
 		trans = btrfs_join_transaction(root);
 		if (IS_ERR(trans)) {
 			ret = PTR_ERR(trans);
@@ -4332,6 +4383,10 @@
 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
 			ret = 0;
 	}
+	if (ret == -ENOSPC)
+		trace_btrfs_space_reservation(root->fs_info,
+					      "space_info:enospc",
+					      space_info->flags, orig_bytes, 1);
 	if (flushing) {
 		spin_lock(&space_info->lock);
 		space_info->flush = 0;
@@ -4986,7 +5041,7 @@
 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 
 	if (to_reserve)
-		trace_btrfs_space_reservation(root->fs_info,"delalloc",
+		trace_btrfs_space_reservation(root->fs_info, "delalloc",
 					      btrfs_ino(inode), to_reserve, 1);
 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
@@ -5264,6 +5319,8 @@
 
 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
+	if (reserved)
+		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
 	return 0;
 }
 
@@ -5718,9 +5775,8 @@
 			}
 			extent_slot = path->slots[0];
 		}
-	} else if (ret == -ENOENT) {
+	} else if (WARN_ON(ret == -ENOENT)) {
 		btrfs_print_leaf(extent_root, path->nodes[0]);
-		WARN_ON(1);
 		btrfs_err(info,
 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
 			bytenr, parent, root_objectid, owner_objectid,
@@ -5967,6 +6023,7 @@
 
 		btrfs_add_free_space(cache, buf->start, buf->len);
 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
+		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
 		pin = 0;
 	}
 out:
@@ -6594,8 +6651,6 @@
 		}
 	}
 
-	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
-
 	return ret;
 }
 
@@ -6707,6 +6762,7 @@
 			ins->objectid, ins->offset);
 		BUG();
 	}
+	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
 	return ret;
 }
 
@@ -6731,13 +6787,18 @@
 		size += sizeof(*block_info);
 
 	path = btrfs_alloc_path();
-	if (!path)
+	if (!path) {
+		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+						   root->leafsize);
 		return -ENOMEM;
+	}
 
 	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
 				      ins, size);
 	if (ret) {
+		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+						   root->leafsize);
 		btrfs_free_path(path);
 		return ret;
 	}
@@ -6779,6 +6840,8 @@
 			ins->objectid, ins->offset);
 		BUG();
 	}
+
+	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
 	return ret;
 }
 
@@ -7983,7 +8046,7 @@
 
 	spin_lock(&sinfo->lock);
 
-	for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 		if (!list_empty(&sinfo->block_groups[i]))
 			free_bytes += __btrfs_get_ro_block_group_free_space(
 						&sinfo->block_groups[i]);
@@ -8271,15 +8334,14 @@
 
 	release_global_block_rsv(info);
 
-	while(!list_empty(&info->space_info)) {
+	while (!list_empty(&info->space_info)) {
 		space_info = list_entry(info->space_info.next,
 					struct btrfs_space_info,
 					list);
 		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
-			if (space_info->bytes_pinned > 0 ||
+			if (WARN_ON(space_info->bytes_pinned > 0 ||
 			    space_info->bytes_reserved > 0 ||
-			    space_info->bytes_may_use > 0) {
-				WARN_ON(1);
+			    space_info->bytes_may_use > 0)) {
 				dump_space_info(space_info, 0, 0);
 			}
 		}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 51731b7..856bc2b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -13,13 +13,13 @@
 #include <linux/cleancache.h>
 #include "extent_io.h"
 #include "extent_map.h"
-#include "compat.h"
 #include "ctree.h"
 #include "btrfs_inode.h"
 #include "volumes.h"
 #include "check-integrity.h"
 #include "locking.h"
 #include "rcu-string.h"
+#include "backref.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -1597,11 +1597,10 @@
  *
  * 1 is returned if we find something, 0 if nothing was in the tree
  */
-static noinline u64 find_lock_delalloc_range(struct inode *inode,
-					     struct extent_io_tree *tree,
-					     struct page *locked_page,
-					     u64 *start, u64 *end,
-					     u64 max_bytes)
+STATIC u64 find_lock_delalloc_range(struct inode *inode,
+				    struct extent_io_tree *tree,
+				    struct page *locked_page, u64 *start,
+				    u64 *end, u64 max_bytes)
 {
 	u64 delalloc_start;
 	u64 delalloc_end;
@@ -1740,10 +1739,8 @@
 	u64 last = 0;
 	int found = 0;
 
-	if (search_end <= cur_start) {
-		WARN_ON(1);
+	if (WARN_ON(search_end <= cur_start))
 		return 0;
-	}
 
 	spin_lock(&tree->lock);
 	if (cur_start == 0 && bits == EXTENT_DIRTY) {
@@ -3569,9 +3566,8 @@
 			 * but no sense in crashing the user's box for something
 			 * we can survive anyway.
 			 */
-			if (!eb) {
+			if (WARN_ON(!eb)) {
 				spin_unlock(&mapping->private_lock);
-				WARN_ON(1);
 				continue;
 			}
 
@@ -4038,7 +4034,7 @@
 	if (offset >= last)
 		return NULL;
 
-	while(1) {
+	while (1) {
 		len = last - offset;
 		if (len == 0)
 			break;
@@ -4062,6 +4058,19 @@
 	return NULL;
 }
 
+static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
+{
+	unsigned long cnt = *((unsigned long *)ctx);
+
+	cnt++;
+	*((unsigned long *)ctx) = cnt;
+
+	/* Now we're sure that the extent is shared. */
+	if (cnt > 1)
+		return 1;
+	return 0;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -4128,7 +4137,7 @@
 		last = found_key.offset;
 		last_for_get_extent = last + 1;
 	}
-	btrfs_free_path(path);
+	btrfs_release_path(path);
 
 	/*
 	 * we might have some extents allocated but more delalloc past those
@@ -4198,7 +4207,24 @@
 			flags |= (FIEMAP_EXTENT_DELALLOC |
 				  FIEMAP_EXTENT_UNKNOWN);
 		} else {
+			unsigned long ref_cnt = 0;
+
 			disko = em->block_start + offset_in_extent;
+
+			/*
+			 * As btrfs supports shared space, this information
+			 * can be exported to userspace tools via
+			 * flag FIEMAP_EXTENT_SHARED.
+			 */
+			ret = iterate_inodes_from_logical(
+					em->block_start,
+					BTRFS_I(inode)->root->fs_info,
+					path, count_ext_ref, &ref_cnt);
+			if (ret < 0 && ret != -ENOENT)
+				goto out_free;
+
+			if (ref_cnt > 1)
+				flags |= FIEMAP_EXTENT_SHARED;
 		}
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 			flags |= FIEMAP_EXTENT_ENCODED;
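
From userspace the effect is visible through the FIEMAP ioctl: extents referenced by more than one inode (snapshots, reflinked files) now come back with FIEMAP_EXTENT_SHARED set. A usage sketch with error handling abbreviated:

	#include <fcntl.h>
	#include <linux/fiemap.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		struct fiemap *fm;
		unsigned int i;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
		if (fd < 0 || !fm)
			return 1;
		fm->fm_length = ~0ULL;		/* map the whole file */
		fm->fm_extent_count = 32;
		if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
			for (i = 0; i < fm->fm_mapped_extents; i++)
				printf("extent %u: %s\n", i,
				       (fm->fm_extents[i].fe_flags &
					FIEMAP_EXTENT_SHARED) ?
				       "shared" : "exclusive");
		free(fm);
		close(fd);
		return 0;
	}
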
@@ -4230,6 +4256,7 @@
 out_free:
 	free_extent_map(em);
 out:
+	btrfs_free_path(path);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
 			     &cached_state, GFP_NOFS);
 	return ret;
@@ -4455,6 +4482,23 @@
 	}
 }
 
+struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+					 u64 start)
+{
+	struct extent_buffer *eb;
+
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
+		mark_extent_buffer_accessed(eb);
+		return eb;
+	}
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len)
 {
@@ -4468,14 +4512,10 @@
 	int uptodate = 1;
 	int ret;
 
-	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (eb && atomic_inc_not_zero(&eb->refs)) {
-		rcu_read_unlock();
-		mark_extent_buffer_accessed(eb);
+
+	eb = find_extent_buffer(tree, start);
+	if (eb)
 		return eb;
-	}
-	rcu_read_unlock();
 
 	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
 	if (!eb)
@@ -4534,24 +4574,17 @@
 
 	spin_lock(&tree->buffer_lock);
 	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&tree->buffer_lock);
+	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = radix_tree_lookup(&tree->buffer,
-						start >> PAGE_CACHE_SHIFT);
-		if (!atomic_inc_not_zero(&exists->refs)) {
-			spin_unlock(&tree->buffer_lock);
-			radix_tree_preload_end();
-			exists = NULL;
+		exists = find_extent_buffer(tree, start);
+		if (exists)
+			goto free_eb;
+		else
 			goto again;
-		}
-		spin_unlock(&tree->buffer_lock);
-		radix_tree_preload_end();
-		mark_extent_buffer_accessed(exists);
-		goto free_eb;
 	}
 	/* add one reference for the tree */
 	check_buffer_tree_ref(eb);
-	spin_unlock(&tree->buffer_lock);
-	radix_tree_preload_end();
 
 	/*
 	 * there is a race where release page may have
@@ -4582,23 +4615,6 @@
 	return exists;
 }
 
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len)
-{
-	struct extent_buffer *eb;
-
-	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (eb && atomic_inc_not_zero(&eb->refs)) {
-		rcu_read_unlock();
-		mark_extent_buffer_accessed(eb);
-		return eb;
-	}
-	rcu_read_unlock();
-
-	return NULL;
-}
-
 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 {
 	struct extent_buffer *eb =
@@ -5062,23 +5078,6 @@
 	}
 }
 
-static void move_pages(struct page *dst_page, struct page *src_page,
-		       unsigned long dst_off, unsigned long src_off,
-		       unsigned long len)
-{
-	char *dst_kaddr = page_address(dst_page);
-	if (dst_page == src_page) {
-		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
-	} else {
-		char *src_kaddr = page_address(src_page);
-		char *p = dst_kaddr + dst_off + len;
-		char *s = src_kaddr + src_off + len;
-
-		while (len--)
-			*--p = *--s;
-	}
-}
-
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
 {
 	unsigned long distance = (src > dst) ? src - dst : dst - src;
@@ -5189,7 +5188,7 @@
 
 		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
-		move_pages(extent_buffer_page(dst, dst_i),
+		copy_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
 			   dst_off_in_page - cur + 1,
 			   src_off_in_page - cur + 1, cur);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6dbc645..19620c5 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -271,7 +271,7 @@
 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len);
+					 u64 start);
 void free_extent_buffer(struct extent_buffer *eb);
 void free_extent_buffer_stale(struct extent_buffer *eb);
 #define WAIT_NONE	0
@@ -345,4 +345,10 @@
 int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
 			 int mirror_num);
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+noinline u64 find_lock_delalloc_range(struct inode *inode,
+				      struct extent_io_tree *tree,
+				      struct page *locked_page, u64 *start,
+				      u64 *end, u64 max_bytes);
+#endif
 #endif
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 61adc44..93fba71 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -3,10 +3,10 @@
 
 #include <linux/rbtree.h>
 
-#define EXTENT_MAP_LAST_BYTE (u64)-4
-#define EXTENT_MAP_HOLE (u64)-3
-#define EXTENT_MAP_INLINE (u64)-2
-#define EXTENT_MAP_DELALLOC (u64)-1
+#define EXTENT_MAP_LAST_BYTE ((u64)-4)
+#define EXTENT_MAP_HOLE ((u64)-3)
+#define EXTENT_MAP_INLINE ((u64)-2)
+#define EXTENT_MAP_DELALLOC ((u64)-1)
 
 /* bits for the flags field */
 #define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
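
Parenthesizing the cast expressions is macro hygiene: a cast binds tighter than nearly everything, but not, for example, sizeof applied to the expanded macro as a whole. A small demonstration of the hazard the parentheses close off:

	#include <stdint.h>
	#include <stdio.h>

	#define BAD_HOLE  (uint64_t)-3		/* unparenthesized, as before */
	#define GOOD_HOLE ((uint64_t)-3)	/* parenthesized, as after */

	int main(void)
	{
		/* "sizeof BAD_HOLE" parses as sizeof(uint64_t) - 3, printing
		 * 5 vs 8 where uint64_t is 8 bytes wide. */
		printf("%zu vs %zu\n", sizeof BAD_HOLE, sizeof GOOD_HOLE);
		return 0;
	}
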
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 4f53159..6f38488 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -329,6 +329,9 @@
 	u64 csum_end;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 
+	ASSERT(start == ALIGN(start, root->sectorsize) &&
+	       (end + 1) == ALIGN(end + 1, root->sectorsize));
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -846,10 +849,8 @@
 	path->leave_spinning = 0;
 	if (ret < 0)
 		goto fail_unlock;
-	if (ret != 0) {
-		WARN_ON(1);
+	if (WARN_ON(ret != 0))
 		goto fail_unlock;
-	}
 	leaf = path->nodes[0];
 csum:
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 72da4df..82d0342 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -39,7 +39,6 @@
 #include "print-tree.h"
 #include "tree-log.h"
 #include "locking.h"
-#include "compat.h"
 #include "volumes.h"
 
 static struct kmem_cache *btrfs_inode_defrag_cachep;
@@ -370,7 +369,7 @@
 	u64 root_objectid = 0;
 
 	atomic_inc(&fs_info->defrag_running);
-	while(1) {
+	while (1) {
 		/* Pause the auto defragger. */
 		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 			     &fs_info->fs_state))
@@ -1281,6 +1280,7 @@
 		}
 		wait_on_page_writeback(pages[i]);
 	}
+	faili = num_pages - 1;
 	err = 0;
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
@@ -1299,8 +1299,10 @@
 				unlock_page(pages[i]);
 				page_cache_release(pages[i]);
 			}
-			btrfs_wait_ordered_range(inode, start_pos,
-						 last_pos - start_pos);
+			err = btrfs_wait_ordered_range(inode, start_pos,
+						       last_pos - start_pos);
+			if (err)
+				goto fail;
 			goto again;
 		}
 		if (ordered)
@@ -1809,8 +1811,13 @@
 	atomic_inc(&root->log_batch);
 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			     &BTRFS_I(inode)->runtime_flags);
-	if (full_sync)
-		btrfs_wait_ordered_range(inode, start, end - start + 1);
+	if (full_sync) {
+		ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
+		if (ret) {
+			mutex_unlock(&inode->i_mutex);
+			goto out;
+		}
+	}
 	atomic_inc(&root->log_batch);
 
 	/*
@@ -1876,27 +1883,20 @@
 	mutex_unlock(&inode->i_mutex);
 
 	if (ret != BTRFS_NO_LOG_SYNC) {
-		if (ret > 0) {
-			/*
-			 * If we didn't already wait for ordered extents we need
-			 * to do that now.
-			 */
-			if (!full_sync)
-				btrfs_wait_ordered_range(inode, start,
-							 end - start + 1);
-			ret = btrfs_commit_transaction(trans, root);
-		} else {
+		if (!ret) {
 			ret = btrfs_sync_log(trans, root);
-			if (ret == 0) {
+			if (!ret) {
 				ret = btrfs_end_transaction(trans, root);
-			} else {
-				if (!full_sync)
-					btrfs_wait_ordered_range(inode, start,
-								 end -
-								 start + 1);
-				ret = btrfs_commit_transaction(trans, root);
+				goto out;
 			}
 		}
+		if (!full_sync) {
+			ret = btrfs_wait_ordered_range(inode, start,
+						       end - start + 1);
+			if (ret)
+				goto out;
+		}
+		ret = btrfs_commit_transaction(trans, root);
 	} else {
 		ret = btrfs_end_transaction(trans, root);
 	}
@@ -2067,7 +2067,9 @@
 	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
 			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
 
-	btrfs_wait_ordered_range(inode, offset, len);
+	ret = btrfs_wait_ordered_range(inode, offset, len);
+	if (ret)
+		return ret;
 
 	mutex_lock(&inode->i_mutex);
 	/*
@@ -2136,8 +2138,12 @@
 			btrfs_put_ordered_extent(ordered);
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
 				     lockend, &cached_state, GFP_NOFS);
-		btrfs_wait_ordered_range(inode, lockstart,
-					 lockend - lockstart + 1);
+		ret = btrfs_wait_ordered_range(inode, lockstart,
+					       lockend - lockstart + 1);
+		if (ret) {
+			mutex_unlock(&inode->i_mutex);
+			return ret;
+		}
 	}
 
 	path = btrfs_alloc_path();
@@ -2308,7 +2314,10 @@
 	 * wait for ordered IO before we have any locks.  We'll loop again
 	 * below with the locks held.
 	 */
-	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+	ret = btrfs_wait_ordered_range(inode, alloc_start,
+				       alloc_end - alloc_start);
+	if (ret)
+		goto out;
 
 	locked_end = alloc_end - 1;
 	while (1) {
@@ -2332,8 +2341,10 @@
 			 * we can't wait on the range with the transaction
 			 * running or with the extent lock held
 			 */
-			btrfs_wait_ordered_range(inode, alloc_start,
-						 alloc_end - alloc_start);
+			ret = btrfs_wait_ordered_range(inode, alloc_start,
+						       alloc_end - alloc_start);
+			if (ret)
+				goto out;
 		} else {
 			if (ordered)
 				btrfs_put_ordered_extent(ordered);
@@ -2405,14 +2416,12 @@
 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_map *em;
+	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	u64 lockstart = *offset;
 	u64 lockend = i_size_read(inode);
 	u64 start = *offset;
-	u64 orig_start = *offset;
 	u64 len = i_size_read(inode);
-	u64 last_end = 0;
 	int ret = 0;
 
 	lockend = max_t(u64, root->sectorsize, lockend);
@@ -2429,89 +2438,35 @@
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
 			 &cached_state);
 
-	/*
-	 * Delalloc is such a pain.  If we have a hole and we have pending
-	 * delalloc for a portion of the hole we will get back a hole that
-	 * exists for the entire range since it hasn't been actually written
-	 * yet.  So to take care of this case we need to look for an extent just
-	 * before the position we want in case there is outstanding delalloc
-	 * going on here.
-	 */
-	if (whence == SEEK_HOLE && start != 0) {
-		if (start <= root->sectorsize)
-			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
-						     root->sectorsize, 0);
-		else
-			em = btrfs_get_extent_fiemap(inode, NULL, 0,
-						     start - root->sectorsize,
-						     root->sectorsize, 0);
-		if (IS_ERR(em)) {
-			ret = PTR_ERR(em);
-			goto out;
-		}
-		last_end = em->start + em->len;
-		if (em->block_start == EXTENT_MAP_DELALLOC)
-			last_end = min_t(u64, last_end, inode->i_size);
-		free_extent_map(em);
-	}
-
-	while (1) {
+	while (start < inode->i_size) {
 		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR(em);
+			em = NULL;
 			break;
 		}
 
-		if (em->block_start == EXTENT_MAP_HOLE) {
-			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
-				if (last_end <= orig_start) {
-					free_extent_map(em);
-					ret = -ENXIO;
-					break;
-				}
-			}
-
-			if (whence == SEEK_HOLE) {
-				*offset = start;
-				free_extent_map(em);
-				break;
-			}
-		} else {
-			if (whence == SEEK_DATA) {
-				if (em->block_start == EXTENT_MAP_DELALLOC) {
-					if (start >= inode->i_size) {
-						free_extent_map(em);
-						ret = -ENXIO;
-						break;
-					}
-				}
-
-				if (!test_bit(EXTENT_FLAG_PREALLOC,
-					      &em->flags)) {
-					*offset = start;
-					free_extent_map(em);
-					break;
-				}
-			}
-		}
+		if (whence == SEEK_HOLE &&
+		    (em->block_start == EXTENT_MAP_HOLE ||
+		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
+			break;
+		else if (whence == SEEK_DATA &&
+			   (em->block_start != EXTENT_MAP_HOLE &&
+			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
+			break;
 
 		start = em->start + em->len;
-		last_end = em->start + em->len;
-
-		if (em->block_start == EXTENT_MAP_DELALLOC)
-			last_end = min_t(u64, last_end, inode->i_size);
-
-		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
-			free_extent_map(em);
-			ret = -ENXIO;
-			break;
-		}
 		free_extent_map(em);
+		em = NULL;
 		cond_resched();
 	}
-	if (!ret)
-		*offset = min(*offset, inode->i_size);
-out:
+	free_extent_map(em);
+	if (!ret) {
+		if (whence == SEEK_DATA && start >= inode->i_size)
+			ret = -ENXIO;
+		else
+			*offset = min_t(loff_t, start, inode->i_size);
+	}
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state, GFP_NOFS);
 	return ret;
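
The rewritten find_desired_extent() backs lseek()'s SEEK_DATA and SEEK_HOLE modes: walk extent maps forward from the offset, stop at the first matching region, and return ENXIO when SEEK_DATA runs past EOF; preallocated extents now count as holes, since they read back as zeroes. A userspace usage sketch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		off_t data, hole;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;
		data = lseek(fd, 0, SEEK_DATA);	/* -1/ENXIO if no data at all */
		hole = lseek(fd, 0, SEEK_HOLE);	/* EOF counts as the last hole */
		printf("first data at %lld, first hole at %lld\n",
		       (long long)data, (long long)hole);
		close(fd);
		return 0;
	}
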
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b4f9904..057be95 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -218,7 +218,6 @@
 
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
-				    struct btrfs_path *path,
 				    struct inode *inode)
 {
 	int ret = 0;
@@ -1009,8 +1008,13 @@
 	if (ret)
 		goto out;
 
-
-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	if (ret) {
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
+				 GFP_NOFS);
+		goto out;
+	}
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
@@ -2276,7 +2280,7 @@
 		goto out;
 
 	entry = rb_entry(node, struct btrfs_free_space, offset_index);
-	while(1) {
+	while (1) {
 		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
 			*max_extent_size = entry->bytes;
 
@@ -2967,19 +2971,15 @@
 
 int btrfs_write_out_ino_cache(struct btrfs_root *root,
 			      struct btrfs_trans_handle *trans,
-			      struct btrfs_path *path)
+			      struct btrfs_path *path,
+			      struct inode *inode)
 {
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
-	struct inode *inode;
 	int ret;
 
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
-	inode = lookup_free_ino_inode(root, path);
-	if (IS_ERR(inode))
-		return 0;
-
 	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
@@ -2990,7 +2990,6 @@
 #endif
 	}
 
-	iput(inode);
 	return ret;
 }
 
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e737f92..0cf4977 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -58,7 +58,6 @@
 				       struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
-				    struct btrfs_path *path,
 				    struct inode *inode);
 int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_group_cache *block_group);
@@ -76,7 +75,8 @@
 			struct btrfs_root *root);
 int btrfs_write_out_ino_cache(struct btrfs_root *root,
 			      struct btrfs_trans_handle *trans,
-			      struct btrfs_path *path);
+			      struct btrfs_path *path,
+			      struct inode *inode);
 
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index e0b7034..ec82fae 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -369,7 +369,7 @@
 		goto out;
 
 	leaf = path->nodes[0];
-	item = btrfs_item_nr(leaf, path->slots[0]);
+	item = btrfs_item_nr(path->slots[0]);
 	ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
 	ptr += btrfs_item_size(leaf, item) - ins_len;
 	extref = (struct btrfs_inode_extref *)ptr;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 2c66ddb..ab485e5 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -78,10 +78,8 @@
 			    btrfs_transaction_in_commit(fs_info)) {
 				leaf = path->nodes[0];
 
-				if (btrfs_header_nritems(leaf) == 0) {
-					WARN_ON(1);
+				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
 					break;
-				}
 
 				/*
 				 * Save the key so we can advance forward
@@ -237,7 +235,7 @@
 		start_caching(root);
 
 		if (objectid <= root->cache_progress ||
-		    objectid > root->highest_objectid)
+		    objectid >= root->highest_objectid)
 			__btrfs_add_free_space(ctl, objectid, 1);
 		else
 			__btrfs_add_free_space(pinned, objectid, 1);
@@ -412,8 +410,7 @@
 		return 0;
 
 	/* Don't save inode cache if we are deleting this root */
-	if (btrfs_root_refs(&root->root_item) == 0 &&
-	    root != root->fs_info->tree_root)
+	if (btrfs_root_refs(&root->root_item) == 0)
 		return 0;
 
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
@@ -467,7 +464,7 @@
 	}
 
 	if (i_size_read(inode) > 0) {
-		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+		ret = btrfs_truncate_free_space_cache(root, trans, inode);
 		if (ret) {
 			if (ret != -ENOSPC)
 				btrfs_abort_transaction(trans, root, ret);
@@ -504,7 +501,7 @@
 	}
 	btrfs_free_reserved_data_space(inode, prealloc);
 
-	ret = btrfs_write_out_ino_cache(root, trans, path);
+	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
 out_put:
 	iput(inode);
 out_release:
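
Several hunks in this pull, including the inode-map.c one above, fold open-coded warnings into the condition test. WARN_ON() evaluates to its condition, so the two forms below are equivalent; the shorter one is what the series standardizes on:

	if (btrfs_header_nritems(leaf) == 0) {
		WARN_ON(1);
		break;
	}

	/* equivalent: one warning, one branch */
	if (WARN_ON(btrfs_header_nritems(leaf) == 0))
		break;
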
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 51e3afa..da8d2f6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -43,7 +43,6 @@
 #include <linux/btrfs.h>
 #include <linux/blkdev.h>
 #include <linux/posix_acl_xattr.h>
-#include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -844,7 +843,10 @@
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	int ret = 0;
 
-	BUG_ON(btrfs_is_free_space_inode(inode));
+	if (btrfs_is_free_space_inode(inode)) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
 
 	num_bytes = ALIGN(end - start + 1, blocksize);
 	num_bytes = max(blocksize,  num_bytes);
@@ -1178,10 +1180,8 @@
 	while (1) {
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
 					       cur_offset, 0);
-		if (ret < 0) {
-			btrfs_abort_transaction(trans, root, ret);
+		if (ret < 0)
 			goto error;
-		}
 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
 			leaf = path->nodes[0];
 			btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1195,10 +1195,8 @@
 		leaf = path->nodes[0];
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 			ret = btrfs_next_leaf(root, path);
-			if (ret < 0) {
-				btrfs_abort_transaction(trans, root, ret);
+			if (ret < 0)
 				goto error;
-			}
 			if (ret > 0)
 				break;
 			leaf = path->nodes[0];
@@ -1289,10 +1287,8 @@
 			ret = cow_file_range(inode, locked_page,
 					     cow_start, found_key.offset - 1,
 					     page_started, nr_written, 1);
-			if (ret) {
-				btrfs_abort_transaction(trans, root, ret);
+			if (ret)
 				goto error;
-			}
 			cow_start = (u64)-1;
 		}
 
@@ -1339,10 +1335,8 @@
 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
 						      num_bytes);
-			if (ret) {
-				btrfs_abort_transaction(trans, root, ret);
+			if (ret)
 				goto error;
-			}
 		}
 
 		extent_clear_unlock_delalloc(inode, cur_offset,
@@ -1364,10 +1358,8 @@
 	if (cow_start != (u64)-1) {
 		ret = cow_file_range(inode, locked_page, cow_start, end,
 				     page_started, nr_written, 1);
-		if (ret) {
-			btrfs_abort_transaction(trans, root, ret);
+		if (ret)
 			goto error;
-		}
 	}
 
 error:
@@ -1551,7 +1543,13 @@
 			spin_unlock(&BTRFS_I(inode)->lock);
 		}
 
-		if (*bits & EXTENT_DO_ACCOUNTING)
+		/*
+		 * We don't reserve metadata space for space cache inodes, so we
+		 * don't need to call delalloc_release_metadata if there is an
+		 * error.
+		 */
+		if (*bits & EXTENT_DO_ACCOUNTING &&
+		    root != root->fs_info->tree_root)
 			btrfs_delalloc_release_metadata(inode, len);
 
 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
@@ -2041,10 +2039,8 @@
 		key.offset = offset;
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	if (ret < 0) {
-		WARN_ON(1);
+	if (WARN_ON(ret < 0))
 		return ret;
-	}
 	ret = 0;
 
 	while (1) {
@@ -2367,10 +2363,23 @@
 	return ret;
 }
 
+static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
+{
+	struct old_sa_defrag_extent *old, *tmp;
+
+	if (!new)
+		return;
+
+	list_for_each_entry_safe(old, tmp, &new->head, list) {
+		list_del(&old->list);
+		kfree(old);
+	}
+	kfree(new);
+}
+
 static void relink_file_extents(struct new_sa_defrag_extent *new)
 {
 	struct btrfs_path *path;
-	struct old_sa_defrag_extent *old, *tmp;
 	struct sa_defrag_extent_backref *backref;
 	struct sa_defrag_extent_backref *prev = NULL;
 	struct inode *inode;
@@ -2413,16 +2422,11 @@
 	kfree(prev);
 
 	btrfs_free_path(path);
-
-	list_for_each_entry_safe(old, tmp, &new->head, list) {
-		list_del(&old->list);
-		kfree(old);
-	}
 out:
+	free_sa_defrag_extent(new);
+
 	atomic_dec(&root->fs_info->defrag_running);
 	wake_up(&root->fs_info->transaction_wait);
-
-	kfree(new);
 }
 
 static struct new_sa_defrag_extent *
@@ -2432,7 +2436,7 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
-	struct old_sa_defrag_extent *old, *tmp;
+	struct old_sa_defrag_extent *old;
 	struct new_sa_defrag_extent *new;
 	int ret;
 
@@ -2480,7 +2484,7 @@
 		if (slot >= btrfs_header_nritems(l)) {
 			ret = btrfs_next_leaf(root, path);
 			if (ret < 0)
-				goto out_free_list;
+				goto out_free_path;
 			else if (ret > 0)
 				break;
 			continue;
@@ -2509,7 +2513,7 @@
 
 		old = kmalloc(sizeof(*old), GFP_NOFS);
 		if (!old)
-			goto out_free_list;
+			goto out_free_path;
 
 		offset = max(new->file_pos, key.offset);
 		end = min(new->file_pos + new->len, key.offset + num_bytes);
@@ -2531,15 +2535,10 @@
 
 	return new;
 
-out_free_list:
-	list_for_each_entry_safe(old, tmp, &new->head, list) {
-		list_del(&old->list);
-		kfree(old);
-	}
 out_free_path:
 	btrfs_free_path(path);
 out_kfree:
-	kfree(new);
+	free_sa_defrag_extent(new);
 	return NULL;
 }
 
@@ -2710,8 +2709,14 @@
 	btrfs_remove_ordered_extent(inode, ordered_extent);
 
 	/* for snapshot-aware defrag */
-	if (new)
-		relink_file_extents(new);
+	if (new) {
+		if (ret) {
+			free_sa_defrag_extent(new);
+			atomic_dec(&root->fs_info->defrag_running);
+		} else {
+			relink_file_extents(new);
+		}
+	}
 
 	/* once for us */
 	btrfs_put_ordered_extent(ordered_extent);
@@ -2969,6 +2974,7 @@
 	if (insert >= 1) {
 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
 		if (ret) {
+			atomic_dec(&root->orphan_inodes);
 			if (reserve) {
 				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
 					  &BTRFS_I(inode)->runtime_flags);
@@ -3018,14 +3024,16 @@
 		release_rsv = 1;
 	spin_unlock(&root->orphan_lock);
 
-	if (trans && delete_item)
-		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
-
-	if (release_rsv) {
-		btrfs_orphan_release_metadata(inode);
+	if (delete_item) {
 		atomic_dec(&root->orphan_inodes);
+		if (trans)
+			ret = btrfs_del_orphan_item(trans, root,
+						    btrfs_ino(inode));
 	}
 
+	if (release_rsv)
+		btrfs_orphan_release_metadata(inode);
+
 	return ret;
 }
 
@@ -3172,8 +3180,7 @@
 
 		/* if we have links, this was a truncate, let's do that */
 		if (inode->i_nlink) {
-			if (!S_ISREG(inode->i_mode)) {
-				WARN_ON(1);
+			if (WARN_ON(!S_ISREG(inode->i_mode))) {
 				iput(inode);
 				continue;
 			}
@@ -3636,7 +3643,7 @@
 	int ret;
 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
 	if (!ret) {
-		btrfs_drop_nlink(inode);
+		drop_nlink(inode);
 		ret = btrfs_update_inode(trans, root, inode);
 	}
 	return ret;
@@ -4230,15 +4237,16 @@
 
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
-		btrfs_wait_ordered_range(inode, hole_start,
-					 block_end - hole_start);
+
 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
 				 &cached_state);
-		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
+		ordered = btrfs_lookup_ordered_range(inode, hole_start,
+						     block_end - hole_start);
 		if (!ordered)
 			break;
 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
 				     &cached_state, GFP_NOFS);
+		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
 
@@ -4472,8 +4480,10 @@
 	trace_btrfs_inode_evict(inode);
 
 	truncate_inode_pages(&inode->i_data, 0);
-	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
-			       btrfs_is_free_space_inode(inode)))
+	if (inode->i_nlink &&
+	    ((btrfs_root_refs(&root->root_item) != 0 &&
+	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
+	     btrfs_is_free_space_inode(inode)))
 		goto no_delete;
 
 	if (is_bad_inode(inode)) {
@@ -4490,7 +4500,8 @@
 	}
 
 	if (inode->i_nlink > 0) {
-		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
+		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
+		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
 		goto no_delete;
 	}
 
@@ -4731,14 +4742,7 @@
 	}
 	spin_unlock(&root->inode_lock);
 
-	/*
-	 * Free space cache has inodes in the tree root, but the tree root has a
-	 * root_refs of 0, so this could end up dropping the tree root as a
-	 * snapshot, so we need the extra !root->fs_info->tree_root check to
-	 * make sure we don't drop it.
-	 */
-	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
-	    root != root->fs_info->tree_root) {
+	if (empty && btrfs_root_refs(&root->root_item) == 0) {
 		synchronize_srcu(&root->fs_info->subvol_srcu);
 		spin_lock(&root->inode_lock);
 		empty = RB_EMPTY_ROOT(&root->inode_tree);
@@ -4831,10 +4835,12 @@
 {
 	struct inode *inode;
 	struct btrfs_iget_args args;
+	unsigned long hashval = btrfs_inode_hash(objectid, root);
+
 	args.ino = objectid;
 	args.root = root;
 
-	inode = iget5_locked(s, objectid, btrfs_find_actor,
+	inode = iget5_locked(s, hashval, btrfs_find_actor,
 			     btrfs_init_locked_inode,
 			     (void *)&args);
 	return inode;
@@ -5048,7 +5054,7 @@
 			continue;
 		}
 
-		item = btrfs_item_nr(leaf, slot);
+		item = btrfs_item_nr(slot);
 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
 		if (found_key.objectid != key.objectid)
@@ -5454,7 +5460,7 @@
 				BTRFS_INODE_NODATASUM;
 	}
 
-	insert_inode_hash(inode);
+	btrfs_insert_inode_hash(inode);
 	inode_tree_add(inode);
 
 	trace_btrfs_inode_new(inode);
@@ -5730,7 +5736,7 @@
 		goto fail;
 	}
 
-	btrfs_inc_nlink(inode);
+	inc_nlink(inode);
 	inode_inc_iversion(inode);
 	inode->i_ctime = CURRENT_TIME;
 	ihold(inode);
@@ -5860,7 +5866,7 @@
 	compress_type = btrfs_file_extent_compression(leaf, item);
 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
 	inline_size = btrfs_file_extent_inline_item_len(leaf,
-					btrfs_item_nr(leaf, path->slots[0]));
+					btrfs_item_nr(path->slots[0]));
 	tmp = kmalloc(inline_size, GFP_NOFS);
 	if (!tmp)
 		return -ENOMEM;
@@ -5974,7 +5980,14 @@
 	found_type = btrfs_key_type(&found_key);
 	if (found_key.objectid != objectid ||
 	    found_type != BTRFS_EXTENT_DATA_KEY) {
-		goto not_found;
+		/*
+		 * If we back up past the first extent, we want to move forward
+		 * and see if there is an extent in front of us; otherwise we'll
+		 * say there is a hole for our whole search range, which can
+		 * cause problems.
+		 */
+		extent_end = start;
+		goto next;
 	}
 
 	found_type = btrfs_file_extent_type(leaf, item);
@@ -5989,7 +6002,7 @@
 		size = btrfs_file_extent_inline_len(leaf, item);
 		extent_end = ALIGN(extent_start + size, root->sectorsize);
 	}
-
+next:
 	if (start >= extent_end) {
 		path->slots[0]++;
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
@@ -6249,7 +6262,7 @@
 	/* adjust the range_start to make sure it doesn't
 	 * go backwards from the start they passed in
 	 */
-	range_start = max(start,range_start);
+	range_start = max(start, range_start);
 	found = found_end - range_start;
 
 	if (found > 0) {
@@ -7053,7 +7066,7 @@
 			}
 		} else {
 			submit_len += bvec->bv_len;
-			nr_pages ++;
+			nr_pages++;
 			bvec++;
 		}
 	}
@@ -7222,7 +7235,9 @@
 	 * outstanding dirty pages are on disk.
 	 */
 	count = iov_length(iov, nr_segs);
-	btrfs_wait_ordered_range(inode, offset, count);
+	ret = btrfs_wait_ordered_range(inode, offset, count);
+	if (ret)
+		return ret;
 
 	if (rw & WRITE) {
 		/*
@@ -7563,7 +7578,10 @@
 	u64 mask = root->sectorsize - 1;
 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 
-	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+	ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
+				       (u64)-1);
+	if (ret)
+		return ret;
 
 	/*
 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
@@ -7787,6 +7805,14 @@
 	return inode;
 }
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_destroy_inode(struct inode *inode)
+{
+	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+}
+#endif
+
 static void btrfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
@@ -7857,8 +7883,7 @@
 		return 1;
 
 	/* the snap/subvol tree is on deleting */
-	if (btrfs_root_refs(&root->root_item) == 0 &&
-	    root != root->fs_info->tree_root)
+	if (btrfs_root_refs(&root->root_item) == 0)
 		return 1;
 	else
 		return generic_drop_inode(inode);
@@ -7995,8 +8020,7 @@
 		if (ret == -EEXIST) {
 			/* we shouldn't get
 			 * eexist without a new_inode */
-			if (!new_inode) {
-				WARN_ON(1);
+			if (WARN_ON(!new_inode)) {
 				return ret;
 			}
 		} else {
@@ -8144,18 +8168,24 @@
 static void btrfs_run_delalloc_work(struct btrfs_work *work)
 {
 	struct btrfs_delalloc_work *delalloc_work;
+	struct inode *inode;
 
 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
 				     work);
-	if (delalloc_work->wait)
-		btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
-	else
-		filemap_flush(delalloc_work->inode->i_mapping);
+	inode = delalloc_work->inode;
+	if (delalloc_work->wait) {
+		btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	} else {
+		filemap_flush(inode->i_mapping);
+		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+			filemap_flush(inode->i_mapping);
+	}
 
 	if (delalloc_work->delay_iput)
-		btrfs_add_delayed_iput(delalloc_work->inode);
+		btrfs_add_delayed_iput(inode);
 	else
-		iput(delalloc_work->inode);
+		iput(inode);
 	complete(&delalloc_work->completion);
 }
 
@@ -8276,8 +8306,7 @@
 	return ret;
 }
 
-int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info,
-				    int delay_iput)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
 {
 	struct btrfs_root *root;
 	struct list_head splice;
@@ -8337,14 +8366,14 @@
 	int err;
 	int drop_inode = 0;
 	u64 objectid;
-	u64 index = 0 ;
+	u64 index = 0;
 	int name_len;
 	int datasize;
 	unsigned long ptr;
 	struct btrfs_file_extent_item *ei;
 	struct extent_buffer *leaf;
 
-	name_len = strlen(symname) + 1;
+	name_len = strlen(symname);
 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
 		return -ENAMETOOLONG;
 
@@ -8432,7 +8461,7 @@
 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
 	inode_set_bytes(inode, name_len);
-	btrfs_i_size_write(inode, name_len - 1);
+	btrfs_i_size_write(inode, name_len);
 	err = btrfs_update_inode(trans, root, inode);
 	if (err)
 		drop_inode = 1;
@@ -8491,6 +8520,8 @@
 						  ins.offset, 0, 0, 0,
 						  BTRFS_FILE_EXTENT_PREALLOC);
 		if (ret) {
+			btrfs_free_reserved_extent(root, ins.objectid,
+						   ins.offset);
 			btrfs_abort_transaction(trans, root, ret);
 			if (own_trans)
 				btrfs_end_transaction(trans, root);
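
A recurring theme of the inode.c changes is that btrfs_wait_ordered_range() now reports failure instead of returning void, and every caller is converted to propagate it; per the ordered-data.c hunk further down, the error is -EIO when an ordered extent carried BTRFS_ORDERED_IOERR, or whatever filemap_fdatawrite_range()/filemap_fdatawait_range() returned. The resulting caller pattern:

	ret = btrfs_wait_ordered_range(inode, start, len);
	if (ret)
		return ret;

The same file also stops storing the symlink's trailing NUL: name_len is now strlen(symname), so the inline extent and i_size both cover exactly the link target's bytes.
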
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 9d46f60..a1116225 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -44,7 +44,6 @@
 #include <linux/uuid.h>
 #include <linux/btrfs.h>
 #include <linux/uaccess.h>
-#include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -321,7 +320,7 @@
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
 	struct btrfs_device *device;
 	struct request_queue *q;
 	struct fstrim_range range;
@@ -369,9 +368,13 @@
 
 int btrfs_is_empty_uuid(u8 *uuid)
 {
-	static char empty_uuid[BTRFS_UUID_SIZE] = {0};
+	int i;
 
-	return !memcmp(uuid, empty_uuid, BTRFS_UUID_SIZE);
+	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
+		if (uuid[i])
+			return 0;
+	}
+	return 1;
 }
 
 static noinline int create_subvol(struct inode *dir,
@@ -436,7 +439,7 @@
 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(leaf, objectid);
 
-	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(leaf),
+	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
 			    BTRFS_FSID_SIZE);
 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
 			    btrfs_header_chunk_tree_uuid(leaf),
@@ -574,7 +577,7 @@
 	if (ret)
 		return ret;
 
-	btrfs_wait_ordered_extents(root);
+	btrfs_wait_ordered_extents(root, -1);
 
 	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
 	if (!pending_snapshot)
@@ -688,7 +691,7 @@
  *     nfs_async_unlink().
  */
 
-static int btrfs_may_delete(struct inode *dir,struct dentry *victim,int isdir)
+static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
 {
 	int error;
 
@@ -842,7 +845,6 @@
 {
 	struct btrfs_path *path;
 	struct btrfs_key min_key;
-	struct btrfs_key max_key;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *extent;
 	int type;
@@ -857,15 +859,10 @@
 	min_key.type = BTRFS_EXTENT_DATA_KEY;
 	min_key.offset = *off;
 
-	max_key.objectid = ino;
-	max_key.type = (u8)-1;
-	max_key.offset = (u64)-1;
-
 	path->keep_locks = 1;
 
-	while(1) {
-		ret = btrfs_search_forward(root, &min_key, &max_key,
-					   path, newer_than);
+	while (1) {
+		ret = btrfs_search_forward(root, &min_key, path, newer_than);
 		if (ret != 0)
 			goto none;
 		if (min_key.objectid != ino)
@@ -1206,7 +1203,7 @@
 		ra = &file->f_ra;
 	}
 
-	pages = kmalloc(sizeof(struct page *) * max_cluster,
+	pages = kmalloc_array(max_cluster, sizeof(struct page *),
 			GFP_NOFS);
 	if (!pages) {
 		ret = -ENOMEM;
@@ -1893,7 +1890,6 @@
 {
 	struct btrfs_root *root;
 	struct btrfs_key key;
-	struct btrfs_key max_key;
 	struct btrfs_path *path;
 	struct btrfs_ioctl_search_key *sk = &args->key;
 	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
@@ -1925,15 +1921,10 @@
 	key.type = sk->min_type;
 	key.offset = sk->min_offset;
 
-	max_key.objectid = sk->max_objectid;
-	max_key.type = sk->max_type;
-	max_key.offset = sk->max_offset;
-
 	path->keep_locks = 1;
 
-	while(1) {
-		ret = btrfs_search_forward(root, &key, &max_key, path,
-					   sk->min_transid);
+	while (1) {
+		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
 		if (ret != 0) {
 			if (ret > 0)
 				ret = 0;
@@ -2018,7 +2009,7 @@
 	key.type = BTRFS_INODE_REF_KEY;
 	key.offset = (u64)-1;
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto out;
@@ -2047,7 +2038,7 @@
 		}
 
 		*(ptr + len) = '/';
-		read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len);
+		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
 
 		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
 			break;
@@ -2058,7 +2049,7 @@
 		dirid = key.objectid;
 	}
 	memmove(name, ptr, total_len);
-	name[total_len]='\0';
+	name[total_len] = '\0';
 	ret = 0;
 out:
 	btrfs_free_path(path);
@@ -2098,7 +2089,7 @@
 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 					     void __user *arg)
 {
-	struct dentry *parent = fdentry(file);
+	struct dentry *parent = file->f_path.dentry;
 	struct dentry *dentry;
 	struct inode *dir = parent->d_inode;
 	struct inode *inode;
@@ -2144,7 +2135,7 @@
 
 	inode = dentry->d_inode;
 	dest = BTRFS_I(inode)->root;
-	if (!capable(CAP_SYS_ADMIN)){
+	if (!capable(CAP_SYS_ADMIN)) {
 		/*
 		 * Regular user.  Only allow this with a special mount
 		 * option, when the user has write+exec access to the
@@ -2727,15 +2718,10 @@
 	size = sizeof(tmp) +
 		tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info);
 
-	same = kmalloc(size, GFP_NOFS);
-	if (!same) {
-		ret = -EFAULT;
-		goto out;
-	}
+	same = memdup_user((struct btrfs_ioctl_same_args __user *)argp, size);
 
-	if (copy_from_user(same,
-			   (struct btrfs_ioctl_same_args __user *)argp, size)) {
-		ret = -EFAULT;
+	if (IS_ERR(same)) {
+		ret = PTR_ERR(same);
 		goto out;
 	}
 
@@ -3119,7 +3105,7 @@
 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 				       u64 off, u64 olen, u64 destoff)
 {
-	struct inode *inode = fdentry(file)->d_inode;
+	struct inode *inode = file_inode(file);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct fd src_file;
 	struct inode *src;
@@ -3679,9 +3665,10 @@
 
 	switch (p->cmd) {
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
-		if (root->fs_info->sb->s_flags & MS_RDONLY)
-			return -EROFS;
-
+		if (root->fs_info->sb->s_flags & MS_RDONLY) {
+			ret = -EROFS;
+			goto out;
+		}
 		if (atomic_xchg(
 			&root->fs_info->mutually_exclusive_operation_running,
 			1)) {
@@ -3707,7 +3694,7 @@
 
 	if (copy_to_user(arg, p, sizeof(*p)))
 		ret = -EFAULT;
-
+out:
 	kfree(p);
 	return ret;
 }
@@ -4317,7 +4304,7 @@
 
 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -4557,9 +4544,15 @@
 		return btrfs_ioctl_logical_to_ino(root, argp);
 	case BTRFS_IOC_SPACE_INFO:
 		return btrfs_ioctl_space_info(root, argp);
-	case BTRFS_IOC_SYNC:
-		btrfs_sync_fs(file->f_dentry->d_sb, 1);
-		return 0;
+	case BTRFS_IOC_SYNC: {
+		int ret;
+
+		ret = btrfs_start_delalloc_roots(root->fs_info, 0);
+		if (ret)
+			return ret;
+		ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
+		return ret;
+	}
 	case BTRFS_IOC_START_SYNC:
 		return btrfs_ioctl_start_sync(root, argp);
 	case BTRFS_IOC_WAIT_SYNC:
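
Two ioctl.c cleanups are worth calling out. An open-coded kmalloc()+copy_from_user() pair becomes memdup_user(), which allocates and copies in one step and hands back an ERR_PTR() (-ENOMEM or -EFAULT) on failure, so there is no half-initialized buffer to clean up:

	same = memdup_user(argp, size);
	if (IS_ERR(same)) {
		ret = PTR_ERR(same);
		goto out;
	}

And kmalloc(n * size) allocations become kmalloc_array(n, size, flags), which returns NULL instead of silently overflowing the multiplication.
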
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index c702cb6..25a8f38 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -537,7 +537,9 @@
 	 */
 	if (RB_EMPTY_ROOT(&tree->tree) &&
 	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+		spin_lock(&root->fs_info->ordered_root_lock);
 		list_del_init(&BTRFS_I(inode)->ordered_operations);
+		spin_unlock(&root->fs_info->ordered_root_lock);
 	}
 
 	if (!root->nr_ordered_extents) {
@@ -563,10 +565,11 @@
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-void btrfs_wait_ordered_extents(struct btrfs_root *root)
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
 {
 	struct list_head splice, works;
 	struct btrfs_ordered_extent *ordered, *next;
+	int count = 0;
 
 	INIT_LIST_HEAD(&splice);
 	INIT_LIST_HEAD(&works);
@@ -574,7 +577,7 @@
 	mutex_lock(&root->fs_info->ordered_operations_mutex);
 	spin_lock(&root->ordered_extent_lock);
 	list_splice_init(&root->ordered_extents, &splice);
-	while (!list_empty(&splice)) {
+	while (!list_empty(&splice) && nr) {
 		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 					   root_extent_list);
 		list_move_tail(&ordered->root_extent_list,
@@ -589,7 +592,11 @@
 
 		cond_resched();
 		spin_lock(&root->ordered_extent_lock);
+		if (nr != -1)
+			nr--;
+		count++;
 	}
+	list_splice_tail(&splice, &root->ordered_extents);
 	spin_unlock(&root->ordered_extent_lock);
 
 	list_for_each_entry_safe(ordered, next, &works, work_list) {
@@ -599,18 +606,21 @@
 		cond_resched();
 	}
 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
+
+	return count;
 }
 
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
 {
 	struct btrfs_root *root;
 	struct list_head splice;
+	int done;
 
 	INIT_LIST_HEAD(&splice);
 
 	spin_lock(&fs_info->ordered_root_lock);
 	list_splice_init(&fs_info->ordered_roots, &splice);
-	while (!list_empty(&splice)) {
+	while (!list_empty(&splice) && nr) {
 		root = list_first_entry(&splice, struct btrfs_root,
 					ordered_root);
 		root = btrfs_grab_fs_root(root);
@@ -619,10 +629,14 @@
 			       &fs_info->ordered_roots);
 		spin_unlock(&fs_info->ordered_root_lock);
 
-		btrfs_wait_ordered_extents(root);
+		done = btrfs_wait_ordered_extents(root, nr);
 		btrfs_put_fs_root(root);
 
 		spin_lock(&fs_info->ordered_root_lock);
+		if (nr != -1) {
+			nr -= done;
+			WARN_ON(nr < 0);
+		}
 	}
 	spin_unlock(&fs_info->ordered_root_lock);
 }
@@ -734,8 +748,9 @@
 /*
  * Used to wait on ordered extents across a large range of bytes.
  */
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 {
+	int ret = 0;
 	u64 end;
 	u64 orig_end;
 	struct btrfs_ordered_extent *ordered;
@@ -751,8 +766,9 @@
 	/* start IO across the range first to instantiate any delalloc
 	 * extents
 	 */
-	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+	if (ret)
+		return ret;
 	/*
 	 * So with compression we will find and lock a dirty page and clear the
 	 * first one as dirty, setup an async extent, and immediately return
@@ -768,10 +784,15 @@
 	 * right and you are wrong.
 	 */
 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-		     &BTRFS_I(inode)->runtime_flags))
-		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+		     &BTRFS_I(inode)->runtime_flags)) {
+		ret = filemap_fdatawrite_range(inode->i_mapping, start,
+					       orig_end);
+		if (ret)
+			return ret;
+	}
+	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+	if (ret)
+		return ret;
 
 	end = orig_end;
 	while (1) {
@@ -788,11 +809,14 @@
 		}
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		end = ordered->file_offset;
+		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+			ret = -EIO;
 		btrfs_put_ordered_extent(ordered);
-		if (end == 0 || end == start)
+		if (ret || end == 0 || end == start)
 			break;
 		end--;
 	}
+	return ret;
 }
 
 /*
@@ -1076,7 +1100,7 @@
 	 * if this file hasn't been changed since the last transaction
 	 * commit, we can safely return without doing anything
 	 */
-	if (last_mod < root->fs_info->last_trans_committed)
+	if (last_mod <= root->fs_info->last_trans_committed)
 		return;
 
 	spin_lock(&root->fs_info->ordered_root_lock);
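
btrfs_wait_ordered_extents() now takes an nr budget and returns how many ordered extents it actually waited on; nr == -1 keeps the old wait-for-everything behaviour, and anything left over is spliced back onto the root's list. A hedged usage sketch (the budget of 64 below is an arbitrary example, not a value from this series):

	/* flush and wait on every ordered extent of this root */
	btrfs_wait_ordered_extents(root, -1);

	/* or bound the work when only some progress is needed */
	done = btrfs_wait_ordered_extents(root, 64);

btrfs_wait_ordered_roots() (renamed from btrfs_wait_all_ordered_extents()) distributes the same budget across all roots.
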
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 0c0b356..9b0450f 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -180,7 +180,7 @@
 							 u64 file_offset);
 void btrfs_start_ordered_extent(struct inode *inode,
 				struct btrfs_ordered_extent *entry, int wait);
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
@@ -195,8 +195,8 @@
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root);
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info);
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
 void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
 void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
 void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 0088bed..417053b 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -193,7 +193,7 @@
 	btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
 		   btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
 	for (i = 0 ; i < nr ; i++) {
-		item = btrfs_item_nr(l, i);
+		item = btrfs_item_nr(i);
 		btrfs_item_key_to_cpu(l, &key, i);
 		type = btrfs_key_type(&key);
 		printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d "
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d0ecfbd..24ac218 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -33,7 +33,6 @@
 #include <linux/raid/xor.h>
 #include <linux/vmalloc.h>
 #include <asm/div64.h>
-#include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"
 #include "disk-io.h"
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 4a35572..ce459a7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1383,6 +1383,7 @@
 {
 	struct btrfs_root *reloc_root;
 	struct reloc_control *rc = root->fs_info->reloc_ctl;
+	struct btrfs_block_rsv *rsv;
 	int clear_rsv = 0;
 	int ret;
 
@@ -1396,13 +1397,14 @@
 	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
 		return 0;
 
-	if (!trans->block_rsv) {
+	if (!trans->reloc_reserved) {
+		rsv = trans->block_rsv;
 		trans->block_rsv = rc->block_rsv;
 		clear_rsv = 1;
 	}
 	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
 	if (clear_rsv)
-		trans->block_rsv = NULL;
+		trans->block_rsv = rsv;
 
 	ret = __add_reloc_root(reloc_root);
 	BUG_ON(ret < 0);
@@ -1775,8 +1777,7 @@
 			new_ptr_gen = 0;
 		}
 
-		if (new_bytenr > 0 && new_bytenr == old_bytenr) {
-			WARN_ON(1);
+		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
 			ret = level;
 			break;
 		}
@@ -2058,7 +2059,7 @@
 	LIST_HEAD(inode_list);
 	struct btrfs_key key;
 	struct btrfs_key next_key;
-	struct btrfs_trans_handle *trans;
+	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_root *reloc_root;
 	struct btrfs_root_item *root_item;
 	struct btrfs_path *path;
@@ -2107,18 +2108,19 @@
 	memset(&next_key, 0, sizeof(next_key));
 
 	while (1) {
-		trans = btrfs_start_transaction(root, 0);
-		BUG_ON(IS_ERR(trans));
-		trans->block_rsv = rc->block_rsv;
-
 		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
 					     BTRFS_RESERVE_FLUSH_ALL);
 		if (ret) {
-			BUG_ON(ret != -EAGAIN);
-			ret = btrfs_commit_transaction(trans, root);
-			BUG_ON(ret);
-			continue;
+			err = ret;
+			goto out;
 		}
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			err = PTR_ERR(trans);
+			trans = NULL;
+			goto out;
+		}
+		trans->block_rsv = rc->block_rsv;
 
 		replaced = 0;
 		max_level = level;
@@ -2164,6 +2166,7 @@
 		root_item->drop_level = level;
 
 		btrfs_end_transaction_throttle(trans, root);
+		trans = NULL;
 
 		btrfs_btree_balance_dirty(root);
 
@@ -2192,7 +2195,8 @@
 		btrfs_update_reloc_root(trans, root);
 	}
 
-	btrfs_end_transaction_throttle(trans, root);
+	if (trans)
+		btrfs_end_transaction_throttle(trans, root);
 
 	btrfs_btree_balance_dirty(root);
 
@@ -3258,7 +3262,7 @@
 	struct rb_node *rb_node;
 	u32 item_size;
 	int level = -1;
-	int generation;
+	u64 generation;
 
 	eb =  path->nodes[0];
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
@@ -3407,7 +3411,6 @@
 				    struct inode *inode, u64 ino)
 {
 	struct btrfs_key key;
-	struct btrfs_path *path;
 	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_trans_handle *trans;
 	int ret = 0;
@@ -3432,22 +3435,14 @@
 	if (ret)
 		goto out;
 
-	path = btrfs_alloc_path();
-	if (!path) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans)) {
-		btrfs_free_path(path);
 		ret = PTR_ERR(trans);
 		goto out;
 	}
 
-	ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+	ret = btrfs_truncate_free_space_cache(root, trans, inode);
 
-	btrfs_free_path(path);
 	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty(root);
 out:
@@ -3549,10 +3544,8 @@
 				err = ret;
 				goto out;
 			}
-			if (ret > 0) {
-				WARN_ON(1);
+			if (WARN_ON(ret > 0))
 				goto out;
-			}
 
 			leaf = path->nodes[0];
 			nritems = btrfs_header_nritems(leaf);
@@ -3572,11 +3565,9 @@
 		}
 
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		if (key.objectid != ref_objectid ||
-		    key.type != BTRFS_EXTENT_DATA_KEY) {
-			WARN_ON(1);
+		if (WARN_ON(key.objectid != ref_objectid ||
+		    key.type != BTRFS_EXTENT_DATA_KEY))
 			break;
-		}
 
 		fi = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_file_extent_item);
@@ -4001,16 +3992,6 @@
 			}
 		}
 
-		ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
-		if (ret < 0) {
-			if (ret != -ENOSPC) {
-				err = ret;
-				WARN_ON(1);
-				break;
-			}
-			rc->commit_transaction = 1;
-		}
-
 		if (rc->commit_transaction) {
 			rc->commit_transaction = 0;
 			ret = btrfs_commit_transaction(trans, rc->extent_root);
@@ -4241,12 +4222,12 @@
 	printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n",
 	       rc->block_group->key.objectid, rc->block_group->flags);
 
-	ret = btrfs_start_all_delalloc_inodes(fs_info, 0);
+	ret = btrfs_start_delalloc_roots(fs_info, 0);
 	if (ret < 0) {
 		err = ret;
 		goto out;
 	}
-	btrfs_wait_all_ordered_extents(fs_info);
+	btrfs_wait_ordered_roots(fs_info, -1);
 
 	while (1) {
 		mutex_lock(&fs_info->cleaner_mutex);
@@ -4264,7 +4245,12 @@
 			rc->extents_found);
 
 		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
-			btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
+			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+						       (u64)-1);
+			if (ret) {
+				err = ret;
+				goto out;
+			}
 			invalidate_mapping_pages(rc->data_inode->i_mapping,
 						 0, -1);
 			rc->stage = UPDATE_DATA_PTRS;
@@ -4481,6 +4467,7 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 	u64 disk_bytenr;
+	u64 new_bytenr;
 	LIST_HEAD(list);
 
 	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
@@ -4492,13 +4479,24 @@
 	if (ret)
 		goto out;
 
-	disk_bytenr = ordered->start;
 	while (!list_empty(&list)) {
 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
 		list_del_init(&sums->list);
 
-		sums->bytenr = disk_bytenr;
-		disk_bytenr += sums->len;
+		/*
+		 * We need to offset the new_bytenr based on where the csum is.
+		 * We need to do this because we will read in entire prealloc
+		 * extents but we may have written to say the middle of the
+		 * prealloc extent, so we need to make sure the csum goes with
+		 * the right disk offset.
+		 *
+		 * We can do this because the data reloc inode refers strictly
+		 * to the on disk bytes, so we don't have to worry about
+		 * disk_len vs real len like with real inodes since it's all
+		 * disk length.
+		 */
+		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
+		sums->bytenr = new_bytenr;
 
 		btrfs_add_ordered_sum(inode, ordered, sums);
 	}
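
The final relocation.c hunk fixes csum placement: instead of assuming the checksums start at the front of the ordered extent, each sum keeps its offset inside the extent. A worked example with assumed numbers:

	/*
	 * Say the csums were looked up for disk_bytenr = 1000 and the
	 * ordered extent was allocated at ordered->start = 5000.  A sum
	 * recorded at sums->bytenr = 1200 sits 200 bytes into the old
	 * extent, so it must land 200 bytes into the new one:
	 *
	 *	new_bytenr = 5000 + (1200 - 1000) = 5200
	 */
	new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
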
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a18e0e2..2544805 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2717,8 +2717,6 @@
 		mutex_unlock(&fs_info->scrub_lock);
 		wake_up(&fs_info->scrub_pause_wait);
 
-		dev_replace->cursor_left = dev_replace->cursor_right;
-		dev_replace->item_needs_writeback = 1;
 		btrfs_put_block_group(cache);
 		if (ret)
 			break;
@@ -2732,6 +2730,9 @@
 			break;
 		}
 
+		dev_replace->cursor_left = dev_replace->cursor_right;
+		dev_replace->item_needs_writeback = 1;
+
 		key.offset = found_key.offset + length;
 		btrfs_release_path(path);
 	}
@@ -2783,7 +2784,6 @@
 {
 	int ret = 0;
 
-	mutex_lock(&fs_info->scrub_lock);
 	if (fs_info->scrub_workers_refcnt == 0) {
 		if (is_dev_replace)
 			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
@@ -2813,21 +2813,17 @@
 	}
 	++fs_info->scrub_workers_refcnt;
 out:
-	mutex_unlock(&fs_info->scrub_lock);
-
 	return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 {
-	mutex_lock(&fs_info->scrub_lock);
 	if (--fs_info->scrub_workers_refcnt == 0) {
 		btrfs_stop_workers(&fs_info->scrub_workers);
 		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
 		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
 	}
 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
-	mutex_unlock(&fs_info->scrub_lock);
 }
 
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
@@ -2888,23 +2884,18 @@
 		return -EINVAL;
 	}
 
-	ret = scrub_workers_get(fs_info, is_dev_replace);
-	if (ret)
-		return ret;
 
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 	if (!dev || (dev->missing && !is_dev_replace)) {
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -ENODEV;
 	}
-	mutex_lock(&fs_info->scrub_lock);
 
+	mutex_lock(&fs_info->scrub_lock);
 	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -EIO;
 	}
 
@@ -2915,10 +2906,17 @@
 		btrfs_dev_replace_unlock(&fs_info->dev_replace);
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -EINPROGRESS;
 	}
 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
+
+	ret = scrub_workers_get(fs_info, is_dev_replace);
+	if (ret) {
+		mutex_unlock(&fs_info->scrub_lock);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+		return ret;
+	}
+
 	sctx = scrub_setup_ctx(dev, is_dev_replace);
 	if (IS_ERR(sctx)) {
 		mutex_unlock(&fs_info->scrub_lock);
@@ -2931,13 +2929,15 @@
 
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);
-	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (!is_dev_replace) {
-		down_read(&fs_info->scrub_super_lock);
+		/*
+		 * By holding the device list mutex, we can
+		 * kick off writing the super in log tree sync.
+		 */
 		ret = scrub_supers(sctx, dev);
-		up_read(&fs_info->scrub_super_lock);
 	}
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (!ret)
 		ret = scrub_enumerate_chunks(sctx, dev, start, end,
@@ -2954,10 +2954,10 @@
 
 	mutex_lock(&fs_info->scrub_lock);
 	dev->scrub_device = NULL;
+	scrub_workers_put(fs_info);
 	mutex_unlock(&fs_info->scrub_lock);
 
 	scrub_free_ctx(sctx);
-	scrub_workers_put(fs_info);
 
 	return ret;
 }
@@ -2987,16 +2987,6 @@
 	wake_up(&fs_info->scrub_pause_wait);
 }
 
-void btrfs_scrub_pause_super(struct btrfs_root *root)
-{
-	down_write(&root->fs_info->scrub_super_lock);
-}
-
-void btrfs_scrub_continue_super(struct btrfs_root *root)
-{
-	up_write(&root->fs_info->scrub_super_lock);
-}
-
 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
 {
 	mutex_lock(&fs_info->scrub_lock);
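
The scrub.c reshuffle pins down the locking: scrub_workers_get()/scrub_workers_put() no longer take scrub_lock themselves but are called with it held, worker setup happens only after the device has passed all checks, and device_list_mutex is now held across scrub_supers() so super writes can't race with a log tree sync. The resulting acquisition order, sketched:

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	/* ... look up and validate the device ... */
	mutex_lock(&fs_info->scrub_lock);
	/* ... bail out on missing/replace-target/busy devices ... */
	ret = scrub_workers_get(fs_info, is_dev_replace);	/* refcounted under scrub_lock */

This also lets the scrub_super_lock rwsem and its pause/continue helpers go away entirely.
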
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index e46e0ed..6837fe8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -121,7 +121,6 @@
 	struct list_head name_cache_list;
 	int name_cache_size;
 
-	struct file *cur_inode_filp;
 	char *read_buf;
 };
 
@@ -565,10 +564,8 @@
 {
 	struct btrfs_cmd_header *hdr;
 
-	if (!sctx->send_buf) {
-		WARN_ON(1);
+	if (WARN_ON(!sctx->send_buf))
 		return -EINVAL;
-	}
 
 	BUG_ON(sctx->send_size);
 
@@ -791,7 +788,7 @@
 	if (found_key->type == BTRFS_INODE_REF_KEY) {
 		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
 						    struct btrfs_inode_ref);
-		item = btrfs_item_nr(eb, slot);
+		item = btrfs_item_nr(slot);
 		total = btrfs_item_size(eb, item);
 		elem_size = sizeof(*iref);
 	} else {
@@ -905,7 +902,7 @@
 
 	eb = path->nodes[0];
 	slot = path->slots[0];
-	item = btrfs_item_nr(eb, slot);
+	item = btrfs_item_nr(slot);
 	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
 	cur = 0;
 	len = 0;
@@ -2120,77 +2117,6 @@
 }
 
 /*
- * Called for regular files when sending extents data. Opens a struct file
- * to read from the file.
- */
-static int open_cur_inode_file(struct send_ctx *sctx)
-{
-	int ret = 0;
-	struct btrfs_key key;
-	struct path path;
-	struct inode *inode;
-	struct dentry *dentry;
-	struct file *filp;
-	int new = 0;
-
-	if (sctx->cur_inode_filp)
-		goto out;
-
-	key.objectid = sctx->cur_ino;
-	key.type = BTRFS_INODE_ITEM_KEY;
-	key.offset = 0;
-
-	inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
-			&new);
-	if (IS_ERR(inode)) {
-		ret = PTR_ERR(inode);
-		goto out;
-	}
-
-	dentry = d_obtain_alias(inode);
-	inode = NULL;
-	if (IS_ERR(dentry)) {
-		ret = PTR_ERR(dentry);
-		goto out;
-	}
-
-	path.mnt = sctx->mnt;
-	path.dentry = dentry;
-	filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
-	dput(dentry);
-	dentry = NULL;
-	if (IS_ERR(filp)) {
-		ret = PTR_ERR(filp);
-		goto out;
-	}
-	sctx->cur_inode_filp = filp;
-
-out:
-	/*
-	 * no xxxput required here as every vfs op
-	 * does it by itself on failure
-	 */
-	return ret;
-}
-
-/*
- * Closes the struct file that was created in open_cur_inode_file
- */
-static int close_cur_inode_file(struct send_ctx *sctx)
-{
-	int ret = 0;
-
-	if (!sctx->cur_inode_filp)
-		goto out;
-
-	ret = filp_close(sctx->cur_inode_filp, NULL);
-	sctx->cur_inode_filp = NULL;
-
-out:
-	return ret;
-}
-
-/*
  * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
  */
 static int send_subvol_begin(struct send_ctx *sctx)
@@ -3622,6 +3548,72 @@
 	return ret;
 }
 
+static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
+{
+	struct btrfs_root *root = sctx->send_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct inode *inode;
+	struct page *page;
+	char *addr;
+	struct btrfs_key key;
+	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+	pgoff_t last_index;
+	unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
+	ssize_t ret = 0;
+
+	key.objectid = sctx->cur_ino;
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.offset = 0;
+
+	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	if (offset + len > i_size_read(inode)) {
+		if (offset > i_size_read(inode))
+			len = 0;
+		else
+			len = i_size_read(inode) - offset;
+	}
+	if (len == 0)
+		goto out;
+
+	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+	while (index <= last_index) {
+		unsigned cur_len = min_t(unsigned, len,
+					 PAGE_CACHE_SIZE - pg_offset);
+		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+		if (!page) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (!PageUptodate(page)) {
+			btrfs_readpage(NULL, page);
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				unlock_page(page);
+				page_cache_release(page);
+				ret = -EIO;
+				break;
+			}
+		}
+
+		addr = kmap(page);
+		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
+		kunmap(page);
+		unlock_page(page);
+		page_cache_release(page);
+		index++;
+		pg_offset = 0;
+		len -= cur_len;
+		ret += cur_len;
+	}
+out:
+	iput(inode);
+	return ret;
+}
+
 /*
  * Read some bytes from the current inode/file and send a write command to
  * user space.
@@ -3630,35 +3622,20 @@
 {
 	int ret = 0;
 	struct fs_path *p;
-	loff_t pos = offset;
-	int num_read = 0;
-	mm_segment_t old_fs;
+	ssize_t num_read = 0;
 
 	p = fs_path_alloc();
 	if (!p)
 		return -ENOMEM;
 
-	/*
-	 * vfs normally only accepts user space buffers for security reasons.
-	 * we only read from the file and also only provide the read_buf buffer
-	 * to vfs. As this buffer does not come from a user space call, it's
-	 * ok to temporary allow kernel space buffers.
-	 */
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-
 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
 
-	ret = open_cur_inode_file(sctx);
-	if (ret < 0)
+	num_read = fill_read_buf(sctx, offset, len);
+	if (num_read <= 0) {
+		if (num_read < 0)
+			ret = num_read;
 		goto out;
-
-	ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
-	if (ret < 0)
-		goto out;
-	num_read = ret;
-	if (!num_read)
-		goto out;
+	}
 
 	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
 	if (ret < 0)
@@ -3677,7 +3654,6 @@
 tlv_put_failure:
 out:
 	fs_path_free(p);
-	set_fs(old_fs);
 	if (ret < 0)
 		return ret;
 	return num_read;
@@ -3926,16 +3902,16 @@
 	while (key.offset < ekey->offset + left_len) {
 		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 		right_type = btrfs_file_extent_type(eb, ei);
-		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
-		right_len = btrfs_file_extent_num_bytes(eb, ei);
-		right_offset = btrfs_file_extent_offset(eb, ei);
-		right_gen = btrfs_file_extent_generation(eb, ei);
-
 		if (right_type != BTRFS_FILE_EXTENT_REG) {
 			ret = 0;
 			goto out;
 		}
 
+		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+		right_len = btrfs_file_extent_num_bytes(eb, ei);
+		right_offset = btrfs_file_extent_offset(eb, ei);
+		right_gen = btrfs_file_extent_generation(eb, ei);
+
 		/*
 		 * Are we at extent 8? If yes, we know the extent is changed.
 		 * This may only happen on the first iteration.
@@ -4222,10 +4198,6 @@
 	u64 left_gen = 0;
 	u64 right_gen = 0;
 
-	ret = close_cur_inode_file(sctx);
-	if (ret < 0)
-		goto out;
-
 	sctx->cur_ino = key->objectid;
 	sctx->cur_inode_new_gen = 0;
 
@@ -4686,11 +4658,6 @@
 	}
 
 out:
-	if (!ret)
-		ret = close_cur_inode_file(sctx);
-	else
-		close_cur_inode_file(sctx);
-
 	free_recorded_refs(sctx);
 	return ret;
 }
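
The send.c rework drops the dentry_open()/vfs_read() path and its set_fs(KERNEL_DS) hack; fill_read_buf() instead pulls the inode's pages through the page cache (find_or_create_page() plus btrfs_readpage()) and memcpy()s them into sctx->read_buf. Per the hunks above it returns the byte count on success, 0 at or beyond EOF, or a negative errno, which shapes the caller like this:

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;	/* -ENOMEM or -EIO from the page read */
		goto out;		/* nothing to send */
	}
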
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index e913328..2d8ac1b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -42,7 +42,6 @@
 #include <linux/cleancache.h>
 #include <linux/ratelimit.h>
 #include <linux/btrfs.h>
-#include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -921,7 +920,7 @@
 		return 0;
 	}
 
-	btrfs_wait_all_ordered_extents(fs_info);
+	btrfs_wait_ordered_roots(fs_info, -1);
 
 	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
@@ -1330,6 +1329,12 @@
 		 * this also happens on 'umount -rf' or on shutdown, when
 		 * the filesystem is busy.
 		 */
+
+		/* wait for the uuid_scan task to finish */
+		down(&fs_info->uuid_tree_rescan_sem);
+		/* avoid complaints from lockdep et al. */
+		up(&fs_info->uuid_tree_rescan_sem);
+
 		sb->s_flags |= MS_RDONLY;
 
 		btrfs_dev_replace_suspend_for_unmount(fs_info);
@@ -1465,7 +1470,7 @@
 	nr_devices = fs_info->fs_devices->open_devices;
 	BUG_ON(!nr_devices);
 
-	devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
+	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
 			       GFP_NOFS);
 	if (!devices_info)
 		return -ENOMEM;
@@ -1789,7 +1794,25 @@
 
 static int btrfs_run_sanity_tests(void)
 {
-	return btrfs_test_free_space_cache();
+	int ret;
+
+	ret = btrfs_init_test_fs();
+	if (ret)
+		return ret;
+
+	ret = btrfs_test_free_space_cache();
+	if (ret)
+		goto out;
+	ret = btrfs_test_extent_buffer_operations();
+	if (ret)
+		goto out;
+	ret = btrfs_test_extent_io();
+	if (ret)
+		goto out;
+	ret = btrfs_test_inodes();
+out:
+	btrfs_destroy_test_fs();
+	return ret;
 }
 
 static int __init init_btrfs_fs(void)
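
btrfs_run_sanity_tests() now brackets a chain of self-test suites with btrfs_init_test_fs()/btrfs_destroy_test_fs(); each suite returns 0 or a negative errno and the first failure short-circuits to the teardown. A new suite would slot into the same shape (btrfs_test_my_feature() below is a hypothetical name, not part of this series):

	ret = btrfs_test_inodes();
	if (ret)
		goto out;
	ret = btrfs_test_my_feature();	/* hypothetical extra suite */
out:
	btrfs_destroy_test_fs();
	return ret;
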
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
new file mode 100644
index 0000000..757ef00
--- /dev/null
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 Fusion IO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/magic.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+
+static struct vfsmount *test_mnt = NULL;
+
+static const struct super_operations btrfs_test_super_ops = {
+	.alloc_inode	= btrfs_alloc_inode,
+	.destroy_inode	= btrfs_test_destroy_inode,
+};
+
+static struct dentry *btrfs_test_mount(struct file_system_type *fs_type,
+				       int flags, const char *dev_name,
+				       void *data)
+{
+	return mount_pseudo(fs_type, "btrfs_test:", &btrfs_test_super_ops,
+			    NULL, BTRFS_TEST_MAGIC);
+}
+
+static struct file_system_type test_type = {
+	.name		= "btrfs_test_fs",
+	.mount		= btrfs_test_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+struct inode *btrfs_new_test_inode(void)
+{
+	return new_inode(test_mnt->mnt_sb);
+}
+
+int btrfs_init_test_fs(void)
+{
+	int ret;
+
+	ret = register_filesystem(&test_type);
+	if (ret) {
+		printk(KERN_ERR "btrfs: cannot register test file system\n");
+		return ret;
+	}
+
+	test_mnt = kern_mount(&test_type);
+	if (IS_ERR(test_mnt)) {
+		printk(KERN_ERR "btrfs: cannot mount test file system\n");
+		unregister_filesystem(&test_type);
+		return PTR_ERR(test_mnt);
+	}
+	return 0;
+}
+
+void btrfs_destroy_test_fs(void)
+{
+	kern_unmount(test_mnt);
+	unregister_filesystem(&test_type);
+}
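
The new btrfs-tests.c gives the self-tests a source of real inodes without a block device: a mount_pseudo()-backed filesystem is kern_mount()ed once, and its super_operations point at btrfs_alloc_inode()/btrfs_test_destroy_inode() so every test inode carries a struct btrfs_inode. A test then only needs, as a sketch:

	struct inode *inode;

	inode = btrfs_new_test_inode();	/* new_inode() on the test superblock */
	if (!inode)
		return -ENOMEM;
	/* ... exercise paths that expect a btrfs-backed inode ... */
	iput(inode);			/* freed via btrfs_test_destroy_inode() */
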
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 5808776..b353bc8 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -24,11 +24,36 @@
 #define test_msg(fmt, ...) pr_info("btrfs: selftest: " fmt, ##__VA_ARGS__)
 
 int btrfs_test_free_space_cache(void);
+int btrfs_test_extent_buffer_operations(void);
+int btrfs_test_extent_io(void);
+int btrfs_test_inodes(void);
+int btrfs_init_test_fs(void);
+void btrfs_destroy_test_fs(void);
+struct inode *btrfs_new_test_inode(void);
 #else
 static inline int btrfs_test_free_space_cache(void)
 {
 	return 0;
 }
+static inline int btrfs_test_extent_buffer_operations(void)
+{
+	return 0;
+}
+static inline int btrfs_init_test_fs(void)
+{
+	return 0;
+}
+static inline void btrfs_destroy_test_fs(void)
+{
+}
+static inline int btrfs_test_extent_io(void)
+{
+	return 0;
+}
+static inline int btrfs_test_inodes(void)
+{
+	return 0;
+}
 #endif
 
 #endif
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
new file mode 100644
index 0000000..cc286ce
--- /dev/null
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2013 Fusion IO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../extent_io.h"
+#include "../disk-io.h"
+
+static int test_btrfs_split_item(void)
+{
+	struct btrfs_path *path;
+	struct btrfs_root *root;
+	struct extent_buffer *eb;
+	struct btrfs_item *item;
+	char *value = "mary had a little lamb";
+	char *split1 = "mary had a little";
+	char *split2 = " lamb";
+	char *split3 = "mary";
+	char *split4 = " had a little";
+	char buf[32];
+	struct btrfs_key key;
+	u32 value_len = strlen(value);
+	int ret = 0;
+
+	test_msg("Running btrfs_split_item tests\n");
+
+	root = btrfs_alloc_dummy_root();
+	if (IS_ERR(root)) {
+		test_msg("Could not allocate root\n");
+		return PTR_ERR(root);
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		test_msg("Could not allocate path\n");
+		kfree(root);
+		return -ENOMEM;
+	}
+
+	path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096);
+	if (!eb) {
+		test_msg("Could not allocate dummy buffer\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	path->slots[0] = 0;
+
+	key.objectid = 0;
+	key.type = BTRFS_EXTENT_CSUM_KEY;
+	key.offset = 0;
+
+	setup_items_for_insert(root, path, &key, &value_len, value_len,
+			       value_len + sizeof(struct btrfs_item), 1);
+	item = btrfs_item_nr(0);
+	write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+			    value_len);
+
+	key.offset = 3;
+
+	/*
+	 * Passing NULL trans here should be safe because we have plenty of
+	 * space in this leaf to split the item without having to split the
+	 * leaf.
+	 */
+	ret = btrfs_split_item(NULL, root, path, &key, 17);
+	if (ret) {
+		test_msg("Split item failed %d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * Read the first slot; it should have the original key and contain
+	 * only 'mary had a little'.
+	 */
+	btrfs_item_key_to_cpu(eb, &key, 0);
+	if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+	    key.offset != 0) {
+		test_msg("Invalid key at slot 0\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	item = btrfs_item_nr(0);
+	if (btrfs_item_size(eb, item) != strlen(split1)) {
+		test_msg("Invalid len in the first split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
+			   strlen(split1));
+	if (memcmp(buf, split1, strlen(split1))) {
+		test_msg("Data in the buffer doesn't match what it should "
+			 "in the first split have='%.*s' want '%s'\n",
+			 (int)strlen(split1), buf, split1);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(eb, &key, 1);
+	if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+	    key.offset != 3) {
+		test_msg("Invalid key at slot 1\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	item = btrfs_item_nr(1);
+	if (btrfs_item_size(eb, item) != strlen(split2)) {
+		test_msg("Invalid len in the second split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
+			   strlen(split2));
+	if (memcmp(buf, split2, strlen(split2))) {
+		test_msg("Data in the buffer doesn't match what it should "
+			 "in the second split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	key.offset = 1;
+	/* Do it again so we test memmoving the other items in the leaf */
+	ret = btrfs_split_item(NULL, root, path, &key, 4);
+	if (ret) {
+		test_msg("Second split item failed %d\n", ret);
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(eb, &key, 0);
+	if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+	    key.offset != 0) {
+		test_msg("Invalid key at slot 0\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	item = btrfs_item_nr(0);
+	if (btrfs_item_size(eb, item) != strlen(split3)) {
+		test_msg("Invalid len in the first split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
+			   strlen(split3));
+	if (memcmp(buf, split3, strlen(split3))) {
+		test_msg("Data in the buffer doesn't match what it should "
+			 "in the third split");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(eb, &key, 1);
+	if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+	    key.offset != 1) {
+		test_msg("Invalid key at slot 1\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	item = btrfs_item_nr(1);
+	if (btrfs_item_size(eb, item) != strlen(split4)) {
+		test_msg("Invalid len in the second split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
+			   strlen(split4));
+	if (memcmp(buf, split4, strlen(split4))) {
+		test_msg("Data in the buffer doesn't match what it should "
+			 "in the fourth split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(eb, &key, 2);
+	if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+	    key.offset != 3) {
+		test_msg("Invalid key at slot 2\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	item = btrfs_item_nr(2);
+	if (btrfs_item_size(eb, item) != strlen(split2)) {
+		test_msg("Invalid len in the second split\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2),
+			   strlen(split2));
+	if (memcmp(buf, split2, strlen(split2))) {
+		test_msg("Data in the buffer doesn't match what it should "
+			 "in the last chunk\n");
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	btrfs_free_path(path);
+	kfree(root);
+	return ret;
+}
+
+int btrfs_test_extent_buffer_operations(void)
+{
+	test_msg("Running extent buffer operation tests");
+	return test_btrfs_split_item();
+}
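
For reference, the leaf layouts the split-item test asserts, derived from the split offsets passed to btrfs_split_item() (17 = strlen("mary had a little"), 4 = strlen("mary")):

	/*
	 * initial:		slot 0: "mary had a little lamb"  (key.offset 0)
	 *
	 * after split at 17:	slot 0: "mary had a little"	  (key.offset 0)
	 *			slot 1: " lamb"			  (key.offset 3)
	 *
	 * after splitting
	 * slot 0 again at 4:	slot 0: "mary"			  (key.offset 0)
	 *			slot 1: " had a little"		  (key.offset 1)
	 *			slot 2: " lamb"			  (key.offset 3)
	 */
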
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
new file mode 100644
index 0000000..7e99c2f
--- /dev/null
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2013 Fusion IO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include "btrfs-tests.h"
+#include "../extent_io.h"
+
+#define PROCESS_UNLOCK		(1 << 0)
+#define PROCESS_RELEASE		(1 << 1)
+#define PROCESS_TEST_LOCKED	(1 << 2)
+
+static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
+				       unsigned long flags)
+{
+	int ret;
+	struct page *pages[16];
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = end_index - index + 1;
+	int i;
+	int count = 0;
+	int loops = 0;
+
+	while (nr_pages > 0) {
+		ret = find_get_pages_contig(inode->i_mapping, index,
+				     min_t(unsigned long, nr_pages,
+				     ARRAY_SIZE(pages)), pages);
+		for (i = 0; i < ret; i++) {
+			if (flags & PROCESS_TEST_LOCKED &&
+			    !PageLocked(pages[i]))
+				count++;
+			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
+				unlock_page(pages[i]);
+			page_cache_release(pages[i]);
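+			/* a second release drops the pin taken at setup */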
+			if (flags & PROCESS_RELEASE)
+				page_cache_release(pages[i]);
+		}
+		nr_pages -= ret;
+		index += ret;
+		cond_resched();
+		loops++;
+		if (loops > 100000) {
+			printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret);
+			break;
+		}
+	}
+	return count;
+}
+
+static int test_find_delalloc(void)
+{
+	struct inode *inode;
+	struct extent_io_tree tmp;
+	struct page *page;
+	struct page *locked_page = NULL;
+	unsigned long index = 0;
+	u64 total_dirty = 256 * 1024 * 1024;
+	u64 max_bytes = 128 * 1024 * 1024;
+	u64 start, end, test_start;
+	u64 found;
+	int ret = -EINVAL;
+
+	inode = btrfs_new_test_inode();
+	if (!inode) {
+		test_msg("Failed to allocate test inode\n");
+		return -ENOMEM;
+	}
+
+	extent_io_tree_init(&tmp, &inode->i_data);
+
+	/*
+	 * First go through and create and mark all of our pages dirty, we pin
+	 * everything to make sure our pages don't get evicted and screw up our
+	 * test.
+	 */
+	for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
+		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+		if (!page) {
+			test_msg("Failed to allocate test page\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		SetPageDirty(page);
+		if (index) {
+			unlock_page(page);
+		} else {
+			page_cache_get(page);
+			locked_page = page;
+		}
+	}
+
+	/*
+	 * Test this scenario
+	 * |--- delalloc ---|
+	 * |---  search  ---|
+	 */
+	set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_NOFS);
+	start = 0;
+	end = 0;
+	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+					 &end, max_bytes);
+	if (!found) {
+		test_msg("Should have found at least one delalloc\n");
+		goto out_bits;
+	}
+	if (start != 0 || end != 4095) {
+		test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
+			 start, end);
+		goto out_bits;
+	}
+	unlock_extent(&tmp, start, end);
+	unlock_page(locked_page);
+	page_cache_release(locked_page);
+
+	/*
+	 * Test this scenario
+	 *
+	 * |--- delalloc ---|
+	 *           |--- search ---|
+	 */
+	test_start = 64 * 1024 * 1024;
+	locked_page = find_lock_page(inode->i_mapping,
+				     test_start >> PAGE_CACHE_SHIFT);
+	if (!locked_page) {
+		test_msg("Couldn't find the locked page\n");
+		goto out_bits;
+	}
+	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_NOFS);
+	start = test_start;
+	end = 0;
+	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+					 &end, max_bytes);
+	if (!found) {
+		test_msg("Couldn't find delalloc in our range\n");
+		goto out_bits;
+	}
+	if (start != test_start || end != max_bytes - 1) {
+		test_msg("Expected start %Lu end %Lu, got start %Lu, end "
+			 "%Lu\n", test_start, max_bytes - 1, start, end);
+		goto out_bits;
+	}
+	if (process_page_range(inode, start, end,
+			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
+		test_msg("There were unlocked pages in the range\n");
+		goto out_bits;
+	}
+	unlock_extent(&tmp, start, end);
+	/* locked_page was unlocked above */
+	page_cache_release(locked_page);
+
+	/*
+	 * Test this scenario
+	 * |--- delalloc ---|
+	 *                    |--- search ---|
+	 */
+	test_start = max_bytes + 4096;
+	locked_page = find_lock_page(inode->i_mapping, test_start >>
+				     PAGE_CACHE_SHIFT);
+	if (!locked_page) {
+		test_msg("Couldn't find the locked page\n");
+		goto out_bits;
+	}
+	start = test_start;
+	end = 0;
+	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+					 &end, max_bytes);
+	if (found) {
+		test_msg("Found range when we shouldn't have\n");
+		goto out_bits;
+	}
+	if (end != (u64)-1) {
+		test_msg("Did not return the proper end offset\n");
+		goto out_bits;
+	}
+
+	/*
+	 * Test this scenario
+	 * [------- delalloc -------|
+	 * [max_bytes]|-- search --|
+	 *
+	 * We are re-using our test_start from above since it works out well.
+	 */
+	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_NOFS);
+	start = test_start;
+	end = 0;
+	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+					 &end, max_bytes);
+	if (!found) {
+		test_msg("Didn't find our range\n");
+		goto out_bits;
+	}
+	if (start != test_start || end != total_dirty - 1) {
+		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
+			 test_start, total_dirty - 1, start, end);
+		goto out_bits;
+	}
+	if (process_page_range(inode, start, end,
+			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
+		test_msg("Pages in range were not all locked\n");
+		goto out_bits;
+	}
+	unlock_extent(&tmp, start, end);
+
+	/*
+	 * Now to test where we run into a page that is no longer dirty in the
+	 * range we want to find.
+	 */
+	page = find_get_page(inode->i_mapping, (max_bytes + (1 * 1024 * 1024))
+			     >> PAGE_CACHE_SHIFT);
+	if (!page) {
+		test_msg("Couldn't find our page\n");
+		goto out_bits;
+	}
+	ClearPageDirty(page);
+	page_cache_release(page);
+
+	/* We unlocked it in the previous test */
+	lock_page(locked_page);
+	start = test_start;
+	end = 0;
+	/*
+	 * Currently if we fail to find dirty pages in the delalloc range we
+	 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search.  If
+	 * this changes at any point in the future we will need to fix this
+	 * test's expected behavior.
+	 */
+	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+					 &end, max_bytes);
+	if (!found) {
+		test_msg("Didn't find our range\n");
+		goto out_bits;
+	}
+	if (start != test_start || end != test_start + PAGE_CACHE_SIZE - 1) {
+		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
+			 test_start, test_start + PAGE_CACHE_SIZE - 1, start,
+			 end);
+		goto out_bits;
+	}
+	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
+			       PROCESS_UNLOCK)) {
+		test_msg("Pages in range were not all locked\n");
+		goto out_bits;
+	}
+	ret = 0;
+out_bits:
+	clear_extent_bits(&tmp, 0, total_dirty - 1,
+			  (unsigned long)-1, GFP_NOFS);
+out:
+	if (locked_page)
+		page_cache_release(locked_page);
+	process_page_range(inode, 0, total_dirty - 1,
+			   PROCESS_UNLOCK | PROCESS_RELEASE);
+	iput(inode);
+	return ret;
+}
+
+int btrfs_test_extent_io(void)
+{
+	test_msg("Running find delalloc tests\n");
+	return test_find_delalloc();
+}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
new file mode 100644
index 0000000..397d1f9
--- /dev/null
+++ b/fs/btrfs/tests/inode-tests.c
@@ -0,0 +1,955 @@
+/*
+ * Copyright (C) 2013 Fusion IO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../btrfs_inode.h"
+#include "../disk-io.h"
+#include "../extent_io.h"
+#include "../volumes.h"
+
+static struct btrfs_fs_info *alloc_dummy_fs_info(void)
+{
+	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
+						GFP_NOFS);
+	if (!fs_info)
+		return fs_info;
+	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
+				      GFP_NOFS);
+	if (!fs_info->fs_devices) {
+		kfree(fs_info);
+		return NULL;
+	}
+	return fs_info;
+}
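+
+/* Undo everything a test hung off the dummy root, including the fake node */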
+static void free_dummy_root(struct btrfs_root *root)
+{
+	if (IS_ERR_OR_NULL(root))
+		return;
+	if (root->fs_info) {
+		kfree(root->fs_info->fs_devices);
+		kfree(root->fs_info);
+	}
+	if (root->node)
+		free_extent_buffer(root->node);
+	kfree(root);
+}
+
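+/*
+ * Stuff a file extent item directly into the dummy leaf at @slot; there is
+ * no transaction or block allocator here, so we build the item by hand.
+ */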
+static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+			  u64 ram_bytes, u64 offset, u64 disk_bytenr,
+			  u64 disk_len, u32 type, u8 compression, int slot)
+{
+	struct btrfs_path path;
+	struct btrfs_file_extent_item *fi;
+	struct extent_buffer *leaf = root->node;
+	struct btrfs_key key;
+	u32 value_len = sizeof(struct btrfs_file_extent_item);
+
+	if (type == BTRFS_FILE_EXTENT_INLINE)
+		value_len += len;
+	memset(&path, 0, sizeof(path));
+
+	path.nodes[0] = leaf;
+	path.slots[0] = slot;
+
+	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	key.offset = start;
+
+	setup_items_for_insert(root, &path, &key, &value_len, value_len,
+			       value_len + sizeof(struct btrfs_item), 1);
+	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, fi, 1);
+	btrfs_set_file_extent_type(leaf, fi, type);
+	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
+	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_len);
+	btrfs_set_file_extent_offset(leaf, fi, offset);
+	btrfs_set_file_extent_num_bytes(leaf, fi, len);
+	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
+	btrfs_set_file_extent_compression(leaf, fi, compression);
+	btrfs_set_file_extent_encryption(leaf, fi, 0);
+	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
+}
+
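+/*
+ * Insert an empty inode item in front of the file extents so searches in
+ * btrfs_get_extent have something sane to land on.
+ */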
+static void insert_inode_item_key(struct btrfs_root *root)
+{
+	struct btrfs_path path;
+	struct extent_buffer *leaf = root->node;
+	struct btrfs_key key;
+	u32 value_len = 0;
+
+	memset(&path, 0, sizeof(path));
+
+	path.nodes[0] = leaf;
+	path.slots[0] = 0;
+
+	key.objectid = BTRFS_INODE_ITEM_KEY;
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.offset = 0;
+
+	setup_items_for_insert(root, &path, &key, &value_len, value_len,
+			       value_len + sizeof(struct btrfs_item), 1);
+}
+
+/*
+ * Build the most complicated map of extents the earth has ever seen.  We want
+ * this so we can test all of the corner cases of btrfs_get_extent.  Here is a
+ * diagram of how the extents will look, though such a layout may not be
+ * possible in practice; we still want to make sure everything acts normally
+ * (the last number is not inclusive)
+ *
+ * [0 - 5][5 -  6][6 - 10][10 - 4096][  4096 - 8192 ][8192 - 12288]
+ * [hole ][inline][ hole ][ regular ][regular1 split][    hole    ]
+ *
+ * [ 12288 - 20480][20480 - 24576][  24576 - 28672  ][28672 - 36864][36864 - 45056]
+ * [regular1 split][   prealloc1 ][prealloc1 written][   prealloc1 ][ compressed  ]
+ *
+ * [45056 - 49152][49152-53248][53248-61440][61440-65536][   65536 - 81920   ]
+ * [ compressed1 ][  regular  ][compressed1][  regular  ][ hole but no extent]
+ *
+ * [81920-86016]
+ * [  regular  ]
+ */
+static void setup_file_extents(struct btrfs_root *root)
+{
+	int slot = 0;
+	u64 disk_bytenr = 1 * 1024 * 1024;
+	u64 offset = 0;
+
+	/* First we want a hole */
+	insert_extent(root, offset, 5, 5, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
+		      slot);
+	slot++;
+	offset += 5;
+
+	/*
+	 * Now we want an inline extent.  I don't think this is possible in
+	 * practice, but hey, why not?  Also keep in mind that if we have an
+	 * inline extent it counts as the whole first page.  If we were to
+	 * expand it we would have to cow and we wouldn't have an inline
+	 * extent anymore.
+	 */
+	insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
+		      slot);
+	slot++;
+	offset = 4096;
+
+	/* Now another hole */
+	insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
+		      slot);
+	slot++;
+	offset += 4;
+
+	/* Now for a regular extent */
+	insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	disk_bytenr += 4096;
+	offset += 4095;
+
+	/*
+	 * Now for 3 extents that were split from a hole punch so we test
+	 * offsets properly.
+	 */
+	insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
+		      0, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	offset += 8192;
+	disk_bytenr += 16384;
+
+	/* Now for an unwritten prealloc extent */
+	insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+	slot++;
+	offset += 4096;
+
+	/*
+	 * We want to jack up disk_bytenr a little more so the em stuff doesn't
+	 * merge our records.
+	 */
+	disk_bytenr += 8192;
+
+	/*
+	 * Now for a partially written prealloc extent, basically the same as
+	 * the hole punch example above.  Note that ram_bytes never changes
+	 * when you mark extents written.
+	 */
+	insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
+		      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+		      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+	slot++;
+	offset += 8192;
+	disk_bytenr += 16384;
+
+	/* Now a normal compressed extent */
+	insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+	slot++;
+	offset += 8192;
+	/* No merges */
+	disk_bytenr += 8192;
+
+	/* Now a split compressed extent */
+	insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	offset += 4096;
+	insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+	slot++;
+	offset += 8192;
+	disk_bytenr += 8192;
+
+	/* Now extents that have a hole but no hole extent */
+	insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+	slot++;
+	offset += 16384;
+	disk_bytenr += 4096;
+	insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
+		      BTRFS_FILE_EXTENT_REG, 0, slot);
+}
+
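+/*
+ * Expected em->flags values for the prealloc, compressed and vacancy cases;
+ * the EXTENT_FLAG_* bits are filled in by btrfs_test_inodes() below.
+ */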
+static unsigned long prealloc_only = 0;
+static unsigned long compressed_only = 0;
+static unsigned long vacancy_only = 0;
+
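+/*
+ * Walk the entire mapping built by setup_file_extents() and check each
+ * extent map btrfs_get_extent hands back against the expected layout.
+ */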
+static noinline int test_btrfs_get_extent(void)
+{
+	struct inode *inode = NULL;
+	struct btrfs_root *root = NULL;
+	struct extent_map *em = NULL;
+	u64 orig_start;
+	u64 disk_bytenr;
+	u64 offset;
+	int ret = -ENOMEM;
+
+	inode = btrfs_new_test_inode();
+	if (!inode) {
+		test_msg("Couldn't allocate inode\n");
+		return ret;
+	}
+
+	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+	BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	BTRFS_I(inode)->location.offset = 0;
+
+	root = btrfs_alloc_dummy_root();
+	if (IS_ERR(root)) {
+		test_msg("Couldn't allocate root\n");
+		goto out;
+	}
+
+	/*
+	 * We do this since btrfs_get_extent wants to assign em->bdev to
+	 * root->fs_info->fs_devices->latest_bdev.
+	 */
+	root->fs_info = alloc_dummy_fs_info();
+	if (!root->fs_info) {
+		test_msg("Couldn't allocate dummy fs info\n");
+		goto out;
+	}
+
+	root->node = alloc_dummy_extent_buffer(0, 4096);
+	if (!root->node) {
+		test_msg("Couldn't allocate dummy buffer\n");
+		goto out;
+	}
+
+	/*
+	 * We will just free a dummy node if its ref count is 2, so we need an
+	 * extra ref to keep our searches from accidentally releasing our page.
+	 */
+	extent_buffer_get(root->node);
+	btrfs_set_header_nritems(root->node, 0);
+	btrfs_set_header_level(root->node, 0);
+	ret = -EINVAL;
+
+	/* First with no extents */
+	BTRFS_I(inode)->root = root;
+	em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+	if (IS_ERR(em)) {
+		em = NULL;
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
+		test_msg("Vacancy flag wasn't set properly\n");
+		goto out;
+	}
+	free_extent_map(em);
+	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+
+	/*
+	 * All of the magic numbers are based on the mapping setup in
+	 * setup_file_extents, so if you change anything there you need to
+	 * update the comment and update the expected values below.
+	 */
+	setup_file_extents(root);
+
+	em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != 0 || em->len != 5) {
+		test_msg("Unexpected extent wanted start 0 len 5, got start "
+			 "%llu len %llu\n", em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_INLINE) {
+		test_msg("Expected an inline, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4091) {
+		test_msg("Unexpected extent wanted start %llu len 4091, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	/*
+	 * We don't test anything else for inline since it doesn't get set
+	 * unless we have a page for it to write into.  Maybe we should change
+	 * this?
+	 */
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4) {
+		test_msg("Unexpected extent wanted start %llu len 4, got start "
+			 "%llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* Regular extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4095) {
+		test_msg("Unexpected extent wanted start %llu len 4095, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* The next 3 are split extents */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	disk_bytenr = em->block_start;
+	orig_start = em->start;
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 8192) {
+		test_msg("Unexpected extent wanted start %llu len 8192, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != orig_start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n",
+			 orig_start, em->orig_start);
+		goto out;
+	}
+	disk_bytenr += (em->start - orig_start);
+	if (em->block_start != disk_bytenr) {
+		test_msg("Wrong block start, want %llu, have %llu\n",
+			 disk_bytenr, em->block_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* Prealloc extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != prealloc_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 prealloc_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* The next 3 are a half written prealloc extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != prealloc_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 prealloc_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	disk_bytenr = em->block_start;
+	orig_start = em->start;
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != orig_start) {
+		test_msg("Unexpected orig offset, wanted %llu, have %llu\n",
+			 orig_start, em->orig_start);
+		goto out;
+	}
+	if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+		test_msg("Unexpected block start, wanted %llu, have %llu\n",
+			 disk_bytenr + (em->start - em->orig_start),
+			 em->block_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 8192) {
+		test_msg("Unexpected extent wanted start %llu len 8192, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != prealloc_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 prealloc_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != orig_start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", orig_start,
+			 em->orig_start);
+		goto out;
+	}
+	if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+		test_msg("Unexpected block start, wanted %llu, have %llu\n",
+			 disk_bytenr + (em->start - em->orig_start),
+			 em->block_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* Now for the compressed extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 8192) {
+		test_msg("Unexpected extent wanted start %llu len 8192, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != compressed_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 compressed_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n",
+			 em->start, em->orig_start);
+		goto out;
+	}
+	if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+		test_msg("Unexpected compress type, wanted %d, got %d\n",
+			 BTRFS_COMPRESS_ZLIB, em->compress_type);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* Split compressed extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != compressed_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 compressed_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n",
+			 em->start, em->orig_start);
+		goto out;
+	}
+	if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+		test_msg("Unexpected compress type, wanted %d, got %d\n",
+			 BTRFS_COMPRESS_ZLIB, em->compress_type);
+		goto out;
+	}
+	disk_bytenr = em->block_start;
+	orig_start = em->start;
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != disk_bytenr) {
+		test_msg("Block start does not match, want %llu got %llu\n",
+			 disk_bytenr, em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 8192) {
+		test_msg("Unexpected extent wanted start %llu len 8192, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != compressed_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 compressed_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != orig_start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n",
+			 orig_start, em->orig_start);
+		goto out;
+	}
+	if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+		test_msg("Unexpected compress type, wanted %d, got %d\n",
+			 BTRFS_COMPRESS_ZLIB, em->compress_type);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	/* A hole between regular extents but no hole extent */
+	em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	/*
+	 * Currently we just return the length that we requested rather than
+	 * the length of the actual hole; if this changes we'll have to change
+	 * this test.
+	 */
+	if (em->start != offset || em->len != 12288) {
+		test_msg("Unexpected extent wanted start %llu len 12288, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != vacancy_only) {
+		test_msg("Unexpected flags set, want %lu have %lu\n",
+			 vacancy_only, em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	offset = em->start + em->len;
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != offset || em->len != 4096) {
+		test_msg("Unexpected extent wanted start %llu len 4096, got "
+			 "start %llu len %llu\n", offset, em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
+		goto out;
+	}
+	if (em->orig_start != em->start) {
+		test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
+			 em->orig_start);
+		goto out;
+	}
+	ret = 0;
+out:
+	if (!IS_ERR(em))
+		free_extent_map(em);
+	iput(inode);
+	free_dummy_root(root);
+	return ret;
+}
+
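+/*
+ * Make sure a lookup that starts in an implicit hole in front of the first
+ * file extent comes back as a vacancy-flagged hole extent map.
+ */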
+static int test_hole_first(void)
+{
+	struct inode *inode = NULL;
+	struct btrfs_root *root = NULL;
+	struct extent_map *em = NULL;
+	int ret = -ENOMEM;
+
+	inode = btrfs_new_test_inode();
+	if (!inode) {
+		test_msg("Couldn't allocate inode\n");
+		return ret;
+	}
+
+	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+	BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	BTRFS_I(inode)->location.offset = 0;
+
+	root = btrfs_alloc_dummy_root();
+	if (IS_ERR(root)) {
+		test_msg("Couldn't allocate root\n");
+		goto out;
+	}
+
+	root->fs_info = alloc_dummy_fs_info();
+	if (!root->fs_info) {
+		test_msg("Couldn't allocate dummy fs info\n");
+		goto out;
+	}
+
+	root->node = alloc_dummy_extent_buffer(0, 4096);
+	if (!root->node) {
+		test_msg("Couldn't allocate dummy buffer\n");
+		goto out;
+	}
+
+	extent_buffer_get(root->node);
+	btrfs_set_header_nritems(root->node, 0);
+	btrfs_set_header_level(root->node, 0);
+	BTRFS_I(inode)->root = root;
+	ret = -EINVAL;
+
+	/*
+	 * Need a blank inode item here just so we don't confuse
+	 * btrfs_get_extent.
+	 */
+	insert_inode_item_key(root);
+	insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
+		      BTRFS_FILE_EXTENT_REG, 0, 1);
+	em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != EXTENT_MAP_HOLE) {
+		test_msg("Expected a hole, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != 0 || em->len != 4096) {
+		test_msg("Unexpected extent wanted start 0 len 4096, got start "
+			 "%llu len %llu\n", em->start, em->len);
+		goto out;
+	}
+	if (em->flags != vacancy_only) {
+		test_msg("Wrong flags, wanted %lu, have %lu\n", vacancy_only,
+			 em->flags);
+		goto out;
+	}
+	free_extent_map(em);
+
+	em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+	if (IS_ERR(em)) {
+		test_msg("Got an error when we shouldn't have\n");
+		goto out;
+	}
+	if (em->block_start != 4096) {
+		test_msg("Expected a real extent, got %llu\n", em->block_start);
+		goto out;
+	}
+	if (em->start != 4096 || em->len != 4096) {
+		test_msg("Unexpected extent wanted start 4096 len 4096, got "
+			 "start %llu len %llu\n", em->start, em->len);
+		goto out;
+	}
+	if (em->flags != 0) {
+		test_msg("Unexpected flags set, wanted 0 got %lu\n",
+			 em->flags);
+		goto out;
+	}
+	ret = 0;
+out:
+	if (!IS_ERR(em))
+		free_extent_map(em);
+	iput(inode);
+	free_dummy_root(root);
+	return ret;
+}
+
+int btrfs_test_inodes(void)
+{
+	int ret;
+
+	set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
+	set_bit(EXTENT_FLAG_VACANCY, &vacancy_only);
+	set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
+
+	test_msg("Running btrfs_get_extent tests\n");
+	ret = test_btrfs_get_extent();
+	if (ret)
+		return ret;
+	test_msg("Running hole first btrfs_get_extent test\n");
+	return test_hole_first();
+}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8c81bdc..57c16b4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -57,7 +57,7 @@
 					   __TRANS_JOIN_NOLOCK),
 };
 
-static void put_transaction(struct btrfs_transaction *transaction)
+void btrfs_put_transaction(struct btrfs_transaction *transaction)
 {
 	WARN_ON(atomic_read(&transaction->use_count) == 0);
 	if (atomic_dec_and_test(&transaction->use_count)) {
@@ -332,7 +332,7 @@
 		wait_event(root->fs_info->transaction_wait,
 			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
 			   cur_trans->aborted);
-		put_transaction(cur_trans);
+		btrfs_put_transaction(cur_trans);
 	} else {
 		spin_unlock(&root->fs_info->trans_lock);
 	}
@@ -353,6 +353,17 @@
 	return 0;
 }
 
+static inline bool need_reserve_reloc_root(struct btrfs_root *root)
+{
+	if (!root->fs_info->reloc_ctl ||
+	    !root->ref_cows ||
+	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+	    root->reloc_root)
+		return false;
+
+	return true;
+}
+
 static struct btrfs_trans_handle *
 start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
 		  enum btrfs_reserve_flush_enum flush)
@@ -360,8 +371,9 @@
 	struct btrfs_trans_handle *h;
 	struct btrfs_transaction *cur_trans;
 	u64 num_bytes = 0;
-	int ret;
 	u64 qgroup_reserved = 0;
+	bool reloc_reserved = false;
+	int ret;
 
 	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 		return ERR_PTR(-EROFS);
@@ -390,6 +402,14 @@
 		}
 
 		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+		/*
+		 * Do the reservation for the relocation root creation
+		 */
+		if (unlikely(need_reserve_reloc_root(root))) {
+			num_bytes += root->nodesize;
+			reloc_reserved = true;
+		}
+
 		ret = btrfs_block_rsv_add(root,
 					  &root->fs_info->trans_block_rsv,
 					  num_bytes, flush);
@@ -451,6 +471,7 @@
 	h->delayed_ref_elem.seq = 0;
 	h->type = type;
 	h->allocating_chunk = false;
+	h->reloc_reserved = false;
 	INIT_LIST_HEAD(&h->qgroup_ref_list);
 	INIT_LIST_HEAD(&h->new_bgs);
 
@@ -466,6 +487,7 @@
 					      h->transid, num_bytes, 1);
 		h->block_rsv = &root->fs_info->trans_block_rsv;
 		h->bytes_reserved = num_bytes;
+		h->reloc_reserved = reloc_reserved;
 	}
 	h->qgroup_reserved = qgroup_reserved;
 
@@ -610,7 +632,7 @@
 	}
 
 	wait_for_commit(root, cur_trans);
-	put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
 out:
 	return ret;
 }
@@ -735,7 +757,7 @@
 	smp_mb();
 	if (waitqueue_active(&cur_trans->writer_wait))
 		wake_up(&cur_trans->writer_wait);
-	put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
 
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
@@ -744,8 +766,10 @@
 		btrfs_run_delayed_iputs(root);
 
 	if (trans->aborted ||
-	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+		wake_up_process(info->transaction_kthread);
 		err = -EIO;
+	}
 	assert_qgroups_uptodate(trans);
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
@@ -948,16 +972,19 @@
 		return ret;
 
 	ret = btrfs_run_dev_stats(trans, root->fs_info);
-	WARN_ON(ret);
+	if (ret)
+		return ret;
 	ret = btrfs_run_dev_replace(trans, root->fs_info);
-	WARN_ON(ret);
-
+	if (ret)
+		return ret;
 	ret = btrfs_run_qgroups(trans, root->fs_info);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	/* run_qgroups might have added some more refs */
 	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
 		next = fs_info->dirty_cowonly_roots.next;
@@ -1510,7 +1537,7 @@
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
 
-	put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
 	return 0;
 }
 
@@ -1552,8 +1579,10 @@
 		root->fs_info->running_transaction = NULL;
 	spin_unlock(&root->fs_info->trans_lock);
 
-	put_transaction(cur_trans);
-	put_transaction(cur_trans);
+	if (trans->type & __TRANS_FREEZABLE)
+		sb_end_intwrite(root->fs_info->sb);
+	btrfs_put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
 
 	trace_btrfs_transaction_commit(root);
 
@@ -1571,15 +1600,19 @@
 	int ret;
 
 	ret = btrfs_run_delayed_items(trans, root);
-	if (ret)
-		return ret;
-
 	/*
 	 * running the delayed items may have added new refs. account
 	 * them now so that they hinder processing of more delayed refs
 	 * as little as possible.
 	 */
-	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+	if (ret) {
+		btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+		return ret;
+	}
+
+	ret = btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+	if (ret)
+		return ret;
 
 	/*
 	 * rename don't use btrfs_join_transaction, so, once we
@@ -1596,14 +1629,14 @@
 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
 	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
-		return btrfs_start_all_delalloc_inodes(fs_info, 1);
+		return btrfs_start_delalloc_roots(fs_info, 1);
 	return 0;
 }
 
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
 	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
-		btrfs_wait_all_ordered_extents(fs_info);
+		btrfs_wait_ordered_roots(fs_info, -1);
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -1669,7 +1702,7 @@
 
 		wait_for_commit(root, cur_trans);
 
-		put_transaction(cur_trans);
+		btrfs_put_transaction(cur_trans);
 
 		return ret;
 	}
@@ -1686,7 +1719,7 @@
 
 			wait_for_commit(root, prev_trans);
 
-			put_transaction(prev_trans);
+			btrfs_put_transaction(prev_trans);
 		} else {
 			spin_unlock(&root->fs_info->trans_lock);
 		}
@@ -1885,8 +1918,8 @@
 	list_del_init(&cur_trans->list);
 	spin_unlock(&root->fs_info->trans_lock);
 
-	put_transaction(cur_trans);
-	put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
+	btrfs_put_transaction(cur_trans);
 
 	if (trans->type & __TRANS_FREEZABLE)
 		sb_end_intwrite(root->fs_info->sb);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 5c2af84..7657d11 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -92,6 +92,7 @@
 	short aborted;
 	short adding_csums;
 	bool allocating_chunk;
+	bool reloc_reserved;
 	unsigned int type;
 	/*
 	 * this root is only needed to validate that the root passed to
@@ -166,4 +167,5 @@
 				struct extent_io_tree *dirty_pages, int mark);
 int btrfs_transaction_blocked(struct btrfs_fs_info *info);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
+void btrfs_put_transaction(struct btrfs_transaction *transaction);
 #endif
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 94e05c1..76928ca 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -37,7 +37,6 @@
 	int ret = 0;
 	int wret;
 	int level;
-	int is_extent = 0;
 	int next_key_ret = 0;
 	u64 last_ret = 0;
 	u64 min_trans = 0;
@@ -50,7 +49,7 @@
 		goto out;
 	}
 
-	if (root->ref_cows == 0 && !is_extent)
+	if (root->ref_cows == 0)
 		goto out;
 
 	if (btrfs_test_opt(root, SSD))
@@ -85,7 +84,7 @@
 
 	path->keep_locks = 1;
 
-	ret = btrfs_search_forward(root, &key, NULL, path, min_trans);
+	ret = btrfs_search_forward(root, &key, path, min_trans);
 	if (ret < 0)
 		goto out;
 	if (ret > 0) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 79f057c..744553c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -26,7 +26,6 @@
 #include "locking.h"
 #include "print-tree.h"
 #include "backref.h"
-#include "compat.h"
 #include "tree-log.h"
 #include "hash.h"
 
@@ -936,7 +935,7 @@
 					    parent_objectid,
 					    victim_name,
 					    victim_name_len)) {
-				btrfs_inc_nlink(inode);
+				inc_nlink(inode);
 				btrfs_release_path(path);
 
 				ret = btrfs_unlink_inode(trans, root, dir,
@@ -1006,7 +1005,7 @@
 				victim_parent = read_one_inode(root,
 							       parent_objectid);
 				if (victim_parent) {
-					btrfs_inc_nlink(inode);
+					inc_nlink(inode);
 					btrfs_release_path(path);
 
 					ret = btrfs_unlink_inode(trans, root,
@@ -1113,11 +1112,11 @@
 				  struct extent_buffer *eb, int slot,
 				  struct btrfs_key *key)
 {
-	struct inode *dir;
-	struct inode *inode;
+	struct inode *dir = NULL;
+	struct inode *inode = NULL;
 	unsigned long ref_ptr;
 	unsigned long ref_end;
-	char *name;
+	char *name = NULL;
 	int namelen;
 	int ret;
 	int search_done = 0;
@@ -1150,13 +1149,15 @@
 	 * care of the rest
 	 */
 	dir = read_one_inode(root, parent_objectid);
-	if (!dir)
-		return -ENOENT;
+	if (!dir) {
+		ret = -ENOENT;
+		goto out;
+	}
 
 	inode = read_one_inode(root, inode_objectid);
 	if (!inode) {
-		iput(dir);
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 
 	while (ref_ptr < ref_end) {
@@ -1169,14 +1170,16 @@
 			 */
 			if (!dir)
 				dir = read_one_inode(root, parent_objectid);
-			if (!dir)
-				return -ENOENT;
+			if (!dir) {
+				ret = -ENOENT;
+				goto out;
+			}
 		} else {
 			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
 					     &ref_index);
 		}
 		if (ret)
-			return ret;
+			goto out;
 
 		/* if we already have a perfect match, we're done */
 		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
@@ -1196,12 +1199,11 @@
 						      parent_objectid,
 						      ref_index, name, namelen,
 						      &search_done);
-				if (ret == 1) {
-					ret = 0;
+				if (ret) {
+					if (ret == 1)
+						ret = 0;
 					goto out;
 				}
-				if (ret)
-					goto out;
 			}
 
 			/* insert our name */
@@ -1215,6 +1217,7 @@
 
 		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
 		kfree(name);
+		name = NULL;
 		if (log_ref_ver) {
 			iput(dir);
 			dir = NULL;
@@ -1225,6 +1228,7 @@
 	ret = overwrite_item(trans, root, path, eb, slot, key);
 out:
 	btrfs_release_path(path);
+	kfree(name);
 	iput(dir);
 	iput(inode);
 	return ret;
@@ -1307,6 +1311,7 @@
 				break;
 			path->slots[0]--;
 		}
+process_slot:
 		btrfs_item_key_to_cpu(path->nodes[0], &key,
 				      path->slots[0]);
 		if (key.objectid != ino ||
@@ -1327,6 +1332,10 @@
 
 		if (key.offset == 0)
 			break;
+		if (path->slots[0] > 0) {
+			path->slots[0]--;
+			goto process_slot;
+		}
 		key.offset--;
 		btrfs_release_path(path);
 	}
@@ -1480,7 +1489,7 @@
 		if (!inode->i_nlink)
 			set_nlink(inode, 1);
 		else
-			btrfs_inc_nlink(inode);
+			inc_nlink(inode);
 		ret = btrfs_update_inode(trans, root, inode);
 	} else if (ret == -EEXIST) {
 		ret = 0;
@@ -1823,7 +1832,7 @@
 						     dir_key->offset,
 						     name, name_len, 0);
 		}
-		if (IS_ERR_OR_NULL(log_di)) {
+		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
 			btrfs_dir_item_key_to_cpu(eb, di, &location);
 			btrfs_release_path(path);
 			btrfs_release_path(log_path);
@@ -1841,7 +1850,7 @@
 				goto out;
 			}
 
-			btrfs_inc_nlink(inode);
+			inc_nlink(inode);
 			ret = btrfs_unlink_inode(trans, root, dir, inode,
 						 name, name_len);
 			if (!ret)
@@ -1860,6 +1869,9 @@
 				goto again;
 			ret = 0;
 			goto out;
+		} else if (IS_ERR(log_di)) {
+			kfree(name);
+			return PTR_ERR(log_di);
 		}
 		btrfs_release_path(log_path);
 		kfree(name);
@@ -2118,8 +2130,7 @@
 		WARN_ON(*level >= BTRFS_MAX_LEVEL);
 		cur = path->nodes[*level];
 
-		if (btrfs_header_level(cur) != *level)
-			WARN_ON(1);
+		WARN_ON(btrfs_header_level(cur) != *level);
 
 		if (path->slots[*level] >=
 		    btrfs_header_nritems(cur))
@@ -2151,11 +2162,13 @@
 					return ret;
 				}
 
-				btrfs_tree_lock(next);
-				btrfs_set_lock_blocking(next);
-				clean_tree_block(trans, root, next);
-				btrfs_wait_tree_block_writeback(next);
-				btrfs_tree_unlock(next);
+				if (trans) {
+					btrfs_tree_lock(next);
+					btrfs_set_lock_blocking(next);
+					clean_tree_block(trans, root, next);
+					btrfs_wait_tree_block_writeback(next);
+					btrfs_tree_unlock(next);
+				}
 
 				WARN_ON(root_owner !=
 					BTRFS_TREE_LOG_OBJECTID);
@@ -2227,11 +2240,13 @@
 
 				next = path->nodes[*level];
 
-				btrfs_tree_lock(next);
-				btrfs_set_lock_blocking(next);
-				clean_tree_block(trans, root, next);
-				btrfs_wait_tree_block_writeback(next);
-				btrfs_tree_unlock(next);
+				if (trans) {
+					btrfs_tree_lock(next);
+					btrfs_set_lock_blocking(next);
+					clean_tree_block(trans, root, next);
+					btrfs_wait_tree_block_writeback(next);
+					btrfs_tree_unlock(next);
+				}
 
 				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
 				ret = btrfs_free_and_pin_reserved_extent(root,
@@ -2301,11 +2316,13 @@
 
 			next = path->nodes[orig_level];
 
-			btrfs_tree_lock(next);
-			btrfs_set_lock_blocking(next);
-			clean_tree_block(trans, log, next);
-			btrfs_wait_tree_block_writeback(next);
-			btrfs_tree_unlock(next);
+			if (trans) {
+				btrfs_tree_lock(next);
+				btrfs_set_lock_blocking(next);
+				clean_tree_block(trans, log, next);
+				btrfs_wait_tree_block_writeback(next);
+				btrfs_tree_unlock(next);
+			}
 
 			WARN_ON(log->root_key.objectid !=
 				BTRFS_TREE_LOG_OBJECTID);
@@ -2571,9 +2588,7 @@
 	 * the running transaction open, so a full commit can't hop
 	 * in and cause problems either.
 	 */
-	btrfs_scrub_pause_super(root);
 	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
-	btrfs_scrub_continue_super(root);
 	if (ret) {
 		btrfs_abort_transaction(trans, root, ret);
 		goto out_wake_log_root;
@@ -2608,13 +2623,10 @@
 		.process_func = process_one_buffer
 	};
 
-	if (trans) {
-		ret = walk_log_tree(trans, log, &wc);
-
-		/* I don't think this can happen but just in case */
-		if (ret)
-			btrfs_abort_transaction(trans, log, ret);
-	}
+	ret = walk_log_tree(trans, log, &wc);
+	/* I don't think this can happen but just in case */
+	if (ret)
+		btrfs_abort_transaction(trans, log, ret);
 
 	while (1) {
 		ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -2867,7 +2879,6 @@
 			  u64 min_offset, u64 *last_offset_ret)
 {
 	struct btrfs_key min_key;
-	struct btrfs_key max_key;
 	struct btrfs_root *log = root->log_root;
 	struct extent_buffer *src;
 	int err = 0;
@@ -2879,9 +2890,6 @@
 	u64 ino = btrfs_ino(inode);
 
 	log = root->log_root;
-	max_key.objectid = ino;
-	max_key.offset = (u64)-1;
-	max_key.type = key_type;
 
 	min_key.objectid = ino;
 	min_key.type = key_type;
@@ -2889,8 +2897,7 @@
 
 	path->keep_locks = 1;
 
-	ret = btrfs_search_forward(root, &min_key, &max_key,
-				   path, trans->transid);
+	ret = btrfs_search_forward(root, &min_key, path, trans->transid);
 
 	/*
 	 * we didn't find anything from this transaction, see if there
@@ -2943,10 +2950,8 @@
 
 	/* find the first key from this transaction again */
 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
-	if (ret != 0) {
-		WARN_ON(1);
+	if (WARN_ON(ret != 0))
 		goto done;
-	}
 
 	/*
 	 * we have a block from this transaction, log every item in it
@@ -3172,11 +3177,10 @@
 			  struct inode *inode)
 {
 	struct btrfs_inode_item *inode_item;
-	struct btrfs_key key;
 	int ret;
 
-	memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
-	ret = btrfs_insert_empty_item(trans, log, path, &key,
+	ret = btrfs_insert_empty_item(trans, log, path,
+				      &BTRFS_I(inode)->location,
 				      sizeof(*inode_item));
 	if (ret && ret != -EEXIST)
 		return ret;
@@ -3375,7 +3379,7 @@
 		btrfs_set_token_file_extent_type(leaf, fi,
 						 BTRFS_FILE_EXTENT_REG,
 						 &token);
-		if (em->block_start == 0)
+		if (em->block_start == EXTENT_MAP_HOLE)
 			skip_csum = true;
 	}
 
@@ -3417,11 +3421,6 @@
 	if (skip_csum)
 		return 0;
 
-	if (em->compress_type) {
-		csum_offset = 0;
-		csum_len = block_len;
-	}
-
 	/*
 	 * First check and see if our csums are on our outstanding ordered
 	 * extents.
@@ -3505,8 +3504,13 @@
 	if (!mod_len || ret)
 		return ret;
 
-	csum_offset = mod_start - em->start;
-	csum_len = mod_len;
+	if (em->compress_type) {
+		csum_offset = 0;
+		csum_len = block_len;
+	} else {
+		csum_offset = mod_start - em->start;
+		csum_len = mod_len;
+	}
 
 	/* block start is already adjusted for the file extent offset. */
 	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
@@ -3719,7 +3723,7 @@
 
 	while (1) {
 		ins_nr = 0;
-		ret = btrfs_search_forward(root, &min_key, &max_key,
+		ret = btrfs_search_forward(root, &min_key,
 					   path, trans->transid);
 		if (ret != 0)
 			break;
@@ -3769,14 +3773,14 @@
 		}
 		btrfs_release_path(path);
 
-		if (min_key.offset < (u64)-1)
+		if (min_key.offset < (u64)-1) {
 			min_key.offset++;
-		else if (min_key.type < (u8)-1)
+		} else if (min_key.type < max_key.type) {
 			min_key.type++;
-		else if (min_key.objectid < (u64)-1)
-			min_key.objectid++;
-		else
+			min_key.offset = 0;
+		} else {
 			break;
+		}
 	}
 	if (ins_nr) {
 		ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index dd0dea3..fbda900 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -260,7 +260,6 @@
 {
 	struct btrfs_root *root = fs_info->uuid_root;
 	struct btrfs_key key;
-	struct btrfs_key max_key;
 	struct btrfs_path *path;
 	int ret = 0;
 	struct extent_buffer *leaf;
@@ -277,13 +276,10 @@
 	key.objectid = 0;
 	key.type = 0;
 	key.offset = 0;
-	max_key.objectid = (u64)-1;
-	max_key.type = (u8)-1;
-	max_key.offset = (u64)-1;
 
 again_search_slot:
 	path->keep_locks = 1;
-	ret = btrfs_search_forward(root, &key, &max_key, path, 0);
+	ret = btrfs_search_forward(root, &key, path, 0);
 	if (ret) {
 		if (ret > 0)
 			ret = 0;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 043b215..0db6370 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -28,7 +28,6 @@
 #include <linux/raid/pq.h>
 #include <linux/semaphore.h>
 #include <asm/div64.h>
-#include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"
 #include "disk-io.h"
@@ -666,7 +665,8 @@
 		if (device->bdev)
 			fs_devices->open_devices--;
 
-		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+		if (device->writeable &&
+		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 			list_del_init(&device->dev_alloc_list);
 			fs_devices->rw_devices--;
 		}
@@ -2041,6 +2041,7 @@
 	device->in_fs_metadata = 1;
 	device->is_tgtdev_for_dev_replace = 0;
 	device->mode = FMODE_EXCL;
+	device->dev_stats_valid = 1;
 	set_blocksize(device->bdev, 4096);
 
 	if (seeding_dev) {
@@ -2208,6 +2209,7 @@
 	device->in_fs_metadata = 1;
 	device->is_tgtdev_for_dev_replace = 1;
 	device->mode = FMODE_EXCL;
+	device->dev_stats_valid = 1;
 	set_blocksize(device->bdev, 4096);
 	device->fs_devices = fs_info->fs_devices;
 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
@@ -2550,8 +2552,7 @@
 		failed = 0;
 		retried = true;
 		goto again;
-	} else if (failed && retried) {
-		WARN_ON(1);
+	} else if (WARN_ON(failed && retried)) {
 		ret = -ENOSPC;
 	}
 error:
@@ -3423,6 +3424,9 @@
 
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
 {
+	if (fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
 	mutex_lock(&fs_info->balance_mutex);
 	if (!fs_info->balance_ctl) {
 		mutex_unlock(&fs_info->balance_mutex);
@@ -3488,7 +3492,7 @@
 	path->keep_locks = 1;
 
 	while (1) {
-		ret = btrfs_search_forward(root, &key, &max_key, path, 0);
+		ret = btrfs_search_forward(root, &key, path, 0);
 		if (ret) {
 			if (ret > 0)
 				ret = 0;
@@ -4488,6 +4492,7 @@
 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
 			    "%Lu-%Lu\n", logical, logical+len, em->start,
 			    em->start + em->len);
+		free_extent_map(em);
 		return 1;
 	}
 
@@ -4668,6 +4673,7 @@
 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
 			   "found %Lu-%Lu\n", logical, em->start,
 			   em->start + em->len);
+		free_extent_map(em);
 		return -EINVAL;
 	}
 
@@ -4895,7 +4901,7 @@
 			num_stripes = map->num_stripes;
 			max_errors = nr_parity_stripes(map);
 
-			raid_map = kmalloc(sizeof(u64) * num_stripes,
+			raid_map = kmalloc_array(num_stripes, sizeof(u64),
 					   GFP_NOFS);
 			if (!raid_map) {
 				ret = -ENOMEM;
@@ -5395,10 +5401,8 @@
 		.bi_rw = bio->bi_rw,
 	};
 
-	if (bio->bi_vcnt == 0) {
-		WARN_ON(1);
+	if (WARN_ON(bio->bi_vcnt == 0))
 		return 1;
-	}
 
 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
 	if (bio_sectors(bio) > max_sectors)
@@ -5631,10 +5635,8 @@
 	struct btrfs_device *dev;
 	u64 tmp;
 
-	if (!devid && !fs_info) {
-		WARN_ON(1);
+	if (WARN_ON(!devid && !fs_info))
 		return ERR_PTR(-EINVAL);
-	}
 
 	dev = __alloc_device();
 	if (IS_ERR(dev))
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index b72f540..8b3cd14 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -43,9 +43,8 @@
 	/* WRITE_SYNC bios */
 	struct btrfs_pending_bios pending_sync_bios;
 
-	int running_pending;
 	u64 generation;
-
+	int running_pending;
 	int writeable;
 	int in_fs_metadata;
 	int missing;
@@ -53,11 +52,11 @@
 	int is_tgtdev_for_dev_replace;
 
 	spinlock_t io_lock;
+	/* the mode sent to blkdev_get */
+	fmode_t mode;
 
 	struct block_device *bdev;
 
-	/* the mode sent to blkdev_get */
-	fmode_t mode;
 
 	struct rcu_string *name;
 
@@ -78,16 +77,21 @@
 
 	/* optimal io width for this device */
 	u32 io_width;
+	/* type and info about this device */
+	u64 type;
 
 	/* minimal io size for this device */
 	u32 sector_size;
 
-	/* type and info about this device */
-	u64 type;
 
 	/* physical drive uuid (or lvm uuid) */
 	u8 uuid[BTRFS_UUID_SIZE];
 
+	/* for sending down flush barriers */
+	int nobarriers;
+	struct bio *flush_bio;
+	struct completion flush_wait;
+
 	/* per-device scrub information */
 	struct scrub_ctx *scrub_device;
 
@@ -103,10 +107,6 @@
 	struct radix_tree_root reada_zones;
 	struct radix_tree_root reada_extents;
 
-	/* for sending down flush barriers */
-	struct bio *flush_bio;
-	struct completion flush_wait;
-	int nobarriers;
 
 	/* disk I/O failure stats. For detailed description refer to
 	 * enum btrfs_dev_stat_values in ioctl.h */
@@ -132,7 +132,9 @@
 
 	/* all of the devices in the FS, protected by a mutex
 	 * so we can safely walk it to write out the supers without
-	 * worrying about add/remove by the multi-device code
+	 * worrying about add/remove by the multi-device code.
+	 * worrying about add/remove by the multi-device code.
+	 * Scrubbing the superblocks can kick off superblock writes
+	 * while holding this mutex lock.
 	 */
 	struct mutex device_list_mutex;
 	struct list_head devices;
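
The volumes.h hunks reorder struct btrfs_device fields rather than changing them, moving members that are used together (the mode next to io_lock, the flush_bio machinery next to the uuid) closer in memory. When making this kind of change, offsetof() is a quick way to inspect the resulting layout; a toy illustration with invented structs:

	#include <stddef.h>
	#include <stdio.h>

	struct dev_scattered {		/* related fields far apart */
		int running;
		char other_state[48];
		long generation;
		char more_state[48];
		int writeable;
	};

	struct dev_grouped {		/* the same fields adjacent */
		long generation;
		int running;
		int writeable;
		char other_state[96];
	};

	int main(void)
	{
		printf("scattered: running@%zu writeable@%zu\n",
		       offsetof(struct dev_scattered, running),
		       offsetof(struct dev_scattered, writeable));
		printf("grouped:   running@%zu writeable@%zu\n",
		       offsetof(struct dev_grouped, running),
		       offsetof(struct dev_grouped, writeable));
		return 0;
	}

Fields that land in the same cache line are fetched together, which is the whole point of the regrouping.
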
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 00baf14..57e17fe 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -449,14 +449,14 @@
 		_debug("discard tail %llx", oi_size);
 		newattrs.ia_valid = ATTR_SIZE;
 		newattrs.ia_size = oi_size & PAGE_MASK;
-		ret = notify_change(object->backer, &newattrs);
+		ret = notify_change(object->backer, &newattrs, NULL);
 		if (ret < 0)
 			goto truncate_failed;
 	}
 
 	newattrs.ia_valid = ATTR_SIZE;
 	newattrs.ia_size = ni_size;
-	ret = notify_change(object->backer, &newattrs);
+	ret = notify_change(object->backer, &newattrs, NULL);
 
 truncate_failed:
 	mutex_unlock(&object->backer->d_inode->i_mutex);
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f4a08d7..ca65f39 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -294,7 +294,7 @@
 		if (ret < 0) {
 			cachefiles_io_error(cache, "Unlink security error");
 		} else {
-			ret = vfs_unlink(dir->d_inode, rep);
+			ret = vfs_unlink(dir->d_inode, rep, NULL);
 
 			if (preemptive)
 				cachefiles_mark_object_buried(cache, rep);
@@ -396,7 +396,7 @@
 		cachefiles_io_error(cache, "Rename security error %d", ret);
 	} else {
 		ret = vfs_rename(dir->d_inode, rep,
-				 cache->graveyard->d_inode, grave);
+				 cache->graveyard->d_inode, grave, NULL);
 		if (ret != 0 && ret != -ENOMEM)
 			cachefiles_io_error(cache,
 					    "Rename failed with error %d", ret);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index afc2bb6..f77f770 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -368,6 +368,7 @@
  */
 static int chrdev_open(struct inode *inode, struct file *filp)
 {
+	const struct file_operations *fops;
 	struct cdev *p;
 	struct cdev *new = NULL;
 	int ret = 0;
@@ -400,10 +401,11 @@
 		return ret;
 
 	ret = -ENXIO;
-	filp->f_op = fops_get(p->ops);
-	if (!filp->f_op)
+	fops = fops_get(p->ops);
+	if (!fops)
 		goto out_cdev_put;
 
+	replace_fops(filp, fops);
 	if (filp->f_op->open) {
 		ret = filp->f_op->open(inode, filp);
 		if (ret)
@@ -574,7 +576,8 @@
 void __init chrdev_init(void)
 {
 	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
-	bdi_init(&directly_mappable_cdev_bdi);
+	if (bdi_init(&directly_mappable_cdev_bdi))
+		panic("Failed to init directly mappable cdev bdi");
 }
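
The chrdev_open() change above stops assigning the looked-up fops into filp->f_op before the NULL check: the candidate is validated in a local first and only then installed via replace_fops(), so a failed open can no longer leave the file with a NULL operations pointer. A userspace sketch of that ordering (all types and names here are invented stand-ins):

	#include <stdio.h>

	struct ops { int (*open)(void); };
	struct file { const struct ops *f_op; };

	static const struct ops default_ops = { .open = 0 };
	static const struct ops chr_ops = { .open = 0 };

	static const struct ops *lookup_ops(int present)
	{
		/* The driver may have unregistered in the meantime. */
		return present ? &chr_ops : NULL;
	}

	static int do_open(struct file *filp, int present)
	{
		/* Validate into a local before touching the file... */
		const struct ops *fops = lookup_ops(present);

		if (!fops)
			return -6;	/* -ENXIO: filp->f_op never touched */

		/* ...and only then install it. */
		filp->f_op = fops;
		return 0;
	}

	int main(void)
	{
		struct file f = { .f_op = &default_ops };

		printf("failed open -> %d, f_op intact: %d\n",
		       do_open(&f, 0), f.f_op == &default_ops);
		printf("good open   -> %d, f_op swapped: %d\n",
		       do_open(&f, 1), f.f_op == &chr_ops);
		return 0;
	}
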
 
 
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 37e4a72..9409fa1 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -65,5 +65,6 @@
 	char   *mountdata; /* options received at mount time or via DFS refs */
 	struct backing_dev_info bdi;
 	struct delayed_work prune_tlinks;
+	struct rcu_head rcu;
 };
 #endif				/* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 77fc5e1..849f613 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -862,7 +862,7 @@
 const struct inode_operations cifs_symlink_inode_ops = {
 	.readlink = generic_readlink,
 	.follow_link = cifs_follow_link,
-	.put_link = cifs_put_link,
+	.put_link = kfree_put_link,
 	.permission = cifs_permission,
 	/* BB add the following two eventually */
 	/* revalidate: cifs_revalidate,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 6d0b072..26a754f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -115,8 +115,6 @@
 
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry,
-			  struct nameidata *nd, void *);
 extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
 			 int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 62a5514..8813ff7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3770,6 +3770,13 @@
 	return rc;
 }
 
+static void delayed_free(struct rcu_head *p)
+{
+	struct cifs_sb_info *sbi = container_of(p, struct cifs_sb_info, rcu);
+	unload_nls(sbi->local_nls);
+	kfree(sbi);
+}
+
 void
 cifs_umount(struct cifs_sb_info *cifs_sb)
 {
@@ -3794,8 +3801,7 @@
 
 	bdi_destroy(&cifs_sb->bdi);
 	kfree(cifs_sb->mountdata);
-	unload_nls(cifs_sb->local_nls);
-	kfree(cifs_sb);
+	call_rcu(&cifs_sb->rcu, delayed_free);
 }
 
 int
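
cifs_umount() now defers freeing the superblock info through call_rcu(), so any reader that reached the structure inside an RCU read-side critical section finishes before the memory goes away. The key step in delayed_free() is recovering the outer object from the embedded rcu_head; a plain-C sketch of that container_of() pattern, with the callback invoked directly since there is no grace-period machinery in userspace here:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rcu_head { void (*func)(struct rcu_head *); };

	struct sb_info {
		const char *mountdata;
		struct rcu_head rcu;	/* embedded, like cifs_sb_info::rcu */
	};

	static void delayed_free(struct rcu_head *p)
	{
		/* Recover the enclosing object from the embedded head. */
		struct sb_info *sbi = container_of(p, struct sb_info, rcu);

		printf("freeing sb_info with mountdata=%s\n", sbi->mountdata);
		free(sbi);
	}

	int main(void)
	{
		struct sb_info *sbi = malloc(sizeof(*sbi));

		sbi->mountdata = "rw,vers=3.0";
		sbi->rcu.func = delayed_free;
		/* call_rcu() would defer this past a grace period. */
		sbi->rcu.func(&sbi->rcu);
		return 0;
	}

Embedding the rcu_head means no extra allocation is needed at free time, which matters because call_rcu() can run in contexts where allocation may fail.
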
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 7e36ceb..cc023471 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -621,10 +621,3 @@
 	free_xid(xid);
 	return rc;
 }
-
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
-{
-	char *p = nd_get_link(nd);
-	if (!IS_ERR(p))
-		kfree(p);
-}
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
index cc0ea9f..e7550cb 100644
--- a/fs/coda/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -40,7 +40,7 @@
 int coda_open(struct inode *i, struct file *f);
 int coda_release(struct inode *i, struct file *f);
 int coda_permission(struct inode *inode, int mask);
-int coda_revalidate_inode(struct dentry *);
+int coda_revalidate_inode(struct inode *);
 int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int coda_setattr(struct dentry *, struct iattr *);
 
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 190effc..5efbb5e 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -387,9 +387,6 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	if (!host_file->f_op)
-		return -ENOTDIR;
-
 	if (host_file->f_op->iterate) {
 		struct inode *host_inode = file_inode(host_file);
 		mutex_lock(&host_inode->i_mutex);
@@ -566,13 +563,12 @@
  * cache manager Venus issues a downcall to the kernel when this 
  * happens 
  */
-int coda_revalidate_inode(struct dentry *dentry)
+int coda_revalidate_inode(struct inode *inode)
 {
 	struct coda_vattr attr;
 	int error;
 	int old_mode;
 	ino_t old_ino;
-	struct inode *inode = dentry->d_inode;
 	struct coda_inode_info *cii = ITOC(inode);
 
 	if (!cii->c_flags)
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 380b798..9e83b77 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -36,7 +36,7 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	if (!host_file->f_op || !host_file->f_op->read)
+	if (!host_file->f_op->read)
 		return -EINVAL;
 
 	return host_file->f_op->read(host_file, buf, count, ppos);
@@ -75,7 +75,7 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	if (!host_file->f_op || !host_file->f_op->write)
+	if (!host_file->f_op->write)
 		return -EINVAL;
 
 	host_inode = file_inode(host_file);
@@ -105,7 +105,7 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	if (!host_file->f_op || !host_file->f_op->mmap)
+	if (!host_file->f_op->mmap)
 		return -ENODEV;
 
 	coda_inode = file_inode(coda_file);
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 4dcc0d8..506de34 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -257,7 +257,7 @@
 
 int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
-	int err = coda_revalidate_inode(dentry);
+	int err = coda_revalidate_inode(dentry->d_inode);
 	if (!err)
 		generic_fillattr(dentry->d_inode, stat);
 	return err;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5d19acf..dc52e13 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1583,13 +1583,13 @@
 		/*FALL THROUGH*/
 
 	default:
-		if (f.file->f_op && f.file->f_op->compat_ioctl) {
+		if (f.file->f_op->compat_ioctl) {
 			error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
 			if (error != -ENOIOCTLCMD)
 				goto out_fput;
 		}
 
-		if (!f.file->f_op || !f.file->f_op->unlocked_ioctl)
+		if (!f.file->f_op->unlocked_ioctl)
 			goto do_ioctl;
 		break;
 	}
diff --git a/fs/coredump.c b/fs/coredump.c
index 9bdeca1..62406b6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -485,7 +485,7 @@
 	return err;
 }
 
-void do_coredump(siginfo_t *siginfo)
+void do_coredump(const siginfo_t *siginfo)
 {
 	struct core_state core_state;
 	struct core_name cn;
@@ -645,7 +645,7 @@
 		 */
 		if (!uid_eq(inode->i_uid, current_fsuid()))
 			goto close_fail;
-		if (!cprm.file->f_op || !cprm.file->f_op->write)
+		if (!cprm.file->f_op->write)
 			goto close_fail;
 		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
 			goto close_fail;
@@ -685,40 +685,55 @@
  * do on a core-file: use only these functions to write out all the
  * necessary info.
  */
-int dump_write(struct file *file, const void *addr, int nr)
+int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
 {
-	return !dump_interrupted() &&
-		access_ok(VERIFY_READ, addr, nr) &&
-		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
-EXPORT_SYMBOL(dump_write);
-
-int dump_seek(struct file *file, loff_t off)
-{
-	int ret = 1;
-
-	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-		if (dump_interrupted() ||
-		    file->f_op->llseek(file, off, SEEK_CUR) < 0)
+	struct file *file = cprm->file;
+	loff_t pos = file->f_pos;
+	ssize_t n;
+	if (cprm->written + nr > cprm->limit)
+		return 0;
+	while (nr) {
+		if (dump_interrupted())
 			return 0;
-	} else {
-		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-
-		if (!buf)
+		n = vfs_write(file, addr, nr, &pos);
+		if (n <= 0)
 			return 0;
-		while (off > 0) {
-			unsigned long n = off;
-
-			if (n > PAGE_SIZE)
-				n = PAGE_SIZE;
-			if (!dump_write(file, buf, n)) {
-				ret = 0;
-				break;
-			}
-			off -= n;
-		}
-		free_page((unsigned long)buf);
+		file->f_pos = pos;
+		cprm->written += n;
+		nr -= n;
 	}
-	return ret;
+	return 1;
 }
-EXPORT_SYMBOL(dump_seek);
+EXPORT_SYMBOL(dump_emit);
+
+int dump_skip(struct coredump_params *cprm, size_t nr)
+{
+	static char zeroes[PAGE_SIZE];
+	struct file *file = cprm->file;
+	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+		if (cprm->written + nr > cprm->limit)
+			return 0;
+		if (dump_interrupted() ||
+		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
+			return 0;
+		cprm->written += nr;
+		return 1;
+	} else {
+		while (nr > PAGE_SIZE) {
+			if (!dump_emit(cprm, zeroes, PAGE_SIZE))
+				return 0;
+			nr -= PAGE_SIZE;
+		}
+		return dump_emit(cprm, zeroes, nr);
+	}
+}
+EXPORT_SYMBOL(dump_skip);
+
+int dump_align(struct coredump_params *cprm, int align)
+{
+	unsigned mod = cprm->written & (align - 1);
+	if (align & (align - 1))
+		return -EINVAL;
+	return mod ? dump_skip(cprm, align - mod) : 0;
+}
+EXPORT_SYMBOL(dump_align);
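
dump_emit() above replaces the old single f_op->write call with a vfs_write() loop: a short write simply advances the position and retries, and the running total is checked against the core size limit. The same loop shape in userspace on top of write(2) (full_write() is an invented name):

	#include <errno.h>
	#include <string.h>
	#include <unistd.h>

	/* Returns 1 on success, 0 on failure, like dump_emit(). */
	static int full_write(int fd, const void *addr, size_t nr)
	{
		const char *p = addr;

		while (nr) {
			ssize_t n = write(fd, p, nr);

			if (n < 0 && errno == EINTR)
				continue;	/* interrupted, not an error */
			if (n <= 0)
				return 0;
			p += n;		/* a short write just advances */
			nr -= n;
		}
		return 1;
	}

	int main(void)
	{
		const char msg[] = "core data\n";

		return full_write(STDOUT_FILENO, msg, strlen(msg)) ? 0 : 1;
	}

Note also that dump_align() uses the standard align & (align - 1) test to reject non-power-of-two alignments, which is what makes its written & (align - 1) remainder arithmetic valid.
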
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
index cd06466..11b29d4 100644
--- a/fs/cramfs/Kconfig
+++ b/fs/cramfs/Kconfig
@@ -1,5 +1,5 @@
 config CRAMFS
-	tristate "Compressed ROM file system support (cramfs)"
+	tristate "Compressed ROM file system support (cramfs) (OBSOLETE)"
 	depends on BLOCK
 	select ZLIB_INFLATE
 	help
@@ -16,4 +16,7 @@
 	  cramfs.  Note that the root file system (the one containing the
 	  directory /) cannot be compiled as a module.
 
+	  This filesystem is obsoleted by SquashFS, which is much better
+	  in terms of performance and features.
+
 	  If unsure, say N.
diff --git a/fs/dcache.c b/fs/dcache.c
index ae6ebb8..0a38ef8 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -343,6 +343,7 @@
 	__releases(dentry->d_inode->i_lock)
 {
 	struct inode *inode = dentry->d_inode;
+	__d_clear_type(dentry);
 	dentry->d_inode = NULL;
 	hlist_del_init(&dentry->d_alias);
 	dentry_rcuwalk_barrier(dentry);
@@ -483,27 +484,6 @@
 	return parent;
 }
 
-/*
- * Unhash a dentry without inserting an RCU walk barrier or checking that
- * dentry->d_lock is locked.  The caller must take care of that, if
- * appropriate.
- */
-static void __d_shrink(struct dentry *dentry)
-{
-	if (!d_unhashed(dentry)) {
-		struct hlist_bl_head *b;
-		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
-			b = &dentry->d_sb->s_anon;
-		else
-			b = d_hash(dentry->d_parent, dentry->d_name.hash);
-
-		hlist_bl_lock(b);
-		__hlist_bl_del(&dentry->d_hash);
-		dentry->d_hash.pprev = NULL;
-		hlist_bl_unlock(b);
-	}
-}
-
 /**
  * d_drop - drop a dentry
  * @dentry: dentry to drop
@@ -522,7 +502,21 @@
 void __d_drop(struct dentry *dentry)
 {
 	if (!d_unhashed(dentry)) {
-		__d_shrink(dentry);
+		struct hlist_bl_head *b;
+		/*
+		 * Hashed dentries are normally on the dentry hashtable,
+		 * with the exception of those newly allocated by
+		 * d_obtain_alias, which are always IS_ROOT:
+		 */
+		if (unlikely(IS_ROOT(dentry)))
+			b = &dentry->d_sb->s_anon;
+		else
+			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+
+		hlist_bl_lock(b);
+		__hlist_bl_del(&dentry->d_hash);
+		dentry->d_hash.pprev = NULL;
+		hlist_bl_unlock(b);
 		dentry_rcuwalk_barrier(dentry);
 	}
 }
@@ -1076,116 +1070,6 @@
 EXPORT_SYMBOL(shrink_dcache_sb);
 
 /*
- * destroy a single subtree of dentries for unmount
- * - see the comments on shrink_dcache_for_umount() for a description of the
- *   locking
- */
-static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
-{
-	struct dentry *parent;
-
-	BUG_ON(!IS_ROOT(dentry));
-
-	for (;;) {
-		/* descend to the first leaf in the current subtree */
-		while (!list_empty(&dentry->d_subdirs))
-			dentry = list_entry(dentry->d_subdirs.next,
-					    struct dentry, d_u.d_child);
-
-		/* consume the dentries from this leaf up through its parents
-		 * until we find one with children or run out altogether */
-		do {
-			struct inode *inode;
-
-			/*
-			 * inform the fs that this dentry is about to be
-			 * unhashed and destroyed.
-			 */
-			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
-			    !d_unhashed(dentry))
-				dentry->d_op->d_prune(dentry);
-
-			dentry_lru_del(dentry);
-			__d_shrink(dentry);
-
-			if (dentry->d_lockref.count != 0) {
-				printk(KERN_ERR
-				       "BUG: Dentry %p{i=%lx,n=%s}"
-				       " still in use (%d)"
-				       " [unmount of %s %s]\n",
-				       dentry,
-				       dentry->d_inode ?
-				       dentry->d_inode->i_ino : 0UL,
-				       dentry->d_name.name,
-				       dentry->d_lockref.count,
-				       dentry->d_sb->s_type->name,
-				       dentry->d_sb->s_id);
-				BUG();
-			}
-
-			if (IS_ROOT(dentry)) {
-				parent = NULL;
-				list_del(&dentry->d_u.d_child);
-			} else {
-				parent = dentry->d_parent;
-				parent->d_lockref.count--;
-				list_del(&dentry->d_u.d_child);
-			}
-
-			inode = dentry->d_inode;
-			if (inode) {
-				dentry->d_inode = NULL;
-				hlist_del_init(&dentry->d_alias);
-				if (dentry->d_op && dentry->d_op->d_iput)
-					dentry->d_op->d_iput(dentry, inode);
-				else
-					iput(inode);
-			}
-
-			d_free(dentry);
-
-			/* finished when we fall off the top of the tree,
-			 * otherwise we ascend to the parent and move to the
-			 * next sibling if there is one */
-			if (!parent)
-				return;
-			dentry = parent;
-		} while (list_empty(&dentry->d_subdirs));
-
-		dentry = list_entry(dentry->d_subdirs.next,
-				    struct dentry, d_u.d_child);
-	}
-}
-
-/*
- * destroy the dentries attached to a superblock on unmounting
- * - we don't need to use dentry->d_lock because:
- *   - the superblock is detached from all mountings and open files, so the
- *     dentry trees will not be rearranged by the VFS
- *   - s_umount is write-locked, so the memory pressure shrinker will ignore
- *     any dentries belonging to this superblock that it comes across
- *   - the filesystem itself is no longer permitted to rearrange the dentries
- *     in this superblock
- */
-void shrink_dcache_for_umount(struct super_block *sb)
-{
-	struct dentry *dentry;
-
-	if (down_read_trylock(&sb->s_umount))
-		BUG();
-
-	dentry = sb->s_root;
-	sb->s_root = NULL;
-	dentry->d_lockref.count--;
-	shrink_dcache_for_umount_subtree(dentry);
-
-	while (!hlist_bl_empty(&sb->s_anon)) {
-		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
-		shrink_dcache_for_umount_subtree(dentry);
-	}
-}
-
-/*
  * This tries to ascend one level of parenthood, but
  * we can race with renaming, so we need to re-check
  * the parenthood after dropping the lock and check
@@ -1478,6 +1362,91 @@
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
 
+static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+{
+	struct select_data *data = _data;
+	enum d_walk_ret ret = D_WALK_CONTINUE;
+
+	if (dentry->d_lockref.count) {
+		dentry_lru_del(dentry);
+		if (likely(!list_empty(&dentry->d_subdirs)))
+			goto out;
+		if (dentry == data->start && dentry->d_lockref.count == 1)
+			goto out;
+		printk(KERN_ERR
+		       "BUG: Dentry %p{i=%lx,n=%s}"
+		       " still in use (%d)"
+		       " [unmount of %s %s]\n",
+		       dentry,
+		       dentry->d_inode ?
+		       dentry->d_inode->i_ino : 0UL,
+		       dentry->d_name.name,
+		       dentry->d_lockref.count,
+		       dentry->d_sb->s_type->name,
+		       dentry->d_sb->s_id);
+		BUG();
+	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
+		/*
+		 * We can't use d_lru_shrink_move() because we
+		 * need to get the global LRU lock and do the
+		 * LRU accounting.
+		 */
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		d_shrink_add(dentry, &data->dispose);
+		data->found++;
+		ret = D_WALK_NORETRY;
+	}
+out:
+	if (data->found && need_resched())
+		ret = D_WALK_QUIT;
+	return ret;
+}
+
+/*
+ * destroy the dentries attached to a superblock on unmounting
+ */
+void shrink_dcache_for_umount(struct super_block *sb)
+{
+	struct dentry *dentry;
+
+	if (down_read_trylock(&sb->s_umount))
+		BUG();
+
+	dentry = sb->s_root;
+	sb->s_root = NULL;
+	for (;;) {
+		struct select_data data;
+
+		INIT_LIST_HEAD(&data.dispose);
+		data.start = dentry;
+		data.found = 0;
+
+		d_walk(dentry, &data, umount_collect, NULL);
+		if (!data.found)
+			break;
+
+		shrink_dentry_list(&data.dispose);
+		cond_resched();
+	}
+	d_drop(dentry);
+	dput(dentry);
+
+	while (!hlist_bl_empty(&sb->s_anon)) {
+		struct select_data data;
+		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
+
+		INIT_LIST_HEAD(&data.dispose);
+		data.start = NULL;
+		data.found = 0;
+
+		d_walk(dentry, &data, umount_collect, NULL);
+		if (data.found)
+			shrink_dentry_list(&data.dispose);
+		cond_resched();
+	}
+}
+
 static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
 {
 	struct select_data *data = _data;
@@ -1638,12 +1607,17 @@
 }
 EXPORT_SYMBOL(d_alloc);
 
+/**
+ * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
+ * @sb: the superblock
+ * @name: qstr of the name
+ *
+ * For a filesystem that just pins its dentries in memory and never
+ * performs lookups at all, return an unhashed IS_ROOT dentry.
+ */
 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
 {
-	struct dentry *dentry = __d_alloc(sb, name);
-	if (dentry)
-		dentry->d_flags |= DCACHE_DISCONNECTED;
-	return dentry;
+	return __d_alloc(sb, name);
 }
 EXPORT_SYMBOL(d_alloc_pseudo);
 
@@ -1685,14 +1659,42 @@
 }
 EXPORT_SYMBOL(d_set_d_op);
 
+static unsigned d_flags_for_inode(struct inode *inode)
+{
+	unsigned add_flags = DCACHE_FILE_TYPE;
+
+	if (!inode)
+		return DCACHE_MISS_TYPE;
+
+	if (S_ISDIR(inode->i_mode)) {
+		add_flags = DCACHE_DIRECTORY_TYPE;
+		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
+			if (unlikely(!inode->i_op->lookup))
+				add_flags = DCACHE_AUTODIR_TYPE;
+			else
+				inode->i_opflags |= IOP_LOOKUP;
+		}
+	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+		if (unlikely(inode->i_op->follow_link))
+			add_flags = DCACHE_SYMLINK_TYPE;
+		else
+			inode->i_opflags |= IOP_NOFOLLOW;
+	}
+
+	if (unlikely(IS_AUTOMOUNT(inode)))
+		add_flags |= DCACHE_NEED_AUTOMOUNT;
+	return add_flags;
+}
+
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
+	unsigned add_flags = d_flags_for_inode(inode);
+
 	spin_lock(&dentry->d_lock);
-	if (inode) {
-		if (unlikely(IS_AUTOMOUNT(inode)))
-			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+	dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
+	dentry->d_flags |= add_flags;
+	if (inode)
 		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
-	}
 	dentry->d_inode = inode;
 	dentry_rcuwalk_barrier(dentry);
 	spin_unlock(&dentry->d_lock);
@@ -1801,6 +1803,33 @@
 
 EXPORT_SYMBOL(d_instantiate_unique);
 
+/**
+ * d_instantiate_no_diralias - instantiate a non-aliased dentry
+ * @entry: dentry to complete
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry.  If a directory alias is found, then
+ * return an error (and drop inode).  Together with d_materialise_unique() this
+ * guarantees that a directory inode may never have more than one alias.
+ */
+int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
+{
+	BUG_ON(!hlist_unhashed(&entry->d_alias));
+
+	spin_lock(&inode->i_lock);
+	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
+		spin_unlock(&inode->i_lock);
+		iput(inode);
+		return -EBUSY;
+	}
+	__d_instantiate(entry, inode);
+	spin_unlock(&inode->i_lock);
+	security_d_instantiate(entry, inode);
+
+	return 0;
+}
+EXPORT_SYMBOL(d_instantiate_no_diralias);
+
 struct dentry *d_make_root(struct inode *root_inode)
 {
 	struct dentry *res = NULL;
@@ -1870,6 +1899,7 @@
 	static const struct qstr anonstring = QSTR_INIT("/", 1);
 	struct dentry *tmp;
 	struct dentry *res;
+	unsigned add_flags;
 
 	if (!inode)
 		return ERR_PTR(-ESTALE);
@@ -1895,9 +1925,11 @@
 	}
 
 	/* attach a disconnected dentry */
+	add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
+
 	spin_lock(&tmp->d_lock);
 	tmp->d_inode = inode;
-	tmp->d_flags |= DCACHE_DISCONNECTED;
+	tmp->d_flags |= add_flags;
 	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
 	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
@@ -2574,7 +2606,7 @@
 	dentry_lock_for_move(dentry, target);
 
 	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&target->d_seq);
+	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
 
 	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
 
@@ -2706,7 +2738,7 @@
 	dentry_lock_for_move(anon, dentry);
 
 	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&anon->d_seq);
+	write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
 
 	dparent = dentry->d_parent;
 
@@ -2725,7 +2757,6 @@
 	spin_unlock(&dentry->d_lock);
 
 	/* anon->d_lock still locked, returns locked */
-	anon->d_flags &= ~DCACHE_DISCONNECTED;
 }
 
 /**
@@ -2881,27 +2912,36 @@
 			const struct path *root,
 			char **buffer, int *buflen)
 {
-	struct dentry *dentry = path->dentry;
-	struct vfsmount *vfsmnt = path->mnt;
-	struct mount *mnt = real_mount(vfsmnt);
+	struct dentry *dentry;
+	struct vfsmount *vfsmnt;
+	struct mount *mnt;
 	int error = 0;
-	unsigned seq = 0;
+	unsigned seq, m_seq = 0;
 	char *bptr;
 	int blen;
 
 	rcu_read_lock();
+restart_mnt:
+	read_seqbegin_or_lock(&mount_lock, &m_seq);
+	seq = 0;
+	rcu_read_lock();
 restart:
 	bptr = *buffer;
 	blen = *buflen;
+	error = 0;
+	dentry = path->dentry;
+	vfsmnt = path->mnt;
+	mnt = real_mount(vfsmnt);
 	read_seqbegin_or_lock(&rename_lock, &seq);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
 			/* Global root? */
-			if (mnt_has_parent(mnt)) {
-				dentry = mnt->mnt_mountpoint;
-				mnt = mnt->mnt_parent;
+			if (mnt != parent) {
+				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
+				mnt = parent;
 				vfsmnt = &mnt->mnt;
 				continue;
 			}
@@ -2936,6 +2976,14 @@
 	}
 	done_seqretry(&rename_lock, seq);
 
+	if (!(m_seq & 1))
+		rcu_read_unlock();
+	if (need_seqretry(&mount_lock, m_seq)) {
+		m_seq = 1;
+		goto restart_mnt;
+	}
+	done_seqretry(&mount_lock, m_seq);
+
 	if (error >= 0 && bptr == *buffer) {
 		if (--blen < 0)
 			error = -ENAMETOOLONG;
@@ -2971,9 +3019,7 @@
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
-	br_read_lock(&vfsmount_lock);
 	error = prepend_path(path, root, &res, &buflen);
-	br_read_unlock(&vfsmount_lock);
 
 	if (error < 0)
 		return ERR_PTR(error);
@@ -2990,9 +3036,7 @@
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
-	br_read_lock(&vfsmount_lock);
 	error = prepend_path(path, &root, &res, &buflen);
-	br_read_unlock(&vfsmount_lock);
 
 	if (error > 1)
 		error = -EINVAL;
@@ -3067,9 +3111,7 @@
 
 	rcu_read_lock();
 	get_fs_root_rcu(current->fs, &root);
-	br_read_lock(&vfsmount_lock);
 	error = path_with_deleted(path, &root, &res, &buflen);
-	br_read_unlock(&vfsmount_lock);
 	rcu_read_unlock();
 
 	if (error < 0)
@@ -3224,7 +3266,6 @@
 	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
 
 	error = -ENOENT;
-	br_read_lock(&vfsmount_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
 		char *cwd = page + PATH_MAX;
@@ -3232,7 +3273,6 @@
 
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &root, &cwd, &buflen);
-		br_read_unlock(&vfsmount_lock);
 		rcu_read_unlock();
 
 		if (error < 0)
@@ -3253,7 +3293,6 @@
 				error = -EFAULT;
 		}
 	} else {
-		br_read_unlock(&vfsmount_lock);
 		rcu_read_unlock();
 	}
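
The prepend_path() rework samples mount_lock with read_seqbegin_or_lock() and restarts from the top whenever a concurrent mount-tree change bumps the sequence, falling back to taking the lock outright on the second pass. A compressed userspace sketch of just the lockless-retry half, using C11 atomics (the plain int data accesses stand in for what the kernel protects with proper barriers; the take-the-lock fallback is omitted):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint seq;		/* even = stable, odd = writer active */
	static int data_a, data_b;

	static void writer_update(int a, int b)
	{
		atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd */
		data_a = a;
		data_b = b;
		atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even */
	}

	static void reader_snapshot(int *a, int *b)
	{
		unsigned s;

		for (;;) {
			s = atomic_load_explicit(&seq, memory_order_acquire);
			if (s & 1)
				continue;	/* writer mid-update, resample */
			*a = data_a;
			*b = data_b;
			if (atomic_load_explicit(&seq,
						 memory_order_acquire) == s)
				return;		/* no writer interleaved */
		}
	}

	int main(void)
	{
		int a, b;

		writer_update(1, 2);
		reader_snapshot(&a, &b);
		printf("consistent pair: %d %d\n", a, b);
		return 0;
	}

Readers never block writers, at the cost of occasionally redoing the whole walk; the "_or_lock" variant bounds that retrying by degrading to the lock after one failed pass.
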
 
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index c7c83ff..9c0444c 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -566,8 +566,7 @@
 	mutex_lock(&parent->d_inode->i_mutex);
 
 	if (child != dentry) {
-		next = list_entry(child->d_u.d_child.next, struct dentry,
-					d_u.d_child);
+		next = list_next_entry(child, d_u.d_child);
 		goto up;
 	}
 
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 073d30b..a726b9f 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -498,6 +498,7 @@
 {
 	struct pts_fs_info *fsi = DEVPTS_SB(sb);
 
+	ida_destroy(&fsi->allocated_ptys);
 	kfree(fsi);
 	kill_litter_super(sb);
 }
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 88556dc..d5abafd 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -706,9 +706,7 @@
 {
 	struct dlm_lkb *lkb = p;
 
-	if (!lkb->lkb_nodeid)
-		return 1;
-	return 0;
+	return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
 }
 
 static int lkb_idr_is_any(int id, void *p, void *data)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 000eae2..2f6735d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -392,7 +392,7 @@
 
 		wait_for_completion(&ecr->completion);
 		rc = ecr->rc;
-		INIT_COMPLETION(ecr->completion);
+		reinit_completion(&ecr->completion);
 	}
 out:
 	ablkcipher_request_free(req);
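
The one-line change above is part of a tree-wide rename of INIT_COMPLETION() to reinit_completion(), used when a completion is waited on repeatedly in a loop. A rough userspace analogue built from a mutex/condvar pair, where reinitialization is just clearing the done flag (all names invented):

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	/* Reset so the same object can be waited on again. */
	static void reinit_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 0;
		pthread_mutex_unlock(&c->lock);
	}

	static void *worker(void *arg)
	{
		complete(arg);
		return NULL;
	}

	int main(void)
	{
		struct completion c = { PTHREAD_MUTEX_INITIALIZER,
					PTHREAD_COND_INITIALIZER, 0 };

		for (int i = 0; i < 2; i++) {	/* reuse across iterations */
			pthread_t t;

			pthread_create(&t, NULL, worker, &c);
			wait_for_completion(&c);
			pthread_join(&t, NULL);
			reinit_completion(&c);
			printf("round %d complete\n", i);
		}
		return 0;
	}
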
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index bf12ba5..4000f6b 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -44,15 +44,15 @@
  */
 static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-	struct dentry *lower_dentry;
-	int rc = 1;
+	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+	int rc;
+
+	if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+		return 1;
 
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
 
-	lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
-		goto out;
 	rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
 	if (dentry->d_inode) {
 		struct inode *lower_inode =
@@ -60,12 +60,17 @@
 
 		fsstack_copy_attr_all(dentry->d_inode, lower_inode);
 	}
-out:
 	return rc;
 }
 
 struct kmem_cache *ecryptfs_dentry_info_cache;
 
+static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
+{
+	kmem_cache_free(ecryptfs_dentry_info_cache,
+		container_of(head, struct ecryptfs_dentry_info, rcu));
+}
+
 /**
  * ecryptfs_d_release
  * @dentry: The ecryptfs dentry
@@ -74,15 +79,11 @@
  */
 static void ecryptfs_d_release(struct dentry *dentry)
 {
-	if (ecryptfs_dentry_to_private(dentry)) {
-		if (ecryptfs_dentry_to_lower(dentry)) {
-			dput(ecryptfs_dentry_to_lower(dentry));
-			mntput(ecryptfs_dentry_to_lower_mnt(dentry));
-		}
-		kmem_cache_free(ecryptfs_dentry_info_cache,
-				ecryptfs_dentry_to_private(dentry));
+	struct ecryptfs_dentry_info *p = dentry->d_fsdata;
+	if (p) {
+		path_put(&p->lower_path);
+		call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
 	}
-	return;
 }
 
 const struct dentry_operations ecryptfs_dops = {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index df19d34..90d1882 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -261,7 +261,10 @@
  * vfsmount too. */
 struct ecryptfs_dentry_info {
 	struct path lower_path;
-	struct ecryptfs_crypt_stat *crypt_stat;
+	union {
+		struct ecryptfs_crypt_stat *crypt_stat;
+		struct rcu_head rcu;
+	};
 };
 
 /**
@@ -512,13 +515,6 @@
 	return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
 }
 
-static inline void
-ecryptfs_set_dentry_lower(struct dentry *dentry, struct dentry *lower_dentry)
-{
-	((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry =
-		lower_dentry;
-}
-
 static inline struct vfsmount *
 ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
 {
@@ -531,13 +527,6 @@
 	return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
 }
 
-static inline void
-ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
-{
-	((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt =
-		lower_mnt;
-}
-
 #define ecryptfs_printk(type, fmt, arg...) \
         __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
 __printf(1, 2)
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 992cf95..2229a74 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -271,7 +271,7 @@
 {
 	struct file *lower_file = ecryptfs_file_to_lower(file);
 
-	if (lower_file->f_op && lower_file->f_op->flush) {
+	if (lower_file->f_op->flush) {
 		filemap_write_and_wait(file->f_mapping);
 		return lower_file->f_op->flush(lower_file, td);
 	}
@@ -305,7 +305,7 @@
 	struct file *lower_file = NULL;
 
 	lower_file = ecryptfs_file_to_lower(file);
-	if (lower_file->f_op && lower_file->f_op->fasync)
+	if (lower_file->f_op->fasync)
 		rc = lower_file->f_op->fasync(fd, lower_file, flag);
 	return rc;
 }
@@ -318,7 +318,7 @@
 
 	if (ecryptfs_file_to_private(file))
 		lower_file = ecryptfs_file_to_lower(file);
-	if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
+	if (lower_file->f_op->unlocked_ioctl)
 		rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
 	return rc;
 }
@@ -332,7 +332,7 @@
 
 	if (ecryptfs_file_to_private(file))
 		lower_file = ecryptfs_file_to_lower(file);
-	if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl)
+	if (lower_file->f_op && lower_file->f_op->compat_ioctl)
 		rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
 	return rc;
 }
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 67e9b63..c36c448 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -153,7 +153,7 @@
 
 	dget(lower_dentry);
 	lower_dir_dentry = lock_parent(lower_dentry);
-	rc = vfs_unlink(lower_dir_inode, lower_dentry);
+	rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
 	if (rc) {
 		printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
 		goto out_unlock;
@@ -208,7 +208,7 @@
 	inode = __ecryptfs_get_inode(lower_dentry->d_inode,
 				     directory_inode->i_sb);
 	if (IS_ERR(inode)) {
-		vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
+		vfs_unlink(lower_dir_dentry->d_inode, lower_dentry, NULL);
 		goto out_lock;
 	}
 	fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
@@ -361,8 +361,8 @@
 	BUG_ON(!d_count(lower_dentry));
 
 	ecryptfs_set_dentry_private(dentry, dentry_info);
-	ecryptfs_set_dentry_lower(dentry, lower_dentry);
-	ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
+	dentry_info->lower_path.mnt = lower_mnt;
+	dentry_info->lower_path.dentry = lower_dentry;
 
 	if (!lower_dentry->d_inode) {
 		/* We want to add because we couldn't find in lower */
@@ -475,7 +475,7 @@
 	dget(lower_new_dentry);
 	lower_dir_dentry = lock_parent(lower_new_dentry);
 	rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
-		      lower_new_dentry);
+		      lower_new_dentry, NULL);
 	if (rc || !lower_new_dentry->d_inode)
 		goto out_lock;
 	rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
@@ -640,7 +640,8 @@
 		goto out_lock;
 	}
 	rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
-			lower_new_dir_dentry->d_inode, lower_new_dentry);
+			lower_new_dir_dentry->d_inode, lower_new_dentry,
+			NULL);
 	if (rc)
 		goto out_lock;
 	if (target_inode)
@@ -703,16 +704,6 @@
 	return NULL;
 }
 
-static void
-ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
-{
-	char *buf = nd_get_link(nd);
-	if (!IS_ERR(buf)) {
-		/* Free the char* */
-		kfree(buf);
-	}
-}
-
 /**
  * upper_size_to_lower_size
  * @crypt_stat: Crypt_stat associated with file
@@ -891,7 +882,7 @@
 		struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
 
 		mutex_lock(&lower_dentry->d_inode->i_mutex);
-		rc = notify_change(lower_dentry, &lower_ia);
+		rc = notify_change(lower_dentry, &lower_ia, NULL);
 		mutex_unlock(&lower_dentry->d_inode->i_mutex);
 	}
 	return rc;
@@ -992,7 +983,7 @@
 		lower_ia.ia_valid &= ~ATTR_MODE;
 
 	mutex_lock(&lower_dentry->d_inode->i_mutex);
-	rc = notify_change(lower_dentry, &lower_ia);
+	rc = notify_change(lower_dentry, &lower_ia, NULL);
 	mutex_unlock(&lower_dentry->d_inode->i_mutex);
 out:
 	fsstack_copy_attr_all(inode, lower_inode);
@@ -1121,7 +1112,7 @@
 const struct inode_operations ecryptfs_symlink_iops = {
 	.readlink = generic_readlink,
 	.follow_link = ecryptfs_follow_link,
-	.put_link = ecryptfs_put_link,
+	.put_link = kfree_put_link,
 	.permission = ecryptfs_permission,
 	.setattr = ecryptfs_setattr,
 	.getattr = ecryptfs_getattr_link,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index eb1c597..1b119d3 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -585,8 +585,7 @@
 
 	/* ->kill_sb() will take care of root_info */
 	ecryptfs_set_dentry_private(s->s_root, root_info);
-	ecryptfs_set_dentry_lower(s->s_root, path.dentry);
-	ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+	root_info->lower_path = path;
 
 	s->s_flags |= MS_ACTIVE;
 	return dget(s->s_root);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 810c28f..79b65c3 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -41,6 +41,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/compat.h>
+#include <linux/rculist.h>
 
 /*
  * LOCKING:
@@ -133,8 +134,12 @@
  * of these on a server and we do not want this to take another cache line.
  */
 struct epitem {
-	/* RB tree node used to link this structure to the eventpoll RB tree */
-	struct rb_node rbn;
+	union {
+		/* RB tree node links this structure to the eventpoll RB tree */
+		struct rb_node rbn;
+		/* Used to free the struct epitem */
+		struct rcu_head rcu;
+	};
 
 	/* List header used to link this structure to the eventpoll ready list */
 	struct list_head rdllink;
@@ -580,14 +585,14 @@
  * @sproc: Pointer to the scan callback.
  * @priv: Private opaque data passed to the @sproc callback.
  * @depth: The current depth of recursive f_op->poll calls.
+ * @ep_locked: caller already holds ep->mtx
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
 static int ep_scan_ready_list(struct eventpoll *ep,
 			      int (*sproc)(struct eventpoll *,
 					   struct list_head *, void *),
-			      void *priv,
-			      int depth)
+			      void *priv, int depth, bool ep_locked)
 {
 	int error, pwake = 0;
 	unsigned long flags;
@@ -598,7 +603,9 @@
 	 * We need to lock this because we could be hit by
 	 * eventpoll_release_file() and epoll_ctl().
 	 */
-	mutex_lock_nested(&ep->mtx, depth);
+
+	if (!ep_locked)
+		mutex_lock_nested(&ep->mtx, depth);
 
 	/*
 	 * Steal the ready list, and re-init the original one to the
@@ -662,7 +669,8 @@
 	}
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	mutex_unlock(&ep->mtx);
+	if (!ep_locked)
+		mutex_unlock(&ep->mtx);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -671,6 +679,12 @@
 	return error;
 }
 
+static void epi_rcu_free(struct rcu_head *head)
+{
+	struct epitem *epi = container_of(head, struct epitem, rcu);
+	kmem_cache_free(epi_cache, epi);
+}
+
 /*
  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
  * all the associated resources. Must be called with "mtx" held.
@@ -692,8 +706,7 @@
 
 	/* Remove the current item from the list of epoll hooks */
 	spin_lock(&file->f_lock);
-	if (ep_is_linked(&epi->fllink))
-		list_del_init(&epi->fllink);
+	list_del_rcu(&epi->fllink);
 	spin_unlock(&file->f_lock);
 
 	rb_erase(&epi->rbn, &ep->rbr);
@@ -704,9 +717,14 @@
 	spin_unlock_irqrestore(&ep->lock, flags);
 
 	wakeup_source_unregister(ep_wakeup_source(epi));
-
-	/* At this point it is safe to free the eventpoll item */
-	kmem_cache_free(epi_cache, epi);
+	/*
+	 * At this point it is safe to free the eventpoll item. Use the union
+	 * field epi->rcu, since we are trying to minimize the size of
+	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
+	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
+	 * use of the rbn field.
+	 */
+	call_rcu(&epi->rcu, epi_rcu_free);
 
 	atomic_long_dec(&ep->user->epoll_watches);
 
@@ -807,15 +825,34 @@
 	return 0;
 }
 
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt);
+
+struct readyevents_arg {
+	struct eventpoll *ep;
+	bool locked;
+};
+
 static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
 {
-	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
+	struct readyevents_arg *arg = priv;
+
+	return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
+				  call_nests + 1, arg->locked);
 }
 
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
 	int pollflags;
 	struct eventpoll *ep = file->private_data;
+	struct readyevents_arg arg;
+
+	/*
+	 * During ep_insert() we already hold the ep->mtx for the tfile.
+	 * Prevent re-acquisition.
+	 */
+	arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
+	arg.ep = ep;
 
 	/* Insert inside our poll wait queue */
 	poll_wait(file, &ep->poll_wait, wait);
@@ -827,7 +864,7 @@
 	 * could re-enter here.
 	 */
 	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
-				   ep_poll_readyevents_proc, ep, ep, current);
+				   ep_poll_readyevents_proc, &arg, ep, current);
 
 	return pollflags != -1 ? pollflags : 0;
 }
@@ -872,7 +909,6 @@
  */
 void eventpoll_release_file(struct file *file)
 {
-	struct list_head *lsthead = &file->f_ep_links;
 	struct eventpoll *ep;
 	struct epitem *epi;
 
@@ -890,17 +926,12 @@
 	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
 	 */
 	mutex_lock(&epmutex);
-
-	while (!list_empty(lsthead)) {
-		epi = list_first_entry(lsthead, struct epitem, fllink);
-
+	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
 		ep = epi->ep;
-		list_del_init(&epi->fllink);
 		mutex_lock_nested(&ep->mtx, 0);
 		ep_remove(ep, epi);
 		mutex_unlock(&ep->mtx);
 	}
-
 	mutex_unlock(&epmutex);
 }
 
@@ -1138,7 +1169,9 @@
 	struct file *child_file;
 	struct epitem *epi;
 
-	list_for_each_entry(epi, &file->f_ep_links, fllink) {
+	/* CTL_DEL can remove links here, but that can't increase our count */
+	rcu_read_lock();
+	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
 		child_file = epi->ep->file;
 		if (is_file_epoll(child_file)) {
 			if (list_empty(&child_file->f_ep_links)) {
@@ -1160,6 +1193,7 @@
 				"file is not an ep!\n");
 		}
 	}
+	rcu_read_unlock();
 	return error;
 }
 
@@ -1231,7 +1265,7 @@
  * Must be called with "mtx" held.
  */
 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
-		     struct file *tfile, int fd)
+		     struct file *tfile, int fd, int full_check)
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
@@ -1286,7 +1320,7 @@
 
 	/* Add the current item to the list of active epoll hook for this file */
 	spin_lock(&tfile->f_lock);
-	list_add_tail(&epi->fllink, &tfile->f_ep_links);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
 	spin_unlock(&tfile->f_lock);
 
 	/*
@@ -1297,7 +1331,7 @@
 
 	/* now check if we've created too many backpaths */
 	error = -EINVAL;
-	if (reverse_path_check())
+	if (full_check && reverse_path_check())
 		goto error_remove_epi;
 
 	/* We have to drop the new item inside our item list to keep track of it */
@@ -1327,8 +1361,7 @@
 
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
-	if (ep_is_linked(&epi->fllink))
-		list_del_init(&epi->fllink);
+	list_del_rcu(&epi->fllink);
 	spin_unlock(&tfile->f_lock);
 
 	rb_erase(&epi->rbn, &ep->rbr);
@@ -1521,7 +1554,7 @@
 	esed.maxevents = maxevents;
 	esed.events = events;
 
-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
+	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
 }
 
 static inline struct timespec ep_set_mstimeout(long ms)
@@ -1791,11 +1824,12 @@
 		struct epoll_event __user *, event)
 {
 	int error;
-	int did_lock_epmutex = 0;
+	int full_check = 0;
 	struct fd f, tf;
 	struct eventpoll *ep;
 	struct epitem *epi;
 	struct epoll_event epds;
+	struct eventpoll *tep = NULL;
 
 	error = -EFAULT;
 	if (ep_op_has_event(op) &&
@@ -1814,7 +1848,7 @@
 
 	/* The target file descriptor must support poll */
 	error = -EPERM;
-	if (!tf.file->f_op || !tf.file->f_op->poll)
+	if (!tf.file->f_op->poll)
 		goto error_tgt_fput;
 
 	/* Check if EPOLLWAKEUP is allowed */
@@ -1844,26 +1878,40 @@
 	 * and hang them on the tfile_check_list, so we can check that we
 	 * haven't created too many possible wakeup paths.
 	 *
-	 * We need to hold the epmutex across both ep_insert and ep_remove
-	 * b/c we want to make sure we are looking at a coherent view of
-	 * epoll network.
+	 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
+	 * the epoll file descriptor is attaching directly to a wakeup source,
+	 * unless the epoll file descriptor is nested. The purpose of taking the
+	 * 'epmutex' on add is to prevent complex topologies such as loops and
+	 * deep wakeup paths from forming in parallel through multiple
+	 * EPOLL_CTL_ADD operations.
 	 */
-	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
-		mutex_lock(&epmutex);
-		did_lock_epmutex = 1;
-	}
-	if (op == EPOLL_CTL_ADD) {
-		if (is_file_epoll(tf.file)) {
-			error = -ELOOP;
-			if (ep_loop_check(ep, tf.file) != 0) {
-				clear_tfile_check_list();
-				goto error_tgt_fput;
-			}
-		} else
-			list_add(&tf.file->f_tfile_llink, &tfile_check_list);
-	}
-
 	mutex_lock_nested(&ep->mtx, 0);
+	if (op == EPOLL_CTL_ADD) {
+		if (!list_empty(&f.file->f_ep_links) ||
+						is_file_epoll(tf.file)) {
+			full_check = 1;
+			mutex_unlock(&ep->mtx);
+			mutex_lock(&epmutex);
+			if (is_file_epoll(tf.file)) {
+				error = -ELOOP;
+				if (ep_loop_check(ep, tf.file) != 0) {
+					clear_tfile_check_list();
+					goto error_tgt_fput;
+				}
+			} else
+				list_add(&tf.file->f_tfile_llink,
+							&tfile_check_list);
+			mutex_lock_nested(&ep->mtx, 0);
+			if (is_file_epoll(tf.file)) {
+				tep = tf.file->private_data;
+				mutex_lock_nested(&tep->mtx, 1);
+			}
+		}
+	}
+	if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
+		tep = tf.file->private_data;
+		mutex_lock_nested(&tep->mtx, 1);
+	}
 
 	/*
 	 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
@@ -1877,10 +1925,11 @@
 	case EPOLL_CTL_ADD:
 		if (!epi) {
 			epds.events |= POLLERR | POLLHUP;
-			error = ep_insert(ep, &epds, tf.file, fd);
+			error = ep_insert(ep, &epds, tf.file, fd, full_check);
 		} else
 			error = -EEXIST;
-		clear_tfile_check_list();
+		if (full_check)
+			clear_tfile_check_list();
 		break;
 	case EPOLL_CTL_DEL:
 		if (epi)
@@ -1896,10 +1945,12 @@
 			error = -ENOENT;
 		break;
 	}
+	if (tep != NULL)
+		mutex_unlock(&tep->mtx);
 	mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
-	if (did_lock_epmutex)
+	if (full_check)
 		mutex_unlock(&epmutex);
 
 	fdput(tf);
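
struct epitem above overlays rbn and rcu in a union: once ep_remove() has taken the item off the RB tree, the node storage is dead, so the RCU callback can reuse those bytes instead of enlarging the struct. A tiny C11 sketch of the size effect (field layouts invented):

	#include <stdio.h>

	struct node { void *left, *right, *parent; };
	struct rcu_head { void *next; void (*func)(struct rcu_head *); };

	struct item_separate {
		struct node rbn;
		struct rcu_head rcu;	/* both occupy space forever */
	};

	struct item_union {
		union {
			struct node rbn;	/* valid while on the tree */
			struct rcu_head rcu;	/* valid only after removal */
		};
	};

	int main(void)
	{
		printf("separate: %zu bytes, union: %zu bytes\n",
		       sizeof(struct item_separate),
		       sizeof(struct item_union));
		return 0;
	}

The trick is only safe because the two lifetimes never overlap, which the comment added in ep_remove() above spells out.
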
diff --git a/fs/exec.c b/fs/exec.c
index 2ea437e..977319f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -106,6 +106,7 @@
  */
 SYSCALL_DEFINE1(uselib, const char __user *, library)
 {
+	struct linux_binfmt *fmt;
 	struct file *file;
 	struct filename *tmp = getname(library);
 	int error = PTR_ERR(tmp);
@@ -136,24 +137,21 @@
 	fsnotify_open(file);
 
 	error = -ENOEXEC;
-	if(file->f_op) {
-		struct linux_binfmt * fmt;
 
-		read_lock(&binfmt_lock);
-		list_for_each_entry(fmt, &formats, lh) {
-			if (!fmt->load_shlib)
-				continue;
-			if (!try_module_get(fmt->module))
-				continue;
-			read_unlock(&binfmt_lock);
-			error = fmt->load_shlib(file);
-			read_lock(&binfmt_lock);
-			put_binfmt(fmt);
-			if (error != -ENOEXEC)
-				break;
-		}
+	read_lock(&binfmt_lock);
+	list_for_each_entry(fmt, &formats, lh) {
+		if (!fmt->load_shlib)
+			continue;
+		if (!try_module_get(fmt->module))
+			continue;
 		read_unlock(&binfmt_lock);
+		error = fmt->load_shlib(file);
+		read_lock(&binfmt_lock);
+		put_binfmt(fmt);
+		if (error != -ENOEXEC)
+			break;
 	}
+	read_unlock(&binfmt_lock);
 exit:
 	fput(file);
 out:
@@ -1277,13 +1275,10 @@
  */
 int prepare_binprm(struct linux_binprm *bprm)
 {
-	umode_t mode;
-	struct inode * inode = file_inode(bprm->file);
+	struct inode *inode = file_inode(bprm->file);
+	umode_t mode = inode->i_mode;
 	int retval;
 
-	mode = inode->i_mode;
-	if (bprm->file->f_op == NULL)
-		return -EACCES;
 
 	/* clear any previous set[ug]id data from a previous binary */
 	bprm->cred->euid = current_euid();
@@ -1669,6 +1664,12 @@
 	return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
 }
 
+/*
+ * This returns the actual value of the suid_dumpable flag. For things
+ * that are using this for checking for privilege transitions, it must
+ * test against SUID_DUMP_USER rather than treating it as a boolean
+ * value.
+ */
 int get_dumpable(struct mm_struct *mm)
 {
 	return __get_dumpable(mm->flags);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index a235f00..48a359d 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -69,145 +69,162 @@
 	return NULL;
 }
 
-/*
- * Find root of a disconnected subtree and return a reference to it.
- */
-static struct dentry *
-find_disconnected_root(struct dentry *dentry)
+static bool dentry_connected(struct dentry *dentry)
 {
 	dget(dentry);
-	while (!IS_ROOT(dentry)) {
+	while (dentry->d_flags & DCACHE_DISCONNECTED) {
 		struct dentry *parent = dget_parent(dentry);
 
-		if (!(parent->d_flags & DCACHE_DISCONNECTED)) {
+		dput(dentry);
+		if (IS_ROOT(dentry)) {
 			dput(parent);
-			break;
+			return false;
 		}
+		dentry = parent;
+	}
+	dput(dentry);
+	return true;
+}
+
+static void clear_disconnected(struct dentry *dentry)
+{
+	dget(dentry);
+	while (dentry->d_flags & DCACHE_DISCONNECTED) {
+		struct dentry *parent = dget_parent(dentry);
+
+		WARN_ON_ONCE(IS_ROOT(dentry));
+
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags &= ~DCACHE_DISCONNECTED;
+		spin_unlock(&dentry->d_lock);
 
 		dput(dentry);
 		dentry = parent;
 	}
-	return dentry;
+	dput(dentry);
+}
+
+/*
+ * Reconnect a directory dentry with its parent.
+ *
+ * This can return a dentry, or NULL, or an error.
+ *
+ * In the first case the returned dentry is the parent of the given
+ * dentry, and may itself need to be reconnected to its parent.
+ *
+ * In the NULL case, a concurrent VFS operation has either renamed or
+ * removed this directory.  The concurrent operation has reconnected our
+ * dentry, so we no longer need to.
+ */
+static struct dentry *reconnect_one(struct vfsmount *mnt,
+		struct dentry *dentry, char *nbuf)
+{
+	struct dentry *parent;
+	struct dentry *tmp;
+	int err;
+
+	parent = ERR_PTR(-EACCES);
+	mutex_lock(&dentry->d_inode->i_mutex);
+	if (mnt->mnt_sb->s_export_op->get_parent)
+		parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
+	mutex_unlock(&dentry->d_inode->i_mutex);
+
+	if (IS_ERR(parent)) {
+		dprintk("%s: get_parent of %ld failed, err %d\n",
+			__func__, dentry->d_inode->i_ino, PTR_ERR(parent));
+		return parent;
+	}
+
+	dprintk("%s: find name of %lu in %lu\n", __func__,
+		dentry->d_inode->i_ino, parent->d_inode->i_ino);
+	err = exportfs_get_name(mnt, parent, nbuf, dentry);
+	if (err == -ENOENT)
+		goto out_reconnected;
+	if (err)
+		goto out_err;
+	dprintk("%s: found name: %s\n", __func__, nbuf);
+	mutex_lock(&parent->d_inode->i_mutex);
+	tmp = lookup_one_len(nbuf, parent, strlen(nbuf));
+	mutex_unlock(&parent->d_inode->i_mutex);
+	if (IS_ERR(tmp)) {
+		dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
+		goto out_err;
+	}
+	if (tmp != dentry) {
+		dput(tmp);
+		goto out_reconnected;
+	}
+	dput(tmp);
+	if (IS_ROOT(dentry)) {
+		err = -ESTALE;
+		goto out_err;
+	}
+	return parent;
+
+out_err:
+	dput(parent);
+	return ERR_PTR(err);
+out_reconnected:
+	dput(parent);
+	/*
+	 * Someone must have renamed our entry into another parent, in
+	 * which case it has been reconnected by the rename.
+	 *
+	 * Or someone removed it entirely, in which case filehandle
+	 * lookup will succeed but the directory is now IS_DEAD and
+	 * subsequent operations on it will fail.
+	 *
+	 * Alternatively, maybe there was no race at all, and the
+	 * filesystem is just corrupt and gave us a parent that doesn't
+	 * actually contain any entry pointing to this inode.  So,
+	 * double check that this worked and return -ESTALE if not:
+	 */
+	if (!dentry_connected(dentry))
+		return ERR_PTR(-ESTALE);
+	return NULL;
 }
 
 /*
  * Make sure target_dir is fully connected to the dentry tree.
  *
- * It may already be, as the flag isn't always updated when connection happens.
+ * On successful return, DCACHE_DISCONNECTED will be cleared on
+ * target_dir, and target_dir->d_parent->...->d_parent will reach the
+ * root of the filesystem.
+ *
+ * Whenever DCACHE_DISCONNECTED is unset, target_dir is fully connected.
+ * But the converse is not true: target_dir may have DCACHE_DISCONNECTED
+ * set but already be connected.  In that case we'll verify the
+ * connection to root and then clear the flag.
+ *
+ * Note that target_dir could be removed by a concurrent operation.  In
+ * that case reconnect_path may still succeed with target_dir fully
+ * connected, but further operations using the filehandle will fail when
+ * necessary (due to S_DEAD being set on the directory).
  */
 static int
 reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf)
 {
-	int noprogress = 0;
-	int err = -ESTALE;
+	struct dentry *dentry, *parent;
 
-	/*
-	 * It is possible that a confused file system might not let us complete
-	 * the path to the root.  For example, if get_parent returns a directory
-	 * in which we cannot find a name for the child.  While this implies a
-	 * very sick filesystem we don't want it to cause knfsd to spin.  Hence
-	 * the noprogress counter.  If we go through the loop 10 times (2 is
-	 * probably enough) without getting anywhere, we just give up
-	 */
-	while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
-		struct dentry *pd = find_disconnected_root(target_dir);
+	dentry = dget(target_dir);
 
-		if (!IS_ROOT(pd)) {
-			/* must have found a connected parent - great */
-			spin_lock(&pd->d_lock);
-			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
-			noprogress = 0;
-		} else if (pd == mnt->mnt_sb->s_root) {
-			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
-			spin_lock(&pd->d_lock);
-			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
-			noprogress = 0;
-		} else {
-			/*
-			 * We have hit the top of a disconnected path, try to
-			 * find parent and connect.
-			 *
-			 * Racing with some other process renaming a directory
-			 * isn't much of a problem here.  If someone renames
-			 * the directory, it will end up properly connected,
-			 * which is what we want
-			 *
-			 * Getting the parent can't be supported generically,
-			 * the locking is too icky.
-			 *
-			 * Instead we just return EACCES.  If server reboots
-			 * or inodes get flushed, you lose
-			 */
-			struct dentry *ppd = ERR_PTR(-EACCES);
-			struct dentry *npd;
+	while (dentry->d_flags & DCACHE_DISCONNECTED) {
+		BUG_ON(dentry == mnt->mnt_sb->s_root);
 
-			mutex_lock(&pd->d_inode->i_mutex);
-			if (mnt->mnt_sb->s_export_op->get_parent)
-				ppd = mnt->mnt_sb->s_export_op->get_parent(pd);
-			mutex_unlock(&pd->d_inode->i_mutex);
+		if (IS_ROOT(dentry))
+			parent = reconnect_one(mnt, dentry, nbuf);
+		else
+			parent = dget_parent(dentry);
 
-			if (IS_ERR(ppd)) {
-				err = PTR_ERR(ppd);
-				dprintk("%s: get_parent of %ld failed, err %d\n",
-					__func__, pd->d_inode->i_ino, err);
-				dput(pd);
-				break;
-			}
-
-			dprintk("%s: find name of %lu in %lu\n", __func__,
-				pd->d_inode->i_ino, ppd->d_inode->i_ino);
-			err = exportfs_get_name(mnt, ppd, nbuf, pd);
-			if (err) {
-				dput(ppd);
-				dput(pd);
-				if (err == -ENOENT)
-					/* some race between get_parent and
-					 * get_name?  just try again
-					 */
-					continue;
-				break;
-			}
-			dprintk("%s: found name: %s\n", __func__, nbuf);
-			mutex_lock(&ppd->d_inode->i_mutex);
-			npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
-			mutex_unlock(&ppd->d_inode->i_mutex);
-			if (IS_ERR(npd)) {
-				err = PTR_ERR(npd);
-				dprintk("%s: lookup failed: %d\n",
-					__func__, err);
-				dput(ppd);
-				dput(pd);
-				break;
-			}
-			/* we didn't really want npd, we really wanted
-			 * a side-effect of the lookup.
-			 * hopefully, npd == pd, though it isn't really
-			 * a problem if it isn't
-			 */
-			if (npd == pd)
-				noprogress = 0;
-			else
-				printk("%s: npd != pd\n", __func__);
-			dput(npd);
-			dput(ppd);
-			if (IS_ROOT(pd)) {
-				/* something went wrong, we have to give up */
-				dput(pd);
-				break;
-			}
-		}
-		dput(pd);
+		if (!parent)
+			break;
+		dput(dentry);
+		if (IS_ERR(parent))
+			return PTR_ERR(parent);
+		dentry = parent;
 	}
-
-	if (target_dir->d_flags & DCACHE_DISCONNECTED) {
-		/* something went wrong - oh-well */
-		if (!err)
-			err = -ESTALE;
-		return err;
-	}
-
+	dput(dentry);
+	clear_disconnected(target_dir);
 	return 0;
 }
 
@@ -215,7 +232,7 @@
 	struct dir_context ctx;
 	char *name;		/* name that was found. It already points to a
 				   buffer NAME_MAX+1 is size */
-	unsigned long ino;	/* the inum we are looking for */
+	u64 ino;		/* the inum we are looking for */
 	int found;		/* inode matched? */
 	int sequence;		/* sequence counter */
 };
@@ -255,10 +272,14 @@
 	struct inode *dir = path->dentry->d_inode;
 	int error;
 	struct file *file;
+	struct kstat stat;
+	struct path child_path = {
+		.mnt = path->mnt,
+		.dentry = child,
+	};
 	struct getdents_callback buffer = {
 		.ctx.actor = filldir_one,
 		.name = name,
-		.ino = child->d_inode->i_ino
 	};
 
 	error = -ENOTDIR;
@@ -268,6 +289,16 @@
 	if (!dir->i_fop)
 		goto out;
 	/*
+	 * inode->i_ino is unsigned long, kstat->ino is u64, so the
+	 * former would be insufficient on 32-bit hosts when the
+	 * filesystem supports 64-bit inode numbers.  So we need to
+	 * actually call ->getattr, not just read i_ino:
+	 */
+	error = vfs_getattr_nosec(&child_path, &stat);
+	if (error)
+		return error;
+	buffer.ino = stat.ino;
+	/*
 	 * Open the directory ...
 	 */
 	file = dentry_open(path, O_RDONLY, cred);
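
The exportfs hunks widen getdents_callback.ino to u64 and obtain it via vfs_getattr_nosec(), because inode->i_ino is only an unsigned long and silently truncates 64-bit inode numbers on 32-bit hosts. The truncation itself is easy to demonstrate, with uint32_t standing in for a 32-bit unsigned long:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t real_ino = 0x100000002ULL;	/* needs 64 bits */
		uint32_t i_ino = real_ino;		/* 32-bit u. long */

		printf("kstat.ino:             %" PRIu64 "\n", real_ino);
		printf("i_ino (truncated):     %" PRIu32
		       "  -- collides with inode 2\n", i_ino);
		return 0;
	}

After truncation, two distinct inodes compare equal in filldir_one(), so get_name() could match the wrong directory entry; going through ->getattr sidesteps that.
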
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index c260de6..8a33764 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -632,6 +632,8 @@
 	int count = 0;
 	ext2_fsblk_t first_block = 0;
 
+	BUG_ON(maxblocks == 0);
+
 	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
 	if (depth == 0)
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 1c33128..e98171a 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -35,6 +35,7 @@
 	int rc;
 
 	memset(&tmp, 0, sizeof(struct buffer_head));
+	tmp.b_size = 1 << inode->i_blkbits;
 	rc = ext2_get_block(inode, pgoff, &tmp, create);
 	*result = tmp.b_blocknr;
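
This pairs with the BUG_ON(maxblocks == 0) added to fs/ext2/inode.c above: get_block-style callbacks read bh->b_size as an input to learn how much mapping is requested, and the xip path was passing a fully zeroed buffer_head. A stand-in sketch of that contract (types and the 4K block size are invented for illustration):

	#include <stdio.h>

	struct buffer_head { unsigned long b_blocknr; size_t b_size; };

	/* A get_block-style callback: b_size is an *input* here. */
	static int get_block(unsigned long iblock, struct buffer_head *bh)
	{
		size_t maxblocks = bh->b_size >> 12;	/* 4K blocks */

		if (maxblocks == 0)
			return -22;	/* caller forgot to set b_size */
		bh->b_blocknr = 1000 + iblock;
		return 0;
	}

	int main(void)
	{
		struct buffer_head tmp = { 0 };	/* like the memset above */

		printf("without b_size: %d\n", get_block(5, &tmp));
		tmp.b_size = 1 << 12;		/* the fix: one block */
		printf("with b_size:    %d (block %lu)\n",
		       get_block(5, &tmp), tmp.b_blocknr);
		return 0;
	}
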
 
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c50c761..37fd31e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2825,6 +2825,10 @@
 		 * bitmap, and an inode table.
 		 */
 		overhead += ngroups * (2 + sbi->s_itb_per_group);
+
+		/* Add the journal blocks as well */
+		overhead += sbi->s_journal->j_maxlen;
+
 		sbi->s_overhead_last = overhead;
 		smp_wmb();
 		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index dc5d572..6ea7b14 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -640,6 +640,7 @@
 	struct ext4_group_desc *gdp;
 	ext4_group_t i;
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+	struct ext4_group_info *grp;
 #ifdef EXT4FS_DEBUG
 	struct ext4_super_block *es;
 	ext4_fsblk_t bitmap_count;
@@ -655,7 +656,11 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += ext4_free_group_clusters(sb, gdp);
+		grp = NULL;
+		if (EXT4_SB(sb)->s_group_info)
+			grp = ext4_get_group_info(sb, i);
+		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			desc_count += ext4_free_group_clusters(sb, gdp);
 		brelse(bitmap_bh);
 		bitmap_bh = ext4_read_block_bitmap(sb, i);
 		if (bitmap_bh == NULL)
@@ -679,7 +684,11 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += ext4_free_group_clusters(sb, gdp);
+		grp = NULL;
+		if (EXT4_SB(sb)->s_group_info)
+			grp = ext4_get_group_info(sb, i);
+		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			desc_count += ext4_free_group_clusters(sb, gdp);
 	}
 
 	return desc_count;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index af815ea..e618503 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <linux/ratelimit.h>
 #include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
@@ -1314,6 +1315,11 @@
 	unsigned long s_es_last_sorted;
 	struct percpu_counter s_extent_cache_cnt;
 	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+
+	/* Ratelimit ext4 messages. */
+	struct ratelimit_state s_err_ratelimit_state;
+	struct ratelimit_state s_warning_ratelimit_state;
+	struct ratelimit_state s_msg_ratelimit_state;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1396,7 +1402,18 @@
 	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }
 
+/* Add these declarations here only so that these functions can be
+ * found by name.  Otherwise, they are very hard to locate. */
+static inline int ext4_test_inode_flag(struct inode *inode, int bit);
+static inline void ext4_set_inode_flag(struct inode *inode, int bit);
+static inline void ext4_clear_inode_flag(struct inode *inode, int bit);
 EXT4_INODE_BIT_FNS(flag, flags, 0)
+
+/* Add these declarations here only so that these functions can be
+ * found by name.  Otherwise, they are very hard to locate. */
+static inline int ext4_test_inode_state(struct inode *inode, int bit);
+static inline void ext4_set_inode_state(struct inode *inode, int bit);
+static inline void ext4_clear_inode_state(struct inode *inode, int bit);
 #if (BITS_PER_LONG < 64)
 EXT4_INODE_BIT_FNS(state, state_flags, 0)
 
@@ -2734,8 +2751,6 @@
 					    struct inode *second);
 extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
 					  struct inode *donor_inode);
-void ext4_inode_double_lock(struct inode *inode1, struct inode *inode2);
-void ext4_inode_double_unlock(struct inode *inode1, struct inode *inode2);
 extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 			     __u64 start_orig, __u64 start_donor,
 			     __u64 len, __u64 *moved_len);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 54d52af..35f65cf 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1666,7 +1666,7 @@
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 				struct ext4_extent *ex2)
 {
-	unsigned short ext1_ee_len, ext2_ee_len, max_len;
+	unsigned short ext1_ee_len, ext2_ee_len;
 
 	/*
 	 * Make sure that both extents are initialized. We don't merge
@@ -1677,11 +1677,6 @@
 	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
 		return 0;
 
-	if (ext4_ext_is_uninitialized(ex1))
-		max_len = EXT_UNINIT_MAX_LEN;
-	else
-		max_len = EXT_INIT_MAX_LEN;
-
 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
 
@@ -1694,7 +1689,7 @@
 	 * as an RO_COMPAT feature, refuse to merge to extents if
 	 * this can result in the top bit of ee_len being set.
 	 */
-	if (ext1_ee_len + ext2_ee_len > max_len)
+	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
 		return 0;
 #ifdef AGGRESSIVE_TEST
 	if (ext1_ee_len >= 4)
@@ -1720,7 +1715,6 @@
 	struct ext4_extent_header *eh;
 	unsigned int depth, len;
 	int merge_done = 0;
-	int uninitialized = 0;
 
 	depth = ext_depth(inode);
 	BUG_ON(path[depth].p_hdr == NULL);
@@ -1730,12 +1724,8 @@
 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
 			break;
 		/* merge with next extent! */
-		if (ext4_ext_is_uninitialized(ex))
-			uninitialized = 1;
 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 				+ ext4_ext_get_actual_len(ex + 1));
-		if (uninitialized)
-			ext4_ext_mark_uninitialized(ex);
 
 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
@@ -1890,7 +1880,6 @@
 	struct ext4_ext_path *npath = NULL;
 	int depth, len, err;
 	ext4_lblk_t next;
-	unsigned uninitialized = 0;
 	int mb_flags = 0;
 
 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
@@ -1942,18 +1931,8 @@
 			if (err)
 				return err;
 
-			/*
-			 * ext4_can_extents_be_merged should have checked
-			 * that either both extents are uninitialized, or
-			 * both aren't. Thus we need to check only one of
-			 * them here.
-			 */
-			if (ext4_ext_is_uninitialized(ex))
-				uninitialized = 1;
 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 					+ ext4_ext_get_actual_len(newext));
-			if (uninitialized)
-				ext4_ext_mark_uninitialized(ex);
 			eh = path[depth].p_hdr;
 			nearex = ex;
 			goto merge;
@@ -1976,20 +1955,10 @@
 			if (err)
 				return err;
 
-			/*
-			 * ext4_can_extents_be_merged should have checked
-			 * that either both extents are uninitialized, or
-			 * both aren't. Thus we need to check only one of
-			 * them here.
-			 */
-			if (ext4_ext_is_uninitialized(ex))
-				uninitialized = 1;
 			ex->ee_block = newext->ee_block;
 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 					+ ext4_ext_get_actual_len(newext));
-			if (uninitialized)
-				ext4_ext_mark_uninitialized(ex);
 			eh = path[depth].p_hdr;
 			nearex = ex;
 			goto merge;
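Background for the EXT_INIT_MAX_LEN simplification: ee_len is a 16-bit field
whose top bit flags an uninitialized extent, which is why the two caps differ
by one; and since ext4_can_extents_be_merged() now refuses uninitialized
extents outright, only the initialized cap can ever apply.  The definitions,
as in fs/ext4/ext4_extents.h:

	#define EXT_INIT_MAX_LEN	(1UL << 15)
	#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)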
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 137193f..0ee59a6 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -432,7 +432,7 @@
 			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
 			grp = hinfo.hash;
 		} else
-			get_random_bytes(&grp, sizeof(grp));
+			grp = prandom_u32();
 		parent_group = (unsigned)grp % ngroups;
 		for (i = 0; i < ngroups; i++) {
 			g = (parent_group + i) % ngroups;
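get_random_bytes() draws on the kernel's CSPRNG, which is needlessly strong
(and comparatively slow) when all that matters is spreading the starting
group uniformly; prandom_u32() is the cheap pseudo-random alternative.  The
same substitution recurs in mmp.c and super.c further down.  The pattern,
sketched:

	u32 grp;

	grp = prandom_u32();	/* was: get_random_bytes(&grp, sizeof(grp)) */
	parent_group = (unsigned)grp % ngroups;	/* any group will do, no crypto needed */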
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index d9ecbf1..bae9875 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -994,11 +994,9 @@
 	struct inode	*dir = dentry->d_parent->d_inode;
 	const char	*name = dentry->d_name.name;
 	int		namelen = dentry->d_name.len;
-	unsigned short	reclen;
 	int		err;
 	struct ext4_dir_entry_2 *de;
 
-	reclen = EXT4_DIR_REC_LEN(namelen);
 	err = ext4_find_dest_de(dir, inode, iloc->bh,
 				inline_start, inline_size,
 				name, namelen, &de);
@@ -1442,6 +1440,7 @@
 	if (ret < 0)
 		goto out;
 
+	ret = 0;
 	sb = inode->i_sb;
 	parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
 	offset = ctx->pos;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e274e9c..0757634 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2178,6 +2178,9 @@
  *
  * @handle - handle for journal operations
  * @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ *                     is no hope of writing the data. The caller should discard
+ *                     dirty pages to avoid infinite loops.
  *
  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
  * delayed, blocks are allocated, if it is unwritten, we may need to convert
@@ -2295,6 +2298,7 @@
 	struct address_space *mapping = mpd->inode->i_mapping;
 	struct pagevec pvec;
 	unsigned int nr_pages;
+	long left = mpd->wbc->nr_to_write;
 	pgoff_t index = mpd->first_page;
 	pgoff_t end = mpd->last_page;
 	int tag;
@@ -2330,6 +2334,17 @@
 			if (page->index > end)
 				goto out;
 
+			/*
+			 * Accumulated enough dirty pages? This doesn't apply
+			 * to WB_SYNC_ALL mode. For integrity sync we have to
+			 * keep going because someone may be concurrently
+			 * dirtying pages, and we might have synced a lot of
+			 * newly appeared dirty pages, but have not synced all
+			 * of the old dirty pages.
+			 */
+			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+				goto out;
+
 			/* If we can't merge this page, we are done. */
 			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
 				goto out;
@@ -2364,19 +2379,7 @@
 			if (err <= 0)
 				goto out;
 			err = 0;
-
-			/*
-			 * Accumulated enough dirty pages? This doesn't apply
-			 * to WB_SYNC_ALL mode. For integrity sync we have to
-			 * keep going because someone may be concurrently
-			 * dirtying pages, and we might have synced a lot of
-			 * newly appeared dirty pages, but have not synced all
-			 * of the old dirty pages.
-			 */
-			if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
-			    mpd->next_page - mpd->first_page >=
-							mpd->wbc->nr_to_write)
-				goto out;
+			left--;
 		}
 		pagevec_release(&pvec);
 		cond_resched();
@@ -2420,16 +2423,15 @@
 	 * because that could violate lock ordering on umount
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		return 0;
+		goto out_writepages;
 
 	if (ext4_should_journal_data(inode)) {
 		struct blk_plug plug;
-		int ret;
 
 		blk_start_plug(&plug);
 		ret = write_cache_pages(mapping, wbc, __writepage, mapping);
 		blk_finish_plug(&plug);
-		return ret;
+		goto out_writepages;
 	}
 
 	/*
@@ -2442,8 +2444,10 @@
 	 * *never* be called, so if that ever happens, we would want
 	 * the stack trace.
 	 */
-	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
-		return -EROFS;
+	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+		ret = -EROFS;
+		goto out_writepages;
+	}
 
 	if (ext4_should_dioread_nolock(inode)) {
 		/*
@@ -4690,6 +4694,15 @@
 	generic_fillattr(inode, stat);
 
 	/*
+	 * If there is inline data in the inode, the inode will normally not
+	 * have data blocks allocated (it may have an external xattr block).
+	 * Report at least one sector for such files, so tools like tar, rsync,
+	 * and others don't incorrectly think the file is completely sparse.
+	 */
+	if (unlikely(ext4_has_inline_data(inode)))
+		stat->blocks += (stat->size + 511) >> 9;
+
+	/*
 	 * We can't update i_blocks if the block allocation is delayed
 	 * otherwise in the case of system crash before the real block
 	 * allocation is done, we will have i_blocks inconsistent with
@@ -4700,9 +4713,8 @@
 	 * blocks for this file.
 	 */
 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
-				EXT4_I(inode)->i_reserved_data_blocks);
-
-	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+				   EXT4_I(inode)->i_reserved_data_blocks);
+	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
 	return 0;
 }
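Worked numbers for the inline-data hunk above: (size + 511) >> 9 rounds the
apparent size up to 512-byte sectors, so a file whose data lives entirely in
the inode still reports a nonzero st_blocks:

	/* size   0 -> (  0 + 511) >> 9 == 0 sectors (empty stays empty)
	 * size 100 -> (100 + 511) >> 9 == 1 sector
	 * size 600 -> (600 + 511) >> 9 == 2 sectors */
	stat->blocks += (stat->size + 511) >> 9;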
 
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a569d33..60589b6 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -130,7 +130,7 @@
 
 	/* Protect orig inodes against a truncate and make sure
 	 * that only 1 swap_inode_boot_loader is running. */
-	ext4_inode_double_lock(inode, inode_bl);
+	lock_two_nondirectories(inode, inode_bl);
 
 	truncate_inode_pages(&inode->i_data, 0);
 	truncate_inode_pages(&inode_bl->i_data, 0);
@@ -205,7 +205,7 @@
 	ext4_inode_resume_unlocked_dio(inode);
 	ext4_inode_resume_unlocked_dio(inode_bl);
 
-	ext4_inode_double_unlock(inode, inode_bl);
+	unlock_two_nondirectories(inode, inode_bl);
 
 	iput(inode_bl);
 
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a41e3ba..4d113ef 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4794,8 +4794,8 @@
 					 " group:%d block:%d count:%lu failed"
 					 " with %d", block_group, bit, count,
 					 err);
-		}
-
+		} else
+			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
 
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 214461e..04434ad 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -259,7 +259,7 @@
 	u32 new_seq;
 
 	do {
-		get_random_bytes(&new_seq, sizeof(u32));
+		new_seq = prandom_u32();
 	} while (new_seq > EXT4_MMP_SEQ_MAX);
 
 	return new_seq;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 7fa4d85..773b503 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -1203,42 +1203,6 @@
 }
 
 /**
- * ext4_inode_double_lock - Lock i_mutex on both @inode1 and @inode2
- *
- * @inode1:	the inode structure
- * @inode2:	the inode structure
- *
- * Lock two inodes' i_mutex
- */
-void
-ext4_inode_double_lock(struct inode *inode1, struct inode *inode2)
-{
-	BUG_ON(inode1 == inode2);
-	if (inode1 < inode2) {
-		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
-	} else {
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
-	}
-}
-
-/**
- * ext4_inode_double_unlock - Release i_mutex on both @inode1 and @inode2
- *
- * @inode1:     the inode that is released first
- * @inode2:     the inode that is released second
- *
- */
-
-void
-ext4_inode_double_unlock(struct inode *inode1, struct inode *inode2)
-{
-	mutex_unlock(&inode1->i_mutex);
-	mutex_unlock(&inode2->i_mutex);
-}
-
-/**
  * ext4_move_extents - Exchange the specified range of a file
  *
  * @o_filp:		file structure of the original file
@@ -1327,7 +1291,7 @@
 		return -EINVAL;
 	}
 	/* Protect orig and donor inodes against a truncate */
-	ext4_inode_double_lock(orig_inode, donor_inode);
+	lock_two_nondirectories(orig_inode, donor_inode);
 
 	/* Wait for all existing dio workers */
 	ext4_inode_block_unlocked_dio(orig_inode);
@@ -1535,7 +1499,7 @@
 	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	ext4_inode_resume_unlocked_dio(orig_inode);
 	ext4_inode_resume_unlocked_dio(donor_inode);
-	ext4_inode_double_unlock(orig_inode, donor_inode);
+	unlock_two_nondirectories(orig_inode, donor_inode);
 
 	return ret;
 }
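The removed helpers are superseded by the generic VFS pair introduced in this
cycle; like the old ext4 code, lock_two_nondirectories() orders the two
i_mutex acquisitions by inode address so that concurrent callers cannot
deadlock against each other.  Usage, as in the hunks below:

	lock_two_nondirectories(orig_inode, donor_inode);
	/* ... both inodes are now stable against truncate ... */
	unlock_two_nondirectories(orig_inode, donor_inode);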
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d7d0c7b..d488f80 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -197,14 +197,15 @@
 static void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
 	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
 	struct workqueue_struct *wq;
 	unsigned long flags;
 
 	/* Only reserved conversions from writeback should enter here */
 	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-	WARN_ON(!io_end->handle);
+	WARN_ON(!io_end->handle && sbi->s_journal);
 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-	wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+	wq = sbi->rsv_conversion_wq;
 	if (list_empty(&ei->i_rsv_conversion_list))
 		queue_work(wq, &ei->i_rsv_conversion_work);
 	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2c2e6cb..c977f4e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -411,20 +411,26 @@
 			sb->s_id);
 }
 
+#define ext4_error_ratelimit(sb)					\
+		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
+			     "EXT4-fs error")
+
 void __ext4_error(struct super_block *sb, const char *function,
 		  unsigned int line, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
-	       sb->s_id, function, line, current->comm, &vaf);
-	va_end(args);
+	if (ext4_error_ratelimit(sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		printk(KERN_CRIT
+		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+		       sb->s_id, function, line, current->comm, &vaf);
+		va_end(args);
+	}
 	save_error_info(sb, function, line);
-
 	ext4_handle_error(sb);
 }
 
@@ -438,22 +444,23 @@
 
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	es->s_last_error_block = cpu_to_le64(block);
+	if (ext4_error_ratelimit(inode->i_sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		if (block)
+			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+			       "inode #%lu: block %llu: comm %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       block, current->comm, &vaf);
+		else
+			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+			       "inode #%lu: comm %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       current->comm, &vaf);
+		va_end(args);
+	}
 	save_error_info(inode->i_sb, function, line);
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	if (block)
-		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-		       "inode #%lu: block %llu: comm %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       block, current->comm, &vaf);
-	else
-		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-		       "inode #%lu: comm %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       current->comm, &vaf);
-	va_end(args);
-
 	ext4_handle_error(inode->i_sb);
 }
 
@@ -469,27 +476,28 @@
 
 	es = EXT4_SB(inode->i_sb)->s_es;
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+	if (ext4_error_ratelimit(inode->i_sb)) {
+		path = d_path(&(file->f_path), pathname, sizeof(pathname));
+		if (IS_ERR(path))
+			path = "(unknown)";
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		if (block)
+			printk(KERN_CRIT
+			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+			       "block %llu: comm %s: path %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       block, current->comm, path, &vaf);
+		else
+			printk(KERN_CRIT
+			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+			       "comm %s: path %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       current->comm, path, &vaf);
+		va_end(args);
+	}
 	save_error_info(inode->i_sb, function, line);
-	path = d_path(&(file->f_path), pathname, sizeof(pathname));
-	if (IS_ERR(path))
-		path = "(unknown)";
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	if (block)
-		printk(KERN_CRIT
-		       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-		       "block %llu: comm %s: path %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       block, current->comm, path, &vaf);
-	else
-		printk(KERN_CRIT
-		       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-		       "comm %s: path %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       current->comm, path, &vaf);
-	va_end(args);
-
 	ext4_handle_error(inode->i_sb);
 }
 
@@ -543,11 +551,13 @@
 	    (sb->s_flags & MS_RDONLY))
 		return;
 
-	errstr = ext4_decode_error(sb, errno, nbuf);
-	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
-	       sb->s_id, function, line, errstr);
-	save_error_info(sb, function, line);
+	if (ext4_error_ratelimit(sb)) {
+		errstr = ext4_decode_error(sb, errno, nbuf);
+		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+		       sb->s_id, function, line, errstr);
+	}
 
+	save_error_info(sb, function, line);
 	ext4_handle_error(sb);
 }
 
@@ -597,6 +607,9 @@
 	struct va_format vaf;
 	va_list args;
 
+	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
+		return;
+
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -610,6 +623,10 @@
 	struct va_format vaf;
 	va_list args;
 
+	if (!___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+			  "EXT4-fs warning"))
+		return;
+
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -633,18 +650,20 @@
 	es->s_last_error_block = cpu_to_le64(block);
 	__save_error_info(sb, function, line);
 
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
-	       sb->s_id, function, line, grp);
-	if (ino)
-		printk(KERN_CONT "inode %lu: ", ino);
-	if (block)
-		printk(KERN_CONT "block %llu:", (unsigned long long) block);
-	printk(KERN_CONT "%pV\n", &vaf);
-	va_end(args);
+	if (ext4_error_ratelimit(sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
+		       sb->s_id, function, line, grp);
+		if (ino)
+			printk(KERN_CONT "inode %lu: ", ino);
+		if (block)
+			printk(KERN_CONT "block %llu:",
+			       (unsigned long long) block);
+		printk(KERN_CONT "%pV\n", &vaf);
+		va_end(args);
+	}
 
 	if (test_opt(sb, ERRORS_CONT)) {
 		ext4_commit_super(sb, 0);
@@ -2606,6 +2625,12 @@
 EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
 
 static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(delayed_allocation_blocks),
@@ -2623,6 +2648,12 @@
 	ATTR_LIST(max_writeback_mb_bump),
 	ATTR_LIST(extent_max_zeroout_kb),
 	ATTR_LIST(trigger_fs_error),
+	ATTR_LIST(err_ratelimit_interval_ms),
+	ATTR_LIST(err_ratelimit_burst),
+	ATTR_LIST(warning_ratelimit_interval_ms),
+	ATTR_LIST(warning_ratelimit_burst),
+	ATTR_LIST(msg_ratelimit_interval_ms),
+	ATTR_LIST(msg_ratelimit_burst),
 	NULL,
 };
 
@@ -3037,7 +3068,6 @@
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_li_request *elr;
-	unsigned long rnd;
 
 	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
 	if (!elr)
@@ -3052,10 +3082,8 @@
 	 * spread the inode table initialization requests
 	 * better.
 	 */
-	get_random_bytes(&rnd, sizeof(rnd));
-	elr->lr_next_sched = jiffies + (unsigned long)rnd %
-			     (EXT4_DEF_LI_MAX_START_DELAY * HZ);
-
+	elr->lr_next_sched = jiffies + (prandom_u32() %
+				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
 	return elr;
 }
 
@@ -4118,6 +4146,11 @@
 	if (es->s_error_count)
 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
+	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
+	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
+	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
+	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+
 	kfree(orig_data);
 	return 0;
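The three per-superblock states use the stock include/linux/ratelimit.h
machinery, with .interval and .burst exported through the new sysfs
attributes above.  Distilled to its core (a standalone sketch, not the exact
ext4 wrapper):

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);	/* 10 messages per 5 s */

	if (___ratelimit(&rs, "EXT4-fs error"))	/* false once the burst is spent */
		printk(KERN_CRIT "EXT4-fs error ...\n");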
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 03e9beb..1423c48 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1352,6 +1352,7 @@
 					new_extra_isize = s_min_extra_isize;
 					kfree(is); is = NULL;
 					kfree(bs); bs = NULL;
+					brelse(bh);
 					goto retry;
 				}
 				error = -1;
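The one-line brelse() plugs a buffer_head leak on the retry path: every
successful read takes a reference that must be dropped before looping back.
The shape of the bug in miniature (the condition name is invented):

	retry:
		bh = sb_bread(sb, block);	/* each read grabs a reference */
		if (need_larger_isize) {	/* hypothetical retry condition */
			brelse(bh);		/* the fix: drop it before looping */
			goto retry;
		}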
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index e06e099..214fe10 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -63,3 +63,11 @@
 	  the extended attribute support in advance.
 
 	  If you are not using a security module, say N.
+
+config F2FS_CHECK_FS
+	bool "F2FS consistency checking feature"
+	depends on F2FS_FS
+	help
+	  Enables BUG_ONs which check the file system consistency at runtime.
+
+	  If you want to improve performance, say N.
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index b7826ec..d0fc287 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -205,7 +205,8 @@
 	return acl;
 }
 
-static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+static int f2fs_set_acl(struct inode *inode, int type,
+			struct posix_acl *acl, struct page *ipage)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -250,7 +251,7 @@
 		}
 	}
 
-	error = f2fs_setxattr(inode, name_index, "", value, size, NULL);
+	error = f2fs_setxattr(inode, name_index, "", value, size, ipage);
 
 	kfree(value);
 	if (!error)
@@ -260,10 +261,10 @@
 	return error;
 }
 
-int f2fs_init_acl(struct inode *inode, struct inode *dir)
+int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage)
 {
-	struct posix_acl *acl = NULL;
 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+	struct posix_acl *acl = NULL;
 	int error = 0;
 
 	if (!S_ISLNK(inode->i_mode)) {
@@ -276,19 +277,19 @@
 			inode->i_mode &= ~current_umask();
 	}
 
-	if (test_opt(sbi, POSIX_ACL) && acl) {
+	if (!test_opt(sbi, POSIX_ACL) || !acl)
+		goto cleanup;
 
-		if (S_ISDIR(inode->i_mode)) {
-			error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
-			if (error)
-				goto cleanup;
-		}
-		error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
-		if (error < 0)
-			return error;
-		if (error > 0)
-			error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+	if (S_ISDIR(inode->i_mode)) {
+		error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl, ipage);
+		if (error)
+			goto cleanup;
 	}
+	error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
+	if (error < 0)
+		return error;
+	if (error > 0)
+		error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, ipage);
 cleanup:
 	posix_acl_release(acl);
 	return error;
@@ -313,7 +314,8 @@
 	error = posix_acl_chmod(&acl, GFP_KERNEL, mode);
 	if (error)
 		return error;
-	error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+
+	error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, NULL);
 	posix_acl_release(acl);
 	return error;
 }
@@ -388,7 +390,7 @@
 		acl = NULL;
 	}
 
-	error = f2fs_set_acl(inode, type, acl);
+	error = f2fs_set_acl(inode, type, acl, NULL);
 
 release_and_out:
 	posix_acl_release(acl);
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 80f4306..4963313 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -36,9 +36,9 @@
 
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
 
-extern struct posix_acl *f2fs_get_acl(struct inode *inode, int type);
-extern int f2fs_acl_chmod(struct inode *inode);
-extern int f2fs_init_acl(struct inode *inode, struct inode *dir);
+extern struct posix_acl *f2fs_get_acl(struct inode *, int);
+extern int f2fs_acl_chmod(struct inode *);
+extern int f2fs_init_acl(struct inode *, struct inode *, struct page *);
 #else
 #define f2fs_check_acl	NULL
 #define f2fs_get_acl	NULL
@@ -49,7 +49,8 @@
 	return 0;
 }
 
-static inline int f2fs_init_acl(struct inode *inode, struct inode *dir)
+static inline int f2fs_init_acl(struct inode *inode, struct inode *dir,
+							struct page *page)
 {
 	return 0;
 }
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index bb31220..5716e5e 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -81,7 +81,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
 	/* Should not write any meta pages, if any IO error occurred */
-	if (wbc->for_reclaim ||
+	if (wbc->for_reclaim || sbi->por_doing ||
 			is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
 		dec_page_count(sbi, F2FS_DIRTY_META);
 		wbc->pages_skipped++;
@@ -142,8 +142,8 @@
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			lock_page(page);
-			BUG_ON(page->mapping != mapping);
-			BUG_ON(!PageDirty(page));
+			f2fs_bug_on(page->mapping != mapping);
+			f2fs_bug_on(!PageDirty(page));
 			clear_page_dirty_for_io(page);
 			if (f2fs_write_meta_page(page, &wbc)) {
 				unlock_page(page);
@@ -167,6 +167,8 @@
 	struct address_space *mapping = page->mapping;
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
 
+	trace_f2fs_set_page_dirty(page, META);
+
 	SetPageUptodate(page);
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
@@ -206,6 +208,7 @@
 void release_orphan_inode(struct f2fs_sb_info *sbi)
 {
 	mutex_lock(&sbi->orphan_inode_mutex);
+	f2fs_bug_on(sbi->n_orphans == 0);
 	sbi->n_orphans--;
 	mutex_unlock(&sbi->orphan_inode_mutex);
 }
@@ -225,12 +228,8 @@
 			break;
 		orphan = NULL;
 	}
-retry:
-	new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
-	if (!new) {
-		cond_resched();
-		goto retry;
-	}
+
+	new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
 	new->ino = ino;
 
 	/* add new_oentry into list which is sorted by inode number */
@@ -253,6 +252,7 @@
 		if (orphan->ino == ino) {
 			list_del(&orphan->list);
 			kmem_cache_free(orphan_entry_slab, orphan);
+			f2fs_bug_on(sbi->n_orphans == 0);
 			sbi->n_orphans--;
 			break;
 		}
@@ -263,7 +263,7 @@
 static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
 	struct inode *inode = f2fs_iget(sbi->sb, ino);
-	BUG_ON(IS_ERR(inode));
+	f2fs_bug_on(IS_ERR(inode));
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
@@ -277,7 +277,7 @@
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
 		return 0;
 
-	sbi->por_doing = 1;
+	sbi->por_doing = true;
 	start_blk = __start_cp_addr(sbi) + 1;
 	orphan_blkaddr = __start_sum_addr(sbi) - 1;
 
@@ -294,7 +294,7 @@
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	sbi->por_doing = 0;
+	sbi->por_doing = false;
 	return 0;
 }
 
@@ -469,9 +469,7 @@
 			return -EEXIST;
 	}
 	list_add_tail(&new->list, head);
-#ifdef CONFIG_F2FS_STAT_FS
-	sbi->n_dirty_dirs++;
-#endif
+	stat_inc_dirty_dir(sbi);
 	return 0;
 }
 
@@ -482,12 +480,8 @@
 
 	if (!S_ISDIR(inode->i_mode))
 		return;
-retry:
-	new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-	if (!new) {
-		cond_resched();
-		goto retry;
-	}
+
+	new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
 	new->inode = inode;
 	INIT_LIST_HEAD(&new->list);
 
@@ -504,13 +498,9 @@
 void add_dirty_dir_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-	struct dir_inode_entry *new;
-retry:
-	new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-	if (!new) {
-		cond_resched();
-		goto retry;
-	}
+	struct dir_inode_entry *new =
+			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+
 	new->inode = inode;
 	INIT_LIST_HEAD(&new->list);
 
@@ -541,9 +531,7 @@
 		if (entry->inode == inode) {
 			list_del(&entry->list);
 			kmem_cache_free(inode_entry_slab, entry);
-#ifdef CONFIG_F2FS_STAT_FS
-			sbi->n_dirty_dirs--;
-#endif
+			stat_dec_dirty_dir(sbi);
 			break;
 		}
 	}
@@ -617,11 +605,10 @@
 	blk_start_plug(&plug);
 
 retry_flush_dents:
-	mutex_lock_all(sbi);
-
+	f2fs_lock_all(sbi);
 	/* write all the dirty dentry pages */
 	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
-		mutex_unlock_all(sbi);
+		f2fs_unlock_all(sbi);
 		sync_dirty_dir_inodes(sbi);
 		goto retry_flush_dents;
 	}
@@ -644,7 +631,22 @@
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
 	mutex_unlock(&sbi->node_write);
-	mutex_unlock_all(sbi);
+	f2fs_unlock_all(sbi);
+}
+
+static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+{
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
+
+		if (!get_pages(sbi, F2FS_WRITEBACK))
+			break;
+
+		io_schedule();
+	}
+	finish_wait(&sbi->cp_wait, &wait);
 }
 
 static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
@@ -756,8 +758,7 @@
 	f2fs_put_page(cp_page, 1);
 
 	/* wait for previous submitted node/meta pages writeback */
-	while (get_pages(sbi, F2FS_WRITEBACK))
-		congestion_wait(BLK_RW_ASYNC, HZ / 50);
+	wait_on_all_pages_writeback(sbi);
 
 	filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
 	filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
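wait_on_all_pages_writeback() replaces a congestion_wait() polling loop with
the standard wait-queue idiom: enqueue first, then test the condition, so a
wake-up cannot slip in between the test and the sleep.  The generic shape
(wq and the predicate are placeholders; here the waker is the
write-completion path signalling sbi->cp_wait):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
		if (!writeback_still_pending())	/* hypothetical predicate */
			break;
		io_schedule();			/* sleep, accounted as I/O wait */
	}
	finish_wait(&wq, &wait);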
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 941f9b9..aa3438c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -68,9 +68,6 @@
 					struct buffer_head *bh_result)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
-#ifdef CONFIG_F2FS_STAT_FS
-	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-#endif
 	pgoff_t start_fofs, end_fofs;
 	block_t start_blkaddr;
 
@@ -80,9 +77,8 @@
 		return 0;
 	}
 
-#ifdef CONFIG_F2FS_STAT_FS
-	sbi->total_hit_ext++;
-#endif
+	stat_inc_total_hit(inode->i_sb);
+
 	start_fofs = fi->ext.fofs;
 	end_fofs = fi->ext.fofs + fi->ext.len - 1;
 	start_blkaddr = fi->ext.blk_addr;
@@ -100,9 +96,7 @@
 		else
 			bh_result->b_size = UINT_MAX;
 
-#ifdef CONFIG_F2FS_STAT_FS
-		sbi->read_hit_ext++;
-#endif
+		stat_inc_read_hit(inode->i_sb);
 		read_unlock(&fi->ext.ext_lock);
 		return 1;
 	}
@@ -116,7 +110,7 @@
 	pgoff_t fofs, start_fofs, end_fofs;
 	block_t start_blkaddr, end_blkaddr;
 
-	BUG_ON(blk_addr == NEW_ADDR);
+	f2fs_bug_on(blk_addr == NEW_ADDR);
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
 							dn->ofs_in_node;
 
@@ -442,7 +436,7 @@
 	}
 
 	/* It does not support data allocation */
-	BUG_ON(create);
+	f2fs_bug_on(create);
 
 	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
 		int i;
@@ -560,9 +554,9 @@
 		inode_dec_dirty_dents(inode);
 		err = do_write_data_page(page);
 	} else {
-		int ilock = mutex_lock_op(sbi);
+		f2fs_lock_op(sbi);
 		err = do_write_data_page(page);
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 		need_balance_fs = true;
 	}
 	if (err == -ENOENT)
@@ -641,7 +635,6 @@
 	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 	struct dnode_of_data dn;
 	int err = 0;
-	int ilock;
 
 	f2fs_balance_fs(sbi);
 repeat:
@@ -650,7 +643,7 @@
 		return -ENOMEM;
 	*pagep = page;
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
@@ -664,7 +657,7 @@
 	if (err)
 		goto err;
 
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
 		return 0;
@@ -700,7 +693,7 @@
 	return 0;
 
 err:
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	f2fs_put_page(page, 1);
 	return err;
 }
@@ -763,6 +756,8 @@
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
 
+	trace_f2fs_set_page_dirty(page, DATA);
+
 	SetPageUptodate(page);
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 384c6da..594fc1b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -139,7 +139,7 @@
 	bool room = false;
 	int max_slots = 0;
 
-	BUG_ON(level > MAX_DIR_HASH_DEPTH);
+	f2fs_bug_on(level > MAX_DIR_HASH_DEPTH);
 
 	nbucket = dir_buckets(level);
 	nblock = bucket_blocks(level);
@@ -346,7 +346,7 @@
 				goto error;
 		}
 
-		err = f2fs_init_acl(inode, dir);
+		err = f2fs_init_acl(inode, dir, page);
 		if (err)
 			goto error;
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 608f0df..89dc750 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -18,6 +18,13 @@
 #include <linux/crc32.h>
 #include <linux/magic.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_F2FS_CHECK_FS
+#define f2fs_bug_on(condition)	BUG_ON(condition)
+#else
+#define f2fs_bug_on(condition)
+#endif
 
 /*
  * For mount options
@@ -298,6 +305,9 @@
 	unsigned int main_segments;	/* # of segments in main area */
 	unsigned int reserved_segments;	/* # of reserved segments */
 	unsigned int ovp_segments;	/* # of overprovision segments */
+
+	/* a threshold to reclaim prefree segments */
+	unsigned int rec_prefree_segments;
 };
 
 /*
@@ -318,14 +328,6 @@
 };
 
 /*
- * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
- * The checkpoint procedure blocks all the locks in this fs_lock array.
- * Some FS operations grab free locks, and if there is no free lock,
- * then wait to grab a lock in a round-robin manner.
- */
-#define NR_GLOBAL_LOCKS	8
-
-/*
  * The below are the page types of bios used in submit_bio().
  * The available types are:
  * DATA			User data pages. It operates as async mode.
@@ -365,12 +367,12 @@
 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
 	struct inode *meta_inode;		/* cache meta blocks */
 	struct mutex cp_mutex;			/* checkpoint procedure lock */
-	struct mutex fs_lock[NR_GLOBAL_LOCKS];	/* blocking FS operations */
+	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 	struct mutex node_write;		/* locking node writes */
 	struct mutex writepages;		/* mutex for writepages() */
-	unsigned char next_lock_num;		/* round-robin global locks */
-	int por_doing;				/* recovery is doing or not */
-	int on_build_free_nids;			/* build_free_nids is doing */
+	bool por_doing;				/* recovery is doing or not */
+	bool on_build_free_nids;		/* build_free_nids is doing */
+	wait_queue_head_t cp_wait;		/* checkpoint writeback wait queue */
 
 	/* for orphan inode management */
 	struct list_head orphan_inode_list;	/* orphan inode list */
@@ -520,48 +522,24 @@
 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-	int i;
-
-	for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
-		/*
-		 * This is the only time we take multiple fs_lock[]
-		 * instances; the order is immaterial since we
-		 * always hold cp_mutex, which serializes multiple
-		 * such operations.
-		 */
-		mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
-	}
+	down_read(&sbi->cp_rwsem);
 }
 
-static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-	int i = 0;
-	for (; i < NR_GLOBAL_LOCKS; i++)
-		mutex_unlock(&sbi->fs_lock[i]);
+	up_read(&sbi->cp_rwsem);
 }
 
-static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
-	int i = 0;
-
-	for (; i < NR_GLOBAL_LOCKS; i++)
-		if (mutex_trylock(&sbi->fs_lock[i]))
-			return i;
-
-	mutex_lock(&sbi->fs_lock[next_lock]);
-	sbi->next_lock_num++;
-	return next_lock;
+	down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
 }
 
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-	if (ilock < 0)
-		return;
-	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
-	mutex_unlock(&sbi->fs_lock[ilock]);
+	up_write(&sbi->cp_rwsem);
 }
 
 /*
@@ -612,8 +590,8 @@
 						blkcnt_t count)
 {
 	spin_lock(&sbi->stat_lock);
-	BUG_ON(sbi->total_valid_block_count < (block_t) count);
-	BUG_ON(inode->i_blocks < count);
+	f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
+	f2fs_bug_on(inode->i_blocks < count);
 	inode->i_blocks -= count;
 	sbi->total_valid_block_count -= (block_t)count;
 	spin_unlock(&sbi->stat_lock);
@@ -745,9 +723,9 @@
 {
 	spin_lock(&sbi->stat_lock);
 
-	BUG_ON(sbi->total_valid_block_count < count);
-	BUG_ON(sbi->total_valid_node_count < count);
-	BUG_ON(inode->i_blocks < count);
+	f2fs_bug_on(sbi->total_valid_block_count < count);
+	f2fs_bug_on(sbi->total_valid_node_count < count);
+	f2fs_bug_on(inode->i_blocks < count);
 
 	inode->i_blocks -= count;
 	sbi->total_valid_node_count -= count;
@@ -768,7 +746,7 @@
 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
 {
 	spin_lock(&sbi->stat_lock);
-	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
+	f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
 	sbi->total_valid_inode_count++;
 	spin_unlock(&sbi->stat_lock);
 }
@@ -776,7 +754,7 @@
 static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
 {
 	spin_lock(&sbi->stat_lock);
-	BUG_ON(!sbi->total_valid_inode_count);
+	f2fs_bug_on(!sbi->total_valid_inode_count);
 	sbi->total_valid_inode_count--;
 	spin_unlock(&sbi->stat_lock);
 	return 0;
@@ -797,7 +775,7 @@
 		return;
 
 	if (unlock) {
-		BUG_ON(!PageLocked(page));
+		f2fs_bug_on(!PageLocked(page));
 		unlock_page(page);
 	}
 	page_cache_release(page);
@@ -819,6 +797,20 @@
 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
 }
 
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+						gfp_t flags)
+{
+	void *entry;
+retry:
+	entry = kmem_cache_alloc(cachep, flags);
+	if (!entry) {
+		cond_resched();
+		goto retry;
+	}
+
+	return entry;
+}
+
 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
@@ -979,6 +971,7 @@
  */
 void f2fs_set_inode_flags(struct inode *);
 struct inode *f2fs_iget(struct super_block *, unsigned long);
+int try_to_free_nats(struct f2fs_sb_info *, int);
 void update_inode(struct inode *, struct page *);
 int update_inode_page(struct inode *);
 int f2fs_write_inode(struct inode *, struct writeback_control *);
@@ -1033,6 +1026,7 @@
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
+int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
 int remove_inode_page(struct inode *);
 struct page *new_inode_page(struct inode *, const struct qstr *);
 struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
@@ -1059,6 +1053,7 @@
  * segment.c
  */
 void f2fs_balance_fs(struct f2fs_sb_info *);
+void f2fs_balance_fs_bg(struct f2fs_sb_info *);
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
 void clear_prefree_segments(struct f2fs_sb_info *);
 int npages_for_summary_flush(struct f2fs_sb_info *);
@@ -1172,7 +1167,16 @@
 	return (struct f2fs_stat_info*)sbi->stat_info;
 }
 
-#define stat_inc_call_count(si)	((si)->call_count++)
+#define stat_inc_call_count(si)		((si)->call_count++)
+#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
+#define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
+#define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
+#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
+#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
+#define stat_inc_seg_type(sbi, curseg)					\
+		((sbi)->segment_count[(curseg)->alloc_type]++)
+#define stat_inc_block_count(sbi, curseg)				\
+		((sbi)->block_count[(curseg)->alloc_type]++)
 
 #define stat_inc_seg_count(sbi, type)					\
 	do {								\
@@ -1207,6 +1211,13 @@
 void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_call_count(si)
+#define stat_inc_bggc_count(si)
+#define stat_inc_dirty_dir(sbi)
+#define stat_dec_dirty_dir(sbi)
+#define stat_inc_total_hit(sb)
+#define stat_inc_read_hit(sb)
+#define stat_inc_seg_type(sbi, curseg)
+#define stat_inc_block_count(sbi, curseg)
 #define stat_inc_seg_count(si, type)
 #define stat_inc_tot_blk_count(si, blks)
 #define stat_inc_data_blk_count(si, blks)
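The locking rework above collapses the eight round-robin fs_lock mutexes into
a single rw_semaphore: ordinary operations take it shared through
f2fs_lock_op(), while checkpoint takes it exclusive through f2fs_lock_all(),
nested under cp_mutex to keep lockdep happy.  The effect, sketched:

	/* many concurrent filesystem operations: */
	down_read(&sbi->cp_rwsem);		/* f2fs_lock_op()   */
	/* ... modify metadata ... */
	up_read(&sbi->cp_rwsem);		/* f2fs_unlock_op() */

	/* one checkpoint, excluding all of the above: */
	down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
	/* ... write a consistent checkpoint ... */
	up_write(&sbi->cp_rwsem);		/* f2fs_unlock_all() */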
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 02c9069..7d714f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -35,18 +35,18 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	block_t old_blk_addr;
 	struct dnode_of_data dn;
-	int err, ilock;
+	int err;
 
 	f2fs_balance_fs(sbi);
 
 	sb_start_pagefault(inode->i_sb);
 
 	/* block allocation */
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
 	if (err) {
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 		goto out;
 	}
 
@@ -56,12 +56,12 @@
 		err = reserve_new_block(&dn);
 		if (err) {
 			f2fs_put_dnode(&dn);
-			mutex_unlock_op(sbi, ilock);
+			f2fs_unlock_op(sbi);
 			goto out;
 		}
 	}
 	f2fs_put_dnode(&dn);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	file_update_time(vma->vm_file);
 	lock_page(page);
@@ -88,6 +88,7 @@
 	set_page_dirty(page);
 	SetPageUptodate(page);
 
+	trace_f2fs_vm_page_mkwrite(page, DATA);
 mapped:
 	/* fill the page */
 	wait_on_page_writeback(page);
@@ -188,8 +189,9 @@
 			if (ret)
 				goto out;
 		}
-		filemap_fdatawait_range(sbi->node_inode->i_mapping,
-							0, LONG_MAX);
+		ret = wait_on_node_pages_writeback(sbi, inode->i_ino);
+		if (ret)
+			goto out;
 		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 	}
 out:
@@ -270,7 +272,7 @@
 	unsigned int blocksize = inode->i_sb->s_blocksize;
 	struct dnode_of_data dn;
 	pgoff_t free_from;
-	int count = 0, ilock = -1;
+	int count = 0;
 	int err;
 
 	trace_f2fs_truncate_blocks_enter(inode, from);
@@ -278,13 +280,13 @@
 	free_from = (pgoff_t)
 			((from + blocksize - 1) >> (sbi->log_blocksize));
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 		trace_f2fs_truncate_blocks_exit(inode, err);
 		return err;
 	}
@@ -295,7 +297,7 @@
 		count = ADDRS_PER_BLOCK;
 
 	count -= dn.ofs_in_node;
-	BUG_ON(count < 0);
+	f2fs_bug_on(count < 0);
 
 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 		truncate_data_blocks_range(&dn, count);
@@ -305,7 +307,7 @@
 	f2fs_put_dnode(&dn);
 free_next:
 	err = truncate_inode_blocks(inode, free_from);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	/* lastly zero out the first data page */
 	truncate_partial_data_page(inode, from);
@@ -416,16 +418,15 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
-	int ilock;
 
 	if (!len)
 		return;
 
 	f2fs_balance_fs(sbi);
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	page = get_new_data_page(inode, NULL, index, false);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_writeback(page);
@@ -484,7 +485,6 @@
 			struct address_space *mapping = inode->i_mapping;
 			loff_t blk_start, blk_end;
 			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-			int ilock;
 
 			f2fs_balance_fs(sbi);
 
@@ -493,9 +493,9 @@
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
-			ilock = mutex_lock_op(sbi);
+			f2fs_lock_op(sbi);
 			ret = truncate_hole(inode, pg_start, pg_end);
-			mutex_unlock_op(sbi, ilock);
+			f2fs_unlock_op(sbi);
 		}
 	}
 
@@ -529,13 +529,12 @@
 
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
-		int ilock;
 
-		ilock = mutex_lock_op(sbi);
+		f2fs_lock_op(sbi);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 		if (ret) {
-			mutex_unlock_op(sbi, ilock);
+			f2fs_unlock_op(sbi);
 			break;
 		}
 
@@ -543,12 +542,12 @@
 			ret = reserve_new_block(&dn);
 			if (ret) {
 				f2fs_put_dnode(&dn);
-				mutex_unlock_op(sbi, ilock);
+				f2fs_unlock_op(sbi);
 				break;
 			}
 		}
 		f2fs_put_dnode(&dn);
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 
 		if (pg_start == pg_end)
 			new_size = offset + len;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2f157e8..b7ad1ec 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -77,13 +77,15 @@
 		else
 			wait_ms = increase_sleep_time(gc_th, wait_ms);
 
-#ifdef CONFIG_F2FS_STAT_FS
-		sbi->bg_gc++;
-#endif
+		stat_inc_bggc_count(sbi);
 
 		/* if return value is not zero, no victim was selected */
 		if (f2fs_gc(sbi))
 			wait_ms = gc_th->no_gc_sleep_time;
+
+		/* balancing f2fs's metadata periodically */
+		f2fs_balance_fs_bg(sbi);
+
 	} while (!kthread_should_stop());
 	return 0;
 }
@@ -236,8 +238,8 @@
 	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 }
 
-static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
-					struct victim_sel_policy *p)
+static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
+			unsigned int segno, struct victim_sel_policy *p)
 {
 	if (p->alloc_mode == SSR)
 		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
@@ -293,7 +295,11 @@
 			}
 			break;
 		}
-		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+		p.offset = segno + p.ofs_unit;
+		if (p.ofs_unit > 1)
+			p.offset -= segno % p.ofs_unit;
+
 		secno = GET_SECNO(sbi, segno);
 
 		if (sec_usage_check(sbi, secno))
@@ -306,10 +312,9 @@
 		if (p.min_cost > cost) {
 			p.min_segno = segno;
 			p.min_cost = cost;
-		}
-
-		if (cost == max_cost)
+		} else if (unlikely(cost == max_cost)) {
 			continue;
+		}
 
 		if (nsearched++ >= p.max_search) {
 			sbi->last_victim[p.gc_mode] = segno;
@@ -358,12 +363,8 @@
 		iput(inode);
 		return;
 	}
-repeat:
-	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
-	if (!new_ie) {
-		cond_resched();
-		goto repeat;
-	}
+
+	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
 	new_ie->inode = inode;
 	list_add_tail(&new_ie->list, ilist);
 }
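The p.offset rewrite in the victim-selection loop above is arithmetically
equivalent to the old expression but avoids a division: both round segno down
to its ofs_unit boundary and step one unit forward.  Checking with small
made-up values:

	unsigned int u = 4, s = 10;				/* ofs_unit, segno   */
	unsigned int old_off = (s / u) * u + u;			/* (10/4)*4 + 4 = 12 */
	unsigned int new_off = s + u - (u > 1 ? s % u : 0);	/* 10 + 4 - 2   = 12 */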
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9339cd2..d0eaa9f 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -37,6 +37,31 @@
 		inode->i_flags |= S_DIRSYNC;
 }
 
+static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+		if (ri->i_addr[0])
+			inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+		else
+			inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+	}
+}
+
+static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		if (old_valid_dev(inode->i_rdev)) {
+			ri->i_addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+			ri->i_addr[1] = 0;
+		} else {
+			ri->i_addr[0] = 0;
+			ri->i_addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+			ri->i_addr[2] = 0;
+		}
+	}
+}
+
 static int do_read_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -73,10 +98,6 @@
 	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
 	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
 	inode->i_generation = le32_to_cpu(ri->i_generation);
-	if (ri->i_addr[0])
-		inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
-	else
-		inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
 
 	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -84,8 +105,13 @@
 	fi->flags = 0;
 	fi->i_advise = ri->i_advise;
 	fi->i_pino = le32_to_cpu(ri->i_pino);
+
 	get_extent_info(&fi->ext, ri->i_ext);
 	get_inline_info(fi, ri);
+
+	/* get rdev by using inline_info */
+	__get_inode_rdev(inode, ri);
+
 	f2fs_put_page(node_page, 1);
 	return 0;
 }
@@ -179,21 +205,10 @@
 	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
 	ri->i_generation = cpu_to_le32(inode->i_generation);
 
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-		if (old_valid_dev(inode->i_rdev)) {
-			ri->i_addr[0] =
-				cpu_to_le32(old_encode_dev(inode->i_rdev));
-			ri->i_addr[1] = 0;
-		} else {
-			ri->i_addr[0] = 0;
-			ri->i_addr[1] =
-				cpu_to_le32(new_encode_dev(inode->i_rdev));
-			ri->i_addr[2] = 0;
-		}
-	}
-
+	__set_inode_rdev(inode, ri);
 	set_cold_node(inode, node_page);
 	set_page_dirty(node_page);
+
 	clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 }
 
@@ -214,7 +229,7 @@
 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-	int ret, ilock;
+	int ret;
 
 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
 			inode->i_ino == F2FS_META_INO(sbi))
@@ -227,9 +242,9 @@
 	 * We need to lock here to prevent producing dirty node pages
 	 * during the urgent cleaning time when running out of free sections.
 	 */
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	ret = update_inode_page(inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	if (wbc)
 		f2fs_balance_fs(sbi);
@@ -243,7 +258,6 @@
 void f2fs_evict_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-	int ilock;
 
 	trace_f2fs_evict_inode(inode);
 	truncate_inode_pages(&inode->i_data, 0);
@@ -252,7 +266,7 @@
 			inode->i_ino == F2FS_META_INO(sbi))
 		goto no_delete;
 
-	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
+	f2fs_bug_on(atomic_read(&F2FS_I(inode)->dirty_dents));
 	remove_dirty_dir_inode(inode);
 
 	if (inode->i_nlink || is_bad_inode(inode))
@@ -265,9 +279,9 @@
 	if (F2FS_HAS_BLOCKS(inode))
 		f2fs_truncate(inode);
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	remove_inode_page(inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	sb_end_intwrite(inode->i_sb);
 no_delete:
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2a5359c..575adac 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -27,19 +27,19 @@
 	nid_t ino;
 	struct inode *inode;
 	bool nid_free = false;
-	int err, ilock;
+	int err;
 
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	if (!alloc_nid(sbi, &ino)) {
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 		err = -ENOSPC;
 		goto fail;
 	}
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	inode->i_uid = current_fsuid();
 
@@ -115,7 +115,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	nid_t ino = 0;
-	int err, ilock;
+	int err;
 
 	f2fs_balance_fs(sbi);
 
@@ -131,9 +131,9 @@
 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 	ino = inode->i_ino;
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	if (err)
 		goto out;
 
@@ -157,7 +157,7 @@
 	struct inode *inode = old_dentry->d_inode;
 	struct super_block *sb = dir->i_sb;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
-	int err, ilock;
+	int err;
 
 	f2fs_balance_fs(sbi);
 
@@ -165,9 +165,9 @@
 	ihold(inode);
 
 	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	if (err)
 		goto out;
 
@@ -220,7 +220,6 @@
 	struct f2fs_dir_entry *de;
 	struct page *page;
 	int err = -ENOENT;
-	int ilock;
 
 	trace_f2fs_unlink_enter(dir, dentry);
 	f2fs_balance_fs(sbi);
@@ -229,16 +228,16 @@
 	if (!de)
 		goto fail;
 
+	f2fs_lock_op(sbi);
 	err = acquire_orphan_inode(sbi);
 	if (err) {
+		f2fs_unlock_op(sbi);
 		kunmap(page);
 		f2fs_put_page(page, 0);
 		goto fail;
 	}
-
-	ilock = mutex_lock_op(sbi);
 	f2fs_delete_entry(de, page, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	/* In order to evict this inode,  we set it dirty */
 	mark_inode_dirty(inode);
@@ -254,7 +253,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	size_t symlen = strlen(symname) + 1;
-	int err, ilock;
+	int err;
 
 	f2fs_balance_fs(sbi);
 
@@ -265,9 +264,9 @@
 	inode->i_op = &f2fs_symlink_inode_operations;
 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	if (err)
 		goto out;
 
@@ -290,7 +289,7 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
 	struct inode *inode;
-	int err, ilock;
+	int err;
 
 	f2fs_balance_fs(sbi);
 
@@ -304,9 +303,9 @@
 	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
 
 	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	if (err)
 		goto out_fail;
 
@@ -342,7 +341,6 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	int err = 0;
-	int ilock;
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
@@ -356,9 +354,9 @@
 	init_special_inode(inode, inode->i_mode, rdev);
 	inode->i_op = &f2fs_special_inode_operations;
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	if (err)
 		goto out;
 
@@ -387,7 +385,7 @@
 	struct f2fs_dir_entry *old_dir_entry = NULL;
 	struct f2fs_dir_entry *old_entry;
 	struct f2fs_dir_entry *new_entry;
-	int err = -ENOENT, ilock = -1;
+	int err = -ENOENT;
 
 	f2fs_balance_fs(sbi);
 
@@ -402,7 +400,7 @@
 			goto out_old;
 	}
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 
 	if (new_inode) {
 
@@ -467,7 +465,7 @@
 		update_inode_page(old_dir);
 	}
 
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 	return 0;
 
 put_out_dir:
@@ -477,7 +475,7 @@
 		kunmap(old_dir_page);
 		f2fs_put_page(old_dir_page, 0);
 	}
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 out_old:
 	kunmap(old_page);
 	f2fs_put_page(old_page, 0);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 51ef278..4ac4150 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -204,7 +204,7 @@
 		}
 		e->ni = *ni;
 		e->checkpointed = true;
-		BUG_ON(ni->blk_addr == NEW_ADDR);
+		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
 		/*
 		 * when nid is reallocated,
@@ -212,19 +212,19 @@
 		 * So, reinitialize it with new information.
 		 */
 		e->ni = *ni;
-		BUG_ON(ni->blk_addr != NULL_ADDR);
+		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
 	}
 
 	if (new_blkaddr == NEW_ADDR)
 		e->checkpointed = false;
 
 	/* sanity check */
-	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
-	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
+	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
+	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
 			new_blkaddr == NULL_ADDR);
-	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
+	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
 			new_blkaddr == NEW_ADDR);
-	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
+	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
 			nat_get_blkaddr(e) != NULL_ADDR &&
 			new_blkaddr == NEW_ADDR);
 
@@ -240,7 +240,7 @@
 	write_unlock(&nm_i->nat_tree_lock);
 }
 
-static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
@@ -495,10 +495,10 @@
 
 	get_node_info(sbi, dn->nid, &ni);
 	if (dn->inode->i_blocks == 0) {
-		BUG_ON(ni.blk_addr != NULL_ADDR);
+		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
 		goto invalidate;
 	}
-	BUG_ON(ni.blk_addr == NULL_ADDR);
+	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
 
 	/* Deallocate node address */
 	invalidate_blocks(sbi, ni.blk_addr);
@@ -822,7 +822,7 @@
 	}
 
 	/* 0 is possible if f2fs_new_inode() has failed */
-	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
+	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
 	set_new_dnode(&dn, inode, page, page, ino);
 	truncate_node(&dn);
 	return 0;
@@ -863,7 +863,7 @@
 	get_node_info(sbi, dn->nid, &old_ni);
 
 	/* Reinitialize old_ni with new node page */
-	BUG_ON(old_ni.blk_addr != NULL_ADDR);
+	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
 	new_ni = old_ni;
 	new_ni.ino = dn->inode->i_ino;
 	set_node_addr(sbi, &new_ni, NEW_ADDR);
@@ -969,7 +969,7 @@
 		goto repeat;
 	}
 got_it:
-	BUG_ON(nid != nid_of_node(page));
+	f2fs_bug_on(nid != nid_of_node(page));
 	mark_page_accessed(page);
 	return page;
 }
@@ -1148,6 +1148,47 @@
 	return nwritten;
 }
 
+int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+{
+	struct address_space *mapping = sbi->node_inode->i_mapping;
+	pgoff_t index = 0, end = LONG_MAX;
+	struct pagevec pvec;
+	int nr_pages;
+	int ret2 = 0, ret = 0;
+
+	pagevec_init(&pvec, 0);
+	while ((index <= end) &&
+			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+			PAGECACHE_TAG_WRITEBACK,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+		unsigned i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/* until radix tree lookup accepts end_index */
+			if (page->index > end)
+				continue;
+
+			if (ino && ino_of_node(page) == ino) {
+				wait_on_page_writeback(page);
+				if (TestClearPageError(page))
+					ret = -EIO;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+		ret2 = -ENOSPC;
+	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+		ret2 = -EIO;
+	if (!ret)
+		ret = ret2;
+	return ret;
+}
+
 static int f2fs_write_node_page(struct page *page,
 				struct writeback_control *wbc)
 {
@@ -1156,11 +1197,14 @@
 	block_t new_addr;
 	struct node_info ni;
 
+	if (sbi->por_doing)
+		goto redirty_out;
+
 	wait_on_page_writeback(page);
 
 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
-	BUG_ON(page->index != nid);
+	f2fs_bug_on(page->index != nid);
 
 	get_node_info(sbi, nid, &ni);
 
@@ -1171,12 +1215,8 @@
 		return 0;
 	}
 
-	if (wbc->for_reclaim) {
-		dec_page_count(sbi, F2FS_DIRTY_NODES);
-		wbc->pages_skipped++;
-		set_page_dirty(page);
-		return AOP_WRITEPAGE_ACTIVATE;
-	}
+	if (wbc->for_reclaim)
+		goto redirty_out;
 
 	mutex_lock(&sbi->node_write);
 	set_page_writeback(page);
@@ -1186,6 +1226,12 @@
 	mutex_unlock(&sbi->node_write);
 	unlock_page(page);
 	return 0;
+
+redirty_out:
+	dec_page_count(sbi, F2FS_DIRTY_NODES);
+	wbc->pages_skipped++;
+	set_page_dirty(page);
+	return AOP_WRITEPAGE_ACTIVATE;
 }
 
 /*
@@ -1200,11 +1246,8 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
 	long nr_to_write = wbc->nr_to_write;
 
-	/* First check balancing cached NAT entries */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
-		f2fs_sync_fs(sbi->sb, true);
-		return 0;
-	}
+	/* balancing f2fs's metadata in background */
+	f2fs_balance_fs_bg(sbi);
 
 	/* collect a number of dirty node pages and write together */
 	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
@@ -1223,6 +1266,8 @@
 	struct address_space *mapping = page->mapping;
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
 
+	trace_f2fs_set_page_dirty(page, NODE);
+
 	SetPageUptodate(page);
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
@@ -1291,23 +1336,18 @@
 	if (nid == 0)
 		return 0;
 
-	if (!build)
-		goto retry;
-
-	/* do not add allocated nids */
-	read_lock(&nm_i->nat_tree_lock);
-	ne = __lookup_nat_cache(nm_i, nid);
-	if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
-		allocated = true;
-	read_unlock(&nm_i->nat_tree_lock);
-	if (allocated)
-		return 0;
-retry:
-	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
-	if (!i) {
-		cond_resched();
-		goto retry;
+	if (build) {
+		/* do not add allocated nids */
+		read_lock(&nm_i->nat_tree_lock);
+		ne = __lookup_nat_cache(nm_i, nid);
+		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+			allocated = true;
+		read_unlock(&nm_i->nat_tree_lock);
+		if (allocated)
+			return 0;
 	}
+
+	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
 	i->nid = nid;
 	i->state = NID_NEW;
 
@@ -1350,7 +1390,7 @@
 			break;
 
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-		BUG_ON(blk_addr == NEW_ADDR);
+		f2fs_bug_on(blk_addr == NEW_ADDR);
 		if (blk_addr == NULL_ADDR) {
 			if (add_free_nid(nm_i, start_nid, true) < 0)
 				break;
@@ -1421,14 +1461,14 @@
 
 	/* We should not use stale free nids created by build_free_nids */
 	if (nm_i->fcnt && !sbi->on_build_free_nids) {
-		BUG_ON(list_empty(&nm_i->free_nid_list));
+		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
 		list_for_each(this, &nm_i->free_nid_list) {
 			i = list_entry(this, struct free_nid, list);
 			if (i->state == NID_NEW)
 				break;
 		}
 
-		BUG_ON(i->state != NID_NEW);
+		f2fs_bug_on(i->state != NID_NEW);
 		*nid = i->nid;
 		i->state = NID_ALLOC;
 		nm_i->fcnt--;
@@ -1439,9 +1479,9 @@
 
 	/* Let's scan nat pages and its caches to get free nids */
 	mutex_lock(&nm_i->build_lock);
-	sbi->on_build_free_nids = 1;
+	sbi->on_build_free_nids = true;
 	build_free_nids(sbi);
-	sbi->on_build_free_nids = 0;
+	sbi->on_build_free_nids = false;
 	mutex_unlock(&nm_i->build_lock);
 	goto retry;
 }
@@ -1456,7 +1496,7 @@
 
 	spin_lock(&nm_i->free_nid_list_lock);
 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
-	BUG_ON(!i || i->state != NID_ALLOC);
+	f2fs_bug_on(!i || i->state != NID_ALLOC);
 	__del_from_free_nid_list(i);
 	spin_unlock(&nm_i->free_nid_list_lock);
 }
@@ -1474,7 +1514,7 @@
 
 	spin_lock(&nm_i->free_nid_list_lock);
 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
-	BUG_ON(!i || i->state != NID_ALLOC);
+	f2fs_bug_on(!i || i->state != NID_ALLOC);
 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
 		__del_from_free_nid_list(i);
 	} else {
@@ -1677,7 +1717,7 @@
 			nat_blk = page_address(page);
 		}
 
-		BUG_ON(!nat_blk);
+		f2fs_bug_on(!nat_blk);
 		raw_ne = nat_blk->entries[nid - start_nid];
 flush_now:
 		new_blkaddr = nat_get_blkaddr(ne);
@@ -1781,11 +1821,11 @@
 	/* destroy free nid list */
 	spin_lock(&nm_i->free_nid_list_lock);
 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
-		BUG_ON(i->state == NID_ALLOC);
+		f2fs_bug_on(i->state == NID_ALLOC);
 		__del_from_free_nid_list(i);
 		nm_i->fcnt--;
 	}
-	BUG_ON(nm_i->fcnt);
+	f2fs_bug_on(nm_i->fcnt);
 	spin_unlock(&nm_i->free_nid_list_lock);
 
 	/* destroy nat cache */
@@ -1799,7 +1839,7 @@
 			__del_from_nat_cache(nm_i, e);
 		}
 	}
-	BUG_ON(nm_i->nat_cnt);
+	f2fs_bug_on(nm_i->nat_cnt);
 	write_unlock(&nm_i->nat_tree_lock);
 
 	kfree(nm_i->nat_bitmap);
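
Two conversions repeat throughout node.c above.  First, BUG_ON() becomes
f2fs_bug_on(), which (judging by the CONFIG_F2FS_CHECK_FS gating that appears
in the segment.h hunk below) is assumed to compile away when the debug option
is off:

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(condition)
#endif

Second, the open-coded allocation retry loop removed from add_free_nid() moves
behind f2fs_kmem_cache_alloc(); a sketch matching the deleted call-site code:

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;
retry:
	entry = kmem_cache_alloc(cachep, flags);
	if (!entry) {
		cond_resched();		/* same backoff the call site dropped */
		goto retry;
	}
	return entry;
}
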
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 51ef5ee..fdc8116 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -64,24 +64,31 @@
 	name.name = raw_inode->i_name;
 retry:
 	de = f2fs_find_entry(dir, &name, &page);
-	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
-		kunmap(page);
-		f2fs_put_page(page, 0);
-		goto out;
-	}
+	if (de && inode->i_ino == le32_to_cpu(de->ino))
+		goto out_unmap_put;
 	if (de) {
 		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
 		if (IS_ERR(einode)) {
 			WARN_ON(1);
 			if (PTR_ERR(einode) == -ENOENT)
 				err = -EEXIST;
-			goto out;
+			goto out_unmap_put;
+		}
+		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
+		if (err) {
+			iput(einode);
+			goto out_unmap_put;
 		}
 		f2fs_delete_entry(de, page, einode);
 		iput(einode);
 		goto retry;
 	}
 	err = __f2fs_add_link(dir, &name, inode);
+	goto out;
+
+out_unmap_put:
+	kunmap(page);
+	f2fs_put_page(page, 0);
 out:
 	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
 			"ino = %x, name = %s, dir = %lx, err = %d",
@@ -285,7 +292,6 @@
 	struct f2fs_summary sum;
 	struct node_info ni;
 	int err = 0, recovered = 0;
-	int ilock;
 
 	start = start_bidx_of_node(ofs_of_node(page), fi);
 	if (IS_INODE(page))
@@ -293,20 +299,20 @@
 	else
 		end = start + ADDRS_PER_BLOCK;
 
-	ilock = mutex_lock_op(sbi);
+	f2fs_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 
 	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
 	if (err) {
-		mutex_unlock_op(sbi, ilock);
+		f2fs_unlock_op(sbi);
 		return err;
 	}
 
 	wait_on_page_writeback(dn.node_page);
 
 	get_node_info(sbi, dn.nid, &ni);
-	BUG_ON(ni.ino != ino_of_node(page));
-	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));
+	f2fs_bug_on(ni.ino != ino_of_node(page));
+	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));
 
 	for (; start < end; start++) {
 		block_t src, dest;
@@ -316,9 +322,9 @@
 
 		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
 			if (src == NULL_ADDR) {
-				int err = reserve_new_block(&dn);
+				err = reserve_new_block(&dn);
 				/* We should not get -ENOSPC */
-				BUG_ON(err);
+				f2fs_bug_on(err);
 			}
 
 			/* Check the previous node page having this index */
@@ -349,7 +355,7 @@
 	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 err:
 	f2fs_put_dnode(&dn);
-	mutex_unlock_op(sbi, ilock);
+	f2fs_unlock_op(sbi);
 
 	f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
 			"recovered_data = %d blocks, err = %d",
@@ -419,6 +425,7 @@
 {
 	struct list_head inode_list;
 	int err;
+	bool need_writecp = false;
 
 	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
 			sizeof(struct fsync_inode_entry), NULL);
@@ -428,7 +435,7 @@
 	INIT_LIST_HEAD(&inode_list);
 
 	/* step #1: find fsynced inode numbers */
-	sbi->por_doing = 1;
+	sbi->por_doing = true;
 	err = find_fsync_dnodes(sbi, &inode_list);
 	if (err)
 		goto out;
@@ -436,14 +443,16 @@
 	if (list_empty(&inode_list))
 		goto out;
 
+	need_writecp = true;
+
 	/* step #2: recover data */
 	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
-	BUG_ON(!list_empty(&inode_list));
+	f2fs_bug_on(!list_empty(&inode_list));
 out:
 	destroy_fsync_dnodes(&inode_list);
 	kmem_cache_destroy(fsync_entry_slab);
-	sbi->por_doing = 0;
-	if (!err)
+	sbi->por_doing = false;
+	if (!err && need_writecp)
 		write_checkpoint(sbi, false);
 	return err;
 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 09af9c7..fa284d3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -36,6 +36,14 @@
 	}
 }
 
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+{
+	/* check the # of cached NAT entries and prefree segments */
+	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+				excess_prefree_segs(sbi))
+		f2fs_sync_fs(sbi->sb, true);
+}
+
 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 		enum dirty_type dirty_type)
 {
@@ -50,20 +58,10 @@
 
 	if (dirty_type == DIRTY) {
 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
-		enum dirty_type t = DIRTY_HOT_DATA;
+		enum dirty_type t = sentry->type;
 
-		dirty_type = sentry->type;
-
-		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
-			dirty_i->nr_dirty[dirty_type]++;
-
-		/* Only one bitmap should be set */
-		for (; t <= DIRTY_COLD_NODE; t++) {
-			if (t == dirty_type)
-				continue;
-			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
-				dirty_i->nr_dirty[t]--;
-		}
+		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
+			dirty_i->nr_dirty[t]++;
 	}
 }
 
@@ -76,12 +74,11 @@
 		dirty_i->nr_dirty[dirty_type]--;
 
 	if (dirty_type == DIRTY) {
-		enum dirty_type t = DIRTY_HOT_DATA;
+		struct seg_entry *sentry = get_seg_entry(sbi, segno);
+		enum dirty_type t = sentry->type;
 
-		/* clear all the bitmaps */
-		for (; t <= DIRTY_COLD_NODE; t++)
-			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
-				dirty_i->nr_dirty[t]--;
+		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+			dirty_i->nr_dirty[t]--;
 
 		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
 			clear_bit(GET_SECNO(sbi, segno),
@@ -142,27 +139,33 @@
 void clear_prefree_segments(struct f2fs_sb_info *sbi)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-	unsigned int segno = -1;
+	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
 	unsigned int total_segs = TOTAL_SEGS(sbi);
+	unsigned int start = 0, end = -1;
 
 	mutex_lock(&dirty_i->seglist_lock);
+
 	while (1) {
-		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
-				segno + 1);
-		if (segno >= total_segs)
+		int i;
+		start = find_next_bit(prefree_map, total_segs, end + 1);
+		if (start >= total_segs)
 			break;
+		end = find_next_zero_bit(prefree_map, total_segs, start + 1);
 
-		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
-			dirty_i->nr_dirty[PRE]--;
+		for (i = start; i < end; i++)
+			clear_bit(i, prefree_map);
 
-		/* Let's use trim */
-		if (test_opt(sbi, DISCARD))
-			blkdev_issue_discard(sbi->sb->s_bdev,
-					START_BLOCK(sbi, segno) <<
-					sbi->log_sectors_per_block,
-					1 << (sbi->log_sectors_per_block +
-						sbi->log_blocks_per_seg),
-					GFP_NOFS, 0);
+		dirty_i->nr_dirty[PRE] -= end - start;
+
+		if (!test_opt(sbi, DISCARD))
+			continue;
+
+		blkdev_issue_discard(sbi->sb->s_bdev,
+				START_BLOCK(sbi, start) <<
+				sbi->log_sectors_per_block,
+				(1 << (sbi->log_sectors_per_block +
+				sbi->log_blocks_per_seg)) * (end - start),
+				GFP_NOFS, 0);
 	}
 	mutex_unlock(&dirty_i->seglist_lock);
 }
@@ -195,7 +198,7 @@
 	new_vblocks = se->valid_blocks + del;
 	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
 
-	BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
+	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
 				(new_vblocks > sbi->blocks_per_seg)));
 
 	se->valid_blocks = new_vblocks;
@@ -235,7 +238,7 @@
 	unsigned int segno = GET_SEGNO(sbi, addr);
 	struct sit_info *sit_i = SIT_I(sbi);
 
-	BUG_ON(addr == NULL_ADDR);
+	f2fs_bug_on(addr == NULL_ADDR);
 	if (addr == NEW_ADDR)
 		return;
 
@@ -267,9 +270,8 @@
  */
 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
 {
-	int total_size_bytes = 0;
 	int valid_sum_count = 0;
-	int i, sum_space;
+	int i, sum_in_page;
 
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
 		if (sbi->ckpt->alloc_type[i] == SSR)
@@ -278,13 +280,12 @@
 			valid_sum_count += curseg_blkoff(sbi, i);
 	}
 
-	total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
-			+ sizeof(struct nat_journal) + 2
-			+ sizeof(struct sit_journal) + 2;
-	sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
-	if (total_size_bytes < sum_space)
+	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
+	if (valid_sum_count <= sum_in_page)
 		return 1;
-	else if (total_size_bytes < 2 * sum_space)
+	else if ((valid_sum_count - sum_in_page) <=
+		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
 		return 2;
 	return 3;
 }
@@ -350,7 +351,7 @@
 		if (dir == ALLOC_RIGHT) {
 			secno = find_next_zero_bit(free_i->free_secmap,
 							TOTAL_SECS(sbi), 0);
-			BUG_ON(secno >= TOTAL_SECS(sbi));
+			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
 		} else {
 			go_left = 1;
 			left_start = hint - 1;
@@ -366,7 +367,7 @@
 		}
 		left_start = find_next_zero_bit(free_i->free_secmap,
 							TOTAL_SECS(sbi), 0);
-		BUG_ON(left_start >= TOTAL_SECS(sbi));
+		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
 		break;
 	}
 	secno = left_start;
@@ -405,7 +406,7 @@
 	}
 got_it:
 	/* set it as dirty segment in free segmap */
-	BUG_ON(test_bit(segno, free_i->free_segmap));
+	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
 	__set_inuse(sbi, segno);
 	*newseg = segno;
 	write_unlock(&free_i->segmap_lock);
@@ -550,9 +551,8 @@
 		change_curseg(sbi, type, true);
 	else
 		new_curseg(sbi, type, false);
-#ifdef CONFIG_F2FS_STAT_FS
-	sbi->segment_count[curseg->alloc_type]++;
-#endif
+
+	stat_inc_seg_type(sbi, curseg);
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -597,6 +597,11 @@
 
 	if (p->is_sync)
 		complete(p->wait);
+
+	if (!get_pages(p->sbi, F2FS_WRITEBACK) &&
+			!list_empty(&p->sbi->cp_wait.task_list))
+		wake_up(&p->sbi->cp_wait);
+
 	kfree(p);
 	bio_put(bio);
 }
@@ -657,6 +662,7 @@
 				block_t blk_addr, enum page_type type)
 {
 	struct block_device *bdev = sbi->sb->s_bdev;
+	int bio_blocks;
 
 	verify_block_addr(sbi, blk_addr);
 
@@ -676,7 +682,8 @@
 			goto retry;
 		}
 
-		sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
+		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
 		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 		sbi->bio[type]->bi_private = priv;
 		/*
@@ -771,7 +778,7 @@
 		return __get_segment_type_4(page, p_type);
 	}
 	/* NR_CURSEG_TYPE(6) logs by default */
-	BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
+	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
 	return __get_segment_type_6(page, p_type);
 }
 
@@ -801,9 +808,8 @@
 
 	mutex_lock(&sit_i->sentry_lock);
 	__refresh_next_blkoff(sbi, curseg);
-#ifdef CONFIG_F2FS_STAT_FS
-	sbi->block_count[curseg->alloc_type]++;
-#endif
+
+	stat_inc_block_count(sbi, curseg);
 
 	/*
 	 * SIT information should be updated before segment allocation,
@@ -849,7 +855,7 @@
 	struct f2fs_summary sum;
 	struct node_info ni;
 
-	BUG_ON(old_blkaddr == NULL_ADDR);
+	f2fs_bug_on(old_blkaddr == NULL_ADDR);
 	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 
@@ -1122,8 +1128,6 @@
 						SUM_JOURNAL_SIZE);
 	written_size += SUM_JOURNAL_SIZE;
 
-	set_page_dirty(page);
-
 	/* Step 3: write summary entries */
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
 		unsigned short blkoff;
@@ -1142,18 +1146,20 @@
 			summary = (struct f2fs_summary *)(kaddr + written_size);
 			*summary = seg_i->sum_blk->entries[j];
 			written_size += SUMMARY_SIZE;
-			set_page_dirty(page);
 
 			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
 							SUM_FOOTER_SIZE)
 				continue;
 
+			set_page_dirty(page);
 			f2fs_put_page(page, 1);
 			page = NULL;
 		}
 	}
-	if (page)
+	if (page) {
+		set_page_dirty(page);
 		f2fs_put_page(page, 1);
+	}
 }
 
 static void write_normal_summaries(struct f2fs_sb_info *sbi,
@@ -1239,7 +1245,7 @@
 	/* get current sit block page without lock */
 	src_page = get_meta_page(sbi, src_off);
 	dst_page = grab_meta_page(sbi, dst_off);
-	BUG_ON(PageDirty(src_page));
+	f2fs_bug_on(PageDirty(src_page));
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
@@ -1271,9 +1277,9 @@
 			__mark_sit_entry_dirty(sbi, segno);
 		}
 		update_sits_in_cursum(sum, -sits_in_cursum(sum));
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /*
@@ -1637,6 +1643,7 @@
 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
 
 	err = build_sit_info(sbi);
 	if (err)
@@ -1744,6 +1751,8 @@
 void destroy_segment_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
+	if (!sm_info)
+		return;
 	destroy_dirty_segmap(sbi);
 	destroy_curseg(sbi);
 	destroy_free_segmap(sbi);
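
clear_prefree_segments() now walks the prefree bitmap as contiguous runs
(find_next_bit()/find_next_zero_bit() pairs) and issues a single discard per
run instead of one per segment.  A runnable user-space model of the run walk
(toy character bitmap, not kernel code):

#include <stdio.h>

static int find_next(const char *map, int size, int from, char what)
{
	while (from < size && map[from] != what)
		from++;
	return from;
}

int main(void)
{
	const char map[] = "0111001100010";	/* '1' = prefree segment */
	int total = sizeof(map) - 1;
	int start, end = -1;

	while (1) {
		start = find_next(map, total, end + 1, '1');
		if (start >= total)
			break;
		end = find_next(map, total, start + 1, '0');
		printf("one discard for segments [%d, %d)\n", start, end);
	}
	return 0;
}

For this bitmap the old loop would have issued six discards; the new walk
issues three, one per run, with the request length scaled by (end - start)
exactly as in the blkdev_issue_discard() call above.
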
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bdd10ea..269f690 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -14,6 +14,8 @@
 #define NULL_SEGNO			((unsigned int)(~0))
 #define NULL_SECNO			((unsigned int)(~0))
 
+#define DEF_RECLAIM_PREFREE_SEGMENTS	100	/* 200MB of prefree segments */
+
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)
@@ -90,6 +92,8 @@
 	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 #define SECTOR_TO_BLOCK(sbi, sectors)					\
 	(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define MAX_BIO_BLOCKS(max_hw_blocks)					\
+	(min((int)max_hw_blocks, BIO_MAX_PAGES))
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
@@ -470,6 +474,11 @@
 						reserved_sections(sbi)));
 }
 
+static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+{
+	return (prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments);
+}
+
 static inline int utilization(struct f2fs_sb_info *sbi)
 {
 	return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count);
@@ -513,16 +522,13 @@
 	return curseg->next_blkoff;
 }
 
+#ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
 	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
 	BUG_ON(segno > end_segno);
 }
 
-/*
- * This function is used for only debugging.
- * NOTE: In future, we have to remove this function.
- */
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
@@ -541,8 +547,9 @@
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
 	unsigned int end_segno = sm_info->segment_count - 1;
+	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
-	int i;
+	int cur_pos = 0, next_pos;
 
 	/* check segment usage */
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
@@ -551,11 +558,26 @@
 	BUG_ON(segno > end_segno);
 
 	/* check bitmap with valid block count */
-	for (i = 0; i < sbi->blocks_per_seg; i++)
-		if (f2fs_test_bit(i, raw_sit->valid_map))
-			valid_blocks++;
+	do {
+		if (is_valid) {
+			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
+					sbi->blocks_per_seg,
+					cur_pos);
+			valid_blocks += next_pos - cur_pos;
+		} else
+			next_pos = find_next_bit_le(&raw_sit->valid_map,
+					sbi->blocks_per_seg,
+					cur_pos);
+		cur_pos = next_pos;
+		is_valid = !is_valid;
+	} while (cur_pos < sbi->blocks_per_seg);
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
 }
+#else
+#define check_seg_range(sbi, segno)
+#define verify_block_addr(sbi, blk_addr)
+#define check_block_count(sbi, segno, raw_sit)
+#endif
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
 						unsigned int start)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 13d0a0f..bafff72 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -43,7 +43,9 @@
 	Opt_disable_roll_forward,
 	Opt_discard,
 	Opt_noheap,
+	Opt_user_xattr,
 	Opt_nouser_xattr,
+	Opt_acl,
 	Opt_noacl,
 	Opt_active_logs,
 	Opt_disable_ext_identify,
@@ -56,7 +58,9 @@
 	{Opt_disable_roll_forward, "disable_roll_forward"},
 	{Opt_discard, "discard"},
 	{Opt_noheap, "no_heap"},
+	{Opt_user_xattr, "user_xattr"},
 	{Opt_nouser_xattr, "nouser_xattr"},
+	{Opt_acl, "acl"},
 	{Opt_noacl, "noacl"},
 	{Opt_active_logs, "active_logs=%u"},
 	{Opt_disable_ext_identify, "disable_ext_identify"},
@@ -65,24 +69,40 @@
 };
 
 /* Sysfs support for f2fs */
+enum {
+	GC_THREAD,	/* struct f2fs_gc_thread */
+	SM_INFO,	/* struct f2fs_sm_info */
+};
+
 struct f2fs_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
 	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
 			 const char *, size_t);
+	int struct_type;
 	int offset;
 };
 
+static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
+{
+	if (struct_type == GC_THREAD)
+		return (unsigned char *)sbi->gc_thread;
+	else if (struct_type == SM_INFO)
+		return (unsigned char *)SM_I(sbi);
+	return NULL;
+}
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 			struct f2fs_sb_info *sbi, char *buf)
 {
-	struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+	unsigned char *ptr = NULL;
 	unsigned int *ui;
 
-	if (!gc_kth)
+	ptr = __struct_ptr(sbi, a->struct_type);
+	if (!ptr)
 		return -EINVAL;
 
-	ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+	ui = (unsigned int *)(ptr + a->offset);
 
 	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
 }
@@ -91,15 +111,16 @@
 			struct f2fs_sb_info *sbi,
 			const char *buf, size_t count)
 {
-	struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+	unsigned char *ptr;
 	unsigned long t;
 	unsigned int *ui;
 	ssize_t ret;
 
-	if (!gc_kth)
+	ptr = __struct_ptr(sbi, a->struct_type);
+	if (!ptr)
 		return -EINVAL;
 
-	ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+	ui = (unsigned int *)(ptr + a->offset);
 
 	ret = kstrtoul(skip_spaces(buf), 0, &t);
 	if (ret < 0)
@@ -135,21 +156,25 @@
 	complete(&sbi->s_kobj_unregister);
 }
 
-#define F2FS_ATTR_OFFSET(_name, _mode, _show, _store, _elname) \
+#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
 static struct f2fs_attr f2fs_attr_##_name = {			\
 	.attr = {.name = __stringify(_name), .mode = _mode },	\
 	.show	= _show,					\
 	.store	= _store,					\
-	.offset = offsetof(struct f2fs_gc_kthread, _elname),	\
+	.struct_type = _struct_type,				\
+	.offset = _offset					\
 }
 
-#define F2FS_RW_ATTR(name, elname)	\
-	F2FS_ATTR_OFFSET(name, 0644, f2fs_sbi_show, f2fs_sbi_store, elname)
+#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
+	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
+		f2fs_sbi_show, f2fs_sbi_store,			\
+		offsetof(struct struct_name, elname))
 
-F2FS_RW_ATTR(gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(gc_idle, gc_idle);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -157,6 +182,7 @@
 	ATTR_LIST(gc_max_sleep_time),
 	ATTR_LIST(gc_no_gc_sleep_time),
 	ATTR_LIST(gc_idle),
+	ATTR_LIST(reclaim_segments),
 	NULL,
 };
 
@@ -237,6 +263,9 @@
 			set_opt(sbi, NOHEAP);
 			break;
 #ifdef CONFIG_F2FS_FS_XATTR
+		case Opt_user_xattr:
+			set_opt(sbi, XATTR_USER);
+			break;
 		case Opt_nouser_xattr:
 			clear_opt(sbi, XATTR_USER);
 			break;
@@ -244,6 +273,10 @@
 			set_opt(sbi, INLINE_XATTR);
 			break;
 #else
+		case Opt_user_xattr:
+			f2fs_msg(sb, KERN_INFO,
+				"user_xattr options not supported");
+			break;
 		case Opt_nouser_xattr:
 			f2fs_msg(sb, KERN_INFO,
 				"nouser_xattr options not supported");
@@ -254,10 +287,16 @@
 			break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
+		case Opt_acl:
+			set_opt(sbi, POSIX_ACL);
+			break;
 		case Opt_noacl:
 			clear_opt(sbi, POSIX_ACL);
 			break;
 #else
+		case Opt_acl:
+			f2fs_msg(sb, KERN_INFO, "acl options not supported");
+			break;
 		case Opt_noacl:
 			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
 			break;
@@ -355,7 +394,9 @@
 	f2fs_destroy_stats(sbi);
 	stop_gc_thread(sbi);
 
-	write_checkpoint(sbi, true);
+	/* We don't need to do checkpoint when it's clean */
+	if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+		write_checkpoint(sbi, true);
 
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
@@ -727,30 +768,47 @@
 		atomic_set(&sbi->nr_pages[i], 0);
 }
 
-static int validate_superblock(struct super_block *sb,
-		struct f2fs_super_block **raw_super,
-		struct buffer_head **raw_super_buf, sector_t block)
+/*
+ * Read the f2fs raw superblock.
+ * Since there are two copies of the superblock, read the first one first;
+ * if it is invalid, fall back to reading the second one.
+ */
+static int read_raw_super_block(struct super_block *sb,
+			struct f2fs_super_block **raw_super,
+			struct buffer_head **raw_super_buf)
 {
-	const char *super = (block == 0 ? "first" : "second");
+	int block = 0;
 
-	/* read f2fs raw super block */
+retry:
 	*raw_super_buf = sb_bread(sb, block);
 	if (!*raw_super_buf) {
-		f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
-				super);
-		return -EIO;
+		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+				block + 1);
+		if (block == 0) {
+			block++;
+			goto retry;
+		} else {
+			return -EIO;
+		}
 	}
 
 	*raw_super = (struct f2fs_super_block *)
 		((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
 
 	/* sanity checking of raw super */
-	if (!sanity_check_raw_super(sb, *raw_super))
-		return 0;
+	if (sanity_check_raw_super(sb, *raw_super)) {
+		brelse(*raw_super_buf);
+		f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+				"in %dth superblock", block + 1);
+		if (block == 0) {
+			block++;
+			goto retry;
+		} else {
+			return -EINVAL;
+		}
+	}
 
-	f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
-				"in %s superblock", super);
-	return -EINVAL;
+	return 0;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -760,7 +818,6 @@
 	struct buffer_head *raw_super_buf;
 	struct inode *root;
 	long err = -EINVAL;
-	int i;
 
 	/* allocate memory for f2fs-specific super block info */
 	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
@@ -773,14 +830,10 @@
 		goto free_sbi;
 	}
 
-	err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
-	if (err) {
-		brelse(raw_super_buf);
-		/* check secondary superblock when primary failed */
-		err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
-		if (err)
-			goto free_sb_buf;
-	}
+	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
+	if (err)
+		goto free_sbi;
+
 	sb->s_fs_info = sbi;
 	/* init some FS parameters */
 	sbi->active_logs = NR_CURSEG_TYPE;
@@ -818,12 +871,12 @@
 	mutex_init(&sbi->gc_mutex);
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
-	for (i = 0; i < NR_GLOBAL_LOCKS; i++)
-		mutex_init(&sbi->fs_lock[i]);
 	mutex_init(&sbi->node_write);
-	sbi->por_doing = 0;
+	sbi->por_doing = false;
 	spin_lock_init(&sbi->stat_lock);
 	init_rwsem(&sbi->bio_sem);
+	init_rwsem(&sbi->cp_rwsem);
+	init_waitqueue_head(&sbi->cp_wait);
 	init_sb_info(sbi);
 
 	/* get an inode for meta space */
@@ -922,12 +975,12 @@
 		/* After POR, we can run background GC thread.*/
 		err = start_gc_thread(sbi);
 		if (err)
-			goto fail;
+			goto free_gc;
 	}
 
 	err = f2fs_build_stats(sbi);
 	if (err)
-		goto fail;
+		goto free_gc;
 
 	if (f2fs_proc_root)
 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -953,6 +1006,12 @@
 
 	return 0;
 fail:
+	if (sbi->s_proc) {
+		remove_proc_entry("segment_info", sbi->s_proc);
+		remove_proc_entry(sb->s_id, f2fs_proc_root);
+	}
+	f2fs_destroy_stats(sbi);
+free_gc:
 	stop_gc_thread(sbi);
 free_root_inode:
 	dput(sb->s_root);
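
The sysfs rework keys every attribute to a struct_type plus an offsetof()
value, so one show/store pair can address fields in different structures.  A
runnable user-space model of the dispatch (struct and field names here are
illustrative only):

#include <stdio.h>
#include <stddef.h>

struct gc_cfg { unsigned int min_sleep_time, max_sleep_time; };
struct sm_cfg { unsigned int rec_prefree_segments; };

enum { GC_THREAD, SM_INFO };

struct attr { int struct_type; size_t offset; const char *name; };

/* mirrors __struct_ptr(): pick the object the stored offset applies to */
static unsigned char *struct_ptr(int type, struct gc_cfg *gc,
				 struct sm_cfg *sm)
{
	return type == GC_THREAD ? (unsigned char *)gc : (unsigned char *)sm;
}

int main(void)
{
	struct gc_cfg gc = { 30000, 60000 };
	struct sm_cfg sm = { 100 };
	const struct attr attrs[] = {
		{ GC_THREAD, offsetof(struct gc_cfg, min_sleep_time),
		  "gc_min_sleep_time" },
		{ SM_INFO, offsetof(struct sm_cfg, rec_prefree_segments),
		  "reclaim_segments" },
	};

	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		unsigned int *ui = (unsigned int *)
			(struct_ptr(attrs[i].struct_type, &gc, &sm)
			 + attrs[i].offset);
		printf("%s = %u\n", attrs[i].name, *ui);
	}
	return 0;
}

With the macro generalized, the new reclaim_segments knob costs a single
F2FS_RW_ATTR() line plus an ATTR_LIST() entry, as the hunk shows.
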
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1ac8a5f..aa7a3f1 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -154,6 +154,9 @@
 }
 
 #ifdef CONFIG_F2FS_FS_SECURITY
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+			const char *name, const void *value, size_t value_len,
+			struct page *ipage);
 static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
 		void *page)
 {
@@ -161,7 +164,7 @@
 	int err = 0;
 
 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+		err = __f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
 				xattr->name, xattr->value,
 				xattr->value_len, (struct page *)page);
 		if (err < 0)
@@ -369,7 +372,7 @@
 			alloc_nid_failed(sbi, new_nid);
 			return PTR_ERR(xpage);
 		}
-		BUG_ON(new_nid);
+		f2fs_bug_on(new_nid);
 	} else {
 		struct dnode_of_data dn;
 		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -469,16 +472,15 @@
 	return error;
 }
 
-int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-			const void *value, size_t value_len, struct page *ipage)
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+			const char *name, const void *value, size_t value_len,
+			struct page *ipage)
 {
-	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_xattr_entry *here, *last;
 	void *base_addr;
 	int found, newsize;
 	size_t name_len;
-	int ilock;
 	__u32 new_hsize;
 	int error = -ENOMEM;
 
@@ -493,10 +495,6 @@
 	if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
 		return -ERANGE;
 
-	f2fs_balance_fs(sbi);
-
-	ilock = mutex_lock_op(sbi);
-
 	base_addr = read_all_xattrs(inode, ipage);
 	if (!base_addr)
 		goto exit;
@@ -522,7 +520,7 @@
 		 */
 		free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
 		if (found)
-			free = free - ENTRY_SIZE(here);
+			free = free + ENTRY_SIZE(here);
 
 		if (free < newsize) {
 			error = -ENOSPC;
@@ -578,7 +576,21 @@
 	else
 		update_inode_page(inode);
 exit:
-	mutex_unlock_op(sbi, ilock);
 	kzfree(base_addr);
 	return error;
 }
+
+int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+			const void *value, size_t value_len, struct page *ipage)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	int err;
+
+	f2fs_balance_fs(sbi);
+
+	f2fs_lock_op(sbi);
+	err = __f2fs_setxattr(inode, name_index, name, value, value_len, ipage);
+	f2fs_unlock_op(sbi);
+
+	return err;
+}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 4241e6f..7c31f4b 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -102,6 +102,7 @@
 	struct hlist_head dir_hashtable[FAT_HASH_SIZE];
 
 	unsigned int dirty;           /* fs state before mount */
+	struct rcu_head rcu;
 };
 
 #define FAT_CACHE_VALID	0	/* special case for valid cache */
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0062da2..854b578 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -548,6 +548,16 @@
 	brelse(bh);
 }
 
+static void delayed_free(struct rcu_head *p)
+{
+	struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
+	unload_nls(sbi->nls_disk);
+	unload_nls(sbi->nls_io);
+	if (sbi->options.iocharset != fat_default_iocharset)
+		kfree(sbi->options.iocharset);
+	kfree(sbi);
+}
+
 static void fat_put_super(struct super_block *sb)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -557,14 +567,7 @@
 	iput(sbi->fsinfo_inode);
 	iput(sbi->fat_inode);
 
-	unload_nls(sbi->nls_disk);
-	unload_nls(sbi->nls_io);
-
-	if (sbi->options.iocharset != fat_default_iocharset)
-		kfree(sbi->options.iocharset);
-
-	sb->s_fs_info = NULL;
-	kfree(sbi);
+	call_rcu(&sbi->rcu, delayed_free);
 }
 
 static struct kmem_cache *fat_inode_cachep;
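
fat_put_super() now defers freeing the sb-private data through call_rcu(),
using the rcu_head just added to msdos_sb_info in fat.h, so that lockless
readers still inside an RCU read section cannot see sbi freed under them.
The generic shape of the idiom, as a sketch:

struct obj {
	/* ... payload ... */
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *p)
{
	/* runs after a grace period: no RCU reader can still see obj */
	kfree(container_of(p, struct obj, rcu));
}

static void obj_release(struct obj *o)
{
	call_rcu(&o->rcu, obj_free_rcu);	/* instead of kfree(o) */
}

The fat version also unloads the NLS tables inside the callback, since those
are only reachable through sbi.
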
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 65343c3..ef68665 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -56,7 +56,7 @@
 				return -EINVAL;
 	}
 
-	if (filp->f_op && filp->f_op->check_flags)
+	if (filp->f_op->check_flags)
 		error = filp->f_op->check_flags(arg);
 	if (error)
 		return error;
@@ -64,8 +64,7 @@
 	/*
 	 * ->fasync() is responsible for setting the FASYNC bit.
 	 */
-	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
-			filp->f_op->fasync) {
+	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
 		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
 		if (error < 0)
 			goto out;
diff --git a/fs/file_table.c b/fs/file_table.c
index e900ca5..5fff903 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -36,8 +36,6 @@
 	.max_files = NR_FILE
 };
 
-DEFINE_STATIC_LGLOCK(files_lglock);
-
 /* SLAB cache for file structures */
 static struct kmem_cache *filp_cachep __read_mostly;
 
@@ -134,7 +132,6 @@
 		return ERR_PTR(error);
 	}
 
-	INIT_LIST_HEAD(&f->f_u.fu_list);
 	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
 	spin_lock_init(&f->f_lock);
@@ -240,11 +237,11 @@
 	locks_remove_flock(file);
 
 	if (unlikely(file->f_flags & FASYNC)) {
-		if (file->f_op && file->f_op->fasync)
+		if (file->f_op->fasync)
 			file->f_op->fasync(-1, file, 0);
 	}
 	ima_file_free(file);
-	if (file->f_op && file->f_op->release)
+	if (file->f_op->release)
 		file->f_op->release(inode, file);
 	security_file_free(file);
 	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
@@ -304,7 +301,6 @@
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		struct task_struct *task = current;
 
-		file_sb_list_del(file);
 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
 			init_task_work(&file->f_u.fu_rcuhead, ____fput);
 			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
@@ -333,7 +329,6 @@
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		struct task_struct *task = current;
-		file_sb_list_del(file);
 		BUG_ON(!(task->flags & PF_KTHREAD));
 		__fput(file);
 	}
@@ -345,129 +340,10 @@
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
-		file_sb_list_del(file);
 		file_free(file);
 	}
 }
 
-static inline int file_list_cpu(struct file *file)
-{
-#ifdef CONFIG_SMP
-	return file->f_sb_list_cpu;
-#else
-	return smp_processor_id();
-#endif
-}
-
-/* helper for file_sb_list_add to reduce ifdefs */
-static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
-{
-	struct list_head *list;
-#ifdef CONFIG_SMP
-	int cpu;
-	cpu = smp_processor_id();
-	file->f_sb_list_cpu = cpu;
-	list = per_cpu_ptr(sb->s_files, cpu);
-#else
-	list = &sb->s_files;
-#endif
-	list_add(&file->f_u.fu_list, list);
-}
-
-/**
- * file_sb_list_add - add a file to the sb's file list
- * @file: file to add
- * @sb: sb to add it to
- *
- * Use this function to associate a file with the superblock of the inode it
- * refers to.
- */
-void file_sb_list_add(struct file *file, struct super_block *sb)
-{
-	if (likely(!(file->f_mode & FMODE_WRITE)))
-		return;
-	if (!S_ISREG(file_inode(file)->i_mode))
-		return;
-	lg_local_lock(&files_lglock);
-	__file_sb_list_add(file, sb);
-	lg_local_unlock(&files_lglock);
-}
-
-/**
- * file_sb_list_del - remove a file from the sb's file list
- * @file: file to remove
- * @sb: sb to remove it from
- *
- * Use this function to remove a file from its superblock.
- */
-void file_sb_list_del(struct file *file)
-{
-	if (!list_empty(&file->f_u.fu_list)) {
-		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
-		list_del_init(&file->f_u.fu_list);
-		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
-	}
-}
-
-#ifdef CONFIG_SMP
-
-/*
- * These macros iterate all files on all CPUs for a given superblock.
- * files_lglock must be held globally.
- */
-#define do_file_list_for_each_entry(__sb, __file)		\
-{								\
-	int i;							\
-	for_each_possible_cpu(i) {				\
-		struct list_head *list;				\
-		list = per_cpu_ptr((__sb)->s_files, i);		\
-		list_for_each_entry((__file), list, f_u.fu_list)
-
-#define while_file_list_for_each_entry				\
-	}							\
-}
-
-#else
-
-#define do_file_list_for_each_entry(__sb, __file)		\
-{								\
-	struct list_head *list;					\
-	list = &(sb)->s_files;					\
-	list_for_each_entry((__file), list, f_u.fu_list)
-
-#define while_file_list_for_each_entry				\
-}
-
-#endif
-
-/**
- *	mark_files_ro - mark all files read-only
- *	@sb: superblock in question
- *
- *	All files are marked read-only.  We don't care about pending
- *	delete files so this should be used in 'force' mode only.
- */
-void mark_files_ro(struct super_block *sb)
-{
-	struct file *f;
-
-	lg_global_lock(&files_lglock);
-	do_file_list_for_each_entry(sb, f) {
-		if (!file_count(f))
-			continue;
-		if (!(f->f_mode & FMODE_WRITE))
-			continue;
-		spin_lock(&f->f_lock);
-		f->f_mode &= ~FMODE_WRITE;
-		spin_unlock(&f->f_lock);
-		if (file_check_writeable(f) != 0)
-			continue;
-		__mnt_drop_write(f->f_path.mnt);
-		file_release_write(f);
-	} while_file_list_for_each_entry;
-	lg_global_unlock(&files_lglock);
-}
-
 void __init files_init(unsigned long mempages)
 { 
 	unsigned long n;
@@ -483,6 +359,5 @@
 	n = (mempages * (PAGE_SIZE / 1024)) / 10;
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
 	files_defer_init();
-	lg_lock_init(&files_lglock, "files_lglock");
 	percpu_counter_init(&nr_files, 0);
 } 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9f4935b..1f4a10e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -26,6 +26,7 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/tracepoint.h>
+#include <linux/device.h>
 #include "internal.h"
 
 /*
@@ -39,13 +40,18 @@
 struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
-	unsigned long *older_than_this;
+	/*
+	 * Write only inodes dirtied before this time. Don't forget to set
+	 * older_than_this_is_set when you set this.
+	 */
+	unsigned long older_than_this;
 	enum writeback_sync_modes sync_mode;
 	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
 	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
+	unsigned int older_than_this_is_set:1;
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -246,10 +252,10 @@
 	int do_sb_sort = 0;
 	int moved = 0;
 
+	WARN_ON_ONCE(!work->older_than_this_is_set);
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (work->older_than_this &&
-		    inode_dirtied_after(inode, *work->older_than_this))
+		if (inode_dirtied_after(inode, work->older_than_this))
 			break;
 		list_move(&inode->i_wb_list, &tmp);
 		moved++;
@@ -733,6 +739,8 @@
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
 		.reason		= reason,
+		.older_than_this = jiffies,
+		.older_than_this_is_set = 1,
 	};
 
 	spin_lock(&wb->list_lock);
@@ -791,12 +799,13 @@
 {
 	unsigned long wb_start = jiffies;
 	long nr_pages = work->nr_pages;
-	unsigned long oldest_jif;
 	struct inode *inode;
 	long progress;
 
-	oldest_jif = jiffies;
-	work->older_than_this = &oldest_jif;
+	if (!work->older_than_this_is_set) {
+		work->older_than_this = jiffies;
+		work->older_than_this_is_set = 1;
+	}
 
 	spin_lock(&wb->list_lock);
 	for (;;) {
@@ -830,10 +839,10 @@
 		 * safe.
 		 */
 		if (work->for_kupdate) {
-			oldest_jif = jiffies -
+			work->older_than_this = jiffies -
 				msecs_to_jiffies(dirty_expire_interval * 10);
 		} else if (work->for_background)
-			oldest_jif = jiffies;
+			work->older_than_this = jiffies;
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
@@ -1345,18 +1354,21 @@
 
 /**
  * sync_inodes_sb	-	sync sb inode pages
- * @sb: the superblock
+ * @sb:			the superblock
+ * @older_than_this:	timestamp
  *
  * This function writes and waits on any dirty inode belonging to this
- * super_block.
+ * superblock that has been dirtied before given timestamp.
  */
-void sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_ALL,
 		.nr_pages	= LONG_MAX,
+		.older_than_this = older_than_this,
+		.older_than_this_is_set = 1,
 		.range_cyclic	= 0,
 		.done		= &done,
 		.reason		= WB_REASON_SYNC,
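
With sync_inodes_sb() taking an explicit cut-off, a caller that wants the old
"everything dirty right now" semantics passes the current time.  A hedged
sketch of how a call site changes (the sync(2) path is the expected caller;
it is not part of this hunk):

	/* before */
	sync_inodes_sb(sb);

	/* after: only write/wait on inodes dirtied before this instant */
	sync_inodes_sb(sb, jiffies);

That cut-off is what keeps a sync from chasing inodes that are continuously
redirtied while it runs.
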
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index d8ac61d..7dca743 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -161,6 +161,6 @@
 struct fs_struct init_fs = {
 	.users		= 1,
 	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
-	.seq		= SEQCNT_ZERO,
+	.seq		= SEQCNT_ZERO(init_fs.seq),
 	.umask		= 0022,
 };
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index dcb8216..53d35c5 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -799,7 +799,7 @@
  */
 bool fscache_object_sleep_till_congested(signed long *timeoutp)
 {
-	wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
+	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
 	DEFINE_WAIT(wait);
 
 	if (fscache_object_congested())
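
The fscache change is part of the tree-wide migration from __get_cpu_var()
to the preferred this_cpu_ptr() accessor; the address-of form converts
mechanically (some_percpu_wait below stands in for any DEFINE_PER_CPU()
variable):

	/* old: take the address of this CPU's instance */
	wait_queue_head_t *q = &__get_cpu_var(some_percpu_wait);

	/* new: same result via the this_cpu API */
	wait_queue_head_t *q = this_cpu_ptr(&some_percpu_wait);
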
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index adbfd66..b96a49b 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -473,7 +473,7 @@
 static void cuse_fc_release(struct fuse_conn *fc)
 {
 	struct cuse_conn *cc = fc_to_cc(fc);
-	kfree(cc);
+	kfree_rcu(cc, fc.rcu);
 }
 
 /**
@@ -589,11 +589,14 @@
 ATTRIBUTE_GROUPS(cuse_class_dev);
 
 static struct miscdevice cuse_miscdev = {
-	.minor		= MISC_DYNAMIC_MINOR,
+	.minor		= CUSE_MINOR,
 	.name		= "cuse",
 	.fops		= &cuse_channel_fops,
 };
 
+MODULE_ALIAS_MISCDEV(CUSE_MINOR);
+MODULE_ALIAS("devname:cuse");
+
 static int __init cuse_init(void)
 {
 	int i, rc;
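
Moving cuse from MISC_DYNAMIC_MINOR to the fixed CUSE_MINOR, plus the two
aliases, is what enables on-demand loading: the device node can exist before
the module does, and the first open() pulls the module in.  The generic
recipe for a misc driver (foo is a placeholder name):

static struct miscdevice foo_miscdev = {
	.minor	= FOO_MINOR,		/* fixed minor from miscdevice.h */
	.name	= "foo",
	.fops	= &foo_fops,
};

MODULE_ALIAS_MISCDEV(FOO_MINOR);	/* expands to "char-major-10-<minor>" */
MODULE_ALIAS("devname:foo");		/* lets devtmpfs pre-create /dev/foo */
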
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b7989f2..c3eb2c4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -342,24 +342,6 @@
 	return err;
 }
 
-static struct dentry *fuse_materialise_dentry(struct dentry *dentry,
-					      struct inode *inode)
-{
-	struct dentry *newent;
-
-	if (inode && S_ISDIR(inode->i_mode)) {
-		struct fuse_conn *fc = get_fuse_conn(inode);
-
-		mutex_lock(&fc->inst_mutex);
-		newent = d_materialise_unique(dentry, inode);
-		mutex_unlock(&fc->inst_mutex);
-	} else {
-		newent = d_materialise_unique(dentry, inode);
-	}
-
-	return newent;
-}
-
 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
 				  unsigned int flags)
 {
@@ -382,7 +364,7 @@
 	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
 		goto out_iput;
 
-	newent = fuse_materialise_dentry(entry, inode);
+	newent = d_materialise_unique(entry, inode);
 	err = PTR_ERR(newent);
 	if (IS_ERR(newent))
 		goto out_err;
@@ -601,21 +583,9 @@
 	}
 	kfree(forget);
 
-	if (S_ISDIR(inode->i_mode)) {
-		struct dentry *alias;
-		mutex_lock(&fc->inst_mutex);
-		alias = d_find_alias(inode);
-		if (alias) {
-			/* New directory must have moved since mkdir */
-			mutex_unlock(&fc->inst_mutex);
-			dput(alias);
-			iput(inode);
-			return -EBUSY;
-		}
-		d_instantiate(entry, inode);
-		mutex_unlock(&fc->inst_mutex);
-	} else
-		d_instantiate(entry, inode);
+	err = d_instantiate_no_diralias(entry, inode);
+	if (err)
+		return err;
 
 	fuse_change_entry_timeout(entry, &outarg);
 	fuse_invalidate_attr(dir);
@@ -1284,7 +1254,7 @@
 	if (!inode)
 		goto out;
 
-	alias = fuse_materialise_dentry(dentry, inode);
+	alias = d_materialise_unique(dentry, inode);
 	err = PTR_ERR(alias);
 	if (IS_ERR(alias))
 		goto out;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4598345..7e70506 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -334,7 +334,8 @@
 
 		BUG_ON(req->inode != inode);
 		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
-		if (curr_index == index) {
+		if (curr_index <= index &&
+		    index < curr_index + req->num_pages) {
 			found = true;
 			break;
 		}
@@ -1409,8 +1410,13 @@
 
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
-	__free_page(req->pages[0]);
-	fuse_file_put(req->ff, false);
+	int i;
+
+	for (i = 0; i < req->num_pages; i++)
+		__free_page(req->pages[i]);
+
+	if (req->ff)
+		fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1418,30 +1424,34 @@
 	struct inode *inode = req->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+	int i;
 
 	list_del(&req->writepages_entry);
-	dec_bdi_stat(bdi, BDI_WRITEBACK);
-	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
-	bdi_writeout_inc(bdi);
+	for (i = 0; i < req->num_pages; i++) {
+		dec_bdi_stat(bdi, BDI_WRITEBACK);
+		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+		bdi_writeout_inc(bdi);
+	}
 	wake_up(&fi->page_waitq);
 }
 
 /* Called under fc->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
+				loff_t size)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
 	struct fuse_inode *fi = get_fuse_inode(req->inode);
-	loff_t size = i_size_read(req->inode);
 	struct fuse_write_in *inarg = &req->misc.write.in;
+	__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
 
 	if (!fc->connected)
 		goto out_free;
 
-	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
-		inarg->size = PAGE_CACHE_SIZE;
+	if (inarg->offset + data_size <= size) {
+		inarg->size = data_size;
 	} else if (inarg->offset < size) {
-		inarg->size = size & (PAGE_CACHE_SIZE - 1);
+		inarg->size = size - inarg->offset;
 	} else {
 		/* Got truncated off completely */
 		goto out_free;
@@ -1472,12 +1482,13 @@
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
+	size_t crop = i_size_read(inode);
 	struct fuse_req *req;
 
 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
 		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
 		list_del_init(&req->list);
-		fuse_send_writepage(fc, req);
+		fuse_send_writepage(fc, req, crop);
 	}
 }
 
@@ -1488,12 +1499,62 @@
 
 	mapping_set_error(inode->i_mapping, req->out.h.error);
 	spin_lock(&fc->lock);
+	while (req->misc.write.next) {
+		struct fuse_conn *fc = get_fuse_conn(inode);
+		struct fuse_write_in *inarg = &req->misc.write.in;
+		struct fuse_req *next = req->misc.write.next;
+		req->misc.write.next = next->misc.write.next;
+		next->misc.write.next = NULL;
+		next->ff = fuse_file_get(req->ff);
+		list_add(&next->writepages_entry, &fi->writepages);
+
+		/*
+		 * Skip fuse_flush_writepages() to make it easy to crop requests
+		 * based on primary request size.
+		 *
+		 * 1st case (trivial): there are no concurrent activities using
+		 * fuse_set/release_nowrite.  Then we're on safe side because
+		 * fuse_flush_writepages() would call fuse_send_writepage()
+		 * anyway.
+		 *
+		 * 2nd case: someone called fuse_set_nowrite and it is waiting
+		 * now for completion of all in-flight requests.  This happens
+		 * rarely and no more than once per page, so this should be
+		 * okay.
+		 *
+		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
+		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
+		 * that fuse_set_nowrite returned implies that all in-flight
+		 * requests were completed along with all of their secondary
+		 * requests.  Further primary requests are blocked by negative
+		 * writectr.  Hence there cannot be any in-flight requests and
+		 * no invocations of fuse_writepage_end() while we're in
+		 * fuse_set_nowrite..fuse_release_nowrite section.
+		 */
+		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
+	}
 	fi->writectr--;
 	fuse_writepage_finish(fc, req);
 	spin_unlock(&fc->lock);
 	fuse_writepage_free(fc, req);
 }
 
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+					     struct fuse_inode *fi)
+{
+	struct fuse_file *ff = NULL;
+
+	spin_lock(&fc->lock);
+	if (!WARN_ON(list_empty(&fi->write_files))) {
+		ff = list_entry(fi->write_files.next, struct fuse_file,
+				write_entry);
+		fuse_file_get(ff);
+	}
+	spin_unlock(&fc->lock);
+
+	return ff;
+}
+
 static int fuse_writepage_locked(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -1501,8 +1562,8 @@
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_req *req;
-	struct fuse_file *ff;
 	struct page *tmp_page;
+	int error = -ENOMEM;
 
 	set_page_writeback(page);
 
@@ -1515,16 +1576,16 @@
 	if (!tmp_page)
 		goto err_free;
 
-	spin_lock(&fc->lock);
-	BUG_ON(list_empty(&fi->write_files));
-	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
-	req->ff = fuse_file_get(ff);
-	spin_unlock(&fc->lock);
+	error = -EIO;
+	req->ff = fuse_write_file_get(fc, fi);
+	if (!req->ff)
+		goto err_free;
 
-	fuse_write_fill(req, ff, page_offset(page), 0);
+	fuse_write_fill(req, req->ff, page_offset(page), 0);
 
 	copy_highpage(tmp_page, page);
 	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+	req->misc.write.next = NULL;
 	req->in.argpages = 1;
 	req->num_pages = 1;
 	req->pages[0] = tmp_page;
@@ -1550,19 +1611,263 @@
 	fuse_request_free(req);
 err:
 	end_page_writeback(page);
-	return -ENOMEM;
+	return error;
 }
 
 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int err;
 
+	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
+		/*
+		 * ->writepages() should be called for sync() and friends.  We
+		 * should only get here on direct reclaim and then we are
+		 * allowed to skip a page which is already in flight
+		 */
+		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+		redirty_page_for_writepage(wbc, page);
+		return 0;
+	}
+
 	err = fuse_writepage_locked(page);
 	unlock_page(page);
 
 	return err;
 }
 
+struct fuse_fill_wb_data {
+	struct fuse_req *req;
+	struct fuse_file *ff;
+	struct inode *inode;
+	struct page **orig_pages;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_inode *fi = get_fuse_inode(inode);
+	int num_pages = req->num_pages;
+	int i;
+
+	req->ff = fuse_file_get(data->ff);
+	spin_lock(&fc->lock);
+	list_add_tail(&req->list, &fi->queued_writes);
+	fuse_flush_writepages(inode);
+	spin_unlock(&fc->lock);
+
+	for (i = 0; i < num_pages; i++)
+		end_page_writeback(data->orig_pages[i]);
+}
+
+static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+				     struct page *page)
+{
+	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
+	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
+	struct fuse_req *tmp;
+	struct fuse_req *old_req;
+	bool found = false;
+	pgoff_t curr_index;
+
+	BUG_ON(new_req->num_pages != 0);
+
+	spin_lock(&fc->lock);
+	list_del(&new_req->writepages_entry);
+	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
+		BUG_ON(old_req->inode != new_req->inode);
+		curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		if (curr_index <= page->index &&
+		    page->index < curr_index + old_req->num_pages) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		list_add(&new_req->writepages_entry, &fi->writepages);
+		goto out_unlock;
+	}
+
+	new_req->num_pages = 1;
+	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
+		BUG_ON(tmp->inode != new_req->inode);
+		curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		if (tmp->num_pages == 1 &&
+		    curr_index == page->index) {
+			old_req = tmp;
+		}
+	}
+
+	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
+					old_req->state == FUSE_REQ_PENDING)) {
+		struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+
+		copy_highpage(old_req->pages[0], page);
+		spin_unlock(&fc->lock);
+
+		dec_bdi_stat(bdi, BDI_WRITEBACK);
+		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+		bdi_writeout_inc(bdi);
+		fuse_writepage_free(fc, new_req);
+		fuse_request_free(new_req);
+		goto out;
+	} else {
+		new_req->misc.write.next = old_req->misc.write.next;
+		old_req->misc.write.next = new_req;
+	}
+out_unlock:
+	spin_unlock(&fc->lock);
+out:
+	return found;
+}
+
+static int fuse_writepages_fill(struct page *page,
+		struct writeback_control *wbc, void *_data)
+{
+	struct fuse_fill_wb_data *data = _data;
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct page *tmp_page;
+	bool is_writeback;
+	int err;
+
+	if (!data->ff) {
+		err = -EIO;
+		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+		if (!data->ff)
+			goto out_unlock;
+	}
+
+	/*
+	 * Being under writeback is unlikely but possible.  For example, a
+	 * direct read into an mmapped fuse file will set the page dirty
+	 * twice: once when the pages are faulted in with get_user_pages(),
+	 * and again after the read has completed.
+	 */
+	is_writeback = fuse_page_is_writeback(inode, page->index);
+
+	if (req && req->num_pages &&
+	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+		fuse_writepages_send(data);
+		data->req = NULL;
+	}
+	err = -ENOMEM;
+	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (!tmp_page)
+		goto out_unlock;
+
+	/*
+	 * The page must not be redirtied until the writeout is completed
+	 * (i.e. userspace has sent a reply to the write request).  Otherwise
+	 * there could be more than one temporary page instance for each real
+	 * page.
+	 *
+	 * This is ensured by holding the page lock in page_mkwrite() while
+	 * checking fuse_page_is_writeback().  We already hold the page lock
+	 * since clear_page_dirty_for_io() and keep it held until we add the
+	 * request to the fi->writepages list and increment req->num_pages.
+	 * After this fuse_page_is_writeback() will indicate that the page is
+	 * under writeback, so we can release the page lock.
+	 */
+	if (data->req == NULL) {
+		struct fuse_inode *fi = get_fuse_inode(inode);
+
+		err = -ENOMEM;
+		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+		if (!req) {
+			__free_page(tmp_page);
+			goto out_unlock;
+		}
+
+		fuse_write_fill(req, data->ff, page_offset(page), 0);
+		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+		req->misc.write.next = NULL;
+		req->in.argpages = 1;
+		req->background = 1;
+		req->num_pages = 0;
+		req->end = fuse_writepage_end;
+		req->inode = inode;
+
+		spin_lock(&fc->lock);
+		list_add(&req->writepages_entry, &fi->writepages);
+		spin_unlock(&fc->lock);
+
+		data->req = req;
+	}
+	set_page_writeback(page);
+
+	copy_highpage(tmp_page, page);
+	req->pages[req->num_pages] = tmp_page;
+	req->page_descs[req->num_pages].offset = 0;
+	req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+
+	err = 0;
+	if (is_writeback && fuse_writepage_in_flight(req, page)) {
+		end_page_writeback(page);
+		data->req = NULL;
+		goto out_unlock;
+	}
+	data->orig_pages[req->num_pages] = page;
+
+	/*
+	 * Protected by fc->lock against concurrent access by
+	 * fuse_page_is_writeback().
+	 */
+	spin_lock(&fc->lock);
+	req->num_pages++;
+	spin_unlock(&fc->lock);
+
+out_unlock:
+	unlock_page(page);
+
+	return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+			   struct writeback_control *wbc)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_fill_wb_data data;
+	int err;
+
+	err = -EIO;
+	if (is_bad_inode(inode))
+		goto out;
+
+	data.inode = inode;
+	data.req = NULL;
+	data.ff = NULL;
+
+	err = -ENOMEM;
+	data.orig_pages = kzalloc(sizeof(struct page *) *
+				  FUSE_MAX_PAGES_PER_REQ,
+				  GFP_NOFS);
+	if (!data.orig_pages)
+		goto out;
+
+	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+	if (data.req) {
+		/* Ignore errors if we can write at least one page */
+		BUG_ON(!data.req->num_pages);
+		fuse_writepages_send(&data);
+		err = 0;
+	}
+	if (data.ff)
+		fuse_file_put(data.ff, false);
+
+	kfree(data.orig_pages);
+out:
+	return err;
+}
+
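
As a reading aid, the flush test near the top of fuse_writepages_fill() can be restated as a predicate for when a page may join the request being assembled. The helper below (fuse_can_append_page is a hypothetical name, not part of the patch) is a sketch of that rule:

	static bool fuse_can_append_page(struct fuse_conn *fc, struct fuse_req *req,
					 struct page *page, struct page *last,
					 bool is_writeback)
	{
		if (is_writeback)	/* may need the in-flight merge path */
			return false;
		if (req->num_pages == FUSE_MAX_PAGES_PER_REQ)
			return false;	/* request is full */
		if ((req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write)
			return false;	/* would exceed the connection's max_write */
		if (last->index + 1 != page->index)
			return false;	/* request pages must stay contiguous */
		return true;
	}

When this returns false, the partially built request is sent with fuse_writepages_send() and a fresh one is started for the current page.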
 static int fuse_launder_page(struct page *page)
 {
 	int err = 0;
@@ -1602,14 +1907,17 @@
 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
-	/*
-	 * Don't use page->mapping as it may become NULL from a
-	 * concurrent truncate.
-	 */
-	struct inode *inode = vma->vm_file->f_mapping->host;
+	struct inode *inode = file_inode(vma->vm_file);
+
+	file_update_time(vma->vm_file);
+	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		unlock_page(page);
+		return VM_FAULT_NOPAGE;
+	}
 
 	fuse_wait_on_page_writeback(inode, page->index);
-	return 0;
+	return VM_FAULT_LOCKED;
 }
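
Note that the rewritten fault handler returns with the page still locked (VM_FAULT_LOCKED) after re-checking page->mapping against the inode, which closes the race with a concurrent truncate; holding the page lock across fuse_wait_on_page_writeback() is exactly what the writeback-ordering comment in fuse_writepages_fill() above relies on.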
 
 static const struct vm_operations_struct fuse_file_vm_ops = {
@@ -2581,6 +2889,7 @@
 static const struct address_space_operations fuse_file_aops  = {
 	.readpage	= fuse_readpage,
 	.writepage	= fuse_writepage,
+	.writepages	= fuse_writepages,
 	.launder_page	= fuse_launder_page,
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5b9e6f3..7d27309 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -321,6 +321,7 @@
 		struct {
 			struct fuse_write_in in;
 			struct fuse_write_out out;
+			struct fuse_req *next;
 		} write;
 		struct fuse_notify_retrieve_in retrieve_in;
 		struct fuse_lk_in lk_in;
@@ -374,12 +375,11 @@
 	/** Lock protecting accesses to members of this structure */
 	spinlock_t lock;
 
-	/** Mutex protecting against directory alias creation */
-	struct mutex inst_mutex;
-
 	/** Refcount */
 	atomic_t count;
 
+	struct rcu_head rcu;
+
 	/** The user id for this mount */
 	kuid_t user_id;
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a8ce6da..d468643 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -565,7 +565,6 @@
 {
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
-	mutex_init(&fc->inst_mutex);
 	init_rwsem(&fc->killsb);
 	atomic_set(&fc->count, 1);
 	init_waitqueue_head(&fc->waitq);
@@ -596,7 +595,6 @@
 	if (atomic_dec_and_test(&fc->count)) {
 		if (fc->destroy_req)
 			fuse_request_free(fc->destroy_req);
-		mutex_destroy(&fc->inst_mutex);
 		fc->release(fc);
 	}
 }
@@ -920,7 +918,7 @@
 
 static void fuse_free_conn(struct fuse_conn *fc)
 {
-	kfree(fc);
+	kfree_rcu(fc, rcu);
 }
 
 static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 109ce93..1615df1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1514,13 +1514,6 @@
 	return NULL;
 }
 
-static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
-{
-	char *s = nd_get_link(nd);
-	if (!IS_ERR(s))
-		kfree(s);
-}
-
 /**
  * gfs2_permission -
  * @inode: The inode
@@ -1872,7 +1865,7 @@
 const struct inode_operations gfs2_symlink_iops = {
 	.readlink = generic_readlink,
 	.follow_link = gfs2_follow_link,
-	.put_link = gfs2_put_link,
+	.put_link = kfree_put_link,
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 2a1d712..f6bd266 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -153,11 +153,6 @@
 	u32 reserved3[16];
 } __packed;
 
-#define HFS_NODE_INDEX	0x00	/* An internal (index) node */
-#define HFS_NODE_HEADER	0x01	/* The tree header node (node 0) */
-#define HFS_NODE_MAP		0x02	/* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF		0xFF	/* A leaf (ndNHeight==1) node */
-
 #define BTREE_ATTR_BADCLOSE	0x00000001	/* b-tree not closed properly. not
 						   used by hfsplus. */
 #define HFS_TREE_BIGKEYS	0x00000002	/* key length is u16 instead of u8.
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 0c6540c..0fcec8b 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -15,6 +15,118 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
+/*
+ * The initial version of the clump size calculation was taken
+ * from http://opensource.apple.com/tarballs/diskdev_cmds/
+ */
+#define CLUMP_ENTRIES	15
+
+static short clumptbl[CLUMP_ENTRIES * 3] = {
+/*
+ *	    Volume	Attributes	 Catalog	 Extents
+ *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
+ */
+	/*   1GB */	  4,		  4,		 4,
+	/*   2GB */	  6,		  6,		 4,
+	/*   4GB */	  8,		  8,		 4,
+	/*   8GB */	 11,		 11,		 5,
+	/*
+	 * For volumes 16GB and larger, we want to make sure that a full OS
+	 * install won't require fragmentation of the Catalog or Attributes
+	 * B-trees.  We do this by making the clump sizes sufficiently large,
+	 * and by leaving a gap after the B-trees for them to grow into.
+	 *
+	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
+	 * results in:
+	 * Catalog B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       31616
+	 *	freeNodes:         1978
+	 * (used = 231.55 MB)
+	 * Attributes B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       63232
+	 *	freeNodes:          958
+	 * (used = 486.52 MB)
+	 *
+	 * We also want Time Machine backup volumes to have a sufficiently
+	 * large clump size to reduce fragmentation.
+	 *
+	 * The sequences of numbers for Catalog and Attributes each form a
+	 * geometric series.  For Catalog (16GB to 512GB), each term is
+	 * 8**(1/5) times the previous term.  For Attributes (16GB to 512GB),
+	 * each term is 4**(1/5) times the previous term.  For 1TB to 16TB,
+	 * each term is 2**(1/5) times the previous term.
+	 */
+	/*  16GB */	 64,		 32,		 5,
+	/*  32GB */	 84,		 49,		 6,
+	/*  64GB */	111,		 74,		 7,
+	/* 128GB */	147,		111,		 8,
+	/* 256GB */	194,		169,		 9,
+	/* 512GB */	256,		256,		11,
+	/*   1TB */	294,		294,		14,
+	/*   2TB */	338,		338,		16,
+	/*   4TB */	388,		388,		20,
+	/*   8TB */	446,		446,		25,
+	/*  16TB */	512,		512,		32
+};
+
+u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
+					u64 sectors, int file_id)
+{
+	u32 mod = max(node_size, block_size);
+	u32 clump_size;
+	int column;
+	int i;
+
+	/* Figure out which column of the above table to use for this file. */
+	switch (file_id) {
+	case HFSPLUS_ATTR_CNID:
+		column = 0;
+		break;
+	case HFSPLUS_CAT_CNID:
+		column = 1;
+		break;
+	default:
+		column = 2;
+		break;
+	}
+
+	/*
+	 * The default clump size is 0.8% of the volume size, and
+	 * it must also be a multiple of the node and block size.
+	 */
+	if (sectors < 0x200000) {
+		clump_size = sectors << 2;	/*  0.8 %  */
+		if (clump_size < (8 * node_size))
+			clump_size = 8 * node_size;
+	} else {
+		/* turn exponent into table index... */
+		for (i = 0, sectors = sectors >> 22;
+		     sectors && (i < CLUMP_ENTRIES - 1);
+		     ++i, sectors = sectors >> 1) {
+			/* empty body */
+		}
+
+		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
+	}
+
+	/*
+	 * Round the clump size to a multiple of node and block size.
+	 * NOTE: This rounds down.
+	 */
+	clump_size /= mod;
+	clump_size *= mod;
+
+	/*
+	 * Rounding down could have produced 0 if the block size was
+	 * greater than the clump size.  If so, just use one block or node.
+	 */
+	if (clump_size == 0)
+		clump_size = mod;
+
+	return clump_size;
+}
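
As a sanity check of the two branches, assuming 512-byte sectors: for volumes below 0x200000 sectors (1GB), clump_size = sectors << 2 is 4 bytes per sector, and 0.008 * 512 = 4.096, hence the "0.8%" in the comment. For a 16GB volume (2^25 sectors), sectors >> 22 gives 8, and the loop takes four more iterations to shift that down to 0, leaving i = 4 and selecting the 16GB row of clumptbl: 64MB for Attributes, 32MB for Catalog, 5MB for Extents.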
 
 /* Get a reference to a B*Tree and do some initial checks */
 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 2b9cd016..08846425b 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -127,6 +127,14 @@
 #define HFS_BNODE_DELETED	4
 
 /*
+ * Attributes file states
+ */
+#define HFSPLUS_EMPTY_ATTR_TREE		0
+#define HFSPLUS_CREATING_ATTR_TREE	1
+#define HFSPLUS_VALID_ATTR_TREE		2
+#define HFSPLUS_FAILED_ATTR_TREE	3
+
+/*
  * HFS+ superblock info (built from Volume Header on disk)
  */
 
@@ -141,6 +149,7 @@
 	struct hfs_btree *ext_tree;
 	struct hfs_btree *cat_tree;
 	struct hfs_btree *attr_tree;
+	atomic_t attr_tree_state;
 	struct inode *alloc_file;
 	struct inode *hidden_dir;
 	struct nls_table *nls;
@@ -380,6 +389,7 @@
 int hfsplus_block_free(struct super_block *, u32, u32);
 
 /* btree.c */
+u32 hfsplus_calc_btree_clump_size(u32, u32, u64, int);
 struct hfs_btree *hfs_btree_open(struct super_block *, u32);
 void hfs_btree_close(struct hfs_btree *);
 int hfs_btree_write(struct hfs_btree *);
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 452ede0..8ffb3a8 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -156,10 +156,10 @@
 } __packed;
 
 /* HFS+ BTree node types */
-#define HFS_NODE_INDEX	0x00
-#define HFS_NODE_HEADER	0x01
-#define HFS_NODE_MAP	0x02
-#define HFS_NODE_LEAF	0xFF
+#define HFS_NODE_INDEX	0x00	/* An internal (index) node */
+#define HFS_NODE_HEADER	0x01	/* The tree header node (node 0) */
+#define HFS_NODE_MAP	0x02	/* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF	0xFF	/* A leaf (ndNHeight==1) node */
 
 /* HFS+ BTree header */
 struct hfs_btree_header_rec {
@@ -187,6 +187,9 @@
 /* HFS+ BTree misc info */
 #define HFSPLUS_TREE_HEAD 0
 #define HFSPLUS_NODE_MXSZ 32768
+#define HFSPLUS_ATTR_TREE_NODE_SIZE		8192
+#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT	3
+#define HFSPLUS_BTREE_HDR_USER_BYTES		128
 
 /* Some special File ID numbers (stolen from hfs.h) */
 #define HFSPLUS_POR_CNID		1	/* Parent Of the Root */
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 4c4d142..80875aa 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -474,12 +474,14 @@
 		pr_err("failed to load catalog file\n");
 		goto out_close_ext_tree;
 	}
+	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
 	if (vhdr->attr_file.total_blocks != 0) {
 		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
 		if (!sbi->attr_tree) {
 			pr_err("failed to load attributes file\n");
 			goto out_close_cat_tree;
 		}
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
 	}
 	sb->s_xattr = hfsplus_xattr_handlers;
 
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index bd8471f..3c6136f 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -127,6 +127,211 @@
 	return 0;
 }
 
+static void hfsplus_init_header_node(struct inode *attr_file,
+					u32 clump_size,
+					char *buf, u16 node_size)
+{
+	struct hfs_bnode_desc *desc;
+	struct hfs_btree_header_rec *head;
+	u16 offset;
+	__be16 *rec_offsets;
+	u32 hdr_node_map_rec_bits;
+	char *bmp;
+	u32 used_nodes;
+	u32 used_bmp_bytes;
+	loff_t tmp;
+
+	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
+				clump_size, node_size);
+
+	/* The end of the node contains the list of record offsets */
+	rec_offsets = (__be16 *)(buf + node_size);
+
+	desc = (struct hfs_bnode_desc *)buf;
+	desc->type = HFS_NODE_HEADER;
+	desc->num_recs = cpu_to_be16(HFSPLUS_BTREE_HDR_NODE_RECS_COUNT);
+	offset = sizeof(struct hfs_bnode_desc);
+	*--rec_offsets = cpu_to_be16(offset);
+
+	head = (struct hfs_btree_header_rec *)(buf + offset);
+	head->node_size = cpu_to_be16(node_size);
+	tmp = i_size_read(attr_file);
+	do_div(tmp, node_size);
+	head->node_count = cpu_to_be32(tmp);
+	head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
+	head->clump_size = cpu_to_be32(clump_size);
+	head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
+	head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16));
+	offset += sizeof(struct hfs_btree_header_rec);
+	*--rec_offsets = cpu_to_be16(offset);
+	offset += HFSPLUS_BTREE_HDR_USER_BYTES;
+	*--rec_offsets = cpu_to_be16(offset);
+
+	hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
+	if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) {
+		u32 map_node_bits;
+		u32 map_nodes;
+
+		desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1);
+		map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
+					(2 * sizeof(u16)) - 2);
+		map_nodes = (be32_to_cpu(head->node_count) -
+				hdr_node_map_rec_bits +
+				(map_node_bits - 1)) / map_node_bits;
+		be32_add_cpu(&head->free_nodes, 0 - map_nodes);
+	}
+
+	bmp = buf + offset;
+	used_nodes =
+		be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes);
+	used_bmp_bytes = used_nodes / 8;
+	if (used_bmp_bytes) {
+		memset(bmp, 0xFF, used_bmp_bytes);
+		bmp += used_bmp_bytes;
+		used_nodes %= 8;
+	}
+	*bmp = ~(0xFF >> used_nodes);
+	offset += hdr_node_map_rec_bits / 8;
+	*--rec_offsets = cpu_to_be16(offset);
+}
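
Worked example for the usual case, where the node map fits in the header node and only the header node itself is in use: used_nodes = node_count - free_nodes = 1, so used_bmp_bytes = 0, the memset is skipped, and *bmp = ~(0xFF >> 1) = 0x80, i.e. only the most significant bit of the map (node 0, the header node) is marked allocated.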
+
+static int hfsplus_create_attributes_file(struct super_block *sb)
+{
+	int err = 0;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+	struct inode *attr_file;
+	struct hfsplus_inode_info *hip;
+	u32 clump_size;
+	u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
+	char *buf;
+	int index, written;
+	struct address_space *mapping;
+	struct page *page;
+	int old_state = HFSPLUS_EMPTY_ATTR_TREE;
+
+	hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);
+
+check_attr_tree_state_again:
+	switch (atomic_read(&sbi->attr_tree_state)) {
+	case HFSPLUS_EMPTY_ATTR_TREE:
+		if (old_state != atomic_cmpxchg(&sbi->attr_tree_state,
+						old_state,
+						HFSPLUS_CREATING_ATTR_TREE))
+			goto check_attr_tree_state_again;
+		break;
+	case HFSPLUS_CREATING_ATTR_TREE:
+		/*
+		 * This state means that another thread is in the middle
+		 * of creating the attributes file.  In theory we can get
+		 * here, but in practice __setxattr() first calls
+		 * hfs_find_init() to look up the catalog file's B-tree,
+		 * which takes that B-tree's mutex.  So while one thread
+		 * is creating the attributes file, any other thread will
+		 * block waiting for the catalog B-tree's mutex to be
+		 * released.  Should that ever change, we would return
+		 * -EAGAIN from here: the first attempt to set an xattr
+		 * would fail, but a retry would succeed.
+		 */
+		return -EAGAIN;
+	case HFSPLUS_VALID_ATTR_TREE:
+		return 0;
+	case HFSPLUS_FAILED_ATTR_TREE:
+		return -EOPNOTSUPP;
+	default:
+		BUG();
+	}
+
+	attr_file = hfsplus_iget(sb, HFSPLUS_ATTR_CNID);
+	if (IS_ERR(attr_file)) {
+		pr_err("failed to load attributes file\n");
+		return PTR_ERR(attr_file);
+	}
+
+	BUG_ON(i_size_read(attr_file) != 0);
+
+	hip = HFSPLUS_I(attr_file);
+
+	clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
+						    node_size,
+						    sbi->sect_count,
+						    HFSPLUS_ATTR_CNID);
+
+	mutex_lock(&hip->extents_lock);
+	hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift;
+	mutex_unlock(&hip->extents_lock);
+
+	if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
+		err = -ENOSPC;
+		goto end_attr_file_creation;
+	}
+
+	while (hip->alloc_blocks < hip->clump_blocks) {
+		err = hfsplus_file_extend(attr_file);
+		if (unlikely(err)) {
+			pr_err("failed to extend attributes file\n");
+			goto end_attr_file_creation;
+		}
+		hip->phys_size = attr_file->i_size =
+			(loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift;
+		hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift;
+		inode_set_bytes(attr_file, attr_file->i_size);
+	}
+
+	buf = kzalloc(node_size, GFP_NOFS);
+	if (!buf) {
+		pr_err("failed to allocate memory for header node\n");
+		err = -ENOMEM;
+		goto end_attr_file_creation;
+	}
+
+	hfsplus_init_header_node(attr_file, clump_size, buf, node_size);
+
+	mapping = attr_file->i_mapping;
+
+	index = 0;
+	written = 0;
+	for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+		void *kaddr;
+
+		page = read_mapping_page(mapping, index, NULL);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto failed_header_node_init;
+		}
+
+		kaddr = kmap_atomic(page);
+		memcpy(kaddr, buf + written,
+			min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+		kunmap_atomic(kaddr);
+
+		set_page_dirty(page);
+		page_cache_release(page);
+	}
+
+	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
+
+	sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
+	if (!sbi->attr_tree)
+		pr_err("failed to load attributes file\n");
+
+failed_header_node_init:
+	kfree(buf);
+
+end_attr_file_creation:
+	iput(attr_file);
+
+	if (!err)
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
+	else if (err == -ENOSPC)
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
+	else
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_FAILED_ATTR_TREE);
+
+	return err;
+}
+
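
For reference, the attr_tree_state transitions implemented above: EMPTY -> CREATING is claimed with atomic_cmpxchg(), and a loser of that race loops back to re-read the state; on success CREATING -> VALID; on -ENOSPC CREATING -> EMPTY, so a later setxattr can retry once space has been freed; on any other failure CREATING -> FAILED, after which __hfsplus_setxattr() sees -EOPNOTSUPP.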
 int __hfsplus_setxattr(struct inode *inode, const char *name,
 			const void *value, size_t size, int flags)
 {
@@ -211,8 +416,9 @@
 	}
 
 	if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
-		err = -EOPNOTSUPP;
-		goto end_setxattr;
+		err = hfsplus_create_attributes_file(inode->i_sb);
+		if (unlikely(err))
+			goto end_setxattr;
 	}
 
 	if (hfsplus_attr_exists(inode, name)) {
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 1b39863..6797bf8 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -80,6 +80,7 @@
 	unsigned sb_c_bitmap;		/* current bitmap */
 	unsigned sb_max_fwd_alloc;	/* max forward allocation */
 	int sb_timeshift;
+	struct rcu_head rcu;
 };
 
 /* Four 512-byte buffers and the 2k block obtained by concatenating them */
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 345713d..1b39afd 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -407,7 +407,7 @@
 			/*printk("HPFS: truncating file before delete.\n");*/
 			newattrs.ia_size = 0;
 			newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
-			err = notify_change(dentry, &newattrs);
+			err = notify_change(dentry, &newattrs, NULL);
 			put_write_access(inode);
 			if (!err)
 				goto again;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 4334cda..b8d01ef 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -101,18 +101,24 @@
 	return 0;
 }
 
+static void free_sbi(struct hpfs_sb_info *sbi)
+{
+	kfree(sbi->sb_cp_table);
+	kfree(sbi->sb_bmp_dir);
+	kfree(sbi);
+}
+
+static void lazy_free_sbi(struct rcu_head *rcu)
+{
+	free_sbi(container_of(rcu, struct hpfs_sb_info, rcu));
+}
+
 static void hpfs_put_super(struct super_block *s)
 {
-	struct hpfs_sb_info *sbi = hpfs_sb(s);
-
 	hpfs_lock(s);
 	unmark_dirty(s);
 	hpfs_unlock(s);
-
-	kfree(sbi->sb_cp_table);
-	kfree(sbi->sb_bmp_dir);
-	s->s_fs_info = NULL;
-	kfree(sbi);
+	call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi);
 }
 
 unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
@@ -485,9 +491,6 @@
 	}
 	s->s_fs_info = sbi;
 
-	sbi->sb_bmp_dir = NULL;
-	sbi->sb_cp_table = NULL;
-
 	mutex_init(&sbi->hpfs_mutex);
 	hpfs_lock(s);
 
@@ -679,10 +682,7 @@
 bail1:
 bail0:
 	hpfs_unlock(s);
-	kfree(sbi->sb_bmp_dir);
-	kfree(sbi->sb_cp_table);
-	s->s_fs_info = NULL;
-	kfree(sbi);
+	free_sbi(sbi);
 	return -EINVAL;
 }
 
diff --git a/fs/inode.c b/fs/inode.c
index b33ba8e..4bcdad3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -773,15 +773,11 @@
 
 repeat:
 	hlist_for_each_entry(inode, head, i_hash) {
+		if (inode->i_sb != sb)
+			continue;
+		if (!test(inode, data))
+			continue;
 		spin_lock(&inode->i_lock);
-		if (inode->i_sb != sb) {
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
-		if (!test(inode, data)) {
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -804,15 +800,11 @@
 
 repeat:
 	hlist_for_each_entry(inode, head, i_hash) {
+		if (inode->i_ino != ino)
+			continue;
+		if (inode->i_sb != sb)
+			continue;
 		spin_lock(&inode->i_lock);
-		if (inode->i_ino != ino) {
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
-		if (inode->i_sb != sb) {
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -951,6 +943,42 @@
 EXPORT_SYMBOL(unlock_new_inode);
 
 /**
+ * lock_two_nondirectories - take two i_mutexes on non-directory objects
+ * @inode1: first inode to lock
+ * @inode2: second inode to lock
+ */
+void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
+{
+	WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
+	if (inode1 == inode2 || !inode2) {
+		mutex_lock(&inode1->i_mutex);
+		return;
+	}
+	WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
+	if (inode1 < inode2) {
+		mutex_lock(&inode1->i_mutex);
+		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
+	} else {
+		mutex_lock(&inode2->i_mutex);
+		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_NONDIR2);
+	}
+}
+EXPORT_SYMBOL(lock_two_nondirectories);
+
+/**
+ * unlock_two_nondirectories - release locks from lock_two_nondirectories()
+ * @inode1: first inode to unlock
+ * @inode2: second inode to unlock
+ */
+void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
+{
+	mutex_unlock(&inode1->i_mutex);
+	if (inode2 && inode2 != inode1)
+		mutex_unlock(&inode2->i_mutex);
+}
+EXPORT_SYMBOL(unlock_two_nondirectories);
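
A minimal usage sketch for the new pair of helpers (the two inode pointers are illustrative); because lock_two_nondirectories() always takes the lower-addressed inode's i_mutex first, two tasks locking the same pair in opposite order cannot deadlock:

	lock_two_nondirectories(source, target);
	/* both i_mutexes held, acquired in a globally consistent order */
	...
	unlock_two_nondirectories(source, target);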
+
+/**
  * iget5_locked - obtain an inode from a mounted file system
  * @sb:		super block of file system
  * @hashval:	hash value (usually inode number) to get
@@ -1575,7 +1603,11 @@
 	struct iattr newattrs;
 
 	newattrs.ia_valid = ATTR_FORCE | kill;
-	return notify_change(dentry, &newattrs);
+	/*
+	 * Note we call this on write, so notify_change will not
+	 * encounter any conflicting delegations:
+	 */
+	return notify_change(dentry, &newattrs, NULL);
 }
 
 int file_remove_suid(struct file *file)
diff --git a/fs/internal.h b/fs/internal.h
index 513e0d8..4657424 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -9,8 +9,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lglock.h>
-
 struct super_block;
 struct file_system_type;
 struct linux_binprm;
@@ -62,8 +60,6 @@
 
 extern void __init mnt_init(void);
 
-extern struct lglock vfsmount_lock;
-
 extern int __mnt_want_write(struct vfsmount *);
 extern int __mnt_want_write_file(struct file *);
 extern void __mnt_drop_write(struct vfsmount *);
@@ -77,9 +73,6 @@
 /*
  * file_table.c
  */
-extern void file_sb_list_add(struct file *f, struct super_block *sb);
-extern void file_sb_list_del(struct file *f);
-extern void mark_files_ro(struct super_block *);
 extern struct file *get_empty_filp(void);
 
 /*
diff --git a/fs/ioctl.c b/fs/ioctl.c
index fd507fb..8ac3fad 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -37,7 +37,7 @@
 {
 	int error = -ENOTTY;
 
-	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+	if (!filp->f_op->unlocked_ioctl)
 		goto out;
 
 	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
@@ -501,7 +501,7 @@
 
 	/* Did FASYNC state change ? */
 	if ((flag ^ filp->f_flags) & FASYNC) {
-		if (filp->f_op && filp->f_op->fasync)
+		if (filp->f_op->fasync)
 			/* fasync() adjusts filp->f_flags */
 			error = filp->f_op->fasync(fd, filp, on);
 		else
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index e5d408a..4a9e10e 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -181,7 +181,7 @@
  * Compute the hash for the isofs name corresponding to the dentry.
  */
 static int
-isofs_hash_common(const struct dentry *dentry, struct qstr *qstr, int ms)
+isofs_hash_common(struct qstr *qstr, int ms)
 {
 	const char *name;
 	int len;
@@ -202,7 +202,7 @@
  * Compute the hash for the isofs name corresponding to the dentry.
  */
 static int
-isofs_hashi_common(const struct dentry *dentry, struct qstr *qstr, int ms)
+isofs_hashi_common(struct qstr *qstr, int ms)
 {
 	const char *name;
 	int len;
@@ -259,13 +259,13 @@
 static int
 isofs_hash(const struct dentry *dentry, struct qstr *qstr)
 {
-	return isofs_hash_common(dentry, qstr, 0);
+	return isofs_hash_common(qstr, 0);
 }
 
 static int
 isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
 {
-	return isofs_hashi_common(dentry, qstr, 0);
+	return isofs_hashi_common(qstr, 0);
 }
 
 static int
@@ -286,13 +286,13 @@
 static int
 isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
 {
-	return isofs_hash_common(dentry, qstr, 1);
+	return isofs_hash_common(qstr, 1);
 }
 
 static int
 isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr)
 {
-	return isofs_hashi_common(dentry, qstr, 1);
+	return isofs_hashi_common(qstr, 1);
 }
 
 static int
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index be0c39b..aa603e0 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -26,7 +26,6 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hrtimer.h>
-#include <linux/backing-dev.h>
 
 static void __journal_temp_unlink_buffer(struct journal_head *jh);
 
@@ -100,10 +99,11 @@
 
 alloc_transaction:
 	if (!journal->j_running_transaction) {
-		new_transaction = kzalloc(sizeof(*new_transaction), GFP_NOFS);
+		new_transaction = kzalloc(sizeof(*new_transaction),
+						GFP_NOFS|__GFP_NOFAIL);
 		if (!new_transaction) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto alloc_transaction;
+			ret = -ENOMEM;
+			goto out;
 		}
 	}
 
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index fe3c052..09b3ed4 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -515,6 +515,10 @@
 
 	c = JFFS2_SB_INFO(sb);
 
+	/* MLC NAND is not supported */
+	if (c->mtd->type == MTD_MLCNANDFLASH)
+		return -EINVAL;
+
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
 	if (c->mtd->type == MTD_NANDFLASH) {
 		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
diff --git a/fs/libfs.c b/fs/libfs.c
index 3a3a9b5..5de0694 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -10,6 +10,7 @@
 #include <linux/vfs.h>
 #include <linux/quotaops.h>
 #include <linux/mutex.h>
+#include <linux/namei.h>
 #include <linux/exportfs.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
@@ -31,6 +32,7 @@
 	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
 	return 0;
 }
+EXPORT_SYMBOL(simple_getattr);
 
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -39,6 +41,7 @@
 	buf->f_namelen = NAME_MAX;
 	return 0;
 }
+EXPORT_SYMBOL(simple_statfs);
 
 /*
  * Retaining negative dentries for an in-memory filesystem just wastes
@@ -66,6 +69,7 @@
 	d_add(dentry, NULL);
 	return NULL;
 }
+EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
@@ -75,12 +79,14 @@
 
 	return file->private_data ? 0 : -ENOMEM;
 }
+EXPORT_SYMBOL(dcache_dir_open);
 
 int dcache_dir_close(struct inode *inode, struct file *file)
 {
 	dput(file->private_data);
 	return 0;
 }
+EXPORT_SYMBOL(dcache_dir_close);
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
@@ -123,6 +129,7 @@
 	mutex_unlock(&dentry->d_inode->i_mutex);
 	return offset;
 }
+EXPORT_SYMBOL(dcache_dir_lseek);
 
 /* Relationship between i_mode and the DT_xxx types */
 static inline unsigned char dt_type(struct inode *inode)
@@ -172,11 +179,13 @@
 	spin_unlock(&dentry->d_lock);
 	return 0;
 }
+EXPORT_SYMBOL(dcache_readdir);
 
 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
 {
 	return -EISDIR;
 }
+EXPORT_SYMBOL(generic_read_dir);
 
 const struct file_operations simple_dir_operations = {
 	.open		= dcache_dir_open,
@@ -186,10 +195,12 @@
 	.iterate	= dcache_readdir,
 	.fsync		= noop_fsync,
 };
+EXPORT_SYMBOL(simple_dir_operations);
 
 const struct inode_operations simple_dir_inode_operations = {
 	.lookup		= simple_lookup,
 };
+EXPORT_SYMBOL(simple_dir_inode_operations);
 
 static const struct super_operations simple_super_operations = {
 	.statfs		= simple_statfs,
@@ -244,6 +255,7 @@
 	deactivate_locked_super(s);
 	return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL(mount_pseudo);
 
 int simple_open(struct inode *inode, struct file *file)
 {
@@ -251,6 +263,7 @@
 		file->private_data = inode->i_private;
 	return 0;
 }
+EXPORT_SYMBOL(simple_open);
 
 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
@@ -263,6 +276,7 @@
 	d_instantiate(dentry, inode);
 	return 0;
 }
+EXPORT_SYMBOL(simple_link);
 
 int simple_empty(struct dentry *dentry)
 {
@@ -283,6 +297,7 @@
 	spin_unlock(&dentry->d_lock);
 	return ret;
 }
+EXPORT_SYMBOL(simple_empty);
 
 int simple_unlink(struct inode *dir, struct dentry *dentry)
 {
@@ -293,6 +308,7 @@
 	dput(dentry);
 	return 0;
 }
+EXPORT_SYMBOL(simple_unlink);
 
 int simple_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -304,6 +320,7 @@
 	drop_nlink(dir);
 	return 0;
 }
+EXPORT_SYMBOL(simple_rmdir);
 
 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
 		struct inode *new_dir, struct dentry *new_dentry)
@@ -330,6 +347,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(simple_rename);
 
 /**
  * simple_setattr - setattr for simple filesystem
@@ -370,6 +388,7 @@
 	unlock_page(page);
 	return 0;
 }
+EXPORT_SYMBOL(simple_readpage);
 
 int simple_write_begin(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
@@ -393,6 +412,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(simple_write_begin);
 
 /**
  * simple_write_end - .write_end helper for non-block-device FSes
@@ -444,6 +464,7 @@
 
 	return copied;
 }
+EXPORT_SYMBOL(simple_write_end);
 
 /*
  * the inodes created here are not hashed. If you use iunique to generate
@@ -512,6 +533,7 @@
 	dput(root);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(simple_fill_super);
 
 static DEFINE_SPINLOCK(pin_fs_lock);
 
@@ -534,6 +556,7 @@
 	mntput(mnt);
 	return 0;
 }
+EXPORT_SYMBOL(simple_pin_fs);
 
 void simple_release_fs(struct vfsmount **mount, int *count)
 {
@@ -545,6 +568,7 @@
 	spin_unlock(&pin_fs_lock);
 	mntput(mnt);
 }
+EXPORT_SYMBOL(simple_release_fs);
 
 /**
  * simple_read_from_buffer - copy data from the buffer to user space
@@ -579,6 +603,7 @@
 	*ppos = pos + count;
 	return count;
 }
+EXPORT_SYMBOL(simple_read_from_buffer);
 
 /**
  * simple_write_to_buffer - copy data from user space to the buffer
@@ -613,6 +638,7 @@
 	*ppos = pos + count;
 	return count;
 }
+EXPORT_SYMBOL(simple_write_to_buffer);
 
 /**
  * memory_read_from_buffer - copy data from the buffer
@@ -644,6 +670,7 @@
 
 	return count;
 }
+EXPORT_SYMBOL(memory_read_from_buffer);
 
 /*
  * Transaction based IO.
@@ -665,6 +692,7 @@
 	smp_mb();
 	ar->size = n;
 }
+EXPORT_SYMBOL(simple_transaction_set);
 
 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
 {
@@ -696,6 +724,7 @@
 
 	return ar->data;
 }
+EXPORT_SYMBOL(simple_transaction_get);
 
 ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
@@ -705,12 +734,14 @@
 		return 0;
 	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
 }
+EXPORT_SYMBOL(simple_transaction_read);
 
 int simple_transaction_release(struct inode *inode, struct file *file)
 {
 	free_page((unsigned long)file->private_data);
 	return 0;
 }
+EXPORT_SYMBOL(simple_transaction_release);
 
 /* Simple attribute files */
 
@@ -746,12 +777,14 @@
 
 	return nonseekable_open(inode, file);
 }
+EXPORT_SYMBOL_GPL(simple_attr_open);
 
 int simple_attr_release(struct inode *inode, struct file *file)
 {
 	kfree(file->private_data);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(simple_attr_release);	/* GPL-only?  This?  Really? */
 
 /* read from the buffer that is filled with the get function */
 ssize_t simple_attr_read(struct file *file, char __user *buf,
@@ -787,6 +820,7 @@
 	mutex_unlock(&attr->mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_read);
 
 /* interpret the buffer as a number to call the set function with */
 ssize_t simple_attr_write(struct file *file, const char __user *buf,
@@ -819,6 +853,7 @@
 	mutex_unlock(&attr->mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_write);
 
 /**
  * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
@@ -957,39 +992,56 @@
 {
 	return 0;
 }
-
-EXPORT_SYMBOL(dcache_dir_close);
-EXPORT_SYMBOL(dcache_dir_lseek);
-EXPORT_SYMBOL(dcache_dir_open);
-EXPORT_SYMBOL(dcache_readdir);
-EXPORT_SYMBOL(generic_read_dir);
-EXPORT_SYMBOL(mount_pseudo);
-EXPORT_SYMBOL(simple_write_begin);
-EXPORT_SYMBOL(simple_write_end);
-EXPORT_SYMBOL(simple_dir_inode_operations);
-EXPORT_SYMBOL(simple_dir_operations);
-EXPORT_SYMBOL(simple_empty);
-EXPORT_SYMBOL(simple_fill_super);
-EXPORT_SYMBOL(simple_getattr);
-EXPORT_SYMBOL(simple_open);
-EXPORT_SYMBOL(simple_link);
-EXPORT_SYMBOL(simple_lookup);
-EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_SYMBOL(simple_readpage);
-EXPORT_SYMBOL(simple_release_fs);
-EXPORT_SYMBOL(simple_rename);
-EXPORT_SYMBOL(simple_rmdir);
-EXPORT_SYMBOL(simple_statfs);
 EXPORT_SYMBOL(noop_fsync);
-EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
-EXPORT_SYMBOL(simple_write_to_buffer);
-EXPORT_SYMBOL(memory_read_from_buffer);
-EXPORT_SYMBOL(simple_transaction_set);
-EXPORT_SYMBOL(simple_transaction_get);
-EXPORT_SYMBOL(simple_transaction_read);
-EXPORT_SYMBOL(simple_transaction_release);
-EXPORT_SYMBOL_GPL(simple_attr_open);
-EXPORT_SYMBOL_GPL(simple_attr_release);
-EXPORT_SYMBOL_GPL(simple_attr_read);
-EXPORT_SYMBOL_GPL(simple_attr_write);
+
+void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
+				void *cookie)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s))
+		kfree(s);
+}
+EXPORT_SYMBOL(kfree_put_link);
+
+/*
+ * A no-op .set_page_dirty method so that people can use .page_mkwrite on
+ * anon inodes.
+ */
+static int anon_set_page_dirty(struct page *page)
+{
+	return 0;
+};
+
+/*
+ * A single inode exists for all anon_inode files.  Unlike pipes,
+ * anon_inode inodes have no associated per-instance data, so we need
+ * only allocate one of them.
+ */
+struct inode *alloc_anon_inode(struct super_block *s)
+{
+	static const struct address_space_operations anon_aops = {
+		.set_page_dirty = anon_set_page_dirty,
+	};
+	struct inode *inode = new_inode_pseudo(s);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	inode->i_ino = get_next_ino();
+	inode->i_mapping->a_ops = &anon_aops;
+
+	/*
+	 * Mark the inode dirty from the very beginning, so
+	 * that it will never be moved to the dirty list:
+	 * mark_inode_dirty() will think that it already
+	 * _is_ on the dirty list.
+	 */
+	inode->i_state = I_DIRTY;
+	inode->i_mode = S_IRUSR | S_IWUSR;
+	inode->i_uid = current_fsuid();
+	inode->i_gid = current_fsgid();
+	inode->i_flags |= S_PRIVATE;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	return inode;
+}
+EXPORT_SYMBOL(alloc_anon_inode);
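
A hedged caller sketch for the new helper, with sb standing for whatever pseudo-filesystem superblock the caller has set up:

	struct inode *inode = alloc_anon_inode(sb);
	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* only ERR_PTR(-ENOMEM) can occur here */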
diff --git a/fs/locks.c b/fs/locks.c
index b27a300..92a0f0a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -134,7 +134,7 @@
 
 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
-#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
+#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG))
 
 static bool lease_breaking(struct file_lock *fl)
 {
@@ -1292,28 +1292,40 @@
 	}
 }
 
+static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
+{
+	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
+		return false;
+	return locks_conflict(breaker, lease);
+}
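
Concretely: a breaker whose fl_flags are FL_DELEG (a new delegation request) skips over plain FL_LEASE leases, while an FL_LEASE breaker (an open or truncate breaking leases) is tested against both leases and delegations through the usual read/write check in locks_conflict().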
+
 /**
  *	__break_lease	-	revoke all outstanding leases on file
  *	@inode: the inode of the file to return
- *	@mode: the open mode (read or write)
+ *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
+ *	    break all leases
+ *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
+ *	    only delegations
  *
  *	break_lease (inlined for speed) has checked there already is at least
  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
  *	a call to open() or truncate().  This function can sleep unless you
  *	specified %O_NONBLOCK to your open().
  */
-int __break_lease(struct inode *inode, unsigned int mode)
+int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
 	int error = 0;
 	struct file_lock *new_fl, *flock;
 	struct file_lock *fl;
 	unsigned long break_time;
 	int i_have_this_lease = 0;
+	bool lease_conflict = false;
 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
 	if (IS_ERR(new_fl))
 		return PTR_ERR(new_fl);
+	new_fl->fl_flags = type;
 
 	spin_lock(&inode->i_lock);
 
@@ -1323,13 +1335,16 @@
 	if ((flock == NULL) || !IS_LEASE(flock))
 		goto out;
 
-	if (!locks_conflict(flock, new_fl))
+	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
+		if (leases_conflict(fl, new_fl)) {
+			lease_conflict = true;
+			if (fl->fl_owner == current->files)
+				i_have_this_lease = 1;
+		}
+	}
+	if (!lease_conflict)
 		goto out;
 
-	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
-		if (fl->fl_owner == current->files)
-			i_have_this_lease = 1;
-
 	break_time = 0;
 	if (lease_break_time > 0) {
 		break_time = jiffies + lease_break_time * HZ;
@@ -1338,6 +1353,8 @@
 	}
 
 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
+		if (!leases_conflict(fl, new_fl))
+			continue;
 		if (want_write) {
 			if (fl->fl_flags & FL_UNLOCK_PENDING)
 				continue;
@@ -1379,7 +1396,7 @@
 		 */
 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
 				flock = flock->fl_next) {
-			if (locks_conflict(new_fl, flock))
+			if (leases_conflict(new_fl, flock))
 				goto restart;
 		}
 		error = 0;
@@ -1460,9 +1477,27 @@
 	struct file_lock *fl, **before, **my_before = NULL, *lease;
 	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
+	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
 	int error;
 
 	lease = *flp;
+	/*
+	 * In the delegation case we need mutual exclusion with
+	 * a number of operations that take the i_mutex.  We trylock
+	 * because delegations are an optional optimization; if
+	 * there's some chance of a conflict we'd rather not bother.
+	 * Maybe that's a sign this just isn't a good file to
+	 * hand out a delegation on.
+	 */
+	if (is_deleg && !mutex_trylock(&inode->i_mutex))
+		return -EAGAIN;
+
+	if (is_deleg && arg == F_WRLCK) {
+		/* Write delegations are not currently supported: */
+		mutex_unlock(&inode->i_mutex);
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
 
 	error = -EAGAIN;
 	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
@@ -1514,9 +1549,10 @@
 		goto out;
 
 	locks_insert_lock(before, lease);
-	return 0;
-
+	error = 0;
 out:
+	if (is_deleg)
+		mutex_unlock(&inode->i_mutex);
 	return error;
 }
 
@@ -1579,7 +1615,7 @@
 
 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
 {
-	if (filp->f_op && filp->f_op->setlease)
+	if (filp->f_op->setlease)
 		return filp->f_op->setlease(filp, arg, lease);
 	else
 		return generic_setlease(filp, arg, lease);
@@ -1771,7 +1807,7 @@
 	if (error)
 		goto out_free;
 
-	if (f.file->f_op && f.file->f_op->flock)
+	if (f.file->f_op->flock)
 		error = f.file->f_op->flock(f.file,
 					  (can_sleep) ? F_SETLKW : F_SETLK,
 					  lock);
@@ -1797,7 +1833,7 @@
  */
 int vfs_test_lock(struct file *filp, struct file_lock *fl)
 {
-	if (filp->f_op && filp->f_op->lock)
+	if (filp->f_op->lock)
 		return filp->f_op->lock(filp, F_GETLK, fl);
 	posix_test_lock(filp, fl);
 	return 0;
@@ -1909,7 +1945,7 @@
  */
 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
 {
-	if (filp->f_op && filp->f_op->lock)
+	if (filp->f_op->lock)
 		return filp->f_op->lock(filp, cmd, fl);
 	else
 		return posix_lock_file(filp, fl, conf);
@@ -2182,7 +2218,7 @@
 	if (!inode->i_flock)
 		return;
 
-	if (filp->f_op && filp->f_op->flock) {
+	if (filp->f_op->flock) {
 		struct file_lock fl = {
 			.fl_pid = current->tgid,
 			.fl_file = filp,
@@ -2246,7 +2282,7 @@
  */
 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
 {
-	if (filp->f_op && filp->f_op->lock)
+	if (filp->f_op->lock)
 		return filp->f_op->lock(filp, F_CANCELLK, fl);
 	return 0;
 }
diff --git a/fs/mount.h b/fs/mount.h
index 64a8581..d64c594 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -29,6 +29,7 @@
 	struct mount *mnt_parent;
 	struct dentry *mnt_mountpoint;
 	struct vfsmount mnt;
+	struct rcu_head mnt_rcu;
 #ifdef CONFIG_SMP
 	struct mnt_pcp __percpu *mnt_pcp;
 #else
@@ -55,7 +56,7 @@
 	int mnt_group_id;		/* peer group identifier */
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	int mnt_pinned;
-	int mnt_ghosts;
+	struct path mnt_ex_mountpoint;
 };
 
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
@@ -76,13 +77,28 @@
 	return !IS_ERR_OR_NULL(real_mount(mnt));
 }
 
-extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
+extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
+
+extern bool legitimize_mnt(struct vfsmount *, unsigned);
 
 static inline void get_mnt_ns(struct mnt_namespace *ns)
 {
 	atomic_inc(&ns->count);
 }
 
+extern seqlock_t mount_lock;
+
+static inline void lock_mount_hash(void)
+{
+	write_seqlock(&mount_lock);
+}
+
+static inline void unlock_mount_hash(void)
+{
+	write_sequnlock(&mount_lock);
+}
+
 struct proc_mounts {
 	struct seq_file m;
 	struct mnt_namespace *ns;
diff --git a/fs/namei.c b/fs/namei.c
index caa2805..e029a4c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -482,18 +482,6 @@
  * to restart the path walk from the beginning in ref-walk mode.
  */
 
-static inline void lock_rcu_walk(void)
-{
-	br_read_lock(&vfsmount_lock);
-	rcu_read_lock();
-}
-
-static inline void unlock_rcu_walk(void)
-{
-	rcu_read_unlock();
-	br_read_unlock(&vfsmount_lock);
-}
-
 /**
  * unlazy_walk - try to switch to ref-walk mode.
  * @nd: nameidata pathwalk data
@@ -512,26 +500,23 @@
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 
 	/*
-	 * Get a reference to the parent first: we're
-	 * going to make "path_put(nd->path)" valid in
-	 * non-RCU context for "terminate_walk()".
-	 *
-	 * If this doesn't work, return immediately with
-	 * RCU walking still active (and then we will do
-	 * the RCU walk cleanup in terminate_walk()).
+	 * After legitimizing the bastards, terminate_walk()
+	 * will do the right thing for non-RCU mode, and all our
+	 * subsequent exit cases should rcu_read_unlock()
+	 * before returning.  Do vfsmount first; if dentry
+	 * can't be legitimized, just set nd->path.dentry to NULL
+	 * and rely on dput(NULL) being a no-op.
 	 */
-	if (!lockref_get_not_dead(&parent->d_lockref))
+	if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
 		return -ECHILD;
-
-	/*
-	 * After the mntget(), we terminate_walk() will do
-	 * the right thing for non-RCU mode, and all our
-	 * subsequent exit cases should unlock_rcu_walk()
-	 * before returning.
-	 */
-	mntget(nd->path.mnt);
 	nd->flags &= ~LOOKUP_RCU;
 
+	if (!lockref_get_not_dead(&parent->d_lockref)) {
+			nd->path.dentry = NULL;
+		rcu_read_unlock();
+		return -ECHILD;
+	}
+
 	/*
 	 * For a negative lookup, the lookup sequence point is the parents
 	 * sequence point, and it only needs to revalidate the parent dentry.
@@ -566,17 +551,17 @@
 		spin_unlock(&fs->lock);
 	}
 
-	unlock_rcu_walk();
+	rcu_read_unlock();
 	return 0;
 
 unlock_and_drop_dentry:
 	spin_unlock(&fs->lock);
 drop_dentry:
-	unlock_rcu_walk();
+	rcu_read_unlock();
 	dput(dentry);
 	goto drop_root_mnt;
 out:
-	unlock_rcu_walk();
+	rcu_read_unlock();
 drop_root_mnt:
 	if (!(nd->flags & LOOKUP_ROOT))
 		nd->root.mnt = NULL;
@@ -608,17 +593,22 @@
 		if (!(nd->flags & LOOKUP_ROOT))
 			nd->root.mnt = NULL;
 
+		if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) {
+			rcu_read_unlock();
+			return -ECHILD;
+		}
 		if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) {
-			unlock_rcu_walk();
+			rcu_read_unlock();
+			mntput(nd->path.mnt);
 			return -ECHILD;
 		}
 		if (read_seqcount_retry(&dentry->d_seq, nd->seq)) {
-			unlock_rcu_walk();
+			rcu_read_unlock();
 			dput(dentry);
+			mntput(nd->path.mnt);
 			return -ECHILD;
 		}
-		mntget(nd->path.mnt);
-		unlock_rcu_walk();
+		rcu_read_unlock();
 	}
 
 	if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -909,15 +899,15 @@
 	struct mount *parent;
 	struct dentry *mountpoint;
 
-	br_read_lock(&vfsmount_lock);
+	read_seqlock_excl(&mount_lock);
 	parent = mnt->mnt_parent;
 	if (parent == mnt) {
-		br_read_unlock(&vfsmount_lock);
+		read_sequnlock_excl(&mount_lock);
 		return 0;
 	}
 	mntget(&parent->mnt);
 	mountpoint = dget(mnt->mnt_mountpoint);
-	br_read_unlock(&vfsmount_lock);
+	read_sequnlock_excl(&mount_lock);
 	dput(path->dentry);
 	path->dentry = mountpoint;
 	mntput(path->mnt);
@@ -1048,8 +1038,8 @@
 
 			/* Something is mounted on this dentry in another
 			 * namespace and/or whatever was mounted there in this
-			 * namespace got unmounted before we managed to get the
-			 * vfsmount_lock */
+			 * namespace got unmounted before lookup_mnt() could
+			 * get it */
 		}
 
 		/* Handle an automount point */
@@ -1111,7 +1101,7 @@
 		if (!d_mountpoint(path->dentry))
 			break;
 
-		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
+		mounted = __lookup_mnt(path->mnt, path->dentry);
 		if (!mounted)
 			break;
 		path->mnt = &mounted->mnt;
@@ -1132,7 +1122,7 @@
 {
 	while (d_mountpoint(nd->path.dentry)) {
 		struct mount *mounted;
-		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
+		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
 		if (!mounted)
 			break;
 		nd->path.mnt = &mounted->mnt;
@@ -1174,7 +1164,7 @@
 	nd->flags &= ~LOOKUP_RCU;
 	if (!(nd->flags & LOOKUP_ROOT))
 		nd->root.mnt = NULL;
-	unlock_rcu_walk();
+	rcu_read_unlock();
 	return -ECHILD;
 }
 
@@ -1308,8 +1298,8 @@
 }
 
 /*
- * Call i_op->lookup on the dentry.  The dentry must be negative but may be
- * hashed if it was pouplated with DCACHE_NEED_LOOKUP.
+ * Call i_op->lookup on the dentry.  The dentry must be negative and
+ * unhashed.
  *
  * dir->d_inode->i_mutex must be held
  */
@@ -1501,7 +1491,7 @@
 		nd->flags &= ~LOOKUP_RCU;
 		if (!(nd->flags & LOOKUP_ROOT))
 			nd->root.mnt = NULL;
-		unlock_rcu_walk();
+		rcu_read_unlock();
 	}
 }
 
@@ -1511,18 +1501,9 @@
  * so we keep a cache of "no, this doesn't need follow_link"
  * for the common case.
  */
-static inline int should_follow_link(struct inode *inode, int follow)
+static inline int should_follow_link(struct dentry *dentry, int follow)
 {
-	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
-		if (likely(inode->i_op->follow_link))
-			return follow;
-
-		/* This gets set once for the inode lifetime */
-		spin_lock(&inode->i_lock);
-		inode->i_opflags |= IOP_NOFOLLOW;
-		spin_unlock(&inode->i_lock);
-	}
-	return 0;
+	return unlikely(d_is_symlink(dentry)) ? follow : 0;
 }
 
 static inline int walk_component(struct nameidata *nd, struct path *path,
@@ -1552,7 +1533,7 @@
 	if (!inode)
 		goto out_path_put;
 
-	if (should_follow_link(inode, follow)) {
+	if (should_follow_link(path->dentry, follow)) {
 		if (nd->flags & LOOKUP_RCU) {
 			if (unlikely(unlazy_walk(nd, path->dentry))) {
 				err = -ECHILD;
@@ -1611,26 +1592,6 @@
 }
 
 /*
- * We really don't want to look at inode->i_op->lookup
- * when we don't have to. So we keep a cache bit in
- * the inode ->i_opflags field that says "yes, we can
- * do lookup on this inode".
- */
-static inline int can_lookup(struct inode *inode)
-{
-	if (likely(inode->i_opflags & IOP_LOOKUP))
-		return 1;
-	if (likely(!inode->i_op->lookup))
-		return 0;
-
-	/* We do this once for the lifetime of the inode */
-	spin_lock(&inode->i_lock);
-	inode->i_opflags |= IOP_LOOKUP;
-	spin_unlock(&inode->i_lock);
-	return 1;
-}
-
-/*
  * We can do the critical dentry name comparison and hashing
  * operations one word at a time, but we are limited to:
  *
@@ -1833,7 +1794,7 @@
 			if (err)
 				return err;
 		}
-		if (!can_lookup(nd->inode)) {
+		if (!d_is_directory(nd->path.dentry)) {
 			err = -ENOTDIR;
 			break;
 		}
@@ -1851,9 +1812,10 @@
 	nd->flags = flags | LOOKUP_JUMPED;
 	nd->depth = 0;
 	if (flags & LOOKUP_ROOT) {
-		struct inode *inode = nd->root.dentry->d_inode;
+		struct dentry *root = nd->root.dentry;
+		struct inode *inode = root->d_inode;
 		if (*name) {
-			if (!can_lookup(inode))
+			if (!d_is_directory(root))
 				return -ENOTDIR;
 			retval = inode_permission(inode, MAY_EXEC);
 			if (retval)
@@ -1862,8 +1824,9 @@
 		nd->path = nd->root;
 		nd->inode = inode;
 		if (flags & LOOKUP_RCU) {
-			lock_rcu_walk();
+			rcu_read_lock();
 			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+			nd->m_seq = read_seqbegin(&mount_lock);
 		} else {
 			path_get(&nd->path);
 		}
@@ -1872,9 +1835,10 @@
 
 	nd->root.mnt = NULL;
 
+	nd->m_seq = read_seqbegin(&mount_lock);
 	if (*name=='/') {
 		if (flags & LOOKUP_RCU) {
-			lock_rcu_walk();
+			rcu_read_lock();
 			set_root_rcu(nd);
 		} else {
 			set_root(nd);
@@ -1886,7 +1850,7 @@
 			struct fs_struct *fs = current->fs;
 			unsigned seq;
 
-			lock_rcu_walk();
+			rcu_read_lock();
 
 			do {
 				seq = read_seqcount_begin(&fs->seq);
@@ -1907,7 +1871,7 @@
 		dentry = f.file->f_path.dentry;
 
 		if (*name) {
-			if (!can_lookup(dentry->d_inode)) {
+			if (!d_is_directory(dentry)) {
 				fdput(f);
 				return -ENOTDIR;
 			}
@@ -1918,7 +1882,7 @@
 			if (f.need_put)
 				*fp = f.file;
 			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-			lock_rcu_walk();
+			rcu_read_lock();
 		} else {
 			path_get(&nd->path);
 			fdput(f);
@@ -1989,7 +1953,7 @@
 		err = complete_walk(nd);
 
 	if (!err && nd->flags & LOOKUP_DIRECTORY) {
-		if (!can_lookup(nd->inode)) {
+		if (!d_is_directory(nd->path.dentry)) {
 			path_put(&nd->path);
 			err = -ENOTDIR;
 		}
@@ -2281,7 +2245,7 @@
 	}
 	path->dentry = dentry;
 	path->mnt = mntget(nd->path.mnt);
-	if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+	if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
 		return 1;
 	follow_mount(path);
 	error = 0;
@@ -2426,12 +2390,14 @@
  * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
  *     nfs_async_unlink().
  */
-static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
 {
+	struct inode *inode = victim->d_inode;
 	int error;
 
-	if (!victim->d_inode)
+	if (d_is_negative(victim))
 		return -ENOENT;
+	BUG_ON(!inode);
 
 	BUG_ON(victim->d_parent->d_inode != dir);
 	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
@@ -2441,15 +2407,16 @@
 		return error;
 	if (IS_APPEND(dir))
 		return -EPERM;
-	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
-	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+
+	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
+	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
 		return -EPERM;
 	if (isdir) {
-		if (!S_ISDIR(victim->d_inode->i_mode))
+		if (!d_is_directory(victim) && !d_is_autodir(victim))
 			return -ENOTDIR;
 		if (IS_ROOT(victim))
 			return -EBUSY;
-	} else if (S_ISDIR(victim->d_inode->i_mode))
+	} else if (d_is_directory(victim) || d_is_autodir(victim))
 		return -EISDIR;
 	if (IS_DEADDIR(dir))
 		return -ENOENT;
@@ -2983,7 +2950,7 @@
 	/*
 	 * create/update audit record if it already exists.
 	 */
-	if (path->dentry->d_inode)
+	if (d_is_positive(path->dentry))
 		audit_inode(name, path->dentry, 0);
 
 	/*
@@ -3012,12 +2979,12 @@
 finish_lookup:
 	/* we _can_ be in RCU mode here */
 	error = -ENOENT;
-	if (!inode) {
+	if (d_is_negative(path->dentry)) {
 		path_to_nameidata(path, nd);
 		goto out;
 	}
 
-	if (should_follow_link(inode, !symlink_ok)) {
+	if (should_follow_link(path->dentry, !symlink_ok)) {
 		if (nd->flags & LOOKUP_RCU) {
 			if (unlikely(unlazy_walk(nd, path->dentry))) {
 				error = -ECHILD;
@@ -3046,10 +3013,11 @@
 	}
 	audit_inode(name, nd->path.dentry, 0);
 	error = -EISDIR;
-	if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
+	if ((open_flag & O_CREAT) &&
+	    (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
 		goto out;
 	error = -ENOTDIR;
-	if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
+	if ((nd->flags & LOOKUP_DIRECTORY) && !d_is_directory(nd->path.dentry))
 		goto out;
 	if (!S_ISREG(nd->inode->i_mode))
 		will_truncate = false;
@@ -3275,7 +3243,7 @@
 	nd.root.mnt = mnt;
 	nd.root.dentry = dentry;
 
-	if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
+	if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
 		return ERR_PTR(-ELOOP);
 
 	file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU);
@@ -3325,8 +3293,9 @@
 		goto unlock;
 
 	error = -EEXIST;
-	if (dentry->d_inode)
+	if (d_is_positive(dentry))
 		goto fail;
+
 	/*
 	 * Special case - lookup gave negative, but... we had foo/bar/
 	 * From the vfs_mknod() POV we just have a negative dentry -
@@ -3647,8 +3616,27 @@
 	return do_rmdir(AT_FDCWD, pathname);
 }
 
-int vfs_unlink(struct inode *dir, struct dentry *dentry)
+/**
+ * vfs_unlink - unlink a filesystem object
+ * @dir:	parent directory
+ * @dentry:	victim
+ * @delegated_inode: returns the victim inode if the inode is delegated
+ *
+ * The caller must hold dir->i_mutex.
+ *
+ * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
+ * leave a reference to the inode in delegated_inode.  The caller
+ * should then break the delegation on that inode and retry.  Because
+ * breaking a delegation may take a long time, the caller should drop
+ * dir->i_mutex before doing so.
+ *
+ * Alternatively, a caller may pass NULL for delegated_inode.  This may
+ * be appropriate for callers that expect the underlying filesystem not
+ * to be NFS exported.
+ */
+int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
 {
+	struct inode *target = dentry->d_inode;
 	int error = may_delete(dir, dentry, 0);
 
 	if (error)
@@ -3657,22 +3645,26 @@
 	if (!dir->i_op->unlink)
 		return -EPERM;
 
-	mutex_lock(&dentry->d_inode->i_mutex);
+	mutex_lock(&target->i_mutex);
 	if (d_mountpoint(dentry))
 		error = -EBUSY;
 	else {
 		error = security_inode_unlink(dir, dentry);
 		if (!error) {
+			error = try_break_deleg(target, delegated_inode);
+			if (error)
+				goto out;
 			error = dir->i_op->unlink(dir, dentry);
 			if (!error)
 				dont_mount(dentry);
 		}
 	}
-	mutex_unlock(&dentry->d_inode->i_mutex);
+out:
+	mutex_unlock(&target->i_mutex);
 
 	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
 	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
-		fsnotify_link_count(dentry->d_inode);
+		fsnotify_link_count(target);
 		d_delete(dentry);
 	}
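
The delegation contract documented above implies a retry loop in every caller passing a non-NULL delegated_inode; a minimal sketch of the shape that do_unlinkat() adopts below (dir and dentry are illustrative):

	struct inode *delegated_inode = NULL;
retry:
	mutex_lock(&dir->i_mutex);
	error = vfs_unlink(dir, dentry, &delegated_inode);
	mutex_unlock(&dir->i_mutex);
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry;
	}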
 
@@ -3692,6 +3684,7 @@
 	struct dentry *dentry;
 	struct nameidata nd;
 	struct inode *inode = NULL;
+	struct inode *delegated_inode = NULL;
 	unsigned int lookup_flags = 0;
 retry:
 	name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3706,7 +3699,7 @@
 	error = mnt_want_write(nd.path.mnt);
 	if (error)
 		goto exit1;
-
+retry_deleg:
 	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
 	dentry = lookup_hash(&nd);
 	error = PTR_ERR(dentry);
@@ -3715,19 +3708,25 @@
 		if (nd.last.name[nd.last.len])
 			goto slashes;
 		inode = dentry->d_inode;
-		if (!inode)
+		if (d_is_negative(dentry))
 			goto slashes;
 		ihold(inode);
 		error = security_path_unlink(&nd.path, dentry);
 		if (error)
 			goto exit2;
-		error = vfs_unlink(nd.path.dentry->d_inode, dentry);
+		error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
 exit2:
 		dput(dentry);
 	}
 	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
 	if (inode)
 		iput(inode);	/* truncate the inode here */
+	inode = NULL;
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
 	mnt_drop_write(nd.path.mnt);
 exit1:
 	path_put(&nd.path);
@@ -3740,8 +3739,12 @@
 	return error;
 
 slashes:
-	error = !dentry->d_inode ? -ENOENT :
-		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
+	if (d_is_negative(dentry))
+		error = -ENOENT;
+	else if (d_is_directory(dentry) || d_is_autodir(dentry))
+		error = -EISDIR;
+	else
+		error = -ENOTDIR;
 	goto exit2;
 }
 
@@ -3817,7 +3820,26 @@
 	return sys_symlinkat(oldname, AT_FDCWD, newname);
 }
 
-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+/**
+ * vfs_link - create a new link
+ * @old_dentry:	object to be linked
+ * @dir:	new parent
+ * @new_dentry:	where to create the new link
+ * @delegated_inode: returns inode needing a delegation break
+ *
+ * The caller must hold dir->i_mutex.
+ *
+ * If vfs_link discovers a delegation on the to-be-linked file in need
+ * of breaking, it will return -EWOULDBLOCK and leave a reference to the
+ * inode in delegated_inode.  The caller should then break the delegation
+ * and retry.  Because breaking a delegation may take a long time, the
+ * caller should drop the i_mutex before doing so.
+ *
+ * Alternatively, a caller may pass NULL for delegated_inode.  This may
+ * be appropriate for callers that expect the underlying filesystem not
+ * to be NFS exported.
+ */
+int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
 {
 	struct inode *inode = old_dentry->d_inode;
 	unsigned max_links = dir->i_sb->s_max_links;
@@ -3853,8 +3875,11 @@
 		error =  -ENOENT;
 	else if (max_links && inode->i_nlink >= max_links)
 		error = -EMLINK;
-	else
-		error = dir->i_op->link(old_dentry, dir, new_dentry);
+	else {
+		error = try_break_deleg(inode, delegated_inode);
+		if (!error)
+			error = dir->i_op->link(old_dentry, dir, new_dentry);
+	}
 
 	if (!error && (inode->i_state & I_LINKABLE)) {
 		spin_lock(&inode->i_lock);
@@ -3881,6 +3906,7 @@
 {
 	struct dentry *new_dentry;
 	struct path old_path, new_path;
+	struct inode *delegated_inode = NULL;
 	int how = 0;
 	int error;
 
@@ -3919,9 +3945,14 @@
 	error = security_path_link(old_path.dentry, &new_path, new_dentry);
 	if (error)
 		goto out_dput;
-	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
+	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
 out_dput:
 	done_path_create(&new_path, new_dentry);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry;
+	}
 	if (retry_estale(error, how)) {
 		how |= LOOKUP_REVAL;
 		goto retry;
@@ -3946,7 +3977,8 @@
  *	   That's where 4.4 screws up. Current fix: serialization on
  *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
  *	   story.
- *	c) we have to lock _three_ objects - parents and victim (if it exists).
+ *	c) we have to lock _four_ objects - parents and victim (if it exists),
+ *	   and source (if it is not a directory).
  *	   And that - after we got ->i_mutex on parents (until then we don't know
  *	   whether the target exists).  Solution: try to be smart with locking
  *	   order for inodes.  We rely on the fact that tree topology may change
@@ -4019,9 +4051,11 @@
 }
 
 static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
-			    struct inode *new_dir, struct dentry *new_dentry)
+			    struct inode *new_dir, struct dentry *new_dentry,
+			    struct inode **delegated_inode)
 {
 	struct inode *target = new_dentry->d_inode;
+	struct inode *source = old_dentry->d_inode;
 	int error;
 
 	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
@@ -4029,13 +4063,20 @@
 		return error;
 
 	dget(new_dentry);
-	if (target)
-		mutex_lock(&target->i_mutex);
+	lock_two_nondirectories(source, target);
 
 	error = -EBUSY;
 	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
 		goto out;
 
+	error = try_break_deleg(source, delegated_inode);
+	if (error)
+		goto out;
+	if (target) {
+		error = try_break_deleg(target, delegated_inode);
+		if (error)
+			goto out;
+	}
 	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
 	if (error)
 		goto out;
@@ -4045,17 +4086,38 @@
 	if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
 		d_move(old_dentry, new_dentry);
 out:
-	if (target)
-		mutex_unlock(&target->i_mutex);
+	unlock_two_nondirectories(source, target);
 	dput(new_dentry);
 	return error;
 }
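lock_two_nondirectories()/unlock_two_nondirectories() are new helpers this series uses in place of the old single-target i_mutex dance; presumably they take both i_mutexes in one global order, so two concurrent cross-directory renames cannot deadlock, while tolerating an absent target. A hedged sketch of the idea, not the exact fs/inode.c implementation (I_MUTEX_NONDIR2 is assumed to be the lockdep subclass used for the second inode):

	static void lock_two_sketch(struct inode *a, struct inode *b)
	{
		if (a > b)
			swap(a, b);	/* single global order: by address */
		if (a)
			mutex_lock(&a->i_mutex);
		if (b && b != a)
			mutex_lock_nested(&b->i_mutex, I_MUTEX_NONDIR2);
	}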
 
+/**
+ * vfs_rename - rename a filesystem object
+ * @old_dir:	parent of source
+ * @old_dentry:	source
+ * @new_dir:	parent of destination
+ * @new_dentry:	destination
+ * @delegated_inode: returns an inode needing a delegation break
+ *
+ * The caller must hold multiple mutexes--see lock_rename().
+ *
+ * If vfs_rename discovers a delegation in need of breaking at either
+ * the source or destination, it will return -EWOULDBLOCK and leave a
+ * reference to the inode in delegated_inode.  The caller should then
+ * break the delegation and retry.  Because breaking a delegation may
+ * take a long time, the caller should drop all locks before doing
+ * so.
+ *
+ * Alternatively, a caller may pass NULL for delegated_inode.  This may
+ * be appropriate for callers that expect the underlying filesystem not
+ * to be NFS exported.
+ */
 int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-	       struct inode *new_dir, struct dentry *new_dentry)
+	       struct inode *new_dir, struct dentry *new_dentry,
+	       struct inode **delegated_inode)
 {
 	int error;
-	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+	int is_dir = d_is_directory(old_dentry) || d_is_autodir(old_dentry);
 	const unsigned char *old_name;
 
 	if (old_dentry->d_inode == new_dentry->d_inode)
@@ -4080,7 +4142,7 @@
 	if (is_dir)
 		error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
 	else
-		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
+		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry,delegated_inode);
 	if (!error)
 		fsnotify_move(old_dir, new_dir, old_name, is_dir,
 			      new_dentry->d_inode, old_dentry);
@@ -4096,6 +4158,7 @@
 	struct dentry *old_dentry, *new_dentry;
 	struct dentry *trap;
 	struct nameidata oldnd, newnd;
+	struct inode *delegated_inode = NULL;
 	struct filename *from;
 	struct filename *to;
 	unsigned int lookup_flags = 0;
@@ -4135,6 +4198,7 @@
 	newnd.flags &= ~LOOKUP_PARENT;
 	newnd.flags |= LOOKUP_RENAME_TARGET;
 
+retry_deleg:
 	trap = lock_rename(new_dir, old_dir);
 
 	old_dentry = lookup_hash(&oldnd);
@@ -4143,10 +4207,10 @@
 		goto exit3;
 	/* source must exist */
 	error = -ENOENT;
-	if (!old_dentry->d_inode)
+	if (d_is_negative(old_dentry))
 		goto exit4;
 	/* unless the source is a directory trailing slashes give -ENOTDIR */
-	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
+	if (!d_is_directory(old_dentry) && !d_is_autodir(old_dentry)) {
 		error = -ENOTDIR;
 		if (oldnd.last.name[oldnd.last.len])
 			goto exit4;
@@ -4171,13 +4235,19 @@
 	if (error)
 		goto exit5;
 	error = vfs_rename(old_dir->d_inode, old_dentry,
-				   new_dir->d_inode, new_dentry);
+				   new_dir->d_inode, new_dentry,
+				   &delegated_inode);
 exit5:
 	dput(new_dentry);
 exit4:
 	dput(old_dentry);
 exit3:
 	unlock_rename(new_dir, old_dir);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
 	mnt_drop_write(oldnd.path.mnt);
 exit2:
 	if (retry_estale(error, lookup_flags))
diff --git a/fs/namespace.c b/fs/namespace.c
index da5c494..ac2ce8a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -39,7 +39,7 @@
 static struct list_head *mount_hashtable __read_mostly;
 static struct list_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
-static struct rw_semaphore namespace_sem;
+static DECLARE_RWSEM(namespace_sem);
 
 /* /sys/fs */
 struct kobject *fs_kobj;
@@ -53,7 +53,7 @@
  * It should be taken for write in all cases where the vfsmount
  * tree or hash is modified or when a vfsmount structure is modified.
  */
-DEFINE_BRLOCK(vfsmount_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
@@ -63,8 +63,6 @@
 	return tmp & (HASH_SIZE - 1);
 }
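lock_mount_hash()/unlock_mount_hash() themselves are not visible in this diff; given the DEFINE_SEQLOCK(mount_lock) above, they are presumably thin fs/mount.h wrappers along these lines:

	static inline void lock_mount_hash(void)
	{
		write_seqlock(&mount_lock);
	}

	static inline void unlock_mount_hash(void)
	{
		write_sequnlock(&mount_lock);
	}

Taking the write side bumps the sequence count, which is what invalidates the lockless readers shown further down.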
 
-#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
-
 /*
  * allocation is serialized by namespace_sem, but we need the spinlock to
  * serialize with freeing.
@@ -458,7 +456,7 @@
 {
 	int ret = 0;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -492,15 +490,15 @@
 	 */
 	smp_wmb();
 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -512,7 +510,7 @@
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
 			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -534,7 +532,7 @@
 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return err;
 }
@@ -549,30 +547,56 @@
 	kmem_cache_free(mnt_cache, mnt);
 }
 
+/* call under rcu_read_lock() */
+bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+{
+	struct mount *mnt;
+	if (read_seqretry(&mount_lock, seq))
+		return false;
+	if (bastard == NULL)
+		return true;
+	mnt = real_mount(bastard);
+	mnt_add_count(mnt, 1);
+	if (likely(!read_seqretry(&mount_lock, seq)))
+		return true;
+	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+		mnt_add_count(mnt, -1);
+		return false;
+	}
+	rcu_read_unlock();
+	mntput(bastard);
+	rcu_read_lock();
+	return false;
+}
+
 /*
- * find the first or last mount at @dentry on vfsmount @mnt depending on
- * @dir. If @dir is set return the first mount else return the last mount.
- * vfsmount_lock must be held for read or write.
+ * find the first mount at @dentry on vfsmount @mnt.
+ * call under rcu_read_lock()
  */
-struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
-			      int dir)
+struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 {
 	struct list_head *head = mount_hashtable + hash(mnt, dentry);
-	struct list_head *tmp = head;
-	struct mount *p, *found = NULL;
+	struct mount *p;
 
-	for (;;) {
-		tmp = dir ? tmp->next : tmp->prev;
-		p = NULL;
-		if (tmp == head)
-			break;
-		p = list_entry(tmp, struct mount, mnt_hash);
-		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
-			found = p;
-			break;
-		}
-	}
-	return found;
+	list_for_each_entry_rcu(p, head, mnt_hash)
+		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
+			return p;
+	return NULL;
+}
+
+/*
+ * find the last mount at @dentry on vfsmount @mnt.
+ * mount_lock must be held.
+ */
+struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct list_head *head = mount_hashtable + hash(mnt, dentry);
+	struct mount *p;
+
+	list_for_each_entry_reverse(p, head, mnt_hash)
+		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
+			return p;
+	return NULL;
 }
 
 /*
@@ -594,17 +618,17 @@
 struct vfsmount *lookup_mnt(struct path *path)
 {
 	struct mount *child_mnt;
+	struct vfsmount *m;
+	unsigned seq;
 
-	br_read_lock(&vfsmount_lock);
-	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
-	if (child_mnt) {
-		mnt_add_count(child_mnt, 1);
-		br_read_unlock(&vfsmount_lock);
-		return &child_mnt->mnt;
-	} else {
-		br_read_unlock(&vfsmount_lock);
-		return NULL;
-	}
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&mount_lock);
+		child_mnt = __lookup_mnt(path->mnt, path->dentry);
+		m = child_mnt ? &child_mnt->mnt : NULL;
+	} while (!legitimize_mnt(m, seq));
+	rcu_read_unlock();
+	return m;
 }
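The loop above is the stock seqlock read pattern with one extra twist: legitimize_mnt() pins the candidate mount before the final sequence check, so the caller never ends up holding a reference to a mount that a concurrent umount is tearing down. The bare pattern, for reference:

	unsigned seq;

	do {
		seq = read_seqbegin(&mount_lock);
		/* speculative, lock-free reads of mount state go here */
	} while (read_seqretry(&mount_lock, seq));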
 
 static struct mountpoint *new_mountpoint(struct dentry *dentry)
@@ -796,9 +820,9 @@
 	mnt->mnt.mnt_sb = root->d_sb;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -839,9 +863,9 @@
 	mnt->mnt.mnt_root = dget(root);
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if ((flag & CL_SLAVE) ||
 	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
@@ -872,10 +896,49 @@
 	return ERR_PTR(err);
 }
 
-static inline void mntfree(struct mount *mnt)
+static void delayed_free(struct rcu_head *head)
 {
-	struct vfsmount *m = &mnt->mnt;
-	struct super_block *sb = m->mnt_sb;
+	struct mount *mnt = container_of(head, struct mount, mnt_rcu);
+	kfree(mnt->mnt_devname);
+#ifdef CONFIG_SMP
+	free_percpu(mnt->mnt_pcp);
+#endif
+	kmem_cache_free(mnt_cache, mnt);
+}
+
+static void mntput_no_expire(struct mount *mnt)
+{
+put_again:
+	rcu_read_lock();
+	mnt_add_count(mnt, -1);
+	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+		rcu_read_unlock();
+		return;
+	}
+	lock_mount_hash();
+	if (mnt_get_count(mnt)) {
+		rcu_read_unlock();
+		unlock_mount_hash();
+		return;
+	}
+	if (unlikely(mnt->mnt_pinned)) {
+		mnt_add_count(mnt, mnt->mnt_pinned + 1);
+		mnt->mnt_pinned = 0;
+		rcu_read_unlock();
+		unlock_mount_hash();
+		acct_auto_close_mnt(&mnt->mnt);
+		goto put_again;
+	}
+	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
+		rcu_read_unlock();
+		unlock_mount_hash();
+		return;
+	}
+	mnt->mnt.mnt_flags |= MNT_DOOMED;
+	rcu_read_unlock();
+
+	list_del(&mnt->mnt_instance);
+	unlock_mount_hash();
 
 	/*
 	 * This probably indicates that somebody messed
@@ -888,48 +951,11 @@
 	 * so mnt_get_writers() below is safe.
 	 */
 	WARN_ON(mnt_get_writers(mnt));
-	fsnotify_vfsmount_delete(m);
-	dput(m->mnt_root);
-	free_vfsmnt(mnt);
-	deactivate_super(sb);
-}
-
-static void mntput_no_expire(struct mount *mnt)
-{
-put_again:
-#ifdef CONFIG_SMP
-	br_read_lock(&vfsmount_lock);
-	if (likely(mnt->mnt_ns)) {
-		/* shouldn't be the last one */
-		mnt_add_count(mnt, -1);
-		br_read_unlock(&vfsmount_lock);
-		return;
-	}
-	br_read_unlock(&vfsmount_lock);
-
-	br_write_lock(&vfsmount_lock);
-	mnt_add_count(mnt, -1);
-	if (mnt_get_count(mnt)) {
-		br_write_unlock(&vfsmount_lock);
-		return;
-	}
-#else
-	mnt_add_count(mnt, -1);
-	if (likely(mnt_get_count(mnt)))
-		return;
-	br_write_lock(&vfsmount_lock);
-#endif
-	if (unlikely(mnt->mnt_pinned)) {
-		mnt_add_count(mnt, mnt->mnt_pinned + 1);
-		mnt->mnt_pinned = 0;
-		br_write_unlock(&vfsmount_lock);
-		acct_auto_close_mnt(&mnt->mnt);
-		goto put_again;
-	}
-
-	list_del(&mnt->mnt_instance);
-	br_write_unlock(&vfsmount_lock);
-	mntfree(mnt);
+	fsnotify_vfsmount_delete(&mnt->mnt);
+	dput(mnt->mnt.mnt_root);
+	deactivate_super(mnt->mnt.mnt_sb);
+	mnt_free_id(mnt);
+	call_rcu(&mnt->mnt_rcu, delayed_free);
 }
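delayed_free() is the usual call_rcu() deferred-free idiom; the same shape reappears for ncp_server later in this pull. In general form (a sketch, with struct foo standing in for any structure that embeds an rcu_head):

	struct foo {
		struct rcu_head rcu;
		/* ... payload read under rcu_read_lock() ... */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/* instead of kfree(f): runs only after all current readers finish */
	call_rcu(&f->rcu, foo_free_rcu);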
 
 void mntput(struct vfsmount *mnt)
@@ -954,21 +980,21 @@
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	real_mount(mnt)->mnt_pinned++;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (mnt->mnt_pinned) {
 		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -1085,12 +1111,12 @@
 	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1117,10 +1143,10 @@
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1142,23 +1168,13 @@
 	list_splice_init(&unmounted, &head);
 	up_write(&namespace_sem);
 
+	synchronize_rcu();
+
 	while (!list_empty(&head)) {
 		mnt = list_first_entry(&head, struct mount, mnt_hash);
 		list_del_init(&mnt->mnt_hash);
-		if (mnt_has_parent(mnt)) {
-			struct dentry *dentry;
-			struct mount *m;
-
-			br_write_lock(&vfsmount_lock);
-			dentry = mnt->mnt_mountpoint;
-			m = mnt->mnt_parent;
-			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
-			mnt->mnt_parent = mnt;
-			m->mnt_ghosts--;
-			br_write_unlock(&vfsmount_lock);
-			dput(dentry);
-			mntput(&m->mnt);
-		}
+		if (mnt->mnt_ex_mountpoint.mnt)
+			path_put(&mnt->mnt_ex_mountpoint);
 		mntput(&mnt->mnt);
 	}
 }
@@ -1169,10 +1185,13 @@
 }
 
 /*
- * vfsmount lock must be held for write
+ * mount_lock must be held
  * namespace_sem must be held for write
+ * how = 0 => just this tree, don't propagate
+ * how = 1 => propagate; we know that nobody else has a reference to any victims
+ * how = 2 => lazy umount
  */
-void umount_tree(struct mount *mnt, int propagate)
+void umount_tree(struct mount *mnt, int how)
 {
 	LIST_HEAD(tmp_list);
 	struct mount *p;
@@ -1180,7 +1199,7 @@
 	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
-	if (propagate)
+	if (how)
 		propagate_umount(&tmp_list);
 
 	list_for_each_entry(p, &tmp_list, mnt_hash) {
@@ -1188,10 +1207,16 @@
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
+		if (how < 2)
+			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 		list_del_init(&p->mnt_child);
 		if (mnt_has_parent(p)) {
-			p->mnt_parent->mnt_ghosts++;
 			put_mountpoint(p->mnt_mp);
+			/* move the reference to mountpoint into ->mnt_ex_mountpoint */
+			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
+			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
+			p->mnt_mountpoint = p->mnt.mnt_root;
+			p->mnt_parent = p;
 			p->mnt_mp = NULL;
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
@@ -1225,12 +1250,12 @@
 		 * probably don't strictly need the lock here if we examined
 		 * all race cases, but it's a slowpath.
 		 */
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		if (mnt_get_count(mnt) != 2) {
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 			return -EBUSY;
 		}
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1272,19 +1297,23 @@
 	}
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	event++;
 
-	if (!(flags & MNT_DETACH))
-		shrink_submounts(mnt);
-
-	retval = -EBUSY;
-	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
+	if (flags & MNT_DETACH) {
 		if (!list_empty(&mnt->mnt_list))
-			umount_tree(mnt, 1);
+			umount_tree(mnt, 2);
 		retval = 0;
+	} else {
+		shrink_submounts(mnt);
+		retval = -EBUSY;
+		if (!propagate_mount_busy(mnt, 2)) {
+			if (!list_empty(&mnt->mnt_list))
+				umount_tree(mnt, 1);
+			retval = 0;
+		}
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 	return retval;
 }
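Seen from userspace, the two branches above map onto the flags argument of umount2(2); a sketch (the path is illustrative):

	#include <sys/mount.h>

	/* ordinary umount: how=1, propagates, can fail with -EBUSY */
	umount("/mnt/data");

	/* lazy umount: how=2, detaches at once, MNT_SYNC_UMOUNT left clear */
	umount2("/mnt/data", MNT_DETACH);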
@@ -1427,18 +1456,18 @@
 			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (IS_ERR(q))
 				goto out;
-			br_write_lock(&vfsmount_lock);
+			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, parent, p->mnt_mp);
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 		}
 	}
 	return res;
 out:
 	if (res) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(res, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 	return q;
 }
@@ -1460,9 +1489,9 @@
 void drop_collected_mounts(struct vfsmount *mnt)
 {
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	umount_tree(real_mount(mnt), 0);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -1589,7 +1618,7 @@
 	if (err)
 		goto out_cleanup_ids;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1608,7 +1637,7 @@
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return 0;
 
@@ -1710,10 +1739,10 @@
 			goto out_unlock;
 	}
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
  out_unlock:
 	namespace_unlock();
@@ -1785,9 +1814,9 @@
 
 	err = graft_tree(mnt, parent, mp);
 	if (err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(mnt, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 out2:
 	unlock_mount(mp);
@@ -1846,17 +1875,13 @@
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
-		br_write_unlock(&vfsmount_lock);
+		touch_mnt_namespace(mnt->mnt_ns);
+		unlock_mount_hash();
 	}
 	up_write(&sb->s_umount);
-	if (!err) {
-		br_write_lock(&vfsmount_lock);
-		touch_mnt_namespace(mnt->mnt_ns);
-		br_write_unlock(&vfsmount_lock);
-	}
 	return err;
 }
 
@@ -1972,7 +1997,7 @@
 	struct mount *parent;
 	int err;
 
-	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
+	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
 
 	mp = lock_mount(path);
 	if (IS_ERR(mp))
@@ -2077,9 +2102,7 @@
 	/* remove m from any expiration list it may be on */
 	if (!list_empty(&mnt->mnt_expire)) {
 		namespace_lock();
-		br_write_lock(&vfsmount_lock);
 		list_del_init(&mnt->mnt_expire);
-		br_write_unlock(&vfsmount_lock);
 		namespace_unlock();
 	}
 	mntput(m);
@@ -2095,11 +2118,9 @@
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
 
 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-	br_write_unlock(&vfsmount_lock);
 	namespace_unlock();
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -2118,7 +2139,7 @@
 		return;
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -2137,7 +2158,7 @@
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -2193,7 +2214,7 @@
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
  *
- * vfsmount_lock must be held for write
+ * mount_lock must be held for write
  */
 static void shrink_submounts(struct mount *mnt)
 {
@@ -2414,20 +2435,25 @@
 	return new_ns;
 }
 
-/*
- * Allocate a new namespace structure and populate it with contents
- * copied from the namespace of the passed in task structure.
- */
-static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
-		struct user_namespace *user_ns, struct fs_struct *fs)
+struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
-	struct mount *old = mnt_ns->root;
+	struct mount *old;
 	struct mount *new;
 	int copy_flags;
 
+	BUG_ON(!ns);
+
+	if (likely(!(flags & CLONE_NEWNS))) {
+		get_mnt_ns(ns);
+		return ns;
+	}
+
+	old = ns->root;
+
 	new_ns = alloc_mnt_ns(user_ns);
 	if (IS_ERR(new_ns))
 		return new_ns;
@@ -2435,7 +2461,7 @@
 	namespace_lock();
 	/* First pass: copy the tree topology */
 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
-	if (user_ns != mnt_ns->user_ns)
+	if (user_ns != ns->user_ns)
 		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
 	if (IS_ERR(new)) {
@@ -2444,9 +2470,7 @@
 		return ERR_CAST(new);
 	}
 	new_ns->root = new;
-	br_write_lock(&vfsmount_lock);
 	list_add_tail(&new_ns->list, &new->mnt_list);
-	br_write_unlock(&vfsmount_lock);
 
 	/*
 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2457,13 +2481,13 @@
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
-		if (fs) {
-			if (&p->mnt == fs->root.mnt) {
-				fs->root.mnt = mntget(&q->mnt);
+		if (new_fs) {
+			if (&p->mnt == new_fs->root.mnt) {
+				new_fs->root.mnt = mntget(&q->mnt);
 				rootmnt = &p->mnt;
 			}
-			if (&p->mnt == fs->pwd.mnt) {
-				fs->pwd.mnt = mntget(&q->mnt);
+			if (&p->mnt == new_fs->pwd.mnt) {
+				new_fs->pwd.mnt = mntget(&q->mnt);
 				pwdmnt = &p->mnt;
 			}
 		}
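With dup_mnt_ns() folded into copy_mnt_ns(), the copy only happens when CLONE_NEWNS was requested; everything else just takes a reference on the existing namespace. A minimal userspace trigger for the copy path (needs CAP_SYS_ADMIN):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		if (unshare(CLONE_NEWNS) != 0) {
			perror("unshare");	/* typically EPERM when unprivileged */
			return 1;
		}
		/* mounts made from here on are private to the new namespace */
		return 0;
	}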
@@ -2484,23 +2508,6 @@
 	return new_ns;
 }
 
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
-		struct user_namespace *user_ns, struct fs_struct *new_fs)
-{
-	struct mnt_namespace *new_ns;
-
-	BUG_ON(!ns);
-	get_mnt_ns(ns);
-
-	if (!(flags & CLONE_NEWNS))
-		return ns;
-
-	new_ns = dup_mnt_ns(ns, user_ns, new_fs);
-
-	put_mnt_ns(ns);
-	return new_ns;
-}
-
 /**
  * create_mnt_ns - creates a private namespace and adds a root filesystem
  * @mnt: pointer to the new root filesystem mountpoint
@@ -2593,7 +2600,7 @@
 /*
  * Return true if path is reachable from root
  *
- * namespace_sem or vfsmount_lock is held
+ * namespace_sem or mount_lock is held
  */
 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 			 const struct path *root)
@@ -2608,9 +2615,9 @@
 int path_is_under(struct path *path1, struct path *path2)
 {
 	int res;
-	br_read_lock(&vfsmount_lock);
+	read_seqlock_excl(&mount_lock);
 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-	br_read_unlock(&vfsmount_lock);
+	read_sequnlock_excl(&mount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2701,7 +2708,7 @@
 	if (!is_path_reachable(old_mnt, old.dentry, &new))
 		goto out4;
 	root_mp->m_count++; /* pin it so it won't go away */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	detach_mnt(new_mnt, &parent_path);
 	detach_mnt(root_mnt, &root_parent);
 	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
@@ -2713,7 +2720,7 @@
 	/* mount new_root on / */
 	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	chroot_fs_refs(&root, &new);
 	put_mountpoint(root_mp);
 	error = 0;
@@ -2767,8 +2774,6 @@
 	unsigned u;
 	int err;
 
-	init_rwsem(&namespace_sem);
-
 	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
@@ -2785,8 +2790,6 @@
 	for (u = 0; u < HASH_SIZE; u++)
 		INIT_LIST_HEAD(&mountpoint_hashtable[u]);
 
-	br_lock_init(&vfsmount_lock);
-
 	err = sysfs_init();
 	if (err)
 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2802,11 +2805,7 @@
 {
 	if (!atomic_dec_and_test(&ns->count))
 		return;
-	namespace_lock();
-	br_write_lock(&vfsmount_lock);
-	umount_tree(ns->root, 0);
-	br_write_unlock(&vfsmount_lock);
-	namespace_unlock();
+	drop_collected_mounts(&ns->root->mnt);
 	free_mnt_ns(ns);
 }
 
@@ -2829,9 +2828,8 @@
 {
 	/* release long term mount so mount point can be released */
 	if (!IS_ERR_OR_NULL(mnt)) {
-		br_write_lock(&vfsmount_lock);
 		real_mount(mnt)->mnt_ns = NULL;
-		br_write_unlock(&vfsmount_lock);
+		synchronize_rcu();	/* yecchhh... */
 		mntput(mnt);
 	}
 }
@@ -2875,7 +2873,7 @@
 	if (unlikely(!ns))
 		return false;
 
-	namespace_lock();
+	down_read(&namespace_sem);
 	list_for_each_entry(mnt, &ns->list, mnt_list) {
 		struct mount *child;
 		if (mnt->mnt.mnt_sb->s_type != type)
@@ -2896,7 +2894,7 @@
 	next:	;
 	}
 found:
-	namespace_unlock();
+	up_read(&namespace_sem);
 	return visible;
 }
 
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 3be0474..c320ac5 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -339,9 +339,8 @@
 	if (val)
 		goto finished;
 
-	DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		NCP_GET_AGE(dentry));
+	DDPRINTK("ncp_lookup_validate: %pd2 not valid, age=%ld, server lookup\n",
+		dentry, NCP_GET_AGE(dentry));
 
 	len = sizeof(__name);
 	if (ncp_is_server_root(dir)) {
@@ -359,8 +358,8 @@
 			res = ncp_obtain_info(server, dir, __name, &(finfo.i));
 	}
 	finfo.volume = finfo.i.volNumber;
-	DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
-		dentry->d_parent->d_name.name, __name, res);
+	DDPRINTK("ncp_lookup_validate: looked for %pd/%s, res=%d\n",
+		dentry->d_parent, __name, res);
 	/*
 	 * If we didn't find it, or if it has a different dirEntNum to
 	 * what we remember, it's not valid any more.
@@ -454,8 +453,7 @@
 	ctl.page  = NULL;
 	ctl.cache = NULL;
 
-	DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
+	DDPRINTK("ncp_readdir: reading %pD2, pos=%d\n", file,
 		(int) ctx->pos);
 
 	result = -EIO;
@@ -740,12 +738,10 @@
 	int more;
 	size_t bufsize;
 
-	DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
+	DPRINTK("ncp_do_readdir: %pD2, fpos=%ld\n", file,
 		(unsigned long) ctx->pos);
-	PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
-		dentry->d_name.name, NCP_FINFO(dir)->volNumber,
-		NCP_FINFO(dir)->dirEntNum);
+	PPRINTK("ncp_do_readdir: init %pD, volnum=%d, dirent=%u\n",
+		file, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum);
 
 	err = ncp_initialize_search(server, dir, &seq);
 	if (err) {
@@ -850,8 +846,7 @@
 	if (!ncp_conn_valid(server))
 		goto finished;
 
-	PPRINTK("ncp_lookup: server lookup for %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	PPRINTK("ncp_lookup: server lookup for %pd2\n", dentry);
 
 	len = sizeof(__name);
 	if (ncp_is_server_root(dir)) {
@@ -867,8 +862,7 @@
 		if (!res)
 			res = ncp_obtain_info(server, dir, __name, &(finfo.i));
 	}
-	PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
-		dentry->d_parent->d_name.name, __name, res);
+	PPRINTK("ncp_lookup: looked for %pd2, res=%d\n", dentry, res);
 	/*
 	 * If we didn't find an entry, make a negative dentry.
 	 */
@@ -915,8 +909,7 @@
 	return error;
 
 out_close:
-	PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	PPRINTK("ncp_instantiate: %pd2 failed, closing file\n", dentry);
 	ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
 	goto out;
 }
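These ncpfs conversions (and the nfs ones below) rely on the %pd/%pD vsnprintf extensions, which print a dentry or file name directly instead of chasing d_parent by hand; the optional trailing digit is how many trailing path components to emit. Roughly (see Documentation/printk-formats.txt):

	pr_debug("%pd\n",  dentry);	/* "name"                        */
	pr_debug("%pd2\n", dentry);	/* "parent/name"                 */
	pr_debug("%pD2\n", file);	/* same, via file->f_path.dentry */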
@@ -930,8 +923,7 @@
 	int opmode;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 	
-	PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+	PPRINTK("ncp_create_new: creating %pd2, mode=%hx\n", dentry, mode);
 
 	ncp_age_dentry(server, dentry);
 	len = sizeof(__name);
@@ -960,8 +952,7 @@
 				error = -ENAMETOOLONG;
 			else if (result < 0)
 				error = result;
-			DPRINTK("ncp_create: %s/%s failed\n",
-				dentry->d_parent->d_name.name, dentry->d_name.name);
+			DPRINTK("ncp_create: %pd2 failed\n", dentry);
 			goto out;
 		}
 		opmode = O_WRONLY;
@@ -994,8 +985,7 @@
 	int error, len;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 
-	DPRINTK("ncp_mkdir: making %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_mkdir: making %pd2\n", dentry);
 
 	ncp_age_dentry(server, dentry);
 	len = sizeof(__name);
@@ -1032,8 +1022,7 @@
 	int error, result, len;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 
-	DPRINTK("ncp_rmdir: removing %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_rmdir: removing %pd2\n", dentry);
 
 	len = sizeof(__name);
 	error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -1078,8 +1067,7 @@
 	int error;
 
 	server = NCP_SERVER(dir);
-	DPRINTK("ncp_unlink: unlinking %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_unlink: unlinking %pd2\n", dentry);
 	
 	/*
 	 * Check whether to close the file ...
@@ -1099,8 +1087,7 @@
 #endif
 	switch (error) {
 		case 0x00:
-			DPRINTK("ncp: removed %s/%s\n",
-				dentry->d_parent->d_name.name, dentry->d_name.name);
+			DPRINTK("ncp: removed %pd2\n", dentry);
 			break;
 		case 0x85:
 		case 0x8A:
@@ -1133,9 +1120,7 @@
 	int old_len, new_len;
 	__u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
 
-	DPRINTK("ncp_rename: %s/%s to %s/%s\n",
-		old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-		new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+	DPRINTK("ncp_rename: %pd2 to %pd2\n", old_dentry, new_dentry);
 
 	ncp_age_dentry(server, old_dentry);
 	ncp_age_dentry(server, new_dentry);
@@ -1165,8 +1150,8 @@
 #endif
 	switch (error) {
 		case 0x00:
-               	        DPRINTK("ncp renamed %s -> %s.\n",
-                                old_dentry->d_name.name,new_dentry->d_name.name);
+               	        DPRINTK("ncp renamed %pd -> %pd.\n",
+                                old_dentry, new_dentry);
 			break;
 		case 0x9E:
 			error = -ENAMETOOLONG;
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 122e260..8f5074e 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -107,8 +107,7 @@
 	void* freepage;
 	size_t freelen;
 
-	DPRINTK("ncp_file_read: enter %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_file_read: enter %pd2\n", dentry);
 
 	pos = *ppos;
 
@@ -166,8 +165,7 @@
 
 	file_accessed(file);
 
-	DPRINTK("ncp_file_read: exit %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_file_read: exit %pd2\n", dentry);
 outrel:
 	ncp_inode_close(inode);		
 	return already_read ? already_read : error;
@@ -184,8 +182,7 @@
 	int errno;
 	void* bouncebuffer;
 
-	DPRINTK("ncp_file_write: enter %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_file_write: enter %pd2\n", dentry);
 	if ((ssize_t) count < 0)
 		return -EINVAL;
 	pos = *ppos;
@@ -264,8 +261,7 @@
 			i_size_write(inode, pos);
 		mutex_unlock(&inode->i_mutex);
 	}
-	DPRINTK("ncp_file_write: exit %s/%s\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	DPRINTK("ncp_file_write: exit %pd2\n", dentry);
 outrel:
 	ncp_inode_close(inode);		
 	return already_written ? already_written : errno;
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 4659da6..2cf2ebe 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -782,6 +782,17 @@
 	return error;
 }
 
+static void delayed_free(struct rcu_head *p)
+{
+	struct ncp_server *server = container_of(p, struct ncp_server, rcu);
+#ifdef CONFIG_NCPFS_NLS
+	/* unload the NLS charsets */
+	unload_nls(server->nls_vol);
+	unload_nls(server->nls_io);
+#endif /* CONFIG_NCPFS_NLS */
+	kfree(server);
+}
+
 static void ncp_put_super(struct super_block *sb)
 {
 	struct ncp_server *server = NCP_SBP(sb);
@@ -792,11 +803,6 @@
 
 	ncp_stop_tasks(server);
 
-#ifdef CONFIG_NCPFS_NLS
-	/* unload the NLS charsets */
-	unload_nls(server->nls_vol);
-	unload_nls(server->nls_io);
-#endif /* CONFIG_NCPFS_NLS */
 	mutex_destroy(&server->rcv.creq_mutex);
 	mutex_destroy(&server->root_setup_lock);
 	mutex_destroy(&server->mutex);
@@ -813,8 +819,7 @@
 	vfree(server->rxbuf);
 	vfree(server->txbuf);
 	vfree(server->packet);
-	sb->s_fs_info = NULL;
-	kfree(server);
+	call_rcu(&server->rcu, delayed_free);
 }
 
 static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index c51b2c5..b81e97a 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -38,7 +38,7 @@
 };
 
 struct ncp_server {
-
+	struct rcu_head rcu;
 	struct ncp_mount_data_kernel m;	/* Nearly all of the mount data is of
 					   interest for us later, so we store
 					   it completely. */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 9a8676f..812154a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -98,9 +98,7 @@
 	struct nfs_open_dir_context *ctx;
 	struct rpc_cred *cred;
 
-	dfprintk(FILE, "NFS: open dir(%s/%s)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name);
+	dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
 
 	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 
@@ -297,11 +295,10 @@
 				if (ctx->duped > 0
 				    && ctx->dup_cookie == *desc->dir_cookie) {
 					if (printk_ratelimit()) {
-						pr_notice("NFS: directory %s/%s contains a readdir loop."
+						pr_notice("NFS: directory %pD2 contains a readdir loop."
 								"Please contact your server vendor.  "
 								"The file: %s has duplicate cookie %llu\n",
-								desc->file->f_dentry->d_parent->d_name.name,
-								desc->file->f_dentry->d_name.name,
+								desc->file,
 								array->array[i].string.name,
 								*desc->dir_cookie);
 					}
@@ -822,9 +819,8 @@
 	struct nfs_open_dir_context *dir_ctx = file->private_data;
 	int res = 0;
 
-	dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
-			dentry->d_parent->d_name.name, dentry->d_name.name,
-			(long long)ctx->pos);
+	dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+			file, (long long)ctx->pos);
 	nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
 
 	/*
@@ -880,22 +876,17 @@
 	nfs_unblock_sillyrename(dentry);
 	if (res > 0)
 		res = 0;
-	dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
-			dentry->d_parent->d_name.name, dentry->d_name.name,
-			res);
+	dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
 	return res;
 }
 
 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 {
-	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 	struct nfs_open_dir_context *dir_ctx = filp->private_data;
 
-	dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
-			dentry->d_parent->d_name.name,
-			dentry->d_name.name,
-			offset, whence);
+	dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
+			filp, offset, whence);
 
 	mutex_lock(&inode->i_mutex);
 	switch (whence) {
@@ -925,15 +916,12 @@
 static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
 			 int datasync)
 {
-	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 
-	dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
-			dentry->d_parent->d_name.name, dentry->d_name.name,
-			datasync);
+	dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
 
 	mutex_lock(&inode->i_mutex);
-	nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC);
+	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
 	mutex_unlock(&inode->i_mutex);
 	return 0;
 }
@@ -1073,9 +1061,8 @@
 	}
 
 	if (is_bad_inode(inode)) {
-		dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-				__func__, dentry->d_parent->d_name.name,
-				dentry->d_name.name);
+		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+				__func__, dentry);
 		goto out_bad;
 	}
 
@@ -1125,9 +1112,8 @@
 	nfs_advise_use_readdirplus(dir);
  out_valid_noent:
 	dput(parent);
-	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
-			__func__, dentry->d_parent->d_name.name,
-			dentry->d_name.name);
+	dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+			__func__, dentry);
 	return 1;
 out_zap_parent:
 	nfs_zap_caches(dir);
@@ -1153,18 +1139,16 @@
 		goto out_valid;
 
 	dput(parent);
-	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
-			__func__, dentry->d_parent->d_name.name,
-			dentry->d_name.name);
+	dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+			__func__, dentry);
 	return 0;
 out_error:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fhandle);
 	nfs4_label_free(label);
 	dput(parent);
-	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
-			__func__, dentry->d_parent->d_name.name,
-			dentry->d_name.name, error);
+	dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+			__func__, dentry, error);
 	return error;
 }
 
@@ -1188,16 +1172,14 @@
 	 * eventually need to do something more here.
 	 */
 	if (!inode) {
-		dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
-				__func__, dentry->d_parent->d_name.name,
-				dentry->d_name.name);
+		dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
+				__func__, dentry);
 		return 1;
 	}
 
 	if (is_bad_inode(inode)) {
-		dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-				__func__, dentry->d_parent->d_name.name,
-				dentry->d_name.name);
+		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+				__func__, dentry);
 		return 0;
 	}
 
@@ -1212,9 +1194,8 @@
  */
 static int nfs_dentry_delete(const struct dentry *dentry)
 {
-	dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		dentry->d_flags);
+	dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
+		dentry, dentry->d_flags);
 
 	/* Unhash any dentry with a stale inode */
 	if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
@@ -1292,8 +1273,7 @@
 	struct nfs4_label *label = NULL;
 	int error;
 
-	dfprintk(VFS, "NFS: lookup(%s/%s)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
 	nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
 
 	res = ERR_PTR(-ENAMETOOLONG);
@@ -1424,8 +1404,8 @@
 	/* Expect a negative dentry */
 	BUG_ON(dentry->d_inode);
 
-	dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
-			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: atomic_open(%s/%ld), %pd\n",
+			dir->i_sb->s_id, dir->i_ino, dentry);
 
 	err = nfs_check_flags(open_flags);
 	if (err)
@@ -1614,8 +1594,8 @@
 	int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
 	int error;
 
-	dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
-			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: create(%s/%ld), %pd\n",
+			dir->i_sb->s_id, dir->i_ino, dentry);
 
 	attr.ia_mode = mode;
 	attr.ia_valid = ATTR_MODE;
@@ -1641,8 +1621,8 @@
 	struct iattr attr;
 	int status;
 
-	dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
-			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: mknod(%s/%ld), %pd\n",
+			dir->i_sb->s_id, dir->i_ino, dentry);
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
@@ -1670,8 +1650,8 @@
 	struct iattr attr;
 	int error;
 
-	dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
-			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: mkdir(%s/%ld), %pd\n",
+			dir->i_sb->s_id, dir->i_ino, dentry);
 
 	attr.ia_valid = ATTR_MODE;
 	attr.ia_mode = mode | S_IFDIR;
@@ -1698,8 +1678,8 @@
 {
 	int error;
 
-	dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
-			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: rmdir(%s/%ld), %pd\n",
+			dir->i_sb->s_id, dir->i_ino, dentry);
 
 	trace_nfs_rmdir_enter(dir, dentry);
 	if (dentry->d_inode) {
@@ -1734,8 +1714,7 @@
 	struct inode *inode = dentry->d_inode;
 	int error = -EBUSY;
 		
-	dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
 
 	/* If the dentry was sillyrenamed, we simply call d_delete() */
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
@@ -1768,8 +1747,8 @@
 	int error;
 	int need_rehash = 0;
 
-	dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
-		dir->i_ino, dentry->d_name.name);
+	dfprintk(VFS, "NFS: unlink(%s/%ld, %pd)\n", dir->i_sb->s_id,
+		dir->i_ino, dentry);
 
 	trace_nfs_unlink_enter(dir, dentry);
 	spin_lock(&dentry->d_lock);
@@ -1819,8 +1798,8 @@
 	unsigned int pathlen = strlen(symname);
 	int error;
 
-	dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
-		dir->i_ino, dentry->d_name.name, symname);
+	dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s)\n", dir->i_sb->s_id,
+		dir->i_ino, dentry, symname);
 
 	if (pathlen > PAGE_SIZE)
 		return -ENAMETOOLONG;
@@ -1842,9 +1821,9 @@
 	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
 	trace_nfs_symlink_exit(dir, dentry, error);
 	if (error != 0) {
-		dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
+		dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s) error %d\n",
 			dir->i_sb->s_id, dir->i_ino,
-			dentry->d_name.name, symname, error);
+			dentry, symname, error);
 		d_drop(dentry);
 		__free_page(page);
 		return error;
@@ -1871,9 +1850,8 @@
 	struct inode *inode = old_dentry->d_inode;
 	int error;
 
-	dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
-		old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
+		old_dentry, dentry);
 
 	trace_nfs_link_enter(inode, dir, dentry);
 	NFS_PROTO(inode)->return_delegation(inode);
@@ -1921,9 +1899,8 @@
 	struct dentry *dentry = NULL, *rehash = NULL;
 	int error = -EBUSY;
 
-	dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
-		 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-		 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+	dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
+		 old_dentry, new_dentry,
 		 d_count(new_dentry));
 
 	trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 91ff089..d71d66c 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -124,9 +124,8 @@
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
 {
 #ifndef CONFIG_NFS_SWAP
-	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
-			iocb->ki_filp->f_path.dentry->d_name.name,
-			(long long) pos, nr_segs);
+	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
+			iocb->ki_filp, (long long) pos, nr_segs);
 
 	return -EINVAL;
 #else
@@ -909,10 +908,8 @@
 	count = iov_length(iov, nr_segs);
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
-	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
-		file->f_path.dentry->d_parent->d_name.name,
-		file->f_path.dentry->d_name.name,
-		count, (long long) pos);
+	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
+		file, count, (long long) pos);
 
 	retval = 0;
 	if (!count)
@@ -965,10 +962,8 @@
 	count = iov_length(iov, nr_segs);
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
-	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
-		file->f_path.dentry->d_parent->d_name.name,
-		file->f_path.dentry->d_name.name,
-		count, (long long) pos);
+	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
+		file, count, (long long) pos);
 
 	retval = generic_write_checks(file, &pos, &count, 0);
 	if (retval)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 1e6bfdb..e2fcacf 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -65,9 +65,7 @@
 {
 	int res;
 
-	dprintk("NFS: open file(%s/%s)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name);
+	dprintk("NFS: open file(%pD2)\n", filp);
 
 	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 	res = nfs_check_flags(filp->f_flags);
@@ -81,9 +79,7 @@
 int
 nfs_file_release(struct inode *inode, struct file *filp)
 {
-	dprintk("NFS: release(%s/%s)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name);
+	dprintk("NFS: release(%pD2)\n", filp);
 
 	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
 	return nfs_release(inode, filp);
@@ -123,10 +119,8 @@
 
 loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
 {
-	dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name,
-			offset, whence);
+	dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
+			filp, offset, whence);
 
 	/*
 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
@@ -150,12 +144,9 @@
 int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-	struct dentry	*dentry = file->f_path.dentry;
-	struct inode	*inode = dentry->d_inode;
+	struct inode	*inode = file_inode(file);
 
-	dprintk("NFS: flush(%s/%s)\n",
-			dentry->d_parent->d_name.name,
-			dentry->d_name.name);
+	dprintk("NFS: flush(%pD2)\n", file);
 
 	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
 	if ((file->f_mode & FMODE_WRITE) == 0)
@@ -177,15 +168,14 @@
 nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-	struct inode * inode = dentry->d_inode;
+	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t result;
 
 	if (iocb->ki_filp->f_flags & O_DIRECT)
 		return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
 
-	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
+	dprintk("NFS: read(%pD2, %lu@%lu)\n",
+		iocb->ki_filp,
 		(unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
 
 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
@@ -203,13 +193,11 @@
 		     struct pipe_inode_info *pipe, size_t count,
 		     unsigned int flags)
 {
-	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 	ssize_t res;
 
-	dprintk("NFS: splice_read(%s/%s, %lu@%Lu)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		(unsigned long) count, (unsigned long long) *ppos);
+	dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
+		filp, (unsigned long) count, (unsigned long long) *ppos);
 
 	res = nfs_revalidate_mapping(inode, filp->f_mapping);
 	if (!res) {
@@ -224,12 +212,10 @@
 int
 nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(file);
 	int	status;
 
-	dprintk("NFS: mmap(%s/%s)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	dprintk("NFS: mmap(%pD2)\n", file);
 
 	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
 	 *       so we call that before revalidating the mapping
@@ -258,15 +244,12 @@
 int
 nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
 {
-	struct dentry *dentry = file->f_path.dentry;
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(file);
 	int have_error, do_resend, status;
 	int ret = 0;
 
-	dprintk("NFS: fsync file(%s/%s) datasync %d\n",
-			dentry->d_parent->d_name.name, dentry->d_name.name,
-			datasync);
+	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
 
 	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
 	do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
@@ -371,10 +354,8 @@
 	struct page *page;
 	int once_thru = 0;
 
-	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
-		file->f_path.dentry->d_parent->d_name.name,
-		file->f_path.dentry->d_name.name,
-		mapping->host->i_ino, len, (long long) pos);
+	dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%ld), %u@%lld)\n",
+		file, mapping->host->i_ino, len, (long long) pos);
 
 start:
 	/*
@@ -414,10 +395,8 @@
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	int status;
 
-	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
-		file->f_path.dentry->d_parent->d_name.name,
-		file->f_path.dentry->d_name.name,
-		mapping->host->i_ino, len, (long long) pos);
+	dfprintk(PAGECACHE, "NFS: write_end(%pD2(%ld), %u@%lld)\n",
+		file, mapping->host->i_ino, len, (long long) pos);
 
 	/*
 	 * Zero any uninitialised parts of the page, and then mark the page
@@ -601,22 +580,21 @@
 {
 	struct page *page = vmf->page;
 	struct file *filp = vma->vm_file;
-	struct dentry *dentry = filp->f_path.dentry;
+	struct inode *inode = file_inode(filp);
 	unsigned pagelen;
 	int ret = VM_FAULT_NOPAGE;
 	struct address_space *mapping;
 
-	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		filp->f_mapping->host->i_ino,
+	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%ld), offset %lld)\n",
+		filp, filp->f_mapping->host->i_ino,
 		(long long)page_offset(page));
 
 	/* make sure the cache has finished storing the page */
-	nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
+	nfs_fscache_wait_on_page_write(NFS_I(inode), page);
 
 	lock_page(page);
 	mapping = page_file_mapping(page);
-	if (mapping != dentry->d_inode->i_mapping)
+	if (mapping != inode->i_mapping)
 		goto out_unlock;
 
 	wait_on_page_writeback(page);
@@ -659,22 +637,21 @@
 ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 		       unsigned long nr_segs, loff_t pos)
 {
-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-	struct inode * inode = dentry->d_inode;
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
 	unsigned long written = 0;
 	ssize_t result;
 	size_t count = iov_length(iov, nr_segs);
 
-	result = nfs_key_timeout_notify(iocb->ki_filp, inode);
+	result = nfs_key_timeout_notify(file, inode);
 	if (result)
 		return result;
 
-	if (iocb->ki_filp->f_flags & O_DIRECT)
+	if (file->f_flags & O_DIRECT)
 		return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
 
-	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		(unsigned long) count, (long long) pos);
+	dprintk("NFS: write(%pD2, %lu@%Ld)\n",
+		file, (unsigned long) count, (long long) pos);
 
 	result = -EBUSY;
 	if (IS_SWAPFILE(inode))
@@ -682,8 +659,8 @@
 	/*
 	 * O_APPEND implies that we must revalidate the file length.
 	 */
-	if (iocb->ki_filp->f_flags & O_APPEND) {
-		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+	if (file->f_flags & O_APPEND) {
+		result = nfs_revalidate_file_size(inode, file);
 		if (result)
 			goto out;
 	}
@@ -697,8 +674,8 @@
 		written = result;
 
 	/* Return error values for O_DSYNC and IS_SYNC() */
-	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
-		int err = vfs_fsync(iocb->ki_filp, 0);
+	if (result >= 0 && nfs_need_sync_write(file, inode)) {
+		int err = vfs_fsync(file, 0);
 		if (err < 0)
 			result = err;
 	}
@@ -717,14 +694,12 @@
 			      struct file *filp, loff_t *ppos,
 			      size_t count, unsigned int flags)
 {
-	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 	unsigned long written = 0;
 	ssize_t ret;
 
-	dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		(unsigned long) count, (unsigned long long) *ppos);
+	dprintk("NFS splice_write(%pD2, %lu@%llu)\n",
+		filp, (unsigned long) count, (unsigned long long) *ppos);
 
 	/*
 	 * The combination of splice and an O_APPEND destination is disallowed.
@@ -883,10 +858,8 @@
 	int ret = -ENOLCK;
 	int is_local = 0;
 
-	dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name,
-			fl->fl_type, fl->fl_flags,
+	dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
+			filp, fl->fl_type, fl->fl_flags,
 			(long long)fl->fl_start, (long long)fl->fl_end);
 
 	nfs_inc_stats(inode, NFSIOS_VFSLOCK);
@@ -923,10 +896,8 @@
 	struct inode *inode = filp->f_mapping->host;
 	int is_local = 0;
 
-	dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
-			filp->f_path.dentry->d_parent->d_name.name,
-			filp->f_path.dentry->d_name.name,
-			fl->fl_type, fl->fl_flags);
+	dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
+			filp, fl->fl_type, fl->fl_flags);
 
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
@@ -960,9 +931,7 @@
  */
 int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
 {
-	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
-			file->f_path.dentry->d_parent->d_name.name,
-			file->f_path.dentry->d_name.name, arg);
+	dprintk("NFS: setlease(%pD2, arg=%ld)\n", file, arg);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(nfs_setlease);
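The recurring cleanup in fs/nfs/file.c swaps the long detour through f_path.dentry for file_inode(), which is essentially (sketch of the include/linux/fs.h helper):

	static inline struct inode *file_inode(const struct file *f)
	{
		return f->f_inode;	/* cached f->f_path.dentry->d_inode */
	}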
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 348b535..b5a0afc 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -253,9 +253,8 @@
 
 	dprintk("--> nfs_do_submount()\n");
 
-	dprintk("%s: submounting on %s/%s\n", __func__,
-			dentry->d_parent->d_name.name,
-			dentry->d_name.name);
+	dprintk("%s: submounting on %pd2\n", __func__,
+			dentry);
 	if (page == NULL)
 		goto out;
 	devname = nfs_devname(dentry, page, PAGE_SIZE);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 90cb10d..01b6f6a 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -321,7 +321,7 @@
 	umode_t mode = sattr->ia_mode;
 	int status = -ENOMEM;
 
-	dprintk("NFS call  create %s\n", dentry->d_name.name);
+	dprintk("NFS call  create %pd\n", dentry);
 
 	data = nfs3_alloc_createdata();
 	if (data == NULL)
@@ -548,7 +548,7 @@
 	if (len > NFS3_MAXPATHLEN)
 		return -ENAMETOOLONG;
 
-	dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+	dprintk("NFS call  symlink %pd\n", dentry);
 
 	data = nfs3_alloc_createdata();
 	if (data == NULL)
@@ -576,7 +576,7 @@
 	umode_t mode = sattr->ia_mode;
 	int status = -ENOMEM;
 
-	dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+	dprintk("NFS call  mkdir %pd\n", dentry);
 
 	sattr->ia_mode &= ~current_umask();
 
@@ -695,7 +695,7 @@
 	umode_t mode = sattr->ia_mode;
 	int status = -ENOMEM;
 
-	dprintk("NFS call  mknod %s %u:%u\n", dentry->d_name.name,
+	dprintk("NFS call  mknod %pd %u:%u\n", dentry,
 			MAJOR(rdev), MINOR(rdev));
 
 	sattr->ia_mode &= ~current_umask();
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 1f01b55..8de3407 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -31,9 +31,7 @@
 	 * -EOPENSTALE.  The VFS will retry the lookup/create/open.
 	 */
 
-	dprintk("NFS: open file(%s/%s)\n",
-		dentry->d_parent->d_name.name,
-		dentry->d_name.name);
+	dprintk("NFS: open file(%pd2)\n", dentry);
 
 	if ((openflags & O_ACCMODE) == 3)
 		openflags--;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index c08cbf4..4e7f05d 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -292,8 +292,7 @@
 	if (locations == NULL || locations->nlocations <= 0)
 		goto out;
 
-	dprintk("%s: referral at %s/%s\n", __func__,
-		dentry->d_parent->d_name.name, dentry->d_name.name);
+	dprintk("%s: referral at %pd2\n", __func__, dentry);
 
 	page = (char *) __get_free_page(GFP_USER);
 	if (!page)
@@ -357,8 +356,8 @@
 	mnt = ERR_PTR(-ENOENT);
 
 	parent = dget_parent(dentry);
-	dprintk("%s: getting locations for %s/%s\n",
-		__func__, parent->d_name.name, dentry->d_name.name);
+	dprintk("%s: getting locations for %pd2\n",
+		__func__, dentry);
 
 	err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
 	dput(parent);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5ab33c0..659990c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3771,9 +3771,8 @@
 	};
 	int			status;
 
-	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
-			dentry->d_parent->d_name.name,
-			dentry->d_name.name,
+	dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
+			dentry,
 			(unsigned long long)cookie);
 	nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
 	res.pgbase = args.pgbase;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c8e729d..74a7e12 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -244,7 +244,7 @@
 	set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 	spin_lock(&tbl->slot_tbl_lock);
 	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
-		INIT_COMPLETION(tbl->complete);
+		reinit_completion(&tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
 		return wait_for_completion_interruptible(&tbl->complete);
 	}
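
The INIT_COMPLETION() macro was replaced tree-wide in 3.13 by
reinit_completion(), which takes a pointer and only resets the done counter;
init_completion() remains the one-time initializer that also sets up the
waitqueue.  A small sketch of the re-arming pattern, using hypothetical
names:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(drain_done);	/* hypothetical */

	static int wait_for_drain_twice(void)
	{
		int err = wait_for_completion_interruptible(&drain_done);

		if (err)
			return err;
		/* Reset ->done for reuse; the waitqueue stays initialized. */
		reinit_completion(&drain_done);
		return wait_for_completion_interruptible(&drain_done);
	}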
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index a8f57c7..fddbba2 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -235,7 +235,7 @@
 	};
 	int status = -ENOMEM;
 
-	dprintk("NFS call  create %s\n", dentry->d_name.name);
+	dprintk("NFS call  create %pd\n", dentry);
 	data = nfs_alloc_createdata(dir, dentry, sattr);
 	if (data == NULL)
 		goto out;
@@ -265,7 +265,7 @@
 	umode_t mode;
 	int status = -ENOMEM;
 
-	dprintk("NFS call  mknod %s\n", dentry->d_name.name);
+	dprintk("NFS call  mknod %pd\n", dentry);
 
 	mode = sattr->ia_mode;
 	if (S_ISFIFO(mode)) {
@@ -423,7 +423,7 @@
 	};
 	int status = -ENAMETOOLONG;
 
-	dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+	dprintk("NFS call  symlink %pd\n", dentry);
 
 	if (len > NFS2_MAXPATHLEN)
 		goto out;
@@ -462,7 +462,7 @@
 	};
 	int status = -ENOMEM;
 
-	dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+	dprintk("NFS call  mkdir %pd\n", dentry);
 	data = nfs_alloc_createdata(dir, dentry, sattr);
 	if (data == NULL)
 		goto out;
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 0c29b1b..11d7894 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -495,9 +495,8 @@
 	struct rpc_task *task;
 	int            error = -EBUSY;
 
-	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name,
-		d_count(dentry));
+	dfprintk(VFS, "NFS: silly-rename(%pd2, ct=%d)\n",
+		dentry, d_count(dentry));
 	nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
 
 	/*
@@ -521,8 +520,8 @@
 				SILLYNAME_FILEID_LEN, fileid,
 				SILLYNAME_COUNTER_LEN, sillycounter);
 
-		dfprintk(VFS, "NFS: trying to rename %s to %s\n",
-				dentry->d_name.name, silly);
+		dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
+				dentry, silly);
 
 		sdentry = lookup_one_len(silly, dentry->d_parent, slen);
 		/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ac1dc33..c1d5482 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -954,10 +954,8 @@
 
 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
 
-	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
-		file->f_path.dentry->d_parent->d_name.name,
-		file->f_path.dentry->d_name.name, count,
-		(long long)(page_file_offset(page) + offset));
+	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
+		file, count, (long long)(page_file_offset(page) + offset));
 
 	if (nfs_can_extend_write(file, page, inode)) {
 		count = max(count + offset, nfs_page_length(page));
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index dc8f1ef..f994e75 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -95,7 +95,7 @@
 	Smack policies on NFSv4 files, say N.
 
 	WARNING: there is still a chance of backwards-incompatible protocol changes.
-	For now we recommend "Y" only for developers and testers."
+	For now we recommend "Y" only for developers and testers.
 
 config NFSD_FAULT_INJECTION
 	bool "NFS server manual fault injection"
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 5f38ea3..8513c59 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -536,16 +536,12 @@
 		if (err)
 			goto out3;
 		exp.ex_anon_uid= make_kuid(&init_user_ns, an_int);
-		if (!uid_valid(exp.ex_anon_uid))
-			goto out3;
 
 		/* anon gid */
 		err = get_int(&mesg, &an_int);
 		if (err)
 			goto out3;
 		exp.ex_anon_gid= make_kgid(&init_user_ns, an_int);
-		if (!gid_valid(exp.ex_anon_gid))
-			goto out3;
 
 		/* fsid */
 		err = get_int(&mesg, &an_int);
@@ -583,6 +579,26 @@
 				   exp.ex_uuid);
 		if (err)
 			goto out4;
+		/*
+		 * No point caching this if it would immediately expire.
+		 * Also, this protects exportfs's dummy export from the
+		 * anon_uid/anon_gid checks:
+		 */
+		if (exp.h.expiry_time < seconds_since_boot())
+			goto out4;
+		/*
+		 * For some reason exportfs has been passing down an
+		 * invalid (-1) uid & gid on the "dummy" export which it
+		 * uses to test export support.  To make sure exportfs
+		 * sees errors from check_export we therefore need to
+		 * delay these checks till after check_export:
+		 */
+		err = -EINVAL;
+		if (!uid_valid(exp.ex_anon_uid))
+			goto out4;
+		if (!gid_valid(exp.ex_anon_gid))
+			goto out4;
+		err = 0;
 	}
 
 	expp = svc_export_lookup(&exp);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index e0a65a9..9c271f4 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -385,8 +385,8 @@
 
 	status = vfs_rmdir(parent->d_inode, child);
 	if (status)
-		printk("failed to remove client recovery directory %s\n",
-				child->d_name.name);
+		printk("failed to remove client recovery directory %pd\n",
+				child);
 	/* Keep trying, success or failure: */
 	return 0;
 }
@@ -410,15 +410,15 @@
 	nfs4_release_reclaim(nn);
 	if (status)
 		printk("nfsd4: failed to purge old clients from recovery"
-			" directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+			" directory %pD\n", nn->rec_file);
 }
 
 static int
 load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
 {
 	if (child->d_name.len != HEXDIR_LEN - 1) {
-		printk("nfsd4: illegal name %s in recovery directory\n",
-				child->d_name.name);
+		printk("nfsd4: illegal name %pd in recovery directory\n",
+				child);
 		/* Keep trying; maybe the others are OK: */
 		return 0;
 	}
@@ -437,7 +437,7 @@
 	status = nfsd4_list_rec_dir(load_recdir, nn);
 	if (status)
 		printk("nfsd4: failed loading clients from recovery"
-			" directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+			" directory %pD\n", nn->rec_file);
 	return status;
 }
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0874998..105d6fa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -402,11 +402,16 @@
 	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
 }
 
+static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
+{
+	kmem_cache_free(slab, s);
+}
+
 void
 nfs4_put_delegation(struct nfs4_delegation *dp)
 {
 	if (atomic_dec_and_test(&dp->dl_count)) {
-		kmem_cache_free(deleg_slab, dp);
+		nfs4_free_stid(deleg_slab, &dp->dl_stid);
 		num_delegations--;
 	}
 }
@@ -610,7 +615,7 @@
 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
 {
 	remove_stid(&stp->st_stid);
-	kmem_cache_free(stateid_slab, stp);
+	nfs4_free_stid(stateid_slab, &stp->st_stid);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -668,7 +673,6 @@
 static void release_open_stateid(struct nfs4_ol_stateid *stp)
 {
 	unhash_open_stateid(stp);
-	unhash_stid(&stp->st_stid);
 	free_generic_stateid(stp);
 }
 
@@ -690,7 +694,6 @@
 	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
 
 	if (s) {
-		unhash_stid(&s->st_stid);
 		free_generic_stateid(s);
 		oo->oo_last_closed_stid = NULL;
 	}
@@ -1127,6 +1130,11 @@
 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
 		destroy_delegation(dp);
 	}
+	list_splice_init(&clp->cl_revoked, &reaplist);
+	while (!list_empty(&reaplist)) {
+		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+		destroy_revoked_delegation(dp);
+	}
 	while (!list_empty(&clp->cl_openowners)) {
 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
 		release_openowner(oo);
@@ -3008,7 +3016,7 @@
 		return NULL;
 	locks_init_lock(fl);
 	fl->fl_lmops = &nfsd_lease_mng_ops;
-	fl->fl_flags = FL_LEASE;
+	fl->fl_flags = FL_DELEG;
 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
 	fl->fl_end = OFFSET_MAX;
 	fl->fl_owner = (fl_owner_t)(dp->dl_file);
@@ -3154,7 +3162,7 @@
 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
 	return;
 out_free:
-	unhash_stid(&dp->dl_stid);
+	remove_stid(&dp->dl_stid);
 	nfs4_put_delegation(dp);
 out_no_deleg:
 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
@@ -3843,9 +3851,8 @@
 	struct nfs4_ol_stateid *stp;
 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
-			(int)cstate->current_fh.fh_dentry->d_name.len,
-			cstate->current_fh.fh_dentry->d_name.name);
+	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
+			cstate->current_fh.fh_dentry);
 
 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
 	if (status)
@@ -3922,9 +3929,8 @@
 	struct nfs4_ol_stateid *stp;
 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 
-			(int)cstate->current_fh.fh_dentry->d_name.len,
-			cstate->current_fh.fh_dentry->d_name.name);
+	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
+			cstate->current_fh.fh_dentry);
 
 	/* We don't yet support WANT bits: */
 	if (od->od_deleg_want)
@@ -3980,9 +3986,8 @@
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	dprintk("NFSD: nfsd4_close on file %.*s\n", 
-			(int)cstate->current_fh.fh_dentry->d_name.len,
-			cstate->current_fh.fh_dentry->d_name.name);
+	dprintk("NFSD: nfsd4_close on file %pd\n", 
+			cstate->current_fh.fh_dentry);
 
 	nfs4_lock_state();
 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
@@ -3998,10 +4003,9 @@
 
 	nfsd4_close_open_stateid(stp);
 
-	if (cstate->minorversion) {
-		unhash_stid(&stp->st_stid);
+	if (cstate->minorversion)
 		free_generic_stateid(stp);
-	} else
+	else
 		oo->oo_last_closed_stid = stp;
 
 	if (list_empty(&oo->oo_owner.so_stateids)) {
@@ -5122,7 +5126,6 @@
 	return ret;
 }
 
-/* should be called with the state lock held */
 void
 nfs4_state_shutdown_net(struct net *net)
 {
@@ -5133,6 +5136,7 @@
 	cancel_delayed_work_sync(&nn->laundromat_work);
 	locks_end_grace(&nn->nfsd4_manager);
 
+	nfs4_lock_state();
 	INIT_LIST_HEAD(&reaplist);
 	spin_lock(&recall_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
@@ -5147,6 +5151,7 @@
 
 	nfsd4_client_tracking_exit(net);
 	nfs4_state_destroy_net(net);
+	nfs4_unlock_state();
 }
 
 void
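
The FL_LEASE to FL_DELEG switch above uses the file_lock flag new in 3.13
that marks NFSv4 delegations as distinct from ordinary leases.  A hedged
sketch of how such a lock is set up, mirroring the hunk above
(alloc_deleg_lock is a made-up name):

	#include <linux/fs.h>

	static struct file_lock *alloc_deleg_lock(bool want_write)
	{
		struct file_lock *fl = locks_alloc_lock();

		if (!fl)
			return NULL;
		locks_init_lock(fl);
		fl->fl_flags = FL_DELEG;	/* delegation, not a plain FL_LEASE */
		fl->fl_type = want_write ? F_WRLCK : F_RDLCK;
		fl->fl_end = OFFSET_MAX;
		return fl;
	}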
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index d9454fe..088de13 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -411,6 +411,7 @@
 		label->data = kzalloc(dummy32 + 1, GFP_KERNEL);
 		if (!label->data)
 			return nfserr_jukebox;
+		label->len = dummy32;
 		defer_free(argp, kfree, label->data);
 		memcpy(label->data, buf, dummy32);
 	}
@@ -945,13 +946,16 @@
 nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
 {
 	DECODE_HEAD;
-		    
+
+	if (argp->minorversion >= 1)
+		return nfserr_notsupp;
+
 	status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
 	if (status)
 		return status;
 	READ_BUF(4);
 	READ32(open_conf->oc_seqid);
-						        
+
 	DECODE_TAIL;
 }
 
@@ -991,6 +995,14 @@
 }
 
 static __be32
+nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
+{
+	if (argp->minorversion == 0)
+		return nfs_ok;
+	return nfserr_notsupp;
+}
+
+static __be32
 nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
 {
 	DECODE_HEAD;
@@ -1061,6 +1073,9 @@
 {
 	DECODE_HEAD;
 
+	if (argp->minorversion >= 1)
+		return nfserr_notsupp;
+
 	READ_BUF(sizeof(clientid_t));
 	COPYMEM(clientid, sizeof(clientid_t));
 
@@ -1111,6 +1126,9 @@
 {
 	DECODE_HEAD;
 
+	if (argp->minorversion >= 1)
+		return nfserr_notsupp;
+
 	READ_BUF(NFS4_VERIFIER_SIZE);
 	COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
 
@@ -1137,6 +1155,9 @@
 {
 	DECODE_HEAD;
 
+	if (argp->minorversion >= 1)
+		return nfserr_notsupp;
+
 	READ_BUF(8 + NFS4_VERIFIER_SIZE);
 	COPYMEM(&scd_c->sc_clientid, 8);
 	COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
@@ -1220,6 +1241,9 @@
 {
 	DECODE_HEAD;
 
+	if (argp->minorversion >= 1)
+		return nfserr_notsupp;
+
 	READ_BUF(12);
 	COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
 	READ32(rlockowner->rl_owner.len);
@@ -1519,7 +1543,7 @@
 	[OP_OPEN_CONFIRM]	= (nfsd4_dec)nfsd4_decode_open_confirm,
 	[OP_OPEN_DOWNGRADE]	= (nfsd4_dec)nfsd4_decode_open_downgrade,
 	[OP_PUTFH]		= (nfsd4_dec)nfsd4_decode_putfh,
-	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_noop,
+	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_putpubfh,
 	[OP_PUTROOTFH]		= (nfsd4_dec)nfsd4_decode_noop,
 	[OP_READ]		= (nfsd4_dec)nfsd4_decode_read,
 	[OP_READDIR]		= (nfsd4_dec)nfsd4_decode_readdir,
@@ -1536,46 +1560,6 @@
 	[OP_VERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
 	[OP_WRITE]		= (nfsd4_dec)nfsd4_decode_write,
 	[OP_RELEASE_LOCKOWNER]	= (nfsd4_dec)nfsd4_decode_release_lockowner,
-};
-
-static nfsd4_dec nfsd41_dec_ops[] = {
-	[OP_ACCESS]		= (nfsd4_dec)nfsd4_decode_access,
-	[OP_CLOSE]		= (nfsd4_dec)nfsd4_decode_close,
-	[OP_COMMIT]		= (nfsd4_dec)nfsd4_decode_commit,
-	[OP_CREATE]		= (nfsd4_dec)nfsd4_decode_create,
-	[OP_DELEGPURGE]		= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_DELEGRETURN]	= (nfsd4_dec)nfsd4_decode_delegreturn,
-	[OP_GETATTR]		= (nfsd4_dec)nfsd4_decode_getattr,
-	[OP_GETFH]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_LINK]		= (nfsd4_dec)nfsd4_decode_link,
-	[OP_LOCK]		= (nfsd4_dec)nfsd4_decode_lock,
-	[OP_LOCKT]		= (nfsd4_dec)nfsd4_decode_lockt,
-	[OP_LOCKU]		= (nfsd4_dec)nfsd4_decode_locku,
-	[OP_LOOKUP]		= (nfsd4_dec)nfsd4_decode_lookup,
-	[OP_LOOKUPP]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_NVERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
-	[OP_OPEN]		= (nfsd4_dec)nfsd4_decode_open,
-	[OP_OPENATTR]		= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_OPEN_CONFIRM]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_OPEN_DOWNGRADE]	= (nfsd4_dec)nfsd4_decode_open_downgrade,
-	[OP_PUTFH]		= (nfsd4_dec)nfsd4_decode_putfh,
-	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_PUTROOTFH]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_READ]		= (nfsd4_dec)nfsd4_decode_read,
-	[OP_READDIR]		= (nfsd4_dec)nfsd4_decode_readdir,
-	[OP_READLINK]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_REMOVE]		= (nfsd4_dec)nfsd4_decode_remove,
-	[OP_RENAME]		= (nfsd4_dec)nfsd4_decode_rename,
-	[OP_RENEW]		= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_RESTOREFH]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_SAVEFH]		= (nfsd4_dec)nfsd4_decode_noop,
-	[OP_SECINFO]		= (nfsd4_dec)nfsd4_decode_secinfo,
-	[OP_SETATTR]		= (nfsd4_dec)nfsd4_decode_setattr,
-	[OP_SETCLIENTID]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_VERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
-	[OP_WRITE]		= (nfsd4_dec)nfsd4_decode_write,
-	[OP_RELEASE_LOCKOWNER]	= (nfsd4_dec)nfsd4_decode_notsupp,
 
 	/* new operations for NFSv4.1 */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_backchannel_ctl,
@@ -1599,24 +1583,53 @@
 	[OP_RECLAIM_COMPLETE]	= (nfsd4_dec)nfsd4_decode_reclaim_complete,
 };
 
-struct nfsd4_minorversion_ops {
-	nfsd4_dec *decoders;
-	int nops;
-};
+static inline bool
+nfsd4_opnum_in_range(struct nfsd4_compoundargs *argp, struct nfsd4_op *op)
+{
+	if (op->opnum < FIRST_NFS4_OP)
+		return false;
+	else if (argp->minorversion == 0 && op->opnum > LAST_NFS40_OP)
+		return false;
+	else if (argp->minorversion == 1 && op->opnum > LAST_NFS41_OP)
+		return false;
+	else if (argp->minorversion == 2 && op->opnum > LAST_NFS42_OP)
+		return false;
+	return true;
+}
 
-static struct nfsd4_minorversion_ops nfsd4_minorversion[] = {
-	[0] = { nfsd4_dec_ops, ARRAY_SIZE(nfsd4_dec_ops) },
-	[1] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
-	[2] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
-};
+/*
+ * Return a rough estimate of the maximum possible reply size.  Note the
+ * estimate includes rpc headers so is meant to be passed to
+ * svc_reserve, not svc_reserve_auth.
+ *
+ * Also note the current compound encoding permits only one operation to
+ * use pages beyond the first one, so the maximum possible length is the
+ * maximum over these values, not the sum.
+ */
+static int nfsd4_max_reply(u32 opnum)
+{
+	switch (opnum) {
+	case OP_READLINK:
+	case OP_READDIR:
+		/*
+		 * Both of these ops take a single page for data and put
+		 * the head and tail in another page:
+		 */
+		return 2 * PAGE_SIZE;
+	case OP_READ:
+		return INT_MAX;
+	default:
+		return PAGE_SIZE;
+	}
+}
 
 static __be32
 nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 {
 	DECODE_HEAD;
 	struct nfsd4_op *op;
-	struct nfsd4_minorversion_ops *ops;
 	bool cachethis = false;
+	int max_reply = PAGE_SIZE;
 	int i;
 
 	READ_BUF(4);
@@ -1640,10 +1653,9 @@
 		}
 	}
 
-	if (argp->minorversion >= ARRAY_SIZE(nfsd4_minorversion))
+	if (argp->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
 		argp->opcnt = 0;
 
-	ops = &nfsd4_minorversion[argp->minorversion];
 	for (i = 0; i < argp->opcnt; i++) {
 		op = &argp->ops[i];
 		op->replay = NULL;
@@ -1651,8 +1663,8 @@
 		READ_BUF(4);
 		READ32(op->opnum);
 
-		if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
-			op->status = ops->decoders[op->opnum](argp, &op->u);
+		if (nfsd4_opnum_in_range(argp, op))
+			op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
 		else {
 			op->opnum = OP_ILLEGAL;
 			op->status = nfserr_op_illegal;
@@ -1667,10 +1679,14 @@
 		 * op in the compound wants to be cached:
 		 */
 		cachethis |= nfsd4_cache_this_op(op);
+
+		max_reply = max(max_reply, nfsd4_max_reply(op->opnum));
 	}
 	/* Sessions make the DRC unnecessary: */
 	if (argp->minorversion)
 		cachethis = false;
+	if (max_reply != INT_MAX)
+		svc_reserve(argp->rqstp, max_reply);
 	argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
 
 	DECODE_TAIL;
@@ -2375,7 +2391,7 @@
 	if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
 		if ((buflen -= 8) < 0)
 			goto out_resource;
-		WRITE64(~(u64)0);
+		WRITE64(exp->ex_path.mnt->mnt_sb->s_maxbytes);
 	}
 	if (bmval0 & FATTR4_WORD0_MAXLINK) {
 		if ((buflen -= 4) < 0)
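
The decode loop above folds nfsd4_max_reply() over every op in the compound
and hands the result to svc_reserve().  In isolation the estimate works out
roughly like this (estimate_reply is illustrative, not in the patch):

	/*
	 * Sketch: the worst-case reply size for a compound is the max,
	 * not the sum, because only one op may use pages past the first.
	 * Reuses nfsd4_max_reply() from the hunk above.
	 */
	static int estimate_reply(const u32 *opnums, int nops)
	{
		int i, max_reply = PAGE_SIZE;

		for (i = 0; i < nops; i++)
			max_reply = max(max_reply, nfsd4_max_reply(opnums[i]));
		return max_reply;	/* INT_MAX (a READ) means "don't reserve" */
	}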
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 814afaa..3c37b16 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -47,7 +47,7 @@
 		tdentry = parent;
 	}
 	if (tdentry != exp->ex_path.dentry)
-		dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
+		dprintk("nfsd_acceptable failed at %p %pd\n", tdentry, tdentry);
 	rv = (tdentry == exp->ex_path.dentry);
 	dput(tdentry);
 	return rv;
@@ -253,8 +253,8 @@
 
 	if (S_ISDIR(dentry->d_inode->i_mode) &&
 			(dentry->d_flags & DCACHE_DISCONNECTED)) {
-		printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
-				dentry->d_parent->d_name.name, dentry->d_name.name);
+		printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
+				dentry);
 	}
 
 	fhp->fh_dentry = dentry;
@@ -361,10 +361,9 @@
 	error = nfsd_permission(rqstp, exp, dentry, access);
 
 	if (error) {
-		dprintk("fh_verify: %s/%s permission failure, "
+		dprintk("fh_verify: %pd2 permission failure, "
 			"acc=%x, error=%d\n",
-			dentry->d_parent->d_name.name,
-			dentry->d_name.name,
+			dentry,
 			access, ntohl(error));
 	}
 out:
@@ -514,14 +513,13 @@
 	 */
 
 	struct inode * inode = dentry->d_inode;
-	struct dentry *parent = dentry->d_parent;
 	__u32 *datap;
 	dev_t ex_dev = exp_sb(exp)->s_dev;
 
-	dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+	dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
 		MAJOR(ex_dev), MINOR(ex_dev),
 		(long) exp->ex_path.dentry->d_inode->i_ino,
-		parent->d_name.name, dentry->d_name.name,
+		dentry,
 		(inode ? inode->i_ino : 0));
 
 	/* Choose filehandle version and fsid type based on
@@ -534,13 +532,13 @@
 		fh_put(ref_fh);
 
 	if (fhp->fh_locked || fhp->fh_dentry) {
-		printk(KERN_ERR "fh_compose: fh %s/%s not initialized!\n",
-		       parent->d_name.name, dentry->d_name.name);
+		printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
+		       dentry);
 	}
 	if (fhp->fh_maxsize < NFS_FHSIZE)
-		printk(KERN_ERR "fh_compose: called with maxsize %d! %s/%s\n",
+		printk(KERN_ERR "fh_compose: called with maxsize %d! %pd2\n",
 		       fhp->fh_maxsize,
-		       parent->d_name.name, dentry->d_name.name);
+		       dentry);
 
 	fhp->fh_dentry = dget(dentry); /* our internal copy */
 	fhp->fh_export = exp;
@@ -600,22 +598,20 @@
 		_fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
 	} else {
 		if (fhp->fh_handle.fh_fileid_type != FILEID_ROOT)
-			goto out;
+			return 0;
 
 		_fh_update(fhp, fhp->fh_export, dentry);
 		if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
 			return nfserr_opnotsupp;
 	}
-out:
 	return 0;
-
 out_bad:
 	printk(KERN_ERR "fh_update: fh not verified!\n");
-	goto out;
+	return nfserr_serverfault;
 out_negative:
-	printk(KERN_ERR "fh_update: %s/%s still negative!\n",
-		dentry->d_parent->d_name.name, dentry->d_name.name);
-	goto out;
+	printk(KERN_ERR "fh_update: %pd2 still negative!\n",
+		dentry);
+	return nfserr_serverfault;
 }
 
 /*
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index e5e6707..4775bc4 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -173,8 +173,8 @@
 	BUG_ON(!dentry);
 
 	if (fhp->fh_locked) {
-		printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
-			dentry->d_parent->d_name.name, dentry->d_name.name);
+		printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
+			dentry);
 		return;
 	}
 
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c827acb..94b5f5d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -427,7 +427,7 @@
 			goto out_nfserr;
 		fh_lock(fhp);
 
-		host_err = notify_change(dentry, iap);
+		host_err = notify_change(dentry, iap, NULL);
 		err = nfserrno(host_err);
 		fh_unlock(fhp);
 	}
@@ -988,7 +988,11 @@
 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
 
 	mutex_lock(&dentry->d_inode->i_mutex);
-	notify_change(dentry, &ia);
+	/*
+	 * Note we call this on write, so notify_change will not
+	 * encounter any conflicting delegations:
+	 */
+	notify_change(dentry, &ia, NULL);
 	mutex_unlock(&dentry->d_inode->i_mutex);
 }
 
@@ -1317,9 +1321,8 @@
 		if (!fhp->fh_locked) {
 			/* not actually possible */
 			printk(KERN_ERR
-				"nfsd_create: parent %s/%s not locked!\n",
-				dentry->d_parent->d_name.name,
-				dentry->d_name.name);
+				"nfsd_create: parent %pd2 not locked!\n",
+				dentry);
 			err = nfserr_io;
 			goto out;
 		}
@@ -1329,8 +1332,8 @@
 	 */
 	err = nfserr_exist;
 	if (dchild->d_inode) {
-		dprintk("nfsd_create: dentry %s/%s not negative!\n",
-			dentry->d_name.name, dchild->d_name.name);
+		dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
+			dentry, dchild);
 		goto out; 
 	}
 
@@ -1737,7 +1740,7 @@
 		err = nfserrno(host_err);
 		goto out_dput;
 	}
-	host_err = vfs_link(dold, dirp, dnew);
+	host_err = vfs_link(dold, dirp, dnew, NULL);
 	if (!host_err) {
 		err = nfserrno(commit_metadata(ffhp));
 		if (!err)
@@ -1838,7 +1841,7 @@
 		if (host_err)
 			goto out_dput_new;
 	}
-	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
+	host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
 		if (!host_err)
@@ -1911,7 +1914,7 @@
 	if (host_err)
 		goto out_put;
 	if (type != S_IFDIR)
-		host_err = vfs_unlink(dirp, rdentry);
+		host_err = vfs_unlink(dirp, rdentry, NULL);
 	else
 		host_err = vfs_rmdir(dirp, rdentry);
 	if (!host_err)
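
The NULL third arguments sprinkled through the hunk above reflect a 3.13 VFS
interface change: notify_change(), vfs_unlink(), vfs_link() and vfs_rename()
now take a struct inode **delegated_inode so callers can break NFSv4
delegations outside i_mutex and retry.  nfsd passes NULL to opt out of the
retry protocol, since the server resolves conflicting delegations before it
gets here.  The 3.13 prototypes, for reference (see include/linux/fs.h):

	int vfs_unlink(struct inode *dir, struct dentry *dentry,
		       struct inode **delegated_inode);
	int vfs_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *new_dentry, struct inode **delegated_inode);
	int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       struct inode **delegated_inode);

The retry-capable form of the pattern is sketched after the fs/open.c hunks
below.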
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2778b02..ffb9b36 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -55,7 +55,7 @@
  *
  * Return 1 if the attributes match and 0 if not.
  *
- * NOTE: This function runs with the inode->i_lock spin lock held so it is not
+ * NOTE: This function runs with the inode_hash_lock spin lock held so it is not
  * allowed to sleep.
  */
 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 17e6bdd..dc7411f 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1025,7 +1025,7 @@
 		for(i = count;  i < (num_got + count); i++) {
 			bhs[i] = sb_getblk(osb->sb, first_blkno);
 			if (bhs[i] == NULL) {
-				status = -EIO;
+				status = -ENOMEM;
 				mlog_errno(status);
 				goto bail;
 			}
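
The -EIO to -ENOMEM substitution here, repeated throughout the ocfs2 hunks
below, follows from one fact: sb_getblk() returns NULL only when it cannot
allocate a buffer head, so -ENOMEM is the honest errno.  Distilled into a
minimal sketch (get_block_bh is illustrative):

	#include <linux/buffer_head.h>

	static int get_block_bh(struct super_block *sb, sector_t blkno,
				struct buffer_head **ret)
	{
		struct buffer_head *bh = sb_getblk(sb, blkno);

		if (!bh)	/* allocation failure, not an I/O error */
			return -ENOMEM;
		*ret = bh;
		return 0;
	}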
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f37d3c0..aeb44e8 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -80,6 +80,7 @@
 
 	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
 						    le32_to_cpu(fe->i_clusters))) {
+		err = -ENOMEM;
 		mlog(ML_ERROR, "block offset is outside the allocated size: "
 		     "%llu\n", (unsigned long long)iblock);
 		goto bail;
@@ -92,6 +93,7 @@
 			    iblock;
 		buffer_cache_bh = sb_getblk(osb->sb, blkno);
 		if (!buffer_cache_bh) {
+			err = -ENOMEM;
 			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
 			goto bail;
 		}
@@ -592,26 +594,11 @@
 	ocfs2_rw_unlock(inode, level);
 }
 
-/*
- * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
- * from ext3.  PageChecked() bits have been removed as OCFS2 does not
- * do journalled data.
- */
-static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
-				 unsigned int length)
-{
-	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
-
-	jbd2_journal_invalidatepage(journal, page, offset, length);
-}
-
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
 {
-	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
-
 	if (!page_has_buffers(page))
 		return 0;
-	return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	return try_to_free_buffers(page);
 }
 
 static ssize_t ocfs2_direct_IO(int rw,
@@ -1802,8 +1789,7 @@
 			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
 
 		credits = ocfs2_calc_extend_credits(inode->i_sb,
-						    &di->id2.i_list,
-						    clusters_to_alloc);
+						    &di->id2.i_list);
 
 	}
 
@@ -1897,10 +1883,14 @@
 out:
 	ocfs2_free_write_ctxt(wc);
 
-	if (data_ac)
+	if (data_ac) {
 		ocfs2_free_alloc_context(data_ac);
-	if (meta_ac)
+		data_ac = NULL;
+	}
+	if (meta_ac) {
 		ocfs2_free_alloc_context(meta_ac);
+		meta_ac = NULL;
+	}
 
 	if (ret == -ENOSPC && try_free) {
 		/*
@@ -2087,7 +2077,7 @@
 	.write_end		= ocfs2_write_end,
 	.bmap			= ocfs2_bmap,
 	.direct_IO		= ocfs2_direct_IO,
-	.invalidatepage		= ocfs2_invalidatepage,
+	.invalidatepage		= block_invalidatepage,
 	.releasepage		= ocfs2_releasepage,
 	.migratepage		= buffer_migrate_page,
 	.is_partially_uptodate	= block_is_partially_uptodate,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 5d18ad1..5b704c6 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -115,7 +115,7 @@
 		if (bhs[i] == NULL) {
 			bhs[i] = sb_getblk(osb->sb, block++);
 			if (bhs[i] == NULL) {
-				status = -EIO;
+				status = -ENOMEM;
 				mlog_errno(status);
 				goto bail;
 			}
@@ -214,7 +214,7 @@
 			bhs[i] = sb_getblk(sb, block++);
 			if (bhs[i] == NULL) {
 				ocfs2_metadata_cache_io_unlock(ci);
-				status = -EIO;
+				status = -ENOMEM;
 				mlog_errno(status);
 				goto bail;
 			}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 363f0dc..73920ff 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 
 #include "heartbeat.h"
 #include "tcp.h"
@@ -282,15 +283,6 @@
 	int               wc_error;
 };
 
-static int o2hb_pop_count(void *map, int count)
-{
-	int i = -1, pop = 0;
-
-	while ((i = find_next_bit(map, count, i + 1)) < count)
-		pop++;
-	return pop;
-}
-
 static void o2hb_write_timeout(struct work_struct *work)
 {
 	int failed, quorum;
@@ -307,9 +299,9 @@
 		spin_lock_irqsave(&o2hb_live_lock, flags);
 		if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
 			set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
-		failed = o2hb_pop_count(&o2hb_failed_region_bitmap,
+		failed = bitmap_weight(o2hb_failed_region_bitmap,
 					O2NM_MAX_REGIONS);
-		quorum = o2hb_pop_count(&o2hb_quorum_region_bitmap,
+		quorum = bitmap_weight(o2hb_quorum_region_bitmap,
 					O2NM_MAX_REGIONS);
 		spin_unlock_irqrestore(&o2hb_live_lock, flags);
 
@@ -765,7 +757,7 @@
 	 * If global heartbeat active, unpin all regions if the
 	 * region count > CUT_OFF
 	 */
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
 		o2hb_region_unpin(NULL);
 unlock:
@@ -954,23 +946,9 @@
 	return changed;
 }
 
-/* This could be faster if we just implmented a find_last_bit, but I
- * don't think the circumstances warrant it. */
-static int o2hb_highest_node(unsigned long *nodes,
-			     int numbits)
+static int o2hb_highest_node(unsigned long *nodes, int numbits)
 {
-	int highest, node;
-
-	highest = numbits;
-	node = -1;
-	while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
-		if (node >= numbits)
-			break;
-
-		highest = node;
-	}
-
-	return highest;
+	return find_last_bit(nodes, numbits);
 }
 
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
@@ -1829,7 +1807,7 @@
 	live_threshold = O2HB_LIVE_THRESHOLD;
 	if (o2hb_global_heartbeat_active()) {
 		spin_lock(&o2hb_live_lock);
-		if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
+		if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
 			live_threshold <<= 1;
 		spin_unlock(&o2hb_live_lock);
 	}
@@ -2180,7 +2158,7 @@
 	if (!o2hb_dependent_users)
 		goto unlock;
 
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
 		o2hb_region_pin(NULL);
 
@@ -2480,7 +2458,7 @@
 	if (o2hb_dependent_users > 1)
 		goto unlock;
 
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
 		ret = o2hb_region_pin(NULL);
 
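bitmap_weight() (from <linux/bitmap.h>, newly included above) counts the set
bits directly, replacing the open-coded o2hb_pop_count() loop; likewise
find_last_bit() replaces the hand-rolled highest-node scan and, like the old
code, returns the bitmap size when no bit is set.  Roughly:

	#include <linux/bitmap.h>

	/* illustrative equivalents of the two deleted helpers */
	static int region_count(const unsigned long *map, unsigned int bits)
	{
		return bitmap_weight(map, bits);	/* popcount over the map */
	}

	static unsigned long highest_node(const unsigned long *nodes,
					  unsigned long bits)
	{
		return find_last_bit(nodes, bits);	/* == bits if none set */
	}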
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index baa2b9e..2260fb9 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -199,7 +199,8 @@
 #define mlog_errno(st) do {						\
 	int _st = (st);							\
 	if (_st != -ERESTARTSYS && _st != -EINTR &&			\
-	    _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC)		\
+	    _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC &&		\
+	    _st != -EDQUOT)						\
 		mlog(ML_ERROR, "status = %lld\n", (long long)_st);	\
 } while (0)
 
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 30544ce..91a7e85 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2349,7 +2349,7 @@
 
 	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
 	if (dx_root_bh == NULL) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		goto out;
 	}
 	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
@@ -2422,7 +2422,7 @@
 	for (i = 0; i < num_dx_leaves; i++) {
 		bh = sb_getblk(osb->sb, start_blk + i);
 		if (bh == NULL) {
-			ret = -EIO;
+			ret = -ENOMEM;
 			goto out;
 		}
 		dx_leaves[i] = bh;
@@ -2929,7 +2929,7 @@
 	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
 	dirdata_bh = sb_getblk(sb, blkno);
 	if (!dirdata_bh) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		mlog_errno(ret);
 		goto out_commit;
 	}
@@ -3159,7 +3159,7 @@
 
 	*new_bh = sb_getblk(sb, p_blkno);
 	if (!*new_bh) {
-		status = -EIO;
+		status = -ENOMEM;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -3284,7 +3284,7 @@
 		if (ocfs2_dir_resv_allowed(osb))
 			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
 
-		credits = ocfs2_calc_extend_credits(sb, el, 1);
+		credits = ocfs2_calc_extend_credits(sb, el);
 	} else {
 		spin_unlock(&OCFS2_I(dir)->ip_lock);
 		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
@@ -3716,7 +3716,7 @@
 {
 	int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
 
-	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list, 1);
+	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
 	credits += ocfs2_quota_trans_credits(osb->sb);
 	return credits;
 }
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index cf0f103..af3f7aa 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1885,8 +1885,10 @@
 			 * up nodes that this node contacted */
 			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
 						    nn+1)) < O2NM_MAX_NODES) {
-				if (nn != dlm->node_num && nn != assert->node_idx)
+				if (nn != dlm->node_num && nn != assert->node_idx) {
 					master_request = 1;
+					break;
+				}
 			}
 		}
 		mle->master = assert->node_idx;
@@ -2354,6 +2356,10 @@
 
 	assert_spin_locked(&res->spinlock);
 
+	/* delay migration when the lockres is in MIGRATING state */
+	if (res->state & DLM_LOCK_RES_MIGRATING)
+		return 0;
+
 	if (res->owner != dlm->node_num)
 		return 0;
 
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 0b5adca..7035af0 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1886,6 +1886,13 @@
 		if (ml->type == LKM_NLMODE)
 			goto skip_lvb;
 
+		/*
+		 * If the lock is in the blocked list it can't have a valid lvb,
+		 * so skip it
+		 */
+		if (ml->list == DLM_BLOCKED_LIST)
+			goto skip_lvb;
+
 		if (!dlm_lvb_is_empty(mres->lvb)) {
 			if (lksb->flags & DLM_LKSB_PUT_LVB) {
 				/* other node was trying to update
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3a44a64..3407b2c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1304,7 +1304,7 @@
 {
 	wait_for_completion(&mw->mw_complete);
 	/* Re-arm the completion in case we want to wait on it again */
-	INIT_COMPLETION(mw->mw_complete);
+	reinit_completion(&mw->mw_complete);
 	return mw->mw_status;
 }
 
@@ -1355,7 +1355,7 @@
 	else
 		ret = mw->mw_status;
 	/* Re-arm the completion in case we want to wait on it again */
-	INIT_COMPLETION(mw->mw_complete);
+	reinit_completion(&mw->mw_complete);
 	return ret;
 }
 
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d71903c..6fff128 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -580,7 +580,7 @@
 	int did_quota = 0;
 
 	/*
-	 * This function only exists for file systems which don't
+	 * Unwritten extent only exists for file systems which
 	 * support holes.
 	 */
 	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
@@ -603,8 +603,7 @@
 		goto leave;
 	}
 
-	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
-					    clusters_to_add);
+	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index f87f9bd..f29a90f 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -386,19 +386,9 @@
 	u32 generation = 0;
 
 	status = -EINVAL;
-	if (inode == NULL || inode->i_sb == NULL) {
-		mlog(ML_ERROR, "bad inode\n");
-		return status;
-	}
 	sb = inode->i_sb;
 	osb = OCFS2_SB(sb);
 
-	if (!args) {
-		mlog(ML_ERROR, "bad inode args\n");
-		make_bad_inode(inode);
-		return status;
-	}
-
 	/*
 	 * To improve performance of cold-cache inode stats, we take
 	 * the cluster lock here if possible.
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 0b479ba..9ff4e8c 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -524,8 +524,7 @@
  * the result may be wrong.
  */
 static inline int ocfs2_calc_extend_credits(struct super_block *sb,
-					    struct ocfs2_extent_list *root_el,
-					    u32 bits_wanted)
+					    struct ocfs2_extent_list *root_el)
 {
 	int bitmap_blocks, sysfile_bitmap_blocks, extent_blocks;
 
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 3d3f3c8..631a982 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -201,8 +201,7 @@
 		}
 	}
 
-	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el,
-					      clusters_to_move + 2);
+	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 
 	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
 	     extra_blocks, clusters_to_move, *credits);
@@ -1067,8 +1066,10 @@
 	if (status)
 		return status;
 
-	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE))
+	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
+		status = -EPERM;
 		goto out_drop;
+	}
 
 	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
 		status = -EPERM;
@@ -1090,8 +1091,10 @@
 		goto out_free;
 	}
 
-	if (range.me_start > i_size_read(inode))
+	if (range.me_start > i_size_read(inode)) {
+		status = -EINVAL;
 		goto out_free;
+	}
 
 	if (range.me_start + range.me_len > i_size_read(inode))
 			range.me_len = i_size_read(inode) - range.me_start;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index be3f867..4f791f6 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -489,7 +489,7 @@
 
 	*new_fe_bh = sb_getblk(osb->sb, fe_blkno);
 	if (!*new_fe_bh) {
-		status = -EIO;
+		status = -ENOMEM;
 		mlog_errno(status);
 		goto leave;
 	}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index bf4dfc1..55767e1 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -612,6 +612,11 @@
 	}
 
 	new_bh = sb_getblk(inode->i_sb, first_blkno);
+	if (!new_bh) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out_commit;
+	}
 	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
 
 	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
@@ -1310,7 +1315,7 @@
 
 	new_bh = sb_getblk(sb, blkno);
 	if (new_bh == NULL) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		mlog_errno(ret);
 		goto out;
 	}
@@ -1561,7 +1566,7 @@
 
 	new_bh = sb_getblk(sb, blkno);
 	if (new_bh == NULL) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		mlog_errno(ret);
 		goto out;
 	}
@@ -2502,8 +2507,7 @@
 		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
 		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
 		*credits += ocfs2_calc_extend_credits(sb,
-						      et.et_root_el,
-						      ref_blocks);
+						      et.et_root_el);
 	} else {
 		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
 		*meta_add += 1;
@@ -2874,8 +2878,7 @@
 		meta_add =
 			ocfs2_extend_meta_needed(et->et_root_el);
 
-	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
-					      num_clusters + 2);
+	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
 
 	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
 					       p_cluster, num_clusters,
@@ -3031,7 +3034,7 @@
 	for (i = 0; i < blocks; i++, old_block++, new_block++) {
 		new_bh = sb_getblk(osb->sb, new_block);
 		if (new_bh == NULL) {
-			ret = -EIO;
+			ret = -ENOMEM;
 			mlog_errno(ret);
 			break;
 		}
@@ -3625,8 +3628,7 @@
 
 		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
 		*credits += ocfs2_calc_extend_credits(inode->i_sb,
-						      et.et_root_el,
-						      ref_blocks);
+						      et.et_root_el);
 	}
 
 out:
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index ec55add..822ebc1 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -469,6 +469,7 @@
 	struct ocfs2_chain_list *cl;
 	struct ocfs2_chain_rec *cr;
 	u16 cl_bpc;
+	u64 bg_ptr;
 
 	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 		return -EROFS;
@@ -513,7 +514,7 @@
 	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
 	if (ret) {
 		mlog_errno(ret);
-		goto out_unlock;
+		goto out_free_group_bh;
 	}
 
 	trace_ocfs2_group_add((unsigned long long)input->group,
@@ -523,7 +524,7 @@
 	if (IS_ERR(handle)) {
 		mlog_errno(PTR_ERR(handle));
 		ret = -EINVAL;
-		goto out_unlock;
+		goto out_free_group_bh;
 	}
 
 	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
@@ -538,12 +539,14 @@
 	}
 
 	group = (struct ocfs2_group_desc *)group_bh->b_data;
+	bg_ptr = le64_to_cpu(group->bg_next_group);
 	group->bg_next_group = cr->c_blkno;
 	ocfs2_journal_dirty(handle, group_bh);
 
 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
 				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
 	if (ret < 0) {
+		group->bg_next_group = cpu_to_le64(bg_ptr);
 		mlog_errno(ret);
 		goto out_commit;
 	}
@@ -574,8 +577,11 @@
 
 out_commit:
 	ocfs2_commit_trans(osb, handle);
-out_unlock:
+
+out_free_group_bh:
 	brelse(group_bh);
+
+out_unlock:
 	brelse(main_bm_bh);
 
 	ocfs2_inode_unlock(main_bm_inode, 1);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 39abf89..cb7ec0b 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -643,7 +643,7 @@
 
 #define FS_OCFS2_NM		1
 
-static ctl_table ocfs2_nm_table[] = {
+static struct ctl_table ocfs2_nm_table[] = {
 	{
 		.procname	= "hb_ctl_path",
 		.data		= ocfs2_hb_ctl_path,
@@ -654,7 +654,7 @@
 	{ }
 };
 
-static ctl_table ocfs2_mod_table[] = {
+static struct ctl_table ocfs2_mod_table[] = {
 	{
 		.procname	= "nm",
 		.data		= NULL,
@@ -665,7 +665,7 @@
 	{ }
 };
 
-static ctl_table ocfs2_kern_table[] = {
+static struct ctl_table ocfs2_kern_table[] = {
 	{
 		.procname	= "ocfs2",
 		.data		= NULL,
@@ -676,7 +676,7 @@
 	{ }
 };
 
-static ctl_table ocfs2_root_table[] = {
+static struct ctl_table ocfs2_root_table[] = {
 	{
 		.procname	= "fs",
 		.data		= NULL,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 5397c07..2c91452 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -481,7 +481,7 @@
 
 	bg_bh = sb_getblk(osb->sb, bg_blkno);
 	if (!bg_bh) {
-		status = -EIO;
+		status = -ENOMEM;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -661,7 +661,7 @@
 
 	bg_bh = sb_getblk(osb->sb, bg_blkno);
 	if (!bg_bh) {
-		status = -EIO;
+		status = -ENOMEM;
 		mlog_errno(status);
 		goto bail;
 	}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index d4e81e4..c414929 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1848,8 +1848,8 @@
 
 	*bh = sb_getblk(sb, block);
 	if (!*bh) {
-		mlog_errno(-EIO);
-		return -EIO;
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
 	}
 	lock_buffer(*bh);
 	if (!buffer_dirty(*bh))
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6ce0686..f0a1326 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -377,7 +377,7 @@
 		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
 					      xb_blkno + i);
 		if (!bucket->bu_bhs[i]) {
-			rc = -EIO;
+			rc = -ENOMEM;
 			mlog_errno(rc);
 			break;
 		}
@@ -754,8 +754,7 @@
 			BUG_ON(why == RESTART_META);
 
 			credits = ocfs2_calc_extend_credits(inode->i_sb,
-							    &vb->vb_xv->xr_list,
-							    clusters_to_add);
+							    &vb->vb_xv->xr_list);
 			status = ocfs2_extend_trans(handle, credits);
 			if (status < 0) {
 				status = -ENOMEM;
@@ -2865,6 +2864,12 @@
 	}
 
 	new_bh = sb_getblk(inode->i_sb, first_blkno);
+	if (!new_bh) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto end;
+	}
+
 	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
 
 	ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
@@ -3040,8 +3045,7 @@
 		if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
 			clusters_add += new_clusters;
 			credits += ocfs2_calc_extend_credits(inode->i_sb,
-							&def_xv.xv.xr_list,
-							new_clusters);
+							&def_xv.xv.xr_list);
 		}
 
 		goto meta_guess;
@@ -3106,8 +3110,7 @@
 			if (!ocfs2_xattr_is_local(xe))
 				credits += ocfs2_calc_extend_credits(
 							inode->i_sb,
-							&def_xv.xv.xr_list,
-							new_clusters);
+							&def_xv.xv.xr_list);
 			goto out;
 		}
 	}
@@ -3132,9 +3135,7 @@
 			meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
 			clusters_add += new_clusters - old_clusters;
 			credits += ocfs2_calc_extend_credits(inode->i_sb,
-							     &xv->xr_list,
-							     new_clusters -
-							     old_clusters);
+							     &xv->xr_list);
 			if (value_size >= OCFS2_XATTR_ROOT_SIZE)
 				goto out;
 		}
@@ -3180,7 +3181,7 @@
 				 &xb->xb_attrs.xb_root.xt_list;
 			meta_add += ocfs2_extend_meta_needed(el);
 			credits += ocfs2_calc_extend_credits(inode->i_sb,
-							     el, 1);
+							     el);
 		} else
 			credits += OCFS2_SUBALLOC_ALLOC + 1;
 
@@ -6216,8 +6217,7 @@
 			  le16_to_cpu(xv->xr_list.l_next_free_rec);
 
 		*credits += ocfs2_calc_extend_credits(sb,
-						&def_xv.xv.xr_list,
-						le32_to_cpu(xv->xr_clusters));
+						&def_xv.xv.xr_list);
 
 		/*
 		 * If the value is a tree with depth > 1, We don't go deep
@@ -6782,7 +6782,7 @@
 		metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
 
 	*credits += ocfs2_calc_extend_credits(osb->sb,
-					      xt_et->et_root_el, len);
+					      xt_et->et_root_el);
 
 	if (metas.num_metas) {
 		ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
diff --git a/fs/open.c b/fs/open.c
index d420331..4b3e1ed 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -57,7 +57,8 @@
 		newattrs.ia_valid |= ret | ATTR_FORCE;
 
 	mutex_lock(&dentry->d_inode->i_mutex);
-	ret = notify_change(dentry, &newattrs);
+	/* Note any delegations or leases have already been broken: */
+	ret = notify_change(dentry, &newattrs, NULL);
 	mutex_unlock(&dentry->d_inode->i_mutex);
 	return ret;
 }
@@ -464,21 +465,28 @@
 static int chmod_common(struct path *path, umode_t mode)
 {
 	struct inode *inode = path->dentry->d_inode;
+	struct inode *delegated_inode = NULL;
 	struct iattr newattrs;
 	int error;
 
 	error = mnt_want_write(path->mnt);
 	if (error)
 		return error;
+retry_deleg:
 	mutex_lock(&inode->i_mutex);
 	error = security_path_chmod(path, mode);
 	if (error)
 		goto out_unlock;
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
-	error = notify_change(path->dentry, &newattrs);
+	error = notify_change(path->dentry, &newattrs, &delegated_inode);
 out_unlock:
 	mutex_unlock(&inode->i_mutex);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
 	mnt_drop_write(path->mnt);
 	return error;
 }
@@ -522,6 +530,7 @@
 static int chown_common(struct path *path, uid_t user, gid_t group)
 {
 	struct inode *inode = path->dentry->d_inode;
+	struct inode *delegated_inode = NULL;
 	int error;
 	struct iattr newattrs;
 	kuid_t uid;
@@ -546,12 +555,17 @@
 	if (!S_ISDIR(inode->i_mode))
 		newattrs.ia_valid |=
 			ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+retry_deleg:
 	mutex_lock(&inode->i_mutex);
 	error = security_path_chown(path, uid, gid);
 	if (!error)
-		error = notify_change(path->dentry, &newattrs);
+		error = notify_change(path->dentry, &newattrs, &delegated_inode);
 	mutex_unlock(&inode->i_mutex);
-
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
 	return error;
 }
 
@@ -685,7 +699,6 @@
 	}
 
 	f->f_mapping = inode->i_mapping;
-	file_sb_list_add(f, inode->i_sb);
 
 	if (unlikely(f->f_mode & FMODE_PATH)) {
 		f->f_op = &empty_fops;
@@ -693,6 +706,10 @@
 	}
 
 	f->f_op = fops_get(inode->i_fop);
+	if (unlikely(WARN_ON(!f->f_op))) {
+		error = -ENODEV;
+		goto cleanup_all;
+	}
 
 	error = security_file_open(f, cred);
 	if (error)
@@ -702,7 +719,7 @@
 	if (error)
 		goto cleanup_all;
 
-	if (!open && f->f_op)
+	if (!open)
 		open = f->f_op->open;
 	if (open) {
 		error = open(inode, f);
@@ -720,7 +737,6 @@
 
 cleanup_all:
 	fops_put(f->f_op);
-	file_sb_list_del(f);
 	if (f->f_mode & FMODE_WRITE) {
 		put_write_access(inode);
 		if (!special_file(inode->i_mode)) {
@@ -1023,7 +1039,7 @@
 		return 0;
 	}
 
-	if (filp->f_op && filp->f_op->flush)
+	if (filp->f_op->flush)
 		retval = filp->f_op->flush(filp, id);
 
 	if (likely(!(filp->f_mode & FMODE_PATH))) {
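
For callers that do participate in delegation breaking, the chmod/chown
retry loops above reduce in isolation to something like this (a minimal
sketch; setattr_retry is a made-up name):

	#include <linux/fs.h>

	static int setattr_retry(struct dentry *dentry, struct iattr *attrs)
	{
		struct inode *delegated_inode = NULL;
		int error;
	retry:
		mutex_lock(&dentry->d_inode->i_mutex);
		error = notify_change(dentry, attrs, &delegated_inode);
		mutex_unlock(&dentry->d_inode->i_mutex);
		if (delegated_inode) {
			/* wait for the break without holding i_mutex */
			error = break_deleg_wait(&delegated_inode);
			if (!error)
				goto retry;
		}
		return error;
	}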
diff --git a/fs/pnode.c b/fs/pnode.c
index 9af0df1..c7221bb 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -264,12 +264,12 @@
 		prev_src_mnt  = child;
 	}
 out:
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	while (!list_empty(&tmp_list)) {
 		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
 		umount_tree(child, 0);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return ret;
 }
 
@@ -278,8 +278,7 @@
  */
 static inline int do_refcount_check(struct mount *mnt, int count)
 {
-	int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
-	return (mycount > count);
+	return mnt_get_count(mnt) > count;
 }
 
 /*
@@ -311,7 +310,7 @@
 
 	for (m = propagation_next(parent, parent); m;
 	     		m = propagation_next(m, parent)) {
-		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
+		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
 		if (child && list_empty(&child->mnt_mounts) &&
 		    (ret = do_refcount_check(child, 1)))
 			break;
@@ -333,8 +332,8 @@
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
 
-		struct mount *child = __lookup_mnt(&m->mnt,
-					mnt->mnt_mountpoint, 0);
+		struct mount *child = __lookup_mnt_last(&m->mnt,
+						mnt->mnt_mountpoint);
 		/*
 		 * umount the child only if the child has no
 		 * other children
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 15af622..2183fcf 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -31,6 +31,10 @@
 config PROC_KCORE
 	bool "/proc/kcore support" if !ARM
 	depends on PROC_FS && MMU
+	help
+	  Provides a virtual ELF core file of the live kernel.  This can
+	  be read with gdb and other ELF tools.  No modifications can be
+	  made using this mechanism.
 
 config PROC_VMCORE
 	bool "/proc/vmcore support"
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
index b701eaa..51942d5 100644
--- a/fs/proc/consoles.c
+++ b/fs/proc/consoles.c
@@ -29,7 +29,6 @@
 	char flags[ARRAY_SIZE(con_flags) + 1];
 	struct console *con = v;
 	unsigned int a;
-	int len;
 	dev_t dev = 0;
 
 	if (con->device) {
@@ -47,11 +46,10 @@
 			con_flags[a].name : ' ';
 	flags[a] = 0;
 
-	seq_printf(m, "%s%d%n", con->name, con->index, &len);
-	len = 21 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-',
+	seq_setwidth(m, 21 - 1);
+	seq_printf(m, "%s%d", con->name, con->index);
+	seq_pad(m, ' ');
+	seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
 			con->write ? 'W' : '-', con->unblank ? 'U' : '-',
 			flags);
 	if (dev)
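
The %n removal here, and in fs/proc/nommu.c and task_mmu.c below, uses the
seq_setwidth()/seq_pad() pair added to seq_file in 3.13: seq_setwidth()
records the desired field width and seq_pad() emits the spaces needed to
reach it, plus an optional trailing character.  A sketch with made-up
output:

	#include <linux/seq_file.h>

	static void show_padded(struct seq_file *m, const char *name, int idx)
	{
		seq_setwidth(m, 21 - 1);	/* pad what follows to 20 columns */
		seq_printf(m, "%s%d", name, idx);
		seq_pad(m, ' ');		/* fill with spaces, then one ' ' */
		seq_puts(m, "tail\n");
	}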
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 8eaa1ba..28955d4 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -285,19 +285,23 @@
 	return rv;
 }
 
-static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+static unsigned long
+proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
+			   unsigned long len, unsigned long pgoff,
+			   unsigned long flags)
 {
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	unsigned long rv = -EIO;
-	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
+	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
+				  unsigned long, unsigned long) = NULL;
 	if (use_pde(pde)) {
 #ifdef CONFIG_MMU
-		get_unmapped_area = current->mm->get_unmapped_area;
+		get_area = current->mm->get_unmapped_area;
 #endif
 		if (pde->proc_fops->get_unmapped_area)
-			get_unmapped_area = pde->proc_fops->get_unmapped_area;
-		if (get_unmapped_area)
-			rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
+			get_area = pde->proc_fops->get_unmapped_area;
+		if (get_area)
+			rv = get_area(file, orig_addr, len, pgoff, flags);
 		unuse_pde(pde);
 	}
 	return rv;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 06ea155..5ed0e52 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -255,8 +255,7 @@
 	end_pfn = 0;
 	for_each_node_state(nid, N_MEMORY) {
 		unsigned long node_end;
-		node_end  = NODE_DATA(nid)->node_start_pfn +
-			NODE_DATA(nid)->node_spanned_pages;
+		node_end = node_end_pfn(nid);
 		if (end_pfn < node_end)
 			end_pfn = node_end;
 	}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 59d85d6..a77d2b2 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -1,8 +1,8 @@
 #include <linux/fs.h>
-#include <linux/hugetlb.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
@@ -24,7 +24,6 @@
 {
 	struct sysinfo i;
 	unsigned long committed;
-	unsigned long allowed;
 	struct vmalloc_info vmi;
 	long cached;
 	unsigned long pages[NR_LRU_LISTS];
@@ -37,8 +36,6 @@
 	si_meminfo(&i);
 	si_swapinfo(&i);
 	committed = percpu_counter_read_positive(&vm_committed_as);
-	allowed = ((totalram_pages - hugetlb_total_pages())
-		* sysctl_overcommit_ratio / 100) + total_swap_pages;
 
 	cached = global_page_state(NR_FILE_PAGES) -
 			total_swapcache_pages() - i.bufferram;
@@ -147,7 +144,7 @@
 		K(global_page_state(NR_UNSTABLE_NFS)),
 		K(global_page_state(NR_BOUNCE)),
 		K(global_page_state(NR_WRITEBACK_TEMP)),
-		K(allowed),
+		K(vm_commit_limit()),
 		K(committed),
 		(unsigned long)VMALLOC_TOTAL >> 10,
 		vmi.used >> 10,
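
vm_commit_limit() centralizes the CommitLimit arithmetic that was open-coded
in the lines removed above; judging from that removed code, the 3.13 helper
(mm/util.c) reads essentially as follows, with sysctl_overcommit_ratio
defaulting to 50:

	/* paraphrase of the 3.13 helper, not a new definition */
	unsigned long vm_commit_limit(void)
	{
		return ((totalram_pages - hugetlb_total_pages())
			* sysctl_overcommit_ratio / 100) + total_swap_pages;
	}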
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index ccfd99b..5f9bc8a 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -39,7 +39,7 @@
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
-	int flags, len;
+	int flags;
 
 	flags = region->vm_flags;
 	file = region->vm_file;
@@ -50,8 +50,9 @@
 		ino = inode->i_ino;
 	}
 
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m,
-		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 		   region->vm_start,
 		   region->vm_end,
 		   flags & VM_READ ? 'r' : '-',
@@ -59,13 +60,10 @@
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
 		   ((loff_t)region->vm_pgoff) << PAGE_SHIFT,
-		   MAJOR(dev), MINOR(dev), ino, &len);
+		   MAJOR(dev), MINOR(dev), ino);
 
 	if (file) {
-		len = 25 + sizeof(void *) * 6 - len;
-		if (len < 1)
-			len = 1;
-		seq_printf(m, "%*c", len, ' ');
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "");
 	}
 
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 6b6a993..ffeb202 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -36,18 +36,10 @@
 	return NULL;
 }
 
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
-				void *cookie)
-{
-	char *s = nd_get_link(nd);
-	if (!IS_ERR(s))
-		kfree(s);
-}
-
 static const struct inode_operations proc_self_inode_operations = {
 	.readlink	= proc_self_readlink,
 	.follow_link	= proc_self_follow_link,
-	.put_link	= proc_self_put_link,
+	.put_link	= kfree_put_link,
 };
 
 static unsigned self_inum;
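
kfree_put_link() is the generic fs/libfs.c helper that replaces the private
kfree wrapper deleted above: any ->follow_link() that hands the VFS a
kmalloc'd string can pair with it directly.  An illustrative (hypothetical)
user:

	#include <linux/fs.h>
	#include <linux/namei.h>

	static void *demo_follow_link(struct dentry *dentry, struct nameidata *nd)
	{
		char *s = kasprintf(GFP_KERNEL, "%d", 42);	/* made-up target */

		nd_set_link(nd, s ? s : ERR_PTR(-ENOMEM));
		return NULL;
	}

	static const struct inode_operations demo_iops = {
		.readlink	= generic_readlink,
		.follow_link	= demo_follow_link,
		.put_link	= kfree_put_link,	/* kfree()s the nd_get_link() string */
	};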
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 390bdab..fb52b54 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -62,7 +62,8 @@
 		total_rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+		(PTRS_PER_PTE * sizeof(pte_t) *
+		 atomic_long_read(&mm->nr_ptes)) >> 10,
 		swap << (PAGE_SHIFT-10));
 }
 
@@ -83,14 +84,6 @@
 	return mm->total_vm;
 }
 
-static void pad_len_spaces(struct seq_file *m, int len)
-{
-	len = 25 + sizeof(void*) * 6 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c", len, ' ');
-}
-
 #ifdef CONFIG_NUMA
 /*
  * These functions are for numa_maps but called in generic **maps seq_file
@@ -268,7 +261,6 @@
 	unsigned long long pgoff = 0;
 	unsigned long start, end;
 	dev_t dev = 0;
-	int len;
 	const char *name = NULL;
 
 	if (file) {
@@ -286,7 +278,8 @@
 	if (stack_guard_page_end(vma, end))
 		end -= PAGE_SIZE;
 
-	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 			start,
 			end,
 			flags & VM_READ ? 'r' : '-',
@@ -294,14 +287,14 @@
 			flags & VM_EXEC ? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
 			pgoff,
-			MAJOR(dev), MINOR(dev), ino, &len);
+			MAJOR(dev), MINOR(dev), ino);
 
 	/*
 	 * Print the dentry name for named mappings, and a
 	 * special [heap] marker for the heap:
 	 */
 	if (file) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "\n");
 		goto done;
 	}
@@ -333,7 +326,7 @@
 				name = "[stack]";
 			} else {
 				/* Thread stack in /proc/PID/maps */
-				pad_len_spaces(m, len);
+				seq_pad(m, ' ');
 				seq_printf(m, "[stack:%d]", tid);
 			}
 		}
@@ -341,7 +334,7 @@
 
 done:
 	if (name) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_puts(m, name);
 	}
 	seq_putc(m, '\n');
@@ -505,9 +498,9 @@
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		mss->anonymous_thp += HPAGE_PMD_SIZE;
 		return 0;
 	}
@@ -561,6 +554,9 @@
 		[ilog2(VM_NONLINEAR)]	= "nl",
 		[ilog2(VM_ARCH_1)]	= "ar",
 		[ilog2(VM_DONTDUMP)]	= "dd",
+#ifdef CONFIG_MEM_SOFT_DIRTY
+		[ilog2(VM_SOFTDIRTY)]	= "sd",
+#endif
 		[ilog2(VM_MIXEDMAP)]	= "mm",
 		[ilog2(VM_HUGEPAGE)]	= "hg",
 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
@@ -995,13 +991,14 @@
 {
 	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
+	spinlock_t *ptl;
 	pte_t *pte;
 	int err = 0;
 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
-	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		int pmd_flags2;
 
 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1019,7 +1016,7 @@
 			if (err)
 				break;
 		}
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		return err;
 	}
 
@@ -1321,7 +1318,7 @@
 
 	md = walk->private;
 
-	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
@@ -1329,7 +1326,7 @@
 		if (page)
 			gather_stats(page, md, pte_dirty(huge_pte),
 				     HPAGE_PMD_SIZE/PAGE_SIZE);
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		return 0;
 	}
 
@@ -1387,8 +1384,8 @@
 	struct mm_struct *mm = vma->vm_mm;
 	struct mm_walk walk = {};
 	struct mempolicy *pol;
-	int n;
-	char buffer[50];
+	char buffer[64];
+	int nid;
 
 	if (!mm)
 		return 0;
@@ -1404,10 +1401,8 @@
 	walk.mm = mm;
 
 	pol = get_vma_policy(task, vma, vma->vm_start);
-	n = mpol_to_str(buffer, sizeof(buffer), pol);
+	mpol_to_str(buffer, sizeof(buffer), pol);
 	mpol_cond_put(pol);
-	if (n < 0)
-		return n;
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
@@ -1460,9 +1455,9 @@
 	if (md->writeback)
 		seq_printf(m, " writeback=%lu", md->writeback);
 
-	for_each_node_state(n, N_MEMORY)
-		if (md->node[n])
-			seq_printf(m, " N%d=%lu", n, md->node[n]);
+	for_each_node_state(nid, N_MEMORY)
+		if (md->node[nid])
+			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
 out:
 	seq_putc(m, '\n');
 
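
These pmd_trans_huge_lock() conversions reflect the split-PMD-lock
work: the lock protecting a huge pmd is no longer always
mm->page_table_lock, so the helper now hands back whichever spinlock
it actually took.  The calling pattern used throughout this file:

	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* pmd is stable, operate on the huge mapping */
		spin_unlock(ptl);	/* release the lock we were given */
		return 0;
	}
	/* otherwise fall back to walking individual ptes */
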
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 56123a6..678455d 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -123,14 +123,6 @@
 	return size;
 }
 
-static void pad_len_spaces(struct seq_file *m, int len)
-{
-	len = 25 + sizeof(void*) * 6 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c", len, ' ');
-}
-
 /*
  * display a single VMA to a sequenced file
  */
@@ -142,7 +134,7 @@
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
-	int flags, len;
+	int flags;
 	unsigned long long pgoff = 0;
 
 	flags = vma->vm_flags;
@@ -155,8 +147,9 @@
 		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
 	}
 
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m,
-		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 		   vma->vm_start,
 		   vma->vm_end,
 		   flags & VM_READ ? 'r' : '-',
@@ -164,16 +157,16 @@
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
 		   pgoff,
-		   MAJOR(dev), MINOR(dev), ino, &len);
+		   MAJOR(dev), MINOR(dev), ino);
 
 	if (file) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "");
 	} else if (mm) {
 		pid_t tid = vm_is_stack(priv->task, vma, is_pid);
 
 		if (tid != 0) {
-			pad_len_spaces(m, len);
+			seq_pad(m, ' ');
 			/*
 			 * Thread stack in /proc/PID/task/TID/maps or
 			 * the main process stack.
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 5fe34c3..439406e 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -20,15 +20,15 @@
 	struct proc_mounts *p = proc_mounts(file->private_data);
 	struct mnt_namespace *ns = p->ns;
 	unsigned res = POLLIN | POLLRDNORM;
+	int event;
 
 	poll_wait(file, &p->ns->poll, wait);
 
-	br_read_lock(&vfsmount_lock);
-	if (p->m.poll_event != ns->event) {
-		p->m.poll_event = ns->event;
+	event = ACCESS_ONCE(ns->event);
+	if (p->m.poll_event != event) {
+		p->m.poll_event = event;
 		res |= POLLERR | POLLPRI;
 	}
-	br_read_unlock(&vfsmount_lock);
 
 	return res;
 }
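
Dropping vfsmount_lock here is safe because poll only needs a single
consistent snapshot of ns->event: a stale value at worst causes one
spurious or missed POLLERR, which the next poll pass corrects.
ACCESS_ONCE() just forces one non-torn load; in kernels of this
vintage it is the classic volatile cast:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
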
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index d024505..e62c818 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -60,10 +60,6 @@
 	struct buffer_head *bh;
 
 	*res_dir = NULL;
-	if (!dir->i_sb) {
-		printk(KERN_WARNING "qnx4: no superblock on dir.\n");
-		return NULL;
-	}
 	bh = NULL;
 	block = offset = blkofs = 0;
 	while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) {
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index dea86e8..2b363e2 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -117,6 +117,7 @@
 
 static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
 {
+	memset(dst, 0, sizeof(*dst));
 	dst->dqb_bhardlimit = src->d_blk_hardlimit;
 	dst->dqb_bsoftlimit = src->d_blk_softlimit;
 	dst->dqb_curspace = src->d_bcount;
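
The added memset() closes an information leak: struct if_dqblk is
subsequently copied to user space, and without it any padding bytes
(and fields not assigned here, such as dqb_valid until the caller
sets it) would carry uninitialised kernel stack contents.  The general
pattern for any structure destined for copy_to_user():

	struct if_dqblk idq;

	memset(&idq, 0, sizeof(idq));	/* zero holes and padding first */
	/* ... fill in the named fields ... */
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
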
diff --git a/fs/read_write.c b/fs/read_write.c
index e3cd280..58e440d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -257,7 +257,7 @@
 
 	fn = no_llseek;
 	if (file->f_mode & FMODE_LSEEK) {
-		if (file->f_op && file->f_op->llseek)
+		if (file->f_op->llseek)
 			fn = file->f_op->llseek;
 	}
 	return fn(file, offset, whence);
@@ -384,7 +384,7 @@
 
 	if (!(file->f_mode & FMODE_READ))
 		return -EBADF;
-	if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+	if (!file->f_op->read && !file->f_op->aio_read)
 		return -EINVAL;
 	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
 		return -EFAULT;
@@ -433,7 +433,7 @@
 	const char __user *p;
 	ssize_t ret;
 
-	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+	if (!file->f_op->write && !file->f_op->aio_write)
 		return -EINVAL;
 
 	old_fs = get_fs();
@@ -460,7 +460,7 @@
 
 	if (!(file->f_mode & FMODE_WRITE))
 		return -EBADF;
-	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+	if (!file->f_op->write && !file->f_op->aio_write)
 		return -EINVAL;
 	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
 		return -EFAULT;
@@ -727,11 +727,6 @@
 	io_fn_t fn;
 	iov_fn_t fnv;
 
-	if (!file->f_op) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	ret = rw_copy_check_uvector(type, uvector, nr_segs,
 				    ARRAY_SIZE(iovstack), iovstack, &iov);
 	if (ret <= 0)
@@ -778,7 +773,7 @@
 {
 	if (!(file->f_mode & FMODE_READ))
 		return -EBADF;
-	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+	if (!file->f_op->aio_read && !file->f_op->read)
 		return -EINVAL;
 
 	return do_readv_writev(READ, file, vec, vlen, pos);
@@ -791,7 +786,7 @@
 {
 	if (!(file->f_mode & FMODE_WRITE))
 		return -EBADF;
-	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+	if (!file->f_op->aio_write && !file->f_op->write)
 		return -EINVAL;
 
 	return do_readv_writev(WRITE, file, vec, vlen, pos);
@@ -906,10 +901,6 @@
 	io_fn_t fn;
 	iov_fn_t fnv;
 
-	ret = -EINVAL;
-	if (!file->f_op)
-		goto out;
-
 	ret = -EFAULT;
 	if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
 		goto out;
@@ -965,7 +956,7 @@
 		goto out;
 
 	ret = -EINVAL;
-	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+	if (!file->f_op->aio_read && !file->f_op->read)
 		goto out;
 
 	ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
@@ -1032,7 +1023,7 @@
 		goto out;
 
 	ret = -EINVAL;
-	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+	if (!file->f_op->aio_write && !file->f_op->write)
 		goto out;
 
 	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
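
This and the following readdir, select, splice and sync hunks all make
the same simplification: the VFS now guarantees that an open struct
file has a non-NULL ->f_op, so the defensive checks collapse from

	if (!file->f_op || !file->f_op->read)
		return -EINVAL;

to

	if (!file->f_op->read)
		return -EINVAL;

Individual methods may still be NULL; only the operations table itself
cannot be.
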
diff --git a/fs/readdir.c b/fs/readdir.c
index 93d71e5..5b53d99 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -24,7 +24,7 @@
 {
 	struct inode *inode = file_inode(file);
 	int res = -ENOTDIR;
-	if (!file->f_op || !file->f_op->iterate)
+	if (!file->f_op->iterate)
 		goto out;
 
 	res = security_file_permission(file, MAY_READ);
diff --git a/fs/select.c b/fs/select.c
index dfd5cb1..467bb1c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -454,7 +454,7 @@
 					const struct file_operations *f_op;
 					f_op = f.file->f_op;
 					mask = DEFAULT_POLLMASK;
-					if (f_op && f_op->poll) {
+					if (f_op->poll) {
 						wait_key_set(wait, in, out,
 							     bit, busy_flag);
 						mask = (*f_op->poll)(f.file, wait);
@@ -761,7 +761,7 @@
 		mask = POLLNVAL;
 		if (f.file) {
 			mask = DEFAULT_POLLMASK;
-			if (f.file->f_op && f.file->f_op->poll) {
+			if (f.file->f_op->poll) {
 				pwait->_key = pollfd->events|POLLERR|POLLHUP;
 				pwait->_key |= busy_flag;
 				mask = f.file->f_op->poll(f.file, pwait);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index a290157..1cd2388 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -766,6 +766,21 @@
 }
 EXPORT_SYMBOL(seq_write);
 
+/**
+ * seq_pad - write padding spaces to buffer
+ * @m: seq_file identifying the buffer to which data should be written
+ * @c: the byte to append after padding if non-zero
+ */
+void seq_pad(struct seq_file *m, char c)
+{
+	int size = m->pad_until - m->count;
+	if (size > 0)
+		seq_printf(m, "%*s", size, "");
+	if (c)
+		seq_putc(m, c);
+}
+EXPORT_SYMBOL(seq_pad);
+
 struct list_head *seq_list_start(struct list_head *head, loff_t pos)
 {
 	struct list_head *lh;
diff --git a/fs/splice.c b/fs/splice.c
index 3b7ee65..46a08f7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -695,7 +695,7 @@
 	loff_t pos = sd->pos;
 	int more;
 
-	if (!likely(file->f_op && file->f_op->sendpage))
+	if (!likely(file->f_op->sendpage))
 		return -EINVAL;
 
 	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
@@ -1099,7 +1099,7 @@
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
 				loff_t *, size_t, unsigned int);
 
-	if (out->f_op && out->f_op->splice_write)
+	if (out->f_op->splice_write)
 		splice_write = out->f_op->splice_write;
 	else
 		splice_write = default_file_splice_write;
@@ -1125,7 +1125,7 @@
 	if (unlikely(ret < 0))
 		return ret;
 
-	if (in->f_op && in->f_op->splice_read)
+	if (in->f_op->splice_read)
 		splice_read = in->f_op->splice_read;
 	else
 		splice_read = default_file_splice_read;
diff --git a/fs/stat.c b/fs/stat.c
index d0ea7ef..ae0c3ce 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -37,14 +37,21 @@
 
 EXPORT_SYMBOL(generic_fillattr);
 
-int vfs_getattr(struct path *path, struct kstat *stat)
+/**
+ * vfs_getattr_nosec - getattr without security checks
+ * @path: file to get attributes from
+ * @stat: structure to return attributes in
+ *
+ * Get attributes without calling security_inode_getattr.
+ *
+ * Currently the only caller other than vfs_getattr is internal to the
+ * filehandle lookup code, which uses only the inode number and returns
+ * no attributes to any user.  Any other code probably wants
+ * vfs_getattr.
+ */
+int vfs_getattr_nosec(struct path *path, struct kstat *stat)
 {
 	struct inode *inode = path->dentry->d_inode;
-	int retval;
-
-	retval = security_inode_getattr(path->mnt, path->dentry);
-	if (retval)
-		return retval;
 
 	if (inode->i_op->getattr)
 		return inode->i_op->getattr(path->mnt, path->dentry, stat);
@@ -53,6 +60,18 @@
 	return 0;
 }
 
+EXPORT_SYMBOL(vfs_getattr_nosec);
+
+int vfs_getattr(struct path *path, struct kstat *stat)
+{
+	int retval;
+
+	retval = security_inode_getattr(path->mnt, path->dentry);
+	if (retval)
+		return retval;
+	return vfs_getattr_nosec(path, stat);
+}
+
 EXPORT_SYMBOL(vfs_getattr);
 
 int vfs_fstat(unsigned int fd, struct kstat *stat)
diff --git a/fs/super.c b/fs/super.c
index 0225c20..e5f6c2c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -129,33 +129,24 @@
 	return total_objects;
 }
 
-static int init_sb_writers(struct super_block *s, struct file_system_type *type)
-{
-	int err;
-	int i;
-
-	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
-		err = percpu_counter_init(&s->s_writers.counter[i], 0);
-		if (err < 0)
-			goto err_out;
-		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
-				 &type->s_writers_key[i], 0);
-	}
-	init_waitqueue_head(&s->s_writers.wait);
-	init_waitqueue_head(&s->s_writers.wait_unfrozen);
-	return 0;
-err_out:
-	while (--i >= 0)
-		percpu_counter_destroy(&s->s_writers.counter[i]);
-	return err;
-}
-
-static void destroy_sb_writers(struct super_block *s)
+/**
+ *	destroy_super	-	frees a superblock
+ *	@s: superblock to free
+ *
+ *	Frees a superblock.
+ */
+static void destroy_super(struct super_block *s)
 {
 	int i;
-
+	list_lru_destroy(&s->s_dentry_lru);
+	list_lru_destroy(&s->s_inode_lru);
 	for (i = 0; i < SB_FREEZE_LEVELS; i++)
 		percpu_counter_destroy(&s->s_writers.counter[i]);
+	security_sb_free(s);
+	WARN_ON(!list_empty(&s->s_mounts));
+	kfree(s->s_subtype);
+	kfree(s->s_options);
+	kfree_rcu(s, rcu);
 }
 
 /**
@@ -170,111 +161,74 @@
 {
 	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
 	static const struct super_operations default_op;
+	int i;
 
-	if (s) {
-		if (security_sb_alloc(s))
-			goto out_free_sb;
+	if (!s)
+		return NULL;
 
-#ifdef CONFIG_SMP
-		s->s_files = alloc_percpu(struct list_head);
-		if (!s->s_files)
-			goto err_out;
-		else {
-			int i;
+	if (security_sb_alloc(s))
+		goto fail;
 
-			for_each_possible_cpu(i)
-				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
-		}
-#else
-		INIT_LIST_HEAD(&s->s_files);
-#endif
-		if (init_sb_writers(s, type))
-			goto err_out;
-		s->s_flags = flags;
-		s->s_bdi = &default_backing_dev_info;
-		INIT_HLIST_NODE(&s->s_instances);
-		INIT_HLIST_BL_HEAD(&s->s_anon);
-		INIT_LIST_HEAD(&s->s_inodes);
-
-		if (list_lru_init(&s->s_dentry_lru))
-			goto err_out;
-		if (list_lru_init(&s->s_inode_lru))
-			goto err_out_dentry_lru;
-
-		INIT_LIST_HEAD(&s->s_mounts);
-		init_rwsem(&s->s_umount);
-		lockdep_set_class(&s->s_umount, &type->s_umount_key);
-		/*
-		 * sget() can have s_umount recursion.
-		 *
-		 * When it cannot find a suitable sb, it allocates a new
-		 * one (this one), and tries again to find a suitable old
-		 * one.
-		 *
-		 * In case that succeeds, it will acquire the s_umount
-		 * lock of the old one. Since these are clearly distrinct
-		 * locks, and this object isn't exposed yet, there's no
-		 * risk of deadlocks.
-		 *
-		 * Annotate this by putting this lock in a different
-		 * subclass.
-		 */
-		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
-		s->s_count = 1;
-		atomic_set(&s->s_active, 1);
-		mutex_init(&s->s_vfs_rename_mutex);
-		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
-		mutex_init(&s->s_dquot.dqio_mutex);
-		mutex_init(&s->s_dquot.dqonoff_mutex);
-		init_rwsem(&s->s_dquot.dqptr_sem);
-		s->s_maxbytes = MAX_NON_LFS;
-		s->s_op = &default_op;
-		s->s_time_gran = 1000000000;
-		s->cleancache_poolid = -1;
-
-		s->s_shrink.seeks = DEFAULT_SEEKS;
-		s->s_shrink.scan_objects = super_cache_scan;
-		s->s_shrink.count_objects = super_cache_count;
-		s->s_shrink.batch = 1024;
-		s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
+		if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
+			goto fail;
+		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
+				 &type->s_writers_key[i], 0);
 	}
-out:
+	init_waitqueue_head(&s->s_writers.wait);
+	init_waitqueue_head(&s->s_writers.wait_unfrozen);
+	s->s_flags = flags;
+	s->s_bdi = &default_backing_dev_info;
+	INIT_HLIST_NODE(&s->s_instances);
+	INIT_HLIST_BL_HEAD(&s->s_anon);
+	INIT_LIST_HEAD(&s->s_inodes);
+
+	if (list_lru_init(&s->s_dentry_lru))
+		goto fail;
+	if (list_lru_init(&s->s_inode_lru))
+		goto fail;
+
+	INIT_LIST_HEAD(&s->s_mounts);
+	init_rwsem(&s->s_umount);
+	lockdep_set_class(&s->s_umount, &type->s_umount_key);
+	/*
+	 * sget() can have s_umount recursion.
+	 *
+	 * When it cannot find a suitable sb, it allocates a new
+	 * one (this one), and tries again to find a suitable old
+	 * one.
+	 *
+	 * In case that succeeds, it will acquire the s_umount
+	 * lock of the old one. Since these are clearly distinct
+	 * locks, and this object isn't exposed yet, there's no
+	 * risk of deadlocks.
+	 *
+	 * Annotate this by putting this lock in a different
+	 * subclass.
+	 */
+	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
+	s->s_count = 1;
+	atomic_set(&s->s_active, 1);
+	mutex_init(&s->s_vfs_rename_mutex);
+	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
+	mutex_init(&s->s_dquot.dqio_mutex);
+	mutex_init(&s->s_dquot.dqonoff_mutex);
+	init_rwsem(&s->s_dquot.dqptr_sem);
+	s->s_maxbytes = MAX_NON_LFS;
+	s->s_op = &default_op;
+	s->s_time_gran = 1000000000;
+	s->cleancache_poolid = -1;
+
+	s->s_shrink.seeks = DEFAULT_SEEKS;
+	s->s_shrink.scan_objects = super_cache_scan;
+	s->s_shrink.count_objects = super_cache_count;
+	s->s_shrink.batch = 1024;
+	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
 	return s;
 
-err_out_dentry_lru:
-	list_lru_destroy(&s->s_dentry_lru);
-err_out:
-	security_sb_free(s);
-#ifdef CONFIG_SMP
-	if (s->s_files)
-		free_percpu(s->s_files);
-#endif
-	destroy_sb_writers(s);
-out_free_sb:
-	kfree(s);
-	s = NULL;
-	goto out;
-}
-
-/**
- *	destroy_super	-	frees a superblock
- *	@s: superblock to free
- *
- *	Frees a superblock.
- */
-static inline void destroy_super(struct super_block *s)
-{
-	list_lru_destroy(&s->s_dentry_lru);
-	list_lru_destroy(&s->s_inode_lru);
-#ifdef CONFIG_SMP
-	free_percpu(s->s_files);
-#endif
-	destroy_sb_writers(s);
-	security_sb_free(s);
-	WARN_ON(!list_empty(&s->s_mounts));
-	kfree(s->s_subtype);
-	kfree(s->s_options);
-	kfree(s);
+fail:
+	destroy_super(s);
+	return NULL;
 }
 
 /* Superblock refcounting  */
@@ -756,7 +710,8 @@
 	   make sure there are no rw files opened */
 	if (remount_ro) {
 		if (force) {
-			mark_files_ro(sb);
+			sb->s_readonly_remount = 1;
+			smp_wmb();
 		} else {
 			retval = sb_prepare_remount_readonly(sb);
 			if (retval)
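
The alloc_super() rewrite trades per-step unwind labels for a single
fail: exit, which only works because destroy_super() is now safe to
call on a partially constructed superblock (on kzalloc'd memory,
list_lru_destroy(), percpu_counter_destroy() and the kfree() calls all
presumably tolerate never-initialised members).  The shape of the
pattern, abstracted:

	struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		if (init_a(f))
			goto fail;
		if (init_b(f))
			goto fail;
		return f;
	fail:
		foo_destroy(f);		/* must cope with partial init */
		return NULL;
	}
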
diff --git a/fs/sync.c b/fs/sync.c
index 905f3f6..f155374 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,10 +27,11 @@
  * wait == 1 case since in that case write_inode() functions do
  * sync_dirty_buffer() and thus effectively write one block at a time.
  */
-static int __sync_filesystem(struct super_block *sb, int wait)
+static int __sync_filesystem(struct super_block *sb, int wait,
+			     unsigned long start)
 {
 	if (wait)
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, start);
 	else
 		writeback_inodes_sb(sb, WB_REASON_SYNC);
 
@@ -47,6 +48,7 @@
 int sync_filesystem(struct super_block *sb)
 {
 	int ret;
+	unsigned long start = jiffies;
 
 	/*
 	 * We need to be protected against the filesystem going from
@@ -60,17 +62,17 @@
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = __sync_filesystem(sb, 0);
+	ret = __sync_filesystem(sb, 0, start);
 	if (ret < 0)
 		return ret;
-	return __sync_filesystem(sb, 1);
+	return __sync_filesystem(sb, 1, start);
 }
 EXPORT_SYMBOL_GPL(sync_filesystem);
 
 static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 {
 	if (!(sb->s_flags & MS_RDONLY))
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, *((unsigned long *)arg));
 }
 
 static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -102,9 +104,10 @@
 SYSCALL_DEFINE0(sync)
 {
 	int nowait = 0, wait = 1;
+	unsigned long start = jiffies;
 
 	wakeup_flusher_threads(0, WB_REASON_SYNC);
-	iterate_supers(sync_inodes_one_sb, NULL);
+	iterate_supers(sync_inodes_one_sb, &start);
 	iterate_supers(sync_fs_one_sb, &nowait);
 	iterate_supers(sync_fs_one_sb, &wait);
 	iterate_bdevs(fdatawrite_one_bdev, NULL);
@@ -177,7 +180,7 @@
  */
 int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
 {
-	if (!file->f_op || !file->f_op->fsync)
+	if (!file->f_op->fsync)
 		return -EINVAL;
 	return file->f_op->fsync(file, start, end, datasync);
 }
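
The extra "start" argument snapshots jiffies once, before any
writeback is kicked, and is threaded into sync_inodes_sb(); presumably
it acts as a cutoff so that sync does not keep chasing pages dirtied
after the sync began, which could otherwise livelock sync(2) on a
busy filesystem.  The caller side is simply:

	unsigned long start = jiffies;	/* snapshot before any writeback */

	ret = __sync_filesystem(sb, 0, start);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1, start);
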
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 6e025e0..cc1febd 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2563,9 +2563,9 @@
 	unsigned int from, to, ffs = chance(1, 2);
 	unsigned char *p = (void *)buf;
 
-	from = prandom_u32() % (len + 1);
-	/* Corruption may only span one max. write unit */
-	to = min(len, ALIGN(from, c->max_write_size));
+	from = prandom_u32() % len;
+	/* Corruption may span up to the end of the max. write unit */
+	to = min(len, ALIGN(from + 1, c->max_write_size));
 
 	ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
 		   ffs ? "0xFFs" : "random data");
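
A worked example of the bug being fixed: with c->max_write_size = 64,
the old code computed to == from (an empty corruption range, reported
as e.g. "filled bytes 64-63") whenever the random "from" landed
exactly on a write-unit boundary, and "from" could even equal "len"
itself because of the "% (len + 1)".  The new code draws "from" below
"len" and aligns up from "from + 1", so "to" is always at least
"from + 1" and at least one byte really gets corrupted.
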
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 6b4947f..ea41649 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -192,8 +192,7 @@
 	struct ubifs_dent_node *dent;
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-	dbg_gen("'%.*s' in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+	dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
 
 	if (dentry->d_name.len > UBIFS_MAX_NLEN)
 		return ERR_PTR(-ENAMETOOLONG);
@@ -225,8 +224,8 @@
 		 * checking.
 		 */
 		err = PTR_ERR(inode);
-		ubifs_err("dead directory entry '%.*s', error %d",
-			  dentry->d_name.len, dentry->d_name.name, err);
+		ubifs_err("dead directory entry '%pd', error %d",
+			  dentry, err);
 		ubifs_ro_mode(c, err);
 		goto out;
 	}
@@ -260,8 +259,8 @@
 	 * parent directory inode.
 	 */
 
-	dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+		dentry, mode, dir->i_ino);
 
 	err = ubifs_budget_space(c, &req);
 	if (err)
@@ -509,8 +508,8 @@
 	 * changing the parent inode.
 	 */
 
-	dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+	dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu",
+		dentry, inode->i_ino,
 		inode->i_nlink, dir->i_ino);
 	ubifs_assert(mutex_is_locked(&dir->i_mutex));
 	ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -566,8 +565,8 @@
 	 * deletions.
 	 */
 
-	dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+	dbg_gen("dent '%pd' from ino %lu (nlink %d) in dir ino %lu",
+		dentry, inode->i_ino,
 		inode->i_nlink, dir->i_ino);
 	ubifs_assert(mutex_is_locked(&dir->i_mutex));
 	ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -656,8 +655,8 @@
 	 * because we have extra space reserved for deletions.
 	 */
 
-	dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
-		dentry->d_name.name, inode->i_ino, dir->i_ino);
+	dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry,
+		inode->i_ino, dir->i_ino);
 	ubifs_assert(mutex_is_locked(&dir->i_mutex));
 	ubifs_assert(mutex_is_locked(&inode->i_mutex));
 	err = check_dir_empty(c, dentry->d_inode);
@@ -716,8 +715,8 @@
 	 * directory inode.
 	 */
 
-	dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+		dentry, mode, dir->i_ino);
 
 	err = ubifs_budget_space(c, &req);
 	if (err)
@@ -778,8 +777,7 @@
 	 * directory inode.
 	 */
 
-	dbg_gen("dent '%.*s' in dir ino %lu",
-		dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+	dbg_gen("dent '%pd' in dir ino %lu", dentry, dir->i_ino);
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
@@ -853,8 +851,8 @@
 	 * directory inode.
 	 */
 
-	dbg_gen("dent '%.*s', target '%s' in dir ino %lu", dentry->d_name.len,
-		dentry->d_name.name, symname, dir->i_ino);
+	dbg_gen("dent '%pd', target '%s' in dir ino %lu", dentry,
+		symname, dir->i_ino);
 
 	if (len > UBIFS_MAX_INO_DATA)
 		return -ENAMETOOLONG;
@@ -979,10 +977,9 @@
 	 * separately.
 	 */
 
-	dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
-		old_dentry->d_name.len, old_dentry->d_name.name,
-		old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
-		new_dentry->d_name.name, new_dir->i_ino);
+	dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
+		old_dentry, old_inode->i_ino, old_dir->i_ino,
+		new_dentry, new_dir->i_ino);
 	ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
 	ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
 	if (unlink)
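
All of these conversions switch to the "%pd" vsprintf extension, which
prints a dentry's name directly and was added around the same release;
it replaces the verbose len/name pair:

	/* before */
	dbg_gen("'%.*s'", dentry->d_name.len, dentry->d_name.name);
	/* after */
	dbg_gen("'%pd'", dentry);
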
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 76ca53c..9718da8 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -668,8 +668,7 @@
 	ubifs_assert(!wbuf->used);
 
 	for (i = 0; ; i++) {
-		int space_before = c->leb_size - wbuf->offs - wbuf->used;
-		int space_after;
+		int space_before, space_after;
 
 		cond_resched();
 
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index afaad07..0e045e7 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -933,10 +933,8 @@
 	int move = (old_dir != new_dir);
 	struct ubifs_inode *uninitialized_var(new_ui);
 
-	dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
-		old_dentry->d_name.len, old_dentry->d_name.name,
-		old_dir->i_ino, new_dentry->d_name.len,
-		new_dentry->d_name.name, new_dir->i_ino);
+	dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
+		old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
 	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
 	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
 	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 3e4aa72..f69daa5 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1630,8 +1630,10 @@
 	}
 
 	c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL);
-	if (!c->write_reserve_buf)
+	if (!c->write_reserve_buf) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	err = ubifs_lpt_init(c, 0, 1);
 	if (err)
@@ -2064,8 +2066,10 @@
 	}
 
 	sb->s_root = d_make_root(root);
-	if (!sb->s_root)
+	if (!sb->s_root) {
+		err = -ENOMEM;
 		goto out_umount;
+	}
 
 	mutex_unlock(&c->umount_mutex);
 	return 0;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 0f7139b..5e0a63b 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -303,8 +303,8 @@
 	union ubifs_key key;
 	int err, type;
 
-	dbg_gen("xattr '%s', host ino %lu ('%.*s'), size %zd", name,
-		host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+	dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
+		host->i_ino, dentry, size);
 	ubifs_assert(mutex_is_locked(&host->i_mutex));
 
 	if (size > UBIFS_MAX_INO_DATA)
@@ -367,8 +367,8 @@
 	union ubifs_key key;
 	int err;
 
-	dbg_gen("xattr '%s', ino %lu ('%.*s'), buf size %zd", name,
-		host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+	dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
+		host->i_ino, dentry, size);
 
 	err = check_namespace(&nm);
 	if (err < 0)
@@ -426,8 +426,8 @@
 	int err, len, written = 0;
 	struct qstr nm = { .name = NULL };
 
-	dbg_gen("ino %lu ('%.*s'), buffer size %zd", host->i_ino,
-		dentry->d_name.len, dentry->d_name.name, size);
+	dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
+		dentry, size);
 
 	len = host_ui->xattr_names + host_ui->xattr_cnt;
 	if (!buffer)
@@ -529,8 +529,8 @@
 	union ubifs_key key;
 	int err;
 
-	dbg_gen("xattr '%s', ino %lu ('%.*s')", name,
-		host->i_ino, dentry->d_name.len, dentry->d_name.name);
+	dbg_gen("xattr '%s', ino %lu ('%pd')", name,
+		host->i_ino, dentry);
 	ubifs_assert(mutex_is_locked(&host->i_mutex));
 
 	err = check_namespace(&nm);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9121938..3306b9f 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -76,6 +76,9 @@
 
 #define UDF_DEFAULT_BLOCKSIZE 2048
 
+#define VSD_FIRST_SECTOR_OFFSET		32768
+#define VSD_MAX_SECTOR_OFFSET		0x800000
+
 enum { UDF_MAX_LINKS = 0xffff };
 
 /* These are the "meat" - everything else is stuffing */
@@ -685,7 +688,7 @@
 static loff_t udf_check_vsd(struct super_block *sb)
 {
 	struct volStructDesc *vsd = NULL;
-	loff_t sector = 32768;
+	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
 	int sectorsize;
 	struct buffer_head *bh = NULL;
 	int nsr02 = 0;
@@ -703,8 +706,18 @@
 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
 		  (unsigned int)(sector >> sb->s_blocksize_bits),
 		  sb->s_blocksize);
-	/* Process the sequence (if applicable) */
-	for (; !nsr02 && !nsr03; sector += sectorsize) {
+	/* Process the sequence (if applicable). The hard limit on the sector
+	 * offset is arbitrary, hopefully large enough so that all valid UDF
+	 * filesystems will be recognised. There is no mention of an upper
+	 * bound to the size of the volume recognition area in the standard.
+	 * The limit will prevent the code from reading all the sectors of a
+	 * specially crafted image (like a Blu-ray disc full of CD001 sectors),
+	 * potentially causing minutes or even hours of uninterruptible I/O
+	 * activity. This actually happened with uninitialised SSD partitions
+	 * (all 0xFF) before the check for the limit and all valid IDs were
+	 * added. */
+	for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
+	     sector += sectorsize) {
 		/* Read a block */
 		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
 		if (!bh)
@@ -714,10 +727,7 @@
 		vsd = (struct volStructDesc *)(bh->b_data +
 					      (sector & (sb->s_blocksize - 1)));
 
-		if (vsd->stdIdent[0] == 0) {
-			brelse(bh);
-			break;
-		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
+		if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
 				    VSD_STD_ID_LEN)) {
 			switch (vsd->structType) {
 			case 0:
@@ -753,6 +763,17 @@
 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
 				    VSD_STD_ID_LEN))
 			nsr03 = sector;
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
+				    VSD_STD_ID_LEN))
+			; /* nothing */
+		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
+				    VSD_STD_ID_LEN))
+			; /* nothing */
+		else {
+			/* invalid id: end of volume recognition area */
+			brelse(bh);
+			break;
+		}
 		brelse(bh);
 	}
 
@@ -760,7 +781,8 @@
 		return nsr03;
 	else if (nsr02)
 		return nsr02;
-	else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
+	else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
+			VSD_FIRST_SECTOR_OFFSET)
 		return -1;
 	else
 		return 0;
@@ -1270,6 +1292,9 @@
 	 * PHYSICAL partitions are already set up
 	 */
 	type1_idx = i;
+#ifdef UDFFS_DEBUG
+	map = NULL; /* suppress 'maybe used uninitialized' warning */
+#endif
 	for (i = 0; i < sbi->s_partitions; i++) {
 		map = &sbi->s_partmaps[i];
 
@@ -1891,7 +1916,9 @@
 			return 0;
 		}
 		if (nsr_off == -1)
-			udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+			udf_debug("Failed to read sector at offset %d. "
+				  "Assuming open disc. Skipping validity "
+				  "check\n", VSD_FIRST_SECTOR_OFFSET);
 		if (!sbi->s_last_block)
 			sbi->s_last_block = udf_get_last_block(sb);
 	} else {
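
For scale: VSD_MAX_SECTOR_OFFSET is a byte offset, 0x800000 = 8 MiB,
so with the minimum 2048-byte descriptor stride the recognition loop
now reads at most 0x800000 / 2048 = 4096 descriptors (fewer with
larger block sizes), instead of potentially walking every sector of a
crafted or blank all-0xFF device.
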
diff --git a/fs/utimes.c b/fs/utimes.c
index f4fb7ec..aa138d6 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -53,6 +53,7 @@
 	int error;
 	struct iattr newattrs;
 	struct inode *inode = path->dentry->d_inode;
+	struct inode *delegated_inode = NULL;
 
 	error = mnt_want_write(path->mnt);
 	if (error)
@@ -101,9 +102,15 @@
 				goto mnt_drop_write_and_out;
 		}
 	}
+retry_deleg:
 	mutex_lock(&inode->i_mutex);
-	error = notify_change(path->dentry, &newattrs);
+	error = notify_change(path->dentry, &newattrs, &delegated_inode);
 	mutex_unlock(&inode->i_mutex);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
 
 mnt_drop_write_and_out:
 	mnt_drop_write(path->mnt);
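
utimes now participates in the NFS delegation break protocol added
elsewhere in this series: rather than blocking with i_mutex held,
notify_change() hands back the delegated inode, and the caller drops
the mutex, waits for the lease to be broken, and retries.  The shape,
as used here and in the other setattr paths:

	struct inode *delegated_inode = NULL;
retry_deleg:
	mutex_lock(&inode->i_mutex);
	error = notify_change(path->dentry, &newattrs, &delegated_inode);
	mutex_unlock(&inode->i_mutex);
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
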
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0719e4d..c21f435 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -66,12 +66,14 @@
 				   xfs_bmap_btree.o \
 				   xfs_btree.o \
 				   xfs_da_btree.o \
+				   xfs_da_format.o \
 				   xfs_dir2.o \
 				   xfs_dir2_block.o \
 				   xfs_dir2_data.o \
 				   xfs_dir2_leaf.o \
 				   xfs_dir2_node.o \
 				   xfs_dir2_sf.o \
+				   xfs_dquot_buf.o \
 				   xfs_ialloc.o \
 				   xfs_ialloc_btree.o \
 				   xfs_icreate_item.o \
@@ -103,7 +105,11 @@
 				   xfs_qm_bhv.o \
 				   xfs_qm.o \
 				   xfs_quotaops.o
-xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o
+
+# xfs_rtbitmap is shared with libxfs
+xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o \
+				   xfs_rtbitmap.o
+
 xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
 xfs-$(CONFIG_PROC_FS)		+= xfs_stats.o
 xfs-$(CONFIG_SYSCTL)		+= xfs_sysctl.o
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index a02cfb9..66a36be 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -63,17 +63,6 @@
 }
 
 void *
-kmem_zalloc(size_t size, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_alloc(size, flags);
-	if (ptr)
-		memset((char *)ptr, 0, (int)size);
-	return ptr;
-}
-
-void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
 	void	*ptr;
@@ -128,14 +117,3 @@
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
 }
-
-void *
-kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_zone_alloc(zone, flags);
-	if (ptr)
-		memset((char *)ptr, 0, kmem_cache_size(zone));
-	return ptr;
-}
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 3a7371c..64db0e5 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -32,6 +32,7 @@
 #define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
+#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -43,7 +44,7 @@
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -52,11 +53,14 @@
 		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
+
+	if (flags & KM_ZERO)
+		lflags |= __GFP_ZERO;
+
 	return lflags;
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_zalloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
@@ -64,6 +68,12 @@
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
 
+static inline void *
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
+{
+	return kmem_alloc(size, flags | KM_ZERO);
+}
+
 /*
  * Zone interfaces
  */
@@ -102,6 +112,11 @@
 }
 
 extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
+
+static inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
+{
+	return kmem_zone_alloc(zone, flags | KM_ZERO);
+}
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
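
With KM_ZERO the two zalloc variants become trivial wrappers and the
zeroing moves down into the allocator via __GFP_ZERO instead of a
follow-up memset.  Callers are unchanged:

	buf = kmem_zalloc(size, KM_SLEEP);
	/* now equivalent to kmem_alloc(size, KM_SLEEP | KM_ZERO),
	 * i.e. a sleeping allocation with __GFP_ZERO set */
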
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 0e2f37ef..370eb3e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -16,15 +16,15 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 1cb740a..3fc1098 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -128,8 +128,6 @@
 extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
 			xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-
 /*
  * Size of the unlinked inode hash table in the agi.
  */
@@ -191,8 +189,6 @@
 extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
 				xfs_agnumber_t agno, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 /*
  * The third a.g. block contains the a.g. freelist, an array
  * of block pointers to blocks owned by the allocation btree code.
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 5a1393f..9eab2df 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -17,25 +17,25 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 
 struct workqueue_struct *xfs_alloc_wq;
 
@@ -2294,6 +2294,8 @@
 {
 	int		error;
 
+	trace_xfs_read_agf(mp, agno);
+
 	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_trans_read_buf(
 			mp, tp, mp->m_ddev_targp,
@@ -2324,8 +2326,9 @@
 	struct xfs_perag	*pag;		/* per allocation group data */
 	int			error;
 
-	ASSERT(agno != NULLAGNUMBER);
+	trace_xfs_alloc_read_agf(mp, agno);
 
+	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_read_agf(mp, tp, agno,
 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
 			bpp);
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 99d0a61..feacb06 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -231,7 +231,4 @@
 	xfs_extlen_t		*len,	/* output: length of extent */
 	int			*stat);	/* output: success/failure */
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-
 #endif	/* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index cafc902..1308542 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -17,23 +17,21 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index e3a3f74..45e189e 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -27,39 +27,6 @@
 struct xfs_mount;
 
 /*
- * There are two on-disk btrees, one sorted by blockno and one sorted
- * by blockcount and blockno.  All blocks look the same to make the code
- * simpler; if we have time later, we'll make the optimizations.
- */
-#define	XFS_ABTB_MAGIC		0x41425442	/* 'ABTB' for bno tree */
-#define	XFS_ABTB_CRC_MAGIC	0x41423342	/* 'AB3B' */
-#define	XFS_ABTC_MAGIC		0x41425443	/* 'ABTC' for cnt tree */
-#define	XFS_ABTC_CRC_MAGIC	0x41423343	/* 'AB3C' */
-
-/*
- * Data record/key structure
- */
-typedef struct xfs_alloc_rec {
-	__be32		ar_startblock;	/* starting block number */
-	__be32		ar_blockcount;	/* count of free blocks */
-} xfs_alloc_rec_t, xfs_alloc_key_t;
-
-typedef struct xfs_alloc_rec_incore {
-	xfs_agblock_t	ar_startblock;	/* starting block number */
-	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
-} xfs_alloc_rec_incore_t;
-
-/* btree pointer type */
-typedef __be32 xfs_alloc_ptr_t;
-
-/*
- * Block numbers in the AG:
- * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
- */
-#define	XFS_BNO_BLOCK(mp)	((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
-#define	XFS_CNT_BLOCK(mp)	((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
-
-/*
  * Btree block header size depends on a superblock flag.
  */
 #define XFS_ALLOC_BLOCK_LEN(mp) \
@@ -95,6 +62,4 @@
 		xfs_agnumber_t, xfs_btnum_t);
 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
-
 #endif	/* __XFS_ALLOC_BTREE_H__ */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e51e581..71c8c9d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -16,14 +16,15 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
@@ -31,6 +32,8 @@
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
 #include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
@@ -333,7 +336,7 @@
 
 	if (type == XFS_IO_DELALLOC &&
 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
-		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		error = xfs_iomap_write_allocate(ip, offset, imap);
 		if (!error)
 			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
 		return -XFS_ERROR(error);
@@ -1569,8 +1572,7 @@
 
 	ASSERT(len <= PAGE_CACHE_SIZE);
 
-	page = grab_cache_page_write_begin(mapping, index,
-					   flags | AOP_FLAG_NOFS);
+	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index ddcf226..b861270 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -17,23 +17,24 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
@@ -41,6 +42,7 @@
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * xfs_attr.c
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index bb24b07..09480c5 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -18,22 +18,20 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_attr_remote.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
@@ -41,7 +39,8 @@
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
 
 /*
  * Look at all the extents for this logical region,
@@ -232,13 +231,13 @@
 	}
 
 	node = bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&ichdr, node);
+	dp->d_ops->node_hdr_from_disk(&ichdr, node);
 	parent_blkno = bp->b_bn;
 	if (!ichdr.count) {
 		xfs_trans_brelse(*trans, bp);
 		return 0;
 	}
-	btree = xfs_da3_node_tree_p(node);
+	btree = dp->d_ops->node_tree_p(node);
 	child_fsb = be32_to_cpu(btree[0].before);
 	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
 
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 86db20a..7b126f4 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -18,32 +18,31 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
+#include "xfs_bmap.h"
 #include "xfs_attr_sf.h"
 #include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
 
 
 /*
@@ -918,8 +917,8 @@
 	if (error)
 		goto out;
 	node = bp1->b_addr;
-	xfs_da3_node_hdr_from_disk(&icnodehdr, node);
-	btree = xfs_da3_node_tree_p(node);
+	dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
+	btree = dp->d_ops->node_tree_p(node);
 
 	leaf = bp2->b_addr;
 	xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
@@ -929,7 +928,7 @@
 	btree[0].hashval = entries[icleafhdr.count - 1].hashval;
 	btree[0].before = cpu_to_be32(blkno);
 	icnodehdr.count = 1;
-	xfs_da3_node_hdr_to_disk(node, &icnodehdr);
+	dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
 	xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(mp) - 1);
 	error = 0;
 out:
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index c102213..3ec5ec0 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -19,16 +19,6 @@
 #ifndef __XFS_ATTR_LEAF_H__
 #define	__XFS_ATTR_LEAF_H__
 
-/*
- * Attribute storage layout, internal structure, access macros, etc.
- *
- * Attribute lists are structured around Btrees where all the data
- * elements are in the leaf nodes.  Attribute names are hashed into an int,
- * then that int is used as the index into the Btree.  Since the hashval
- * of an attribute name may not be unique, we may have duplicate keys.  The
- * internal links in the Btree are logical block offsets into the file.
- */
-
 struct attrlist;
 struct attrlist_cursor_kern;
 struct xfs_attr_list_context;
@@ -38,226 +28,6 @@
 struct xfs_inode;
 struct xfs_trans;
 
-/*========================================================================
- * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * Struct leaf_entry's are packed from the top.  Name/values grow from the
- * bottom but are not packed.  The freemap contains run-length-encoded entries
- * for the free bytes after the leaf_entry's, but only the N largest such,
- * smaller runs are dropped.  When the freemap doesn't show enough space
- * for an allocation, we compact the name/value area and try again.  If we
- * still don't have enough space, then we have to split the block.  The
- * name/value structs (both local and remote versions) must be 32bit aligned.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual name string.  The root and intermediate node search always
- * takes the first-in-the-block key match found, so we should only have
- * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
- * nodes until the hash key changes or the attribute name is found.
- *
- * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
- * the leaf_entry.  The namespaces are independent only because we also look
- * at the namespace bit when we are looking for a matching attribute name.
- *
- * We also store an "incomplete" bit in the leaf_entry.  It shows that an
- * attribute is in the middle of being created and should not be shown to
- * the user if we crash during the time that the bit is set.  We clear the
- * bit when we have finished setting up the attribute.  We do this because
- * we cannot create some large attributes inside a single transaction, and we
- * need some indication that we weren't finished if we crash in the middle.
- */
-#define XFS_ATTR_LEAF_MAPSIZE	3	/* how many freespace slots */
-
-typedef struct xfs_attr_leaf_map {	/* RLE map of free bytes */
-	__be16	base;			  /* base of free region */
-	__be16	size;			  /* length of free region */
-} xfs_attr_leaf_map_t;
-
-typedef struct xfs_attr_leaf_hdr {	/* constant-structure header block */
-	xfs_da_blkinfo_t info;		/* block type, links, etc. */
-	__be16	count;			/* count of active leaf_entry's */
-	__be16	usedbytes;		/* num bytes of names/values stored */
-	__be16	firstused;		/* first used byte in name area */
-	__u8	holes;			/* != 0 if blk needs compaction */
-	__u8	pad1;
-	xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
-					/* N largest free regions */
-} xfs_attr_leaf_hdr_t;
-
-typedef struct xfs_attr_leaf_entry {	/* sorted on key, not name */
-	__be32	hashval;		/* hash value of name */
-	__be16	nameidx;		/* index into buffer of name/value */
-	__u8	flags;			/* LOCAL/ROOT/SECURE/INCOMPLETE flag */
-	__u8	pad2;			/* unused pad byte */
-} xfs_attr_leaf_entry_t;
-
-typedef struct xfs_attr_leaf_name_local {
-	__be16	valuelen;		/* number of bytes in value */
-	__u8	namelen;		/* length of name bytes */
-	__u8	nameval[1];		/* name/value bytes */
-} xfs_attr_leaf_name_local_t;
-
-typedef struct xfs_attr_leaf_name_remote {
-	__be32	valueblk;		/* block number of value bytes */
-	__be32	valuelen;		/* number of bytes in value */
-	__u8	namelen;		/* length of name bytes */
-	__u8	name[1];		/* name bytes */
-} xfs_attr_leaf_name_remote_t;
-
-typedef struct xfs_attr_leafblock {
-	xfs_attr_leaf_hdr_t	hdr;	/* constant-structure header block */
-	xfs_attr_leaf_entry_t	entries[1];	/* sorted on key, not name */
-	xfs_attr_leaf_name_local_t namelist;	/* grows from bottom of buf */
-	xfs_attr_leaf_name_remote_t valuelist;	/* grows from bottom of buf */
-} xfs_attr_leafblock_t;
-
-/*
- * CRC enabled leaf structures. Called "version 3" structures to match the
- * version number of the directory and dablk structures for this feature, and
- * attr2 is already taken by the variable inode attribute fork size feature.
- */
-struct xfs_attr3_leaf_hdr {
-	struct xfs_da3_blkinfo	info;
-	__be16			count;
-	__be16			usedbytes;
-	__be16			firstused;
-	__u8			holes;
-	__u8			pad1;
-	struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
-	__be32			pad2;		/* 64 bit alignment */
-};
-
-#define XFS_ATTR3_LEAF_CRC_OFF	(offsetof(struct xfs_attr3_leaf_hdr, info.crc))
-
-struct xfs_attr3_leafblock {
-	struct xfs_attr3_leaf_hdr	hdr;
-	struct xfs_attr_leaf_entry	entries[1];
-
-	/*
-	 * The rest of the block contains the following structures after the
-	 * leaf entries, growing from the bottom up. The variables are never
-	 * referenced, the locations accessed purely from helper functions.
-	 *
-	 * struct xfs_attr_leaf_name_local
-	 * struct xfs_attr_leaf_name_remote
-	 */
-};
-
-/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
-	__uint32_t	forw;
-	__uint32_t	back;
-	__uint16_t	magic;
-	__uint16_t	count;
-	__uint16_t	usedbytes;
-	__uint16_t	firstused;
-	__u8		holes;
-	struct {
-		__uint16_t	base;
-		__uint16_t	size;
-	} freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
- * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
- */
-#define	XFS_ATTR_LOCAL_BIT	0	/* attr is stored locally */
-#define	XFS_ATTR_ROOT_BIT	1	/* limit access to trusted attrs */
-#define	XFS_ATTR_SECURE_BIT	2	/* limit access to secure attrs */
-#define	XFS_ATTR_INCOMPLETE_BIT	7	/* attr in middle of create/delete */
-#define XFS_ATTR_LOCAL		(1 << XFS_ATTR_LOCAL_BIT)
-#define XFS_ATTR_ROOT		(1 << XFS_ATTR_ROOT_BIT)
-#define XFS_ATTR_SECURE		(1 << XFS_ATTR_SECURE_BIT)
-#define XFS_ATTR_INCOMPLETE	(1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK		(ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK	(XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags)	((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags)	((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x)	(((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
-					 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x)	(((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
-					 ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
-
-/*
- * Alignment for namelist and valuelist entries (since they are mixed
- * there can be only one alignment value)
- */
-#define	XFS_ATTR_LEAF_NAME_ALIGN	((uint)sizeof(xfs_dablk_t))
-
-static inline int
-xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
-{
-	if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-		return sizeof(struct xfs_attr3_leaf_hdr);
-	return sizeof(struct xfs_attr_leaf_hdr);
-}
-
-static inline struct xfs_attr_leaf_entry *
-xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
-{
-	if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-		return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
-	return &leafp->entries[0];
-}
-
-/*
- * Cast typed pointers for "local" and "remote" name/value structs.
- */
-static inline char *
-xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
-{
-	struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
-
-	return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
-}
-
-static inline xfs_attr_leaf_name_remote_t *
-xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
-{
-	return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-static inline xfs_attr_leaf_name_local_t *
-xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
-{
-	return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-/*
- * Calculate total bytes used (including trailing pad for alignment) for
- * a "local" name/value structure, a "remote" name/value structure, and
- * a pointer which might be either.
- */
-static inline int xfs_attr_leaf_entsize_remote(int nlen)
-{
-	return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
-		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
-{
-	return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
-		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local_max(int bsize)
-{
-	return (((bsize) >> 1) + ((bsize) >> 2));
-}
-
 /*
  * Used to keep a list of "remote value" extents when unlinking an inode.
  */
@@ -336,6 +106,4 @@
 void	xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
 				   struct xfs_attr3_icleaf_hdr *from);
 
-extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
-
 #endif	/* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index cbc80d4..2d174b1 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -18,31 +18,29 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
 
 STATIC int
 xfs_attr_shortform_compare(const void *a, const void *b)
@@ -229,6 +227,7 @@
 	struct xfs_da_node_entry *btree;
 	int error, i;
 	struct xfs_buf *bp;
+	struct xfs_inode	*dp = context->dp;
 
 	trace_xfs_attr_node_list(context);
 
@@ -242,7 +241,7 @@
 	 */
 	bp = NULL;
 	if (cursor->blkno > 0) {
-		error = xfs_da3_node_read(NULL, context->dp, cursor->blkno, -1,
+		error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
 					      &bp, XFS_ATTR_FORK);
 		if ((error != 0) && (error != EFSCORRUPTED))
 			return(error);
@@ -292,7 +291,7 @@
 		for (;;) {
 			__uint16_t magic;
 
-			error = xfs_da3_node_read(NULL, context->dp,
+			error = xfs_da3_node_read(NULL, dp,
 						      cursor->blkno, -1, &bp,
 						      XFS_ATTR_FORK);
 			if (error)
@@ -312,8 +311,8 @@
 				return XFS_ERROR(EFSCORRUPTED);
 			}
 
-			xfs_da3_node_hdr_from_disk(&nodehdr, node);
-			btree = xfs_da3_node_tree_p(node);
+			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+			btree = dp->d_ops->node_tree_p(node);
 			for (i = 0; i < nodehdr.count; btree++, i++) {
 				if (cursor->hashval
 						<= be32_to_cpu(btree->hashval)) {
@@ -349,8 +348,7 @@
 			break;
 		cursor->blkno = leafhdr.forw;
 		xfs_trans_brelse(NULL, bp);
-		error = xfs_attr3_leaf_read(NULL, context->dp, cursor->blkno, -1,
-					   &bp);
+		error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
 		if (error)
 			return error;
 	}
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 712a502..739e0a52 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -18,20 +18,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
@@ -42,6 +41,7 @@
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
 #include "xfs_buf_item.h"
+#include "xfs_error.h"
 
 #define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */
 
diff --git a/fs/xfs/xfs_attr_remote.h b/fs/xfs/xfs_attr_remote.h
index 92a8fd7..5a9acfa 100644
--- a/fs/xfs/xfs_attr_remote.h
+++ b/fs/xfs/xfs_attr_remote.h
@@ -18,35 +18,6 @@
 #ifndef __XFS_ATTR_REMOTE_H__
 #define	__XFS_ATTR_REMOTE_H__
 
-#define XFS_ATTR3_RMT_MAGIC	0x5841524d	/* XARM */
-
-/*
- * There is one of these headers per filesystem block in a remote attribute.
- * This is done to ensure there is a 1:1 mapping between the attribute value
- * length and the number of blocks needed to store the attribute. This makes the
- * verification of a buffer a little more complex, but greatly simplifies the
- * allocation, reading and writing of these attributes as we don't have to guess
- * the number of blocks needed to store the attribute data.
- */
-struct xfs_attr3_rmt_hdr {
-	__be32	rm_magic;
-	__be32	rm_offset;
-	__be32	rm_bytes;
-	__be32	rm_crc;
-	uuid_t	rm_uuid;
-	__be64	rm_owner;
-	__be64	rm_blkno;
-	__be64	rm_lsn;
-};
-
-#define XFS_ATTR3_RMT_CRC_OFF	offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
-
-#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)	\
-	((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
-			sizeof(struct xfs_attr3_rmt_hdr) : 0))
-
-extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
-
 int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
 
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
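
The remote-attribute header deleted here (moved to the shared format headers elsewhere in this series) implements the scheme its comment describes: one header per filesystem block, so the block count for a value is a straight round-up of its length over the per-block payload. A sketch of that calculation, where "usable" stands for the block size minus sizeof(struct xfs_attr3_rmt_hdr) on v5 (CRC) filesystems, or the whole block otherwise:

static int rmt_blocks_needed(int attrlen, int usable)
{
	return (attrlen + usable - 1) / usable;	/* 1:1 value-to-block mapping */
}
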
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index 4822884..0e8885a 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -16,10 +16,8 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_log_format.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
 
 /*
  * XFS bit manipulation routines, used in non-realtime code.
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index f47e65c..1c02da8 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -17,39 +17,37 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
-#include "xfs_itable.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr_leaf.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 
 kmem_zone_t		*xfs_bmap_free_item_zone;
@@ -1482,7 +1480,7 @@
 		xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
 				"Access to block zero in inode %llu "
 				"start_block: %llx start_off: %llx "
-				"blkcnt: %llx extent-state: %x lastx: %x\n",
+				"blkcnt: %llx extent-state: %x lastx: %x",
 			(unsigned long long)ip->i_ino,
 			(unsigned long long)gotp->br_startblock,
 			(unsigned long long)gotp->br_startoff,
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index bb8de8e..706bc3f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -17,27 +17,26 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
-#include "xfs_itable.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Determine the extent state.
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index e367461..6e42e1e 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -18,9 +18,6 @@
 #ifndef __XFS_BMAP_BTREE_H__
 #define __XFS_BMAP_BTREE_H__
 
-#define XFS_BMAP_MAGIC		0x424d4150	/* 'BMAP' */
-#define XFS_BMAP_CRC_MAGIC	0x424d4133	/* 'BMA3' */
-
 struct xfs_btree_cur;
 struct xfs_btree_block;
 struct xfs_mount;
@@ -28,85 +25,6 @@
 struct xfs_trans;
 
 /*
- * Bmap root header, on-disk form only.
- */
-typedef struct xfs_bmdr_block {
-	__be16		bb_level;	/* 0 is a leaf */
-	__be16		bb_numrecs;	/* current # of data records */
-} xfs_bmdr_block_t;
-
-/*
- * Bmap btree record and extent descriptor.
- *  l0:63 is an extent flag (value 1 indicates non-normal).
- *  l0:9-62 are startoff.
- *  l0:0-8 and l1:21-63 are startblock.
- *  l1:0-20 are blockcount.
- */
-#define BMBT_EXNTFLAG_BITLEN	1
-#define BMBT_STARTOFF_BITLEN	54
-#define BMBT_STARTBLOCK_BITLEN	52
-#define BMBT_BLOCKCOUNT_BITLEN	21
-
-typedef struct xfs_bmbt_rec {
-	__be64			l0, l1;
-} xfs_bmbt_rec_t;
-
-typedef __uint64_t	xfs_bmbt_rec_base_t;	/* use this for casts */
-typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
-
-typedef struct xfs_bmbt_rec_host {
-	__uint64_t		l0, l1;
-} xfs_bmbt_rec_host_t;
-
-/*
- * Values and macros for delayed-allocation startblock fields.
- */
-#define STARTBLOCKVALBITS	17
-#define STARTBLOCKMASKBITS	(15 + XFS_BIG_BLKNOS * 20)
-#define DSTARTBLOCKMASKBITS	(15 + 20)
-#define STARTBLOCKMASK		\
-	(((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define DSTARTBLOCKMASK		\
-	(((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-
-static inline int isnullstartblock(xfs_fsblock_t x)
-{
-	return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
-}
-
-static inline int isnulldstartblock(xfs_dfsbno_t x)
-{
-	return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
-}
-
-static inline xfs_fsblock_t nullstartblock(int k)
-{
-	ASSERT(k < (1 << STARTBLOCKVALBITS));
-	return STARTBLOCKMASK | (k);
-}
-
-static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
-{
-	return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
-}
-
-/*
- * Possible extent formats.
- */
-typedef enum {
-	XFS_EXTFMT_NOSTATE = 0,
-	XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
- * Possible extent states.
- */
-typedef enum {
-	XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
-	XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
-} xfs_exntst_t;
-
-/*
  * Extent state and extent format macros.
  */
 #define XFS_EXTFMT_INODE(x)	\
@@ -115,27 +33,6 @@
 #define ISUNWRITTEN(x)	((x)->br_state == XFS_EXT_UNWRITTEN)
 
 /*
- * Incore version of above.
- */
-typedef struct xfs_bmbt_irec
-{
-	xfs_fileoff_t	br_startoff;	/* starting file offset */
-	xfs_fsblock_t	br_startblock;	/* starting block number */
-	xfs_filblks_t	br_blockcount;	/* number of blocks */
-	xfs_exntst_t	br_state;	/* extent state */
-} xfs_bmbt_irec_t;
-
-/*
- * Key structure for non-leaf levels of the tree.
- */
-typedef struct xfs_bmbt_key {
-	__be64		br_startoff;	/* starting file offset */
-} xfs_bmbt_key_t, xfs_bmdr_key_t;
-
-/* btree pointer type */
-typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-
-/*
  * Btree block header size depends on a superblock flag.
  */
 #define XFS_BMBT_BLOCK_LEN(mp) \
@@ -243,6 +140,4 @@
 extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_inode *, int);
 
-extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
-
 #endif	/* __XFS_BMAP_BTREE_H__ */
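
The bit layout documented above packs a whole extent descriptor into two 64-bit words. A sketch of decoding a host-endian record per that layout; the struct and function names here are illustrative, not the kernel's:

#include <stdint.h>

struct bmbt_irec_sketch {
	uint64_t	startoff;	/* l0:9-62, 54 bits */
	uint64_t	startblock;	/* l0:0-8 + l1:21-63, 52 bits */
	uint64_t	blockcount;	/* l1:0-20, 21 bits */
	int		unwritten;	/* l0:63, extent state flag */
};

static void bmbt_unpack(uint64_t l0, uint64_t l1, struct bmbt_irec_sketch *r)
{
	r->unwritten  = l0 >> 63;
	r->startoff   = (l0 >> 9) & ((1ULL << 54) - 1);
	r->startblock = ((l0 & 0x1ffULL) << 43) | (l1 >> 21);
	r->blockcount = l1 & ((1ULL << 21) - 1);
}
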
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 97f952c..5887e41 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -18,31 +18,31 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* Kernel only BMAP related definitions and functions */
 
@@ -965,32 +965,12 @@
 	return error;
 }
 
-/*
- * xfs_alloc_file_space()
- *      This routine allocates disk space for the given file.
- *
- *	If alloc_type == 0, this request is for an ALLOCSP type
- *	request which will change the file size.  In this case, no
- *	DMAPI event will be generated by the call.  A TRUNCATE event
- *	will be generated later by xfs_setattr.
- *
- *	If alloc_type != 0, this request is for a RESVSP type
- *	request, and a DMAPI DM_EVENT_WRITE will be generated if the
- *	lower block boundary byte address is less than the file's
- *	length.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_alloc_file_space(
-	xfs_inode_t		*ip,
+	struct xfs_inode	*ip,
 	xfs_off_t		offset,
 	xfs_off_t		len,
-	int			alloc_type,
-	int			attr_flags)
+	int			alloc_type)
 {
 	xfs_mount_t		*mp = ip->i_mount;
 	xfs_off_t		count;
@@ -1232,24 +1212,11 @@
 	return error;
 }
 
-/*
- * xfs_free_file_space()
- *      This routine frees disk space for the given file.
- *
- *	This routine is only called by xfs_change_file_space
- *	for an UNRESVSP type call.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_free_file_space(
-	xfs_inode_t		*ip,
+	struct xfs_inode	*ip,
 	xfs_off_t		offset,
-	xfs_off_t		len,
-	int			attr_flags)
+	xfs_off_t		len)
 {
 	int			committed;
 	int			done;
@@ -1267,7 +1234,6 @@
 	int			rt;
 	xfs_fileoff_t		startoffset_fsb;
 	xfs_trans_t		*tp;
-	int			need_iolock = 1;
 
 	mp = ip->i_mount;
 
@@ -1284,20 +1250,15 @@
 	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 
-	if (attr_flags & XFS_ATTR_NOLOCK)
-		need_iolock = 0;
-	if (need_iolock) {
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-		/* wait for the completion of any pending DIOs */
-		inode_dio_wait(VFS_I(ip));
-	}
+	/* wait for the completion of any pending DIOs */
+	inode_dio_wait(VFS_I(ip));
 
 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
 	ioffset = offset & ~(rounding - 1);
 	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 					      ioffset, -1);
 	if (error)
-		goto out_unlock_iolock;
+		goto out;
 	truncate_pagecache_range(VFS_I(ip), ioffset, -1);
 
 	/*
@@ -1311,7 +1272,7 @@
 		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
 					&imap, &nimap, 0);
 		if (error)
-			goto out_unlock_iolock;
+			goto out;
 		ASSERT(nimap == 0 || nimap == 1);
 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
 			xfs_daddr_t	block;
@@ -1326,7 +1287,7 @@
 		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
 					&imap, &nimap, 0);
 		if (error)
-			goto out_unlock_iolock;
+			goto out;
 		ASSERT(nimap == 0 || nimap == 1);
 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
@@ -1412,27 +1373,23 @@
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	}
 
- out_unlock_iolock:
-	if (need_iolock)
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ out:
 	return error;
 
  error0:
 	xfs_bmap_cancel(&free_list);
  error1:
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
-		    XFS_ILOCK_EXCL);
-	return error;
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	goto out;
 }
 
 
-STATIC int
+int
 xfs_zero_file_space(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,
-	xfs_off_t		len,
-	int			attr_flags)
+	xfs_off_t		len)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	uint			granularity;
@@ -1453,9 +1410,6 @@
 	ASSERT(start_boundary >= offset);
 	ASSERT(end_boundary <= offset + len);
 
-	if (!(attr_flags & XFS_ATTR_NOLOCK))
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
 	if (start_boundary < end_boundary - 1) {
 		/* punch out the page cache over the conversion range */
 		truncate_pagecache_range(VFS_I(ip), start_boundary,
@@ -1463,16 +1417,16 @@
 		/* convert the blocks */
 		error = xfs_alloc_file_space(ip, start_boundary,
 					end_boundary - start_boundary - 1,
-					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
-					attr_flags);
+					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
 		if (error)
-			goto out_unlock;
+			goto out;
 
 		/* We've handled the interior of the range, now for the edges */
-		if (start_boundary != offset)
+		if (start_boundary != offset) {
 			error = xfs_iozero(ip, offset, start_boundary - offset);
-		if (error)
-			goto out_unlock;
+			if (error)
+				goto out;
+		}
 
 		if (end_boundary != offset + len)
 			error = xfs_iozero(ip, end_boundary,
@@ -1486,197 +1440,12 @@
 		error = xfs_iozero(ip, offset, len);
 	}
 
-out_unlock:
-	if (!(attr_flags & XFS_ATTR_NOLOCK))
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+out:
 	return error;
 
 }
 
 /*
- * xfs_change_file_space()
- *      This routine allocates or frees disk space for the given file.
- *      The user specified parameters are checked for alignment and size
- *      limitations.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-int
-xfs_change_file_space(
-	xfs_inode_t	*ip,
-	int		cmd,
-	xfs_flock64_t	*bf,
-	xfs_off_t	offset,
-	int		attr_flags)
-{
-	xfs_mount_t	*mp = ip->i_mount;
-	int		clrprealloc;
-	int		error;
-	xfs_fsize_t	fsize;
-	int		setprealloc;
-	xfs_off_t	startoffset;
-	xfs_trans_t	*tp;
-	struct iattr	iattr;
-
-	if (!S_ISREG(ip->i_d.di_mode))
-		return XFS_ERROR(EINVAL);
-
-	switch (bf->l_whence) {
-	case 0: /*SEEK_SET*/
-		break;
-	case 1: /*SEEK_CUR*/
-		bf->l_start += offset;
-		break;
-	case 2: /*SEEK_END*/
-		bf->l_start += XFS_ISIZE(ip);
-		break;
-	default:
-		return XFS_ERROR(EINVAL);
-	}
-
-	/*
-	 * length of <= 0 for resv/unresv/zero is invalid.  length for
-	 * alloc/free is ignored completely and we have no idea what userspace
-	 * might have set it to, so set it to zero to allow range
-	 * checks to pass.
-	 */
-	switch (cmd) {
-	case XFS_IOC_ZERO_RANGE:
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_RESVSP64:
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_UNRESVSP64:
-		if (bf->l_len <= 0)
-			return XFS_ERROR(EINVAL);
-		break;
-	default:
-		bf->l_len = 0;
-		break;
-	}
-
-	if (bf->l_start < 0 ||
-	    bf->l_start > mp->m_super->s_maxbytes ||
-	    bf->l_start + bf->l_len < 0 ||
-	    bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
-		return XFS_ERROR(EINVAL);
-
-	bf->l_whence = 0;
-
-	startoffset = bf->l_start;
-	fsize = XFS_ISIZE(ip);
-
-	setprealloc = clrprealloc = 0;
-	switch (cmd) {
-	case XFS_IOC_ZERO_RANGE:
-		error = xfs_zero_file_space(ip, startoffset, bf->l_len,
-						attr_flags);
-		if (error)
-			return error;
-		setprealloc = 1;
-		break;
-
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_RESVSP64:
-		error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
-						XFS_BMAPI_PREALLOC, attr_flags);
-		if (error)
-			return error;
-		setprealloc = 1;
-		break;
-
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_UNRESVSP64:
-		if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
-								attr_flags)))
-			return error;
-		break;
-
-	case XFS_IOC_ALLOCSP:
-	case XFS_IOC_ALLOCSP64:
-	case XFS_IOC_FREESP:
-	case XFS_IOC_FREESP64:
-		/*
-		 * These operations actually do IO when extending the file, but
-		 * the allocation is done separately to the zeroing that is
-		 * done. This set of operations needs to be serialised against
-		 * other IO operations, such as truncate and buffered IO. We
-		 * need to take the IOLOCK here to serialise the allocation and
-		 * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
-		 * truncate, direct IO) from racing against the transient
-		 * allocated but not written state we can have here.
-		 */
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-		if (startoffset > fsize) {
-			error = xfs_alloc_file_space(ip, fsize,
-					startoffset - fsize, 0,
-					attr_flags | XFS_ATTR_NOLOCK);
-			if (error) {
-				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-				break;
-			}
-		}
-
-		iattr.ia_valid = ATTR_SIZE;
-		iattr.ia_size = startoffset;
-
-		error = xfs_setattr_size(ip, &iattr,
-					 attr_flags | XFS_ATTR_NOLOCK);
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
-		if (error)
-			return error;
-
-		clrprealloc = 1;
-		break;
-
-	default:
-		ASSERT(0);
-		return XFS_ERROR(EINVAL);
-	}
-
-	/*
-	 * update the inode timestamp, mode, and prealloc flag bits
-	 */
-	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp, 0);
-		return error;
-	}
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-	if ((attr_flags & XFS_ATTR_DMI) == 0) {
-		ip->i_d.di_mode &= ~S_ISUID;
-
-		/*
-		 * Note that we don't have to worry about mandatory
-		 * file locking being disabled here because we only
-		 * clear the S_ISGID bit if the Group execute bit is
-		 * on, but if it was on then mandatory locking wouldn't
-		 * have been enabled.
-		 */
-		if (ip->i_d.di_mode & S_IXGRP)
-			ip->i_d.di_mode &= ~S_ISGID;
-
-		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-	}
-	if (setprealloc)
-		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-	else if (clrprealloc)
-		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	if (attr_flags & XFS_ATTR_SYNC)
-		xfs_trans_set_sync(tp);
-	return xfs_trans_commit(tp, 0);
-}
-
-/*
  * We need to check that the format of the data fork in the temporary inode is
  * valid for the target inode before doing the swap. This is not a problem with
  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
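
With the attr_flags/XFS_ATTR_NOLOCK plumbing gone, these helpers now assume the caller already holds the inode's IOLOCK. A sketch of the resulting calling convention; do_punch_hole is a hypothetical caller standing in for the real fallocate/ioctl paths:

static int do_punch_hole(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len)
{
	int	error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* lock moved out of the helper */
	error = xfs_free_file_space(ip, offset, len);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
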
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0612609..900747b 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -93,9 +93,12 @@
 			     int *is_empty);
 
 /* preallocation and hole punch interface */
-int	xfs_change_file_space(struct xfs_inode *ip, int cmd,
-			      xfs_flock64_t *bf, xfs_off_t offset,
-			      int attr_flags);
+int	xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
+			     xfs_off_t len, int alloc_type);
+int	xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
+			    xfs_off_t len);
+int	xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
+			    xfs_off_t len);
 
 /* EOF block manipulation functions */
 bool	xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
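
With xfs_change_file_space gone, callers select among the three exported helpers directly. A hypothetical dispatcher shape (the real selection happens in the fallocate/ioctl paths, with locking and prealloc-flag updates handled there):

static int space_op(struct xfs_inode *ip, int mode, xfs_off_t off, xfs_off_t len)
{
	if (mode == 0)					/* preallocate */
		return xfs_alloc_file_space(ip, off, len, XFS_BMAPI_PREALLOC);
	if (mode == 1)					/* punch hole */
		return xfs_free_file_space(ip, off, len);
	return xfs_zero_file_space(ip, off, len);	/* zero range */
}
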
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 5690e102..9adaae4 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -17,18 +17,16 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_buf_item.h"
 #include "xfs_btree.h"
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 06729b6..91e34f2 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -27,73 +27,6 @@
 extern kmem_zone_t	*xfs_btree_cur_zone;
 
 /*
- * This nonsense is to make -wlint happy.
- */
-#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
-#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
-#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)
-
-#define	XFS_BTNUM_BNO	((xfs_btnum_t)XFS_BTNUM_BNOi)
-#define	XFS_BTNUM_CNT	((xfs_btnum_t)XFS_BTNUM_CNTi)
-#define	XFS_BTNUM_BMAP	((xfs_btnum_t)XFS_BTNUM_BMAPi)
-#define	XFS_BTNUM_INO	((xfs_btnum_t)XFS_BTNUM_INOi)
-
-/*
- * Generic btree header.
- *
- * This is a combination of the actual format used on disk for short and long
- * format btrees.  The first three fields are shared by both formats, but the
- * pointers are different and should be used with care.
- *
- * To get the size of the actual short or long form headers please use the size
- * macros below.  Never use sizeof(xfs_btree_block).
- *
- * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
- * with the crc feature bit, and all accesses to them must be conditional on
- * that flag.
- */
-struct xfs_btree_block {
-	__be32		bb_magic;	/* magic number for block type */
-	__be16		bb_level;	/* 0 is a leaf */
-	__be16		bb_numrecs;	/* current # of data records */
-	union {
-		struct {
-			__be32		bb_leftsib;
-			__be32		bb_rightsib;
-
-			__be64		bb_blkno;
-			__be64		bb_lsn;
-			uuid_t		bb_uuid;
-			__be32		bb_owner;
-			__le32		bb_crc;
-		} s;			/* short form pointers */
-		struct	{
-			__be64		bb_leftsib;
-			__be64		bb_rightsib;
-
-			__be64		bb_blkno;
-			__be64		bb_lsn;
-			uuid_t		bb_uuid;
-			__be64		bb_owner;
-			__le32		bb_crc;
-			__be32		bb_pad; /* padding for alignment */
-		} l;			/* long form pointers */
-	} bb_u;				/* rest */
-};
-
-#define XFS_BTREE_SBLOCK_LEN	16	/* size of a short form block */
-#define XFS_BTREE_LBLOCK_LEN	24	/* size of a long form block */
-
-/* sizes of CRC enabled btree blocks */
-#define XFS_BTREE_SBLOCK_CRC_LEN	(XFS_BTREE_SBLOCK_LEN + 40)
-#define XFS_BTREE_LBLOCK_CRC_LEN	(XFS_BTREE_LBLOCK_LEN + 48)
-
-#define XFS_BTREE_SBLOCK_CRC_OFF \
-	offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
-#define XFS_BTREE_LBLOCK_CRC_OFF \
-	offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
-
-/*
  * Generic key, ptr and record wrapper structures.
  *
  * These are disk format structures, and are converted where necessary
@@ -119,6 +52,18 @@
 };
 
 /*
+ * This nonsense is to make -wlint happy.
+ */
+#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
+#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
+#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)
+
+#define	XFS_BTNUM_BNO	((xfs_btnum_t)XFS_BTNUM_BNOi)
+#define	XFS_BTNUM_CNT	((xfs_btnum_t)XFS_BTNUM_CNTi)
+#define	XFS_BTNUM_BMAP	((xfs_btnum_t)XFS_BTNUM_BMAPi)
+#define	XFS_BTNUM_INO	((xfs_btnum_t)XFS_BTNUM_INOi)
+
+/*
  * For logging record fields.
  */
 #define	XFS_BB_MAGIC		(1 << 0)
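
The block-length constants deleted above encode two independent choices: short vs. long sibling pointers, and the extra verification fields on v5 (CRC) filesystems. A sketch of the resulting header sizes (16/24 bytes base, plus 40/48 bytes of CRC metadata):

static int btree_block_hdr_len(int long_form, int has_crc)
{
	if (long_form)
		return has_crc ? 24 + 48 : 24;	/* XFS_BTREE_LBLOCK[_CRC]_LEN */
	return has_crc ? 16 + 40 : 16;		/* XFS_BTREE_SBLOCK[_CRC]_LEN */
}
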
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2634700..c7f0b77 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,12 +34,13 @@
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
-#include "xfs_sb.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
+#include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 static kmem_zone_t *xfs_buf_zone;
 
@@ -590,7 +591,7 @@
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			xfs_warn(target->bt_mount,
-				"%s: failed to map pages\n", __func__);
+				"%s: failed to map pages", __func__);
 			xfs_buf_relse(bp);
 			return NULL;
 		}
@@ -809,7 +810,7 @@
 	error = _xfs_buf_map_pages(bp, 0);
 	if (unlikely(error)) {
 		xfs_warn(target->bt_mount,
-			"%s: failed to map pages\n", __func__);
+			"%s: failed to map pages", __func__);
 		goto fail_free_mem;
 	}
 
@@ -1618,7 +1619,7 @@
 		bdevname(btp->bt_bdev, name);
 
 		xfs_warn(btp->bt_mount,
-			"Cannot set_blocksize to %u on device %s\n",
+			"Cannot set_blocksize to %u on device %s",
 			sectorsize, name);
 		return EINVAL;
 	}
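
These hunks drop the trailing "\n" from the format strings (as the xfs_bmap.c hunk above does) on the understanding that the xfs_warn/xfs_alert message helpers terminate the line themselves, leaving calls of the form:

	xfs_warn(btp->bt_mount, "Cannot set_blocksize to %u on device %s",
		 sectorsize, name);
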
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f1d85cf..a64f67b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -17,17 +17,18 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 
 kmem_zone_t	*xfs_buf_item_zone;
@@ -808,7 +809,7 @@
  * Mark bytes first through last inclusive as dirty in the buf
  * item's bitmap.
  */
-void
+static void
 xfs_buf_item_log_segment(
 	struct xfs_buf_log_item	*bip,
 	uint			first,
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index db63710..3f3455a 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -71,10 +71,6 @@
 void	xfs_buf_iodone_callbacks(struct xfs_buf *);
 void	xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
 
-void	xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
-			       enum xfs_blft);
-void	xfs_trans_buf_copy_type(struct xfs_buf *dst_bp, struct xfs_buf *src_bp);
-
 extern kmem_zone_t	*xfs_buf_item_zone;
 
 #endif	/* __XFS_BUF_ITEM_H__ */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 20bf8e8..796272a 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -18,20 +18,20 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
@@ -129,56 +129,6 @@
 	kmem_zone_free(xfs_da_state_zone, state);
 }
 
-void
-xfs_da3_node_hdr_from_disk(
-	struct xfs_da3_icnode_hdr	*to,
-	struct xfs_da_intnode		*from)
-{
-	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
-	       from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
-
-	if (from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
-		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
-
-		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
-		to->back = be32_to_cpu(hdr3->info.hdr.back);
-		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
-		to->count = be16_to_cpu(hdr3->__count);
-		to->level = be16_to_cpu(hdr3->__level);
-		return;
-	}
-	to->forw = be32_to_cpu(from->hdr.info.forw);
-	to->back = be32_to_cpu(from->hdr.info.back);
-	to->magic = be16_to_cpu(from->hdr.info.magic);
-	to->count = be16_to_cpu(from->hdr.__count);
-	to->level = be16_to_cpu(from->hdr.__level);
-}
-
-void
-xfs_da3_node_hdr_to_disk(
-	struct xfs_da_intnode		*to,
-	struct xfs_da3_icnode_hdr	*from)
-{
-	ASSERT(from->magic == XFS_DA_NODE_MAGIC ||
-	       from->magic == XFS_DA3_NODE_MAGIC);
-
-	if (from->magic == XFS_DA3_NODE_MAGIC) {
-		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
-
-		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
-		hdr3->info.hdr.back = cpu_to_be32(from->back);
-		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
-		hdr3->__count = cpu_to_be16(from->count);
-		hdr3->__level = cpu_to_be16(from->level);
-		return;
-	}
-	to->hdr.info.forw = cpu_to_be32(from->forw);
-	to->hdr.info.back = cpu_to_be32(from->back);
-	to->hdr.info.magic = cpu_to_be16(from->magic);
-	to->hdr.__count = cpu_to_be16(from->count);
-	to->hdr.__level = cpu_to_be16(from->level);
-}
-
 static bool
 xfs_da3_node_verify(
 	struct xfs_buf		*bp)
@@ -186,8 +136,11 @@
 	struct xfs_mount	*mp = bp->b_target->bt_mount;
 	struct xfs_da_intnode	*hdr = bp->b_addr;
 	struct xfs_da3_icnode_hdr ichdr;
+	const struct xfs_dir_ops *ops;
 
-	xfs_da3_node_hdr_from_disk(&ichdr, hdr);
+	ops = xfs_dir_get_ops(mp, NULL);
+
+	ops->node_hdr_from_disk(&ichdr, hdr);
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
@@ -354,11 +307,12 @@
 	struct xfs_da3_icnode_hdr ichdr = {0};
 	struct xfs_buf		*bp;
 	int			error;
+	struct xfs_inode	*dp = args->dp;
 
 	trace_xfs_da_node_create(args);
 	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
 
-	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
+	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 	if (error)
 		return(error);
 	bp->b_ops = &xfs_da3_node_buf_ops;
@@ -377,9 +331,9 @@
 	}
 	ichdr.level = level;
 
-	xfs_da3_node_hdr_to_disk(node, &ichdr);
+	dp->d_ops->node_hdr_to_disk(node, &ichdr);
 	xfs_trans_log_buf(tp, bp,
-		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 
 	*bpp = bp;
 	return(0);
@@ -589,8 +543,8 @@
 	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
 		struct xfs_da3_icnode_hdr nodehdr;
 
-		xfs_da3_node_hdr_from_disk(&nodehdr, oldroot);
-		btree = xfs_da3_node_tree_p(oldroot);
+		dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
+		btree = dp->d_ops->node_tree_p(oldroot);
 		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
 		level = nodehdr.level;
 
@@ -604,8 +558,8 @@
 		struct xfs_dir2_leaf_entry *ents;
 
 		leaf = (xfs_dir2_leaf_t *)oldroot;
-		xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-		ents = xfs_dir3_leaf_ents_p(leaf);
+		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+		ents = dp->d_ops->leaf_ents_p(leaf);
 
 		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -649,14 +603,14 @@
 		return error;
 
 	node = bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
-	btree = xfs_da3_node_tree_p(node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+	btree = dp->d_ops->node_tree_p(node);
 	btree[0].hashval = cpu_to_be32(blk1->hashval);
 	btree[0].before = cpu_to_be32(blk1->blkno);
 	btree[1].hashval = cpu_to_be32(blk2->hashval);
 	btree[1].before = cpu_to_be32(blk2->blkno);
 	nodehdr.count = 2;
-	xfs_da3_node_hdr_to_disk(node, &nodehdr);
+	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 
 #ifdef DEBUG
 	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
@@ -693,11 +647,12 @@
 	int			newcount;
 	int			error;
 	int			useextra;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_split(state->args);
 
 	node = oldblk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 
 	/*
 	 * With V2 dirs the extra block is data or freespace.
@@ -744,7 +699,7 @@
 	 * If we had double-split op below us, then add the extra block too.
 	 */
 	node = oldblk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 	if (oldblk->index <= nodehdr.count) {
 		oldblk->index++;
 		xfs_da3_node_add(state, oldblk, addblk);
@@ -793,15 +748,16 @@
 	int			count;
 	int			tmp;
 	int			swap = 0;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_rebalance(state->args);
 
 	node1 = blk1->bp->b_addr;
 	node2 = blk2->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
-	xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
-	btree1 = xfs_da3_node_tree_p(node1);
-	btree2 = xfs_da3_node_tree_p(node2);
+	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+	btree1 = dp->d_ops->node_tree_p(node1);
+	btree2 = dp->d_ops->node_tree_p(node2);
 
 	/*
 	 * Figure out how many entries need to move, and in which direction.
@@ -814,10 +770,10 @@
 		tmpnode = node1;
 		node1 = node2;
 		node2 = tmpnode;
-		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
-		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
-		btree1 = xfs_da3_node_tree_p(node1);
-		btree2 = xfs_da3_node_tree_p(node2);
+		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+		btree1 = dp->d_ops->node_tree_p(node1);
+		btree2 = dp->d_ops->node_tree_p(node2);
 		swap = 1;
 	}
 
@@ -879,15 +835,14 @@
 	/*
 	 * Log header of node 1 and all current bits of node 2.
 	 */
-	xfs_da3_node_hdr_to_disk(node1, &nodehdr1);
+	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
 	xfs_trans_log_buf(tp, blk1->bp,
-		XFS_DA_LOGRANGE(node1, &node1->hdr,
-				xfs_da3_node_hdr_size(node1)));
+		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
 
-	xfs_da3_node_hdr_to_disk(node2, &nodehdr2);
+	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
 	xfs_trans_log_buf(tp, blk2->bp,
 		XFS_DA_LOGRANGE(node2, &node2->hdr,
-				xfs_da3_node_hdr_size(node2) +
+				dp->d_ops->node_hdr_size +
 				(sizeof(btree2[0]) * nodehdr2.count)));
 
 	/*
@@ -897,10 +852,10 @@
 	if (swap) {
 		node1 = blk1->bp->b_addr;
 		node2 = blk2->bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
-		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
-		btree1 = xfs_da3_node_tree_p(node1);
-		btree2 = xfs_da3_node_tree_p(node2);
+		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+		btree1 = dp->d_ops->node_tree_p(node1);
+		btree2 = dp->d_ops->node_tree_p(node2);
 	}
 	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
 	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
@@ -927,12 +882,13 @@
 	struct xfs_da3_icnode_hdr nodehdr;
 	struct xfs_da_node_entry *btree;
 	int			tmp;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_add(state->args);
 
 	node = oldblk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
-	btree = xfs_da3_node_tree_p(node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+	btree = dp->d_ops->node_tree_p(node);
 
 	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
 	ASSERT(newblk->blkno != 0);
@@ -955,9 +911,9 @@
 				tmp + sizeof(*btree)));
 
 	nodehdr.count += 1;
-	xfs_da3_node_hdr_to_disk(node, &nodehdr);
+	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 	xfs_trans_log_buf(state->args->trans, oldblk->bp,
-		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 
 	/*
 	 * Copy the last hash value from the oldblk to propagate upwards.
@@ -1094,6 +1050,7 @@
 	struct xfs_da3_icnode_hdr oldroothdr;
 	struct xfs_da_node_entry *btree;
 	int			error;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_root_join(state->args);
 
@@ -1101,7 +1058,7 @@
 
 	args = state->args;
 	oldroot = root_blk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&oldroothdr, oldroot);
+	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
 	ASSERT(oldroothdr.forw == 0);
 	ASSERT(oldroothdr.back == 0);
 
@@ -1115,10 +1072,10 @@
 	 * Read in the (only) child block, then copy those bytes into
 	 * the root block's buffer and free the original child block.
 	 */
-	btree = xfs_da3_node_tree_p(oldroot);
+	btree = dp->d_ops->node_tree_p(oldroot);
 	child = be32_to_cpu(btree[0].before);
 	ASSERT(child != 0);
-	error = xfs_da3_node_read(args->trans, args->dp, child, -1, &bp,
+	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
 					     args->whichfork);
 	if (error)
 		return error;
@@ -1168,6 +1125,7 @@
 	int			error;
 	int			retval;
 	int			i;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_toosmall(state->args);
 
@@ -1179,7 +1137,7 @@
 	blk = &state->path.blk[ state->path.active-1 ];
 	info = blk->bp->b_addr;
 	node = (xfs_da_intnode_t *)info;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 	if (nodehdr.count > (state->node_ents >> 1)) {
 		*action = 0;	/* blk over 50%, don't try to join */
 		return(0);	/* blk over 50%, don't try to join */
@@ -1231,13 +1189,13 @@
 			blkno = nodehdr.back;
 		if (blkno == 0)
 			continue;
-		error = xfs_da3_node_read(state->args->trans, state->args->dp,
+		error = xfs_da3_node_read(state->args->trans, dp,
 					blkno, -1, &bp, state->args->whichfork);
 		if (error)
 			return(error);
 
 		node = bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&thdr, node);
+		dp->d_ops->node_hdr_from_disk(&thdr, node);
 		xfs_trans_brelse(state->args->trans, bp);
 
 		if (count - thdr.count >= 0)
@@ -1275,6 +1233,7 @@
  */
 STATIC uint
 xfs_da3_node_lasthash(
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp,
 	int			*count)
 {
@@ -1283,12 +1242,12 @@
 	struct xfs_da3_icnode_hdr nodehdr;
 
 	node = bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 	if (count)
 		*count = nodehdr.count;
 	if (!nodehdr.count)
 		return 0;
-	btree = xfs_da3_node_tree_p(node);
+	btree = dp->d_ops->node_tree_p(node);
 	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
 }
 
@@ -1307,6 +1266,7 @@
 	xfs_dahash_t		lasthash=0;
 	int			level;
 	int			count;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_fixhashpath(state->args);
 
@@ -1319,12 +1279,12 @@
 			return;
 		break;
 	case XFS_DIR2_LEAFN_MAGIC:
-		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
+		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
 		if (count == 0)
 			return;
 		break;
 	case XFS_DA_NODE_MAGIC:
-		lasthash = xfs_da3_node_lasthash(blk->bp, &count);
+		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
 		if (count == 0)
 			return;
 		break;
@@ -1333,8 +1293,8 @@
 		struct xfs_da3_icnode_hdr nodehdr;
 
 		node = blk->bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr, node);
-		btree = xfs_da3_node_tree_p(node);
+		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+		btree = dp->d_ops->node_tree_p(node);
 		if (be32_to_cpu(btree->hashval) == lasthash)
 			break;
 		blk->hashval = lasthash;
@@ -1360,11 +1320,12 @@
 	struct xfs_da_node_entry *btree;
 	int			index;
 	int			tmp;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_remove(state->args);
 
 	node = drop_blk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&nodehdr, node);
+	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 	ASSERT(drop_blk->index < nodehdr.count);
 	ASSERT(drop_blk->index >= 0);
 
@@ -1372,7 +1333,7 @@
 	 * Copy over the offending entry, or just zero it out.
 	 */
 	index = drop_blk->index;
-	btree = xfs_da3_node_tree_p(node);
+	btree = dp->d_ops->node_tree_p(node);
 	if (index < nodehdr.count - 1) {
 		tmp  = nodehdr.count - index - 1;
 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
@@ -1385,9 +1346,9 @@
 	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
 	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
 	nodehdr.count -= 1;
-	xfs_da3_node_hdr_to_disk(node, &nodehdr);
+	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
-	    XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 
 	/*
 	 * Copy the last hash value from the block to propagate upwards.
@@ -1414,15 +1375,16 @@
 	struct xfs_trans	*tp;
 	int			sindex;
 	int			tmp;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_node_unbalance(state->args);
 
 	drop_node = drop_blk->bp->b_addr;
 	save_node = save_blk->bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&drop_hdr, drop_node);
-	xfs_da3_node_hdr_from_disk(&save_hdr, save_node);
-	drop_btree = xfs_da3_node_tree_p(drop_node);
-	save_btree = xfs_da3_node_tree_p(save_node);
+	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
+	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
+	drop_btree = dp->d_ops->node_tree_p(drop_node);
+	save_btree = dp->d_ops->node_tree_p(save_node);
 	tp = state->args->trans;
 
 	/*
@@ -1456,10 +1418,10 @@
 	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
 	save_hdr.count += drop_hdr.count;
 
-	xfs_da3_node_hdr_to_disk(save_node, &save_hdr);
+	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
 	xfs_trans_log_buf(tp, save_blk->bp,
 		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
-				xfs_da3_node_hdr_size(save_node)));
+				dp->d_ops->node_hdr_size));
 
 	/*
 	 * Save the last hashval in the remaining block for upward propagation.
@@ -1501,6 +1463,7 @@
 	int			max;
 	int			error;
 	int			retval;
+	struct xfs_inode	*dp = state->args->dp;
 
 	args = state->args;
 
@@ -1536,7 +1499,8 @@
 		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
 		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
 			blk->magic = XFS_DIR2_LEAFN_MAGIC;
-			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
+			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
+							       blk->bp, NULL);
 			break;
 		}
 
@@ -1547,8 +1511,8 @@
 		 * Search an intermediate node for a match.
 		 */
 		node = blk->bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr, node);
-		btree = xfs_da3_node_tree_p(node);
+		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+		btree = dp->d_ops->node_tree_p(node);
 
 		max = nodehdr.count;
 		blk->hashval = be32_to_cpu(btree[max - 1].hashval);
@@ -1643,6 +1607,7 @@
  */
 STATIC int
 xfs_da3_node_order(
+	struct xfs_inode *dp,
 	struct xfs_buf	*node1_bp,
 	struct xfs_buf	*node2_bp)
 {
@@ -1655,10 +1620,10 @@
 
 	node1 = node1_bp->b_addr;
 	node2 = node2_bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&node1hdr, node1);
-	xfs_da3_node_hdr_from_disk(&node2hdr, node2);
-	btree1 = xfs_da3_node_tree_p(node1);
-	btree2 = xfs_da3_node_tree_p(node2);
+	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
+	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
+	btree1 = dp->d_ops->node_tree_p(node1);
+	btree2 = dp->d_ops->node_tree_p(node2);
 
 	if (node1hdr.count > 0 && node2hdr.count > 0 &&
 	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
@@ -1685,6 +1650,7 @@
 	struct xfs_buf		*bp;
 	int			before = 0;
 	int			error;
+	struct xfs_inode	*dp = state->args->dp;
 
 	/*
 	 * Set up environment.
@@ -1702,10 +1668,10 @@
 		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
 		break;
 	case XFS_DIR2_LEAFN_MAGIC:
-		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
+		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
 		break;
 	case XFS_DA_NODE_MAGIC:
-		before = xfs_da3_node_order(old_blk->bp, new_blk->bp);
+		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
 		break;
 	}
 
@@ -1720,7 +1686,7 @@
 		new_info->forw = cpu_to_be32(old_blk->blkno);
 		new_info->back = old_info->back;
 		if (old_info->back) {
-			error = xfs_da3_node_read(args->trans, args->dp,
+			error = xfs_da3_node_read(args->trans, dp,
 						be32_to_cpu(old_info->back),
 						-1, &bp, args->whichfork);
 			if (error)
@@ -1741,7 +1707,7 @@
 		new_info->forw = old_info->forw;
 		new_info->back = cpu_to_be32(old_blk->blkno);
 		if (old_info->forw) {
-			error = xfs_da3_node_read(args->trans, args->dp,
+			error = xfs_da3_node_read(args->trans, dp,
 						be32_to_cpu(old_info->forw),
 						-1, &bp, args->whichfork);
 			if (error)
@@ -1861,6 +1827,7 @@
 	xfs_dablk_t		blkno = 0;
 	int			level;
 	int			error;
+	struct xfs_inode	*dp = state->args->dp;
 
 	trace_xfs_da_path_shift(state->args);
 
@@ -1876,8 +1843,8 @@
 	level = (path->active-1) - 1;	/* skip bottom layer in path */
 	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
 		node = blk->bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr, node);
-		btree = xfs_da3_node_tree_p(node);
+		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+		btree = dp->d_ops->node_tree_p(node);
 
 		if (forward && (blk->index < nodehdr.count - 1)) {
 			blk->index++;
@@ -1911,7 +1878,7 @@
 		 * Read the next child block.
 		 */
 		blk->blkno = blkno;
-		error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
+		error = xfs_da3_node_read(args->trans, dp, blkno, -1,
 					&blk->bp, args->whichfork);
 		if (error)
 			return(error);
@@ -1933,8 +1900,8 @@
 		case XFS_DA3_NODE_MAGIC:
 			blk->magic = XFS_DA_NODE_MAGIC;
 			node = (xfs_da_intnode_t *)info;
-			xfs_da3_node_hdr_from_disk(&nodehdr, node);
-			btree = xfs_da3_node_tree_p(node);
+			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+			btree = dp->d_ops->node_tree_p(node);
 			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
 			if (forward)
 				blk->index = 0;
@@ -1947,16 +1914,15 @@
 			blk->magic = XFS_ATTR_LEAF_MAGIC;
 			ASSERT(level == path->active-1);
 			blk->index = 0;
-			blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
-							      NULL);
+			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
 			break;
 		case XFS_DIR2_LEAFN_MAGIC:
 		case XFS_DIR3_LEAFN_MAGIC:
 			blk->magic = XFS_DIR2_LEAFN_MAGIC;
 			ASSERT(level == path->active-1);
 			blk->index = 0;
-			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
-							       NULL);
+			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
+							       blk->bp, NULL);
 			break;
 		default:
 			ASSERT(0);
@@ -2163,7 +2129,7 @@
 	struct xfs_dir2_leaf	*dead_leaf2;
 	struct xfs_da_node_entry *btree;
 	struct xfs_da3_icnode_hdr par_hdr;
-	struct xfs_inode	*ip;
+	struct xfs_inode	*dp;
 	struct xfs_trans	*tp;
 	struct xfs_mount	*mp;
 	struct xfs_buf		*dead_buf;
@@ -2187,12 +2153,12 @@
 	dead_buf = *dead_bufp;
 	dead_blkno = *dead_blknop;
 	tp = args->trans;
-	ip = args->dp;
+	dp = args->dp;
 	w = args->whichfork;
 	ASSERT(w == XFS_DATA_FORK);
-	mp = ip->i_mount;
+	mp = dp->i_mount;
 	lastoff = mp->m_dirfreeblk;
-	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
+	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
 	if (error)
 		return error;
 	if (unlikely(lastoff == 0)) {
@@ -2204,7 +2170,7 @@
 	 * Read the last block in the btree space.
 	 */
 	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
-	error = xfs_da3_node_read(tp, ip, last_blkno, -1, &last_buf, w);
+	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
 	if (error)
 		return error;
 	/*
@@ -2222,16 +2188,16 @@
 		struct xfs_dir2_leaf_entry *ents;
 
 		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
-		xfs_dir3_leaf_hdr_from_disk(&leafhdr, dead_leaf2);
-		ents = xfs_dir3_leaf_ents_p(dead_leaf2);
+		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
+		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
 		dead_level = 0;
 		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
 	} else {
 		struct xfs_da3_icnode_hdr deadhdr;
 
 		dead_node = (xfs_da_intnode_t *)dead_info;
-		xfs_da3_node_hdr_from_disk(&deadhdr, dead_node);
-		btree = xfs_da3_node_tree_p(dead_node);
+		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
+		btree = dp->d_ops->node_tree_p(dead_node);
 		dead_level = deadhdr.level;
 		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
 	}
@@ -2240,7 +2206,7 @@
 	 * If the moved block has a left sibling, fix up the pointers.
 	 */
 	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
-		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
+		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
 		if (error)
 			goto done;
 		sib_info = sib_buf->b_addr;
@@ -2262,7 +2228,7 @@
 	 * If the moved block has a right sibling, fix up the pointers.
 	 */
 	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
-		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
+		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
 		if (error)
 			goto done;
 		sib_info = sib_buf->b_addr;
@@ -2286,11 +2252,11 @@
 	 * Walk down the tree looking for the parent of the moved block.
 	 */
 	for (;;) {
-		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
+		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
 		if (error)
 			goto done;
 		par_node = par_buf->b_addr;
-		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
+		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
 		if (level >= 0 && level != par_hdr.level + 1) {
 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
 					 XFS_ERRLEVEL_LOW, mp);
@@ -2298,7 +2264,7 @@
 			goto done;
 		}
 		level = par_hdr.level;
-		btree = xfs_da3_node_tree_p(par_node);
+		btree = dp->d_ops->node_tree_p(par_node);
 		for (entno = 0;
 		     entno < par_hdr.count &&
 		     be32_to_cpu(btree[entno].hashval) < dead_hash;
@@ -2337,18 +2303,18 @@
 			error = XFS_ERROR(EFSCORRUPTED);
 			goto done;
 		}
-		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
+		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
 		if (error)
 			goto done;
 		par_node = par_buf->b_addr;
-		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
+		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
 		if (par_hdr.level != level) {
 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
 					 XFS_ERRLEVEL_LOW, mp);
 			error = XFS_ERROR(EFSCORRUPTED);
 			goto done;
 		}
-		btree = xfs_da3_node_tree_p(par_node);
+		btree = dp->d_ops->node_tree_p(par_node);
 		entno = 0;
 	}
 	/*
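
The pervasive dp->d_ops conversions in this file replace format-testing helpers (xfs_da3_node_hdr_from_disk and friends) with a per-inode operations vector chosen once, when the inode is set up. A sketch of the pattern; the fields shown are the ones used in the hunks above, and the struct name is illustrative:

struct dir_ops_sketch {
	int	node_hdr_size;		/* v2 or v3 header size, fixed per fs */
	void	(*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
				      struct xfs_da_intnode *from);
	void	(*node_hdr_to_disk)(struct xfs_da_intnode *to,
				    struct xfs_da3_icnode_hdr *from);
	struct xfs_da_node_entry *(*node_tree_p)(struct xfs_da_intnode *dap);
};

Each call site then becomes dp->d_ops->node_hdr_from_disk(...) instead of branching on the block magic at runtime.
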
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index b1f2679..6e95ea7 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -23,146 +23,7 @@
 struct xfs_inode;
 struct xfs_trans;
 struct zone;
-
-/*========================================================================
- * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
- *
- * It is used to manage a doubly linked list of all blocks at the same
- * level in the Btree, and to identify which type of block this is.
- */
-#define XFS_DA_NODE_MAGIC	0xfebe	/* magic number: non-leaf blocks */
-#define XFS_ATTR_LEAF_MAGIC	0xfbee	/* magic number: attribute leaf blks */
-#define	XFS_DIR2_LEAF1_MAGIC	0xd2f1	/* magic number: v2 dirlf single blks */
-#define	XFS_DIR2_LEAFN_MAGIC	0xd2ff	/* magic number: v2 dirlf multi blks */
-
-typedef struct xfs_da_blkinfo {
-	__be32		forw;			/* previous block in list */
-	__be32		back;			/* following block in list */
-	__be16		magic;			/* validity check on block */
-	__be16		pad;			/* unused */
-} xfs_da_blkinfo_t;
-
-/*
- * CRC enabled directory structure types
- *
- * The headers change size for the additional verification information, but
- * otherwise the tree layouts and contents are unchanged. Hence the da btree
- * code can use the struct xfs_da_blkinfo for manipulating the tree links and
- * magic numbers without modification for both v2 and v3 nodes.
- */
-#define XFS_DA3_NODE_MAGIC	0x3ebe	/* magic number: non-leaf blocks */
-#define XFS_ATTR3_LEAF_MAGIC	0x3bee	/* magic number: attribute leaf blks */
-#define	XFS_DIR3_LEAF1_MAGIC	0x3df1	/* magic number: v2 dirlf single blks */
-#define	XFS_DIR3_LEAFN_MAGIC	0x3dff	/* magic number: v2 dirlf multi blks */
-
-struct xfs_da3_blkinfo {
-	/*
-	 * the node link manipulation code relies on the fact that the first
-	 * element of this structure is the struct xfs_da_blkinfo so it can
-	 * ignore the differences in the rest of the structures.
-	 */
-	struct xfs_da_blkinfo	hdr;
-	__be32			crc;	/* CRC of block */
-	__be64			blkno;	/* first block of the buffer */
-	__be64			lsn;	/* sequence number of last write */
-	uuid_t			uuid;	/* filesystem we belong to */
-	__be64			owner;	/* inode that owns the block */
-};
-
-/*
- * This is the structure of the root and intermediate nodes in the Btree.
- * The leaf nodes are defined above.
- *
- * Entries are not packed.
- *
- * Since we have duplicate keys, use a binary search but always follow
- * all matches in the block, not just the first match found.
- */
-#define	XFS_DA_NODE_MAXDEPTH	5	/* max depth of Btree */
-
-typedef struct xfs_da_node_hdr {
-	struct xfs_da_blkinfo	info;	/* block type, links, etc. */
-	__be16			__count; /* count of active entries */
-	__be16			__level; /* level above leaves (leaf == 0) */
-} xfs_da_node_hdr_t;
-
-struct xfs_da3_node_hdr {
-	struct xfs_da3_blkinfo	info;	/* block type, links, etc. */
-	__be16			__count; /* count of active entries */
-	__be16			__level; /* level above leaves (leaf == 0) */
-	__be32			__pad32;
-};
-
-#define XFS_DA3_NODE_CRC_OFF	(offsetof(struct xfs_da3_node_hdr, info.crc))
-
-typedef struct xfs_da_node_entry {
-	__be32	hashval;	/* hash value for this descendant */
-	__be32	before;		/* Btree block before this key */
-} xfs_da_node_entry_t;
-
-typedef struct xfs_da_intnode {
-	struct xfs_da_node_hdr	hdr;
-	struct xfs_da_node_entry __btree[];
-} xfs_da_intnode_t;
-
-struct xfs_da3_intnode {
-	struct xfs_da3_node_hdr	hdr;
-	struct xfs_da_node_entry __btree[];
-};
-
-/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
-	__uint32_t	forw;
-	__uint32_t	back;
-	__uint16_t	magic;
-	__uint16_t	count;
-	__uint16_t	level;
-};
-
-extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
-				       struct xfs_da_intnode *from);
-extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
-				     struct xfs_da3_icnode_hdr *from);
-
-static inline int
-__xfs_da3_node_hdr_size(bool v3)
-{
-	if (v3)
-		return sizeof(struct xfs_da3_node_hdr);
-	return sizeof(struct xfs_da_node_hdr);
-}
-static inline int
-xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
-{
-	bool	v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
-
-	return __xfs_da3_node_hdr_size(v3);
-}
-
-static inline struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
-	if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
-		struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
-		return dap3->__btree;
-	}
-	return dap->__btree;
-}
-
-extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
-				      struct xfs_da_intnode *from);
-extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
-				    struct xfs_da3_icnode_hdr *from);
-
-#define	XFS_LBSIZE(mp)	(mp)->m_sb.sb_blocksize
+struct xfs_dir_ops;
 
 /*========================================================================
  * Btree searching and modification structure definitions.
@@ -309,8 +170,6 @@
 			 xfs_dablk_t bno, xfs_daddr_t mappedbno,
 			 struct xfs_buf **bpp, int which_fork);
 
-extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
-
 /*
  * Utility routines.
  */
diff --git a/fs/xfs/xfs_da_format.c b/fs/xfs/xfs_da_format.c
new file mode 100644
index 0000000..e6c83e1
--- /dev/null
+++ b/fs/xfs/xfs_da_format.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_inode.h"
+#include "xfs_dir2.h"
+
+/*
+ * Shortform directory ops
+ */
+static int
+xfs_dir2_sf_entsize(
+	struct xfs_dir2_sf_hdr	*hdr,
+	int			len)
+{
+	int count = sizeof(struct xfs_dir2_sf_entry);	/* namelen + offset */
+
+	count += len;					/* name */
+	count += hdr->i8count ? sizeof(xfs_dir2_ino8_t) :
+				sizeof(xfs_dir2_ino4_t); /* ino # */
+	return count;
+}
+
+static int
+xfs_dir3_sf_entsize(
+	struct xfs_dir2_sf_hdr	*hdr,
+	int			len)
+{
+	return xfs_dir2_sf_entsize(hdr, len) + sizeof(__uint8_t);
+}
+
+static struct xfs_dir2_sf_entry *
+xfs_dir2_sf_nextentry(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep)
+{
+	return (struct xfs_dir2_sf_entry *)
+		((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
+}
+
+static struct xfs_dir2_sf_entry *
+xfs_dir3_sf_nextentry(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep)
+{
+	return (struct xfs_dir2_sf_entry *)
+		((char *)sfep + xfs_dir3_sf_entsize(hdr, sfep->namelen));
+}
+
+
+/*
+ * For filetype enabled shortform directories, the file type field is stored at
+ * the end of the name.  Because it's only a single byte, endian conversion is
+ * not necessary. For non-filetype enabled directories, the type is always
+ * unknown and we never store the value.
+ */
+static __uint8_t
+xfs_dir2_sfe_get_ftype(
+	struct xfs_dir2_sf_entry *sfep)
+{
+	return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_sfe_put_ftype(
+	struct xfs_dir2_sf_entry *sfep,
+	__uint8_t		ftype)
+{
+	ASSERT(ftype < XFS_DIR3_FT_MAX);
+}
+
+static __uint8_t
+xfs_dir3_sfe_get_ftype(
+	struct xfs_dir2_sf_entry *sfep)
+{
+	__uint8_t	ftype;
+
+	ftype = sfep->name[sfep->namelen];
+	if (ftype >= XFS_DIR3_FT_MAX)
+		return XFS_DIR3_FT_UNKNOWN;
+	return ftype;
+}
+
+static void
+xfs_dir3_sfe_put_ftype(
+	struct xfs_dir2_sf_entry *sfep,
+	__uint8_t		ftype)
+{
+	ASSERT(ftype < XFS_DIR3_FT_MAX);
+
+	sfep->name[sfep->namelen] = ftype;
+}
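
A standalone sketch of the placement: the v3 helpers stash the type in the
single byte just past the name, so put followed by get round-trips through
name[namelen]. FT_MAX, FT_UNKNOWN and the entry layout below are stand-ins
for XFS_DIR3_FT_MAX, XFS_DIR3_FT_UNKNOWN and struct xfs_dir2_sf_entry, not
the kernel definitions.

#include <assert.h>
#include <stdint.h>

#define FT_MAX		9	/* stand-in for XFS_DIR3_FT_MAX */
#define FT_UNKNOWN	0	/* stand-in for XFS_DIR3_FT_UNKNOWN */

struct sf_entry {
	uint8_t namelen;
	uint8_t name[16];	/* name bytes, then ftype, then inode number */
};

static void put_ftype(struct sf_entry *sfep, uint8_t ftype)
{
	assert(ftype < FT_MAX);
	sfep->name[sfep->namelen] = ftype;	/* byte just past the name */
}

static uint8_t get_ftype(struct sf_entry *sfep)
{
	uint8_t ftype = sfep->name[sfep->namelen];

	return ftype < FT_MAX ? ftype : FT_UNKNOWN;
}

int main(void)
{
	struct sf_entry e = { .namelen = 3, .name = "foo" };

	put_ftype(&e, 4);
	assert(get_ftype(&e) == 4);
	return 0;
}
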
+
+/*
+ * Inode numbers in short-form directories can come in two versions,
+ * either 4 bytes or 8 bytes wide.  These helpers deal with the
+ * two forms transparently by looking at the header's i8count field.
+ *
+ * For 64-bit inode numbers the most significant byte must be zero.
+ */
+static xfs_ino_t
+xfs_dir2_sf_get_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	xfs_dir2_inou_t		*from)
+{
+	if (hdr->i8count)
+		return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
+	else
+		return get_unaligned_be32(&from->i4.i);
+}
+
+static void
+xfs_dir2_sf_put_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	xfs_dir2_inou_t		*to,
+	xfs_ino_t		ino)
+{
+	ASSERT((ino & 0xff00000000000000ULL) == 0);
+
+	if (hdr->i8count)
+		put_unaligned_be64(ino, &to->i8.i);
+	else
+		put_unaligned_be32(ino, &to->i4.i);
+}
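
The packing is easy to exercise in isolation. A minimal userspace sketch with
open-coded big-endian helpers standing in for the kernel's
{get,put}_unaligned_be64(); the assertion on the top byte is what makes the
56-bit mask on the read side lossless:

#include <assert.h>
#include <stdint.h>

static void put_be64(uint64_t v, uint8_t *p)	/* ~ put_unaligned_be64 */
{
	for (int i = 7; i >= 0; i--, v >>= 8)
		p[i] = v & 0xff;
}

static uint64_t get_be64(const uint8_t *p)	/* ~ get_unaligned_be64 */
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t slot[8];
	uint64_t ino = 0x00123456789abcdeULL;

	assert((ino & 0xff00000000000000ULL) == 0);	/* top byte zero */
	put_be64(ino, slot);
	/* mask mirrors the i8count case of xfs_dir2_sf_get_ino() */
	assert((get_be64(slot) & 0x00ffffffffffffffULL) == ino);
	return 0;
}
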
+
+static xfs_ino_t
+xfs_dir2_sf_get_parent_ino(
+	struct xfs_dir2_sf_hdr	*hdr)
+{
+	return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
+}
+
+static void
+xfs_dir2_sf_put_parent_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	xfs_ino_t		ino)
+{
+	xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
+}
+
+/*
+ * In short-form directory entries the inode numbers are stored at a variable
+ * offset behind the entry name. If the entry stores a filetype value, then it
+ * sits between the name and the inode number. Hence the inode numbers may only
+ * be accessed through the helpers below.
+ */
+static xfs_ino_t
+xfs_dir2_sfe_get_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep)
+{
+	return xfs_dir2_sf_get_ino(hdr,
+				(xfs_dir2_inou_t *)&sfep->name[sfep->namelen]);
+}
+
+static void
+xfs_dir2_sfe_put_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep,
+	xfs_ino_t		ino)
+{
+	xfs_dir2_sf_put_ino(hdr,
+			    (xfs_dir2_inou_t *)&sfep->name[sfep->namelen], ino);
+}
+
+static xfs_ino_t
+xfs_dir3_sfe_get_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep)
+{
+	return xfs_dir2_sf_get_ino(hdr,
+			(xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1]);
+}
+
+static void
+xfs_dir3_sfe_put_ino(
+	struct xfs_dir2_sf_hdr	*hdr,
+	struct xfs_dir2_sf_entry *sfep,
+	xfs_ino_t		ino)
+{
+	xfs_dir2_sf_put_ino(hdr,
+			(xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1], ino);
+}
+
+
+/*
+ * Directory data block operations
+ */
+
+/*
+ * For special situations, the dirent size ends up fixed because we always know
+ * what the size of the entry is. That's true for "." and "..", so we know
+ * their sizes are fixed and hence their offsets are constant, as is the offset
+ * of the first real entry.
+ *
+ * Hence this calculation is written as a macro so that it can be evaluated at
+ * compile time, allowing certain offsets to be computed directly in structure
+ * initialisers. There are two macros - one for dirents with ftype and one
+ * without - so there are no unresolvable conditionals in the calculations.
+ * We use round_up() because XFS_DIR2_DATA_ALIGN is always a power of 2 and,
+ * unlike roundup(), the compiler doesn't reject it.
+ */
+#define XFS_DIR2_DATA_ENTSIZE(n)					\
+	round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) +	\
+		 sizeof(xfs_dir2_data_off_t)), XFS_DIR2_DATA_ALIGN)
+
+#define XFS_DIR3_DATA_ENTSIZE(n)					\
+	round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) +	\
+		 sizeof(xfs_dir2_data_off_t) + sizeof(__uint8_t)),	\
+		XFS_DIR2_DATA_ALIGN)
+
+static int
+xfs_dir2_data_entsize(
+	int			n)
+{
+	return XFS_DIR2_DATA_ENTSIZE(n);
+}
+
+static int
+xfs_dir3_data_entsize(
+	int			n)
+{
+	return XFS_DIR3_DATA_ENTSIZE(n);
+}
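
A standalone arithmetic check of the two macros, assuming the v2 data entry
layout (8-byte inumber, 1-byte namelen, 2-byte tag) and XFS_DIR2_DATA_ALIGN
of 8: "." and ".." cost 16 bytes in both formats, and a 5-byte name is the
first point where the ftype byte tips an entry into the next alignment step.

#include <assert.h>

#define ALIGN8(x)	(((x) + 7) & ~7)	/* round_up(x, 8) */

static int entsize2(int n)	/* mirrors XFS_DIR2_DATA_ENTSIZE */
{
	return ALIGN8(8 + 1 + n + 2);	/* inumber + namelen + name + tag */
}

static int entsize3(int n)	/* mirrors XFS_DIR3_DATA_ENTSIZE */
{
	return ALIGN8(8 + 1 + n + 2 + 1);	/* ... plus the ftype byte */
}

int main(void)
{
	assert(entsize2(1) == 16 && entsize3(1) == 16);	/* "."  */
	assert(entsize2(2) == 16 && entsize3(2) == 16);	/* ".." */
	assert(entsize2(5) == 16 && entsize3(5) == 24);
	return 0;
}
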
+
+static __uint8_t
+xfs_dir2_data_get_ftype(
+	struct xfs_dir2_data_entry *dep)
+{
+	return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_data_put_ftype(
+	struct xfs_dir2_data_entry *dep,
+	__uint8_t		ftype)
+{
+	ASSERT(ftype < XFS_DIR3_FT_MAX);
+}
+
+static __uint8_t
+xfs_dir3_data_get_ftype(
+	struct xfs_dir2_data_entry *dep)
+{
+	__uint8_t	ftype = dep->name[dep->namelen];
+
+	ASSERT(ftype < XFS_DIR3_FT_MAX);
+	if (ftype >= XFS_DIR3_FT_MAX)
+		return XFS_DIR3_FT_UNKNOWN;
+	return ftype;
+}
+
+static void
+xfs_dir3_data_put_ftype(
+	struct xfs_dir2_data_entry *dep,
+	__uint8_t		type)
+{
+	ASSERT(type < XFS_DIR3_FT_MAX);
+	ASSERT(dep->namelen != 0);
+
+	dep->name[dep->namelen] = type;
+}
+
+/*
+ * Pointer to an entry's tag word.
+ */
+static __be16 *
+xfs_dir2_data_entry_tag_p(
+	struct xfs_dir2_data_entry *dep)
+{
+	return (__be16 *)((char *)dep +
+		xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
+}
+
+static __be16 *
+xfs_dir3_data_entry_tag_p(
+	struct xfs_dir2_data_entry *dep)
+{
+	return (__be16 *)((char *)dep +
+		xfs_dir3_data_entsize(dep->namelen) - sizeof(__be16));
+}
+
+/*
+ * location of . and .. in data space (always block 0)
+ */
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_dot_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_dotdot_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR2_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_first_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR2_DATA_ENTSIZE(1) +
+				XFS_DIR2_DATA_ENTSIZE(2));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_ftype_data_dotdot_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_ftype_data_first_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1) +
+				XFS_DIR3_DATA_ENTSIZE(2));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_dot_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_dotdot_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_first_entry_p(
+	struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1) +
+				XFS_DIR3_DATA_ENTSIZE(2));
+}
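
The same arithmetic pins down the constant offsets encoded below, assuming
sizeof(struct xfs_dir2_data_hdr) == 16 and sizeof(struct xfs_dir3_data_hdr)
== 64 (sizes inferred from the structure definitions, not quoted from the
kernel headers):

#include <assert.h>

#define ALIGN8(x)	(((x) + 7) & ~7)
#define ENTSIZE2(n)	ALIGN8(8 + 1 + (n) + 2)		/* v2 dirent */
#define ENTSIZE3(n)	ALIGN8(8 + 1 + (n) + 2 + 1)	/* v2 dirent + ftype */

int main(void)
{
	int dot2    = 16;			/* right after the v2 header */
	int dotdot2 = dot2 + ENTSIZE2(1);	/* "." is a 1-byte name */
	int first2  = dotdot2 + ENTSIZE2(2);	/* ".." is a 2-byte name */

	assert(dotdot2 == 32 && first2 == 48);

	int dot3    = 64;			/* after the larger v3 header */
	int dotdot3 = dot3 + ENTSIZE3(1);
	int first3  = dotdot3 + ENTSIZE3(2);

	assert(dotdot3 == 80 && first3 == 96);
	return 0;
}
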
+
+static struct xfs_dir2_data_free *
+xfs_dir2_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return hdr->bestfree;
+}
+
+static struct xfs_dir2_data_free *
+xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_entry_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_unused *
+xfs_dir2_data_unused_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_unused *)
+		((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_entry *)
+		((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+static struct xfs_dir2_data_unused *
+xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
+{
+	return (struct xfs_dir2_data_unused *)
+		((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+
+/*
+ * Directory Leaf block operations
+ */
+static int
+xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
+{
+	return (mp->m_dirblksize - sizeof(struct xfs_dir2_leaf_hdr)) /
+		(uint)sizeof(struct xfs_dir2_leaf_entry);
+}
+
+static struct xfs_dir2_leaf_entry *
+xfs_dir2_leaf_ents_p(struct xfs_dir2_leaf *lp)
+{
+	return lp->__ents;
+}
+
+static int
+xfs_dir3_max_leaf_ents(struct xfs_mount *mp)
+{
+	return (mp->m_dirblksize - sizeof(struct xfs_dir3_leaf_hdr)) /
+		(uint)sizeof(struct xfs_dir2_leaf_entry);
+}
+
+static struct xfs_dir2_leaf_entry *
+xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
+{
+	return ((struct xfs_dir3_leaf *)lp)->__ents;
+}
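
For concreteness, a worked count assuming a 4096-byte directory block, an
8-byte xfs_dir2_leaf_entry (two __be32 fields), and leaf header sizes of 16
(v2) and 64 (v3) inferred from the structure definitions:

#include <assert.h>

static int max_leaf_ents(int dirblksize, int hdrsize)
{
	return (dirblksize - hdrsize) / 8;	/* 8 == sizeof(leaf entry) */
}

int main(void)
{
	assert(max_leaf_ents(4096, 16) == 510);	/* v2 leaf */
	assert(max_leaf_ents(4096, 64) == 504);	/* v3 (CRC) leaf */
	return 0;
}
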
+
+static void
+xfs_dir2_leaf_hdr_from_disk(
+	struct xfs_dir3_icleaf_hdr	*to,
+	struct xfs_dir2_leaf		*from)
+{
+	to->forw = be32_to_cpu(from->hdr.info.forw);
+	to->back = be32_to_cpu(from->hdr.info.back);
+	to->magic = be16_to_cpu(from->hdr.info.magic);
+	to->count = be16_to_cpu(from->hdr.count);
+	to->stale = be16_to_cpu(from->hdr.stale);
+
+	ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
+	       to->magic == XFS_DIR2_LEAFN_MAGIC);
+}
+
+static void
+xfs_dir2_leaf_hdr_to_disk(
+	struct xfs_dir2_leaf		*to,
+	struct xfs_dir3_icleaf_hdr	*from)
+{
+	ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
+	       from->magic == XFS_DIR2_LEAFN_MAGIC);
+
+	to->hdr.info.forw = cpu_to_be32(from->forw);
+	to->hdr.info.back = cpu_to_be32(from->back);
+	to->hdr.info.magic = cpu_to_be16(from->magic);
+	to->hdr.count = cpu_to_be16(from->count);
+	to->hdr.stale = cpu_to_be16(from->stale);
+}
+
+static void
+xfs_dir3_leaf_hdr_from_disk(
+	struct xfs_dir3_icleaf_hdr	*to,
+	struct xfs_dir2_leaf		*from)
+{
+	struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
+
+	to->forw = be32_to_cpu(hdr3->info.hdr.forw);
+	to->back = be32_to_cpu(hdr3->info.hdr.back);
+	to->magic = be16_to_cpu(hdr3->info.hdr.magic);
+	to->count = be16_to_cpu(hdr3->count);
+	to->stale = be16_to_cpu(hdr3->stale);
+
+	ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
+	       to->magic == XFS_DIR3_LEAFN_MAGIC);
+}
+
+static void
+xfs_dir3_leaf_hdr_to_disk(
+	struct xfs_dir2_leaf		*to,
+	struct xfs_dir3_icleaf_hdr	*from)
+{
+	struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
+
+	ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
+	       from->magic == XFS_DIR3_LEAFN_MAGIC);
+
+	hdr3->info.hdr.forw = cpu_to_be32(from->forw);
+	hdr3->info.hdr.back = cpu_to_be32(from->back);
+	hdr3->info.hdr.magic = cpu_to_be16(from->magic);
+	hdr3->count = cpu_to_be16(from->count);
+	hdr3->stale = cpu_to_be16(from->stale);
+}
+
+
+/*
+ * Directory/Attribute Node block operations
+ */
+static struct xfs_da_node_entry *
+xfs_da2_node_tree_p(struct xfs_da_intnode *dap)
+{
+	return dap->__btree;
+}
+
+static struct xfs_da_node_entry *
+xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
+{
+	return ((struct xfs_da3_intnode *)dap)->__btree;
+}
+
+static void
+xfs_da2_node_hdr_from_disk(
+	struct xfs_da3_icnode_hdr	*to,
+	struct xfs_da_intnode		*from)
+{
+	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
+	to->forw = be32_to_cpu(from->hdr.info.forw);
+	to->back = be32_to_cpu(from->hdr.info.back);
+	to->magic = be16_to_cpu(from->hdr.info.magic);
+	to->count = be16_to_cpu(from->hdr.__count);
+	to->level = be16_to_cpu(from->hdr.__level);
+}
+
+static void
+xfs_da2_node_hdr_to_disk(
+	struct xfs_da_intnode		*to,
+	struct xfs_da3_icnode_hdr	*from)
+{
+	ASSERT(from->magic == XFS_DA_NODE_MAGIC);
+	to->hdr.info.forw = cpu_to_be32(from->forw);
+	to->hdr.info.back = cpu_to_be32(from->back);
+	to->hdr.info.magic = cpu_to_be16(from->magic);
+	to->hdr.__count = cpu_to_be16(from->count);
+	to->hdr.__level = cpu_to_be16(from->level);
+}
+
+static void
+xfs_da3_node_hdr_from_disk(
+	struct xfs_da3_icnode_hdr	*to,
+	struct xfs_da_intnode		*from)
+{
+	struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
+
+	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
+	to->forw = be32_to_cpu(hdr3->info.hdr.forw);
+	to->back = be32_to_cpu(hdr3->info.hdr.back);
+	to->magic = be16_to_cpu(hdr3->info.hdr.magic);
+	to->count = be16_to_cpu(hdr3->__count);
+	to->level = be16_to_cpu(hdr3->__level);
+}
+
+static void
+xfs_da3_node_hdr_to_disk(
+	struct xfs_da_intnode		*to,
+	struct xfs_da3_icnode_hdr	*from)
+{
+	struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
+
+	ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
+	hdr3->info.hdr.forw = cpu_to_be32(from->forw);
+	hdr3->info.hdr.back = cpu_to_be32(from->back);
+	hdr3->info.hdr.magic = cpu_to_be16(from->magic);
+	hdr3->__count = cpu_to_be16(from->count);
+	hdr3->__level = cpu_to_be16(from->level);
+}
+
+
+/*
+ * Directory free space block operations
+ */
+static int
+xfs_dir2_free_max_bests(struct xfs_mount *mp)
+{
+	return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) /
+		sizeof(xfs_dir2_data_off_t);
+}
+
+static __be16 *
+xfs_dir2_free_bests_p(struct xfs_dir2_free *free)
+{
+	return (__be16 *)((char *)free + sizeof(struct xfs_dir2_free_hdr));
+}
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+	return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+	return db % xfs_dir2_free_max_bests(mp);
+}
+
+static int
+xfs_dir3_free_max_bests(struct xfs_mount *mp)
+{
+	return (mp->m_dirblksize - sizeof(struct xfs_dir3_free_hdr)) /
+		sizeof(xfs_dir2_data_off_t);
+}
+
+static __be16 *
+xfs_dir3_free_bests_p(struct xfs_dir2_free *free)
+{
+	return (__be16 *)((char *)free + sizeof(struct xfs_dir3_free_hdr));
+}
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir3_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+	return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir3_free_max_bests(mp);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir3_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+	return db % xfs_dir3_free_max_bests(mp);
+}
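
The div/mod pair is easiest to see with numbers. A standalone sketch in which
FREE_FIRSTDB is an illustrative stand-in for XFS_DIR2_FREE_FIRSTDB(mp), and
max_bests comes from the v2 free-header arithmetic above ((4096 - 16) / 2):

#include <assert.h>

#define FREE_FIRSTDB	1000000		/* illustrative stand-in value */

static int db_to_fdb(int max_bests, int db)	/* which free block */
{
	return FREE_FIRSTDB + db / max_bests;
}

static int db_to_fdindex(int max_bests, int db)	/* which slot within it */
{
	return db % max_bests;
}

int main(void)
{
	int max_bests = (4096 - 16) / 2;	/* 2040 bests per free block */

	assert(db_to_fdb(max_bests, 0) == FREE_FIRSTDB);
	assert(db_to_fdindex(max_bests, 0) == 0);
	assert(db_to_fdb(max_bests, 2040) == FREE_FIRSTDB + 1);
	assert(db_to_fdindex(max_bests, 2041) == 1);
	return 0;
}
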
+
+static void
+xfs_dir2_free_hdr_from_disk(
+	struct xfs_dir3_icfree_hdr	*to,
+	struct xfs_dir2_free		*from)
+{
+	to->magic = be32_to_cpu(from->hdr.magic);
+	to->firstdb = be32_to_cpu(from->hdr.firstdb);
+	to->nvalid = be32_to_cpu(from->hdr.nvalid);
+	to->nused = be32_to_cpu(from->hdr.nused);
+	ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
+}
+
+static void
+xfs_dir2_free_hdr_to_disk(
+	struct xfs_dir2_free		*to,
+	struct xfs_dir3_icfree_hdr	*from)
+{
+	ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
+
+	to->hdr.magic = cpu_to_be32(from->magic);
+	to->hdr.firstdb = cpu_to_be32(from->firstdb);
+	to->hdr.nvalid = cpu_to_be32(from->nvalid);
+	to->hdr.nused = cpu_to_be32(from->nused);
+}
+
+static void
+xfs_dir3_free_hdr_from_disk(
+	struct xfs_dir3_icfree_hdr	*to,
+	struct xfs_dir2_free		*from)
+{
+	struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
+
+	to->magic = be32_to_cpu(hdr3->hdr.magic);
+	to->firstdb = be32_to_cpu(hdr3->firstdb);
+	to->nvalid = be32_to_cpu(hdr3->nvalid);
+	to->nused = be32_to_cpu(hdr3->nused);
+
+	ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
+}
+
+static void
+xfs_dir3_free_hdr_to_disk(
+	struct xfs_dir2_free		*to,
+	struct xfs_dir3_icfree_hdr	*from)
+{
+	struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
+
+	ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
+
+	hdr3->hdr.magic = cpu_to_be32(from->magic);
+	hdr3->firstdb = cpu_to_be32(from->firstdb);
+	hdr3->nvalid = cpu_to_be32(from->nvalid);
+	hdr3->nused = cpu_to_be32(from->nused);
+}
+
+static const struct xfs_dir_ops xfs_dir2_ops = {
+	.sf_entsize = xfs_dir2_sf_entsize,
+	.sf_nextentry = xfs_dir2_sf_nextentry,
+	.sf_get_ftype = xfs_dir2_sfe_get_ftype,
+	.sf_put_ftype = xfs_dir2_sfe_put_ftype,
+	.sf_get_ino = xfs_dir2_sfe_get_ino,
+	.sf_put_ino = xfs_dir2_sfe_put_ino,
+	.sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+	.sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+	.data_entsize = xfs_dir2_data_entsize,
+	.data_get_ftype = xfs_dir2_data_get_ftype,
+	.data_put_ftype = xfs_dir2_data_put_ftype,
+	.data_entry_tag_p = xfs_dir2_data_entry_tag_p,
+	.data_bestfree_p = xfs_dir2_data_bestfree_p,
+
+	.data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
+	.data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR2_DATA_ENTSIZE(1),
+	.data_first_offset =  sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR2_DATA_ENTSIZE(1) +
+				XFS_DIR2_DATA_ENTSIZE(2),
+	.data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
+
+	.data_dot_entry_p = xfs_dir2_data_dot_entry_p,
+	.data_dotdot_entry_p = xfs_dir2_data_dotdot_entry_p,
+	.data_first_entry_p = xfs_dir2_data_first_entry_p,
+	.data_entry_p = xfs_dir2_data_entry_p,
+	.data_unused_p = xfs_dir2_data_unused_p,
+
+	.leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
+	.leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
+	.leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
+	.leaf_max_ents = xfs_dir2_max_leaf_ents,
+	.leaf_ents_p = xfs_dir2_leaf_ents_p,
+
+	.node_hdr_size = sizeof(struct xfs_da_node_hdr),
+	.node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+	.node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+	.node_tree_p = xfs_da2_node_tree_p,
+
+	.free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
+	.free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
+	.free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
+	.free_max_bests = xfs_dir2_free_max_bests,
+	.free_bests_p = xfs_dir2_free_bests_p,
+	.db_to_fdb = xfs_dir2_db_to_fdb,
+	.db_to_fdindex = xfs_dir2_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir2_ftype_ops = {
+	.sf_entsize = xfs_dir3_sf_entsize,
+	.sf_nextentry = xfs_dir3_sf_nextentry,
+	.sf_get_ftype = xfs_dir3_sfe_get_ftype,
+	.sf_put_ftype = xfs_dir3_sfe_put_ftype,
+	.sf_get_ino = xfs_dir3_sfe_get_ino,
+	.sf_put_ino = xfs_dir3_sfe_put_ino,
+	.sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+	.sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+	.data_entsize = xfs_dir3_data_entsize,
+	.data_get_ftype = xfs_dir3_data_get_ftype,
+	.data_put_ftype = xfs_dir3_data_put_ftype,
+	.data_entry_tag_p = xfs_dir3_data_entry_tag_p,
+	.data_bestfree_p = xfs_dir2_data_bestfree_p,
+
+	.data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
+	.data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1),
+	.data_first_offset =  sizeof(struct xfs_dir2_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1) +
+				XFS_DIR3_DATA_ENTSIZE(2),
+	.data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
+
+	.data_dot_entry_p = xfs_dir2_data_dot_entry_p,
+	.data_dotdot_entry_p = xfs_dir2_ftype_data_dotdot_entry_p,
+	.data_first_entry_p = xfs_dir2_ftype_data_first_entry_p,
+	.data_entry_p = xfs_dir2_data_entry_p,
+	.data_unused_p = xfs_dir2_data_unused_p,
+
+	.leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
+	.leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
+	.leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
+	.leaf_max_ents = xfs_dir2_max_leaf_ents,
+	.leaf_ents_p = xfs_dir2_leaf_ents_p,
+
+	.node_hdr_size = sizeof(struct xfs_da_node_hdr),
+	.node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+	.node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+	.node_tree_p = xfs_da2_node_tree_p,
+
+	.free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
+	.free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
+	.free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
+	.free_max_bests = xfs_dir2_free_max_bests,
+	.free_bests_p = xfs_dir2_free_bests_p,
+	.db_to_fdb = xfs_dir2_db_to_fdb,
+	.db_to_fdindex = xfs_dir2_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir3_ops = {
+	.sf_entsize = xfs_dir3_sf_entsize,
+	.sf_nextentry = xfs_dir3_sf_nextentry,
+	.sf_get_ftype = xfs_dir3_sfe_get_ftype,
+	.sf_put_ftype = xfs_dir3_sfe_put_ftype,
+	.sf_get_ino = xfs_dir3_sfe_get_ino,
+	.sf_put_ino = xfs_dir3_sfe_put_ino,
+	.sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+	.sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+	.data_entsize = xfs_dir3_data_entsize,
+	.data_get_ftype = xfs_dir3_data_get_ftype,
+	.data_put_ftype = xfs_dir3_data_put_ftype,
+	.data_entry_tag_p = xfs_dir3_data_entry_tag_p,
+	.data_bestfree_p = xfs_dir3_data_bestfree_p,
+
+	.data_dot_offset = sizeof(struct xfs_dir3_data_hdr),
+	.data_dotdot_offset = sizeof(struct xfs_dir3_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1),
+	.data_first_offset =  sizeof(struct xfs_dir3_data_hdr) +
+				XFS_DIR3_DATA_ENTSIZE(1) +
+				XFS_DIR3_DATA_ENTSIZE(2),
+	.data_entry_offset = sizeof(struct xfs_dir3_data_hdr),
+
+	.data_dot_entry_p = xfs_dir3_data_dot_entry_p,
+	.data_dotdot_entry_p = xfs_dir3_data_dotdot_entry_p,
+	.data_first_entry_p = xfs_dir3_data_first_entry_p,
+	.data_entry_p = xfs_dir3_data_entry_p,
+	.data_unused_p = xfs_dir3_data_unused_p,
+
+	.leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr),
+	.leaf_hdr_to_disk = xfs_dir3_leaf_hdr_to_disk,
+	.leaf_hdr_from_disk = xfs_dir3_leaf_hdr_from_disk,
+	.leaf_max_ents = xfs_dir3_max_leaf_ents,
+	.leaf_ents_p = xfs_dir3_leaf_ents_p,
+
+	.node_hdr_size = sizeof(struct xfs_da3_node_hdr),
+	.node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
+	.node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
+	.node_tree_p = xfs_da3_node_tree_p,
+
+	.free_hdr_size = sizeof(struct xfs_dir3_free_hdr),
+	.free_hdr_to_disk = xfs_dir3_free_hdr_to_disk,
+	.free_hdr_from_disk = xfs_dir3_free_hdr_from_disk,
+	.free_max_bests = xfs_dir3_free_max_bests,
+	.free_bests_p = xfs_dir3_free_bests_p,
+	.db_to_fdb = xfs_dir3_db_to_fdb,
+	.db_to_fdindex = xfs_dir3_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir2_nondir_ops = {
+	.node_hdr_size = sizeof(struct xfs_da_node_hdr),
+	.node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+	.node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+	.node_tree_p = xfs_da2_node_tree_p,
+};
+
+static const struct xfs_dir_ops xfs_dir3_nondir_ops = {
+	.node_hdr_size = sizeof(struct xfs_da3_node_hdr),
+	.node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
+	.node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
+	.node_tree_p = xfs_da3_node_tree_p,
+};
+
+/*
+ * Return the ops structure for the current configuration.  If we are passed
+ * an inode, its cached ops override the default, which is chosen from the
+ * superblock feature bits.
+ */
+const struct xfs_dir_ops *
+xfs_dir_get_ops(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*dp)
+{
+	if (dp)
+		return dp->d_ops;
+	if (mp->m_dir_inode_ops)
+		return mp->m_dir_inode_ops;
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		return &xfs_dir3_ops;
+	if (xfs_sb_version_hasftype(&mp->m_sb))
+		return &xfs_dir2_ftype_ops;
+	return &xfs_dir2_ops;
+}
+
+const struct xfs_dir_ops *
+xfs_nondir_get_ops(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*dp)
+{
+	if (dp)
+		return dp->d_ops;
+	if (mp->m_nondir_inode_ops)
+		return mp->m_nondir_inode_ops;
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		return &xfs_dir3_nondir_ops;
+	return &xfs_dir2_nondir_ops;
+}
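
The precedence here -- per-inode ops first, then the mount-wide default, then
superblock feature bits with CRC outranking ftype -- reduces to a few lines.
A standalone model of just the feature-bit fallback (the enum and function
names are illustrative, not the kernel's):

#include <assert.h>
#include <stdbool.h>

enum which_ops { DIR2, DIR2_FTYPE, DIR3 };

static enum which_ops pick_ops(bool has_crc, bool has_ftype)
{
	if (has_crc)			/* CRC implies the full v3 format */
		return DIR3;
	if (has_ftype)			/* ftype alone gets the hybrid ops */
		return DIR2_FTYPE;
	return DIR2;
}

int main(void)
{
	assert(pick_ops(true,  true)  == DIR3);
	assert(pick_ops(false, true)  == DIR2_FTYPE);
	assert(pick_ops(false, false) == DIR2);
	return 0;
}
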
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_da_format.h
similarity index 60%
rename from fs/xfs/xfs_dir2_format.h
rename to fs/xfs/xfs_da_format.h
index 9cf6738..a19d3f8 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_da_format.h
@@ -16,8 +16,113 @@
  * along with this program; if not, write the Free Software Foundation,
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-#ifndef __XFS_DIR2_FORMAT_H__
-#define __XFS_DIR2_FORMAT_H__
+#ifndef __XFS_DA_FORMAT_H__
+#define __XFS_DA_FORMAT_H__
+
+/*========================================================================
+ * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
+ *
+ * It is used to manage a doubly linked list of all blocks at the same
+ * level in the Btree, and to identify which type of block this is.
+ */
+#define XFS_DA_NODE_MAGIC	0xfebe	/* magic number: non-leaf blocks */
+#define XFS_ATTR_LEAF_MAGIC	0xfbee	/* magic number: attribute leaf blks */
+#define	XFS_DIR2_LEAF1_MAGIC	0xd2f1	/* magic number: v2 dirlf single blks */
+#define	XFS_DIR2_LEAFN_MAGIC	0xd2ff	/* magic number: v2 dirlf multi blks */
+
+typedef struct xfs_da_blkinfo {
+	__be32		forw;			/* next block in list */
+	__be32		back;			/* previous block in list */
+	__be16		magic;			/* validity check on block */
+	__be16		pad;			/* unused */
+} xfs_da_blkinfo_t;
+
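A toy walk of the sibling chain shows how little the generic btree code needs
from this header: follow forw until it reaches zero. The in-memory array below
stands in for buffer reads; none of it is kernel API.

#include <assert.h>

struct blkinfo { unsigned forw, back; };	/* links only, for the sketch */

int main(void)
{
	/* three same-level blocks chained 1 -> 2 -> 3; slot 0 is unused */
	struct blkinfo blocks[4] = { {0, 0}, {2, 0}, {3, 1}, {0, 2} };
	int visited = 0;

	for (unsigned b = 1; b != 0; b = blocks[b].forw)
		visited++;
	assert(visited == 3);
	return 0;
}
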
+/*
+ * CRC enabled directory structure types
+ *
+ * The headers change size for the additional verification information, but
+ * otherwise the tree layouts and contents are unchanged. Hence the da btree
+ * code can use the struct xfs_da_blkinfo for manipulating the tree links and
+ * magic numbers without modification for both v2 and v3 nodes.
+ */
+#define XFS_DA3_NODE_MAGIC	0x3ebe	/* magic number: non-leaf blocks */
+#define XFS_ATTR3_LEAF_MAGIC	0x3bee	/* magic number: attribute leaf blks */
+#define	XFS_DIR3_LEAF1_MAGIC	0x3df1	/* magic number: v2 dirlf single blks */
+#define	XFS_DIR3_LEAFN_MAGIC	0x3dff	/* magic number: v2 dirlf multi blks */
+
+struct xfs_da3_blkinfo {
+	/*
+	 * the node link manipulation code relies on the fact that the first
+	 * element of this structure is the struct xfs_da_blkinfo so it can
+	 * ignore the differences in the rest of the structures.
+	 */
+	struct xfs_da_blkinfo	hdr;
+	__be32			crc;	/* CRC of block */
+	__be64			blkno;	/* first block of the buffer */
+	__be64			lsn;	/* sequence number of last write */
+	uuid_t			uuid;	/* filesystem we belong to */
+	__be64			owner;	/* inode that owns the block */
+};
+
+/*
+ * This is the structure of the root and intermediate nodes in the Btree.
+ * The leaf nodes are defined above.
+ *
+ * Entries are not packed.
+ *
+ * Since we have duplicate keys, use a binary search but always follow
+ * all matches in the block, not just the first match found.
+ */
+#define	XFS_DA_NODE_MAXDEPTH	5	/* max depth of Btree */
+
+typedef struct xfs_da_node_hdr {
+	struct xfs_da_blkinfo	info;	/* block type, links, etc. */
+	__be16			__count; /* count of active entries */
+	__be16			__level; /* level above leaves (leaf == 0) */
+} xfs_da_node_hdr_t;
+
+struct xfs_da3_node_hdr {
+	struct xfs_da3_blkinfo	info;	/* block type, links, etc. */
+	__be16			__count; /* count of active entries */
+	__be16			__level; /* level above leaves (leaf == 0) */
+	__be32			__pad32;
+};
+
+#define XFS_DA3_NODE_CRC_OFF	(offsetof(struct xfs_da3_node_hdr, info.crc))
+
+typedef struct xfs_da_node_entry {
+	__be32	hashval;	/* hash value for this descendant */
+	__be32	before;		/* Btree block before this key */
+} xfs_da_node_entry_t;
+
+typedef struct xfs_da_intnode {
+	struct xfs_da_node_hdr	hdr;
+	struct xfs_da_node_entry __btree[];
+} xfs_da_intnode_t;
+
+struct xfs_da3_intnode {
+	struct xfs_da3_node_hdr	hdr;
+	struct xfs_da_node_entry __btree[];
+};
+
+/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+	__uint32_t	forw;
+	__uint32_t	back;
+	__uint16_t	magic;
+	__uint16_t	count;
+	__uint16_t	level;
+};
+
+#define	XFS_LBSIZE(mp)	(mp)->m_sb.sb_blocksize
 
 /*
  * Directory version 2.
@@ -189,79 +294,6 @@
 		((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count));
 }
 
-static inline int
-xfs_dir3_sf_entsize(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	int			len)
-{
-	int count = sizeof(struct xfs_dir2_sf_entry); 	/* namelen + offset */
-
-	count += len;					/* name */
-	count += hdr->i8count ? sizeof(xfs_dir2_ino8_t) :
-				sizeof(xfs_dir2_ino4_t); /* ino # */
-	if (xfs_sb_version_hasftype(&mp->m_sb))
-		count += sizeof(__uint8_t);		/* file type */
-	return count;
-}
-
-static inline struct xfs_dir2_sf_entry *
-xfs_dir3_sf_nextentry(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep)
-{
-	return (struct xfs_dir2_sf_entry *)
-		((char *)sfep + xfs_dir3_sf_entsize(mp, hdr, sfep->namelen));
-}
-
-/*
- * in dir3 shortform directories, the file type field is stored at a variable
- * offset after the inode number. Because it's only a single byte, endian
- * conversion is not necessary.
- */
-static inline __uint8_t *
-xfs_dir3_sfe_ftypep(
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep)
-{
-	return (__uint8_t *)&sfep->name[sfep->namelen];
-}
-
-static inline __uint8_t
-xfs_dir3_sfe_get_ftype(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep)
-{
-	__uint8_t	*ftp;
-
-	if (!xfs_sb_version_hasftype(&mp->m_sb))
-		return XFS_DIR3_FT_UNKNOWN;
-
-	ftp = xfs_dir3_sfe_ftypep(hdr, sfep);
-	if (*ftp >= XFS_DIR3_FT_MAX)
-		return XFS_DIR3_FT_UNKNOWN;
-	return *ftp;
-}
-
-static inline void
-xfs_dir3_sfe_put_ftype(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep,
-	__uint8_t		ftype)
-{
-	__uint8_t	*ftp;
-
-	ASSERT(ftype < XFS_DIR3_FT_MAX);
-
-	if (!xfs_sb_version_hasftype(&mp->m_sb))
-		return;
-	ftp = xfs_dir3_sfe_ftypep(hdr, sfep);
-	*ftp = ftype;
-}
-
 /*
  * Data block structures.
  *
@@ -345,17 +377,6 @@
 
 #define XFS_DIR3_DATA_CRC_OFF  offsetof(struct xfs_dir3_data_hdr, hdr.crc)
 
-static inline struct xfs_dir2_data_free *
-xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
-	if (hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
-	    hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
-		struct xfs_dir3_data_hdr *hdr3 = (struct xfs_dir3_data_hdr *)hdr;
-		return hdr3->best_free;
-	}
-	return hdr->bestfree;
-}
-
 /*
  * Active entry in a data block.
  *
@@ -389,72 +410,6 @@
 } xfs_dir2_data_unused_t;
 
 /*
- * Size of a data entry.
- */
-static inline int
-__xfs_dir3_data_entsize(
-	bool	ftype,
-	int	n)
-{
-	int	size = offsetof(struct xfs_dir2_data_entry, name[0]);
-
-	size += n;
-	size += sizeof(xfs_dir2_data_off_t);
-	if (ftype)
-		size += sizeof(__uint8_t);
-	return roundup(size, XFS_DIR2_DATA_ALIGN);
-}
-static inline int
-xfs_dir3_data_entsize(
-	struct xfs_mount	*mp,
-	int			n)
-{
-	bool ftype = xfs_sb_version_hasftype(&mp->m_sb) ? true : false;
-	return __xfs_dir3_data_entsize(ftype, n);
-}
-
-static inline __uint8_t
-xfs_dir3_dirent_get_ftype(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_entry *dep)
-{
-	if (xfs_sb_version_hasftype(&mp->m_sb)) {
-		__uint8_t	type = dep->name[dep->namelen];
-
-		ASSERT(type < XFS_DIR3_FT_MAX);
-		if (type < XFS_DIR3_FT_MAX)
-			return type;
-
-	}
-	return XFS_DIR3_FT_UNKNOWN;
-}
-
-static inline void
-xfs_dir3_dirent_put_ftype(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_entry *dep,
-	__uint8_t		type)
-{
-	ASSERT(type < XFS_DIR3_FT_MAX);
-	ASSERT(dep->namelen != 0);
-
-	if (xfs_sb_version_hasftype(&mp->m_sb))
-		dep->name[dep->namelen] = type;
-}
-
-/*
- * Pointer to an entry's tag word.
- */
-static inline __be16 *
-xfs_dir3_data_entry_tag_p(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_entry *dep)
-{
-	return (__be16 *)((char *)dep +
-		xfs_dir3_data_entsize(mp, dep->namelen) - sizeof(__be16));
-}
-
-/*
  * Pointer to a freespace's tag word.
  */
 static inline __be16 *
@@ -464,93 +419,6 @@
 			be16_to_cpu(dup->length) - sizeof(__be16));
 }
 
-static inline size_t
-xfs_dir3_data_hdr_size(bool dir3)
-{
-	if (dir3)
-		return sizeof(struct xfs_dir3_data_hdr);
-	return sizeof(struct xfs_dir2_data_hdr);
-}
-
-static inline size_t
-xfs_dir3_data_entry_offset(struct xfs_dir2_data_hdr *hdr)
-{
-	bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
-		    hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
-	return xfs_dir3_data_hdr_size(dir3);
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
-	return (struct xfs_dir2_data_entry *)
-		((char *)hdr + xfs_dir3_data_entry_offset(hdr));
-}
-
-static inline struct xfs_dir2_data_unused *
-xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
-	return (struct xfs_dir2_data_unused *)
-		((char *)hdr + xfs_dir3_data_entry_offset(hdr));
-}
-
-/*
- * Offsets of . and .. in data space (always block 0)
- *
- * XXX: there is scope for significant optimisation of the logic here. Right
- * now we are checking for "dir3 format" over and over again. Ideally we should
- * only do it once for each operation.
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dot_offset(struct xfs_mount *mp)
-{
-	return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
-}
-
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
-{
-	return xfs_dir3_data_dot_offset(mp) +
-		xfs_dir3_data_entsize(mp, 1);
-}
-
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_first_offset(struct xfs_mount *mp)
-{
-	return xfs_dir3_data_dotdot_offset(mp) +
-		xfs_dir3_data_entsize(mp, 2);
-}
-
-/*
- * location of . and .. in data space (always block 0)
- */
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dot_entry_p(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_hdr *hdr)
-{
-	return (struct xfs_dir2_data_entry *)
-		((char *)hdr + xfs_dir3_data_dot_offset(mp));
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dotdot_entry_p(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_hdr *hdr)
-{
-	return (struct xfs_dir2_data_entry *)
-		((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_first_entry_p(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_data_hdr *hdr)
-{
-	return (struct xfs_dir2_data_entry *)
-		((char *)hdr + xfs_dir3_data_first_offset(mp));
-}
-
 /*
  * Leaf block structures.
  *
@@ -645,39 +513,6 @@
 
 #define XFS_DIR3_LEAF_CRC_OFF  offsetof(struct xfs_dir3_leaf_hdr, info.crc)
 
-extern void xfs_dir3_leaf_hdr_from_disk(struct xfs_dir3_icleaf_hdr *to,
-					struct xfs_dir2_leaf *from);
-
-static inline int
-xfs_dir3_leaf_hdr_size(struct xfs_dir2_leaf *lp)
-{
-	if (lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
-	    lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC))
-		return sizeof(struct xfs_dir3_leaf_hdr);
-	return sizeof(struct xfs_dir2_leaf_hdr);
-}
-
-static inline int
-xfs_dir3_max_leaf_ents(struct xfs_mount *mp, struct xfs_dir2_leaf *lp)
-{
-	return (mp->m_dirblksize - xfs_dir3_leaf_hdr_size(lp)) /
-		(uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-/*
- * Get address of the bestcount field in the single-leaf block.
- */
-static inline struct xfs_dir2_leaf_entry *
-xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
-	if (lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
-	    lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
-		struct xfs_dir3_leaf *lp3 = (struct xfs_dir3_leaf *)lp;
-		return lp3->__ents;
-	}
-	return lp->__ents;
-}
-
 /*
  * Get address of the bestcount field in the single-leaf block.
  */
@@ -869,48 +704,6 @@
 
 };
 
-void xfs_dir3_free_hdr_from_disk(struct xfs_dir3_icfree_hdr *to,
-				 struct xfs_dir2_free *from);
-
-static inline int
-xfs_dir3_free_hdr_size(struct xfs_mount *mp)
-{
-	if (xfs_sb_version_hascrc(&mp->m_sb))
-		return sizeof(struct xfs_dir3_free_hdr);
-	return sizeof(struct xfs_dir2_free_hdr);
-}
-
-static inline int
-xfs_dir3_free_max_bests(struct xfs_mount *mp)
-{
-	return (mp->m_dirblksize - xfs_dir3_free_hdr_size(mp)) /
-		sizeof(xfs_dir2_data_off_t);
-}
-
-static inline __be16 *
-xfs_dir3_free_bests_p(struct xfs_mount *mp, struct xfs_dir2_free *free)
-{
-	return (__be16 *)((char *)free + xfs_dir3_free_hdr_size(mp));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static inline xfs_dir2_db_t
-xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
-	return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir3_free_max_bests(mp);
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static inline int
-xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
-	return db % xfs_dir3_free_max_bests(mp);
-}
-
 /*
  * Single block format.
  *
@@ -961,4 +754,262 @@
 	return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
 }
 
-#endif /* __XFS_DIR2_FORMAT_H__ */
+
+/*
+ * Attribute storage layout
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ *
+ *========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================
+ *
+ * Struct leaf_entry's are packed from the top.  Name/values grow from the
+ * bottom but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such;
+ * smaller runs are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again.  If we
+ * still don't have enough space, then we have to split the block.  The
+ * name/value structs (both local and remote versions) must be 32-bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string.  The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
+ * the leaf_entry.  The namespaces are independent only because we also look
+ * at the namespace bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set.  We clear the
+ * bit when we have finished setting up the attribute.  We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define XFS_ATTR_LEAF_MAPSIZE	3	/* how many freespace slots */
+
+typedef struct xfs_attr_leaf_map {	/* RLE map of free bytes */
+	__be16	base;			  /* base of free region */
+	__be16	size;			  /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr {	/* constant-structure header block */
+	xfs_da_blkinfo_t info;		/* block type, links, etc. */
+	__be16	count;			/* count of active leaf_entry's */
+	__be16	usedbytes;		/* num bytes of names/values stored */
+	__be16	firstused;		/* first used byte in name area */
+	__u8	holes;			/* != 0 if blk needs compaction */
+	__u8	pad1;
+	xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+					/* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry {	/* sorted on key, not name */
+	__be32	hashval;		/* hash value of name */
+	__be16	nameidx;		/* index into buffer of name/value */
+	__u8	flags;			/* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+	__u8	pad2;			/* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+	__be16	valuelen;		/* number of bytes in value */
+	__u8	namelen;		/* length of name bytes */
+	__u8	nameval[1];		/* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+	__be32	valueblk;		/* block number of value bytes */
+	__be32	valuelen;		/* number of bytes in value */
+	__u8	namelen;		/* length of name bytes */
+	__u8	name[1];		/* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
+typedef struct xfs_attr_leafblock {
+	xfs_attr_leaf_hdr_t	hdr;	/* constant-structure header block */
+	xfs_attr_leaf_entry_t	entries[1];	/* sorted on key, not name */
+	xfs_attr_leaf_name_local_t namelist;	/* grows from bottom of buf */
+	xfs_attr_leaf_name_remote_t valuelist;	/* grows from bottom of buf */
+} xfs_attr_leafblock_t;
+
+/*
+ * CRC enabled leaf structures. Called "version 3" structures to match the
+ * version number of the directory and dablk structures for this feature;
+ * "attr2" was already taken by the variable inode attribute fork size feature.
+ */
+struct xfs_attr3_leaf_hdr {
+	struct xfs_da3_blkinfo	info;
+	__be16			count;
+	__be16			usedbytes;
+	__be16			firstused;
+	__u8			holes;
+	__u8			pad1;
+	struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+	__be32			pad2;		/* 64 bit alignment */
+};
+
+#define XFS_ATTR3_LEAF_CRC_OFF	(offsetof(struct xfs_attr3_leaf_hdr, info.crc))
+
+struct xfs_attr3_leafblock {
+	struct xfs_attr3_leaf_hdr	hdr;
+	struct xfs_attr_leaf_entry	entries[1];
+
+	/*
+	 * The rest of the block contains the following structures after the
+	 * leaf entries, growing from the bottom up. The structures are never
+	 * referenced directly; their locations are computed purely via the
+	 * helper functions below.
+	 *
+	 * struct xfs_attr_leaf_name_local
+	 * struct xfs_attr_leaf_name_remote
+	 */
+};
+
+/*
+ * incore, neutral version of the attribute leaf header
+ */
+struct xfs_attr3_icleaf_hdr {
+	__uint32_t	forw;
+	__uint32_t	back;
+	__uint16_t	magic;
+	__uint16_t	count;
+	__uint16_t	usedbytes;
+	__uint16_t	firstused;
+	__u8		holes;
+	struct {
+		__uint16_t	base;
+		__uint16_t	size;
+	} freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call; they are OR-ed together for various operations.
+ */
+#define	XFS_ATTR_LOCAL_BIT	0	/* attr is stored locally */
+#define	XFS_ATTR_ROOT_BIT	1	/* limit access to trusted attrs */
+#define	XFS_ATTR_SECURE_BIT	2	/* limit access to secure attrs */
+#define	XFS_ATTR_INCOMPLETE_BIT	7	/* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL		(1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT		(1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_SECURE		(1 << XFS_ATTR_SECURE_BIT)
+#define XFS_ATTR_INCOMPLETE	(1 << XFS_ATTR_INCOMPLETE_BIT)
+
+/*
+ * Conversion macros for converting namespace bits from argument flags
+ * to ondisk flags.
+ */
+#define XFS_ATTR_NSP_ARGS_MASK		(ATTR_ROOT | ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK_MASK	(XFS_ATTR_ROOT | XFS_ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK(flags)	((flags) & XFS_ATTR_NSP_ONDISK_MASK)
+#define XFS_ATTR_NSP_ARGS(flags)	((flags) & XFS_ATTR_NSP_ARGS_MASK)
+#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x)	(((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
+					 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
+#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x)	(((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
+					 ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
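
A round-trip check of these conversions, using the on-disk bits defined above;
the ATTR_ROOT/ATTR_SECURE values are assumed for the sketch (the real syscall
flags live in xfs_fs.h):

#include <assert.h>

#define ATTR_ROOT	0x0002		/* assumed value, sketch only */
#define ATTR_SECURE	0x0008		/* assumed value, sketch only */
#define XFS_ATTR_ROOT	(1 << 1)	/* matches XFS_ATTR_ROOT_BIT */
#define XFS_ATTR_SECURE	(1 << 2)	/* matches XFS_ATTR_SECURE_BIT */

#define ARGS_TO_ONDISK(x)	(((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) | \
				 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
#define ONDISK_TO_ARGS(x)	(((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) | \
				 ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))

int main(void)
{
	assert(ARGS_TO_ONDISK(ATTR_ROOT) == XFS_ATTR_ROOT);
	assert(ONDISK_TO_ARGS(ARGS_TO_ONDISK(ATTR_SECURE)) == ATTR_SECURE);
	assert(ARGS_TO_ONDISK(0) == 0);		/* plain user namespace */
	return 0;
}
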
+
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+ */
+#define	XFS_ATTR_LEAF_NAME_ALIGN	((uint)sizeof(xfs_dablk_t))
+
+static inline int
+xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
+{
+	if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+		return sizeof(struct xfs_attr3_leaf_hdr);
+	return sizeof(struct xfs_attr_leaf_hdr);
+}
+
+static inline struct xfs_attr_leaf_entry *
+xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
+{
+	if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+		return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
+	return &leafp->entries[0];
+}
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
+ */
+static inline char *
+xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
+{
+	struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
+
+	return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
+}
+
+static inline xfs_attr_leaf_name_remote_t *
+xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
+{
+	return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+static inline xfs_attr_leaf_name_local_t *
+xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
+{
+	return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+/*
+ * Calculate total bytes used (including trailing pad for alignment) for
+ * a "local" name/value structure, a "remote" name/value structure, and
+ * a pointer which might be either.
+ */
+static inline int xfs_attr_leaf_entsize_remote(int nlen)
+{
+	return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
+		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
+{
+	return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
+		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local_max(int bsize)
+{
+	return (((bsize) >> 1) + ((bsize) >> 2));
+}
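
Two quick arithmetic checks: the local-entry cap is bsize/2 + bsize/4, i.e.
three quarters of the block, and local entry sizes round up to the 4-byte
XFS_ATTR_LEAF_NAME_ALIGN, assuming sizeof(xfs_attr_leaf_name_local_t) == 4
as the fields above suggest:

#include <assert.h>

#define NAME_ALIGN	4		/* sizeof(xfs_dablk_t) assumed == 4 */
#define ALIGN4(x)	(((x) + NAME_ALIGN - 1) & ~(NAME_ALIGN - 1))

static int entsize_local(int nlen, int vlen)
{
	/* sizeof(local struct) - 1 == 3: valuelen(2) + namelen(1) */
	return ALIGN4(3 + nlen + vlen);
}

int main(void)
{
	assert((4096 >> 1) + (4096 >> 2) == 3072);	/* 3/4 of a 4k block */
	assert(entsize_local(4, 10) == 20);		/* 17 rounds up to 20 */
	return 0;
}
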
+
+
+
+/*
+ * Remote attribute block format definition
+ *
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
+#define XFS_ATTR3_RMT_MAGIC	0x5841524d	/* XARM */
+
+struct xfs_attr3_rmt_hdr {
+	__be32	rm_magic;
+	__be32	rm_offset;
+	__be32	rm_bytes;
+	__be32	rm_crc;
+	uuid_t	rm_uuid;
+	__be64	rm_owner;
+	__be64	rm_blkno;
+	__be64	rm_lsn;
+};
+
+#define XFS_ATTR3_RMT_CRC_OFF	offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
+
+#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)	\
+	((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+			sizeof(struct xfs_attr3_rmt_hdr) : 0))
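
Sizing a remote value then reduces to a ceiling division, since every
filesystem block of the attribute loses the header on a CRC filesystem.
Assuming the field sizes above give sizeof(struct xfs_attr3_rmt_hdr) == 56
and a 4096-byte block:

#include <assert.h>

static int rmt_blocks(int valuelen, int bufsize, int has_crc)
{
	int space = bufsize - (has_crc ? 56 : 0);	/* usable bytes/block */

	return (valuelen + space - 1) / space;		/* ceiling division */
}

int main(void)
{
	assert(rmt_blocks(4096, 4096, 0) == 1);	/* pre-CRC: fits exactly */
	assert(rmt_blocks(4096, 4096, 1) == 2);	/* the header spills it */
	assert(rmt_blocks(64 * 1024, 4096, 1) == 17);
	return 0;
}
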
+
+#endif /* __XFS_DA_FORMAT_H__ */
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index edf203a..ce16ef0 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -17,25 +17,24 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
@@ -96,13 +95,17 @@
 	ASSERT(xfs_sb_version_hasdirv2(&mp->m_sb));
 	ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
 	       XFS_MAX_BLOCKSIZE);
+
+	mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
+	mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
+
 	mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
 	mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog;
 	mp->m_dirdatablk = xfs_dir2_db_to_da(mp, XFS_DIR2_DATA_FIRSTDB(mp));
 	mp->m_dirleafblk = xfs_dir2_db_to_da(mp, XFS_DIR2_LEAF_FIRSTDB(mp));
 	mp->m_dirfreeblk = xfs_dir2_db_to_da(mp, XFS_DIR2_FREE_FIRSTDB(mp));
 
-	nodehdr_size = __xfs_da3_node_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
+	nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
 	mp->m_attr_node_ents = (mp->m_sb.sb_blocksize - nodehdr_size) /
 				(uint)sizeof(xfs_da_node_entry_t);
 	mp->m_dir_node_ents = (mp->m_dirblksize - nodehdr_size) /
@@ -113,6 +116,7 @@
 		mp->m_dirnameops = &xfs_ascii_ci_nameops;
 	else
 		mp->m_dirnameops = &xfs_default_nameops;
+
 }
 
 /*
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 9910401..cec70e0 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -32,6 +32,83 @@
 extern struct xfs_name	xfs_name_dotdot;
 
 /*
+ * directory operations vector for encode/decode routines
+ */
+struct xfs_dir_ops {
+	int	(*sf_entsize)(struct xfs_dir2_sf_hdr *hdr, int len);
+	struct xfs_dir2_sf_entry *
+		(*sf_nextentry)(struct xfs_dir2_sf_hdr *hdr,
+				struct xfs_dir2_sf_entry *sfep);
+	__uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
+	void	(*sf_put_ftype)(struct xfs_dir2_sf_entry *sfep,
+				__uint8_t ftype);
+	xfs_ino_t (*sf_get_ino)(struct xfs_dir2_sf_hdr *hdr,
+				struct xfs_dir2_sf_entry *sfep);
+	void	(*sf_put_ino)(struct xfs_dir2_sf_hdr *hdr,
+			      struct xfs_dir2_sf_entry *sfep,
+			      xfs_ino_t ino);
+	xfs_ino_t (*sf_get_parent_ino)(struct xfs_dir2_sf_hdr *hdr);
+	void	(*sf_put_parent_ino)(struct xfs_dir2_sf_hdr *hdr,
+				     xfs_ino_t ino);
+
+	int	(*data_entsize)(int len);
+	__uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
+	void	(*data_put_ftype)(struct xfs_dir2_data_entry *dep,
+				__uint8_t ftype);
+	__be16 * (*data_entry_tag_p)(struct xfs_dir2_data_entry *dep);
+	struct xfs_dir2_data_free *
+		(*data_bestfree_p)(struct xfs_dir2_data_hdr *hdr);
+
+	xfs_dir2_data_aoff_t data_dot_offset;
+	xfs_dir2_data_aoff_t data_dotdot_offset;
+	xfs_dir2_data_aoff_t data_first_offset;
+	size_t	data_entry_offset;
+
+	struct xfs_dir2_data_entry *
+		(*data_dot_entry_p)(struct xfs_dir2_data_hdr *hdr);
+	struct xfs_dir2_data_entry *
+		(*data_dotdot_entry_p)(struct xfs_dir2_data_hdr *hdr);
+	struct xfs_dir2_data_entry *
+		(*data_first_entry_p)(struct xfs_dir2_data_hdr *hdr);
+	struct xfs_dir2_data_entry *
+		(*data_entry_p)(struct xfs_dir2_data_hdr *hdr);
+	struct xfs_dir2_data_unused *
+		(*data_unused_p)(struct xfs_dir2_data_hdr *hdr);
+
+	int	leaf_hdr_size;
+	void	(*leaf_hdr_to_disk)(struct xfs_dir2_leaf *to,
+				    struct xfs_dir3_icleaf_hdr *from);
+	void	(*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
+				      struct xfs_dir2_leaf *from);
+	int	(*leaf_max_ents)(struct xfs_mount *mp);
+	struct xfs_dir2_leaf_entry *
+		(*leaf_ents_p)(struct xfs_dir2_leaf *lp);
+
+	int	node_hdr_size;
+	void	(*node_hdr_to_disk)(struct xfs_da_intnode *to,
+				    struct xfs_da3_icnode_hdr *from);
+	void	(*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
+				      struct xfs_da_intnode *from);
+	struct xfs_da_node_entry *
+		(*node_tree_p)(struct xfs_da_intnode *dap);
+
+	int	free_hdr_size;
+	void	(*free_hdr_to_disk)(struct xfs_dir2_free *to,
+				    struct xfs_dir3_icfree_hdr *from);
+	void	(*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
+				      struct xfs_dir2_free *from);
+	int	(*free_max_bests)(struct xfs_mount *mp);
+	__be16 * (*free_bests_p)(struct xfs_dir2_free *free);
+	xfs_dir2_db_t (*db_to_fdb)(struct xfs_mount *mp, xfs_dir2_db_t db);
+	int	(*db_to_fdindex)(struct xfs_mount *mp, xfs_dir2_db_t db);
+};
+
+extern const struct xfs_dir_ops *
+	xfs_dir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
+extern const struct xfs_dir_ops *
+	xfs_nondir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
+
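The payoff at call sites is that the format decision is made once, when the
ops are selected, instead of at every use. A mocked-up standalone illustration
of the calling pattern (the two entsize functions only mirror the data entry
arithmetic; none of this is the kernel's actual table):

#include <assert.h>

struct dir_ops { int (*data_entsize)(int len); };

static int entsize_v2(int len) { return ((11 + len) + 7) & ~7; }
static int entsize_v3(int len) { return ((12 + len) + 7) & ~7; }

static const struct dir_ops v2_ops = { .data_entsize = entsize_v2 };
static const struct dir_ops v3_ops = { .data_entsize = entsize_v3 };

int main(void)
{
	const struct dir_ops *d_ops = &v3_ops;	/* plays the role of dp->d_ops */

	assert(d_ops->data_entsize(1) == 16);	/* "." with ftype */
	d_ops = &v2_ops;
	assert(d_ops->data_entsize(5) == 16);
	return 0;
}
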
+/*
  * Generic directory interface routines
  */
 extern void xfs_dir_startup(void);
@@ -65,37 +142,30 @@
 /*
  * Interface routines used by userspace utilities
  */
-extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
-extern void xfs_dir2_sf_put_parent_ino(struct xfs_dir2_sf_hdr *sfp,
-		xfs_ino_t ino);
-extern xfs_ino_t xfs_dir3_sfe_get_ino(struct xfs_mount *mp,
-		struct xfs_dir2_sf_hdr *sfp, struct xfs_dir2_sf_entry *sfep);
-extern void xfs_dir3_sfe_put_ino(struct xfs_mount *mp,
-		struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep,
-		xfs_ino_t ino);
-
 extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
 extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
 extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
 				struct xfs_buf *bp);
 
-extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
+extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
 		struct xfs_dir2_data_hdr *hdr, int *loghead);
-extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
-		struct xfs_dir2_data_entry *dep);
-extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
+extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_inode *dp,
+		struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
+extern void xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_inode *dp,
 		struct xfs_buf *bp);
 extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
 		struct xfs_dir2_data_unused *dup);
-extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
+extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_inode *dp,
+		struct xfs_buf *bp, xfs_dir2_data_aoff_t offset,
+		xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
+extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_inode *dp,
+		struct xfs_buf *bp, struct xfs_dir2_data_unused *dup,
 		xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
 		int *needlogp, int *needscanp);
-extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
-		struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
-		xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
 
 extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
-		struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_unused *dup);
+		struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_free *bf,
+		struct xfs_dir2_data_unused *dup);
 
 extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 12dad18..90cdbf4 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -18,25 +18,25 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_buf_item.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Local function prototypes.
@@ -168,6 +168,7 @@
 
 static void
 xfs_dir2_block_need_space(
+	struct xfs_inode		*dp,
 	struct xfs_dir2_data_hdr	*hdr,
 	struct xfs_dir2_block_tail	*btp,
 	struct xfs_dir2_leaf_entry	*blp,
@@ -183,7 +184,7 @@
 	struct xfs_dir2_data_unused	*enddup = NULL;
 
 	*compact = 0;
-	bf = xfs_dir3_data_bestfree_p(hdr);
+	bf = dp->d_ops->data_bestfree_p(hdr);
 
 	/*
 	 * If there are stale entries we'll use one for the leaf.
@@ -280,6 +281,7 @@
 static void
 xfs_dir2_block_compact(
 	struct xfs_trans		*tp,
+	struct xfs_inode		*dp,
 	struct xfs_buf			*bp,
 	struct xfs_dir2_data_hdr	*hdr,
 	struct xfs_dir2_block_tail	*btp,
@@ -312,7 +314,7 @@
 	*lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
 	*lfloghigh -= be32_to_cpu(btp->stale) - 1;
 	be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
-	xfs_dir2_data_make_free(tp, bp,
+	xfs_dir2_data_make_free(tp, dp, bp,
 		(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
 		(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
 		needlog, &needscan);
@@ -323,7 +325,7 @@
 	 * This needs to happen before the next call to use_free.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(tp->t_mountp, hdr, needlog);
+		xfs_dir2_data_freescan(dp, hdr, needlog);
 }
 
 /*
@@ -369,7 +371,7 @@
 	if (error)
 		return error;
 
-	len = xfs_dir3_data_entsize(mp, args->namelen);
+	len = dp->d_ops->data_entsize(args->namelen);
 
 	/*
 	 * Set up pointers to parts of the block.
@@ -382,7 +384,7 @@
 	 * Find out if we can reuse stale entries or whether we need extra
 	 * space for entry and new leaf.
 	 */
-	xfs_dir2_block_need_space(hdr, btp, blp, &tagp, &dup,
+	xfs_dir2_block_need_space(dp, hdr, btp, blp, &tagp, &dup,
 				  &enddup, &compact, len);
 
 	/*
@@ -418,7 +420,7 @@
 	 * If need to compact the leaf entries, do it now.
 	 */
 	if (compact) {
-		xfs_dir2_block_compact(tp, bp, hdr, btp, blp, &needlog,
+		xfs_dir2_block_compact(tp, dp, bp, hdr, btp, blp, &needlog,
 				      &lfloghigh, &lfloglow);
 		/* recalculate blp post-compaction */
 		blp = xfs_dir2_block_leaf_p(btp);
@@ -453,7 +455,7 @@
 		/*
 		 * Mark the space needed for the new leaf entry, now in use.
 		 */
-		xfs_dir2_data_use_free(tp, bp, enddup,
+		xfs_dir2_data_use_free(tp, dp, bp, enddup,
 			(xfs_dir2_data_aoff_t)
 			((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) -
 			 sizeof(*blp)),
@@ -468,7 +470,7 @@
 		 * This needs to happen before the next call to use_free.
 		 */
 		if (needscan) {
-			xfs_dir2_data_freescan(mp, hdr, &needlog);
+			xfs_dir2_data_freescan(dp, hdr, &needlog);
 			needscan = 0;
 		}
 		/*
@@ -540,7 +542,7 @@
 	/*
 	 * Mark space for the data entry used.
 	 */
-	xfs_dir2_data_use_free(tp, bp, dup,
+	xfs_dir2_data_use_free(tp, dp, bp, dup,
 		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
 		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
 	/*
@@ -549,18 +551,18 @@
 	dep->inumber = cpu_to_be64(args->inumber);
 	dep->namelen = args->namelen;
 	memcpy(dep->name, args->name, args->namelen);
-	xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
-	tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+	dp->d_ops->data_put_ftype(dep, args->filetype);
+	tagp = dp->d_ops->data_entry_tag_p(dep);
 	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
 	/*
 	 * Clean up the bestfree array and log the header, tail, and entry.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	if (needlog)
-		xfs_dir2_data_log_header(tp, bp);
+		xfs_dir2_data_log_header(tp, dp, bp);
 	xfs_dir2_block_log_tail(tp, bp);
-	xfs_dir2_data_log_entry(tp, bp, dep);
+	xfs_dir2_data_log_entry(tp, dp, bp, dep);
 	xfs_dir3_data_check(dp, bp);
 	return 0;
 }
@@ -642,7 +644,7 @@
 	 * Fill in inode number, CI name if appropriate, release the block.
 	 */
 	args->inumber = be64_to_cpu(dep->inumber);
-	args->filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+	args->filetype = dp->d_ops->data_get_ftype(dep);
 	error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
 	xfs_trans_brelse(args->trans, bp);
 	return XFS_ERROR(error);
@@ -799,9 +801,9 @@
 	 * Mark the data entry's space free.
 	 */
 	needlog = needscan = 0;
-	xfs_dir2_data_make_free(tp, bp,
+	xfs_dir2_data_make_free(tp, dp, bp,
 		(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
-		xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+		dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
 	/*
 	 * Fix up the block tail.
 	 */
@@ -816,9 +818,9 @@
 	 * Fix up bestfree, log the header if necessary.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	if (needlog)
-		xfs_dir2_data_log_header(tp, bp);
+		xfs_dir2_data_log_header(tp, dp, bp);
 	xfs_dir3_data_check(dp, bp);
 	/*
 	 * See if the size as a shortform is good enough.
@@ -875,8 +877,8 @@
 	 * Change the inode number to the new value.
 	 */
 	dep->inumber = cpu_to_be64(args->inumber);
-	xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
-	xfs_dir2_data_log_entry(args->trans, bp, dep);
+	dp->d_ops->data_put_ftype(dep, args->filetype);
+	xfs_dir2_data_log_entry(args->trans, dp, bp, dep);
 	xfs_dir3_data_check(dp, bp);
 	return 0;
 }
@@ -934,8 +936,8 @@
 	tp = args->trans;
 	mp = dp->i_mount;
 	leaf = lbp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
 
 	ASSERT(leafhdr.magic == XFS_DIR2_LEAF1_MAGIC ||
@@ -949,7 +951,7 @@
 	while (dp->i_d.di_size > mp->m_dirblksize) {
 		int hdrsz;
 
-		hdrsz = xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
+		hdrsz = dp->d_ops->data_entry_offset;
 		bestsp = xfs_dir2_leaf_bests_p(ltp);
 		if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
 					    mp->m_dirblksize - hdrsz) {
@@ -999,7 +1001,7 @@
 	/*
 	 * Use up the space at the end of the block (blp/btp).
 	 */
-	xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
+	xfs_dir2_data_use_free(tp, dp, dbp, dup, mp->m_dirblksize - size, size,
 		&needlog, &needscan);
 	/*
 	 * Initialize the block tail.
@@ -1023,9 +1025,9 @@
 	 * Scan the bestfree if we need it and log the data block header.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
+		xfs_dir2_data_log_header(tp, dp, dbp);
 	/*
 	 * Pitch the old leaf block.
 	 */
@@ -1136,9 +1138,9 @@
 	 * The whole thing is initialized to free by the init routine.
 	 * Say we're using the leaf and tail area.
 	 */
-	dup = xfs_dir3_data_unused_p(hdr);
+	dup = dp->d_ops->data_unused_p(hdr);
 	needlog = needscan = 0;
-	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
+	xfs_dir2_data_use_free(tp, dp, bp, dup, mp->m_dirblksize - i, i, &needlog,
 		&needscan);
 	ASSERT(needscan == 0);
 	/*
@@ -1152,38 +1154,38 @@
 	/*
 	 * Remove the freespace; we'll manage it.
 	 */
-	xfs_dir2_data_use_free(tp, bp, dup,
+	xfs_dir2_data_use_free(tp, dp, bp, dup,
 		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
 		be16_to_cpu(dup->length), &needlog, &needscan);
 	/*
 	 * Create entry for .
 	 */
-	dep = xfs_dir3_data_dot_entry_p(mp, hdr);
+	dep = dp->d_ops->data_dot_entry_p(hdr);
 	dep->inumber = cpu_to_be64(dp->i_ino);
 	dep->namelen = 1;
 	dep->name[0] = '.';
-	xfs_dir3_dirent_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
-	tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+	dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
+	tagp = dp->d_ops->data_entry_tag_p(dep);
 	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
-	xfs_dir2_data_log_entry(tp, bp, dep);
+	xfs_dir2_data_log_entry(tp, dp, bp, dep);
 	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
 	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
 				(char *)dep - (char *)hdr));
 	/*
 	 * Create entry for ..
 	 */
-	dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
-	dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
+	dep = dp->d_ops->data_dotdot_entry_p(hdr);
+	dep->inumber = cpu_to_be64(dp->d_ops->sf_get_parent_ino(sfp));
 	dep->namelen = 2;
 	dep->name[0] = dep->name[1] = '.';
-	xfs_dir3_dirent_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
-	tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+	dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
+	tagp = dp->d_ops->data_entry_tag_p(dep);
 	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
-	xfs_dir2_data_log_entry(tp, bp, dep);
+	xfs_dir2_data_log_entry(tp, dp, bp, dep);
 	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
 	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
 				(char *)dep - (char *)hdr));
-	offset = xfs_dir3_data_first_offset(mp);
+	offset = dp->d_ops->data_first_offset;
 	/*
 	 * Loop over existing entries, stuff them in.
 	 */
@@ -1214,7 +1216,9 @@
 			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
 				((char *)dup - (char *)hdr));
 			xfs_dir2_data_log_unused(tp, bp, dup);
-			xfs_dir2_data_freeinsert(hdr, dup, &dummy);
+			xfs_dir2_data_freeinsert(hdr,
+						 dp->d_ops->data_bestfree_p(hdr),
+						 dup, &dummy);
 			offset += be16_to_cpu(dup->length);
 			continue;
 		}
@@ -1222,14 +1226,13 @@
 		 * Copy a real entry.
 		 */
 		dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
-		dep->inumber = cpu_to_be64(xfs_dir3_sfe_get_ino(mp, sfp, sfep));
+		dep->inumber = cpu_to_be64(dp->d_ops->sf_get_ino(sfp, sfep));
 		dep->namelen = sfep->namelen;
-		xfs_dir3_dirent_put_ftype(mp, dep,
-					xfs_dir3_sfe_get_ftype(mp, sfp, sfep));
+		dp->d_ops->data_put_ftype(dep, dp->d_ops->sf_get_ftype(sfep));
 		memcpy(dep->name, sfep->name, dep->namelen);
-		tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+		tagp = dp->d_ops->data_entry_tag_p(dep);
 		*tagp = cpu_to_be16((char *)dep - (char *)hdr);
-		xfs_dir2_data_log_entry(tp, bp, dep);
+		xfs_dir2_data_log_entry(tp, dp, bp, dep);
 		name.name = sfep->name;
 		name.len = sfep->namelen;
 		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
@@ -1240,7 +1243,7 @@
 		if (++i == sfp->count)
 			sfep = NULL;
 		else
-			sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+			sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 	}
 	/* Done with the temporary buffer */
 	kmem_free(sfp);
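
Side note (illustrative, not part of the patch): the recurring edit in
this file threads the inode into helpers such as
xfs_dir2_data_log_header() so that format constants come from dp->d_ops
rather than being re-derived from the mount on every call.  A reduced,
compilable sketch of that shape; the types are invented stand-ins, and
16/64 are the v2 and v3 data header sizes used purely as example
values.

	#include <assert.h>

	struct hdr_ops   { int data_entry_offset; };
	struct inode_ref { const struct hdr_ops *d_ops; };

	/* was log_header(tp, bp); the inode now rides along for the offset */
	static int header_log_len(const struct inode_ref *dp)
	{
		return dp->d_ops->data_entry_offset - 1;	/* last byte to log */
	}

	int main(void)
	{
		static const struct hdr_ops v2 = { .data_entry_offset = 16 };
		static const struct hdr_ops v3 = { .data_entry_offset = 64 };
		struct inode_ref a = { &v2 }, b = { &v3 };

		assert(header_log_len(&a) == 15);
		assert(header_log_len(&b) == 63);
		return 0;
	}
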
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 47e1326..70acff4 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -18,20 +18,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
@@ -63,11 +62,18 @@
 	char			*p;		/* current data position */
 	int			stale;		/* count of stale leaves */
 	struct xfs_name		name;
+	const struct xfs_dir_ops *ops;
 
 	mp = bp->b_target->bt_mount;
+
+	/*
+	 * We can be passed a null dp here from a verifier, so we need to go
+	 * the hard way to get the ops.
+	 */
+	ops = xfs_dir_get_ops(mp, dp);
+
 	hdr = bp->b_addr;
-	bf = xfs_dir3_data_bestfree_p(hdr);
-	p = (char *)xfs_dir3_data_entry_p(hdr);
+	p = (char *)ops->data_entry_p(hdr);
 
 	switch (hdr->magic) {
 	case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
@@ -75,6 +81,16 @@
 		btp = xfs_dir2_block_tail_p(mp, hdr);
 		lep = xfs_dir2_block_leaf_p(btp);
 		endp = (char *)lep;
+
+		/*
+		 * The number of leaf entries is limited by the size of the
+		 * block and the amount of space used by the data entries.
+		 * We don't know how much space is used by the data entries yet,
+		 * so just ensure that the count falls somewhere inside the
+		 * block right now.
+		 */
+		XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) <
+			((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
 		break;
 	case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
 	case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
@@ -85,10 +101,11 @@
 		return EFSCORRUPTED;
 	}
 
-	count = lastfree = freeseen = 0;
 	/*
 	 * Account for zero bestfree entries.
 	 */
+	bf = ops->data_bestfree_p(hdr);
+	count = lastfree = freeseen = 0;
 	if (!bf[0].length) {
 		XFS_WANT_CORRUPTED_RETURN(!bf[0].offset);
 		freeseen |= 1 << 0;
@@ -121,7 +138,7 @@
 			XFS_WANT_CORRUPTED_RETURN(
 				be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
 					       (char *)dup - (char *)hdr);
-			dfp = xfs_dir2_data_freefind(hdr, dup);
+			dfp = xfs_dir2_data_freefind(hdr, bf, dup);
 			if (dfp) {
 				i = (int)(dfp - bf);
 				XFS_WANT_CORRUPTED_RETURN(
@@ -147,10 +164,10 @@
 		XFS_WANT_CORRUPTED_RETURN(
 			!xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
 		XFS_WANT_CORRUPTED_RETURN(
-			be16_to_cpu(*xfs_dir3_data_entry_tag_p(mp, dep)) ==
+			be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
 					       (char *)dep - (char *)hdr);
 		XFS_WANT_CORRUPTED_RETURN(
-			xfs_dir3_dirent_get_ftype(mp, dep) < XFS_DIR3_FT_MAX);
+				ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
 		count++;
 		lastfree = 0;
 		if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -168,7 +185,7 @@
 			}
 			XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count));
 		}
-		p += xfs_dir3_data_entsize(mp, dep->namelen);
+		p += ops->data_entsize(dep->namelen);
 	}
 	/*
 	 * Need to have seen all the entries and all the bestfree slots.
@@ -327,19 +344,18 @@
  */
 xfs_dir2_data_free_t *
 xfs_dir2_data_freefind(
-	xfs_dir2_data_hdr_t	*hdr,		/* data block */
-	xfs_dir2_data_unused_t	*dup)		/* data unused entry */
+	struct xfs_dir2_data_hdr *hdr,		/* data block header */
+	struct xfs_dir2_data_free *bf,		/* bestfree table pointer */
+	struct xfs_dir2_data_unused *dup)	/* unused space */
 {
 	xfs_dir2_data_free_t	*dfp;		/* bestfree entry */
 	xfs_dir2_data_aoff_t	off;		/* offset value needed */
-	struct xfs_dir2_data_free *bf;
 #ifdef DEBUG
 	int			matched;	/* matched the value */
 	int			seenzero;	/* saw a 0 bestfree entry */
 #endif
 
 	off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
-	bf = xfs_dir3_data_bestfree_p(hdr);
 
 #ifdef DEBUG
 	/*
@@ -399,11 +415,11 @@
  */
 xfs_dir2_data_free_t *				/* entry inserted */
 xfs_dir2_data_freeinsert(
-	xfs_dir2_data_hdr_t	*hdr,		/* data block pointer */
-	xfs_dir2_data_unused_t	*dup,		/* unused space */
+	struct xfs_dir2_data_hdr *hdr,		/* data block pointer */
+	struct xfs_dir2_data_free *dfp,		/* bestfree table pointer */
+	struct xfs_dir2_data_unused *dup,	/* unused space */
 	int			*loghead)	/* log the data header (out) */
 {
-	xfs_dir2_data_free_t	*dfp;		/* bestfree table pointer */
 	xfs_dir2_data_free_t	new;		/* new bestfree entry */
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
@@ -411,7 +427,6 @@
 	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
 
-	dfp = xfs_dir3_data_bestfree_p(hdr);
 	new.length = dup->length;
 	new.offset = cpu_to_be16((char *)dup - (char *)hdr);
 
@@ -444,11 +459,11 @@
  */
 STATIC void
 xfs_dir2_data_freeremove(
-	xfs_dir2_data_hdr_t	*hdr,		/* data block header */
-	xfs_dir2_data_free_t	*dfp,		/* bestfree entry pointer */
+	struct xfs_dir2_data_hdr *hdr,		/* data block header */
+	struct xfs_dir2_data_free *bf,		/* bestfree table pointer */
+	struct xfs_dir2_data_free *dfp,		/* bestfree entry pointer */
 	int			*loghead)	/* out: log data header */
 {
-	struct xfs_dir2_data_free *bf;
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -458,7 +473,6 @@
 	/*
 	 * It's the first entry; slide the next 2 up.
 	 */
-	bf = xfs_dir3_data_bestfree_p(hdr);
 	if (dfp == &bf[0]) {
 		bf[0] = bf[1];
 		bf[1] = bf[2];
@@ -486,9 +500,9 @@
  */
 void
 xfs_dir2_data_freescan(
-	xfs_mount_t		*mp,		/* filesystem mount point */
-	xfs_dir2_data_hdr_t	*hdr,		/* data block header */
-	int			*loghead)	/* out: log data header */
+	struct xfs_inode	*dp,
+	struct xfs_dir2_data_hdr *hdr,
+	int			*loghead)
 {
 	xfs_dir2_block_tail_t	*btp;		/* block tail */
 	xfs_dir2_data_entry_t	*dep;		/* active data entry */
@@ -505,19 +519,19 @@
 	/*
 	 * Start by clearing the table.
 	 */
-	bf = xfs_dir3_data_bestfree_p(hdr);
+	bf = dp->d_ops->data_bestfree_p(hdr);
 	memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
 	*loghead = 1;
 	/*
 	 * Set up pointers.
 	 */
-	p = (char *)xfs_dir3_data_entry_p(hdr);
+	p = (char *)dp->d_ops->data_entry_p(hdr);
 	if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
 	    hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
-		btp = xfs_dir2_block_tail_p(mp, hdr);
+		btp = xfs_dir2_block_tail_p(dp->i_mount, hdr);
 		endp = (char *)xfs_dir2_block_leaf_p(btp);
 	} else
-		endp = (char *)hdr + mp->m_dirblksize;
+		endp = (char *)hdr + dp->i_mount->m_dirblksize;
 	/*
 	 * Loop over the block's entries.
 	 */
@@ -529,7 +543,7 @@
 		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
 			ASSERT((char *)dup - (char *)hdr ==
 			       be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
-			xfs_dir2_data_freeinsert(hdr, dup, loghead);
+			xfs_dir2_data_freeinsert(hdr, bf, dup, loghead);
 			p += be16_to_cpu(dup->length);
 		}
 		/*
@@ -538,8 +552,8 @@
 		else {
 			dep = (xfs_dir2_data_entry_t *)p;
 			ASSERT((char *)dep - (char *)hdr ==
-			       be16_to_cpu(*xfs_dir3_data_entry_tag_p(mp, dep)));
-			p += xfs_dir3_data_entsize(mp, dep->namelen);
+			       be16_to_cpu(*dp->d_ops->data_entry_tag_p(dep)));
+			p += dp->d_ops->data_entsize(dep->namelen);
 		}
 	}
 }
@@ -594,8 +608,8 @@
 	} else
 		hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
 
-	bf = xfs_dir3_data_bestfree_p(hdr);
-	bf[0].offset = cpu_to_be16(xfs_dir3_data_entry_offset(hdr));
+	bf = dp->d_ops->data_bestfree_p(hdr);
+	bf[0].offset = cpu_to_be16(dp->d_ops->data_entry_offset);
 	for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
 		bf[i].length = 0;
 		bf[i].offset = 0;
@@ -604,17 +618,17 @@
 	/*
 	 * Set up an unused entry for the block's body.
 	 */
-	dup = xfs_dir3_data_unused_p(hdr);
+	dup = dp->d_ops->data_unused_p(hdr);
 	dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
 
-	t = mp->m_dirblksize - (uint)xfs_dir3_data_entry_offset(hdr);
+	t = mp->m_dirblksize - (uint)dp->d_ops->data_entry_offset;
 	bf[0].length = cpu_to_be16(t);
 	dup->length = cpu_to_be16(t);
 	*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
 	/*
 	 * Log it and return it.
 	 */
-	xfs_dir2_data_log_header(tp, bp);
+	xfs_dir2_data_log_header(tp, dp, bp);
 	xfs_dir2_data_log_unused(tp, bp, dup);
 	*bpp = bp;
 	return 0;
@@ -626,11 +640,11 @@
 void
 xfs_dir2_data_log_entry(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp,
 	xfs_dir2_data_entry_t	*dep)		/* data entry pointer */
 {
 	struct xfs_dir2_data_hdr *hdr = bp->b_addr;
-	struct xfs_mount	*mp = tp->t_mountp;
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -638,7 +652,7 @@
 	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
 
 	xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
-		(uint)((char *)(xfs_dir3_data_entry_tag_p(mp, dep) + 1) -
+		(uint)((char *)(dp->d_ops->data_entry_tag_p(dep) + 1) -
 		       (char *)hdr - 1));
 }
 
@@ -648,16 +662,19 @@
 void
 xfs_dir2_data_log_header(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp)
 {
-	xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
+#ifdef DEBUG
+	struct xfs_dir2_data_hdr *hdr = bp->b_addr;
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
+#endif
 
-	xfs_trans_log_buf(tp, bp, 0, xfs_dir3_data_entry_offset(hdr) - 1);
+	xfs_trans_log_buf(tp, bp, 0, dp->d_ops->data_entry_offset - 1);
 }
 
 /*
@@ -698,6 +715,7 @@
 void
 xfs_dir2_data_make_free(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp,
 	xfs_dir2_data_aoff_t	offset,		/* starting byte offset */
 	xfs_dir2_data_aoff_t	len,		/* length in bytes */
@@ -735,7 +753,7 @@
 	 * If this isn't the start of the block, then back up to
 	 * the previous entry and see if it's free.
 	 */
-	if (offset > xfs_dir3_data_entry_offset(hdr)) {
+	if (offset > dp->d_ops->data_entry_offset) {
 		__be16			*tagp;	/* tag just before us */
 
 		tagp = (__be16 *)((char *)hdr + offset) - 1;
@@ -761,15 +779,15 @@
 	 * Previous and following entries are both free,
 	 * merge everything into a single free entry.
 	 */
-	bf = xfs_dir3_data_bestfree_p(hdr);
+	bf = dp->d_ops->data_bestfree_p(hdr);
 	if (prevdup && postdup) {
 		xfs_dir2_data_free_t	*dfp2;	/* another bestfree pointer */
 
 		/*
 		 * See if prevdup and/or postdup are in bestfree table.
 		 */
-		dfp = xfs_dir2_data_freefind(hdr, prevdup);
-		dfp2 = xfs_dir2_data_freefind(hdr, postdup);
+		dfp = xfs_dir2_data_freefind(hdr, bf, prevdup);
+		dfp2 = xfs_dir2_data_freefind(hdr, bf, postdup);
 		/*
 		 * We need a rescan unless there are exactly 2 free entries,
 		 * namely our two.  Then we know what's happening, otherwise
@@ -797,12 +815,13 @@
 				ASSERT(dfp2 == dfp);
 				dfp2 = &bf[1];
 			}
-			xfs_dir2_data_freeremove(hdr, dfp2, needlogp);
-			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp2, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
 			/*
 			 * Now insert the new entry.
 			 */
-			dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
+			dfp = xfs_dir2_data_freeinsert(hdr, bf, prevdup,
+						       needlogp);
 			ASSERT(dfp == &bf[0]);
 			ASSERT(dfp->length == prevdup->length);
 			ASSERT(!dfp[1].length);
@@ -813,7 +832,7 @@
 	 * The entry before us is free, merge with it.
 	 */
 	else if (prevdup) {
-		dfp = xfs_dir2_data_freefind(hdr, prevdup);
+		dfp = xfs_dir2_data_freefind(hdr, bf, prevdup);
 		be16_add_cpu(&prevdup->length, len);
 		*xfs_dir2_data_unused_tag_p(prevdup) =
 			cpu_to_be16((char *)prevdup - (char *)hdr);
@@ -824,8 +843,8 @@
 		 * the old one and add the new one.
 		 */
 		if (dfp) {
-			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
-			xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+			xfs_dir2_data_freeinsert(hdr, bf, prevdup, needlogp);
 		}
 		/*
 		 * Otherwise we need a scan if the new entry is big enough.
@@ -839,7 +858,7 @@
 	 * The following entry is free, merge with it.
 	 */
 	else if (postdup) {
-		dfp = xfs_dir2_data_freefind(hdr, postdup);
+		dfp = xfs_dir2_data_freefind(hdr, bf, postdup);
 		newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
 		newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
 		newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
@@ -852,8 +871,8 @@
 		 * the old one and add the new one.
 		 */
 		if (dfp) {
-			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
-			xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+			xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp);
 		}
 		/*
 		 * Otherwise we need a scan if the new entry is big enough.
@@ -873,7 +892,7 @@
 		*xfs_dir2_data_unused_tag_p(newdup) =
 			cpu_to_be16((char *)newdup - (char *)hdr);
 		xfs_dir2_data_log_unused(tp, bp, newdup);
-		xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+		xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp);
 	}
 	*needscanp = needscan;
 }
@@ -884,6 +903,7 @@
 void
 xfs_dir2_data_use_free(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp,
 	xfs_dir2_data_unused_t	*dup,		/* unused entry */
 	xfs_dir2_data_aoff_t	offset,		/* starting offset to use */
@@ -913,9 +933,9 @@
 	/*
 	 * Look up the entry in the bestfree table.
 	 */
-	dfp = xfs_dir2_data_freefind(hdr, dup);
 	oldlen = be16_to_cpu(dup->length);
-	bf = xfs_dir3_data_bestfree_p(hdr);
+	bf = dp->d_ops->data_bestfree_p(hdr);
+	dfp = xfs_dir2_data_freefind(hdr, bf, dup);
 	ASSERT(dfp || oldlen <= be16_to_cpu(bf[2].length));
 	/*
 	 * Check for alignment with front and back of the entry.
@@ -932,7 +952,8 @@
 		if (dfp) {
 			needscan = (bf[2].offset != 0);
 			if (!needscan)
-				xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+				xfs_dir2_data_freeremove(hdr, bf, dfp,
+							 needlogp);
 		}
 	}
 	/*
@@ -950,8 +971,9 @@
 		 * If it was in the table, remove it and add the new one.
 		 */
 		if (dfp) {
-			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
-			dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+			dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup,
+						       needlogp);
 			ASSERT(dfp != NULL);
 			ASSERT(dfp->length == newdup->length);
 			ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
@@ -977,8 +999,9 @@
 		 * If it was in the table, remove it and add the new one.
 		 */
 		if (dfp) {
-			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
-			dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+			xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+			dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup,
+						       needlogp);
 			ASSERT(dfp != NULL);
 			ASSERT(dfp->length == newdup->length);
 			ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
@@ -1017,9 +1040,11 @@
 		if (dfp) {
 			needscan = (bf[2].length != 0);
 			if (!needscan) {
-				xfs_dir2_data_freeremove(hdr, dfp, needlogp);
-				xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
-				xfs_dir2_data_freeinsert(hdr, newdup2,
+				xfs_dir2_data_freeremove(hdr, bf, dfp,
+							 needlogp);
+				xfs_dir2_data_freeinsert(hdr, bf, newdup,
+							 needlogp);
+				xfs_dir2_data_freeinsert(hdr, bf, newdup2,
 							 needlogp);
 			}
 		}
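
Side note (illustrative, not part of the patch): with this change the
bestfree table pointer is computed once per operation via
dp->d_ops->data_bestfree_p() and passed explicitly to freefind,
freeinsert and freeremove.  The table itself holds the three longest
free regions of a data block in descending length order; below is a
self-contained sketch of the insert step.  Field names mirror struct
xfs_dir2_data_free; everything else is invented for illustration.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FD_COUNT 3	/* cf. XFS_DIR2_DATA_FD_COUNT */

	struct bestfree { uint16_t offset; uint16_t length; };

	/*
	 * Keep the table sorted by descending length; return the slot used,
	 * or NULL when the region is shorter than everything tracked.
	 */
	static struct bestfree *
	bestfree_insert(struct bestfree bf[FD_COUNT], uint16_t off, uint16_t len)
	{
		int	i;

		for (i = 0; i < FD_COUNT; i++) {
			if (len > bf[i].length) {
				/* slide shorter entries down to make room */
				memmove(&bf[i + 1], &bf[i],
					(FD_COUNT - 1 - i) * sizeof(bf[0]));
				bf[i].offset = off;
				bf[i].length = len;
				return &bf[i];
			}
		}
		return NULL;
	}

	int main(void)
	{
		struct bestfree bf[FD_COUNT] = { {0, 0}, {0, 0}, {0, 0} };

		bestfree_insert(bf, 64, 24);
		bestfree_insert(bf, 200, 48);		/* displaces slot 0 */
		printf("%u %u\n", bf[0].offset, bf[0].length);	/* 200 48 */
		return 0;
	}
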
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 1021c83..ae47ec6 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -18,23 +18,21 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
@@ -52,21 +50,21 @@
  * Pop an assert if something is wrong.
  */
 #ifdef DEBUG
-#define	xfs_dir3_leaf_check(mp, bp) \
+#define	xfs_dir3_leaf_check(dp, bp) \
 do { \
-	if (!xfs_dir3_leaf1_check((mp), (bp))) \
+	if (!xfs_dir3_leaf1_check((dp), (bp))) \
 		ASSERT(0); \
 } while (0);
 
 STATIC bool
 xfs_dir3_leaf1_check(
-	struct xfs_mount	*mp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp)
 {
 	struct xfs_dir2_leaf	*leaf = bp->b_addr;
 	struct xfs_dir3_icleaf_hdr leafhdr;
 
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
 		struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -75,71 +73,16 @@
 	} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
 		return false;
 
-	return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+	return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
 }
 #else
-#define	xfs_dir3_leaf_check(mp, bp)
+#define	xfs_dir3_leaf_check(dp, bp)
 #endif
 
-void
-xfs_dir3_leaf_hdr_from_disk(
-	struct xfs_dir3_icleaf_hdr	*to,
-	struct xfs_dir2_leaf		*from)
-{
-	if (from->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
-	    from->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
-		to->forw = be32_to_cpu(from->hdr.info.forw);
-		to->back = be32_to_cpu(from->hdr.info.back);
-		to->magic = be16_to_cpu(from->hdr.info.magic);
-		to->count = be16_to_cpu(from->hdr.count);
-		to->stale = be16_to_cpu(from->hdr.stale);
-	} else {
-		struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
-
-		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
-		to->back = be32_to_cpu(hdr3->info.hdr.back);
-		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
-		to->count = be16_to_cpu(hdr3->count);
-		to->stale = be16_to_cpu(hdr3->stale);
-	}
-
-	ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
-	       to->magic == XFS_DIR3_LEAF1_MAGIC ||
-	       to->magic == XFS_DIR2_LEAFN_MAGIC ||
-	       to->magic == XFS_DIR3_LEAFN_MAGIC);
-}
-
-void
-xfs_dir3_leaf_hdr_to_disk(
-	struct xfs_dir2_leaf		*to,
-	struct xfs_dir3_icleaf_hdr	*from)
-{
-	ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
-	       from->magic == XFS_DIR3_LEAF1_MAGIC ||
-	       from->magic == XFS_DIR2_LEAFN_MAGIC ||
-	       from->magic == XFS_DIR3_LEAFN_MAGIC);
-
-	if (from->magic == XFS_DIR2_LEAF1_MAGIC ||
-	    from->magic == XFS_DIR2_LEAFN_MAGIC) {
-		to->hdr.info.forw = cpu_to_be32(from->forw);
-		to->hdr.info.back = cpu_to_be32(from->back);
-		to->hdr.info.magic = cpu_to_be16(from->magic);
-		to->hdr.count = cpu_to_be16(from->count);
-		to->hdr.stale = cpu_to_be16(from->stale);
-	} else {
-		struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
-
-		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
-		hdr3->info.hdr.back = cpu_to_be32(from->back);
-		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
-		hdr3->count = cpu_to_be16(from->count);
-		hdr3->stale = cpu_to_be16(from->stale);
-	}
-}
-
 bool
 xfs_dir3_leaf_check_int(
 	struct xfs_mount	*mp,
+	struct xfs_inode	*dp,
 	struct xfs_dir3_icleaf_hdr *hdr,
 	struct xfs_dir2_leaf	*leaf)
 {
@@ -147,8 +90,21 @@
 	xfs_dir2_leaf_tail_t	*ltp;
 	int			stale;
 	int			i;
+	const struct xfs_dir_ops *ops;
+	struct xfs_dir3_icleaf_hdr leafhdr;
 
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	/*
+	 * We can be passed a null dp here from a verifier, so we need to go
+	 * the hard way to get the ops.
+	 */
+	ops = xfs_dir_get_ops(mp, dp);
+
+	if (!hdr) {
+		ops->leaf_hdr_from_disk(&leafhdr, leaf);
+		hdr = &leafhdr;
+	}
+
+	ents = ops->leaf_ents_p(leaf);
 	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
 
 	/*
@@ -156,7 +112,7 @@
 	 * Should factor in the size of the bests table as well.
 	 * We can deduce a value for that from di_size.
 	 */
-	if (hdr->count > xfs_dir3_max_leaf_ents(mp, leaf))
+	if (hdr->count > ops->leaf_max_ents(mp))
 		return false;
 
 	/* Leaves and bests don't overlap in leaf format. */
@@ -192,7 +148,6 @@
 {
 	struct xfs_mount	*mp = bp->b_target->bt_mount;
 	struct xfs_dir2_leaf	*leaf = bp->b_addr;
-	struct xfs_dir3_icleaf_hdr leafhdr;
 
 	ASSERT(magic == XFS_DIR2_LEAF1_MAGIC || magic == XFS_DIR2_LEAFN_MAGIC);
 
@@ -214,8 +169,7 @@
 			return false;
 	}
 
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+	return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
 }
 
 static void
@@ -401,7 +355,7 @@
 		return error;
 
 	xfs_dir3_leaf_init(mp, tp, bp, dp->i_ino, magic);
-	xfs_dir3_leaf_log_header(tp, bp);
+	xfs_dir3_leaf_log_header(tp, dp, bp);
 	if (magic == XFS_DIR2_LEAF1_MAGIC)
 		xfs_dir3_leaf_log_tail(tp, bp);
 	*bpp = bp;
@@ -462,31 +416,31 @@
 	xfs_dir3_data_check(dp, dbp);
 	btp = xfs_dir2_block_tail_p(mp, hdr);
 	blp = xfs_dir2_block_leaf_p(btp);
-	bf = xfs_dir3_data_bestfree_p(hdr);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	bf = dp->d_ops->data_bestfree_p(hdr);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	/*
 	 * Set the counts in the leaf header.
 	 */
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 	leafhdr.count = be32_to_cpu(btp->count);
 	leafhdr.stale = be32_to_cpu(btp->stale);
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, lbp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, lbp);
 
 	/*
 	 * Could compact these but I think we always do the conversion
 	 * after squeezing out stale entries.
 	 */
 	memcpy(ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t));
-	xfs_dir3_leaf_log_ents(tp, lbp, 0, leafhdr.count - 1);
+	xfs_dir3_leaf_log_ents(tp, dp, lbp, 0, leafhdr.count - 1);
 	needscan = 0;
 	needlog = 1;
 	/*
 	 * Make the space formerly occupied by the leaf entries and block
 	 * tail be free.
 	 */
-	xfs_dir2_data_make_free(tp, dbp,
+	xfs_dir2_data_make_free(tp, dp, dbp,
 		(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
 		(xfs_dir2_data_aoff_t)((char *)hdr + mp->m_dirblksize -
 				       (char *)blp),
@@ -502,7 +456,7 @@
 		hdr->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
 
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	/*
 	 * Set up leaf tail and bests table.
 	 */
@@ -514,8 +468,8 @@
 	 * Log the data header and leaf bests table.
 	 */
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
-	xfs_dir3_leaf_check(mp, lbp);
+		xfs_dir2_data_log_header(tp, dp, dbp);
+	xfs_dir3_leaf_check(dp, lbp);
 	xfs_dir3_data_check(dp, dbp);
 	xfs_dir3_leaf_log_bests(tp, lbp, 0, 0);
 	return 0;
@@ -699,10 +653,10 @@
 	index = xfs_dir2_leaf_search_hash(args, lbp);
 	leaf = lbp->b_addr;
 	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 	bestsp = xfs_dir2_leaf_bests_p(ltp);
-	length = xfs_dir3_data_entsize(mp, args->namelen);
+	length = dp->d_ops->data_entsize(args->namelen);
 
 	/*
 	 * See if there are any entries with the same hash value
@@ -864,7 +818,7 @@
 		else
 			xfs_dir3_leaf_log_bests(tp, lbp, use_block, use_block);
 		hdr = dbp->b_addr;
-		bf = xfs_dir3_data_bestfree_p(hdr);
+		bf = dp->d_ops->data_bestfree_p(hdr);
 		bestsp[use_block] = bf[0].length;
 		grown = 1;
 	} else {
@@ -880,7 +834,7 @@
 			return error;
 		}
 		hdr = dbp->b_addr;
-		bf = xfs_dir3_data_bestfree_p(hdr);
+		bf = dp->d_ops->data_bestfree_p(hdr);
 		grown = 0;
 	}
 	/*
@@ -893,7 +847,7 @@
 	/*
 	 * Mark the initial part of our freespace in use for the new entry.
 	 */
-	xfs_dir2_data_use_free(tp, dbp, dup,
+	xfs_dir2_data_use_free(tp, dp, dbp, dup,
 		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
 		&needlog, &needscan);
 	/*
@@ -903,20 +857,20 @@
 	dep->inumber = cpu_to_be64(args->inumber);
 	dep->namelen = args->namelen;
 	memcpy(dep->name, args->name, dep->namelen);
-	xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
-	tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+	dp->d_ops->data_put_ftype(dep, args->filetype);
+	tagp = dp->d_ops->data_entry_tag_p(dep);
 	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
 	/*
 	 * Need to scan to fix up the bestfree table.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	/*
 	 * Need to log the data block's header.
 	 */
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
-	xfs_dir2_data_log_entry(tp, dbp, dep);
+		xfs_dir2_data_log_header(tp, dp, dbp);
+	xfs_dir2_data_log_entry(tp, dp, dbp, dep);
 	/*
 	 * If the bests table needs to be changed, do it.
 	 * Log the change unless we've already done that.
@@ -939,10 +893,10 @@
 	/*
 	 * Log the leaf fields and give up the buffers.
 	 */
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, lbp);
-	xfs_dir3_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
-	xfs_dir3_leaf_check(mp, lbp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, lbp);
+	xfs_dir3_leaf_log_ents(tp, dp, lbp, lfloglow, lfloghigh);
+	xfs_dir3_leaf_check(dp, lbp);
 	xfs_dir3_data_check(dp, dbp);
 	return 0;
 }
@@ -962,6 +916,7 @@
 	int		loglow;		/* first leaf entry to log */
 	int		to;		/* target leaf index */
 	struct xfs_dir2_leaf_entry *ents;
+	struct xfs_inode *dp = args->dp;
 
 	leaf = bp->b_addr;
 	if (!leafhdr->stale)
@@ -970,7 +925,7 @@
 	/*
 	 * Compress out the stale entries in place.
 	 */
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	for (from = to = 0, loglow = -1; from < leafhdr->count; from++) {
 		if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
 			continue;
@@ -991,10 +946,10 @@
 	leafhdr->count -= leafhdr->stale;
 	leafhdr->stale = 0;
 
-	xfs_dir3_leaf_hdr_to_disk(leaf, leafhdr);
-	xfs_dir3_leaf_log_header(args->trans, bp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, leafhdr);
+	xfs_dir3_leaf_log_header(args->trans, dp, bp);
 	if (loglow != -1)
-		xfs_dir3_leaf_log_ents(args->trans, bp, loglow, to - 1);
+		xfs_dir3_leaf_log_ents(args->trans, dp, bp, loglow, to - 1);
 }
 
 /*
@@ -1121,10 +1076,11 @@
  */
 void
 xfs_dir3_leaf_log_ents(
-	xfs_trans_t		*tp,		/* transaction pointer */
-	struct xfs_buf		*bp,		/* leaf buffer */
-	int			first,		/* first entry to log */
-	int			last)		/* last entry to log */
+	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
+	struct xfs_buf		*bp,
+	int			first,
+	int			last)
 {
 	xfs_dir2_leaf_entry_t	*firstlep;	/* pointer to first entry */
 	xfs_dir2_leaf_entry_t	*lastlep;	/* pointer to last entry */
@@ -1136,7 +1092,7 @@
 	       leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 	       leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
 
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	firstlep = &ents[first];
 	lastlep = &ents[last];
 	xfs_trans_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
@@ -1149,6 +1105,7 @@
 void
 xfs_dir3_leaf_log_header(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp)
 {
 	struct xfs_dir2_leaf	*leaf = bp->b_addr;
@@ -1159,7 +1116,7 @@
 	       leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
 
 	xfs_trans_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
-			  xfs_dir3_leaf_hdr_size(leaf) - 1);
+			  dp->d_ops->leaf_hdr_size - 1);
 }
 
 /*
@@ -1214,9 +1171,9 @@
 	}
 	tp = args->trans;
 	dp = args->dp;
-	xfs_dir3_leaf_check(dp->i_mount, lbp);
+	xfs_dir3_leaf_check(dp, lbp);
 	leaf = lbp->b_addr;
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	/*
 	 * Get to the leaf entry and contained data entry address.
 	 */
@@ -1232,7 +1189,7 @@
 	 * Return the found inode number & CI name if appropriate
 	 */
 	args->inumber = be64_to_cpu(dep->inumber);
-	args->filetype = xfs_dir3_dirent_get_ftype(dp->i_mount, dep);
+	args->filetype = dp->d_ops->data_get_ftype(dep);
 	error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
 	xfs_trans_brelse(tp, dbp);
 	xfs_trans_brelse(tp, lbp);
@@ -1279,9 +1236,9 @@
 
 	*lbpp = lbp;
 	leaf = lbp->b_addr;
-	xfs_dir3_leaf_check(mp, lbp);
-	ents = xfs_dir3_leaf_ents_p(leaf);
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	xfs_dir3_leaf_check(dp, lbp);
+	ents = dp->d_ops->leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	/*
 	 * Look for the first leaf entry with our hash value.
@@ -1415,9 +1372,9 @@
 	leaf = lbp->b_addr;
 	hdr = dbp->b_addr;
 	xfs_dir3_data_check(dp, dbp);
-	bf = xfs_dir3_data_bestfree_p(hdr);
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	bf = dp->d_ops->data_bestfree_p(hdr);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	/*
 	 * Point to the leaf entry, use that to point to the data entry.
 	 */
@@ -1433,27 +1390,27 @@
 	/*
 	 * Mark the former data entry unused.
 	 */
-	xfs_dir2_data_make_free(tp, dbp,
+	xfs_dir2_data_make_free(tp, dp, dbp,
 		(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
-		xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+		dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
 	/*
 	 * We just mark the leaf entry stale by putting a null in it.
 	 */
 	leafhdr.stale++;
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, lbp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, lbp);
 
 	lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
-	xfs_dir3_leaf_log_ents(tp, lbp, index, index);
+	xfs_dir3_leaf_log_ents(tp, dp, lbp, index, index);
 
 	/*
 	 * Scan the freespace in the data block again if necessary,
 	 * log the data block header if necessary.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
+		xfs_dir2_data_log_header(tp, dp, dbp);
 	/*
 	 * If the longest freespace in the data block has changed,
 	 * put the new value in the bests table and log that.
@@ -1467,7 +1424,7 @@
 	 * If the data block is now empty then get rid of the data block.
 	 */
 	if (be16_to_cpu(bf[0].length) ==
-			mp->m_dirblksize - xfs_dir3_data_entry_offset(hdr)) {
+			mp->m_dirblksize - dp->d_ops->data_entry_offset) {
 		ASSERT(db != mp->m_dirdatablk);
 		if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
 			/*
@@ -1478,7 +1435,7 @@
 			 */
 			if (error == ENOSPC && args->total == 0)
 				error = 0;
-			xfs_dir3_leaf_check(mp, lbp);
+			xfs_dir3_leaf_check(dp, lbp);
 			return error;
 		}
 		dbp = NULL;
@@ -1512,7 +1469,7 @@
 	else if (db != mp->m_dirdatablk)
 		dbp = NULL;
 
-	xfs_dir3_leaf_check(mp, lbp);
+	xfs_dir3_leaf_check(dp, lbp);
 	/*
 	 * See if we can convert to block form.
 	 */
@@ -1547,7 +1504,7 @@
 	}
 	dp = args->dp;
 	leaf = lbp->b_addr;
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	/*
 	 * Point to the leaf entry, get data address from it.
 	 */
@@ -1563,10 +1520,10 @@
 	 * Put the new inode number in, log it.
 	 */
 	dep->inumber = cpu_to_be64(args->inumber);
-	xfs_dir3_dirent_put_ftype(dp->i_mount, dep, args->filetype);
+	dp->d_ops->data_put_ftype(dep, args->filetype);
 	tp = args->trans;
-	xfs_dir2_data_log_entry(tp, dbp, dep);
-	xfs_dir3_leaf_check(dp->i_mount, lbp);
+	xfs_dir2_data_log_entry(tp, dp, dbp, dep);
+	xfs_dir3_leaf_check(dp, lbp);
 	xfs_trans_brelse(tp, lbp);
 	return 0;
 }
@@ -1592,8 +1549,8 @@
 	struct xfs_dir3_icleaf_hdr leafhdr;
 
 	leaf = lbp->b_addr;
-	ents = xfs_dir3_leaf_ents_p(leaf);
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = args->dp->d_ops->leaf_ents_p(leaf);
+	args->dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	/*
 	 * Note, the table cannot be empty, so we have to go through the loop.
@@ -1661,12 +1618,12 @@
 #ifdef DEBUG
 {
 	struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
-	struct xfs_dir2_data_free *bf = xfs_dir3_data_bestfree_p(hdr);
+	struct xfs_dir2_data_free *bf = dp->d_ops->data_bestfree_p(hdr);
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC));
 	ASSERT(be16_to_cpu(bf[0].length) ==
-	       mp->m_dirblksize - xfs_dir3_data_entry_offset(hdr));
+	       mp->m_dirblksize - dp->d_ops->data_entry_offset);
 	ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
 }
 #endif
@@ -1782,7 +1739,7 @@
 		return 0;
 	lbp = state->path.blk[0].bp;
 	leaf = lbp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 	       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -1794,7 +1751,7 @@
 	if (error)
 		return error;
 	free = fbp->b_addr;
-	xfs_dir3_free_hdr_from_disk(&freehdr, free);
+	dp->d_ops->free_hdr_from_disk(&freehdr, free);
 
 	ASSERT(!freehdr.firstdb);
 
@@ -1828,14 +1785,14 @@
 	/*
 	 * Set up the leaf bests table.
 	 */
-	memcpy(xfs_dir2_leaf_bests_p(ltp), xfs_dir3_free_bests_p(mp, free),
+	memcpy(xfs_dir2_leaf_bests_p(ltp), dp->d_ops->free_bests_p(free),
 		freehdr.nvalid * sizeof(xfs_dir2_data_off_t));
 
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, lbp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, lbp);
 	xfs_dir3_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
 	xfs_dir3_leaf_log_tail(tp, lbp);
-	xfs_dir3_leaf_check(mp, lbp);
+	xfs_dir3_leaf_check(dp, lbp);
 
 	/*
 	 * Get rid of the freespace block.
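
Side note (illustrative, not part of the patch): the deleted
xfs_dir3_leaf_hdr_from_disk()/_to_disk() pair lives on behind
dp->d_ops.  The underlying pattern is worth a sketch: decode the
big-endian on-disk header into a host-order in-core copy once, mutate
that copy, and encode it back just before logging (the
leafhdr.stale++ / leaf_hdr_to_disk / leaf_log_header sequence visible
in the hunks above).  Layouts below are simplified; the real v2 and v3
headers differ in more fields than shown.

	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohs/htons stand in for be16_to_cpu/cpu_to_be16 */

	struct disk_leaf_hdr { uint16_t magic, count, stale; };	/* big-endian */
	struct icleaf_hdr    { unsigned magic, count, stale; };	/* host order */

	static void leaf_hdr_from_disk(struct icleaf_hdr *to,
				       const struct disk_leaf_hdr *from)
	{
		to->magic = ntohs(from->magic);
		to->count = ntohs(from->count);
		to->stale = ntohs(from->stale);
	}

	static void leaf_hdr_to_disk(struct disk_leaf_hdr *to,
				     const struct icleaf_hdr *from)
	{
		to->magic = htons((uint16_t)from->magic);
		to->count = htons((uint16_t)from->count);
		to->stale = htons((uint16_t)from->stale);
	}

	int main(void)
	{
		struct disk_leaf_hdr d = { htons(0xd2f1), htons(10), htons(0) };
		struct icleaf_hdr ic;

		leaf_hdr_from_disk(&ic, &d);	/* decode once */
		ic.stale++;			/* work on the host-order copy */
		leaf_hdr_to_disk(&d, &ic);	/* encode back before logging */
		return d.stale == htons(1) ? 0 : 1;
	}
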
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 4c3dba7..56369d4 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -18,22 +18,21 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
@@ -55,21 +54,21 @@
  * Check internal consistency of a leafn block.
  */
 #ifdef DEBUG
-#define	xfs_dir3_leaf_check(mp, bp) \
+#define	xfs_dir3_leaf_check(dp, bp) \
 do { \
-	if (!xfs_dir3_leafn_check((mp), (bp))) \
+	if (!xfs_dir3_leafn_check((dp), (bp))) \
 		ASSERT(0); \
 } while (0);
 
 static bool
 xfs_dir3_leafn_check(
-	struct xfs_mount	*mp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp)
 {
 	struct xfs_dir2_leaf	*leaf = bp->b_addr;
 	struct xfs_dir3_icleaf_hdr leafhdr;
 
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
 		struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -78,10 +77,10 @@
 	} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
 		return false;
 
-	return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+	return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
 }
 #else
-#define	xfs_dir3_leaf_check(mp, bp)
+#define	xfs_dir3_leaf_check(dp, bp)
 #endif
 
 static bool
@@ -193,53 +192,6 @@
 	return __xfs_dir3_free_read(tp, dp, fbno, -2, bpp);
 }
 
-
-void
-xfs_dir3_free_hdr_from_disk(
-	struct xfs_dir3_icfree_hdr	*to,
-	struct xfs_dir2_free		*from)
-{
-	if (from->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)) {
-		to->magic = be32_to_cpu(from->hdr.magic);
-		to->firstdb = be32_to_cpu(from->hdr.firstdb);
-		to->nvalid = be32_to_cpu(from->hdr.nvalid);
-		to->nused = be32_to_cpu(from->hdr.nused);
-	} else {
-		struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
-
-		to->magic = be32_to_cpu(hdr3->hdr.magic);
-		to->firstdb = be32_to_cpu(hdr3->firstdb);
-		to->nvalid = be32_to_cpu(hdr3->nvalid);
-		to->nused = be32_to_cpu(hdr3->nused);
-	}
-
-	ASSERT(to->magic == XFS_DIR2_FREE_MAGIC ||
-	       to->magic == XFS_DIR3_FREE_MAGIC);
-}
-
-static void
-xfs_dir3_free_hdr_to_disk(
-	struct xfs_dir2_free		*to,
-	struct xfs_dir3_icfree_hdr	*from)
-{
-	ASSERT(from->magic == XFS_DIR2_FREE_MAGIC ||
-	       from->magic == XFS_DIR3_FREE_MAGIC);
-
-	if (from->magic == XFS_DIR2_FREE_MAGIC) {
-		to->hdr.magic = cpu_to_be32(from->magic);
-		to->hdr.firstdb = cpu_to_be32(from->firstdb);
-		to->hdr.nvalid = cpu_to_be32(from->nvalid);
-		to->hdr.nused = cpu_to_be32(from->nused);
-	} else {
-		struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
-
-		hdr3->hdr.magic = cpu_to_be32(from->magic);
-		hdr3->firstdb = cpu_to_be32(from->firstdb);
-		hdr3->nvalid = cpu_to_be32(from->nvalid);
-		hdr3->nused = cpu_to_be32(from->nused);
-	}
-}
-
 static int
 xfs_dir3_free_get_buf(
 	struct xfs_trans	*tp,
@@ -277,7 +229,7 @@
 		uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
 	} else
 		hdr.magic = XFS_DIR2_FREE_MAGIC;
-	xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
+	dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr);
 	*bpp = bp;
 	return 0;
 }
@@ -288,6 +240,7 @@
 STATIC void
 xfs_dir2_free_log_bests(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp,
 	int			first,		/* first entry to log */
 	int			last)		/* last entry to log */
@@ -296,7 +249,7 @@
 	__be16			*bests;
 
 	free = bp->b_addr;
-	bests = xfs_dir3_free_bests_p(tp->t_mountp, free);
+	bests = dp->d_ops->free_bests_p(free);
 	ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
 	       free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
 	xfs_trans_log_buf(tp, bp,
@@ -311,6 +264,7 @@
 static void
 xfs_dir2_free_log_header(
 	struct xfs_trans	*tp,
+	struct xfs_inode	*dp,
 	struct xfs_buf		*bp)
 {
 #ifdef DEBUG
@@ -320,7 +274,7 @@
 	ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
 	       free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
 #endif
-	xfs_trans_log_buf(tp, bp, 0, xfs_dir3_free_hdr_size(tp->t_mountp) - 1);
+	xfs_trans_log_buf(tp, bp, 0, dp->d_ops->free_hdr_size - 1);
 }
 
 /*
@@ -369,7 +323,7 @@
 		return error;
 
 	free = fbp->b_addr;
-	xfs_dir3_free_hdr_from_disk(&freehdr, free);
+	dp->d_ops->free_hdr_from_disk(&freehdr, free);
 	leaf = lbp->b_addr;
 	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
 	ASSERT(be32_to_cpu(ltp->bestcount) <=
@@ -380,7 +334,7 @@
 	 * Count active entries.
 	 */
 	from = xfs_dir2_leaf_bests_p(ltp);
-	to = xfs_dir3_free_bests_p(mp, free);
+	to = dp->d_ops->free_bests_p(free);
 	for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
 		if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
 			n++;
@@ -393,9 +347,9 @@
 	freehdr.nused = n;
 	freehdr.nvalid = be32_to_cpu(ltp->bestcount);
 
-	xfs_dir3_free_hdr_to_disk(fbp->b_addr, &freehdr);
-	xfs_dir2_free_log_bests(tp, fbp, 0, freehdr.nvalid - 1);
-	xfs_dir2_free_log_header(tp, fbp);
+	dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
+	xfs_dir2_free_log_bests(tp, dp, fbp, 0, freehdr.nvalid - 1);
+	xfs_dir2_free_log_header(tp, dp, fbp);
 
 	/*
 	 * Converting the leaf to a leafnode is just a matter of changing the
@@ -409,8 +363,8 @@
 		leaf->hdr.info.magic = cpu_to_be16(XFS_DIR3_LEAFN_MAGIC);
 	lbp->b_ops = &xfs_dir3_leafn_buf_ops;
 	xfs_trans_buf_set_type(tp, lbp, XFS_BLFT_DIR_LEAFN_BUF);
-	xfs_dir3_leaf_log_header(tp, lbp);
-	xfs_dir3_leaf_check(mp, lbp);
+	xfs_dir3_leaf_log_header(tp, dp, lbp);
+	xfs_dir3_leaf_check(dp, lbp);
 	return 0;
 }
 
@@ -443,8 +397,8 @@
 	mp = dp->i_mount;
 	tp = args->trans;
 	leaf = bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	/*
 	 * Quick check just to make sure we are not going to index
@@ -460,7 +414,7 @@
 	 * a compact.
 	 */
 
-	if (leafhdr.count == xfs_dir3_max_leaf_ents(mp, leaf)) {
+	if (leafhdr.count == dp->d_ops->leaf_max_ents(mp)) {
 		if (!leafhdr.stale)
 			return XFS_ERROR(ENOSPC);
 		compact = leafhdr.stale > 1;
@@ -498,30 +452,30 @@
 	lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp,
 				args->blkno, args->index));
 
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, bp);
-	xfs_dir3_leaf_log_ents(tp, bp, lfloglow, lfloghigh);
-	xfs_dir3_leaf_check(mp, bp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, bp);
+	xfs_dir3_leaf_log_ents(tp, dp, bp, lfloglow, lfloghigh);
+	xfs_dir3_leaf_check(dp, bp);
 	return 0;
 }
 
 #ifdef DEBUG
 static void
 xfs_dir2_free_hdr_check(
-	struct xfs_mount *mp,
+	struct xfs_inode *dp,
 	struct xfs_buf	*bp,
 	xfs_dir2_db_t	db)
 {
 	struct xfs_dir3_icfree_hdr hdr;
 
-	xfs_dir3_free_hdr_from_disk(&hdr, bp->b_addr);
+	dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr);
 
-	ASSERT((hdr.firstdb % xfs_dir3_free_max_bests(mp)) == 0);
+	ASSERT((hdr.firstdb % dp->d_ops->free_max_bests(dp->i_mount)) == 0);
 	ASSERT(hdr.firstdb <= db);
 	ASSERT(db < hdr.firstdb + hdr.nvalid);
 }
 #else
-#define xfs_dir2_free_hdr_check(mp, dp, db)
+#define xfs_dir2_free_hdr_check(dp, bp, db)
 #endif	/* DEBUG */
 
 /*
@@ -530,6 +484,7 @@
  */
 xfs_dahash_t					/* hash value */
 xfs_dir2_leafn_lasthash(
+	struct xfs_inode *dp,
 	struct xfs_buf	*bp,			/* leaf buffer */
 	int		*count)			/* count of entries in leaf */
 {
@@ -537,7 +492,7 @@
 	struct xfs_dir2_leaf_entry *ents;
 	struct xfs_dir3_icleaf_hdr leafhdr;
 
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 
 	ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 	       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -547,7 +502,7 @@
 	if (!leafhdr.count)
 		return 0;
 
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 	return be32_to_cpu(ents[leafhdr.count - 1].hashval);
 }
 
@@ -584,10 +539,10 @@
 	tp = args->trans;
 	mp = dp->i_mount;
 	leaf = bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 
-	xfs_dir3_leaf_check(mp, bp);
+	xfs_dir3_leaf_check(dp, bp);
 	ASSERT(leafhdr.count > 0);
 
 	/*
@@ -605,7 +560,7 @@
 		ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
 		       free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
 	}
-	length = xfs_dir3_data_entsize(mp, args->namelen);
+	length = dp->d_ops->data_entsize(args->namelen);
 	/*
 	 * Loop over leaf entries with the right hash value.
 	 */
@@ -637,7 +592,7 @@
 			 * Convert the data block to the free block
 			 * holding its freespace information.
 			 */
-			newfdb = xfs_dir2_db_to_fdb(mp, newdb);
+			newfdb = dp->d_ops->db_to_fdb(mp, newdb);
 			/*
 			 * If it's not the one we have in hand, read it in.
 			 */
@@ -655,16 +610,16 @@
 					return error;
 				free = curbp->b_addr;
 
-				xfs_dir2_free_hdr_check(mp, curbp, curdb);
+				xfs_dir2_free_hdr_check(dp, curbp, curdb);
 			}
 			/*
 			 * Get the index for our entry.
 			 */
-			fi = xfs_dir2_db_to_fdindex(mp, curdb);
+			fi = dp->d_ops->db_to_fdindex(mp, curdb);
 			/*
 			 * If it has room, return it.
 			 */
-			bests = xfs_dir3_free_bests_p(mp, free);
+			bests = dp->d_ops->free_bests_p(free);
 			if (unlikely(bests[fi] == cpu_to_be16(NULLDATAOFF))) {
 				XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
 							XFS_ERRLEVEL_LOW, mp);
@@ -734,10 +689,10 @@
 	tp = args->trans;
 	mp = dp->i_mount;
 	leaf = bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 
-	xfs_dir3_leaf_check(mp, bp);
+	xfs_dir3_leaf_check(dp, bp);
 	ASSERT(leafhdr.count > 0);
 
 	/*
@@ -816,7 +771,7 @@
 				xfs_trans_brelse(tp, state->extrablk.bp);
 			args->cmpresult = cmp;
 			args->inumber = be64_to_cpu(dep->inumber);
-			args->filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+			args->filetype = dp->d_ops->data_get_ftype(dep);
 			*indexp = index;
 			state->extravalid = 1;
 			state->extrablk.bp = curbp;
@@ -907,7 +862,7 @@
 	if (start_d < dhdr->count) {
 		memmove(&dents[start_d + count], &dents[start_d],
 			(dhdr->count - start_d) * sizeof(xfs_dir2_leaf_entry_t));
-		xfs_dir3_leaf_log_ents(tp, bp_d, start_d + count,
+		xfs_dir3_leaf_log_ents(tp, args->dp, bp_d, start_d + count,
 				       count + dhdr->count - 1);
 	}
 	/*
@@ -929,7 +884,8 @@
 	 */
 	memcpy(&dents[start_d], &sents[start_s],
 		count * sizeof(xfs_dir2_leaf_entry_t));
-	xfs_dir3_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1);
+	xfs_dir3_leaf_log_ents(tp, args->dp, bp_d,
+			       start_d, start_d + count - 1);
 
 	/*
 	 * If there are source entries after the ones we copied,
@@ -938,7 +894,8 @@
 	if (start_s + count < shdr->count) {
 		memmove(&sents[start_s], &sents[start_s + count],
 			count * sizeof(xfs_dir2_leaf_entry_t));
-		xfs_dir3_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1);
+		xfs_dir3_leaf_log_ents(tp, args->dp, bp_s,
+				       start_s, start_s + count - 1);
 	}
 
 	/*
@@ -956,6 +913,7 @@
  */
 int						/* sort order */
 xfs_dir2_leafn_order(
+	struct xfs_inode	*dp,
 	struct xfs_buf		*leaf1_bp,		/* leaf1 buffer */
 	struct xfs_buf		*leaf2_bp)		/* leaf2 buffer */
 {
@@ -966,10 +924,10 @@
 	struct xfs_dir3_icleaf_hdr hdr1;
 	struct xfs_dir3_icleaf_hdr hdr2;
 
-	xfs_dir3_leaf_hdr_from_disk(&hdr1, leaf1);
-	xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf2);
-	ents1 = xfs_dir3_leaf_ents_p(leaf1);
-	ents2 = xfs_dir3_leaf_ents_p(leaf2);
+	dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
+	dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
+	ents1 = dp->d_ops->leaf_ents_p(leaf1);
+	ents2 = dp->d_ops->leaf_ents_p(leaf2);
 
 	if (hdr1.count > 0 && hdr2.count > 0 &&
 	    (be32_to_cpu(ents2[0].hashval) < be32_to_cpu(ents1[0].hashval) ||
@@ -1007,12 +965,13 @@
 	struct xfs_dir2_leaf_entry *ents2;
 	struct xfs_dir3_icleaf_hdr hdr1;
 	struct xfs_dir3_icleaf_hdr hdr2;
+	struct xfs_inode	*dp = state->args->dp;
 
 	args = state->args;
 	/*
 	 * If the block order is wrong, swap the arguments.
 	 */
-	if ((swap = xfs_dir2_leafn_order(blk1->bp, blk2->bp))) {
+	if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) {
 		xfs_da_state_blk_t	*tmp;	/* temp for block swap */
 
 		tmp = blk1;
@@ -1021,10 +980,10 @@
 	}
 	leaf1 = blk1->bp->b_addr;
 	leaf2 = blk2->bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&hdr1, leaf1);
-	xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf2);
-	ents1 = xfs_dir3_leaf_ents_p(leaf1);
-	ents2 = xfs_dir3_leaf_ents_p(leaf2);
+	dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
+	dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
+	ents1 = dp->d_ops->leaf_ents_p(leaf1);
+	ents2 = dp->d_ops->leaf_ents_p(leaf2);
 
 	oldsum = hdr1.count + hdr2.count;
 #if defined(DEBUG) || defined(XFS_WARN)
@@ -1070,13 +1029,13 @@
 	ASSERT(hdr1.stale + hdr2.stale == oldstale);
 
 	/* log the changes made when moving the entries */
-	xfs_dir3_leaf_hdr_to_disk(leaf1, &hdr1);
-	xfs_dir3_leaf_hdr_to_disk(leaf2, &hdr2);
-	xfs_dir3_leaf_log_header(args->trans, blk1->bp);
-	xfs_dir3_leaf_log_header(args->trans, blk2->bp);
+	dp->d_ops->leaf_hdr_to_disk(leaf1, &hdr1);
+	dp->d_ops->leaf_hdr_to_disk(leaf2, &hdr2);
+	xfs_dir3_leaf_log_header(args->trans, dp, blk1->bp);
+	xfs_dir3_leaf_log_header(args->trans, dp, blk2->bp);
 
-	xfs_dir3_leaf_check(args->dp->i_mount, blk1->bp);
-	xfs_dir3_leaf_check(args->dp->i_mount, blk2->bp);
+	xfs_dir3_leaf_check(dp, blk1->bp);
+	xfs_dir3_leaf_check(dp, blk2->bp);
 
 	/*
 	 * Mark whether we're inserting into the old or new leaf.
@@ -1097,11 +1056,11 @@
 	 * Finally sanity check just to make sure we are not returning a
 	 * negative index
 	 */
-	if(blk2->index < 0) {
+	if (blk2->index < 0) {
 		state->inleaf = 1;
 		blk2->index = 0;
-		xfs_alert(args->dp->i_mount,
-	"%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n",
+		xfs_alert(dp->i_mount,
+	"%s: picked the wrong leaf? reverting original leaf: blk1->index %d",
 			__func__, blk1->index);
 	}
 }
@@ -1120,17 +1079,17 @@
 	int			logfree = 0;
 	__be16			*bests;
 	struct xfs_dir3_icfree_hdr freehdr;
+	struct xfs_inode	*dp = args->dp;
 
-	xfs_dir3_free_hdr_from_disk(&freehdr, free);
-
-	bests = xfs_dir3_free_bests_p(tp->t_mountp, free);
+	dp->d_ops->free_hdr_from_disk(&freehdr, free);
+	bests = dp->d_ops->free_bests_p(free);
 	if (hdr) {
 		/*
 		 * Data block is not empty, just set the free entry to the new
 		 * value.
 		 */
 		bests[findex] = cpu_to_be16(longest);
-		xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+		xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
 		return 0;
 	}
 
@@ -1157,8 +1116,8 @@
 		logfree = 1;
 	}
 
-	xfs_dir3_free_hdr_to_disk(free, &freehdr);
-	xfs_dir2_free_log_header(tp, fbp);
+	dp->d_ops->free_hdr_to_disk(free, &freehdr);
+	xfs_dir2_free_log_header(tp, dp, fbp);
 
 	/*
 	 * If there are no useful entries left in the block, get rid of the
@@ -1182,7 +1141,7 @@
 
 	/* Log the free entry that changed, unless we got rid of it.  */
 	if (logfree)
-		xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+		xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
 	return 0;
 }
 
@@ -1222,8 +1181,8 @@
 	tp = args->trans;
 	mp = dp->i_mount;
 	leaf = bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	/*
 	 * Point to the entry we're removing.
@@ -1243,11 +1202,11 @@
 	 * Log the leaf block changes.
 	 */
 	leafhdr.stale++;
-	xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
-	xfs_dir3_leaf_log_header(tp, bp);
+	dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+	xfs_dir3_leaf_log_header(tp, dp, bp);
 
 	lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
-	xfs_dir3_leaf_log_ents(tp, bp, index, index);
+	xfs_dir3_leaf_log_ents(tp, dp, bp, index, index);
 
 	/*
 	 * Make the data entry free.  Keep track of the longest freespace
@@ -1256,19 +1215,19 @@
 	dbp = dblk->bp;
 	hdr = dbp->b_addr;
 	dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
-	bf = xfs_dir3_data_bestfree_p(hdr);
+	bf = dp->d_ops->data_bestfree_p(hdr);
 	longest = be16_to_cpu(bf[0].length);
 	needlog = needscan = 0;
-	xfs_dir2_data_make_free(tp, dbp, off,
-		xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+	xfs_dir2_data_make_free(tp, dp, dbp, off,
+		dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
 	/*
 	 * Rescan the data block freespaces for bestfree.
 	 * Log the data block header if needed.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
+		xfs_dir2_data_log_header(tp, dp, dbp);
 	xfs_dir3_data_check(dp, dbp);
 	/*
 	 * If the longest data block freespace changes, need to update
@@ -1285,7 +1244,7 @@
 		 * Convert the data block number to a free block,
 		 * read in the free block.
 		 */
-		fdb = xfs_dir2_db_to_fdb(mp, db);
+		fdb = dp->d_ops->db_to_fdb(mp, db);
 		error = xfs_dir2_free_read(tp, dp, xfs_dir2_db_to_da(mp, fdb),
 					   &fbp);
 		if (error)
@@ -1294,22 +1253,22 @@
 #ifdef DEBUG
 	{
 		struct xfs_dir3_icfree_hdr freehdr;
-		xfs_dir3_free_hdr_from_disk(&freehdr, free);
-		ASSERT(freehdr.firstdb == xfs_dir3_free_max_bests(mp) *
+		dp->d_ops->free_hdr_from_disk(&freehdr, free);
+		ASSERT(freehdr.firstdb == dp->d_ops->free_max_bests(mp) *
 					  (fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
 	}
 #endif
 		/*
 		 * Calculate which entry we need to fix.
 		 */
-		findex = xfs_dir2_db_to_fdindex(mp, db);
+		findex = dp->d_ops->db_to_fdindex(mp, db);
 		longest = be16_to_cpu(bf[0].length);
 		/*
 		 * If the data block is now empty we can get rid of it
 		 * (usually).
 		 */
 		if (longest == mp->m_dirblksize -
-			       xfs_dir3_data_entry_offset(hdr)) {
+			       dp->d_ops->data_entry_offset) {
 			/*
 			 * Try to punch out the data block.
 			 */
@@ -1336,12 +1295,12 @@
 			return error;
 	}
 
-	xfs_dir3_leaf_check(mp, bp);
+	xfs_dir3_leaf_check(dp, bp);
 	/*
 	 * Return indication of whether this leaf block is empty enough
 	 * to justify trying to join it with a neighbor.
 	 */
-	*rval = (xfs_dir3_leaf_hdr_size(leaf) +
+	*rval = (dp->d_ops->leaf_hdr_size +
 		 (uint)sizeof(ents[0]) * (leafhdr.count - leafhdr.stale)) <
 		mp->m_dir_magicpct;
 	return 0;
@@ -1360,13 +1319,14 @@
 	xfs_dablk_t		blkno;		/* new leaf block number */
 	int			error;		/* error return value */
 	xfs_mount_t		*mp;		/* filesystem mount point */
+	struct xfs_inode	*dp;
 
 	/*
 	 * Allocate space for a new leaf node.
 	 */
 	args = state->args;
-	mp = args->dp->i_mount;
-	ASSERT(args != NULL);
+	dp = args->dp;
+	mp = dp->i_mount;
 	ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
 	error = xfs_da_grow_inode(args, &blkno);
 	if (error) {
@@ -1401,10 +1361,10 @@
 	/*
 	 * Update last hashval in each block since we added the name.
 	 */
-	oldblk->hashval = xfs_dir2_leafn_lasthash(oldblk->bp, NULL);
-	newblk->hashval = xfs_dir2_leafn_lasthash(newblk->bp, NULL);
-	xfs_dir3_leaf_check(mp, oldblk->bp);
-	xfs_dir3_leaf_check(mp, newblk->bp);
+	oldblk->hashval = xfs_dir2_leafn_lasthash(dp, oldblk->bp, NULL);
+	newblk->hashval = xfs_dir2_leafn_lasthash(dp, newblk->bp, NULL);
+	xfs_dir3_leaf_check(dp, oldblk->bp);
+	xfs_dir3_leaf_check(dp, newblk->bp);
 	return error;
 }
 
@@ -1434,6 +1394,7 @@
 	int			rval;		/* result from path_shift */
 	struct xfs_dir3_icleaf_hdr leafhdr;
 	struct xfs_dir2_leaf_entry *ents;
+	struct xfs_inode	*dp = state->args->dp;
 
 	/*
 	 * Check for the degenerate case of the block being over 50% full.
@@ -1442,12 +1403,12 @@
 	 */
 	blk = &state->path.blk[state->path.active - 1];
 	leaf = blk->bp->b_addr;
-	xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
-	ents = xfs_dir3_leaf_ents_p(leaf);
-	xfs_dir3_leaf_check(state->args->dp->i_mount, blk->bp);
+	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+	ents = dp->d_ops->leaf_ents_p(leaf);
+	xfs_dir3_leaf_check(dp, blk->bp);
 
 	count = leafhdr.count - leafhdr.stale;
-	bytes = xfs_dir3_leaf_hdr_size(leaf) + count * sizeof(ents[0]);
+	bytes = dp->d_ops->leaf_hdr_size + count * sizeof(ents[0]);
 	if (bytes > (state->blocksize >> 1)) {
 		/*
 		 * Blk over 50%, don't try to join.
@@ -1492,7 +1453,7 @@
 		/*
 		 * Read the sibling leaf block.
 		 */
-		error = xfs_dir3_leafn_read(state->args->trans, state->args->dp,
+		error = xfs_dir3_leafn_read(state->args->trans, dp,
 					    blkno, -1, &bp);
 		if (error)
 			return error;
@@ -1504,8 +1465,8 @@
 		bytes = state->blocksize - (state->blocksize >> 2);
 
 		leaf = bp->b_addr;
-		xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf);
-		ents = xfs_dir3_leaf_ents_p(leaf);
+		dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf);
+		ents = dp->d_ops->leaf_ents_p(leaf);
 		count += hdr2.count - hdr2.stale;
 		bytes -= count * sizeof(ents[0]);
 
@@ -1559,6 +1520,7 @@
 	struct xfs_dir3_icleaf_hdr drophdr;
 	struct xfs_dir2_leaf_entry *sents;
 	struct xfs_dir2_leaf_entry *dents;
+	struct xfs_inode	*dp = state->args->dp;
 
 	args = state->args;
 	ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
@@ -1566,10 +1528,10 @@
 	drop_leaf = drop_blk->bp->b_addr;
 	save_leaf = save_blk->bp->b_addr;
 
-	xfs_dir3_leaf_hdr_from_disk(&savehdr, save_leaf);
-	xfs_dir3_leaf_hdr_from_disk(&drophdr, drop_leaf);
-	sents = xfs_dir3_leaf_ents_p(save_leaf);
-	dents = xfs_dir3_leaf_ents_p(drop_leaf);
+	dp->d_ops->leaf_hdr_from_disk(&savehdr, save_leaf);
+	dp->d_ops->leaf_hdr_from_disk(&drophdr, drop_leaf);
+	sents = dp->d_ops->leaf_ents_p(save_leaf);
+	dents = dp->d_ops->leaf_ents_p(drop_leaf);
 
 	/*
 	 * If there are any stale leaf entries, take this opportunity
@@ -1584,7 +1546,7 @@
 	 * Move the entries from drop to the appropriate end of save.
 	 */
 	drop_blk->hashval = be32_to_cpu(dents[drophdr.count - 1].hashval);
-	if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp))
+	if (xfs_dir2_leafn_order(dp, save_blk->bp, drop_blk->bp))
 		xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0,
 					save_blk->bp, &savehdr, sents, 0,
 					drophdr.count);
@@ -1595,13 +1557,13 @@
 	save_blk->hashval = be32_to_cpu(sents[savehdr.count - 1].hashval);
 
 	/* log the changes made when moving the entries */
-	xfs_dir3_leaf_hdr_to_disk(save_leaf, &savehdr);
-	xfs_dir3_leaf_hdr_to_disk(drop_leaf, &drophdr);
-	xfs_dir3_leaf_log_header(args->trans, save_blk->bp);
-	xfs_dir3_leaf_log_header(args->trans, drop_blk->bp);
+	dp->d_ops->leaf_hdr_to_disk(save_leaf, &savehdr);
+	dp->d_ops->leaf_hdr_to_disk(drop_leaf, &drophdr);
+	xfs_dir3_leaf_log_header(args->trans, dp, save_blk->bp);
+	xfs_dir3_leaf_log_header(args->trans, dp, drop_blk->bp);
 
-	xfs_dir3_leaf_check(args->dp->i_mount, save_blk->bp);
-	xfs_dir3_leaf_check(args->dp->i_mount, drop_blk->bp);
+	xfs_dir3_leaf_check(dp, save_blk->bp);
+	xfs_dir3_leaf_check(dp, drop_blk->bp);
 }
 
 /*
@@ -1712,7 +1674,7 @@
 	dp = args->dp;
 	mp = dp->i_mount;
 	tp = args->trans;
-	length = xfs_dir3_data_entsize(mp, args->namelen);
+	length = dp->d_ops->data_entsize(args->namelen);
 	/*
 	 * If we came in with a freespace block that means that lookup
 	 * found an entry with our hash value.  This is the freespace
@@ -1726,8 +1688,8 @@
 		ifbno = fblk->blkno;
 		free = fbp->b_addr;
 		findex = fblk->index;
-		bests = xfs_dir3_free_bests_p(mp, free);
-		xfs_dir3_free_hdr_from_disk(&freehdr, free);
+		bests = dp->d_ops->free_bests_p(free);
+		dp->d_ops->free_hdr_from_disk(&freehdr, free);
 
 		/*
 		 * This means the free entry showed that the data block had
@@ -1819,8 +1781,8 @@
 		 * and the freehdr are actually initialised if they are placed
 		 * there, so we have to do it here to avoid warnings. Blech.
 		 */
-		bests = xfs_dir3_free_bests_p(mp, free);
-		xfs_dir3_free_hdr_from_disk(&freehdr, free);
+		bests = dp->d_ops->free_bests_p(free);
+		dp->d_ops->free_hdr_from_disk(&freehdr, free);
 		if (be16_to_cpu(bests[findex]) != NULLDATAOFF &&
 		    be16_to_cpu(bests[findex]) >= length)
 			dbno = freehdr.firstdb + findex;
@@ -1871,7 +1833,7 @@
 		 * Get the freespace block corresponding to the data block
 		 * that was just allocated.
 		 */
-		fbno = xfs_dir2_db_to_fdb(mp, dbno);
+		fbno = dp->d_ops->db_to_fdb(mp, dbno);
 		error = xfs_dir2_free_try_read(tp, dp,
 					       xfs_dir2_db_to_da(mp, fbno),
 					       &fbp);
@@ -1888,12 +1850,12 @@
 			if (error)
 				return error;
 
-			if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) {
+			if (unlikely(dp->d_ops->db_to_fdb(mp, dbno) != fbno)) {
 				xfs_alert(mp,
 			"%s: dir ino %llu needed freesp block %lld for\n"
 			"  data block %lld, got %lld ifbno %llu lastfbno %d",
 					__func__, (unsigned long long)dp->i_ino,
-					(long long)xfs_dir2_db_to_fdb(mp, dbno),
+					(long long)dp->d_ops->db_to_fdb(mp, dbno),
 					(long long)dbno, (long long)fbno,
 					(unsigned long long)ifbno, lastfbno);
 				if (fblk) {
@@ -1918,30 +1880,30 @@
 			if (error)
 				return error;
 			free = fbp->b_addr;
-			bests = xfs_dir3_free_bests_p(mp, free);
-			xfs_dir3_free_hdr_from_disk(&freehdr, free);
+			bests = dp->d_ops->free_bests_p(free);
+			dp->d_ops->free_hdr_from_disk(&freehdr, free);
 
 			/*
 			 * Remember the first slot as our empty slot.
 			 */
 			freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
-					xfs_dir3_free_max_bests(mp);
+					dp->d_ops->free_max_bests(mp);
 		} else {
 			free = fbp->b_addr;
-			bests = xfs_dir3_free_bests_p(mp, free);
-			xfs_dir3_free_hdr_from_disk(&freehdr, free);
+			bests = dp->d_ops->free_bests_p(free);
+			dp->d_ops->free_hdr_from_disk(&freehdr, free);
 		}
 
 		/*
 		 * Set the freespace block index from the data block number.
 		 */
-		findex = xfs_dir2_db_to_fdindex(mp, dbno);
+		findex = dp->d_ops->db_to_fdindex(mp, dbno);
 		/*
 		 * If it's after the end of the current entries in the
 		 * freespace block, extend that table.
 		 */
 		if (findex >= freehdr.nvalid) {
-			ASSERT(findex < xfs_dir3_free_max_bests(mp));
+			ASSERT(findex < dp->d_ops->free_max_bests(mp));
 			freehdr.nvalid = findex + 1;
 			/*
 			 * Tag new entry so nused will go up.
@@ -1954,8 +1916,8 @@
 		 */
 		if (bests[findex] == cpu_to_be16(NULLDATAOFF)) {
 			freehdr.nused++;
-			xfs_dir3_free_hdr_to_disk(fbp->b_addr, &freehdr);
-			xfs_dir2_free_log_header(tp, fbp);
+			dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
+			xfs_dir2_free_log_header(tp, dp, fbp);
 		}
 		/*
 		 * Update the real value in the table.
@@ -1963,7 +1925,7 @@
 		 * change again.
 		 */
 		hdr = dbp->b_addr;
-		bf = xfs_dir3_data_bestfree_p(hdr);
+		bf = dp->d_ops->data_bestfree_p(hdr);
 		bests[findex] = bf[0].length;
 		logfree = 1;
 	}
@@ -1985,7 +1947,7 @@
 		if (error)
 			return error;
 		hdr = dbp->b_addr;
-		bf = xfs_dir3_data_bestfree_p(hdr);
+		bf = dp->d_ops->data_bestfree_p(hdr);
 		logfree = 0;
 	}
 	ASSERT(be16_to_cpu(bf[0].length) >= length);
@@ -1998,7 +1960,7 @@
 	/*
 	 * Mark the first part of the unused space, inuse for us.
 	 */
-	xfs_dir2_data_use_free(tp, dbp, dup,
+	xfs_dir2_data_use_free(tp, dp, dbp, dup,
 		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
 		&needlog, &needscan);
 	/*
@@ -2008,24 +1970,24 @@
 	dep->inumber = cpu_to_be64(args->inumber);
 	dep->namelen = args->namelen;
 	memcpy(dep->name, args->name, dep->namelen);
-	xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
-	tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+	dp->d_ops->data_put_ftype(dep, args->filetype);
+	tagp = dp->d_ops->data_entry_tag_p(dep);
 	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
-	xfs_dir2_data_log_entry(tp, dbp, dep);
+	xfs_dir2_data_log_entry(tp, dp, dbp, dep);
 	/*
 	 * Rescan the block for bestfree if needed.
 	 */
 	if (needscan)
-		xfs_dir2_data_freescan(mp, hdr, &needlog);
+		xfs_dir2_data_freescan(dp, hdr, &needlog);
 	/*
 	 * Log the data block header if needed.
 	 */
 	if (needlog)
-		xfs_dir2_data_log_header(tp, dbp);
+		xfs_dir2_data_log_header(tp, dp, dbp);
 	/*
 	 * If the freespace entry is now wrong, update it.
 	 */
-	bests = xfs_dir3_free_bests_p(mp, free); /* gcc is so stupid */
+	bests = dp->d_ops->free_bests_p(free); /* gcc is so stupid */
 	if (be16_to_cpu(bests[findex]) != be16_to_cpu(bf[0].length)) {
 		bests[findex] = bf[0].length;
 		logfree = 1;
@@ -2034,7 +1996,7 @@
 	 * Log the freespace entry if needed.
 	 */
 	if (logfree)
-		xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+		xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
 	/*
 	 * Return the data block and offset in args, then drop the data block.
 	 */
@@ -2212,7 +2174,7 @@
 		blk = &state->path.blk[state->path.active - 1];
 		ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
 		leaf = blk->bp->b_addr;
-		ents = xfs_dir3_leaf_ents_p(leaf);
+		ents = args->dp->d_ops->leaf_ents_p(leaf);
 		lep = &ents[blk->index];
 		ASSERT(state->extravalid);
 		/*
@@ -2229,8 +2191,9 @@
 		 * Fill in the new inode number and log the entry.
 		 */
 		dep->inumber = cpu_to_be64(inum);
-		xfs_dir3_dirent_put_ftype(state->mp, dep, args->filetype);
-		xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep);
+		args->dp->d_ops->data_put_ftype(dep, args->filetype);
+		xfs_dir2_data_log_entry(args->trans, args->dp,
+					state->extrablk.bp, dep);
 		rval = 0;
 	}
 	/*
@@ -2285,7 +2248,7 @@
 	if (!bp)
 		return 0;
 	free = bp->b_addr;
-	xfs_dir3_free_hdr_from_disk(&freehdr, free);
+	dp->d_ops->free_hdr_from_disk(&freehdr, free);
 
 	/*
 	 * If there are used entries, there's nothing to do.
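
The theme of the xfs_dir2_node.c changes above is mechanical but worth spelling out: free-standing format helpers that took a struct xfs_mount (xfs_dir3_data_entsize, xfs_dir2_db_to_fdb, xfs_dir3_leaf_ents_p, and so on) become methods on an operations vector hung off the inode, dp->d_ops, so the v2-versus-v3 on-disk directory format is decided once when the inode is set up instead of being re-derived from superblock feature bits at every call site. A minimal sketch of the pattern, using invented types and layout constants that only approximate the real entry sizes:

	#include <stdio.h>

	struct dir_ops {
		int (*data_entsize)(int namelen);  /* bytes per data entry */
	};

	/* v2 entries: 8-byte inumber + 1-byte namelen + name + 2-byte tag */
	static int v2_entsize(int namelen)
	{
		return (namelen + 11 + 7) & ~7;    /* round up to 8 bytes */
	}

	/* v3 entries carry an extra 1-byte file type after the name */
	static int v3_entsize(int namelen)
	{
		return (namelen + 12 + 7) & ~7;
	}

	static const struct dir_ops v2_ops = { .data_entsize = v2_entsize };
	static const struct dir_ops v3_ops = { .data_entsize = v3_entsize };

	struct inode {
		const struct dir_ops *d_ops;       /* bound at inode setup */
	};

	int main(void)
	{
		struct inode dp = { .d_ops = &v3_ops };

		/* call sites no longer need the mount just to pick a format */
		printf("entsize(8) = %d\n", dp.d_ops->data_entsize(8));
		return 0;
	}

The same indirection covers the freespace-index math (db_to_fdb, db_to_fdindex) and the leaf/free header codecs, which is why so many hunks above also gain a struct xfs_inode parameter: the callee needs dp to reach the vector.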
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h
index 1bad84c..8b9d228 100644
--- a/fs/xfs/xfs_dir2_priv.h
+++ b/fs/xfs/xfs_dir2_priv.h
@@ -59,7 +59,8 @@
 
 extern struct xfs_dir2_data_free *
 xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
-		struct xfs_dir2_data_unused *dup, int *loghead);
+		struct xfs_dir2_data_free *bf, struct xfs_dir2_data_unused *dup,
+		int *loghead);
 extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
 		struct xfs_buf **bpp);
 
@@ -76,9 +77,9 @@
 		int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
 extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
 		struct xfs_buf **bpp, __uint16_t magic);
-extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
-		int first, int last);
-extern void xfs_dir3_leaf_log_header(struct xfs_trans *tp,
+extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_inode *dp,
+		struct xfs_buf *bp, int first, int last);
+extern void xfs_dir3_leaf_log_header(struct xfs_trans *tp, struct xfs_inode *dp,
 		struct xfs_buf *bp);
 extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
@@ -93,21 +94,18 @@
 		int lowstale, int highstale, int *lfloglow, int *lfloghigh);
 extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
 
-extern void xfs_dir3_leaf_hdr_from_disk(struct xfs_dir3_icleaf_hdr *to,
-		struct xfs_dir2_leaf *from);
-extern void xfs_dir3_leaf_hdr_to_disk(struct xfs_dir2_leaf *to,
-		struct xfs_dir3_icleaf_hdr *from);
-extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp,
+extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp, struct xfs_inode *dp,
 		struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
 
 /* xfs_dir2_node.c */
 extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
 		struct xfs_buf *lbp);
-extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_buf *bp, int *count);
+extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_inode *dp,
+		struct xfs_buf *bp, int *count);
 extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
 		struct xfs_da_args *args, int *indexp,
 		struct xfs_da_state *state);
-extern int xfs_dir2_leafn_order(struct xfs_buf *leaf1_bp,
+extern int xfs_dir2_leafn_order(struct xfs_inode *dp, struct xfs_buf *leaf1_bp,
 		struct xfs_buf *leaf2_bp);
 extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
 	struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8f84153..c4e50c6 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -18,23 +18,23 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_dinode.h"
 
 /*
  * Directory file type support functions
@@ -119,9 +119,9 @@
 	 * mp->m_dirdatablk.
 	 */
 	dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-					     xfs_dir3_data_dot_offset(mp));
+						dp->d_ops->data_dot_offset);
 	dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-						xfs_dir3_data_dotdot_offset(mp));
+						dp->d_ops->data_dotdot_offset);
 
 	/*
 	 * Put . entry unless we're starting past it.
@@ -136,7 +136,7 @@
 	 * Put .. entry unless we're starting past it.
 	 */
 	if (ctx->pos <= dotdot_offset) {
-		ino = xfs_dir2_sf_get_parent_ino(sfp);
+		ino = dp->d_ops->sf_get_parent_ino(sfp);
 		ctx->pos = dotdot_offset & 0x7fffffff;
 		if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
 			return 0;
@@ -153,17 +153,17 @@
 				xfs_dir2_sf_get_offset(sfep));
 
 		if (ctx->pos > off) {
-			sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+			sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 			continue;
 		}
 
-		ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
-		filetype = xfs_dir3_sfe_get_ftype(mp, sfp, sfep);
+		ino = dp->d_ops->sf_get_ino(sfp, sfep);
+		filetype = dp->d_ops->sf_get_ftype(sfep);
 		ctx->pos = off & 0x7fffffff;
 		if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
 			    xfs_dir3_get_dtype(mp, filetype)))
 			return 0;
-		sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+		sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 	}
 
 	ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
@@ -213,7 +213,7 @@
 	 * Set up values for the loop.
 	 */
 	btp = xfs_dir2_block_tail_p(mp, hdr);
-	ptr = (char *)xfs_dir3_data_entry_p(hdr);
+	ptr = (char *)dp->d_ops->data_entry_p(hdr);
 	endptr = (char *)xfs_dir2_block_leaf_p(btp);
 
 	/*
@@ -237,7 +237,7 @@
 		/*
 		 * Bump pointer for the next iteration.
 		 */
-		ptr += xfs_dir3_data_entsize(mp, dep->namelen);
+		ptr += dp->d_ops->data_entsize(dep->namelen);
 		/*
 		 * The entry is before the desired starting point, skip it.
 		 */
@@ -248,7 +248,7 @@
 					    (char *)dep - (char *)hdr);
 
 		ctx->pos = cook & 0x7fffffff;
-		filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+		filetype = dp->d_ops->data_get_ftype(dep);
 		/*
 		 * If it didn't fit, set the final offset to here & return.
 		 */
@@ -578,13 +578,13 @@
 			/*
 			 * Find our position in the block.
 			 */
-			ptr = (char *)xfs_dir3_data_entry_p(hdr);
+			ptr = (char *)dp->d_ops->data_entry_p(hdr);
 			byteoff = xfs_dir2_byte_to_off(mp, curoff);
 			/*
 			 * Skip past the header.
 			 */
 			if (byteoff == 0)
-				curoff += xfs_dir3_data_entry_offset(hdr);
+				curoff += dp->d_ops->data_entry_offset;
 			/*
 			 * Skip past entries until we reach our offset.
 			 */
@@ -601,7 +601,7 @@
 					}
 					dep = (xfs_dir2_data_entry_t *)ptr;
 					length =
-					   xfs_dir3_data_entsize(mp, dep->namelen);
+					   dp->d_ops->data_entsize(dep->namelen);
 					ptr += length;
 				}
 				/*
@@ -632,8 +632,8 @@
 		}
 
 		dep = (xfs_dir2_data_entry_t *)ptr;
-		length = xfs_dir3_data_entsize(mp, dep->namelen);
-		filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+		length = dp->d_ops->data_entsize(dep->namelen);
+		filetype = dp->d_ops->data_get_ftype(dep);
 
 		ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
 		if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
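
Both the shortform and block readdir paths above share one iteration shape: directory data is a packed stream of variable-length records, so the cursor advances by whatever per-entry size the format vector reports, and every position handed to dir_emit() is masked to 31 bits (& 0x7fffffff) so the cookie stays in the positive f_pos range. A self-contained sketch of that walk, over a hypothetical packed layout rather than the real xfs_dir2 one:

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical packed entry: 1-byte namelen, then the name bytes */
	struct pent {
		uint8_t	namelen;
		char	name[];
	};

	static size_t pent_size(const struct pent *p)
	{
		return sizeof(uint8_t) + p->namelen; /* real code also rounds up */
	}

	int main(void)
	{
		/* two packed entries back to back: "ab", "xyz" */
		uint8_t buf[] = { 2, 'a', 'b', 3, 'x', 'y', 'z' };
		uint8_t *ptr = buf, *end = buf + sizeof(buf);

		while (ptr < end) {
			struct pent *p = (struct pent *)ptr;
			uint64_t off = (uint64_t)(ptr - buf);

			/* emit with a 31-bit cookie, as the readdir code does */
			printf("pos=%llu name=%.*s\n",
			       (unsigned long long)(off & 0x7fffffff),
			       p->namelen, p->name);
			ptr += pent_size(p);       /* advance by entry size */
		}
		return 0;
	}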
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 3ef6d40..aafc6e4 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -17,22 +17,22 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * Prototypes for internal functions.
@@ -57,89 +57,6 @@
 #endif /* XFS_BIG_INUMS */
 
 /*
- * Inode numbers in short-form directories can come in two versions,
- * either 4 bytes or 8 bytes wide.  These helpers deal with the
- * two forms transparently by looking at the headers i8count field.
- *
- * For 64-bit inode number the most significant byte must be zero.
- */
-static xfs_ino_t
-xfs_dir2_sf_get_ino(
-	struct xfs_dir2_sf_hdr	*hdr,
-	xfs_dir2_inou_t		*from)
-{
-	if (hdr->i8count)
-		return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
-	else
-		return get_unaligned_be32(&from->i4.i);
-}
-
-static void
-xfs_dir2_sf_put_ino(
-	struct xfs_dir2_sf_hdr	*hdr,
-	xfs_dir2_inou_t		*to,
-	xfs_ino_t		ino)
-{
-	ASSERT((ino & 0xff00000000000000ULL) == 0);
-
-	if (hdr->i8count)
-		put_unaligned_be64(ino, &to->i8.i);
-	else
-		put_unaligned_be32(ino, &to->i4.i);
-}
-
-xfs_ino_t
-xfs_dir2_sf_get_parent_ino(
-	struct xfs_dir2_sf_hdr	*hdr)
-{
-	return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
-}
-
-void
-xfs_dir2_sf_put_parent_ino(
-	struct xfs_dir2_sf_hdr	*hdr,
-	xfs_ino_t		ino)
-{
-	xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
-}
-
-/*
- * In short-form directory entries the inode numbers are stored at variable
- * offset behind the entry name. If the entry stores a filetype value, then it
- * sits between the name and the inode number. Hence the inode numbers may only
- * be accessed through the helpers below.
- */
-static xfs_dir2_inou_t *
-xfs_dir3_sfe_inop(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_entry *sfep)
-{
-	__uint8_t	*ptr = &sfep->name[sfep->namelen];
-	if (xfs_sb_version_hasftype(&mp->m_sb))
-		ptr++;
-	return (xfs_dir2_inou_t *)ptr;
-}
-
-xfs_ino_t
-xfs_dir3_sfe_get_ino(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep)
-{
-	return xfs_dir2_sf_get_ino(hdr, xfs_dir3_sfe_inop(mp, sfep));
-}
-
-void
-xfs_dir3_sfe_put_ino(
-	struct xfs_mount	*mp,
-	struct xfs_dir2_sf_hdr	*hdr,
-	struct xfs_dir2_sf_entry *sfep,
-	xfs_ino_t		ino)
-{
-	xfs_dir2_sf_put_ino(hdr, xfs_dir3_sfe_inop(mp, sfep), ino);
-}
-
-/*
  * Given a block directory (dp/block), calculate its size as a shortform (sf)
  * directory and a header for the sf directory, if it will fit in the
  * space currently present in the inode.  If it won't fit, the output
@@ -226,7 +143,7 @@
 	 */
 	sfhp->count = count;
 	sfhp->i8count = i8count;
-	xfs_dir2_sf_put_parent_ino(sfhp, parent);
+	dp->d_ops->sf_put_parent_ino(sfhp, parent);
 	return size;
 }
 
@@ -293,7 +210,7 @@
 	 * Set up to loop over the block's entries.
 	 */
 	btp = xfs_dir2_block_tail_p(mp, hdr);
-	ptr = (char *)xfs_dir3_data_entry_p(hdr);
+	ptr = (char *)dp->d_ops->data_entry_p(hdr);
 	endptr = (char *)xfs_dir2_block_leaf_p(btp);
 	sfep = xfs_dir2_sf_firstentry(sfp);
 	/*
@@ -321,7 +238,7 @@
 		else if (dep->namelen == 2 &&
 			 dep->name[0] == '.' && dep->name[1] == '.')
 			ASSERT(be64_to_cpu(dep->inumber) ==
-			       xfs_dir2_sf_get_parent_ino(sfp));
+			       dp->d_ops->sf_get_parent_ino(sfp));
 		/*
 		 * Normal entry, copy it into shortform.
 		 */
@@ -331,14 +248,14 @@
 				(xfs_dir2_data_aoff_t)
 				((char *)dep - (char *)hdr));
 			memcpy(sfep->name, dep->name, dep->namelen);
-			xfs_dir3_sfe_put_ino(mp, sfp, sfep,
-					     be64_to_cpu(dep->inumber));
-			xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
-					xfs_dir3_dirent_get_ftype(mp, dep));
+			dp->d_ops->sf_put_ino(sfp, sfep,
+					      be64_to_cpu(dep->inumber));
+			dp->d_ops->sf_put_ftype(sfep,
+					dp->d_ops->data_get_ftype(dep));
 
-			sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+			sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 		}
-		ptr += xfs_dir3_data_entsize(mp, dep->namelen);
+		ptr += dp->d_ops->data_entsize(dep->namelen);
 	}
 	ASSERT((char *)sfep - (char *)sfp == size);
 	xfs_dir2_sf_check(args);
@@ -389,7 +306,7 @@
 	/*
 	 * Compute entry (and change in) size.
 	 */
-	add_entsize = xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen);
+	add_entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
 	incr_isize = add_entsize;
 	objchange = 0;
 #if XFS_BIG_INUMS
@@ -483,8 +400,7 @@
 	/*
 	 * Grow the in-inode space.
 	 */
-	xfs_idata_realloc(dp,
-			  xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen),
+	xfs_idata_realloc(dp, dp->d_ops->sf_entsize(sfp, args->namelen),
 			  XFS_DATA_FORK);
 	/*
 	 * Need to set up again due to realloc of the inode data.
@@ -497,8 +413,8 @@
 	sfep->namelen = args->namelen;
 	xfs_dir2_sf_put_offset(sfep, offset);
 	memcpy(sfep->name, args->name, sfep->namelen);
-	xfs_dir3_sfe_put_ino(dp->i_mount, sfp, sfep, args->inumber);
-	xfs_dir3_sfe_put_ftype(dp->i_mount, sfp, sfep, args->filetype);
+	dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+	dp->d_ops->sf_put_ftype(sfep, args->filetype);
 
 	/*
 	 * Update the header and inode.
@@ -557,13 +473,13 @@
 	 * to insert the new entry.
 	 * If it's going to end up at the end then oldsfep will point there.
 	 */
-	for (offset = xfs_dir3_data_first_offset(mp),
+	for (offset = dp->d_ops->data_first_offset,
 	      oldsfep = xfs_dir2_sf_firstentry(oldsfp),
-	      add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
+	      add_datasize = dp->d_ops->data_entsize(args->namelen),
 	      eof = (char *)oldsfep == &buf[old_isize];
 	     !eof;
-	     offset = new_offset + xfs_dir3_data_entsize(mp, oldsfep->namelen),
-	      oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep),
+	     offset = new_offset + dp->d_ops->data_entsize(oldsfep->namelen),
+	      oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep),
 	      eof = (char *)oldsfep == &buf[old_isize]) {
 		new_offset = xfs_dir2_sf_get_offset(oldsfep);
 		if (offset + add_datasize <= new_offset)
@@ -592,8 +508,8 @@
 	sfep->namelen = args->namelen;
 	xfs_dir2_sf_put_offset(sfep, offset);
 	memcpy(sfep->name, args->name, sfep->namelen);
-	xfs_dir3_sfe_put_ino(mp, sfp, sfep, args->inumber);
-	xfs_dir3_sfe_put_ftype(mp, sfp, sfep, args->filetype);
+	dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+	dp->d_ops->sf_put_ftype(sfep, args->filetype);
 	sfp->count++;
 #if XFS_BIG_INUMS
 	if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
@@ -603,7 +519,7 @@
 	 * If there's more left to copy, do that.
 	 */
 	if (!eof) {
-		sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+		sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 		memcpy(sfep, oldsfep, old_isize - nbytes);
 	}
 	kmem_free(buf);
@@ -639,8 +555,8 @@
 	mp = dp->i_mount;
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
-	size = xfs_dir3_data_entsize(mp, args->namelen);
-	offset = xfs_dir3_data_first_offset(mp);
+	size = dp->d_ops->data_entsize(args->namelen);
+	offset = dp->d_ops->data_first_offset;
 	sfep = xfs_dir2_sf_firstentry(sfp);
 	holefit = 0;
 	/*
@@ -652,8 +568,8 @@
 		if (!holefit)
 			holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
 		offset = xfs_dir2_sf_get_offset(sfep) +
-			 xfs_dir3_data_entsize(mp, sfep->namelen);
-		sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+			 dp->d_ops->data_entsize(sfep->namelen);
+		sfep = dp->d_ops->sf_nextentry(sfp, sfep);
 	}
 	/*
 	 * Calculate data bytes used excluding the new entry, if this
@@ -713,21 +629,20 @@
 	mp = dp->i_mount;
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
-	offset = xfs_dir3_data_first_offset(mp);
-	ino = xfs_dir2_sf_get_parent_ino(sfp);
+	offset = dp->d_ops->data_first_offset;
+	ino = dp->d_ops->sf_get_parent_ino(sfp);
 	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
 
 	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
 	     i < sfp->count;
-	     i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep)) {
+	     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
 		ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
-		ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
+		ino = dp->d_ops->sf_get_ino(sfp, sfep);
 		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
 		offset =
 			xfs_dir2_sf_get_offset(sfep) +
-			xfs_dir3_data_entsize(mp, sfep->namelen);
-		ASSERT(xfs_dir3_sfe_get_ftype(mp, sfp, sfep) <
-							XFS_DIR3_FT_MAX);
+			dp->d_ops->data_entsize(sfep->namelen);
+		ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX);
 	}
 	ASSERT(i8count == sfp->i8count);
 	ASSERT(XFS_BIG_INUMS || i8count == 0);
@@ -783,7 +698,7 @@
 	/*
 	 * Now can put in the inode number, since i8count is set.
 	 */
-	xfs_dir2_sf_put_parent_ino(sfp, pino);
+	dp->d_ops->sf_put_parent_ino(sfp, pino);
 	sfp->count = 0;
 	dp->i_d.di_size = size;
 	xfs_dir2_sf_check(args);
@@ -838,7 +753,7 @@
 	 */
 	if (args->namelen == 2 &&
 	    args->name[0] == '.' && args->name[1] == '.') {
-		args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
+		args->inumber = dp->d_ops->sf_get_parent_ino(sfp);
 		args->cmpresult = XFS_CMP_EXACT;
 		args->filetype = XFS_DIR3_FT_DIR;
 		return XFS_ERROR(EEXIST);
@@ -848,7 +763,7 @@
 	 */
 	ci_sfep = NULL;
 	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
-	     i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+	     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
 		/*
 		 * Compare name and if it's an exact match, return the inode
 		 * number. If it's the first case-insensitive match, store the
@@ -858,10 +773,8 @@
 								sfep->namelen);
 		if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
 			args->cmpresult = cmp;
-			args->inumber = xfs_dir3_sfe_get_ino(dp->i_mount,
-							     sfp, sfep);
-			args->filetype = xfs_dir3_sfe_get_ftype(dp->i_mount,
-								sfp, sfep);
+			args->inumber = dp->d_ops->sf_get_ino(sfp, sfep);
+			args->filetype = dp->d_ops->sf_get_ftype(sfep);
 			if (cmp == XFS_CMP_EXACT)
 				return XFS_ERROR(EEXIST);
 			ci_sfep = sfep;
@@ -917,10 +830,10 @@
 	 * Find the one we're deleting.
 	 */
 	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
-	     i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+	     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
 		if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
 								XFS_CMP_EXACT) {
-			ASSERT(xfs_dir3_sfe_get_ino(dp->i_mount, sfp, sfep) ==
+			ASSERT(dp->d_ops->sf_get_ino(sfp, sfep) ==
 			       args->inumber);
 			break;
 		}
@@ -934,7 +847,7 @@
 	 * Calculate sizes.
 	 */
 	byteoff = (int)((char *)sfep - (char *)sfp);
-	entsize = xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen);
+	entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
 	newsize = oldsize - entsize;
 	/*
 	 * Copy the part if any after the removed entry, sliding it down.
@@ -1041,28 +954,25 @@
 	if (args->namelen == 2 &&
 	    args->name[0] == '.' && args->name[1] == '.') {
 #if XFS_BIG_INUMS || defined(DEBUG)
-		ino = xfs_dir2_sf_get_parent_ino(sfp);
+		ino = dp->d_ops->sf_get_parent_ino(sfp);
 		ASSERT(args->inumber != ino);
 #endif
-		xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
+		dp->d_ops->sf_put_parent_ino(sfp, args->inumber);
 	}
 	/*
 	 * Normal entry, look for the name.
 	 */
 	else {
 		for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
-		     i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+		     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
 			if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
 								XFS_CMP_EXACT) {
 #if XFS_BIG_INUMS || defined(DEBUG)
-				ino = xfs_dir3_sfe_get_ino(dp->i_mount,
-							   sfp, sfep);
+				ino = dp->d_ops->sf_get_ino(sfp, sfep);
 				ASSERT(args->inumber != ino);
 #endif
-				xfs_dir3_sfe_put_ino(dp->i_mount, sfp, sfep,
-						     args->inumber);
-				xfs_dir3_sfe_put_ftype(dp->i_mount, sfp, sfep,
-						       args->filetype);
+				dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+				dp->d_ops->sf_put_ftype(sfep, args->filetype);
 				break;
 			}
 		}
@@ -1165,22 +1075,21 @@
 	 */
 	sfp->count = oldsfp->count;
 	sfp->i8count = 0;
-	xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
+	dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
 	/*
 	 * Copy the entries field by field.
 	 */
 	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
 		    oldsfep = xfs_dir2_sf_firstentry(oldsfp);
 	     i < sfp->count;
-	     i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep),
-		  oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep)) {
+	     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
+		  oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
 		sfep->namelen = oldsfep->namelen;
 		sfep->offset = oldsfep->offset;
 		memcpy(sfep->name, oldsfep->name, sfep->namelen);
-		xfs_dir3_sfe_put_ino(mp, sfp, sfep,
-			xfs_dir3_sfe_get_ino(mp, oldsfp, oldsfep));
-		xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
-			xfs_dir3_sfe_get_ftype(mp, oldsfp, oldsfep));
+		dp->d_ops->sf_put_ino(sfp, sfep,
+				      dp->d_ops->sf_get_ino(oldsfp, oldsfep));
+		dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
 	}
 	/*
 	 * Clean up the inode.
@@ -1244,22 +1153,21 @@
 	 */
 	sfp->count = oldsfp->count;
 	sfp->i8count = 1;
-	xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
+	dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
 	/*
 	 * Copy the entries field by field.
 	 */
 	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
 		    oldsfep = xfs_dir2_sf_firstentry(oldsfp);
 	     i < sfp->count;
-	     i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep),
-		  oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep)) {
+	     i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
+		  oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
 		sfep->namelen = oldsfep->namelen;
 		sfep->offset = oldsfep->offset;
 		memcpy(sfep->name, oldsfep->name, sfep->namelen);
-		xfs_dir3_sfe_put_ino(mp, sfp, sfep,
-			xfs_dir3_sfe_get_ino(mp, oldsfp, oldsfep));
-		xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
-			xfs_dir3_sfe_get_ftype(mp, oldsfp, oldsfep));
+		dp->d_ops->sf_put_ino(sfp, sfep,
+				      dp->d_ops->sf_get_ino(oldsfp, oldsfep));
+		dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
 	}
 	/*
 	 * Clean up the inode.
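
The helpers deleted at the top of xfs_dir2_sf.c (xfs_dir2_sf_get_ino and friends, now reached through dp->d_ops) implement the shortform space optimisation their comment describes: entries store 4-byte big-endian inode numbers until the directory contains a number that needs more, at which point i8count flips and entries switch to 8-byte storage whose most significant byte must be zero. A standalone sketch of that dual-width codec, with simplified stand-ins for the kernel's unaligned accessors:

	#include <stdint.h>
	#include <stdio.h>
	#include <assert.h>

	/* portable stand-ins for {get,put}_unaligned_be{32,64} */
	static uint64_t get_be(const uint8_t *p, int n)
	{
		uint64_t v = 0;
		while (n--)
			v = (v << 8) | *p++;
		return v;
	}

	static void put_be(uint8_t *p, uint64_t v, int n)
	{
		while (n--) {
			p[n] = v & 0xff;	/* LSB lands at the end */
			v >>= 8;
		}
	}

	static uint64_t sf_get_ino(int i8count, const uint8_t *from)
	{
		if (i8count)	/* 8-byte form: top byte must be zero */
			return get_be(from, 8) & 0x00ffffffffffffffULL;
		return get_be(from, 4);
	}

	static void sf_put_ino(int i8count, uint8_t *to, uint64_t ino)
	{
		assert((ino & 0xff00000000000000ULL) == 0);
		put_be(to, ino, i8count ? 8 : 4);
	}

	int main(void)
	{
		uint8_t slot[8];

		sf_put_ino(0, slot, 0xcafeULL);        /* small dir: 4 bytes */
		printf("i4: %llx\n", (unsigned long long)sf_get_ino(0, slot));
		sf_put_ino(1, slot, 0x1234567890ULL);  /* big inum: 8 bytes */
		printf("i8: %llx\n", (unsigned long long)sf_get_ino(1, slot));
		return 0;
	}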
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 45560ee..8367d6d 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -17,22 +17,21 @@
  */
 #include "xfs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_quota.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 STATIC int
 xfs_trim_extents(
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 1ee776d..6b1e695 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -18,28 +18,28 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Lock order:
@@ -292,118 +292,6 @@
 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC bool
-xfs_dquot_buf_verify_crc(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
-{
-	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
-	int			ndquots;
-	int			i;
-
-	if (!xfs_sb_version_hascrc(&mp->m_sb))
-		return true;
-
-	/*
-	 * if we are in log recovery, the quota subsystem has not been
-	 * initialised so we have no quotainfo structure. In that case, we need
-	 * to manually calculate the number of dquots in the buffer.
-	 */
-	if (mp->m_quotainfo)
-		ndquots = mp->m_quotainfo->qi_dqperchunk;
-	else
-		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
-					XFS_BB_TO_FSB(mp, bp->b_length));
-
-	for (i = 0; i < ndquots; i++, d++) {
-		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-				 XFS_DQUOT_CRC_OFF))
-			return false;
-		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
-			return false;
-	}
-	return true;
-}
-
-STATIC bool
-xfs_dquot_buf_verify(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
-{
-	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
-	xfs_dqid_t		id = 0;
-	int			ndquots;
-	int			i;
-
-	/*
-	 * if we are in log recovery, the quota subsystem has not been
-	 * initialised so we have no quotainfo structure. In that case, we need
-	 * to manually calculate the number of dquots in the buffer.
-	 */
-	if (mp->m_quotainfo)
-		ndquots = mp->m_quotainfo->qi_dqperchunk;
-	else
-		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
-
-	/*
-	 * On the first read of the buffer, verify that each dquot is valid.
-	 * We don't know what the id of the dquot is supposed to be, just that
-	 * they should be increasing monotonically within the buffer. If the
-	 * first id is corrupt, then it will fail on the second dquot in the
-	 * buffer so corruptions could point to the wrong dquot in this case.
-	 */
-	for (i = 0; i < ndquots; i++) {
-		struct xfs_disk_dquot	*ddq;
-		int			error;
-
-		ddq = &d[i].dd_diskdq;
-
-		if (i == 0)
-			id = be32_to_cpu(ddq->d_id);
-
-		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
-				       "xfs_dquot_buf_verify");
-		if (error)
-			return false;
-	}
-	return true;
-}
-
-static void
-xfs_dquot_buf_read_verify(
-	struct xfs_buf	*bp)
-{
-	struct xfs_mount	*mp = bp->b_target->bt_mount;
-
-	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-		xfs_buf_ioerror(bp, EFSCORRUPTED);
-	}
-}
-
-/*
- * we don't calculate the CRC here as that is done when the dquot is flushed to
- * the buffer after the update is done. This ensures that the dquot in the
- * buffer always has an up-to-date CRC value.
- */
-void
-xfs_dquot_buf_write_verify(
-	struct xfs_buf	*bp)
-{
-	struct xfs_mount	*mp = bp->b_target->bt_mount;
-
-	if (!xfs_dquot_buf_verify(mp, bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-		xfs_buf_ioerror(bp, EFSCORRUPTED);
-		return;
-	}
-}
-
-const struct xfs_buf_ops xfs_dquot_buf_ops = {
-	.verify_read = xfs_dquot_buf_read_verify,
-	.verify_write = xfs_dquot_buf_write_verify,
-};
-
 /*
  * Allocate a block and fill it with dquots.
  * This is called when the bmapi finds a hole.
@@ -514,6 +402,7 @@
 
 	return (error);
 }
+
 STATIC int
 xfs_qm_dqrepair(
 	struct xfs_mount	*mp,
@@ -547,7 +436,7 @@
 	/* Do the actual repair of dquots in this buffer */
 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
 		ddq = &d[i].dd_diskdq;
-		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
+		error = xfs_dqcheck(mp, ddq, firstid + i,
 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
 		if (error) {
@@ -1133,7 +1022,7 @@
 	/*
 	 * A simple sanity check in case we got a corrupted dquot..
 	 */
-	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
 	if (error) {
 		xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 55abbca..d22ed00 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -172,6 +172,4 @@
 	return dqp;
 }
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/xfs_dquot_buf.c b/fs/xfs/xfs_dquot_buf.c
new file mode 100644
index 0000000..d401457
--- /dev/null
+++ b/fs/xfs/xfs_dquot_buf.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_qm.h"
+#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+
+int
+xfs_calc_dquots_per_chunk(
+	struct xfs_mount	*mp,
+	unsigned int		nbblks)	/* basic block units */
+{
+	unsigned int	ndquots;
+
+	ASSERT(nbblks > 0);
+	ndquots = BBTOB(nbblks);
+	do_div(ndquots, sizeof(xfs_dqblk_t));
+
+	return ndquots;
+}
+
+/*
+ * Do some primitive error checking on ondisk dquot data structures.
+ */
+int
+xfs_dqcheck(
+	struct xfs_mount *mp,
+	xfs_disk_dquot_t *ddq,
+	xfs_dqid_t	 id,
+	uint		 type,	  /* used only when IO_dorepair is true */
+	uint		 flags,
+	char		 *str)
+{
+	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
+	int		errs = 0;
+
+	/*
+	 * We can encounter an uninitialized dquot buffer for 2 reasons:
+	 * 1. If we crash while deleting the quotainode(s), and those blks got
+	 *    used for user data. This is because we take the path of regular
+	 *    file deletion; however, the size field of quotainodes is never
+	 *    updated, so all the tricks that we play in itruncate_finish
+	 *    don't quite matter.
+	 *
+	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
+	 *    But the allocation will be replayed so we'll end up with an
+	 *    uninitialized quota block.
+	 *
+	 * This is all fine; things are still consistent, and we haven't lost
+	 * any quota information. Just don't complain about bad dquot blks.
+	 */
+	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
+		if (flags & XFS_QMOPT_DOWARN)
+			xfs_alert(mp,
+			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
+			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
+		errs++;
+	}
+	if (ddq->d_version != XFS_DQUOT_VERSION) {
+		if (flags & XFS_QMOPT_DOWARN)
+			xfs_alert(mp,
+			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
+			str, id, ddq->d_version, XFS_DQUOT_VERSION);
+		errs++;
+	}
+
+	if (ddq->d_flags != XFS_DQ_USER &&
+	    ddq->d_flags != XFS_DQ_PROJ &&
+	    ddq->d_flags != XFS_DQ_GROUP) {
+		if (flags & XFS_QMOPT_DOWARN)
+			xfs_alert(mp,
+			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
+			str, id, ddq->d_flags);
+		errs++;
+	}
+
+	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
+		if (flags & XFS_QMOPT_DOWARN)
+			xfs_alert(mp,
+			"%s : ondisk-dquot 0x%p, ID mismatch: "
+			"0x%x expected, found id 0x%x",
+			str, ddq, id, be32_to_cpu(ddq->d_id));
+		errs++;
+	}
+
+	if (!errs && ddq->d_id) {
+		if (ddq->d_blk_softlimit &&
+		    be64_to_cpu(ddq->d_bcount) >
+				be64_to_cpu(ddq->d_blk_softlimit)) {
+			if (!ddq->d_btimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					xfs_alert(mp,
+			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
+					str, (int)be32_to_cpu(ddq->d_id), ddq);
+				errs++;
+			}
+		}
+		if (ddq->d_ino_softlimit &&
+		    be64_to_cpu(ddq->d_icount) >
+				be64_to_cpu(ddq->d_ino_softlimit)) {
+			if (!ddq->d_itimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					xfs_alert(mp,
+			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
+					str, (int)be32_to_cpu(ddq->d_id), ddq);
+				errs++;
+			}
+		}
+		if (ddq->d_rtb_softlimit &&
+		    be64_to_cpu(ddq->d_rtbcount) >
+				be64_to_cpu(ddq->d_rtb_softlimit)) {
+			if (!ddq->d_rtbtimer) {
+				if (flags & XFS_QMOPT_DOWARN)
+					xfs_alert(mp,
+			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
+					str, (int)be32_to_cpu(ddq->d_id), ddq);
+				errs++;
+			}
+		}
+	}
+
+	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
+		return errs;
+
+	if (flags & XFS_QMOPT_DOWARN)
+		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
+
+	/*
+	 * Typically, a repair is only requested by quotacheck.
+	 */
+	ASSERT(id != -1);
+	ASSERT(flags & XFS_QMOPT_DQREPAIR);
+	memset(d, 0, sizeof(xfs_dqblk_t));
+
+	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+	d->dd_diskdq.d_flags = type;
+	d->dd_diskdq.d_id = cpu_to_be32(id);
+
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+				 XFS_DQUOT_CRC_OFF);
+	}
+
+	return errs;
+}
+
+STATIC bool
+xfs_dquot_buf_verify_crc(
+	struct xfs_mount	*mp,
+	struct xfs_buf		*bp)
+{
+	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
+	int			ndquots;
+	int			i;
+
+	if (!xfs_sb_version_hascrc(&mp->m_sb))
+		return true;
+
+	/*
+	 * if we are in log recovery, the quota subsystem has not been
+	 * initialised so we have no quotainfo structure. In that case, we need
+	 * to manually calculate the number of dquots in the buffer.
+	 */
+	if (mp->m_quotainfo)
+		ndquots = mp->m_quotainfo->qi_dqperchunk;
+	else
+		ndquots = xfs_calc_dquots_per_chunk(mp,
+					XFS_BB_TO_FSB(mp, bp->b_length));
+
+	for (i = 0; i < ndquots; i++, d++) {
+		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+				 XFS_DQUOT_CRC_OFF))
+			return false;
+		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
+			return false;
+	}
+	return true;
+}
+
+STATIC bool
+xfs_dquot_buf_verify(
+	struct xfs_mount	*mp,
+	struct xfs_buf		*bp)
+{
+	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
+	xfs_dqid_t		id = 0;
+	int			ndquots;
+	int			i;
+
+	/*
+	 * if we are in log recovery, the quota subsystem has not been
+	 * initialised so we have no quotainfo structure. In that case, we need
+	 * to manually calculate the number of dquots in the buffer.
+	 */
+	if (mp->m_quotainfo)
+		ndquots = mp->m_quotainfo->qi_dqperchunk;
+	else
+		ndquots = xfs_calc_dquots_per_chunk(mp, bp->b_length);
+
+	/*
+	 * On the first read of the buffer, verify that each dquot is valid.
+	 * We don't know what the id of the dquot is supposed to be, just that
+	 * they should be increasing monotonically within the buffer. If the
+	 * first id is corrupt, then it will fail on the second dquot in the
+	 * buffer so corruptions could point to the wrong dquot in this case.
+	 */
+	for (i = 0; i < ndquots; i++) {
+		struct xfs_disk_dquot	*ddq;
+		int			error;
+
+		ddq = &d[i].dd_diskdq;
+
+		if (i == 0)
+			id = be32_to_cpu(ddq->d_id);
+
+		error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+				       "xfs_dquot_buf_verify");
+		if (error)
+			return false;
+	}
+	return true;
+}
+
+static void
+xfs_dquot_buf_read_verify(
+	struct xfs_buf	*bp)
+{
+	struct xfs_mount	*mp = bp->b_target->bt_mount;
+
+	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
+		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+	}
+}
+
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
+static void
+xfs_dquot_buf_write_verify(
+	struct xfs_buf	*bp)
+{
+	struct xfs_mount	*mp = bp->b_target->bt_mount;
+
+	if (!xfs_dquot_buf_verify(mp, bp)) {
+		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		return;
+	}
+}
+
+const struct xfs_buf_ops xfs_dquot_buf_ops = {
+	.verify_read = xfs_dquot_buf_read_verify,
+	.verify_write = xfs_dquot_buf_write_verify,
+};
+
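
The new xfs_dquot_buf.c collects the dquot verifiers behind the xfs_buf_ops callback pair: verify_read runs the CRC walk plus per-dquot content checks as a buffer comes off disk, while verify_write re-checks content only, since the CRC is stamped later when the dquot is flushed into the buffer. A minimal sketch of that verifier-ops shape, with invented types and a toy magic check standing in for xfs_dqcheck:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct buf {
		char	data[64];
		int	error;		/* set by a failing verifier */
	};

	struct buf_ops {
		void (*verify_read)(struct buf *bp);
		void (*verify_write)(struct buf *bp);
	};

	static bool content_ok(struct buf *bp)
	{
		return memcmp(bp->data, "DQ", 2) == 0; /* toy magic check */
	}

	static void demo_read_verify(struct buf *bp)
	{
		/* a real read verifier also walks every dquot and its CRC */
		if (!content_ok(bp))
			bp->error = -117;	/* EFSCORRUPTED-ish */
	}

	static void demo_write_verify(struct buf *bp)
	{
		/* no CRC here: it is computed when the dquot is flushed */
		if (!content_ok(bp))
			bp->error = -117;
	}

	static const struct buf_ops demo_dquot_buf_ops = {
		.verify_read	= demo_read_verify,
		.verify_write	= demo_write_verify,
	};

	int main(void)
	{
		struct buf bp = { .data = "DQ..." };

		demo_dquot_buf_ops.verify_read(&bp);
		printf("read verify error = %d\n", bp.error);
		return 0;
	}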
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index e838d84..92e5f62 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -18,23 +18,19 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
+#include "xfs_log.h"
 
 static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
 {
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 1123d93f..9995b80 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -16,16 +16,13 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_error.h"
 
 #ifdef DEBUG
@@ -159,7 +156,7 @@
 {
 	if (level <= xfs_error_level) {
 		xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
-		"Internal error %s at line %d of file %s.  Caller 0x%p\n",
+		"Internal error %s at line %d of file %s.  Caller 0x%p",
 			    tag, linenum, filename, ra);
 
 		xfs_stack_trace();
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 066df42..1399e18 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -16,21 +16,21 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
 #include "xfs_export.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index e43708e..fd22f69 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -19,17 +19,18 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_alloc.h"
-#include "xfs_inode.h"
 #include "xfs_extent_busy.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 
 void
 xfs_extent_busy_insert(
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 985412d..bfff284 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -20,6 +20,10 @@
 #ifndef __XFS_EXTENT_BUSY_H__
 #define	__XFS_EXTENT_BUSY_H__
 
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_alloc_arg;
+
 /*
  * Busy block/extent entry.  Indexed by a rbtree in perag to mark blocks that
  * have been freed but whose transactions aren't committed to disk yet.
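
The forward declarations added to xfs_extent_busy.h make the header safe to include on its own: every prototype in it only passes pointers, and a pointer to an incomplete struct type needs no full definition in scope. An illustrative header (not the real one) showing the shape:

	/* example_busy.h -- illustrative only */
	#ifndef EXAMPLE_BUSY_H
	#define EXAMPLE_BUSY_H

	struct xfs_mount;	/* incomplete types are enough ... */
	struct xfs_trans;

	/* ... because the API only traffics in pointers to them */
	void example_busy_insert(struct xfs_trans *tp, struct xfs_mount *mp);

	#endif /* EXAMPLE_BUSY_H */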
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index dc53e8f..3680d04 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -17,14 +17,14 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
 #include "xfs_extfree_item.h"
 
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 4c749ab..52c91e1 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -17,25 +17,27 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -805,44 +807,64 @@
 
 STATIC long
 xfs_file_fallocate(
-	struct file	*file,
-	int		mode,
-	loff_t		offset,
-	loff_t		len)
+	struct file		*file,
+	int			mode,
+	loff_t			offset,
+	loff_t			len)
 {
-	struct inode	*inode = file_inode(file);
-	long		error;
-	loff_t		new_size = 0;
-	xfs_flock64_t	bf;
-	xfs_inode_t	*ip = XFS_I(inode);
-	int		cmd = XFS_IOC_RESVSP;
-	int		attr_flags = XFS_ATTR_NOLOCK;
+	struct inode		*inode = file_inode(file);
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_trans	*tp;
+	long			error;
+	loff_t			new_size = 0;
 
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
 		return -EOPNOTSUPP;
 
-	bf.l_whence = 0;
-	bf.l_start = offset;
-	bf.l_len = len;
-
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		error = xfs_free_file_space(ip, offset, len);
+		if (error)
+			goto out_unlock;
+	} else {
+		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+		    offset + len > i_size_read(inode)) {
+			new_size = offset + len;
+			error = -inode_newsize_ok(inode, new_size);
+			if (error)
+				goto out_unlock;
+		}
 
-	if (mode & FALLOC_FL_PUNCH_HOLE)
-		cmd = XFS_IOC_UNRESVSP;
-
-	/* check the new inode size is valid before allocating */
-	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    offset + len > i_size_read(inode)) {
-		new_size = offset + len;
-		error = inode_newsize_ok(inode, new_size);
+		error = xfs_alloc_file_space(ip, offset, len,
+					     XFS_BMAPI_PREALLOC);
 		if (error)
 			goto out_unlock;
 	}
 
-	if (file->f_flags & O_DSYNC)
-		attr_flags |= XFS_ATTR_SYNC;
+	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		goto out_unlock;
+	}
 
-	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	ip->i_d.di_mode &= ~S_ISUID;
+	if (ip->i_d.di_mode & S_IXGRP)
+		ip->i_d.di_mode &= ~S_ISGID;
+
+	if (!(mode & FALLOC_FL_PUNCH_HOLE))
+		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+
+	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+	if (file->f_flags & O_DSYNC)
+		xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0);
 	if (error)
 		goto out_unlock;
 
@@ -852,12 +874,12 @@
 
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = new_size;
-		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
+		error = xfs_setattr_size(ip, &iattr);
 	}
 
 out_unlock:
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-	return error;
+	return -error;
 }
 
 
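The rewritten fallocate path above now drives preallocation through xfs_alloc_file_space() and hole punching through xfs_free_file_space(), with a single tr_writeid transaction to log the timestamp/flag updates. A minimal userspace exercise of both branches follows; this is an illustrative sketch only (the file name is arbitrary), not part of the patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* preallocation: reaches xfs_alloc_file_space() above */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");
	/*
	 * hole punch: reaches xfs_free_file_space(); the VFS requires
	 * KEEP_SIZE together with PUNCH_HOLE, consistent with the mode
	 * check at the top of xfs_file_fallocate()
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 << 10) < 0)
		perror("punch");
	close(fd);
	return 0;
}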
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce78e65..12b6e77 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -16,19 +16,19 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inum.h"
+#include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_alloc.h"
 #include "xfs_mru_cache.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_trace.h"
 
diff --git a/fs/xfs/xfs_format.h b/fs/xfs/xfs_format.h
index 35c08ff..b6ab5a3 100644
--- a/fs/xfs/xfs_format.h
+++ b/fs/xfs/xfs_format.h
@@ -156,14 +156,259 @@
 	((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
 			sizeof(struct xfs_dsymlink_hdr) : 0))
 
-int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
-int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-			uint32_t size, struct xfs_buf *bp);
-bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-			uint32_t size, struct xfs_buf *bp);
-void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
-				 struct xfs_inode *ip, struct xfs_ifork *ifp);
 
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+/*
+ * Allocation Btree format definitions
+ *
+ * There are two on-disk btrees, one sorted by blockno and one sorted
+ * by blockcount and blockno.  All blocks look the same to make the code
+ * simpler; if we have time later, we'll make the optimizations.
+ */
+#define	XFS_ABTB_MAGIC		0x41425442	/* 'ABTB' for bno tree */
+#define	XFS_ABTB_CRC_MAGIC	0x41423342	/* 'AB3B' */
+#define	XFS_ABTC_MAGIC		0x41425443	/* 'ABTC' for cnt tree */
+#define	XFS_ABTC_CRC_MAGIC	0x41423343	/* 'AB3C' */
+
+/*
+ * Data record/key structure
+ */
+typedef struct xfs_alloc_rec {
+	__be32		ar_startblock;	/* starting block number */
+	__be32		ar_blockcount;	/* count of free blocks */
+} xfs_alloc_rec_t, xfs_alloc_key_t;
+
+typedef struct xfs_alloc_rec_incore {
+	xfs_agblock_t	ar_startblock;	/* starting block number */
+	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
+} xfs_alloc_rec_incore_t;
+
+/* btree pointer type */
+typedef __be32 xfs_alloc_ptr_t;
+
+/*
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
+ */
+#define	XFS_BNO_BLOCK(mp)	((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
+#define	XFS_CNT_BLOCK(mp)	((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
+
+
+/*
+ * Inode Allocation Btree format definitions
+ *
+ * There is a btree for the inode map per allocation group.
+ */
+#define	XFS_IBT_MAGIC		0x49414254	/* 'IABT' */
+#define	XFS_IBT_CRC_MAGIC	0x49414233	/* 'IAB3' */
+
+typedef	__uint64_t	xfs_inofree_t;
+#define	XFS_INODES_PER_CHUNK		(NBBY * sizeof(xfs_inofree_t))
+#define	XFS_INODES_PER_CHUNK_LOG	(XFS_NBBYLOG + 3)
+#define	XFS_INOBT_ALL_FREE		((xfs_inofree_t)-1)
+#define	XFS_INOBT_MASK(i)		((xfs_inofree_t)1 << (i))
+
+static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
+{
+	return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
+}
+
+/*
+ * Data record structure
+ */
+typedef struct xfs_inobt_rec {
+	__be32		ir_startino;	/* starting inode number */
+	__be32		ir_freecount;	/* count of free inodes (set bits) */
+	__be64		ir_free;	/* free inode mask */
+} xfs_inobt_rec_t;
+
+typedef struct xfs_inobt_rec_incore {
+	xfs_agino_t	ir_startino;	/* starting inode number */
+	__int32_t	ir_freecount;	/* count of free inodes (set bits) */
+	xfs_inofree_t	ir_free;	/* free inode mask */
+} xfs_inobt_rec_incore_t;
+
+
+/*
+ * Key structure
+ */
+typedef struct xfs_inobt_key {
+	__be32		ir_startino;	/* starting inode number */
+} xfs_inobt_key_t;
+
+/* btree pointer type */
+typedef __be32 xfs_inobt_ptr_t;
+
+/*
+ * block numbers in the AG.
+ */
+#define	XFS_IBT_BLOCK(mp)		((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
+#define	XFS_PREALLOC_BLOCKS(mp)		((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
+
+
+
+/*
+ * BMAP Btree format definitions
+ *
+ * This includes both the root block definition that sits inside an inode fork
+ * and the record/pointer formats for the leaf/node in the blocks.
+ */
+#define XFS_BMAP_MAGIC		0x424d4150	/* 'BMAP' */
+#define XFS_BMAP_CRC_MAGIC	0x424d4133	/* 'BMA3' */
+
+/*
+ * Bmap root header, on-disk form only.
+ */
+typedef struct xfs_bmdr_block {
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
+} xfs_bmdr_block_t;
+
+/*
+ * Bmap btree record and extent descriptor.
+ *  l0:63 is an extent flag (value 1 indicates non-normal).
+ *  l0:9-62 are startoff.
+ *  l0:0-8 and l1:21-63 are startblock.
+ *  l1:0-20 are blockcount.
+ */
+#define BMBT_EXNTFLAG_BITLEN	1
+#define BMBT_STARTOFF_BITLEN	54
+#define BMBT_STARTBLOCK_BITLEN	52
+#define BMBT_BLOCKCOUNT_BITLEN	21
+
+typedef struct xfs_bmbt_rec {
+	__be64			l0, l1;
+} xfs_bmbt_rec_t;
+
+typedef __uint64_t	xfs_bmbt_rec_base_t;	/* use this for casts */
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
+
+typedef struct xfs_bmbt_rec_host {
+	__uint64_t		l0, l1;
+} xfs_bmbt_rec_host_t;
+
+/*
+ * Values and macros for delayed-allocation startblock fields.
+ */
+#define STARTBLOCKVALBITS	17
+#define STARTBLOCKMASKBITS	(15 + XFS_BIG_BLKNOS * 20)
+#define DSTARTBLOCKMASKBITS	(15 + 20)
+#define STARTBLOCKMASK		\
+	(((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#define DSTARTBLOCKMASK		\
+	(((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+
+static inline int isnullstartblock(xfs_fsblock_t x)
+{
+	return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
+}
+
+static inline int isnulldstartblock(xfs_dfsbno_t x)
+{
+	return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
+}
+
+static inline xfs_fsblock_t nullstartblock(int k)
+{
+	ASSERT(k < (1 << STARTBLOCKVALBITS));
+	return STARTBLOCKMASK | (k);
+}
+
+static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
+{
+	return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
+}
+
+/*
+ * Possible extent formats.
+ */
+typedef enum {
+	XFS_EXTFMT_NOSTATE = 0,
+	XFS_EXTFMT_HASSTATE
+} xfs_exntfmt_t;
+
+/*
+ * Possible extent states.
+ */
+typedef enum {
+	XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+	XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
+} xfs_exntst_t;
+
+/*
+ * Incore version of above.
+ */
+typedef struct xfs_bmbt_irec
+{
+	xfs_fileoff_t	br_startoff;	/* starting file offset */
+	xfs_fsblock_t	br_startblock;	/* starting block number */
+	xfs_filblks_t	br_blockcount;	/* number of blocks */
+	xfs_exntst_t	br_state;	/* extent state */
+} xfs_bmbt_irec_t;
+
+/*
+ * Key structure for non-leaf levels of the tree.
+ */
+typedef struct xfs_bmbt_key {
+	__be64		br_startoff;	/* starting file offset */
+} xfs_bmbt_key_t, xfs_bmdr_key_t;
+
+/* btree pointer type */
+typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
+
+
+/*
+ * Generic Btree block format definitions
+ *
+ * This is a combination of the actual format used on disk for short and long
+ * format btrees.  The first three fields are shared by both formats, but the
+ * pointers are different and should be used with care.
+ *
+ * To get the size of the actual short or long form headers please use the size
+ * macros below.  Never use sizeof(xfs_btree_block).
+ *
+ * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
+ * with the crc feature bit, and all accesses to them must be conditional on
+ * that flag.
+ */
+struct xfs_btree_block {
+	__be32		bb_magic;	/* magic number for block type */
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
+	union {
+		struct {
+			__be32		bb_leftsib;
+			__be32		bb_rightsib;
+
+			__be64		bb_blkno;
+			__be64		bb_lsn;
+			uuid_t		bb_uuid;
+			__be32		bb_owner;
+			__le32		bb_crc;
+		} s;			/* short form pointers */
+		struct	{
+			__be64		bb_leftsib;
+			__be64		bb_rightsib;
+
+			__be64		bb_blkno;
+			__be64		bb_lsn;
+			uuid_t		bb_uuid;
+			__be64		bb_owner;
+			__le32		bb_crc;
+			__be32		bb_pad; /* padding for alignment */
+		} l;			/* long form pointers */
+	} bb_u;				/* rest */
+};
+
+#define XFS_BTREE_SBLOCK_LEN	16	/* size of a short form block */
+#define XFS_BTREE_LBLOCK_LEN	24	/* size of a long form block */
+
+/* sizes of CRC enabled btree blocks */
+#define XFS_BTREE_SBLOCK_CRC_LEN	(XFS_BTREE_SBLOCK_LEN + 40)
+#define XFS_BTREE_LBLOCK_CRC_LEN	(XFS_BTREE_LBLOCK_LEN + 48)
+
+#define XFS_BTREE_SBLOCK_CRC_OFF \
+	offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
+#define XFS_BTREE_LBLOCK_CRC_OFF \
+	offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
 
 #endif /* __XFS_FORMAT_H__ */
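The bit packing documented above for xfs_bmbt_rec (extent flag in l0:63, startoff in l0:9-62, startblock split across l0:0-8 and l1:21-63, blockcount in l1:0-20) can be sanity-checked with a standalone decoder. The sketch below is illustrative only -- in-kernel the xfs_bmbt_get_*() accessors do this work -- and assumes host-endian words as in xfs_bmbt_rec_host:

#include <stdint.h>

/*
 * Illustrative decode of the in-core bmbt record words, following the
 * bit widths named above: EXNTFLAG 1, STARTOFF 54, STARTBLOCK 52
 * (9 high bits in l0, 43 low bits in l1), BLOCKCOUNT 21.
 */
struct bmbt_fields {
	uint64_t startoff;	/* starting file offset (54 bits) */
	uint64_t startblock;	/* starting block number (52 bits) */
	uint32_t blockcount;	/* number of blocks (21 bits) */
	int	 unwritten;	/* extent flag, l0:63 */
};

static struct bmbt_fields bmbt_decode(uint64_t l0, uint64_t l1)
{
	struct bmbt_fields f;

	f.unwritten  = (int)(l0 >> 63);
	f.startoff   = (l0 >> 9) & ((1ULL << 54) - 1);
	f.startblock = ((l0 & ((1ULL << 9) - 1)) << 43) | (l1 >> 21);
	f.blockcount = (uint32_t)(l1 & ((1ULL << 21) - 1));
	return f;
}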
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 18272c7..c5fc116 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -233,11 +233,11 @@
 #define XFS_FSOP_GEOM_FLAGS_LOGV2	0x0100	/* log format version 2	*/
 #define XFS_FSOP_GEOM_FLAGS_SECTOR	0x0200	/* sector sizes >1BB	*/
 #define XFS_FSOP_GEOM_FLAGS_ATTR2	0x0400	/* inline attributes rework */
-#define XFS_FSOP_GEOM_FLAGS_PROJID32	0x0800  /* 32-bit project IDs	*/
+#define XFS_FSOP_GEOM_FLAGS_PROJID32	0x0800	/* 32-bit project IDs	*/
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI	0x1000	/* ASCII only CI names	*/
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB	0x4000	/* lazy superblock counters */
 #define XFS_FSOP_GEOM_FLAGS_V5SB	0x8000	/* version 5 superblock */
-
+#define XFS_FSOP_GEOM_FLAGS_FTYPE	0x10000	/* inode directory types */
 
 /*
  * Minimum and maximum sizes need for growth checks.
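The new FTYPE geometry flag is reported through the existing XFS_IOC_FSGEOMETRY ioctl, so userspace can probe whether directory entries carry file types. A hedged sketch: the <xfs/xfs.h> include path is the usual xfsprogs location, and the flag is defined locally from the value in the hunk above in case installed headers predate it:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* xfs_fsop_geom_t, XFS_IOC_FSGEOMETRY (xfsprogs) */

#ifndef XFS_FSOP_GEOM_FLAGS_FTYPE
#define XFS_FSOP_GEOM_FLAGS_FTYPE 0x10000	/* value from the hunk above */
#endif

int main(int argc, char **argv)
{
	xfs_fsop_geom_t geo;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}
	printf("ftype in dirents: %s\n",
	       (geo.flags & XFS_FSOP_GEOM_FLAGS_FTYPE) ? "yes" : "no");
	close(fd);
	return 0;
}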
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index e64ee52..a6e54b3 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -17,28 +17,29 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
 #include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_fsops.h"
 #include "xfs_itable.h"
 #include "xfs_trans_space.h"
 #include "xfs_rtalloc.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 /*
  * File system operations
@@ -101,7 +102,9 @@
 			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
 				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
 			(xfs_sb_version_hascrc(&mp->m_sb) ?
-				XFS_FSOP_GEOM_FLAGS_V5SB : 0);
+				XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
+			(xfs_sb_version_hasftype(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
 		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
 				mp->m_sb.sb_logsectsize : BBSIZE;
 		geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -153,7 +156,7 @@
 	xfs_buf_t		*bp;
 	int			bucket;
 	int			dpct;
-	int			error;
+	int			error, saved_error = 0;
 	xfs_agnumber_t		nagcount;
 	xfs_agnumber_t		nagimax = 0;
 	xfs_rfsblock_t		nb, nb_mod;
@@ -496,29 +499,33 @@
 				error = ENOMEM;
 		}
 
+		/*
+		 * If we get an error reading or writing alternate superblocks,
+		 * continue.  xfs_repair chooses the "best" superblock based
+		 * on most matches; if we break early, we'll leave more
+		 * superblocks un-updated than updated, and xfs_repair may
+		 * pick them over the properly-updated primary.
+		 */
 		if (error) {
 			xfs_warn(mp,
 		"error %d reading secondary superblock for ag %d",
 				error, agno);
-			break;
+			saved_error = error;
+			continue;
 		}
 		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
 
-		/*
-		 * If we get an error writing out the alternate superblocks,
-		 * just issue a warning and continue.  The real work is
-		 * already done and committed.
-		 */
 		error = xfs_bwrite(bp);
 		xfs_buf_relse(bp);
 		if (error) {
 			xfs_warn(mp,
 		"write error %d updating secondary superblock for ag %d",
 				error, agno);
-			break; /* no point in continuing */
+			saved_error = error;
+			continue;
 		}
 	}
-	return error;
+	return saved_error ? saved_error : error;
 
  error0:
 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ccf2fb1..e87719c 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -17,29 +17,30 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_icreate_item.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_trace.h"
 
 
 /*
@@ -1627,8 +1628,9 @@
 {
 	int			error;
 
-	ASSERT(agno != NULLAGNUMBER);
+	trace_xfs_read_agi(mp, agno);
 
+	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
 			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
@@ -1651,6 +1653,8 @@
 	struct xfs_perag	*pag;	/* per allocation group data */
 	int			error;
 
+	trace_xfs_ialloc_read_agi(mp, agno);
+
 	error = xfs_read_agi(mp, tp, agno, bpp);
 	if (error)
 		return error;
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 68c0732..a8f76a5 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -23,6 +23,7 @@
 struct xfs_imap;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_btree_cur;
 
 /*
  * Allocation parameters for inode allocation.
@@ -42,7 +43,7 @@
 static inline struct xfs_dinode *
 xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
 {
-	return (xfs_dinode_t *)
+	return (struct xfs_dinode *)
 		(xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog));
 }
 
@@ -158,6 +159,4 @@
 			  xfs_agnumber_t agno, xfs_agblock_t agbno,
 			  xfs_agblock_t length, unsigned int gen);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 #endif	/* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 5448eb6..c8fa5bb 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -17,24 +17,23 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC int
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 3ac36b76..f38b220 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -27,55 +27,6 @@
 struct xfs_mount;
 
 /*
- * There is a btree for the inode map per allocation group.
- */
-#define	XFS_IBT_MAGIC		0x49414254	/* 'IABT' */
-#define	XFS_IBT_CRC_MAGIC	0x49414233	/* 'IAB3' */
-
-typedef	__uint64_t	xfs_inofree_t;
-#define	XFS_INODES_PER_CHUNK		(NBBY * sizeof(xfs_inofree_t))
-#define	XFS_INODES_PER_CHUNK_LOG	(XFS_NBBYLOG + 3)
-#define	XFS_INOBT_ALL_FREE		((xfs_inofree_t)-1)
-#define	XFS_INOBT_MASK(i)		((xfs_inofree_t)1 << (i))
-
-static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
-{
-	return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
-}
-
-/*
- * Data record structure
- */
-typedef struct xfs_inobt_rec {
-	__be32		ir_startino;	/* starting inode number */
-	__be32		ir_freecount;	/* count of free inodes (set bits) */
-	__be64		ir_free;	/* free inode mask */
-} xfs_inobt_rec_t;
-
-typedef struct xfs_inobt_rec_incore {
-	xfs_agino_t	ir_startino;	/* starting inode number */
-	__int32_t	ir_freecount;	/* count of free inodes (set bits) */
-	xfs_inofree_t	ir_free;	/* free inode mask */
-} xfs_inobt_rec_incore_t;
-
-
-/*
- * Key structure
- */
-typedef struct xfs_inobt_key {
-	__be32		ir_startino;	/* starting inode number */
-} xfs_inobt_key_t;
-
-/* btree pointer type */
-typedef __be32 xfs_inobt_ptr_t;
-
-/*
- * block numbers in the AG.
- */
-#define	XFS_IBT_BLOCK(mp)		((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
-#define	XFS_PREALLOC_BLOCKS(mp)		((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
-
-/*
  * Btree block header size depends on a superblock flag.
  */
 #define XFS_INOBT_BLOCK_LEN(mp) \
@@ -110,6 +61,4 @@
 		struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
 extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
-
 #endif	/* __XFS_IALLOC_BTREE_H__ */
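xfs_inobt_maskn(), which moves into xfs_format.h with this block, builds a run of n set bits starting at bit i of the 64-bit free-inode mask; the n == XFS_INODES_PER_CHUNK case relies on unsigned wraparound of (0 - 1) to yield all-ones. A host-side replica for illustration, assuming the usual 64-inode chunk (NBBY * sizeof(xfs_inofree_t)):

#include <stdio.h>
#include <stdint.h>

#define INODES_PER_CHUNK 64	/* NBBY * sizeof(uint64_t) */

/* replica of xfs_inobt_maskn(): n set bits starting at bit i */
static uint64_t maskn(int i, int n)
{
	return ((n >= INODES_PER_CHUNK ? 0 : (uint64_t)1 << n) - 1) << i;
}

int main(void)
{
	printf("maskn(4, 3)  = %#018llx\n",	/* 0x70: bits 4..6 set */
	       (unsigned long long)maskn(4, 3));
	printf("maskn(0, 64) = %#018llx\n",	/* whole chunk free */
	       (unsigned long long)maskn(0, 64));
	return 0;
}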
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 474807a..98d3524 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -18,24 +18,19 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_dinode.h"
 #include "xfs_error.h"
-#include "xfs_filestream.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_fsops.h"
 #include "xfs_icache.h"
 #include "xfs_bmap_util.h"
 
@@ -500,11 +495,6 @@
 	if (!igrab(inode))
 		return ENOENT;
 
-	if (is_bad_inode(inode)) {
-		IRELE(ip);
-		return ENOENT;
-	}
-
 	/* inode is valid */
 	return 0;
 
@@ -918,8 +908,6 @@
 		xfs_iflock(ip);
 	}
 
-	if (is_bad_inode(VFS_I(ip)))
-		goto reclaim;
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		xfs_iunpin_wait(ip);
 		xfs_iflush_abort(ip, false);
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 5a5a593..d2eaccf 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -17,13 +17,14 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_icreate_item.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e3d7538..001aa89 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -19,39 +19,38 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_space.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_attr_sf.h"
 #include "xfs_attr.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -1663,134 +1662,84 @@
 }
 
 /*
- * xfs_inactive
+ * xfs_inactive_truncate
  *
- * This is called when the vnode reference count for the vnode
- * goes to zero.  If the file has been unlinked, then it must
- * now be truncated.  Also, we clear all of the read-ahead state
- * kept for the inode here since the file is now closed.
+ * Called to perform a truncate when an inode becomes unlinked.
  */
-int
-xfs_inactive(
-	xfs_inode_t	*ip)
+STATIC int
+xfs_inactive_truncate(
+	struct xfs_inode *ip)
 {
-	xfs_bmap_free_t		free_list;
-	xfs_fsblock_t		first_block;
-	int			committed;
+	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_trans	*tp;
-	struct xfs_mount	*mp;
-	struct xfs_trans_res	*resp;
 	int			error;
-	int			truncate = 0;
-
-	/*
-	 * If the inode is already free, then there can be nothing
-	 * to clean up here.
-	 */
-	if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
-		ASSERT(ip->i_df.if_real_bytes == 0);
-		ASSERT(ip->i_df.if_broot_bytes == 0);
-		return VN_INACTIVE_CACHE;
-	}
-
-	mp = ip->i_mount;
-
-	error = 0;
-
-	/* If this is a read-only mount, don't do this (would generate I/O) */
-	if (mp->m_flags & XFS_MOUNT_RDONLY)
-		goto out;
-
-	if (ip->i_d.di_nlink != 0) {
-		/*
-		 * force is true because we are evicting an inode from the
-		 * cache. Post-eof blocks must be freed, lest we end up with
-		 * broken free space accounting.
-		 */
-		if (xfs_can_free_eofblocks(ip, true)) {
-			error = xfs_free_eofblocks(mp, ip, false);
-			if (error)
-				return VN_INACTIVE_CACHE;
-		}
-		goto out;
-	}
-
-	if (S_ISREG(ip->i_d.di_mode) &&
-	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
-	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
-		truncate = 1;
-
-	error = xfs_qm_dqattach(ip, 0);
-	if (error)
-		return VN_INACTIVE_CACHE;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-	resp = (truncate || S_ISLNK(ip->i_d.di_mode)) ?
-		&M_RES(mp)->tr_itruncate : &M_RES(mp)->tr_ifree;
-
-	error = xfs_trans_reserve(tp, resp, 0, 0);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 	if (error) {
 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 		xfs_trans_cancel(tp, 0);
-		return VN_INACTIVE_CACHE;
+		return error;
 	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, 0);
 
-	if (S_ISLNK(ip->i_d.di_mode)) {
-		error = xfs_inactive_symlink(ip, &tp);
-		if (error)
-			goto out_cancel;
-	} else if (truncate) {
-		ip->i_d.di_size = 0;
-		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	/*
+	 * Log the inode size first to prevent stale data exposure in the event
+	 * of a system crash before the truncate completes. See the related
+	 * comment in xfs_setattr_size() for details.
+	 */
+	ip->i_d.di_size = 0;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
-		if (error)
-			goto out_cancel;
+	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
+	if (error)
+		goto error_trans_cancel;
 
-		ASSERT(ip->i_d.di_nextents == 0);
+	ASSERT(ip->i_d.di_nextents == 0);
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+	if (error)
+		goto error_unlock;
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return 0;
+
+error_trans_cancel:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+}
+
+/*
+ * xfs_inactive_ifree()
+ *
+ * Perform the inode free when an inode is unlinked.
+ */
+STATIC int
+xfs_inactive_ifree(
+	struct xfs_inode *ip)
+{
+	xfs_bmap_free_t		free_list;
+	xfs_fsblock_t		first_block;
+	int			committed;
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
+	if (error) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
+		return error;
 	}
 
-	/*
-	 * If there are attributes associated with the file then blow them away
-	 * now.  The code calls a routine that recursively deconstructs the
-	 * attribute fork.  We need to just commit the current transaction
-	 * because we can't use it for xfs_attr_inactive().
-	 */
-	if (ip->i_d.di_anextents > 0) {
-		ASSERT(ip->i_d.di_forkoff != 0);
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
 
-		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-		if (error)
-			goto out_unlock;
-
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-		error = xfs_attr_inactive(ip);
-		if (error)
-			goto out;
-
-		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
-		if (error) {
-			xfs_trans_cancel(tp, 0);
-			goto out;
-		}
-
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_trans_ijoin(tp, ip, 0);
-	}
-
-	if (ip->i_afp)
-		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
-	ASSERT(ip->i_d.di_anextents == 0);
-
-	/*
-	 * Free the inode.
-	 */
 	xfs_bmap_init(&free_list, &first_block);
 	error = xfs_ifree(tp, ip, &free_list);
 	if (error) {
@@ -1805,38 +1754,123 @@
 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
 		}
 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-	} else {
-		/*
-		 * Credit the quota account(s). The inode is gone.
-		 */
-		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
-
-		/*
-		 * Just ignore errors at this point.  There is nothing we can
-		 * do except to try to keep going. Make sure it's not a silent
-		 * error.
-		 */
-		error = xfs_bmap_finish(&tp,  &free_list, &committed);
-		if (error)
-			xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
-				__func__, error);
-		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-		if (error)
-			xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-				__func__, error);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		return error;
 	}
 
 	/*
+	 * Credit the quota account(s). The inode is gone.
+	 */
+	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+	/*
+	 * Just ignore errors at this point.  There is nothing we can
+	 * do except to try to keep going. Make sure it's not a silent
+	 * error.
+	 */
+	error = xfs_bmap_finish(&tp,  &free_list, &committed);
+	if (error)
+		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+			__func__, error);
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+	if (error)
+		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+			__func__, error);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return 0;
+}
+
+/*
+ * xfs_inactive
+ *
+ * This is called when the vnode reference count for the vnode
+ * goes to zero.  If the file has been unlinked, then it must
+ * now be truncated.  Also, we clear all of the read-ahead state
+ * kept for the inode here since the file is now closed.
+ */
+void
+xfs_inactive(
+	xfs_inode_t	*ip)
+{
+	struct xfs_mount	*mp;
+	int			error;
+	int			truncate = 0;
+
+	/*
+	 * If the inode is already free, then there can be nothing
+	 * to clean up here.
+	 */
+	if (ip->i_d.di_mode == 0) {
+		ASSERT(ip->i_df.if_real_bytes == 0);
+		ASSERT(ip->i_df.if_broot_bytes == 0);
+		return;
+	}
+
+	mp = ip->i_mount;
+
+	/* If this is a read-only mount, don't do this (would generate I/O) */
+	if (mp->m_flags & XFS_MOUNT_RDONLY)
+		return;
+
+	if (ip->i_d.di_nlink != 0) {
+		/*
+		 * force is true because we are evicting an inode from the
+		 * cache. Post-eof blocks must be freed, lest we end up with
+		 * broken free space accounting.
+		 */
+		if (xfs_can_free_eofblocks(ip, true))
+			xfs_free_eofblocks(mp, ip, false);
+
+		return;
+	}
+
+	if (S_ISREG(ip->i_d.di_mode) &&
+	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
+	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
+		truncate = 1;
+
+	error = xfs_qm_dqattach(ip, 0);
+	if (error)
+		return;
+
+	if (S_ISLNK(ip->i_d.di_mode))
+		error = xfs_inactive_symlink(ip);
+	else if (truncate)
+		error = xfs_inactive_truncate(ip);
+	if (error)
+		return;
+
+	/*
+	 * If there are attributes associated with the file then blow them away
+	 * now.  The code calls a routine that recursively deconstructs the
+	 * attribute fork.  We need to just commit the current transaction
+	 * because we can't use it for xfs_attr_inactive().
+	 */
+	if (ip->i_d.di_anextents > 0) {
+		ASSERT(ip->i_d.di_forkoff != 0);
+
+		error = xfs_attr_inactive(ip);
+		if (error)
+			return;
+	}
+
+	if (ip->i_afp)
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+	ASSERT(ip->i_d.di_anextents == 0);
+
+	/*
+	 * Free the inode.
+	 */
+	error = xfs_inactive_ifree(ip);
+	if (error)
+		return;
+
+	/*
 	 * Release the dquots held by inode, if any.
 	 */
 	xfs_qm_dqdetach(ip);
-out_unlock:
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
-	return VN_INACTIVE_CACHE;
-out_cancel:
-	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	goto out_unlock;
 }
 
 /*
@@ -2370,6 +2404,33 @@
 		__xfs_iunpin_wait(ip);
 }
 
+/*
+ * Removing an inode from the namespace involves removing the directory entry
+ * and dropping the link count on the inode. Removing the directory entry can
+ * result in locking an AGF (directory blocks were freed) and removing a link
+ * count can result in placing the inode on an unlinked list which results in
+ * locking an AGI.
+ *
+ * The big problem here is that we have an ordering constraint on AGF and AGI
+ * locking - inode allocation locks the AGI, then can allocate a new extent for
+ * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
+ * removes the inode from the unlinked list, requiring that we lock the AGI
+ * first, and then freeing the inode can result in an inode chunk being freed
+ * and hence freeing disk space requiring that we lock an AGF.
+ *
+ * Hence the ordering that is imposed by other parts of the code is AGI before
+ * AGF. This means we cannot remove the directory entry before we drop the inode
+ * reference count and put it on the unlinked list as this results in a lock
+ * order of AGF then AGI, and this can deadlock against inode allocation and
+ * freeing. Therefore we must drop the link counts before we remove the
+ * directory entry.
+ *
+ * This is still safe from a transactional point of view - it is not until we
+ * get to xfs_bmap_finish() that we have the possibility of multiple
+ * transactions in this operation. Hence as long as we remove the directory
+ * entry and drop the link count in the first transaction of the remove
+ * operation, there are no transactional constraints on the ordering here.
+ */
 int
 xfs_remove(
 	xfs_inode_t             *dp,
@@ -2439,6 +2500,7 @@
 	/*
 	 * If we're removing a directory perform some additional validation.
 	 */
+	cancel_flags |= XFS_TRANS_ABORT;
 	if (is_dir) {
 		ASSERT(ip->i_d.di_nlink >= 2);
 		if (ip->i_d.di_nlink != 2) {
@@ -2449,31 +2511,16 @@
 			error = XFS_ERROR(ENOTEMPTY);
 			goto out_trans_cancel;
 		}
-	}
 
-	xfs_bmap_init(&free_list, &first_block);
-	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
-					&first_block, &free_list, resblks);
-	if (error) {
-		ASSERT(error != ENOENT);
-		goto out_bmap_cancel;
-	}
-	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
-	if (is_dir) {
-		/*
-		 * Drop the link from ip's "..".
-		 */
+		/* Drop the link from ip's "..".  */
 		error = xfs_droplink(tp, dp);
 		if (error)
-			goto out_bmap_cancel;
+			goto out_trans_cancel;
 
-		/*
-		 * Drop the "." link from ip to self.
-		 */
+		/* Drop the "." link from ip to self.  */
 		error = xfs_droplink(tp, ip);
 		if (error)
-			goto out_bmap_cancel;
+			goto out_trans_cancel;
 	} else {
 		/*
 		 * When removing a non-directory we need to log the parent
@@ -2482,20 +2529,24 @@
 		 */
 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
 	}
+	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 
-	/*
-	 * Drop the link from dp to ip.
-	 */
+	/* Drop the link from dp to ip. */
 	error = xfs_droplink(tp, ip);
 	if (error)
-		goto out_bmap_cancel;
+		goto out_trans_cancel;
 
-	/*
-	 * Determine if this is the last link while
-	 * we are in the transaction.
-	 */
+	/* Determine if this is the last link while the inode is locked */
 	link_zero = (ip->i_d.di_nlink == 0);
 
+	xfs_bmap_init(&free_list, &first_block);
+	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
+					&first_block, &free_list, resblks);
+	if (error) {
+		ASSERT(error != ENOENT);
+		goto out_bmap_cancel;
+	}
+
 	/*
 	 * If this is a synchronous mount, make sure that the
 	 * remove transaction goes to disk before returning to
@@ -2525,7 +2576,6 @@
 
  out_bmap_cancel:
 	xfs_bmap_cancel(&free_list);
-	cancel_flags |= XFS_TRANS_ABORT;
  out_trans_cancel:
 	xfs_trans_cancel(tp, cancel_flags);
  std_return:
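The AGI-before-AGF rule described in the lock-ordering comment added to xfs_remove() above is the classic two-lock ordering discipline. A toy pthread model (purely illustrative, nothing XFS-specific) shows the invariant the reordered remove path preserves: both the allocation path and the remove path take "AGI" before "AGF", avoiding the AB-BA pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t agi = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t agf = PTHREAD_MUTEX_INITIALIZER;

static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&agi);	/* inode allocation: AGI first */
	pthread_mutex_lock(&agf);	/* ... then AGF for a new chunk */
	pthread_mutex_unlock(&agf);
	pthread_mutex_unlock(&agi);
	return NULL;
}

static void *remove_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&agi);	/* droplink: unlinked list (AGI) */
	pthread_mutex_lock(&agf);	/* dir block free (AGF), same order */
	pthread_mutex_unlock(&agf);
	pthread_mutex_unlock(&agi);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_path, NULL);
	pthread_create(&b, NULL, remove_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("consistent AGI->AGF order: no deadlock");
	return 0;
}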
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4a91358..9e6efccb 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -24,7 +24,6 @@
 /*
  * Kernel only inode definitions
  */
-
 struct xfs_dinode;
 struct xfs_inode;
 struct xfs_buf;
@@ -50,6 +49,9 @@
 	xfs_ifork_t		*i_afp;		/* attribute fork pointer */
 	xfs_ifork_t		i_df;		/* data fork */
 
+	/* operations vectors */
+	const struct xfs_dir_ops *d_ops;		/* directory ops vector */
+
 	/* Transaction and locking information. */
 	struct xfs_inode_log_item *i_itemp;	/* logging information */
 	mrlock_t		i_lock;		/* inode lock */
@@ -316,7 +318,7 @@
 
 
 int		xfs_release(struct xfs_inode *ip);
-int		xfs_inactive(struct xfs_inode *ip);
+void		xfs_inactive(struct xfs_inode *ip);
 int		xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
 			   struct xfs_inode **ipp, struct xfs_name *ci_name);
 int		xfs_create(struct xfs_inode *dp, struct xfs_name *name,
diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
index 63382d3..4fc9f39 100644
--- a/fs/xfs/xfs_inode_buf.c
+++ b/fs/xfs/xfs_inode_buf.c
@@ -17,20 +17,20 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_icache.h"
+#include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dinode.h"
 
 /*
  * Check that none of the inode's in the buffer have a next
diff --git a/fs/xfs/xfs_inode_buf.h b/fs/xfs/xfs_inode_buf.h
index abba0ae..9308c47 100644
--- a/fs/xfs/xfs_inode_buf.h
+++ b/fs/xfs/xfs_inode_buf.h
@@ -47,7 +47,4 @@
 #define	xfs_inobp_check(mp, bp)
 #endif /* DEBUG */
 
-extern const struct xfs_buf_ops xfs_inode_buf_ops;
-extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-
 #endif	/* __XFS_INODE_BUF_H__ */
diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/xfs_inode_fork.c
index 02f1083..cfee14a 100644
--- a/fs/xfs/xfs_inode_fork.c
+++ b/fs/xfs/xfs_inode_fork.c
@@ -20,31 +20,21 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_filestream.h"
-#include "xfs_cksum.h"
 #include "xfs_trace.h"
-#include "xfs_icache.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -1031,15 +1021,14 @@
 		 * the next index needed in the indirection array.
 		 */
 		else {
-			int	count = ext_diff;
+			uint	count = ext_diff;
 
 			while (count) {
 				erp = xfs_iext_irec_new(ifp, erp_idx);
-				erp->er_extcount = count;
-				count -= MIN(count, (int)XFS_LINEAR_EXTS);
-				if (count) {
+				erp->er_extcount = min(count, XFS_LINEAR_EXTS);
+				count -= erp->er_extcount;
+				if (count)
 					erp_idx++;
-				}
 			}
 		}
 	}
@@ -1359,7 +1348,7 @@
 void
 xfs_iext_realloc_direct(
 	xfs_ifork_t	*ifp,		/* inode fork pointer */
-	int		new_size)	/* new size of extents */
+	int		new_size)	/* new size of extents after adding */
 {
 	int		rnew_size;	/* real new size of extents */
 
@@ -1397,13 +1386,8 @@
 				rnew_size - ifp->if_real_bytes);
 		}
 	}
-	/*
-	 * Switch from the inline extent buffer to a direct
-	 * extent list. Be sure to include the inline extent
-	 * bytes in new_size.
-	 */
+	/* Switch from the inline extent buffer to a direct extent list */
 	else {
-		new_size += ifp->if_bytes;
 		if (!is_power_of_2(new_size)) {
 			rnew_size = roundup_pow_of_two(new_size);
 		}
diff --git a/fs/xfs/xfs_inode_fork.h b/fs/xfs/xfs_inode_fork.h
index 28661a0..eb329a1 100644
--- a/fs/xfs/xfs_inode_fork.h
+++ b/fs/xfs/xfs_inode_fork.h
@@ -19,6 +19,7 @@
 #define	__XFS_INODE_FORK_H__
 
 struct xfs_inode_log_item;
+struct xfs_dinode;
 
 /*
  * The following xfs_ext_irec_t struct introduces a second (top) level
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 3780811..7c0d391f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -17,19 +17,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_trans_priv.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
 
 
 kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 668e8f4..4d61340 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -17,32 +17,31 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ioctl.h"
+#include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
 #include "xfs_attr.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_buf_item.h"
 #include "xfs_fsops.h"
 #include "xfs_discard.h"
 #include "xfs_quota.h"
-#include "xfs_inode_item.h"
 #include "xfs_export.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_dinode.h"
+#include "xfs_trans.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -641,7 +640,11 @@
 	unsigned int		cmd,
 	xfs_flock64_t		*bf)
 {
-	int			attr_flags = 0;
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	struct iattr		iattr;
+	bool			setprealloc = false;
+	bool			clrprealloc = false;
 	int			error;
 
 	/*
@@ -661,19 +664,128 @@
 	if (!S_ISREG(inode->i_mode))
 		return -XFS_ERROR(EINVAL);
 
-	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-		attr_flags |= XFS_ATTR_NONBLOCK;
-
-	if (filp->f_flags & O_DSYNC)
-		attr_flags |= XFS_ATTR_SYNC;
-
-	if (ioflags & IO_INVIS)
-		attr_flags |= XFS_ATTR_DMI;
-
 	error = mnt_want_write_file(filp);
 	if (error)
 		return error;
-	error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	switch (bf->l_whence) {
+	case 0: /*SEEK_SET*/
+		break;
+	case 1: /*SEEK_CUR*/
+		bf->l_start += filp->f_pos;
+		break;
+	case 2: /*SEEK_END*/
+		bf->l_start += XFS_ISIZE(ip);
+		break;
+	default:
+		error = XFS_ERROR(EINVAL);
+		goto out_unlock;
+	}
+
+	/*
+	 * A length of <= 0 for resv/unresv/zero is invalid.  The length for
+	 * alloc/free is ignored completely, and we have no idea what userspace
+	 * might have set it to, so set it to zero to allow the range
+	 * checks to pass.
+	 */
+	switch (cmd) {
+	case XFS_IOC_ZERO_RANGE:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_UNRESVSP64:
+		if (bf->l_len <= 0) {
+			error = XFS_ERROR(EINVAL);
+			goto out_unlock;
+		}
+		break;
+	default:
+		bf->l_len = 0;
+		break;
+	}
+
+	if (bf->l_start < 0 ||
+	    bf->l_start > mp->m_super->s_maxbytes ||
+	    bf->l_start + bf->l_len < 0 ||
+	    bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+		error = XFS_ERROR(EINVAL);
+		goto out_unlock;
+	}
+
+	switch (cmd) {
+	case XFS_IOC_ZERO_RANGE:
+		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
+		if (!error)
+			setprealloc = true;
+		break;
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_RESVSP64:
+		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
+						XFS_BMAPI_PREALLOC);
+		if (!error)
+			setprealloc = true;
+		break;
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_UNRESVSP64:
+		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
+		break;
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_FREESP64:
+		if (bf->l_start > XFS_ISIZE(ip)) {
+			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+					bf->l_start - XFS_ISIZE(ip), 0);
+			if (error)
+				goto out_unlock;
+		}
+
+		iattr.ia_valid = ATTR_SIZE;
+		iattr.ia_size = bf->l_start;
+
+		error = xfs_setattr_size(ip, &iattr);
+		if (!error)
+			clrprealloc = true;
+		break;
+	default:
+		ASSERT(0);
+		error = XFS_ERROR(EINVAL);
+	}
+
+	if (error)
+		goto out_unlock;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		goto out_unlock;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+	if (!(ioflags & IO_INVIS)) {
+		ip->i_d.di_mode &= ~S_ISUID;
+		if (ip->i_d.di_mode & S_IXGRP)
+			ip->i_d.di_mode &= ~S_ISGID;
+		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	}
+
+	if (setprealloc)
+		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+	else if (clrprealloc)
+		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	if (filp->f_flags & O_DSYNC)
+		xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0);
+
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	mnt_drop_write_file(filp);
 	return -error;
 }
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index f671f7e..e8fb123 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -22,14 +22,13 @@
 #include <asm/uaccess.h>
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_vnode.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 8d4d49b..22d1cbe 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -17,34 +17,28 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
 #include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_quota.h"
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
+#include "xfs_dinode.h"
 
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
@@ -110,7 +104,7 @@
 	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
 			"Access to block zero in inode %llu "
 			"start_block: %llx start_off: %llx "
-			"blkcnt: %llx extent-state: %x\n",
+			"blkcnt: %llx extent-state: %x",
 		(unsigned long long)ip->i_ino,
 		(unsigned long long)imap->br_startblock,
 		(unsigned long long)imap->br_startoff,
@@ -655,7 +649,6 @@
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
-	size_t		count,
 	xfs_bmbt_irec_t *imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 8061576..411fbb8 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -21,12 +21,12 @@
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
 			struct xfs_bmbt_irec *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
 			struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
 			struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 2b8952d..27e0e54 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -17,32 +17,28 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_acl.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_acl.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2_priv.h"
+#include "xfs_dinode.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -709,8 +705,7 @@
 int
 xfs_setattr_size(
 	struct xfs_inode	*ip,
-	struct iattr		*iattr,
-	int			flags)
+	struct iattr		*iattr)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct inode		*inode = VFS_I(ip);
@@ -733,15 +728,11 @@
 	if (error)
 		return XFS_ERROR(error);
 
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 	ASSERT(S_ISREG(ip->i_d.di_mode));
 	ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
 			ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
-	if (!(flags & XFS_ATTR_NOLOCK)) {
-		lock_flags |= XFS_IOLOCK_EXCL;
-		xfs_ilock(ip, lock_flags);
-	}
-
 	oldsize = inode->i_size;
 	newsize = iattr->ia_size;
 
@@ -750,12 +741,11 @@
 	 */
 	if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
 		if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
-			goto out_unlock;
+			return 0;
 
 		/*
 		 * Use the regular setattr path to update the timestamps.
 		 */
-		xfs_iunlock(ip, lock_flags);
 		iattr->ia_valid &= ~ATTR_SIZE;
 		return xfs_setattr_nonsize(ip, iattr, 0);
 	}
@@ -765,7 +755,7 @@
 	 */
 	error = xfs_qm_dqattach(ip, 0);
 	if (error)
-		goto out_unlock;
+		return error;
 
 	/*
 	 * Now we can make the changes.  Before we join the inode to the
@@ -783,7 +773,7 @@
 		 */
 		error = xfs_zero_eof(ip, newsize, oldsize);
 		if (error)
-			goto out_unlock;
+			return error;
 	}
 
 	/*
@@ -802,7 +792,7 @@
 		error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 						      ip->i_d.di_size, newsize);
 		if (error)
-			goto out_unlock;
+			return error;
 	}
 
 	/*
@@ -812,7 +802,7 @@
 
 	error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
 	if (error)
-		goto out_unlock;
+		return error;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
@@ -916,12 +906,21 @@
 
 STATIC int
 xfs_vn_setattr(
-	struct dentry	*dentry,
-	struct iattr	*iattr)
+	struct dentry		*dentry,
+	struct iattr		*iattr)
 {
-	if (iattr->ia_valid & ATTR_SIZE)
-		return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
-	return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
+	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
+	int			error;
+
+	if (iattr->ia_valid & ATTR_SIZE) {
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		error = xfs_setattr_size(ip, iattr);
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	} else {
+		error = xfs_setattr_nonsize(ip, iattr, 0);
+	}
+
+	return -error;
 }
 
 STATIC int
@@ -1169,6 +1168,7 @@
 	struct xfs_inode	*ip)
 {
 	struct inode		*inode = &ip->i_vnode;
+	gfp_t			gfp_mask;
 
 	inode->i_ino = ip->i_ino;
 	inode->i_state = I_NEW;
@@ -1204,6 +1204,7 @@
 	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
 	xfs_diflags_to_iflags(inode, ip);
 
+	ip->d_ops = ip->i_mount->m_nondir_inode_ops;
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
 		inode->i_op = &xfs_inode_operations;
@@ -1216,6 +1217,7 @@
 		else
 			inode->i_op = &xfs_dir_inode_operations;
 		inode->i_fop = &xfs_dir_file_operations;
+		ip->d_ops = ip->i_mount->m_dir_inode_ops;
 		break;
 	case S_IFLNK:
 		inode->i_op = &xfs_symlink_inode_operations;
@@ -1229,6 +1231,14 @@
 	}
 
 	/*
+	 * Ensure all page cache allocations are done from GFP_NOFS context to
+	 * prevent direct reclaim recursion back into the filesystem and blowing
+	 * stacks or deadlocking.
+	 */
+	gfp_mask = mapping_gfp_mask(inode->i_mapping);
+	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
+
+	/*
 	 * If there is no attribute fork no ACL can exist on this inode,
 	 * and it can't have any file capabilities attached to it either.
 	 */
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index d81fb41..d2c5057 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -30,14 +30,10 @@
 /*
  * Internal setattr interfaces.
  */
-#define	XFS_ATTR_DMI		0x01	/* invocation from a DMI function */
-#define	XFS_ATTR_NONBLOCK	0x02	/* return EAGAIN if op would block */
-#define XFS_ATTR_NOLOCK		0x04	/* Don't grab any conflicting locks */
-#define XFS_ATTR_NOACL		0x08	/* Don't call xfs_acl_chmod */
-#define XFS_ATTR_SYNC		0x10	/* synchronous operation required */
+#define XFS_ATTR_NOACL		0x01	/* Don't call xfs_acl_chmod */
 
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
 			       int flags);
-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
+extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
 
 #endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 084b3e1..c237ad1 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -17,24 +17,23 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
-#include "xfs_btree.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
 
 STATIC int
 xfs_internal_inum(
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a2dea108..8497a00 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -17,21 +17,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_error.h"
-#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_log_recover.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
-#include "xfs_dinode.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
 #include "xfs_fsops.h"
@@ -1000,27 +998,34 @@
 }
 
 /*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
+ * Determine if we have a transaction that has gone to disk that needs to be
+ * covered. To begin the transition to the idle state, the log first needs
+ * to be idle. That means the CIL, the AIL and the iclogs all need to be
+ * empty before we start attempting to cover the log.
  *
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
+ * Only if we are then in a state where covering is needed is the caller
+ * informed that dummy transactions are required to move the log into the
+ * idle state.
+ *
+ * If there are any items in the AIL or CIL, then we do not want to attempt to
+ * cover the log as we may be in a situation where there isn't log space
+ * available to run a dummy transaction and this can lead to deadlocks when the
+ * tail of the log is pinned by an item that is modified in the CIL.  Hence
+ * there's no point in running a dummy transaction here, because we
+ * can't start trying to idle the log until both the CIL and AIL are empty.
  */
 int
 xfs_log_need_covered(xfs_mount_t *mp)
 {
-	int		needed = 0;
 	struct xlog	*log = mp->m_log;
+	int		needed = 0;
 
 	if (!xfs_fs_writable(mp))
 		return 0;
 
+	if (!xlog_cil_empty(log))
+		return 0;
+
 	spin_lock(&log->l_icloglock);
 	switch (log->l_covered_state) {
 	case XLOG_STATE_COVER_DONE:
@@ -1029,14 +1034,17 @@
 		break;
 	case XLOG_STATE_COVER_NEED:
 	case XLOG_STATE_COVER_NEED2:
-		if (!xfs_ail_min_lsn(log->l_ailp) &&
-		    xlog_iclogs_empty(log)) {
-			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
-				log->l_covered_state = XLOG_STATE_COVER_DONE;
-			else
-				log->l_covered_state = XLOG_STATE_COVER_DONE2;
-		}
-		/* FALLTHRU */
+		if (xfs_ail_min_lsn(log->l_ailp))
+			break;
+		if (!xlog_iclogs_empty(log))
+			break;
+
+		needed = 1;
+		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+			log->l_covered_state = XLOG_STATE_COVER_DONE;
+		else
+			log->l_covered_state = XLOG_STATE_COVER_DONE2;
+		break;
 	default:
 		needed = 1;
 		break;
@@ -1068,6 +1076,7 @@
 		tail_lsn = lip->li_lsn;
 	else
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
+	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
@@ -1979,7 +1988,7 @@
 
 	for (i = 0; i < ticket->t_res_num; i++) {
 		uint r_type = ticket->t_res_arr[i].r_type;
-		xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
+		xfs_warn(mp, "region[%u]: %s - %u bytes", i,
 			    ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
 			    "bad-rtype" : res_type_str[r_type-1]),
 			    ticket->t_res_arr[i].r_len);
@@ -3702,11 +3711,9 @@
 	/* check validity of iclog pointers */
 	spin_lock(&log->l_icloglock);
 	icptr = log->l_iclog;
-	for (i=0; i < log->l_iclog_bufs; i++) {
-		if (icptr == NULL)
-			xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
-		icptr = icptr->ic_next;
-	}
+	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
+		ASSERT(icptr);
+
 	if (icptr != log->l_iclog)
 		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
 	spin_unlock(&log->l_icloglock);
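[Editorial note, not from this patch: the main consumer of xfs_log_need_covered() in this era is the periodic log worker, which decides between a dummy transaction and a plain log force. Roughly, from memory:

	static void
	xfs_log_worker(
		struct work_struct	*work)
	{
		struct xlog		*log = container_of(to_delayed_work(work),
							    struct xlog, l_work);
		struct xfs_mount	*mp = log->l_mp;

		if (xfs_log_need_covered(mp))
			xfs_fs_log_dummy(mp);	/* idle the log via an empty transaction */
		else
			xfs_log_force(mp, 0);	/* just flush whatever is pending */

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);

		/* queue us up again */
		xfs_log_work_queue(mp);
	}

With the stricter checks above, the worker now backs off to a plain log force whenever the CIL or AIL still hold items, instead of spending log space on a dummy transaction that cannot help idle the log.]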
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 1c45848..e148719 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -18,8 +18,6 @@
 #ifndef	__XFS_LOG_H__
 #define __XFS_LOG_H__
 
-#include "xfs_log_format.h"
-
 struct xfs_log_vec {
 	struct xfs_log_vec	*lv_next;	/* next lv in build list */
 	int			lv_niovecs;	/* number of iovecs in lv */
@@ -82,11 +80,7 @@
 struct xfs_log_item;
 struct xfs_item_ops;
 struct xfs_trans;
-
-void	xfs_log_item_init(struct xfs_mount	*mp,
-			struct xfs_log_item	*item,
-			int			type,
-			const struct xfs_item_ops *ops);
+struct xfs_log_callback;
 
 xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
 		       struct xlog_ticket *ticket,
@@ -114,7 +108,7 @@
 void	  xfs_log_space_wake(struct xfs_mount *mp);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			 struct xlog_in_core	*iclog,
-			 xfs_log_callback_t	*callback_entry);
+			 struct xfs_log_callback *callback_entry);
 int	  xfs_log_release_iclog(struct xfs_mount *mp,
 			 struct xlog_in_core	 *iclog);
 int	  xfs_log_reserve(struct xfs_mount *mp,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cfe9797..5eb51fc 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -17,11 +17,9 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -29,6 +27,10 @@
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
 
 /*
  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -711,6 +713,20 @@
 	xlog_cil_push(log);
 }
 
+bool
+xlog_cil_empty(
+	struct xlog	*log)
+{
+	struct xfs_cil	*cil = log->l_cilp;
+	bool		empty = false;
+
+	spin_lock(&cil->xc_push_lock);
+	if (list_empty(&cil->xc_cil))
+		empty = true;
+	spin_unlock(&cil->xc_push_lock);
+	return empty;
+}
+
 /*
  * Commit a transaction with the given vector to the Committed Item List.
  *
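[Editorial note on xlog_cil_empty() above: xc_push_lock is taken even though list_empty() is a single read, so the emptiness answer is serialized against concurrent CIL insertion and pushes rather than sampled mid-update. The same locked-predicate idiom, reduced to a self-contained sketch with made-up names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item_pool {
		spinlock_t		lock;	/* protects @items */
		struct list_head	items;
	};

	/* hypothetical helper: a stable emptiness check under the list lock */
	static bool item_pool_empty(struct item_pool *p)
	{
		bool		empty = false;

		spin_lock(&p->lock);
		if (list_empty(&p->items))
			empty = true;
		spin_unlock(&p->lock);
		return empty;
	}
]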
diff --git a/fs/xfs/xfs_log_format.h b/fs/xfs/xfs_log_format.h
index ca7e28a8..f0969c7 100644
--- a/fs/xfs/xfs_log_format.h
+++ b/fs/xfs/xfs_log_format.h
@@ -234,178 +234,6 @@
 	{ XFS_LI_ICREATE,	"XFS_LI_ICREATE" }
 
 /*
- * Transaction types.  Used to distinguish types of buffers.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE	1
-#define XFS_TRANS_SETATTR_SIZE		2
-#define XFS_TRANS_INACTIVE		3
-#define XFS_TRANS_CREATE		4
-#define XFS_TRANS_CREATE_TRUNC		5
-#define XFS_TRANS_TRUNCATE_FILE		6
-#define XFS_TRANS_REMOVE		7
-#define XFS_TRANS_LINK			8
-#define XFS_TRANS_RENAME		9
-#define XFS_TRANS_MKDIR			10
-#define XFS_TRANS_RMDIR			11
-#define XFS_TRANS_SYMLINK		12
-#define XFS_TRANS_SET_DMATTRS		13
-#define XFS_TRANS_GROWFS		14
-#define XFS_TRANS_STRAT_WRITE		15
-#define XFS_TRANS_DIOSTRAT		16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define	XFS_TRANS_WRITEID		18
-#define	XFS_TRANS_ADDAFORK		19
-#define	XFS_TRANS_ATTRINVAL		20
-#define	XFS_TRANS_ATRUNCATE		21
-#define	XFS_TRANS_ATTR_SET		22
-#define	XFS_TRANS_ATTR_RM		23
-#define	XFS_TRANS_ATTR_FLAG		24
-#define	XFS_TRANS_CLEAR_AGI_BUCKET	25
-#define XFS_TRANS_QM_SBCHANGE		26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1		27
-#define XFS_TRANS_DUMMY2		28
-#define XFS_TRANS_QM_QUOTAOFF		29
-#define XFS_TRANS_QM_DQALLOC		30
-#define XFS_TRANS_QM_SETQLIM		31
-#define XFS_TRANS_QM_DQCLUSTER		32
-#define XFS_TRANS_QM_QINOCREATE		33
-#define XFS_TRANS_QM_QUOTAOFF_END	34
-#define XFS_TRANS_SB_UNIT		35
-#define XFS_TRANS_FSYNC_TS		36
-#define	XFS_TRANS_GROWFSRT_ALLOC	37
-#define	XFS_TRANS_GROWFSRT_ZERO		38
-#define	XFS_TRANS_GROWFSRT_FREE		39
-#define	XFS_TRANS_SWAPEXT		40
-#define	XFS_TRANS_SB_COUNT		41
-#define	XFS_TRANS_CHECKPOINT		42
-#define	XFS_TRANS_ICREATE		43
-#define	XFS_TRANS_TYPE_MAX		43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
-	{ XFS_TRANS_SETATTR_NOT_SIZE,	"SETATTR_NOT_SIZE" }, \
-	{ XFS_TRANS_SETATTR_SIZE,	"SETATTR_SIZE" }, \
-	{ XFS_TRANS_INACTIVE,		"INACTIVE" }, \
-	{ XFS_TRANS_CREATE,		"CREATE" }, \
-	{ XFS_TRANS_CREATE_TRUNC,	"CREATE_TRUNC" }, \
-	{ XFS_TRANS_TRUNCATE_FILE,	"TRUNCATE_FILE" }, \
-	{ XFS_TRANS_REMOVE,		"REMOVE" }, \
-	{ XFS_TRANS_LINK,		"LINK" }, \
-	{ XFS_TRANS_RENAME,		"RENAME" }, \
-	{ XFS_TRANS_MKDIR,		"MKDIR" }, \
-	{ XFS_TRANS_RMDIR,		"RMDIR" }, \
-	{ XFS_TRANS_SYMLINK,		"SYMLINK" }, \
-	{ XFS_TRANS_SET_DMATTRS,	"SET_DMATTRS" }, \
-	{ XFS_TRANS_GROWFS,		"GROWFS" }, \
-	{ XFS_TRANS_STRAT_WRITE,	"STRAT_WRITE" }, \
-	{ XFS_TRANS_DIOSTRAT,		"DIOSTRAT" }, \
-	{ XFS_TRANS_WRITEID,		"WRITEID" }, \
-	{ XFS_TRANS_ADDAFORK,		"ADDAFORK" }, \
-	{ XFS_TRANS_ATTRINVAL,		"ATTRINVAL" }, \
-	{ XFS_TRANS_ATRUNCATE,		"ATRUNCATE" }, \
-	{ XFS_TRANS_ATTR_SET,		"ATTR_SET" }, \
-	{ XFS_TRANS_ATTR_RM,		"ATTR_RM" }, \
-	{ XFS_TRANS_ATTR_FLAG,		"ATTR_FLAG" }, \
-	{ XFS_TRANS_CLEAR_AGI_BUCKET,	"CLEAR_AGI_BUCKET" }, \
-	{ XFS_TRANS_QM_SBCHANGE,	"QM_SBCHANGE" }, \
-	{ XFS_TRANS_QM_QUOTAOFF,	"QM_QUOTAOFF" }, \
-	{ XFS_TRANS_QM_DQALLOC,		"QM_DQALLOC" }, \
-	{ XFS_TRANS_QM_SETQLIM,		"QM_SETQLIM" }, \
-	{ XFS_TRANS_QM_DQCLUSTER,	"QM_DQCLUSTER" }, \
-	{ XFS_TRANS_QM_QINOCREATE,	"QM_QINOCREATE" }, \
-	{ XFS_TRANS_QM_QUOTAOFF_END,	"QM_QOFF_END" }, \
-	{ XFS_TRANS_SB_UNIT,		"SB_UNIT" }, \
-	{ XFS_TRANS_FSYNC_TS,		"FSYNC_TS" }, \
-	{ XFS_TRANS_GROWFSRT_ALLOC,	"GROWFSRT_ALLOC" }, \
-	{ XFS_TRANS_GROWFSRT_ZERO,	"GROWFSRT_ZERO" }, \
-	{ XFS_TRANS_GROWFSRT_FREE,	"GROWFSRT_FREE" }, \
-	{ XFS_TRANS_SWAPEXT,		"SWAPEXT" }, \
-	{ XFS_TRANS_SB_COUNT,		"SB_COUNT" }, \
-	{ XFS_TRANS_CHECKPOINT,		"CHECKPOINT" }, \
-	{ XFS_TRANS_DUMMY1,		"DUMMY1" }, \
-	{ XFS_TRANS_DUMMY2,		"DUMMY2" }, \
-	{ XLOG_UNMOUNT_REC_TYPE,	"UNMOUNT" }
-
-/*
- * This structure is used to track log items associated with
- * a transaction.  It points to the log item and keeps some
- * flags to track the state of the log item.  It also tracks
- * the amount of space needed to log the item it describes
- * once we get to commit processing (see xfs_trans_commit()).
- */
-struct xfs_log_item_desc {
-	struct xfs_log_item	*lid_item;
-	struct list_head	lid_trans;
-	unsigned char		lid_flags;
-};
-
-#define XFS_LID_DIRTY		0x1
-
-/*
- * Values for t_flags.
- */
-#define	XFS_TRANS_DIRTY		0x01	/* something needs to be logged */
-#define	XFS_TRANS_SB_DIRTY	0x02	/* superblock is modified */
-#define	XFS_TRANS_PERM_LOG_RES	0x04	/* xact took a permanent log res */
-#define	XFS_TRANS_SYNC		0x08	/* make commit synchronous */
-#define XFS_TRANS_DQ_DIRTY	0x10	/* at least one dquot in trx dirty */
-#define XFS_TRANS_RESERVE	0x20    /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT	0x40	/* Transaction has elevated writer
-					   count in superblock */
-
-/*
- * Values for call flags parameter.
- */
-#define	XFS_TRANS_RELEASE_LOG_RES	0x4
-#define	XFS_TRANS_ABORT			0x8
-
-/*
- * Field values for xfs_trans_mod_sb.
- */
-#define	XFS_TRANS_SB_ICOUNT		0x00000001
-#define	XFS_TRANS_SB_IFREE		0x00000002
-#define	XFS_TRANS_SB_FDBLOCKS		0x00000004
-#define	XFS_TRANS_SB_RES_FDBLOCKS	0x00000008
-#define	XFS_TRANS_SB_FREXTENTS		0x00000010
-#define	XFS_TRANS_SB_RES_FREXTENTS	0x00000020
-#define	XFS_TRANS_SB_DBLOCKS		0x00000040
-#define	XFS_TRANS_SB_AGCOUNT		0x00000080
-#define	XFS_TRANS_SB_IMAXPCT		0x00000100
-#define	XFS_TRANS_SB_REXTSIZE		0x00000200
-#define	XFS_TRANS_SB_RBMBLOCKS		0x00000400
-#define	XFS_TRANS_SB_RBLOCKS		0x00000800
-#define	XFS_TRANS_SB_REXTENTS		0x00001000
-#define	XFS_TRANS_SB_REXTSLOG		0x00002000
-
-/*
- * Here we centralize the specification of XFS meta-data buffer
- * reference count values.  This determine how hard the buffer
- * cache tries to hold onto the buffer.
- */
-#define	XFS_AGF_REF		4
-#define	XFS_AGI_REF		4
-#define	XFS_AGFL_REF		3
-#define	XFS_INO_BTREE_REF	3
-#define	XFS_ALLOC_BTREE_REF	2
-#define	XFS_BMAP_BTREE_REF	2
-#define	XFS_DIR_BTREE_REF	2
-#define	XFS_INO_REF		2
-#define	XFS_ATTR_BTREE_REF	1
-#define	XFS_DQUOT_REF		1
-
-/*
- * Flags for xfs_trans_ichgtime().
- */
-#define	XFS_ICHGTIME_MOD	0x1	/* data fork modification timestamp */
-#define	XFS_ICHGTIME_CHG	0x2	/* inode field change timestamp */
-#define	XFS_ICHGTIME_CREATE	0x4	/* inode create timestamp */
-
-
-/*
  * Inode Log Item Format definitions.
  *
  * This is the structure used to lay out an inode log item in the
@@ -797,7 +625,6 @@
 	char			qf_pad[12];	/* padding for future */
 } xfs_qoff_logformat_t;
 
-
 /*
  * Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
  */
@@ -849,8 +676,4 @@
 	__be32		icl_gen;	/* inode generation number to use */
 };
 
-int	xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
-int	xfs_log_calc_minimum_size(struct xfs_mount *);
-
-
 #endif /* __XFS_LOG_FORMAT_H__ */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 136654b..9bc403a 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -22,6 +22,7 @@
 struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
+struct xfs_log_callback;
 
 /*
  * Flags for log structure
@@ -227,8 +228,8 @@
 
 	/* Callback structures need their own cacheline */
 	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
-	xfs_log_callback_t	*ic_callback;
-	xfs_log_callback_t	**ic_callback_tail;
+	struct xfs_log_callback	*ic_callback;
+	struct xfs_log_callback	**ic_callback_tail;
 
 	/* reference counts need their own cacheline */
 	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
@@ -254,7 +255,7 @@
 	int			space_used;	/* aggregate size of regions */
 	struct list_head	busy_extents;	/* busy extents in chkpt */
 	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
-	xfs_log_callback_t	log_cb;		/* completion callback hook. */
+	struct xfs_log_callback	log_cb;		/* completion callback hook. */
 	struct list_head	committing;	/* ctx committing list */
 };
 
@@ -514,12 +515,10 @@
 /*
  * Committed Item List interfaces
  */
-int
-xlog_cil_init(struct xlog *log);
-void
-xlog_cil_init_post_recovery(struct xlog *log);
-void
-xlog_cil_destroy(struct xlog *log);
+int	xlog_cil_init(struct xlog *log);
+void	xlog_cil_init_post_recovery(struct xlog *log);
+void	xlog_cil_destroy(struct xlog *log);
+bool	xlog_cil_empty(struct xlog *log);
 
 /*
  * CIL force routines
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 3979749..b6b669d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -17,42 +17,34 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
 #include "xfs_log_recover.h"
+#include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_trans_priv.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
 #include "xfs_quota.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_icreate_item.h"
-
-/* Need all the magic numbers and buffer ops structures from these headers */
-#include "xfs_symlink.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_error.h"
 #include "xfs_dir2.h"
-#include "xfs_attr_leaf.h"
-#include "xfs_attr_remote.h"
 
 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
 
@@ -305,9 +297,9 @@
 	xfs_mount_t		*mp,
 	xlog_rec_header_t	*head)
 {
-	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
+	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
-	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
+	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
 }
 #else
@@ -2362,7 +2354,7 @@
 					item->ri_buf[i].i_len, __func__);
 				goto next;
 			}
-			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
+			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
 					       -1, 0, XFS_QMOPT_DOWARN,
 					       "dquot_buf_recover");
 			if (error)
@@ -2394,133 +2386,6 @@
 }
 
 /*
- * Do some primitive error checking on ondisk dquot data structures.
- */
-int
-xfs_qm_dqcheck(
-	struct xfs_mount *mp,
-	xfs_disk_dquot_t *ddq,
-	xfs_dqid_t	 id,
-	uint		 type,	  /* used only when IO_dorepair is true */
-	uint		 flags,
-	char		 *str)
-{
-	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
-	int		errs = 0;
-
-	/*
-	 * We can encounter an uninitialized dquot buffer for 2 reasons:
-	 * 1. If we crash while deleting the quotainode(s), and those blks got
-	 *    used for user data. This is because we take the path of regular
-	 *    file deletion; however, the size field of quotainodes is never
-	 *    updated, so all the tricks that we play in itruncate_finish
-	 *    don't quite matter.
-	 *
-	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
-	 *    But the allocation will be replayed so we'll end up with an
-	 *    uninitialized quota block.
-	 *
-	 * This is all fine; things are still consistent, and we haven't lost
-	 * any quota information. Just don't complain about bad dquot blks.
-	 */
-	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
-		if (flags & XFS_QMOPT_DOWARN)
-			xfs_alert(mp,
-			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
-			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
-		errs++;
-	}
-	if (ddq->d_version != XFS_DQUOT_VERSION) {
-		if (flags & XFS_QMOPT_DOWARN)
-			xfs_alert(mp,
-			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
-			str, id, ddq->d_version, XFS_DQUOT_VERSION);
-		errs++;
-	}
-
-	if (ddq->d_flags != XFS_DQ_USER &&
-	    ddq->d_flags != XFS_DQ_PROJ &&
-	    ddq->d_flags != XFS_DQ_GROUP) {
-		if (flags & XFS_QMOPT_DOWARN)
-			xfs_alert(mp,
-			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
-			str, id, ddq->d_flags);
-		errs++;
-	}
-
-	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
-		if (flags & XFS_QMOPT_DOWARN)
-			xfs_alert(mp,
-			"%s : ondisk-dquot 0x%p, ID mismatch: "
-			"0x%x expected, found id 0x%x",
-			str, ddq, id, be32_to_cpu(ddq->d_id));
-		errs++;
-	}
-
-	if (!errs && ddq->d_id) {
-		if (ddq->d_blk_softlimit &&
-		    be64_to_cpu(ddq->d_bcount) >
-				be64_to_cpu(ddq->d_blk_softlimit)) {
-			if (!ddq->d_btimer) {
-				if (flags & XFS_QMOPT_DOWARN)
-					xfs_alert(mp,
-			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
-					str, (int)be32_to_cpu(ddq->d_id), ddq);
-				errs++;
-			}
-		}
-		if (ddq->d_ino_softlimit &&
-		    be64_to_cpu(ddq->d_icount) >
-				be64_to_cpu(ddq->d_ino_softlimit)) {
-			if (!ddq->d_itimer) {
-				if (flags & XFS_QMOPT_DOWARN)
-					xfs_alert(mp,
-			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
-					str, (int)be32_to_cpu(ddq->d_id), ddq);
-				errs++;
-			}
-		}
-		if (ddq->d_rtb_softlimit &&
-		    be64_to_cpu(ddq->d_rtbcount) >
-				be64_to_cpu(ddq->d_rtb_softlimit)) {
-			if (!ddq->d_rtbtimer) {
-				if (flags & XFS_QMOPT_DOWARN)
-					xfs_alert(mp,
-			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
-					str, (int)be32_to_cpu(ddq->d_id), ddq);
-				errs++;
-			}
-		}
-	}
-
-	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
-		return errs;
-
-	if (flags & XFS_QMOPT_DOWARN)
-		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
-
-	/*
-	 * Typically, a repair is only requested by quotacheck.
-	 */
-	ASSERT(id != -1);
-	ASSERT(flags & XFS_QMOPT_DQREPAIR);
-	memset(d, 0, sizeof(xfs_dqblk_t));
-
-	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
-	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
-	d->dd_diskdq.d_flags = type;
-	d->dd_diskdq.d_id = cpu_to_be32(id);
-
-	if (xfs_sb_version_hascrc(&mp->m_sb)) {
-		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
-		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-				 XFS_DQUOT_CRC_OFF);
-	}
-
-	return errs;
-}
-
-/*
  * Perform a dquot buffer recovery.
  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
@@ -3125,7 +2990,7 @@
 	 */
 	dq_f = item->ri_buf[0].i_addr;
 	ASSERT(dq_f);
-	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
 			   "xlog_recover_dquot_pass2 (log copy)");
 	if (error)
 		return XFS_ERROR(EIO);
@@ -3145,7 +3010,7 @@
 	 * was among a chunk of dquots created earlier, and we did some
 	 * minimal initialization then.
 	 */
-	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+	error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
 			   "xlog_recover_dquot_pass2");
 	if (error) {
 		xfs_buf_relse(bp);
@@ -4077,7 +3942,7 @@
 	if (crc != rhead->h_crc) {
 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
 			xfs_alert(log->l_mp,
-		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
+		"log record CRC mismatch: found 0x%x, expected 0x%x.",
 					le32_to_cpu(rhead->h_crc),
 					le32_to_cpu(crc));
 			xfs_hex_dump(dp, 32);
diff --git a/fs/xfs/xfs_log_rlimit.c b/fs/xfs/xfs_log_rlimit.c
index bbcec0b..2af1a0a 100644
--- a/fs/xfs/xfs_log_rlimit.c
+++ b/fs/xfs/xfs_log_rlimit.c
@@ -17,16 +17,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_trans_space.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_da_btree.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Calculate the maximum length in bytes that would be required for a local
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 9163dc1..63ca2f0 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -17,9 +17,8 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5dcc680..da88f16 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -17,35 +17,30 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_dir2.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
 
 
 #ifdef HAVE_PERCPU_SB
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1fa0584..1d8101a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -26,6 +26,7 @@
 struct xfs_nameops;
 struct xfs_ail;
 struct xfs_quotainfo;
+struct xfs_dir_ops;
 
 #ifdef HAVE_PERCPU_SB
 
@@ -148,6 +149,8 @@
 	int			m_dir_magicpct;	/* 37% of the dir blocksize */
 	__uint8_t		m_sectbb_log;	/* sectlog - BBSHIFT */
 	const struct xfs_nameops *m_dirnameops;	/* vector of dir name ops */
+	const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
+	const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
 	int			m_dirblksize;	/* directory block sz--bytes */
 	int			m_dirblkfsbs;	/* directory block sz--fsbs */
 	xfs_dablk_t		m_dirdatablk;	/* blockno of dir data v2 */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 3e6c2e6..14a4996 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -17,31 +17,28 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -664,20 +661,6 @@
 	}
 }
 
-int
-xfs_qm_calc_dquots_per_chunk(
-	struct xfs_mount	*mp,
-	unsigned int		nbblks)	/* basic block units */
-{
-	unsigned int	ndquots;
-
-	ASSERT(nbblks > 0);
-	ndquots = BBTOB(nbblks);
-	do_div(ndquots, sizeof(xfs_dqblk_t));
-
-	return ndquots;
-}
-
 struct xfs_qm_isolate {
 	struct list_head	buffers;
 	struct list_head	dispose;
@@ -858,7 +841,7 @@
 
 	/* Precalc some constants */
 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
-	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
+	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
 							qinf->qi_dqchunklen);
 
 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
@@ -1092,10 +1075,10 @@
 		/*
 		 * Do a sanity check, and if needed, repair the dqblk. Don't
 		 * output any warnings because it's perfectly possible to
-		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
+		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
 		 */
-		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
-				      "xfs_quotacheck");
+		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+			    "xfs_quotacheck");
 		ddq->d_bcount = 0;
 		ddq->d_icount = 0;
 		ddq->d_rtbcount = 0;
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 2b602df..a788b66 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -103,8 +103,6 @@
 	return NULL;
 }
 
-extern int	xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp,
-					     unsigned int nbblks);
 extern void	xfs_trans_mod_dquot(struct xfs_trans *,
 					struct xfs_dquot *, uint, long);
 extern int	xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 3af50cc..e9be63a 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -18,21 +18,15 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 
 
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 8174aad..437c919 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -20,24 +20,18 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -287,7 +281,7 @@
 	int		error = 0, error2 = 0;
 
 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
-		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
+		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
 			__func__, flags, mp->m_qflags);
 		return XFS_ERROR(EINVAL);
 	}
@@ -325,7 +319,7 @@
 	sbflags = 0;
 
 	if (flags == 0) {
-		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
+		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
 			__func__, mp->m_qflags);
 		return XFS_ERROR(EINVAL);
 	}
@@ -348,7 +342,7 @@
 	     (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
 	     (flags & XFS_PQUOTA_ENFD))) {
 		xfs_debug(mp,
-			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
+			"%s: Can't enforce without acct, flags=%x sbflags=%x",
 			__func__, flags, mp->m_sb.sb_qflags);
 		return XFS_ERROR(EINVAL);
 	}
@@ -648,7 +642,7 @@
 			q->qi_bsoftlimit = soft;
 		}
 	} else {
-		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
+		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
 	}
 	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
 		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
@@ -664,7 +658,7 @@
 			q->qi_rtbsoftlimit = soft;
 		}
 	} else {
-		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
 	}
 
 	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
@@ -681,7 +675,7 @@
 			q->qi_isoftlimit = soft;
 		}
 	} else {
-		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
+		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
 	}
 
 	/*
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e7d84d2..5376dd4 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -150,10 +150,6 @@
 	xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
 				f | XFS_QMOPT_RES_REGBLKS)
 
-extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
-				xfs_dqid_t, uint, uint, char *);
 extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif	/* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/xfs_quota_defs.h b/fs/xfs/xfs_quota_defs.h
index e6b0d6e..b3b2b10 100644
--- a/fs/xfs/xfs_quota_defs.h
+++ b/fs/xfs/xfs_quota_defs.h
@@ -154,4 +154,8 @@
 		(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
 #define XFS_QMOPT_RESBLK_MASK	(XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
 
+extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+		       xfs_dqid_t id, uint type, uint flags, char *str);
+extern int xfs_calc_dquots_per_chunk(struct xfs_mount *mp, unsigned int nbblks);
+
 #endif	/* __XFS_QUOTA_H__ */
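[Editorial note: this header hunk only declares the renamed helpers; their bodies are not visible here. Judging from the version deleted from xfs_qm.c earlier in this diff, xfs_calc_dquots_per_chunk() presumably keeps the same shape in its new shared home:

	int
	xfs_calc_dquots_per_chunk(
		struct xfs_mount	*mp,
		unsigned int		nbblks)	/* basic block units */
	{
		unsigned int	ndquots;

		ASSERT(nbblks > 0);
		ndquots = BBTOB(nbblks);	/* 512-byte basic blocks to bytes */
		do_div(ndquots, sizeof(xfs_dqblk_t));

		return ndquots;
	}

For an 8-basic-block (4096-byte) dquot cluster, for example, that is simply 4096 / sizeof(xfs_dqblk_t) on-disk dquots.]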
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 1326d81..af33caf 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -17,15 +17,14 @@
  */
 #include "xfs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
 #include "xfs_quota.h"
 #include "xfs_trans.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_qm.h"
 #include <linux/quota.h>
 
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 6f9e63c..a6a76b2 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -17,172 +17,260 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
-#include "xfs_fsops.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_buf.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
 
 
 /*
- * Prototypes for internal functions.
- */
-
-
-STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-		xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *);
-STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int,
-		xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *);
-STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-		xfs_extlen_t, int, xfs_rtblock_t *, int *);
-STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-		xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-		xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int,
-		xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *);
-STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-		xfs_extlen_t, int);
-STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
-		xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *);
-
-/*
- * Internal functions.
- */
-
-/*
- * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
  */
 STATIC int				/* error */
-xfs_growfs_rt_alloc(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_extlen_t	oblocks,	/* old count of blocks */
-	xfs_extlen_t	nblocks,	/* new count of blocks */
-	xfs_inode_t	*ip)		/* inode (bitmap/summary) */
+xfs_rtget_summary(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		log,		/* log2 of extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	xfs_suminfo_t	*sum)		/* out: summary info for this block */
 {
-	xfs_fileoff_t	bno;		/* block number in file */
-	xfs_buf_t	*bp;		/* temporary buffer for zeroing */
-	int		committed;	/* transaction committed flag */
-	xfs_daddr_t	d;		/* disk block address */
-	int		error;		/* error return value */
-	xfs_fsblock_t	firstblock;	/* first block allocated in xaction */
-	xfs_bmap_free_t	flist;		/* list of freed blocks */
-	xfs_fsblock_t	fsbno;		/* filesystem block for bno */
-	xfs_bmbt_irec_t	map;		/* block map output */
-	int		nmap;		/* number of block maps */
-	int		resblks;	/* space reservation */
+	xfs_buf_t	*bp;		/* buffer for summary block */
+	int		error;		/* error value */
+	xfs_fsblock_t	sb;		/* summary fsblock */
+	int		so;		/* index into the summary file */
+	xfs_suminfo_t	*sp;		/* pointer to returned data */
 
 	/*
-	 * Allocate space to the file, as necessary.
+	 * Compute entry number in the summary file.
 	 */
-	while (oblocks < nblocks) {
-		int		cancelflags = 0;
-		xfs_trans_t	*tp;
-
-		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
-		resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
+	so = XFS_SUMOFFS(mp, log, bbno);
+	/*
+	 * Compute the block number in the summary file.
+	 */
+	sb = XFS_SUMOFFSTOBLOCK(mp, so);
+	/*
+	 * If we have an old buffer, and the block number matches, use that.
+	 */
+	if (rbpp && *rbpp && *rsb == sb)
+		bp = *rbpp;
+	/*
+	 * Otherwise we have to get the buffer.
+	 */
+	else {
 		/*
-		 * Reserve space & log for one extent added to the file.
+		 * If there was an old one, get rid of it first.
 		 */
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
-					  resblks, 0);
-		if (error)
-			goto error_cancel;
-		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
-		/*
-		 * Lock the inode.
-		 */
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-		xfs_bmap_init(&flist, &firstblock);
-		/*
-		 * Allocate blocks to the bitmap file.
-		 */
-		nmap = 1;
-		cancelflags |= XFS_TRANS_ABORT;
-		error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
-					XFS_BMAPI_METADATA, &firstblock,
-					resblks, &map, &nmap, &flist);
-		if (!error && nmap < 1)
-			error = XFS_ERROR(ENOSPC);
-		if (error)
-			goto error_cancel;
-		/*
-		 * Free any blocks freed up in the transaction, then commit.
-		 */
-		error = xfs_bmap_finish(&tp, &flist, &committed);
-		if (error)
-			goto error_cancel;
-		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-		if (error)
-			goto error;
-		/*
-		 * Now we need to clear the allocated blocks.
-		 * Do this one block per transaction, to keep it simple.
-		 */
-		cancelflags = 0;
-		for (bno = map.br_startoff, fsbno = map.br_startblock;
-		     bno < map.br_startoff + map.br_blockcount;
-		     bno++, fsbno++) {
-			tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
-			/*
-			 * Reserve log for one block zeroing.
-			 */
-			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
-						  0, 0);
-			if (error)
-				goto error_cancel;
-			/*
-			 * Lock the bitmap inode.
-			 */
-			xfs_ilock(ip, XFS_ILOCK_EXCL);
-			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-			/*
-			 * Get a buffer for the block.
-			 */
-			d = XFS_FSB_TO_DADDR(mp, fsbno);
-			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-				mp->m_bsize, 0);
-			if (bp == NULL) {
-				error = XFS_ERROR(EIO);
-error_cancel:
-				xfs_trans_cancel(tp, cancelflags);
-				goto error;
-			}
-			memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
-			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
-			/*
-			 * Commit the transaction.
-			 */
-			error = xfs_trans_commit(tp, 0);
-			if (error)
-				goto error;
+		if (rbpp && *rbpp)
+			xfs_trans_brelse(tp, *rbpp);
+		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+		if (error) {
+			return error;
 		}
 		/*
-		 * Go on to the next extent, if any.
+		 * Remember this buffer and block for the next call.
 		 */
-		oblocks = map.br_startoff + map.br_blockcount;
+		if (rbpp) {
+			*rbpp = bp;
+			*rsb = sb;
+		}
+	}
+	/*
+	 * Point to the summary information & copy it out.
+	 */
+	sp = XFS_SUMPTR(mp, bp, so);
+	*sum = *sp;
+	/*
+	 * Drop the buffer if we're not asked to remember it.
+	 */
+	if (!rbpp)
+		xfs_trans_brelse(tp, bp);
+	return 0;
+}
+
+/*
+ * Return whether there are any free extents in the size range given
+ * by low and high, for the bitmap block bbno.
+ */
+STATIC int				/* error */
+xfs_rtany_summary(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		low,		/* low log2 extent size */
+	int		high,		/* high log2 extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
+	int		*stat)		/* out: any good extents here? */
+{
+	int		error;		/* error value */
+	int		log;		/* loop counter, log2 of ext. size */
+	xfs_suminfo_t	sum;		/* summary data */
+
+	/*
+	 * Loop over logs of extent sizes.  Order is irrelevant.
+	 */
+	for (log = low; log <= high; log++) {
+		/*
+		 * Get one summary datum.
+		 */
+		error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
+		if (error) {
+			return error;
+		}
+		/*
+		 * If there are any, return success.
+		 */
+		if (sum) {
+			*stat = 1;
+			return 0;
+		}
+	}
+	/*
+	 * Found nothing, return failure.
+	 */
+	*stat = 0;
+	return 0;
+}
+
+/*
+ * Copy and transform the summary file, given the old and new
+ * parameters in the mount structures.
+ */
+STATIC int				/* error */
+xfs_rtcopy_summary(
+	xfs_mount_t	*omp,		/* old file system mount point */
+	xfs_mount_t	*nmp,		/* new file system mount point */
+	xfs_trans_t	*tp)		/* transaction pointer */
+{
+	xfs_rtblock_t	bbno;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* summary buffer */
+	int		error;		/* error return value */
+	int		log;		/* summary level number (log length) */
+	xfs_suminfo_t	sum;		/* summary data */
+	xfs_fsblock_t	sumbno;		/* summary block number */
+
+	bp = NULL;
+	for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
+		for (bbno = omp->m_sb.sb_rbmblocks - 1;
+		     (xfs_srtblock_t)bbno >= 0;
+		     bbno--) {
+			error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
+				&sumbno, &sum);
+			if (error)
+				return error;
+			if (sum == 0)
+				continue;
+			error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
+				&bp, &sumbno);
+			if (error)
+				return error;
+			error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
+				&bp, &sumbno);
+			if (error)
+				return error;
+			ASSERT(sum > 0);
+		}
 	}
 	return 0;
+}
+
+/*
+ * Mark an extent specified by start and len allocated.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int				/* error */
+xfs_rtallocate_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* start block to allocate */
+	xfs_extlen_t	len,		/* length to allocate */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_rtblock_t	end;		/* end of the allocated extent */
+	int		error;		/* error value */
+	xfs_rtblock_t	postblock = 0;	/* first block allocated > end */
+	xfs_rtblock_t	preblock = 0;	/* first block allocated < start */
 
-error:
+	end = start + len - 1;
+	/*
+	 * Assume we're allocating out of the middle of a free extent.
+	 * We need to find the beginning and end of the extent so we can
+	 * properly update the summary.
+	 */
+	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Find the next allocated block (end of free extent).
+	 */
+	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+		&postblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Decrement the summary information corresponding to the entire
+	 * (old) free extent.
+	 */
+	error = xfs_rtmodify_summary(mp, tp,
+		XFS_RTBLOCKLOG(postblock + 1 - preblock),
+		XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+	if (error) {
+		return error;
+	}
+	/*
+	 * If there are blocks not being allocated at the front of the
+	 * old extent, add summary data for them to be free.
+	 */
+	if (preblock < start) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(start - preblock),
+			XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * If there are blocks not being allocated at the end of the
+	 * old extent, add summary data for them to be free.
+	 */
+	if (postblock > end) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(postblock - end),
+			XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * Modify the bitmap to mark this extent allocated.
+	 */
+	error = xfs_rtmodify_range(mp, tp, start, len, 0);
 	return error;
 }
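[Editorial worked example for the summary arithmetic above, with made-up numbers: suppose the enclosing free run is realtime extents 96..159 (preblock = 96, postblock = 159) and we allocate start = 112, len = 16, so end = 127. The code first decrements the summary for one free run of postblock + 1 - preblock = 64 extents (log2 = 6), then re-adds a run for the leftover front piece 96..111 (start - preblock = 16 extents, log2 = 4) and one for the back piece 128..159 (postblock - end = 32 extents, log2 = 5), and finally clears bits 112..127 in the bitmap to mark them allocated.]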
 
@@ -721,1112 +809,126 @@
 }
 
 /*
- * Mark an extent specified by start and len allocated.
- * Updates all the summary information as well as the bitmap.
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
 STATIC int				/* error */
-xfs_rtallocate_range(
+xfs_growfs_rt_alloc(
 	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* start block to allocate */
-	xfs_extlen_t	len,		/* length to allocate */
-	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
-	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+	xfs_extlen_t	oblocks,	/* old count of blocks */
+	xfs_extlen_t	nblocks,	/* new count of blocks */
+	xfs_inode_t	*ip)		/* inode (bitmap/summary) */
 {
-	xfs_rtblock_t	end;		/* end of the allocated extent */
-	int		error;		/* error value */
-	xfs_rtblock_t	postblock = 0;	/* first block allocated > end */
-	xfs_rtblock_t	preblock = 0;	/* first block allocated < start */
-
-	end = start + len - 1;
-	/*
-	 * Assume we're allocating out of the middle of a free extent.
-	 * We need to find the beginning and end of the extent so we can
-	 * properly update the summary.
-	 */
-	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-	if (error) {
-		return error;
-	}
-	/*
-	 * Find the next allocated block (end of free extent).
-	 */
-	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-		&postblock);
-	if (error) {
-		return error;
-	}
-	/*
-	 * Decrement the summary information corresponding to the entire
-	 * (old) free extent.
-	 */
-	error = xfs_rtmodify_summary(mp, tp,
-		XFS_RTBLOCKLOG(postblock + 1 - preblock),
-		XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-	if (error) {
-		return error;
-	}
-	/*
-	 * If there are blocks not being allocated at the front of the
-	 * old extent, add summary data for them to be free.
-	 */
-	if (preblock < start) {
-		error = xfs_rtmodify_summary(mp, tp,
-			XFS_RTBLOCKLOG(start - preblock),
-			XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-		if (error) {
-			return error;
-		}
-	}
-	/*
-	 * If there are blocks not being allocated at the end of the
-	 * old extent, add summary data for them to be free.
-	 */
-	if (postblock > end) {
-		error = xfs_rtmodify_summary(mp, tp,
-			XFS_RTBLOCKLOG(postblock - end),
-			XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
-		if (error) {
-			return error;
-		}
-	}
-	/*
-	 * Modify the bitmap to mark this extent allocated.
-	 */
-	error = xfs_rtmodify_range(mp, tp, start, len, 0);
-	return error;
-}
-
-/*
- * Return whether there are any free extents in the size range given
- * by low and high, for the bitmap block bbno.
- */
-STATIC int				/* error */
-xfs_rtany_summary(
-	xfs_mount_t	*mp,		/* file system mount structure */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	int		low,		/* low log2 extent size */
-	int		high,		/* high log2 extent size */
-	xfs_rtblock_t	bbno,		/* bitmap block number */
-	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
-	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
-	int		*stat)		/* out: any good extents here? */
-{
-	int		error;		/* error value */
-	int		log;		/* loop counter, log2 of ext. size */
-	xfs_suminfo_t	sum;		/* summary data */
-
-	/*
-	 * Loop over logs of extent sizes.  Order is irrelevant.
-	 */
-	for (log = low; log <= high; log++) {
-		/*
-		 * Get one summary datum.
-		 */
-		error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
-		if (error) {
-			return error;
-		}
-		/*
-		 * If there are any, return success.
-		 */
-		if (sum) {
-			*stat = 1;
-			return 0;
-		}
-	}
-	/*
-	 * Found nothing, return failure.
-	 */
-	*stat = 0;
-	return 0;
-}
-
-/*
- * Get a buffer for the bitmap or summary file block specified.
- * The buffer is returned read and locked.
- */
-STATIC int				/* error */
-xfs_rtbuf_get(
-	xfs_mount_t	*mp,		/* file system mount structure */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	block,		/* block number in bitmap or summary */
-	int		issum,		/* is summary not bitmap */
-	xfs_buf_t	**bpp)		/* output: buffer for the block */
-{
-	xfs_buf_t	*bp;		/* block buffer, result */
-	xfs_inode_t	*ip;		/* bitmap or summary inode */
-	xfs_bmbt_irec_t	map;
-	int		nmap = 1;
-	int		error;		/* error value */
-
-	ip = issum ? mp->m_rsumip : mp->m_rbmip;
-
-	error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
-	if (error)
-		return error;
-
-	ASSERT(map.br_startblock != NULLFSBLOCK);
-	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
-				   XFS_FSB_TO_DADDR(mp, map.br_startblock),
-				   mp->m_bsize, 0, &bp, NULL);
-	if (error)
-		return error;
-	ASSERT(!xfs_buf_geterror(bp));
-	*bpp = bp;
-	return 0;
-}
-
-#ifdef DEBUG
-/*
- * Check that the given extent (block range) is allocated already.
- */
-STATIC int				/* error */
-xfs_rtcheck_alloc_range(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	bno,		/* starting block number of extent */
-	xfs_extlen_t	len,		/* length of extent */
-	int		*stat)		/* out: 1 for allocated, 0 for not */
-{
-	xfs_rtblock_t	new;		/* dummy for xfs_rtcheck_range */
-
-	return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat);
-}
-#endif
-
-/*
- * Check that the given range is either all allocated (val = 0) or
- * all free (val = 1).
- */
-STATIC int				/* error */
-xfs_rtcheck_range(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* starting block number of extent */
-	xfs_extlen_t	len,		/* length of extent */
-	int		val,		/* 1 for free, 0 for allocated */
-	xfs_rtblock_t	*new,		/* out: first block not matching */
-	int		*stat)		/* out: 1 for matches, 0 for not */
-{
-	xfs_rtword_t	*b;		/* current word in buffer */
-	int		bit;		/* bit number in the word */
-	xfs_rtblock_t	block;		/* bitmap block number */
-	xfs_buf_t	*bp;		/* buf for the block */
-	xfs_rtword_t	*bufp;		/* starting word in buffer */
-	int		error;		/* error value */
-	xfs_rtblock_t	i;		/* current bit number rel. to start */
-	xfs_rtblock_t	lastbit;	/* last useful bit in word */
-	xfs_rtword_t	mask;		/* mask of relevant bits for value */
-	xfs_rtword_t	wdiff;		/* difference from wanted value */
-	int		word;		/* word number in the buffer */
-
-	/*
-	 * Compute starting bitmap block number
-	 */
-	block = XFS_BITTOBLOCK(mp, start);
-	/*
-	 * Read the bitmap block.
-	 */
-	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-	if (error) {
-		return error;
-	}
-	bufp = bp->b_addr;
-	/*
-	 * Compute the starting word's address, and starting bit.
-	 */
-	word = XFS_BITTOWORD(mp, start);
-	b = &bufp[word];
-	bit = (int)(start & (XFS_NBWORD - 1));
-	/*
-	 * 0 (allocated) => all zero's; 1 (free) => all one's.
-	 */
-	val = -val;
-	/*
-	 * If not starting on a word boundary, deal with the first
-	 * (partial) word.
-	 */
-	if (bit) {
-		/*
-		 * Compute first bit not examined.
-		 */
-		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-		/*
-		 * Mask of relevant bits.
-		 */
-		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = (*b ^ val) & mask)) {
-			/*
-			 * Different, compute first wrong bit and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i = XFS_RTLOBIT(wdiff) - bit;
-			*new = start + i;
-			*stat = 0;
-			return 0;
-		}
-		i = lastbit - bit;
-		/*
-		 * Go on to next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * If done with this block, get the next one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the next word in the buffer.
-			 */
-			b++;
-		}
-	} else {
-		/*
-		 * Starting on a word boundary, no partial word.
-		 */
-		i = 0;
-	}
-	/*
-	 * Loop over whole words in buffers.  When we use up one buffer
-	 * we move on to the next one.
-	 */
-	while (len - i >= XFS_NBWORD) {
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = *b ^ val)) {
-			/*
-			 * Different, compute first wrong bit and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_RTLOBIT(wdiff);
-			*new = start + i;
-			*stat = 0;
-			return 0;
-		}
-		i += XFS_NBWORD;
-		/*
-		 * Go on to next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * If done with this block, get the next one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the next word in the buffer.
-			 */
-			b++;
-		}
-	}
-	/*
-	 * If not ending on a word boundary, deal with the last
-	 * (partial) word.
-	 */
-	if ((lastbit = len - i)) {
-		/*
-		 * Mask of relevant bits.
-		 */
-		mask = ((xfs_rtword_t)1 << lastbit) - 1;
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = (*b ^ val) & mask)) {
-			/*
-			 * Different, compute first wrong bit and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_RTLOBIT(wdiff);
-			*new = start + i;
-			*stat = 0;
-			return 0;
-		} else
-			i = len;
-	}
-	/*
-	 * Successful, return.
-	 */
-	xfs_trans_brelse(tp, bp);
-	*new = start + i;
-	*stat = 1;
-	return 0;
-}
-
-/*
- * Copy and transform the summary file, given the old and new
- * parameters in the mount structures.
- */
-STATIC int				/* error */
-xfs_rtcopy_summary(
-	xfs_mount_t	*omp,		/* old file system mount point */
-	xfs_mount_t	*nmp,		/* new file system mount point */
-	xfs_trans_t	*tp)		/* transaction pointer */
-{
-	xfs_rtblock_t	bbno;		/* bitmap block number */
-	xfs_buf_t	*bp;		/* summary buffer */
+	xfs_fileoff_t	bno;		/* block number in file */
+	xfs_buf_t	*bp;		/* temporary buffer for zeroing */
+	int		committed;	/* transaction committed flag */
+	xfs_daddr_t	d;		/* disk block address */
 	int		error;		/* error return value */
-	int		log;		/* summary level number (log length) */
-	xfs_suminfo_t	sum;		/* summary data */
-	xfs_fsblock_t	sumbno;		/* summary block number */
+	xfs_fsblock_t	firstblock;	/* first block allocated in xaction */
+	xfs_bmap_free_t	flist;		/* list of freed blocks */
+	xfs_fsblock_t	fsbno;		/* filesystem block for bno */
+	xfs_bmbt_irec_t	map;		/* block map output */
+	int		nmap;		/* number of block maps */
+	int		resblks;	/* space reservation */
 
-	bp = NULL;
-	for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
-		for (bbno = omp->m_sb.sb_rbmblocks - 1;
-		     (xfs_srtblock_t)bbno >= 0;
-		     bbno--) {
-			error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
-				&sumbno, &sum);
+	/*
+	 * Allocate space to the file, as necessary.
+	 */
+	while (oblocks < nblocks) {
+		int		cancelflags = 0;
+		xfs_trans_t	*tp;
+
+		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
+		resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
+		/*
+		 * Reserve space & log for one extent added to the file.
+		 */
+		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+					  resblks, 0);
+		if (error)
+			goto error_cancel;
+		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+		/*
+		 * Lock the inode.
+		 */
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+		xfs_bmap_init(&flist, &firstblock);
+		/*
+		 * Allocate blocks to the bitmap file.
+		 */
+		nmap = 1;
+		cancelflags |= XFS_TRANS_ABORT;
+		error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
+					XFS_BMAPI_METADATA, &firstblock,
+					resblks, &map, &nmap, &flist);
+		if (!error && nmap < 1)
+			error = XFS_ERROR(ENOSPC);
+		if (error)
+			goto error_cancel;
+		/*
+		 * Free any blocks freed up in the transaction, then commit.
+		 */
+		error = xfs_bmap_finish(&tp, &flist, &committed);
+		if (error)
+			goto error_cancel;
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		if (error)
+			goto error;
+		/*
+		 * Now we need to clear the allocated blocks.
+		 * Do this one block per transaction, to keep it simple.
+		 */
+		cancelflags = 0;
+		for (bno = map.br_startoff, fsbno = map.br_startblock;
+		     bno < map.br_startoff + map.br_blockcount;
+		     bno++, fsbno++) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
+			/*
+			 * Reserve log for one block zeroing.
+			 */
+			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
+						  0, 0);
 			if (error)
-				return error;
-			if (sum == 0)
-				continue;
-			error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
-				&bp, &sumbno);
+				goto error_cancel;
+			/*
+			 * Lock the bitmap inode.
+			 */
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+			/*
+			 * Get a buffer for the block.
+			 */
+			d = XFS_FSB_TO_DADDR(mp, fsbno);
+			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+				mp->m_bsize, 0);
+			if (bp == NULL) {
+				error = XFS_ERROR(EIO);
+error_cancel:
+				xfs_trans_cancel(tp, cancelflags);
+				goto error;
+			}
+			memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+			/*
+			 * Commit the transaction.
+			 */
+			error = xfs_trans_commit(tp, 0);
 			if (error)
-				return error;
-			error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
-				&bp, &sumbno);
-			if (error)
-				return error;
-			ASSERT(sum > 0);
+				goto error;
 		}
+		/*
+		 * Go on to the next extent, if any.
+		 */
+		oblocks = map.br_startoff + map.br_blockcount;
 	}
 	return 0;
-}
 
-/*
- * Searching backward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int				/* error */
-xfs_rtfind_back(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* starting block to look at */
-	xfs_rtblock_t	limit,		/* last block to look at */
-	xfs_rtblock_t	*rtblock)	/* out: start block found */
-{
-	xfs_rtword_t	*b;		/* current word in buffer */
-	int		bit;		/* bit number in the word */
-	xfs_rtblock_t	block;		/* bitmap block number */
-	xfs_buf_t	*bp;		/* buf for the block */
-	xfs_rtword_t	*bufp;		/* starting word in buffer */
-	int		error;		/* error value */
-	xfs_rtblock_t	firstbit;	/* first useful bit in the word */
-	xfs_rtblock_t	i;		/* current bit number rel. to start */
-	xfs_rtblock_t	len;		/* length of inspected area */
-	xfs_rtword_t	mask;		/* mask of relevant bits for value */
-	xfs_rtword_t	want;		/* mask for "good" values */
-	xfs_rtword_t	wdiff;		/* difference from wanted value */
-	int		word;		/* word number in the buffer */
-
-	/*
-	 * Compute and read in starting bitmap block for starting block.
-	 */
-	block = XFS_BITTOBLOCK(mp, start);
-	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-	if (error) {
-		return error;
-	}
-	bufp = bp->b_addr;
-	/*
-	 * Get the first word's index & point to it.
-	 */
-	word = XFS_BITTOWORD(mp, start);
-	b = &bufp[word];
-	bit = (int)(start & (XFS_NBWORD - 1));
-	len = start - limit + 1;
-	/*
-	 * Compute match value, based on the bit at start: if 1 (free)
-	 * then all-ones, else all-zeroes.
-	 */
-	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-	/*
-	 * If the starting position is not word-aligned, deal with the
-	 * partial word.
-	 */
-	if (bit < XFS_NBWORD - 1) {
-		/*
-		 * Calculate first (leftmost) bit number to look at,
-		 * and mask for all the relevant bits in this word.
-		 */
-		firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
-		mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
-			firstbit;
-		/*
-		 * Calculate the difference between the value there
-		 * and what we're looking for.
-		 */
-		if ((wdiff = (*b ^ want) & mask)) {
-			/*
-			 * Different.  Mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i = bit - XFS_RTHIBIT(wdiff);
-			*rtblock = start - i + 1;
-			return 0;
-		}
-		i = bit - firstbit + 1;
-		/*
-		 * Go on to previous block if that's where the previous word is
-		 * and we need the previous word.
-		 */
-		if (--word == -1 && i < len) {
-			/*
-			 * If done with this block, get the previous one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			bufp = bp->b_addr;
-			word = XFS_BLOCKWMASK(mp);
-			b = &bufp[word];
-		} else {
-			/*
-			 * Go on to the previous word in the buffer.
-			 */
-			b--;
-		}
-	} else {
-		/*
-		 * Starting on a word boundary, no partial word.
-		 */
-		i = 0;
-	}
-	/*
-	 * Loop over whole words in buffers.  When we use up one buffer
-	 * we move on to the previous one.
-	 */
-	while (len - i >= XFS_NBWORD) {
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = *b ^ want)) {
-			/*
-			 * Different, mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-			*rtblock = start - i + 1;
-			return 0;
-		}
-		i += XFS_NBWORD;
-		/*
-		 * Go on to previous block if that's where the previous word is
-		 * and we need the previous word.
-		 */
-		if (--word == -1 && i < len) {
-			/*
-			 * If done with this block, get the previous one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			bufp = bp->b_addr;
-			word = XFS_BLOCKWMASK(mp);
-			b = &bufp[word];
-		} else {
-			/*
-			 * Go on to the previous word in the buffer.
-			 */
-			b--;
-		}
-	}
-	/*
-	 * If not ending on a word boundary, deal with the last
-	 * (partial) word.
-	 */
-	if (len - i) {
-		/*
-		 * Calculate first (leftmost) bit number to look at,
-		 * and mask for all the relevant bits in this word.
-		 */
-		firstbit = XFS_NBWORD - (len - i);
-		mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = (*b ^ want) & mask)) {
-			/*
-			 * Different, mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-			*rtblock = start - i + 1;
-			return 0;
-		} else
-			i = len;
-	}
-	/*
-	 * No match, return that we scanned the whole area.
-	 */
-	xfs_trans_brelse(tp, bp);
-	*rtblock = start - i + 1;
-	return 0;
-}
-
-/*
- * Searching forward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int				/* error */
-xfs_rtfind_forw(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* starting block to look at */
-	xfs_rtblock_t	limit,		/* last block to look at */
-	xfs_rtblock_t	*rtblock)	/* out: start block found */
-{
-	xfs_rtword_t	*b;		/* current word in buffer */
-	int		bit;		/* bit number in the word */
-	xfs_rtblock_t	block;		/* bitmap block number */
-	xfs_buf_t	*bp;		/* buf for the block */
-	xfs_rtword_t	*bufp;		/* starting word in buffer */
-	int		error;		/* error value */
-	xfs_rtblock_t	i;		/* current bit number rel. to start */
-	xfs_rtblock_t	lastbit;	/* last useful bit in the word */
-	xfs_rtblock_t	len;		/* length of inspected area */
-	xfs_rtword_t	mask;		/* mask of relevant bits for value */
-	xfs_rtword_t	want;		/* mask for "good" values */
-	xfs_rtword_t	wdiff;		/* difference from wanted value */
-	int		word;		/* word number in the buffer */
-
-	/*
-	 * Compute and read in starting bitmap block for starting block.
-	 */
-	block = XFS_BITTOBLOCK(mp, start);
-	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-	if (error) {
-		return error;
-	}
-	bufp = bp->b_addr;
-	/*
-	 * Get the first word's index & point to it.
-	 */
-	word = XFS_BITTOWORD(mp, start);
-	b = &bufp[word];
-	bit = (int)(start & (XFS_NBWORD - 1));
-	len = limit - start + 1;
-	/*
-	 * Compute match value, based on the bit at start: if 1 (free)
-	 * then all-ones, else all-zeroes.
-	 */
-	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-	/*
-	 * If the starting position is not word-aligned, deal with the
-	 * partial word.
-	 */
-	if (bit) {
-		/*
-		 * Calculate last (rightmost) bit number to look at,
-		 * and mask for all the relevant bits in this word.
-		 */
-		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-		/*
-		 * Calculate the difference between the value there
-		 * and what we're looking for.
-		 */
-		if ((wdiff = (*b ^ want) & mask)) {
-			/*
-			 * Different.  Mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i = XFS_RTLOBIT(wdiff) - bit;
-			*rtblock = start + i - 1;
-			return 0;
-		}
-		i = lastbit - bit;
-		/*
-		 * Go on to next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * If done with this block, get the previous one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the previous word in the buffer.
-			 */
-			b++;
-		}
-	} else {
-		/*
-		 * Starting on a word boundary, no partial word.
-		 */
-		i = 0;
-	}
-	/*
-	 * Loop over whole words in buffers.  When we use up one buffer
-	 * we move on to the next one.
-	 */
-	while (len - i >= XFS_NBWORD) {
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = *b ^ want)) {
-			/*
-			 * Different, mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_RTLOBIT(wdiff);
-			*rtblock = start + i - 1;
-			return 0;
-		}
-		i += XFS_NBWORD;
-		/*
-		 * Go on to next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * If done with this block, get the next one.
-			 */
-			xfs_trans_brelse(tp, bp);
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the next word in the buffer.
-			 */
-			b++;
-		}
-	}
-	/*
-	 * If not ending on a word boundary, deal with the last
-	 * (partial) word.
-	 */
-	if ((lastbit = len - i)) {
-		/*
-		 * Calculate mask for all the relevant bits in this word.
-		 */
-		mask = ((xfs_rtword_t)1 << lastbit) - 1;
-		/*
-		 * Compute difference between actual and desired value.
-		 */
-		if ((wdiff = (*b ^ want) & mask)) {
-			/*
-			 * Different, mark where we are and return.
-			 */
-			xfs_trans_brelse(tp, bp);
-			i += XFS_RTLOBIT(wdiff);
-			*rtblock = start + i - 1;
-			return 0;
-		} else
-			i = len;
-	}
-	/*
-	 * No match, return that we scanned the whole area.
-	 */
-	xfs_trans_brelse(tp, bp);
-	*rtblock = start + i - 1;
-	return 0;
-}
-
-/*
- * Mark an extent specified by start and len freed.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int				/* error */
-xfs_rtfree_range(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* starting block to free */
-	xfs_extlen_t	len,		/* length to free */
-	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
-	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
-{
-	xfs_rtblock_t	end;		/* end of the freed extent */
-	int		error;		/* error value */
-	xfs_rtblock_t	postblock;	/* first block freed > end */
-	xfs_rtblock_t	preblock;	/* first block freed < start */
-
-	end = start + len - 1;
-	/*
-	 * Modify the bitmap to mark this extent freed.
-	 */
-	error = xfs_rtmodify_range(mp, tp, start, len, 1);
-	if (error) {
-		return error;
-	}
-	/*
-	 * Assume we're freeing out of the middle of an allocated extent.
-	 * We need to find the beginning and end of the extent so we can
-	 * properly update the summary.
-	 */
-	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-	if (error) {
-		return error;
-	}
-	/*
-	 * Find the next allocated block (end of allocated extent).
-	 */
-	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-		&postblock);
-	if (error)
-		return error;
-	/*
-	 * If there are blocks not being freed at the front of the
-	 * old extent, add summary data for them to be allocated.
-	 */
-	if (preblock < start) {
-		error = xfs_rtmodify_summary(mp, tp,
-			XFS_RTBLOCKLOG(start - preblock),
-			XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-		if (error) {
-			return error;
-		}
-	}
-	/*
-	 * If there are blocks not being freed at the end of the
-	 * old extent, add summary data for them to be allocated.
-	 */
-	if (postblock > end) {
-		error = xfs_rtmodify_summary(mp, tp,
-			XFS_RTBLOCKLOG(postblock - end),
-			XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
-		if (error) {
-			return error;
-		}
-	}
-	/*
-	 * Increment the summary information corresponding to the entire
-	 * (new) free extent.
-	 */
-	error = xfs_rtmodify_summary(mp, tp,
-		XFS_RTBLOCKLOG(postblock + 1 - preblock),
-		XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+error:
 	return error;
 }
 
 /*
- * Read and return the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int				/* error */
-xfs_rtget_summary(
-	xfs_mount_t	*mp,		/* file system mount structure */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	int		log,		/* log2 of extent size */
-	xfs_rtblock_t	bbno,		/* bitmap block number */
-	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
-	xfs_fsblock_t	*rsb,		/* in/out: summary block number */
-	xfs_suminfo_t	*sum)		/* out: summary info for this block */
-{
-	xfs_buf_t	*bp;		/* buffer for summary block */
-	int		error;		/* error value */
-	xfs_fsblock_t	sb;		/* summary fsblock */
-	int		so;		/* index into the summary file */
-	xfs_suminfo_t	*sp;		/* pointer to returned data */
-
-	/*
-	 * Compute entry number in the summary file.
-	 */
-	so = XFS_SUMOFFS(mp, log, bbno);
-	/*
-	 * Compute the block number in the summary file.
-	 */
-	sb = XFS_SUMOFFSTOBLOCK(mp, so);
-	/*
-	 * If we have an old buffer, and the block number matches, use that.
-	 */
-	if (rbpp && *rbpp && *rsb == sb)
-		bp = *rbpp;
-	/*
-	 * Otherwise we have to get the buffer.
-	 */
-	else {
-		/*
-		 * If there was an old one, get rid of it first.
-		 */
-		if (rbpp && *rbpp)
-			xfs_trans_brelse(tp, *rbpp);
-		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-		if (error) {
-			return error;
-		}
-		/*
-		 * Remember this buffer and block for the next call.
-		 */
-		if (rbpp) {
-			*rbpp = bp;
-			*rsb = sb;
-		}
-	}
-	/*
-	 * Point to the summary information & copy it out.
-	 */
-	sp = XFS_SUMPTR(mp, bp, so);
-	*sum = *sp;
-	/*
-	 * Drop the buffer if we're not asked to remember it.
-	 */
-	if (!rbpp)
-		xfs_trans_brelse(tp, bp);
-	return 0;
-}
-
-/*
- * Set the given range of bitmap bits to the given value.
- * Do whatever I/O and logging is required.
- */
-STATIC int				/* error */
-xfs_rtmodify_range(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	start,		/* starting block to modify */
-	xfs_extlen_t	len,		/* length of extent to modify */
-	int		val)		/* 1 for free, 0 for allocated */
-{
-	xfs_rtword_t	*b;		/* current word in buffer */
-	int		bit;		/* bit number in the word */
-	xfs_rtblock_t	block;		/* bitmap block number */
-	xfs_buf_t	*bp;		/* buf for the block */
-	xfs_rtword_t	*bufp;		/* starting word in buffer */
-	int		error;		/* error value */
-	xfs_rtword_t	*first;		/* first used word in the buffer */
-	int		i;		/* current bit number rel. to start */
-	int		lastbit;	/* last useful bit in word */
-	xfs_rtword_t	mask;		/* mask o frelevant bits for value */
-	int		word;		/* word number in the buffer */
-
-	/*
-	 * Compute starting bitmap block number.
-	 */
-	block = XFS_BITTOBLOCK(mp, start);
-	/*
-	 * Read the bitmap block, and point to its data.
-	 */
-	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-	if (error) {
-		return error;
-	}
-	bufp = bp->b_addr;
-	/*
-	 * Compute the starting word's address, and starting bit.
-	 */
-	word = XFS_BITTOWORD(mp, start);
-	first = b = &bufp[word];
-	bit = (int)(start & (XFS_NBWORD - 1));
-	/*
-	 * 0 (allocated) => all zeroes; 1 (free) => all ones.
-	 */
-	val = -val;
-	/*
-	 * If not starting on a word boundary, deal with the first
-	 * (partial) word.
-	 */
-	if (bit) {
-		/*
-		 * Compute first bit not changed and mask of relevant bits.
-		 */
-		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-		/*
-		 * Set/clear the active bits.
-		 */
-		if (val)
-			*b |= mask;
-		else
-			*b &= ~mask;
-		i = lastbit - bit;
-		/*
-		 * Go on to the next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * Log the changed part of this block.
-			 * Get the next one.
-			 */
-			xfs_trans_log_buf(tp, bp,
-				(uint)((char *)first - (char *)bufp),
-				(uint)((char *)b - (char *)bufp));
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			first = b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the next word in the buffer
-			 */
-			b++;
-		}
-	} else {
-		/*
-		 * Starting on a word boundary, no partial word.
-		 */
-		i = 0;
-	}
-	/*
-	 * Loop over whole words in buffers.  When we use up one buffer
-	 * we move on to the next one.
-	 */
-	while (len - i >= XFS_NBWORD) {
-		/*
-		 * Set the word value correctly.
-		 */
-		*b = val;
-		i += XFS_NBWORD;
-		/*
-		 * Go on to the next block if that's where the next word is
-		 * and we need the next word.
-		 */
-		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-			/*
-			 * Log the changed part of this block.
-			 * Get the next one.
-			 */
-			xfs_trans_log_buf(tp, bp,
-				(uint)((char *)first - (char *)bufp),
-				(uint)((char *)b - (char *)bufp));
-			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-			if (error) {
-				return error;
-			}
-			first = b = bufp = bp->b_addr;
-			word = 0;
-		} else {
-			/*
-			 * Go on to the next word in the buffer
-			 */
-			b++;
-		}
-	}
-	/*
-	 * If not ending on a word boundary, deal with the last
-	 * (partial) word.
-	 */
-	if ((lastbit = len - i)) {
-		/*
-		 * Compute a mask of relevant bits.
-		 */
-		bit = 0;
-		mask = ((xfs_rtword_t)1 << lastbit) - 1;
-		/*
-		 * Set/clear the active bits.
-		 */
-		if (val)
-			*b |= mask;
-		else
-			*b &= ~mask;
-		b++;
-	}
-	/*
-	 * Log any remaining changed bytes.
-	 */
-	if (b > first)
-		xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
-			(uint)((char *)b - (char *)bufp - 1));
-	return 0;
-}
-
-/*
- * Read and modify the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int				/* error */
-xfs_rtmodify_summary(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	int		log,		/* log2 of extent size */
-	xfs_rtblock_t	bbno,		/* bitmap block number */
-	int		delta,		/* change to make to summary info */
-	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
-	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
-{
-	xfs_buf_t	*bp;		/* buffer for the summary block */
-	int		error;		/* error value */
-	xfs_fsblock_t	sb;		/* summary fsblock */
-	int		so;		/* index into the summary file */
-	xfs_suminfo_t	*sp;		/* pointer to returned data */
-
-	/*
-	 * Compute entry number in the summary file.
-	 */
-	so = XFS_SUMOFFS(mp, log, bbno);
-	/*
-	 * Compute the block number in the summary file.
-	 */
-	sb = XFS_SUMOFFSTOBLOCK(mp, so);
-	/*
-	 * If we have an old buffer, and the block number matches, use that.
-	 */
-	if (rbpp && *rbpp && *rsb == sb)
-		bp = *rbpp;
-	/*
-	 * Otherwise we have to get the buffer.
-	 */
-	else {
-		/*
-		 * If there was an old one, get rid of it first.
-		 */
-		if (rbpp && *rbpp)
-			xfs_trans_brelse(tp, *rbpp);
-		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-		if (error) {
-			return error;
-		}
-		/*
-		 * Remember this buffer and block for the next call.
-		 */
-		if (rbpp) {
-			*rbpp = bp;
-			*rsb = sb;
-		}
-	}
-	/*
-	 * Point to the summary information, modify and log it.
-	 */
-	sp = XFS_SUMPTR(mp, bp, so);
-	*sp += delta;
-	xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
-		(uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
-	return 0;
-}
-
-/*
  * Visible (exported) functions.
  */
 
@@ -2129,66 +1231,6 @@
 }
 
 /*
- * Free an extent in the realtime subvolume.  Length is expressed in
- * realtime extents, as is the block number.
- */
-int					/* error */
-xfs_rtfree_extent(
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_rtblock_t	bno,		/* starting block number to free */
-	xfs_extlen_t	len)		/* length of extent freed */
-{
-	int		error;		/* error value */
-	xfs_mount_t	*mp;		/* file system mount structure */
-	xfs_fsblock_t	sb;		/* summary file block number */
-	xfs_buf_t	*sumbp;		/* summary file block buffer */
-
-	mp = tp->t_mountp;
-
-	ASSERT(mp->m_rbmip->i_itemp != NULL);
-	ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
-
-#ifdef DEBUG
-	/*
-	 * Check to see that this whole range is currently allocated.
-	 */
-	{
-		int	stat;		/* result from checking range */
-
-		error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat);
-		if (error) {
-			return error;
-		}
-		ASSERT(stat);
-	}
-#endif
-	sumbp = NULL;
-	/*
-	 * Free the range of realtime blocks.
-	 */
-	error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
-	if (error) {
-		return error;
-	}
-	/*
-	 * Mark more blocks free in the superblock.
-	 */
-	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
-	/*
-	 * If we've now freed all the blocks, reset the file sequence
-	 * number to 0.
-	 */
-	if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
-	    mp->m_sb.sb_rextents) {
-		if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
-			mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
-		*(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
-		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
-	}
-	return 0;
-}
-
-/*
  * Initialize realtime fields in the mount structure.
  */
 int				/* error */
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index b2a1a24..752b63d 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -95,6 +95,30 @@
 	struct xfs_mount	*mp,	/* file system mount structure */
 	xfs_growfs_rt_t		*in);	/* user supplied growfs struct */
 
+/*
+ * From xfs_rtbitmap.c
+ */
+int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+		  xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
+int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
+		      xfs_rtblock_t start, xfs_extlen_t len, int val,
+		      xfs_rtblock_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
+		    xfs_rtblock_t start, xfs_rtblock_t limit,
+		    xfs_rtblock_t *rtblock);
+int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
+		    xfs_rtblock_t start, xfs_rtblock_t limit,
+		    xfs_rtblock_t *rtblock);
+int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
+		       xfs_rtblock_t start, xfs_extlen_t len, int val);
+int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
+			 xfs_rtblock_t bbno, int delta, xfs_buf_t **rbpp,
+			 xfs_fsblock_t *rsb);
+int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
+		     xfs_rtblock_t start, xfs_extlen_t len,
+		     struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
+
 #else
 # define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb)  (ENOSYS)
 # define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
diff --git a/fs/xfs/xfs_rtbitmap.c b/fs/xfs/xfs_rtbitmap.c
new file mode 100644
index 0000000..b1f2fe8
--- /dev/null
+++ b/fs/xfs/xfs_rtbitmap.c
@@ -0,0 +1,974 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_buf.h"
+#include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
+
+
+/*
+ * Realtime allocator bitmap functions shared with userspace.
+ */
+
+/*
+ * Get a buffer for the bitmap or summary file block specified.
+ * The buffer is returned read and locked.
+ */
+int
+xfs_rtbuf_get(
+	xfs_mount_t	*mp,		/* file system mount structure */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	block,		/* block number in bitmap or summary */
+	int		issum,		/* is summary not bitmap */
+	xfs_buf_t	**bpp)		/* output: buffer for the block */
+{
+	xfs_buf_t	*bp;		/* block buffer, result */
+	xfs_inode_t	*ip;		/* bitmap or summary inode */
+	xfs_bmbt_irec_t	map;
+	int		nmap = 1;
+	int		error;		/* error value */
+
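+	/*
+	 * The realtime bitmap and summary are regular files rooted at the
+	 * reserved inodes m_rbmip and m_rsumip.
+	 */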
+	ip = issum ? mp->m_rsumip : mp->m_rbmip;
+
+	error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
+	if (error)
+		return error;
+
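+	/*
+	 * Bitmap and summary blocks are allocated when the filesystem is
+	 * made or grown, so the mapping should never be a hole.
+	 */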
+	ASSERT(map.br_startblock != NULLFSBLOCK);
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+				   XFS_FSB_TO_DADDR(mp, map.br_startblock),
+				   mp->m_bsize, 0, &bp, NULL);
+	if (error)
+		return error;
+	ASSERT(!xfs_buf_geterror(bp));
+	*bpp = bp;
+	return 0;
+}
+
+/*
+ * Searching backward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_back(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to look at */
+	xfs_rtblock_t	limit,		/* last block to look at */
+	xfs_rtblock_t	*rtblock)	/* out: start block found */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	firstbit;	/* first useful bit in the word */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	len;		/* length of inspected area */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	want;		/* mask for "good" values */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute and read in starting bitmap block for starting block.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = bp->b_addr;
+	/*
+	 * Get the first word's index & point to it.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	len = start - limit + 1;
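+	/* Number of blocks to inspect, inclusive of start and limit. */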
+	/*
+	 * Compute match value, based on the bit at start: if 1 (free)
+	 * then all-ones, else all-zeroes.
+	 */
+	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+	/*
+	 * If the starting position is not word-aligned, deal with the
+	 * partial word.
+	 */
+	if (bit < XFS_NBWORD - 1) {
+		/*
+		 * Calculate first (leftmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
+		mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
+			firstbit;
+		/*
+		 * Calculate the difference between the value there
+		 * and what we're looking for.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different.  Mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = bit - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		}
+		i = bit - firstbit + 1;
+		/*
+		 * Go on to previous block if that's where the previous word is
+		 * and we need the previous word.
+		 */
+		if (--word == -1 && i < len) {
+			/*
+			 * If done with this block, get the previous one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			bufp = bp->b_addr;
+			word = XFS_BLOCKWMASK(mp);
+			b = &bufp[word];
+		} else {
+			/*
+			 * Go on to the previous word in the buffer.
+			 */
+			b--;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the previous one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ want)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to previous block if that's where the previous word is
+		 * and we need the previous word.
+		 */
+		if (--word == -1 && i < len) {
+			/*
+			 * If done with this block, get the previous one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			bufp = bp->b_addr;
+			word = XFS_BLOCKWMASK(mp);
+			b = &bufp[word];
+		} else {
+			/*
+			 * Go on to the previous word in the buffer.
+			 */
+			b--;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if (len - i) {
+		/*
+		 * Calculate first (leftmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		firstbit = XFS_NBWORD - (len - i);
+		mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+			*rtblock = start - i + 1;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * No match, return that we scanned the whole area.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*rtblock = start - i + 1;
+	return 0;
+}
+
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_forw(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to look at */
+	xfs_rtblock_t	limit,		/* last block to look at */
+	xfs_rtblock_t	*rtblock)	/* out: start block found */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	lastbit;	/* last useful bit in the word */
+	xfs_rtblock_t	len;		/* length of inspected area */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	want;		/* mask for "good" values */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute and read in starting bitmap block for starting block.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = bp->b_addr;
+	/*
+	 * Get the first word's index & point to it.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	len = limit - start + 1;
+	/*
+	 * Compute match value, based on the bit at start: if 1 (free)
+	 * then all-ones, else all-zeroes.
+	 */
+	want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+	/*
+	 * If the starting position is not word-aligned, deal with the
+	 * partial word.
+	 */
+	if (bit) {
+		/*
+		 * Calculate last (rightmost) bit number to look at,
+		 * and mask for all the relevant bits in this word.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Calculate the difference between the value there
+		 * and what we're looking for.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different.  Mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = XFS_RTLOBIT(wdiff) - bit;
+			*rtblock = start + i - 1;
+			return 0;
+		}
+		i = lastbit - bit;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ want)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*rtblock = start + i - 1;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Calculate mask for all the relevant bits in this word.
+		 */
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ want) & mask)) {
+			/*
+			 * Different, mark where we are and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*rtblock = start + i - 1;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * No match, return that we scanned the whole area.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*rtblock = start + i - 1;
+	return 0;
+}
+
+/*
+ * Read and modify the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+int
+xfs_rtmodify_summary(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	int		log,		/* log2 of extent size */
+	xfs_rtblock_t	bbno,		/* bitmap block number */
+	int		delta,		/* change to make to summary info */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_buf_t	*bp;		/* buffer for the summary block */
+	int		error;		/* error value */
+	xfs_fsblock_t	sb;		/* summary fsblock */
+	int		so;		/* index into the summary file */
+	xfs_suminfo_t	*sp;		/* pointer to returned data */
+
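+	/*
+	 * The summary file holds a count of free extents for each
+	 * (log2 of extent size, bitmap block) pair.
+	 */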
+	/*
+	 * Compute entry number in the summary file.
+	 */
+	so = XFS_SUMOFFS(mp, log, bbno);
+	/*
+	 * Compute the block number in the summary file.
+	 */
+	sb = XFS_SUMOFFSTOBLOCK(mp, so);
+	/*
+	 * If we have an old buffer, and the block number matches, use that.
+	 */
+	if (rbpp && *rbpp && *rsb == sb)
+		bp = *rbpp;
+	/*
+	 * Otherwise we have to get the buffer.
+	 */
+	else {
+		/*
+		 * If there was an old one, get rid of it first.
+		 */
+		if (rbpp && *rbpp)
+			xfs_trans_brelse(tp, *rbpp);
+		error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+		if (error) {
+			return error;
+		}
+		/*
+		 * Remember this buffer and block for the next call.
+		 */
+		if (rbpp) {
+			*rbpp = bp;
+			*rsb = sb;
+		}
+	}
+	/*
+	 * Point to the summary information, modify and log it.
+	 */
+	sp = XFS_SUMPTR(mp, bp, so);
+	*sp += delta;
+	xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
+		(uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
+	return 0;
+}
+
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+int
+xfs_rtmodify_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to modify */
+	xfs_extlen_t	len,		/* length of extent to modify */
+	int		val)		/* 1 for free, 0 for allocated */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtword_t	*first;		/* first used word in the buffer */
+	int		i;		/* current bit number rel. to start */
+	int		lastbit;	/* last useful bit in word */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute starting bitmap block number.
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	/*
+	 * Read the bitmap block, and point to its data.
+	 */
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = bp->b_addr;
+	/*
+	 * Compute the starting word's address, and starting bit.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	first = b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	/*
+	 * 0 (allocated) => all zeroes; 1 (free) => all ones.
+	 */
+	val = -val;
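+	/* In two's complement -1 is all ones, so whole words take val below. */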
+	/*
+	 * If not starting on a word boundary, deal with the first
+	 * (partial) word.
+	 */
+	if (bit) {
+		/*
+		 * Compute first bit not changed and mask of relevant bits.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Set/clear the active bits.
+		 */
+		if (val)
+			*b |= mask;
+		else
+			*b &= ~mask;
+		i = lastbit - bit;
+		/*
+		 * Go on to the next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * Log the changed part of this block.
+			 * Get the next one.
+			 */
+			xfs_trans_log_buf(tp, bp,
+				(uint)((char *)first - (char *)bufp),
+				(uint)((char *)b - (char *)bufp));
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			first = b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Set the word value correctly.
+		 */
+		*b = val;
+		i += XFS_NBWORD;
+		/*
+		 * Go on to the next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * Log the changed part of this block.
+			 * Get the next one.
+			 */
+			xfs_trans_log_buf(tp, bp,
+				(uint)((char *)first - (char *)bufp),
+				(uint)((char *)b - (char *)bufp));
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			first = b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Compute a mask of relevant bits.
+		 */
+		bit = 0;
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Set/clear the active bits.
+		 */
+		if (val)
+			*b |= mask;
+		else
+			*b &= ~mask;
+		b++;
+	}
+	/*
+	 * Log any remaining changed bytes.
+	 */
+	if (b > first)
+		xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
+			(uint)((char *)b - (char *)bufp - 1));
+	return 0;
+}
+
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+int
+xfs_rtfree_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block to free */
+	xfs_extlen_t	len,		/* length to free */
+	xfs_buf_t	**rbpp,		/* in/out: summary block buffer */
+	xfs_fsblock_t	*rsb)		/* in/out: summary block number */
+{
+	xfs_rtblock_t	end;		/* end of the freed extent */
+	int		error;		/* error value */
+	xfs_rtblock_t	postblock;	/* first block freed > end */
+	xfs_rtblock_t	preblock;	/* first block freed < start */
+
+	end = start + len - 1;
+	/*
+	 * Modify the bitmap to mark this extent freed.
+	 */
+	error = xfs_rtmodify_range(mp, tp, start, len, 1);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Assume we're freeing out of the middle of an allocated extent.
+	 * We need to find the beginning and end of the extent so we can
+	 * properly update the summary.
+	 */
+	error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Find the next allocated block (end of allocated extent).
+	 */
+	error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+		&postblock);
+	if (error)
+		return error;
+	/*
+	 * If there are blocks not being freed at the front of the
+	 * old extent, add summary data for them to be allocated.
+	 */
+	if (preblock < start) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(start - preblock),
+			XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * If there are blocks not being freed at the end of the
+	 * old extent, add summary data for them to be allocated.
+	 */
+	if (postblock > end) {
+		error = xfs_rtmodify_summary(mp, tp,
+			XFS_RTBLOCKLOG(postblock - end),
+			XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+		if (error) {
+			return error;
+		}
+	}
+	/*
+	 * Increment the summary information corresponding to the entire
+	 * (new) free extent: freeing between two existing free extents
+	 * merges three runs into a single entry covering
+	 * [preblock, postblock].
+	 */
+	error = xfs_rtmodify_summary(mp, tp,
+		XFS_RTBLOCKLOG(postblock + 1 - preblock),
+		XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+	return error;
+}
+
+/*
+ * Check that the given range is either all allocated (val = 0) or
+ * all free (val = 1).
+ */
+int
+xfs_rtcheck_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	start,		/* starting block number of extent */
+	xfs_extlen_t	len,		/* length of extent */
+	int		val,		/* 1 for free, 0 for allocated */
+	xfs_rtblock_t	*new,		/* out: first block not matching */
+	int		*stat)		/* out: 1 for matches, 0 for not */
+{
+	xfs_rtword_t	*b;		/* current word in buffer */
+	int		bit;		/* bit number in the word */
+	xfs_rtblock_t	block;		/* bitmap block number */
+	xfs_buf_t	*bp;		/* buf for the block */
+	xfs_rtword_t	*bufp;		/* starting word in buffer */
+	int		error;		/* error value */
+	xfs_rtblock_t	i;		/* current bit number rel. to start */
+	xfs_rtblock_t	lastbit;	/* last useful bit in word */
+	xfs_rtword_t	mask;		/* mask of relevant bits for value */
+	xfs_rtword_t	wdiff;		/* difference from wanted value */
+	int		word;		/* word number in the buffer */
+
+	/*
+	 * Compute starting bitmap block number
+	 */
+	block = XFS_BITTOBLOCK(mp, start);
+	/*
+	 * Read the bitmap block.
+	 */
+	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+	if (error) {
+		return error;
+	}
+	bufp = bp->b_addr;
+	/*
+	 * Compute the starting word's address, and starting bit.
+	 */
+	word = XFS_BITTOWORD(mp, start);
+	b = &bufp[word];
+	bit = (int)(start & (XFS_NBWORD - 1));
+	/*
+	 * 0 (allocated) => all zeroes; 1 (free) => all ones.
+	 */
+	val = -val;
+	/*
+	 * If not starting on a word boundary, deal with the first
+	 * (partial) word.
+	 */
+	if (bit) {
+		/*
+		 * Compute first bit not examined.
+		 */
+		lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+		/*
+		 * Mask of relevant bits.
+		 */
+		mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ val) & mask)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i = XFS_RTLOBIT(wdiff) - bit;
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		}
+		i = lastbit - bit;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	} else {
+		/*
+		 * Starting on a word boundary, no partial word.
+		 */
+		i = 0;
+	}
+	/*
+	 * Loop over whole words in buffers.  When we use up one buffer
+	 * we move on to the next one.
+	 */
+	while (len - i >= XFS_NBWORD) {
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = *b ^ val)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		}
+		i += XFS_NBWORD;
+		/*
+		 * Go on to next block if that's where the next word is
+		 * and we need the next word.
+		 */
+		if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+			/*
+			 * If done with this block, get the next one.
+			 */
+			xfs_trans_brelse(tp, bp);
+			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+			if (error) {
+				return error;
+			}
+			b = bufp = bp->b_addr;
+			word = 0;
+		} else {
+			/*
+			 * Go on to the next word in the buffer.
+			 */
+			b++;
+		}
+	}
+	/*
+	 * If not ending on a word boundary, deal with the last
+	 * (partial) word.
+	 */
+	if ((lastbit = len - i)) {
+		/*
+		 * Mask of relevant bits.
+		 */
+		mask = ((xfs_rtword_t)1 << lastbit) - 1;
+		/*
+		 * Compute difference between actual and desired value.
+		 */
+		if ((wdiff = (*b ^ val) & mask)) {
+			/*
+			 * Different, compute first wrong bit and return.
+			 */
+			xfs_trans_brelse(tp, bp);
+			i += XFS_RTLOBIT(wdiff);
+			*new = start + i;
+			*stat = 0;
+			return 0;
+		} else
+			i = len;
+	}
+	/*
+	 * Successful, return.
+	 */
+	xfs_trans_brelse(tp, bp);
+	*new = start + i;
+	*stat = 1;
+	return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check that the given extent (block range) is allocated already.
+ */
+STATIC int				/* error */
+xfs_rtcheck_alloc_range(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number of extent */
+	xfs_extlen_t	len)		/* length of extent */
+{
+	xfs_rtblock_t	new;		/* dummy for xfs_rtcheck_range */
+	int		stat;
+	int		error;
+
+	error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+	if (error)
+		return error;
+	ASSERT(stat);
+	return 0;
+}
+#else
+#define xfs_rtcheck_alloc_range(m,t,b,l)	(0)
+#endif
+
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int					/* error */
+xfs_rtfree_extent(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_rtblock_t	bno,		/* starting block number to free */
+	xfs_extlen_t	len)		/* length of extent freed */
+{
+	int		error;		/* error value */
+	xfs_mount_t	*mp;		/* file system mount structure */
+	xfs_fsblock_t	sb;		/* summary file block number */
+	xfs_buf_t	*sumbp = NULL;	/* summary file block buffer */
+
+	mp = tp->t_mountp;
+
+	ASSERT(mp->m_rbmip->i_itemp != NULL);
+	ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+	error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+	if (error)
+		return error;
+
+	/*
+	 * Free the range of realtime blocks.
+	 */
+	error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
+	if (error) {
+		return error;
+	}
+	/*
+	 * Mark more blocks free in the superblock.
+	 */
+	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
+	/*
+	 * If we've now freed all the blocks, reset the file sequence
+	 * number to 0.
+	 */
+	if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
+	    mp->m_sb.sb_rextents) {
+		if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+			mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+		*(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+	}
+	return 0;
+}
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index a5b59d9..b7c9aea 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -17,34 +17,26 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
+#include "xfs_inode.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_dinode.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_bmap.h"
-#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_fsops.h"
-#include "xfs_trace.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
 
 /*
  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -249,13 +241,13 @@
 	if (xfs_sb_version_has_pquotino(sbp)) {
 		if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
 			xfs_notice(mp,
-			   "Version 5 of Super block has XFS_OQUOTA bits.\n");
+			   "Version 5 of Super block has XFS_OQUOTA bits.");
 			return XFS_ERROR(EFSCORRUPTED);
 		}
 	} else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
 				XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
 			xfs_notice(mp,
-"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.\n");
+"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
 			return XFS_ERROR(EFSCORRUPTED);
 	}
 
@@ -596,6 +588,11 @@
  * single bit error could clear the feature bit and unused parts of the
  * superblock are supposed to be zero. Hence a non-null crc field indicates that
  * we've potentially lost a feature bit and we should check it anyway.
+ *
+ * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the
+ * last field in V4 secondary superblocks.  So for secondary superblocks,
+ * we are more forgiving, and ignore CRC failures if the primary doesn't
+ * indicate that the fs version is V5.
  */
 static void
 xfs_sb_read_verify(
@@ -616,16 +613,21 @@
 
 		if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
 				      offsetof(struct xfs_sb, sb_crc))) {
-			error = EFSCORRUPTED;
-			goto out_error;
+			/* Only fail bad secondaries on a known V5 filesystem */
+			if (bp->b_bn != XFS_SB_DADDR &&
+			    xfs_sb_version_hascrc(&mp->m_sb)) {
+				error = EFSCORRUPTED;
+				goto out_error;
+			}
 		}
 	}
 	error = xfs_sb_verify(bp, true);
 
 out_error:
 	if (error) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-				     mp, bp->b_addr);
+		if (error != EWRONGFS)
+			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+					     mp, bp->b_addr);
 		xfs_buf_ioerror(bp, error);
 	}
 }
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 6835b44..35061d4 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -699,7 +699,4 @@
 extern void	xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
 extern void	xfs_sb_quota_from_disk(struct xfs_sb *sbp);
 
-extern const struct xfs_buf_ops xfs_sb_buf_ops;
-extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
-
 #endif	/* __XFS_SB_H__ */
diff --git a/fs/xfs/xfs_shared.h b/fs/xfs/xfs_shared.h
new file mode 100644
index 0000000..8c5035a1
--- /dev/null
+++ b/fs/xfs/xfs_shared.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_SHARED_H__
+#define __XFS_SHARED_H__
+
+/*
+ * Definitions shared between kernel and userspace that don't fit into
+ * any other shared header file.
+ */
+struct xfs_ifork;
+struct xfs_buf;
+struct xfs_buf_ops;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_inode;
+
+/*
+ * Buffer verifier operations are widely used, including userspace tools
+ */
+extern const struct xfs_buf_ops xfs_agf_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
+extern const struct xfs_buf_ops xfs_agfl_buf_ops;
+extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
+extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ops;
+extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_sb_buf_ops;
+extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+
+/*
+ * Transaction types.  Used to distinguish types of buffers. These never reach
+ * the log.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE	1
+#define XFS_TRANS_SETATTR_SIZE		2
+#define XFS_TRANS_INACTIVE		3
+#define XFS_TRANS_CREATE		4
+#define XFS_TRANS_CREATE_TRUNC		5
+#define XFS_TRANS_TRUNCATE_FILE		6
+#define XFS_TRANS_REMOVE		7
+#define XFS_TRANS_LINK			8
+#define XFS_TRANS_RENAME		9
+#define XFS_TRANS_MKDIR			10
+#define XFS_TRANS_RMDIR			11
+#define XFS_TRANS_SYMLINK		12
+#define XFS_TRANS_SET_DMATTRS		13
+#define XFS_TRANS_GROWFS		14
+#define XFS_TRANS_STRAT_WRITE		15
+#define XFS_TRANS_DIOSTRAT		16
+/* 17 was XFS_TRANS_WRITE_SYNC */
+#define	XFS_TRANS_WRITEID		18
+#define	XFS_TRANS_ADDAFORK		19
+#define	XFS_TRANS_ATTRINVAL		20
+#define	XFS_TRANS_ATRUNCATE		21
+#define	XFS_TRANS_ATTR_SET		22
+#define	XFS_TRANS_ATTR_RM		23
+#define	XFS_TRANS_ATTR_FLAG		24
+#define	XFS_TRANS_CLEAR_AGI_BUCKET	25
+#define XFS_TRANS_QM_SBCHANGE		26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1		27
+#define XFS_TRANS_DUMMY2		28
+#define XFS_TRANS_QM_QUOTAOFF		29
+#define XFS_TRANS_QM_DQALLOC		30
+#define XFS_TRANS_QM_SETQLIM		31
+#define XFS_TRANS_QM_DQCLUSTER		32
+#define XFS_TRANS_QM_QINOCREATE		33
+#define XFS_TRANS_QM_QUOTAOFF_END	34
+#define XFS_TRANS_SB_UNIT		35
+#define XFS_TRANS_FSYNC_TS		36
+#define	XFS_TRANS_GROWFSRT_ALLOC	37
+#define	XFS_TRANS_GROWFSRT_ZERO		38
+#define	XFS_TRANS_GROWFSRT_FREE		39
+#define	XFS_TRANS_SWAPEXT		40
+#define	XFS_TRANS_SB_COUNT		41
+#define	XFS_TRANS_CHECKPOINT		42
+#define	XFS_TRANS_ICREATE		43
+#define	XFS_TRANS_TYPE_MAX		43
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+#define XFS_TRANS_TYPES \
+	{ XFS_TRANS_SETATTR_NOT_SIZE,	"SETATTR_NOT_SIZE" }, \
+	{ XFS_TRANS_SETATTR_SIZE,	"SETATTR_SIZE" }, \
+	{ XFS_TRANS_INACTIVE,		"INACTIVE" }, \
+	{ XFS_TRANS_CREATE,		"CREATE" }, \
+	{ XFS_TRANS_CREATE_TRUNC,	"CREATE_TRUNC" }, \
+	{ XFS_TRANS_TRUNCATE_FILE,	"TRUNCATE_FILE" }, \
+	{ XFS_TRANS_REMOVE,		"REMOVE" }, \
+	{ XFS_TRANS_LINK,		"LINK" }, \
+	{ XFS_TRANS_RENAME,		"RENAME" }, \
+	{ XFS_TRANS_MKDIR,		"MKDIR" }, \
+	{ XFS_TRANS_RMDIR,		"RMDIR" }, \
+	{ XFS_TRANS_SYMLINK,		"SYMLINK" }, \
+	{ XFS_TRANS_SET_DMATTRS,	"SET_DMATTRS" }, \
+	{ XFS_TRANS_GROWFS,		"GROWFS" }, \
+	{ XFS_TRANS_STRAT_WRITE,	"STRAT_WRITE" }, \
+	{ XFS_TRANS_DIOSTRAT,		"DIOSTRAT" }, \
+	{ XFS_TRANS_WRITEID,		"WRITEID" }, \
+	{ XFS_TRANS_ADDAFORK,		"ADDAFORK" }, \
+	{ XFS_TRANS_ATTRINVAL,		"ATTRINVAL" }, \
+	{ XFS_TRANS_ATRUNCATE,		"ATRUNCATE" }, \
+	{ XFS_TRANS_ATTR_SET,		"ATTR_SET" }, \
+	{ XFS_TRANS_ATTR_RM,		"ATTR_RM" }, \
+	{ XFS_TRANS_ATTR_FLAG,		"ATTR_FLAG" }, \
+	{ XFS_TRANS_CLEAR_AGI_BUCKET,	"CLEAR_AGI_BUCKET" }, \
+	{ XFS_TRANS_QM_SBCHANGE,	"QM_SBCHANGE" }, \
+	{ XFS_TRANS_QM_QUOTAOFF,	"QM_QUOTAOFF" }, \
+	{ XFS_TRANS_QM_DQALLOC,		"QM_DQALLOC" }, \
+	{ XFS_TRANS_QM_SETQLIM,		"QM_SETQLIM" }, \
+	{ XFS_TRANS_QM_DQCLUSTER,	"QM_DQCLUSTER" }, \
+	{ XFS_TRANS_QM_QINOCREATE,	"QM_QINOCREATE" }, \
+	{ XFS_TRANS_QM_QUOTAOFF_END,	"QM_QOFF_END" }, \
+	{ XFS_TRANS_SB_UNIT,		"SB_UNIT" }, \
+	{ XFS_TRANS_FSYNC_TS,		"FSYNC_TS" }, \
+	{ XFS_TRANS_GROWFSRT_ALLOC,	"GROWFSRT_ALLOC" }, \
+	{ XFS_TRANS_GROWFSRT_ZERO,	"GROWFSRT_ZERO" }, \
+	{ XFS_TRANS_GROWFSRT_FREE,	"GROWFSRT_FREE" }, \
+	{ XFS_TRANS_SWAPEXT,		"SWAPEXT" }, \
+	{ XFS_TRANS_SB_COUNT,		"SB_COUNT" }, \
+	{ XFS_TRANS_CHECKPOINT,		"CHECKPOINT" }, \
+	{ XFS_TRANS_ICREATE,		"ICREATE" }, \
+	{ XFS_TRANS_DUMMY1,		"DUMMY1" }, \
+	{ XFS_TRANS_DUMMY2,		"DUMMY2" }, \
+	{ XLOG_UNMOUNT_REC_TYPE,	"UNMOUNT" }
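+
+/*
+ * This table feeds __print_symbolic() in the tracing code; a hypothetical
+ * tracepoint rendering the transaction type symbolically would do:
+ *
+ *	TP_printk("dev %d:%d type %s",
+ *		  MAJOR(__entry->dev), MINOR(__entry->dev),
+ *		  __print_symbolic(__entry->type, XFS_TRANS_TYPES))
+ */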
+
+/*
+ * This structure is used to track log items associated with
+ * a transaction.  It points to the log item and keeps some
+ * flags to track the state of the log item.  It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+struct xfs_log_item_desc {
+	struct xfs_log_item	*lid_item;
+	struct list_head	lid_trans;
+	unsigned char		lid_flags;
+};
+
+#define XFS_LID_DIRTY		0x1
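+
+/*
+ * A typical walk of a transaction's log item descriptors (a sketch of the
+ * pattern used in xfs_trans.c):
+ *
+ *	struct xfs_log_item_desc *lidp;
+ *
+ *	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+ *		if (!(lidp->lid_flags & XFS_LID_DIRTY))
+ *			continue;
+ *		... process lidp->lid_item ...
+ *	}
+ */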
+
+/* log size calculation functions */
+int	xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
+int	xfs_log_calc_minimum_size(struct xfs_mount *);
+
+
+/*
+ * Values for t_flags.
+ */
+#define	XFS_TRANS_DIRTY		0x01	/* something needs to be logged */
+#define	XFS_TRANS_SB_DIRTY	0x02	/* superblock is modified */
+#define	XFS_TRANS_PERM_LOG_RES	0x04	/* xact took a permanent log res */
+#define	XFS_TRANS_SYNC		0x08	/* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY	0x10	/* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE	0x20    /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT	0x40	/* Transaction has elevated writer
+					   count in superblock */
+/*
+ * Values for call flags parameter.
+ */
+#define	XFS_TRANS_RELEASE_LOG_RES	0x4
+#define	XFS_TRANS_ABORT			0x8
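+
+/*
+ * These flags are passed to xfs_trans_commit()/xfs_trans_cancel(); e.g.
+ * aborting a dirty transaction that holds a permanent log reservation (as
+ * done elsewhere in this series):
+ *
+ *	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ */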
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define	XFS_TRANS_SB_ICOUNT		0x00000001
+#define	XFS_TRANS_SB_IFREE		0x00000002
+#define	XFS_TRANS_SB_FDBLOCKS		0x00000004
+#define	XFS_TRANS_SB_RES_FDBLOCKS	0x00000008
+#define	XFS_TRANS_SB_FREXTENTS		0x00000010
+#define	XFS_TRANS_SB_RES_FREXTENTS	0x00000020
+#define	XFS_TRANS_SB_DBLOCKS		0x00000040
+#define	XFS_TRANS_SB_AGCOUNT		0x00000080
+#define	XFS_TRANS_SB_IMAXPCT		0x00000100
+#define	XFS_TRANS_SB_REXTSIZE		0x00000200
+#define	XFS_TRANS_SB_RBMBLOCKS		0x00000400
+#define	XFS_TRANS_SB_RBLOCKS		0x00000800
+#define	XFS_TRANS_SB_REXTENTS		0x00001000
+#define	XFS_TRANS_SB_REXTSLOG		0x00002000
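+
+/*
+ * These are passed as the "field" argument to xfs_trans_mod_sb(); e.g.
+ * accounting freed blocks back to the superblock (illustrative call):
+ *
+ *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)blocks_freed);
+ */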
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer reference count
+ * values.  This determines how hard the buffer cache tries to hold onto the
+ * buffer.
+ */
+#define	XFS_AGF_REF		4
+#define	XFS_AGI_REF		4
+#define	XFS_AGFL_REF		3
+#define	XFS_INO_BTREE_REF	3
+#define	XFS_ALLOC_BTREE_REF	2
+#define	XFS_BMAP_BTREE_REF	2
+#define	XFS_DIR_BTREE_REF	2
+#define	XFS_INO_REF		2
+#define	XFS_ATTR_BTREE_REF	1
+#define	XFS_DQUOT_REF		1
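+
+/*
+ * These feed the buffer LRU via xfs_buf_set_ref(); e.g. after reading an
+ * AGF header (a sketch of the pattern in xfs_read_agf()):
+ *
+ *	xfs_buf_set_ref(bp, XFS_AGF_REF);
+ */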
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define	XFS_ICHGTIME_MOD	0x1	/* data fork modification timestamp */
+#define	XFS_ICHGTIME_CHG	0x2	/* inode field change timestamp */
+#define	XFS_ICHGTIME_CREATE	0x4	/* inode create timestamp */
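+
+/*
+ * These may be OR'ed together and passed to xfs_trans_ichgtime(), e.g.:
+ *
+ *	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ */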
+
+
+/*
+ * Symlink decoding/encoding functions
+ */
+int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
+int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+			uint32_t size, struct xfs_buf *bp);
+bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+			uint32_t size, struct xfs_buf *bp);
+void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
+				 struct xfs_inode *ip, struct xfs_ifork *ifp);
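+
+/*
+ * For example (illustrative), sizing the remote-format symlink blocks for a
+ * given path length:
+ *
+ *	fsblocks = xfs_symlink_blocks(mp, pathlen);
+ */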
+
+#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 15188cc..f317488 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -17,34 +17,26 @@
  */
 
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_fsops.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_trans_priv.h"
-#include "xfs_filestream.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_extfree_item.h"
 #include "xfs_mru_cache.h"
@@ -52,6 +44,9 @@
 #include "xfs_icache.h"
 #include "xfs_trace.h"
 #include "xfs_icreate_item.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
+#include "xfs_quota.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
@@ -918,7 +913,7 @@
 	struct super_block	*sb = mp->m_super;
 
 	if (down_read_trylock(&sb->s_umount)) {
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, jiffies);
 		up_read(&sb->s_umount);
 	}
 }
@@ -946,10 +941,6 @@
 
 	XFS_STATS_INC(vn_reclaim);
 
-	/* bad inode, get out here ASAP */
-	if (is_bad_inode(inode))
-		goto out_reclaim;
-
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
 
 	/*
@@ -965,7 +956,6 @@
 	 * this more efficiently than we can here, so simply let background
 	 * reclaim tear down all inodes.
 	 */
-out_reclaim:
 	xfs_inode_set_reclaim_tag(ip);
 }
 
@@ -1165,7 +1155,7 @@
  * Note: xfs_log_quiesce() stops background log work - the callers must ensure
  * it is started again when appropriate.
  */
-void
+static void
 xfs_quiesce_attr(
 	struct xfs_mount	*mp)
 {
@@ -1246,7 +1236,7 @@
 			 */
 #if 0
 			xfs_info(mp,
-		"mount option \"%s\" not supported for remount\n", p);
+		"mount option \"%s\" not supported for remount", p);
 			return -EINVAL;
 #else
 			break;
@@ -1491,10 +1481,6 @@
 		error = ENOENT;
 		goto out_unmount;
 	}
-	if (is_bad_inode(root)) {
-		error = EINVAL;
-		goto out_unmount;
-	}
 	sb->s_root = d_make_root(root);
 	if (!sb->s_root) {
 		error = ENOMEM;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index f622a97..14e58f2 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -17,31 +17,31 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* ----- Kernel only functions below ----- */
 STATIC int
@@ -424,8 +424,7 @@
  */
 STATIC int
 xfs_inactive_symlink_rmt(
-	xfs_inode_t	*ip,
-	xfs_trans_t	**tpp)
+	struct xfs_inode *ip)
 {
 	xfs_buf_t	*bp;
 	int		committed;
@@ -437,11 +436,9 @@
 	xfs_mount_t	*mp;
 	xfs_bmbt_irec_t	mval[XFS_SYMLINK_MAPS];
 	int		nmaps;
-	xfs_trans_t	*ntp;
 	int		size;
 	xfs_trans_t	*tp;
 
-	tp = *tpp;
 	mp = ip->i_mount;
 	ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
 	/*
@@ -453,6 +450,16 @@
 	 */
 	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
 
+	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
 	/*
 	 * Lock the inode, fix the size, and join it to the transaction.
 	 * Hold it so in the normal path, we still have it locked for
@@ -471,7 +478,7 @@
 	error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
 				mval, &nmaps, 0);
 	if (error)
-		goto error0;
+		goto error_trans_cancel;
 	/*
 	 * Invalidate the block(s). No validation is done.
 	 */
@@ -481,22 +488,24 @@
 			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
 		if (!bp) {
 			error = ENOMEM;
-			goto error1;
+			goto error_bmap_cancel;
 		}
 		xfs_trans_binval(tp, bp);
 	}
 	/*
 	 * Unmap the dead block(s) to the free_list.
 	 */
-	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
-			&first_block, &free_list, &done)))
-		goto error1;
+	error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+			    &first_block, &free_list, &done);
+	if (error)
+		goto error_bmap_cancel;
 	ASSERT(done);
 	/*
 	 * Commit the first transaction.  This logs the EFI and the inode.
 	 */
-	if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
-		goto error1;
+	error = xfs_bmap_finish(&tp, &free_list, &committed);
+	if (error)
+		goto error_bmap_cancel;
 	/*
 	 * The transaction must have been committed, since there were
 	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
@@ -511,26 +520,13 @@
 	xfs_trans_ijoin(tp, ip, 0);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	/*
-	 * Get a new, empty transaction to return to our caller.
-	 */
-	ntp = xfs_trans_dup(tp);
-	/*
 	 * Commit the transaction containing extent freeing and EFDs.
-	 * If we get an error on the commit here or on the reserve below,
-	 * we need to unlock the inode since the new transaction doesn't
-	 * have the inode attached.
 	 */
-	error = xfs_trans_commit(tp, 0);
-	tp = ntp;
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 	if (error) {
 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
-		goto error0;
+		goto error_unlock;
 	}
-	/*
-	 * transaction commit worked ok so we can drop the extra ticket
-	 * reference that we gained in xfs_trans_dup()
-	 */
-	xfs_log_ticket_put(tp->t_ticket);
 
 	/*
 	 * Remove the memory for extent descriptions (just bookkeeping).
@@ -538,23 +534,16 @@
 	if (ip->i_df.if_bytes)
 		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
 	ASSERT(ip->i_df.if_bytes == 0);
-	/*
-	 * Put an itruncate log reservation in the new transaction
-	 * for our caller.
-	 */
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
-	if (error) {
-		ASSERT(XFS_FORCED_SHUTDOWN(mp));
-		goto error0;
-	}
 
-	xfs_trans_ijoin(tp, ip, 0);
-	*tpp = tp;
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return 0;
 
- error1:
+error_bmap_cancel:
 	xfs_bmap_cancel(&free_list);
- error0:
+error_trans_cancel:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -563,41 +552,46 @@
  */
 int
 xfs_inactive_symlink(
-	struct xfs_inode	*ip,
-	struct xfs_trans	**tp)
+	struct xfs_inode	*ip)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	int			pathlen;
 
 	trace_xfs_inactive_symlink(ip);
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
 	/*
 	 * Zero length symlinks _can_ exist.
 	 */
 	pathlen = (int)ip->i_d.di_size;
-	if (!pathlen)
+	if (!pathlen) {
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		return 0;
+	}
 
 	if (pathlen < 0 || pathlen > MAXPATHLEN) {
 		xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)",
 			 __func__, (unsigned long long)ip->i_ino, pathlen);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		ASSERT(0);
 		return XFS_ERROR(EFSCORRUPTED);
 	}
 
 	if (ip->i_df.if_flags & XFS_IFINLINE) {
 		if (ip->i_df.if_bytes > 0)
 			xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
 					  XFS_DATA_FORK);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		ASSERT(ip->i_df.if_bytes == 0);
 		return 0;
 	}
 
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
 	/* remove the remote symlink */
-	return xfs_inactive_symlink_rmt(ip, tp);
+	return xfs_inactive_symlink_rmt(ip);
 }
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index 99338ba..e75245d 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -22,6 +22,6 @@
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
 		const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp);
+int xfs_inactive_symlink(struct xfs_inode *ip);
 
 #endif /* __XFS_SYMLINK_H */
diff --git a/fs/xfs/xfs_symlink_remote.c b/fs/xfs/xfs_symlink_remote.c
index 01c85e3..bf59a2b 100644
--- a/fs/xfs/xfs_symlink_remote.c
+++ b/fs/xfs/xfs_symlink_remote.c
@@ -19,8 +19,9 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
@@ -30,6 +31,7 @@
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 
 
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 5d7b3e4..dee3279 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -17,19 +17,16 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
@@ -37,6 +34,8 @@
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_buf_item.h"
 #include "xfs_quota.h"
@@ -46,6 +45,7 @@
 #include "xfs_dquot.h"
 #include "xfs_log_recover.h"
 #include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 47910e6..425dfa4 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -31,8 +31,8 @@
 struct xfs_da_node_entry;
 struct xfs_dquot;
 struct xfs_log_item;
-struct xlog_ticket;
 struct xlog;
+struct xlog_ticket;
 struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
@@ -135,6 +135,31 @@
 DEFINE_PERAG_REF_EVENT(xfs_perag_set_eofblocks);
 DEFINE_PERAG_REF_EVENT(xfs_perag_clear_eofblocks);
 
+DECLARE_EVENT_CLASS(xfs_ag_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
+	TP_ARGS(mp, agno),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+	),
+	TP_printk("dev %d:%d agno %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno)
+);
+#define DEFINE_AG_EVENT(name)	\
+DEFINE_EVENT(xfs_ag_class, name,	\
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),	\
+	TP_ARGS(mp, agno))
+
+DEFINE_AG_EVENT(xfs_read_agf);
+DEFINE_AG_EVENT(xfs_alloc_read_agf);
+DEFINE_AG_EVENT(xfs_read_agi);
+DEFINE_AG_EVENT(xfs_ialloc_read_agi);
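+
+/*
+ * Each DEFINE_AG_EVENT() expands to a trace_<name>() call point; e.g. the
+ * AGF read path fires:
+ *
+ *	trace_xfs_read_agf(mp, agno);
+ */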
+
 TRACE_EVENT(xfs_attr_list_node_descend,
 	TP_PROTO(struct xfs_attr_list_context *ctx,
 		 struct xfs_da_node_entry *btree),
@@ -938,6 +963,63 @@
 DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
 DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
 
+DECLARE_EVENT_CLASS(xfs_ail_class,
+	TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
+	TP_ARGS(lip, old_lsn, new_lsn),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(void *, lip)
+		__field(uint, type)
+		__field(uint, flags)
+		__field(xfs_lsn_t, old_lsn)
+		__field(xfs_lsn_t, new_lsn)
+	),
+	TP_fast_assign(
+		__entry->dev = lip->li_mountp->m_super->s_dev;
+		__entry->lip = lip;
+		__entry->type = lip->li_type;
+		__entry->flags = lip->li_flags;
+		__entry->old_lsn = old_lsn;
+		__entry->new_lsn = new_lsn;
+	),
+	TP_printk("dev %d:%d lip 0x%p old lsn %d/%d new lsn %d/%d type %s flags %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->lip,
+		  CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
+		  CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
+		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
+		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
+)
+
+#define DEFINE_AIL_EVENT(name) \
+DEFINE_EVENT(xfs_ail_class, name, \
+	TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), \
+	TP_ARGS(lip, old_lsn, new_lsn))
+DEFINE_AIL_EVENT(xfs_ail_insert);
+DEFINE_AIL_EVENT(xfs_ail_move);
+DEFINE_AIL_EVENT(xfs_ail_delete);
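+
+/*
+ * These fire from the AIL insert/move/delete paths (wired up later in this
+ * series), e.g.:
+ *
+ *	trace_xfs_ail_insert(lip, 0, lsn);
+ */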
+
+TRACE_EVENT(xfs_log_assign_tail_lsn,
+	TP_PROTO(struct xlog *log, xfs_lsn_t new_lsn),
+	TP_ARGS(log, new_lsn),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_lsn_t, new_lsn)
+		__field(xfs_lsn_t, old_lsn)
+		__field(xfs_lsn_t, last_sync_lsn)
+	),
+	TP_fast_assign(
+		__entry->dev = log->l_mp->m_super->s_dev;
+		__entry->new_lsn = new_lsn;
+		__entry->old_lsn = atomic64_read(&log->l_tail_lsn);
+		__entry->last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	),
+	TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, last sync %d/%d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
+		  CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
+		  CYCLE_LSN(__entry->last_sync_lsn), BLOCK_LSN(__entry->last_sync_lsn))
+)
 
 DECLARE_EVENT_CLASS(xfs_file_class,
 	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 5411e01..c812c5c0 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -18,32 +18,21 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
 #include "xfs_quota.h"
-#include "xfs_qm.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
-#include "xfs_trans_space.h"
-#include "xfs_inode_item.h"
-#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_trace.h"
+#include "xfs_error.h"
 
 kmem_zone_t	*xfs_trans_zone;
 kmem_zone_t	*xfs_log_item_desc_zone;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 09cf40b..9b96d35 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -18,10 +18,6 @@
 #ifndef	__XFS_TRANS_H__
 #define	__XFS_TRANS_H__
 
-struct xfs_log_item;
-
-#include "xfs_trans_resv.h"
-
 /* kernel only transaction subsystem defines */
 
 struct xfs_buf;
@@ -77,6 +73,9 @@
 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 };
 
+void	xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
+			  int type, const struct xfs_item_ops *ops);
+
 /*
  * Return values for the iop_push() routines.
  */
@@ -85,18 +84,12 @@
 #define XFS_ITEM_LOCKED		2
 #define XFS_ITEM_FLUSHING	3
 
-/*
- * This is the type of function which can be given to xfs_trans_callback()
- * to be called upon the transaction's commit to disk.
- */
-typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
 
 /*
  * This is the structure maintained for every active transaction.
  */
 typedef struct xfs_trans {
 	unsigned int		t_magic;	/* magic number */
-	xfs_log_callback_t	t_logcb;	/* log callback struct */
 	unsigned int		t_type;		/* transaction type */
 	unsigned int		t_log_res;	/* amt of log space resvd */
 	unsigned int		t_log_count;	/* count for perm log res */
@@ -132,7 +125,6 @@
 	int64_t			t_rextents_delta;/* superblocks rextents chg */
 	int64_t			t_rextslog_delta;/* superblocks rextslog chg */
 	struct list_head	t_items;	/* log item descriptors */
-	xfs_trans_header_t	t_header;	/* header for in-log trans */
 	struct list_head	t_busy;		/* list of busy extents */
 	unsigned long		t_pflags;	/* saved process flags state */
 } xfs_trans_t;
@@ -237,10 +229,16 @@
 					 xfs_fsblock_t,
 					 xfs_extlen_t);
 int		xfs_trans_commit(xfs_trans_t *, uint flags);
+int		xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
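+/*
+ * xfs_trans_roll() commits the current transaction and starts a new one with
+ * the inode rejoined; a typical (illustrative) call pattern is:
+ *
+ *	error = xfs_trans_roll(&tp, ip);
+ *	if (error)
+ *		goto out_cancel;
+ */
+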
 void		xfs_trans_cancel(xfs_trans_t *, int);
 int		xfs_trans_ail_init(struct xfs_mount *);
 void		xfs_trans_ail_destroy(struct xfs_mount *);
 
+void		xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
+				       enum xfs_blft);
+void		xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
+					struct xfs_buf *src_bp);
+
 extern kmem_zone_t	*xfs_trans_zone;
 extern kmem_zone_t	*xfs_log_item_desc_zone;
 
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 21c6d7d..a728735 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -18,15 +18,16 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_trace.h"
 #include "xfs_error.h"
+#include "xfs_log.h"
 
 #ifdef DEBUG
 /*
@@ -658,11 +659,13 @@
 			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
 				continue;
 
+			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
 			xfs_ail_delete(ailp, lip);
 			if (mlip == lip)
 				mlip_changed = 1;
 		} else {
 			lip->li_flags |= XFS_LI_IN_AIL;
+			trace_xfs_ail_insert(lip, 0, lsn);
 		}
 		lip->li_lsn = lsn;
 		list_add(&lip->li_ail, &tmp);
@@ -731,6 +734,7 @@
 			return;
 		}
 
+		trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
 		xfs_ail_delete(ailp, lip);
 		lip->li_flags &= ~XFS_LI_IN_AIL;
 		lip->li_lsn = 0;
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 8c75b8f..c035d11 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -17,17 +17,15 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 54ee3c5..cd2a10e 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -17,23 +17,18 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 
 STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 8d71b16..47978ba 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -17,12 +17,13 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_extfree_item.h"
 
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 53dfe46..1bba7f6 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -17,18 +17,15 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index c52def0..12e86af 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -27,7 +27,6 @@
 
 
 void	xfs_trans_init(struct xfs_mount *);
-int	xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void	xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void	xfs_trans_del_item(struct xfs_log_item *);
 void	xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
index a65a3cc4..d53d9f0 100644
--- a/fs/xfs/xfs_trans_resv.c
+++ b/fs/xfs/xfs_trans_resv.c
@@ -18,27 +18,19 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
-#include "xfs_bmap_util.h"
 #include "xfs_quota.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h
index db14d0c..3e8e797 100644
--- a/fs/xfs/xfs_vnode.h
+++ b/fs/xfs/xfs_vnode.h
@@ -25,14 +25,6 @@
 struct attrlist_cursor_kern;
 
 /*
- * Return values for xfs_inactive.  A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define	VN_INACTIVE_CACHE	0
-#define	VN_INACTIVE_NOCACHE	1
-
-/*
  * Flags for read/write calls - same values as IRIX
  */
 #define IO_ISDIRECT	0x00004		/* bypass page cache */
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index e01f35e..9d47907 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -17,9 +17,13 @@
  */
 
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 1c16f82..d98c670 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -100,7 +100,9 @@
  *      ACPI PM timer
  *      FACS table (Waking vectors and Global Lock)
  */
+#ifndef ACPI_REDUCED_HARDWARE
 #define ACPI_REDUCED_HARDWARE           FALSE
+#endif
 
 /******************************************************************************
  *
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index cf051e0..4e280bd 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -125,8 +125,9 @@
 #define AE_NO_HANDLER                   EXCEP_ENV (0x001A)
 #define AE_OWNER_ID_LIMIT               EXCEP_ENV (0x001B)
 #define AE_NOT_CONFIGURED               EXCEP_ENV (0x001C)
+#define AE_ACCESS                       EXCEP_ENV (0x001D)
 
-#define AE_CODE_ENV_MAX                 0x001C
+#define AE_CODE_ENV_MAX                 0x001D
 
 /*
  * Programmer exceptions
@@ -227,7 +228,7 @@
 	EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
 	EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"),
 	EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"),
-	EXCEP_TXT("AE_NOT_FOUND", "The name was not found in the namespace"),
+	EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"),
 	EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"),
 	EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"),
 	EXCEP_TXT("AE_TYPE", "The object type is incorrect"),
@@ -259,7 +260,8 @@
 	EXCEP_TXT("AE_OWNER_ID_LIMIT",
 		  "There are no more Owner IDs available for ACPI tables or control methods"),
 	EXCEP_TXT("AE_NOT_CONFIGURED",
-		  "The interface is not part of the current subsystem configuration")
+		  "The interface is not part of the current subsystem configuration"),
+	EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index ce08ef7..1f36777 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -46,25 +46,25 @@
 
 /* Method names - these methods can appear anywhere in the namespace */
 
-#define METHOD_NAME__SB_        "_SB_"
-#define METHOD_NAME__HID        "_HID"
-#define METHOD_NAME__CID        "_CID"
-#define METHOD_NAME__UID        "_UID"
-#define METHOD_NAME__SUB        "_SUB"
 #define METHOD_NAME__ADR        "_ADR"
-#define METHOD_NAME__INI        "_INI"
-#define METHOD_NAME__STA        "_STA"
-#define METHOD_NAME__REG        "_REG"
-#define METHOD_NAME__SEG        "_SEG"
-#define METHOD_NAME__BBN        "_BBN"
-#define METHOD_NAME__PRT        "_PRT"
-#define METHOD_NAME__CRS        "_CRS"
-#define METHOD_NAME__PRS        "_PRS"
 #define METHOD_NAME__AEI        "_AEI"
-#define METHOD_NAME__PRW        "_PRW"
-#define METHOD_NAME__SRS        "_SRS"
+#define METHOD_NAME__BBN        "_BBN"
 #define METHOD_NAME__CBA        "_CBA"
+#define METHOD_NAME__CID        "_CID"
+#define METHOD_NAME__CRS        "_CRS"
+#define METHOD_NAME__HID        "_HID"
+#define METHOD_NAME__INI        "_INI"
 #define METHOD_NAME__PLD        "_PLD"
+#define METHOD_NAME__PRS        "_PRS"
+#define METHOD_NAME__PRT        "_PRT"
+#define METHOD_NAME__PRW        "_PRW"
+#define METHOD_NAME__REG        "_REG"
+#define METHOD_NAME__SB_        "_SB_"
+#define METHOD_NAME__SEG        "_SEG"
+#define METHOD_NAME__SRS        "_SRS"
+#define METHOD_NAME__STA        "_STA"
+#define METHOD_NAME__SUB        "_SUB"
+#define METHOD_NAME__UID        "_UID"
 
 /* Method names - these methods must appear at the namespace root */
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index d901982..89c60b0 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -222,7 +222,8 @@
 	u32 power_resources:1;	/* Power resources */
 	u32 inrush_current:1;	/* Serialize Dx->D0 */
 	u32 power_removed:1;	/* Optimize Dx->D0 */
-	u32 reserved:28;
+	u32 ignore_parent:1;	/* Power is independent of parent power state */
+	u32 reserved:27;
 };
 
 struct acpi_device_power_state {
@@ -338,20 +339,6 @@
 	u32 data;
 };
 
-struct acpi_eject_event {
-	struct acpi_device	*device;
-	u32		event;
-};
-
-struct acpi_hp_work {
-	struct work_struct work;
-	acpi_handle handle;
-	u32 type;
-	void *context;
-};
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
-			void (*func)(struct work_struct *work));
-
 extern struct kobject *acpi_kobj;
 extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
 void acpi_bus_private_data_handler(acpi_handle, void *);
@@ -390,7 +377,6 @@
 int acpi_bus_register_driver(struct acpi_driver *driver);
 void acpi_bus_unregister_driver(struct acpi_driver *driver);
 int acpi_bus_scan(acpi_handle handle);
-void acpi_bus_hot_remove_device(void *context);
 void acpi_bus_trim(struct acpi_device *start);
 acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd);
 int acpi_match_device_ids(struct acpi_device *device,
@@ -398,6 +384,9 @@
 int acpi_create_dir(struct acpi_device *);
 void acpi_remove_dir(struct acpi_device *);
 
+typedef void (*acpi_hp_callback)(void *data, u32 src);
+
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src);
 
 /**
  * module_acpi_driver(acpi_driver) - Helper macro for registering an ACPI driver
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 64b8c76..01e6c6d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -77,54 +77,80 @@
 /*
  * OSL Initialization and shutdown primitives
  */
-acpi_status __init acpi_os_initialize(void);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+acpi_status acpi_os_initialize(void);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
 acpi_status acpi_os_terminate(void);
+#endif
 
 /*
  * ACPI Table interfaces
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_root_pointer
 acpi_physical_address acpi_os_get_root_pointer(void);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
 acpi_status
 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
 			    acpi_string * new_val);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
 acpi_status
 acpi_os_table_override(struct acpi_table_header *existing_table,
 		       struct acpi_table_header **new_table);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
 acpi_status
 acpi_os_physical_table_override(struct acpi_table_header *existing_table,
 				acpi_physical_address * new_address,
 				u32 *new_table_length);
+#endif
 
 /*
  * Spinlock primitives
  */
-#ifndef acpi_os_create_lock
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
 acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
 #endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_lock
 void acpi_os_delete_lock(acpi_spinlock handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_lock
 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_lock
 void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
+#endif
 
 /*
  * Semaphore primitives
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_semaphore
 acpi_status
 acpi_os_create_semaphore(u32 max_units,
 			 u32 initial_units, acpi_semaphore * out_handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_semaphore
 acpi_status acpi_os_delete_semaphore(acpi_semaphore handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_semaphore
 acpi_status
 acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal_semaphore
 acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
+#endif
 
 /*
  * Mutex primitives. May be configured to use semaphores instead via
@@ -132,29 +158,48 @@
  */
 #if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE)
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_mutex
 acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_mutex
 void acpi_os_delete_mutex(acpi_mutex handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_mutex
 acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_mutex
 void acpi_os_release_mutex(acpi_mutex handle);
 #endif
 
+#endif
+
 /*
  * Memory allocation and mapping
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
 void *acpi_os_allocate(acpi_size size);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed
+void *acpi_os_allocate_zeroed(acpi_size size);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
 void acpi_os_free(void *memory);
+#endif
 
-void __iomem *acpi_os_map_memory(acpi_physical_address where,
-				acpi_size length);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory
+void *acpi_os_map_memory(acpi_physical_address where, acpi_size length);
+#endif
 
-void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory
+void acpi_os_unmap_memory(void *logical_address, acpi_size size);
+#endif
 
-#ifdef ACPI_FUTURE_USAGE
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
 acpi_status
 acpi_os_get_physical_address(void *logical_address,
 			     acpi_physical_address * physical_address);
@@ -163,117 +208,195 @@
 /*
  * Memory/Object Cache
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_cache
 acpi_status
 acpi_os_create_cache(char *cache_name,
 		     u16 object_size,
 		     u16 max_depth, acpi_cache_t ** return_cache);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_cache
 acpi_status acpi_os_delete_cache(acpi_cache_t * cache);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_purge_cache
 acpi_status acpi_os_purge_cache(acpi_cache_t * cache);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
 void *acpi_os_acquire_object(acpi_cache_t * cache);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_object
 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object);
+#endif
 
 /*
  * Interrupt handlers
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_install_interrupt_handler
 acpi_status
 acpi_os_install_interrupt_handler(u32 interrupt_number,
 				  acpi_osd_handler service_routine,
 				  void *context);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_remove_interrupt_handler
 acpi_status
 acpi_os_remove_interrupt_handler(u32 interrupt_number,
 				 acpi_osd_handler service_routine);
-
-void acpi_os_gpe_count(u32 gpe_number);
-void acpi_os_fixed_event_count(u32 fixed_event_number);
+#endif
 
 /*
  * Threads and Scheduling
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
 acpi_thread_id acpi_os_get_thread_id(void);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_execute
 acpi_status
 acpi_os_execute(acpi_execute_type type,
 		acpi_osd_exec_callback function, void *context);
+#endif
 
-acpi_status
-acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context);
-
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_events_complete
 void acpi_os_wait_events_complete(void);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_sleep
 void acpi_os_sleep(u64 milliseconds);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_stall
 void acpi_os_stall(u32 microseconds);
+#endif
 
 /*
  * Platform and hardware-independent I/O interfaces
  */
-acpi_status acpi_os_read_port(acpi_io_address address, u32 * value, u32 width);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_port
+acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_port
 acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
+#endif
 
 /*
  * Platform and hardware-independent physical memory interfaces
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory
 acpi_status
 acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_memory
 acpi_status
 acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
+#endif
 
 /*
  * Platform and hardware-independent PCI configuration space access
  * Note: Can't use "Register" as a parameter, changed to "Reg" --
  * certain compilers complain.
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_pci_configuration
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
 			       u32 reg, u64 *value, u32 width);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_pci_configuration
 acpi_status
 acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
 				u32 reg, u64 value, u32 width);
+#endif
 
 /*
  * Miscellaneous
  */
-u64 acpi_os_get_timer(void);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+u8 acpi_os_readable(void *pointer, acpi_size length);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+u8 acpi_os_writable(void *pointer, acpi_size length);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_timer
+u64 acpi_os_get_timer(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal
 acpi_status acpi_os_signal(u32 function, void *info);
+#endif
 
 /*
  * Debug print routines
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf
 void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf
 void acpi_os_vprintf(const char *format, va_list args);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
 void acpi_os_redirect_output(void *destination);
+#endif
 
-#ifdef ACPI_FUTURE_USAGE
 /*
  * Debug input
  */
-u32 acpi_os_get_line(char *buffer);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
+acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read);
+#endif
+
+/*
+ * Obtain ACPI table(s)
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+acpi_status
+acpi_os_get_table_by_name(char *signature,
+			  u32 instance,
+			  struct acpi_table_header **table,
+			  acpi_physical_address * address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+acpi_status
+acpi_os_get_table_by_index(u32 index,
+			   struct acpi_table_header **table,
+			   u32 *instance, acpi_physical_address * address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+acpi_status
+acpi_os_get_table_by_address(acpi_physical_address address,
+			     struct acpi_table_header **table);
 #endif
 
 /*
  * Directory manipulation
  */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
 void *acpi_os_open_directory(char *pathname,
 			     char *wildcard_spec, char requested_file_type);
+#endif
 
 /* requested_file_type values */
 
 #define REQUEST_FILE_ONLY                   0
 #define REQUEST_DIR_ONLY                    1
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
 char *acpi_os_get_next_filename(void *dir_handle);
+#endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
 void acpi_os_close_directory(void *dir_handle);
+#endif
 
 #endif				/* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 85bfdbe..d8f9457 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130725
+#define ACPI_CA_VERSION                 0x20130927
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -54,6 +54,7 @@
 #include <acpi/acbuffer.h>
 
 extern u8 acpi_gbl_permanent_mmap;
+extern u32 acpi_rsdt_forced;
 
 /*
 * Globals that are publicly available
@@ -106,39 +107,41 @@
 	static ACPI_INLINE prototype {return(AE_OK);}
 
 #define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
-	static ACPI_INLINE prototype {}
+	static ACPI_INLINE prototype {return;}
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
-extern u32 acpi_rsdt_forced;
 /*
  * Initialization
  */
-acpi_status
+acpi_status __init
 acpi_initialize_tables(struct acpi_table_desc *initial_storage,
 		       u32 initial_table_count, u8 allow_resize);
 
 acpi_status __init acpi_initialize_subsystem(void);
 
-acpi_status acpi_enable_subsystem(u32 flags);
+acpi_status __init acpi_enable_subsystem(u32 flags);
 
-acpi_status acpi_initialize_objects(u32 flags);
+acpi_status __init acpi_initialize_objects(u32 flags);
 
-acpi_status acpi_terminate(void);
+acpi_status __init acpi_terminate(void);
 
 /*
  * Miscellaneous global interfaces
  */
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
 #ifdef ACPI_FUTURE_USAGE
-acpi_status acpi_subsystem_status(void);
+ acpi_status acpi_subsystem_status(void);
 #endif
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
 #endif
 
+acpi_status acpi_get_statistics(struct acpi_statistics *stats);
+
 const char *acpi_format_exception(acpi_status exception);
 
 acpi_status acpi_purge_cached_objects(void);
@@ -159,29 +162,20 @@
 		       acpi_size length, struct acpi_pld_info **return_buffer);
 
 /*
- * ACPI Memory management
- */
-void *acpi_allocate(u32 size);
-
-void *acpi_callocate(u32 size);
-
-void acpi_free(void *address);
-
-/*
  * ACPI table load/unload interfaces
  */
 acpi_status acpi_load_table(struct acpi_table_header *table);
 
 acpi_status acpi_unload_parent_table(acpi_handle object);
 
-acpi_status acpi_load_tables(void);
+acpi_status __init acpi_load_tables(void);
 
 /*
  * ACPI table manipulation interfaces
  */
-acpi_status acpi_reallocate_root_table(void);
+acpi_status __init acpi_reallocate_root_table(void);
 
-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
+acpi_status __init acpi_find_root_pointer(acpi_size *rsdp_address);
 
 acpi_status acpi_unload_table_id(acpi_owner_id id);
 
@@ -193,6 +187,7 @@
 acpi_get_table_with_size(acpi_string signature,
 	       u32 instance, struct acpi_table_header **out_table,
 	       acpi_size *tbl_size);
+
 acpi_status
 acpi_get_table(acpi_string signature,
 	       u32 instance, struct acpi_table_header **out_table);
@@ -280,8 +275,18 @@
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
-				acpi_install_global_event_handler
-				(acpi_gbl_event_handler handler, void *context))
+				acpi_install_sci_handler(acpi_sci_handler
+							 address,
+							 void *context))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_remove_sci_handler(acpi_sci_handler
+							 address))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				 acpi_install_global_event_handler
+				 (acpi_gbl_event_handler handler,
+				  void *context))
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_install_fixed_event_handler(u32
@@ -290,10 +295,12 @@
 								  handler,
 								  void
 								  *context))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_remove_fixed_event_handler(u32 acpi_event,
 								 acpi_event_handler
 								 handler))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_install_gpe_handler(acpi_handle
 							  gpe_device,
@@ -302,6 +309,7 @@
 							  acpi_gpe_handler
 							  address,
 							  void *context))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_remove_gpe_handler(acpi_handle gpe_device,
 							 u32 gpe_number,
@@ -338,6 +346,7 @@
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_acquire_global_lock(u16 timeout,
 							 u32 *handle))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_release_global_lock(u32 handle))
 
@@ -364,6 +373,7 @@
 				acpi_get_event_status(u32 event,
 						      acpi_event_status
 						      *event_status))
+
 /*
  * General Purpose Event (GPE) Interfaces
  */
@@ -394,10 +404,12 @@
 							parent_device,
 							acpi_handle gpe_device,
 							u32 gpe_number))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_set_gpe_wake_mask(acpi_handle gpe_device,
 							u32 gpe_number,
 							u8 action))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_get_gpe_status(acpi_handle gpe_device,
 						     u32 gpe_number,
@@ -419,6 +431,7 @@
 						       *gpe_block_address,
 						       u32 register_count,
 						       u32 interrupt_number))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				 acpi_remove_gpe_block(acpi_handle gpe_device))
 
@@ -493,13 +506,13 @@
  * Sleep/Wake interfaces
  */
 acpi_status
-acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
+acpi_get_sleep_type_data(u8 sleep_state, u8 *slp_typ_a, u8 *slp_typ_b);
 
 acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
 
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+acpi_status acpi_enter_sleep_state(u8 sleep_state);
 
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enter_sleep_state_s4bios(void))
 
 acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
 
@@ -508,7 +521,6 @@
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_set_firmware_waking_vector(u32
 								physical_address))
-
 #if ACPI_MACHINE_WIDTH == 64
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_set_firmware_waking_vector64(u64
@@ -532,50 +544,53 @@
 /*
  * Error/Warning output
  */
+ACPI_PRINTF_LIKE(3)
 void ACPI_INTERNAL_VAR_XFACE
-acpi_error(const char *module_name,
-	   u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_error(const char *module_name, u32 line_number, const char *format, ...);
 
+ACPI_PRINTF_LIKE(4)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_exception(const char *module_name,
-	       u32 line_number,
-	       acpi_status status, const char *format, ...) ACPI_PRINTF_LIKE(4);
+	       u32 line_number, acpi_status status, const char *format, ...);
 
+ACPI_PRINTF_LIKE(3)
 void ACPI_INTERNAL_VAR_XFACE
-acpi_warning(const char *module_name,
-	     u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...);
 
+ACPI_PRINTF_LIKE(3)
 void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name,
-	  u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_info(const char *module_name, u32 line_number, const char *format, ...);
 
+ACPI_PRINTF_LIKE(3)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_bios_error(const char *module_name,
-		u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+		u32 line_number, const char *format, ...);
 
+ACPI_PRINTF_LIKE(3)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_bios_warning(const char *module_name,
-		  u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+		  u32 line_number, const char *format, ...);
 
 /*
  * Debug output
  */
 #ifdef ACPI_DEBUG_OUTPUT
 
+ACPI_PRINTF_LIKE(6)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_debug_print(u32 requested_debug_level,
 		 u32 line_number,
 		 const char *function_name,
 		 const char *module_name,
-		 u32 component_id, const char *format, ...) ACPI_PRINTF_LIKE(6);
+		 u32 component_id, const char *format, ...);
 
+ACPI_PRINTF_LIKE(6)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_debug_print_raw(u32 requested_debug_level,
 		     u32 line_number,
 		     const char *function_name,
 		     const char *module_name,
-		     u32 component_id,
-		     const char *format, ...) ACPI_PRINTF_LIKE(6);
+		     u32 component_id, const char *format, ...);
 #endif
 
 #endif				/* __ACXFACE_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 9b58a8f..9497088 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -146,7 +146,24 @@
 	u8 reserved[3];		/* Reserved, must be zero */
 };
 
-#define ACPI_RSDP_REV0_SIZE     20	/* Size of original ACPI 1.0 RSDP */
+/* Standalone struct for the ACPI 1.0 RSDP */
+
+struct acpi_rsdp_common {
+	char signature[8];
+	u8 checksum;
+	char oem_id[ACPI_OEM_ID_SIZE];
+	u8 revision;
+	u32 rsdt_physical_address;
+};
+
+/* Standalone struct for the extended part of the RSDP (ACPI 2.0+) */
+
+struct acpi_rsdp_extension {
+	u32 length;
+	u64 xsdt_physical_address;
+	u8 extended_checksum;
+	u8 reserved[3];
+};
 
 /*******************************************************************************
  *
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ffaac0e..40f7ed1 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -444,8 +444,8 @@
 };
 
 struct acpi_dmar_pci_path {
-	u8 dev;
-	u8 fn;
+	u8 device;
+	u8 function;
 };
 
 /*
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b748aef..809b1a0 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -299,13 +299,57 @@
 #endif
 
 /*
- * All ACPICA functions that are available to the rest of the kernel are
- * tagged with this macro which can be defined as appropriate for the host.
+ * All ACPICA external functions that are available to the rest of the kernel
+ * are tagged with these macros, which can be defined as appropriate for the
+ * host.
+ *
+ * Notes:
+ * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination
+ * interfaces that may need special processing.
+ * ACPI_EXPORT_SYMBOL is used for all other public external functions.
  */
+#ifndef ACPI_EXPORT_SYMBOL_INIT
+#define ACPI_EXPORT_SYMBOL_INIT(symbol)
+#endif
+
 #ifndef ACPI_EXPORT_SYMBOL
 #define ACPI_EXPORT_SYMBOL(symbol)
 #endif
 
+/*
+ * Compiler/C-library-dependent debug initialization. Used for ACPICA
+ * utilities only.
+ */
+#ifndef ACPI_DEBUG_INITIALIZE
+#define ACPI_DEBUG_INITIALIZE()
+#endif
+
+/*******************************************************************************
+ *
+ * Configuration
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+/*
+ * Memory allocation tracking (used by acpi_exec to detect memory leaks)
+ */
+#define ACPI_MEM_PARAMETERS             _COMPONENT, _acpi_module_name, __LINE__
+#define ACPI_ALLOCATE(a)                acpi_ut_allocate_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a)         acpi_ut_allocate_zeroed_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_FREE(a)                    acpi_ut_free_and_track (a, ACPI_MEM_PARAMETERS)
+#define ACPI_MEM_TRACKING(a)            a
+
+#else
+/*
+ * Normal memory allocation directly via the OS services layer
+ */
+#define ACPI_ALLOCATE(a)                acpi_os_allocate ((acpi_size) (a))
+#define ACPI_ALLOCATE_ZEROED(a)         acpi_os_allocate_zeroed ((acpi_size) (a))
+#define ACPI_FREE(a)                    acpi_os_free (a)
+#define ACPI_MEM_TRACKING(a)
+
+#endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
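+
+/*
+ * Callers use these macros uniformly whether or not tracking is compiled in,
+ * e.g. (illustrative):
+ *
+ *	buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
+ *	if (!buffer)
+ *		return_ACPI_STATUS(AE_NO_MEMORY);
+ *	...
+ *	ACPI_FREE(buffer);
+ */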
+
 /******************************************************************************
  *
  * ACPI Specification constants (Do not change unless the specification changes)
@@ -322,6 +366,7 @@
 #define ACPI_PM1_REGISTER_WIDTH         16
 #define ACPI_PM2_REGISTER_WIDTH         8
 #define ACPI_PM_TIMER_WIDTH             32
+#define ACPI_RESET_REGISTER_WIDTH       8
 
 /* Names within the namespace are 4 bytes long */
 
@@ -474,6 +519,11 @@
 #define ACPI_MOVE_NAME(dest,src)        (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
 #endif
 
+/* Support for the special RSDP signature (8 characters) */
+
+#define ACPI_VALIDATE_RSDP_SIG(a)       (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest)        (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
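+
+/*
+ * For example, a table scanner can validate a candidate mapping (sketch):
+ *
+ *	if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature))
+ *		return (AE_BAD_SIGNATURE);
+ */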
+
 /*******************************************************************************
  *
  * Miscellaneous constants
@@ -886,9 +936,13 @@
 	void *pointer;		/* pointer to buffer */
 };
 
-/* Free a buffer created in an struct acpi_buffer via ACPI_ALLOCATE_LOCAL_BUFFER */
-
-#define ACPI_FREE_BUFFER(b)         ACPI_FREE(b.pointer)
+/*
+ * Free a buffer created in a struct acpi_buffer via ACPI_ALLOCATE_BUFFER.
+ * Note: We use acpi_os_free here because acpi_os_allocate was used to allocate
+ * the buffer. This purposefully bypasses the internal allocation tracking
+ * mechanism (if it is enabled).
+ */
+#define ACPI_FREE_BUFFER(b)         acpi_os_free((b).pointer)
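+
+/*
+ * Typical lifecycle (sketch): let ACPICA allocate the return buffer, then
+ * release it with ACPI_FREE_BUFFER():
+ *
+ *	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ *	status = acpi_evaluate_object(handle, "_STA", NULL, &buffer);
+ *	if (ACPI_SUCCESS(status))
+ *		ACPI_FREE_BUFFER(buffer);
+ */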
 
 /*
  * name_type for acpi_get_name
@@ -927,6 +981,16 @@
 	u32 debug_layer;
 };
 
+/*
+ * System statistics returned by acpi_get_statistics()
+ */
+struct acpi_statistics {
+	u32 sci_count;
+	u32 gpe_count;
+	u32 fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+	u32 method_count;
+};
+
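Assuming acpi_get_statistics() simply fills this structure in one call, as the comment suggests, usage would be:

    struct acpi_statistics stats;

    if (ACPI_SUCCESS(acpi_get_statistics(&stats)))
            pr_info("SCIs %u, GPEs %u, methods run %u\n",
                    stats.sci_count, stats.gpe_count, stats.method_count);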
 /* Table Event Types */
 
 #define ACPI_TABLE_EVENT_LOAD           0x0
@@ -946,6 +1010,9 @@
  * Various handlers and callback procedures
  */
 typedef
+u32 (*acpi_sci_handler) (void *context);
+
+typedef
 void (*acpi_gbl_event_handler) (u32 event_type,
 			       acpi_handle device,
 			       u32 event_number, void *context);
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ef04b36..974d3ef 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -96,10 +96,11 @@
 #endif
 
 /*
- * acpi_bin/acpi_help/acpi_src configuration. All single threaded, with
- * no debug output.
+ * acpi_bin/acpi_dump/acpi_src/acpi_xtract configuration. All single
+ * threaded, with no debug output.
  */
 #if (defined ACPI_BIN_APP)   || \
+	(defined ACPI_DUMP_APP)  || \
 	(defined ACPI_SRC_APP)   || \
 	(defined ACPI_XTRACT_APP)
 #define ACPI_APPLICATION
@@ -147,6 +148,9 @@
 #if defined(_LINUX) || defined(__linux__)
 #include <acpi/platform/aclinux.h>
 
+#elif defined(_APPLE) || defined(__APPLE__)
+#include "acmacosx.h"
+
 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 #include "acfreebsd.h"
 
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 68534ef..28f4f4d 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -50,7 +50,6 @@
 #define ACPI_USE_DO_WHILE_0
 #define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
 
-
 #ifdef __KERNEL__
 
 #include <linux/string.h>
@@ -58,11 +57,13 @@
 #include <linux/ctype.h>
 #include <linux/sched.h>
 #include <linux/atomic.h>
-#include <asm/div64.h>
-#include <asm/acpi.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
-#include <asm/current.h>
+#ifdef EXPORT_ACPI_INTERFACES
+#include <linux/export.h>
+#endif
+#include <asm/acpi.h>
 
 /* Host-dependent types and defines for in-kernel ACPICA */
 
@@ -74,7 +75,7 @@
 #define acpi_spinlock                       spinlock_t *
 #define acpi_cpu_flags                      unsigned long
 
-#else /* !__KERNEL__ */
+#else				/* !__KERNEL__ */
 
 #include <stdarg.h>
 #include <string.h>
@@ -87,7 +88,7 @@
 #define ACPI_FLUSH_CPU_CACHE()
 #define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
 
-#if defined(__ia64__) || defined(__x86_64__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(__aarch64__)
 #define ACPI_MACHINE_WIDTH          64
 #define COMPILER_DEPENDENT_INT64    long
 #define COMPILER_DEPENDENT_UINT64   unsigned long
@@ -102,21 +103,35 @@
 #define __cdecl
 #endif
 
-#endif /* __KERNEL__ */
+#endif				/* __KERNEL__ */
 
 /* Linux uses GCC */
 
 #include <acpi/platform/acgcc.h>
 
 #ifdef __KERNEL__
+
+/*
+ * FIXME: Inclusion of actypes.h
+ * The Linux kernel needs this before defining the inline OSL interfaces,
+ * as actypes.h must be included to find the ACPICA type definitions.
+ * Since, from ACPICA's perspective, actypes.h should be included after
+ * acenv.h (aclinux.h), this leads to an inclusion mis-ordering issue.
+ */
 #include <acpi/actypes.h>
+
 /*
  * Overrides for in-kernel ACPICA
  */
-static inline acpi_thread_id acpi_os_get_thread_id(void)
-{
-	return (acpi_thread_id)(unsigned long)current;
-}
+acpi_status __init acpi_os_initialize(void);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+
+acpi_status acpi_os_terminate(void);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
+
+/*
+ * Memory allocation/deallocation
+ */
 
 /*
  * The irqs_disabled() check is for resume from RAM.
@@ -126,25 +141,45 @@
  */
 static inline void *acpi_os_allocate(acpi_size size)
 {
-	return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
 }
 
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
+
+/* Use native linux version of acpi_os_allocate_zeroed */
+
 static inline void *acpi_os_allocate_zeroed(acpi_size size)
 {
-	return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
 }
 
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed
+#define USE_NATIVE_ALLOCATE_ZEROED
+
+static inline void acpi_os_free(void *memory)
+{
+	kfree(memory);
+}
+
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
+
 static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
 {
 	return kmem_cache_zalloc(cache,
-		irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
 }
 
-#define ACPI_ALLOCATE(a)        acpi_os_allocate(a)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
-#define ACPI_FREE(a)            kfree(a)
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
+
+static inline acpi_thread_id acpi_os_get_thread_id(void)
+{
+	return (acpi_thread_id) (unsigned long)current;
+}
+
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
 
 #ifndef CONFIG_PREEMPT
+
 /*
  * Used within ACPICA to show where it is safe to preempt execution
  * when CONFIG_PREEMPT=n
@@ -154,6 +189,7 @@
 		if (!irqs_disabled()) \
 			cond_resched(); \
 	} while (0)
+
 #endif
 
 /*
@@ -163,17 +199,50 @@
  * all locks to the name of the argument of acpi_os_create_lock(), which
  * prevents lockdep from reporting false positives for ACPICA locks.
  */
-#define acpi_os_create_lock(__handle)				\
-({								\
-	spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));	\
-								\
-	if (lock) {						\
-		*(__handle) = lock;				\
-		spin_lock_init(*(__handle));			\
-	}							\
-	lock ? AE_OK : AE_NO_MEMORY;				\
-})
+#define acpi_os_create_lock(__handle) \
+	({ \
+		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+		if (lock) { \
+			*(__handle) = lock; \
+			spin_lock_init(*(__handle)); \
+		} \
+		lock ? AE_OK : AE_NO_MEMORY; \
+	})
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
 
-#endif /* __KERNEL__ */
+void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_size length);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory
 
-#endif /* __ACLINUX_H__ */
+void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory
+
+/*
+ * OSL interfaces used by debugger/disassembler
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+
+/*
+ * OSL interfaces used by utilities
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
+
+/*
+ * OSL interfaces added by Linux
+ */
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
+
+void acpi_os_gpe_count(u32 gpe_number);
+
+void acpi_os_fixed_event_count(u32 fixed_event_number);
+
+#endif				/* __KERNEL__ */
+
+#endif				/* __ACLINUX_H__ */
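A note on the ACPI_USE_ALTERNATE_PROTOTYPE_* convention used throughout this hunk: each define tells the generic OSL header that the host already supplied that interface, so the default declaration must be compiled out. The expected guard on the generic side has roughly this shape (a sketch, not the verbatim ACPICA header):

    #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
    void acpi_os_free(void *memory);        /* default prototype */
    #endif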
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 66096d0..6eb1d3c 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -199,6 +199,7 @@
 struct acpi_processor {
 	acpi_handle handle;
 	u32 acpi_id;
+	u32 apic_id;
 	u32 id;
 	u32 pblk;
 	int performance_platform_limit;
@@ -224,7 +225,6 @@
 	} piix4;
 };
 
-extern void acpi_processor_load_module(struct acpi_processor *pr);
 extern int acpi_processor_preregister_performance(struct
 						  acpi_processor_performance
 						  __percpu *performance);
@@ -314,6 +314,8 @@
 
 /* in processor_core.c */
 void acpi_processor_set_pdc(acpi_handle handle);
+int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int apic_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_throttling.c */
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 71c7780..998d4d5 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -7,6 +7,9 @@
  * @addr: The address to base the search on
  * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
  */
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
 		size, unsigned long offset);
@@ -18,6 +21,9 @@
  * @addr: The address to base the search on
  * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit.
+ * If no bits are zero, returns @size.
  */
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
 		long size, unsigned long offset);
@@ -28,9 +34,10 @@
 /**
  * find_first_bit - find the first set bit in a memory region
  * @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The maximum number of bits to search
  *
  * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
  */
 extern unsigned long find_first_bit(const unsigned long *addr,
 				    unsigned long size);
@@ -38,9 +45,10 @@
 /**
  * find_first_zero_bit - find the first cleared bit in a memory region
  * @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The maximum number of bits to search
  *
  * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
  */
 extern unsigned long find_first_zero_bit(const unsigned long *addr,
 					 unsigned long size);
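The "returns @size" contract documented above is what terminates the canonical scan loop; a minimal sketch, assuming bitmap is a valid unsigned long array and size its length in bits:

    unsigned long bit;

    for (bit = find_first_bit(bitmap, size);
         bit < size;                        /* @size means: nothing left */
         bit = find_next_bit(bitmap, size, bit + 1)) {
            /* ... handle set bit 'bit' ... */
    }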
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index aea9e45..14909b0 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -53,7 +53,7 @@
 
 #elif defined(CONFIG_SPARSEMEM)
 /*
- * Note: section's mem_map is encorded to reflect its start_pfn.
+ * Note: section's mem_map is encoded to reflect its start_pfn.
  * section[i].section_mem_map == mem_map's address - start_pfn;
  */
 #define __page_to_pfn(pg)					\
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index b685d3b..3d1a3af 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -32,6 +32,6 @@
 
 #endif
 
-extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
+extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
 
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 83e2c31..bc2121f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -473,6 +473,7 @@
 #define KERNEL_CTORS()	. = ALIGN(8);			   \
 			VMLINUX_SYMBOL(__ctors_start) = .; \
 			*(.ctors)			   \
+			*(.init_array)			   \
 			VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b46fb45..1d4a920 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -150,6 +150,7 @@
 #define DRIVER_BUS_PCI 0x1
 #define DRIVER_BUS_PLATFORM 0x2
 #define DRIVER_BUS_USB 0x3
+#define DRIVER_BUS_HOST1X 0x4
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -412,7 +413,12 @@
 
 /** File private data */
 struct drm_file {
-	int authenticated;
+	unsigned always_authenticated :1;
+	unsigned authenticated :1;
+	unsigned is_master :1; /* this file private is a master for a minor */
+	/* true when the client has asked us to expose stereo 3D mode flags */
+	unsigned stereo_allowed :1;
+
 	struct pid *pid;
 	kuid_t uid;
 	drm_magic_t magic;
@@ -429,10 +435,8 @@
 	struct file *filp;
 	void *driver_priv;
 
-	int is_master; /* this file private is a master for a minor */
 	struct drm_master *master; /* master this node is currently associated with
 				      N.B. not always minor->master */
-
 	/**
 	 * fbs - List of framebuffers associated with this file.
 	 *
@@ -667,8 +671,6 @@
 	uint32_t pending_read_domains;
 	uint32_t pending_write_domain;
 
-	void *driver_private;
-
 	/**
 	 * dma_buf - dma buf associated with this GEM object
 	 *
@@ -834,12 +836,17 @@
 	/**
 	 * Called by vblank timestamping code.
 	 *
-	 * Return the current display scanout position from a crtc.
+	 * Return the current display scanout position from a crtc, and an
+	 * optional accurate ktime_get timestamp of when position was measured.
 	 *
 	 * \param dev  DRM device.
 	 * \param crtc Id of the crtc to query.
 	 * \param *vpos Target location for current vertical scanout position.
 	 * \param *hpos Target location for current horizontal scanout position.
+	 * \param *stime Target location for timestamp taken immediately before
+	 *               scanout position query. Can be NULL to skip timestamp.
+	 * \param *etime Target location for timestamp taken immediately after
+	 *               scanout position query. Can be NULL to skip timestamp.
 	 *
 	 * Returns vpos as a positive number while in active scanout area.
 	 * Returns vpos as a negative number inside vblank, counting the number
@@ -856,7 +863,8 @@
 	 *
 	 */
 	int (*get_scanout_position) (struct drm_device *dev, int crtc,
-				     int *vpos, int *hpos);
+				     int *vpos, int *hpos, ktime_t *stime,
+				     ktime_t *etime);
 
 	/**
 	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
@@ -922,7 +930,6 @@
 	 *
 	 * Returns 0 on success.
 	 */
-	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -997,27 +1004,6 @@
 #define DRM_MINOR_CONTROL 2
 #define DRM_MINOR_RENDER 3
 
-
-/**
- * debugfs node list. This structure represents a debugfs file to
- * be created by the drm core
- */
-struct drm_debugfs_list {
-	const char *name; /** file name */
-	int (*show)(struct seq_file*, void*); /** show callback */
-	u32 driver_features; /**< Required driver features for this entry */
-};
-
-/**
- * debugfs node structure. This structure represents a debugfs file.
- */
-struct drm_debugfs_node {
-	struct list_head list;
-	struct drm_minor *minor;
-	struct drm_debugfs_list *debugfs_ent;
-	struct dentry *dent;
-};
-
 /**
  * Info file list entry. This structure represents a debugfs or proc file to
  * be created by the drm core
@@ -1046,7 +1032,7 @@
 	int index;			/**< Minor device number */
 	int type;                       /**< Control or render */
 	dev_t device;			/**< Device number for mknod */
-	struct device kdev;		/**< Linux device */
+	struct device *kdev;		/**< Linux device */
 	struct drm_device *dev;
 
 	struct dentry *debugfs_root;
@@ -1081,6 +1067,19 @@
 	struct drm_event_vblank event;
 };
 
+struct drm_vblank_crtc {
+	wait_queue_head_t queue;	/**< VBLANK wait queue */
+	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
+	atomic_t count;			/**< number of VBLANK interrupts */
+	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
+	u32 last;			/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	u32 last_wait;			/* Last vblank seqno waited per CRTC */
+	unsigned int inmodeset;		/* Display driver is setting mode */
+	bool enabled;			/* so we don't call enable more than
+					   once per disable */
+};
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -1105,25 +1104,16 @@
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */
 
-	/** \name Performance counters */
-	/*@{ */
-	unsigned long counters;
-	enum drm_stat_type types[15];
-	atomic_t counts[15];
-	/*@} */
-
 	struct list_head filelist;
 
 	/** \name Memory management */
 	/*@{ */
 	struct list_head maplist;	/**< Linked list of regions */
-	int map_count;			/**< Number of mappable regions */
 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 
 	/** \name Context handle management */
 	/*@{ */
 	struct list_head ctxlist;	/**< Linked list of context handles */
-	int ctx_count;			/**< Number of context handles */
 	struct mutex ctxlist_mutex;	/**< For ctxlist */
 
 	struct idr ctx_idr;
@@ -1139,12 +1129,11 @@
 
 	/** \name Context support */
 	/*@{ */
-	int irq_enabled;		/**< True if irq handler is enabled */
+	bool irq_enabled;		/**< True if irq handler is enabled */
 	__volatile__ long context_flag;	/**< Context swapping flag */
 	int last_context;		/**< Last current context */
 	/*@} */
 
-	struct work_struct work;
 	/** \name VBLANK IRQ support */
 	/*@{ */
 
@@ -1154,20 +1143,13 @@
 	 * Once the modeset ioctl *has* been called though, we can safely
 	 * disable them when unused.
 	 */
-	int vblank_disable_allowed;
+	bool vblank_disable_allowed;
 
-	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
-	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
-	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+	/* array of size num_crtcs */
+	struct drm_vblank_crtc *vblank;
+
 	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
-	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
-	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
-					/* for wraparound handling */
-	int *vblank_enabled;            /* so we don't call enable more than
-					   once per disable */
-	int *vblank_inmodeset;          /* Display driver is setting mode */
-	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
 	struct timer_list vblank_disable_timer;
 
 	u32 max_vblank_count;           /**< size of vblank counter register */
@@ -1184,8 +1166,6 @@
 
 	struct device *dev;             /**< Device structure */
 	struct pci_dev *pdev;		/**< PCI device structure */
-	int pci_vendor;			/**< PCI vendor id */
-	int pci_device;			/**< PCI device id */
 #ifdef __alpha__
 	struct pci_controller *hose;
 #endif
@@ -1303,6 +1283,8 @@
 			struct drm_file *file_priv);
 extern int drm_getcap(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
+extern int drm_setclientcap(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 extern int drm_setversion(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 extern int drm_noop(struct drm_device *dev, void *data,
@@ -1454,7 +1436,6 @@
 extern void drm_master_put(struct drm_master **master);
 
 extern void drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
 extern void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
 extern unsigned int drm_rnodes;
@@ -1474,10 +1455,11 @@
 #if defined(CONFIG_DEBUG_FS)
 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 			    struct dentry *root);
-extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
-				    struct dentry *root, struct drm_minor *minor);
-extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
-                                    struct drm_minor *minor);
+extern int drm_debugfs_create_files(const struct drm_info_list *files,
+				    int count, struct dentry *root,
+				    struct drm_minor *minor);
+extern int drm_debugfs_remove_files(const struct drm_info_list *files,
+				    int count, struct drm_minor *minor);
 extern int drm_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
@@ -1556,8 +1538,6 @@
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-					    size_t size);
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
 void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,10 +1625,11 @@
 
 #include <drm/drm_mem_util.h>
 
-extern int drm_fill_in_dev(struct drm_device *dev,
-			   const struct pci_device_id *ent,
-			   struct drm_driver *driver);
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+				 struct device *parent);
+void drm_dev_free(struct drm_device *dev);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
 /*@}*/
 
 /* PCI section */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f4995..f32c5cd 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@
     MODE_ONE_HEIGHT,    /* only one height is supported */
     MODE_ONE_SIZE,      /* only one resolution is supported */
     MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_NO_STEREO,	/* stereo modes not supported */
    MODE_UNVERIFIED = -3, /* mode needs to be reverified */
     MODE_BAD = -2,	/* unspecified reason */
     MODE_ERROR	= -1	/* error condition */
@@ -124,7 +125,10 @@
 	.vscan = (vs), .flags = (f), \
 	.base.type = DRM_MODE_OBJECT_MODE
 
-#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define CRTC_INTERLACE_HALVE_V	(1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE	(1 << 1) /* adjust timings for stereo modes */
+
+#define DRM_MODE_FLAG_3D_MAX	DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
 struct drm_display_mode {
 	/* Header */
@@ -155,8 +159,7 @@
 	int height_mm;
 
 	/* Actual mode we give to hw */
-	int clock_index;
-	int synth_clock;
+	int crtc_clock;		/* in KHz */
 	int crtc_hdisplay;
 	int crtc_hblank_start;
 	int crtc_hblank_end;
@@ -180,6 +183,11 @@
 	int hsync;		/* in kHz */
 };
 
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+	return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
 enum drm_connector_status {
 	connector_status_connected = 1,
 	connector_status_disconnected = 2,
@@ -587,7 +595,7 @@
  */
 struct drm_connector {
 	struct drm_device *dev;
-	struct device kdev;
+	struct device *kdev;
 	struct device_attribute *attr;
 	struct list_head head;
 
@@ -597,6 +605,7 @@
 	int connector_type_id;
 	bool interlace_allowed;
 	bool doublescan_allowed;
+	bool stereo_allowed;
 	struct list_head modes; /* list of modes on this connector */
 
 	enum drm_connector_status status;
@@ -964,6 +973,7 @@
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
+extern struct edid *drm_edid_duplicate(const struct edid *edid);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
-extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
 extern int drm_mode_width(const struct drm_display_mode *mode);
 extern int drm_mode_height(const struct drm_display_mode *mode);
 
@@ -1108,6 +1118,8 @@
 				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern void drm_set_preferred_mode(struct drm_connector *connector,
+				   int hpref, int vpref);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
@@ -1135,4 +1147,21 @@
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
 extern const char *drm_get_format_name(uint32_t format);
 
+/* Helpers */
+static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+	uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+	return mo ? obj_to_crtc(mo) : NULL;
+}
+
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+	uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+	return mo ? obj_to_encoder(mo) : NULL;
+}
+
 #endif /* __DRM_CRTC_H__ */
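The new helpers fold the usual drm_mode_object_find()-plus-cast idiom into one call; a sketch of a typical lookup, with crtc_id standing in for a userspace-supplied object ID:

    struct drm_crtc *crtc = drm_crtc_find(dev, crtc_id);

    if (!crtc)
            return -ENOENT;         /* no CRTC object with that ID */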
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index f43d556..ef6ad3a 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -163,7 +163,7 @@
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
-extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
 extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ae8dbfb..a92c375 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -77,10 +77,10 @@
 #define DP_DOWNSTREAMPORT_PRESENT           0x005
 # define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
 # define DP_DWN_STRM_PORT_TYPE_MASK         0x06
-/* 00b = DisplayPort */
-/* 01b = Analog */
-/* 10b = TMDS or HDMI */
-/* 11b = Other */
+# define DP_DWN_STRM_PORT_TYPE_DP           (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG       (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS         (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER        (3 << 1)
 # define DP_FORMAT_CONVERSION               (1 << 3)
 # define DP_DETAILED_CAP_INFO_AVAILABLE	    (1 << 4) /* DPI */
 
@@ -333,20 +333,20 @@
 
 
 #define DP_LINK_STATUS_SIZE	   6
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
 #define DP_RECEIVER_CAP_SIZE		0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@
 #define EDP_VSC_PSR_CRC_VALUES_VALID	(1<<2)
 
 static inline int
-drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
 	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
 }
 
 static inline u8
-drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
 	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
 }
 
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_DPCD_REV] >= 0x11 &&
+		(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
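drm_dp_enhanced_frame_cap() bundles the two-part test (DPCD revision 1.1 or later plus the enhanced-frame bit); a sketch, assuming dpcd[] holds the first DP_RECEIVER_CAP_SIZE bytes read from DPCD offset 0 over AUX:

    u8 dpcd[DP_RECEIVER_CAP_SIZE];
    bool enhanced_framing;

    /* ... fill dpcd[] from the sink's receiver capability field ... */
    enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);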
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3d79e51..87578c1 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -261,6 +261,18 @@
 	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 3abfa6e..97d5497 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -49,6 +49,10 @@
 #define    SNB_GMCH_GGMS_MASK	0x3
 #define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
 #define    SNB_GMCH_GMS_MASK    0x1f
+#define    BDW_GMCH_GGMS_SHIFT	6
+#define    BDW_GMCH_GGMS_MASK	0x3
+#define    BDW_GMCH_GMS_SHIFT   8
+#define    BDW_GMCH_GMS_MASK    0xff
 
 #define I830_GMCH_CTRL			0x52
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 8a10f5c..940ece4 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,4 +208,29 @@
 #define INTEL_VLV_D_IDS(info) \
 	INTEL_VGA_DEVICE(0x0155, info)
 
+#define _INTEL_BDW_M(gt, id, info) \
+	INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
+#define _INTEL_BDW_D(gt, id, info) \
+	INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
+
+#define _INTEL_BDW_M_IDS(gt, info) \
+	_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
+	_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
+	_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
+	_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
+
+#define _INTEL_BDW_D_IDS(gt, info) \
+	_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
+	_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
+
+#define INTEL_BDW_M_IDS(info) \
+	_INTEL_BDW_M_IDS(1, info), \
+	_INTEL_BDW_M_IDS(2, info), \
+	_INTEL_BDW_M_IDS(3, info)
+
+#define INTEL_BDW_D_IDS(info) \
+	_INTEL_BDW_D_IDS(1, info), \
+	_INTEL_BDW_D_IDS(2, info), \
+	_INTEL_BDW_D_IDS(3, info)
+
 #endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 706b962..d1f61bf 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -62,7 +62,7 @@
 extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
 
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
  */
@@ -94,6 +94,15 @@
 {
 	return 0;
 }
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+				   struct device *dev)
+{
+	return -ENOMEM;
+}
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+				      struct device *dev)
+{
+}
 #endif
 
 #endif
diff --git a/include/dt-bindings/mfd/as3722.h b/include/dt-bindings/mfd/as3722.h
new file mode 100644
index 0000000..0e69256
--- /dev/null
+++ b/include/dt-bindings/mfd/as3722.h
@@ -0,0 +1,52 @@
+/*
+ * This header provides macros for ams AS3722 device bindings.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_AS3722_H__
+#define __DT_BINDINGS_AS3722_H__
+
+/* External control pins */
+#define AS3722_EXT_CONTROL_PIN_ENABLE1 1
+#define AS3722_EXT_CONTROL_PIN_ENABLE2 2
+#define AS3722_EXT_CONTROL_PIN_ENABLE3 3
+
+/* Interrupt numbers for AS3722 */
+#define AS3722_IRQ_LID			0
+#define AS3722_IRQ_ACOK			1
+#define AS3722_IRQ_ENABLE1		2
+#define AS3722_IRQ_OCCUR_ALARM_SD0	3
+#define AS3722_IRQ_ONKEY_LONG_PRESS	4
+#define AS3722_IRQ_ONKEY		5
+#define AS3722_IRQ_OVTMP		6
+#define AS3722_IRQ_LOWBAT		7
+#define AS3722_IRQ_SD0_LV		8
+#define AS3722_IRQ_SD1_LV		9
+#define AS3722_IRQ_SD2_LV		10
+#define AS3722_IRQ_PWM1_OV_PROT		11
+#define AS3722_IRQ_PWM2_OV_PROT		12
+#define AS3722_IRQ_ENABLE2		13
+#define AS3722_IRQ_SD6_LV		14
+#define AS3722_IRQ_RTC_REP		15
+#define AS3722_IRQ_RTC_ALARM		16
+#define AS3722_IRQ_GPIO1		17
+#define AS3722_IRQ_GPIO2		18
+#define AS3722_IRQ_GPIO3		19
+#define AS3722_IRQ_GPIO4		20
+#define AS3722_IRQ_GPIO5		21
+#define AS3722_IRQ_WATCHDOG		22
+#define AS3722_IRQ_ENABLE3		23
+#define AS3722_IRQ_TEMP_SD0_SHUTDOWN	24
+#define AS3722_IRQ_TEMP_SD1_SHUTDOWN	25
+#define AS3722_IRQ_TEMP_SD2_SHUTDOWN	26
+#define AS3722_IRQ_TEMP_SD0_ALARM	27
+#define AS3722_IRQ_TEMP_SD1_ALARM	28
+#define AS3722_IRQ_TEMP_SD6_ALARM	29
+#define AS3722_IRQ_OCCUR_ALARM_SD6	30
+#define AS3722_IRQ_ADC			31
+
+#endif /* __DT_BINDINGS_AS3722_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c30bac8..b0972c4 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,7 +116,7 @@
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int *pcpu);
+int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
 int acpi_unmap_lsapic(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
@@ -294,59 +294,52 @@
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
-	char *uuid_str; /* uuid string */
+	char *uuid_str;			/* UUID string */
 	int rev;
-	struct acpi_buffer cap; /* arg2/arg3 */
-	struct acpi_buffer ret; /* free by caller if success */
+	struct acpi_buffer cap;		/* list of DWORD capabilities */
+	struct acpi_buffer ret;		/* free by caller if success */
 };
 
-#define OSC_QUERY_TYPE			0
-#define OSC_SUPPORT_TYPE 		1
-#define OSC_CONTROL_TYPE		2
-
-/* _OSC DW0 Definition */
-#define OSC_QUERY_ENABLE		1
-#define OSC_REQUEST_ERROR		2
-#define OSC_INVALID_UUID_ERROR		4
-#define OSC_INVALID_REVISION_ERROR	8
-#define OSC_CAPABILITIES_MASK_ERROR	16
-
 acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT		1
-#define OSC_SB_PPC_OST_SUPPORT		2
-#define OSC_SB_PR3_SUPPORT		4
-#define OSC_SB_HOTPLUG_OST_SUPPORT	8
-#define OSC_SB_APEI_SUPPORT		16
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD				0	/* DWORD 1 */
+#define OSC_SUPPORT_DWORD			1	/* DWORD 2 */
+#define OSC_CONTROL_DWORD			2	/* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE			0x00000001  /* input */
+#define OSC_REQUEST_ERROR			0x00000002  /* return */
+#define OSC_INVALID_UUID_ERROR			0x00000004  /* return */
+#define OSC_INVALID_REVISION_ERROR		0x00000008  /* return */
+#define OSC_CAPABILITIES_MASK_ERROR		0x00000010  /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT			0x00000001
+#define OSC_SB_PPC_OST_SUPPORT			0x00000002
+#define OSC_SB_PR3_SUPPORT			0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT		0x00000008
+#define OSC_SB_APEI_SUPPORT			0x00000010
+#define OSC_SB_CPC_SUPPORT			0x00000020
 
 extern bool osc_sb_apei_support_acked;
 
-/* PCI defined _OSC bits */
-/* _OSC DW1 Definition (OS Support Fields) */
-#define OSC_EXT_PCI_CONFIG_SUPPORT		1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT 		2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT	4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT		8
-#define OSC_MSI_SUPPORT				16
-#define OSC_PCI_SUPPORT_MASKS			0x1f
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT		0x00000001
+#define OSC_PCI_ASPM_SUPPORT			0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT		0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT		0x00000008
+#define OSC_PCI_MSI_SUPPORT			0x00000010
+#define OSC_PCI_SUPPORT_MASKS			0x0000001f
 
-/* _OSC DW1 Definition (OS Control Fields) */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL	1
-#define OSC_SHPC_NATIVE_HP_CONTROL 		2
-#define OSC_PCI_EXPRESS_PME_CONTROL		4
-#define OSC_PCI_EXPRESS_AER_CONTROL		8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL	16
-
-#define OSC_PCI_CONTROL_MASKS 	(OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 	\
-				OSC_SHPC_NATIVE_HP_CONTROL | 		\
-				OSC_PCI_EXPRESS_PME_CONTROL |		\
-				OSC_PCI_EXPRESS_AER_CONTROL |		\
-				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-#define OSC_PCI_NATIVE_HOTPLUG	(OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |	\
-				OSC_SHPC_NATIVE_HP_CONTROL)
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL	0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL		0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL		0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL		0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL	0x00000010
+#define OSC_PCI_CONTROL_MASKS			0x0000001f
 
 extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
 					     u32 *mask, u32 req);
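The renamed DWORD indexes address into the capabilities buffer handed to acpi_run_osc() through struct acpi_osc_context; a sketch of a query-only platform-wide _OSC setup (the UUID string is illustrative):

    u32 capbuf[3];
    struct acpi_osc_context context = {
            .uuid_str = "0811b06e-4a27-44f9-8d60-3cbbc22e7b48",
            .rev = 1,
            .cap.length = sizeof(capbuf),
            .cap.pointer = capbuf,
    };

    capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;     /* query, don't commit */
    capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* what the OS supports */
    capbuf[OSC_CONTROL_DWORD] = 0;                  /* no control requested */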
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e2..63b5eff 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -21,7 +21,7 @@
 #include <linux/resource.h>
 #include <linux/regulator/consumer.h>
 
-#define AMBA_NR_IRQS	2
+#define AMBA_NR_IRQS	9
 #define AMBA_CID	0xb105f00d
 
 struct clk;
@@ -30,7 +30,6 @@
 	struct device		dev;
 	struct resource		res;
 	struct clk		*pclk;
-	u64			dma_mask;
 	unsigned int		periphid;
 	unsigned int		irq[AMBA_NR_IRQS];
 };
@@ -131,7 +130,6 @@
 struct amba_device name##_device = {				\
 	.dev = __AMBA_DEV(busid, data, ~0ULL),			\
 	.res = DEFINE_RES_MEM(base, SZ_4K),			\
-	.dma_mask = ~0ULL,					\
 	.irq = irqs,						\
 	.periphid = id,						\
 }
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 62d9303..0ddb5c0 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -40,7 +40,7 @@
 #define UART010_LCRL		0x10	/* Line control register, low byte. */
 #define UART010_CR		0x14	/* Control register. */
 #define UART01x_FR		0x18	/* Flag register (Read only). */
-#define UART010_IIR		0x1C	/* Interrupt indentification register (Read). */
+#define UART010_IIR		0x1C	/* Interrupt identification register (Read). */
 #define UART010_ICR		0x1C	/* Interrupt clear register (Write). */
 #define ST_UART011_LCRH_RX	0x1C    /* Rx line control register. */
 #define UART01x_ILPR		0x20	/* IrDA low power counter register. */
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index cf573c2..8013a45 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -13,9 +13,6 @@
 struct file *anon_inode_getfile(const char *name,
 				const struct file_operations *fops,
 				void *priv, int flags);
-struct file *anon_inode_getfile_private(const char *name,
-				const struct file_operations *fops,
-				void *priv, int flags);
 int anon_inode_getfd(const char *name, const struct file_operations *fops,
 		     void *priv, int flags);
 
diff --git a/include/linux/ata.h b/include/linux/ata.h
index bf4c69c..f2f4d8d 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -219,6 +219,7 @@
 	ATA_CMD_IDLE		= 0xE3, /* place in idle power mode */
 	ATA_CMD_EDD		= 0x90,	/* execute device diagnostic */
 	ATA_CMD_DOWNLOAD_MICRO  = 0x92,
+	ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93,
 	ATA_CMD_NOP		= 0x00,
 	ATA_CMD_FLUSH		= 0xE7,
 	ATA_CMD_FLUSH_EXT	= 0xEA,
@@ -268,12 +269,15 @@
 	ATA_CMD_WRITE_LOG_EXT	= 0x3F,
 	ATA_CMD_READ_LOG_DMA_EXT = 0x47,
 	ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+	ATA_CMD_TRUSTED_NONDATA	= 0x5B,
 	ATA_CMD_TRUSTED_RCV	= 0x5C,
 	ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
 	ATA_CMD_TRUSTED_SND	= 0x5E,
 	ATA_CMD_TRUSTED_SND_DMA = 0x5F,
 	ATA_CMD_PMP_READ	= 0xE4,
+	ATA_CMD_PMP_READ_DMA	= 0xE9,
 	ATA_CMD_PMP_WRITE	= 0xE8,
+	ATA_CMD_PMP_WRITE_DMA	= 0xEB,
 	ATA_CMD_CONF_OVERLAY	= 0xB1,
 	ATA_CMD_SEC_SET_PASS	= 0xF1,
 	ATA_CMD_SEC_UNLOCK	= 0xF2,
@@ -292,6 +296,9 @@
 	ATA_CMD_CFA_TRANS_SECT	= 0x87,
 	ATA_CMD_CFA_ERASE	= 0xC0,
 	ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
+	ATA_CMD_REQ_SENSE_DATA  = 0x0B,
+	ATA_CMD_SANITIZE_DEVICE = 0xB4,
+
 	/* marked obsolete in the ATA/ATAPI-7 spec */
 	ATA_CMD_RESTORE		= 0x10,
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5f66d51..2481900 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -109,7 +109,7 @@
 #endif
 };
 
-int bdi_init(struct backing_dev_info *bdi);
+int __must_check bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);
 
 __printf(3, 4)
@@ -117,7 +117,7 @@
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
-int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 			enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 53b7794..5f9cd96 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -100,6 +100,9 @@
 	/* The framebuffer notifier block */
 	struct notifier_block fb_notif;
 
+	/* list entry of all registered backlight devices */
+	struct list_head entry;
+
 	struct device dev;
 };
 
@@ -123,6 +126,7 @@
 					struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
 				   enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index e8112ae..fd8bf32 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -56,11 +56,12 @@
 
 /* Function parameter for binfmt->coredump */
 struct coredump_params {
-	siginfo_t *siginfo;
+	const siginfo_t *siginfo;
 	struct pt_regs *regs;
 	struct file *file;
 	unsigned long limit;
 	unsigned long mm_flags;
+	loff_t written;
 };
 
 /*
@@ -99,9 +100,6 @@
 extern void would_dump(struct linux_binprm *, struct file *);
 
 extern int suid_dumpable;
-#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
-#define SUID_DUMP_USER		1	/* Dump as user of process */
-#define SUID_DUMP_ROOT		2	/* Dump as root */
 
 /* Stack area protections */
 #define EXSTACK_DEFAULT   0	/* Whatever the arch defaults to */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec48bac..060ff69 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,6 +218,7 @@
 };
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
+extern void bio_trim(struct bio *bio, int offset, int size);
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
@@ -419,6 +420,8 @@
 	bl->head = bl->tail = NULL;
 }
 
+#define BIO_EMPTY_LIST	{ NULL, NULL }
+
 #define bio_list_for_each(bio, bl) \
 	for (bio = (bl)->head; bio; bio = bio->bi_next)
 
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index bd0c459..abc9ca7 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,8 +4,11 @@
 
 #ifdef	__KERNEL__
 #define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
 #define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
 #define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
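BIT_ULL() exists because BIT() shifts 1UL, which is only 32 bits wide on 32-bit kernels; for bit indexes of 32 and above only the ULL forms are well defined:

    u64 mask = BIT_ULL(40);         /* 0x0000010000000000 on any arch */
    /* BIT(40) on a 32-bit kernel would shift 1UL past its width: UB. */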
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 0000000..ab0e9b2
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,183 @@
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+
+struct blk_mq_tags;
+
+struct blk_mq_cpu_notifier {
+	struct list_head list;
+	void *data;
+	void (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_hw_ctx {
+	struct {
+		spinlock_t		lock;
+		struct list_head	dispatch;
+	} ____cacheline_aligned_in_smp;
+
+	unsigned long		state;		/* BLK_MQ_S_* flags */
+	struct delayed_work	delayed_work;
+
+	unsigned long		flags;		/* BLK_MQ_F_* flags */
+
+	struct request_queue	*queue;
+	unsigned int		queue_num;
+
+	void			*driver_data;
+
+	unsigned int		nr_ctx;
+	struct blk_mq_ctx	**ctxs;
+	unsigned int 		nr_ctx_map;
+	unsigned long		*ctx_map;
+
+	struct request		**rqs;
+	struct list_head	page_list;
+	struct blk_mq_tags	*tags;
+
+	unsigned long		queued;
+	unsigned long		run;
+#define BLK_MQ_MAX_DISPATCH_ORDER	10
+	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+	unsigned int		queue_depth;
+	unsigned int		numa_node;
+	unsigned int		cmd_size;	/* per-request extra data */
+
+	struct blk_mq_cpu_notifier	cpu_notifier;
+	struct kobject		kobj;
+};
+
+struct blk_mq_reg {
+	struct blk_mq_ops	*ops;
+	unsigned int		nr_hw_queues;
+	unsigned int		queue_depth;
+	unsigned int		reserved_tags;
+	unsigned int		cmd_size;	/* per-request extra data */
+	int			numa_node;
+	unsigned int		timeout;
+	unsigned int		flags;		/* BLK_MQ_F_* */
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
+typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
+typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
+struct blk_mq_ops {
+	/*
+	 * Queue request
+	 */
+	queue_rq_fn		*queue_rq;
+
+	/*
+	 * Map to specific hardware queue
+	 */
+	map_queue_fn		*map_queue;
+
+	/*
+	 * Called on request timeout
+	 */
+	rq_timed_out_fn		*timeout;
+
+	/*
+	 * Override for hctx allocations (should probably go)
+	 */
+	alloc_hctx_fn		*alloc_hctx;
+	free_hctx_fn		*free_hctx;
+
+	/*
+	 * Called when the block layer side of a hardware queue has been
+	 * set up, allowing the driver to allocate/init matching structures.
+	 * Ditto for exit/teardown.
+	 */
+	init_hctx_fn		*init_hctx;
+	exit_hctx_fn		*exit_hctx;
+};
+
+enum {
+	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
+	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
+	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */
+
+	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
+	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
+	BLK_MQ_F_SHOULD_IPI	= 1 << 2,
+
+	BLK_MQ_S_STOPPED	= 1 << 0,
+
+	BLK_MQ_MAX_DEPTH	= 2048,
+};
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+void blk_mq_free_queue(struct request_queue *);
+int blk_mq_register_disk(struct gendisk *);
+void blk_mq_unregister_disk(struct gendisk *);
+void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+
+void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_run_queues(struct request_queue *q, bool async);
+void blk_mq_free_request(struct request *rq);
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
+
+void blk_mq_end_io(struct request *rq, int error);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_stop_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q);
+
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request.
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+	return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+	return (void *) rq + sizeof(*rq);
+}
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
+					       unsigned int tag)
+{
+	return hctx->rqs[tag];
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i)				\
+	for ((i) = 0, hctx = (q)->queue_hw_ctx[0];			\
+	     (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+
+#define queue_for_each_ctx(q, ctx, i)					\
+	for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);		\
+	     (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+
+#define hctx_for_each_ctx(hctx, ctx, i)					\
+	for ((i) = 0, ctx = (hctx)->ctxs[0];				\
+	     (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+
+#define blk_ctx_sum(q, sum)						\
+({									\
+	struct blk_mq_ctx *__x;						\
+	unsigned int __ret = 0, __i;					\
+									\
+	queue_for_each_ctx((q), __x, __i)				\
+		__ret += sum;						\
+	__ret;								\
+})
+
+#endif
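The pdu helpers above rely on the cmd_size contract: per-request driver data is co-allocated immediately behind each struct request. A hedged sketch of a driver queue_rq handler using it (my_cmd and my_queue_rq are hypothetical driver names; cmd_size in struct blk_mq_reg would be sizeof(struct my_cmd)):

    struct my_cmd {
            u32 opcode;
    };

    static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
            struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);  /* data behind rq */

            cmd->opcode = rq_data_dir(rq);
            /* ... issue to hardware; complete later via blk_mq_end_io() */
            return BLK_MQ_RQ_QUEUE_OK;
    }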
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fa1abeb..238ef0e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,19 +178,20 @@
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_KERNEL, 		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
+	__REQ_END,		/* last of chain of requests */
 	__REQ_NR_BITS,		/* stops here */
 };
 
-#define REQ_WRITE		(1 << __REQ_WRITE)
-#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC		(1 << __REQ_SYNC)
-#define REQ_META		(1 << __REQ_META)
-#define REQ_PRIO		(1 << __REQ_PRIO)
-#define REQ_DISCARD		(1 << __REQ_DISCARD)
-#define REQ_WRITE_SAME		(1 << __REQ_WRITE_SAME)
-#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
+#define REQ_WRITE		(1ULL << __REQ_WRITE)
+#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC		(1ULL << __REQ_SYNC)
+#define REQ_META		(1ULL << __REQ_META)
+#define REQ_PRIO		(1ULL << __REQ_PRIO)
+#define REQ_DISCARD		(1ULL << __REQ_DISCARD)
+#define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
+#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -206,28 +207,29 @@
 #define REQ_NOMERGE_FLAGS \
 	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 
-#define REQ_RAHEAD		(1 << __REQ_RAHEAD)
-#define REQ_THROTTLED		(1 << __REQ_THROTTLED)
+#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
+#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)
 
-#define REQ_SORTED		(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
-#define REQ_FUA			(1 << __REQ_FUA)
-#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
-#define REQ_STARTED		(1 << __REQ_STARTED)
-#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
-#define REQ_QUEUED		(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
-#define REQ_FAILED		(1 << __REQ_FAILED)
-#define REQ_QUIET		(1 << __REQ_QUIET)
-#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
-#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
-#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_FLUSH		(1 << __REQ_FLUSH)
-#define REQ_FLUSH_SEQ		(1 << __REQ_FLUSH_SEQ)
-#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
-#define REQ_SECURE		(1 << __REQ_SECURE)
-#define REQ_KERNEL		(1 << __REQ_KERNEL)
-#define REQ_PM			(1 << __REQ_PM)
+#define REQ_SORTED		(1ULL << __REQ_SORTED)
+#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
+#define REQ_FUA			(1ULL << __REQ_FUA)
+#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
+#define REQ_STARTED		(1ULL << __REQ_STARTED)
+#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
+#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
+#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
+#define REQ_FAILED		(1ULL << __REQ_FAILED)
+#define REQ_QUIET		(1ULL << __REQ_QUIET)
+#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
+#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
+#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
+#define REQ_FLUSH		(1ULL << __REQ_FLUSH)
+#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
+#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
+#define REQ_SECURE		(1ULL << __REQ_SECURE)
+#define REQ_KERNEL		(1ULL << __REQ_KERNEL)
+#define REQ_PM			(1ULL << __REQ_PM)
+#define REQ_END			(1ULL << __REQ_END)
 
 #endif /* __LINUX_BLK_TYPES_H */
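The blanket 1 → 1ULL conversion pairs with cmd_flags becoming u64 in the blkdev.h hunk below; the one-line reason:

    u64 ok = 1ULL << 32;            /* 0x100000000, well defined */
    /* int bad = 1 << 32; */        /* undefined: shift >= width of int */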
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e6f765..f26ec20f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
 #include <linux/major.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/pagemap.h>
@@ -94,12 +95,19 @@
  * as well!
  */
 struct request {
-	struct list_head queuelist;
-	struct call_single_data csd;
+	union {
+		struct list_head queuelist;
+		struct llist_node ll_list;
+	};
+	union {
+		struct call_single_data csd;
+		struct work_struct mq_flush_data;
+	};
 
 	struct request_queue *q;
+	struct blk_mq_ctx *mq_ctx;
 
-	unsigned int cmd_flags;
+	u64 cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
@@ -160,8 +168,6 @@
 
 	unsigned short ioprio;
 
-	int ref_count;
-
 	void *special;		/* opaque pointer available for LLD use */
 	char *buffer;		/* kaddr of the current segment if available */
 
@@ -215,6 +221,8 @@
 
 #include <linux/elevator.h>
 
+struct blk_queue_ctx;
+
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
@@ -313,6 +321,18 @@
 	dma_drain_needed_fn	*dma_drain_needed;
 	lld_busy_fn		*lld_busy_fn;
 
+	struct blk_mq_ops	*mq_ops;
+
+	unsigned int		*mq_map;
+
+	/* sw queues */
+	struct blk_mq_ctx	*queue_ctx;
+	unsigned int		nr_queues;
+
+	/* hw dispatch queues */
+	struct blk_mq_hw_ctx	**queue_hw_ctx;
+	unsigned int		nr_hw_queues;
+
 	/*
 	 * Dispatch queue sorting
 	 */
@@ -361,6 +381,11 @@
 	 */
 	struct kobject kobj;
 
+	/*
+	 * mq queue kobject
+	 */
+	struct kobject mq_kobj;
+
 #ifdef CONFIG_PM_RUNTIME
 	struct device		*dev;
 	int			rpm_status;
@@ -425,7 +450,13 @@
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-	struct request		flush_rq;
+	union {
+		struct request	flush_rq;
+		struct {
+			spinlock_t mq_flush_lock;
+			struct work_struct mq_flush_work;
+		};
+	};
 
 	struct mutex		sysfs_lock;
 
@@ -437,14 +468,14 @@
 	struct bsg_class_device bsg_dev;
 #endif
 
-#ifdef CONFIG_BLK_CGROUP
-	struct list_head	all_q_node;
-#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
 	struct rcu_head		rcu_head;
+	wait_queue_head_t	mq_freeze_wq;
+	struct percpu_counter	mq_usage_counter;
+	struct list_head	all_q_node;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -467,6 +498,7 @@
 #define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -539,6 +571,7 @@
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
+#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -570,7 +603,7 @@
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
+#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)
 
 static inline unsigned int blk_queue_cluster(struct request_queue *q)
 {
@@ -1013,6 +1046,7 @@
 struct blk_plug {
 	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
+	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
 };
 #define BLK_MAX_REQUEST_COUNT 16
@@ -1050,7 +1084,10 @@
 {
 	struct blk_plug *plug = tsk->plug;
 
-	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+	return plug &&
+		(!list_empty(&plug->list) ||
+		 !list_empty(&plug->mq_list) ||
+		 !list_empty(&plug->cb_list));
 }
 
 /*
@@ -1325,6 +1362,7 @@
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030..afc1343 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
 #include <linux/relay.h>
 #include <linux/compat.h>
 #include <uapi/linux/blktrace_api.h>
+#include <linux/list.h>
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
 
@@ -23,6 +24,7 @@
 	struct dentry *dir;
 	struct dentry *dropped_file;
 	struct dentry *msg_file;
+	struct list_head running_list;
 	atomic_t dropped;
 };
 
@@ -87,7 +89,7 @@
 #ifdef CONFIG_COMPAT
 
 struct compat_blk_user_trace_setup {
-	char name[32];
+	char name[BLKTRACE_BDEV_SIZE];
 	u16 act_mask;
 	u32 buf_size;
 	u32 buf_nr;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3561d30..39c1d94 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -612,11 +612,6 @@
 	int subsys_id;
 	int disabled;
 	int early_init;
-	/*
-	 * True if this subsys uses ID. ID is not available before cgroup_init()
-	 * (not available in early_init time.)
-	 */
-	bool use_id;
 
 	/*
 	 * If %false, this subsystem is properly hierarchical -
@@ -642,9 +637,6 @@
 	 */
 	struct cgroupfs_root *root;
 	struct list_head sibling;
-	/* used when use_id == true */
-	struct idr idr;
-	spinlock_t id_lock;
 
 	/* list of cftype_sets */
 	struct list_head cftsets;
@@ -875,35 +867,6 @@
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 
-/*
- * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
- * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
- * CSS ID is assigned at cgroup allocation (create) automatically
- * and removed when subsys calls free_css_id() function. This is because
- * the lifetime of cgroup_subsys_state is subsys's matter.
- *
- * Looking up and scanning function should be called under rcu_read_lock().
- * Taking cgroup_mutex is not necessary for following calls.
- * But the css returned by this routine can be "not populated yet" or "being
- * destroyed". The caller should check css and cgroup's status.
- */
-
-/*
- * Typically Called at ->destroy(), or somewhere the subsys frees
- * cgroup_subsys_state.
- */
-void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
-
-/* Find a cgroup_subsys_state which has given ID */
-
-struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
-
-/* Returns true if root is ancestor of cg */
-bool css_is_ancestor(struct cgroup_subsys_state *cg,
-		     const struct cgroup_subsys_state *root);
-
-/* Get id and depth of css */
-unsigned short css_id(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
 					 struct cgroup_subsys *ss);
 
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
index 98e892e..a0f9280 100644
--- a/include/linux/cmdline-parser.h
+++ b/include/linux/cmdline-parser.h
@@ -8,6 +8,8 @@
 #define CMDLINEPARSEH
 
 #include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
 
 /* partition flags */
 #define PF_RDONLY                   0x01 /* Device is read only */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 345da00..eb8a49d 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -41,14 +41,14 @@
 	COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
 
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)				\
-	asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+	asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
+		__attribute__((alias(__stringify(compat_SyS##name))));  \
 	static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
 	asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
 	asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
 	{								\
 		return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));	\
 	}								\
-	SYSCALL_ALIAS(compat_sys##name, compat_SyS##name);		\
 	static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
 #ifndef compat_user_stack_pointer
@@ -362,7 +362,7 @@
 long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 		       unsigned long bitmap_size);
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
 int get_compat_sigevent(struct sigevent *event,
 		const struct compat_sigevent __user *u_event);
 long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 22c33e3..5d5aaae 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -19,8 +19,8 @@
  *
  * See also:  complete(), wait_for_completion() (and friends _timeout,
  * _interruptible, _interruptible_timeout, and _killable), init_completion(),
- * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
- * INIT_COMPLETION().
+ * reinit_completion(), and macros DECLARE_COMPLETION(),
+ * DECLARE_COMPLETION_ONSTACK().
  */
 struct completion {
 	unsigned int done;
@@ -65,7 +65,7 @@
 
 /**
  * init_completion - Initialize a dynamically allocated completion
- * @x:  completion structure that is to be initialized
+ * @x:  pointer to completion structure that is to be initialized
  *
  * This inline function will initialize a dynamically created completion
  * structure.
@@ -76,6 +76,18 @@
 	init_waitqueue_head(&x->wait);
 }
 
+/**
+ * reinit_completion - reinitialize a completion structure
+ * @x:  pointer to completion structure that is to be reinitialized
+ *
+ * This inline function should be used to reinitialize a completion structure
+ * so it can be reused. This is especially important after complete_all() is
+ * used.
+ */
+static inline void reinit_completion(struct completion *x)
+{
+	x->done = 0;
+}
+
 extern void wait_for_completion(struct completion *);
 extern void wait_for_completion_io(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
@@ -94,14 +106,4 @@
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
 
-/**
- * INIT_COMPLETION - reinitialize a completion structure
- * @x:  completion structure to be reinitialized
- *
- * This macro should be used to reinitialize a completion structure so it can
- * be reused. This is especially important after complete_all() is used.
- */
-#define INIT_COMPLETION(x)	((x).done = 0)
-
-
 #endif
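
For callers converting away from the removed INIT_COMPLETION() macro, the
change is mechanical: INIT_COMPLETION(x) becomes reinit_completion(&x). A
minimal sketch of the intended reuse pattern (wait_and_rearm() is a
hypothetical helper, not part of this patch):

#include <linux/completion.h>

static DECLARE_COMPLETION(done);

static void wait_and_rearm(void)
{
        wait_for_completion(&done);
        /* complete_all() saturates ->done; reset it before the next cycle */
        reinit_completion(&done);
}
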
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index a98f1ca..d016a12 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,12 +10,14 @@
  * These are the only things you should do on a core-file: use only these
  * functions to write out all the necessary info.
  */
-extern int dump_write(struct file *file, const void *addr, int nr);
-extern int dump_seek(struct file *file, loff_t off);
+struct coredump_params;
+extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
+extern int dump_align(struct coredump_params *cprm, int align);
 #ifdef CONFIG_COREDUMP
-extern void do_coredump(siginfo_t *siginfo);
+extern void do_coredump(const siginfo_t *siginfo);
 #else
-static inline void do_coredump(siginfo_t *siginfo) {}
+static inline void do_coredump(const siginfo_t *siginfo) {}
 #endif
 
 #endif /* _LINUX_COREDUMP_H */
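
A hedged sketch of how a core-dump writer chains the new cprm-based helpers;
write_note_blob() is hypothetical, and the 1-on-success/0-on-failure
convention is the one these prototypes imply:

#include <linux/coredump.h>

/* Sketch only: emit a blob into the core file, then restore alignment. */
static int write_note_blob(struct coredump_params *cprm,
                           const void *buf, int len)
{
        if (!dump_emit(cprm, buf, len))
                return 0;
        /* keep the next record 4-byte aligned */
        return dump_align(cprm, 4);
}
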
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index fbd25c3..03e235ad 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -188,19 +188,6 @@
 void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-extern void cpu_hotplug_driver_lock(void);
-extern void cpu_hotplug_driver_unlock(void);
-#else
-static inline void cpu_hotplug_driver_lock(void)
-{
-}
-
-static inline void cpu_hotplug_driver_unlock(void)
-{
-}
-#endif
-
 #else		/* CONFIG_HOTPLUG_CPU */
 
 static inline void cpu_hotplug_begin(void) {}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42..dc196bb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,6 +85,20 @@
 	struct list_head        policy_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
+
+	/*
+	 * The rules for this semaphore:
+	 * - Any routine that wants to read from the policy structure will
+	 *   do a down_read on this semaphore.
+	 * - Any routine that will write to the policy structure and/or may
+	 *   take away the policy altogether (e.g. CPU hotplug) will hold
+	 *   this lock in write mode before doing so.
+	 *
+	 * Additional rules:
+	 * - Lock should not be held across
+	 *     __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+	 */
+	struct rw_semaphore	rwsem;
 };
 
 /* Only for ACPI */
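
As a reader-side illustration of the locking rules documented on the new
rwsem, a sketch (show_cur_freq() is a hypothetical helper):

#include <linux/cpufreq.h>

static unsigned int show_cur_freq(struct cpufreq_policy *policy)
{
        unsigned int freq;

        down_read(&policy->rwsem);      /* readers take the semaphore shared */
        freq = policy->cur;
        up_read(&policy->rwsem);

        return freq;
}
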
@@ -93,8 +107,16 @@
 #define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
 #define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU*/
 
+#ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+	return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
 
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
@@ -180,13 +202,6 @@
 struct cpufreq_driver {
 	char			name[CPUFREQ_NAME_LEN];
 	u8			flags;
-	/*
-	 * This should be set by platforms having multiple clock-domains, i.e.
-	 * supporting multiple policies. With this sysfs directories of governor
-	 * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
-	 * use the same governor with different tunables for different clusters.
-	 */
-	bool			have_governor_per_policy;
 
 	/* needed by all drivers */
 	int	(*init)		(struct cpufreq_policy *policy);
@@ -194,9 +209,11 @@
 
 	/* define one out of two */
 	int	(*setpolicy)	(struct cpufreq_policy *policy);
-	int	(*target)	(struct cpufreq_policy *policy,
+	int	(*target)	(struct cpufreq_policy *policy,	/* Deprecated */
 				 unsigned int target_freq,
 				 unsigned int relation);
+	int	(*target_index)	(struct cpufreq_policy *policy,
+				 unsigned int index);
 
 	/* should be defined, if possible */
 	unsigned int	(*get)	(unsigned int cpu);
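
The split between the deprecated ->target() and the new ->target_index() is
easiest to see in a driver callback; a hedged sketch, where foo_set_rate()
and foo_freq_table are hypothetical driver-owned pieces:

/* hypothetical: the driver's clock-setting primitive and frequency table */
extern int foo_set_rate(unsigned int cpu, unsigned int khz);
extern struct cpufreq_frequency_table foo_freq_table[];

static int foo_target_index(struct cpufreq_policy *policy,
                            unsigned int index)
{
        /* the core has already resolved target_freq + relation to an index */
        return foo_set_rate(policy->cpu, foo_freq_table[index].frequency);
}

Drivers staying on the old ->target() have to do the table lookup themselves,
typically via cpufreq_frequency_table_target().
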
@@ -211,13 +228,29 @@
 };
 
 /* flags */
-#define CPUFREQ_STICKY		0x01	/* the driver isn't removed even if
-					 * all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS	0x02	/* loops_per_jiffy or other kernel
-					 * "constants" aren't affected by
-					 * frequency transitions */
-#define CPUFREQ_PM_NO_WARN	0x04	/* don't warn on suspend/resume speed
-					 * mismatches */
+#define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
+						   all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS	(1 << 1)	/* loops_per_jiffy or other
+						   kernel "constants" aren't
+						   affected by frequency
+						   transitions */
+#define CPUFREQ_PM_NO_WARN	(1 << 2)	/* don't warn on suspend/resume
+						   speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this sysfs directories of governor would
+ * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
+ * governor with different tunables for different clusters.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+
+/*
+ * Driver will do POSTCHANGE notifications from outside of their ->target()
+ * routine and so must set cpufreq_driver->flags with this flag, so that core
+ * can handle them specially.
+ */
+#define CPUFREQ_ASYNC_NOTIFICATION  (1 << 4)
 
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -240,6 +273,13 @@
 	return;
 }
 
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+}
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
@@ -392,6 +432,7 @@
 
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 				   struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 				   struct cpufreq_frequency_table *table,
@@ -407,8 +448,20 @@
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 				      unsigned int cpu);
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+				      struct cpufreq_frequency_table *table);
+
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table,
+		unsigned int transition_latency);
+static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
 
 #endif /* _LINUX_CPUFREQ_H */
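
Wiring the new generic helpers together, a driver's boilerplate can shrink to
roughly the following; this continues the hypothetical foo sketch above, and
the 300 us transition latency is a made-up value:

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* validates + exposes the table, sets latency and policy->cpus */
        return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}

static struct cpufreq_driver foo_driver = {
        .name           = "foo-cpufreq",
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = foo_target_index,
        .init           = foo_cpufreq_init,
        .exit           = cpufreq_generic_exit,
        .attr           = cpufreq_generic_attr,
};
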
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 781addc..50fcbb0 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -114,7 +114,7 @@
 	int			safe_state_index;
 
 	/* the driver handles the cpus in cpumask */
-	struct cpumask       *cpumask;
+	struct cpumask		*cpumask;
 };
 
 #ifdef CONFIG_CPU_IDLE
@@ -195,16 +195,10 @@
 };
 
 #ifdef CONFIG_CPU_IDLE
-
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
-
 #else
-
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
-static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
-
 #endif
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index cc1b01c..3fe661f 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -110,10 +110,14 @@
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
+	unsigned long flags;
+
 	task_lock(current);
+	local_irq_save(flags);
 	write_seqcount_begin(&current->mems_allowed_seq);
 	current->mems_allowed = nodemask;
 	write_seqcount_end(&current->mems_allowed_seq);
+	local_irq_restore(flags);
 	task_unlock(current);
 }
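
The irq-save matters because a reader of mems_allowed_seq on the same CPU
(e.g. from interrupt context) would spin forever if it interrupted the write
section. The matching read side follows the usual seqcount retry pattern; a
hedged sketch (read_mems_allowed() is a hypothetical helper):

#include <linux/sched.h>

static nodemask_t read_mems_allowed(void)
{
        nodemask_t mask;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&current->mems_allowed_seq);
                mask = current->mems_allowed;
        } while (read_seqcount_retry(&current->mems_allowed_seq, seq));

        return mask;
}
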
 
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 68267b6..7d275c4 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -11,8 +11,48 @@
 extern u32  crc32_le(u32 crc, unsigned char const *p, size_t len);
 extern u32  crc32_be(u32 crc, unsigned char const *p, size_t len);
 
+/**
+ * crc32_le_combine - Combine two crc32 check values into one. For two
+ * 		      sequences of bytes, seq1 and seq2 with lengths len1
+ * 		      and len2, crc32_le() check values were calculated
+ * 		      for each, crc1 and crc2.
+ *
+ * @crc1: crc32 of the first block
+ * @crc2: crc32 of the second block
+ * @len2: length of the second block
+ *
+ * Return: The crc32_le() check value of seq1 and seq2 concatenated,
+ * 	   requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * 	   the concatenated memory area of seq1 with seq2, and crc_full
+ * 	   the crc32_le() value of seq_full, then crc_full ==
+ * 	   crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
+ * 	   with the same initializer as crc1, and crc2 seed was 0. See
+ * 	   also crc32_combine_test().
+ */
+extern u32  crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
+
 extern u32  __crc32c_le(u32 crc, unsigned char const *p, size_t len);
 
+/**
+ * __crc32c_le_combine - Combine two crc32c check values into one. For two
+ * 			 sequences of bytes, seq1 and seq2 with lengths len1
+ * 			 and len2, __crc32c_le() check values were calculated
+ * 			 for each, crc1 and crc2.
+ *
+ * @crc1: crc32c of the first block
+ * @crc2: crc32c of the second block
+ * @len2: length of the second block
+ *
+ * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
+ * 	   requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * 	   the concatenated memory area of seq1 with seq2, and crc_full
+ * 	   the __crc32c_le() value of seq_full, then crc_full ==
+ * 	   __crc32c_le_combine(crc1, crc2, len2) when crc_full was
+ * 	   seeded with the same initializer as crc1, and crc2 seed
+ * 	   was 0. See also crc32c_combine_test().
+ */
+extern u32  __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
+
 #define crc32(seed, data, length)  crc32_le(seed, (unsigned char const *)(data), length)
 
 /*
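
A worked example of the combine identity described in the kernel-doc above:
CRC the two halves independently, then merge. crc_of_both() is a hypothetical
helper; note the second CRC must be seeded with 0, exactly as documented:

#include <linux/crc32.h>

static u32 crc_of_both(const u8 *buf, size_t len1, size_t len2)
{
        u32 crc1 = crc32_le(~0, buf, len1);             /* normal seed */
        u32 crc2 = crc32_le(0, buf + len1, len2);       /* seed must be 0 */

        /* equals crc32_le(~0, buf, len1 + len2) */
        return crc32_le_combine(crc1, crc2, len2);
}
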
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59066e0..57e87e7 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -169,13 +169,13 @@
  */
 
 /* d_flags entries */
-#define DCACHE_OP_HASH		0x0001
-#define DCACHE_OP_COMPARE	0x0002
-#define DCACHE_OP_REVALIDATE	0x0004
-#define DCACHE_OP_DELETE	0x0008
-#define DCACHE_OP_PRUNE         0x0010
+#define DCACHE_OP_HASH			0x00000001
+#define DCACHE_OP_COMPARE		0x00000002
+#define DCACHE_OP_REVALIDATE		0x00000004
+#define DCACHE_OP_DELETE		0x00000008
+#define DCACHE_OP_PRUNE			0x00000010
 
-#define	DCACHE_DISCONNECTED	0x0020
+#define	DCACHE_DISCONNECTED		0x00000020
      /* This dentry is possibly not currently connected to the dcache tree, in
       * which case its parent will either be itself, or will have this flag as
       * well.  nfsd will not use a dentry with this bit set, but will first
@@ -186,30 +186,38 @@
       * dentry into place and return that dentry rather than the passed one,
       * typically using d_splice_alias. */
 
-#define DCACHE_REFERENCED	0x0040  /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS	0x0080	/* Entry has ever been RCU-visible */
+#define DCACHE_REFERENCED		0x00000040 /* Recently used, don't discard. */
+#define DCACHE_RCUACCESS		0x00000080 /* Entry has ever been RCU-visible */
 
-#define DCACHE_CANT_MOUNT	0x0100
-#define DCACHE_GENOCIDE		0x0200
-#define DCACHE_SHRINK_LIST	0x0400
+#define DCACHE_CANT_MOUNT		0x00000100
+#define DCACHE_GENOCIDE			0x00000200
+#define DCACHE_SHRINK_LIST		0x00000400
 
-#define DCACHE_OP_WEAK_REVALIDATE	0x0800
+#define DCACHE_OP_WEAK_REVALIDATE	0x00000800
 
-#define DCACHE_NFSFS_RENAMED	0x1000
+#define DCACHE_NFSFS_RENAMED		0x00001000
      /* this dentry has been "silly renamed" and has to be deleted on the last
       * dput() */
-#define DCACHE_COOKIE		0x2000	/* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x4000
+#define DCACHE_COOKIE			0x00002000 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED	0x00004000
      /* Parent inode is watched by some fsnotify listener */
 
-#define DCACHE_MOUNTED		0x10000	/* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT	0x20000	/* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT	0x40000	/* manage transit from this dirent */
+#define DCACHE_DENTRY_KILLED		0x00008000
+
+#define DCACHE_MOUNTED			0x00010000 /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT		0x00020000 /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT		0x00040000 /* manage transit from this dirent */
 #define DCACHE_MANAGED_DENTRY \
 	(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
 
-#define DCACHE_LRU_LIST		0x80000
-#define DCACHE_DENTRY_KILLED	0x100000
+#define DCACHE_LRU_LIST			0x00080000
+
+#define DCACHE_ENTRY_TYPE		0x00700000
+#define DCACHE_MISS_TYPE		0x00000000 /* Negative dentry */
+#define DCACHE_DIRECTORY_TYPE		0x00100000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE		0x00200000 /* Lookupless directory (presumed automount) */
+#define DCACHE_SYMLINK_TYPE		0x00300000 /* Symlink */
+#define DCACHE_FILE_TYPE		0x00400000 /* Other file type */
 
 extern seqlock_t rename_lock;
 
@@ -224,6 +232,7 @@
 extern void d_instantiate(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
 extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
@@ -393,6 +402,61 @@
 	return dentry->d_flags & DCACHE_MOUNTED;
 }
 
+/*
+ * Directory cache entry type accessor functions.
+ */
+static inline void __d_set_type(struct dentry *dentry, unsigned type)
+{
+	dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
+}
+
+static inline void __d_clear_type(struct dentry *dentry)
+{
+	__d_set_type(dentry, DCACHE_MISS_TYPE);
+}
+
+static inline void d_set_type(struct dentry *dentry, unsigned type)
+{
+	spin_lock(&dentry->d_lock);
+	__d_set_type(dentry, type);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline unsigned __d_entry_type(const struct dentry *dentry)
+{
+	return dentry->d_flags & DCACHE_ENTRY_TYPE;
+}
+
+static inline bool d_is_directory(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
+}
+
+static inline bool d_is_autodir(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
+}
+
+static inline bool d_is_symlink(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
+}
+
+static inline bool d_is_file(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+}
+
+static inline bool d_is_negative(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_positive(const struct dentry *dentry)
+{
+	return !d_is_negative(dentry);
+}
+
 extern int sysctl_vfs_cache_pressure;
 
 static inline unsigned long vfs_pressure_ratio(unsigned long val)
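
The payoff of the new DCACHE_ENTRY_TYPE bits is that callers can test a
dentry's kind from d_flags alone, without dereferencing ->d_inode; a hedged
sketch, where the do_*_op() callees are hypothetical:

#include <linux/dcache.h>
#include <linux/errno.h>

/* hypothetical per-type handlers */
extern int do_dir_op(struct dentry *);
extern int do_link_op(struct dentry *);
extern int do_file_op(struct dentry *);

static int dispatch_by_type(struct dentry *dentry)
{
        if (d_is_negative(dentry))
                return -ENOENT;
        if (d_is_directory(dentry) || d_is_autodir(dentry))
                return do_dir_op(dentry);
        if (d_is_symlink(dentry))
                return do_link_op(dentry);
        return do_file_op(dentry);
}
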
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 5f1ab92..d48dc00 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,7 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/notifier.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 #define DEVFREQ_NAME_LEN 16
 
@@ -168,7 +168,7 @@
 	unsigned long max_freq;
 	bool stop_polling;
 
-	/* information for device freqeuncy transition */
+	/* information for device frequency transition */
 	unsigned int total_trans;
 	unsigned int *trans_table;
 	unsigned long *time_in_state;
@@ -187,7 +187,7 @@
 extern int devfreq_resume_device(struct devfreq *devfreq);
 
 /* Helper functions for devfreq user device driver with OPP. */
-extern struct opp *devfreq_recommended_opp(struct device *dev,
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags);
 extern int devfreq_register_opp_notifier(struct device *dev,
 					 struct devfreq *devfreq);
@@ -238,7 +238,7 @@
 	return 0;
 }
 
-static inline struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags)
 {
 	return ERR_PTR(-EINVAL);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2..fd4aee2 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@
 }
 #endif
 
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+	dev->dma_mask = &dev->coherent_dma_mask;
+	return dma_set_mask_and_coherent(dev, mask);
+}
+
 extern u64 dma_get_required_mask(struct device *dev);
 
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
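
Typical use collapses the old two-call probe-time sequence into one; a sketch
(foo_probe() is a hypothetical platform driver probe):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        /* sets the streaming mask, then mirrors it to the coherent mask */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        return 0;
}
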
@@ -129,6 +153,13 @@
 		return -EIO;
 }
 
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+	return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag)
 {
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 40a3c0e0..67a5fa7 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -39,13 +39,13 @@
 
 /* Optional callbacks to write extra ELF notes. */
 struct file;
+struct coredump_params;
 
 #ifndef ARCH_HAVE_EXTRA_ELF_NOTES
 static inline int elf_coredump_extra_notes_size(void) { return 0; }
-static inline int elf_coredump_extra_notes_write(struct file *file,
-			loff_t *foffset) { return 0; }
+static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
 #else
 extern int elf_coredump_extra_notes_size(void);
-extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
 #endif
 #endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index cdd3d13..698d51a 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -6,6 +6,8 @@
 #include <asm/elf.h>
 #include <uapi/linux/elfcore.h>
 
+struct coredump_params;
+
 static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
 {
 #ifdef ELF_CORE_COPY_REGS
@@ -63,10 +65,9 @@
  */
 extern Elf_Half elf_core_extra_phdrs(void);
 extern int
-elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-			   unsigned long limit);
+elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
-elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit);
+elf_core_write_extra_data(struct coredump_params *cprm);
 extern size_t elf_core_extra_data_size(void);
 
 #endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index d8b5124..fc4a9aa 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -28,27 +28,24 @@
 #include <asm/unaligned.h>
 
 #ifdef __KERNEL__
-extern __be16		eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
-		      unsigned short type,
-		      const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
-				    const struct net_device *dev,
-				    const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+	       const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+		     __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+			     const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
 
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 					    unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
diff --git a/include/linux/export.h b/include/linux/export.h
index 412cd50..3f2793d 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,7 +43,7 @@
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
 #define __CRC_SYMBOL(sym, sec)					\
-	extern void *__crc_##sym __attribute__((weak));		\
+	extern __visible void *__crc_##sym __attribute__((weak));		\
 	static const unsigned long __kcrctab_##sym		\
 	__used							\
 	__attribute__((section("___kcrctab" sec "+" #sym), unused))	\
@@ -59,7 +59,7 @@
 	static const char __kstrtab_##sym[]			\
 	__attribute__((section("__ksymtab_strings"), aligned(1))) \
 	= VMLINUX_SYMBOL_STR(sym);				\
-	static const struct kernel_symbol __ksymtab_##sym	\
+	__visible const struct kernel_symbol __ksymtab_##sym	\
 	__used							\
 	__attribute__((section("___ksymtab" sec "+" #sym), unused))	\
 	= { (unsigned long)&sym, __kstrtab_##sym }
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffac70a..70c4836 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -792,4 +792,16 @@
 			const struct fb_videomode *default_mode,
 			unsigned int default_bpp);
 
+/* Convenience logging macros */
+#define fb_err(fb_info, fmt, ...)					\
+	pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_notice(info, fmt, ...)					\
+	pr_notice("fb%d: " fmt, (info)->node, ##__VA_ARGS__)
+#define fb_warn(fb_info, fmt, ...)					\
+	pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_info(fb_info, fmt, ...)					\
+	pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_dbg(fb_info, fmt, ...)					\
+	pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
 #endif /* _LINUX_FB_H */
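
With the new macros, drivers get the "fbN: " prefix for free; a hedged sketch
(report_depth() is hypothetical):

#include <linux/fb.h>

static void report_depth(struct fb_info *info)
{
        /* expands to pr_warn("fb%d: ...", info->node, ...) */
        fb_warn(info, "unsupported depth %d, falling back to 16 bpp\n",
                info->var.bits_per_pixel);
}
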
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index e460ef8..5009fa1 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -27,7 +27,7 @@
 #include <linux/if_fc.h>
 
 #ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
 
 #endif	/* _LINUX_FCDEVICE_H */
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 155bafd..9a79f01 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -25,10 +25,9 @@
 #include <linux/if_fddi.h>
 
 #ifdef __KERNEL__
-extern __be16	fddi_type_trans(struct sk_buff *skb,
-				struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
 #endif
 
 #endif	/* _LINUX_FDDIDEVICE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 955dff5..bf5d574 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -623,10 +623,13 @@
  * 0: the object of the current VFS operation
  * 1: parent
  * 2: child/target
- * 3: quota file
+ * 3: xattr
+ * 4: second non-directory
+ * The last is for certain operations (such as rename) which lock two
+ * non-directories at once.
  *
  * The locking order between these classes is
- * parent -> child -> normal -> xattr -> quota
+ * parent -> child -> normal -> xattr -> second non-directory
  */
 enum inode_i_mutex_lock_class
 {
@@ -634,9 +637,12 @@
 	I_MUTEX_PARENT,
 	I_MUTEX_CHILD,
 	I_MUTEX_XATTR,
-	I_MUTEX_QUOTA
+	I_MUTEX_NONDIR2
 };
 
+void lock_two_nondirectories(struct inode *, struct inode*);
+void unlock_two_nondirectories(struct inode *, struct inode*);
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
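
A sketch of the calling pattern for the new helpers; the helpers pick a
stable lock order internally (tagging the second inode I_MUTEX_NONDIR2), so
callers do not have to. swap_contents() is a hypothetical operation:

static void swap_contents(struct inode *a, struct inode *b)
{
        lock_two_nondirectories(a, b);  /* ordering handled internally */
        /* ... exchange state between the two inodes ... */
        unlock_two_nondirectories(a, b);
}
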
@@ -764,12 +770,7 @@
 #define FILE_MNT_WRITE_RELEASED	2
 
 struct file {
-	/*
-	 * fu_list becomes invalid after file_free is called and queued via
-	 * fu_rcuhead for RCU freeing
-	 */
 	union {
-		struct list_head	fu_list;
 		struct llist_node	fu_llist;
 		struct rcu_head 	fu_rcuhead;
 	} f_u;
@@ -783,9 +784,6 @@
 	 * Must not be taken from IRQ context.
 	 */
 	spinlock_t		f_lock;
-#ifdef CONFIG_SMP
-	int			f_sb_list_cpu;
-#endif
 	atomic_long_t		f_count;
 	unsigned int 		f_flags;
 	fmode_t			f_mode;
@@ -882,6 +880,7 @@
 
 #define FL_POSIX	1
 #define FL_FLOCK	2
+#define FL_DELEG	4	/* NFSv4 delegation */
 #define FL_ACCESS	8	/* not trying to lock, just looking */
 #define FL_EXISTS	16	/* when unlocking, test for existence */
 #define FL_LEASE	32	/* lease held on this file */
@@ -1023,7 +1022,7 @@
 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
 extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags);
+extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **);
 extern int vfs_setlease(struct file *, long, struct file_lock **);
@@ -1132,7 +1131,7 @@
 	return -ENOLCK;
 }
 
-static inline int __break_lease(struct inode *inode, unsigned int mode)
+static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
 	return 0;
 }
@@ -1264,11 +1263,6 @@
 
 	struct list_head	s_inodes;	/* all inodes */
 	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
-#ifdef CONFIG_SMP
-	struct list_head __percpu *s_files;
-#else
-	struct list_head	s_files;
-#endif
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
 	struct block_device	*s_bdev;
 	struct backing_dev_info *s_bdi;
@@ -1330,6 +1324,7 @@
 	 */
 	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
 	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
+	struct rcu_head		rcu;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1458,10 +1453,10 @@
 extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
 extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
 extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
+extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **);
 
 /*
  * VFS dentry helper functions.
@@ -1875,6 +1870,17 @@
 	(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
 #define fops_put(fops) \
 	do { if (fops) module_put((fops)->owner); } while(0)
+/*
+ * This one is to be used *ONLY* from ->open() instances.
+ * fops must be non-NULL, pinned down *and* module dependencies
+ * should be sufficient to pin the caller down as well.
+ */
+#define replace_fops(f, fops) \
+	do {	\
+		struct file *__file = (f); \
+		fops_put(__file->f_op); \
+		BUG_ON(!(__file->f_op = (fops))); \
+	} while(0)
 
 extern int register_filesystem(struct file_system_type *);
 extern int unregister_filesystem(struct file_system_type *);
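
replace_fops() is meant for the open()-time handoff pattern; a hedged sketch,
where foo_real_fops is a hypothetical target fops assumed to be pinned as the
comment above requires:

#include <linux/fs.h>

/* hypothetical: the fops we hand the file over to */
extern const struct file_operations foo_real_fops;

static int foo_open(struct inode *inode, struct file *filp)
{
        /* drop our fops reference and install the real one */
        replace_fops(filp, &foo_real_fops);
        if (filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}
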
@@ -1899,6 +1905,9 @@
 
 extern int current_umask(void);
 
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
@@ -1955,9 +1964,39 @@
 static inline int break_lease(struct inode *inode, unsigned int mode)
 {
 	if (inode->i_flock)
-		return __break_lease(inode, mode);
+		return __break_lease(inode, mode, FL_LEASE);
 	return 0;
 }
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+	if (inode->i_flock)
+		return __break_lease(inode, mode, FL_DELEG);
+	return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+	int ret;
+
+	ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
+	if (ret == -EWOULDBLOCK && delegated_inode) {
+		*delegated_inode = inode;
+		ihold(inode);
+	}
+	return ret;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+	int ret;
+
+	ret = break_deleg(*delegated_inode, O_WRONLY);
+	iput(*delegated_inode);
+	*delegated_inode = NULL;
+	return ret;
+}
+
 #else /* !CONFIG_FILE_LOCKING */
 static inline int locks_mandatory_locked(struct inode *inode)
 {
@@ -1997,6 +2036,22 @@
 	return 0;
 }
 
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+	return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+	return 0;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+	BUG();
+	return 0;
+}
+
 #endif /* CONFIG_FILE_LOCKING */
 
 /* fs/open.c */
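
Callers of the now delegation-aware vfs_unlink()/vfs_rename() follow a retry
loop built from these helpers; a hedged sketch of the pattern
(unlink_with_deleg() is hypothetical, the loop shape mirrors the in-tree
callers this series converts):

#include <linux/fs.h>

static int unlink_with_deleg(struct inode *dir, struct dentry *dentry)
{
        struct inode *delegated_inode = NULL;
        int error;

retry:
        mutex_lock(&dir->i_mutex);
        error = vfs_unlink(dir, dentry, &delegated_inode);
        mutex_unlock(&dir->i_mutex);
        if (delegated_inode) {
                /* wait for the lease break, then redo the operation */
                error = break_deleg_wait(&delegated_inode);
                if (!error)
                        goto retry;
        }
        return error;
}
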
@@ -2223,7 +2278,7 @@
 #ifdef CONFIG_BLOCK
 extern sector_t bmap(struct inode *, sector_t);
 #endif
-extern int notify_change(struct dentry *, struct iattr *);
+extern int notify_change(struct dentry *, struct iattr *, struct inode **);
 extern int inode_permission(struct inode *, int);
 extern int generic_permission(struct inode *, int);
 
@@ -2337,8 +2392,6 @@
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
 extern void address_space_init_once(struct address_space *mapping);
-extern void ihold(struct inode * inode);
-extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
 extern ino_t iunique(struct super_block *, ino_t);
 extern int inode_needs_sync(struct inode *inode);
@@ -2507,8 +2560,10 @@
 		int nofs);
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
+int vfs_getattr_nosec(struct path *path, struct kstat *stat);
 extern int vfs_getattr(struct path *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -2567,6 +2622,7 @@
 extern int simple_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata);
+extern struct inode *alloc_anon_inode(struct super_block *);
 
 extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9f15c00..31ea4b4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -533,11 +533,11 @@
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
-static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	return -EINVAL;
 }
-static inline int unregister_ftrace_command(char *cmd_name)
+static inline __init int unregister_ftrace_command(char *cmd_name)
 {
 	return -EINVAL;
 }
@@ -721,6 +721,7 @@
 extern char __irqentry_text_start[];
 extern char __irqentry_text_end[];
 
+#define FTRACE_NOTRACE_DEPTH 65536
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5eaa746..9abbe63 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -202,6 +202,7 @@
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 	TRACE_EVENT_FL_WAS_ENABLED_BIT,
+	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 };
 
 /*
@@ -213,6 +214,7 @@
  *  WAS_ENABLED   - Set and stays set when an event was ever enabled
  *                    (used for module unloading, if a module event is enabled,
  *                     it is best to clear the buffers that used it).
+ *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
  */
 enum {
 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -220,6 +222,7 @@
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 };
 
 struct ftrace_event_call {
@@ -238,6 +241,7 @@
 	 *   bit 2:		failed to apply filter
 	 *   bit 3:		ftrace internal event (do not enable)
 	 *   bit 4:		Event was enabled by module
+	 *   bit 5:		use call filter rather than file filter
 	 */
 	int			flags; /* static flags of different events */
 
@@ -253,6 +257,8 @@
 enum {
 	FTRACE_EVENT_FL_ENABLED_BIT,
 	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+	FTRACE_EVENT_FL_FILTERED_BIT,
+	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	FTRACE_EVENT_FL_SOFT_MODE_BIT,
 	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 };
@@ -261,6 +267,8 @@
  * Ftrace event file flags:
  *  ENABLED	  - The event is enabled
  *  RECORDED_CMD  - The comms should be recorded at sched_switch
+ *  FILTERED	  - The event has a filter attached
+ *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
  *  SOFT_DISABLED - When set, do not trace the event (even though its
  *                   tracepoint may be enabled)
@@ -268,6 +276,8 @@
 enum {
 	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
 	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+	FTRACE_EVENT_FL_FILTERED	= (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+	FTRACE_EVENT_FL_NO_SET_FILTER	= (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
 	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 };
@@ -275,6 +285,7 @@
 struct ftrace_event_file {
 	struct list_head		list;
 	struct ftrace_event_call	*event_call;
+	struct event_filter		*filter;
 	struct dentry			*dir;
 	struct trace_array		*tr;
 	struct ftrace_subsystem_dir	*system;
@@ -310,12 +321,16 @@
 
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
 
-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
-					struct ftrace_event_call *call,
-					void *rec,
-					struct ring_buffer_event *event);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+				struct ring_buffer *buffer,
+				struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+				     struct ring_buffer *buffer,
+				     struct ring_buffer_event *event);
 
 enum {
 	FILTER_OTHER = 0,
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index f8d41cb..1eda33d 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -94,6 +94,8 @@
 }
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
+		dma_addr_t *dma);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
 extern void gen_pool_for_each_chunk(struct gen_pool *,
 	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index f148e49..8ec23fb 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -31,11 +31,11 @@
 	__u32	ifield;
 };
 
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
 
 #endif	/* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 0000000..f5b9b87
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+	HOST1X_CLASS_HOST1X = 0x1,
+	HOST1X_CLASS_GR2D = 0x51,
+	HOST1X_CLASS_GR2D_SB = 0x52,
+	HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+	int (*init)(struct host1x_client *client);
+	int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+	struct list_head list;
+	struct device *parent;
+	struct device *dev;
+
+	const struct host1x_client_ops *ops;
+
+	enum host1x_class class;
+	struct host1x_channel *channel;
+
+	struct host1x_syncpt **syncpts;
+	unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+	struct host1x_bo *(*get)(struct host1x_bo *bo);
+	void (*put)(struct host1x_bo *bo);
+	dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+	void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+	void *(*mmap)(struct host1x_bo *bo);
+	void (*munmap)(struct host1x_bo *bo, void *addr);
+	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+	const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+				  const struct host1x_bo_ops *ops)
+{
+	bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+	return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+	bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+				       struct sg_table **sgt)
+{
+	return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+	bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+	return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+	bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+	return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+				    unsigned int pagenum, void *addr)
+{
+	bo->ops->kunmap(bo, pagenum, addr);
+}
+
+/*
+ * host1x syncpoints
+ */
+
+#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
+#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)
+
+struct host1x_syncpt_base;
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+		       u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+					    unsigned long flags);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+	struct host1x_bo *cmdbuf;
+	u32 cmdbuf_offset;
+	struct host1x_bo *target;
+	u32 target_offset;
+	u32 shift;
+	u32 pad;
+};
+
+struct host1x_job {
+	/* When refcount goes to zero, job can be freed */
+	struct kref ref;
+
+	/* List entry */
+	struct list_head list;
+
+	/* Channel where job is submitted to */
+	struct host1x_channel *channel;
+
+	u32 client;
+
+	/* Gathers and their memory */
+	struct host1x_job_gather *gathers;
+	unsigned int num_gathers;
+
+	/* Wait checks to be processed at submit time */
+	struct host1x_waitchk *waitchk;
+	unsigned int num_waitchk;
+	u32 waitchk_mask;
+
+	/* Array of handles to be pinned & unpinned */
+	struct host1x_reloc *relocarray;
+	unsigned int num_relocs;
+	struct host1x_job_unpin_data *unpins;
+	unsigned int num_unpins;
+
+	dma_addr_t *addr_phys;
+	dma_addr_t *gather_addr_phys;
+	dma_addr_t *reloc_addr_phys;
+
+	/* Sync point id, number of increments and end related to the submit */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	u32 syncpt_end;
+
+	/* Maximum time to wait for this job */
+	unsigned int timeout;
+
+	/* Index and number of slots used in the push buffer */
+	unsigned int first_get;
+	unsigned int num_slots;
+
+	/* Copy of gathers */
+	size_t gather_copy_size;
+	dma_addr_t gather_copy;
+	u8 *gather_copy_mapped;
+
+	/* Check if register is marked as an address reg */
+	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+	/* Request a SETCLASS to this class */
+	u32 class;
+
+	/* Add a channel wait for previous ops to complete */
+	bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+				    u32 num_cmdbufs, u32 num_relocs,
+				    u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+			   u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+	const struct of_device_id *subdevs;
+	struct list_head list;
+	const char *name;
+
+	int (*probe)(struct host1x_device *device);
+	int (*remove)(struct host1x_device *device);
+};
+
+int host1x_driver_register(struct host1x_driver *driver);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+struct host1x_device {
+	struct host1x_driver *driver;
+	struct list_head list;
+	struct device dev;
+
+	struct mutex subdevs_lock;
+	struct list_head subdevs;
+	struct list_head active;
+
+	struct mutex clients_lock;
+	struct list_head clients;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+	return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
+
+#endif
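
To ground the new bus API, a hedged sketch of a client driver registering
itself with host1x from its probe routine; struct foo and the foo_* names are
hypothetical:

#include <linux/host1x.h>
#include <linux/platform_device.h>

struct foo {
        struct host1x_client client;
};

static int foo_init(struct host1x_client *client)
{
        /* called once the host1x device has gathered all its subdevices */
        return 0;
}

static int foo_exit(struct host1x_client *client)
{
        return 0;
}

static const struct host1x_client_ops foo_client_ops = {
        .init = foo_init,
        .exit = foo_exit,
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return -ENOMEM;

        foo->client.ops = &foo_client_ops;
        foo->client.dev = &pdev->dev;

        return host1x_client_register(&foo->client);
}
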
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428..91672e2 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -54,7 +54,8 @@
 extern pmd_t *page_check_address_pmd(struct page *page,
 				     struct mm_struct *mm,
 				     unsigned long address,
-				     enum page_check_address_pmd_flag flag);
+				     enum page_check_address_pmd_flag flag,
+				     spinlock_t **ptl);
 
 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
@@ -129,15 +130,15 @@
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd,
-				 struct vm_area_struct *vma);
+extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+		spinlock_t **ptl);
 /* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
-				      struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+		spinlock_t **ptl)
 {
 	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
 	if (pmd_trans_huge(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma);
+		return __pmd_trans_huge_lock(pmd, vma, ptl);
 	else
 		return 0;
 }
@@ -215,8 +216,8 @@
 					 long adjust_next)
 {
 }
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
-				      struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+		spinlock_t **ptl)
 {
 	return 0;
 }
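
The new spinlock_t ** out-parameter exists because the page-table lock may
now be a split per-pmd lock rather than mm->page_table_lock; a hedged sketch
of the updated calling convention (touch_huge_pmd() is hypothetical):

#include <linux/mm.h>
#include <linux/huge_mm.h>

static int touch_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd)
{
        spinlock_t *ptl;

        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                /* *pmd is a stable huge pmd here and ptl is held */
                spin_unlock(ptl);
                return 1;
        }
        return 0;
}
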
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03932704..acd2010 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -392,6 +392,15 @@
 	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
 }
 
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					   struct mm_struct *mm, pte_t *pte)
+{
+	if (huge_page_size(h) == PMD_SIZE)
+		return pmd_lockptr(mm, (pmd_t *) pte);
+	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+	return &mm->page_table_lock;
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -401,6 +410,7 @@
 #define hstate_sizelog(s) NULL
 #define hstate_vma(v) NULL
 #define hstate_inode(i) NULL
+#define page_hstate(page) NULL
 #define huge_page_size(h) PAGE_SIZE
 #define huge_page_mask(h) PAGE_MASK
 #define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +431,22 @@
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
 #define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					   struct mm_struct *mm, pte_t *pte)
+{
+	return &mm->page_table_lock;
+}
 #endif	/* CONFIG_HUGETLB_PAGE */
 
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl;
+
+	ptl = huge_pte_lockptr(h, mm, pte);
+	spin_lock(ptl);
+	return ptl;
+}
+
 #endif /* _LINUX_HUGETLB_H */
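
An illustrative use of the new per-hstate lock helpers, assuming a caller that already holds a valid ptep; not taken from this patch:

static void demo_touch_huge_pte(struct hstate *h, struct mm_struct *mm,
				pte_t *ptep)
{
	/* picks the split pmd lock for PMD-sized pages, else the mm lock */
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

	/* *ptep is stable while ptl is held */
	spin_unlock(ptl);
}
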
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a5b598a..8c3b26a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -697,6 +697,18 @@
 } __packed;
 
 /**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ *
+ * This structure represents the "Mesh Channel Switch Parameters element"
+ */
+struct ieee80211_mesh_chansw_params_ie {
+	u8 mesh_ttl;
+	u8 mesh_flags;
+	__le16 mesh_reason;
+	__le16 mesh_pre_value;
+} __packed;
+
+/**
  * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
  */
 struct ieee80211_wide_bw_chansw_ie {
@@ -751,6 +763,14 @@
 };
 
 /**
+ * Mesh Channel Switch Parameters element's flag indicators
+ */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
  * struct ieee80211_rann_ie
  *
  * This structure refers to "Root Announcement information element"
@@ -1391,8 +1411,8 @@
 #define IEEE80211_VHT_CAP_RXSTBC_MASK				0x00000700
 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE			0x00000800
 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE			0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX		0x00006000
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX		0x00030000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX			0x0000e000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX		0x00070000
 #define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE			0x00080000
 #define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE			0x00100000
 #define IEEE80211_VHT_CAP_VHT_TXOP_PS				0x00200000
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index ddd33fd..c270285 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -61,6 +61,7 @@
 	struct hlist_node	hlist;
 	struct macvlan_port	*port;
 	struct net_device	*lowerdev;
+	void			*fwd_priv;
 	struct macvlan_pcpu_stats __percpu *pcpu_stats;
 
 	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 715c343..f252deb 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -89,6 +89,101 @@
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 
+/**
+ *	struct vlan_priority_tci_mapping - vlan egress priority mappings
+ *	@priority: skb priority
+ *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ *	@next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+	u32					priority;
+	u16					vlan_qos;
+	struct vlan_priority_tci_mapping	*next;
+};
+
+/**
+ *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ *	@rx_packets: number of received packets
+ *	@rx_bytes: number of received bytes
+ *	@rx_multicast: number of received multicast packets
+ *	@tx_packets: number of transmitted packets
+ *	@tx_bytes: number of transmitted bytes
+ *	@syncp: synchronization point for 64bit counters
+ *	@rx_errors: number of rx errors
+ *	@tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_multicast;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			rx_errors;
+	u32			tx_dropped;
+};
+
+struct proc_dir_entry;
+struct netpoll;
+
+/**
+ *	struct vlan_dev_priv - VLAN private device data
+ *	@nr_ingress_mappings: number of ingress priority mappings
+ *	@ingress_priority_map: ingress priority mappings
+ *	@nr_egress_mappings: number of egress priority mappings
+ *	@egress_priority_map: hash of egress priority mappings
+ *	@vlan_proto: VLAN encapsulation protocol
+ *	@vlan_id: VLAN identifier
+ *	@flags: device flags
+ *	@real_dev: underlying netdevice
+ *	@real_dev_addr: address of underlying netdevice
+ *	@dent: proc dir entry
+ *	@vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_priv {
+	unsigned int				nr_ingress_mappings;
+	u32					ingress_priority_map[8];
+	unsigned int				nr_egress_mappings;
+	struct vlan_priority_tci_mapping	*egress_priority_map[16];
+
+	__be16					vlan_proto;
+	u16					vlan_id;
+	u16					flags;
+
+	struct net_device			*real_dev;
+	unsigned char				real_dev_addr[ETH_ALEN];
+
+	struct proc_dir_entry			*dent;
+	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll				*netpoll;
+#endif
+};
+
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+{
+	struct vlan_priority_tci_mapping *mp;
+
+	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
+	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
+	while (mp) {
+		if (mp->priority == skprio) {
+			return mp->vlan_qos; /* This should already be shifted
+					      * to mask correctly with the
+					      * VLAN's TCI */
+		}
+		mp = mp->next;
+	}
+	return 0;
+}
+
 extern bool vlan_do_receive(struct sk_buff **skb);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
@@ -121,6 +216,12 @@
 	return 0;
 }
 
+static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
+					       u32 skprio)
+{
+	return 0;
+}
+
 static inline bool vlan_do_receive(struct sk_buff **skb)
 {
 	return false;
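
With struct vlan_dev_priv now visible in the header, drivers can derive an egress TCI directly; a hypothetical helper (dev and skb supplied by the caller), not from this patch:

static u16 demo_vlan_tci(struct net_device *dev, struct sk_buff *skb)
{
	/* vlan_qos is already shifted into the TCI priority bits */
	u16 qos = vlan_dev_get_egress_qos_mask(dev, skb->priority);

	return qos | vlan_dev_priv(dev)->vlan_id;
}
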
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 79640e01..0d678ae 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -147,25 +147,27 @@
 	unsigned long		ifa_tstamp; /* updated timestamp */
 };
 
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-					struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+				 struct ipv4_devconf *devconf);
 
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 {
 	return __ip_dev_find(net, addr, true);
 }
 
-extern int		inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int		devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void		devinet_init(void);
-extern struct in_device	*inetdev_by_index(struct net *, int);
-extern __be32		inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32		inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+			 int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+				    __be32 mask);
 
 static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
@@ -218,7 +220,7 @@
 	return rtnl_dereference(dev->ip_ptr);
 }
 
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
diff --git a/include/linux/init.h b/include/linux/init.h
index f1c27a7..8e68a64 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -26,8 +26,8 @@
  * extern int initialize_foobar_device(int, int, int) __init;
  *
  * For initialized data:
- * You should insert __initdata between the variable name and equal
- * sign followed by value, e.g.:
+ * You should insert __initdata or __initconst between the variable name
+ * and equal sign followed by value, e.g.:
  *
  * static int init_variable __initdata = 0;
  * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
@@ -35,8 +35,6 @@
  * Don't forget to initialize data not at file scope, i.e. within a function,
  * as gcc otherwise puts the data into the bss section and not into the init
  * section.
- * 
- * Also note, that this data cannot be "const".
  */
 
 /* These are for everybody (although not all archs will actually
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5cd0f09..b0ed422 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,10 +32,10 @@
 #endif
 
 #ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ							\
-	.mems_allowed_seq = SEQCNT_ZERO,
+#define INIT_CPUSET_SEQ(tsk)							\
+	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
 #else
-#define INIT_CPUSET_SEQ
+#define INIT_CPUSET_SEQ(tsk)
 #endif
 
 #define INIT_SIGNALS(sig) {						\
@@ -220,7 +220,7 @@
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
-	INIT_CPUSET_SEQ							\
+	INIT_CPUSET_SEQ(tsk)						\
 	INIT_VTIME(tsk)							\
 }
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c9e831d..db43b58 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,8 +11,6 @@
 #include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
@@ -392,15 +390,6 @@
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
-/* This is the worklist that queues up per-cpu softirq work.
- *
- * send_remote_sendirq() adds work to these lists, and
- * the softirq handler itself dequeues from them.  The queues
- * are protected by disabling local cpu interrupts and they must
- * only be accessed by the local cpu that they are for.
- */
-DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -408,17 +397,6 @@
 	return this_cpu_read(ksoftirqd);
 }
 
-/* Try to send a softirq to a remote cpu.  If this cannot be done, the
- * work will be queued to the local cpu.
- */
-extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
-
-/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
- * and compute the current cpu, passed in as 'this_cpu'.
- */
-extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
-				  int this_cpu, int softirq);
-
 /* Tasklets --- multithreaded analogue of BHs.
 
    Main feature differing them of generic softirqs: tasklet
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ea319e..a444c79 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/types.h>
+#include <trace/events/iommu.h>
 
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
@@ -227,6 +228,7 @@
 		ret = domain->handler(domain, dev, iova, flags,
 						domain->handler_token);
 
+	trace_io_page_fault(dev, iova, flags);
 	return ret;
 }
 
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 28ea384..5d89d1b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -21,13 +21,11 @@
 	__s32		force_mld_version;
 	__s32		mldv1_unsolicited_report_interval;
 	__s32		mldv2_unsolicited_report_interval;
-#ifdef CONFIG_IPV6_PRIVACY
 	__s32		use_tempaddr;
 	__s32		temp_valid_lft;
 	__s32		temp_prefered_lft;
 	__s32		regen_max_retry;
 	__s32		max_desync_factor;
-#endif
 	__s32		max_addresses;
 	__s32		accept_ra_defrtr;
 	__s32		accept_ra_pinfo;
@@ -115,16 +113,8 @@
 	return IP6CB(skb)->iif;
 }
 
-struct inet6_request_sock {
-	struct in6_addr		loc_addr;
-	struct in6_addr		rmt_addr;
-	struct sk_buff		*pktopts;
-	int			iif;
-};
-
 struct tcp6_request_sock {
 	struct tcp_request_sock	  tcp6rsk_tcp;
-	struct inet6_request_sock tcp6rsk_inet6;
 };
 
 struct ipv6_mc_socklist;
@@ -141,8 +131,6 @@
  */
 struct ipv6_pinfo {
 	struct in6_addr 	saddr;
-	struct in6_addr 	rcv_saddr;
-	struct in6_addr		daddr;
 	struct in6_pktinfo	sticky_pktinfo;
 	const struct in6_addr		*daddr_cache;
 #ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +244,22 @@
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-struct inet6_timewait_sock {
-	struct in6_addr tw_v6_daddr;
-	struct in6_addr	tw_v6_rcv_saddr;
-};
-
 struct tcp6_timewait_sock {
 	struct tcp_timewait_sock   tcp6tw_tcp;
-	struct inet6_timewait_sock tcp6tw_inet6;
 };
 
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
-	return (struct inet6_timewait_sock *)(((u8 *)sk) +
-					      inet_twsk(sk)->tw_ipv6_offset);
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
 	return inet_sk(__sk)->pinet6;
 }
 
-static inline struct inet6_request_sock *
-			inet6_rsk(const struct request_sock *rsk)
-{
-	return (struct inet6_request_sock *)(((u8 *)rsk) +
-					     inet_rsk(rsk)->inet6_rsk_offset);
-}
-
-static inline u32 inet6_rsk_offset(struct request_sock *rsk)
-{
-	return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
-}
-
 static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
 {
 	struct request_sock *req = reqsk_alloc(ops);
 
-	if (req != NULL) {
-		inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
-		inet6_rsk(req)->pktopts = NULL;
-	}
+	if (req)
+		inet_rsk(req)->pktopts = NULL;
 
 	return req;
 }
@@ -321,21 +283,11 @@
 #define __ipv6_only_sock(sk)	(inet6_sk(sk)->ipv6only)
 #define ipv6_only_sock(sk)	((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
 
-static inline u16 inet6_tw_offset(const struct proto *prot)
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
 {
-	return prot->twsk_prot->twsk_obj_size -
-			sizeof(struct inet6_timewait_sock);
-}
-
-static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
-{
-	return likely(sk->sk_state != TCP_TIME_WAIT) ?
-		&inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
-{
-	return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
+	if (sk->sk_family == AF_INET6)
+		return &sk->sk_v6_rcv_saddr;
+	return NULL;
 }
 
 static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +315,18 @@
 	return NULL;
 }
 
-#define __inet6_rcv_saddr(__sk)	NULL
 #define inet6_rcv_saddr(__sk)	NULL
 #define tcp_twsk_ipv6only(__sk)		0
 #define inet_v6_ipv6only(__sk)		0
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)	\
-	((inet_sk(__sk)->inet_portpair == (__ports))		&&	\
+	(((__sk)->sk_portpair == (__ports))			&&	\
 	 ((__sk)->sk_family == AF_INET6)			&&	\
-	 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))	&&	\
-	 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr))	&&	\
+	 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))		&&	\
+	 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))	&&	\
 	 (!(__sk)->sk_bound_dev_if	||				\
 	   ((__sk)->sk_bound_dev_if == (__dif))) 		&&	\
 	 net_eq(sock_net(__sk), (__net)))
 
-#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)	   \
-	((inet_twsk(__sk)->tw_portpair == (__ports))			&& \
-	 ((__sk)->sk_family == AF_INET6)				&& \
-	 ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))	&& \
-	 ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
-	 (!(__sk)->sk_bound_dev_if	||				   \
-	  ((__sk)->sk_bound_dev_if == (__dif)))				&& \
-	 net_eq(sock_net(__sk), (__net)))
-
 #endif /* _IPV6_H */
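
A sketch, not from this patch, of what the consolidation buys callers: with the request- and timewait-socket ipv6 fields folded into struct sock, one accessor now covers all socket states:

static bool demo_bound_to(const struct sock *sk, const struct in6_addr *a)
{
	const struct in6_addr *saddr = inet6_rcv_saddr(sk);

	return saddr && ipv6_addr_equal(saddr, a);
}
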
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 0e5d9ec..cac496b 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -31,6 +31,8 @@
 #define GIC_DIST_TARGET			0x800
 #define GIC_DIST_CONFIG			0xc00
 #define GIC_DIST_SOFTINT		0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR	0xf10
+#define GIC_DIST_SGI_PENDING_SET	0xf20
 
 #define GICH_HCR			0x0
 #define GICH_VTR			0x4
@@ -74,6 +76,11 @@
 	gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
 #endif /* __ASSEMBLY */
 
 #endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a507907..3999977 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -48,6 +48,13 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized,		      \
+				    "%s used before call to jump_label_init", \
+				    __func__)
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -128,29 +135,32 @@
 
 static __always_inline void jump_label_init(void)
 {
+	static_key_initialized = true;
 }
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-	if (unlikely(atomic_read(&key->enabled)) > 0)
+	if (unlikely(atomic_read(&key->enabled) > 0))
 		return true;
 	return false;
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (likely(atomic_read(&key->enabled)) > 0)
+	if (likely(atomic_read(&key->enabled) > 0))
 		return true;
 	return false;
 }
 
 static inline void static_key_slow_inc(struct static_key *key)
 {
+	STATIC_KEY_CHECK_USE();
 	atomic_inc(&key->enabled);
 }
 
 static inline void static_key_slow_dec(struct static_key *key)
 {
+	STATIC_KEY_CHECK_USE();
 	atomic_dec(&key->enabled);
 }
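
An illustrative static-key user, not from this patch; the names are hypothetical. The slow-path mutators must run after jump_label_init(), which is what STATIC_KEY_CHECK_USE() now enforces:

static struct static_key demo_key = STATIC_KEY_INIT_FALSE;

static void demo_fast_path(void)
{
	if (static_key_false(&demo_key))	/* rarely-taken branch */
		pr_debug("slow side enabled\n");
}

static void demo_enable(void)
{
	static_key_slow_inc(&demo_key);		/* warns if called too early */
}
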
 
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 1137883..089f70f 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -23,12 +23,14 @@
 };
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+	STATIC_KEY_CHECK_USE();
 	static_key_slow_dec(&key->key);
 }
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
+	STATIC_KEY_CHECK_USE();
 }
 #endif	/* HAVE_JUMP_LABEL */
 #endif	/* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 546eb6a..f65ce09 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -15,5 +15,6 @@
 #define KPF_OWNER_PRIVATE	37
 #define KPF_ARCH		38
 #define KPF_UNCACHED		39
+#define KPF_SOFTDIRTY		40
 
 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 672ddc4..d4e98d1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -501,7 +501,6 @@
 
 extern void tracing_start(void);
 extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
 
 static inline __printf(1, 2)
 void ____trace_printk_check_format(const char *fmt, ...)
@@ -639,7 +638,6 @@
 #else
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(int skip) { }
 
 static inline void tracing_on(void) { }
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 10308c6..552d51e 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,7 +1,7 @@
 /*
  * A generic kernel FIFO implementation
  *
- * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -67,9 +67,10 @@
 	union { \
 		struct __kfifo	kfifo; \
 		datatype	*type; \
+		const datatype	*const_type; \
 		char		(*rectype)[recsize]; \
 		ptrtype		*ptr; \
-		const ptrtype	*ptr_const; \
+		ptrtype const	*ptr_const; \
 	}
 
 #define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
@@ -386,16 +387,12 @@
 #define	kfifo_put(fifo, val) \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((val) + 1) __val = (val); \
+	typeof(*__tmp->const_type) __val = (val); \
 	unsigned int __ret; \
-	const size_t __recsize = sizeof(*__tmp->rectype); \
+	size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) { \
-		typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
-		__dummy = (typeof(__val))NULL; \
-	} \
 	if (__recsize) \
-		__ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \
+		__ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
 			__recsize); \
 	else { \
 		__ret = !kfifo_is_full(__tmp); \
@@ -404,7 +401,7 @@
 			((typeof(__tmp->type))__kfifo->data) : \
 			(__tmp->buf) \
 			)[__kfifo->in & __tmp->kfifo.mask] = \
-				*(typeof(__tmp->type))__val; \
+				(typeof(*__tmp->type))__val; \
 			smp_wmb(); \
 			__kfifo->in++; \
 		} \
@@ -415,7 +412,7 @@
 /**
  * kfifo_get - get data from the fifo
  * @fifo: address of the fifo to be used
- * @val: the var where to store the data to be added
+ * @val: address where to store the data
  *
  * This macro reads the data from the fifo.
  * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -428,12 +425,10 @@
 __kfifo_uint_must_check_helper( \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((val) + 1) __val = (val); \
+	typeof(__tmp->ptr) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) \
-		__val = (typeof(__tmp->ptr))0; \
 	if (__recsize) \
 		__ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
 			__recsize); \
@@ -456,7 +451,7 @@
 /**
  * kfifo_peek - get data from the fifo without removing
  * @fifo: address of the fifo to be used
- * @val: the var where to store the data to be added
+ * @val: address where to store the data
  *
  * This reads the data from the fifo without removing it from the fifo.
  * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -469,12 +464,10 @@
 __kfifo_uint_must_check_helper( \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((val) + 1) __val = (val); \
+	typeof(__tmp->ptr) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) \
-		__val = (typeof(__tmp->ptr))NULL; \
 	if (__recsize) \
 		__ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
 			__recsize); \
@@ -508,14 +501,10 @@
 #define	kfifo_in(fifo, buf, n) \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((buf) + 1) __buf = (buf); \
+	typeof(__tmp->ptr_const) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) { \
-		typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
-		__dummy = (typeof(__buf))NULL; \
-	} \
 	(__recsize) ?\
 	__kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
 	__kfifo_in(__kfifo, __buf, __n); \
@@ -561,14 +550,10 @@
 __kfifo_uint_must_check_helper( \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((buf) + 1) __buf = (buf); \
+	typeof(__tmp->ptr) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) { \
-		typeof(__tmp->ptr) __dummy = NULL; \
-		__buf = __dummy; \
-	} \
 	(__recsize) ?\
 	__kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
 	__kfifo_out(__kfifo, __buf, __n); \
@@ -773,14 +758,10 @@
 __kfifo_uint_must_check_helper( \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
-	typeof((buf) + 1) __buf = (buf); \
+	typeof(__tmp->ptr) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (0) { \
-		typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \
-		__buf = __dummy; \
-	} \
 	(__recsize) ? \
 	__kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
 	__kfifo_out_peek(__kfifo, __buf, __n); \
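
A minimal sketch of the user-visible effect of this rework, not from this patch: kfifo_put() now takes the element by value rather than through a pointer, while kfifo_get() still takes an output address:

DEFINE_KFIFO(demo_fifo, int, 16);	/* size must be a power of two */

static void demo_kfifo(void)
{
	int v;

	kfifo_put(&demo_fifo, 42);	/* pass the value, not &42 */
	if (kfifo_get(&demo_fifo, &v))	/* output is still by address */
		pr_debug("got %d\n", v);
}
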
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0fbbc7a..9523d2a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
@@ -189,8 +189,7 @@
 	gva_t gva;
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
-	struct page *page;
-	bool done;
+	bool   wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -508,9 +507,10 @@
 			  struct kvm_userspace_memory_region *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages);
 void kvm_arch_memslots_updated(struct kvm *kvm);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -671,6 +671,25 @@
 }
 #endif
 
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+	return false;
+}
+#endif
+
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
@@ -747,9 +766,6 @@
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY	0x1
-
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
@@ -789,7 +805,7 @@
 
 	/* KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
-	 * is very similar to exiting to userspase from rcu point of view. In
+	 * is very similar to exiting to userspace from rcu point of view. In
 	 * addition CPU may stay in a guest mode for quite a long time (up to
 	 * one time slice). Lets treat guest mode as quiescent state, just like
 	 * we do with user-mode execution.
@@ -842,13 +858,6 @@
 	return gfn_to_memslot(kvm, gfn)->id;
 }
 
-static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
-{
-	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
-	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-}
-
 static inline gfn_t
 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
@@ -1066,6 +1075,7 @@
 
 extern struct kvm_device_ops kvm_mpic_ops;
 extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_vfio_ops;
 
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0d24e93..96549ab 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -25,16 +25,6 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 
-/* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)	lg_lock_init(name, #name)
-#define br_read_lock(name)	lg_local_lock(name)
-#define br_read_unlock(name)	lg_local_unlock(name)
-#define br_write_lock(name)	lg_global_lock(name)
-#define br_write_unlock(name)	lg_global_unlock(name)
-
-#define DEFINE_BRLOCK(name)		DEFINE_LGLOCK(name)
-#define DEFINE_STATIC_BRLOCK(name)	DEFINE_STATIC_LGLOCK(name)
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 #else
diff --git a/include/linux/list.h b/include/linux/list.h
index f4d8a2f..ef95941 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@
 	list_entry((ptr)->next, type, member)
 
 /**
+ * list_last_entry - get the last element from a list
+ * @ptr:	the list head to take the element from.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ */
+#define list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
+/**
  * list_first_entry_or_null - get the first element from a list
  * @ptr:	the list head to take the element from.
  * @type:	the type of the struct this is embedded in.
@@ -373,6 +384,22 @@
 	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
 
 /**
+ * list_next_entry - get the next element in list
+ * @pos:	the type * to cursor
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos:	the type * to cursor
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_prev_entry(pos, member) \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
+
+/**
  * list_for_each	-	iterate over a list
  * @pos:	the &struct list_head to use as a loop cursor.
  * @head:	the head for your list.
@@ -416,9 +443,9 @@
  * @member:	the name of the list_struct within the struct.
  */
 #define list_for_each_entry(pos, head, member)				\
-	for (pos = list_entry((head)->next, typeof(*pos), member);	\
-	     &pos->member != (head); 	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (pos = list_first_entry(head, typeof(*pos), member);	\
+	     &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
 
 /**
  * list_for_each_entry_reverse - iterate backwards over list of given type.
@@ -427,9 +454,9 @@
  * @member:	the name of the list_struct within the struct.
  */
 #define list_for_each_entry_reverse(pos, head, member)			\
-	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
-	     &pos->member != (head); 	\
-	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+	for (pos = list_last_entry(head, typeof(*pos), member);		\
+	     &pos->member != (head); 					\
+	     pos = list_prev_entry(pos, member))
 
 /**
  * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
@@ -452,9 +479,9 @@
  * the current position.
  */
 #define list_for_each_entry_continue(pos, head, member) 		\
-	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
-	     &pos->member != (head);	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (pos = list_next_entry(pos, member);			\
+	     &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
 
 /**
  * list_for_each_entry_continue_reverse - iterate backwards from the given point
@@ -466,9 +493,9 @@
  * the current position.
  */
 #define list_for_each_entry_continue_reverse(pos, head, member)		\
-	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
-	     &pos->member != (head);	\
-	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+	for (pos = list_prev_entry(pos, member);			\
+	     &pos->member != (head);					\
+	     pos = list_prev_entry(pos, member))
 
 /**
  * list_for_each_entry_from - iterate over list of given type from the current point
@@ -479,8 +506,8 @@
  * Iterate over list of given type, continuing from current position.
  */
 #define list_for_each_entry_from(pos, head, member) 			\
-	for (; &pos->member != (head);	\
-	     pos = list_entry(pos->member.next, typeof(*pos), member))
+	for (; &pos->member != (head);					\
+	     pos = list_next_entry(pos, member))
 
 /**
  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
@@ -490,10 +517,10 @@
  * @member:	the name of the list_struct within the struct.
  */
 #define list_for_each_entry_safe(pos, n, head, member)			\
-	for (pos = list_entry((head)->next, typeof(*pos), member),	\
-		n = list_entry(pos->member.next, typeof(*pos), member);	\
+	for (pos = list_first_entry(head, typeof(*pos), member),	\
+		n = list_next_entry(pos, member);			\
 	     &pos->member != (head); 					\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
  * list_for_each_entry_safe_continue - continue list iteration safe against removal
@@ -506,10 +533,10 @@
  * safe against removal of list entry.
  */
 #define list_for_each_entry_safe_continue(pos, n, head, member) 		\
-	for (pos = list_entry(pos->member.next, typeof(*pos), member), 		\
-		n = list_entry(pos->member.next, typeof(*pos), member);		\
+	for (pos = list_next_entry(pos, member), 				\
+		n = list_next_entry(pos, member);				\
 	     &pos->member != (head);						\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
  * list_for_each_entry_safe_from - iterate over list from current point safe against removal
@@ -522,9 +549,9 @@
  * removal of list entry.
  */
 #define list_for_each_entry_safe_from(pos, n, head, member) 			\
-	for (n = list_entry(pos->member.next, typeof(*pos), member);		\
+	for (n = list_next_entry(pos, member);					\
 	     &pos->member != (head);						\
-	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+	     pos = n, n = list_next_entry(n, member))
 
 /**
  * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
@@ -537,10 +564,10 @@
  * of list entry.
  */
 #define list_for_each_entry_safe_reverse(pos, n, head, member)		\
-	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
-		n = list_entry(pos->member.prev, typeof(*pos), member);	\
+	for (pos = list_last_entry(head, typeof(*pos), member),		\
+		n = list_prev_entry(pos, member);			\
 	     &pos->member != (head); 					\
-	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+	     pos = n, n = list_prev_entry(n, member))
 
 /**
  * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
@@ -555,7 +582,7 @@
  * completing the current iteration of the loop body.
  */
 #define list_safe_reset_next(pos, n, member)				\
-	n = list_entry(pos->member.next, typeof(*pos), member)
+	n = list_next_entry(pos, member)
 
 /*
  * Double linked lists with a single pointer list head.
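
An illustrative use of the new accessors, not from this patch; the item type is hypothetical:

struct demo_item {
	int			value;
	struct list_head	node;
};

static int demo_last_value(struct list_head *head)
{
	struct demo_item *it;

	if (list_empty(head))
		return -1;
	it = list_last_entry(head, struct demo_item, node);
	return it->value;
}
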
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 8828a78..fbf10a0 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -195,4 +195,6 @@
 
 extern struct llist_node *llist_del_first(struct llist_head *head);
 
+struct llist_node *llist_reverse_order(struct llist_node *head);
+
 #endif /* LLIST_H */
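
Because llist_add() pushes to the front, llist_del_all() hands back the batch in LIFO order; the new helper restores insertion order. A hypothetical consumer, not from this patch:

static void demo_drain(struct llist_head *head)
{
	struct llist_node *batch = llist_del_all(head);

	batch = llist_reverse_order(batch);
	/* walk batch in the order the items were added */
}
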
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cfc2f11..92b1bfc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -497,6 +497,10 @@
 #define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
 #define rwlock_release(l, n, i)			lock_release(l, n, i)
 
+#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i)		lock_release(l, n, i)
+
 #define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define mutex_release(l, n, i)			lock_release(l, n, i)
@@ -504,11 +508,11 @@
 #define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
-# define rwsem_release(l, n, i)			lock_release(l, n, i)
+#define rwsem_release(l, n, i)			lock_release(l, n, i)
 
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
-# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) 						\
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 13dfd36..c8929c3 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -15,10 +15,15 @@
  */
 
 #include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+	 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
 
 struct lockref {
 	union {
-#ifdef CONFIG_CMPXCHG_LOCKREF
+#if USE_CMPXCHG_LOCKREF
 		aligned_u64 lock_count;
 #endif
 		struct {
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 31e95ac..77c60e5 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -35,6 +35,7 @@
 };
 
 struct memblock {
+	bool bottom_up;  /* allocate in bottom-up direction? */
 	phys_addr_t current_limit;
 	struct memblock_type memory;
 	struct memblock_type reserved;
@@ -148,6 +149,29 @@
 
 phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
 
+#ifdef CONFIG_MOVABLE_NODE
+/*
+ * Set the allocation direction to bottom-up or top-down.
+ */
+static inline void memblock_set_bottom_up(bool enable)
+{
+	memblock.bottom_up = enable;
+}
+
+/*
+ * Check whether the allocation direction is bottom-up. If this
+ * returns true, memblock allocates memory in the bottom-up direction.
+ */
+static inline bool memblock_bottom_up(void)
+{
+	return memblock.bottom_up;
+}
+#else
+static inline void memblock_set_bottom_up(bool enable) {}
+static inline bool memblock_bottom_up(void) { return false; }
+#endif
+
 /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
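
A sketch of how early boot code might use the new direction switch; not from this patch, and the function is hypothetical:

static void __init demo_movable_setup(void)
{
	memblock_set_bottom_up(true);
	/* ... early allocations now grow upward from low memory ... */
	if (memblock_bottom_up())
		memblock_set_bottom_up(false);	/* restore top-down */
}
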
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index dd38e62..4ca3d95 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,6 +94,8 @@
 extern void __online_page_increment_counters(struct page *page);
 extern void __online_page_free(struct page *page);
 
+extern int try_online_node(int nid);
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 extern bool is_pageblock_removable_nolock(struct page *page);
 extern int arch_remove_memory(u64 start, u64 size);
@@ -225,6 +227,11 @@
 {
 }
 
+static inline int try_online_node(int nid)
+{
+	return 0;
+}
+
 static inline void lock_memory_hotplug(void) {}
 static inline void unlock_memory_hotplug(void) {}
 
@@ -256,14 +263,12 @@
 
 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
-extern int mem_online_node(int nid);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-								int nr_pages);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
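
A hypothetical caller of the new try_online_node() hook, not from this patch; whether a given caller also needs add_memory() depends on context:

static int demo_add_to_node(int nid, u64 start, u64 size)
{
	int ret = try_online_node(nid);	/* 0 if the node is/comes online */

	if (ret < 0)
		return ret;
	return add_memory(nid, start, size);
}
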
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index ea4d249..9fe426b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -169,7 +169,7 @@
 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
 #endif
 
-extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
+extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
@@ -307,9 +307,8 @@
 }
 #endif
 
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+static inline void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 {
-	return 0;
 }
 
 static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 4706d3d..cb49417 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1908,7 +1908,7 @@
 #define ARIZONA_FLL2_SYNC_GAIN_MASK              0x003c  /* FLL2_SYNC_GAIN */
 #define ARIZONA_FLL2_SYNC_GAIN_SHIFT                  2  /* FLL2_SYNC_GAIN */
 #define ARIZONA_FLL2_SYNC_GAIN_WIDTH                  4  /* FLL2_SYNC_GAIN */
-#define ARIZONA_FLL2_SYNC_BW_MASK                0x0001  /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW                     0x0001  /* FLL2_SYNC_BW */
 #define ARIZONA_FLL2_SYNC_BW_MASK                0x0001  /* FLL2_SYNC_BW */
 #define ARIZONA_FLL2_SYNC_BW_SHIFT                    0  /* FLL2_SYNC_BW */
 #define ARIZONA_FLL2_SYNC_BW_WIDTH                    1  /* FLL2_SYNC_BW */
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
new file mode 100644
index 0000000..16bf8a0
--- /dev/null
+++ b/include/linux/mfd/as3722.h
@@ -0,0 +1,423 @@
+/*
+ * as3722 definitions
+ *
+ * Copyright (C) 2013 ams
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ */
+
+#ifndef __LINUX_MFD_AS3722_H__
+#define __LINUX_MFD_AS3722_H__
+
+#include <linux/regmap.h>
+
+/* AS3722 registers */
+#define AS3722_SD0_VOLTAGE_REG				0x00
+#define AS3722_SD1_VOLTAGE_REG				0x01
+#define AS3722_SD2_VOLTAGE_REG				0x02
+#define AS3722_SD3_VOLTAGE_REG				0x03
+#define AS3722_SD4_VOLTAGE_REG				0x04
+#define AS3722_SD5_VOLTAGE_REG				0x05
+#define AS3722_SD6_VOLTAGE_REG				0x06
+#define AS3722_GPIO0_CONTROL_REG			0x08
+#define AS3722_GPIO1_CONTROL_REG			0x09
+#define AS3722_GPIO2_CONTROL_REG			0x0A
+#define AS3722_GPIO3_CONTROL_REG			0x0B
+#define AS3722_GPIO4_CONTROL_REG			0x0C
+#define AS3722_GPIO5_CONTROL_REG			0x0D
+#define AS3722_GPIO6_CONTROL_REG			0x0E
+#define AS3722_GPIO7_CONTROL_REG			0x0F
+#define AS3722_LDO0_VOLTAGE_REG				0x10
+#define AS3722_LDO1_VOLTAGE_REG				0x11
+#define AS3722_LDO2_VOLTAGE_REG				0x12
+#define AS3722_LDO3_VOLTAGE_REG				0x13
+#define AS3722_LDO4_VOLTAGE_REG				0x14
+#define AS3722_LDO5_VOLTAGE_REG				0x15
+#define AS3722_LDO6_VOLTAGE_REG				0x16
+#define AS3722_LDO7_VOLTAGE_REG				0x17
+#define AS3722_LDO9_VOLTAGE_REG				0x19
+#define AS3722_LDO10_VOLTAGE_REG			0x1A
+#define AS3722_LDO11_VOLTAGE_REG			0x1B
+#define AS3722_GPIO_DEB1_REG				0x1E
+#define AS3722_GPIO_DEB2_REG				0x1F
+#define AS3722_GPIO_SIGNAL_OUT_REG			0x20
+#define AS3722_GPIO_SIGNAL_IN_REG			0x21
+#define AS3722_REG_SEQU_MOD1_REG			0x22
+#define AS3722_REG_SEQU_MOD2_REG			0x23
+#define AS3722_REG_SEQU_MOD3_REG			0x24
+#define AS3722_SD_PHSW_CTRL_REG				0x27
+#define AS3722_SD_PHSW_STATUS				0x28
+#define AS3722_SD0_CONTROL_REG				0x29
+#define AS3722_SD1_CONTROL_REG				0x2A
+#define AS3722_SDmph_CONTROL_REG			0x2B
+#define AS3722_SD23_CONTROL_REG				0x2C
+#define AS3722_SD4_CONTROL_REG				0x2D
+#define AS3722_SD5_CONTROL_REG				0x2E
+#define AS3722_SD6_CONTROL_REG				0x2F
+#define AS3722_SD_DVM_REG				0x30
+#define AS3722_RESET_REASON_REG				0x31
+#define AS3722_BATTERY_VOLTAGE_MONITOR_REG		0x32
+#define AS3722_STARTUP_CONTROL_REG			0x33
+#define AS3722_RESET_TIMER_REG				0x34
+#define AS3722_REFERENCE_CONTROL_REG			0x35
+#define AS3722_RESET_CONTROL_REG			0x36
+#define AS3722_OVER_TEMP_CONTROL_REG			0x37
+#define AS3722_WATCHDOG_CONTROL_REG			0x38
+#define AS3722_REG_STANDBY_MOD1_REG			0x39
+#define AS3722_REG_STANDBY_MOD2_REG			0x3A
+#define AS3722_REG_STANDBY_MOD3_REG			0x3B
+#define AS3722_ENABLE_CTRL1_REG				0x3C
+#define AS3722_ENABLE_CTRL2_REG				0x3D
+#define AS3722_ENABLE_CTRL3_REG				0x3E
+#define AS3722_ENABLE_CTRL4_REG				0x3F
+#define AS3722_ENABLE_CTRL5_REG				0x40
+#define AS3722_PWM_CONTROL_L_REG			0x41
+#define AS3722_PWM_CONTROL_H_REG			0x42
+#define AS3722_WATCHDOG_TIMER_REG			0x46
+#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG		0x48
+#define AS3722_IOVOLTAGE_REG				0x49
+#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG		0x4A
+#define AS3722_SD_CONTROL_REG				0x4D
+#define AS3722_LDOCONTROL0_REG				0x4E
+#define AS3722_LDOCONTROL1_REG				0x4F
+#define AS3722_SD0_PROTECT_REG				0x50
+#define AS3722_SD6_PROTECT_REG				0x51
+#define AS3722_PWM_VCONTROL1_REG			0x52
+#define AS3722_PWM_VCONTROL2_REG			0x53
+#define AS3722_PWM_VCONTROL3_REG			0x54
+#define AS3722_PWM_VCONTROL4_REG			0x55
+#define AS3722_BB_CHARGER_REG				0x57
+#define AS3722_CTRL_SEQU1_REG				0x58
+#define AS3722_CTRL_SEQU2_REG				0x59
+#define AS3722_OVCURRENT_REG				0x5A
+#define AS3722_OVCURRENT_DEB_REG			0x5B
+#define AS3722_SDLV_DEB_REG				0x5C
+#define AS3722_OC_PG_CTRL_REG				0x5D
+#define AS3722_OC_PG_CTRL2_REG				0x5E
+#define AS3722_CTRL_STATUS				0x5F
+#define AS3722_RTC_CONTROL_REG				0x60
+#define AS3722_RTC_SECOND_REG				0x61
+#define AS3722_RTC_MINUTE_REG				0x62
+#define AS3722_RTC_HOUR_REG				0x63
+#define AS3722_RTC_DAY_REG				0x64
+#define AS3722_RTC_MONTH_REG				0x65
+#define AS3722_RTC_YEAR_REG				0x66
+#define AS3722_RTC_ALARM_SECOND_REG			0x67
+#define AS3722_RTC_ALARM_MINUTE_REG			0x68
+#define AS3722_RTC_ALARM_HOUR_REG			0x69
+#define AS3722_RTC_ALARM_DAY_REG			0x6A
+#define AS3722_RTC_ALARM_MONTH_REG			0x6B
+#define AS3722_RTC_ALARM_YEAR_REG			0x6C
+#define AS3722_SRAM_REG					0x6D
+#define AS3722_RTC_ACCESS_REG				0x6F
+#define AS3722_RTC_STATUS_REG				0x73
+#define AS3722_INTERRUPT_MASK1_REG			0x74
+#define AS3722_INTERRUPT_MASK2_REG			0x75
+#define AS3722_INTERRUPT_MASK3_REG			0x76
+#define AS3722_INTERRUPT_MASK4_REG			0x77
+#define AS3722_INTERRUPT_STATUS1_REG			0x78
+#define AS3722_INTERRUPT_STATUS2_REG			0x79
+#define AS3722_INTERRUPT_STATUS3_REG			0x7A
+#define AS3722_INTERRUPT_STATUS4_REG			0x7B
+#define AS3722_TEMP_STATUS_REG				0x7D
+#define AS3722_ADC0_CONTROL_REG				0x80
+#define AS3722_ADC1_CONTROL_REG				0x81
+#define AS3722_ADC0_MSB_RESULT_REG			0x82
+#define AS3722_ADC0_LSB_RESULT_REG			0x83
+#define AS3722_ADC1_MSB_RESULT_REG			0x84
+#define AS3722_ADC1_LSB_RESULT_REG			0x85
+#define AS3722_ADC1_THRESHOLD_HI_MSB_REG		0x86
+#define AS3722_ADC1_THRESHOLD_HI_LSB_REG		0x87
+#define AS3722_ADC1_THRESHOLD_LO_MSB_REG		0x88
+#define AS3722_ADC1_THRESHOLD_LO_LSB_REG		0x89
+#define AS3722_ADC_CONFIGURATION_REG			0x8A
+#define AS3722_ASIC_ID1_REG				0x90
+#define AS3722_ASIC_ID2_REG				0x91
+#define AS3722_LOCK_REG					0x9E
+#define AS3722_MAX_REGISTER				0xF4
+
+#define AS3722_SD0_EXT_ENABLE_MASK			0x03
+#define AS3722_SD1_EXT_ENABLE_MASK			0x0C
+#define AS3722_SD2_EXT_ENABLE_MASK			0x30
+#define AS3722_SD3_EXT_ENABLE_MASK			0xC0
+#define AS3722_SD4_EXT_ENABLE_MASK			0x03
+#define AS3722_SD5_EXT_ENABLE_MASK			0x0C
+#define AS3722_SD6_EXT_ENABLE_MASK			0x30
+#define AS3722_LDO0_EXT_ENABLE_MASK			0x03
+#define AS3722_LDO1_EXT_ENABLE_MASK			0x0C
+#define AS3722_LDO2_EXT_ENABLE_MASK			0x30
+#define AS3722_LDO3_EXT_ENABLE_MASK			0xC0
+#define AS3722_LDO4_EXT_ENABLE_MASK			0x03
+#define AS3722_LDO5_EXT_ENABLE_MASK			0x0C
+#define AS3722_LDO6_EXT_ENABLE_MASK			0x30
+#define AS3722_LDO7_EXT_ENABLE_MASK			0xC0
+#define AS3722_LDO9_EXT_ENABLE_MASK			0x0C
+#define AS3722_LDO10_EXT_ENABLE_MASK			0x30
+#define AS3722_LDO11_EXT_ENABLE_MASK			0xC0
+
+#define AS3722_OVCURRENT_SD0_ALARM_MASK			0x07
+#define AS3722_OVCURRENT_SD0_ALARM_SHIFT		0x01
+#define AS3722_OVCURRENT_SD0_TRIP_MASK			0x18
+#define AS3722_OVCURRENT_SD0_TRIP_SHIFT			0x03
+#define AS3722_OVCURRENT_SD1_TRIP_MASK			0x60
+#define AS3722_OVCURRENT_SD1_TRIP_SHIFT			0x05
+
+#define AS3722_OVCURRENT_SD6_ALARM_MASK			0x07
+#define AS3722_OVCURRENT_SD6_ALARM_SHIFT		0x01
+#define AS3722_OVCURRENT_SD6_TRIP_MASK			0x18
+#define AS3722_OVCURRENT_SD6_TRIP_SHIFT			0x03
+
+/* AS3722 register bits and bit masks */
+#define AS3722_LDO_ILIMIT_MASK				BIT(7)
+#define AS3722_LDO_ILIMIT_BIT				BIT(7)
+#define AS3722_LDO0_VSEL_MASK				0x1F
+#define AS3722_LDO0_VSEL_MIN				0x01
+#define AS3722_LDO0_VSEL_MAX				0x12
+#define AS3722_LDO0_NUM_VOLT				0x12
+#define AS3722_LDO3_VSEL_MASK				0x3F
+#define AS3722_LDO3_VSEL_MIN				0x01
+#define AS3722_LDO3_VSEL_MAX				0x2D
+#define AS3722_LDO3_NUM_VOLT				0x2D
+#define AS3722_LDO_VSEL_MASK				0x7F
+#define AS3722_LDO_VSEL_MIN				0x01
+#define AS3722_LDO_VSEL_MAX				0x7F
+#define AS3722_LDO_VSEL_DNU_MIN				0x25
+#define AS3722_LDO_VSEL_DNU_MAX				0x3F
+#define AS3722_LDO_NUM_VOLT				0x80
+
+#define AS3722_LDO0_CTRL				BIT(0)
+#define AS3722_LDO1_CTRL				BIT(1)
+#define AS3722_LDO2_CTRL				BIT(2)
+#define AS3722_LDO3_CTRL				BIT(3)
+#define AS3722_LDO4_CTRL				BIT(4)
+#define AS3722_LDO5_CTRL				BIT(5)
+#define AS3722_LDO6_CTRL				BIT(6)
+#define AS3722_LDO7_CTRL				BIT(7)
+#define AS3722_LDO9_CTRL				BIT(1)
+#define AS3722_LDO10_CTRL				BIT(2)
+#define AS3722_LDO11_CTRL				BIT(3)
+
+#define AS3722_LDO3_MODE_MASK				(3 << 6)
+#define AS3722_LDO3_MODE_VAL(n)				(((n) & 0x3) << 6)
+#define AS3722_LDO3_MODE_PMOS				AS3722_LDO3_MODE_VAL(0)
+#define AS3722_LDO3_MODE_PMOS_TRACKING			AS3722_LDO3_MODE_VAL(1)
+#define AS3722_LDO3_MODE_NMOS				AS3722_LDO3_MODE_VAL(2)
+#define AS3722_LDO3_MODE_SWITCH				AS3722_LDO3_MODE_VAL(3)
+
+#define AS3722_SD_VSEL_MASK				0x7F
+#define AS3722_SD0_VSEL_MIN				0x01
+#define AS3722_SD0_VSEL_MAX				0x5A
+#define AS3722_SD2_VSEL_MIN				0x01
+#define AS3722_SD2_VSEL_MAX				0x7F
+
+#define AS3722_SDn_CTRL(n)				BIT(n)
+
+#define AS3722_SD0_MODE_FAST				BIT(4)
+#define AS3722_SD1_MODE_FAST				BIT(4)
+#define AS3722_SD2_MODE_FAST				BIT(2)
+#define AS3722_SD3_MODE_FAST				BIT(6)
+#define AS3722_SD4_MODE_FAST				BIT(2)
+#define AS3722_SD5_MODE_FAST				BIT(2)
+#define AS3722_SD6_MODE_FAST				BIT(4)
+
+#define AS3722_POWER_OFF				BIT(1)
+
+#define AS3722_INTERRUPT_MASK1_LID			BIT(0)
+#define AS3722_INTERRUPT_MASK1_ACOK			BIT(1)
+#define AS3722_INTERRUPT_MASK1_ENABLE1			BIT(2)
+#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0		BIT(3)
+#define AS3722_INTERRUPT_MASK1_ONKEY_LONG		BIT(4)
+#define AS3722_INTERRUPT_MASK1_ONKEY			BIT(5)
+#define AS3722_INTERRUPT_MASK1_OVTMP			BIT(6)
+#define AS3722_INTERRUPT_MASK1_LOWBAT			BIT(7)
+
+#define AS3722_INTERRUPT_MASK2_SD0_LV			BIT(0)
+#define AS3722_INTERRUPT_MASK2_SD1_LV			BIT(1)
+#define AS3722_INTERRUPT_MASK2_SD2345_LV		BIT(2)
+#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT		BIT(3)
+#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT		BIT(4)
+#define AS3722_INTERRUPT_MASK2_ENABLE2			BIT(5)
+#define AS3722_INTERRUPT_MASK2_SD6_LV			BIT(6)
+#define AS3722_INTERRUPT_MASK2_RTC_REP			BIT(7)
+
+#define AS3722_INTERRUPT_MASK3_RTC_ALARM		BIT(0)
+#define AS3722_INTERRUPT_MASK3_GPIO1			BIT(1)
+#define AS3722_INTERRUPT_MASK3_GPIO2			BIT(2)
+#define AS3722_INTERRUPT_MASK3_GPIO3			BIT(3)
+#define AS3722_INTERRUPT_MASK3_GPIO4			BIT(4)
+#define AS3722_INTERRUPT_MASK3_GPIO5			BIT(5)
+#define AS3722_INTERRUPT_MASK3_WATCHDOG			BIT(6)
+#define AS3722_INTERRUPT_MASK3_ENABLE3			BIT(7)
+
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN	BIT(0)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN	BIT(1)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN	BIT(2)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM		BIT(3)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM		BIT(4)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM		BIT(5)
+#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6		BIT(6)
+#define AS3722_INTERRUPT_MASK4_ADC			BIT(7)
+
+#define AS3722_ADC1_INTERVAL_TIME			BIT(0)
+#define AS3722_ADC1_INT_MODE_ON				BIT(1)
+#define AS3722_ADC_BUF_ON				BIT(2)
+#define AS3722_ADC1_LOW_VOLTAGE_RANGE			BIT(5)
+#define AS3722_ADC1_INTEVAL_SCAN			BIT(6)
+#define AS3722_ADC1_INT_MASK				BIT(7)
+
+#define AS3722_ADC_MSB_VAL_MASK				0x7F
+#define AS3722_ADC_LSB_VAL_MASK				0x07
+
+#define AS3722_ADC0_CONV_START				BIT(7)
+#define AS3722_ADC0_CONV_NOTREADY			BIT(7)
+#define AS3722_ADC0_SOURCE_SELECT_MASK			0x1F
+
+#define AS3722_ADC1_CONV_START				BIT(7)
+#define AS3722_ADC1_CONV_NOTREADY			BIT(7)
+#define AS3722_ADC1_SOURCE_SELECT_MASK			0x1F
+
+/* GPIO modes */
+#define AS3722_GPIO_MODE_MASK				0x07
+#define AS3722_GPIO_MODE_INPUT				0x00
+#define AS3722_GPIO_MODE_OUTPUT_VDDH			0x01
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN			0x02
+#define AS3722_GPIO_MODE_ADC_IN				0x03
+#define AS3722_GPIO_MODE_INPUT_PULL_UP			0x04
+#define AS3722_GPIO_MODE_INPUT_PULL_DOWN		0x05
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP		0x06
+#define AS3722_GPIO_MODE_OUTPUT_VDDL			0x07
+#define AS3722_GPIO_MODE_VAL(n)			((n) & AS3722_GPIO_MODE_MASK)
+
+#define AS3722_GPIO_INV					BIT(7)
+#define AS3722_GPIO_IOSF_MASK				0x78
+#define AS3722_GPIO_IOSF_VAL(n)				(((n) & 0xF) << 3)
+#define AS3722_GPIO_IOSF_NORMAL				AS3722_GPIO_IOSF_VAL(0)
+#define AS3722_GPIO_IOSF_INTERRUPT_OUT			AS3722_GPIO_IOSF_VAL(1)
+#define AS3722_GPIO_IOSF_VSUP_LOW_OUT			AS3722_GPIO_IOSF_VAL(2)
+#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN		AS3722_GPIO_IOSF_VAL(3)
+#define AS3722_GPIO_IOSF_ISINK_PWM_IN			AS3722_GPIO_IOSF_VAL(4)
+#define AS3722_GPIO_IOSF_VOLTAGE_STBY			AS3722_GPIO_IOSF_VAL(5)
+#define AS3722_GPIO_IOSF_PWR_GOOD_OUT			AS3722_GPIO_IOSF_VAL(7)
+#define AS3722_GPIO_IOSF_Q32K_OUT			AS3722_GPIO_IOSF_VAL(8)
+#define AS3722_GPIO_IOSF_WATCHDOG_IN			AS3722_GPIO_IOSF_VAL(9)
+#define AS3722_GPIO_IOSF_SOFT_RESET_IN			AS3722_GPIO_IOSF_VAL(11)
+#define AS3722_GPIO_IOSF_PWM_OUT			AS3722_GPIO_IOSF_VAL(12)
+#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT		AS3722_GPIO_IOSF_VAL(13)
+#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW		AS3722_GPIO_IOSF_VAL(14)
+
+#define AS3722_GPIOn_SIGNAL(n)				BIT(n)
+#define AS3722_GPIOn_CONTROL_REG(n)	(AS3722_GPIO0_CONTROL_REG + (n))
+#define AS3722_I2C_PULL_UP				BIT(4)
+#define AS3722_INT_PULL_UP				BIT(5)
+
+#define AS3722_RTC_REP_WAKEUP_EN			BIT(0)
+#define AS3722_RTC_ALARM_WAKEUP_EN			BIT(1)
+#define AS3722_RTC_ON					BIT(2)
+#define AS3722_RTC_IRQMODE				BIT(3)
+#define AS3722_RTC_CLK32K_OUT_EN			BIT(5)
+
+#define AS3722_WATCHDOG_TIMER_MAX			0x7F
+#define AS3722_WATCHDOG_ON				BIT(0)
+#define AS3722_WATCHDOG_SW_SIG				BIT(0)
+
+#define AS3722_EXT_CONTROL_ENABLE1			0x1
+#define AS3722_EXT_CONTROL_ENABLE2			0x2
+#define AS3722_EXT_CONTROL_ENABLE3			0x3
+
+/* Interrupt IDs */
+enum as3722_irq {
+	AS3722_IRQ_LID,
+	AS3722_IRQ_ACOK,
+	AS3722_IRQ_ENABLE1,
+	AS3722_IRQ_OCCUR_ALARM_SD0,
+	AS3722_IRQ_ONKEY_LONG_PRESS,
+	AS3722_IRQ_ONKEY,
+	AS3722_IRQ_OVTMP,
+	AS3722_IRQ_LOWBAT,
+	AS3722_IRQ_SD0_LV,
+	AS3722_IRQ_SD1_LV,
+	AS3722_IRQ_SD2_LV,
+	AS3722_IRQ_PWM1_OV_PROT,
+	AS3722_IRQ_PWM2_OV_PROT,
+	AS3722_IRQ_ENABLE2,
+	AS3722_IRQ_SD6_LV,
+	AS3722_IRQ_RTC_REP,
+	AS3722_IRQ_RTC_ALARM,
+	AS3722_IRQ_GPIO1,
+	AS3722_IRQ_GPIO2,
+	AS3722_IRQ_GPIO3,
+	AS3722_IRQ_GPIO4,
+	AS3722_IRQ_GPIO5,
+	AS3722_IRQ_WATCHDOG,
+	AS3722_IRQ_ENABLE3,
+	AS3722_IRQ_TEMP_SD0_SHUTDOWN,
+	AS3722_IRQ_TEMP_SD1_SHUTDOWN,
+	AS3722_IRQ_TEMP_SD2_SHUTDOWN,
+	AS3722_IRQ_TEMP_SD0_ALARM,
+	AS3722_IRQ_TEMP_SD1_ALARM,
+	AS3722_IRQ_TEMP_SD6_ALARM,
+	AS3722_IRQ_OCCUR_ALARM_SD6,
+	AS3722_IRQ_ADC,
+	AS3722_IRQ_MAX,
+};
+
+struct as3722 {
+	struct device *dev;
+	struct regmap *regmap;
+	int chip_irq;
+	unsigned long irq_flags;
+	bool en_intern_int_pullup;
+	bool en_intern_i2c_pullup;
+	struct regmap_irq_chip_data *irq_data;
+};
+
+static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest)
+{
+	return regmap_read(as3722->regmap, reg, dest);
+}
+
+static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value)
+{
+	return regmap_write(as3722->regmap, reg, value);
+}
+
+static inline int as3722_block_read(struct as3722 *as3722, u32 reg,
+		int count, u8 *buf)
+{
+	return regmap_bulk_read(as3722->regmap, reg, buf, count);
+}
+
+static inline int as3722_block_write(struct as3722 *as3722, u32 reg,
+		int count, u8 *data)
+{
+	return regmap_bulk_write(as3722->regmap, reg, data, count);
+}
+
+static inline int as3722_update_bits(struct as3722 *as3722, u32 reg,
+		u32 mask, u8 val)
+{
+	return regmap_update_bits(as3722->regmap, reg, mask, val);
+}
+
+static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq)
+{
+	return regmap_irq_get_virq(as3722->irq_data, irq);
+}
+#endif /* __LINUX_MFD_AS3722_H__ */
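
The as3722_* wrappers above are thin shims over regmap, so drivers built on
this header call them directly. A minimal usage sketch, assuming a
hypothetical register address (0x60 is a placeholder, not taken from this
header; AS3722_RTC_ON is defined above):

	/* Sketch: turn the RTC on without disturbing the other control bits. */
	static int example_enable_rtc(struct as3722 *as3722)
	{
		return as3722_update_bits(as3722, 0x60 /* placeholder reg */,
					  AS3722_RTC_ON, AS3722_RTC_ON);
	}
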
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 7314fc4..bdba8c6 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -104,7 +104,7 @@
 }
 
 extern int mfd_add_devices(struct device *parent, int id,
-			   struct mfd_cell *cells, int n_devs,
+			   const struct mfd_cell *cells, int n_devs,
 			   struct resource *mem_base,
 			   int irq_base, struct irq_domain *irq_domain);
 
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 786d02e..21e21b8 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -148,10 +148,15 @@
 				     unsigned reg_cnt, unsigned char *val)
 {
 	int ret;
+	unsigned int tmp;
+	int i;
 
-	ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
-	if (ret < 0)
-		return ret;
+	for (i = 0; i < reg_cnt; i++) {
+		ret = regmap_read(da9052->regmap, reg + i, &tmp);
+		if (ret < 0)
+			return ret;
+		val[i] = (unsigned char)tmp;
+	}
 
 	if (da9052->fix_io) {
 		ret = da9052->fix_io(da9052, reg);
@@ -166,10 +171,13 @@
 				      unsigned reg_cnt, unsigned char *val)
 {
 	int ret;
+	int i;
 
-	ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
-	if (ret < 0)
-		return ret;
+	for (i = 0; i < reg_cnt; i++) {
+		ret = regmap_write(da9052->regmap, reg + i, val[i]);
+		if (ret < 0)
+			return ret;
+	}
 
 	if (da9052->fix_io) {
 		ret = da9052->fix_io(da9052, reg);
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 244fb0d..3e050b9 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -323,7 +323,6 @@
 
 	int irq;
 	int irq_gpio;
-	bool wakeup;
 	struct mutex irqlock;
 	int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
 	int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 676f0f3..3f3dc45 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -64,8 +64,6 @@
 };
 
 struct max77693_platform_data {
-	int wakeup;
-
 	/* regulator data */
 	struct max77693_regulator_data *regulators;
 	int num_regulators;
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index d1382df..0ce7721 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -756,6 +756,59 @@
 #define PCR_SETTING_REG2		0x814
 #define PCR_SETTING_REG3		0x747
 
+/* Phy bits */
+#define PHY_PCR_FORCE_CODE			0xB000
+#define PHY_PCR_OOBS_CALI_50			0x0800
+#define PHY_PCR_OOBS_VCM_08			0x0200
+#define PHY_PCR_OOBS_SEN_90			0x0040
+#define PHY_PCR_RSSI_EN				0x0002
+
+#define PHY_RCR1_ADP_TIME			0x0100
+#define PHY_RCR1_VCO_COARSE			0x001F
+
+#define PHY_RCR2_EMPHASE_EN			0x8000
+#define PHY_RCR2_NADJR				0x4000
+#define PHY_RCR2_CDR_CP_10			0x0400
+#define PHY_RCR2_CDR_SR_2			0x0100
+#define PHY_RCR2_FREQSEL_12			0x0040
+#define PHY_RCR2_CPADJEN			0x0020
+#define PHY_RCR2_CDR_SC_8			0x0008
+#define PHY_RCR2_CALIB_LATE			0x0002
+
+#define PHY_RDR_RXDSEL_1_9			0x4000
+
+#define PHY_TUNE_TUNEREF_1_0			0x4000
+#define PHY_TUNE_VBGSEL_1252			0x0C00
+#define PHY_TUNE_SDBUS_33			0x0200
+#define PHY_TUNE_TUNED18			0x01C0
+#define PHY_TUNE_TUNED12			0x0020
+
+#define PHY_BPCR_IBRXSEL			0x0400
+#define PHY_BPCR_IBTXSEL			0x0100
+#define PHY_BPCR_IB_FILTER			0x0080
+#define PHY_BPCR_CMIRROR_EN			0x0040
+
+#define PHY_REG_REV_RESV			0xE000
+#define PHY_REG_REV_RXIDLE_LATCHED		0x1000
+#define PHY_REG_REV_P1_EN			0x0800
+#define PHY_REG_REV_RXIDLE_EN			0x0400
+#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0	0x0040
+#define PHY_REG_REV_STOP_CLKRD			0x0020
+#define PHY_REG_REV_RX_PWST			0x0008
+#define PHY_REG_REV_STOP_CLKWR			0x0004
+
+#define PHY_FLD3_TIMER_4			0x7800
+#define PHY_FLD3_TIMER_6			0x00E0
+#define PHY_FLD3_RXDELINK			0x0004
+
+#define PHY_FLD4_FLDEN_SEL			0x4000
+#define PHY_FLD4_REQ_REF			0x2000
+#define PHY_FLD4_RXAMP_OFF			0x1000
+#define PHY_FLD4_REQ_ADDA			0x0800
+#define PHY_FLD4_BER_COUNT			0x00E0
+#define PHY_FLD4_BER_TIMER			0x000A
+#define PHY_FLD4_BER_CHK_EN			0x0001
+
 #define rtsx_pci_init_cmd(pcr)		((pcr)->ci = 0)
 
 struct rtsx_pcr;
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 378ae8a..2d0c907 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -51,6 +51,7 @@
 	int ono;
 	int type;
 	bool wakeup;
+	bool wtsr_smpl;
 };
 
 int sec_irq_init(struct sec_pmic_dev *sec_pmic);
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 71597e2..94b7cd6 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -62,6 +62,11 @@
 /* RTC Update Register1 */
 #define RTC_UDR_SHIFT		0
 #define RTC_UDR_MASK		(1 << RTC_UDR_SHIFT)
+#define RTC_TCON_SHIFT		1
+#define RTC_TCON_MASK		(1 << RTC_TCON_SHIFT)
+#define RTC_TIME_EN_SHIFT	3
+#define RTC_TIME_EN_MASK	(1 << RTC_TIME_EN_SHIFT)
+
 /* RTC Hour register */
 #define HOUR_PM_SHIFT		6
 #define HOUR_PM_MASK		(1 << HOUR_PM_SHIFT)
@@ -69,6 +74,12 @@
 #define ALARM_ENABLE_SHIFT	7
 #define ALARM_ENABLE_MASK	(1 << ALARM_ENABLE_SHIFT)
 
+#define SMPL_ENABLE_SHIFT	7
+#define SMPL_ENABLE_MASK	(1 << SMPL_ENABLE_SHIFT)
+
+#define WTSR_ENABLE_SHIFT	6
+#define WTSR_ENABLE_MASK	(1 << WTSR_ENABLE_SHIFT)
+
 enum {
 	RTC_SEC = 0,
 	RTC_MIN,
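
Together with the wtsr_smpl flag added to samsung/core.h above, these masks
let the RTC driver arm the WTSR (watchdog-timeout reset) and SMPL (sudden
momentary power loss) features. A hedged sketch of how they might be applied;
rtc_regmap and SEC_RTC_WTSR_SMPL_REG are placeholders, not names from this
header:

	unsigned int val = WTSR_ENABLE_MASK | SMPL_ENABLE_MASK;

	/* both the regmap handle and the register name are hypothetical */
	regmap_update_bits(rtc_regmap, SEC_RTC_WTSR_SMPL_REG, val, val);
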
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index ba89b94..674b45d 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -316,7 +316,7 @@
  * response to 'FM_RD_STATUS' command
  * @rdstpptyint: Traffic program flag(TP) and/or program type(PTY)
  * code has changed.
- * @rdspiint: Program indentifiaction(PI) code has changed.
+ * @rdspiint: Program identification(PI) code has changed.
  * @rdssyncint: RDS synchronization has changed.
  * @rdsfifoint: RDS was received and the RDS FIFO has at least
  * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it.
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
new file mode 100644
index 0000000..eda1215
--- /dev/null
+++ b/include/linux/mfd/stw481x.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef MFD_STW481X_H
+#define MFD_STW481X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+
+/* These registers are accessed from more than one driver */
+#define STW_CONF1			0x11U
+#define STW_CONF1_PDN_VMMC		0x01U
+#define STW_CONF1_VMMC_MASK		0x0eU
+#define STW_CONF1_VMMC_1_8V		0x02U
+#define STW_CONF1_VMMC_2_85V		0x04U
+#define STW_CONF1_VMMC_3V		0x06U
+#define STW_CONF1_VMMC_1_85V		0x08U
+#define STW_CONF1_VMMC_2_6V		0x0aU
+#define STW_CONF1_VMMC_2_7V		0x0cU
+#define STW_CONF1_VMMC_3_3V		0x0eU
+#define STW_CONF1_MMC_LS_STATUS		0x10U
+#define STW_PCTL_REG_LO			0x1eU
+#define STW_PCTL_REG_HI			0x1fU
+#define STW_CONF1_V_MONITORING		0x20U
+#define STW_CONF1_IT_WARN		0x40U
+#define STW_CONF1_PDN_VAUX		0x80U
+#define STW_CONF2			0x20U
+#define STW_CONF2_MASK_TWARN		0x01U
+#define STW_CONF2_VMMC_EXT		0x02U
+#define STW_CONF2_MASK_IT_WAKE_UP	0x04U
+#define STW_CONF2_GPO1			0x08U
+#define STW_CONF2_GPO2			0x10U
+#define STW_VCORE_SLEEP			0x21U
+
+/**
+ * struct stw481x - state holder for the Stw481x drivers
+ * @lock: mutex to serialize I2C accesses
+ * @client: corresponding I2C client
+ * @vmmc_regulator: regulator device for the VMMC regulator
+ * @map: regmap handle to access device registers
+ */
+struct stw481x {
+	struct mutex		lock;
+	struct i2c_client	*client;
+	struct regulator_dev	*vmmc_regulator;
+	struct regmap		*map;
+};
+
+#endif
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index b473577f..8789fa3c 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,10 +17,35 @@
 
 struct device_node;
 
+#ifdef CONFIG_MFD_SYSCON
 extern struct regmap *syscon_node_to_regmap(struct device_node *np);
 extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
 extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
 extern struct regmap *syscon_regmap_lookup_by_phandle(
 					struct device_node *np,
 					const char *property);
+#else
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle(
+					struct device_node *np,
+					const char *property)
+{
+	return ERR_PTR(-ENOSYS);
+}
+#endif
+
 #endif /* __LINUX_MFD_SYSCON_H__ */
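
The new stubs return ERR_PTR(-ENOSYS) so callers link and fail gracefully
when CONFIG_MFD_SYSCON is disabled. The expected caller pattern, as a sketch
inside a driver probe that already has a device_node *np; "vendor,syscon" is
a made-up phandle property name:

	struct regmap *map;

	map = syscon_regmap_lookup_by_phandle(np, "vendor,syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);	/* -ENOSYS when the stubs are compiled in */
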
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 7086b22..b6d36b3 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -241,6 +241,12 @@
 
 #define IMX6Q_GPR5_L2_CLK_STOP			BIT(8)
 
+#define IMX6Q_GPR8_TX_SWING_LOW			(0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL		(0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB		(0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB		(0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1		(0x3f << 0)
+
 #define IMX6Q_GPR9_TZASC2_BYP			BIT(1)
 #define IMX6Q_GPR9_TZASC1_BYP			BIT(0)
 
@@ -273,7 +279,9 @@
 #define IMX6Q_GPR12_ARMP_AHB_CLK_EN		BIT(26)
 #define IMX6Q_GPR12_ARMP_ATB_CLK_EN		BIT(25)
 #define IMX6Q_GPR12_ARMP_APB_CLK_EN		BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE			(0xf << 12)
 #define IMX6Q_GPR12_PCIE_CTL_2			BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL			(0x1f << 4)
 
 #define IMX6Q_GPR13_SDMA_STOP_REQ		BIT(30)
 #define IMX6Q_GPR13_CAN2_STOP_REQ		BIT(29)
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 08cce7f..d498d98f 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -134,13 +134,18 @@
 #define FIFO1_THRESHOLD		19
 
 /*
-* ADC runs at 3MHz, and it takes
-* 15 cycles to latch one data output.
-* Hence the idle time for ADC to
-* process one sample data would be
-* around 5 micro seconds.
-*/
-#define IDLE_TIMEOUT 5 /* microsec */
+ * time in us for processing a single channel, calculated as follows:
+ *
+ * num cycles = open delay + (sample delay + conv time) * averaging
+ *
+ * num cycles: 152 + (1 + 13) * 16 = 376
+ *
+ * clock frequency: 26MHz / 8 = 3.25MHz
+ * clock period: 1 / 3.25MHz = 308ns
+ *
+ * processing time: 376 * 308ns = 116us
+ */
+#define IDLE_TIMEOUT 116 /* microsec */
 
 #define TSCADC_CELLS		2
 
@@ -155,6 +160,7 @@
 	struct mfd_cell cells[TSCADC_CELLS];
 	u32 reg_se_cache;
 	spinlock_t reg_lock;
+	unsigned int clk_div;
 
 	/* tsc device */
 	struct titsc *tsc;
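
Redoing the new comment's arithmetic in C, with every number taken from the
comment itself (the comment rounds the clock period up to 308ns, giving
116us; exact integer math lands at ~115us):

	unsigned int open_delay = 152, sample_delay = 1, conv_time = 13, avg = 16;
	unsigned int cycles = open_delay + (sample_delay + conv_time) * avg;	/* 376 */
	unsigned int period_ns = 1000000000U / (26000000U / 8);		/* ~307ns */
	unsigned int idle_us = cycles * period_ns / 1000;			/* ~115us */
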
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 40854ac..eefafa6 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -56,8 +56,6 @@
 #define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
 
 struct wm8994 {
-	struct mutex irq_lock;
-
 	struct wm8994_pdata pdata;
 
 	enum wm8994_type type;
@@ -85,16 +83,43 @@
 };
 
 /* Device I/O API */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg);
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
-		 unsigned short val);
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
-		    unsigned short mask, unsigned short val);
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
-		     int count, u16 *buf);
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
-		     int count, const u16 *buf);
 
+static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(wm8994->regmap, reg, &val);
+
+	if (ret < 0)
+		return ret;
+	else
+		return val;
+}
+
+static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
+				   unsigned short val)
+{
+	return regmap_write(wm8994->regmap, reg, val);
+}
+
+static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
+				   int count, u16 *buf)
+{
+	return regmap_bulk_read(wm8994->regmap, reg, buf, count);
+}
+
+static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+				    int count, const u16 *buf)
+{
+	return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
+}
+
+static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
+		    unsigned short mask, unsigned short val)
+{
+	return regmap_update_bits(wm8994->regmap, reg, mask, val);
+}
 
 /* Helper to save on boilerplate */
 static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
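
With the wm8994 I/O helpers now inline over regmap, existing callers compile
unchanged. A short sketch; register 0x01 is a placeholder address, not
asserted to be a real WM8994 register:

	static int example_set_bit(struct wm8994 *wm8994)
	{
		int val = wm8994_reg_read(wm8994, 0x01);

		if (val < 0)
			return val;	/* propagated regmap error */
		return wm8994_set_bits(wm8994, 0x01, 0x1, 0x1);
	}
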
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index cb35835..f7eaf2d 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,6 +31,7 @@
 #define I2O_MINOR		166
 #define MICROCODE_MINOR		184
 #define TUN_MINOR		200
+#define CUSE_MINOR		203
 #define MWAVE_MINOR		219	/* ACP/Mwave Modem */
 #define MPT_MINOR		220
 #define MPT2SAS_MINOR		221
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cd1fdf7..8df61bc 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,10 +154,6 @@
 	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
 	MLX4_CMD_SET_IF_STAT	 = 0X55,
 
-	/* set port opcode modifiers */
-	MLX4_SET_PORT_PRIO2TC = 0x8,
-	MLX4_SET_PORT_SCHEDULER  = 0x9,
-
 	/* register/delete flow steering network rules */
 	MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
 	MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@
 	MLX4_SET_PORT_VLAN_TABLE = 0x3,
 	MLX4_SET_PORT_PRIO_MAP  = 0x4,
 	MLX4_SET_PORT_GID_TABLE = 0x5,
+	MLX4_SET_PORT_PRIO2TC	= 0x8,
+	MLX4_SET_PORT_SCHEDULER = 0x9,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 24ce6bd..7d3a523 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -54,6 +54,7 @@
 	MLX4_FLAG_MASTER	= 1 << 2,
 	MLX4_FLAG_SLAVE		= 1 << 3,
 	MLX4_FLAG_SRIOV		= 1 << 4,
+	MLX4_FLAG_OLD_REG_MAC	= 1 << 6,
 };
 
 enum {
@@ -155,7 +156,7 @@
 	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
 	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
 	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3,
-	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL <<  4,
+	MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN	= 1LL <<  4,
 	MLX4_DEV_CAP_FLAG2_TS			= 1LL <<  5,
 	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL		= 1LL <<  6,
 	MLX4_DEV_CAP_FLAG2_FSM			= 1LL <<  7,
@@ -640,16 +641,28 @@
 	__be64	tx_bytes;
 };
 
+struct mlx4_quotas {
+	int qp;
+	int cq;
+	int srq;
+	int mpt;
+	int mtt;
+	int counter;
+	int xrcd;
+};
+
 struct mlx4_dev {
 	struct pci_dev	       *pdev;
 	unsigned long		flags;
 	unsigned long		num_slaves;
 	struct mlx4_caps	caps;
 	struct mlx4_phys_caps	phys_caps;
+	struct mlx4_quotas	quotas;
 	struct radix_tree_root	qp_table_tree;
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
 	int			num_vfs;
+	int			numa_node;
 	int			oper_log_mgm_entry_size;
 	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
@@ -771,6 +784,12 @@
 	return dev->flags & MLX4_FLAG_MASTER;
 }
 
+static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
+{
+	return dev->phys_caps.base_sqpn + 8 +
+		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
+}
+
 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
 {
 	return (qpn < dev->phys_caps.base_sqpn + 8 +
@@ -816,7 +835,7 @@
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
 void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -1078,7 +1097,7 @@
 		u8 *pg, u16 *ratelimit);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
 
 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
 		      int npages, u64 iova, u32 *lkey, u32 *rkey);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8aa4006..0548eb2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,6 +50,10 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 
+#ifndef __pa_symbol
+#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
+
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
@@ -1312,32 +1316,85 @@
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if USE_SPLIT_PTLOCKS
-/*
- * We tuck a spinlock to guard each pagetable page into its struct page,
- * at page->private, with BUILD_BUG_ON to make sure that this will not
- * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
- * When freeing, reset page->mapping so free_pages_check won't complain.
- */
-#define __pte_lockptr(page)	&((page)->ptl)
-#define pte_lock_init(_page)	do {					\
-	spin_lock_init(__pte_lockptr(_page));				\
-} while (0)
-#define pte_lock_deinit(page)	((page)->mapping = NULL)
-#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else	/* !USE_SPLIT_PTLOCKS */
+#if USE_SPLIT_PTE_PTLOCKS
+#if BLOATED_SPINLOCKS
+void __init ptlock_cache_init(void);
+extern bool ptlock_alloc(struct page *page);
+extern void ptlock_free(struct page *page);
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+	return page->ptl;
+}
+#else /* BLOATED_SPINLOCKS */
+static inline void ptlock_cache_init(void) {}
+static inline bool ptlock_alloc(struct page *page)
+{
+	return true;
+}
+
+static inline void ptlock_free(struct page *page)
+{
+}
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+	return &page->ptl;
+}
+#endif /* BLOATED_SPINLOCKS */
+
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+	return ptlock_ptr(pmd_page(*pmd));
+}
+
+static inline bool ptlock_init(struct page *page)
+{
+	/*
+	 * prep_new_page() initializes page->private (and therefore page->ptl)
+	 * to 0. Make sure nobody took it into use in the meantime.
+	 *
+	 * That can happen if an arch tries to use slab for page table
+	 * allocation: slab code uses page->slab_cache and page->first_page
+	 * (for tail pages), which share storage with page->ptl.
+	 */
+	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	if (!ptlock_alloc(page))
+		return false;
+	spin_lock_init(ptlock_ptr(page));
+	return true;
+}
+
+/* Reset page->mapping so free_pages_check won't complain. */
+static inline void pte_lock_deinit(struct page *page)
+{
+	page->mapping = NULL;
+	ptlock_free(page);
+}
+
+#else	/* !USE_SPLIT_PTE_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
-#define pte_lock_init(page)	do {} while (0)
-#define pte_lock_deinit(page)	do {} while (0)
-#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* USE_SPLIT_PTLOCKS */
-
-static inline void pgtable_page_ctor(struct page *page)
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
-	pte_lock_init(page);
+	return &mm->page_table_lock;
+}
+static inline void ptlock_cache_init(void) {}
+static inline bool ptlock_init(struct page *page) { return true; }
+static inline void pte_lock_deinit(struct page *page) {}
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+static inline void pgtable_init(void)
+{
+	ptlock_cache_init();
+	pgtable_cache_init();
+}
+
+static inline bool pgtable_page_ctor(struct page *page)
+{
 	inc_zone_page_state(page, NR_PAGETABLE);
+	return ptlock_init(page);
 }
 
 static inline void pgtable_page_dtor(struct page *page)
@@ -1374,6 +1431,52 @@
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
 
+#if USE_SPLIT_PMD_PTLOCKS
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+	return ptlock_ptr(virt_to_page(pmd));
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	page->pmd_huge_pte = NULL;
+#endif
+	return ptlock_init(page);
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(page->pmd_huge_pte);
+#endif
+	ptlock_free(page);
+}
+
+#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
+
+#else
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+	return &mm->page_table_lock;
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
+static inline void pgtable_pmd_page_dtor(struct page *page) {}
+
+#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
+
+#endif
+
+static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
+{
+	spinlock_t *ptl = pmd_lockptr(mm, pmd);
+	spin_lock(ptl);
+	return ptl;
+}
+
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
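
pmd_lock() bundles the lockptr lookup and spin_lock() into one call; the
expected caller pattern, as a sketch:

	spinlock_t *ptl = pmd_lock(mm, pmd);
	/* ... examine or modify *pmd while holding the (possibly split) lock ... */
	spin_unlock(ptl);
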
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a3198e5..10f5a72 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,7 +23,9 @@
 
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
+		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -63,6 +65,9 @@
 						 * this page is only used to
 						 * free other pages.
 						 */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
 		};
 
 		union {
@@ -141,9 +146,13 @@
 						 * indicates order in the buddy
 						 * system if PG_buddy is set.
 						 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
+#if BLOATED_SPINLOCKS
+		spinlock_t *ptl;
+#else
 		spinlock_t ptl;
 #endif
+#endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
@@ -309,14 +318,14 @@
 	NR_MM_COUNTERS
 };
 
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
 	int events;	/* for synchronization threshold */
 	int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
 	atomic_long_t count[NR_MM_COUNTERS];
@@ -339,6 +348,7 @@
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
+	atomic_long_t nr_ptes;			/* Page table pages */
 	int map_count;				/* number of VMAs */
 
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
@@ -360,7 +370,6 @@
 	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
 	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
 	unsigned long def_flags;
-	unsigned long nr_ptes;		/* Page table pages */
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
@@ -406,7 +415,7 @@
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 92dc257..7f7f8da 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -87,4 +87,6 @@
 	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
 }
+
+unsigned long vm_commit_limit(void);
 #endif /* _LINUX_MMAN_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 05f2447..15cd6b1 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -367,9 +367,6 @@
 	/* What modules do I depend on? */
 	struct list_head target_list;
 
-	/* Who is waiting for us to be unloaded */
-	struct task_struct *waiter;
-
 	/* Destruction function. */
 	void (*exit)(void);
 
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 38cd98f..371d346 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -49,6 +49,8 @@
 
 #define MNT_LOCK_READONLY	0x400000
 #define MNT_LOCKED		0x800000
+#define MNT_DOOMED		0x1000000
+#define MNT_SYNC_UMOUNT		0x2000000
 
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 391af8d..e21f9d4 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -6,9 +6,9 @@
 
 /* one msg_msg structure for each message */
 struct msg_msg {
-	struct list_head m_list; 
-	long  m_type;          
-	int m_ts;           /* message text size */
+	struct list_head m_list;
+	long m_type;
+	size_t m_ts;		/* message text size */
 	struct msg_msgseg* next;
 	void *security;
 	/* the actual message follows immediately */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index b17ead8..87cce50 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -64,6 +64,8 @@
 
 void default_teardown_msi_irqs(struct pci_dev *dev);
 void default_restore_msi_irqs(struct pci_dev *dev, int irq);
+u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
 
 struct msi_chip {
 	struct module *owner;
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 95fc482..36bb6a5 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -91,8 +91,6 @@
  * with NAND_BBT_CREATE.
  */
 #define NAND_BBT_CREATE_EMPTY	0x00000400
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES	0x00000800
 /* Write bbt if necessary */
 #define NAND_BBT_WRITE		0x00002000
 /* Read and write back block contents when writing bbt */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 4b02512..5f487d7 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -365,7 +365,7 @@
 			bitpos = (map_bankwidth(map)-1-i)*8;
 #endif
 			orig.x[0] &= ~(0xff << bitpos);
-			orig.x[0] |= buf[i-start] << bitpos;
+			orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
 		}
 	}
 	return orig;
@@ -384,7 +384,7 @@
 
 	if (map_bankwidth(map) < MAP_FF_LIMIT) {
 		int bw = 8 * map_bankwidth(map);
-		r.x[0] = (1 << bw) - 1;
+		r.x[0] = (1UL << bw) - 1;
 	} else {
 		for (i=0; i<map_words(map); i++)
 			r.x[i] = ~0UL;
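
Both map.h hunks widen shifts to unsigned long. A sketch of the failure mode
the 1UL cast avoids, assuming a 64-bit build with a 4-byte bank width
(bw == 32):

	int bw = 32;
	unsigned long bad  = (1 << bw) - 1;	/* undefined: shifting a 32-bit int by 32 */
	unsigned long good = (1UL << bw) - 1;	/* 0xffffffff, as intended */

The buf[i-start] cast is the same idea: the u8 operand is promoted only to
int, so a shift toward bit 31 can set the sign bit and then sign-extend when
widened to unsigned long, corrupting the upper half of orig.x[0].
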
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f9bfe52..8cc0e2f 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -29,9 +29,6 @@
 
 #include <asm/div64.h>
 
-#define MTD_CHAR_MAJOR 90
-#define MTD_BLOCK_MAJOR 31
-
 #define MTD_ERASE_PENDING	0x01
 #define MTD_ERASING		0x02
 #define MTD_ERASE_SUSPEND	0x04
@@ -354,6 +351,11 @@
 	return mtd->_read_oob && mtd->_write_oob;
 }
 
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
 static inline int mtd_can_have_bb(const struct mtd_info *mtd)
 {
 	return !!mtd->_block_isbad;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ac8e89d..9e6c8f9 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -198,6 +198,7 @@
 /* Cell info constants */
 #define NAND_CI_CHIPNR_MSK	0x03
 #define NAND_CI_CELLTYPE_MSK	0x0C
+#define NAND_CI_CELLTYPE_SHIFT	2
 
 /* Keep gcc happy */
 struct nand_chip;
@@ -477,7 +478,7 @@
  * @badblockbits:	[INTERN] minimum number of set bits in a good block's
  *			bad block marker position; i.e., BBM == 11110111b is
  *			not bad when badblockbits == 7
- * @cellinfo:		[INTERN] MLC/multichip data from chip ident
+ * @bits_per_cell:	[INTERN] number of bits per cell, i.e. 1 means SLC.
  * @ecc_strength_ds:	[INTERN] ECC correctability from the datasheet.
  *			Minimum amount of bit errors per @ecc_step_ds guaranteed
  *			to be correctable. If unknown, set to zero.
@@ -498,7 +499,6 @@
  *			supported, 0 otherwise.
  * @onfi_set_features:	[REPLACEABLE] set the features for ONFI nand
  * @onfi_get_features:	[REPLACEABLE] get the features for ONFI nand
- * @ecclayout:		[REPLACEABLE] the default ECC placement scheme
  * @bbt:		[INTERN] bad block table pointer
  * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
  *			lookup.
@@ -559,7 +559,7 @@
 	int pagebuf;
 	unsigned int pagebuf_bitflips;
 	int subpagesize;
-	uint8_t cellinfo;
+	uint8_t bits_per_cell;
 	uint16_t ecc_strength_ds;
 	uint16_t ecc_step_ds;
 	int badblockpos;
@@ -572,7 +572,6 @@
 
 	uint8_t *oob_poi;
 	struct nand_hw_control *controller;
-	struct nand_ecclayout *ecclayout;
 
 	struct nand_ecc_ctrl ecc;
 	struct nand_buffers *buffers;
@@ -797,4 +796,13 @@
 	return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
 }
 
+/*
+ * Check if it is an SLC nand.
+ * !nand_is_slc() can be used to check for MLC/TLC nand chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+	return chip->bits_per_cell == 1;
+}
 #endif /* __LINUX_MTD_NAND_H */
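
NAND_CI_CELLTYPE_SHIFT pairs with the existing NAND_CI_CELLTYPE_MSK so the
cell type can be decoded from the chip's ID byte into the new bits_per_cell
field. One plausible decode, as a sketch (the in-tree helper may differ):

	static inline int example_bits_per_cell(u8 cellinfo)
	{
		int bits = (cellinfo & NAND_CI_CELLTYPE_MSK) >> NAND_CI_CELLTYPE_SHIFT;

		return bits + 1;	/* 0 -> 1 bit/cell (SLC), 1 -> 2 bits/cell (MLC), ... */
	}
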
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bab49da..d318193 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -131,7 +131,7 @@
 }
 
 /*
- * See kernel/mutex.c for detailed documentation of these APIs.
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 8e47bc7..492de72 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -16,7 +16,7 @@
 	struct path	root;
 	struct inode	*inode; /* path.dentry.d_inode */
 	unsigned int	flags;
-	unsigned	seq;
+	unsigned	seq, m_seq;
 	int		last_type;
 	unsigned	depth;
 	char *saved_names[MAX_NESTED_LINKS + 1];
diff --git a/include/linux/net.h b/include/linux/net.h
index 4f27575..b292a04 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,6 +24,7 @@
 #include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -195,27 +196,23 @@
 	SOCK_WAKE_URG,
 };
 
-extern int	     sock_wake_async(struct socket *sk, int how, int band);
-extern int	     sock_register(const struct net_proto_family *fam);
-extern void	     sock_unregister(int family);
-extern int	     __sock_create(struct net *net, int family, int type, int proto,
-				 struct socket **res, int kern);
-extern int	     sock_create(int family, int type, int proto,
-				 struct socket **res);
-extern int	     sock_create_kern(int family, int type, int proto,
-				      struct socket **res);
-extern int	     sock_create_lite(int family, int type, int proto,
-				      struct socket **res); 
-extern void	     sock_release(struct socket *sock);
-extern int   	     sock_sendmsg(struct socket *sock, struct msghdr *msg,
-				  size_t len);
-extern int	     sock_recvmsg(struct socket *sock, struct msghdr *msg,
-				  size_t size, int flags);
-extern struct file  *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+		  struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+		 int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
 #define		     sockfd_put(sock) fput(sock->file)
-extern int	     net_ratelimit(void);
+int net_ratelimit(void);
 
 #define net_ratelimited_function(function, ...)			\
 do {								\
@@ -243,32 +240,52 @@
 #define net_random()		prandom_u32()
 #define net_srandom(seed)	prandom_seed((__force u32)(seed))
 
-extern int   	     kernel_sendmsg(struct socket *sock, struct msghdr *msg,
-				    struct kvec *vec, size_t num, size_t len);
-extern int   	     kernel_recvmsg(struct socket *sock, struct msghdr *msg,
-				    struct kvec *vec, size_t num,
-				    size_t len, int flags);
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+			   struct static_key *done_key);
 
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
-		       int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
-			 int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
-			  int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
-			      int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
-			      int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
-			     char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
-			     char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-			   size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
-				enum sock_shutdown_cmd how);
+#ifdef HAVE_JUMP_LABEL
+#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
+#else /* !HAVE_JUMP_LABEL */
+#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#endif /* HAVE_JUMP_LABEL */
+
+#define net_get_random_once(buf, nbytes)				\
+	({								\
+		bool ___ret = false;					\
+		static bool ___done = false;				\
+		static struct static_key ___done_key =			\
+			___NET_RANDOM_STATIC_KEY_INIT;			\
+		if (!static_key_true(&___done_key))			\
+			___ret = __net_get_random_once(buf,		\
+						       nbytes,		\
+						       &___done,	\
+						       &___done_key);	\
+		___ret;							\
+	})
+
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+		   size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+		   size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+		   int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+		       int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+		       int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+		      int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+		      unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+		    size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
 #define MODULE_ALIAS_NETPROTO(proto) \
 	MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a2a89a5..1005ebf 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,6 +42,8 @@
 	NETIF_F_TSO6_BIT,		/* ... TCPv6 segmentation */
 	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
 	NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
+	NETIF_F_GSO_IPIP_BIT,		/* ... IPIP tunnel with TSO */
+	NETIF_F_GSO_SIT_BIT,		/* ... SIT tunnel with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
 	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
@@ -60,6 +62,7 @@
 	NETIF_F_HW_VLAN_STAG_TX_BIT,	/* Transmit VLAN STAG HW acceleration */
 	NETIF_F_HW_VLAN_STAG_RX_BIT,	/* Receive VLAN STAG HW acceleration */
 	NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
+	NETIF_F_HW_L2FW_DOFFLOAD_BIT,	/* Allow L2 Forwarding in Hardware */
 
 	/*
 	 * Add your fresh new feature above and remember to update
@@ -107,11 +110,14 @@
 #define NETIF_F_RXFCS		__NETIF_F(RXFCS)
 #define NETIF_F_RXALL		__NETIF_F(RXALL)
 #define NETIF_F_GSO_GRE		__NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
+#define NETIF_F_HW_L2FW_DOFFLOAD	__NETIF_F(HW_L2FW_DOFFLOAD)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index adf4070..7f0ed42 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,8 +60,8 @@
 #define SET_ETHTOOL_OPS(netdev,ops) \
 	( (netdev)->ethtool_ops = (ops) )
 
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
-					   const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+				    const struct ethtool_ops *ops);
 
 /* hardware address assignment types */
 #define NET_ADDR_PERM		0	/* address is permanent (default) */
@@ -298,7 +298,7 @@
 };
 #define NETDEV_BOOT_SETUP_MAX 8
 
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
 
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@
 typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -445,8 +445,8 @@
  *
  * Mark NAPI processing as complete.
  */
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
 
 /**
  *	napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@
  * lookup @napi_id in napi_hash table
  * must be called under rcu_read_lock()
  */
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
 
 /**
  *	napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@
  *
  * generate a new napi_id and store a @napi under it in napi_hash
  */
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
 
 /**
  *	napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi
  */
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
 
 /**
  *	napi_disable - prevent NAPI from scheduling
@@ -483,6 +483,7 @@
  */
 static inline void napi_disable(struct napi_struct *n)
 {
+	might_sleep();
 	set_bit(NAPI_STATE_DISABLE, &n->state);
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
@@ -664,8 +665,8 @@
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 #ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
-				u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+			 u16 filter_id);
 #endif
 
 /* This structure contains an instance of an RX queue. */
@@ -961,6 +962,25 @@
  *	Called by vxlan to notify the driver about a UDP port and socket
  *	address family that vxlan is not listening to anymore. The operation
  *	is protected by the vxlan_net->sock_lock.
+ *
+ * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ *				 struct net_device *dev)
+ *	Called by upper layer devices to accelerate switching or other
+ *	station functionality into hardware. 'pdev' is the lowerdev
+ *	to use for the offload and 'dev' is the net device that will
+ *	back the offload. Returns a pointer to the private structure
+ *	the upper layer will maintain.
+ * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
+ *	Called by upper layer device to delete the station created
+ *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
+ *	the station and 'priv' is the structure returned by the add
+ *	operation.
+ * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
+ *				      struct net_device *dev,
+ *				      void *priv);
+ *	Callback to use for xmit over the accelerated station. This
+ *	is used in place of ndo_start_xmit on accelerated net
+ *	devices.
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -1097,6 +1117,15 @@
 	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
 						      sa_family_t sa_family,
 						      __be16 port);
+
+	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
+							struct net_device *dev);
+	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
+							void *priv);
+
+	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
+							struct net_device *dev,
+							void *priv);
 };
 
 /*
@@ -1131,7 +1160,7 @@
 	unsigned long		mem_end;	/* shared mem end	*/
 	unsigned long		mem_start;	/* shared mem start	*/
 	unsigned long		base_addr;	/* device I/O address	*/
-	unsigned int		irq;		/* device IRQ number	*/
+	int			irq;		/* device IRQ number	*/
 
 	/*
 	 *	Some hardware also needs these fields, but they are not
@@ -1143,8 +1172,19 @@
 	struct list_head	dev_list;
 	struct list_head	napi_list;
 	struct list_head	unreg_list;
-	struct list_head	upper_dev_list; /* List of upper devices */
-	struct list_head	lower_dev_list;
+	struct list_head	close_list;
+
+	/* directly linked devices, like slaves for bonding */
+	struct {
+		struct list_head upper;
+		struct list_head lower;
+	} adj_list;
+
+	/* all linked devices, *including* neighbours */
+	struct {
+		struct list_head upper;
+		struct list_head lower;
+	} all_adj_list;
 
 
 	/* currently active device features */
@@ -1183,6 +1223,7 @@
 	/* Management operations */
 	const struct net_device_ops *netdev_ops;
 	const struct ethtool_ops *ethtool_ops;
+	const struct forwarding_accel_ops *fwd_ops;
 
 	/* Hardware header description */
 	const struct header_ops *header_ops;
@@ -1487,9 +1528,9 @@
 		f(dev, &dev->_tx[i], arg);
 }
 
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-					   struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -1546,7 +1587,7 @@
 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 
 /* Set the sysfs device type for the network logical device to allow
- * fin grained indentification of different network device types. For
+ * fine-grained identification of different network device types. For
  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
  */
 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
@@ -1673,8 +1714,8 @@
 #define NETDEV_CHANGEUPPER	0x0015
 #define NETDEV_RESEND_IGMP	0x0016
 
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
 
 struct netdev_notifier_info {
 	struct net_device *dev;
@@ -1697,9 +1738,9 @@
 	return info->dev;
 }
 
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-					 struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+				  struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
 extern rwlock_t				dev_base_lock;		/* Device list lock */
@@ -1754,54 +1795,53 @@
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
-extern int 			netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long		netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
-					      const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void		dev_add_pack(struct packet_type *pt);
-extern void		dev_remove_pack(struct packet_type *pt);
-extern void		__dev_remove_pack(struct packet_type *pt);
-extern void		dev_add_offload(struct packet_offload *po);
-extern void		dev_remove_offload(struct packet_offload *po);
-extern void		__dev_remove_offload(struct packet_offload *po);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+				       const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
 
-extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
-						      unsigned short mask);
-extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
-extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
-extern int		dev_alloc_name(struct net_device *dev, const char *name);
-extern int		dev_open(struct net_device *dev);
-extern int		dev_close(struct net_device *dev);
-extern void		dev_disable_lro(struct net_device *dev);
-extern int		dev_loopback_xmit(struct sk_buff *newskb);
-extern int		dev_queue_xmit(struct sk_buff *skb);
-extern int		register_netdevice(struct net_device *dev);
-extern void		unregister_netdevice_queue(struct net_device *dev,
-						   struct list_head *head);
-extern void		unregister_netdevice_many(struct list_head *head);
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+					unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
 static inline void unregister_netdevice(struct net_device *dev)
 {
 	unregister_netdevice_queue(dev, NULL);
 }
 
-extern int 		netdev_refcnt_read(const struct net_device *dev);
-extern void		free_netdev(struct net_device *dev);
-extern void		synchronize_net(void);
-extern int		init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void netdev_freemem(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
 
-extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int		netdev_get_name(struct net *net, char *name, int ifindex);
-extern int		dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
-extern int		netpoll_trap(void);
+int netpoll_trap(void);
 #endif
-extern int	       skb_gro_receive(struct sk_buff **head,
-				       struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -1873,7 +1913,7 @@
 }
 
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
 {
 	return register_gifconf(family, NULL);
@@ -1944,7 +1984,7 @@
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
@@ -2264,9 +2304,8 @@
 }
 
 #ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev,
-			       const struct cpumask *mask,
-			       u16 index);
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+			u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
 				      const struct cpumask *mask,
@@ -2297,12 +2336,10 @@
 	return dev->num_tx_queues > 1;
 }
 
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
-					unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
 #ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
-					unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 						unsigned int rxq)
@@ -2329,28 +2366,27 @@
 }
 
 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
 
 /* Use this variant when it is known for sure that it
  * is executing from hardware interrupt context or with hardware interrupts
  * disabled.
  */
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * from either hardware interrupt or other context, with hardware interrupts
  * either disabled or enabled.
  */
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
 
-extern int		netif_rx(struct sk_buff *skb);
-extern int		netif_rx_ni(struct sk_buff *skb);
-extern int		netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
-					 struct sk_buff *skb);
-extern void		napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
-extern gro_result_t	napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -2358,40 +2394,36 @@
 	napi->skb = NULL;
 }
 
-extern int netdev_rx_handler_register(struct net_device *dev,
-				      rx_handler_func_t *rx_handler,
-				      void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
+int netdev_rx_handler_register(struct net_device *dev,
+			       rx_handler_func_t *rx_handler,
+			       void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
 
-extern bool		dev_valid_name(const char *name);
-extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int		dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int	dev_get_flags(const struct net_device *);
-extern int		__dev_change_flags(struct net_device *, unsigned int flags);
-extern int		dev_change_flags(struct net_device *, unsigned int);
-extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int		dev_change_name(struct net_device *, const char *);
-extern int		dev_set_alias(struct net_device *, const char *, size_t);
-extern int		dev_change_net_namespace(struct net_device *,
-						 struct net *, const char *);
-extern int		dev_set_mtu(struct net_device *, int);
-extern void		dev_set_group(struct net_device *, int);
-extern int		dev_set_mac_address(struct net_device *,
-					    struct sockaddr *);
-extern int		dev_change_carrier(struct net_device *,
-					   bool new_carrier);
-extern int		dev_get_phys_port_id(struct net_device *dev,
-					     struct netdev_phys_port_id *ppid);
-extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev,
-					    struct netdev_queue *txq);
-extern int		dev_forward_skb(struct net_device *dev,
-					struct sk_buff *skb);
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+			unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+			 struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq, void *accel_priv);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int		netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
 
 /**
  *	dev_put - release reference to device
@@ -2424,9 +2456,9 @@
  * kind of lower layer not just hardware media.
  */
 
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *	netif_carrier_ok - test if carrier present
@@ -2439,13 +2471,13 @@
 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 }
 
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
 
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
 
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
 
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
 
 /**
  *	netif_dormant_on - mark device as dormant.
@@ -2513,9 +2545,9 @@
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
 
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -2724,119 +2756,138 @@
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
-extern void		ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
-				       void (*setup)(struct net_device *),
-				       unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+				    void (*setup)(struct net_device *),
+				    unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
 	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 
 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
 	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 
-extern int		register_netdev(struct net_device *dev);
-extern void		unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
 
 /* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
-				  struct netdev_hw_addr_list *from_list,
-				  int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
-				   struct netdev_hw_addr_list *from_list,
-				   int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
-			  struct netdev_hw_addr_list *from_list,
-			  int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
-			     struct netdev_hw_addr_list *from_list,
-			     int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+			   struct netdev_hw_addr_list *from_list,
+			   int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+			    struct netdev_hw_addr_list *from_list,
+			    int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+		   struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+		      struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
-			unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
-			unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
-				 struct net_device *from_dev,
-				 unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
-				 struct net_device *from_dev,
-				 unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+		 unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+		 unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
-extern void		dev_set_rx_mode(struct net_device *dev);
-extern void		__dev_set_rx_mode(struct net_device *dev);
-extern int		dev_set_promiscuity(struct net_device *dev, int inc);
-extern int		dev_set_allmulti(struct net_device *dev, int inc);
-extern void		netdev_state_change(struct net_device *dev);
-extern void		netdev_notify_peers(struct net_device *dev);
-extern void		netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
-extern void		dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
-					       struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-				    const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+					struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+			     const struct net_device_stats *netdev_stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		bpf_jit_enable;
 
-extern bool netdev_has_upper_dev(struct net_device *dev,
-				 struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-							struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+						     struct list_head **iter);
 
 /* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
-	for (iter = &(dev)->upper_dev_list, \
-	     upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
-	     upper; \
-	     upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+	for (iter = &(dev)->all_adj_list.upper, \
+	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+	     updev; \
+	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
 
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+void *netdev_lower_get_next_private(struct net_device *dev,
+				    struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+					struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+	for (iter = (dev)->adj_list.lower.next, \
+	     priv = netdev_lower_get_next_private(dev, &(iter)); \
+	     priv; \
+	     priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+	for (iter = &(dev)->adj_list.lower, \
+	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+	     priv; \
+	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
 				 struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
-					struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
-				    struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-	netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-					  netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+					 struct net_device *upper_dev,
+					 void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+			     struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+				       struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+				   struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+				  netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features);
 
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2858,26 +2909,26 @@
 }
 
 #ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
 #else
 static inline void netdev_rx_csum_fault(struct net_device *dev)
 {
 }
 #endif
 /* rx skb timestamps */
-extern void		net_enable_timestamp(void);
-extern void		net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
 
 #ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
 #else
 #define dev_proc_init() 0
 #endif
 
-extern int netdev_class_create_file_ns(struct class_attribute *class_attr,
-				       const void *ns);
-extern void netdev_class_remove_file_ns(struct class_attribute *class_attr,
-					const void *ns);
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+				const void *ns);
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+				 const void *ns);
 
 static inline int netdev_class_create_file(struct class_attribute *class_attr)
 {
@@ -2891,9 +2942,9 @@
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
 
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
 
 static inline netdev_features_t netdev_get_wanted_features(
 	struct net_device *dev)
@@ -2957,6 +3008,11 @@
 	dev->gso_max_size = size;
 }
 
+static inline bool netif_is_macvlan(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_MACVLAN;
+}
+
 static inline bool netif_is_bond_master(struct net_device *dev)
 {
 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -2985,22 +3041,22 @@
 	return dev->name;
 }
 
-extern __printf(3, 4)
+__printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
 		  const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_info(const struct net_device *dev, const char *format, ...);
 
 #define MODULE_ALIAS_NETDEV(device) \
@@ -3041,7 +3097,7 @@
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)			\
-	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
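A note on the reworked adjacency iterators above: netdev_for_each_all_upper_dev_rcu() must run under the RCU read lock, as its comment says. A minimal sketch, not part of this patch, walking every upper device (the pr_info() is illustrative):

	/* Illustrative sketch only -- not part of this patch. */
	static void demo_dump_all_uppers(struct net_device *dev)
	{
		struct net_device *updev;
		struct list_head *iter;

		rcu_read_lock();
		netdev_for_each_all_upper_dev_rcu(dev, updev, iter)
			pr_info("%s is stacked above %s\n",
				netdev_name(updev), netdev_name(dev));
		rcu_read_unlock();
	}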
 
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 708fe72ab9..2077489 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -35,14 +35,15 @@
 	result->all[3] = a1->all[3] & mask->all[3];
 }
 
-extern int netfilter_init(void);
+int netfilter_init(void);
 
 /* Largest hook number + 1 */
 #define NF_MAX_HOOKS 8
 
 struct sk_buff;
 
-typedef unsigned int nf_hookfn(unsigned int hooknum,
+struct nf_hook_ops;
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
 			       struct sk_buff *skb,
 			       const struct net_device *in,
 			       const struct net_device *out,
@@ -52,12 +53,13 @@
 	struct list_head list;
 
 	/* User fills in from here down. */
-	nf_hookfn *hook;
-	struct module *owner;
-	u_int8_t pf;
-	unsigned int hooknum;
+	nf_hookfn	*hook;
+	struct module	*owner;
+	void		*priv;
+	u_int8_t	pf;
+	unsigned int	hooknum;
 	/* Hooks are ordered in ascending priority. */
-	int priority;
+	int		priority;
 };
 
 struct nf_sockopt_ops {
@@ -208,7 +210,7 @@
 /* Call this before modifying an existing packet: ensures it is
    modifiable and linear to the point you care about (writable_len).
    Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 
 struct flowi;
 struct nf_queue_entry;
@@ -269,8 +271,8 @@
 	return csum;
 }
 
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 
 struct nf_conn;
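The nf_hookfn change above threads the nf_hook_ops pointer into every hook, so a hook can recover its hook number and the new per-hook priv pointer from ops instead of a bare hooknum argument. A minimal sketch under the new signature, not part of this patch (the trailing okfn parameter is the one this hunk leaves unchanged):

	/* Illustrative sketch only -- not part of this patch. */
	static unsigned int demo_hook(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
	{
		/* hooknum and the new priv pointer now come from ops */
		pr_debug("hook %u, priv %p\n", ops->hooknum, ops->priv);
		return NF_ACCEPT;
	}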
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 9ac9fbd..c7174b8 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -49,31 +49,68 @@
 
 /* Set extensions */
 enum ip_set_extension {
-	IPSET_EXT_NONE = 0,
-	IPSET_EXT_BIT_TIMEOUT = 1,
+	IPSET_EXT_BIT_TIMEOUT = 0,
 	IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
-	IPSET_EXT_BIT_COUNTER = 2,
+	IPSET_EXT_BIT_COUNTER = 1,
 	IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
-};
-
-/* Extension offsets */
-enum ip_set_offset {
-	IPSET_OFFSET_TIMEOUT = 0,
-	IPSET_OFFSET_COUNTER,
-	IPSET_OFFSET_MAX,
+	IPSET_EXT_BIT_COMMENT = 2,
+	IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+	/* Mark set with an extension which needs to call destroy */
+	IPSET_EXT_BIT_DESTROY = 7,
+	IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
 };
 
 #define SET_WITH_TIMEOUT(s)	((s)->extensions & IPSET_EXT_TIMEOUT)
 #define SET_WITH_COUNTER(s)	((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s)	((s)->extensions & IPSET_EXT_COMMENT)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+	IPSET_EXT_ID_COUNTER = 0,
+	IPSET_EXT_ID_TIMEOUT,
+	IPSET_EXT_ID_COMMENT,
+	IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+	/* Destroy extension private data (can be NULL) */
+	void (*destroy)(void *ext);
+	enum ip_set_extension type;
+	enum ipset_cadt_flags flag;
+	/* Size and minimal alignment */
+	u8 len;
+	u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
 
 struct ip_set_ext {
-	unsigned long timeout;
 	u64 packets;
 	u64 bytes;
+	u32 timeout;
+	char *comment;
+};
+
+struct ip_set_counter {
+	atomic64_t bytes;
+	atomic64_t packets;
+};
+
+struct ip_set_comment {
+	char *str;
 };
 
 struct ip_set;
 
+#define ext_timeout(e, s)	\
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s)	\
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s)	\
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+
 typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
 			   const struct ip_set_ext *ext,
 			   struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@
 	u8 revision_min, revision_max;
 
 	/* Create set */
-	int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+	int (*create)(struct net *net, struct ip_set *set,
+		      struct nlattr *tb[], u32 flags);
 
 	/* Attribute policies */
 	const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@
 	u8 revision;
 	/* Extensions */
 	u8 extensions;
+	/* Default timeout value, if enabled */
+	u32 timeout;
+	/* Element data size */
+	size_t dsize;
+	/* Offsets to extensions in elements */
+	size_t offset[IPSET_EXT_ID_MAX];
 	/* The type specific data */
 	void *data;
 };
 
-struct ip_set_counter {
-	atomic64_t bytes;
-	atomic64_t packets;
-};
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+	/* Check that the extension is enabled for the set and
+	 * call its destroy function on the extension part of the data.
+	 */
+	if (SET_WITH_COMMENT(set))
+		ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+			ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+	u32 cadt_flags = 0;
+
+	if (SET_WITH_TIMEOUT(set))
+		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+					   htonl(set->timeout))))
+			return -EMSGSIZE;
+	if (SET_WITH_COUNTER(set))
+		cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+	if (SET_WITH_COMMENT(set))
+		cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+
+	if (!cadt_flags)
+		return 0;
+	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
 
 static inline void
 ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -247,13 +316,24 @@
 		atomic64_set(&(counter)->packets, (long long)(ext->packets));
 }
 
+/* Netlink CB args */
+enum {
+	IPSET_CB_NET = 0,
+	IPSET_CB_DUMP,
+	IPSET_CB_INDEX,
+	IPSET_CB_ARG0,
+	IPSET_CB_ARG1,
+	IPSET_CB_ARG2,
+};
+
 /* register and unregister set references */
-extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
-extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char *ip_set_name_byindex(ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(const char *name);
-extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
-extern void ip_set_nfnl_put(ip_set_id_t index);
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+				     const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
 
 /* API for iptables set match, and SET target */
 
@@ -272,6 +352,8 @@
 extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+			      size_t len);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);
 
@@ -389,13 +471,40 @@
 }
 
 #include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
 
-#define IP_SET_INIT_KEXT(skb, opt, map)			\
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+		      const void *e, bool active)
+{
+	if (SET_WITH_TIMEOUT(set)) {
+		unsigned long *timeout = ext_timeout(e, set);
+
+		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			htonl(active ? ip_set_timeout_get(timeout)
+				: *timeout)))
+			return -EMSGSIZE;
+	}
+	if (SET_WITH_COUNTER(set) &&
+	    ip_set_put_counter(skb, ext_counter(e, set)))
+		return -EMSGSIZE;
+	if (SET_WITH_COMMENT(set) &&
+	    ip_set_put_comment(skb, ext_comment(e, set)))
+		return -EMSGSIZE;
+	return 0;
+}
+
+#define IP_SET_INIT_KEXT(skb, opt, set)			\
 	{ .bytes = (skb)->len, .packets = 1,		\
-	  .timeout = ip_set_adt_opt_timeout(opt, map) }
+	  .timeout = ip_set_adt_opt_timeout(opt, set) }
 
-#define IP_SET_INIT_UEXT(map)				\
+#define IP_SET_INIT_UEXT(set)				\
 	{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
-	  .timeout = (map)->timeout }
+	  .timeout = (set)->timeout }
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b)		a##b
+#define IPSET_TOKEN(a, b)		IPSET_CONCAT(a, b)
 
 #endif /*_IP_SET_H */
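Since extensions now live at per-set offsets inside each element (set->offset[] into a blob of set->dsize bytes), type-specific code reaches them through the ext_*() accessors above rather than through fixed struct members. A minimal sketch, not part of this patch, of accounting a matched element when the set was created with counters:

	/* Illustrative sketch only -- not part of this patch. */
	static void demo_account_match(struct ip_set *set, void *elem,
				       const struct sk_buff *skb)
	{
		if (SET_WITH_COUNTER(set))
			ip_set_add_bytes(skb->len, ext_counter(elem, set));
	}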
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644
index 0000000..21217ea
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char *
+ip_set_comment_uget(struct nlattr *tb)
+{
+	return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+		    const struct ip_set_ext *ext)
+{
+	size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+	if (unlikely(comment->str)) {
+		kfree(comment->str);
+		comment->str = NULL;
+	}
+	if (!len)
+		return;
+	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+		len = IPSET_MAX_COMMENT_SIZE;
+	comment->str = kzalloc(len + 1, GFP_ATOMIC);
+	if (unlikely(!comment->str))
+		return;
+	strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+	if (!comment->str)
+		return 0;
+	return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+	if (unlikely(!comment->str))
+		return;
+	kfree(comment->str);
+	comment->str = NULL;
+}
+
+#endif
+#endif
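The helpers above cover the whole comment lifecycle: copy in from a netlink attribute, emit on dump, release on destroy. A minimal sketch, not part of this patch, chaining them (the buffer and skb plumbing are hypothetical):

	/* Illustrative sketch only -- not part of this patch. */
	static int demo_comment_roundtrip(struct sk_buff *skb)
	{
		char buf[] = "hypothetical comment";
		struct ip_set_ext ext = { .comment = buf };
		struct ip_set_comment c = { .str = NULL };
		int ret;

		ip_set_init_comment(&c, &ext);	   /* kzalloc + strlcpy, GFP_ATOMIC */
		ret = ip_set_put_comment(skb, &c); /* nla_put_string on dump */
		ip_set_comment_free(&c);	   /* kfree and NULL the pointer */
		return ret;
	}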
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 3aac041..83c2f9e 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,8 +23,8 @@
 /* Set is defined with timeout support: timeout value may be 0 */
 #define IPSET_NO_TIMEOUT	UINT_MAX
 
-#define ip_set_adt_opt_timeout(opt, map)	\
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)
+#define ip_set_adt_opt_timeout(opt, set)	\
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
 
 static inline unsigned int
 ip_set_timeout_uget(struct nlattr *tb)
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 127d0b9..2755057 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -23,6 +23,6 @@
 };
 
 /* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
 
 #endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index f381020..858d9b2 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -29,13 +29,13 @@
 
 struct nf_conn;
 
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
-			 TransportAddress *taddr,
-			 union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
-				     struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
-				     struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+		  TransportAddress *taddr, union nf_inet_addr *addr,
+		  __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+			      struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+			      struct nf_conntrack_expect *this);
 extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
 				  unsigned char **data, int dataoff,
 				  H245_TransportAddress *taddr,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6a0664c..ec2ffaf 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,8 +87,8 @@
 /* delete keymap entries */
 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
 
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
 
 #endif /* __KERNEL__ */
 #endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index ba7f571..d5af3c2 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -107,85 +107,93 @@
 	SDP_HDR_MEDIA,
 };
 
-extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
-				       unsigned int protoff,
-				       unsigned int dataoff,
-				       const char **dptr,
-				       unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
-					  unsigned int protoff, s16 off);
-extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-					      unsigned int protoff,
-					      unsigned int dataoff,
-					      const char **dptr,
-					      unsigned int *datalen,
-					      struct nf_conntrack_expect *exp,
-					      unsigned int matchoff,
-					      unsigned int matchlen);
-extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
-					    unsigned int protoff,
-					    unsigned int dataoff,
-					    const char **dptr,
-					    unsigned int *datalen,
-					    unsigned int sdpoff,
-					    enum sdp_header_types type,
-					    enum sdp_header_types term,
-					    const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
-					    unsigned int protoff,
-					    unsigned int dataoff,
-					    const char **dptr,
-					    unsigned int *datalen,
-					    unsigned int matchoff,
-					    unsigned int matchlen,
-					    u_int16_t port);
-extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-					       unsigned int protoff,
-					       unsigned int dataoff,
-					       const char **dptr,
-					       unsigned int *datalen,
-					       unsigned int sdpoff,
-					       const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
-					     unsigned int protoff,
-					     unsigned int dataoff,
-					     const char **dptr,
-					     unsigned int *datalen,
-					     struct nf_conntrack_expect *rtp_exp,
-					     struct nf_conntrack_expect *rtcp_exp,
-					     unsigned int mediaoff,
-					     unsigned int medialen,
-					     union nf_inet_addr *rtp_addr);
+struct nf_nat_sip_hooks {
+	unsigned int (*msg)(struct sk_buff *skb,
+			    unsigned int protoff,
+			    unsigned int dataoff,
+			    const char **dptr,
+			    unsigned int *datalen);
 
-extern int ct_sip_parse_request(const struct nf_conn *ct,
-				const char *dptr, unsigned int datalen,
-				unsigned int *matchoff, unsigned int *matchlen,
-				union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
-			     unsigned int dataoff, unsigned int datalen,
-			     enum sip_header_types type,
-			     unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
-				   unsigned int *dataoff, unsigned int datalen,
-				   enum sip_header_types type, int *in_header,
-				   unsigned int *matchoff, unsigned int *matchlen,
-				   union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
-				      unsigned int dataoff, unsigned int datalen,
-				      const char *name,
-				      unsigned int *matchoff, unsigned int *matchlen,
-				      union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
-					unsigned int off, unsigned int datalen,
-					const char *name,
-					unsigned int *matchoff, unsigned int *matchen,
-					unsigned int *val);
+	void (*seq_adjust)(struct sk_buff *skb,
+			   unsigned int protoff, s16 off);
 
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
-				 unsigned int dataoff, unsigned int datalen,
+	unsigned int (*expect)(struct sk_buff *skb,
+			       unsigned int protoff,
+			       unsigned int dataoff,
+			       const char **dptr,
+			       unsigned int *datalen,
+			       struct nf_conntrack_expect *exp,
+			       unsigned int matchoff,
+			       unsigned int matchlen);
+
+	unsigned int (*sdp_addr)(struct sk_buff *skb,
+				 unsigned int protoff,
+				 unsigned int dataoff,
+				 const char **dptr,
+				 unsigned int *datalen,
+				 unsigned int sdpoff,
 				 enum sdp_header_types type,
 				 enum sdp_header_types term,
-				 unsigned int *matchoff, unsigned int *matchlen);
+				 const union nf_inet_addr *addr);
+
+	unsigned int (*sdp_port)(struct sk_buff *skb,
+				 unsigned int protoff,
+				 unsigned int dataoff,
+				 const char **dptr,
+				 unsigned int *datalen,
+				 unsigned int matchoff,
+				 unsigned int matchlen,
+				 u_int16_t port);
+
+	unsigned int (*sdp_session)(struct sk_buff *skb,
+				    unsigned int protoff,
+				    unsigned int dataoff,
+				    const char **dptr,
+				    unsigned int *datalen,
+				    unsigned int sdpoff,
+				    const union nf_inet_addr *addr);
+
+	unsigned int (*sdp_media)(struct sk_buff *skb,
+				  unsigned int protoff,
+				  unsigned int dataoff,
+				  const char **dptr,
+				  unsigned int *datalen,
+				  struct nf_conntrack_expect *rtp_exp,
+				  struct nf_conntrack_expect *rtcp_exp,
+				  unsigned int mediaoff,
+				  unsigned int medialen,
+				  union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+			 unsigned int datalen, unsigned int *matchoff,
+			 unsigned int *matchlen, union nf_inet_addr *addr,
+			 __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+		      unsigned int dataoff, unsigned int datalen,
+		      enum sip_header_types type, unsigned int *matchoff,
+		      unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+			    unsigned int *dataoff, unsigned int datalen,
+			    enum sip_header_types type, int *in_header,
+			    unsigned int *matchoff, unsigned int *matchlen,
+			    union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+			       unsigned int dataoff, unsigned int datalen,
+			       const char *name, unsigned int *matchoff,
+			       unsigned int *matchlen, union nf_inet_addr *addr,
+			       bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+				 unsigned int off, unsigned int datalen,
+				 const char *name, unsigned int *matchoff,
+				 unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+			  unsigned int dataoff, unsigned int datalen,
+			  enum sdp_header_types type,
+			  enum sdp_header_types term,
+			  unsigned int *matchoff, unsigned int *matchlen);
 
 #endif /* __KERNEL__ */
 #endif /* __NF_CONNTRACK_SIP_H__ */
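Folding the seven NAT function pointers into one RCU-published struct leaves callers with a single dereference and NULL check instead of seven. A minimal sketch of the caller shape, not part of this patch:

	/* Illustrative sketch only -- not part of this patch. */
	static unsigned int demo_nat_sip_msg(struct sk_buff *skb,
					     unsigned int protoff,
					     unsigned int dataoff,
					     const char **dptr,
					     unsigned int *datalen)
	{
		const struct nf_nat_sip_hooks *hooks;
		unsigned int ret = NF_ACCEPT;

		rcu_read_lock();
		hooks = rcu_dereference(nf_nat_sip_hooks);
		if (hooks)
			ret = hooks->msg(skb, protoff, dataoff, dptr, datalen);
		rcu_read_unlock();
		return ret;
	}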
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cadb740..28c7436 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,6 +14,9 @@
 	int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 
 		    const struct nlmsghdr *nlh,
 		    const struct nlattr * const cda[]);
+	int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+			  const struct nlmsghdr *nlh,
+			  const struct nlattr * const cda[]);
 	const struct nla_policy *policy;	/* netlink attribute policy */
 	const u_int16_t attr_count;		/* number of nlattr's */
 };
@@ -23,22 +26,24 @@
 	__u8 subsys_id;			/* nfnetlink subsystem ID */
 	__u8 cb_count;			/* number of callbacks */
 	const struct nfnl_callback *cb;	/* callback for individual types */
+	int (*commit)(struct sk_buff *skb);
+	int (*abort)(struct sk_buff *skb);
 };
 
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
-					   u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
-			  unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
-			     u32 portid, int flags);
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+				    u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+		   unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+		      int flags);
 
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
 
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
 	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
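call_batch, together with the per-subsystem commit/abort pair, is the plumbing for transactional batches: messages handled via call_batch are queued, then made visible or rolled back as a unit. A minimal sketch of a subsystem wiring this up, not part of this patch (the ID and handler table are hypothetical, and only fields visible in this hunk are initialized):

	/* Illustrative sketch only -- not part of this patch. */
	static int demo_commit(struct sk_buff *skb)
	{
		return 0;	/* make the queued changes visible */
	}

	static int demo_abort(struct sk_buff *skb)
	{
		return 0;	/* throw the queued changes away */
	}

	static const struct nfnl_callback demo_cb[1];	/* handlers elided */

	static const struct nfnetlink_subsystem demo_subsys = {
		.subsys_id	= 0,		/* hypothetical ID */
		.cb_count	= 1,
		.cb		= demo_cb,
		.commit		= demo_commit,
		.abort		= demo_abort,
	};
	/* registered once via nfnetlink_subsys_register(&demo_subsys) */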
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index bb4bbc9..b2e85e5 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -6,8 +6,8 @@
 
 struct nf_acct;
 
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 
 #endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dd49566..a3e215b 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -229,50 +229,48 @@
 
 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
 			  + nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
 
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
 
-extern int xt_check_match(struct xt_mtchk_param *,
-			  unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
-			   unsigned int size, u_int8_t proto, bool inv_proto);
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+		   bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+		    bool inv_proto);
 
-extern struct xt_table *xt_register_table(struct net *net,
-					  const struct xt_table *table,
-					  struct xt_table_info *bootstrap,
-					  struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
+struct xt_table *xt_register_table(struct net *net,
+				   const struct xt_table *table,
+				   struct xt_table_info *bootstrap,
+				   struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
 
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
-					      unsigned int num_counters,
-					      struct xt_table_info *newinfo,
-					      int *error);
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+				       unsigned int num_counters,
+				       struct xt_table_info *newinfo,
+				       int *error);
 
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
-					      u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
-						u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
-			    int target, int *err);
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+		     int *err);
 
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
-					   const char *name);
-extern void xt_table_unlock(struct xt_table *t);
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+				    const char *name);
+void xt_table_unlock(struct xt_table *t);
 
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
 
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
 
 /**
  * xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@
 	return ret;
 }
 
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
 
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
@@ -414,25 +412,25 @@
 
 #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
 
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
 
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
-				     void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
-				   void __user **dstptr, unsigned int *size);
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+			      unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+			    void __user **dstptr, unsigned int *size);
 
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
-				       void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
-				    void __user **dstptr, unsigned int *size);
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+				unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+			     void __user **dstptr, unsigned int *size);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index dfb4d9e..8ab1c27 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -25,7 +25,7 @@
 #define BRNF_PPPoE			0x20
 
 /* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
 static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
 {
 	if (skb->nf_bridge &&
@@ -53,7 +53,7 @@
 	return 0;
 }
 
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
 /* Only used in br_device.c */
 static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index dfaf116..6e4591b 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
-				   unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+		       unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 2d4df6ce..64dad1cc 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -11,12 +11,12 @@
 
 
 #ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-				    unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+			unsigned int dataoff, u_int8_t protocol);
 
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
 
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c6f41b6..c163706 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -118,6 +118,9 @@
 
 #define FIRST_NFS4_OP	OP_ACCESS
 #define LAST_NFS4_OP 	OP_RECLAIM_COMPLETE
+#define LAST_NFS40_OP	OP_RELEASE_LOCKOWNER
+#define LAST_NFS41_OP	OP_RECLAIM_COMPLETE
+#define LAST_NFS42_OP	OP_RECLAIM_COMPLETE
 
 enum nfsstat4 {
 	NFS4_OK = 0,
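The three new bounds let minor-version-aware code reject operations a given minor version never defined, instead of checking only against the global LAST_NFS4_OP. A minimal sketch, not part of this patch (the helper name is hypothetical):

	/* Illustrative sketch only -- not part of this patch. */
	static bool demo_op_known(u32 op, u32 last_op_for_minor)
	{
		return op >= FIRST_NFS4_OP && op <= last_op_for_minor;
	}

	/* e.g. demo_op_known(op, LAST_NFS40_OP) for an NFSv4.0 stream */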
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
index ed7f267..6f10e93 100644
--- a/include/linux/of_mtd.h
+++ b/include/linux/of_mtd.h
@@ -10,10 +10,29 @@
 #define __LINUX_OF_NET_H
 
 #ifdef CONFIG_OF_MTD
+
 #include <linux/of.h>
 int of_get_nand_ecc_mode(struct device_node *np);
 int of_get_nand_bus_width(struct device_node *np);
 bool of_get_nand_on_flash_bbt(struct device_node *np);
-#endif
+
+#else /* CONFIG_OF_MTD */
+
+static inline int of_get_nand_ecc_mode(struct device_node *np)
+{
+	return -ENOSYS;
+}
+
+static inline int of_get_nand_bus_width(struct device_node *np)
+{
+	return -ENOSYS;
+}
+
+static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+	return false;
+}
+
+#endif /* CONFIG_OF_MTD */
 
 #endif /* __LINUX_OF_MTD_H */
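With the -ENOSYS/false stubs above, NAND drivers can call these helpers unconditionally rather than wrapping each call in #ifdef CONFIG_OF_MTD. A minimal probe-time sketch, not part of this patch (the fallback values are hypothetical driver defaults):

	/* Illustrative sketch only -- not part of this patch. */
	static void demo_nand_read_dt(struct device_node *np)
	{
		int ecc = of_get_nand_ecc_mode(np);	/* < 0: no DT info */
		int width = of_get_nand_bus_width(np);

		if (ecc < 0)
			ecc = 0;	/* fall back to a driver default */
		if (width < 0)
			width = 8;	/* assume an 8-bit bus */
	}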
diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007..4cd6267 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@
 	oom_killer_disabled = false;
 }
 
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
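oom_gfp_allowed(), added above, names the eligibility test in one place: an allocation may only escalate to the OOM killer if it can perform filesystem-backed reclaim (__GFP_FS) and is not a fail-fast attempt (__GFP_NORETRY). The call-site shape, sketched, not part of this patch:

	/* Illustrative sketch only -- not part of this patch. */
	static bool demo_may_oom(gfp_t gfp_mask)
	{
		return oom_gfp_allowed(gfp_mask) && !oom_killer_disabled;
	}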
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644
index 3aca2b8..0000000
--- a/include/linux/opp.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- *	Nishanth Menon
- *	Romit Dasgupta
- *	Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_OPP_H__
-#define __LINUX_OPP_H__
-
-#include <linux/err.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-
-struct opp;
-struct device;
-
-enum opp_event {
-	OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
-};
-
-#if defined(CONFIG_PM_OPP)
-
-unsigned long opp_get_voltage(struct opp *opp);
-
-unsigned long opp_get_freq(struct opp *opp);
-
-int opp_get_opp_count(struct device *dev);
-
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-				bool available);
-
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
-
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
-
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
-
-int opp_enable(struct device *dev, unsigned long freq);
-
-int opp_disable(struct device *dev, unsigned long freq);
-
-struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-#else
-static inline unsigned long opp_get_voltage(struct opp *opp)
-{
-	return 0;
-}
-
-static inline unsigned long opp_get_freq(struct opp *opp)
-{
-	return 0;
-}
-
-static inline int opp_get_opp_count(struct device *dev)
-{
-	return 0;
-}
-
-static inline struct opp *opp_find_freq_exact(struct device *dev,
-					unsigned long freq, bool available)
-{
-	return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_floor(struct device *dev,
-					unsigned long *freq)
-{
-	return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_ceil(struct device *dev,
-					unsigned long *freq)
-{
-	return ERR_PTR(-EINVAL);
-}
-
-static inline int opp_add(struct device *dev, unsigned long freq,
-					unsigned long u_volt)
-{
-	return -EINVAL;
-}
-
-static inline int opp_enable(struct device *dev, unsigned long freq)
-{
-	return 0;
-}
-
-static inline int opp_disable(struct device *dev, unsigned long freq)
-{
-	return 0;
-}
-
-static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
-	return ERR_PTR(-EINVAL);
-}
-#endif		/* CONFIG_PM_OPP */
-
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
-	return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int opp_init_cpufreq_table(struct device *dev,
-			    struct cpufreq_frequency_table **table);
-void opp_free_cpufreq_table(struct device *dev,
-				struct cpufreq_frequency_table **table);
-#else
-static inline int opp_init_cpufreq_table(struct device *dev,
-			    struct cpufreq_frequency_table **table)
-{
-	return -EINVAL;
-}
-
-static inline
-void opp_free_cpufreq_table(struct device *dev,
-				struct cpufreq_frequency_table **table)
-{
-}
-#endif		/* CONFIG_CPU_FREQ */
-
-#endif		/* __LINUX_OPP_H__ */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index da172f9..835ec7b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -330,8 +330,6 @@
 	unsigned int	msix_enabled:1;
 	unsigned int	ari_enabled:1;	/* ARI forwarding */
 	unsigned int	is_managed:1;
-	unsigned int	is_pcie:1;	/* Obsolete. Will be removed.
-					   Use pci_is_pcie() instead */
 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
 	unsigned int	state_saved:1;
 	unsigned int	is_physfn:1;
@@ -472,12 +470,25 @@
 /*
  * Returns true if the pci bus is root (behind host-pci bridge),
  * false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  */
 static inline bool pci_is_root_bus(struct pci_bus *pbus)
 {
 	return !(pbus->parent);
 }
 
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+	dev = pci_physfn(dev);
+	if (pci_is_root_bus(dev->bus))
+		return NULL;
+
+	return dev->bus->self;
+}
+
 #ifdef CONFIG_PCI_MSI
 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
 {
@@ -1749,11 +1760,11 @@
  * pci_is_pcie - check if the PCI device is PCI Express capable
  * @dev: PCI device
  *
- * Retrun true if the PCI device is PCI Express capable, false otherwise.
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
  */
 static inline bool pci_is_pcie(struct pci_dev *dev)
 {
-	return !!pci_pcie_cap(dev);
+	return pci_pcie_cap(dev);
 }
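pci_upstream_bridge() folds two recurring steps into one helper: normalize a VF to its PF via pci_physfn() and return NULL at the root bus. A minimal sketch, not part of this patch, climbing to the topmost bridge below the host:

	/* Illustrative sketch only -- not part of this patch. */
	static struct pci_dev *demo_topmost_bridge(struct pci_dev *dev)
	{
		struct pci_dev *bridge = pci_upstream_bridge(dev);

		while (bridge && pci_upstream_bridge(bridge))
			bridge = pci_upstream_bridge(bridge);
		return bridge;	/* NULL when dev sits on the root bus */
	}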
 
 /**
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index c74088a..9e4761c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -375,22 +375,6 @@
 # define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
-#ifndef this_cpu_xor
-# ifndef this_cpu_xor_1
-#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_2
-#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_4
-#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_8
-#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
-#endif
-
 #define _this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	typeof(pcp) ret__;						\
@@ -629,22 +613,6 @@
 # define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
 #endif
 
-#ifndef __this_cpu_xor
-# ifndef __this_cpu_xor_1
-#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_2
-#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_4
-#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_8
-#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
-#endif
-
 #define __this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	__this_cpu_add(pcp, val);					\
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 0b23edb..1900bd0 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -16,6 +16,8 @@
 	 * percpu_ida_init()
 	 */
 	unsigned			nr_tags;
+	unsigned			percpu_max_size;
+	unsigned			percpu_batch_size;
 
 	struct percpu_ida_cpu __percpu	*tag_cpu;
 
@@ -51,10 +53,29 @@
 	} ____cacheline_aligned_in_smp;
 };
 
+/*
+ * Number of tags we move between the percpu freelist and the global freelist at
+ * a time
+ */
+#define IDA_DEFAULT_PCPU_BATCH_MOVE	32U
+/* Max size of percpu freelist */
+#define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
+
 int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
 
 void percpu_ida_destroy(struct percpu_ida *pool);
-int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+	unsigned long max_size, unsigned long batch_size);
+static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+{
+	return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
+		IDA_DEFAULT_PCPU_BATCH_MOVE);
+}
 
+typedef int (*percpu_ida_cb)(unsigned, void *);
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+	void *data);
+
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
 #endif /* __PERCPU_IDA_H__ */
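The widened percpu_ida initializer takes explicit freelist sizing. A sketch of a caller that overrides the defaults; the pool size and the 64/16 values are illustrative only:

	#include <linux/percpu_ida.h>

	static struct percpu_ida example_pool;

	static int example_pool_init(void)
	{
		/* 1024 tags; per-cpu freelists hold at most 64 tags and
		 * exchange 16 at a time with the global freelist. */
		return __percpu_ida_init(&example_pool, 1024, 64, 16);
	}

The unchanged two-argument percpu_ida_init() keeps the old behaviour by forwarding IDA_DEFAULT_PCPU_SIZE and IDA_DEFAULT_PCPU_BATCH_MOVE.
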
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index e277266..7246ef3 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -23,6 +23,7 @@
 struct pid_namespace {
 	struct kref kref;
 	struct pidmap pidmap[PIDMAP_ENTRIES];
+	struct rcu_head rcu;
 	int last_pid;
 	unsigned int nr_hashed;
 	struct task_struct *child_reaper;
diff --git a/include/linux/platform_data/lm3630_bl.h b/include/linux/platform_data/lm3630_bl.h
deleted file mode 100644
index 9176dd3..0000000
--- a/include/linux/platform_data/lm3630_bl.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-* Simple driver for Texas Instruments LM3630 LED Flash driver chip
-* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
-*/
-
-#ifndef __LINUX_LM3630_H
-#define __LINUX_LM3630_H
-
-#define LM3630_NAME "lm3630_bl"
-
-enum lm3630_pwm_ctrl {
-	PWM_CTRL_DISABLE = 0,
-	PWM_CTRL_BANK_A,
-	PWM_CTRL_BANK_B,
-	PWM_CTRL_BANK_ALL,
-};
-
-enum lm3630_pwm_active {
-	PWM_ACTIVE_HIGH = 0,
-	PWM_ACTIVE_LOW,
-};
-
-enum lm3630_bank_a_ctrl {
-	BANK_A_CTRL_DISABLE = 0x0,
-	BANK_A_CTRL_LED1 = 0x4,
-	BANK_A_CTRL_LED2 = 0x1,
-	BANK_A_CTRL_ALL = 0x5,
-};
-
-enum lm3630_bank_b_ctrl {
-	BANK_B_CTRL_DISABLE = 0,
-	BANK_B_CTRL_LED2,
-};
-
-struct lm3630_platform_data {
-
-	/* maximum brightness */
-	int max_brt_led1;
-	int max_brt_led2;
-
-	/* initial on brightness */
-	int init_brt_led1;
-	int init_brt_led2;
-	enum lm3630_pwm_ctrl pwm_ctrl;
-	enum lm3630_pwm_active pwm_active;
-	enum lm3630_bank_a_ctrl bank_a_ctrl;
-	enum lm3630_bank_b_ctrl bank_b_ctrl;
-	unsigned int pwm_period;
-	void (*pwm_set_intensity) (int brightness, int max_brightness);
-};
-
-#endif /* __LINUX_LM3630_H */
diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h
new file mode 100644
index 0000000..7538e38
--- /dev/null
+++ b/include/linux/platform_data/lm3630a_bl.h
@@ -0,0 +1,65 @@
+/*
+* Simple driver for Texas Instruments LM3630A LED Flash driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+
+#ifndef __LINUX_LM3630A_H
+#define __LINUX_LM3630A_H
+
+#define LM3630A_NAME "lm3630a_bl"
+
+enum lm3630a_pwm_ctrl {
+	LM3630A_PWM_DISABLE = 0x00,
+	LM3630A_PWM_BANK_A,
+	LM3630A_PWM_BANK_B,
+	LM3630A_PWM_BANK_ALL,
+	LM3630A_PWM_BANK_A_ACT_LOW = 0x05,
+	LM3630A_PWM_BANK_B_ACT_LOW,
+	LM3630A_PWM_BANK_ALL_ACT_LOW,
+};
+
+enum lm3630a_leda_ctrl {
+	LM3630A_LEDA_DISABLE = 0x00,
+	LM3630A_LEDA_ENABLE = 0x04,
+	LM3630A_LEDA_ENABLE_LINEAR = 0x14,
+};
+
+enum lm3630a_ledb_ctrl {
+	LM3630A_LEDB_DISABLE = 0x00,
+	LM3630A_LEDB_ON_A = 0x01,
+	LM3630A_LEDB_ENABLE = 0x02,
+	LM3630A_LEDB_ENABLE_LINEAR = 0x0A,
+};
+
+#define LM3630A_MAX_BRIGHTNESS 255
+/*
+ * @leda_init_brt : led a init brightness. 4~255
+ * @leda_max_brt  : led a max brightness.  4~255
+ * @leda_ctrl     : led a disable, enable linear, enable exponential
+ * @ledb_init_brt : led b init brightness. 4~255
+ * @ledb_max_brt  : led b max brightness.  4~255
+ * @ledb_ctrl     : led b disable, enable linear, enable exponential
+ * @pwm_period    : pwm period
+ * @pwm_ctrl      : pwm disable, bank a or b, active high or low
+ */
+struct lm3630a_platform_data {
+
+	/* led a config.  */
+	int leda_init_brt;
+	int leda_max_brt;
+	enum lm3630a_leda_ctrl leda_ctrl;
+	/* led b config. */
+	int ledb_init_brt;
+	int ledb_max_brt;
+	enum lm3630a_ledb_ctrl ledb_ctrl;
+	/* pwm config. */
+	unsigned int pwm_period;
+	enum lm3630a_pwm_ctrl pwm_ctrl;
+};
+
+#endif /* __LINUX_LM3630A_H */
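A board file might fill the new platform data along these lines; every value below is illustrative, not a recommendation:

	#include <linux/platform_data/lm3630a_bl.h>

	static struct lm3630a_platform_data example_lm3630a_pdata = {
		.leda_init_brt	= 200,
		.leda_max_brt	= LM3630A_MAX_BRIGHTNESS,
		.leda_ctrl	= LM3630A_LEDA_ENABLE,
		.ledb_init_brt	= 200,
		.ledb_max_brt	= LM3630A_MAX_BRIGHTNESS,
		.ledb_ctrl	= LM3630A_LEDB_ON_A,	/* bank B follows A */
		.pwm_period	= 0,			/* unused: PWM off */
		.pwm_ctrl	= LM3630A_PWM_DISABLE,
	};
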
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ea32005..1b2ba24 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -40,6 +40,17 @@
 #define LP8553_PWM_CONFIG	LP8550_PWM_CONFIG
 #define LP8553_I2C_CONFIG	LP8550_I2C_CONFIG
 
+/* CONFIG register - LP8555 */
+#define LP8555_PWM_STANDBY	BIT(7)
+#define LP8555_PWM_FILTER	BIT(6)
+#define LP8555_RELOAD_EPROM	BIT(3)	/* use it if EPROMs should be reset
+					   when the backlight turns on */
+#define LP8555_OFF_OPENLEDS	BIT(2)
+#define LP8555_PWM_CONFIG	LP8555_PWM_ONLY
+#define LP8555_I2C_CONFIG	LP8555_I2C_ONLY
+#define LP8555_COMB1_CONFIG	LP8555_COMBINED1
+#define LP8555_COMB2_CONFIG	LP8555_COMBINED2
+
 /* DEVICE CONTROL register - LP8556 */
 #define LP8556_PWM_CONFIG	(LP8556_PWM_ONLY << BRT_MODE_SHFT)
 #define LP8556_COMB1_CONFIG	(LP8556_COMBINED1 << BRT_MODE_SHFT)
@@ -65,6 +76,7 @@
 	LP8551,
 	LP8552,
 	LP8553,
+	LP8555,
 	LP8556,
 	LP8557,
 };
@@ -89,6 +101,13 @@
 	LP8553_I2C_ONLY = LP8550_I2C_ONLY,
 };
 
+enum lp8555_brightness_source {
+	LP8555_PWM_ONLY,
+	LP8555_I2C_ONLY,
+	LP8555_COMBINED1,	/* Brightness register with shaped PWM */
+	LP8555_COMBINED2,	/* PWM with shaped brightness register */
+};
+
 enum lp8556_brightness_source {
 	LP8556_PWM_ONLY,
 	LP8556_COMBINED1,	/* pwm + i2c before the shaper block */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 6bf9ef4..4da5bfa 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -23,13 +23,16 @@
 };
 
 enum omap_ecc {
-		/* 1-bit ecc: stored at end of spare area */
-	OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */
-	OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
-		/* 1-bit ecc: stored at beginning of spare area as romcode */
-	OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
-	OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
-	OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
+	/* 1-bit  ECC calculation by GPMC, Error detection by Software */
+	OMAP_ECC_HAM1_CODE_HW = 0,
+	/* 4-bit  ECC calculation by GPMC, Error detection by Software */
+	OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
+	/* 4-bit  ECC calculation by GPMC, Error detection by ELM */
+	OMAP_ECC_BCH4_CODE_HW,
+	/* 8-bit  ECC calculation by GPMC, Error detection by Software */
+	OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
+	/* 8-bit  ECC calculation by GPMC, Error detection by ELM */
+	OMAP_ECC_BCH8_CODE_HW,
 };
 
 struct gpmc_nand_regs {
@@ -63,5 +66,6 @@
 
 	/* for passing the partitions */
 	struct device_node	*of_node;
+	struct device_node	*elm_of_node;
 };
 #endif
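Assuming the surrounding omap_nand_platform_data structure (whose other fields this hunk does not show) carries the scheme in an ecc_opt member, a board might pick the ELM-backed BCH8 mode roughly as follows; treat the field names as assumptions:

	#include <linux/platform_data/mtd-nand-omap2.h>

	static struct omap_nand_platform_data example_nand_data = {
		.ecc_opt	= OMAP_ECC_BCH8_CODE_HW, /* GPMC ECC + ELM */
		/* elm_of_node would be resolved from the board DT. */
	};
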
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
new file mode 100644
index 0000000..0472ab2
--- /dev/null
+++ b/include/linux/platform_data/zforce_ts.h
@@ -0,0 +1,26 @@
+/* drivers/input/touchscreen/zforce.c
+ *
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_INPUT_ZFORCE_TS_H
+#define _LINUX_INPUT_ZFORCE_TS_H
+
+struct zforce_ts_platdata {
+	int gpio_int;
+	int gpio_rst;
+
+	unsigned int x_max;
+	unsigned int y_max;
+};
+
+#endif /* _LINUX_INPUT_ZFORCE_TS_H */
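A board-file sketch for the new touchscreen platform data; the GPIO numbers and panel size are placeholders:

	#include <linux/platform_data/zforce_ts.h>

	static struct zforce_ts_platdata example_zforce_pdata = {
		.gpio_int	= 52,	/* board-specific */
		.gpio_rst	= 53,	/* board-specific */
		.x_max		= 800,
		.y_max		= 600,
	};
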
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644
index 0000000..5151b00
--- /dev/null
+++ b/include/linux/pm_opp.h
@@ -0,0 +1,139 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+	OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+					      unsigned long freq,
+					      bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+					      unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+					     unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+		   unsigned long u_volt);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+	return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+	return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+	return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+					unsigned long freq, bool available)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+					unsigned long *freq)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+					unsigned long *freq)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+					unsigned long u_volt)
+{
+	return -EINVAL;
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+	return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+	return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+							struct device *dev)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif		/* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+	return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+				struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table)
+{
+	return -EINVAL;
+}
+
+static inline
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+				struct cpufreq_frequency_table **table)
+{
+}
+#endif		/* CONFIG_CPU_FREQ */
+
+#endif		/* __LINUX_OPP_H__ */
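A sketch of a consumer of the renamed API, mapping a target frequency to the matching voltage. The OPP library of this era expects these lookups and dev_pm_opp_get_voltage() to run under rcu_read_lock(), so the sketch keeps both inside one read-side section:

	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	static int example_opp_voltage(struct device *dev, unsigned long *freq,
				       unsigned long *volt)
	{
		struct dev_pm_opp *opp;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(dev, freq);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			return PTR_ERR(opp);
		}
		*volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();

		return 0;
	}
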
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644
index 0000000..4e25041
--- /dev/null
+++ b/include/linux/powercap.h
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable:		Enable/Disable whole control type.
+ *			Default is enabled. This callback allows all zones
+ *			to be put in a disabled state, with any applied
+ *			power limits removed. If disabled, power zones can
+ *			only be monitored, not controlled.
+ * @get_enable:		Get Enable/Disable status.
+ * @release:		Callback to inform that the last reference to this
+ *			control type is closed, so it is safe to free the
+ *			data structure associated with it. This callback is
+ *			mandatory if the client owns the memory for the
+ *			control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+	int (*set_enable) (struct powercap_control_type *, bool mode);
+	int (*get_enable) (struct powercap_control_type *, bool *mode);
+	int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type - Defines a powercap control_type
+ * @dev:		device for this control_type
+ * @idr:		idr to have unique id for its children
+ * @nr_zones:		counter for number of zones of this type
+ * @ops:		Pointer to callback struct
+ * @lock:		mutex for control type
+ * @allocated:		It is possible that the client owns the memory
+ *			used by this structure. In that case this flag is
+ *			set to false by the framework to prevent
+ *			deallocation during the release process; otherwise
+ *			it is set to true.
+ * @node:		linked-list node in the list of control_types
+ *
+ * Defines a powercap control_type. This acts as a container for power
+ * zones which use the same method to control power, e.g. RAPL and
+ * RAPL-PCI. All fields are private and should not be used by client
+ * drivers.
+ */
+struct powercap_control_type {
+	struct device dev;
+	struct idr idr;
+	int nr_zones;
+	const struct powercap_control_type_ops *ops;
+	struct mutex lock;
+	bool allocated;
+	struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj:	Get maximum range of energy counter in
+ *				micro-joules.
+ * @get_energy_uj:		Get current energy counter in micro-joules.
+ * @reset_energy_uj:		Reset micro-joules energy counter.
+ * @get_max_power_range_uw:	Get maximum range of power counter in
+ *				micro-watts.
+ * @get_power_uw:		Get current power counter in micro-watts.
+ * @set_enable:			Enable/Disable power zone controls.
+ *				Default is enabled.
+ * @get_enable:			Get Enable/Disable status.
+ * @release:			Callback to inform that the last reference
+ *				to this zone is closed, so it is safe to
+ *				free the data structure associated with it.
+ *				Mandatory if the client driver owns the
+ *				power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drivers can define both energy and power related callbacks, but at
+ * least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required, in callbacks.
+ */
+struct powercap_zone_ops {
+	int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+	int (*get_energy_uj) (struct powercap_zone *, u64 *);
+	int (*reset_energy_uj) (struct powercap_zone *);
+	int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+	int (*get_power_uw) (struct powercap_zone *, u64 *);
+	int (*set_enable) (struct powercap_zone *, bool mode);
+	int (*get_enable) (struct powercap_zone *, bool *mode);
+	int (*release) (struct powercap_zone *);
+};
+
+#define	POWERCAP_ZONE_MAX_ATTRS		6
+#define	POWERCAP_CONSTRAINTS_ATTRS	8
+#define MAX_CONSTRAINTS_PER_ZONE	10
+/**
+ * struct powercap_zone - Defines an instance of a power cap zone
+ * @id:			Unique id
+ * @name:		Power zone name.
+ * @control_type_inst:	Control type instance for this zone.
+ * @ops:		Pointer to the zone operation structure.
+ * @dev:		Instance of a device.
+ * @const_id_cnt:	Number of constraints defined.
+ * @idr:		Instance to an idr entry for children zones.
+ * @parent_idr:		To remove reference from the parent idr.
+ * @private_data:	Private data pointer if any for this zone.
+ * @zone_dev_attrs:	Attributes associated with this device.
+ * @zone_attr_count:	Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups:	Attribute group store to register with device.
+ * @allocated:		It is possible that the client owns the memory
+ *			used by this structure. In that case this flag is
+ *			set to false by the framework to prevent
+ *			deallocation during the release process; otherwise
+ *			it is set to true.
+ * @constraints:	List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+	int id;
+	char *name;
+	void *control_type_inst;
+	const struct powercap_zone_ops *ops;
+	struct device dev;
+	int const_id_cnt;
+	struct idr idr;
+	struct idr *parent_idr;
+	void *private_data;
+	struct attribute **zone_dev_attrs;
+	int zone_attr_count;
+	struct attribute_group dev_zone_attr_group;
+	const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+	bool allocated;
+	struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw:		Set power limit in micro-watts.
+ * @get_power_limit_uw:		Get power limit in micro-watts.
+ * @set_time_window_us:		Set time window in micro-seconds.
+ * @get_time_window_us:		Get time window in micro-seconds.
+ * @get_max_power_uw:		Get max power allowed in micro-watts.
+ * @get_min_power_uw:		Get min power allowed in micro-watts.
+ * @get_max_time_window_us:	Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us:	Get min time window allowed in micro-seconds.
+ * @get_name:			Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ *  set_power_limit_uw
+ *  get_power_limit_uw
+ *  set_time_window_us
+ *  get_time_window_us
+ *  get_name
+ *  Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+	int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+	int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+	int (*set_time_window_us) (struct powercap_zone *, int, u64);
+	int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+	int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+	int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+	int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+	int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+	const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint - Defines an instance of a constraint
+ * @id:			Instance Id of this constraint.
+ * @power_zone:		Pointer to the power zone for this constraint.
+ * @ops:		Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+	int id;
+	struct powercap_zone *power_zone;
+	struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer; may be used in dev_dbg() calls */
+#define POWERCAP_GET_DEV(power_zone)	(&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone:	A pointer to the valid zone instance.
+* @pdata:	A pointer to the user private data.
+*
+* Allows client drivers to associate some private data to zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+						void *pdata)
+{
+	if (power_zone)
+		power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone:	A pointer to the valid zone instance.
+*
+* Allows client drivers to get the private data associated with a zone,
+* as previously set with a call to powercap_set_zone_data().
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+	if (power_zone)
+		return power_zone->private_data;
+	return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type:	Pointer to client allocated memory for the control type
+*			structure storage. If this is NULL, powercap framework
+*			will allocate memory and own it.
+*			Advantage of this parameter is that client can embed
+*			this data in its data structures and allocate in a
+*			single call, preventing multiple allocations.
+* @name:		The name of this control_type, which will be shown
+*			in the sysfs interface.
+* @ops:			Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example, a control_type can be RAPL (Running Average Power Limit) on
+* Intel® 64 and IA-32 processor architectures. The name can be any string,
+* but must be unique; otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+				struct powercap_control_type *control_type,
+				const char *name,
+				const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance:	A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone:	Pointer to client allocated memory for the power zone structure
+*		storage. If this is NULL, powercap framework will allocate
+*		memory and own it. Advantage of this parameter is that client
+*		can embed this data in its data structures and allocate in a
+*		single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name:	A name for this zone.
+* @parent:	A pointer to the parent power zone instance if any or NULL
+* @ops:		Pointer to zone operation callback structure.
+* @nr_constraints: Number of constraints for this zone
+* @const_ops:	Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it.  Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type.
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+			struct powercap_zone *power_zone,
+			struct powercap_control_type *control_type,
+			const char *name,
+			struct powercap_zone *parent,
+			const struct powercap_zone_ops *ops,
+			int nr_constraints,
+			struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type:	A pointer to the valid instance of a control_type.
+* @power_zone:	A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type.  Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+				struct powercap_zone *power_zone);
+
+#endif
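A registration sketch tying the new interfaces together; the zone and constraint ops are assumed to be filled in elsewhere, and all names are illustrative. Failure handling uses IS_ERR_OR_NULL() since the kerneldoc above also mentions a NULL return for duplicate names:

	#include <linux/err.h>
	#include <linux/powercap.h>

	static struct powercap_control_type *example_ct;
	static struct powercap_zone example_zone;

	static int example_powercap_init(const struct powercap_zone_ops *zops,
				struct powercap_zone_constraint_ops *cops)
	{
		struct powercap_zone *zone;

		/* NULL storage: the framework allocates and owns the
		 * control_type. */
		example_ct = powercap_register_control_type(NULL, "example",
							    NULL);
		if (IS_ERR_OR_NULL(example_ct))
			return -ENODEV;

		/* One constraint; the zone lives in our own storage. */
		zone = powercap_register_zone(&example_zone, example_ct,
					      "zone-0", NULL, zops, 1, cops);
		if (IS_ERR_OR_NULL(zone)) {
			powercap_unregister_control_type(example_ct);
			return -ENODEV;
		}

		return 0;
	}
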
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 56f4a86..2de2e27 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -6,6 +6,9 @@
 
 #include <linux/backlight.h>
 
+/* TODO: convert to gpiod_*() API once it has been merged */
+#define PWM_BACKLIGHT_GPIO_ACTIVE_LOW	(1 << 0)
+
 struct platform_pwm_backlight_data {
 	int pwm_id;
 	unsigned int max_brightness;
@@ -13,6 +16,8 @@
 	unsigned int lth_brightness;
 	unsigned int pwm_period_ns;
 	unsigned int *levels;
+	int enable_gpio;
+	unsigned long enable_gpio_flags;
 	int (*init)(struct device *dev);
 	int (*notify)(struct device *dev, int brightness);
 	void (*notify_after)(struct device *dev, int brightness);
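A platform-data sketch using the new enable-GPIO fields; the GPIO number is a board-specific placeholder and the other values are illustrative:

	#include <linux/pwm_backlight.h>

	static struct platform_pwm_backlight_data example_bl_data = {
		.pwm_id			= 0,
		.max_brightness		= 255,
		.pwm_period_ns		= 1000000,	/* 1 kHz */
		.enable_gpio		= 42,		/* board-specific */
		.enable_gpio_flags	= PWM_BACKLIGHT_GPIO_ACTIVE_LOW,
	};
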
diff --git a/include/linux/random.h b/include/linux/random.h
index 6312dd9..4002b3d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -29,8 +29,13 @@
 u32 prandom_u32(void);
 void prandom_bytes(void *buf, int nbytes);
 void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
 
-u32 prandom_u32_state(struct rnd_state *);
+struct rnd_state {
+	__u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
 
 /*
@@ -50,9 +55,10 @@
 {
 	u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
 
-	state->s1 = __seed(i, 1);
-	state->s2 = __seed(i, 7);
-	state->s3 = __seed(i, 15);
+	state->s1 = __seed(i,   2U);
+	state->s2 = __seed(i,   8U);
+	state->s3 = __seed(i,  16U);
+	state->s4 = __seed(i, 128U);
 }
 
 #ifdef CONFIG_ARCH_RANDOM
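With struct rnd_state now defined here, callers can run a private, reproducible PRNG stream alongside the global one; the seed below is arbitrary:

	#include <linux/random.h>

	static u32 example_prandom_once(void)
	{
		struct rnd_state state;

		prandom_seed_state(&state, 0x0123456789abcdefULL);
		return prandom_u32_state(&state);
	}

The widened __seed() offsets (2/8/16/128) match the per-component minimum seeds of the four-word generator that s4 completes.
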
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index aa870a4..57e75ae 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -85,6 +85,11 @@
 	*rb_link = node;
 }
 
+#define rb_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+	})
+
 /**
  * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
  * given type safe against removal of rb_node entry
@@ -95,12 +100,9 @@
  * @field:	the name of the rb_node field within 'type'.
  */
 #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
-	for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
-		n = rb_entry(rb_next_postorder(&pos->field), \
-			typeof(*pos), field); \
-	     &pos->field; \
-	     pos = n, \
-		n = rb_entry(rb_next_postorder(&pos->field), \
-			typeof(*pos), field))
+	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+			typeof(*pos), field); 1; }); \
+	     pos = n)
 
 #endif	/* _LINUX_RBTREE_H */
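The rewritten iterator builds on rb_entry_safe(), so an empty tree (where rb_first_postorder() returns NULL) no longer dereferences a bogus pointer. Its canonical use, sketched here with an illustrative node type, is tearing down a whole tree without per-node rebalancing:

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct example_node {
		struct rb_node rb;
		int key;
	};

	static void example_free_tree(struct rb_root *root)
	{
		struct example_node *pos, *n;

		/* Postorder visits children before their parent, so
		 * freeing pos never touches a node still to be visited. */
		rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
			kfree(pos);

		*root = RB_ROOT;
	}
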
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f28544b..939428a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -15,7 +15,7 @@
 extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
 			      u32 id, long expires, u32 error);
 
-extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
 
 /* RTNL is used as a global lock for all changes to network configuration  */
 extern void rtnl_lock(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 045b0d2..6f7ffa4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -286,6 +286,14 @@
 }
 #endif
 
+#ifdef CONFIG_DETECT_HUNG_TASK
+void reset_hung_task_detector(void);
+#else
+static inline void reset_hung_task_detector(void)
+{
+}
+#endif
+
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
 
@@ -323,6 +331,10 @@
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
+#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
+#define SUID_DUMP_USER		1	/* Dump as user of process */
+#define SUID_DUMP_ROOT		2	/* Dump as root */
+
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */
@@ -1062,15 +1074,6 @@
 	struct hlist_head preempt_notifiers;
 #endif
 
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 10d16c4f..41467f8 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -2,8 +2,8 @@
 #define _SCHED_SYSCTL_H
 
 #ifdef CONFIG_DETECT_HUNG_TASK
+extern int	     sysctl_hung_task_check_count;
 extern unsigned int  sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 4e32edc..52e0097 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,6 +20,7 @@
 	size_t size;
 	size_t from;
 	size_t count;
+	size_t pad_until;
 	loff_t index;
 	loff_t read_pos;
 	u64 version;
@@ -79,6 +80,20 @@
 	}
 }
 
+/**
+ * seq_setwidth - set padding width
+ * @m: the seq_file handle
+ * @size: the max number of bytes to pad.
+ *
+ * Call seq_setwidth() for setting max width, then call seq_printf() etc. and
+ * finally call seq_pad() to pad the remaining bytes.
+ */
+static inline void seq_setwidth(struct seq_file *m, size_t size)
+{
+	m->pad_until = m->count + size;
+}
+void seq_pad(struct seq_file *m, char c);
+
 char *mangle_path(char *s, const char *p, const char *esc);
 int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
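A sketch of the padding helpers in a show() routine; the 32-byte width is arbitrary:

	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		/* Reserve a 32-byte record, print into it, then pad the
		 * remainder with spaces and terminate the line. */
		seq_setwidth(m, 32);
		seq_printf(m, "%s %d", "example", 42);
		seq_pad(m, '\n');
		return 0;
	}
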
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 21a2093..1e8a8b6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -34,6 +34,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <linux/lockdep.h>
 #include <asm/processor.h>
 
 /*
@@ -44,10 +45,50 @@
  */
 typedef struct seqcount {
 	unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } seqcount_t;
 
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+					  struct lock_class_key *key)
+{
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	lockdep_init_map(&s->dep_map, name, key, 0);
+	s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+		.dep_map = { .name = #lockname } \
+
+# define seqcount_init(s)				\
+	do {						\
+		static struct lock_class_key __key;	\
+		__seqcount_init((s), #s, &__key);	\
+	} while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+	seqcount_t *l = (seqcount_t *)s;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+	seqcount_release(&l->dep_map, 1, _RET_IP_);
+	local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
 
 /**
  * __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -76,6 +117,22 @@
 }
 
 /**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. Validity of the critical
+ * section is tested by calling the read_seqcount_retry() function.
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+	unsigned ret = __read_seqcount_begin(s);
+	smp_rmb();
+	return ret;
+}
+
+/**
  * read_seqcount_begin - begin a seq-read critical section
  * @s: pointer to seqcount_t
  * Returns: count to be passed to read_seqcount_retry
@@ -86,9 +143,8 @@
  */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = __read_seqcount_begin(s);
-	smp_rmb();
-	return ret;
+	seqcount_lockdep_reader_access(s);
+	return read_seqcount_begin_no_lockdep(s);
 }
 
 /**
@@ -108,6 +164,8 @@
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = ACCESS_ONCE(s->sequence);
+
+	seqcount_lockdep_reader_access(s);
 	smp_rmb();
 	return ret & ~1;
 }
@@ -152,14 +210,21 @@
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
 	s->sequence++;
 	smp_wmb();
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+	write_seqcount_begin_nested(s, 0);
 }
 
 static inline void write_seqcount_end(seqcount_t *s)
 {
+	seqcount_release(&s->dep_map, 1, _RET_IP_);
 	smp_wmb();
 	s->sequence++;
 }
@@ -188,7 +253,7 @@
  */
 #define __SEQLOCK_UNLOCKED(lockname)			\
 	{						\
-		.seqcount = SEQCNT_ZERO,		\
+		.seqcount = SEQCNT_ZERO(lockname),	\
 		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
 	}
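A sketch of the lockdep-aware seqcount in use; SEQCNT_ZERO() now takes the variable name so lockdep can label the lock class:

	#include <linux/seqlock.h>

	static seqcount_t example_seq = SEQCNT_ZERO(example_seq);
	static u64 example_value;

	static void example_store(u64 val)
	{
		write_seqcount_begin(&example_seq);	/* acquires dep_map */
		example_value = val;
		write_seqcount_end(&example_seq);	/* releases dep_map */
	}

	static u64 example_load(void)
	{
		unsigned seq;
		u64 val;

		do {
			seq = read_seqcount_begin(&example_seq);
			val = example_value;
		} while (read_seqcount_retry(&example_seq, seq));

		return val;
	}
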
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c2d8933..215b5ea 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,9 +318,13 @@
 
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 7,
+	SKB_GSO_IPIP = 1 << 7,
 
-	SKB_GSO_MPLS = 1 << 8,
+	SKB_GSO_SIT = 1 << 8,
+
+	SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+	SKB_GSO_MPLS = 1 << 10,
 };
 
 #if BITS_PER_LONG > 32
@@ -333,11 +337,6 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
-    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
-#endif
-
 /** 
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -370,7 +369,6 @@
  *	@protocol: Packet protocol from driver
  *	@destructor: Destruct function
  *	@nfct: Associated connection, if any
- *	@nfct_reasm: netfilter conntrack re-assembly pointer
  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *	@skb_iif: ifindex of device we arrived on
  *	@tc_index: Traffic control index
@@ -459,9 +457,6 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	struct sk_buff		*nfct_reasm;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
@@ -585,8 +580,8 @@
 	skb->_skb_refdst = (unsigned long)dst;
 }
 
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
-				bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+			 bool force);
 
 /**
  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +629,20 @@
 	return (struct rtable *)skb_dst(skb);
 }
 
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void	       __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void  __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-			     bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+		      bool *fragstolen, int *delta_truesize);
 
-extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+			    int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -660,41 +655,33 @@
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 {
 	return __alloc_skb_head(priority, -1);
 }
 
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-				 gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-				gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
-				 int headroom, gfp_t gfp_mask);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
 
-extern int	       pskb_expand_head(struct sk_buff *skb,
-					int nhead, int ntail,
-					gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-					    unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				       int newheadroom, int newtailroom,
-				       gfp_t priority);
-extern int	       skb_to_sgvec(struct sk_buff *skb,
-				    struct scatterlist *sg, int offset,
-				    int len);
-extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
-				    struct sk_buff **trailer);
-extern int	       skb_pad(struct sk_buff *skb, int pad);
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+				     unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+				int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+		 int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	consume_skb(a)
 
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-			int getfrag(void *from, char *to, int offset,
-			int len,int odd, struct sk_buff *skb),
-			void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+			    int getfrag(void *from, char *to, int offset,
+					int len, int odd, struct sk_buff *skb),
+			    void *from, int length);
 
 struct skb_seq_state {
 	__u32		lower_offset;
@@ -706,18 +693,17 @@
 	__u8		*frag_data;
 };
 
-extern void	      skb_prepare_seq_read(struct sk_buff *skb,
-					   unsigned int from, unsigned int to,
-					   struct skb_seq_state *st);
-extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
-				   struct skb_seq_state *st);
-extern void	      skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+			  unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+			  struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
 
-extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
-				    unsigned int to, struct ts_config *config,
-				    struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+			   unsigned int to, struct ts_config *config,
+			   struct ts_state *state);
 
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
 	if (!skb->l4_rxhash)
@@ -1095,7 +1081,8 @@
  *	The "__skb_xxxx()" functions are the non-atomic ones that
  *	can only be called with interrupts disabled.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+		struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -1201,8 +1188,8 @@
 	__skb_insert(newsk, prev, prev->next, list);
 }
 
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-		       struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+		struct sk_buff_head *list);
 
 static inline void __skb_queue_before(struct sk_buff_head *list,
 				      struct sk_buff *next,
@@ -1221,7 +1208,7 @@
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
@@ -1238,7 +1225,7 @@
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				   struct sk_buff *newsk)
 {
@@ -1249,7 +1236,7 @@
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
@@ -1270,7 +1257,7 @@
  *	so must be used with appropriate locks held only. The head item is
  *	returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1274,7 @@
  *	so must be used with appropriate locks held only. The tail item is
  *	returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
@@ -1361,7 +1348,7 @@
  * @size: the length of the data
  *
  * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
- * @skb to point to &size bytes at offset @off within @page. In
+ * @skb to point to @size bytes at offset @off within @page. In
  * addition updates @skb such that @i is the last fragment.
  *
  * Does not take any additional reference on the fragment.
@@ -1373,8 +1360,11 @@
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-			    int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+		     int size, unsigned int truesize);
+
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+			  unsigned int truesize);
 
 #define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1408,8 @@
 /*
  *	Add data to an sk_buff
  */
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1419,7 @@
 	return tmp;
 }
 
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
@@ -1436,7 +1427,7 @@
 	return skb->data;
 }
 
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
@@ -1449,7 +1440,7 @@
 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
@@ -1753,7 +1744,7 @@
 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
 
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1765,7 +1756,7 @@
 	skb_set_tail_pointer(skb, len);
 }
 
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1838,7 +1829,7 @@
  *	the list and one reference dropped. This function does not take the
  *	list lock and the caller must hold the relevant locks to use it.
  */
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
@@ -1850,11 +1841,10 @@
 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
 #define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
 
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
 
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-					  unsigned int length,
-					  gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+				   gfp_t gfp_mask);
 
 /**
  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2061,8 @@
 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 }
 
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
 /**
  * skb_frag_dma_map - maps a paged fragment via the DMA API
  * @dev: the device to map the fragment to
@@ -2342,60 +2334,49 @@
 #define skb_walk_frags(skb, iter)	\
 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-					   int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-					 int noblock, int *err);
-extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-				     struct poll_table_struct *wait);
-extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
-					       int offset, struct iovec *to,
-					       int size);
-extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-							int hlen,
-							struct iovec *iov);
-extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
-						    int offset,
-						    const struct iovec *from,
-						    int from_offset,
-						    int len);
-extern int	       zerocopy_sg_from_iovec(struct sk_buff *skb,
-					      const struct iovec *frm,
-					      int offset,
-					      size_t count);
-extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
-						     int offset,
-						     const struct iovec *to,
-						     int to_offset,
-						     int size);
-extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void	       skb_free_datagram_locked(struct sock *sk,
-						struct sk_buff *skb);
-extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-					 unsigned int flags);
-extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
-				    int len, __wsum csum);
-extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
-				     void *to, int len);
-extern int	       skb_store_bits(struct sk_buff *skb, int offset,
-				      const void *from, int len);
-extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
-					      int offset, u8 *to, int len,
-					      __wsum csum);
-extern int             skb_splice_bits(struct sk_buff *skb,
-						unsigned int offset,
-						struct pipe_inode_info *pipe,
-						unsigned int len,
-						unsigned int flags);
-extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void	       skb_split(struct sk_buff *skb,
-				 struct sk_buff *skb1, const u32 len);
-extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-				 int shiftlen);
-extern void	       skb_scrub_packet(struct sk_buff *skb, bool xnet);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+				    int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+				  int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+			   struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+			    struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+				     struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+				 const struct iovec *from, int from_offset,
+				 int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+			   int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+				  const struct iovec *to, int to_offset,
+				  int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+			      int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+		    struct pipe_inode_info *pipe, unsigned int len,
+		    unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
-				   netdev_features_t features);
+struct skb_checksum_ops {
+	__wsum (*update)(const void *mem, int len, __wsum wsum);
+	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+		      __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+		    __wsum csum);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
@@ -2440,7 +2421,7 @@
 	memcpy(skb->data + offset, from, len);
 }
 
-extern void skb_init(void);
+void skb_init(void);
 
 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 {
@@ -2483,12 +2464,12 @@
 	return ktime_set(0, 0);
 }
 
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
@@ -2529,8 +2510,8 @@
  * generates a software time stamp (otherwise), then queues the clone
  * to the error queue of the socket.  Errors are silently ignored.
  */
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
-			struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+		   struct skb_shared_hwtstamps *hwtstamps);
 
 static inline void sw_tx_timestamp(struct sk_buff *skb)
 {
@@ -2562,8 +2543,8 @@
  */
 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
@@ -2593,7 +2574,7 @@
 }
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
 	if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2605,18 +2586,6 @@
 		atomic_inc(&nfct->use);
 }
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
-{
-	if (skb)
-		atomic_inc(&skb->users);
-}
-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
-{
-	if (skb)
-		kfree_skb(skb);
-}
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 {
@@ -2635,10 +2604,6 @@
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	nf_conntrack_put_reasm(skb->nfct_reasm);
-	skb->nfct_reasm = NULL;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(skb->nf_bridge);
 	skb->nf_bridge = NULL;
@@ -2660,10 +2625,6 @@
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	dst->nfct_reasm = src->nfct_reasm;
-	nf_conntrack_get_reasm(src->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	dst->nf_bridge  = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
@@ -2675,9 +2636,6 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	nf_conntrack_put_reasm(dst->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(dst->nf_bridge);
 #endif
@@ -2732,28 +2690,27 @@
 	return skb->queue_mapping != 0;
 }
 
-extern u16 __skb_tx_hash(const struct net_device *dev,
-			 const struct sk_buff *skb,
-			 unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+		  unsigned int num_tx_queues);
 
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
 #ifdef CONFIG_XFRM
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
 	return skb->sp;
-}
 #else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
 	return NULL;
-}
 #endif
+}
 
 /* Keeps track of mac header offset relative to skb->head.
  * It is useful for TSO of tunneling protocols, e.g. GRE.
  * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
 struct skb_gso_cb {
-	int mac_offset;
+	int	mac_offset;
+	int	encap_level;
 };
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
 
@@ -2783,12 +2740,13 @@
 	return skb_shinfo(skb)->gso_size;
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 731f523..5da22ee 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -49,6 +49,9 @@
 		smp_call_func_t func, void *info, bool wait,
 		gfp_t gfp_flags);
 
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+				int wait);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -95,9 +98,6 @@
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait);
 
-void __smp_call_function_single(int cpuid, struct call_single_data *data,
-				int wait);
-
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait);
 
@@ -106,14 +106,10 @@
 /*
  * Generic and arch helpers
  */
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 #define generic_smp_call_function_interrupt \
 	generic_smp_call_function_single_interrupt
-#else
-static inline void call_function_init(void) { }
-#endif
 
 /*
  * Mark the boot cpu "online" so that it can call console drivers in
@@ -155,12 +151,6 @@
 
 static inline void kick_all_cpus_sync(void) {  }
 
-static inline void __smp_call_function_single(int cpuid,
-		struct call_single_data *data, int wait)
-{
-	on_each_cpu(data->func, data->info, wait);
-}
-
 #endif /* !SMP */
 
 /*
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c114614..9b058ee 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -237,4 +237,18 @@
 	__srcu_read_unlock(sp, idx);
 }
 
+/**
+ * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
+ *
+ * Converts the preceding srcu_read_unlock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_unlock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
+ * the preceding srcu_read_unlock.
+ */
+static inline void smp_mb__after_srcu_read_unlock(void)
+{
+	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+}
+
 #endif
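
A short usage sketch (my_srcu is an illustrative srcu_struct, not part of
this patch):

	int idx = srcu_read_lock(&my_srcu);
	/* ... read-side critical section ... */
	srcu_read_unlock(&my_srcu, idx);
	smp_mb__after_srcu_read_unlock();
	/* accesses from here on are ordered after the unlock above */
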
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 86a12b0..0688472 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -108,6 +108,16 @@
 	return 0;
 }
 
+/* Get the device PHY address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+	struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+	if (!dev)
+		return -ENODEV;
+
+	return dev->dev->bus->sprom.et0phyaddr;
+}
+
 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
 					  struct pci_dev *pdev);
 extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@
 {
 	return -ENODEV;
 }
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+	return -ENODEV;
+}
 
 #endif /* CONFIG_SSB_DRIVER_GIGE */
 #endif /* LINUX_SSB_DRIVER_GIGE_H_ */
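
A hedged usage sketch for the new helper (the surrounding driver code is
illustrative):

	int phyaddr = ssb_gige_get_phyaddr(pdev);

	if (phyaddr < 0)
		return phyaddr;	/* not attached to an SSB GigE core */
	/* otherwise program phyaddr into the MAC's MDIO logic */
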
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8d4fa82..c0f7526 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -139,7 +139,8 @@
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
-extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -151,8 +152,8 @@
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
-static inline void migration_entry_wait_huge(struct mm_struct *mm,
-					pte_t *pte) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7fac04e..94273bb 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -120,7 +120,7 @@
 		.class			= &event_class_syscall_enter,	\
 		.event.funcs            = &enter_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		.flags			= TRACE_EVENT_FL_CAP_ANY,	\
+		.flags                  = TRACE_EVENT_FL_CAP_ANY,	\
 	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((section("_ftrace_events")))			\
@@ -134,7 +134,7 @@
 		.class			= &event_class_syscall_exit,	\
 		.event.funcs		= &exit_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		.flags			= TRACE_EVENT_FL_CAP_ANY,	\
+		.flags                  = TRACE_EVENT_FL_CAP_ANY,	\
 	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((section("_ftrace_events")))			\
@@ -184,7 +184,8 @@
 
 #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
 #define __SYSCALL_DEFINEx(x, name, ...)					\
-	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))	\
+		__attribute__((alias(__stringify(SyS##name))));		\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
 	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
 	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
@@ -194,7 +195,6 @@
 		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));	\
 		return ret;						\
 	}								\
-	SYSCALL_ALIAS(sys##name, SyS##name);				\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
 asmlinkage long sys_time(time_t __user *tloc);
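
For reference, the effect of the alias-attribute form: assuming
SYSCALL_DEFINE1(close, unsigned int, fd), the macro now expands to roughly

	asmlinkage long sys_close(unsigned int fd)
		__attribute__((alias("SyS_close")));

replacing the assembler-level SYSCALL_ALIAS() indirection.
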
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 8da8c4e..7bfabd2 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,6 +67,13 @@
 #endif
 };
 
+
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
+#else
+# define u64_stats_init(syncp)	do { } while (0)
+#endif
+
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
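
A minimal usage sketch for the new initializer (struct and variable names
are illustrative):

	struct example_stats {
		u64			rx_packets;
		struct u64_stats_sync	syncp;
	};

	/* once, when a counter block is allocated: */
	u64_stats_init(&stats->syncp);

	/* writer side, unchanged: */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);
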
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70..c3fa807 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -36,6 +36,9 @@
  * SUCH DAMAGE.
  */
 
+#ifndef __LINUX_USB_CDC_NCM_H
+#define __LINUX_USB_CDC_NCM_H
+
 #define CDC_NCM_COMM_ALTSETTING_NCM		0
 #define CDC_NCM_COMM_ALTSETTING_MBIM		1
 
@@ -85,22 +88,13 @@
 #define cdc_ncm_data_intf_is_mbim(x)  ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
 
 struct cdc_ncm_ctx {
-	struct usb_cdc_ncm_ntb_parameters ncm_parm;
 	struct hrtimer tx_timer;
 	struct tasklet_struct bh;
 
 	const struct usb_cdc_ncm_desc *func_desc;
-	const struct usb_cdc_mbim_desc   *mbim_desc;
-	const struct usb_cdc_header_desc *header_desc;
-	const struct usb_cdc_union_desc *union_desc;
+	const struct usb_cdc_mbim_desc *mbim_desc;
 	const struct usb_cdc_ether_desc *ether_desc;
 
-	struct net_device *netdev;
-	struct usb_device *udev;
-	struct usb_host_endpoint *in_ep;
-	struct usb_host_endpoint *out_ep;
-	struct usb_host_endpoint *status_ep;
-	struct usb_interface *intf;
 	struct usb_interface *control;
 	struct usb_interface *data;
 
@@ -113,8 +107,6 @@
 
 	u32 tx_timer_pending;
 	u32 tx_curr_frame_num;
-	u32 rx_speed;
-	u32 tx_speed;
 	u32 rx_max;
 	u32 tx_max;
 	u32 max_datagram_size;
@@ -127,9 +119,14 @@
 	u16 connected;
 };
 
-extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
-extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
-extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
-extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
-extern int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
-extern int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
+
+#endif /* __LINUX_USB_CDC_NCM_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 36d36cc..e4abb84 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,11 +51,11 @@
 		      void *data,
 		      gfp_t gfp);
 
-void virtqueue_kick(struct virtqueue *vq);
+bool virtqueue_kick(struct virtqueue *vq);
 
 bool virtqueue_kick_prepare(struct virtqueue *vq);
 
-void virtqueue_notify(struct virtqueue *vq);
+bool virtqueue_notify(struct virtqueue *vq);
 
 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
 
@@ -73,6 +73,8 @@
 
 unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
 
+bool virtqueue_is_broken(struct virtqueue *vq);
+
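
The bool returns let a driver notice a broken device at kick time; a
sketch (the error value chosen here is illustrative):

	if (!virtqueue_kick(vq))
		return -EIO;	/* host marked the device broken */

	/* polling paths can also use virtqueue_is_broken(vq) directly */
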
 /**
  * virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 29b9104..e8f8f71 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -96,33 +96,6 @@
 	return test_bit(fbit, vdev->features);
 }
 
-/**
- * virtio_config_val - look for a feature and get a virtio config entry.
- * @vdev: the virtio device
- * @fbit: the feature bit
- * @offset: the type to search for.
- * @v: a pointer to the value to fill in.
- *
- * The return value is -ENOENT if the feature doesn't exist.  Otherwise
- * the config value is copied into whatever is pointed to by v. */
-#define virtio_config_val(vdev, fbit, offset, v) \
-	virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
-
-#define virtio_config_val_len(vdev, fbit, offset, v, len) \
-	virtio_config_buf((vdev), (fbit), (offset), (v), (len))
-
-static inline int virtio_config_buf(struct virtio_device *vdev,
-				    unsigned int fbit,
-				    unsigned int offset,
-				    void *buf, unsigned len)
-{
-	if (!virtio_has_feature(vdev, fbit))
-		return -ENOENT;
-
-	vdev->config->get(vdev, offset, buf, len);
-	return 0;
-}
-
 static inline
 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
 					vq_callback_t *c, const char *n)
@@ -162,5 +135,139 @@
 	return 0;
 }
 
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr)			\
+	do {								\
+		/* Must match the member's type, and be integer */	\
+		if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+			(*ptr) = 1;					\
+									\
+		switch (sizeof(*ptr)) {					\
+		case 1:							\
+			*(ptr) = virtio_cread8(vdev,			\
+					       offsetof(structname, member)); \
+			break;						\
+		case 2:							\
+			*(ptr) = virtio_cread16(vdev,			\
+						offsetof(structname, member)); \
+			break;						\
+		case 4:							\
+			*(ptr) = virtio_cread32(vdev,			\
+						offsetof(structname, member)); \
+			break;						\
+		case 8:							\
+			*(ptr) = virtio_cread64(vdev,			\
+						offsetof(structname, member)); \
+			break;						\
+		default:						\
+			BUG();						\
+		}							\
+	} while(0)
+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr)			\
+	do {								\
+		/* Must match the member's type, and be integer */	\
+		if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+			BUG_ON((*ptr) == 1);				\
+									\
+		switch (sizeof(*ptr)) {					\
+		case 1:							\
+			virtio_cwrite8(vdev,				\
+				       offsetof(structname, member),	\
+				       *(ptr));				\
+			break;						\
+		case 2:							\
+			virtio_cwrite16(vdev,				\
+					offsetof(structname, member),	\
+					*(ptr));			\
+			break;						\
+		case 4:							\
+			virtio_cwrite32(vdev,				\
+					offsetof(structname, member),	\
+					*(ptr));			\
+			break;						\
+		case 8:							\
+			virtio_cwrite64(vdev,				\
+					offsetof(structname, member),	\
+					*(ptr));			\
+			break;						\
+		default:						\
+			BUG();						\
+		}							\
+	} while(0)
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+	u8 ret;
+	vdev->config->get(vdev, offset, &ret, sizeof(ret));
+	return ret;
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+				      unsigned int offset,
+				      void *buf, size_t len)
+{
+	vdev->config->get(vdev, offset, buf, len);
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+				  unsigned int offset, u8 val)
+{
+	vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+				 unsigned int offset)
+{
+	u16 ret;
+	vdev->config->get(vdev, offset, &ret, sizeof(ret));
+	return ret;
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+				   unsigned int offset, u16 val)
+{
+	vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+				 unsigned int offset)
+{
+	u32 ret;
+	vdev->config->get(vdev, offset, &ret, sizeof(ret));
+	return ret;
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+				   unsigned int offset, u32 val)
+{
+	vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+				 unsigned int offset)
+{
+	u64 ret;
+	vdev->config->get(vdev, offset, &ret, sizeof(ret));
+	return ret;
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+				   unsigned int offset, u64 val)
+{
+	vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
+	({								\
+		int _r = 0;						\
+		if (!virtio_has_feature(vdev, fbit))			\
+			_r = -ENOENT;					\
+		else							\
+			virtio_cread((vdev), structname, member, ptr);	\
+		_r;							\
+	})
 
 #endif /* _LINUX_VIRTIO_CONFIG_H */
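
A usage sketch for the new accessors, borrowing virtio-blk's config layout
for illustration:

	u64 capacity;
	u32 size_max;
	int err;

	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max,
				   &size_max);
	if (err)
		size_max = 0;	/* feature not negotiated */
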
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b300787..67e06fe 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,7 +71,7 @@
 				      struct virtio_device *vdev,
 				      bool weak_barriers,
 				      void *pages,
-				      void (*notify)(struct virtqueue *vq),
+				      bool (*notify)(struct virtqueue *vq),
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name);
 void vring_del_virtqueue(struct virtqueue *vq);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 1855f0a..c557c6d 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -39,6 +39,7 @@
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
 #ifdef CONFIG_NUMA_BALANCING
 		NUMA_PTE_UPDATES,
+		NUMA_HUGE_PTE_UPDATES,
 		NUMA_HINT_FAULTS,
 		NUMA_HINT_FAULTS_LOCAL,
 		NUMA_PAGE_MIGRATE,
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 021b8a3..fc0e432 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,7 +97,7 @@
 int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 				  enum wb_reason reason);
-void sync_inodes_sb(struct super_block *);
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
 
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 10d43d8..2a628b2 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -197,8 +197,8 @@
 	return false;
 }
 
-#define BDADDR_ANY   (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
+#define BDADDR_ANY  (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
 
 /* Copy, swap, convert BD Address */
 static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
@@ -218,11 +218,10 @@
 
 struct bt_sock {
 	struct sock sk;
-	bdaddr_t    src;
-	bdaddr_t    dst;
 	struct list_head accept_q;
 	struct sock *parent;
 	unsigned long flags;
+	void (*skb_msg_name)(struct sk_buff *, void *, int *);
 };
 
 enum {
@@ -249,6 +248,7 @@
 uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
 
 void bt_accept_enqueue(struct sock *parent, struct sock *sk);
 void bt_accept_unlink(struct sock *sk);
@@ -282,8 +282,11 @@
 	__u8 incoming;
 	__u16 expect;
 	__u8 force_active;
+	struct l2cap_chan *chan;
 	struct l2cap_ctrl control;
 	struct hci_req_ctrl req;
+	bdaddr_t bdaddr;
+	__le16 psm;
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
 
@@ -331,16 +334,16 @@
 
 int bt_to_errno(__u16 code);
 
-extern int hci_sock_init(void);
-extern void hci_sock_cleanup(void);
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
 
-extern int bt_sysfs_init(void);
-extern void bt_sysfs_cleanup(void);
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
 
-extern int  bt_procfs_init(struct net *net, const char *name,
-			   struct bt_sock_list* sk_list,
-			   int (* seq_show)(struct seq_file *, void *));
-extern void bt_procfs_cleanup(struct net *net, const char *name);
+int bt_procfs_init(struct net *net, const char *name,
+		   struct bt_sock_list *sk_list,
+		   int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
 
 extern struct dentry *bt_debugfs;
 
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 15f1084..1784c48 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -35,6 +35,8 @@
 
 #define HCI_MAX_AMP_ASSOC_SIZE	672
 
+#define HCI_MAX_CSB_DATA_SIZE	252
+
 /* HCI dev events */
 #define HCI_DEV_REG			1
 #define HCI_DEV_UNREG			2
@@ -62,16 +64,20 @@
 #define HCI_AMP		0x01
 
 /* First BR/EDR Controller shall have ID = 0 */
-#define HCI_BREDR_ID	0
+#define AMP_ID_BREDR	0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR	0x00
+#define AMP_TYPE_80211	0x01
 
 /* AMP controller status */
-#define AMP_CTRL_POWERED_DOWN			0x00
-#define AMP_CTRL_BLUETOOTH_ONLY			0x01
-#define AMP_CTRL_NO_CAPACITY			0x02
-#define AMP_CTRL_LOW_CAPACITY			0x03
-#define AMP_CTRL_MEDIUM_CAPACITY		0x04
-#define AMP_CTRL_HIGH_CAPACITY			0x05
-#define AMP_CTRL_FULL_CAPACITY			0x06
+#define AMP_STATUS_POWERED_DOWN			0x00
+#define AMP_STATUS_BLUETOOTH_ONLY		0x01
+#define AMP_STATUS_NO_CAPACITY			0x02
+#define AMP_STATUS_LOW_CAPACITY			0x03
+#define AMP_STATUS_MEDIUM_CAPACITY		0x04
+#define AMP_STATUS_HIGH_CAPACITY		0x05
+#define AMP_STATUS_FULL_CAPACITY		0x06
 
 /* HCI device quirks */
 enum {
@@ -109,18 +115,22 @@
 	HCI_PAIRABLE,
 	HCI_SERVICE_CACHE,
 	HCI_DEBUG_KEYS,
+	HCI_DUT_MODE,
 	HCI_UNREGISTER,
+	HCI_USER_CHANNEL,
 
 	HCI_LE_SCAN,
 	HCI_SSP_ENABLED,
 	HCI_HS_ENABLED,
 	HCI_LE_ENABLED,
-	HCI_LE_PERIPHERAL,
+	HCI_ADVERTISING,
 	HCI_CONNECTABLE,
 	HCI_DISCOVERABLE,
+	HCI_LIMITED_DISCOVERABLE,
 	HCI_LINK_SECURITY,
 	HCI_PERIODIC_INQ,
 	HCI_FAST_CONNECTABLE,
+	HCI_BREDR_ENABLED,
 };
 
 /* A mask for the flags that are supposed to remain when a reset happens
@@ -624,6 +634,24 @@
 	__u8     flow_spec_id;
 } __packed;
 
+#define HCI_OP_SET_CSB			0x0441
+struct hci_cp_set_csb {
+	__u8	enable;
+	__u8	lt_addr;
+	__u8	lpo_allowed;
+	__le16	packet_type;
+	__le16	interval_min;
+	__le16	interval_max;
+	__le16	csb_sv_tout;
+} __packed;
+struct hci_rp_set_csb {
+	__u8	status;
+	__u8	lt_addr;
+	__le16	interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN		0x0443
+
 #define HCI_OP_SNIFF_MODE		0x0803
 struct hci_cp_sniff_mode {
 	__le16   handle;
@@ -694,9 +722,6 @@
 } __packed;
 
 #define HCI_OP_SET_EVENT_MASK		0x0c01
-struct hci_cp_set_event_mask {
-	__u8     mask[8];
-} __packed;
 
 #define HCI_OP_RESET			0x0c03
 
@@ -792,6 +817,20 @@
 	__le16   sco_max_pkt;
 } __packed;
 
+#define HCI_OP_READ_NUM_SUPPORTED_IAC	0x0c38
+struct hci_rp_read_num_supported_iac {
+	__u8	status;
+	__u8	num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP	0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP	0x0c3a
+struct hci_cp_write_current_iac_lap {
+	__u8	num_iac;
+	__u8	iac_lap[6];
+} __packed;
+
 #define HCI_OP_WRITE_INQUIRY_MODE	0x0c45
 
 #define HCI_MAX_EIR_LENGTH		240
@@ -826,6 +865,10 @@
 	__s8     tx_power;
 } __packed;
 
+#define HCI_OP_SET_EVENT_MASK_PAGE_2	0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA	0x0c64
+
 #define HCI_OP_READ_FLOW_CONTROL_MODE	0x0c66
 struct hci_rp_read_flow_control_mode {
 	__u8     status;
@@ -838,6 +881,50 @@
 	__u8	simul;
 } __packed;
 
+#define HCI_OP_SET_RESERVED_LT_ADDR	0x0c74
+struct hci_cp_set_reserved_lt_addr {
+	__u8	lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+	__u8	status;
+	__u8	lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR	0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+	__u8	lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+	__u8	status;
+	__u8	lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CSB_DATA		0x0c76
+struct hci_cp_set_csb_data {
+	__u8	lt_addr;
+	__u8	fragment;
+	__u8	data_length;
+	__u8	data[HCI_MAX_CSB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_csb_data {
+	__u8	status;
+	__u8	lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS	0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS	0x0c78
+struct hci_cp_write_sync_train_params {
+	__le16	interval_min;
+	__le16	interval_max;
+	__le32	sync_train_tout;
+	__u8	service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+	__u8	status;
+	__le16	sync_train_int;
+} __packed;
+
 #define HCI_OP_READ_LOCAL_VERSION	0x1001
 struct hci_rp_read_local_version {
 	__u8     status;
@@ -957,6 +1044,10 @@
 	__u8     phy_handle;
 } __packed;
 
+#define HCI_OP_ENABLE_DUT_MODE		0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE	0x1804
+
 #define HCI_OP_LE_SET_EVENT_MASK	0x2001
 struct hci_cp_le_set_event_mask {
 	__u8     mask[8];
@@ -975,6 +1066,20 @@
 	__u8     features[8];
 } __packed;
 
+#define HCI_OP_LE_SET_RANDOM_ADDR	0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM		0x2006
+struct hci_cp_le_set_adv_param {
+	__le16   min_interval;
+	__le16   max_interval;
+	__u8     type;
+	__u8     own_address_type;
+	__u8     direct_addr_type;
+	bdaddr_t direct_addr;
+	__u8     channel_map;
+	__u8     filter_policy;
+} __packed;
+
 #define HCI_OP_LE_READ_ADV_TX_POWER	0x2007
 struct hci_rp_le_read_adv_tx_power {
 	__u8	status;
@@ -989,6 +1094,12 @@
 	__u8	data[HCI_MAX_AD_LENGTH];
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_RSP_DATA	0x2009
+struct hci_cp_le_set_scan_rsp_data {
+	__u8	length;
+	__u8	data[HCI_MAX_AD_LENGTH];
+} __packed;
+
 #define HCI_OP_LE_SET_ADV_ENABLE	0x200a
 
 #define LE_SCAN_PASSIVE			0x00
@@ -1438,6 +1549,13 @@
 	struct hci_comp_blocks_info handles[0];
 } __packed;
 
+#define HCI_EV_SYNC_TRAIN_COMPLETE	0x4F
+struct hci_ev_sync_train_complete {
+	__u8	status;
+} __packed;
+
+#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT	0x54
+
 /* Low energy meta events */
 #define LE_CONN_ROLE_MASTER	0x00
 
@@ -1462,11 +1580,11 @@
 } __packed;
 
 /* Advertising report event types */
-#define ADV_IND		0x00
-#define ADV_DIRECT_IND	0x01
-#define ADV_SCAN_IND	0x02
-#define ADV_NONCONN_IND	0x03
-#define ADV_SCAN_RSP	0x04
+#define LE_ADV_IND		0x00
+#define LE_ADV_DIRECT_IND	0x01
+#define LE_ADV_SCAN_IND		0x02
+#define LE_ADV_NONCONN_IND	0x03
+#define LE_ADV_SCAN_RSP		0x04
 
 #define ADDR_LE_DEV_PUBLIC	0x00
 #define ADDR_LE_DEV_RANDOM	0x01
@@ -1571,6 +1689,7 @@
 #define HCI_DEV_NONE	0xffff
 
 #define HCI_CHANNEL_RAW		0
+#define HCI_CHANNEL_USER	1
 #define HCI_CHANNEL_MONITOR	2
 #define HCI_CHANNEL_CONTROL	3
 
@@ -1673,6 +1792,4 @@
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
-extern bool enable_hs;
-
 #endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3ede820..f8555ad7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -81,6 +81,7 @@
 struct bdaddr_list {
 	struct list_head list;
 	bdaddr_t bdaddr;
+	u8 bdaddr_type;
 };
 
 struct bt_uuid {
@@ -140,6 +141,8 @@
 	__u8		bus;
 	__u8		dev_type;
 	bdaddr_t	bdaddr;
+	bdaddr_t	static_addr;
+	__u8		own_addr_type;
 	__u8		dev_name[HCI_MAX_NAME_LENGTH];
 	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
 	__u8		eir[HCI_MAX_EIR_LENGTH];
@@ -158,11 +161,17 @@
 	__u16		manufacturer;
 	__u16		lmp_subver;
 	__u16		voice_setting;
+	__u8		num_iac;
 	__u8		io_capability;
 	__s8		inq_tx_power;
 	__u16		page_scan_interval;
 	__u16		page_scan_window;
 	__u8		page_scan_type;
+	__u16		le_scan_interval;
+	__u16		le_scan_window;
+	__u16		le_conn_min_interval;
+	__u16		le_conn_max_interval;
+	__u8		ssp_debug_mode;
 
 	__u16		devid_source;
 	__u16		devid_vendor;
@@ -279,14 +288,15 @@
 	__s8			adv_tx_power;
 	__u8			adv_data[HCI_MAX_AD_LENGTH];
 	__u8			adv_data_len;
+	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
+	__u8			scan_rsp_data_len;
 
 	int (*open)(struct hci_dev *hdev);
 	int (*close)(struct hci_dev *hdev);
 	int (*flush)(struct hci_dev *hdev);
 	int (*setup)(struct hci_dev *hdev);
-	int (*send)(struct sk_buff *skb);
+	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
 	void (*notify)(struct hci_dev *hdev, unsigned int evt);
-	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
 };
 
 #define HCI_PHY_HANDLE(handle)	(handle & 0xff)
@@ -298,6 +308,8 @@
 
 	bdaddr_t	dst;
 	__u8		dst_type;
+	bdaddr_t	src;
+	__u8		src_type;
 	__u16		handle;
 	__u16		state;
 	__u8		mode;
@@ -306,7 +318,6 @@
 	__u8		attempt;
 	__u8		dev_class[3];
 	__u8		features[HCI_MAX_PAGES][8];
-	__u16		interval;
 	__u16		pkt_type;
 	__u16		link_policy;
 	__u32		link_mode;
@@ -334,8 +345,8 @@
 	struct list_head chan_list;
 
 	struct delayed_work disc_work;
-	struct timer_list idle_timer;
-	struct timer_list auto_accept_timer;
+	struct delayed_work auto_accept_work;
+	struct delayed_work idle_work;
 
 	struct device	dev;
 
@@ -367,18 +378,17 @@
 extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
-extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
-extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
-			      u16 flags);
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
 
-extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
@@ -644,7 +654,7 @@
 		switch (conn->type) {
 		case ACL_LINK:
 		case LE_LINK:
-			del_timer(&conn->idle_timer);
+			cancel_delayed_work(&conn->idle_work);
 			if (conn->state == BT_CONNECTED) {
 				timeo = conn->disc_timeout;
 				if (!conn->out)
@@ -703,19 +713,6 @@
 	dev_set_drvdata(&hdev->dev, data);
 }
 
-/* hci_dev_list shall be locked */
-static inline uint8_t __hci_num_ctrl(void)
-{
-	uint8_t count = 0;
-	struct list_head *p;
-
-	list_for_each(p, &hci_dev_list) {
-		count++;
-	}
-
-	return count;
-}
-
 struct hci_dev *hci_dev_get(int index);
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
 
@@ -738,7 +735,7 @@
 int hci_inquiry(void __user *arg);
 
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
-					 bdaddr_t *bdaddr);
+					 bdaddr_t *bdaddr, u8 type);
 int hci_blacklist_clear(struct hci_dev *hdev);
 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
@@ -768,13 +765,11 @@
 
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
-int hci_recv_frame(struct sk_buff *skb);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
 
 void hci_init_sysfs(struct hci_dev *hdev);
-int hci_add_sysfs(struct hci_dev *hdev);
-void hci_del_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
 void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -807,22 +802,6 @@
 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
 
-/* returns true if at least one AMP active */
-static inline bool hci_amp_capable(void)
-{
-	struct hci_dev *hdev;
-	bool ret = false;
-
-	read_lock(&hci_dev_list_lock);
-	list_for_each_entry(hdev, &hci_dev_list, list)
-		if (hdev->amp_type == HCI_AMP &&
-		    test_bit(HCI_UP, &hdev->flags))
-			ret = true;
-	read_unlock(&hci_dev_list_lock);
-
-	return ret;
-}
-
 /* ----- HCI protocols ----- */
 #define HCI_PROTO_DEFER             0x01
 
@@ -1033,34 +1012,6 @@
 	return false;
 }
 
-static inline size_t eir_get_length(u8 *eir, size_t eir_len)
-{
-	size_t parsed = 0;
-
-	while (parsed < eir_len) {
-		u8 field_len = eir[0];
-
-		if (field_len == 0)
-			return parsed;
-
-		parsed += field_len + 1;
-		eir += field_len + 1;
-	}
-
-	return eir_len;
-}
-
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
-				  u8 data_len)
-{
-	eir[eir_len++] = sizeof(type) + data_len;
-	eir[eir_len++] = type;
-	memcpy(&eir[eir_len], data, data_len);
-	eir_len += data_len;
-
-	return eir_len;
-}
-
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
@@ -1120,29 +1071,30 @@
 #define DISCOV_BREDR_INQUIRY_LEN	0x08
 
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(struct hci_dev *hdev);
-int mgmt_index_removed(struct hci_dev *hdev);
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
 int mgmt_powered(struct hci_dev *hdev, u8 powered);
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
-		      bool persistent);
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
-			  u8 *dev_class);
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			     u8 link_type, u8 addr_type, u8 reason);
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			   u8 link_type, u8 addr_type, u8 status);
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-			u8 addr_type, u8 status);
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				 u8 status);
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				     u8 status);
+void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+		       bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
+			   u8 *dev_class);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+			      u8 link_type, u8 addr_type, u8 reason);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+			    u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+			 u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				  u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				      u8 status);
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			      u8 link_type, u8 addr_type, __le32 value,
 			      u8 confirm_hint);
@@ -1159,26 +1111,25 @@
 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			     u8 link_type, u8 addr_type, u32 passkey,
 			     u8 entered);
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		     u8 addr_type, u8 status);
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
-				   u8 status);
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-					    u8 *randomizer, u8 status);
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-		      u8 ssp, u8 *eir, u16 eir_len);
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		     u8 addr_type, s8 rssi, u8 *name, u8 name_len);
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		      u8 addr_type, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+				    u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+					     u8 *randomizer, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+		       u8 ssp, u8 *eir, u16 eir_len);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-bool mgmt_valid_hdev(struct hci_dev *hdev);
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_reenable_advertising(struct hci_dev *hdev);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -1208,15 +1159,11 @@
 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
 
-void hci_update_ad(struct hci_request *req);
-
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
 					u16 latency, u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 							__u8 ltk[16]);
 
-u8 bdaddr_to_le(u8 bdaddr_type);
-
 #define SCO_AIRMODE_MASK       0x0003
 #define SCO_AIRMODE_CVSD       0x0000
 #define SCO_AIRMODE_TRANSP     0x0003
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 1a966af..c853b16d 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -131,6 +131,7 @@
 
 /* L2CAP fixed channels */
 #define L2CAP_FC_L2CAP		0x02
+#define L2CAP_FC_CONNLESS	0x04
 #define L2CAP_FC_A2MP		0x08
 
 /* L2CAP Control Field bit masks */
@@ -237,8 +238,9 @@
 /* protocol/service multiplexer (PSM) */
 #define L2CAP_PSM_SDP		0x0001
 #define L2CAP_PSM_RFCOMM	0x0003
+#define L2CAP_PSM_3DSP		0x0021
 
-/* channel indentifier */
+/* channel identifier */
 #define L2CAP_CID_SIGNALING	0x0001
 #define L2CAP_CID_CONN_LESS	0x0002
 #define L2CAP_CID_A2MP		0x0003
@@ -433,8 +435,6 @@
 #define L2CAP_SEQ_LIST_TAIL	0x8000
 
 struct l2cap_chan {
-	struct sock *sk;
-
 	struct l2cap_conn	*conn;
 	struct hci_conn		*hs_hcon;
 	struct hci_chan		*hs_hchan;
@@ -442,7 +442,12 @@
 
 	__u8		state;
 
+	bdaddr_t	dst;
+	__u8		dst_type;
+	bdaddr_t	src;
+	__u8		src_type;
 	__le16		psm;
+	__le16		sport;
 	__u16		dcid;
 	__u16		scid;
 
@@ -453,8 +458,6 @@
 	__u8		chan_type;
 	__u8		chan_policy;
 
-	__le16		sport;
-
 	__u8		sec_level;
 
 	__u8		ident;
@@ -546,9 +549,12 @@
 	void			(*teardown) (struct l2cap_chan *chan, int err);
 	void			(*close) (struct l2cap_chan *chan);
 	void			(*state_change) (struct l2cap_chan *chan,
-						 int state);
+						 int state, int err);
 	void			(*ready) (struct l2cap_chan *chan);
 	void			(*defer) (struct l2cap_chan *chan);
+	void			(*resume) (struct l2cap_chan *chan);
+	void			(*set_shutdown) (struct l2cap_chan *chan);
+	long			(*get_sndtimeo) (struct l2cap_chan *chan);
 	struct sk_buff		*(*alloc_skb) (struct l2cap_chan *chan,
 					       unsigned long len, int nb);
 };
@@ -557,13 +563,11 @@
 	struct hci_conn		*hcon;
 	struct hci_chan		*hchan;
 
-	bdaddr_t		*dst;
-	bdaddr_t		*src;
-
 	unsigned int		mtu;
 
 	__u32			feat_mask;
 	__u8			fixed_chan_mask;
+	bool			hs_enabled;
 
 	__u8			info_state;
 	__u8			info_ident;
@@ -649,6 +653,7 @@
 	FLAG_FLUSHABLE,
 	FLAG_EXT_CTRL,
 	FLAG_EFS_ENABLE,
+	FLAG_DEFER_SETUP,
 };
 
 enum {
@@ -790,6 +795,19 @@
 {
 }
 
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+	return 0;
+}
+
 extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
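
A sketch of wiring the new no-op helpers into a channel that needs no
special resume/shutdown handling (mandatory callbacks omitted for brevity;
names are illustrative):

	static const struct l2cap_ops example_chan_ops = {
		/* .recv, .alloc_skb, etc. elided */
		.resume		= l2cap_chan_no_resume,
		.set_shutdown	= l2cap_chan_no_set_shutdown,
		.get_sndtimeo	= l2cap_chan_no_get_sndtimeo,
	};
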
@@ -797,7 +815,6 @@
 bool l2cap_is_socket(struct socket *sock);
 
 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
-int __l2cap_wait_ack(struct sock *sk);
 
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 9944c3e..518c5c8 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -93,6 +93,7 @@
 #define MGMT_SETTING_BREDR		0x00000080
 #define MGMT_SETTING_HS			0x00000100
 #define MGMT_SETTING_LE			0x00000200
+#define MGMT_SETTING_ADVERTISING	0x00000400
 
 #define MGMT_OP_READ_INFO		0x0004
 #define MGMT_READ_INFO_SIZE		0
@@ -351,6 +352,23 @@
 } __packed;
 #define MGMT_SET_DEVICE_ID_SIZE		8
 
+#define MGMT_OP_SET_ADVERTISING		0x0029
+
+#define MGMT_OP_SET_BREDR		0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS	0x002B
+struct mgmt_cp_set_static_address {
+	bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE	6
+
+#define MGMT_OP_SET_SCAN_PARAMS		0x002C
+struct mgmt_cp_set_scan_params {
+	__le16	interval;
+	__le16	window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE	4
+
 #define MGMT_EV_CMD_COMPLETE		0x0001
 struct mgmt_ev_cmd_complete {
 	__le16	opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 7afd419..486213a 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -256,8 +256,8 @@
 		rfcomm_dlc_free(d);
 }
 
-extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
-extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
 
 static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
@@ -300,6 +300,8 @@
 
 struct rfcomm_pinfo {
 	struct bt_sock bt;
+	bdaddr_t src;
+	bdaddr_t dst;
 	struct rfcomm_dlc   *dlc;
 	u8     channel;
 	u8     sec_level;
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index e252a31..2019d1a 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -55,9 +55,6 @@
 struct sco_conn {
 	struct hci_conn	*hcon;
 
-	bdaddr_t	*dst;
-	bdaddr_t	*src;
-
 	spinlock_t	lock;
 	struct sock	*sk;
 
@@ -72,6 +69,8 @@
 
 struct sco_pinfo {
 	struct bt_sock	bt;
+	bdaddr_t	src;
+	bdaddr_t	dst;
 	__u32		flags;
 	__u16		setting;
 	struct sco_conn	*conn;
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 4795e81..097f69c 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -195,6 +195,6 @@
 	__IFLA_CAIF_HSI_MAX
 };
 
-extern struct cfhsi_ops *cfhsi_get_ops(void);
+struct cfhsi_ops *cfhsi_get_ops(void);
 
 #endif		/* CAIF_HSI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cb71091..3eae46c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -437,6 +437,15 @@
 			     u32 prohibited_flags);
 
 /**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+				  const struct cfg80211_chan_def *chandef);
+
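
A hedged caller-side sketch (the radar-detection helper named here is
hypothetical):

	int err = cfg80211_chandef_dfs_required(wiphy, chandef);

	if (err < 0)
		return err;	/* invalid channel definition */
	if (err > 0)
		err = example_start_radar_detection(wiphy, chandef);
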
+/**
  * ieee80211_chandef_rate_flags - returns rate flags for a channel
  *
  * In some channel types, not all rates may be used - for example CCK
@@ -735,6 +744,10 @@
  * @capability: station capability
  * @ext_capab: extended capabilities of the station
  * @ext_capab_len: number of extended capabilities
+ * @supported_channels: supported channels in IEEE 802.11 format
+ * @supported_channels_len: number of supported channels
+ * @supported_oper_classes: supported operating classes in IEEE 802.11 format
+ * @supported_oper_classes_len: number of supported operating classes
  */
 struct station_parameters {
 	const u8 *supported_rates;
@@ -754,6 +767,10 @@
 	u16 capability;
 	const u8 *ext_capab;
 	u8 ext_capab_len;
+	const u8 *supported_channels;
+	u8 supported_channels_len;
+	const u8 *supported_oper_classes;
+	u8 supported_oper_classes_len;
 };
 
 /**
@@ -1647,6 +1664,9 @@
  *	sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
  *	required to assume that the port is unauthorized until authorized by
  *	user space. Otherwise, port is marked authorized by default.
+ * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
+ *	changes the channel when a radar is detected. This is required
+ *	to operate on DFS channels.
  * @basic_rates: bitmap of basic rates to use when creating the IBSS
  * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
@@ -1664,6 +1684,7 @@
 	bool channel_fixed;
 	bool privacy;
 	bool control_port;
+	bool userspace_handles_dfs;
 	int mcast_rate[IEEE80211_NUM_BANDS];
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
@@ -3044,6 +3065,7 @@
  * @conn: (private) cfg80211 software SME connection state machine data
  * @connect_keys: (private) keys to set after connection is established
  * @ibss_fixed: (private) IBSS is using fixed BSSID
+ * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
  * @event_lock: (private) lock for event list
  */
@@ -3082,6 +3104,7 @@
 	struct ieee80211_channel *channel;
 
 	bool ibss_fixed;
+	bool ibss_dfs_possible;
 
 	bool ps;
 	int ps_timeout;
@@ -3474,6 +3497,15 @@
 const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
 					       u32 center_freq);
 
+/**
+ * reg_initiator_name - map regulatory request initiator enum to name
+ * @initiator: the regulatory request initiator
+ *
+ * You can use this to map the regulatory request initiator enum to a
+ * proper string representation.
+ */
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+
 /*
  * callbacks for asynchronous cfg80211 methods, notification
  * functions and BSS handling helpers
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 8f59ca5..37a0e24 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -79,6 +79,12 @@
 }
 
 static inline __wsum
+csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
+{
+	return csum_block_add(csum, csum2, offset);
+}
+
+static inline __wsum
 csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
 	u32 sum = (__force u32)csum2;
@@ -92,6 +98,11 @@
 	return (__force __wsum)n;
 }
 
+static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+{
+	return csum_partial(buff, len, sum);
+}
+
 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 
 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
diff --git a/include/net/codel.h b/include/net/codel.h
index 389cf62..3b04ff5 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -72,10 +72,21 @@
 	return ns >> CODEL_SHIFT;
 }
 
-#define codel_time_after(a, b)		((s32)(a) - (s32)(b) > 0)
-#define codel_time_after_eq(a, b)	((s32)(a) - (s32)(b) >= 0)
-#define codel_time_before(a, b)		((s32)(a) - (s32)(b) < 0)
-#define codel_time_before_eq(a, b)	((s32)(a) - (s32)(b) <= 0)
+/* Dealing with timer wrapping, according to RFC 1982, as described in Wikipedia:
+ *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
+ * codel_time_after(a,b) returns true if the time a is after time b.
+ */
+#define codel_time_after(a, b)						\
+	(typecheck(codel_time_t, a) &&					\
+	 typecheck(codel_time_t, b) &&					\
+	 ((s32)((a) - (b)) > 0))
+#define codel_time_before(a, b) 	codel_time_after(b, a)
+
+#define codel_time_after_eq(a, b)					\
+	(typecheck(codel_time_t, a) &&					\
+	 typecheck(codel_time_t, b) &&					\
+	 ((s32)((a) - (b)) >= 0))
+#define codel_time_before_eq(a, b)	codel_time_after_eq(b, a)
 
 /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
 struct codel_skb_cb {
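
A worked example of the wrap-safe comparison (codel_time_t is a 32-bit
tick count):

	codel_time_t a = 1;		/* just after the counter wrapped */
	codel_time_t b = 0xfffffffe;	/* just before the wrap */

	/* (s32)(a - b) == 3 > 0, so a is correctly "after" b even though
	 * a < b as plain unsigned values.
	 */
	WARN_ON(!codel_time_after(a, b));
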
diff --git a/include/net/compat.h b/include/net/compat.h
index 6e95653..3b603b1 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -29,8 +29,8 @@
 	compat_int_t	cmsg_type;
 };
 
-extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
+int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 #else /* defined(CONFIG_COMPAT) */
 /*
@@ -40,24 +40,30 @@
 #define compat_mmsghdr	mmsghdr
 #endif /* defined(CONFIG_COMPAT) */
 
-extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned int, unsigned int);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned int, unsigned int,
-					   struct compat_timespec __user *);
-extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+int verify_compat_iovec(struct msghdr *, struct iovec *,
+			struct sockaddr_storage *, int);
+asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
+				   unsigned int);
+asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+				    unsigned int, unsigned int);
+asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
+				   unsigned int);
+asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
+				    unsigned int, unsigned int,
+				    struct compat_timespec __user *);
+asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
+				      int __user *);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
 
-extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+				     unsigned char *, int);
 
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
-	int (*)(struct sock *, int, int, char __user *, unsigned int));
-extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
-	int __user *, int (*)(struct sock *, int, int, char __user *,
-				int __user *));
+int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+			 int (*)(struct sock *, int, int, char __user *,
+				 unsigned int));
+int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
+			 int (*)(struct sock *, int, int, char __user *,
+				 int __user *));
 
 #endif /* NET_COMPAT_H */
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index 443626ed..d2f3041 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -25,9 +25,9 @@
 };
 
 #ifdef CONFIG_DCB
-extern int register_dcbevent_notifier(struct notifier_block *nb);
-extern int unregister_dcbevent_notifier(struct notifier_block *nb);
-extern int call_dcbevent_notifiers(unsigned long val, void *v);
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
 #else
 static inline int
 register_dcbevent_notifier(struct notifier_block *nb)
diff --git a/include/net/dn.h b/include/net/dn.h
index c88bf4e..ccc1558 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -199,24 +199,26 @@
 	fld->fld_dport = scp->addrrem;
 }
 
-extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
 #define DN_MENUVER_PRX 0x04
 #define DN_MENUVER_UIC 0x08
 
-extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+struct sock *dn_find_by_skb(struct sk_buff *skb);
 #define DN_ASCBUF_LEN 9
-extern char *dn_addr2asc(__u16, char *);
-extern int dn_destroy_timer(struct sock *sk);
+char *dn_addr2asc(__u16, char *);
+int dn_destroy_timer(struct sock *sk);
 
-extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
-extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
+			 unsigned char type);
+int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
+			 unsigned char *type);
 
-extern void dn_start_slow_timer(struct sock *sk);
-extern void dn_stop_slow_timer(struct sock *sk);
+void dn_start_slow_timer(struct sock *sk);
+void dn_stop_slow_timer(struct sock *sk);
 
 extern __le16 decnet_address;
 extern int decnet_debug_level;
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index b9e32db..20b5ab0 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -148,27 +148,27 @@
 } __packed;
 
 
-extern void dn_dev_init(void);
-extern void dn_dev_cleanup(void);
+void dn_dev_init(void);
+void dn_dev_cleanup(void);
 
-extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+int dn_dev_ioctl(unsigned int cmd, void __user *arg);
 
-extern void dn_dev_devices_off(void);
-extern void dn_dev_devices_on(void);
+void dn_dev_devices_off(void);
+void dn_dev_devices_on(void);
 
-extern void dn_dev_init_pkt(struct sk_buff *skb);
-extern void dn_dev_veri_pkt(struct sk_buff *skb);
-extern void dn_dev_hello(struct sk_buff *skb);
+void dn_dev_init_pkt(struct sk_buff *skb);
+void dn_dev_veri_pkt(struct sk_buff *skb);
+void dn_dev_hello(struct sk_buff *skb);
 
-extern void dn_dev_up(struct net_device *);
-extern void dn_dev_down(struct net_device *);
+void dn_dev_up(struct net_device *);
+void dn_dev_down(struct net_device *);
 
-extern int dn_dev_set_default(struct net_device *dev, int force);
-extern struct net_device *dn_dev_get_default(void);
-extern int dn_dev_bind_default(__le16 *addr);
+int dn_dev_set_default(struct net_device *dev, int force);
+struct net_device *dn_dev_get_default(void);
+int dn_dev_bind_default(__le16 *addr);
 
-extern int register_dnaddr_notifier(struct notifier_block *nb);
-extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+int register_dnaddr_notifier(struct notifier_block *nb);
+int unregister_dnaddr_notifier(struct notifier_block *nb);
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 74004af..f2ca135 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -95,41 +95,38 @@
 /*
  * dn_fib.c
  */
-extern void dn_fib_init(void);
-extern void dn_fib_cleanup(void);
+void dn_fib_init(void);
+void dn_fib_cleanup(void);
 
-extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, 
-			unsigned long arg);
-extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, 
-				struct nlattr *attrs[],
-				const struct nlmsghdr *nlh, int *errp);
-extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 
-			const struct flowidn *fld,
-			struct dn_fib_res *res);
-extern void dn_fib_release_info(struct dn_fib_info *fi);
-extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowidn *fld,
-					struct dn_fib_res *res);
+int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+				       struct nlattr *attrs[],
+				       const struct nlmsghdr *nlh, int *errp);
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+			  const struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_release_info(struct dn_fib_info *fi);
+void dn_fib_flush(void);
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
 
 /*
  * dn_tables.c
  */
-extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-extern struct dn_fib_table *dn_fib_empty_table(void);
-extern void dn_fib_table_init(void);
-extern void dn_fib_table_cleanup(void);
+struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
+struct dn_fib_table *dn_fib_empty_table(void);
+void dn_fib_table_init(void);
+void dn_fib_table_cleanup(void);
 
 /*
  * dn_rules.c
  */
-extern void dn_fib_rules_init(void);
-extern void dn_fib_rules_cleanup(void);
-extern unsigned int dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_rules_init(void);
+void dn_fib_rules_cleanup(void);
+unsigned int dnet_addr_type(__le16 addr);
+int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
-extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-extern void dn_fib_free_info(struct dn_fib_info *fi);
+void dn_fib_free_info(struct dn_fib_info *fi);
 
 static inline void dn_fib_info_put(struct dn_fib_info *fi)
 {
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index 4cb4ae7..fac4e3f 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -16,12 +16,12 @@
 	__u8 priority;
 };
 
-extern void dn_neigh_init(void);
-extern void dn_neigh_cleanup(void);
-extern int dn_neigh_router_hello(struct sk_buff *skb);
-extern int dn_neigh_endnode_hello(struct sk_buff *skb);
-extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+void dn_neigh_init(void);
+void dn_neigh_cleanup(void);
+int dn_neigh_router_hello(struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sk_buff *skb);
+void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
 
 extern struct neigh_table dn_neigh_table;
 
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index e43a289..3a3e33d 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -15,29 +15,32 @@
 *******************************************************************************/
 /* dn_nsp.c functions prototyping */
 
-extern void dn_nsp_send_data_ack(struct sock *sk);
-extern void dn_nsp_send_oth_ack(struct sock *sk);
-extern void dn_nsp_delayed_ack(struct sock *sk);
-extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 
-			unsigned short reason, gfp_t gfp);
-extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
-				unsigned short reason);
-extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+void dn_nsp_send_data_ack(struct sock *sk);
+void dn_nsp_send_oth_ack(struct sock *sk);
+void dn_nsp_delayed_ack(struct sock *sk);
+void dn_send_conn_ack(struct sock *sk);
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
+void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+		      unsigned short reason, gfp_t gfp);
+void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+			unsigned short reason);
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
 
-extern void dn_nsp_output(struct sock *sk);
-extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
-extern unsigned long dn_nsp_persist(struct sock *sk);
-extern int dn_nsp_xmit_timeout(struct sock *sk);
+void dn_nsp_output(struct sock *sk);
+int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
+			    struct sk_buff_head *q, unsigned short acknum);
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
+		       int oob);
+unsigned long dn_nsp_persist(struct sock *sk);
+int dn_nsp_xmit_timeout(struct sock *sk);
 
-extern int dn_nsp_rx(struct sk_buff *);
-extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int dn_nsp_rx(struct sk_buff *);
+int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
+				  long timeo, int *err);
 
 #define NSP_REASON_OK 0		/* No error */
 #define NSP_REASON_NR 1		/* No resources */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 2e9d317..b409ad6 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -15,10 +15,11 @@
     GNU General Public License for more details.
 *******************************************************************************/
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, struct sock *sk, int flags);
-extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_rt_cache_flush(int delay);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
+			 struct sock *sk, int flags);
+int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void dn_rt_cache_flush(int delay);
 
 /* Masks for flags field */
 #define DN_RT_F_PID 0x07 /* Mask for packet type                      */
@@ -92,8 +93,8 @@
 	return rt->fld.flowidn_iif == 0;
 }
 
-extern void dn_route_init(void);
-extern void dn_route_cleanup(void);
+void dn_route_init(void);
+void dn_route_cleanup(void);
 
 #include <net/sock.h>
 #include <linux/if_arp.h>
diff --git a/include/net/dst.h b/include/net/dst.h
index 3c4c944..44995c1 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -106,7 +106,7 @@
 	};
 };
 
-extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY	0x1UL
@@ -119,7 +119,7 @@
 	return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
-extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
 {
@@ -262,7 +262,7 @@
 	return dst;
 }
 
-extern void dst_release(struct dst_entry *dst);
+void dst_release(struct dst_entry *dst);
 
 static inline void refdst_drop(unsigned long refdst)
 {
@@ -362,12 +362,11 @@
 	return child;
 }
 
-extern int dst_discard(struct sk_buff *skb);
-extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-		       int initial_ref, int initial_obsolete,
-		       unsigned short flags);
-extern void __dst_free(struct dst_entry *dst);
-extern struct dst_entry *dst_destroy(struct dst_entry *dst);
+int dst_discard(struct sk_buff *skb);
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+		int initial_obsolete, unsigned short flags);
+void __dst_free(struct dst_entry *dst);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
 
 static inline void dst_free(struct dst_entry *dst)
 {
@@ -463,7 +462,7 @@
 	return dst;
 }
 
-extern void		dst_init(void);
+void dst_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
@@ -486,9 +485,9 @@
 }
 
 #else
-extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
-				     const struct flowi *fl, struct sock *sk,
-				     int flags);
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+			      const struct flowi *fl, struct sock *sk,
+			      int flags);
 
 /* skb attached with this dst needs transformation if dst->xfrm is valid */
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
diff --git a/include/net/esp.h b/include/net/esp.h
index d584513..a43be85 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -3,18 +3,6 @@
 
 #include <linux/skbuff.h>
 
-struct crypto_aead;
-
-struct esp_data {
-	/* 0..255 */
-	int padlen;
-
-	/* Confidentiality & Integrity */
-	struct crypto_aead *aead;
-};
-
-extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
-
 struct ip_esp_hdr;
 
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4b2b557..e584de1 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -115,14 +115,13 @@
 	return frh->table;
 }
 
-extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
-extern void fib_rules_unregister(struct fib_rules_ops *);
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+					 struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
 
-extern int			fib_rules_lookup(struct fib_rules_ops *,
-						 struct flowi *, int flags,
-						 struct fib_lookup_arg *);
-extern int			fib_default_rule_add(struct fib_rules_ops *,
-						     u32 pref, u32 table,
-						     u32 flags);
-extern u32			fib_default_rule_pref(struct fib_rules_ops *ops);
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+		     struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+			 u32 flags);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops);
 #endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 628e11b..65ce471 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -215,12 +215,13 @@
 		struct net *net, const struct flowi *key, u16 family,
 		u8 dir, struct flow_cache_object *oldobj, void *ctx);
 
-extern struct flow_cache_object *flow_cache_lookup(
-		struct net *net, const struct flowi *key, u16 family,
-		u8 dir, flow_resolve_t resolver, void *ctx);
+struct flow_cache_object *flow_cache_lookup(struct net *net,
+					    const struct flowi *key, u16 family,
+					    u8 dir, flow_resolve_t resolver,
+					    void *ctx);
 
-extern void flow_cache_flush(void);
-extern void flow_cache_flush_deferred(void);
+void flow_cache_flush(void);
+void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index bb8271d..7e64bd8 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -13,5 +13,6 @@
 	u8 ip_proto;
 };
 
-extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
 #endif
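
The new skb_flow_get_ports() declaration factors L4 port extraction out of
the flow dissector. As a rough illustration (not part of this patch; the
helper name example_flow_hash is hypothetical), a caller can hash a packet's
addresses and ports after dissection:

	/* Illustrative only: hash an skb's 5-tuple via the flow dissector. */
	static u32 example_flow_hash(const struct sk_buff *skb)
	{
		struct flow_keys keys;

		if (!skb_flow_dissect(skb, &keys))
			return 0;	/* headers not parseable */

		/* keys.ports packs both L4 ports into one __be32 */
		return jhash_3words((__force u32)keys.dst,
				    (__force u32)keys.src,
				    (__force u32)keys.ports, 0);
	}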
diff --git a/include/net/garp.h b/include/net/garp.h
index 834d8ad..abf33bb 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -112,19 +112,18 @@
 	struct rcu_head			rcu;
 };
 
-extern int	garp_register_application(struct garp_application *app);
-extern void	garp_unregister_application(struct garp_application *app);
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
 
-extern int	garp_init_applicant(struct net_device *dev,
-				    struct garp_application *app);
-extern void	garp_uninit_applicant(struct net_device *dev,
-				      struct garp_application *app);
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+			   struct garp_application *app);
 
-extern int	garp_request_join(const struct net_device *dev,
-				  const struct garp_application *app,
-				  const void *data, u8 len, u8 type);
-extern void	garp_request_leave(const struct net_device *dev,
-				   const struct garp_application *app,
-				   const void *data, u8 len, u8 type);
+int garp_request_join(const struct net_device *dev,
+		      const struct garp_application *app, const void *data,
+		      u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+			const struct garp_application *app,
+			const void *data, u8 len, u8 type);
 
 #endif /* _NET_GARP_H */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index cf8439b..ea4271d 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -19,32 +19,31 @@
 	struct tc_stats   tc_stats;
 };
 
-extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+			  struct gnet_dump *d);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+				 int tc_stats_type, int xstats_type,
 				 spinlock_t *lock, struct gnet_dump *d);
 
-extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
-					int tc_stats_type,int xstats_type,
-					spinlock_t *lock, struct gnet_dump *d);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+			  struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+			     const struct gnet_stats_basic_packed *b,
+			     struct gnet_stats_rate_est64 *r);
+int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
-extern int gnet_stats_copy_basic(struct gnet_dump *d,
-				 struct gnet_stats_basic_packed *b);
-extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
-				    const struct gnet_stats_basic_packed *b,
-				    struct gnet_stats_rate_est64 *r);
-extern int gnet_stats_copy_queue(struct gnet_dump *d,
-				 struct gnet_stats_queue *q);
-extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+int gnet_stats_finish_copy(struct gnet_dump *d);
 
-extern int gnet_stats_finish_copy(struct gnet_dump *d);
-
-extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-			     struct gnet_stats_rate_est64 *rate_est,
-			     spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
-			       struct gnet_stats_rate_est64 *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-				 struct gnet_stats_rate_est64 *rate_est,
-				 spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
-				 const struct gnet_stats_rate_est64 *rate_est);
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+		      struct gnet_stats_rate_est64 *rate_est,
+		      spinlock_t *stats_lock, struct nlattr *opt);
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+			struct gnet_stats_rate_est64 *rate_est);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+			  struct gnet_stats_rate_est64 *rate_est,
+			  spinlock_t *stats_lock, struct nlattr *opt);
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+			  const struct gnet_stats_rate_est64 *rate_est);
 #endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8e0b6c8..9b787b6 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -122,7 +122,7 @@
 	struct list_head	ops_list;
 };
 
-extern int __genl_register_family(struct genl_family *family);
+int __genl_register_family(struct genl_family *family);
 
 static inline int genl_register_family(struct genl_family *family)
 {
@@ -130,8 +130,8 @@
 	return __genl_register_family(family);
 }
 
-extern int __genl_register_family_with_ops(struct genl_family *family,
-	struct genl_ops *ops, size_t n_ops);
+int __genl_register_family_with_ops(struct genl_family *family,
+				    struct genl_ops *ops, size_t n_ops);
 
 static inline int genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops)
@@ -140,18 +140,18 @@
 	return __genl_register_family_with_ops(family, ops, n_ops);
 }
 
-extern int genl_unregister_family(struct genl_family *family);
-extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_register_mc_group(struct genl_family *family,
-				  struct genl_multicast_group *grp);
-extern void genl_unregister_mc_group(struct genl_family *family,
-				     struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
-			u32 group, struct nlmsghdr *nlh, gfp_t flags);
+int genl_unregister_family(struct genl_family *family);
+int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+int genl_register_mc_group(struct genl_family *family,
+			   struct genl_multicast_group *grp);
+void genl_unregister_mc_group(struct genl_family *family,
+			      struct genl_multicast_group *grp);
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
+		 u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
-				struct genl_family *family, int flags, u8 cmd);
+		  struct genl_family *family, int flags, u8 cmd);
 
 /**
  * genlmsg_nlhdr - Obtain netlink header from user specified header
diff --git a/include/net/gre.h b/include/net/gre.h
index 57e4afd..dcd9ae3 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -38,7 +38,13 @@
 
 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 		      int hdr_len);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+						  bool gre_csum)
+{
+	return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE);
+}
+
 
 static inline int ip_gre_calc_hlen(__be16 o_flags)
 {
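
gre_handle_offloads() is demoted from an exported function to a one-line
inline over the shared iptunnel_handle_offloads(), so GRE reuses the common
IP-tunnel offload path. A hedged sketch of the transmit-path call pattern,
modeled on ip_gre's __gre_xmit callers (error handling abbreviated):

	/* Sketch of xmit-path usage; skb is consumed on error. */
	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb)) {
		dev->stats.tx_dropped++;
		return;
	}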
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 081439f..970028e 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -39,10 +39,10 @@
 struct sk_buff;
 struct net;
 
-extern void	icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
-extern int	icmp_rcv(struct sk_buff *skb);
-extern void	icmp_err(struct sk_buff *, u32 info);
-extern int	icmp_init(void);
-extern void	icmp_out_count(struct net *net, unsigned char type);
+void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+int icmp_rcv(struct sk_buff *skb);
+void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
 
 #endif	/* _ICMP_H */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 02ef772..76d5427 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -66,11 +66,10 @@
 	struct hlist_node	addr_lst;
 	struct list_head	if_list;
 
-#ifdef CONFIG_IPV6_PRIVACY
 	struct list_head	tmp_list;
 	struct inet6_ifaddr	*ifpub;
 	int			regen_count;
-#endif
+
 	bool			tokenized;
 
 	struct rcu_head		rcu;
@@ -192,11 +191,9 @@
 	__u32			if_flags;
 	int			dead;
 
-#ifdef CONFIG_IPV6_PRIVACY
 	u8			rndid[8];
 	struct timer_list	regen_timer;
 	struct list_head	tempaddr_list;
-#endif
 
 	struct in6_addr		token;
 
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 04642c9..f981ba7 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -22,27 +22,25 @@
 struct sock;
 struct sockaddr;
 
-extern int inet6_csk_bind_conflict(const struct sock *sk,
-				   const struct inet_bind_bucket *tb, bool relax);
+int inet6_csk_bind_conflict(const struct sock *sk,
+			    const struct inet_bind_bucket *tb, bool relax);
 
-extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
-					     struct flowi6 *fl6,
-					     const struct request_sock *req);
+struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+				      const struct request_sock *req);
 
-extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
-						 struct request_sock ***prevp,
-						 const __be16 rport,
-						 const struct in6_addr *raddr,
-						 const struct in6_addr *laddr,
-						 const int iif);
+struct request_sock *inet6_csk_search_req(const struct sock *sk,
+					  struct request_sock ***prevp,
+					  const __be16 rport,
+					  const struct in6_addr *raddr,
+					  const struct in6_addr *laddr,
+					  const int iif);
 
-extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
-					   struct request_sock *req,
-					   const unsigned long timeout);
+void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+				    const unsigned long timeout);
 
-extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
 
-extern struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index fd4ee01..ae06135 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -28,32 +28,17 @@
 
 struct inet_hashinfo;
 
-static inline unsigned int inet6_ehashfn(struct net *net,
-				const struct in6_addr *laddr, const u16 lport,
-				const struct in6_addr *faddr, const __be16 fport)
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+				    const u16 lport,
+				    const u32 fhash,
+				    const __be16 fport,
+				    const u32 initval)
 {
-	u32 ports = (((u32)lport) << 16) | (__force u32)fport;
-
-	return jhash_3words((__force u32)laddr->s6_addr32[3],
-			    ipv6_addr_jhash(faddr),
-			    ports,
-			    inet_ehash_secret + net_hash_mix(net));
+	const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+	return jhash_3words(lhash, fhash, ports, initval);
 }
 
-static inline int inet6_sk_ehashfn(const struct sock *sk)
-{
-	const struct inet_sock *inet = inet_sk(sk);
-	const struct ipv6_pinfo *np = inet6_sk(sk);
-	const struct in6_addr *laddr = &np->rcv_saddr;
-	const struct in6_addr *faddr = &np->daddr;
-	const __u16 lport = inet->inet_num;
-	const __be16 fport = inet->inet_dport;
-	struct net *net = sock_net(sk);
-
-	return inet6_ehashfn(net, laddr, lport, faddr, fport);
-}
-
-extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
 
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
@@ -61,21 +46,19 @@
  *
  * The sockhash lock must be held as a reader here.
  */
-extern struct sock *__inet6_lookup_established(struct net *net,
-					   struct inet_hashinfo *hashinfo,
-					   const struct in6_addr *saddr,
-					   const __be16 sport,
-					   const struct in6_addr *daddr,
-					   const u16 hnum,
-					   const int dif);
+struct sock *__inet6_lookup_established(struct net *net,
+					struct inet_hashinfo *hashinfo,
+					const struct in6_addr *saddr,
+					const __be16 sport,
+					const struct in6_addr *daddr,
+					const u16 hnum, const int dif);
 
-extern struct sock *inet6_lookup_listener(struct net *net,
-					  struct inet_hashinfo *hashinfo,
-					  const struct in6_addr *saddr,
-					  const __be16 sport,
-					  const struct in6_addr *daddr,
-					  const unsigned short hnum,
-					  const int dif);
+struct sock *inet6_lookup_listener(struct net *net,
+				   struct inet_hashinfo *hashinfo,
+				   const struct in6_addr *saddr,
+				   const __be16 sport,
+				   const struct in6_addr *daddr,
+				   const unsigned short hnum, const int dif);
 
 static inline struct sock *__inet6_lookup(struct net *net,
 					  struct inet_hashinfo *hashinfo,
@@ -110,9 +93,9 @@
 			      inet6_iif(skb));
 }
 
-extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
-				 const struct in6_addr *saddr, const __be16 sport,
-				 const struct in6_addr *daddr, const __be16 dport,
-				 const int dif);
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+			  const struct in6_addr *saddr, const __be16 sport,
+			  const struct in6_addr *daddr, const __be16 dport,
+			  const int dif);
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 2340087..fe7994c 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -13,30 +13,30 @@
 struct sockaddr;
 struct socket;
 
-extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-			       int addr_len, int flags);
-extern int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-				 int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
-			      int addr_len, int flags);
-extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
-			struct msghdr *msg, size_t size);
-extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
-			     size_t size, int flags);
-extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock,
-			struct msghdr *msg, size_t size, int flags);
-extern int inet_shutdown(struct socket *sock, int how);
-extern int inet_listen(struct socket *sock, int backlog);
-extern void inet_sock_destruct(struct sock *sk);
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-extern int inet_getname(struct socket *sock, struct sockaddr *uaddr,
-			int *uaddr_len, int peer);
-extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
-				unsigned short type, unsigned char protocol,
-				struct net *net);
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+			int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+			  int addr_len, int flags);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+		       int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+		 size_t size);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+		      size_t size, int flags);
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+		 size_t size, int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+		 int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+			 unsigned short type, unsigned char protocol,
+			 struct net *net);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index de2c785..c55aeed 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -146,9 +146,9 @@
 	return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone_lock(const struct sock *sk,
-					const struct request_sock *req,
-					const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+				 const struct request_sock *req,
+				 const gfp_t priority);
 
 enum inet_csk_ack_state_t {
 	ICSK_ACK_SCHED	= 1,
@@ -157,11 +157,11 @@
 	ICSK_ACK_PUSHED2 = 8
 };
 
-extern void inet_csk_init_xmit_timers(struct sock *sk,
-				      void (*retransmit_handler)(unsigned long),
-				      void (*delack_handler)(unsigned long),
-				      void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+			       void (*retransmit_handler)(unsigned long),
+			       void (*delack_handler)(unsigned long),
+			       void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
 
 static inline void inet_csk_schedule_ack(struct sock *sk)
 {
@@ -178,8 +178,8 @@
 	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 
 #ifdef INET_CSK_DEBUG
 extern const char inet_csk_timer_bug_msg[];
@@ -241,23 +241,21 @@
 #endif
 }
 
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
-						struct request_sock ***prevp,
-						const __be16 rport,
-						const __be32 raddr,
-						const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
-				  const struct inet_bind_bucket *tb, bool relax);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+					 struct request_sock ***prevp,
+					 const __be16 rport,
+					 const __be32 raddr,
+					 const __be32 laddr);
+int inet_csk_bind_conflict(const struct sock *sk,
+			   const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
-					    struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+				     const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
 					    const struct request_sock *req);
-extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
-						   struct sock *newsk,
-						   const struct request_sock *req);
 
 static inline void inet_csk_reqsk_queue_add(struct sock *sk,
 					    struct request_sock *req,
@@ -266,9 +264,8 @@
 	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
-					  struct request_sock *req,
-					  unsigned long timeout);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+				   unsigned long timeout);
 
 static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
 						struct request_sock *req)
@@ -315,13 +312,13 @@
 	reqsk_free(req);
 }
 
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
-				       const unsigned long interval,
-				       const unsigned long timeout,
-				       const unsigned long max_rto);
+void inet_csk_reqsk_queue_prune(struct sock *parent,
+				const unsigned long interval,
+				const unsigned long timeout,
+				const unsigned long max_rto);
 
-extern void inet_csk_destroy_sock(struct sock *sk);
-extern void inet_csk_prepare_forced_close(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
 
 /*
  * LISTEN is a special case for poll..
@@ -332,15 +329,15 @@
 			(POLLIN | POLLRDNORM) : 0;
 }
 
-extern int  inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+void inet_csk_listen_stop(struct sock *sk);
 
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, unsigned int optlen);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+			       char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+			       char __user *optval, unsigned int optlen);
 
-extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index bfcbc00..6f59de9 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -64,6 +64,10 @@
 	rwlock_t		lock ____cacheline_aligned_in_smp;
 	int			secret_interval;
 	struct timer_list	secret_timer;
+
+	/* The first call to hashfn is responsible for initializing
+	 * rnd; this is best done with net_get_random_once().
+	 */
 	u32			rnd;
 	int			qsize;
 
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index ef83d9e..1bdb477 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -37,12 +37,11 @@
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Established, but this table really holds
+ * all sockets except those in LISTEN state.
  */
 struct inet_ehash_bucket {
 	struct hlist_nulls_head chain;
-	struct hlist_nulls_head twchain;
 };
 
 /* There are a few simple rules, which allow for local port reuse by
@@ -123,7 +122,6 @@
 	 *
 	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
 	 *
-	 * TIME_WAIT sockets use a separate chain (twchain).
 	 */
 	struct inet_ehash_bucket	*ehash;
 	spinlock_t			*ehash_locks;
@@ -218,22 +216,21 @@
 	}
 }
 
-extern struct inet_bind_bucket *
-		    inet_bind_bucket_create(struct kmem_cache *cachep,
-					    struct net *net,
-					    struct inet_bind_hashbucket *head,
-					    const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
-				     struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+			struct inet_bind_hashbucket *head,
+			const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+			      struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(struct net *net,
-		const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+			       const int bhash_size)
 {
 	return (lport + net_hash_mix(net)) & (bhash_size - 1);
 }
 
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-			   const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+		    const unsigned short snum);
 
 /* These can have wildcards, don't try too hard. */
 static inline int inet_lhashfn(struct net *net, const unsigned short num)
@@ -247,23 +244,22 @@
 }
 
 /* Caller must disable local BH processing. */
-extern int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(struct sock *sk, struct sock *child);
 
-extern void inet_put_port(struct sock *sk);
+void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
 
-extern struct sock *__inet_lookup_listener(struct net *net,
-					   struct inet_hashinfo *hashinfo,
-					   const __be32 saddr,
-					   const __be16 sport,
-					   const __be32 daddr,
-					   const unsigned short hnum,
-					   const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+				    struct inet_hashinfo *hashinfo,
+				    const __be32 saddr, const __be16 sport,
+				    const __be32 daddr,
+				    const unsigned short hnum,
+				    const int dif);
 
 static inline struct sock *inet_lookup_listener(struct net *net,
 		struct inet_hashinfo *hashinfo,
@@ -304,30 +300,17 @@
 				   ((__force __u64)(__be32)(__saddr)));
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)	\
-	((inet_sk(__sk)->inet_portpair == (__ports))		&&	\
-	 (inet_sk(__sk)->inet_addrpair == (__cookie))		&&	\
+	(((__sk)->sk_portpair == (__ports))			&&	\
+	 ((__sk)->sk_addrpair == (__cookie))			&&	\
 	 (!(__sk)->sk_bound_dev_if	||				\
 	   ((__sk)->sk_bound_dev_if == (__dif))) 		&& 	\
 	 net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
-	((inet_twsk(__sk)->tw_portpair == (__ports))	&&		\
-	 (inet_twsk(__sk)->tw_addrpair == (__cookie))	&&		\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif)))	&&		\
-	 net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr)
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-	((inet_sk(__sk)->inet_portpair == (__ports))	&&		\
-	 (inet_sk(__sk)->inet_daddr	== (__saddr))	&&		\
-	 (inet_sk(__sk)->inet_rcv_saddr	== (__daddr))	&&		\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif))) 	&&		\
-	 net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-	((inet_twsk(__sk)->tw_portpair == (__ports))	&&		\
-	 (inet_twsk(__sk)->tw_daddr	== (__saddr))	&&		\
-	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))	&&		\
+	(((__sk)->sk_portpair == (__ports))		&&		\
+	 ((__sk)->sk_daddr	== (__saddr))		&&		\
+	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
 	 (!(__sk)->sk_bound_dev_if	||				\
 	   ((__sk)->sk_bound_dev_if == (__dif))) 	&&		\
 	 net_eq(sock_net(__sk), (__net)))
@@ -339,10 +322,11 @@
  *
  * Local BH must be disabled here.
  */
-extern struct sock * __inet_lookup_established(struct net *net,
-		struct inet_hashinfo *hashinfo,
-		const __be32 saddr, const __be16 sport,
-		const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+				       struct inet_hashinfo *hashinfo,
+				       const __be32 saddr, const __be16 sport,
+				       const __be32 daddr, const u16 hnum,
+				       const int dif);
 
 static inline struct sock *
 	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -399,13 +383,14 @@
 				     iph->daddr, dport, inet_iif(skb));
 }
 
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-		struct sock *sk,
-		u32 port_offset,
-		int (*check_established)(struct inet_timewait_death_row *,
-			struct sock *, __u16, struct inet_timewait_sock **),
-		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+			struct sock *sk, u32 port_offset,
+			int (*check_established)(struct inet_timewait_death_row *,
+						 struct sock *, __u16,
+						 struct inet_timewait_sock **),
+			int (*hash)(struct sock *sk,
+				    struct inet_timewait_sock *twp));
 
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
-			     struct sock *sk);
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+		      struct sock *sk);
 #endif /* _INET_HASHTABLES_H */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b21a7f0..1833c3f 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -70,13 +70,14 @@
 
 struct inet_request_sock {
 	struct request_sock	req;
-#if IS_ENABLED(CONFIG_IPV6)
-	u16			inet6_rsk_offset;
-#endif
-	__be16			loc_port;
-	__be32			loc_addr;
-	__be32			rmt_addr;
-	__be16			rmt_port;
+#define ir_loc_addr		req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr		req.__req_common.skc_daddr
+#define ir_num			req.__req_common.skc_num
+#define ir_rmt_port		req.__req_common.skc_dport
+#define ir_v6_rmt_addr		req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr		req.__req_common.skc_v6_rcv_saddr
+#define ir_iif			req.__req_common.skc_bound_dev_if
+
 	kmemcheck_bitfield_begin(flags);
 	u16			snd_wscale : 4,
 				rcv_wscale : 4,
@@ -88,6 +89,7 @@
 				no_srccheck: 1;
 	kmemcheck_bitfield_end(flags);
 	struct ip_options_rcu	*opt;
+	struct sk_buff		*pktopts;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -103,6 +105,9 @@
 	int			length; /* Total length of all frames */
 	struct dst_entry	*dst;
 	u8			tx_flags;
+	__u8			ttl;
+	__s16			tos;
+	char			priority;
 };
 
 struct inet_cork_full {
@@ -143,10 +148,8 @@
 	/* Socket demultiplex comparisons on incoming packets. */
 #define inet_daddr		sk.__sk_common.skc_daddr
 #define inet_rcv_saddr		sk.__sk_common.skc_rcv_saddr
-#define inet_addrpair		sk.__sk_common.skc_addrpair
 #define inet_dport		sk.__sk_common.skc_dport
 #define inet_num		sk.__sk_common.skc_num
-#define inet_portpair		sk.__sk_common.skc_portpair
 
 	__be32			inet_saddr;
 	__s16			uc_ttl;
@@ -199,32 +202,18 @@
 }
 #endif
 
-extern int inet_sk_rebuild_header(struct sock *sk);
+int inet_sk_rebuild_header(struct sock *sk);
 
-extern u32 inet_ehash_secret;
-extern u32 ipv6_hash_secret;
-extern void build_ehash_secret(void);
-
-static inline unsigned int inet_ehashfn(struct net *net,
-					const __be32 laddr, const __u16 lport,
-					const __be32 faddr, const __be16 fport)
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+					  const __u16 lport,
+					  const __be32 faddr,
+					  const __be16 fport,
+					  u32 initval)
 {
 	return jhash_3words((__force __u32) laddr,
 			    (__force __u32) faddr,
 			    ((__u32) lport) << 16 | (__force __u32)fport,
-			    inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet_sk_ehashfn(const struct sock *sk)
-{
-	const struct inet_sock *inet = inet_sk(sk);
-	const __be32 laddr = inet->inet_rcv_saddr;
-	const __u16 lport = inet->inet_num;
-	const __be32 faddr = inet->inet_daddr;
-	const __be16 fport = inet->inet_dport;
-	struct net *net = sock_net(sk);
-
-	return inet_ehashfn(net, laddr, lport, faddr, fport);
+			    initval);
 }
 
 static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
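
__inet_ehashfn() (like its IPv6 twin earlier in this series) now takes the
hash secret as an explicit initval instead of reading the removed global
inet_ehash_secret, so each caller seeds its own key lazily. Roughly how the
established-hash caller looks after the companion net/ipv4 change (shown
here as a sketch, not as part of this header diff):

	static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
					 const __u16 lport, const __be32 faddr,
					 const __be16 fport)
	{
		static u32 inet_ehash_secret __read_mostly;

		net_get_random_once(&inet_ehash_secret,
				    sizeof(inet_ehash_secret));

		return __inet_ehashfn(laddr, lport, faddr, fport,
				      inet_ehash_secret + net_hash_mix(net));
	}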
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f908dfc..71c6e26 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -58,6 +58,11 @@
 # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
 #endif
 
+static inline u32 inet_tw_time_stamp(void)
+{
+	return jiffies;
+}
+
 /* TIME_WAIT reaping mechanism. */
 #define INET_TWDR_TWKILL_SLOTS	8 /* Please keep this a power of 2. */
 
@@ -83,9 +88,9 @@
 	int			sysctl_max_tw_buckets;
 };
 
-extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(struct work_struct *work);
-extern void inet_twdr_twcal_tick(unsigned long data);
+void inet_twdr_hangman(unsigned long data);
+void inet_twdr_twkill_work(struct work_struct *work);
+void inet_twdr_twcal_tick(unsigned long data);
 
 struct inet_bind_bucket;
 
@@ -111,11 +116,11 @@
 #define tw_prot			__tw_common.skc_prot
 #define tw_net			__tw_common.skc_net
 #define tw_daddr        	__tw_common.skc_daddr
+#define tw_v6_daddr		__tw_common.skc_v6_daddr
 #define tw_rcv_saddr    	__tw_common.skc_rcv_saddr
-#define tw_addrpair		__tw_common.skc_addrpair
+#define tw_v6_rcv_saddr    	__tw_common.skc_v6_rcv_saddr
 #define tw_dport		__tw_common.skc_dport
 #define tw_num			__tw_common.skc_num
-#define tw_portpair		__tw_common.skc_portpair
 
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
@@ -130,26 +135,14 @@
 				tw_transparent  : 1,
 				tw_pad		: 6,	/* 6 bits hole */
 				tw_tos		: 8,
-				tw_ipv6_offset  : 16;
+				tw_pad2		: 16;	/* 16 bits hole */
 	kmemcheck_bitfield_end(flags);
-	unsigned long		tw_ttd;
+	u32			tw_ttd;
 	struct inet_bind_bucket	*tw_tb;
 	struct hlist_node	tw_death_node;
 };
 #define tw_tclass tw_tos
 
-static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-				      struct hlist_nulls_head *list)
-{
-	hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
-static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
-					   struct hlist_head *list)
-{
-	hlist_add_head(&tw->tw_bind_node, list);
-}
-
 static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
 {
 	return !hlist_unhashed(&tw->tw_death_node);
@@ -189,34 +182,28 @@
 	return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 sk_rcv_saddr(const struct sock *sk)
-{
-/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
-	return sk->__sk_common.skc_rcv_saddr;
-}
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
 
-extern void inet_twsk_put(struct inet_timewait_sock *tw);
+int inet_twsk_unhash(struct inet_timewait_sock *tw);
 
-extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+			  struct inet_hashinfo *hashinfo);
 
-extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-				 struct inet_hashinfo *hashinfo);
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+					   const int state);
 
-extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
-						  const int state);
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+			   struct inet_hashinfo *hashinfo);
 
-extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
-				  struct sock *sk,
-				  struct inet_hashinfo *hashinfo);
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+			struct inet_timewait_death_row *twdr,
+			const int timeo, const int timewait_len);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+			  struct inet_timewait_death_row *twdr);
 
-extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
-			       struct inet_timewait_death_row *twdr,
-			       const int timeo, const int timewait_len);
-extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
-				 struct inet_timewait_death_row *twdr);
-
-extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
-			    struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+		     struct inet_timewait_death_row *twdr, int family);
 
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 53f464d..f4e127a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -120,9 +120,9 @@
 	}
 }
 
-extern void inet_peer_base_init(struct inet_peer_base *);
+void inet_peer_base_init(struct inet_peer_base *);
 
-void			inet_initpeers(void) __init;
+void inet_initpeers(void) __init;
 
 #define INETPEER_METRICS_NEW	(~(u32) 0)
 
@@ -159,11 +159,11 @@
 }
 
 /* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
-extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
-extern void inetpeer_invalidate_tree(struct inet_peer_base *);
-extern void inetpeer_invalidate_family(int family);
+void inetpeer_invalidate_tree(struct inet_peer_base *);
+void inetpeer_invalidate_family(int family);
 
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 5e52688..217bc5b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -28,6 +28,7 @@
 #include <linux/skbuff.h>
 
 #include <net/inet_sock.h>
+#include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 
@@ -56,6 +57,9 @@
 	int			oif;
 	struct ip_options_rcu	*opt;
 	__u8			tx_flags;
+	__u8			ttl;
+	__s16			tos;
+	char			priority;
 };
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -86,64 +90,71 @@
 struct rtable;
 struct sockaddr;
 
-extern int		igmp_mc_proc_init(void);
+int igmp_mc_proc_init(void);
 
 /*
  *	Functions provided by ip.c
  */
 
-extern int		ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
-					      __be32 saddr, __be32 daddr,
-					      struct ip_options_rcu *opt);
-extern int		ip_rcv(struct sk_buff *skb, struct net_device *dev,
-			       struct packet_type *pt, struct net_device *orig_dev);
-extern int		ip_local_deliver(struct sk_buff *skb);
-extern int		ip_mr_input(struct sk_buff *skb);
-extern int		ip_output(struct sk_buff *skb);
-extern int		ip_mc_output(struct sk_buff *skb);
-extern int		ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-extern int		ip_do_nat(struct sk_buff *skb);
-extern void		ip_send_check(struct iphdr *ip);
-extern int		__ip_local_out(struct sk_buff *skb);
-extern int		ip_local_out(struct sk_buff *skb);
-extern int		ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
-extern void		ip_init(void);
-extern int		ip_append_data(struct sock *sk, struct flowi4 *fl4,
-				       int getfrag(void *from, char *to, int offset, int len,
-						   int odd, struct sk_buff *skb),
-				void *from, int len, int protolen,
-				struct ipcm_cookie *ipc,
-				struct rtable **rt,
-				unsigned int flags);
-extern int		ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
-extern ssize_t		ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
-				int offset, size_t size, int flags);
-extern struct sk_buff  *__ip_make_skb(struct sock *sk,
-				      struct flowi4 *fl4,
-				      struct sk_buff_head *queue,
-				      struct inet_cork *cork);
-extern int		ip_send_skb(struct net *net, struct sk_buff *skb);
-extern int		ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
-extern void		ip_flush_pending_frames(struct sock *sk);
-extern struct sk_buff  *ip_make_skb(struct sock *sk,
-				    struct flowi4 *fl4,
-				    int getfrag(void *from, char *to, int offset, int len,
-						int odd, struct sk_buff *skb),
-				    void *from, int length, int transhdrlen,
-				    struct ipcm_cookie *ipc,
-				    struct rtable **rtp,
-				    unsigned int flags);
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+			  __be32 saddr, __be32 daddr,
+			  struct ip_options_rcu *opt);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+	   struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct sk_buff *skb);
+int ip_mc_output(struct sk_buff *skb);
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_do_nat(struct sk_buff *skb);
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct sk_buff *skb);
+int ip_local_out(struct sk_buff *skb);
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+		   int getfrag(void *from, char *to, int offset, int len,
+			       int odd, struct sk_buff *skb),
+		   void *from, int len, int protolen,
+		   struct ipcm_cookie *ipc,
+		   struct rtable **rt,
+		   unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+		       struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+		       int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+			      struct sk_buff_head *queue,
+			      struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+			    int getfrag(void *from, char *to, int offset,
+					int len, int odd, struct sk_buff *skb),
+			    void *from, int length, int transhdrlen,
+			    struct ipcm_cookie *ipc, struct rtable **rtp,
+			    unsigned int flags);
 
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
 	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
-/* datagram.c */
-extern int		ip4_datagram_connect(struct sock *sk, 
-					     struct sockaddr *uaddr, int addr_len);
+static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
+{
+	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
 
-extern void ip4_datagram_release_cb(struct sock *sk);
+static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
+{
+	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
+/* datagram.c */
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+void ip4_datagram_release_cb(struct sock *sk);
 
 struct ip_reply_arg {
 	struct kvec iov[1];   
@@ -184,16 +195,16 @@
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib[], int offt);
 #if BITS_PER_LONG==32
-extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
 #else
 static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
 }
 #endif
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 
 static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
 {
@@ -206,11 +217,7 @@
 	}
 }
 
-extern struct local_ports {
-	seqlock_t	lock;
-	int		range[2];
-} sysctl_local_ports;
-extern void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
 
 extern unsigned long *sysctl_local_reserved_ports;
 static inline int inet_is_reserved_local_port(int port)
@@ -231,9 +238,9 @@
 /* From ip_output.c */
 extern int sysctl_ip_dynaddr;
 
-extern void ipfrag_init(void);
+void ipfrag_init(void);
 
-extern void ip_static_sysctl_init(void);
+void ip_static_sysctl_init(void);
 
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
@@ -262,7 +269,7 @@
 		 !(dst_metric_locked(dst, RTAX_MTU)));
 }
 
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
 static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
@@ -367,7 +374,7 @@
 		struct ipv6_pinfo *np = inet6_sk(sk);
 
 		memset(&np->saddr, 0, sizeof(np->saddr));
-		memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
 	}
 #endif
 }
@@ -390,7 +397,7 @@
 	return 1;
 }
 
-extern bool ip_call_ra_chain(struct sk_buff *skb);
+bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
  *	Functions provided by ip_fragment.c
@@ -428,50 +435,52 @@
  *	Functions provided by ip_forward.c
  */
  
-extern int ip_forward(struct sk_buff *skb);
+int ip_forward(struct sk_buff *skb);
  
 /*
  *	Functions provided by ip_options.c
  */
  
-extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
-			     __be32 daddr, struct rtable *rt, int is_frag);
-extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
-extern void ip_options_fragment(struct sk_buff *skb);
-extern int ip_options_compile(struct net *net,
-			      struct ip_options *opt, struct sk_buff *skb);
-extern int ip_options_get(struct net *net, struct ip_options_rcu **optp,
-			  unsigned char *data, int optlen);
-extern int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
-				    unsigned char __user *data, int optlen);
-extern void ip_options_undo(struct ip_options * opt);
-extern void ip_forward_options(struct sk_buff *skb);
-extern int ip_options_rcv_srr(struct sk_buff *skb);
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+		      __be32 daddr, struct rtable *rt, int is_frag);
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+void ip_options_fragment(struct sk_buff *skb);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+		       struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+		   unsigned char *data, int optlen);
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+			     unsigned char __user *data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb);
 
 /*
  *	Functions provided by ip_sockglue.c
  */
 
-extern void	ipv4_pktinfo_prepare(struct sk_buff *skb);
-extern void	ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-extern int	ip_cmsg_send(struct net *net,
-			     struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int	ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
-extern int	ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
-extern int	compat_ip_setsockopt(struct sock *sk, int level,
-			int optname, char __user *optval, unsigned int optlen);
-extern int	compat_ip_getsockopt(struct sock *sk, int level,
-			int optname, char __user *optval, int __user *optlen);
-extern int	ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+		  unsigned int optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+		  int __user *optlen);
+int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+			 char __user *optval, unsigned int optlen);
+int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+			 char __user *optval, int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+		  void (*destructor)(struct sock *));
 
-extern int 	ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void	ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 
-			      __be16 port, u32 info, u8 *payload);
-extern void	ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
-			       u32 info);
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+		   u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+		    u32 info);
 
 #ifdef CONFIG_PROC_FS
-extern int ip_misc_proc_init(void);
+int ip_misc_proc_init(void);
 #endif
 
 #endif	/* _IP_H */
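
Two things stand out in the ip.h hunks above. Most of the churn, here and in
every header below, is the mechanical removal of the redundant "extern"
storage-class specifier from function prototypes; in C a file-scope function
declaration has external linkage with or without it, so the shorter form is
equivalent. The substantive addition is the get_rttos()/get_rtconn_flags()
pair, which both encode one rule: an ipcm_cookie tos of -1 means no
per-packet TOS arrived via cmsg, so the socket-wide value is used instead.
A minimal userspace model of that sentinel-fallback pattern (the struct and
function names are illustrative stand-ins, not kernel API):

#include <stdio.h>

struct cookie { int tos; };          /* stand-in for struct ipcm_cookie */
struct sock_model { int tos; };      /* stand-in for struct inet_sock */

/* tos == -1 means "not set per-packet via cmsg": fall back to the
 * socket-wide value, exactly as get_rttos()/get_rtconn_flags() do. */
static unsigned char effective_tos(const struct cookie *ipc,
				   const struct sock_model *sk)
{
	return (unsigned char)((ipc->tos != -1) ? ipc->tos : sk->tos);
}

int main(void)
{
	struct sock_model sk = { .tos = 0x10 };
	struct cookie unset = { .tos = -1 }, set = { .tos = 0x2e };

	printf("%#x %#x\n",
	       (unsigned)effective_tos(&unset, &sk),   /* 0x10, socket tos */
	       (unsigned)effective_tos(&set, &sk));    /* 0x2e, cmsg tos   */
	return 0;
}
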
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 7686e3f..9e3c540 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -66,12 +66,14 @@
 	}
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 
-	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
 }
+#endif
 
 int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
 #endif
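
The guard added around tcp_v6_send_check() uses IS_ENABLED(CONFIG_IPV6),
which is true for both built-in (=y) and modular (=m) configurations, and
the helper now reads the peer address from sk->sk_v6_daddr on struct sock
rather than the old np->daddr. A simplified stand-in for that style of
compile-time gating (the real macro lives in include/linux/kconfig.h and
also handles the =m case; this model only tests a -DCONFIG_IPV6 define):

#include <stdio.h>

#ifdef CONFIG_IPV6
#define MODEL_IS_ENABLED_IPV6 1
#else
#define MODEL_IS_ENABLED_IPV6 0
#endif

#if MODEL_IS_ENABLED_IPV6
/* Only compiled when the option is on, like tcp_v6_send_check() above. */
static void v6_send_check(void)
{
	puts("IPv6 checksum helper compiled in");
}
#endif

int main(void)
{
#if MODEL_IS_ENABLED_IPV6
	v6_send_check();
#else
	puts("CONFIG_IPV6 off: the helper is not even compiled");
#endif
	return 0;
}
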
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5e661a9..2182525 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -268,48 +268,39 @@
  *	exported functions
  */
 
-extern struct fib6_table        *fib6_get_table(struct net *net, u32 id);
-extern struct fib6_table        *fib6_new_table(struct net *net, u32 id);
-extern struct dst_entry         *fib6_rule_lookup(struct net *net,
-						  struct flowi6 *fl6, int flags,
-						  pol_lookup_t lookup);
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+				   int flags, pol_lookup_t lookup);
 
-extern struct fib6_node		*fib6_lookup(struct fib6_node *root,
-					     const struct in6_addr *daddr,
-					     const struct in6_addr *saddr);
+struct fib6_node *fib6_lookup(struct fib6_node *root,
+			      const struct in6_addr *daddr,
+			      const struct in6_addr *saddr);
 
-struct fib6_node		*fib6_locate(struct fib6_node *root,
-					     const struct in6_addr *daddr, int dst_len,
-					     const struct in6_addr *saddr, int src_len);
+struct fib6_node *fib6_locate(struct fib6_node *root,
+			      const struct in6_addr *daddr, int dst_len,
+			      const struct in6_addr *saddr, int src_len);
 
-extern void			fib6_clean_all_ro(struct net *net,
-					       int (*func)(struct rt6_info *, void *arg),
-					       int prune, void *arg);
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+		    int prune, void *arg);
 
-extern void			fib6_clean_all(struct net *net,
-					       int (*func)(struct rt6_info *, void *arg),
-					       int prune, void *arg);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
 
-extern int			fib6_add(struct fib6_node *root,
-					 struct rt6_info *rt,
-					 struct nl_info *info);
+int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
-extern int			fib6_del(struct rt6_info *rt,
-					 struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
 
-extern void			inet6_rt_notify(int event, struct rt6_info *rt,
-						struct nl_info *info);
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
 
-extern void			fib6_run_gc(unsigned long expires,
-					    struct net *net, bool force);
+void fib6_gc_cleanup(void);
 
-extern void			fib6_gc_cleanup(void);
+int fib6_init(void);
 
-extern int			fib6_init(void);
+int ipv6_route_open(struct inode *inode, struct file *file);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern int			fib6_rules_init(void);
-extern void			fib6_rules_cleanup(void);
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
 #else
 static inline int               fib6_rules_init(void)
 {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2b786b7..733747c 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -51,7 +51,7 @@
 	return (flags >> 3) & 7;
 }
 
-extern void rt6_bind_peer(struct rt6_info *rt, int create);
+void rt6_bind_peer(struct rt6_info *rt, int create);
 
 static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
 {
@@ -72,70 +72,58 @@
 	return __rt6_get_peer(rt, 1);
 }
 
-extern void			ip6_route_input(struct sk_buff *skb);
+void ip6_route_input(struct sk_buff *skb);
 
-extern struct dst_entry *	ip6_route_output(struct net *net,
-						 const struct sock *sk,
-						 struct flowi6 *fl6);
-extern struct dst_entry *	ip6_route_lookup(struct net *net,
-						 struct flowi6 *fl6, int flags);
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+				   struct flowi6 *fl6);
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+				   int flags);
 
-extern int			ip6_route_init(void);
-extern void			ip6_route_cleanup(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
 
-extern int			ipv6_route_ioctl(struct net *net,
-						 unsigned int cmd,
-						 void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
 
-extern int			ip6_route_add(struct fib6_config *cfg);
-extern int			ip6_ins_rt(struct rt6_info *);
-extern int			ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg);
+int ip6_ins_rt(struct rt6_info *);
+int ip6_del_rt(struct rt6_info *);
 
-extern int			ip6_route_get_saddr(struct net *net,
-						    struct rt6_info *rt,
-						    const struct in6_addr *daddr,
-						    unsigned int prefs,
-						    struct in6_addr *saddr);
+int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+			const struct in6_addr *daddr, unsigned int prefs,
+			struct in6_addr *saddr);
 
-extern struct rt6_info		*rt6_lookup(struct net *net,
-					    const struct in6_addr *daddr,
-					    const struct in6_addr *saddr,
-					    int oif, int flags);
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+			    const struct in6_addr *saddr, int oif, int flags);
 
-extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
-					 struct flowi6 *fl6);
-extern int icmp6_dst_gc(void);
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+int icmp6_dst_gc(void);
 
-extern void fib6_force_start_gc(struct net *net);
+void fib6_force_start_gc(struct net *net);
 
-extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
-					   const struct in6_addr *addr,
-					   bool anycast);
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+				    const struct in6_addr *addr, bool anycast);
 
 /*
  *	support functions for ND
  *
  */
-extern struct rt6_info *	rt6_get_dflt_router(const struct in6_addr *addr,
-						    struct net_device *dev);
-extern struct rt6_info *	rt6_add_dflt_router(const struct in6_addr *gwaddr,
-						    struct net_device *dev,
-						    unsigned int pref);
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+				     struct net_device *dev);
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+				     struct net_device *dev, unsigned int pref);
 
-extern void			rt6_purge_dflt_routers(struct net *net);
+void rt6_purge_dflt_routers(struct net *net);
 
-extern int			rt6_route_rcv(struct net_device *dev,
-					      u8 *opt, int len,
-					      const struct in6_addr *gwaddr);
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+		  const struct in6_addr *gwaddr);
 
-extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-			    int oif, u32 mark);
-extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
-			       __be32 mtu);
-extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
-extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
-				   u32 mark);
-extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+		     u32 mark);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+			    u32 mark);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
 
 struct netlink_callback;
 
@@ -145,10 +133,10 @@
 	struct net *net;
 };
 
-extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
-extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+void rt6_ifdown(struct net *net, struct net_device *dev);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 
 
 /*
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index cbf2be3..9922093 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -165,7 +165,7 @@
 #define FIB_TABLE_HASHSZ 2
 #endif
 
-extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 
 #define FIB_RES_SADDR(net, res)				\
 	((FIB_RES_NH(res).nh_saddr_genid ==		\
@@ -187,14 +187,14 @@
 	unsigned long		tb_data[0];
 };
 
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
-			    struct fib_result *res, int fib_flags);
-extern int fib_table_insert(struct fib_table *, struct fib_config *);
-extern int fib_table_delete(struct fib_table *, struct fib_config *);
-extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
-			  struct netlink_callback *cb);
-extern int fib_table_flush(struct fib_table *table);
-extern void fib_free_table(struct fib_table *tb);
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+		     struct fib_result *res, int fib_flags);
+int fib_table_insert(struct fib_table *, struct fib_config *);
+int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+		   struct netlink_callback *cb);
+int fib_table_flush(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
 
 
 
@@ -234,14 +234,13 @@
 }
 
 #else /* CONFIG_IP_MULTIPLE_TABLES */
-extern int __net_init fib4_rules_init(struct net *net);
-extern void __net_exit fib4_rules_exit(struct net *net);
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
 
-extern struct fib_table *fib_new_table(struct net *net, u32 id);
-extern struct fib_table *fib_get_table(struct net *net, u32 id);
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
 
-extern int __fib_lookup(struct net *net, struct flowi4 *flp,
-			struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
 			     struct fib_result *res)
@@ -269,12 +268,12 @@
 
 /* Exported by fib_frontend.c */
 extern const struct nla_policy rtm_ipv4_policy[];
-extern void		ip_fib_init(void);
-extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
-extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
-			       u8 tos, int oif, struct net_device *dev,
-			       struct in_device *idev, u32 *itag);
-extern void fib_select_default(struct fib_result *res);
+void ip_fib_init(void);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+			u8 tos, int oif, struct net_device *dev,
+			struct in_device *idev, u32 *itag);
+void fib_select_default(struct fib_result *res);
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
@@ -288,15 +287,15 @@
 #endif
 
 /* Exported by fib_semantics.c */
-extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
-extern int fib_sync_down_dev(struct net_device *dev, int force);
-extern int fib_sync_down_addr(struct net *net, __be32 local);
-extern int fib_sync_up(struct net_device *dev);
-extern void fib_select_multipath(struct fib_result *res);
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_up(struct net_device *dev);
+void fib_select_multipath(struct fib_result *res);
 
 /* Exported by fib_trie.c */
-extern void fib_trie_init(void);
-extern struct fib_table *fib_trie_table(u32 id);
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
@@ -314,7 +313,7 @@
 #endif
 }
 
-extern void free_fib_info(struct fib_info *fi);
+void free_fib_info(struct fib_info *fi);
 
 static inline void fib_info_put(struct fib_info *fi)
 {
@@ -323,8 +322,8 @@
 }
 
 #ifdef CONFIG_PROC_FS
-extern int __net_init  fib_proc_init(struct net *net);
-extern void __net_exit fib_proc_exit(struct net *net);
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
 #else
 static inline int fib_proc_init(struct net *net)
 {
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a0a4a10..732f8c6 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -150,6 +150,9 @@
 		  __be32 src, __be32 dst, __u8 proto,
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+					 int gso_type_mask);
+
 static inline void iptunnel_xmit_stats(int err,
 				       struct net_device_stats *err_stats,
 				       struct pcpu_tstats __percpu *stats)
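
The newly declared iptunnel_handle_offloads() gives tunnel drivers a single
place to reconcile an skb's GSO and checksum state with the encapsulation
about to be added. A hedged sketch of the calling convention as tunnel
transmit paths of this era use it (the surrounding names are illustrative;
the function hands back either the skb to keep using or an ERR_PTR on
failure):

	/* In a tunnel xmit path, before pushing the outer header;
	 * 'csum' says whether an outer checksum is wanted. */
	skb = iptunnel_handle_offloads(skb, csum, SKB_GSO_GRE);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* offload setup failed */
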
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9c4d37e..5679d92 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -109,7 +109,6 @@
 struct ip_vs_iphdr {
 	__u32 len;	/* IPv4 simply where L4 starts
 			   IPv6 where L4 Transport Header starts */
-	__u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
 	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
 	__s16 protocol;
 	__s32 flags;
@@ -117,34 +116,12 @@
 	union nf_inet_addr daddr;
 };
 
-/* Dependency to module: nf_defrag_ipv6 */
-#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
-{
-	return skb->nfct_reasm;
-}
-static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
-				      int len, void *buffer,
-				      const struct ip_vs_iphdr *ipvsh)
-{
-	if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
-		return skb_header_pointer(skb_nfct_reasm(skb),
-					  ipvsh->thoff_reasm, len, buffer);
-
-	return skb_header_pointer(skb, offset, len, buffer);
-}
-#else
-static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
-{
-	return NULL;
-}
 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
 				      int len, void *buffer,
 				      const struct ip_vs_iphdr *ipvsh)
 {
 	return skb_header_pointer(skb, offset, len, buffer);
 }
-#endif
 
 static inline void
 ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
@@ -171,19 +148,12 @@
 			(struct ipv6hdr *)skb_network_header(skb);
 		iphdr->saddr.in6 = iph->saddr;
 		iphdr->daddr.in6 = iph->daddr;
-		/* ipv6_find_hdr() updates len, flags, thoff_reasm */
-		iphdr->thoff_reasm = 0;
+		/* ipv6_find_hdr() updates len, flags */
 		iphdr->len	 = 0;
 		iphdr->flags	 = 0;
 		iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
 						 &iphdr->fragoffs,
 						 &iphdr->flags);
-		/* get proto from re-assembled packet and it's offset */
-		if (skb_nfct_reasm(skb))
-			iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
-							&iphdr->thoff_reasm,
-							-1, NULL, NULL);
-
 	} else
 #endif
 	{
@@ -236,7 +206,7 @@
 #ifdef CONFIG_IP_VS_DEBUG
 #include <linux/net.h>
 
-extern int ip_vs_get_debug_level(void);
+int ip_vs_get_debug_level(void);
 
 static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
 					 const union nf_inet_addr *addr,
@@ -532,9 +502,9 @@
 	struct tcp_states_t	*tcp_state_table;
 };
 
-extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
-extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
-						     unsigned short proto);
+struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+					      unsigned short proto);
 
 struct ip_vs_conn_param {
 	struct net			*net;
@@ -1173,8 +1143,8 @@
  *      IPVS core functions
  *      (from ip_vs_core.c)
  */
-extern const char *ip_vs_proto_name(unsigned int proto);
-extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP	1
@@ -1237,22 +1207,22 @@
 	smp_mb__before_atomic_dec();
 	atomic_dec(&cp->refcnt);
 }
-extern void ip_vs_conn_put(struct ip_vs_conn *cp);
-extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
 				  const union nf_inet_addr *daddr,
 				  __be16 dport, unsigned int flags,
 				  struct ip_vs_dest *dest, __u32 fwmark);
-extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
-extern const char * ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
-extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(struct net *net);
-extern int ip_vs_conn_init(void);
-extern void ip_vs_conn_cleanup(void);
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct);
+void ip_vs_random_dropentry(struct net *net);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
 
 static inline void ip_vs_control_del(struct ip_vs_conn *cp)
 {
@@ -1317,37 +1287,36 @@
 /*
  * IPVS netns init & cleanup functions
  */
-extern int ip_vs_estimator_net_init(struct net *net);
-extern int ip_vs_control_net_init(struct net *net);
-extern int ip_vs_protocol_net_init(struct net *net);
-extern int ip_vs_app_net_init(struct net *net);
-extern int ip_vs_conn_net_init(struct net *net);
-extern int ip_vs_sync_net_init(struct net *net);
-extern void ip_vs_conn_net_cleanup(struct net *net);
-extern void ip_vs_app_net_cleanup(struct net *net);
-extern void ip_vs_protocol_net_cleanup(struct net *net);
-extern void ip_vs_control_net_cleanup(struct net *net);
-extern void ip_vs_estimator_net_cleanup(struct net *net);
-extern void ip_vs_sync_net_cleanup(struct net *net);
-extern void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct net *net);
+int ip_vs_control_net_init(struct net *net);
+int ip_vs_protocol_net_init(struct net *net);
+int ip_vs_app_net_init(struct net *net);
+int ip_vs_conn_net_init(struct net *net);
+int ip_vs_sync_net_init(struct net *net);
+void ip_vs_conn_net_cleanup(struct net *net);
+void ip_vs_app_net_cleanup(struct net *net);
+void ip_vs_protocol_net_cleanup(struct net *net);
+void ip_vs_control_net_cleanup(struct net *net);
+void ip_vs_estimator_net_cleanup(struct net *net);
+void ip_vs_sync_net_cleanup(struct net *net);
+void ip_vs_service_net_cleanup(struct net *net);
 
 /*
  *      IPVS application functions
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern struct ip_vs_app *register_ip_vs_app(struct net *net,
-					    struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
-extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
-				  __u16 proto, __u16 port);
-extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
-extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+			   __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
-extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
 
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
@@ -1368,17 +1337,15 @@
 /*
  *	IPVS protocol functions (from ip_vs_proto.c)
  */
-extern int ip_vs_protocol_init(void);
-extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
-extern int *ip_vs_create_timeout_table(int *table, int size);
-extern int
-ip_vs_set_state_timeout(int *table, int num, const char *const *names,
-			const char *name, int to);
-extern void
-ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
-			  const struct sk_buff *skb,
-			  int offset, const char *msg);
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+			    const char *name, int to);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+			       const struct sk_buff *skb, int offset,
+			       const char *msg);
 
 extern struct ip_vs_protocol ip_vs_protocol_tcp;
 extern struct ip_vs_protocol ip_vs_protocol_udp;
@@ -1391,22 +1358,22 @@
  *      Registering/unregistering scheduler functions
  *      (from ip_vs_sched.c)
  */
-extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
-				struct ip_vs_scheduler *scheduler);
-extern void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
-				   struct ip_vs_scheduler *sched);
-extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
-extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
-extern struct ip_vs_conn *
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+			 struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+			    struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 	       struct ip_vs_proto_data *pd, int *ignored,
 	       struct ip_vs_iphdr *iph);
-extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-			struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
 
-extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
 
 /*
@@ -1415,25 +1382,24 @@
 extern struct ip_vs_stats ip_vs_stats;
 extern int sysctl_ip_vs_sync_ver;
 
-extern struct ip_vs_service *
+struct ip_vs_service *
 ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport);
 
-extern bool
-ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
-		       const union nf_inet_addr *daddr, __be16 dport);
+bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+			    const union nf_inet_addr *daddr, __be16 dport);
 
-extern int ip_vs_use_count_inc(void);
-extern void ip_vs_use_count_dec(void);
-extern int ip_vs_register_nl_ioctl(void);
-extern void ip_vs_unregister_nl_ioctl(void);
-extern int ip_vs_control_init(void);
-extern void ip_vs_control_cleanup(void);
-extern struct ip_vs_dest *
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
 		__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
 		__u16 protocol, __u32 fwmark, __u32 flags);
-extern void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 {
@@ -1446,60 +1412,59 @@
 	atomic_dec(&dest->refcnt);
 }
 
+static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
+{
+	if (atomic_dec_return(&dest->refcnt) < 0)
+		kfree(dest);
+}
+
 /*
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
-			     __u8 syncid);
-extern int stop_sync_thread(struct net *net, int state);
-extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int stop_sync_thread(struct net *net, int state);
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
-				 struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+			  struct ip_vs_stats *stats);
 
 /*
  *	Various IPVS packet transmitters (from ip_vs_xmit.c)
  */
-extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			     struct ip_vs_protocol *pp,
-			     struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			     struct ip_vs_protocol *pp,
-			     struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-			   struct ip_vs_protocol *pp, int offset,
-			   unsigned int hooknum, struct ip_vs_iphdr *iph);
-extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+		    struct ip_vs_protocol *pp, int offset,
+		    unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-				struct ip_vs_protocol *pp,
-				struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-			     struct ip_vs_protocol *pp,
-			     struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-				struct ip_vs_protocol *pp,
-				struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-			    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-			      struct ip_vs_protocol *pp, int offset,
-			      unsigned int hooknum, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+		       struct ip_vs_protocol *pp, int offset,
+		       unsigned int hooknum, struct ip_vs_iphdr *iph);
 #endif
 
 #ifdef CONFIG_SYSCTL
@@ -1548,15 +1513,15 @@
 	return fwd;
 }
 
-extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
-			   struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		    struct ip_vs_conn *cp, int dir);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
-			      struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+		       struct ip_vs_conn *cp, int dir);
 #endif
 
-extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
 
 static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
 {
@@ -1615,13 +1580,13 @@
 #endif
 }
 
-extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
-				   int outin);
-extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
-extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
-				      struct ip_vs_conn *cp, u_int8_t proto,
-				      const __be16 port, int from_rs);
-extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+			    int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+			       struct ip_vs_conn *cp, u_int8_t proto,
+			       const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
 
 #else
 
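
Beyond the prototype cleanup, two substantive things happen in ip_vs.h: the
nfct_reasm handling is gone, so frag_safe_skb_hp() now always reads from the
skb it is given, and the new ip_vs_dest_put_and_free() frees a destination
only once its reference count drops below zero, i.e. refcnt == 0 still means
one outstanding holder. A userspace model of that drop-below-zero convention
using C11 atomics (the type and function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dest {				/* stand-in for struct ip_vs_dest */
	atomic_int refcnt;		/* 0 == exactly one holder left */
};

static void dest_put_and_free(struct dest *d)
{
	/* Mirrors "if (atomic_dec_return(&dest->refcnt) < 0) kfree(dest);"
	 * from the hunk above: only the put that takes the counter
	 * negative frees the object. */
	if (atomic_fetch_sub(&d->refcnt, 1) - 1 < 0) {
		puts("last reference dropped, freeing");
		free(d);
	}
}

int main(void)
{
	struct dest *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	atomic_init(&d->refcnt, 1);	/* two holders under this scheme */
	dest_put_and_free(d);		/* 1 -> 0: still referenced */
	dest_put_and_free(d);		/* 0 -> -1: freed here */
	return 0;
}
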
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index bbf1c8f..2a5f668 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -244,14 +244,15 @@
 	struct rcu_head			rcu;
 };
 
-extern struct ip6_flowlabel	*fl6_sock_lookup(struct sock *sk, __be32 label);
-extern struct ipv6_txoptions	*fl6_merge_options(struct ipv6_txoptions * opt_space,
-						   struct ip6_flowlabel * fl,
-						   struct ipv6_txoptions * fopt);
-extern void			fl6_free_socklist(struct sock *sk);
-extern int			ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
-extern int			ip6_flowlabel_init(void);
-extern void			ip6_flowlabel_cleanup(void);
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+					 struct ip6_flowlabel *fl,
+					 struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
@@ -259,7 +260,7 @@
 		atomic_dec(&fl->users);
 }
 
-extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 			       struct icmp6hdr *thdr, int len);
@@ -267,19 +268,21 @@
 struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
 				      struct sock *sk, struct flowi6 *fl6);
 
-extern int 			ip6_ra_control(struct sock *sk, int sel);
+int ip6_ra_control(struct sock *sk, int sel);
 
-extern int			ipv6_parse_hopopts(struct sk_buff *skb);
+int ipv6_parse_hopopts(struct sk_buff *skb);
 
-extern struct ipv6_txoptions *  ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
-extern struct ipv6_txoptions *	ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-						   int newtype,
-						   struct ipv6_opt_hdr __user *newopt,
-						   int newoptlen);
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+					struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+					  struct ipv6_txoptions *opt,
+					  int newtype,
+					  struct ipv6_opt_hdr __user *newopt,
+					  int newoptlen);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 					  struct ipv6_txoptions *opt);
 
-extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
 static inline bool ipv6_accept_ra(struct inet6_dev *idev)
 {
@@ -306,7 +309,7 @@
 #define IPV6_FRAG_LOW_THRESH	(3 * 1024*1024)	/* 3145728 */
 #define IPV6_FRAG_TIMEOUT	(60 * HZ)	/* 60 seconds */
 
-extern int __ipv6_addr_type(const struct in6_addr *addr);
+int __ipv6_addr_type(const struct in6_addr *addr);
 static inline int ipv6_addr_type(const struct in6_addr *addr)
 {
 	return __ipv6_addr_type(addr) & 0xffff;
@@ -537,14 +540,14 @@
 }
 
 /* more secured version of ipv6_addr_hash() */
-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 {
 	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
 
 	return jhash_3words(v,
 			    (__force u32)a->s6_addr32[2],
 			    (__force u32)a->s6_addr32[3],
-			    ipv6_hash_secret);
+			    initval);
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -656,9 +659,9 @@
 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
+int ip6_dst_hoplimit(struct dst_entry *dst);
 
 /*
  *	Header manipulation
@@ -682,83 +685,65 @@
  *	rcv function (called from netdevice level)
  */
 
-extern int			ipv6_rcv(struct sk_buff *skb, 
-					 struct net_device *dev, 
-					 struct packet_type *pt,
-					 struct net_device *orig_dev);
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+	     struct packet_type *pt, struct net_device *orig_dev);
 
-extern int			ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sk_buff *skb);
 
 /*
  *	upper-layer output functions
  */
-extern int			ip6_xmit(struct sock *sk,
-					 struct sk_buff *skb,
-					 struct flowi6 *fl6,
-					 struct ipv6_txoptions *opt,
-					 int tclass);
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+	     struct ipv6_txoptions *opt, int tclass);
 
-extern int			ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 
-extern int			ip6_append_data(struct sock *sk,
-						int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
-		    				void *from,
-						int length,
-						int transhdrlen,
-		      				int hlimit,
-		      				int tclass,
-						struct ipv6_txoptions *opt,
-						struct flowi6 *fl6,
-						struct rt6_info *rt,
-						unsigned int flags,
-						int dontfrag);
+int ip6_append_data(struct sock *sk,
+		    int getfrag(void *from, char *to, int offset, int len,
+				int odd, struct sk_buff *skb),
+		    void *from, int length, int transhdrlen, int hlimit,
+		    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+		    struct rt6_info *rt, unsigned int flags, int dontfrag);
 
-extern int			ip6_push_pending_frames(struct sock *sk);
+int ip6_push_pending_frames(struct sock *sk);
 
-extern void			ip6_flush_pending_frames(struct sock *sk);
+void ip6_flush_pending_frames(struct sock *sk);
 
-extern int			ip6_dst_lookup(struct sock *sk,
-					       struct dst_entry **dst,
-					       struct flowi6 *fl6);
-extern struct dst_entry *	ip6_dst_lookup_flow(struct sock *sk,
-						    struct flowi6 *fl6,
-						    const struct in6_addr *final_dst,
-						    bool can_sleep);
-extern struct dst_entry *	ip6_sk_dst_lookup_flow(struct sock *sk,
-						       struct flowi6 *fl6,
-						       const struct in6_addr *final_dst,
-						       bool can_sleep);
-extern struct dst_entry *	ip6_blackhole_route(struct net *net,
-						    struct dst_entry *orig_dst);
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+				      const struct in6_addr *final_dst,
+				      bool can_sleep);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+					 const struct in6_addr *final_dst,
+					 bool can_sleep);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+				      struct dst_entry *orig_dst);
 
 /*
  *	skb processing functions
  */
 
-extern int			ip6_output(struct sk_buff *skb);
-extern int			ip6_forward(struct sk_buff *skb);
-extern int			ip6_input(struct sk_buff *skb);
-extern int			ip6_mc_input(struct sk_buff *skb);
+int ip6_output(struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
 
-extern int			__ip6_local_out(struct sk_buff *skb);
-extern int			ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out(struct sk_buff *skb);
 
 /*
  *	Extension header (options) processing
  */
 
-extern void 			ipv6_push_nfrag_opts(struct sk_buff *skb,
-						     struct ipv6_txoptions *opt,
-						     u8 *proto,
-						     struct in6_addr **daddr_p);
-extern void			ipv6_push_frag_opts(struct sk_buff *skb,
-						    struct ipv6_txoptions *opt,
-						    u8 *proto);
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+			  u8 *proto, struct in6_addr **daddr_p);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+			 u8 *proto);
 
-extern int			ipv6_skip_exthdr(const struct sk_buff *, int start,
-					         u8 *nexthdrp, __be16 *frag_offp);
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+		     __be16 *frag_offp);
 
-extern bool			ipv6_ext_hdr(u8 nexthdr);
+bool ipv6_ext_hdr(u8 nexthdr);
 
 enum {
 	IP6_FH_F_FRAG		= (1 << 0),
@@ -767,57 +752,44 @@
 };
 
 /* find specified header and get offset to it */
-extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
-			 int target, unsigned short *fragoff, int *fragflg);
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+		  unsigned short *fragoff, int *fragflg);
 
-extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
-extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
-				       const struct ipv6_txoptions *opt,
-				       struct in6_addr *orig);
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+				const struct ipv6_txoptions *opt,
+				struct in6_addr *orig);
 
 /*
  *	socket options (ipv6_sockglue.c)
  */
 
-extern int			ipv6_setsockopt(struct sock *sk, int level, 
-						int optname,
-						char __user *optval, 
-						unsigned int optlen);
-extern int			ipv6_getsockopt(struct sock *sk, int level, 
-						int optname,
-						char __user *optval, 
-						int __user *optlen);
-extern int			compat_ipv6_setsockopt(struct sock *sk,
-						int level,
-						int optname,
-						char __user *optval,
-						unsigned int optlen);
-extern int			compat_ipv6_getsockopt(struct sock *sk,
-						int level,
-						int optname,
-						char __user *optval,
-						int __user *optlen);
+int ipv6_setsockopt(struct sock *sk, int level, int optname,
+		    char __user *optval, unsigned int optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+		    char __user *optval, int __user *optlen);
+int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+			   char __user *optval, unsigned int optlen);
+int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+			   char __user *optval, int __user *optlen);
 
-extern int			ip6_datagram_connect(struct sock *sk, 
-						     struct sockaddr *addr, int addr_len);
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 
-extern int 			ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern int 			ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
-extern void			ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
-						u32 info, u8 *payload);
-extern void			ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
-extern void			ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+		     u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
 
-extern int inet6_release(struct socket *sock);
-extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, 
-		      int addr_len);
-extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
-			 int *uaddr_len, int peer);
-extern int inet6_ioctl(struct socket *sock, unsigned int cmd, 
-		       unsigned long arg);
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+		  int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 
-extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
 			      struct sock *sk);
 
 /*
@@ -829,30 +801,27 @@
 struct group_source_req;
 struct group_filter;
 
-extern int ip6_mc_source(int add, int omode, struct sock *sk,
-			 struct group_source_req *pgsr);
-extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
-extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
-			 struct group_filter __user *optval,
-			 int __user *optlen);
-extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-				    const struct in6_addr *daddr, u32 rnd);
+int ip6_mc_source(int add, int omode, struct sock *sk,
+		  struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+		  struct group_filter __user *optval, int __user *optlen);
 
 #ifdef CONFIG_PROC_FS
-extern int  ac6_proc_init(struct net *net);
-extern void ac6_proc_exit(struct net *net);
-extern int  raw6_proc_init(void);
-extern void raw6_proc_exit(void);
-extern int  tcp6_proc_init(struct net *net);
-extern void tcp6_proc_exit(struct net *net);
-extern int  udp6_proc_init(struct net *net);
-extern void udp6_proc_exit(struct net *net);
-extern int  udplite6_proc_init(void);
-extern void udplite6_proc_exit(void);
-extern int  ipv6_misc_proc_init(void);
-extern void ipv6_misc_proc_exit(void);
-extern int snmp6_register_dev(struct inet6_dev *idev);
-extern int snmp6_unregister_dev(struct inet6_dev *idev);
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
 
 #else
 static inline int ac6_proc_init(struct net *net) { return 0; }
@@ -865,10 +834,10 @@
 extern struct ctl_table ipv6_route_table_template[];
 extern struct ctl_table ipv6_icmp_table_template[];
 
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-extern int ipv6_sysctl_register(void);
-extern void ipv6_sysctl_unregister(void);
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
 #endif
 
 #endif /* _NET_IPV6_H */
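
ipv6_addr_jhash() becomes __ipv6_addr_jhash() and takes its initval from the
caller instead of the global ipv6_hash_secret, so each hash table can mix
addresses under its own secret, a hash-flooding hardening pattern. A toy
userspace model of the shape of that change (mix3() is a stand-in for
jhash_3words(), not the real function):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy mixer: three words combined under a caller-supplied initval. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
	uint32_t h = initval ^ 0x9e3779b9u;

	h = (h ^ a) * 0x85ebca6bu;
	h = (h ^ b) * 0xc2b2ae35u;
	h = (h ^ c) * 0x27d4eb2fu;
	return h ^ (h >> 16);
}

static uint32_t addr_jhash(const uint32_t a[4], uint32_t initval)
{
	/* Same folding as __ipv6_addr_jhash(): XOR words 0 and 1, then
	 * mix with words 2 and 3 under the caller's initval. */
	return mix3(a[0] ^ a[1], a[2], a[3], initval);
}

int main(void)
{
	const uint32_t addr[4] = { 0x20010db8u, 0x0u, 0x0u, 0x1u };

	/* Two tables, two seeds: the same address lands in different
	 * buckets, which is why initval moved out to the caller. */
	printf("%08" PRIx32 "\n", addr_jhash(addr, 0xdeadbeefu));
	printf("%08" PRIx32 "\n", addr_jhash(addr, 0x12345678u));
	return 0;
}
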
diff --git a/include/net/ipx.h b/include/net/ipx.h
index c1fec6b..9e9e354 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -123,23 +123,23 @@
 extern rwlock_t ipx_routes_lock;
 
 extern struct list_head ipx_interfaces;
-extern struct ipx_interface *ipx_interfaces_head(void);
+struct ipx_interface *ipx_interfaces_head(void);
 extern spinlock_t ipx_interfaces_lock;
 
 extern struct ipx_interface *ipx_primary_net;
 
-extern int ipx_proc_init(void);
-extern void ipx_proc_exit(void);
+int ipx_proc_init(void);
+void ipx_proc_exit(void);
 
-extern const char *ipx_frame_name(__be16);
-extern const char *ipx_device_name(struct ipx_interface *intrfc);
+const char *ipx_frame_name(__be16);
+const char *ipx_device_name(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
 {
 	atomic_inc(&intrfc->refcnt);
 }
 
-extern void ipxitf_down(struct ipx_interface *intrfc);
+void ipxitf_down(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
 {
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index 80ffde3..0224402 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -105,13 +105,13 @@
 void ircomm_tty_start(struct tty_struct *tty);
 void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
 
-extern int ircomm_tty_tiocmget(struct tty_struct *tty);
-extern int ircomm_tty_tiocmset(struct tty_struct *tty,
-			       unsigned int set, unsigned int clear);
-extern int ircomm_tty_ioctl(struct tty_struct *tty, 
-			    unsigned int cmd, unsigned long arg);
-extern void ircomm_tty_set_termios(struct tty_struct *tty, 
-				   struct ktermios *old_termios);
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+			unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+		     unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+			    struct ktermios *old_termios);
 
 #endif
 
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 3bed61d..a059465 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -112,20 +112,19 @@
 struct net_device;
 struct packet_type;
 
-extern void irda_proc_register(void);
-extern void irda_proc_unregister(void);
+void irda_proc_register(void);
+void irda_proc_unregister(void);
 
-extern int irda_sysctl_register(void);
-extern void irda_sysctl_unregister(void);
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
 
-extern int irsock_init(void);
-extern void irsock_cleanup(void);
+int irsock_init(void);
+void irsock_cleanup(void);
 
-extern int irda_nl_register(void);
-extern void irda_nl_unregister(void);
+int irda_nl_register(void);
+void irda_nl_unregister(void);
 
-extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
-			    struct packet_type *ptype,
-			    struct net_device *orig_dev);
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+		     struct packet_type *ptype, struct net_device *orig_dev);
 
 #endif /* NET_IRDA_H */
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 94c852d..1141747 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -162,7 +162,7 @@
         int irq, irq2;        /* Interrupts used */
         int dma, dma2;        /* DMA channel(s) used */
         int fifo_size;        /* FIFO size */
-        int irqflags;         /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */
+        int irqflags;         /* interrupt flags (ie, IRQF_SHARED) */
 	int direction;        /* Link direction, used by some FIR drivers */
 	int enabled;          /* Powered on? */
 	int suspended;        /* Suspended by APM */
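
The only change to irda_device.h besides the prototype sweep is dropping
IRQF_DISABLED from the comment: the flag had long been a no-op (handlers run
with local interrupts disabled regardless) and was being removed tree-wide,
leaving IRQF_SHARED as the only example worth naming. A hedged sketch of how
a driver with this chipio layout might request its possibly shared line
(driver_interrupt and the surrounding variables are illustrative):

	/* A shared line needs a non-NULL dev_id so the handler can tell
	 * whose device raised it; there is no IRQF_DISABLED to pass. */
	err = request_irq(self->io.irq, driver_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		return err;
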
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
index 4c90824..f9d88da 100644
--- a/include/net/irda/irlap_event.h
+++ b/include/net/irda/irlap_event.h
@@ -126,6 +126,6 @@
 		    struct sk_buff *skb, struct irlap_info *info);
 void irlap_print_event(IRLAP_EVENT event);
 
-extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
 
 #endif
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 6b1dc4f..57173ae 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -163,7 +163,7 @@
 void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
 			 __u8 caddr, int command);
 
-extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self, 
-					       struct sk_buff *skb);
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+					struct sk_buff *skb);
 
 #endif
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 5d5a6a4..a830b01 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -432,44 +432,32 @@
 /* First : function strictly used inside the kernel */
 
 /* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
-				 int length);
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 
 /* Second : functions that may be called by driver modules */
 
 /* Send a single event to user space */
-extern void wireless_send_event(struct net_device *	dev,
-				unsigned int		cmd,
-				union iwreq_data *	wrqu,
-				const char *		extra);
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+			 union iwreq_data *wrqu, const char *extra);
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
 
 /* Standard handler for SIOCSIWSPY */
-extern int iw_handler_set_spy(struct net_device *	dev,
-			      struct iw_request_info *	info,
-			      union iwreq_data *	wrqu,
-			      char *			extra);
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWSPY */
-extern int iw_handler_get_spy(struct net_device *	dev,
-			      struct iw_request_info *	info,
-			      union iwreq_data *	wrqu,
-			      char *			extra);
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCSIWTHRSPY */
-extern int iw_handler_set_thrspy(struct net_device *	dev,
-				 struct iw_request_info *info,
-				 union iwreq_data *	wrqu,
-				 char *			extra);
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWTHRSPY */
-extern int iw_handler_get_thrspy(struct net_device *	dev,
-				 struct iw_request_info *info,
-				 union iwreq_data *	wrqu,
-				 char *			extra);
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
 /* Driver call to update spy records */
-extern void wireless_spy_update(struct net_device *	dev,
-				unsigned char *		address,
-				struct iw_quality *	wstats);
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+			 struct iw_quality *wstats);
 
 /************************* INLINE FUNCTIONS *************************/
 /*
diff --git a/include/net/lapb.h b/include/net/lapb.h
index df892a9..9510f87 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -105,40 +105,40 @@
 };
 
 /* lapb_iface.c */
-extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_connect_indication(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
-extern int  lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
-extern int  lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_in.c */
-extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_out.c */
-extern void lapb_kick(struct lapb_cb *lapb);
-extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
-extern void lapb_establish_data_link(struct lapb_cb *lapb);
-extern void lapb_enquiry_response(struct lapb_cb *lapb);
-extern void lapb_timeout_response(struct lapb_cb *lapb);
-extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
 
 /* lapb_subr.c */
-extern void lapb_clear_queues(struct lapb_cb *lapb);
-extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_requeue_frames(struct lapb_cb *lapb);
-extern int  lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
-extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
-extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
-extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
 
 /* lapb_timer.c */
-extern void lapb_start_t1timer(struct lapb_cb *lapb);
-extern void lapb_start_t2timer(struct lapb_cb *lapb);
-extern void lapb_stop_t1timer(struct lapb_cb *lapb);
-extern void lapb_stop_t2timer(struct lapb_cb *lapb);
-extern int  lapb_t1timer_running(struct lapb_cb *lapb);
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
 
 /*
  * Debug levels.
diff --git a/include/net/llc.h b/include/net/llc.h
index 9e7d7f0..68490cb 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -95,29 +95,29 @@
 extern struct list_head llc_sap_list;
 extern spinlock_t llc_sap_list_lock;
 
-extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
-		   struct packet_type *pt, struct net_device *orig_dev);
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+	    struct net_device *orig_dev);
 
-extern int llc_mac_hdr_init(struct sk_buff *skb,
-			    const unsigned char *sa, const unsigned char *da);
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+		     const unsigned char *da);
 
-extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
-						   struct sk_buff *skb));
-extern void llc_remove_pack(int type);
+void llc_add_pack(int type,
+		  void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
 
-extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
 
-extern struct llc_sap *llc_sap_open(unsigned char lsap,
-				    int (*rcv)(struct sk_buff *skb,
-					       struct net_device *dev,
-					       struct packet_type *pt,
-					       struct net_device *orig_dev));
+struct llc_sap *llc_sap_open(unsigned char lsap,
+			     int (*rcv)(struct sk_buff *skb,
+					struct net_device *dev,
+					struct packet_type *pt,
+					struct net_device *orig_dev));
 static inline void llc_sap_hold(struct llc_sap *sap)
 {
 	atomic_inc(&sap->refcnt);
 }
 
-extern void llc_sap_close(struct llc_sap *sap);
+void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
 {
@@ -125,27 +125,27 @@
 		llc_sap_close(sap);
 }
 
-extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+struct llc_sap *llc_sap_find(unsigned char sap_value);
 
-extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
-				     unsigned char *dmac, unsigned char dsap);
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+			      unsigned char *dmac, unsigned char dsap);
 
-extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
 
-extern void llc_station_init(void);
-extern void llc_station_exit(void);
+void llc_station_init(void);
+void llc_station_exit(void);
 
 #ifdef CONFIG_PROC_FS
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
+int llc_proc_init(void);
+void llc_proc_exit(void);
 #else
 #define llc_proc_init()	(0)
 #define llc_proc_exit()	do { } while(0)
 #endif /* CONFIG_PROC_FS */
 #ifdef CONFIG_SYSCTL
-extern int llc_sysctl_init(void);
-extern void llc_sysctl_exit(void);
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
 
 extern int sysctl_llc2_ack_timeout;
 extern int sysctl_llc2_busy_timeout;
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index df83f69..f3be818 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -89,114 +89,92 @@
 
 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
-						   struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
-						      struct sk_buff *skb);
-extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
+					    struct sk_buff *skb);
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
 					       struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
-					       struct sk_buff *skb);
-extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
-						       struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
 						struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
-					      struct sk_buff *skb);
-extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
-						    struct sk_buff *skb);
-extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
-							 struct sk_buff *skb);
-extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
-extern u8 llc_circular_between(u8 a, u8 b, u8 c);
-extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+					     struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+						  struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
 
-extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+void llc_conn_rej_tmr_cb(unsigned long timeout_data);
 
-extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
 #endif /* LLC_C_AC_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 6ca3113..3948cf1 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -128,138 +128,93 @@
 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_detected(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
-					      struct sk_buff *skb);
-extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
-					      struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
-						    struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
 					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
+					      struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+					     struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
 					       struct sk_buff *skb);
-extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
-						      struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
-						      struct sk_buff *skb);
-extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+					       struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
 /* NOT_USED functions and their variations */
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+					      struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+					      struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+					      struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+					      struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
 
 /* Available connection action qualifiers */
-extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
-					   struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
 					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
-					     struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
-					    struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
-					      struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
-						  struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
-					      struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
-						struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
-						struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
 
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 2f97d8d..0134681 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -95,28 +95,24 @@
 	return skb->cb[sizeof(skb->cb) - 1];
 }
 
-extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
-				 struct proto *prot);
-extern void llc_sk_free(struct sock *sk);
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+			  struct proto *prot);
+void llc_sk_free(struct sock *sk);
 
-extern void llc_sk_reset(struct sock *sk);
+void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
-extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
-					 u8 first_p_bit);
-extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
-					 u8 first_f_bit);
-extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
-				      u16 *how_many_unacked);
-extern struct sock *llc_lookup_established(struct llc_sap *sap,
-					   struct llc_addr *daddr,
-					   struct llc_addr *laddr);
-extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
-extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+				    struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
 
-extern u8 llc_data_accept_state(u8 state);
-extern void llc_build_offset_table(void);
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
 #endif /* LLC_CONN_H */
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index f0cb909..8d5c543 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,8 +62,7 @@
 #define LLC_STATUS_CONFLICT	7 /* disconnect conn */
 #define LLC_STATUS_RESET_DONE	8 /*  */
 
-extern int llc_establish_connection(struct sock *sk, u8 *lmac,
-				    u8 *dmac, u8 dsap);
-extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
-extern int llc_send_disc(struct sock *sk);
+int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
 #endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 5a93d13..31e2de7 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -410,21 +410,20 @@
 	u8  ind_bits;		/* indicator bits set with macro */
 } __packed;
 
-extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
-extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
-extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
-extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
-extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
-extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
-				     struct llc_pdu_sn *prev_pdu,
-				     u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
-extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+			      u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
 #endif /* LLC_PDU_H */
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
index 37a3bbd..a61b98c 100644
--- a/include/net/llc_s_ac.h
+++ b/include/net/llc_s_ac.h
@@ -25,15 +25,13 @@
 /* All action functions must look like this */
 typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
-				       struct sk_buff *skb);
-extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_report_status(struct llc_sap *sap,
-					struct sk_buff *skb);
-extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_AC_H */
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
index e3acb93..84db3a5 100644
--- a/include/net/llc_s_ev.h
+++ b/include/net/llc_s_ev.h
@@ -53,15 +53,14 @@
 
 typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
-				       struct sk_buff *skb);
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_EV_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
index ed25bec..1e4df9f 100644
--- a/include/net/llc_sap.h
+++ b/include/net/llc_sap.h
@@ -19,18 +19,14 @@
 struct sk_buff;
 struct sock;
 
-extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
-			       unsigned char prim);
-extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
-				       u8 type, u32 data_size);
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+			unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+				u8 type, u32 data_size);
 
-extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
-				        struct sk_buff *skb,
-					unsigned char *dmac,
-					unsigned char dsap);
-extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
-				       struct sk_buff *skb,
-				       unsigned char *dmac,
-				       unsigned char dsap);
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+				 unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+				unsigned char *dmac, unsigned char dsap);
 #endif /* LLC_SAP_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cc6035f..7ceed99 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -829,6 +829,15 @@
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
+ * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
+ *	subframes instead of one huge frame for performance reasons.
+ *	All but the last MSDU of an A-MSDU should have this flag set. E.g.
+ *	if an A-MSDU has 3 frames, the first 2 must have the flag set, while
+ *	the 3rd (last) one must not have this flag set. The flag is used to
+ *	deal with retransmission/duplication recovery properly, since A-MSDU
+ *	subframes share the same sequence number. Reported subframes can be
+ *	either regular MSDUs or single-subframe A-MSDUs. Subframes must not
+ *	be interleaved with other frames.
  */
 enum mac80211_rx_flags {
 	RX_FLAG_MMIC_ERROR		= BIT(0),
@@ -859,6 +868,7 @@
 	RX_FLAG_STBC_MASK		= BIT(26) | BIT(27),
 	RX_FLAG_10MHZ			= BIT(28),
 	RX_FLAG_5MHZ			= BIT(29),
+	RX_FLAG_AMSDU_MORE		= BIT(30),
 };
 
 #define RX_FLAG_STBC_SHIFT		26
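
To illustrate the contract described in the new RX_FLAG_AMSDU_MORE
documentation: a driver delivering three decapsulated subframes sets the flag
on the first two and leaves it clear on the last. A hedged sketch -- the
function name and the use of an skb queue are invented, only the mac80211
symbols are real:

    /* Hand each A-MSDU subframe to mac80211, tagging all but the last
     * one with RX_FLAG_AMSDU_MORE so duplicate detection copes with the
     * shared sequence number. */
    static void drv_rx_amsdu(struct ieee80211_hw *hw,
                             struct sk_buff_head *subframes)
    {
            struct sk_buff *skb;

            while ((skb = __skb_dequeue(subframes))) {
                    struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(skb);

                    if (!skb_queue_empty(subframes))        /* not the last */
                            rxs->flag |= RX_FLAG_AMSDU_MORE;
                    ieee80211_rx(hw, skb);
            }
    }
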
@@ -1492,6 +1502,15 @@
  *
  * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
  *	only, to allow getting TBTT of a DTIM beacon.
+ *
+ * @IEEE80211_HW_SUPPORTS_HT_CCK_RATES: Hardware supports mixing HT/CCK rates
+ *	and can cope with CCK rates in an aggregation session (e.g. by not
+ *	using aggregation for such frames).
+ *
+ * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h-based channel-switch (CSA)
+ *	for a single active channel while using channel contexts. When support
+ *	is not enabled, the default action is to disconnect when getting the
+ *	CSA frame.
  */
 enum ieee80211_hw_flags {
 	IEEE80211_HW_HAS_RATE_CONTROL			= 1<<0,
@@ -1522,6 +1541,7 @@
 	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF		= 1<<25,
 	IEEE80211_HW_TIMING_BEACON_ONLY			= 1<<26,
 	IEEE80211_HW_SUPPORTS_HT_CCK_RATES		= 1<<27,
+	IEEE80211_HW_CHANCTX_STA_CSA			= 1<<28,
 };
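
Capability flags such as the new IEEE80211_HW_CHANCTX_STA_CSA bit are
advertised once, before registration. A plausible probe-time sketch (the
function name is invented):

    static int drv_setup(struct ieee80211_hw *hw)
    {
            /* Let mac80211 perform the 802.11h channel switch instead of
             * disconnecting when a CSA element is received, and allow CCK
             * rates alongside HT aggregation. */
            hw->flags |= IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
                         IEEE80211_HW_CHANCTX_STA_CSA;

            return ieee80211_register_hw(hw);
    }
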
 
 /**
@@ -2666,6 +2686,10 @@
  *	zero using ieee80211_csa_is_complete() after the beacon has been
  *	transmitted and then call ieee80211_csa_finish().
  *
+ * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
+ *	information in bss_conf is set up and the beacon can be retrieved. A
+ *	channel context is bound before this is called.
+ * @leave_ibss: Leave the IBSS again.
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw,
@@ -2857,6 +2881,9 @@
 	void (*channel_switch_beacon)(struct ieee80211_hw *hw,
 				      struct ieee80211_vif *vif,
 				      struct cfg80211_chan_def *chandef);
+
+	int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+	void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 };
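
A driver opting into the new IBSS callbacks wires them up in its ops table;
leaving them NULL keeps the existing mac80211-internal behaviour. A sketch
with invented driver functions:

    static int drv_join_ibss(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif)
    {
            /* bss_conf and the beacon are valid here, and a channel
             * context is already bound to the vif. */
            return 0;
    }

    static void drv_leave_ibss(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif)
    {
    }

    static const struct ieee80211_ops drv_ops = {
            /* ... mandatory callbacks such as .tx, .start, .stop ... */
            .join_ibss  = drv_join_ibss,
            .leave_ibss = drv_leave_ibss,
    };
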
 
 /**
@@ -3920,6 +3947,25 @@
 						void *data);
 
 /**
+ * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This version can only be used while holding the RTNL.
+ *
+ * @hw: the hardware struct whose interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
+					      u32 iter_flags,
+					      void (*iterator)(void *data,
+						u8 *mac,
+						struct ieee80211_vif *vif),
+					      void *data);
+
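
As a usage illustration for the RTNL-only iterator -- everything except the
mac80211 symbols is invented, and the caller is assumed to hold the RTNL
already:

    static void drv_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
    {
            int *count = data;      /* must not sleep in here */

            (*count)++;
    }

    static int drv_count_active(struct ieee80211_hw *hw)
    {
            int count = 0;

            ASSERT_RTNL();
            ieee80211_iterate_active_interfaces_rtnl(hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            drv_iter, &count);
            return count;
    }
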
+/**
  * ieee80211_queue_work - add work onto the mac80211 workqueue
  *
  * Drivers and mac80211 use this to add work onto the mac80211 workqueue.
@@ -4525,4 +4571,18 @@
 				    struct cfg80211_wowlan_wakeup *wakeup,
 				    gfp_t gfp);
 
+/**
+ * ieee80211_tx_prepare_skb - prepare an 802.11 skb for transmission
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface
+ * @skb: frame to be sent from within the driver
+ * @band: the band to transmit on
+ * @sta: optional pointer to get the station the frame is sent to
+ *
+ * Note: must be called under RCU lock
+ */
+bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif, struct sk_buff *skb,
+			      int band, struct ieee80211_sta **sta);
+
 #endif /* MAC80211_H */
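
A plausible call pattern for the new ieee80211_tx_prepare_skb() helper, for a
driver finishing an internally generated frame (all names other than the
mac80211 symbols are invented):

    static bool drv_prep_frame(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct sk_buff *skb, int band)
    {
            struct ieee80211_sta *sta;
            bool ok;

            rcu_read_lock();        /* documented locking requirement */
            ok = ieee80211_tx_prepare_skb(hw, vif, skb, band, &sta);
            /* sta, if set, is only valid while the RCU lock is held */
            rcu_read_unlock();

            return ok;
    }
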
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 0f7558b..31912c3 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -126,19 +126,17 @@
 	struct rcu_head			rcu;
 };
 
-extern int	mrp_register_application(struct mrp_application *app);
-extern void	mrp_unregister_application(struct mrp_application *app);
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
 
-extern int	mrp_init_applicant(struct net_device *dev,
-				    struct mrp_application *app);
-extern void	mrp_uninit_applicant(struct net_device *dev,
-				      struct mrp_application *app);
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
 
-extern int	mrp_request_join(const struct net_device *dev,
-				  const struct mrp_application *app,
-				  const void *value, u8 len, u8 type);
-extern void	mrp_request_leave(const struct net_device *dev,
-				   const struct mrp_application *app,
-				   const void *value, u8 len, u8 type);
+int mrp_request_join(const struct net_device *dev,
+		     const struct mrp_application *app,
+		     const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+		       const struct mrp_application *app,
+		       const void *value, u8 len, u8 type);
 
 #endif /* _NET_MRP_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index ea0cc26..6bbda34 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -110,8 +110,8 @@
 
 #define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
 
-extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
-						 struct ndisc_options *ndopts);
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+					  struct ndisc_options *ndopts);
 
 /*
  * Return the padding between the option length and the start of the
@@ -189,60 +189,51 @@
 	return n;
 }
 
-extern int			ndisc_init(void);
-extern int			ndisc_late_init(void);
+int ndisc_init(void);
+int ndisc_late_init(void);
 
-extern void			ndisc_late_cleanup(void);
-extern void			ndisc_cleanup(void);
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
 
-extern int			ndisc_rcv(struct sk_buff *skb);
+int ndisc_rcv(struct sk_buff *skb);
 
-extern void			ndisc_send_ns(struct net_device *dev,
-					      struct neighbour *neigh,
-					      const struct in6_addr *solicit,
-					      const struct in6_addr *daddr,
-					      const struct in6_addr *saddr);
+void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+		   const struct in6_addr *solicit,
+		   const struct in6_addr *daddr, const struct in6_addr *saddr);
 
-extern void			ndisc_send_rs(struct net_device *dev,
-					      const struct in6_addr *saddr,
-					      const struct in6_addr *daddr);
-extern void			ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-					      const struct in6_addr *daddr,
-					      const struct in6_addr *solicited_addr,
-					      bool router, bool solicited, bool override,
-					      bool inc_opt);
+void ndisc_send_rs(struct net_device *dev,
+		   const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+		   const struct in6_addr *daddr,
+		   const struct in6_addr *solicited_addr,
+		   bool router, bool solicited, bool override, bool inc_opt);
 
-extern void			ndisc_send_redirect(struct sk_buff *skb,
-						    const struct in6_addr *target);
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
 
-extern int			ndisc_mc_map(const struct in6_addr *addr, char *buf,
-					     struct net_device *dev, int dir);
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+		 int dir);
 
 
 /*
  *	IGMP
  */
-extern int			igmp6_init(void);
+int igmp6_init(void);
 
-extern void			igmp6_cleanup(void);
+void igmp6_cleanup(void);
 
-extern int			igmp6_event_query(struct sk_buff *skb);
+int igmp6_event_query(struct sk_buff *skb);
 
-extern int			igmp6_event_report(struct sk_buff *skb);
+int igmp6_event_report(struct sk_buff *skb);
 
 
 #ifdef CONFIG_SYSCTL
-extern int 			ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
-							   int write,
-							   void __user *buffer,
-							   size_t *lenp,
-							   loff_t *ppos);
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+			       void __user *buffer, size_t *lenp, loff_t *ppos);
 int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
 				 void __user *oldval, size_t __user *oldlenp,
 				 void __user *newval, size_t newlen);
 #endif
 
-extern void 			inet6_ifinfo_notify(int event,
-						    struct inet6_dev *idev);
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
 
 #endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9d22f08..da68c9a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -22,6 +22,7 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netns/conntrack.h>
 #endif
+#include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
 
 struct user_namespace;
@@ -101,6 +102,9 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct netns_ct		ct;
 #endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+	struct netns_nftables	nft;
+#endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 	struct netns_nf_frag	nf_frag;
 #endif
@@ -137,8 +141,8 @@
 extern struct net init_net;
 
 #ifdef CONFIG_NET_NS
-extern struct net *copy_net_ns(unsigned long flags,
-	struct user_namespace *user_ns, struct net *old_net);
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+			struct net *old_net);
 
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
@@ -155,11 +159,11 @@
 
 extern struct list_head net_namespace_list;
 
-extern struct net *get_net_ns_by_pid(pid_t pid);
-extern struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_fd(int pid);
 
 #ifdef CONFIG_NET_NS
-extern void __put_net(struct net *net);
+void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
@@ -191,7 +195,7 @@
 	return net1 == net2;
 }
 
-extern void net_drop_ns(void *);
+void net_drop_ns(void *);
 
 #else
 
@@ -308,19 +312,19 @@
  * device which caused kernel oops, and panics during network
  * namespace cleanup.   So please don't get this wrong.
  */
-extern int register_pernet_subsys(struct pernet_operations *);
-extern void unregister_pernet_subsys(struct pernet_operations *);
-extern int register_pernet_device(struct pernet_operations *);
-extern void unregister_pernet_device(struct pernet_operations *);
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
 
 struct ctl_table;
 struct ctl_table_header;
 
 #ifdef CONFIG_SYSCTL
-extern int net_sysctl_init(void);
-extern struct ctl_table_header *register_net_sysctl(struct net *net,
-	const char *path, struct ctl_table *table);
-extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+					     struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
 #else
 static inline int net_sysctl_init(void) { return 0; }
 static inline struct ctl_table_header *register_net_sysctl(struct net *net,
diff --git a/include/net/netevent.h b/include/net/netevent.h
index fe630dd..d8bbb38 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -26,8 +26,8 @@
 	NETEVENT_REDIRECT,	   /* arg is struct netevent_redirect ptr */
 };
 
-extern int register_netevent_notifier(struct notifier_block *nb);
-extern int unregister_netevent_notifier(struct notifier_block *nb);
-extern int call_netevent_notifiers(unsigned long val, void *v);
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
 
 #endif
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 7573d52..6c3d12e 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -16,9 +16,9 @@
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 
-extern int nf_conntrack_ipv4_compat_init(void);
-extern void nf_conntrack_ipv4_compat_fini(void);
+int nf_conntrack_ipv4_compat_init(void);
+void nf_conntrack_ipv4_compat_fini(void);
 
-extern void need_ipv4_conntrack(void);
+void need_ipv4_conntrack(void);
 
 #endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index 6b00ea3..f01ef20 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -1,6 +1,6 @@
 #ifndef _NF_DEFRAG_IPV4_H
 #define _NF_DEFRAG_IPV4_H
 
-extern void nf_defrag_ipv4_enable(void);
+void nf_defrag_ipv4_enable(void);
 
 #endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index fd79c9a..27666d8 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -1,15 +1,12 @@
 #ifndef _NF_DEFRAG_IPV6_H
 #define _NF_DEFRAG_IPV6_H
 
-extern void nf_defrag_ipv6_enable(void);
+void nf_defrag_ipv6_enable(void);
 
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			       struct net_device *in,
-			       struct net_device *out,
-			       int (*okfn)(struct sk_buff *));
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+void nf_ct_frag6_consume_orig(struct sk_buff *skb);
 
 struct inet_frags_ctl;
 
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 0c1288a..01ea6ee 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -139,15 +139,13 @@
 }
 
 /* Alter reply tuple (maybe alter helper). */
-extern void
-nf_conntrack_alter_reply(struct nf_conn *ct,
-			 const struct nf_conntrack_tuple *newreply);
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply);
 
 /* Is this tuple taken? (ignoring any belonging to the given
    conntrack). */
-extern int
-nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conn *ignored_conntrack);
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			     const struct nf_conn *ignored_conntrack);
 
 /* Return conntrack_info and tuple hash for given skb. */
 static inline struct nf_conn *
@@ -165,37 +163,34 @@
 }
 
 /* Protocol module loading */
-extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-extern void nf_ct_l3proto_module_put(unsigned short l3proto);
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
 
 /*
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, unsigned int size);
+void nf_ct_free_hashtable(void *hash, unsigned int size);
 
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
 		    const struct nf_conntrack_tuple *tuple);
 
-extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
-extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
+void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
 
-extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
-			      unsigned int nhoff, u_int16_t l3num,
-			      struct nf_conntrack_tuple *tuple);
-extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
-				 const struct nf_conntrack_tuple *orig);
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+		       u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+			  const struct nf_conntrack_tuple *orig);
 
-extern void __nf_ct_refresh_acct(struct nf_conn *ct,
-				 enum ip_conntrack_info ctinfo,
-				 const struct sk_buff *skb,
-				 unsigned long extra_jiffies,
-				 int do_acct);
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			  const struct sk_buff *skb,
+			  unsigned long extra_jiffies, int do_acct);
 
 /* Refresh conntrack for this many jiffies and do accounting */
 static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -214,10 +209,8 @@
 	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
 }
 
-extern bool __nf_ct_kill_acct(struct nf_conn *ct,
-			      enum ip_conntrack_info ctinfo,
-			      const struct sk_buff *skb,
-			      int do_acct);
+bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       const struct sk_buff *skb, int do_acct);
 
 /* kill conntrack and do accounting */
 static inline bool nf_ct_kill_acct(struct nf_conn *ct,
@@ -244,19 +237,17 @@
 {
 	return &__raw_get_cpu_var(nf_conntrack_untracked);
 }
-extern void nf_ct_untracked_status_or(unsigned long bits);
+void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-nf_ct_iterate_cleanup(struct net *net,
-		      int (*iter)(struct nf_conn *i, void *data),
-		      void *data, u32 portid, int report);
-extern void nf_conntrack_free(struct nf_conn *ct);
-extern struct nf_conn *
-nf_conntrack_alloc(struct net *net, u16 zone,
-		   const struct nf_conntrack_tuple *orig,
-		   const struct nf_conntrack_tuple *repl,
-		   gfp_t gfp);
+void nf_ct_iterate_cleanup(struct net *net,
+			   int (*iter)(struct nf_conn *i, void *data),
+			   void *data, u32 portid, int report);
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+				   const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl,
+				   gfp_t gfp);
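
The iterator contract documented above (a nonzero return from iter deletes the entry) is easiest to see from the calling side. A hedged sketch with a hypothetical mark-based filter (assumes CONFIG_NF_CONNTRACK_MARK for ct->mark):

	/* Hypothetical filter: nonzero return deletes the entry. */
	static int kill_by_mark(struct nf_conn *i, void *data)
	{
		const u32 *mark = data;

		return i->mark == *mark;
	}

	/* usage: nf_ct_iterate_cleanup(net, kill_by_mark, &mark, 0, 0); */
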
 
 static inline int nf_ct_is_template(const struct nf_conn *ct)
 {
@@ -287,7 +278,7 @@
 
 struct kernel_param;
 
-extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 2bdb7a1..79d8d16 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -19,17 +19,21 @@
 	atomic64_t bytes;
 };
 
+struct nf_conn_acct {
+	struct nf_conn_counter counter[IP_CT_DIR_MAX];
+};
+
 static inline
-struct nf_conn_counter *nf_conn_acct_find(const struct nf_conn *ct)
+struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct)
 {
 	return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
 }
 
 static inline
-struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
+struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
 {
 	struct net *net = nf_ct_net(ct);
-	struct nf_conn_counter *acct;
+	struct nf_conn_acct *acct;
 
 	if (!net->ct.sysctl_acct)
 		return NULL;
@@ -42,8 +46,8 @@
 	return acct;
 };
 
-extern unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
+unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
+			    int dir);
 
 /* Check if connection tracking accounting is enabled */
 static inline bool nf_ct_acct_enabled(struct net *net)
@@ -57,9 +61,9 @@
 	net->ct.sysctl_acct = enable;
 }
 
-extern int nf_conntrack_acct_pernet_init(struct net *net);
-extern void nf_conntrack_acct_pernet_fini(struct net *net);
+int nf_conntrack_acct_pernet_init(struct net *net);
+void nf_conntrack_acct_pernet_fini(struct net *net);
 
-extern int nf_conntrack_acct_init(void);
-extern void nf_conntrack_acct_fini(void);
+int nf_conntrack_acct_init(void);
+void nf_conntrack_acct_fini(void);
 #endif /* _NF_CONNTRACK_ACCT_H */
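
The caller-side access pattern implied by the new struct nf_conn_acct wrapper looks roughly like this; a sketch only, with ct, ctinfo and skb assumed to come from the usual hook path:

	/* Sketch: update accounting through the new wrapper type. */
	static void acct_update(struct nf_conn *ct,
				enum ip_conntrack_info ctinfo,
				const struct sk_buff *skb)
	{
		struct nf_conn_acct *acct = nf_conn_acct_find(ct);
		struct nf_conn_counter *counter;

		if (!acct)
			return;

		counter = acct->counter;
		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
		atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
	}
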
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index fb2b623..15308b8 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,49 +20,42 @@
 /* This header is used to share core functionality between the
    standalone connection tracking module, and the compatibility layer's use
    of connection tracking. */
-extern unsigned int nf_conntrack_in(struct net *net,
-				    u_int8_t pf,
-				    unsigned int hooknum,
-				    struct sk_buff *skb);
+unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
+			     struct sk_buff *skb);
 
-extern int nf_conntrack_init_net(struct net *net);
-extern void nf_conntrack_cleanup_net(struct net *net);
-extern void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
 
-extern int nf_conntrack_proto_pernet_init(struct net *net);
-extern void nf_conntrack_proto_pernet_fini(struct net *net);
+int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_fini(struct net *net);
 
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
 
-extern int nf_conntrack_init_start(void);
-extern void nf_conntrack_cleanup_start(void);
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
 
-extern void nf_conntrack_init_end(void);
-extern void nf_conntrack_cleanup_end(void);
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
 
-extern bool
-nf_ct_get_tuple(const struct sk_buff *skb,
-		unsigned int nhoff,
-		unsigned int dataoff,
-		u_int16_t l3num,
-		u_int8_t protonum,
-		struct nf_conntrack_tuple *tuple,
-		const struct nf_conntrack_l3proto *l3proto,
-		const struct nf_conntrack_l4proto *l4proto);
+bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
+		     unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+		     struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_l3proto *l3proto,
+		     const struct nf_conntrack_l4proto *l4proto);
 
-extern bool
-nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
-		   const struct nf_conntrack_tuple *orig,
-		   const struct nf_conntrack_l3proto *l3proto,
-		   const struct nf_conntrack_l4proto *l4proto);
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+			const struct nf_conntrack_tuple *orig,
+			const struct nf_conntrack_l3proto *l3proto,
+			const struct nf_conntrack_l4proto *l4proto);
 
 /* Find a connection corresponding to a tuple. */
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(struct net *net, u16 zone,
 		      const struct nf_conntrack_tuple *tuple);
 
-extern int __nf_conntrack_confirm(struct sk_buff *skb);
+int __nf_conntrack_confirm(struct sk_buff *skb);
 
 /* Confirm a connection: returns NF_DROP if packet must be dropped. */
 static inline int nf_conntrack_confirm(struct sk_buff *skb)
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 092dc65..0e3d08e 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -68,10 +68,12 @@
 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+int nf_conntrack_register_notifier(struct net *net,
+				   struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net,
+				      struct nf_ct_event_notifier *nb);
 
-extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -166,8 +168,10 @@
 	int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+int nf_ct_expect_register_notifier(struct net *net,
+				   struct nf_exp_event_notifier *nb);
+void nf_ct_expect_unregister_notifier(struct net *net,
+				      struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -207,11 +211,11 @@
 	nf_ct_expect_event_report(event, exp, 0, 0);
 }
 
-extern int nf_conntrack_ecache_pernet_init(struct net *net);
-extern void nf_conntrack_ecache_pernet_fini(struct net *net);
+int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
 
-extern int nf_conntrack_ecache_init(void);
-extern void nf_conntrack_ecache_fini(void);
+int nf_conntrack_ecache_init(void);
+void nf_conntrack_ecache_fini(void);
 #else /* CONFIG_NF_CONNTRACK_EVENTS */
 
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 88a1d40..956b175 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -36,7 +36,7 @@
 #define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
 #define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
 #define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
+#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
 #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
 #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
 #define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
@@ -73,7 +73,7 @@
 	((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
 
 /* Destroy all relationships */
-extern void __nf_ct_ext_destroy(struct nf_conn *ct);
+void __nf_ct_ext_destroy(struct nf_conn *ct);
 static inline void nf_ct_ext_destroy(struct nf_conn *ct)
 {
 	if (ct->ext)
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 26c4ae5..6cf614bc 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -52,21 +52,24 @@
 	unsigned int queue_num;		/* For user-space helpers. */
 };
 
-extern struct nf_conntrack_helper *
-__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+						       u16 l3num, u8 protonum);
 
-extern struct nf_conntrack_helper *
-nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+							       u16 l3num,
+							       u8 protonum);
 
-extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
-extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
 
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
+					  struct nf_conntrack_helper *helper,
+					  gfp_t gfp);
 
-extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
-				     gfp_t flags);
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+			      gfp_t flags);
 
-extern void nf_ct_helper_destroy(struct nf_conn *ct);
+void nf_ct_helper_destroy(struct nf_conn *ct);
 
 static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 {
@@ -82,17 +85,16 @@
 	return (void *)help->data;
 }
 
-extern int nf_conntrack_helper_pernet_init(struct net *net);
-extern void nf_conntrack_helper_pernet_fini(struct net *net);
+int nf_conntrack_helper_pernet_init(struct net *net);
+void nf_conntrack_helper_pernet_fini(struct net *net);
 
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
 
-extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
-				       unsigned int protoff,
-				       struct nf_conn *ct,
-				       enum ip_conntrack_info ctinfo,
-				       unsigned int timeout);
+int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
+				struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				unsigned int timeout);
 
 struct nf_ct_helper_expectfn {
 	struct list_head head;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 3bb89ea..3efab70 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -77,17 +77,17 @@
 extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol pernet registration. */
-extern int nf_ct_l3proto_pernet_register(struct net *net,
-					 struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_pernet_unregister(struct net *net,
-					    struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_pernet_register(struct net *net,
+				  struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_pernet_unregister(struct net *net,
+				     struct nf_conntrack_l3proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
-extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
+struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
+void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 
 /* Existing built-in protocols */
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index b411d7b..4c8d573 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -114,22 +114,22 @@
 
 #define MAX_NF_CT_PROTO 256
 
-extern struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
+struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+						  u_int8_t l4proto);
 
-extern struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
-extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
+struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+						    u_int8_t l4proto);
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
 
 /* Protocol pernet registration. */
-extern int nf_ct_l4proto_pernet_register(struct net *net,
-					 struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_pernet_unregister(struct net *net,
-					    struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_pernet_register(struct net *net,
+				  struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister(struct net *net,
+				     struct nf_conntrack_l4proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
 
 static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
 {
@@ -140,11 +140,11 @@
 }
 
 /* Generic netlink helpers */
-extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
-				      const struct nf_conntrack_tuple *tuple);
-extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
-				      struct nf_conntrack_tuple *t);
-extern int nf_ct_port_nlattr_tuple_size(void);
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+			       const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+			       struct nf_conntrack_tuple *t);
+int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
index f6177a5..4b33629 100644
--- a/include/net/netfilter/nf_conntrack_seqadj.h
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -30,22 +30,18 @@
 	return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
 }
 
-extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-			     s32 off);
-extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-			    __be32 seq, s32 off);
-extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
-				 struct nf_conn *ct,
-				 enum ip_conntrack_info ctinfo,
-				 s32 off);
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		      s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		     __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo, s32 off);
 
-extern int nf_ct_seq_adjust(struct sk_buff *skb,
-			    struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-			    unsigned int protoff);
-extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
-			    u32 seq);
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
 
-extern int nf_conntrack_seqadj_init(void);
-extern void nf_conntrack_seqadj_fini(void);
+int nf_conntrack_seqadj_init(void);
+void nf_conntrack_seqadj_fini(void);
 
 #endif /* _NF_CONNTRACK_SEQADJ_H */
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index f572f31..6793614 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -56,22 +56,20 @@
 
 struct tcphdr;
 struct xt_synproxy_info;
-extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
-				   const struct tcphdr *th,
-				   struct synproxy_options *opts);
-extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
-extern void synproxy_build_options(struct tcphdr *th,
-				   const struct synproxy_options *opts);
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+			    const struct tcphdr *th,
+			    struct synproxy_options *opts);
+unsigned int synproxy_options_size(const struct synproxy_options *opts);
+void synproxy_build_options(struct tcphdr *th,
+			    const struct synproxy_options *opts);
 
-extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
-					   struct synproxy_options *opts);
-extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+				    struct synproxy_options *opts);
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
 
-extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
-					   unsigned int protoff,
-					   struct tcphdr *th,
-					   struct nf_conn *ct,
-					   enum ip_conntrack_info ctinfo,
-					   const struct nf_conn_synproxy *synproxy);
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
+				    struct tcphdr *th, struct nf_conn *ct,
+				    enum ip_conntrack_info ctinfo,
+				    const struct nf_conn_synproxy *synproxy);
 
 #endif /* _NF_CONNTRACK_SYNPROXY_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d23aceb..6230871 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -76,8 +76,8 @@
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern int nf_conntrack_timeout_init(void);
-extern void nf_conntrack_timeout_fini(void);
+int nf_conntrack_timeout_init(void);
+void nf_conntrack_timeout_fini(void);
 #else
 static inline int nf_conntrack_timeout_init(void)
 {
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index b004614..300ae22 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -48,11 +48,11 @@
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-extern int nf_conntrack_tstamp_pernet_init(struct net *net);
-extern void nf_conntrack_tstamp_pernet_fini(struct net *net);
+int nf_conntrack_tstamp_pernet_init(struct net *net);
+void nf_conntrack_tstamp_pernet_fini(struct net *net);
 
-extern int nf_conntrack_tstamp_init(void);
-extern void nf_conntrack_tstamp_fini(void);
+int nf_conntrack_tstamp_init(void);
+void nf_conntrack_tstamp_fini(void);
 #else
 static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
 {
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 59a1924..07eaaf6 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -41,13 +41,16 @@
 };
 
 /* Set up the info structure to map into this range. */
-extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-				      const struct nf_nat_range *range,
-				      enum nf_nat_manip_type maniptype);
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+			       const struct nf_nat_range *range,
+			       enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+					      unsigned int hooknum);
 
 /* Is this tuple already taken? (not by us)*/
-extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
-			     const struct nf_conn *ignored_conntrack);
+int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conn *ignored_conntrack);
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 972e1e4..fbfd1ba 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -7,12 +7,10 @@
 /* This header is used to share core functionality between the standalone
    NAT module, and the compatibility layer's use of NAT for masquerading. */
 
-extern unsigned int nf_nat_packet(struct nf_conn *ct,
-				  enum ip_conntrack_info ctinfo,
-				  unsigned int hooknum,
-				  struct sk_buff *skb);
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			   unsigned int hooknum, struct sk_buff *skb);
 
-extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
 				     enum nf_nat_manip_type manip)
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 404324d..01bcc6b 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -7,14 +7,11 @@
 struct sk_buff;
 
 /* These return true or false. */
-extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
-				      struct nf_conn *ct,
-				      enum ip_conntrack_info ctinfo,
-				      unsigned int protoff,
-				      unsigned int match_offset,
-				      unsigned int match_len,
-				      const char *rep_buffer,
-				      unsigned int rep_len, bool adjust);
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo,
+			       unsigned int protoff, unsigned int match_offset,
+			       unsigned int match_len, const char *rep_buffer,
+			       unsigned int rep_len, bool adjust);
 
 static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 					   struct nf_conn *ct,
@@ -30,18 +27,14 @@
 					  rep_buffer, rep_len, true);
 }
 
-extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
-				    struct nf_conn *ct,
-				    enum ip_conntrack_info ctinfo,
-				    unsigned int protoff,
-				    unsigned int match_offset,
-				    unsigned int match_len,
-				    const char *rep_buffer,
-				    unsigned int rep_len);
+int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo,
+			     unsigned int protoff, unsigned int match_offset,
+			     unsigned int match_len, const char *rep_buffer,
+			     unsigned int rep_len);
 
 /* Set up NAT on this expected conntrack so it follows master, but goes
  * to port ct->master->saved_proto. */
-extern void nf_nat_follow_master(struct nf_conn *ct,
-				 struct nf_conntrack_expect *this);
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
 
 #endif
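
nf_nat_follow_master() is normally installed as the expectfn of an expectation built by a NAT helper. A hedged fragment, with exp/ct assumed to be set up by hypothetical helper code and the tuple initialization elided:

	/* Fragment from a hypothetical NAT helper. */
	exp = nf_ct_expect_alloc(ct);
	if (exp == NULL)
		return NF_DROP;
	/* nf_ct_expect_init(exp, ...) fills in the expected tuple */
	exp->expectfn = nf_nat_follow_master;
	if (nf_ct_expect_related(exp) != 0)
		ret = NF_DROP;
	nf_ct_expect_put(exp);
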
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index bd3b97e..5a2919b 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -35,18 +35,15 @@
 				   struct nf_nat_range *range);
 };
 
-extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
-extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
-extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
 
-extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
-					 struct nf_conn *ct,
-					 enum ip_conntrack_info ctinfo,
-					 unsigned int hooknum);
-extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
-					   struct nf_conn *ct,
-					   enum ip_conntrack_info ctinfo,
-					   unsigned int hooknum,
-					   unsigned int hdrlen);
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo,
+				  unsigned int hooknum);
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+				    enum ip_conntrack_info ctinfo,
+				    unsigned int hooknum, unsigned int hdrlen);
 
 #endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index 24feb68..12f4cc8 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -42,10 +42,11 @@
 };
 
 /* Protocol registration. */
-extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+void nf_nat_l4proto_unregister(u8 l3proto,
+			       const struct nf_nat_l4proto *l4proto);
 
-extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
 
 /* Built-in protocols. */
 extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
@@ -54,19 +55,18 @@
 extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
 extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
 
-extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
-				    enum nf_nat_manip_type maniptype,
-				    const union nf_conntrack_man_proto *min,
-				    const union nf_conntrack_man_proto *max);
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+			     enum nf_nat_manip_type maniptype,
+			     const union nf_conntrack_man_proto *min,
+			     const union nf_conntrack_man_proto *max);
 
-extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
-					struct nf_conntrack_tuple *tuple,
-					const struct nf_nat_range *range,
-					enum nf_nat_manip_type maniptype,
-					const struct nf_conn *ct,
-					u16 *rover);
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+				 struct nf_conntrack_tuple *tuple,
+				 const struct nf_nat_range *range,
+				 enum nf_nat_manip_type maniptype,
+				 const struct nf_conn *ct, u16 *rover);
 
-extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
-					  struct nf_nat_range *range);
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+				   struct nf_nat_range *range);
 
 #endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index aaba4bb..c1d5b3e 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,7 +28,7 @@
 
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
-extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644
index 0000000..5a91abf
--- /dev/null
+++ b/include/net/netfilter/nf_tables.h
@@ -0,0 +1,519 @@
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netlink.h>
+
+#define NFT_JUMP_STACK_SIZE	16
+
+struct nft_pktinfo {
+	struct sk_buff			*skb;
+	const struct net_device		*in;
+	const struct net_device		*out;
+	u8				hooknum;
+	u8				nhoff;
+	u8				thoff;
+	/* for x_tables compatibility */
+	struct xt_action_param		xt;
+};
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+				   const struct nf_hook_ops *ops,
+				   struct sk_buff *skb,
+				   const struct net_device *in,
+				   const struct net_device *out)
+{
+	pkt->skb = skb;
+	pkt->in = pkt->xt.in = in;
+	pkt->out = pkt->xt.out = out;
+	pkt->hooknum = pkt->xt.hooknum = ops->hooknum;
+	pkt->xt.family = ops->pf;
+}
+
+struct nft_data {
+	union {
+		u32				data[4];
+		struct {
+			u32			verdict;
+			struct nft_chain	*chain;
+		};
+	};
+} __attribute__((aligned(__alignof__(u64))));
+
+static inline int nft_data_cmp(const struct nft_data *d1,
+			       const struct nft_data *d2,
+			       unsigned int len)
+{
+	return memcmp(d1->data, d2->data, len);
+}
+
+static inline void nft_data_copy(struct nft_data *dst,
+				 const struct nft_data *src)
+{
+	BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
+	*(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
+	*(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+}
+
+static inline void nft_data_debug(const struct nft_data *data)
+{
+	pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
+		 data->data[0], data->data[1],
+		 data->data[2], data->data[3]);
+}
+
+/**
+ *	struct nft_ctx - nf_tables rule/set context
+ *
+ *	@net: net namespace
+ * 	@skb: netlink skb
+ * 	@nlh: netlink message header
+ * 	@afi: address family info
+ * 	@table: the table the chain is contained in
+ * 	@chain: the chain the rule is contained in
+ *	@nla: netlink attributes
+ */
+struct nft_ctx {
+	struct net			*net;
+	const struct sk_buff		*skb;
+	const struct nlmsghdr		*nlh;
+	const struct nft_af_info	*afi;
+	const struct nft_table		*table;
+	const struct nft_chain		*chain;
+	const struct nlattr * const 	*nla;
+};
+
+struct nft_data_desc {
+	enum nft_data_types		type;
+	unsigned int			len;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+		  struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+		  enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+	return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+	return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+}
+
+int nft_validate_input_register(enum nft_registers reg);
+int nft_validate_output_register(enum nft_registers reg);
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+			   const struct nft_data *data,
+			   enum nft_data_types type);
+
+/**
+ *	struct nft_set_elem - generic representation of set elements
+ *
+ *	@cookie: implementation specific element cookie
+ *	@key: element key
+ *	@data: element data (maps only)
+ *	@flags: element flags (end of interval)
+ *
+ *	The cookie can be used to store a handle to the element for subsequent
+ *	removal.
+ */
+struct nft_set_elem {
+	void			*cookie;
+	struct nft_data		key;
+	struct nft_data		data;
+	u32			flags;
+};
+
+struct nft_set;
+struct nft_set_iter {
+	unsigned int	count;
+	unsigned int	skip;
+	int		err;
+	int		(*fn)(const struct nft_ctx *ctx,
+			      const struct nft_set *set,
+			      const struct nft_set_iter *iter,
+			      const struct nft_set_elem *elem);
+};
+
+/**
+ *	struct nft_set_ops - nf_tables set operations
+ *
+ *	@lookup: look up an element within the set
+ *	@insert: insert new element into set
+ *	@remove: remove element from set
+ *	@walk: iterate over all set elements
+ *	@privsize: function to return size of set private data
+ *	@init: initialize private data of new set instance
+ *	@destroy: destroy private data of set instance
+ *	@list: nf_tables_set_ops list node
+ *	@owner: module reference
+ *	@features: features supported by the implementation
+ */
+struct nft_set_ops {
+	bool				(*lookup)(const struct nft_set *set,
+						  const struct nft_data *key,
+						  struct nft_data *data);
+	int				(*get)(const struct nft_set *set,
+					       struct nft_set_elem *elem);
+	int				(*insert)(const struct nft_set *set,
+						  const struct nft_set_elem *elem);
+	void				(*remove)(const struct nft_set *set,
+						  const struct nft_set_elem *elem);
+	void				(*walk)(const struct nft_ctx *ctx,
+						const struct nft_set *set,
+						struct nft_set_iter *iter);
+
+	unsigned int			(*privsize)(const struct nlattr * const nla[]);
+	int				(*init)(const struct nft_set *set,
+						const struct nlattr * const nla[]);
+	void				(*destroy)(const struct nft_set *set);
+
+	struct list_head		list;
+	struct module			*owner;
+	u32				features;
+};
+
+int nft_register_set(struct nft_set_ops *ops);
+void nft_unregister_set(struct nft_set_ops *ops);
+
+/**
+ * 	struct nft_set - nf_tables set instance
+ *
+ *	@list: table set list node
+ *	@bindings: list of set bindings
+ * 	@name: name of the set
+ * 	@ktype: key type (numeric type defined by userspace, not used in the kernel)
+ * 	@dtype: data type (verdict or numeric type defined by userspace)
+ * 	@ops: set ops
+ * 	@flags: set flags
+ * 	@klen: key length
+ * 	@dlen: data length
+ * 	@data: private set data
+ */
+struct nft_set {
+	struct list_head		list;
+	struct list_head		bindings;
+	char				name[IFNAMSIZ];
+	u32				ktype;
+	u32				dtype;
+	/* runtime data below here */
+	const struct nft_set_ops	*ops ____cacheline_aligned;
+	u16				flags;
+	u8				klen;
+	u8				dlen;
+	unsigned char			data[]
+		__attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+	return (void *)set->data;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+				     const struct nlattr *nla);
+
+/**
+ *	struct nft_set_binding - nf_tables set binding
+ *
+ *	@list: set bindings list node
+ *	@chain: chain containing the rule bound to the set
+ *
+ *	A set binding contains all information necessary for validation
+ *	of new elements added to a bound set.
+ */
+struct nft_set_binding {
+	struct list_head		list;
+	const struct nft_chain		*chain;
+};
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+		       struct nft_set_binding *binding);
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+			  struct nft_set_binding *binding);
+
+
+/**
+ *	struct nft_expr_type - nf_tables expression type
+ *
+ *	@select_ops: function to select nft_expr_ops
+ *	@ops: default ops, used when no select_ops function is present
+ *	@list: used internally
+ *	@name: Identifier
+ *	@owner: module reference
+ *	@policy: netlink attribute policy
+ *	@maxattr: highest netlink attribute number
+ */
+struct nft_expr_type {
+	const struct nft_expr_ops	*(*select_ops)(const struct nft_ctx *,
+						       const struct nlattr * const tb[]);
+	const struct nft_expr_ops	*ops;
+	struct list_head		list;
+	const char			*name;
+	struct module			*owner;
+	const struct nla_policy		*policy;
+	unsigned int			maxattr;
+};
+
+/**
+ *	struct nft_expr_ops - nf_tables expression operations
+ *
+ *	@eval: Expression evaluation function
+ *	@size: full expression size, including private data size
+ *	@init: initialization function
+ *	@destroy: destruction function
+ *	@dump: function to dump parameters
+ *	@type: expression type
+ *	@validate: validate expression, called during loop detection
+ *	@data: extra data to attach to this expression operation
+ */
+struct nft_expr;
+struct nft_expr_ops {
+	void				(*eval)(const struct nft_expr *expr,
+						struct nft_data data[NFT_REG_MAX + 1],
+						const struct nft_pktinfo *pkt);
+	unsigned int			size;
+
+	int				(*init)(const struct nft_ctx *ctx,
+						const struct nft_expr *expr,
+						const struct nlattr * const tb[]);
+	void				(*destroy)(const struct nft_expr *expr);
+	int				(*dump)(struct sk_buff *skb,
+						const struct nft_expr *expr);
+	int				(*validate)(const struct nft_ctx *ctx,
+						    const struct nft_expr *expr,
+						    const struct nft_data **data);
+	const struct nft_expr_type	*type;
+	void				*data;
+};
+
+#define NFT_EXPR_MAXATTR		16
+#define NFT_EXPR_SIZE(size)		(sizeof(struct nft_expr) + \
+					 ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ *	struct nft_expr - nf_tables expression
+ *
+ *	@ops: expression ops
+ *	@data: expression private data
+ */
+struct nft_expr {
+	const struct nft_expr_ops	*ops;
+	unsigned char			data[];
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+	return (void *)expr->data;
+}
+
+/**
+ *	struct nft_rule - nf_tables rule
+ *
+ *	@list: used internally
+ *	@rcu_head: used internally for rcu
+ *	@handle: rule handle
+ *	@genmask: generation mask
+ *	@dlen: length of expression data
+ *	@data: expression data
+ */
+struct nft_rule {
+	struct list_head		list;
+	struct rcu_head			rcu_head;
+	u64				handle:46,
+					genmask:2,
+					dlen:16;
+	unsigned char			data[]
+		__attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+/**
+ *	struct nft_rule_trans - nf_tables rule update in transaction
+ *
+ *	@list: used internally
+ *	@rule: rule that needs to be updated
+ *	@chain: chain that this rule belongs to
+ *	@table: table for which this chain applies
+ *	@nlh: netlink header of the message that contain this update
+ *	@family: family expressed as AF_*
+ */
+struct nft_rule_trans {
+	struct list_head		list;
+	struct nft_rule			*rule;
+	const struct nft_chain		*chain;
+	const struct nft_table		*table;
+	const struct nlmsghdr		*nlh;
+	u8				family;
+};
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+	return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+	return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+	return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+/*
+ * The last pointer isn't really necessary, but the compiler isn't able to
+ * determine that the result of nft_expr_last() is always the same since it
+ * can't assume that the dlen value wasn't changed within calls in the loop.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+	for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+	     (expr) != (last); \
+	     (expr) = nft_expr_next(expr))
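
A sketch of the evaluation loop this macro is written for (cf. the chain runtime; verdict handling is elided and the register array is assumed to be set up by the caller):

	/* Sketch: hoisting "last" lets the compiler keep it in a register
	 * across the indirect eval calls. */
	const struct nft_expr *expr, *last;
	struct nft_data data[NFT_REG_MAX + 1];

	nft_rule_for_each_expr(expr, last, rule)
		expr->ops->eval(expr, data, pkt);
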
+
+enum nft_chain_flags {
+	NFT_BASE_CHAIN			= 0x1,
+};
+
+/**
+ *	struct nft_chain - nf_tables chain
+ *
+ *	@rules: list of rules in the chain
+ *	@list: used internally
+ *	@rcu_head: used internally
+ *	@net: net namespace that this chain belongs to
+ *	@table: table that this chain belongs to
+ *	@handle: chain handle
+ *	@flags: bitmask of enum nft_chain_flags
+ *	@use: number of jump references to this chain
+ *	@level: length of longest path to this chain
+ *	@name: name of the chain
+ */
+struct nft_chain {
+	struct list_head		rules;
+	struct list_head		list;
+	struct rcu_head			rcu_head;
+	struct net			*net;
+	struct nft_table		*table;
+	u64				handle;
+	u8				flags;
+	u16				use;
+	u16				level;
+	char				name[NFT_CHAIN_MAXNAMELEN];
+};
+
+enum nft_chain_type {
+	NFT_CHAIN_T_DEFAULT = 0,
+	NFT_CHAIN_T_ROUTE,
+	NFT_CHAIN_T_NAT,
+	NFT_CHAIN_T_MAX
+};
+
+struct nft_stats {
+	u64 bytes;
+	u64 pkts;
+};
+
+/**
+ *	struct nft_base_chain - nf_tables base chain
+ *
+ *	@ops: netfilter hook ops
+ *	@type: chain type
+ *	@policy: default policy
+ *	@stats: per-cpu chain stats
+ *	@chain: the chain
+ */
+struct nft_base_chain {
+	struct nf_hook_ops		ops;
+	enum nft_chain_type		type;
+	u8				policy;
+	struct nft_stats __percpu	*stats;
+	struct nft_chain		chain;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+	return container_of(chain, struct nft_base_chain, chain);
+}
+
+unsigned int nft_do_chain_pktinfo(struct nft_pktinfo *pkt,
+				  const struct nf_hook_ops *ops);
+
+/**
+ *	struct nft_table - nf_tables table
+ *
+ *	@list: used internally
+ *	@chains: chains in the table
+ *	@sets: sets in the table
+ *	@hgenerator: handle generator state
+ *	@use: number of chain references to this table
+ *	@flags: table flag (see enum nft_table_flags)
+ *	@name: name of the table
+ */
+struct nft_table {
+	struct list_head		list;
+	struct list_head		chains;
+	struct list_head		sets;
+	u64				hgenerator;
+	u32				use;
+	u16				flags;
+	char				name[];
+};
+
+/**
+ *	struct nft_af_info - nf_tables address family info
+ *
+ *	@list: used internally
+ *	@family: address family
+ *	@nhooks: number of hooks in this family
+ *	@owner: module owner
+ *	@tables: used internally
+ *	@hooks: hookfn overrides for packet validation
+ */
+struct nft_af_info {
+	struct list_head		list;
+	int				family;
+	unsigned int			nhooks;
+	struct module			*owner;
+	struct list_head		tables;
+	nf_hookfn			*hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_afinfo(struct net *, struct nft_af_info *);
+void nft_unregister_afinfo(struct nft_af_info *);
+
+struct nf_chain_type {
+	unsigned int		hook_mask;
+	const char		*name;
+	enum nft_chain_type	type;
+	nf_hookfn		*fn[NF_MAX_HOOKS];
+	struct module		*me;
+	int			family;
+};
+
+int nft_register_chain_type(struct nf_chain_type *);
+void nft_unregister_chain_type(struct nf_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+#define MODULE_ALIAS_NFT_FAMILY(family)	\
+	MODULE_ALIAS("nft-afinfo-" __stringify(family))
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+	MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+	MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_SET() \
+	MODULE_ALIAS("nft-set")
+
+#endif /* _NET_NF_TABLES_H */
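
To see how nft_register_expr() and MODULE_ALIAS_NFT_EXPR() fit together, here is a hedged skeleton of a hypothetical "foo" expression module; only the wiring is real, the expression itself does nothing:

	/* Hypothetical expression module skeleton. */
	struct nft_foo {
		u8	val;
	};

	static void nft_foo_eval(const struct nft_expr *expr,
				 struct nft_data data[NFT_REG_MAX + 1],
				 const struct nft_pktinfo *pkt)
	{
		const struct nft_foo *priv = nft_expr_priv(expr);

		(void)priv;	/* act on the packet here */
	}

	static struct nft_expr_type nft_foo_type;
	static const struct nft_expr_ops nft_foo_ops = {
		.type	= &nft_foo_type,
		.size	= NFT_EXPR_SIZE(sizeof(struct nft_foo)),
		.eval	= nft_foo_eval,
	};

	static struct nft_expr_type nft_foo_type __read_mostly = {
		.name	= "foo",
		.ops	= &nft_foo_ops,
		.owner	= THIS_MODULE,
	};
	MODULE_ALIAS_NFT_EXPR("foo");

	/* module init/exit call nft_register_expr(&nft_foo_type) and
	 * nft_unregister_expr(&nft_foo_type); the alias lets the core
	 * demand-load this module when a rule references "foo". */
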
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644
index 0000000..cf2b7ae
--- /dev/null
+++ b/include/net/netfilter/nf_tables_core.h
@@ -0,0 +1,42 @@
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+int nft_immediate_module_init(void);
+void nft_immediate_module_exit(void);
+
+struct nft_cmp_fast_expr {
+	u32			data;
+	enum nft_registers	sreg:8;
+	u8			len;
+};
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+
+int nft_cmp_module_init(void);
+void nft_cmp_module_exit(void);
+
+int nft_lookup_module_init(void);
+void nft_lookup_module_exit(void);
+
+int nft_bitwise_module_init(void);
+void nft_bitwise_module_exit(void);
+
+int nft_byteorder_module_init(void);
+void nft_byteorder_module_exit(void);
+
+struct nft_payload {
+	enum nft_payload_bases	base:8;
+	u8			offset;
+	u8			len;
+	enum nft_registers	dreg:8;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+int nft_payload_module_init(void);
+void nft_payload_module_exit(void);
+
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644
index 0000000..1be1c2c
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -0,0 +1,23 @@
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void
+nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+		     const struct nf_hook_ops *ops,
+		     struct sk_buff *skb,
+		     const struct net_device *in,
+		     const struct net_device *out)
+{
+	struct iphdr *ip;
+
+	nft_set_pktinfo(pkt, ops, skb, in, out);
+
+	pkt->xt.thoff = ip_hdrlen(pkt->skb);
+	ip = ip_hdr(pkt->skb);
+	pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644
index 0000000..4a9b88a
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -0,0 +1,30 @@
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+
+static inline int
+nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+		     const struct nf_hook_ops *ops,
+		     struct sk_buff *skb,
+		     const struct net_device *in,
+		     const struct net_device *out)
+{
+	int protohdr, thoff = 0;
+	unsigned short frag_off;
+
+	nft_set_pktinfo(pkt, ops, skb, in, out);
+
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	/* If malformed, drop it */
+	if (protohdr < 0)
+		return -1;
+
+	pkt->xt.thoff = thoff;
+	pkt->xt.fragoff = frag_off;
+
+	return 0;
+}
+
+#endif
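
Unlike the IPv4 variant, this helper can fail, so hook functions are expected to drop on a negative return. A sketch of such a hook (modelled on, but not copied from, the nf_tables IPv6 chain code):

	/* Sketch of an IPv6 base-chain hook. */
	static unsigned int nft_ipv6_hook(const struct nf_hook_ops *ops,
					  struct sk_buff *skb,
					  const struct net_device *in,
					  const struct net_device *out,
					  int (*okfn)(struct sk_buff *))
	{
		struct nft_pktinfo pkt;

		/* malformed header chain: drop instead of evaluating */
		if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
			return NF_DROP;

		return nft_do_chain_pktinfo(&pkt, ops);
	}
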
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 495c71f..79f45e1 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -16,7 +16,7 @@
 	struct rcu_head			rcu;
 };
 
-extern struct xt_rateest *xt_rateest_lookup(const char *name);
-extern void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(const char *name);
+void xt_rateest_put(struct xt_rateest *est);
 
 #endif /* _XT_RATEEST_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 9690b0f..2b47eaa 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -225,44 +225,31 @@
 	u32			portid;
 };
 
-extern int		netlink_rcv_skb(struct sk_buff *skb,
-					int (*cb)(struct sk_buff *,
-						  struct nlmsghdr *));
-extern int		nlmsg_notify(struct sock *sk, struct sk_buff *skb,
-				     u32 portid, unsigned int group, int report,
-				     gfp_t flags);
+int netlink_rcv_skb(struct sk_buff *skb,
+		    int (*cb)(struct sk_buff *, struct nlmsghdr *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+		 unsigned int group, int report, gfp_t flags);
 
-extern int		nla_validate(const struct nlattr *head,
-				     int len, int maxtype,
-				     const struct nla_policy *policy);
-extern int		nla_parse(struct nlattr **tb, int maxtype,
-				  const struct nlattr *head, int len,
-				  const struct nla_policy *policy);
-extern int		nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr *	nla_find(const struct nlattr *head,
-				 int len, int attrtype);
-extern size_t		nla_strlcpy(char *dst, const struct nlattr *nla,
-				    size_t dstsize);
-extern int		nla_memcpy(void *dest, const struct nlattr *src, int count);
-extern int		nla_memcmp(const struct nlattr *nla, const void *data,
-				   size_t size);
-extern int		nla_strcmp(const struct nlattr *nla, const char *str);
-extern struct nlattr *	__nla_reserve(struct sk_buff *skb, int attrtype,
-				      int attrlen);
-extern void *		__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern struct nlattr *	nla_reserve(struct sk_buff *skb, int attrtype,
-				    int attrlen);
-extern void *		nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern void		__nla_put(struct sk_buff *skb, int attrtype,
-				  int attrlen, const void *data);
-extern void		__nla_put_nohdr(struct sk_buff *skb, int attrlen,
-					const void *data);
-extern int		nla_put(struct sk_buff *skb, int attrtype,
-				int attrlen, const void *data);
-extern int		nla_put_nohdr(struct sk_buff *skb, int attrlen,
-				      const void *data);
-extern int		nla_append(struct sk_buff *skb, int attrlen,
-				   const void *data);
+int nla_validate(const struct nlattr *head, int len, int maxtype,
+		 const struct nla_policy *policy);
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+	      int len, const struct nla_policy *policy);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+	       const void *data);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
 
 /**************************************************************************
  * Netlink Messages
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index bf2ec22..ee520cb 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -15,6 +15,10 @@
 struct hlist_head;
 struct fib_table;
 struct sock;
+struct local_ports {
+	seqlock_t	lock;
+	int		range[2];
+};
 
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
@@ -62,10 +66,11 @@
 	int sysctl_icmp_ratemask;
 	int sysctl_icmp_errors_use_inbound_ifaddr;
 
+	struct local_ports sysctl_local_ports;
+
 	int sysctl_tcp_ecn;
 
 	kgid_t sysctl_ping_group_range[2];
-	long sysctl_tcp_mem[3];
 
 	atomic_t dev_addr_genid;
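
The seqlock in struct local_ports lets readers take a consistent snapshot of both ends of the range without blocking writers. A sketch of the reader side (the accessor name is illustrative):

	/* Sketch of the reader side: retry until a stable snapshot. */
	static void get_local_port_range(struct net *net, int *low, int *high)
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
			*low = net->ipv4.sysctl_local_ports.range[0];
			*high = net->ipv4.sysctl_local_ports.range[1];
		} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
	}
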
 
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644
index 0000000..15d056d
--- /dev/null
+++ b/include/net/netns/nftables.h
@@ -0,0 +1,19 @@
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct nft_af_info;
+
+struct netns_nftables {
+	struct list_head	af_info;
+	struct list_head	commit_list;
+	struct nft_af_info	*ipv4;
+	struct nft_af_info	*ipv6;
+	struct nft_af_info	*arp;
+	struct nft_af_info	*bridge;
+	u8			gencursor;
+	u8			genctr;
+};
+
+#endif
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 121dcf8..110350a 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -183,51 +183,50 @@
 extern int  sysctl_netrom_link_fails_count;
 extern int  sysctl_netrom_reset_circuit;
 
-extern int  nr_rx_frame(struct sk_buff *, struct net_device *);
-extern void nr_destroy_socket(struct sock *);
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
 
 /* nr_dev.c */
-extern int  nr_rx_ip(struct sk_buff *, struct net_device *);
-extern void nr_setup(struct net_device *);
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
 
 /* nr_in.c */
-extern int  nr_process_rx_frame(struct sock *, struct sk_buff *);
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* nr_loopback.c */
-extern void nr_loopback_init(void);
-extern void nr_loopback_clear(void);
-extern int  nr_loopback_queue(struct sk_buff *);
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
 
 /* nr_out.c */
-extern void nr_output(struct sock *, struct sk_buff *);
-extern void nr_send_nak_frame(struct sock *);
-extern void nr_kick(struct sock *);
-extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_establish_data_link(struct sock *);
-extern void nr_enquiry_response(struct sock *);
-extern void nr_check_iframes_acked(struct sock *, unsigned short);
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
 
 /* nr_route.c */
-extern void nr_rt_device_down(struct net_device *);
-extern struct net_device *nr_dev_first(void);
-extern struct net_device *nr_dev_get(ax25_address *);
-extern int  nr_rt_ioctl(unsigned int, void __user *);
-extern void nr_link_failed(ax25_cb *, int);
-extern int  nr_route_frame(struct sk_buff *, ax25_cb *);
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
 extern const struct file_operations nr_nodes_fops;
 extern const struct file_operations nr_neigh_fops;
-extern void nr_rt_free(void);
+void nr_rt_free(void);
 
 /* nr_subr.c */
-extern void nr_clear_queues(struct sock *);
-extern void nr_frames_acked(struct sock *, unsigned short);
-extern void nr_requeue_frames(struct sock *);
-extern int  nr_validate_nr(struct sock *, unsigned short);
-extern int  nr_in_rx_window(struct sock *, unsigned short);
-extern void nr_write_internal(struct sock *, int);
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
 
-extern void __nr_transmit_reply(struct sk_buff *skb, int mine,
-	unsigned char cmdflags);
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
 
 /*
  * This routine is called when a Connect Acknowledge with the Choke Flag
@@ -247,24 +246,24 @@
 	__nr_transmit_reply((skb), (mine), NR_RESET);			\
 } while (0)
 
-extern void nr_disconnect(struct sock *, int);
+void nr_disconnect(struct sock *, int);
 
 /* nr_timer.c */
-extern void nr_init_timers(struct sock *sk);
-extern void nr_start_heartbeat(struct sock *);
-extern void nr_start_t1timer(struct sock *);
-extern void nr_start_t2timer(struct sock *);
-extern void nr_start_t4timer(struct sock *);
-extern void nr_start_idletimer(struct sock *);
-extern void nr_stop_heartbeat(struct sock *);
-extern void nr_stop_t1timer(struct sock *);
-extern void nr_stop_t2timer(struct sock *);
-extern void nr_stop_t4timer(struct sock *);
-extern void nr_stop_idletimer(struct sock *);
-extern int  nr_t1timer_running(struct sock *);
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+	/* This uses the crypto implementation of crc32c, which either comes
+	 * with hardware support or resolves to __crc32c_le().
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-extern void nr_register_sysctl(void);
-extern void nr_unregister_sysctl(void);
+void nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
 
 #endif
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
new file mode 100644
index 0000000..36acecd
--- /dev/null
+++ b/include/net/nfc/digital.h
@@ -0,0 +1,227 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __NFC_DIGITAL_H
+#define __NFC_DIGITAL_H
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/**
+ * Configuration types for in_configure_hw and tg_configure_hw.
+ */
+enum {
+	NFC_DIGITAL_CONFIG_RF_TECH = 0,
+	NFC_DIGITAL_CONFIG_FRAMING,
+};
+
+/**
+ * RF technology values passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type.
+ */
+enum {
+	NFC_DIGITAL_RF_TECH_106A = 0,
+	NFC_DIGITAL_RF_TECH_212F,
+	NFC_DIGITAL_RF_TECH_424F,
+
+	NFC_DIGITAL_RF_TECH_LAST,
+};
+
+/**
+ * Framing configuration passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type.
+ */
+enum {
+	NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
+	NFC_DIGITAL_FRAMING_NFCA_STANDARD,
+	NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+
+	NFC_DIGITAL_FRAMING_NFCA_T1T,
+	NFC_DIGITAL_FRAMING_NFCA_T2T,
+	NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
+
+	NFC_DIGITAL_FRAMING_NFCF,
+	NFC_DIGITAL_FRAMING_NFCF_T3T,
+	NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
+	NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
+
+	NFC_DIGITAL_FRAMING_LAST,
+};
+
+#define DIGITAL_MDAA_NFCID1_SIZE 3
+
+struct digital_tg_mdaa_params {
+	u16 sens_res;
+	u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE];
+	u8 sel_res;
+
+	u8 nfcid2[NFC_NFCID2_MAXSIZE];
+	u16 sc;
+};
+
+struct nfc_digital_dev;
+
+/**
+ * nfc_digital_cmd_complete_t - Definition of command result callback
+ *
+ * @ddev: nfc_digital_device ref
+ * @arg: user data
+ * @resp: response data
+ *
+ * The resp pointer can carry an error code and is checked with the IS_ERR()
+ * macro. The callback is responsible for freeing the resp sk_buff.
+ */
+typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
+					   void *arg, struct sk_buff *resp);
+
+/**
+ * Device side NFC Digital operations
+ *
+ * Initiator mode:
+ * @in_configure_hw: Hardware configuration for RF technology and communication
+ *	framing in initiator mode. This is a synchronous function.
+ * @in_send_cmd: Initiator mode data exchange using RF technology and framing
+ *	previously set with in_configure_hw. The peer response is returned
+ *	through the cb callback. If an I/O error occurs or the peer does not
+ *	reply within the specified timeout (in ms), the error code is passed
+ *	back through the resp pointer. This is an asynchronous function.
+ *
+ * Target mode: only the NFC-DEP protocol is supported in target mode.
+ * @tg_configure_hw: Hardware configuration for RF technology and communication
+ *	framing in target mode. This is a synchronous function.
+ * @tg_send_cmd: Target mode data exchange using RF technology and framing
+ *	previously set with tg_configure_hw. The peer next command is returned
+ *	previously set with tg_configure_hw. The peer's next command is
+ *	returned through the cb callback. If an I/O error occurs or the peer
+ *	does not reply within the specified timeout (in ms), the error code is
+ *	passed back through the resp pointer. This is an asynchronous function.
+ *	device. This is an asynchronous function.
+ * @tg_listen_mdaa: If supported, put the device in automatic listen mode with
+ *	mode detection and automatic anti-collision. In this mode, the device
+ *	automatically detects the RF technology and executes the anti-collision
+ *	detection using the command responses specified in mdaa_params. The
+ *	mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for 106A RF
+ *	mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for the
+ *	106A RF tech, and NFCID2 and system code (sc) for 212F and 424F. The
+ *	driver returns the NFC-DEP ATR_REQ command through cb. The digital
+ *	stack deduces the RF tech by analyzing the SoD of the frame containing
+ *	the ATR_REQ command.
+ *
+ * @switch_rf: Turn the device radio on or off. The stack does not explicitly
+ *	call switch_rf to turn the radio on; a call to in|tg_configure_hw must
+ *	turn the device radio on.
+ * @abort_cmd: Discard the last sent command.
+ */
+struct nfc_digital_ops {
+	int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type,
+			       int param);
+	int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+			   u16 timeout, nfc_digital_cmd_complete_t cb,
+			   void *arg);
+
+	int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type,
+			       int param);
+	int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+			   u16 timeout, nfc_digital_cmd_complete_t cb,
+			   void *arg);
+	int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout,
+			 nfc_digital_cmd_complete_t cb, void *arg);
+	int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev,
+			      struct digital_tg_mdaa_params *mdaa_params,
+			      u16 timeout, nfc_digital_cmd_complete_t cb,
+			      void *arg);
+
+	int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
+	void (*abort_cmd)(struct nfc_digital_dev *ddev);
+};
+
+#define NFC_DIGITAL_POLL_MODE_COUNT_MAX	6 /* 106A, 212F, and 424F in & tg */
+
+typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+struct digital_poll_tech {
+	u8 rf_tech;
+	digital_poll_t poll_func;
+};
+
+/**
+ * Driver capabilities - bit mask made of the following values
+ *
+ * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator
+ *	mode.
+ * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target
+ *	mode.
+ */
+#define NFC_DIGITAL_DRV_CAPS_IN_CRC	0x0001
+#define NFC_DIGITAL_DRV_CAPS_TG_CRC	0x0002
+
+struct nfc_digital_dev {
+	struct nfc_dev *nfc_dev;
+	struct nfc_digital_ops *ops;
+
+	u32 protocols;
+
+	int tx_headroom;
+	int tx_tailroom;
+
+	u32 driver_capabilities;
+	void *driver_data;
+
+	struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX];
+	u8 poll_tech_count;
+	u8 poll_tech_index;
+	struct mutex poll_lock;
+
+	struct work_struct cmd_work;
+	struct work_struct cmd_complete_work;
+	struct list_head cmd_queue;
+	struct mutex cmd_lock;
+
+	struct work_struct poll_work;
+
+	u8 curr_protocol;
+	u8 curr_rf_tech;
+	u8 curr_nfc_dep_pni;
+
+	int (*skb_check_crc)(struct sk_buff *skb);
+	void (*skb_add_crc)(struct sk_buff *skb);
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+						    __u32 supported_protocols,
+						    __u32 driver_capabilities,
+						    int tx_headroom,
+						    int tx_tailroom);
+void nfc_digital_free_device(struct nfc_digital_dev *ndev);
+int nfc_digital_register_device(struct nfc_digital_dev *ndev);
+void nfc_digital_unregister_device(struct nfc_digital_dev *ndev);
+
+static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev,
+					      struct device *dev)
+{
+	nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev,
+					   void *data)
+{
+	dev->driver_data = data;
+}
+
+static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev)
+{
+	return dev->driver_data;
+}
+
+#endif /* __NFC_DIGITAL_H */
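
Taken together, the allocation/registration API above implies a driver skeleton along these lines. All foo_* names are hypothetical and most ops are elided; only the nfc_digital_* calls and NFC_DIGITAL_DRV_CAPS_IN_CRC come from this header, and NFC_PROTO_NFC_DEP_MASK is assumed from the NFC uapi:

static struct nfc_digital_ops foo_digital_ops = {
	/* .in_configure_hw, .in_send_cmd, .tg_configure_hw, .tg_send_cmd,
	 * .tg_listen, .switch_rf, ... supplied by the driver.
	 */
};

static int foo_register(struct device *dev, void *phy_priv)
{
	struct nfc_digital_dev *ddev;
	int rc;

	/* Protocol mask and head/tail room values are illustrative. */
	ddev = nfc_digital_allocate_device(&foo_digital_ops,
					   NFC_PROTO_NFC_DEP_MASK,
					   NFC_DIGITAL_DRV_CAPS_IN_CRC, 0, 0);
	if (!ddev)
		return -ENOMEM;

	nfc_digital_set_parent_dev(ddev, dev);
	nfc_digital_set_drvdata(ddev, phy_priv);

	rc = nfc_digital_register_device(ddev);
	if (rc)
		nfc_digital_free_device(ddev);

	return rc;
}
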
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index b64b7bc..2eca296 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -24,12 +24,6 @@
 
 #include <net/nfc/nfc.h>
 
-struct nfc_phy_ops {
-	int (*write)(void *dev_id, struct sk_buff *skb);
-	int (*enable)(void *dev_id);
-	void (*disable)(void *dev_id);
-};
-
 struct nfc_hci_dev;
 
 struct nfc_hci_ops {
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 88785e5..e5aa5ac 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -166,6 +166,10 @@
 #define NCI_GID_NFCEE_MGMT					0x2
 #define NCI_GID_PROPRIETARY					0xf
 
+/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */
+#define NCI_SPI_HDR_LEN						4
+#define NCI_SPI_CRC_LEN						2
+
 /* ---- NCI Packet structures ---- */
 #define NCI_CTRL_HDR_SIZE					3
 #define NCI_DATA_HDR_SIZE					3
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 99fc1f3..6126f1f 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -207,19 +207,9 @@
 #define NCI_SPI_CRC_ENABLED	0x01
 
 /* ----- NCI SPI structures ----- */
-struct nci_spi_dev;
-
-struct nci_spi_ops {
-	int (*open)(struct nci_spi_dev *ndev);
-	int (*close)(struct nci_spi_dev *ndev);
-	void (*assert_int)(struct nci_spi_dev *ndev);
-	void (*deassert_int)(struct nci_spi_dev *ndev);
-};
-
-struct nci_spi_dev {
-	struct nci_dev		*nci_dev;
+struct nci_spi {
+	struct nci_dev		*ndev;
 	struct spi_device	*spi;
-	struct nci_spi_ops	*ops;
 
 	unsigned int		xfer_udelay;	/* microseconds delay between
 						  transactions */
@@ -227,31 +217,15 @@
 
 	struct completion	req_completion;
 	u8			req_result;
-
-	void			*driver_data;
 };
 
-/* ----- NCI SPI Devices ----- */
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
-						struct nci_spi_ops *ops,
-						u32 supported_protocols,
-						u32 supported_se,
-						u8 acknowledge_mode,
-						unsigned int delay);
-void nci_spi_free_device(struct nci_spi_dev *ndev);
-int nci_spi_register_device(struct nci_spi_dev *ndev);
-void nci_spi_unregister_device(struct nci_spi_dev *ndev);
-int nci_spi_recv_frame(struct nci_spi_dev *ndev);
-
-static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev,
-					    void *data)
-{
-	ndev->driver_data = data;
-}
-
-static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev)
-{
-	return ndev->driver_data;
-}
+/* ----- NCI SPI ----- */
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+				     u8 acknowledge_mode, unsigned int delay,
+				     struct nci_dev *ndev);
+int nci_spi_send(struct nci_spi *nspi,
+		 struct completion *write_handshake_completion,
+		 struct sk_buff *skb);
+struct sk_buff *nci_spi_read(struct nci_spi *nspi);
 
 #endif /* __NCI_CORE_H */
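
The slimmed-down SPI interface reads naturally as: allocate a struct nci_spi bound to an existing struct nci_dev, push outgoing frames with nci_spi_send() (optionally waiting on a write-handshake completion), and pull incoming frames with nci_spi_read(). A sketch of the receive side under those assumptions (foo_* is hypothetical; nci_recv_frame() is the usual NCI core inbound entry point):

/* Assumes <linux/interrupt.h> for irqreturn_t / IRQ_* values. */
static irqreturn_t foo_spi_irq(int irq, void *data)
{
	struct nci_spi *nspi = data;
	struct sk_buff *skb;

	skb = nci_spi_read(nspi);
	if (!skb)
		return IRQ_NONE;

	/* Hand the reassembled NCI frame to the core. */
	nci_recv_frame(nspi->ndev, skb);

	return IRQ_HANDLED;
}
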
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f68ee68..82fc4e4 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -28,9 +28,14 @@
 #include <linux/device.h>
 #include <linux/skbuff.h>
 
-#define nfc_dev_info(dev, fmt, arg...) dev_info((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_err(dev, fmt, arg...) dev_err((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_dbg(dev, fmt, arg...) dev_dbg((dev), fmt "\n", ## arg)
+#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
+
+struct nfc_phy_ops {
+	int (*write)(void *dev_id, struct sk_buff *skb);
+	int (*enable)(void *dev_id);
+	void (*disable)(void *dev_id);
+};
 
 struct nfc_dev;
 
@@ -48,6 +53,8 @@
 typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
 								int err);
 
+typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err);
+
 struct nfc_target;
 
 struct nfc_ops {
@@ -74,12 +81,23 @@
 	int (*discover_se)(struct nfc_dev *dev);
 	int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
 	int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
+	int (*se_io) (struct nfc_dev *dev, u32 se_idx,
+		      u8 *apdu, size_t apdu_length,
+		      se_io_cb_t cb, void *cb_context);
 };
 
 #define NFC_TARGET_IDX_ANY -1
 #define NFC_MAX_GT_LEN 48
 #define NFC_ATR_RES_GT_OFFSET 15
 
+/**
+ * struct nfc_target - NFC target description
+ *
+ * @sens_res: 2 bytes describing the target SENS_RES response, if the target
+ *	is a type A one. The most significant byte of %sens_res must be byte 2
+ *	as described by the NFC Forum Digital specification (i.e. the platform
+ *	configuration byte), while the least significant byte of %sens_res is
+ *	byte 1.
+ */
 struct nfc_target {
 	u32 idx;
 	u32 supported_protocols;
@@ -243,5 +261,6 @@
 
 int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
 int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
 
 #endif /* __NET_NFC_H */
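
The new se_io hook gives the core an asynchronous APDU path to a secure element. A sketch of how a driver might wire it up (foo_* names are placeholders; only the types and the se_io signature come from this header):

static int foo_se_io(struct nfc_dev *dev, u32 se_idx, u8 *apdu,
		     size_t apdu_length, se_io_cb_t cb, void *cb_context)
{
	/* Start the transceive towards secure element se_idx and arrange for
	 * cb(cb_context, rsp_apdu, rsp_len, 0) to run once the SE answers
	 * (or cb(cb_context, NULL, 0, -EIO) on failure).
	 */
	return 0;
}

static struct nfc_ops foo_nfc_ops = {
	/* ... the other mandatory ops ... */
	.se_io = foo_se_io,
};
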
diff --git a/include/net/p8022.h b/include/net/p8022.h
index 42e9fac..05e4138 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -1,13 +1,13 @@
 #ifndef _NET_P8022_H
 #define _NET_P8022_H
-extern struct datalink_proto *
-	register_8022_client(unsigned char type,
-			     int (*func)(struct sk_buff *skb,
-					 struct net_device *dev,
-					 struct packet_type *pt,
-					 struct net_device *orig_dev));
-extern void unregister_8022_client(struct datalink_proto *proto);
+struct datalink_proto *
+register_8022_client(unsigned char type,
+		     int (*func)(struct sk_buff *skb,
+				 struct net_device *dev,
+				 struct packet_type *pt,
+				 struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
 
-extern struct datalink_proto *make_8023_client(void);
-extern void destroy_8023_client(struct datalink_proto *dl);
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
 #endif
diff --git a/include/net/ping.h b/include/net/ping.h
index 5db0224..3f67704 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -103,8 +103,8 @@
 int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
 void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
 
-extern int __init ping_proc_init(void);
-extern void ping_proc_exit(void);
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
 #endif
 
 void __init ping_init(void);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 047c047..fbf7676 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -96,20 +96,20 @@
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
-extern int	inet_add_protocol(const struct net_protocol *prot, unsigned char num);
-extern int	inet_del_protocol(const struct net_protocol *prot, unsigned char num);
-extern int	inet_add_offload(const struct net_offload *prot, unsigned char num);
-extern int	inet_del_offload(const struct net_offload *prot, unsigned char num);
-extern void	inet_register_protosw(struct inet_protosw *p);
-extern void	inet_unregister_protosw(struct inet_protosw *p);
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
 
 #if IS_ENABLED(CONFIG_IPV6)
-extern int	inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int	inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int	inet6_register_protosw(struct inet_protosw *p);
-extern void	inet6_unregister_protosw(struct inet_protosw *p);
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
 #endif
-extern int	inet6_add_offload(const struct net_offload *prot, unsigned char num);
-extern int	inet6_del_offload(const struct net_offload *prot, unsigned char num);
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
 
 #endif	/* _PROTOCOL_H */
diff --git a/include/net/psnap.h b/include/net/psnap.h
index fe456c2..78db4cc 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -1,11 +1,11 @@
 #ifndef _NET_PSNAP_H
 #define _NET_PSNAP_H
 
-extern struct datalink_proto *
+struct datalink_proto *
 register_snap_client(const unsigned char *desc,
 		     int (*rcvfunc)(struct sk_buff *, struct net_device *,
 				    struct packet_type *,
 				    struct net_device *orig_dev));
-extern void unregister_snap_client(struct datalink_proto *proto);
+void unregister_snap_client(struct datalink_proto *proto);
 
 #endif
diff --git a/include/net/raw.h b/include/net/raw.h
index 42ce6fe..6a40c65 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -26,7 +26,7 @@
 void raw_icmp_error(struct sk_buff *, int, u32);
 int raw_local_deliver(struct sk_buff *, int);
 
-extern int 	raw_rcv(struct sock *, struct sk_buff *);
+int raw_rcv(struct sock *, struct sk_buff *);
 
 #define RAW_HTABLE_SIZE	MAX_INET_PROTOS
 
@@ -36,8 +36,8 @@
 };
 
 #ifdef CONFIG_PROC_FS
-extern int  raw_proc_init(void);
-extern void raw_proc_exit(void);
+int raw_proc_init(void);
+void raw_proc_exit(void);
 
 struct raw_iter_state {
 	struct seq_net_private p;
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index e7ea660..87783de 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -7,8 +7,7 @@
 		u8 type, u8 code, int inner_offset, __be32);
 bool raw6_local_deliver(struct sk_buff *, int);
 
-extern int			rawv6_rcv(struct sock *sk,
-					  struct sk_buff *skb);
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 59795e4..7f830ff 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -43,11 +43,12 @@
 					   struct request_sock *req);
 };
 
-extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
 struct request_sock {
+	struct sock_common		__req_common;
 	struct request_sock		*dl_next;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
@@ -162,13 +163,13 @@
 					     */
 };
 
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-			     unsigned int nr_table_entries);
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+		      unsigned int nr_table_entries);
 
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_fastopen_remove(struct sock *sk,
-				  struct request_sock *req, bool reset);
+void __reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+			   bool reset);
 
 static inline struct request_sock *
 	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
diff --git a/include/net/rose.h b/include/net/rose.h
index 555dd19..50811fe 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -160,38 +160,42 @@
 extern int  sysctl_rose_link_fail_timeout;
 extern int  sysctl_rose_maximum_vcs;
 extern int  sysctl_rose_window_size;
-extern int  rosecmp(rose_address *, rose_address *);
-extern int  rosecmpm(rose_address *, rose_address *, unsigned short);
-extern char *rose2asc(char *buf, const rose_address *);
-extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
-extern void rose_kill_by_neigh(struct rose_neigh *);
-extern unsigned int rose_new_lci(struct rose_neigh *);
-extern int  rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
-extern void rose_destroy_socket(struct sock *);
+
+int rosecmp(rose_address *, rose_address *);
+int rosecmpm(rose_address *, rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+			 struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
 
 /* rose_dev.c */
-extern void  rose_setup(struct net_device *);
+void rose_setup(struct net_device *);
 
 /* rose_in.c */
-extern int  rose_process_rx_frame(struct sock *, struct sk_buff *);
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* rose_link.c */
-extern void rose_start_ftimer(struct rose_neigh *);
-extern void rose_stop_ftimer(struct rose_neigh *);
-extern void rose_stop_t0timer(struct rose_neigh *);
-extern int  rose_ftimer_running(struct rose_neigh *);
-extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
-extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
-extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+			  unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+				 unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
 
 /* rose_loopback.c */
-extern void rose_loopback_init(void);
-extern void rose_loopback_clear(void);
-extern int  rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
 
 /* rose_out.c */
-extern void rose_kick(struct sock *);
-extern void rose_enquiry_response(struct sock *);
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
 
 /* rose_route.c */
 extern struct rose_neigh *rose_loopback_neigh;
@@ -199,43 +203,45 @@
 extern const struct file_operations rose_nodes_fops;
 extern const struct file_operations rose_routes_fops;
 
-extern void rose_add_loopback_neigh(void);
-extern int __must_check rose_add_loopback_node(rose_address *);
-extern void rose_del_loopback_node(rose_address *);
-extern void rose_rt_device_down(struct net_device *);
-extern void rose_link_device_down(struct net_device *);
-extern struct net_device *rose_dev_first(void);
-extern struct net_device *rose_dev_get(rose_address *);
-extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
-extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
-extern int  rose_rt_ioctl(unsigned int, void __user *);
-extern void rose_link_failed(ax25_cb *, int);
-extern int  rose_route_frame(struct sk_buff *, ax25_cb *);
-extern void rose_rt_free(void);
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(rose_address *);
+void rose_del_loopback_node(rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+				  unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
 
 /* rose_subr.c */
-extern void rose_clear_queues(struct sock *);
-extern void rose_frames_acked(struct sock *, unsigned short);
-extern void rose_requeue_frames(struct sock *);
-extern int  rose_validate_nr(struct sock *, unsigned short);
-extern void rose_write_internal(struct sock *, int);
-extern int  rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int  rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
-extern void rose_disconnect(struct sock *, int, int, int);
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+			  struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
 
 /* rose_timer.c */
-extern void rose_start_heartbeat(struct sock *);
-extern void rose_start_t1timer(struct sock *);
-extern void rose_start_t2timer(struct sock *);
-extern void rose_start_t3timer(struct sock *);
-extern void rose_start_hbtimer(struct sock *);
-extern void rose_start_idletimer(struct sock *);
-extern void rose_stop_heartbeat(struct sock *);
-extern void rose_stop_timer(struct sock *);
-extern void rose_stop_idletimer(struct sock *);
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
 
 /* sysctl_net_rose.c */
-extern void rose_register_sysctl(void);
-extern void rose_unregister_sysctl(void);
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
 
 #endif
diff --git a/include/net/route.h b/include/net/route.h
index afdeeb5..f68c167 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -39,6 +39,7 @@
 #define RTO_ONLINK	0x01
 
 #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk, tos)  (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
 
 struct fib_nh;
 struct fib_info;
@@ -87,34 +88,28 @@
 };
 
 struct rt_cache_stat {
-        unsigned int in_hit;
         unsigned int in_slow_tot;
         unsigned int in_slow_mc;
         unsigned int in_no_route;
         unsigned int in_brd;
         unsigned int in_martian_dst;
         unsigned int in_martian_src;
-        unsigned int out_hit;
         unsigned int out_slow_tot;
         unsigned int out_slow_mc;
-        unsigned int gc_total;
-        unsigned int gc_ignored;
-        unsigned int gc_goal_miss;
-        unsigned int gc_dst_overflow;
-        unsigned int in_hlist_search;
-        unsigned int out_hlist_search;
 };
 
 extern struct ip_rt_acct __percpu *ip_rt_acct;
 
 struct in_device;
-extern int		ip_rt_init(void);
-extern void		rt_cache_flush(struct net *net);
-extern void		rt_flush_dev(struct net_device *dev);
-extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
-extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
-					   struct sock *sk);
-extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+				    struct sock *sk);
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+				       struct dst_entry *dst_orig);
 
 static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
 {
@@ -162,8 +157,8 @@
 	return ip_route_output_key(net, fl4);
 }
 
-extern int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
-				u8 tos, struct net_device *devin);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+			 u8 tos, struct net_device *devin);
 
 static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
 				 u8 tos, struct net_device *devin)
@@ -179,24 +174,25 @@
 	return err;
 }
 
-extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
-			     int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-extern void ipv4_redirect(struct sk_buff *skb, struct net *net,
-			  int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+		      u32 mark, u8 protocol, int flow_flags);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+		   u8 protocol, int flow_flags);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
 
-extern unsigned int		inet_addr_type(struct net *net, __be32 addr);
-extern unsigned int		inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
-extern void		ip_rt_multicast_event(struct in_device *);
-extern int		ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
-extern void		ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
-extern int		ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+				__be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 struct in_ifaddr;
-extern void fib_add_ifaddr(struct in_ifaddr *);
-extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
@@ -317,12 +313,20 @@
 	return hoplimit;
 }
 
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+static inline bool ip_sk_accept_pmtu(const struct sock *sk)
 {
-	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE;
+}
 
-	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+static inline bool ip_sk_use_pmtu(const struct sock *sk)
+{
+	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+}
+
+static inline int ip_skb_dst_mtu(const struct sk_buff *skb)
+{
+	return (!skb->sk || ip_sk_use_pmtu(skb->sk)) ?
+	       dst_mtu(skb_dst(skb)) : skb_dst(skb)->dev->mtu;
 }
 
 #endif	/* _ROUTE_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 7026648..bb13a18 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -8,14 +8,12 @@
 typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
 typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
 
-extern int	__rtnl_register(int protocol, int msgtype,
-				rtnl_doit_func, rtnl_dumpit_func,
-				rtnl_calcit_func);
-extern void	rtnl_register(int protocol, int msgtype,
-			      rtnl_doit_func, rtnl_dumpit_func,
-			      rtnl_calcit_func);
-extern int	rtnl_unregister(int protocol, int msgtype);
-extern void	rtnl_unregister_all(int protocol);
+int __rtnl_register(int protocol, int msgtype,
+		    rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+void rtnl_register(int protocol, int msgtype,
+		   rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
 
 static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
 {
@@ -83,11 +81,11 @@
 	unsigned int		(*get_num_rx_queues)(void);
 };
 
-extern int	__rtnl_link_register(struct rtnl_link_ops *ops);
-extern void	__rtnl_link_unregister(struct rtnl_link_ops *ops);
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
 
-extern int	rtnl_link_register(struct rtnl_link_ops *ops);
-extern void	rtnl_link_unregister(struct rtnl_link_ops *ops);
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
 
 /**
  * 	struct rtnl_af_ops - rtnetlink address family operations
@@ -117,18 +115,18 @@
 					       const struct nlattr *attr);
 };
 
-extern int	__rtnl_af_register(struct rtnl_af_ops *ops);
-extern void	__rtnl_af_unregister(struct rtnl_af_ops *ops);
+int __rtnl_af_register(struct rtnl_af_ops *ops);
+void __rtnl_af_unregister(struct rtnl_af_ops *ops);
 
-extern int	rtnl_af_register(struct rtnl_af_ops *ops);
-extern void	rtnl_af_unregister(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+				    const struct rtnl_link_ops *ops,
+				    struct nlattr *tb[]);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
-extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-extern struct net_device *rtnl_create_link(struct net *net,
-	char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
-extern int rtnl_configure_link(struct net_device *dev,
-			       const struct ifinfomsg *ifm);
 extern const struct nla_policy ifla_policy[IFLA_MAX+1];
 
 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f4eb365..d0a6321 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -702,13 +702,20 @@
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-			       const struct tc_ratespec *conf);
+			       const struct tc_ratespec *conf,
+			       u64 rate64);
 
 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
 					  const struct psched_ratecfg *r)
 {
 	memset(res, 0, sizeof(*res));
-	res->rate = r->rate_bytes_ps;
+
+	/* The legacy struct tc_ratespec has a 32bit @rate field;
+	 * qdiscs using 64bit rates should add new attributes
+	 * in order to maintain compatibility.
+	 */
+	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
 	res->overhead = r->overhead;
 	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
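
The comment above points at the compatibility pattern for rates beyond ~4.3Gbit/s: keep filling the clamped legacy 32bit field, and additionally emit a 64bit netlink attribute when the rate does not fit. A sketch under that assumption (TCA_FOO_RATE64 is a made-up attribute id, not part of this patch):

#define TCA_FOO_RATE64	64	/* hypothetical netlink attribute id */

static int foo_dump_rate(struct sk_buff *skb, const struct psched_ratecfg *r,
			 struct tc_ratespec *res)
{
	psched_ratecfg_getrate(res, r);	/* res->rate clamped to ~0U */

	if (r->rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_FOO_RATE64, r->rate_bytes_ps))
		return -EMSGSIZE;

	return 0;
}
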
diff --git a/include/net/scm.h b/include/net/scm.h
index 8de2d37..262532d 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -33,11 +33,11 @@
 #endif
 };
 
-extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
-extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
-extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
-extern void __scm_destroy(struct scm_cookie *scm);
-extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
 
 #ifdef CONFIG_SECURITY_NETWORK
 static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 259924d..6bd44fe 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -42,56 +42,38 @@
 #include <linux/types.h>
 #include <net/sctp/sctp.h>
 #include <linux/crc32c.h>
+#include <linux/crc32.h>
 
-static inline __u32 sctp_crc32c(__u32 crc, u8 *buffer, u16 length)
+static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
 {
-	return crc32c(crc, buffer, length);
-}
-
-static inline __u32 sctp_start_cksum(__u8 *buffer, __u16 length)
-{
-	__u32 crc = ~(__u32)0;
-	__u8  zero[sizeof(__u32)] = {0};
-
-	/* Optimize this routine to be SCTP specific, knowing how
-	 * to skip the checksum field of the SCTP header.
+	/* This uses the crypto implementation of crc32c, which is either
+	 * implemented w/ hardware support or resolves to __crc32c_le().
 	 */
-
-	/* Calculate CRC up to the checksum. */
-	crc = sctp_crc32c(crc, buffer, sizeof(struct sctphdr) - sizeof(__u32));
-
-	/* Skip checksum field of the header. */
-	crc = sctp_crc32c(crc, zero, sizeof(__u32));
-
-	/* Calculate the rest of the CRC. */
-	crc = sctp_crc32c(crc, &buffer[sizeof(struct sctphdr)],
-			    length - sizeof(struct sctphdr));
-	return crc;
+	return crc32c(sum, buff, len);
 }
 
-static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
+static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
+				       int offset, int len)
 {
-	return sctp_crc32c(crc32, buffer, length);
+	return __crc32c_le_combine(csum, csum2, len);
 }
 
-static inline __le32 sctp_end_cksum(__u32 crc32)
-{
-	return cpu_to_le32(~crc32);
-}
-
-/* Calculate the CRC32C checksum of an SCTP packet.  */
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
 					unsigned int offset)
 {
-	const struct sk_buff *iter;
+	struct sctphdr *sh = sctp_hdr(skb);
+	__le32 ret, old = sh->checksum;
+	const struct skb_checksum_ops ops = {
+		.update  = sctp_csum_update,
+		.combine = sctp_csum_combine,
+	};
 
-	__u32 crc32 = sctp_start_cksum(skb->data + offset,
-				       skb_headlen(skb) - offset);
-	skb_walk_frags(skb, iter)
-		crc32 = sctp_update_cksum((__u8 *) iter->data,
-					  skb_headlen(iter), crc32);
+	sh->checksum = 0;
+	ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
+					  ~(__u32)0, &ops));
+	sh->checksum = old;
 
-	return sctp_end_cksum(crc32);
+	return ret;
 }
 
 #endif /* __sctp_checksum_h__ */
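
Since sctp_compute_cksum() now zeroes, checksums, and restores the header field internally, a transmit-path caller only has to store the result. A minimal usage sketch (assuming the skb's transport header already points at the SCTP header, as sctp_hdr() requires):

static void foo_sctp_fill_cksum(struct sk_buff *skb, unsigned int offset)
{
	struct sctphdr *sh = sctp_hdr(skb);

	sh->checksum = sctp_compute_cksum(skb, offset);
}
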
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 3794c5a..c5fe806 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -90,12 +90,11 @@
 /*
  * sctp/protocol.c
  */
-extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
-				     sctp_scope_t, gfp_t gfp,
-				     int flags);
-extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
-extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
+			      sctp_scope_t, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -110,7 +109,7 @@
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 		    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
-extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 
 /*
  * sctp/primitive.c
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index c2e542b..f257486 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,18 +3,18 @@
 
 #include <linux/types.h>
 
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
-				      __be16 dport);
-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
-					__be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
-					  __be16 sport, __be16 dport);
-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
-				       __be16 sport, __be16 dport);
-extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
-					 __be16 sport, __be16 dport);
+__u32 secure_ip_id(__be32 daddr);
+__u32 secure_ipv6_id(const __be32 daddr[4]);
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+			       __be16 dport);
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+				 __be16 sport, __be16 dport);
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+				   __be16 sport, __be16 dport);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+				__be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+				  __be16 sport, __be16 dport);
 
 #endif /* _NET_SECURE_SEQ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 808cbc2..e3a18ff 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -156,7 +156,7 @@
  */
 struct sock_common {
 	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
-	 * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+	 * address on 64bit arches : cf INET_MATCH()
 	 */
 	union {
 		__addrpair	skc_addrpair;
@@ -191,6 +191,12 @@
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
 #endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr		skc_v6_daddr;
+	struct in6_addr		skc_v6_rcv_saddr;
+#endif
+
 	/*
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
@@ -218,7 +224,7 @@
   *	@sk_lock:	synchronizer
   *	@sk_rcvbuf: size of receive buffer in bytes
   *	@sk_wq: sock wait queue and async head
-  *	@sk_rx_dst: receive input route used by early tcp demux
+  *	@sk_rx_dst: receive input route used by early demux
   *	@sk_dst_cache: destination cache
   *	@sk_dst_lock: destination cache lock
   *	@sk_policy: flow policy
@@ -233,6 +239,7 @@
   *	@sk_ll_usec: usecs to busypoll when there is no data
   *	@sk_allocation: allocation mode
   *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+  *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *	@sk_sndbuf: size of send buffer in bytes
   *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@
 #define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end		__sk_common.skc_dontcopy_end
 #define sk_hash			__sk_common.skc_hash
+#define sk_portpair		__sk_common.skc_portpair
+#define sk_num			__sk_common.skc_num
+#define sk_dport		__sk_common.skc_dport
+#define sk_addrpair		__sk_common.skc_addrpair
+#define sk_daddr		__sk_common.skc_daddr
+#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
 #define sk_family		__sk_common.skc_family
 #define sk_state		__sk_common.skc_state
 #define sk_reuse		__sk_common.skc_reuse
@@ -307,6 +320,9 @@
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+#define sk_v6_daddr		__sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
+
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
 	/*
@@ -363,6 +379,7 @@
 	int			sk_wmem_queued;
 	gfp_t			sk_allocation;
 	u32			sk_pacing_rate; /* bytes per second */
+	u32			sk_max_pacing_rate;
 	netdev_features_t	sk_route_caps;
 	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
@@ -751,7 +768,7 @@
 	return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
-extern void sk_stream_write_space(struct sock *sk);
+void sk_stream_write_space(struct sock *sk);
 
 /* OOB backlog add */
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -793,7 +810,7 @@
 	return 0;
 }
 
-extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
@@ -858,15 +875,15 @@
 		__rc;							\
 	})
 
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
-extern void sk_set_memalloc(struct sock *sk);
-extern void sk_clear_memalloc(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
 
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
@@ -1019,10 +1036,10 @@
 
 struct cg_proto {
 	void			(*enter_memory_pressure)(struct sock *sk);
-	struct res_counter	*memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
-	int			*memory_pressure;
-	long			*sysctl_mem;
+	struct res_counter	memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
+	int			memory_pressure;
+	long			sysctl_mem[3];
 	unsigned long		flags;
 	/*
 	 * memcg field is used to find which memcg we belong directly
@@ -1036,8 +1053,8 @@
 	struct mem_cgroup	*memcg;
 };
 
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
 
 static inline bool memcg_proto_active(struct cg_proto *cg_proto)
 {
@@ -1118,7 +1135,7 @@
 		return false;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!*sk->sk_cgrp->memory_pressure;
+		return !!sk->sk_cgrp->memory_pressure;
 
 	return !!*sk->sk_prot->memory_pressure;
 }
@@ -1138,8 +1155,8 @@
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			if (*cg_proto->memory_pressure)
-				*cg_proto->memory_pressure = 0;
+			if (cg_proto->memory_pressure)
+				cg_proto->memory_pressure = 0;
 	}
 
 }
@@ -1175,7 +1192,7 @@
 	struct res_counter *fail;
 	int ret;
 
-	ret = res_counter_charge_nofail(prot->memory_allocated,
+	ret = res_counter_charge_nofail(&prot->memory_allocated,
 					amt << PAGE_SHIFT, &fail);
 	if (ret < 0)
 		*parent_status = OVER_LIMIT;
@@ -1184,13 +1201,13 @@
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
 					      unsigned long amt)
 {
-	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+	res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
 	u64 ret;
-	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+	ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
 	return ret >> PAGE_SHIFT;
 }
 
@@ -1238,7 +1255,7 @@
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_dec(cg_proto->sockets_allocated);
+			percpu_counter_dec(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_dec(prot->sockets_allocated);
@@ -1252,7 +1269,7 @@
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_inc(cg_proto->sockets_allocated);
+			percpu_counter_inc(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_inc(prot->sockets_allocated);
@@ -1264,7 +1281,7 @@
 	struct proto *prot = sk->sk_prot;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+		return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
 	return percpu_counter_read_positive(prot->sockets_allocated);
 }
@@ -1292,8 +1309,8 @@
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
@@ -1369,8 +1386,8 @@
 /*
  * Functions for memory accounting
  */
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1478,14 +1495,14 @@
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
 {
 	lock_sock_nested(sk, 0);
 }
 
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
@@ -1494,7 +1511,7 @@
 				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
 /**
  * unlock_sock_fast - complement of lock_sock_fast
  * @sk: socket
@@ -1512,108 +1529,84 @@
 }
 
 
-extern struct sock		*sk_alloc(struct net *net, int family,
-					  gfp_t priority,
-					  struct proto *prot);
-extern void			sk_free(struct sock *sk);
-extern void			sk_release_kernel(struct sock *sk);
-extern struct sock		*sk_clone_lock(const struct sock *sk,
-					       const gfp_t priority);
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+		      struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
 
-extern struct sk_buff		*sock_wmalloc(struct sock *sk,
-					      unsigned long size, int force,
-					      gfp_t priority);
-extern struct sk_buff		*sock_rmalloc(struct sock *sk,
-					      unsigned long size, int force,
-					      gfp_t priority);
-extern void			sock_wfree(struct sk_buff *skb);
-extern void			skb_orphan_partial(struct sk_buff *skb);
-extern void			sock_rfree(struct sk_buff *skb);
-extern void			sock_edemux(struct sk_buff *skb);
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority);
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
 
-extern int			sock_setsockopt(struct socket *sock, int level,
-						int op, char __user *optval,
-						unsigned int optlen);
+int sock_setsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, unsigned int optlen);
 
-extern int			sock_getsockopt(struct socket *sock, int level,
-						int op, char __user *optval,
-						int __user *optlen);
-extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
-						     unsigned long size,
-						     int noblock,
-						     int *errcode);
-extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
-						      unsigned long header_len,
-						      unsigned long data_len,
-						      int noblock,
-						      int *errcode,
-						      int max_page_order);
-extern void *sock_kmalloc(struct sock *sk, int size,
-			  gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
+int sock_getsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+				    int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+				     unsigned long data_len, int noblock,
+				     int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
-extern int                      sock_no_bind(struct socket *,
-					     struct sockaddr *, int);
-extern int                      sock_no_connect(struct socket *,
-						struct sockaddr *, int, int);
-extern int                      sock_no_socketpair(struct socket *,
-						   struct socket *);
-extern int                      sock_no_accept(struct socket *,
-					       struct socket *, int);
-extern int                      sock_no_getname(struct socket *,
-						struct sockaddr *, int *, int);
-extern unsigned int             sock_no_poll(struct file *, struct socket *,
-					     struct poll_table_struct *);
-extern int                      sock_no_ioctl(struct socket *, unsigned int,
-					      unsigned long);
-extern int			sock_no_listen(struct socket *, int);
-extern int                      sock_no_shutdown(struct socket *, int);
-extern int			sock_no_getsockopt(struct socket *, int , int,
-						   char __user *, int __user *);
-extern int			sock_no_setsockopt(struct socket *, int, int,
-						   char __user *, unsigned int);
-extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
-						struct msghdr *, size_t);
-extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
-						struct msghdr *, size_t, int);
-extern int			sock_no_mmap(struct file *file,
-					     struct socket *sock,
-					     struct vm_area_struct *vma);
-extern ssize_t			sock_no_sendpage(struct socket *sock,
-						struct page *page,
-						int offset, size_t size,
-						int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+			  struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int, int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+		    int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+		 struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+			 size_t size, int flags);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * uses the inet style.
  */
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
 			       struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
 		int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
 		int optname, char __user *optval, unsigned int optlen);
 
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
 
 /*
  *	Default socket callbacks and setup code
  */
 
 /* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
+void sock_init_data(struct socket *sock, struct sock *sk);
 
-extern void sk_filter_release_rcu(struct rcu_head *rcu);
+void sk_filter_release_rcu(struct rcu_head *rcu);
 
 /**
  *	sk_filter_release - release a socket filter
@@ -1671,9 +1664,12 @@
 	if (atomic_dec_and_test(&sk->sk_refcnt))
 		sk_free(sk);
 }
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
 
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-			  const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
@@ -1727,8 +1723,8 @@
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern kuid_t sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
@@ -1750,8 +1746,6 @@
 	return dst;
 }
 
-extern void sk_reset_txq(struct sock *sk);
-
 static inline void dst_negative_advice(struct sock *sk)
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1761,7 +1755,7 @@
 
 		if (ndst != dst) {
 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
-			sk_reset_txq(sk);
+			sk_tx_queue_clear(sk);
 		}
 	}
 }
@@ -1803,16 +1797,16 @@
 	spin_unlock(&sk->sk_dst_lock);
 }
 
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline bool sk_can_gso(const struct sock *sk)
 {
 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
@@ -2025,14 +2019,14 @@
 	sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
-			   unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+		    unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  *	Recover an error report and clear atomically
@@ -2100,7 +2094,7 @@
 	return &sk->sk_frag;
 }
 
-extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
 /*
  *	Default write policy as shown to user space via poll/select/SIGIO
@@ -2138,10 +2132,10 @@
 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
-	struct sk_buff *skb);
-extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
-	struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+			   struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+			     struct sk_buff *skb);
 
 static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2174,8 +2168,8 @@
 		__sock_recv_wifi_status(msg, sk, skb);
 }
 
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
-				     struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+			      struct sk_buff *skb);
 
 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 					  struct sk_buff *skb)
@@ -2200,7 +2194,7 @@
  *
  * Currently only depends on SOCK_TIMESTAMPING* flags.
  */
-extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
 
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
@@ -2264,11 +2258,11 @@
 	return NULL;
 }
 
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
-			      int level, int type);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+		       int type);
 
 /*
  *	Enable debug/info messages
diff --git a/include/net/stp.h b/include/net/stp.h
index ad447f1..3af174d 100644
--- a/include/net/stp.h
+++ b/include/net/stp.h
@@ -8,7 +8,7 @@
 	void		*data;
 };
 
-extern int stp_proto_register(const struct stp_proto *proto);
-extern void stp_proto_unregister(const struct stp_proto *proto);
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
 
 #endif /* _NET_STP_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b1aa324..70e55d2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -50,7 +50,7 @@
 extern struct inet_hashinfo tcp_hashinfo;
 
 extern struct percpu_counter tcp_orphan_count;
-extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
@@ -259,6 +259,7 @@
 extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_dsack;
+extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -274,7 +275,6 @@
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
 extern int sysctl_tcp_slow_start_after_idle;
-extern int sysctl_tcp_max_ssthresh;
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
@@ -325,7 +325,7 @@
 	return false;
 }
 
-extern bool tcp_check_oom(struct sock *sk, int shift);
+bool tcp_check_oom(struct sock *sk, int shift);
 
 /* syncookies: remember time of last synqueue overflow */
 static inline void tcp_synq_overflow(struct sock *sk)
@@ -348,38 +348,36 @@
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
-extern void tcp_init_mem(struct net *net);
+void tcp_tasklet_init(void);
 
-extern void tcp_tasklet_init(void);
+void tcp_v4_err(struct sk_buff *skb, u32);
 
-extern void tcp_v4_err(struct sk_buff *skb, u32);
+void tcp_shutdown(struct sock *sk, int how);
 
-extern void tcp_shutdown (struct sock *sk, int how);
+void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
 
-extern void tcp_v4_early_demux(struct sk_buff *skb);
-extern int tcp_v4_rcv(struct sk_buff *skb);
-
-extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-		       size_t size);
-extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
-			size_t size, int flags);
-extern void tcp_release_cb(struct sock *sk);
-extern void tcp_wfree(struct sk_buff *skb);
-extern void tcp_write_timer_handler(struct sock *sk);
-extern void tcp_delack_timer_handler(struct sock *sk);
-extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-				 const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_space_adjust(struct sock *sk);
-extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
-extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
-extern void tcp_twsk_destructor(struct sock *sk);
-extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
-			       struct pipe_inode_info *pipe, size_t len,
-			       unsigned int flags);
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		size_t size);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+		 int flags);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+			  const struct tcphdr *th, unsigned int len);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+			 const struct tcphdr *th, unsigned int len);
+void tcp_rcv_space_adjust(struct sock *sk);
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+			struct pipe_inode_info *pipe, size_t len,
+			unsigned int flags);
 
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
@@ -409,66 +407,65 @@
 };
 
 
-extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
-						     struct sk_buff *skb,
-						     const struct tcphdr *th);
-extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
-				   struct request_sock *req,
-				   struct request_sock **prev,
-				   bool fastopen);
-extern int tcp_child_process(struct sock *parent, struct sock *child,
-			     struct sk_buff *skb);
-extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_sock *tp);
-extern void tcp_update_metrics(struct sock *sk);
-extern void tcp_init_metrics(struct sock *sk);
-extern void tcp_metrics_init(void);
-extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
-extern bool tcp_remember_stamp(struct sock *sk);
-extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
-extern void tcp_disable_fack(struct tcp_sock *tp);
-extern void tcp_close(struct sock *sk, long timeout);
-extern void tcp_init_sock(struct sock *sk);
-extern unsigned int tcp_poll(struct file * file, struct socket *sock,
-			     struct poll_table_struct *wait);
-extern int tcp_getsockopt(struct sock *sk, int level, int optname,
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+					      struct sk_buff *skb,
+					      const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req, struct request_sock **prev,
+			   bool fastopen);
+int tcp_child_process(struct sock *parent, struct sock *child,
+		      struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk, int how);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
+			bool paws_check);
+bool tcp_remember_stamp(struct sock *sk);
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+void tcp_disable_fack(struct tcp_sock *tp);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+unsigned int tcp_poll(struct file *file, struct socket *sock,
+		      struct poll_table_struct *wait);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, int __user *optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, unsigned int optlen);
+int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 			  char __user *optval, int __user *optlen);
-extern int tcp_setsockopt(struct sock *sk, int level, int optname,
+int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 			  char __user *optval, unsigned int optlen);
-extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-				 char __user *optval, int __user *optlen);
-extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-				 char __user *optval, unsigned int optlen);
-extern void tcp_set_keepalive(struct sock *sk, int val);
-extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-		       size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(const struct sk_buff *skb,
-			      struct tcp_options_received *opt_rx,
-			      int estab, struct tcp_fastopen_cookie *foc);
-extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		size_t len, int nonblock, int flags, int *addr_len);
+void tcp_parse_options(const struct sk_buff *skb,
+		       struct tcp_options_received *opt_rx,
+		       int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
  *	TCP v4 functions exported for the inet6 API
  */
 
-extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock * tcp_create_openreq_child(struct sock *sk,
-					      struct request_sock *req,
-					      struct sk_buff *skb);
-extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
-					  struct request_sock *req,
-					  struct dst_entry *dst);
-extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-			  int addr_len);
-extern int tcp_connect(struct sock *sk);
-extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
-					struct request_sock *req,
-					struct tcp_fastopen_cookie *foc);
-extern int tcp_disconnect(struct sock *sk, int flags);
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(struct sock *sk,
+				      struct request_sock *req,
+				      struct sk_buff *skb);
+struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+				  struct request_sock *req,
+				  struct dst_entry *dst);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+				struct request_sock *req,
+				struct tcp_fastopen_cookie *foc);
+int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_connect_init(struct sock *sk);
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -476,16 +473,32 @@
 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
-			     u32 cookie);
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
-				    struct ip_options *opt);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+		      u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+			     struct ip_options *opt);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
-				     const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
-				     __u16 *mss);
+#include <linux/ktime.h>
+
+/* Syncookies use a monotonic timer which increments every 64 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value.  A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid for at most 128 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
+
+static inline u32 tcp_cookie_time(void)
+{
+	struct timespec now;
+	getnstimeofday(&now);
+	return now.tv_sec >> 6; /* 64 seconds granularity */
+}
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+			      u16 *mssp);
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 #else
 static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 					    struct sk_buff *skb,
@@ -495,19 +508,19 @@
 }
 #endif
 
-extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern bool cookie_check_timestamp(struct tcp_options_received *opt,
-				struct net *net, bool *ecn_ok);
+__u32 cookie_init_timestamp(struct request_sock *req);
+bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
+			    bool *ecn_ok);
 
 /* From net/ipv6/syncookies.c */
-extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
-			     u32 cookie);
-extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+		      u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
-				     const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-				     __u16 *mss);
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+			      const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
+			      __u16 *mss);
 #else
 static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 					    struct sk_buff *skb,
@@ -518,47 +531,46 @@
 #endif
 /* tcp_output.c */
 
-extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
-				      int nonagle);
-extern bool tcp_may_send_now(struct sock *sk);
-extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_retransmit_timer(struct sock *sk);
-extern void tcp_xmit_retransmit_queue(struct sock *);
-extern void tcp_simple_retransmit(struct sock *);
-extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+			       int nonagle);
+bool tcp_may_send_now(struct sock *sk);
+int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 
-extern void tcp_send_probe0(struct sock *);
-extern void tcp_send_partial(struct sock *);
-extern int tcp_write_wakeup(struct sock *);
-extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
-extern int tcp_send_synack(struct sock *);
-extern bool tcp_syn_flood_action(struct sock *sk,
-				 const struct sk_buff *skb,
-				 const char *proto);
-extern void tcp_push_one(struct sock *, unsigned int mss_now);
-extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk);
-extern void tcp_send_loss_probe(struct sock *sk);
-extern bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
+			  const char *proto);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk);
 
 /* tcp_input.c */
-extern void tcp_cwnd_application_limited(struct sock *sk);
-extern void tcp_resume_early_retransmit(struct sock *sk);
-extern void tcp_rearm_rto(struct sock *sk);
-extern void tcp_reset(struct sock *sk);
+void tcp_cwnd_application_limited(struct sock *sk);
+void tcp_resume_early_retransmit(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
-extern void tcp_init_xmit_timers(struct sock *);
+void tcp_init_xmit_timers(struct sock *);
 static inline void tcp_clear_xmit_timers(struct sock *sk)
 {
 	inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk);
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
 
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -584,20 +596,20 @@
 }
 
 /* tcp.c */
-extern void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(const struct sock *, struct tcp_info *);
 
 /* Read 'sendfile()'-style from a TCP socket */
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 				unsigned int, size_t);
-extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
-			 sk_read_actor_t recv_actor);
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+		  sk_read_actor_t recv_actor);
 
-extern void tcp_initialize_rcv_mss(struct sock *sk);
+void tcp_initialize_rcv_mss(struct sock *sk);
 
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
-extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_init_buffer_space(struct sock *sk);
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -610,7 +622,7 @@
 	return (tp->srtt >> 3) + tp->rttvar;
 }
 
-extern void tcp_set_rto(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
 
 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
@@ -663,7 +675,7 @@
  * scaling applied to the result.  The caller does these things
  * if necessary.  This is a "raw" window selection.
  */
-extern u32 __tcp_select_window(struct sock *sk);
+u32 __tcp_select_window(struct sock *sk);
 
 void tcp_send_window_probe(struct sock *sk);
 
@@ -784,7 +796,7 @@
 	/* lower bound for congestion window (optional) */
 	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -800,24 +812,24 @@
 	struct module 	*owner;
 };
 
-extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
-extern void tcp_init_congestion_control(struct sock *sk);
-extern void tcp_cleanup_congestion_control(struct sock *sk);
-extern int tcp_set_default_congestion_control(const char *name);
-extern void tcp_get_default_congestion_control(char *name);
-extern void tcp_get_available_congestion_control(char *buf, size_t len);
-extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
-extern int tcp_set_allowed_congestion_control(char *allowed);
-extern int tcp_set_congestion_control(struct sock *sk, const char *name);
-extern void tcp_slow_start(struct tcp_sock *tp);
-extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(const char *name);
+void tcp_get_default_congestion_control(char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name);
+int tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
-extern u32 tcp_reno_min_cwnd(const struct sock *sk);
+u32 tcp_reno_ssthresh(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -936,8 +948,8 @@
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
  * sending if not using sysctl_tcp_tso_win_divisor.
@@ -963,7 +975,7 @@
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
@@ -1028,7 +1040,7 @@
 #endif
 }
 
-extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
@@ -1039,9 +1051,9 @@
 	"Close Wait","Last ACK","Listen","Closing"
 };
 #endif
-extern void tcp_set_state(struct sock *sk, int state);
+void tcp_set_state(struct sock *sk, int state);
 
-extern void tcp_done(struct sock *sk);
+void tcp_done(struct sock *sk);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
@@ -1049,13 +1061,12 @@
 	rx_opt->num_sacks = 0;
 }
 
-extern u32 tcp_default_init_rwnd(u32 mss);
+u32 tcp_default_init_rwnd(u32 mss);
 
 /* Determine a window scaling and initial window to offer. */
-extern void tcp_select_initial_window(int __space, __u32 mss,
-				      __u32 *rcv_wnd, __u32 *window_clamp,
-				      int wscale_ok, __u8 *rcv_wscale,
-				      __u32 init_rcv_wnd);
+void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+			       __u32 *window_clamp, int wscale_ok,
+			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
 
 static inline int tcp_win_from_space(int space)
 {
@@ -1095,11 +1106,11 @@
 	ireq->wscale_ok = rx_opt->wscale_ok;
 	ireq->acked = 0;
 	ireq->ecn_ok = 0;
-	ireq->rmt_port = tcp_hdr(skb)->source;
-	ireq->loc_port = tcp_hdr(skb)->dest;
+	ireq->ir_rmt_port = tcp_hdr(skb)->source;
+	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
-extern void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
@@ -1252,21 +1263,20 @@
 };
 
 /* - functions */
-extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			       const struct sock *sk,
-			       const struct request_sock *req,
-			       const struct sk_buff *skb);
-extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-			  int family, const u8 *newkey,
-			  u8 newkeylen, gfp_t gfp);
-extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-			  int family);
-extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+			const struct sock *sk, const struct request_sock *req,
+			const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+		   int family);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
 					 struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
-extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
-			const union tcp_md5_addr *addr, int family);
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+					 const union tcp_md5_addr *addr,
+					 int family);
 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
 #else
 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1278,27 +1288,26 @@
 #define tcp_twsk_md5_key(twsk)	NULL
 #endif
 
-extern bool tcp_alloc_md5sig_pool(void);
+bool tcp_alloc_md5sig_pool(void);
 
-extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
 static inline void tcp_put_md5sig_pool(void)
 {
 	local_bh_enable();
 }
 
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-				 unsigned int header_len);
-extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
-			    const struct tcp_md5sig_key *key);
+int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+			  unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+		     const struct tcp_md5sig_key *key);
 
 /* From tcp_fastopen.c */
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-				   struct tcp_fastopen_cookie *cookie,
-				   int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-				   struct tcp_fastopen_cookie *cookie,
-				   bool syn_lost);
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
+			    unsigned long *last_syn_loss);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+			    struct tcp_fastopen_cookie *cookie, bool syn_lost);
 struct tcp_fastopen_request {
 	/* Fast Open cookie. Size 0 means a cookie request */
 	struct tcp_fastopen_cookie	cookie;
@@ -1309,9 +1318,9 @@
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-				    struct tcp_fastopen_cookie *foc);
-
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+			     struct tcp_fastopen_cookie *foc);
+void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
 /* Fastopen key context */
@@ -1507,7 +1516,6 @@
 	TCP_SEQ_STATE_LISTENING,
 	TCP_SEQ_STATE_OPENREQ,
 	TCP_SEQ_STATE_ESTABLISHED,
-	TCP_SEQ_STATE_TIME_WAIT,
 };
 
 int tcp_seq_open(struct inode *inode, struct file *file);
@@ -1529,22 +1537,20 @@
 	loff_t			last_pos;
 };
 
-extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
+int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
+void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
 
 extern struct request_sock_ops tcp_request_sock_ops;
 extern struct request_sock_ops tcp6_request_sock_ops;
 
-extern void tcp_v4_destroy_sock(struct sock *sk);
+void tcp_v4_destroy_sock(struct sock *sk);
 
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
-				       netdev_features_t features);
-extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
-					struct sk_buff *skb);
-extern int tcp_gro_complete(struct sk_buff *skb);
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+				netdev_features_t features);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int tcp_gro_complete(struct sk_buff *skb);
 
-extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
-				__be32 daddr);
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
@@ -1560,8 +1566,8 @@
 }
 
 #ifdef CONFIG_PROC_FS
-extern int tcp4_proc_init(void);
-extern void tcp4_proc_exit(void);
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
 #endif
 
 /* TCP af-specific functions */
@@ -1592,9 +1598,9 @@
 #endif
 };
 
-extern int tcpv4_offload_init(void);
+int tcpv4_offload_init(void);
 
-extern void tcp_v4_init(void);
-extern void tcp_init(void);
+void tcp_v4_init(void);
+void tcp_init(void);
 
 #endif	/* _TCP_H */
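
The syncookies hunk in tcp.h above open-codes the cookie clock: tcp_cookie_time() returns the current seconds count shifted right by 6 (64-second granularity), and MAX_SYNCOOKIE_AGE bounds how many such ticks a cookie may age before it is rejected. As a rough sketch of how the two fit together (the helper name cookie_counter_valid below is illustrative, not part of the patch):

/* Sketch only: accept a cookie while fewer than MAX_SYNCOOKIE_AGE
 * 64-second ticks have elapsed since it was generated.  Unsigned
 * subtraction keeps the comparison correct across counter wrap.
 */
static inline bool cookie_counter_valid(u32 encoded_count)
{
	u32 now = tcp_cookie_time();	/* tv_sec >> 6 */

	return (now - encoded_count) < MAX_SYNCOOKIE_AGE;
}

With MAX_SYNCOOKIE_AGE set to 2, this is the "valid for at most 128 seconds" bound the comment describes.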
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
index 7df18bc..05b94d9 100644
--- a/include/net/tcp_memcontrol.h
+++ b/include/net/tcp_memcontrol.h
@@ -1,19 +1,7 @@
 #ifndef _TCP_MEMCG_H
 #define _TCP_MEMCG_H
 
-struct tcp_memcontrol {
-	struct cg_proto cg_proto;
-	/* per-cgroup tcp memory pressure knobs */
-	struct res_counter tcp_memory_allocated;
-	struct percpu_counter tcp_sockets_allocated;
-	/* those two are read-mostly, leave them at the end */
-	long tcp_prot_mem[3];
-	int tcp_memory_pressure;
-};
-
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
 #endif /* _TCP_MEMCG_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index ef2e0b7..a24f0f3 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -79,7 +79,7 @@
 	unsigned int		log;
 };
 extern struct udp_table udp_table;
-extern void udp_table_init(struct udp_table *, const char *);
+void udp_table_init(struct udp_table *, const char *);
 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
 					     struct net *net, unsigned int num)
 {
@@ -162,52 +162,53 @@
 	BUG();
 }
 
-extern void udp_lib_unhash(struct sock *sk);
-extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
 
 static inline void udp_lib_close(struct sock *sk, long timeout)
 {
 	sk_common_release(sk);
 }
 
-extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
-			    int (*)(const struct sock *,const struct sock *),
-			    unsigned int hash2_nulladdr);
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+		     int (*)(const struct sock *, const struct sock *),
+		     unsigned int hash2_nulladdr);
 
 /* net/ipv4/udp.c */
-extern int udp_get_port(struct sock *sk, unsigned short snum,
-			int (*saddr_cmp)(const struct sock *,
-					 const struct sock *));
-extern void udp_err(struct sk_buff *, u32);
-extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
-			    struct msghdr *msg, size_t len);
-extern int udp_push_pending_frames(struct sock *sk);
-extern void udp_flush_pending_frames(struct sock *sk);
-extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
-extern int udp_rcv(struct sk_buff *skb);
-extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int udp_disconnect(struct sock *sk, int flags);
-extern unsigned int udp_poll(struct file *file, struct socket *sock,
-			     poll_table *wait);
-extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-					      netdev_features_t features);
-extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
-			      char __user *optval, int __user *optlen);
-extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-			      char __user *optval, unsigned int optlen,
-			      int (*push_pending_frames)(struct sock *));
-extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-				    __be32 daddr, __be16 dport,
-				    int dif);
-extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-				    __be32 daddr, __be16 dport,
-				    int dif, struct udp_table *tbl);
-extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-				    const struct in6_addr *daddr, __be16 dport,
-				    int dif);
-extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-				    const struct in6_addr *daddr, __be16 dport,
-				    int dif, struct udp_table *tbl);
+void udp_v4_early_demux(struct sk_buff *skb);
+int udp_get_port(struct sock *sk, unsigned short snum,
+		 int (*saddr_cmp)(const struct sock *,
+				  const struct sock *));
+void udp_err(struct sk_buff *, u32);
+int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		size_t len);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_disconnect(struct sock *sk, int flags);
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+				       netdev_features_t features);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+		       char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+		       char __user *optval, unsigned int optlen,
+		       int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+			     __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+			       __be32 daddr, __be16 dport, int dif,
+			       struct udp_table *tbl);
+struct sock *udp6_lib_lookup(struct net *net,
+			     const struct in6_addr *saddr, __be16 sport,
+			     const struct in6_addr *daddr, __be16 dport,
+			     int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+			       const struct in6_addr *saddr, __be16 sport,
+			       const struct in6_addr *daddr, __be16 dport,
+			       int dif, struct udp_table *tbl);
 
 /*
  * 	SNMP statistics for UDP and UDP-Lite
@@ -229,13 +230,13 @@
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field) \
-	do { \
-		if ((sk)->sk_family == AF_INET) \
-			UDP_INC_STATS_BH(sock_net(sk), field, 0); \
-		else \
-			UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
-	} while (0);
+#define UDPX_INC_STATS_BH(sk, field)					\
+do {									\
+	if ((sk)->sk_family == AF_INET)					\
+		UDP_INC_STATS_BH(sock_net(sk), field, 0);		\
+	else								\
+		UDP6_INC_STATS_BH(sock_net(sk), field, 0);		\
+} while (0)
 #else
 #define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
 #endif
@@ -259,19 +260,19 @@
 };
 
 #ifdef CONFIG_PROC_FS
-extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
+void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
 
-extern int udp4_proc_init(void);
-extern void udp4_proc_exit(void);
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
 #endif
 
-extern int udpv4_offload_init(void);
+int udpv4_offload_init(void);
 
-extern void udp_init(void);
+void udp_init(void);
 
-extern void udp_encap_enable(void);
+void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
-extern void udpv6_encap_enable(void);
+void udpv6_encap_enable(void);
 #endif
 #endif	/* _UDP_H */
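
One detail in the udp.h hunk above is easy to miss: the UDPX_INC_STATS_BH rewrite drops the stray semicolon after "while (0)". With that semicolon, the macro plus the caller's own semicolon expands to two statements, so a hypothetical caller such as this one (ok is a stand-in condition) would stop compiling, the leftover empty statement detaching the else from its if:

	if (ok)
		UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
	else
		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);

Ending the macro with a bare "} while (0)" lets the caller's semicolon complete the statement, which is the usual idiom for multi-statement macros.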
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 7137545..2caadab 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -126,7 +126,7 @@
 	return skb_checksum(skb, off, len, 0);
 }
 
-extern void	udplite4_register(void);
-extern int 	udplite_get_port(struct sock *sk, unsigned short snum,
-			int (*scmp)(const struct sock *, const struct sock *));
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+		     int (*scmp)(const struct sock *, const struct sock *));
 #endif	/* _UDPLITE_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 2d64d3c..6b6d180 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -36,5 +36,16 @@
 
 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
 
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+
+#if IS_ENABLED(CONFIG_VXLAN)
 void vxlan_get_rx_port(struct net_device *netdev);
+#else
+static inline void vxlan_get_rx_port(struct net_device *netdev)
+{
+}
+#endif
 #endif
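
The headroom constants added to vxlan.h are plain sums of the encapsulation overhead: 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) = 50 bytes, or 70 bytes with a 40-byte IPv6 header instead. A sketch of the bookkeeping they support, assuming a hypothetical underlay device lowerdev (the helper name vxlan_max_mtu is illustrative, not part of the patch):

/* Sketch only: largest MTU a VXLAN device can advertise so that
 * encapsulated frames still fit on the underlay link.
 */
static int vxlan_max_mtu(const struct net_device *lowerdev, bool use_ipv6)
{
	int overhead = use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM;

	return lowerdev->mtu - overhead;	/* e.g. 1500 - 50 = 1450 */
}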
diff --git a/include/net/wext.h b/include/net/wext.h
index 4f6e742..3459119 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,13 +6,13 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
-			     void __user *arg);
-extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
-				    unsigned long arg);
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+		      void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+			     unsigned long arg);
 
-extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
-extern int call_commit_handler(struct net_device *dev);
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
 #else
 static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
 				    void __user *arg)
@@ -27,8 +27,8 @@
 #endif
 
 #ifdef CONFIG_WEXT_PROC
-extern int wext_proc_init(struct net *net);
-extern void wext_proc_exit(struct net *net);
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
 #else
 static inline int wext_proc_init(struct net *net)
 {
diff --git a/include/net/wimax.h b/include/net/wimax.h
index bbb74f9..98498e1 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -438,9 +438,9 @@
  *
  * These functions are not exported to user space.
  */
-extern void wimax_dev_init(struct wimax_dev *);
-extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
-extern void wimax_dev_rm(struct wimax_dev *);
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
 
 static inline
 struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
@@ -454,8 +454,8 @@
 	return wimax_dev->net_dev->dev.parent;
 }
 
-extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
-extern enum wimax_st wimax_state_get(struct wimax_dev *);
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
 
 /*
  * Radio Switch state reporting.
@@ -463,8 +463,8 @@
  * enum wimax_rf_state is declared in linux/wimax.h so the exports
  * to user space can use it.
  */
-extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
 
 
 /*
@@ -490,15 +490,14 @@
  * send diagnostics information that a device-specific diagnostics
  * tool would be interested in.
  */
-extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
-				       const void *, size_t, gfp_t);
-extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-extern int wimax_msg(struct wimax_dev *, const char *,
-		     const void *, size_t, gfp_t);
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+				size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
 
-extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-extern const void *wimax_msg_data(struct sk_buff *);
-extern ssize_t wimax_msg_len(struct sk_buff *);
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
 
 
 /*
@@ -513,7 +512,7 @@
  * device's control structure and (as such) the 'struct wimax_dev' is
  * referenced by the caller.
  */
-extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-extern int wimax_reset(struct wimax_dev *);
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
 
 #endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/include/net/x25.h b/include/net/x25.h
index b4a8a89..c383aa4 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -187,57 +187,57 @@
 extern int  sysctl_x25_ack_holdback_timeout;
 extern int  sysctl_x25_forward;
 
-extern int x25_parse_address_block(struct sk_buff *skb,
-		struct x25_address *called_addr,
-		struct x25_address *calling_addr);
+int x25_parse_address_block(struct sk_buff *skb,
+			    struct x25_address *called_addr,
+			    struct x25_address *calling_addr);
 
-extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
-			  struct x25_address *);
-extern int  x25_addr_aton(unsigned char *, struct x25_address *,
-			  struct x25_address *);
-extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
-extern void x25_destroy_socket_from_timer(struct sock *);
-extern int  x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
-extern void x25_kill_by_neigh(struct x25_neigh *);
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
 
 /* x25_dev.c */
-extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
-extern int  x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-extern void x25_establish_link(struct x25_neigh *);
-extern void x25_terminate_link(struct x25_neigh *);
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+			   struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
 
 /* x25_facilities.c */
-extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
-				struct x25_dte_facilities *, unsigned long *);
-extern int x25_create_facilities(unsigned char *, struct x25_facilities *,
-				struct x25_dte_facilities *, unsigned long);
-extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
-				struct x25_facilities *,
-				struct x25_dte_facilities *);
-extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+			 struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+			  struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+			     struct x25_facilities *,
+			     struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
 
 /* x25_forward.c */
-extern void x25_clear_forward_by_lci(unsigned int lci);
-extern void x25_clear_forward_by_dev(struct net_device *);
-extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
-extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
-				struct sk_buff *, int);
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+		     int);
 
 /* x25_in.c */
-extern int  x25_process_rx_frame(struct sock *, struct sk_buff *);
-extern int  x25_backlog_rcv(struct sock *, struct sk_buff *);
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
 
 /* x25_link.c */
-extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
-extern void x25_link_device_up(struct net_device *);
-extern void x25_link_device_down(struct net_device *);
-extern void x25_link_established(struct x25_neigh *);
-extern void x25_link_terminated(struct x25_neigh *);
-extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
-extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
-extern int  x25_subscr_ioctl(unsigned int, void __user *);
-extern struct x25_neigh *x25_get_neigh(struct net_device *);
-extern void x25_link_free(void);
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+				unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
 
 /* x25_neigh.c */
 static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
@@ -252,16 +252,16 @@
 }
 
 /* x25_out.c */
-extern  int x25_output(struct sock *, struct sk_buff *);
-extern void x25_kick(struct sock *);
-extern void x25_enquiry_response(struct sock *);
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
 
 /* x25_route.c */
-extern struct x25_route *x25_get_route(struct x25_address *addr);
-extern struct net_device *x25_dev_get(char *);
-extern void x25_route_device_down(struct net_device *dev);
-extern int  x25_route_ioctl(unsigned int, void __user *);
-extern void x25_route_free(void);
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
 
 static __inline__ void x25_route_hold(struct x25_route *rt)
 {
@@ -275,30 +275,31 @@
 }
 
 /* x25_subr.c */
-extern void x25_clear_queues(struct sock *);
-extern void x25_frames_acked(struct sock *, unsigned short);
-extern void x25_requeue_frames(struct sock *);
-extern int  x25_validate_nr(struct sock *, unsigned short);
-extern void x25_write_internal(struct sock *, int);
-extern int  x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
-extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+	       int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
 
 /* x25_timer.c */
-extern void x25_init_timers(struct sock *sk);
-extern void x25_start_heartbeat(struct sock *);
-extern void x25_start_t2timer(struct sock *);
-extern void x25_start_t21timer(struct sock *);
-extern void x25_start_t22timer(struct sock *);
-extern void x25_start_t23timer(struct sock *);
-extern void x25_stop_heartbeat(struct sock *);
-extern void x25_stop_timer(struct sock *);
-extern unsigned long x25_display_timer(struct sock *);
-extern void x25_check_rbuf(struct sock *);
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
 
 /* sysctl_net_x25.c */
 #ifdef CONFIG_SYSCTL
-extern void x25_register_sysctl(void);
-extern void x25_unregister_sysctl(void);
+void x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
 #else
 static inline void x25_register_sysctl(void) {};
 static inline void x25_unregister_sysctl(void) {};
@@ -318,6 +319,6 @@
 extern struct list_head x25_neigh_list;
 extern rwlock_t x25_neigh_list_lock;
 
-extern int x25_proc_init(void);
-extern void x25_proc_exit(void);
+int x25_proc_init(void);
+void x25_proc_exit(void);
 #endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e253bf0..6b82fdf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -307,15 +307,17 @@
 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
-extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
+int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+		      const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
-extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
-extern int __xfrm_state_delete(struct xfrm_state *x);
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+	     struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
 
 struct xfrm_state_afinfo {
 	unsigned int		family;
@@ -344,12 +346,12 @@
 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
-extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
-extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
-extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
-extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
 	char			*description;
@@ -372,8 +374,8 @@
 	u32			(*get_mtu)(struct xfrm_state *, int size);
 };
 
-extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
-extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
 
 struct xfrm_mode {
 	/*
@@ -434,8 +436,8 @@
 	XFRM_MODE_FLAG_TUNNEL = 1,
 };
 
-extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
-extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
+int xfrm_register_mode(struct xfrm_mode *mode, int family);
+int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
 
 static inline int xfrm_af2proto(unsigned int family)
 {
@@ -595,8 +597,8 @@
 					   const struct xfrm_kmaddress *k);
 };
 
-extern int xfrm_register_km(struct xfrm_mgr *km);
-extern int xfrm_unregister_km(struct xfrm_mgr *km);
+int xfrm_register_km(struct xfrm_mgr *km);
+int xfrm_unregister_km(struct xfrm_mgr *km);
 
 /*
  * This structure is used for the duration where packets are being
@@ -713,23 +715,23 @@
 		audit_log_task_context(audit_buf);
 }
 
-extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-				  kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-				  kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-				 kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-				    kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
-					     struct sk_buff *skb);
-extern void xfrm_audit_state_replay(struct xfrm_state *x,
-				    struct sk_buff *skb, __be32 net_seq);
-extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
-extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
-				      __be32 net_spi, __be32 net_seq);
-extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
-				     struct sk_buff *skb, u8 proto);
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
+			   u32 ses, u32 secid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
+			      u32 ses, u32 secid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
+			  u32 ses, u32 secid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
+			     u32 ses, u32 secid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+				      struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+			     __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+			       __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+			      u8 proto);
 #else
 
 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
@@ -784,7 +786,7 @@
 		atomic_inc(&policy->refcnt);
 }
 
-extern void xfrm_policy_destroy(struct xfrm_policy *policy);
+void xfrm_policy_destroy(struct xfrm_policy *policy);
 
 static inline void xfrm_pol_put(struct xfrm_policy *policy)
 {
@@ -799,7 +801,7 @@
 		xfrm_pol_put(pols[i]);
 }
 
-extern void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *);
 
 static inline void __xfrm_state_put(struct xfrm_state *x)
 {
@@ -903,9 +905,8 @@
 	return port;
 }
 
-extern bool xfrm_selector_match(const struct xfrm_selector *sel,
-				const struct flowi *fl,
-				unsigned short family);
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+			 const struct flowi *fl, unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 /*	If neither has a context --> match
@@ -975,7 +976,7 @@
 }
 #endif
 
-extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
 struct sec_path {
 	atomic_t		refcnt;
@@ -1000,7 +1001,7 @@
 	return sp;
 }
 
-extern void __secpath_destroy(struct sec_path *sp);
+void __secpath_destroy(struct sec_path *sp);
 
 static inline void
 secpath_put(struct sec_path *sp)
@@ -1009,7 +1010,7 @@
 		__secpath_destroy(sp);
 }
 
-extern struct sec_path *secpath_dup(struct sec_path *src);
+struct sec_path *secpath_dup(struct sec_path *src);
 
 static inline void
 secpath_reset(struct sk_buff *skb)
@@ -1059,7 +1060,8 @@
 }
 
 #ifdef CONFIG_XFRM
-extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+			unsigned short family);
 
 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
 				       struct sk_buff *skb,
@@ -1103,8 +1105,8 @@
 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
 }
 
-extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
-				 unsigned int family, int reverse);
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+			  unsigned int family, int reverse);
 
 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
 				      unsigned int family)
@@ -1119,7 +1121,7 @@
 	return __xfrm_decode_session(skb, fl, family, 1);
 }
 
-extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
 
 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 {
@@ -1140,7 +1142,7 @@
 	return xfrm_route_forward(skb, AF_INET6);
 }
 
-extern int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk);
 
 static inline int xfrm_sk_clone_policy(struct sock *sk)
 {
@@ -1149,7 +1151,7 @@
 	return 0;
 }
 
-extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
@@ -1163,7 +1165,7 @@
 	}
 }
 
-extern void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect(struct net *net);
 
 #else
 
@@ -1355,6 +1357,12 @@
 	int priority;
 };
 
+struct xfrm_tunnel_notifier {
+	int (*handler)(struct sk_buff *skb);
+	struct xfrm_tunnel_notifier __rcu *next;
+	int priority;
+};
+
 struct xfrm6_tunnel {
 	int (*handler)(struct sk_buff *skb);
 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -1363,16 +1371,16 @@
 	int priority;
 };
 
-extern void xfrm_init(void);
-extern void xfrm4_init(void);
-extern int xfrm_state_init(struct net *net);
-extern void xfrm_state_fini(struct net *net);
-extern void xfrm4_state_init(void);
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
 #ifdef CONFIG_XFRM
-extern int xfrm6_init(void);
-extern void xfrm6_fini(void);
-extern int xfrm6_state_init(void);
-extern void xfrm6_state_fini(void);
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
 #else
 static inline int xfrm6_init(void)
 {
@@ -1385,52 +1393,52 @@
 #endif
 
 #ifdef CONFIG_XFRM_STATISTICS
-extern int xfrm_proc_init(struct net *net);
-extern void xfrm_proc_fini(struct net *net);
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
 #endif
 
-extern int xfrm_sysctl_init(struct net *net);
+int xfrm_sysctl_init(struct net *net);
 #ifdef CONFIG_SYSCTL
-extern void xfrm_sysctl_fini(struct net *net);
+void xfrm_sysctl_fini(struct net *net);
 #else
 static inline void xfrm_sysctl_fini(struct net *net)
 {
 }
 #endif
 
-extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
-extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
-			   int (*func)(struct xfrm_state *, int, void*), void *);
-extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
-extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
-					  const xfrm_address_t *saddr,
-					  const struct flowi *fl,
-					  struct xfrm_tmpl *tmpl,
-					  struct xfrm_policy *pol, int *err,
-					  unsigned short family);
-extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
-					       xfrm_address_t *daddr,
-					       xfrm_address_t *saddr,
-					       unsigned short family,
-					       u8 mode, u8 proto, u32 reqid);
-extern int xfrm_state_check_expire(struct xfrm_state *x);
-extern void xfrm_state_insert(struct xfrm_state *x);
-extern int xfrm_state_add(struct xfrm_state *x);
-extern int xfrm_state_update(struct xfrm_state *x);
-extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
-					    const xfrm_address_t *daddr, __be32 spi,
-					    u8 proto, unsigned short family);
-extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-						   const xfrm_address_t *daddr,
-						   const xfrm_address_t *saddr,
-						   u8 proto,
-						   unsigned short family);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+		    int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+				   const xfrm_address_t *saddr,
+				   const struct flowi *fl,
+				   struct xfrm_tmpl *tmpl,
+				   struct xfrm_policy *pol, int *err,
+				   unsigned short family);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+				       xfrm_address_t *daddr,
+				       xfrm_address_t *saddr,
+				       unsigned short family,
+				       u8 mode, u8 proto, u32 reqid);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+				     const xfrm_address_t *daddr, __be32 spi,
+				     u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+					    const xfrm_address_t *daddr,
+					    const xfrm_address_t *saddr,
+					    u8 proto,
+					    unsigned short family);
 #ifdef CONFIG_XFRM_SUB_POLICY
-extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
-			  int n, unsigned short family);
-extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
-			   int n, unsigned short family);
+int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+		   unsigned short family);
+int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+		    unsigned short family);
 #else
 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
 				 int n, unsigned short family)
@@ -1462,68 +1470,69 @@
 	u32 spdhmcnt;
 };
 
-extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
-					      u32 seq);
-extern int xfrm_state_delete(struct xfrm_state *x);
-extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-extern int xfrm_init_replay(struct xfrm_state *x);
-extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
-extern int xfrm_init_state(struct xfrm_state *x);
-extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-		      int encap_type);
-extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
-extern int xfrm_output_resume(struct sk_buff *skb, int err);
-extern int xfrm_output(struct sk_buff *skb);
-extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern void xfrm_local_error(struct sk_buff *skb, int mtu);
-extern int xfrm4_extract_header(struct sk_buff *skb);
-extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
-			   int encap_type);
-extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm4_rcv(struct sk_buff *skb);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output(struct sk_buff *skb);
+int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_header(struct sk_buff *skb);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+		    int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
 	return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
 }
 
-extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_output(struct sk_buff *skb);
-extern int xfrm4_output_finish(struct sk_buff *skb);
-extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
-extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
-extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-extern int xfrm6_extract_header(struct sk_buff *skb);
-extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
-extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm6_rcv(struct sk_buff *skb);
-extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
-			    xfrm_address_t *saddr, u8 proto);
-extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
-extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
-extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
-extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_output(struct sk_buff *skb);
-extern int xfrm6_output_finish(struct sk_buff *skb);
-extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
-				 u8 **prevhdr);
-extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_output(struct sk_buff *skb);
+int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_extract_header(struct sk_buff *skb);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+		     xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_output(struct sk_buff *skb);
+int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
+			  u8 **prevhdr);
 
 #ifdef CONFIG_XFRM
-extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname,
+		     u8 __user *optval, int optlen);
 #else
 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
 {
@@ -1540,59 +1549,62 @@
 
 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
 
-extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
-extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
-	int (*func)(struct xfrm_policy *, int, int, void*), void *);
-extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+		     int (*func)(struct xfrm_policy *, int, int, void*),
+		     void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
 					  u8 type, int dir,
 					  struct xfrm_selector *sel,
 					  struct xfrm_sec_ctx *ctx, int delete,
 					  int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
+				     u32 id, int delete, int *err);
 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
-extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
 				 u8 mode, u32 reqid, u8 proto,
 				 const xfrm_address_t *daddr,
 				 const xfrm_address_t *saddr, int create,
 				 unsigned short family);
-extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 
 #ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-		      const struct xfrm_migrate *m, int num_bundles,
-		      const struct xfrm_kmaddress *k);
-extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
-extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
-					      struct xfrm_migrate *m);
-extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-			struct xfrm_migrate *m, int num_bundles,
-			struct xfrm_kmaddress *k);
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+	       const struct xfrm_migrate *m, int num_bundles,
+	       const struct xfrm_kmaddress *k);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+				      struct xfrm_migrate *m);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+		 struct xfrm_migrate *m, int num_bundles,
+		 struct xfrm_kmaddress *k);
 #endif
 
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
-extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+	      xfrm_address_t *addr);
 
-extern void xfrm_input_init(void);
-extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
 
-extern void xfrm_probe_algs(void);
-extern int xfrm_count_pfkey_auth_supported(void);
-extern int xfrm_count_pfkey_enc_supported(void);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
-						   int probe);
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+					    int probe);
 
 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
 				    const xfrm_address_t *b)
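
Beyond the extern cleanup, this hunk adds struct xfrm_tunnel_notifier, which carries only an input handler plus an RCU-linked priority chain, decoupling the mode-tunnel input callbacks from the heavier struct xfrm_tunnel. A hedged sketch of a consumer; the handler name and module-init placement are hypothetical:

    /* Hypothetical notifier for decapsulated IPv4 tunnel input. */
    static int my_tunnel_input(struct sk_buff *skb)
    {
            /* returning 0 lets normal processing continue */
            return 0;
    }

    static struct xfrm_tunnel_notifier my_notifier __read_mostly = {
            .handler  = my_tunnel_input,
            .priority = 1,
    };

    static int __init my_init(void)
    {
            return xfrm4_mode_tunnel_input_register(&my_notifier);
    }
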
diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h
index f87777d0..0b26714 100644
--- a/include/scsi/fc/fc_fc2.h
+++ b/include/scsi/fc/fc_fc2.h
@@ -104,7 +104,7 @@
  * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
  */
 #define ESB_ST_RESP         (1 << 31)   /* responder to exchange */
-#define ESB_ST_SEQ_INIT     (1 << 30)   /* port holds sequence initiaive */
+#define ESB_ST_SEQ_INIT     (1 << 30)   /* port holds sequence initiative */
 #define ESB_ST_COMPLETE     (1 << 29)   /* exchange is complete */
 #define ESB_ST_ABNORMAL     (1 << 28)   /* abnormal ending condition */
 #define ESB_ST_REC_QUAL     (1 << 26)   /* recovery qualifier active */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 13d81c5..5d6ed6c 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -69,6 +69,7 @@
 	ISCSI_UEVENT_LOGIN_FLASHNODE	= UEVENT_BASE + 28,
 	ISCSI_UEVENT_LOGOUT_FLASHNODE	= UEVENT_BASE + 29,
 	ISCSI_UEVENT_LOGOUT_FLASHNODE_SID	= UEVENT_BASE + 30,
+	ISCSI_UEVENT_SET_CHAP		= UEVENT_BASE + 31,
 
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -309,8 +310,16 @@
 	ISCSI_HOST_PARAM,	/* iscsi_host_param */
 	ISCSI_NET_PARAM,	/* iscsi_net_param */
 	ISCSI_FLASHNODE_PARAM,	/* iscsi_flashnode_param */
+	ISCSI_CHAP_PARAM,	/* iscsi_chap_param */
 };
 
+/* structure for minimalist use case */
+struct iscsi_param_info {
+	uint32_t len;		/* Actual length of the param value */
+	uint16_t param;		/* iscsi param */
+	uint8_t value[0];	/* length sized value follows */
+} __packed;
+
 struct iscsi_iface_param_info {
 	uint32_t iface_num;	/* iface number, 0 - n */
 	uint32_t len;		/* Actual length of the param */
@@ -739,6 +748,14 @@
 	CHAP_TYPE_IN,
 };
 
+enum iscsi_chap_param {
+	ISCSI_CHAP_PARAM_INDEX,
+	ISCSI_CHAP_PARAM_CHAP_TYPE,
+	ISCSI_CHAP_PARAM_USERNAME,
+	ISCSI_CHAP_PARAM_PASSWORD,
+	ISCSI_CHAP_PARAM_PASSWORD_LEN
+};
+
 #define ISCSI_CHAP_AUTH_NAME_MAX_LEN	256
 #define ISCSI_CHAP_AUTH_SECRET_MAX_LEN	256
 struct iscsi_chap_rec {
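
struct iscsi_param_info above carries its value inline after the fixed header via a zero-length trailing array, so senders size the allocation as header plus payload. A sketch of the pattern, assuming a NUL-terminated username; the helper name is hypothetical:

    static struct iscsi_param_info *pack_chap_username(const char *username)
    {
            size_t vlen = strlen(username) + 1;
            struct iscsi_param_info *info;

            info = kzalloc(sizeof(*info) + vlen, GFP_KERNEL);
            if (!info)
                    return NULL;
            info->param = ISCSI_CHAP_PARAM_USERNAME;
            info->len   = vlen;
            memcpy(info->value, username, vlen);
            return info;
    }
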
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index e1379b4..52beadf 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -410,6 +410,12 @@
  * @fh_type:      The frame type
  * @class:        The class of service
  * @seq:          The sequence in use on this exchange
+ * @resp_active:  Number of tasks that are concurrently executing @resp().
+ * @resp_task:    If @resp_active > 0, either the task executing @resp(), the
+ *                task that has been interrupted to execute the soft-IRQ
+ *                executing @resp(), or NULL if more than one task is executing
+ *                @resp() concurrently.
+ * @resp_wq:      Waitqueue for the tasks waiting on @resp_active.
  * @resp:         Callback for responses on this exchange
  * @destructor:   Called when destroying the exchange
  * @arg:          Passed as a void pointer to the resp() callback
@@ -441,6 +447,9 @@
 	u32		    r_a_tov;
 	u32		    f_ctl;
 	struct fc_seq       seq;
+	int		    resp_active;
+	struct task_struct  *resp_task;
+	wait_queue_head_t   resp_wq;
 	void		    (*resp)(struct fc_seq *, struct fc_frame *, void *);
 	void		    *arg;
 	void		    (*destructor)(struct fc_seq *, void *);
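
The three new fc_exch fields form a small drain protocol: code that invokes @resp() bumps resp_active (recording resp_task), and teardown paths sleep on resp_wq until the count reaches zero. A hedged sketch of the drain side, assuming ep->ex_lock protects resp_active as it does the other exchange state:

    static void fc_exch_wait_for_resp(struct fc_exch *ep)
    {
            spin_lock_bh(&ep->ex_lock);
            while (ep->resp_active && ep->resp_task != current) {
                    spin_unlock_bh(&ep->ex_lock);
                    wait_event(ep->resp_wq, ep->resp_active == 0 ||
                                            ep->resp_task == current);
                    spin_lock_bh(&ep->ex_lock);
            }
            spin_unlock_bh(&ep->ex_lock);
    }

The resp_task comparison lets the callback's own task pass through without deadlocking, which is why @resp_task is documented above.
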
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 4427393..de7e3ee 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -90,6 +90,7 @@
  * @lp:		   &fc_lport: libfc local port.
  * @sel_fcf:	   currently selected FCF, or NULL.
  * @fcfs:	   list of discovered FCFs.
+ * @cdev:          (Optional) pointer to sysfs fcoe_ctlr_device.
  * @fcf_count:	   number of discovered FCF entries.
  * @sol_time:	   time when a multicast solicitation was last sent.
  * @sel_time:	   time after which to select an FCF.
@@ -127,6 +128,7 @@
 	struct fc_lport *lp;
 	struct fcoe_fcf *sel_fcf;
 	struct list_head fcfs;
+	struct fcoe_ctlr_device *cdev;
 	u16 fcf_count;
 	unsigned long sol_time;
 	unsigned long sel_time;
@@ -168,8 +170,11 @@
 	return (void *)(ctlr + 1);
 }
 
+/*
+ * This assumes that the fcoe_ctlr (x) is allocated with the fcoe_ctlr_device.
+ */
 #define fcoe_ctlr_to_ctlr_dev(x)					\
-	(struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+	(x)->cdev
 
 /**
  * struct fcoe_fcf - Fibre-Channel Forwarder
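
The old fcoe_ctlr_to_ctlr_dev() recovered the fcoe_ctlr_device by pointer arithmetic, which was valid only when the fcoe_ctlr was allocated immediately after it; the new @cdev member makes the association explicit and layout-independent. Sketch of the contrast, with dev and ctlr assumed in scope:

    /* before: valid only if ctlr was placed right after its device */
    dev = ((struct fcoe_ctlr_device *)ctlr) - 1;

    /* after: explicit back-pointer, independent of allocation layout */
    dev = ctlr->cdev;
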
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7552435..5460849 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -598,9 +598,12 @@
 	unsigned int host_eh_scheduled;    /* EH scheduled without command */
     
 	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
-	int resetting; /* if set, it means that last_reset is a valid value */
+
+	/* next two fields are used to bound the time spent in error handling */
+	int eh_deadline;
 	unsigned long last_reset;
 
+
 	/*
 	 * These three parameters can be used to allow for wide scsi,
 	 * and for host adapters that support multiple busses
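
With the resetting flag gone, last_reset records when error handling began and eh_deadline (0 = unlimited) bounds how long recovery may run. A hedged sketch of the check an EH path might perform, assuming eh_deadline is kept in jiffies:

    /* Sketch: has this host's error-handling budget expired? */
    static bool eh_past_deadline(struct Scsi_Host *shost)
    {
            if (!shost->eh_deadline)
                    return false;   /* no deadline configured */
            return time_after(jiffies,
                              shost->last_reset + shost->eh_deadline);
    }
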
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index d0f1602..fe7c8f3 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -152,6 +152,7 @@
 	int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
 			 uint32_t *num_entries, char *buf);
 	int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+	int (*set_chap) (struct Scsi_Host *shost, void *data, int len);
 	int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
 				    int param, char *buf);
 	int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
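
The new .set_chap hook is the receiving end of ISCSI_UEVENT_SET_CHAP: the transport class hands the netlink payload to the LLD as an opaque (data, len) buffer of iscsi_param_info entries. A hypothetical driver stub (validation shown is a sketch, not the transport-class contract):

    static int my_set_chap(struct Scsi_Host *shost, void *data, int len)
    {
            struct iscsi_param_info *info = data;

            if (len < 0 || (size_t)len < sizeof(*info) + info->len)
                    return -EINVAL;
            /* ... program the CHAP record into adapter storage ... */
            return 0;
    }
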
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 5ebda97..e2b9576 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -6,11 +6,9 @@
 
 #include <linux/tracepoint.h>
 
-struct search;
-
 DECLARE_EVENT_CLASS(bcache_request,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio),
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio),
 
 	TP_STRUCT__entry(
 		__field(dev_t,		dev			)
@@ -24,8 +22,8 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->orig_major	= s->d->disk->major;
-		__entry->orig_minor	= s->d->disk->first_minor;
+		__entry->orig_major	= d->disk->major;
+		__entry->orig_minor	= d->disk->first_minor;
 		__entry->sector		= bio->bi_sector;
 		__entry->orig_sector	= bio->bi_sector - 16;
 		__entry->nr_sector	= bio->bi_size >> 9;
@@ -79,13 +77,13 @@
 /* request.c */
 
 DEFINE_EVENT(bcache_request, bcache_request_start,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );
 
 DEFINE_EVENT(bcache_request, bcache_request_end,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );
 
 DECLARE_EVENT_CLASS(bcache_bio,
@@ -370,6 +368,35 @@
 	TP_ARGS(b)
 );
 
+TRACE_EVENT(bcache_keyscan,
+	TP_PROTO(unsigned nr_found,
+		 unsigned start_inode, uint64_t start_offset,
+		 unsigned end_inode, uint64_t end_offset),
+	TP_ARGS(nr_found,
+		start_inode, start_offset,
+		end_inode, end_offset),
+
+	TP_STRUCT__entry(
+		__field(__u32,	nr_found			)
+		__field(__u32,	start_inode			)
+		__field(__u64,	start_offset			)
+		__field(__u32,	end_inode			)
+		__field(__u64,	end_offset			)
+	),
+
+	TP_fast_assign(
+		__entry->nr_found	= nr_found;
+		__entry->start_inode	= start_inode;
+		__entry->start_offset	= start_offset;
+		__entry->end_inode	= end_inode;
+		__entry->end_offset	= end_offset;
+	),
+
+	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
+		  __entry->start_inode, __entry->start_offset,
+		  __entry->end_inode, __entry->end_offset)
+);
+
 /* Allocator */
 
 TRACE_EVENT(bcache_alloc_invalidate,
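
With the bcache_request class keyed on bcache_device instead of the request.c-private struct search, the forward declaration disappears and call sites pass only what the event actually reads:

    /* drivers/md/bcache/request.c call site, old vs. new (sketch): */
    trace_bcache_request_start(s, bio);     /* old: exposes struct search */
    trace_bcache_request_start(s->d, bio);  /* new: just the device */
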
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 52ae548..e0dc355 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -36,6 +36,11 @@
 		{ CURSEG_COLD_NODE, 	"Cold NODE" },			\
 		{ NO_CHECK_TYPE, 	"No TYPE" })
 
+#define show_file_type(type)						\
+	__print_symbolic(type,						\
+		{ 0,		"FILE" },				\
+		{ 1,		"DIR" })
+
 #define show_gc_type(type)						\
 	__print_symbolic(type,						\
 		{ FG_GC,	"Foreground GC" },			\
@@ -623,6 +628,52 @@
 		__entry->size)
 );
 
+DECLARE_EVENT_CLASS(f2fs__page,
+
+	TP_PROTO(struct page *page, int type),
+
+	TP_ARGS(page, type),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(int, type)
+		__field(int, dir)
+		__field(pgoff_t, index)
+		__field(int, dirty)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= page->mapping->host->i_sb->s_dev;
+		__entry->ino	= page->mapping->host->i_ino;
+		__entry->type	= type;
+		__entry->dir	= S_ISDIR(page->mapping->host->i_mode);
+		__entry->index	= page->index;
+		__entry->dirty	= PageDirty(page);
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, dirty = %d",
+		show_dev_ino(__entry),
+		show_block_type(__entry->type),
+		show_file_type(__entry->dir),
+		(unsigned long)__entry->index,
+		__entry->dirty)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+
+	TP_PROTO(struct page *page, int type),
+
+	TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+
+	TP_PROTO(struct page *page, int type),
+
+	TP_ARGS(page, type)
+);
+
 TRACE_EVENT(f2fs_submit_write_page,
 
 	TP_PROTO(struct page *page, block_t blk_addr, int type),
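
Since f2fs__page derives the inode, directory bit, index and dirty state from the page itself, the new events keep call sites to one line each; for instance (sketch, assuming the usual trace_ prefix convention):

    trace_f2fs_set_page_dirty(page, DATA);
    trace_f2fs_vm_page_mkwrite(page, DATA);
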
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
new file mode 100644
index 0000000..a8f5c32
--- /dev/null
+++ b/include/trace/events/iommu.h
@@ -0,0 +1,162 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iommu
+
+#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IOMMU_H
+
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+struct device;
+
+DECLARE_EVENT_CLASS(iommu_group_event,
+
+	TP_PROTO(int group_id, struct device *dev),
+
+	TP_ARGS(group_id, dev),
+
+	TP_STRUCT__entry(
+		__field(int, gid)
+		__string(device, dev_name(dev))
+	),
+
+	TP_fast_assign(
+		__entry->gid = group_id;
+		__assign_str(device, dev_name(dev));
+	),
+
+	TP_printk("IOMMU: groupID=%d device=%s",
+			__entry->gid, __get_str(device)
+	)
+);
+
+DEFINE_EVENT(iommu_group_event, add_device_to_group,
+
+	TP_PROTO(int group_id, struct device *dev),
+
+	TP_ARGS(group_id, dev)
+
+);
+
+DEFINE_EVENT(iommu_group_event, remove_device_from_group,
+
+	TP_PROTO(int group_id, struct device *dev),
+
+	TP_ARGS(group_id, dev)
+);
+
+DECLARE_EVENT_CLASS(iommu_device_event,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(dev))
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(dev));
+	),
+
+	TP_printk("IOMMU: device=%s", __get_str(device)
+	)
+);
+
+DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+);
+
+DECLARE_EVENT_CLASS(iommu_map_unmap,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size),
+
+	TP_STRUCT__entry(
+		__field(u64, iova)
+		__field(u64, paddr)
+		__field(int, size)
+	),
+
+	TP_fast_assign(
+		__entry->iova = iova;
+		__entry->paddr = paddr;
+		__entry->size = size;
+	),
+
+	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+			__entry->iova, __entry->paddr, __entry->size
+	)
+);
+
+DEFINE_EVENT(iommu_map_unmap, map,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size)
+);
+
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
+
+	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+	TP_ARGS(iova, paddr, size),
+
+	TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+			__entry->iova, __entry->size
+	)
+);
+
+DECLARE_EVENT_CLASS(iommu_error,
+
+	TP_PROTO(struct device *dev, unsigned long iova, int flags),
+
+	TP_ARGS(dev, iova, flags),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(dev))
+		__string(driver, dev_driver_string(dev))
+		__field(u64, iova)
+		__field(int, flags)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(dev));
+		__assign_str(driver, dev_driver_string(dev));
+		__entry->iova = iova;
+		__entry->flags = flags;
+	),
+
+	TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
+			__get_str(driver), __get_str(device),
+			__entry->iova, __entry->flags
+	)
+);
+
+DEFINE_EVENT(iommu_error, io_page_fault,
+
+	TP_PROTO(struct device *dev, unsigned long iova, int flags),
+
+	TP_ARGS(dev, iova, flags)
+);
+#endif /* _TRACE_IOMMU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
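
Each DEFINE_EVENT above expands to a trace_<name>() hook the IOMMU core can fire; a sketch of plausible call sites (the exact locations are an assumption):

    /* exactly one .c file instantiates the points: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/iommu.h>

    /* on group membership changes: */
    trace_add_device_to_group(group->id, dev);
    trace_remove_device_from_group(group->id, dev);

    /* around successful iommu_map()/iommu_unmap(): */
    trace_map(iova, paddr, size);
    trace_unmap(iova, paddr, size);
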
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index d0c6134..aece134 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -267,14 +267,12 @@
 TRACE_EVENT(mm_page_alloc_extfrag,
 
 	TP_PROTO(struct page *page,
-			int alloc_order, int fallback_order,
-			int alloc_migratetype, int fallback_migratetype,
-			int change_ownership),
+		int alloc_order, int fallback_order,
+		int alloc_migratetype, int fallback_migratetype, int new_migratetype),
 
 	TP_ARGS(page,
 		alloc_order, fallback_order,
-		alloc_migratetype, fallback_migratetype,
-		change_ownership),
+		alloc_migratetype, fallback_migratetype, new_migratetype),
 
 	TP_STRUCT__entry(
 		__field(	struct page *,	page			)
@@ -291,7 +289,7 @@
 		__entry->fallback_order		= fallback_order;
 		__entry->alloc_migratetype	= alloc_migratetype;
 		__entry->fallback_migratetype	= fallback_migratetype;
-		__entry->change_ownership	= change_ownership;
+		__entry->change_ownership	= (new_migratetype == alloc_migratetype);
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7005d11..131a0bd 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -296,23 +296,21 @@
 
 TRACE_EVENT(
 	kvm_async_pf_completed,
-	TP_PROTO(unsigned long address, struct page *page, u64 gva),
-	TP_ARGS(address, page, gva),
+	TP_PROTO(unsigned long address, u64 gva),
+	TP_ARGS(address, gva),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, address)
-		__field(pfn_t, pfn)
 		__field(u64, gva)
 		),
 
 	TP_fast_assign(
 		__entry->address = address;
-		__entry->pfn = page ? page_to_pfn(page) : 0;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
-		  __entry->address, __entry->pfn)
+	TP_printk("gva %#llx address %#lx",  __entry->gva,
+		  __entry->address)
 );
 
 #endif
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644
index 0000000..f76dd4d
--- /dev/null
+++ b/include/trace/events/power_cpu_migrate.h
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto			\
+	TP_PROTO(u64 timestamp,			\
+		 u32 cpu_hwid)
+#define __cpu_migrate_args			\
+	TP_ARGS(timestamp,			\
+		cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+	__cpu_migrate_proto,
+	__cpu_migrate_args,
+
+	TP_STRUCT__entry(
+		__field(u64,	timestamp		)
+		__field(u32,	cpu_hwid		)
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->cpu_hwid = cpu_hwid;
+	),
+
+	TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+		(unsigned long long)__entry->timestamp,
+		(unsigned long)__entry->cpu_hwid
+	)
+);
+
+#define __define_cpu_migrate_event(name)		\
+	DEFINE_EVENT(cpu_migrate, cpu_migrate_##name,	\
+		__cpu_migrate_proto,			\
+		__cpu_migrate_args			\
+	)
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate_event
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set cpu_hwid to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration:
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
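
The begin/finish/current triplet shares one template via __define_cpu_migrate_event, so a cluster switcher would bracket a migration roughly like this (read_ns_counter() is a hypothetical timestamp source):

    trace_cpu_migrate_begin(read_ns_counter(), cpu_hwid);
    /* ... hand execution over to the inbound cluster ... */
    trace_cpu_migrate_finish(read_ns_counter(), cpu_hwid);

Passing CPU_MIGRATE_ALL_CPUS as cpu_hwid marks a whole-cluster migration, per the comment above.
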
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 422df19..805af6d 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -7,6 +7,25 @@
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(add_device_randomness,
+	TP_PROTO(int bytes, unsigned long IP),
+
+	TP_ARGS(bytes, IP),
+
+	TP_STRUCT__entry(
+		__field(	  int,	bytes			)
+		__field(unsigned long,	IP			)
+	),
+
+	TP_fast_assign(
+		__entry->bytes		= bytes;
+		__entry->IP		= IP;
+	),
+
+	TP_printk("bytes %d caller %pF",
+		__entry->bytes, (void *)__entry->IP)
+);
+
 DECLARE_EVENT_CLASS(random__mix_pool_bytes,
 	TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
 
@@ -68,7 +87,112 @@
 		  (void *)__entry->IP)
 );
 
-TRACE_EVENT(get_random_bytes,
+TRACE_EVENT(push_to_pool,
+	TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
+
+	TP_ARGS(pool_name, pool_bits, input_bits),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name		)
+		__field(	  int,	pool_bits		)
+		__field(	  int,	input_bits		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->pool_bits	= pool_bits;
+		__entry->input_bits	= input_bits;
+	),
+
+	TP_printk("%s: pool_bits %d input_pool_bits %d",
+		  __entry->pool_name, __entry->pool_bits,
+		  __entry->input_bits)
+);
+
+TRACE_EVENT(debit_entropy,
+	TP_PROTO(const char *pool_name, int debit_bits),
+
+	TP_ARGS(pool_name, debit_bits),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name		)
+		__field(	  int,	debit_bits		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->debit_bits	= debit_bits;
+	),
+
+	TP_printk("%s: debit_bits %d", __entry->pool_name,
+		  __entry->debit_bits)
+);
+
+TRACE_EVENT(add_input_randomness,
+	TP_PROTO(int input_bits),
+
+	TP_ARGS(input_bits),
+
+	TP_STRUCT__entry(
+		__field(	  int,	input_bits		)
+	),
+
+	TP_fast_assign(
+		__entry->input_bits	= input_bits;
+	),
+
+	TP_printk("input_pool_bits %d", __entry->input_bits)
+);
+
+TRACE_EVENT(add_disk_randomness,
+	TP_PROTO(dev_t dev, int input_bits),
+
+	TP_ARGS(dev, input_bits),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(	  int,	input_bits		)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= dev;
+		__entry->input_bits	= input_bits;
+	),
+
+	TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
+		  MINOR(__entry->dev), __entry->input_bits)
+);
+
+TRACE_EVENT(xfer_secondary_pool,
+	TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
+		 int pool_entropy, int input_entropy),
+
+	TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
+		input_entropy),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name		)
+		__field(	  int,	xfer_bits		)
+		__field(	  int,	request_bits		)
+		__field(	  int,	pool_entropy		)
+		__field(	  int,	input_entropy		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->xfer_bits	= xfer_bits;
+		__entry->request_bits	= request_bits;
+		__entry->pool_entropy	= pool_entropy;
+		__entry->input_entropy	= input_entropy;
+	),
+
+	TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
+		  "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
+		  __entry->request_bits, __entry->pool_entropy,
+		  __entry->input_entropy)
+);
+
+DECLARE_EVENT_CLASS(random__get_random_bytes,
 	TP_PROTO(int nbytes, unsigned long IP),
 
 	TP_ARGS(nbytes, IP),
@@ -86,6 +210,18 @@
 	TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
 );
 
+DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
+	TP_PROTO(int nbytes, unsigned long IP),
+
+	TP_ARGS(nbytes, IP)
+);
+
+DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
+	TP_PROTO(int nbytes, unsigned long IP),
+
+	TP_ARGS(nbytes, IP)
+);
+
 DECLARE_EVENT_CLASS(random__extract_entropy,
 	TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
 		 unsigned long IP),
@@ -126,7 +262,52 @@
 	TP_ARGS(pool_name, nbytes, entropy_count, IP)
 );
 
+TRACE_EVENT(random_read,
+	TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
 
+	TP_ARGS(got_bits, need_bits, pool_left, input_left),
+
+	TP_STRUCT__entry(
+		__field(	  int,	got_bits		)
+		__field(	  int,	need_bits		)
+		__field(	  int,	pool_left		)
+		__field(	  int,	input_left		)
+	),
+
+	TP_fast_assign(
+		__entry->got_bits	= got_bits;
+		__entry->need_bits	= need_bits;
+		__entry->pool_left	= pool_left;
+		__entry->input_left	= input_left;
+	),
+
+	TP_printk("got_bits %d still_needed_bits %d "
+		  "blocking_pool_entropy_left %d input_entropy_left %d",
+		  __entry->got_bits, __entry->need_bits, __entry->pool_left,
+		  __entry->input_left)
+);
+
+TRACE_EVENT(urandom_read,
+	TP_PROTO(int got_bits, int pool_left, int input_left),
+
+	TP_ARGS(got_bits, pool_left, input_left),
+
+	TP_STRUCT__entry(
+		__field(	  int,	got_bits		)
+		__field(	  int,	pool_left		)
+		__field(	  int,	input_left		)
+	),
+
+	TP_fast_assign(
+		__entry->got_bits	= got_bits;
+		__entry->pool_left	= pool_left;
+		__entry->input_left	= input_left;
+	),
+
+	TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
+		  "input_entropy_left %d", __entry->got_bits,
+		  __entry->pool_left, __entry->input_left)
+);
 
 #endif /* _TRACE_RANDOM_H */
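
Folding get_random_bytes into a DECLARE_EVENT_CLASS lets get_random_bytes() and the new get_random_bytes_arch() share one TP_STRUCT__entry/TP_fast_assign template instead of duplicating it. The producers then simply fire the generated hooks, e.g. (sketch of the drivers/char/random.c side):

    void get_random_bytes(void *buf, int nbytes)
    {
            trace_get_random_bytes(nbytes, _RET_IP_);
            /* ... extract entropy from the nonblocking pool ... */
    }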
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 613381b..04c3084 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -424,6 +424,25 @@
 			__entry->oldprio, __entry->newprio)
 );
 
+#ifdef CONFIG_DETECT_HUNG_TASK
+TRACE_EVENT(sched_process_hang,
+	TP_PROTO(struct task_struct *tsk),
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+	),
+
+	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+);
+#endif /* CONFIG_DETECT_HUNG_TASK */
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
new file mode 100644
index 0000000..7ea4c5e
--- /dev/null
+++ b/include/trace/events/swiotlb.h
@@ -0,0 +1,46 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM swiotlb
+
+#if !defined(_TRACE_SWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SWIOTLB_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(swiotlb_bounced,
+
+	TP_PROTO(struct device *dev,
+		 dma_addr_t dev_addr,
+		 size_t size,
+		 int swiotlb_force),
+
+	TP_ARGS(dev, dev_addr, size, swiotlb_force),
+
+	TP_STRUCT__entry(
+		__string(	dev_name,	dev_name(dev)	)
+		__field(	u64,	dma_mask		)
+		__field(	dma_addr_t,	dev_addr	)
+		__field(	size_t,	size			)
+		__field(	int,	swiotlb_force		)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name(dev));
+		__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
+		__entry->dev_addr = dev_addr;
+		__entry->size = size;
+		__entry->swiotlb_force = swiotlb_force;
+	),
+
+	TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
+		"size=%zu %s",
+		__get_str(dev_name),
+		__entry->dma_mask,
+		(unsigned long long)__entry->dev_addr,
+		__entry->size,
+		__entry->swiotlb_force ? "swiotlb_force" : "")
+);
+
+#endif /*  _TRACE_SWIOTLB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 464ea82..c7bbbe7 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,11 +287,11 @@
 		__field(int,		reason)
 	),
 	TP_fast_assign(
-		unsigned long *older_than_this = work->older_than_this;
+		unsigned long older_than_this = work->older_than_this;
 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
-		__entry->older	= older_than_this ?  *older_than_this : 0;
+		__entry->older	= older_than_this;
 		__entry->age	= older_than_this ?
-				  (jiffies - *older_than_this) * 1000 / HZ : -1;
+				  (jiffies - older_than_this) * 1000 / HZ : -1;
 		__entry->moved	= moved;
 		__entry->reason	= work->reason;
 	),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5c7ab17..52594b2 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -437,9 +437,8 @@
  *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_nowake_buffer_unlock_commit(buffer,
- *						   event, irq_flags, pc);
+ *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -553,7 +552,7 @@
 									\
 	{ assign; }							\
 									\
-	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))	\
 		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index a1331ce..1e1ea6e 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -86,7 +86,7 @@
 #define	EHOSTUNREACH	113	/* No route to host */
 #define	EALREADY	114	/* Operation already in progress */
 #define	EINPROGRESS	115	/* Operation now in progress */
-#define	ESTALE		116	/* Stale NFS file handle */
+#define	ESTALE		116	/* Stale file handle */
 #define	EUCLEAN		117	/* Structure needs cleaning */
 #define	ENOTNAM		118	/* Not a XENIX named type file */
 #define	ENAVAIL		119	/* No XENIX semaphores available */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index f04b69b..38f14d0 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -78,4 +78,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* __ASM_GENERIC_SOCKET_H */
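
SO_MAX_PACING_RATE caps the socket's transmit rate in bytes per second, enforced by pacing-aware paths such as the fq qdisc. Minimal userspace usage (sketch):

    #include <sys/socket.h>

    unsigned int rate = 1000000;    /* ~1 MB/s, in bytes per second */

    if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                   &rate, sizeof(rate)) < 0)
            perror("setsockopt(SO_MAX_PACING_RATE)");
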
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
new file mode 100644
index 0000000..8dec3fd
--- /dev/null
+++ b/include/uapi/drm/armada_drm.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  With inspiration from the i915 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DRM_ARMADA_IOCTL_H
+#define DRM_ARMADA_IOCTL_H
+
+#define DRM_ARMADA_GEM_CREATE		0x00
+#define DRM_ARMADA_GEM_MMAP		0x02
+#define DRM_ARMADA_GEM_PWRITE		0x03
+
+#define ARMADA_IOCTL(dir, name, str) \
+	DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
+
+struct drm_armada_gem_create {
+	uint32_t handle;
+	uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_CREATE \
+	ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
+
+struct drm_armada_gem_mmap {
+	uint32_t handle;
+	uint32_t pad;
+	uint64_t offset;
+	uint64_t size;
+	uint64_t addr;
+};
+#define DRM_IOCTL_ARMADA_GEM_MMAP \
+	ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
+
+struct drm_armada_gem_pwrite {
+	uint64_t ptr;
+	uint32_t handle;
+	uint32_t offset;
+	uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_PWRITE \
+	ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
+
+#endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece8678..9b24d65 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@
 	__u64 size;
 };
 
+#define DRM_CAP_DUMB_BUFFER		0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
+#define DRM_CAP_PRIME			0x5
+#define  DRM_PRIME_CAP_IMPORT		0x1
+#define  DRM_PRIME_CAP_EXPORT		0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
+
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
 	__u64 capability;
 	__u64 value;
 };
 
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D	1
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+	__u64 capability;
+	__u64 value;
+};
+
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
 	__u32 handle;
@@ -649,6 +674,7 @@
 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
 #define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@
 	__u32 reserved;
 };
 
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-
-#define DRM_PRIME_CAP_IMPORT 0x1
-#define DRM_PRIME_CAP_EXPORT 0x2
-
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
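
Unlike DRM_IOCTL_GET_CAP, SET_CLIENT_CAP is DRM_IOW: the client only writes its capability, nothing is read back. A userspace client that understands stereo modes opts in like this (sketch; header paths depend on the libdrm install):

    struct drm_set_client_cap cap = {
            .capability = DRM_CLIENT_CAP_STEREO_3D,
            .value      = 1,
    };

    if (ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) < 0)
            ; /* kernel predates the ioctl or the capability */
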
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 28acbaf..f104c26 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
 
 /* Video mode flags */
 /* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC	(1<<0)
-#define DRM_MODE_FLAG_NHSYNC	(1<<1)
-#define DRM_MODE_FLAG_PVSYNC	(1<<2)
-#define DRM_MODE_FLAG_NVSYNC	(1<<3)
-#define DRM_MODE_FLAG_INTERLACE	(1<<4)
-#define DRM_MODE_FLAG_DBLSCAN	(1<<5)
-#define DRM_MODE_FLAG_CSYNC	(1<<6)
-#define DRM_MODE_FLAG_PCSYNC	(1<<7)
-#define DRM_MODE_FLAG_NCSYNC	(1<<8)
-#define DRM_MODE_FLAG_HSKEW	(1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST	(1<<10)
-#define DRM_MODE_FLAG_PIXMUX	(1<<11)
-#define DRM_MODE_FLAG_DBLCLK	(1<<12)
-#define DRM_MODE_FLAG_CLKDIV2	(1<<13)
+#define DRM_MODE_FLAG_PHSYNC			(1<<0)
+#define DRM_MODE_FLAG_NHSYNC			(1<<1)
+#define DRM_MODE_FLAG_PVSYNC			(1<<2)
+#define DRM_MODE_FLAG_NVSYNC			(1<<3)
+#define DRM_MODE_FLAG_INTERLACE			(1<<4)
+#define DRM_MODE_FLAG_DBLSCAN			(1<<5)
+#define DRM_MODE_FLAG_CSYNC			(1<<6)
+#define DRM_MODE_FLAG_PCSYNC			(1<<7)
+#define DRM_MODE_FLAG_NCSYNC			(1<<8)
+#define DRM_MODE_FLAG_HSKEW			(1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST			(1<<10)
+#define DRM_MODE_FLAG_PIXMUX			(1<<11)
+#define DRM_MODE_FLAG_DBLCLK			(1<<12)
+#define DRM_MODE_FLAG_CLKDIV2			(1<<13)
+/*
+ * When adding a new stereo mode, don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK			(0x1f<<14)
+#define  DRM_MODE_FLAG_3D_NONE			(0<<14)
+#define  DRM_MODE_FLAG_3D_FRAME_PACKING		(1<<14)
+#define  DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE	(2<<14)
+#define  DRM_MODE_FLAG_3D_LINE_ALTERNATIVE	(3<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL	(4<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH		(5<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH	(6<<14)
+#define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
+
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
@@ -165,6 +180,7 @@
 #define DRM_MODE_ENCODER_LVDS	3
 #define DRM_MODE_ENCODER_TVDAC	4
 #define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI	6
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -203,6 +219,7 @@
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
+#define DRM_MODE_CONNECTOR_DSI		16
 
 struct drm_mode_get_connector {
 
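(Illustrative sketch, not part of this patch: once DRM_CLIENT_CAP_STEREO_3D
is enabled, a client can classify a mode's stereo layout by masking the new
flag bits; struct drm_mode_modeinfo and its flags field come from this
header.)

	static int mode_is_stereo(const struct drm_mode_modeinfo *mode)
	{
		/* Bits 14..18 carry the 3D layout; 0 means a 2D mode. */
		switch (mode->flags & DRM_MODE_FLAG_3D_MASK) {
		case DRM_MODE_FLAG_3D_NONE:
			return 0;
		case DRM_MODE_FLAG_3D_FRAME_PACKING:
		case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
		case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
		default:
			return 1;	/* some stereo layout is in use */
		}
	}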
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 55bb572..3a4e97b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,10 +38,10 @@
  *
  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
  *	event from the gpu l3 cache. Additional information supplied is ROW,
- *	BANK, SUBBANK of the affected cacheline. Userspace should keep track of
- *	these events and if a specific cache-line seems to have a persistent
- *	error remap it with the l3 remapping tool supplied in intel-gpu-tools.
- *	The value supplied with the event is always 1.
+ *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ *	track of these events and, if a specific cache-line seems to have a
+ *	persistent error, remap it with the l3 remapping tool supplied in
+ *	intel-gpu-tools.  The value supplied with the event is always 1.
  *
  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
  *	hangcheck. The error detection event is a good indicator of when things
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 73bde4e..5e1ab55 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -19,6 +19,9 @@
 
 #include <drm/drm.h>
 
+#define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
 struct drm_tegra_gem_create {
 	__u64 size;
 	__u32 flags;
@@ -65,6 +68,12 @@
 	__u32 id;
 };
 
+struct drm_tegra_get_syncpt_base {
+	__u64 context;
+	__u32 syncpt;
+	__u32 id;
+};
+
 struct drm_tegra_syncpt {
 	__u32 id;
 	__u32 incrs;
@@ -115,15 +124,16 @@
 	__u32 reserved[5];	/* future expansion */
 };
 
-#define DRM_TEGRA_GEM_CREATE	0x00
-#define DRM_TEGRA_GEM_MMAP	0x01
-#define DRM_TEGRA_SYNCPT_READ	0x02
-#define DRM_TEGRA_SYNCPT_INCR	0x03
-#define DRM_TEGRA_SYNCPT_WAIT	0x04
-#define DRM_TEGRA_OPEN_CHANNEL	0x05
-#define DRM_TEGRA_CLOSE_CHANNEL	0x06
-#define DRM_TEGRA_GET_SYNCPT	0x07
-#define DRM_TEGRA_SUBMIT	0x08
+#define DRM_TEGRA_GEM_CREATE		0x00
+#define DRM_TEGRA_GEM_MMAP		0x01
+#define DRM_TEGRA_SYNCPT_READ		0x02
+#define DRM_TEGRA_SYNCPT_INCR		0x03
+#define DRM_TEGRA_SYNCPT_WAIT		0x04
+#define DRM_TEGRA_OPEN_CHANNEL		0x05
+#define DRM_TEGRA_CLOSE_CHANNEL		0x06
+#define DRM_TEGRA_GET_SYNCPT		0x07
+#define DRM_TEGRA_SUBMIT		0x08
+#define DRM_TEGRA_GET_SYNCPT_BASE	0x09
 
 #define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
 #define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
@@ -134,5 +144,6 @@
 #define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
 #define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
 
 #endif
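(Illustrative sketch, not part of this patch: driving the new ioctl. fd,
context and syncpt_id are hypothetical values obtained elsewhere -- the
channel context would come from DRM_IOCTL_TEGRA_OPEN_CHANNEL -- and the id
field is assumed to be filled in by the kernel with the wait base.)

	struct drm_tegra_get_syncpt_base args = {
		.context = context,	/* channel context */
		.syncpt  = syncpt_id,	/* syncpoint to query */
	};

	if (ioctl(fd, DRM_IOCTL_TEGRA_GET_SYNCPT_BASE, &args) == 0)
		base_id = args.id;	/* wait base returned by the kernel */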
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
new file mode 100644
index 0000000..164a7e2
--- /dev/null
+++ b/include/uapi/linux/bcache.h
@@ -0,0 +1,373 @@
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <asm/types.h>
+
+#define BITMASK(name, type, field, offset, size)		\
+static inline __u64 name(const type *k)				\
+{ return (k->field >> offset) & ~(~0ULL << size); }		\
+								\
+static inline void SET_##name(type *k, __u64 v)			\
+{								\
+	k->field &= ~(~(~0ULL << size) << offset);		\
+	k->field |= (v & ~(~0ULL << size)) << offset;		\
+}
+
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+	__u64	high;
+	__u64	low;
+	__u64	ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size)				\
+	BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size)					\
+static inline __u64 name(const struct bkey *k, unsigned i)		\
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); }			\
+									\
+static inline void SET_##name(struct bkey *k, unsigned i, __u64 v)	\
+{									\
+	k->ptr[i] &= ~(~(~0ULL << size) << offset);			\
+	k->ptr[i] |= (v & ~(~0ULL << size)) << offset;			\
+}
+
+#define KEY_SIZE_BITS		16
+
+KEY_FIELD(KEY_PTRS,	high, 60, 3)
+KEY_FIELD(HEADER_SIZE,	high, 58, 2)
+KEY_FIELD(KEY_CSUM,	high, 56, 2)
+KEY_FIELD(KEY_PINNED,	high, 55, 1)
+KEY_FIELD(KEY_DIRTY,	high, 36, 1)
+
+KEY_FIELD(KEY_SIZE,	high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE,	high, 0,  20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+	return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+	k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size)					\
+((struct bkey) {							\
+	.high = (1ULL << 63) | ((__u64) (size) << 20) | (inode),	\
+	.low = (offset)							\
+})
+
+#define ZERO_KEY			KEY(0, 0, 0)
+
+#define MAX_KEY_INODE			(~(~0 << 20))
+#define MAX_KEY_OFFSET			(~0ULL >> 1)
+#define MAX_KEY				KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k)			(KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k)			KEY(KEY_INODE(k), KEY_START(k), 0)
+
+#define PTR_DEV_BITS			12
+
+PTR_FIELD(PTR_DEV,			51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET,			8,  43)
+PTR_FIELD(PTR_GEN,			0,  8)
+
+#define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
+
+#define PTR(gen, offset, dev)						\
+	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+	return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+	return bkey_u64s(k) * sizeof(__u64);
+}
+
+#define bkey_copy(_dest, _src)	memcpy(_dest, _src, bkey_bytes(_src))
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+	SET_KEY_INODE(dest, KEY_INODE(src));
+	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+	__u64 *d = (void *) k;
+	return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+{
+	__u64 *d = (void *) k;
+	return (struct bkey *) (d + nr_keys);
+}
+
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD		8
+
+#define BKEY_PADDED(key)					\
+	union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
+
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV		0
+#define BCACHE_SB_VERSION_BDEV		1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_MAX_VERSION		4
+
+#define SB_SECTOR			8
+#define SB_SIZE				4096
+#define SB_LABEL_SIZE			32
+#define SB_JOURNAL_BUCKETS		256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET		8
+
+#define BDEV_DATA_START_DEFAULT		16	/* sectors */
+
+struct cache_sb {
+	__u64			csum;
+	__u64			offset;	/* sector where this sb was written */
+	__u64			version;
+
+	__u8			magic[16];
+
+	__u8			uuid[16];
+	union {
+		__u8		set_uuid[16];
+		__u64		set_magic;
+	};
+	__u8			label[SB_LABEL_SIZE];
+
+	__u64			flags;
+	__u64			seq;
+	__u64			pad[8];
+
+	union {
+	struct {
+		/* Cache devices */
+		__u64		nbuckets;	/* device size */
+
+		__u16		block_size;	/* sectors */
+		__u16		bucket_size;	/* sectors */
+
+		__u16		nr_in_set;
+		__u16		nr_this_dev;
+	};
+	struct {
+		/* Backing devices */
+		__u64		data_offset;
+
+		/*
+		 * block_size from the cache device section is still used by
+		 * backing devices, so don't add anything here until we fix
+		 * things to not need it for backing devices anymore
+		 */
+	};
+	};
+
+	__u32			last_mount;	/* time_t */
+
+	__u16			first_bucket;
+	union {
+		__u16		njournal_buckets;
+		__u16		keys;
+	};
+	__u64			d[SB_JOURNAL_BUCKETS];	/* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+	return sb->version == BCACHE_SB_VERSION_BDEV
+		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+}
+
+BITMASK(CACHE_SYNC,			struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD,			struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT,		struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU		0U
+#define CACHE_REPLACEMENT_FIFO		1U
+#define CACHE_REPLACEMENT_RANDOM	2U
+
+BITMASK(BDEV_CACHE_MODE,		struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH		0U
+#define CACHE_MODE_WRITEBACK		1U
+#define CACHE_MODE_WRITEAROUND		2U
+#define CACHE_MODE_NONE			3U
+BITMASK(BDEV_STATE,			struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE			0U
+#define BDEV_STATE_CLEAN		1U
+#define BDEV_STATE_DIRTY		2U
+#define BDEV_STATE_STALE		3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC			0x245235c1a3625032ULL
+#define PSET_MAGIC			0x6750e15f87337f91ULL
+#define BSET_MAGIC			0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+	return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+	return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+	return sb->set_magic ^ BSET_MAGIC;
+}
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1	1
+#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
+#define BCACHE_JSET_VERSION		1
+
+struct jset {
+	__u64			csum;
+	__u64			magic;
+	__u64			seq;
+	__u32			version;
+	__u32			keys;
+
+	__u64			last_seq;
+
+	BKEY_PADDED(uuid_bucket);
+	BKEY_PADDED(btree_root);
+	__u16			btree_level;
+	__u16			pad[3];
+
+	__u64			prio_bucket[MAX_CACHES_PER_SET];
+
+	union {
+		struct bkey	start[0];
+		__u64		d[0];
+	};
+};
+
+/* Bucket prios/gens */
+
+struct prio_set {
+	__u64			csum;
+	__u64			magic;
+	__u64			seq;
+	__u32			version;
+	__u32			pad;
+
+	__u64			next_bucket;
+
+	struct bucket_disk {
+		__u16		prio;
+		__u8		gen;
+	} __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+	union {
+		struct {
+			__u8	uuid[16];
+			__u8	label[32];
+			__u32	first_reg;
+			__u32	last_reg;
+			__u32	invalidated;
+
+			__u32	flags;
+			/* Size of flash only volumes */
+			__u64	sectors;
+		};
+
+		__u8		pad[128];
+	};
+};
+
+BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_CSUM		1
+#define BCACHE_BSET_VERSION		1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+	__u64			csum;
+	__u64			magic;
+	__u64			seq;
+	__u32			version;
+	__u32			keys;
+
+	union {
+		struct bkey	start[0];
+		__u64		d[0];
+	};
+};
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+	__u8		uuid[16];
+	__u8		label[32];
+	__u32		first_reg;
+	__u32		last_reg;
+	__u32		invalidated;
+	__u32		pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
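(Illustrative sketch, not part of this patch: how the KEY() constructor and
the generated accessors above fit together. Note that KEY_OFFSET() marks
where a key ends, which is why KEY_START() subtracts the size.)

	/* inode 5, key ends at sector 1024, covering 16 sectors */
	struct bkey k = KEY(5, 1024, 16);

	__u64 inode  = KEY_INODE(&k);	/* 5    (bits 0..19 of high)  */
	__u64 size   = KEY_SIZE(&k);	/* 16   (bits 20..35 of high) */
	__u64 offset = KEY_OFFSET(&k);	/* 1024 (all of low)          */
	__u64 start  = KEY_START(&k);	/* 1008 == offset - size      */

	SET_KEY_DIRTY(&k, 1);		/* setter generated by KEY_FIELD() */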
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 3ebe387..382251a 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_BCM_H
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index 7b7148b..b632045 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_ERROR_H
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 4e27c82..844c896 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -7,6 +7,38 @@
  * Copyright (c) 2011 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_GW_H
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 14966dd..df944ed 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -5,6 +5,14 @@
  *
  * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */
 
 #ifndef CAN_NETLINK_H
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index a814062..c7d8c33 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -8,6 +8,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_RAW_H
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index f1e12bd..c8a43020 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	26
+#define DM_VERSION_MINOR	27
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2013-08-15)"
+#define DM_VERSION_EXTRA	"-ioctl (2013-10-30)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
@@ -341,4 +341,15 @@
  */
 #define DM_DATA_OUT_FLAG		(1 << 16) /* Out */
 
+/*
+ * If set with DM_DEV_REMOVE or DM_REMOVE_ALL, this indicates that, if
+ * the device cannot be removed immediately because it is still in use,
+ * it should instead be scheduled for removal when it gets closed.
+ *
+ * On return from DM_DEV_REMOVE, DM_DEV_STATUS or other ioctls, this
+ * flag indicates that the device is scheduled to be removed when it
+ * gets closed.
+ */
+#define DM_DEFERRED_REMOVE		(1 << 17) /* In/Out */
+
 #endif				/* _LINUX_DM_IOCTL_H */
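(Illustrative sketch, not part of this patch: requesting deferred removal of
a busy device through the device-mapper control node; the device name is a
hypothetical example.)

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dm-ioctl.h>

	int deferred_remove(const char *name)
	{
		struct dm_ioctl io;
		int fd, ret;

		fd = open("/dev/mapper/control", O_RDWR);
		if (fd < 0)
			return -1;
		memset(&io, 0, sizeof(io));
		io.version[0] = DM_VERSION_MAJOR;
		io.version[1] = DM_VERSION_MINOR;
		io.version[2] = DM_VERSION_PATCHLEVEL;
		io.data_size = sizeof(io);
		io.flags = DM_DEFERRED_REMOVE;	/* defer if still open */
		strncpy(io.name, name, sizeof(io.name) - 1);
		ret = ioctl(fd, DM_DEV_REMOVE, &io);
		close(fd);
		return ret;
	}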
diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h
new file mode 100644
index 0000000..2475cb8
--- /dev/null
+++ b/include/uapi/linux/hsr_netlink.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __UAPI_HSR_NETLINK_H
+#define __UAPI_HSR_NETLINK_H
+
+/* Generic Netlink HSR family definition
+ */
+
+/* attributes */
+enum {
+	HSR_A_UNSPEC,
+	HSR_A_NODE_ADDR,
+	HSR_A_IFINDEX,
+	HSR_A_IF1_AGE,
+	HSR_A_IF2_AGE,
+	HSR_A_NODE_ADDR_B,
+	HSR_A_IF1_SEQ,
+	HSR_A_IF2_SEQ,
+	HSR_A_IF1_IFINDEX,
+	HSR_A_IF2_IFINDEX,
+	HSR_A_ADDR_B_IFINDEX,
+	__HSR_A_MAX,
+};
+#define HSR_A_MAX (__HSR_A_MAX - 1)
+
+
+/* commands */
+enum {
+	HSR_C_UNSPEC,
+	HSR_C_RING_ERROR,
+	HSR_C_NODE_DOWN,
+	HSR_C_GET_NODE_STATUS,
+	HSR_C_SET_NODE_STATUS,
+	HSR_C_GET_NODE_LIST,
+	HSR_C_SET_NODE_LIST,
+	__HSR_C_MAX,
+};
+#define HSR_C_MAX (__HSR_C_MAX - 1)
+
+#endif /* __UAPI_HSR_NETLINK_H */
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 1ec407b..d758163 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -83,6 +83,7 @@
 #define IFF_SUPP_NOFCS	0x80000		/* device supports sending custom FCS */
 #define IFF_LIVE_ADDR_CHANGE 0x100000	/* device supports hardware address
 					 * change when it's running */
+#define IFF_MACVLAN 0x200000		/* Macvlan device */
 
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index a17edda..9635a62 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -91,6 +91,8 @@
 #define BOND_XMIT_POLICY_LAYER2		0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34	1 /* layer 3+4 (IP ^ (TCP || UDP)) */
 #define BOND_XMIT_POLICY_LAYER23	2 /* layer 2+3 (IP ^ MAC) */
+#define BOND_XMIT_POLICY_ENCAP23	3 /* encapsulated layer 2+3 */
+#define BOND_XMIT_POLICY_ENCAP34	4 /* encapsulated layer 3+4 */
 
 typedef struct ifbond {
 	__s32 bond_mode;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index ade07f1..2ce0f6a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -85,6 +85,7 @@
 #define ETH_P_8021AH	0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_MVRP	0x88F5          /* 802.1Q MVRP                  */
 #define ETH_P_1588	0x88F7		/* IEEE 1588 Timesync */
+#define ETH_P_PRP	0x88FB		/* IEC 62439-3 PRP/HSRv0	*/
 #define ETH_P_FCOE	0x8906		/* Fibre Channel over Ethernet  */
 #define ETH_P_TDLS	0x890D          /* TDLS */
 #define ETH_P_FIP	0x8914		/* FCoE Initialization Protocol */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 80394e8..b78566f 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -325,6 +325,17 @@
 	__be16	high;
 };
 
+/* Bonding section */
+
+enum {
+	IFLA_BOND_UNSPEC,
+	IFLA_BOND_MODE,
+	IFLA_BOND_ACTIVE_SLAVE,
+	__IFLA_BOND_MAX,
+};
+
+#define IFLA_BOND_MAX	(__IFLA_BOND_MAX - 1)
+
 /* SR-IOV virtual function management section */
 
 enum {
@@ -470,4 +481,17 @@
 
 #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
 
+
+/* HSR section */
+
+enum {
+	IFLA_HSR_UNSPEC,
+	IFLA_HSR_SLAVE1,
+	IFLA_HSR_SLAVE2,
+	IFLA_HSR_MULTICAST_SPEC,
+	__IFLA_HSR_MAX,
+};
+
+#define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f9e8e49..393c5de 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -115,6 +115,11 @@
 #define IP_PMTUDISC_WANT		1	/* Use per route hints	*/
 #define IP_PMTUDISC_DO			2	/* Always DF		*/
 #define IP_PMTUDISC_PROBE		3       /* Ignore dst pmtu      */
+/* Always use interface mtu (ignores dst pmtu) but don't set DF flag.
+ * Incoming ICMP frag_needed notifications will also be ignored on
+ * this socket to prevent accepting spoofed ones.
+ */
+#define IP_PMTUDISC_INTERFACE		4
 
 #define IP_MULTICAST_IF			32
 #define IP_MULTICAST_TTL 		33
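(Illustrative sketch, not part of this patch: selecting the new PMTU mode on
a socket via the existing IP_MTU_DISCOVER option.)

	#include <netinet/in.h>
	#include <sys/socket.h>

	static int use_interface_mtu(int sock)
	{
		int val = IP_PMTUDISC_INTERFACE;

		/* Interface MTU, no DF bit, incoming frag_needed ignored. */
		return setsockopt(sock, IPPROTO_IP, IP_MTU_DISCOVER,
				  &val, sizeof(val));
	}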
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 2945822..fbcffe8 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -334,7 +334,7 @@
 	__IPVS_CMD_ATTR_MAX,
 };
 
-#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
+#define IPVS_CMD_ATTR_MAX (__IPVS_CMD_ATTR_MAX - 1)
 
 /*
  * Attributes used to describe a service
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 99c2533..902f124 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -518,6 +518,10 @@
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
 #define KVM_VM_S390_UCONTROL	1
 
+/* on ppc, 0 indicates the default, 1 forces HV and 2 forces PR */
+#define KVM_VM_PPC_HV 1
+#define KVM_VM_PPC_PR 2
+
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
 /*
@@ -541,6 +545,7 @@
 #define KVM_TRACE_ENABLE          __KVM_DEPRECATED_MAIN_W_0x06
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
+#define KVM_GET_EMULATED_CPUID	  _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 
 /*
  * Extension capability list.
@@ -668,6 +673,7 @@
 #define KVM_CAP_IRQ_XICS 92
 #define KVM_CAP_ARM_EL1_32BIT 93
 #define KVM_CAP_SPAPR_MULTITCE 94
+#define KVM_CAP_EXT_EMUL_CPUID 95
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -843,6 +849,10 @@
 #define KVM_DEV_TYPE_FSL_MPIC_20	1
 #define KVM_DEV_TYPE_FSL_MPIC_42	2
 #define KVM_DEV_TYPE_XICS		3
+#define KVM_DEV_TYPE_VFIO		4
+#define  KVM_DEV_VFIO_GROUP			1
+#define   KVM_DEV_VFIO_GROUP_ADD			1
+#define   KVM_DEV_VFIO_GROUP_DEL			2
 
 /*
  * ioctls for VM fds
@@ -1012,6 +1022,7 @@
 /* VM is being stopped by host */
 #define KVM_KVMCLOCK_CTRL	  _IO(KVMIO,   0xad)
 #define KVM_ARM_VCPU_INIT	  _IOW(KVMIO,  0xae, struct kvm_vcpu_init)
+#define KVM_ARM_PREFERRED_TARGET  _IOR(KVMIO,  0xaf, struct kvm_vcpu_init)
 #define KVM_GET_REG_LIST	  _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
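(Illustrative sketch, not part of this patch: passing the new machine type
to KVM_CREATE_VM on ppc. Passing 0 keeps the default; KVM_VM_PPC_PR would
force PR mode instead.)

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_hv_vm(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0)
			return -1;
		return ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_HV);
	}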
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 2944278..77c6031 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -71,6 +71,6 @@
 #define USBDEVICE_SUPER_MAGIC	0x9fa2
 #define MTD_INODE_FS_MAGIC      0x11307854
 #define ANON_INODE_FS_MAGIC	0x09041934
-
+#define BTRFS_TEST_MAGIC	0x73727279
 
 #endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h
index 6a8ca98..620252e 100644
--- a/include/uapi/linux/major.h
+++ b/include/uapi/linux/major.h
@@ -54,6 +54,7 @@
 #define ACSI_MAJOR		28
 #define AZTECH_CDROM_MAJOR	29
 #define FB_MAJOR		29   /* /dev/fb* framebuffers */
+#define MTD_BLOCK_MAJOR		31
 #define CM206_CDROM_MAJOR	32
 #define IDE2_MAJOR		33
 #define IDE3_MAJOR		34
@@ -105,6 +106,7 @@
 #define IDE6_MAJOR		88
 #define IDE7_MAJOR		89
 #define IDE8_MAJOR		90
+#define MTD_CHAR_MAJOR		90
 #define IDE9_MAJOR		91
 
 #define DASD_MAJOR		94
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 1749154..17c3af2 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -5,6 +5,8 @@
 header-y += nf_conntrack_sctp.h
 header-y += nf_conntrack_tcp.h
 header-y += nf_conntrack_tuple_common.h
+header-y += nf_tables.h
+header-y += nf_tables_compat.h
 header-y += nf_nat.h
 header-y += nfnetlink.h
 header-y += nfnetlink_acct.h
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 8024cdf..25d3b2f 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -10,12 +10,14 @@
 #ifndef _UAPI_IP_SET_H
 #define _UAPI_IP_SET_H
 
-
 #include <linux/types.h>
 
 /* The protocol version */
 #define IPSET_PROTOCOL		6
 
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE	255
+
 /* The max length of strings including NUL: set and type identifiers */
 #define IPSET_MAXNAMELEN	32
 
@@ -110,6 +112,7 @@
 	IPSET_ATTR_IFACE,
 	IPSET_ATTR_BYTES,
 	IPSET_ATTR_PACKETS,
+	IPSET_ATTR_COMMENT,
 	__IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX	(__IPSET_ATTR_ADT_MAX - 1)
@@ -140,6 +143,7 @@
 	IPSET_ERR_IPADDR_IPV4,
 	IPSET_ERR_IPADDR_IPV6,
 	IPSET_ERR_COUNTER,
+	IPSET_ERR_COMMENT,
 
 	/* Type specific error codes */
 	IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -176,6 +180,8 @@
 	IPSET_FLAG_NOMATCH	= (1 << IPSET_FLAG_BIT_NOMATCH),
 	IPSET_FLAG_BIT_WITH_COUNTERS = 3,
 	IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
+	IPSET_FLAG_BIT_WITH_COMMENT = 4,
+	IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
 	IPSET_FLAG_CADT_MAX	= 15,
 };
 
@@ -250,6 +256,14 @@
 #define IP_SET_OP_GET_BYINDEX	0x00000007	/* Get set name by index */
 /* Uses ip_set_req_get_set */
 
+#define IP_SET_OP_GET_FNAME	0x00000008	/* Get set index and family */
+struct ip_set_req_get_set_family {
+	unsigned int op;
+	unsigned int version;
+	unsigned int family;
+	union ip_set_name_index set;
+};
+
 #define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
 struct ip_set_req_version {
 	unsigned int op;
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 8dd8038..319f471 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -25,6 +25,10 @@
 	IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
 };
 
+#define NF_CT_STATE_INVALID_BIT			(1 << 0)
+#define NF_CT_STATE_BIT(ctinfo)			(1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
+#define NF_CT_STATE_UNTRACKED_BIT		(1 << (IP_CT_NUMBER + 1))
+
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
 	/* It's an expected connection: bit 0 set.  This bit never changed */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
new file mode 100644
index 0000000..fbfd229
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -0,0 +1,718 @@
+#ifndef _LINUX_NF_TABLES_H
+#define _LINUX_NF_TABLES_H
+
+#define NFT_CHAIN_MAXNAMELEN 32
+
+enum nft_registers {
+	NFT_REG_VERDICT,
+	NFT_REG_1,
+	NFT_REG_2,
+	NFT_REG_3,
+	NFT_REG_4,
+	__NFT_REG_MAX
+};
+#define NFT_REG_MAX	(__NFT_REG_MAX - 1)
+
+/**
+ * enum nft_verdicts - nf_tables internal verdicts
+ *
+ * @NFT_CONTINUE: continue evaluation of the current rule
+ * @NFT_BREAK: terminate evaluation of the current rule
+ * @NFT_JUMP: push the current chain on the jump stack and jump to a chain
+ * @NFT_GOTO: jump to a chain without pushing the current chain on the jump stack
+ * @NFT_RETURN: return to the topmost chain on the jump stack
+ *
+ * The nf_tables verdicts share their numeric space with the netfilter verdicts.
+ */
+enum nft_verdicts {
+	NFT_CONTINUE	= -1,
+	NFT_BREAK	= -2,
+	NFT_JUMP	= -3,
+	NFT_GOTO	= -4,
+	NFT_RETURN	= -5,
+};
+
+/**
+ * enum nf_tables_msg_types - nf_tables netlink message types
+ *
+ * @NFT_MSG_NEWTABLE: create a new table (enum nft_table_attributes)
+ * @NFT_MSG_GETTABLE: get a table (enum nft_table_attributes)
+ * @NFT_MSG_DELTABLE: delete a table (enum nft_table_attributes)
+ * @NFT_MSG_NEWCHAIN: create a new chain (enum nft_chain_attributes)
+ * @NFT_MSG_GETCHAIN: get a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DELCHAIN: delete a chain (enum nft_chain_attributes)
+ * @NFT_MSG_NEWRULE: create a new rule (enum nft_rule_attributes)
+ * @NFT_MSG_GETRULE: get a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DELRULE: delete a rule (enum nft_rule_attributes)
+ * @NFT_MSG_NEWSET: create a new set (enum nft_set_attributes)
+ * @NFT_MSG_GETSET: get a set (enum nft_set_attributes)
+ * @NFT_MSG_DELSET: delete a set (enum nft_set_attributes)
+ * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ */
+enum nf_tables_msg_types {
+	NFT_MSG_NEWTABLE,
+	NFT_MSG_GETTABLE,
+	NFT_MSG_DELTABLE,
+	NFT_MSG_NEWCHAIN,
+	NFT_MSG_GETCHAIN,
+	NFT_MSG_DELCHAIN,
+	NFT_MSG_NEWRULE,
+	NFT_MSG_GETRULE,
+	NFT_MSG_DELRULE,
+	NFT_MSG_NEWSET,
+	NFT_MSG_GETSET,
+	NFT_MSG_DELSET,
+	NFT_MSG_NEWSETELEM,
+	NFT_MSG_GETSETELEM,
+	NFT_MSG_DELSETELEM,
+	NFT_MSG_MAX,
+};
+
+/**
+ * enum nft_list_attributes - nf_tables generic list netlink attributes
+ *
+ * @NFTA_LIST_ELEM: list element (NLA_NESTED)
+ */
+enum nft_list_attributes {
+	NFTA_LIST_UNPEC,
+	NFTA_LIST_ELEM,
+	__NFTA_LIST_MAX
+};
+#define NFTA_LIST_MAX		(__NFTA_LIST_MAX - 1)
+
+/**
+ * enum nft_hook_attributes - nf_tables netfilter hook netlink attributes
+ *
+ * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ */
+enum nft_hook_attributes {
+	NFTA_HOOK_UNSPEC,
+	NFTA_HOOK_HOOKNUM,
+	NFTA_HOOK_PRIORITY,
+	__NFTA_HOOK_MAX
+};
+#define NFTA_HOOK_MAX		(__NFTA_HOOK_MAX - 1)
+
+/**
+ * enum nft_table_flags - nf_tables table flags
+ *
+ * @NFT_TABLE_F_DORMANT: this table is not active
+ */
+enum nft_table_flags {
+	NFT_TABLE_F_DORMANT	= 0x1,
+};
+
+/**
+ * enum nft_table_attributes - nf_tables table netlink attributes
+ *
+ * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
+ * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
+ */
+enum nft_table_attributes {
+	NFTA_TABLE_UNSPEC,
+	NFTA_TABLE_NAME,
+	NFTA_TABLE_FLAGS,
+	__NFTA_TABLE_MAX
+};
+#define NFTA_TABLE_MAX		(__NFTA_TABLE_MAX - 1)
+
+/**
+ * enum nft_chain_attributes - nf_tables chain netlink attributes
+ *
+ * @NFTA_CHAIN_TABLE: name of the table containing the chain (NLA_STRING)
+ * @NFTA_CHAIN_HANDLE: numeric handle of the chain (NLA_U64)
+ * @NFTA_CHAIN_NAME: name of the chain (NLA_STRING)
+ * @NFTA_CHAIN_HOOK: hook specification for basechains (NLA_NESTED: nft_hook_attributes)
+ * @NFTA_CHAIN_POLICY: numeric policy of the chain (NLA_U32)
+ * @NFTA_CHAIN_USE: number of references to this chain (NLA_U32)
+ * @NFTA_CHAIN_TYPE: type name of the chain (NLA_NUL_STRING)
+ * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
+ */
+enum nft_chain_attributes {
+	NFTA_CHAIN_UNSPEC,
+	NFTA_CHAIN_TABLE,
+	NFTA_CHAIN_HANDLE,
+	NFTA_CHAIN_NAME,
+	NFTA_CHAIN_HOOK,
+	NFTA_CHAIN_POLICY,
+	NFTA_CHAIN_USE,
+	NFTA_CHAIN_TYPE,
+	NFTA_CHAIN_COUNTERS,
+	__NFTA_CHAIN_MAX
+};
+#define NFTA_CHAIN_MAX		(__NFTA_CHAIN_MAX - 1)
+
+/**
+ * enum nft_rule_attributes - nf_tables rule netlink attributes
+ *
+ * @NFTA_RULE_TABLE: name of the table containing the rule (NLA_STRING)
+ * @NFTA_RULE_CHAIN: name of the chain containing the rule (NLA_STRING)
+ * @NFTA_RULE_HANDLE: numeric handle of the rule (NLA_U64)
+ * @NFTA_RULE_EXPRESSIONS: list of expressions (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
+ * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
+ */
+enum nft_rule_attributes {
+	NFTA_RULE_UNSPEC,
+	NFTA_RULE_TABLE,
+	NFTA_RULE_CHAIN,
+	NFTA_RULE_HANDLE,
+	NFTA_RULE_EXPRESSIONS,
+	NFTA_RULE_COMPAT,
+	NFTA_RULE_POSITION,
+	__NFTA_RULE_MAX
+};
+#define NFTA_RULE_MAX		(__NFTA_RULE_MAX - 1)
+
+/**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+enum nft_rule_compat_flags {
+	NFT_RULE_COMPAT_F_INV	= (1 << 1),
+	NFT_RULE_COMPAT_F_MASK	= NFT_RULE_COMPAT_F_INV,
+};
+
+/**
+ * enum nft_rule_compat_attributes - nf_tables rule compat attributes
+ *
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
+ */
+enum nft_rule_compat_attributes {
+	NFTA_RULE_COMPAT_UNSPEC,
+	NFTA_RULE_COMPAT_PROTO,
+	NFTA_RULE_COMPAT_FLAGS,
+	__NFTA_RULE_COMPAT_MAX
+};
+#define NFTA_RULE_COMPAT_MAX	(__NFTA_RULE_COMPAT_MAX - 1)
+
+/**
+ * enum nft_set_flags - nf_tables set flags
+ *
+ * @NFT_SET_ANONYMOUS: name allocation, automatic cleanup on unlink
+ * @NFT_SET_CONSTANT: set contents may not change while bound
+ * @NFT_SET_INTERVAL: set contains intervals
+ * @NFT_SET_MAP: set is used as a dictionary
+ */
+enum nft_set_flags {
+	NFT_SET_ANONYMOUS		= 0x1,
+	NFT_SET_CONSTANT		= 0x2,
+	NFT_SET_INTERVAL		= 0x4,
+	NFT_SET_MAP			= 0x8,
+};
+
+/**
+ * enum nft_set_attributes - nf_tables set netlink attributes
+ *
+ * @NFTA_SET_TABLE: table name (NLA_STRING)
+ * @NFTA_SET_NAME: set name (NLA_STRING)
+ * @NFTA_SET_FLAGS: bitmask of enum nft_set_flags (NLA_U32)
+ * @NFTA_SET_KEY_TYPE: key data type, informational purpose only (NLA_U32)
+ * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
+ * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
+ * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ */
+enum nft_set_attributes {
+	NFTA_SET_UNSPEC,
+	NFTA_SET_TABLE,
+	NFTA_SET_NAME,
+	NFTA_SET_FLAGS,
+	NFTA_SET_KEY_TYPE,
+	NFTA_SET_KEY_LEN,
+	NFTA_SET_DATA_TYPE,
+	NFTA_SET_DATA_LEN,
+	__NFTA_SET_MAX
+};
+#define NFTA_SET_MAX		(__NFTA_SET_MAX - 1)
+
+/**
+ * enum nft_set_elem_flags - nf_tables set element flags
+ *
+ * @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ */
+enum nft_set_elem_flags {
+	NFT_SET_ELEM_INTERVAL_END	= 0x1,
+};
+
+/**
+ * enum nft_set_elem_attributes - nf_tables set element netlink attributes
+ *
+ * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
+ * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ */
+enum nft_set_elem_attributes {
+	NFTA_SET_ELEM_UNSPEC,
+	NFTA_SET_ELEM_KEY,
+	NFTA_SET_ELEM_DATA,
+	NFTA_SET_ELEM_FLAGS,
+	__NFTA_SET_ELEM_MAX
+};
+#define NFTA_SET_ELEM_MAX	(__NFTA_SET_ELEM_MAX - 1)
+
+/**
+ * enum nft_set_elem_list_attributes - nf_tables set element list netlink attributes
+ *
+ * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ */
+enum nft_set_elem_list_attributes {
+	NFTA_SET_ELEM_LIST_UNSPEC,
+	NFTA_SET_ELEM_LIST_TABLE,
+	NFTA_SET_ELEM_LIST_SET,
+	NFTA_SET_ELEM_LIST_ELEMENTS,
+	__NFTA_SET_ELEM_LIST_MAX
+};
+#define NFTA_SET_ELEM_LIST_MAX	(__NFTA_SET_ELEM_LIST_MAX - 1)
+
+/**
+ * enum nft_data_types - nf_tables data types
+ *
+ * @NFT_DATA_VALUE: generic data
+ * @NFT_DATA_VERDICT: netfilter verdict
+ *
+ * The type of data is usually determined by the kernel directly and is not
+ * explicitly specified by userspace. The only exception is sets, where
+ * userspace specifies the key and mapping data types.
+ *
+ * The values 0xffffff00-0xffffffff are reserved for internally used types.
+ * The remaining range can be freely used by userspace to encode types; all
+ * values are equivalent to NFT_DATA_VALUE.
+ */
+enum nft_data_types {
+	NFT_DATA_VALUE,
+	NFT_DATA_VERDICT	= 0xffffff00U,
+};
+
+#define NFT_DATA_RESERVED_MASK	0xffffff00U
+
+/**
+ * enum nft_data_attributes - nf_tables data netlink attributes
+ *
+ * @NFTA_DATA_VALUE: generic data (NLA_BINARY)
+ * @NFTA_DATA_VERDICT: nf_tables verdict (NLA_NESTED: nft_verdict_attributes)
+ */
+enum nft_data_attributes {
+	NFTA_DATA_UNSPEC,
+	NFTA_DATA_VALUE,
+	NFTA_DATA_VERDICT,
+	__NFTA_DATA_MAX
+};
+#define NFTA_DATA_MAX		(__NFTA_DATA_MAX - 1)
+
+/**
+ * enum nft_verdict_attributes - nf_tables verdict netlink attributes
+ *
+ * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
+ * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ */
+enum nft_verdict_attributes {
+	NFTA_VERDICT_UNSPEC,
+	NFTA_VERDICT_CODE,
+	NFTA_VERDICT_CHAIN,
+	__NFTA_VERDICT_MAX
+};
+#define NFTA_VERDICT_MAX	(__NFTA_VERDICT_MAX - 1)
+
+/**
+ * enum nft_expr_attributes - nf_tables expression netlink attributes
+ *
+ * @NFTA_EXPR_NAME: name of the expression type (NLA_STRING)
+ * @NFTA_EXPR_DATA: type specific data (NLA_NESTED)
+ */
+enum nft_expr_attributes {
+	NFTA_EXPR_UNSPEC,
+	NFTA_EXPR_NAME,
+	NFTA_EXPR_DATA,
+	__NFTA_EXPR_MAX
+};
+#define NFTA_EXPR_MAX		(__NFTA_EXPR_MAX - 1)
+
+/**
+ * enum nft_immediate_attributes - nf_tables immediate expression netlink attributes
+ *
+ * @NFTA_IMMEDIATE_DREG: destination register to load data into (NLA_U32)
+ * @NFTA_IMMEDIATE_DATA: data to load (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_immediate_attributes {
+	NFTA_IMMEDIATE_UNSPEC,
+	NFTA_IMMEDIATE_DREG,
+	NFTA_IMMEDIATE_DATA,
+	__NFTA_IMMEDIATE_MAX
+};
+#define NFTA_IMMEDIATE_MAX	(__NFTA_IMMEDIATE_MAX - 1)
+
+/**
+ * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
+ *
+ * @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
+ * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ *
+ * The bitwise expression performs the following operation:
+ *
+ * dreg = (sreg & mask) ^ xor
+ *
+ * which allows expressing all bitwise operations:
+ *
+ * 		mask	xor
+ * NOT:		1	1
+ * OR:		0	x
+ * XOR:		1	x
+ * AND:		x	0
+ */
+enum nft_bitwise_attributes {
+	NFTA_BITWISE_UNSPEC,
+	NFTA_BITWISE_SREG,
+	NFTA_BITWISE_DREG,
+	NFTA_BITWISE_LEN,
+	NFTA_BITWISE_MASK,
+	NFTA_BITWISE_XOR,
+	__NFTA_BITWISE_MAX
+};
+#define NFTA_BITWISE_MAX	(__NFTA_BITWISE_MAX - 1)
+
+/**
+ * enum nft_byteorder_ops - nf_tables byteorder operators
+ *
+ * @NFT_BYTEORDER_NTOH: network to host operator
+ * @NFT_BYTEORDER_HTON: host to network operator
+ */
+enum nft_byteorder_ops {
+	NFT_BYTEORDER_NTOH,
+	NFT_BYTEORDER_HTON,
+};
+
+/**
+ * enum nft_byteorder_attributes - nf_tables byteorder expression netlink attributes
+ *
+ * @NFTA_BYTEORDER_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_OP: operator (NLA_U32: enum nft_byteorder_ops)
+ * @NFTA_BYTEORDER_LEN: length of the data (NLA_U32)
+ * @NFTA_BYTEORDER_SIZE: data size in bytes (NLA_U32: 2 or 4)
+ */
+enum nft_byteorder_attributes {
+	NFTA_BYTEORDER_UNSPEC,
+	NFTA_BYTEORDER_SREG,
+	NFTA_BYTEORDER_DREG,
+	NFTA_BYTEORDER_OP,
+	NFTA_BYTEORDER_LEN,
+	NFTA_BYTEORDER_SIZE,
+	__NFTA_BYTEORDER_MAX
+};
+#define NFTA_BYTEORDER_MAX	(__NFTA_BYTEORDER_MAX - 1)
+
+/**
+ * enum nft_cmp_ops - nf_tables relational operator
+ *
+ * @NFT_CMP_EQ: equal
+ * @NFT_CMP_NEQ: not equal
+ * @NFT_CMP_LT: less than
+ * @NFT_CMP_LTE: less than or equal to
+ * @NFT_CMP_GT: greater than
+ * @NFT_CMP_GTE: greater than or equal to
+ */
+enum nft_cmp_ops {
+	NFT_CMP_EQ,
+	NFT_CMP_NEQ,
+	NFT_CMP_LT,
+	NFT_CMP_LTE,
+	NFT_CMP_GT,
+	NFT_CMP_GTE,
+};
+
+/**
+ * enum nft_cmp_attributes - nf_tables cmp expression netlink attributes
+ *
+ * @NFTA_CMP_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_CMP_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_CMP_DATA: data to compare against (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_cmp_attributes {
+	NFTA_CMP_UNSPEC,
+	NFTA_CMP_SREG,
+	NFTA_CMP_OP,
+	NFTA_CMP_DATA,
+	__NFTA_CMP_MAX
+};
+#define NFTA_CMP_MAX		(__NFTA_CMP_MAX - 1)
+
+/**
+ * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes
+ *
+ * @NFTA_LOOKUP_SET: name of the set to look in (NLA_STRING)
+ * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ */
+enum nft_lookup_attributes {
+	NFTA_LOOKUP_UNSPEC,
+	NFTA_LOOKUP_SET,
+	NFTA_LOOKUP_SREG,
+	NFTA_LOOKUP_DREG,
+	__NFTA_LOOKUP_MAX
+};
+#define NFTA_LOOKUP_MAX		(__NFTA_LOOKUP_MAX - 1)
+
+/**
+ * enum nft_payload_bases - nf_tables payload expression offset bases
+ *
+ * @NFT_PAYLOAD_LL_HEADER: link layer header
+ * @NFT_PAYLOAD_NETWORK_HEADER: network header
+ * @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ */
+enum nft_payload_bases {
+	NFT_PAYLOAD_LL_HEADER,
+	NFT_PAYLOAD_NETWORK_HEADER,
+	NFT_PAYLOAD_TRANSPORT_HEADER,
+};
+
+/**
+ * enum nft_payload_attributes - nf_tables payload expression netlink attributes
+ *
+ * @NFTA_PAYLOAD_DREG: destination register to load data into (NLA_U32: nft_registers)
+ * @NFTA_PAYLOAD_BASE: payload base (NLA_U32: nft_payload_bases)
+ * @NFTA_PAYLOAD_OFFSET: payload offset relative to base (NLA_U32)
+ * @NFTA_PAYLOAD_LEN: payload length (NLA_U32)
+ */
+enum nft_payload_attributes {
+	NFTA_PAYLOAD_UNSPEC,
+	NFTA_PAYLOAD_DREG,
+	NFTA_PAYLOAD_BASE,
+	NFTA_PAYLOAD_OFFSET,
+	NFTA_PAYLOAD_LEN,
+	__NFTA_PAYLOAD_MAX
+};
+#define NFTA_PAYLOAD_MAX	(__NFTA_PAYLOAD_MAX - 1)
+
+/**
+ * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes
+ *
+ * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8)
+ * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
+ * @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
+ */
+enum nft_exthdr_attributes {
+	NFTA_EXTHDR_UNSPEC,
+	NFTA_EXTHDR_DREG,
+	NFTA_EXTHDR_TYPE,
+	NFTA_EXTHDR_OFFSET,
+	NFTA_EXTHDR_LEN,
+	__NFTA_EXTHDR_MAX
+};
+#define NFTA_EXTHDR_MAX		(__NFTA_EXTHDR_MAX - 1)
+
+/**
+ * enum nft_meta_keys - nf_tables meta expression keys
+ *
+ * @NFT_META_LEN: packet length (skb->len)
+ * @NFT_META_PROTOCOL: packet ethertype protocol (skb->protocol), invalid in OUTPUT
+ * @NFT_META_PRIORITY: packet priority (skb->priority)
+ * @NFT_META_MARK: packet mark (skb->mark)
+ * @NFT_META_IIF: packet input interface index (dev->ifindex)
+ * @NFT_META_OIF: packet output interface index (dev->ifindex)
+ * @NFT_META_IIFNAME: packet input interface name (dev->name)
+ * @NFT_META_OIFNAME: packet output interface name (dev->name)
+ * @NFT_META_IIFTYPE: packet input interface type (dev->type)
+ * @NFT_META_OIFTYPE: packet output interface type (dev->type)
+ * @NFT_META_SKUID: originating socket UID (fsuid)
+ * @NFT_META_SKGID: originating socket GID (fsgid)
+ * @NFT_META_NFTRACE: packet nftrace bit
+ * @NFT_META_RTCLASSID: realm value of packet's route (skb->dst->tclassid)
+ * @NFT_META_SECMARK: packet secmark (skb->secmark)
+ */
+enum nft_meta_keys {
+	NFT_META_LEN,
+	NFT_META_PROTOCOL,
+	NFT_META_PRIORITY,
+	NFT_META_MARK,
+	NFT_META_IIF,
+	NFT_META_OIF,
+	NFT_META_IIFNAME,
+	NFT_META_OIFNAME,
+	NFT_META_IIFTYPE,
+	NFT_META_OIFTYPE,
+	NFT_META_SKUID,
+	NFT_META_SKGID,
+	NFT_META_NFTRACE,
+	NFT_META_RTCLASSID,
+	NFT_META_SECMARK,
+};
+
+/**
+ * enum nft_meta_attributes - nf_tables meta expression netlink attributes
+ *
+ * @NFTA_META_DREG: destination register (NLA_U32)
+ * @NFTA_META_KEY: meta data item to load (NLA_U32: nft_meta_keys)
+ */
+enum nft_meta_attributes {
+	NFTA_META_UNSPEC,
+	NFTA_META_DREG,
+	NFTA_META_KEY,
+	__NFTA_META_MAX
+};
+#define NFTA_META_MAX		(__NFTA_META_MAX - 1)
+
+/**
+ * enum nft_ct_keys - nf_tables ct expression keys
+ *
+ * @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
+ * @NFT_CT_DIRECTION: conntrack direction (enum ip_conntrack_dir)
+ * @NFT_CT_STATUS: conntrack status (bitmask of enum ip_conntrack_status)
+ * @NFT_CT_MARK: conntrack mark value
+ * @NFT_CT_SECMARK: conntrack secmark value
+ * @NFT_CT_EXPIRATION: relative conntrack expiration time in ms
+ * @NFT_CT_HELPER: connection tracking helper assigned to conntrack
+ * @NFT_CT_L3PROTOCOL: conntrack layer 3 protocol
+ * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address)
+ * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address)
+ * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
+ * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
+ * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ */
+enum nft_ct_keys {
+	NFT_CT_STATE,
+	NFT_CT_DIRECTION,
+	NFT_CT_STATUS,
+	NFT_CT_MARK,
+	NFT_CT_SECMARK,
+	NFT_CT_EXPIRATION,
+	NFT_CT_HELPER,
+	NFT_CT_L3PROTOCOL,
+	NFT_CT_SRC,
+	NFT_CT_DST,
+	NFT_CT_PROTOCOL,
+	NFT_CT_PROTO_SRC,
+	NFT_CT_PROTO_DST,
+};
+
+/**
+ * enum nft_ct_attributes - nf_tables ct expression netlink attributes
+ *
+ * @NFTA_CT_DREG: destination register (NLA_U32)
+ * @NFTA_CT_KEY: conntrack data item to load (NLA_U32: nft_ct_keys)
+ * @NFTA_CT_DIRECTION: direction in case of directional keys (NLA_U8)
+ */
+enum nft_ct_attributes {
+	NFTA_CT_UNSPEC,
+	NFTA_CT_DREG,
+	NFTA_CT_KEY,
+	NFTA_CT_DIRECTION,
+	__NFTA_CT_MAX
+};
+#define NFTA_CT_MAX		(__NFTA_CT_MAX - 1)
+
+/**
+ * enum nft_limit_attributes - nf_tables limit expression netlink attributes
+ *
+ * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
+ * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ */
+enum nft_limit_attributes {
+	NFTA_LIMIT_UNSPEC,
+	NFTA_LIMIT_RATE,
+	NFTA_LIMIT_UNIT,
+	__NFTA_LIMIT_MAX
+};
+#define NFTA_LIMIT_MAX		(__NFTA_LIMIT_MAX - 1)
+
+/**
+ * enum nft_counter_attributes - nf_tables counter expression netlink attributes
+ *
+ * @NFTA_COUNTER_BYTES: number of bytes (NLA_U64)
+ * @NFTA_COUNTER_PACKETS: number of packets (NLA_U64)
+ */
+enum nft_counter_attributes {
+	NFTA_COUNTER_UNSPEC,
+	NFTA_COUNTER_BYTES,
+	NFTA_COUNTER_PACKETS,
+	__NFTA_COUNTER_MAX
+};
+#define NFTA_COUNTER_MAX	(__NFTA_COUNTER_MAX - 1)
+
+/**
+ * enum nft_log_attributes - nf_tables log expression netlink attributes
+ *
+ * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
+ * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
+ * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
+ * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ */
+enum nft_log_attributes {
+	NFTA_LOG_UNSPEC,
+	NFTA_LOG_GROUP,
+	NFTA_LOG_PREFIX,
+	NFTA_LOG_SNAPLEN,
+	NFTA_LOG_QTHRESHOLD,
+	__NFTA_LOG_MAX
+};
+#define NFTA_LOG_MAX		(__NFTA_LOG_MAX - 1)
+
+/**
+ * enum nft_reject_types - nf_tables reject expression reject types
+ *
+ * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
+ * @NFT_REJECT_TCP_RST: reject using TCP RST
+ */
+enum nft_reject_types {
+	NFT_REJECT_ICMP_UNREACH,
+	NFT_REJECT_TCP_RST,
+};
+
+/**
+ * enum nft_reject_attributes - nf_tables reject expression netlink attributes
+ *
+ * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
+ * @NFTA_REJECT_ICMP_CODE: ICMP code to use (NLA_U8)
+ */
+enum nft_reject_attributes {
+	NFTA_REJECT_UNSPEC,
+	NFTA_REJECT_TYPE,
+	NFTA_REJECT_ICMP_CODE,
+	__NFTA_REJECT_MAX
+};
+#define NFTA_REJECT_MAX		(__NFTA_REJECT_MAX - 1)
+
+/**
+ * enum nft_nat_types - nf_tables nat expression NAT types
+ *
+ * @NFT_NAT_SNAT: source NAT
+ * @NFT_NAT_DNAT: destination NAT
+ */
+enum nft_nat_types {
+	NFT_NAT_SNAT,
+	NFT_NAT_DNAT,
+};
+
+/**
+ * enum nft_nat_attributes - nf_tables nat expression netlink attributes
+ *
+ * @NFTA_NAT_TYPE: NAT type (NLA_U32: nft_nat_types)
+ * @NFTA_NAT_FAMILY: NAT family (NLA_U32)
+ * @NFTA_NAT_REG_ADDR_MIN: source register of address range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ */
+enum nft_nat_attributes {
+	NFTA_NAT_UNSPEC,
+	NFTA_NAT_TYPE,
+	NFTA_NAT_FAMILY,
+	NFTA_NAT_REG_ADDR_MIN,
+	NFTA_NAT_REG_ADDR_MAX,
+	NFTA_NAT_REG_PROTO_MIN,
+	NFTA_NAT_REG_PROTO_MAX,
+	__NFTA_NAT_MAX
+};
+#define NFTA_NAT_MAX		(__NFTA_NAT_MAX - 1)
+
+#endif /* _LINUX_NF_TABLES_H */
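(Illustrative sketch, not part of this patch: the bitwise expression's
semantics from the comment above, dreg = (sreg & mask) ^ xor, and how the
mask/xor table encodes the usual operators for a constant c.)

	static unsigned int nft_bitwise_eval(unsigned int sreg,
					     unsigned int mask,
					     unsigned int xor)
	{
		return (sreg & mask) ^ xor;
	}

	/* NOT x   -> mask = ~0u, xor = ~0u : (x & ~0u) ^ ~0u == ~x    */
	/* x OR c  -> mask = ~c,  xor = c   : (x & ~c)  ^ c   == x | c */
	/* x XOR c -> mask = ~0u, xor = c   : (x & ~0u) ^ c   == x ^ c */
	/* x AND c -> mask = c,   xor = 0   : (x & c)   ^ 0   == x & c */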
diff --git a/include/uapi/linux/netfilter/nf_tables_compat.h b/include/uapi/linux/netfilter/nf_tables_compat.h
new file mode 100644
index 0000000..8310f5f
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_tables_compat.h
@@ -0,0 +1,38 @@
+#ifndef _NFT_COMPAT_NFNETLINK_H_
+#define _NFT_COMPAT_NFNETLINK_H_
+
+enum nft_target_attributes {
+	NFTA_TARGET_UNSPEC,
+	NFTA_TARGET_NAME,
+	NFTA_TARGET_REV,
+	NFTA_TARGET_INFO,
+	__NFTA_TARGET_MAX
+};
+#define NFTA_TARGET_MAX		(__NFTA_TARGET_MAX - 1)
+
+enum nft_match_attributes {
+	NFTA_MATCH_UNSPEC,
+	NFTA_MATCH_NAME,
+	NFTA_MATCH_REV,
+	NFTA_MATCH_INFO,
+	__NFTA_MATCH_MAX
+};
+#define NFTA_MATCH_MAX		(__NFTA_MATCH_MAX - 1)
+
+#define NFT_COMPAT_NAME_MAX	32
+
+enum {
+	NFNL_MSG_COMPAT_GET,
+	NFNL_MSG_COMPAT_MAX
+};
+
+enum {
+	NFTA_COMPAT_UNSPEC = 0,
+	NFTA_COMPAT_NAME,
+	NFTA_COMPAT_REV,
+	NFTA_COMPAT_TYPE,
+	__NFTA_COMPAT_MAX,
+};
+#define NFTA_COMPAT_MAX (__NFTA_COMPAT_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 4a4efaf..596ddd4 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -18,6 +18,8 @@
 #define NFNLGRP_CONNTRACK_EXP_UPDATE	NFNLGRP_CONNTRACK_EXP_UPDATE
 	NFNLGRP_CONNTRACK_EXP_DESTROY,
 #define NFNLGRP_CONNTRACK_EXP_DESTROY	NFNLGRP_CONNTRACK_EXP_DESTROY
+	NFNLGRP_NFTABLES,
+#define NFNLGRP_NFTABLES                NFNLGRP_NFTABLES
 	__NFNLGRP_MAX,
 };
 #define NFNLGRP_MAX	(__NFNLGRP_MAX - 1)
@@ -51,6 +53,12 @@
 #define NFNL_SUBSYS_ACCT		7
 #define NFNL_SUBSYS_CTNETLINK_TIMEOUT	8
 #define NFNL_SUBSYS_CTHELPER		9
-#define NFNL_SUBSYS_COUNT		10
+#define NFNL_SUBSYS_NFTABLES		10
+#define NFNL_SUBSYS_NFT_COMPAT		11
+#define NFNL_SUBSYS_COUNT		12
+
+/* Reserved control nfnetlink messages */
+#define NFNL_MSG_BATCH_BEGIN		NLMSG_MIN_TYPE
+#define NFNL_MSG_BATCH_END		NLMSG_MIN_TYPE+1
 
 #endif /* _UAPI_NFNETLINK_H */
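
nfnetlink packs the subsystem id into the high byte of nlmsg_type and the
per-subsystem message into the low byte (the NFNL_SUBSYS_ID()/NFNL_MSG_TYPE()
macros in this header), while the two new batch markers are plain control
messages at the bottom of the valid type range. A small userspace sketch,
compilable against these headers:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

/* Mirror of the NFNL_SUBSYS_ID()/NFNL_MSG_TYPE() packing. */
static inline __u16 nfnl_type(__u8 subsys, __u8 msg)
{
	return (subsys << 8) | msg;
}

int main(void)
{
	/* A batch is framed by the two reserved control messages. */
	printf("batch begin: %#x\n", NFNL_MSG_BATCH_BEGIN);
	printf("batch end:   %#x\n", NFNL_MSG_BATCH_END);
	/* e.g. message 0 of the nf_tables subsystem between the markers: */
	printf("nft msg 0:   %#x\n", nfnl_type(NFNL_SUBSYS_NFTABLES, 0));
	return 0;
}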
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index a2810a7..1ab0b97 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -6,6 +6,8 @@
 	IPCTNL_MSG_TIMEOUT_NEW,
 	IPCTNL_MSG_TIMEOUT_GET,
 	IPCTNL_MSG_TIMEOUT_DELETE,
+	IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+	IPCTNL_MSG_TIMEOUT_DEFAULT_GET,
 
 	IPCTNL_MSG_TIMEOUT_MAX
 };
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 29bed72..6ad6cc0 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -85,6 +85,7 @@
  *	a specific SE notifies us about the end of a transaction. The parameter
  *	for this event is the application ID (AID).
  * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
+ * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
  */
 enum nfc_commands {
 	NFC_CMD_UNSPEC,
@@ -114,6 +115,7 @@
 	NFC_EVENT_SE_CONNECTIVITY,
 	NFC_EVENT_SE_TRANSACTION,
 	NFC_CMD_GET_SE,
+	NFC_CMD_SE_IO,
 /* private: internal use only */
 	__NFC_CMD_AFTER_LAST
 };
@@ -147,6 +149,7 @@
  * @NFC_ATTR_SE_INDEX: Secure element index
  * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
  * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
+ * @NFC_ATTR_SE_APDU: Secure element APDU
  */
 enum nfc_attrs {
 	NFC_ATTR_UNSPEC,
@@ -174,6 +177,7 @@
 	NFC_ATTR_SE_TYPE,
 	NFC_ATTR_SE_AID,
 	NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
+	NFC_ATTR_SE_APDU,
 /* private: internal use only */
 	__NFC_ATTR_AFTER_LAST
 };
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index fde2c02..f752e982 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -988,7 +988,7 @@
  * 	to query the CRDA to retrieve one regulatory domain. This attribute can
  * 	also be used by userspace to query the kernel for the currently set
  * 	regulatory domain. We chose an alpha2 as that is also used by the
- * 	IEEE-802.11d country information element to identify a country.
+ * 	IEEE-802.11 country information element to identify a country.
  * 	Users can also simply ask the wireless core to set regulatory domain
  * 	to a specific alpha2.
  * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory
@@ -1496,6 +1496,18 @@
  * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
  *	As specified in the &enum nl80211_rxmgmt_flags.
  *
+ * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels.
+ *
+ * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported
+ *      operating classes.
+ *
+ * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space
+ *	controls DFS operation in IBSS mode. If the flag is included in
+ *	%NL80211_CMD_JOIN_IBSS request, the driver will allow use of DFS
+ *	channels and report radar events to userspace. Userspace is required
+ *	to react to radar events, e.g. initiate a channel switch or leave the
+ *	IBSS network.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1806,6 +1818,12 @@
 
 	NL80211_ATTR_RXMGMT_FLAGS,
 
+	NL80211_ATTR_STA_SUPPORTED_CHANNELS,
+
+	NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES,
+
+	NL80211_ATTR_HANDLE_DFS,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3860,13 +3878,12 @@
  *
  * Channel states used by the DFS code.
  *
- * @IEEE80211_DFS_USABLE: The channel can be used, but channel availability
+ * @NL80211_DFS_USABLE: The channel can be used, but channel availability
  *	check (CAC) must be performed before using it for AP or IBSS.
- * @IEEE80211_DFS_UNAVAILABLE: A radar has been detected on this channel, it
+ * @NL80211_DFS_UNAVAILABLE: A radar has been detected on this channel, it
  *	is therefore marked as not available.
- * @IEEE80211_DFS_AVAILABLE: The channel has been CAC checked and is available.
+ * @NL80211_DFS_AVAILABLE: The channel has been CAC checked and is available.
  */
-
 enum nl80211_dfs_state {
 	NL80211_DFS_USABLE,
 	NL80211_DFS_UNAVAILABLE,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a74d375..d120f9f 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -63,15 +63,18 @@
  * not be sent.
  * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
  * datapath.  Always present in notifications.
+ * @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about megaflow mask usage for the
+ * datapath. Always present in notifications.
  *
  * These attributes follow the &struct ovs_header within the Generic Netlink
  * payload for %OVS_DP_* commands.
  */
 enum ovs_datapath_attr {
 	OVS_DP_ATTR_UNSPEC,
-	OVS_DP_ATTR_NAME,       /* name of dp_ifindex netdev */
-	OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
-	OVS_DP_ATTR_STATS,      /* struct ovs_dp_stats */
+	OVS_DP_ATTR_NAME,		/* name of dp_ifindex netdev */
+	OVS_DP_ATTR_UPCALL_PID,		/* Netlink PID to receive upcalls */
+	OVS_DP_ATTR_STATS,		/* struct ovs_dp_stats */
+	OVS_DP_ATTR_MEGAFLOW_STATS,	/* struct ovs_dp_megaflow_stats */
 	__OVS_DP_ATTR_MAX
 };
 
@@ -84,6 +87,14 @@
 	__u64 n_flows;           /* Number of flows present */
 };
 
+struct ovs_dp_megaflow_stats {
+	__u64 n_mask_hit;	 /* Number of masks used for flow lookups. */
+	__u32 n_masks;		 /* Number of masks for the datapath. */
+	__u32 pad0;		 /* Pad for future expansion. */
+	__u64 pad1;		 /* Pad for future expansion. */
+	__u64 pad2;		 /* Pad for future expansion. */
+};
+
 struct ovs_vport_stats {
 	__u64   rx_packets;		/* total packets received       */
 	__u64   tx_packets;		/* total packets transmitted    */
@@ -260,6 +271,7 @@
 	OVS_KEY_ATTR_SKB_MARK,  /* u32 skb mark */
 	OVS_KEY_ATTR_TUNNEL,    /* Nested set of ovs_tunnel attributes */
 	OVS_KEY_ATTR_SCTP,      /* struct ovs_key_sctp */
+	OVS_KEY_ATTR_TCP_FLAGS,	/* be16 TCP flags. */
 
 #ifdef __KERNEL__
 	OVS_KEY_ATTR_IPV4_TUNNEL,  /* struct ovs_key_ipv4_tunnel */
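
The megaflow counters are meant to be read next to the existing ovs_dp_stats:
n_mask_hit divided by the packet hit count gives the average number of mask
tables searched per lookup. A hedged userspace sketch with made-up numbers
(the pairing of the fields follows the struct comments above; real values
would come from an OVS_DP_ATTR_STATS / OVS_DP_ATTR_MEGAFLOW_STATS dump):

#include <stdio.h>
#include <linux/openvswitch.h>

int main(void)
{
	struct ovs_dp_stats dps = { .n_hit = 1000 };
	struct ovs_dp_megaflow_stats mfs = { .n_mask_hit = 2500, .n_masks = 4 };

	if (dps.n_hit)
		printf("avg. masks per hit: %.2f (of %u masks)\n",
		       (double)mfs.n_mask_hit / dps.n_hit, mfs.n_masks);
	return 0;
}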
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index baa7852..0890556 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,7 +319,6 @@
 #define PCI_MSIX_PBA		8	/* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR	0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET	0xfffffff8 /* Offset into specified BAR */
-#define  PCI_MSIX_FLAGS_BIRMASK	(7 << 0)   /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF	12	/* size of MSIX registers */
 
 /* MSI-X entry's format */
@@ -558,7 +557,8 @@
 #define  PCI_EXP_DEVCAP2_OBFF_MSG	0x00040000 /* New message signaling */
 #define  PCI_EXP_DEVCAP2_OBFF_WAKE	0x00080000 /* Re-use WAKE# for OBFF */
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
-#define  PCI_EXP_DEVCTL2_ARI		0x20	/* Alternative Routing-ID */
+#define  PCI_EXP_DEVCTL2_COMP_TIMEOUT	0x000f	/* Completion Timeout Value */
+#define  PCI_EXP_DEVCTL2_ARI		0x0020	/* Alternative Routing-ID */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN	0x0100	/* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN	0x0200	/* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN		0x0400	/* Enable LTR mechanism */
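
PCI_EXP_DEVCTL2_COMP_TIMEOUT masks bits 3:0 of Device Control 2. A hedged
kernel-side sketch of extracting the raw range code with the existing
pcie_capability_read_word() helper; decoding the 4-bit code into an actual
time is device-specific and omitted:

#include <linux/pci.h>

/* Sketch: return the Completion Timeout Value field (bits 3:0). */
static u16 pci_comp_timeout_code(struct pci_dev *dev)
{
	u16 ctl2 = 0;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return ctl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT;
}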
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 082eafa..25731df 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -388,6 +388,20 @@
 
 #define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
 
+/* BPF classifier */
+
+enum {
+	TCA_BPF_UNSPEC,
+	TCA_BPF_ACT,
+	TCA_BPF_POLICE,
+	TCA_BPF_CLASSID,
+	TCA_BPF_OPS_LEN,
+	TCA_BPF_OPS,
+	__TCA_BPF_MAX,
+};
+
+#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+
 /* Extended Matches */
 
 struct tcf_ematch_tree_hdr {
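
For the new cls_bpf attributes, TCA_BPF_OPS carries an array of classic
struct sock_filter instructions and TCA_BPF_OPS_LEN their count. A hedged
userspace sketch of the smallest useful program, a one-instruction match-all;
wiring it into a tc netlink request is omitted:

#include <stdio.h>
#include <linux/filter.h>

int main(void)
{
	/* "return 0xffffffff": accept/classify everything. */
	struct sock_filter ops[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	unsigned int ops_len = sizeof(ops) / sizeof(ops[0]);

	printf("TCA_BPF_OPS_LEN = %u, payload %zu bytes\n",
	       ops_len, sizeof(ops));
	return 0;
}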
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 9b82913..307f293 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -171,6 +171,8 @@
 	TCA_TBF_PARMS,
 	TCA_TBF_RTAB,
 	TCA_TBF_PTAB,
+	TCA_TBF_RATE64,
+	TCA_TBF_PRATE64,
 	__TCA_TBF_MAX,
 };
 
@@ -357,6 +359,8 @@
 	TCA_HTB_CTAB,
 	TCA_HTB_RTAB,
 	TCA_HTB_DIRECT_QLEN,
+	TCA_HTB_RATE64,
+	TCA_HTB_CEIL64,
 	__TCA_HTB_MAX,
 };
 
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 7471b5b..fff3528 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -40,11 +40,4 @@
 	__u32	buf[0];
 };
 
-struct rnd_state {
-	__u32 s1, s2, s3;
-};
-
-/* Exported functions */
-
-
 #endif /* _UAPI_LINUX_RANDOM_H */
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 36eace0..e272ea0 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -94,10 +94,10 @@
 #define MTD_RAM			1
 #define MTD_ROM			2
 #define MTD_NORFLASH		3
-#define MTD_NANDFLASH		4
+#define MTD_NANDFLASH		4	/* SLC NAND */
 #define MTD_DATAFLASH		6
 #define MTD_UBIVOLUME		7
-#define MTD_MLCNANDFLASH	8
+#define MTD_MLCNANDFLASH	8	/* MLC NAND (including TLC) */
 
 #define MTD_WRITEABLE		0x400	/* Device is writeable */
 #define MTD_BIT_WRITEABLE	0x800	/* Single bits can be flipped */
@@ -275,4 +275,9 @@
 	MTD_FILE_MODE_RAW,
 };
 
+static inline int mtd_type_is_nand_user(const struct mtd_info_user *mtd)
+{
+	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
 #endif /* __MTD_ABI_H__ */
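
The new inline lets MEMGETINFO users treat SLC and MLC parts uniformly. A
hedged userspace sketch (the /dev/mtd0 path is illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

int main(void)
{
	struct mtd_info_user info;
	int fd = open("/dev/mtd0", O_RDONLY);

	/* MEMGETINFO fills struct mtd_info_user, including ->type. */
	if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
		perror("mtd");
		return 1;
	}
	printf("%s NAND\n", mtd_type_is_nand_user(&info) ? "is" : "is not");
	close(fd);
	return 0;
}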
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 0f5a2fc..c79f381 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -31,39 +31,20 @@
 #define ATMEL_LCDC_WIRING_BGR	0
 #define ATMEL_LCDC_WIRING_RGB	1
 
-struct atmel_lcdfb_config;
 
  /* LCD Controller info data structure, stored in device platform_data */
-struct atmel_lcdfb_info {
-	spinlock_t		lock;
-	struct fb_info		*info;
-	void __iomem		*mmio;
-	int			irq_base;
-	struct work_struct	task;
-
+struct atmel_lcdfb_pdata {
 	unsigned int		guard_time;
-	unsigned int 		smem_len;
-	struct platform_device	*pdev;
-	struct clk		*bus_clk;
-	struct clk		*lcdc_clk;
-
-#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
-	struct backlight_device	*backlight;
-	u8			bl_power;
-#endif
 	bool			lcdcon_is_backlight;
 	bool			lcdcon_pol_negative;
-	u8			saved_lcdcon;
-
 	u8			default_bpp;
 	u8			lcd_wiring_mode;
 	unsigned int		default_lcdcon2;
 	unsigned int		default_dmacon;
-	void (*atmel_lcdfb_power_control)(int on);
+	void (*atmel_lcdfb_power_control)(struct atmel_lcdfb_pdata *pdata, int on);
 	struct fb_monspecs	*default_monspecs;
-	u32			pseudo_palette[16];
 
-	struct atmel_lcdfb_config *config;
+	struct list_head	pwr_gpios;
 };
 
 #define ATMEL_LCDC_DMABADDR1	0x00
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index b9dd1fb..9fd9398 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -91,6 +91,11 @@
 	u16	up_crop;
 	u16	bottom_crop;
 	int	pix_fmt;
+	/*
+	 * pitch[0]: graphics/video layer line length or y pitch
+	 * pitch[1]/pitch[2]: video u/v pitch if non-zero
+	 */
+	u32	pitch[3];
 };
 
 struct mmp_addr {
@@ -334,6 +339,7 @@
 	int output_type;
 	u32 path_config;
 	u32 link_config;
+	u32 dsi_rbswap;
 };
 
 struct mmp_mach_plat_info {
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index f7ac8d9..69279c0 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -238,4 +238,17 @@
 	int qvga_gpio;
 };
 
+/**
+ * panel-tpo-td028ttec1 platform data
+ * @name: name for display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ */
+struct panel_tpo_td028ttec1_platform_data {
+	const char *name;
+	const char *source;
+
+	int data_lines;
+};
+
 #endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index eb262e3..c50061d 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -51,6 +51,20 @@
  */
 
 /*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
+/*
  * This is the 'wire' format for packets:
  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
  * [Request 2: xen_netif_extra_info]    (only if request 1 has XEN_NETTXF_extra_info)
@@ -95,8 +109,10 @@
 #define _XEN_NETIF_EXTRA_FLAG_MORE	(0)
 #define  XEN_NETIF_EXTRA_FLAG_MORE	(1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
 
-/* GSO types - only TCPv4 currently supported. */
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE		(0)
 #define XEN_NETIF_GSO_TYPE_TCPV4	(1)
+#define XEN_NETIF_GSO_TYPE_TCPV6	(2)
 
 /*
  * This structure needs to fit within both netif_tx_request and
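
The new GSO type is carried in the existing GSO extra-info segment. A hedged
backend-side sketch of filling it for an IPv6 TCP packet; the 1440-byte MSS
is an arbitrary example value:

#include <xen/interface/io/netif.h>

/* Sketch: mark a tx/rx extra-info slot as TCPv6 GSO. */
static void fill_gso_extra(struct xen_netif_extra_info *extra)
{
	extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
	extra->flags = 0;
	extra->u.gso.size = 1440;		/* example MSS */
	extra->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV6;
	extra->u.gso.pad = 0;
	extra->u.gso.features = 0;
}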
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 7000bb1..42721d1 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -231,6 +231,17 @@
 #define XEN_PCI_DEV_VIRTFN             0x2
 #define XEN_PCI_DEV_PXM                0x4
 
+#define XEN_PCI_MMCFG_RESERVED         0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved    24
+struct physdev_pci_mmcfg_reserved {
+    uint64_t address;
+    uint16_t segment;
+    uint8_t start_bus;
+    uint8_t end_bus;
+    uint32_t flags;
+};
+
 #define PHYSDEVOP_pci_device_add        25
 struct physdev_pci_device_add {
     /* IN */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index de8bcc6..8b2eb93 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_SWIOTLB_XEN_H
 #define __LINUX_SWIOTLB_XEN_H
 
+#include <linux/dma-direction.h>
 #include <linux/swiotlb.h>
 
 extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,6 @@
 extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
+extern int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
 #endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d6fe062..fb2ea8f 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -19,10 +19,11 @@
 int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
-				unsigned int address_bits);
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				unsigned int address_bits,
+				dma_addr_t *dma_handle);
 
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 
 struct vm_area_struct;
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
diff --git a/init/Kconfig b/init/Kconfig
index bc8911f..2d60611 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -118,7 +118,6 @@
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -137,6 +136,13 @@
 
 	  If in doubt, select 'gzip'
 
+config KERNEL_UNCOMPRESSED
+	bool "No compression"
+	help
+	  No compression at all. The kernel is huge but the compression and
+	  decompression times are zero.
+	  This is usually not what you want.
+
 config KERNEL_GZIP
 	bool "Gzip"
 	depends on HAVE_KERNEL_GZIP
@@ -845,7 +851,7 @@
 	default y
 	depends on NUMA_BALANCING
 	help
-	  If set, autonumic NUMA balancing will be enabled if running on a NUMA
+	  If set, automatic NUMA balancing will be enabled if running on a NUMA
 	  machine.
 
 config NUMA_BALANCING
@@ -856,7 +862,7 @@
 	help
 	  This option adds support for automatic NUMA aware memory/task placement.
 	  The mechanism is quite primitive and is based on migrating memory when
-	  it is references to the node the task is running on.
+	  it has references to the node the task is running on.
 
 	  This system will be inactive on UMA systems.
 
diff --git a/init/do_mounts.c b/init/do_mounts.c
index a51cddc..8e5addc 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -197,6 +197,8 @@
  *	   is a zero-filled hex representation of the 1-based partition number.
  *	7) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to
  *	   a partition with a known unique id.
+ *	8) <major>:<minor> major and minor number of the device separated by
+ *	   a colon.
  *
 *	If name doesn't fall into the categories above, we return (0,0).
  *	block_class is used to check if something is a disk name. If the disk
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 6be2879..7c098ac 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -57,6 +57,11 @@
  *	cramfs
  *	squashfs
  *	gzip
+ *	bzip2
+ *	lzma
+ *	xz
+ *	lzo
+ *	lz4
  */
 static int __init
 identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
@@ -342,6 +347,13 @@
 	int result;
 	crd_infd = in_fd;
 	crd_outfd = out_fd;
+
+	if (!deco) {
+		pr_emerg("Invalid ramdisk decompression routine.  "
+			 "Select appropriate config option.\n");
+		panic("Could not decompress initial ramdisk image.");
+	}
+
 	result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
 	if (decompress_error)
 		result = 1;
diff --git a/init/main.c b/init/main.c
index 379090f..01573fd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -124,7 +124,6 @@
 extern void time_init(void);
 /* Default late time init is NULL. archs can override this later. */
 void (*__initdata late_time_init)(void);
-extern void softirq_init(void);
 
 /* Untouched command line saved by arch-specific code. */
 char __initdata boot_command_line[COMMAND_LINE_SIZE];
@@ -132,11 +131,20 @@
 char *saved_command_line;
 /* Command line for parameter parsing */
 static char *static_command_line;
+/* Command line for per-initcall parameter parsing */
+static char *initcall_command_line;
 
 static char *execute_command;
 static char *ramdisk_execute_command;
 
 /*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly = false;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
+/*
  * If set, this is an indication to the drivers that reset the underlying
  * device before going ahead with the initialization otherwise driver might
  * rely on the BIOS and skip the reset operation.
@@ -348,6 +356,7 @@
 static void __init setup_command_line(char *command_line)
 {
 	saved_command_line = alloc_bootmem(strlen (boot_command_line)+1);
+	initcall_command_line = alloc_bootmem(strlen (boot_command_line)+1);
 	static_command_line = alloc_bootmem(strlen (command_line)+1);
 	strcpy (saved_command_line, boot_command_line);
 	strcpy (static_command_line, command_line);
@@ -467,7 +476,7 @@
 	mem_init();
 	kmem_cache_init();
 	percpu_init_late();
-	pgtable_cache_init();
+	pgtable_init();
 	vmalloc_init();
 }
 
@@ -745,9 +754,9 @@
 	extern const struct kernel_param __start___param[], __stop___param[];
 	initcall_t *fn;
 
-	strcpy(static_command_line, saved_command_line);
+	strcpy(initcall_command_line, saved_command_line);
 	parse_args(initcall_level_names[level],
-		   static_command_line, __start___param,
+		   initcall_command_line, __start___param,
 		   __stop___param - __start___param,
 		   level, level,
 		   &repair_env_string);
@@ -811,10 +820,26 @@
 		(const char __user *const __user *)envp_init);
 }
 
+static int try_to_run_init_process(const char *init_filename)
+{
+	int ret;
+
+	ret = run_init_process(init_filename);
+
+	if (ret && ret != -ENOENT) {
+		pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
+		       init_filename, ret);
+	}
+
+	return ret;
+}
+
 static noinline void __init kernel_init_freeable(void);
 
 static int __ref kernel_init(void *unused)
 {
+	int ret;
+
 	kernel_init_freeable();
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
@@ -826,9 +851,11 @@
 	flush_delayed_fput();
 
 	if (ramdisk_execute_command) {
-		if (!run_init_process(ramdisk_execute_command))
+		ret = run_init_process(ramdisk_execute_command);
+		if (!ret)
 			return 0;
-		pr_err("Failed to execute %s\n", ramdisk_execute_command);
+		pr_err("Failed to execute %s (error %d)\n",
+		       ramdisk_execute_command, ret);
 	}
 
 	/*
@@ -838,18 +865,19 @@
 	 * trying to recover a really broken machine.
 	 */
 	if (execute_command) {
-		if (!run_init_process(execute_command))
+		ret = run_init_process(execute_command);
+		if (!ret)
 			return 0;
-		pr_err("Failed to execute %s.  Attempting defaults...\n",
-			execute_command);
+		pr_err("Failed to execute %s (error %d).  Attempting defaults...\n",
+			execute_command, ret);
 	}
-	if (!run_init_process("/sbin/init") ||
-	    !run_init_process("/etc/init") ||
-	    !run_init_process("/bin/init") ||
-	    !run_init_process("/bin/sh"))
+	if (!try_to_run_init_process("/sbin/init") ||
+	    !try_to_run_init_process("/etc/init") ||
+	    !try_to_run_init_process("/bin/init") ||
+	    !try_to_run_init_process("/bin/sh"))
 		return 0;
 
-	panic("No init found.  Try passing init= option to kernel. "
+	panic("No working init found.  Try passing init= option to kernel. "
 	      "See Linux Documentation/init.txt for guidance.");
 }
 
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ae1996d..95827ce 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -886,7 +886,7 @@
 		err = -ENOENT;
 	} else {
 		ihold(inode);
-		err = vfs_unlink(dentry->d_parent->d_inode, dentry);
+		err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL);
 	}
 	dput(dentry);
 
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 491e71f..7e70959 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -41,15 +41,15 @@
 	/* the next part of the message follows immediately */
 };
 
-#define DATALEN_MSG	(int)(PAGE_SIZE-sizeof(struct msg_msg))
-#define DATALEN_SEG	(int)(PAGE_SIZE-sizeof(struct msg_msgseg))
+#define DATALEN_MSG	((size_t)PAGE_SIZE-sizeof(struct msg_msg))
+#define DATALEN_SEG	((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
 
 
-static struct msg_msg *alloc_msg(int len)
+static struct msg_msg *alloc_msg(size_t len)
 {
 	struct msg_msg *msg;
 	struct msg_msgseg **pseg;
-	int alen;
+	size_t alen;
 
 	alen = min(len, DATALEN_MSG);
 	msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
@@ -80,12 +80,12 @@
 	return NULL;
 }
 
-struct msg_msg *load_msg(const void __user *src, int len)
+struct msg_msg *load_msg(const void __user *src, size_t len)
 {
 	struct msg_msg *msg;
 	struct msg_msgseg *seg;
 	int err = -EFAULT;
-	int alen;
+	size_t alen;
 
 	msg = alloc_msg(len);
 	if (msg == NULL)
@@ -117,8 +117,8 @@
 struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
 {
 	struct msg_msgseg *dst_pseg, *src_pseg;
-	int len = src->m_ts;
-	int alen;
+	size_t len = src->m_ts;
+	size_t alen;
 
 	BUG_ON(dst == NULL);
 	if (src->m_ts > dst->m_ts)
@@ -147,9 +147,9 @@
 	return ERR_PTR(-ENOSYS);
 }
 #endif
-int store_msg(void __user *dest, struct msg_msg *msg, int len)
+int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
 {
-	int alen;
+	size_t alen;
 	struct msg_msgseg *seg;
 
 	alen = min(len, DATALEN_MSG);
diff --git a/ipc/util.c b/ipc/util.c
index 7684f41..3ae17a4 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -90,10 +90,8 @@
 		 * In order not to keep the lock on the hotplug memory chain
 		 * for too long, queue a work item that will, when waken up,
 		 * activate the ipcns notification chain.
-		 * No need to keep several ipc work items on the queue.
 		 */
-		if (!work_pending(&ipc_memory_wq))
-			schedule_work(&ipc_memory_wq);
+		schedule_work(&ipc_memory_wq);
 		break;
 	case MEM_GOING_ONLINE:
 	case MEM_GOING_OFFLINE:
diff --git a/ipc/util.h b/ipc/util.h
index f2f5036..59d78aa 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -148,9 +148,9 @@
 #endif
 
 extern void free_msg(struct msg_msg *msg);
-extern struct msg_msg *load_msg(const void __user *src, int len);
+extern struct msg_msg *load_msg(const void __user *src, size_t len);
 extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
-extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
+extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
 
 extern void recompute_msgmni(struct ipc_namespace *);
 
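
With the int-to-size_t conversion, the segment math in alloc_msg()/store_msg()
cannot go negative for large lengths. A hedged userspace model of how a
message splits into one head page plus segments; the struct sizes are
illustrative stand-ins, not the real sizeof values:

#include <stdio.h>

#define PAGE_SZ		4096UL
#define MSG_HDR		48UL	/* assumed sizeof(struct msg_msg) */
#define SEG_HDR		8UL	/* assumed sizeof(struct msg_msgseg) */
#define DATALEN_MSG	(PAGE_SZ - MSG_HDR)
#define DATALEN_SEG	(PAGE_SZ - SEG_HDR)

int main(void)
{
	size_t len = 3 * PAGE_SZ;
	size_t segs = 0;

	/* First page carries DATALEN_MSG bytes, each segment DATALEN_SEG. */
	if (len > DATALEN_MSG)
		segs = (len - DATALEN_MSG + DATALEN_SEG - 1) / DATALEN_SEG;
	printf("%zu bytes -> 1 head + %zu segment page(s)\n", len, segs);
	return 0;
}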
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 94fabd5..2a202a8 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@
 	default 1000 if HZ_1000
 
 config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
+	def_bool HIGH_RES_TIMERS
diff --git a/kernel/Makefile b/kernel/Makefile
index a4d1aa8..09a9c94 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,22 +7,19 @@
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
 	    extable.o params.o posix-timers.o \
-	    kthread.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o rwsem.o nsproxy.o semaphore.o \
+	    kthread.o sys_ni.o posix-cpu-timers.o \
+	    hrtimer.o nsproxy.o \
 	    notifier.o ksysfs.o cred.o reboot.o \
-	    async.o range.o groups.o lglock.o smpboot.o
+	    async.o range.o groups.o smpboot.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
-CFLAGS_REMOVE_lockdep.o = -pg
-CFLAGS_REMOVE_lockdep_proc.o = -pg
-CFLAGS_REMOVE_mutex-debug.o = -pg
-CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
 obj-y += sched/
+obj-y += locking/
 obj-y += power/
 obj-y += printk/
 obj-y += cpu/
@@ -34,26 +31,15 @@
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-obj-$(CONFIG_LOCKDEP) += lockdep.o
-ifeq ($(CONFIG_PROC_FS),y)
-obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-endif
 obj-$(CONFIG_FUTEX) += futex.o
 ifeq ($(CONFIG_COMPAT),y)
 obj-$(CONFIG_FUTEX) += futex_compat.o
 endif
-obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
-obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
-obj-$(CONFIG_SMP) += spinlock.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
-obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
diff --git a/kernel/bounds.c b/kernel/bounds.c
index e8ca97b..578782e 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -11,6 +11,7 @@
 #include <linux/kbuild.h>
 #include <linux/page_cgroup.h>
 #include <linux/log2.h>
+#include <linux/spinlock.h>
 
 void foo(void)
 {
@@ -21,5 +22,6 @@
 #ifdef CONFIG_SMP
 	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
+	DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
 	/* End of constants */
 }
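
bounds.c is only ever compiled to assembly; the DEFINE() macro from
include/linux/kbuild.h emits each constant as an asm marker line that
Kbuild's sed rule rewrites into the generated bounds.h, which is how
BLOATED_SPINLOCKS becomes visible to the preprocessor:

/* From include/linux/kbuild.h: the "->NAME value" marker lines in the
 * generated .s file are turned into #define lines in bounds.h.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))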
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8bd9cfd..e0839bcd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -125,38 +125,6 @@
 };
 
 /*
- * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
- * cgroup_subsys->use_id != 0.
- */
-#define CSS_ID_MAX	(65535)
-struct css_id {
-	/*
-	 * The css to which this ID points. This pointer is set to valid value
-	 * after cgroup is populated. If cgroup is removed, this will be NULL.
-	 * This pointer is expected to be RCU-safe because destroy()
-	 * is called after synchronize_rcu(). But for safe use, css_tryget()
-	 * should be used for avoiding race.
-	 */
-	struct cgroup_subsys_state __rcu *css;
-	/*
-	 * ID of this css.
-	 */
-	unsigned short id;
-	/*
-	 * Depth in hierarchy which this ID belongs to.
-	 */
-	unsigned short depth;
-	/*
-	 * ID is freed by RCU. (and lookup routine is RCU safe.)
-	 */
-	struct rcu_head rcu_head;
-	/*
-	 * Hierarchy of CSS ID belongs to.
-	 */
-	unsigned short stack[0]; /* Array of Length (depth+1) */
-};
-
-/*
  * cgroup_event represents events which userspace want to receive.
  */
 struct cgroup_event {
@@ -387,9 +355,6 @@
 static struct css_set init_css_set;
 static struct cgrp_cset_link init_cgrp_cset_link;
 
-static int cgroup_init_idr(struct cgroup_subsys *ss,
-			   struct cgroup_subsys_state *css);
-
 /*
  * css_set_lock protects the list of css_set objects, and the chain of
  * tasks off each css_set.  Nests outside task->alloc_lock due to
@@ -841,8 +806,6 @@
 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
-static int alloc_css_id(struct cgroup_subsys_state *child_css);
-
 static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
@@ -4240,21 +4203,6 @@
 				goto err;
 		}
 	}
-
-	/* This cgroup is ready now */
-	for_each_root_subsys(cgrp->root, ss) {
-		struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-		struct css_id *id = rcu_dereference_protected(css->id, true);
-
-		/*
-		 * Update id->css pointer and make this css visible from
-		 * CSS ID functions. This pointer will be dereferened
-		 * from RCU-read-side without locks.
-		 */
-		if (id)
-			rcu_assign_pointer(id->css, css);
-	}
-
 	return 0;
 err:
 	cgroup_clear_dir(cgrp, subsys_mask);
@@ -4323,7 +4271,6 @@
 	css->cgroup = cgrp;
 	css->ss = ss;
 	css->flags = 0;
-	css->id = NULL;
 
 	if (cgrp->parent)
 		css->parent = cgroup_css(cgrp->parent, ss);
@@ -4455,12 +4402,6 @@
 			goto err_free_all;
 
 		init_css(css, ss, cgrp);
-
-		if (ss->use_id) {
-			err = alloc_css_id(css);
-			if (err)
-				goto err_free_all;
-		}
 	}
 
 	/*
@@ -4925,12 +4866,6 @@
 
 	/* our new subsystem will be attached to the dummy hierarchy. */
 	init_css(css, ss, cgroup_dummy_top);
-	/* init_idr must be after init_css() because it sets css->id. */
-	if (ss->use_id) {
-		ret = cgroup_init_idr(ss, css);
-		if (ret)
-			goto err_unload;
-	}
 
 	/*
 	 * Now we need to entangle the css into the existing css_sets. unlike
@@ -4996,9 +4931,6 @@
 
 	offline_css(cgroup_css(cgroup_dummy_top, ss));
 
-	if (ss->use_id)
-		idr_destroy(&ss->idr);
-
 	/* deassign the subsys_id */
 	cgroup_subsys[ss->subsys_id] = NULL;
 
@@ -5025,8 +4957,7 @@
 	/*
 	 * remove subsystem's css from the cgroup_dummy_top and free it -
 	 * need to free before marking as null because ss->css_free needs
-	 * the cgrp->subsys pointer to find their state. note that this
-	 * also takes care of freeing the css_id.
+	 * the cgrp->subsys pointer to find their state.
 	 */
 	ss->css_free(cgroup_css(cgroup_dummy_top, ss));
 	RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
@@ -5097,8 +5028,6 @@
 	for_each_builtin_subsys(ss, i) {
 		if (!ss->early_init)
 			cgroup_init_subsys(ss);
-		if (ss->use_id)
-			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
 	}
 
 	/* allocate id for the dummy hierarchy */
@@ -5518,181 +5447,6 @@
 }
 __setup("cgroup_disable=", cgroup_disable);
 
-/*
- * Functons for CSS ID.
- */
-
-/* to get ID other than 0, this should be called when !cgroup_is_dead() */
-unsigned short css_id(struct cgroup_subsys_state *css)
-{
-	struct css_id *cssid;
-
-	/*
-	 * This css_id() can return correct value when somone has refcnt
-	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
-	 * it's unchanged until freed.
-	 */
-	cssid = rcu_dereference_raw(css->id);
-
-	if (cssid)
-		return cssid->id;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(css_id);
-
-/**
- *  css_is_ancestor - test "root" css is an ancestor of "child"
- * @child: the css to be tested.
- * @root: the css supporsed to be an ancestor of the child.
- *
- * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, the caller must hold rcu_read_lock().
- * But, considering usual usage, the csses should be valid objects after test.
- * Assuming that the caller will do some action to the child if this returns
- * returns true, the caller must take "child";s reference count.
- * If "child" is valid object and this returns true, "root" is valid, too.
- */
-
-bool css_is_ancestor(struct cgroup_subsys_state *child,
-		    const struct cgroup_subsys_state *root)
-{
-	struct css_id *child_id;
-	struct css_id *root_id;
-
-	child_id  = rcu_dereference(child->id);
-	if (!child_id)
-		return false;
-	root_id = rcu_dereference(root->id);
-	if (!root_id)
-		return false;
-	if (child_id->depth < root_id->depth)
-		return false;
-	if (child_id->stack[root_id->depth] != root_id->id)
-		return false;
-	return true;
-}
-
-void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
-{
-	struct css_id *id = rcu_dereference_protected(css->id, true);
-
-	/* When this is called before css_id initialization, id can be NULL */
-	if (!id)
-		return;
-
-	BUG_ON(!ss->use_id);
-
-	rcu_assign_pointer(id->css, NULL);
-	rcu_assign_pointer(css->id, NULL);
-	spin_lock(&ss->id_lock);
-	idr_remove(&ss->idr, id->id);
-	spin_unlock(&ss->id_lock);
-	kfree_rcu(id, rcu_head);
-}
-EXPORT_SYMBOL_GPL(free_css_id);
-
-/*
- * This is called by init or create(). Then, calls to this function are
- * always serialized (By cgroup_mutex() at create()).
- */
-
-static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
-{
-	struct css_id *newid;
-	int ret, size;
-
-	BUG_ON(!ss->use_id);
-
-	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
-	newid = kzalloc(size, GFP_KERNEL);
-	if (!newid)
-		return ERR_PTR(-ENOMEM);
-
-	idr_preload(GFP_KERNEL);
-	spin_lock(&ss->id_lock);
-	/* Don't use 0. allocates an ID of 1-65535 */
-	ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
-	spin_unlock(&ss->id_lock);
-	idr_preload_end();
-
-	/* Returns error when there are no free spaces for new ID.*/
-	if (ret < 0)
-		goto err_out;
-
-	newid->id = ret;
-	newid->depth = depth;
-	return newid;
-err_out:
-	kfree(newid);
-	return ERR_PTR(ret);
-
-}
-
-static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
-					    struct cgroup_subsys_state *rootcss)
-{
-	struct css_id *newid;
-
-	spin_lock_init(&ss->id_lock);
-	idr_init(&ss->idr);
-
-	newid = get_new_cssid(ss, 0);
-	if (IS_ERR(newid))
-		return PTR_ERR(newid);
-
-	newid->stack[0] = newid->id;
-	RCU_INIT_POINTER(newid->css, rootcss);
-	RCU_INIT_POINTER(rootcss->id, newid);
-	return 0;
-}
-
-static int alloc_css_id(struct cgroup_subsys_state *child_css)
-{
-	struct cgroup_subsys_state *parent_css = css_parent(child_css);
-	struct css_id *child_id, *parent_id;
-	int i, depth;
-
-	parent_id = rcu_dereference_protected(parent_css->id, true);
-	depth = parent_id->depth + 1;
-
-	child_id = get_new_cssid(child_css->ss, depth);
-	if (IS_ERR(child_id))
-		return PTR_ERR(child_id);
-
-	for (i = 0; i < depth; i++)
-		child_id->stack[i] = parent_id->stack[i];
-	child_id->stack[depth] = child_id->id;
-	/*
-	 * child_id->css pointer will be set after this cgroup is available
-	 * see cgroup_populate_dir()
-	 */
-	rcu_assign_pointer(child_css->id, child_id);
-
-	return 0;
-}
-
-/**
- * css_lookup - lookup css by id
- * @ss: cgroup subsys to be looked into.
- * @id: the id
- *
- * Returns pointer to cgroup_subsys_state if there is valid one with id.
- * NULL if not. Should be called under rcu_read_lock()
- */
-struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
-{
-	struct css_id *cssid = NULL;
-
-	BUG_ON(!ss->use_id);
-	cssid = idr_find(&ss->idr, id);
-
-	if (unlikely(!cssid))
-		return NULL;
-
-	return rcu_dereference(cssid->css);
-}
-EXPORT_SYMBOL_GPL(css_lookup);
-
 /**
  * css_from_dir - get corresponding css from the dentry of a cgroup dir
  * @dentry: directory dentry of interest
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 63aa50d..deff2e6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -306,7 +306,6 @@
 				__func__, cpu);
 		goto out_release;
 	}
-	smpboot_park_threads(cpu);
 
 	/*
 	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
@@ -315,12 +314,16 @@
 	 *
 	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
 	 * not imply sync_sched(), so explicitly call both.
+	 *
+	 * Do the sync before parking smpboot threads to take care of the
+	 * rcu boost case.
 	 */
 #ifdef CONFIG_PREEMPT
 	synchronize_sched();
 #endif
 	synchronize_rcu();
 
+	smpboot_park_threads(cpu);
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
@@ -437,11 +440,6 @@
 {
 	int err = 0;
 
-#ifdef	CONFIG_MEMORY_HOTPLUG
-	int nid;
-	pg_data_t	*pgdat;
-#endif
-
 	if (!cpu_possible(cpu)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
@@ -452,27 +450,9 @@
 		return -EINVAL;
 	}
 
-#ifdef	CONFIG_MEMORY_HOTPLUG
-	nid = cpu_to_node(cpu);
-	if (!node_online(nid)) {
-		err = mem_online_node(nid);
-		if (err)
-			return err;
-	}
-
-	pgdat = NODE_DATA(nid);
-	if (!pgdat) {
-		printk(KERN_ERR
-			"Can't online cpu %d due to NULL pgdat\n", cpu);
-		return -ENOMEM;
-	}
-
-	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
-		mutex_lock(&zonelists_mutex);
-		build_all_zonelists(NULL, NULL);
-		mutex_unlock(&zonelists_mutex);
-	}
-#endif
+	err = try_online_node(cpu_to_node(cpu));
+	if (err)
+		return err;
 
 	cpu_maps_update_begin();
 
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index d473988..54996b7 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -108,12 +108,6 @@
 	struct timespec ts;
 	cputime_t utime, stime, stimescaled, utimescaled;
 
-	/* Though tsk->delays accessed later, early exit avoids
-	 * unnecessary returning of other data
-	 */
-	if (!tsk->delays)
-		goto done;
-
 	tmp = (s64)d->cpu_run_real_total;
 	task_cputime(tsk, &utime, &stime);
 	cputime_to_timespec(utime + stime, &ts);
@@ -158,7 +152,6 @@
 	d->freepages_count += tsk->delays->freepages_count;
 	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
-done:
 	return 0;
 }
 
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index ff915ef..e556751 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -1,23 +1,19 @@
 #include <linux/elf.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-
-#include <asm/elf.h>
-
+#include <linux/binfmts.h>
 
 Elf_Half __weak elf_core_extra_phdrs(void)
 {
 	return 0;
 }
 
-int __weak elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-				      unsigned long limit)
+int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
 	return 1;
 }
 
-int __weak elf_core_write_extra_data(struct file *file, size_t *size,
-				     unsigned long limit)
+int __weak elf_core_write_extra_data(struct coredump_params *cprm)
 {
 	return 1;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8c875ef..d724e77 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2234,9 +2234,6 @@
 	perf_event_update_userpage(next_event);
 }
 
-#define list_next_entry(pos, member) \
-	list_entry(pos->member.next, typeof(*pos), member)
-
 static void perf_event_sync_stat(struct perf_event_context *ctx,
 				   struct perf_event_context *next_ctx)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index f6d11fc..728d5be 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -532,7 +532,7 @@
 	mm->flags = (current->mm) ?
 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 	mm->core_state = NULL;
-	mm->nr_ptes = 0;
+	atomic_long_set(&mm->nr_ptes, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
 	mm_init_aio(mm);
@@ -560,7 +560,7 @@
 					  "mm:%p idx:%d val:%ld\n", mm, i, x);
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	VM_BUG_ON(mm->pmd_huge_pte);
 #endif
 }
@@ -814,7 +814,7 @@
 	memcpy(mm, oldmm, sizeof(*mm));
 	mm_init_cpumask(mm);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	mm->pmd_huge_pte = NULL;
 #endif
 	if (!mm_init(mm, tsk))
diff --git a/kernel/futex.c b/kernel/futex.c
index c3a1a55..80ba086 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -66,7 +66,7 @@
 
 #include <asm/futex.h>
 
-#include "rtmutex_common.h"
+#include "locking/rtmutex_common.h"
 
 int __read_mostly futex_cmpxchg_enabled;
 
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index d4da55d..d04ce8a 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -46,4 +46,34 @@
 	larger and run slower. Also be sure to exclude files from profiling
 	which are not linked to the kernel image to prevent linker errors.
 
+choice
+	prompt "Specify GCOV format"
+	depends on GCOV_KERNEL
+	default GCOV_FORMAT_AUTODETECT
+	---help---
+	The gcov format is usually determined by the GCC version, but there are
+	exceptions where format changes are integrated in lower-version GCCs.
+	In such a case use this option to adjust the format used in the kernel
+	accordingly.
+
+	If unsure, choose "Autodetect".
+
+config GCOV_FORMAT_AUTODETECT
+	bool "Autodetect"
+	---help---
+	Select this option to use the format that corresponds to your GCC
+	version.
+
+config GCOV_FORMAT_3_4
+	bool "GCC 3.4 format"
+	---help---
+	Select this option to use the format defined by GCC 3.4.
+
+config GCOV_FORMAT_4_7
+	bool "GCC 4.7 format"
+	---help---
+	Select this option to use the format defined by GCC 4.7.
+
+endchoice
+
 endmenu
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index e97ca59..52aa7e8 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -1,3 +1,33 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
-obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o gcc_3_4.o
+# if-lt
+# Usage VAR := $(call if-lt, $(a), $(b))
+# Returns 1 if (a < b)
+if-lt = $(shell [ $(1) -lt $(2) ] && echo 1)
+
+ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
+  cc-ver := 0304
+else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
+  cc-ver := 0407
+else
+# Use cc-version if available, otherwise set 0
+#
+# scripts/Kbuild.include, which contains the cc-version function, is not
+# included during make clean ("make -f scripts/Makefile.clean obj=kernel/gcov"),
+# meaning cc-ver is empty, causing the if-lt test to fail with a
+# "/bin/sh: line 0: [: -lt: unary operator expected" error message.
+# This has no effect on the clean phase, but the error message could be
+# confusing/annoying. So this dummy workaround sets cc-ver to zero if
+# cc-version is not available. We could instead move if-lt to Kbuild.include
+# (so it is also undefined during clean) or include Kbuild.include in
+# scripts/Makefile.clean, but this workaround seems the least invasive.
+  cc-ver := $(if $(call cc-version),$(call cc-version),0)
+endif
+
+obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
+
+ifeq ($(call if-lt, $(cc-ver), 0407),1)
+  obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o
+else
+  obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o
+endif
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 9b22d03..f45b75b 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -20,7 +20,6 @@
 #include <linux/mutex.h>
 #include "gcov.h"
 
-static struct gcov_info *gcov_info_head;
 static int gcov_events_enabled;
 static DEFINE_MUTEX(gcov_lock);
 
@@ -34,7 +33,7 @@
 
 	mutex_lock(&gcov_lock);
 	if (gcov_version == 0) {
-		gcov_version = info->version;
+		gcov_version = gcov_info_version(info);
 		/*
 		 * Printing gcc's version magic may prove useful for debugging
 		 * incompatibility reports.
@@ -45,8 +44,7 @@
 	 * Add new profiling data structure to list and inform event
 	 * listener.
 	 */
-	info->next = gcov_info_head;
-	gcov_info_head = info;
+	gcov_info_link(info);
 	if (gcov_events_enabled)
 		gcov_event(GCOV_ADD, info);
 	mutex_unlock(&gcov_lock);
@@ -81,6 +79,12 @@
 }
 EXPORT_SYMBOL(__gcov_merge_delta);
 
+void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_ior);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *
@@ -91,13 +95,15 @@
  */
 void gcov_enable_events(void)
 {
-	struct gcov_info *info;
+	struct gcov_info *info = NULL;
 
 	mutex_lock(&gcov_lock);
 	gcov_events_enabled = 1;
+
 	/* Perform event callback for previously registered entries. */
-	for (info = gcov_info_head; info; info = info->next)
+	while ((info = gcov_info_next(info)))
 		gcov_event(GCOV_ADD, info);
+
 	mutex_unlock(&gcov_lock);
 }
 
@@ -112,25 +118,23 @@
 				void *data)
 {
 	struct module *mod = data;
-	struct gcov_info *info;
-	struct gcov_info *prev;
+	struct gcov_info *info = NULL;
+	struct gcov_info *prev = NULL;
 
 	if (event != MODULE_STATE_GOING)
 		return NOTIFY_OK;
 	mutex_lock(&gcov_lock);
-	prev = NULL;
+
 	/* Remove entries located in module from linked list. */
-	for (info = gcov_info_head; info; info = info->next) {
+	while ((info = gcov_info_next(info))) {
 		if (within(info, mod->module_core, mod->core_size)) {
-			if (prev)
-				prev->next = info->next;
-			else
-				gcov_info_head = info->next;
+			gcov_info_unlink(prev, info);
 			if (gcov_events_enabled)
 				gcov_event(GCOV_REMOVE, info);
 		} else
 			prev = info;
 	}
+
 	mutex_unlock(&gcov_lock);
 
 	return NOTIFY_OK;
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index 7a7d2ee..15ff01a 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -75,7 +75,7 @@
 	unsigned long val;
 
 	if (kstrtoul(str, 0, &val)) {
-		pr_warning("invalid gcov_persist parameter '%s'\n", str);
+		pr_warn("invalid gcov_persist parameter '%s'\n", str);
 		return 0;
 	}
 	gcov_persist = val;
@@ -242,7 +242,7 @@
 
 	list_for_each_entry(node, &all_head, all) {
 		info = get_node_info(node);
-		if (info && (strcmp(info->filename, name) == 0))
+		if (info && (strcmp(gcov_info_filename(info), name) == 0))
 			return node;
 	}
 
@@ -279,7 +279,7 @@
 	seq = file->private_data;
 	info = gcov_iter_get_info(seq->private);
 	mutex_lock(&node_lock);
-	node = get_node_by_name(info->filename);
+	node = get_node_by_name(gcov_info_filename(info));
 	if (node) {
 		/* Reset counts or remove node for unloaded modules. */
 		if (node->num_loaded == 0)
@@ -365,7 +365,7 @@
  */
 static void add_links(struct gcov_node *node, struct dentry *parent)
 {
-	char *basename;
+	const char *basename;
 	char *target;
 	int num;
 	int i;
@@ -376,14 +376,14 @@
 	if (!node->links)
 		return;
 	for (i = 0; i < num; i++) {
-		target = get_link_target(get_node_info(node)->filename,
-					 &gcov_link[i]);
+		target = get_link_target(
+				gcov_info_filename(get_node_info(node)),
+				&gcov_link[i]);
 		if (!target)
 			goto out_err;
-		basename = strrchr(target, '/');
-		if (!basename)
+		basename = kbasename(target);
+		if (basename == target)
 			goto out_err;
-		basename++;
 		node->links[i] = debugfs_create_symlink(deskew(basename),
 							parent,	target);
 		if (!node->links[i])
@@ -450,7 +450,7 @@
 	} else
 		node->dentry = debugfs_create_dir(node->name, parent->dentry);
 	if (!node->dentry) {
-		pr_warning("could not create file\n");
+		pr_warn("could not create file\n");
 		kfree(node);
 		return NULL;
 	}
@@ -463,7 +463,7 @@
 
 err_nomem:
 	kfree(node);
-	pr_warning("out of memory\n");
+	pr_warn("out of memory\n");
 	return NULL;
 }
 
@@ -576,7 +576,7 @@
 	struct gcov_node *parent;
 	struct gcov_node *node;
 
-	filename = kstrdup(info->filename, GFP_KERNEL);
+	filename = kstrdup(gcov_info_filename(info), GFP_KERNEL);
 	if (!filename)
 		return;
 	parent = &root_node;
@@ -630,8 +630,8 @@
 	 */
 	loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
 	if (!loaded_info) {
-		pr_warning("could not add '%s' (out of memory)\n",
-			   info->filename);
+		pr_warn("could not add '%s' (out of memory)\n",
+			gcov_info_filename(info));
 		return;
 	}
 	memcpy(loaded_info, node->loaded_info,
@@ -644,8 +644,9 @@
 		 * data set replaces the copy of the last one.
 		 */
 		if (!gcov_info_is_compatible(node->unloaded_info, info)) {
-			pr_warning("discarding saved data for %s "
-				   "(incompatible version)\n", info->filename);
+			pr_warn("discarding saved data for %s "
+				"(incompatible version)\n",
+				gcov_info_filename(info));
 			gcov_info_free(node->unloaded_info);
 			node->unloaded_info = NULL;
 		}
@@ -655,8 +656,8 @@
 		 * The initial one takes precedence.
 		 */
 		if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
-			pr_warning("could not add '%s' (incompatible "
-				   "version)\n", info->filename);
+			pr_warn("could not add '%s' (incompatible "
+				"version)\n", gcov_info_filename(info));
 			kfree(loaded_info);
 			return;
 		}
@@ -691,8 +692,9 @@
 	else {
 		node->unloaded_info = gcov_info_dup(info);
 		if (!node->unloaded_info) {
-			pr_warning("could not save data for '%s' "
-				   "(out of memory)\n", info->filename);
+			pr_warn("could not save data for '%s' "
+				"(out of memory)\n",
+				gcov_info_filename(info));
 		}
 	}
 }
@@ -707,8 +709,8 @@
 
 	i = get_info_index(node, info);
 	if (i < 0) {
-		pr_warning("could not remove '%s' (not found)\n",
-			   info->filename);
+		pr_warn("could not remove '%s' (not found)\n",
+			gcov_info_filename(info));
 		return;
 	}
 	if (gcov_persist)
@@ -735,7 +737,7 @@
 	struct gcov_node *node;
 
 	mutex_lock(&node_lock);
-	node = get_node_by_name(info->filename);
+	node = get_node_by_name(gcov_info_filename(info));
 	switch (action) {
 	case GCOV_ADD:
 		if (node)
@@ -747,8 +749,8 @@
 		if (node)
 			remove_info(node, info);
 		else {
-			pr_warning("could not remove '%s' (not found)\n",
-				   info->filename);
+			pr_warn("could not remove '%s' (not found)\n",
+				gcov_info_filename(info));
 		}
 		break;
 	}
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
index ae5bb42..27bc88a 100644
--- a/kernel/gcov/gcc_3_4.c
+++ b/kernel/gcov/gcc_3_4.c
@@ -21,6 +21,121 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
+#define GCOV_COUNTERS		5
+
+static struct gcov_info *gcov_info_head;
+
+/**
+ * struct gcov_fn_info - profiling meta data per function
+ * @ident: object file-unique function identifier
+ * @checksum: function checksum
+ * @n_ctrs: number of values per counter type belonging to this function
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time.
+ */
+struct gcov_fn_info {
+	unsigned int ident;
+	unsigned int checksum;
+	unsigned int n_ctrs[0];
+};
+
+/**
+ * struct gcov_ctr_info - profiling data per counter type
+ * @num: number of counter values for this type
+ * @values: array of counter values for this type
+ * @merge: merge function for counter values of this type (unused)
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time with the exception of the values array.
+ */
+struct gcov_ctr_info {
+	unsigned int	num;
+	gcov_type	*values;
+	void		(*merge)(gcov_type *, unsigned int);
+};
+
+/**
+ * struct gcov_info - profiling data per object file
+ * @version: gcov version magic indicating the gcc version used for compilation
+ * @next: list head for a singly-linked list
+ * @stamp: time stamp
+ * @filename: name of the associated gcov data file
+ * @n_functions: number of instrumented functions
+ * @functions: function data
+ * @ctr_mask: mask specifying which counter types are active
+ * @counts: counter data per counter type
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time with the exception of the next pointer.
+ */
+struct gcov_info {
+	unsigned int			version;
+	struct gcov_info		*next;
+	unsigned int			stamp;
+	const char			*filename;
+	unsigned int			n_functions;
+	const struct gcov_fn_info	*functions;
+	unsigned int			ctr_mask;
+	struct gcov_ctr_info		counts[0];
+};
+
+/**
+ * gcov_info_filename - return info filename
+ * @info: profiling data set
+ */
+const char *gcov_info_filename(struct gcov_info *info)
+{
+	return info->filename;
+}
+
+/**
+ * gcov_info_version - return info version
+ * @info: profiling data set
+ */
+unsigned int gcov_info_version(struct gcov_info *info)
+{
+	return info->version;
+}
+
+/**
+ * gcov_info_next - return next profiling data set
+ * @info: profiling data set
+ *
+ * Returns next gcov_info following @info or first gcov_info in the chain if
+ * @info is %NULL.
+ */
+struct gcov_info *gcov_info_next(struct gcov_info *info)
+{
+	if (!info)
+		return gcov_info_head;
+
+	return info->next;
+}
+
+/**
+ * gcov_info_link - link/add profiling data set to the list
+ * @info: profiling data set
+ */
+void gcov_info_link(struct gcov_info *info)
+{
+	info->next = gcov_info_head;
+	gcov_info_head = info;
+}
+
+/**
+ * gcov_info_unlink - unlink/remove profiling data set from the list
+ * @prev: previous profiling data set
+ * @info: profiling data set
+ */
+void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
+{
+	if (prev)
+		prev->next = info->next;
+	else
+		gcov_info_head = info->next;
+}
+
 /* Symbolic links to be created for each profiling data file. */
 const struct gcov_link gcov_link[] = {
 	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
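
Both format backends now keep gcov_info_head static and expose the same
cursor API, so the core iterates without knowing the list layout. A hedged
sketch of the idiom, mirroring gcov_enable_events() in base.c; passing NULL
to gcov_info_next() yields the head of the list:

/* Sketch: walk all registered profiling data sets via the accessors. */
static void walk_all_info(void (*cb)(struct gcov_info *info))
{
	struct gcov_info *info = NULL;

	while ((info = gcov_info_next(info)))
		cb(info);
}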
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
new file mode 100644
index 0000000..2c6e463
--- /dev/null
+++ b/kernel/gcov/gcc_4_7.c
@@ -0,0 +1,560 @@
+/*
+ *  This code provides functions to handle gcc's profiling data format
+ *  introduced with gcc 4.7.
+ *
+ *  This file is based heavily on gcc_3_4.c file.
+ *
+ *  For a better understanding, refer to gcc source:
+ *  gcc/gcov-io.h
+ *  libgcc/libgcov.c
+ *
+ *  Uses gcc-internal data definitions.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include "gcov.h"
+
+#define GCOV_COUNTERS			8
+#define GCOV_TAG_FUNCTION_LENGTH	3
+
+static struct gcov_info *gcov_info_head;
+
+/**
+ * struct gcov_ctr_info - information about counters for a single function
+ * @num: number of counter values for this type
+ * @values: array of counter values for this type
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time with the exception of the values array.
+ */
+struct gcov_ctr_info {
+	unsigned int num;
+	gcov_type *values;
+};
+
+/**
+ * struct gcov_fn_info - profiling meta data per function
+ * @key: comdat key
+ * @ident: unique ident of function
+ * @lineno_checksum: function lineno checksum
+ * @cfg_checksum: function cfg checksum
+ * @ctrs: instrumented counters
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time.
+ *
+ * Information about a single function.  This uses the trailing array
+ * idiom. The number of counters is determined from the merge pointer
+ * array in gcov_info.  The key is used to detect which of a set of
+ * comdat functions was selected -- it points to the gcov_info object
+ * of the object file containing the selected comdat function.
+ */
+struct gcov_fn_info {
+	const struct gcov_info *key;
+	unsigned int ident;
+	unsigned int lineno_checksum;
+	unsigned int cfg_checksum;
+	struct gcov_ctr_info ctrs[0];
+};
+
+/**
+ * struct gcov_info - profiling data per object file
+ * @version: gcov version magic indicating the gcc version used for compilation
+ * @next: list head for a singly-linked list
+ * @stamp: uniquifying time stamp
+ * @filename: name of the associated gcov data file
+ * @merge: merge functions (null for unused counter type)
+ * @n_functions: number of instrumented functions
+ * @functions: pointer to pointers to function information
+ *
+ * This data is generated by gcc during compilation and doesn't change
+ * at run-time with the exception of the next pointer.
+ */
+struct gcov_info {
+	unsigned int version;
+	struct gcov_info *next;
+	unsigned int stamp;
+	const char *filename;
+	void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
+	unsigned int n_functions;
+	struct gcov_fn_info **functions;
+};
+
+/**
+ * gcov_info_filename - return info filename
+ * @info: profiling data set
+ */
+const char *gcov_info_filename(struct gcov_info *info)
+{
+	return info->filename;
+}
+
+/**
+ * gcov_info_version - return info version
+ * @info: profiling data set
+ */
+unsigned int gcov_info_version(struct gcov_info *info)
+{
+	return info->version;
+}
+
+/**
+ * gcov_info_next - return next profiling data set
+ * @info: profiling data set
+ *
+ * Returns next gcov_info following @info or first gcov_info in the chain if
+ * @info is %NULL.
+ */
+struct gcov_info *gcov_info_next(struct gcov_info *info)
+{
+	if (!info)
+		return gcov_info_head;
+
+	return info->next;
+}
+
+/**
+ * gcov_info_link - link/add profiling data set to the list
+ * @info: profiling data set
+ */
+void gcov_info_link(struct gcov_info *info)
+{
+	info->next = gcov_info_head;
+	gcov_info_head = info;
+}
+
+/**
+ * gcov_info_unlink - unlink/remove profiling data set from the list
+ * @prev: previous profiling data set
+ * @info: profiling data set
+ */
+void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
+{
+	if (prev)
+		prev->next = info->next;
+	else
+		gcov_info_head = info->next;
+}
+
+/* Symbolic links to be created for each profiling data file. */
+const struct gcov_link gcov_link[] = {
+	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
+	{ 0, NULL},
+};
+
+/*
+ * Determine whether a counter is active. Doesn't change at run-time.
+ */
+static int counter_active(struct gcov_info *info, unsigned int type)
+{
+	return info->merge[type] ? 1 : 0;
+}
+
+/* Determine number of active counters. Based on gcc magic. */
+static unsigned int num_counter_active(struct gcov_info *info)
+{
+	unsigned int i;
+	unsigned int result = 0;
+
+	for (i = 0; i < GCOV_COUNTERS; i++) {
+		if (counter_active(info, i))
+			result++;
+	}
+	return result;
+}
+
+/**
+ * gcov_info_reset - reset profiling data to zero
+ * @info: profiling data set
+ */
+void gcov_info_reset(struct gcov_info *info)
+{
+	struct gcov_ctr_info *ci_ptr;
+	unsigned int fi_idx;
+	unsigned int ct_idx;
+
+	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
+		ci_ptr = info->functions[fi_idx]->ctrs;
+
+		for (ct_idx = 0; ct_idx < GCOV_COUNTERS; ct_idx++) {
+			if (!counter_active(info, ct_idx))
+				continue;
+
+			memset(ci_ptr->values, 0,
+					sizeof(gcov_type) * ci_ptr->num);
+			ci_ptr++;
+		}
+	}
+}
+
+/**
+ * gcov_info_is_compatible - check if profiling data can be added
+ * @info1: first profiling data set
+ * @info2: second profiling data set
+ *
+ * Returns non-zero if profiling data can be added, zero otherwise.
+ */
+int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
+{
+	return (info1->stamp == info2->stamp);
+}
+
+/**
+ * gcov_info_add - add up profiling data
+ * @dest: profiling data set to which data is added
+ * @source: profiling data set which is added
+ *
+ * Adds profiling counts of @source to @dest.
+ */
+void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
+{
+	struct gcov_ctr_info *dci_ptr;
+	struct gcov_ctr_info *sci_ptr;
+	unsigned int fi_idx;
+	unsigned int ct_idx;
+	unsigned int val_idx;
+
+	for (fi_idx = 0; fi_idx < src->n_functions; fi_idx++) {
+		dci_ptr = dst->functions[fi_idx]->ctrs;
+		sci_ptr = src->functions[fi_idx]->ctrs;
+
+		for (ct_idx = 0; ct_idx < GCOV_COUNTERS; ct_idx++) {
+			if (!counter_active(src, ct_idx))
+				continue;
+
+			for (val_idx = 0; val_idx < sci_ptr->num; val_idx++)
+				dci_ptr->values[val_idx] +=
+					sci_ptr->values[val_idx];
+
+			dci_ptr++;
+			sci_ptr++;
+		}
+	}
+}
+
+/**
+ * gcov_info_dup - duplicate profiling data set
+ * @info: profiling data set to duplicate
+ *
+ * Return newly allocated duplicate on success, %NULL on error.
+ */
+struct gcov_info *gcov_info_dup(struct gcov_info *info)
+{
+	struct gcov_info *dup;
+	struct gcov_ctr_info *dci_ptr; /* dst counter info */
+	struct gcov_ctr_info *sci_ptr; /* src counter info */
+	unsigned int active;
+	unsigned int fi_idx; /* function info idx */
+	unsigned int ct_idx; /* counter type idx */
+	size_t fi_size; /* function info size */
+	size_t cv_size; /* counter values size */
+
+	dup = kmemdup(info, sizeof(*dup), GFP_KERNEL);
+	if (!dup)
+		return NULL;
+
+	dup->next = NULL;
+	dup->filename = NULL;
+	dup->functions = NULL;
+
+	dup->filename = kstrdup(info->filename, GFP_KERNEL);
+	if (!dup->filename)
+		goto err_free;
+
+	dup->functions = kcalloc(info->n_functions,
+				 sizeof(struct gcov_fn_info *), GFP_KERNEL);
+	if (!dup->functions)
+		goto err_free;
+
+	active = num_counter_active(info);
+	fi_size = sizeof(struct gcov_fn_info);
+	fi_size += sizeof(struct gcov_ctr_info) * active;
+
+	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
+		dup->functions[fi_idx] = kzalloc(fi_size, GFP_KERNEL);
+		if (!dup->functions[fi_idx])
+			goto err_free;
+
+		*(dup->functions[fi_idx]) = *(info->functions[fi_idx]);
+
+		sci_ptr = info->functions[fi_idx]->ctrs;
+		dci_ptr = dup->functions[fi_idx]->ctrs;
+
+		for (ct_idx = 0; ct_idx < active; ct_idx++) {
+
+			cv_size = sizeof(gcov_type) * sci_ptr->num;
+
+			dci_ptr->values = vmalloc(cv_size);
+
+			if (!dci_ptr->values)
+				goto err_free;
+
+			dci_ptr->num = sci_ptr->num;
+			memcpy(dci_ptr->values, sci_ptr->values, cv_size);
+
+			sci_ptr++;
+			dci_ptr++;
+		}
+	}
+
+	return dup;
+err_free:
+	gcov_info_free(dup);
+	return NULL;
+}
+
+/**
+ * gcov_info_free - release memory for profiling data set duplicate
+ * @info: profiling data set duplicate to free
+ */
+void gcov_info_free(struct gcov_info *info)
+{
+	unsigned int active;
+	unsigned int fi_idx;
+	unsigned int ct_idx;
+	struct gcov_ctr_info *ci_ptr;
+
+	if (!info->functions)
+		goto free_info;
+
+	active = num_counter_active(info);
+
+	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
+		if (!info->functions[fi_idx])
+			continue;
+
+		ci_ptr = info->functions[fi_idx]->ctrs;
+
+		for (ct_idx = 0; ct_idx < active; ct_idx++, ci_ptr++)
+			vfree(ci_ptr->values);
+
+		kfree(info->functions[fi_idx]);
+	}
+
+free_info:
+	kfree(info->functions);
+	kfree(info->filename);
+	kfree(info);
+}
+
+#define ITER_STRIDE	PAGE_SIZE
+
+/**
+ * struct gcov_iterator - specifies current file position in logical records
+ * @info: associated profiling data
+ * @buffer: buffer containing file data
+ * @size: size of buffer
+ * @pos: current position in file
+ */
+struct gcov_iterator {
+	struct gcov_info *info;
+	void *buffer;
+	size_t size;
+	loff_t pos;
+};
+
+/**
+ * store_gcov_u32 - store 32 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in 32-bit unsigned
+ * binary form using the endianness of the machine generating the file.
+ * Returns the number of bytes stored. If @buffer is %NULL, nothing is
+ * stored.
+ */
+static size_t store_gcov_u32(void *buffer, size_t off, u32 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+		*data = v;
+	}
+
+	return sizeof(*data);
+}
+
+/**
+ * store_gcov_u64 - store 64 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in 32-bit unsigned
+ * binary form using the endianness of the machine generating the file.
+ * 64-bit numbers are stored as two 32-bit numbers, the low part first.
+ * Returns the number of bytes stored. If @buffer is %NULL, nothing is
+ * stored.
+ */
+static size_t store_gcov_u64(void *buffer, size_t off, u64 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+
+		data[0] = (v & 0xffffffffUL);
+		data[1] = (v >> 32);
+	}
+
+	return sizeof(*data) * 2;
+}
+
+/**
+ * convert_to_gcda - convert profiling data set to gcda file format
+ * @buffer: the buffer to store file data or %NULL if no data should be stored
+ * @info: profiling data set to be converted
+ *
+ * Returns the number of bytes that were/would have been stored into the buffer.
+ */
+static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
+{
+	struct gcov_fn_info *fi_ptr;
+	struct gcov_ctr_info *ci_ptr;
+	unsigned int fi_idx;
+	unsigned int ct_idx;
+	unsigned int cv_idx;
+	size_t pos = 0;
+
+	/* File header. */
+	pos += store_gcov_u32(buffer, pos, GCOV_DATA_MAGIC);
+	pos += store_gcov_u32(buffer, pos, info->version);
+	pos += store_gcov_u32(buffer, pos, info->stamp);
+
+	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
+		fi_ptr = info->functions[fi_idx];
+
+		/* Function record. */
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+
+		ci_ptr = fi_ptr->ctrs;
+
+		for (ct_idx = 0; ct_idx < GCOV_COUNTERS; ct_idx++) {
+			if (!counter_active(info, ct_idx))
+				continue;
+
+			/* Counter record. */
+			pos += store_gcov_u32(buffer, pos,
+					      GCOV_TAG_FOR_COUNTER(ct_idx));
+			pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+
+			for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
+				pos += store_gcov_u64(buffer, pos,
+						      ci_ptr->values[cv_idx]);
+			}
+
+			ci_ptr++;
+		}
+	}
+
+	return pos;
+}
+
+/**
+ * gcov_iter_new - allocate and initialize profiling data iterator
+ * @info: profiling data set to be iterated
+ *
+ * Return file iterator on success, %NULL otherwise.
+ */
+struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
+{
+	struct gcov_iterator *iter;
+
+	iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL);
+	if (!iter)
+		goto err_free;
+
+	iter->info = info;
+	/* Dry-run to get the actual buffer size. */
+	iter->size = convert_to_gcda(NULL, info);
+	iter->buffer = vmalloc(iter->size);
+	if (!iter->buffer)
+		goto err_free;
+
+	convert_to_gcda(iter->buffer, info);
+
+	return iter;
+
+err_free:
+	kfree(iter);
+	return NULL;
+}
+
+
+/**
+ * gcov_iter_free - release a file iterator and its buffer
+ * @iter: file iterator to free
+ */
+void gcov_iter_free(struct gcov_iterator *iter)
+{
+	vfree(iter->buffer);
+	kfree(iter);
+}
+
+/**
+ * gcov_iter_get_info - return profiling data set for given file iterator
+ * @iter: file iterator
+ */
+struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
+{
+	return iter->info;
+}
+
+/**
+ * gcov_iter_start - reset file iterator to starting position
+ * @iter: file iterator
+ */
+void gcov_iter_start(struct gcov_iterator *iter)
+{
+	iter->pos = 0;
+}
+
+/**
+ * gcov_iter_next - advance file iterator to next logical record
+ * @iter: file iterator
+ *
+ * Return zero if new position is valid, non-zero if iterator has reached end.
+ */
+int gcov_iter_next(struct gcov_iterator *iter)
+{
+	if (iter->pos < iter->size)
+		iter->pos += ITER_STRIDE;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * gcov_iter_write - write data for current pos to seq_file
+ * @iter: file iterator
+ * @seq: seq_file handle
+ *
+ * Return zero on success, non-zero otherwise.
+ */
+int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
+{
+	size_t len;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	len = ITER_STRIDE;
+	if (iter->pos + len > iter->size)
+		len = iter->size - iter->pos;
+
+	seq_write(seq, iter->buffer + iter->pos, len);
+
+	return 0;
+}
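
convert_to_gcda() above doubles as its own size calculator: the store_gcov_u32()/store_gcov_u64() helpers always return the record size and only write when handed a buffer, so gcov_iter_new() can measure with a NULL buffer and then fill. A minimal sketch of the same two-pass idiom in plain C (serialize() and pack() are hypothetical stand-ins, not kernel API):

#include <stdlib.h>
#include <string.h>

/* Returns the bytes needed; stores only when buf is non-NULL. */
static size_t serialize(char *buf, const int *vals, size_t n)
{
	size_t pos = 0, i;

	for (i = 0; i < n; i++) {
		if (buf)
			memcpy(buf + pos, &vals[i], sizeof(vals[i]));
		pos += sizeof(vals[i]);
	}
	return pos;
}

static char *pack(const int *vals, size_t n, size_t *size)
{
	char *buf;

	*size = serialize(NULL, vals, n);	/* pass 1: dry run */
	buf = malloc(*size);
	if (buf)
		serialize(buf, vals, n);	/* pass 2: fill */
	return buf;
}
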
diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h
index 060073e..92c8e22 100644
--- a/kernel/gcov/gcov.h
+++ b/kernel/gcov/gcov.h
@@ -21,7 +21,6 @@
  * gcc and need to be kept as close to the original definition as possible to
  * remain compatible.
  */
-#define GCOV_COUNTERS		5
 #define GCOV_DATA_MAGIC		((unsigned int) 0x67636461)
 #define GCOV_TAG_FUNCTION	((unsigned int) 0x01000000)
 #define GCOV_TAG_COUNTER_BASE	((unsigned int) 0x01a10000)
@@ -34,60 +33,18 @@
 typedef long long gcov_type;
 #endif
 
-/**
- * struct gcov_fn_info - profiling meta data per function
- * @ident: object file-unique function identifier
- * @checksum: function checksum
- * @n_ctrs: number of values per counter type belonging to this function
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time.
- */
-struct gcov_fn_info {
-	unsigned int ident;
-	unsigned int checksum;
-	unsigned int n_ctrs[0];
-};
+/* Opaque gcov_info. The gcov structures can change between gcc versions (as
+ * they did in gcc 4.7), so the full definition cannot live here and must be
+ * provided by the gcc-specific gcov implementation. Generic code therefore
+ * must not touch the members directly and has to use the interface below. */
+struct gcov_info;
 
-/**
- * struct gcov_ctr_info - profiling data per counter type
- * @num: number of counter values for this type
- * @values: array of counter values for this type
- * @merge: merge function for counter values of this type (unused)
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the values array.
- */
-struct gcov_ctr_info {
-	unsigned int	num;
-	gcov_type	*values;
-	void		(*merge)(gcov_type *, unsigned int);
-};
-
-/**
- * struct gcov_info - profiling data per object file
- * @version: gcov version magic indicating the gcc version used for compilation
- * @next: list head for a singly-linked list
- * @stamp: time stamp
- * @filename: name of the associated gcov data file
- * @n_functions: number of instrumented functions
- * @functions: function data
- * @ctr_mask: mask specifying which counter types are active
- * @counts: counter data per counter type
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the next pointer.
- */
-struct gcov_info {
-	unsigned int			version;
-	struct gcov_info		*next;
-	unsigned int			stamp;
-	const char			*filename;
-	unsigned int			n_functions;
-	const struct gcov_fn_info	*functions;
-	unsigned int			ctr_mask;
-	struct gcov_ctr_info		counts[0];
-};
+/* Interface to access gcov_info data */
+const char *gcov_info_filename(struct gcov_info *info);
+unsigned int gcov_info_version(struct gcov_info *info);
+struct gcov_info *gcov_info_next(struct gcov_info *info);
+void gcov_info_link(struct gcov_info *info);
+void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info);
 
 /* Base interface. */
 enum gcov_action {
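
The header change above is the classic C opaque-type pattern: generic code sees only a forward declaration plus accessor functions, and each gcc-specific file is free to define the real layout. A hedged sketch of the pattern outside the gcov context (all names hypothetical):

/* shape.h - callers get an incomplete type and accessors only. */
struct shape;
double shape_area(const struct shape *s);

/* shape_circle.c - the one file that knows the layout. */
struct shape {
	double radius;
};

double shape_area(const struct shape *s)
{
	return 3.141592653589793 * s->radius * s->radius;
}
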
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 3e97fb1..9328b80 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -16,11 +16,12 @@
 #include <linux/export.h>
 #include <linux/sysctl.h>
 #include <linux/utsname.h>
+#include <trace/events/sched.h>
 
 /*
  * The number of tasks checked:
  */
-unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
 
 /*
  * Limit number of tasks checked in a batch.
@@ -92,6 +93,9 @@
 		t->last_switch_count = switch_count;
 		return;
 	}
+
+	trace_sched_process_hang(t);
+
 	if (!sysctl_hung_task_warnings)
 		return;
 	sysctl_hung_task_warnings--;
@@ -203,6 +207,14 @@
 	return ret;
 }
 
+static atomic_t reset_hung_task = ATOMIC_INIT(0);
+
+void reset_hung_task_detector(void)
+{
+	atomic_set(&reset_hung_task, 1);
+}
+EXPORT_SYMBOL_GPL(reset_hung_task_detector);
+
 /*
  * kthread which checks for tasks stuck in D state
  */
@@ -216,6 +228,9 @@
 		while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
 			timeout = sysctl_hung_task_timeout_secs;
 
+		if (atomic_xchg(&reset_hung_task, 0))
+			continue;
+
 		check_hung_uninterruptible_tasks(timeout);
 	}
 
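
reset_hung_task_detector() and the atomic_xchg() in the watchdog loop form a lossless test-and-clear flag: a reset posted between two polls is consumed exactly once, with no lock shared between the poster and the watchdog. The same idiom in C11 atomics, as a hedged sketch (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int reset_requested;

/* Any context may post a reset request. */
static void request_reset(void)
{
	atomic_store(&reset_requested, 1);
}

/* The watchdog loop atomically reads and clears the flag, so a
 * request can never be observed twice or lost between polls. */
static bool consume_reset(void)
{
	return atomic_exchange(&reset_requested, 0) != 0;
}
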
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a3bb14f..dc04c16 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -214,7 +214,7 @@
 }
 
 /**
- * irq_disable - Mark interupt disabled
+ * irq_disable - Mark interrupt disabled
  * @desc:	irq descriptor which should be disabled
  *
  * If the chip does not implement the irq_disable callback, we
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3e59f95..481a13c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -786,7 +786,7 @@
 }
 
 /*
- * Interrupts explicitely requested as threaded interupts want to be
+ * Interrupts explicitly requested as threaded interrupts want to be
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 297a924..9019f15 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,6 +58,7 @@
 
 void static_key_slow_inc(struct static_key *key)
 {
+	STATIC_KEY_CHECK_USE();
 	if (atomic_inc_not_zero(&key->enabled))
 		return;
 
@@ -103,12 +104,14 @@
 
 void static_key_slow_dec(struct static_key *key)
 {
+	STATIC_KEY_CHECK_USE();
 	__static_key_slow_dec(key, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+	STATIC_KEY_CHECK_USE();
 	__static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
@@ -116,6 +119,7 @@
 void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
+	STATIC_KEY_CHECK_USE();
 	key->timeout = rl;
 	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
 }
@@ -212,6 +216,7 @@
 		key->next = NULL;
 #endif
 	}
+	static_key_initialized = true;
 	jump_label_unlock();
 }
 
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2a74f30..490afc0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -921,7 +921,7 @@
  *   reinitialize them.
  *
  * - A machine specific part that includes the syscall number
- *   and the copies the image to it's final destination.  And
+ *   and then copies the image to its final destination.  And
  *   jumps into the image at entry.
  *
  * kexec does not sync, or unmount filesystems so if you need
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a0d367a..ceeadfc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2066,7 +2066,7 @@
 {
 	int i, err = 0;
 	unsigned long offset = 0, size = 0;
-	char *modname, namebuf[128];
+	char *modname, namebuf[KSYM_NAME_LEN];
 	const char *symbol_name;
 	void *addr;
 	struct kprobe_blackpoint *kb;
@@ -2192,7 +2192,7 @@
 	const char *sym = NULL;
 	unsigned int i = *(loff_t *) v;
 	unsigned long offset = 0;
-	char *modname, namebuf[128];
+	char *modname, namebuf[KSYM_NAME_LEN];
 
 	head = &kprobe_table[i];
 	preempt_disable();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 760e86d..b5ae3ee 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -33,7 +33,7 @@
 
 	/* Result passed back to kthread_create() from kthreadd. */
 	struct task_struct *result;
-	struct completion done;
+	struct completion *done;
 
 	struct list_head list;
 };
@@ -178,6 +178,7 @@
 	struct kthread_create_info *create = _create;
 	int (*threadfn)(void *data) = create->threadfn;
 	void *data = create->data;
+	struct completion *done;
 	struct kthread self;
 	int ret;
 
@@ -187,10 +188,16 @@
 	init_completion(&self.parked);
 	current->vfork_done = &self.exited;
 
+	/* If user was SIGKILLed, I release the structure. */
+	done = xchg(&create->done, NULL);
+	if (!done) {
+		kfree(create);
+		do_exit(-EINTR);
+	}
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
 	create->result = current;
-	complete(&create->done);
+	complete(done);
 	schedule();
 
 	ret = -EINTR;
@@ -223,8 +230,15 @@
 	/* We want our own signal handler (we take no signals by default). */
 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
 	if (pid < 0) {
+		/* If user was SIGKILLed, I release the structure. */
+		struct completion *done = xchg(&create->done, NULL);
+
+		if (!done) {
+			kfree(create);
+			return;
+		}
 		create->result = ERR_PTR(pid);
-		complete(&create->done);
+		complete(done);
 	}
 }
 
@@ -255,36 +269,59 @@
 					   const char namefmt[],
 					   ...)
 {
-	struct kthread_create_info create;
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct task_struct *task;
+	struct kthread_create_info *create = kmalloc(sizeof(*create),
+						     GFP_KERNEL);
 
-	create.threadfn = threadfn;
-	create.data = data;
-	create.node = node;
-	init_completion(&create.done);
+	if (!create)
+		return ERR_PTR(-ENOMEM);
+	create->threadfn = threadfn;
+	create->data = data;
+	create->node = node;
+	create->done = &done;
 
 	spin_lock(&kthread_create_lock);
-	list_add_tail(&create.list, &kthread_create_list);
+	list_add_tail(&create->list, &kthread_create_list);
 	spin_unlock(&kthread_create_lock);
 
 	wake_up_process(kthreadd_task);
-	wait_for_completion(&create.done);
-
-	if (!IS_ERR(create.result)) {
+	/*
+	 * Wait for completion in killable state, for I might be chosen by
+	 * the OOM killer while kthreadd is trying to allocate memory for
+	 * new kernel thread.
+	 */
+	if (unlikely(wait_for_completion_killable(&done))) {
+		/*
+		 * If I was SIGKILLed before kthreadd (or new kernel thread)
+		 * calls complete(), leave the cleanup of this structure to
+		 * that thread.
+		 */
+		if (xchg(&create->done, NULL))
+			return ERR_PTR(-ENOMEM);
+		/*
+		 * kthreadd (or new kernel thread) will call complete()
+		 * shortly.
+		 */
+		wait_for_completion(&done);
+	}
+	task = create->result;
+	if (!IS_ERR(task)) {
 		static const struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
-		vsnprintf(create.result->comm, sizeof(create.result->comm),
-			  namefmt, args);
+		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
 		va_end(args);
 		/*
 		 * root may have changed our (kthreadd's) priority or CPU mask.
 		 * The kernel thread should not inherit these properties.
 		 */
-		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
-		set_cpus_allowed_ptr(create.result, cpu_all_mask);
+		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
+		set_cpus_allowed_ptr(task, cpu_all_mask);
 	}
-	return create.result;
+	kfree(create);
+	return task;
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
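
The reworked kthread_create_on_node() is an ownership handoff: the request is heap-allocated and carries a pointer to the waiter's on-stack completion, and both sides race on xchg(&create->done, NULL). Whoever wins the non-NULL pointer must signal it; whoever sees NULL owns freeing the request, so a SIGKILLed waiter can leave without kthreadd ever completing a dead stack frame. A compressed, hedged model of the protocol using pthreads and C11 atomics (all names hypothetical, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_bool *_Atomic done;	/* handoff token */
	int result;
};

static void *worker(void *arg)
{
	struct request *req = arg;
	atomic_bool *done = atomic_exchange(&req->done, NULL);

	if (!done) {			/* waiter gave up: we free */
		free(req);
		return NULL;
	}
	req->result = 42;
	atomic_store(done, true);	/* complete(): stack still live */
	return NULL;
}

int main(void)
{
	atomic_bool done;		/* stands in for the on-stack completion */
	struct request *req = malloc(sizeof(*req));
	pthread_t t;

	atomic_init(&done, false);
	atomic_init(&req->done, &done);
	pthread_create(&t, NULL, worker, req);

	/* A killed waiter would run: if (atomic_exchange(&req->done, NULL))
	 * return;  -- the worker then frees req on its own. */
	while (!atomic_load(&done))	/* wait_for_completion() */
		;
	printf("result = %d\n", req->result);
	pthread_join(t, NULL);
	free(req);			/* worker took the token, so we free */
	return 0;
}
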
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
new file mode 100644
index 0000000..baab8e5
--- /dev/null
+++ b/kernel/locking/Makefile
@@ -0,0 +1,25 @@
+
+obj-y += mutex.o semaphore.o rwsem.o lglock.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_lockdep.o = -pg
+CFLAGS_REMOVE_lockdep_proc.o = -pg
+CFLAGS_REMOVE_mutex-debug.o = -pg
+CFLAGS_REMOVE_rtmutex-debug.o = -pg
+endif
+
+obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-$(CONFIG_LOCKDEP) += lockdep.o
+ifeq ($(CONFIG_PROC_FS),y)
+obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+endif
+obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
diff --git a/kernel/lglock.c b/kernel/locking/lglock.c
similarity index 100%
rename from kernel/lglock.c
rename to kernel/locking/lglock.c
diff --git a/kernel/lockdep.c b/kernel/locking/lockdep.c
similarity index 99%
rename from kernel/lockdep.c
rename to kernel/locking/lockdep.c
index 4e8e14c..576ba75 100644
--- a/kernel/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1232,7 +1232,7 @@
 	return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
 {
 	unsigned long  count = 0;
 	struct lock_list *uninitialized_var(target_entry);
@@ -1258,7 +1258,7 @@
 	return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_list *this)
+static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
 	unsigned long  count = 0;
 	struct lock_list *uninitialized_var(target_entry);
diff --git a/kernel/lockdep_internals.h b/kernel/locking/lockdep_internals.h
similarity index 100%
rename from kernel/lockdep_internals.h
rename to kernel/locking/lockdep_internals.h
diff --git a/kernel/lockdep_proc.c b/kernel/locking/lockdep_proc.c
similarity index 97%
rename from kernel/lockdep_proc.c
rename to kernel/locking/lockdep_proc.c
index b2c71c5..ef43ac4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -421,6 +421,7 @@
 	seq_time(m, lt->min);
 	seq_time(m, lt->max);
 	seq_time(m, lt->total);
+	seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
 }
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
@@ -518,20 +519,20 @@
 	}
 	if (i) {
 		seq_puts(m, "\n");
-		seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
+		seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
 		seq_puts(m, "\n");
 	}
 }
 
 static void seq_header(struct seq_file *m)
 {
-	seq_printf(m, "lock_stat version 0.3\n");
+	seq_puts(m, "lock_stat version 0.4\n");
 
 	if (unlikely(!debug_locks))
 		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
 
-	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
-	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
+	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
+	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
 			"%14s %14s\n",
 			"class name",
 			"con-bounces",
@@ -539,12 +540,14 @@
 			"waittime-min",
 			"waittime-max",
 			"waittime-total",
+			"waittime-avg",
 			"acq-bounces",
 			"acquisitions",
 			"holdtime-min",
 			"holdtime-max",
-			"holdtime-total");
-	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+			"holdtime-total",
+			"holdtime-avg");
+	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
 	seq_printf(m, "\n");
 }
 
diff --git a/kernel/lockdep_states.h b/kernel/locking/lockdep_states.h
similarity index 100%
rename from kernel/lockdep_states.h
rename to kernel/locking/lockdep_states.h
diff --git a/kernel/mutex-debug.c b/kernel/locking/mutex-debug.c
similarity index 100%
rename from kernel/mutex-debug.c
rename to kernel/locking/mutex-debug.c
diff --git a/kernel/mutex-debug.h b/kernel/locking/mutex-debug.h
similarity index 100%
rename from kernel/mutex-debug.h
rename to kernel/locking/mutex-debug.h
diff --git a/kernel/mutex.c b/kernel/locking/mutex.c
similarity index 99%
rename from kernel/mutex.c
rename to kernel/locking/mutex.c
index d24105b..4dd6e4c 100644
--- a/kernel/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1,5 +1,5 @@
 /*
- * kernel/mutex.c
+ * kernel/locking/mutex.c
  *
  * Mutexes: blocking mutual exclusion locks
  *
diff --git a/kernel/mutex.h b/kernel/locking/mutex.h
similarity index 100%
rename from kernel/mutex.h
rename to kernel/locking/mutex.h
diff --git a/lib/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
similarity index 100%
rename from lib/percpu-rwsem.c
rename to kernel/locking/percpu-rwsem.c
diff --git a/kernel/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
similarity index 100%
rename from kernel/rtmutex-debug.c
rename to kernel/locking/rtmutex-debug.c
diff --git a/kernel/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
similarity index 100%
rename from kernel/rtmutex-debug.h
rename to kernel/locking/rtmutex-debug.h
diff --git a/kernel/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
similarity index 100%
rename from kernel/rtmutex-tester.c
rename to kernel/locking/rtmutex-tester.c
diff --git a/kernel/rtmutex.c b/kernel/locking/rtmutex.c
similarity index 100%
rename from kernel/rtmutex.c
rename to kernel/locking/rtmutex.c
diff --git a/kernel/rtmutex.h b/kernel/locking/rtmutex.h
similarity index 100%
rename from kernel/rtmutex.h
rename to kernel/locking/rtmutex.h
diff --git a/kernel/rtmutex_common.h b/kernel/locking/rtmutex_common.h
similarity index 100%
rename from kernel/rtmutex_common.h
rename to kernel/locking/rtmutex_common.h
diff --git a/lib/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
similarity index 100%
rename from lib/rwsem-spinlock.c
rename to kernel/locking/rwsem-spinlock.c
diff --git a/lib/rwsem.c b/kernel/locking/rwsem-xadd.c
similarity index 100%
rename from lib/rwsem.c
rename to kernel/locking/rwsem-xadd.c
diff --git a/kernel/rwsem.c b/kernel/locking/rwsem.c
similarity index 100%
rename from kernel/rwsem.c
rename to kernel/locking/rwsem.c
diff --git a/kernel/semaphore.c b/kernel/locking/semaphore.c
similarity index 100%
rename from kernel/semaphore.c
rename to kernel/locking/semaphore.c
diff --git a/kernel/spinlock.c b/kernel/locking/spinlock.c
similarity index 100%
rename from kernel/spinlock.c
rename to kernel/locking/spinlock.c
diff --git a/lib/spinlock_debug.c b/kernel/locking/spinlock_debug.c
similarity index 100%
rename from lib/spinlock_debug.c
rename to kernel/locking/spinlock_debug.c
diff --git a/kernel/module.c b/kernel/module.c
index dc58274..f5a3b1e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -378,23 +378,21 @@
 		if (syms->licence == GPL_ONLY)
 			return false;
 		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
-			printk(KERN_WARNING "Symbol %s is being used "
-			       "by a non-GPL module, which will not "
-			       "be allowed in the future\n", fsa->name);
+			pr_warn("Symbol %s is being used by a non-GPL module, "
+				"which will not be allowed in the future\n",
+				fsa->name);
 		}
 	}
 
 #ifdef CONFIG_UNUSED_SYMBOLS
 	if (syms->unused && fsa->warn) {
-		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
-		       "however this module is using it.\n", fsa->name);
-		printk(KERN_WARNING
-		       "This symbol will go away in the future.\n");
-		printk(KERN_WARNING
-		       "Please evalute if this is the right api to use and if "
-		       "it really is, submit a report the linux kernel "
-		       "mailinglist together with submitting your code for "
-		       "inclusion.\n");
+		pr_warn("Symbol %s is marked as UNUSED, however this module is "
+			"using it.\n", fsa->name);
+		pr_warn("This symbol will go away in the future.\n");
+		pr_warn("Please evaluate if this is the right api to use and "
+			"if it really is, submit a report to the linux kernel "
+			"mailing list together with submitting your code for "
+			"inclusion.\n");
 	}
 #endif
 
@@ -492,16 +490,15 @@
 		return 0;
 
 	if (align > PAGE_SIZE) {
-		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       mod->name, align, PAGE_SIZE);
+		pr_warn("%s: per-cpu alignment %li > %li\n",
+			mod->name, align, PAGE_SIZE);
 		align = PAGE_SIZE;
 	}
 
 	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 	if (!mod->percpu) {
-		printk(KERN_WARNING
-		       "%s: Could not allocate %lu bytes percpu data\n",
-		       mod->name, (unsigned long)pcpusec->sh_size);
+		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
+			mod->name, (unsigned long)pcpusec->sh_size);
 		return -ENOMEM;
 	}
 	mod->percpu_size = pcpusec->sh_size;
@@ -644,8 +641,6 @@
 
 	/* Hold reference count during initialization. */
 	__this_cpu_write(mod->refptr->incs, 1);
-	/* Backwards compatibility macros put refcount during init. */
-	mod->waiter = current;
 
 	return 0;
 }
@@ -679,7 +674,7 @@
 	pr_debug("Allocating new usage for %s.\n", a->name);
 	use = kmalloc(sizeof(*use), GFP_ATOMIC);
 	if (!use) {
-		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
+		pr_warn("%s: out of memory loading\n", a->name);
 		return -ENOMEM;
 	}
 
@@ -771,16 +766,9 @@
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-	if (flags & O_NONBLOCK) {
-		struct stopref sref = { mod, flags, forced };
+	struct stopref sref = { mod, flags, forced };
 
-		return stop_machine(__try_stop_module, &sref, NULL);
-	} else {
-		/* We don't need to stop the machine for this. */
-		mod->state = MODULE_STATE_GOING;
-		synchronize_sched();
-		return 0;
-	}
+	return stop_machine(__try_stop_module, &sref, NULL);
 }
 
 unsigned long module_refcount(struct module *mod)
@@ -813,21 +801,6 @@
 /* This exists whether we can unload or not */
 static void free_module(struct module *mod);
 
-static void wait_for_zero_refcount(struct module *mod)
-{
-	/* Since we might sleep for some time, release the mutex first */
-	mutex_unlock(&module_mutex);
-	for (;;) {
-		pr_debug("Looking at refcount...\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (module_refcount(mod) == 0)
-			break;
-		schedule();
-	}
-	current->state = TASK_RUNNING;
-	mutex_lock(&module_mutex);
-}
-
 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		unsigned int, flags)
 {
@@ -842,6 +815,11 @@
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
+	if (!(flags & O_NONBLOCK)) {
+		printk(KERN_WARNING
+		       "waiting for module removal is not supported: please upgrade\n");
+	}
+
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -859,8 +837,7 @@
 
 	/* Doing init or already dying? */
 	if (mod->state != MODULE_STATE_LIVE) {
-		/* FIXME: if (force), slam module count and wake up
-                   waiter --RR */
+		/* FIXME: if (force), slam module count damn the torpedoes */
 		pr_debug("%s already dying\n", mod->name);
 		ret = -EBUSY;
 		goto out;
@@ -876,18 +853,11 @@
 		}
 	}
 
-	/* Set this up before setting mod->state */
-	mod->waiter = current;
-
 	/* Stop the machine so refcounts can't move and disable module. */
 	ret = try_stop_module(mod, flags, &forced);
 	if (ret != 0)
 		goto out;
 
-	/* Never wait if forced. */
-	if (!forced && module_refcount(mod) != 0)
-		wait_for_zero_refcount(mod);
-
 	mutex_unlock(&module_mutex);
 	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
@@ -1005,9 +975,6 @@
 		__this_cpu_inc(module->refptr->decs);
 
 		trace_module_put(module, _RET_IP_);
-		/* Maybe they're waiting for us to drop reference? */
-		if (unlikely(!module_is_live(module)))
-			wake_up_process(module->waiter);
 		preempt_enable();
 	}
 }
@@ -1145,8 +1112,7 @@
 {
 #ifdef CONFIG_MODULE_FORCE_LOAD
 	if (!test_taint(TAINT_FORCED_MODULE))
-		printk(KERN_WARNING "%s: %s: kernel tainted.\n",
-		       mod->name, reason);
+		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
 	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
 	return 0;
 #else
@@ -1199,8 +1165,7 @@
 		goto bad_version;
 	}
 
-	printk(KERN_WARNING "%s: no symbol version for %s\n",
-	       mod->name, symname);
+	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
 	return 0;
 
 bad_version:
@@ -1309,8 +1274,8 @@
 			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
 			|| PTR_ERR(ksym) != -EBUSY,
 					     30 * HZ) <= 0) {
-		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
-		       mod->name, owner);
+		pr_warn("%s: gave up waiting for init of module %s.\n",
+			mod->name, owner);
 	}
 	return ksym;
 }
@@ -1626,15 +1591,14 @@
 	struct kobject *kobj;
 
 	if (!module_sysfs_initialized) {
-		printk(KERN_ERR "%s: module sysfs not initialized\n",
-		       mod->name);
+		pr_err("%s: module sysfs not initialized\n", mod->name);
 		err = -EINVAL;
 		goto out;
 	}
 
 	kobj = kset_find_obj(module_kset, mod->name);
 	if (kobj) {
-		printk(KERN_ERR "%s: module is already loaded\n", mod->name);
+		pr_err("%s: module is already loaded\n", mod->name);
 		kobject_put(kobj);
 		err = -EINVAL;
 		goto out;
@@ -1961,8 +1925,7 @@
 	for (i = 0; i < ARRAY_SIZE(arr); i++) {
 		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
 			if (find_symbol(s->name, &owner, NULL, true, false)) {
-				printk(KERN_ERR
-				       "%s: exports duplicate symbol %s"
+				pr_err("%s: exports duplicate symbol %s"
 				       " (owned by %s)\n",
 				       mod->name, s->name, module_name(owner));
 				return -ENOEXEC;
@@ -2013,8 +1976,8 @@
 			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
 				break;
 
-			printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
-			       mod->name, name, PTR_ERR(ksym));
+			pr_warn("%s: Unknown symbol %s (err %li)\n",
+				mod->name, name, PTR_ERR(ksym));
 			ret = PTR_ERR(ksym) ?: -ENOENT;
 			break;
 
@@ -2168,8 +2131,8 @@
 
 	if (!license_is_gpl_compatible(license)) {
 		if (!test_taint(TAINT_PROPRIETARY_MODULE))
-			printk(KERN_WARNING "%s: module license '%s' taints "
-				"kernel.\n", mod->name, license);
+			pr_warn("%s: module license '%s' taints kernel.\n",
+				mod->name, license);
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
 				 LOCKDEP_NOW_UNRELIABLE);
 	}
@@ -2405,8 +2368,8 @@
 		return;
 #ifdef CONFIG_DYNAMIC_DEBUG
 	if (ddebug_add_module(debug, num, debug->modname))
-		printk(KERN_ERR "dynamic debug error adding module: %s\n",
-					debug->modname);
+		pr_err("dynamic debug error adding module: %s\n",
+			debug->modname);
 #endif
 }
 
@@ -2619,8 +2582,7 @@
 		Elf_Shdr *shdr = &info->sechdrs[i];
 		if (shdr->sh_type != SHT_NOBITS
 		    && info->len < shdr->sh_offset + shdr->sh_size) {
-			printk(KERN_ERR "Module len %lu truncated\n",
-			       info->len);
+			pr_err("Module len %lu truncated\n", info->len);
 			return -ENOEXEC;
 		}
 
@@ -2682,15 +2644,14 @@
 
 	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
 	if (!info->index.mod) {
-		printk(KERN_WARNING "No module found in object\n");
+		pr_warn("No module found in object\n");
 		return ERR_PTR(-ENOEXEC);
 	}
 	/* This is temporary: point mod into copy of data. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 
 	if (info->index.sym == 0) {
-		printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
-		       mod->name);
+		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
 		return ERR_PTR(-ENOEXEC);
 	}
 
@@ -2717,7 +2678,7 @@
 		if (err)
 			return err;
 	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
-		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
+		pr_err("%s: version magic '%s' should be '%s'\n",
 		       mod->name, modmagic, vermagic);
 		return -ENOEXEC;
 	}
@@ -2727,9 +2688,8 @@
 
 	if (get_modinfo(info, "staging")) {
 		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
-		printk(KERN_WARNING "%s: module is from the staging directory,"
-		       " the quality is unknown, you have been warned.\n",
-		       mod->name);
+		pr_warn("%s: module is from the staging directory, the quality "
+			"is unknown, you have been warned.\n", mod->name);
 	}
 
 	/* Set up license info based on the info section */
@@ -2738,7 +2698,7 @@
 	return 0;
 }
 
-static void find_module_sections(struct module *mod, struct load_info *info)
+static int find_module_sections(struct module *mod, struct load_info *info)
 {
 	mod->kp = section_objs(info, "__param",
 			       sizeof(*mod->kp), &mod->num_kp);
@@ -2768,6 +2728,18 @@
 #ifdef CONFIG_CONSTRUCTORS
 	mod->ctors = section_objs(info, ".ctors",
 				  sizeof(*mod->ctors), &mod->num_ctors);
+	if (!mod->ctors)
+		mod->ctors = section_objs(info, ".init_array",
+				sizeof(*mod->ctors), &mod->num_ctors);
+	else if (find_sec(info, ".init_array")) {
+		/*
+		 * This shouldn't happen with same compiler and binutils
+		 * building all parts of the module.
+		 */
+		printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+		       mod->name);
+		return -EINVAL;
+	}
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
@@ -2801,11 +2773,12 @@
 				    sizeof(*mod->extable), &mod->num_exentries);
 
 	if (section_addr(info, "__obsparm"))
-		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
-		       mod->name);
+		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
 
 	info->debug = section_objs(info, "__verbose",
 				   sizeof(*info->debug), &info->num_debug);
+
+	return 0;
 }
 
 static int move_module(struct module *mod, struct load_info *info)
@@ -3078,11 +3051,10 @@
 		return ret;
 	}
 	if (ret > 0) {
-		printk(KERN_WARNING
-"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
-"%s: loading module anyway...\n",
-		       __func__, mod->name, ret,
-		       __func__);
+		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
+			"follow 0/-E convention\n"
+			"%s: loading module anyway...\n",
+			__func__, mod->name, ret, __func__);
 		dump_stack();
 	}
 
@@ -3205,10 +3177,8 @@
 {
 	/* Check for magic 'dyndbg' arg */ 
 	int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
-	if (ret != 0) {
-		printk(KERN_WARNING "%s: unknown parameter '%s' ignored\n",
-		       modname, param);
-	}
+	if (ret != 0)
+		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
 	return 0;
 }
 
@@ -3243,10 +3213,9 @@
 #ifdef CONFIG_MODULE_SIG
 	mod->sig_ok = info->sig_ok;
 	if (!mod->sig_ok) {
-		printk_once(KERN_NOTICE
-			    "%s: module verification failed: signature and/or"
-			    " required key missing - tainting kernel\n",
-			    mod->name);
+		pr_notice_once("%s: module verification failed: signature "
+			       "and/or required key missing - tainting "
+			       "kernel\n", mod->name);
 		add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
 	}
 #endif
@@ -3263,7 +3232,9 @@
 
 	/* Now we've got everything in the final locations, we can
 	 * find optional sections. */
-	find_module_sections(mod, info);
+	err = find_module_sections(mod, info);
+	if (err)
+		goto free_unload;
 
 	err = check_module_license_and_versions(mod);
 	if (err)
diff --git a/kernel/panic.c b/kernel/panic.c
index b6c482c..c00b4ce 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -233,7 +233,7 @@
  */
 const char *print_tainted(void)
 {
-	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];
+	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];
 
 	if (tainted_mask) {
 		char *s;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 4208655..06c62de 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -132,6 +132,12 @@
 	return ERR_PTR(err);
 }
 
+static void delayed_free_pidns(struct rcu_head *p)
+{
+	kmem_cache_free(pid_ns_cachep,
+			container_of(p, struct pid_namespace, rcu));
+}
+
 static void destroy_pid_namespace(struct pid_namespace *ns)
 {
 	int i;
@@ -140,7 +146,7 @@
 	for (i = 0; i < PIDMAP_ENTRIES; i++)
 		kfree(ns->pidmap[i].page);
 	put_user_ns(ns->user_ns);
-	kmem_cache_free(pid_ns_cachep, ns);
+	call_rcu(&ns->rcu, delayed_free_pidns);
 }
 
 struct pid_namespace *copy_pid_ns(unsigned long flags,
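
Deferring the kmem_cache_free() through call_rcu() is the standard way to free an object that lockless readers may still be walking under rcu_read_lock(): embed an rcu_head in the object and recover the enclosing object with container_of() in the callback, exactly as delayed_free_pidns() does. The general shape, as a hedged kernel-style sketch (struct foo is hypothetical; call_rcu() and container_of() are real kernel API):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int payload;
	struct rcu_head rcu;	/* callback handle lives in the object */
};

static void foo_free_rcu(struct rcu_head *head)
{
	/* Runs only after a grace period: every reader that could
	 * still hold a reference has left its RCU read-side section. */
	kfree(container_of(head, struct foo, rcu));
}

static void foo_destroy(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);
}
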
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index d444c4e..2fac9cc 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -178,6 +178,22 @@
 	def_bool y
 	depends on PM_DEBUG && PM_SLEEP
 
+config DPM_WATCHDOG
+	bool "Device suspend/resume watchdog"
+	depends on PM_DEBUG && PSTORE
+	---help---
+	  Sets up a watchdog timer to capture drivers that are
+	  locked up attempting to suspend/resume a device.
+	  A detected lockup causes system panic with message
+	  captured in pstore device for inspection in subsequent
+	  boot session.
+
+config DPM_WATCHDOG_TIMEOUT
+	int "Watchdog timeout in seconds"
+	range 1 120
+	default 12
+	depends on DPM_WATCHDOG
+
 config PM_TRACE
 	bool
 	help
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index a394297..8dff9b4 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -558,30 +558,12 @@
 	if (count == sizeof(s32)) {
 		if (copy_from_user(&value, buf, sizeof(s32)))
 			return -EFAULT;
-	} else if (count <= 11) { /* ASCII perhaps? */
-		char ascii_value[11];
-		unsigned long int ulval;
+	} else {
 		int ret;
 
-		if (copy_from_user(ascii_value, buf, count))
-			return -EFAULT;
-
-		if (count > 10) {
-			if (ascii_value[10] == '\n')
-				ascii_value[10] = '\0';
-			else
-				return -EINVAL;
-		} else {
-			ascii_value[count] = '\0';
-		}
-		ret = kstrtoul(ascii_value, 16, &ulval);
-		if (ret) {
-			pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
-			return -EINVAL;
-		}
-		value = (s32)lower_32_bits(ulval);
-	} else {
-		return -EINVAL;
+		ret = kstrtos32_from_user(buf, count, 16, &value);
+		if (ret)
+			return ret;
 	}
 
 	req = filp->private_data;
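
The replacement helper collapses roughly twenty lines of hand-rolled parsing: kstrtos32_from_user() copies the string in from userspace, strips a trailing newline, parses it in the requested base, and range-checks the result in a single call. A hedged sketch of a write() handler built on it (foo_write() is hypothetical; the helper itself is real kernel API):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t foo_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *f_pos)
{
	s32 value;
	int ret;

	ret = kstrtos32_from_user(buf, count, 16, &value);	/* base 16 */
	if (ret)
		return ret;	/* -EFAULT, -EINVAL or -ERANGE */

	/* ... apply value ... */
	return count;
}
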
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 98c3b34..10c22ca 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1402,7 +1402,11 @@
 	 * highmem and non-highmem zones separately.
 	 */
 	pages_highmem = preallocate_image_highmem(highmem / 2);
-	alloc = (count - max_size) - pages_highmem;
+	alloc = count - max_size;
+	if (alloc > pages_highmem)
+		alloc -= pages_highmem;
+	else
+		alloc = 0;
 	pages = preallocate_image_memory(alloc, avail_normal);
 	if (pages < alloc) {
 		/* We have exhausted non-highmem pages, try highmem. */
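
The snapshot change above is an unsigned-underflow guard: pages_highmem can legitimately exceed count - max_size, and the old one-liner then wrapped around to an enormous allocation request. With unsigned arithmetic the safe pattern is to compare before subtracting, e.g. a clamp like this hedged helper (not kernel API):

/* Subtract b from a, saturating at zero instead of wrapping. */
static inline unsigned long sub_clamp0(unsigned long a, unsigned long b)
{
	return a > b ? a - b : 0;
}
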
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 957f061..2485027 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -36,9 +36,9 @@
 	struct snapshot_handle handle;
 	int swap;
 	int mode;
-	char frozen;
-	char ready;
-	char platform_support;
+	bool frozen;
+	bool ready;
+	bool platform_support;
 	bool free_bitmaps;
 } snapshot_state;
 
@@ -93,9 +93,9 @@
 	if (error)
 		atomic_inc(&snapshot_device_available);
 
-	data->frozen = 0;
-	data->ready = 0;
-	data->platform_support = 0;
+	data->frozen = false;
+	data->ready = false;
+	data->platform_support = false;
 
  Unlock:
 	unlock_system_sleep();
@@ -229,7 +229,7 @@
 		if (error)
 			thaw_processes();
 		else
-			data->frozen = 1;
+			data->frozen = true;
 
 		break;
 
@@ -240,7 +240,7 @@
 		free_basic_memory_bitmaps();
 		data->free_bitmaps = false;
 		thaw_processes();
-		data->frozen = 0;
+		data->frozen = false;
 		break;
 
 	case SNAPSHOT_CREATE_IMAGE:
@@ -270,7 +270,7 @@
 	case SNAPSHOT_FREE:
 		swsusp_free();
 		memset(&data->handle, 0, sizeof(struct snapshot_handle));
-		data->ready = 0;
+		data->ready = false;
 		/*
 		 * It is necessary to thaw kernel threads here, because
 		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
@@ -334,7 +334,7 @@
 		 * PM_HIBERNATION_PREPARE
 		 */
 		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-		data->ready = 0;
+		data->ready = false;
 		break;
 
 	case SNAPSHOT_PLATFORM_SUPPORT:
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b4e8500..be7c86b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -705,9 +705,9 @@
 
 #ifdef CONFIG_KEXEC
 /*
- * This appends the listed symbols to /proc/vmcoreinfo
+ * This appends the listed symbols to /proc/vmcore
  *
- * /proc/vmcoreinfo is used by various utiilties, like crash and makedumpfile to
+ * /proc/vmcore is used by various utilities, like crash and makedumpfile, to
  * obtain access to symbols that are otherwise very difficult to locate.  These
  * symbols are specifically used so that utilities can access and extract the
  * dmesg log from a vmcore file after a crash.
@@ -791,7 +791,7 @@
 static int __init ignore_loglevel_setup(char *str)
 {
 	ignore_loglevel = 1;
-	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
+	pr_info("debug: ignoring loglevel setting.\n");
 
 	return 0;
 }
@@ -820,9 +820,9 @@
 	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
 		"HZ: %d, loops_per_msec: %llu\n",
 		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
-	return 1;
+	return 0;
 }
-__setup("boot_delay=", boot_delay_setup);
+early_param("boot_delay", boot_delay_setup);
 
 static void boot_delay_msec(int level)
 {
@@ -2193,7 +2193,7 @@
 static int __init keep_bootcon_setup(char *str)
 {
 	keep_bootcon = 1;
-	printk(KERN_INFO "debug: skip boot console de-registration.\n");
+	pr_info("debug: skip boot console de-registration.\n");
 
 	return 0;
 }
@@ -2241,7 +2241,7 @@
 		/* find the last or real console */
 		for_each_console(bcon) {
 			if (!(bcon->flags & CON_BOOT)) {
-				printk(KERN_INFO "Too late to register bootconsole %s%d\n",
+				pr_info("Too late to register bootconsole %s%d\n",
 					newcon->name, newcon->index);
 				return;
 			}
@@ -2358,21 +2358,18 @@
 	 * users know there might be something in the kernel's log buffer that
 	 * went to the bootconsole (that they do not see on the real console)
 	 */
+	pr_info("%sconsole [%s%d] enabled\n",
+		(newcon->flags & CON_BOOT) ? "boot" : "",
+		newcon->name, newcon->index);
 	if (bcon &&
 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
 	    !keep_bootcon) {
-		/* we need to iterate through twice, to make sure we print
-		 * everything out, before we unregister the console(s)
+		/* We need to iterate through all boot consoles, to make
+		 * sure we print everything out, before we unregister them.
 		 */
-		printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
-			newcon->name, newcon->index);
 		for_each_console(bcon)
 			if (bcon->flags & CON_BOOT)
 				unregister_console(bcon);
-	} else {
-		printk(KERN_INFO "%sconsole [%s%d] enabled\n",
-			(newcon->flags & CON_BOOT) ? "boot" : "" ,
-			newcon->name, newcon->index);
 	}
 }
 EXPORT_SYMBOL(register_console);
@@ -2382,6 +2379,10 @@
         struct console *a, *b;
 	int res;
 
+	pr_info("%sconsole [%s%d] disabled\n",
+		(console->flags & CON_BOOT) ? "boot" : "",
+		console->name, console->index);
+
 	res = _braille_unregister_console(console);
 	if (res)
 		return res;
@@ -2421,8 +2422,6 @@
 
 	for_each_console(con) {
 		if (!keep_bootcon && con->flags & CON_BOOT) {
-			printk(KERN_INFO "turn off boot console %s%d\n",
-				con->name, con->index);
 			unregister_console(con);
 		}
 	}
@@ -2449,7 +2448,7 @@
 
 	if (pending & PRINTK_PENDING_SCHED) {
 		char *buf = __get_cpu_var(printk_sched_buf);
-		printk(KERN_WARNING "[sched_delayed] %s", buf);
+		pr_warn("[sched_delayed] %s", buf);
 	}
 
 	if (pending & PRINTK_PENDING_WAKEUP)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dd562e9..1f4bcb3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -257,7 +257,8 @@
 	if (task->mm)
 		dumpable = get_dumpable(task->mm);
 	rcu_read_lock();
-	if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
+	if (dumpable != SUID_DUMP_USER &&
+	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
 		rcu_read_unlock();
 		return -EPERM;
 	}
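
The ptrace fix closes a truthiness bug: get_dumpable() is tri-state, and the old !dumpable check treated SUID_DUMP_ROOT (2) the same as SUID_DUMP_USER (1), skipping the capability check. When a boolean-looking API grows extra states, compare against the exact enumerator, as in this hedged sketch (values match the linux/sched.h constants of this era; may_attach() is hypothetical):

#include <stdbool.h>

enum {
	SUID_DUMP_DISABLE = 0,	/* no dump; attach needs a capability */
	SUID_DUMP_USER    = 1,	/* dump/attach as the task's own user */
	SUID_DUMP_ROOT    = 2,	/* dump readable by root only */
};

static bool may_attach(int dumpable, bool has_ptrace_cap)
{
	/* Wrong: "if (dumpable)" would also admit SUID_DUMP_ROOT. */
	return dumpable == SUID_DUMP_USER || has_ptrace_cap;
}
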
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0c9a934..1254f31 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -181,7 +181,7 @@
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
 	return rcu_dynticks_nesting;
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4c06ddf..dd08198 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -664,7 +664,7 @@
  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
  * least disabled preemption.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
 	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
@@ -675,7 +675,7 @@
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-bool rcu_is_watching(void)
+bool notrace rcu_is_watching(void)
 {
 	int ret;
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3822ac0..6abb03d 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1133,7 +1133,7 @@
 
 #ifdef CONFIG_RCU_BOOST
 
-#include "../rtmutex_common.h"
+#include "../locking/rtmutex_common.h"
 
 #ifdef CONFIG_RCU_TRACE
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1deccd7..c180860 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2253,6 +2253,20 @@
 	struct rq *rq;
 	u64 ns = 0;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+	/*
+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
+	 * So we have an optimization chance when the task's delta_exec is 0.
+	 * Reading ->on_cpu is racy, but this is ok.
+	 *
+	 * If we race with it leaving cpu, we'll take a lock. So we're correct.
+	 * If we race with it entering cpu, unaccounted time is 0. This is
+	 * indistinguishable from the read occurring a few cycles earlier.
+	 */
+	if (!p->on_cpu)
+		return p->se.sum_exec_runtime;
+#endif
+
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
 	task_rq_unlock(rq, p, &flags);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df77c60..e8b652e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1000,7 +1000,7 @@
  */
 static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-	int cpu;
+	int cpu, cpus = 0;
 
 	memset(ns, 0, sizeof(*ns));
 	for_each_cpu(cpu, cpumask_of_node(nid)) {
@@ -1009,8 +1009,21 @@
 		ns->nr_running += rq->nr_running;
 		ns->load += weighted_cpuload(cpu);
 		ns->power += power_of(cpu);
+
+		cpus++;
 	}
 
+	/*
+	 * If we raced with hotplug and there are no CPUs left in our mask
+	 * the @ns structure is left zeroed and task_numa_compare() will
+	 * not find this node attractive.
+	 *
+	 * We'll either bail at !has_capacity, or we'll detect a huge imbalance
+	 * and bail there.
+	 */
+	if (!cpus)
+		return;
+
 	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
 	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
 	ns->has_capacity = (ns->nr_running < ns->capacity);
@@ -1201,9 +1214,21 @@
 	 */
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
-	env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
+	if (sd)
+		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
 	rcu_read_unlock();
 
+	/*
+	 * Cpusets can break the scheduler domain tree into smaller
+	 * balance domains, some of which do not cross NUMA boundaries.
+	 * Tasks that are "trapped" in such domains cannot be migrated
+	 * elsewhere, so there is no point in (re)trying.
+	 */
+	if (unlikely(!sd)) {
+		p->numa_preferred_nid = cpu_to_node(task_cpu(p));
+		return -EINVAL;
+	}
+
 	taskweight = task_weight(p, env.src_nid);
 	groupweight = group_weight(p, env.src_nid);
 	update_numa_stats(&env.src_stats, env.src_nid);
@@ -2153,7 +2178,7 @@
 	long contrib;
 
 	/* The fraction of a cpu used by this cfs_rq */
-	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
+	contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
 			  sa->runnable_avg_period + 1);
 	contrib -= cfs_rq->tg_runnable_contrib;
 
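
The one-character fair.c change is the classic cast-before-shift fix: runnable_avg_sum is 32-bit, so the shift by NICE_0_SHIFT happens in 32-bit arithmetic and can truncate before div_u64() ever sees a 64-bit value. Widening the operand first keeps the intermediate exact; a hedged standalone illustration:

#include <stdint.h>

static uint64_t scale_up(uint32_t sum, unsigned int shift)
{
	/* Wrong: "sum << shift" is evaluated in 32 bits, and the high
	 * bits are lost before the result is widened to 64 bits. */
	return (uint64_t)sum << shift;	/* widen first, then shift */
}
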
diff --git a/kernel/signal.c b/kernel/signal.c
index ded28b9..940b30e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2723,7 +2723,7 @@
 
 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
 
-int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
 
diff --git a/kernel/smp.c b/kernel/smp.c
index f5768b0..bd9f940 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -15,9 +15,9 @@
 
 #include "smpboot.h"
 
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 enum {
 	CSD_FLAG_LOCK		= 0x01,
+	CSD_FLAG_WAIT		= 0x02,
 };
 
 struct call_function_data {
@@ -124,7 +124,7 @@
 
 static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
@@ -139,13 +139,15 @@
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static
-void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
 	int ipi;
 
+	if (wait)
+		csd->flags |= CSD_FLAG_WAIT;
+
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&csd->list, &dst->list);
@@ -340,6 +342,7 @@
 	}
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(__smp_call_function_single);
 
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
@@ -459,7 +462,6 @@
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-#endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
 unsigned int setup_max_cpus = NR_CPUS;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b249883..11025cc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,8 +6,6 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -627,146 +625,17 @@
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-	list_add_tail(&cp->list, head);
-
-	/* Trigger the softirq only if the list was previously empty.  */
-	if (head->next == &cp->list)
-		raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-	struct call_single_data *cp = data;
-	unsigned long flags;
-	int softirq;
-
-	softirq = *(int *)cp->info;
-	local_irq_save(flags);
-	__local_trigger(cp, softirq);
-	local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	if (cpu_online(cpu)) {
-		cp->func = remote_softirq_receive;
-		cp->info = &softirq;
-		cp->flags = 0;
-
-		__smp_call_function_single(cpu, cp, 0);
-		return 0;
-	}
-	return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu.  If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-		__local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	unsigned long flags;
-	int this_cpu;
-
-	local_irq_save(flags);
-	this_cpu = smp_processor_id();
-	__send_remote_softirq(cp, cpu, this_cpu, softirq);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
-					       unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-		int i;
-
-		local_irq_disable();
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-			struct list_head *local_head;
-
-			if (list_empty(head))
-				continue;
-
-			local_head = &__get_cpu_var(softirq_work_list[i]);
-			list_splice_init(head, local_head);
-			raise_softirq_irqoff(i);
-		}
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
-	.notifier_call	= remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int i;
-
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
-		for (i = 0; i < NR_SOFTIRQS; i++)
-			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
-	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
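
The removed remote-softirq layer had no in-tree users left; the one
surviving instance of the idea lives open-coded in block/blk-softirq.c.
A condensed sketch of that replacement pattern (assumed shape,
hypothetical names): send an IPI whose callback raises the softirq on
the target CPU.

	static void raise_remote(void *info)
	{
		/* IPI callback on the target CPU; IRQs are disabled here. */
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
	}

	static void trigger_softirq_on(int cpu, struct call_single_data *csd)
	{
		csd->func = raise_remote;
		csd->info = NULL;
		csd->flags = 0;
		__smp_call_function_single(cpu, csd, 0);
	}
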
diff --git a/kernel/sys.c b/kernel/sys.c
index c18ecca..c723113 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -16,7 +16,6 @@
 #include <linux/perf_event.h>
 #include <linux/resource.h>
 #include <linux/kernel.h>
-#include <linux/kexec.h>
 #include <linux/workqueue.h>
 #include <linux/capability.h>
 #include <linux/device.h>
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 36547dd..34a6047 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -969,9 +969,10 @@
 	{
 		.procname	= "hung_task_check_count",
 		.data		= &sysctl_hung_task_check_count,
-		.maxlen		= sizeof(unsigned long),
+		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 	{
 		.procname	= "hung_task_timeout_secs",
@@ -2222,8 +2223,11 @@
 			*i = val;
 		} else {
 			val = convdiv * (*i) / convmul;
-			if (!first)
+			if (!first) {
 				err = proc_put_char(&buffer, &left, '\t');
+				if (err)
+					break;
+			}
 			err = proc_put_long(&buffer, &left, val, false);
 			if (err)
 				break;
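
The hung_task fix pairs an int-sized variable with an int handler:
proc_doulongvec_minmax reading sizeof(unsigned long) from an int would
overrun the variable on 64-bit, and .extra1 = &zero additionally
rejects negative writes. The corrected shape in isolation (hypothetical
table entry):

	static int zero;
	static int my_check_count = 16;

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_check_count",
			.data		= &my_check_count,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &zero,	/* clamp at 0 */
		},
		{ }
	};
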
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b609213..653cbbd 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1024,7 +1024,7 @@
 			if (get_user(value, vec + i))
 				goto out_kfree;
 
-			str += snprintf(str, end - str, "%lu\t", value);
+			str += scnprintf(str, end - str, "%lu\t", value);
 		}
 
 		result = kernel_write(file, buffer, str - buffer, 0);
@@ -1095,7 +1095,7 @@
 			if (get_user(value, vec + i))
 				goto out_kfree;
 
-			str += snprintf(str, end - str, "%lu\t", value);
+			str += scnprintf(str, end - str, "%lu\t", value);
 		}
 
 		result = kernel_write(file, buffer, str - buffer, 0);
@@ -1205,7 +1205,7 @@
 		if (get_user(dnaddr, (__le16 __user *)newval))
 			goto out;
 
-		len = snprintf(buf, sizeof(buf), "%hu.%hu",
+		len = scnprintf(buf, sizeof(buf), "%hu.%hu",
 				le16_to_cpu(dnaddr) >> 10,
 				le16_to_cpu(dnaddr) & 0x3ff);
 
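
The snprintf()/scnprintf() distinction being fixed here: snprintf()
returns the length the output *would* have had, so on truncation
"str += snprintf(str, end - str, ...)" advances str past end and the
next length computation goes negative (then huge, once converted to
size_t). scnprintf() returns the bytes actually stored. The safe
accumulation pattern, self-contained (hypothetical helper):

	static size_t format_vals(char *buf, size_t size,
				  const unsigned long *vec, int n)
	{
		char *str = buf, *end = buf + size;
		int i;

		for (i = 0; i < n; i++)
			str += scnprintf(str, end - str, "%lu\t", vec[i]);

		return str - buf;	/* never exceeds size */
	}
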
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 145bb4d..9f4618e 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -290,6 +290,7 @@
 	struct listener_list *listeners;
 	struct listener *s, *tmp, *s2;
 	unsigned int cpu;
+	int ret = 0;
 
 	if (!cpumask_subset(mask, cpu_possible_mask))
 		return -EINVAL;
@@ -304,9 +305,10 @@
 		for_each_cpu(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener),
 					GFP_KERNEL, cpu_to_node(cpu));
-			if (!s)
+			if (!s) {
+				ret = -ENOMEM;
 				goto cleanup;
-
+			}
 			s->pid = pid;
 			s->valid = 1;
 
@@ -339,7 +341,7 @@
 		}
 		up_write(&listeners->sem);
 	}
-	return 0;
+	return ret;
 }
 
 static int parse(struct nlattr *na, struct cpumask *mask)
@@ -404,11 +406,15 @@
 	if (!na)
 		goto err;
 
-	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
+		nla_nest_cancel(skb, na);
 		goto err;
+	}
 	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
-	if (!ret)
+	if (!ret) {
+		nla_nest_cancel(skb, na);
 		goto err;
+	}
 	nla_nest_end(skb, na);
 
 	return nla_data(ret);
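
Both taskstats changes enforce standard netlink rules: report -ENOMEM
instead of silently succeeding, and unwind an opened nest with
nla_nest_cancel() on every failure path so a half-built attribute never
reaches userspace. The nesting rule in miniature (hypothetical helper):

	static int fill_nested(struct sk_buff *skb, u32 pid)
	{
		struct nlattr *na;

		na = nla_nest_start(skb, TASKSTATS_TYPE_AGGR_PID);
		if (!na)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TASKSTATS_TYPE_PID, pid) < 0) {
			nla_nest_cancel(skb, na);	/* trim partial nest */
			return -EMSGSIZE;
		}

		nla_nest_end(skb, na);
		return 0;
	}
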
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560..f785aef 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>
 
 #include <trace/events/block.h>
 
@@ -38,6 +39,9 @@
 static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
 
@@ -107,10 +111,18 @@
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
 {
+	unsigned long flags;
+	struct blk_trace *bt;
+
 	tsk->btrace_seq = blktrace_seq;
-	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+	spin_lock_irqsave(&running_trace_lock, flags);
+	list_for_each_entry(bt, &running_trace_list, running_list) {
+		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+			   sizeof(tsk->comm));
+	}
+	spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@
 		goto record_it;
 	}
 
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(tsk);
+
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
 	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
-
-	if (unlikely(tsk->btrace_seq != blktrace_seq))
-		trace_note_tsk(bt, tsk);
-
 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
 	if (t) {
 		sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@
 	bt->dir = dir;
 	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
+	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -567,13 +579,12 @@
 		.end_lba = cbuts.end_lba,
 		.pid = cbuts.pid,
 	};
-	memcpy(&buts.name, &cbuts.name, 32);
 
 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 	if (ret)
 		return ret;
 
-	if (copy_to_user(arg, &buts.name, 32)) {
+	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 		blk_trace_remove(q);
 		return -EFAULT;
 	}
@@ -601,6 +612,9 @@
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+			spin_lock_irq(&running_trace_lock);
+			list_add(&bt->running_list, &running_trace_list);
+			spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -608,6 +622,9 @@
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
+			spin_lock_irq(&running_trace_lock);
+			list_del_init(&bt->running_list);
+			spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1472,6 +1489,9 @@
 	if (atomic_dec_and_test(&blk_probes_ref))
 		blk_unregister_tracepoints();
 
+	spin_lock_irq(&running_trace_lock);
+	list_del(&bt->running_list);
+	spin_unlock_irq(&running_trace_lock);
 	blk_trace_free(bt);
 	return 0;
 }
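
The blktrace rework drops the bt argument from trace_note_tsk() in
favour of one global registry: running traces sit on running_trace_list
under running_trace_lock, so the notify path can reach every active
trace without holding a pointer to any particular one. The generic
shape of that pattern (sketch, hypothetical item type):

	static LIST_HEAD(active_list);
	static DEFINE_SPINLOCK(active_lock);

	struct item {
		struct list_head node;
	};

	static void item_start(struct item *it)
	{
		spin_lock_irq(&active_lock);
		list_add(&it->node, &active_list);
		spin_unlock_irq(&active_lock);
	}

	static void item_stop(struct item *it)
	{
		spin_lock_irq(&active_lock);
		list_del_init(&it->node);	/* keeps node safe to unlink again */
		spin_unlock_irq(&active_lock);
	}

	static void notify_all(void)
	{
		struct item *it;
		unsigned long flags;

		spin_lock_irqsave(&active_lock, flags);
		list_for_each_entry(it, &active_list, node)
			pr_debug("notifying item %p\n", it);
		spin_unlock_irqrestore(&active_lock, flags);
	}
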
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 03cf44a..22fa556 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3307,7 +3307,11 @@
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
-int register_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only register ftrace commands from __init, so mark this
+ * __init too.
+ */
+__init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p;
 	int ret = 0;
@@ -3326,7 +3330,11 @@
 	return ret;
 }
 
-int unregister_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only unregister ftrace commands from __init, so mark
+ * this __init too.
+ */
+__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p, *n;
 	int ret = -ENODEV;
@@ -3641,7 +3649,7 @@
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
 {
@@ -3659,7 +3667,7 @@
 		func = strsep(&buf, ",");
 		/* we allow only one expression at a time */
 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-				      func);
+				      FTRACE_GRAPH_MAX_FUNCS, func);
 		if (ret)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
@@ -3776,15 +3784,25 @@
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+	unsigned long *table;
+	size_t size;
+	int *count;
+	const struct seq_operations *seq_ops;
+};
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-	if (*pos >= ftrace_graph_count)
+	struct ftrace_graph_data *fgd = m->private;
+
+	if (*pos >= *fgd->count)
 		return NULL;
-	return &ftrace_graph_funcs[*pos];
+	return &fgd->table[*pos];
 }
 
 static void *
@@ -3796,10 +3814,12 @@
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_graph_data *fgd = m->private;
+
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
-	if (!ftrace_graph_filter_enabled && !*pos)
+	if (!*fgd->count && !*pos)
 		return (void *)1;
 
 	return __g_next(m, pos);
@@ -3835,38 +3855,88 @@
 };
 
 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+		    struct ftrace_graph_data *fgd)
 {
 	int ret = 0;
 
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
-		ftrace_graph_filter_enabled = 0;
-		ftrace_graph_count = 0;
-		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+		*fgd->count = 0;
+		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
 	}
 	mutex_unlock(&graph_lock);
 
-	if (file->f_mode & FMODE_READ)
-		ret = seq_open(file, &ftrace_graph_seq_ops);
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, fgd->seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = fgd;
+		}
+	} else
+		file->private_data = fgd;
 
 	return ret;
 }
 
 static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_graph_data *fgd;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+	if (fgd == NULL)
+		return -ENOMEM;
+
+	fgd->table = ftrace_graph_funcs;
+	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+	fgd->count = &ftrace_graph_count;
+	fgd->seq_ops = &ftrace_graph_seq_ops;
+
+	return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_graph_data *fgd;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+	if (fgd == NULL)
+		return -ENOMEM;
+
+	fgd->table = ftrace_graph_notrace_funcs;
+	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+	fgd->count = &ftrace_graph_notrace_count;
+	fgd->seq_ops = &ftrace_graph_seq_ops;
+
+	return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-	if (file->f_mode & FMODE_READ)
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+
+		kfree(m->private);
 		seq_release(inode, file);
+	} else {
+		kfree(file->private_data);
+	}
+
 	return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
@@ -3879,7 +3949,7 @@
 
 	/* decode regex */
 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+	if (!not && *idx >= size)
 		return -EBUSY;
 
 	search_len = strlen(search);
@@ -3907,7 +3977,7 @@
 				fail = 0;
 				if (!exists) {
 					array[(*idx)++] = rec->ip;
-					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+					if (*idx >= size)
 						goto out;
 				}
 			} else {
@@ -3925,8 +3995,6 @@
 	if (fail)
 		return -EINVAL;
 
-	ftrace_graph_filter_enabled = !!(*idx);
-
 	return 0;
 }
 
@@ -3935,36 +4003,33 @@
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	ssize_t read, ret;
+	ssize_t read, ret = 0;
+	struct ftrace_graph_data *fgd = file->private_data;
 
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&graph_lock);
-
-	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+		return -ENOMEM;
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
 	if (read >= 0 && trace_parser_loaded((&parser))) {
 		parser.buffer[parser.idx] = 0;
 
+		mutex_lock(&graph_lock);
+
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-					parser.buffer);
-		if (ret)
-			goto out_free;
+		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+				      parser.buffer);
+
+		mutex_unlock(&graph_lock);
 	}
 
-	ret = read;
+	if (!ret)
+		ret = read;
 
-out_free:
 	trace_parser_put(&parser);
-out_unlock:
-	mutex_unlock(&graph_lock);
 
 	return ret;
 }
@@ -3976,6 +4041,14 @@
 	.llseek		= ftrace_filter_lseek,
 	.release	= ftrace_graph_release,
 };
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+	.open		= ftrace_graph_notrace_open,
+	.read		= seq_read,
+	.write		= ftrace_graph_write,
+	.llseek		= ftrace_filter_lseek,
+	.release	= ftrace_graph_release,
+};
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
@@ -3997,6 +4070,9 @@
 	trace_create_file("set_graph_function", 0444, d_tracer,
 				    NULL,
 				    &ftrace_graph_fops);
+	trace_create_file("set_graph_notrace", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 	return 0;
@@ -4320,12 +4396,21 @@
 	 */
 	preempt_disable_notrace();
 	trace_recursion_set(TRACE_CONTROL_BIT);
+
+	/*
+	 * Control functions (perf) use RCU. Only trace if
+	 * RCU is currently watching.
+	 */
+	if (!rcu_is_watching())
+		goto out;
+
 	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
 		    !ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
+ out:
 	trace_recursion_clear(TRACE_CONTROL_BIT);
 	preempt_enable_notrace();
 }
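
The new rcu_is_watching() check matters because the function tracer can
fire from the idle loop, where RCU treats the CPU as quiescent and a
read-side critical section protects nothing. The guard in isolation
(sketch, hypothetical callback):

	static void careful_callback(unsigned long ip, unsigned long parent_ip)
	{
		if (!rcu_is_watching())
			return;	/* idle/offline: RCU read sections unsafe */

		rcu_read_lock();
		/* ... look up RCU-protected state keyed by ip ... */
		rcu_read_unlock();
	}
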
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9fea7d..9d20cd9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -843,9 +863,12 @@
 	if (isspace(ch)) {
 		parser->buffer[parser->idx] = 0;
 		parser->cont = false;
-	} else {
+	} else if (parser->idx < parser->size - 1) {
 		parser->cont = true;
 		parser->buffer[parser->idx++] = ch;
+	} else {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	*ppos += read;
@@ -1261,21 +1284,6 @@
 }
 
 /**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected.  This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-	tracing_disabled = 1;
-	ftrace_stop();
-	tracing_off_permanent();
-}
-
-/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -1631,7 +1639,7 @@
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -1715,7 +1723,7 @@
 
 	entry->size = trace.nr_entries;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
@@ -1817,7 +1825,7 @@
 	trace.entries		= entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2009,7 +2017,7 @@
 	entry->fmt			= fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2064,7 +2072,7 @@
 
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2761,7 +2769,7 @@
 	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
 	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
 	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
 	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
 	seq_printf(m, "#                       is not a '0' or '1')\n");
 }
@@ -2965,6 +2973,11 @@
 	return 0;
 }
 
+bool tracing_is_disabled(void)
+{
+	return (tracing_disabled) ? true : false;
+}
+
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
@@ -5455,12 +5468,12 @@
 	.func			= ftrace_trace_snapshot_callback,
 };
 
-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
 	return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
@@ -6254,6 +6267,17 @@
 	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
 	iter->trace_buffer = &global_trace.trace_buffer;
+
+	if (iter->trace && iter->trace->open)
+		iter->trace->open(iter);
+
+	/* Annotate start of buffers if we had overruns */
+	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+		iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[iter->tr->clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 73d08aa..ea189e0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -193,8 +193,8 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;
-	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
+	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
 #endif
 	int			stop_count;
 	int			clock_id;
@@ -515,6 +515,7 @@
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+bool tracing_is_disabled(void);
 struct dentry *trace_create_file(const char *name,
 				 umode_t mode,
 				 struct dentry *parent,
@@ -712,6 +713,8 @@
 #define TRACE_GRAPH_PRINT_PROC          0x8
 #define TRACE_GRAPH_PRINT_DURATION      0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
+#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
@@ -731,15 +734,16 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS		32
-extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
 	int i;
 
-	if (!ftrace_graph_filter_enabled)
+	if (!ftrace_graph_count)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
@@ -759,11 +763,31 @@
 
 	return 0;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+	int i;
+
+	if (!ftrace_graph_notrace_count)
+		return 0;
+
+	for (i = 0; i < ftrace_graph_notrace_count; i++) {
+		if (addr == ftrace_graph_notrace_funcs[i])
+			return 1;
+	}
+
+	return 0;
+}
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
 {
 	return 1;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+	return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
@@ -987,9 +1011,9 @@
 
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
 			       struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
 			      char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 					char *filter_string);
@@ -1000,20 +1024,6 @@
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
 
-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-		     struct ring_buffer *buffer,
-		     struct ring_buffer_event *event)
-{
-	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-	    !filter_match_preds(call->filter, rec)) {
-		ring_buffer_discard_commit(buffer, event);
-		return 1;
-	}
-
-	return 0;
-}
-
 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
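
With ftrace_graph_filter_enabled gone, the count alone carries the
state: set_graph_function is an allow-list whose empty state means
"trace everything", while the new set_graph_notrace is a deny-list
whose empty state denies nothing. How the two predicates compose in the
entry path (sketch, hypothetical helper):

	static bool should_graph_trace(unsigned long addr, int depth)
	{
		if (!depth && !ftrace_graph_addr(addr))
			return false;	/* root not on the allow-list */
		if (ftrace_graph_notrace_addr(addr))
			return false;	/* explicitly excluded */
		return true;
	}
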
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index d594da0..697fb9b 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -78,7 +78,7 @@
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 368a4d5..f919a2e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -989,7 +989,7 @@
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	struct trace_seq *s;
 	int r = -ENODEV;
 
@@ -1004,12 +1004,12 @@
 	trace_seq_init(s);
 
 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		print_event_filter(call, s);
+	file = event_file_data(filp);
+	if (file)
+		print_event_filter(file, s);
 	mutex_unlock(&event_mutex);
 
-	if (call)
+	if (file)
 		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -1021,7 +1021,7 @@
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_file *file;
 	char *buf;
 	int err = -ENODEV;
 
@@ -1039,9 +1039,9 @@
 	buf[cnt] = '\0';
 
 	mutex_lock(&event_mutex);
-	call = event_file_data(filp);
-	if (call)
-		err = apply_event_filter(call, buf);
+	file = event_file_data(filp);
+	if (file)
+		err = apply_event_filter(file, buf);
 	mutex_unlock(&event_mutex);
 
 	free_page((unsigned long) buf);
@@ -1062,6 +1062,9 @@
 	struct trace_array *tr;
 	int ret;
 
+	if (tracing_is_disabled())
+		return -ENODEV;
+
 	/* Make sure the system still exists */
 	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
@@ -1108,6 +1111,9 @@
 	struct trace_array *tr = inode->i_private;
 	int ret;
 
+	if (tracing_is_disabled())
+		return -ENODEV;
+
 	if (trace_array_get(tr) < 0)
 		return -ENODEV;
 
@@ -1124,11 +1130,12 @@
 	if (ret < 0) {
 		trace_array_put(tr);
 		kfree(dir);
+		return ret;
 	}
 
 	filp->private_data = dir;
 
-	return ret;
+	return 0;
 }
 
 static int subsystem_release(struct inode *inode, struct file *file)
@@ -1539,7 +1546,7 @@
 			return -1;
 		}
 	}
-	trace_create_file("filter", 0644, file->dir, call,
+	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
@@ -1577,6 +1584,7 @@
 		if (file->event_call != call)
 			continue;
 		ftrace_event_enable_disable(file, 0);
+		destroy_preds(file);
 		/*
 		 * The do_for_each_event_file() is
 		 * a double loop. After finding the call for this
@@ -1700,7 +1708,7 @@
 {
 	event_remove(call);
 	trace_destroy_fields(call);
-	destroy_preds(call);
+	destroy_call_preds(call);
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 97daa8c..2468f56 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,10 +637,18 @@
 	free_page((unsigned long) buf);
 }
 
-/* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
+static inline struct event_filter *event_filter(struct ftrace_event_file *file)
 {
-	struct event_filter *filter = call->filter;
+	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		return file->event_call->filter;
+	else
+		return file->filter;
+}
+
+/* caller must hold event_mutex */
+void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
+{
+	struct event_filter *filter = event_filter(file);
 
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
@@ -766,11 +774,21 @@
 	filter->n_preds = 0;
 }
 
-static void filter_disable(struct ftrace_event_call *call)
+static void call_filter_disable(struct ftrace_event_call *call)
 {
 	call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }
 
+static void filter_disable(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call_filter_disable(call);
+	else
+		file->flags &= ~FTRACE_EVENT_FL_FILTERED;
+}
+
 static void __free_filter(struct event_filter *filter)
 {
 	if (!filter)
@@ -781,18 +799,32 @@
 	kfree(filter);
 }
 
-/*
- * Called when destroying the ftrace_event_call.
- * The call is being freed, so we do not need to worry about
- * the call being currently used. This is for module code removing
- * the tracepoints from within it.
- */
-void destroy_preds(struct ftrace_event_call *call)
+void destroy_call_preds(struct ftrace_event_call *call)
 {
 	__free_filter(call->filter);
 	call->filter = NULL;
 }
 
+static void destroy_file_preds(struct ftrace_event_file *file)
+{
+	__free_filter(file->filter);
+	file->filter = NULL;
+}
+
+/*
+ * Called when destroying the ftrace_event_file.
+ * The file is being freed, so we do not need to worry about
+ * the file being currently used. This is for module code removing
+ * the tracepoints from within it.
+ */
+void destroy_preds(struct ftrace_event_file *file)
+{
+	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		destroy_call_preds(file->event_call);
+	else
+		destroy_file_preds(file);
+}
+
 static struct event_filter *__alloc_filter(void)
 {
 	struct event_filter *filter;
@@ -825,28 +857,56 @@
 	return 0;
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static inline void __remove_filter(struct ftrace_event_file *file)
 {
+	struct ftrace_event_call *call = file->event_call;
+
+	filter_disable(file);
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		remove_filter_string(call->filter);
+	else
+		remove_filter_string(file->filter);
+}
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+					struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 
-	list_for_each_entry(call, &ftrace_events, list) {
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
-		filter_disable(call);
-		remove_filter_string(call->filter);
+		__remove_filter(file);
 	}
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system)
+static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = file->event_call;
 
-	list_for_each_entry(call, &ftrace_events, list) {
-		if (strcmp(call->class->system, system->name) != 0)
-			continue;
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
 		__free_filter(call->filter);
 		call->filter = NULL;
+	} else {
+		__free_filter(file->filter);
+		file->filter = NULL;
+	}
+}
+
+static void filter_free_subsystem_filters(struct event_subsystem *system,
+					  struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+	struct ftrace_event_call *call;
+
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
+		if (strcmp(call->class->system, system->name) != 0)
+			continue;
+		__free_subsystem_filter(file);
 	}
 }
 
@@ -1617,15 +1677,85 @@
 	return err;
 }
 
+static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags |= TRACE_EVENT_FL_FILTERED;
+	else
+		file->flags |= FTRACE_EVENT_FL_FILTERED;
+}
+
+static inline void event_set_filter(struct ftrace_event_file *file,
+				    struct event_filter *filter)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		rcu_assign_pointer(call->filter, filter);
+	else
+		rcu_assign_pointer(file->filter, filter);
+}
+
+static inline void event_clear_filter(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		RCU_INIT_POINTER(call->filter, NULL);
+	else
+		RCU_INIT_POINTER(file->filter, NULL);
+}
+
+static inline void
+event_set_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+	else
+		file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline void
+event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+	else
+		file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline bool
+event_no_set_filter_flag(struct ftrace_event_file *file)
+{
+	struct ftrace_event_call *call = file->event_call;
+
+	if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+		return true;
+
+	if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
+	    (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
+		return true;
+
+	return false;
+}
+
 struct filter_list {
 	struct list_head	list;
 	struct event_filter	*filter;
 };
 
 static int replace_system_preds(struct event_subsystem *system,
+				struct trace_array *tr,
 				struct filter_parse_state *ps,
 				char *filter_string)
 {
+	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	struct filter_list *filter_item;
 	struct filter_list *tmp;
@@ -1633,8 +1763,8 @@
 	bool fail = true;
 	int err;
 
-	list_for_each_entry(call, &ftrace_events, list) {
-
+	list_for_each_entry(file, &tr->events, list) {
+		call = file->event_call;
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
@@ -1644,18 +1774,20 @@
 		 */
 		err = replace_preds(call, NULL, ps, filter_string, true);
 		if (err)
-			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+			event_set_no_set_filter_flag(file);
 		else
-			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+			event_clear_no_set_filter_flag(file);
 	}
 
-	list_for_each_entry(call, &ftrace_events, list) {
+	list_for_each_entry(file, &tr->events, list) {
 		struct event_filter *filter;
 
+		call = file->event_call;
+
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
-		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+		if (event_no_set_filter_flag(file))
 			continue;
 
 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
@@ -1676,17 +1808,17 @@
 
 		err = replace_preds(call, filter, ps, filter_string, false);
 		if (err) {
-			filter_disable(call);
+			filter_disable(file);
 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
 			append_filter_err(ps, filter);
 		} else
-			call->flags |= TRACE_EVENT_FL_FILTERED;
+			event_set_filtered_flag(file);
 		/*
 		 * Regardless of if this returned an error, we still
 		 * replace the filter for the call.
 		 */
-		filter = call->filter;
-		rcu_assign_pointer(call->filter, filter_item->filter);
+		filter = event_filter(file);
+		event_set_filter(file, filter_item->filter);
 		filter_item->filter = filter;
 
 		fail = false;
@@ -1816,6 +1948,7 @@
  * and always remembers @filter_str.
  */
 static int create_system_filter(struct event_subsystem *system,
+				struct trace_array *tr,
 				char *filter_str, struct event_filter **filterp)
 {
 	struct event_filter *filter = NULL;
@@ -1824,7 +1957,7 @@
 
 	err = create_filter_start(filter_str, true, &ps, &filter);
 	if (!err) {
-		err = replace_system_preds(system, ps, filter_str);
+		err = replace_system_preds(system, tr, ps, filter_str);
 		if (!err) {
 			/* System filters just show a default message */
 			kfree(filter->filter_string);
@@ -1840,20 +1973,25 @@
 }
 
 /* caller must hold event_mutex */
-int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
 {
+	struct ftrace_event_call *call = file->event_call;
 	struct event_filter *filter;
 	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
-		filter_disable(call);
-		filter = call->filter;
+		filter_disable(file);
+		filter = event_filter(file);
+
 		if (!filter)
 			return 0;
-		RCU_INIT_POINTER(call->filter, NULL);
+
+		event_clear_filter(file);
+
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
+
 		return 0;
 	}
 
@@ -1866,14 +2004,15 @@
 	 * string
 	 */
 	if (filter) {
-		struct event_filter *tmp = call->filter;
+		struct event_filter *tmp;
 
+		tmp = event_filter(file);
 		if (!err)
-			call->flags |= TRACE_EVENT_FL_FILTERED;
+			event_set_filtered_flag(file);
 		else
-			filter_disable(call);
+			filter_disable(file);
 
-		rcu_assign_pointer(call->filter, filter);
+		event_set_filter(file, filter);
 
 		if (tmp) {
 			/* Make sure the call is done with the filter */
@@ -1889,6 +2028,7 @@
 				 char *filter_string)
 {
 	struct event_subsystem *system = dir->subsystem;
+	struct trace_array *tr = dir->tr;
 	struct event_filter *filter;
 	int err = 0;
 
@@ -1901,18 +2041,18 @@
 	}
 
 	if (!strcmp(strstrip(filter_string), "0")) {
-		filter_free_subsystem_preds(system);
+		filter_free_subsystem_preds(system, tr);
 		remove_filter_string(system->filter);
 		filter = system->filter;
 		system->filter = NULL;
 		/* Ensure all filters are no longer used */
 		synchronize_sched();
-		filter_free_subsystem_filters(system);
+		filter_free_subsystem_filters(system, tr);
 		__free_filter(filter);
 		goto out_unlock;
 	}
 
-	err = create_system_filter(system, filter_string, &filter);
+	err = create_system_filter(system, tr, filter_string, &filter);
 	if (filter) {
 		/*
 		 * No event actually uses the system filter
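
Whichever pointer ends up holding the filter, its replacement follows
the usual RCU lifetime rule, condensed here into one helper (sketch
built on the accessors introduced above):

	static void swap_filter(struct ftrace_event_file *file,
				struct event_filter *new)
	{
		struct event_filter *old = event_filter(file);

		event_set_filter(file, new);	/* rcu_assign_pointer() inside */
		synchronize_sched();		/* wait out in-flight readers */
		__free_filter(old);
	}
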
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d21a746..7c3e3e7 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,7 +180,7 @@
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
-	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,		\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };									\
 struct ftrace_event_call __used						\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b5c0924..0b99120 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -82,9 +82,9 @@
  * to fill in space into DURATION column.
  */
 enum {
-	DURATION_FILL_FULL  = -1,
-	DURATION_FILL_START = -2,
-	DURATION_FILL_END   = -3,
+	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 };
 
 static enum print_line_t
@@ -114,16 +114,37 @@
 		return -EBUSY;
 	}
 
+	/*
+	 * The curr_ret_stack is an index into the current task's ftrace
+	 * return stack.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
+	 * while the function graph tracer is in use.  To support
+	 * filtering out specific functions, the index is made negative
+	 * by subtracting a huge value (FTRACE_NOTRACE_DEPTH), and ftrace
+	 * ignores the record whenever it sees a negative index.  The
+	 * index is recovered on return from the filtered function by
+	 * adding FTRACE_NOTRACE_DEPTH back, after which functions are
+	 * recorded normally again.
+	 *
+	 * The curr_ret_stack is initialized to -1 and gets increased
+	 * in this function.  So it can be less than -1 only if the
+	 * function was filtered out via ftrace_graph_notrace_addr(),
+	 * which the user can set via the set_graph_notrace file in
+	 * debugfs.
+	 */
+	if (current->curr_ret_stack < -1)
+		return -EBUSY;
+
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
+	if (ftrace_graph_notrace_addr(func))
+		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
 	current->ret_stack[index].fp = frame_pointer;
-	*depth = index;
+	*depth = current->curr_ret_stack;
 
 	return 0;
 }
@@ -137,7 +158,17 @@
 
 	index = current->curr_ret_stack;
 
-	if (unlikely(index < 0)) {
+	/*
+	 * A negative index here means that we have just returned from
+	 * a notrace'd function.  Recover the index to get the original
+	 * return address.  See ftrace_push_return_trace().
+	 *
+	 * TODO: Need to check whether the stack gets corrupted.
+	 */
+	if (index < 0)
+		index += FTRACE_NOTRACE_DEPTH;
+
+	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@
 	trace.rettime = trace_clock_local();
 	barrier();
 	current->curr_ret_stack--;
+	/*
+	 * The curr_ret_stack can be less than -1 only if the function
+	 * was filtered out and we are about to return from it.
+	 * Recover the index and continue tracing normally.
+	 */
+	if (current->curr_ret_stack < -1) {
+		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+		return ret;
+	}
 
 	/*
 	 * The trace should run after decrementing the ret counter
@@ -230,7 +270,7 @@
 		return 0;
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent			= *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
 	return 1;
@@ -259,10 +299,20 @@
 
 	/* trace it when it is-nested-in or is a function enabled. */
 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) ||
+	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
 	    (max_depth && trace->depth >= max_depth))
 		return 0;
 
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the ret stack index negative to indicate that further
+	 * functions should be ignored.  The function still needs its own
+	 * ret stack entry, though, to recover the original index so that
+	 * tracing continues after it returns.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -335,7 +385,7 @@
 		return;
 	entry	= ring_buffer_event_data(event);
 	entry->ret				= *trace;
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -652,7 +702,7 @@
 	}
 
 	/* No overhead */
-	ret = print_graph_duration(DURATION_FILL_START, s, flags);
+	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
 	if (ret != TRACE_TYPE_HANDLED)
 		return ret;
 
@@ -664,7 +714,7 @@
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = print_graph_duration(DURATION_FILL_END, s, flags);
+	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
 	if (ret != TRACE_TYPE_HANDLED)
 		return ret;
 
@@ -729,14 +779,14 @@
 			return TRACE_TYPE_HANDLED;
 
 	/* No real data, just filling the column with spaces */
-	switch (duration) {
-	case DURATION_FILL_FULL:
+	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
+	case FLAGS_FILL_FULL:
 		ret = trace_seq_puts(s, "              |  ");
 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-	case DURATION_FILL_START:
+	case FLAGS_FILL_START:
 		ret = trace_seq_puts(s, "  ");
 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-	case DURATION_FILL_END:
+	case FLAGS_FILL_END:
 		ret = trace_seq_puts(s, " |");
 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
 	}
@@ -852,7 +902,7 @@
 	}
 
 	/* No time */
-	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
 	if (ret != TRACE_TYPE_HANDLED)
 		return ret;
 
@@ -1172,7 +1222,7 @@
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No time */
-	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
 	if (ret != TRACE_TYPE_HANDLED)
 		return ret;
 
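
The index trick above, stripped to its arithmetic: FTRACE_NOTRACE_DEPTH
(65536 in include/linux/ftrace.h at this point) exceeds any legal stack
depth, so subtracting it makes every filtered index strictly negative,
while a single addition recovers the original slot. In isolation
(hypothetical names):

	#define NOTRACE_BIAS	65536	/* stands in for FTRACE_NOTRACE_DEPTH */

	static int encode_filtered(int index)
	{
		return index - NOTRACE_BIAS;	/* < -1 for any valid index */
	}

	static int recover_index(int index)
	{
		return index + NOTRACE_BIAS;
	}
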
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 243f683..dae9541 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -835,7 +835,7 @@
 	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
@@ -884,7 +884,7 @@
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
 }
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b3dcfb2..0abd9b8 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -323,7 +323,7 @@
 	entry	= ring_buffer_event_data(event);
 	entry->rw			= *rw;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
@@ -353,7 +353,7 @@
 	entry	= ring_buffer_event_data(event);
 	entry->map			= *map;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 4e98e3b..3f34dc9 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -45,7 +45,7 @@
 	entry->next_state		= next->state;
 	entry->next_cpu	= task_cpu(next);
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
@@ -101,7 +101,7 @@
 	entry->next_state		= wakee->state;
 	entry->next_cpu			= task_cpu(wakee);
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 847f88a..7af6736 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -43,46 +43,15 @@
 /* The root directory for all stat files */
 static struct dentry		*stat_dir;
 
-/*
- * Iterate through the rbtree using a post order traversal path
- * to release the next node.
- * It won't necessary release one at each iteration
- * but it will at least advance closer to the next one
- * to be released.
- */
-static struct rb_node *release_next(struct tracer_stat *ts,
-				    struct rb_node *node)
-{
-	struct stat_node *snode;
-	struct rb_node *parent = rb_parent(node);
-
-	if (node->rb_left)
-		return node->rb_left;
-	else if (node->rb_right)
-		return node->rb_right;
-	else {
-		if (!parent)
-			;
-		else if (parent->rb_left == node)
-			parent->rb_left = NULL;
-		else
-			parent->rb_right = NULL;
-
-		snode = container_of(node, struct stat_node, node);
-		if (ts->stat_release)
-			ts->stat_release(snode->stat);
-		kfree(snode);
-
-		return parent;
-	}
-}
-
 static void __reset_stat_session(struct stat_session *session)
 {
-	struct rb_node *node = session->stat_root.rb_node;
+	struct stat_node *snode, *n;
 
-	while (node)
-		node = release_next(session->ts, node);
+	rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
+		if (session->ts->stat_release)
+			session->ts->stat_release(snode->stat);
+		kfree(snode);
+	}
 
 	session->stat_root = RB_ROOT;
 }
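
rbtree_postorder_for_each_entry_safe() visits children before parents,
so each node may be freed as soon as it is seen, which is exactly what
the hand-rolled release_next() walker did. The generic teardown shape
(sketch, hypothetical node type):

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct my_node {
		struct rb_node node;
	};

	static void free_tree(struct rb_root *root)
	{
		struct my_node *pos, *n;

		rbtree_postorder_for_each_entry_safe(pos, n, root, node)
			kfree(pos);	/* children already visited */

		*root = RB_ROOT;
	}
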
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 559329d..e4b6d11 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -302,6 +302,7 @@
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -314,7 +315,13 @@
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!filter_current_check_discard(buffer, sys_data->enter_event,
-					  entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -345,6 +351,7 @@
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -356,7 +363,13 @@
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!filter_current_check_discard(buffer, sys_data->exit_event,
-					  entry, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -397,7 +409,7 @@
 	if (!tr->sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_enter_syscalls);
+		rcu_assign_pointer(tr->enter_syscall_files[num], file);
 		tr->sys_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_enter--;
-	clear_bit(num, tr->enabled_enter_syscalls);
+	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@
 	if (!tr->sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_exit_syscalls);
+		rcu_assign_pointer(tr->exit_syscall_files[num], file);
 		tr->sys_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_exit--;
-	clear_bit(num, tr->enabled_exit_syscalls);
+	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
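
The syscall-tracing conversion is the textbook RCU publish/read/retire
cycle: publish with rcu_assign_pointer(), read under the tracepoint's
implicit rcu_read_lock_sched() via rcu_dereference_sched(), and
synchronize_sched() before callers may assume no handler still sees the
old pointer. Reduced to one slot (sketch, hypothetical names):

	static struct ftrace_event_file __rcu *slot;

	static void handler(void)
	{
		struct ftrace_event_file *file;

		/* tracepoint callers hold rcu_read_lock_sched() */
		file = rcu_dereference_sched(slot);
		if (!file)
			return;
		/* ... deliver the event through file ... */
	}

	static void disable(void)
	{
		rcu_assign_pointer(slot, NULL);
		synchronize_sched();	/* no handler can still see the file */
	}
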
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 272261b..b6dcc42 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -128,6 +128,7 @@
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
 	init_trace_uprobe_filter(&tu->filter);
+	tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
 	return tu;
 
 error:
@@ -561,7 +562,7 @@
 	for (i = 0; i < tu->nr_args; i++)
 		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
-	if (!filter_current_check_discard(buffer, call, entry, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		trace_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
diff --git a/kernel/up.c b/kernel/up.c
index 630d72b..509403e 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,6 +22,17 @@
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
+				int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	csd->func(csd->info);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__smp_call_function_single);
+
 int on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
 	unsigned long flags;
diff --git a/lib/Kconfig b/lib/Kconfig
index b3c8be0..06dc742 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,13 +51,6 @@
 config ARCH_USE_CMPXCHG_LOCKREF
 	bool
 
-config CMPXCHG_LOCKREF
-	def_bool y if ARCH_USE_CMPXCHG_LOCKREF
-	depends on SMP
-	depends on !GENERIC_LOCKBREAK
-	depends on !DEBUG_SPINLOCK
-	depends on !DEBUG_LOCK_ALLOC
-
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
@@ -189,6 +182,13 @@
 	depends on AUDIT && !AUDIT_ARCH
 	default y
 
+config RANDOM32_SELFTEST
+	bool "PRNG perform self test on init"
+	default n
+	help
+	  This option enables the 32-bit PRNG library functions to perform a
+	  self-test on initialization.
+
 #
 # compression support is select'ed if needed
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ebef88f..db25707 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1481,6 +1481,15 @@
 	help
 	  A benchmark measuring the performance of the interval tree library
 
+config PERCPU_TEST
+	tristate "Per cpu operations test"
+	depends on m && DEBUG_KERNEL
+	help
+	  Enable this option to build a test module which validates per-cpu
+	  operations.
+
+	  If unsure, say N.
+
 config ATOMIC64_SELFTEST
 	bool "Perform an atomic64_t self-test at boot"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index f3bb2cb..d480a8c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -42,10 +42,6 @@
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
@@ -157,6 +153,8 @@
 
 interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
 
+obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
 obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
diff --git a/lib/crc32.c b/lib/crc32.c
index 410093d..70f00ca 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/sched.h>
 #include "crc32defs.h"
 
 #if CRC_LE_BITS > 8
@@ -49,6 +50,30 @@
 MODULE_DESCRIPTION("Various CRC32 calculations");
 MODULE_LICENSE("GPL");
 
+#define GF2_DIM		32
+
+static u32 gf2_matrix_times(u32 *mat, u32 vec)
+{
+	u32 sum = 0;
+
+	while (vec) {
+		if (vec & 1)
+			sum ^= *mat;
+		vec >>= 1;
+		mat++;
+	}
+
+	return sum;
+}
+
+static void gf2_matrix_square(u32 *square, u32 *mat)
+{
+	int i;
+
+	for (i = 0; i < GF2_DIM; i++)
+		square[i] = gf2_matrix_times(mat, mat[i]);
+}
+
 #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
 
 /* implements slicing-by-4 or slicing-by-8 algorithm */
@@ -130,6 +155,52 @@
 }
 #endif
 
+/* For conditions of distribution and use, see copyright notice in zlib.h */
+static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
+				 u32 polynomial)
+{
+	u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
+	u32 odd[GF2_DIM];  /* Odd-power-of-two zeros operator  */
+	u32 row;
+	int i;
+
+	if (len2 <= 0)
+		return crc1;
+
+	/* Put operator for one zero bit in odd */
+	odd[0] = polynomial;
+	row = 1;
+	for (i = 1; i < GF2_DIM; i++) {
+		odd[i] = row;
+		row <<= 1;
+	}
+
+	gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
+	gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
+
+	/* Apply len2 zeros to crc1 (first square will put the operator for one
+	 * zero byte, eight zero bits, in even).
+	 */
+	do {
+		/* Apply zeros operator for this bit of len2 */
+		gf2_matrix_square(even, odd);
+		if (len2 & 1)
+			crc1 = gf2_matrix_times(even, crc1);
+		len2 >>= 1;
+		/* If no more bits set, then done */
+		if (len2 == 0)
+			break;
+		/* Another iteration of the loop with odd and even swapped */
+		gf2_matrix_square(odd, even);
+		if (len2 & 1)
+			crc1 = gf2_matrix_times(odd, crc1);
+		len2 >>= 1;
+	} while (len2 != 0);
+
+	crc1 ^= crc2;
+	return crc1;
+}
+
 /**
  * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
  *			CRC32/CRC32C
@@ -200,8 +271,19 @@
 			(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
+u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+	return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+}
+
+u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+	return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+}
 EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(crc32_le_combine);
 EXPORT_SYMBOL(__crc32c_le);
+EXPORT_SYMBOL(__crc32c_le_combine);
 
 /**
  * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -795,206 +877,106 @@
 	u32 crc32c_le;	/* expected crc32c_le result */
 } test[] =
 {
-	{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1,
-	 0xf6e93d6c},
-	{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad,
-	 0x0fe92aca},
-	{0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f,
-	 0x52e1ebb8},
-	{0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a,
-	 0x0798af9a},
-	{0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2,
-	 0x18eb3152},
-	{0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793,
-	 0xd00d08c7},
-	{0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed,
-	 0x8ba966bc},
-	{0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35,
-	 0x11d694a2},
-	{0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2,
-	 0x6ab3208d},
-	{0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10,
-	 0xba4603c5},
-	{0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb,
-	 0xe6071c6f},
-	{0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0,
-	 0x179ec30a},
-	{0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb,
-	 0x0903beb8},
-	{0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed,
-	 0x6a7cb4fa},
-	{0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591,
-	 0xdb535801},
-	{0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67,
-	 0x92bed597},
-	{0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd,
-	 0x192a3f1b},
-	{0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a,
-	 0xccbaec1a},
-	{0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b,
-	 0x7eabae4d},
-	{0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f,
-	 0x28c72982},
-	{0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d,
-	 0xc3cd4d18},
-	{0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a,
-	 0xbca8f0e7},
-	{0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97,
-	 0x713f60b3},
-	{0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2,
-	 0xebd08fd5},
-	{0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138,
-	 0x64406c59},
-	{0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032,
-	 0x7421890e},
-	{0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f,
-	 0xe9347603},
-	{0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f,
-	 0x1bef9060},
-	{0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32,
-	 0x34720072},
-	{0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef,
-	 0x48310f59},
-	{0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0,
-	 0x783a4213},
-	{0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59,
-	 0x9e8efd41},
-	{0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4,
-	 0xfc3d34a5},
-	{0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c,
-	 0x17a52ae2},
-	{0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51,
-	 0x886d935a},
-	{0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11,
-	 0xeaaeaeb2},
-	{0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659,
-	 0x8e900a4b},
-	{0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af,
-	 0xd74662b1},
-	{0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99,
-	 0xd26752ba},
-	{0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b,
-	 0x8b1fcd62},
-	{0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521,
-	 0xf54342fe},
-	{0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3,
-	 0x5b95b988},
-	{0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d,
-	 0x2e1176be},
-	{0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f,
-	 0x66120546},
-	{0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b,
-	 0xf256a5cc},
-	{0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0,
-	 0x4af1dd69},
-	{0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195,
-	 0x56f0a04a},
-	{0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d,
-	 0x74f6b6b2},
-	{0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4,
-	 0x085951fd},
-	{0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3,
-	 0xc65387eb},
-	{0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643,
-	 0x1ca9257b},
-	{0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10,
-	 0xfd196d76},
-	{0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d,
-	 0x5ef88339},
-	{0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5,
-	 0x2c3714d9},
-	{0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b,
-	 0x58576548},
-	{0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee,
-	 0xfd7c57de},
-	{0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14,
-	 0xd5fedd59},
-	{0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a,
-	 0x1cc3b17b},
-	{0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b,
-	 0x270eed73},
-	{0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3,
-	 0x91ecbb11},
-	{0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826,
-	 0x05ed8d0c},
-	{0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06,
-	 0x0b09ad5b},
-	{0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35,
-	 0xf8d511fb},
-	{0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801,
-	 0x5ad832cc},
-	{0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2,
-	 0x1214d196},
-	{0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d,
-	 0x5747218a},
-	{0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c,
-	 0xde8f14de},
-	{0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba,
-	 0x3563b7b9},
-	{0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5,
-	 0x071475d0},
-	{0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b,
-	 0x54c79d60},
-	{0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178,
-	 0x4c53eee6},
-	{0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3,
-	 0x10137a3c},
-	{0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605,
-	 0xaa9d6c73},
-	{0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1,
-	 0xb63d23e7},
-	{0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9,
-	 0x7f53e9cf},
-	{0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78,
-	 0x13c1cd83},
-	{0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9,
-	 0x49ff5867},
-	{0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd,
-	 0x8467f211},
-	{0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab,
-	 0x3f9683b2},
-	{0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb,
-	 0x76a3f874},
-	{0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77,
-	 0x863b702f},
-	{0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da,
-	 0xdc6c58ff},
-	{0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39,
-	 0x0622cc95},
-	{0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16,
-	 0xe85605cd},
-	{0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208,
-	 0x31da5f06},
-	{0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e,
-	 0xa1f2e784},
-	{0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5,
-	 0xb07cc616},
-	{0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892,
-	 0xbf943b6c},
-	{0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db,
-	 0x2c01af1c},
-	{0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43,
-	 0x0fe5f56d},
-	{0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac,
-	 0xf8943b2d},
-	{0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7,
-	 0xe4d89272},
-	{0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2,
-	 0x7c2f6bbb},
-	{0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2,
-	 0xabbf388b},
-	{0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640,
-	 0x1dca1f4e},
-	{0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f,
-	 0x5c170e23},
-	{0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99,
-	 0xc0e9d672},
-	{0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7,
-	 0xc18bdc86},
-	{0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499,
-	 0xa874fcdd},
-	{0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a,
-	 0x9dc0bb48},
+	{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
+	{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
+	{0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
+	{0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
+	{0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
+	{0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
+	{0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
+	{0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
+	{0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
+	{0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
+	{0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
+	{0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
+	{0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
+	{0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
+	{0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
+	{0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
+	{0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
+	{0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
+	{0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
+	{0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
+	{0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
+	{0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
+	{0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
+	{0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
+	{0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
+	{0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
+	{0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
+	{0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
+	{0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
+	{0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
+	{0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
+	{0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
+	{0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
+	{0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
+	{0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
+	{0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
+	{0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
+	{0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
+	{0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
+	{0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
+	{0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
+	{0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
+	{0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
+	{0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
+	{0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
+	{0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
+	{0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
+	{0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
+	{0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
+	{0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
+	{0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
+	{0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
+	{0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
+	{0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
+	{0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
+	{0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
+	{0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
+	{0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
+	{0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
+	{0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
+	{0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
+	{0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
+	{0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
+	{0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
+	{0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
+	{0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
+	{0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
+	{0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
+	{0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
+	{0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
+	{0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
+	{0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
+	{0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
+	{0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
+	{0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
+	{0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
+	{0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
+	{0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
+	{0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
+	{0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
+	{0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
+	{0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
+	{0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
+	{0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
+	{0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
+	{0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
+	{0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
+	{0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
+	{0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
+	{0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
+	{0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
+	{0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
+	{0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
+	{0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
+	{0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
+	{0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
+	{0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
+	{0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
+	{0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
+	{0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
 };
 
 #include <linux/time.h>
@@ -1050,6 +1032,41 @@
 	return 0;
 }
 
+static int __init crc32c_combine_test(void)
+{
+	int i, j;
+	int errors = 0, runs = 0;
+
+	for (i = 0; i < 10; i++) {
+		u32 crc_full;
+
+		crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
+				       test[i].length);
+		for (j = 0; j <= test[i].length; ++j) {
+			u32 crc1, crc2;
+			u32 len1 = j, len2 = test[i].length - j;
+
+			crc1 = __crc32c_le(test[i].crc, test_buf +
+					   test[i].start, len1);
+			crc2 = __crc32c_le(0, test_buf + test[i].start +
+					   len1, len2);
+
+			if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
+			      crc_full == test[i].crc32c_le))
+				errors++;
+			runs++;
+			cond_resched();
+		}
+	}
+
+	if (errors)
+		pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
+	else
+		pr_info("crc32c_combine: %d self tests passed\n", runs);
+
+	return 0;
+}
+
 static int __init crc32_test(void)
 {
 	int i;
@@ -1109,10 +1126,49 @@
 	return 0;
 }
 
+static int __init crc32_combine_test(void)
+{
+	int i, j;
+	int errors = 0, runs = 0;
+
+	for (i = 0; i < 10; i++) {
+		u32 crc_full;
+
+		crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
+				    test[i].length);
+		for (j = 0; j <= test[i].length; ++j) {
+			u32 crc1, crc2;
+			u32 len1 = j, len2 = test[i].length - j;
+
+			crc1 = crc32_le(test[i].crc, test_buf +
+					test[i].start, len1);
+			crc2 = crc32_le(0, test_buf + test[i].start +
+					len1, len2);
+
+			if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
+			      crc_full == test[i].crc_le))
+				errors++;
+			runs++;
+			cond_resched();
+		}
+	}
+
+	if (errors)
+		pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
+	else
+		pr_info("crc32_combine: %d self tests passed\n", runs);
+
+	return 0;
+}
+
 static int __init crc32test_init(void)
 {
 	crc32_test();
 	crc32c_test();
+
+	crc32_combine_test();
+	crc32c_combine_test();
+
 	return 0;
 }
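The combine helpers added above compute the CRC of a concatenated buffer
from the CRCs of its fragments, without re-reading the payload; len2 is the
byte length of the second fragment. A minimal sketch mirroring what the
selftests verify (buf, len1 and len2 are illustrative names):

	u32 crc_a, crc_b, crc_whole;

	crc_a = crc32_le(~0, buf, len1);	/* CRC of the first fragment */
	crc_b = crc32_le(0, buf + len1, len2);	/* second fragment, seed 0 */
	crc_whole = crc32_le_combine(crc_a, crc_b, len2);
	/* crc_whole == crc32_le(~0, buf, len1 + len2) */

The same pattern applies to __crc32c_le() with __crc32c_le_combine().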
 
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index bf2c8b1..e0731c3 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -196,7 +196,7 @@
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = keventd_up() && !work_pending(&debug_obj_work);
+		sched = keventd_up();
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
diff --git a/lib/digsig.c b/lib/digsig.c
index 2f31e6a..8793aed 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -209,7 +209,7 @@
 		kref = keyring_search(make_key_ref(keyring, 1UL),
 						&key_type_user, name);
 		if (IS_ERR(kref))
-			key = ERR_PTR(PTR_ERR(kref));
+			key = ERR_CAST(kref);
 		else
 			key = key_ref_to_ptr(kref);
 	} else {
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 26cf20b..dda3116 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -313,6 +313,34 @@
 EXPORT_SYMBOL(gen_pool_alloc);
 
 /**
+ * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Cannot be used in an NMI handler on architectures without an
+ * NMI-safe cmpxchg implementation.
+ */
+void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+	unsigned long vaddr;
+
+	if (!pool)
+		return NULL;
+
+	vaddr = gen_pool_alloc(pool, size);
+	if (!vaddr)
+		return NULL;
+
+	*dma = gen_pool_virt_to_phys(pool, vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
  * gen_pool_free - free allocated special memory back to the pool
  * @pool: pool to free to
  * @addr: starting address of memory to free back to pool
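gen_pool_dma_alloc() returns the kernel virtual address and, through @dma,
the bus address of the allocation, sparing callers a separate
gen_pool_virt_to_phys() lookup. A hedged driver-side sketch (pool is
assumed to be an already-created gen_pool):

	dma_addr_t dma;
	void *vaddr;

	vaddr = gen_pool_dma_alloc(pool, 256, &dma);	/* 256 bytes */
	if (!vaddr)
		return -ENOMEM;
	/* program "dma" into the device, touch the buffer via "vaddr" */
	gen_pool_free(pool, (unsigned long)vaddr, 256);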
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 7b7f830..d79b9d2 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -215,7 +215,7 @@
 	 * incrementing the fifo->in index counter
 	 */
 	smp_wmb();
-	*copied = len - ret;
+	*copied = len - ret * esize;
 	/* return the number of elements which are not copied */
 	return ret;
 }
@@ -275,7 +275,7 @@
 	 * incrementing the fifo->out index counter
 	 */
 	smp_wmb();
-	*copied = len - ret;
+	*copied = len - ret * esize;
 	/* return the number of elements which are not copied */
 	return ret;
 }
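In both kfifo helpers, len has already been scaled to bytes at this point
while ret counts elements that could not be copied, so the old arithmetic
mixed units. With an esize of 4 and a 16 byte request where one element
fails to copy, *copied used to be reported as 16 - 1 = 15; the corrected
16 - 1 * 4 = 12 is what the caller actually received.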
diff --git a/lib/llist.c b/lib/llist.c
index 4a70d12..f76196d 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -81,3 +81,25 @@
 	return entry;
 }
 EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_reverse_order - reverse order of a llist chain
+ * @head:	first item of the list to be reversed
+ *
+ * Reverse the order of a chain of llist entries and return the
+ * new first entry.
+ */
+struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+	struct llist_node *new_head = NULL;
+
+	while (head) {
+		struct llist_node *tmp = head;
+		head = head->next;
+		tmp->next = new_head;
+		new_head = tmp;
+	}
+
+	return new_head;
+}
+EXPORT_SYMBOL_GPL(llist_reverse_order);
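Since llist_del_all() detaches entries in LIFO order, consumers that care
about submission order can now flip the chain before walking it. A sketch
under assumed names (my_list, process(), and a struct item whose llist_node
member is called node):

	struct item *pos;
	struct llist_node *first = llist_del_all(&my_list);

	/* entries come back newest-first; restore insertion order */
	first = llist_reverse_order(first);
	llist_for_each_entry(pos, first, node)
		process(pos);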
diff --git a/lib/lockref.c b/lib/lockref.c
index af6e95d..d2b123f 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,7 +1,7 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
 
-#ifdef CONFIG_CMPXCHG_LOCKREF
+#if USE_CMPXCHG_LOCKREF
 
 /*
  * Allow weakly-ordered memory architectures to provide barrier-less
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5e..7473ee3 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -60,14 +60,15 @@
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -78,9 +79,10 @@
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		raw_spin_lock(&fbc->lock);
+		unsigned long flags;
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
-		raw_spin_unlock(&fbc->lock);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 		__this_cpu_write(*fbc->counters, 0);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
@@ -97,14 +99,15 @@
 {
 	s64 ret;
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index bab1ba2..b0698ea 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -30,15 +30,6 @@
 #include <linux/spinlock.h>
 #include <linux/percpu_ida.h>
 
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_PCPU_BATCH_MOVE	32U
-
-/* Max size of percpu freelist, */
-#define IDA_PCPU_SIZE		((IDA_PCPU_BATCH_MOVE * 3) / 2)
-
 struct percpu_ida_cpu {
 	/*
 	 * Even though this is percpu, we need a lock for tag stealing by remote
@@ -78,7 +69,7 @@
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
+	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
 	     cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
@@ -123,7 +114,7 @@
 {
 	move_tags(tags->freelist, &tags->nr_free,
 		  pool->freelist, &pool->nr_free,
-		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
+		  min(pool->nr_free, pool->percpu_batch_size));
 }
 
 static inline unsigned alloc_local_tag(struct percpu_ida *pool,
@@ -245,17 +236,17 @@
 		wake_up(&pool->wait);
 	}
 
-	if (nr_free == IDA_PCPU_SIZE) {
+	if (nr_free == pool->percpu_max_size) {
 		spin_lock(&pool->lock);
 
 		/*
 		 * Global lock held and irqs disabled, don't need percpu
 		 * lock
 		 */
-		if (tags->nr_free == IDA_PCPU_SIZE) {
+		if (tags->nr_free == pool->percpu_max_size) {
 			move_tags(pool->freelist, &pool->nr_free,
 				  tags->freelist, &tags->nr_free,
-				  IDA_PCPU_BATCH_MOVE);
+				  pool->percpu_batch_size);
 
 			wake_up(&pool->wait);
 		}
@@ -292,7 +283,8 @@
  * Allocation is percpu, but sharding is limited by nr_tags - for best
  * performance, the workload should not span more cpus than nr_tags / 128.
  */
-int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+	unsigned long max_size, unsigned long batch_size)
 {
 	unsigned i, cpu, order;
 
@@ -301,6 +293,8 @@
 	init_waitqueue_head(&pool->wait);
 	spin_lock_init(&pool->lock);
 	pool->nr_tags = nr_tags;
+	pool->percpu_max_size = max_size;
+	pool->percpu_batch_size = batch_size;
 
 	/* Guard against overflow */
 	if (nr_tags > (unsigned) INT_MAX + 1) {
@@ -319,7 +313,7 @@
 	pool->nr_free = nr_tags;
 
 	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
-				       IDA_PCPU_SIZE * sizeof(unsigned),
+				       pool->percpu_max_size * sizeof(unsigned),
 				       sizeof(unsigned));
 	if (!pool->tag_cpu)
 		goto err;
@@ -332,4 +326,65 @@
 	percpu_ida_destroy(pool);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL_GPL(percpu_ida_init);
+EXPORT_SYMBOL_GPL(__percpu_ida_init);
+
+/**
+ * percpu_ida_for_each_free - iterate free ids of a pool
+ * @pool: pool to iterate
+ * @fn: iteration callback function
+ * @data: parameter for @fn
+ *
+ * Note: this does not guarantee a strict iteration of all free ids. Some
+ * free ids might be missed, some might be iterated twice, and some might
+ * no longer be free by the time they are visited.
+ */
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+	void *data)
+{
+	unsigned long flags;
+	struct percpu_ida_cpu *remote;
+	unsigned cpu, i, err = 0;
+
+	local_irq_save(flags);
+	for_each_possible_cpu(cpu) {
+		remote = per_cpu_ptr(pool->tag_cpu, cpu);
+		spin_lock(&remote->lock);
+		for (i = 0; i < remote->nr_free; i++) {
+			err = fn(remote->freelist[i], data);
+			if (err)
+				break;
+		}
+		spin_unlock(&remote->lock);
+		if (err)
+			goto out;
+	}
+
+	spin_lock(&pool->lock);
+	for (i = 0; i < pool->nr_free; i++) {
+		err = fn(pool->freelist[i], data);
+		if (err)
+			break;
+	}
+	spin_unlock(&pool->lock);
+out:
+	local_irq_restore(flags);
+	return err;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
+
+/**
+ * percpu_ida_free_tags - return the number of free tags on a cpu or in the global pool
+ * @pool: pool related
+ * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
+ *
+ * Note: this just returns a snapshot of the number of free tags.
+ */
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
+{
+	struct percpu_ida_cpu *remote;
+	if (cpu == nr_cpu_ids)
+		return pool->nr_free;
+	remote = per_cpu_ptr(pool->tag_cpu, cpu);
+	return remote->nr_free;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
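percpu_ida_for_each_free() visits each per-cpu freelist and then the global
freelist, aborting as soon as the callback returns non-zero. A minimal
sketch of a conforming callback (count_free and nr_free are illustrative;
pool is an initialized percpu_ida):

	static int count_free(unsigned id, void *data)
	{
		unsigned *nr = data;

		(*nr)++;
		return 0;	/* non-zero would abort the walk */
	}

	unsigned nr_free = 0;

	percpu_ida_for_each_free(&pool, count_free, &nr_free);

As the comment above notes, tags can move between freelists while the walk
runs, so the result is only a best-effort snapshot.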
diff --git a/lib/percpu_test.c b/lib/percpu_test.c
new file mode 100644
index 0000000..0b5d14d
--- /dev/null
+++ b/lib/percpu_test.c
@@ -0,0 +1,138 @@
+#include <linux/module.h>
+
+/* validate @native and @pcp counter values match @expected */
+#define CHECK(native, pcp, expected)                                    \
+	do {                                                            \
+		WARN((native) != (expected),                            \
+		     "raw %ld (0x%lx) != expected %lld (0x%llx)",	\
+		     (native), (native),				\
+		     (long long)(expected), (long long)(expected));	\
+		WARN(__this_cpu_read(pcp) != (expected),                \
+		     "pcp %ld (0x%lx) != expected %lld (0x%llx)",	\
+		     __this_cpu_read(pcp), __this_cpu_read(pcp),	\
+		     (long long)(expected), (long long)(expected));	\
+	} while (0)
+
+static DEFINE_PER_CPU(long, long_counter);
+static DEFINE_PER_CPU(unsigned long, ulong_counter);
+
+static int __init percpu_test_init(void)
+{
+	/*
+	 * volatile prevents the compiler from optimizing its uses; otherwise
+	 * the +ui_one/-ui_one below would be replaced with inc/dec instructions.
+	 */
+	volatile unsigned int ui_one = 1;
+	long l = 0;
+	unsigned long ul = 0;
+
+	pr_info("percpu test start\n");
+
+	preempt_disable();
+
+	l += -1;
+	__this_cpu_add(long_counter, -1);
+	CHECK(l, long_counter, -1);
+
+	l += 1;
+	__this_cpu_add(long_counter, 1);
+	CHECK(l, long_counter, 0);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul += 1UL;
+	__this_cpu_add(ulong_counter, 1UL);
+	CHECK(ul, ulong_counter, 1);
+
+	ul += -1UL;
+	__this_cpu_add(ulong_counter, -1UL);
+	CHECK(ul, ulong_counter, 0);
+
+	ul += -(unsigned long)1;
+	__this_cpu_add(ulong_counter, -(unsigned long)1);
+	CHECK(ul, ulong_counter, -1);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul -= 1;
+	__this_cpu_dec(ulong_counter);
+	CHECK(ul, ulong_counter, -1);
+	CHECK(ul, ulong_counter, ULONG_MAX);
+
+	l += -ui_one;
+	__this_cpu_add(long_counter, -ui_one);
+	CHECK(l, long_counter, 0xffffffff);
+
+	l += ui_one;
+	__this_cpu_add(long_counter, ui_one);
+	CHECK(l, long_counter, (long)0x100000000LL);
+
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l -= ui_one;
+	__this_cpu_sub(long_counter, ui_one);
+	CHECK(l, long_counter, -1);
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l += ui_one;
+	__this_cpu_add(long_counter, ui_one);
+	CHECK(l, long_counter, 1);
+
+	l += -ui_one;
+	__this_cpu_add(long_counter, -ui_one);
+	CHECK(l, long_counter, (long)0x100000000LL);
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l -= ui_one;
+	this_cpu_sub(long_counter, ui_one);
+	CHECK(l, long_counter, -1);
+	CHECK(l, long_counter, ULONG_MAX);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul += ui_one;
+	__this_cpu_add(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 1);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul -= ui_one;
+	__this_cpu_sub(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, -1);
+	CHECK(ul, ulong_counter, ULONG_MAX);
+
+	ul = 3;
+	__this_cpu_write(ulong_counter, 3);
+
+	ul = this_cpu_sub_return(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 2);
+
+	ul = __this_cpu_sub_return(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 1);
+
+	preempt_enable();
+
+	pr_info("percpu test done\n");
+	return -EAGAIN;  /* failing the init unloads the module right away */
+}
+
+static void __exit percpu_test_exit(void)
+{
+}
+
+module_init(percpu_test_init)
+module_exit(percpu_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Thelen");
+MODULE_DESCRIPTION("percpu operations test");
diff --git a/lib/random32.c b/lib/random32.c
index 52280d5..82da4f4 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -2,19 +2,19 @@
   This is a maximally equidistributed combined Tausworthe generator
   based on code from GNU Scientific Library 1.5 (30 Jun 2004)
 
-   x_n = (s1_n ^ s2_n ^ s3_n)
+   lfsr113 version:
 
-   s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
-   s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
-   s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
+   x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
 
-   The period of this generator is about 2^88.
+   s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n <<  6) ^ s1_n) >> 13))
+   s2_{n+1} = (((s2_n & 4294967288) <<  2) ^ (((s2_n <<  2) ^ s2_n) >> 27))
+   s3_{n+1} = (((s3_n & 4294967280) <<  7) ^ (((s3_n << 13) ^ s3_n) >> 21))
+   s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n <<  3) ^ s4_n) >> 12))
+
+   The period of this generator is about 2^113 (see erratum paper).
 
    From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
-   Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
-
-   This is available on the net from L'Ecuyer's home page,
-
+   Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
    http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
    ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
 
@@ -29,7 +29,7 @@
         that paper.)
 
    This affects the seeding procedure by imposing the requirement
-   s1 > 1, s2 > 7, s3 > 15.
+   s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 
 */
 
@@ -38,6 +38,11 @@
 #include <linux/export.h>
 #include <linux/jiffies.h>
 #include <linux/random.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+static void __init prandom_state_selftest(void);
+#endif
 
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
@@ -52,11 +57,12 @@
 {
 #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
 
-	state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
-	state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
-	state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
+	state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
+	state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
+	state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
+	state->s4 = TAUSWORTHE(state->s4,  3U, 12U, 4294967168U, 13U);
 
-	return (state->s1 ^ state->s2 ^ state->s3);
+	return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
 }
 EXPORT_SYMBOL(prandom_u32_state);
 
@@ -126,6 +132,38 @@
 }
 EXPORT_SYMBOL(prandom_bytes);
 
+static void prandom_warmup(struct rnd_state *state)
+{
+	/* Calling RNG ten times to satisfy the recurrence condition */
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+	prandom_u32_state(state);
+}
+
+static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+{
+	/* Note: This sort of seeding is ONLY used in test cases and
+	 * during boot, from core_initcall until late_initcall, as we
+	 * don't have a stronger entropy source available yet. After
+	 * late_initcall we must (!) reseed the entire state; otherwise
+	 * an attacker only needs to search a 32 bit space to probe
+	 * for our internal 128 bit state if he knows a couple of
+	 * prandom32 outputs!
+	 */
+#define LCG(x)	((x) * 69069U)	/* super-duper LCG */
+	state->s1 = __seed(LCG(seed),        2U);
+	state->s2 = __seed(LCG(state->s1),   8U);
+	state->s3 = __seed(LCG(state->s2),  16U);
+	state->s4 = __seed(LCG(state->s3), 128U);
+}
+
 /**
  *	prandom_seed - add entropy to pseudo random number generator
  *	@seed: seed value
@@ -141,7 +179,9 @@
 	 */
 	for_each_possible_cpu (i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
-		state->s1 = __seed(state->s1 ^ entropy, 1);
+
+		state->s1 = __seed(state->s1 ^ entropy, 2U);
+		prandom_warmup(state);
 	}
 }
 EXPORT_SYMBOL(prandom_seed);
@@ -154,46 +194,245 @@
 {
 	int i;
 
+#ifdef CONFIG_RANDOM32_SELFTEST
+	prandom_state_selftest();
+#endif
+
 	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state,i);
 
-#define LCG(x)	((x) * 69069)	/* super-duper LCG */
-		state->s1 = __seed(LCG(i + jiffies), 1);
-		state->s2 = __seed(LCG(state->s1), 7);
-		state->s3 = __seed(LCG(state->s2), 15);
-
-		/* "warm it up" */
-		prandom_u32_state(state);
-		prandom_u32_state(state);
-		prandom_u32_state(state);
-		prandom_u32_state(state);
-		prandom_u32_state(state);
-		prandom_u32_state(state);
+		prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+		prandom_warmup(state);
 	}
 	return 0;
 }
 core_initcall(prandom_init);
 
+static void __prandom_timer(unsigned long dontcare);
+static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
+
+static void __prandom_timer(unsigned long dontcare)
+{
+	u32 entropy;
+
+	get_random_bytes(&entropy, sizeof(entropy));
+	prandom_seed(entropy);
+	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+	seed_timer.expires = jiffies + (40 * HZ + (prandom_u32() % (40 * HZ)));
+	add_timer(&seed_timer);
+}
+
+static void prandom_start_seed_timer(void)
+{
+	set_timer_slack(&seed_timer, HZ);
+	seed_timer.expires = jiffies + 40 * HZ;
+	add_timer(&seed_timer);
+}
+
 /*
  *	Generate better values after random number generator
  *	is fully initialized.
  */
-static int __init prandom_reseed(void)
+static void __prandom_reseed(bool late)
 {
 	int i;
+	unsigned long flags;
+	static bool latch = false;
+	static DEFINE_SPINLOCK(lock);
+
+	/* only allow initial seeding (late == false) once */
+	spin_lock_irqsave(&lock, flags);
+	if (latch && !late)
+		goto out;
+	latch = true;
 
 	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[3];
+		u32 seeds[4];
 
 		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0], 1);
-		state->s2 = __seed(seeds[1], 7);
-		state->s3 = __seed(seeds[2], 15);
+		state->s1 = __seed(seeds[0],   2U);
+		state->s2 = __seed(seeds[1],   8U);
+		state->s3 = __seed(seeds[2],  16U);
+		state->s4 = __seed(seeds[3], 128U);
 
-		/* mix it in */
-		prandom_u32_state(state);
+		prandom_warmup(state);
 	}
+out:
+	spin_unlock_irqrestore(&lock, flags);
+}
+
+void prandom_reseed_late(void)
+{
+	__prandom_reseed(true);
+}
+
+static int __init prandom_reseed(void)
+{
+	__prandom_reseed(false);
+	prandom_start_seed_timer();
 	return 0;
 }
 late_initcall(prandom_reseed);
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+static struct prandom_test1 {
+	u32 seed;
+	u32 result;
+} test1[] = {
+	{ 1U, 3484351685U },
+	{ 2U, 2623130059U },
+	{ 3U, 3125133893U },
+	{ 4U,  984847254U },
+};
+
+static struct prandom_test2 {
+	u32 seed;
+	u32 iteration;
+	u32 result;
+} test2[] = {
+	/* Test cases against taus113 from GSL library. */
+	{  931557656U, 959U, 2975593782U },
+	{ 1339693295U, 876U, 3887776532U },
+	{ 1545556285U, 961U, 1615538833U },
+	{  601730776U, 723U, 1776162651U },
+	{ 1027516047U, 687U,  511983079U },
+	{  416526298U, 700U,  916156552U },
+	{ 1395522032U, 652U, 2222063676U },
+	{  366221443U, 617U, 2992857763U },
+	{ 1539836965U, 714U, 3783265725U },
+	{  556206671U, 994U,  799626459U },
+	{  684907218U, 799U,  367789491U },
+	{ 2121230701U, 931U, 2115467001U },
+	{ 1668516451U, 644U, 3620590685U },
+	{  768046066U, 883U, 2034077390U },
+	{ 1989159136U, 833U, 1195767305U },
+	{  536585145U, 996U, 3577259204U },
+	{ 1008129373U, 642U, 1478080776U },
+	{ 1740775604U, 939U, 1264980372U },
+	{ 1967883163U, 508U,   10734624U },
+	{ 1923019697U, 730U, 3821419629U },
+	{  442079932U, 560U, 3440032343U },
+	{ 1961302714U, 845U,  841962572U },
+	{ 2030205964U, 962U, 1325144227U },
+	{ 1160407529U, 507U,  240940858U },
+	{  635482502U, 779U, 4200489746U },
+	{ 1252788931U, 699U,  867195434U },
+	{ 1961817131U, 719U,  668237657U },
+	{ 1071468216U, 983U,  917876630U },
+	{ 1281848367U, 932U, 1003100039U },
+	{  582537119U, 780U, 1127273778U },
+	{ 1973672777U, 853U, 1071368872U },
+	{ 1896756996U, 762U, 1127851055U },
+	{  847917054U, 500U, 1717499075U },
+	{ 1240520510U, 951U, 2849576657U },
+	{ 1685071682U, 567U, 1961810396U },
+	{ 1516232129U, 557U,    3173877U },
+	{ 1208118903U, 612U, 1613145022U },
+	{ 1817269927U, 693U, 4279122573U },
+	{ 1510091701U, 717U,  638191229U },
+	{  365916850U, 807U,  600424314U },
+	{  399324359U, 702U, 1803598116U },
+	{ 1318480274U, 779U, 2074237022U },
+	{  697758115U, 840U, 1483639402U },
+	{ 1696507773U, 840U,  577415447U },
+	{ 2081979121U, 981U, 3041486449U },
+	{  955646687U, 742U, 3846494357U },
+	{ 1250683506U, 749U,  836419859U },
+	{  595003102U, 534U,  366794109U },
+	{   47485338U, 558U, 3521120834U },
+	{  619433479U, 610U, 3991783875U },
+	{  704096520U, 518U, 4139493852U },
+	{ 1712224984U, 606U, 2393312003U },
+	{ 1318233152U, 922U, 3880361134U },
+	{  855572992U, 761U, 1472974787U },
+	{   64721421U, 703U,  683860550U },
+	{  678931758U, 840U,  380616043U },
+	{  692711973U, 778U, 1382361947U },
+	{  677703619U, 530U, 2826914161U },
+	{   92393223U, 586U, 1522128471U },
+	{ 1222592920U, 743U, 3466726667U },
+	{  358288986U, 695U, 1091956998U },
+	{ 1935056945U, 958U,  514864477U },
+	{  735675993U, 990U, 1294239989U },
+	{ 1560089402U, 897U, 2238551287U },
+	{   70616361U, 829U,   22483098U },
+	{  368234700U, 731U, 2913875084U },
+	{   20221190U, 879U, 1564152970U },
+	{  539444654U, 682U, 1835141259U },
+	{ 1314987297U, 840U, 1801114136U },
+	{ 2019295544U, 645U, 3286438930U },
+	{  469023838U, 716U, 1637918202U },
+	{ 1843754496U, 653U, 2562092152U },
+	{  400672036U, 809U, 4264212785U },
+	{  404722249U, 965U, 2704116999U },
+	{  600702209U, 758U,  584979986U },
+	{  519953954U, 667U, 2574436237U },
+	{ 1658071126U, 694U, 2214569490U },
+	{  420480037U, 749U, 3430010866U },
+	{  690103647U, 969U, 3700758083U },
+	{ 1029424799U, 937U, 3787746841U },
+	{ 2012608669U, 506U, 3362628973U },
+	{ 1535432887U, 998U,   42610943U },
+	{ 1330635533U, 857U, 3040806504U },
+	{ 1223800550U, 539U, 3954229517U },
+	{ 1322411537U, 680U, 3223250324U },
+	{ 1877847898U, 945U, 2915147143U },
+	{ 1646356099U, 874U,  965988280U },
+	{  805687536U, 744U, 4032277920U },
+	{ 1948093210U, 633U, 1346597684U },
+	{  392609744U, 783U, 1636083295U },
+	{  690241304U, 770U, 1201031298U },
+	{ 1360302965U, 696U, 1665394461U },
+	{ 1220090946U, 780U, 1316922812U },
+	{  447092251U, 500U, 3438743375U },
+	{ 1613868791U, 592U,  828546883U },
+	{  523430951U, 548U, 2552392304U },
+	{  726692899U, 810U, 1656872867U },
+	{ 1364340021U, 836U, 3710513486U },
+	{ 1986257729U, 931U,  935013962U },
+	{  407983964U, 921U,  728767059U },
+};
+
+static void __init prandom_state_selftest(void)
+{
+	int i, j, errors = 0, runs = 0;
+	bool error = false;
+
+	for (i = 0; i < ARRAY_SIZE(test1); i++) {
+		struct rnd_state state;
+
+		prandom_seed_very_weak(&state, test1[i].seed);
+		prandom_warmup(&state);
+
+		if (test1[i].result != prandom_u32_state(&state))
+			error = true;
+	}
+
+	if (error)
+		pr_warn("prandom: seed boundary self test failed\n");
+	else
+		pr_info("prandom: seed boundary self test passed\n");
+
+	for (i = 0; i < ARRAY_SIZE(test2); i++) {
+		struct rnd_state state;
+
+		prandom_seed_very_weak(&state, test2[i].seed);
+		prandom_warmup(&state);
+
+		for (j = 0; j < test2[i].iteration - 1; j++)
+			prandom_u32_state(&state);
+
+		if (test2[i].result != prandom_u32_state(&state))
+			errors++;
+
+		runs++;
+		cond_resched();
+	}
+
+	if (errors)
+		pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
+	else
+		pr_info("prandom: %d self tests passed\n", runs);
+}
+#endif
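All of the seeding paths above funnel raw seed words through __seed(),
which enforces the lower bounds the taus113 recurrence requires (s1 > 1,
s2 > 7, s3 > 15, s4 > 127) by bumping any word that is too small. Its
definition in <linux/random.h> is effectively:

	static inline u32 __seed(u32 x, u32 m)
	{
		return (x < m) ? x + m : x;
	}

so a zero seed word passed with m = 128 becomes 128, satisfying the s4
constraint.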
diff --git a/lib/show_mem.c b/lib/show_mem.c
index b7c7231..5847a49 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -12,8 +12,7 @@
 void show_mem(unsigned int filter)
 {
 	pg_data_t *pgdat;
-	unsigned long total = 0, reserved = 0, shared = 0,
-		nonshared = 0, highmem = 0;
+	unsigned long total = 0, reserved = 0, highmem = 0;
 
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
@@ -22,43 +21,27 @@
 		return;
 
 	for_each_online_pgdat(pgdat) {
-		unsigned long i, flags;
+		unsigned long flags;
+		int zoneid;
 
 		pgdat_resize_lock(pgdat, &flags);
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page;
-			unsigned long pfn = pgdat->node_start_pfn + i;
-
-			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
-				touch_nmi_watchdog();
-
-			if (!pfn_valid(pfn))
+		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+			struct zone *zone = &pgdat->node_zones[zoneid];
+			if (!populated_zone(zone))
 				continue;
 
-			page = pfn_to_page(pfn);
+			total += zone->present_pages;
+			reserved += zone->present_pages - zone->managed_pages;
 
-			if (PageHighMem(page))
-				highmem++;
-
-			if (PageReserved(page))
-				reserved++;
-			else if (page_count(page) == 1)
-				nonshared++;
-			else if (page_count(page) > 1)
-				shared += page_count(page) - 1;
-
-			total++;
+			if (is_highmem_idx(zoneid))
+				highmem += zone->present_pages;
 		}
 		pgdat_resize_unlock(pgdat, &flags);
 	}
 
 	printk("%lu pages RAM\n", total);
-#ifdef CONFIG_HIGHMEM
-	printk("%lu pages HighMem\n", highmem);
-#endif
+	printk("%lu pages HighMem/MovableOnly\n", highmem);
 	printk("%lu pages reserved\n", reserved);
-	printk("%lu pages shared\n", shared);
-	printk("%lu pages non-shared\n", nonshared);
 #ifdef CONFIG_QUICKLIST
 	printk("%lu pages in pagetable cache\n",
 		quicklist_total_size());
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4e8686c..e4399fa 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -38,6 +38,9 @@
 #include <linux/bootmem.h>
 #include <linux/iommu-helper.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/swiotlb.h>
+
 #define OFFSET(val,align) ((unsigned long)	\
 	                   ( (val) & ( (align) - 1)))
 
@@ -502,6 +505,7 @@
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	dev_warn(hwdev, "swiotlb buffer is full\n");
 	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -726,6 +730,8 @@
 	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR) {
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 26559bd..10909c5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
 #include <linux/dcache.h>
+#include <linux/cred.h>
 #include <net/addrconf.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
@@ -1218,6 +1219,8 @@
  *            The maximum supported length is 64 bytes of the input. Consider
  *            to use print_hex_dump() for the larger input.
  * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
+ * - 'd[234]' For a dentry name (optionally 2-4 last components)
+ * - 'D[234]' Same as 'd' but for a struct file
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1312,11 +1315,37 @@
 				spec.field_width = default_width;
 			return string(buf, end, "pK-error", spec);
 		}
-		if (!((kptr_restrict == 0) ||
-		      (kptr_restrict == 1 &&
-		       has_capability_noaudit(current, CAP_SYSLOG))))
+
+		switch (kptr_restrict) {
+		case 0:
+			/* Always print %pK values */
+			break;
+		case 1: {
+			/*
+			 * Only print the real pointer value if the current
+			 * process has CAP_SYSLOG and is running with the
+			 * same credentials it started with. This is because
+			 * access to files is checked at open() time, but %pK
+			 * checks permission at read() time. We don't want to
+			 * leak pointer values if a binary opens a file using
+			 * %pK and then elevates privileges before reading it.
+			 */
+			const struct cred *cred = current_cred();
+
+			if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+			    !uid_eq(cred->euid, cred->uid) ||
+			    !gid_eq(cred->egid, cred->gid))
+				ptr = NULL;
+			break;
+		}
+		case 2:
+		default:
+			/* Always print 0's for %pK */
 			ptr = NULL;
+			break;
+		}
 		break;
+
 	case 'N':
 		switch (fmt[1]) {
 		case 'F':
@@ -1683,18 +1712,16 @@
 			break;
 
 		case FORMAT_TYPE_NRCHARS: {
-			u8 qualifier = spec.qualifier;
+			/*
+			 * Since %n poses a greater security risk than
+			 * utility, ignore %n and skip its argument.
+			 */
+			void *skip_arg;
 
-			if (qualifier == 'l') {
-				long *ip = va_arg(args, long *);
-				*ip = (str - buf);
-			} else if (_tolower(qualifier) == 'z') {
-				size_t *ip = va_arg(args, size_t *);
-				*ip = (str - buf);
-			} else {
-				int *ip = va_arg(args, int *);
-				*ip = (str - buf);
-			}
+			WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
+					old_fmt);
+
+			skip_arg = va_arg(args, void *);
 			break;
 		}
 
diff --git a/mm/Kconfig b/mm/Kconfig
index 394838f..eb69f35 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -20,7 +20,7 @@
 
 	  Some users of more advanced features like NUMA and
 	  memory hotplug may have different options here.
-	  DISCONTIGMEM is an more mature, better tested system,
+	  DISCONTIGMEM is a more mature, better tested system,
 	  but is incompatible with memory hotplug and may suffer
 	  decreased performance over SPARSEMEM.  If unsure between
 	  "Sparse Memory" and "Discontiguous Memory", choose
@@ -153,11 +153,18 @@
 	help
 	  Allow a node to have only movable memory.  Pages used by the kernel,
 	  such as direct mapping pages cannot be migrated.  So the corresponding
-	  memory device cannot be hotplugged.  This option allows users to
-	  online all the memory of a node as movable memory so that the whole
-	  node can be hotplugged.  Users who don't use the memory hotplug
-	  feature are fine with this option on since they don't online memory
-	  as movable.
+	  memory device cannot be hotplugged.  This option allows the following
+	  two things:
+	  - When the system is booting, a node full of hotpluggable memory can
+	  be arranged to have only movable memory so that the whole node can
+	  be hot-removed (requires the movable_node boot option).
+	  - After the system is up, the option allows users to online all the
+	  memory of a node as movable memory so that the whole node can be
+	  hot-removed.
+
+	  Users who don't use the memory hotplug feature are fine with this
+	  option on, since they neither specify the movable_node boot option
+	  nor online memory as movable.
 
 	  Say Y here if you want to hotplug a whole node.
 	  Say N here if you want kernel to use memory on all nodes evenly.
@@ -211,9 +218,11 @@
 	int
 	default "999999" if ARM && !CPU_CACHE_VIPT
 	default "999999" if PARISC && !PA20
-	default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
 	default "4"
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+	boolean
+
 #
 # support for memory balloon compaction
 config BALLOON_COMPACTION
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 6ab7744..90bd350 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -172,11 +172,12 @@
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
 	struct page *page;
-	unsigned long start, end, pages, count = 0;
+	unsigned long *map, start, end, pages, count = 0;
 
 	if (!bdata->node_bootmem_map)
 		return 0;
 
+	map = bdata->node_bootmem_map;
 	start = bdata->node_min_pfn;
 	end = bdata->node_low_pfn;
 
@@ -184,10 +185,9 @@
 		bdata - bootmem_node_data, start, end);
 
 	while (start < end) {
-		unsigned long *map, idx, vec;
+		unsigned long idx, vec;
 		unsigned shift;
 
-		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
 		shift = idx & (BITS_PER_LONG - 1);
 		/*
@@ -784,7 +784,7 @@
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 	/* update goal according ...MAX_DMA32_PFN */
-	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(pgdat);
 
 	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
 	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
diff --git a/mm/compaction.c b/mm/compaction.c
index b5326b1..805165b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -235,10 +235,9 @@
 }
 
 /*
- * Isolate free pages onto a private freelist. Caller must hold zone->lock.
- * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
- * pages inside of the pageblock (even though it may still end up isolating
- * some pages).
+ * Isolate free pages onto a private freelist. If @strict is true, will abort
+ * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
+ * (even though it may still end up isolating some pages).
  */
 static unsigned long isolate_freepages_block(struct compact_control *cc,
 				unsigned long blockpfn,
diff --git a/mm/filemap.c b/mm/filemap.c
index ae4846f..b7749a9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1090,7 +1090,6 @@
  * @filp:	the file to read
  * @ppos:	current file position
  * @desc:	read_descriptor
- * @actor:	read method
  *
  * This is a generic file read routine, and uses the
  * mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1099,7 +1098,7 @@
  * of the logic when it comes to error handling etc.
  */
 static void do_generic_file_read(struct file *filp, loff_t *ppos,
-		read_descriptor_t *desc, read_actor_t actor)
+		read_descriptor_t *desc)
 {
 	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
@@ -1200,13 +1199,14 @@
 		 * Ok, we have the page, and it's up-to-date, so
 		 * now we can copy it to user space...
 		 *
-		 * The actor routine returns how many bytes were actually used..
+		 * The file_read_actor routine returns how many bytes were
+		 * actually used.
 		 * NOTE! This may not be the same as how much of a user buffer
 		 * we filled up (we may be padding etc), so we can only update
 		 * "pos" here (the actor routine has to update the user buffer
 		 * pointers and the remaining count).
 		 */
-		ret = actor(desc, page, offset, nr);
+		ret = file_read_actor(desc, page, offset, nr);
 		offset += ret;
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
@@ -1479,7 +1479,7 @@
 		if (desc.count == 0)
 			continue;
 		desc.error = 0;
-		do_generic_file_read(filp, ppos, &desc, file_read_actor);
+		do_generic_file_read(filp, ppos, &desc);
 		retval += desc.written;
 		if (desc.error) {
 			retval = retval ?: desc.error;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 28fe26b..d8d9fe3 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -26,7 +26,7 @@
  * of ZERO_PAGE(), such as /dev/zero
  */
 static DEFINE_MUTEX(xip_sparse_mutex);
-static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
+static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
 static struct page *__xip_sparse_page;
 
 /* called under xip_sparse_mutex */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2612f60..bccd5a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -27,11 +27,12 @@
 #include "internal.h"
 
 /*
- * By default transparent hugepage support is enabled for all mappings
- * and khugepaged scans all mappings. Defrag is only invoked by
- * khugepaged hugepage allocations and by page faults inside
- * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
- * allocations.
+ * By default, transparent hugepage support is disabled to avoid risking an
+ * increased memory footprint for applications without a guaranteed benefit.
+ * When transparent hugepage support is enabled, it applies to all mappings,
+ * and khugepaged scans all mappings.
+ * Defrag is invoked by khugepaged hugepage allocations and by page faults
+ * for all hugepage allocations.
  */
 unsigned long transparent_hugepage_flags __read_mostly =
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
@@ -709,6 +710,7 @@
 					struct page *page)
 {
 	pgtable_t pgtable;
+	spinlock_t *ptl;
 
 	VM_BUG_ON(!PageCompound(page));
 	pgtable = pte_alloc_one(mm, haddr);
@@ -723,9 +725,9 @@
 	 */
 	__SetPageUptodate(page);
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_none(*pmd))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		pte_free(mm, pgtable);
@@ -737,8 +739,8 @@
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-		mm->nr_ptes++;
-		spin_unlock(&mm->page_table_lock);
+		atomic_long_inc(&mm->nr_ptes);
+		spin_unlock(ptl);
 	}
 
 	return 0;
@@ -758,14 +760,7 @@
 			       HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
-#ifndef CONFIG_NUMA
-static inline struct page *alloc_hugepage(int defrag)
-{
-	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
-			   HPAGE_PMD_ORDER);
-}
-#endif
-
+/* Caller must hold page table lock. */
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 		struct page *zero_page)
@@ -778,7 +773,7 @@
 	entry = pmd_mkhuge(entry);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
-	mm->nr_ptes++;
+	atomic_long_inc(&mm->nr_ptes);
 	return true;
 }
 
@@ -797,6 +792,7 @@
 		return VM_FAULT_OOM;
 	if (!(flags & FAULT_FLAG_WRITE) &&
 			transparent_hugepage_use_zero_page()) {
+		spinlock_t *ptl;
 		pgtable_t pgtable;
 		struct page *zero_page;
 		bool set;
@@ -809,10 +805,10 @@
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
 		}
-		spin_lock(&mm->page_table_lock);
+		ptl = pmd_lock(mm, pmd);
 		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
 				zero_page);
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		if (!set) {
 			pte_free(mm, pgtable);
 			put_huge_zero_page();
@@ -845,6 +841,7 @@
 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		  struct vm_area_struct *vma)
 {
+	spinlock_t *dst_ptl, *src_ptl;
 	struct page *src_page;
 	pmd_t pmd;
 	pgtable_t pgtable;
@@ -855,8 +852,9 @@
 	if (unlikely(!pgtable))
 		goto out;
 
-	spin_lock(&dst_mm->page_table_lock);
-	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
+	dst_ptl = pmd_lock(dst_mm, dst_pmd);
+	src_ptl = pmd_lockptr(src_mm, src_pmd);
+	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
 	ret = -EAGAIN;
 	pmd = *src_pmd;
@@ -865,7 +863,7 @@
 		goto out_unlock;
 	}
 	/*
-	 * mm->page_table_lock is enough to be sure that huge zero pmd is not
+	 * When page table lock is held, the huge zero pmd should not be
 	 * under splitting since we don't split the page itself, only pmd to
 	 * a page table.
 	 */
@@ -886,8 +884,8 @@
 	}
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
-		spin_unlock(&src_mm->page_table_lock);
-		spin_unlock(&dst_mm->page_table_lock);
+		spin_unlock(src_ptl);
+		spin_unlock(dst_ptl);
 		pte_free(dst_mm, pgtable);
 
 		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
@@ -903,12 +901,12 @@
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-	dst_mm->nr_ptes++;
+	atomic_long_inc(&dst_mm->nr_ptes);
 
 	ret = 0;
 out_unlock:
-	spin_unlock(&src_mm->page_table_lock);
-	spin_unlock(&dst_mm->page_table_lock);
+	spin_unlock(src_ptl);
+	spin_unlock(dst_ptl);
 out:
 	return ret;
 }
@@ -919,10 +917,11 @@
 			   pmd_t *pmd, pmd_t orig_pmd,
 			   int dirty)
 {
+	spinlock_t *ptl;
 	pmd_t entry;
 	unsigned long haddr;
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto unlock;
 
@@ -932,13 +931,14 @@
 		update_mmu_cache_pmd(vma, address, pmd);
 
 unlock:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 }
 
 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
 		pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
 {
+	spinlock_t *ptl;
 	pgtable_t pgtable;
 	pmd_t _pmd;
 	struct page *page;
@@ -965,7 +965,7 @@
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_free_page;
 
@@ -992,7 +992,7 @@
 	}
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	put_huge_zero_page();
 	inc_mm_counter(mm, MM_ANONPAGES);
 
@@ -1002,7 +1002,7 @@
 out:
 	return ret;
 out_free_page:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	mem_cgroup_uncharge_page(page);
 	put_page(page);
@@ -1016,6 +1016,7 @@
 					struct page *page,
 					unsigned long haddr)
 {
+	spinlock_t *ptl;
 	pgtable_t pgtable;
 	pmd_t _pmd;
 	int ret = 0, i;
@@ -1062,7 +1063,7 @@
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_free_pages;
 	VM_BUG_ON(!PageHead(page));
@@ -1088,7 +1089,7 @@
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
@@ -1099,7 +1100,7 @@
 	return ret;
 
 out_free_pages:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	mem_cgroup_uncharge_start();
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
@@ -1114,17 +1115,19 @@
 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
+	spinlock_t *ptl;
 	int ret = 0;
 	struct page *page = NULL, *new_page;
 	unsigned long haddr;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 
+	ptl = pmd_lockptr(mm, pmd);
 	VM_BUG_ON(!vma->anon_vma);
 	haddr = address & HPAGE_PMD_MASK;
 	if (is_huge_zero_pmd(orig_pmd))
 		goto alloc;
-	spin_lock(&mm->page_table_lock);
+	spin_lock(ptl);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_unlock;
 
@@ -1140,7 +1143,7 @@
 		goto out_unlock;
 	}
 	get_page(page);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
@@ -1187,11 +1190,11 @@
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-	spin_lock(&mm->page_table_lock);
+	spin_lock(ptl);
 	if (page)
 		put_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		mem_cgroup_uncharge_page(new_page);
 		put_page(new_page);
 		goto out_mn;
@@ -1213,13 +1216,13 @@
 		}
 		ret |= VM_FAULT_WRITE;
 	}
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 out_mn:
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
 	return ret;
 out_unlock:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	return ret;
 }
 
@@ -1231,7 +1234,7 @@
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page = NULL;
 
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmd));
 
 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
 		goto out;
@@ -1278,6 +1281,7 @@
 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
+	spinlock_t *ptl;
 	struct anon_vma *anon_vma = NULL;
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
@@ -1287,7 +1291,7 @@
 	bool migrated = false;
 	int flags = 0;
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmdp);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
@@ -1325,7 +1329,7 @@
 		 * relock and check_same as the page may no longer be mapped.
 		 * As the fault is being retried, do not account for it.
 		 */
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		wait_on_page_locked(page);
 		page_nid = -1;
 		goto out;
@@ -1333,13 +1337,13 @@
 
 	/* Page is misplaced, serialise migrations and parallel THP splits */
 	get_page(page);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	if (!page_locked)
 		lock_page(page);
 	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PMD did not change while page_table_lock was released */
-	spin_lock(&mm->page_table_lock);
+	spin_lock(ptl);
 	if (unlikely(!pmd_same(pmd, *pmdp))) {
 		unlock_page(page);
 		put_page(page);
@@ -1351,7 +1355,7 @@
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.
 	 */
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
 				pmdp, pmd, addr, page, target_nid);
 	if (migrated) {
@@ -1368,7 +1372,7 @@
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 
 out:
 	if (anon_vma)
@@ -1383,9 +1387,10 @@
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
+	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		struct page *page;
 		pgtable_t pgtable;
 		pmd_t orig_pmd;
@@ -1399,8 +1404,8 @@
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
 		if (is_huge_zero_pmd(orig_pmd)) {
-			tlb->mm->nr_ptes--;
-			spin_unlock(&tlb->mm->page_table_lock);
+			atomic_long_dec(&tlb->mm->nr_ptes);
+			spin_unlock(ptl);
 			put_huge_zero_page();
 		} else {
 			page = pmd_page(orig_pmd);
@@ -1408,8 +1413,8 @@
 			VM_BUG_ON(page_mapcount(page) < 0);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 			VM_BUG_ON(!PageHead(page));
-			tlb->mm->nr_ptes--;
-			spin_unlock(&tlb->mm->page_table_lock);
+			atomic_long_dec(&tlb->mm->nr_ptes);
+			spin_unlock(ptl);
 			tlb_remove_page(tlb, page);
 		}
 		pte_free(tlb->mm, pgtable);
@@ -1422,14 +1427,15 @@
 		unsigned long addr, unsigned long end,
 		unsigned char *vec)
 {
+	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		/*
 		 * All logical pages in the range are present
 		 * if backed by a huge page.
 		 */
-		spin_unlock(&vma->vm_mm->page_table_lock);
+		spin_unlock(ptl);
 		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
 		ret = 1;
 	}
@@ -1442,6 +1448,7 @@
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd)
 {
+	spinlock_t *old_ptl, *new_ptl;
 	int ret = 0;
 	pmd_t pmd;
 
@@ -1462,12 +1469,21 @@
 		goto out;
 	}
 
-	ret = __pmd_trans_huge_lock(old_pmd, vma);
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_sem prevents deadlock.
+	 */
+	ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
 	if (ret == 1) {
+		new_ptl = pmd_lockptr(mm, new_pmd);
+		if (new_ptl != old_ptl)
+			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
 		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
 		VM_BUG_ON(!pmd_none(*new_pmd));
 		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
-		spin_unlock(&mm->page_table_lock);
+		if (new_ptl != old_ptl)
+			spin_unlock(new_ptl);
+		spin_unlock(old_ptl);
 	}
 out:
 	return ret;
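
With split pmd locks, the old and new pmd in the huge-pmd move path above
may hash to two different spinlocks, so the second lock is taken only when
it differs from the first; taking the same spinlock twice would deadlock.
A minimal pthread-based sketch of that conditional double-locking idiom
(the pointer-hashed lock table is illustrative, not the kernel's):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t locks[4] = {
          PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
          PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
  };

  /* map an object to one of a few locks, split-ptlock style */
  static pthread_mutex_t *lockptr(const void *obj)
  {
          return &locks[((unsigned long)obj >> 4) % 4];
  }

  static void move_entry(long *old_slot, long *new_slot)
  {
          pthread_mutex_t *old_ptl = lockptr(old_slot);
          pthread_mutex_t *new_ptl = lockptr(new_slot);

          pthread_mutex_lock(old_ptl);
          if (new_ptl != old_ptl)         /* same lock: don't lock twice */
                  pthread_mutex_lock(new_ptl);

          *new_slot = *old_slot;          /* the "move" */
          *old_slot = 0;

          if (new_ptl != old_ptl)
                  pthread_mutex_unlock(new_ptl);
          pthread_mutex_unlock(old_ptl);
  }

  int main(void)
  {
          long a = 42, b = 0;

          move_entry(&a, &b);
          printf("%ld\n", b);
          return 0;
  }

In the kernel, the exclusive mmap_sem already serializes movers, which is
why no lock-ordering discipline is needed beyond the inequality check.
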
@@ -1483,9 +1499,10 @@
 		unsigned long addr, pgprot_t newprot, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
 		ret = 1;
 		if (!prot_numa) {
@@ -1514,7 +1531,7 @@
 		if (ret == HPAGE_PMD_NR)
 			set_pmd_at(mm, addr, pmd, entry);
 
-		spin_unlock(&vma->vm_mm->page_table_lock);
+		spin_unlock(ptl);
 	}
 
 	return ret;
@@ -1527,12 +1544,13 @@
  * Note that if it returns 1, this routine returns without unlocking page
  * table locks. So callers must unlock them.
  */
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+		spinlock_t **ptl)
 {
-	spin_lock(&vma->vm_mm->page_table_lock);
+	*ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd))) {
 		if (unlikely(pmd_trans_splitting(*pmd))) {
-			spin_unlock(&vma->vm_mm->page_table_lock);
+			spin_unlock(*ptl);
 			wait_split_huge_page(vma->anon_vma, pmd);
 			return -1;
 		} else {
@@ -1541,27 +1559,37 @@
 			return 1;
 		}
 	}
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	spin_unlock(*ptl);
 	return 0;
 }
 
+/*
+ * This function returns whether a given @page is mapped onto the @address
+ * in the virtual space of @mm.
+ *
+ * When it's true, this function returns the pmd with the page table lock
+ * held, passing the lock back to the caller via @ptl.
+ * When it's false, it returns NULL without holding the page table lock.
+ */
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
-			      enum page_check_address_pmd_flag flag)
+			      enum page_check_address_pmd_flag flag,
+			      spinlock_t **ptl)
 {
-	pmd_t *pmd, *ret = NULL;
+	pmd_t *pmd;
 
 	if (address & ~HPAGE_PMD_MASK)
-		goto out;
+		return NULL;
 
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd)
-		goto out;
+		return NULL;
+	*ptl = pmd_lock(mm, pmd);
 	if (pmd_none(*pmd))
-		goto out;
+		goto unlock;
 	if (pmd_page(*pmd) != page)
-		goto out;
+		goto unlock;
 	/*
 	 * split_vma() may create temporary aliased mappings. There is
 	 * no risk as long as all huge pmd are found and have their
@@ -1571,14 +1599,15 @@
 	 */
 	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
 	    pmd_trans_splitting(*pmd))
-		goto out;
+		goto unlock;
 	if (pmd_trans_huge(*pmd)) {
 		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
 			  !pmd_trans_splitting(*pmd));
-		ret = pmd;
+		return pmd;
 	}
-out:
-	return ret;
+unlock:
+	spin_unlock(*ptl);
+	return NULL;
 }
 
 static int __split_huge_page_splitting(struct page *page,
@@ -1586,6 +1615,7 @@
 				       unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pmd_t *pmd;
 	int ret = 0;
 	/* For mmu_notifiers */
@@ -1593,9 +1623,8 @@
 	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
 
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-	spin_lock(&mm->page_table_lock);
 	pmd = page_check_address_pmd(page, mm, address,
-				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
 	if (pmd) {
 		/*
 		 * We can't temporarily set the pmd to null in order
@@ -1606,8 +1635,8 @@
 		 */
 		pmdp_splitting_flush(vma, address, pmd);
 		ret = 1;
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	return ret;
@@ -1738,14 +1767,14 @@
 				 unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pmd_t *pmd, _pmd;
 	int ret = 0, i;
 	pgtable_t pgtable;
 	unsigned long haddr;
 
-	spin_lock(&mm->page_table_lock);
 	pmd = page_check_address_pmd(page, mm, address,
-				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+			PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
 	if (pmd) {
 		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 		pmd_populate(mm, &_pmd, pgtable);
@@ -1800,8 +1829,8 @@
 		pmdp_invalidate(vma, address, pmd);
 		pmd_populate(mm, pmd, pgtable);
 		ret = 1;
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 
 	return ret;
 }
@@ -2198,7 +2227,34 @@
 			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
+static int khugepaged_node_load[MAX_NUMNODES];
+
 #ifdef CONFIG_NUMA
+static int khugepaged_find_target_node(void)
+{
+	static int last_khugepaged_target_node = NUMA_NO_NODE;
+	int nid, target_node = 0, max_value = 0;
+
+	/* find first node with max normal pages hit */
+	for (nid = 0; nid < MAX_NUMNODES; nid++)
+		if (khugepaged_node_load[nid] > max_value) {
+			max_value = khugepaged_node_load[nid];
+			target_node = nid;
+		}
+
+	/* do some balancing if several nodes have the same hit record */
+	if (target_node <= last_khugepaged_target_node)
+		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
+				nid++)
+			if (max_value == khugepaged_node_load[nid]) {
+				target_node = nid;
+				break;
+			}
+
+	last_khugepaged_target_node = target_node;
+	return target_node;
+}
+
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 {
 	if (IS_ERR(*hpage)) {
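
khugepaged_find_target_node() above is pure bookkeeping: pick the node
that backed the most scanned pages, and rotate among nodes with equal
counts so the same one is not chosen every time. The logic runs unchanged
as a stand-alone program (MAX_NUMNODES shrunk to 4 for the demo, -1
standing in for NUMA_NO_NODE):

  #include <stdio.h>

  #define MAX_NUMNODES 4

  static int node_load[MAX_NUMNODES];

  static int find_target_node(void)
  {
          static int last_target = -1;
          int nid, target = 0, max_value = 0;

          /* find the first node with the max pages hit */
          for (nid = 0; nid < MAX_NUMNODES; nid++)
                  if (node_load[nid] > max_value) {
                          max_value = node_load[nid];
                          target = nid;
                  }

          /* rotate past last time's pick when several nodes tie */
          if (target <= last_target)
                  for (nid = last_target + 1; nid < MAX_NUMNODES; nid++)
                          if (node_load[nid] == max_value) {
                                  target = nid;
                                  break;
                          }

          last_target = target;
          return target;
  }

  int main(void)
  {
          node_load[1] = 5;
          node_load[3] = 5;
          printf("%d\n", find_target_node());     /* 1: first max */
          printf("%d\n", find_target_node());     /* 3: tie, rotated */
          printf("%d\n", find_target_node());     /* 1: wraps around */
          return 0;
  }
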
@@ -2232,9 +2288,8 @@
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	*hpage  = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-				      node, __GFP_OTHER_NODE);
-
+	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
+		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
 	/*
 	 * After allocating the hugepage, release the mmap_sem read lock in
 	 * preparation for taking it in write mode.
@@ -2250,6 +2305,17 @@
 	return *hpage;
 }
 #else
+static int khugepaged_find_target_node(void)
+{
+	return 0;
+}
+
+static inline struct page *alloc_hugepage(int defrag)
+{
+	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
+			   HPAGE_PMD_ORDER);
+}
+
 static struct page *khugepaged_alloc_hugepage(bool *wait)
 {
 	struct page *hpage;
@@ -2316,7 +2382,7 @@
 	pte_t *pte;
 	pgtable_t pgtable;
 	struct page *new_page;
-	spinlock_t *ptl;
+	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated;
 	unsigned long hstart, hend;
 	unsigned long mmun_start;	/* For mmu_notifiers */
@@ -2359,12 +2425,12 @@
 	anon_vma_lock_write(vma->anon_vma);
 
 	pte = pte_offset_map(pmd, address);
-	ptl = pte_lockptr(mm, pmd);
+	pte_ptl = pte_lockptr(mm, pmd);
 
 	mmun_start = address;
 	mmun_end   = address + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-	spin_lock(&mm->page_table_lock); /* probably unnecessary */
+	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
 	/*
 	 * After this gup_fast can't run anymore. This also removes
 	 * any huge TLB entry from the CPU so we won't allow
@@ -2372,16 +2438,16 @@
 	 * to avoid the risk of CPU bugs in that area.
 	 */
 	_pmd = pmdp_clear_flush(vma, address, pmd);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
-	spin_lock(ptl);
+	spin_lock(pte_ptl);
 	isolated = __collapse_huge_page_isolate(vma, address, pte);
-	spin_unlock(ptl);
+	spin_unlock(pte_ptl);
 
 	if (unlikely(!isolated)) {
 		pte_unmap(pte);
-		spin_lock(&mm->page_table_lock);
+		spin_lock(pmd_ptl);
 		BUG_ON(!pmd_none(*pmd));
 		/*
 		 * We can only use set_pmd_at when establishing
@@ -2389,7 +2455,7 @@
 		 * points to regular pagetables. Use pmd_populate for that
 		 */
 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
 		goto out;
 	}
@@ -2400,7 +2466,7 @@
 	 */
 	anon_vma_unlock_write(vma->anon_vma);
 
-	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
 	pte_unmap(pte);
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
@@ -2415,13 +2481,13 @@
 	 */
 	smp_wmb();
 
-	spin_lock(&mm->page_table_lock);
+	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(pmd_ptl);
 
 	*hpage = NULL;
 
@@ -2456,6 +2522,7 @@
 	if (pmd_trans_huge(*pmd))
 		goto out;
 
+	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
 	     _pte++, _address += PAGE_SIZE) {
@@ -2472,12 +2539,13 @@
 		if (unlikely(!page))
 			goto out_unmap;
 		/*
-		 * Chose the node of the first page. This could
-		 * be more sophisticated and look at more pages,
-		 * but isn't for now.
+		 * Record which node the original page is from and save this
+		 * information to khugepaged_node_load[].
+		 * Khugepaged will allocate a hugepage from the node that
+		 * has the max hit record.
 		 */
-		if (node == NUMA_NO_NODE)
-			node = page_to_nid(page);
+		node = page_to_nid(page);
+		khugepaged_node_load[node]++;
 		VM_BUG_ON(PageCompound(page));
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
@@ -2492,9 +2560,11 @@
 		ret = 1;
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
-	if (ret)
+	if (ret) {
+		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
 		collapse_huge_page(mm, address, hpage, vma, node);
+	}
 out:
 	return ret;
 }
@@ -2746,6 +2816,7 @@
 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 		pmd_t *pmd)
 {
+	spinlock_t *ptl;
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
@@ -2758,22 +2829,22 @@
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 again:
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_trans_huge(*pmd))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 		return;
 	}
 	if (is_huge_zero_pmd(*pmd)) {
 		__split_huge_zero_page_pmd(vma, haddr, pmd);
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 		return;
 	}
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!page_count(page));
 	get_page(page);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	split_huge_page(page);
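
Nearly every hunk in huge_memory.c above follows one conversion:
spin_lock(&mm->page_table_lock) becomes ptl = pmd_lock(mm, pmd) and the
matching unlock becomes spin_unlock(ptl), so the caller releases whichever
lock it was actually given and faults on different page tables stop
serializing on one mm-wide lock. A rough user-space analogue of a lock
embedded in the protected object and handed back to the caller
(illustrative only):

  #include <pthread.h>
  #include <stdio.h>

  struct pagetable {
          pthread_mutex_t ptl;    /* one lock per table, not one per mm */
          long entry;
  };

  /* analogous to pmd_lock(): take the table's own lock, hand it back */
  static pthread_mutex_t *table_lock(struct pagetable *pt)
  {
          pthread_mutex_lock(&pt->ptl);
          return &pt->ptl;
  }

  static void update_entry(struct pagetable *pt, long val)
  {
          pthread_mutex_t *ptl = table_lock(pt);

          pt->entry = val;        /* critical section covers one table */
          pthread_mutex_unlock(ptl);
  }

  int main(void)
  {
          struct pagetable pt = { PTHREAD_MUTEX_INITIALIZER, 0 };

          update_entry(&pt, 42);
          printf("%ld\n", pt.entry);
          return 0;
  }

Keeping the returned pointer is what lets the unlock stay correct whether
the lock is per table or, in the kernel's fallback configurations, still
the single mm-wide one.
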
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0b7656e..7d57af2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2376,6 +2376,7 @@
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
+		spinlock_t *src_ptl, *dst_ptl;
 		src_pte = huge_pte_offset(src, addr);
 		if (!src_pte)
 			continue;
@@ -2387,8 +2388,9 @@
 		if (dst_pte == src_pte)
 			continue;
 
-		spin_lock(&dst->page_table_lock);
-		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+		dst_ptl = huge_pte_lock(h, dst, dst_pte);
+		src_ptl = huge_pte_lockptr(h, src, src_pte);
+		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
@@ -2398,8 +2400,8 @@
 			page_dup_rmap(ptepage);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
-		spin_unlock(&src->page_table_lock);
-		spin_unlock(&dst->page_table_lock);
+		spin_unlock(src_ptl);
+		spin_unlock(dst_ptl);
 	}
 	return 0;
 
@@ -2442,6 +2444,7 @@
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
+	spinlock_t *ptl;
 	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
@@ -2455,25 +2458,25 @@
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 again:
-	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
+		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, &address, ptep))
-			continue;
+			goto unlock;
 
 		pte = huge_ptep_get(ptep);
 		if (huge_pte_none(pte))
-			continue;
+			goto unlock;
 
 		/*
 		 * HWPoisoned hugepage is already unmapped and dropped reference
 		 */
 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			continue;
+			goto unlock;
 		}
 
 		page = pte_page(pte);
@@ -2484,7 +2487,7 @@
 		 */
 		if (ref_page) {
 			if (page != ref_page)
-				continue;
+				goto unlock;
 
 			/*
 			 * Mark the VMA as having unmapped its page so that
@@ -2501,13 +2504,18 @@
 
 		page_remove_rmap(page);
 		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush)
+		if (force_flush) {
+			spin_unlock(ptl);
 			break;
+		}
 		/* Bail out after unmapping reference page if supplied */
-		if (ref_page)
+		if (ref_page) {
+			spin_unlock(ptl);
 			break;
+		}
+unlock:
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 	/*
 	 * mmu_gather ran out of room to batch pages, we break out of
 	 * the PTE lock to avoid doing the potential expensive TLB invalidate
@@ -2613,7 +2621,7 @@
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte,
-			struct page *pagecache_page)
+			struct page *pagecache_page, spinlock_t *ptl)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct page *old_page, *new_page;
@@ -2647,8 +2655,8 @@
 
 	page_cache_get(old_page);
 
-	/* Drop page_table_lock as buddy allocator may be called */
-	spin_unlock(&mm->page_table_lock);
+	/* Drop page table lock as buddy allocator may be called */
+	spin_unlock(ptl);
 	new_page = alloc_huge_page(vma, address, outside_reserve);
 
 	if (IS_ERR(new_page)) {
@@ -2666,13 +2674,13 @@
 			BUG_ON(huge_pte_none(pte));
 			if (unmap_ref_private(mm, vma, old_page, address)) {
 				BUG_ON(huge_pte_none(pte));
-				spin_lock(&mm->page_table_lock);
+				spin_lock(ptl);
 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 				if (likely(pte_same(huge_ptep_get(ptep), pte)))
 					goto retry_avoidcopy;
 				/*
-				 * race occurs while re-acquiring page_table_lock, and
-				 * our job is done.
+				 * race occurs while re-acquiring page table
+				 * lock, and our job is done.
 				 */
 				return 0;
 			}
@@ -2680,7 +2688,7 @@
 		}
 
 		/* Caller expects lock to be held */
-		spin_lock(&mm->page_table_lock);
+		spin_lock(ptl);
 		if (err == -ENOMEM)
 			return VM_FAULT_OOM;
 		else
@@ -2695,7 +2703,7 @@
 		page_cache_release(new_page);
 		page_cache_release(old_page);
 		/* Caller expects lock to be held */
-		spin_lock(&mm->page_table_lock);
+		spin_lock(ptl);
 		return VM_FAULT_OOM;
 	}
 
@@ -2707,10 +2715,10 @@
 	mmun_end = mmun_start + huge_page_size(h);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	/*
-	 * Retake the page_table_lock to check for racing updates
+	 * Retake the page table lock to check for racing updates
 	 * before the page tables are altered
 	 */
-	spin_lock(&mm->page_table_lock);
+	spin_lock(ptl);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);
@@ -2724,13 +2732,13 @@
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	page_cache_release(new_page);
 	page_cache_release(old_page);
 
 	/* Caller expects lock to be held */
-	spin_lock(&mm->page_table_lock);
+	spin_lock(ptl);
 	return 0;
 }
 
@@ -2778,6 +2786,7 @@
 	struct page *page;
 	struct address_space *mapping;
 	pte_t new_pte;
+	spinlock_t *ptl;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -2864,7 +2873,8 @@
 			goto backout_unlocked;
 		}
 
-	spin_lock(&mm->page_table_lock);
+	ptl = huge_pte_lockptr(h, mm, ptep);
+	spin_lock(ptl);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
 	if (idx >= size)
 		goto backout;
@@ -2885,16 +2895,16 @@
 
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
 	}
 
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	unlock_page(page);
 out:
 	return ret;
 
 backout:
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 backout_unlocked:
 	unlock_page(page);
 	put_page(page);
@@ -2906,6 +2916,7 @@
 {
 	pte_t *ptep;
 	pte_t entry;
+	spinlock_t *ptl;
 	int ret;
 	struct page *page = NULL;
 	struct page *pagecache_page = NULL;
@@ -2918,7 +2929,7 @@
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
-			migration_entry_wait_huge(mm, ptep);
+			migration_entry_wait_huge(vma, mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
@@ -2974,17 +2985,18 @@
 	if (page != pagecache_page)
 		lock_page(page);
 
-	spin_lock(&mm->page_table_lock);
+	ptl = huge_pte_lockptr(h, mm, ptep);
+	spin_lock(ptl);
 	/* Check for a racing update before calling hugetlb_cow */
 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
-		goto out_page_table_lock;
+		goto out_ptl;
 
 
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!huge_pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
-							pagecache_page);
-			goto out_page_table_lock;
+					pagecache_page, ptl);
+			goto out_ptl;
 		}
 		entry = huge_pte_mkdirty(entry);
 	}
@@ -2993,8 +3005,8 @@
 						flags & FAULT_FLAG_WRITE))
 		update_mmu_cache(vma, address, ptep);
 
-out_page_table_lock:
-	spin_unlock(&mm->page_table_lock);
+out_ptl:
+	spin_unlock(ptl);
 
 	if (pagecache_page) {
 		unlock_page(pagecache_page);
@@ -3020,9 +3032,9 @@
 	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
 
-	spin_lock(&mm->page_table_lock);
 	while (vaddr < vma->vm_end && remainder) {
 		pte_t *pte;
+		spinlock_t *ptl = NULL;
 		int absent;
 		struct page *page;
 
@@ -3030,8 +3042,12 @@
 		 * Some archs (sparc64, sh*) have multiple pte_ts to
 		 * each hugepage.  We have to make sure we get the
 		 * first, for the page indexing below to work.
+		 *
+		 * Note that the page table lock is not held when pte is null.
 		 */
 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+		if (pte)
+			ptl = huge_pte_lock(h, mm, pte);
 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
 
 		/*
@@ -3043,6 +3059,8 @@
 		 */
 		if (absent && (flags & FOLL_DUMP) &&
 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+			if (pte)
+				spin_unlock(ptl);
 			remainder = 0;
 			break;
 		}
@@ -3062,10 +3080,10 @@
 		      !huge_pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
-			spin_unlock(&mm->page_table_lock);
+			if (pte)
+				spin_unlock(ptl);
 			ret = hugetlb_fault(mm, vma, vaddr,
 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
-			spin_lock(&mm->page_table_lock);
 			if (!(ret & VM_FAULT_ERROR))
 				continue;
 
@@ -3096,8 +3114,8 @@
 			 */
 			goto same_page;
 		}
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 	*nr_pages = remainder;
 	*position = vaddr;
 
@@ -3118,13 +3136,15 @@
 	flush_cache_range(vma, address, end);
 
 	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += huge_page_size(h)) {
+		spinlock_t *ptl;
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
+		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, &address, ptep)) {
 			pages++;
+			spin_unlock(ptl);
 			continue;
 		}
 		if (!huge_pte_none(huge_ptep_get(ptep))) {
@@ -3134,8 +3154,8 @@
 			set_huge_pte_at(mm, address, ptep, pte);
 			pages++;
 		}
+		spin_unlock(ptl);
 	}
-	spin_unlock(&mm->page_table_lock);
 	/*
 	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
 	 * may have cleared our pud entry and done put_page on the page table:
@@ -3298,6 +3318,7 @@
 	unsigned long saddr;
 	pte_t *spte = NULL;
 	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
@@ -3320,13 +3341,14 @@
 	if (!spte)
 		goto out;
 
-	spin_lock(&mm->page_table_lock);
+	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
+	spin_lock(ptl);
 	if (pud_none(*pud))
 		pud_populate(mm, pud,
 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
 	else
 		put_page(virt_to_page(spte));
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	mutex_unlock(&mapping->i_mmap_mutex);
@@ -3340,7 +3362,7 @@
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with vma->vm_mm->page_table_lock held.
+ * called with the page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
  *	    0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e126b0e..31f01c5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -753,7 +753,9 @@
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (ptr + size > object->pointer + object->size) {
+	if (size == SIZE_MAX) {
+		size = object->pointer + object->size - ptr;
+	} else if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
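
The kmemleak change above adds a sentinel: a scan-area size of SIZE_MAX
now means "from ptr to the end of the object" and is clamped rather than
rejected as too large. The clamp in isolation (hypothetical helper name):

  #include <stdint.h>
  #include <stdio.h>

  /* SIZE_MAX as a sentinel: scan from ptr to the end of the object */
  static size_t scan_len(uintptr_t obj, size_t obj_size,
                         uintptr_t ptr, size_t size)
  {
          if (size == SIZE_MAX)
                  return obj + obj_size - ptr;
          return size;
  }

  int main(void)
  {
          /* object at 0x1000, 256 bytes; area starts 0x40 in -> 192 left */
          printf("%zu\n", scan_len(0x1000, 256, 0x1040, SIZE_MAX));
          return 0;
  }
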
diff --git a/mm/ksm.c b/mm/ksm.c
index 0bea2b2..175fff7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2309,8 +2309,8 @@
 			 * Allocate stable and unstable together:
 			 * MAXSMP NODES_SHIFT 10 will use 16kB.
 			 */
-			buf = kcalloc(nr_node_ids + nr_node_ids,
-				sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
+			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
+				      GFP_KERNEL);
 			/* Let us assume that RB_ROOT is NULL is zero */
 			if (!buf)
 				err = -ENOMEM;
diff --git a/mm/memblock.c b/mm/memblock.c
index 0ac412a..53e477b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,6 +20,8 @@
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
 
+#include <asm-generic/sections.h>
+
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 
@@ -32,6 +34,7 @@
 	.reserved.cnt		= 1,	/* empty dummy entry */
 	.reserved.max		= INIT_MEMBLOCK_REGIONS,
 
+	.bottom_up		= false,
 	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
 };
 
@@ -82,33 +85,57 @@
 	return (i < type->cnt) ? i : -1;
 }
 
-/**
- * memblock_find_in_range_node - find free area in given range and node
+/**
+ * __memblock_find_range_bottom_up - find free area utility in bottom-up
  * @start: start of candidate range
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @nid: nid of the free area to find, %MAX_NUMNODES for any node
  *
- * Find @size free area aligned to @align in the specified range and node.
+ * Utility called from memblock_find_in_range_node(), finds a free area bottom-up.
  *
  * RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
  */
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
-					phys_addr_t end, phys_addr_t size,
-					phys_addr_t align, int nid)
+static phys_addr_t __init_memblock
+__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid)
 {
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	/* pump up @end */
-	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
-		end = memblock.current_limit;
+	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
+		this_start = clamp(this_start, start, end);
+		this_end = clamp(this_end, start, end);
 
-	/* avoid allocating the first page */
-	start = max_t(phys_addr_t, start, PAGE_SIZE);
-	end = max(start, end);
+		cand = round_up(this_start, align);
+		if (cand < this_end && this_end - cand >= size)
+			return cand;
+	}
+
+	return 0;
+}
+
+/**
+ * __memblock_find_range_top_down - find free area utility in top-down
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Utility called from memblock_find_in_range_node(), finds a free area top-down.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+static phys_addr_t __init_memblock
+__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
+			       phys_addr_t size, phys_addr_t align, int nid)
+{
+	phys_addr_t this_start, this_end, cand;
+	u64 i;
 
 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
 		this_start = clamp(this_start, start, end);
@@ -121,10 +148,81 @@
 		if (cand >= this_start)
 			return cand;
 	}
+
 	return 0;
 }
 
 /**
+ * memblock_find_in_range_node - find free area in given range and node
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+ * When allocation direction is bottom-up, @start should be greater
+ * than the end of the kernel image. Otherwise, it will be trimmed. The
+ * reason is that we want bottom-up allocations to land just above the
+ * kernel image, so it is highly likely that the allocated memory and
+ * the kernel will reside on the same node.
+ *
+ * If bottom-up allocation fails, memory will be allocated top-down.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+					phys_addr_t end, phys_addr_t size,
+					phys_addr_t align, int nid)
+{
+	int ret;
+	phys_addr_t kernel_end;
+
+	/* pump up @end */
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+		end = memblock.current_limit;
+
+	/* avoid allocating the first page */
+	start = max_t(phys_addr_t, start, PAGE_SIZE);
+	end = max(start, end);
+	kernel_end = __pa_symbol(_end);
+
+	/*
+	 * try bottom-up allocation only when bottom-up mode
+	 * is set and @end is above the kernel image.
+	 */
+	if (memblock_bottom_up() && end > kernel_end) {
+		phys_addr_t bottom_up_start;
+
+		/* make sure we will allocate above the kernel */
+		bottom_up_start = max(start, kernel_end);
+
+		/* ok, try bottom-up allocation first */
+		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
+						      size, align, nid);
+		if (ret)
+			return ret;
+
+		/*
+		 * we always limit bottom-up allocation above the kernel,
+		 * but top-down allocation doesn't have the limit, so
+		 * retrying top-down allocation may succeed when bottom-up
+		 * allocation failed.
+		 *
+		 * bottom-up allocation is expected to fail very rarely,
+		 * so we use WARN_ONCE() here to see the stack trace if
+		 * a failure happens.
+		 */
+		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
+			     "memory hotunplug may be affected\n");
+	}
+
+	return __memblock_find_range_top_down(start, end, size, align, nid);
+}
+
+/**
  * memblock_find_in_range - find free area in given range
  * @start: start of candidate range
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
@@ -134,7 +232,7 @@
  * Find @size free area aligned to @align in the specified range.
  *
  * RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
  */
 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
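
memblock_find_in_range_node() now runs a bottom-up pass first, restricted
to addresses above the kernel image so the allocation likely lands on the
kernel's node, and falls back to the original top-down scan. Both
directions reduce to a clamped walk over free ranges; a stand-alone
sketch with a fixed range table and power-of-two alignment (values
arbitrary):

  #include <stdio.h>

  struct range { unsigned long start, end; };

  static struct range free_ranges[] = {
          { 0x2000, 0x3000 }, { 0x8000, 0x9000 },
  };
  #define NR (sizeof(free_ranges) / sizeof(free_ranges[0]))

  /* requires a power-of-two alignment */
  static unsigned long round_up_pow2(unsigned long x, unsigned long a)
  {
          return (x + a - 1) & ~(a - 1);
  }

  static unsigned long find_bottom_up(unsigned long size, unsigned long align)
  {
          for (unsigned int i = 0; i < NR; i++) {
                  unsigned long cand =
                          round_up_pow2(free_ranges[i].start, align);

                  if (cand < free_ranges[i].end &&
                      free_ranges[i].end - cand >= size)
                          return cand;
          }
          return 0;       /* 0 means failure, as in memblock */
  }

  static unsigned long find_top_down(unsigned long size, unsigned long align)
  {
          for (int i = NR - 1; i >= 0; i--) {
                  unsigned long cand =
                          (free_ranges[i].end - size) & ~(align - 1);

                  if (cand >= free_ranges[i].start)
                          return cand;
          }
          return 0;
  }

  int main(void)
  {
          printf("bottom-up: %#lx, top-down: %#lx\n",
                 find_bottom_up(0x100, 0x100), find_top_down(0x100, 0x100));
          return 0;
  }
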
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13b9d0f..f1a0ae6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,6 +59,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/tcp_memcontrol.h>
+#include "slab.h"
 
 #include <asm/uaccess.h>
 
@@ -312,7 +313,7 @@
 
 	atomic_t	dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-	struct tcp_memcontrol tcp_mem;
+	struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
 	/* analogous to slab_common's slab_caches list. per-memcg */
@@ -499,6 +500,29 @@
 	return (memcg == root_mem_cgroup);
 }
 
+/*
+ * We restrict the id to the range [1, 65535], so it can fit into
+ * an unsigned short.
+ */
+#define MEM_CGROUP_ID_MAX	USHRT_MAX
+
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+	/*
+	 * The ID of the root cgroup is 0, but memcg treats 0 as an
+	 * invalid ID, so we return (cgroup_id + 1).
+	 */
+	return memcg->css.cgroup->id + 1;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+	struct cgroup_subsys_state *css;
+
+	css = css_from_id(id - 1, &mem_cgroup_subsys);
+	return mem_cgroup_from_css(css);
+}
+
 /* Writing them here to avoid exposing memcg's inner layout */
 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 
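
mem_cgroup_id()/mem_cgroup_from_id() above encode "no cgroup" as 0 in the
unsigned short swap records by storing cgroup_id + 1 and undoing the
shift on lookup; MEM_CGROUP_ID_MAX caps ids so the shifted value still
fits. The round-trip convention in miniature (user-space, names invented):

  #include <stdio.h>

  /* 0 marks an empty swap record; real ids are stored shifted up by one */
  static unsigned short to_record(int cgroup_id)
  {
          return (unsigned short)(cgroup_id + 1);
  }

  static int from_record(unsigned short id)
  {
          return id ? id - 1 : -1;        /* -1: no cgroup recorded */
  }

  int main(void)
  {
          unsigned short rec = to_record(0);      /* root cgroup, id 0 */

          printf("record=%u cgroup=%d\n", rec, from_record(rec));
          printf("empty slot -> %d\n", from_record(0));
          return 0;
  }
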
@@ -551,13 +575,13 @@
 	if (!memcg || mem_cgroup_is_root(memcg))
 		return NULL;
 
-	return &memcg->tcp_mem.cg_proto;
+	return &memcg->tcp_mem;
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);
 
 static void disarm_sock_keys(struct mem_cgroup *memcg)
 {
-	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+	if (!memcg_proto_activated(&memcg->tcp_mem))
 		return;
 	static_key_slow_dec(&memcg_socket_limit_enabled);
 }
@@ -570,16 +594,11 @@
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
- * There are two main reasons for not using the css_id for this:
- *  1) this works better in sparse environments, where we have a lot of memcgs,
- *     but only a few kmem-limited. Or also, if we have, for instance, 200
- *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
- *     200 entry array for that.
- *
- *  2) In order not to violate the cgroup API, we would like to do all memory
- *     allocation in ->create(). At that point, we haven't yet allocated the
- *     css_id. Having a separate index prevents us from messing with the cgroup
- *     core for this
+ * The main reason for not using the cgroup id for this:
+ *  this works better in sparse environments, where we have a lot of memcgs
+ *  but only a few that are kmem-limited. If, for instance, we have 200
+ *  memcgs and none but the 200th is kmem-limited, we'd have to have a
+ *  200-entry array for that.
  *
  * The current size of the caches array is stored in
  * memcg_limited_groups_array_size.  It will double each time we have to
@@ -594,14 +613,14 @@
  * cgroups is a reasonable guess. In the future, it could be a parameter or
  * tunable, but that is strictly not necessary.
  *
- * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
+ * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
  * this constant directly from cgroup, but it is understandable that this is
  * better kept as an internal representation in cgroup.c. In any case, the
- * css_id space is not getting any smaller, and we don't have to necessarily
+ * cgrp_id space is not getting any smaller, and we don't have to necessarily
  * increase ours as well if it increases.
  */
 #define MEMCG_CACHES_MIN_SIZE 4
-#define MEMCG_CACHES_MAX_SIZE 65535
+#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
@@ -1408,7 +1427,7 @@
 		return true;
 	if (!root_memcg->use_hierarchy || !memcg)
 		return false;
-	return css_is_ancestor(&memcg->css, &root_memcg->css);
+	return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
 }
 
 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
@@ -2826,15 +2845,10 @@
  */
 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
 {
-	struct cgroup_subsys_state *css;
-
 	/* ID 0 is unused ID */
 	if (!id)
 		return NULL;
-	css = css_lookup(&mem_cgroup_subsys, id);
-	if (!css)
-		return NULL;
-	return mem_cgroup_from_css(css);
+	return mem_cgroup_from_id(id);
 }
 
 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -2955,7 +2969,7 @@
 
 	VM_BUG_ON(p->is_root_cache);
 	cachep = p->root_cache;
-	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
 #ifdef CONFIG_SLABINFO
@@ -2984,21 +2998,14 @@
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
 	if (ret == -EINTR)  {
 		/*
@@ -3138,7 +3145,7 @@
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
 
-	VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+	VM_BUG_ON(!is_root_cache(s));
 
 	if (num_groups > memcg_limited_groups_array_size) {
 		int i;
@@ -3399,7 +3406,7 @@
 	idx = memcg_cache_id(memcg);
 
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cachep->memcg_params->memcg_caches[idx];
+	new_cachep = cache_from_memcg_idx(cachep, idx);
 	if (new_cachep) {
 		css_put(&memcg->css);
 		goto out;
@@ -3445,8 +3452,8 @@
 	 * we'll take the set_limit_mutex to protect ourselves against this.
 	 */
 	mutex_lock(&set_limit_mutex);
-	for (i = 0; i < memcg_limited_groups_array_size; i++) {
-		c = s->memcg_params->memcg_caches[i];
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg_idx(s, i);
 		if (!c)
 			continue;
 
@@ -3579,8 +3586,8 @@
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (likely(cachep->memcg_params->memcg_caches[idx])) {
-		cachep = cachep->memcg_params->memcg_caches[idx];
+	if (likely(cache_from_memcg_idx(cachep, idx))) {
+		cachep = cache_from_memcg_idx(cachep, idx);
 		goto out;
 	}
 
@@ -4350,7 +4357,7 @@
 	 * css_get() was called in uncharge().
 	 */
 	if (do_swap_account && swapout && memcg)
-		swap_cgroup_record(ent, css_id(&memcg->css));
+		swap_cgroup_record(ent, mem_cgroup_id(memcg));
 }
 #endif
 
@@ -4402,8 +4409,8 @@
 {
 	unsigned short old_id, new_id;
 
-	old_id = css_id(&from->css);
-	new_id = css_id(&to->css);
+	old_id = mem_cgroup_id(from);
+	new_id = mem_cgroup_id(to);
 
 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
 		mem_cgroup_swap_statistics(from, false);
@@ -5376,45 +5383,50 @@
 static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
 				struct cftype *cft, struct seq_file *m)
 {
+	struct numa_stat {
+		const char *name;
+		unsigned int lru_mask;
+	};
+
+	static const struct numa_stat stats[] = {
+		{ "total", LRU_ALL },
+		{ "file", LRU_ALL_FILE },
+		{ "anon", LRU_ALL_ANON },
+		{ "unevictable", BIT(LRU_UNEVICTABLE) },
+	};
+	const struct numa_stat *stat;
 	int nid;
-	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
-	unsigned long node_nr;
+	unsigned long nr;
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
-	seq_printf(m, "total=%lu", total_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
+		seq_printf(m, "%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
+							  stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
 	}
-	seq_putc(m, '\n');
 
-	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
-	seq_printf(m, "file=%lu", file_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				LRU_ALL_FILE);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		struct mem_cgroup *iter;
 
-	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
-	seq_printf(m, "anon=%lu", anon_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				LRU_ALL_ANON);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
+		nr = 0;
+		for_each_mem_cgroup_tree(iter, memcg)
+			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
+		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = 0;
+			for_each_mem_cgroup_tree(iter, memcg)
+				nr += mem_cgroup_node_nr_lru_pages(
+					iter, nid, stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
 	}
-	seq_putc(m, '\n');
 
-	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-	seq_printf(m, "unevictable=%lu", unevictable_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				BIT(LRU_UNEVICTABLE));
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
 	return 0;
 }
 #endif /* CONFIG_NUMA */
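
The memcg_numa_stat_show() rewrite above replaces four near-identical
print loops with a {name, lru_mask} table walked twice, once plain and
once hierarchical. The table-driven shape, stripped of memcg internals
(the page counts below are fabricated):

  #include <stdio.h>

  struct numa_stat {
          const char *name;
          unsigned int lru_mask;
  };

  static const struct numa_stat stats[] = {
          { "total", 0xf }, { "file", 0x3 }, { "anon", 0xc },
  };

  /* stand-in for mem_cgroup_node_nr_lru_pages(); numbers are made up */
  static unsigned long nr_pages(int nid, unsigned int mask)
  {
          return (unsigned long)(nid + 1) * mask;
  }

  int main(void)
  {
          const struct numa_stat *stat;
          int nid;

          for (stat = stats;
               stat < stats + sizeof(stats) / sizeof(*stats); stat++) {
                  printf("%s", stat->name);
                  for (nid = 0; nid < 2; nid++)
                          printf(" N%d=%lu", nid,
                                 nr_pages(nid, stat->lru_mask));
                  putchar('\n');
          }
          return 0;
  }
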
@@ -6166,7 +6178,6 @@
 	size_t size = memcg_size();
 
 	mem_cgroup_remove_from_trees(memcg);
-	free_css_id(&mem_cgroup_subsys, &memcg->css);
 
 	for_each_node(node)
 		free_mem_cgroup_per_zone_info(memcg, node);
@@ -6269,6 +6280,9 @@
 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
 	int error = 0;
 
+	if (css->cgroup->id > MEM_CGROUP_ID_MAX)
+		return -ENOSPC;
+
 	if (!parent)
 		return 0;
 
@@ -6540,7 +6554,7 @@
 	}
 	/* There is a swap entry and a page doesn't exist or isn't charged */
 	if (ent.val && !ret &&
-			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
+	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
 		ret = MC_TARGET_SWAP;
 		if (target)
 			target->ent = ent;
@@ -6591,10 +6605,10 @@
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
 			mc.precharge += HPAGE_PMD_NR;
-		spin_unlock(&vma->vm_mm->page_table_lock);
+		spin_unlock(ptl);
 		return 0;
 	}
 
@@ -6783,9 +6797,9 @@
 	 *    to be unlocked in __split_huge_page_splitting(), where the main
 	 *    part of thp split is not executed yet.
 	 */
-	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		if (mc.precharge < HPAGE_PMD_NR) {
-			spin_unlock(&vma->vm_mm->page_table_lock);
+			spin_unlock(ptl);
 			return 0;
 		}
 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
@@ -6802,7 +6816,7 @@
 			}
 			put_page(page);
 		}
-		spin_unlock(&vma->vm_mm->page_table_lock);
+		spin_unlock(ptl);
 		return 0;
 	}
 
@@ -6960,7 +6974,6 @@
 	.bind = mem_cgroup_bind,
 	.base_cftypes = mem_cgroup_files,
 	.early_init = 0,
-	.use_id = 1,
 };
 
 #ifdef CONFIG_MEMCG_SWAP
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bf3351b..b7c1716 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1269,7 +1269,7 @@
 
 	mf_cpu = &get_cpu_var(memory_failure_cpu);
 	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
-	if (kfifo_put(&mf_cpu->fifo, &entry))
+	if (kfifo_put(&mf_cpu->fifo, entry))
 		schedule_work_on(smp_processor_id(), &mf_cpu->work);
 	else
 		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
@@ -1423,19 +1423,6 @@
 		return 1;
 
 	/*
-	 * The lock_memory_hotplug prevents a race with memory hotplug.
-	 * This is a big hammer, a better would be nicer.
-	 */
-	lock_memory_hotplug();
-
-	/*
-	 * Isolate the page, so that it doesn't get reallocated if it
-	 * was free. This flag should be kept set until the source page
-	 * is freed and PG_hwpoison on it is set.
-	 */
-	if (get_pageblock_migratetype(p) != MIGRATE_ISOLATE)
-		set_migratetype_isolate(p, true);
-	/*
 	 * When the target page is a free hugepage, just remove it
 	 * from free hugepage list.
 	 */
@@ -1455,7 +1442,6 @@
 		/* Not a free page */
 		ret = 1;
 	}
-	unlock_memory_hotplug();
 	return ret;
 }
 
@@ -1654,15 +1640,28 @@
 		}
 	}
 
+	/*
+	 * The lock_memory_hotplug prevents a race with memory hotplug.
+	 * This is a big hammer; a better approach would be nicer.
+	 */
+	lock_memory_hotplug();
+
+	/*
+	 * Isolate the page, so that it doesn't get reallocated if it
+	 * was free. This flag should be kept set until the source page
+	 * is freed and PG_hwpoison on it is set.
+	 */
+	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+		set_migratetype_isolate(page, true);
+
 	ret = get_any_page(page, pfn, flags);
-	if (ret < 0)
-		goto unset;
-	if (ret) { /* for in-use pages */
+	unlock_memory_hotplug();
+	if (ret > 0) { /* for in-use pages */
 		if (PageHuge(page))
 			ret = soft_offline_huge_page(page, flags);
 		else
 			ret = __soft_offline_page(page, flags);
-	} else { /* for free pages */
+	} else if (ret == 0) { /* for free pages */
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
 			dequeue_hwpoisoned_huge_page(hpage);
@@ -1673,7 +1672,6 @@
 			atomic_long_inc(&num_poisoned_pages);
 		}
 	}
-unset:
 	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
 	return ret;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 1f2287e..0409e8f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -382,7 +382,7 @@
 	pgtable_t token = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
-	tlb->mm->nr_ptes--;
+	atomic_long_dec(&tlb->mm->nr_ptes);
 }
 
 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -453,8 +453,6 @@
 
 /*
  * This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
  */
 void free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
@@ -552,6 +550,7 @@
 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		pmd_t *pmd, unsigned long address)
 {
+	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm, address);
 	int wait_split_huge_page;
 	if (!new)
@@ -572,15 +571,15 @@
 	 */
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	wait_split_huge_page = 0;
 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
-		mm->nr_ptes++;
+		atomic_long_inc(&mm->nr_ptes);
 		pmd_populate(mm, pmd, new);
 		new = NULL;
 	} else if (unlikely(pmd_trans_splitting(*pmd)))
 		wait_split_huge_page = 1;
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	if (new)
 		pte_free(mm, new);
 	if (wait_split_huge_page)
@@ -681,7 +680,7 @@
 	if (vma->vm_ops)
 		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
 		       vma->vm_ops->fault);
-	if (vma->vm_file && vma->vm_file->f_op)
+	if (vma->vm_file)
 		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
 		       vma->vm_file->f_op->mmap);
 	dump_stack();
@@ -1518,20 +1517,20 @@
 			split_huge_page_pmd(vma, address, pmd);
 			goto split_fallthrough;
 		}
-		spin_lock(&mm->page_table_lock);
+		ptl = pmd_lock(mm, pmd);
 		if (likely(pmd_trans_huge(*pmd))) {
 			if (unlikely(pmd_trans_splitting(*pmd))) {
-				spin_unlock(&mm->page_table_lock);
+				spin_unlock(ptl);
 				wait_split_huge_page(vma->anon_vma, pmd);
 			} else {
 				page = follow_trans_huge_pmd(vma, address,
 							     pmd, flags);
-				spin_unlock(&mm->page_table_lock);
+				spin_unlock(ptl);
 				*page_mask = HPAGE_PMD_NR - 1;
 				goto out;
 			}
 		} else
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 		/* fall through */
 	}
 split_fallthrough:
@@ -4271,3 +4270,28 @@
 	}
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+static struct kmem_cache *page_ptl_cachep;
+void __init ptlock_cache_init(void)
+{
+	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+			SLAB_PANIC, NULL);
+}
+
+bool ptlock_alloc(struct page *page)
+{
+	spinlock_t *ptl;
+
+	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!ptl)
+		return false;
+	page->ptl = ptl;
+	return true;
+}
+
+void ptlock_free(struct page *page)
+{
+	kfree(page->ptl);
+}
+#endif
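
ptlock_alloc()/ptlock_free() above exist for configurations where debug
options bloat spinlock_t past what fits inside struct page
(BLOATED_SPINLOCKS), so the lock is allocated dynamically and struct page
carries only the pointer. The same shape in portable C (pthread stand-ins,
no kernel API):

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct page {
          pthread_mutex_t *ptl;   /* pointer, in case the lock is "bloated" */
  };

  static int ptlock_alloc(struct page *page)
  {
          page->ptl = malloc(sizeof(*page->ptl));
          if (!page->ptl)
                  return 0;
          pthread_mutex_init(page->ptl, NULL);
          return 1;
  }

  static void ptlock_free(struct page *page)
  {
          pthread_mutex_destroy(page->ptl);
          free(page->ptl);
  }

  int main(void)
  {
          struct page pg;

          if (!ptlock_alloc(&pg))
                  return 1;
          pthread_mutex_lock(pg.ptl);
          puts("locked");
          pthread_mutex_unlock(pg.ptl);
          ptlock_free(&pg);
          return 0;
  }
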
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ed85fe3..489f235 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -31,6 +31,7 @@
 #include <linux/firmware-map.h>
 #include <linux/stop_machine.h>
 #include <linux/hugetlb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 
@@ -365,8 +366,7 @@
 static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 			    unsigned long end_pfn)
 {
-	unsigned long old_pgdat_end_pfn =
-		pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
 
 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;
@@ -402,13 +402,12 @@
 static int __meminit __add_section(int nid, struct zone *zone,
 					unsigned long phys_start_pfn)
 {
-	int nr_pages = PAGES_PER_SECTION;
 	int ret;
 
 	if (pfn_valid(phys_start_pfn))
 		return -EEXIST;
 
-	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
+	ret = sparse_add_one_section(zone, phys_start_pfn);
 
 	if (ret < 0)
 		return ret;
@@ -579,9 +578,9 @@
 static void shrink_pgdat_span(struct pglist_data *pgdat,
 			      unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pgdat_start_pfn =  pgdat->node_start_pfn;
-	unsigned long pgdat_end_pfn =
-		pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
+	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
+	unsigned long pgdat_end_pfn = p;
 	unsigned long pfn;
 	struct mem_section *ms;
 	int nid = pgdat->node_id;
@@ -935,7 +934,7 @@
 	arg.nr_pages = nr_pages;
 	node_states_check_changes_online(nr_pages, zone, &arg);
 
-	nid = page_to_nid(pfn_to_page(pfn));
+	nid = pfn_to_nid(pfn);
 
 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
 	ret = notifier_to_errno(ret);
@@ -1044,17 +1043,23 @@
 }
 
 
-/*
+/**
+ * try_online_node - online a node if offlined
+ *
  * called by cpu_up() to online a node without onlined memory.
  */
-int mem_online_node(int nid)
+int try_online_node(int nid)
 {
 	pg_data_t	*pgdat;
 	int	ret;
 
+	if (node_online(nid))
+		return 0;
+
 	lock_memory_hotplug();
 	pgdat = hotadd_new_pgdat(nid, 0);
 	if (!pgdat) {
+		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1062,6 +1067,12 @@
 	ret = register_one_node(nid);
 	BUG_ON(ret);
 
+	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+		mutex_lock(&zonelists_mutex);
+		build_all_zonelists(NULL, NULL);
+		mutex_unlock(&zonelists_mutex);
+	}
+
 out:
 	unlock_memory_hotplug();
 	return ret;
@@ -1412,6 +1423,36 @@
 }
 #endif /* CONFIG_MOVABLE_NODE */
 
+static int __init cmdline_parse_movable_node(char *p)
+{
+#ifdef CONFIG_MOVABLE_NODE
+	/*
+	 * Memory used by the kernel cannot be hot-removed because Linux
+	 * cannot migrate the kernel pages. When memory hotplug is
+	 * enabled, we should prevent memblock from allocating memory
+	 * for the kernel.
+	 *
+	 * ACPI SRAT records all hotpluggable memory ranges. But before
+	 * SRAT is parsed, we don't know which ranges those are.
+	 *
+	 * The kernel image is loaded into memory very early; we cannot
+	 * prevent that. So on a NUMA system, we mark any node the
+	 * kernel resides in as un-hotpluggable.
+	 *
+	 * Since, on modern servers, one node can hold tens of gigabytes
+	 * of memory, we can assume the memory around the kernel image
+	 * is also un-hotpluggable. So before SRAT is parsed, just
+	 * allocate memory near the kernel image to do our best to keep
+	 * the kernel away from hotpluggable memory.
+	 */
+	memblock_set_bottom_up(true);
+#else
+	pr_warn("movable_node option not supported\n");
+#endif
+	return 0;
+}
+early_param("movable_node", cmdline_parse_movable_node);
+
 /* check which state of node_states will be changed when offline memory */
 static void node_states_check_changes_offline(unsigned long nr_pages,
 		struct zone *zone, struct memory_notify *arg)
@@ -1702,7 +1743,7 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
+static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
 {
 	int ret = !is_memblock_offlined(mem);
 
@@ -1854,7 +1895,7 @@
 	 * if this is not the case.
 	 */
 	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
-				is_memblock_offlined_cb);
+				check_memblock_offlined_cb);
 	if (ret) {
 		unlock_memory_hotplug();
 		BUG();
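
The movable_node early_param above flips memblock into bottom-up mode: early allocations then land near the (un-hotpluggable) kernel image instead of at the top of memory, which SRAT may later declare hotpluggable. A toy first-fit carve-out showing just the direction switch; this models the idea only, not the memblock API:

#include <stdbool.h>
#include <stdio.h>

static bool bottom_up;		/* what memblock_set_bottom_up(true) toggles */

/* carve 'size' out of [*base, *limit): from the bottom or from the top */
static unsigned long alloc_range(unsigned long *base, unsigned long *limit,
				 unsigned long size)
{
	unsigned long addr;

	if (*limit - *base < size)
		return 0;	/* 0 == allocation failure in this toy */

	if (bottom_up) {
		addr = *base;
		*base += size;	/* grows away from high, hotpluggable memory */
	} else {
		*limit -= size;
		addr = *limit;	/* default: take from the top */
	}
	return addr;
}

int main(void)
{
	unsigned long base = 0x1000000, limit = 0x40000000;

	bottom_up = true;	/* as after parsing "movable_node" */
	printf("early alloc at %#lx (near the kernel image)\n",
	       alloc_range(&base, &limit, 0x100000));
	return 0;
}
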
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 71cb253..c4403cd 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -525,8 +525,9 @@
 #ifdef CONFIG_HUGETLB_PAGE
 	int nid;
 	struct page *page;
+	spinlock_t *ptl;
 
-	spin_lock(&vma->vm_mm->page_table_lock);
+	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
 	page = pte_page(huge_ptep_get((pte_t *)pmd));
 	nid = page_to_nid(page);
 	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -536,7 +537,7 @@
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
 		isolate_huge_page(page, private);
 unlock:
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	spin_unlock(ptl);
 #else
 	BUG();
 #endif
@@ -1125,7 +1126,7 @@
 	tmp = *from;
 	while (!nodes_empty(tmp)) {
 		int s,d;
-		int source = -1;
+		int source = NUMA_NO_NODE;
 		int dest = 0;
 
 		for_each_node_mask(s, tmp) {
@@ -1160,7 +1161,7 @@
 			if (!node_isset(dest, tmp))
 				break;
 		}
-		if (source == -1)
+		if (source == NUMA_NO_NODE)
 			break;
 
 		node_clear(source, tmp);
@@ -1835,7 +1836,7 @@
 	unsigned nnodes = nodes_weight(pol->v.nodes);
 	unsigned target;
 	int c;
-	int nid = -1;
+	int nid = NUMA_NO_NODE;
 
 	if (!nnodes)
 		return numa_node_id();
@@ -1872,11 +1873,11 @@
 
 /*
  * Return the bit number of a random bit set in the nodemask.
- * (returns -1 if nodemask is empty)
+ * (returns NUMA_NO_NODE if nodemask is empty)
  */
 int node_random(const nodemask_t *maskp)
 {
-	int w, bit = -1;
+	int w, bit = NUMA_NO_NODE;
 
 	w = nodes_weight(*maskp);
 	if (w)
@@ -2914,62 +2915,45 @@
  * @maxlen:  length of @buffer
  * @pol:  pointer to mempolicy to be formatted
  *
- * Convert a mempolicy into a string.
- * Returns the number of characters in buffer (if positive)
- * or an error (negative)
+ * Convert @pol into a string.  If @buffer is too short, truncate the string.
+ * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+ * longest flag, "relative", and to display at least a few node ids.
  */
-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 {
 	char *p = buffer;
-	int l;
-	nodemask_t nodes;
-	unsigned short mode;
-	unsigned short flags = pol ? pol->flags : 0;
+	nodemask_t nodes = NODE_MASK_NONE;
+	unsigned short mode = MPOL_DEFAULT;
+	unsigned short flags = 0;
 
-	/*
-	 * Sanity check:  room for longest mode, flag and some nodes
-	 */
-	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
-
-	if (!pol || pol == &default_policy)
-		mode = MPOL_DEFAULT;
-	else
+	if (pol && pol != &default_policy) {
 		mode = pol->mode;
+		flags = pol->flags;
+	}
 
 	switch (mode) {
 	case MPOL_DEFAULT:
-		nodes_clear(nodes);
 		break;
-
 	case MPOL_PREFERRED:
-		nodes_clear(nodes);
 		if (flags & MPOL_F_LOCAL)
 			mode = MPOL_LOCAL;
 		else
 			node_set(pol->v.preferred_node, nodes);
 		break;
-
 	case MPOL_BIND:
-		/* Fall through */
 	case MPOL_INTERLEAVE:
 		nodes = pol->v.nodes;
 		break;
-
 	default:
-		return -EINVAL;
+		WARN_ON_ONCE(1);
+		snprintf(p, maxlen, "unknown");
+		return;
 	}
 
-	l = strlen(policy_modes[mode]);
-	if (buffer + maxlen < p + l + 1)
-		return -ENOSPC;
-
-	strcpy(p, policy_modes[mode]);
-	p += l;
+	p += snprintf(p, maxlen, policy_modes[mode]);
 
 	if (flags & MPOL_MODE_FLAGS) {
-		if (buffer + maxlen < p + 2)
-			return -ENOSPC;
-		*p++ = '=';
+		p += snprintf(p, buffer + maxlen - p, "=");
 
 		/*
 		 * Currently, the only defined flags are mutually exclusive
@@ -2981,10 +2965,7 @@
 	}
 
 	if (!nodes_empty(nodes)) {
-		if (buffer + maxlen < p + 2)
-			return -ENOSPC;
-		*p++ = ':';
+		p += snprintf(p, buffer + maxlen - p, ":");
 	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
 	}
-	return p - buffer;
 }
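
The mpol_to_str() rewrite above drops the manual bounds checks and -ENOSPC returns in favour of snprintf()-style appends that silently truncate, recomputing the remaining room as buffer + maxlen - p after each step. Here is a user-space sketch of that cursor-and-remaining-space idiom; unlike a raw snprintf() return value, this helper clamps the advance the way the kernel's scnprintf() does:

#include <stdio.h>
#include <string.h>

/*
 * Append formatted text at *p, never writing past buf + len, and
 * advance *p by what was actually written (scnprintf-like clamping).
 */
static void append(char *buf, size_t len, char **p, const char *fmt,
		   const char *arg)
{
	size_t avail = buf + len - *p;
	int n = snprintf(*p, avail, fmt, arg);

	if (n < 0)
		return;
	*p += ((size_t)n < avail) ? (size_t)n : (avail ? avail - 1 : 0);
}

int main(void)
{
	char buf[32];
	char *p = buf;

	append(buf, sizeof(buf), &p, "%s", "interleave");
	append(buf, sizeof(buf), &p, "=%s", "relative");
	append(buf, sizeof(buf), &p, ":%s", "0-3");
	printf("%s\n", buf);	/* "interleave=relative:0-3", or truncated */
	return 0;
}
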
diff --git a/mm/migrate.c b/mm/migrate.c
index dfc8300..316e720 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -130,7 +130,7 @@
 		ptep = huge_pte_offset(mm, addr);
 		if (!ptep)
 			goto out;
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
 	} else {
 		pmd = mm_find_pmd(mm, addr);
 		if (!pmd)
@@ -249,9 +249,10 @@
 	__migration_entry_wait(mm, ptep, ptl);
 }
 
-void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte)
 {
-	spinlock_t *ptl = &(mm)->page_table_lock;
+	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
 	__migration_entry_wait(mm, pte, ptl);
 }
 
@@ -1666,6 +1667,7 @@
 				unsigned long address,
 				struct page *page, int node)
 {
+	spinlock_t *ptl;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
@@ -1705,9 +1707,9 @@
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, entry))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1752,7 +1754,7 @@
 	 * before it's fully transferred to the new page.
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 
 	unlock_page(new_page);
 	unlock_page(page);
diff --git a/mm/mmap.c b/mm/mmap.c
index 9d54851..834b2d7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -179,14 +179,12 @@
 		goto error;
 	}
 
-	allowed = (totalram_pages - hugetlb_total_pages())
-	       	* sysctl_overcommit_ratio / 100;
+	allowed = vm_commit_limit();
 	/*
 	 * Reserve some for root
 	 */
 	if (!cap_sys_admin)
 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
-	allowed += total_swap_pages;
 
 	/*
 	 * Don't let a single process grow so big a user can't recover
@@ -1299,7 +1297,7 @@
 				vm_flags &= ~VM_MAYEXEC;
 			}
 
-			if (!file->f_op || !file->f_op->mmap)
+			if (!file->f_op->mmap)
 				return -ENODEV;
 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
 				return -EINVAL;
@@ -1856,7 +1854,7 @@
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
 
-	if (len > TASK_SIZE)
+	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -1865,14 +1863,14 @@
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
 	info.flags = 0;
 	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
+	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
 	info.align_mask = 0;
 	return vm_unmapped_area(&info);
@@ -1895,7 +1893,7 @@
 	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
-	if (len > TASK_SIZE)
+	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -1905,14 +1903,14 @@
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 				(!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = PAGE_SIZE;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
 	info.align_mask = 0;
 	addr = vm_unmapped_area(&info);
@@ -1951,7 +1949,7 @@
 		return -ENOMEM;
 
 	get_area = current->mm->get_unmapped_area;
-	if (file && file->f_op && file->f_op->get_unmapped_area)
+	if (file && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
 	addr = get_area(file, addr, len, pgoff, flags);
 	if (IS_ERR_VALUE(addr))
@@ -2726,7 +2724,8 @@
 	}
 	vm_unacct_memory(nr_accounted);
 
-	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+	WARN_ON(atomic_long_read(&mm->nr_ptes) >
+			(FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
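
The mm/mmap.c hunks teach both unmapped-area searches about mmap_min_addr: a request must fit into [mmap_min_addr, TASK_SIZE), a hint below the floor is ignored, and the search limits are clamped accordingly. The arithmetic reduces to a couple of unsigned comparisons; a sketch with illustrative constants:

#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE     0x7ffffffff000UL	/* illustrative x86-64 value */
#define PAGE_SIZE     4096UL
#define MMAP_MIN_ADDR 65536UL		/* typical vm.mmap_min_addr */

/* can a hinted mapping [addr, addr+len) possibly be honoured? */
static bool hint_ok(unsigned long addr, unsigned long len)
{
	if (len > TASK_SIZE - MMAP_MIN_ADDR)	/* never fits at all */
		return false;
	return addr >= MMAP_MIN_ADDR && TASK_SIZE - len >= addr;
}

int main(void)
{
	/* floor for a fresh search, as in the top-down path */
	unsigned long low = PAGE_SIZE > MMAP_MIN_ADDR ? PAGE_SIZE
						      : MMAP_MIN_ADDR;

	printf("search floor: %#lx\n", low);
	printf("hint 0x1000 ok? %d\n", hint_ok(0x1000, 4096));   /* 0 */
	printf("hint 0x20000 ok? %d\n", hint_ok(0x20000, 4096)); /* 1 */
	return 0;
}
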
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a597f2f..26667971 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -112,6 +112,7 @@
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
+	unsigned long nr_huge_updates = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -126,9 +127,10 @@
 						newprot, prot_numa);
 
 				if (nr_ptes) {
-					if (nr_ptes == HPAGE_PMD_NR)
-						pages++;
-
+					if (nr_ptes == HPAGE_PMD_NR) {
+						pages += HPAGE_PMD_NR;
+						nr_huge_updates++;
+					}
 					continue;
 				}
 			}
@@ -141,6 +143,8 @@
 		pages += this_pages;
 	} while (pmd++, addr = next, addr != end);
 
+	if (nr_huge_updates)
+		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
 	return pages;
 }
 
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 61107cf..2c254d3 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,27 +82,18 @@
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-	unsigned long i, start_aligned, end_aligned;
-	int order = ilog2(BITS_PER_LONG);
+	int order;
 
-	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
-	end_aligned = end & ~(BITS_PER_LONG - 1);
+	while (start < end) {
+		order = min(MAX_ORDER - 1UL, __ffs(start));
 
-	if (end_aligned <= start_aligned) {
-		for (i = start; i < end; i++)
-			__free_pages_bootmem(pfn_to_page(i), 0);
+		while (start + (1UL << order) > end)
+			order--;
 
-		return;
+		__free_pages_bootmem(pfn_to_page(start), order);
+
+		start += (1UL << order);
 	}
-
-	for (i = start; i < start_aligned; i++)
-		__free_pages_bootmem(pfn_to_page(i), 0);
-
-	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
-		__free_pages_bootmem(pfn_to_page(i), order);
-
-	for (i = end_aligned; i < end; i++)
-		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
 static unsigned long __init __free_memory_core(phys_addr_t start,
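
The rewritten __free_pages_memory() collapses the old three-pass head/body/tail logic into one loop: at each step it frees the largest buddy-order block that is naturally aligned at start (the lowest set bit, __ffs()) and still fits below end. A user-space sketch of that order computation, with __builtin_ctzl() standing in for __ffs() and an explicit guard for pfn 0:

#include <stdio.h>

#define MAX_ORDER 11	/* as in the kernel's buddy allocator */

/* lowest set bit index == largest natural alignment of pfn */
static unsigned long my_ffs(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);
}

static void free_range(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long order =
			start ? my_ffs(start) : MAX_ORDER - 1UL;

		if (order > MAX_ORDER - 1UL)
			order = MAX_ORDER - 1UL;
		while (start + (1UL << order) > end)
			order--;

		printf("free pfn %lu, order %lu (%lu pages)\n",
		       start, order, 1UL << order);
		start += 1UL << order;
	}
}

int main(void)
{
	free_range(5, 1000);	/* head, body and tail in one loop */
	return 0;
}
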
diff --git a/mm/nommu.c b/mm/nommu.c
index ecd1f15..fec093a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -937,7 +937,7 @@
 		struct address_space *mapping;
 
 		/* files must support mmap */
-		if (!file->f_op || !file->f_op->mmap)
+		if (!file->f_op->mmap)
 			return -ENODEV;
 
 		/* work out if what we've got could possibly be shared
@@ -1948,13 +1948,12 @@
 		goto error;
 	}
 
-	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+	allowed = vm_commit_limit();
 	/*
 	 * Reserve some 3% for root
 	 */
 	if (!cap_sys_admin)
 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
-	allowed += total_swap_pages;
 
 	/*
 	 * Don't let a single process grow so big a user can't recover
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6738c47..1e4a600 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -161,7 +161,7 @@
 	 * The baseline for the badness score is the proportion of RAM that each
 	 * task's rss, pagetable and swap space use.
 	 */
-	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+	points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
 		 get_mm_counter(p->mm, MM_SWAPENTS);
 	task_unlock(p);
 
@@ -364,10 +364,10 @@
 			continue;
 		}
 
-		pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu         %5hd %s\n",
+		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu         %5hd %s\n",
 			task->pid, from_kuid(&init_user_ns, task_uid(task)),
 			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-			task->mm->nr_ptes,
+			atomic_long_read(&task->mm->nr_ptes),
 			get_mm_counter(task->mm, MM_SWAPENTS),
 			task->signal->oom_score_adj, task->comm);
 		task_unlock(task);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73d812f..580a5f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,8 +234,8 @@
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
-
-	if (unlikely(page_group_by_mobility_disabled))
+	if (unlikely(page_group_by_mobility_disabled &&
+		     migratetype < MIGRATE_PCPTYPES))
 		migratetype = MIGRATE_UNMOVABLE;
 
 	set_pageblock_flags_group(page, (unsigned long)migratetype,
@@ -1027,6 +1027,10 @@
 {
 	int current_order = page_order(page);
 
+	/*
+	 * When borrowing from MIGRATE_CMA, we need to release the excess
+	 * buddy pages to CMA itself.
+	 */
 	if (is_migrate_cma(fallback_type))
 		return fallback_type;
 
@@ -1091,21 +1095,11 @@
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			/*
-			 * Borrow the excess buddy pages as well, irrespective
-			 * of whether we stole freepages, or took ownership of
-			 * the pageblock or not.
-			 *
-			 * Exception: When borrowing from MIGRATE_CMA, release
-			 * the excess buddy pages to CMA itself.
-			 */
 			expand(zone, page, order, current_order, area,
-			       is_migrate_cma(migratetype)
-			     ? migratetype : start_migratetype);
+			       new_type);
 
-			trace_mm_page_alloc_extfrag(page, order,
-				current_order, start_migratetype, migratetype,
-				new_type == start_migratetype);
+			trace_mm_page_alloc_extfrag(page, order, current_order,
+				start_migratetype, migratetype, new_type);
 
 			return page;
 		}
@@ -1711,7 +1705,7 @@
  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
  * that have to skip over a lot of full or unallowed zones.
  *
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
  * returns a pointer to the allowed node mask (either the current
  * tasks mems_allowed, or node_states[N_MEMORY].)
  *
@@ -2593,7 +2587,7 @@
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
@@ -3881,8 +3875,6 @@
 	return ffz(~size);
 }
 
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
 /*
  * Check if a pageblock contains reserved pages
  */
@@ -4266,7 +4258,7 @@
 	 */
 	zone->pageset = &boot_pageset;
 
-	if (zone->present_pages)
+	if (populated_zone(zone))
 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
 			zone->name, zone->present_pages,
 					 zone_batchsize(zone));
@@ -5160,7 +5152,7 @@
 
 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages) {
+		if (populated_zone(zone)) {
 			node_set_state(nid, N_HIGH_MEMORY);
 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
 			    zone_type <= ZONE_NORMAL)
diff --git a/mm/percpu.c b/mm/percpu.c
index 8c8e08f..0d10def 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1706,8 +1706,9 @@
 
 out_free_areas:
 	for (group = 0; group < ai->nr_groups; group++)
-		free_fn(areas[group],
-			ai->groups[group].nr_units * ai->unit_size);
+		if (areas[group])
+			free_fn(areas[group],
+				ai->groups[group].nr_units * ai->unit_size);
 out_free:
 	pcpu_free_alloc_info(ai);
 	if (areas)
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 3929a40..cbb3854 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -151,14 +151,14 @@
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));
 
 	/* FIFO */
-	if (!mm->pmd_huge_pte)
+	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(&pgtable->lru);
 	else
-		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-	mm->pmd_huge_pte = pgtable;
+		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
+	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
@@ -170,14 +170,14 @@
 {
 	pgtable_t pgtable;
 
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));
 
 	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
+	pgtable = pmd_huge_pte(mm, pmdp);
 	if (list_empty(&pgtable->lru))
-		mm->pmd_huge_pte = NULL;
+		pmd_huge_pte(mm, pmdp) = NULL;
 	else {
-		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
 					      struct page, lru);
 		list_del(&pgtable->lru);
 	}
diff --git a/mm/readahead.c b/mm/readahead.c
index e4ed041..7cdbb44 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -401,6 +401,7 @@
 		   unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
+	pgoff_t prev_offset;
 
 	/*
 	 * start of file
@@ -452,8 +453,11 @@
 
 	/*
 	 * sequential cache miss
+	 * trivial case: (offset - prev_offset) == 1
+	 * unaligned reads: (offset - prev_offset) == 0
 	 */
-	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+	if (offset - prev_offset <= 1UL)
 		goto initial_readahead;
 
 	/*
@@ -569,7 +573,7 @@
 do_readahead(struct address_space *mapping, struct file *filp,
 	     pgoff_t index, unsigned long nr)
 {
-	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+	if (!mapping || !mapping->a_ops)
 		return -EINVAL;
 
 	force_page_cache_readahead(mapping, filp, index, nr);
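
The readahead fix shifts prev_pos, a 64-bit file position, down to a page index before comparing, so a cache miss counts as sequential only when it lands on the same page as the previous read (unaligned reads) or the very next one; unsigned wraparound makes any backwards jump fail the test. The predicate in isolation, assuming 4K pages:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* 4K pages assumed */

/* sequential if offset lands on the previous page or the next one */
static bool miss_is_sequential(unsigned long long prev_pos,
			       unsigned long offset)
{
	unsigned long prev_offset =
		(unsigned long)(prev_pos >> PAGE_CACHE_SHIFT);

	return offset - prev_offset <= 1UL;	/* 0: unaligned, 1: next */
}

int main(void)
{
	/* last read ended mid-page 9; a miss on page 9 or 10 is sequential */
	printf("%d\n", miss_is_sequential(9ULL << 12 | 100, 9));  /* 1 */
	printf("%d\n", miss_is_sequential(9ULL << 12 | 100, 10)); /* 1 */
	printf("%d\n", miss_is_sequential(9ULL << 12 | 100, 42)); /* 0 */
	return 0;
}
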
diff --git a/mm/rmap.c b/mm/rmap.c
index fd3ee7a..55c8b8d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -601,7 +601,7 @@
 
 	if (unlikely(PageHuge(page))) {
 		pte = huge_pte_offset(mm, address);
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
 		goto check;
 	}
 
@@ -665,25 +665,23 @@
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	int referenced = 0;
 
 	if (unlikely(PageTransHuge(page))) {
 		pmd_t *pmd;
 
-		spin_lock(&mm->page_table_lock);
 		/*
 		 * rmap might return false positives; we must filter
 		 * these out using page_check_address_pmd().
 		 */
 		pmd = page_check_address_pmd(page, mm, address,
-					     PAGE_CHECK_ADDRESS_PMD_FLAG);
-		if (!pmd) {
-			spin_unlock(&mm->page_table_lock);
+					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+		if (!pmd)
 			goto out;
-		}
 
 		if (vma->vm_flags & VM_LOCKED) {
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 			*mapcount = 0;	/* break early from loop */
 			*vm_flags |= VM_LOCKED;
 			goto out;
@@ -692,10 +690,9 @@
 		/* go ahead even if the pmd is pmd_trans_splitting() */
 		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 	} else {
 		pte_t *pte;
-		spinlock_t *ptl;
 
 		/*
 		 * rmap might return false positives; we must filter
diff --git a/mm/slab.c b/mm/slab.c
index 2580db0..0c8967b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3982,7 +3982,7 @@
 
 	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
 	for_each_memcg_cache_index(i) {
-		c = cache_from_memcg(cachep, i);
+		c = cache_from_memcg_idx(cachep, i);
 		if (c)
 			/* return value determined by the parent cache only */
 			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
diff --git a/mm/slab.h b/mm/slab.h
index a535033..0859c42 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -160,7 +160,8 @@
 	return s->name;
 }
 
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	if (!s->memcg_params)
 		return NULL;
@@ -204,7 +205,8 @@
 	return s->name;
 }
 
-static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+static inline struct kmem_cache *
+cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	return NULL;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e2e98af..0b7bb39 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -571,7 +571,7 @@
 		return;
 
 	for_each_memcg_cache_index(i) {
-		c = cache_from_memcg(s, i);
+		c = cache_from_memcg_idx(s, i);
 		if (!c)
 			continue;
 
diff --git a/mm/slub.c b/mm/slub.c
index c3eb3d3..7e8bd8d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -955,7 +955,7 @@
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
-	 * Trouble is that we may no longer disable interupts in the fast path
+	 * Trouble is that we may no longer disable interrupts in the fast path
 	 * So in order to make the debug calls that expect irqs to be
 	 * disabled we need to disable interrupts temporarily.
 	 */
@@ -4983,7 +4983,7 @@
 		 * through the descendants with best-effort propagation.
 		 */
 		for_each_memcg_cache_index(i) {
-			struct kmem_cache *c = cache_from_memcg(s, i);
+			struct kmem_cache *c = cache_from_memcg_idx(s, i);
 			if (c)
 				attribute->store(c, buf, len);
 		}
diff --git a/mm/sparse.c b/mm/sparse.c
index 4ac1d7e..8cc7be0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -590,33 +590,32 @@
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						 unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid);
 }
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
 	vmemmap_free(start, end);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
 	vmemmap_free(start, end);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
 {
 	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
 	if (page)
@@ -634,28 +633,30 @@
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
-	return __kmalloc_section_memmap(nr_pages);
+	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
-			   get_order(sizeof(struct page) * nr_pages));
+			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	unsigned long magic;
+	unsigned long magic, nr_pages;
 	struct page *page = virt_to_page(memmap);
 
+	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+		>> PAGE_SHIFT;
+
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;
 
@@ -684,8 +685,7 @@
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-			   int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct pglist_data *pgdat = zone->zone_pgdat;
@@ -702,12 +702,12 @@
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 		return -ENOMEM;
 	}
 
@@ -719,7 +719,7 @@
 		goto out;
 	}
 
-	memset(memmap, 0, sizeof(struct page) * nr_pages);
+	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
 
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
@@ -729,7 +729,7 @@
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 	}
 	return ret;
 }
@@ -759,7 +759,6 @@
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
 	struct page *usemap_page;
-	unsigned long nr_pages;
 
 	if (!usemap)
 		return;
@@ -771,7 +770,7 @@
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+			__kfree_section_memmap(memmap);
 		return;
 	}
 
@@ -780,12 +779,8 @@
 	 * on the section which has pgdat at boot time. Just keep it as is now.
 	 */
 
-	if (memmap) {
-		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
-			>> PAGE_SHIFT;
-
-		free_map_bootmem(memmap, nr_pages);
-	}
+	if (memmap)
+		free_map_bootmem(memmap);
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
diff --git a/mm/swap.c b/mm/swap.c
index 759c3ca..7a9f80d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -934,7 +934,8 @@
 #ifdef CONFIG_SWAP
 	int i;
 
-	bdi_init(swapper_spaces[0].backing_dev_info);
+	if (bdi_init(swapper_spaces[0].backing_dev_info))
+		panic("Failed to init swap bdi");
 	for (i = 0; i < MAX_SWAPFILES; i++) {
 		spin_lock_init(&swapper_spaces[i].tree_lock);
 		INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index de7c904..612a7c9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -707,7 +707,7 @@
 	return (swp_entry_t) {0};
 }
 
-/* The only caller of this function is now susupend routine */
+/* The only caller of this function is now suspend routine */
 swp_entry_t get_swap_page_of_type(int type)
 {
 	struct swap_info_struct *si;
@@ -845,7 +845,7 @@
 }
 
 /*
- * Caller has made sure that the swapdevice corresponding to entry
+ * Caller has made sure that the swap device corresponding to entry
  * is still around or has not been recycled.
  */
 void swap_free(swp_entry_t entry)
@@ -947,7 +947,7 @@
 	 * original page might be freed under memory pressure, then
 	 * later read back in from swap, now with the wrong data.
 	 *
-	 * Hibration suspends storage while it is writing the image
+	 * Hibernation suspends storage while it is writing the image
 	 * to disk so check that here.
 	 */
 	if (pm_suspended_storage())
@@ -1179,7 +1179,7 @@
 	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
 	 * of unmatched parts which look like swp_pte, so unuse_pte must
 	 * recheck under pte lock.  Scanning without pte lock lets it be
-	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
 	 */
 	pte = pte_offset_map(pmd, addr);
 	do {
@@ -1924,17 +1924,17 @@
 	p->cluster_info = NULL;
 	p->flags = 0;
 	frontswap_map = frontswap_map_get(p);
-	frontswap_map_set(p, NULL);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
 	frontswap_invalidate_area(type);
+	frontswap_map_set(p, NULL);
 	mutex_unlock(&swapon_mutex);
 	free_percpu(p->percpu_cluster);
 	p->percpu_cluster = NULL;
 	vfree(swap_map);
 	vfree(cluster_info);
 	vfree(frontswap_map);
-	/* Destroy swap account informatin */
+	/* Destroy swap account information */
 	swap_cgroup_swapoff(type);
 
 	inode = mapping->host;
@@ -2786,8 +2786,8 @@
 
 	/*
 	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
-	 * no architecture is using highmem pages for kernel pagetables: so it
-	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+	 * no architecture is using highmem pages for kernel page tables: so it
+	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
 	 */
 	head = vmalloc_to_page(si->swap_map + offset);
 	offset &= ~PAGE_MASK;
diff --git a/mm/util.c b/mm/util.c
index eaf63fc2..f7bc209 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -7,6 +7,9 @@
 #include <linux/security.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/mman.h>
+#include <linux/hugetlb.h>
+
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -398,6 +401,16 @@
 	return mapping;
 }
 
+/*
+ * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
+ */
+unsigned long vm_commit_limit(void)
+{
+	return ((totalram_pages - hugetlb_total_pages())
+		* sysctl_overcommit_ratio / 100) + total_swap_pages;
+}
+
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
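
vm_commit_limit() gathers the OVERCOMMIT_NEVER limit that mmap.c and nommu.c previously open-coded; swap pages are now folded into the limit before the root reserve is subtracted, with the same net result. The formula as a standalone sketch, with illustrative values for a 16G machine with 8G of swap:

#include <stdio.h>

/* all counts in 4K pages; values are illustrative */
static unsigned long totalram_pages = 4UL << 20;	/* 16G of RAM */
static unsigned long hugetlb_pages;
static unsigned long total_swap_pages = 2UL << 20;	/* 8G of swap */
static int sysctl_overcommit_ratio = 50;		/* the default */

static unsigned long vm_commit_limit(void)
{
	return (totalram_pages - hugetlb_pages)
		* sysctl_overcommit_ratio / 100 + total_swap_pages;
}

int main(void)
{
	printf("CommitLimit: %lu pages (%lu MB)\n",
	       vm_commit_limit(), vm_commit_limit() >> 8);
	return 0;
}
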
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1074543..0fdf968 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -359,6 +359,12 @@
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * Only scan the relevant parts containing pointers to other objects
+	 * to avoid false negatives.
+	 */
+	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
 retry:
 	spin_lock(&vmap_area_lock);
 	/*
@@ -1546,7 +1552,7 @@
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node, const void *caller)
+				 pgprot_t prot, int node)
 {
 	const int order = 0;
 	struct page **pages;
@@ -1560,13 +1566,12 @@
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
-				PAGE_KERNEL, node, caller);
+				PAGE_KERNEL, node, area->caller);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	area->pages = pages;
-	area->caller = caller;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
@@ -1577,7 +1582,7 @@
 		struct page *page;
 		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
-		if (node < 0)
+		if (node == NUMA_NO_NODE)
 			page = alloc_page(tmp_mask);
 		else
 			page = alloc_pages_node(node, tmp_mask, order);
@@ -1634,9 +1639,9 @@
 	if (!area)
 		goto fail;
 
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
 	if (!addr)
-		goto fail;
+		return NULL;
 
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -1646,11 +1651,11 @@
 	clear_vm_uninitialized_flag(area);
 
 	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
+	 * A ref_count = 2 is needed because vm_struct allocated in
+	 * __get_vm_area_node() contains a reference to the virtual address of
+	 * the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 2, gfp_mask);
 
 	return addr;
 
@@ -2563,6 +2568,11 @@
 		if (!counters)
 			return;
 
+		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+		smp_rmb();
+		if (v->flags & VM_UNINITIALIZED)
+			return;
+
 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 
 		for (nr = 0; nr < v->nr_pages; nr++)
@@ -2579,23 +2589,15 @@
 	struct vmap_area *va = p;
 	struct vm_struct *v;
 
-	if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+	/*
+	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
+	 * vmap area is being torn down, or is a vm_map_ram allocation.
+	 */
+	if (!(va->flags & VM_VM_AREA))
 		return 0;
 
-	if (!(va->flags & VM_VM_AREA)) {
-		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
-			(void *)va->va_start, (void *)va->va_end,
-					va->va_end - va->va_start);
-		return 0;
-	}
-
 	v = va->vm;
 
-	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
-	smp_rmb();
-	if (v->flags & VM_UNINITIALIZED)
-		return 0;
-
 	seq_printf(m, "0x%pK-0x%pK %7ld",
 		v->addr, v->addr + v->size, v->size);
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9bb3145..7249614 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -812,6 +812,7 @@
 
 #ifdef CONFIG_NUMA_BALANCING
 	"numa_pte_updates",
+	"numa_huge_pte_updates",
 	"numa_hint_faults",
 	"numa_hint_faults_local",
 	"numa_pages_migrated",
@@ -1229,6 +1230,20 @@
 	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
+static void vmstat_cpu_dead(int node)
+{
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (cpu_to_node(cpu) == node)
+			goto end;
+
+	node_clear_state(node, N_CPU);
+end:
+	put_online_cpus();
+}
+
 /*
  * Use the cpu notifier to insure that the thresholds are recalculated
  * when necessary.
@@ -1258,6 +1273,7 @@
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		refresh_zone_stat_thresholds();
+		vmstat_cpu_dead(cpu_to_node(cpu));
 		break;
 	default:
 		break;
@@ -1276,8 +1292,12 @@
 
 	register_cpu_notifier(&vmstat_notifier);
 
-	for_each_online_cpu(cpu)
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
 		start_cpu_timer(cpu);
+		node_set_state(cpu_to_node(cpu), N_CPU);
+	}
+	put_online_cpus();
 #endif
 #ifdef CONFIG_PROC_FS
 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/mm/zswap.c b/mm/zswap.c
index d93510c..5a63f78 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -217,6 +217,7 @@
 	if (!entry)
 		return NULL;
 	entry->refcount = 1;
+	RB_CLEAR_NODE(&entry->rbnode);
 	return entry;
 }
 
@@ -225,19 +226,6 @@
 	kmem_cache_free(zswap_entry_cache, entry);
 }
 
-/* caller must hold the tree lock */
-static void zswap_entry_get(struct zswap_entry *entry)
-{
-	entry->refcount++;
-}
-
-/* caller must hold the tree lock */
-static int zswap_entry_put(struct zswap_entry *entry)
-{
-	entry->refcount--;
-	return entry->refcount;
-}
-
 /*********************************
 * rbtree functions
 **********************************/
@@ -285,6 +273,61 @@
 	return 0;
 }
 
+static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
+{
+	if (!RB_EMPTY_NODE(&entry->rbnode)) {
+		rb_erase(&entry->rbnode, root);
+		RB_CLEAR_NODE(&entry->rbnode);
+	}
+}
+
+/*
+ * Carries out the common pattern of freeing an entry's zbud allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_tree *tree,
+			struct zswap_entry *entry)
+{
+	zbud_free(tree->pool, entry->handle);
+	zswap_entry_cache_free(entry);
+	atomic_dec(&zswap_stored_pages);
+	zswap_pool_pages = zbud_get_pool_size(tree->pool);
+}
+
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+	entry->refcount++;
+}
+
+/* caller must hold the tree lock
+ * remove the entry from the tree and free it, if nobody references it
+ */
+static void zswap_entry_put(struct zswap_tree *tree,
+			struct zswap_entry *entry)
+{
+	int refcount = --entry->refcount;
+
+	BUG_ON(refcount < 0);
+	if (refcount == 0) {
+		zswap_rb_erase(&tree->rbroot, entry);
+		zswap_free_entry(tree, entry);
+	}
+}
+
+/* caller must hold the tree lock */
+static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
+				pgoff_t offset)
+{
+	struct zswap_entry *entry = NULL;
+
+	entry = zswap_rb_search(root, offset);
+	if (entry)
+		zswap_entry_get(entry);
+
+	return entry;
+}
+
 /*********************************
 * per-cpu code
 **********************************/
@@ -368,18 +411,6 @@
 		zswap_pool_pages);
 }
 
-/*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
- * freeing the entry itself, and decrementing the number of stored pages.
- */
-static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
-{
-	zbud_free(tree->pool, entry->handle);
-	zswap_entry_cache_free(entry);
-	atomic_dec(&zswap_stored_pages);
-	zswap_pool_pages = zbud_get_pool_size(tree->pool);
-}
-
 /*********************************
 * writeback code
 **********************************/
@@ -387,7 +418,7 @@
 enum zswap_get_swap_ret {
 	ZSWAP_SWAPCACHE_NEW,
 	ZSWAP_SWAPCACHE_EXIST,
-	ZSWAP_SWAPCACHE_NOMEM
+	ZSWAP_SWAPCACHE_FAIL,
 };
 
 /*
@@ -401,9 +432,10 @@
  * added to the swap cache, and returned in retpage.
  *
  * If success, the swap cache page is returned in retpage
- * Returns 0 if page was already in the swap cache, page is not locked
- * Returns 1 if the new page needs to be populated, page is locked
- * Returns <0 on error
+ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
+ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
+ *     the new page is added to swapcache and locked
+ * Returns ZSWAP_SWAPCACHE_FAIL on error
  */
 static int zswap_get_swap_cache_page(swp_entry_t entry,
 				struct page **retpage)
@@ -475,7 +507,7 @@
 	if (new_page)
 		page_cache_release(new_page);
 	if (!found_page)
-		return ZSWAP_SWAPCACHE_NOMEM;
+		return ZSWAP_SWAPCACHE_FAIL;
 	*retpage = found_page;
 	return ZSWAP_SWAPCACHE_EXIST;
 }
@@ -502,7 +534,7 @@
 	struct page *page;
 	u8 *src, *dst;
 	unsigned int dlen;
-	int ret, refcount;
+	int ret;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
@@ -517,23 +549,22 @@
 
 	/* find and ref zswap entry */
 	spin_lock(&tree->lock);
-	entry = zswap_rb_search(&tree->rbroot, offset);
+	entry = zswap_entry_find_get(&tree->rbroot, offset);
 	if (!entry) {
 		/* entry was invalidated */
 		spin_unlock(&tree->lock);
 		return 0;
 	}
-	zswap_entry_get(entry);
 	spin_unlock(&tree->lock);
 	BUG_ON(offset != entry->offset);
 
 	/* try to allocate swap cache page */
 	switch (zswap_get_swap_cache_page(swpentry, &page)) {
-	case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
+	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
 		ret = -ENOMEM;
 		goto fail;
 
-	case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
+	case ZSWAP_SWAPCACHE_EXIST:
 		/* page is already in the swap cache, ignore for now */
 		page_cache_release(page);
 		ret = -EEXIST;
@@ -556,43 +587,44 @@
 		SetPageUptodate(page);
 	}
 
+	/* move it to the tail of the inactive list after end_writeback */
+	SetPageReclaim(page);
+
 	/* start writeback */
 	__swap_writepage(page, &wbc, end_swap_bio_write);
 	page_cache_release(page);
 	zswap_written_back_pages++;
 
 	spin_lock(&tree->lock);
-
 	/* drop local reference */
-	zswap_entry_put(entry);
-	/* drop the initial reference from entry creation */
-	refcount = zswap_entry_put(entry);
+	zswap_entry_put(tree, entry);
 
 	/*
-	 * There are three possible values for refcount here:
-	 * (1) refcount is 1, load is in progress, unlink from rbtree,
-	 *     load will free
-	 * (2) refcount is 0, (normal case) entry is valid,
-	 *     remove from rbtree and free entry
-	 * (3) refcount is -1, invalidate happened during writeback,
-	 *     free entry
-	 */
-	if (refcount >= 0) {
-		/* no invalidate yet, remove from rbtree */
-		rb_erase(&entry->rbnode, &tree->rbroot);
-	}
+	 * There are two possible situations for the entry here:
+	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
+	 * (2) refcount is 0: the entry was freed and removed from the tree
+	 *     because an invalidate happened during writeback;
+	 *     search the tree and drop the reference only if it is still found
+	 */
+	if (entry == zswap_rb_search(&tree->rbroot, offset))
+		zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
-	if (refcount <= 0) {
-		/* free the entry */
-		zswap_free_entry(tree, entry);
-		return 0;
-	}
-	return -EAGAIN;
 
+	goto end;
+
+	/*
+	 * If we get here due to ZSWAP_SWAPCACHE_EXIST,
+	 * a load may be happening concurrently;
+	 * it is safe and okay not to free the entry.
+	 * Even if we free the entry in the following put,
+	 * it is still okay to return !0.
+	 */
 fail:
 	spin_lock(&tree->lock);
-	zswap_entry_put(entry);
+	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
+
+end:
 	return ret;
 }
 
@@ -676,11 +708,8 @@
 		if (ret == -EEXIST) {
 			zswap_duplicate_entry++;
 			/* remove from rbtree */
-			rb_erase(&dupentry->rbnode, &tree->rbroot);
-			if (!zswap_entry_put(dupentry)) {
-				/* free */
-				zswap_free_entry(tree, dupentry);
-			}
+			zswap_rb_erase(&tree->rbroot, dupentry);
+			zswap_entry_put(tree, dupentry);
 		}
 	} while (ret == -EEXIST);
 	spin_unlock(&tree->lock);
@@ -709,17 +738,16 @@
 	struct zswap_entry *entry;
 	u8 *src, *dst;
 	unsigned int dlen;
-	int refcount, ret;
+	int ret;
 
 	/* find */
 	spin_lock(&tree->lock);
-	entry = zswap_rb_search(&tree->rbroot, offset);
+	entry = zswap_entry_find_get(&tree->rbroot, offset);
 	if (!entry) {
 		/* entry was written back */
 		spin_unlock(&tree->lock);
 		return -1;
 	}
-	zswap_entry_get(entry);
 	spin_unlock(&tree->lock);
 
 	/* decompress */
@@ -734,22 +762,9 @@
 	BUG_ON(ret);
 
 	spin_lock(&tree->lock);
-	refcount = zswap_entry_put(entry);
-	if (likely(refcount)) {
-		spin_unlock(&tree->lock);
-		return 0;
-	}
+	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	/*
-	 * We don't have to unlink from the rbtree because
-	 * zswap_writeback_entry() or zswap_frontswap_invalidate page()
-	 * has already done this for us if we are the last reference.
-	 */
-	/* free */
-
-	zswap_free_entry(tree, entry);
-
 	return 0;
 }
 
@@ -758,7 +773,6 @@
 {
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
-	int refcount;
 
 	/* find */
 	spin_lock(&tree->lock);
@@ -770,20 +784,12 @@
 	}
 
 	/* remove from rbtree */
-	rb_erase(&entry->rbnode, &tree->rbroot);
+	zswap_rb_erase(&tree->rbroot, entry);
 
 	/* drop the initial reference from entry creation */
-	refcount = zswap_entry_put(entry);
+	zswap_entry_put(tree, entry);
 
 	spin_unlock(&tree->lock);
-
-	if (refcount) {
-		/* writeback in progress, writeback will free */
-		return;
-	}
-
-	/* free */
-	zswap_free_entry(tree, entry);
 }
 
 /* frees all zswap entries for the given swap type */
@@ -797,11 +803,8 @@
 
 	/* walk the tree and free everything */
 	spin_lock(&tree->lock);
-	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
-		zbud_free(tree->pool, entry->handle);
-		zswap_entry_cache_free(entry);
-		atomic_dec(&zswap_stored_pages);
-	}
+	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
+		zswap_free_entry(tree, entry);
 	tree->rbroot = RB_ROOT;
 	spin_unlock(&tree->lock);
 
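
The zswap rework centralizes lifetime handling: zswap_entry_put() now erases the entry from the rb-tree and frees it on the final put, and lookups take a reference under the tree lock via zswap_entry_find_get(), replacing the fragile refcount-return-value dance in the writeback and invalidate paths. A single-threaded user-space sketch of the erase-on-last-put pattern (the kernel relies on tree->lock where this toy needs none):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int refcount;
	int on_tree;		/* stand-in for rb-tree membership */
};

static void entry_get(struct entry *e)
{
	e->refcount++;
}

/* drop a reference; on the last put, unlink and free */
static void entry_put(struct entry *e)
{
	assert(e->refcount > 0);
	if (--e->refcount == 0) {
		e->on_tree = 0;		/* zswap_rb_erase() */
		free(e);		/* zswap_free_entry() */
	}
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->refcount = 1;		/* initial ref from creation */
	e->on_tree = 1;

	entry_get(e);			/* find_get: lookup takes a ref */
	entry_put(e);			/* caller done, entry survives */
	entry_put(e);			/* invalidate: last ref, freed here */
	puts("entry freed on last put");
	return 0;
}
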
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 61fc573..b3d17d1 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -98,14 +98,14 @@
 		vlan_gvrp_request_leave(dev);
 
 	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+	netdev_upper_dev_unlink(real_dev, dev);
 	/* Because unregister_netdevice_queue() makes sure at least one rcu
 	 * grace period is respected before device freeing,
 	 * we dont need to call synchronize_net() here.
 	 */
 	unregister_netdevice_queue(dev, head);
 
-	netdev_upper_dev_unlink(real_dev, dev);
-
 	if (grp->nr_vlan_devs == 0) {
 		vlan_mvrp_uninit_applicant(real_dev);
 		vlan_gvrp_uninit_applicant(real_dev);
@@ -169,13 +169,13 @@
 	if (err < 0)
 		goto out_uninit_mvrp;
 
-	err = netdev_upper_dev_link(real_dev, dev);
-	if (err)
-		goto out_uninit_mvrp;
-
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto out_upper_dev_unlink;
+		goto out_uninit_mvrp;
+
+	err = netdev_upper_dev_link(real_dev, dev);
+	if (err)
+		goto out_unregister_netdev;
 
 	/* Account for reference in struct vlan_dev_priv */
 	dev_hold(real_dev);
@@ -191,8 +191,8 @@
 
 	return 0;
 
-out_upper_dev_unlink:
-	netdev_upper_dev_unlink(real_dev, dev);
+out_unregister_netdev:
+	unregister_netdevice(dev);
 out_uninit_mvrp:
 	if (grp->nr_vlan_devs == 0)
 		vlan_mvrp_uninit_applicant(real_dev);
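
The vlan registration fix reorders setup so register_netdevice() runs before netdev_upper_dev_link(), and the error unwind mirrors the new order: a failed link now unregisters the device rather than unlinking something that was never linked. The shape is the usual goto-based unwind where each label undoes exactly the steps already completed, in reverse; a sketch with invented step names:

#include <stdio.h>

static int do_step(const char *name, int fail)
{
	printf("%s\n", name);
	return fail ? -1 : 0;
}

static int setup(int fail_at_link)
{
	int err;

	err = do_step("init applicant", 0);
	if (err)
		goto out;
	err = do_step("register device", 0);
	if (err)
		goto out_uninit;	/* nothing else to undo yet */
	err = do_step("link upper dev", fail_at_link);
	if (err)
		goto out_unregister;	/* undo register, then uninit */
	return 0;

out_unregister:
	do_step("unregister device", 0);
out_uninit:
	do_step("uninit applicant", 0);
out:
	return err;
}

int main(void)
{
	setup(1);	/* exercise the unwind path */
	return 0;
}
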
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index ba5983f..5704ed9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -5,83 +5,6 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/list.h>
 
-
-/**
- *	struct vlan_priority_tci_mapping - vlan egress priority mappings
- *	@priority: skb priority
- *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
- *	@next: pointer to next struct
- */
-struct vlan_priority_tci_mapping {
-	u32					priority;
-	u16					vlan_qos;
-	struct vlan_priority_tci_mapping	*next;
-};
-
-
-/**
- *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
- *	@rx_packets: number of received packets
- *	@rx_bytes: number of received bytes
- *	@rx_multicast: number of received multicast packets
- *	@tx_packets: number of transmitted packets
- *	@tx_bytes: number of transmitted bytes
- *	@syncp: synchronization point for 64bit counters
- *	@rx_errors: number of rx errors
- *	@tx_dropped: number of tx drops
- */
-struct vlan_pcpu_stats {
-	u64			rx_packets;
-	u64			rx_bytes;
-	u64			rx_multicast;
-	u64			tx_packets;
-	u64			tx_bytes;
-	struct u64_stats_sync	syncp;
-	u32			rx_errors;
-	u32			tx_dropped;
-};
-
-struct netpoll;
-
-/**
- *	struct vlan_dev_priv - VLAN private device data
- *	@nr_ingress_mappings: number of ingress priority mappings
- *	@ingress_priority_map: ingress priority mappings
- *	@nr_egress_mappings: number of egress priority mappings
- *	@egress_priority_map: hash of egress priority mappings
- *	@vlan_proto: VLAN encapsulation protocol
- *	@vlan_id: VLAN identifier
- *	@flags: device flags
- *	@real_dev: underlying netdevice
- *	@real_dev_addr: address of underlying netdevice
- *	@dent: proc dir entry
- *	@vlan_pcpu_stats: ptr to percpu rx stats
- */
-struct vlan_dev_priv {
-	unsigned int				nr_ingress_mappings;
-	u32					ingress_priority_map[8];
-	unsigned int				nr_egress_mappings;
-	struct vlan_priority_tci_mapping	*egress_priority_map[16];
-
-	__be16					vlan_proto;
-	u16					vlan_id;
-	u16					flags;
-
-	struct net_device			*real_dev;
-	unsigned char				real_dev_addr[ETH_ALEN];
-
-	struct proc_dir_entry			*dent;
-	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct netpoll				*netpoll;
-#endif
-};
-
-static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
-{
-	return netdev_priv(dev);
-}
-
 /* if this changes, algorithm will have to be reworked because this
  * depends on completely exhausting the VLAN identifier space.  Thus
  * it gives constant time look-up, but in many cases it wastes memory.
@@ -196,12 +119,12 @@
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
-extern int vlan_gvrp_request_join(const struct net_device *dev);
-extern void vlan_gvrp_request_leave(const struct net_device *dev);
-extern int vlan_gvrp_init_applicant(struct net_device *dev);
-extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_gvrp_init(void);
-extern void vlan_gvrp_uninit(void);
+int vlan_gvrp_request_join(const struct net_device *dev);
+void vlan_gvrp_request_leave(const struct net_device *dev);
+int vlan_gvrp_init_applicant(struct net_device *dev);
+void vlan_gvrp_uninit_applicant(struct net_device *dev);
+int vlan_gvrp_init(void);
+void vlan_gvrp_uninit(void);
 #else
 static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
@@ -212,12 +135,12 @@
 #endif
 
 #ifdef CONFIG_VLAN_8021Q_MVRP
-extern int vlan_mvrp_request_join(const struct net_device *dev);
-extern void vlan_mvrp_request_leave(const struct net_device *dev);
-extern int vlan_mvrp_init_applicant(struct net_device *dev);
-extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_mvrp_init(void);
-extern void vlan_mvrp_uninit(void);
+int vlan_mvrp_request_join(const struct net_device *dev);
+void vlan_mvrp_request_leave(const struct net_device *dev);
+int vlan_mvrp_init_applicant(struct net_device *dev);
+void vlan_mvrp_uninit_applicant(struct net_device *dev);
+int vlan_mvrp_init(void);
+void vlan_mvrp_uninit(void);
 #else
 static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
@@ -229,8 +152,8 @@
 
 extern const char vlan_fullname[];
 extern const char vlan_version[];
-extern int vlan_netlink_init(void);
-extern void vlan_netlink_fini(void);
+int vlan_netlink_init(void);
+void vlan_netlink_fini(void);
 
 extern struct rtnl_link_ops vlan_link_ops;
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 09bf1c3..762896e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -68,25 +68,6 @@
 	return 0;
 }
 
-static inline u16
-vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
-{
-	struct vlan_priority_tci_mapping *mp;
-
-	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
-
-	mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
-	while (mp) {
-		if (mp->priority == skb->priority) {
-			return mp->vlan_qos; /* This should already be shifted
-					      * to mask correctly with the
-					      * VLAN's TCI */
-		}
-		mp = mp->next;
-	}
-	return 0;
-}
-
 /*
  *	Create the VLAN header for an arbitrary protocol layer
  *
@@ -111,7 +92,7 @@
 		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
 		vlan_tci = vlan->vlan_id;
-		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
 		vhdr->h_vlan_TCI = htons(vlan_tci);
 
 		/*
@@ -168,7 +149,7 @@
 	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
-		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
 		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 	}
 
@@ -558,7 +539,7 @@
 static int vlan_dev_init(struct net_device *dev)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	int subclass = 0;
+	int subclass = 0, i;
 
 	netif_carrier_off(dev);
 
@@ -612,6 +593,13 @@
 	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct vlan_pcpu_stats *vlan_stat;
+		vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		u64_stats_init(&vlan_stat->syncp);
+	}
+
+
 	return 0;
 }
 
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 3ffda1b..9321a77 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -244,10 +244,10 @@
 	if (!ts)
 		return -EREMOTEIO;
 
-	if (!ts->rd->f_op || !ts->rd->f_op->poll)
+	if (!ts->rd->f_op->poll)
 		return -EIO;
 
-	if (!ts->wr->f_op || !ts->wr->f_op->poll)
+	if (!ts->wr->f_op->poll)
 		return -EIO;
 
 	ret = ts->rd->f_op->poll(ts->rd, pt);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 990afab..9c5a1aa 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -544,9 +544,7 @@
 
 	chan->inuse = false;
 	if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
-		vdev->config->get(vdev,
-				offsetof(struct virtio_9p_config, tag_len),
-				&tag_len, sizeof(tag_len));
+		virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
 	} else {
 		err = -EINVAL;
 		goto out_free_vq;
@@ -556,8 +554,9 @@
 		err = -ENOMEM;
 		goto out_free_vq;
 	}
-	vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
-			tag, tag_len);
+
+	virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
+			   tag, tag_len);
 	chan->tag = tag;
 	chan->tag_len = tag_len;
 	err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/Kconfig b/net/Kconfig
index b50dacc..0715db6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,7 @@
 source "net/vmw_vsock/Kconfig"
 source "net/netlink/Kconfig"
 source "net/mpls/Kconfig"
+source "net/hsr/Kconfig"
 
 config RPS
 	boolean
diff --git a/net/Makefile b/net/Makefile
index 9492e8c..8fa2f91 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -71,3 +71,4 @@
 obj-$(CONFIG_OPENVSWITCH)	+= openvswitch/
 obj-$(CONFIG_VSOCKETS)	+= vmw_vsock/
 obj-$(CONFIG_NET_MPLS_GSO)	+= mpls/
+obj-$(CONFIG_HSR)		+= hsr/
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 4b4d2b7..a00123e 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1735,7 +1735,7 @@
 			res = -EFAULT;
 			break;
 		}
-		if (amount > AX25_NOUID_BLOCK) {
+		if (amount < 0 || amount > AX25_NOUID_BLOCK) {
 			res = -EINVAL;
 			break;
 		}
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 489bb36..4f4aabb 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -24,6 +24,7 @@
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += debugfs.o
 batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
@@ -37,5 +38,3 @@
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
-batman-adv-y += unicast.o
-batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 0a8a80c..a2b480a 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -87,22 +87,198 @@
 	return (uint8_t)(sum / count);
 }
 
+/**
+ * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ *  orig_node
+ * @orig_node: the orig_node whose resources are to be freed
+ */
+static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
+{
+	kfree(orig_node->bat_iv.bcast_own);
+	kfree(orig_node->bat_iv.bcast_own_sum);
+}
+
+/**
+ * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
+ *  include the new hard-interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+				     int max_if_num)
+{
+	void *data_ptr;
+	size_t data_size, old_size;
+	int ret = -ENOMEM;
+
+	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
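+	/* bcast_own holds one bitmap of BATADV_NUM_WORDS words per hard
+	 * interface; grow it and the per-interface sum array by one slot
+	 * for the newly added interface
+	 */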
+	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+	data_ptr = kmalloc(data_size, GFP_ATOMIC);
+	if (!data_ptr)
+		goto unlock;
+
+	memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
+	kfree(orig_node->bat_iv.bcast_own);
+	orig_node->bat_iv.bcast_own = data_ptr;
+
+	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+	if (!data_ptr) {
+		kfree(orig_node->bat_iv.bcast_own);
+		goto unlock;
+	}
+
+	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+	       (max_if_num - 1) * sizeof(uint8_t));
+	kfree(orig_node->bat_iv.bcast_own_sum);
+	orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+	ret = 0;
+
+unlock:
+	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+	return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
+ *  exclude the removed interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current amount of interfaces
+ * @del_if_num: the index of the interface being removed
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+				     int max_if_num, int del_if_num)
+{
+	int chunk_size, ret = -ENOMEM, if_offset;
+	void *data_ptr = NULL;
+
+	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+	/* last interface was removed */
+	if (max_if_num == 0)
+		goto free_bcast_own;
+
+	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
+	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+	if (!data_ptr)
+		goto unlock;
+
+	/* copy first part */
+	memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+	/* copy second part; chunk_size counts bytes, so avoid the scaled
+	 * unsigned long pointer arithmetic on the source
+	 */
+	memcpy((char *)data_ptr + del_if_num * chunk_size,
+	       (char *)orig_node->bat_iv.bcast_own +
+	       ((del_if_num + 1) * chunk_size),
+	       (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+	kfree(orig_node->bat_iv.bcast_own);
+	orig_node->bat_iv.bcast_own = data_ptr;
+
+	if (max_if_num == 0)
+		goto free_own_sum;
+
+	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+	if (!data_ptr) {
+		kfree(orig_node->bat_iv.bcast_own);
+		goto unlock;
+	}
+
+	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+	       del_if_num * sizeof(uint8_t));
+
+	if_offset = (del_if_num + 1) * sizeof(uint8_t);
+	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+	       orig_node->bat_iv.bcast_own_sum + if_offset,
+	       (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+	kfree(orig_node->bat_iv.bcast_own_sum);
+	orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+	ret = 0;
+unlock:
+	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+	return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_get - retrieve or create (if it does not exist) an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of the originator
+ *
+ * Returns the originator object corresponding to the passed mac address or NULL
+ * on failure.
+ * If the object does not exist, it is created and initialised.
+ */
+static struct batadv_orig_node *
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+{
+	struct batadv_orig_node *orig_node;
+	int size, hash_added;
+
+	orig_node = batadv_orig_hash_find(bat_priv, addr);
+	if (orig_node)
+		return orig_node;
+
+	orig_node = batadv_orig_node_new(bat_priv, addr);
+	if (!orig_node)
+		return NULL;
+
+	spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
+
+	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
+	orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC);
+	if (!orig_node->bat_iv.bcast_own)
+		goto free_orig_node;
+
+	size = bat_priv->num_ifaces * sizeof(uint8_t);
+	orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+	if (!orig_node->bat_iv.bcast_own_sum)
+		goto free_bcast_own;
+
+	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+				     batadv_choose_orig, orig_node,
+				     &orig_node->hash_entry);
+	if (hash_added != 0)
+		goto free_bcast_own;
+
+	return orig_node;
+
+free_bcast_own:
+	kfree(orig_node->bat_iv.bcast_own);
+free_orig_node:
+	batadv_orig_node_free_ref(orig_node);
+
+	return NULL;
+}
+
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
 			const uint8_t *neigh_addr,
 			struct batadv_orig_node *orig_node,
 			struct batadv_orig_node *orig_neigh)
 {
+	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_neigh_node *neigh_node;
 
-	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr);
+	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
 	if (!neigh_node)
 		goto out;
 
-	INIT_LIST_HEAD(&neigh_node->bonding_list);
+	spin_lock_init(&neigh_node->bat_iv.lq_update_lock);
 
-	neigh_node->orig_node = orig_neigh;
-	neigh_node->if_incoming = hard_iface;
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
 
 	spin_lock_bh(&orig_node->neigh_list_lock);
 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
@@ -135,9 +311,8 @@
 	batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
 	batadv_ogm_packet->header.ttl = 2;
 	batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+	batadv_ogm_packet->reserved = 0;
 	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
-	batadv_ogm_packet->tt_num_changes = 0;
-	batadv_ogm_packet->ttvn = 0;
 
 	res = 0;
 
@@ -207,12 +382,12 @@
 
 /* is there another aggregated packet here? */
 static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-				     int tt_num_changes)
+				     __be16 tvlv_len)
 {
 	int next_buff_pos = 0;
 
 	next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-	next_buff_pos += batadv_tt_len(tt_num_changes);
+	next_buff_pos += ntohs(tvlv_len);
 
 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -240,7 +415,7 @@
 
 	/* adjust all flags and log packets */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-					 batadv_ogm_packet->tt_num_changes)) {
+					 batadv_ogm_packet->tvlv_len)) {
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet
 		 */
@@ -256,18 +431,18 @@
 			fwd_str = "Sending own";
 
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+			   "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
 			   fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			   batadv_ogm_packet->orig,
 			   ntohl(batadv_ogm_packet->seqno),
 			   batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
 			   (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
 			    "on" : "off"),
-			   batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+			   hard_iface->net_dev->name,
 			   hard_iface->net_dev->dev_addr);
 
 		buff_pos += BATADV_OGM_HLEN;
-		buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+		buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
 		packet_num++;
 		packet_pos = forw_packet->skb->data + buff_pos;
 		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -601,7 +776,7 @@
 				  struct batadv_hard_iface *if_incoming)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	uint8_t tt_num_changes;
+	uint16_t tvlv_len;
 
 	if (batadv_ogm_packet->header.ttl <= 1) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -621,7 +796,7 @@
 			return;
 	}
 
-	tt_num_changes = batadv_ogm_packet->tt_num_changes;
+	tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
 	batadv_ogm_packet->header.ttl--;
 	memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -642,7 +817,7 @@
 		batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
 	batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
-				BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+				BATADV_OGM_HLEN + tvlv_len,
 				if_incoming, 0, batadv_iv_ogm_fwd_send_time());
 }
 
@@ -662,20 +837,22 @@
 	uint32_t i;
 	size_t word_index;
 	uint8_t *w;
+	int if_num;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			spin_lock_bh(&orig_node->ogm_cnt_lock);
+			spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 			word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-			word = &(orig_node->bcast_own[word_index]);
+			word = &(orig_node->bat_iv.bcast_own[word_index]);
 
 			batadv_bit_get_packet(bat_priv, word, 1, 0);
-			w = &orig_node->bcast_own_sum[hard_iface->if_num];
+			if_num = hard_iface->if_num;
+			w = &orig_node->bat_iv.bcast_own_sum[if_num];
 			*w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
-			spin_unlock_bh(&orig_node->ogm_cnt_lock);
+			spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 		}
 		rcu_read_unlock();
 	}
@@ -688,43 +865,29 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if;
 	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
-	int vis_server, tt_num_changes = 0;
 	uint32_t seqno;
-	uint8_t bandwidth;
+	uint16_t tvlv_len = 0;
 
-	vis_server = atomic_read(&bat_priv->vis_mode);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 
-	if (hard_iface == primary_if)
-		tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
-						       ogm_buff_len,
-						       BATADV_OGM_HLEN);
+	if (hard_iface == primary_if) {
+		/* tt changes have to be committed before the tvlv data is
+		 * appended as it may alter the tt tvlv container
+		 */
+		batadv_tt_local_commit_changes(bat_priv);
+		tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
+							    ogm_buff_len,
+							    BATADV_OGM_HLEN);
+	}
 
 	batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+	batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
 	/* change sequence number to network order */
 	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
 	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
-	batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
-	batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
-	if (tt_num_changes >= 0)
-		batadv_ogm_packet->tt_num_changes = tt_num_changes;
-
-	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
-		batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
-	else
-		batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
-
-	if (hard_iface == primary_if &&
-	    atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
-		bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-		batadv_ogm_packet->gw_flags = bandwidth;
-	} else {
-		batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
-	}
-
 	batadv_iv_ogm_slide_own_bcast_window(hard_iface);
 	batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
 				hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
@@ -770,18 +933,18 @@
 		if (dup_status != BATADV_NO_DUP)
 			continue;
 
-		spin_lock_bh(&tmp_neigh_node->lq_update_lock);
-		batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
-				       &tmp_neigh_node->tq_index, 0);
-		tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
-		tmp_neigh_node->tq_avg = tq_avg;
-		spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
+		spin_lock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
+		batadv_ring_buffer_set(tmp_neigh_node->bat_iv.tq_recv,
+				       &tmp_neigh_node->bat_iv.tq_index, 0);
+		tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->bat_iv.tq_recv);
+		tmp_neigh_node->bat_iv.tq_avg = tq_avg;
+		spin_unlock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
 	}
 
 	if (!neigh_node) {
 		struct batadv_orig_node *orig_tmp;
 
-		orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
+		orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
 		if (!orig_tmp)
 			goto unlock;
 
@@ -798,80 +961,55 @@
 
 	rcu_read_unlock();
 
-	orig_node->flags = batadv_ogm_packet->flags;
 	neigh_node->last_seen = jiffies;
 
-	spin_lock_bh(&neigh_node->lq_update_lock);
-	batadv_ring_buffer_set(neigh_node->tq_recv,
-			       &neigh_node->tq_index,
+	spin_lock_bh(&neigh_node->bat_iv.lq_update_lock);
+	batadv_ring_buffer_set(neigh_node->bat_iv.tq_recv,
+			       &neigh_node->bat_iv.tq_index,
 			       batadv_ogm_packet->tq);
-	neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
-	spin_unlock_bh(&neigh_node->lq_update_lock);
+	tq_avg = batadv_ring_buffer_avg(neigh_node->bat_iv.tq_recv);
+	neigh_node->bat_iv.tq_avg = tq_avg;
+	spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
 	if (dup_status == BATADV_NO_DUP) {
 		orig_node->last_ttl = batadv_ogm_packet->header.ttl;
 		neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
 	}
 
-	batadv_bonding_candidate_add(orig_node, neigh_node);
+	batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
 
 	/* if this neighbor already is our next hop there is nothing
 	 * to change
 	 */
 	router = batadv_orig_node_get_router(orig_node);
 	if (router == neigh_node)
-		goto update_tt;
+		goto out;
 
 	/* if this neighbor does not offer a better TQ we won't consider it */
-	if (router && (router->tq_avg > neigh_node->tq_avg))
-		goto update_tt;
+	if (router && (router->bat_iv.tq_avg > neigh_node->bat_iv.tq_avg))
+		goto out;
 
 	/* if the TQ is the same and the link not more symmetric we
 	 * won't consider it either
 	 */
-	if (router && (neigh_node->tq_avg == router->tq_avg)) {
+	if (router && (neigh_node->bat_iv.tq_avg == router->bat_iv.tq_avg)) {
 		orig_node_tmp = router->orig_node;
-		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+		spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 		if_num = router->if_incoming->if_num;
-		sum_orig = orig_node_tmp->bcast_own_sum[if_num];
-		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+		sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+		spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
 		orig_node_tmp = neigh_node->orig_node;
-		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+		spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 		if_num = neigh_node->if_incoming->if_num;
-		sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
-		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+		sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+		spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
 		if (sum_orig >= sum_neigh)
-			goto update_tt;
+			goto out;
 	}
 
 	batadv_update_route(bat_priv, orig_node, neigh_node);
-
-update_tt:
-	/* I have to check for transtable changes only if the OGM has been
-	 * sent through a primary interface
-	 */
-	if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
-	     (batadv_ogm_packet->header.ttl > 2)) ||
-	    (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
-		batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
-				      batadv_ogm_packet->tt_num_changes,
-				      batadv_ogm_packet->ttvn,
-				      ntohs(batadv_ogm_packet->tt_crc));
-
-	if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
-		batadv_gw_node_update(bat_priv, orig_node,
-				      batadv_ogm_packet->gw_flags);
-
-	orig_node->gw_flags = batadv_ogm_packet->gw_flags;
-
-	/* restart gateway selection if fast or late switching was enabled */
-	if ((orig_node->gw_flags) &&
-	    (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
-	    (atomic_read(&bat_priv->gw_sel_class) > 2))
-		batadv_gw_check_election(bat_priv, orig_node);
-
 	goto out;
 
 unlock:
@@ -893,7 +1031,7 @@
 	uint8_t total_count;
 	uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-	int tq_asym_penalty, inv_asym_penalty, ret = 0;
+	int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
 	unsigned int combined_tq;
 
 	/* find corresponding one hop neighbor */
@@ -931,10 +1069,11 @@
 	orig_node->last_seen = jiffies;
 
 	/* find packet count of corresponding one hop neighbor */
-	spin_lock_bh(&orig_node->ogm_cnt_lock);
-	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
-	neigh_rq_count = neigh_node->real_packet_count;
-	spin_unlock_bh(&orig_node->ogm_cnt_lock);
+	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+	if_num = if_incoming->if_num;
+	orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+	neigh_rq_count = neigh_node->bat_iv.real_packet_count;
+	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
 	/* pay attention to not get a value bigger than 100 % */
 	if (orig_eq_count > neigh_rq_count)
@@ -1016,12 +1155,13 @@
 	uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
 	uint8_t *neigh_addr;
 	uint8_t packet_count;
+	unsigned long *bitmap;
 
-	orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+	orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
 	if (!orig_node)
 		return BATADV_NO_DUP;
 
-	spin_lock_bh(&orig_node->ogm_cnt_lock);
+	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 	seq_diff = seqno - orig_node->last_real_seqno;
 
 	/* signalize caller that the packet is to be dropped. */
@@ -1036,7 +1176,7 @@
 	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
 		neigh_addr = tmp_neigh_node->addr;
-		is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+		is_dup = batadv_test_bit(tmp_neigh_node->bat_iv.real_bits,
 					 orig_node->last_real_seqno,
 					 seqno);
 
@@ -1052,13 +1192,13 @@
 		}
 
 		/* if the window moved, set the update flag. */
-		need_update |= batadv_bit_get_packet(bat_priv,
-						     tmp_neigh_node->real_bits,
+		bitmap = tmp_neigh_node->bat_iv.real_bits;
+		need_update |= batadv_bit_get_packet(bat_priv, bitmap,
 						     seq_diff, set_mark);
 
-		packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+		packet_count = bitmap_weight(tmp_neigh_node->bat_iv.real_bits,
 					     BATADV_TQ_LOCAL_WINDOW_SIZE);
-		tmp_neigh_node->real_packet_count = packet_count;
+		tmp_neigh_node->bat_iv.real_packet_count = packet_count;
 	}
 	rcu_read_unlock();
 
@@ -1070,7 +1210,7 @@
 	}
 
 out:
-	spin_unlock_bh(&orig_node->ogm_cnt_lock);
+	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 	batadv_orig_node_free_ref(orig_node);
 	return ret;
 }
@@ -1082,7 +1222,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_hard_iface *hard_iface;
-	struct batadv_orig_node *orig_neigh_node, *orig_node;
+	struct batadv_orig_node *orig_neigh_node, *orig_node, *orig_node_tmp;
 	struct batadv_neigh_node *router = NULL, *router_router = NULL;
 	struct batadv_neigh_node *orig_neigh_router = NULL;
 	int has_directlink_flag;
@@ -1122,13 +1262,11 @@
 		is_single_hop_neigh = true;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
+		   "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
 		   ethhdr->h_source, if_incoming->net_dev->name,
 		   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
 		   batadv_ogm_packet->prev_sender,
-		   ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
-		   ntohs(batadv_ogm_packet->tt_crc),
-		   batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+		   ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
 		   batadv_ogm_packet->header.ttl,
 		   batadv_ogm_packet->header.version, has_directlink_flag);
 
@@ -1168,8 +1306,8 @@
 		int16_t if_num;
 		uint8_t *weight;
 
-		orig_neigh_node = batadv_get_orig_node(bat_priv,
-						       ethhdr->h_source);
+		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+							 ethhdr->h_source);
 		if (!orig_neigh_node)
 			return;
 
@@ -1183,15 +1321,15 @@
 			if_num = if_incoming->if_num;
 			offset = if_num * BATADV_NUM_WORDS;
 
-			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
-			word = &(orig_neigh_node->bcast_own[offset]);
+			spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+			word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
 			bit_pos = if_incoming_seqno - 2;
 			bit_pos -= ntohl(batadv_ogm_packet->seqno);
 			batadv_set_bit(word, bit_pos);
-			weight = &orig_neigh_node->bcast_own_sum[if_num];
+			weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num];
 			*weight = bitmap_weight(word,
 						BATADV_TQ_LOCAL_WINDOW_SIZE);
-			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+			spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
 		}
 
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -1214,7 +1352,7 @@
 		return;
 	}
 
-	orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+	orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
 	if (!orig_node)
 		return;
 
@@ -1235,10 +1373,12 @@
 	}
 
 	router = batadv_orig_node_get_router(orig_node);
-	if (router)
-		router_router = batadv_orig_node_get_router(router->orig_node);
+	if (router) {
+		orig_node_tmp = router->orig_node;
+		router_router = batadv_orig_node_get_router(orig_node_tmp);
+	}
 
-	if ((router && router->tq_avg != 0) &&
+	if ((router && router->bat_iv.tq_avg != 0) &&
 	    (batadv_compare_eth(router->addr, ethhdr->h_source)))
 		is_from_best_next_hop = true;
 
@@ -1254,14 +1394,16 @@
 		goto out;
 	}
 
+	batadv_tvlv_ogm_receive(bat_priv, batadv_ogm_packet, orig_node);
+
 	/* if sender is a direct neighbor the sender mac equals
 	 * originator mac
 	 */
 	if (is_single_hop_neigh)
 		orig_neigh_node = orig_node;
 	else
-		orig_neigh_node = batadv_get_orig_node(bat_priv,
-						       ethhdr->h_source);
+		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+							 ethhdr->h_source);
 
 	if (!orig_neigh_node)
 		goto out;
@@ -1350,9 +1492,9 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct ethhdr *ethhdr;
 	int buff_pos = 0, packet_len;
-	unsigned char *tt_buff, *packet_buff;
-	bool ret;
+	unsigned char *tvlv_buff, *packet_buff;
 	uint8_t *packet_pos;
+	bool ret;
 
 	ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
 	if (!ret)
@@ -1375,14 +1517,14 @@
 
 	/* unpack the aggregated packets and process them one by one */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
-					 batadv_ogm_packet->tt_num_changes)) {
-		tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
+					 batadv_ogm_packet->tvlv_len)) {
+		tvlv_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
 
-		batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
-				      if_incoming);
+		batadv_iv_ogm_process(ethhdr, batadv_ogm_packet,
+				      tvlv_buff, if_incoming);
 
 		buff_pos += BATADV_OGM_HLEN;
-		buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+		buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
 
 		packet_pos = packet_buff + buff_pos;
 		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -1392,6 +1534,106 @@
 	return NET_RX_SUCCESS;
 }
 
+/**
+ * batadv_iv_ogm_orig_print - print the originator table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: debugfs table seq_file struct
+ */
+static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
+				     struct seq_file *seq)
+{
+	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
+	int last_seen_msecs, last_seen_secs;
+	struct batadv_orig_node *orig_node;
+	unsigned long last_seen_jiffies;
+	struct hlist_head *head;
+	int batman_count = 0;
+	uint32_t i;
+
+	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+		   "Nexthop", "outgoingIF", "Potential nexthops");
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+			neigh_node = batadv_orig_node_get_router(orig_node);
+			if (!neigh_node)
+				continue;
+
+			if (neigh_node->bat_iv.tq_avg == 0)
+				goto next;
+
+			last_seen_jiffies = jiffies - orig_node->last_seen;
+			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+			last_seen_secs = last_seen_msecs / 1000;
+			last_seen_msecs = last_seen_msecs % 1000;
+
+			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
+				   orig_node->orig, last_seen_secs,
+				   last_seen_msecs, neigh_node->bat_iv.tq_avg,
+				   neigh_node->addr,
+				   neigh_node->if_incoming->net_dev->name);
+
+			hlist_for_each_entry_rcu(neigh_node_tmp,
+						 &orig_node->neigh_list, list) {
+				seq_printf(seq, " %pM (%3i)",
+					   neigh_node_tmp->addr,
+					   neigh_node_tmp->bat_iv.tq_avg);
+			}
+
+			seq_puts(seq, "\n");
+			batman_count++;
+
+next:
+			batadv_neigh_node_free_ref(neigh_node);
+		}
+		rcu_read_unlock();
+	}
+
+	if (batman_count == 0)
+		seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns a value less than, equal to, or greater than 0 if the metric via
+ * neigh1 is lower than, the same as, or higher than the metric via neigh2.
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+				   struct batadv_neigh_node *neigh2)
+{
+	uint8_t tq1, tq2;
+
+	tq1 = neigh1->bat_iv.tq_avg;
+	tq2 = neigh2->bat_iv.tq_avg;
+
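+	/* both uint8_t values are promoted to int, so this cannot wrap */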
+	return tq1 - tq2;
+}
+
+/**
+ * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
+ *  neigh2 from the metric perspective
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns true if the metric via neigh1 is equally good or better than the
+ * metric via neigh2, false otherwise.
+ */
+static bool batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
+				       struct batadv_neigh_node *neigh2)
+{
+	int diff = batadv_iv_ogm_neigh_cmp(neigh1, neigh2);
+
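+	/* neigh1 is still rated "equally good" while its TQ average lies
+	 * less than BATADV_TQ_SIMILARITY_THRESHOLD below neigh2's
+	 */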
+	return diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+}
+
 static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 	.name = "BATMAN_IV",
 	.bat_iface_enable = batadv_iv_ogm_iface_enable,
@@ -1400,6 +1642,12 @@
 	.bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
 	.bat_ogm_schedule = batadv_iv_ogm_schedule,
 	.bat_ogm_emit = batadv_iv_ogm_emit,
+	.bat_neigh_cmp = batadv_iv_ogm_neigh_cmp,
+	.bat_neigh_is_equiv_or_better = batadv_iv_ogm_neigh_is_eob,
+	.bat_orig_print = batadv_iv_ogm_orig_print,
+	.bat_orig_free = batadv_iv_ogm_orig_free,
+	.bat_orig_add_if = batadv_iv_ogm_orig_add_if,
+	.bat_orig_del_if = batadv_iv_ogm_orig_del_if,
 };
 
 int __init batadv_iv_init(void)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 264de88..28eb5e6 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -411,10 +411,10 @@
 		return NULL;
 	}
 
-	/* this is a gateway now, remove any tt entries */
+	/* this is a gateway now, remove any TT entry on this VLAN */
 	orig_node = batadv_orig_hash_find(bat_priv, orig);
 	if (orig_node) {
-		batadv_tt_global_del_orig(bat_priv, orig_node,
+		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
 					  "became a backbone gateway");
 		batadv_orig_node_free_ref(orig_node);
 	}
@@ -858,30 +858,28 @@
 				    struct batadv_hard_iface *primary_if,
 				    struct sk_buff *skb)
 {
-	struct ethhdr *ethhdr;
-	struct vlan_ethhdr *vhdr;
-	struct arphdr *arphdr;
-	uint8_t *hw_src, *hw_dst;
 	struct batadv_bla_claim_dst *bla_dst;
-	uint16_t proto;
+	uint8_t *hw_src, *hw_dst;
+	struct vlan_ethhdr *vhdr;
+	struct ethhdr *ethhdr;
+	struct arphdr *arphdr;
+	unsigned short vid;
+	__be16 proto;
 	int headlen;
-	unsigned short vid = BATADV_NO_FLAGS;
 	int ret;
 
+	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+	proto = ethhdr->h_proto;
+	headlen = ETH_HLEN;
+	if (vid & BATADV_VLAN_HAS_TAG) {
 		vhdr = (struct vlan_ethhdr *)ethhdr;
-		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-		vid |= BATADV_VLAN_HAS_TAG;
-		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
-		headlen = sizeof(*vhdr);
-	} else {
-		proto = ntohs(ethhdr->h_proto);
-		headlen = ETH_HLEN;
+		proto = vhdr->h_vlan_encapsulated_proto;
+		headlen += VLAN_HLEN;
 	}
 
-	if (proto != ETH_P_ARP)
+	if (proto != htons(ETH_P_ARP))
 		return 0; /* not a claim frame */
 
 	/* this must be an ARP frame. check if it is a claim. */
@@ -1317,12 +1315,14 @@
 
 /* @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
+ * @vid: VLAN identifier
  *
- * check if the originator is a gateway for any VLAN ID.
+ * Check if the originator is a gateway for the VLAN identified by vid.
  *
- * returns 1 if it is found, 0 otherwise
+ * Returns true if orig is a backbone for this vid, false otherwise.
  */
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+				    unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
@@ -1330,25 +1330,26 @@
 	int i;
 
 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
-		return 0;
+		return false;
 
 	if (!hash)
-		return 0;
+		return false;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
-			if (batadv_compare_eth(backbone_gw->orig, orig)) {
+			if (batadv_compare_eth(backbone_gw->orig, orig) &&
+			    backbone_gw->vid == vid) {
 				rcu_read_unlock();
-				return 1;
+				return true;
 			}
 		}
 		rcu_read_unlock();
 	}
 
-	return 0;
+	return false;
 }
 
 
@@ -1365,10 +1366,8 @@
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 			      struct batadv_orig_node *orig_node, int hdr_size)
 {
-	struct ethhdr *ethhdr;
-	struct vlan_ethhdr *vhdr;
 	struct batadv_bla_backbone_gw *backbone_gw;
-	unsigned short vid = BATADV_NO_FLAGS;
+	unsigned short vid;
 
 	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
 		return 0;
@@ -1377,16 +1376,7 @@
 	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
 		return 0;
 
-	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
-
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
-			return 0;
-
-		vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
-		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-		vid |= BATADV_VLAN_HAS_TAG;
-	}
+	vid = batadv_get_vid(skb, hdr_size);
 
 	/* see if this originator is a backbone gw for this VLAN */
 	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 4b102e7..da173e7 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -30,7 +30,8 @@
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
 					     void *offset);
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+				    unsigned short vid);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
@@ -74,10 +75,11 @@
 	return 0;
 }
 
-static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
-						 uint8_t *orig)
+static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+						  uint8_t *orig,
+						  unsigned short vid)
 {
-	return 0;
+	return false;
 }
 
 static inline int
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index f186a55..049a7a2 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -28,7 +28,6 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "soft-interface.h"
-#include "vis.h"
 #include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -300,12 +299,6 @@
 	return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
-static int batadv_vis_data_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, batadv_vis_seq_print_text, net_dev);
-}
-
 struct batadv_debuginfo {
 	struct attribute attr;
 	const struct file_operations fops;
@@ -356,7 +349,6 @@
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
 			batadv_transtable_local_open);
-static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
 #ifdef CONFIG_BATMAN_ADV_NC
 static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
 #endif
@@ -373,7 +365,6 @@
 	&batadv_debuginfo_dat_cache,
 #endif
 	&batadv_debuginfo_transtable_local,
-	&batadv_debuginfo_vis_data,
 #ifdef CONFIG_BATMAN_ADV_NC
 	&batadv_debuginfo_nc_nodes,
 #endif
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 06345d4..6c8c393 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -19,6 +19,7 @@
 
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <net/arp.h>
 
 #include "main.h"
@@ -29,7 +30,6 @@
 #include "send.h"
 #include "types.h"
 #include "translation-table.h"
-#include "unicast.h"
 
 static void batadv_dat_purge(struct work_struct *work);
 
@@ -206,15 +206,11 @@
  */
 static uint32_t batadv_hash_dat(const void *data, uint32_t size)
 {
-	const unsigned char *key = data;
 	uint32_t hash = 0;
-	size_t i;
+	const struct batadv_dat_entry *dat = data;
 
-	for (i = 0; i < 4; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
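+	/* hash the IP address together with the VLAN identifier: DAT keeps
+	 * one entry per VLAN, so the same IPv4 address may map to different
+	 * clients on different VLANs
+	 */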
+	hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
+	hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
@@ -228,21 +224,26 @@
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
+ * @vid: VLAN identifier
  *
  * Returns the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
-batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
+			   unsigned short vid)
 {
 	struct hlist_head *head;
-	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
 	struct batadv_hashtable *hash = bat_priv->dat.hash;
 	uint32_t index;
 
 	if (!hash)
 		return NULL;
 
-	index = batadv_hash_dat(&ip, hash->size);
+	to_find.ip = ip;
+	to_find.vid = vid;
+
+	index = batadv_hash_dat(&to_find, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
@@ -266,22 +267,24 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
+ * @vid: VLAN identifier
  */
 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
-				 uint8_t *mac_addr)
+				 uint8_t *mac_addr, unsigned short vid)
 {
 	struct batadv_dat_entry *dat_entry;
 	int hash_added;
 
-	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
 	/* if this entry is already known, just update it */
 	if (dat_entry) {
 		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
 			memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
 		dat_entry->last_update = jiffies;
 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
-			   "Entry updated: %pI4 %pM\n", &dat_entry->ip,
-			   dat_entry->mac_addr);
+			   "Entry updated: %pI4 %pM (vid: %d)\n",
+			   &dat_entry->ip, dat_entry->mac_addr,
+			   BATADV_PRINT_VID(vid));
 		goto out;
 	}
 
@@ -290,12 +293,13 @@
 		goto out;
 
 	dat_entry->ip = ip;
+	dat_entry->vid = vid;
 	memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
 	dat_entry->last_update = jiffies;
 	atomic_set(&dat_entry->refcount, 2);
 
 	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
-				     batadv_hash_dat, &dat_entry->ip,
+				     batadv_hash_dat, dat_entry,
 				     &dat_entry->hash_entry);
 
 	if (unlikely(hash_added != 0)) {
@@ -304,8 +308,8 @@
 		goto out;
 	}
 
-	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
-		   &dat_entry->ip, dat_entry->mac_addr);
+	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
+		   &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));
 
 out:
 	if (dat_entry)
@@ -419,6 +423,10 @@
 	bool ret = false;
 	int j;
 
+	/* check if orig node candidate is running DAT */
+	if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+		goto out;
+
 	/* Check if this node has already been selected... */
 	for (j = 0; j < select; j++)
 		if (res[j].orig_node == candidate)
@@ -588,9 +596,9 @@
 			goto free_orig;
 
 		tmp_skb = pskb_copy(skb, GFP_ATOMIC);
-		if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
-						      cand[i].orig_node,
-						      packet_subtype)) {
+		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
+							   cand[i].orig_node,
+							   packet_subtype)) {
 			kfree_skb(tmp_skb);
 			goto free_neigh;
 		}
@@ -626,6 +634,59 @@
 }
 
 /**
+ * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+	char dat_mode;
+
+	dat_mode = atomic_read(&bat_priv->distributed_arp_table);
+
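+	/* the DAT tvlv container carries no payload; its mere presence in
+	 * the OGM advertises DAT support to the rest of the mesh
+	 */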
+	switch (dat_mode) {
+	case 0:
+		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+		break;
+	case 1:
+		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
+					       NULL, 0);
+		break;
+	}
+}
+
+/**
+ * batadv_dat_status_update - update the dat tvlv container after dat
+ *  setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_dat_status_update(struct net_device *net_dev)
+{
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
+	batadv_dat_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the dat data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+					   struct batadv_orig_node *orig,
+					   uint8_t flags,
+					   void *tvlv_value,
+					   uint16_t tvlv_value_len)
+{
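+	/* BATADV_TVLV_HANDLER_OGM_CIFNOTFND is set when this handler runs
+	 * because the received OGM carried no DAT container, i.e. the
+	 * originator does not advertise DAT support
+	 */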
+	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+	else
+		orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+}
+
+/**
  * batadv_dat_hash_free - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -657,6 +718,10 @@
 
 	batadv_dat_start_timer(bat_priv);
 
+	batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+				     NULL, BATADV_TVLV_DAT, 1,
+				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+	batadv_dat_tvlv_container_update(bat_priv);
 	return 0;
 }
 
@@ -666,6 +731,9 @@
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
 {
+	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+
 	cancel_delayed_work_sync(&bat_priv->dat.work);
 
 	batadv_dat_hash_free(bat_priv);
@@ -693,8 +761,8 @@
 		goto out;
 
 	seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
-	seq_printf(seq, "          %-7s          %-13s %5s\n", "IPv4", "MAC",
-		   "last-seen");
+	seq_printf(seq, "          %-7s          %-9s %4s %11s\n", "IPv4",
+		   "MAC", "VID", "last-seen");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -707,8 +775,9 @@
 			last_seen_msecs = last_seen_msecs % 60000;
 			last_seen_secs = last_seen_msecs / 1000;
 
-			seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
+			seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
 				   &dat_entry->ip, dat_entry->mac_addr,
+				   BATADV_PRINT_VID(dat_entry->vid),
 				   last_seen_mins, last_seen_secs);
 		}
 		rcu_read_unlock();
@@ -795,6 +864,31 @@
 }
 
 /**
+ * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet to extract the VID from
+ * @hdr_size: the size of the batman-adv header encapsulating the packet
+ *
+ * If the packet embedded in the skb is VLAN tagged, this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag set; otherwise BATADV_NO_FLAGS is
+ * returned.
+ */
+static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
+{
+	unsigned short vid;
+
+	vid = batadv_get_vid(skb, *hdr_size);
+
+	/* The ARP parsing functions jump forward by hdr_size + ETH_HLEN.
+	 * If the packet carries a (longer) VLAN header, hdr_size is bumped
+	 * by VLAN_HLEN so that those functions still skip the correct
+	 * number of bytes.
+	 */
+	if (vid & BATADV_VLAN_HAS_TAG)
+		*hdr_size += VLAN_HLEN;
+
+	return vid;
+}
+
+/**
  * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
  * answer using DAT
  * @bat_priv: the bat priv with all the soft interface information
@@ -813,26 +907,31 @@
 	bool ret = false;
 	struct batadv_dat_entry *dat_entry = NULL;
 	struct sk_buff *skb_new;
+	int hdr_size = 0;
+	unsigned short vid;
 
 	if (!atomic_read(&bat_priv->distributed_arp_table))
 		goto out;
 
-	type = batadv_arp_get_type(bat_priv, skb, 0);
+	vid = batadv_dat_get_vid(skb, &hdr_size);
+
+	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
 	/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
 	 * message to the selected DHT candidates
 	 */
 	if (type != ARPOP_REQUEST)
 		goto out;
 
-	batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
+	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+		       "Parsing outgoing ARP REQUEST");
 
-	ip_src = batadv_arp_ip_src(skb, 0);
-	hw_src = batadv_arp_hw_src(skb, 0);
-	ip_dst = batadv_arp_ip_dst(skb, 0);
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	hw_src = batadv_arp_hw_src(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
 	if (dat_entry) {
 		/* If the ARP request is destined for a local client the local
 		 * client will answer itself. DAT would only generate a
@@ -842,7 +941,8 @@
 		 * additional DAT answer may trigger kernel warnings about
 		 * a packet coming from the wrong port.
 		 */
-		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
+					BATADV_NO_FLAGS)) {
 			ret = true;
 			goto out;
 		}
@@ -853,11 +953,15 @@
 		if (!skb_new)
 			goto out;
 
+		if (vid & BATADV_VLAN_HAS_TAG) {
+			skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+						  vid & VLAN_VID_MASK);
+			/* vlan_insert_tag() frees the skb on error */
+			if (!skb_new)
+				goto out;
+		}
+
 		skb_reset_mac_header(skb_new);
 		skb_new->protocol = eth_type_trans(skb_new,
 						   bat_priv->soft_iface);
 		bat_priv->stats.rx_packets++;
-		bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+		bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
 		bat_priv->soft_iface->last_rx = jiffies;
 
 		netif_rx(skb_new);
@@ -892,11 +996,14 @@
 	struct sk_buff *skb_new;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
+	unsigned short vid;
 	int err;
 
 	if (!atomic_read(&bat_priv->distributed_arp_table))
 		goto out;
 
+	vid = batadv_dat_get_vid(skb, &hdr_size);
+
 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
 	if (type != ARPOP_REQUEST)
 		goto out;
@@ -908,9 +1015,9 @@
 	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
 		       "Parsing incoming ARP REQUEST");
 
-	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
 	if (!dat_entry)
 		goto out;
 
@@ -921,17 +1028,22 @@
 	if (!skb_new)
 		goto out;
 
+	if (vid & BATADV_VLAN_HAS_TAG) {
+		skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+					  vid & VLAN_VID_MASK);
+		/* vlan_insert_tag() frees the skb on error */
+		if (!skb_new)
+			goto out;
+	}
+
 	/* To preserve backwards compatibility, the node has to choose the
 	 * outgoing format based on the incoming request packet type. The
 	 * assumption is that a node not using the 4addr packet format
 	 * doesn't support it.
 	 */
 	if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
-		err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
-						    BATADV_P_DAT_CACHE_REPLY);
+		err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
+						   BATADV_P_DAT_CACHE_REPLY,
+						   vid);
 	else
-		err = batadv_unicast_send_skb(bat_priv, skb_new);
+		err = batadv_send_skb_via_tt(bat_priv, skb_new, vid);
 
-	if (!err) {
+	if (err != NET_XMIT_DROP) {
 		batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
 		ret = true;
 	}
@@ -954,23 +1066,28 @@
 	uint16_t type;
 	__be32 ip_src, ip_dst;
 	uint8_t *hw_src, *hw_dst;
+	int hdr_size = 0;
+	unsigned short vid;
 
 	if (!atomic_read(&bat_priv->distributed_arp_table))
 		return;
 
-	type = batadv_arp_get_type(bat_priv, skb, 0);
+	vid = batadv_dat_get_vid(skb, &hdr_size);
+
+	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
 	if (type != ARPOP_REPLY)
 		return;
 
-	batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
+	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+		       "Parsing outgoing ARP REPLY");
 
-	hw_src = batadv_arp_hw_src(skb, 0);
-	ip_src = batadv_arp_ip_src(skb, 0);
-	hw_dst = batadv_arp_hw_dst(skb, 0);
-	ip_dst = batadv_arp_ip_dst(skb, 0);
+	hw_src = batadv_arp_hw_src(skb, hdr_size);
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
 	/* Send the ARP reply to the candidates for both the IP addresses that
 	 * the node obtained from the ARP reply
@@ -992,10 +1109,13 @@
 	__be32 ip_src, ip_dst;
 	uint8_t *hw_src, *hw_dst;
 	bool ret = false;
+	unsigned short vid;
 
 	if (!atomic_read(&bat_priv->distributed_arp_table))
 		goto out;
 
+	vid = batadv_dat_get_vid(skb, &hdr_size);
+
 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
 	if (type != ARPOP_REPLY)
 		goto out;
@@ -1011,13 +1131,13 @@
 	/* Update our internal cache with both the IP addresses the node got
 	 * within the ARP reply
 	 */
-	batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
 	/* if this REPLY is directed to a client of mine, let's deliver the
 	 * packet to the interface
 	 */
-	ret = !batadv_is_my_client(bat_priv, hw_dst);
+	ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
 out:
 	if (ret)
 		kfree_skb(skb);
@@ -1040,7 +1160,8 @@
 	__be32 ip_dst;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
-	const size_t bcast_len = sizeof(struct batadv_bcast_packet);
+	int hdr_size = sizeof(struct batadv_bcast_packet);
+	unsigned short vid;
 
 	if (!atomic_read(&bat_priv->distributed_arp_table))
 		goto out;
@@ -1051,12 +1172,14 @@
 	if (forw_packet->num_packets)
 		goto out;
 
-	type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
+	vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
+
+	type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
 	if (type != ARPOP_REQUEST)
 		goto out;
 
-	ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
-	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+	ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
 	/* check if the node already got this entry */
 	if (!dat_entry) {
 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 125c8c6..60d853b 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -29,6 +29,7 @@
 
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
+void batadv_dat_status_update(struct net_device *net_dev);
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 					   struct sk_buff *skb);
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
@@ -98,6 +99,10 @@
 
 #else
 
+static inline void batadv_dat_status_update(struct net_device *net_dev)
+{
+}
+
 static inline bool
 batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 				      struct sk_buff *skb)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
new file mode 100644
index 0000000..271d321
--- /dev/null
+++ b/net/batman-adv/fragmentation.c
@@ -0,0 +1,491 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "fragmentation.h"
+#include "send.h"
+#include "originator.h"
+#include "routing.h"
+#include "hard-interface.h"
+#include "soft-interface.h"
+
+
+/**
+ * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * @head: head of chain with entries.
+ *
+ * Free fragments in the passed hlist. Should be called with appropriate lock.
+ */
+static void batadv_frag_clear_chain(struct hlist_head *head)
+{
+	struct batadv_frag_list_entry *entry;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(entry, node, head, list) {
+		hlist_del(&entry->list);
+		kfree_skb(entry->skb);
+		kfree(entry);
+	}
+}
+
+/**
+ * batadv_frag_purge_orig - free fragments associated to an orig
+ * @orig_node: originator to free fragments from
+ * @check_cb: optional function to tell if an entry should be purged
+ */
+void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
+			    bool (*check_cb)(struct batadv_frag_table_entry *))
+{
+	struct batadv_frag_table_entry *chain;
+	uint8_t i;
+
+	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+		chain = &orig_node->fragments[i];
+		spin_lock_bh(&orig_node->fragments[i].lock);
+
+		if (!check_cb || check_cb(chain)) {
+			batadv_frag_clear_chain(&orig_node->fragments[i].head);
+			orig_node->fragments[i].size = 0;
+		}
+
+		spin_unlock_bh(&orig_node->fragments[i].lock);
+	}
+}
+
+/**
+ * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ *
+ * Returns the maximum size of payload that can be fragmented.
+ */
+static int batadv_frag_size_limit(void)
+{
+	int limit = BATADV_FRAG_MAX_FRAG_SIZE;
+
+	limit -= sizeof(struct batadv_frag_packet);
+	limit *= BATADV_FRAG_MAX_FRAGMENTS;
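+	/* e.g. with a BATADV_FRAG_MAX_FRAG_SIZE of 1400 bytes and a
+	 * BATADV_FRAG_MAX_FRAGMENTS of 16 this allows merged payloads of
+	 * roughly 22 kB
+	 */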
+
+	return limit;
+}
+
+/**
+ * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * @chain: chain in fragments table to init
+ * @seqno: sequence number of the received fragment
+ *
+ * Make chain ready for a fragment with sequence number "seqno". Delete existing
+ * entries if they have an "old" sequence number.
+ *
+ * Caller must hold chain->lock.
+ *
+ * Returns true if chain is empty and caller can just insert the new fragment
+ * without searching for the right position.
+ */
+static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
+				   uint16_t seqno)
+{
+	if (chain->seqno == seqno)
+		return false;
+
+	if (!hlist_empty(&chain->head))
+		batadv_frag_clear_chain(&chain->head);
+
+	chain->size = 0;
+	chain->seqno = seqno;
+
+	return true;
+}
+
+/**
+ * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * @orig_node: originator that the fragment was received from
+ * @skb: skb to insert
+ * @chain_out: list head to attach complete chains of fragments to
+ *
+ * Insert a new fragment into the reverse ordered chain in the right table
+ * entry. The hash table entry is cleared if "old" fragments exist in it.
+ *
+ * Returns true if skb is buffered, false on error. If the chain has all the
+ * fragments needed to merge the packet, the chain is moved to the passed head
+ * to avoid locking the chain in the table.
+ */
+static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+				      struct sk_buff *skb,
+				      struct hlist_head *chain_out)
+{
+	struct batadv_frag_table_entry *chain;
+	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+	struct batadv_frag_packet *frag_packet;
+	uint8_t bucket;
+	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+	bool ret = false;
+
+	/* Linearize packet to avoid linearizing 16 packets in a row when doing
+	 * the later merge. Non-linear merge should be added to remove this
+	 * linearization.
+	 */
+	if (skb_linearize(skb) < 0)
+		goto err;
+
+	frag_packet = (struct batadv_frag_packet *)skb->data;
+	seqno = ntohs(frag_packet->seqno);
+	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
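+	/* all fragments of one packet share its seqno and thus pick the
+	 * same chain; a chain still holding an older seqno is recycled by
+	 * batadv_frag_init_chain()
+	 */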
+
+	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
+	if (!frag_entry_new)
+		goto err;
+
+	frag_entry_new->skb = skb;
+	frag_entry_new->no = frag_packet->no;
+
+	/* Select entry in the "chain table" and delete any prior fragments
+	 * with another sequence number. batadv_frag_init_chain() returns
+	 * true if the list is empty at return.
+	 */
+	chain = &orig_node->fragments[bucket];
+	spin_lock_bh(&chain->lock);
+	if (batadv_frag_init_chain(chain, seqno)) {
+		hlist_add_head(&frag_entry_new->list, &chain->head);
+		chain->size = skb->len - hdr_size;
+		chain->timestamp = jiffies;
+		ret = true;
+		goto out;
+	}
+
+	/* Find the position for the new fragment. */
+	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
+		/* Drop packet if fragment already exists. */
+		if (frag_entry_curr->no == frag_entry_new->no)
+			goto err_unlock;
+
+		/* Order fragments from highest to lowest. */
+		if (frag_entry_curr->no < frag_entry_new->no) {
+			hlist_add_before(&frag_entry_new->list,
+					 &frag_entry_curr->list);
+			chain->size += skb->len - hdr_size;
+			chain->timestamp = jiffies;
+			ret = true;
+			goto out;
+		}
+	}
+
+	/* Reached the end of the list, so insert after 'frag_entry_curr'. */
+	if (likely(frag_entry_curr)) {
+		hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+		chain->size += skb->len - hdr_size;
+		chain->timestamp = jiffies;
+		ret = true;
+	}
+
+out:
+	if (chain->size > batadv_frag_size_limit() ||
+	    ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+		/* Clear chain if total size of either the list or the packet
+		 * exceeds the maximum size of one merged packet.
+		 */
+		batadv_frag_clear_chain(&chain->head);
+		chain->size = 0;
+	} else if (ntohs(frag_packet->total_size) == chain->size) {
+		/* All fragments received. Hand over chain to caller. */
+		hlist_move_list(&chain->head, chain_out);
+		chain->size = 0;
+	}
+
+err_unlock:
+	spin_unlock_bh(&chain->lock);
+
+err:
+	if (!ret)
+		kfree(frag_entry_new);
+
+	return ret;
+}
+
+/**
+ * batadv_frag_merge_packets - merge a chain of fragments
+ * @chain: head of chain with fragments
+ * @skb: packet whose fragment header carries the total size after merging
+ *
+ * Expand the first skb in the chain and copy the content of the remaining
+ * skb's into the expanded one. After doing so, clear the chain.
+ *
+ * Returns the merged skb or NULL on error.
+ */
+static struct sk_buff *
+batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+{
+	struct batadv_frag_packet *packet;
+	struct batadv_frag_list_entry *entry;
+	struct sk_buff *skb_out = NULL;
+	int size, hdr_size = sizeof(struct batadv_frag_packet);
+
+	/* Make sure incoming skb has non-bogus data. */
+	packet = (struct batadv_frag_packet *)skb->data;
+	size = ntohs(packet->total_size);
+	if (size > batadv_frag_size_limit())
+		goto free;
+
+	/* Remove first entry, as this is the destination for the rest of the
+	 * fragments.
+	 */
+	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
+	hlist_del(&entry->list);
+	skb_out = entry->skb;
+	kfree(entry);
+
+	/* Make room for the rest of the fragments. */
+	if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+		kfree_skb(skb_out);
+		skb_out = NULL;
+		goto free;
+	}
+
+	/* Move the existing MAC header to just before the payload. (Override
+	 * the fragment header.)
+	 */
+	skb_pull_rcsum(skb_out, hdr_size);
+	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
+	skb_set_mac_header(skb_out, -ETH_HLEN);
+	skb_reset_network_header(skb_out);
+	skb_reset_transport_header(skb_out);
+
+	/* Copy the payload of each remaining fragment into the merged skb */
+	hlist_for_each_entry(entry, chain, list) {
+		size = entry->skb->len - hdr_size;
+		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
+		       size);
+	}
+
+free:
+	/* Locking is not needed, because 'chain' is not part of any orig. */
+	batadv_frag_clear_chain(chain);
+	return skb_out;
+}
+
+/**
+ * batadv_frag_skb_buffer - buffer fragment for later merge
+ * @skb: skb to buffer
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Add fragment to buffer and merge fragments if possible.
+ *
+ * There are three possible outcomes: 1) Packet is merged: Return true and
+ * set *skb to the merged packet; 2) Packet is buffered: Return true and set
+ * *skb to NULL; 3) Error: Return false and leave *skb as is.
+ */
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+			    struct batadv_orig_node *orig_node_src)
+{
+	struct sk_buff *skb_out = NULL;
+	struct hlist_head head = HLIST_HEAD_INIT;
+	bool ret = false;
+
+	/* Add packet to buffer and table entry if merge is possible. */
+	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
+		goto out_err;
+
+	/* Leave if more fragments are needed to merge. */
+	if (hlist_empty(&head))
+		goto out;
+
+	skb_out = batadv_frag_merge_packets(&head, *skb);
+	if (!skb_out)
+		goto out_err;
+
+out:
+	*skb = skb_out;
+	ret = true;
+out_err:
+	return ret;
+}
+
+/**
+ * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * @skb: skb to forward
+ * @recv_if: interface that the skb is received on
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Look up the next-hop of the fragment's payload and check if the merged
+ * packet will exceed the MTU towards the next-hop. If so, the fragment is
+ * forwarded without merging it.
+ *
+ * Returns true if the fragment is consumed/forwarded, false otherwise.
+ */
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+			 struct batadv_hard_iface *recv_if,
+			 struct batadv_orig_node *orig_node_src)
+{
+	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct batadv_orig_node *orig_node_dst = NULL;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_frag_packet *packet;
+	uint16_t total_size;
+	bool ret = false;
+
+	packet = (struct batadv_frag_packet *)skb->data;
+	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
+	if (!orig_node_dst)
+		goto out;
+
+	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
+	if (!neigh_node)
+		goto out;
+
+	/* Forward the fragment, if the merged packet would be too big to
+	 * be assembled.
+	 */
+	total_size = ntohs(packet->total_size);
+	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
+		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
+		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
+				   skb->len + ETH_HLEN);
+
+		packet->header.ttl--;
+		batadv_send_skb_packet(skb, neigh_node->if_incoming,
+				       neigh_node->addr);
+		ret = true;
+	}
+
+out:
+	if (orig_node_dst)
+		batadv_orig_node_free_ref(orig_node_dst);
+	if (neigh_node)
+		batadv_neigh_node_free_ref(neigh_node);
+	return ret;
+}
+
+/**
+ * batadv_frag_create - create a fragment from skb
+ * @skb: skb to create fragment from
+ * @frag_head: header to use in new fragment
+ * @mtu: size of new fragment
+ *
+ * Split the passed skb into two fragments: A new one with size matching the
+ * passed mtu and the old one with the rest. The new skb contains data from the
+ * tail of the old skb.
+ *
+ * Returns the new fragment or NULL on error.
+ */
+static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
+					  struct batadv_frag_packet *frag_head,
+					  unsigned int mtu)
+{
+	struct sk_buff *skb_fragment;
+	unsigned int header_size = sizeof(*frag_head);
+	unsigned int fragment_size = mtu - header_size;
+
+	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
+	if (!skb_fragment)
+		goto err;
+
+	skb->priority = TC_PRIO_CONTROL;
+
+	/* Eat the last fragment_size bytes from the skb's tail */
+	skb_reserve(skb_fragment, header_size + ETH_HLEN);
+	skb_split(skb, skb_fragment, skb->len - fragment_size);
+
+	/* Add the header */
+	skb_push(skb_fragment, header_size);
+	memcpy(skb_fragment->data, frag_head, header_size);
+
+err:
+	return skb_fragment;
+}
+
+/**
+ * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * @skb: skb to create fragments from
+ * @orig_node: final destination of the created fragments
+ * @neigh_node: next-hop of the created fragments
+ *
+ * Returns true on success, false otherwise.
+ */
+bool batadv_frag_send_packet(struct sk_buff *skb,
+			     struct batadv_orig_node *orig_node,
+			     struct batadv_neigh_node *neigh_node)
+{
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *primary_if;
+	struct batadv_frag_packet frag_header;
+	struct sk_buff *skb_fragment;
+	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
+	unsigned int header_size = sizeof(frag_header);
+	unsigned int max_fragment_size, max_packet_size;
+
+	/* To avoid merge and refragmentation at next-hops we never send
+	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE.
+	 */
+	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+	max_fragment_size = (mtu - header_size - ETH_HLEN);
+	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+	/* Don't even try to fragment if we need more than 16 fragments */
+	if (skb->len > max_packet_size)
+		goto out_err;
+
+	bat_priv = orig_node->bat_priv;
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out_err;
+
+	/* Create one header to be copied to all fragments */
+	frag_header.header.packet_type = BATADV_UNICAST_FRAG;
+	frag_header.header.version = BATADV_COMPAT_VERSION;
+	frag_header.header.ttl = BATADV_TTL;
+	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+	frag_header.reserved = 0;
+	frag_header.no = 0;
+	frag_header.total_size = htons(skb->len);
+	memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
+
+	/* Eat and send fragments from the tail of skb */
+	while (skb->len > max_fragment_size) {
+		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+		if (!skb_fragment)
+			goto out_err;
+
+		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+				   skb_fragment->len + ETH_HLEN);
+		batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
+				       neigh_node->addr);
+		frag_header.no++;
+
+		/* The initial check in this function should cover this case */
+		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
+			goto out_err;
+	}
+
+	/* Make room for the fragment header. */
+	if (batadv_skb_head_push(skb, header_size) < 0 ||
+	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
+		goto out_err;
+
+	memcpy(skb->data, &frag_header, header_size);
+
+	/* Send the last fragment */
+	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+			   skb->len + ETH_HLEN);
+	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+
+	return true;
+out_err:
+	return false;
+}
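A minimal userspace sketch of the fragment budget computed in batadv_frag_send_packet() above. The constant values (a 1400 byte BATADV_FRAG_MAX_FRAG_SIZE, 16 fragments, a 20 byte struct batadv_frag_packet) are assumptions taken from this series' packet.h, not guaranteed by this hunk:

#include <stdio.h>

#define ETH_HLEN			14
#define BATADV_FRAG_MAX_FRAG_SIZE	1400	/* assumed, see packet.h */
#define BATADV_FRAG_MAX_FRAGMENTS	16	/* assumed, see packet.h */
#define FRAG_HEADER_SIZE		20	/* assumed sizeof(struct batadv_frag_packet) */

int main(void)
{
	unsigned int mtu = 1500;	/* typical hard interface MTU */
	unsigned int max_fragment_size, max_packet_size;

	/* never send fragments larger than BATADV_FRAG_MAX_FRAG_SIZE */
	if (mtu > BATADV_FRAG_MAX_FRAG_SIZE)
		mtu = BATADV_FRAG_MAX_FRAG_SIZE;

	max_fragment_size = mtu - FRAG_HEADER_SIZE - ETH_HLEN;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* prints: 1366 byte fragment payloads, 21856 byte packet limit */
	printf("%u byte fragment payloads, %u byte packet limit\n",
	       max_fragment_size, max_packet_size);
	return 0;
}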
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
new file mode 100644
index 0000000..ca029e2
--- /dev/null
+++ b/net/batman-adv/fragmentation.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
+#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+
+void batadv_frag_purge_orig(struct batadv_orig_node *orig,
+			    bool (*check_cb)(struct batadv_frag_table_entry *));
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+			 struct batadv_hard_iface *recv_if,
+			 struct batadv_orig_node *orig_node_src);
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+			    struct batadv_orig_node *orig_node_src);
+bool batadv_frag_send_packet(struct sk_buff *skb,
+			     struct batadv_orig_node *orig_node,
+			     struct batadv_neigh_node *neigh_node);
+
+/**
+ * batadv_frag_check_entry - check if a list of fragments has timed out
+ * @frags_entry: table entry to check
+ *
+ * Returns true if the frags entry has timed out, false otherwise.
+ */
+static inline bool
+batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
+{
+	return !hlist_empty(&frags_entry->head) &&
+	       batadv_has_timed_out(frags_entry->timestamp,
+				    BATADV_FRAG_TIMEOUT);
+}
+
+#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
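For reference, a userspace analogue of the timeout predicate above. The real helper compares jiffies via batadv_has_timed_out(); the 10000 ms BATADV_FRAG_TIMEOUT is an assumption taken from this series' main.h:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BATADV_FRAG_TIMEOUT_MS 10000	/* assumed, see main.h */

/* userspace stand-in for batadv_has_timed_out() on a fragment chain */
static bool frag_has_timed_out(time_t timestamp_ms, time_t now_ms)
{
	return now_ms - timestamp_ms > BATADV_FRAG_TIMEOUT_MS;
}

int main(void)
{
	time_t now_ms = 25000;

	printf("chain from t=12000ms timed out: %d\n",
	       frag_has_timed_out(12000, now_ms));	/* 1: 13s old */
	printf("chain from t=20000ms timed out: %d\n",
	       frag_has_timed_out(20000, now_ms));	/* 0: 5s old */
	return 0;
}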
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 1ce4b87..2449afa 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -118,7 +118,6 @@
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	uint32_t gw_divisor;
 	uint8_t max_tq = 0;
-	int down, up;
 	uint8_t tq_avg;
 	struct batadv_orig_node *orig_node;
 
@@ -138,14 +137,13 @@
 		if (!atomic_inc_not_zero(&gw_node->refcount))
 			goto next;
 
-		tq_avg = router->tq_avg;
+		tq_avg = router->bat_iv.tq_avg;
 
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
 		case 1: /* fast connection */
-			batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
-						    &down, &up);
-
-			tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+			tmp_gw_factor = tq_avg * tq_avg;
+			tmp_gw_factor *= gw_node->bandwidth_down;
+			tmp_gw_factor *= 100 * 100;
 			tmp_gw_factor /= gw_divisor;
 
 			if ((tmp_gw_factor > max_gw_factor) ||
@@ -223,11 +221,6 @@
 	struct batadv_neigh_node *router = NULL;
 	char gw_addr[18] = { '\0' };
 
-	/* The batman daemon checks here if we already passed a full originator
-	 * cycle in order to make sure we don't choose the first gateway we
-	 * hear about. This check is based on the daemon's uptime which we
-	 * don't have.
-	 */
 	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
 
@@ -258,16 +251,22 @@
 				    NULL);
 	} else if ((!curr_gw) && (next_gw)) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+			   "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
 			   next_gw->orig_node->orig,
-			   next_gw->orig_node->gw_flags, router->tq_avg);
+			   next_gw->bandwidth_down / 10,
+			   next_gw->bandwidth_down % 10,
+			   next_gw->bandwidth_up / 10,
+			   next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
 		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
 				    gw_addr);
 	} else {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+			   "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
 			   next_gw->orig_node->orig,
-			   next_gw->orig_node->gw_flags, router->tq_avg);
+			   next_gw->bandwidth_down / 10,
+			   next_gw->bandwidth_down % 10,
+			   next_gw->bandwidth_up / 10,
+			   next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
 		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
 				    gw_addr);
 	}
@@ -306,8 +305,8 @@
 	if (!router_orig)
 		goto out;
 
-	gw_tq_avg = router_gw->tq_avg;
-	orig_tq_avg = router_orig->tq_avg;
+	gw_tq_avg = router_gw->bat_iv.tq_avg;
+	orig_tq_avg = router_orig->bat_iv.tq_avg;
 
 	/* the TQ value has to be better */
 	if (orig_tq_avg < gw_tq_avg)
@@ -337,12 +336,20 @@
 	return;
 }
 
+/**
+ * batadv_gw_node_add - add gateway node to list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       uint8_t new_gwflags)
+			       struct batadv_tvlv_gateway_data *gateway)
 {
 	struct batadv_gw_node *gw_node;
-	int down, up;
+
+	if (gateway->bandwidth_down == 0)
+		return;
 
 	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
 	if (!gw_node)
@@ -356,73 +363,116 @@
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
 	spin_unlock_bh(&bat_priv->gw.list_lock);
 
-	batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
-		   orig_node->orig, new_gwflags,
-		   (down > 2048 ? down / 1024 : down),
-		   (down > 2048 ? "MBit" : "KBit"),
-		   (up > 2048 ? up / 1024 : up),
-		   (up > 2048 ? "MBit" : "KBit"));
+		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+		   orig_node->orig,
+		   ntohl(gateway->bandwidth_down) / 10,
+		   ntohl(gateway->bandwidth_down) % 10,
+		   ntohl(gateway->bandwidth_up) / 10,
+		   ntohl(gateway->bandwidth_up) % 10);
 }
 
-void batadv_gw_node_update(struct batadv_priv *bat_priv,
-			   struct batadv_orig_node *orig_node,
-			   uint8_t new_gwflags)
+/**
+ * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ *
+ * Returns gateway node if found or NULL otherwise.
+ */
+static struct batadv_gw_node *
+batadv_gw_node_get(struct batadv_priv *bat_priv,
+		   struct batadv_orig_node *orig_node)
 {
-	struct batadv_gw_node *gw_node, *curr_gw;
-
-	/* Note: We don't need a NULL check here, since curr_gw never gets
-	 * dereferenced. If curr_gw is NULL we also should not exit as we may
-	 * have this gateway in our list (duplication check!) even though we
-	 * have no currently selected gateway.
-	 */
-	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+	struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		if (gw_node->orig_node != orig_node)
+	hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
+		if (gw_node_tmp->orig_node != orig_node)
 			continue;
 
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Gateway class of originator %pM changed from %i to %i\n",
-			   orig_node->orig, gw_node->orig_node->gw_flags,
-			   new_gwflags);
+		if (gw_node_tmp->deleted)
+			continue;
 
-		gw_node->deleted = 0;
+		if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
+			continue;
 
-		if (new_gwflags == BATADV_NO_FLAGS) {
-			gw_node->deleted = jiffies;
-			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-				   "Gateway %pM removed from gateway list\n",
-				   orig_node->orig);
-
-			if (gw_node == curr_gw)
-				goto deselect;
-		}
-
-		goto unlock;
+		gw_node = gw_node_tmp;
+		break;
 	}
-
-	if (new_gwflags == BATADV_NO_FLAGS)
-		goto unlock;
-
-	batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
-	goto unlock;
-
-deselect:
-	batadv_gw_deselect(bat_priv);
-unlock:
 	rcu_read_unlock();
 
+	return gw_node;
+}
+
+/**
+ * batadv_gw_node_update - update list of available gateways with changed
+ *  bandwidth information
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+			   struct batadv_orig_node *orig_node,
+			   struct batadv_tvlv_gateway_data *gateway)
+{
+	struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
+	gw_node = batadv_gw_node_get(bat_priv, orig_node);
+	if (!gw_node) {
+		batadv_gw_node_add(bat_priv, orig_node, gateway);
+		goto out;
+	}
+
+	if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
+	    (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
+		goto out;
+
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		   "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
+		   orig_node->orig,
+		   gw_node->bandwidth_down / 10,
+		   gw_node->bandwidth_down % 10,
+		   gw_node->bandwidth_up / 10,
+		   gw_node->bandwidth_up % 10,
+		   ntohl(gateway->bandwidth_down) / 10,
+		   ntohl(gateway->bandwidth_down) % 10,
+		   ntohl(gateway->bandwidth_up) / 10,
+		   ntohl(gateway->bandwidth_up) % 10);
+
+	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+	gw_node->deleted = 0;
+	if (ntohl(gateway->bandwidth_down) == 0) {
+		gw_node->deleted = jiffies;
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Gateway %pM removed from gateway list\n",
+			   orig_node->orig);
+
+		/* Note: We don't need a NULL check here, since curr_gw never
+		 * gets dereferenced.
+		 */
+		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+		if (gw_node == curr_gw)
+			batadv_gw_deselect(bat_priv);
+	}
+
+out:
 	if (curr_gw)
 		batadv_gw_node_free_ref(curr_gw);
+	if (gw_node)
+		batadv_gw_node_free_ref(gw_node);
 }
 
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node)
 {
-	batadv_gw_node_update(bat_priv, orig_node, 0);
+	struct batadv_tvlv_gateway_data gateway;
+
+	gateway.bandwidth_down = 0;
+	gateway.bandwidth_up = 0;
+
+	batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
@@ -467,9 +517,7 @@
 {
 	struct batadv_gw_node *curr_gw;
 	struct batadv_neigh_node *router;
-	int down, up, ret = -1;
-
-	batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+	int ret = -1;
 
 	router = batadv_orig_node_get_router(gw_node->orig_node);
 	if (!router)
@@ -477,16 +525,15 @@
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
 			 (curr_gw == gw_node ? "=>" : "  "),
 			 gw_node->orig_node->orig,
-			 router->tq_avg, router->addr,
+			 router->bat_iv.tq_avg, router->addr,
 			 router->if_incoming->net_dev->name,
-			 gw_node->orig_node->gw_flags,
-			 (down > 2048 ? down / 1024 : down),
-			 (down > 2048 ? "MBit" : "KBit"),
-			 (up > 2048 ? up / 1024 : up),
-			 (up > 2048 ? "MBit" : "KBit"));
+			 gw_node->bandwidth_down / 10,
+			 gw_node->bandwidth_down % 10,
+			 gw_node->bandwidth_up / 10,
+			 gw_node->bandwidth_up % 10);
 
 	batadv_neigh_node_free_ref(router);
 	if (curr_gw)
@@ -508,7 +555,7 @@
 		goto out;
 
 	seq_printf(seq,
-		   "      %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+		   "      %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
 		   "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
 		   primary_if->net_dev->dev_addr, net_dev->name);
@@ -603,24 +650,29 @@
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
+	struct vlan_ethhdr *vhdr;
+	__be16 proto;
 
 	/* check for ethernet header */
 	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
 		return false;
 	ethhdr = (struct ethhdr *)skb->data;
+	proto = ethhdr->h_proto;
 	*header_len += ETH_HLEN;
 
 	/* check for initial vlan header */
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+	if (proto == htons(ETH_P_8021Q)) {
 		if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
 			return false;
-		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
+		vhdr = (struct vlan_ethhdr *)skb->data;
+		proto = vhdr->h_vlan_encapsulated_proto;
 		*header_len += VLAN_HLEN;
 	}
 
 	/* check for ip header */
-	switch (ntohs(ethhdr->h_proto)) {
-	case ETH_P_IP:
+	switch (proto) {
+	case htons(ETH_P_IP):
 		if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
 			return false;
 		iphdr = (struct iphdr *)(skb->data + *header_len);
@@ -631,7 +683,7 @@
 			return false;
 
 		break;
-	case ETH_P_IPV6:
+	case htons(ETH_P_IPV6):
 		if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
 			return false;
 		ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
@@ -658,28 +710,44 @@
 	*header_len += sizeof(*udphdr);
 
 	/* check for bootp port */
-	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
-	    (ntohs(udphdr->dest) != 67))
+	if ((proto == htons(ETH_P_IP)) &&
+	    (udphdr->dest != htons(67)))
 		return false;
 
-	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
-	    (ntohs(udphdr->dest) != 547))
+	if ((proto == htons(ETH_P_IPV6)) &&
+	    (udphdr->dest != htons(547)))
 		return false;
 
 	return true;
 }
 
-/* this call might reallocate skb data */
+/**
+ * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the outgoing packet
+ *
+ * Check if the skb is a DHCP request and if it is sent to the current best GW
+ * server. Due to topology changes it may be the case that the GW server
+ * previously selected is not the best one anymore.
+ *
+ * Returns true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
+ *
+ * This call might reallocate skb data.
+ */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 			    struct sk_buff *skb)
 {
 	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
 	struct batadv_orig_node *orig_dst_node = NULL;
-	struct batadv_gw_node *curr_gw = NULL;
+	struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
 	struct ethhdr *ethhdr;
 	bool ret, out_of_range = false;
 	unsigned int header_len = 0;
 	uint8_t curr_tq_avg;
+	unsigned short vid;
+
+	vid = batadv_get_vid(skb, 0);
 
 	ret = batadv_gw_is_dhcp_target(skb, &header_len);
 	if (!ret)
@@ -687,11 +755,12 @@
 
 	ethhdr = (struct ethhdr *)skb->data;
 	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-						 ethhdr->h_dest);
+						 ethhdr->h_dest, vid);
 	if (!orig_dst_node)
 		goto out;
 
-	if (!orig_dst_node->gw_flags)
+	gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+	if (!gw_node || gw_node->bandwidth_down == 0)
 		goto out;
 
 	ret = batadv_is_type_dhcprequest(skb, header_len);
@@ -723,7 +792,7 @@
 		if (!neigh_curr)
 			goto out;
 
-		curr_tq_avg = neigh_curr->tq_avg;
+		curr_tq_avg = neigh_curr->bat_iv.tq_avg;
 		break;
 	case BATADV_GW_MODE_OFF:
 	default:
@@ -734,7 +803,7 @@
 	if (!neigh_old)
 		goto out;
 
-	if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
+	if (curr_tq_avg - neigh_old->bat_iv.tq_avg > BATADV_GW_THRESHOLD)
 		out_of_range = true;
 
 out:
@@ -742,6 +811,8 @@
 		batadv_orig_node_free_ref(orig_dst_node);
 	if (curr_gw)
 		batadv_gw_node_free_ref(curr_gw);
+	if (gw_node)
+		batadv_gw_node_free_ref(gw_node);
 	if (neigh_old)
 		batadv_neigh_node_free_ref(neigh_old);
 	if (neigh_curr)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index ceef4eb..d95c2d2 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -29,7 +29,7 @@
 			      struct batadv_orig_node *orig_node);
 void batadv_gw_node_update(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node,
-			   uint8_t new_gwflags);
+			   struct batadv_tvlv_gateway_data *gateway);
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node);
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 84bb2b1..b211b0f 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -21,64 +21,23 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 
-/* calculates the gateway class from kbit */
-static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
-{
-	int mdown = 0, tdown, tup, difference;
-	uint8_t sbit, part;
-
-	*gw_srv_class = 0;
-	difference = 0x0FFFFFFF;
-
-	/* test all downspeeds */
-	for (sbit = 0; sbit < 2; sbit++) {
-		for (part = 0; part < 16; part++) {
-			tdown = 32 * (sbit + 2) * (1 << part);
-
-			if (abs(tdown - down) < difference) {
-				*gw_srv_class = (sbit << 7) + (part << 3);
-				difference = abs(tdown - down);
-				mdown = tdown;
-			}
-		}
-	}
-
-	/* test all upspeeds */
-	difference = 0x0FFFFFFF;
-
-	for (part = 0; part < 8; part++) {
-		tup = ((part + 1) * (mdown)) / 8;
-
-		if (abs(tup - up) < difference) {
-			*gw_srv_class = (*gw_srv_class & 0xF8) | part;
-			difference = abs(tup - up);
-		}
-	}
-}
-
-/* returns the up and downspeeds in kbit, calculated from the class */
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
-{
-	int sbit = (gw_srv_class & 0x80) >> 7;
-	int dpart = (gw_srv_class & 0x78) >> 3;
-	int upart = (gw_srv_class & 0x07);
-
-	if (!gw_srv_class) {
-		*down = 0;
-		*up = 0;
-		return;
-	}
-
-	*down = 32 * (sbit + 2) * (1 << dpart);
-	*up = ((upart + 1) * (*down)) / 8;
-}
-
+/**
+ * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
+ *  and upload bandwidth information
+ * @net_dev: the soft interface net device
+ * @buff: string buffer to parse
+ * @down: pointer holding the returned download bandwidth information
+ * @up: pointer holding the returned upload bandwidth information
+ *
+ * Returns false on parse error and true otherwise.
+ */
 static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-				      int *up, int *down)
+				      uint32_t *down, uint32_t *up)
 {
-	int ret, multi = 1;
+	enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
 	char *slash_ptr, *tmp_ptr;
 	long ldown, lup;
+	int ret;
 
 	slash_ptr = strchr(buff, '/');
 	if (slash_ptr)
@@ -88,10 +47,10 @@
 		tmp_ptr = buff + strlen(buff) - 4;
 
 		if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-			multi = 1024;
+			bw_unit_type = BATADV_BW_UNIT_MBIT;
 
 		if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-		    (multi > 1))
+		    (bw_unit_type == BATADV_BW_UNIT_MBIT))
 			*tmp_ptr = '\0';
 	}
 
@@ -103,20 +62,28 @@
 		return false;
 	}
 
-	*down = ldown * multi;
+	switch (bw_unit_type) {
+	case BATADV_BW_UNIT_MBIT:
+		*down = ldown * 10;
+		break;
+	case BATADV_BW_UNIT_KBIT:
+	default:
+		*down = ldown / 100;
+		break;
+	}
 
 	/* we also got some upload info */
 	if (slash_ptr) {
-		multi = 1;
+		bw_unit_type = BATADV_BW_UNIT_KBIT;
 
 		if (strlen(slash_ptr + 1) > 4) {
 			tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1);
 
 			if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-				multi = 1024;
+				bw_unit_type = BATADV_BW_UNIT_MBIT;
 
 			if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-			    (multi > 1))
+			    (bw_unit_type == BATADV_BW_UNIT_MBIT))
 				*tmp_ptr = '\0';
 		}
 
@@ -128,52 +95,149 @@
 			return false;
 		}
 
-		*up = lup * multi;
+		switch (bw_unit_type) {
+		case BATADV_BW_UNIT_MBIT:
+			*up = lup * 10;
+			break;
+		case BATADV_BW_UNIT_KBIT:
+		default:
+			*up = lup / 100;
+			break;
+		}
 	}
 
 	return true;
 }
 
+/**
+ * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+	struct batadv_tvlv_gateway_data gw;
+	uint32_t down, up;
+	char gw_mode;
+
+	gw_mode = atomic_read(&bat_priv->gw_mode);
+
+	switch (gw_mode) {
+	case BATADV_GW_MODE_OFF:
+	case BATADV_GW_MODE_CLIENT:
+		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+		break;
+	case BATADV_GW_MODE_SERVER:
+		down = atomic_read(&bat_priv->gw.bandwidth_down);
+		up = atomic_read(&bat_priv->gw.bandwidth_up);
+		gw.bandwidth_down = htonl(down);
+		gw.bandwidth_up = htonl(up);
+		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+					       &gw, sizeof(gw));
+		break;
+	}
+}
+
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 				size_t count)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	long gw_bandwidth_tmp = 0;
-	int up = 0, down = 0;
+	uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
 	bool ret;
 
-	ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
+	down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
+	up_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_up);
+
+	ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
 	if (!ret)
 		goto end;
 
-	if ((!down) || (down < 256))
-		down = 2000;
+	if (!down_new)
+		down_new = 1;
 
-	if (!up)
-		up = down / 5;
+	if (!up_new)
+		up_new = down_new / 5;
 
-	batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+	if (!up_new)
+		up_new = 1;
 
-	/* the gw bandwidth we guessed above might not match the given
-	 * speeds, hence we need to calculate it back to show the number
-	 * that is going to be propagated
-	 */
-	batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
-
-	if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+	if ((down_curr == down_new) && (up_curr == up_new))
 		return count;
 
 	batadv_gw_deselect(bat_priv);
 	batadv_info(net_dev,
-		    "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
-		    atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
-		    (down > 2048 ? down / 1024 : down),
-		    (down > 2048 ? "MBit" : "KBit"),
-		    (up > 2048 ? up / 1024 : up),
-		    (up > 2048 ? "MBit" : "KBit"));
+		    "Changing gateway bandwidth from: '%u.%u/%u.%u MBit' to: '%u.%u/%u.%u MBit'\n",
+		    down_curr / 10, down_curr % 10, up_curr / 10, up_curr % 10,
+		    down_new / 10, down_new % 10, up_new / 10, up_new % 10);
 
-	atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
+	atomic_set(&bat_priv->gw.bandwidth_down, down_new);
+	atomic_set(&bat_priv->gw.bandwidth_up, up_new);
+	batadv_gw_tvlv_container_update(bat_priv);
 
 end:
 	return count;
 }
+
+/**
+ * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig,
+					  uint8_t flags,
+					  void *tvlv_value,
+					  uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
+
+	/* only fetch the tvlv value if the handler wasn't called via the
+	 * CIFNOTFND flag and if there is data to fetch
+	 */
+	if ((flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) ||
+	    (tvlv_value_len < sizeof(gateway))) {
+		gateway.bandwidth_down = 0;
+		gateway.bandwidth_up = 0;
+	} else {
+		gateway_ptr = tvlv_value;
+		gateway.bandwidth_down = gateway_ptr->bandwidth_down;
+		gateway.bandwidth_up = gateway_ptr->bandwidth_up;
+		if ((gateway.bandwidth_down == 0) ||
+		    (gateway.bandwidth_up == 0)) {
+			gateway.bandwidth_down = 0;
+			gateway.bandwidth_up = 0;
+		}
+	}
+
+	batadv_gw_node_update(bat_priv, orig, &gateway);
+
+	/* restart gateway selection if fast or late switching was enabled */
+	if ((gateway.bandwidth_down != 0) &&
+	    (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
+	    (atomic_read(&bat_priv->gw_sel_class) > 2))
+		batadv_gw_check_election(bat_priv, orig);
+}
+
+/**
+ * batadv_gw_init - initialise the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_init(struct batadv_priv *bat_priv)
+{
+	batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
+				     NULL, BATADV_TVLV_GW, 1,
+				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_gw_free - free the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_free(struct batadv_priv *bat_priv)
+{
+	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
+}
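The parser above stores bandwidth in multiples of 100 kbit/s: "mbit" input is multiplied by 10 and "kbit" input divided by 100, which is why every log and seq_file site in this series prints value/10 "." value%10 MBit. A quick standalone check of that round trip:

#include <stdio.h>

int main(void)
{
	unsigned int down = 5 * 10;	/* "5mbit"   -> 50 units of 100 kbit/s */
	unsigned int up = 512 / 100;	/* "512kbit" ->  5 units of 100 kbit/s */

	/* prints: propagated: 5.0/0.5 MBit */
	printf("propagated: %u.%u/%u.%u MBit\n",
	       down / 10, down % 10, up / 10, up % 10);
	return 0;
}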
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 509b2bf..56384a4c 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -26,12 +26,24 @@
 	BATADV_GW_MODE_SERVER,
 };
 
+/**
+ * enum batadv_bandwidth_units - bandwidth unit types
+ * @BATADV_BW_UNIT_KBIT: unit type kbit
+ * @BATADV_BW_UNIT_MBIT: unit type mbit
+ */
+enum batadv_bandwidth_units {
+	BATADV_BW_UNIT_KBIT,
+	BATADV_BW_UNIT_MBIT,
+};
+
 #define BATADV_GW_MODE_OFF_NAME	"off"
 #define BATADV_GW_MODE_CLIENT_NAME	"client"
 #define BATADV_GW_MODE_SERVER_NAME	"server"
 
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 				size_t count);
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
+void batadv_gw_init(struct batadv_priv *bat_priv);
+void batadv_gw_free(struct batadv_priv *bat_priv);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c478e6b..57c2a19 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
 #include "originator.h"
 #include "hash.h"
 #include "bridge_loop_avoidance.h"
+#include "gateway_client.h"
 
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -124,8 +125,11 @@
  *
  * Returns true if the net device is a 802.11 wireless device, false otherwise.
  */
-static bool batadv_is_wifi_netdev(struct net_device *net_device)
+bool batadv_is_wifi_netdev(struct net_device *net_device)
 {
+	if (!net_device)
+		return false;
+
 #ifdef CONFIG_WIRELESS_EXT
 	/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
 	 * check for wireless_handlers != NULL
@@ -141,34 +145,6 @@
 	return false;
 }
 
-/**
- * batadv_is_wifi_iface - check if the given interface represented by ifindex
- *  is a wifi interface
- * @ifindex: interface index to check
- *
- * Returns true if the interface represented by ifindex is a 802.11 wireless
- * device, false otherwise.
- */
-bool batadv_is_wifi_iface(int ifindex)
-{
-	struct net_device *net_device = NULL;
-	bool ret = false;
-
-	if (ifindex == BATADV_NULL_IFINDEX)
-		goto out;
-
-	net_device = dev_get_by_index(&init_net, ifindex);
-	if (!net_device)
-		goto out;
-
-	ret = batadv_is_wifi_netdev(net_device);
-
-out:
-	if (net_device)
-		dev_put(net_device);
-	return ret;
-}
-
 static struct batadv_hard_iface *
 batadv_hardif_get_active(const struct net_device *soft_iface)
 {
@@ -194,22 +170,13 @@
 static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
 					  struct batadv_hard_iface *oldif)
 {
-	struct batadv_vis_packet *vis_packet;
 	struct batadv_hard_iface *primary_if;
-	struct sk_buff *skb;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
 	batadv_dat_init_own_addr(bat_priv, primary_if);
-
-	skb = bat_priv->vis.my_info->skb_packet;
-	vis_packet = (struct batadv_vis_packet *)skb->data;
-	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(vis_packet->sender_orig,
-	       primary_if->net_dev->dev_addr, ETH_ALEN);
-
 	batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
 	if (primary_if)
@@ -275,16 +242,10 @@
 
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
-	const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	const struct batadv_hard_iface *hard_iface;
-	/* allow big frames if all devices are capable to do so
-	 * (have MTU > 1500 + BAT_HEADER_LEN)
-	 */
 	int min_mtu = ETH_DATA_LEN;
 
-	if (atomic_read(&bat_priv->fragmentation))
-		goto out;
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
 		if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
@@ -294,23 +255,40 @@
 		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		min_mtu = min_t(int,
-				hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
-				min_mtu);
+		min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
 	}
 	rcu_read_unlock();
+
+	atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+	if (atomic_read(&bat_priv->fragmentation) == 0)
+		goto out;
+
+	/* with fragmentation enabled the maximum size of internally generated
+	 * packets such as translation table exchanges or tvlv containers, etc
+	 * has to be calculated
+	 */
+	min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+	min_mtu -= sizeof(struct batadv_frag_packet);
+	min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
+	atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+	/* with fragmentation enabled we can fragment external packets easily */
+	min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
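+
+	/* Example: assuming BATADV_FRAG_MAX_FRAG_SIZE is 1400 and a 20 byte
+	 * fragment header (see main.h/packet.h), 1500 byte hard interfaces
+	 * yield 1400 - 20 = 1380 byte fragment payloads, a 22080 byte limit
+	 * for internally generated packets and an external MTU of
+	 * 1500 - batadv_max_header_len().
+	 */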
+
 out:
-	return min_mtu;
+	return min_mtu - batadv_max_header_len();
 }
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
-	int min_mtu;
+	soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
 
-	min_mtu = batadv_hardif_min_mtu(soft_iface);
-	if (soft_iface->mtu != min_mtu)
-		soft_iface->mtu = min_mtu;
+	/* Check if the local translate table should be cleaned up to match a
+	 * new (and smaller) MTU.
+	 */
+	batadv_tt_local_resize_to_mtu(soft_iface);
 }
 
 static void
@@ -388,7 +366,8 @@
 {
 	struct batadv_priv *bat_priv;
 	struct net_device *soft_iface, *master;
-	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
+	__be16 ethertype = htons(ETH_P_BATMAN);
+	int max_header_len = batadv_max_header_len();
 	int ret;
 
 	if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -453,23 +432,22 @@
 	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
 	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&hard_iface->frag_seqno, 1);
 	batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
 		    hard_iface->net_dev->name);
 
 	if (atomic_read(&bat_priv->fragmentation) &&
-	    hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+	    hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
 		batadv_info(hard_iface->soft_iface,
-			    "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+			    "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
 			    hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-			    ETH_DATA_LEN + BATADV_HEADER_LEN);
+			    ETH_DATA_LEN + max_header_len);
 
 	if (!atomic_read(&bat_priv->fragmentation) &&
-	    hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+	    hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
 		batadv_info(hard_iface->soft_iface,
-			    "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+			    "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
 			    hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-			    ETH_DATA_LEN + BATADV_HEADER_LEN);
+			    ETH_DATA_LEN + max_header_len);
 
 	if (batadv_hardif_is_iface_up(hard_iface))
 		batadv_hardif_activate_interface(hard_iface);
@@ -533,8 +511,12 @@
 	dev_put(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
-	if (!bat_priv->num_ifaces && autodel == BATADV_IF_CLEANUP_AUTO)
-		batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+	if (!bat_priv->num_ifaces) {
+		batadv_gw_check_client_stop(bat_priv);
+
+		if (autodel == BATADV_IF_CLEANUP_AUTO)
+			batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+	}
 
 	netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
 	hard_iface->soft_iface = NULL;
@@ -652,6 +634,8 @@
 
 	if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
 		batadv_sysfs_add_meshif(net_dev);
+		bat_priv = netdev_priv(net_dev);
+		batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
 		return NOTIFY_DONE;
 	}
 
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 4989288..df4c8bd 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -41,6 +41,7 @@
 
 extern struct notifier_block batadv_hard_if_notifier;
 
+bool batadv_is_wifi_netdev(struct net_device *net_device);
 struct batadv_hard_iface*
 batadv_hardif_get_by_netdev(const struct net_device *net_dev);
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
@@ -51,7 +52,6 @@
 int batadv_hardif_min_mtu(struct net_device *soft_iface);
 void batadv_update_min_mtu(struct net_device *soft_iface);
 void batadv_hardif_free_rcu(struct rcu_head *rcu);
-bool batadv_is_wifi_iface(int ifindex);
 
 static inline void
 batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 5a99bb4..29ae4ef 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -29,7 +29,7 @@
 static struct batadv_socket_client *batadv_socket_client_hash[256];
 
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-				     struct batadv_icmp_packet_rr *icmp_packet,
+				     struct batadv_icmp_header *icmph,
 				     size_t icmp_len);
 
 void batadv_socket_init(void)
@@ -155,13 +155,13 @@
 	struct batadv_priv *bat_priv = socket_client->bat_priv;
 	struct batadv_hard_iface *primary_if = NULL;
 	struct sk_buff *skb;
-	struct batadv_icmp_packet_rr *icmp_packet;
-
+	struct batadv_icmp_packet_rr *icmp_packet_rr;
+	struct batadv_icmp_header *icmp_header;
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_neigh_node *neigh_node = NULL;
 	size_t packet_len = sizeof(struct batadv_icmp_packet);
 
-	if (len < sizeof(struct batadv_icmp_packet)) {
+	if (len < sizeof(struct batadv_icmp_header)) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Error - can't send packet from char device: invalid packet size\n");
 		return -EINVAL;
@@ -174,8 +174,10 @@
 		goto out;
 	}
 
-	if (len >= sizeof(struct batadv_icmp_packet_rr))
-		packet_len = sizeof(struct batadv_icmp_packet_rr);
+	if (len >= BATADV_ICMP_MAX_PACKET_SIZE)
+		packet_len = BATADV_ICMP_MAX_PACKET_SIZE;
+	else
+		packet_len = len;
 
 	skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
 	if (!skb) {
@@ -185,67 +187,78 @@
 
 	skb->priority = TC_PRIO_CONTROL;
 	skb_reserve(skb, ETH_HLEN);
-	icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
+	icmp_header = (struct batadv_icmp_header *)skb_put(skb, packet_len);
 
-	if (copy_from_user(icmp_packet, buff, packet_len)) {
+	if (copy_from_user(icmp_header, buff, packet_len)) {
 		len = -EFAULT;
 		goto free_skb;
 	}
 
-	if (icmp_packet->header.packet_type != BATADV_ICMP) {
+	if (icmp_header->header.packet_type != BATADV_ICMP) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
 		len = -EINVAL;
 		goto free_skb;
 	}
 
-	if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+	switch (icmp_header->msg_type) {
+	case BATADV_ECHO_REQUEST:
+		if (len < sizeof(struct batadv_icmp_packet)) {
+			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+				   "Error - can't send packet from char device: invalid packet size\n");
+			len = -EINVAL;
+			goto free_skb;
+		}
+
+		if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+			goto dst_unreach;
+
+		orig_node = batadv_orig_hash_find(bat_priv, icmp_header->dst);
+		if (!orig_node)
+			goto dst_unreach;
+
+		neigh_node = batadv_orig_node_get_router(orig_node);
+		if (!neigh_node)
+			goto dst_unreach;
+
+		if (!neigh_node->if_incoming)
+			goto dst_unreach;
+
+		if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
+			goto dst_unreach;
+
+		icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
+		if (packet_len == sizeof(*icmp_packet_rr))
+			memcpy(icmp_packet_rr->rr,
+			       neigh_node->if_incoming->net_dev->dev_addr,
+			       ETH_ALEN);
+
+		break;
+	default:
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+			   "Error - can't send packet from char device: got unknown message type\n");
 		len = -EINVAL;
 		goto free_skb;
 	}
 
-	icmp_packet->uid = socket_client->index;
+	icmp_header->uid = socket_client->index;
 
-	if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
-		icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
-		icmp_packet->header.version = BATADV_COMPAT_VERSION;
-		batadv_socket_add_packet(socket_client, icmp_packet,
+	if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+		icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
+		icmp_header->header.version = BATADV_COMPAT_VERSION;
+		batadv_socket_add_packet(socket_client, icmp_header,
 					 packet_len);
 		goto free_skb;
 	}
 
-	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
-		goto dst_unreach;
-
-	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
-	if (!orig_node)
-		goto dst_unreach;
-
-	neigh_node = batadv_orig_node_get_router(orig_node);
-	if (!neigh_node)
-		goto dst_unreach;
-
-	if (!neigh_node->if_incoming)
-		goto dst_unreach;
-
-	if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
-		goto dst_unreach;
-
-	memcpy(icmp_packet->orig,
-	       primary_if->net_dev->dev_addr, ETH_ALEN);
-
-	if (packet_len == sizeof(struct batadv_icmp_packet_rr))
-		memcpy(icmp_packet->rr,
-		       neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
+	memcpy(icmp_header->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 
 	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	goto out;
 
 dst_unreach:
-	icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
-	batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
+	icmp_header->msg_type = BATADV_DESTINATION_UNREACHABLE;
+	batadv_socket_add_packet(socket_client, icmp_header, packet_len);
 free_skb:
 	kfree_skb(skb);
 out:
@@ -298,27 +311,40 @@
 	return -ENOMEM;
 }
 
+/**
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to userspace
+ *  on an icmp socket.
+ * @socket_client: the socket this packet belongs to
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-				     struct batadv_icmp_packet_rr *icmp_packet,
+				     struct batadv_icmp_header *icmph,
 				     size_t icmp_len)
 {
 	struct batadv_socket_packet *socket_packet;
+	size_t len;
 
 	socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
 
 	if (!socket_packet)
 		return;
 
+	len = icmp_len;
+	/* check the maximum length before filling the buffer */
+	if (len > sizeof(socket_packet->icmp_packet))
+		len = sizeof(socket_packet->icmp_packet);
+
 	INIT_LIST_HEAD(&socket_packet->list);
-	memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
-	socket_packet->icmp_len = icmp_len;
+	memcpy(&socket_packet->icmp_packet, icmph, len);
+	socket_packet->icmp_len = len;
 
 	spin_lock_bh(&socket_client->lock);
 
 	/* while waiting for the lock the socket_client could have been
 	 * deleted
 	 */
-	if (!batadv_socket_client_hash[icmp_packet->uid]) {
+	if (!batadv_socket_client_hash[icmph->uid]) {
 		spin_unlock_bh(&socket_client->lock);
 		kfree(socket_packet);
 		return;
@@ -342,12 +368,18 @@
 	wake_up(&socket_client->queue_wait);
 }
 
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+/**
+ * batadv_socket_receive_packet - schedule an icmp packet to be received
+ *  locally and sent to userspace.
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
 				  size_t icmp_len)
 {
 	struct batadv_socket_client *hash;
 
-	hash = batadv_socket_client_hash[icmp_packet->uid];
+	hash = batadv_socket_client_hash[icmph->uid];
 	if (hash)
-		batadv_socket_add_packet(hash, icmp_packet, icmp_len);
+		batadv_socket_add_packet(hash, icmph, icmp_len);
 }
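A userspace sketch of the bounds check that batadv_socket_add_packet() now performs before copying into the fixed-size queue entry; the buffer size here is illustrative only:

#include <stdio.h>
#include <string.h>

#define ICMP_BUF_SIZE 116	/* stand-in for sizeof(socket_packet->icmp_packet) */

int main(void)
{
	char buf[ICMP_BUF_SIZE];
	unsigned char wire[256] = { 0 };	/* received icmp packet bytes */
	size_t icmp_len = sizeof(wire);		/* larger than the buffer */
	size_t len = icmp_len;

	/* check the maximum length before filling the buffer */
	if (len > sizeof(buf))
		len = sizeof(buf);

	memcpy(buf, wire, len);
	printf("copied %zu of %zu bytes\n", len, icmp_len);
	return 0;
}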
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 1fcca37..6665080 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -24,7 +24,7 @@
 
 void batadv_socket_init(void);
 int batadv_socket_setup(struct batadv_priv *bat_priv);
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
 				  size_t icmp_len);
 
 #endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 1356af6..c51a5e5 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -36,10 +36,11 @@
 #include "gateway_client.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
-#include "vis.h"
+#include "gateway_common.h"
 #include "hash.h"
 #include "bat_algo.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -109,9 +110,11 @@
 	spin_lock_init(&bat_priv->tt.req_list_lock);
 	spin_lock_init(&bat_priv->tt.roam_list_lock);
 	spin_lock_init(&bat_priv->tt.last_changeset_lock);
+	spin_lock_init(&bat_priv->tt.commit_lock);
 	spin_lock_init(&bat_priv->gw.list_lock);
-	spin_lock_init(&bat_priv->vis.hash_lock);
-	spin_lock_init(&bat_priv->vis.list_lock);
+	spin_lock_init(&bat_priv->tvlv.container_list_lock);
+	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
+	spin_lock_init(&bat_priv->softif_vlan_list_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
@@ -119,6 +122,9 @@
 	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
 	INIT_LIST_HEAD(&bat_priv->tt.req_list);
 	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
+	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
+	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
 
 	ret = batadv_originator_init(bat_priv);
 	if (ret < 0)
@@ -128,13 +134,6 @@
 	if (ret < 0)
 		goto err;
 
-	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
-			    BATADV_NULL_IFINDEX);
-
-	ret = batadv_vis_init(bat_priv);
-	if (ret < 0)
-		goto err;
-
 	ret = batadv_bla_init(bat_priv);
 	if (ret < 0)
 		goto err;
@@ -147,6 +146,8 @@
 	if (ret < 0)
 		goto err;
 
+	batadv_gw_init(bat_priv);
+
 	atomic_set(&bat_priv->gw.reselect, 0);
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
@@ -165,8 +166,6 @@
 
 	batadv_purge_outstanding_packets(bat_priv, NULL);
 
-	batadv_vis_quit(bat_priv);
-
 	batadv_gw_node_purge(bat_priv);
 	batadv_nc_mesh_free(bat_priv);
 	batadv_dat_free(bat_priv);
@@ -185,6 +184,8 @@
 	 */
 	batadv_originator_free(bat_priv);
 
+	batadv_gw_free(bat_priv);
+
 	free_percpu(bat_priv->bat_counters);
 	bat_priv->bat_counters = NULL;
 
@@ -255,6 +256,31 @@
 }
 
 /**
+ * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ *  payload packet
+ *
+ * Return the maximum encapsulation overhead in bytes.
+ */
+int batadv_max_header_len(void)
+{
+	int header_len = 0;
+
+	header_len = max_t(int, header_len,
+			   sizeof(struct batadv_unicast_packet));
+	header_len = max_t(int, header_len,
+			   sizeof(struct batadv_unicast_4addr_packet));
+	header_len = max_t(int, header_len,
+			   sizeof(struct batadv_bcast_packet));
+
+#ifdef CONFIG_BATMAN_ADV_NC
+	header_len = max_t(int, header_len,
+			   sizeof(struct batadv_coded_packet));
+#endif
+
+	return header_len;
+}
+
+/**
  * batadv_skb_set_priority - sets skb priority according to packet content
  * @skb: the packet to be sent
  * @offset: offset to the packet content
@@ -392,22 +418,31 @@
 	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
 		batadv_rx_handler[i] = batadv_recv_unhandled_packet;
 
-	/* batman icmp packet */
-	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
+		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
+
+	/* compile time checks for struct member offsets */
+	BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
+	BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+
+	/* broadcast packet */
+	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
+
+	/* unicast packets ... */
 	/* unicast with 4 addresses packet */
 	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
 	/* unicast packet */
 	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
-	/* fragmented unicast packet */
-	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
-	/* broadcast packet */
-	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
-	/* vis packet */
-	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
-	/* Translation table query (request or response) */
-	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
-	/* Roaming advertisement */
-	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
+	/* unicast tvlv packet */
+	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
+	/* batman icmp packet */
+	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+	/* Fragmented packets */
+	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
 int
@@ -415,7 +450,12 @@
 			     int (*recv_handler)(struct sk_buff *,
 						 struct batadv_hard_iface *))
 {
-	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
+	int (*curr)(struct sk_buff *,
+		    struct batadv_hard_iface *);
+	curr = batadv_rx_handler[packet_type];
+
+	if ((curr != batadv_recv_unhandled_packet) &&
+	    (curr != batadv_recv_unhandled_unicast_packet))
 		return -EBUSY;
 
 	batadv_rx_handler[packet_type] = recv_handler;
@@ -461,7 +501,9 @@
 	    !bat_algo_ops->bat_iface_update_mac ||
 	    !bat_algo_ops->bat_primary_iface_set ||
 	    !bat_algo_ops->bat_ogm_schedule ||
-	    !bat_algo_ops->bat_ogm_emit) {
+	    !bat_algo_ops->bat_ogm_emit ||
+	    !bat_algo_ops->bat_neigh_cmp ||
+	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
 		pr_info("Routing algo '%s' does not implement required ops\n",
 			bat_algo_ops->name);
 		ret = -EINVAL;
@@ -536,6 +578,601 @@
 	return htonl(crc);
 }
 
+/**
+ * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
+ *  possibly free it
+ * @tvlv_handler: the tvlv handler to free
+ */
+static void
+batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
+{
+	if (atomic_dec_and_test(&tvlv_handler->refcount))
+		kfree_rcu(tvlv_handler, rcu);
+}
+
+/**
+ * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ *  based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @version: tvlv handler version to look for
+ *
+ * Returns tvlv handler if found or NULL otherwise.
+ */
+static struct batadv_tvlv_handler
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
+			 uint8_t type, uint8_t version)
+{
+	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tvlv_handler_tmp,
+				 &bat_priv->tvlv.handler_list, list) {
+		if (tvlv_handler_tmp->type != type)
+			continue;
+
+		if (tvlv_handler_tmp->version != version)
+			continue;
+
+		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
+			continue;
+
+		tvlv_handler = tvlv_handler_tmp;
+		break;
+	}
+	rcu_read_unlock();
+
+	return tvlv_handler;
+}
+
+/**
+ * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
+ *  possibly free it
+ * @tvlv: the tvlv container to free
+ */
+static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
+{
+	if (atomic_dec_and_test(&tvlv->refcount))
+		kfree(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ *  list based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to look for
+ * @version: tvlv container version to look for
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns tvlv container if found or NULL otherwise.
+ */
+static struct batadv_tvlv_container
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
+			   uint8_t type, uint8_t version)
+{
+	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
+
+	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
+		if (tvlv_tmp->tvlv_hdr.type != type)
+			continue;
+
+		if (tvlv_tmp->tvlv_hdr.version != version)
+			continue;
+
+		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
+			continue;
+
+		tvlv = tvlv_tmp;
+		break;
+	}
+
+	return tvlv;
+}
+
+/**
+ * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ *  list entries
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns size of all currently registered tvlv containers in bytes.
+ */
+static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+{
+	struct batadv_tvlv_container *tvlv;
+	uint16_t tvlv_len = 0;
+
+	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+		tvlv_len += sizeof(struct batadv_tvlv_hdr);
+		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
+	}
+
+	return tvlv_len;
+}
+
+/**
+ * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
+ *  list
+ * @tvlv: the tvlv container to be removed
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ */
+static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+{
+	if (!tvlv)
+		return;
+
+	hlist_del(&tvlv->list);
+
+	/* first call to decrement the counter, second call to free */
+	batadv_tvlv_container_free_ref(tvlv);
+	batadv_tvlv_container_free_ref(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to unregister
+ * @version: tvlv container version to unregister
+ */
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+				      uint8_t type, uint8_t version)
+{
+	struct batadv_tvlv_container *tvlv;
+
+	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
+	batadv_tvlv_container_remove(tvlv);
+	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_container_register - register tvlv type, version and content
+ *  to be propagated with each (primary interface) OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type
+ * @version: tvlv container version
+ * @tvlv_value: tvlv container content
+ * @tvlv_value_len: tvlv container content length
+ *
+ * If a container of the same type and version was already registered, the new
+ * content replaces the old one.
+ */
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+				    uint8_t type, uint8_t version,
+				    void *tvlv_value, uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_container *tvlv_old, *tvlv_new;
+
+	if (!tvlv_value)
+		tvlv_value_len = 0;
+
+	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
+	if (!tvlv_new)
+		return;
+
+	tvlv_new->tvlv_hdr.version = version;
+	tvlv_new->tvlv_hdr.type = type;
+	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
+
+	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
+	INIT_HLIST_NODE(&tvlv_new->list);
+	atomic_set(&tvlv_new->refcount, 1);
+
+	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
+	batadv_tvlv_container_remove(tvlv_old);
+	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
+	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
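+
+/* Editor's sketch, not part of this patch: typical container usage, here a
+ * gateway announcing its bandwidth with every OGM via the
+ * batadv_tvlv_gateway_data struct added to packet.h further below; the
+ * bandwidth values are made up for illustration.
+ *
+ *	struct batadv_tvlv_gateway_data gw;
+ *
+ *	gw.bandwidth_down = htonl(10000);
+ *	gw.bandwidth_up = htonl(2000);
+ *	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+ *				       &gw, sizeof(gw));
+ *
+ * and, to stop announcing it:
+ *
+ *	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+ */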
+
+/**
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ *  requested packet size
+ * @packet_buff: packet buffer
+ * @packet_buff_len: packet buffer size
+ * @min_packet_len: requested packet minimum size
+ * @additional_packet_len: requested additional packet size on top of minimum
+ *  size
+ *
+ * Returns true if the packet buffer could be changed to the requested size,
+ * false otherwise.
+ */
+static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
+					    int *packet_buff_len,
+					    int min_packet_len,
+					    int additional_packet_len)
+{
+	unsigned char *new_buff;
+
+	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
+
+	/* keep the old buffer if kmalloc fails */
+	if (new_buff) {
+		memcpy(new_buff, *packet_buff, min_packet_len);
+		kfree(*packet_buff);
+		*packet_buff = new_buff;
+		*packet_buff_len = min_packet_len + additional_packet_len;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ *  OGM packet buffer
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: ogm packet buffer
+ * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
+ *  content
+ * @packet_min_len: ogm header size to be preserved for the OGM itself
+ *
+ * The ogm packet might be enlarged or shrunk depending on the current size
+ * and the size of the to-be-appended tvlv containers.
+ *
+ * Returns size of all appended tvlv containers in bytes.
+ */
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+					  unsigned char **packet_buff,
+					  int *packet_buff_len,
+					  int packet_min_len)
+{
+	struct batadv_tvlv_container *tvlv;
+	struct batadv_tvlv_hdr *tvlv_hdr;
+	uint16_t tvlv_value_len;
+	void *tvlv_value;
+	bool ret;
+
+	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
+
+	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
+					      packet_min_len, tvlv_value_len);
+
+	if (!ret)
+		goto end;
+
+	if (!tvlv_value_len)
+		goto end;
+
+	tvlv_value = (*packet_buff) + packet_min_len;
+
+	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+		tvlv_hdr = tvlv_value;
+		tvlv_hdr->type = tvlv->tvlv_hdr.type;
+		tvlv_hdr->version = tvlv->tvlv_hdr.version;
+		tvlv_hdr->len = tvlv->tvlv_hdr.len;
+		tvlv_value = tvlv_hdr + 1;
+		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
+		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+	}
+
+end:
+	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+	return tvlv_value_len;
+}
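+
+/* Editor's sketch, not part of this patch: a routing protocol rebuilding its
+ * OGM would append the registered containers like this; ogm_buff,
+ * ogm_buff_len and batadv_ogm_packet (pointing at the start of the rebuilt
+ * buffer) are hypothetical names for the caller's state.
+ *
+ *	uint16_t tvlv_len;
+ *
+ *	tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff,
+ *						    &ogm_buff_len,
+ *						    BATADV_OGM_HLEN);
+ *	batadv_ogm_packet->tvlv_len = htons(tvlv_len);
+ */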
+
+/**
+ * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv_handler: tvlv callback function handling the tvlv content
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS if no matching handler was found or an ogm tvlv was
+ * handled, otherwise the return value of the unicast handler callback.
+ */
+static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
+				    struct batadv_tvlv_handler *tvlv_handler,
+				    bool ogm_source,
+				    struct batadv_orig_node *orig_node,
+				    uint8_t *src, uint8_t *dst,
+				    void *tvlv_value, uint16_t tvlv_value_len)
+{
+	if (!tvlv_handler)
+		return NET_RX_SUCCESS;
+
+	if (ogm_source) {
+		if (!tvlv_handler->ogm_handler)
+			return NET_RX_SUCCESS;
+
+		if (!orig_node)
+			return NET_RX_SUCCESS;
+
+		tvlv_handler->ogm_handler(bat_priv, orig_node,
+					  BATADV_NO_FLAGS,
+					  tvlv_value, tvlv_value_len);
+		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
+	} else {
+		if (!src)
+			return NET_RX_SUCCESS;
+
+		if (!dst)
+			return NET_RX_SUCCESS;
+
+		if (!tvlv_handler->unicast_handler)
+			return NET_RX_SUCCESS;
+
+		return tvlv_handler->unicast_handler(bat_priv, src,
+						     dst, tvlv_value,
+						     tvlv_value_len);
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS when processing ogm tvlvs, otherwise the OR-combined
+ * return values of all called handler callbacks.
+ */
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+				   bool ogm_source,
+				   struct batadv_orig_node *orig_node,
+				   uint8_t *src, uint8_t *dst,
+				   void *tvlv_value, uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_handler *tvlv_handler;
+	struct batadv_tvlv_hdr *tvlv_hdr;
+	uint16_t tvlv_value_cont_len;
+	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+	int ret = NET_RX_SUCCESS;
+
+	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
+		tvlv_hdr = tvlv_value;
+		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
+		tvlv_value = tvlv_hdr + 1;
+		tvlv_value_len -= sizeof(*tvlv_hdr);
+
+		if (tvlv_value_cont_len > tvlv_value_len)
+			break;
+
+		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
+						       tvlv_hdr->type,
+						       tvlv_hdr->version);
+
+		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
+						ogm_source, orig_node,
+						src, dst, tvlv_value,
+						tvlv_value_cont_len);
+		if (tvlv_handler)
+			batadv_tvlv_handler_free_ref(tvlv_handler);
+		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+		tvlv_value_len -= tvlv_value_cont_len;
+	}
+
+	if (!ogm_source)
+		return ret;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tvlv_handler,
+				 &bat_priv->tvlv.handler_list, list) {
+		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
+		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
+			tvlv_handler->ogm_handler(bat_priv, orig_node,
+						  cifnotfound, NULL, 0);
+
+		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
+	}
+	rcu_read_unlock();
+
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ *  handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @batadv_ogm_packet: ogm packet containing the tvlv containers
+ * @orig_node: orig node emitting the ogm packet
+ */
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+			     struct batadv_ogm_packet *batadv_ogm_packet,
+			     struct batadv_orig_node *orig_node)
+{
+	void *tvlv_value;
+	uint16_t tvlv_value_len;
+
+	if (!batadv_ogm_packet)
+		return;
+
+	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
+	if (!tvlv_value_len)
+		return;
+
+	tvlv_value = batadv_ogm_packet + 1;
+
+	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
+				       tvlv_value, tvlv_value_len);
+}
+
+/**
+ * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ *  type and version (both need to match) for ogm tvlv payload and/or unicast
+ *  payload
+ * @bat_priv: the bat priv with all the soft interface information
+ * @optr: ogm tvlv handler callback function. This function receives the orig
+ *  node, flags and the tvlv content as argument to process.
+ * @uptr: unicast tvlv handler callback function. This function receives the
+ *  source & destination of the unicast packet as well as the tvlv content
+ *  to process.
+ * @type: tvlv handler type to be registered
+ * @version: tvlv handler version to be registered
+ * @flags: flags to enable or disable TVLV API behavior
+ */
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+				  void (*optr)(struct batadv_priv *bat_priv,
+					       struct batadv_orig_node *orig,
+					       uint8_t flags,
+					       void *tvlv_value,
+					       uint16_t tvlv_value_len),
+				  int (*uptr)(struct batadv_priv *bat_priv,
+					      uint8_t *src, uint8_t *dst,
+					      void *tvlv_value,
+					      uint16_t tvlv_value_len),
+				  uint8_t type, uint8_t version, uint8_t flags)
+{
+	struct batadv_tvlv_handler *tvlv_handler;
+
+	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+	if (tvlv_handler) {
+		batadv_tvlv_handler_free_ref(tvlv_handler);
+		return;
+	}
+
+	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+	if (!tvlv_handler)
+		return;
+
+	tvlv_handler->ogm_handler = optr;
+	tvlv_handler->unicast_handler = uptr;
+	tvlv_handler->type = type;
+	tvlv_handler->version = version;
+	tvlv_handler->flags = flags;
+	atomic_set(&tvlv_handler->refcount, 1);
+	INIT_HLIST_NODE(&tvlv_handler->list);
+
+	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+}
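+
+/* Editor's sketch, not part of this patch: registering an OGM-only handler
+ * that also wants to be notified when the container is absent (see
+ * batadv_nc_mesh_init() in network-coding.c below for a real caller);
+ * my_ogm_handler is a hypothetical name.
+ *
+ *	static void my_ogm_handler(struct batadv_priv *bat_priv,
+ *				   struct batadv_orig_node *orig,
+ *				   uint8_t flags, void *tvlv_value,
+ *				   uint16_t tvlv_value_len)
+ *	{
+ *		if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+ *			return;	// orig did not announce the container
+ *	}
+ *
+ *	batadv_tvlv_handler_register(bat_priv, my_ogm_handler, NULL,
+ *				     BATADV_TVLV_DAT, 1,
+ *				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ */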
+
+/**
+ * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to be unregistered
+ * @version: tvlv handler version to be unregistered
+ */
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+				    uint8_t type, uint8_t version)
+{
+	struct batadv_tvlv_handler *tvlv_handler;
+
+	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+	if (!tvlv_handler)
+		return;
+
+	batadv_tvlv_handler_free_ref(tvlv_handler);
+	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+	hlist_del_rcu(&tvlv_handler->list);
+	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+	batadv_tvlv_handler_free_ref(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ *  specified host
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @type: tvlv type
+ * @version: tvlv version
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ */
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+			      uint8_t *dst, uint8_t type, uint8_t version,
+			      void *tvlv_value, uint16_t tvlv_value_len)
+{
+	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+	struct batadv_tvlv_hdr *tvlv_hdr;
+	struct batadv_orig_node *orig_node;
+	struct sk_buff *skb = NULL;
+	unsigned char *tvlv_buff;
+	unsigned int tvlv_len;
+	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
+	bool ret = false;
+
+	orig_node = batadv_orig_hash_find(bat_priv, dst);
+	if (!orig_node)
+		goto out;
+
+	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
+
+	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
+	if (!skb)
+		goto out;
+
+	skb->priority = TC_PRIO_CONTROL;
+	skb_reserve(skb, ETH_HLEN);
+	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
+	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
+	unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
+	unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
+	unicast_tvlv_packet->header.ttl = BATADV_TTL;
+	unicast_tvlv_packet->reserved = 0;
+	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
+	unicast_tvlv_packet->align = 0;
+	memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
+	memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);
+
+	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
+	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
+	tvlv_hdr->version = version;
+	tvlv_hdr->type = type;
+	tvlv_hdr->len = htons(tvlv_value_len);
+	tvlv_buff += sizeof(*tvlv_hdr);
+	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
+
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+		ret = true;
+
+out:
+	if (skb && !ret)
+		kfree_skb(skb);
+	if (orig_node)
+		batadv_orig_node_free_ref(orig_node);
+}
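+
+/* Editor's sketch, not part of this patch: pushing a tvlv payload to one
+ * remote originator; my_addr, remote_addr and the payload bytes are
+ * hypothetical.
+ *
+ *	uint8_t payload[] = { 0xbe, 0xef };
+ *
+ *	batadv_tvlv_unicast_send(bat_priv, my_addr, remote_addr,
+ *				 BATADV_TVLV_ROAM, 1, payload,
+ *				 sizeof(payload));
+ */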
+
+/**
+ * batadv_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet
+ * @header_len: length of the batman header preceding the ethernet header
+ *
+ * If the packet embedded in the skb is vlan tagged this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
+{
+	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
+	struct vlan_ethhdr *vhdr;
+	unsigned short vid;
+
+	if (ethhdr->h_proto != htons(ETH_P_8021Q))
+		return BATADV_NO_FLAGS;
+
+	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
+		return BATADV_NO_FLAGS;
+
+	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
+	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+	vid |= BATADV_VLAN_HAS_TAG;
+
+	return vid;
+}
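+
+/* Editor's sketch, not part of this patch: decoding the return value when the
+ * ethernet header sits directly at skb->data (header_len == 0).
+ *
+ *	unsigned short vid;
+ *
+ *	vid = batadv_get_vid(skb, 0);
+ *	if (vid & BATADV_VLAN_HAS_TAG)
+ *		pr_debug("tagged frame, vid %d\n", vid & VLAN_VID_MASK);
+ *	else
+ *		pr_debug("untagged frame\n");
+ */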
+
 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
 {
 	struct batadv_algo_ops *bat_algo_ops;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2467552..f94f287b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -20,13 +20,13 @@
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
-			     "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+			     "Simon Wunderlich <sw@simonwunderlich.de>"
 #define BATADV_DRIVER_DESC   "B.A.T.M.A.N. advanced"
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.4.0"
+#define BATADV_SOURCE_VERSION "2013.5.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -86,7 +86,11 @@
 /* numbers of originator to contact for any PUT/GET DHT operation */
 #define BATADV_DAT_CANDIDATES_NUM 3
 
-#define BATADV_VIS_INTERVAL 5000	/* 5 seconds */
+/**
+ * BATADV_TQ_SIMILARITY_THRESHOLD - maximum number of TQ points by which a
+ *  secondary metric may differ from the primary one and still be considered
+ *  acceptable
+ */
+#define BATADV_TQ_SIMILARITY_THRESHOLD 50
 
 /* how much worse secondary interfaces may be to be considered as bonding
  * candidates
@@ -133,6 +137,15 @@
 
 #define BATADV_GW_THRESHOLD	50
 
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+/* Time to keep fragments while waiting for the rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
+
 #define BATADV_DAT_CANDIDATE_NOT_FOUND	0
 #define BATADV_DAT_CANDIDATE_ORIG	1
 
@@ -160,15 +173,9 @@
 #include <net/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/seq_file.h>
-#include "types.h"
+#include <linux/if_vlan.h>
 
-/**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
- * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
- */
-enum batadv_vlan_flags {
-	BATADV_VLAN_HAS_TAG	= BIT(15),
-};
+#include "types.h"
 
 #define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
 			       (int)(vid & VLAN_VID_MASK) : -1)
@@ -184,6 +191,7 @@
 int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+int batadv_max_header_len(void);
 void batadv_skb_set_priority(struct sk_buff *skb, int offset);
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 			   struct packet_type *ptype,
@@ -326,4 +334,40 @@
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+				    uint8_t type, uint8_t version,
+				    void *tvlv_value, uint16_t tvlv_value_len);
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+					  unsigned char **packet_buff,
+					  int *packet_buff_len,
+					  int packet_min_len);
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+			     struct batadv_ogm_packet *batadv_ogm_packet,
+			     struct batadv_orig_node *orig_node);
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+				      uint8_t type, uint8_t version);
+
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+				  void (*optr)(struct batadv_priv *bat_priv,
+					       struct batadv_orig_node *orig,
+					       uint8_t flags,
+					       void *tvlv_value,
+					       uint16_t tvlv_value_len),
+				  int (*uptr)(struct batadv_priv *bat_priv,
+					      uint8_t *src, uint8_t *dst,
+					      void *tvlv_value,
+					      uint16_t tvlv_value_len),
+				  uint8_t type, uint8_t version, uint8_t flags);
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+				    uint8_t type, uint8_t version);
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+				   bool ogm_source,
+				   struct batadv_orig_node *orig_node,
+				   uint8_t *src, uint8_t *dst,
+				   void *tvlv_buff, uint16_t tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+			      uint8_t *dst, uint8_t type, uint8_t version,
+			      void *tvlv_value, uint16_t tvlv_value_len);
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 4ecc0b6..351e199 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -59,6 +59,59 @@
 }
 
 /**
+ * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ *  after network coding setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+	char nc_mode;
+
+	nc_mode = atomic_read(&bat_priv->network_coding);
+
+	switch (nc_mode) {
+	case 0:
+		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+		break;
+	case 1:
+		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
+					       NULL, 0);
+		break;
+	}
+}
+
+/**
+ * batadv_nc_status_update - update the network coding tvlv container after
+ *  network coding setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_nc_status_update(struct net_device *net_dev)
+{
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
+	batadv_nc_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the network coding data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig,
+					  uint8_t flags,
+					  void *tvlv_value,
+					  uint16_t tvlv_value_len)
+{
+	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+	else
+		orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+}
+
+/**
  * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -87,6 +140,10 @@
 	INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
 	batadv_nc_start_timer(bat_priv);
 
+	batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
+				     NULL, BATADV_TVLV_NC, 1,
+				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+	batadv_nc_tvlv_container_update(bat_priv);
 	return 0;
 
 err:
@@ -802,6 +859,10 @@
 	if (!atomic_read(&bat_priv->network_coding))
 		goto out;
 
+	/* check if orig node is network coding enabled */
+	if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+		goto out;
+
 	/* accept ogms from 'good' neighbors and single hop neighbors */
 	if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
 	    !is_single_hop_neigh)
@@ -942,7 +1003,7 @@
 				   struct batadv_nc_packet *nc_packet,
 				   struct batadv_neigh_node *neigh_node)
 {
-	uint8_t tq_weighted_neigh, tq_weighted_coding;
+	uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
 	struct sk_buff *skb_dest, *skb_src;
 	struct batadv_unicast_packet *packet1;
 	struct batadv_unicast_packet *packet2;
@@ -967,8 +1028,10 @@
 	if (!router_coding)
 		goto out;
 
-	tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
-	tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
+	tq_tmp = batadv_nc_random_weight_tq(router_neigh->bat_iv.tq_avg);
+	tq_weighted_neigh = tq_tmp;
+	tq_tmp = batadv_nc_random_weight_tq(router_coding->bat_iv.tq_avg);
+	tq_weighted_coding = tq_tmp;
 
 	/* Select one destination for the MAC-header dst-field based on
 	 * weighted TQ-values.
@@ -1735,6 +1798,8 @@
  */
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
+	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
 	cancel_delayed_work_sync(&bat_priv->nc.work);
 
 	batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index ddfa618..d4fd315 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,6 +22,7 @@
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
+void batadv_nc_status_update(struct net_device *net_dev);
 int batadv_nc_init(void);
 int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
@@ -47,6 +48,10 @@
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
+static inline void batadv_nc_status_update(struct net_device *net_dev)
+{
+}
+
 static inline int batadv_nc_init(void)
 {
 	return 0;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f50553a..8ab1434 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -25,10 +25,10 @@
 #include "routing.h"
 #include "gateway_client.h"
 #include "hard-interface.h"
-#include "unicast.h"
 #include "soft-interface.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -36,7 +36,7 @@
 static void batadv_purge_orig(struct work_struct *work);
 
 /* returns 1 if they are the same originator */
-static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+int batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_orig_node,
 					 hash_entry);
@@ -44,6 +44,88 @@
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * if it does not exist.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+			  unsigned short vid)
+{
+	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+		if (tmp->vid != vid)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp->refcount))
+			continue;
+
+		vlan = tmp;
+
+		break;
+	}
+	rcu_read_unlock();
+
+	return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ *  object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns NULL in case of failure or the vlan object identified by vid and
+ * belonging to orig_node otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+			  unsigned short vid)
+{
+	struct batadv_orig_node_vlan *vlan;
+
+	spin_lock_bh(&orig_node->vlan_list_lock);
+
+	/* first, check whether an object for this vid already exists */
+	vlan = batadv_orig_node_vlan_get(orig_node, vid);
+	if (vlan)
+		goto out;
+
+	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+	if (!vlan)
+		goto out;
+
+	atomic_set(&vlan->refcount, 2);
+	vlan->vid = vid;
+
+	list_add_rcu(&vlan->list, &orig_node->vlan_list);
+
+out:
+	spin_unlock_bh(&orig_node->vlan_list_lock);
+
+	return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
+ *  the originator-vlan object
+ * @orig_vlan: the originator-vlan object to release
+ */
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
+{
+	if (atomic_dec_and_test(&orig_vlan->refcount))
+		kfree_rcu(orig_vlan, rcu);
+}
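+
+/* Editor's sketch, not part of this patch: the usual get/use/release cycle
+ * for the per-originator vlan objects created above; vid would typically come
+ * from batadv_get_vid().
+ *
+ *	struct batadv_orig_node_vlan *vlan;
+ *
+ *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
+ *	if (!vlan)
+ *		return;
+ *	// ... read or update the per-vlan state ...
+ *	batadv_orig_node_vlan_free_ref(vlan);
+ */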
+
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -90,11 +172,20 @@
 	return router;
 }
 
+/**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ * @orig_node: originator object representing the neighbour
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr)
+		      const uint8_t *neigh_addr,
+		      struct batadv_orig_node *orig_node)
 {
-	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_neigh_node *neigh_node;
 
 	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
@@ -104,15 +195,14 @@
 	INIT_HLIST_NODE(&neigh_node->list);
 
 	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
-	spin_lock_init(&neigh_node->lq_update_lock);
+	neigh_node->if_incoming = hard_iface;
+	neigh_node->orig_node = orig_node;
+
+	INIT_LIST_HEAD(&neigh_node->bonding_list);
 
 	/* extra reference for return */
 	atomic_set(&neigh_node->refcount, 2);
 
-	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Creating new neighbor %pM on interface %s\n", neigh_addr,
-		   hard_iface->net_dev->name);
-
 out:
 	return neigh_node;
 }
@@ -146,13 +236,15 @@
 	/* Free nc_nodes */
 	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
-	batadv_frag_list_free(&orig_node->frag_list);
-	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+	batadv_frag_purge_orig(orig_node, NULL);
+
+	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
 				  "originator timed out");
 
+	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
 	kfree(orig_node->tt_buff);
-	kfree(orig_node->bcast_own);
-	kfree(orig_node->bcast_own_sum);
 	kfree(orig_node);
 }
 
@@ -210,20 +302,22 @@
 	batadv_hash_destroy(hash);
 }
 
-/* this function finds or creates an originator entry for the given
- * address if it does not exits
+/**
+ * batadv_orig_node_new - creates a new orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the originator
+ *
+ * Creates a new originator object and initialises all the generic fields.
+ * The new object is not added to the originator list.
+ * Returns the newly created object or NULL on failure.
  */
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 					      const uint8_t *addr)
 {
 	struct batadv_orig_node *orig_node;
-	int size;
-	int hash_added;
+	struct batadv_orig_node_vlan *vlan;
 	unsigned long reset_time;
-
-	orig_node = batadv_orig_hash_find(bat_priv, addr);
-	if (orig_node)
-		return orig_node;
+	int i;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Creating new originator: %pM\n", addr);
@@ -234,10 +328,12 @@
 
 	INIT_HLIST_HEAD(&orig_node->neigh_list);
 	INIT_LIST_HEAD(&orig_node->bond_list);
-	spin_lock_init(&orig_node->ogm_cnt_lock);
+	INIT_LIST_HEAD(&orig_node->vlan_list);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
 	spin_lock_init(&orig_node->tt_buff_lock);
+	spin_lock_init(&orig_node->tt_lock);
+	spin_lock_init(&orig_node->vlan_list_lock);
 
 	batadv_nc_init_orig(orig_node);
 
@@ -249,43 +345,32 @@
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	batadv_dat_init_orig_node_addr(orig_node);
 	orig_node->router = NULL;
-	orig_node->tt_crc = 0;
 	atomic_set(&orig_node->last_ttvn, 0);
 	orig_node->tt_buff = NULL;
 	orig_node->tt_buff_len = 0;
-	atomic_set(&orig_node->tt_size, 0);
 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
 	orig_node->bcast_seqno_reset = reset_time;
 	orig_node->batman_seqno_reset = reset_time;
 
 	atomic_set(&orig_node->bond_candidates, 0);
 
-	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
-
-	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
-	if (!orig_node->bcast_own)
+	/* create a vlan object for the "untagged" LAN */
+	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
+	if (!vlan)
 		goto free_orig_node;
+	/* batadv_orig_node_vlan_new() increases the refcounter.
+	 * Immediately release vlan since it is not needed anymore in this
+	 * context
+	 */
+	batadv_orig_node_vlan_free_ref(vlan);
 
-	size = bat_priv->num_ifaces * sizeof(uint8_t);
-	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
-	INIT_LIST_HEAD(&orig_node->frag_list);
-	orig_node->last_frag_packet = 0;
-
-	if (!orig_node->bcast_own_sum)
-		goto free_bcast_own;
-
-	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
-				     batadv_choose_orig, orig_node,
-				     &orig_node->hash_entry);
-	if (hash_added != 0)
-		goto free_bcast_own_sum;
+	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
+		spin_lock_init(&orig_node->fragments[i].lock);
+		orig_node->fragments[i].size = 0;
+	}
 
 	return orig_node;
-free_bcast_own_sum:
-	kfree(orig_node->bcast_own_sum);
-free_bcast_own:
-	kfree(orig_node->bcast_own);
 free_orig_node:
 	kfree(orig_node);
 	return NULL;
@@ -294,15 +379,16 @@
 static bool
 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig_node,
-			    struct batadv_neigh_node **best_neigh_node)
+			    struct batadv_neigh_node **best_neigh)
 {
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
 	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
 	bool neigh_purged = false;
 	unsigned long last_seen;
 	struct batadv_hard_iface *if_incoming;
 
-	*best_neigh_node = NULL;
+	*best_neigh = NULL;
 
 	spin_lock_bh(&orig_node->neigh_list_lock);
 
@@ -335,9 +421,12 @@
 			batadv_bonding_candidate_del(orig_node, neigh_node);
 			batadv_neigh_node_free_ref(neigh_node);
 		} else {
-			if ((!*best_neigh_node) ||
-			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
-				*best_neigh_node = neigh_node;
+			/* store the best neighbor if this is the first
+			 * iteration or if a better neighbor has been found
+			 */
+			if (!*best_neigh ||
+			    bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
+				*best_neigh = neigh_node;
 		}
 	}
 
@@ -388,17 +477,14 @@
 		hlist_for_each_entry_safe(orig_node, node_tmp,
 					  head, hash_entry) {
 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
-				if (orig_node->gw_flags)
-					batadv_gw_node_delete(bat_priv,
-							      orig_node);
+				batadv_gw_node_delete(bat_priv, orig_node);
 				hlist_del_rcu(&orig_node->hash_entry);
 				batadv_orig_node_free_ref(orig_node);
 				continue;
 			}
 
-			if (batadv_has_timed_out(orig_node->last_frag_packet,
-						 BATADV_FRAG_TIMEOUT))
-				batadv_frag_list_free(&orig_node->frag_list);
+			batadv_frag_purge_orig(orig_node,
+					       batadv_frag_check_entry);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -429,100 +515,26 @@
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_head *head;
 	struct batadv_hard_iface *primary_if;
-	struct batadv_orig_node *orig_node;
-	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
-	int batman_count = 0;
-	int last_seen_secs;
-	int last_seen_msecs;
-	unsigned long last_seen_jiffies;
-	uint32_t i;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
-		goto out;
+		return 0;
 
-	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
-		   primary_if->net_dev->dev_addr, net_dev->name);
-	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
-		   "Nexthop", "outgoingIF", "Potential nexthops");
+		   primary_if->net_dev->dev_addr, net_dev->name,
+		   bat_priv->bat_algo_ops->name);
 
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
+	batadv_hardif_free_ref(primary_if);
 
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			neigh_node = batadv_orig_node_get_router(orig_node);
-			if (!neigh_node)
-				continue;
-
-			if (neigh_node->tq_avg == 0)
-				goto next;
-
-			last_seen_jiffies = jiffies - orig_node->last_seen;
-			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
-			last_seen_secs = last_seen_msecs / 1000;
-			last_seen_msecs = last_seen_msecs % 1000;
-
-			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
-				   orig_node->orig, last_seen_secs,
-				   last_seen_msecs, neigh_node->tq_avg,
-				   neigh_node->addr,
-				   neigh_node->if_incoming->net_dev->name);
-
-			hlist_for_each_entry_rcu(neigh_node_tmp,
-						 &orig_node->neigh_list, list) {
-				seq_printf(seq, " %pM (%3i)",
-					   neigh_node_tmp->addr,
-					   neigh_node_tmp->tq_avg);
-			}
-
-			seq_puts(seq, "\n");
-			batman_count++;
-
-next:
-			batadv_neigh_node_free_ref(neigh_node);
-		}
-		rcu_read_unlock();
+	if (!bat_priv->bat_algo_ops->bat_orig_print) {
+		seq_puts(seq,
+			 "No printing function for this routing protocol\n");
+		return 0;
 	}
 
-	if (batman_count == 0)
-		seq_puts(seq, "No batman nodes in range ...\n");
-
-out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
-	return 0;
-}
-
-static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
-				   int max_if_num)
-{
-	void *data_ptr;
-	size_t data_size, old_size;
-
-	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
-	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
-	data_ptr = kmalloc(data_size, GFP_ATOMIC);
-	if (!data_ptr)
-		return -ENOMEM;
-
-	memcpy(data_ptr, orig_node->bcast_own, old_size);
-	kfree(orig_node->bcast_own);
-	orig_node->bcast_own = data_ptr;
-
-	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-	if (!data_ptr)
-		return -ENOMEM;
-
-	memcpy(data_ptr, orig_node->bcast_own_sum,
-	       (max_if_num - 1) * sizeof(uint8_t));
-	kfree(orig_node->bcast_own_sum);
-	orig_node->bcast_own_sum = data_ptr;
+	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
 
 	return 0;
 }
@@ -531,6 +543,7 @@
 			    int max_if_num)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
@@ -545,10 +558,10 @@
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			spin_lock_bh(&orig_node->ogm_cnt_lock);
-			ret = batadv_orig_node_add_if(orig_node, max_if_num);
-			spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+			ret = 0;
+			if (bao->bat_orig_add_if)
+				ret = bao->bat_orig_add_if(orig_node,
+							   max_if_num);
 			if (ret == -ENOMEM)
 				goto err;
 		}
@@ -562,54 +575,6 @@
 	return -ENOMEM;
 }
 
-static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
-				   int max_if_num, int del_if_num)
-{
-	void *data_ptr = NULL;
-	int chunk_size;
-
-	/* last interface was removed */
-	if (max_if_num == 0)
-		goto free_bcast_own;
-
-	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
-	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
-	if (!data_ptr)
-		return -ENOMEM;
-
-	/* copy first part */
-	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
-	/* copy second part */
-	memcpy((char *)data_ptr + del_if_num * chunk_size,
-	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
-	       (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
-	kfree(orig_node->bcast_own);
-	orig_node->bcast_own = data_ptr;
-
-	if (max_if_num == 0)
-		goto free_own_sum;
-
-	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-	if (!data_ptr)
-		return -ENOMEM;
-
-	memcpy(data_ptr, orig_node->bcast_own_sum,
-	       del_if_num * sizeof(uint8_t));
-
-	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
-	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
-	       (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
-	kfree(orig_node->bcast_own_sum);
-	orig_node->bcast_own_sum = data_ptr;
-
-	return 0;
-}
-
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num)
 {
@@ -618,6 +583,7 @@
 	struct hlist_head *head;
 	struct batadv_hard_iface *hard_iface_tmp;
 	struct batadv_orig_node *orig_node;
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
 	uint32_t i;
 	int ret;
 
@@ -629,11 +595,11 @@
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			spin_lock_bh(&orig_node->ogm_cnt_lock);
-			ret = batadv_orig_node_del_if(orig_node, max_if_num,
-						      hard_iface->if_num);
-			spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+			ret = 0;
+			if (bao->bat_orig_del_if)
+				ret = bao->bat_orig_del_if(orig_node,
+							   max_if_num,
+							   hard_iface->if_num);
 			if (ret == -ENOMEM)
 				goto err;
 		}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 7887b84..6f77d80 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -22,16 +22,18 @@
 
 #include "hash.h"
 
+int batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 					      const uint8_t *addr);
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr);
+		      const uint8_t *neigh_addr,
+		      struct batadv_orig_node *orig_node);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
@@ -40,6 +42,13 @@
 			    int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+			  unsigned short vid);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+			  unsigned short vid);
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
 
 
 /* hashfunction to choose an entry in a hash table of given size
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index a51ccfc..207459b 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -20,17 +20,34 @@
 #ifndef _NET_BATMAN_ADV_PACKET_H_
 #define _NET_BATMAN_ADV_PACKET_H_
 
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ *     payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ *     the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
 enum batadv_packettype {
-	BATADV_IV_OGM		= 0x01,
-	BATADV_ICMP		= 0x02,
-	BATADV_UNICAST		= 0x03,
-	BATADV_BCAST		= 0x04,
-	BATADV_VIS		= 0x05,
-	BATADV_UNICAST_FRAG	= 0x06,
-	BATADV_TT_QUERY		= 0x07,
-	BATADV_ROAM_ADV		= 0x08,
-	BATADV_UNICAST_4ADDR	= 0x09,
-	BATADV_CODED		= 0x0a,
+	/* 0x00 - 0x3f: local packets or special rules for handling */
+	BATADV_IV_OGM           = 0x00,
+	BATADV_BCAST            = 0x01,
+	BATADV_CODED            = 0x02,
+	/* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN     0x40
+	BATADV_UNICAST          = 0x40,
+	BATADV_UNICAST_FRAG     = 0x41,
+	BATADV_UNICAST_4ADDR    = 0x42,
+	BATADV_ICMP             = 0x43,
+	BATADV_UNICAST_TVLV     = 0x44,
+#define BATADV_UNICAST_MAX     0x7f
+	/* 0x80 - 0xff: reserved */
 };
 
 /**
@@ -48,13 +65,21 @@
 };
 
 /* this file is included by batctl which needs these defines */
-#define BATADV_COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 15
 
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and
+ *     was previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag is set when the primary interface address
+ *     is used, and the packet travels its first hop.
+ * @BATADV_DIRECTLINK: flag is set for the first hop or when the packet is
+ *     rebroadcast from a one-hop neighbor on the interface where it was
+ *     originally received.
+ */
 enum batadv_iv_flags {
-	BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
-	BATADV_PRIMARIES_FIRST_HOP = BIT(4),
-	BATADV_VIS_SERVER	   = BIT(5),
-	BATADV_DIRECTLINK	   = BIT(6),
+	BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
+	BATADV_PRIMARIES_FIRST_HOP = BIT(1),
+	BATADV_DIRECTLINK          = BIT(2),
 };
 
 /* ICMP message types */
@@ -66,43 +91,44 @@
 	BATADV_PARAMETER_PROBLEM       = 12,
 };
 
-/* vis defines */
-enum batadv_vis_packettype {
-	BATADV_VIS_TYPE_SERVER_SYNC   = 0,
-	BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
-};
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
 
-/* fragmentation defines */
-enum batadv_unicast_frag_flags {
-	BATADV_UNI_FRAG_HEAD	  = BIT(0),
-	BATADV_UNI_FRAG_LARGETAIL = BIT(1),
-};
-
-/* TT_QUERY subtypes */
-#define BATADV_TT_QUERY_TYPE_MASK 0x3
-
-enum batadv_tt_query_packettype {
-	BATADV_TT_REQUEST  = 0,
-	BATADV_TT_RESPONSE = 1,
-};
-
-/* TT_QUERY flags */
-enum batadv_tt_query_flags {
-	BATADV_TT_FULL_TABLE = BIT(2),
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+	BATADV_TT_OGM_DIFF   = BIT(0),
+	BATADV_TT_REQUEST    = BIT(1),
+	BATADV_TT_RESPONSE   = BIT(2),
+	BATADV_TT_FULL_TABLE = BIT(4),
 };
 
 /* BATADV_TT_CLIENT flags.
  * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
- * BIT(15) are used for local computation only
+ * BIT(15) are used for local computation only.
+ * Flags from BIT(4) to BIT(7) are kept in sync with the rest of the network.
  */
 enum batadv_tt_client_flags {
 	BATADV_TT_CLIENT_DEL     = BIT(0),
 	BATADV_TT_CLIENT_ROAM    = BIT(1),
-	BATADV_TT_CLIENT_WIFI    = BIT(2),
-	BATADV_TT_CLIENT_TEMP	 = BIT(3),
+	BATADV_TT_CLIENT_WIFI    = BIT(4),
 	BATADV_TT_CLIENT_NOPURGE = BIT(8),
 	BATADV_TT_CLIENT_NEW     = BIT(9),
 	BATADV_TT_CLIENT_PENDING = BIT(10),
+	BATADV_TT_CLIENT_TEMP	 = BIT(11),
+};
+
+/**
+ * batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+	BATADV_VLAN_HAS_TAG	= BIT(15),
 };
 
 /* claim frame types for the bridge loop avoidance */
@@ -113,6 +139,22 @@
 	BATADV_CLAIM_TYPE_REQUEST	= 0x03,
 };
 
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ */
+enum batadv_tvlv_type {
+	BATADV_TVLV_GW		= 0x01,
+	BATADV_TVLV_DAT		= 0x02,
+	BATADV_TVLV_NC		= 0x03,
+	BATADV_TVLV_TT		= 0x04,
+	BATADV_TVLV_ROAM	= 0x05,
+};
+
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -131,47 +173,74 @@
 	 */
 };
 
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @header: common batman packet header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
 struct batadv_ogm_packet {
 	struct batadv_header header;
-	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
+	uint8_t  flags;
 	__be32   seqno;
 	uint8_t  orig[ETH_ALEN];
 	uint8_t  prev_sender[ETH_ALEN];
-	uint8_t  gw_flags;  /* flags related to gateway class */
+	uint8_t  reserved;
 	uint8_t  tq;
-	uint8_t  tt_num_changes;
-	uint8_t  ttvn; /* translation table version number */
-	__be16   tt_crc;
-} __packed;
+	__be16   tvlv_len;
+	/* __packed is not needed as the struct size is divisible by 4,
+	 * and the largest data type in this struct has a size of 4.
+	 */
+};
 
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
-struct batadv_icmp_packet {
+/**
+ * struct batadv_icmp_header - common ICMP header
+ * @header: common batman header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ */
+struct batadv_icmp_header {
 	struct batadv_header header;
 	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[ETH_ALEN];
 	uint8_t  orig[ETH_ALEN];
-	__be16   seqno;
 	uint8_t  uid;
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @icmph: common ICMP header
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+	struct batadv_icmp_header icmph;
 	uint8_t  reserved;
+	__be16   seqno;
 };
 
 #define BATADV_RR_LEN 16
 
-/* icmp_packet_rr must start with all fields from imcp_packet
- * as this is assumed by code that handles ICMP packets
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @icmph: common ICMP header
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-	struct batadv_header header;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	__be16   seqno;
-	uint8_t  uid;
+	struct batadv_icmp_header icmph;
 	uint8_t  rr_cur;
+	__be16   seqno;
 	uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
+#define BATADV_ICMP_MAX_PACKET_SIZE	sizeof(struct batadv_icmp_packet_rr)
+
 /* All packet headers in front of an ethernet header have to be completely
  * divisible by 2 but not by 4 to make the payload after the ethernet
  * header again 4 bytes boundary aligned.
@@ -209,15 +278,32 @@
 	 */
 };
 
-struct batadv_unicast_frag_packet {
-	struct batadv_header header;
-	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[ETH_ALEN];
-	uint8_t  flags;
-	uint8_t  align;
-	uint8_t  orig[ETH_ALEN];
-	__be16   seqno;
-} __packed;
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @header: common batman packet header with type, compatversion, and ttl
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+	struct  batadv_header header;
+#if defined(__BIG_ENDIAN_BITFIELD)
+	uint8_t no:4;
+	uint8_t reserved:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	uint8_t reserved:4;
+	uint8_t no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+	uint8_t dest[ETH_ALEN];
+	uint8_t orig[ETH_ALEN];
+	__be16  seqno;
+	__be16  total_size;
+};
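+
+/* Editor's note, not part of this patch: the 4 bit "no" field is what caps
+ * BATADV_FRAG_MAX_FRAGMENTS (16, see main.h above); a compile-time check
+ * along these lines would document the coupling:
+ *
+ *	BUILD_BUG_ON(BATADV_FRAG_MAX_FRAGMENTS > (1 << 4));
+ */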
 
 struct batadv_bcast_packet {
 	struct batadv_header header;
@@ -231,54 +317,6 @@
 
 #pragma pack()
 
-struct batadv_vis_packet {
-	struct batadv_header header;
-	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
-	__be32   seqno;		 /* sequence number */
-	uint8_t  entries;	 /* number of entries behind this struct */
-	uint8_t  reserved;
-	uint8_t  vis_orig[ETH_ALEN];	/* originator reporting its neighbors */
-	uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
-	uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
-};
-
-struct batadv_tt_query_packet {
-	struct batadv_header header;
-	/* the flag field is a combination of:
-	 * - TT_REQUEST or TT_RESPONSE
-	 * - TT_FULL_TABLE
-	 */
-	uint8_t  flags;
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  src[ETH_ALEN];
-	/* the ttvn field is:
-	 * if TT_REQUEST: ttvn that triggered the
-	 *		  request
-	 * if TT_RESPONSE: new ttvn for the src
-	 *		   orig_node
-	 */
-	uint8_t  ttvn;
-	/* tt_data field is:
-	 * if TT_REQUEST: crc associated with the
-	 *		  ttvn
-	 * if TT_RESPONSE: table_size
-	 */
-	__be16 tt_data;
-} __packed;
-
-struct batadv_roam_adv_packet {
-	struct batadv_header header;
-	uint8_t  reserved;
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  src[ETH_ALEN];
-	uint8_t  client[ETH_ALEN];
-} __packed;
-
-struct batadv_tt_change {
-	uint8_t flags;
-	uint8_t addr[ETH_ALEN];
-} __packed;
-
 /**
  * struct batadv_coded_packet - network coded packet
  * @header: common batman packet header and ttl of first included packet
@@ -311,4 +349,96 @@
 	__be16   coded_len;
 };
 
+/**
+ * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
+ * @header: common batman packet header
+ * @reserved: reserved field (for packet alignment)
+ * @dst: address of the destination
+ * @src: address of the source
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+	struct batadv_header header;
+	uint8_t  reserved;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	__be16   tvlv_len;
+	uint16_t align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+	uint8_t type;
+	uint8_t version;
+	__be16  len;
+};
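+
+/* Editor's note, not part of this patch: on the wire each container is this
+ * header immediately followed by len payload bytes, so a buffer holding n
+ * containers occupies
+ *
+ *	n * sizeof(struct batadv_tvlv_hdr) + sum of ntohs(len) over all n
+ *
+ * which is exactly what batadv_tvlv_container_list_size() computes; see
+ * batadv_tvlv_containers_process() for the canonical walk over such a buffer.
+ */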
+
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ *  container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+	__be32 bandwidth_down;
+	__be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ *  one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+	uint8_t flags;
+	uint8_t ttvn;
+	__be16  num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ *  the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+	__be32	crc;
+	__be16	vid;
+	uint16_t reserved;
+};
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ *  batadv_tt_client_flags)
+ * @reserved: reserved field
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+	uint8_t flags;
+	uint8_t reserved;
+	uint8_t addr[ETH_ALEN];
+	__be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+	uint8_t  client[ETH_ALEN];
+	__be16 vid;
+};
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
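A note on the bitfield dance in batadv_frag_packet above: C leaves bitfield layout to the ABI, and on common ABIs the first-declared field takes the low bits on little endian but the high bits on big endian. Swapping the declaration order per endianness, as the #if/#elif block does, keeps the 4-bit fragment number in the same on-wire position either way. A small user-space sketch of the idiom — DEMO_BIG_ENDIAN_BITFIELD is a stand-in for the kernel's __BIG_ENDIAN_BITFIELD test, and the output assumes the usual GCC/clang bitfield ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's __BIG_ENDIAN_BITFIELD test */
#if defined(DEMO_BIG_ENDIAN_BITFIELD)
struct frag_bits {
	uint8_t no:4;		/* first-declared field = high bits on BE */
	uint8_t reserved:4;
};
#else
struct frag_bits {
	uint8_t reserved:4;	/* first-declared field = low bits on LE */
	uint8_t no:4;
};
#endif

int main(void)
{
	struct frag_bits f = { .no = 3 };
	uint8_t raw;

	memcpy(&raw, &f, sizeof(raw));
	/* either variant places 'no' in the high nibble: prints 0x30 */
	printf("raw byte: 0x%02x\n", raw);
	return 0;
}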
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0439395..d4114d7 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -25,11 +25,12 @@
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "vis.h"
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
 #include "network-coding.h"
+#include "fragmentation.h"
+
+#include <linux/if_vlan.h>
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
@@ -46,7 +47,7 @@
 	if ((curr_router) && (!neigh_node)) {
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Deleting route towards: %pM\n", orig_node->orig);
-		batadv_tt_global_del_orig(bat_priv, orig_node,
+		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
 					  "Deleted route towards originator");
 
 	/* route added */
@@ -114,9 +115,19 @@
 	return;
 }
 
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+/**
+ * batadv_bonding_candidate_add - consider a new link for bonding mode towards
+ *  the given originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the target node
+ * @neigh_node: the neighbor representing the new link to consider for bonding
+ *  mode
+ */
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+				  struct batadv_orig_node *orig_node,
 				  struct batadv_neigh_node *neigh_node)
 {
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
 	struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
 	uint8_t interference_candidate = 0;
 
@@ -131,8 +142,9 @@
 	if (!router)
 		goto candidate_del;
 
+
 	/* ... and is good enough to be considered */
-	if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
+	if (bao->bat_neigh_is_equiv_or_better(neigh_node, router))
 		goto candidate_del;
 
 	/* check if we have another candidate with the same mac address or
@@ -248,46 +260,65 @@
 	return true;
 }
 
+/**
+ * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: icmp packet to process
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
 static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
-				      struct sk_buff *skb, size_t icmp_len)
+				      struct sk_buff *skb)
 {
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_icmp_packet_rr *icmp_packet;
-	int ret = NET_RX_DROP;
+	struct batadv_icmp_header *icmph;
+	int res, ret = NET_RX_DROP;
 
-	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+	icmph = (struct batadv_icmp_header *)skb->data;
 
-	/* add data to device queue */
-	if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
-		batadv_socket_receive_packet(icmp_packet, icmp_len);
+	switch (icmph->msg_type) {
+	case BATADV_ECHO_REPLY:
+	case BATADV_DESTINATION_UNREACHABLE:
+	case BATADV_TTL_EXCEEDED:
+		/* receive the packet */
+		if (skb_linearize(skb) < 0)
+			break;
+
+		batadv_socket_receive_packet(icmph, skb->len);
+		break;
+	case BATADV_ECHO_REQUEST:
+		/* answer echo request (ping) */
+		primary_if = batadv_primary_if_get_selected(bat_priv);
+		if (!primary_if)
+			goto out;
+
+		/* get routing information */
+		orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
+		if (!orig_node)
+			goto out;
+
+		/* create a copy of the skb, if needed, to modify it. */
+		if (skb_cow(skb, ETH_HLEN) < 0)
+			goto out;
+
+		icmph = (struct batadv_icmp_header *)skb->data;
+
+		memcpy(icmph->dst, icmph->orig, ETH_ALEN);
+		memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+		icmph->msg_type = BATADV_ECHO_REPLY;
+		icmph->header.ttl = BATADV_TTL;
+
+		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+		if (res != NET_XMIT_DROP)
+			ret = NET_RX_SUCCESS;
+
+		break;
+	default:
+		/* drop unknown type */
 		goto out;
 	}
-
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* answer echo request (ping) */
-	/* get routing information */
-	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
-	if (!orig_node)
-		goto out;
-
-	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, ETH_HLEN) < 0)
-		goto out;
-
-	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
-
-	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	icmp_packet->msg_type = BATADV_ECHO_REPLY;
-	icmp_packet->header.ttl = BATADV_TTL;
-
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-		ret = NET_RX_SUCCESS;
-
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
@@ -307,9 +338,9 @@
 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
 	/* send TTL exceeded if packet is an echo request (traceroute) */
-	if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+	if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
 		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-			 icmp_packet->orig, icmp_packet->dst);
+			 icmp_packet->icmph.orig, icmp_packet->icmph.dst);
 		goto out;
 	}
 
@@ -318,7 +349,7 @@
 		goto out;
 
 	/* get routing information */
-	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
 	if (!orig_node)
 		goto out;
 
@@ -328,10 +359,11 @@
 
 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
-	icmp_packet->header.ttl = BATADV_TTL;
+	memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+	memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+	       ETH_ALEN);
+	icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
+	icmp_packet->icmph.header.ttl = BATADV_TTL;
 
 	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
 		ret = NET_RX_SUCCESS;
@@ -349,16 +381,13 @@
 			    struct batadv_hard_iface *recv_if)
 {
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_icmp_packet_rr *icmp_packet;
+	struct batadv_icmp_header *icmph;
+	struct batadv_icmp_packet_rr *icmp_packet_rr;
 	struct ethhdr *ethhdr;
 	struct batadv_orig_node *orig_node = NULL;
-	int hdr_size = sizeof(struct batadv_icmp_packet);
+	int hdr_size = sizeof(struct batadv_icmp_header);
 	int ret = NET_RX_DROP;
 
-	/* we truncate all incoming icmp packets if they don't match our size */
-	if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
-		hdr_size = sizeof(struct batadv_icmp_packet_rr);
-
 	/* drop packet if it has not necessary minimum size */
 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
 		goto out;
@@ -377,26 +406,39 @@
 	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 		goto out;
 
-	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+	icmph = (struct batadv_icmp_header *)skb->data;
 
 	/* add record route information if not full */
-	if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
-	    (icmp_packet->rr_cur < BATADV_RR_LEN)) {
-		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+	if ((icmph->msg_type == BATADV_ECHO_REPLY ||
+	     icmph->msg_type == BATADV_ECHO_REQUEST) &&
+	    (skb->len >= sizeof(struct batadv_icmp_packet_rr))) {
+		if (skb_linearize(skb) < 0)
+			goto out;
+
+		/* create a copy of the skb, if needed, to modify it. */
+		if (skb_cow(skb, ETH_HLEN) < 0)
+			goto out;
+
+		icmph = (struct batadv_icmp_header *)skb->data;
+		icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+		if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+			goto out;
+
+		memcpy(&(icmp_packet_rr->rr[icmp_packet_rr->rr_cur]),
 		       ethhdr->h_dest, ETH_ALEN);
-		icmp_packet->rr_cur++;
+		icmp_packet_rr->rr_cur++;
 	}
 
 	/* packet for me */
-	if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
-		return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
+	if (batadv_is_my_mac(bat_priv, icmph->dst))
+		return batadv_recv_my_icmp_packet(bat_priv, skb);
 
 	/* TTL exceeded */
-	if (icmp_packet->header.ttl < 2)
+	if (icmph->header.ttl < 2)
 		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
 	/* get routing information */
-	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+	orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
 	if (!orig_node)
 		goto out;
 
@@ -404,10 +446,10 @@
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
-	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+	icmph = (struct batadv_icmp_header *)skb->data;
 
 	/* decrement ttl */
-	icmp_packet->header.ttl--;
+	icmph->header.ttl--;
 
 	/* route it */
 	if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -474,18 +516,25 @@
 	return router;
 }
 
-/* Interface Alternating: Use the best of the
- * remaining candidates which are not using
- * this interface.
+/**
+ * batadv_find_ifalter_router - find the best of the remaining candidates which
+ *  are not using this interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_orig: the destination
+ * @recv_if: the interface that the returned router must not use
  *
- * Increases the returned router's refcount
+ * Returns the best candidate towards primary_orig that is not using recv_if.
+ * Increases the returned neighbor's refcount.
  */
 static struct batadv_neigh_node *
-batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+batadv_find_ifalter_router(struct batadv_priv *bat_priv,
+			   struct batadv_orig_node *primary_orig,
 			   const struct batadv_hard_iface *recv_if)
 {
-	struct batadv_neigh_node *tmp_neigh_node;
 	struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+	struct batadv_neigh_node *tmp_neigh_node;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -497,7 +546,7 @@
 		if (tmp_neigh_node->if_incoming == recv_if)
 			continue;
 
-		if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
+		if (router && bao->bat_neigh_cmp(tmp_neigh_node, router))
 			continue;
 
 		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
@@ -557,126 +606,6 @@
 	return 0;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_tt_query_packet *tt_query;
-	uint16_t tt_size;
-	int hdr_size = sizeof(*tt_query);
-	char tt_flag;
-	size_t packet_size;
-
-	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
-		return NET_RX_DROP;
-
-	/* I could need to modify it */
-	if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-		goto out;
-
-	tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-	switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
-	case BATADV_TT_REQUEST:
-		batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
-
-		/* If we cannot provide an answer the tt_request is
-		 * forwarded
-		 */
-		if (!batadv_send_tt_response(bat_priv, tt_query)) {
-			if (tt_query->flags & BATADV_TT_FULL_TABLE)
-				tt_flag = 'F';
-			else
-				tt_flag = '.';
-
-			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Routing TT_REQUEST to %pM [%c]\n",
-				   tt_query->dst,
-				   tt_flag);
-			return batadv_route_unicast_packet(skb, recv_if);
-		}
-		break;
-	case BATADV_TT_RESPONSE:
-		batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
-
-		if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
-			/* packet needs to be linearized to access the TT
-			 * changes
-			 */
-			if (skb_linearize(skb) < 0)
-				goto out;
-			/* skb_linearize() possibly changed skb->data */
-			tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-			tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
-
-			/* Ensure we have all the claimed data */
-			packet_size = sizeof(struct batadv_tt_query_packet);
-			packet_size += tt_size;
-			if (unlikely(skb_headlen(skb) < packet_size))
-				goto out;
-
-			batadv_handle_tt_response(bat_priv, tt_query);
-		} else {
-			if (tt_query->flags & BATADV_TT_FULL_TABLE)
-				tt_flag =  'F';
-			else
-				tt_flag = '.';
-			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Routing TT_RESPONSE to %pM [%c]\n",
-				   tt_query->dst,
-				   tt_flag);
-			return batadv_route_unicast_packet(skb, recv_if);
-		}
-		break;
-	}
-
-out:
-	/* returning NET_RX_DROP will make the caller function kfree the skb */
-	return NET_RX_DROP;
-}
-
-int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_roam_adv_packet *roam_adv_packet;
-	struct batadv_orig_node *orig_node;
-
-	if (batadv_check_unicast_packet(bat_priv, skb,
-					sizeof(*roam_adv_packet)) < 0)
-		goto out;
-
-	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
-
-	roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
-
-	if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
-		return batadv_route_unicast_packet(skb, recv_if);
-
-	/* check if it is a backbone gateway. we don't accept
-	 * roaming advertisement from it, as it has the same
-	 * entries as we have.
-	 */
-	if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
-		goto out;
-
-	orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
-	if (!orig_node)
-		goto out;
-
-	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Received ROAMING_ADV from %pM (client %pM)\n",
-		   roam_adv_packet->src, roam_adv_packet->client);
-
-	batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
-			     BATADV_TT_CLIENT_ROAM,
-			     atomic_read(&orig_node->last_ttvn) + 1);
-
-	batadv_orig_node_free_ref(orig_node);
-out:
-	/* returning NET_RX_DROP will make the caller function kfree the skb */
-	return NET_RX_DROP;
-}
-
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.
@@ -751,7 +680,8 @@
 	if (bonding_enabled)
 		router = batadv_find_bond_router(primary_orig_node, recv_if);
 	else
-		router = batadv_find_ifalter_router(primary_orig_node, recv_if);
+		router = batadv_find_ifalter_router(bat_priv, primary_orig_node,
+						    recv_if);
 
 return_router:
 	if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
@@ -772,11 +702,9 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_unicast_packet *unicast_packet;
 	struct ethhdr *ethhdr = eth_hdr(skb);
 	int res, hdr_len, ret = NET_RX_DROP;
-	struct sk_buff *new_skb;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
@@ -793,46 +721,12 @@
 	if (!orig_node)
 		goto out;
 
-	/* find_router() increases neigh_nodes refcount if found. */
-	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
-
-	if (!neigh_node)
-		goto out;
-
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
-	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-	if (unicast_packet->header.packet_type == BATADV_UNICAST &&
-	    atomic_read(&bat_priv->fragmentation) &&
-	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
-		ret = batadv_frag_send_skb(skb, bat_priv,
-					   neigh_node->if_incoming,
-					   neigh_node->addr);
-		goto out;
-	}
-
-	if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
-	    batadv_frag_can_reassemble(skb,
-				       neigh_node->if_incoming->net_dev->mtu)) {
-		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-
-		if (ret == NET_RX_DROP)
-			goto out;
-
-		/* packet was buffered for late merge */
-		if (!new_skb) {
-			ret = NET_RX_SUCCESS;
-			goto out;
-		}
-
-		skb = new_skb;
-		unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	}
-
 	/* decrement ttl */
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 	unicast_packet->header.ttl--;
 
 	switch (unicast_packet->header.packet_type) {
@@ -867,8 +761,6 @@
 	}
 
 out:
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 	return ret;
@@ -879,6 +771,7 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
+ * @vid: VLAN identifier
  *
  * Search the translation table for dst_addr and update the unicast header with
  * the new corresponding information (originator address where the destination
@@ -889,21 +782,22 @@
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
 			      struct batadv_unicast_packet *unicast_packet,
-			      uint8_t *dst_addr)
+			      uint8_t *dst_addr, unsigned short vid)
 {
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	bool ret = false;
 	uint8_t *orig_addr, orig_ttvn;
 
-	if (batadv_is_my_client(bat_priv, dst_addr)) {
+	if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
 		primary_if = batadv_primary_if_get_selected(bat_priv);
 		if (!primary_if)
 			goto out;
 		orig_addr = primary_if->net_dev->dev_addr;
 		orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 	} else {
-		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
+		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+						     vid);
 		if (!orig_node)
 			goto out;
 
@@ -930,11 +824,12 @@
 
 static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 				     struct sk_buff *skb, int hdr_len) {
-	uint8_t curr_ttvn, old_ttvn;
-	struct batadv_orig_node *orig_node;
-	struct ethhdr *ethhdr;
-	struct batadv_hard_iface *primary_if;
 	struct batadv_unicast_packet *unicast_packet;
+	struct batadv_hard_iface *primary_if;
+	struct batadv_orig_node *orig_node;
+	uint8_t curr_ttvn, old_ttvn;
+	struct ethhdr *ethhdr;
+	unsigned short vid;
 	int is_old_ttvn;
 
 	/* check if there is enough data before accessing it */
@@ -946,6 +841,7 @@
 		return 0;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	vid = batadv_get_vid(skb, hdr_len);
 	ethhdr = (struct ethhdr *)(skb->data + hdr_len);
 
 	/* check if the destination client was served by this node and it is now
@@ -953,9 +849,9 @@
 	 * message and that it knows the new destination in the mesh to re-route
 	 * the packet to
 	 */
-	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
+	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
 		if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-						  ethhdr->h_dest))
+						  ethhdr->h_dest, vid))
 			net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
 						 bat_priv,
 						 "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
@@ -1001,7 +897,7 @@
 	 * target host
 	 */
 	if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-					  ethhdr->h_dest)) {
+					  ethhdr->h_dest, vid)) {
 		net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
 					 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
 					 unicast_packet->dest, ethhdr->h_dest,
@@ -1013,7 +909,7 @@
 	 * currently served by this node or there is no destination at all and
 	 * it is possible to drop the packet
 	 */
-	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
+	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
 		return 0;
 
 	/* update the header in order to let the packet be delivered to this
@@ -1032,6 +928,34 @@
 	return 1;
 }
 
+/**
+ * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ *  are in the unicast number space but not yet known to the implementation
+ * @skb: unicast packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+					 struct batadv_hard_iface *recv_if)
+{
+	struct batadv_unicast_packet *unicast_packet;
+	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	int check, hdr_size = sizeof(*unicast_packet);
+
+	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+	if (check < 0)
+		return NET_RX_DROP;
+
+	/* we don't know about this type, drop it. */
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
+		return NET_RX_DROP;
+
+	return batadv_route_unicast_packet(skb, recv_if);
+}
+
 int batadv_recv_unicast_packet(struct sk_buff *skb,
 			       struct batadv_hard_iface *recv_if)
 {
@@ -1094,51 +1018,112 @@
 	return batadv_route_unicast_packet(skb, recv_if);
 }
 
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-				  struct batadv_hard_iface *recv_if)
+/**
+ * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+			     struct batadv_hard_iface *recv_if)
 {
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_unicast_frag_packet *unicast_packet;
-	int hdr_size = sizeof(*unicast_packet);
-	struct sk_buff *new_skb = NULL;
-	int ret;
+	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+	unsigned char *tvlv_buff;
+	uint16_t tvlv_buff_len;
+	int hdr_size = sizeof(*unicast_tvlv_packet);
+	int ret = NET_RX_DROP;
 
 	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
-	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+	/* the header is likely to be modified while forwarding */
+	if (skb_cow(skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
-	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
+	/* packet needs to be linearized to access the tvlv content */
+	if (skb_linearize(skb) < 0)
+		return NET_RX_DROP;
 
-	/* packet for me */
-	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
-		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
+	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
 
-		if (ret == NET_RX_DROP)
-			return NET_RX_DROP;
+	tvlv_buff = (unsigned char *)(skb->data + hdr_size);
+	tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
 
-		/* packet was buffered for late merge */
-		if (!new_skb)
-			return NET_RX_SUCCESS;
+	if (tvlv_buff_len > skb->len - hdr_size)
+		return NET_RX_DROP;
 
-		if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
-							  hdr_size))
-			goto rx_success;
-		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
-							hdr_size))
-			goto rx_success;
+	ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
+					     unicast_tvlv_packet->src,
+					     unicast_tvlv_packet->dst,
+					     tvlv_buff, tvlv_buff_len);
 
-		batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-				    sizeof(struct batadv_unicast_packet), NULL);
+	if (ret != NET_RX_SUCCESS)
+		ret = batadv_route_unicast_packet(skb, recv_if);
 
-rx_success:
-		return NET_RX_SUCCESS;
-	}
-
-	return batadv_route_unicast_packet(skb, recv_if);
+	return ret;
 }
 
+/**
+ * batadv_recv_frag_packet - process received fragment
+ * @skb: the received fragment
+ * @recv_if: interface that the skb is received on
+ *
+ * This function does one of three things: 1) forward the fragment, if the
+ * assembled packet will exceed our MTU; 2) buffer the fragment, if we still
+ * lack further fragments; 3) merge the fragments, if we have all needed parts.
+ *
+ * Returns NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ */
+int batadv_recv_frag_packet(struct sk_buff *skb,
+			    struct batadv_hard_iface *recv_if)
+{
+	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct batadv_orig_node *orig_node_src = NULL;
+	struct batadv_frag_packet *frag_packet;
+	int ret = NET_RX_DROP;
+
+	if (batadv_check_unicast_packet(bat_priv, skb,
+					sizeof(*frag_packet)) < 0)
+		goto out;
+
+	frag_packet = (struct batadv_frag_packet *)skb->data;
+	orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+	if (!orig_node_src)
+		goto out;
+
+	/* Route the fragment if it is not for us and too big to be merged. */
+	if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+	    batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
+	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
+	/* Add fragment to buffer and merge if possible. */
+	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+		goto out;
+
+	/* Deliver merged packet to the appropriate handler, if it was
+	 * merged
+	 */
+	if (skb)
+		batadv_batman_skb_recv(skb, recv_if->net_dev,
+				       &recv_if->batman_adv_ptype, NULL);
+
+	ret = NET_RX_SUCCESS;
+
+out:
+	if (orig_node_src)
+		batadv_orig_node_free_ref(orig_node_src);
+
+	return ret;
+}
 
 int batadv_recv_bcast_packet(struct sk_buff *skb,
 			     struct batadv_hard_iface *recv_if)
@@ -1240,53 +1225,3 @@
 		batadv_orig_node_free_ref(orig_node);
 	return ret;
 }
-
-int batadv_recv_vis_packet(struct sk_buff *skb,
-			   struct batadv_hard_iface *recv_if)
-{
-	struct batadv_vis_packet *vis_packet;
-	struct ethhdr *ethhdr;
-	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	int hdr_size = sizeof(*vis_packet);
-
-	/* keep skb linear */
-	if (skb_linearize(skb) < 0)
-		return NET_RX_DROP;
-
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
-
-	vis_packet = (struct batadv_vis_packet *)skb->data;
-	ethhdr = eth_hdr(skb);
-
-	/* not for me */
-	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	/* ignore own packets */
-	if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
-		return NET_RX_DROP;
-
-	if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
-		return NET_RX_DROP;
-
-	switch (vis_packet->vis_type) {
-	case BATADV_VIS_TYPE_SERVER_SYNC:
-		batadv_receive_server_sync_packet(bat_priv, vis_packet,
-						  skb_headlen(skb));
-		break;
-
-	case BATADV_VIS_TYPE_CLIENT_UPDATE:
-		batadv_receive_client_update_packet(bat_priv, vis_packet,
-						    skb_headlen(skb));
-		break;
-
-	default:	/* ignore unknown packet */
-		break;
-	}
-
-	/* We take a copy of the data in the packet, so we should
-	 * always free the skbuf.
-	 */
-	return NET_RX_DROP;
-}
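Worth noting on batadv_recv_unicast_tvlv() above: after linearizing the skb it verifies that the advertised tvlv_len fits into the bytes that actually arrived (tvlv_buff_len > skb->len - hdr_size drops the packet), and only then hands the buffer to batadv_tvlv_containers_process(). The container format itself is the batadv_tvlv_hdr from packet.h — type, version, big-endian 16-bit length — followed by that many payload bytes, repeated. Here is a hedged user-space sketch of walking such a buffer with the same bounds discipline; tvlv_walk is an illustrative helper, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() */

/* mirrors struct batadv_tvlv_hdr from packet.h */
struct tvlv_hdr {
	uint8_t type;
	uint8_t version;
	uint16_t len;	/* big endian on the wire */
} __attribute__((packed));

/* illustrative walker: print every container in buff */
static void tvlv_walk(const uint8_t *buff, size_t buff_len)
{
	struct tvlv_hdr hdr;
	uint16_t len;

	while (buff_len >= sizeof(hdr)) {
		memcpy(&hdr, buff, sizeof(hdr));
		len = ntohs(hdr.len);

		/* truncated container: stop rather than read past the end */
		if (buff_len - sizeof(hdr) < len)
			break;

		printf("tvlv type=%u version=%u len=%u\n",
		       hdr.type, hdr.version, len);

		buff += sizeof(hdr) + len;
		buff_len -= sizeof(hdr) + len;
	}
}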
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 72a29bd..19544dd 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -30,23 +30,26 @@
 			    struct batadv_hard_iface *recv_if);
 int batadv_recv_unicast_packet(struct sk_buff *skb,
 			       struct batadv_hard_iface *recv_if);
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-				  struct batadv_hard_iface *recv_if);
+int batadv_recv_frag_packet(struct sk_buff *skb,
+			    struct batadv_hard_iface *iface);
 int batadv_recv_bcast_packet(struct sk_buff *skb,
 			     struct batadv_hard_iface *recv_if);
-int batadv_recv_vis_packet(struct sk_buff *skb,
-			   struct batadv_hard_iface *recv_if);
 int batadv_recv_tt_query(struct sk_buff *skb,
 			 struct batadv_hard_iface *recv_if);
 int batadv_recv_roam_adv(struct sk_buff *skb,
 			 struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+			     struct batadv_hard_iface *recv_if);
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+					 struct batadv_hard_iface *recv_if);
 struct batadv_neigh_node *
 batadv_find_router(struct batadv_priv *bat_priv,
 		   struct batadv_orig_node *orig_node,
 		   const struct batadv_hard_iface *recv_if);
 void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
 				  struct batadv_neigh_node *neigh_node);
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+				  struct batadv_orig_node *orig_node,
 				  struct batadv_neigh_node *neigh_node);
 void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
 				 struct batadv_orig_node *orig_neigh_node,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0266edd..c83be5e 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -24,12 +24,11 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "vis.h"
 #include "gateway_common.h"
+#include "gateway_client.h"
 #include "originator.h"
 #include "network-coding.h"
-
-#include <linux/if_ether.h>
+#include "fragmentation.h"
 
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -64,10 +63,10 @@
 	ethhdr = eth_hdr(skb);
 	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+	ethhdr->h_proto = htons(ETH_P_BATMAN);
 
 	skb_set_network_header(skb, ETH_HLEN);
-	skb->protocol = __constant_htons(ETH_P_BATMAN);
+	skb->protocol = htons(ETH_P_BATMAN);
 
 	skb->dev = hard_iface->net_dev;
 
@@ -109,7 +108,19 @@
 	/* batadv_find_router() increases neigh_nodes refcount if found. */
 	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
 	if (!neigh_node)
-		return ret;
+		goto out;
+
+	/* Check if the skb is too large to send in one piece and fragment
+	 * it if needed.
+	 */
+	if (atomic_read(&bat_priv->fragmentation) &&
+	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
+		/* Fragment and send packet. */
+		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
+			ret = NET_XMIT_SUCCESS;
+
+		goto out;
+	}
 
 	/* try to network code the packet, if it is received on an interface
 	 * (i.e. being forwarded). If the packet originates from this node or if
@@ -123,11 +134,225 @@
 		ret = NET_XMIT_SUCCESS;
 	}
 
-	batadv_neigh_node_free_ref(neigh_node);
+out:
+	if (neigh_node)
+		batadv_neigh_node_free_ref(neigh_node);
 
 	return ret;
 }
 
+/**
+ * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ *  common fields for unicast packets
+ * @skb: the skb carrying the unicast header to initialize
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Returns false if the buffer extension was not possible or true otherwise.
+ */
+static bool
+batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
+				  struct batadv_orig_node *orig_node)
+{
+	struct batadv_unicast_packet *unicast_packet;
+	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+
+	if (batadv_skb_head_push(skb, hdr_size) < 0)
+		return false;
+
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+	unicast_packet->header.version = BATADV_COMPAT_VERSION;
+	/* batman packet type: unicast */
+	unicast_packet->header.packet_type = BATADV_UNICAST;
+	/* set unicast ttl */
+	unicast_packet->header.ttl = BATADV_TTL;
+	/* copy the destination for faster routing */
+	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+	/* set the destination tt version number */
+	unicast_packet->ttvn = ttvn;
+
+	return true;
+}
+
+/**
+ * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
+					    struct batadv_orig_node *orig_node)
+{
+	size_t uni_size = sizeof(struct batadv_unicast_packet);
+
+	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ *  unicast 4addr header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb,
+					   struct batadv_orig_node *orig,
+					   int packet_subtype)
+{
+	struct batadv_hard_iface *primary_if;
+	struct batadv_unicast_4addr_packet *uc_4addr_packet;
+	bool ret = false;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* Pull the header space and fill the unicast_packet substructure.
+	 * We can do that because the first member of the uc_4addr_packet
+	 * is of type struct unicast_packet
+	 */
+	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
+					       orig))
+		goto out;
+
+	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	uc_4addr_packet->subtype = packet_subtype;
+	uc_4addr_packet->reserved = 0;
+
+	ret = true;
+out:
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	return ret;
+}
+
+/**
+ * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @orig_node: the originator to send the packet to
+ * @vid: the vid to be used to search the translation table
+ *
+ * Wrap the given skb into a batman-adv unicast or unicast-4addr header
+ * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
+ * as packet_type. Then send this frame to the given orig_node and release a
+ * reference to this orig_node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+				   struct sk_buff *skb, int packet_type,
+				   int packet_subtype,
+				   struct batadv_orig_node *orig_node,
+				   unsigned short vid)
+{
+	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+	struct batadv_unicast_packet *unicast_packet;
+	int ret = NET_XMIT_DROP;
+
+	if (!orig_node)
+		goto out;
+
+	switch (packet_type) {
+	case BATADV_UNICAST:
+		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
+			goto out;
+		break;
+	case BATADV_UNICAST_4ADDR:
+		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
+							   orig_node,
+							   packet_subtype))
+			goto out;
+		break;
+	default:
+		/* this function supports UNICAST and UNICAST_4ADDR only. It
+		 * should never be invoked with any other packet type
+		 */
+		goto out;
+	}
+
+	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+	/* inform the destination node that we are still missing a correct route
+	 * for this client. The destination will receive this packet and will
+	 * try to reroute it because the ttvn contained in the header is less
+	 * than the current one
+	 */
+	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
+		unicast_packet->ttvn = unicast_packet->ttvn - 1;
+
+	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+		ret = NET_XMIT_SUCCESS;
+
+out:
+	if (orig_node)
+		batadv_orig_node_free_ref(orig_node);
+	if (ret == NET_XMIT_DROP)
+		kfree_skb(skb);
+	return ret;
+}
+
+/**
+ * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
+ * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
+ * to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+				   struct sk_buff *skb, int packet_type,
+				   int packet_subtype, unsigned short vid)
+{
+	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+	struct batadv_orig_node *orig_node;
+
+	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+					     ethhdr->h_dest, vid);
+	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
+				       packet_subtype, orig_node, vid);
+}
+
+/**
+ * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the currently selected gateway. Wrap the given skb into a batman-adv
+ * unicast header and send this frame to this gateway node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			   unsigned short vid)
+{
+	struct batadv_orig_node *orig_node;
+
+	orig_node = batadv_gw_get_selected_orig(bat_priv);
+	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+				       orig_node, vid);
+}
+
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
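The key behavioral change in batadv_send_skb_to_orig() above is that fragmentation now happens transparently at send time instead of in the old unicast path: the decision reduces to a size check against the outgoing interface MTU. A minimal sketch of that decision under stated assumptions — fragment_and_send and send_one are illustrative stand-ins for batadv_frag_send_packet() and the direct send path, not kernel functions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* illustrative stand-in for batadv_frag_send_packet() */
static bool fragment_and_send(size_t len)
{
	printf("fragmenting %zu byte frame\n", len);
	return true;
}

/* illustrative stand-in for the direct send path */
static bool send_one(size_t len)
{
	printf("sending %zu byte frame in one piece\n", len);
	return true;
}

/* mirrors the check added to batadv_send_skb_to_orig() */
static bool send_with_optional_frag(size_t len, size_t mtu, bool frag_on)
{
	if (frag_on && len > mtu)
		return fragment_and_send(len);

	return send_one(len);
}

int main(void)
{
	send_with_optional_frag(2000, 1500, true);	/* fragments */
	send_with_optional_frag(900, 1500, true);	/* fits as-is */
	return 0;
}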
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index e7b1788..aa2e253 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -34,5 +34,58 @@
 void
 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 				 const struct batadv_hard_iface *hard_iface);
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb,
+					   struct batadv_orig_node *orig_node,
+					   int packet_subtype);
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+				   struct sk_buff *skb, int packet_type,
+				   int packet_subtype, unsigned short vid);
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			   unsigned short vid);
+
+/**
+ * batadv_send_skb_via_tt - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast header. Then send this frame to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb,
+					 unsigned short vid)
+{
+	return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
+					      vid);
+}
+
+/**
+ * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast-4addr header. Then send this frame to the corresponding destination
+ * node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
+					       struct sk_buff *skb,
+					       int packet_subtype,
+					       unsigned short vid)
+{
+	return batadv_send_skb_via_tt_generic(bat_priv, skb,
+					      BATADV_UNICAST_4ADDR,
+					      packet_subtype, vid);
+}
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
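How a caller picks between the helpers above: batadv_interface_tx() (in soft-interface.c below) routes multicast frames through the gateway helper and everything else through the translation-table lookup, and the unicast send path frees the skb itself on NET_XMIT_DROP. A hedged sketch of such a caller; it assumes the usual batman-adv kernel context and headers, and xmit_payload itself is illustrative:

/* sketch only: dispatch a payload the way batadv_interface_tx() does;
 * assumes linux/etherdevice.h and the batman-adv headers are available
 */
static int xmit_payload(struct batadv_priv *bat_priv, struct sk_buff *skb,
			unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;

	if (is_multicast_ether_addr(ethhdr->h_dest))
		return batadv_send_skb_via_gw(bat_priv, skb, vid);

	/* unicast: look the recipient up in the translation table */
	return batadv_send_skb_via_tt(bat_priv, skb, vid);
}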
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 813db4e..36f0508 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -34,8 +34,6 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
-#include <linux/if_ether.h>
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
@@ -120,9 +118,10 @@
 
 	/* only modify transtable if it has been initialized before */
 	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
-		batadv_tt_local_remove(bat_priv, old_addr,
+		batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
 				       "mac address changed", false);
-		batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
+		batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+				    BATADV_NULL_IFINDEX);
 	}
 
 	return 0;
@@ -139,36 +138,48 @@
 	return 0;
 }
 
+/**
+ * batadv_interface_set_rx_mode - set the rx mode of a device
+ * @dev: registered network device to modify
+ *
+ * We do not actually need to set any rx filters for the virtual batman
+ * soft interface. However, a dummy handler enables a user to set static
+ * multicast listeners, for instance.
+ */
+static void batadv_interface_set_rx_mode(struct net_device *dev)
+{
+}
+
 static int batadv_interface_tx(struct sk_buff *skb,
 			       struct net_device *soft_iface)
 {
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+	struct ethhdr *ethhdr;
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_bcast_packet *bcast_packet;
-	struct vlan_ethhdr *vhdr;
-	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
+	__be16 ethertype = htons(ETH_P_BATMAN);
 	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
 						   0x00, 0x00};
 	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
 						    0x00, 0x00};
+	struct vlan_ethhdr *vhdr;
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
-	unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-	bool do_bcast = false;
-	uint32_t seqno;
 	unsigned long brd_delay = 1;
+	bool do_bcast = false, client_added;
+	unsigned short vid;
+	uint32_t seqno;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
 
 	soft_iface->trans_start = jiffies;
+	vid = batadv_get_vid(skb, 0);
+	ethhdr = (struct ethhdr *)skb->data;
 
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_8021Q:
 		vhdr = (struct vlan_ethhdr *)skb->data;
-		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-		vid |= BATADV_VLAN_HAS_TAG;
 
 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
 			break;
@@ -185,8 +196,12 @@
 	ethhdr = (struct ethhdr *)skb->data;
 
 	/* Register the client MAC in the transtable */
-	if (!is_multicast_ether_addr(ethhdr->h_source))
-		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+	if (!is_multicast_ether_addr(ethhdr->h_source)) {
+		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
+						   vid, skb->skb_iif);
+		if (!client_added)
+			goto dropped;
+	}
 
 	/* don't accept stp packets. STP does not help in meshes.
 	 * better use the bridge loop avoidance ...
@@ -286,8 +301,12 @@
 
 		batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
 
-		ret = batadv_unicast_send_skb(bat_priv, skb);
-		if (ret != 0)
+		if (is_multicast_ether_addr(ethhdr->h_dest))
+			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+		else
+			ret = batadv_send_skb_via_tt(bat_priv, skb, vid);
+
+		if (ret == NET_XMIT_DROP)
 			goto dropped_freed;
 	}
 
@@ -309,12 +328,12 @@
 			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
 			 int hdr_size, struct batadv_orig_node *orig_node)
 {
-	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-	struct ethhdr *ethhdr;
-	struct vlan_ethhdr *vhdr;
 	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
-	unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	__be16 ethertype = htons(ETH_P_BATMAN);
+	struct vlan_ethhdr *vhdr;
+	struct ethhdr *ethhdr;
+	unsigned short vid;
 	bool is_bcast;
 
 	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
@@ -326,13 +345,12 @@
 	skb_pull_rcsum(skb, hdr_size);
 	skb_reset_mac_header(skb);
 
+	vid = batadv_get_vid(skb, hdr_size);
 	ethhdr = eth_hdr(skb);
 
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_8021Q:
 		vhdr = (struct vlan_ethhdr *)skb->data;
-		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-		vid |= BATADV_VLAN_HAS_TAG;
 
 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
 			break;
@@ -368,9 +386,10 @@
 
 	if (orig_node)
 		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
-						     ethhdr->h_source);
+						     ethhdr->h_source, vid);
 
-	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest,
+				  vid))
 		goto dropped;
 
 	netif_rx(skb);
@@ -382,6 +401,177 @@
 	return;
 }
 
+/**
+ * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
+ *  possibly free it
+ * @softif_vlan: the vlan object to release
+ */
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+{
+	if (atomic_dec_and_test(&softif_vlan->refcount))
+		kfree_rcu(softif_vlan, rcu);
+}
+
+/**
+ * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the identifier of the vlan object to retrieve
+ *
+ * Returns the private data of the vlan matching the vid passed as argument or
+ * NULL otherwise. The refcounter of the returned object is incremented by 1.
+ */
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+						  unsigned short vid)
+{
+	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+		if (vlan_tmp->vid != vid)
+			continue;
+
+		if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+			continue;
+
+		vlan = vlan_tmp;
+		break;
+	}
+	rcu_read_unlock();
+
+	return vlan;
+}
+
+/**
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Returns 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+{
+	struct batadv_softif_vlan *vlan;
+	int err;
+
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (vlan) {
+		batadv_softif_vlan_free_ref(vlan);
+		return -EEXIST;
+	}
+
+	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+	if (!vlan)
+		return -ENOMEM;
+
+	vlan->vid = vid;
+	atomic_set(&vlan->refcount, 1);
+
+	atomic_set(&vlan->ap_isolation, 0);
+
+	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+	if (err) {
+		kfree(vlan);
+		return err;
+	}
+
+	/* add a new TT local entry. This one will be marked with the NOPURGE
+	 * flag
+	 */
+	batadv_tt_local_add(bat_priv->soft_iface,
+			    bat_priv->soft_iface->dev_addr, vid,
+			    BATADV_NULL_IFINDEX);
+
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+	return 0;
+}
+
+/**
+ * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the object to remove
+ */
+static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
+				       struct batadv_softif_vlan *vlan)
+{
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+	hlist_del_rcu(&vlan->list);
+	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+	batadv_sysfs_del_vlan(bat_priv, vlan);
+
+	/* explicitly remove the associated TT local entry because it is marked
+	 * with the NOPURGE flag
+	 */
+	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
+			       vlan->vid, "vlan interface destroyed", false);
+
+	batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_interface_add_vid - ndo_add_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan, only ETH_P_8021Q is supported
+ * @vid: identifier of the new vlan
+ *
+ * Set up all the internal structures for handling the new vlan on top of the
+ * mesh interface.
+ *
+ * Returns 0 on success or a negative error code in case of failure.
+ */
+static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
+				    unsigned short vid)
+{
+	struct batadv_priv *bat_priv = netdev_priv(dev);
+
+	/* only 802.1Q vlans are supported.
+	 * batman-adv does not know how to handle other types
+	 */
+	if (proto != htons(ETH_P_8021Q))
+		return -EINVAL;
+
+	vid |= BATADV_VLAN_HAS_TAG;
+
+	return batadv_softif_create_vlan(bat_priv, vid);
+}
+
+/**
+ * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan, only ETH_P_8021Q is supported
+ * @vid: identifier of the deleted vlan
+ *
+ * Destroy all the internal structures used to handle the vlan identified by
+ * vid on top of the mesh interface.
+ *
+ * Returns 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
+ * or -ENOENT if the specified vlan id wasn't registered.
+ */
+static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+				     unsigned short vid)
+{
+	struct batadv_priv *bat_priv = netdev_priv(dev);
+	struct batadv_softif_vlan *vlan;
+
+	/* only 802.1Q vlans are supported. batman-adv does not know how to
+	 * handle other types
+	 */
+	if (proto != htons(ETH_P_8021Q))
+		return -EINVAL;
+
+	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+	if (!vlan)
+		return -ENOENT;
+
+	batadv_softif_destroy_vlan(bat_priv, vlan);
+
+	/* finally free the vlan object */
+	batadv_softif_vlan_free_ref(vlan);
+
+	return 0;
+}
+
 /* batman-adv network devices have devices nesting below it and are a special
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
@@ -421,6 +611,7 @@
  */
 static void batadv_softif_destroy_finish(struct work_struct *work)
 {
+	struct batadv_softif_vlan *vlan;
 	struct batadv_priv *bat_priv;
 	struct net_device *soft_iface;
 
@@ -428,6 +619,13 @@
 				cleanup_work);
 	soft_iface = bat_priv->soft_iface;
 
+	/* destroy the "untagged" VLAN */
+	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+	if (vlan) {
+		batadv_softif_destroy_vlan(bat_priv, vlan);
+		batadv_softif_vlan_free_ref(vlan);
+	}
+
 	batadv_sysfs_del_meshif(soft_iface);
 
 	rtnl_lock();
@@ -444,6 +642,7 @@
 static int batadv_softif_init_late(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv;
+	uint32_t random_seqno;
 	int ret;
 	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
 
@@ -468,17 +667,17 @@
 #ifdef CONFIG_BATMAN_ADV_DAT
 	atomic_set(&bat_priv->distributed_arp_table, 1);
 #endif
-	atomic_set(&bat_priv->ap_isolation, 0);
-	atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
 	atomic_set(&bat_priv->gw_sel_class, 20);
-	atomic_set(&bat_priv->gw_bandwidth, 41);
+	atomic_set(&bat_priv->gw.bandwidth_down, 100);
+	atomic_set(&bat_priv->gw.bandwidth_up, 20);
 	atomic_set(&bat_priv->orig_interval, 1000);
 	atomic_set(&bat_priv->hop_penalty, 30);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 	atomic_set(&bat_priv->log_level, 0);
 #endif
 	atomic_set(&bat_priv->fragmentation, 1);
+	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
 	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
 	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
@@ -493,6 +692,10 @@
 	bat_priv->tt.last_changeset = NULL;
 	bat_priv->tt.last_changeset_len = 0;
 
+	/* randomize initial seqno to avoid collision */
+	get_random_bytes(&random_seqno, sizeof(random_seqno));
+	atomic_set(&bat_priv->frag_seqno, random_seqno);
+
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
 
@@ -578,8 +781,11 @@
 	.ndo_open = batadv_interface_open,
 	.ndo_stop = batadv_interface_release,
 	.ndo_get_stats = batadv_interface_stats,
+	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
+	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
 	.ndo_set_mac_address = batadv_interface_set_mac_addr,
 	.ndo_change_mtu = batadv_interface_change_mtu,
+	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
 	.ndo_start_xmit = batadv_interface_tx,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_add_slave = batadv_softif_slave_add,
@@ -616,6 +822,7 @@
 
 	dev->netdev_ops = &batadv_netdev_ops;
 	dev->destructor = batadv_softif_free;
+	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	dev->tx_queue_len = 0;
 
 	/* can't call min_mtu, because the needed variables
@@ -623,7 +830,7 @@
 	 */
 	dev->mtu = ETH_DATA_LEN;
 	/* reserve more space in the skbuff for our header */
-	dev->hard_header_len = BATADV_HEADER_LEN;
+	dev->hard_header_len = batadv_max_header_len();
 
 	/* generate random address */
 	eth_hw_addr_random(dev);
@@ -760,6 +967,12 @@
 	{ "mgmt_tx_bytes" },
 	{ "mgmt_rx" },
 	{ "mgmt_rx_bytes" },
+	{ "frag_tx" },
+	{ "frag_tx_bytes" },
+	{ "frag_rx" },
+	{ "frag_rx_bytes" },
+	{ "frag_fwd" },
+	{ "frag_fwd_bytes" },
 	{ "tt_request_tx" },
 	{ "tt_request_rx" },
 	{ "tt_response_tx" },
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 2f2472c..06fc91f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -28,5 +28,9 @@
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan);
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+						  unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
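One convention to keep in mind with the new vlan API above: batadv_softif_vlan_get() returns the object with its refcount already taken, so every successful lookup must be paired with batadv_softif_vlan_free_ref() — batadv_interface_kill_vid() in soft-interface.c follows exactly this pattern. A minimal sketch of the discipline; use_vlan is illustrative and the usual kernel context is assumed:

/* sketch only: look a vlan up, use it, drop the reference;
 * assumes the batman-adv kernel headers
 */
static int use_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return -ENOENT;	/* no reference taken on failure */

	/* ... operate on vlan while the reference is held ... */

	batadv_softif_vlan_free_ref(vlan);
	return 0;
}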
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 4114b96..6335433 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -21,11 +21,12 @@
 #include "sysfs.h"
 #include "translation-table.h"
 #include "distributed-arp-table.h"
+#include "network-coding.h"
 #include "originator.h"
 #include "hard-interface.h"
+#include "soft-interface.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "vis.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
@@ -39,6 +40,53 @@
 	return netdev_priv(net_dev);
 }
 
+/**
+ * batadv_vlan_kobj_to_batpriv - convert a vlan kobj into the associated
+ *  batpriv
+ * @obj: kobject to convert
+ *
+ * Returns the associated batadv_priv struct.
+ */
+static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
+{
+	/* VLAN specific attributes are located in the root sysfs folder if they
+	 * refer to the untagged VLAN..
+	 */
+	if (!strcmp(BATADV_SYSFS_IF_MESH_SUBDIR, obj->name))
+		return batadv_kobj_to_batpriv(obj);
+
+	/* ..while the attributes for the tagged vlans are located in
+	 * the corresponding "vlan%VID" subfolder
+	 */
+	return batadv_kobj_to_batpriv(obj->parent);
+}
+
+/**
+ * batadv_kobj_to_vlan - convert a kobj into the associated softif_vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
+ * @obj: kobject to convert
+ *
+ * Returns the associated softif_vlan struct if found, NULL otherwise.
+ */
+static struct batadv_softif_vlan *
+batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
+{
+	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+		if (vlan_tmp->kobj != obj)
+			continue;
+
+		if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+			continue;
+
+		vlan = vlan_tmp;
+		break;
+	}
+	rcu_read_unlock();
+
+	return vlan;
+}
+
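
/* Editorial aside (not part of the patch): batadv_kobj_to_vlan() above uses
 * the standard RCU lookup-and-hold idiom -- walk the list under
 * rcu_read_lock() and return an entry only if its refcount can be raised from
 * a non-zero value, so a concurrently dying vlan is never handed out. A
 * generic sketch of the idiom, with hypothetical names:
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(obj, &some_list, node) {
 *		if (!match(obj, key))
 *			continue;
 *		if (!atomic_inc_not_zero(&obj->refcount))
 *			continue;	// object is being freed, skip it
 *		found = obj;
 *		break;
 *	}
 *	rcu_read_unlock();
 */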
 #define BATADV_UEV_TYPE_VAR	"BATTYPE="
 #define BATADV_UEV_ACTION_VAR	"BATACTION="
 #define BATADV_UEV_DATA_VAR	"BATDATA="
@@ -53,6 +101,15 @@
 	"gw"
 };
 
+/* Use this, if you have customized show and store functions for vlan attrs */
+#define BATADV_ATTR_VLAN(_name, _mode, _show, _store)	\
+struct batadv_attribute batadv_attr_vlan_##_name = {	\
+	.attr = {.name = __stringify(_name),		\
+		 .mode = _mode },			\
+	.show   = _show,				\
+	.store  = _store,				\
+};
+
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)	\
 struct batadv_attribute batadv_attr_##_name = {		\
@@ -122,6 +179,41 @@
 	static BATADV_ATTR(_name, _mode, batadv_show_##_name,		\
 			   batadv_store_##_name)
 
+#define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)			\
+ssize_t batadv_store_vlan_##_name(struct kobject *kobj,			\
+				  struct attribute *attr, char *buff,	\
+				  size_t count)				\
+{									\
+	struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+	struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv,	\
+							      kobj);	\
+	size_t res = __batadv_store_bool_attr(buff, count, _post_func,	\
+					      attr, &vlan->_name,	\
+					      bat_priv->soft_iface);	\
+	batadv_softif_vlan_free_ref(vlan);				\
+	return res;							\
+}
+
+#define BATADV_ATTR_VLAN_SHOW_BOOL(_name)				\
+ssize_t batadv_show_vlan_##_name(struct kobject *kobj,			\
+				 struct attribute *attr, char *buff)	\
+{									\
+	struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+	struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv,	\
+							      kobj);	\
+	size_t res = sprintf(buff, "%s\n",				\
+			     atomic_read(&vlan->_name) == 0 ?		\
+			     "disabled" : "enabled");			\
+	batadv_softif_vlan_free_ref(vlan);				\
+	return res;							\
+}
+
+/* Use this, if you are going to turn a [name] in the vlan struct on or off */
+#define BATADV_ATTR_VLAN_BOOL(_name, _mode, _post_func)			\
+	static BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)		\
+	static BATADV_ATTR_VLAN_SHOW_BOOL(_name)			\
+	static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name,	\
+				batadv_store_vlan_##_name)
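
/* Editorial aside (not part of the patch): for the ap_isolation attribute
 * declared further down, BATADV_ATTR_VLAN_BOOL(ap_isolation,
 * S_IRUGO | S_IWUSR, NULL) expands -- in sketch form -- to:
 *
 *	static ssize_t batadv_store_vlan_ap_isolation(...);
 *	static ssize_t batadv_show_vlan_ap_isolation(...);
 *	static struct batadv_attribute batadv_attr_vlan_ap_isolation = {
 *		.attr  = { .name = "ap_isolation", .mode = S_IRUGO | S_IWUSR },
 *		.show  = batadv_show_vlan_ap_isolation,
 *		.store = batadv_store_vlan_ap_isolation,
 *	};
 *
 * i.e. one show/store pair per boolean, backed by the atomic of the same
 * name in struct batadv_softif_vlan.
 */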
 
 static int batadv_store_bool_attr(char *buff, size_t count,
 				  struct net_device *net_dev,
@@ -230,74 +322,6 @@
 	return ret;
 }
 
-static ssize_t batadv_show_vis_mode(struct kobject *kobj,
-				    struct attribute *attr, char *buff)
-{
-	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-	int vis_mode = atomic_read(&bat_priv->vis_mode);
-	const char *mode;
-
-	if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
-		mode = "client";
-	else
-		mode = "server";
-
-	return sprintf(buff, "%s\n", mode);
-}
-
-static ssize_t batadv_store_vis_mode(struct kobject *kobj,
-				     struct attribute *attr, char *buff,
-				     size_t count)
-{
-	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
-	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	unsigned long val;
-	int ret, vis_mode_tmp = -1;
-	const char *old_mode, *new_mode;
-
-	ret = kstrtoul(buff, 10, &val);
-
-	if (((count == 2) && (!ret) &&
-	     (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
-	    (strncmp(buff, "client", 6) == 0) ||
-	    (strncmp(buff, "off", 3) == 0))
-		vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
-
-	if (((count == 2) && (!ret) &&
-	     (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
-	    (strncmp(buff, "server", 6) == 0))
-		vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
-
-	if (vis_mode_tmp < 0) {
-		if (buff[count - 1] == '\n')
-			buff[count - 1] = '\0';
-
-		batadv_info(net_dev,
-			    "Invalid parameter for 'vis mode' setting received: %s\n",
-			    buff);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
-		return count;
-
-	if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
-		old_mode =  "client";
-	else
-		old_mode = "server";
-
-	if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
-		new_mode =  "client";
-	else
-		new_mode = "server";
-
-	batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
-		    new_mode);
-
-	atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
-	return count;
-}
-
 static ssize_t batadv_show_bat_algo(struct kobject *kobj,
 				    struct attribute *attr, char *buff)
 {
@@ -390,6 +414,7 @@
 	 */
 	batadv_gw_check_client_stop(bat_priv);
 	atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+	batadv_gw_tvlv_container_update(bat_priv);
 	return count;
 }
 
@@ -397,15 +422,13 @@
 				     struct attribute *attr, char *buff)
 {
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-	int down, up;
-	int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
+	uint32_t down, up;
 
-	batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
-	return sprintf(buff, "%i%s/%i%s\n",
-		       (down > 2048 ? down / 1024 : down),
-		       (down > 2048 ? "MBit" : "KBit"),
-		       (up > 2048 ? up / 1024 : up),
-		       (up > 2048 ? "MBit" : "KBit"));
+	down = atomic_read(&bat_priv->gw.bandwidth_down);
+	up = atomic_read(&bat_priv->gw.bandwidth_up);
+
+	return sprintf(buff, "%u.%u/%u.%u MBit\n", down / 10,
+		       down % 10, up / 10, up % 10);
 }
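
/* Editorial aside (not part of the patch): with this change the gateway
 * bandwidth is stored in units of 0.1 MBit/s, so the printout is plain
 * fixed-point arithmetic instead of the old KBit/MBit switching. A standalone
 * sketch with hypothetical stored values:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t down = 105, up = 23;	/* 10.5 and 2.3 MBit/s */

	/* prints "10.5/2.3 MBit" */
	printf("%u.%u/%u.%u MBit\n", down / 10, down % 10, up / 10, up % 10);
	return 0;
}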
 
 static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
@@ -426,12 +449,10 @@
 BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
-BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR,
+		     batadv_dat_status_update);
 #endif
 BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
-BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
-		   batadv_store_vis_mode);
 static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
 static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
 		   batadv_store_gw_mode);
@@ -447,7 +468,8 @@
 BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_NC
-BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
+		     batadv_nc_status_update);
 #endif
 
 static struct batadv_attribute *batadv_mesh_attrs[] = {
@@ -460,8 +482,6 @@
 	&batadv_attr_distributed_arp_table,
 #endif
 	&batadv_attr_fragmentation,
-	&batadv_attr_ap_isolation,
-	&batadv_attr_vis_mode,
 	&batadv_attr_routing_algo,
 	&batadv_attr_gw_mode,
 	&batadv_attr_orig_interval,
@@ -477,6 +497,16 @@
 	NULL,
 };
 
+BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+
+/**
+ * batadv_vlan_attrs - array of vlan specific sysfs attributes
+ */
+static struct batadv_attribute *batadv_vlan_attrs[] = {
+	&batadv_attr_vlan_ap_isolation,
+	NULL,
+};
+
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
 	struct kobject *batif_kobject = &dev->dev.kobj;
@@ -527,6 +557,80 @@
 	bat_priv->mesh_obj = NULL;
 }
 
+/**
+ * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * @dev: netdev of the mesh interface
+ * @vlan: private data of the newly added VLAN interface
+ *
+ * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ */
+int batadv_sysfs_add_vlan(struct net_device *dev,
+			  struct batadv_softif_vlan *vlan)
+{
+	char vlan_subdir[sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5];
+	struct batadv_priv *bat_priv = netdev_priv(dev);
+	struct batadv_attribute **bat_attr;
+	int err;
+
+	if (vlan->vid & BATADV_VLAN_HAS_TAG) {
+		sprintf(vlan_subdir, BATADV_SYSFS_VLAN_SUBDIR_PREFIX "%hu",
+			vlan->vid & VLAN_VID_MASK);
+
+		vlan->kobj = kobject_create_and_add(vlan_subdir,
+						    bat_priv->mesh_obj);
+		if (!vlan->kobj) {
+			batadv_err(dev, "Can't add sysfs directory: %s/%s\n",
+				   dev->name, vlan_subdir);
+			goto out;
+		}
+	} else {
+		/* the untagged VLAN uses the root folder to store its "VLAN
+		 * specific attributes"
+		 */
+		vlan->kobj = bat_priv->mesh_obj;
+		kobject_get(bat_priv->mesh_obj);
+	}
+
+	for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) {
+		err = sysfs_create_file(vlan->kobj,
+					&((*bat_attr)->attr));
+		if (err) {
+			batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+				   dev->name, vlan_subdir,
+				   ((*bat_attr)->attr).name);
+			goto rem_attr;
+		}
+	}
+
+	return 0;
+
+rem_attr:
+	for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+		sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+	kobject_put(vlan->kobj);
+	vlan->kobj = NULL;
+out:
+	return -ENOMEM;
+}
+
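/* Editorial aside (not part of the patch): vlan_subdir above is sized
 * sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5, i.e. the prefix including its
 * terminating NUL plus room for the decimal VID. Because the VID is masked
 * with VLAN_VID_MASK it is at most 4095, so the worst case "vlan4095" needs
 * 9 bytes and comfortably fits:
 *
 *	char vlan_subdir[sizeof("vlan") + 5];	// 10 bytes
 *	sprintf(vlan_subdir, "vlan%hu", (unsigned short)(vid & 0xfff));
 */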
+/**
+ * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the private data of the VLAN to destroy
+ */
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+			   struct batadv_softif_vlan *vlan)
+{
+	struct batadv_attribute **bat_attr;
+
+	for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+		sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+	kobject_put(vlan->kobj);
+	vlan->kobj = NULL;
+}
+
 static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
 				      struct attribute *attr, char *buff)
 {
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 479acf4..c7d725d 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -22,6 +22,12 @@
 
 #define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
 #define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
+/**
+ * BATADV_SYSFS_VLAN_SUBDIR_PREFIX - prefix of the subfolder that will be
+ *  created in the sysfs hierarchy for each VLAN interface. The subfolder will
+ *  be named "BATADV_SYSFS_VLAN_SUBDIR_PREFIX%vid".
+ */
+#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
 struct batadv_attribute {
 	struct attribute attr;
@@ -36,6 +42,10 @@
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
 			    struct net_device *dev);
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_sysfs_add_vlan(struct net_device *dev,
+			  struct batadv_softif_vlan *vlan);
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+			   struct batadv_softif_vlan *vlan);
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 			enum batadv_uev_action action, const char *data);
 
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 34510f3..4add57d 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -27,13 +27,14 @@
 #include "routing.h"
 #include "bridge_loop_avoidance.h"
 
-#include <linux/crc16.h>
+#include <linux/crc32c.h>
 
 /* hash class keys */
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
 static struct lock_class_key batadv_tt_global_hash_lock_class_key;
 
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+				 unsigned short vid,
 				 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
 static void
@@ -41,7 +42,8 @@
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
 				 struct batadv_orig_node *orig_node,
 				 const unsigned char *addr,
-				 const char *message, bool roaming);
+				 unsigned short vid, const char *message,
+				 bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -52,43 +54,93 @@
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_choose_tt - return the index of the tt entry in the hash table
+ * @data: pointer to the tt_common_entry object to map
+ * @size: the size of the hash table
+ *
+ * Returns the hash index where the object represented by 'data' should be
+ * stored at.
+ */
+static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+{
+	struct batadv_tt_common_entry *tt;
+	uint32_t hash = 0;
+
+	tt = (struct batadv_tt_common_entry *)data;
+	hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
+	hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
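/* Editorial aside (not part of the patch): a standalone sketch of the bucket
 * selection above, assuming batadv_hash_bytes() is the one-at-a-time mix from
 * hash.h (hash += byte; hash += hash << 10; hash ^= hash >> 6). The point of
 * the change is that the VID now feeds the hash, so the same MAC seen on two
 * VLANs usually lands in different buckets.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t sketch_hash_bytes(uint32_t hash, const void *data, size_t n)
{
	const uint8_t *key = data;
	size_t i;

	for (i = 0; i < n; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}
	return hash;
}

static uint32_t sketch_choose_tt(const uint8_t *addr, uint16_t vid,
				 uint32_t size)
{
	uint32_t hash = 0;

	hash = sketch_hash_bytes(hash, addr, 6);		/* ETH_ALEN */
	hash = sketch_hash_bytes(hash, &vid, sizeof(vid));

	/* final avalanche, exactly as in batadv_choose_tt() */
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}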
+/**
+ * batadv_tt_hash_find - look for a client in the given hash table
+ * @hash: the hash table to search
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the tt_common struct belonging to the searched client if
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
+batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+		    unsigned short vid)
 {
 	struct hlist_head *head;
-	struct batadv_tt_common_entry *tt_common_entry;
-	struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
+	struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
 	uint32_t index;
 
 	if (!hash)
 		return NULL;
 
-	index = batadv_choose_orig(data, hash->size);
+	memcpy(to_search.addr, addr, ETH_ALEN);
+	to_search.vid = vid;
+
+	index = batadv_choose_tt(&to_search, hash->size);
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
-		if (!batadv_compare_eth(tt_common_entry, data))
+	hlist_for_each_entry_rcu(tt, head, hash_entry) {
+		if (!batadv_compare_eth(tt, addr))
 			continue;
 
-		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
+		if (tt->vid != vid)
 			continue;
 
-		tt_common_entry_tmp = tt_common_entry;
+		if (!atomic_inc_not_zero(&tt->refcount))
+			continue;
+
+		tt_tmp = tt;
 		break;
 	}
 	rcu_read_unlock();
 
-	return tt_common_entry_tmp;
+	return tt_tmp;
 }
 
+/**
+ * batadv_tt_local_hash_find - search the local table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+			  unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
+					      vid);
 	if (tt_common_entry)
 		tt_local_entry = container_of(tt_common_entry,
 					      struct batadv_tt_local_entry,
@@ -96,13 +148,24 @@
 	return tt_local_entry;
 }
 
+/**
+ * batadv_tt_global_hash_find - search the global table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * is found, NULL otherwise.
+ */
 static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+			   unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
+					      vid);
 	if (tt_common_entry)
 		tt_global_entry = container_of(tt_common_entry,
 					       struct batadv_tt_global_entry,
@@ -117,25 +180,17 @@
 		kfree_rcu(tt_local_entry, common.rcu);
 }
 
-static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
-{
-	struct batadv_tt_common_entry *tt_common_entry;
-	struct batadv_tt_global_entry *tt_global_entry;
-
-	tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
-	tt_global_entry = container_of(tt_common_entry,
-				       struct batadv_tt_global_entry, common);
-
-	kfree(tt_global_entry);
-}
-
+/**
+ * batadv_tt_global_entry_free_ref - decrement the refcounter for a
+ *  tt_global_entry and possibly free it
+ * @tt_global_entry: the object to free
+ */
 static void
 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 {
 	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
 		batadv_tt_global_del_orig_list(tt_global_entry);
-		call_rcu(&tt_global_entry->common.rcu,
-			 batadv_tt_global_entry_free_rcu);
+		kfree_rcu(tt_global_entry, common.rcu);
 	}
 }
 
@@ -153,13 +208,107 @@
 	kfree(orig_entry);
 }
 
+/**
+ * batadv_tt_local_size_mod - change the size by v of the local table identified
+ *  by vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier of the sub-table to change
+ * @v: the amount to add to the local table size
+ */
+static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
+				     unsigned short vid, int v)
+{
+	struct batadv_softif_vlan *vlan;
+
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (!vlan)
+		return;
+
+	atomic_add(v, &vlan->tt.num_entries);
+
+	batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_local_size_inc - increase by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
+				     unsigned short vid)
+{
+	batadv_tt_local_size_mod(bat_priv, vid, 1);
+}
+
+/**
+ * batadv_tt_local_size_dec - decrease by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
+				     unsigned short vid)
+{
+	batadv_tt_local_size_mod(bat_priv, vid, -1);
+}
+
+/**
+ * batadv_tt_global_size_mod - change the size by v of the global table
+ *  identified by vid
+ * @orig_node: the originator for which the global table size has to change
+ * @vid: the VLAN identifier
+ * @v: the amount to add to the global table size
+ */
+static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+				      unsigned short vid, int v)
+{
+	struct batadv_orig_node_vlan *vlan;
+
+	vlan = batadv_orig_node_vlan_new(orig_node, vid);
+	if (!vlan)
+		return;
+
+	if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+		spin_lock_bh(&orig_node->vlan_list_lock);
+		list_del_rcu(&vlan->list);
+		spin_unlock_bh(&orig_node->vlan_list_lock);
+		batadv_orig_node_vlan_free_ref(vlan);
+	}
+
+	batadv_orig_node_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_global_size_inc - increase by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be increased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
+				      unsigned short vid)
+{
+	batadv_tt_global_size_mod(orig_node, vid, 1);
+}
+
+/**
+ * batadv_tt_global_size_dec - decrease by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be decreased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+				      unsigned short vid)
+{
+	batadv_tt_global_size_mod(orig_node, vid, -1);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
 	if (!atomic_dec_and_test(&orig_entry->refcount))
 		return;
-	/* to avoid race conditions, immediately decrease the tt counter */
-	atomic_dec(&orig_entry->orig_node->tt_size);
+
 	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
 }
 
@@ -180,12 +329,13 @@
 	bool del_op_requested, del_op_entry;
 
 	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
-
 	if (!tt_change_node)
 		return;
 
 	tt_change_node->change.flags = flags;
+	tt_change_node->change.reserved = 0;
 	memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
+	tt_change_node->change.vid = htons(common->vid);
 
 	del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
@@ -208,6 +358,13 @@
 			goto del;
 		if (del_op_requested && !del_op_entry)
 			goto del;
+
+		/* this is a second add in the same originator interval. It
+		 * means that flags have been changed: update them!
+		 */
+		if (!del_op_requested && !del_op_entry)
+			entry->change.flags = flags;
+
 		continue;
 del:
 		list_del(&entry->list);
@@ -229,9 +386,55 @@
 		atomic_inc(&bat_priv->tt.local_changes);
 }
 
-int batadv_tt_len(int changes_num)
+/**
+ * batadv_tt_len - compute length in bytes of given number of tt changes
+ * @changes_num: number of tt changes
+ *
+ * Returns computed length in bytes.
+ */
+static int batadv_tt_len(int changes_num)
 {
-	return changes_num * sizeof(struct batadv_tt_change);
+	return changes_num * sizeof(struct batadv_tvlv_tt_change);
+}
+
+/**
+ * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * @tt_len: available space
+ *
+ * Returns the number of entries.
+ */
+static uint16_t batadv_tt_entries(uint16_t tt_len)
+{
+	return tt_len / batadv_tt_len(1);
+}
+
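/* Editorial aside (not part of the patch): the two helpers are exact inverses
 * over whole records. Assuming -- for the arithmetic only -- that a packed
 * struct batadv_tvlv_tt_change is 10 bytes (flags, reserved, 6-byte MAC,
 * 2-byte VID), a 1500-byte budget carries 150 whole changes:
 */
#include <stdint.h>
#include <assert.h>

#define SKETCH_TT_CHANGE_LEN 10	/* assumed on-wire record size */

static int sketch_tt_len(int changes_num)
{
	return changes_num * SKETCH_TT_CHANGE_LEN;
}

static uint16_t sketch_tt_entries(uint16_t tt_len)
{
	return tt_len / SKETCH_TT_CHANGE_LEN;
}

int main(void)
{
	assert(sketch_tt_entries(1500) == 150);
	/* truncation never overshoots the budget */
	assert(sketch_tt_len(sketch_tt_entries(1499)) <= 1499);
	return 0;
}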
+/**
+ * batadv_tt_local_table_transmit_size - calculates the local translation table
+ *  size when transmitted over the air
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns local translation table size in bytes.
+ */
+static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
+{
+	uint16_t num_vlan = 0, tt_local_entries = 0;
+	struct batadv_softif_vlan *vlan;
+	int hdr_size;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		num_vlan++;
+		tt_local_entries += atomic_read(&vlan->tt.num_entries);
+	}
+	rcu_read_unlock();
+
+	/* header size of tvlv encapsulated tt response payload */
+	hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
+	hdr_size += sizeof(struct batadv_tvlv_hdr);
+	hdr_size += sizeof(struct batadv_tvlv_tt_data);
+	hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
+
+	return hdr_size + batadv_tt_len(tt_local_entries);
 }
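
/* Editorial aside (not part of the patch): a worked example of the formula
 * above, with hypothetical struct sizes (the real ones are the packed wire
 * formats from packet.h). Assuming a 20-byte unicast_tvlv_packet, 4-byte
 * tvlv_hdr, 4-byte tvlv_tt_data, 8-byte tvlv_tt_vlan_data and a 10-byte
 * change record, a node with 2 VLANs and 10 local clients would need
 *
 *	20 + 4 + 4 + 2 * 8 + 10 * 10 = 144 bytes
 *
 * in a full-table response; batadv_tt_local_add() compares this figure
 * against packet_size_max before accepting a new client.
 */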
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
@@ -255,33 +458,51 @@
 				  const char *message)
 {
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Deleting global tt entry %pM: %s\n",
-		   tt_global->common.addr, message);
+		   "Deleting global tt entry %pM (vid: %d): %s\n",
+		   tt_global->common.addr,
+		   BATADV_PRINT_VID(tt_global->common.vid), message);
 
 	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-			   batadv_choose_orig, tt_global->common.addr);
+			   batadv_choose_tt, &tt_global->common);
 	batadv_tt_global_entry_free_ref(tt_global);
 }
 
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 int ifindex)
+/**
+ * batadv_tt_local_add - add a new client to the local table or update an
+ *  existing client
+ * @soft_iface: netdev struct of the mesh interface
+ * @addr: the mac address of the client to add
+ * @vid: VLAN identifier
+ * @ifindex: index of the interface the client is connected to (useful to
+ *  identify wireless clients)
+ *
+ * Returns true if the client was successfully added, false otherwise.
+ */
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+			 unsigned short vid, int ifindex)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_tt_global_entry *tt_global;
+	struct net_device *in_dev = NULL;
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
-	int hash_added;
-	bool roamed_back = false;
+	int hash_added, table_size, packet_size_max;
+	bool ret = false, roamed_back = false;
+	uint8_t remote_flags;
 
-	tt_local = batadv_tt_local_hash_find(bat_priv, addr);
-	tt_global = batadv_tt_global_hash_find(bat_priv, addr);
+	if (ifindex != BATADV_NULL_IFINDEX)
+		in_dev = dev_get_by_index(&init_net, ifindex);
+
+	tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
+	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
 
 	if (tt_local) {
 		tt_local->last_seen = jiffies;
 		if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Re-adding pending client %pM\n", addr);
+				   "Re-adding pending client %pM (vid: %d)\n",
+				   addr, BATADV_PRINT_VID(vid));
 			/* whatever the reason why the PENDING flag was set,
 			 * this is a client which was enqueued to be removed in
 			 * this orig_interval. Since it popped up again, the
@@ -293,8 +514,8 @@
 
 		if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Roaming client %pM came back to its original location\n",
-				   addr);
+				   "Roaming client %pM (vid: %d) came back to its original location\n",
+				   addr, BATADV_PRINT_VID(vid));
 			/* the ROAM flag is set because this client roamed away
 			 * and the node got a roaming_advertisement message. Now
 			 * that the client popped up again at its original
@@ -306,12 +527,24 @@
 		goto check_roaming;
 	}
 
+	/* Ignore the client if we cannot send it in a full table response. */
+	table_size = batadv_tt_local_table_transmit_size(bat_priv);
+	table_size += batadv_tt_len(1);
+	packet_size_max = atomic_read(&bat_priv->packet_size_max);
+	if (table_size > packet_size_max) {
+		net_ratelimited_function(batadv_info, soft_iface,
+					 "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
+					 table_size, packet_size_max, addr);
+		goto out;
+	}
+
 	tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
 	if (!tt_local)
 		goto out;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+		   addr, BATADV_PRINT_VID(vid),
 		   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
 	memcpy(tt_local->common.addr, addr, ETH_ALEN);
@@ -320,7 +553,8 @@
 	 * (consistency check)
 	 */
 	tt_local->common.flags = BATADV_TT_CLIENT_NEW;
-	if (batadv_is_wifi_iface(ifindex))
+	tt_local->common.vid = vid;
+	if (batadv_is_wifi_netdev(in_dev))
 		tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
 	atomic_set(&tt_local->common.refcount, 2);
 	tt_local->last_seen = jiffies;
@@ -331,7 +565,7 @@
 		tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
 	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
-				     batadv_choose_orig, &tt_local->common,
+				     batadv_choose_tt, &tt_local->common,
 				     &tt_local->common.hash_entry);
 
 	if (unlikely(hash_added != 0)) {
@@ -353,6 +587,7 @@
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_entry, head, list) {
 			batadv_send_roam_adv(bat_priv, tt_global->common.addr,
+					     tt_global->common.vid,
 					     orig_entry->orig_node);
 		}
 		rcu_read_unlock();
@@ -369,78 +604,219 @@
 		}
 	}
 
+	/* store the current remote flags before altering them. This helps
+	 * understand whether the flags are changing or not
+	 */
+	remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
+
+	if (batadv_is_wifi_netdev(in_dev))
+		tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+	else
+		tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
+
+	/* if any "dynamic" flag has been modified, resend an ADD event for this
+	 * entry so that all the nodes can get the new flags
+	 */
+	if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
+		batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+	ret = true;
 out:
+	if (in_dev)
+		dev_put(in_dev);
 	if (tt_local)
 		batadv_tt_local_entry_free_ref(tt_local);
 	if (tt_global)
 		batadv_tt_global_entry_free_ref(tt_global);
+	return ret;
 }
 
-static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int min_packet_len,
-					  int new_packet_len)
+/**
+ * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ *  within a TT Response directed to another node
+ * @orig_node: originator for which the TT data has to be prepared
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ *  function reserves the amount of space needed to send the entire global TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN served by the originator node.
+ *
+ * Returns the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+				   struct batadv_tvlv_tt_data **tt_data,
+				   struct batadv_tvlv_tt_change **tt_change,
+				   int32_t *tt_len)
 {
-	unsigned char *new_buff;
+	uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+	struct batadv_tvlv_tt_vlan_data *tt_vlan;
+	struct batadv_orig_node_vlan *vlan;
+	uint8_t *tt_change_ptr;
 
-	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
-
-	/* keep old buffer if kmalloc should fail */
-	if (new_buff) {
-		memcpy(new_buff, *packet_buff, min_packet_len);
-		kfree(*packet_buff);
-		*packet_buff = new_buff;
-		*packet_buff_len = new_packet_len;
+	rcu_read_lock();
+	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+		num_vlan++;
+		num_entries += atomic_read(&vlan->tt.num_entries);
 	}
+
+	change_offset = sizeof(**tt_data);
+	change_offset += num_vlan * sizeof(*tt_vlan);
+
+	/* if tt_len is negative, allocate the space needed by the full table */
+	if (*tt_len < 0)
+		*tt_len = batadv_tt_len(num_entries);
+
+	tvlv_len = *tt_len;
+	tvlv_len += change_offset;
+
+	*tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+	if (!*tt_data) {
+		*tt_len = 0;
+		goto out;
+	}
+
+	(*tt_data)->flags = BATADV_NO_FLAGS;
+	(*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
+	(*tt_data)->num_vlan = htons(num_vlan);
+
+	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+		tt_vlan->vid = htons(vlan->vid);
+		tt_vlan->crc = htonl(vlan->tt.crc);
+
+		tt_vlan++;
+	}
+
+	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+	rcu_read_unlock();
+	return tvlv_len;
 }
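
/* Editorial aside (not part of the patch): the buffer assembled above is one
 * tvlv_tt_data header, then num_vlan per-VLAN descriptors, then the change
 * records. A hypothetical walk over such a layout, with sketch types standing
 * in for the real packed, big-endian wire structs:
 */
#include <stdint.h>

struct sketch_tt_data   { uint8_t flags, ttvn; uint16_t num_vlan; };
struct sketch_tt_vlan   { uint32_t crc; uint16_t vid, reserved; };
struct sketch_tt_change { uint8_t flags, reserved; uint8_t addr[6]; uint16_t vid; };

static struct sketch_tt_change *
sketch_first_change(struct sketch_tt_data *tt_data)
{
	uint8_t *ptr = (uint8_t *)(tt_data + 1);	/* skip the header */

	/* skip the per-VLAN (vid, crc) descriptors */
	ptr += tt_data->num_vlan * sizeof(struct sketch_tt_vlan);

	return (struct sketch_tt_change *)ptr;
}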
 
-static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
-					  unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int min_packet_len)
+/**
+ * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
+ *  node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ *  function reserves the amount of space needed to send the entire local TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN.
+ *
+ * Returns the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+				  struct batadv_tvlv_tt_data **tt_data,
+				  struct batadv_tvlv_tt_change **tt_change,
+				  int32_t *tt_len)
 {
-	int req_len;
+	struct batadv_tvlv_tt_vlan_data *tt_vlan;
+	struct batadv_softif_vlan *vlan;
+	uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
+	uint8_t *tt_change_ptr;
+	int change_offset;
 
-	req_len = min_packet_len;
-	req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		num_vlan++;
+		num_entries += atomic_read(&vlan->tt.num_entries);
+	}
+
+	change_offset = sizeof(**tt_data);
+	change_offset += num_vlan * sizeof(*tt_vlan);
+
+	/* if tt_len is negative, allocate the space needed by the full table */
+	if (*tt_len < 0)
+		*tt_len = batadv_tt_len(num_entries);
+
+	tvlv_len = *tt_len;
+	tvlv_len += change_offset;
+
+	*tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+	if (!*tt_data) {
+		tvlv_len = 0;
+		goto out;
+	}
+
+	(*tt_data)->flags = BATADV_NO_FLAGS;
+	(*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
+	(*tt_data)->num_vlan = htons(num_vlan);
+
+	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		tt_vlan->vid = htons(vlan->vid);
+		tt_vlan->crc = htonl(vlan->tt.crc);
+
+		tt_vlan++;
+	}
+
+	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+	rcu_read_unlock();
+	return tvlv_len;
+}
+
+/**
+ * batadv_tt_tvlv_container_update - update the translation table tvlv container
+ *  after local tt changes have been committed
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+	struct batadv_tt_change_node *entry, *safe;
+	struct batadv_tvlv_tt_data *tt_data;
+	struct batadv_tvlv_tt_change *tt_change;
+	int tt_diff_len, tt_change_len = 0;
+	int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
+	uint16_t tvlv_len;
+
+	tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+	tt_diff_len = batadv_tt_len(tt_diff_entries_num);
 
 	/* if we have too many changes for one packet don't send any
 	 * and wait for the tt table request which will be fragmented
 	 */
-	if (req_len > bat_priv->soft_iface->mtu)
-		req_len = min_packet_len;
+	if (tt_diff_len > bat_priv->soft_iface->mtu)
+		tt_diff_len = 0;
 
-	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-				      min_packet_len, req_len);
-}
+	tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+						     &tt_change, &tt_diff_len);
+	if (!tvlv_len)
+		return;
 
-static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
-				       unsigned char **packet_buff,
-				       int *packet_buff_len,
-				       int min_packet_len)
-{
-	struct batadv_tt_change_node *entry, *safe;
-	int count = 0, tot_changes = 0, new_len;
-	unsigned char *tt_buff;
+	tt_data->flags = BATADV_TT_OGM_DIFF;
 
-	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
-				      packet_buff_len, min_packet_len);
-
-	new_len = *packet_buff_len - min_packet_len;
-	tt_buff = *packet_buff + min_packet_len;
-
-	if (new_len > 0)
-		tot_changes = new_len / batadv_tt_len(1);
+	if (tt_diff_len == 0)
+		goto container_register;
 
 	spin_lock_bh(&bat_priv->tt.changes_list_lock);
 	atomic_set(&bat_priv->tt.local_changes, 0);
 
 	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
 				 list) {
-		if (count < tot_changes) {
-			memcpy(tt_buff + batadv_tt_len(count),
-			       &entry->change, sizeof(struct batadv_tt_change));
-			count++;
+		if (tt_diff_entries_count < tt_diff_entries_num) {
+			memcpy(tt_change + tt_diff_entries_count,
+			       &entry->change,
+			       sizeof(struct batadv_tvlv_tt_change));
+			tt_diff_entries_count++;
 		}
 		list_del(&entry->list);
 		kfree(entry);
@@ -452,20 +828,25 @@
 	kfree(bat_priv->tt.last_changeset);
 	bat_priv->tt.last_changeset_len = 0;
 	bat_priv->tt.last_changeset = NULL;
+	tt_change_len = batadv_tt_len(tt_diff_entries_count);
 	/* check whether this new OGM has no changes due to size problems */
-	if (new_len > 0) {
+	if (tt_diff_entries_count > 0) {
 		/* if kmalloc() fails we will reply with the full table
 		 * instead of providing the diff
 		 */
-		bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+		bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
 		if (bat_priv->tt.last_changeset) {
-			memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
-			bat_priv->tt.last_changeset_len = new_len;
+			memcpy(bat_priv->tt.last_changeset,
+			       tt_change, tt_change_len);
+			bat_priv->tt.last_changeset_len = tt_diff_len;
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
-	return count;
+container_register:
+	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+				       tvlv_len);
+	kfree(tt_data);
 }
 
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -476,7 +857,9 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_hard_iface *primary_if;
+	struct batadv_softif_vlan *vlan;
 	struct hlist_head *head;
+	unsigned short vid;
 	uint32_t i;
 	int last_seen_secs;
 	int last_seen_msecs;
@@ -489,11 +872,10 @@
 		goto out;
 
 	seq_printf(seq,
-		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
-		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
-		   bat_priv->tt.local_crc);
-	seq_printf(seq, "       %-13s %-7s %-10s\n", "Client", "Flags",
-		   "Last seen");
+		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+	seq_printf(seq, "       %-13s  %s %-7s %-9s (%-10s)\n", "Client", "VID",
+		   "Flags", "Last seen", "CRC");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -504,6 +886,7 @@
 			tt_local = container_of(tt_common_entry,
 						struct batadv_tt_local_entry,
 						common);
+			vid = tt_common_entry->vid;
 			last_seen_jiffies = jiffies - tt_local->last_seen;
 			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
 			last_seen_secs = last_seen_msecs / 1000;
@@ -511,8 +894,17 @@
 
 			no_purge = tt_common_entry->flags & np_flag;
 
-			seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
+			vlan = batadv_softif_vlan_get(bat_priv, vid);
+			if (!vlan) {
+				seq_printf(seq, "Cannot retrieve VLAN %d\n",
+					   BATADV_PRINT_VID(vid));
+				continue;
+			}
+
+			seq_printf(seq,
+				   " * %pM %4i [%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
 				   tt_common_entry->addr,
+				   BATADV_PRINT_VID(tt_common_entry->vid),
 				   (tt_common_entry->flags &
 				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
 				   no_purge ? 'P' : '.',
@@ -523,7 +915,10 @@
 				   (tt_common_entry->flags &
 				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
 				   no_purge ? 0 : last_seen_secs,
-				   no_purge ? 0 : last_seen_msecs);
+				   no_purge ? 0 : last_seen_msecs,
+				   vlan->tt.crc);
+
+			batadv_softif_vlan_free_ref(vlan);
 		}
 		rcu_read_unlock();
 	}
@@ -547,27 +942,29 @@
 	tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Local tt entry (%pM) pending to be removed: %s\n",
-		   tt_local_entry->common.addr, message);
+		   "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
+		   tt_local_entry->common.addr,
+		   BATADV_PRINT_VID(tt_local_entry->common.vid), message);
 }
 
 /**
  * batadv_tt_local_remove - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
+ * @vid: VLAN identifier
  * @message: message to append to the log on deletion
  * @roaming: true if the deletion is due to a roaming event
  *
  * Returns the flags assigned to the local entry before being deleted
  */
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, const char *message,
-				bool roaming)
+				const uint8_t *addr, unsigned short vid,
+				const char *message, bool roaming)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
 
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
 		goto out;
 
@@ -603,8 +1000,16 @@
 	return curr_flags;
 }
 
+/**
+ * batadv_tt_local_purge_list - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @head: pointer to the list containing the local tt entries
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
-				       struct hlist_head *head)
+				       struct hlist_head *head,
+				       int timeout)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -622,8 +1027,7 @@
 		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
 			continue;
 
-		if (!batadv_has_timed_out(tt_local_entry->last_seen,
-					  BATADV_TT_LOCAL_TIMEOUT))
+		if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
 			continue;
 
 		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
@@ -631,7 +1035,14 @@
 	}
 }
 
-static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_purge - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
+				  int timeout)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct hlist_head *head;
@@ -643,7 +1054,7 @@
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		batadv_tt_local_purge_list(bat_priv, head);
+		batadv_tt_local_purge_list(bat_priv, head, timeout);
 		spin_unlock_bh(list_lock);
 	}
 }
@@ -784,7 +1195,7 @@
 
 	INIT_HLIST_NODE(&orig_entry->list);
 	atomic_inc(&orig_node->refcount);
-	atomic_inc(&orig_node->tt_size);
+	batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
 	orig_entry->orig_node = orig_node;
 	orig_entry->ttvn = ttvn;
 	atomic_set(&orig_entry->refcount, 2);
@@ -803,6 +1214,7 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
+ * @vid: VLAN identifier
  * @flags: TT flags that have to be set for this non-mesh client
  * @ttvn: the tt version number ever announcing this non-mesh client
  *
@@ -813,21 +1225,28 @@
  * If a TT local entry exists for this non-mesh client remove it.
  *
  * The caller must hold orig_node refcount.
+ *
+ * Returns true if the new entry has been added, false otherwise
  */
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-			 struct batadv_orig_node *orig_node,
-			 const unsigned char *tt_addr, uint16_t flags,
-			 uint8_t ttvn)
+static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+				 struct batadv_orig_node *orig_node,
+				 const unsigned char *tt_addr,
+				 unsigned short vid, uint16_t flags,
+				 uint8_t ttvn)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
-	int ret = 0;
+	bool ret = false;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
 	uint16_t local_flags;
 
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
+	/* ignore global entries from backbone nodes */
+	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
+		return true;
+
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
 
 	/* if the node already has a local client for this entry, it has to wait
 	 * for a roaming advertisement instead of manually messing up the global
@@ -844,6 +1263,7 @@
 
 		common = &tt_global_entry->common;
 		memcpy(common->addr, tt_addr, ETH_ALEN);
+		common->vid = vid;
 
 		common->flags = flags;
 		tt_global_entry->roam_at = 0;
@@ -861,7 +1281,7 @@
 
 		hash_added = batadv_hash_add(bat_priv->tt.global_hash,
 					     batadv_compare_tt,
-					     batadv_choose_orig, common,
+					     batadv_choose_tt, common,
 					     &common->hash_entry);
 
 		if (unlikely(hash_added != 0)) {
@@ -920,14 +1340,15 @@
 	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Creating new global tt entry: %pM (via %pM)\n",
-		   common->addr, orig_node->orig);
-	ret = 1;
+		   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+		   common->addr, BATADV_PRINT_VID(common->vid),
+		   orig_node->orig);
+	ret = true;
 
 out_remove:
 
 	/* remove address from local hash if present */
-	local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
+	local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
 					     "global tt received",
 					     flags & BATADV_TT_CLIENT_ROAM);
 	tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
@@ -947,18 +1368,20 @@
 }
 
 /* batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
 * This function assumes the caller holds rcu_read_lock().
  * Returns best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
-batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
+batadv_transtable_best_orig(struct batadv_priv *bat_priv,
+			    struct batadv_tt_global_entry *tt_global_entry)
 {
-	struct batadv_neigh_node *router = NULL;
+	struct batadv_neigh_node *router, *best_router = NULL;
+	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
-	int best_tq = 0;
 
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_rcu(orig_entry, head, list) {
@@ -966,64 +1389,104 @@
 		if (!router)
 			continue;
 
-		if (router->tq_avg > best_tq) {
-			best_entry = orig_entry;
-			best_tq = router->tq_avg;
+		if (best_router &&
+		    bao->bat_neigh_cmp(router, best_router) <= 0) {
+			batadv_neigh_node_free_ref(router);
+			continue;
 		}
 
-		batadv_neigh_node_free_ref(router);
+		/* release the refcount for the "old" best */
+		if (best_router)
+			batadv_neigh_node_free_ref(best_router);
+
+		best_entry = orig_entry;
+		best_router = router;
 	}
 
+	if (best_router)
+		batadv_neigh_node_free_ref(best_router);
+
 	return best_entry;
 }
 
 /* batadv_tt_global_print_entry - print all orig nodes who announce the address
  * for this global entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
  *
 * This function assumes the caller holds rcu_read_lock().
  */
 static void
-batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
+			     struct batadv_tt_global_entry *tt_global_entry,
 			     struct seq_file *seq)
 {
-	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
 	struct batadv_tt_common_entry *tt_common_entry;
-	uint16_t flags;
+	struct batadv_orig_node_vlan *vlan;
+	struct hlist_head *head;
 	uint8_t last_ttvn;
+	uint16_t flags;
 
 	tt_common_entry = &tt_global_entry->common;
 	flags = tt_common_entry->flags;
 
-	best_entry = batadv_transtable_best_orig(tt_global_entry);
+	best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
 	if (best_entry) {
+		vlan = batadv_orig_node_vlan_get(best_entry->orig_node,
+						 tt_common_entry->vid);
+		if (!vlan) {
+			seq_printf(seq,
+				   " * Cannot retrieve VLAN %d for originator %pM\n",
+				   BATADV_PRINT_VID(tt_common_entry->vid),
+				   best_entry->orig_node->orig);
+			goto print_list;
+		}
+
 		last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
 		seq_printf(seq,
-			   " %c %pM  (%3u) via %pM     (%3u)   (%#.4x) [%c%c%c]\n",
+			   " %c %pM %4i   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
 			   '*', tt_global_entry->common.addr,
+			   BATADV_PRINT_VID(tt_global_entry->common.vid),
 			   best_entry->ttvn, best_entry->orig_node->orig,
-			   last_ttvn, best_entry->orig_node->tt_crc,
+			   last_ttvn, vlan->tt.crc,
 			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
 			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
 			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+		batadv_orig_node_vlan_free_ref(vlan);
 	}
 
+print_list:
 	head = &tt_global_entry->orig_list;
 
 	hlist_for_each_entry_rcu(orig_entry, head, list) {
 		if (best_entry == orig_entry)
 			continue;
 
+		vlan = batadv_orig_node_vlan_get(orig_entry->orig_node,
+						 tt_common_entry->vid);
+		if (!vlan) {
+			seq_printf(seq,
+				   " + Cannot retrieve VLAN %d for originator %pM\n",
+				   BATADV_PRINT_VID(tt_common_entry->vid),
+				   orig_entry->orig_node->orig);
+			continue;
+		}
+
 		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-		seq_printf(seq,	" %c %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
+		seq_printf(seq,
+			   " %c %pM %4d   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
 			   '+', tt_global_entry->common.addr,
+			   BATADV_PRINT_VID(tt_global_entry->common.vid),
 			   orig_entry->ttvn, orig_entry->orig_node->orig,
-			   last_ttvn,
+			   last_ttvn, vlan->tt.crc,
 			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
 			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
 			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+		batadv_orig_node_vlan_free_ref(vlan);
 	}
 }
 
@@ -1045,9 +1508,9 @@
 	seq_printf(seq,
 		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);
-	seq_printf(seq, "       %-13s %s       %-15s %s (%-6s) %s\n",
-		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
-		   "Flags");
+	seq_printf(seq, "       %-13s  %s  %s       %-15s %s (%-10s) %s\n",
+		   "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
+		   "CRC", "Flags");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1058,7 +1521,7 @@
 			tt_global = container_of(tt_common_entry,
 						 struct batadv_tt_global_entry,
 						 common);
-			batadv_tt_global_print_entry(tt_global, seq);
+			batadv_tt_global_print_entry(bat_priv, tt_global, seq);
 		}
 		rcu_read_unlock();
 	}
@@ -1080,6 +1543,8 @@
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, safe, head, list) {
 		hlist_del_rcu(&orig_entry->list);
+		batadv_tt_global_size_dec(orig_entry->orig_node,
+					  tt_global_entry->common.vid);
 		batadv_tt_orig_list_entry_free_ref(orig_entry);
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1094,16 +1559,21 @@
 	struct hlist_head *head;
 	struct hlist_node *safe;
 	struct batadv_tt_orig_list_entry *orig_entry;
+	unsigned short vid;
 
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, safe, head, list) {
 		if (orig_entry->orig_node == orig_node) {
+			vid = tt_global_entry->common.vid;
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Deleting %pM from global tt entry %pM: %s\n",
+				   "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
 				   orig_node->orig,
-				   tt_global_entry->common.addr, message);
+				   tt_global_entry->common.addr,
+				   BATADV_PRINT_VID(vid), message);
 			hlist_del_rcu(&orig_entry->list);
+			batadv_tt_global_size_dec(orig_node,
+						  tt_global_entry->common.vid);
 			batadv_tt_orig_list_entry_free_ref(orig_entry);
 		}
 	}
@@ -1150,17 +1620,25 @@
 						orig_node, message);
 }
 
-
-
+/**
+ * batadv_tt_global_del - remove a client from the global table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: an originator serving this client
+ * @addr: the mac address of the client
+ * @vid: VLAN identifier
+ * @message: a message explaining the reason for deleting the client to print
+ *  for debugging purpose
+ * @roaming: true if the deletion has been triggered by a roaming event
+ */
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
 				 struct batadv_orig_node *orig_node,
-				 const unsigned char *addr,
+				 const unsigned char *addr, unsigned short vid,
 				 const char *message, bool roaming)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *local_entry = NULL;
 
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
 	if (!tt_global_entry)
 		goto out;
 
@@ -1189,7 +1667,8 @@
 	 *    the global entry, since it is useless now.
 	 */
 	local_entry = batadv_tt_local_hash_find(bat_priv,
-						tt_global_entry->common.addr);
+						tt_global_entry->common.addr,
+						vid);
 	if (local_entry) {
 		/* local entry exists, case 2: client roamed to us. */
 		batadv_tt_global_del_orig_list(tt_global_entry);
@@ -1207,8 +1686,18 @@
 		batadv_tt_local_entry_free_ref(local_entry);
 }
 
+/**
+ * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
+ *  given originator matching the provided vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator owning the entries to remove
+ * @match_vid: the VLAN identifier to match. If negative, all the entries will be
+ *  removed
+ * @message: debug message to print as "reason"
+ */
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
+			       int32_t match_vid,
 			       const char *message)
 {
 	struct batadv_tt_global_entry *tt_global;
@@ -1218,6 +1707,7 @@
 	struct hlist_node *safe;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
+	unsigned short vid;
 
 	if (!hash)
 		return;
@@ -1229,6 +1719,10 @@
 		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_common_entry, safe,
 					  head, hash_entry) {
+			/* remove only matching entries */
+			if (match_vid >= 0 && tt_common_entry->vid != match_vid)
+				continue;
+
 			tt_global = container_of(tt_common_entry,
 						 struct batadv_tt_global_entry,
 						 common);
@@ -1237,9 +1731,11 @@
 							orig_node, message);
 
 			if (hlist_empty(&tt_global->orig_list)) {
+				vid = tt_global->common.vid;
 				batadv_dbg(BATADV_DBG_TT, bat_priv,
-					   "Deleting global tt entry %pM: %s\n",
-					   tt_global->common.addr, message);
+					   "Deleting global tt entry %pM (vid: %d): %s\n",
+					   tt_global->common.addr,
+					   BATADV_PRINT_VID(vid), message);
 				hlist_del_rcu(&tt_common_entry->hash_entry);
 				batadv_tt_global_entry_free_ref(tt_global);
 			}
@@ -1297,8 +1793,10 @@
 				continue;
 
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Deleting global tt entry (%pM): %s\n",
-				   tt_global->common.addr, msg);
+				   "Deleting global tt entry %pM (vid: %d): %s\n",
+				   tt_global->common.addr,
+				   BATADV_PRINT_VID(tt_global->common.vid),
+				   msg);
 
 			hlist_del_rcu(&tt_common->hash_entry);
 
@@ -1357,23 +1855,49 @@
 	return ret;
 }
 
+/**
+ * batadv_transtable_search - get the mesh destination for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of the source client
+ * @addr: mac address of the destination client
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the originator that was selected as destination in the
+ * mesh for contacting the client 'addr', NULL otherwise.
+ * In case of multiple originators serving the same client, the function returns
+ * the best one (best in terms of metric towards the destination node).
+ *
+ * If the two clients are AP isolated the function returns NULL.
+ */
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
 						  const uint8_t *src,
-						  const uint8_t *addr)
+						  const uint8_t *addr,
+						  unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_tt_orig_list_entry *best_entry;
+	bool ap_isolation_enabled = false;
+	struct batadv_softif_vlan *vlan;
 
-	if (src && atomic_read(&bat_priv->ap_isolation)) {
-		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
+	/* if the AP isolation is requested on a VLAN, then check for its
+	 * setting in the proper VLAN private data structure
+	 */
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (vlan) {
+		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
+		batadv_softif_vlan_free_ref(vlan);
+	}
+
+	if (src && ap_isolation_enabled) {
+		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
 		if (!tt_local_entry ||
 		    (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
 			goto out;
 	}
 
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
 	if (!tt_global_entry)
 		goto out;
 
@@ -1385,7 +1909,7 @@
 		goto out;
 
 	rcu_read_lock();
-	best_entry = batadv_transtable_best_orig(tt_global_entry);
+	best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
 	/* found anything? */
 	if (best_entry)
 		orig_node = best_entry->orig_node;
@@ -1402,17 +1926,40 @@
 	return orig_node;
 }
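
For illustration, a minimal caller sketch of the new VLAN-aware lookup (not part of the patch; the helper name and the surrounding frame-handling context are hypothetical):

	/* Resolve the mesh node currently serving a destination client.
	 * A NULL result means the client is unknown or the src/dst pair
	 * is AP-isolated on this VLAN.
	 */
	static bool example_resolve_client(struct batadv_priv *bat_priv,
					   const uint8_t *src,
					   const uint8_t *dst,
					   unsigned short vid)
	{
		struct batadv_orig_node *orig;

		orig = batadv_transtable_search(bat_priv, src, dst, vid);
		if (!orig)
			return false;

		/* ... address the unicast packet to orig->orig here ... */

		batadv_orig_node_free_ref(orig);
		return true;
	}
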
 
-/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
-				     struct batadv_orig_node *orig_node)
+/**
+ * batadv_tt_global_crc - calculates the checksum of the global table belonging
+ *  to the given orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator for which the CRC should be computed
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * This function computes the checksum for the global table corresponding to a
+ * specific originator. In particular, the checksum is computed as follows: For
+ * each client connected to the originator the CRC32C of the MAC address and the
+ * VID is computed and then all the CRC32Cs of the various clients are xor'ed
+ * together.
+ *
+ * The idea behind this is that CRC32C should be used as much as possible in
+ * order to produce a unique hash of the table, but since the order in which
+ * entries are fed to the CRC32C function affects the result and since every
+ * node in the network probably sorts the clients differently, the hash cannot
+ * be computed directly over the entire table. Hence the CRC32C is used only on
+ * the single client entry, while all the results are then xor'ed together
+ * because the XOR operation can combine them all while reducing the noise as
+ * much as possible.
+ *
+ * Returns the checksum of the global table of a given originator.
+ */
+static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+				     struct batadv_orig_node *orig_node,
+				     unsigned short vid)
 {
-	uint16_t total = 0, total_one;
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
-	uint32_t i;
-	int j;
+	uint32_t i, crc_tmp, crc = 0;
+	uint8_t flags;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1422,6 +1969,12 @@
 			tt_global = container_of(tt_common,
 						 struct batadv_tt_global_entry,
 						 common);
+			/* compute the CRC only for entries belonging to the
+			 * VLAN identified by the vid passed as parameter
+			 */
+			if (tt_common->vid != vid)
+				continue;
+
 			/* Roaming clients are in the global table for
 			 * consistency only. They don't have to be
 			 * taken into account while computing the
@@ -1443,48 +1996,74 @@
 							     orig_node))
 				continue;
 
-			total_one = 0;
-			for (j = 0; j < ETH_ALEN; j++)
-				total_one = crc16_byte(total_one,
-						       tt_common->addr[j]);
-			total ^= total_one;
+			crc_tmp = crc32c(0, &tt_common->vid,
+					 sizeof(tt_common->vid));
+
+			/* compute the CRC on flags that have to be kept in sync
+			 * among nodes
+			 */
+			flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+			crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+			crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
 		}
 		rcu_read_unlock();
 	}
 
-	return total;
+	return crc;
 }
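
The order-independence property described above can be condensed into a few lines. A hedged sketch follows (the entry struct is a trimmed stand-in for the real batadv_tt_common_entry, with flags assumed to be pre-masked with BATADV_TT_SYNC_MASK): each entry is digested on its own with CRC32C and the per-entry digests are XOR-folded, so every node computes the same checksum no matter how it iterates its table.

	#include <linux/crc32c.h>
	#include <linux/etherdevice.h>

	struct tt_entry_sketch {	/* stand-in for the real entry type */
		unsigned short vid;
		uint8_t flags;		/* pre-masked synchronized flags */
		uint8_t addr[ETH_ALEN];
	};

	static uint32_t sketch_table_crc(const struct tt_entry_sketch *e,
					 size_t num)
	{
		uint32_t crc = 0, crc_tmp;
		size_t i;

		for (i = 0; i < num; i++) {
			/* digest one entry: vid, synchronized flags, MAC */
			crc_tmp = crc32c(0, &e[i].vid, sizeof(e[i].vid));
			crc_tmp = crc32c(crc_tmp, &e[i].flags,
					 sizeof(e[i].flags));
			/* XOR-fold so the result is order-independent */
			crc ^= crc32c(crc_tmp, e[i].addr, ETH_ALEN);
		}

		return crc;
	}
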
 
-/* Calculates the checksum of the local table */
-static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_crc - calculates the checksum of the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * For details about the computation, please refer to the documentation for
+ * batadv_tt_global_crc().
+ *
+ * Returns the checksum of the local table
+ */
+static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
+				    unsigned short vid)
 {
-	uint16_t total = 0, total_one;
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct hlist_head *head;
-	uint32_t i;
-	int j;
+	uint32_t i, crc_tmp, crc = 0;
+	uint8_t flags;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+			/* compute the CRC only for entries belonging to the
+			 * VLAN identified by vid
+			 */
+			if (tt_common->vid != vid)
+				continue;
+
 			/* not yet committed clients have not to be taken into
 			 * account while computing the CRC
 			 */
 			if (tt_common->flags & BATADV_TT_CLIENT_NEW)
 				continue;
-			total_one = 0;
-			for (j = 0; j < ETH_ALEN; j++)
-				total_one = crc16_byte(total_one,
-						       tt_common->addr[j]);
-			total ^= total_one;
+
+			crc_tmp = crc32c(0, &tt_common->vid,
+					 sizeof(tt_common->vid));
+
+			/* compute the CRC on flags that have to be kept in sync
+			 * among nodes
+			 */
+			flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+			crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+			crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
 		}
 		rcu_read_unlock();
 	}
 
-	return total;
+	return crc;
 }
 
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
@@ -1503,11 +2082,9 @@
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
 				       struct batadv_orig_node *orig_node,
-				       const unsigned char *tt_buff,
-				       uint8_t tt_num_changes)
+				       const void *tt_buff,
+				       uint16_t tt_buff_len)
 {
-	uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
-
 	/* Replace the old buffer only if I received something in the
 	 * last OGM (the OGM could carry no changes)
 	 */
@@ -1569,9 +2146,14 @@
 	return tt_req_node;
 }
 
-/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int batadv_tt_local_valid_entry(const void *entry_ptr,
-				       const void *data_ptr)
+/**
+ * batadv_tt_local_valid - verify that a given tt entry is valid
+ * @entry_ptr: the local tt entry to be checked
+ * @data_ptr: not used but required to satisfy the callback prototype
+ *
+ * Returns 1 if the entry is valid, 0 otherwise.
+ */
+static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
 	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
@@ -1598,41 +2180,30 @@
 	return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
-static struct sk_buff *
-batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
-			      struct batadv_hashtable *hash,
-			      struct batadv_priv *bat_priv,
-			      int (*valid_cb)(const void *, const void *),
-			      void *cb_data)
+/**
+ * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ *  specified tt hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the tt entries
+ * @tvlv_buff: pointer to the buffer to fill with the TT data
+ * @tt_len: expected tvlv tt data buffer length in number of bytes
+ * @valid_cb: function to filter tt change entries
+ * @cb_data: data passed to the filter function as argument
+ */
+static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+				    struct batadv_hashtable *hash,
+				    void *tvlv_buff, uint16_t tt_len,
+				    int (*valid_cb)(const void *, const void *),
+				    void *cb_data)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
-	struct batadv_tt_query_packet *tt_response;
-	struct batadv_tt_change *tt_change;
+	struct batadv_tvlv_tt_change *tt_change;
 	struct hlist_head *head;
-	struct sk_buff *skb = NULL;
-	uint16_t tt_tot, tt_count;
-	ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
+	uint16_t tt_tot, tt_num_entries = 0;
 	uint32_t i;
-	size_t len;
 
-	if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
-		tt_len = bat_priv->soft_iface->mtu - tt_query_size;
-		tt_len -= tt_len % sizeof(struct batadv_tt_change);
-	}
-	tt_tot = tt_len / sizeof(struct batadv_tt_change);
-
-	len = tt_query_size + tt_len;
-	skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-	if (!skb)
-		goto out;
-
-	skb->priority = TC_PRIO_CONTROL;
-	skb_reserve(skb, ETH_HLEN);
-	tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
-	tt_response->ttvn = ttvn;
-
-	tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
-	tt_count = 0;
+	tt_tot = batadv_tt_entries(tt_len);
+	tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
 
 	rcu_read_lock();
 	for (i = 0; i < hash->size; i++) {
@@ -1640,7 +2211,7 @@
 
 		hlist_for_each_entry_rcu(tt_common_entry,
 					 head, hash_entry) {
-			if (tt_count == tt_tot)
+			if (tt_tot == tt_num_entries)
 				break;
 
 			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
@@ -1649,33 +2220,123 @@
 			memcpy(tt_change->addr, tt_common_entry->addr,
 			       ETH_ALEN);
 			tt_change->flags = tt_common_entry->flags;
+			tt_change->vid = htons(tt_common_entry->vid);
+			tt_change->reserved = 0;
 
-			tt_count++;
+			tt_num_entries++;
 			tt_change++;
 		}
 	}
 	rcu_read_unlock();
-
-	/* store in the message the number of entries we have successfully
-	 * copied
-	 */
-	tt_response->tt_data = htons(tt_count);
-
-out:
-	return skb;
 }
 
+/**
+ * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * @orig_node: originator for which the CRCs have to be checked
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @num_vlan: number of tvlv VLAN entries
+ *
+ * Returns true if all the received CRCs match the locally stored ones, false
+ * otherwise
+ */
+static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
+				       struct batadv_tvlv_tt_vlan_data *tt_vlan,
+				       uint16_t num_vlan)
+{
+	struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
+	struct batadv_orig_node_vlan *vlan;
+	int i;
+
+	/* check if each received CRC matches the locally stored one */
+	for (i = 0; i < num_vlan; i++) {
+		tt_vlan_tmp = tt_vlan + i;
+
+		/* if orig_node is a backbone node for this VLAN, don't check
+		 * the CRC as we ignore all the global entries over it
+		 */
+		if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
+						   orig_node->orig,
+						   ntohs(tt_vlan_tmp->vid)))
+			continue;
+
+		vlan = batadv_orig_node_vlan_get(orig_node,
+						 ntohs(tt_vlan_tmp->vid));
+		if (!vlan)
+			return false;
+
+		if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * batadv_tt_local_update_crc - update all the local CRCs
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
+{
+	struct batadv_softif_vlan *vlan;
+
+	/* recompute the local CRC for each VLAN */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node for which the CRCs have to be updated
+ */
+static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
+					struct batadv_orig_node *orig_node)
+{
+	struct batadv_orig_node_vlan *vlan;
+	uint32_t crc;
+
+	/* recompute the global CRC for each VLAN */
+	rcu_read_lock();
+	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+		/* if orig_node is a backbone node for this VLAN, don't compute
+		 * the CRC as we ignore all the global entries over it
+		 */
+		if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
+						   vlan->vid))
+			continue;
+
+		crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
+		vlan->tt.crc = crc;
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_send_tt_request - send a TT Request message to a given node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst_orig_node: the destination of the message
+ * @ttvn: the version number that the source of the message is looking for
+ * @tt_vlan: pointer to the first tvlv VLAN object to request
+ * @num_vlan: number of tvlv VLAN entries
+ * @full_table: ask for the entire translation table if true, otherwise only
+ *  for the last TT diff
+ */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
 				  struct batadv_orig_node *dst_orig_node,
-				  uint8_t ttvn, uint16_t tt_crc,
-				  bool full_table)
+				  uint8_t ttvn,
+				  struct batadv_tvlv_tt_vlan_data *tt_vlan,
+				  uint16_t num_vlan, bool full_table)
 {
-	struct sk_buff *skb = NULL;
-	struct batadv_tt_query_packet *tt_request;
-	struct batadv_hard_iface *primary_if;
+	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tt_req_node *tt_req_node = NULL;
-	int ret = 1;
-	size_t tt_req_len;
+	struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
+	struct batadv_hard_iface *primary_if;
+	bool ret = false;
+	int i, size;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
@@ -1688,157 +2349,171 @@
 	if (!tt_req_node)
 		goto out;
 
-	skb = netdev_alloc_skb_ip_align(NULL, sizeof(*tt_request) + ETH_HLEN);
-	if (!skb)
+	size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+	tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
+	if (!tvlv_tt_data)
 		goto out;
 
-	skb->priority = TC_PRIO_CONTROL;
-	skb_reserve(skb, ETH_HLEN);
+	tvlv_tt_data->flags = BATADV_TT_REQUEST;
+	tvlv_tt_data->ttvn = ttvn;
+	tvlv_tt_data->num_vlan = htons(num_vlan);
 
-	tt_req_len = sizeof(*tt_request);
-	tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
+	/* send all the CRCs within the request. This is needed by intermediate
+	 * nodes to ensure they have the correct table before replying
+	 */
+	tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
+	for (i = 0; i < num_vlan; i++) {
+		tt_vlan_req->vid = tt_vlan->vid;
+		tt_vlan_req->crc = tt_vlan->crc;
 
-	tt_request->header.packet_type = BATADV_TT_QUERY;
-	tt_request->header.version = BATADV_COMPAT_VERSION;
-	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
-	tt_request->header.ttl = BATADV_TTL;
-	tt_request->ttvn = ttvn;
-	tt_request->tt_data = htons(tt_crc);
-	tt_request->flags = BATADV_TT_REQUEST;
+		tt_vlan_req++;
+		tt_vlan++;
+	}
 
 	if (full_table)
-		tt_request->flags |= BATADV_TT_FULL_TABLE;
+		tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
-		   dst_orig_node->orig, (full_table ? 'F' : '.'));
+		   dst_orig_node->orig, full_table ? 'F' : '.');
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
-
-	if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL) != NET_XMIT_DROP)
-		ret = 0;
+	batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+				 dst_orig_node->orig, BATADV_TVLV_TT, 1,
+				 tvlv_tt_data, size);
+	ret = true;
 
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	if (ret)
-		kfree_skb(skb);
 	if (ret && tt_req_node) {
 		spin_lock_bh(&bat_priv->tt.req_list_lock);
 		list_del(&tt_req_node->list);
 		spin_unlock_bh(&bat_priv->tt.req_list_lock);
 		kfree(tt_req_node);
 	}
+	kfree(tvlv_tt_data);
 	return ret;
 }
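
The wire layout assumed by this request/response machinery is a single batadv_tvlv_tt_data header, followed by num_vlan batadv_tvlv_tt_vlan_data records, followed by the change entries. A hedged parsing sketch of the per-VLAN CRC records (only the fields used in this file are touched; packet.h holds the authoritative definitions):

	static void sketch_walk_tt_vlans(void *tvlv_value, uint16_t len)
	{
		struct batadv_tvlv_tt_data *tt_data = tvlv_value;
		struct batadv_tvlv_tt_vlan_data *vlan;
		uint16_t num_vlan, i;

		if (len < sizeof(*tt_data))
			return;

		num_vlan = ntohs(tt_data->num_vlan);
		if (len < sizeof(*tt_data) + num_vlan * sizeof(*vlan))
			return;

		/* the per-VLAN CRC records start right after the header */
		vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
		for (i = 0; i < num_vlan; i++, vlan++)
			pr_debug("vid %d crc %#.8x\n",
				 BATADV_PRINT_VID(ntohs(vlan->vid)),
				 ntohl(vlan->crc));
	}
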
 
-static bool
-batadv_send_other_tt_response(struct batadv_priv *bat_priv,
-			      struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_other_tt_response - send reply to tt request concerning another
+ *  node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+					  struct batadv_tvlv_tt_data *tt_data,
+					  uint8_t *req_src, uint8_t *req_dst)
 {
 	struct batadv_orig_node *req_dst_orig_node;
 	struct batadv_orig_node *res_dst_orig_node = NULL;
-	uint8_t orig_ttvn, req_ttvn, ttvn;
-	int res, ret = false;
-	unsigned char *tt_buff;
-	bool full_table;
-	uint16_t tt_len, tt_tot;
-	struct sk_buff *skb = NULL;
-	struct batadv_tt_query_packet *tt_response;
-	uint8_t *packet_pos;
-	size_t len;
+	struct batadv_tvlv_tt_change *tt_change;
+	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+	struct batadv_tvlv_tt_vlan_data *tt_vlan;
+	bool ret = false, full_table;
+	uint8_t orig_ttvn, req_ttvn;
+	uint16_t tvlv_len;
+	int32_t tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
-		   tt_request->src, tt_request->ttvn, tt_request->dst,
-		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   req_src, tt_data->ttvn, req_dst,
+		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 	/* Let's get the orig node of the REAL destination */
-	req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
+	req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
 	if (!req_dst_orig_node)
 		goto out;
 
-	res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+	res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
 	if (!res_dst_orig_node)
 		goto out;
 
 	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-	req_ttvn = tt_request->ttvn;
+	req_ttvn = tt_data->ttvn;
 
-	/* I don't have the requested data */
+	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+	/* this node doesn't have the requested data */
 	if (orig_ttvn != req_ttvn ||
-	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
+	    !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+					ntohs(tt_data->num_vlan)))
 		goto out;
 
 	/* If the full table has been explicitly requested */
-	if (tt_request->flags & BATADV_TT_FULL_TABLE ||
+	if (tt_data->flags & BATADV_TT_FULL_TABLE ||
 	    !req_dst_orig_node->tt_buff)
 		full_table = true;
 	else
 		full_table = false;
 
-	/* In this version, fragmentation is not implemented, then
-	 * I'll send only one packet with as much TT entries as I can
+	/* TT fragmentation has not been implemented yet, so send only as many
+	 * TT entries as fit into a single packet
 	 */
 	if (!full_table) {
 		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
 		tt_len = req_dst_orig_node->tt_buff_len;
-		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-		len = sizeof(*tt_response) + tt_len;
-		skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-		if (!skb)
+		tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+							      &tvlv_tt_data,
+							      &tt_change,
+							      &tt_len);
+		if (!tt_len)
 			goto unlock;
 
-		skb->priority = TC_PRIO_CONTROL;
-		skb_reserve(skb, ETH_HLEN);
-		packet_pos = skb_put(skb, len);
-		tt_response = (struct batadv_tt_query_packet *)packet_pos;
-		tt_response->ttvn = req_ttvn;
-		tt_response->tt_data = htons(tt_tot);
-
-		tt_buff = skb->data + sizeof(*tt_response);
 		/* Copy the last orig_node's OGM buffer */
-		memcpy(tt_buff, req_dst_orig_node->tt_buff,
+		memcpy(tt_change, req_dst_orig_node->tt_buff,
 		       req_dst_orig_node->tt_buff_len);
-
 		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
 	} else {
-		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
-		tt_len *= sizeof(struct batadv_tt_change);
-		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-
-		skb = batadv_tt_response_fill_table(tt_len, ttvn,
-						    bat_priv->tt.global_hash,
-						    bat_priv,
-						    batadv_tt_global_valid,
-						    req_dst_orig_node);
-		if (!skb)
+		/* allocate the tvlv, put the tt_data and all the tt_vlan_data
+		 * in the initial part
+		 */
+		tt_len = -1;
+		tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+							      &tvlv_tt_data,
+							      &tt_change,
+							      &tt_len);
+		if (!tt_len)
 			goto out;
 
-		tt_response = (struct batadv_tt_query_packet *)skb->data;
+		/* fill the rest of the tvlv with the real TT entries */
+		batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+					tt_change, tt_len,
+					batadv_tt_global_valid,
+					req_dst_orig_node);
 	}
 
-	tt_response->header.packet_type = BATADV_TT_QUERY;
-	tt_response->header.version = BATADV_COMPAT_VERSION;
-	tt_response->header.ttl = BATADV_TTL;
-	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
-	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-	tt_response->flags = BATADV_TT_RESPONSE;
+	/* Don't send the response if it exceeds the maximum packet size */
+	tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
+	if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
+		net_ratelimited_function(batadv_info, bat_priv->soft_iface,
+					 "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
+					 res_dst_orig_node->orig);
+		goto out;
+	}
+
+	tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+	tvlv_tt_data->ttvn = req_ttvn;
 
 	if (full_table)
-		tt_response->flags |= BATADV_TT_FULL_TABLE;
+		tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
-		   res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
+		   "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
+		   res_dst_orig_node->orig, req_dst_orig_node->orig,
+		   full_table ? 'F' : '.', req_ttvn);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-	res = batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL);
-	if (res != NET_XMIT_DROP)
-		ret = true;
+	batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
+				 req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+				 tvlv_len);
 
+	ret = true;
 	goto out;
 
 unlock:
@@ -1849,37 +2524,43 @@
 		batadv_orig_node_free_ref(res_dst_orig_node);
 	if (req_dst_orig_node)
 		batadv_orig_node_free_ref(req_dst_orig_node);
-	if (!ret)
-		kfree_skb(skb);
+	kfree(tvlv_tt_data);
 	return ret;
 }
 
-static bool
-batadv_send_my_tt_response(struct batadv_priv *bat_priv,
-			   struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_my_tt_response - send reply to tt request concerning this node's
+ *  translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+				       struct batadv_tvlv_tt_data *tt_data,
+				       uint8_t *req_src)
 {
-	struct batadv_orig_node *orig_node;
+	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
-	uint8_t my_ttvn, req_ttvn, ttvn;
-	int ret = false;
-	unsigned char *tt_buff;
+	struct batadv_tvlv_tt_change *tt_change;
+	struct batadv_orig_node *orig_node;
+	uint8_t my_ttvn, req_ttvn;
+	uint16_t tvlv_len;
 	bool full_table;
-	uint16_t tt_len, tt_tot;
-	struct sk_buff *skb = NULL;
-	struct batadv_tt_query_packet *tt_response;
-	uint8_t *packet_pos;
-	size_t len;
+	int32_t tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
-		   tt_request->src, tt_request->ttvn,
-		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   req_src, tt_data->ttvn,
+		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
+	spin_lock_bh(&bat_priv->tt.commit_lock);
 
 	my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-	req_ttvn = tt_request->ttvn;
+	req_ttvn = tt_data->ttvn;
 
-	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+	orig_node = batadv_orig_hash_find(bat_priv, req_src);
 	if (!orig_node)
 		goto out;
 
@@ -1890,103 +2571,104 @@
 	/* If the full table has been explicitly requested or the gap
 	 * is too big send the whole local translation table
 	 */
-	if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
+	if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
 	    !bat_priv->tt.last_changeset)
 		full_table = true;
 	else
 		full_table = false;
 
-	/* In this version, fragmentation is not implemented, then
-	 * I'll send only one packet with as much TT entries as I can
+	/* TT fragmentation has not been implemented yet, so send only as many
+	 * TT entries as fit into a single packet
 	 */
 	if (!full_table) {
 		spin_lock_bh(&bat_priv->tt.last_changeset_lock);
-		tt_len = bat_priv->tt.last_changeset_len;
-		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-		len = sizeof(*tt_response) + tt_len;
-		skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-		if (!skb)
+		tt_len = bat_priv->tt.last_changeset_len;
+		tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+							     &tvlv_tt_data,
+							     &tt_change,
+							     &tt_len);
+		if (!tt_len)
 			goto unlock;
 
-		skb->priority = TC_PRIO_CONTROL;
-		skb_reserve(skb, ETH_HLEN);
-		packet_pos = skb_put(skb, len);
-		tt_response = (struct batadv_tt_query_packet *)packet_pos;
-		tt_response->ttvn = req_ttvn;
-		tt_response->tt_data = htons(tt_tot);
-
-		tt_buff = skb->data + sizeof(*tt_response);
-		memcpy(tt_buff, bat_priv->tt.last_changeset,
+		/* Copy the local changeset sent with the last OGM */
+		memcpy(tt_change, bat_priv->tt.last_changeset,
 		       bat_priv->tt.last_changeset_len);
 		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 	} else {
-		tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
-		tt_len *= sizeof(struct batadv_tt_change);
-		ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+		req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
-		skb = batadv_tt_response_fill_table(tt_len, ttvn,
-						    bat_priv->tt.local_hash,
-						    bat_priv,
-						    batadv_tt_local_valid_entry,
-						    NULL);
-		if (!skb)
+		/* allocate the tvlv, put the tt_data and all the tt_vlan_data
+		 * in the initial part
+		 */
+		tt_len = -1;
+		tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+							     &tvlv_tt_data,
+							     &tt_change,
+							     &tt_len);
+		if (!tt_len)
 			goto out;
 
-		tt_response = (struct batadv_tt_query_packet *)skb->data;
+		/* fill the rest of the tvlv with the real TT entries */
+		batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+					tt_change, tt_len,
+					batadv_tt_local_valid, NULL);
 	}
 
-	tt_response->header.packet_type = BATADV_TT_QUERY;
-	tt_response->header.version = BATADV_COMPAT_VERSION;
-	tt_response->header.ttl = BATADV_TTL;
-	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-	tt_response->flags = BATADV_TT_RESPONSE;
+	tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+	tvlv_tt_data->ttvn = req_ttvn;
 
 	if (full_table)
-		tt_response->flags |= BATADV_TT_FULL_TABLE;
+		tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending TT_RESPONSE to %pM [%c]\n",
-		   orig_node->orig,
-		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
+		   orig_node->orig, full_table ? 'F' : '.', req_ttvn);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-		ret = true;
+	batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+				 req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+				 tvlv_len);
+
 	goto out;
 
 unlock:
 	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
+	spin_unlock_bh(&bat_priv->tt.commit_lock);
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	if (!ret)
-		kfree_skb(skb);
-	/* This packet was for me, so it doesn't need to be re-routed */
+	kfree(tvlv_tt_data);
+	/* The packet was for this host, so it doesn't need to be re-routed */
 	return true;
 }
 
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-			     struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_tt_response - send reply to tt request
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+				    struct batadv_tvlv_tt_data *tt_data,
+				    uint8_t *req_src, uint8_t *req_dst)
 {
-	if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
-		/* don't answer backbone gws! */
-		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
-			return true;
-
-		return batadv_send_my_tt_response(bat_priv, tt_request);
-	} else {
-		return batadv_send_other_tt_response(bat_priv, tt_request);
-	}
+	if (batadv_is_my_mac(bat_priv, req_dst))
+		return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
+	else
+		return batadv_send_other_tt_response(bat_priv, tt_data,
+						     req_src, req_dst);
 }
 
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				      struct batadv_orig_node *orig_node,
-				      struct batadv_tt_change *tt_change,
+				      struct batadv_tvlv_tt_change *tt_change,
 				      uint16_t tt_num_changes, uint8_t ttvn)
 {
 	int i;
@@ -1997,11 +2679,13 @@
 			roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
 			batadv_tt_global_del(bat_priv, orig_node,
 					     (tt_change + i)->addr,
+					     ntohs((tt_change + i)->vid),
 					     "tt removed by changes",
 					     roams);
 		} else {
 			if (!batadv_tt_global_add(bat_priv, orig_node,
 						  (tt_change + i)->addr,
+						  ntohs((tt_change + i)->vid),
 						  (tt_change + i)->flags, ttvn))
 				/* In case of problem while storing a
 				 * global_entry, we stop the updating
@@ -2016,21 +2700,22 @@
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
-				  struct batadv_tt_query_packet *tt_response)
+				  struct batadv_tvlv_tt_change *tt_change,
+				  uint8_t ttvn, uint8_t *resp_src,
+				  uint16_t num_entries)
 {
 	struct batadv_orig_node *orig_node;
 
-	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+	orig_node = batadv_orig_hash_find(bat_priv, resp_src);
 	if (!orig_node)
 		goto out;
 
 	/* Purge the old table first.. */
-	batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
+	batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+				  "Received full table");
 
-	_batadv_tt_update_changes(bat_priv, orig_node,
-				  (struct batadv_tt_change *)(tt_response + 1),
-				  ntohs(tt_response->tt_data),
-				  tt_response->ttvn);
+	_batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
+				  ttvn);
 
 	spin_lock_bh(&orig_node->tt_buff_lock);
 	kfree(orig_node->tt_buff);
@@ -2038,7 +2723,7 @@
 	orig_node->tt_buff = NULL;
 	spin_unlock_bh(&orig_node->tt_buff_lock);
 
-	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+	atomic_set(&orig_node->last_ttvn, ttvn);
 
 out:
 	if (orig_node)
@@ -2048,22 +2733,31 @@
 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				     struct batadv_orig_node *orig_node,
 				     uint16_t tt_num_changes, uint8_t ttvn,
-				     struct batadv_tt_change *tt_change)
+				     struct batadv_tvlv_tt_change *tt_change)
 {
 	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
 				  tt_num_changes, ttvn);
 
-	batadv_tt_save_orig_buffer(bat_priv, orig_node,
-				   (unsigned char *)tt_change, tt_num_changes);
+	batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
+				   batadv_tt_len(tt_num_changes));
 	atomic_set(&orig_node->last_ttvn, ttvn);
 }
 
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
+/**
+ * batadv_is_my_client - check if a client is served by the local node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if the client is served by this node, false otherwise.
+ */
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+			 unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
 
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
 		goto out;
 	/* Check if the client has been logically deleted (but is kept for
@@ -2079,72 +2773,68 @@
 	return ret;
 }
 
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-			       struct batadv_tt_query_packet *tt_response)
+/**
+ * batadv_handle_tt_response - process incoming tt reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @resp_src: mac address of tt reply sender
+ * @num_entries: number of tt change entries appended to the tt data
+ */
+static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+				      struct batadv_tvlv_tt_data *tt_data,
+				      uint8_t *resp_src, uint16_t num_entries)
 {
 	struct batadv_tt_req_node *node, *safe;
 	struct batadv_orig_node *orig_node = NULL;
-	struct batadv_tt_change *tt_change;
+	struct batadv_tvlv_tt_change *tt_change;
+	uint8_t *tvlv_ptr = (uint8_t *)tt_data;
+	uint16_t change_offset;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
-		   tt_response->src, tt_response->ttvn,
-		   ntohs(tt_response->tt_data),
-		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+		   resp_src, tt_data->ttvn, num_entries,
+		   (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
-	/* we should have never asked a backbone gw */
-	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
-		goto out;
-
-	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+	orig_node = batadv_orig_hash_find(bat_priv, resp_src);
 	if (!orig_node)
 		goto out;
 
-	if (tt_response->flags & BATADV_TT_FULL_TABLE) {
-		batadv_tt_fill_gtable(bat_priv, tt_response);
+	spin_lock_bh(&orig_node->tt_lock);
+
+	change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
+	change_offset *= ntohs(tt_data->num_vlan);
+	change_offset += sizeof(*tt_data);
+	tvlv_ptr += change_offset;
+
+	tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
+	if (tt_data->flags & BATADV_TT_FULL_TABLE) {
+		batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
+				      resp_src, num_entries);
 	} else {
-		tt_change = (struct batadv_tt_change *)(tt_response + 1);
-		batadv_tt_update_changes(bat_priv, orig_node,
-					 ntohs(tt_response->tt_data),
-					 tt_response->ttvn, tt_change);
+		batadv_tt_update_changes(bat_priv, orig_node, num_entries,
+					 tt_data->ttvn, tt_change);
 	}
 
+	/* Recalculate the CRC for this orig_node and store it */
+	batadv_tt_global_update_crc(bat_priv, orig_node);
+
+	spin_unlock_bh(&orig_node->tt_lock);
+
 	/* Delete the tt_req_node from pending tt_requests list */
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
 	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-		if (!batadv_compare_eth(node->addr, tt_response->src))
+		if (!batadv_compare_eth(node->addr, resp_src))
 			continue;
 		list_del(&node->list);
 		kfree(node);
 	}
-	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
-	/* Recalculate the CRC for this orig_node and store it */
-	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 out:
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
 }
 
-int batadv_tt_init(struct batadv_priv *bat_priv)
-{
-	int ret;
-
-	ret = batadv_tt_local_init(bat_priv);
-	if (ret < 0)
-		return ret;
-
-	ret = batadv_tt_global_init(bat_priv);
-	if (ret < 0)
-		return ret;
-
-	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
-			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
-
-	return 1;
-}
-
 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_tt_roam_node *node, *safe;
@@ -2225,14 +2915,28 @@
 	return ret;
 }
 
+/**
+ * batadv_send_roam_adv - send a roaming advertisement message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ * @vid: VLAN identifier
+ * @orig_node: message destination
+ *
+ * Send a ROAMING_ADV message to the node which was previously serving this
+ * client. This is done to inform the node that from now on all traffic destined
+ * for this particular roamed client has to be forwarded to the sender of the
+ * roaming message.
+ */
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+				 unsigned short vid,
 				 struct batadv_orig_node *orig_node)
 {
-	struct sk_buff *skb = NULL;
-	struct batadv_roam_adv_packet *roam_adv_packet;
-	int ret = 1;
 	struct batadv_hard_iface *primary_if;
-	size_t len = sizeof(*roam_adv_packet);
+	struct batadv_tvlv_roam_adv tvlv_roam;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
 
 	/* before going on we have to check whether the client has
 	 * already roamed to us too many times
@@ -2240,40 +2944,22 @@
 	if (!batadv_tt_check_roam_count(bat_priv, client))
 		goto out;
 
-	skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-	if (!skb)
-		goto out;
-
-	skb->priority = TC_PRIO_CONTROL;
-	skb_reserve(skb, ETH_HLEN);
-
-	roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
-
-	roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
-	roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
-	roam_adv_packet->header.ttl = BATADV_TTL;
-	roam_adv_packet->reserved = 0;
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-	batadv_hardif_free_ref(primary_if);
-	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
-	memcpy(roam_adv_packet->client, client, ETH_ALEN);
-
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Sending ROAMING_ADV to %pM (client %pM)\n",
-		   orig_node->orig, client);
+		   "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
+		   orig_node->orig, client, BATADV_PRINT_VID(vid));
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-		ret = 0;
+	memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
+	tvlv_roam.vid = htons(vid);
+
+	batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+				 orig_node->orig, BATADV_TVLV_ROAM, 1,
+				 &tvlv_roam, sizeof(tvlv_roam));
 
 out:
-	if (ret && skb)
-		kfree_skb(skb);
-	return;
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
 }
 
 static void batadv_tt_purge(struct work_struct *work)
@@ -2286,7 +2972,7 @@
 	priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
 	bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
-	batadv_tt_local_purge(bat_priv);
+	batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
 	batadv_tt_global_purge(bat_priv);
 	batadv_tt_req_purge(bat_priv);
 	batadv_tt_roam_purge(bat_priv);
@@ -2297,6 +2983,9 @@
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
+	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
 	cancel_delayed_work_sync(&bat_priv->tt.work);
 
 	batadv_tt_local_table_free(bat_priv);
@@ -2308,19 +2997,25 @@
 	kfree(bat_priv->tt.last_changeset);
 }
 
-/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries
+/**
+ * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ *  table and possibly count them in the TT size
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: the flags to set or unset
+ * @enable: whether to set or unset the given flags
+ * @count: whether to increase the TT size by the number of changed entries
  */
-static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
-				    uint16_t flags, bool enable)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
+				      uint16_t flags, bool enable, bool count)
 {
-	uint32_t i;
+	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+	struct batadv_tt_common_entry *tt_common_entry;
 	uint16_t changed_num = 0;
 	struct hlist_head *head;
-	struct batadv_tt_common_entry *tt_common_entry;
+	uint32_t i;
 
 	if (!hash)
-		goto out;
+		return;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2338,11 +3033,15 @@
 				tt_common_entry->flags &= ~flags;
 			}
 			changed_num++;
+
+			if (!count)
+				continue;
+
+			batadv_tt_local_size_inc(bat_priv,
+						 tt_common_entry->vid);
 		}
 		rcu_read_unlock();
 	}
-out:
-	return changed_num;
 }
 
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
@@ -2370,10 +3069,11 @@
 				continue;
 
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "Deleting local tt entry (%pM): pending\n",
-				   tt_common->addr);
+				   "Deleting local tt entry (%pM, vid: %d): pending\n",
+				   tt_common->addr,
+				   BATADV_PRINT_VID(tt_common->vid));
 
-			atomic_dec(&bat_priv->tt.local_entry_num);
+			batadv_tt_local_size_dec(bat_priv, tt_common->vid);
 			hlist_del_rcu(&tt_common->hash_entry);
 			tt_local = container_of(tt_common,
 						struct batadv_tt_local_entry,
@@ -2384,22 +3084,25 @@
 	}
 }
 
-static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
-				    unsigned char **packet_buff,
-				    int *packet_buff_len, int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ *  which have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Caller must hold tt->commit_lock.
+ */
+static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
-	uint16_t changed_num = 0;
+	if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+		if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
+			batadv_tt_tvlv_container_update(bat_priv);
+		return;
+	}
 
-	if (atomic_read(&bat_priv->tt.local_changes) < 1)
-		return -ENOENT;
+	batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
 
-	changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
-					  BATADV_TT_CLIENT_NEW, false);
-
-	/* all reset entries have to be counted as local entries */
-	atomic_add(changed_num, &bat_priv->tt.local_entry_num);
 	batadv_tt_local_purge_pending_clients(bat_priv);
-	bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
+	batadv_tt_local_update_crc(bat_priv);
 
 	/* Increment the TTVN only once per OGM interval */
 	atomic_inc(&bat_priv->tt.vn);
@@ -2409,49 +3112,38 @@
 
 	/* reset the sending counter */
 	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
-
-	return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
-					   packet_buff_len, packet_min_len);
+	batadv_tt_tvlv_container_update(bat_priv);
 }
 
-/* when calling this function (hard_iface == primary_if) has to be true */
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-			  unsigned char **packet_buff, int *packet_buff_len,
-			  int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ *  have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
 {
-	int tt_num_changes;
-
-	/* if at least one change happened */
-	tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
-						  packet_buff_len,
-						  packet_min_len);
-
-	/* if the changes have been sent often enough */
-	if ((tt_num_changes < 0) &&
-	    (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
-		batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-					      packet_min_len, packet_min_len);
-		tt_num_changes = 0;
-	}
-
-	return tt_num_changes;
+	spin_lock_bh(&bat_priv->tt.commit_lock);
+	batadv_tt_local_commit_changes_nolock(bat_priv);
+	spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst)
+			   uint8_t *dst, unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
+	struct batadv_softif_vlan *vlan;
 	bool ret = false;
 
-	if (!atomic_read(&bat_priv->ap_isolation))
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (!vlan || !atomic_read(&vlan->ap_isolation))
 		goto out;
 
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
 	if (!tt_local_entry)
 		goto out;
 
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
 	if (!tt_global_entry)
 		goto out;
 
@@ -2461,6 +3153,8 @@
 	ret = true;
 
 out:
+	if (vlan)
+		batadv_softif_vlan_free_ref(vlan);
 	if (tt_global_entry)
 		batadv_tt_global_entry_free_ref(tt_global_entry);
 	if (tt_local_entry)
@@ -2468,19 +3162,29 @@
 	return ret;
 }
 
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-			   struct batadv_orig_node *orig_node,
-			   const unsigned char *tt_buff, uint8_t tt_num_changes,
-			   uint8_t ttvn, uint16_t tt_crc)
+/**
+ * batadv_tt_update_orig - update global translation table with new tt
+ *  information received via ogms
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry in the TT buffer
+ * @tt_num_vlan: number of tvlv VLAN entries
+ * @tt_change: pointer to the first tt change entry in the TT buffer
+ * @tt_num_changes: number of tt changes inside the tt buffer
+ * @ttvn: translation table version number of this changeset
+ */
+static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+				  struct batadv_orig_node *orig_node,
+				  const void *tt_buff, uint16_t tt_num_vlan,
+				  struct batadv_tvlv_tt_change *tt_change,
+				  uint16_t tt_num_changes, uint8_t ttvn)
 {
 	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	bool full_table = true;
-	struct batadv_tt_change *tt_change;
 
-	/* don't care about a backbone gateways updates. */
-	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-		return;
-
+	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
 	 * increased by one -> we can apply the attached changes
 	 */
@@ -2496,7 +3200,9 @@
 			goto request_table;
 		}
 
-		tt_change = (struct batadv_tt_change *)tt_buff;
+		spin_lock_bh(&orig_node->tt_lock);
+
+		tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
 		batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
 					 ttvn, tt_change);
 
@@ -2504,7 +3210,9 @@
 		 * prefer to recompute it to spot any possible inconsistency
 		 * in the global table
 		 */
-		orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+		batadv_tt_global_update_crc(bat_priv, orig_node);
+
+		spin_unlock_bh(&orig_node->tt_lock);
 
 		/* The ttvn alone is not enough to guarantee consistency
 		 * because a single value could represent different states
@@ -2515,37 +3223,46 @@
 		 * checking the CRC value is mandatory to detect the
 		 * inconsistency
 		 */
-		if (orig_node->tt_crc != tt_crc)
+		if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
+						tt_num_vlan))
 			goto request_table;
 	} else {
 		/* if we missed more than one change or our tables are not
 		 * in sync anymore -> request fresh tt data
 		 */
 		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
-		    orig_node->tt_crc != tt_crc) {
+		    !batadv_tt_global_check_crc(orig_node, tt_vlan,
+						tt_num_vlan)) {
 request_table:
 			batadv_dbg(BATADV_DBG_TT, bat_priv,
-				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
-				   orig_node->orig, ttvn, orig_ttvn, tt_crc,
-				   orig_node->tt_crc, tt_num_changes);
+				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
+				   orig_node->orig, ttvn, orig_ttvn,
+				   tt_num_changes);
 			batadv_send_tt_request(bat_priv, orig_node, ttvn,
-					       tt_crc, full_table);
+					       tt_vlan, tt_num_vlan,
+					       full_table);
 			return;
 		}
 	}
 }
 
-/* returns true whether we know that the client has moved from its old
- * originator to another one. This entry is kept is still kept for consistency
- * purposes
+/**
+ * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if we know that the client has moved from its old originator
+ * to another one. This entry is still kept for consistency purposes and will be
+ * deleted later by a DEL or because of a timeout
  */
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr)
+					uint8_t *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	bool ret = false;
 
-	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
 	if (!tt_global_entry)
 		goto out;
 
@@ -2558,19 +3275,20 @@
 /**
  * batadv_tt_local_client_is_roaming - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
- * @addr: the MAC address of the local client to query
+ * @addr: the mac address of the local client to query
+ * @vid: VLAN identifier
  *
  * Returns true if the local client is known to be roaming (it is not served by
  * this node anymore) or not. If yes, the client is still present in the table
  * to keep the latter consistent with the node TTVN
  */
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr)
+				       uint8_t *addr, unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
 
-	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
 		goto out;
 
@@ -2582,26 +3300,268 @@
 
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
-					  const unsigned char *addr)
+					  const unsigned char *addr,
+					  unsigned short vid)
 {
 	bool ret = false;
 
-	/* if the originator is a backbone node (meaning it belongs to the same
-	 * LAN of this node) the temporary client must not be added because to
-	 * reach such destination the node must use the LAN instead of the mesh
-	 */
-	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-		goto out;
-
-	if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+	if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
 				  BATADV_TT_CLIENT_TEMP,
 				  atomic_read(&orig_node->last_ttvn)))
 		goto out;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
-		   "Added temporary global client (addr: %pM orig: %pM)\n",
-		   addr, orig_node->orig);
+		   "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
+		   addr, BATADV_PRINT_VID(vid), orig_node->orig);
 	ret = true;
 out:
 	return ret;
 }
+
+/**
+ * batadv_tt_local_resize_to_mtu - resize the local translation table to fit
+ *  the maximum packet size that can be transported through the mesh
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Remove entries older than 'timeout', halving the timeout each round if more
+ * entries still need to be removed.
+ */
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+{
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	int packet_size_max = atomic_read(&bat_priv->packet_size_max);
+	int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
+	bool reduced = false;
+
+	spin_lock_bh(&bat_priv->tt.commit_lock);
+
+	while (true) {
+		table_size = batadv_tt_local_table_transmit_size(bat_priv);
+		if (packet_size_max >= table_size)
+			break;
+
+		batadv_tt_local_purge(bat_priv, timeout);
+		batadv_tt_local_purge_pending_clients(bat_priv);
+
+		timeout /= 2;
+		reduced = true;
+		net_ratelimited_function(batadv_info, soft_iface,
+					 "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
+					 packet_size_max);
+	}
+
+	/* commit these changes immediately, to avoid synchronization problem
+	 * with the TTVN
+	 */
+	if (reduced)
+		batadv_tt_local_commit_changes_nolock(bat_priv);
+
+	spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
+
+/**
+ * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig,
+					  uint8_t flags, void *tvlv_value,
+					  uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_tt_vlan_data *tt_vlan;
+	struct batadv_tvlv_tt_change *tt_change;
+	struct batadv_tvlv_tt_data *tt_data;
+	uint16_t num_entries, num_vlan;
+
+	if (tvlv_value_len < sizeof(*tt_data))
+		return;
+
+	tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+	tvlv_value_len -= sizeof(*tt_data);
+
+	num_vlan = ntohs(tt_data->num_vlan);
+
+	if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+		return;
+
+	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+	tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
+	tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+
+	num_entries = batadv_tt_entries(tvlv_value_len);
+
+	batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
+			      num_entries, tt_data->ttvn);
+}
+
+/**
+ * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ *  container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+					     uint8_t *src, uint8_t *dst,
+					     void *tvlv_value,
+					     uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_tt_data *tt_data;
+	uint16_t tt_vlan_len, tt_num_entries;
+	char tt_flag;
+	bool ret;
+
+	if (tvlv_value_len < sizeof(*tt_data))
+		return NET_RX_SUCCESS;
+
+	tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+	tvlv_value_len -= sizeof(*tt_data);
+
+	tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
+	tt_vlan_len *= ntohs(tt_data->num_vlan);
+
+	if (tvlv_value_len < tt_vlan_len)
+		return NET_RX_SUCCESS;
+
+	tvlv_value_len -= tt_vlan_len;
+	tt_num_entries = batadv_tt_entries(tvlv_value_len);
+
+	switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
+	case BATADV_TT_REQUEST:
+		batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
+
+		/* If this node cannot provide a TT response the tt_request is
+		 * forwarded
+		 */
+		ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
+		if (!ret) {
+			if (tt_data->flags & BATADV_TT_FULL_TABLE)
+				tt_flag = 'F';
+			else
+				tt_flag = '.';
+
+			batadv_dbg(BATADV_DBG_TT, bat_priv,
+				   "Routing TT_REQUEST to %pM [%c]\n",
+				   dst, tt_flag);
+			/* tvlv API will re-route the packet */
+			return NET_RX_DROP;
+		}
+		break;
+	case BATADV_TT_RESPONSE:
+		batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+		if (batadv_is_my_mac(bat_priv, dst)) {
+			batadv_handle_tt_response(bat_priv, tt_data,
+						  src, tt_num_entries);
+			return NET_RX_SUCCESS;
+		}
+
+		if (tt_data->flags & BATADV_TT_FULL_TABLE)
+			tt_flag =  'F';
+		else
+			tt_flag = '.';
+
+		batadv_dbg(BATADV_DBG_TT, bat_priv,
+			   "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
+
+		/* tvlv API will re-route the packet */
+		return NET_RX_DROP;
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+					       uint8_t *src, uint8_t *dst,
+					       void *tvlv_value,
+					       uint16_t tvlv_value_len)
+{
+	struct batadv_tvlv_roam_adv *roaming_adv;
+	struct batadv_orig_node *orig_node = NULL;
+
+	/* If this node is not the intended recipient of the
+	 * roaming advertisement the packet is forwarded
+	 * (the tvlv API will re-route the packet).
+	 */
+	if (!batadv_is_my_mac(bat_priv, dst))
+		return NET_RX_DROP;
+
+	if (tvlv_value_len < sizeof(*roaming_adv))
+		goto out;
+
+	orig_node = batadv_orig_hash_find(bat_priv, src);
+	if (!orig_node)
+		goto out;
+
+	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+	roaming_adv = (struct batadv_tvlv_roam_adv *)tvlv_value;
+
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Received ROAMING_ADV from %pM (client %pM)\n",
+		   src, roaming_adv->client);
+
+	batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
+			     ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
+			     atomic_read(&orig_node->last_ttvn) + 1);
+
+out:
+	if (orig_node)
+		batadv_orig_node_free_ref(orig_node);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tt_init - initialise the translation table internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return 1 on success or a negative error number in case of failure.
+ */
+int batadv_tt_init(struct batadv_priv *bat_priv)
+{
+	int ret;
+
+	/* synchronized flags must be remote */
+	BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
+
+	ret = batadv_tt_local_init(bat_priv);
+	if (ret < 0)
+		return ret;
+
+	ret = batadv_tt_global_init(bat_priv);
+	if (ret < 0)
+		return ret;
+
+	batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+				     batadv_tt_tvlv_unicast_handler_v1,
+				     BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
+
+	batadv_tvlv_handler_register(bat_priv, NULL,
+				     batadv_roam_tvlv_unicast_handler_v1,
+				     BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
+
+	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+
+	return 1;
+}
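
The BUILD_BUG_ON above enforces at compile time that the flag bits folded into the CRC (the sync mask) overlap the bits actually carried on the wire (the remote mask); a CRC over purely local flags could never be computed consistently by remote nodes. The same check outside the kernel, as a sketch using C11's _Static_assert and the mask values from types.h:

    #define TT_REMOTE_MASK 0x00FF  /* flags sent over the wire */
    #define TT_SYNC_MASK   0x00F0  /* flags folded into the CRC */

    _Static_assert((TT_SYNC_MASK & TT_REMOTE_MASK) != 0,
                   "synchronized flags must be remote");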
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 659a3bb..026b1ff 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -20,49 +20,35 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-int batadv_tt_len(int changes_num);
 int batadv_tt_init(struct batadv_priv *bat_priv);
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 int ifindex);
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+			 unsigned short vid, int ifindex);
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, const char *message,
-				bool roaming);
+				const uint8_t *addr, unsigned short vid,
+				const char *message, bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
-			       struct batadv_orig_node *orig_node,
-			       const unsigned char *tt_buff, int tt_buff_len);
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-			 struct batadv_orig_node *orig_node,
-			 const unsigned char *addr, uint16_t flags,
-			 uint8_t ttvn);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       const char *message);
+			       int32_t match_vid, const char *message);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
 						  const uint8_t *src,
-						  const uint8_t *addr);
+						  const uint8_t *addr,
+						  unsigned short vid);
 void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-			     struct batadv_tt_query_packet *tt_request);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-			       struct batadv_tt_query_packet *tt_response);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+			 unsigned short vid);
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst);
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-			   struct batadv_orig_node *orig_node,
-			   const unsigned char *tt_buff, uint8_t tt_num_changes,
-			   uint8_t ttvn, uint16_t tt_crc);
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-			  unsigned char **packet_buff, int *packet_buff_len,
-			  int packet_min_len);
+			   uint8_t *dst, unsigned short vid);
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr);
+					uint8_t *addr, unsigned short vid);
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr);
+				       uint8_t *addr, unsigned short vid);
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
-					  const unsigned char *addr);
+					  const unsigned char *addr,
+					  unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index b2c94e1..91dd369 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -24,13 +24,6 @@
 #include "bitarray.h"
 #include <linux/kernel.h>
 
-/**
- * Maximum overhead for the encapsulation for a payload packet
- */
-#define BATADV_HEADER_LEN \
-	(ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
-			sizeof(struct batadv_bcast_packet)))
-
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
@@ -43,6 +36,18 @@
 #endif /* CONFIG_BATMAN_ADV_DAT */
 
 /**
+ * BATADV_TT_REMOTE_MASK - bitmask selecting the flags that are sent over the
+ *  wire only
+ */
+#define BATADV_TT_REMOTE_MASK	0x00FF
+
+/**
+ * BATADV_TT_SYNC_MASK - bitmask of the flags that need to be kept in sync
+ *  among the nodes. These flags are used to compute the global/local CRC
+ */
+#define BATADV_TT_SYNC_MASK	0x00F0
+
+/**
  * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
  * @ogm_buff: buffer holding the OGM packet
  * @ogm_buff_len: length of the OGM packet buffer
@@ -60,7 +65,6 @@
  * @if_num: identificator of the interface
  * @if_status: status of the interface for batman-adv
  * @net_dev: pointer to the net_device
- * @frag_seqno: last fragment sequence number sent by this interface
  * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
  * @hardif_obj: kobject of the per interface sysfs "mesh" directory
  * @refcount: number of contexts the object is used
@@ -76,7 +80,6 @@
 	int16_t if_num;
 	char if_status;
 	struct net_device *net_dev;
-	atomic_t frag_seqno;
 	uint8_t num_bcasts;
 	struct kobject *hardif_obj;
 	atomic_t refcount;
@@ -88,28 +91,97 @@
 };
 
 /**
+ * struct batadv_frag_table_entry - head in the fragment buffer table
+ * @head: head of list with fragments
+ * @lock: lock to protect the list of fragments
+ * @timestamp: time (in jiffies) of the last received fragment
+ * @seqno: sequence number of the fragments in the list
+ * @size: accumulated size of packets in list
+ */
+struct batadv_frag_table_entry {
+	struct hlist_head head;
+	spinlock_t lock; /* protects head */
+	unsigned long timestamp;
+	uint16_t seqno;
+	uint16_t size;
+};
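
Each orig_node carries a fixed-size array of these heads (see the fragments[] member added to struct batadv_orig_node below), so fragment streams from one originator are demultiplexed into independent chains. A simplified sketch of the selection step, assuming the chain index is derived from the fragment's sequence number; the real code additionally evicts a chain whose stored seqno or timestamp no longer matches:

    /* FRAG_BUFFER_COUNT stands in for BATADV_FRAG_BUFFER_COUNT (main.h) */
    #define FRAG_BUFFER_COUNT 8

    static inline struct batadv_frag_table_entry *
    frag_select_chain(struct batadv_frag_table_entry fragments[], uint16_t seqno)
    {
        return &fragments[seqno % FRAG_BUFFER_COUNT];
    }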
+
+/**
+ * struct batadv_frag_list_entry - entry in a list of fragments
+ * @list: list node information
+ * @skb: fragment
+ * @no: fragment number in the set
+ */
+struct batadv_frag_list_entry {
+	struct hlist_node list;
+	struct sk_buff *skb;
+	uint8_t no;
+};
+
+/**
+ * struct batadv_vlan_tt - VLAN specific TT attributes
+ * @crc: CRC32 checksum of the entries belonging to this vlan
+ * @num_entries: number of TT entries for this VLAN
+ */
+struct batadv_vlan_tt {
+	uint32_t crc;
+	atomic_t num_entries;
+};
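
Tracking one CRC per VLAN (instead of the old single 16-bit table CRC) lets a receiver pinpoint which VLAN's view of the table diverged. A hedged sketch of how such a checksum might be folded, assuming each synced entry is hashed on its own and the per-entry results are XORed so the fold is independent of hash-table iteration order; crc32c() stands in for the kernel helper of the same name and struct client for a TT entry:

    #include <stdint.h>
    #include <stddef.h>

    uint32_t crc32c(uint32_t crc, const void *data, unsigned int len);

    struct client { unsigned short vid; uint8_t addr[6]; };

    static uint32_t vlan_tt_crc(const struct client *clients, size_t n,
                                unsigned short vid)
    {
        uint32_t crc = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            if (clients[i].vid != vid)
                continue; /* entry belongs to another VLAN */
            crc ^= crc32c(0, clients[i].addr, 6);
        }

        return crc;
    }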
+
+/**
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
+ * @vid: the VLAN identifier
+ * @tt: VLAN specific TT attributes
+ * @list: list node for orig_node::vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_orig_node_vlan {
+	unsigned short vid;
+	struct batadv_vlan_tt tt;
+	struct list_head list;
+	atomic_t refcount;
+	struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
+ * @bcast_own: bitfield containing the number of our OGMs this orig_node
+ *  rebroadcasted "back" to us (relative to last_real_seqno)
+ * @bcast_own_sum: counted result of bcast_own
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
+ *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ */
+struct batadv_orig_bat_iv {
+	unsigned long *bcast_own;
+	uint8_t *bcast_own_sum;
+	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+	 * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+	 */
+	spinlock_t ogm_cnt_lock;
+};
+
+/**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
  * @orig: originator ethernet address
  * @primary_addr: hosts primary interface address
  * @router: router that should be used to reach this originator
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- *  rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
  * @batman_seqno_reset: time when the batman seqno window was reset
- * @gw_flags: flags related to gateway class
- * @flags: for now only VIS_SERVER flag
+ * @capabilities: announced capabilities of this originator
  * @last_ttvn: last seen translation table version number
- * @tt_crc: CRC of the translation table
  * @tt_buff: last tt changeset this node received from the orig node
  * @tt_buff_len: length of the last tt changeset this node received from the
  *  orig node
  * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_size: number of global TT entries announced by the orig node
  * @tt_initialised: bool keeping track of whether or not this node have received
  *  any translation table information from the orig node yet
+ * @tt_lock: prevents the table from being updated while it is being read.
+ *  A table update consists of two operations (data structure update and
+ *  metadata (CRC/TTVN) recalculation), which have to be executed atomically
+ *  so that no other thread can read the table/metadata in between.
  * @last_real_seqno: last and best known sequence number
  * @last_ttl: ttl of last received packet
  * @bcast_bits: bitfield containing the info which payload broadcast originated
@@ -117,14 +189,9 @@
  *  last_bcast_seqno)
  * @last_bcast_seqno: last broadcast sequence number received by this host
  * @neigh_list: list of potential next hop neighbor towards this orig node
- * @frag_list: fragmentation buffer list for fragment re-assembly
- * @last_frag_packet: time when last fragmented packet from this node was
- *  received
  * @neigh_list_lock: lock protecting neigh_list, router and bonding_list
  * @hash_entry: hlist node for batadv_priv::orig_hash
  * @bat_priv: pointer to soft_iface this orig node belongs to
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->real_bits & neigh_node->real_packet_count
  * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
  * @bond_candidates: how many candidates are available
  * @bond_list: list of bonding candidates
@@ -134,6 +201,11 @@
  * @out_coding_list: list of nodes that can hear this orig
  * @in_coding_list_lock: protects in_coding_list
  * @out_coding_list_lock: protects out_coding_list
+ * @fragments: array with heads for fragment chains
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
+ *  originator represented by this object
+ * @vlan_list_lock: lock protecting vlan_list
+ * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
 	uint8_t orig[ETH_ALEN];
@@ -142,35 +214,26 @@
 #ifdef CONFIG_BATMAN_ADV_DAT
 	batadv_dat_addr_t dat_addr;
 #endif
-	unsigned long *bcast_own;
-	uint8_t *bcast_own_sum;
 	unsigned long last_seen;
 	unsigned long bcast_seqno_reset;
 	unsigned long batman_seqno_reset;
-	uint8_t gw_flags;
-	uint8_t flags;
+	uint8_t capabilities;
 	atomic_t last_ttvn;
-	uint16_t tt_crc;
 	unsigned char *tt_buff;
 	int16_t tt_buff_len;
 	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-	atomic_t tt_size;
 	bool tt_initialised;
+	/* prevents from changing the table while reading it */
+	spinlock_t tt_lock;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
 	uint32_t last_bcast_seqno;
 	struct hlist_head neigh_list;
-	struct list_head frag_list;
-	unsigned long last_frag_packet;
 	/* neigh_list_lock protects: neigh_list, router & bonding_list */
 	spinlock_t neigh_list_lock;
 	struct hlist_node hash_entry;
 	struct batadv_priv *bat_priv;
-	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
-	 * neigh_node->real_bits & neigh_node->real_packet_count
-	 */
-	spinlock_t ogm_cnt_lock;
 	/* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
 	spinlock_t bcast_seqno_lock;
 	atomic_t bond_candidates;
@@ -183,12 +246,28 @@
 	spinlock_t in_coding_list_lock; /* Protects in_coding_list */
 	spinlock_t out_coding_list_lock; /* Protects out_coding_list */
 #endif
+	struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+	struct list_head vlan_list;
+	spinlock_t vlan_list_lock; /* protects vlan_list */
+	struct batadv_orig_bat_iv bat_iv;
+};
+
+/**
+ * enum batadv_orig_capabilities - orig node capabilities
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
+ * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
+ */
+enum batadv_orig_capabilities {
+	BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
+	BATADV_ORIG_CAPA_HAS_NC = BIT(1),
 };
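
@capabilities replaces the old catch-all @flags byte. Since it is a plain uint8_t, recording and testing what a peer announced reduces to ordinary bit operations, e.g. in code holding a valid orig_node reference:

    orig_node->capabilities |= BATADV_ORIG_CAPA_HAS_DAT; /* peer announced DAT */

    if (orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC)
        ; /* peer participates in network coding */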
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
  * @list: list node for batadv_priv_gw::list
  * @orig_node: pointer to corresponding orig node
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
  * @deleted: this struct is scheduled for deletion
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -196,46 +275,57 @@
 struct batadv_gw_node {
 	struct hlist_node list;
 	struct batadv_orig_node *orig_node;
+	uint32_t bandwidth_down;
+	uint32_t bandwidth_up;
 	unsigned long deleted;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
 
 /**
- * struct batadv_neigh_node - structure for single hop neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @addr: mac address of neigh node
+ * struct batadv_neigh_bat_iv - B.A.T.M.A.N. IV specific structure for single
+ *  hop neighbors
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
  * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @last_ttl: last received ttl from this neigh node
- * @bonding_list: list node for batadv_orig_node::bond_list
- * @last_seen: when last packet via this neighbor was received
  * @real_bits: bitfield containing the number of OGMs received from this neigh
  *  node (relative to orig_node->last_real_seqno)
  * @real_packet_count: counted result of real_bits
- * @orig_node: pointer to corresponding orig_node
- * @if_incoming: pointer to incoming hard interface
  * @lq_update_lock: lock protecting tq_recv & tq_index
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
-struct batadv_neigh_node {
-	struct hlist_node list;
-	uint8_t addr[ETH_ALEN];
+struct batadv_neigh_bat_iv {
 	uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
 	uint8_t tq_index;
 	uint8_t tq_avg;
-	uint8_t last_ttl;
-	struct list_head bonding_list;
-	unsigned long last_seen;
 	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
 	uint8_t real_packet_count;
-	struct batadv_orig_node *orig_node;
-	struct batadv_hard_iface *if_incoming;
 	spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
+};
+
+/**
+ * struct batadv_neigh_node - structure for single hop neighbors
+ * @list: list node for batadv_orig_node::neigh_list
+ * @orig_node: pointer to corresponding orig_node
+ * @addr: the MAC address of the neighboring interface
+ * @if_incoming: pointer to incoming hard interface
+ * @last_seen: when last packet via this neighbor was received
+ * @last_ttl: last received ttl from this neigh node
+ * @bonding_list: list node for batadv_orig_node::bond_list
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ * @bat_iv: B.A.T.M.A.N. IV private structure
+ */
+struct batadv_neigh_node {
+	struct hlist_node list;
+	struct batadv_orig_node *orig_node;
+	uint8_t addr[ETH_ALEN];
+	struct batadv_hard_iface *if_incoming;
+	unsigned long last_seen;
+	uint8_t last_ttl;
+	struct list_head bonding_list;
 	atomic_t refcount;
 	struct rcu_head rcu;
+	struct batadv_neigh_bat_iv bat_iv;
 };
 
 /**
@@ -265,6 +355,12 @@
  * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
  * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
  * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
+ * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
  * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
  * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
  * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
@@ -302,6 +398,12 @@
 	BATADV_CNT_MGMT_TX_BYTES,
 	BATADV_CNT_MGMT_RX,
 	BATADV_CNT_MGMT_RX_BYTES,
+	BATADV_CNT_FRAG_TX,
+	BATADV_CNT_FRAG_TX_BYTES,
+	BATADV_CNT_FRAG_RX,
+	BATADV_CNT_FRAG_RX_BYTES,
+	BATADV_CNT_FRAG_FWD,
+	BATADV_CNT_FRAG_FWD_BYTES,
 	BATADV_CNT_TT_REQUEST_TX,
 	BATADV_CNT_TT_REQUEST_RX,
 	BATADV_CNT_TT_RESPONSE_TX,
@@ -343,11 +445,14 @@
  * @changes_list_lock: lock protecting changes_list
  * @req_list_lock: lock protecting req_list
  * @roam_list_lock: lock protecting roam_list
- * @local_entry_num: number of entries in the local hash table
- * @local_crc: Checksum of the local table, recomputed before sending a new OGM
  * @last_changeset: last tt changeset this host has generated
  * @last_changeset_len: length of last tt changeset this host has generated
  * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
+ * @commit_lock: prevents a local TT commit from being executed while the
+ *  local table is being read. The commit consists of two operations (data
+ *  structure update and metadata (CRC/TTVN) recalculation), which have to be
+ *  executed atomically so that no other thread can read the table/metadata in
+ *  between.
  * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
@@ -362,12 +467,12 @@
 	spinlock_t changes_list_lock; /* protects changes */
 	spinlock_t req_list_lock; /* protects req_list */
 	spinlock_t roam_list_lock; /* protects roam_list */
-	atomic_t local_entry_num;
-	uint16_t local_crc;
 	unsigned char *last_changeset;
 	int16_t last_changeset_len;
 	/* protects last_changeset & last_changeset_len */
 	spinlock_t last_changeset_lock;
+	/* prevents from executing a commit while reading the table */
+	spinlock_t commit_lock;
 	struct delayed_work work;
 };
 
@@ -420,31 +525,31 @@
  * @list: list of available gateway nodes
  * @list_lock: lock protecting gw_list & curr_gw
  * @curr_gw: pointer to currently selected gateway node
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
  * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
 	struct hlist_head list;
 	spinlock_t list_lock; /* protects gw_list & curr_gw */
 	struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+	atomic_t bandwidth_down;
+	atomic_t bandwidth_up;
 	atomic_t reselect;
 };
 
 /**
- * struct batadv_priv_vis - per mesh interface vis data
- * @send_list: list of batadv_vis_info packets to sent
- * @hash: hash table containing vis data from other nodes in the network
- * @hash_lock: lock protecting the hash table
- * @list_lock: lock protecting my_info::recv_list
- * @work: work queue callback item for vis packet sending
- * @my_info: holds this node's vis data sent on a regular basis
+ * struct batadv_priv_tvlv - per mesh interface tvlv data
+ * @container_list: list of registered tvlv containers to be sent with each OGM
+ * @handler_list: list of the various tvlv content handlers
+ * @container_list_lock: protects tvlv container list access
+ * @handler_list_lock: protects handler list access
  */
-struct batadv_priv_vis {
-	struct list_head send_list;
-	struct batadv_hashtable *hash;
-	spinlock_t hash_lock; /* protects hash */
-	spinlock_t list_lock; /* protects my_info::recv_list */
-	struct delayed_work work;
-	struct batadv_vis_info *my_info;
+struct batadv_priv_tvlv {
+	struct hlist_head container_list;
+	struct hlist_head handler_list;
+	spinlock_t container_list_lock; /* protects container_list */
+	spinlock_t handler_list_lock; /* protects handler_list */
 };
 
 /**
@@ -491,6 +596,26 @@
 };
 
 /**
+ * struct batadv_softif_vlan - per VLAN attributes set
+ * @vid: VLAN identifier
+ * @kobj: kobject for sysfs vlan subdirectory
+ * @ap_isolation: AP isolation state
+ * @tt: TT private attributes (VLAN specific)
+ * @list: list node for bat_priv::softif_vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_softif_vlan {
+	unsigned short vid;
+	struct kobject *kobj;
+	atomic_t ap_isolation;		/* boolean */
+	struct batadv_vlan_tt tt;
+	struct hlist_node list;
+	atomic_t refcount;
+	struct rcu_head rcu;
+};
+
+/**
  * struct batadv_priv - per mesh interface data
  * @mesh_state: current status of the mesh (inactive/active/deactivating)
  * @soft_iface: net device which holds this struct as private data
@@ -499,15 +624,15 @@
  * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
  * @bonding: bool indicating whether traffic bonding is enabled
  * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @ap_isolation: bool indicating whether ap isolation is enabled
+ * @packet_size_max: max packet size that can be transmitted via
+ *  multiple fragmented skbs or a single frame if fragmentation is disabled
+ * @frag_seqno: incremental counter to identify chains of egress fragments
  * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
  *  enabled
  * @distributed_arp_table: bool indicating whether distributed ARP table is
  *  enabled
- * @vis_mode: vis operation: client or server (see batadv_vis_packettype)
  * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
  * @gw_sel_class: gateway selection class (applies if gw_mode client)
- * @gw_bandwidth: gateway announced bandwidth (applies if gw_mode server)
  * @orig_interval: OGM broadcast interval in milliseconds
  * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
  * @log_level: configured log level (see batadv_dbg_level)
@@ -527,11 +652,14 @@
  * @primary_if: one of the hard interfaces assigned to this mesh interface
  *  becomes the primary interface
  * @bat_algo_ops: routing algorithm used by this mesh interface
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
+ *  of the mesh interface represented by this object
+ * @softif_vlan_list_lock: lock protecting softif_vlan_list
  * @bla: bridge loope avoidance data
  * @debug_log: holding debug logging relevant data
  * @gw: gateway data
  * @tt: translation table data
- * @vis: vis data
+ * @tvlv: type-version-length-value data
  * @dat: distributed arp table data
  * @network_coding: bool indicating whether network coding is enabled
  * @batadv_priv_nc: network coding data
@@ -544,17 +672,16 @@
 	atomic_t aggregated_ogms;
 	atomic_t bonding;
 	atomic_t fragmentation;
-	atomic_t ap_isolation;
+	atomic_t packet_size_max;
+	atomic_t frag_seqno;
 #ifdef CONFIG_BATMAN_ADV_BLA
 	atomic_t bridge_loop_avoidance;
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
 	atomic_t distributed_arp_table;
 #endif
-	atomic_t vis_mode;
 	atomic_t gw_mode;
 	atomic_t gw_sel_class;
-	atomic_t gw_bandwidth;
 	atomic_t orig_interval;
 	atomic_t hop_penalty;
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -575,6 +702,8 @@
 	struct work_struct cleanup_work;
 	struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
 	struct batadv_algo_ops *bat_algo_ops;
+	struct hlist_head softif_vlan_list;
+	spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
 #ifdef CONFIG_BATMAN_ADV_BLA
 	struct batadv_priv_bla bla;
 #endif
@@ -583,7 +712,7 @@
 #endif
 	struct batadv_priv_gw gw;
 	struct batadv_priv_tt tt;
-	struct batadv_priv_vis vis;
+	struct batadv_priv_tvlv tvlv;
 #ifdef CONFIG_BATMAN_ADV_DAT
 	struct batadv_priv_dat dat;
 #endif
@@ -620,7 +749,7 @@
 struct batadv_socket_packet {
 	struct list_head list;
 	size_t icmp_len;
-	struct batadv_icmp_packet_rr icmp_packet;
+	uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
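
Replacing the typed member with a raw byte buffer lets the socket layer queue any of the differently sized ICMP packet variants. A sketch of how a reader might recover the typed view (the cast target and length check are illustrative):

    struct batadv_icmp_packet_rr *icmp;

    /* icmp_len records how many bytes of icmp_packet are valid */
    icmp = (struct batadv_icmp_packet_rr *)socket_packet->icmp_packet;
    if (socket_packet->icmp_len >= sizeof(*icmp))
        ; /* full record-route variant is present */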
 
 /**
@@ -677,6 +806,7 @@
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
  * @addr: mac address of non-mesh client
+ * @vid: VLAN identifier
  * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
  *  batadv_priv_tt::global_hash
  * @flags: various state handling flags (see batadv_tt_client_flags)
@@ -686,6 +816,7 @@
  */
 struct batadv_tt_common_entry {
 	uint8_t addr[ETH_ALEN];
+	unsigned short vid;
 	struct hlist_node hash_entry;
 	uint16_t flags;
 	unsigned long added_at;
@@ -740,7 +871,7 @@
  */
 struct batadv_tt_change_node {
 	struct list_head list;
-	struct batadv_tt_change change;
+	struct batadv_tvlv_tt_change change;
 };
 
 /**
@@ -866,78 +997,6 @@
 };
 
 /**
- * struct batadv_frag_packet_list_entry - storage for fragment packet
- * @list: list node for orig_node::frag_list
- * @seqno: sequence number of the fragment
- * @skb: fragment's skb buffer
- */
-struct batadv_frag_packet_list_entry {
-	struct list_head list;
-	uint16_t seqno;
-	struct sk_buff *skb;
-};
-
-/**
- * struct batadv_vis_info - local data for vis information
- * @first_seen: timestamp used for purging stale vis info entries
- * @recv_list: List of server-neighbors we have received this packet from. This
- *  packet should not be re-forward to them again. List elements are struct
- *  batadv_vis_recvlist_node
- * @send_list: list of packets to be forwarded
- * @refcount: number of contexts the object is used
- * @hash_entry: hlist node for batadv_priv_vis::hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @skb_packet: contains the vis packet
- */
-struct batadv_vis_info {
-	unsigned long first_seen;
-	struct list_head recv_list;
-	struct list_head send_list;
-	struct kref refcount;
-	struct hlist_node hash_entry;
-	struct batadv_priv *bat_priv;
-	struct sk_buff *skb_packet;
-} __packed;
-
-/**
- * struct batadv_vis_info_entry - contains link information for vis
- * @src: source MAC of the link, all zero for local TT entry
- * @dst: destination MAC of the link, client mac address for local TT entry
- * @quality: transmission quality of the link, or 0 for local TT entry
- */
-struct batadv_vis_info_entry {
-	uint8_t  src[ETH_ALEN];
-	uint8_t  dest[ETH_ALEN];
-	uint8_t  quality;
-} __packed;
-
-/**
- * struct batadv_vis_recvlist_node - list entry for batadv_vis_info::recv_list
- * @list: list node for batadv_vis_info::recv_list
- * @mac: MAC address of the originator from where the vis_info was received
- */
-struct batadv_vis_recvlist_node {
-	struct list_head list;
-	uint8_t mac[ETH_ALEN];
-};
-
-/**
- * struct batadv_vis_if_list_entry - auxiliary data for vis data generation
- * @addr: MAC address of the interface
- * @primary: true if this interface is the primary interface
- * @list: list node the interface list
- *
- * While scanning for vis-entries of a particular vis-originator
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later
- */
-struct batadv_vis_if_list_entry {
-	uint8_t addr[ETH_ALEN];
-	bool primary;
-	struct hlist_node list;
-};
-
-/**
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
@@ -948,6 +1007,16 @@
  * @bat_primary_iface_set: called when primary interface is selected / changed
  * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
  * @bat_ogm_emit: send scheduled OGM
+ * @bat_neigh_cmp: compare the metrics of two neighbors
+ * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or
+ *  better than neigh2 from the metric perspective
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ *  orig_node object
+ * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a new hard-interface being added to the mesh
+ * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a hard-interface being removed from the mesh
  */
 struct batadv_algo_ops {
 	struct hlist_node list;
@@ -958,6 +1027,17 @@
 	void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
 	void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
 	void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+	int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
+			     struct batadv_neigh_node *neigh2);
+	bool (*bat_neigh_is_equiv_or_better)(struct batadv_neigh_node *neigh1,
+					     struct batadv_neigh_node *neigh2);
+	/* orig_node handling API */
+	void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq);
+	void (*bat_orig_free)(struct batadv_orig_node *orig_node);
+	int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
+			       int max_if_num);
+	int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
+			       int max_if_num, int del_if_num);
 };
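
With the routing-specific neighbor state split into the bat_iv sub-structure, these new callbacks let each algorithm rank neighbors by its own metric. A hypothetical sketch (all names invented) of B.A.T.M.A.N. IV-style callbacks comparing the averaged TQ values kept in struct batadv_neigh_bat_iv; the mandatory iface/OGM hooks are omitted for brevity:

    static int dummy_neigh_cmp(struct batadv_neigh_node *n1,
                               struct batadv_neigh_node *n2)
    {
        /* positive if n1 has the better (higher) averaged TQ */
        return (int)n1->bat_iv.tq_avg - (int)n2->bat_iv.tq_avg;
    }

    static bool dummy_neigh_is_equiv_or_better(struct batadv_neigh_node *n1,
                                               struct batadv_neigh_node *n2)
    {
        return dummy_neigh_cmp(n1, n2) >= 0;
    }

    static struct batadv_algo_ops dummy_ops = {
        .name = "BATMAN_DUMMY",
        .bat_neigh_cmp = dummy_neigh_cmp,
        .bat_neigh_is_equiv_or_better = dummy_neigh_is_equiv_or_better,
    };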
 
 /**
@@ -965,6 +1045,7 @@
  * is used to stored ARP entries needed for the global DAT cache
  * @ip: the IPv4 corresponding to this DAT/ARP entry
  * @mac_addr: the MAC address associated to the stored IPv4
+ * @vid: the vlan ID associated to this entry
  * @last_update: time in jiffies when this entry was refreshed last time
  * @hash_entry: hlist node for batadv_priv_dat::hash
  * @refcount: number of contexts the object is used
@@ -973,6 +1054,7 @@
 struct batadv_dat_entry {
 	__be32 ip;
 	uint8_t mac_addr[ETH_ALEN];
+	unsigned short vid;
 	unsigned long last_update;
 	struct hlist_node hash_entry;
 	atomic_t refcount;
@@ -992,4 +1074,60 @@
 	struct batadv_orig_node *orig_node;
 };
 
+/**
+ * struct batadv_tvlv_container - container for tvlv appended to OGMs
+ * @list: hlist node for batadv_priv_tvlv::container_list
+ * @tvlv_hdr: tvlv header information needed to construct the tvlv
+ * @value_len: length of the buffer following this struct which contains
+ *  the actual tvlv payload
+ * @refcount: number of contexts the object is used
+ */
+struct batadv_tvlv_container {
+	struct hlist_node list;
+	struct batadv_tvlv_hdr tvlv_hdr;
+	atomic_t refcount;
+};
+
+/**
+ * struct batadv_tvlv_handler - handler for specific tvlv type and version
+ * @list: hlist node for batadv_priv_tvlv::handler_list
+ * @ogm_handler: handler callback which is given the tvlv payload to process on
+ *  incoming OGM packets
+ * @unicast_handler: handler callback which is given the tvlv payload to process
+ *  on incoming unicast tvlv packets
+ * @type: tvlv type this handler feels responsible for
+ * @version: tvlv version this handler feels responsible for
+ * @flags: tvlv handler flags
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_tvlv_handler {
+	struct hlist_node list;
+	void (*ogm_handler)(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig,
+			    uint8_t flags,
+			    void *tvlv_value, uint16_t tvlv_value_len);
+	int (*unicast_handler)(struct batadv_priv *bat_priv,
+			       uint8_t *src, uint8_t *dst,
+			       void *tvlv_value, uint16_t tvlv_value_len);
+	uint8_t type;
+	uint8_t version;
+	uint8_t flags;
+	atomic_t refcount;
+	struct rcu_head rcu;
+};
+
+/**
+ * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: the tvlv ogm processing function will
+ *  call this handler even if no tvlv of its type was found (passing no data)
+ * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the API marks
+ *  a handler as having been called, so it won't be called again even if the
+ *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
+enum batadv_tvlv_handler_flags {
+	BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+	BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
+};
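
BATADV_TVLV_HANDLER_OGM_CIFNOTFND ("call if not found") suits handlers that must also notice the absence of their TVLV, e.g. a peer that stopped announcing gateway bandwidth, while BATADV_TVLV_HANDLER_OGM_CALLED is managed internally by the API and never passed by callers. A hedged registration sketch (the handler name is invented; BATADV_TVLV_GW is the gateway TVLV type from packet.h):

    /* ask the OGM parser to invoke my_gw_ogm_handler even when the OGM
     * carries no matching TVLV; in that case the handler sees an empty
     * payload and can react to the peer withdrawing its announcement
     */
    batadv_tvlv_handler_register(bat_priv, my_gw_ogm_handler, NULL,
                                 BATADV_TVLV_GW, 1,
                                 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);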
+
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
deleted file mode 100644
index 48b31d3..0000000
--- a/net/batman-adv/unicast.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "gateway_client.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-static struct sk_buff *
-batadv_frag_merge_packet(struct list_head *head,
-			 struct batadv_frag_packet_list_entry *tfp,
-			 struct sk_buff *skb)
-{
-	struct batadv_unicast_frag_packet *up;
-	struct sk_buff *tmp_skb;
-	struct batadv_unicast_packet *unicast_packet;
-	int hdr_len = sizeof(*unicast_packet);
-	int uni_diff = sizeof(*up) - hdr_len;
-	uint8_t *packet_pos;
-
-	up = (struct batadv_unicast_frag_packet *)skb->data;
-	/* set skb to the first part and tmp_skb to the second part */
-	if (up->flags & BATADV_UNI_FRAG_HEAD) {
-		tmp_skb = tfp->skb;
-	} else {
-		tmp_skb = skb;
-		skb = tfp->skb;
-	}
-
-	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
-		goto err;
-
-	skb_pull(tmp_skb, sizeof(*up));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
-		goto err;
-
-	/* move free entry to end */
-	tfp->skb = NULL;
-	tfp->seqno = 0;
-	list_move_tail(&tfp->list, head);
-
-	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
-	kfree_skb(tmp_skb);
-
-	memmove(skb->data + uni_diff, skb->data, hdr_len);
-	packet_pos = skb_pull(skb, uni_diff);
-	unicast_packet = (struct batadv_unicast_packet *)packet_pos;
-	unicast_packet->header.packet_type = BATADV_UNICAST;
-
-	return skb;
-
-err:
-	/* free buffered skb, skb will be freed later */
-	kfree_skb(tfp->skb);
-	return NULL;
-}
-
-static void batadv_frag_create_entry(struct list_head *head,
-				     struct sk_buff *skb)
-{
-	struct batadv_frag_packet_list_entry *tfp;
-	struct batadv_unicast_frag_packet *up;
-
-	up = (struct batadv_unicast_frag_packet *)skb->data;
-
-	/* free and oldest packets stand at the end */
-	tfp = list_entry((head)->prev, typeof(*tfp), list);
-	kfree_skb(tfp->skb);
-
-	tfp->seqno = ntohs(up->seqno);
-	tfp->skb = skb;
-	list_move(&tfp->list, head);
-	return;
-}
-
-static int batadv_frag_create_buffer(struct list_head *head)
-{
-	int i;
-	struct batadv_frag_packet_list_entry *tfp;
-
-	for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
-		tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
-		if (!tfp) {
-			batadv_frag_list_free(head);
-			return -ENOMEM;
-		}
-		tfp->skb = NULL;
-		tfp->seqno = 0;
-		INIT_LIST_HEAD(&tfp->list);
-		list_add(&tfp->list, head);
-	}
-
-	return 0;
-}
-
-static struct batadv_frag_packet_list_entry *
-batadv_frag_search_packet(struct list_head *head,
-			  const struct batadv_unicast_frag_packet *up)
-{
-	struct batadv_frag_packet_list_entry *tfp;
-	struct batadv_unicast_frag_packet *tmp_up = NULL;
-	bool is_head_tmp, is_head;
-	uint16_t search_seqno;
-
-	if (up->flags & BATADV_UNI_FRAG_HEAD)
-		search_seqno = ntohs(up->seqno)+1;
-	else
-		search_seqno = ntohs(up->seqno)-1;
-
-	is_head = up->flags & BATADV_UNI_FRAG_HEAD;
-
-	list_for_each_entry(tfp, head, list) {
-		if (!tfp->skb)
-			continue;
-
-		if (tfp->seqno == ntohs(up->seqno))
-			goto mov_tail;
-
-		tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
-
-		if (tfp->seqno == search_seqno) {
-			is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
-			if (is_head_tmp != is_head)
-				return tfp;
-			else
-				goto mov_tail;
-		}
-	}
-	return NULL;
-
-mov_tail:
-	list_move_tail(&tfp->list, head);
-	return NULL;
-}
-
-void batadv_frag_list_free(struct list_head *head)
-{
-	struct batadv_frag_packet_list_entry *pf, *tmp_pf;
-
-	if (!list_empty(head)) {
-		list_for_each_entry_safe(pf, tmp_pf, head, list) {
-			kfree_skb(pf->skb);
-			list_del(&pf->list);
-			kfree(pf);
-		}
-	}
-	return;
-}
-
-/* frag_reassemble_skb():
- * returns NET_RX_DROP if the operation failed - skb is left intact
- * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
- * or the skb could be reassembled (skb_new will point to the new packet and
- * skb was freed)
- */
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-			       struct batadv_priv *bat_priv,
-			       struct sk_buff **new_skb)
-{
-	struct batadv_orig_node *orig_node;
-	struct batadv_frag_packet_list_entry *tmp_frag_entry;
-	int ret = NET_RX_DROP;
-	struct batadv_unicast_frag_packet *unicast_packet;
-
-	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-	*new_skb = NULL;
-
-	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
-	if (!orig_node)
-		goto out;
-
-	orig_node->last_frag_packet = jiffies;
-
-	if (list_empty(&orig_node->frag_list) &&
-	    batadv_frag_create_buffer(&orig_node->frag_list)) {
-		pr_debug("couldn't create frag buffer\n");
-		goto out;
-	}
-
-	tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
-						   unicast_packet);
-
-	if (!tmp_frag_entry) {
-		batadv_frag_create_entry(&orig_node->frag_list, skb);
-		ret = NET_RX_SUCCESS;
-		goto out;
-	}
-
-	*new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
-					    tmp_frag_entry, skb);
-	/* if not, merge failed */
-	if (*new_skb)
-		ret = NET_RX_SUCCESS;
-
-out:
-	if (orig_node)
-		batadv_orig_node_free_ref(orig_node);
-	return ret;
-}
-
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-			 struct batadv_hard_iface *hard_iface,
-			 const uint8_t dstaddr[])
-{
-	struct batadv_unicast_packet tmp_uc, *unicast_packet;
-	struct batadv_hard_iface *primary_if;
-	struct sk_buff *frag_skb;
-	struct batadv_unicast_frag_packet *frag1, *frag2;
-	int uc_hdr_len = sizeof(*unicast_packet);
-	int ucf_hdr_len = sizeof(*frag1);
-	int data_len = skb->len - uc_hdr_len;
-	int large_tail = 0, ret = NET_RX_DROP;
-	uint16_t seqno;
-
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto dropped;
-
-	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
-	if (!frag_skb)
-		goto dropped;
-
-	skb->priority = TC_PRIO_CONTROL;
-	skb_reserve(frag_skb, ucf_hdr_len);
-
-	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
-
-	if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
-	    batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
-		goto drop_frag;
-
-	frag1 = (struct batadv_unicast_frag_packet *)skb->data;
-	frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
-
-	memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
-
-	frag1->header.ttl--;
-	frag1->header.version = BATADV_COMPAT_VERSION;
-	frag1->header.packet_type = BATADV_UNICAST_FRAG;
-
-	memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(frag2, frag1, sizeof(*frag2));
-
-	if (data_len & 1)
-		large_tail = BATADV_UNI_FRAG_LARGETAIL;
-
-	frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
-	frag2->flags = large_tail;
-
-	seqno = atomic_add_return(2, &hard_iface->frag_seqno);
-	frag1->seqno = htons(seqno - 1);
-	frag2->seqno = htons(seqno);
-
-	batadv_send_skb_packet(skb, hard_iface, dstaddr);
-	batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
-	ret = NET_RX_SUCCESS;
-	goto out;
-
-drop_frag:
-	kfree_skb(frag_skb);
-dropped:
-	kfree_skb(skb);
-out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
-	return ret;
-}
-
-/**
- * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
- * common fields for unicast packets
- * @skb: packet
- * @hdr_size: amount of bytes to push at the beginning of the skb
- * @orig_node: the destination node
- *
- * Returns false if the buffer extension was not possible or true otherwise
- */
-static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
-					     struct batadv_orig_node *orig_node)
-{
-	struct batadv_unicast_packet *unicast_packet;
-	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-
-	if (batadv_skb_head_push(skb, hdr_size) < 0)
-		return false;
-
-	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	unicast_packet->header.version = BATADV_COMPAT_VERSION;
-	/* batman packet type: unicast */
-	unicast_packet->header.packet_type = BATADV_UNICAST;
-	/* set unicast ttl */
-	unicast_packet->header.ttl = BATADV_TTL;
-	/* copy the destination for faster routing */
-	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-	/* set the destination tt version number */
-	unicast_packet->ttvn = ttvn;
-
-	return true;
-}
-
-/**
- * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
-				       struct batadv_orig_node *orig_node)
-{
-	size_t uni_size = sizeof(struct batadv_unicast_packet);
-	return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
-}
-
-/**
- * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
- * header
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- * @packet_subtype: the batman 4addr packet subtype to use
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-				      struct sk_buff *skb,
-				      struct batadv_orig_node *orig,
-				      int packet_subtype)
-{
-	struct batadv_hard_iface *primary_if;
-	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-	bool ret = false;
-
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* pull the header space and fill the unicast_packet substructure.
-	 * We can do that because the first member of the unicast_4addr_packet
-	 * is of type struct unicast_packet
-	 */
-	if (!batadv_unicast_push_and_fill_skb(skb,
-					      sizeof(*unicast_4addr_packet),
-					      orig))
-		goto out;
-
-	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-	unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
-	memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
-	       ETH_ALEN);
-	unicast_4addr_packet->subtype = packet_subtype;
-	unicast_4addr_packet->reserved = 0;
-
-	ret = true;
-out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
-	return ret;
-}
-
-/**
- * batadv_unicast_generic_send_skb - send an skb as unicast
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: payload to send
- * @packet_type: the batman unicast packet type to use
- * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
- *		    not BATADV_UNICAT_4ADDR
- *
- * Returns 1 in case of error or 0 otherwise
- */
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-				    struct sk_buff *skb, int packet_type,
-				    int packet_subtype)
-{
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct batadv_unicast_packet *unicast_packet;
-	struct batadv_orig_node *orig_node;
-	struct batadv_neigh_node *neigh_node;
-	int data_len = skb->len;
-	int ret = NET_RX_DROP;
-	unsigned int dev_mtu, header_len;
-
-	/* get routing information */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		orig_node = batadv_gw_get_selected_orig(bat_priv);
-		if (orig_node)
-			goto find_router;
-	}
-
-	/* check for tt host - increases orig_node refcount.
-	 * returns NULL in case of AP isolation
-	 */
-	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-					     ethhdr->h_dest);
-
-find_router:
-	/* find_router():
-	 *  - if orig_node is NULL it returns NULL
-	 *  - increases neigh_nodes refcount if found.
-	 */
-	neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
-
-	if (!neigh_node)
-		goto out;
-
-	switch (packet_type) {
-	case BATADV_UNICAST:
-		if (!batadv_unicast_prepare_skb(skb, orig_node))
-			goto out;
-
-		header_len = sizeof(struct batadv_unicast_packet);
-		break;
-	case BATADV_UNICAST_4ADDR:
-		if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-						      packet_subtype))
-			goto out;
-
-		header_len = sizeof(struct batadv_unicast_4addr_packet);
-		break;
-	default:
-		/* this function supports UNICAST and UNICAST_4ADDR only. It
-		 * should never be invoked with any other packet type
-		 */
-		goto out;
-	}
-
-	ethhdr = (struct ethhdr *)(skb->data + header_len);
-	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-	/* inform the destination node that we are still missing a correct route
-	 * for this client. The destination will receive this packet and will
-	 * try to reroute it because the ttvn contained in the header is less
-	 * than the current one
-	 */
-	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
-		unicast_packet->ttvn = unicast_packet->ttvn - 1;
-
-	dev_mtu = neigh_node->if_incoming->net_dev->mtu;
-	/* fragmentation mechanism only works for UNICAST (now) */
-	if (packet_type == BATADV_UNICAST &&
-	    atomic_read(&bat_priv->fragmentation) &&
-	    data_len + sizeof(*unicast_packet) > dev_mtu) {
-		/* send frag skb decreases ttl */
-		unicast_packet->header.ttl++;
-		ret = batadv_frag_send_skb(skb, bat_priv,
-					   neigh_node->if_incoming,
-					   neigh_node->addr);
-		goto out;
-	}
-
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-		ret = 0;
-
-out:
-	if (neigh_node)
-		batadv_neigh_node_free_ref(neigh_node);
-	if (orig_node)
-		batadv_orig_node_free_ref(orig_node);
-	if (ret == NET_RX_DROP)
-		kfree_skb(skb);
-	return ret;
-}
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
deleted file mode 100644
index 429cf8a..0000000
--- a/net/batman-adv/unicast.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#include "packet.h"
-
-#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-			       struct batadv_priv *bat_priv,
-			       struct sk_buff **new_skb);
-void batadv_frag_list_free(struct list_head *head);
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-			 struct batadv_hard_iface *hard_iface,
-			 const uint8_t dstaddr[]);
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-				      struct sk_buff *skb,
-				      struct batadv_orig_node *orig_node,
-				      int packet_subtype);
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-				    struct sk_buff *skb, int packet_type,
-				    int packet_subtype);
-
-
-/**
- * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- */
-static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
-					  struct sk_buff *skb)
-{
-	return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
-					       0);
-}
-
-/**
- * batadv_unicast_send_skb - send the skb encapsulated in a unicast4addr packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- * @packet_subtype: the batman 4addr packet subtype to use
- */
-static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
-						struct sk_buff *skb,
-						int packet_subtype)
-{
-	return batadv_unicast_generic_send_skb(bat_priv, skb,
-					       BATADV_UNICAST_4ADDR,
-					       packet_subtype);
-}
-
-static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
-{
-	const struct batadv_unicast_frag_packet *unicast_packet;
-	int uneven_correction = 0;
-	unsigned int merged_size;
-
-	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-
-	if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
-		if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
-			uneven_correction = 1;
-		else
-			uneven_correction = -1;
-	}
-
-	merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
-	merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
-
-	return merged_size <= mtu;
-}
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
deleted file mode 100644
index d8ea31a..0000000
--- a/net/batman-adv/vis.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-#include "originator.h"
-
-#define BATADV_MAX_VIS_PACKET_SIZE 1000
-
-/* hash class keys */
-static struct lock_class_key batadv_vis_hash_lock_class_key;
-
-/* free the info */
-static void batadv_free_info(struct kref *ref)
-{
-	struct batadv_vis_info *info;
-	struct batadv_priv *bat_priv;
-	struct batadv_vis_recvlist_node *entry, *tmp;
-
-	info = container_of(ref, struct batadv_vis_info, refcount);
-	bat_priv = info->bat_priv;
-
-	list_del_init(&info->send_list);
-	spin_lock_bh(&bat_priv->vis.list_lock);
-	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	spin_unlock_bh(&bat_priv->vis.list_lock);
-	kfree_skb(info->skb_packet);
-	kfree(info);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
-{
-	const struct batadv_vis_info *d1, *d2;
-	const struct batadv_vis_packet *p1, *p2;
-
-	d1 = container_of(node, struct batadv_vis_info, hash_entry);
-	d2 = data2;
-	p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
-	p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
-	return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
- */
-static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
-{
-	const struct batadv_vis_info *vis_info = data;
-	const struct batadv_vis_packet *packet;
-	const unsigned char *key;
-	uint32_t hash = 0;
-	size_t i;
-
-	packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
-	key = packet->vis_orig;
-	for (i = 0; i < ETH_ALEN; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
-
-	return hash % size;
-}
-
-static struct batadv_vis_info *
-batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-	struct batadv_hashtable *hash = bat_priv->vis.hash;
-	struct hlist_head *head;
-	struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
-	uint32_t index;
-
-	if (!hash)
-		return NULL;
-
-	index = batadv_vis_info_choose(data, hash->size);
-	head = &hash->table[index];
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
-		if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
-			continue;
-
-		vis_info_tmp = vis_info;
-		break;
-	}
-	rcu_read_unlock();
-
-	return vis_info_tmp;
-}
-
-/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list
- */
-static void batadv_vis_data_insert_interface(const uint8_t *interface,
-					     struct hlist_head *if_list,
-					     bool primary)
-{
-	struct batadv_vis_if_list_entry *entry;
-
-	hlist_for_each_entry(entry, if_list, list) {
-		if (batadv_compare_eth(entry->addr, interface))
-			return;
-	}
-
-	/* it's a new address, add it to the list */
-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-	if (!entry)
-		return;
-	memcpy(entry->addr, interface, ETH_ALEN);
-	entry->primary = primary;
-	hlist_add_head(&entry->list, if_list);
-}
-
-static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
-					  const struct hlist_head *if_list)
-{
-	struct batadv_vis_if_list_entry *entry;
-
-	hlist_for_each_entry(entry, if_list, list) {
-		if (entry->primary)
-			seq_puts(seq, "PRIMARY, ");
-		else
-			seq_printf(seq,  "SEC %pM, ", entry->addr);
-	}
-}
-
-/* read an entry */
-static ssize_t
-batadv_vis_data_read_entry(struct seq_file *seq,
-			   const struct batadv_vis_info_entry *entry,
-			   const uint8_t *src, bool primary)
-{
-	if (primary && entry->quality == 0)
-		return seq_printf(seq, "TT %pM, ", entry->dest);
-	else if (batadv_compare_eth(entry->src, src))
-		return seq_printf(seq, "TQ %pM %d, ", entry->dest,
-				  entry->quality);
-
-	return 0;
-}
-
-static void
-batadv_vis_data_insert_interfaces(struct hlist_head *list,
-				  struct batadv_vis_packet *packet,
-				  struct batadv_vis_info_entry *entries)
-{
-	int i;
-
-	for (i = 0; i < packet->entries; i++) {
-		if (entries[i].quality == 0)
-			continue;
-
-		if (batadv_compare_eth(entries[i].src, packet->vis_orig))
-			continue;
-
-		batadv_vis_data_insert_interface(entries[i].src, list, false);
-	}
-}
-
-static void batadv_vis_data_read_entries(struct seq_file *seq,
-					 struct hlist_head *list,
-					 struct batadv_vis_packet *packet,
-					 struct batadv_vis_info_entry *entries)
-{
-	int i;
-	struct batadv_vis_if_list_entry *entry;
-
-	hlist_for_each_entry(entry, list, list) {
-		seq_printf(seq, "%pM,", entry->addr);
-
-		for (i = 0; i < packet->entries; i++)
-			batadv_vis_data_read_entry(seq, &entries[i],
-						   entry->addr, entry->primary);
-
-		/* add primary/secondary records */
-		if (batadv_compare_eth(entry->addr, packet->vis_orig))
-			batadv_vis_data_read_prim_sec(seq, list);
-
-		seq_puts(seq, "\n");
-	}
-}
-
-static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
-					     const struct hlist_head *head)
-{
-	struct batadv_vis_info *info;
-	struct batadv_vis_packet *packet;
-	uint8_t *entries_pos;
-	struct batadv_vis_info_entry *entries;
-	struct batadv_vis_if_list_entry *entry;
-	struct hlist_node *n;
-
-	HLIST_HEAD(vis_if_list);
-
-	hlist_for_each_entry_rcu(info, head, hash_entry) {
-		packet = (struct batadv_vis_packet *)info->skb_packet->data;
-		entries_pos = (uint8_t *)packet + sizeof(*packet);
-		entries = (struct batadv_vis_info_entry *)entries_pos;
-
-		batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
-						 true);
-		batadv_vis_data_insert_interfaces(&vis_if_list, packet,
-						  entries);
-		batadv_vis_data_read_entries(seq, &vis_if_list, packet,
-					     entries);
-
-		hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
-			hlist_del(&entry->list);
-			kfree(entry);
-		}
-	}
-}
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct batadv_hard_iface *primary_if;
-	struct hlist_head *head;
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->vis.hash;
-	uint32_t i;
-	int ret = 0;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
-		goto out;
-
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-		batadv_vis_seq_print_text_bucket(seq, head);
-	}
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
-	return ret;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in.
- */
-static void batadv_send_list_add(struct batadv_priv *bat_priv,
-				 struct batadv_vis_info *info)
-{
-	if (list_empty(&info->send_list)) {
-		kref_get(&info->refcount);
-		list_add_tail(&info->send_list, &bat_priv->vis.send_list);
-	}
-}
-
-/* delete the info packet from the send list, if it was
- * linked in.
- */
-static void batadv_send_list_del(struct batadv_vis_info *info)
-{
-	if (!list_empty(&info->send_list)) {
-		list_del_init(&info->send_list);
-		kref_put(&info->refcount, batadv_free_info);
-	}
-}
-
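batadv_send_list_add() and batadv_send_list_del() above keep one kref per list membership: a reference is taken only when the info is actually linked and dropped only when it is actually unlinked, so both calls are safe to repeat. A standalone analogue of this "list membership owns a reference" rule (a sketch, not batman-adv code):

	#include <assert.h>

	struct info {
		int refcount;	/* stands in for struct kref */
		int linked;	/* stands in for !list_empty(&send_list) */
	};

	static void send_list_add(struct info *info)
	{
		if (!info->linked) {	/* take a ref only when really linking */
			info->linked = 1;
			info->refcount++;
		}
	}

	static void send_list_del(struct info *info)
	{
		if (info->linked) {	/* drop only the ref taken in add */
			info->linked = 0;
			info->refcount--;	/* kref_put(); would free at zero */
		}
	}

	int main(void)
	{
		struct info i = { .refcount = 1, .linked = 0 }; /* creator's ref */

		send_list_add(&i);
		send_list_add(&i);	/* no-op: already linked */
		send_list_del(&i);
		send_list_del(&i);	/* no-op: already unlinked */
		assert(i.refcount == 1);	/* balanced */
		return 0;
	}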
-/* tries to add one entry to the receive list. */
-static void batadv_recv_list_add(struct batadv_priv *bat_priv,
-				 struct list_head *recv_list, const char *mac)
-{
-	struct batadv_vis_recvlist_node *entry;
-
-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-	if (!entry)
-		return;
-
-	memcpy(entry->mac, mac, ETH_ALEN);
-	spin_lock_bh(&bat_priv->vis.list_lock);
-	list_add_tail(&entry->list, recv_list);
-	spin_unlock_bh(&bat_priv->vis.list_lock);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
-				  const struct list_head *recv_list,
-				  const char *mac)
-{
-	const struct batadv_vis_recvlist_node *entry;
-
-	spin_lock_bh(&bat_priv->vis.list_lock);
-	list_for_each_entry(entry, recv_list, list) {
-		if (batadv_compare_eth(entry->mac, mac)) {
-			spin_unlock_bh(&bat_priv->vis.list_lock);
-			return 1;
-		}
-	}
-	spin_unlock_bh(&bat_priv->vis.list_lock);
-	return 0;
-}
-
-/* try to add the packet to the vis_hash. Return NULL if invalid (e.g. too
- * old or broken). The vis hash must be locked outside. is_new is set when
- * the packet is newer than old entries in the hash.
- */
-static struct batadv_vis_info *
-batadv_add_packet(struct batadv_priv *bat_priv,
-		  struct batadv_vis_packet *vis_packet, int vis_info_len,
-		  int *is_new, int make_broadcast)
-{
-	struct batadv_vis_info *info, *old_info;
-	struct batadv_vis_packet *search_packet, *old_packet;
-	struct batadv_vis_info search_elem;
-	struct batadv_vis_packet *packet;
-	struct sk_buff *tmp_skb;
-	int hash_added;
-	size_t len;
-	size_t max_entries;
-
-	*is_new = 0;
-	/* sanity check */
-	if (!bat_priv->vis.hash)
-		return NULL;
-
-	/* see if the packet is already in vis_hash */
-	search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
-	if (!search_elem.skb_packet)
-		return NULL;
-	len = sizeof(*search_packet);
-	tmp_skb = search_elem.skb_packet;
-	search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
-
-	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-	old_info = batadv_vis_hash_find(bat_priv, &search_elem);
-	kfree_skb(search_elem.skb_packet);
-
-	if (old_info) {
-		tmp_skb = old_info->skb_packet;
-		old_packet = (struct batadv_vis_packet *)tmp_skb->data;
-		if (!batadv_seq_after(ntohl(vis_packet->seqno),
-				      ntohl(old_packet->seqno))) {
-			if (old_packet->seqno == vis_packet->seqno) {
-				batadv_recv_list_add(bat_priv,
-						     &old_info->recv_list,
-						     vis_packet->sender_orig);
-				return old_info;
-			} else {
-				/* newer packet is already in hash. */
-				return NULL;
-			}
-		}
-		/* remove old entry */
-		batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
-				   batadv_vis_info_choose, old_info);
-		batadv_send_list_del(old_info);
-		kref_put(&old_info->refcount, batadv_free_info);
-	}
-
-	info = kmalloc(sizeof(*info), GFP_ATOMIC);
-	if (!info)
-		return NULL;
-
-	len = sizeof(*packet) + vis_info_len;
-	info->skb_packet = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-	if (!info->skb_packet) {
-		kfree(info);
-		return NULL;
-	}
-	info->skb_packet->priority = TC_PRIO_CONTROL;
-	skb_reserve(info->skb_packet, ETH_HLEN);
-	packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
-
-	kref_init(&info->refcount);
-	INIT_LIST_HEAD(&info->send_list);
-	INIT_LIST_HEAD(&info->recv_list);
-	info->first_seen = jiffies;
-	info->bat_priv = bat_priv;
-	memcpy(packet, vis_packet, len);
-
-	/* initialize and add new packet. */
-	*is_new = 1;
-
-	/* Make it a broadcast packet, if required */
-	if (make_broadcast)
-		memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-
-	/* clamp the entry count if it claims more than the packet holds. */
-	max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
-	if (packet->entries > max_entries)
-		packet->entries = max_entries;
-
-	batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
-	/* try to add it */
-	hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-				     batadv_vis_info_choose, info,
-				     &info->hash_entry);
-	if (hash_added != 0) {
-		/* did not work (for some reason) */
-		kref_put(&info->refcount, batadv_free_info);
-		info = NULL;
-	}
-
-	return info;
-}
-
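batadv_add_packet() above accepts a replacement only when batadv_seq_after() says the incoming seqno is newer; an equal seqno merely extends the receive list. Assuming batadv_seq_after() follows the usual wrap-safe serial-number comparison (the in-tree helper may additionally bound the window), the core test looks like this standalone sketch:

	#include <assert.h>
	#include <stdint.h>

	/* wrap-safe "a is newer than b" for 32-bit sequence numbers */
	static int seq_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		assert(seq_after(2, 1));
		assert(!seq_after(1, 2));
		assert(seq_after(0, UINT32_MAX));	/* newer despite wrap */
		return 0;
	}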
-/* handle the server sync packet, forward if needed. */
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-				       struct batadv_vis_packet *vis_packet,
-				       int vis_info_len)
-{
-	struct batadv_vis_info *info;
-	int is_new, make_broadcast;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-
-	make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
-
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-				 &is_new, make_broadcast);
-	if (!info)
-		goto end;
-
-	/* only if we are server ourselves and packet is newer than the one in
-	 * hash.
-	 */
-	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
-		batadv_send_list_add(bat_priv, info);
-end:
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-					 struct batadv_vis_packet *vis_packet,
-					 int vis_info_len)
-{
-	struct batadv_vis_info *info;
-	struct batadv_vis_packet *packet;
-	int is_new;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-	int are_target = 0;
-
-	/* clients shall not broadcast. */
-	if (is_broadcast_ether_addr(vis_packet->target_orig))
-		return;
-
-	/* Are we the target for this VIS packet? */
-	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC	&&
-	    batadv_is_my_mac(bat_priv, vis_packet->target_orig))
-		are_target = 1;
-
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-				 &is_new, are_target);
-
-	if (!info)
-		goto end;
-	/* note that outdated packets will be dropped at this point. */
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-	/* send only if we're the target server or ... */
-	if (are_target && is_new) {
-		packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC;	/* upgrade! */
-		batadv_send_list_add(bat_priv, info);
-
-		/* ... we're not the recipient (and thus need to forward). */
-	} else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
-		batadv_send_list_add(bat_priv, info);
-	}
-
-end:
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* Walk the originators and find the VIS server with the best tq. Set the
- * packet's target address to that server's address and return the best tq.
- *
- * Must be called with the originator hash locked
- */
-static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
-				       struct batadv_vis_info *info)
-{
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct batadv_neigh_node *router;
-	struct hlist_head *head;
-	struct batadv_orig_node *orig_node;
-	struct batadv_vis_packet *packet;
-	int best_tq = -1;
-	uint32_t i;
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			router = batadv_orig_node_get_router(orig_node);
-			if (!router)
-				continue;
-
-			if ((orig_node->flags & BATADV_VIS_SERVER) &&
-			    (router->tq_avg > best_tq)) {
-				best_tq = router->tq_avg;
-				memcpy(packet->target_orig, orig_node->orig,
-				       ETH_ALEN);
-			}
-			batadv_neigh_node_free_ref(router);
-		}
-		rcu_read_unlock();
-	}
-
-	return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
-{
-	const struct batadv_vis_packet *packet;
-	size_t num;
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-	num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
-
-	if (num < packet->entries + 1)
-		return true;
-	return false;
-}
-
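For scale: a batadv_vis_info_entry holds a 6-byte src, a 6-byte dest and a one-byte quality, so assuming the expected packed 13-byte layout, BATADV_MAX_VIS_PACKET_SIZE (1000 bytes) fits 1000 / 13 = 76 entries, and the check above reports the packet full once those 76 are present.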
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated
- */
-static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
-{
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_head *head;
-	struct batadv_orig_node *orig_node;
-	struct batadv_neigh_node *router;
-	struct batadv_vis_info *info = bat_priv->vis.my_info;
-	struct batadv_vis_packet *packet;
-	struct batadv_vis_info_entry *entry;
-	struct batadv_tt_common_entry *tt_common_entry;
-	uint8_t *packet_pos;
-	int best_tq = -1;
-	uint32_t i;
-
-	info->first_seen = jiffies;
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-	packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
-	memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-	packet->header.ttl = BATADV_TTL;
-	packet->seqno = htonl(ntohl(packet->seqno) + 1);
-	packet->entries = 0;
-	packet->reserved = 0;
-	skb_trim(info->skb_packet, sizeof(*packet));
-
-	if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
-		best_tq = batadv_find_best_vis_server(bat_priv, info);
-
-		if (best_tq < 0)
-			return best_tq;
-	}
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			router = batadv_orig_node_get_router(orig_node);
-			if (!router)
-				continue;
-
-			if (!batadv_compare_eth(router->addr, orig_node->orig))
-				goto next;
-
-			if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
-				goto next;
-
-			if (router->tq_avg < 1)
-				goto next;
-
-			/* fill one entry into buffer. */
-			packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-			entry = (struct batadv_vis_info_entry *)packet_pos;
-			memcpy(entry->src,
-			       router->if_incoming->net_dev->dev_addr,
-			       ETH_ALEN);
-			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
-			entry->quality = router->tq_avg;
-			packet->entries++;
-
-next:
-			batadv_neigh_node_free_ref(router);
-
-			if (batadv_vis_packet_full(info))
-				goto unlock;
-		}
-		rcu_read_unlock();
-	}
-
-	hash = bat_priv->tt.local_hash;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_common_entry, head,
-					 hash_entry) {
-			packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-			entry = (struct batadv_vis_info_entry *)packet_pos;
-			memset(entry->src, 0, ETH_ALEN);
-			memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
-			entry->quality = 0; /* 0 means TT */
-			packet->entries++;
-
-			if (batadv_vis_packet_full(info))
-				goto unlock;
-		}
-		rcu_read_unlock();
-	}
-
-	return 0;
-
-unlock:
-	rcu_read_unlock();
-	return 0;
-}
-
-/* free old vis packets. Must be called with the vis.hash_lock
- * held
- */
-static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
-{
-	uint32_t i;
-	struct batadv_hashtable *hash = bat_priv->vis.hash;
-	struct hlist_node *node_tmp;
-	struct hlist_head *head;
-	struct batadv_vis_info *info;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		hlist_for_each_entry_safe(info, node_tmp,
-					  head, hash_entry) {
-			/* never purge own data. */
-			if (info == bat_priv->vis.my_info)
-				continue;
-
-			if (batadv_has_timed_out(info->first_seen,
-						 BATADV_VIS_TIMEOUT)) {
-				hlist_del(&info->hash_entry);
-				batadv_send_list_del(info);
-				kref_put(&info->refcount, batadv_free_info);
-			}
-		}
-	}
-}
-
-static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
-					struct batadv_vis_info *info)
-{
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_head *head;
-	struct batadv_orig_node *orig_node;
-	struct batadv_vis_packet *packet;
-	struct sk_buff *skb;
-	uint32_t i, res;
-
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-	/* send to all routers in range. */
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-			/* if it's a vis server and reachable, send it. */
-			if (!(orig_node->flags & BATADV_VIS_SERVER))
-				continue;
-
-			/* don't send it if we already received the packet from
-			 * this node.
-			 */
-			if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
-						   orig_node->orig))
-				continue;
-
-			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-			if (!skb)
-				continue;
-
-			res = batadv_send_skb_to_orig(skb, orig_node, NULL);
-			if (res == NET_XMIT_DROP)
-				kfree_skb(skb);
-		}
-		rcu_read_unlock();
-	}
-}
-
-static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
-				      struct batadv_vis_info *info)
-{
-	struct batadv_orig_node *orig_node;
-	struct sk_buff *skb;
-	struct batadv_vis_packet *packet;
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-	orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
-	if (!orig_node)
-		goto out;
-
-	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-	if (!skb)
-		goto out;
-
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
-		kfree_skb(skb);
-
-out:
-	if (orig_node)
-		batadv_orig_node_free_ref(orig_node);
-}
-
-/* send a single vis packet; called from batadv_send_vis_packets() */
-static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
-				   struct batadv_vis_info *info)
-{
-	struct batadv_hard_iface *primary_if;
-	struct batadv_vis_packet *packet;
-
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	packet = (struct batadv_vis_packet *)info->skb_packet->data;
-	if (packet->header.ttl < 2) {
-		pr_debug("Error - can't send vis packet: ttl exceeded\n");
-		goto out;
-	}
-
-	memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	packet->header.ttl--;
-
-	if (is_broadcast_ether_addr(packet->target_orig))
-		batadv_broadcast_vis_packet(bat_priv, info);
-	else
-		batadv_unicast_vis_packet(bat_priv, info);
-	packet->header.ttl++; /* restore TTL */
-
-out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void batadv_send_vis_packets(struct work_struct *work)
-{
-	struct delayed_work *delayed_work;
-	struct batadv_priv *bat_priv;
-	struct batadv_priv_vis *priv_vis;
-	struct batadv_vis_info *info;
-
-	delayed_work = container_of(work, struct delayed_work, work);
-	priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
-	bat_priv = container_of(priv_vis, struct batadv_priv, vis);
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-	batadv_purge_vis_packets(bat_priv);
-
-	if (batadv_generate_vis_packet(bat_priv) == 0) {
-		/* schedule if generation was successful */
-		batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
-	}
-
-	while (!list_empty(&bat_priv->vis.send_list)) {
-		info = list_first_entry(&bat_priv->vis.send_list,
-					typeof(*info), send_list);
-
-		kref_get(&info->refcount);
-		spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-		batadv_send_vis_packet(bat_priv, info);
-
-		spin_lock_bh(&bat_priv->vis.hash_lock);
-		batadv_send_list_del(info);
-		kref_put(&info->refcount, batadv_free_info);
-	}
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-			   msecs_to_jiffies(BATADV_VIS_INTERVAL));
-}
-
-/* init the vis server. This may only be called once if_list is already
- * initialized (e.g. bat0 is up and interfaces have been added)
- */
-int batadv_vis_init(struct batadv_priv *bat_priv)
-{
-	struct batadv_vis_packet *packet;
-	int hash_added;
-	unsigned int len;
-	unsigned long first_seen;
-	struct sk_buff *tmp_skb;
-
-	if (bat_priv->vis.hash)
-		return 0;
-
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-
-	bat_priv->vis.hash = batadv_hash_new(256);
-	if (!bat_priv->vis.hash) {
-		pr_err("Can't initialize vis_hash\n");
-		goto err;
-	}
-
-	batadv_hash_set_lock_class(bat_priv->vis.hash,
-				   &batadv_vis_hash_lock_class_key);
-
-	bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-	if (!bat_priv->vis.my_info)
-		goto err;
-
-	len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-	bat_priv->vis.my_info->skb_packet = netdev_alloc_skb_ip_align(NULL,
-								      len);
-	if (!bat_priv->vis.my_info->skb_packet)
-		goto free_info;
-
-	bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
-	skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
-	tmp_skb = bat_priv->vis.my_info->skb_packet;
-	packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
-
-	/* prefill the vis info */
-	first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-	bat_priv->vis.my_info->first_seen = first_seen;
-	INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
-	INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
-	kref_init(&bat_priv->vis.my_info->refcount);
-	bat_priv->vis.my_info->bat_priv = bat_priv;
-	packet->header.version = BATADV_COMPAT_VERSION;
-	packet->header.packet_type = BATADV_VIS;
-	packet->header.ttl = BATADV_TTL;
-	packet->seqno = 0;
-	packet->reserved = 0;
-	packet->entries = 0;
-
-	INIT_LIST_HEAD(&bat_priv->vis.send_list);
-
-	hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-				     batadv_vis_info_choose,
-				     bat_priv->vis.my_info,
-				     &bat_priv->vis.my_info->hash_entry);
-	if (hash_added != 0) {
-		pr_err("Can't add own vis packet into hash\n");
-		/* not in hash, need to remove it manually. */
-		kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
-		goto err;
-	}
-
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-	INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-			   msecs_to_jiffies(BATADV_VIS_INTERVAL));
-
-	return 0;
-
-free_info:
-	kfree(bat_priv->vis.my_info);
-	bat_priv->vis.my_info = NULL;
-err:
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-	batadv_vis_quit(bat_priv);
-	return -ENOMEM;
-}
-
-/* Decrease the reference count on a hash item info */
-static void batadv_free_info_ref(struct hlist_node *node, void *arg)
-{
-	struct batadv_vis_info *info;
-
-	info = container_of(node, struct batadv_vis_info, hash_entry);
-	batadv_send_list_del(info);
-	kref_put(&info->refcount, batadv_free_info);
-}
-
-/* shutdown vis-server */
-void batadv_vis_quit(struct batadv_priv *bat_priv)
-{
-	if (!bat_priv->vis.hash)
-		return;
-
-	cancel_delayed_work_sync(&bat_priv->vis.work);
-
-	spin_lock_bh(&bat_priv->vis.hash_lock);
-	/* properly remove, kill timers ... */
-	batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
-	bat_priv->vis.hash = NULL;
-	bat_priv->vis.my_info = NULL;
-	spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
deleted file mode 100644
index ad92b0e..0000000
--- a/net/batman-adv/vis.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-/* timeout of vis packets in milliseconds */
-#define BATADV_VIS_TIMEOUT		200000
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-				       struct batadv_vis_packet *vis_packet,
-				       int vis_info_len);
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-					 struct batadv_vis_packet *vis_packet,
-					 int vis_info_len);
-int batadv_vis_init(struct batadv_priv *bat_priv);
-void batadv_vis_quit(struct batadv_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
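BATADV_VIS_TIMEOUT above is 200 seconds; batadv_purge_vis_packets() in vis.c drops any entry whose first_seen is older than that via batadv_has_timed_out(). A userspace sketch of the wrap-safe comparison such a check builds on (the same trick as the kernel's time_after(); not the in-tree helper itself):

	#include <assert.h>
	#include <stdint.h>

	/* wrap-safe "now is at or past (stamp + timeout)" on a free-running tick */
	static int has_timed_out(uint32_t now, uint32_t stamp, uint32_t timeout)
	{
		return (int32_t)(now - (stamp + timeout)) >= 0;
	}

	int main(void)
	{
		assert(has_timed_out(1000, 0, 500));		/* past the deadline */
		assert(!has_timed_out(100, 0, 500));		/* not yet */
		assert(has_timed_out(300, 0xffffff00, 500));	/* deadline wrapped */
		return 0;
	}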
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index dea6a28..6a791e7 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -11,3 +11,5 @@
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
 	a2mp.o amp.o
+
+subdir-ccflags-y += -D__CHECK_ENDIAN__
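With __CHECK_ENDIAN__ now set for the whole directory, a sparse run -- e.g. "make C=2 net/bluetooth/" -- will flag mixed __le16/__be16 and plain-integer usage across all of the sources listed above, rather than only in files that define the macro themselves.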
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 17f33a6..efcd108 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -15,8 +15,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
 
 /* Global AMP Manager list */
 LIST_HEAD(amp_mgr_list);
@@ -75,33 +76,26 @@
 	return mgr->ident;
 }
 
-static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
-{
-	cl->id = 0;
-	cl->type = 0;
-	cl->status = 1;
-}
-
 /* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
 {
-	int i = 0;
 	struct hci_dev *hdev;
+	int i = 1;
 
-	__a2mp_cl_bredr(cl);
+	cl[0].id = AMP_ID_BREDR;
+	cl[0].type = AMP_TYPE_BREDR;
+	cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
 
 	list_for_each_entry(hdev, &hci_dev_list, list) {
-		/* Iterate through AMP controllers */
-		if (hdev->id == HCI_BREDR_ID)
-			continue;
-
-		/* Starting from second entry */
-		if (++i >= num_ctrl)
-			return;
-
-		cl[i].id = hdev->id;
-		cl[i].type = hdev->amp_type;
-		cl[i].status = hdev->amp_status;
+		if (hdev->dev_type == HCI_AMP) {
+			cl[i].id = hdev->id;
+			cl[i].type = hdev->amp_type;
+			if (test_bit(HCI_UP, &hdev->flags))
+				cl[i].status = hdev->amp_status;
+			else
+				cl[i].status = AMP_STATUS_POWERED_DOWN;
+			i++;
+		}
 	}
 }
 
@@ -129,6 +123,7 @@
 	struct a2mp_discov_rsp *rsp;
 	u16 ext_feat;
 	u8 num_ctrl;
+	struct hci_dev *hdev;
 
 	if (len < sizeof(*req))
 		return -EINVAL;
@@ -152,7 +147,14 @@
 
 	read_lock(&hci_dev_list_lock);
 
-	num_ctrl = __hci_num_ctrl();
+	/* at minimum the BR/EDR controller needs to be listed */
+	num_ctrl = 1;
+
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (hdev->dev_type == HCI_AMP)
+			num_ctrl++;
+	}
+
 	len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
 	rsp = kmalloc(len, GFP_ATOMIC);
 	if (!rsp) {
@@ -163,7 +165,7 @@
 	rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
 	rsp->ext_feat = 0;
 
-	__a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+	__a2mp_add_cl(mgr, rsp->cl);
 
 	read_unlock(&hci_dev_list_lock);
 
@@ -208,7 +210,7 @@
 		BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
 		       cl->status);
 
-		if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
+		if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
 			struct a2mp_info_req req;
 
 			found = true;
@@ -344,7 +346,7 @@
 	tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
 
 	hdev = hci_dev_get(req->id);
-	if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
+	if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
 		struct a2mp_amp_assoc_rsp rsp;
 		rsp.id = req->id;
 
@@ -451,7 +453,7 @@
 	rsp.remote_id = req->local_id;
 
 	hdev = hci_dev_get(req->remote_id);
-	if (!hdev || hdev->amp_type != HCI_AMP) {
+	if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
 		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
 		goto send_rsp;
 	}
@@ -535,7 +537,8 @@
 		goto send_rsp;
 	}
 
-	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
+	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+				       &mgr->l2cap_conn->hcon->dst);
 	if (!hcon) {
 		BT_ERR("No phys link exist");
 		rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
@@ -669,7 +672,8 @@
 	l2cap_chan_put(chan);
 }
 
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
+				      int err)
 {
 	struct amp_mgr *mgr = chan->data;
 
@@ -706,6 +710,9 @@
 	.teardown = l2cap_chan_no_teardown,
 	.ready = l2cap_chan_no_ready,
 	.defer = l2cap_chan_no_defer,
+	.resume = l2cap_chan_no_resume,
+	.set_shutdown = l2cap_chan_no_set_shutdown,
+	.get_sndtimeo = l2cap_chan_no_get_sndtimeo,
 };
 
 static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
@@ -829,6 +836,9 @@
 {
 	struct amp_mgr *mgr;
 
+	if (conn->hcon->type != ACL_LINK)
+		return NULL;
+
 	mgr = amp_mgr_create(conn, false);
 	if (!mgr) {
 		BT_ERR("Could not create AMP manager");
@@ -871,7 +881,7 @@
 	rsp.id = hdev->id;
 	rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
 
-	if (hdev->amp_type != HCI_BREDR) {
+	if (hdev->amp_type != AMP_TYPE_BREDR) {
 		rsp.status = 0;
 		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
 		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
diff --git a/include/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
similarity index 100%
rename from include/net/bluetooth/a2mp.h
rename to net/bluetooth/a2mp.h
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 9096137..f6a1671 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,12 +25,13 @@
 /* Bluetooth address family and sockets. */
 
 #include <linux/module.h>
+#include <linux/debugfs.h>
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
-#define VERSION "2.16"
+#define VERSION "2.17"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO	8
@@ -221,12 +222,12 @@
 	if (flags & (MSG_OOB))
 		return -EOPNOTSUPP;
 
-	msg->msg_namelen = 0;
-
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb) {
-		if (sk->sk_shutdown & RCV_SHUTDOWN)
+		if (sk->sk_shutdown & RCV_SHUTDOWN) {
+			msg->msg_namelen = 0;
 			return 0;
+		}
 		return err;
 	}
 
@@ -238,9 +239,16 @@
 
 	skb_reset_transport_header(skb);
 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
-	if (err == 0)
+	if (err == 0) {
 		sock_recv_ts_and_drops(msg, sk, skb);
 
+		if (bt_sk(sk)->skb_msg_name)
+			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
+						&msg->msg_namelen);
+		else
+			msg->msg_namelen = 0;
+	}
+
 	skb_free_datagram(sk, skb);
 
 	return err ? : copied;
@@ -490,6 +498,7 @@
 }
 EXPORT_SYMBOL(bt_sock_ioctl);
 
+/* This function expects the sk lock to be held when called */
 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 {
 	DECLARE_WAITQUEUE(wait, current);
@@ -525,6 +534,46 @@
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+/* This function expects the sk lock to be held when called */
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long timeo;
+	int err = 0;
+
+	BT_DBG("sk %p", sk);
+
+	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
+		if (!timeo) {
+			err = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			err = sock_intr_errno(timeo);
+			break;
+		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock(sk);
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		err = sock_error(sk);
+		if (err)
+			break;
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	return err;
+}
+EXPORT_SYMBOL(bt_sock_wait_ready);
+
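The helper above parks a sender while BT_SK_SUSPEND is set, waking on timeout, signal, or socket error. A hypothetical caller (not from this patch; queue_and_send() is a placeholder) would gate its transmit path like this, holding the socket lock as the comment requires:

	/* hypothetical sendmsg fragment; bt_sock_wait_ready() needs sk locked */
	lock_sock(sk);
	err = bt_sock_wait_ready(sk, msg->msg_flags);
	if (!err)
		err = queue_and_send(sk, msg, len);	/* placeholder transmit */
	release_sock(sk);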
 #ifdef CONFIG_PROC_FS
 struct bt_seq_state {
 	struct bt_sock_list *l;
@@ -563,7 +612,7 @@
 	struct bt_sock_list *l = s->l;
 
 	if (v == SEQ_START_TOKEN) {
-		seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");
 
 		if (l->custom_seq_show) {
 			seq_putc(seq, ' ');
@@ -576,15 +625,13 @@
 		struct bt_sock *bt = bt_sk(sk);
 
 		seq_printf(seq,
-			   "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
+			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
 			   sk,
 			   atomic_read(&sk->sk_refcnt),
 			   sk_rmem_alloc_get(sk),
 			   sk_wmem_alloc_get(sk),
 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
 			   sock_i_ino(sk),
-			   &bt->src,
-			   &bt->dst,
 			   bt->parent? sock_i_ino(bt->parent): 0LU);
 
 		if (l->custom_seq_show) {
@@ -662,12 +709,17 @@
 	.create	= bt_sock_create,
 };
 
+struct dentry *bt_debugfs;
+EXPORT_SYMBOL_GPL(bt_debugfs);
+
 static int __init bt_init(void)
 {
 	int err;
 
 	BT_INFO("Core ver %s", VERSION);
 
+	bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+
 	err = bt_sysfs_init();
 	if (err < 0)
 		return err;
@@ -708,7 +760,6 @@
 
 static void __exit bt_exit(void)
 {
-
 	sco_exit();
 
 	l2cap_exit();
@@ -718,6 +769,8 @@
 	sock_unregister(PF_BLUETOOTH);
 
 	bt_sysfs_cleanup();
+
+	debugfs_remove_recursive(bt_debugfs);
 }
 
 subsys_initcall(bt_init);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index d459ed4..bb39509 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -14,10 +14,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci.h>
 #include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
 #include <crypto/hash.h>
 
+#include "a2mp.h"
+#include "amp.h"
+
 /* Remote AMP Controllers interface */
 void amp_ctrl_get(struct amp_ctrl *ctrl)
 {
@@ -110,7 +111,7 @@
 struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
 			     u8 remote_id, bool out)
 {
-	bdaddr_t *dst = mgr->l2cap_conn->dst;
+	bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
 	struct hci_conn *hcon;
 
 	hcon = hci_conn_add(hdev, AMP_LINK, dst);
@@ -409,7 +410,8 @@
 	struct hci_cp_create_accept_logical_link cp;
 	struct hci_dev *hdev;
 
-	BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
+	BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
+	       &chan->conn->hcon->dst);
 
 	if (!hs_hcon)
 		return;
diff --git a/include/net/bluetooth/amp.h b/net/bluetooth/amp.h
similarity index 100%
rename from include/net/bluetooth/amp.h
rename to net/bluetooth/amp.h
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index e430b1a..a841d3e 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -32,6 +32,7 @@
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "bnep.h"
@@ -510,20 +511,13 @@
 
 static struct device *bnep_get_device(struct bnep_session *session)
 {
-	bdaddr_t *src = &bt_sk(session->sock->sk)->src;
-	bdaddr_t *dst = &bt_sk(session->sock->sk)->dst;
-	struct hci_dev *hdev;
 	struct hci_conn *conn;
 
-	hdev = hci_get_route(dst, src);
-	if (!hdev)
+	conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
+	if (!conn)
 		return NULL;
 
-	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-
-	hci_dev_put(hdev);
-
-	return conn ? &conn->dev : NULL;
+	return &conn->dev;
 }
 
 static struct device_type bnep_type = {
@@ -539,8 +533,8 @@
 
 	BT_DBG("");
 
-	baswap((void *) dst, &bt_sk(sock->sk)->dst);
-	baswap((void *) src, &bt_sk(sock->sk)->src);
+	baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
+	baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
 	/* session struct allocated as private part of net_device */
 	dev = alloc_netdev(sizeof(struct bnep_session),
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index e0a6ebf..67fe5e8 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -340,20 +340,20 @@
 
 	down_write(&cmtp_session_sem);
 
-	s = __cmtp_get_session(&bt_sk(sock->sk)->dst);
+	s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);
 	if (s && s->state == BT_CONNECTED) {
 		err = -EEXIST;
 		goto failed;
 	}
 
-	bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
+	bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);
 
 	session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
 					l2cap_pi(sock->sk)->chan->imtu);
 
 	BT_DBG("mtu %d", session->mtu);
 
-	sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
+	sprintf(session->name, "%pMR", &session->bdaddr);
 
 	session->sock  = sock;
 	session->state = BT_CONFIG;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index f081712..ba5366c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -28,8 +28,9 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
+#include "a2mp.h"
 
 struct sco_param {
 	u16 pkt_type;
@@ -49,30 +50,6 @@
 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
 };
 
-static void hci_le_create_connection(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-	struct hci_cp_le_create_conn cp;
-
-	conn->state = BT_CONNECT;
-	conn->out = true;
-	conn->link_mode |= HCI_LM_MASTER;
-	conn->sec_level = BT_SECURITY_LOW;
-
-	memset(&cp, 0, sizeof(cp));
-	cp.scan_interval = __constant_cpu_to_le16(0x0060);
-	cp.scan_window = __constant_cpu_to_le16(0x0030);
-	bacpy(&cp.peer_addr, &conn->dst);
-	cp.peer_addr_type = conn->dst_type;
-	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
-	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
-	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
-	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
-	cp.max_ce_len = __constant_cpu_to_le16(0x0000);
-
-	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
-}
-
 static void hci_le_create_connection_cancel(struct hci_conn *conn)
 {
 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
@@ -340,8 +317,10 @@
 }
 
 /* Enter sniff mode */
-static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+static void hci_conn_idle(struct work_struct *work)
 {
+	struct hci_conn *conn = container_of(work, struct hci_conn,
+					     idle_work.work);
 	struct hci_dev *hdev = conn->hdev;
 
 	BT_DBG("hcon %p mode %d", conn, conn->mode);
@@ -375,21 +354,12 @@
 	}
 }
 
-static void hci_conn_idle(unsigned long arg)
+static void hci_conn_auto_accept(struct work_struct *work)
 {
-	struct hci_conn *conn = (void *) arg;
+	struct hci_conn *conn = container_of(work, struct hci_conn,
+					     auto_accept_work.work);
 
-	BT_DBG("hcon %p mode %d", conn, conn->mode);
-
-	hci_conn_enter_sniff_mode(conn);
-}
-
-static void hci_conn_auto_accept(unsigned long arg)
-{
-	struct hci_conn *conn = (void *) arg;
-	struct hci_dev *hdev = conn->hdev;
-
-	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
 		     &conn->dst);
 }
 
@@ -404,6 +374,7 @@
 		return NULL;
 
 	bacpy(&conn->dst, dst);
+	bacpy(&conn->src, &hdev->bdaddr);
 	conn->hdev  = hdev;
 	conn->type  = type;
 	conn->mode  = HCI_CM_ACTIVE;
@@ -437,9 +408,8 @@
 	INIT_LIST_HEAD(&conn->chan_list);
 
 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
-	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
-	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
-		    (unsigned long) conn);
+	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -460,11 +430,9 @@
 
 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
-	del_timer(&conn->idle_timer);
-
 	cancel_delayed_work_sync(&conn->disc_work);
-
-	del_timer(&conn->auto_accept_timer);
+	cancel_delayed_work_sync(&conn->auto_accept_work);
+	cancel_delayed_work_sync(&conn->idle_work);
 
 	if (conn->type == ACL_LINK) {
 		struct hci_conn *sco = conn->link;
@@ -518,6 +486,7 @@
 	list_for_each_entry(d, &hci_dev_list, list) {
 		if (!test_bit(HCI_UP, &d->flags) ||
 		    test_bit(HCI_RAW, &d->flags) ||
+		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
 		    d->dev_type != HCI_BREDR)
 			continue;
 
@@ -545,34 +514,124 @@
 }
 EXPORT_SYMBOL(hci_get_route);
 
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+{
+	struct hci_conn *conn;
+
+	if (status == 0)
+		return;
+
+	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
+	       status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	if (!conn)
+		goto done;
+
+	conn->state = BT_CLOSED;
+
+	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+			    status);
+
+	hci_proto_connect_cfm(conn, status);
+
+	hci_conn_del(conn);
+
+done:
+	hci_dev_unlock(hdev);
+}
+
+static int hci_create_le_conn(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_create_conn cp;
+	struct hci_request req;
+	int err;
+
+	hci_req_init(&req, hdev);
+
+	memset(&cp, 0, sizeof(cp));
+	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
+	cp.own_address_type = conn->src_type;
+	cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
+	cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
+	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+	cp.max_ce_len = __constant_cpu_to_le16(0x0000);
+
+	hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+
+	err = hci_req_run(&req, create_le_conn_complete);
+	if (err) {
+		hci_conn_del(conn);
+		return err;
+	}
+
+	return 0;
+}
+
 static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				    u8 dst_type, u8 sec_level, u8 auth_type)
 {
-	struct hci_conn *le;
+	struct hci_conn *conn;
+	int err;
 
-	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
+	if (test_bit(HCI_ADVERTISING, &hdev->flags))
 		return ERR_PTR(-ENOTSUPP);
 
-	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
-	if (!le) {
-		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-		if (le)
-			return ERR_PTR(-EBUSY);
-
-		le = hci_conn_add(hdev, LE_LINK, dst);
-		if (!le)
-			return ERR_PTR(-ENOMEM);
-
-		le->dst_type = bdaddr_to_le(dst_type);
-		hci_le_create_connection(le);
+	/* Some devices send ATT messages as soon as the physical link is
+	 * established. To be able to handle these ATT messages, the user-
+	 * space first establishes the connection and then starts the pairing
+	 * process.
+	 *
+	 * So if a hci_conn object already exists for the following connection
+	 * attempt, we simply update pending_sec_level and auth_type fields
+	 * and return the object found.
+	 */
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	if (conn) {
+		conn->pending_sec_level = sec_level;
+		conn->auth_type = auth_type;
+		goto done;
 	}
 
-	le->pending_sec_level = sec_level;
-	le->auth_type = auth_type;
+	/* Since the controller supports only one LE connection attempt at a
+	 * time, we return -EBUSY if there is any connection attempt running.
+	 */
+	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	if (conn)
+		return ERR_PTR(-EBUSY);
 
-	hci_conn_hold(le);
+	conn = hci_conn_add(hdev, LE_LINK, dst);
+	if (!conn)
+		return ERR_PTR(-ENOMEM);
 
-	return le;
+	if (dst_type == BDADDR_LE_PUBLIC)
+		conn->dst_type = ADDR_LE_DEV_PUBLIC;
+	else
+		conn->dst_type = ADDR_LE_DEV_RANDOM;
+
+	conn->src_type = hdev->own_addr_type;
+
+	conn->state = BT_CONNECT;
+	conn->out = true;
+	conn->link_mode |= HCI_LM_MASTER;
+	conn->sec_level = BT_SECURITY_LOW;
+	conn->pending_sec_level = sec_level;
+	conn->auth_type = auth_type;
+
+	err = hci_create_le_conn(conn);
+	if (err)
+		return ERR_PTR(err);
+
+done:
+	hci_conn_hold(conn);
+	return conn;
 }
 
 static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
@@ -580,6 +639,9 @@
 {
 	struct hci_conn *acl;
 
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return ERR_PTR(-ENOTSUPP);
+
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
 		acl = hci_conn_add(hdev, ACL_LINK, dst);
@@ -846,8 +908,8 @@
 
 timer:
 	if (hdev->idle_timeout > 0)
-		mod_timer(&conn->idle_timer,
-			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
+		queue_delayed_work(hdev->workqueue, &conn->idle_work,
+				   msecs_to_jiffies(hdev->idle_timeout));
 }
 
 /* Drop all connection on the device */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fb7356f..6ccc4eb 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -27,8 +27,9 @@
 
 #include <linux/export.h>
 #include <linux/idr.h>
-
 #include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -55,6 +56,586 @@
 	hci_sock_dev_event(hdev, event);
 }
 
+/* ---- HCI debugfs entries ---- */
+
+static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[3];
+
+	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
+	buf[1] = '\n';
+	buf[2] = '\0';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	struct sk_buff *skb;
+	char buf[32];
+	size_t buf_size = min(count, (sizeof(buf)-1));
+	bool enable;
+	int err;
+
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return -ENETDOWN;
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	if (strtobool(buf, &enable))
+		return -EINVAL;
+
+	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
+		return -EALREADY;
+
+	hci_req_lock(hdev);
+	if (enable)
+		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
+				     HCI_CMD_TIMEOUT);
+	else
+		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
+				     HCI_CMD_TIMEOUT);
+	hci_req_unlock(hdev);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	err = -bt_to_errno(skb->data[0]);
+	kfree_skb(skb);
+
+	if (err < 0)
+		return err;
+
+	change_bit(HCI_DUT_MODE, &hdev->dev_flags);
+
+	return count;
+}
+
+static const struct file_operations dut_mode_fops = {
+	.open		= simple_open,
+	.read		= dut_mode_read,
+	.write		= dut_mode_write,
+	.llseek		= default_llseek,
+};
+
+static int features_show(struct seq_file *f, void *ptr)
+{
+	struct hci_dev *hdev = f->private;
+	u8 p;
+
+	hci_dev_lock(hdev);
+	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+			   hdev->features[p][0], hdev->features[p][1],
+			   hdev->features[p][2], hdev->features[p][3],
+			   hdev->features[p][4], hdev->features[p][5],
+			   hdev->features[p][6], hdev->features[p][7]);
+	}
+	if (lmp_le_capable(hdev))
+		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+			   hdev->le_features[0], hdev->le_features[1],
+			   hdev->le_features[2], hdev->le_features[3],
+			   hdev->le_features[4], hdev->le_features[5],
+			   hdev->le_features[6], hdev->le_features[7]);
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+	.open		= features_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+	struct hci_dev *hdev = f->private;
+	struct bdaddr_list *b;
+
+	hci_dev_lock(hdev);
+	list_for_each_entry(b, &hdev->blacklist, list)
+		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+	.open		= blacklist_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+	struct hci_dev *hdev = f->private;
+	struct bt_uuid *uuid;
+
+	hci_dev_lock(hdev);
+	list_for_each_entry(uuid, &hdev->uuids, list) {
+		u8 i, val[16];
+
+		/* The Bluetooth UUID values are stored in big endian,
+		 * but with reversed byte order. So convert them into
+		 * the right order for the %pUb modifier.
+		 */
+		for (i = 0; i < 16; i++)
+			val[i] = uuid->uuid[15 - i];
+
+		seq_printf(f, "%pUb\n", val);
+	}
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+	.open		= uuids_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+	struct hci_dev *hdev = f->private;
+	struct discovery_state *cache = &hdev->discovery;
+	struct inquiry_entry *e;
+
+	hci_dev_lock(hdev);
+
+	list_for_each_entry(e, &cache->all, all) {
+		struct inquiry_data *data = &e->data;
+		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+			   &data->bdaddr,
+			   data->pscan_rep_mode, data->pscan_period_mode,
+			   data->pscan_mode, data->dev_class[2],
+			   data->dev_class[1], data->dev_class[0],
+			   __le16_to_cpu(data->clock_offset),
+			   data->rssi, data->ssp_mode, e->timestamp);
+	}
+
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+	.open		= inquiry_cache_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+	struct hci_dev *hdev = f->private;
+	struct list_head *p, *n;
+
+	hci_dev_lock(hdev);
+	list_for_each_safe(p, n, &hdev->link_keys) {
+		struct link_key *key = list_entry(p, struct link_key, list);
+		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+	}
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+	.open		= link_keys_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[3];
+
+	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+	buf[1] = '\n';
+	buf[2] = '\0';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+	.open		= simple_open,
+	.read		= use_debug_keys_read,
+	.llseek		= default_llseek,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+	struct hci_dev *hdev = f->private;
+
+	hci_dev_lock(hdev);
+	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+		   hdev->dev_class[1], hdev->dev_class[0]);
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+	.open		= dev_class_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->voice_setting;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+			NULL, "0x%4.4llx\n");
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	hdev->auto_accept_delay = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->auto_accept_delay;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+			auto_accept_delay_set, "%llu\n");
+
+static int ssp_debug_mode_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+	struct sk_buff *skb;
+	__u8 mode;
+	int err;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return -ENETDOWN;
+
+	hci_req_lock(hdev);
+	mode = val;
+	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
+			     &mode, HCI_CMD_TIMEOUT);
+	hci_req_unlock(hdev);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	err = -bt_to_errno(skb->data[0]);
+	kfree_skb(skb);
+
+	if (err < 0)
+		return err;
+
+	hci_dev_lock(hdev);
+	hdev->ssp_debug_mode = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int ssp_debug_mode_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->ssp_debug_mode;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
+			ssp_debug_mode_set, "%llu\n");
+
+static int idle_timeout_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val != 0 && (val < 500 || val > 3600000))
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->idle_timeout = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->idle_timeout;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+			idle_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->sniff_min_interval = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->sniff_min_interval;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+			sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->sniff_max_interval = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->sniff_max_interval;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+			sniff_max_interval_set, "%llu\n");
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+	struct hci_dev *hdev = f->private;
+
+	hci_dev_lock(hdev);
+	seq_printf(f, "%pMR\n", &hdev->static_addr);
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+	.open		= static_address_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int own_address_type_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->own_addr_type = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int own_address_type_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->own_addr_type;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
+			own_address_type_set, "%llu\n");
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+	struct hci_dev *hdev = f->private;
+	struct list_head *p, *n;
+
+	hci_dev_lock(hdev);
+	list_for_each_safe(p, n, &hdev->long_term_keys) {
+		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
+		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
+			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+			   8, ltk->rand, 16, ltk->val);
+	}
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+	.open		= long_term_keys_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->le_conn_min_interval = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->le_conn_min_interval;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+			conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->le_conn_max_interval = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->le_conn_max_interval;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+			conn_max_interval_set, "%llu\n");
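LE connection intervals are carried in units of 1.25 ms, so the range
both setters accept matches the limits from the core specification:

	/* 0x0006 * 1.25 ms =    7.5 ms (shortest allowed)
	 * 0x0c80 * 1.25 ms = 4000.0 ms (longest allowed)
	 */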
+
 /* ---- HCI requests ---- */
 
 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -307,11 +888,23 @@
 	/* Read Local Version */
 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 
+	/* Read Local Supported Commands */
+	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+	/* Read Local Supported Features */
+	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
 	/* Read Local AMP Info */
 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
 
 	/* Read Data Blk size */
 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+	/* Read Flow Control Mode */
+	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+	/* Read Location Data */
+	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
 }
 
 static void hci_init1_req(struct hci_request *req, unsigned long opt)
@@ -341,6 +934,8 @@
 
 static void bredr_setup(struct hci_request *req)
 {
+	struct hci_dev *hdev = req->hdev;
+
 	__le16 param;
 	__u8 flt_type;
 
@@ -356,6 +951,12 @@
 	/* Read Voice Setting */
 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
 
+	/* Read Number of Supported IAC */
+	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
+
+	/* Read Current IAC LAP */
+	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
+
 	/* Clear Event Filters */
 	flt_type = HCI_FLT_CLEAR_ALL;
 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
@@ -364,8 +965,10 @@
 	param = __constant_cpu_to_le16(0x7d00);
 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-	/* Read page scan parameters */
-	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
+	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
+	 * but it does not support page scan related HCI commands.
+	 */
+	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
 	}
@@ -519,6 +1122,8 @@
 
 	if (lmp_bredr_capable(hdev))
 		bredr_setup(req);
+	else
+		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
 
 	if (lmp_le_capable(hdev))
 		le_setup(req);
@@ -532,6 +1137,14 @@
 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
 	if (lmp_ssp_capable(hdev)) {
+		/* When SSP is available, then the host features page
+		 * should be available as well. However, some
+		 * controllers list the max_page as 0 as long as SSP
+		 * has not been enabled. To achieve proper debugging
+		 * output, force max_page to a minimum of 1.
+		 */
+		hdev->max_page = 0x01;
+
 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
 			u8 mode = 0x01;
 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -607,6 +1220,34 @@
 			    &cp);
 }
 
+static void hci_set_event_mask_page_2(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+	/* If Connectionless Slave Broadcast master role is supported
+	 * enable all necessary events for it.
+	 */
+	if (hdev->features[2][0] & 0x01) {
+		events[1] |= 0x40;	/* Triggered Clock Capture */
+		events[1] |= 0x80;	/* Synchronization Train Complete */
+		events[2] |= 0x10;	/* Slave Page Response Timeout */
+		events[2] |= 0x20;	/* CSB Channel Map Change */
+	}
+
+	/* If Connectionless Slave Broadcast slave role is supported
+	 * enable all necessary events for it.
+	 */
+	if (hdev->features[2][0] & 0x02) {
+		events[2] |= 0x01;	/* Synchronization Train Received */
+		events[2] |= 0x02;	/* CSB Receive */
+		events[2] |= 0x04;	/* CSB Timeout */
+		events[2] |= 0x08;	/* Truncated Page Complete */
+	}
+
+	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+}
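As a worked example, a controller supporting both Connectionless Slave
Broadcast roles (features[2][0] & 0x03 fully set) ends up sending the
mask with:

	/* events[1] == 0xc0 (0x40 | 0x80)
	 * events[2] == 0x3f (0x10 | 0x20 | 0x01 | 0x02 | 0x04 | 0x08)
	 */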
+
 static void hci_init3_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
@@ -634,8 +1275,17 @@
 		hci_setup_link_policy(req);
 
 	if (lmp_le_capable(hdev)) {
+		/* If the controller has a public BD_ADDR, then by
+		 * default use that one. If this is an LE only
+		 * controller without one, default to the random
+		 * address.
+		 */
+		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
+		else
+			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
+
 		hci_set_le_support(req);
-		hci_update_ad(req);
 	}
 
 	/* Read features beyond page 1 if available */
@@ -648,6 +1298,19 @@
 	}
 }
 
+static void hci_init4_req(struct hci_request *req, unsigned long opt)
+{
+	struct hci_dev *hdev = req->hdev;
+
+	/* Set event mask page 2 if the HCI command for it is supported */
+	if (hdev->commands[22] & 0x04)
+		hci_set_event_mask_page_2(req);
+
+	/* Check for Synchronization Train support */
+	if (hdev->features[2][0] & 0x04)
+		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
+}
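hdev->commands[] is an octet-for-octet copy of the supported commands
bitmap read via HCI_OP_READ_LOCAL_COMMANDS, so the check above decodes
as:

	/* commands[22] & 0x04 -> octet 22, bit 2: Set Event Mask Page 2 */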
+
 static int __hci_init(struct hci_dev *hdev)
 {
 	int err;
@@ -656,6 +1319,14 @@
 	if (err < 0)
 		return err;
 
+	/* The Device Under Test (DUT) mode is special and available for
+	 * all controller types. So just create it early on.
+	 */
+	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+				    &dut_mode_fops);
+	}
+
 	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
 	 * BR/EDR/LE type controllers. AMP controllers only need the
 	 * first stage init.
@@ -667,7 +1338,75 @@
 	if (err < 0)
 		return err;
 
-	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+	if (err < 0)
+		return err;
+
+	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+	if (err < 0)
+		return err;
+
+	/* Only create debugfs entries during the initial setup
+	 * phase and not every time the controller gets powered on.
+	 */
+	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+		return 0;
+
+	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+			    &features_fops);
+	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+			   &hdev->manufacturer);
+	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+			    &blacklist_fops);
+	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
+	if (lmp_bredr_capable(hdev)) {
+		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
+				    hdev, &inquiry_cache_fops);
+		debugfs_create_file("link_keys", 0400, hdev->debugfs,
+				    hdev, &link_keys_fops);
+		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+				    hdev, &use_debug_keys_fops);
+		debugfs_create_file("dev_class", 0444, hdev->debugfs,
+				    hdev, &dev_class_fops);
+		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
+				    hdev, &voice_setting_fops);
+	}
+
+	if (lmp_ssp_capable(hdev)) {
+		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+				    hdev, &auto_accept_delay_fops);
+		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
+				    hdev, &ssp_debug_mode_fops);
+	}
+
+	if (lmp_sniff_capable(hdev)) {
+		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+				    hdev, &idle_timeout_fops);
+		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+				    hdev, &sniff_min_interval_fops);
+		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+				    hdev, &sniff_max_interval_fops);
+	}
+
+	if (lmp_le_capable(hdev)) {
+		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+				  &hdev->le_white_list_size);
+		debugfs_create_file("static_address", 0444, hdev->debugfs,
+				    hdev, &static_address_fops);
+		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
+				    hdev, &own_address_type_fops);
+		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
+				    hdev, &long_term_keys_fops);
+		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
+				    hdev, &conn_min_interval_fops);
+		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
+				    hdev, &conn_max_interval_fops);
+	}
+
+	return 0;
 }
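On a dual-mode controller this leaves a debugfs tree roughly like the
following sketch (only the entries created above, under the "bluetooth"
directory registered during hci_register_dev() below):

	/sys/kernel/debug/bluetooth/hci0/
		dut_mode  features  manufacturer  hci_version  hci_revision
		blacklist  uuids  inquiry_cache  link_keys  use_debug_keys
		dev_class  voice_setting  auto_accept_delay  ssp_debug_mode
		idle_timeout  sniff_min_interval  sniff_max_interval
		white_list_size  static_address  own_address_type
		long_term_keys  conn_min_interval  conn_max_interval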
 
 static void hci_scan_req(struct hci_request *req, unsigned long opt)
@@ -984,6 +1723,21 @@
 	if (!hdev)
 		return -ENODEV;
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		err = -EBUSY;
+		goto done;
+	}
+
+	if (hdev->dev_type != HCI_BREDR) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
 	hci_dev_lock(hdev);
 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
@@ -1043,100 +1797,10 @@
 	return err;
 }
 
-static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
+static int hci_dev_do_open(struct hci_dev *hdev)
 {
-	u8 ad_len = 0, flags = 0;
-	size_t name_len;
-
-	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
-		flags |= LE_AD_GENERAL;
-
-	if (!lmp_bredr_capable(hdev))
-		flags |= LE_AD_NO_BREDR;
-
-	if (lmp_le_br_capable(hdev))
-		flags |= LE_AD_SIM_LE_BREDR_CTRL;
-
-	if (lmp_host_le_br_capable(hdev))
-		flags |= LE_AD_SIM_LE_BREDR_HOST;
-
-	if (flags) {
-		BT_DBG("adv flags 0x%02x", flags);
-
-		ptr[0] = 2;
-		ptr[1] = EIR_FLAGS;
-		ptr[2] = flags;
-
-		ad_len += 3;
-		ptr += 3;
-	}
-
-	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
-		ptr[0] = 2;
-		ptr[1] = EIR_TX_POWER;
-		ptr[2] = (u8) hdev->adv_tx_power;
-
-		ad_len += 3;
-		ptr += 3;
-	}
-
-	name_len = strlen(hdev->dev_name);
-	if (name_len > 0) {
-		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
-
-		if (name_len > max_len) {
-			name_len = max_len;
-			ptr[1] = EIR_NAME_SHORT;
-		} else
-			ptr[1] = EIR_NAME_COMPLETE;
-
-		ptr[0] = name_len + 1;
-
-		memcpy(ptr + 2, hdev->dev_name, name_len);
-
-		ad_len += (name_len + 2);
-		ptr += (name_len + 2);
-	}
-
-	return ad_len;
-}
-
-void hci_update_ad(struct hci_request *req)
-{
-	struct hci_dev *hdev = req->hdev;
-	struct hci_cp_le_set_adv_data cp;
-	u8 len;
-
-	if (!lmp_le_capable(hdev))
-		return;
-
-	memset(&cp, 0, sizeof(cp));
-
-	len = create_ad(hdev, cp.data);
-
-	if (hdev->adv_data_len == len &&
-	    memcmp(cp.data, hdev->adv_data, len) == 0)
-		return;
-
-	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
-	hdev->adv_data_len = len;
-
-	cp.length = len;
-
-	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
-}
-
-/* ---- HCI ioctl helpers ---- */
-
-int hci_dev_open(__u16 dev)
-{
-	struct hci_dev *hdev;
 	int ret = 0;
 
-	hdev = hci_dev_get(dev);
-	if (!hdev)
-		return -ENODEV;
-
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	hci_req_lock(hdev);
@@ -1146,13 +1810,29 @@
 		goto done;
 	}
 
-	/* Check for rfkill but allow the HCI setup stage to proceed
-	 * (which in itself doesn't cause any RF activity).
-	 */
-	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
-	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
-		ret = -ERFKILL;
-		goto done;
+	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+		/* Check for rfkill but allow the HCI setup stage to
+		 * proceed (which in itself doesn't cause any RF activity).
+		 */
+		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+			ret = -ERFKILL;
+			goto done;
+		}
+
+		/* Check for valid public address or a configured static
+		 * random address, but let the HCI setup proceed to
+		 * be able to determine if there is a public address
+		 * or not.
+		 *
+		 * This check is only valid for BR/EDR controllers
+		 * since AMP controllers do not have an address.
+		 */
+		if (hdev->dev_type == HCI_BREDR &&
+		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+			ret = -EADDRNOTAVAIL;
+			goto done;
+		}
 	}
 
 	if (test_bit(HCI_UP, &hdev->flags)) {
@@ -1172,16 +1852,11 @@
 		ret = hdev->setup(hdev);
 
 	if (!ret) {
-		/* Treat all non BR/EDR controllers as raw devices if
-		 * enable_hs is not set.
-		 */
-		if (hdev->dev_type != HCI_BREDR && !enable_hs)
-			set_bit(HCI_RAW, &hdev->flags);
-
 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 			set_bit(HCI_RAW, &hdev->flags);
 
-		if (!test_bit(HCI_RAW, &hdev->flags))
+		if (!test_bit(HCI_RAW, &hdev->flags) &&
+		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
 			ret = __hci_init(hdev);
 	}
 
@@ -1192,7 +1867,8 @@
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-		    mgmt_valid_hdev(hdev)) {
+		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+		    hdev->dev_type == HCI_BREDR) {
 			hci_dev_lock(hdev);
 			mgmt_powered(hdev, 1);
 			hci_dev_unlock(hdev);
@@ -1220,10 +1896,41 @@
 
 done:
 	hci_req_unlock(hdev);
-	hci_dev_put(hdev);
 	return ret;
 }
 
+/* ---- HCI ioctl helpers ---- */
+
+int hci_dev_open(__u16 dev)
+{
+	struct hci_dev *hdev;
+	int err;
+
+	hdev = hci_dev_get(dev);
+	if (!hdev)
+		return -ENODEV;
+
+	/* We need to ensure that no other power on/off work is pending
+	 * before proceeding to call hci_dev_do_open. This is
+	 * particularly important if the setup procedure has not yet
+	 * completed.
+	 */
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+		cancel_delayed_work(&hdev->power_off);
+
+	/* After this call it is guaranteed that the setup procedure
+	 * has finished. This means that error conditions like RFKILL
+	 * or no valid public or static random address apply.
+	 */
+	flush_workqueue(hdev->req_workqueue);
+
+	err = hci_dev_do_open(hdev);
+
+	hci_dev_put(hdev);
+
+	return err;
+}
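For reference, the classic path into this helper is the HCIDEVUP ioctl
on a raw HCI socket; a minimal userspace sketch (needs CAP_NET_ADMIN):

	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ioctl(ctl, HCIDEVUP, 0) < 0)	/* bring up hci0 */
		perror("HCIDEVUP");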
+
 static int hci_dev_do_close(struct hci_dev *hdev)
 {
 	BT_DBG("%s %p", hdev->name, hdev);
@@ -1247,6 +1954,7 @@
 		cancel_delayed_work(&hdev->discov_off);
 		hdev->discov_timeout = 0;
 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
 	}
 
 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
@@ -1268,6 +1976,7 @@
 	skb_queue_purge(&hdev->cmd_q);
 	atomic_set(&hdev->cmd_cnt, 1);
 	if (!test_bit(HCI_RAW, &hdev->flags) &&
+	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1300,15 +2009,16 @@
 	hdev->flags = 0;
 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
 
-	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
-	    mgmt_valid_hdev(hdev)) {
-		hci_dev_lock(hdev);
-		mgmt_powered(hdev, 0);
-		hci_dev_unlock(hdev);
+	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+		if (hdev->dev_type == HCI_BREDR) {
+			hci_dev_lock(hdev);
+			mgmt_powered(hdev, 0);
+			hci_dev_unlock(hdev);
+		}
 	}
 
 	/* Controller radio is available but is currently powered down */
-	hdev->amp_status = 0;
+	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
 
 	memset(hdev->eir, 0, sizeof(hdev->eir));
 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
@@ -1328,11 +2038,17 @@
 	if (!hdev)
 		return -ENODEV;
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		err = -EBUSY;
+		goto done;
+	}
+
 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
 		cancel_delayed_work(&hdev->power_off);
 
 	err = hci_dev_do_close(hdev);
 
+done:
 	hci_dev_put(hdev);
 	return err;
 }
@@ -1348,8 +2064,15 @@
 
 	hci_req_lock(hdev);
 
-	if (!test_bit(HCI_UP, &hdev->flags))
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		ret = -ENETDOWN;
 		goto done;
+	}
+
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		ret = -EBUSY;
+		goto done;
+	}
 
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
@@ -1384,10 +2107,15 @@
 	if (!hdev)
 		return -ENODEV;
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		ret = -EBUSY;
+		goto done;
+	}
+
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
+done:
 	hci_dev_put(hdev);
-
 	return ret;
 }
 
@@ -1404,6 +2132,21 @@
 	if (!hdev)
 		return -ENODEV;
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		err = -EBUSY;
+		goto done;
+	}
+
+	if (hdev->dev_type != HCI_BREDR) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
 	switch (cmd) {
 	case HCISETAUTH:
 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
@@ -1462,6 +2205,7 @@
 		break;
 	}
 
+done:
 	hci_dev_put(hdev);
 	return err;
 }
@@ -1534,7 +2278,7 @@
 
 	strcpy(di.name, hdev->name);
 	di.bdaddr   = hdev->bdaddr;
-	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
+	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
 	di.flags    = hdev->flags;
 	di.pkt_type = hdev->pkt_type;
 	if (lmp_bredr_capable(hdev)) {
@@ -1570,6 +2314,9 @@
 
 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+		return -EBUSY;
+
 	if (blocked) {
 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
 		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
@@ -1592,13 +2339,20 @@
 
 	BT_DBG("%s", hdev->name);
 
-	err = hci_dev_open(hdev->id);
+	err = hci_dev_do_open(hdev);
 	if (err < 0) {
 		mgmt_set_powered_failed(hdev, err);
 		return;
 	}
 
-	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+	/* During the HCI setup phase, a few error conditions are
+	 * ignored and they need to be checked now. If they are still
+	 * valid, it is important to turn the device back off.
+	 */
+	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
+	    (hdev->dev_type == HCI_BREDR &&
+	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
 		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
 		hci_dev_do_close(hdev);
 	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -1623,19 +2377,12 @@
 static void hci_discov_off(struct work_struct *work)
 {
 	struct hci_dev *hdev;
-	u8 scan = SCAN_PAGE;
 
 	hdev = container_of(work, struct hci_dev, discov_off.work);
 
 	BT_DBG("%s", hdev->name);
 
-	hci_dev_lock(hdev);
-
-	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-
-	hdev->discov_timeout = 0;
-
-	hci_dev_unlock(hdev);
+	mgmt_discoverable_timeout(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1958,13 +2705,15 @@
 	return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+					 bdaddr_t *bdaddr, u8 type)
 {
 	struct bdaddr_list *b;
 
-	list_for_each_entry(b, &hdev->blacklist, list)
-		if (bacmp(bdaddr, &b->bdaddr) == 0)
+	list_for_each_entry(b, &hdev->blacklist, list) {
+		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
 			return b;
+	}
 
 	return NULL;
 }
@@ -1974,9 +2723,7 @@
 	struct list_head *p, *n;
 
 	list_for_each_safe(p, n, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
+		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
 
 		list_del(p);
 		kfree(b);
@@ -1989,10 +2736,10 @@
 {
 	struct bdaddr_list *entry;
 
-	if (bacmp(bdaddr, BDADDR_ANY) == 0)
+	if (!bacmp(bdaddr, BDADDR_ANY))
 		return -EBADF;
 
-	if (hci_blacklist_lookup(hdev, bdaddr))
+	if (hci_blacklist_lookup(hdev, bdaddr, type))
 		return -EEXIST;
 
 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -2000,6 +2747,7 @@
 		return -ENOMEM;
 
 	bacpy(&entry->bdaddr, bdaddr);
+	entry->bdaddr_type = type;
 
 	list_add(&entry->list, &hdev->blacklist);
 
@@ -2010,10 +2758,10 @@
 {
 	struct bdaddr_list *entry;
 
-	if (bacmp(bdaddr, BDADDR_ANY) == 0)
+	if (!bacmp(bdaddr, BDADDR_ANY))
 		return hci_blacklist_clear(hdev);
 
-	entry = hci_blacklist_lookup(hdev, bdaddr);
+	entry = hci_blacklist_lookup(hdev, bdaddr, type);
 	if (!entry)
 		return -ENOENT;
 
@@ -2111,13 +2859,19 @@
 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
-	hdev->io_capability = 0x03; /* No Input No Output */
+	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
+	hdev->io_capability = 0x03;	/* No Input No Output */
 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
 
+	hdev->le_scan_interval = 0x0060;
+	hdev->le_scan_window = 0x0030;
+	hdev->le_conn_min_interval = 0x0028;
+	hdev->le_conn_max_interval = 0x0038;
+
 	mutex_init(&hdev->lock);
 	mutex_init(&hdev->req_lock);
 
@@ -2206,7 +2960,12 @@
 		goto err;
 	}
 
-	error = hci_add_sysfs(hdev);
+	if (!IS_ERR_OR_NULL(bt_debugfs))
+		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+
+	dev_set_name(&hdev->dev, "%s", hdev->name);
+
+	error = device_add(&hdev->dev);
 	if (error < 0)
 		goto err_wqueue;
 
@@ -2224,9 +2983,14 @@
 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
 
 	set_bit(HCI_SETUP, &hdev->dev_flags);
+	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
 
-	if (hdev->dev_type != HCI_AMP)
-		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+	if (hdev->dev_type == HCI_BREDR) {
+		/* Assume BR/EDR support until proven otherwise (such as
+		 * through reading supported features during init).
+		 */
+		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+	}
 
 	write_lock(&hci_dev_list_lock);
 	list_add(&hdev->list, &hci_dev_list);
@@ -2289,7 +3053,9 @@
 		rfkill_destroy(hdev->rfkill);
 	}
 
-	hci_del_sysfs(hdev);
+	device_del(&hdev->dev);
+
+	debugfs_remove_recursive(hdev->debugfs);
 
 	destroy_workqueue(hdev->workqueue);
 	destroy_workqueue(hdev->req_workqueue);
@@ -2325,9 +3091,8 @@
 EXPORT_SYMBOL(hci_resume_dev);
 
 /* Receive frame from HCI drivers */
-int hci_recv_frame(struct sk_buff *skb)
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
 		      && !test_bit(HCI_INIT, &hdev->flags))) {
 		kfree_skb(skb);
@@ -2386,7 +3151,6 @@
 		scb->expect = hlen;
 		scb->pkt_type = type;
 
-		skb->dev = (void *) hdev;
 		hdev->reassembly[index] = skb;
 	}
 
@@ -2446,7 +3210,7 @@
 			/* Complete frame */
 
 			bt_cb(skb)->pkt_type = type;
-			hci_recv_frame(skb);
+			hci_recv_frame(hdev, skb);
 
 			hdev->reassembly[index] = NULL;
 			return remain;
@@ -2537,15 +3301,8 @@
 }
 EXPORT_SYMBOL(hci_unregister_cb);
 
-static int hci_send_frame(struct sk_buff *skb)
+static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
-	if (!hdev) {
-		kfree_skb(skb);
-		return -ENODEV;
-	}
-
 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
 	/* Time stamp */
@@ -2562,7 +3319,8 @@
 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);
 
-	return hdev->send(skb);
+	if (hdev->send(hdev, skb) < 0)
+		BT_ERR("%s sending frame failed", hdev->name);
 }
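Drivers now get the hdev pointer as an argument instead of fishing it
out of skb->dev. A hedged sketch of a driver's send callback under the
new signature (example_data and its members are hypothetical):

	static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
	{
		struct example_data *data = hci_get_drvdata(hdev);

		skb_queue_tail(&data->txq, skb);
		schedule_work(&data->tx_work);

		return 0;
	}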
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -2625,7 +3383,6 @@
 	BT_DBG("skb len %d", skb->len);
 
 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-	skb->dev = (void *) hdev;
 
 	return skb;
 }
@@ -2769,7 +3526,6 @@
 		do {
 			skb = list; list = list->next;
 
-			skb->dev = (void *) hdev;
 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
 			hci_add_acl_hdr(skb, conn->handle, flags);
 
@@ -2788,8 +3544,6 @@
 
 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
 
-	skb->dev = (void *) hdev;
-
 	hci_queue_acl(chan, &chan->data_q, skb, flags);
 
 	queue_work(hdev->workqueue, &hdev->tx_work);
@@ -2810,7 +3564,6 @@
 	skb_reset_transport_header(skb);
 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
 
-	skb->dev = (void *) hdev;
 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
 	skb_queue_tail(&conn->data_q, skb);
@@ -3075,7 +3828,7 @@
 			hci_conn_enter_active_mode(chan->conn,
 						   bt_cb(skb)->force_active);
 
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 			hdev->acl_last_tx = jiffies;
 
 			hdev->acl_cnt--;
@@ -3127,7 +3880,7 @@
 			hci_conn_enter_active_mode(chan->conn,
 						   bt_cb(skb)->force_active);
 
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 			hdev->acl_last_tx = jiffies;
 
 			hdev->block_cnt -= blocks;
@@ -3180,7 +3933,7 @@
 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 
 			conn->sent++;
 			if (conn->sent == ~0)
@@ -3204,7 +3957,7 @@
 						     &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 
 			conn->sent++;
 			if (conn->sent == ~0)
@@ -3246,7 +3999,7 @@
 
 			skb = skb_dequeue(&chan->data_q);
 
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 			hdev->le_last_tx = jiffies;
 
 			cnt--;
@@ -3272,19 +4025,17 @@
 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
 	       hdev->sco_cnt, hdev->le_cnt);
 
-	/* Schedule queues and send stuff to HCI driver */
-
-	hci_sched_acl(hdev);
-
-	hci_sched_sco(hdev);
-
-	hci_sched_esco(hdev);
-
-	hci_sched_le(hdev);
+	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		/* Schedule queues and send stuff to HCI driver */
+		hci_sched_acl(hdev);
+		hci_sched_sco(hdev);
+		hci_sched_esco(hdev);
+		hci_sched_le(hdev);
+	}
 
 	/* Send next queued raw (unknown type) packet */
 	while ((skb = skb_dequeue(&hdev->raw_q)))
-		hci_send_frame(skb);
+		hci_send_frame(hdev, skb);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
@@ -3471,7 +4222,8 @@
 			hci_send_to_sock(hdev, skb);
 		}
 
-		if (test_bit(HCI_RAW, &hdev->flags)) {
+		if (test_bit(HCI_RAW, &hdev->flags) ||
+		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
 			kfree_skb(skb);
 			continue;
 		}
@@ -3526,10 +4278,10 @@
 
 		kfree_skb(hdev->sent_cmd);
 
-		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
 		if (hdev->sent_cmd) {
 			atomic_dec(&hdev->cmd_cnt);
-			hci_send_frame(skb);
+			hci_send_frame(hdev, skb);
 			if (test_bit(HCI_RESET, &hdev->flags))
 				del_timer(&hdev->cmd_timer);
 			else
@@ -3541,15 +4293,3 @@
 		}
 	}
 }
-
-u8 bdaddr_to_le(u8 bdaddr_type)
-{
-	switch (bdaddr_type) {
-	case BDADDR_LE_PUBLIC:
-		return ADDR_LE_DEV_PUBLIC;
-
-	default:
-		/* Fallback to LE Random address type */
-		return ADDR_LE_DEV_RANDOM;
-	}
-}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8db3e89..5935f74 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -29,8 +29,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
 
 /* Handle HCI Event packets */
 
@@ -194,6 +195,11 @@
 
 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
 	hdev->adv_data_len = 0;
+
+	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
+	hdev->scan_rsp_data_len = 0;
+
+	hdev->ssp_debug_mode = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -297,6 +303,11 @@
 		goto done;
 	}
 
+	/* We need to ensure that we set this back on if someone changed
+	 * the scan mode through a raw HCI socket.
+	 */
+	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
 
@@ -304,11 +315,6 @@
 		set_bit(HCI_ISCAN, &hdev->flags);
 		if (!old_iscan)
 			mgmt_discoverable(hdev, 1);
-		if (hdev->discov_timeout > 0) {
-			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
-			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
-					   to);
-		}
 	} else if (old_iscan)
 		mgmt_discoverable(hdev, 0);
 
@@ -412,6 +418,21 @@
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 }
 
+static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
+					  struct sk_buff *skb)
+{
+	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->num_iac = rp->num_iac;
+
+	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
+}
+
 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -449,14 +470,13 @@
 	if (rp->status)
 		return;
 
-	hdev->hci_ver = rp->hci_ver;
-	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
-	hdev->lmp_ver = rp->lmp_ver;
-	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
-	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
-
-	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
-	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
+	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+		hdev->hci_ver = rp->hci_ver;
+		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+		hdev->lmp_ver = rp->lmp_ver;
+		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
+	}
 }
 
 static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -536,7 +556,8 @@
 	if (rp->status)
 		return;
 
-	hdev->max_page = rp->max_page;
+	if (hdev->max_page < rp->max_page)
+		hdev->max_page = rp->max_page;
 
 	if (rp->page < HCI_MAX_PAGES)
 		memcpy(hdev->features[rp->page], rp->features, 8);
@@ -913,17 +934,9 @@
 
 	if (!status) {
 		if (*sent)
-			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+			set_bit(HCI_ADVERTISING, &hdev->dev_flags);
 		else
-			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
-	}
-
-	if (!test_bit(HCI_INIT, &hdev->flags)) {
-		struct hci_request req;
-
-		hci_req_init(&req, hdev);
-		hci_update_ad(&req);
-		hci_req_run(&req, NULL);
+			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
 	}
 
 	hci_dev_unlock(hdev);
@@ -994,20 +1007,20 @@
 		return;
 
 	if (!status) {
-		if (sent->le)
+		if (sent->le) {
 			hdev->features[1][0] |= LMP_HOST_LE;
-		else
+			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+		} else {
 			hdev->features[1][0] &= ~LMP_HOST_LE;
+			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+		}
 
 		if (sent->simul)
 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
 		else
 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
 	}
-
-	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-	    !test_bit(HCI_INIT, &hdev->flags))
-		mgmt_le_enable_complete(hdev, sent->le, status);
 }
 
 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
@@ -1291,9 +1304,11 @@
 		goto unlock;
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
-		struct hci_cp_auth_requested cp;
-		cp.handle = __cpu_to_le16(conn->handle);
-		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+		struct hci_cp_auth_requested auth_cp;
+
+		auth_cp.handle = __cpu_to_le16(conn->handle);
+		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+			     sizeof(auth_cp), &auth_cp);
 	}
 
 unlock:
@@ -1465,33 +1480,6 @@
 	hci_dev_unlock(hdev);
 }
 
-static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
-{
-	struct hci_conn *conn;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-	if (status) {
-		hci_dev_lock(hdev);
-
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-		if (!conn) {
-			hci_dev_unlock(hdev);
-			return;
-		}
-
-		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
-
-		conn->state = BT_CLOSED;
-		mgmt_connect_failed(hdev, &conn->dst, conn->type,
-				    conn->dst_type, status);
-		hci_proto_connect_cfm(conn, status);
-		hci_conn_del(conn);
-
-		hci_dev_unlock(hdev);
-	}
-}
-
 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
 {
 	struct hci_cp_create_phy_link *cp;
@@ -1706,7 +1694,7 @@
 				      &flags);
 
 	if ((mask & HCI_LM_ACCEPT) &&
-	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
 		/* Connection accepted */
 		struct inquiry_entry *ie;
 		struct hci_conn *conn;
@@ -1821,10 +1809,25 @@
 	}
 
 	if (ev->status == 0) {
-		if (conn->type == ACL_LINK && conn->flush_key)
+		u8 type = conn->type;
+
+		if (type == ACL_LINK && conn->flush_key)
 			hci_remove_link_key(hdev, &conn->dst);
 		hci_proto_disconn_cfm(conn, ev->reason);
 		hci_conn_del(conn);
+
+		/* Re-enable advertising if necessary, since it might
+		 * have been disabled by the connection. From the
+		 * HCI_LE_Set_Advertise_Enable command description in
+		 * the core specification (v4.0):
+		 * "The Controller shall continue advertising until the Host
+		 * issues an LE_Set_Advertise_Enable command with
+		 * Advertising_Enable set to 0x00 (Advertising is disabled)
+		 * or until a connection is created or until the Advertising
+		 * is timed out due to Directed Advertising."
+		 */
+		if (type == LE_LINK)
+			mgmt_reenable_advertising(hdev);
 	}
 
 unlock:
@@ -2139,6 +2142,10 @@
 		hci_cc_write_voice_setting(hdev, skb);
 		break;
 
+	case HCI_OP_READ_NUM_SUPPORTED_IAC:
+		hci_cc_read_num_supported_iac(hdev, skb);
+		break;
+
 	case HCI_OP_WRITE_SSP_MODE:
 		hci_cc_write_ssp_mode(hdev, skb);
 		break;
@@ -2342,10 +2349,6 @@
 		hci_cs_disconnect(hdev, ev->status);
 		break;
 
-	case HCI_OP_LE_CREATE_CONN:
-		hci_cs_le_create_conn(hdev, ev->status);
-		break;
-
 	case HCI_OP_CREATE_PHY_LINK:
 		hci_cs_create_phylink(hdev, ev->status);
 		break;
@@ -2548,7 +2551,6 @@
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
 		conn->mode = ev->mode;
-		conn->interval = __le16_to_cpu(ev->interval);
 
 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
 					&conn->flags)) {
@@ -2930,6 +2932,23 @@
 	hci_dev_unlock(hdev);
 }
 
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
+{
+	size_t parsed = 0;
+
+	while (parsed < eir_len) {
+		u8 field_len = eir[0];
+
+		if (field_len == 0)
+			return parsed;
+
+		parsed += field_len + 1;
+		eir += field_len + 1;
+	}
+
+	return eir_len;
+}
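EIR data is a sequence of length-prefixed fields terminated by a zero
length octet, so for example:

	/* 02 01 06  05 09 'h' 'c' 'i' '0'  00 ...
	 *
	 * parses as a 3-octet Flags field plus a 6-octet Complete Local
	 * Name field; eir_get_length() returns 9 significant octets.
	 */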
+
 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
 					    struct sk_buff *skb)
 {
@@ -3170,7 +3189,8 @@
 
 		if (hdev->auto_accept_delay > 0) {
 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
-			mod_timer(&conn->auto_accept_timer, jiffies + delay);
+			queue_delayed_work(conn->hdev->workqueue,
+					   &conn->auto_accept_work, delay);
 			goto unlock;
 		}
 
@@ -3485,6 +3505,17 @@
 
 		conn->dst_type = ev->bdaddr_type;
 
+		/* The advertising parameters for own address type
+		 * define which source address and source address
+		 * type this connection has.
+		 */
+		if (bacmp(&conn->src, BDADDR_ANY)) {
+			conn->src_type = ADDR_LE_DEV_PUBLIC;
+		} else {
+			bacpy(&conn->src, &hdev->static_addr);
+			conn->src_type = ADDR_LE_DEV_RANDOM;
+		}
+
 		if (ev->role == LE_CONN_ROLE_MASTER) {
 			conn->out = true;
 			conn->link_mode |= HCI_LM_MASTER;
@@ -3640,8 +3671,8 @@
 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
 
 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
-		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
-		u16 opcode = __le16_to_cpu(hdr->opcode);
+		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
 
 		hci_req_cmd_complete(hdev, opcode, 0);
 	}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 9bd7d95..71f0be1 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -66,6 +66,46 @@
 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 };
 
+static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
+{
+	struct hci_filter *flt;
+	int flt_type, flt_event;
+
+	/* Apply filter */
+	flt = &hci_pi(sk)->filter;
+
+	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
+		flt_type = 0;
+	else
+		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+
+	if (!test_bit(flt_type, &flt->type_mask))
+		return true;
+
+	/* Extra filter for event packets only */
+	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
+		return false;
+
+	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+
+	if (!hci_test_bit(flt_event, &flt->event_mask))
+		return true;
+
+	/* Check filter only when opcode is set */
+	if (!flt->opcode)
+		return false;
+
+	if (flt_event == HCI_EV_CMD_COMPLETE &&
+	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
+		return true;
+
+	if (flt_event == HCI_EV_CMD_STATUS &&
+	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
+		return true;
+
+	return false;
+}
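The filter consulted here is the one userspace installs with
setsockopt(); a sketch using the usual BlueZ helper macros from
<bluetooth/hci.h>:

	struct hci_filter flt;

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
	hci_filter_all_events(&flt);
	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));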
+
 /* Send frame to RAW socket */
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -77,7 +117,6 @@
 	read_lock(&hci_sk_list.lock);
 
 	sk_for_each(sk, &hci_sk_list.head) {
-		struct hci_filter *flt;
 		struct sk_buff *nskb;
 
 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
@@ -87,31 +126,19 @@
 		if (skb->sk == sk)
 			continue;
 
-		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
-			continue;
-
-		/* Apply filter */
-		flt = &hci_pi(sk)->filter;
-
-		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
-			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
-			      &flt->type_mask))
-			continue;
-
-		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
-			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
-
-			if (!hci_test_bit(evt, &flt->event_mask))
+		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+			if (is_filtered_packet(sk, skb))
 				continue;
-
-			if (flt->opcode &&
-			    ((evt == HCI_EV_CMD_COMPLETE &&
-			      flt->opcode !=
-			      get_unaligned((__le16 *)(skb->data + 3))) ||
-			     (evt == HCI_EV_CMD_STATUS &&
-			      flt->opcode !=
-			      get_unaligned((__le16 *)(skb->data + 4)))))
+		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+			if (!bt_cb(skb)->incoming)
 				continue;
+			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+				continue;
+		} else {
+			/* Don't send frame to other channel types */
+			continue;
 		}
 
 		if (!skb_copy) {
@@ -360,7 +387,6 @@
 	__net_timestamp(skb);
 
 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
-	skb->dev = (void *) hdev;
 	hci_send_to_sock(hdev, skb);
 	kfree_skb(skb);
 }
@@ -426,6 +452,12 @@
 	bt_sock_unlink(&hci_sk_list, sk);
 
 	if (hdev) {
+		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+			mgmt_index_added(hdev);
+			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+			hci_dev_close(hdev->id);
+		}
+
 		atomic_dec(&hdev->promisc);
 		hci_dev_put(hdev);
 	}
@@ -449,7 +481,7 @@
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_add(hdev, &bdaddr, 0);
+	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 
 	hci_dev_unlock(hdev);
 
@@ -466,7 +498,7 @@
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_del(hdev, &bdaddr, 0);
+	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 
 	hci_dev_unlock(hdev);
 
@@ -482,6 +514,12 @@
 	if (!hdev)
 		return -EBADFD;
 
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+		return -EBUSY;
+
+	if (hdev->dev_type != HCI_BREDR)
+		return -EOPNOTSUPP;
+
 	switch (cmd) {
 	case HCISETRAW:
 		if (!capable(CAP_NET_ADMIN))
@@ -512,23 +550,29 @@
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 		return hci_sock_blacklist_del(hdev, (void __user *) arg);
-
-	default:
-		if (hdev->ioctl)
-			return hdev->ioctl(hdev, cmd, arg);
-		return -EINVAL;
 	}
+
+	return -ENOIOCTLCMD;
 }
 
 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 			  unsigned long arg)
 {
-	struct sock *sk = sock->sk;
 	void __user *argp = (void __user *) arg;
+	struct sock *sk = sock->sk;
 	int err;
 
 	BT_DBG("cmd %x arg %lx", cmd, arg);
 
+	lock_sock(sk);
+
+	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+		err = -EBADFD;
+		goto done;
+	}
+
+	release_sock(sk);
+
 	switch (cmd) {
 	case HCIGETDEVLIST:
 		return hci_get_dev_list(argp);
@@ -573,13 +617,15 @@
 
 	case HCIINQUIRY:
 		return hci_inquiry(argp);
-
-	default:
-		lock_sock(sk);
-		err = hci_sock_bound_ioctl(sk, cmd, arg);
-		release_sock(sk);
-		return err;
 	}
+
+	lock_sock(sk);
+
+	err = hci_sock_bound_ioctl(sk, cmd, arg);
+
+done:
+	release_sock(sk);
+	return err;
 }
 
 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
@@ -629,6 +675,56 @@
 		hci_pi(sk)->hdev = hdev;
 		break;
 
+	case HCI_CHANNEL_USER:
+		if (hci_pi(sk)->hdev) {
+			err = -EALREADY;
+			goto done;
+		}
+
+		if (haddr.hci_dev == HCI_DEV_NONE) {
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (!capable(CAP_NET_ADMIN)) {
+			err = -EPERM;
+			goto done;
+		}
+
+		hdev = hci_dev_get(haddr.hci_dev);
+		if (!hdev) {
+			err = -ENODEV;
+			goto done;
+		}
+
+		if (test_bit(HCI_UP, &hdev->flags) ||
+		    test_bit(HCI_INIT, &hdev->flags) ||
+		    test_bit(HCI_SETUP, &hdev->dev_flags)) {
+			err = -EBUSY;
+			hci_dev_put(hdev);
+			goto done;
+		}
+
+		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+			err = -EUSERS;
+			hci_dev_put(hdev);
+			goto done;
+		}
+
+		mgmt_index_removed(hdev);
+
+		err = hci_dev_open(hdev->id);
+		if (err) {
+			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+			hci_dev_put(hdev);
+			goto done;
+		}
+
+		atomic_inc(&hdev->promisc);
+
+		hci_pi(sk)->hdev = hdev;
+		break;
+
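From userspace, claiming a controller for exclusive access is then an
ordinary bind() with hci_channel set accordingly; a minimal sketch:

	struct sockaddr_hci a;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	memset(&a, 0, sizeof(a));
	a.hci_family  = AF_BLUETOOTH;
	a.hci_dev     = 0;			/* hci0 */
	a.hci_channel = HCI_CHANNEL_USER;

	if (bind(fd, (struct sockaddr *) &a, sizeof(a)) < 0)
		perror("bind");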
 	case HCI_CHANNEL_CONTROL:
 		if (haddr.hci_dev != HCI_DEV_NONE) {
 			err = -EINVAL;
@@ -677,22 +773,30 @@
 {
 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
 	struct sock *sk = sock->sk;
-	struct hci_dev *hdev = hci_pi(sk)->hdev;
+	struct hci_dev *hdev;
+	int err = 0;
 
 	BT_DBG("sock %p sk %p", sock, sk);
 
-	if (!hdev)
-		return -EBADFD;
+	if (peer)
+		return -EOPNOTSUPP;
 
 	lock_sock(sk);
 
+	hdev = hci_pi(sk)->hdev;
+	if (!hdev) {
+		err = -EBADFD;
+		goto done;
+	}
+
 	*addr_len = sizeof(*haddr);
 	haddr->hci_family = AF_BLUETOOTH;
 	haddr->hci_dev    = hdev->id;
-	haddr->hci_channel= 0;
+	haddr->hci_channel= hci_pi(sk)->channel;
 
+done:
 	release_sock(sk);
-	return 0;
+	return err;
 }
 
 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
@@ -767,6 +871,7 @@
 	case HCI_CHANNEL_RAW:
 		hci_sock_cmsg(sk, msg, skb);
 		break;
+	case HCI_CHANNEL_USER:
 	case HCI_CHANNEL_CONTROL:
 	case HCI_CHANNEL_MONITOR:
 		sock_recv_timestamp(msg, sk, skb);
@@ -801,6 +906,7 @@
 
 	switch (hci_pi(sk)->channel) {
 	case HCI_CHANNEL_RAW:
+	case HCI_CHANNEL_USER:
 		break;
 	case HCI_CHANNEL_CONTROL:
 		err = mgmt_control(sk, msg, len);
@@ -835,9 +941,9 @@
 
 	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
 	skb_pull(skb, 1);
-	skb->dev = (void *) hdev;
 
-	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+	if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
+	    bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
 		u16 opcode = get_unaligned_le16(skb->data);
 		u16 ogf = hci_opcode_ogf(opcode);
 		u16 ocf = hci_opcode_ocf(opcode);
@@ -868,6 +974,14 @@
 			goto drop;
 		}
 
+		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
+		    bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+			err = -EINVAL;
+			goto drop;
+		}
+
 		skb_queue_tail(&hdev->raw_q, skb);
 		queue_work(hdev->workqueue, &hdev->tx_work);
 	}
@@ -895,7 +1009,7 @@
 	lock_sock(sk);
 
 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
-		err = -EINVAL;
+		err = -EBADFD;
 		goto done;
 	}
 
@@ -981,7 +1095,7 @@
 	lock_sock(sk);
 
 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
-		err = -EINVAL;
+		err = -EBADFD;
 		goto done;
 	}
 
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index edf623a..0b61250 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,17 +1,12 @@
 /* Bluetooth HCI driver model support. */
 
-#include <linux/debugfs.h>
 #include <linux/module.h>
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 static struct class *bt_class;
 
-struct dentry *bt_debugfs;
-EXPORT_SYMBOL_GPL(bt_debugfs);
-
 static inline char *link_typetostr(int type)
 {
 	switch (type) {
@@ -42,29 +37,15 @@
 	return sprintf(buf, "%pMR\n", &conn->dst);
 }
 
-static ssize_t show_link_features(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct hci_conn *conn = to_hci_conn(dev);
-
-	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		       conn->features[0][0], conn->features[0][1],
-		       conn->features[0][2], conn->features[0][3],
-		       conn->features[0][4], conn->features[0][5],
-		       conn->features[0][6], conn->features[0][7]);
-}
-
 #define LINK_ATTR(_name, _mode, _show, _store) \
 struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
 
 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
-static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
 
 static struct attribute *bt_link_attrs[] = {
 	&link_attr_type.attr,
 	&link_attr_address.attr,
-	&link_attr_features.attr,
 	NULL
 };
 
@@ -150,28 +131,6 @@
 	hci_dev_put(hdev);
 }
 
-static inline char *host_bustostr(int bus)
-{
-	switch (bus) {
-	case HCI_VIRTUAL:
-		return "VIRTUAL";
-	case HCI_USB:
-		return "USB";
-	case HCI_PCCARD:
-		return "PCCARD";
-	case HCI_UART:
-		return "UART";
-	case HCI_RS232:
-		return "RS232";
-	case HCI_PCI:
-		return "PCI";
-	case HCI_SDIO:
-		return "SDIO";
-	default:
-		return "UNKNOWN";
-	}
-}
-
 static inline char *host_typetostr(int type)
 {
 	switch (type) {
@@ -184,13 +143,6 @@
 	}
 }
 
-static ssize_t show_bus(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
-}
-
 static ssize_t show_type(struct device *dev,
 			 struct device_attribute *attr, char *buf)
 {
@@ -212,14 +164,6 @@
 	return sprintf(buf, "%s\n", name);
 }
 
-static ssize_t show_class(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
-		       hdev->dev_class[1], hdev->dev_class[0]);
-}
-
 static ssize_t show_address(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
@@ -227,150 +171,14 @@
 	return sprintf(buf, "%pMR\n", &hdev->bdaddr);
 }
 
-static ssize_t show_features(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-
-	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		       hdev->features[0][0], hdev->features[0][1],
-		       hdev->features[0][2], hdev->features[0][3],
-		       hdev->features[0][4], hdev->features[0][5],
-		       hdev->features[0][6], hdev->features[0][7]);
-}
-
-static ssize_t show_manufacturer(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->manufacturer);
-}
-
-static ssize_t show_hci_version(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->hci_ver);
-}
-
-static ssize_t show_hci_revision(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->hci_rev);
-}
-
-static ssize_t show_idle_timeout(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->idle_timeout);
-}
-
-static ssize_t store_idle_timeout(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	unsigned int val;
-	int rv;
-
-	rv = kstrtouint(buf, 0, &val);
-	if (rv < 0)
-		return rv;
-
-	if (val != 0 && (val < 500 || val > 3600000))
-		return -EINVAL;
-
-	hdev->idle_timeout = val;
-
-	return count;
-}
-
-static ssize_t show_sniff_max_interval(struct device *dev,
-				       struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->sniff_max_interval);
-}
-
-static ssize_t store_sniff_max_interval(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	u16 val;
-	int rv;
-
-	rv = kstrtou16(buf, 0, &val);
-	if (rv < 0)
-		return rv;
-
-	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
-		return -EINVAL;
-
-	hdev->sniff_max_interval = val;
-
-	return count;
-}
-
-static ssize_t show_sniff_min_interval(struct device *dev,
-				       struct device_attribute *attr, char *buf)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "%d\n", hdev->sniff_min_interval);
-}
-
-static ssize_t store_sniff_min_interval(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	struct hci_dev *hdev = to_hci_dev(dev);
-	u16 val;
-	int rv;
-
-	rv = kstrtou16(buf, 0, &val);
-	if (rv < 0)
-		return rv;
-
-	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
-		return -EINVAL;
-
-	hdev->sniff_min_interval = val;
-
-	return count;
-}
-
-static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
-static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
-static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
-static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
-
-static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
-		   show_idle_timeout, store_idle_timeout);
-static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
-		   show_sniff_max_interval, store_sniff_max_interval);
-static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
-		   show_sniff_min_interval, store_sniff_min_interval);
 
 static struct attribute *bt_host_attrs[] = {
-	&dev_attr_bus.attr,
 	&dev_attr_type.attr,
 	&dev_attr_name.attr,
-	&dev_attr_class.attr,
 	&dev_attr_address.attr,
-	&dev_attr_features.attr,
-	&dev_attr_manufacturer.attr,
-	&dev_attr_hci_version.attr,
-	&dev_attr_hci_revision.attr,
-	&dev_attr_idle_timeout.attr,
-	&dev_attr_sniff_max_interval.attr,
-	&dev_attr_sniff_min_interval.attr,
 	NULL
 };
 
@@ -396,141 +204,6 @@
 	.release = bt_host_release,
 };
 
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
-	struct hci_dev *hdev = f->private;
-	struct discovery_state *cache = &hdev->discovery;
-	struct inquiry_entry *e;
-
-	hci_dev_lock(hdev);
-
-	list_for_each_entry(e, &cache->all, all) {
-		struct inquiry_data *data = &e->data;
-		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
-			   &data->bdaddr,
-			   data->pscan_rep_mode, data->pscan_period_mode,
-			   data->pscan_mode, data->dev_class[2],
-			   data->dev_class[1], data->dev_class[0],
-			   __le16_to_cpu(data->clock_offset),
-			   data->rssi, data->ssp_mode, e->timestamp);
-	}
-
-	hci_dev_unlock(hdev);
-
-	return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-	.open		= inquiry_cache_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
-	struct hci_dev *hdev = f->private;
-	struct bdaddr_list *b;
-
-	hci_dev_lock(hdev);
-
-	list_for_each_entry(b, &hdev->blacklist, list)
-		seq_printf(f, "%pMR\n", &b->bdaddr);
-
-	hci_dev_unlock(hdev);
-
-	return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-	.open		= blacklist_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static void print_bt_uuid(struct seq_file *f, u8 *uuid)
-{
-	u32 data0, data5;
-	u16 data1, data2, data3, data4;
-
-	data5 = get_unaligned_le32(uuid);
-	data4 = get_unaligned_le16(uuid + 4);
-	data3 = get_unaligned_le16(uuid + 6);
-	data2 = get_unaligned_le16(uuid + 8);
-	data1 = get_unaligned_le16(uuid + 10);
-	data0 = get_unaligned_le32(uuid + 12);
-
-	seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
-		   data0, data1, data2, data3, data4, data5);
-}
-
-static int uuids_show(struct seq_file *f, void *p)
-{
-	struct hci_dev *hdev = f->private;
-	struct bt_uuid *uuid;
-
-	hci_dev_lock(hdev);
-
-	list_for_each_entry(uuid, &hdev->uuids, list)
-		print_bt_uuid(f, uuid->uuid);
-
-	hci_dev_unlock(hdev);
-
-	return 0;
-}
-
-static int uuids_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-	.open		= uuids_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int auto_accept_delay_set(void *data, u64 val)
-{
-	struct hci_dev *hdev = data;
-
-	hci_dev_lock(hdev);
-
-	hdev->auto_accept_delay = val;
-
-	hci_dev_unlock(hdev);
-
-	return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
-	struct hci_dev *hdev = data;
-
-	hci_dev_lock(hdev);
-
-	*val = hdev->auto_accept_delay;
-
-	hci_dev_unlock(hdev);
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
-			auto_accept_delay_set, "%llu\n");
-
 void hci_init_sysfs(struct hci_dev *hdev)
 {
 	struct device *dev = &hdev->dev;
@@ -542,52 +215,8 @@
 	device_initialize(dev);
 }
 
-int hci_add_sysfs(struct hci_dev *hdev)
-{
-	struct device *dev = &hdev->dev;
-	int err;
-
-	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
-	dev_set_name(dev, "%s", hdev->name);
-
-	err = device_add(dev);
-	if (err < 0)
-		return err;
-
-	if (!bt_debugfs)
-		return 0;
-
-	hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
-	if (!hdev->debugfs)
-		return 0;
-
-	debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
-			    hdev, &inquiry_cache_fops);
-
-	debugfs_create_file("blacklist", 0444, hdev->debugfs,
-			    hdev, &blacklist_fops);
-
-	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
-	debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
-			    &auto_accept_delay_fops);
-	return 0;
-}
-
-void hci_del_sysfs(struct hci_dev *hdev)
-{
-	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
-	debugfs_remove_recursive(hdev->debugfs);
-
-	device_del(&hdev->dev);
-}
-
 int __init bt_sysfs_init(void)
 {
-	bt_debugfs = debugfs_create_dir("bluetooth", NULL);
-
 	bt_class = class_create(THIS_MODULE, "bluetooth");
 
 	return PTR_ERR_OR_ZERO(bt_class);
@@ -596,6 +225,4 @@
 void bt_sysfs_cleanup(void)
 {
 	class_destroy(bt_class);
-
-	debugfs_remove_recursive(bt_debugfs);
 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bdc35a7..292e619 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -767,10 +767,10 @@
 	strncpy(hid->name, req->name, sizeof(req->name) - 1);
 
 	snprintf(hid->phys, sizeof(hid->phys), "%pMR",
-		 &bt_sk(session->ctrl_sock->sk)->src);
+		 &l2cap_pi(session->ctrl_sock->sk)->chan->src);
 
 	snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
-		 &bt_sk(session->ctrl_sock->sk)->dst);
+		 &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
 
 	hid->dev.parent = &session->conn->hcon->dev;
 	hid->ll_driver = &hidp_hid_driver;
@@ -1283,23 +1283,29 @@
 static int hidp_verify_sockets(struct socket *ctrl_sock,
 			       struct socket *intr_sock)
 {
+	struct l2cap_chan *ctrl_chan, *intr_chan;
 	struct bt_sock *ctrl, *intr;
 	struct hidp_session *session;
 
 	if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
 		return -EINVAL;
 
+	ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
+	intr_chan = l2cap_pi(intr_sock->sk)->chan;
+
+	if (bacmp(&ctrl_chan->src, &intr_chan->src) ||
+	    bacmp(&ctrl_chan->dst, &intr_chan->dst))
+		return -ENOTUNIQ;
+
 	ctrl = bt_sk(ctrl_sock->sk);
 	intr = bt_sk(intr_sock->sk);
 
-	if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
-		return -ENOTUNIQ;
 	if (ctrl->sk.sk_state != BT_CONNECTED ||
 	    intr->sk.sk_state != BT_CONNECTED)
 		return -EBADFD;
 
 	/* early session check, we check again during session registration */
-	session = hidp_session_find(&ctrl->dst);
+	session = hidp_session_find(&ctrl_chan->dst);
 	if (session) {
 		hidp_session_put(session);
 		return -EEXIST;
@@ -1332,7 +1338,7 @@
 	if (!conn)
 		return -EBADFD;
 
-	ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
+	ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
 			       intr_sock, req, conn);
 	if (ret)
 		goto out_conn;
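
HIDP now reads Bluetooth addresses through the L2CAP channel rather than the
socket: l2cap_pi(sk)->chan owns src/dst. A hedged sketch of the new access
pattern, mirroring hidp_verify_sockets() above (the helper name is
illustrative):

    /* Do two L2CAP sockets share the same endpoints? */
    static bool endpoints_match(struct socket *a, struct socket *b)
    {
            struct l2cap_chan *ca = l2cap_pi(a->sk)->chan;
            struct l2cap_chan *cb = l2cap_pi(b->sk)->chan;

            /* bacmp() is a memcmp() over the 6-byte bdaddr_t; 0 == equal */
            return !bacmp(&ca->src, &cb->src) && !bacmp(&ca->dst, &cb->dst);
    }
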
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 9e6cc35..ab52414 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -182,7 +182,7 @@
 };
 
 /* HIDP init defines */
-extern int __init hidp_init_sockets(void);
-extern void __exit hidp_cleanup_sockets(void);
+int __init hidp_init_sockets(void);
+void __exit hidp_cleanup_sockets(void);
 
 #endif /* __HIDP_H */
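
The hidp.h hunk is purely cosmetic: `extern` on a function prototype is
redundant in C because file-scope function declarations have external linkage
by default, and current kernel style drops it. The two declarations below are
equivalent:

    extern int hidp_init_sockets(void);     /* old style */
    int hidp_init_sockets(void);            /* same meaning, preferred */
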
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 63fa111..0cef677 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -36,14 +36,15 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "smp.h"
+#include "a2mp.h"
+#include "amp.h"
 
 bool disable_ertm;
 
-static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
 
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
@@ -58,6 +59,18 @@
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
 		     struct sk_buff_head *skbs, u8 event);
 
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+	if (hcon->type == LE_LINK) {
+		if (type == ADDR_LE_DEV_PUBLIC)
+			return BDADDR_LE_PUBLIC;
+		else
+			return BDADDR_LE_RANDOM;
+	}
+
+	return BDADDR_BREDR;
+}
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
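
The new bdaddr_type() helper translates the HCI view (link type plus LE
address type) into the BDADDR_* constants that sockets and mgmt speak. A
self-contained sketch of the mapping; the constant values are assumptions
mirroring include/net/bluetooth/bluetooth.h and hci.h:

    #include <stdint.h>

    enum { BDADDR_BREDR = 0x00, BDADDR_LE_PUBLIC = 0x01,
           BDADDR_LE_RANDOM = 0x02 };               /* socket-facing */
    enum { ADDR_LE_DEV_PUBLIC = 0x00, ADDR_LE_DEV_RANDOM = 0x01 };

    static uint8_t addr_type(int is_le_link, uint8_t hci_le_addr_type)
    {
            if (!is_le_link)
                    return BDADDR_BREDR;    /* BR/EDR: one address space */

            return hci_le_addr_type == ADDR_LE_DEV_PUBLIC ?
                   BDADDR_LE_PUBLIC : BDADDR_LE_RANDOM;
    }
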
@@ -148,7 +161,7 @@
 	struct l2cap_chan *c;
 
 	list_for_each_entry(c, &chan_list, global_l) {
-		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+		if (c->sport == psm && !bacmp(&c->src, src))
 			return c;
 	}
 	return NULL;
@@ -210,38 +223,25 @@
 	return 0;
 }
 
-static void __l2cap_state_change(struct l2cap_chan *chan, int state)
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
 	       state_to_string(state));
 
 	chan->state = state;
-	chan->ops->state_change(chan, state);
+	chan->ops->state_change(chan, state, 0);
 }
 
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
+static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
+						int state, int err)
 {
-	struct sock *sk = chan->sk;
-
-	lock_sock(sk);
-	__l2cap_state_change(chan, state);
-	release_sock(sk);
-}
-
-static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
-{
-	struct sock *sk = chan->sk;
-
-	sk->sk_err = err;
+	chan->state = state;
+	chan->ops->state_change(chan, chan->state, err);
 }
 
 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
 {
-	struct sock *sk = chan->sk;
-
-	lock_sock(sk);
-	__l2cap_chan_set_err(chan, err);
-	release_sock(sk);
+	chan->ops->state_change(chan, chan->state, err);
 }
 
 static void __set_retrans_timer(struct l2cap_chan *chan)
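
The signature change is what lets the socket locking disappear from the core:
state_change() now carries an optional error, so one callback replaces the
lock_sock()-wrapped pair of state and error updates. The backend side (shown
in full in the l2cap_sock.c part of this diff) boils down to:

    static void sock_state_change(struct l2cap_chan *chan, int state, int err)
    {
            struct sock *sk = chan->data;   /* backend-private pointer */

            sk->sk_state = state;
            if (err)
                    sk->sk_err = err;       /* err == 0 means state-only */
    }
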
@@ -620,10 +620,8 @@
 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 {
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *sk = chan->sk;
 
-	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
-	       sk);
+	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 
 	switch (chan->state) {
 	case BT_LISTEN:
@@ -634,7 +632,7 @@
 	case BT_CONFIG:
 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
 		    conn->hcon->type == ACL_LINK) {
-			__set_chan_timer(chan, sk->sk_sndtimeo);
+			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 			l2cap_send_disconn_req(chan, reason);
 		} else
 			l2cap_chan_del(chan, reason);
@@ -646,10 +644,11 @@
 			struct l2cap_conn_rsp rsp;
 			__u16 result;
 
-			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+			if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
 				result = L2CAP_CR_SEC_BLOCK;
 			else
 				result = L2CAP_CR_BAD_PSM;
+
 			l2cap_state_change(chan, BT_DISCONN);
 
 			rsp.scid   = cpu_to_le16(chan->dcid);
@@ -676,7 +675,8 @@
 
 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 {
-	if (chan->chan_type == L2CAP_CHAN_RAW) {
+	switch (chan->chan_type) {
+	case L2CAP_CHAN_RAW:
 		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_DEDICATED_BONDING_MITM;
@@ -685,15 +685,29 @@
 		default:
 			return HCI_AT_NO_BONDING;
 		}
-	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
-		if (chan->sec_level == BT_SECURITY_LOW)
-			chan->sec_level = BT_SECURITY_SDP;
-
+		break;
+	case L2CAP_CHAN_CONN_LESS:
+		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
+			if (chan->sec_level == BT_SECURITY_LOW)
+				chan->sec_level = BT_SECURITY_SDP;
+		}
 		if (chan->sec_level == BT_SECURITY_HIGH)
 			return HCI_AT_NO_BONDING_MITM;
 		else
 			return HCI_AT_NO_BONDING;
-	} else {
+		break;
+	case L2CAP_CHAN_CONN_ORIENTED:
+		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
+			if (chan->sec_level == BT_SECURITY_LOW)
+				chan->sec_level = BT_SECURITY_SDP;
+
+			if (chan->sec_level == BT_SECURITY_HIGH)
+				return HCI_AT_NO_BONDING_MITM;
+			else
+				return HCI_AT_NO_BONDING;
+		}
+		/* fall through */
+	default:
 		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_GENERAL_BONDING_MITM;
@@ -702,6 +716,7 @@
 		default:
 			return HCI_AT_NO_BONDING;
 		}
+		break;
 	}
 }
 
@@ -1015,14 +1030,29 @@
 static bool __amp_capable(struct l2cap_chan *chan)
 {
 	struct l2cap_conn *conn = chan->conn;
+	struct hci_dev *hdev;
+	bool amp_available = false;
 
-	if (enable_hs &&
-	    hci_amp_capable() &&
-	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
-	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
-		return true;
-	else
+	if (!conn->hs_enabled)
 		return false;
+
+	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
+		return false;
+
+	read_lock(&hci_dev_list_lock);
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (hdev->amp_type != AMP_TYPE_BREDR &&
+		    test_bit(HCI_UP, &hdev->flags)) {
+			amp_available = true;
+			break;
+		}
+	}
+	read_unlock(&hci_dev_list_lock);
+
+	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
+		return amp_available;
+
+	return false;
 }
 
 static bool l2cap_check_efs(struct l2cap_chan *chan)
@@ -1186,7 +1216,6 @@
 
 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
 {
-	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct l2cap_disconn_req req;
 
@@ -1209,10 +1238,7 @@
 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
 		       sizeof(req), &req);
 
-	lock_sock(sk);
-	__l2cap_state_change(chan, BT_DISCONN);
-	__l2cap_chan_set_err(chan, err);
-	release_sock(sk);
+	l2cap_state_change_and_error(chan, BT_DISCONN, err);
 }
 
 /* ---- L2CAP connections ---- */
@@ -1225,8 +1251,6 @@
 	mutex_lock(&conn->chan_lock);
 
 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
-		struct sock *sk = chan->sk;
-
 		l2cap_chan_lock(chan);
 
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1258,19 +1282,16 @@
 			rsp.dcid = cpu_to_le16(chan->scid);
 
 			if (l2cap_chan_check_security(chan)) {
-				lock_sock(sk);
-				if (test_bit(BT_SK_DEFER_SETUP,
-					     &bt_sk(sk)->flags)) {
+				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
 					chan->ops->defer(chan);
 
 				} else {
-					__l2cap_state_change(chan, BT_CONFIG);
+					l2cap_state_change(chan, BT_CONFIG);
 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 				}
-				release_sock(sk);
 			} else {
 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -1309,8 +1330,6 @@
 	read_lock(&chan_list_lock);
 
 	list_for_each_entry(c, &chan_list, global_l) {
-		struct sock *sk = c->sk;
-
 		if (state && c->state != state)
 			continue;
 
@@ -1319,16 +1338,16 @@
 			int src_any, dst_any;
 
 			/* Exact match. */
-			src_match = !bacmp(&bt_sk(sk)->src, src);
-			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+			src_match = !bacmp(&c->src, src);
+			dst_match = !bacmp(&c->dst, dst);
 			if (src_match && dst_match) {
 				read_unlock(&chan_list_lock);
 				return c;
 			}
 
 			/* Closest match */
-			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
-			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+			src_any = !bacmp(&c->src, BDADDR_ANY);
+			dst_any = !bacmp(&c->dst, BDADDR_ANY);
 			if ((src_match && dst_any) || (src_any && dst_match) ||
 			    (src_any && dst_any))
 				c1 = c;
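
Both global lookup helpers keep the same two-pass policy: an exact (src, dst)
pair wins immediately, while a channel bound to the BDADDR_ANY wildcard on
either side is remembered as the closest candidate. A self-contained sketch
of that policy:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    typedef struct { unsigned char b[6]; } bdaddr_t;
    static const bdaddr_t any;                  /* all-zero wildcard */

    static bool baeq(const bdaddr_t *x, const bdaddr_t *y)
    {
            return memcmp(x, y, sizeof(*x)) == 0;
    }

    struct chan { bdaddr_t src, dst; };

    static struct chan *pick(struct chan *c, int n,
                             const bdaddr_t *src, const bdaddr_t *dst)
    {
            struct chan *closest = NULL;

            for (int i = 0; i < n; i++) {
                    bool sm = baeq(&c[i].src, src);
                    bool dm = baeq(&c[i].dst, dst);

                    if (sm && dm)
                            return &c[i];       /* exact match wins */

                    bool sa = baeq(&c[i].src, &any);
                    bool da = baeq(&c[i].dst, &any);

                    if ((sm && da) || (sa && dm) || (sa && da))
                            closest = &c[i];    /* wildcard candidate */
            }
            return closest;
    }
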
@@ -1342,14 +1361,15 @@
 
 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 {
-	struct sock *parent;
+	struct hci_conn *hcon = conn->hcon;
 	struct l2cap_chan *chan, *pchan;
+	u8 dst_type;
 
 	BT_DBG("");
 
 	/* Check if we have socket listening on cid */
 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
-					  conn->src, conn->dst);
+					  &hcon->src, &hcon->dst);
 	if (!pchan)
 		return;
 
@@ -1357,9 +1377,13 @@
 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
 		return;
 
-	parent = pchan->sk;
+	dst_type = bdaddr_type(hcon, hcon->dst_type);
 
-	lock_sock(parent);
+	/* If device is blocked, do not create a channel for it */
+	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
+		return;
+
+	l2cap_chan_lock(pchan);
 
 	chan = pchan->ops->new_connection(pchan);
 	if (!chan)
@@ -1367,13 +1391,15 @@
 
 	chan->dcid = L2CAP_CID_ATT;
 
-	bacpy(&bt_sk(chan->sk)->src, conn->src);
-	bacpy(&bt_sk(chan->sk)->dst, conn->dst);
+	bacpy(&chan->src, &hcon->src);
+	bacpy(&chan->dst, &hcon->dst);
+	chan->src_type = bdaddr_type(hcon, hcon->src_type);
+	chan->dst_type = dst_type;
 
 	__l2cap_chan_add(conn, chan);
 
 clean:
-	release_sock(parent);
+	l2cap_chan_unlock(pchan);
 }
 
 static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1408,12 +1434,7 @@
 				l2cap_chan_ready(chan);
 
 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
-			struct sock *sk = chan->sk;
-			__clear_chan_timer(chan);
-			lock_sock(sk);
-			__l2cap_state_change(chan, BT_CONNECTED);
-			sk->sk_state_change(sk);
-			release_sock(sk);
+			l2cap_chan_ready(chan);
 
 		} else if (chan->state == BT_CONNECT) {
 			l2cap_do_start(chan);
@@ -1633,11 +1654,12 @@
 		break;
 	}
 
-	conn->src = &hcon->hdev->bdaddr;
-	conn->dst = &hcon->dst;
-
 	conn->feat_mask = 0;
 
+	if (hcon->type == ACL_LINK)
+		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
+					    &hcon->hdev->dev_flags);
+
 	spin_lock_init(&conn->lock);
 	mutex_init(&conn->chan_lock);
 
@@ -1688,8 +1710,6 @@
 	read_lock(&chan_list_lock);
 
 	list_for_each_entry(c, &chan_list, global_l) {
-		struct sock *sk = c->sk;
-
 		if (state && c->state != state)
 			continue;
 
@@ -1698,16 +1718,16 @@
 			int src_any, dst_any;
 
 			/* Exact match. */
-			src_match = !bacmp(&bt_sk(sk)->src, src);
-			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+			src_match = !bacmp(&c->src, src);
+			dst_match = !bacmp(&c->dst, dst);
 			if (src_match && dst_match) {
 				read_unlock(&chan_list_lock);
 				return c;
 			}
 
 			/* Closest match */
-			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
-			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+			src_any = !bacmp(&c->src, BDADDR_ANY);
+			dst_any = !bacmp(&c->dst, BDADDR_ANY);
 			if ((src_match && dst_any) || (src_any && dst_match) ||
 			    (src_any && dst_any))
 				c1 = c;
@@ -1722,18 +1742,16 @@
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 		       bdaddr_t *dst, u8 dst_type)
 {
-	struct sock *sk = chan->sk;
-	bdaddr_t *src = &bt_sk(sk)->src;
 	struct l2cap_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev *hdev;
 	__u8 auth_type;
 	int err;
 
-	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
+	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
 	       dst_type, __le16_to_cpu(psm));
 
-	hdev = hci_get_route(dst, src);
+	hdev = hci_get_route(dst, &chan->src);
 	if (!hdev)
 		return -EHOSTUNREACH;
 
@@ -1790,9 +1808,8 @@
 	}
 
 	/* Set destination address and psm */
-	lock_sock(sk);
-	bacpy(&bt_sk(sk)->dst, dst);
-	release_sock(sk);
+	bacpy(&chan->dst, dst);
+	chan->dst_type = dst_type;
 
 	chan->psm = psm;
 	chan->dcid = cid;
@@ -1825,7 +1842,8 @@
 	}
 
 	/* Update source addr of the socket */
-	bacpy(src, conn->src);
+	bacpy(&chan->src, &hcon->src);
+	chan->src_type = bdaddr_type(hcon, hcon->src_type);
 
 	l2cap_chan_unlock(chan);
 	l2cap_chan_add(conn, chan);
@@ -1835,7 +1853,7 @@
 	hci_conn_drop(hcon);
 
 	l2cap_state_change(chan, BT_CONNECT);
-	__set_chan_timer(chan, sk->sk_sndtimeo);
+	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 
 	if (hcon->state == BT_CONNECTED) {
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1855,38 +1873,6 @@
 	return err;
 }
 
-int __l2cap_wait_ack(struct sock *sk)
-{
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	DECLARE_WAITQUEUE(wait, current);
-	int err = 0;
-	int timeo = HZ/5;
-
-	add_wait_queue(sk_sleep(sk), &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (chan->unacked_frames > 0 && chan->conn) {
-		if (!timeo)
-			timeo = HZ/5;
-
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		err = sock_error(sk);
-		if (err)
-			break;
-	}
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk_sleep(sk), &wait);
-	return err;
-}
-
 static void l2cap_monitor_timeout(struct work_struct *work)
 {
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -2263,7 +2249,8 @@
 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
+	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
+	       __le16_to_cpu(chan->psm), len, priority);
 
 	count = min_t(unsigned int, (conn->mtu - hlen), len);
 
@@ -2278,7 +2265,7 @@
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
-	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
+	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
 
 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
 	if (unlikely(err < 0)) {
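
The put_unaligned() tweak is for sparse, not the CPU: chan->psm is already a
__le16, and casting the untyped skb_put() pointer to (__le16 *) keeps the
endianness annotation intact through the store. The equivalent open-coded,
alignment-safe little-endian store looks like:

    #include <stdint.h>
    #include <string.h>

    /* Store a 16-bit value little-endian at a possibly unaligned address */
    static void put_le16(uint16_t v, void *p)
    {
            uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };

            memcpy(p, b, sizeof(b));    /* memcpy sidesteps alignment traps */
    }
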
@@ -2826,17 +2813,16 @@
 	mutex_lock(&conn->chan_lock);
 
 	list_for_each_entry(chan, &conn->chan_l, list) {
-		struct sock *sk = chan->sk;
 		if (chan->chan_type != L2CAP_CHAN_RAW)
 			continue;
 
-		/* Don't send frame to the socket it came from */
-		if (skb->sk == sk)
+		/* Don't send frame to the channel it came from */
+		if (bt_cb(skb)->chan == chan)
 			continue;
+
 		nskb = skb_clone(skb, GFP_KERNEL);
 		if (!nskb)
 			continue;
-
 		if (chan->ops->recv(chan, nskb))
 			kfree_skb(nskb);
 	}
@@ -3043,8 +3029,8 @@
 
 	skb_queue_head_init(&chan->tx_q);
 
-	chan->local_amp_id = 0;
-	chan->move_id = 0;
+	chan->local_amp_id = AMP_ID_BREDR;
+	chan->move_id = AMP_ID_BREDR;
 	chan->move_state = L2CAP_MOVE_STABLE;
 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
 
@@ -3084,20 +3070,20 @@
 	}
 }
 
-static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
 {
-	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
 }
 
-static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
 {
-	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
 }
 
 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
 				      struct l2cap_conf_rfc *rfc)
 {
-	if (chan->local_amp_id && chan->hs_hcon) {
+	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
 
 		/* Class 1 devices must have ERTM timeouts
@@ -3135,7 +3121,7 @@
 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
 {
 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
-	    __l2cap_ews_supported(chan)) {
+	    __l2cap_ews_supported(chan->conn)) {
 		/* use extended control field */
 		set_bit(FLAG_EXT_CTRL, &chan->flags);
 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3165,7 +3151,7 @@
 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
 			break;
 
-		if (__l2cap_efs_supported(chan))
+		if (__l2cap_efs_supported(chan->conn))
 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
 
 		/* fall through */
@@ -3317,7 +3303,7 @@
 			break;
 
 		case L2CAP_CONF_EWS:
-			if (!enable_hs)
+			if (!chan->conn->hs_enabled)
 				return -ECONNREFUSED;
 
 			set_bit(FLAG_EXT_CTRL, &chan->flags);
@@ -3349,7 +3335,7 @@
 		}
 
 		if (remote_efs) {
-			if (__l2cap_efs_supported(chan))
+			if (__l2cap_efs_supported(chan->conn))
 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
 			else
 				return -ECONNREFUSED;
@@ -3715,7 +3701,6 @@
 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
 	struct l2cap_conn_rsp rsp;
 	struct l2cap_chan *chan = NULL, *pchan;
-	struct sock *parent, *sk = NULL;
 	int result, status = L2CAP_CS_NO_INFO;
 
 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -3724,16 +3709,15 @@
 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
 
 	/* Check if we have socket listening on psm */
-	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
+	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+					 &conn->hcon->dst);
 	if (!pchan) {
 		result = L2CAP_CR_BAD_PSM;
 		goto sendresp;
 	}
 
-	parent = pchan->sk;
-
 	mutex_lock(&conn->chan_lock);
-	lock_sock(parent);
+	l2cap_chan_lock(pchan);
 
 	/* Check if the ACL is secure enough (if not SDP) */
 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
@@ -3753,8 +3737,6 @@
 	if (!chan)
 		goto response;
 
-	sk = chan->sk;
-
 	/* For certain devices (ex: HID mouse), support for authentication,
 	 * pairing and bonding is optional. For such devices, in order to avoid
 	 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
@@ -3762,8 +3744,10 @@
 	 */
 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
 
-	bacpy(&bt_sk(sk)->src, conn->src);
-	bacpy(&bt_sk(sk)->dst, conn->dst);
+	bacpy(&chan->src, &conn->hcon->src);
+	bacpy(&chan->dst, &conn->hcon->dst);
+	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
 	chan->psm  = psm;
 	chan->dcid = scid;
 	chan->local_amp_id = amp_id;
@@ -3772,14 +3756,14 @@
 
 	dcid = chan->scid;
 
-	__set_chan_timer(chan, sk->sk_sndtimeo);
+	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 
 	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
 		if (l2cap_chan_check_security(chan)) {
-			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
-				__l2cap_state_change(chan, BT_CONNECT2);
+			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+				l2cap_state_change(chan, BT_CONNECT2);
 				result = L2CAP_CR_PEND;
 				status = L2CAP_CS_AUTHOR_PEND;
 				chan->ops->defer(chan);
@@ -3788,28 +3772,28 @@
 				 * The connection will succeed after the
 				 * physical link is up.
 				 */
-				if (amp_id) {
-					__l2cap_state_change(chan, BT_CONNECT2);
-					result = L2CAP_CR_PEND;
-				} else {
-					__l2cap_state_change(chan, BT_CONFIG);
+				if (amp_id == AMP_ID_BREDR) {
+					l2cap_state_change(chan, BT_CONFIG);
 					result = L2CAP_CR_SUCCESS;
+				} else {
+					l2cap_state_change(chan, BT_CONNECT2);
+					result = L2CAP_CR_PEND;
 				}
 				status = L2CAP_CS_NO_INFO;
 			}
 		} else {
-			__l2cap_state_change(chan, BT_CONNECT2);
+			l2cap_state_change(chan, BT_CONNECT2);
 			result = L2CAP_CR_PEND;
 			status = L2CAP_CS_AUTHEN_PEND;
 		}
 	} else {
-		__l2cap_state_change(chan, BT_CONNECT2);
+		l2cap_state_change(chan, BT_CONNECT2);
 		result = L2CAP_CR_PEND;
 		status = L2CAP_CS_NO_INFO;
 	}
 
 response:
-	release_sock(parent);
+	l2cap_chan_unlock(pchan);
 	mutex_unlock(&conn->chan_lock);
 
 sendresp:
@@ -3891,13 +3875,13 @@
 	if (scid) {
 		chan = __l2cap_get_chan_by_scid(conn, scid);
 		if (!chan) {
-			err = -EFAULT;
+			err = -EBADSLT;
 			goto unlock;
 		}
 	} else {
 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
 		if (!chan) {
-			err = -EFAULT;
+			err = -EBADSLT;
 			goto unlock;
 		}
 	}
@@ -3965,6 +3949,18 @@
 					    L2CAP_CONF_SUCCESS, flags), data);
 }
 
+static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
+				   u16 scid, u16 dcid)
+{
+	struct l2cap_cmd_rej_cid rej;
+
+	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
+	rej.scid = __cpu_to_le16(scid);
+	rej.dcid = __cpu_to_le16(dcid);
+
+	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+}
+
 static inline int l2cap_config_req(struct l2cap_conn *conn,
 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
 				   u8 *data)
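
cmd_reject_invalid_cid() folds three open-coded L2CAP_COMMAND_REJ sends into
one helper, and the callers now return 0 after rejecting instead of bubbling
an error up. That matters because of the dispatcher contract: a nonzero
return from a handler makes the signalling loop emit a generic
NOT_UNDERSTOOD reject, which would be wrong once a specific INVALID_CID
reject has already gone out. The error path, sketched (handle_cmd stands in
for the real dispatch function):

    err = handle_cmd(conn, &cmd, data);
    if (err) {
            struct l2cap_cmd_rej_unk rej;

            rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
            l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
                           sizeof(rej), &rej);
    }
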
@@ -3984,18 +3980,14 @@
 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
 
 	chan = l2cap_get_chan_by_scid(conn, dcid);
-	if (!chan)
-		return -ENOENT;
+	if (!chan) {
+		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
+		return 0;
+	}
 
 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
-		struct l2cap_cmd_rej_cid rej;
-
-		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
-		rej.scid = cpu_to_le16(chan->scid);
-		rej.dcid = cpu_to_le16(chan->dcid);
-
-		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-			       sizeof(rej), &rej);
+		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+				       chan->dcid);
 		goto unlock;
 	}
 
@@ -4198,7 +4190,6 @@
 	struct l2cap_disconn_rsp rsp;
 	u16 dcid, scid;
 	struct l2cap_chan *chan;
-	struct sock *sk;
 
 	if (cmd_len != sizeof(*req))
 		return -EPROTO;
@@ -4213,20 +4204,17 @@
 	chan = __l2cap_get_chan_by_scid(conn, dcid);
 	if (!chan) {
 		mutex_unlock(&conn->chan_lock);
+		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
 		return 0;
 	}
 
 	l2cap_chan_lock(chan);
 
-	sk = chan->sk;
-
 	rsp.dcid = cpu_to_le16(chan->scid);
 	rsp.scid = cpu_to_le16(chan->dcid);
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
-	lock_sock(sk);
-	sk->sk_shutdown = SHUTDOWN_MASK;
-	release_sock(sk);
+	chan->ops->set_shutdown(chan);
 
 	l2cap_chan_hold(chan);
 	l2cap_chan_del(chan, ECONNRESET);
@@ -4303,7 +4291,7 @@
 		if (!disable_ertm)
 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
 				| L2CAP_FEAT_FCS;
-		if (enable_hs)
+		if (conn->hs_enabled)
 			feat_mask |= L2CAP_FEAT_EXT_FLOW
 				| L2CAP_FEAT_EXT_WINDOW;
 
@@ -4314,7 +4302,7 @@
 		u8 buf[12];
 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
 
-		if (enable_hs)
+		if (conn->hs_enabled)
 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
 		else
 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
@@ -4411,7 +4399,7 @@
 	if (cmd_len != sizeof(*req))
 		return -EPROTO;
 
-	if (!enable_hs)
+	if (!conn->hs_enabled)
 		return -EINVAL;
 
 	psm = le16_to_cpu(req->psm);
@@ -4420,7 +4408,7 @@
 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
 
 	/* For controller id 0 make BR/EDR connection */
-	if (req->amp_id == HCI_BREDR_ID) {
+	if (req->amp_id == AMP_ID_BREDR) {
 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
 			      req->amp_id);
 		return 0;
@@ -4442,10 +4430,13 @@
 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
 		struct hci_conn *hs_hcon;
 
-		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
+		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+						  &conn->hcon->dst);
 		if (!hs_hcon) {
 			hci_dev_put(hdev);
-			return -EFAULT;
+			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+					       chan->dcid);
+			return 0;
 		}
 
 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
@@ -4469,7 +4460,7 @@
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
 		       sizeof(rsp), &rsp);
 
-	return -EFAULT;
+	return 0;
 }
 
 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
@@ -4655,7 +4646,7 @@
 
 	if (chan->state != BT_CONNECTED) {
 		/* Ignore logical link if channel is on BR/EDR */
-		if (chan->local_amp_id)
+		if (chan->local_amp_id != AMP_ID_BREDR)
 			l2cap_logical_finish_create(chan, hchan);
 	} else {
 		l2cap_logical_finish_move(chan, hchan);
@@ -4666,7 +4657,7 @@
 {
 	BT_DBG("chan %p", chan);
 
-	if (chan->local_amp_id == HCI_BREDR_ID) {
+	if (chan->local_amp_id == AMP_ID_BREDR) {
 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
 			return;
 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
@@ -4723,7 +4714,7 @@
 			       sizeof(rsp), &rsp);
 
 		if (result == L2CAP_CR_SUCCESS) {
-			__l2cap_state_change(chan, BT_CONFIG);
+			l2cap_state_change(chan, BT_CONFIG);
 			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
 				       L2CAP_CONF_REQ,
@@ -4838,7 +4829,7 @@
 
 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
 
-	if (!enable_hs)
+	if (!conn->hs_enabled)
 		return -EINVAL;
 
 	chan = l2cap_get_chan_by_dcid(conn, icid);
@@ -4865,7 +4856,7 @@
 		goto send_move_response;
 	}
 
-	if (req->dest_amp_id) {
+	if (req->dest_amp_id != AMP_ID_BREDR) {
 		struct hci_dev *hdev;
 		hdev = hci_dev_get(req->dest_amp_id);
 		if (!hdev || hdev->dev_type != HCI_AMP ||
@@ -4885,7 +4876,7 @@
 	 */
 	if ((__chan_is_moving(chan) ||
 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
-	    bacmp(conn->src, conn->dst) > 0) {
+	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
 		result = L2CAP_MR_COLLISION;
 		goto send_move_response;
 	}
@@ -4895,7 +4886,7 @@
 	chan->move_id = req->dest_amp_id;
 	icid = chan->dcid;
 
-	if (!req->dest_amp_id) {
+	if (req->dest_amp_id == AMP_ID_BREDR) {
 		/* Moving to BR/EDR */
 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
@@ -5087,7 +5078,7 @@
 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
 		if (result == L2CAP_MC_CONFIRMED) {
 			chan->local_amp_id = chan->move_id;
-			if (!chan->local_amp_id)
+			if (chan->local_amp_id == AMP_ID_BREDR)
 				__release_logical_link(chan);
 		} else {
 			chan->move_id = chan->local_amp_id;
@@ -5127,7 +5118,7 @@
 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
 		chan->local_amp_id = chan->move_id;
 
-		if (!chan->local_amp_id && chan->hs_hchan)
+		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
 			__release_logical_link(chan);
 
 		l2cap_move_done(chan);
@@ -5219,7 +5210,7 @@
 
 	case L2CAP_CONN_RSP:
 	case L2CAP_CREATE_CHAN_RSP:
-		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
+		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	case L2CAP_CONF_REQ:
@@ -5227,7 +5218,7 @@
 		break;
 
 	case L2CAP_CONF_RSP:
-		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
+		l2cap_config_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	case L2CAP_DISCONN_REQ:
@@ -5235,7 +5226,7 @@
 		break;
 
 	case L2CAP_DISCONN_RSP:
-		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	case L2CAP_ECHO_REQ:
@@ -5250,7 +5241,7 @@
 		break;
 
 	case L2CAP_INFO_RSP:
-		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
+		l2cap_information_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	case L2CAP_CREATE_CHAN_REQ:
@@ -5262,7 +5253,7 @@
 		break;
 
 	case L2CAP_MOVE_CHAN_RSP:
-		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	case L2CAP_MOVE_CHAN_CFM:
@@ -5270,7 +5261,7 @@
 		break;
 
 	case L2CAP_MOVE_CHAN_CFM_RSP:
-		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
 		break;
 
 	default:
@@ -5304,51 +5295,48 @@
 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
 					struct sk_buff *skb)
 {
-	u8 *data = skb->data;
-	int len = skb->len;
-	struct l2cap_cmd_hdr cmd;
+	struct hci_conn *hcon = conn->hcon;
+	struct l2cap_cmd_hdr *cmd;
+	u16 len;
 	int err;
 
-	l2cap_raw_recv(conn, skb);
+	if (hcon->type != LE_LINK)
+		goto drop;
 
-	while (len >= L2CAP_CMD_HDR_SIZE) {
-		u16 cmd_len;
-		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
-		data += L2CAP_CMD_HDR_SIZE;
-		len  -= L2CAP_CMD_HDR_SIZE;
+	if (skb->len < L2CAP_CMD_HDR_SIZE)
+		goto drop;
 
-		cmd_len = le16_to_cpu(cmd.len);
+	cmd = (void *) skb->data;
+	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
 
-		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
-		       cmd.ident);
+	len = le16_to_cpu(cmd->len);
 
-		if (cmd_len > len || !cmd.ident) {
-			BT_DBG("corrupted command");
-			break;
-		}
+	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
 
-		err = l2cap_le_sig_cmd(conn, &cmd, data);
-		if (err) {
-			struct l2cap_cmd_rej_unk rej;
-
-			BT_ERR("Wrong link type (%d)", err);
-
-			/* FIXME: Map err to a valid reason */
-			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
-			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
-				       sizeof(rej), &rej);
-		}
-
-		data += cmd_len;
-		len  -= cmd_len;
+	if (len != skb->len || !cmd->ident) {
+		BT_DBG("corrupted command");
+		goto drop;
 	}
 
+	err = l2cap_le_sig_cmd(conn, cmd, skb->data);
+	if (err) {
+		struct l2cap_cmd_rej_unk rej;
+
+		BT_ERR("Wrong link type (%d)", err);
+
+		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+			       sizeof(rej), &rej);
+	}
+
+drop:
 	kfree_skb(skb);
 }
 
 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 				     struct sk_buff *skb)
 {
+	struct hci_conn *hcon = conn->hcon;
 	u8 *data = skb->data;
 	int len = skb->len;
 	struct l2cap_cmd_hdr cmd;
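
The LE signalling channel is rewritten around the rule that an LE signalling
PDU carries exactly one command: the header's length field must account for
the entire remaining frame, and ident 0 stays reserved. Anything else is
dropped instead of being parsed in a loop as on BR/EDR. A self-contained
sketch of that validation (the header is 4 bytes: code, ident, le16 length):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct cmd_hdr { uint8_t code, ident; uint16_t len; };

    /* Accept a frame only if it holds exactly one well-formed command */
    static bool parse_one_cmd(const uint8_t *buf, size_t n, struct cmd_hdr *out)
    {
            if (n < 4)
                    return false;               /* truncated header */

            out->code  = buf[0];
            out->ident = buf[1];
            out->len   = (uint16_t)(buf[2] | (buf[3] << 8));    /* le16 */

            return out->ident != 0 && out->len == n - 4;
    }
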
@@ -5356,6 +5344,9 @@
 
 	l2cap_raw_recv(conn, skb);
 
+	if (hcon->type != ACL_LINK)
+		goto drop;
+
 	while (len >= L2CAP_CMD_HDR_SIZE) {
 		u16 cmd_len;
 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
@@ -5378,7 +5369,6 @@
 
 			BT_ERR("Wrong link type (%d)", err);
 
-			/* FIXME: Map err to a valid reason */
 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
 				       sizeof(rej), &rej);
@@ -5388,6 +5378,7 @@
 		len  -= cmd_len;
 	}
 
+drop:
 	kfree_skb(skb);
 }
 
@@ -5784,7 +5775,7 @@
 			       struct sk_buff *skb, u8 event)
 {
 	int err = 0;
-	bool skb_in_use = 0;
+	bool skb_in_use = false;
 
 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
 	       event);
@@ -5805,7 +5796,7 @@
 							   control->txseq);
 
 			chan->buffer_seq = chan->expected_tx_seq;
-			skb_in_use = 1;
+			skb_in_use = true;
 
 			err = l2cap_reassemble_sdu(chan, skb, control);
 			if (err)
@@ -5841,7 +5832,7 @@
 			 * current frame is stored for later use.
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
-			skb_in_use = 1;
+			skb_in_use = true;
 			BT_DBG("Queued %p (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
@@ -5919,7 +5910,7 @@
 {
 	int err = 0;
 	u16 txseq = control->txseq;
-	bool skb_in_use = 0;
+	bool skb_in_use = false;
 
 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
 	       event);
@@ -5931,7 +5922,7 @@
 			/* Keep frame for reassembly later */
 			l2cap_pass_to_tx(chan, control);
 			skb_queue_tail(&chan->srej_q, skb);
-			skb_in_use = 1;
+			skb_in_use = true;
 			BT_DBG("Queued %p (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
@@ -5942,7 +5933,7 @@
 
 			l2cap_pass_to_tx(chan, control);
 			skb_queue_tail(&chan->srej_q, skb);
-			skb_in_use = 1;
+			skb_in_use = true;
 			BT_DBG("Queued %p (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
@@ -5957,7 +5948,7 @@
 			 * the missing frames.
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
-			skb_in_use = 1;
+			skb_in_use = true;
 			BT_DBG("Queued %p (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
@@ -5971,7 +5962,7 @@
 			 * SREJ'd frames.
 			 */
 			skb_queue_tail(&chan->srej_q, skb);
-			skb_in_use = 1;
+			skb_in_use = true;
 			BT_DBG("Queued %p (queue len %d)", skb,
 			       skb_queue_len(&chan->srej_q));
 
@@ -6380,9 +6371,13 @@
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
 				  struct sk_buff *skb)
 {
+	struct hci_conn *hcon = conn->hcon;
 	struct l2cap_chan *chan;
 
-	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
+	if (hcon->type != ACL_LINK)
+		goto drop;
+
+	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
 	if (!chan)
 		goto drop;
 
@@ -6394,6 +6389,10 @@
 	if (chan->imtu < skb->len)
 		goto drop;
 
+	/* Store remote BD_ADDR and PSM for msg_name */
+	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
+	bt_cb(skb)->psm = psm;
+
 	if (!chan->ops->recv(chan, skb))
 		return;
 
@@ -6404,15 +6403,22 @@
 static void l2cap_att_channel(struct l2cap_conn *conn,
 			      struct sk_buff *skb)
 {
+	struct hci_conn *hcon = conn->hcon;
 	struct l2cap_chan *chan;
 
+	if (hcon->type != LE_LINK)
+		goto drop;
+
 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
-					 conn->src, conn->dst);
+					 &hcon->src, &hcon->dst);
 	if (!chan)
 		goto drop;
 
 	BT_DBG("chan %p, len %d", chan, skb->len);
 
+	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
+		goto drop;
+
 	if (chan->imtu < skb->len)
 		goto drop;
 
@@ -6441,9 +6447,6 @@
 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
 
 	switch (cid) {
-	case L2CAP_CID_LE_SIGNALING:
-		l2cap_le_sig_channel(conn, skb);
-		break;
 	case L2CAP_CID_SIGNALING:
 		l2cap_sig_channel(conn, skb);
 		break;
@@ -6458,6 +6461,10 @@
 		l2cap_att_channel(conn, skb);
 		break;
 
+	case L2CAP_CID_LE_SIGNALING:
+		l2cap_le_sig_channel(conn, skb);
+		break;
+
 	case L2CAP_CID_SMP:
 		if (smp_sig_channel(conn, skb))
 			l2cap_conn_del(conn->hcon, EACCES);
@@ -6481,17 +6488,15 @@
 	/* Find listening sockets and check their link_mode */
 	read_lock(&chan_list_lock);
 	list_for_each_entry(c, &chan_list, global_l) {
-		struct sock *sk = c->sk;
-
 		if (c->state != BT_LISTEN)
 			continue;
 
-		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
+		if (!bacmp(&c->src, &hdev->bdaddr)) {
 			lm1 |= HCI_LM_ACCEPT;
 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
 				lm1 |= HCI_LM_MASTER;
 			exact++;
-		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+		} else if (!bacmp(&c->src, BDADDR_ANY)) {
 			lm2 |= HCI_LM_ACCEPT;
 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
 				lm2 |= HCI_LM_MASTER;
@@ -6597,11 +6602,7 @@
 
 		if (!status && (chan->state == BT_CONNECTED ||
 				chan->state == BT_CONFIG)) {
-			struct sock *sk = chan->sk;
-
-			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
-			sk->sk_state_change(sk);
-
+			chan->ops->resume(chan);
 			l2cap_check_encryption(chan, encrypt);
 			l2cap_chan_unlock(chan);
 			continue;
@@ -6614,32 +6615,26 @@
 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
 			}
 		} else if (chan->state == BT_CONNECT2) {
-			struct sock *sk = chan->sk;
 			struct l2cap_conn_rsp rsp;
 			__u16 res, stat;
 
-			lock_sock(sk);
-
 			if (!status) {
-				if (test_bit(BT_SK_DEFER_SETUP,
-					     &bt_sk(sk)->flags)) {
+				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
 					res = L2CAP_CR_PEND;
 					stat = L2CAP_CS_AUTHOR_PEND;
 					chan->ops->defer(chan);
 				} else {
-					__l2cap_state_change(chan, BT_CONFIG);
+					l2cap_state_change(chan, BT_CONFIG);
 					res = L2CAP_CR_SUCCESS;
 					stat = L2CAP_CS_NO_INFO;
 				}
 			} else {
-				__l2cap_state_change(chan, BT_DISCONN);
+				l2cap_state_change(chan, BT_DISCONN);
 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
 				res = L2CAP_CR_SEC_BLOCK;
 				stat = L2CAP_CS_NO_INFO;
 			}
 
-			release_sock(sk);
-
 			rsp.scid   = cpu_to_le16(chan->dcid);
 			rsp.dcid   = cpu_to_le16(chan->scid);
 			rsp.result = cpu_to_le16(res);
@@ -6756,9 +6751,13 @@
 		conn->rx_len -= skb->len;
 
 		if (!conn->rx_len) {
-			/* Complete frame received */
-			l2cap_recv_frame(conn, conn->rx_skb);
+			/* Complete frame received. l2cap_recv_frame
+			 * takes ownership of the skb, so clear the
+			 * connection's rx_skb pointer first.
+			 */
+			struct sk_buff *rx_skb = conn->rx_skb;
 			conn->rx_skb = NULL;
+			l2cap_recv_frame(conn, rx_skb);
 		}
 		break;
 	}
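
The reassembly fix is a plain ownership handoff: l2cap_recv_frame() consumes
the skb and may re-enter this code (for instance by tearing the connection
down), so a stale conn->rx_skb must not be observable while it runs. The
generic shape of the pattern:

    /* Snapshot, clear, then consume -- never the other way around */
    void *obj = ctx->pending;

    ctx->pending = NULL;    /* drop our claim before the call: consume()
                             * may free obj or re-enter and look at ctx */
    consume(obj);
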
@@ -6775,10 +6774,8 @@
 	read_lock(&chan_list_lock);
 
 	list_for_each_entry(c, &chan_list, global_l) {
-		struct sock *sk = c->sk;
-
 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
-			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+			   &c->src, &c->dst,
 			   c->state, __le16_to_cpu(c->psm),
 			   c->scid, c->dcid, c->imtu, c->omtu,
 			   c->sec_level, c->mode);
@@ -6811,12 +6808,11 @@
 	if (err < 0)
 		return err;
 
-	if (bt_debugfs) {
-		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
-						    NULL, &l2cap_debugfs_fops);
-		if (!l2cap_debugfs)
-			BT_ERR("Failed to create L2CAP debug file");
-	}
+	if (IS_ERR_OR_NULL(bt_debugfs))
+		return 0;
+
+	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+					    NULL, &l2cap_debugfs_fops);
 
 	return 0;
 }
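
The init path now returns quietly when the shared bluetooth debugfs root is
absent or errored, rather than logging per-file failures; debugfs creation
errors are deliberately non-fatal. IS_ERR_OR_NULL() merges the two checks;
its shape in include/linux/err.h is roughly:

    static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
    {
            return !ptr || IS_ERR_VALUE((unsigned long)ptr);
    }
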
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 0098af8..7cc24d2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -32,7 +32,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
 
 static struct bt_sock_list l2cap_sk_list = {
 	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
@@ -68,6 +69,18 @@
 	if (la.l2_cid && la.l2_psm)
 		return -EINVAL;
 
+	if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+		return -EINVAL;
+
+	if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+		/* Connection oriented channels are not supported on LE */
+		if (la.l2_psm)
+			return -EINVAL;
+		/* We only allow ATT user space socket */
+		if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+			return -EINVAL;
+	}
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
@@ -99,11 +112,20 @@
 	if (err < 0)
 		goto done;
 
-	if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
-	    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
-		chan->sec_level = BT_SECURITY_SDP;
+	switch (chan->chan_type) {
+	case L2CAP_CHAN_CONN_LESS:
+		if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP)
+			chan->sec_level = BT_SECURITY_SDP;
+		break;
+	case L2CAP_CHAN_CONN_ORIENTED:
+		if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+		    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
+			chan->sec_level = BT_SECURITY_SDP;
+		break;
+	}
 
-	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+	bacpy(&chan->src, &la.l2_bdaddr);
+	chan->src_type = la.l2_bdaddr_type;
 
 	chan->state = BT_BOUND;
 	sk->sk_state = BT_BOUND;
@@ -134,6 +156,47 @@
 	if (la.l2_cid && la.l2_psm)
 		return -EINVAL;
 
+	if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+		return -EINVAL;
+
+	/* Check that the socket wasn't bound to something that
+	 * conflicts with the address given to connect(). If chan->src
+	 * is BDADDR_ANY it means bind() was never used, in which case
+	 * chan->src_type and la.l2_bdaddr_type do not need to match.
+	 */
+	if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
+	    bdaddr_type_is_le(la.l2_bdaddr_type)) {
+		/* Old user space versions will try to incorrectly bind
+		 * the ATT socket using BDADDR_BREDR. We need to accept
+		 * this and fix up the source address type only when
+		 * both the source CID and destination CID indicate
+		 * ATT. Anything else is an invalid combination.
+		 */
+		if (chan->scid != L2CAP_CID_ATT ||
+		    la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+			return -EINVAL;
+
+		/* We don't have the hdev available here to make a
+		 * better decision on random vs public, but since all
+	 * user space versions that exhibit this issue do not
+	 * support random local addresses anyway, assuming public
+		 * here is good enough.
+		 */
+		chan->src_type = BDADDR_LE_PUBLIC;
+	}
+
+	if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
+		return -EINVAL;
+
+	if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+		/* Connection oriented channels are not supported on LE */
+		if (la.l2_psm)
+			return -EINVAL;
+		/* We only allow ATT user space socket */
+		if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+			return -EINVAL;
+	}
+
 	err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
 				 &la.l2_bdaddr, la.l2_bdaddr_type);
 	if (err)
@@ -265,12 +328,14 @@
 
 	if (peer) {
 		la->l2_psm = chan->psm;
-		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+		bacpy(&la->l2_bdaddr, &chan->dst);
 		la->l2_cid = cpu_to_le16(chan->dcid);
+		la->l2_bdaddr_type = chan->dst_type;
 	} else {
 		la->l2_psm = chan->sport;
-		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+		bacpy(&la->l2_bdaddr, &chan->src);
 		la->l2_cid = cpu_to_le16(chan->scid);
+		la->l2_bdaddr_type = chan->src_type;
 	}
 
 	return 0;
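
Exposing l2_bdaddr_type in getsockname()/getpeername() completes the
round-trip for the address type that bind() and connect() now validate. From
user space an LE ATT socket is addressed like this (a sketch; htobs() and the
constants are assumed from the usual Bluetooth userspace headers):

    struct sockaddr_l2 addr = {
            .l2_family      = AF_BLUETOOTH,
            .l2_cid         = htobs(4),         /* L2CAP_CID_ATT */
            .l2_bdaddr_type = BDADDR_LE_PUBLIC, /* or BDADDR_LE_RANDOM */
            /* .l2_bdaddr filled in with the peer address;
             * l2_psm stays 0 -- LE rejects PSM-based connects */
    };
    /* connect(fd, (struct sockaddr *)&addr, sizeof(addr)); */
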
@@ -445,11 +510,6 @@
 		break;
 
 	case BT_CHANNEL_POLICY:
-		if (!enable_hs) {
-			err = -ENOPROTOOPT;
-			break;
-		}
-
 		if (put_user(chan->chan_policy, (u32 __user *) optval))
 			err = -EFAULT;
 		break;
@@ -665,10 +725,13 @@
 			break;
 		}
 
-		if (opt)
+		if (opt) {
 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
-		else
+			set_bit(FLAG_DEFER_SETUP, &chan->flags);
+		} else {
 			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+			clear_bit(FLAG_DEFER_SETUP, &chan->flags);
+		}
 		break;
 
 	case BT_FLUSHABLE:
@@ -683,7 +746,7 @@
 		}
 
 		if (opt == BT_FLUSHABLE_OFF) {
-			struct l2cap_conn *conn = chan->conn;
+			conn = chan->conn;
 			/* proceed further only when we have l2cap_conn and
 			   No Flush support in the LM */
 			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -720,11 +783,6 @@
 		break;
 
 	case BT_CHANNEL_POLICY:
-		if (!enable_hs) {
-			err = -ENOPROTOOPT;
-			break;
-		}
-
 		if (get_user(opt, (u32 __user *) optval)) {
 			err = -EFAULT;
 			break;
@@ -777,6 +835,12 @@
 	if (sk->sk_state != BT_CONNECTED)
 		return -ENOTCONN;
 
+	lock_sock(sk);
+	err = bt_sock_wait_ready(sk, msg->msg_flags);
+	release_sock(sk);
+	if (err)
+		return err;
+
 	l2cap_chan_lock(chan);
 	err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
 	l2cap_chan_unlock(chan);
@@ -799,8 +863,8 @@
 		pi->chan->state = BT_CONFIG;
 
 		__l2cap_connect_rsp_defer(pi->chan);
-		release_sock(sk);
-		return 0;
+		err = 0;
+		goto done;
 	}
 
 	release_sock(sk);
@@ -856,6 +920,38 @@
 	sock_put(sk);
 }
 
+static int __l2cap_wait_ack(struct sock *sk)
+{
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	DECLARE_WAITQUEUE(wait, current);
+	int err = 0;
+	int timeo = HZ/5;
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (chan->unacked_frames > 0 && chan->conn) {
+		if (!timeo)
+			timeo = HZ/5;
+
+		if (signal_pending(current)) {
+			err = sock_intr_errno(timeo);
+			break;
+		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock(sk);
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		err = sock_error(sk);
+		if (err)
+			break;
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(sk_sleep(sk), &wait);
+	return err;
+}
+
 static int l2cap_sock_shutdown(struct socket *sock, int how)
 {
 	struct sock *sk = sock->sk;
@@ -946,6 +1042,8 @@
 {
 	struct sock *sk, *parent = chan->data;
 
+	lock_sock(parent);
+
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(parent)) {
 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -963,18 +1061,19 @@
 
 	bt_accept_enqueue(parent, sk);
 
+	release_sock(parent);
+
 	return l2cap_pi(sk)->chan;
 }
 
 static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-	int err;
 	struct sock *sk = chan->data;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	int err;
 
 	lock_sock(sk);
 
-	if (pi->rx_busy_skb) {
+	if (l2cap_pi(sk)->rx_busy_skb) {
 		err = -ENOMEM;
 		goto done;
 	}
@@ -990,9 +1089,9 @@
 	 * acked and reassembled until there is buffer space
 	 * available.
 	 */
-	if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
-		pi->rx_busy_skb = skb;
-		l2cap_chan_busy(pi->chan, 1);
+	if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+		l2cap_pi(sk)->rx_busy_skb = skb;
+		l2cap_chan_busy(chan, 1);
 		err = 0;
 	}
 
@@ -1050,26 +1149,33 @@
 	release_sock(sk);
 }
 
-static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+				       int err)
 {
 	struct sock *sk = chan->data;
 
 	sk->sk_state = state;
+
+	if (err)
+		sk->sk_err = err;
 }
 
 static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
 					       unsigned long len, int nb)
 {
+	struct sock *sk = chan->data;
 	struct sk_buff *skb;
 	int err;
 
 	l2cap_chan_unlock(chan);
-	skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+	skb = bt_skb_send_alloc(sk, len, nb, &err);
 	l2cap_chan_lock(chan);
 
 	if (!skb)
 		return ERR_PTR(err);
 
+	bt_cb(skb)->chan = chan;
+
 	return skb;
 }
 
@@ -1095,11 +1201,39 @@
 
 static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->data;
-	struct sock *parent = bt_sk(sk)->parent;
+	struct sock *parent, *sk = chan->data;
 
+	lock_sock(sk);
+
+	parent = bt_sk(sk)->parent;
 	if (parent)
 		parent->sk_data_ready(parent, 0);
+
+	release_sock(sk);
+}
+
+static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk = chan->data;
+
+	clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+	sk->sk_state_change(sk);
+}
+
+static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk = chan->data;
+
+	lock_sock(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;
+	release_sock(sk);
+}
+
+static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk = chan->data;
+
+	return sk->sk_sndtimeo;
 }
 
 static struct l2cap_ops l2cap_chan_ops = {
@@ -1111,6 +1245,9 @@
 	.state_change	= l2cap_sock_state_change_cb,
 	.ready		= l2cap_sock_ready_cb,
 	.defer		= l2cap_sock_defer_cb,
+	.resume		= l2cap_sock_resume_cb,
+	.set_shutdown	= l2cap_sock_set_shutdown_cb,
+	.get_sndtimeo	= l2cap_sock_get_sndtimeo_cb,
 	.alloc_skb	= l2cap_sock_alloc_skb_cb,
 };
 
@@ -1120,6 +1257,7 @@
 
 	if (l2cap_pi(sk)->chan)
 		l2cap_chan_put(l2cap_pi(sk)->chan);
+
 	if (l2cap_pi(sk)->rx_busy_skb) {
 		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
 		l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -1129,10 +1267,22 @@
 	skb_queue_purge(&sk->sk_write_queue);
 }
 
+static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
+			       int *msg_namelen)
+{
+	struct sockaddr_l2 *la = (struct sockaddr_l2 *) msg_name;
+
+	memset(la, 0, sizeof(struct sockaddr_l2));
+	la->l2_family = AF_BLUETOOTH;
+	la->l2_psm = bt_cb(skb)->psm;
+	bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+
+	*msg_namelen = sizeof(struct sockaddr_l2);
+}
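
l2cap_skb_msg_name() is the consumer of the per-skb metadata stashed in
l2cap_conless_channel(): the remote BD_ADDR and PSM ride in the skb control
buffer so that recvmsg() on a connectionless (SOCK_DGRAM) socket can report
the sender. The assumed call site in the generic receive path looks roughly
like:

    /* Sketch: if the protocol installed a hook, let it fill msg_name */
    if (bt_sk(sk)->skb_msg_name)
            bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                    &msg->msg_namelen);
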
+
 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
-	struct l2cap_chan *chan = pi->chan;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
 	BT_DBG("sk %p", sk);
 
@@ -1156,13 +1306,13 @@
 
 		security_sk_clone(parent, sk);
 	} else {
-
 		switch (sk->sk_type) {
 		case SOCK_RAW:
 			chan->chan_type = L2CAP_CHAN_RAW;
 			break;
 		case SOCK_DGRAM:
 			chan->chan_type = L2CAP_CHAN_CONN_LESS;
+			bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;
 			break;
 		case SOCK_SEQPACKET:
 		case SOCK_STREAM:
@@ -1224,8 +1374,6 @@
 
 	l2cap_chan_hold(chan);
 
-	chan->sk = sk;
-
 	l2cap_pi(sk)->chan = chan;
 
 	return sk;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index fedc539..074d836 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -30,12 +30,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
 
-bool enable_hs;
+#include "smp.h"
 
 #define MGMT_VERSION	1
-#define MGMT_REVISION	3
+#define MGMT_REVISION	4
 
 static const u16 mgmt_commands[] = {
 	MGMT_OP_READ_INDEX_LIST,
@@ -76,6 +75,10 @@
 	MGMT_OP_BLOCK_DEVICE,
 	MGMT_OP_UNBLOCK_DEVICE,
 	MGMT_OP_SET_DEVICE_ID,
+	MGMT_OP_SET_ADVERTISING,
+	MGMT_OP_SET_BREDR,
+	MGMT_OP_SET_STATIC_ADDRESS,
+	MGMT_OP_SET_SCAN_PARAMS,
 };
 
 static const u16 mgmt_events[] = {
@@ -181,11 +184,6 @@
 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
 };
 
-bool mgmt_valid_hdev(struct hci_dev *hdev)
-{
-	return hdev->dev_type == HCI_BREDR;
-}
-
 static u8 mgmt_status(u8 hci_status)
 {
 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -321,10 +319,8 @@
 
 	count = 0;
 	list_for_each_entry(d, &hci_dev_list, list) {
-		if (!mgmt_valid_hdev(d))
-			continue;
-
-		count++;
+		if (d->dev_type == HCI_BREDR)
+			count++;
 	}
 
 	rp_len = sizeof(*rp) + (2 * count);
@@ -339,11 +335,13 @@
 		if (test_bit(HCI_SETUP, &d->dev_flags))
 			continue;
 
-		if (!mgmt_valid_hdev(d))
+		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
 			continue;
 
-		rp->index[count++] = cpu_to_le16(d->id);
-		BT_DBG("Added hci%u", d->id);
+		if (d->dev_type == HCI_BREDR) {
+			rp->index[count++] = cpu_to_le16(d->id);
+			BT_DBG("Added hci%u", d->id);
+		}
 	}
 
 	rp->num_controllers = cpu_to_le16(count);
@@ -366,9 +364,6 @@
 	settings |= MGMT_SETTING_POWERED;
 	settings |= MGMT_SETTING_PAIRABLE;
 
-	if (lmp_ssp_capable(hdev))
-		settings |= MGMT_SETTING_SSP;
-
 	if (lmp_bredr_capable(hdev)) {
 		settings |= MGMT_SETTING_CONNECTABLE;
 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
@@ -376,13 +371,17 @@
 		settings |= MGMT_SETTING_DISCOVERABLE;
 		settings |= MGMT_SETTING_BREDR;
 		settings |= MGMT_SETTING_LINK_SECURITY;
+
+		if (lmp_ssp_capable(hdev)) {
+			settings |= MGMT_SETTING_SSP;
+			settings |= MGMT_SETTING_HS;
+		}
 	}
 
-	if (enable_hs)
-		settings |= MGMT_SETTING_HS;
-
-	if (lmp_le_capable(hdev))
+	if (lmp_le_capable(hdev)) {
 		settings |= MGMT_SETTING_LE;
+		settings |= MGMT_SETTING_ADVERTISING;
+	}
 
 	return settings;
 }
@@ -406,7 +405,7 @@
 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
 		settings |= MGMT_SETTING_PAIRABLE;
 
-	if (lmp_bredr_capable(hdev))
+	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		settings |= MGMT_SETTING_BREDR;
 
 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -421,6 +420,9 @@
 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
 		settings |= MGMT_SETTING_HS;
 
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+		settings |= MGMT_SETTING_ADVERTISING;
+
 	return settings;
 }
 
@@ -534,6 +536,156 @@
 	return ptr;
 }
 
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+
+	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+		if (cmd->opcode == opcode)
+			return cmd;
+	}
+
+	return NULL;
+}
+
+static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+	u8 ad_len = 0;
+	size_t name_len;
+
+	name_len = strlen(hdev->dev_name);
+	if (name_len > 0) {
+		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+		if (name_len > max_len) {
+			name_len = max_len;
+			ptr[1] = EIR_NAME_SHORT;
+		} else {
+			ptr[1] = EIR_NAME_COMPLETE;
+		}
+
+		ptr[0] = name_len + 1;
+
+		memcpy(ptr + 2, hdev->dev_name, name_len);
+
+		ad_len += (name_len + 2);
+		ptr += (name_len + 2);
+	}
+
+	return ad_len;
+}
+
+static void update_scan_rsp_data(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_scan_rsp_data cp;
+	u8 len;
+
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = create_scan_rsp_data(hdev, cp.data);
+
+	if (hdev->scan_rsp_data_len == len &&
+	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+		return;
+
+	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+	hdev->scan_rsp_data_len = len;
+
+	cp.length = len;
+
+	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 get_adv_discov_flags(struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+
+	/* If there's a pending mgmt command the flags will not yet have
+	 * their final values, so check for this first.
+	 */
+	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+	if (cmd) {
+		struct mgmt_mode *cp = cmd->param;
+		if (cp->val == 0x01)
+			return LE_AD_GENERAL;
+		else if (cp->val == 0x02)
+			return LE_AD_LIMITED;
+	} else {
+		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+			return LE_AD_LIMITED;
+		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+			return LE_AD_GENERAL;
+	}
+
+	return 0;
+}
+
+static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+{
+	u8 ad_len = 0, flags = 0;
+
+	flags |= get_adv_discov_flags(hdev);
+
+	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		if (lmp_le_br_capable(hdev))
+			flags |= LE_AD_SIM_LE_BREDR_CTRL;
+		if (lmp_host_le_br_capable(hdev))
+			flags |= LE_AD_SIM_LE_BREDR_HOST;
+	} else {
+		flags |= LE_AD_NO_BREDR;
+	}
+
+	if (flags) {
+		BT_DBG("adv flags 0x%02x", flags);
+
+		ptr[0] = 2;
+		ptr[1] = EIR_FLAGS;
+		ptr[2] = flags;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+		ptr[0] = 2;
+		ptr[1] = EIR_TX_POWER;
+		ptr[2] = (u8) hdev->adv_tx_power;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	return ad_len;
+}
+
+static void update_adv_data(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_adv_data cp;
+	u8 len;
+
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = create_adv_data(hdev, cp.data);
+
+	if (hdev->adv_data_len == len &&
+	    memcmp(cp.data, hdev->adv_data, len) == 0)
+		return;
+
+	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+	hdev->adv_data_len = len;
+
+	cp.length = len;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
 	u8 *ptr = data;
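
create_adv_data() and create_scan_rsp_data() emit standard LE advertising
data structures: each element is a one-byte length covering type plus
payload, a one-byte type code (the EIR_* values shared with classic EIR),
then the payload. A self-contained sketch of appending one element:

    #include <stdint.h>
    #include <string.h>

    /* Append one AD structure: [len][type][payload]; returns bytes used */
    static size_t ad_append(uint8_t *ptr, size_t room, uint8_t type,
                            const void *data, size_t len)
    {
            if (room < len + 2)
                    return 0;               /* element would not fit */

            ptr[0] = (uint8_t)(len + 1);    /* length counts type + payload */
            ptr[1] = type;
            memcpy(ptr + 2, data, len);

            return len + 2;
    }
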
@@ -632,6 +784,9 @@
 	if (!hdev_is_powered(hdev))
 		return;
 
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return;
+
 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
 		return;
 
@@ -639,6 +794,9 @@
 	cod[1] = hdev->major_class;
 	cod[2] = get_service_classes(hdev);
 
+	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+		cod[1] |= 0x20;
+
 	if (memcmp(cod, hdev->dev_class, 3) == 0)
 		return;
 
@@ -763,18 +921,6 @@
 	}
 }
 
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
-{
-	struct pending_cmd *cmd;
-
-	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
-		if (cmd->opcode == opcode)
-			return cmd;
-	}
-
-	return NULL;
-}
-
 static void mgmt_pending_remove(struct pending_cmd *cmd)
 {
 	list_del(&cmd->list);
@@ -804,6 +950,12 @@
 
 	hci_dev_lock(hdev);
 
+	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+				 MGMT_STATUS_BUSY);
+		goto failed;
+	}
+
 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
 		cancel_delayed_work(&hdev->power_off);
 
@@ -820,12 +972,6 @@
 		goto failed;
 	}
 
-	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
-		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
-				 MGMT_STATUS_BUSY);
-		goto failed;
-	}
-
 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
@@ -883,27 +1029,141 @@
 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
 }
 
+struct cmd_lookup {
+	struct sock *sk;
+	struct hci_dev *hdev;
+	u8 mgmt_status;
+};
+
+static void settings_rsp(struct pending_cmd *cmd, void *data)
+{
+	struct cmd_lookup *match = data;
+
+	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+
+	list_del(&cmd->list);
+
+	if (match->sk == NULL) {
+		match->sk = cmd->sk;
+		sock_hold(match->sk);
+	}
+
+	mgmt_pending_free(cmd);
+}
+
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+	u8 *status = data;
+
+	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+	mgmt_pending_remove(cmd);
+}
+
+static u8 mgmt_bredr_support(struct hci_dev *hdev)
+{
+	if (!lmp_bredr_capable(hdev))
+		return MGMT_STATUS_NOT_SUPPORTED;
+	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return MGMT_STATUS_REJECTED;
+	else
+		return MGMT_STATUS_SUCCESS;
+}
+
+static u8 mgmt_le_support(struct hci_dev *hdev)
+{
+	if (!lmp_le_capable(hdev))
+		return MGMT_STATUS_NOT_SUPPORTED;
+	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return MGMT_STATUS_REJECTED;
+	else
+		return MGMT_STATUS_SUCCESS;
+}
+
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+{
+	struct pending_cmd *cmd;
+	struct mgmt_mode *cp;
+	struct hci_request req;
+	bool changed;
+
+	BT_DBG("status 0x%02x", status);
+
+	hci_dev_lock(hdev);
+
+	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+	if (!cmd)
+		goto unlock;
+
+	if (status) {
+		u8 mgmt_err = mgmt_status(status);
+		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+		goto remove_cmd;
+	}
+
+	cp = cmd->param;
+	if (cp->val) {
+		changed = !test_and_set_bit(HCI_DISCOVERABLE,
+					    &hdev->dev_flags);
+
+		if (hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+					   to);
+		}
+	} else {
+		changed = test_and_clear_bit(HCI_DISCOVERABLE,
+					     &hdev->dev_flags);
+	}
+
+	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+
+	if (changed)
+		new_settings(hdev, cmd->sk);
+
+	/* When the discoverable mode changes, make sure that the
+	 * class of device has the limited discoverable bit set
+	 * correctly.
+	 */
+	hci_req_init(&req, hdev);
+	update_class(&req);
+	hci_req_run(&req, NULL);
+
+remove_cmd:
+	mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 			    u16 len)
 {
 	struct mgmt_cp_set_discoverable *cp = data;
 	struct pending_cmd *cmd;
+	struct hci_request req;
 	u16 timeout;
 	u8 scan;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
-	if (!lmp_bredr_capable(hdev))
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-				 MGMT_STATUS_NOT_SUPPORTED);
+				  MGMT_STATUS_REJECTED);
 
-	if (cp->val != 0x00 && cp->val != 0x01)
+	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				  MGMT_STATUS_INVALID_PARAMS);
 
 	timeout = __le16_to_cpu(cp->timeout);
-	if (!cp->val && timeout > 0)
+
+	/* Disabling discoverable requires that no timeout is set,
+	 * and enabling limited discoverable requires a timeout.
+	 */
+	if ((cp->val == 0x00 && timeout > 0) ||
+	    (cp->val == 0x02 && timeout == 0))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				  MGMT_STATUS_INVALID_PARAMS);
 
@@ -931,6 +1191,10 @@
 	if (!hdev_is_powered(hdev)) {
 		bool changed = false;
 
+		/* Setting limited discoverable when powered off is
+		 * not a valid operation since it requires a timeout,
+		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
+		 */
 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 			changed = true;
@@ -946,16 +1210,20 @@
 		goto failed;
 	}
 
-	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
-		if (hdev->discov_timeout > 0) {
-			cancel_delayed_work(&hdev->discov_off);
-			hdev->discov_timeout = 0;
-		}
+	/* If the current mode is the same, just update the timeout
+	 * value with the new one. If only the timeout gets updated,
+	 * no HCI transactions are needed.
+	 */
+	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+					  &hdev->dev_flags)) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = timeout;
 
-		if (cp->val && timeout > 0) {
-			hdev->discov_timeout = timeout;
+		if (cp->val && hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
-				msecs_to_jiffies(hdev->discov_timeout * 1000));
+					   to);
 		}
 
 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -968,20 +1236,66 @@
 		goto failed;
 	}
 
+	/* Cancel any discoverable timeout that might still be
+	 * active and store the new timeout value. The timeout is
+	 * armed in the complete handler.
+	 */
+	cancel_delayed_work(&hdev->discov_off);
+	hdev->discov_timeout = timeout;
+
+	/* Limited discoverable mode */
+	if (cp->val == 0x02)
+		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	else
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+	hci_req_init(&req, hdev);
+
+	/* The procedure for LE-only controllers is much simpler - just
+	 * update the advertising data.
+	 */
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		goto update_ad;
+
 	scan = SCAN_PAGE;
 
-	if (cp->val)
-		scan |= SCAN_INQUIRY;
-	else
-		cancel_delayed_work(&hdev->discov_off);
+	if (cp->val) {
+		struct hci_cp_write_current_iac_lap hci_cp;
 
-	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+		if (cp->val == 0x02) {
+			/* Limited discoverable mode */
+			hci_cp.num_iac = 2;
+			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
+			hci_cp.iac_lap[1] = 0x8b;
+			hci_cp.iac_lap[2] = 0x9e;
+			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
+			hci_cp.iac_lap[4] = 0x8b;
+			hci_cp.iac_lap[5] = 0x9e;
+		} else {
+			/* General discoverable mode */
+			hci_cp.num_iac = 1;
+			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
+			hci_cp.iac_lap[1] = 0x8b;
+			hci_cp.iac_lap[2] = 0x9e;
+		}
+
+		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+			    (hci_cp.num_iac * 3) + 1, &hci_cp);
+
+		scan |= SCAN_INQUIRY;
+	} else {
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	}
+
+	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+update_ad:
+	update_adv_data(&req);
+
+	err = hci_req_run(&req, set_discoverable_complete);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
-	if (cp->val)
-		hdev->discov_timeout = timeout;
-
 failed:
 	hci_dev_unlock(hdev);
 	return err;
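
The IAC LAP bytes hardcoded above are the little-endian encodings of the
General and Limited Inquiry Access Codes (GIAC 0x9e8b33 and LIAC 0x9e8b00):
limited discoverable announces both LAPs, general discoverable only the GIAC.
A small standalone sketch, not from the patch, of packing a 24-bit LAP in
that byte order:

	#include <stdint.h>

	/* Pack a 24-bit LAP into the little-endian layout used by the
	 * HCI Write Current IAC LAP command; buf must hold 3 bytes.
	 */
	static void lap_to_bytes(uint32_t lap, uint8_t buf[3])
	{
		buf[0] = lap & 0xff;		/* 0x33 for GIAC 0x9e8b33 */
		buf[1] = (lap >> 8) & 0xff;	/* 0x8b */
		buf[2] = (lap >> 16) & 0xff;	/* 0x9e */
	}
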
@@ -993,6 +1307,9 @@
 	struct hci_cp_write_page_scan_activity acp;
 	u8 type;
 
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return;
+
 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
 
@@ -1019,9 +1336,55 @@
 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 }
 
+static u8 get_adv_type(struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+	bool connectable;
+
+	/* If there's a pending mgmt command the flag will not yet have
+	 * its final value, so check for this first.
+	 */
+	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+	if (cmd) {
+		struct mgmt_mode *cp = cmd->param;
+		connectable = !!cp->val;
+	} else {
+		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	}
+
+	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+}
+
+static void enable_advertising(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_adv_param cp;
+	u8 enable = 0x01;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.min_interval = __constant_cpu_to_le16(0x0800);
+	cp.max_interval = __constant_cpu_to_le16(0x0800);
+	cp.type = get_adv_type(hdev);
+	cp.own_address_type = hdev->own_addr_type;
+	cp.channel_map = 0x07;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static void disable_advertising(struct hci_request *req)
+{
+	u8 enable = 0x00;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
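The advertising interval programmed by enable_advertising() above is in
units of 0.625 ms, so the fixed 0x0800 used for both min_interval and
max_interval corresponds to 1.28 seconds. A standalone check of that
conversion (illustrative, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int interval = 0x0800;		/* units of 0.625 ms */
		unsigned int usecs = interval * 625;	/* 2048 * 625 = 1280000 us */

		printf("adv interval: %u ms\n", usecs / 1000);
		return 0;
	}
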
 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct pending_cmd *cmd;
+	struct mgmt_mode *cp;
+	bool changed;
 
 	BT_DBG("status 0x%02x", status);
 
@@ -1031,14 +1394,56 @@
 	if (!cmd)
 		goto unlock;
 
+	if (status) {
+		u8 mgmt_err = mgmt_status(status);
+		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+		goto remove_cmd;
+	}
+
+	cp = cmd->param;
+	if (cp->val)
+		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+
 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
 
+	if (changed)
+		new_settings(hdev, cmd->sk);
+
+remove_cmd:
 	mgmt_pending_remove(cmd);
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
+static int set_connectable_update_settings(struct hci_dev *hdev,
+					   struct sock *sk, u8 val)
+{
+	bool changed = false;
+	int err;
+
+	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+		changed = true;
+
+	if (val) {
+		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	} else {
+		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+	}
+
+	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+	if (err < 0)
+		return err;
+
+	if (changed)
+		return new_settings(hdev, sk);
+
+	return 0;
+}
+
 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 			   u16 len)
 {
@@ -1050,9 +1455,10 @@
 
 	BT_DBG("request for %s", hdev->name);
 
-	if (!lmp_bredr_capable(hdev))
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
-				  MGMT_STATUS_NOT_SUPPORTED);
+				  MGMT_STATUS_REJECTED);
 
 	if (cp->val != 0x00 && cp->val != 0x01)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
@@ -1061,25 +1467,7 @@
 	hci_dev_lock(hdev);
 
 	if (!hdev_is_powered(hdev)) {
-		bool changed = false;
-
-		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-
-		if (cp->val) {
-			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-		} else {
-			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-		}
-
-		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
-		if (err < 0)
-			goto failed;
-
-		if (changed)
-			err = new_settings(hdev, sk);
-
+		err = set_connectable_update_settings(hdev, sk, cp->val);
 		goto failed;
 	}
 
@@ -1090,30 +1478,37 @@
 		goto failed;
 	}
 
-	if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
-		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
-		goto failed;
-	}
-
 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
 	}
 
-	if (cp->val) {
-		scan = SCAN_PAGE;
-	} else {
-		scan = 0;
-
-		if (test_bit(HCI_ISCAN, &hdev->flags) &&
-		    hdev->discov_timeout > 0)
-			cancel_delayed_work(&hdev->discov_off);
-	}
-
 	hci_req_init(&req, hdev);
 
-	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	/* If BR/EDR is not enabled and we disable advertising as a
+	 * by-product of disabling connectable, we need to update the
+	 * advertising flags.
+	 */
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		if (!cp->val) {
+			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+		}
+		update_adv_data(&req);
+	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
+		if (cp->val) {
+			scan = SCAN_PAGE;
+		} else {
+			scan = 0;
+
+			if (test_bit(HCI_ISCAN, &hdev->flags) &&
+			    hdev->discov_timeout > 0)
+				cancel_delayed_work(&hdev->discov_off);
+		}
+
+		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	}
 
 	/* If we're going from non-connectable to connectable or
 	 * vice-versa when fast connectable is enabled ensure that fast
@@ -1124,9 +1519,20 @@
 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
 		write_fast_connectable(&req, false);
 
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
+	    hci_conn_num(hdev, LE_LINK) == 0) {
+		disable_advertising(&req);
+		enable_advertising(&req);
+	}
+
 	err = hci_req_run(&req, set_connectable_complete);
-	if (err < 0)
+	if (err < 0) {
 		mgmt_pending_remove(cmd);
+		if (err == -ENODATA)
+			err = set_connectable_update_settings(hdev, sk,
+							      cp->val);
+		goto failed;
+	}
 
 failed:
 	hci_dev_unlock(hdev);
@@ -1137,6 +1543,7 @@
 			u16 len)
 {
 	struct mgmt_mode *cp = data;
+	bool changed;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
@@ -1148,17 +1555,18 @@
 	hci_dev_lock(hdev);
 
 	if (cp->val)
-		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
 	else
-		clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
 
 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
 	if (err < 0)
-		goto failed;
+		goto unlock;
 
-	err = new_settings(hdev, sk);
+	if (changed)
+		err = new_settings(hdev, sk);
 
-failed:
+unlock:
 	hci_dev_unlock(hdev);
 	return err;
 }
@@ -1168,14 +1576,15 @@
 {
 	struct mgmt_mode *cp = data;
 	struct pending_cmd *cmd;
-	u8 val;
+	u8 val, status;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
-	if (!lmp_bredr_capable(hdev))
+	status = mgmt_bredr_support(hdev);
+	if (status)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
-				  MGMT_STATUS_NOT_SUPPORTED);
+				  status);
 
 	if (cp->val != 0x00 && cp->val != 0x01)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
@@ -1236,11 +1645,15 @@
 {
 	struct mgmt_mode *cp = data;
 	struct pending_cmd *cmd;
-	u8 val;
+	u8 status;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
+	status = mgmt_bredr_support(hdev);
+	if (status)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+
 	if (!lmp_ssp_capable(hdev))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
 				  MGMT_STATUS_NOT_SUPPORTED);
@@ -1251,14 +1664,20 @@
 
 	hci_dev_lock(hdev);
 
-	val = !!cp->val;
-
 	if (!hdev_is_powered(hdev)) {
-		bool changed = false;
+		bool changed;
 
-		if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
-			change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
-			changed = true;
+		if (cp->val) {
+			changed = !test_and_set_bit(HCI_SSP_ENABLED,
+						    &hdev->dev_flags);
+		} else {
+			changed = test_and_clear_bit(HCI_SSP_ENABLED,
+						     &hdev->dev_flags);
+			if (!changed)
+				changed = test_and_clear_bit(HCI_HS_ENABLED,
+							     &hdev->dev_flags);
+			else
+				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
 		}
 
 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -1271,13 +1690,14 @@
 		goto failed;
 	}
 
-	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
+	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
+	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
 				 MGMT_STATUS_BUSY);
 		goto failed;
 	}
 
-	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
+	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
 		goto failed;
 	}
@@ -1288,7 +1708,7 @@
 		goto failed;
 	}
 
-	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
 	if (err < 0) {
 		mgmt_pending_remove(cmd);
 		goto failed;
@@ -1302,23 +1722,90 @@
 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
 	struct mgmt_mode *cp = data;
+	bool changed;
+	u8 status;
+	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
-	if (!enable_hs)
+	status = mgmt_bredr_support(hdev);
+	if (status)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+
+	if (!lmp_ssp_capable(hdev))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
+	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+				  MGMT_STATUS_REJECTED);
+
 	if (cp->val != 0x00 && cp->val != 0x01)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
 				  MGMT_STATUS_INVALID_PARAMS);
 
-	if (cp->val)
-		set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
-	else
-		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+	hci_dev_lock(hdev);
 
-	return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+	if (cp->val) {
+		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+	} else {
+		if (hdev_is_powered(hdev)) {
+			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+					 MGMT_STATUS_REJECTED);
+			goto unlock;
+		}
+
+		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+	}
+
+	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+	if (err < 0)
+		goto unlock;
+
+	if (changed)
+		err = new_settings(hdev, sk);
+
+unlock:
+	hci_dev_unlock(hdev);
+	return err;
+}
+
+static void le_enable_complete(struct hci_dev *hdev, u8 status)
+{
+	struct cmd_lookup match = { NULL, hdev };
+
+	if (status) {
+		u8 mgmt_err = mgmt_status(status);
+
+		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+				     &mgmt_err);
+		return;
+	}
+
+	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+	new_settings(hdev, match.sk);
+
+	if (match.sk)
+		sock_put(match.sk);
+
+	/* Make sure the controller has a good default for
+	 * advertising data. Restrict the update to when LE
+	 * has actually been enabled. During power on, the
+	 * update in powered_update_hci will take care of it.
+	 */
+	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+		struct hci_request req;
+
+		hci_dev_lock(hdev);
+
+		hci_req_init(&req, hdev);
+		update_adv_data(&req);
+		update_scan_rsp_data(&req);
+		hci_req_run(&req, NULL);
+
+		hci_dev_unlock(hdev);
+	}
 }
 
 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -1326,6 +1813,7 @@
 	struct mgmt_mode *cp = data;
 	struct hci_cp_write_le_host_supported hci_cp;
 	struct pending_cmd *cmd;
+	struct hci_request req;
 	int err;
 	u8 val, enabled;
 
@@ -1340,7 +1828,7 @@
 				  MGMT_STATUS_INVALID_PARAMS);
 
 	/* LE-only devices do not allow toggling LE on/off */
-	if (!lmp_bredr_capable(hdev))
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
 				  MGMT_STATUS_REJECTED);
 
@@ -1357,6 +1845,11 @@
 			changed = true;
 		}
 
+		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+			changed = true;
+		}
+
 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
 		if (err < 0)
 			goto unlock;
@@ -1367,7 +1860,8 @@
 		goto unlock;
 	}
 
-	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
+	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
 				 MGMT_STATUS_BUSY);
 		goto unlock;
@@ -1379,15 +1873,22 @@
 		goto unlock;
 	}
 
+	hci_req_init(&req, hdev);
+
 	memset(&hci_cp, 0, sizeof(hci_cp));
 
 	if (val) {
 		hci_cp.le = val;
 		hci_cp.simul = lmp_le_br_capable(hdev);
+	} else {
+		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+			disable_advertising(&req);
 	}
 
-	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
-			   &hci_cp);
+	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+		    &hci_cp);
+
+	err = hci_req_run(&req, le_enable_complete);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
@@ -1706,6 +2207,12 @@
 	u16 key_count, expected_len;
 	int i;
 
+	BT_DBG("request for %s", hdev->name);
+
+	if (!lmp_bredr_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	key_count = __le16_to_cpu(cp->key_count);
 
 	expected_len = sizeof(*cp) + key_count *
@@ -2515,8 +3022,11 @@
 		update_eir(&req);
 	}
 
+	/* The name is stored in the scan response data, so there
+	 * is no need to update the advertising data here.
+	 */
 	if (lmp_le_capable(hdev))
-		hci_update_ad(&req);
+		update_scan_rsp_data(&req);
 
 	err = hci_req_run(&req, set_name_complete);
 	if (err < 0)
@@ -2685,6 +3195,7 @@
 	struct hci_request req;
 	/* General inquiry access code (GIAC) */
 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	u8 status;
 	int err;
 
 	BT_DBG("%s", hdev->name);
@@ -2721,9 +3232,10 @@
 
 	switch (hdev->discovery.type) {
 	case DISCOV_TYPE_BREDR:
-		if (!lmp_bredr_capable(hdev)) {
+		status = mgmt_bredr_support(hdev);
+		if (status) {
 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-					 MGMT_STATUS_NOT_SUPPORTED);
+					 status);
 			mgmt_pending_remove(cmd);
 			goto failed;
 		}
@@ -2745,22 +3257,23 @@
 
 	case DISCOV_TYPE_LE:
 	case DISCOV_TYPE_INTERLEAVED:
-		if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+		status = mgmt_le_support(hdev);
+		if (status) {
 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-					 MGMT_STATUS_NOT_SUPPORTED);
+					 status);
 			mgmt_pending_remove(cmd);
 			goto failed;
 		}
 
 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
-		    !lmp_bredr_capable(hdev)) {
+		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
 					 MGMT_STATUS_NOT_SUPPORTED);
 			mgmt_pending_remove(cmd);
 			goto failed;
 		}
 
-		if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
+		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
 			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
 					 MGMT_STATUS_REJECTED);
 			mgmt_pending_remove(cmd);
@@ -2778,6 +3291,7 @@
 		param_cp.type = LE_SCAN_ACTIVE;
 		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
 		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+		param_cp.own_address_type = hdev->own_addr_type;
 		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
 			    &param_cp);
 
@@ -3065,6 +3579,186 @@
 	return err;
 }
 
+static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+{
+	struct cmd_lookup match = { NULL, hdev };
+
+	if (status) {
+		u8 mgmt_err = mgmt_status(status);
+
+		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
+				     cmd_status_rsp, &mgmt_err);
+		return;
+	}
+
+	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
+			     &match);
+
+	new_settings(hdev, match.sk);
+
+	if (match.sk)
+		sock_put(match.sk);
+}
+
+static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
+			   u16 len)
+{
+	struct mgmt_mode *cp = data;
+	struct pending_cmd *cmd;
+	struct hci_request req;
+	u8 val, enabled, status;
+	int err;
+
+	BT_DBG("request for %s", hdev->name);
+
+	status = mgmt_le_support(hdev);
+	if (status)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+				  status);
+
+	if (cp->val != 0x00 && cp->val != 0x01)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+				  MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	val = !!cp->val;
+	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+	/* Under the following conditions we should not do any HCI
+	 * communication but instead directly send a mgmt response
+	 * to user space (after toggling the flag if necessary).
+	 */
+	if (!hdev_is_powered(hdev) || val == enabled ||
+	    hci_conn_num(hdev, LE_LINK) > 0) {
+		bool changed = false;
+
+		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
+			changed = true;
+		}
+
+		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
+		if (err < 0)
+			goto unlock;
+
+		if (changed)
+			err = new_settings(hdev, sk);
+
+		goto unlock;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+				 MGMT_STATUS_BUSY);
+		goto unlock;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	hci_req_init(&req, hdev);
+
+	if (val)
+		enable_advertising(&req);
+	else
+		disable_advertising(&req);
+
+	err = hci_req_run(&req, set_advertising_complete);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+	return err;
+}
+
+static int set_static_address(struct sock *sk, struct hci_dev *hdev,
+			      void *data, u16 len)
+{
+	struct mgmt_cp_set_static_address *cp = data;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	if (!lmp_le_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
+	if (hdev_is_powered(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+				  MGMT_STATUS_REJECTED);
+
+	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
+		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
+			return cmd_status(sk, hdev->id,
+					  MGMT_OP_SET_STATIC_ADDRESS,
+					  MGMT_STATUS_INVALID_PARAMS);
+
+		/* Two most significant bits shall be set */
+		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
+			return cmd_status(sk, hdev->id,
+					  MGMT_OP_SET_STATIC_ADDRESS,
+					  MGMT_STATUS_INVALID_PARAMS);
+	}
+
+	hci_dev_lock(hdev);
+
+	bacpy(&hdev->static_addr, &cp->bdaddr);
+
+	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+
+	hci_dev_unlock(hdev);
+
+	return err;
+}
+
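The bdaddr.b[5] check above enforces the spec rule that an LE static random
address must have the two most significant bits of its most significant byte
set to 1; the address is stored little-endian, so byte 5 is the most
significant one. The same test on a raw 6-byte address, as a standalone
sketch with assumed userspace types:

	#include <stdbool.h>
	#include <stdint.h>

	/* True if the little-endian 6-byte address carries the 11 prefix
	 * required of static random addresses.
	 */
	static bool is_static_random_addr(const uint8_t addr[6])
	{
		return (addr[5] & 0xc0) == 0xc0;
	}
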
+static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
+			   void *data, u16 len)
+{
+	struct mgmt_cp_set_scan_params *cp = data;
+	__u16 interval, window;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	if (!lmp_le_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
+	interval = __le16_to_cpu(cp->interval);
+
+	if (interval < 0x0004 || interval > 0x4000)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+				  MGMT_STATUS_INVALID_PARAMS);
+
+	window = __le16_to_cpu(cp->window);
+
+	if (window < 0x0004 || window > 0x4000)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+				  MGMT_STATUS_INVALID_PARAMS);
+
+	if (window > interval)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+				  MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	hdev->le_scan_interval = interval;
+	hdev->le_scan_window = window;
+
+	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+
+	hci_dev_unlock(hdev);
+
+	return err;
+}
+
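The bounds enforced in set_scan_params() above come from the LE scan
parameter encoding: interval and window are in units of 0.625 ms, may range
from 0x0004 (2.5 ms) to 0x4000 (10.24 s), and the window must fit inside the
interval. A standalone mirror of that validation (illustrative only):

	#include <stdbool.h>
	#include <stdint.h>

	static bool scan_params_valid(uint16_t interval, uint16_t window)
	{
		if (interval < 0x0004 || interval > 0x4000)
			return false;
		if (window < 0x0004 || window > 0x4000)
			return false;
		return window <= interval;	/* window may not exceed interval */
	}
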
 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct pending_cmd *cmd;
@@ -3108,7 +3802,8 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+	    hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
@@ -3162,6 +3857,148 @@
 	return err;
 }
 
+static void set_bredr_scan(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	u8 scan = 0;
+
+	/* Ensure that fast connectable is disabled. This function will
+	 * not do anything if the page scan parameters are already what
+	 * they should be.
+	 */
+	write_fast_connectable(req, false);
+
+	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+		scan |= SCAN_PAGE;
+	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+		scan |= SCAN_INQUIRY;
+
+	if (scan)
+		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+{
+	struct pending_cmd *cmd;
+
+	BT_DBG("status 0x%02x", status);
+
+	hci_dev_lock(hdev);
+
+	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+	if (!cmd)
+		goto unlock;
+
+	if (status) {
+		u8 mgmt_err = mgmt_status(status);
+
+		/* We need to restore the flag if related HCI commands
+		 * failed.
+		 */
+		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+	} else {
+		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
+		new_settings(hdev, cmd->sk);
+	}
+
+	mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
+static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+	struct mgmt_mode *cp = data;
+	struct pending_cmd *cmd;
+	struct hci_request req;
+	int err;
+
+	BT_DBG("request for %s", hdev->name);
+
+	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+				  MGMT_STATUS_REJECTED);
+
+	if (cp->val != 0x00 && cp->val != 0x01)
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+				  MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+		goto unlock;
+	}
+
+	if (!hdev_is_powered(hdev)) {
+		if (!cp->val) {
+			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+		}
+
+		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+		if (err < 0)
+			goto unlock;
+
+		err = new_settings(hdev, sk);
+		goto unlock;
+	}
+
+	/* Reject disabling when powered on */
+	if (!cp->val) {
+		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+				 MGMT_STATUS_REJECTED);
+		goto unlock;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
+		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+				 MGMT_STATUS_BUSY);
+		goto unlock;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	/* We need to set the flag here already so that update_adv_data
+	 * generates the correct flags.
+	 */
+	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+	hci_req_init(&req, hdev);
+
+	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+		set_bredr_scan(&req);
+
+	/* Since only the advertising data flags will change, there
+	 * is no need to update the scan response data.
+	 */
+	update_adv_data(&req);
+
+	err = hci_req_run(&req, set_bredr_complete);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+	return err;
+}
+
 static bool ltk_is_valid(struct mgmt_ltk_info *key)
 {
 	if (key->authenticated != 0x00 && key->authenticated != 0x01)
@@ -3180,6 +4017,12 @@
 	u16 key_count, expected_len;
 	int i, err;
 
+	BT_DBG("request for %s", hdev->name);
+
+	if (!lmp_le_capable(hdev))
+		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+				  MGMT_STATUS_NOT_SUPPORTED);
+
 	key_count = __le16_to_cpu(cp->key_count);
 
 	expected_len = sizeof(*cp) + key_count *
@@ -3208,15 +4051,19 @@
 
 	for (i = 0; i < key_count; i++) {
 		struct mgmt_ltk_info *key = &cp->keys[i];
-		u8 type;
+		u8 type, addr_type;
+
+		if (key->addr.type == BDADDR_LE_PUBLIC)
+			addr_type = ADDR_LE_DEV_PUBLIC;
+		else
+			addr_type = ADDR_LE_DEV_RANDOM;
 
 		if (key->master)
 			type = HCI_SMP_LTK;
 		else
 			type = HCI_SMP_LTK_SLAVE;
 
-		hci_add_ltk(hdev, &key->addr.bdaddr,
-			    bdaddr_to_le(key->addr.type),
+		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
 			    type, 0, key->authenticated, key->val,
 			    key->enc_size, key->ediv, key->rand);
 	}
@@ -3276,6 +4123,10 @@
 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
+	{ set_advertising,        false, MGMT_SETTING_SIZE },
+	{ set_bredr,              false, MGMT_SETTING_SIZE },
+	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
+	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
 };
 
 
@@ -3320,6 +4171,13 @@
 					 MGMT_STATUS_INVALID_INDEX);
 			goto done;
 		}
+
+		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+			err = cmd_status(sk, index, opcode,
+					 MGMT_STATUS_INVALID_INDEX);
+			goto done;
+		}
 	}
 
 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -3365,74 +4223,24 @@
 	return err;
 }
 
-static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+void mgmt_index_added(struct hci_dev *hdev)
 {
-	u8 *status = data;
+	if (hdev->dev_type != HCI_BREDR)
+		return;
 
-	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
-	mgmt_pending_remove(cmd);
+	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
-int mgmt_index_added(struct hci_dev *hdev)
-{
-	if (!mgmt_valid_hdev(hdev))
-		return -ENOTSUPP;
-
-	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
-}
-
-int mgmt_index_removed(struct hci_dev *hdev)
+void mgmt_index_removed(struct hci_dev *hdev)
 {
 	u8 status = MGMT_STATUS_INVALID_INDEX;
 
-	if (!mgmt_valid_hdev(hdev))
-		return -ENOTSUPP;
+	if (hdev->dev_type != HCI_BREDR)
+		return;
 
 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
-	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
-}
-
-struct cmd_lookup {
-	struct sock *sk;
-	struct hci_dev *hdev;
-	u8 mgmt_status;
-};
-
-static void settings_rsp(struct pending_cmd *cmd, void *data)
-{
-	struct cmd_lookup *match = data;
-
-	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
-
-	list_del(&cmd->list);
-
-	if (match->sk == NULL) {
-		match->sk = cmd->sk;
-		sock_hold(match->sk);
-	}
-
-	mgmt_pending_free(cmd);
-}
-
-static void set_bredr_scan(struct hci_request *req)
-{
-	struct hci_dev *hdev = req->hdev;
-	u8 scan = 0;
-
-	/* Ensure that fast connectable is disabled. This function will
-	 * not do anything if the page scan parameters are already what
-	 * they should be.
-	 */
-	write_fast_connectable(req, false);
-
-	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-		scan |= SCAN_PAGE;
-	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-		scan |= SCAN_INQUIRY;
-
-	if (scan)
-		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
 static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -3483,13 +4291,33 @@
 				    sizeof(cp), &cp);
 	}
 
+	if (lmp_le_capable(hdev)) {
+		/* Set random address to static address if configured */
+		if (bacmp(&hdev->static_addr, BDADDR_ANY))
+			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+				    &hdev->static_addr);
+
+		/* Make sure the controller has a good default for
+		 * advertising data. This also applies to the case
+		 * where BR/EDR was toggled during the AUTO_OFF phase.
+		 */
+		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+			update_adv_data(&req);
+			update_scan_rsp_data(&req);
+		}
+
+		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+			enable_advertising(&req);
+	}
+
 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
 			    sizeof(link_sec), &link_sec);
 
 	if (lmp_bredr_capable(hdev)) {
-		set_bredr_scan(&req);
+		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+			set_bredr_scan(&req);
 		update_class(&req);
 		update_name(&req);
 		update_eir(&req);
@@ -3533,76 +4361,110 @@
 	return err;
 }
 
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
 {
 	struct pending_cmd *cmd;
 	u8 status;
 
 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	if (err == -ERFKILL)
 		status = MGMT_STATUS_RFKILLED;
 	else
 		status = MGMT_STATUS_FAILED;
 
-	err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+void mgmt_discoverable_timeout(struct hci_dev *hdev)
 {
-	struct cmd_lookup match = { NULL, hdev };
-	bool changed = false;
-	int err = 0;
+	struct hci_request req;
+
+	hci_dev_lock(hdev);
+
+	/* When the discoverable timeout triggers, make sure that the
+	 * limited discoverable flag is cleared. Even when the timeout
+	 * was triggered from general discoverable, it is safe to
+	 * clear the flag unconditionally.
+	 */
+	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+
+	hci_req_init(&req, hdev);
+	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		u8 scan = SCAN_PAGE;
+		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
+			    sizeof(scan), &scan);
+	}
+	update_class(&req);
+	update_adv_data(&req);
+	hci_req_run(&req, NULL);
+
+	hdev->discov_timeout = 0;
+
+	new_settings(hdev, NULL);
+
+	hci_dev_unlock(hdev);
+}
+
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+{
+	bool changed;
+
+	/* Nothing is needed here if there's a pending command, since
+	 * that command's request completion callback takes care of
+	 * everything necessary.
+	 */
+	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+		return;
 
 	if (discoverable) {
-		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-			changed = true;
+		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 	} else {
-		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-			changed = true;
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 	}
 
-	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
-			     &match);
+	if (changed) {
+		struct hci_request req;
 
-	if (changed)
-		err = new_settings(hdev, match.sk);
+		/* If this change in discoverable was triggered by
+		 * disabling connectable, the advertising flags may
+		 * need to be updated.
+		 */
+		hci_req_init(&req, hdev);
+		update_adv_data(&req);
+		hci_req_run(&req, NULL);
 
-	if (match.sk)
-		sock_put(match.sk);
-
-	return err;
+		new_settings(hdev, NULL);
+	}
 }
 
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
 {
-	struct pending_cmd *cmd;
-	bool changed = false;
-	int err = 0;
+	bool changed;
 
-	if (connectable) {
-		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-	} else {
-		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-	}
+	/* Nothing is needed here if there's a pending command, since
+	 * that command's request completion callback takes care of
+	 * everything necessary.
+	 */
+	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+		return;
 
-	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+	if (connectable)
+		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
 
 	if (changed)
-		err = new_settings(hdev, cmd ? cmd->sk : NULL);
-
-	return err;
+		new_settings(hdev, NULL);
 }
 
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
 	u8 mgmt_err = mgmt_status(status);
 
@@ -3613,12 +4475,10 @@
 	if (scan & SCAN_INQUIRY)
 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
 				     cmd_status_rsp, &mgmt_err);
-
-	return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
-		      bool persistent)
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+		       bool persistent)
 {
 	struct mgmt_ev_new_link_key ev;
 
@@ -3631,10 +4491,10 @@
 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
 	ev.key.pin_len = key->pin_len;
 
-	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
 {
 	struct mgmt_ev_new_long_term_key ev;
 
@@ -3653,13 +4513,23 @@
 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
 	memcpy(ev.key.val, key->val, sizeof(key->val));
 
-	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
-			  NULL);
+	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
-			  u8 *dev_class)
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+				  u8 data_len)
+{
+	eir[eir_len++] = sizeof(type) + data_len;
+	eir[eir_len++] = type;
+	memcpy(&eir[eir_len], data, data_len);
+	eir_len += data_len;
+
+	return eir_len;
+}
+
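eir_append_data() above emits the same length/type/value layout used for
advertising data: a length byte covering the type byte plus the payload. A
standalone copy with userspace types and a tiny usage check; the names here
are illustrative, not kernel API:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint16_t eir_append(uint8_t *eir, uint16_t eir_len, uint8_t type,
				   const uint8_t *data, uint8_t data_len)
	{
		eir[eir_len++] = 1 + data_len;	/* type byte + payload */
		eir[eir_len++] = type;
		memcpy(&eir[eir_len], data, data_len);
		return eir_len + data_len;
	}

	int main(void)
	{
		uint8_t eir[32];
		uint16_t len = 0;

		/* 0x09 is the Complete Local Name EIR type */
		len = eir_append(eir, len, 0x09, (const uint8_t *) "dev", 3);
		printf("eir_len = %d\n", len);	/* 5: length + type + 3 bytes */
		return 0;
	}
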
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
+			   u8 *dev_class)
 {
 	char buf[512];
 	struct mgmt_ev_device_connected *ev = (void *) buf;
@@ -3680,8 +4550,8 @@
 
 	ev->eir_len = cpu_to_le16(eir_len);
 
-	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
-			  sizeof(*ev) + eir_len, NULL);
+	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+		    sizeof(*ev) + eir_len, NULL);
 }
 
 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -3719,12 +4589,11 @@
 	mgmt_pending_remove(cmd);
 }
 
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			     u8 link_type, u8 addr_type, u8 reason)
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+			      u8 link_type, u8 addr_type, u8 reason)
 {
 	struct mgmt_ev_device_disconnected ev;
 	struct sock *sk = NULL;
-	int err;
 
 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
@@ -3732,45 +4601,39 @@
 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 	ev.reason = reason;
 
-	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
-			 sk);
+	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
 
 	if (sk)
 		sock_put(sk);
 
 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
 			     hdev);
-
-	return err;
 }
 
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			   u8 link_type, u8 addr_type, u8 status)
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+			    u8 link_type, u8 addr_type, u8 status)
 {
 	struct mgmt_rp_disconnect rp;
 	struct pending_cmd *cmd;
-	int err;
 
 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
 			     hdev);
 
 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	bacpy(&rp.addr.bdaddr, bdaddr);
 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
 
-	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
-			   mgmt_status(status), &rp, sizeof(rp));
+	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+		     mgmt_status(status), &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-			u8 addr_type, u8 status)
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+			 u8 addr_type, u8 status)
 {
 	struct mgmt_ev_connect_failed ev;
 
@@ -3778,10 +4641,10 @@
 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 	ev.status = mgmt_status(status);
 
-	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 {
 	struct mgmt_ev_pin_code_request ev;
 
@@ -3789,52 +4652,45 @@
 	ev.addr.type = BDADDR_BREDR;
 	ev.secure = secure;
 
-	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
-			  NULL);
+	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				 u8 status)
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				  u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
-	int err;
 
 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	bacpy(&rp.addr.bdaddr, bdaddr);
 	rp.addr.type = BDADDR_BREDR;
 
-	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
-			   mgmt_status(status), &rp, sizeof(rp));
+	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+		     mgmt_status(status), &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				     u8 status)
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				      u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
-	int err;
 
 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	bacpy(&rp.addr.bdaddr, bdaddr);
 	rp.addr.type = BDADDR_BREDR;
 
-	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
-			   mgmt_status(status), &rp, sizeof(rp));
+	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+		     mgmt_status(status), &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3936,8 +4792,8 @@
 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		     u8 addr_type, u8 status)
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		      u8 addr_type, u8 status)
 {
 	struct mgmt_ev_auth_failed ev;
 
@@ -3945,40 +4801,36 @@
 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 	ev.status = mgmt_status(status);
 
-	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev };
-	bool changed = false;
-	int err = 0;
+	bool changed;
 
 	if (status) {
 		u8 mgmt_err = mgmt_status(status);
 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
 				     cmd_status_rsp, &mgmt_err);
-		return 0;
+		return;
 	}
 
-	if (test_bit(HCI_AUTH, &hdev->flags)) {
-		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-			changed = true;
-	} else {
-		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-			changed = true;
-	}
+	if (test_bit(HCI_AUTH, &hdev->flags))
+		changed = !test_and_set_bit(HCI_LINK_SECURITY,
+					    &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_LINK_SECURITY,
+					     &hdev->dev_flags);
 
 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
 			     &match);
 
 	if (changed)
-		err = new_settings(hdev, match.sk);
+		new_settings(hdev, match.sk);
 
 	if (match.sk)
 		sock_put(match.sk);
-
-	return err;
 }
 
 static void clear_eir(struct hci_request *req)
@@ -3996,38 +4848,41 @@
 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 }
 
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev };
 	struct hci_request req;
 	bool changed = false;
-	int err = 0;
 
 	if (status) {
 		u8 mgmt_err = mgmt_status(status);
 
 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
-						 &hdev->dev_flags))
-			err = new_settings(hdev, NULL);
+						 &hdev->dev_flags)) {
+			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+			new_settings(hdev, NULL);
+		}
 
 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
 				     &mgmt_err);
-
-		return err;
+		return;
 	}
 
 	if (enable) {
-		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-			changed = true;
+		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
 	} else {
-		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-			changed = true;
+		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+		if (!changed)
+			changed = test_and_clear_bit(HCI_HS_ENABLED,
+						     &hdev->dev_flags);
+		else
+			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
 	}
 
 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
 
 	if (changed)
-		err = new_settings(hdev, match.sk);
+		new_settings(hdev, match.sk);
 
 	if (match.sk)
 		sock_put(match.sk);
@@ -4040,8 +4895,6 @@
 		clear_eir(&req);
 
 	hci_req_run(&req, NULL);
-
-	return err;
 }
 
 static void sk_lookup(struct pending_cmd *cmd, void *data)
@@ -4054,33 +4907,30 @@
 	}
 }
 
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
-				   u8 status)
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+				    u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
-	int err = 0;
 
 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
 
 	if (!status)
-		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
-				 3, NULL);
+		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
+			   NULL);
 
 	if (match.sk)
 		sock_put(match.sk);
-
-	return err;
 }
 
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
 	struct mgmt_cp_set_local_name ev;
 	struct pending_cmd *cmd;
 
 	if (status)
-		return 0;
+		return;
 
 	memset(&ev, 0, sizeof(ev));
 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
@@ -4094,96 +4944,54 @@
 		 * HCI dev don't send any mgmt signals.
 		 */
 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-			return 0;
+			return;
 	}
 
-	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
-			  cmd ? cmd->sk : NULL);
+	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+		   cmd ? cmd->sk : NULL);
 }
 
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-					    u8 *randomizer, u8 status)
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+					     u8 *randomizer, u8 status)
 {
 	struct pending_cmd *cmd;
-	int err;
 
 	BT_DBG("%s status %u", hdev->name, status);
 
 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	if (status) {
-		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-				 mgmt_status(status));
+		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+			   mgmt_status(status));
 	} else {
 		struct mgmt_rp_read_local_oob_data rp;
 
 		memcpy(rp.hash, hash, sizeof(rp.hash));
 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
 
-		err = cmd_complete(cmd->sk, hdev->id,
-				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
-				   sizeof(rp));
+		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+			     0, &rp, sizeof(rp));
 	}
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
-	struct cmd_lookup match = { NULL, hdev };
-	bool changed = false;
-	int err = 0;
-
-	if (status) {
-		u8 mgmt_err = mgmt_status(status);
-
-		if (enable && test_and_clear_bit(HCI_LE_ENABLED,
-						 &hdev->dev_flags))
-			err = new_settings(hdev, NULL);
-
-		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
-				     &mgmt_err);
-
-		return err;
-	}
-
-	if (enable) {
-		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-			changed = true;
-	} else {
-		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-			changed = true;
-	}
-
-	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
-
-	if (changed)
-		err = new_settings(hdev, match.sk);
-
-	if (match.sk)
-		sock_put(match.sk);
-
-	return err;
-}
-
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
-		      ssp, u8 *eir, u16 eir_len)
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
+		       ssp, u8 *eir, u16 eir_len)
 {
 	char buf[512];
 	struct mgmt_ev_device_found *ev = (void *) buf;
 	size_t ev_size;
 
 	if (!hci_discovery_active(hdev))
-		return -EPERM;
+		return;
 
 	/* Leave 5 bytes for a potential CoD field */
 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
-		return -EINVAL;
+		return;
 
 	memset(buf, 0, sizeof(buf));
 
@@ -4205,11 +5013,11 @@
 	ev->eir_len = cpu_to_le16(eir_len);
 	ev_size = sizeof(*ev) + eir_len;
 
-	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
 }
 
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
 {
 	struct mgmt_ev_device_found *ev;
 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
@@ -4228,11 +5036,10 @@
 
 	ev->eir_len = cpu_to_le16(eir_len);
 
-	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
-			  sizeof(*ev) + eir_len, NULL);
+	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
 }
 
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
 {
 	struct mgmt_ev_discovering ev;
 	struct pending_cmd *cmd;
@@ -4256,7 +5063,7 @@
 	ev.type = hdev->discovery.type;
 	ev.discovering = discovering;
 
-	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
 }
 
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
@@ -4287,5 +5094,35 @@
 			  cmd ? cmd->sk : NULL);
 }
 
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
+static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status %u", hdev->name, status);
+
+	/* Clear the advertising mgmt setting if we failed to re-enable it */
+	if (status) {
+		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+		new_settings(hdev, NULL);
+	}
+}
+
+void mgmt_reenable_advertising(struct hci_dev *hdev)
+{
+	struct hci_request req;
+
+	if (hci_conn_num(hdev, LE_LINK) > 0)
+		return;
+
+	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+		return;
+
+	hci_req_init(&req, hdev);
+	enable_advertising(&req);
+
+	/* If this fails we have no option but to let user space know
+	 * that we've disabled advertising.
+	 */
+	if (hci_req_run(&req, adv_enable_complete) < 0) {
+		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+		new_settings(hdev, NULL);
+	}
+}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ca957d3..94d06cb 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -641,13 +641,13 @@
 {
 	struct rfcomm_session *s;
 	struct list_head *p, *n;
-	struct bt_sock *sk;
+	struct l2cap_chan *chan;
 	list_for_each_safe(p, n, &session_list) {
 		s = list_entry(p, struct rfcomm_session, list);
-		sk = bt_sk(s->sock->sk);
+		chan = l2cap_pi(s->sock->sk)->chan;
 
-		if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
-				!bacmp(&sk->dst, dst))
+		if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) &&
+		    !bacmp(&chan->dst, dst))
 			return s;
 	}
 	return NULL;
@@ -732,11 +732,11 @@
 
 void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst)
 {
-	struct sock *sk = s->sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
 	if (src)
-		bacpy(src, &bt_sk(sk)->src);
+		bacpy(src, &chan->src);
 	if (dst)
-		bacpy(dst, &bt_sk(sk)->dst);
+		bacpy(dst, &chan->dst);
 }
 
 /* ---- RFCOMM frame sending ---- */
@@ -2112,12 +2112,11 @@
 	rfcomm_lock();
 
 	list_for_each_entry(s, &session_list, list) {
+		struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
 		struct rfcomm_dlc *d;
 		list_for_each_entry(d, &s->dlcs, list) {
-			struct sock *sk = s->sock->sk;
-
 			seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
-				   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+				   &chan->src, &chan->dst,
 				   d->state, d->dlci, d->mtu,
 				   d->rx_credits, d->tx_credits);
 		}
@@ -2155,13 +2154,6 @@
 		goto unregister;
 	}
 
-	if (bt_debugfs) {
-		rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
-				bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
-		if (!rfcomm_dlc_debugfs)
-			BT_ERR("Failed to create RFCOMM debug file");
-	}
-
 	err = rfcomm_init_ttys();
 	if (err < 0)
 		goto stop;
@@ -2172,6 +2164,13 @@
 
 	BT_INFO("RFCOMM ver %s", VERSION);
 
+	if (IS_ERR_OR_NULL(bt_debugfs))
+		return 0;
+
+	rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
+						 bt_debugfs, NULL,
+						 &rfcomm_dlc_debugfs_fops);
+
 	return 0;
 
 cleanup:
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 30b3721..c4d3d42 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -87,7 +87,8 @@
 		parent->sk_data_ready(parent, 0);
 	} else {
 		if (d->state == BT_CONNECTED)
-			rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
+			rfcomm_session_getaddr(d->session,
+					       &rfcomm_pi(sk)->src, NULL);
 		sk->sk_state_change(sk);
 	}
 
@@ -110,7 +111,7 @@
 
 	sk_for_each(sk, &rfcomm_sk_list.head) {
 		if (rfcomm_pi(sk)->channel == channel &&
-				!bacmp(&bt_sk(sk)->src, src))
+				!bacmp(&rfcomm_pi(sk)->src, src))
 			break;
 	}
 
@@ -132,11 +133,11 @@
 
 		if (rfcomm_pi(sk)->channel == channel) {
 			/* Exact match. */
-			if (!bacmp(&bt_sk(sk)->src, src))
+			if (!bacmp(&rfcomm_pi(sk)->src, src))
 				break;
 
 			/* Closest match */
-			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+			if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
 				sk1 = sk;
 		}
 	}
@@ -355,7 +356,7 @@
 		err = -EADDRINUSE;
 	} else {
 		/* Save source address */
-		bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
+		bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
 		rfcomm_pi(sk)->channel = sa->rc_channel;
 		sk->sk_state = BT_BOUND;
 	}
@@ -393,13 +394,14 @@
 	}
 
 	sk->sk_state = BT_CONNECT;
-	bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
+	bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
 	rfcomm_pi(sk)->channel = sa->rc_channel;
 
 	d->sec_level = rfcomm_pi(sk)->sec_level;
 	d->role_switch = rfcomm_pi(sk)->role_switch;
 
-	err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+			      sa->rc_channel);
 	if (!err)
 		err = bt_sock_wait_state(sk, BT_CONNECTED,
 				sock_sndtimeo(sk, flags & O_NONBLOCK));
@@ -429,7 +431,7 @@
 	}
 
 	if (!rfcomm_pi(sk)->channel) {
-		bdaddr_t *src = &bt_sk(sk)->src;
+		bdaddr_t *src = &rfcomm_pi(sk)->src;
 		u8 channel;
 
 		err = -EINVAL;
@@ -530,9 +532,9 @@
 	sa->rc_family  = AF_BLUETOOTH;
 	sa->rc_channel = rfcomm_pi(sk)->channel;
 	if (peer)
-		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
+		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
 	else
-		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
+		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
 
 	*len = sizeof(struct sockaddr_rc);
 	return 0;
@@ -544,7 +546,7 @@
 	struct sock *sk = sock->sk;
 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
 	struct sk_buff *skb;
-	int sent = 0;
+	int sent;
 
 	if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
 		return -ENOTCONN;
@@ -559,6 +561,10 @@
 
 	lock_sock(sk);
 
+	sent = bt_sock_wait_ready(sk, msg->msg_flags);
+	if (sent)
+		goto done;
+
 	while (len) {
 		size_t size = min_t(size_t, len, d->mtu);
 		int err;
@@ -594,6 +600,7 @@
 		len  -= size;
 	}
 
+done:
 	release_sock(sk);
 
 	return sent;
@@ -946,8 +953,8 @@
 	bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
 
 	rfcomm_sock_init(sk, parent);
-	bacpy(&bt_sk(sk)->src, &src);
-	bacpy(&bt_sk(sk)->dst, &dst);
+	bacpy(&rfcomm_pi(sk)->src, &src);
+	bacpy(&rfcomm_pi(sk)->dst, &dst);
 	rfcomm_pi(sk)->channel = channel;
 
 	sk->sk_state = BT_CONFIG;
@@ -974,7 +981,7 @@
 
 	sk_for_each(sk, &rfcomm_sk_list.head) {
 		seq_printf(f, "%pMR %pMR %d %d\n",
-			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
+			   &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
 			   sk->sk_state, rfcomm_pi(sk)->channel);
 	}
 
@@ -1044,15 +1051,15 @@
 		goto error;
 	}
 
-	if (bt_debugfs) {
-		rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
-				bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
-		if (!rfcomm_sock_debugfs)
-			BT_ERR("Failed to create RFCOMM debug file");
-	}
-
 	BT_INFO("RFCOMM socket layer initialized");
 
+	if (IS_ERR_OR_NULL(bt_debugfs))
+		return 0;
+
+	rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
+						  bt_debugfs, NULL,
+						  &rfcomm_sock_debugfs_fops);
+
 	return 0;
 
 error:
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 96bd388..12a0e51 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -92,9 +92,6 @@
 	hcon->sco_data = conn;
 	conn->hcon = hcon;
 
-	conn->src = &hdev->bdaddr;
-	conn->dst = &hcon->dst;
-
 	if (hdev->sco_mtu > 0)
 		conn->mtu = hdev->sco_mtu;
 	else
@@ -156,16 +153,14 @@
 
 static int sco_connect(struct sock *sk)
 {
-	bdaddr_t *src = &bt_sk(sk)->src;
-	bdaddr_t *dst = &bt_sk(sk)->dst;
 	struct sco_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev  *hdev;
 	int err, type;
 
-	BT_DBG("%pMR -> %pMR", src, dst);
+	BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
 
-	hdev = hci_get_route(dst, src);
+	hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
 	if (!hdev)
 		return -EHOSTUNREACH;
 
@@ -182,7 +177,8 @@
 		goto done;
 	}
 
-	hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
+	hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+			       sco_pi(sk)->setting);
 	if (IS_ERR(hcon)) {
 		err = PTR_ERR(hcon);
 		goto done;
@@ -196,7 +192,7 @@
 	}
 
 	/* Update source addr of the socket */
-	bacpy(src, conn->src);
+	bacpy(&sco_pi(sk)->src, &hcon->src);
 
 	err = sco_chan_add(conn, sk, NULL);
 	if (err)
@@ -270,7 +266,7 @@
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
-		if (!bacmp(&bt_sk(sk)->src, ba))
+		if (!bacmp(&sco_pi(sk)->src, ba))
 			return sk;
 	}
 
@@ -291,11 +287,11 @@
 			continue;
 
 		/* Exact match. */
-		if (!bacmp(&bt_sk(sk)->src, src))
+		if (!bacmp(&sco_pi(sk)->src, src))
 			break;
 
 		/* Closest match */
-		if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+		if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
 			sk1 = sk;
 	}
 
@@ -475,7 +471,7 @@
 		goto done;
 	}
 
-	bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+	bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
 
 	sk->sk_state = BT_BOUND;
 
@@ -505,7 +501,7 @@
 	lock_sock(sk);
 
 	/* Set destination address and psm */
-	bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
 
 	err = sco_connect(sk);
 	if (err)
@@ -522,7 +518,7 @@
 static int sco_sock_listen(struct socket *sock, int backlog)
 {
 	struct sock *sk = sock->sk;
-	bdaddr_t *src = &bt_sk(sk)->src;
+	bdaddr_t *src = &sco_pi(sk)->src;
 	int err = 0;
 
 	BT_DBG("sk %p backlog %d", sk, backlog);
@@ -626,9 +622,9 @@
 	*len = sizeof(struct sockaddr_sco);
 
 	if (peer)
-		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
+		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
 	else
-		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
 
 	return 0;
 }
@@ -999,7 +995,7 @@
 	} else {
 		sco_conn_lock(conn);
 
-		parent = sco_get_sock_listen(conn->src);
+		parent = sco_get_sock_listen(&conn->hcon->src);
 		if (!parent) {
 			sco_conn_unlock(conn);
 			return;
@@ -1017,8 +1013,8 @@
 
 		sco_sock_init(sk, parent);
 
-		bacpy(&bt_sk(sk)->src, conn->src);
-		bacpy(&bt_sk(sk)->dst, conn->dst);
+		bacpy(&sco_pi(sk)->src, &conn->hcon->src);
+		bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
 
 		hci_conn_hold(conn->hcon);
 		__sco_chan_add(conn, sk, parent);
@@ -1051,8 +1047,8 @@
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
-		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
-		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+		if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
+		    !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
 			lm |= HCI_LM_ACCEPT;
 
 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
@@ -1111,8 +1107,8 @@
 	read_lock(&sco_sk_list.lock);
 
 	sk_for_each(sk, &sco_sk_list.head) {
-		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
-			   &bt_sk(sk)->dst, sk->sk_state);
+		seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
+			   &sco_pi(sk)->dst, sk->sk_state);
 	}
 
 	read_unlock(&sco_sk_list.lock);
@@ -1181,15 +1177,14 @@
 		goto error;
 	}
 
-	if (bt_debugfs) {
-		sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
-						  NULL, &sco_debugfs_fops);
-		if (!sco_debugfs)
-			BT_ERR("Failed to create SCO debug file");
-	}
-
 	BT_INFO("SCO socket layer initialized");
 
+	if (IS_ERR_OR_NULL(bt_debugfs))
+		return 0;
+
+	sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+					  NULL, &sco_debugfs_fops);
+
 	return 0;
 
 error:
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b5562ab..85a2796c 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -28,7 +28,8 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
 
 #define SMP_TIMEOUT	msecs_to_jiffies(30000)
 
@@ -85,8 +86,8 @@
 }
 
 static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
-		u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
-		u8 _rat, bdaddr_t *ra, u8 res[16])
+		  u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
+		  u8 _rat, bdaddr_t *ra, u8 res[16])
 {
 	u8 p1[16], p2[16];
 	int err;
@@ -126,8 +127,8 @@
 	return err;
 }
 
-static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
-			u8 r1[16], u8 r2[16], u8 _r[16])
+static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
+		  u8 r2[16], u8 _r[16])
 {
 	int err;
 
@@ -150,7 +151,7 @@
 }
 
 static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
-						u16 dlen, void *data)
+				     u16 dlen, void *data)
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
@@ -213,9 +214,8 @@
 }
 
 static void build_pairing_cmd(struct l2cap_conn *conn,
-				struct smp_cmd_pairing *req,
-				struct smp_cmd_pairing *rsp,
-				__u8 authreq)
+			      struct smp_cmd_pairing *req,
+			      struct smp_cmd_pairing *rsp, __u8 authreq)
 {
 	u8 dist_keys = 0;
 
@@ -249,7 +249,7 @@
 	struct smp_chan *smp = conn->smp_chan;
 
 	if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
-			(max_key_size < SMP_MIN_ENC_KEY_SIZE))
+	    (max_key_size < SMP_MIN_ENC_KEY_SIZE))
 		return SMP_ENC_KEY_SIZE;
 
 	smp->enc_key_size = max_key_size;
@@ -263,15 +263,15 @@
 
 	if (send)
 		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
-								&reason);
+			     &reason);
 
-	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
-	mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
-			 hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
+	clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
+	mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type,
+			 HCI_ERROR_AUTH_FAILURE);
 
 	cancel_delayed_work_sync(&conn->security_timer);
 
-	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
 		smp_chan_destroy(conn);
 }
 
@@ -309,8 +309,8 @@
 	/* If either side has unknown io_caps, use JUST WORKS */
 	/* Otherwise, look up method from the table */
 	if (!(auth & SMP_AUTH_MITM) ||
-			local_io > SMP_IO_KEYBOARD_DISPLAY ||
-			remote_io > SMP_IO_KEYBOARD_DISPLAY)
+	    local_io > SMP_IO_KEYBOARD_DISPLAY ||
+	    remote_io > SMP_IO_KEYBOARD_DISPLAY)
 		method = JUST_WORKS;
 	else
 		method = gen_method[remote_io][local_io];
@@ -354,10 +354,10 @@
 	hci_dev_lock(hcon->hdev);
 
 	if (method == REQ_PASSKEY)
-		ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
+		ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
 						hcon->type, hcon->dst_type);
 	else
-		ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+		ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
 						hcon->type, hcon->dst_type,
 						cpu_to_le32(passkey), 0);
 
@@ -386,12 +386,13 @@
 	smp->tfm = tfm;
 
 	if (conn->hcon->out)
-		ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
-			     conn->src, conn->hcon->dst_type, conn->dst, res);
+		ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+			     conn->hcon->src_type, &conn->hcon->src,
+			     conn->hcon->dst_type, &conn->hcon->dst, res);
 	else
 		ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
-			     conn->hcon->dst_type, conn->dst, 0, conn->src,
-			     res);
+			     conn->hcon->dst_type, &conn->hcon->dst,
+			     conn->hcon->src_type, &conn->hcon->src, res);
 	if (ret) {
 		reason = SMP_UNSPECIFIED;
 		goto error;
@@ -425,11 +426,13 @@
 	BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
 
 	if (hcon->out)
-		ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
-			     conn->src, hcon->dst_type, conn->dst, res);
+		ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+			     hcon->src_type, &hcon->src,
+			     hcon->dst_type, &hcon->dst, res);
 	else
 		ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
-			     hcon->dst_type, conn->dst, 0, conn->src, res);
+			     hcon->dst_type, &hcon->dst,
+			     hcon->src_type, &hcon->src, res);
 	if (ret) {
 		reason = SMP_UNSPECIFIED;
 		goto error;
@@ -477,9 +480,9 @@
 		swap128(key, stk);
 
 		memset(stk + smp->enc_key_size, 0,
-				SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
-		hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
+		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
 			    HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
 			    ediv, rand);
 	}
@@ -494,7 +497,7 @@
 {
 	struct smp_chan *smp;
 
-	smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+	smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
 	if (!smp)
 		return NULL;
 
@@ -649,7 +652,7 @@
 	memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
 
 	if ((req->auth_req & SMP_AUTH_BONDING) &&
-			(rsp->auth_req & SMP_AUTH_BONDING))
+	    (rsp->auth_req & SMP_AUTH_BONDING))
 		auth = SMP_AUTH_BONDING;
 
 	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
@@ -684,7 +687,7 @@
 
 		swap128(smp->prnd, random);
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
-								random);
+			     random);
 	} else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
 		queue_work(hdev->workqueue, &smp->confirm);
 	} else {
@@ -714,7 +717,7 @@
 	struct smp_ltk *key;
 	struct hci_conn *hcon = conn->hcon;
 
-	key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
+	key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type);
 	if (!key)
 		return 0;
 
@@ -728,8 +731,8 @@
 	hcon->enc_key_size = key->enc_size;
 
 	return 1;
-
 }
+
 static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_security_req *rp = (void *) skb->data;
@@ -835,9 +838,9 @@
 	skb_pull(skb, sizeof(*rp));
 
 	hci_dev_lock(hdev);
-	authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
-	hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
-		    HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
+	authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
+	hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1,
+		    authenticated, smp->tk, smp->enc_key_size,
 		    rp->ediv, rp->rand);
 	smp_distribute_keys(conn, 1);
 	hci_dev_unlock(hdev);
@@ -847,16 +850,27 @@
 
 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 {
-	__u8 code = skb->data[0];
-	__u8 reason;
+	struct hci_conn *hcon = conn->hcon;
+	__u8 code, reason;
 	int err = 0;
 
-	if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
+	if (hcon->type != LE_LINK) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	if (skb->len < 1) {
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
 		err = -ENOTSUPP;
 		reason = SMP_PAIRING_NOTSUPP;
 		goto done;
 	}
 
+	code = skb->data[0];
 	skb_pull(skb, sizeof(code));
 
 	/*
@@ -974,7 +988,7 @@
 		smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
 
 		authenticated = hcon->sec_level == BT_SECURITY_HIGH;
-		hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
 			    HCI_SMP_LTK_SLAVE, 1, authenticated,
 			    enc.ltk, smp->enc_key_size, ediv, ident.rand);
 
@@ -996,10 +1010,10 @@
 
 		/* Just public address */
 		memset(&addrinfo, 0, sizeof(addrinfo));
-		bacpy(&addrinfo.bdaddr, conn->src);
+		bacpy(&addrinfo.bdaddr, &conn->hcon->src);
 
 		smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
-								&addrinfo);
+			     &addrinfo);
 
 		*keydist &= ~SMP_DIST_ID_KEY;
 	}
diff --git a/include/net/bluetooth/smp.h b/net/bluetooth/smp.h
similarity index 100%
rename from include/net/bluetooth/smp.h
rename to net/bluetooth/smp.h
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e6b7fec..f00cfd2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -88,11 +88,18 @@
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
+	int i;
 
 	br->stats = alloc_percpu(struct br_cpu_netstats);
 	if (!br->stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct br_cpu_netstats *br_dev_stats;
+		br_dev_stats = per_cpu_ptr(br->stats, i);
+		u64_stats_init(&br_dev_stats->syncp);
+	}
+
 	return 0;
 }
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 686284f..4c214b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -363,7 +363,7 @@
 	skb_reset_mac_header(skb);
 	eth = eth_hdr(skb);
 
-	memcpy(eth->h_source, br->dev->dev_addr, 6);
+	memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
 	eth->h_dest[0] = 1;
 	eth->h_dest[1] = 0;
 	eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@
 	skb_reset_mac_header(skb);
 	eth = eth_hdr(skb);
 
-	memcpy(eth->h_source, br->dev->dev_addr, 6);
+	memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
 	eth->h_proto = htons(ETH_P_IPV6);
 	skb_put(skb, sizeof(*eth));
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f877362..80cad2c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -559,6 +559,8 @@
 	else if (skb->protocol == htons(ETH_P_PPP_SES))
 		nf_bridge->mask |= BRNF_PPPoE;
 
+	/* Must drop socket now because of tproxy. */
+	skb_orphan(skb);
 	return skb->dev;
 }
 
@@ -619,7 +621,7 @@
 
 /* Replicate the checks that IPv6 does on packet reception and pass the packet
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
-static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
+static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
 					   struct sk_buff *skb,
 					   const struct net_device *in,
 					   const struct net_device *out,
@@ -669,7 +671,8 @@
  * receiving device) to make netfilter happy, the REDIRECT
  * target in particular.  Save the original destination IP
  * address to be able to detect DNAT afterwards. */
-static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
+				      struct sk_buff *skb,
 				      const struct net_device *in,
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
@@ -691,7 +694,7 @@
 			return NF_ACCEPT;
 
 		nf_bridge_pull_encap_header_rcsum(skb);
-		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
+		return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
 	}
 
 	if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -727,7 +730,8 @@
  * took place when the packet entered the bridge), but we
  * register an IPv4 PRE_ROUTING 'sabotage' hook that will
  * prevent this from happening. */
-static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
+				   struct sk_buff *skb,
 				   const struct net_device *in,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
@@ -765,7 +769,8 @@
  * but we are still able to filter on the 'real' indev/outdev
  * because of the physdev module. For ARP, indev and outdev are the
  * bridge ports. */
-static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+				     struct sk_buff *skb,
 				     const struct net_device *in,
 				     const struct net_device *out,
 				     int (*okfn)(struct sk_buff *))
@@ -818,7 +823,8 @@
 	return NF_STOLEN;
 }
 
-static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
+				      struct sk_buff *skb,
 				      const struct net_device *in,
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
@@ -878,7 +884,8 @@
 #endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
-static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
+				       struct sk_buff *skb,
 				       const struct net_device *in,
 				       const struct net_device *out,
 				       int (*okfn)(struct sk_buff *))
@@ -923,7 +930,8 @@
 /* IP/SABOTAGE *****************************************************/
 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
  * for the second time. */
-static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
+				   struct sk_buff *skb,
 				   const struct net_device *in,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2e8244e..229d820 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -343,10 +343,9 @@
 }
 
 /* br_device.c */
-extern void br_dev_setup(struct net_device *dev);
-extern void br_dev_delete(struct net_device *dev, struct list_head *list);
-extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
-			       struct net_device *dev);
+void br_dev_setup(struct net_device *dev);
+void br_dev_delete(struct net_device *dev, struct list_head *list);
+netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 				       struct sk_buff *skb)
@@ -357,8 +356,8 @@
 		netpoll_send_skb(np, skb);
 }
 
-extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
-extern void br_netpoll_disable(struct net_bridge_port *p);
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 				       struct sk_buff *skb)
@@ -376,117 +375,99 @@
 #endif
 
 /* br_fdb.c */
-extern int br_fdb_init(void);
-extern void br_fdb_fini(void);
-extern void br_fdb_flush(struct net_bridge *br);
-extern void br_fdb_changeaddr(struct net_bridge_port *p,
-			      const unsigned char *newaddr);
-extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
-extern void br_fdb_cleanup(unsigned long arg);
-extern void br_fdb_delete_by_port(struct net_bridge *br,
-				  const struct net_bridge_port *p, int do_all);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-						 const unsigned char *addr,
-						 __u16 vid);
-extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
-extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
-			  unsigned long count, unsigned long off);
-extern int br_fdb_insert(struct net_bridge *br,
-			 struct net_bridge_port *source,
-			 const unsigned char *addr,
-			 u16 vid);
-extern void br_fdb_update(struct net_bridge *br,
-			  struct net_bridge_port *source,
-			  const unsigned char *addr,
-			  u16 vid);
-extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+int br_fdb_init(void);
+void br_fdb_fini(void);
+void br_fdb_flush(struct net_bridge *br);
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
+void br_fdb_cleanup(unsigned long arg);
+void br_fdb_delete_by_port(struct net_bridge *br,
+			   const struct net_bridge_port *p, int do_all);
+struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+					  const unsigned char *addr, __u16 vid);
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
+int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
+		   unsigned long off);
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		  const unsigned char *addr, u16 vid);
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+		   const unsigned char *addr, u16 vid);
+int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
 
-extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
-			 struct net_device *dev,
-			 const unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
-		      struct net_device *dev,
-		      const unsigned char *addr,
-		      u16 nlh_flags);
-extern int br_fdb_dump(struct sk_buff *skb,
-		       struct netlink_callback *cb,
-		       struct net_device *dev,
-		       int idx);
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+		  struct net_device *dev, const unsigned char *addr);
+int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
+	       const unsigned char *addr, u16 nlh_flags);
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct net_device *dev, int idx);
 
 /* br_forward.c */
-extern void br_deliver(const struct net_bridge_port *to,
-		struct sk_buff *skb);
-extern int br_dev_queue_push_xmit(struct sk_buff *skb);
-extern void br_forward(const struct net_bridge_port *to,
+void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sk_buff *skb);
+void br_forward(const struct net_bridge_port *to,
 		struct sk_buff *skb, struct sk_buff *skb0);
-extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
-			     bool unicast);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-			     struct sk_buff *skb2, bool unicast);
+int br_forward_finish(struct sk_buff *skb);
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+		      struct sk_buff *skb2, bool unicast);
 
 /* br_if.c */
-extern void br_port_carrier_check(struct net_bridge_port *p);
-extern int br_add_bridge(struct net *net, const char *name);
-extern int br_del_bridge(struct net *net, const char *name);
-extern void br_net_exit(struct net *net);
-extern int br_add_if(struct net_bridge *br,
-	      struct net_device *dev);
-extern int br_del_if(struct net_bridge *br,
-	      struct net_device *dev);
-extern int br_min_mtu(const struct net_bridge *br);
-extern netdev_features_t br_features_recompute(struct net_bridge *br,
-	netdev_features_t features);
+void br_port_carrier_check(struct net_bridge_port *p);
+int br_add_bridge(struct net *net, const char *name);
+int br_del_bridge(struct net *net, const char *name);
+void br_net_exit(struct net *net);
+int br_add_if(struct net_bridge *br, struct net_device *dev);
+int br_del_if(struct net_bridge *br, struct net_device *dev);
+int br_min_mtu(const struct net_bridge *br);
+netdev_features_t br_features_recompute(struct net_bridge *br,
+					netdev_features_t features);
 
 /* br_input.c */
-extern int br_handle_frame_finish(struct sk_buff *skb);
-extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+int br_handle_frame_finish(struct sk_buff *skb);
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
 /* br_ioctl.c */
-extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
+			     void __user *arg);
 
 /* br_multicast.c */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 extern unsigned int br_mdb_rehash_seq;
-extern int br_multicast_rcv(struct net_bridge *br,
-			    struct net_bridge_port *port,
-			    struct sk_buff *skb,
-			    u16 vid);
-extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-					       struct sk_buff *skb, u16 vid);
-extern void br_multicast_add_port(struct net_bridge_port *port);
-extern void br_multicast_del_port(struct net_bridge_port *port);
-extern void br_multicast_enable_port(struct net_bridge_port *port);
-extern void br_multicast_disable_port(struct net_bridge_port *port);
-extern void br_multicast_init(struct net_bridge *br);
-extern void br_multicast_open(struct net_bridge *br);
-extern void br_multicast_stop(struct net_bridge *br);
-extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
-				 struct sk_buff *skb);
-extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
-				 struct sk_buff *skb, struct sk_buff *skb2);
-extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_port_router(struct net_bridge_port *p,
-					unsigned long val);
-extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
-extern struct net_bridge_mdb_entry *br_mdb_ip_get(
-				struct net_bridge_mdb_htable *mdb,
-				struct br_ip *dst);
-extern struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
-				struct net_bridge_port *port, struct br_ip *group);
-extern void br_multicast_free_pg(struct rcu_head *head);
-extern struct net_bridge_port_group *br_multicast_new_port_group(
-				struct net_bridge_port *port,
-				struct br_ip *group,
-				struct net_bridge_port_group __rcu *next,
-				unsigned char state);
-extern void br_mdb_init(void);
-extern void br_mdb_uninit(void);
-extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-			  struct br_ip *group, int type);
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+		     struct sk_buff *skb, u16 vid);
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+					struct sk_buff *skb, u16 vid);
+void br_multicast_add_port(struct net_bridge_port *port);
+void br_multicast_del_port(struct net_bridge_port *port);
+void br_multicast_enable_port(struct net_bridge_port *port);
+void br_multicast_disable_port(struct net_bridge_port *port);
+void br_multicast_init(struct net_bridge *br);
+void br_multicast_open(struct net_bridge *br);
+void br_multicast_stop(struct net_bridge *br);
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb);
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb, struct sk_buff *skb2);
+int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
+int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+struct net_bridge_mdb_entry *
+br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
+struct net_bridge_mdb_entry *
+br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
+		       struct br_ip *group);
+void br_multicast_free_pg(struct rcu_head *head);
+struct net_bridge_port_group *
+br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
+			    struct net_bridge_port_group __rcu *next,
+			    unsigned char state);
+void br_mdb_init(void);
+void br_mdb_uninit(void);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type);
 
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -592,22 +573,21 @@
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-			       struct sk_buff *skb, u16 *vid);
-extern bool br_allowed_egress(struct net_bridge *br,
-			      const struct net_port_vlans *v,
-			      const struct sk_buff *skb);
-extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
-				      const struct net_port_vlans *v,
-				      struct sk_buff *skb);
-extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
-extern int br_vlan_delete(struct net_bridge *br, u16 vid);
-extern void br_vlan_flush(struct net_bridge *br);
-extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
-extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
-extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
-extern void nbp_vlan_flush(struct net_bridge_port *port);
-extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+			struct sk_buff *skb, u16 *vid);
+bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+		       const struct sk_buff *skb);
+struct sk_buff *br_handle_vlan(struct net_bridge *br,
+			       const struct net_port_vlans *v,
+			       struct sk_buff *skb);
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
+int br_vlan_delete(struct net_bridge *br, u16 vid);
+void br_vlan_flush(struct net_bridge *br);
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
+int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
+void nbp_vlan_flush(struct net_bridge_port *port);
+bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
 
 static inline struct net_port_vlans *br_get_vlan_info(
 						const struct net_bridge *br)
@@ -726,9 +706,9 @@
 
 /* br_netfilter.c */
 #ifdef CONFIG_BRIDGE_NETFILTER
-extern int br_netfilter_init(void);
-extern void br_netfilter_fini(void);
-extern void br_netfilter_rtable_init(struct net_bridge *);
+int br_netfilter_init(void);
+void br_netfilter_fini(void);
+void br_netfilter_rtable_init(struct net_bridge *);
 #else
 #define br_netfilter_init()	(0)
 #define br_netfilter_fini()	do { } while(0)
@@ -736,43 +716,39 @@
 #endif
 
 /* br_stp.c */
-extern void br_log_state(const struct net_bridge_port *p);
-extern struct net_bridge_port *br_get_port(struct net_bridge *br,
-					   u16 port_no);
-extern void br_init_port(struct net_bridge_port *p);
-extern void br_become_designated_port(struct net_bridge_port *p);
+void br_log_state(const struct net_bridge_port *p);
+struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
+void br_init_port(struct net_bridge_port *p);
+void br_become_designated_port(struct net_bridge_port *p);
 
-extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
-extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
-extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
-extern int br_set_max_age(struct net_bridge *br, unsigned long x);
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
+int br_set_forward_delay(struct net_bridge *br, unsigned long x);
+int br_set_hello_time(struct net_bridge *br, unsigned long x);
+int br_set_max_age(struct net_bridge *br, unsigned long x);
 
 
 /* br_stp_if.c */
-extern void br_stp_enable_bridge(struct net_bridge *br);
-extern void br_stp_disable_bridge(struct net_bridge *br);
-extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
-extern void br_stp_enable_port(struct net_bridge_port *p);
-extern void br_stp_disable_port(struct net_bridge_port *p);
-extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
-extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
-extern void br_stp_set_bridge_priority(struct net_bridge *br,
-				       u16 newprio);
-extern int br_stp_set_port_priority(struct net_bridge_port *p,
-				    unsigned long newprio);
-extern int br_stp_set_path_cost(struct net_bridge_port *p,
-				unsigned long path_cost);
-extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
+void br_stp_enable_bridge(struct net_bridge *br);
+void br_stp_disable_bridge(struct net_bridge *br);
+void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
+void br_stp_enable_port(struct net_bridge_port *p);
+void br_stp_disable_port(struct net_bridge_port *p);
+bool br_stp_recalculate_bridge_id(struct net_bridge *br);
+void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
+void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio);
+int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
+int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
+ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
 
 /* br_stp_bpdu.c */
 struct stp_proto;
-extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
-		       struct net_device *dev);
+void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+		struct net_device *dev);
 
 /* br_stp_timer.c */
-extern void br_stp_timer_init(struct net_bridge *br);
-extern void br_stp_port_timer_init(struct net_bridge_port *p);
-extern unsigned long br_timer_value(const struct timer_list *timer);
+void br_stp_timer_init(struct net_bridge *br);
+void br_stp_port_timer_init(struct net_bridge_port *p);
+unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
 #if IS_ENABLED(CONFIG_ATM_LANE)
@@ -781,23 +757,23 @@
 
 /* br_netlink.c */
 extern struct rtnl_link_ops br_link_ops;
-extern int br_netlink_init(void);
-extern void br_netlink_fini(void);
-extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
-extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-		      struct net_device *dev, u32 filter_mask);
+int br_netlink_init(void);
+void br_netlink_fini(void);
+void br_ifinfo_notify(int event, struct net_bridge_port *port);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
+	       u32 filter_mask);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
 extern const struct sysfs_ops brport_sysfs_ops;
-extern int br_sysfs_addif(struct net_bridge_port *p);
-extern int br_sysfs_renameif(struct net_bridge_port *p);
+int br_sysfs_addif(struct net_bridge_port *p);
+int br_sysfs_renameif(struct net_bridge_port *p);
 
 /* br_sysfs_br.c */
-extern int br_sysfs_addbr(struct net_device *dev);
-extern void br_sysfs_delbr(struct net_device *dev);
+int br_sysfs_addbr(struct net_device *dev);
+void br_sysfs_delbr(struct net_device *dev);
 
 #else
 
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 0c0fe36..2fe910c 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -51,19 +51,19 @@
 
 
 /* br_stp.c */
-extern void br_become_root_bridge(struct net_bridge *br);
-extern void br_config_bpdu_generation(struct net_bridge *);
-extern void br_configuration_update(struct net_bridge *);
-extern void br_port_state_selection(struct net_bridge *);
-extern void br_received_config_bpdu(struct net_bridge_port *p,
-				    const struct br_config_bpdu *bpdu);
-extern void br_received_tcn_bpdu(struct net_bridge_port *p);
-extern void br_transmit_config(struct net_bridge_port *p);
-extern void br_transmit_tcn(struct net_bridge *br);
-extern void br_topology_change_detection(struct net_bridge *br);
+void br_become_root_bridge(struct net_bridge *br);
+void br_config_bpdu_generation(struct net_bridge *);
+void br_configuration_update(struct net_bridge *);
+void br_port_state_selection(struct net_bridge *);
+void br_received_config_bpdu(struct net_bridge_port *p,
+			     const struct br_config_bpdu *bpdu);
+void br_received_tcn_bpdu(struct net_bridge_port *p);
+void br_transmit_config(struct net_bridge_port *p);
+void br_transmit_tcn(struct net_bridge *br);
+void br_topology_change_detection(struct net_bridge *br);
 
 /* br_stp_bpdu.c */
-extern void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
-extern void br_send_tcn_bpdu(struct net_bridge_port *);
+void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
+void br_send_tcn_bpdu(struct net_bridge_port *);
 
 #endif
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index a9aff9c..5ca74a0 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -1,6 +1,10 @@
 #
 # Bridge netfilter configuration
 #
+#
+config NF_TABLES_BRIDGE
+	depends on NF_TABLES
+	tristate "Ethernet Bridge nf_tables support"
 
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 0718699..ea7629f 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the netfilter modules for Link Layer filtering on a bridge.
 #
 
+obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
 # tables
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 8b84c58..3fb3c84 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -28,7 +28,7 @@
 	uint32_t cmp[2] = { 0, 0 };
 	int key = ((const unsigned char *)mac)[5];
 
-	memcpy(((char *) cmp) + 2, mac, 6);
+	memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
 	start = wh->table[key];
 	limit = wh->table[key + 1];
 	if (ip) {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 94b2b70..bb2da7b 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -60,17 +60,21 @@
 };
 
 static unsigned int
-ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+	    const struct net_device *in, const struct net_device *out,
+	    int (*okfn)(struct sk_buff *))
 {
-	return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter);
+	return ebt_do_table(ops->hooknum, skb, in, out,
+			    dev_net(in)->xt.frame_filter);
 }
 
 static unsigned int
-ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+	     const struct net_device *in, const struct net_device *out,
+	     int (*okfn)(struct sk_buff *))
 {
-	return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter);
+	return ebt_do_table(ops->hooknum, skb, in, out,
+			    dev_net(out)->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 322555a..bd238f1 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -60,17 +60,21 @@
 };
 
 static unsigned int
-ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+	   const struct net_device *in, const struct net_device *out,
+	   int (*okfn)(struct sk_buff *))
 {
-	return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat);
+	return ebt_do_table(ops->hooknum, skb, in, out,
+			    dev_net(in)->xt.frame_nat);
 }
 
 static unsigned int
-ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+	    const struct net_device *in, const struct net_device *out,
+	    int (*okfn)(struct sk_buff *))
 {
-	return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat);
+	return ebt_do_table(ops->hooknum, skb, in, out,
+			    dev_net(out)->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
new file mode 100644
index 0000000..cf54b22
--- /dev/null
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter_bridge.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_bridge __read_mostly = {
+	.family		= NFPROTO_BRIDGE,
+	.nhooks		= NF_BR_NUMHOOKS,
+	.owner		= THIS_MODULE,
+};
+
+static int nf_tables_bridge_init_net(struct net *net)
+{
+	net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.bridge == NULL)
+		return -ENOMEM;
+
+	memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
+
+	if (nft_register_afinfo(net, net->nft.bridge) < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(net->nft.bridge);
+	return -ENOMEM;
+}
+
+static void nf_tables_bridge_exit_net(struct net *net)
+{
+	nft_unregister_afinfo(net->nft.bridge);
+	kfree(net->nft.bridge);
+}
+
+static struct pernet_operations nf_tables_bridge_net_ops = {
+	.init	= nf_tables_bridge_init_net,
+	.exit	= nf_tables_bridge_exit_net,
+};
+
+static unsigned int
+nft_do_chain_bridge(const struct nf_hook_ops *ops,
+		    struct sk_buff *skb,
+		    const struct net_device *in,
+		    const struct net_device *out,
+		    int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_bridge = {
+	.family		= NFPROTO_BRIDGE,
+	.name		= "filter",
+	.type		= NFT_CHAIN_T_DEFAULT,
+	.hook_mask	= (1 << NF_BR_LOCAL_IN) |
+			  (1 << NF_BR_FORWARD) |
+			  (1 << NF_BR_LOCAL_OUT),
+	.fn		= {
+		[NF_BR_LOCAL_IN]	= nft_do_chain_bridge,
+		[NF_BR_FORWARD]		= nft_do_chain_bridge,
+		[NF_BR_LOCAL_OUT]	= nft_do_chain_bridge,
+	},
+};
+
+static int __init nf_tables_bridge_init(void)
+{
+	int ret;
+
+	nft_register_chain_type(&filter_bridge);
+	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
+	if (ret < 0)
+		nft_unregister_chain_type(&filter_bridge);
+
+	return ret;
+}
+
+static void __exit nf_tables_bridge_exit(void)
+{
+	unregister_pernet_subsys(&nf_tables_bridge_net_ops);
+	nft_unregister_chain_type(&filter_bridge);
+}
+
+module_init(nf_tables_bridge_init);
+module_exit(nf_tables_bridge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 6493351f..1be0b52 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -203,20 +203,10 @@
 			PKT_ERROR(pkt, "cow failed\n");
 			return -EPROTO;
 		}
-		/*
-		 * Is the SKB non-linear after skb_cow_data()? If so, we are
-		 * going to add data to the last SKB, so we need to adjust
-		 * lengths of the top SKB.
-		 */
-		if (lastskb != skb) {
-			pr_warn("Packet is non-linear\n");
-			skb->len += len;
-			skb->data_len += len;
-		}
 	}
 
 	/* All set to put the last SKB and optionally write data there. */
-	to = skb_put(lastskb, len);
+	to = pskb_put(skb, lastskb, len);
 	if (likely(data))
 		memcpy(to, data, len);
 	return 0;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3ab8dd2..d249874 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -420,7 +420,7 @@
  * @mask: CAN mask (see description)
  * @func: callback function on filter match
  * @data: returned parameter for callback function
- * @ident: string for calling module indentification
+ * @ident: string for calling module identification
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 1dccb4c..6de58b4 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -108,9 +108,9 @@
 extern struct dev_rcv_lists can_rx_alldev_list;
 
 /* function prototypes for the CAN networklayer procfs (proc.c) */
-extern void can_init_proc(void);
-extern void can_remove_proc(void);
-extern void can_stat_update(unsigned long data);
+void can_init_proc(void);
+void can_remove_proc(void);
+void can_stat_update(unsigned long data);
 
 /* structures and variables from af_can.c needed in proc.c for reading */
 extern struct timer_list can_stattimer;    /* timer for statistics update */
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index ed7d088..059a3ce 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -23,7 +23,7 @@
 	struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
-extern int ceph_auth_none_init(struct ceph_auth_client *ac);
+int ceph_auth_none_init(struct ceph_auth_client *ac);
 
 #endif
 
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index c5a058da..65ee720 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -45,7 +45,7 @@
 	struct ceph_x_authorizer auth_authorizer;
 };
 
-extern int ceph_x_init(struct ceph_auth_client *ac);
+int ceph_x_init(struct ceph_auth_client *ac);
 
 #endif
 
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 3572dc5..d149822 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -20,34 +20,32 @@
 		kfree(key->key);
 }
 
-extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
-				 const struct ceph_crypto_key *src);
-extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
-				  void **p, void *end);
-extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
-				  void **p, void *end);
-extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+			  const struct ceph_crypto_key *src);
+int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
 
 /* crypto.c */
-extern int ceph_decrypt(struct ceph_crypto_key *secret,
-			void *dst, size_t *dst_len,
-			const void *src, size_t src_len);
-extern int ceph_encrypt(struct ceph_crypto_key *secret,
-			void *dst, size_t *dst_len,
-			const void *src, size_t src_len);
-extern int ceph_decrypt2(struct ceph_crypto_key *secret,
-			void *dst1, size_t *dst1_len,
-			void *dst2, size_t *dst2_len,
-			const void *src, size_t src_len);
-extern int ceph_encrypt2(struct ceph_crypto_key *secret,
-			 void *dst, size_t *dst_len,
-			 const void *src1, size_t src1_len,
-			 const void *src2, size_t src2_len);
-extern int ceph_crypto_init(void);
-extern void ceph_crypto_shutdown(void);
+int ceph_decrypt(struct ceph_crypto_key *secret,
+		 void *dst, size_t *dst_len,
+		 const void *src, size_t src_len);
+int ceph_encrypt(struct ceph_crypto_key *secret,
+		 void *dst, size_t *dst_len,
+		 const void *src, size_t src_len);
+int ceph_decrypt2(struct ceph_crypto_key *secret,
+		  void *dst1, size_t *dst1_len,
+		  void *dst2, size_t *dst2_len,
+		  const void *src, size_t src_len);
+int ceph_encrypt2(struct ceph_crypto_key *secret,
+		  void *dst, size_t *dst_len,
+		  const void *src1, size_t src1_len,
+		  const void *src2, size_t src2_len);
+int ceph_crypto_init(void);
+void ceph_crypto_shutdown(void);
 
 /* armor.c */
-extern int ceph_armor(char *dst, const char *src, const char *end);
-extern int ceph_unarmor(char *dst, const char *src, const char *end);
+int ceph_armor(char *dst, const char *src, const char *end);
+int ceph_unarmor(char *dst, const char *src, const char *end);
 
 #endif
diff --git a/net/core/datagram.c b/net/core/datagram.c
index af814e7..a16ed7b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -577,7 +577,7 @@
 /**
  *	zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
  *	@skb: buffer to copy
- *	@from: io vector to copy to
+ *	@from: io vector to copy from
  *	@offset: offset in the io vector to start copying from
  *	@count: amount of vectors to copy to buffer from
  *
diff --git a/net/core/dev.c b/net/core/dev.c
index 3430b1e..8ffc52e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1203,7 +1203,7 @@
 {
 	if (dev->flags & IFF_UP) {
 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
-		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 	}
 }
 EXPORT_SYMBOL(netdev_state_change);
@@ -1293,7 +1293,7 @@
 	if (ret < 0)
 		return ret;
 
-	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
 	call_netdevice_notifiers(NETDEV_UP, dev);
 
 	return ret;
@@ -1307,7 +1307,7 @@
 	ASSERT_RTNL();
 	might_sleep();
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1323,7 +1323,7 @@
 
 	dev_deactivate_many(head);
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		/*
@@ -1351,7 +1351,7 @@
 	/* Temporarily disable netpoll until the interface is down */
 	netpoll_rx_disable(dev);
 
-	list_add(&dev->unreg_list, &single);
+	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
@@ -1362,21 +1362,20 @@
 static int dev_close_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
-	LIST_HEAD(tmp_list);
 
-	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+	/* Remove the devices that don't need to be closed */
+	list_for_each_entry_safe(dev, tmp, head, close_list)
 		if (!(dev->flags & IFF_UP))
-			list_move(&dev->unreg_list, &tmp_list);
+			list_del_init(&dev->close_list);
 
 	__dev_close_many(head);
 
-	list_for_each_entry(dev, head, unreg_list) {
-		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	list_for_each_entry_safe(dev, tmp, head, close_list) {
+		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
 		call_netdevice_notifiers(NETDEV_DOWN, dev);
+		list_del_init(&dev->close_list);
 	}
 
-	/* rollback_registered_many needs the complete original list */
-	list_splice(&tmp_list, head);
 	return 0;
 }
 
@@ -1397,7 +1396,7 @@
 		/* Block netpoll rx while the interface is going down */
 		netpoll_rx_disable(dev);
 
-		list_add(&dev->unreg_list, &single);
+		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
 
@@ -2378,6 +2377,8 @@
 	}
 
 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+	SKB_GSO_CB(skb)->encap_level = 0;
+
 	skb_reset_mac_header(skb);
 	skb_reset_mac_len(skb);
 
@@ -2537,7 +2538,7 @@
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+			struct netdev_queue *txq, void *accel_priv)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2603,9 +2604,13 @@
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		rc = ops->ndo_start_xmit(skb, dev);
+		if (accel_priv)
+			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
+		else
+			rc = ops->ndo_start_xmit(skb, dev);
+
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK)
+		if (rc == NETDEV_TX_OK && txq)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2621,7 +2626,10 @@
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		rc = ops->ndo_start_xmit(nskb, dev);
+		if (accel_priv)
+			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
+		else
+			rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2646,6 +2654,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -2853,7 +2862,7 @@
 
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
-				rc = dev_hard_start_xmit(skb, dev, txq);
+				rc = dev_hard_start_xmit(skb, dev, txq, NULL);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
@@ -4374,42 +4383,40 @@
 	/* upper master flag, there can only be one master device per list */
 	bool master;
 
-	/* indicates that this dev is our first-level lower/upper device */
-	bool neighbour;
-
 	/* counter for the number of times this device was added to us */
 	u16 ref_nr;
 
+	/* private field for the users */
+	void *private;
+
 	struct list_head list;
 	struct rcu_head rcu;
 };
 
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
-						 struct net_device *adj_dev,
-						 bool upper)
+static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
+						     struct net_device *adj_dev,
+						     struct list_head *adj_list)
 {
 	struct netdev_adjacent *adj;
-	struct list_head *dev_list;
 
-	dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
-
-	list_for_each_entry(adj, dev_list, list) {
+	list_for_each_entry_rcu(adj, adj_list, list) {
 		if (adj->dev == adj_dev)
 			return adj;
 	}
 	return NULL;
 }
 
-static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
-							  struct net_device *udev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+						 struct net_device *adj_dev,
+						 struct list_head *adj_list)
 {
-	return __netdev_find_adj(dev, udev, true);
-}
+	struct netdev_adjacent *adj;
 
-static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
-							  struct net_device *ldev)
-{
-	return __netdev_find_adj(dev, ldev, false);
+	list_for_each_entry(adj, adj_list, list) {
+		if (adj->dev == adj_dev)
+			return adj;
+	}
+	return NULL;
 }
 
 /**
@@ -4426,7 +4433,7 @@
 {
 	ASSERT_RTNL();
 
-	return __netdev_find_upper(dev, upper_dev);
+	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
 
@@ -4441,7 +4448,7 @@
 {
 	ASSERT_RTNL();
 
-	return !list_empty(&dev->upper_dev_list);
+	return !list_empty(&dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
@@ -4458,10 +4465,10 @@
 
 	ASSERT_RTNL();
 
-	if (list_empty(&dev->upper_dev_list))
+	if (list_empty(&dev->adj_list.upper))
 		return NULL;
 
-	upper = list_first_entry(&dev->upper_dev_list,
+	upper = list_first_entry(&dev->adj_list.upper,
 				 struct netdev_adjacent, list);
 	if (likely(upper->master))
 		return upper->dev;
@@ -4469,15 +4476,26 @@
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
-/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+void *netdev_adjacent_get_private(struct list_head *adj_list)
+{
+	struct netdev_adjacent *adj;
+
+	adj = list_entry(adj_list, struct netdev_adjacent, list);
+
+	return adj->private;
+}
+EXPORT_SYMBOL(netdev_adjacent_get_private);
+
+/**
+ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
  *
  * Gets the next device from the dev's upper list, starting from iter
  * position. The caller must hold RCU read lock.
  */
-struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-						 struct list_head **iter)
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+						     struct list_head **iter)
 {
 	struct netdev_adjacent *upper;
 
@@ -4485,14 +4503,71 @@
 
 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
-	if (&upper->list == &dev->upper_dev_list)
+	if (&upper->list == &dev->all_adj_list.upper)
 		return NULL;
 
 	*iter = &upper->list;
 
 	return upper->dev;
 }
-EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
+
+/**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ *				   lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must either hold the
+ * RTNL lock or its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next_private(struct net_device *dev,
+				    struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry(*iter, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	if (iter)
+		*iter = lower->list.next;
+
+	return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private);
+
+/**
+ * netdev_lower_get_next_private_rcu - Get the next ->private from the
+ *				       lower neighbour list, RCU
+ *				       variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+					struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	if (iter)
+		*iter = &lower->list;
+
+	return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
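A sketch of how the non-RCU iterator is meant to be driven, assuming
the caller holds RTNL; the my_port struct and the manual seeding of
iter are illustrative (a wrapper macro would normally hide the latter):

	struct list_head *iter = dev->adj_list.lower.next;
	void *priv;

	ASSERT_RTNL();
	while ((priv = netdev_lower_get_next_private(dev, &iter))) {
		struct my_port *port = priv;	/* hypothetical per-link state */
		/* ... consume port ... */
	}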
 
 /**
  * netdev_master_upper_dev_get_rcu - Get master upper device
@@ -4505,7 +4580,7 @@
 {
 	struct netdev_adjacent *upper;
 
-	upper = list_first_or_null_rcu(&dev->upper_dev_list,
+	upper = list_first_or_null_rcu(&dev->adj_list.upper,
 				       struct netdev_adjacent, list);
 	if (upper && likely(upper->master))
 		return upper->dev;
@@ -4515,15 +4590,16 @@
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
 					struct net_device *adj_dev,
-					bool neighbour, bool master,
-					bool upper)
+					struct list_head *dev_list,
+					void *private, bool master)
 {
 	struct netdev_adjacent *adj;
+	char linkname[IFNAMSIZ+7];
+	int ret;
 
-	adj = __netdev_find_adj(dev, adj_dev, upper);
+	adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
 	if (adj) {
-		BUG_ON(neighbour);
 		adj->ref_nr++;
 		return 0;
 	}
@@ -4534,124 +4610,179 @@
 
 	adj->dev = adj_dev;
 	adj->master = master;
-	adj->neighbour = neighbour;
 	adj->ref_nr = 1;
-
+	adj->private = private;
 	dev_hold(adj_dev);
-	pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
-		 adj_dev->name, upper ? "upper" : "lower", dev->name,
-		 adj_dev->name);
 
-	if (!upper) {
-		list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
-		return 0;
+	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+		 adj_dev->name, dev->name, adj_dev->name);
+
+	if (dev_list == &dev->adj_list.lower) {
+		sprintf(linkname, "lower_%s", adj_dev->name);
+		ret = sysfs_create_link(&(dev->dev.kobj),
+					&(adj_dev->dev.kobj), linkname);
+		if (ret)
+			goto free_adj;
+	} else if (dev_list == &dev->adj_list.upper) {
+		sprintf(linkname, "upper_%s", adj_dev->name);
+		ret = sysfs_create_link(&(dev->dev.kobj),
+					&(adj_dev->dev.kobj), linkname);
+		if (ret)
+			goto free_adj;
 	}
 
-	/* Ensure that master upper link is always the first item in list. */
-	if (master)
-		list_add_rcu(&adj->list, &dev->upper_dev_list);
-	else
-		list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+	/* Ensure that master link is always the first item in list. */
+	if (master) {
+		ret = sysfs_create_link(&(dev->dev.kobj),
+					&(adj_dev->dev.kobj), "master");
+		if (ret)
+			goto remove_symlinks;
+
+		list_add_rcu(&adj->list, dev_list);
+	} else {
+		list_add_tail_rcu(&adj->list, dev_list);
+	}
 
 	return 0;
-}
 
-static inline int __netdev_upper_dev_insert(struct net_device *dev,
-					    struct net_device *udev,
-					    bool master, bool neighbour)
-{
-	return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
-					    true);
-}
+remove_symlinks:
+	if (dev_list == &dev->adj_list.lower) {
+		sprintf(linkname, "lower_%s", adj_dev->name);
+		sysfs_remove_link(&(dev->dev.kobj), linkname);
+	} else if (dev_list == &dev->adj_list.upper) {
+		sprintf(linkname, "upper_%s", adj_dev->name);
+		sysfs_remove_link(&(dev->dev.kobj), linkname);
+	}
 
-static inline int __netdev_lower_dev_insert(struct net_device *dev,
-					    struct net_device *ldev,
-					    bool neighbour)
-{
-	return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
-					    false);
+free_adj:
+	kfree(adj);
+	dev_put(adj_dev);
+
+	return ret;
 }
 
 void __netdev_adjacent_dev_remove(struct net_device *dev,
-				  struct net_device *adj_dev, bool upper)
+				  struct net_device *adj_dev,
+				  struct list_head *dev_list)
 {
 	struct netdev_adjacent *adj;
+	char linkname[IFNAMSIZ+7];
 
-	if (upper)
-		adj = __netdev_find_upper(dev, adj_dev);
-	else
-		adj = __netdev_find_lower(dev, adj_dev);
+	adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
-	if (!adj)
+	if (!adj) {
+		pr_err("tried to remove device %s from %s\n",
+		       dev->name, adj_dev->name);
 		BUG();
+	}
 
 	if (adj->ref_nr > 1) {
+		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+			 adj->ref_nr-1);
 		adj->ref_nr--;
 		return;
 	}
 
+	if (adj->master)
+		sysfs_remove_link(&(dev->dev.kobj), "master");
+
+	if (dev_list == &dev->adj_list.lower) {
+		sprintf(linkname, "lower_%s", adj_dev->name);
+		sysfs_remove_link(&(dev->dev.kobj), linkname);
+	} else if (dev_list == &dev->adj_list.upper) {
+		sprintf(linkname, "upper_%s", adj_dev->name);
+		sysfs_remove_link(&(dev->dev.kobj), linkname);
+	}
+
 	list_del_rcu(&adj->list);
-	pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
-		 adj_dev->name, upper ? "upper" : "lower", dev->name,
-		 adj_dev->name);
+	pr_debug("dev_put for %s, because link removed from %s to %s\n",
+		 adj_dev->name, dev->name, adj_dev->name);
 	dev_put(adj_dev);
 	kfree_rcu(adj, rcu);
 }
 
-static inline void __netdev_upper_dev_remove(struct net_device *dev,
-					     struct net_device *udev)
-{
-	return __netdev_adjacent_dev_remove(dev, udev, true);
-}
-
-static inline void __netdev_lower_dev_remove(struct net_device *dev,
-					     struct net_device *ldev)
-{
-	return __netdev_adjacent_dev_remove(dev, ldev, false);
-}
-
-int __netdev_adjacent_dev_insert_link(struct net_device *dev,
-				      struct net_device *upper_dev,
-				      bool master, bool neighbour)
+int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+				     struct net_device *upper_dev,
+				     struct list_head *up_list,
+				     struct list_head *down_list,
+				     void *private, bool master)
 {
 	int ret;
 
-	ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+					   master);
 	if (ret)
 		return ret;
 
-	ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+					   false);
 	if (ret) {
-		__netdev_upper_dev_remove(dev, upper_dev);
+		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
 		return ret;
 	}
 
 	return 0;
 }
 
-static inline int __netdev_adjacent_dev_link(struct net_device *dev,
-					     struct net_device *udev)
+int __netdev_adjacent_dev_link(struct net_device *dev,
+			       struct net_device *upper_dev)
 {
-	return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+						&dev->all_adj_list.upper,
+						&upper_dev->all_adj_list.lower,
+						NULL, false);
 }
 
-static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
-						       struct net_device *udev,
-						       bool master)
+void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+					struct net_device *upper_dev,
+					struct list_head *up_list,
+					struct list_head *down_list)
 {
-	return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
 }
 
 void __netdev_adjacent_dev_unlink(struct net_device *dev,
 				  struct net_device *upper_dev)
 {
-	__netdev_upper_dev_remove(dev, upper_dev);
-	__netdev_lower_dev_remove(upper_dev, dev);
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+					   &dev->all_adj_list.upper,
+					   &upper_dev->all_adj_list.lower);
 }
 
+int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+					 struct net_device *upper_dev,
+					 void *private, bool master)
+{
+	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+
+	if (ret)
+		return ret;
+
+	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+					       &dev->adj_list.upper,
+					       &upper_dev->adj_list.lower,
+					       private, master);
+	if (ret) {
+		__netdev_adjacent_dev_unlink(dev, upper_dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+					    struct net_device *upper_dev)
+{
+	__netdev_adjacent_dev_unlink(dev, upper_dev);
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+					   &dev->adj_list.upper,
+					   &upper_dev->adj_list.lower);
+}
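The split into adj_list (direct neighbours, which carry ->private and
get the sysfs links) and all_adj_list (the full transitive closure) is
easiest to see from a stacking sketch; the device variables are
assumptions and error handling is elided:

	int err;

	ASSERT_RTNL();
	/* bond0 over eth0: the two are direct neighbours, so entries
	 * land in adj_list *and* all_adj_list. */
	err = netdev_master_upper_dev_link(eth0, bond0);
	/* vlan0 over bond0: bond0/vlan0 are neighbours, while eth0 and
	 * vlan0 only get interlinked as non-neighbours in the
	 * all_adj_list lists. */
	if (!err)
		err = netdev_upper_dev_link(bond0, vlan0);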
 
 static int __netdev_upper_dev_link(struct net_device *dev,
-				   struct net_device *upper_dev, bool master)
+				   struct net_device *upper_dev, bool master,
+				   void *private)
 {
 	struct netdev_adjacent *i, *j, *to_i, *to_j;
 	int ret = 0;
@@ -4662,26 +4793,29 @@
 		return -EBUSY;
 
 	/* To prevent loops, check if dev is not upper device to upper_dev. */
-	if (__netdev_find_upper(upper_dev, dev))
+	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
 		return -EBUSY;
 
-	if (__netdev_find_upper(dev, upper_dev))
+	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
 		return -EEXIST;
 
 	if (master && netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
-	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+						   master);
 	if (ret)
 		return ret;
 
 	/* Now that we linked these devs, make all the upper_dev's
-	 * upper_dev_list visible to every dev's lower_dev_list and vice
+	 * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
 	 * versa, and don't forget the devices themselves. All of these
 	 * links are non-neighbours.
 	 */
-	list_for_each_entry(i, &dev->lower_dev_list, list) {
-		list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+			pr_debug("Interlinking %s with %s, non-neighbour\n",
+				 i->dev->name, j->dev->name);
 			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
 			if (ret)
 				goto rollback_mesh;
@@ -4689,14 +4823,18 @@
 	}
 
 	/* add dev to every upper_dev's upper device */
-	list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+		pr_debug("linking %s's upper device %s with %s\n",
+			 upper_dev->name, i->dev->name, dev->name);
 		ret = __netdev_adjacent_dev_link(dev, i->dev);
 		if (ret)
 			goto rollback_upper_mesh;
 	}
 
 	/* add upper_dev to every dev's lower device */
-	list_for_each_entry(i, &dev->lower_dev_list, list) {
+	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+		pr_debug("linking %s's lower device %s with %s\n", dev->name,
+			 i->dev->name, upper_dev->name);
 		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
 		if (ret)
 			goto rollback_lower_mesh;
@@ -4707,7 +4845,7 @@
 
 rollback_lower_mesh:
 	to_i = i;
-	list_for_each_entry(i, &dev->lower_dev_list, list) {
+	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		if (i == to_i)
 			break;
 		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
@@ -4717,7 +4855,7 @@
 
 rollback_upper_mesh:
 	to_i = i;
-	list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		if (i == to_i)
 			break;
 		__netdev_adjacent_dev_unlink(dev, i->dev);
@@ -4728,8 +4866,8 @@
 rollback_mesh:
 	to_i = i;
 	to_j = j;
-	list_for_each_entry(i, &dev->lower_dev_list, list) {
-		list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			if (i == to_i && j == to_j)
 				break;
 			__netdev_adjacent_dev_unlink(i->dev, j->dev);
@@ -4738,7 +4876,7 @@
 			break;
 	}
 
-	__netdev_adjacent_dev_unlink(dev, upper_dev);
+	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	return ret;
 }
@@ -4756,7 +4894,7 @@
 int netdev_upper_dev_link(struct net_device *dev,
 			  struct net_device *upper_dev)
 {
-	return __netdev_upper_dev_link(dev, upper_dev, false);
+	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_link);
 
@@ -4774,10 +4912,18 @@
 int netdev_master_upper_dev_link(struct net_device *dev,
 				 struct net_device *upper_dev)
 {
-	return __netdev_upper_dev_link(dev, upper_dev, true);
+	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_link);
 
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+					 struct net_device *upper_dev,
+					 void *private)
+{
+	return __netdev_upper_dev_link(dev, upper_dev, true, private);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
+
 /**
  * netdev_upper_dev_unlink - Removes a link to upper device
  * @dev: device
@@ -4792,29 +4938,59 @@
 	struct netdev_adjacent *i, *j;
 	ASSERT_RTNL();
 
-	__netdev_adjacent_dev_unlink(dev, upper_dev);
+	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	/* Here is the tricky part. We must remove all dev's lower
 	 * devices from all upper_dev's upper devices and vice
 	 * versa, to maintain the graph relationship.
 	 */
-	list_for_each_entry(i, &dev->lower_dev_list, list)
-		list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+	list_for_each_entry(i, &dev->all_adj_list.lower, list)
+		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
 			__netdev_adjacent_dev_unlink(i->dev, j->dev);
 
 	/* remove also the devices itself from lower/upper device
 	 * list
 	 */
-	list_for_each_entry(i, &dev->lower_dev_list, list)
+	list_for_each_entry(i, &dev->all_adj_list.lower, list)
 		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
 
-	list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev);
 
 	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+				       struct net_device *lower_dev)
+{
+	struct netdev_adjacent *lower;
+
+	if (!lower_dev)
+		return NULL;
+	lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
+	if (!lower)
+		return NULL;
+
+	return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
+
+void *netdev_lower_dev_get_private(struct net_device *dev,
+				   struct net_device *lower_dev)
+{
+	struct netdev_adjacent *lower;
+
+	if (!lower_dev)
+		return NULL;
+	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+	if (!lower)
+		return NULL;
+
+	return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private);
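Taken together with netdev_master_upper_dev_link_private(), these
lookup helpers give a master device a place to hang per-slave state.
A sketch under the assumption of a driver-private struct my_port:

	int err;

	err = netdev_master_upper_dev_link_private(slave, master, port);
	if (err)
		return err;
	/* later, under RTNL: */
	struct my_port *p = netdev_lower_dev_get_private(master, slave);
	/* or, under rcu_read_lock(): */
	struct my_port *q = netdev_lower_dev_get_private_rcu(master, slave);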
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -4823,7 +4999,7 @@
 		ops->ndo_change_rx_flags(dev, flags);
 }
 
-static int __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
 {
 	unsigned int old_flags = dev->flags;
 	kuid_t uid;
@@ -4866,6 +5042,8 @@
 
 		dev_change_rx_flags(dev, IFF_PROMISC);
 	}
+	if (notify)
+		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
 	return 0;
 }
 
@@ -4885,7 +5063,7 @@
 	unsigned int old_flags = dev->flags;
 	int err;
 
-	err = __dev_set_promiscuity(dev, inc);
+	err = __dev_set_promiscuity(dev, inc, true);
 	if (err < 0)
 		return err;
 	if (dev->flags != old_flags)
@@ -4894,22 +5072,9 @@
 }
 EXPORT_SYMBOL(dev_set_promiscuity);
 
-/**
- *	dev_set_allmulti	- update allmulti count on a device
- *	@dev: device
- *	@inc: modifier
- *
- *	Add or remove reception of all multicast frames to a device. While the
- *	count in the device remains above zero the interface remains listening
- *	to all interfaces. Once it hits zero the device reverts back to normal
- *	filtering operation. A negative @inc value is used to drop the counter
- *	when releasing a resource needing all multicasts.
- *	Return 0 if successful or a negative errno code on error.
- */
-
-int dev_set_allmulti(struct net_device *dev, int inc)
+static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
 {
-	unsigned int old_flags = dev->flags;
+	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
 
 	ASSERT_RTNL();
 
@@ -4932,9 +5097,30 @@
 	if (dev->flags ^ old_flags) {
 		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
+		if (notify)
+			__dev_notify_flags(dev, old_flags,
+					   dev->gflags ^ old_gflags);
 	}
 	return 0;
 }
+
+/**
+ *	dev_set_allmulti	- update allmulti count on a device
+ *	@dev: device
+ *	@inc: modifier
+ *
+ *	Add or remove reception of all multicast frames on a device. While the
+ *	count in the device remains above zero the interface keeps receiving
+ *	all multicast frames. Once it hits zero the device reverts back to normal
+ *	filtering operation. A negative @inc value is used to drop the counter
+ *	when releasing a resource needing all multicasts.
+ *	Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+	return __dev_set_allmulti(dev, inc, true);
+}
 EXPORT_SYMBOL(dev_set_allmulti);
 
 /*
@@ -4959,10 +5145,10 @@
 		 * therefore calling __dev_set_promiscuity here is safe.
 		 */
 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
-			__dev_set_promiscuity(dev, 1);
+			__dev_set_promiscuity(dev, 1, false);
 			dev->uc_promisc = true;
 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
-			__dev_set_promiscuity(dev, -1);
+			__dev_set_promiscuity(dev, -1, false);
 			dev->uc_promisc = false;
 		}
 	}
@@ -5051,9 +5237,13 @@
 
 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
+		unsigned int old_flags = dev->flags;
 
 		dev->gflags ^= IFF_PROMISC;
-		dev_set_promiscuity(dev, inc);
+
+		if (__dev_set_promiscuity(dev, inc, false) >= 0)
+			if (dev->flags != old_flags)
+				dev_set_rx_mode(dev);
 	}
 
 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
@@ -5064,16 +5254,20 @@
 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
 
 		dev->gflags ^= IFF_ALLMULTI;
-		dev_set_allmulti(dev, inc);
+		__dev_set_allmulti(dev, inc, false);
 	}
 
 	return ret;
 }
 
-void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+			unsigned int gchanges)
 {
 	unsigned int changes = dev->flags ^ old_flags;
 
+	if (gchanges)
+		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
+
 	if (changes & IFF_UP) {
 		if (dev->flags & IFF_UP)
 			call_netdevice_notifiers(NETDEV_UP, dev);
@@ -5102,17 +5296,14 @@
 int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
 	int ret;
-	unsigned int changes, old_flags = dev->flags;
+	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
 
 	ret = __dev_change_flags(dev, flags);
 	if (ret < 0)
 		return ret;
 
-	changes = old_flags ^ dev->flags;
-	if (changes)
-		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-
-	__dev_notify_flags(dev, old_flags);
+	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
+	__dev_notify_flags(dev, old_flags, changes);
 	return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
@@ -5259,6 +5450,7 @@
 static void rollback_registered_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
+	LIST_HEAD(close_head);
 
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
@@ -5281,7 +5473,9 @@
 	}
 
 	/* If device is running, close it first. */
-	dev_close_many(head);
+	list_for_each_entry(dev, head, unreg_list)
+		list_add_tail(&dev->close_list, &close_head);
+	dev_close_many(&close_head);
 
 	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
@@ -5304,7 +5498,7 @@
 
 		if (!dev->rtnl_link_ops ||
 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 
 		/*
 		 *	Flush the unicast and multicast chains
@@ -5703,7 +5897,7 @@
 	 */
 	if (!dev->rtnl_link_ops ||
 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
 
 out:
 	return ret;
@@ -6010,6 +6204,16 @@
 }
 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
 
+void netdev_freemem(struct net_device *dev)
+{
+	char *addr = (char *)dev - dev->padded;
+
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+
 /**
  *	alloc_netdev_mqs - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
@@ -6053,7 +6257,9 @@
 	/* ensure 32-byte alignment of whole construct */
 	alloc_size += NETDEV_ALIGN - 1;
 
-	p = kzalloc(alloc_size, GFP_KERNEL);
+	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!p)
+		p = vzalloc(alloc_size);
 	if (!p)
 		return NULL;
 
@@ -6062,7 +6268,7 @@
 
 	dev->pcpu_refcnt = alloc_percpu(int);
 	if (!dev->pcpu_refcnt)
-		goto free_p;
+		goto free_dev;
 
 	if (dev_addr_init(dev))
 		goto free_pcpu;
@@ -6077,9 +6283,12 @@
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
+	INIT_LIST_HEAD(&dev->close_list);
 	INIT_LIST_HEAD(&dev->link_watch_list);
-	INIT_LIST_HEAD(&dev->upper_dev_list);
-	INIT_LIST_HEAD(&dev->lower_dev_list);
+	INIT_LIST_HEAD(&dev->adj_list.upper);
+	INIT_LIST_HEAD(&dev->adj_list.lower);
+	INIT_LIST_HEAD(&dev->all_adj_list.upper);
+	INIT_LIST_HEAD(&dev->all_adj_list.lower);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 
@@ -6112,8 +6321,8 @@
 	kfree(dev->_rx);
 #endif
 
-free_p:
-	kfree(p);
+free_dev:
+	netdev_freemem(dev);
 	return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mqs);
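The kzalloc()-then-vzalloc() fallback only works because every free
path now funnels through netdev_freemem(), which rediscovers the
allocator via is_vmalloc_addr(). The pairing in isolation, as a
sketch:

	void *p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	/* ... */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);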
@@ -6150,7 +6359,7 @@
 
 	/*  Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
-		kfree((char *)dev - dev->padded);
+		netdev_freemem(dev);
 		return;
 	}
 
@@ -6312,7 +6521,7 @@
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 	rcu_barrier();
 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
-	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 
 	/*
 	 *	Flush the unicast and multicast chains
@@ -6351,7 +6560,7 @@
 	 *	Prevent userspace races by waiting until the network
 	 *	device is fully setup before sending notifications.
 	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
 
 	synchronize_net();
 	err = 0;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 6cda4e2..ec40a84 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -752,7 +752,7 @@
 EXPORT_SYMBOL(dev_mc_del_global);
 
 /**
- *	dev_mc_sync - Synchronize device's unicast list to another device
+ *	dev_mc_sync - Synchronize device's multicast list to another device
  *	@to: destination device
  *	@from: source device
  *
@@ -780,7 +780,7 @@
 EXPORT_SYMBOL(dev_mc_sync);
 
 /**
- *	dev_mc_sync_multiple - Synchronize device's unicast list to another
+ *	dev_mc_sync_multiple - Synchronize device's multicast list to another
  *	device, but allow for multiple calls to sync to multiple devices.
  *	@to: destination device
  *	@from: source device
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 78e9d92..30071de 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -81,6 +81,8 @@
 	[NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
 	[NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
 	[NETIF_F_GSO_GRE_BIT] =		 "tx-gre-segmentation",
+	[NETIF_F_GSO_IPIP_BIT] =	 "tx-ipip-segmentation",
+	[NETIF_F_GSO_SIT_BIT] =		 "tx-sit-segmentation",
 	[NETIF_F_GSO_UDP_TUNNEL_BIT] =	 "tx-udp_tnl-segmentation",
 	[NETIF_F_GSO_MPLS_BIT] =	 "tx-mpls-segmentation",
 
@@ -94,6 +96,7 @@
 	[NETIF_F_LOOPBACK_BIT] =         "loopback",
 	[NETIF_F_RXFCS_BIT] =            "rx-fcs",
 	[NETIF_F_RXALL_BIT] =            "rx-all",
+	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 };
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 2e65413..f409e0b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -460,7 +460,8 @@
 		if (frh->action && (frh->action != rule->action))
 			continue;
 
-		if (frh->table && (frh_get_table(frh, tb) != rule->table))
+		if (frh_get_table(frh, tb) &&
+		    (frh_get_table(frh, tb) != rule->table))
 			continue;
 
 		if (tb[FRA_PRIORITY] &&
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 143b6fd..d6ef173 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -25,9 +25,35 @@
 	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff where poff
+ * is the protocol port offset returned from proto_ports_offset
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+	int poff = proto_ports_offset(ip_proto);
+
+	if (poff >= 0) {
+		__be32 *ports, _ports;
+
+		ports = skb_header_pointer(skb, thoff + poff,
+					   sizeof(_ports), &_ports);
+		if (ports)
+			return *ports;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
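A usage sketch for the newly exported helper, assuming thoff and
ip_proto were already dissected; the union mirrors the port16[]
overlay used by struct flow_keys:

	union {
		__be32 both;
		__be16 port16[2];
	} ports;

	ports.both = skb_flow_get_ports(skb, thoff, ip_proto);
	if (ports.both) {
		/* port16[0] is the source port, port16[1] the destination,
		 * both still in network byte order; 0 means the ports
		 * could not be read. */
	}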
+
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-	int poff, nhoff = skb_network_offset(skb);
+	int nhoff = skb_network_offset(skb);
 	u8 ip_proto;
 	__be16 proto = skb->protocol;
 
@@ -42,13 +68,13 @@
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
 		if (!iph || iph->ihl < 5)
 			return false;
+		nhoff += iph->ihl * 4;
 
+		ip_proto = iph->protocol;
 		if (ip_is_fragment(iph))
 			ip_proto = 0;
-		else
-			ip_proto = iph->protocol;
+
 		iph_to_flow_copy_addrs(flow, iph);
-		nhoff += iph->ihl * 4;
 		break;
 	}
 	case __constant_htons(ETH_P_IPV6): {
@@ -150,16 +176,7 @@
 	}
 
 	flow->ip_proto = ip_proto;
-	poff = proto_ports_offset(ip_proto);
-	if (poff >= 0) {
-		__be32 *ports, _ports;
-
-		ports = skb_header_pointer(skb, nhoff + poff,
-					   sizeof(_ports), &_ports);
-		if (ports)
-			flow->ports = *ports;
-	}
-
+	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
 	flow->thoff = (u16) nhoff;
 
 	return true;
@@ -167,6 +184,22 @@
 EXPORT_SYMBOL(skb_flow_dissect);
 
 static u32 hashrnd __read_mostly;
+static __always_inline void __flow_hash_secret_init(void)
+{
+	net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
+
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+{
+	__flow_hash_secret_init();
+	return jhash_3words(a, b, c, hashrnd);
+}
+
+static __always_inline u32 __flow_hash_1word(u32 a)
+{
+	__flow_hash_secret_init();
+	return jhash_1word(a, hashrnd);
+}
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
@@ -193,9 +226,9 @@
 		swap(keys.port16[0], keys.port16[1]);
 	}
 
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src,
-			    (__force u32)keys.ports, hashrnd);
+	hash = __flow_hash_3words((__force u32)keys.dst,
+				  (__force u32)keys.src,
+				  (__force u32)keys.ports);
 	if (!hash)
 		hash = 1;
 
@@ -231,7 +264,7 @@
 		hash = skb->sk->sk_hash;
 	else
 		hash = (__force u16) skb->protocol;
-	hash = jhash_1word(hash, hashrnd);
+	hash = __flow_hash_1word(hash);
 
 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
@@ -323,7 +356,7 @@
 				else
 					hash = (__force u16) skb->protocol ^
 					    skb->rxhash;
-				hash = jhash_1word(hash, hashrnd);
+				hash = __flow_hash_1word(hash);
 				queue_index = map->queues[
 				    ((u64)hash * map->len) >> 32];
 			}
@@ -378,11 +411,3 @@
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
 }
-
-static int __init initialize_hashrnd(void)
-{
-	get_random_bytes(&hashrnd, sizeof(hashrnd));
-	return 0;
-}
-
-late_initcall_sync(initialize_hashrnd);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b77eeec..4cdb7c4 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -100,7 +100,7 @@
 EXPORT_SYMBOL(memcpy_toiovecend);
 
 /*
- *	Copy iovec from kernel. Returns -EFAULT on error.
+ *	Copy iovec to kernel. Returns -EFAULT on error.
  */
 
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6072610..ca15f32 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -867,7 +867,7 @@
 static void neigh_probe(struct neighbour *neigh)
 	__releases(neigh->lock)
 {
-	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 	/* keep skb alive even if arp_queue overflows */
 	if (skb)
 		skb = skb_copy(skb, GFP_ATOMIC);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 325dee8..f3edf96 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1263,7 +1263,7 @@
 	BUG_ON(dev->reg_state != NETREG_RELEASED);
 
 	kfree(dev->ifalias);
-	kfree((char *)dev - dev->padded);
+	netdev_freemem(dev);
 }
 
 static const void *net_namespace(struct device *d)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index d9cd627..9b7cf6c 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -222,11 +222,10 @@
 			    struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
-	void *v;
+	void *v = (void *)(unsigned long)css->cgroup->id;
 
 	cgroup_taskset_for_each(p, css, tset) {
 		task_lock(p);
-		v = (void *)(unsigned long)task_netprioidx(p);
 		iterate_fd(p->files, 0, update_netprio, v);
 		task_unlock(p);
 	}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2a0e21d..cf67144 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1647,9 +1647,8 @@
 	}
 
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
-	__dev_notify_flags(dev, old_flags);
+	__dev_notify_flags(dev, old_flags, ~0U);
 	return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
@@ -1985,14 +1984,15 @@
 	return skb->len;
 }
 
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+		  gfp_t flags)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
 	if (skb == NULL)
 		goto errout;
 
@@ -2003,7 +2003,7 @@
 		kfree_skb(skb);
 		goto errout;
 	}
-	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
 	return;
 errout:
 	if (err < 0)
@@ -2717,7 +2717,7 @@
 	case NETDEV_JOIN:
 		break;
 	default:
-		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 		break;
 	}
 	return NOTIFY_DONE;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 8d9d05e..897da56 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -7,6 +7,7 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <linux/string.h>
+#include <linux/net.h>
 
 #include <net/secure_seq.h>
 
@@ -15,20 +16,9 @@
 
 static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
 
-static void net_secret_init(void)
+static __always_inline void net_secret_init(void)
 {
-	u32 tmp;
-	int i;
-
-	if (likely(net_secret[0]))
-		return;
-
-	for (i = NET_SECRET_SIZE; i > 0;) {
-		do {
-			get_random_bytes(&tmp, sizeof(tmp));
-		} while (!tmp);
-		cmpxchg(&net_secret[--i], 0, tmp);
-	}
+	net_get_random_once(net_secret, sizeof(net_secret));
 }
 #endif
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d81cff1..8cec1e6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -476,6 +476,18 @@
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+			  unsigned int truesize)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+	skb_frag_size_add(frag, size);
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += truesize;
+}
+EXPORT_SYMBOL(skb_coalesce_rx_frag);
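A receive-path sketch for the new helper: if the bytes just received
sit contiguously in the page already referenced by the last fragment,
grow that fragment instead of burning a new frag slot. The contiguity
test and the surrounding variables are assumptions for illustration:

	int last = skb_shinfo(skb)->nr_frags - 1;
	skb_frag_t *frag = last >= 0 ? &skb_shinfo(skb)->frags[last] : NULL;

	if (frag && skb_frag_page(frag) == page &&
	    frag->page_offset + skb_frag_size(frag) == offset)
		skb_coalesce_rx_frag(skb, last, len, truesize);
	else
		skb_add_rx_frag(skb, last + 1, page, offset, len, truesize);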
+
 static void skb_drop_list(struct sk_buff **listp)
 {
 	kfree_skb_list(*listp);
@@ -580,9 +592,6 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-	nf_conntrack_put_reasm(skb->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(skb->nf_bridge);
 #endif
@@ -903,6 +912,9 @@
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += off;
 	/* {transport,network,mac}_header and tail are relative to skb->head */
 	skb->transport_header += off;
 	skb->network_header   += off;
@@ -1036,8 +1048,8 @@
  *	@ntail: room to add at tail
  *	@gfp_mask: allocation priority
  *
- *	Expands (or creates identical copy, if &nhead and &ntail are zero)
- *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
+ *	Expands (or creates identical copy, if @nhead and @ntail are zero)
+ *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
  *	reference count of 1. Returns zero in the case of success or error,
  *	if expansion failed. In the last case, &sk_buff is not changed.
  *
@@ -1109,9 +1121,6 @@
 #endif
 	skb->tail	      += off;
 	skb_headers_offset_update(skb, nhead);
-	/* Only adjust this if it actually is csum_start rather than csum */
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		skb->csum_start += nhead;
 	skb->cloned   = 0;
 	skb->hdr_len  = 0;
 	skb->nohdr    = 0;
@@ -1176,7 +1185,6 @@
 					NUMA_NO_NODE);
 	int oldheadroom = skb_headroom(skb);
 	int head_copy_len, head_copy_off;
-	int off;
 
 	if (!n)
 		return NULL;
@@ -1200,11 +1208,7 @@
 
 	copy_skb_header(n, skb);
 
-	off                  = newheadroom - oldheadroom;
-	if (n->ip_summed == CHECKSUM_PARTIAL)
-		n->csum_start += off;
-
-	skb_headers_offset_update(n, off);
+	skb_headers_offset_update(n, newheadroom - oldheadroom);
 
 	return n;
 }
@@ -1257,6 +1261,29 @@
 EXPORT_SYMBOL(skb_pad);
 
 /**
+ *	pskb_put - add data to the tail of a potentially fragmented buffer
+ *	@skb: start of the buffer to use
+ *	@tail: tail fragment of the buffer to use
+ *	@len: amount of data to add
+ *
+ *	This function extends the used data area of the potentially
+ *	fragmented buffer. @tail must be the last fragment of @skb -- or
+ *	@skb itself. If this would exceed the total buffer size the kernel
+ *	will panic. A pointer to the first byte of the extra data is
+ *	returned.
+ */
+
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
+{
+	if (tail != skb) {
+		skb->data_len += len;
+		skb->len += len;
+	}
+	return skb_put(tail, len);
+}
+EXPORT_SYMBOL_GPL(pskb_put);
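A sketch of the sort of caller this serves, e.g. an IPsec-style
trailer append to an skb that skb_cow_data() may have left fragmented
(trailer and padlen are assumed inputs, not shown in this patch):

	u8 *tail;

	tail = pskb_put(skb, trailer, padlen);	/* trailer from skb_cow_data() */
	memset(tail, 0, padlen);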
+
+/**
  *	skb_put - add data to a buffer
  *	@skb: buffer to use
  *	@len: amount of data to add
@@ -1933,9 +1960,8 @@
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
-
-__wsum skb_checksum(const struct sk_buff *skb, int offset,
-			  int len, __wsum csum)
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+		      __wsum csum, const struct skb_checksum_ops *ops)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
@@ -1946,7 +1972,7 @@
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = csum_partial(skb->data + offset, copy, csum);
+		csum = ops->update(skb->data + offset, copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
@@ -1967,10 +1993,10 @@
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_atomic(skb_frag_page(frag));
-			csum2 = csum_partial(vaddr + frag->page_offset +
-					     offset - start, copy, 0);
+			csum2 = ops->update(vaddr + frag->page_offset +
+					    offset - start, copy, 0);
 			kunmap_atomic(vaddr);
-			csum = csum_block_add(csum, csum2, pos);
+			csum = ops->combine(csum, csum2, pos, copy);
 			if (!(len -= copy))
 				return csum;
 			offset += copy;
@@ -1989,9 +2015,9 @@
 			__wsum csum2;
 			if (copy > len)
 				copy = len;
-			csum2 = skb_checksum(frag_iter, offset - start,
-					     copy, 0);
-			csum = csum_block_add(csum, csum2, pos);
+			csum2 = __skb_checksum(frag_iter, offset - start,
+					       copy, 0, ops);
+			csum = ops->combine(csum, csum2, pos, copy);
 			if ((len -= copy) == 0)
 				return csum;
 			offset += copy;
@@ -2003,6 +2029,18 @@
 
 	return csum;
 }
+EXPORT_SYMBOL(__skb_checksum);
+
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+		    int len, __wsum csum)
+{
+	const struct skb_checksum_ops ops = {
+		.update  = csum_partial_ext,
+		.combine = csum_block_add_ext,
+	};
+
+	return __skb_checksum(skb, offset, len, csum, &ops);
+}
 EXPORT_SYMBOL(skb_checksum);
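The ops indirection exists so that non-Internet checksums can reuse
the same frag-walking loop. A hedged sketch of an alternative ops
vector; the crc32c update/combine helper names are hypothetical:

	static const struct skb_checksum_ops my_crc32c_ops = {
		.update  = my_crc32c_update,	/* hypothetical helper */
		.combine = my_crc32c_combine,	/* hypothetical helper */
	};

	__wsum crc = __skb_checksum(skb, 0, skb->len,
				    ~(__force __wsum)0, &my_crc32c_ops);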
 
 /* Both of above in one bottle. */
@@ -2522,14 +2560,14 @@
  * @data: destination pointer for data to be returned
  * @st: state variable
  *
- * Reads a block of skb data at &consumed relative to the
+ * Reads a block of skb data at @consumed relative to the
  * lower offset specified to skb_prepare_seq_read(). Assigns
- * the head of the data block to &data and returns the length
+ * the head of the data block to @data and returns the length
  * of the block or 0 if the end of the skb data or the upper
  * offset has been reached.
  *
  * The caller is not required to consume all of the data
- * returned, i.e. &consumed is typically set to the number
+ * returned, i.e. @consumed is typically set to the number
  * of bytes already consumed and the next call to
  * skb_seq_read() will return the remaining part of the block.
  *
@@ -2837,14 +2875,7 @@
 		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
-		/* nskb and skb might have different headroom */
-		if (nskb->ip_summed == CHECKSUM_PARTIAL)
-			nskb->csum_start += skb_headroom(nskb) - headroom;
-
-		skb_reset_mac_header(nskb);
-		skb_set_network_header(nskb, skb->mac_len);
-		nskb->transport_header = (nskb->network_header +
-					  skb_network_header_len(skb));
+		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
@@ -2936,32 +2967,30 @@
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-	struct sk_buff *p = *head;
-	struct sk_buff *nskb;
-	struct skb_shared_info *skbinfo = skb_shinfo(skb);
-	struct skb_shared_info *pinfo = skb_shinfo(p);
-	unsigned int headroom;
-	unsigned int len = skb_gro_len(skb);
+	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
 	unsigned int offset = skb_gro_offset(skb);
 	unsigned int headlen = skb_headlen(skb);
+	struct sk_buff *nskb, *lp, *p = *head;
+	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
+	unsigned int headroom;
 
-	if (p->len + len >= 65536)
+	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
 
-	if (pinfo->frag_list)
-		goto merge;
-	else if (headlen <= offset) {
+	lp = NAPI_GRO_CB(p)->last ?: p;
+	pinfo = skb_shinfo(lp);
+
+	if (headlen <= offset) {
 		skb_frag_t *frag;
 		skb_frag_t *frag2;
 		int i = skbinfo->nr_frags;
 		int nr_frags = pinfo->nr_frags + i;
 
-		offset -= headlen;
-
 		if (nr_frags > MAX_SKB_FRAGS)
-			return -E2BIG;
+			goto merge;
 
+		offset -= headlen;
 		pinfo->nr_frags = nr_frags;
 		skbinfo->nr_frags = 0;
 
@@ -2992,7 +3021,7 @@
 		unsigned int first_offset;
 
 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-			return -E2BIG;
+			goto merge;
 
 		first_offset = skb->data -
 			       (unsigned char *)page_address(page) +
@@ -3010,7 +3039,10 @@
 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
-	} else if (skb_gro_len(p) != pinfo->gso_size)
+	}
+	if (pinfo->frag_list)
+		goto merge;
+	if (skb_gro_len(p) != pinfo->gso_size)
 		return -E2BIG;
 
 	headroom = skb_headroom(p);
@@ -3062,16 +3094,24 @@
 
 	__skb_pull(skb, offset);
 
-	NAPI_GRO_CB(p)->last->next = skb;
+	if (!NAPI_GRO_CB(p)->last)
+		skb_shinfo(p)->frag_list = skb;
+	else
+		NAPI_GRO_CB(p)->last->next = skb;
 	NAPI_GRO_CB(p)->last = skb;
 	skb_header_release(skb);
+	lp = p;
 
 done:
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += len;
 	p->truesize += delta_truesize;
 	p->len += len;
-
+	if (lp != p) {
+		lp->data_len += len;
+		lp->truesize += delta_truesize;
+		lp->len += len;
+	}
 	NAPI_GRO_CB(skb)->same_flow = 1;
 	return 0;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 0b39e7a..ab20ed9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -475,12 +475,6 @@
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
-void sk_reset_txq(struct sock *sk)
-{
-	sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
 	struct dst_entry *dst = __sk_dst_get(sk);
@@ -914,6 +908,13 @@
 		}
 		break;
 #endif
+
+	case SO_MAX_PACING_RATE:
+		sk->sk_max_pacing_rate = val;
+		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+					 sk->sk_max_pacing_rate);
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1177,6 +1178,10 @@
 		break;
 #endif
 
+	case SO_MAX_PACING_RATE:
+		v.val = sk->sk_max_pacing_rate;
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	}
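From userspace the new option takes an unsigned int in bytes per
second; as the setsockopt branch above shows, setting it also clamps
the current pacing rate immediately. A minimal sketch (fd: a
connected TCP socket):

	#include <stdio.h>
	#include <sys/socket.h>

	unsigned int max_rate = 5 * 1000 * 1000;	/* bytes per second */

	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &max_rate, sizeof(max_rate)) < 0)
		perror("SO_MAX_PACING_RATE");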
@@ -1836,7 +1841,17 @@
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less than or equal to PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
 	int order;
 
@@ -1845,16 +1860,16 @@
 			pfrag->offset = 0;
 			return true;
 		}
-		if (pfrag->offset < pfrag->size)
+		if (pfrag->offset + sz <= pfrag->size)
 			return true;
 		put_page(pfrag->page);
 	}
 
 	/* We restrict high order allocations to users that can afford to wait */
-	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+	order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
 	do {
-		gfp_t gfp = sk->sk_allocation;
+		gfp_t gfp = prio;
 
 		if (order)
 			gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@
 		}
 	} while (--order >= 0);
 
+	return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+		return true;
+
 	sk_enter_memory_pressure(sk);
 	sk_stream_moderate_sndbuf(sk);
 	return false;
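Splitting the socket-independent part out lets non-socket users (a
tun- or virtio-style driver, say) refill a private page_frag. A
sketch; the per-cpu variable is an assumption, and per the comment
above, len must not exceed PAGE_SIZE:

	struct page_frag *pf = this_cpu_ptr(&drv_page_frag);	/* assumed */

	if (!skb_page_frag_refill(len, pf, GFP_ATOMIC))
		return -ENOMEM;
	memcpy(page_address(pf->page) + pf->offset, data, len);
	pf->offset += len;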
@@ -2319,6 +2343,7 @@
 	sk->sk_ll_usec		=	sysctl_net_busy_read;
 #endif
 
+	sk->sk_max_pacing_rate = ~0U;
 	sk->sk_pacing_rate = ~0U;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
diff --git a/net/core/utils.c b/net/core/utils.c
index aa88e23..2f737bf 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -338,3 +338,52 @@
 				  csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+	struct __net_random_once_work *work =
+		container_of(w, struct __net_random_once_work, work);
+	if (!static_key_enabled(work->key))
+		static_key_slow_inc(work->key);
+	kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+	struct __net_random_once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, __net_random_once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+			   struct static_key *done_key)
+{
+	static DEFINE_SPINLOCK(lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+	if (*done) {
+		spin_unlock_irqrestore(&lock, flags);
+		return false;
+	}
+
+	get_random_bytes(buf, nbytes);
+	*done = true;
+	spin_unlock_irqrestore(&lock, flags);
+
+	__net_random_once_disable_jump(done_key);
+
+	return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
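The callers above (net_secret_init(), __flow_hash_secret_init())
reach this function through a net_get_random_once() macro added
elsewhere in this series; the expansion below is an approximation of
it, reproduced to show why the fast path costs only a patched-out
branch once the static_key flips:

	#define net_get_random_once(buf, nbytes)			     \
		({							     \
			bool ___ret = false;				     \
			static bool ___done = false;			     \
			static struct static_key ___done_key =		     \
				STATIC_KEY_INIT_TRUE;			     \
			if (!static_key_true(&___done_key))		     \
				___ret = __net_get_random_once(buf, nbytes,  \
							       &___done,     \
							       &___done_key);\
			___ret;						     \
		})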
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index a269aa7..3284bfa 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -101,16 +101,16 @@
 	u8		 avr_ack_nonce:1;
 };
 
-extern int dccp_ackvec_init(void);
-extern void dccp_ackvec_exit(void);
+int dccp_ackvec_init(void);
+void dccp_ackvec_exit(void);
 
-extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
-extern void dccp_ackvec_free(struct dccp_ackvec *av);
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
+void dccp_ackvec_free(struct dccp_ackvec *av);
 
-extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
-extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
-extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
-extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
 static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
@@ -133,7 +133,6 @@
 	struct list_head node;
 };
 
-extern int dccp_ackvec_parsed_add(struct list_head *head,
-				  u8 *vec, u8 len, u8 nonce);
-extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce);
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
 #endif /* _ACKVEC_H */
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index fb85d37..6eb837a 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -93,8 +93,8 @@
 extern struct ccid_operations ccid3_ops;
 #endif
 
-extern int  ccid_initialize_builtins(void);
-extern void ccid_cleanup_builtins(void);
+int ccid_initialize_builtins(void);
+void ccid_cleanup_builtins(void);
 
 struct ccid {
 	struct ccid_operations *ccid_ops;
@@ -106,12 +106,12 @@
 	return (void *)ccid->ccid_priv;
 }
 
-extern bool ccid_support_check(u8 const *ccid_array, u8 array_len);
-extern int  ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
-extern int  ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
-					  char __user *, int __user *);
+bool ccid_support_check(u8 const *ccid_array, u8 array_len);
+int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
+int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
+				  char __user *, int __user *);
 
-extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
+struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
 
 static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
 {
@@ -131,8 +131,8 @@
 	return ccid->ccid_ops->ccid_id;
 }
 
-extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
-extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
 /*
  * Congestion control of queued data packets via CCID decision.
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index d1d2f53..57f631a 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -65,9 +65,9 @@
 
 struct tfrc_rx_hist;
 
-extern int  tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
-				 u32 (*first_li)(struct sock *), struct sock *);
-extern u8   tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
-extern void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
+int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
+			 u32 (*first_li)(struct sock *), struct sock *);
+u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
+void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
 
 #endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 7ee4a9d..ee362b0 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -60,8 +60,8 @@
 	return head;
 }
 
-extern int  tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
-extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
+int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
+void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
 
 /* Subtraction a-b modulo-16, respects circular wrap-around */
 #define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
@@ -139,20 +139,17 @@
 	return h->loss_count > 0;
 }
 
-extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
-				    const struct sk_buff *skb, const u64 ndp);
+void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb,
+			     const u64 ndp);
 
-extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
+int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
 
 struct tfrc_loss_hist;
-extern int  tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
-				struct tfrc_loss_hist *lh,
-				struct sk_buff *skb, const u64 ndp,
-				u32 (*first_li)(struct sock *sk),
-				struct sock *sk);
-extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
-				   const struct sk_buff *skb);
-extern int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
-extern void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
+int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, struct tfrc_loss_hist *lh,
+			struct sk_buff *skb, const u64 ndp,
+			u32 (*first_li)(struct sock *sk), struct sock *sk);
+u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb);
+int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
+void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
 
 #endif /* _DCCP_PKT_HIST_ */
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index ed698c4..40ee7d6 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -55,21 +55,21 @@
 	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
 }
 
-extern u32  tfrc_calc_x(u16 s, u32 R, u32 p);
-extern u32  tfrc_calc_x_reverse_lookup(u32 fvalue);
-extern u32  tfrc_invert_loss_event_rate(u32 loss_event_rate);
+u32 tfrc_calc_x(u16 s, u32 R, u32 p);
+u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
 
-extern int  tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int  tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
+int tfrc_tx_packet_history_init(void);
+void tfrc_tx_packet_history_exit(void);
+int tfrc_rx_packet_history_init(void);
+void tfrc_rx_packet_history_exit(void);
 
-extern int  tfrc_li_init(void);
-extern void tfrc_li_exit(void);
+int tfrc_li_init(void);
+void tfrc_li_exit(void);
 
 #ifdef CONFIG_IP_DCCP_TFRC_LIB
-extern int  tfrc_lib_init(void);
-extern void tfrc_lib_exit(void);
+int tfrc_lib_init(void);
+void tfrc_lib_exit(void);
 #else
 #define tfrc_lib_init() (0)
 #define tfrc_lib_exit()
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 708e75b..3094878 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -53,7 +53,7 @@
 
 extern struct percpu_counter dccp_orphan_count;
 
-extern void dccp_time_wait(struct sock *sk, int state, int timeo);
+void dccp_time_wait(struct sock *sk, int state, int timeo);
 
 /*
  *  Set safe upper bounds for header and option length. Since Data Offset is 8
@@ -224,114 +224,108 @@
 	skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
 }
 
-extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 
-extern int  dccp_retransmit_skb(struct sock *sk);
+int dccp_retransmit_skb(struct sock *sk);
 
-extern void dccp_send_ack(struct sock *sk);
-extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
-				struct request_sock *rsk);
+void dccp_send_ack(struct sock *sk);
+void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+			 struct request_sock *rsk);
 
-extern void dccp_send_sync(struct sock *sk, const u64 seq,
-			   const enum dccp_pkt_type pkt_type);
+void dccp_send_sync(struct sock *sk, const u64 seq,
+		    const enum dccp_pkt_type pkt_type);
 
 /*
  * TX Packet Dequeueing Interface
  */
-extern void		dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
-extern bool		dccp_qpolicy_full(struct sock *sk);
-extern void		dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
-extern struct sk_buff	*dccp_qpolicy_top(struct sock *sk);
-extern struct sk_buff	*dccp_qpolicy_pop(struct sock *sk);
-extern bool		dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+bool dccp_qpolicy_full(struct sock *sk);
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *dccp_qpolicy_top(struct sock *sk);
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
 
 /*
  * TX Packet Output and TX Timers
  */
-extern void   dccp_write_xmit(struct sock *sk);
-extern void   dccp_write_space(struct sock *sk);
-extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
+void dccp_write_xmit(struct sock *sk);
+void dccp_write_space(struct sock *sk);
+void dccp_flush_write_queue(struct sock *sk, long *time_budget);
 
-extern void dccp_init_xmit_timers(struct sock *sk);
+void dccp_init_xmit_timers(struct sock *sk);
 static inline void dccp_clear_xmit_timers(struct sock *sk)
 {
 	inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
 
-extern const char *dccp_packet_name(const int type);
+const char *dccp_packet_name(const int type);
 
-extern void dccp_set_state(struct sock *sk, const int state);
-extern void dccp_done(struct sock *sk);
+void dccp_set_state(struct sock *sk, const int state);
+void dccp_done(struct sock *sk);
 
-extern int  dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
-			    struct sk_buff const *skb);
+int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
+		    struct sk_buff const *skb);
 
-extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_create_openreq_child(struct sock *sk,
-					      const struct request_sock *req,
-					      const struct sk_buff *skb);
+struct sock *dccp_create_openreq_child(struct sock *sk,
+				       const struct request_sock *req,
+				       const struct sk_buff *skb);
 
-extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
-					      struct sk_buff *skb,
-					      struct request_sock *req,
-					      struct dst_entry *dst);
-extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
-				   struct request_sock *req,
-				   struct request_sock **prev);
+struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+				       struct request_sock *req,
+				       struct dst_entry *dst);
+struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+			    struct request_sock *req,
+			    struct request_sock **prev);
 
-extern int dccp_child_process(struct sock *parent, struct sock *child,
-			      struct sk_buff *skb);
-extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-				  struct dccp_hdr *dh, unsigned int len);
-extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				const struct dccp_hdr *dh, const unsigned int len);
+int dccp_child_process(struct sock *parent, struct sock *child,
+		       struct sk_buff *skb);
+int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+			   struct dccp_hdr *dh, unsigned int len);
+int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+			 const struct dccp_hdr *dh, const unsigned int len);
 
-extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
-extern void dccp_destroy_sock(struct sock *sk);
+int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+void dccp_destroy_sock(struct sock *sk);
 
-extern void		dccp_close(struct sock *sk, long timeout);
-extern struct sk_buff	*dccp_make_response(struct sock *sk,
-					    struct dst_entry *dst,
-					    struct request_sock *req);
+void dccp_close(struct sock *sk, long timeout);
+struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+				   struct request_sock *req);
 
-extern int	   dccp_connect(struct sock *sk);
-extern int	   dccp_disconnect(struct sock *sk, int flags);
-extern int	   dccp_getsockopt(struct sock *sk, int level, int optname,
-				   char __user *optval, int __user *optlen);
-extern int	   dccp_setsockopt(struct sock *sk, int level, int optname,
-				   char __user *optval, unsigned int optlen);
+int dccp_connect(struct sock *sk);
+int dccp_disconnect(struct sock *sk, int flags);
+int dccp_getsockopt(struct sock *sk, int level, int optname,
+		    char __user *optval, int __user *optlen);
+int dccp_setsockopt(struct sock *sk, int level, int optname,
+		    char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int	   compat_dccp_getsockopt(struct sock *sk,
-				int level, int optname,
-				char __user *optval, int __user *optlen);
-extern int	   compat_dccp_setsockopt(struct sock *sk,
-				int level, int optname,
-				char __user *optval, unsigned int optlen);
+int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
+			   char __user *optval, int __user *optlen);
+int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
+			   char __user *optval, unsigned int optlen);
 #endif
-extern int	   dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int	   dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
-				struct msghdr *msg, size_t size);
-extern int	   dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
-				struct msghdr *msg, size_t len, int nonblock,
-				int flags, int *addr_len);
-extern void	   dccp_shutdown(struct sock *sk, int how);
-extern int	   inet_dccp_listen(struct socket *sock, int backlog);
-extern unsigned int dccp_poll(struct file *file, struct socket *sock,
-			     poll_table *wait);
-extern int	   dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-				   int addr_len);
+int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		 size_t size);
+int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
+		 struct msghdr *msg, size_t len, int nonblock, int flags,
+		 int *addr_len);
+void dccp_shutdown(struct sock *sk, int how);
+int inet_dccp_listen(struct socket *sock, int backlog);
+unsigned int dccp_poll(struct file *file, struct socket *sock,
+		       poll_table *wait);
+int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
-extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
-					   struct sk_buff *skb);
-extern int	   dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
-extern void	   dccp_send_close(struct sock *sk, const int active);
-extern int	   dccp_invalid_packet(struct sk_buff *skb);
-extern u32	   dccp_sample_rtt(struct sock *sk, long delta);
+struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
+void dccp_send_close(struct sock *sk, const int active);
+int dccp_invalid_packet(struct sk_buff *skb);
+u32 dccp_sample_rtt(struct sock *sk, long delta);
 
 static inline int dccp_bad_service_code(const struct sock *sk,
 					const __be32 service)
@@ -475,25 +469,25 @@
 	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
-extern int  dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
-extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
-extern int  dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
-extern int  dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
-				  struct sk_buff *skb);
-extern int  dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
-extern void dccp_feat_list_purge(struct list_head *fn_list);
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
+int dccp_feat_finalise_settings(struct dccp_sock *dp);
+int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
+int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
+			  struct sk_buff *skb);
+int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
+void dccp_feat_list_purge(struct list_head *fn_list);
 
-extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
-extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
-extern u32 dccp_timestamp(void);
-extern void dccp_timestamping_init(void);
-extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
-			      const void *value, unsigned char len);
+int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
+int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
+u32 dccp_timestamp(void);
+void dccp_timestamping_init(void);
+int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+		       const void *value, unsigned char len);
 
 #ifdef CONFIG_SYSCTL
-extern int dccp_sysctl_init(void);
-extern void dccp_sysctl_exit(void);
+int dccp_sysctl_init(void);
+void dccp_sysctl_exit(void);
 #else
 static inline int dccp_sysctl_init(void)
 {
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 90b957d..0e75ceb 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -107,13 +107,13 @@
 extern int	     sysctl_dccp_rx_ccid;
 extern int	     sysctl_dccp_tx_ccid;
 
-extern int  dccp_feat_init(struct sock *sk);
-extern void dccp_feat_initialise_sysctls(void);
-extern int  dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
-				  u8 const *list, u8 len);
-extern int  dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
-				    u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
-extern int  dccp_feat_clone_list(struct list_head const *, struct list_head *);
+int dccp_feat_init(struct sock *sk);
+void dccp_feat_initialise_sysctls(void);
+int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
+			  u8 const *list, u8 len);
+int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
+			    u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
+int dccp_feat_clone_list(struct list_head const *, struct list_head *);
 
 /*
  * Encoding variable-length options and their maximum length.
@@ -127,11 +127,11 @@
  */
 #define DCCP_OPTVAL_MAXLEN	6
 
-extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
-extern u64  dccp_decode_value_var(const u8 *bf, const u8 len);
-extern u64  dccp_feat_nn_get(struct sock *sk, u8 feat);
+void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
+u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
 
-extern int  dccp_insert_option_mandatory(struct sk_buff *skb);
-extern int  dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
-			       u8 *val, u8 len, bool repeat_first);
+int dccp_insert_option_mandatory(struct sk_buff *skb);
+int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, u8 *val, u8 len,
+		       bool repeat_first);
 #endif /* _DCCP_FEAT_H */
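
The declarations above pair dccp_encode_value_var()/dccp_decode_value_var()
with a 6-byte ceiling (DCCP_OPTVAL_MAXLEN), which implies a big-endian,
variable-length integer encoding for option values. A hedged userspace sketch
of such a scheme (an illustration of the idea, not the kernel's
implementation):

    #include <stdint.h>

    /* Write the low 8*len bits of value, most significant byte first. */
    static void encode_value_var(uint64_t value, uint8_t *to, uint8_t len)
    {
        while (len--)
            *to++ = (uint8_t)(value >> (8 * len));
    }

    /* Inverse: fold len big-endian bytes back into an integer. */
    static uint64_t decode_value_var(const uint8_t *bf, uint8_t len)
    {
        uint64_t value = 0;

        while (len--)
            value = (value << 8) | *bf++;
        return value;
    }

With len capped at 6, values up to 2^48 - 1 round-trip through a buffer of
DCCP_OPTVAL_MAXLEN bytes.
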
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ebc54fe..d9f65fc 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -174,6 +174,7 @@
 	mtu = dst_mtu(dst);
 
 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
+	    ip_sk_accept_pmtu(sk) &&
 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 		dccp_sync_mss(sk, mtu);
 
@@ -409,9 +410,9 @@
 
 	newinet		   = inet_sk(newsk);
 	ireq		   = inet_rsk(req);
-	newinet->inet_daddr	= ireq->rmt_addr;
-	newinet->inet_rcv_saddr = ireq->loc_addr;
-	newinet->inet_saddr	= ireq->loc_addr;
+	newinet->inet_daddr	= ireq->ir_rmt_addr;
+	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+	newinet->inet_saddr	= ireq->ir_loc_addr;
 	newinet->inet_opt	= ireq->opt;
 	ireq->opt	   = NULL;
 	newinet->mc_index  = inet_iif(skb);
@@ -516,10 +517,10 @@
 		const struct inet_request_sock *ireq = inet_rsk(req);
 		struct dccp_hdr *dh = dccp_hdr(skb);
 
-		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
-							      ireq->rmt_addr);
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
+		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
+							      ireq->ir_rmt_addr);
+		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+					    ireq->ir_rmt_addr,
 					    ireq->opt);
 		err = net_xmit_eval(err);
 	}
@@ -641,8 +642,8 @@
 		goto drop_and_free;
 
 	ireq = inet_rsk(req);
-	ireq->loc_addr = ip_hdr(skb)->daddr;
-	ireq->rmt_addr = ip_hdr(skb)->saddr;
+	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
 
 	/*
 	 * Step 3: Process LISTEN state
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6cf9f77..4ac71ff 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -67,7 +67,7 @@
 	struct dccp_hdr *dh = dccp_hdr(skb);
 
 	dccp_csum_outgoing(skb);
-	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
 }
 
 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
@@ -216,7 +216,7 @@
 
 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 {
-	struct inet6_request_sock *ireq6 = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff *skb;
 	struct in6_addr *final_p, final;
@@ -226,12 +226,12 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_DCCP;
-	fl6.daddr = ireq6->rmt_addr;
-	fl6.saddr = ireq6->loc_addr;
+	fl6.daddr = ireq->ir_v6_rmt_addr;
+	fl6.saddr = ireq->ir_v6_loc_addr;
 	fl6.flowlabel = 0;
-	fl6.flowi6_oif = ireq6->iif;
-	fl6.fl6_dport = inet_rsk(req)->rmt_port;
-	fl6.fl6_sport = inet_rsk(req)->loc_port;
+	fl6.flowi6_oif = ireq->ir_iif;
+	fl6.fl6_dport = ireq->ir_rmt_port;
+	fl6.fl6_sport = htons(ireq->ir_num);
 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 
@@ -249,9 +249,9 @@
 		struct dccp_hdr *dh = dccp_hdr(skb);
 
 		dh->dccph_checksum = dccp_v6_csum_finish(skb,
-							 &ireq6->loc_addr,
-							 &ireq6->rmt_addr);
-		fl6.daddr = ireq6->rmt_addr;
+							 &ireq->ir_v6_loc_addr,
+							 &ireq->ir_v6_rmt_addr);
+		fl6.daddr = ireq->ir_v6_rmt_addr;
 		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -264,8 +264,7 @@
 static void dccp_v6_reqsk_destructor(struct request_sock *req)
 {
 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-	if (inet6_rsk(req)->pktopts != NULL)
-		kfree_skb(inet6_rsk(req)->pktopts);
+	kfree_skb(inet_rsk(req)->pktopts);
 }
 
 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
@@ -359,7 +358,7 @@
 {
 	struct request_sock *req;
 	struct dccp_request_sock *dreq;
-	struct inet6_request_sock *ireq6;
+	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -398,22 +397,22 @@
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
 
-	ireq6 = inet6_rsk(req);
-	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-	ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+	ireq = inet_rsk(req);
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 		atomic_inc(&skb->users);
-		ireq6->pktopts = skb;
+		ireq->pktopts = skb;
 	}
-	ireq6->iif = sk->sk_bound_dev_if;
+	ireq->ir_iif = sk->sk_bound_dev_if;
 
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq6->iif = inet6_iif(skb);
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
 
 	/*
 	 * Step 3: Process LISTEN state
@@ -446,7 +445,7 @@
 					      struct request_sock *req,
 					      struct dst_entry *dst)
 {
-	struct inet6_request_sock *ireq6 = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct inet_sock *newinet;
 	struct dccp6_sock *newdp6;
@@ -467,11 +466,11 @@
 
 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-		newnp->rcv_saddr = newnp->saddr;
+		newsk->sk_v6_rcv_saddr = newnp->saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -505,12 +504,12 @@
 
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_DCCP;
-		fl6.daddr = ireq6->rmt_addr;
+		fl6.daddr = ireq->ir_v6_rmt_addr;
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		fl6.saddr = ireq6->loc_addr;
+		fl6.saddr = ireq->ir_v6_loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.fl6_dport = inet_rsk(req)->rmt_port;
-		fl6.fl6_sport = inet_rsk(req)->loc_port;
+		fl6.fl6_dport = ireq->ir_rmt_port;
+		fl6.fl6_sport = htons(ireq->ir_num);
 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
 		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
@@ -538,10 +537,10 @@
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	newnp->daddr = ireq6->rmt_addr;
-	newnp->saddr = ireq6->loc_addr;
-	newnp->rcv_saddr = ireq6->loc_addr;
-	newsk->sk_bound_dev_if = ireq6->iif;
+	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
+	newnp->saddr		= ireq->ir_v6_loc_addr;
+	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
+	newsk->sk_bound_dev_if	= ireq->ir_iif;
 
 	/* Now IPv6 options...
 
@@ -554,10 +553,10 @@
 
 	/* Clone pktoptions received with SYN */
 	newnp->pktoptions = NULL;
-	if (ireq6->pktopts != NULL) {
-		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
-		consume_skb(ireq6->pktopts);
-		ireq6->pktopts = NULL;
+	if (ireq->pktopts != NULL) {
+		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+		consume_skb(ireq->pktopts);
+		ireq->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
@@ -885,7 +884,7 @@
 			return -EINVAL;
 	}
 
-	np->daddr = usin->sin6_addr;
+	sk->sk_v6_daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
 	/*
@@ -915,16 +914,16 @@
 			goto failure;
 		}
 		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
+		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
 
 		return err;
 	}
 
-	if (!ipv6_addr_any(&np->rcv_saddr))
-		saddr = &np->rcv_saddr;
+	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+		saddr = &sk->sk_v6_rcv_saddr;
 
 	fl6.flowi6_proto = IPPROTO_DCCP;
-	fl6.daddr = np->daddr;
+	fl6.daddr = sk->sk_v6_daddr;
 	fl6.saddr = saddr ? *saddr : np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.fl6_dport = usin->sin6_port;
@@ -941,7 +940,7 @@
 
 	if (saddr == NULL) {
 		saddr = &fl6.saddr;
-		np->rcv_saddr = *saddr;
+		sk->sk_v6_rcv_saddr = *saddr;
 	}
 
 	/* set the source address */
@@ -963,7 +962,7 @@
 		goto late_failure;
 
 	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
-						      np->daddr.s6_addr32,
+						      sk->sk_v6_daddr.s6_addr32,
 						      inet->inet_sport,
 						      inet->inet_dport);
 	err = dccp_connect(sk);
diff --git a/net/dccp/ipv6.h b/net/dccp/ipv6.h
index 6eef81f..af259e1 100644
--- a/net/dccp/ipv6.h
+++ b/net/dccp/ipv6.h
@@ -25,12 +25,10 @@
 
 struct dccp6_request_sock {
 	struct dccp_request_sock  dccp;
-	struct inet6_request_sock inet6;
 };
 
 struct dccp6_timewait_sock {
 	struct inet_timewait_sock   inet;
-	struct inet6_timewait_sock  tw6;
 };
 
 #endif /* _DCCP_IPV6_H */
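
The two struct changes above work because this release folds the
IPv6-specific request-sock state into the common inet_request_sock: the
ir_v6_* fields used throughout the ipv6.c hunks replace what
inet6_request_sock used to carry, so dccp6_request_sock no longer embeds a
second per-family struct. A simplified sketch of the consolidated shape
(illustrative field subset, not the full kernel definition):

    #include <netinet/in.h>

    struct inet_request_sock_sketch {
        /* ... the existing IPv4/common fields ... */
        struct in6_addr ir_v6_rmt_addr;  /* was inet6_request_sock.rmt_addr */
        struct in6_addr ir_v6_loc_addr;  /* was inet6_request_sock.loc_addr */
        int             ir_iif;          /* was inet6_request_sock.iif */
    };
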
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 662071b..9e2f78b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -56,12 +56,9 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			const struct ipv6_pinfo *np = inet6_sk(sk);
-			struct inet6_timewait_sock *tw6;
 
-			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-			tw6 = inet6_twsk((struct sock *)tw);
-			tw6->tw_v6_daddr = np->daddr;
-			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+			tw->tw_v6_daddr = sk->sk_v6_daddr;
+			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 			tw->tw_ipv6only = np->ipv6only;
 		}
 #endif
@@ -269,10 +266,10 @@
 {
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 
-	inet_rsk(req)->rmt_port	  = dccp_hdr(skb)->dccph_sport;
-	inet_rsk(req)->loc_port	  = dccp_hdr(skb)->dccph_dport;
-	inet_rsk(req)->acked	  = 0;
-	dreq->dreq_timestamp_echo = 0;
+	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
+	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
+	inet_rsk(req)->acked	   = 0;
+	dreq->dreq_timestamp_echo  = 0;
 
 	/* inherit feature negotiation options from listening socket */
 	return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index d17fc90..8876078 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -424,8 +424,8 @@
 	/* Build and checksum header */
 	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
-	dh->dccph_sport	= inet_rsk(req)->loc_port;
-	dh->dccph_dport	= inet_rsk(req)->rmt_port;
+	dh->dccph_sport	= htons(inet_rsk(req)->ir_num);
+	dh->dccph_dport	= inet_rsk(req)->ir_rmt_port;
 	dh->dccph_doff	= (dccp_header_size +
 			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
 	dh->dccph_type	= DCCP_PKT_RESPONSE;
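
A byte-order convention runs through the renamed fields in the ipv4.c,
minisocks.c and output.c hunks above: the remote port (ir_rmt_port) stays in
network byte order, while the local port (ir_num) is stored in host order and
converted with htons() at every use site. A small illustration (hypothetical
struct names, userspace C):

    #include <stdint.h>
    #include <arpa/inet.h>

    struct ireq_ports_sketch {
        uint16_t ir_rmt_port;  /* network byte order (__be16 in the kernel) */
        uint16_t ir_num;       /* host byte order */
    };

    static void fill_header_ports(const struct ireq_ports_sketch *ireq,
                                  uint16_t *sport_be, uint16_t *dport_be)
    {
        *sport_be = htons(ireq->ir_num);  /* convert on use, as above */
        *dport_be = ireq->ir_rmt_port;    /* already big-endian, copy as-is */
    }
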
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ba64750..eb892b4 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1158,10 +1158,8 @@
 		goto out_free_bind_bucket_cachep;
 	}
 
-	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
+	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
 		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
-		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
-	}
 
 	if (inet_ehash_locks_alloc(&dccp_hashinfo))
 			goto out_free_dccp_ehash;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 2a7efe3..e83015c 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -87,7 +87,7 @@
 }
 
 
-static unsigned int dnrmg_hook(unsigned int hook,
+static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
 			struct sk_buff *skb,
 			const struct net_device *in,
 			const struct net_device *out,
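
The dn_rtmsg.c hunk reflects this release's tree-wide netfilter change: hook
functions now receive the registering const struct nf_hook_ops * instead of a
bare hook number, so the hook index is read as ops->hooknum. A sketch of a
converted hook (kernel-context code, assuming the usual netfilter headers):

    #include <linux/netfilter.h>
    #include <linux/skbuff.h>

    static unsigned int example_hook(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
    {
        pr_debug("hook %u fired\n", ops->hooknum);  /* was the old hook arg */
        return NF_ACCEPT;
    }
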
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index be1f64d..8f032ba 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -58,7 +58,7 @@
 #include <net/ipv6.h>
 #include <net/ip.h>
 #include <net/dsa.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 __setup("ether=", netdev_boot_setup);
 
@@ -133,7 +133,7 @@
 		return arp_find(eth->h_dest, skb);
 #endif
 	default:
-		printk(KERN_DEBUG
+		netdev_dbg(dev,
 		       "%s: unable to resolve type %X addresses.\n",
 		       dev->name, ntohs(eth->h_proto));
 
@@ -169,20 +169,9 @@
 		else
 			skb->pkt_type = PACKET_MULTICAST;
 	}
-
-	/*
-	 *      This ALLMULTI check should be redundant by 1.4
-	 *      so don't forget to remove it.
-	 *
-	 *      Seems, you forgot to remove it. All silly devices
-	 *      seems to set IFF_PROMISC.
-	 */
-
-	else if (1 /*dev->flags&IFF_PROMISC */ ) {
-		if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-						      dev->dev_addr)))
-			skb->pkt_type = PACKET_OTHERHOST;
-	}
+	else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+						   dev->dev_addr)))
+		skb->pkt_type = PACKET_OTHERHOST;
 
 	/*
 	 * Some variants of DSA tagging don't have an ethertype field
@@ -190,12 +179,13 @@
 	 * variants has been configured on the receiving interface,
 	 * and if so, set skb->protocol without looking at the packet.
 	 */
-	if (netdev_uses_dsa_tags(dev))
+	if (unlikely(netdev_uses_dsa_tags(dev)))
 		return htons(ETH_P_DSA);
-	if (netdev_uses_trailer_tags(dev))
+
+	if (unlikely(netdev_uses_trailer_tags(dev)))
 		return htons(ETH_P_TRAILER);
 
-	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+	if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
 		return eth->h_proto;
 
 	/*
@@ -204,7 +194,7 @@
 	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
 	 *      won't work for fault tolerant netware but does for the rest.
 	 */
-	if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
+	if (unlikely(skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF))
 		return htons(ETH_P_802_3);
 
 	/*
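
The eth.c hunks above mostly annotate the fast path of frame-type
classification, whose underlying rule is: a 16-bit type/length field of
0x0600 (ETH_P_802_3_MIN) or above is an EtherType, anything smaller is an
802.3 length, and a payload starting with 0xFFFF marks raw Novell 802.3. A
compact userspace restatement (simplified; the kernel also handles DSA and
trailer tags first):

    #include <stdint.h>
    #include <stddef.h>
    #include <arpa/inet.h>

    #define P_802_3_MIN 0x0600  /* mirrors ETH_P_802_3_MIN */

    static const char *classify(uint16_t h_proto_be, const uint8_t *payload,
                                size_t len)
    {
        if (ntohs(h_proto_be) >= P_802_3_MIN)
            return "ethertype";             /* the common, likely() case */
        if (len >= 2 && payload[0] == 0xff && payload[1] == 0xff)
            return "raw 802.3 (Novell)";    /* 0xFFFF is never a SSAP/DSAP */
        return "802.2";
    }
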
diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig
new file mode 100644
index 0000000..0d3d709
--- /dev/null
+++ b/net/hsr/Kconfig
@@ -0,0 +1,27 @@
+#
+# IEC 62439-3 High-availability Seamless Redundancy
+#
+
+config HSR
+	tristate "High-availability Seamless Redundancy (HSR)"
+	---help---
+	  If you say Y here, then your Linux box will be able to act as a
+	  DANH ("Doubly attached node implementing HSR"). For this to work,
+	  your Linux box needs (at least) two physical Ethernet interfaces,
+	  and it must be connected as a node in a ring network together with
+	  other HSR capable nodes.
+
+	  All Ethernet frames sent over the hsr device will be sent in both
+	  directions on the ring (over both slave ports), giving a redundant,
+	  instant fail-over network. Each HSR node in the ring acts like a
+	  bridge for HSR frames, but filters frames that have been forwarded
+	  earlier.
+
+	  This code is a "best effort" to comply with the HSR standard as
+	  described in IEC 62439-3:2010 (HSRv0), but no compliance tests have
+	  been performed.
+
+	  You need to perform any and all necessary tests yourself before
+	  relying on this code in a safety-critical system!
+
+	  If unsure, say N.
diff --git a/net/hsr/Makefile b/net/hsr/Makefile
new file mode 100644
index 0000000..b68359f
--- /dev/null
+++ b/net/hsr/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for HSR
+#
+
+obj-$(CONFIG_HSR)	+= hsr.o
+
+hsr-y			:= hsr_main.o hsr_framereg.o hsr_device.o hsr_netlink.o
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
new file mode 100644
index 0000000..cac505f
--- /dev/null
+++ b/net/hsr/hsr_device.c
@@ -0,0 +1,596 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * This file contains device methods for creating, using and destroying
+ * virtual HSR devices.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include "hsr_device.h"
+#include "hsr_framereg.h"
+#include "hsr_main.h"
+
+
+static bool is_admin_up(struct net_device *dev)
+{
+	return dev && (dev->flags & IFF_UP);
+}
+
+static bool is_slave_up(struct net_device *dev)
+{
+	return dev && is_admin_up(dev) && netif_oper_up(dev);
+}
+
+static void __hsr_set_operstate(struct net_device *dev, int transition)
+{
+	write_lock_bh(&dev_base_lock);
+	if (dev->operstate != transition) {
+		dev->operstate = transition;
+		write_unlock_bh(&dev_base_lock);
+		netdev_state_change(dev);
+	} else {
+		write_unlock_bh(&dev_base_lock);
+	}
+}
+
+void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
+		       struct net_device *slave2)
+{
+	if (!is_admin_up(hsr_dev)) {
+		__hsr_set_operstate(hsr_dev, IF_OPER_DOWN);
+		return;
+	}
+
+	if (is_slave_up(slave1) || is_slave_up(slave2))
+		__hsr_set_operstate(hsr_dev, IF_OPER_UP);
+	else
+		__hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN);
+}
+
+void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
+		     struct net_device *slave2)
+{
+	if (is_slave_up(slave1) || is_slave_up(slave2))
+		netif_carrier_on(hsr_dev);
+	else
+		netif_carrier_off(hsr_dev);
+}
+
+
+void hsr_check_announce(struct net_device *hsr_dev, int old_operstate)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = netdev_priv(hsr_dev);
+
+	if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
+		/* Went up */
+		hsr_priv->announce_count = 0;
+		hsr_priv->announce_timer.expires = jiffies +
+				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+		add_timer(&hsr_priv->announce_timer);
+	}
+
+	if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
+		/* Went down */
+		del_timer(&hsr_priv->announce_timer);
+}
+
+
+int hsr_get_max_mtu(struct hsr_priv *hsr_priv)
+{
+	int mtu_max;
+
+	if (hsr_priv->slave[0] && hsr_priv->slave[1])
+		mtu_max = min(hsr_priv->slave[0]->mtu, hsr_priv->slave[1]->mtu);
+	else if (hsr_priv->slave[0])
+		mtu_max = hsr_priv->slave[0]->mtu;
+	else if (hsr_priv->slave[1])
+		mtu_max = hsr_priv->slave[1]->mtu;
+	else
+		mtu_max = HSR_TAGLEN;
+
+	return mtu_max - HSR_TAGLEN;
+}
+
+static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = netdev_priv(dev);
+
+	if (new_mtu > hsr_get_max_mtu(hsr_priv)) {
+		netdev_info(hsr_priv->dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
+			    HSR_TAGLEN);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int hsr_dev_open(struct net_device *dev)
+{
+	struct hsr_priv *hsr_priv;
+	int i;
+	char *slave_name;
+
+	hsr_priv = netdev_priv(dev);
+
+	for (i = 0; i < HSR_MAX_SLAVE; i++) {
+		if (hsr_priv->slave[i])
+			slave_name = hsr_priv->slave[i]->name;
+		else
+			slave_name = "null";
+
+		if (!is_slave_up(hsr_priv->slave[i]))
+			netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n",
+				    'A' + i, slave_name);
+	}
+
+	return 0;
+}
+
+static int hsr_dev_close(struct net_device *dev)
+{
+	/* Nothing to do here. We could try to restore the slaves' state to
+	 * what it was before the hsr master dev's state changed it, but the
+	 * slaves might also have been changed manually in the meantime, so
+	 * taking them up or down here might be confusing and is probably not
+	 * a good idea.
+	 */
+	return 0;
+}
+
+
+static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr_priv)
+{
+	unsigned long irqflags;
+
+	/* IEC 62439-1:2010, p 48, says the 4-bit "path" field can take values
+	 * between 0001-1001 ("ring identifier", for regular HSR frames),
+	 * or 1111 ("HSR management", supervision frames). Unfortunately, the
+	 * spec writers forgot to explain what a "ring identifier" is, or
+	 * how it is used. So we just set this to 0001 for regular frames,
+	 * and 1111 for supervision frames.
+	 */
+	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, 0x1);
+
+	/* IEC 62439-1:2010, p 12: "The link service data unit in an Ethernet
+	 * frame is the content of the frame located between the Length/Type
+	 * field and the Frame Check Sequence."
+	 *
+	 * IEC 62439-3, p 48, specifies the "original LPDU" to include the
+	 * original "LT" field (what "LT" means is not explained anywhere as
+	 * far as I can see - perhaps "Length/Type"?). So LSDU_size might
+	 * equal original length + 2.
+	 *   Also, the fact that this field is not used anywhere (might be used
+	 * by a RedBox connecting HSR and PRP nets?) means I cannot test its
+	 * correctness. Instead of guessing, I set this to 0 here, to make any
+	 * problems immediately apparent. Anyone using this driver with PRP/HSR
+	 * RedBoxes might need to fix this...
+	 */
+	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, 0);
+
+	spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
+	hsr_ethhdr->hsr_tag.sequence_nr = htons(hsr_priv->sequence_nr);
+	hsr_priv->sequence_nr++;
+	spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+
+	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
+
+	hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+}
+
+static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
+		      enum hsr_dev_idx dev_idx)
+{
+	struct hsr_ethhdr *hsr_ethhdr;
+
+	hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+
+	skb->dev = hsr_priv->slave[dev_idx];
+
+	hsr_addr_subst_dest(hsr_priv, &hsr_ethhdr->ethhdr, dev_idx);
+
+	/* Address substitution (IEC62439-3 pp 26, 50): replace mac
+	 * address of outgoing frame with that of the outgoing slave's.
+	 */
+	memcpy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr, ETH_ALEN);
+
+	return dev_queue_xmit(skb);
+}
+
+
+static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hsr_priv *hsr_priv;
+	struct hsr_ethhdr *hsr_ethhdr;
+	struct sk_buff *skb2;
+	int res1, res2;
+
+	hsr_priv = netdev_priv(dev);
+	hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+
+	if ((skb->protocol != htons(ETH_P_PRP)) ||
+	    (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
+		hsr_fill_tag(hsr_ethhdr, hsr_priv);
+		skb->protocol = htons(ETH_P_PRP);
+	}
+
+	skb2 = pskb_copy(skb, GFP_ATOMIC);
+
+	res1 = NET_XMIT_DROP;
+	if (likely(hsr_priv->slave[HSR_DEV_SLAVE_A]))
+		res1 = slave_xmit(skb, hsr_priv, HSR_DEV_SLAVE_A);
+
+	res2 = NET_XMIT_DROP;
+	if (likely(skb2 && hsr_priv->slave[HSR_DEV_SLAVE_B]))
+		res2 = slave_xmit(skb2, hsr_priv, HSR_DEV_SLAVE_B);
+
+	if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
+		   res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
+		hsr_priv->dev->stats.tx_packets++;
+		hsr_priv->dev->stats.tx_bytes += skb->len;
+	} else {
+		hsr_priv->dev->stats.tx_dropped++;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+
+static int hsr_header_create(struct sk_buff *skb, struct net_device *dev,
+			     unsigned short type, const void *daddr,
+			     const void *saddr, unsigned int len)
+{
+	int res;
+
+	/* Make room for the HSR tag now. We will fill it in later (in
+	 * hsr_dev_xmit)
+	 */
+	if (skb_headroom(skb) < HSR_TAGLEN + ETH_HLEN)
+		return -ENOBUFS;
+	skb_push(skb, HSR_TAGLEN);
+
+	/* To allow VLAN/HSR combos we should probably use
+	 * res = dev_hard_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
+	 * here instead. It would require other changes too, though - e.g.
+	 * separate headers for each slave etc...
+	 */
+	res = eth_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
+	if (res <= 0)
+		return res;
+	skb_reset_mac_header(skb);
+
+	return res + HSR_TAGLEN;
+}
+
+
+static const struct header_ops hsr_header_ops = {
+	.create	 = hsr_header_create,
+	.parse	 = eth_header_parse,
+};
+
+
+/* HSR:2010 supervision frames should be padded so that the whole frame,
+ * including headers and FCS, is 64 bytes (without VLAN).
+ */
+static int hsr_pad(int size)
+{
+	const int min_size = ETH_ZLEN - HSR_TAGLEN - ETH_HLEN;
+
+	if (size >= min_size)
+		return size;
+	return min_size;
+}
+
+static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
+{
+	struct hsr_priv *hsr_priv;
+	struct sk_buff *skb;
+	int hlen, tlen;
+	struct hsr_sup_tag *hsr_stag;
+	struct hsr_sup_payload *hsr_sp;
+	unsigned long irqflags;
+
+	hlen = LL_RESERVED_SPACE(hsr_dev);
+	tlen = hsr_dev->needed_tailroom;
+	skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
+			GFP_ATOMIC);
+
+	if (skb == NULL)
+		return;
+
+	hsr_priv = netdev_priv(hsr_dev);
+
+	skb_reserve(skb, hlen);
+
+	skb->dev = hsr_dev;
+	skb->protocol = htons(ETH_P_PRP);
+	skb->priority = TC_PRIO_CONTROL;
+
+	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+			    hsr_priv->sup_multicast_addr,
+			    skb->dev->dev_addr, skb->len) < 0)
+		goto out;
+
+	skb_pull(skb, sizeof(struct ethhdr));
+	hsr_stag = (typeof(hsr_stag)) skb->data;
+
+	set_hsr_stag_path(hsr_stag, 0xf);
+	set_hsr_stag_HSR_Ver(hsr_stag, 0);
+
+	spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
+	hsr_stag->sequence_nr = htons(hsr_priv->sequence_nr);
+	hsr_priv->sequence_nr++;
+	spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+
+	hsr_stag->HSR_TLV_Type = type;
+	hsr_stag->HSR_TLV_Length = 12;
+
+	skb_push(skb, sizeof(struct ethhdr));
+
+	/* Payload: MacAddressA */
+	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
+	memcpy(hsr_sp->MacAddressA, hsr_dev->dev_addr, ETH_ALEN);
+
+	dev_queue_xmit(skb);
+	return;
+
+out:
+	kfree_skb(skb);
+}
+
+
+/* Announce (supervision frame) timer function
+ */
+static void hsr_announce(unsigned long data)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = (struct hsr_priv *) data;
+
+	if (hsr_priv->announce_count < 3) {
+		send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_ANNOUNCE);
+		hsr_priv->announce_count++;
+	} else {
+		send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_LIFE_CHECK);
+	}
+
+	if (hsr_priv->announce_count < 3)
+		hsr_priv->announce_timer.expires = jiffies +
+				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+	else
+		hsr_priv->announce_timer.expires = jiffies +
+				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+
+	if (is_admin_up(hsr_priv->dev))
+		add_timer(&hsr_priv->announce_timer);
+}
+
+
+static void restore_slaves(struct net_device *hsr_dev)
+{
+	struct hsr_priv *hsr_priv;
+	int i;
+	int res;
+
+	hsr_priv = netdev_priv(hsr_dev);
+
+	rtnl_lock();
+
+	/* Restore promiscuity */
+	for (i = 0; i < HSR_MAX_SLAVE; i++) {
+		if (!hsr_priv->slave[i])
+			continue;
+		res = dev_set_promiscuity(hsr_priv->slave[i], -1);
+		if (res)
+			netdev_info(hsr_dev,
+				    "Cannot restore slave promiscuity (%s, %d)\n",
+				    hsr_priv->slave[i]->name, res);
+	}
+
+	rtnl_unlock();
+}
+
+static void reclaim_hsr_dev(struct rcu_head *rh)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = container_of(rh, struct hsr_priv, rcu_head);
+	free_netdev(hsr_priv->dev);
+}
+
+
+/* According to comments in the declaration of struct net_device, this function
+ * is "Called from unregister, can be used to call free_netdev". Ok then...
+ */
+static void hsr_dev_destroy(struct net_device *hsr_dev)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = netdev_priv(hsr_dev);
+
+	del_timer(&hsr_priv->announce_timer);
+	unregister_hsr_master(hsr_priv);    /* calls list_del_rcu on hsr_priv */
+	restore_slaves(hsr_dev);
+	call_rcu(&hsr_priv->rcu_head, reclaim_hsr_dev);   /* reclaim hsr_priv */
+}
+
+static const struct net_device_ops hsr_device_ops = {
+	.ndo_change_mtu = hsr_dev_change_mtu,
+	.ndo_open = hsr_dev_open,
+	.ndo_stop = hsr_dev_close,
+	.ndo_start_xmit = hsr_dev_xmit,
+};
+
+
+void hsr_dev_setup(struct net_device *dev)
+{
+	random_ether_addr(dev->dev_addr);
+
+	ether_setup(dev);
+	dev->header_ops		 = &hsr_header_ops;
+	dev->netdev_ops		 = &hsr_device_ops;
+	dev->tx_queue_len	 = 0;
+
+	dev->destructor = hsr_dev_destroy;
+}
+
+
+/* Return true if dev is a HSR master; return false otherwise.
+ */
+bool is_hsr_master(struct net_device *dev)
+{
+	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
+}
+
+static int check_slave_ok(struct net_device *dev)
+{
+	/* Don't allow HSR on non-Ethernet-like devices */
+	if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
+	    (dev->addr_len != ETH_ALEN)) {
+		netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+		return -EINVAL;
+	}
+
+	/* Don't allow enslaving hsr devices */
+	if (is_hsr_master(dev)) {
+		netdev_info(dev, "Cannot create trees of HSR devices.\n");
+		return -EINVAL;
+	}
+
+	if (is_hsr_slave(dev)) {
+		netdev_info(dev, "This device is already a HSR slave.\n");
+		return -EINVAL;
+	}
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+		return -EINVAL;
+	}
+
+	/* HSR over bonded devices has not been tested, but I'm not sure it
+	 * won't work...
+	 */
+
+	return 0;
+}
+
+
+/* Default multicast address for HSR Supervision frames */
+static const unsigned char def_multicast_addr[ETH_ALEN] = {
+	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
+};
+
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+		     unsigned char multicast_spec)
+{
+	struct hsr_priv *hsr_priv;
+	int i;
+	int res;
+
+	hsr_priv = netdev_priv(hsr_dev);
+	hsr_priv->dev = hsr_dev;
+	INIT_LIST_HEAD(&hsr_priv->node_db);
+	INIT_LIST_HEAD(&hsr_priv->self_node_db);
+	for (i = 0; i < HSR_MAX_SLAVE; i++)
+		hsr_priv->slave[i] = slave[i];
+
+	spin_lock_init(&hsr_priv->seqnr_lock);
+	/* Overflow soon to find bugs easier: */
+	hsr_priv->sequence_nr = USHRT_MAX - 1024;
+
+	init_timer(&hsr_priv->announce_timer);
+	hsr_priv->announce_timer.function = hsr_announce;
+	hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
+
+	memcpy(hsr_priv->sup_multicast_addr, def_multicast_addr, ETH_ALEN);
+	hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
+
+/* FIXME: should I modify the value of these?
+ *
+ * - hsr_dev->flags - i.e.
+ *			IFF_MASTER/SLAVE?
+ * - hsr_dev->priv_flags - i.e.
+ *			IFF_EBRIDGE?
+ *			IFF_TX_SKB_SHARING?
+ *			IFF_HSR_MASTER/SLAVE?
+ */
+
+	for (i = 0; i < HSR_MAX_SLAVE; i++) {
+		res = check_slave_ok(slave[i]);
+		if (res)
+			return res;
+	}
+
+	hsr_dev->features = slave[0]->features & slave[1]->features;
+	/* Prevent recursive tx locking */
+	hsr_dev->features |= NETIF_F_LLTX;
+	/* VLAN on top of HSR needs testing and probably some work on
+	 * hsr_header_create() etc.
+	 */
+	hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+	/* Set hsr_dev's MAC address to that of mac_slave1 */
+	memcpy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+
+	/* Set required header length */
+	for (i = 0; i < HSR_MAX_SLAVE; i++) {
+		if (slave[i]->hard_header_len + HSR_TAGLEN >
+						hsr_dev->hard_header_len)
+			hsr_dev->hard_header_len =
+					slave[i]->hard_header_len + HSR_TAGLEN;
+	}
+
+	/* MTU */
+	for (i = 0; i < HSR_MAX_SLAVE; i++)
+		if (slave[i]->mtu - HSR_TAGLEN < hsr_dev->mtu)
+			hsr_dev->mtu = slave[i]->mtu - HSR_TAGLEN;
+
+	/* Make sure the 1st call to netif_carrier_on() gets through */
+	netif_carrier_off(hsr_dev);
+
+	/* Promiscuity */
+	for (i = 0; i < HSR_MAX_SLAVE; i++) {
+		res = dev_set_promiscuity(slave[i], 1);
+		if (res) {
+			netdev_info(hsr_dev, "Cannot set slave promiscuity (%s, %d)\n",
+				    slave[i]->name, res);
+			goto fail;
+		}
+	}
+
+	/* Make sure we recognize frames from ourselves in hsr_rcv() */
+	res = hsr_create_self_node(&hsr_priv->self_node_db,
+					hsr_dev->dev_addr,
+					hsr_priv->slave[1]->dev_addr);
+	if (res < 0)
+		goto fail;
+
+	res = register_netdevice(hsr_dev);
+	if (res)
+		goto fail;
+
+	register_hsr_master(hsr_priv);
+
+	return 0;
+
+fail:
+	restore_slaves(hsr_dev);
+	return res;
+}
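
For orientation, the 6-octet tag that hsr_fill_tag() above writes packs a
4-bit path and a 12-bit LSDU size into one 16-bit field, followed by the
sequence number and the encapsulated EtherType (layout per IEC 62439-3). A
hedged userspace sketch of that bit packing (the helpers mirror, but are not,
the kernel's set_hsr_tag_*() accessors):

    #include <stdint.h>
    #include <arpa/inet.h>

    struct hsr_tag_sketch {
        uint16_t path_and_LSDU_size;  /* big-endian on the wire */
        uint16_t sequence_nr;
        uint16_t encap_proto;
    };

    /* Path lives in the top 4 bits of the combined field. */
    static void set_path(struct hsr_tag_sketch *t, uint16_t path)
    {
        uint16_t v = ntohs(t->path_and_LSDU_size);

        t->path_and_LSDU_size = htons((v & 0x0fff) | (path << 12));
    }

    /* LSDU size lives in the low 12 bits. */
    static void set_LSDU_size(struct hsr_tag_sketch *t, uint16_t size)
    {
        uint16_t v = ntohs(t->path_and_LSDU_size);

        t->path_and_LSDU_size = htons((v & 0xf000) | (size & 0x0fff));
    }
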
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
new file mode 100644
index 0000000..2c7148e
--- /dev/null
+++ b/net/hsr/hsr_device.h
@@ -0,0 +1,29 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __HSR_DEVICE_H
+#define __HSR_DEVICE_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_dev_setup(struct net_device *dev);
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+		     unsigned char multicast_spec);
+void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
+		       struct net_device *slave2);
+void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
+		     struct net_device *slave2);
+void hsr_check_announce(struct net_device *hsr_dev, int old_operstate);
+bool is_hsr_master(struct net_device *dev);
+int hsr_get_max_mtu(struct hsr_priv *hsr_priv);
+
+#endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
new file mode 100644
index 0000000..003f5bb
--- /dev/null
+++ b/net/hsr/hsr_framereg.c
@@ -0,0 +1,503 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * The HSR spec says never to forward the same frame twice on the same
+ * interface. A frame is identified by its source MAC address and its HSR
+ * sequence number. This code keeps track of senders and their sequence numbers
+ * to allow filtering of duplicate frames, and to detect HSR ring errors.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+#include "hsr_netlink.h"
+
+
+struct node_entry {
+	struct list_head mac_list;
+	unsigned char	MacAddressA[ETH_ALEN];
+	unsigned char	MacAddressB[ETH_ALEN];
+	enum hsr_dev_idx   AddrB_if;	/* The local slave through which AddrB
+					 * frames are received from this node
+					 */
+	unsigned long	time_in[HSR_MAX_SLAVE];
+	bool		time_in_stale[HSR_MAX_SLAVE];
+	u16		seq_out[HSR_MAX_DEV];
+	struct rcu_head rcu_head;
+};
+
+/*	TODO: use hash lists for mac addresses (linux/jhash.h)?    */
+
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+static struct node_entry *find_node_by_AddrA(struct list_head *node_db,
+					     const unsigned char addr[ETH_ALEN])
+{
+	struct node_entry *node;
+
+	list_for_each_entry_rcu(node, node_db, mac_list) {
+		if (ether_addr_equal(node->MacAddressA, addr))
+			return node;
+	}
+
+	return NULL;
+}
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+static struct node_entry *find_node_by_AddrB(struct list_head *node_db,
+					     const unsigned char addr[ETH_ALEN])
+{
+	struct node_entry *node;
+
+	list_for_each_entry_rcu(node, node_db, mac_list) {
+		if (ether_addr_equal(node->MacAddressB, addr))
+			return node;
+	}
+
+	return NULL;
+}
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb)
+{
+	struct node_entry *node;
+	struct ethhdr *ethhdr;
+
+	if (!skb_mac_header_was_set(skb))
+		return NULL;
+
+	ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+	list_for_each_entry_rcu(node, node_db, mac_list) {
+		if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
+			return node;
+		if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+			return node;
+	}
+
+	return NULL;
+}
+
+
+/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
+ * frames from ourselves that have been looped over the HSR ring.
+ */
+int hsr_create_self_node(struct list_head *self_node_db,
+			 unsigned char addr_a[ETH_ALEN],
+			 unsigned char addr_b[ETH_ALEN])
+{
+	struct node_entry *node, *oldnode;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	memcpy(node->MacAddressA, addr_a, ETH_ALEN);
+	memcpy(node->MacAddressB, addr_b, ETH_ALEN);
+
+	rcu_read_lock();
+	oldnode = list_first_or_null_rcu(self_node_db,
+						struct node_entry, mac_list);
+	if (oldnode) {
+		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+		rcu_read_unlock();
+		synchronize_rcu();
+		kfree(oldnode);
+	} else {
+		rcu_read_unlock();
+		list_add_tail_rcu(&node->mac_list, self_node_db);
+	}
+
+	return 0;
+}
+
+static void node_entry_reclaim(struct rcu_head *rh)
+{
+	kfree(container_of(rh, struct node_entry, rcu_head));
+}
+
+
+/* Add/merge node to the database of nodes. 'skb' must contain an HSR
+ * supervision frame.
+ * - If the supervision header's MacAddressA field is not yet in the database,
+ * this frame is from a hitherto unknown node - add it to the database.
+ * - If the sender's MAC address is not the same as its MacAddressA address,
+ * the node is using PICS_SUBS (address substitution). Record the sender's
+ * address as the node's MacAddressB.
+ *
+ * This function needs to work even if the sender node has changed one of its
+ * slaves' MAC addresses. That gives four cases, described below as
+ * (Addr-changed, received-from) pairs. Note that changing the SlaveA address
+ * is equivalent to changing the node's own address:
+ *
+ * - (AddrB, SlaveB): The new AddrB will be recorded by PICS_SUBS code since
+ *		      node == NULL.
+ * - (AddrB, SlaveA): Will work as usual (the AddrB change won't be detected
+ *		      from this frame).
+ *
+ * - (AddrA, SlaveB): The old node will be found. We need to detect this and
+ *		      remove the node.
+ * - (AddrA, SlaveA): A new node will be registered (non-PICS_SUBS at first).
+ *		      The old one will be pruned after HSR_NODE_FORGET_TIME.
+ *
+ * We also need to detect if the sender's SlaveA and SlaveB cables have been
+ * swapped.
+ */
+struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
+				  struct node_entry *node,
+				  struct sk_buff *skb,
+				  enum hsr_dev_idx dev_idx)
+{
+	struct hsr_sup_payload *hsr_sp;
+	struct hsr_ethhdr_sp *hsr_ethsup;
+	int i;
+	unsigned long now;
+
+	hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+	hsr_sp = (struct hsr_sup_payload *) skb->data;
+
+	if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) {
+		/* Node has changed its AddrA, frame was received from SlaveB */
+		list_del_rcu(&node->mac_list);
+		call_rcu(&node->rcu_head, node_entry_reclaim);
+		node = NULL;
+	}
+
+	if (node && (dev_idx == node->AddrB_if) &&
+	    !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) {
+		/* Cables have been swapped */
+		list_del_rcu(&node->mac_list);
+		call_rcu(&node->rcu_head, node_entry_reclaim);
+		node = NULL;
+	}
+
+	if (node && (dev_idx != node->AddrB_if) &&
+	    (node->AddrB_if != HSR_DEV_NONE) &&
+	    !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) {
+		/* Cables have been swapped */
+		list_del_rcu(&node->mac_list);
+		call_rcu(&node->rcu_head, node_entry_reclaim);
+		node = NULL;
+	}
+
+	if (node)
+		return node;
+
+	node = find_node_by_AddrA(&hsr_priv->node_db, hsr_sp->MacAddressA);
+	if (node) {
+		/* Node is known, but frame was received from an unknown
+		 * address. Node is PICS_SUBS capable; merge its AddrB.
+		 */
+		memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+		node->AddrB_if = dev_idx;
+		return node;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_ATOMIC);
+	if (!node)
+		return NULL;
+
+	memcpy(node->MacAddressA, hsr_sp->MacAddressA, ETH_ALEN);
+	memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+	if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
+		node->AddrB_if = dev_idx;
+	else
+		node->AddrB_if = HSR_DEV_NONE;
+
+	/* We are only interested in time diffs here, so use current jiffies
+	 * as initialization. (0 could trigger a spurious ring error warning.)
+	 */
+	now = jiffies;
+	for (i = 0; i < HSR_MAX_SLAVE; i++)
+		node->time_in[i] = now;
+	for (i = 0; i < HSR_MAX_DEV; i++)
+		node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1;
+
+	list_add_tail_rcu(&node->mac_list, &hsr_priv->node_db);
+
+	return node;
+}
+
+
+/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
+ *
+ * If the frame was sent by a node's B interface, replace the sender
+ * address with that node's "official" address (MacAddressA) so that upper
+ * layers recognize where it came from.
+ */
+void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+{
+	struct ethhdr *ethhdr;
+	struct node_entry *node;
+
+	if (!skb_mac_header_was_set(skb)) {
+		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+		return;
+	}
+	ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+	rcu_read_lock();
+	node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
+	if (node)
+		memcpy(ethhdr->h_source, node->MacAddressA, ETH_ALEN);
+	rcu_read_unlock();
+}
+
+
+/* 'skb' is a frame meant for another host.
+ * 'hsr_dev_idx' is the HSR index of the outgoing device
+ *
+ * Substitute the target (dest) MAC address if necessary, so that it matches the
+ * recipient interface MAC address, regardless of whether that is the
+ * recipient's A or B interface.
+ * This is needed to keep the packets flowing through switches that learn on
+ * which "side" the different interfaces are.
+ */
+void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
+			 enum hsr_dev_idx dev_idx)
+{
+	struct node_entry *node;
+
+	rcu_read_lock();
+	node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
+	if (node && (node->AddrB_if == dev_idx))
+		memcpy(ethhdr->h_dest, node->MacAddressB, ETH_ALEN);
+	rcu_read_unlock();
+}
+
+
+/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
+ */
+static bool seq_nr_after(u16 a, u16 b)
+{
+	/* Remove inconsistency where
+	 * seq_nr_after(a, b) == seq_nr_before(a, b) */
+	if ((int) b - a == 32768)
+		return false;
+
+	return (((s16) (b - a)) < 0);
+}
+#define seq_nr_before(a, b)		seq_nr_after((b), (a))
+#define seq_nr_after_or_eq(a, b)	(!seq_nr_before((a), (b)))
+#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
+
+
+void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
+{
+	if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+		WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+		return;
+	}
+	node->time_in[dev_idx] = jiffies;
+	node->time_in_stale[dev_idx] = false;
+}
+
+
+/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
+ * ethhdr->h_source address and skb->mac_header set.
+ *
+ * Return:
+ *	 1 if frame can be shown to have been sent recently on this interface,
+ *	 0 otherwise, or
+ *	 negative error code on error
+ */
+int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
+			   struct sk_buff *skb)
+{
+	struct hsr_ethhdr *hsr_ethhdr;
+	u16 sequence_nr;
+
+	if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+		WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+		return -EINVAL;
+	}
+	if (!skb_mac_header_was_set(skb)) {
+		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+		return -EINVAL;
+	}
+	hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+
+	sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
+	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[dev_idx]))
+		return 1;
+
+	node->seq_out[dev_idx] = sequence_nr;
+	return 0;
+}
+
+
+
+static bool is_late(struct node_entry *node, enum hsr_dev_idx dev_idx)
+{
+	enum hsr_dev_idx other;
+
+	if (node->time_in_stale[dev_idx])
+		return true;
+
+	if (dev_idx == HSR_DEV_SLAVE_A)
+		other = HSR_DEV_SLAVE_B;
+	else
+		other = HSR_DEV_SLAVE_A;
+
+	if (node->time_in_stale[other])
+		return false;
+
+	if (time_after(node->time_in[other], node->time_in[dev_idx] +
+		       msecs_to_jiffies(MAX_SLAVE_DIFF)))
+		return true;
+
+	return false;
+}
+
+
+/* Remove stale sequence_nr records. Called by timer every
+ * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
+ */
+void hsr_prune_nodes(struct hsr_priv *hsr_priv)
+{
+	struct node_entry *node;
+	unsigned long timestamp;
+	unsigned long time_a, time_b;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(node, &hsr_priv->node_db, mac_list) {
+		/* Shorthand */
+		time_a = node->time_in[HSR_DEV_SLAVE_A];
+		time_b = node->time_in[HSR_DEV_SLAVE_B];
+
+		/* Check for timestamps old enough to risk wrap-around */
+		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
+			node->time_in_stale[HSR_DEV_SLAVE_A] = true;
+		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
+			node->time_in_stale[HSR_DEV_SLAVE_B] = true;
+
+		/* Get age of newest frame from node.
+		 * At least one time_in is OK here; nodes get pruned long
+		 * before both time_ins can get stale
+		 */
+		timestamp = time_a;
+		if (node->time_in_stale[HSR_DEV_SLAVE_A] ||
+		    (!node->time_in_stale[HSR_DEV_SLAVE_B] &&
+		    time_after(time_b, time_a)))
+			timestamp = time_b;
+
+		/* Warn of ring error only as long as we get frames at all */
+		if (time_is_after_jiffies(timestamp +
+					msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
+
+			if (is_late(node, HSR_DEV_SLAVE_A))
+				hsr_nl_ringerror(hsr_priv, node->MacAddressA,
+						 HSR_DEV_SLAVE_A);
+			else if (is_late(node, HSR_DEV_SLAVE_B))
+				hsr_nl_ringerror(hsr_priv, node->MacAddressA,
+						 HSR_DEV_SLAVE_B);
+		}
+
+		/* Prune old entries */
+		if (time_is_before_jiffies(timestamp +
+					msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+			hsr_nl_nodedown(hsr_priv, node->MacAddressA);
+			list_del_rcu(&node->mac_list);
+			/* Note that we need to free this entry later: */
+			call_rcu(&node->rcu_head, node_entry_reclaim);
+		}
+	}
+	rcu_read_unlock();
+}
+
+
+void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+			unsigned char addr[ETH_ALEN])
+{
+	struct node_entry *node;
+
+	if (!_pos) {
+		node = list_first_or_null_rcu(&hsr_priv->node_db,
+						struct node_entry, mac_list);
+		if (node)
+			memcpy(addr, node->MacAddressA, ETH_ALEN);
+		return node;
+	}
+
+	node = _pos;
+	list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
+		memcpy(addr, node->MacAddressA, ETH_ALEN);
+		return node;
+	}
+
+	return NULL;
+}
+
+
+int hsr_get_node_data(struct hsr_priv *hsr_priv,
+		      const unsigned char *addr,
+		      unsigned char addr_b[ETH_ALEN],
+		      unsigned int *addr_b_ifindex,
+		      int *if1_age,
+		      u16 *if1_seq,
+		      int *if2_age,
+		      u16 *if2_seq)
+{
+	struct node_entry *node;
+	unsigned long tdiff;
+
+
+	rcu_read_lock();
+	node = find_node_by_AddrA(&hsr_priv->node_db, addr);
+	if (!node) {
+		rcu_read_unlock();
+		return -ENOENT;	/* No such entry */
+	}
+
+	memcpy(addr_b, node->MacAddressB, ETH_ALEN);
+
+	tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
+	if (node->time_in_stale[HSR_DEV_SLAVE_A])
+		*if1_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+	else if (tdiff > msecs_to_jiffies(INT_MAX))
+		*if1_age = INT_MAX;
+#endif
+	else
+		*if1_age = jiffies_to_msecs(tdiff);
+
+	tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_B];
+	if (node->time_in_stale[HSR_DEV_SLAVE_B])
+		*if2_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+	else if (tdiff > msecs_to_jiffies(INT_MAX))
+		*if2_age = INT_MAX;
+#endif
+	else
+		*if2_age = jiffies_to_msecs(tdiff);
+
+	/* Present sequence numbers as if they were incoming on interface */
+	*if1_seq = node->seq_out[HSR_DEV_SLAVE_B];
+	*if2_seq = node->seq_out[HSR_DEV_SLAVE_A];
+
+	if ((node->AddrB_if != HSR_DEV_NONE) && hsr_priv->slave[node->AddrB_if])
+		*addr_b_ifindex = hsr_priv->slave[node->AddrB_if]->ifindex;
+	else
+		*addr_b_ifindex = -1;
+
+	rcu_read_unlock();
+
+	return 0;
+}
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
new file mode 100644
index 0000000..e6c4022
--- /dev/null
+++ b/net/hsr/hsr_framereg.h
@@ -0,0 +1,53 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef _HSR_FRAMEREG_H
+#define _HSR_FRAMEREG_H
+
+#include "hsr_main.h"
+
+struct node_entry;
+
+struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb);
+
+struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
+				  struct node_entry *node,
+				  struct sk_buff *skb,
+				  enum hsr_dev_idx dev_idx);
+
+void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb);
+void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
+			 enum hsr_dev_idx dev_idx);
+
+void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx);
+
+int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
+			   struct sk_buff *skb);
+
+void hsr_prune_nodes(struct hsr_priv *hsr_priv);
+
+int hsr_create_self_node(struct list_head *self_node_db,
+			 unsigned char addr_a[ETH_ALEN],
+			 unsigned char addr_b[ETH_ALEN]);
+
+void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+			unsigned char addr[ETH_ALEN]);
+
+int hsr_get_node_data(struct hsr_priv *hsr_priv,
+		      const unsigned char *addr,
+		      unsigned char addr_b[ETH_ALEN],
+		      int *addr_b_ifindex,
+		      int *if1_age,
+		      u16 *if1_seq,
+		      int *if2_age,
+		      u16 *if2_seq);
+
+#endif /* _HSR_FRAMEREG_H */
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
new file mode 100644
index 0000000..af68dd8
--- /dev/null
+++ b/net/hsr/hsr_main.c
@@ -0,0 +1,469 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * In addition to routines for registering and unregistering HSR support, this
+ * file also contains the receive routine that handles all incoming frames with
+ * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_netlink.h"
+#include "hsr_framereg.h"
+
+
+/* List of all registered virtual HSR devices */
+static LIST_HEAD(hsr_list);
+
+void register_hsr_master(struct hsr_priv *hsr_priv)
+{
+	list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list);
+}
+
+void unregister_hsr_master(struct hsr_priv *hsr_priv)
+{
+	struct hsr_priv *hsr_priv_it;
+
+	list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list)
+		if (hsr_priv_it == hsr_priv) {
+			list_del_rcu(&hsr_priv_it->hsr_list);
+			return;
+		}
+}
+
+bool is_hsr_slave(struct net_device *dev)
+{
+	struct hsr_priv *hsr_priv_it;
+
+	list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) {
+		if (dev == hsr_priv_it->slave[0])
+			return true;
+		if (dev == hsr_priv_it->slave[1])
+			return true;
+	}
+
+	return false;
+}
+
+
+/* If dev is an HSR slave device, return the virtual master device. Return NULL
+ * otherwise.
+ */
+static struct hsr_priv *get_hsr_master(struct net_device *dev)
+{
+	struct hsr_priv *hsr_priv;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
+		if ((dev == hsr_priv->slave[0]) ||
+		    (dev == hsr_priv->slave[1])) {
+			rcu_read_unlock();
+			return hsr_priv;
+		}
+
+	rcu_read_unlock();
+	return NULL;
+}
+
+
+/* If dev is an HSR slave device, return the other slave device. Return NULL
+ * otherwise.
+ */
+static struct net_device *get_other_slave(struct hsr_priv *hsr_priv,
+					  struct net_device *dev)
+{
+	if (dev == hsr_priv->slave[0])
+		return hsr_priv->slave[1];
+	if (dev == hsr_priv->slave[1])
+		return hsr_priv->slave[0];
+
+	return NULL;
+}
+
+
+static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+			     void *ptr)
+{
+	struct net_device *slave, *other_slave;
+	struct hsr_priv *hsr_priv;
+	int old_operstate;
+	int mtu_max;
+	int res;
+	struct net_device *dev;
+
+	dev = netdev_notifier_info_to_dev(ptr);
+
+	hsr_priv = get_hsr_master(dev);
+	if (hsr_priv) {
+		/* dev is a slave device */
+		slave = dev;
+		other_slave = get_other_slave(hsr_priv, slave);
+	} else {
+		if (!is_hsr_master(dev))
+			return NOTIFY_DONE;
+		hsr_priv = netdev_priv(dev);
+		slave = hsr_priv->slave[0];
+		other_slave = hsr_priv->slave[1];
+	}
+
+	switch (event) {
+	case NETDEV_UP:		/* Administrative state UP */
+	case NETDEV_DOWN:	/* Administrative state DOWN */
+	case NETDEV_CHANGE:	/* Link (carrier) state changes */
+		old_operstate = hsr_priv->dev->operstate;
+		hsr_set_carrier(hsr_priv->dev, slave, other_slave);
+		/* netif_stacked_transfer_operstate() cannot be used here since
+		 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+		 */
+		hsr_set_operstate(hsr_priv->dev, slave, other_slave);
+		hsr_check_announce(hsr_priv->dev, old_operstate);
+		break;
+	case NETDEV_CHANGEADDR:
+
+		/* This should not happen since there's no ndo_set_mac_address()
+		 * for HSR devices - i.e. not supported.
+		 */
+		if (dev == hsr_priv->dev)
+			break;
+
+		if (dev == hsr_priv->slave[0])
+			memcpy(hsr_priv->dev->dev_addr,
+			       hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+
+		/* Make sure we recognize frames from ourselves in hsr_rcv() */
+		res = hsr_create_self_node(&hsr_priv->self_node_db,
+					   hsr_priv->dev->dev_addr,
+					   hsr_priv->slave[1] ?
+						hsr_priv->slave[1]->dev_addr :
+						hsr_priv->dev->dev_addr);
+		if (res)
+			netdev_warn(hsr_priv->dev,
+				    "Could not update HSR node address.\n");
+
+		if (dev == hsr_priv->slave[0])
+			call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev);
+		break;
+	case NETDEV_CHANGEMTU:
+		if (dev == hsr_priv->dev)
+			break; /* Handled in ndo_change_mtu() */
+		mtu_max = hsr_get_max_mtu(hsr_priv);
+		if (hsr_priv->dev->mtu > mtu_max)
+			dev_set_mtu(hsr_priv->dev, mtu_max);
+		break;
+	case NETDEV_UNREGISTER:
+		if (dev == hsr_priv->slave[0])
+			hsr_priv->slave[0] = NULL;
+		if (dev == hsr_priv->slave[1])
+			hsr_priv->slave[1] = NULL;
+
+		/* There should really be a way to set a new slave device... */
+
+		break;
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* HSR works only on Ethernet devices. Refuse to let a slave
+		 * change its type.
+		 */
+		return NOTIFY_BAD;
+	}
+
+	return NOTIFY_DONE;
+}
+
+
+static struct timer_list prune_timer;
+
+static void prune_nodes_all(unsigned long data)
+{
+	struct hsr_priv *hsr_priv;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
+		hsr_prune_nodes(hsr_priv);
+	rcu_read_unlock();
+
+	prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+	add_timer(&prune_timer);
+}
+
+
+static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
+{
+	struct hsr_tag *hsr_tag;
+	struct sk_buff *skb2;
+
+	skb2 = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb2))
+		goto err_free;
+	skb = skb2;
+
+	if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN)))
+		goto err_free;
+
+	hsr_tag = (struct hsr_tag *) skb->data;
+	skb->protocol = hsr_tag->encap_proto;
+	skb_pull(skb, HSR_TAGLEN);
+
+	return skb;
+
+err_free:
+	kfree_skb(skb);
+	return NULL;
+}
+
+
+/* The uses I can see for these HSR supervision frames are:
+ * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
+ *    22") to reset any sequence_nr counters belonging to that node. Useful if
+ *    the other node's counter has been reset for some reason.
+ *    --
+ *    Or not - resetting the counter and bridging the frame would create a
+ *    loop, unfortunately.
+ *
+ * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
+ *    frame is received from a particular node, we know something is wrong.
+ *    We just register these (as with normal frames) and throw them away.
+ *
+ * 3) Allow different MAC addresses for the two slave interfaces, using the
+ *    MacAddressA field.
+ */
+static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+{
+	struct hsr_sup_tag *hsr_stag;
+
+	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
+			      hsr_priv->sup_multicast_addr))
+		return false;
+
+	hsr_stag = (struct hsr_sup_tag *) skb->data;
+	if (get_hsr_stag_path(hsr_stag) != 0x0f)
+		return false;
+	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+		return false;
+	if (hsr_stag->HSR_TLV_Length != 12)
+		return false;
+
+	return true;
+}
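
For concreteness, a supervision frame accepted by these tests carries the
01:15:4e:00:01:XX destination address (XX being the configured
multicast_spec) and a tag along these lines (example values only):

	struct hsr_sup_tag example = {
		.path_and_HSR_Ver = htons(0xf000),	/* path 0x0f, HSR_Ver 0 */
		.sequence_nr	  = htons(1234),
		.HSR_TLV_Type	  = HSR_TLV_LIFE_CHECK,	/* or HSR_TLV_ANNOUNCE */
		.HSR_TLV_Length	  = 12,		/* the MacAddressA payload */
	};
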
+
+
+/* Implementation somewhat according to IEC-62439-3, p. 43
+ */
+static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
+		   struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct hsr_priv *hsr_priv;
+	struct net_device *other_slave;
+	struct node_entry *node;
+	bool deliver_to_self;
+	struct sk_buff *skb_deliver;
+	enum hsr_dev_idx dev_in_idx, dev_other_idx;
+	bool dup_out;
+	int ret;
+
+	hsr_priv = get_hsr_master(dev);
+
+	if (!hsr_priv) {
+		/* Non-HSR-slave device 'dev' is connected to an HSR network */
+		kfree_skb(skb);
+		dev->stats.rx_errors++;
+		return NET_RX_SUCCESS;
+	}
+
+	if (dev == hsr_priv->slave[0]) {
+		dev_in_idx = HSR_DEV_SLAVE_A;
+		dev_other_idx = HSR_DEV_SLAVE_B;
+	} else {
+		dev_in_idx = HSR_DEV_SLAVE_B;
+		dev_other_idx = HSR_DEV_SLAVE_A;
+	}
+
+	node = hsr_find_node(&hsr_priv->self_node_db, skb);
+	if (node) {
+		/* Always kill frames sent by ourselves */
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
+	/* Is this frame a candidate for local reception? */
+	deliver_to_self = false;
+	if ((skb->pkt_type == PACKET_HOST) ||
+	    (skb->pkt_type == PACKET_MULTICAST) ||
+	    (skb->pkt_type == PACKET_BROADCAST))
+		deliver_to_self = true;
+	else if (ether_addr_equal(eth_hdr(skb)->h_dest,
+				     hsr_priv->dev->dev_addr)) {
+		skb->pkt_type = PACKET_HOST;
+		deliver_to_self = true;
+	}
+
+
+	rcu_read_lock(); /* node_db */
+	node = hsr_find_node(&hsr_priv->node_db, skb);
+
+	if (is_supervision_frame(hsr_priv, skb)) {
+		skb_pull(skb, sizeof(struct hsr_sup_tag));
+		node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx);
+		if (!node) {
+			rcu_read_unlock(); /* node_db */
+			kfree_skb(skb);
+			hsr_priv->dev->stats.rx_dropped++;
+			return NET_RX_DROP;
+		}
+		skb_push(skb, sizeof(struct hsr_sup_tag));
+		deliver_to_self = false;
+	}
+
+	if (!node) {
+		/* Source node unknown; this might be an HSR frame from
+		 * another net (different multicast address). Ignore it.
+		 */
+		rcu_read_unlock(); /* node_db */
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
+	/* Register ALL incoming frames as outgoing through the other interface.
+	 * This allows us to register frames as incoming only if they are valid
+	 * for the receiving interface, without using a specific counter for
+	 * incoming frames.
+	 */
+	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
+	if (!dup_out)
+		hsr_register_frame_in(node, dev_in_idx);
+
+	/* Forward this frame? */
+	if (!dup_out && (skb->pkt_type != PACKET_HOST))
+		other_slave = get_other_slave(hsr_priv, dev);
+	else
+		other_slave = NULL;
+
+	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
+		deliver_to_self = false;
+
+	rcu_read_unlock(); /* node_db */
+
+	if (!deliver_to_self && !other_slave) {
+		kfree_skb(skb);
+		/* Circulated frame; silently remove it. */
+		return NET_RX_SUCCESS;
+	}
+
+	skb_deliver = skb;
+	if (deliver_to_self && other_slave) {
+		/* skb_clone() is not enough since we will strip the hsr tag
+		 * and do address substitution below
+		 */
+		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
+		if (!skb_deliver) {
+			deliver_to_self = false;
+			hsr_priv->dev->stats.rx_dropped++;
+		}
+	}
+
+	if (deliver_to_self) {
+		bool multicast_frame;
+
+		skb_deliver = hsr_pull_tag(skb_deliver);
+		if (!skb_deliver) {
+			hsr_priv->dev->stats.rx_dropped++;
+			goto forward;
+		}
+#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+		/* Move everything in the header that is after the HSR tag,
+		 * to work around alignment problems caused by the 6-byte HSR
+		 * tag. In practice, this removes/overwrites the HSR tag in
+		 * the header and restores a "standard" packet.
+		 */
+		memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data,
+			skb_headlen(skb_deliver));
+
+		/* Adjust skb members so they correspond with the move above.
+		 * This cannot possibly underflow skb->data since hsr_pull_tag()
+		 * above succeeded.
+		 * At this point in the protocol stack, the transport and
+		 * network headers have not been set yet, and we haven't touched
+		 * the mac header nor the head. So we only need to adjust data
+		 * and tail:
+		 */
+		skb_deliver->data -= HSR_TAGLEN;
+		skb_deliver->tail -= HSR_TAGLEN;
+#endif
+		skb_deliver->dev = hsr_priv->dev;
+		hsr_addr_subst_source(hsr_priv, skb_deliver);
+		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
+		ret = netif_rx(skb_deliver);
+		if (ret == NET_RX_DROP) {
+			hsr_priv->dev->stats.rx_dropped++;
+		} else {
+			hsr_priv->dev->stats.rx_packets++;
+			hsr_priv->dev->stats.rx_bytes += skb->len;
+			if (multicast_frame)
+				hsr_priv->dev->stats.multicast++;
+		}
+	}
+
+forward:
+	if (other_slave) {
+		skb_push(skb, ETH_HLEN);
+		skb->dev = other_slave;
+		dev_queue_xmit(skb);
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+
+static struct packet_type hsr_pt __read_mostly = {
+	.type = htons(ETH_P_PRP),
+	.func = hsr_rcv,
+};
+
+static struct notifier_block hsr_nb = {
+	.notifier_call = hsr_netdev_notify,	/* Slave event notifications */
+};
+
+
+static int __init hsr_init(void)
+{
+	int res;
+
+	BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN);
+
+	dev_add_pack(&hsr_pt);
+
+	init_timer(&prune_timer);
+	prune_timer.function = prune_nodes_all;
+	prune_timer.data = 0;
+	prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+	add_timer(&prune_timer);
+
+	register_netdevice_notifier(&hsr_nb);
+
+	res = hsr_netlink_init();
+
+	return res;
+}
+
+static void __exit hsr_exit(void)
+{
+	unregister_netdevice_notifier(&hsr_nb);
+	del_timer(&prune_timer);
+	hsr_netlink_exit();
+	dev_remove_pack(&hsr_pt);
+}
+
+module_init(hsr_init);
+module_exit(hsr_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
new file mode 100644
index 0000000..56fe060
--- /dev/null
+++ b/net/hsr/hsr_main.h
@@ -0,0 +1,166 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef _HSR_PRIVATE_H
+#define _HSR_PRIVATE_H
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+
+
+/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
+ * Table 8.
+ * All values in milliseconds.
+ */
+#define HSR_LIFE_CHECK_INTERVAL		 2000 /* ms */
+#define HSR_NODE_FORGET_TIME		60000 /* ms */
+#define HSR_ANNOUNCE_INTERVAL		  100 /* ms */
+
+
+/* By how much may the slave1 and slave2 timestamps of the latest frame
+ * received from each node differ before we report a communication problem?
+ */
+#define MAX_SLAVE_DIFF			 3000 /* ms */
+
+
+/* How often shall we check for a broken ring and remove node entries older
+ * than HSR_NODE_FORGET_TIME?
+ */
+#define PRUNE_PERIOD			 3000 /* ms */
+
+
+#define HSR_TLV_ANNOUNCE		   22
+#define HSR_TLV_LIFE_CHECK		   23
+
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ */
+#define HSR_TAGLEN	6
+
+/* Field names below as defined in the IEC:2010 standard for HSR. */
+struct hsr_tag {
+	__be16		path_and_LSDU_size;
+	__be16		sequence_nr;
+	__be16		encap_proto;
+} __packed;
+
+
+/* The helper functions below assume that 'path' occupies the 4 most
+ * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
+ * equivalently, the 4 most significant bits of HSR tag byte 14).
+ *
+ * This is unclear in the IEC specification; its definition of MAC addresses
+ * indicates the spec is written with the least significant bit first (to the
+ * left). This, however, would mean that the LSDU field would be split in two
+ * with the path field in-between, which seems strange. I'm guessing the MAC
+ * address definition is in error.
+ */
+static inline u16 get_hsr_tag_path(struct hsr_tag *ht)
+{
+	return ntohs(ht->path_and_LSDU_size) >> 12;
+}
+
+static inline u16 get_hsr_tag_LSDU_size(struct hsr_tag *ht)
+{
+	return ntohs(ht->path_and_LSDU_size) & 0x0FFF;
+}
+
+static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
+{
+	ht->path_and_LSDU_size = htons(
+			(ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
+}
+
+static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
+{
+	ht->path_and_LSDU_size = htons(
+			(ntohs(ht->path_and_LSDU_size) & 0xF000) |
+			(LSDU_size & 0x0FFF));
+}
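
A worked example of the packing these helpers implement - path 0x1 and
LSDU_size 0x2fc share a single big-endian 16-bit field (illustration only):

	struct hsr_tag t = { 0 };

	set_hsr_tag_path(&t, 0x1);
	set_hsr_tag_LSDU_size(&t, 0x2fc);
	/* t.path_and_LSDU_size == htons(0x12fc): path in bits 15..12,
	 * LSDU_size in bits 11..0.
	 */
	WARN_ON(get_hsr_tag_path(&t) != 0x1);
	WARN_ON(get_hsr_tag_LSDU_size(&t) != 0x2fc);
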
+
+struct hsr_ethhdr {
+	struct ethhdr	ethhdr;
+	struct hsr_tag	hsr_tag;
+} __packed;
+
+
+/* HSR Supervision Frame data types.
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_sup_tag {
+	__be16		path_and_HSR_Ver;
+	__be16		sequence_nr;
+	__u8		HSR_TLV_Type;
+	__u8		HSR_TLV_Length;
+} __packed;
+
+struct hsr_sup_payload {
+	unsigned char	MacAddressA[ETH_ALEN];
+} __packed;
+
+static inline u16 get_hsr_stag_path(struct hsr_sup_tag *hst)
+{
+	return get_hsr_tag_path((struct hsr_tag *) hst);
+}
+
+static inline u16 get_hsr_stag_HSR_ver(struct hsr_sup_tag *hst)
+{
+	return get_hsr_tag_LSDU_size((struct hsr_tag *) hst);
+}
+
+static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
+{
+	set_hsr_tag_path((struct hsr_tag *) hst, path);
+}
+
+static inline void set_hsr_stag_HSR_Ver(struct hsr_sup_tag *hst, u16 HSR_Ver)
+{
+	set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver);
+}
+
+struct hsr_ethhdr_sp {
+	struct ethhdr		ethhdr;
+	struct hsr_sup_tag	hsr_sup;
+} __packed;
+
+
+enum hsr_dev_idx {
+	HSR_DEV_NONE = -1,
+	HSR_DEV_SLAVE_A = 0,
+	HSR_DEV_SLAVE_B,
+	HSR_DEV_MASTER,
+};
+#define HSR_MAX_SLAVE	(HSR_DEV_SLAVE_B + 1)
+#define HSR_MAX_DEV	(HSR_DEV_MASTER + 1)
+
+struct hsr_priv {
+	struct list_head	hsr_list;	/* List of hsr devices */
+	struct rcu_head		rcu_head;
+	struct net_device	*dev;
+	struct net_device	*slave[HSR_MAX_SLAVE];
+	struct list_head	node_db;	/* Other HSR nodes */
+	struct list_head	self_node_db;	/* MACs of slaves */
+	struct timer_list	announce_timer;	/* Supervision frame dispatch */
+	int announce_count;
+	u16 sequence_nr;
+	spinlock_t seqnr_lock;			/* locking for sequence_nr */
+	unsigned char		sup_multicast_addr[ETH_ALEN];
+};
+
+void register_hsr_master(struct hsr_priv *hsr_priv);
+void unregister_hsr_master(struct hsr_priv *hsr_priv);
+bool is_hsr_slave(struct net_device *dev);
+
+#endif /*  _HSR_PRIVATE_H */
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
new file mode 100644
index 0000000..4e66bf6
--- /dev/null
+++ b/net/hsr/hsr_netlink.c
@@ -0,0 +1,457 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * Routines for handling Netlink messages for HSR.
+ */
+
+#include "hsr_netlink.h"
+#include <linux/kernel.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_framereg.h"
+
+static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
+	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
+	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
+	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
+};
+
+
+/* Here, it seems a netdevice has already been allocated for us, and the
+ * hsr_dev_setup routine has been executed. Nice!
+ */
+static int hsr_newlink(struct net *src_net, struct net_device *dev,
+		       struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net_device *link[2];
+	unsigned char multicast_spec;
+
+	if (!data[IFLA_HSR_SLAVE1]) {
+		netdev_info(dev, "IFLA_HSR_SLAVE1 missing!\n");
+		return -EINVAL;
+	}
+	link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
+	if (!data[IFLA_HSR_SLAVE2]) {
+		netdev_info(dev, "IFLA_HSR_SLAVE2 missing!\n");
+		return -EINVAL;
+	}
+	link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));
+
+	if (!link[0] || !link[1])
+		return -ENODEV;
+	if (link[0] == link[1])
+		return -EINVAL;
+
+	if (!data[IFLA_HSR_MULTICAST_SPEC])
+		multicast_spec = 0;
+	else
+		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+
+	return hsr_dev_finalize(dev, link, multicast_spec);
+}
+
+static struct rtnl_link_ops hsr_link_ops __read_mostly = {
+	.kind		= "hsr",
+	.maxtype	= IFLA_HSR_MAX,
+	.policy		= hsr_policy,
+	.priv_size	= sizeof(struct hsr_priv),
+	.setup		= hsr_dev_setup,
+	.newlink	= hsr_newlink,
+};
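
Note that with this glue the whole setup is driven from userspace: given an
iproute2 that knows the "hsr" link kind (assumed here), something like
"ip link add name hsr0 type hsr slave1 eth0 slave2 eth1" should end up in
hsr_newlink() with IFLA_HSR_SLAVE1/2 naming the two ring ports.
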
+
+
+
+/* attribute policy */
+/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */
+static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
+	[HSR_A_NODE_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
+	[HSR_A_NODE_ADDR_B] = { .type = NLA_BINARY, .len = ETH_ALEN },
+	[HSR_A_IFINDEX] = { .type = NLA_U32 },
+	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
+	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
+	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
+	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
+};
+
+static struct genl_family hsr_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = 0,
+	.name = "HSR",
+	.version = 1,
+	.maxattr = HSR_A_MAX,
+};
+
+static struct genl_multicast_group hsr_network_genl_mcgrp = {
+	.name = "hsr-network",
+};
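
Userspace can join the "hsr-network" group to receive the ring-error and
node-down events generated below. A minimal libnl-3 sketch (library calls as
I recall them; untested, and callbacks would still need to be registered
with nl_socket_modify_cb()):

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>

	static int hsr_listen(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		if (!sk)
			return -1;
		/* Resolve the "HSR" genl family's multicast group, join it */
		if (genl_connect(sk) ||
		    (grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network")) < 0 ||
		    nl_socket_add_membership(sk, grp)) {
			nl_socket_free(sk);
			return -1;
		}
		while (nl_recvmsgs_default(sk) == 0)
			;	/* dispatch events to registered callbacks */
		nl_socket_free(sk);
		return 0;
	}
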
+
+
+
+/* This is called if, for some node with MAC address addr, we get frames over
+ * only one of the slave interfaces. This would indicate an open network ring
+ * (i.e. a link has failed somewhere).
+ */
+void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
+		      enum hsr_dev_idx dev_idx)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int res;
+	int ifindex;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (!skb)
+		goto fail;
+
+	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
+	if (!msg_head)
+		goto nla_put_failure;
+
+	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+	if (res < 0)
+		goto nla_put_failure;
+
+	if (hsr_priv->slave[dev_idx])
+		ifindex = hsr_priv->slave[dev_idx]->ifindex;
+	else
+		ifindex = -1;
+	res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex);
+	if (res < 0)
+		goto nla_put_failure;
+
+	genlmsg_end(skb, msg_head);
+	genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC);
+
+	return;
+
+nla_put_failure:
+	kfree_skb(skb);
+
+fail:
+	netdev_warn(hsr_priv->dev, "Could not send HSR ring error message\n");
+}
+
+/* This is called when we haven't heard from the node with MAC address addr for
+ * some time (just before the node is removed from the node table/list).
+ */
+void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int res;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (!skb)
+		goto fail;
+
+	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
+	if (!msg_head)
+		goto nla_put_failure;
+
+	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+	if (res < 0)
+		goto nla_put_failure;
+
+	genlmsg_end(skb, msg_head);
+	genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC);
+
+	return;
+
+nla_put_failure:
+	kfree_skb(skb);
+
+fail:
+	netdev_warn(hsr_priv->dev, "Could not send HSR node down\n");
+}
+
+
+/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
+ * about the status of a specific node in the network, defined by its MAC
+ * address.
+ *
+ * Input: hsr ifindex, node mac address
+ * Output: hsr ifindex, node mac address (copied from request),
+ *	   age of latest frame from node over slave 1, slave 2 [ms]
+ */
+static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
+{
+	/* For receiving */
+	struct nlattr *na;
+	struct net_device *hsr_dev;
+
+	/* For sending */
+	struct sk_buff *skb_out;
+	void *msg_head;
+	struct hsr_priv *hsr_priv;
+	unsigned char hsr_node_addr_b[ETH_ALEN];
+	int hsr_node_if1_age;
+	u16 hsr_node_if1_seq;
+	int hsr_node_if2_age;
+	u16 hsr_node_if2_seq;
+	int addr_b_ifindex;
+	int res;
+
+	if (!info)
+		goto invalid;
+
+	na = info->attrs[HSR_A_IFINDEX];
+	if (!na)
+		goto invalid;
+	na = info->attrs[HSR_A_NODE_ADDR];
+	if (!na)
+		goto invalid;
+
+	hsr_dev = __dev_get_by_index(genl_info_net(info),
+					nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+	if (!hsr_dev)
+		goto invalid;
+	if (!is_hsr_master(hsr_dev))
+		goto invalid;
+
+
+	/* Send reply */
+
+	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb_out) {
+		res = -ENOMEM;
+		goto fail;
+	}
+
+	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+				info->snd_seq, &hsr_genl_family, 0,
+				HSR_C_SET_NODE_STATUS);
+	if (!msg_head) {
+		res = -ENOMEM;
+		goto nla_put_failure;
+	}
+
+	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+	if (res < 0)
+		goto nla_put_failure;
+
+	hsr_priv = netdev_priv(hsr_dev);
+	res = hsr_get_node_data(hsr_priv,
+			(unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
+			hsr_node_addr_b,
+			&addr_b_ifindex,
+			&hsr_node_if1_age,
+			&hsr_node_if1_seq,
+			&hsr_node_if2_age,
+			&hsr_node_if2_seq);
+	if (res < 0)
+		goto fail;
+
+	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
+					nla_data(info->attrs[HSR_A_NODE_ADDR]));
+	if (res < 0)
+		goto nla_put_failure;
+
+	if (addr_b_ifindex > -1) {
+		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
+								hsr_node_addr_b);
+		if (res < 0)
+			goto nla_put_failure;
+
+		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
+		if (res < 0)
+			goto nla_put_failure;
+	}
+
+	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
+	if (res < 0)
+		goto nla_put_failure;
+	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
+	if (res < 0)
+		goto nla_put_failure;
+	if (hsr_priv->slave[0])
+		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
+						hsr_priv->slave[0]->ifindex);
+	if (res < 0)
+		goto nla_put_failure;
+
+	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
+	if (res < 0)
+		goto nla_put_failure;
+	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
+	if (res < 0)
+		goto nla_put_failure;
+	if (hsr_priv->slave[1])
+		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
+						hsr_priv->slave[1]->ifindex);
+	if (res < 0)
+		goto nla_put_failure;
+
+	genlmsg_end(skb_out, msg_head);
+	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+	return 0;
+
+invalid:
+	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
+	return 0;
+
+nla_put_failure:
+	kfree_skb(skb_out);
+	/* Fall through */
+
+fail:
+	return res;
+}
+
+static struct genl_ops hsr_ops_get_node_status = {
+	.cmd = HSR_C_GET_NODE_STATUS,
+	.flags = 0,
+	.policy = hsr_genl_policy,
+	.doit = hsr_get_node_status,
+	.dumpit = NULL,
+};
+
+
+/* Get a list of MacAddressA of all nodes known to this node (other than self).
+ */
+static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
+{
+	/* For receiving */
+	struct nlattr *na;
+	struct net_device *hsr_dev;
+
+	/* For sending */
+	struct sk_buff *skb_out;
+	void *msg_head;
+	struct hsr_priv *hsr_priv;
+	void *pos;
+	unsigned char addr[ETH_ALEN];
+	int res;
+
+	if (!info)
+		goto invalid;
+
+	na = info->attrs[HSR_A_IFINDEX];
+	if (!na)
+		goto invalid;
+
+	hsr_dev = __dev_get_by_index(genl_info_net(info),
+				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+	if (!hsr_dev)
+		goto invalid;
+	if (!is_hsr_master(hsr_dev))
+		goto invalid;
+
+
+	/* Send reply */
+
+	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb_out) {
+		res = -ENOMEM;
+		goto fail;
+	}
+
+	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+				info->snd_seq, &hsr_genl_family, 0,
+				HSR_C_SET_NODE_LIST);
+	if (!msg_head) {
+		res = -ENOMEM;
+		goto nla_put_failure;
+	}
+
+	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+	if (res < 0)
+		goto nla_put_failure;
+
+	hsr_priv = netdev_priv(hsr_dev);
+
+	rcu_read_lock();
+	pos = hsr_get_next_node(hsr_priv, NULL, addr);
+	while (pos) {
+		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+		if (res < 0) {
+			rcu_read_unlock();
+			goto nla_put_failure;
+		}
+		pos = hsr_get_next_node(hsr_priv, pos, addr);
+	}
+	rcu_read_unlock();
+
+	genlmsg_end(skb_out, msg_head);
+	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+	return 0;
+
+invalid:
+	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
+	return 0;
+
+nla_put_failure:
+	kfree_skb(skb_out);
+	/* Fall through */
+
+fail:
+	return res;
+}
+
+
+static struct genl_ops hsr_ops_get_node_list = {
+	.cmd = HSR_C_GET_NODE_LIST,
+	.flags = 0,
+	.policy = hsr_genl_policy,
+	.doit = hsr_get_node_list,
+	.dumpit = NULL,
+};
+
+int __init hsr_netlink_init(void)
+{
+	int rc;
+
+	rc = rtnl_link_register(&hsr_link_ops);
+	if (rc)
+		goto fail_rtnl_link_register;
+
+	rc = genl_register_family(&hsr_genl_family);
+	if (rc)
+		goto fail_genl_register_family;
+
+	rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+	if (rc)
+		goto fail_genl_register_ops;
+
+	rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_list);
+	if (rc)
+		goto fail_genl_register_ops_node_list;
+
+	rc = genl_register_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
+	if (rc)
+		goto fail_genl_register_mc_group;
+
+	return 0;
+
+fail_genl_register_mc_group:
+	genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_list);
+fail_genl_register_ops_node_list:
+	genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+fail_genl_register_ops:
+	genl_unregister_family(&hsr_genl_family);
+fail_genl_register_family:
+	rtnl_link_unregister(&hsr_link_ops);
+fail_rtnl_link_register:
+
+	return rc;
+}
+
+void __exit hsr_netlink_exit(void)
+{
+	genl_unregister_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
+	genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+	genl_unregister_family(&hsr_genl_family);
+
+	rtnl_link_unregister(&hsr_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("hsr");
diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h
new file mode 100644
index 0000000..d4579dc
--- /dev/null
+++ b/net/hsr/hsr_netlink.h
@@ -0,0 +1,30 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __HSR_NETLINK_H
+#define __HSR_NETLINK_H
+
+#include <linux/if_ether.h>
+#include <linux/module.h>
+#include <uapi/linux/hsr_netlink.h>
+
+struct hsr_priv;
+
+int __init hsr_netlink_init(void);
+void __exit hsr_netlink_exit(void);
+
+void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
+		      int dev_idx);
+void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN]);
+void hsr_nl_framedrop(int dropcount, int dev_idx);
+void hsr_nl_linkdown(int dev_idx);
+
+#endif /* __HSR_NETLINK_H */
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index ff41b4d..426b5df 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -440,7 +440,6 @@
 		default:
 			pr_debug("ERROR: unknown UDP format\n");
 			goto err;
-			break;
 		}
 
 		pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
@@ -655,7 +654,9 @@
 	head[1] = iphc1;
 
 	skb_pull(skb, sizeof(struct ipv6hdr));
+	skb_reset_transport_header(skb);
 	memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+	skb_reset_network_header(skb);
 
 	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
 				skb->len);
@@ -738,7 +739,6 @@
 		return -ENOMEM;
 
 	skb_push(new, sizeof(struct ipv6hdr));
-	skb_reset_network_header(new);
 	skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
 
 	new->protocol = htons(ETH_P_IPV6);
@@ -785,7 +785,6 @@
 		goto skb_err;
 
 	frame->skb->priority = skb->priority;
-	frame->skb->dev = skb->dev;
 
 	/* reserve headroom for uncompressed ipv6 header */
 	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
@@ -1061,7 +1060,6 @@
 		skb = new;
 
 		skb_push(skb, sizeof(struct udphdr));
-		skb_reset_transport_header(skb);
 		skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
 
 		lowpan_raw_dump_table(__func__, "raw UDP header dump",
@@ -1104,50 +1102,40 @@
 	return 0;
 }
 
-static int lowpan_get_mac_header_length(struct sk_buff *skb)
-{
-	/*
-	 * Currently long addressing mode is supported only, so the overall
-	 * header size is 21:
-	 * FC SeqNum DPAN DA  SA  Sec
-	 * 2  +  1  +  2 + 8 + 8 + 0  = 21
-	 */
-	return 21;
-}
-
 static int
 lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
 			int mlen, int plen, int offset, int type)
 {
 	struct sk_buff *frag;
-	int hlen, ret;
+	int hlen;
 
 	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
 			LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
 
 	lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
 
-	frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+	frag = netdev_alloc_skb(skb->dev,
+				hlen + mlen + plen + IEEE802154_MFR_SIZE);
 	if (!frag)
 		return -ENOMEM;
 
 	frag->priority = skb->priority;
-	frag->dev = skb->dev;
 
 	/* copy header, MFR and payload */
-	memcpy(skb_put(frag, mlen), skb->data, mlen);
-	memcpy(skb_put(frag, hlen), head, hlen);
+	skb_put(frag, mlen);
+	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
 
-	if (plen)
-		skb_copy_from_linear_data_offset(skb, offset + mlen,
-					skb_put(frag, plen), plen);
+	skb_put(frag, hlen);
+	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
+
+	skb_put(frag, plen);
+	skb_copy_to_linear_data_offset(frag, mlen + hlen,
+				       skb_network_header(skb) + offset, plen);
 
 	lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
 								frag->len);
 
-	ret = dev_queue_xmit(frag);
-
-	return ret;
+	return dev_queue_xmit(frag);
 }
 
 static int
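
For context, the head[] buffer filled in by the caller follows the RFC 4944
fragmentation headers; the layout as I read it (sketch only):

	/* FRAG1 (4 bytes): 11000 + datagram_size (11 bits), datagram_tag (16 bits)
	 * FRAGN (5 bytes): 11100 + datagram_size (11 bits), datagram_tag (16 bits),
	 *                  datagram_offset (8 bits, in units of 8 octets)
	 * hence head[4] = offset / 8 in the caller below.
	 */
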
@@ -1156,7 +1144,7 @@
 	int  err, header_length, payload_length, tag, offset = 0;
 	u8 head[5];
 
-	header_length = lowpan_get_mac_header_length(skb);
+	header_length = skb->mac_len;
 	payload_length = skb->len - header_length;
 	tag = lowpan_dev_info(dev)->fragment_tag++;
 
@@ -1181,7 +1169,7 @@
 	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
 	head[0] |= LOWPAN_DISPATCH_FRAGN;
 
-	while ((payload_length - offset > 0) && (err >= 0)) {
+	while (payload_length - offset > 0) {
 		int len = LOWPAN_FRAG_SIZE;
 
 		head[4] = offset / 8;
@@ -1327,8 +1315,6 @@
 
 		/* Pull off the 1-byte of 6lowpan header. */
 		skb_pull(local_skb, 1);
-		skb_reset_network_header(local_skb);
-		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
 		lowpan_give_skb_to_devices(local_skb);
 
@@ -1372,8 +1358,10 @@
 	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
 	if (!real_dev)
 		return -ENODEV;
-	if (real_dev->type != ARPHRD_IEEE802154)
+	if (real_dev->type != ARPHRD_IEEE802154) {
+		dev_put(real_dev);
 		return -EINVAL;
+	}
 
 	lowpan_dev_info(dev)->real_dev = real_dev;
 	lowpan_dev_info(dev)->fragment_tag = 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cfeb85c..70011e0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -245,29 +245,6 @@
 }
 EXPORT_SYMBOL(inet_listen);
 
-u32 inet_ehash_secret __read_mostly;
-EXPORT_SYMBOL(inet_ehash_secret);
-
-u32 ipv6_hash_secret __read_mostly;
-EXPORT_SYMBOL(ipv6_hash_secret);
-
-/*
- * inet_ehash_secret must be set exactly once, and to a non nul value
- * ipv6_hash_secret must be set exactly once.
- */
-void build_ehash_secret(void)
-{
-	u32 rnd;
-
-	do {
-		get_random_bytes(&rnd, sizeof(rnd));
-	} while (rnd == 0);
-
-	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
-		get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-}
-EXPORT_SYMBOL(build_ehash_secret);
-
 /*
  *	Create an inet socket.
  */
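
The removed build_ehash_secret() is superseded by on-demand initialization at
the point the secret is first needed; the replacement pattern elsewhere in
this window looks roughly like this (sketch, assuming the
net_get_random_once() helper merged alongside):

	static u32 ehash_secret __read_mostly;

	static void ehash_secret_init(void)
	{
		/* One-time, race-free initialization; replaces the old
		 * cmpxchg()-based build_ehash_secret() dance.
		 */
		net_get_random_once(&ehash_secret, sizeof(ehash_secret));
	}
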
@@ -284,10 +261,6 @@
 	int try_loading_module = 0;
 	int err;
 
-	if (unlikely(!inet_ehash_secret))
-		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
-			build_ehash_secret();
-
 	sock->state = SS_UNCONNECTED;
 
 	/* Look for the requested type/protocol pair. */
@@ -1254,36 +1227,36 @@
 	if (ihl < sizeof(*iph))
 		goto out;
 
+	proto = iph->protocol;
+
+	/* Warning: after this point, iph might be no longer valid */
 	if (unlikely(!pskb_may_pull(skb, ihl)))
 		goto out;
-
 	__skb_pull(skb, ihl);
+
 	skb_reset_transport_header(skb);
-	iph = ip_hdr(skb);
-	proto = iph->protocol;
 	err = -EPROTONOSUPPORT;
 
-	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_send_check))
 		err = ops->callbacks.gso_send_check(skb);
-	rcu_read_unlock();
 
 out:
 	return err;
 }
 
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
-	netdev_features_t features)
+					netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	const struct net_offload *ops;
+	unsigned int offset = 0;
+	bool udpfrag, encap;
 	struct iphdr *iph;
 	int proto;
+	int nhoff;
 	int ihl;
 	int id;
-	unsigned int offset = 0;
-	bool tunnel;
 
 	if (unlikely(skb_shinfo(skb)->gso_type &
 		     ~(SKB_GSO_TCPV4 |
@@ -1291,12 +1264,16 @@
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_IPIP |
+		       SKB_GSO_SIT |
 		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_MPLS |
 		       0)))
 		goto out;
 
+	skb_reset_network_header(skb);
+	nhoff = skb_network_header(skb) - skb_mac_header(skb);
 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 		goto out;
 
@@ -1305,42 +1282,50 @@
 	if (ihl < sizeof(*iph))
 		goto out;
 
-	if (unlikely(!pskb_may_pull(skb, ihl)))
-		goto out;
-
-	tunnel = !!skb->encapsulation;
-
-	__skb_pull(skb, ihl);
-	skb_reset_transport_header(skb);
-	iph = ip_hdr(skb);
 	id = ntohs(iph->id);
 	proto = iph->protocol;
+
+	/* Warning: after this point, iph might be no longer valid */
+	if (unlikely(!pskb_may_pull(skb, ihl)))
+		goto out;
+	__skb_pull(skb, ihl);
+
+	encap = SKB_GSO_CB(skb)->encap_level > 0;
+	if (encap)
+		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	SKB_GSO_CB(skb)->encap_level += ihl;
+
+	skb_reset_transport_header(skb);
+
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
-	rcu_read_lock();
+	/* Note : following gso_segment() might change skb->encapsulation */
+	udpfrag = !skb->encapsulation && proto == IPPROTO_UDP;
+
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment))
 		segs = ops->callbacks.gso_segment(skb, features);
-	rcu_read_unlock();
 
 	if (IS_ERR_OR_NULL(segs))
 		goto out;
 
 	skb = segs;
 	do {
-		iph = ip_hdr(skb);
-		if (!tunnel && proto == IPPROTO_UDP) {
+		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
+		if (udpfrag) {
 			iph->id = htons(id);
 			iph->frag_off = htons(offset >> 3);
 			if (skb->next != NULL)
 				iph->frag_off |= htons(IP_MF);
-			offset += (skb->len - skb->mac_len - iph->ihl * 4);
-		} else  {
+			offset += skb->len - nhoff - ihl;
+		} else {
 			iph->id = htons(id++);
 		}
-		iph->tot_len = htons(skb->len - skb->mac_len);
-		iph->check = 0;
-		iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
+		iph->tot_len = htons(skb->len - nhoff);
+		ip_send_check(iph);
+		if (encap)
+			skb_reset_inner_headers(skb);
+		skb->network_header = (u8 *)iph - skb->head;
 	} while ((skb = skb->next));
 
 out:
@@ -1518,6 +1503,7 @@
 	ptr[0] = __alloc_percpu(mibsize, align);
 	if (!ptr[0])
 		return -ENOMEM;
+
 #if SNMP_ARRAY_SZ == 2
 	ptr[1] = __alloc_percpu(mibsize, align);
 	if (!ptr[1]) {
@@ -1546,6 +1532,7 @@
 };
 
 static const struct net_protocol udp_protocol = {
+	.early_demux =	udp_v4_early_demux,
 	.handler =	udp_rcv,
 	.err_handler =	udp_err,
 	.no_policy =	1,
@@ -1561,6 +1548,8 @@
 
 static __net_init int ipv4_mib_init_net(struct net *net)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
 			  sizeof(struct tcp_mib),
 			  __alignof__(struct tcp_mib)) < 0)
@@ -1569,6 +1558,17 @@
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
+
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *af_inet_stats;
+		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
+		u64_stats_init(&af_inet_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+		u64_stats_init(&af_inet_stats->syncp);
+#endif
+	}
+
 	if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
 			  sizeof(struct linux_mib),
 			  __alignof__(struct linux_mib)) < 0)
@@ -1646,6 +1646,13 @@
 	},
 };
 
+static const struct net_offload ipip_offload = {
+	.callbacks = {
+		.gso_send_check = inet_gso_send_check,
+		.gso_segment	= inet_gso_segment,
+	},
+};
+
 static int __init ipv4_offload_init(void)
 {
 	/*
@@ -1657,6 +1664,7 @@
 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
 
 	dev_add_offload(&ip_packet_offload);
+	inet_add_offload(&ipip_offload, IPPROTO_IPIP);
 	return 0;
 }
 
@@ -1705,8 +1713,6 @@
 	ip_static_sysctl_init();
 #endif
 
-	tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
 	/*
 	 *	Add all the base protocols.
 	 */
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 109ee89..7785b28 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,7 +121,6 @@
 	struct aead_givcrypt_request *req;
 	struct scatterlist *sg;
 	struct scatterlist *asg;
-	struct esp_data *esp;
 	struct sk_buff *trailer;
 	void *tmp;
 	u8 *iv;
@@ -139,8 +138,7 @@
 
 	/* skb is pure payload to encrypt */
 
-	esp = x->data;
-	aead = esp->aead;
+	aead = x->data;
 	alen = crypto_aead_authsize(aead);
 
 	tfclen = 0;
@@ -154,8 +152,6 @@
 	}
 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	if (esp->padlen)
-		clen = ALIGN(clen, esp->padlen);
 	plen = clen - skb->len - tfclen;
 
 	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
@@ -280,8 +276,7 @@
 {
 	const struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
-	struct esp_data *esp = x->data;
-	struct crypto_aead *aead = esp->aead;
+	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 	int elen = skb->len - hlen;
@@ -376,8 +371,7 @@
 static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ip_esp_hdr *esph;
-	struct esp_data *esp = x->data;
-	struct crypto_aead *aead = esp->aead;
+	struct crypto_aead *aead = x->data;
 	struct aead_request *req;
 	struct sk_buff *trailer;
 	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
@@ -459,9 +453,8 @@
 
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
-	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
-	u32 align = max_t(u32, blksize, esp->padlen);
+	struct crypto_aead *aead = x->data;
+	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	unsigned int net_adj;
 
 	switch (x->props.mode) {
@@ -476,8 +469,8 @@
 		BUG();
 	}
 
-	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + net_adj - 2;
+	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+		 net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
@@ -511,18 +504,16 @@
 
 static void esp_destroy(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = x->data;
 
-	if (!esp)
+	if (!aead)
 		return;
 
-	crypto_free_aead(esp->aead);
-	kfree(esp);
+	crypto_free_aead(aead);
 }
 
 static int esp_init_aead(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
 	struct crypto_aead *aead;
 	int err;
 
@@ -531,7 +522,7 @@
 	if (IS_ERR(aead))
 		goto error;
 
-	esp->aead = aead;
+	x->data = aead;
 
 	err = crypto_aead_setkey(aead, x->aead->alg_key,
 				 (x->aead->alg_key_len + 7) / 8);
@@ -548,7 +539,6 @@
 
 static int esp_init_authenc(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
 	struct crypto_aead *aead;
 	struct crypto_authenc_key_param *param;
 	struct rtattr *rta;
@@ -583,7 +573,7 @@
 	if (IS_ERR(aead))
 		goto error;
 
-	esp->aead = aead;
+	x->data = aead;
 
 	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
 		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
@@ -638,16 +628,11 @@
 
 static int esp_init_state(struct xfrm_state *x)
 {
-	struct esp_data *esp;
 	struct crypto_aead *aead;
 	u32 align;
 	int err;
 
-	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
-	if (esp == NULL)
-		return -ENOMEM;
-
-	x->data = esp;
+	x->data = NULL;
 
 	if (x->aead)
 		err = esp_init_aead(x);
@@ -657,9 +642,7 @@
 	if (err)
 		goto error;
 
-	aead = esp->aead;
-
-	esp->padlen = 0;
+	aead = x->data;
 
 	x->props.header_len = sizeof(struct ip_esp_hdr) +
 			      crypto_aead_ivsize(aead);
@@ -683,9 +666,7 @@
 	}
 
 	align = ALIGN(crypto_aead_blocksize(aead), 4);
-	if (esp->padlen)
-		align = max_t(u32, align, esp->padlen);
-	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
 
 error:
 	return err;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index b3f627a..d846304 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -933,7 +933,6 @@
 		local_bh_disable();
 
 		frn->tb_id = tb->tb_id;
-		rcu_read_lock();
 		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
 
 		if (!frn->err) {
@@ -942,7 +941,6 @@
 			frn->type = res.type;
 			frn->scope = res.scope;
 		}
-		rcu_read_unlock();
 		local_bh_enable();
 	}
 }
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index af0f14a..388d113 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -24,21 +24,17 @@
 }
 
 /* Exported by fib_semantics.c */
-extern void fib_release_info(struct fib_info *);
-extern struct fib_info *fib_create_info(struct fib_config *cfg);
-extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
-extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-			 u32 tb_id, u8 type, __be32 dst,
-			 int dst_len, u8 tos, struct fib_info *fi,
-			 unsigned int);
-extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-		      int dst_len, u32 tb_id, struct nl_info *info,
-		      unsigned int nlm_flags);
-extern struct fib_alias *fib_find_alias(struct list_head *fah,
-					u8 tos, u32 prio);
-extern int fib_detect_death(struct fib_info *fi, int order,
-			    struct fib_info **last_resort,
-			    int *last_idx, int dflt);
+void fib_release_info(struct fib_info *);
+struct fib_info *fib_create_info(struct fib_config *cfg);
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
+int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
+		  u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
+		  unsigned int);
+void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
+	       u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
+struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
+int fib_detect_death(struct fib_info *fi, int order,
+		     struct fib_info **last_resort, int *last_idx, int dflt);
 
 static inline void fib_result_assign(struct fib_result *res,
 				     struct fib_info *fi)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d5dbca5..e63f47a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -380,7 +380,7 @@
 }
 
 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-	       int dst_len, u32 tb_id, struct nl_info *info,
+	       int dst_len, u32 tb_id, const struct nl_info *info,
 	       unsigned int nlm_flags)
 {
 	struct sk_buff *skb;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3df6d3e..5afeb5a 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -762,12 +762,9 @@
 
 		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
 		   tn->pos + tn->bits - 1) {
-			if (tkey_extract_bits(node->key,
-					      oldtnode->pos + oldtnode->bits,
-					      1) == 0)
-				put_child(tn, 2*i, node);
-			else
-				put_child(tn, 2*i+1, node);
+			put_child(tn,
+				tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
+				node);
 			continue;
 		}
 
@@ -1120,12 +1117,8 @@
 		 *  first tnode need some special handling
 		 */
 
-		if (tp)
-			pos = tp->pos+tp->bits;
-		else
-			pos = 0;
-
 		if (n) {
+			pos = tp ? tp->pos+tp->bits : 0;
 			newpos = tkey_mismatch(key, pos, n->key);
 			tn = tnode_new(n->key, newpos, 1);
 		} else {
@@ -2530,16 +2523,17 @@
 		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
 			const struct fib_info *fi = fa->fa_info;
 			unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
-			int len;
 
 			if (fa->fa_type == RTN_BROADCAST
 			    || fa->fa_type == RTN_MULTICAST)
 				continue;
 
+			seq_setwidth(seq, 127);
+
 			if (fi)
 				seq_printf(seq,
 					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
-					 "%d\t%08X\t%d\t%u\t%u%n",
+					 "%d\t%08X\t%d\t%u\t%u",
 					 fi->fib_dev ? fi->fib_dev->name : "*",
 					 prefix,
 					 fi->fib_nh->nh_gw, flags, 0, 0,
@@ -2548,15 +2542,15 @@
 					 (fi->fib_advmss ?
 					  fi->fib_advmss + 40 : 0),
 					 fi->fib_window,
-					 fi->fib_rtt >> 3, &len);
+					 fi->fib_rtt >> 3);
 			else
 				seq_printf(seq,
 					 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
-					 "%d\t%08X\t%d\t%u\t%u%n",
+					 "%d\t%08X\t%d\t%u\t%u",
 					 prefix, 0, flags, 0, 0, 0,
-					 mask, 0, 0, 0, &len);
+					 mask, 0, 0, 0);
 
-			seq_printf(seq, "%*s\n", 127 - len, "");
+			seq_pad(seq, '\n');
 		}
 	}
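
The %n removal leans on the seq_file padding helpers introduced for this
purpose; the general pattern (assuming the seq_setwidth()/seq_pad() interface
used above):

	seq_setwidth(seq, 127);			/* declare the target width */
	seq_printf(seq, "%s\t%08X", name, prefix);
	seq_pad(seq, '\n');			/* pad with spaces, then emit '\n' */
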
 
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 736c9fc3..5893e99 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -93,35 +93,6 @@
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
-	int err;
-
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
-
-	if (skb_is_gso(skb)) {
-		err = skb_unclone(skb, GFP_ATOMIC);
-		if (unlikely(err))
-			goto error;
-		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
-		return skb;
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
-		err = skb_checksum_help(skb);
-		if (unlikely(err))
-			goto error;
-	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	return skb;
-error:
-	kfree_skb(skb);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
-
 static __sum16 check_checksum(struct sk_buff *skb)
 {
 	__sum16 csum = 0;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 55e6bfb..e5d4361 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -39,7 +39,8 @@
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
-				  SKB_GSO_GRE)))
+				  SKB_GSO_GRE |
+				  SKB_GSO_IPIP)))
 		goto out;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5f7d11a..5c0e8bc 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -353,6 +353,9 @@
 	saddr = fib_compute_spec_dst(skb);
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
+
 	if (icmp_param->replyopts.opt.opt.optlen) {
 		ipc.opt = &icmp_param->replyopts.opt;
 		if (ipc.opt->opt.srr)
@@ -608,6 +611,8 @@
 	ipc.addr = iph->saddr;
 	ipc.opt = &icmp_param->replyopts.opt;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
 
 	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
 			       type, code, icmp_param);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6acb541..fc0e649 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -29,27 +29,19 @@
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-/*
- * This struct holds the first and last local port number.
- */
-struct local_ports sysctl_local_ports __read_mostly = {
-	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
-	.range = { 32768, 61000 },
-};
-
 unsigned long *sysctl_local_reserved_ports;
 EXPORT_SYMBOL(sysctl_local_reserved_ports);
 
-void inet_get_local_port_range(int *low, int *high)
+void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
-		*low = sysctl_local_ports.range[0];
-		*high = sysctl_local_ports.range[1];
-	} while (read_seqretry(&sysctl_local_ports.lock, seq));
+		*low = net->ipv4.sysctl_local_ports.range[0];
+		*high = net->ipv4.sysctl_local_ports.range[1];
+	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
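
With the range now per network namespace, callers pass their netns
explicitly, so two namespaces can use disjoint ephemeral ranges. Typical use
after this change (sketch):

	int low, high;

	/* Seqlock-protected read of the ephemeral port range for the
	 * socket's own namespace.
	 */
	inet_get_local_port_range(sock_net(sk), &low, &high);
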
 
@@ -79,17 +71,16 @@
 			    (!reuseport || !sk2->sk_reuseport ||
 			    (sk2->sk_state != TCP_TIME_WAIT &&
 			     !uid_eq(uid, sock_i_uid(sk2))))) {
-				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
-				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-				    sk2_rcv_saddr == sk_rcv_saddr(sk))
+
+				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
 					break;
 			}
 			if (!relax && reuse && sk2->sk_reuse &&
 			    sk2->sk_state != TCP_LISTEN) {
-				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 
-				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-				    sk2_rcv_saddr == sk_rcv_saddr(sk))
+				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
 					break;
 			}
 		}
@@ -116,7 +107,7 @@
 		int remaining, rover, low, high;
 
 again:
-		inet_get_local_port_range(&low, &high);
+		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 		smallest_rover = rover = net_random() % remaining + low;
 
@@ -421,8 +412,8 @@
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol,
 			   flags,
-			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -457,8 +448,8 @@
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
-			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -504,9 +495,9 @@
 	     prev = &req->dl_next) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
-		if (ireq->rmt_port == rport &&
-		    ireq->rmt_addr == raddr &&
-		    ireq->loc_addr == laddr &&
+		if (ireq->ir_rmt_port == rport &&
+		    ireq->ir_rmt_addr == raddr &&
+		    ireq->ir_loc_addr == laddr &&
 		    AF_INET_FAMILY(req->rsk_ops->family)) {
 			WARN_ON(req->sk);
 			*prevp = prev;
@@ -523,7 +514,8 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
+				     inet_rsk(req)->ir_rmt_port,
 				     lopt->hash_rnd, lopt->nr_table_entries);
 
 	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -683,9 +675,9 @@
 		newsk->sk_state = TCP_SYN_RECV;
 		newicsk->icsk_bind_hash = NULL;
 
-		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
-		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
-		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
+		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
+		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
+		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 		newsk->sk_write_space = sk_stream_write_space;
 
 		newicsk->icsk_retransmits = 0;
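
The conversion above moves the port range into struct net but keeps the seqlock discipline: readers loop instead of blocking. For context, a minimal sketch of the matching writer side, assuming the sysctl handler updates the per-net range roughly like this (the handler itself is outside this hunk):

	/* Hedged sketch of the writer that pairs with the lockless
	 * reader in inet_get_local_port_range() above.
	 */
	static void set_local_port_range(struct net *net, int low, int high)
	{
		write_seqlock(&net->ipv4.sysctl_local_ports.lock);
		net->ipv4.sysctl_local_ports.range[0] = low;
		net->ipv4.sysctl_local_ports.range[1] = high;
		write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
	}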
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5f64875..56a964a 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -121,13 +121,13 @@
 
 #if IS_ENABLED(CONFIG_IPV6)
 	if (r->idiag_family == AF_INET6) {
-		const struct ipv6_pinfo *np = inet6_sk(sk);
 
-		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
-		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
+		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
 
 		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+			if (nla_put_u8(skb, INET_DIAG_TCLASS,
+				       inet6_sk(sk)->tclass) < 0)
 				goto errout;
 	}
 #endif
@@ -222,7 +222,7 @@
 			       u32 portid, u32 seq, u16 nlmsg_flags,
 			       const struct nlmsghdr *unlh)
 {
-	long tmo;
+	s32 tmo;
 	struct inet_diag_msg *r;
 	struct nlmsghdr *nlh;
 
@@ -234,7 +234,7 @@
 	r = nlmsg_data(nlh);
 	BUG_ON(tw->tw_state != TCP_TIME_WAIT);
 
-	tmo = tw->tw_ttd - jiffies;
+	tmo = tw->tw_ttd - inet_tw_time_stamp();
 	if (tmo < 0)
 		tmo = 0;
 
@@ -248,18 +248,15 @@
 	r->id.idiag_dst[0]    = tw->tw_daddr;
 	r->idiag_state	      = tw->tw_substate;
 	r->idiag_timer	      = 3;
-	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
+	r->idiag_expires      = jiffies_to_msecs(tmo);
 	r->idiag_rqueue	      = 0;
 	r->idiag_wqueue	      = 0;
 	r->idiag_uid	      = 0;
 	r->idiag_inode	      = 0;
 #if IS_ENABLED(CONFIG_IPV6)
 	if (tw->tw_family == AF_INET6) {
-		const struct inet6_timewait_sock *tw6 =
-						inet6_twsk((struct sock *)tw);
-
-		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
-		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
+		*(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
+		*(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
 	}
 #endif
 
@@ -273,10 +270,11 @@
 			const struct nlmsghdr *unlh)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
-		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-					   skb, r, portid, seq, nlmsg_flags,
-					   unlh);
-	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
+		return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+					   nlmsg_flags, unlh);
+
+	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
+				  nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -338,12 +336,9 @@
 		err = 0;
 
 out:
-	if (sk) {
-		if (sk->sk_state == TCP_TIME_WAIT)
-			inet_twsk_put((struct inet_timewait_sock *)sk);
-		else
-			sock_put(sk);
-	}
+	if (sk)
+		sock_gen_put(sk);
+
 out_nosk:
 	return err;
 }
@@ -489,10 +484,9 @@
 	entry.family = sk->sk_family;
 #if IS_ENABLED(CONFIG_IPV6)
 	if (entry.family == AF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
 
-		entry.saddr = np->rcv_saddr.s6_addr32;
-		entry.daddr = np->daddr.s6_addr32;
+		entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+		entry.daddr = sk->sk_v6_daddr.s6_addr32;
 	} else
 #endif
 	{
@@ -635,22 +629,22 @@
 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
-static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+static int inet_twsk_diag_dump(struct sock *sk,
 			       struct sk_buff *skb,
 			       struct netlink_callback *cb,
 			       struct inet_diag_req_v2 *r,
 			       const struct nlattr *bc)
 {
+	struct inet_timewait_sock *tw = inet_twsk(sk);
+
 	if (bc != NULL) {
 		struct inet_diag_entry entry;
 
 		entry.family = tw->tw_family;
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == AF_INET6) {
-			struct inet6_timewait_sock *tw6 =
-						inet6_twsk((struct sock *)tw);
-			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
-			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
+			entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
+			entry.daddr = tw->tw_v6_daddr.s6_addr32;
 		} else
 #endif
 		{
@@ -682,12 +676,12 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	if (sk->sk_family == AF_INET6) {
 		if (req->rsk_ops->family == AF_INET6) {
-			entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
-			entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+			entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
+			entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
 		} else if (req->rsk_ops->family == AF_INET) {
-			ipv6_addr_set_v4mapped(ireq->loc_addr,
+			ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
 					       &entry->saddr_storage);
-			ipv6_addr_set_v4mapped(ireq->rmt_addr,
+			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
 					       &entry->daddr_storage);
 			entry->saddr = entry->saddr_storage.s6_addr32;
 			entry->daddr = entry->daddr_storage.s6_addr32;
@@ -695,8 +689,8 @@
 	} else
 #endif
 	{
-		entry->saddr = &ireq->loc_addr;
-		entry->daddr = &ireq->rmt_addr;
+		entry->saddr = &ireq->ir_loc_addr;
+		entry->daddr = &ireq->ir_rmt_addr;
 	}
 }
 
@@ -731,9 +725,9 @@
 		tmo = 0;
 
 	r->id.idiag_sport = inet->inet_sport;
-	r->id.idiag_dport = ireq->rmt_port;
-	r->id.idiag_src[0] = ireq->loc_addr;
-	r->id.idiag_dst[0] = ireq->rmt_addr;
+	r->id.idiag_dport = ireq->ir_rmt_port;
+	r->id.idiag_src[0] = ireq->ir_loc_addr;
+	r->id.idiag_dst[0] = ireq->ir_rmt_addr;
 	r->idiag_expires = jiffies_to_msecs(tmo);
 	r->idiag_rqueue = 0;
 	r->idiag_wqueue = 0;
@@ -792,13 +786,13 @@
 
 			if (reqnum < s_reqnum)
 				continue;
-			if (r->id.idiag_dport != ireq->rmt_port &&
+			if (r->id.idiag_dport != ireq->ir_rmt_port &&
 			    r->id.idiag_dport)
 				continue;
 
 			if (bc) {
 				inet_diag_req_addrs(sk, req, &entry);
-				entry.dport = ntohs(ireq->rmt_port);
+				entry.dport = ntohs(ireq->ir_rmt_port);
 
 				if (!inet_diag_bc_run(bc, &entry))
 					continue;
@@ -911,8 +905,7 @@
 
 		num = 0;
 
-		if (hlist_nulls_empty(&head->chain) &&
-			hlist_nulls_empty(&head->twchain))
+		if (hlist_nulls_empty(&head->chain))
 			continue;
 
 		if (i > s_i)
@@ -920,7 +913,7 @@
 
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &head->chain) {
-			struct inet_sock *inet = inet_sk(sk);
+			int res;
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
@@ -929,15 +922,19 @@
 			if (!(r->idiag_states & (1 << sk->sk_state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
-					sk->sk_family != r->sdiag_family)
+			    sk->sk_family != r->sdiag_family)
 				goto next_normal;
-			if (r->id.idiag_sport != inet->inet_sport &&
+			if (r->id.idiag_sport != htons(sk->sk_num) &&
 			    r->id.idiag_sport)
 				goto next_normal;
-			if (r->id.idiag_dport != inet->inet_dport &&
+			if (r->id.idiag_dport != sk->sk_dport &&
 			    r->id.idiag_dport)
 				goto next_normal;
-			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+			if (sk->sk_state == TCP_TIME_WAIT)
+				res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
+			else
+				res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+			if (res < 0) {
 				spin_unlock_bh(lock);
 				goto done;
 			}
@@ -945,33 +942,6 @@
 			++num;
 		}
 
-		if (r->idiag_states & TCPF_TIME_WAIT) {
-			struct inet_timewait_sock *tw;
-
-			inet_twsk_for_each(tw, node,
-				    &head->twchain) {
-				if (!net_eq(twsk_net(tw), net))
-					continue;
-
-				if (num < s_num)
-					goto next_dying;
-				if (r->sdiag_family != AF_UNSPEC &&
-						tw->tw_family != r->sdiag_family)
-					goto next_dying;
-				if (r->id.idiag_sport != tw->tw_sport &&
-				    r->id.idiag_sport)
-					goto next_dying;
-				if (r->id.idiag_dport != tw->tw_dport &&
-				    r->id.idiag_dport)
-					goto next_dying;
-				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
-					spin_unlock_bh(lock);
-					goto done;
-				}
-next_dying:
-				++num;
-			}
-		}
 		spin_unlock_bh(lock);
 	}
 
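
The switch from a long jiffies delta to s32 above works because tw_ttd is now a 32-bit timestamp; subtracting two u32 stamps through a signed 32-bit result stays correct across jiffies wraparound. A hedged sketch, assuming inet_tw_time_stamp() is a (u32)jiffies snapshot (its definition is not part of this hunk):

	/* Wraparound-safe "time remaining" with 32-bit stamps. */
	u32 now = inet_tw_time_stamp();		/* assumed (u32)jiffies */
	s32 tmo = tw->tw_ttd - now;		/* valid even after wrap */

	if (tmo < 0)				/* deadline already passed */
		tmo = 0;
	r->idiag_expires = jiffies_to_msecs(tmo);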
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c5313a9..bb075fc 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -93,9 +93,6 @@
 	}
 	rwlock_init(&f->lock);
 
-	f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^
-				   (jiffies ^ (jiffies >> 6)));
-
 	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
 			(unsigned long)f);
 	f->secret_timer.expires = jiffies + f->secret_interval;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 96da9c7..8b9cf27 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,31 @@
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
+				 const __u16 lport, const __be32 faddr,
+				 const __be16 fport)
+{
+	static u32 inet_ehash_secret __read_mostly;
+
+	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
+
+	return __inet_ehashfn(laddr, lport, faddr, fport,
+			      inet_ehash_secret + net_hash_mix(net));
+}
+
+
+static unsigned int inet_sk_ehashfn(const struct sock *sk)
+{
+	const struct inet_sock *inet = inet_sk(sk);
+	const __be32 laddr = inet->inet_rcv_saddr;
+	const __u16 lport = inet->inet_num;
+	const __be32 faddr = inet->inet_daddr;
+	const __be16 fport = inet->inet_dport;
+	struct net *net = sock_net(sk);
+
+	return inet_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 /*
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
@@ -230,6 +255,19 @@
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 
+/* All sockets share common refcount, but have different destructors */
+void sock_gen_put(struct sock *sk)
+{
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (sk->sk_state == TCP_TIME_WAIT)
+		inet_twsk_free(inet_twsk(sk));
+	else
+		sk_free(sk);
+}
+EXPORT_SYMBOL_GPL(sock_gen_put);
+
 struct sock *__inet_lookup_established(struct net *net,
 				  struct inet_hashinfo *hashinfo,
 				  const __be32 saddr, const __be16 sport,
@@ -255,13 +293,13 @@
 		if (likely(INET_MATCH(sk, net, acookie,
 				      saddr, daddr, ports, dif))) {
 			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-				goto begintw;
+				goto out;
 			if (unlikely(!INET_MATCH(sk, net, acookie,
 						 saddr, daddr, ports, dif))) {
-				sock_put(sk);
+				sock_gen_put(sk);
 				goto begin;
 			}
-			goto out;
+			goto found;
 		}
 	}
 	/*
@@ -271,37 +309,9 @@
 	 */
 	if (get_nulls_value(node) != slot)
 		goto begin;
-
-begintw:
-	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-		if (sk->sk_hash != hash)
-			continue;
-		if (likely(INET_TW_MATCH(sk, net, acookie,
-					 saddr, daddr, ports,
-					 dif))) {
-			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-				sk = NULL;
-				goto out;
-			}
-			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
-						    saddr, daddr, ports,
-						    dif))) {
-				inet_twsk_put(inet_twsk(sk));
-				goto begintw;
-			}
-			goto out;
-		}
-	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot)
-		goto begintw;
-	sk = NULL;
 out:
+	sk = NULL;
+found:
 	rcu_read_unlock();
 	return sk;
 }
@@ -326,39 +336,29 @@
 	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
-	struct inet_timewait_sock *tw;
+	struct inet_timewait_sock *tw = NULL;
 	int twrefcnt = 0;
 
 	spin_lock(lock);
 
-	/* Check TIME-WAIT sockets first. */
-	sk_nulls_for_each(sk2, node, &head->twchain) {
-		if (sk2->sk_hash != hash)
-			continue;
-
-		if (likely(INET_TW_MATCH(sk2, net, acookie,
-					 saddr, daddr, ports, dif))) {
-			tw = inet_twsk(sk2);
-			if (twsk_unique(sk, sk2, twp))
-				goto unique;
-			else
-				goto not_unique;
-		}
-	}
-	tw = NULL;
-
-	/* And established part... */
 	sk_nulls_for_each(sk2, node, &head->chain) {
 		if (sk2->sk_hash != hash)
 			continue;
+
 		if (likely(INET_MATCH(sk2, net, acookie,
-				      saddr, daddr, ports, dif)))
+					 saddr, daddr, ports, dif))) {
+			if (sk2->sk_state == TCP_TIME_WAIT) {
+				tw = inet_twsk(sk2);
+				if (twsk_unique(sk, sk2, twp))
+					break;
+			}
 			goto not_unique;
+		}
 	}
 
-unique:
 	/* Must record num and sport now. Otherwise we will see
-	 * in hash table socket with a funny identity. */
+	 * in hash table socket with a funny identity.
+	 */
 	inet->inet_num = lport;
 	inet->inet_sport = htons(lport);
 	sk->sk_hash = hash;
@@ -494,7 +494,7 @@
 		u32 offset = hint + port_offset;
 		struct inet_timewait_sock *tw = NULL;
 
-		inet_get_local_port_range(&low, &high);
+		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 
 		local_bh_disable();
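
inet_ehashfn() above shows the lazy-seeding idiom this series adopts: the hash secret is drawn on first use, not at boot when the entropy pool may still be poorly seeded. The same shape reduced to a minimal sketch (the my_* names are illustrative only):

	#include <linux/jhash.h>
	#include <linux/net.h>

	static u32 my_hash_secret __read_mostly;

	static u32 my_hashfn(u32 key)
	{
		/* Seeds my_hash_secret exactly once, on the first call. */
		net_get_random_once(&my_hash_secret, sizeof(my_hash_secret));
		return jhash_1word(key, my_hash_secret);
	}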
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1f27c9f..6d592f8 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -87,19 +87,11 @@
 	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
 	spin_unlock(&bhead->lock);
 
-#ifdef SOCK_REFCNT_DEBUG
-	if (atomic_read(&tw->tw_refcnt) != 1) {
-		pr_debug("%s timewait_sock %p refcnt=%d\n",
-			 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
-	}
-#endif
-	while (refcnt) {
-		inet_twsk_put(tw);
-		refcnt--;
-	}
+	BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
+	atomic_sub(refcnt, &tw->tw_refcnt);
 }
 
-static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
+void inet_twsk_free(struct inet_timewait_sock *tw)
 {
 	struct module *owner = tw->tw_prot->owner;
 	twsk_destructor((struct sock *)tw);
@@ -118,6 +110,18 @@
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+				   struct hlist_nulls_head *list)
+{
+	hlist_nulls_add_head_rcu(&tw->tw_node, list);
+}
+
+static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+				    struct hlist_head *list)
+{
+	hlist_add_head(&tw->tw_bind_node, list);
+}
+
 /*
  * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -146,26 +150,21 @@
 	spin_lock(lock);
 
 	/*
-	 * Step 2: Hash TW into TIMEWAIT chain.
-	 * Should be done before removing sk from established chain
-	 * because readers are lockless and search established first.
+	 * Step 2: Hash TW into tcp ehash chain.
+	 * Notes :
+	 * - tw_refcnt is set to 3 because:
+	 *   - We have one reference from bhash chain.
+	 *   - We have one reference from ehash chain.
+	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
+	 * committed into memory all tw fields.
 	 */
-	inet_twsk_add_node_rcu(tw, &ehead->twchain);
+	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
-	/* Step 3: Remove SK from established hash. */
+	/* Step 3: Remove SK from hash chain */
 	if (__sk_nulls_del_node_init_rcu(sk))
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-	/*
-	 * Notes :
-	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
-	 * - We add one reference for the bhash link
-	 * - We add one reference for the ehash link
-	 * - We want this refcnt update done before allowing other
-	 *   threads to find this tw in ehash chain.
-	 */
-	atomic_add(1 + 1 + 1, &tw->tw_refcnt);
-
 	spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
@@ -387,11 +386,11 @@
 			if (slot >= INET_TWDR_TWKILL_SLOTS)
 				slot = INET_TWDR_TWKILL_SLOTS - 1;
 		}
-		tw->tw_ttd = jiffies + timeo;
+		tw->tw_ttd = inet_tw_time_stamp() + timeo;
 		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
 		list = &twdr->cells[slot];
 	} else {
-		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+		tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
 
 		if (twdr->twcal_hand < 0) {
 			twdr->twcal_hand = 0;
@@ -490,7 +489,9 @@
 restart_rcu:
 		rcu_read_lock();
 restart:
-		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
+		sk_nulls_for_each_rcu(sk, node, &head->chain) {
+			if (sk->sk_state != TCP_TIME_WAIT)
+				continue;
 			tw = inet_twsk(sk);
 			if ((tw->tw_family != family) ||
 				atomic_read(&twsk_net(tw)->count))
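
The refcount comment in __inet_twsk_hashdance() above lists only the bhash and ehash references; the third count is presumably the reference the caller still owns. A hedged sketch of the caller sequence, modeled on how tcp_time_wait() used this API in this era (not part of the hunk):

	/* tw_refcnt == 3 after the hashdance:
	 *   1) bhash chain link
	 *   2) ehash chain link
	 *   3) this caller's reference, dropped below
	 */
	__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
	inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
	inet_twsk_put(tw);	/* drop reference (3) */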
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b66910a..2481993 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -106,6 +106,7 @@
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
+	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
 	return jhash_3words((__force u32)id << 16 | prot,
 			    (__force u32)saddr, (__force u32)daddr,
 			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3982eab..9124027 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -810,7 +810,7 @@
 	int copy;
 	int err;
 	int offset = 0;
-	unsigned int maxfraglen, fragheaderlen;
+	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
 	int csummode = CHECKSUM_NONE;
 	struct rtable *rt = (struct rtable *)cork->dst;
 
@@ -823,8 +823,10 @@
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
+	maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
+			 mtu : 0xFFFF;
 
-	if (cork->length + length > 0xFFFF - fragheaderlen) {
+	if (cork->length + length > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 			       mtu-exthdrlen);
 		return -EMSGSIZE;
@@ -1035,7 +1037,6 @@
 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 			 struct ipcm_cookie *ipc, struct rtable **rtp)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct ip_options_rcu *opt;
 	struct rtable *rt;
 
@@ -1061,10 +1062,13 @@
 	 * We steal reference to this route, caller should not release it
 	 */
 	*rtp = NULL;
-	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
+	cork->fragsize = ip_sk_use_pmtu(sk) ?
+			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
 	cork->dst = &rt->dst;
 	cork->length = 0;
+	cork->ttl = ipc->ttl;
+	cork->tos = ipc->tos;
+	cork->priority = ipc->priority;
 	cork->tx_flags = ipc->tx_flags;
 
 	return 0;
@@ -1119,7 +1123,7 @@
 	int mtu;
 	int len;
 	int err;
-	unsigned int maxfraglen, fragheaderlen, fraggap;
+	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;
 
 	if (inet->hdrincl)
 		return -EPERM;
@@ -1143,8 +1147,10 @@
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
+	maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
+			 mtu : 0xFFFF;
 
-	if (cork->length + size > 0xFFFF - fragheaderlen) {
+	if (cork->length + size > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
 		return -EMSGSIZE;
 	}
@@ -1308,7 +1314,8 @@
 	/* DF bit is set when we want to see DF on outgoing frames.
 	 * If local_df is set too, we still allow to fragment this frame
 	 * locally. */
-	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
+	if (inet->pmtudisc == IP_PMTUDISC_DO ||
+	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
 	    (skb->len <= dst_mtu(&rt->dst) &&
 	     ip_dont_fragment(sk, &rt->dst)))
 		df = htons(IP_DF);
@@ -1316,7 +1323,9 @@
 	if (cork->flags & IPCORK_OPT)
 		opt = cork->opt;
 
-	if (rt->rt_type == RTN_MULTICAST)
+	if (cork->ttl != 0)
+		ttl = cork->ttl;
+	else if (rt->rt_type == RTN_MULTICAST)
 		ttl = inet->mc_ttl;
 	else
 		ttl = ip_select_ttl(inet, &rt->dst);
@@ -1324,7 +1333,7 @@
 	iph = ip_hdr(skb);
 	iph->version = 4;
 	iph->ihl = 5;
-	iph->tos = inet->tos;
+	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
 	iph->frag_off = df;
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
@@ -1336,7 +1345,7 @@
 		ip_options_build(skb, opt, cork->addr, rt, 0);
 	}
 
-	skb->priority = sk->sk_priority;
+	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
 	skb->mark = sk->sk_mark;
 	/*
 	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
@@ -1486,6 +1495,8 @@
 	ipc.addr = daddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
 
 	if (replyopts.opt.opt.optlen) {
 		ipc.opt = &replyopts.opt;
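
The maxnonfragsize checks added above are visible from user space: once path-MTU discovery is enabled, a datagram larger than the path MTU fails fast with EMSGSIZE instead of being fragmented locally. A hedged user-space sketch (fd is assumed to be a prepared UDP socket, dst a reachable destination):

	#include <errno.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static void demo_emsgsize(int fd, const struct sockaddr_in *dst)
	{
		static char buf[9000];	/* larger than a 1500-byte path MTU */
		int val = IP_PMTUDISC_DO;

		setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
		if (sendto(fd, buf, sizeof(buf), 0,
			   (const struct sockaddr *)dst, sizeof(*dst)) < 0 &&
		    errno == EMSGSIZE)
			fprintf(stderr, "oversized send rejected, as expected\n");
	}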
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d9c4f11..3f85826 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -189,7 +189,7 @@
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 {
-	int err;
+	int err, val;
 	struct cmsghdr *cmsg;
 
 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
@@ -215,6 +215,24 @@
 			ipc->addr = info->ipi_spec_dst.s_addr;
 			break;
 		}
+		case IP_TTL:
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+				return -EINVAL;
+			val = *(int *)CMSG_DATA(cmsg);
+			if (val < 1 || val > 255)
+				return -EINVAL;
+			ipc->ttl = val;
+			break;
+		case IP_TOS:
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+				return -EINVAL;
+			val = *(int *)CMSG_DATA(cmsg);
+			if (val < 0 || val > 255)
+				return -EINVAL;
+			ipc->tos = val;
+			ipc->priority = rt_tos2priority(ipc->tos);
+			break;
+
 		default:
 			return -EINVAL;
 		}
@@ -609,7 +627,7 @@
 		inet->nodefrag = val ? 1 : 0;
 		break;
 	case IP_MTU_DISCOVER:
-		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
+		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_INTERFACE)
 			goto e_inval;
 		inet->pmtudisc = val;
 		break;
@@ -1034,11 +1052,12 @@
  * destination in skb->cb[] before dst drop.
  * This way, the receiver doesn't take cache line misses to read rtable.
  */
-void ipv4_pktinfo_prepare(struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
 	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
 
-	if (skb_rtable(skb)) {
+	if ((inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) &&
+	    skb_rtable(skb)) {
 		pktinfo->ipi_ifindex = inet_iif(skb);
 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
 	} else {
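
The new IP_TTL and IP_TOS cmsg cases let a sender override TTL and TOS per datagram rather than per socket. A user-space sketch of what this enables; send_with_ttl() is a hypothetical helper, not an API from the patch:

	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>

	static ssize_t send_with_ttl(int fd, const void *buf, size_t len,
				     const struct sockaddr_in *dst, int ttl)
	{
		char cbuf[CMSG_SPACE(sizeof(int))];
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
		struct msghdr msg = {
			.msg_name	= (void *)dst,
			.msg_namelen	= sizeof(*dst),
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= cbuf,
			.msg_controllen	= sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		memset(cbuf, 0, sizeof(cbuf));
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = IPPROTO_IP;
		cmsg->cmsg_type	 = IP_TTL;	/* kernel accepts 1..255 */
		cmsg->cmsg_len	 = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &ttl, sizeof(int));

		return sendmsg(fd, &msg, 0);
	}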
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 63a6d6d..caf01176 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -976,13 +976,19 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct iphdr *iph = &tunnel->parms.iph;
-	int err;
+	int i, err;
 
 	dev->destructor	= ip_tunnel_dev_free;
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ipt_stats;
+		ipt_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipt_stats->syncp);
+	}
+
 	err = gro_cells_init(&tunnel->gro_cells, dev);
 	if (err) {
 		free_percpu(dev->tstats);
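
u64_stats_init() matters on 32-bit SMP, where syncp is a real seqcount guarding 64-bit counters. A sketch of the lockless reader that this initialization keeps safe (field names per struct pcpu_tstats; the reader itself is not part of this hunk):

	#include <linux/netdevice.h>
	#include <linux/u64_stats_sync.h>

	static u64 read_rx_packets(struct net_device *dev, int cpu)
	{
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, cpu);
		unsigned int start;
		u64 rx_packets;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		return rx_packets;
	}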
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c31e3ad..42ffbc8 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -116,3 +116,36 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
+					 bool csum_help,
+					 int gso_type_mask)
+{
+	int err;
+
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= gso_type_mask;
+		return skb;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+		err = skb_checksum_help(skb);
+		if (unlikely(err))
+			goto error;
+	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
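
iptunnel_handle_offloads() centralizes GSO and checksum bookkeeping for encapsulating drivers. Note the ownership rule: on failure the helper frees the skb and returns an ERR_PTR, so callers must not free it again. The general calling pattern, as the ipip conversion below also shows:

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		return NETDEV_TX_OK;	/* skb already freed by the helper */
	/* ... encapsulate and transmit ... */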
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 6e87f85..5d9c845 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -49,70 +49,6 @@
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
 
-static int vti_err(struct sk_buff *skb, u32 info)
-{
-
-	/* All the routers (except for Linux) return only
-	 * 8 bytes of packet payload. It means, that precise relaying of
-	 * ICMP in the real Internet is absolutely infeasible.
-	 */
-	struct net *net = dev_net(skb->dev);
-	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	const int type = icmp_hdr(skb)->type;
-	const int code = icmp_hdr(skb)->code;
-	struct ip_tunnel *t;
-	int err;
-
-	switch (type) {
-	default:
-	case ICMP_PARAMETERPROB:
-		return 0;
-
-	case ICMP_DEST_UNREACH:
-		switch (code) {
-		case ICMP_SR_FAILED:
-		case ICMP_PORT_UNREACH:
-			/* Impossible event. */
-			return 0;
-		default:
-			/* All others are translated to HOST_UNREACH. */
-			break;
-		}
-		break;
-	case ICMP_TIME_EXCEEDED:
-		if (code != ICMP_EXC_TTL)
-			return 0;
-		break;
-	}
-
-	err = -ENOENT;
-
-	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-			     iph->daddr, iph->saddr, 0);
-	if (t == NULL)
-		goto out;
-
-	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->parms.link, 0, IPPROTO_IPIP, 0);
-		err = 0;
-		goto out;
-	}
-
-	err = 0;
-	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-		goto out;
-
-	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
-		t->err_count++;
-	else
-		t->err_count = 1;
-	t->err_time = jiffies;
-out:
-	return err;
-}
-
 /* We don't digest the packet, therefore let the packet pass */
 static int vti_rcv(struct sk_buff *skb)
 {
@@ -304,9 +240,8 @@
 	iph->ihl		= 5;
 }
 
-static struct xfrm_tunnel vti_handler __read_mostly = {
+static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
 	.handler	=	vti_rcv,
-	.err_handler	=	vti_err,
 	.priority	=	1,
 };
 
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7f80fb4..fe3e9f7 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -220,17 +220,17 @@
 	if (unlikely(skb->protocol != htons(ETH_P_IP)))
 		goto tx_error;
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+	if (IS_ERR(skb))
+		goto out;
 
 	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
 	return NETDEV_TX_OK;
 
 tx_error:
-	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
+out:
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -275,6 +275,7 @@
 #define IPIP_FEATURES (NETIF_F_SG |		\
 		       NETIF_F_FRAGLIST |	\
 		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_GSO_SOFTWARE |	\
 		       NETIF_F_HW_CSUM)
 
 static void ipip_tunnel_setup(struct net_device *dev)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1657e39b..40d5607 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,6 +36,27 @@
 
 	  If unsure, say Y.
 
+config NF_TABLES_IPV4
+	depends on NF_TABLES
+	tristate "IPv4 nf_tables support"
+
+config NFT_REJECT_IPV4
+	depends on NF_TABLES_IPV4
+	tristate "nf_tables IPv4 reject support"
+
+config NFT_CHAIN_ROUTE_IPV4
+	depends on NF_TABLES_IPV4
+	tristate "IPv4 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV4
+	depends on NF_TABLES_IPV4
+	depends on NF_NAT_IPV4 && NFT_NAT
+	tristate "IPv4 nf_tables nat chain support"
+
+config NF_TABLES_ARP
+	depends on NF_TABLES
+	tristate "ARP nf_tables support"
+
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 3622b24..19df72b 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -27,6 +27,12 @@
 # NAT protocols (nf_nat)
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 
+obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index a865f6f..802ddec 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -27,13 +27,14 @@
 
 /* The work comes in here from netfilter.c */
 static unsigned int
-arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		     const struct net_device *in, const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
 	const struct net *net = dev_net((in != NULL) ? in : out);
 
-	return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
+	return arpt_do_table(skb, ops->hooknum, in, out,
+			     net->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops *arpfilter_ops __read_mostly;
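
This is the first of many mechanical conversions in this series: netfilter hooks now receive the nf_hook_ops they were registered with and read the hook number from ops->hooknum instead of a separate parameter. A minimal sketch of a hook in the new shape (my_hook and my_ops are illustrative):

	#include <linux/module.h>
	#include <linux/netfilter.h>
	#include <linux/netfilter_ipv4.h>
	#include <linux/skbuff.h>

	static unsigned int my_hook(const struct nf_hook_ops *ops,
				    struct sk_buff *skb,
				    const struct net_device *in,
				    const struct net_device *out,
				    int (*okfn)(struct sk_buff *))
	{
		pr_debug("packet on hook %u\n", ops->hooknum);
		return NF_ACCEPT;	/* never steal or drop in this sketch */
	}

	static struct nf_hook_ops my_ops __read_mostly = {
		.hook		= my_hook,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_FIRST,
	};
	/* registered with nf_register_hook(&my_ops) as before */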
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 0b732ef..2510c02 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -28,6 +28,7 @@
 #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/checksum.h>
 #include <net/ip.h>
 
@@ -57,16 +58,22 @@
 	struct rcu_head rcu;
 };
 
-static LIST_HEAD(clusterip_configs);
-
-/* clusterip_lock protects the clusterip_configs list */
-static DEFINE_SPINLOCK(clusterip_lock);
-
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
-static struct proc_dir_entry *clusterip_procdir;
 #endif
 
+static int clusterip_net_id __read_mostly;
+
+struct clusterip_net {
+	struct list_head configs;
+	/* lock protects the configs list */
+	spinlock_t lock;
+
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *procdir;
+#endif
+};
+
 static inline void
 clusterip_config_get(struct clusterip_config *c)
 {
@@ -92,10 +99,13 @@
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
+	struct net *net = dev_net(c->dev);
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+
 	local_bh_disable();
-	if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+	if (atomic_dec_and_lock(&c->entries, &cn->lock)) {
 		list_del_rcu(&c->list);
-		spin_unlock(&clusterip_lock);
+		spin_unlock(&cn->lock);
 		local_bh_enable();
 
 		dev_mc_del(c->dev, c->clustermac);
@@ -113,11 +123,12 @@
 }
 
 static struct clusterip_config *
-__clusterip_config_find(__be32 clusterip)
+__clusterip_config_find(struct net *net, __be32 clusterip)
 {
 	struct clusterip_config *c;
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
-	list_for_each_entry_rcu(c, &clusterip_configs, list) {
+	list_for_each_entry_rcu(c, &cn->configs, list) {
 		if (c->clusterip == clusterip)
 			return c;
 	}
@@ -126,12 +137,12 @@
 }
 
 static inline struct clusterip_config *
-clusterip_config_find_get(__be32 clusterip, int entry)
+clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 {
 	struct clusterip_config *c;
 
 	rcu_read_lock_bh();
-	c = __clusterip_config_find(clusterip);
+	c = __clusterip_config_find(net, clusterip);
 	if (c) {
 		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
 			c = NULL;
@@ -158,6 +169,7 @@
 			struct net_device *dev)
 {
 	struct clusterip_config *c;
+	struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
 
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
@@ -180,7 +192,7 @@
 		/* create proc dir entry */
 		sprintf(buffer, "%pI4", &ip);
 		c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
-					  clusterip_procdir,
+					  cn->procdir,
 					  &clusterip_proc_fops, c);
 		if (!c->pde) {
 			kfree(c);
@@ -189,9 +201,9 @@
 	}
 #endif
 
-	spin_lock_bh(&clusterip_lock);
-	list_add_rcu(&c->list, &clusterip_configs);
-	spin_unlock_bh(&clusterip_lock);
+	spin_lock_bh(&cn->lock);
+	list_add_rcu(&c->list, &cn->configs);
+	spin_unlock_bh(&cn->lock);
 
 	return c;
 }
@@ -370,7 +382,7 @@
 
 	/* FIXME: further sanity checks */
 
-	config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
+	config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
 	if (!config) {
 		if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
 			pr_info("no config found for %pI4, need 'new'\n",
@@ -384,7 +396,7 @@
 				return -EINVAL;
 			}
 
-			dev = dev_get_by_name(&init_net, e->ip.iniface);
+			dev = dev_get_by_name(par->net, e->ip.iniface);
 			if (!dev) {
 				pr_info("no such interface %s\n",
 					e->ip.iniface);
@@ -483,7 +495,7 @@
 #endif
 
 static unsigned int
-arp_mangle(unsigned int hook,
+arp_mangle(const struct nf_hook_ops *ops,
 	   struct sk_buff *skb,
 	   const struct net_device *in,
 	   const struct net_device *out,
@@ -492,6 +504,7 @@
 	struct arphdr *arp = arp_hdr(skb);
 	struct arp_payload *payload;
 	struct clusterip_config *c;
+	struct net *net = dev_net(in ? in : out);
 
 	/* we don't care about non-ethernet and non-ipv4 ARP */
 	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -508,7 +521,7 @@
 
 	/* if there is no clusterip configuration for the arp reply's
 	 * source ip, we don't want to mangle it */
-	c = clusterip_config_find_get(payload->src_ip, 0);
+	c = clusterip_config_find_get(net, payload->src_ip, 0);
 	if (!c)
 		return NF_ACCEPT;
 
@@ -698,48 +711,75 @@
 
 #endif /* CONFIG_PROC_FS */
 
+static int clusterip_net_init(struct net *net)
+{
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+
+	INIT_LIST_HEAD(&cn->configs);
+
+	spin_lock_init(&cn->lock);
+
+#ifdef CONFIG_PROC_FS
+	cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
+	if (!cn->procdir) {
+		pr_err("Unable to create proc dir entry\n");
+		return -ENOMEM;
+	}
+#endif /* CONFIG_PROC_FS */
+
+	return 0;
+}
+
+static void clusterip_net_exit(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	proc_remove(cn->procdir);
+#endif
+}
+
+static struct pernet_operations clusterip_net_ops = {
+	.init = clusterip_net_init,
+	.exit = clusterip_net_exit,
+	.id   = &clusterip_net_id,
+	.size = sizeof(struct clusterip_net),
+};
+
 static int __init clusterip_tg_init(void)
 {
 	int ret;
 
-	ret = xt_register_target(&clusterip_tg_reg);
+	ret = register_pernet_subsys(&clusterip_net_ops);
 	if (ret < 0)
 		return ret;
 
+	ret = xt_register_target(&clusterip_tg_reg);
+	if (ret < 0)
+		goto cleanup_subsys;
+
 	ret = nf_register_hook(&cip_arp_ops);
 	if (ret < 0)
 		goto cleanup_target;
 
-#ifdef CONFIG_PROC_FS
-	clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
-	if (!clusterip_procdir) {
-		pr_err("Unable to proc dir entry\n");
-		ret = -ENOMEM;
-		goto cleanup_hook;
-	}
-#endif /* CONFIG_PROC_FS */
-
 	pr_info("ClusterIP Version %s loaded successfully\n",
 		CLUSTERIP_VERSION);
+
 	return 0;
 
-#ifdef CONFIG_PROC_FS
-cleanup_hook:
-	nf_unregister_hook(&cip_arp_ops);
-#endif /* CONFIG_PROC_FS */
 cleanup_target:
 	xt_unregister_target(&clusterip_tg_reg);
+cleanup_subsys:
+	unregister_pernet_subsys(&clusterip_net_ops);
 	return ret;
 }
 
 static void __exit clusterip_tg_exit(void)
 {
 	pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
-#ifdef CONFIG_PROC_FS
-	proc_remove(clusterip_procdir);
-#endif
+
 	nf_unregister_hook(&cip_arp_ops);
 	xt_unregister_target(&clusterip_tg_reg);
+	unregister_pernet_subsys(&clusterip_net_ops);
 
 	/* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
 	rcu_barrier_bh();
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index b6346bf..01cffea 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -297,7 +297,7 @@
 	return XT_CONTINUE;
 }
 
-static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
 				       struct sk_buff *skb,
 				       const struct net_device *in,
 				       const struct net_device *out,
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 50af5b4..e08a74a 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -33,20 +33,21 @@
 };
 
 static unsigned int
-iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		    const struct net_device *in, const struct net_device *out,
 		    int (*okfn)(struct sk_buff *))
 {
 	const struct net *net;
 
-	if (hook == NF_INET_LOCAL_OUT &&
+	if (ops->hooknum == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* root is playing with raw sockets. */
 		return NF_ACCEPT;
 
 	net = dev_net((in != NULL) ? in : out);
-	return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
+	return ipt_do_table(skb, ops->hooknum, in, out,
+			    net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 0d8cd82..6a5079c 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -79,19 +79,19 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_mangle_hook(unsigned int hook,
+iptable_mangle_hook(const struct nf_hook_ops *ops,
 		     struct sk_buff *skb,
 		     const struct net_device *in,
 		     const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
-	if (hook == NF_INET_LOCAL_OUT)
+	if (ops->hooknum == NF_INET_LOCAL_OUT)
 		return ipt_mangle_out(skb, out);
-	if (hook == NF_INET_POST_ROUTING)
-		return ipt_do_table(skb, hook, in, out,
+	if (ops->hooknum == NF_INET_POST_ROUTING)
+		return ipt_do_table(skb, ops->hooknum, in, out,
 				    dev_net(out)->ipv4.iptable_mangle);
 	/* PREROUTING/INPUT/FORWARD: */
-	return ipt_do_table(skb, hook, in, out,
+	return ipt_do_table(skb, ops->hooknum, in, out,
 			    dev_net(in)->ipv4.iptable_mangle);
 }
 
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 683bfaf..ee28861 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -61,7 +61,7 @@
 }
 
 static unsigned int
-nf_nat_ipv4_fn(unsigned int hooknum,
+nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
 	       struct sk_buff *skb,
 	       const struct net_device *in,
 	       const struct net_device *out,
@@ -71,7 +71,7 @@
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn_nat *nat;
 	/* maniptype == SRC for postrouting. */
-	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
 
 	/* We never see fragments: conntrack defrags on pre-routing
 	 * and local-out, and nf_nat_out protects post-routing.
@@ -108,7 +108,7 @@
 	case IP_CT_RELATED_REPLY:
 		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
-							   hooknum))
+							   ops->hooknum))
 				return NF_DROP;
 			else
 				return NF_ACCEPT;
@@ -121,14 +121,14 @@
 		if (!nf_nat_initialized(ct, maniptype)) {
 			unsigned int ret;
 
-			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+			ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
 			if (ret != NF_ACCEPT)
 				return ret;
 		} else {
 			pr_debug("Already setup manip %s for ct %p\n",
 				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 				 ct);
-			if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+			if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
 				goto oif_changed;
 		}
 		break;
@@ -137,11 +137,11 @@
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
 			     ctinfo == IP_CT_ESTABLISHED_REPLY);
-		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+		if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
 			goto oif_changed;
 	}
 
-	return nf_nat_packet(ct, ctinfo, hooknum, skb);
+	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
 	nf_ct_kill_acct(ct, ctinfo, skb);
@@ -149,7 +149,7 @@
 }
 
 static unsigned int
-nf_nat_ipv4_in(unsigned int hooknum,
+nf_nat_ipv4_in(const struct nf_hook_ops *ops,
 	       struct sk_buff *skb,
 	       const struct net_device *in,
 	       const struct net_device *out,
@@ -158,7 +158,7 @@
 	unsigned int ret;
 	__be32 daddr = ip_hdr(skb)->daddr;
 
-	ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    daddr != ip_hdr(skb)->daddr)
 		skb_dst_drop(skb);
@@ -167,7 +167,7 @@
 }
 
 static unsigned int
-nf_nat_ipv4_out(unsigned int hooknum,
+nf_nat_ipv4_out(const struct nf_hook_ops *ops,
 		struct sk_buff *skb,
 		const struct net_device *in,
 		const struct net_device *out,
@@ -185,7 +185,7 @@
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -207,7 +207,7 @@
 }
 
 static unsigned int
-nf_nat_ipv4_local_fn(unsigned int hooknum,
+nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
 		     struct sk_buff *skb,
 		     const struct net_device *in,
 		     const struct net_device *out,
@@ -223,7 +223,7 @@
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 1f82aea..b2f7e8f 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -20,20 +20,20 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
+iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		 const struct net_device *in, const struct net_device *out,
 		 int (*okfn)(struct sk_buff *))
 {
 	const struct net *net;
 
-	if (hook == NF_INET_LOCAL_OUT && 
+	if (ops->hooknum == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* root is playing with raw sockets. */
 		return NF_ACCEPT;
 
 	net = dev_net((in != NULL) ? in : out);
-	return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
+	return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index f867a8d..c86647e 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -37,21 +37,22 @@
 };
 
 static unsigned int
-iptable_security_hook(unsigned int hook, struct sk_buff *skb,
+iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		      const struct net_device *in,
 		      const struct net_device *out,
 		      int (*okfn)(struct sk_buff *))
 {
 	const struct net *net;
 
-	if (hook == NF_INET_LOCAL_OUT &&
+	if (ops->hooknum == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* Somebody is playing with raw sockets. */
 		return NF_ACCEPT;
 
 	net = dev_net((in != NULL) ? in : out);
-	return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
+	return ipt_do_table(skb, ops->hooknum, in, out,
+			    net->ipv4.iptable_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 86f5b34..ecd8bec 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -92,7 +92,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int ipv4_helper(unsigned int hooknum,
+static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
 				struct sk_buff *skb,
 				const struct net_device *in,
 				const struct net_device *out,
@@ -121,7 +121,7 @@
 			    ct, ctinfo);
 }
 
-static unsigned int ipv4_confirm(unsigned int hooknum,
+static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
 				 struct sk_buff *skb,
 				 const struct net_device *in,
 				 const struct net_device *out,
@@ -147,16 +147,16 @@
 	return nf_conntrack_confirm(skb);
 }
 
-static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
 				      struct sk_buff *skb,
 				      const struct net_device *in,
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
 {
-	return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
+	return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
 }
 
-static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
 					 struct sk_buff *skb,
 					 const struct net_device *in,
 					 const struct net_device *out,
@@ -166,7 +166,7 @@
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
+	return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 7428155..12e13bd 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -60,7 +60,7 @@
 		return IP_DEFRAG_CONNTRACK_OUT + zone;
 }
 
-static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
 					  struct sk_buff *skb,
 					  const struct net_device *in,
 					  const struct net_device *out,
@@ -83,7 +83,9 @@
 #endif
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+		enum ip_defrag_users user =
+			nf_ct_defrag_user(ops->hooknum, skb);
+
 		if (nf_ct_ipv4_gather_frags(skb, user))
 			return NF_STOLEN;
 	}
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
new file mode 100644
index 0000000..3e67ef1
--- /dev/null
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2008-2010 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter_arp.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_arp __read_mostly = {
+	.family		= NFPROTO_ARP,
+	.nhooks		= NF_ARP_NUMHOOKS,
+	.owner		= THIS_MODULE,
+};
+
+static int nf_tables_arp_init_net(struct net *net)
+{
+	net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.arp == NULL)
+		return -ENOMEM;
+
+	memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
+
+	if (nft_register_afinfo(net, net->nft.arp) < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(net->nft.arp);
+	return -ENOMEM;
+}
+
+static void nf_tables_arp_exit_net(struct net *net)
+{
+	nft_unregister_afinfo(net->nft.arp);
+	kfree(net->nft.arp);
+}
+
+static struct pernet_operations nf_tables_arp_net_ops = {
+	.init   = nf_tables_arp_init_net,
+	.exit   = nf_tables_arp_exit_net,
+};
+
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+		  struct sk_buff *skb,
+		  const struct net_device *in,
+		  const struct net_device *out,
+		  int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_arp = {
+	.family		= NFPROTO_ARP,
+	.name		= "filter",
+	.type		= NFT_CHAIN_T_DEFAULT,
+	.hook_mask	= (1 << NF_ARP_IN) |
+			  (1 << NF_ARP_OUT) |
+			  (1 << NF_ARP_FORWARD),
+	.fn		= {
+		[NF_ARP_IN]		= nft_do_chain_arp,
+		[NF_ARP_OUT]		= nft_do_chain_arp,
+		[NF_ARP_FORWARD]	= nft_do_chain_arp,
+	},
+};
+
+static int __init nf_tables_arp_init(void)
+{
+	int ret;
+
+	nft_register_chain_type(&filter_arp);
+	ret = register_pernet_subsys(&nf_tables_arp_net_ops);
+	if (ret < 0)
+		nft_unregister_chain_type(&filter_arp);
+
+	return ret;
+}
+
+static void __exit nf_tables_arp_exit(void)
+{
+	unregister_pernet_subsys(&nf_tables_arp_net_ops);
+	nft_unregister_chain_type(&filter_arp);
+}
+
+module_init(nf_tables_arp_init);
+module_exit(nf_tables_arp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
new file mode 100644
index 0000000..0f4cbfe
--- /dev/null
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+
+static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
+				    struct sk_buff *skb,
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	if (unlikely(skb->len < sizeof(struct iphdr) ||
+		     ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
+		if (net_ratelimit())
+			pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
+				"packet\n");
+		return NF_ACCEPT;
+	}
+	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv4 __read_mostly = {
+	.family		= NFPROTO_IPV4,
+	.nhooks		= NF_INET_NUMHOOKS,
+	.owner		= THIS_MODULE,
+	.hooks		= {
+		[NF_INET_LOCAL_OUT]	= nft_ipv4_output,
+	},
+};
+
+static int nf_tables_ipv4_init_net(struct net *net)
+{
+	net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.ipv4 == NULL)
+		return -ENOMEM;
+
+	memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
+
+	if (nft_register_afinfo(net, net->nft.ipv4) < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(net->nft.ipv4);
+	return -ENOMEM;
+}
+
+static void nf_tables_ipv4_exit_net(struct net *net)
+{
+	nft_unregister_afinfo(net->nft.ipv4);
+	kfree(net->nft.ipv4);
+}
+
+static struct pernet_operations nf_tables_ipv4_net_ops = {
+	.init	= nf_tables_ipv4_init_net,
+	.exit	= nf_tables_ipv4_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+		  struct sk_buff *skb,
+		  const struct net_device *in,
+		  const struct net_device *out,
+		  int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv4 = {
+	.family		= NFPROTO_IPV4,
+	.name		= "filter",
+	.type		= NFT_CHAIN_T_DEFAULT,
+	.hook_mask	= (1 << NF_INET_LOCAL_IN) |
+			  (1 << NF_INET_LOCAL_OUT) |
+			  (1 << NF_INET_FORWARD) |
+			  (1 << NF_INET_PRE_ROUTING) |
+			  (1 << NF_INET_POST_ROUTING),
+	.fn		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
+		[NF_INET_LOCAL_OUT]	= nft_ipv4_output,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
+	},
+};
+
+static int __init nf_tables_ipv4_init(void)
+{
+	nft_register_chain_type(&filter_ipv4);
+	return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+}
+
+static void __exit nf_tables_ipv4_exit(void)
+{
+	unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
+	nft_unregister_chain_type(&filter_ipv4);
+}
+
+module_init(nf_tables_ipv4_init);
+module_exit(nf_tables_ipv4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET);
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
new file mode 100644
index 0000000..cf2c792
--- /dev/null
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+/*
+ * NAT chains
+ */
+
+static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
+			      struct sk_buff *skb,
+			      const struct net_device *in,
+			      const struct net_device *out,
+			      int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+	struct nf_conn_nat *nat;
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+	struct nft_pktinfo pkt;
+	unsigned int ret;
+
+	if (ct == NULL || nf_ct_is_untracked(ct))
+		return NF_ACCEPT;
+
+	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
+
+	nat = nfct_nat(ct);
+	if (nat == NULL) {
+		/* Conntrack module was loaded late, can't add extension. */
+		if (nf_ct_is_confirmed(ct))
+			return NF_ACCEPT;
+		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+		if (nat == NULL)
+			return NF_ACCEPT;
+	}
+
+	switch (ctinfo) {
+	case IP_CT_RELATED:
+	case IP_CT_RELATED + IP_CT_IS_REPLY:
+		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+							   ops->hooknum))
+				return NF_DROP;
+			else
+				return NF_ACCEPT;
+		}
+		/* Fall through */
+	case IP_CT_NEW:
+		if (nf_nat_initialized(ct, maniptype))
+			break;
+
+		nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+		ret = nft_do_chain_pktinfo(&pkt, ops);
+		if (ret != NF_ACCEPT)
+			return ret;
+		if (!nf_nat_initialized(ct, maniptype)) {
+			ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+			if (ret != NF_ACCEPT)
+				return ret;
+		}
+	default:
+		break;
+	}
+
+	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
+				      struct sk_buff *skb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	__be32 daddr = ip_hdr(skb)->daddr;
+	unsigned int ret;
+
+	ret = nf_nat_fn(ops, skb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    ip_hdr(skb)->daddr != daddr) {
+		skb_dst_drop(skb);
+	}
+	return ret;
+}
+
+static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
+				       struct sk_buff *skb,
+				       const struct net_device *in,
+				       const struct net_device *out,
+				       int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo __maybe_unused;
+	const struct nf_conn *ct __maybe_unused;
+	unsigned int ret;
+
+	ret = nf_nat_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (ct->tuplehash[dir].tuple.src.u3.ip !=
+		    ct->tuplehash[!dir].tuple.dst.u3.ip ||
+		    ct->tuplehash[dir].tuple.src.u.all !=
+		    ct->tuplehash[!dir].tuple.dst.u.all)
+			return nf_xfrm_me_harder(skb, AF_INET) == 0 ?
+								ret : NF_DROP;
+	}
+#endif
+	return ret;
+}
+
+static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
+				  struct sk_buff *skb,
+				  const struct net_device *in,
+				  const struct net_device *out,
+				  int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn *ct;
+	unsigned int ret;
+
+	ret = nf_nat_fn(ops, skb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+		    ct->tuplehash[!dir].tuple.src.u3.ip) {
+			if (ip_route_me_harder(skb, RTN_UNSPEC))
+				ret = NF_DROP;
+		}
+#ifdef CONFIG_XFRM
+		else if (ct->tuplehash[dir].tuple.dst.u.all !=
+			 ct->tuplehash[!dir].tuple.src.u.all)
+			if (nf_xfrm_me_harder(skb, AF_INET))
+				ret = NF_DROP;
+#endif
+	}
+	return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv4 = {
+	.family		= NFPROTO_IPV4,
+	.name		= "nat",
+	.type		= NFT_CHAIN_T_NAT,
+	.hook_mask	= (1 << NF_INET_PRE_ROUTING) |
+			  (1 << NF_INET_POST_ROUTING) |
+			  (1 << NF_INET_LOCAL_OUT) |
+			  (1 << NF_INET_LOCAL_IN),
+	.fn		= {
+		[NF_INET_PRE_ROUTING]	= nf_nat_prerouting,
+		[NF_INET_POST_ROUTING]	= nf_nat_postrouting,
+		[NF_INET_LOCAL_OUT]	= nf_nat_output,
+		[NF_INET_LOCAL_IN]	= nf_nat_fn,
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init nft_chain_nat_init(void)
+{
+	int err;
+
+	err = nft_register_chain_type(&nft_chain_nat_ipv4);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void __exit nft_chain_nat_exit(void)
+{
+	nft_unregister_chain_type(&nft_chain_nat_ipv4);
+}
+
+module_init(nft_chain_nat_init);
+module_exit(nft_chain_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
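
Of the four hooks wired up above, HOOK2MANIP() (defined in the kernel's NAT headers, not part of this diff) decides which side of the connection gets rewritten: hooks that run before the routing decision (PRE_ROUTING, LOCAL_OUT) perform destination NAT, while hooks that run after it (POST_ROUTING, LOCAL_IN) perform source NAT. A standalone userspace model of that mapping, a sketch rather than kernel code, with the hook numbers copied from the netfilter uapi header:

    #include <assert.h>
    #include <stdio.h>

    /* Hook numbers as in include/uapi/linux/netfilter.h */
    enum {
        NF_INET_PRE_ROUTING  = 0,
        NF_INET_LOCAL_IN     = 1,
        NF_INET_FORWARD      = 2,
        NF_INET_LOCAL_OUT    = 3,
        NF_INET_POST_ROUTING = 4,
    };

    enum nat_manip { MANIP_SRC, MANIP_DST };

    /* Model of HOOK2MANIP(): everything that is not POST_ROUTING or
     * LOCAL_IN manipulates the destination side. */
    static enum nat_manip hook2manip(int hooknum)
    {
        return (hooknum != NF_INET_POST_ROUTING &&
                hooknum != NF_INET_LOCAL_IN) ? MANIP_DST : MANIP_SRC;
    }

    int main(void)
    {
        assert(hook2manip(NF_INET_PRE_ROUTING)  == MANIP_DST);
        assert(hook2manip(NF_INET_LOCAL_OUT)    == MANIP_DST);
        assert(hook2manip(NF_INET_POST_ROUTING) == MANIP_SRC);
        assert(hook2manip(NF_INET_LOCAL_IN)     == MANIP_SRC);
        puts("hook-to-manip mapping matches the chain registered above");
        return 0;
    }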
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
new file mode 100644
index 0000000..4e6bf9a
--- /dev/null
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+					struct sk_buff *skb,
+					const struct net_device *in,
+					const struct net_device *out,
+					int (*okfn)(struct sk_buff *))
+{
+	unsigned int ret;
+	struct nft_pktinfo pkt;
+	u32 mark;
+	__be32 saddr, daddr;
+	u_int8_t tos;
+	const struct iphdr *iph;
+
+	/* root is playing with raw sockets. */
+	if (skb->len < sizeof(struct iphdr) ||
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
+		return NF_ACCEPT;
+
+	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+	mark = skb->mark;
+	iph = ip_hdr(skb);
+	saddr = iph->saddr;
+	daddr = iph->daddr;
+	tos = iph->tos;
+
+	ret = nft_do_chain_pktinfo(&pkt, ops);
+	if (ret != NF_DROP && ret != NF_QUEUE) {
+		iph = ip_hdr(skb);
+
+		if (iph->saddr != saddr ||
+		    iph->daddr != daddr ||
+		    skb->mark != mark ||
+		    iph->tos != tos)
+			if (ip_route_me_harder(skb, RTN_UNSPEC))
+				ret = NF_DROP;
+	}
+	return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv4 = {
+	.family		= NFPROTO_IPV4,
+	.name		= "route",
+	.type		= NFT_CHAIN_T_ROUTE,
+	.hook_mask	= (1 << NF_INET_LOCAL_OUT),
+	.fn		= {
+		[NF_INET_LOCAL_OUT]	= nf_route_table_hook,
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+	return nft_register_chain_type(&nft_chain_route_ipv4);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+	nft_unregister_chain_type(&nft_chain_route_ipv4);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
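
The route chain hook above reduces to: snapshot every field that feeds the route lookup, run the chain, and call ip_route_me_harder() only if one of them changed. The same comparison isolated as a standalone sketch; the struct below is an illustrative stand-in for the skb/iphdr state, not a kernel type:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative snapshot of the fields nf_route_table_hook() saves. */
    struct route_keys {
        uint32_t mark;   /* skb->mark */
        uint32_t saddr;  /* iph->saddr */
        uint32_t daddr;  /* iph->daddr */
        uint8_t  tos;    /* iph->tos */
    };

    /* A change to any routing-relevant field forces a re-route. */
    static bool needs_reroute(const struct route_keys *before,
                              const struct route_keys *after)
    {
        return before->saddr != after->saddr ||
               before->daddr != after->daddr ||
               before->mark  != after->mark  ||
               before->tos   != after->tos;
    }

    int main(void)
    {
        struct route_keys before = { .mark = 0, .saddr = 1, .daddr = 2, .tos = 0 };
        struct route_keys after = before;

        assert(!needs_reroute(&before, &after));
        after.mark = 0x42;  /* e.g. the chain set a new fwmark */
        assert(needs_reroute(&before, &after));
        return 0;
    }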
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 0000000..fff5ba1
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+
+struct nft_reject {
+	enum nft_reject_types	type:8;
+	u8			icmp_code;
+};
+
+static void nft_reject_eval(const struct nft_expr *expr,
+			      struct nft_data data[NFT_REG_MAX + 1],
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_reject *priv = nft_expr_priv(expr);
+
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		icmp_send(pkt->skb, ICMP_DEST_UNREACH, priv->icmp_code, 0);
+		break;
+	case NFT_REJECT_TCP_RST:
+		break;
+	}
+
+	data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+	[NFTA_REJECT_TYPE]		= { .type = NLA_U32 },
+	[NFTA_REJECT_ICMP_CODE]		= { .type = NLA_U8 },
+};
+
+static int nft_reject_init(const struct nft_ctx *ctx,
+			   const struct nft_expr *expr,
+			   const struct nlattr * const tb[])
+{
+	struct nft_reject *priv = nft_expr_priv(expr);
+
+	if (tb[NFTA_REJECT_TYPE] == NULL)
+		return -EINVAL;
+
+	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+			return -EINVAL;
+		priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+	case NFT_REJECT_TCP_RST:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_reject *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+		goto nla_put_failure;
+
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+			goto nla_put_failure;
+		break;
+	}
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_reject_type;
+static const struct nft_expr_ops nft_reject_ops = {
+	.type		= &nft_reject_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+	.eval		= nft_reject_eval,
+	.init		= nft_reject_init,
+	.dump		= nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_type __read_mostly = {
+	.name		= "reject",
+	.ops		= &nft_reject_ops,
+	.policy		= nft_reject_policy,
+	.maxattr	= NFTA_REJECT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_reject_module_init(void)
+{
+	return nft_register_expr(&nft_reject_type);
+}
+
+static void __exit nft_reject_module_exit(void)
+{
+	nft_unregister_expr(&nft_reject_type);
+}
+
+module_init(nft_reject_module_init);
+module_exit(nft_reject_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index d7d9882..cbc85f6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -202,15 +202,14 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		} else if (skb->protocol == htons(ETH_P_IPV6) &&
 			   sk->sk_family == AF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
 
 			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
 				 (int) isk->inet_num,
-				 &inet6_sk(sk)->rcv_saddr,
+				 &sk->sk_v6_rcv_saddr,
 				 sk->sk_bound_dev_if);
 
-			if (!ipv6_addr_any(&np->rcv_saddr) &&
-			    !ipv6_addr_equal(&np->rcv_saddr,
+			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+			    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
 					     &ipv6_hdr(skb)->daddr))
 				continue;
 #endif
@@ -237,11 +236,11 @@
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
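
The loop above is the canonical seqlock read side: sample the sequence counter, copy the pair, and retry if a writer was active in between. A simplified userspace model of the protocol using C11 atomics; the kernel's seqlock_t additionally provides writer serialization and the exact barrier semantics (and race-free data access) that this teaching sketch glosses over:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int seq;  /* even = stable, odd = write in progress */
    static int range[2] = { 32768, 61000 };

    static void write_range(int low, int high)
    {
        atomic_fetch_add_explicit(&seq, 1, memory_order_acquire); /* now odd */
        range[0] = low;
        range[1] = high;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even again */
    }

    static void read_range(int *low, int *high)
    {
        unsigned int s;

        do {
            /* Spin until no write is in progress, then snapshot. */
            while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
                ;
            *low = range[0];
            *high = range[1];
            /* Retry if a writer started or finished meanwhile. */
        } while (atomic_load_explicit(&seq, memory_order_acquire) != s);
    }

    int main(void)
    {
        int lo, hi;

        write_range(10000, 20000);
        read_range(&lo, &hi);
        printf("%d-%d\n", lo, hi);
        return 0;
    }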
 
 
@@ -362,7 +361,7 @@
 	} else if (saddr->sa_family == AF_INET6) {
 		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
 		struct ipv6_pinfo *np = inet6_sk(sk);
-		np->rcv_saddr = np->saddr = addr->sin6_addr;
+		sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
 #endif
 	}
 }
@@ -376,7 +375,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if (sk->sk_family == AF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
-		memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
 		memset(&np->saddr, 0, sizeof(np->saddr));
 #endif
 	}
@@ -416,10 +415,12 @@
 		 (int)sk->sk_bound_dev_if);
 
 	err = 0;
-	if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
-	    (sk->sk_family == AF_INET6 &&
-	     !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
+	if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#endif
 
 	if (snum)
 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -429,7 +430,7 @@
 
 #if IS_ENABLED(CONFIG_IPV6)
 	if (sk->sk_family == AF_INET6)
-		memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
 #endif
 
 	sk_dst_reset(sk);
@@ -713,6 +714,8 @@
 	ipc.opt = NULL;
 	ipc.oif = sk->sk_bound_dev_if;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
 
 	sock_tx_timestamp(sk, &ipc.tx_flags);
 
@@ -744,7 +747,7 @@
 			return -EINVAL;
 		faddr = ipc.opt->opt.faddr;
 	}
-	tos = RT_TOS(inet->tos);
+	tos = get_rttos(&ipc, inet);
 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
 	    (msg->msg_flags & MSG_DONTROUTE) ||
 	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
@@ -1073,7 +1076,7 @@
 EXPORT_SYMBOL_GPL(ping_seq_stop);
 
 static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
-		int bucket, int *len)
+		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
 	__be32 dest = inet->inet_daddr;
@@ -1082,7 +1085,7 @@
 	__u16 srcp = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
@@ -1090,23 +1093,22 @@
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops), len);
+		atomic_read(&sp->sk_drops));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
 {
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-127s\n",
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode ref pointer drops");
 	else {
 		struct ping_iter_state *state = seq->private;
-		int len;
 
-		ping_v4_format_sock(v, seq, state->bucket, &len);
-		seq_printf(seq, "%*s\n", 127 - len, "");
+		ping_v4_format_sock(v, seq, state->bucket);
 	}
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 193db03..41e1d28 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -299,7 +299,7 @@
 {
 	/* Charge it to the socket. */
 
-	ipv4_pktinfo_prepare(skb);
+	ipv4_pktinfo_prepare(sk, skb);
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
@@ -519,6 +519,8 @@
 	ipc.addr = inet->inet_saddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
 	ipc.oif = sk->sk_bound_dev_if;
 
 	if (msg->msg_controllen) {
@@ -558,7 +560,7 @@
 			daddr = ipc.opt->opt.faddr;
 		}
 	}
-	tos = RT_CONN_FLAGS(sk);
+	tos = get_rtconn_flags(&ipc, sk);
 	if (msg->msg_flags & MSG_DONTROUTE)
 		tos |= RTO_ONLINK;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6011615..f428935 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -295,7 +295,7 @@
 	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
 		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
 		   dst_entries_get_slow(&ipv4_dst_ops),
-		   st->in_hit,
+		   0, /* st->in_hit */
 		   st->in_slow_tot,
 		   st->in_slow_mc,
 		   st->in_no_route,
@@ -303,16 +303,16 @@
 		   st->in_martian_dst,
 		   st->in_martian_src,
 
-		   st->out_hit,
+		   0, /* st->out_hit */
 		   st->out_slow_tot,
 		   st->out_slow_mc,
 
-		   st->gc_total,
-		   st->gc_ignored,
-		   st->gc_goal_miss,
-		   st->gc_dst_overflow,
-		   st->in_hlist_search,
-		   st->out_hlist_search
+		   0, /* st->gc_total */
+		   0, /* st->gc_ignored */
+		   0, /* st->gc_goal_miss */
+		   0, /* st->gc_dst_overflow */
+		   0, /* st->in_hlist_search */
+		   0  /* st->out_hlist_search */
 		);
 	return 0;
 }
@@ -1036,6 +1036,10 @@
 	bool new = false;
 
 	bh_lock_sock(sk);
+
+	if (!ip_sk_accept_pmtu(sk))
+		goto out;
+
 	rt = (struct rtable *) __sk_dst_get(sk);
 
 	if (sock_owned_by_user(sk) || !rt) {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 14a15c4..b95331e 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -25,15 +25,7 @@
 
 extern int sysctl_tcp_syncookies;
 
-__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-EXPORT_SYMBOL(syncookie_secret);
-
-static __init int init_syncookies(void)
-{
-	get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
-	return 0;
-}
-__initcall(init_syncookies);
+static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -44,8 +36,11 @@
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 		       u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
+	__u32 *tmp;
 
+	net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
+
+	tmp  = __get_cpu_var(ipv4_cookie_scratch);
 	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
 	tmp[0] = (__force u32)saddr;
 	tmp[1] = (__force u32)daddr;
@@ -89,8 +84,7 @@
 
 
 static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
-				   __be16 dport, __u32 sseq, __u32 count,
-				   __u32 data)
+				   __be16 dport, __u32 sseq, __u32 data)
 {
 	/*
 	 * Compute the secure sequence number.
@@ -102,7 +96,7 @@
 	 * As an extra hack, we add a small "data" value that encodes the
 	 * MSS into the second hash value.
 	 */
-
+	u32 count = tcp_cookie_time();
 	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
 		sseq + (count << COOKIEBITS) +
 		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -114,22 +108,21 @@
  * If the syncookie is bad, the data returned will be out of
  * range.  This must be checked by the caller.
  *
- * The count value used to generate the cookie must be within
- * "maxdiff" if the current (passed-in) "count".  The return value
- * is (__u32)-1 if this test fails.
+ * The count value used to generate the cookie must be less than
+ * MAX_SYNCOOKIE_AGE minutes in the past.
+ * The return value is (__u32)-1 if this test fails.
  */
 static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
-				  __be16 sport, __be16 dport, __u32 sseq,
-				  __u32 count, __u32 maxdiff)
+				  __be16 sport, __be16 dport, __u32 sseq)
 {
-	__u32 diff;
+	u32 diff, count = tcp_cookie_time();
 
 	/* Strip away the layers from the cookie */
 	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
 	/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
 	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
-	if (diff >= maxdiff)
+	if (diff >= MAX_SYNCOOKIE_AGE)
 		return (__u32)-1;
 
 	return (cookie -
@@ -138,22 +131,22 @@
 }
 
 /*
- * MSS Values are taken from the 2009 paper
- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
- *  - values 1440 to 1460 accounted for 80% of observed mss values
- *  - values outside the 536-1460 range are rare (<0.2%).
+ * MSS Values are chosen based on the 2011 paper
+ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
+ * Values ..
+ *  .. lower than 536 are rare (< 0.2%)
+ *  .. between 537 and 1299 account for less than 1.5% of observed values
+ *  .. in the 1300-1349 range account for about 15 to 20% of observed mss values
+ *  .. exceeding 1460 are very rare (< 0.04%)
  *
- * Table must be sorted.
+ *  1460 is the single most frequently announced mss value (30 to 46% depending
+ *  on monitor location).  Table must be sorted.
  */
 static __u16 const msstab[] = {
-	64,
-	512,
 	536,
-	1024,
-	1440,
+	1300,
+	1440,	/* 1440, 1452: PPPoE */
 	1460,
-	4312,
-	8960,
 };
 
 /*
@@ -173,7 +166,7 @@
 
 	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
 				     th->source, th->dest, ntohl(th->seq),
-				     jiffies / (HZ * 60), mssind);
+				     mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
@@ -189,13 +182,6 @@
 }
 
 /*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-/*
  * Check if a ack sequence number is a valid syncookie.
  * Return the decoded mss if it is, or 0 if not.
  */
@@ -204,9 +190,7 @@
 {
 	__u32 seq = ntohl(th->seq) - 1;
 	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
-					    th->source, th->dest, seq,
-					    jiffies / (HZ * 60),
-					    COUNTER_TRIES);
+					    th->source, th->dest, seq);
 
 	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
@@ -315,10 +299,10 @@
 	treq->rcv_isn		= ntohl(th->seq) - 1;
 	treq->snt_isn		= cookie;
 	req->mss		= mss;
-	ireq->loc_port		= th->dest;
-	ireq->rmt_port		= th->source;
-	ireq->loc_addr		= ip_hdr(skb)->daddr;
-	ireq->rmt_addr		= ip_hdr(skb)->saddr;
+	ireq->ir_num		= ntohs(th->dest);
+	ireq->ir_rmt_port	= th->source;
+	ireq->ir_loc_addr	= ip_hdr(skb)->daddr;
+	ireq->ir_rmt_addr	= ip_hdr(skb)->saddr;
 	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
@@ -358,8 +342,8 @@
 	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
-			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-			   ireq->loc_addr, th->source, th->dest);
+			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, th->source, th->dest);
 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(sock_net(sk), &fl4);
 	if (IS_ERR(rt)) {
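
After this change the validity test needs no caller-supplied count and maxdiff: tcp_cookie_time() supplies a minute counter, and a cookie is stale once it is MAX_SYNCOOKIE_AGE minutes old. A userspace model of the age check follows; the constant is 2 in the accompanying tcp.h change, which is outside this excerpt:

    #include <assert.h>
    #include <stdint.h>

    #define COOKIEBITS 24
    #define MAX_SYNCOOKIE_AGE 2  /* minutes; from the tcp.h side of this series */

    /* Model of the count check in check_tcp_syn_cookie(): the cookie's top
     * 8 bits carry the minute counter from generation time. */
    static int cookie_is_fresh(uint32_t cookie_count_bits, uint32_t now)
    {
        uint32_t diff = (now - cookie_count_bits) &
                        ((uint32_t)-1 >> COOKIEBITS);
        return diff < MAX_SYNCOOKIE_AGE;
    }

    int main(void)
    {
        assert(cookie_is_fresh(100, 100));   /* same minute: valid */
        assert(cookie_is_fresh(100, 101));   /* one minute old: valid */
        assert(!cookie_is_fresh(100, 102));  /* two minutes old: stale */
        assert(!cookie_is_fresh(101, 100));  /* "future" cookie wraps to large diff */
        return 0;
    }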
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 540279f..3d69ec8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -43,12 +43,12 @@
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 
 /* Update system visible IP port range */
-static void set_local_port_range(int range[2])
+static void set_local_port_range(struct net *net, int range[2])
 {
-	write_seqlock(&sysctl_local_ports.lock);
-	sysctl_local_ports.range[0] = range[0];
-	sysctl_local_ports.range[1] = range[1];
-	write_sequnlock(&sysctl_local_ports.lock);
+	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+	net->ipv4.sysctl_local_ports.range[0] = range[0];
+	net->ipv4.sysctl_local_ports.range[1] = range[1];
+	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -56,6 +56,8 @@
 				 void __user *buffer,
 				 size_t *lenp, loff_t *ppos)
 {
+	struct net *net =
+		container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
 	int ret;
 	int range[2];
 	struct ctl_table tmp = {
@@ -66,14 +68,15 @@
 		.extra2 = &ip_local_port_range_max,
 	};
 
-	inet_get_local_port_range(range, range + 1);
+	inet_get_local_port_range(net, &range[0], &range[1]);
+
 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
 	if (write && ret == 0) {
 		if (range[1] < range[0])
 			ret = -EINVAL;
 		else
-			set_local_port_range(range);
+			set_local_port_range(net, range);
 	}
 
 	return ret;
@@ -83,23 +86,27 @@
 static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
 	kgid_t *data = table->data;
+	struct net *net =
+		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 /* Update the system-visible ping group range */
 static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
 	kgid_t *data = table->data;
-	write_seqlock(&sysctl_local_ports.lock);
+	struct net *net =
+		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&sysctl_local_ports.lock);
+	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -193,49 +200,6 @@
 	return ret;
 }
 
-static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
-			   void __user *buffer, size_t *lenp,
-			   loff_t *ppos)
-{
-	int ret;
-	unsigned long vec[3];
-	struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_MEMCG_KMEM
-	struct mem_cgroup *memcg;
-#endif
-
-	struct ctl_table tmp = {
-		.data = &vec,
-		.maxlen = sizeof(vec),
-		.mode = ctl->mode,
-	};
-
-	if (!write) {
-		ctl->data = &net->ipv4.sysctl_tcp_mem;
-		return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
-	}
-
-	ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
-	if (ret)
-		return ret;
-
-#ifdef CONFIG_MEMCG_KMEM
-	rcu_read_lock();
-	memcg = mem_cgroup_from_task(current);
-
-	tcp_prot_mem(memcg, vec[0], 0);
-	tcp_prot_mem(memcg, vec[1], 1);
-	tcp_prot_mem(memcg, vec[2], 2);
-	rcu_read_unlock();
-#endif
-
-	net->ipv4.sysctl_tcp_mem[0] = vec[0];
-	net->ipv4.sysctl_tcp_mem[1] = vec[1];
-	net->ipv4.sysctl_tcp_mem[2] = vec[2];
-
-	return 0;
-}
-
 static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos)
@@ -267,6 +231,11 @@
 			ret = -EINVAL;
 			goto bad_key;
 		}
+		/* Generate a dummy secret but don't publish it. This
+		 * is needed so we don't generate a fresh key on the
+		 * first invocation of tcp_fastopen_cookie_gen().
+		 */
+		tcp_fastopen_init_key_once(false);
 		tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
 	}
 
@@ -475,13 +444,6 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "ip_local_port_range",
-		.data		= &sysctl_local_ports.range,
-		.maxlen		= sizeof(sysctl_local_ports.range),
-		.mode		= 0644,
-		.proc_handler	= ipv4_local_port_range,
-	},
-	{
 		.procname	= "ip_local_reserved_ports",
 		.data		= NULL, /* initialized in sysctl_ipv4_init */
 		.maxlen		= 65536,
@@ -552,6 +514,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "tcp_mem",
+		.maxlen		= sizeof(sysctl_tcp_mem),
+		.data		= &sysctl_tcp_mem,
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
 		.procname	= "tcp_wmem",
 		.data		= &sysctl_tcp_wmem,
 		.maxlen		= sizeof(sysctl_tcp_wmem),
@@ -732,13 +701,6 @@
 		.proc_handler   = proc_allowed_congestion_control,
 	},
 	{
-		.procname	= "tcp_max_ssthresh",
-		.data		= &sysctl_tcp_max_ssthresh,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
 		.procname       = "tcp_thin_linear_timeouts",
 		.data           = &sysctl_tcp_thin_linear_timeouts,
 		.maxlen         = sizeof(int),
@@ -854,10 +816,11 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "tcp_mem",
-		.maxlen		= sizeof(init_net.ipv4.sysctl_tcp_mem),
+		.procname	= "ip_local_port_range",
+		.maxlen		= sizeof(init_net.ipv4.sysctl_local_ports.range),
+		.data		= &init_net.ipv4.sysctl_local_ports.range,
 		.mode		= 0644,
-		.proc_handler	= ipv4_tcp_mem,
+		.proc_handler	= ipv4_local_port_range,
 	},
 	{ }
 };
@@ -868,30 +831,15 @@
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
+		int i;
+
 		table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
 		if (table == NULL)
 			goto err_alloc;
 
-		table[0].data =
-			&net->ipv4.sysctl_icmp_echo_ignore_all;
-		table[1].data =
-			&net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
-		table[2].data =
-			&net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
-		table[3].data =
-			&net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
-		table[4].data =
-			&net->ipv4.sysctl_icmp_ratelimit;
-		table[5].data =
-			&net->ipv4.sysctl_icmp_ratemask;
-		table[6].data =
-			&net->ipv4.sysctl_ping_group_range;
-		table[7].data =
-			&net->ipv4.sysctl_tcp_ecn;
-
-		/* Don't export sysctls to unprivileged users */
-		if (net->user_ns != &init_user_ns)
-			table[0].procname = NULL;
+		/* Update the variables to point into the current struct net */
+		for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+			table[i].data += (void *)net - (void *)&init_net;
 	}
 
 	/*
@@ -901,7 +849,12 @@
 	net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
 	net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
-	tcp_init_mem(net);
+	/*
+	 * Set defaults for local port range
+	 */
+	seqlock_init(&net->ipv4.sysctl_local_ports.lock);
+	net->ipv4.sysctl_local_ports.range[0] =  32768;
+	net->ipv4.sysctl_local_ports.range[1] =  61000;
 
 	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (net->ipv4.ipv4_hdr == NULL)
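
The pointer fixup in ipv4_sysctl_init_net() replaces eight hand-maintained assignments: every entry's .data initially points at a field inside init_net, so adding the byte distance between the new struct net and init_net retargets it to the same field of the new instance. A userspace model of the trick; like the kernel, it assumes a flat address space (ISO C leaves pointer arithmetic across distinct objects undefined):

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative stand-ins for struct net and a ctl_table entry. */
    struct net {
        int icmp_echo_ignore_all;
        int local_port_range[2];
    };

    struct ctl_entry {
        const char *procname;
        void *data;
    };

    static struct net init_net;

    int main(void)
    {
        struct net other_net;
        struct ctl_entry table[] = {
            { "icmp_echo_ignore_all", &init_net.icmp_echo_ignore_all },
            { "ip_local_port_range",  &init_net.local_port_range },
        };
        ptrdiff_t delta = (char *)&other_net - (char *)&init_net;

        /* Shift each data pointer by the distance between the two
         * struct net instances, as the loop above does. */
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            table[i].data = (char *)table[i].data + delta;

        assert(table[0].data == (void *)&other_net.icmp_echo_ignore_all);
        assert(table[1].data == (void *)&other_net.local_port_range);
        return 0;
    }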
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6e5617b..8e8529d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -288,9 +288,11 @@
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
+EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -3097,13 +3099,13 @@
 }
 __setup("thash_entries=", set_thash_entries);
 
-void tcp_init_mem(struct net *net)
+static void tcp_init_mem(void)
 {
 	unsigned long limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
-	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-	net->ipv4.sysctl_tcp_mem[1] = limit;
-	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+	sysctl_tcp_mem[0] = limit / 4 * 3;
+	sysctl_tcp_mem[1] = limit;
+	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
 }
 
 void __init tcp_init(void)
@@ -3137,10 +3139,9 @@
 					&tcp_hashinfo.ehash_mask,
 					0,
 					thash_entries ? 0 : 512 * 1024);
-	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
+	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
 		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
-		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
-	}
+
 	if (inet_ehash_locks_alloc(&tcp_hashinfo))
 		panic("TCP: failed to alloc ehash_locks");
 	tcp_hashinfo.bhash =
@@ -3166,7 +3167,7 @@
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
-	tcp_init_mem(&init_net);
+	tcp_init_mem();
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
 	max_wshare = min(4UL*1024*1024, limit);
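
tcp_init_mem() now fills the global triple instead of a per-netns copy; the derivation itself is easy to check by hand. A userspace model, where free_pages stands in for nr_free_buffer_pages():

    #include <stdio.h>

    /* Model of tcp_init_mem(): derive the tcp_mem triple (in pages)
     * from the number of free buffer pages. */
    static void tcp_mem_defaults(unsigned long free_pages, long mem[3])
    {
        unsigned long limit = free_pages / 8;

        if (limit < 128)
            limit = 128;
        mem[0] = limit / 4 * 3;  /* below this: no pressure */
        mem[1] = limit;          /* pressure starts */
        mem[2] = mem[0] * 2;     /* hard limit */
    }

    int main(void)
    {
        long mem[3];

        tcp_mem_defaults(4UL * 1024 * 1024, mem); /* ~16 GiB of 4 KiB pages */
        printf("tcp_mem = %ld %ld %ld\n", mem[0], mem[1], mem[2]);
        return 0;
    }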
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index f45e1c2..821846f 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -140,7 +140,8 @@
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -149,7 +150,7 @@
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
 		tcp_cong_avoid_ai(tp, ca->cnt);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 019c238..ad37bf1 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -15,8 +15,6 @@
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_max_ssthresh = 0;
-
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
 static LIST_HEAD(tcp_cong_list);
 
@@ -299,35 +297,24 @@
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
-/*
- * Slow start is used when congestion window is less than slow start
- * threshold. This version implements the basic RFC2581 version
- * and optionally supports:
- * 	RFC3742 Limited Slow Start  	  - growth limited to max_ssthresh
- *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
+/* Slow start is used when congestion window is no greater than the slow start
+ * threshold. We follow RFC2581 and also handle stretch ACKs properly.
+ * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
+ * something better;) a packet is only considered (s)acked in its entirety to
+ * defend against the ACK attacks described in the RFC. Slow start processes a
+ * stretch ACK of degree N as if N acks of degree 1 were received back to back,
+ * except that ABC caps N to 2. Slow start exits when cwnd grows over ssthresh
+ * and returns the leftover acks to adjust cwnd in congestion avoidance mode.
+ */
-void tcp_slow_start(struct tcp_sock *tp)
+int tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	int cnt; /* increase in packets */
-	unsigned int delta = 0;
-	u32 snd_cwnd = tp->snd_cwnd;
+	u32 cwnd = tp->snd_cwnd + acked;
 
-	if (unlikely(!snd_cwnd)) {
-		pr_err_once("snd_cwnd is nul, please report this bug.\n");
-		snd_cwnd = 1U;
-	}
-
-	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
-		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
-	else
-		cnt = snd_cwnd;				/* exponential increase */
-
-	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= snd_cwnd) {
-		tp->snd_cwnd_cnt -= snd_cwnd;
-		delta++;
-	}
-	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
+	if (cwnd > tp->snd_ssthresh)
+		cwnd = tp->snd_ssthresh + 1;
+	acked -= cwnd - tp->snd_cwnd;
+	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+	return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
@@ -351,7 +338,7 @@
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -360,7 +347,7 @@
 
 	/* In "safe" area, increase. */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	/* In dangerous area, increase slowly. */
 	else
 		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
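
To see the new tcp_slow_start() contract in numbers: with cwnd = 10, ssthresh = 16 and a stretch ACK covering 20 segments, the window may only grow to ssthresh + 1 = 17, consuming 7 of the 20 acks; the remaining 13 are returned for the caller's congestion-avoidance step. A userspace model mirroring the logic above:

    #include <assert.h>
    #include <stdint.h>

    struct tcp_model {
        uint32_t snd_cwnd;
        uint32_t snd_ssthresh;
        uint32_t snd_cwnd_clamp;
    };

    /* Model of the reworked tcp_slow_start(): returns leftover acked
     * segments once cwnd has been capped at ssthresh + 1. */
    static uint32_t slow_start(struct tcp_model *tp, uint32_t acked)
    {
        uint32_t cwnd = tp->snd_cwnd + acked;

        if (cwnd > tp->snd_ssthresh)
            cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = cwnd < tp->snd_cwnd_clamp ? cwnd : tp->snd_cwnd_clamp;
        return acked;
    }

    int main(void)
    {
        struct tcp_model tp = {
            .snd_cwnd = 10, .snd_ssthresh = 16, .snd_cwnd_clamp = 1000,
        };
        /* A stretch ACK for 20 segments: cwnd may only grow to
         * ssthresh + 1 = 17, consuming 7 of the 20 acks ... */
        uint32_t left = slow_start(&tp, 20);

        assert(tp.snd_cwnd == 17);
        /* ... and the remaining 13 are handed back for the caller's
         * congestion avoidance phase. */
        assert(left == 13);
        return 0;
    }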
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index b6ae92a..828e4c3 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -304,7 +304,8 @@
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -315,7 +316,7 @@
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	} else {
 		bictcp_update(ca, tp->snd_cwnd);
 		tcp_cong_avoid_ai(tp, ca->cnt);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ab7bd35..f195d93 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -8,12 +8,26 @@
 #include <net/inetpeer.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_fastopen __read_mostly;
+int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;
 
 struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 
 static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
 
+void tcp_fastopen_init_key_once(bool publish)
+{
+	static u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+	/* tcp_fastopen_reset_cipher publishes the new context
+	 * atomically, so we allow this race to happen here.
+	 *
+	 * All call sites of tcp_fastopen_cookie_gen also check
+	 * for a valid cookie, so this is an acceptable risk.
+	 */
+	if (net_get_random_once(key, sizeof(key)) && publish)
+		tcp_fastopen_reset_cipher(key, sizeof(key));
+}
+
 static void tcp_fastopen_ctx_free(struct rcu_head *head)
 {
 	struct tcp_fastopen_context *ctx =
@@ -70,6 +84,8 @@
 	__be32 path[4] = { src, dst, 0, 0 };
 	struct tcp_fastopen_context *ctx;
 
+	tcp_fastopen_init_key_once(true);
+
 	rcu_read_lock();
 	ctx = rcu_dereference(tcp_fastopen_ctx);
 	if (ctx) {
@@ -78,14 +94,3 @@
 	}
 	rcu_read_unlock();
 }
-
-static int __init tcp_fastopen_init(void)
-{
-	__u8 key[TCP_FASTOPEN_KEY_LENGTH];
-
-	get_random_bytes(key, sizeof(key));
-	tcp_fastopen_reset_cipher(key, sizeof(key));
-	return 0;
-}
-
-late_initcall(tcp_fastopen_init);
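
net_get_random_once() (added elsewhere in this series) returns true only from the call that actually generated the bytes, which is what lets tcp_fastopen_init_key_once() decide whether to publish. A simplified model of that return-true-once contract; the kernel version is keyed per call site via static keys and draws real entropy, neither of which this sketch attempts:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    /* Model of net_get_random_once(): fill buf with pseudo-random bytes
     * the first time it is called for this flag, and report whether we
     * were first. A concurrent loser could still observe a partially
     * filled buffer here; the comment in the diff explains why the
     * analogous race is acceptable for the fastopen key. */
    static int get_random_once(void *buf, size_t len, atomic_int *done)
    {
        int expected = 0;

        if (!atomic_compare_exchange_strong(done, &expected, 1))
            return 0;  /* somebody else already initialized it */
        for (size_t i = 0; i < len; i++)
            ((unsigned char *)buf)[i] = rand() & 0xff;
        return 1;
    }

    int main(void)
    {
        static unsigned char key[16];
        static atomic_int key_done;

        /* First caller generates the key and would publish it. */
        assert(get_random_once(key, sizeof(key), &key_done) == 1);
        /* Later callers see it already set and skip publication. */
        assert(get_random_once(key, sizeof(key), &key_done) == 0);
        return 0;
    }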
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 30f27f6..8ed9305 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hstcp *ca = inet_csk_ca(sk);
@@ -118,7 +118,7 @@
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		/* Update AIMD parameters.
 		 *
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index c1a8175..4a194ac 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -227,7 +227,7 @@
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
@@ -236,7 +236,7 @@
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 57bdd17..478fe82 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,8 @@
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			     u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hybla *ca = inet_csk_ca(sk);
@@ -102,7 +103,7 @@
 		return;
 
 	if (!ca->hybla_en) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 834857f..8a52099 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -256,7 +256,8 @@
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				    u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
@@ -270,7 +271,7 @@
 
 	/* In slow start */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 	else {
 		u32 delta;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 068c8fb..c53b7f3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -267,11 +267,31 @@
  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
  */
 
-static void tcp_fixup_sndbuf(struct sock *sk)
+static void tcp_sndbuf_expand(struct sock *sk)
 {
-	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	int sndmem, per_mss;
+	u32 nr_segs;
 
-	sndmem *= TCP_INIT_CWND;
+	/* Worst case is non-GSO/TSO: each frame consumes one skb
+	 * and skb->head is kmalloced from a power-of-two area of memory.
+	 */
+	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+		  MAX_TCP_HEADER +
+		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	per_mss = roundup_pow_of_two(per_mss) +
+		  SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
+
+	/* Fast Recovery (RFC 5681 3.2) :
+	 * Cubic needs 1.7 factor, rounded to 2 to include
+	 * extra cushion (application might react slowly to POLLOUT)
+	 */
+	sndmem = 2 * nr_segs * per_mss;
+
 	if (sk->sk_sndbuf < sndmem)
 		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 }
@@ -355,6 +375,12 @@
 	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
 		 tcp_default_init_rwnd(mss);
 
+	/* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency.
+	 * Allow enough cushion so that the sender is not limited by our window.
+	 */
+	if (sysctl_tcp_moderate_rcvbuf)
+		rcvmem <<= 2;
+
 	if (sk->sk_rcvbuf < rcvmem)
 		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
 }
@@ -370,9 +396,11 @@
 	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
 		tcp_fixup_rcvbuf(sk);
 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
-		tcp_fixup_sndbuf(sk);
+		tcp_sndbuf_expand(sk);
 
 	tp->rcvq_space.space = tp->rcv_wnd;
+	tp->rcvq_space.time = tcp_time_stamp;
+	tp->rcvq_space.seq = tp->copied_seq;
 
 	maxwin = tcp_full_space(sk);
 
@@ -512,48 +540,62 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int time;
-	int space;
-
-	if (tp->rcvq_space.time == 0)
-		goto new_measure;
+	int copied;
 
 	time = tcp_time_stamp - tp->rcvq_space.time;
 	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
 		return;
 
-	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
+	/* Number of bytes copied to user in last RTT */
+	copied = tp->copied_seq - tp->rcvq_space.seq;
+	if (copied <= tp->rcvq_space.space)
+		goto new_measure;
 
-	space = max(tp->rcvq_space.space, space);
+	/* A bit of theory:
+	 * copied = bytes received in previous RTT, our base window
+	 * To cope with packet losses, we need a 2x factor
+	 * To cope with slow start, with the sender growing its cwnd by 100%
+	 * every RTT, we need a 4x factor, because the ACK we are sending
+	 * now is for the next RTT, not the current one:
+	 * <prev RTT . ><current RTT .. ><next RTT .... >
+	 */
 
-	if (tp->rcvq_space.space != space) {
-		int rcvmem;
+	if (sysctl_tcp_moderate_rcvbuf &&
+	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+		int rcvwin, rcvmem, rcvbuf;
 
-		tp->rcvq_space.space = space;
+		/* minimal window to cope with packet losses, assuming
+		 * steady state. Add some cushion because of small variations.
+		 */
+		rcvwin = (copied << 1) + 16 * tp->advmss;
 
-		if (sysctl_tcp_moderate_rcvbuf &&
-		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-			int new_clamp = space;
+		/* If rate increased by 25%,
+		 *	assume slow start, rcvwin = 3 * copied
+		 * If rate increased by 50%,
+		 *	assume sender can use 2x growth, rcvwin = 4 * copied
+		 */
+		if (copied >=
+		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
+			if (copied >=
+			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
+				rcvwin <<= 1;
+			else
+				rcvwin += (rcvwin >> 1);
+		}
 
-			/* Receive space grows, normalize in order to
-			 * take into account packet headers and sk_buff
-			 * structure overhead.
-			 */
-			space /= tp->advmss;
-			if (!space)
-				space = 1;
-			rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
-			while (tcp_win_from_space(rcvmem) < tp->advmss)
-				rcvmem += 128;
-			space *= rcvmem;
-			space = min(space, sysctl_tcp_rmem[2]);
-			if (space > sk->sk_rcvbuf) {
-				sk->sk_rcvbuf = space;
+		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
+		while (tcp_win_from_space(rcvmem) < tp->advmss)
+			rcvmem += 128;
 
-				/* Make the window clamp follow along.  */
-				tp->window_clamp = new_clamp;
-			}
+		rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+		if (rcvbuf > sk->sk_rcvbuf) {
+			sk->sk_rcvbuf = rcvbuf;
+
+			/* Make the window clamp follow along.  */
+			tp->window_clamp = rcvwin;
 		}
 	}
+	tp->rcvq_space.space = copied;
 
 new_measure:
 	tp->rcvq_space.seq = tp->copied_seq;
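
The growth policy above condenses to a function of two measurements: bytes copied to userspace in the last RTT and the previous estimate. A userspace model of the rcvwin computation, where advmss stands in for tp->advmss:

    #include <assert.h>

    /* Model of the DRS window growth in tcp_rcv_space_adjust():
     * copied = bytes delivered to userspace in the last RTT,
     * prev   = previous rcvq_space.space estimate. */
    static unsigned int drs_rcvwin(unsigned int copied, unsigned int prev,
                                   unsigned int advmss)
    {
        unsigned int rcvwin = (copied << 1) + 16 * advmss;

        if (copied >= prev + (prev >> 2)) {       /* rate grew >= 25% */
            if (copied >= prev + (prev >> 1))     /* >= 50%: double again */
                rcvwin <<= 1;
            else                                  /* else 1.5x */
                rcvwin += rcvwin >> 1;
        }
        return rcvwin;
    }

    int main(void)
    {
        /* Steady state: 2x copied plus a 16-segment cushion. */
        assert(drs_rcvwin(100000, 100000, 1000) == 216000);
        /* Rate grew 60%: the whole window (cushion included) is doubled. */
        assert(drs_rcvwin(160000, 100000, 1000) == 672000);
        return 0;
    }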
@@ -713,7 +755,12 @@
 	if (tp->srtt > 8 + 2)
 		do_div(rate, tp->srtt);
 
-	sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+	/* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+	 * without any lock. We want to make sure the compiler won't store
+	 * intermediate values in this location.
+	 */
+	ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+						sk->sk_max_pacing_rate);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
@@ -2887,10 +2934,10 @@
 		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2979,7 +3026,7 @@
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	u32 now = tcp_time_stamp;
-	int fully_acked = true;
+	bool fully_acked = true;
 	int flag = 0;
 	u32 pkts_acked = 0;
 	u32 reord = tp->packets_out;
@@ -3407,7 +3454,7 @@
 
 	/* Advance cwnd if state allows */
 	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, prior_in_flight);
+		tcp_cong_avoid(sk, ack, acked, prior_in_flight);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -4717,15 +4764,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_should_expand_sndbuf(sk)) {
-		int sndmem = SKB_TRUESIZE(max_t(u32,
-						tp->rx_opt.mss_clamp,
-						tp->mss_cache) +
-					  MAX_TCP_HEADER);
-		int demanded = max_t(unsigned int, tp->snd_cwnd,
-				     tp->reordering + 1);
-		sndmem *= 2 * demanded;
-		if (sndmem > sk->sk_sndbuf)
-			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+		tcp_sndbuf_expand(sk);
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
 
@@ -5693,8 +5732,8 @@
 			tcp_init_congestion_control(sk);
 
 			tcp_mtup_init(sk);
-			tcp_init_buffer_space(sk);
 			tp->copied_seq = tp->rcv_nxt;
+			tcp_init_buffer_space(sk);
 		}
 		smp_mb();
 		tcp_set_state(sk, TCP_ESTABLISHED);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b14266b..59a6f8b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -288,6 +288,7 @@
 	mtu = dst_mtu(dst);
 
 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
+	    ip_sk_accept_pmtu(sk) &&
 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 		tcp_sync_mss(sk, mtu);
 
@@ -835,11 +836,11 @@
 	skb = tcp_make_synack(sk, dst, req, NULL);
 
 	if (skb) {
-		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
 		skb_set_queue_mapping(skb, queue_mapping);
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
+		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+					    ireq->ir_rmt_addr,
 					    ireq->opt);
 		err = net_xmit_eval(err);
 		if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +973,7 @@
 {
 	union tcp_md5_addr *addr;
 
-	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
 	return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 
@@ -1149,8 +1150,8 @@
 		saddr = inet_sk(sk)->inet_saddr;
 		daddr = inet_sk(sk)->inet_daddr;
 	} else if (req) {
-		saddr = inet_rsk(req)->loc_addr;
-		daddr = inet_rsk(req)->rmt_addr;
+		saddr = inet_rsk(req)->ir_loc_addr;
+		daddr = inet_rsk(req)->ir_rmt_addr;
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
 		saddr = iph->saddr;
@@ -1366,8 +1367,8 @@
 		kfree_skb(skb_synack);
 		return -1;
 	}
-	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-				    ireq->rmt_addr, ireq->opt);
+	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+				    ireq->ir_rmt_addr, ireq->opt);
 	err = net_xmit_eval(err);
 	if (!err)
 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1411,8 @@
 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
 	tcp_init_congestion_control(child);
 	tcp_mtup_init(child);
-	tcp_init_buffer_space(child);
 	tcp_init_metrics(child);
+	tcp_init_buffer_space(child);
 
 	/* Queue the data carried in the SYN packet. We need to first
 	 * bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1503,8 @@
 	tcp_openreq_init(req, &tmp_opt, skb);
 
 	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
+	ireq->ir_loc_addr = daddr;
+	ireq->ir_rmt_addr = saddr;
 	ireq->no_srccheck = inet_sk(sk)->transparent;
 	ireq->opt = tcp_v4_save_options(skb);
 
@@ -1578,15 +1579,15 @@
 	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
 
 	if (skb_synack) {
-		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
 	} else
 		goto drop_and_free;
 
 	if (likely(!do_fastopen)) {
 		int err;
-		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-		     ireq->rmt_addr, ireq->opt);
+		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+		     ireq->ir_rmt_addr, ireq->opt);
 		err = net_xmit_eval(err);
 		if (err || want_cookie)
 			goto drop_and_free;
@@ -1644,9 +1645,9 @@
 	newtp		      = tcp_sk(newsk);
 	newinet		      = inet_sk(newsk);
 	ireq		      = inet_rsk(req);
-	newinet->inet_daddr   = ireq->rmt_addr;
-	newinet->inet_rcv_saddr = ireq->loc_addr;
-	newinet->inet_saddr	      = ireq->loc_addr;
+	newinet->inet_daddr   = ireq->ir_rmt_addr;
+	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+	newinet->inet_saddr	      = ireq->ir_loc_addr;
 	inet_opt	      = ireq->opt;
 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
 	ireq->opt	      = NULL;
@@ -2194,18 +2195,6 @@
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
-	return hlist_nulls_empty(head) ? NULL :
-		list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
-	return !is_a_nulls(tw->tw_node.next) ?
-		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
 /*
  * Get next listener socket follow cur.  If cur is NULL, get first socket
  * starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2298,9 @@
 	return rc;
 }
 
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
 {
-	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
-		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
 }
 
 /*
@@ -2329,7 +2317,6 @@
 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
-		struct inet_timewait_sock *tw;
 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
 		/* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2332,7 @@
 			rc = sk;
 			goto out;
 		}
-		st->state = TCP_SEQ_STATE_TIME_WAIT;
-		inet_twsk_for_each(tw, node,
-				   &tcp_hashinfo.ehash[st->bucket].twchain) {
-			if (tw->tw_family != st->family ||
-			    !net_eq(twsk_net(tw), net)) {
-				continue;
-			}
-			rc = tw;
-			goto out;
-		}
 		spin_unlock_bh(lock);
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
 	return rc;
@@ -2365,7 +2341,6 @@
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
 	struct sock *sk = cur;
-	struct inet_timewait_sock *tw;
 	struct hlist_nulls_node *node;
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
@@ -2373,45 +2348,16 @@
 	++st->num;
 	++st->offset;
 
-	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
-		tw = cur;
-		tw = tw_next(tw);
-get_tw:
-		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
-			tw = tw_next(tw);
-		}
-		if (tw) {
-			cur = tw;
-			goto out;
-		}
-		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
-
-		/* Look for next non empty bucket */
-		st->offset = 0;
-		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
-				empty_bucket(st))
-			;
-		if (st->bucket > tcp_hashinfo.ehash_mask)
-			return NULL;
-
-		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
-	} else
-		sk = sk_nulls_next(sk);
+	sk = sk_nulls_next(sk);
 
 	sk_nulls_for_each_from(sk, node) {
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
-			goto found;
+			return sk;
 	}
 
-	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
-	goto get_tw;
-found:
-	cur = sk;
-out:
-	return cur;
+	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+	++st->bucket;
+	return established_get_first(seq);
 }
 
 static void *established_get_idx(struct seq_file *seq, loff_t pos)
@@ -2464,10 +2410,9 @@
 		if (rc)
 			break;
 		st->bucket = 0;
+		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		/* Fallthrough */
 	case TCP_SEQ_STATE_ESTABLISHED:
-	case TCP_SEQ_STATE_TIME_WAIT:
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		if (st->bucket > tcp_hashinfo.ehash_mask)
 			break;
 		rc = established_get_first(seq);
@@ -2524,7 +2469,6 @@
 		}
 		break;
 	case TCP_SEQ_STATE_ESTABLISHED:
-	case TCP_SEQ_STATE_TIME_WAIT:
 		rc = established_get_next(seq, v);
 		break;
 	}
@@ -2548,7 +2492,6 @@
 		if (v != SEQ_START_TOKEN)
 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
 		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2598,18 +2541,18 @@
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-			 struct seq_file *f, int i, kuid_t uid, int *len)
+			 struct seq_file *f, int i, kuid_t uid)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	long delta = req->expires - jiffies;
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
 		i,
-		ireq->loc_addr,
+		ireq->ir_loc_addr,
 		ntohs(inet_sk(sk)->inet_sport),
-		ireq->rmt_addr,
-		ntohs(ireq->rmt_port),
+		ireq->ir_rmt_addr,
+		ntohs(ireq->ir_rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
@@ -2619,11 +2562,10 @@
 		0,  /* non standard timer */
 		0, /* open_requests have no inode */
 		atomic_read(&sk->sk_refcnt),
-		req,
-		len);
+		req);
 }
 
-static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 {
 	int timer_active;
 	unsigned long timer_expires;
@@ -2662,7 +2604,7 @@
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
+			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
@@ -2679,12 +2621,11 @@
 		tp->snd_cwnd,
 		sk->sk_state == TCP_LISTEN ?
 		    (fastopenq ? fastopenq->max_qlen : 0) :
-		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
-		len);
+		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
-			       struct seq_file *f, int i, int *len)
+			       struct seq_file *f, int i)
 {
 	__be32 dest, src;
 	__u16 destp, srcp;
@@ -2696,10 +2637,10 @@
 	srcp  = ntohs(tw->tw_sport);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-		atomic_read(&tw->tw_refcnt), tw, len);
+		atomic_read(&tw->tw_refcnt), tw);
 }
 
 #define TMPSZ 150
@@ -2707,11 +2648,11 @@
 static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
 	struct tcp_iter_state *st;
-	int len;
+	struct sock *sk = v;
 
+	seq_setwidth(seq, TMPSZ - 1);
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "%-*s\n", TMPSZ - 1,
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode");
 		goto out;
@@ -2721,17 +2662,17 @@
 	switch (st->state) {
 	case TCP_SEQ_STATE_LISTENING:
 	case TCP_SEQ_STATE_ESTABLISHED:
-		get_tcp4_sock(v, seq, st->num, &len);
+		if (sk->sk_state == TCP_TIME_WAIT)
+			get_timewait4_sock(v, seq, st->num);
+		else
+			get_tcp4_sock(v, seq, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
-		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
-		get_timewait4_sock(v, seq, st->num, &len);
+		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
 		break;
 	}
-	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
 out:
+	seq_pad(seq, '\n');
 	return 0;
 }
 
@@ -2806,6 +2747,7 @@
 	.orphan_count		= &tcp_orphan_count,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
+	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 72f7218..991d62a 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,12 +115,13 @@
  * Will only call newReno CA when away from inference.
 * From TCP-LP's paper, this will be handled in additive increase.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 }
 
 /**
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 559d4ae..03e9154 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,15 +6,10 @@
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
-{
-	return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
-}
-
 static void memcg_tcp_enter_memory_pressure(struct sock *sk)
 {
 	if (sk->sk_cgrp->memory_pressure)
-		*sk->sk_cgrp->memory_pressure = 1;
+		sk->sk_cgrp->memory_pressure = 1;
 }
 EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
 
@@ -27,34 +22,24 @@
 	 */
 	struct res_counter *res_parent = NULL;
 	struct cg_proto *cg_proto, *parent_cg;
-	struct tcp_memcontrol *tcp;
 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct net *net = current->nsproxy->net_ns;
 
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return 0;
 
-	tcp = tcp_from_cgproto(cg_proto);
-
-	tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
-	tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
-	tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
-	tcp->tcp_memory_pressure = 0;
+	cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
+	cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
+	cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
+	cg_proto->memory_pressure = 0;
+	cg_proto->memcg = memcg;
 
 	parent_cg = tcp_prot.proto_cgroup(parent);
 	if (parent_cg)
-		res_parent = parent_cg->memory_allocated;
+		res_parent = &parent_cg->memory_allocated;
 
-	res_counter_init(&tcp->tcp_memory_allocated, res_parent);
-	percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
-
-	cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
-	cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
-	cg_proto->sysctl_mem = tcp->tcp_prot_mem;
-	cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
-	cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
-	cg_proto->memcg = memcg;
+	res_counter_init(&cg_proto->memory_allocated, res_parent);
+	percpu_counter_init(&cg_proto->sockets_allocated, 0);
 
 	return 0;
 }
@@ -63,21 +48,17 @@
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
 	struct cg_proto *cg_proto;
-	struct tcp_memcontrol *tcp;
 
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return;
 
-	tcp = tcp_from_cgproto(cg_proto);
-	percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+	percpu_counter_destroy(&cg_proto->sockets_allocated);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
-	struct net *net = current->nsproxy->net_ns;
-	struct tcp_memcontrol *tcp;
 	struct cg_proto *cg_proto;
 	u64 old_lim;
 	int i;
@@ -90,16 +71,14 @@
 	if (val > RES_COUNTER_MAX)
 		val = RES_COUNTER_MAX;
 
-	tcp = tcp_from_cgproto(cg_proto);
-
-	old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-	ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+	old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
+	ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < 3; i++)
-		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
-					     net->ipv4.sysctl_tcp_mem[i]);
+		cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
+						sysctl_tcp_mem[i]);
 
 	if (val == RES_COUNTER_MAX)
 		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
@@ -156,28 +135,24 @@
 
 static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
 {
-	struct tcp_memcontrol *tcp;
 	struct cg_proto *cg_proto;
 
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return default_val;
 
-	tcp = tcp_from_cgproto(cg_proto);
-	return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+	return res_counter_read_u64(&cg_proto->memory_allocated, type);
 }
 
 static u64 tcp_read_usage(struct mem_cgroup *memcg)
 {
-	struct tcp_memcontrol *tcp;
 	struct cg_proto *cg_proto;
 
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
 
-	tcp = tcp_from_cgproto(cg_proto);
-	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+	return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
 }
 
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -205,54 +180,25 @@
 static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
 	struct mem_cgroup *memcg;
-	struct tcp_memcontrol *tcp;
 	struct cg_proto *cg_proto;
 
 	memcg = mem_cgroup_from_css(css);
 	cg_proto = tcp_prot.proto_cgroup(memcg);
 	if (!cg_proto)
 		return 0;
-	tcp = tcp_from_cgproto(cg_proto);
 
 	switch (event) {
 	case RES_MAX_USAGE:
-		res_counter_reset_max(&tcp->tcp_memory_allocated);
+		res_counter_reset_max(&cg_proto->memory_allocated);
 		break;
 	case RES_FAILCNT:
-		res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+		res_counter_reset_failcnt(&cg_proto->memory_allocated);
 		break;
 	}
 
 	return 0;
 }
 
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
-{
-	struct tcp_memcontrol *tcp;
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
-	if (!cg_proto)
-		return 0;
-
-	tcp = tcp_from_cgproto(cg_proto);
-	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-}
-
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
-{
-	struct tcp_memcontrol *tcp;
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
-		return;
-
-	tcp = tcp_from_cgproto(cg_proto);
-
-	tcp->tcp_prot_mem[idx] = val;
-}
-
 static struct cftype tcp_files[] = {
 	{
 		.name = "kmem.tcp.limit_in_bytes",
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 52f3c6b..2ab09cb 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -215,13 +215,15 @@
 	addr.family = req->rsk_ops->family;
 	switch (addr.family) {
 	case AF_INET:
-		addr.addr.a4 = inet_rsk(req)->rmt_addr;
+		addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
 		hash = (__force unsigned int) addr.addr.a4;
 		break;
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
-		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+		*(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 		break;
+#endif
 	default:
 		return NULL;
 	}
@@ -240,7 +242,6 @@
 
 static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
 {
-	struct inet6_timewait_sock *tw6;
 	struct tcp_metrics_block *tm;
 	struct inetpeer_addr addr;
 	unsigned int hash;
@@ -252,11 +253,12 @@
 		addr.addr.a4 = tw->tw_daddr;
 		hash = (__force unsigned int) addr.addr.a4;
 		break;
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		tw6 = inet6_twsk((struct sock *)tw);
-		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
-		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+		*(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
+		hash = ipv6_addr_hash(&tw->tw_v6_daddr);
 		break;
+#endif
 	default:
 		return NULL;
 	}
@@ -288,10 +290,12 @@
 		addr.addr.a4 = inet_sk(sk)->inet_daddr;
 		hash = (__force unsigned int) addr.addr.a4;
 		break;
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
-		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+		*(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
+		hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 		break;
+#endif
 	default:
 		return NULL;
 	}
@@ -667,8 +671,9 @@
 		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 
 		write_seqlock_bh(&fastopen_seqlock);
-		tfom->mss = mss;
-		if (cookie->len > 0)
+		if (mss)
+			tfom->mss = mss;
+		if (cookie && cookie->len > 0)
 			tfom->cookie = *cookie;
 		if (syn_lost) {
 			++tfom->syn_loss;
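
Both tcp_metrics.c address-hash hunks use the same guard: IS_ENABLED(CONFIG_IPV6) is true whether IPv6 is built in or modular, so the AF_INET6 case is compiled out only when IPv6 is disabled outright. The idiom, reduced to a standalone sketch (the helper name is hypothetical):

    #include <net/inet_sock.h>
    #include <net/ipv6.h>

    static unsigned int example_daddr_hash(const struct sock *sk)
    {
            switch (sk->sk_family) {
            case AF_INET:
                    return (__force unsigned int)inet_sk(sk)->inet_daddr;
    #if IS_ENABLED(CONFIG_IPV6)     /* y or m, unlike a plain #ifdef */
            case AF_INET6:
                    return ipv6_addr_hash(&sk->sk_v6_daddr);
    #endif
            default:
                    return 0;
            }
    }
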
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 58a3e69..97b6841 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -293,12 +293,9 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
-			struct inet6_timewait_sock *tw6;
 
-			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-			tw6 = inet6_twsk((struct sock *)tw);
-			tw6->tw_v6_daddr = np->daddr;
-			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+			tw->tw_v6_daddr = sk->sk_v6_daddr;
+			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 			tw->tw_tclass = np->tclass;
 			tw->tw_ipv6only = np->ipv6only;
 		}
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 533c58a..a2b68a1 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -14,7 +14,7 @@
 #include <net/tcp.h>
 #include <net/protocol.h>
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -57,6 +57,8 @@
 			       SKB_GSO_TCP_ECN |
 			       SKB_GSO_TCPV6 |
 			       SKB_GSO_GRE |
+			       SKB_GSO_IPIP |
+			       SKB_GSO_SIT |
 			       SKB_GSO_MPLS |
 			       SKB_GSO_UDP_TUNNEL |
 			       0) ||
@@ -136,7 +138,7 @@
 out:
 	return segs;
 }
-EXPORT_SYMBOL(tcp_tso_segment);
+EXPORT_SYMBOL(tcp_gso_segment);
 
 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
@@ -317,7 +319,7 @@
 static const struct net_offload tcpv4_offload = {
 	.callbacks = {
 		.gso_send_check	=	tcp_v4_gso_send_check,
-		.gso_segment	=	tcp_tso_segment,
+		.gso_segment	=	tcp_gso_segment,
 		.gro_receive	=	tcp4_gro_receive,
 		.gro_complete	=	tcp4_gro_complete,
 	},
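
Besides the tcp_tso_segment -> tcp_gso_segment rename, the feature-check hunk widens the whitelist with the new SKB_GSO_IPIP and SKB_GSO_SIT tunnel bits. The validation idiom, sketched as a predicate (hypothetical helper; the bits outside the shown hunk are assumed from surrounding context):

    #include <linux/skbuff.h>

    /* Reject any gso_type bit outside the set this path can segment;
     * unknown bits on untrusted (SKB_GSO_DODGY) packets must not pass.
     */
    static bool example_gso_type_ok(const struct sk_buff *skb)
    {
            return !(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_TCPV4 | SKB_GSO_DODGY | SKB_GSO_TCP_ECN |
                       SKB_GSO_TCPV6 | SKB_GSO_GRE | SKB_GSO_IPIP |
                       SKB_GSO_SIT | SKB_GSO_MPLS | SKB_GSO_UDP_TUNNEL));
    }
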
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d46f214..6728546 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -850,15 +850,15 @@
 
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
 
-	/* If congestion control is doing timestamping, we must
-	 * take such a timestamp before we potentially clone/copy.
-	 */
-	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
-		__net_timestamp(skb);
-
-	if (likely(clone_it)) {
+	if (clone_it) {
 		const struct sk_buff *fclone = skb + 1;
 
+		/* If congestion control is doing timestamping, we must
+		 * take such a timestamp before we potentially clone/copy.
+		 */
+		if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
+			__net_timestamp(skb);
+
 		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
 			     fclone->fclone == SKB_FCLONE_CLONE))
 			NET_INC_STATS_BH(sock_net(sk),
@@ -2353,21 +2353,6 @@
 
 	tcp_retrans_try_collapse(sk, skb, cur_mss);
 
-	/* Some Solaris stacks overoptimize and ignore the FIN on a
-	 * retransmit when old data is attached.  So strip it off
-	 * since it is cheap to do so and saves bytes on the network.
-	 */
-	if (skb->len > 0 &&
-	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
-	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
-		if (!pskb_trim(skb, 0)) {
-			/* Reuse, even though it does some unnecessary work */
-			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
-					     TCP_SKB_CB(skb)->tcp_flags);
-			skb->ip_summed = CHECKSUM_NONE;
-		}
-	}
-
 	/* Make a copy, if the first transmission SKB clone we made
 	 * is still in somebody's hands, else make a clone.
 	 */
@@ -2736,8 +2721,8 @@
 	th->syn = 1;
 	th->ack = 1;
 	TCP_ECN_make_synack(req, th);
-	th->source = ireq->loc_port;
-	th->dest = ireq->rmt_port;
+	th->source = htons(ireq->ir_num);
+	th->dest = ireq->ir_rmt_port;
 	/* Setting of flags are superfluous here for callers (and ECE is
 	 * not even correctly set)
 	 */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 611beab..8b97d71e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -101,22 +101,6 @@
 		si4.sin_addr.s_addr = inet->inet_##mem##addr;	\
 	} while (0)						\
 
-#if IS_ENABLED(CONFIG_IPV6)
-#define tcp_probe_copy_fl_to_si6(inet, si6, mem)		\
-	do {							\
-		struct ipv6_pinfo *pi6 = inet->pinet6;		\
-		si6.sin6_family = AF_INET6;			\
-		si6.sin6_port = inet->inet_##mem##port;		\
-		si6.sin6_addr = pi6->mem##addr;			\
-		si6.sin6_flowinfo = 0; /* No need here. */	\
-		si6.sin6_scope_id = 0;	/* No need here. */	\
-	} while (0)
-#else
-#define tcp_probe_copy_fl_to_si6(fl, si6, mem)			\
-	do {							\
-		memset(&si6, 0, sizeof(si6));			\
-	} while (0)
-#endif
 
 /*
  * Hook inserted to be called before each receive packet.
@@ -147,8 +131,17 @@
 				tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
 				break;
 			case AF_INET6:
-				tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
-				tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+				memset(&p->src.v6, 0, sizeof(p->src.v6));
+				memset(&p->dst.v6, 0, sizeof(p->dst.v6));
+#if IS_ENABLED(CONFIG_IPV6)
+				p->src.v6.sin6_family = AF_INET6;
+				p->src.v6.sin6_port = inet->inet_sport;
+				p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
+
+				p->dst.v6.sin6_family = AF_INET6;
+				p->dst.v6.sin6_port = inet->inet_dport;
+				p->dst.v6.sin6_addr = sk->sk_v6_daddr;
+#endif
 				break;
 			default:
 				BUG();
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 8ce55b8..19ea6c2 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,8 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				    u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -23,7 +24,7 @@
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else
 		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4b85e6f..64f0354 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -156,12 +156,16 @@
 static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int retry_until;
 	bool do_reset, syn_set = false;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-		if (icsk->icsk_retransmits)
+		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
+			if (tp->syn_fastopen || tp->syn_data)
+				tcp_fastopen_cache_set(sk, 0, NULL, true);
+		}
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 		syn_set = true;
 	} else {
@@ -374,9 +378,8 @@
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 		else if (sk->sk_family == AF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
 			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
-				       &np->daddr,
+				       &sk->sk_v6_daddr,
 				       ntohs(inet->inet_dport), inet->inet_num,
 				       tp->snd_una, tp->snd_nxt);
 		}
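
The tcp_write_timeout() hunk pairs with the tcp_metrics.c change further up: tcp_fastopen_cache_set() now tolerates mss = 0 and a NULL cookie, so a SYN timeout can record the loss without clobbering the cached values. The call, factored into a hypothetical helper:

    #include <net/tcp.h>

    /* On SYN retransmission, note a possible Fast Open blackhole so the
     * next connect to this peer falls back to a conventional SYN;
     * mss = 0 / cookie = NULL leave the cached entries untouched.
     */
    static void example_note_syn_timeout(struct sock *sk)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (tp->syn_fastopen || tp->syn_data)
                    tcp_fastopen_cache_set(sk, 0, NULL, true);
    }
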
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 80fa2bf..06cae62 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,14 @@
 	return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				 u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -194,7 +195,7 @@
 			/* We don't have enough RTT samples to do the Vegas
 			 * calculation, so we'll behave like Reno.
 			 */
-			tcp_reno_cong_avoid(sk, ack, in_flight);
+			tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		} else {
 			u32 rtt, diff;
 			u64 target_cwnd;
@@ -243,7 +244,7 @@
 
 			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 				/* Slow start.  */
-				tcp_slow_start(tp);
+				tcp_slow_start(tp, acked);
 			} else {
 				/* Congestion avoidance. */
 
@@ -283,7 +284,7 @@
 	}
 	/* Use normal slow start */
 	else if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 }
 
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 6c0eea2..0531b99 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -15,10 +15,10 @@
 	u32	baseRTT;	/* the min of all Vegas RTT measurements seen (in usec) */
 };
 
-extern void tcp_vegas_init(struct sock *sk);
-extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
-extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+void tcp_vegas_init(struct sock *sk);
+void tcp_vegas_state(struct sock *sk, u8 ca_state);
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 #endif	/* __TCP_VEGAS_H */
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ac43cd7..326475a 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,13 +114,14 @@
 		tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -133,7 +134,7 @@
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 	} else {
 		u64 target_cwnd;
 		u32 rtt;
@@ -152,7 +153,7 @@
 
 		if (tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Slow start.  */
-			tcp_slow_start(tp);
+			tcp_slow_start(tp, acked);
 		} else {
 			/* Congestion avoidance. */
 			if (veno->diff < beta) {
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 05c3b6f..a347a07 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -69,7 +69,8 @@
 	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
-static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
@@ -78,7 +79,7 @@
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 	else if (!yeah->doing_reno_now) {
 		/* Scalable */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ca44df..de86e5b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -103,6 +103,7 @@
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
 #include <net/icmp.h>
+#include <net/inet_hashtables.h>
 #include <net/route.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -219,7 +220,7 @@
 		unsigned short first, last;
 		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
-		inet_get_local_port_range(&low, &high);
+		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
 
 		rand = net_random();
@@ -406,6 +407,18 @@
 	return score;
 }
 
+static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
+				 const __u16 lport, const __be32 faddr,
+				 const __be16 fport)
+{
+	static u32 udp_ehash_secret __read_mostly;
+
+	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
+
+	return __inet_ehashfn(laddr, lport, faddr, fport,
+			      udp_ehash_secret + net_hash_mix(net));
+}
+
 
 /* called with read_rcu_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
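
udp_ehashfn() above is the template for a series of conversions in this merge: per-protocol hash secrets are now drawn lazily through net_get_random_once() on first use, replacing the boot-time build_ehash_secret() (whose last caller is removed from af_inet6.c below). The idiom, with a hypothetical hash function:

    #include <linux/jhash.h>
    #include <net/netns/hash.h>

    /* The secret is initialised exactly once, on the first packet that
     * needs it, so boot entropy is not drained for unused protocols.
     */
    static u32 example_hashfn(struct net *net, __be32 laddr, __u16 lport)
    {
            static u32 example_secret __read_mostly;

            net_get_random_once(&example_secret, sizeof(example_secret));

            return jhash_2words((__force u32)laddr, lport,
                                example_secret + net_hash_mix(net));
    }
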
@@ -429,8 +442,8 @@
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				hash = inet_ehashfn(net, daddr, hnum,
-						    saddr, sport);
+				hash = udp_ehashfn(net, daddr, hnum,
+						   saddr, sport);
 				matches = 1;
 			}
 		} else if (score == badness && reuseport) {
@@ -510,8 +523,8 @@
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				hash = inet_ehashfn(net, daddr, hnum,
-						    saddr, sport);
+				hash = udp_ehashfn(net, daddr, hnum,
+						   saddr, sport);
 				matches = 1;
 			}
 		} else if (score == badness && reuseport) {
@@ -565,6 +578,26 @@
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup);
 
+static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
+				       __be16 loc_port, __be32 loc_addr,
+				       __be16 rmt_port, __be32 rmt_addr,
+				       int dif, unsigned short hnum)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	if (!net_eq(sock_net(sk), net) ||
+	    udp_sk(sk)->udp_port_hash != hnum ||
+	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
+	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
+	    ipv6_only_sock(sk) ||
+	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+		return false;
+	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
+		return false;
+	return true;
+}
+
 static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
 					     __be16 loc_port, __be32 loc_addr,
 					     __be16 rmt_port, __be32 rmt_addr,
@@ -575,20 +608,11 @@
 	unsigned short hnum = ntohs(loc_port);
 
 	sk_nulls_for_each_from(s, node) {
-		struct inet_sock *inet = inet_sk(s);
-
-		if (!net_eq(sock_net(s), net) ||
-		    udp_sk(s)->udp_port_hash != hnum ||
-		    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
-		    (inet->inet_dport != rmt_port && inet->inet_dport) ||
-		    (inet->inet_rcv_saddr &&
-		     inet->inet_rcv_saddr != loc_addr) ||
-		    ipv6_only_sock(s) ||
-		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
-			continue;
-		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
-			continue;
-		goto found;
+		if (__udp_is_mcast_sock(net, s,
+					loc_port, loc_addr,
+					rmt_port, rmt_addr,
+					dif, hnum))
+			goto found;
 	}
 	s = NULL;
 found:
@@ -855,6 +879,8 @@
 
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
+	ipc.ttl = 0;
+	ipc.tos = -1;
 
 	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
@@ -938,7 +964,7 @@
 		faddr = ipc.opt->opt.faddr;
 		connected = 0;
 	}
-	tos = RT_TOS(inet->tos);
+	tos = get_rttos(&ipc, inet);
 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
 	    (msg->msg_flags & MSG_DONTROUTE) ||
 	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
@@ -1403,8 +1429,10 @@
 {
 	int rc;
 
-	if (inet_sk(sk)->inet_daddr)
+	if (inet_sk(sk)->inet_daddr) {
 		sock_rps_save_rxhash(sk, skb);
+		sk_mark_napi_id(sk, skb);
+	}
 
 	rc = sock_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
@@ -1528,7 +1556,7 @@
 
 	rc = 0;
 
-	ipv4_pktinfo_prepare(skb);
+	ipv4_pktinfo_prepare(sk, skb);
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
@@ -1577,6 +1605,14 @@
 		kfree_skb(skb1);
 }
 
+static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+}
+
 /*
  *	Multicasts and broadcasts go to each listener.
  *
@@ -1705,16 +1741,32 @@
 	if (udp4_csum_init(skb, uh, proto))
 		goto csum_error;
 
-	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
-		return __udp4_lib_mcast_deliver(net, skb, uh,
-				saddr, daddr, udptable);
+	if (skb->sk) {
+		int ret;
+		sk = skb->sk;
 
-	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+		if (unlikely(sk->sk_rx_dst == NULL))
+			udp_sk_rx_dst_set(sk, skb);
+
+		ret = udp_queue_rcv_skb(sk, skb);
+
+		/* a return value > 0 means to resubmit the input, but
+		 * it wants the return to be -protocol, or 0
+		 */
+		if (ret > 0)
+			return -ret;
+		return 0;
+	} else {
+		if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+			return __udp4_lib_mcast_deliver(net, skb, uh,
+					saddr, daddr, udptable);
+
+		sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+	}
 
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_napi_id(sk, skb);
 		ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
@@ -1768,6 +1820,135 @@
 	return 0;
 }
 
+/* We can only early demux multicast if there is a single matching socket.
+ * If more than one socket found returns NULL
+ */
+static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+						  __be16 loc_port, __be32 loc_addr,
+						  __be16 rmt_port, __be32 rmt_addr,
+						  int dif)
+{
+	struct sock *sk, *result;
+	struct hlist_nulls_node *node;
+	unsigned short hnum = ntohs(loc_port);
+	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+	struct udp_hslot *hslot = &udp_table.hash[slot];
+
+	rcu_read_lock();
+begin:
+	count = 0;
+	result = NULL;
+	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+		if (__udp_is_mcast_sock(net, sk,
+					loc_port, loc_addr,
+					rmt_port, rmt_addr,
+					dif, hnum)) {
+			result = sk;
+			++count;
+		}
+	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(node) != slot)
+		goto begin;
+
+	if (result) {
+		if (count != 1 ||
+		    unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+			result = NULL;
+		else if (unlikely(!__udp_is_mcast_sock(net, result,
+						       loc_port, loc_addr,
+						       rmt_port, rmt_addr,
+						       dif, hnum))) {
+			sock_put(result);
+			result = NULL;
+		}
+	}
+	rcu_read_unlock();
+	return result;
+}
+
+/* For unicast we should only early demux connected sockets or we can
+ * break forwarding setups.  The chains here can be long so only check
+ * if the first socket is an exact match and if not move on.
+ */
+static struct sock *__udp4_lib_demux_lookup(struct net *net,
+					    __be16 loc_port, __be32 loc_addr,
+					    __be16 rmt_port, __be32 rmt_addr,
+					    int dif)
+{
+	struct sock *sk, *result;
+	struct hlist_nulls_node *node;
+	unsigned short hnum = ntohs(loc_port);
+	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+	unsigned int slot2 = hash2 & udp_table.mask;
+	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+
+	rcu_read_lock();
+	result = NULL;
+	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+		if (INET_MATCH(sk, net, acookie,
+			       rmt_addr, loc_addr, ports, dif))
+			result = sk;
+		/* Only check first socket in chain */
+		break;
+	}
+
+	if (result) {
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+			result = NULL;
+		else if (unlikely(!INET_MATCH(sk, net, acookie,
+					      rmt_addr, loc_addr,
+					      ports, dif))) {
+			sock_put(result);
+			result = NULL;
+		}
+	}
+	rcu_read_unlock();
+	return result;
+}
+
+void udp_v4_early_demux(struct sk_buff *skb)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct udphdr *uh = udp_hdr(skb);
+	struct sock *sk;
+	struct dst_entry *dst;
+	struct net *net = dev_net(skb->dev);
+	int dif = skb->dev->ifindex;
+
+	/* validate the packet */
+	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+		return;
+
+	if (skb->pkt_type == PACKET_BROADCAST ||
+	    skb->pkt_type == PACKET_MULTICAST)
+		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+						   uh->source, iph->saddr, dif);
+	else if (skb->pkt_type == PACKET_HOST)
+		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+					     uh->source, iph->saddr, dif);
+	else
+		return;
+
+	if (!sk)
+		return;
+
+	skb->sk = sk;
+	skb->destructor = sock_edemux;
+	dst = sk->sk_rx_dst;
+
+	if (dst)
+		dst = dst_check(dst, 0);
+	if (dst)
+		skb_dst_set_noref(skb, dst);
+}
+
 int udp_rcv(struct sk_buff *skb)
 {
 	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
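
The demux helpers above only pay off once ip_rcv_finish() can call them before the route lookup; elsewhere in this series the ipv4 net_protocol entry for UDP gains the hook. A hedged sketch of that registration (it lives in net/ipv4/af_inet.c, not in this file):

    #include <net/protocol.h>

    /* ->early_demux runs before the routing decision, letting connected
     * UDP sockets reuse the dst cached in sk->sk_rx_dst.
     */
    static const struct net_protocol udp_protocol = {
            .early_demux =  udp_v4_early_demux,
            .handler =      udp_rcv,
            .err_handler =  udp_err,
            .no_policy =    1,
            .netns_ok =     1,
    };
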
@@ -2150,7 +2331,7 @@
 
 /* ------------------------------------------------------------------------ */
 static void udp4_format_sock(struct sock *sp, struct seq_file *f,
-		int bucket, int *len)
+		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
 	__be32 dest = inet->inet_daddr;
@@ -2159,7 +2340,7 @@
 	__u16 srcp	  = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
@@ -2167,23 +2348,22 @@
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops), len);
+		atomic_read(&sp->sk_drops));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
 {
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-127s\n",
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode ref pointer drops");
 	else {
 		struct udp_iter_state *state = seq->private;
-		int len;
 
-		udp4_format_sock(v, seq, state->bucket, &len);
-		seq_printf(seq, "%*s\n", 127 - len, "");
+		udp4_format_sock(v, seq, state->bucket);
 	}
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 5a681e2..f3c2789 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -5,30 +5,30 @@
 #include <net/protocol.h>
 #include <net/inet_common.h>
 
-extern int  	__udp4_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void 	__udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 
-extern int	udp_v4_get_port(struct sock *sk, unsigned short snum);
+int udp_v4_get_port(struct sock *sk, unsigned short snum);
 
-extern int	udp_setsockopt(struct sock *sk, int level, int optname,
-			       char __user *optval, unsigned int optlen);
-extern int	udp_getsockopt(struct sock *sk, int level, int optname,
-			       char __user *optval, int __user *optlen);
+int udp_setsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, unsigned int optlen);
+int udp_getsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, int __user *optlen);
 
 #ifdef CONFIG_COMPAT
-extern int	compat_udp_setsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, unsigned int optlen);
-extern int	compat_udp_getsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, int __user *optlen);
+int compat_udp_setsockopt(struct sock *sk, int level, int optname,
+			  char __user *optval, unsigned int optlen);
+int compat_udp_getsockopt(struct sock *sk, int level, int optname,
+			  char __user *optval, int __user *optlen);
 #endif
-extern int	udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-			    size_t len, int noblock, int flags, int *addr_len);
-extern int	udp_sendpage(struct sock *sk, struct page *page, int offset,
-			     size_t size, int flags);
-extern int	udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern void	udp_destroy_sock(struct sock *sk);
+int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		size_t len, int noblock, int flags, int *addr_len);
+int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+		 int flags);
+int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
-extern int	udp4_seq_show(struct seq_file *seq, void *v);
+int udp4_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif	/* _UDP4_IMPL_H */
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f35ecca..83206de 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -52,6 +52,7 @@
 
 		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 				      SKB_GSO_UDP_TUNNEL |
+				      SKB_GSO_IPIP |
 				      SKB_GSO_GRE | SKB_GSO_MPLS) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index b5663c3..31b1815 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -16,13 +16,13 @@
 #include <net/xfrm.h>
 
 /* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
 static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
 
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
 {
-	struct xfrm_tunnel __rcu **pprev;
-	struct xfrm_tunnel *t;
+	struct xfrm_tunnel_notifier __rcu **pprev;
+	struct xfrm_tunnel_notifier *t;
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
@@ -50,10 +50,10 @@
 }
 EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
 
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
 {
-	struct xfrm_tunnel __rcu **pprev;
-	struct xfrm_tunnel *t;
+	struct xfrm_tunnel_notifier __rcu **pprev;
+	struct xfrm_tunnel_notifier *t;
 	int ret = -ENOENT;
 
 	mutex_lock(&xfrm4_mode_tunnel_input_mutex);
@@ -134,7 +134,7 @@
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct xfrm_tunnel *handler;
+	struct xfrm_tunnel_notifier *handler;
 	int err = -EINVAL;
 
 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
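
The rename to struct xfrm_tunnel_notifier separates these informational decap hooks from real struct xfrm_tunnel protocol handlers; the new ip6_vti driver added below is the intended consumer. A sketch of registering one, assuming the handler takes only the skb and that the list is priority-sorted as the code above implies:

    #include <net/xfrm.h>

    static int example_decap_notify(struct sk_buff *skb)
    {
            /* Observe the decapsulated packet; returning 0 continues
             * normal xfrm input processing.
             */
            return 0;
    }

    static struct xfrm_tunnel_notifier example_notifier = {
            .handler        = example_decap_notify,
            .priority       = 1,
    };

    static int __init example_init(void)
    {
            return xfrm4_mode_tunnel_input_register(&example_notifier);
    }
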
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 11b13ea..d92e558 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -21,24 +21,6 @@
 
 if IPV6
 
-config IPV6_PRIVACY
-	bool "IPv6: Privacy Extensions (RFC 3041) support"
-	---help---
-	  Privacy Extensions for Stateless Address Autoconfiguration in IPv6
-	  support.  With this option, additional periodically-altered
-	  pseudo-random global-scope unicast address(es) will be assigned to
-	  your interface(s).
-	
-	  We use our standard pseudo-random algorithm to generate the
-          randomized interface identifier, instead of one described in RFC 3041.
-
-	  By default the kernel does not generate temporary addresses.
-	  To use temporary addresses, do
-	
-	        echo 2 >/proc/sys/net/ipv6/conf/all/use_tempaddr 
-
-	  See <file:Documentation/networking/ip-sysctl.txt> for details.
-
 config IPV6_ROUTER_PREF
 	bool "IPv6: Router Preference (RFC 4191) support"
 	---help---
@@ -153,6 +135,17 @@
 	---help---
 	  Support for MIPv6 route optimization mode.
 
+config IPV6_VTI
+tristate "Virtual (secure) IPv6: tunneling"
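PLACEHOLDER-NOT-USED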
+	select IPV6_TUNNEL
+	depends on INET6_XFRM_MODE_TUNNEL
+	---help---
+	Tunneling means encapsulating data of one protocol type within
+	another protocol and sending it over a channel that understands the
+	encapsulating protocol. This can be used with xfrm mode tunnel to give
+	the notion of a secure tunnel for IPSEC and then use routing protocol
+	on top.
+
 config IPV6_SIT
 	tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
 	select INET_TUNNEL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 470a9c0..17bb830 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
 obj-$(CONFIG_NETFILTER)	+= netfilter/
 
+obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cd3fb30..5658d9d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -83,11 +83,7 @@
 #include <linux/if_tunnel.h>
 #include <linux/rtnetlink.h>
 #include <linux/netconf.h>
-
-#ifdef CONFIG_IPV6_PRIVACY
 #include <linux/random.h>
-#endif
-
 #include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
@@ -124,11 +120,9 @@
 }
 #endif
 
-#ifdef CONFIG_IPV6_PRIVACY
 static void __ipv6_regen_rndid(struct inet6_dev *idev);
 static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
 static void ipv6_regen_rndid(unsigned long data);
-#endif
 
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
 static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -183,13 +177,11 @@
 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
-#ifdef CONFIG_IPV6_PRIVACY
 	.use_tempaddr 		= 0,
 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
 	.regen_max_retry	= REGEN_MAX_RETRY,
 	.max_desync_factor	= MAX_DESYNC_FACTOR,
-#endif
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_pinfo	= 1,
@@ -221,13 +213,11 @@
 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
-#ifdef CONFIG_IPV6_PRIVACY
 	.use_tempaddr		= 0,
 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
 	.regen_max_retry	= REGEN_MAX_RETRY,
 	.max_desync_factor	= MAX_DESYNC_FACTOR,
-#endif
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_pinfo	= 1,
@@ -281,10 +271,24 @@
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip;
+
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *addrconf_stats;
+		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+		u64_stats_init(&addrconf_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
+		u64_stats_init(&addrconf_stats->syncp);
+#endif
+	}
+
+
 	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
 					GFP_KERNEL);
 	if (!idev->stats.icmpv6dev)
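
The same loop is added to ipv6_init_mibs() in af_inet6.c below: on 32-bit SMP the u64_stats seqcount embedded in each per-cpu ipstats_mib must be seeded before its first u64_stats_update_begin(), while on 64-bit u64_stats_init() compiles away. The loop as a reusable sketch (hypothetical helper name):

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>
    #include <net/snmp.h>

    static void example_init_syncp(struct ipstats_mib __percpu *mib)
    {
            int i;

            for_each_possible_cpu(i)
                    u64_stats_init(&per_cpu_ptr(mib, i)->syncp);
    }
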
@@ -371,7 +375,6 @@
 	}
 #endif
 
-#ifdef CONFIG_IPV6_PRIVACY
 	INIT_LIST_HEAD(&ndev->tempaddr_list);
 	setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
 	if ((dev->flags&IFF_LOOPBACK) ||
@@ -384,7 +387,7 @@
 		in6_dev_hold(ndev);
 		ipv6_regen_rndid((unsigned long) ndev);
 	}
-#endif
+
 	ndev->token = in6addr_any;
 
 	if (netif_running(dev) && addrconf_qdisc_ok(dev))
@@ -865,12 +868,10 @@
 	/* Add to inet6_dev unicast addr list. */
 	ipv6_link_dev_addr(idev, ifa);
 
-#ifdef CONFIG_IPV6_PRIVACY
 	if (ifa->flags&IFA_F_TEMPORARY) {
 		list_add(&ifa->tmp_list, &idev->tempaddr_list);
 		in6_ifa_hold(ifa);
 	}
-#endif
 
 	in6_ifa_hold(ifa);
 	write_unlock(&idev->lock);
@@ -913,7 +914,7 @@
 	spin_unlock_bh(&addrconf_hash_lock);
 
 	write_lock_bh(&idev->lock);
-#ifdef CONFIG_IPV6_PRIVACY
+
 	if (ifp->flags&IFA_F_TEMPORARY) {
 		list_del(&ifp->tmp_list);
 		if (ifp->ifpub) {
@@ -922,7 +923,6 @@
 		}
 		__in6_ifa_put(ifp);
 	}
-#endif
 
 	list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
 		if (ifa == ifp) {
@@ -1013,7 +1013,6 @@
 	in6_ifa_put(ifp);
 }
 
-#ifdef CONFIG_IPV6_PRIVACY
 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
 {
 	struct inet6_dev *idev = ifp->idev;
@@ -1116,7 +1115,6 @@
 out:
 	return ret;
 }
-#endif
 
 /*
  *	Choose an appropriate source address (RFC3484)
@@ -1131,9 +1129,7 @@
 #endif
 	IPV6_SADDR_RULE_OIF,
 	IPV6_SADDR_RULE_LABEL,
-#ifdef CONFIG_IPV6_PRIVACY
 	IPV6_SADDR_RULE_PRIVACY,
-#endif
 	IPV6_SADDR_RULE_ORCHID,
 	IPV6_SADDR_RULE_PREFIX,
 	IPV6_SADDR_RULE_MAX
@@ -1247,7 +1243,6 @@
 				      &score->ifa->addr, score->addr_type,
 				      score->ifa->idev->dev->ifindex) == dst->label;
 		break;
-#ifdef CONFIG_IPV6_PRIVACY
 	case IPV6_SADDR_RULE_PRIVACY:
 	    {
 		/* Rule 7: Prefer public address
@@ -1259,7 +1254,6 @@
 		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
 		break;
 	    }
-#endif
 	case IPV6_SADDR_RULE_ORCHID:
 		/* Rule 8-: Prefer ORCHID vs ORCHID or
 		 *	    non-ORCHID vs non-ORCHID
@@ -1588,7 +1582,6 @@
 		if (dad_failed)
 			ipv6_ifa_notify(0, ifp);
 		in6_ifa_put(ifp);
-#ifdef CONFIG_IPV6_PRIVACY
 	} else if (ifp->flags&IFA_F_TEMPORARY) {
 		struct inet6_ifaddr *ifpub;
 		spin_lock_bh(&ifp->lock);
@@ -1602,7 +1595,6 @@
 			spin_unlock_bh(&ifp->lock);
 		}
 		ipv6_del_addr(ifp);
-#endif
 	} else
 		ipv6_del_addr(ifp);
 }
@@ -1851,7 +1843,6 @@
 	return err;
 }
 
-#ifdef CONFIG_IPV6_PRIVACY
 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
 static void __ipv6_regen_rndid(struct inet6_dev *idev)
 {
@@ -1919,7 +1910,6 @@
 	if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
 		__ipv6_regen_rndid(idev);
 }
-#endif
 
 /*
  *	Add prefix route.
@@ -2207,9 +2197,7 @@
 		if (ifp) {
 			int flags;
 			unsigned long now;
-#ifdef CONFIG_IPV6_PRIVACY
 			struct inet6_ifaddr *ift;
-#endif
 			u32 stored_lft;
 
 			/* update lifetime (RFC2462 5.5.3 e) */
@@ -2250,7 +2238,6 @@
 			} else
 				spin_unlock(&ifp->lock);
 
-#ifdef CONFIG_IPV6_PRIVACY
 			read_lock_bh(&in6_dev->lock);
 			/* update all temporary addresses in the list */
 			list_for_each_entry(ift, &in6_dev->tempaddr_list,
@@ -2315,7 +2302,7 @@
 			} else {
 				read_unlock_bh(&in6_dev->lock);
 			}
-#endif
+
 			in6_ifa_put(ifp);
 			addrconf_verify(0);
 		}
@@ -2995,7 +2982,6 @@
 	if (!how)
 		idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
 
-#ifdef CONFIG_IPV6_PRIVACY
 	if (how && del_timer(&idev->regen_timer))
 		in6_dev_put(idev);
 
@@ -3015,7 +3001,6 @@
 		in6_ifa_put(ifa);
 		write_lock_bh(&idev->lock);
 	}
-#endif
 
 	while (!list_empty(&idev->addr_list)) {
 		ifa = list_first_entry(&idev->addr_list,
@@ -3528,7 +3513,6 @@
 					in6_ifa_put(ifp);
 					goto restart;
 				}
-#ifdef CONFIG_IPV6_PRIVACY
 			} else if ((ifp->flags&IFA_F_TEMPORARY) &&
 				   !(ifp->flags&IFA_F_TENTATIVE)) {
 				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
@@ -3556,7 +3540,6 @@
 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
 					next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
 				spin_unlock(&ifp->lock);
-#endif
 			} else {
 				/* ifp->prefered_lft <= ifp->valid_lft */
 				if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -4128,13 +4111,11 @@
 		jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
 	array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
 		jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
-#ifdef CONFIG_IPV6_PRIVACY
 	array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
 	array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
 	array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
 	array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
-#endif
 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
@@ -4828,7 +4809,6 @@
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_ms_jiffies,
 		},
-#ifdef CONFIG_IPV6_PRIVACY
 		{
 			.procname	= "use_tempaddr",
 			.data		= &ipv6_devconf.use_tempaddr,
@@ -4864,7 +4844,6 @@
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-#endif
 		{
 			.procname	= "max_addresses",
 			.data		= &ipv6_devconf.max_addresses,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7c96100..ff75313 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -110,11 +110,6 @@
 	int try_loading_module = 0;
 	int err;
 
-	if (sock->type != SOCK_RAW &&
-	    sock->type != SOCK_DGRAM &&
-	    !inet_ehash_secret)
-		build_ehash_secret();
-
 	/* Look for the requested type/protocol pair. */
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
@@ -364,7 +359,7 @@
 	inet->inet_rcv_saddr = v4addr;
 	inet->inet_saddr = v4addr;
 
-	np->rcv_saddr = addr->sin6_addr;
+	sk->sk_v6_rcv_saddr = addr->sin6_addr;
 
 	if (!(addr_type & IPV6_ADDR_MULTICAST))
 		np->saddr = addr->sin6_addr;
@@ -461,14 +456,14 @@
 		    peer == 1)
 			return -ENOTCONN;
 		sin->sin6_port = inet->inet_dport;
-		sin->sin6_addr = np->daddr;
+		sin->sin6_addr = sk->sk_v6_daddr;
 		if (np->sndflow)
 			sin->sin6_flowinfo = np->flow_label;
 	} else {
-		if (ipv6_addr_any(&np->rcv_saddr))
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 			sin->sin6_addr = np->saddr;
 		else
-			sin->sin6_addr = np->rcv_saddr;
+			sin->sin6_addr = sk->sk_v6_rcv_saddr;
 
 		sin->sin6_port = inet->inet_sport;
 	}
@@ -655,7 +650,7 @@
 
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = sk->sk_protocol;
-		fl6.daddr = np->daddr;
+		fl6.daddr = sk->sk_v6_daddr;
 		fl6.saddr = np->saddr;
 		fl6.flowlabel = np->flow_label;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -719,6 +714,8 @@
 
 static int __net_init ipv6_init_mibs(struct net *net)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
 			  sizeof(struct udp_mib),
 			  __alignof__(struct udp_mib)) < 0)
@@ -731,6 +728,18 @@
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
+
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *af_inet6_stats;
+		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+		u64_stats_init(&af_inet6_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
+		u64_stats_init(&af_inet6_stats->syncp);
+#endif
+	}
+
+
 	if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
 			  sizeof(struct icmpv6_mib),
 			  __alignof__(struct icmpv6_mib)) < 0)
@@ -870,8 +879,6 @@
 	if (err)
 		goto out_sock_register_fail;
 
-	tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
 	/*
 	 *	ipngwg API draft makes clear that the correct semantics
 	 *	for TCP and UDP is to consider one TCP and UDP instance
@@ -1028,52 +1035,4 @@
 }
 module_init(inet6_init);
 
-static void __exit inet6_exit(void)
-{
-	if (disable_ipv6_mod)
-		return;
-
-	/* First of all disallow new sockets creation. */
-	sock_unregister(PF_INET6);
-	/* Disallow any further netlink messages */
-	rtnl_unregister_all(PF_INET6);
-
-	udpv6_exit();
-	udplitev6_exit();
-	tcpv6_exit();
-
-	/* Cleanup code parts. */
-	ipv6_packet_cleanup();
-	ipv6_frag_exit();
-	ipv6_exthdrs_exit();
-	addrconf_cleanup();
-	ip6_flowlabel_cleanup();
-	ndisc_late_cleanup();
-	ip6_route_cleanup();
-#ifdef CONFIG_PROC_FS
-
-	/* Cleanup code parts. */
-	if6_proc_exit();
-	ipv6_misc_proc_exit();
-	udplite6_proc_exit();
-	raw6_proc_exit();
-#endif
-	ipv6_netfilter_fini();
-	ipv6_stub = NULL;
-	igmp6_cleanup();
-	ndisc_cleanup();
-	ip6_mr_cleanup();
-	icmpv6_cleanup();
-	rawv6_exit();
-
-	unregister_pernet_subsys(&inet6_net_ops);
-	proto_unregister(&rawv6_prot);
-	proto_unregister(&udplitev6_prot);
-	proto_unregister(&udpv6_prot);
-	proto_unregister(&tcpv6_prot);
-
-	rcu_barrier(); /* Wait for completion of call_rcu()'s */
-}
-module_exit(inet6_exit);
-
 MODULE_ALIAS_NETPROTO(PF_INET6);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 48b6bd2..a454b0f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -107,16 +107,16 @@
 		if (err)
 			goto out;
 
-		ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
+		ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
 
 		if (ipv6_addr_any(&np->saddr) ||
 		    ipv6_mapped_addr_any(&np->saddr))
 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 
-		if (ipv6_addr_any(&np->rcv_saddr) ||
-		    ipv6_mapped_addr_any(&np->rcv_saddr)) {
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
+		    ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-					       &np->rcv_saddr);
+					       &sk->sk_v6_rcv_saddr);
 			if (sk->sk_prot->rehash)
 				sk->sk_prot->rehash(sk);
 		}
@@ -145,7 +145,7 @@
 		}
 	}
 
-	np->daddr = *daddr;
+	sk->sk_v6_daddr = *daddr;
 	np->flow_label = fl6.flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
@@ -156,7 +156,7 @@
 	 */
 
 	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = np->daddr;
+	fl6.daddr = sk->sk_v6_daddr;
 	fl6.saddr = np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
@@ -183,16 +183,16 @@
 	if (ipv6_addr_any(&np->saddr))
 		np->saddr = fl6.saddr;
 
-	if (ipv6_addr_any(&np->rcv_saddr)) {
-		np->rcv_saddr = fl6.saddr;
+	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+		sk->sk_v6_rcv_saddr = fl6.saddr;
 		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 		if (sk->sk_prot->rehash)
 			sk->sk_prot->rehash(sk);
 	}
 
 	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-		      &np->daddr : NULL,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
 		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
 		      &np->saddr :
@@ -883,11 +883,10 @@
 void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 			     __u16 srcp, __u16 destp, int bucket)
 {
-	struct ipv6_pinfo *np = inet6_sk(sp);
 	const struct in6_addr *dest, *src;
 
-	dest  = &np->daddr;
-	src   = &np->rcv_saddr;
+	dest  = &sp->sk_v6_daddr;
+	src   = &sp->sk_v6_rcv_saddr;
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e67e63f..b8719df 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -164,10 +164,9 @@
 	u8 *iv;
 	u8 *tail;
 	__be32 *seqhi;
-	struct esp_data *esp = x->data;
 
 	/* skb is pure payload to encrypt */
-	aead = esp->aead;
+	aead = x->data;
 	alen = crypto_aead_authsize(aead);
 
 	tfclen = 0;
@@ -181,8 +180,6 @@
 	}
 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	if (esp->padlen)
-		clen = ALIGN(clen, esp->padlen);
 	plen = clen - skb->len - tfclen;
 
 	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
@@ -271,8 +268,7 @@
 static int esp_input_done2(struct sk_buff *skb, int err)
 {
 	struct xfrm_state *x = xfrm_input_state(skb);
-	struct esp_data *esp = x->data;
-	struct crypto_aead *aead = esp->aead;
+	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 	int elen = skb->len - hlen;
@@ -325,8 +321,7 @@
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ip_esp_hdr *esph;
-	struct esp_data *esp = x->data;
-	struct crypto_aead *aead = esp->aead;
+	struct crypto_aead *aead = x->data;
 	struct aead_request *req;
 	struct sk_buff *trailer;
 	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
@@ -414,9 +409,8 @@
 
 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
-	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
-	u32 align = max_t(u32, blksize, esp->padlen);
+	struct crypto_aead *aead = x->data;
+	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	unsigned int net_adj;
 
 	if (x->props.mode != XFRM_MODE_TUNNEL)
@@ -424,8 +418,8 @@
 	else
 		net_adj = 0;
 
-	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + net_adj - 2;
+	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+		 net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -454,18 +448,16 @@
 
 static void esp6_destroy(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = x->data;
 
-	if (!esp)
+	if (!aead)
 		return;
 
-	crypto_free_aead(esp->aead);
-	kfree(esp);
+	crypto_free_aead(aead);
 }
 
 static int esp_init_aead(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
 	struct crypto_aead *aead;
 	int err;
 
@@ -474,7 +466,7 @@
 	if (IS_ERR(aead))
 		goto error;
 
-	esp->aead = aead;
+	x->data = aead;
 
 	err = crypto_aead_setkey(aead, x->aead->alg_key,
 				 (x->aead->alg_key_len + 7) / 8);
@@ -491,7 +483,6 @@
 
 static int esp_init_authenc(struct xfrm_state *x)
 {
-	struct esp_data *esp = x->data;
 	struct crypto_aead *aead;
 	struct crypto_authenc_key_param *param;
 	struct rtattr *rta;
@@ -526,7 +517,7 @@
 	if (IS_ERR(aead))
 		goto error;
 
-	esp->aead = aead;
+	x->data = aead;
 
 	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
 		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
@@ -581,7 +572,6 @@
 
 static int esp6_init_state(struct xfrm_state *x)
 {
-	struct esp_data *esp;
 	struct crypto_aead *aead;
 	u32 align;
 	int err;
@@ -589,11 +579,7 @@
 	if (x->encap)
 		return -EINVAL;
 
-	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
-	if (esp == NULL)
-		return -ENOMEM;
-
-	x->data = esp;
+	x->data = NULL;
 
 	if (x->aead)
 		err = esp_init_aead(x);
@@ -603,9 +589,7 @@
 	if (err)
 		goto error;
 
-	aead = esp->aead;
-
-	esp->padlen = 0;
+	aead = x->data;
 
 	x->props.header_len = sizeof(struct ip_esp_hdr) +
 			      crypto_aead_ivsize(aead);
@@ -625,9 +609,7 @@
 	}
 
 	align = ALIGN(crypto_aead_blocksize(aead), 4);
-	if (esp->padlen)
-		align = max_t(u32, align, esp->padlen);
-	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
 
 error:
 	return err;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e4311cb..77bb8af 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -70,20 +70,20 @@
 				      struct flowi6 *fl6,
 				      const struct request_sock *req)
 {
-	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
 
 	memset(fl6, 0, sizeof(*fl6));
 	fl6->flowi6_proto = IPPROTO_TCP;
-	fl6->daddr = treq->rmt_addr;
+	fl6->daddr = ireq->ir_v6_rmt_addr;
 	final_p = fl6_update_dst(fl6, np->opt, &final);
-	fl6->saddr = treq->loc_addr;
-	fl6->flowi6_oif = treq->iif;
+	fl6->saddr = ireq->ir_v6_loc_addr;
+	fl6->flowi6_oif = ireq->ir_iif;
 	fl6->flowi6_mark = sk->sk_mark;
-	fl6->fl6_dport = inet_rsk(req)->rmt_port;
-	fl6->fl6_sport = inet_rsk(req)->loc_port;
+	fl6->fl6_dport = ireq->ir_rmt_port;
+	fl6->fl6_sport = htons(ireq->ir_num);
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
 	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -129,13 +129,13 @@
 						     lopt->nr_table_entries)];
 	     (req = *prev) != NULL;
 	     prev = &req->dl_next) {
-		const struct inet6_request_sock *treq = inet6_rsk(req);
+		const struct inet_request_sock *ireq = inet_rsk(req);
 
-		if (inet_rsk(req)->rmt_port == rport &&
+		if (ireq->ir_rmt_port == rport &&
 		    req->rsk_ops->family == AF_INET6 &&
-		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
-		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
-		    (!treq->iif || treq->iif == iif)) {
+		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
+		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
+		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
 			WARN_ON(req->sk != NULL);
 			*prevp = prev;
 			return req;
@@ -153,8 +153,8 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
-				      inet_rsk(req)->rmt_port,
+	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
+				      inet_rsk(req)->ir_rmt_port,
 				      lopt->hash_rnd, lopt->nr_table_entries);
 
 	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -165,11 +165,10 @@
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 {
-	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
 	sin6->sin6_family = AF_INET6;
-	sin6->sin6_addr = np->daddr;
+	sin6->sin6_addr = sk->sk_v6_daddr;
 	sin6->sin6_port	= inet_sk(sk)->inet_dport;
 	/* We do not store received flowlabel for TCP */
 	sin6->sin6_flowinfo = 0;
@@ -203,7 +202,7 @@
 
 	memset(fl6, 0, sizeof(*fl6));
 	fl6->flowi6_proto = sk->sk_protocol;
-	fl6->daddr = np->daddr;
+	fl6->daddr = sk->sk_v6_daddr;
 	fl6->saddr = np->saddr;
 	fl6->flowlabel = np->flow_label;
 	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
@@ -245,7 +244,7 @@
 	skb_dst_set_noref(skb, dst);
 
 	/* Restore final destination back after routing done */
-	fl6.daddr = np->daddr;
+	fl6.daddr = sk->sk_v6_daddr;
 
 	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 	rcu_read_unlock();
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 066640e..262e13c 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -23,6 +23,39 @@
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet6_ehashfn(struct net *net,
+				  const struct in6_addr *laddr,
+				  const u16 lport,
+				  const struct in6_addr *faddr,
+				  const __be16 fport)
+{
+	static u32 inet6_ehash_secret __read_mostly;
+	static u32 ipv6_hash_secret __read_mostly;
+
+	u32 lhash, fhash;
+
+	net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
+	net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+
+	lhash = (__force u32)laddr->s6_addr32[3];
+	fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+
+	return __inet6_ehashfn(lhash, lport, fhash, fport,
+			       inet6_ehash_secret + net_hash_mix(net));
+}
+
+static int inet6_sk_ehashfn(const struct sock *sk)
+{
+	const struct inet_sock *inet = inet_sk(sk);
+	const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
+	const struct in6_addr *faddr = &sk->sk_v6_daddr;
+	const __u16 lport = inet->inet_num;
+	const __be16 fport = inet->inet_dport;
+	struct net *net = sock_net(sk);
+
+	return inet6_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -89,43 +122,22 @@
 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 		if (sk->sk_hash != hash)
 			continue;
-		if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
-			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-				goto begintw;
-			if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
-						  ports, dif))) {
-				sock_put(sk);
-				goto begin;
-			}
-		goto out;
+		if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
+			continue;
+		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+			goto out;
+
+		if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
+			sock_gen_put(sk);
+			goto begin;
 		}
+		goto found;
 	}
 	if (get_nulls_value(node) != slot)
 		goto begin;
-
-begintw:
-	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-		if (sk->sk_hash != hash)
-			continue;
-		if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
-					  ports, dif))) {
-			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-				sk = NULL;
-				goto out;
-			}
-			if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
-						     ports, dif))) {
-				inet_twsk_put(inet_twsk(sk));
-				goto begintw;
-			}
-			goto out;
-		}
-	}
-	if (get_nulls_value(node) != slot)
-		goto begintw;
-	sk = NULL;
 out:
+	sk = NULL;
+found:
 	rcu_read_unlock();
 	return sk;
 }
@@ -140,11 +152,10 @@
 
 	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
 	    sk->sk_family == PF_INET6) {
-		const struct ipv6_pinfo *np = inet6_sk(sk);
 
 		score = 1;
-		if (!ipv6_addr_any(&np->rcv_saddr)) {
-			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+		if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 				return -1;
 			score++;
 		}
@@ -236,9 +247,8 @@
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
 	struct inet_sock *inet = inet_sk(sk);
-	const struct ipv6_pinfo *np = inet6_sk(sk);
-	const struct in6_addr *daddr = &np->rcv_saddr;
-	const struct in6_addr *saddr = &np->daddr;
+	const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
+	const struct in6_addr *saddr = &sk->sk_v6_daddr;
 	const int dif = sk->sk_bound_dev_if;
 	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
 	struct net *net = sock_net(sk);
@@ -248,38 +258,28 @@
 	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
-	struct inet_timewait_sock *tw;
+	struct inet_timewait_sock *tw = NULL;
 	int twrefcnt = 0;
 
 	spin_lock(lock);
 
-	/* Check TIME-WAIT sockets first. */
-	sk_nulls_for_each(sk2, node, &head->twchain) {
-		if (sk2->sk_hash != hash)
-			continue;
-
-		if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
-					  ports, dif))) {
-			tw = inet_twsk(sk2);
-			if (twsk_unique(sk, sk2, twp))
-				goto unique;
-			else
-				goto not_unique;
-		}
-	}
-	tw = NULL;
-
-	/* And established part... */
 	sk_nulls_for_each(sk2, node, &head->chain) {
 		if (sk2->sk_hash != hash)
 			continue;
-		if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
+
+		if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) {
+			if (sk2->sk_state == TCP_TIME_WAIT) {
+				tw = inet_twsk(sk2);
+				if (twsk_unique(sk, sk2, twp))
+					break;
+			}
 			goto not_unique;
+		}
 	}
 
-unique:
 	/* Must record num and sport now. Otherwise we will see
-	 * in hash table socket with a funny identity. */
+	 * a socket with a funny identity in the hash table.
+	 */
 	inet->inet_num = lport;
 	inet->inet_sport = htons(lport);
 	sk->sk_hash = hash;
@@ -312,9 +312,9 @@
 static inline u32 inet6_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	const struct ipv6_pinfo *np = inet6_sk(sk);
-	return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32,
-					  np->daddr.s6_addr32,
+
+	return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
+					  sk->sk_v6_daddr.s6_addr32,
 					  inet->inet_dport);
 }
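
For reference, the double-secret hashing added at the top of this file can be
modeled outside the kernel. In the sketch below, mix32() is a stand-in and is
NOT the kernel's __inet6_ehashfn, and the secrets are plain parameters rather
than net_get_random_once() state; what it preserves is the structure of
inet6_ehashfn() above: the local address contributes only its low 32 bits,
while the foreign address is hashed in full under its own secret.

    #include <stdint.h>

    struct in6_model { uint32_t s6_addr32[4]; };

    /* hypothetical jhash-style mixer, for illustration only */
    static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c)
    {
        a ^= b * 0x9e3779b9u;
        a = (a << 13) | (a >> 19);
        return a ^ (c * 0x85ebca6bu);
    }

    static uint32_t ehash_model(const struct in6_model *laddr, uint16_t lport,
                                const struct in6_model *faddr, uint16_t fport,
                                uint32_t ehash_secret, uint32_t addr_secret)
    {
        uint32_t lhash = laddr->s6_addr32[3];  /* low 32 bits only */
        uint32_t fhash = mix32(faddr->s6_addr32[0] ^ faddr->s6_addr32[1],
                               faddr->s6_addr32[2] ^ faddr->s6_addr32[3],
                               addr_secret);   /* whole foreign address */

        return mix32(lhash ^ lport, fhash ^ fport, ehash_secret);
    }
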
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5bec666..5550a81 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1529,25 +1529,6 @@
 	fib6_walk(&c.w);
 }
 
-void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
-		    int prune, void *arg)
-{
-	struct fib6_table *table;
-	struct hlist_head *head;
-	unsigned int h;
-
-	rcu_read_lock();
-	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-			read_lock_bh(&table->tb6_lock);
-			fib6_clean_tree(net, &table->tb6_root,
-					func, prune, arg);
-			read_unlock_bh(&table->tb6_lock);
-		}
-	}
-	rcu_read_unlock();
-}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
 		    int prune, void *arg)
 {
@@ -1782,3 +1763,189 @@
 	unregister_pernet_subsys(&fib6_net_ops);
 	kmem_cache_destroy(fib6_node_kmem);
 }
+
+#ifdef CONFIG_PROC_FS
+
+struct ipv6_route_iter {
+	struct seq_net_private p;
+	struct fib6_walker_t w;
+	loff_t skip;
+	struct fib6_table *tbl;
+	__u32 sernum;
+};
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+	struct rt6_info *rt = v;
+	struct ipv6_route_iter *iter = seq->private;
+
+	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+#ifdef CONFIG_IPV6_SUBTREES
+	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
+#else
+	seq_puts(seq, "00000000000000000000000000000000 00 ");
+#endif
+	if (rt->rt6i_flags & RTF_GATEWAY)
+		seq_printf(seq, "%pi6", &rt->rt6i_gateway);
+	else
+		seq_puts(seq, "00000000000000000000000000000000");
+
+	seq_printf(seq, " %08x %08x %08x %08x %8s\n",
+		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+		   rt->dst.__use, rt->rt6i_flags,
+		   rt->dst.dev ? rt->dst.dev->name : "");
+	iter->w.leaf = NULL;
+	return 0;
+}
+
+static int ipv6_route_yield(struct fib6_walker_t *w)
+{
+	struct ipv6_route_iter *iter = w->args;
+
+	if (!iter->skip)
+		return 1;
+
+	do {
+		iter->w.leaf = iter->w.leaf->dst.rt6_next;
+		iter->skip--;
+		if (!iter->skip && iter->w.leaf)
+			return 1;
+	} while (iter->w.leaf);
+
+	return 0;
+}
+
+static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
+{
+	memset(&iter->w, 0, sizeof(iter->w));
+	iter->w.func = ipv6_route_yield;
+	iter->w.root = &iter->tbl->tb6_root;
+	iter->w.state = FWS_INIT;
+	iter->w.node = iter->w.root;
+	iter->w.args = iter;
+	iter->sernum = iter->w.root->fn_sernum;
+	INIT_LIST_HEAD(&iter->w.lh);
+	fib6_walker_link(&iter->w);
+}
+
+static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+						    struct net *net)
+{
+	unsigned int h;
+	struct hlist_node *node;
+
+	if (tbl) {
+		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+	} else {
+		h = 0;
+		node = NULL;
+	}
+
+	while (!node && h < FIB6_TABLE_HASHSZ) {
+		node = rcu_dereference_bh(
+			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+	}
+	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+}
+
+static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+{
+	if (iter->sernum != iter->w.root->fn_sernum) {
+		iter->sernum = iter->w.root->fn_sernum;
+		iter->w.state = FWS_INIT;
+		iter->w.node = iter->w.root;
+		WARN_ON(iter->w.skip);
+		iter->w.skip = iter->w.count;
+	}
+}
+
+static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int r;
+	struct rt6_info *n;
+	struct net *net = seq_file_net(seq);
+	struct ipv6_route_iter *iter = seq->private;
+
+	if (!v)
+		goto iter_table;
+
+	n = ((struct rt6_info *)v)->dst.rt6_next;
+	if (n) {
+		++*pos;
+		return n;
+	}
+
+iter_table:
+	ipv6_route_check_sernum(iter);
+	read_lock(&iter->tbl->tb6_lock);
+	r = fib6_walk_continue(&iter->w);
+	read_unlock(&iter->tbl->tb6_lock);
+	if (r > 0) {
+		if (v)
+			++*pos;
+		return iter->w.leaf;
+	} else if (r < 0) {
+		fib6_walker_unlink(&iter->w);
+		return NULL;
+	}
+	fib6_walker_unlink(&iter->w);
+
+	iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
+	if (!iter->tbl)
+		return NULL;
+
+	ipv6_route_seq_setup_walk(iter);
+	goto iter_table;
+}
+
+static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU_BH)
+{
+	struct net *net = seq_file_net(seq);
+	struct ipv6_route_iter *iter = seq->private;
+
+	rcu_read_lock_bh();
+	iter->tbl = ipv6_route_seq_next_table(NULL, net);
+	iter->skip = *pos;
+
+	if (iter->tbl) {
+		ipv6_route_seq_setup_walk(iter);
+		return ipv6_route_seq_next(seq, NULL, pos);
+	} else {
+		return NULL;
+	}
+}
+
+static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+{
+	struct fib6_walker_t *w = &iter->w;
+
+	return w->node && !(w->state == FWS_U && w->node == w->root);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+	__releases(RCU_BH)
+{
+	struct ipv6_route_iter *iter = seq->private;
+
+	if (ipv6_route_iter_active(iter))
+		fib6_walker_unlink(&iter->w);
+
+	rcu_read_unlock_bh();
+}
+
+static const struct seq_operations ipv6_route_seq_ops = {
+	.start	= ipv6_route_seq_start,
+	.next	= ipv6_route_seq_next,
+	.stop	= ipv6_route_seq_stop,
+	.show	= ipv6_route_seq_show
+};
+
+int ipv6_route_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &ipv6_route_seq_ops,
+			    sizeof(struct ipv6_route_iter));
+}
+
+#endif /* CONFIG_PROC_FS */
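
Each line the new iterator emits follows the seq_printf() calls in
ipv6_route_seq_show() above: destination and source as 32 bare hex digits
plus a two-digit prefix length each, the gateway, four %08x words (metric,
refcount, use count, flags) and the device name. A small user-space sketch
that parses it (error handling elided; field widths assumed from the format
strings above):

    #include <stdio.h>

    int main(void)
    {
        char dst[33], src[33], gw[33], dev[17];
        unsigned int dplen, splen, metric, refcnt, use, flags;
        FILE *f = fopen("/proc/net/ipv6_route", "r");

        if (!f)
            return 1;
        while (fscanf(f, "%32s %x %32s %x %32s %x %x %x %x %16s",
                      dst, &dplen, src, &splen, gw,
                      &metric, &refcnt, &use, &flags, dev) == 10)
            printf("%s/%u via %s dev %s flags %08x\n",
                   dst, dplen, gw, dev, flags);
        fclose(f);
        return 0;
    }
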
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 46e8843..e7fb710 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -41,7 +41,7 @@
 #define FL_MIN_LINGER	6	/* Minimal linger. Set to the six seconds
 				   specified in the old IPv6 RFC; a
 				   reasonable value at the time.
 				 */
-#define FL_MAX_LINGER	60	/* Maximal linger timeout */
+#define FL_MAX_LINGER	150	/* Maximal linger timeout */
 
 /* FL hash table */
 
@@ -345,6 +345,8 @@
 	expires = check_linger(expires);
 	if (!expires)
 		return -EPERM;
+
+	spin_lock_bh(&ip6_fl_lock);
 	fl->lastuse = jiffies;
 	if (time_before(fl->linger, linger))
 		fl->linger = linger;
@@ -352,6 +354,8 @@
 		expires = fl->linger;
 	if (time_before(fl->expires, fl->lastuse + expires))
 		fl->expires = fl->lastuse + expires;
+	spin_unlock_bh(&ip6_fl_lock);
+
 	return 0;
 }
 
@@ -453,8 +457,10 @@
 	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
 		return 0;
 
+	rcu_read_lock_bh();
 	for_each_sk_fl_rcu(np, sfl)
 		count++;
+	rcu_read_unlock_bh();
 
 	if (room <= 0 ||
 	    ((count >= FL_MAX_PER_SOCK ||
@@ -465,34 +471,6 @@
 	return 0;
 }
 
-static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
-{
-	if (h1 == h2)
-		return false;
-	if (h1 == NULL || h2 == NULL)
-		return true;
-	if (h1->hdrlen != h2->hdrlen)
-		return true;
-	return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
-}
-
-static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
-{
-	if (o1 == o2)
-		return false;
-	if (o1 == NULL || o2 == NULL)
-		return true;
-	if (o1->opt_nflen != o2->opt_nflen)
-		return true;
-	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
-		return true;
-	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
-		return true;
-	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
-		return true;
-	return false;
-}
-
 static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
 		struct ip6_flowlabel *fl)
 {
@@ -503,6 +481,32 @@
 	spin_unlock_bh(&ip6_sk_fl_lock);
 }
 
+int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq)
+{
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_fl_socklist *sfl;
+
+	rcu_read_lock_bh();
+
+	for_each_sk_fl_rcu(np, sfl) {
+		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+			spin_lock_bh(&ip6_fl_lock);
+			freq->flr_label = sfl->fl->label;
+			freq->flr_dst = sfl->fl->dst;
+			freq->flr_share = sfl->fl->share;
+			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
+			freq->flr_linger = sfl->fl->linger / HZ;
+
+			spin_unlock_bh(&ip6_fl_lock);
+			rcu_read_unlock_bh();
+			return 0;
+		}
+	}
+	rcu_read_unlock_bh();
+
+	return -ENOENT;
+}
+
 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 {
 	int uninitialized_var(err);
@@ -603,11 +607,6 @@
 				     uid_eq(fl1->owner.uid, fl->owner.uid)))
 					goto release;
 
-				err = -EINVAL;
-				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
-				    ipv6_opt_cmp(fl1->opt, fl->opt))
-					goto release;
-
 				err = -ENOMEM;
 				if (sfl1 == NULL)
 					goto release;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index bf4a9a0..8acb286 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1252,6 +1252,7 @@
 static int ip6gre_tunnel_init(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
+	int i;
 
 	tunnel = netdev_priv(dev);
 
@@ -1269,6 +1270,13 @@
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ip6gre_tunnel_stats;
+		ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ip6gre_tunnel_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -1449,6 +1457,7 @@
 static int ip6gre_tap_init(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
+	int i;
 
 	tunnel = netdev_priv(dev);
 
@@ -1462,6 +1471,12 @@
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ip6gre_tap_stats;
+		ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ip6gre_tap_stats->syncp);
+	}
+
 	return 0;
 }
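
The two loops added above (and the matching one in ip6_tunnel.c below) are
instances of one idiom: seed the u64_stats seqcount of every per-CPU stats
slot before the device can transmit. Reduced to a sketch, assuming a
pcpu_tstats-like structure with a syncp member as used in this file:

    static int init_pcpu_tstats(struct net_device *dev)
    {
        int i;

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
            return -ENOMEM;

        for_each_possible_cpu(i) {
            struct pcpu_tstats *s = per_cpu_ptr(dev->tstats, i);

            /* seeds the seqcount that u64_stats_fetch_begin() readers
             * will later synchronize against */
            u64_stats_init(&s->syncp);
        }
        return 0;
    }
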
 
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index d82de72..4b85169 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -66,7 +66,6 @@
 	__skb_pull(skb, sizeof(*ipv6h));
 	err = -EPROTONOSUPPORT;
 
-	rcu_read_lock();
 	ops = rcu_dereference(inet6_offloads[
 		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
 
@@ -74,7 +73,6 @@
 		skb_reset_transport_header(skb);
 		err = ops->callbacks.gso_send_check(skb);
 	}
-	rcu_read_unlock();
 
 out:
 	return err;
@@ -92,46 +90,58 @@
 	u8 *prevhdr;
 	int offset = 0;
 	bool tunnel;
+	int nhoff;
 
 	if (unlikely(skb_shinfo(skb)->gso_type &
 		     ~(SKB_GSO_UDP |
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_IPIP |
+		       SKB_GSO_SIT |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_MPLS |
 		       SKB_GSO_TCPV6 |
 		       0)))
 		goto out;
 
+	skb_reset_network_header(skb);
+	nhoff = skb_network_header(skb) - skb_mac_header(skb);
 	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
 		goto out;
 
-	tunnel = skb->encapsulation;
+	tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+	if (tunnel)
+		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
+
 	ipv6h = ipv6_hdr(skb);
 	__skb_pull(skb, sizeof(*ipv6h));
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
-	rcu_read_lock();
+
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->callbacks.gso_segment(skb, features);
 	}
-	rcu_read_unlock();
 
 	if (IS_ERR(segs))
 		goto out;
 
 	for (skb = segs; skb; skb = skb->next) {
-		ipv6h = ipv6_hdr(skb);
-		ipv6h->payload_len = htons(skb->len - skb->mac_len -
-					   sizeof(*ipv6h));
+		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
+		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+		if (tunnel) {
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}
+		skb->network_header = (u8 *)ipv6h - skb->head;
+
 		if (!tunnel && proto == IPPROTO_UDP) {
 			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-			fptr = (struct frag_hdr *)(skb_network_header(skb) +
-				unfrag_ip6hlen);
+			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
 			fptr->frag_off = htons(offset);
 			if (skb->next != NULL)
 				fptr->frag_off |= htons(IP6_MF);
@@ -267,6 +277,13 @@
 	},
 };
 
+static const struct net_offload sit_offload = {
+	.callbacks = {
+		.gso_send_check = ipv6_gso_send_check,
+		.gso_segment	= ipv6_gso_segment,
+	},
+};
+
 static int __init ipv6_offload_init(void)
 {
 
@@ -278,6 +295,9 @@
 		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
 
 	dev_add_offload(&ipv6_packet_offload);
+
+	inet_add_offload(&sit_offload, IPPROTO_IPV6);
+
 	return 0;
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 91fb4e8..59df872 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -125,7 +125,8 @@
 static int ip6_finish_output(struct sk_buff *skb)
 {
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
-	    dst_allfrag(skb_dst(skb)))
+	    dst_allfrag(skb_dst(skb)) ||
+	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 		return ip6_fragment(skb, ip6_finish_output2);
 	else
 		return ip6_finish_output2(skb);
@@ -909,7 +910,7 @@
 
 out_err_release:
 	if (err == -ENETUNREACH)
-		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
+		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
 	dst_release(*dst);
 	*dst = NULL;
 	return err;
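
Isolated for clarity, the output decision above now reads as the predicate
below (a sketch only; frag_max_size is recorded by netfilter defrag when the
packet originally arrived as fragments, so the reassembled skb is
re-fragmented even when it would fit the route MTU):

    static bool ip6_must_fragment(struct sk_buff *skb)
    {
        return (skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
               dst_allfrag(skb_dst(skb)) ||
               (IP6CB(skb)->frag_max_size &&
                skb->len > IP6CB(skb)->frag_max_size);
    }
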
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 583b77e..df1fa58 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1494,12 +1494,19 @@
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+	int i;
 
 	t->dev = dev;
 	t->net = dev_net(dev);
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ip6_tnl_stats;
+		ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ip6_tnl_stats->syncp);
+	}
 	return 0;
 }
 
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
new file mode 100644
index 0000000..ed94ba6
--- /dev/null
+++ b/net/ipv6/ip6_vti.c
@@ -0,0 +1,1056 @@
+/*
+ *	IPv6 virtual tunneling interface
+ *
+ *	Copyright (C) 2013 secunet Security Networks AG
+ *
+ *	Author:
+ *	Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ *	Based on:
+ *	net/ipv6/ip6_tunnel.c
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/icmp.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmpv6.h>
+#include <linux/init.h>
+#include <linux/route.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+	return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+static int vti6_dev_init(struct net_device *dev);
+static void vti6_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops vti6_link_ops __read_mostly;
+
+static int vti6_net_id __read_mostly;
+struct vti6_net {
+	/* the vti6 tunnel fallback device */
+	struct net_device *fb_tnl_dev;
+	/* lists for storing tunnels in use */
+	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+	struct ip6_tnl __rcu *tnls_wc[1];
+	struct ip6_tnl __rcu **tnls[2];
+};
+
+static struct net_device_stats *vti6_get_stats(struct net_device *dev)
+{
+	struct pcpu_tstats sum = { 0 };
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+		sum.rx_packets += tstats->rx_packets;
+		sum.rx_bytes   += tstats->rx_bytes;
+		sum.tx_packets += tstats->tx_packets;
+		sum.tx_bytes   += tstats->tx_bytes;
+	}
+	dev->stats.rx_packets = sum.rx_packets;
+	dev->stats.rx_bytes   = sum.rx_bytes;
+	dev->stats.tx_packets = sum.tx_packets;
+	dev->stats.tx_bytes   = sum.tx_bytes;
+	return &dev->stats;
+}
+
+#define for_each_vti6_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/**
+ * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
+ *   @net: network namespace
+ *   @remote: the address of the tunnel exit-point
+ *   @local: the address of the tunnel entry-point
+ *
+ * Return:
+ *   tunnel matching given end-points if found,
+ *   else fallback tunnel if its device is up,
+ *   else %NULL
+ **/
+static struct ip6_tnl *
+vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
+		const struct in6_addr *local)
+{
+	unsigned int hash = HASH(remote, local);
+	struct ip6_tnl *t;
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    (t->dev->flags & IFF_UP))
+			return t;
+	}
+	t = rcu_dereference(ip6n->tnls_wc[0]);
+	if (t && (t->dev->flags & IFF_UP))
+		return t;
+
+	return NULL;
+}
+
+/**
+ * vti6_tnl_bucket - get head of list matching given tunnel parameters
+ *   @p: parameters containing tunnel end-points
+ *
+ * Description:
+ *   vti6_tnl_bucket() returns the head of the list matching the
+ *   &struct in6_addr entries laddr and raddr in @p.
+ *
+ * Return: head of IPv6 tunnel list
+ **/
+static struct ip6_tnl __rcu **
+vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
+{
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
+	unsigned int h = 0;
+	int prio = 0;
+
+	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+		prio = 1;
+		h = HASH(remote, local);
+	}
+	return &ip6n->tnls[prio][h];
+}
+
+static void
+vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+	struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);
+
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+	rcu_assign_pointer(*tp, t);
+}
+
+static void
+vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+	struct ip6_tnl __rcu **tp;
+	struct ip6_tnl *iter;
+
+	for (tp = vti6_tnl_bucket(ip6n, &t->parms);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
+			break;
+		}
+	}
+}
+
+static void vti6_dev_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
+static int vti6_tnl_create2(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+	int err;
+
+	err = vti6_dev_init(dev);
+	if (err < 0)
+		goto out;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out;
+
+	strcpy(t->parms.name, dev->name);
+	dev->rtnl_link_ops = &vti6_link_ops;
+
+	dev_hold(dev);
+	vti6_tnl_link(ip6n, t);
+
+	return 0;
+
+out:
+	return err;
+}
+
+static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+{
+	struct net_device *dev;
+	struct ip6_tnl *t;
+	char name[IFNAMSIZ];
+	int err;
+
+	if (p->name[0])
+		strlcpy(name, p->name, IFNAMSIZ);
+	else
+		sprintf(name, "ip6_vti%%d");
+
+	dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+	if (dev == NULL)
+		goto failed;
+
+	dev_net_set(dev, net);
+
+	t = netdev_priv(dev);
+	t->parms = *p;
+	t->net = dev_net(dev);
+
+	err = vti6_tnl_create2(dev);
+	if (err < 0)
+		goto failed_free;
+
+	return t;
+
+failed_free:
+	vti6_dev_free(dev);
+failed:
+	return NULL;
+}
+
+/**
+ * vti6_locate - find or create tunnel matching given parameters
+ *   @net: network namespace
+ *   @p: tunnel parameters
+ *   @create: != 0 if a new tunnel may be created when no match is found
+ *
+ * Description:
+ *   vti6_locate() first tries to locate an existing tunnel
+ *   based on @p. If this is unsuccessful, but @create is set, a new
+ *   tunnel device is created and registered for use.
+ *
+ * Return:
+ *   matching tunnel or NULL
+ **/
+static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
+				   int create)
+{
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
+	struct ip6_tnl __rcu **tp;
+	struct ip6_tnl *t;
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	for (tp = vti6_tnl_bucket(ip6n, p);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
+		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_equal(remote, &t->parms.raddr))
+			return t;
+	}
+	if (!create)
+		return NULL;
+	return vti6_tnl_create(net, p);
+}
+
+/**
+ * vti6_dev_uninit - tunnel device uninitializer
+ *   @dev: the device to be destroyed
+ *
+ * Description:
+ *   vti6_dev_uninit() removes tunnel from its list
+ **/
+static void vti6_dev_uninit(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	if (dev == ip6n->fb_tnl_dev)
+		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+	else
+		vti6_tnl_unlink(ip6n, t);
+	ip6_tnl_dst_reset(t);
+	dev_put(dev);
+}
+
+static int vti6_rcv(struct sk_buff *skb)
+{
+	struct ip6_tnl *t;
+	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+	rcu_read_lock();
+
+	if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
+				 &ipv6h->daddr)) != NULL) {
+		struct pcpu_tstats *tstats;
+
+		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
+			rcu_read_unlock();
+			goto discard;
+		}
+
+		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+			rcu_read_unlock();
+			return 0;
+		}
+
+		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+			t->dev->stats.rx_dropped++;
+			rcu_read_unlock();
+			goto discard;
+		}
+
+		tstats = this_cpu_ptr(t->dev->tstats);
+		tstats->rx_packets++;
+		tstats->rx_bytes += skb->len;
+
+		skb->mark = 0;
+		secpath_reset(skb);
+		skb->dev = t->dev;
+
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+	return 1;
+
+discard:
+	kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * vti6_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 otherwise
+ **/
+static inline bool
+vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
+{
+	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/**
+ * vti6_xmit - send a packet
+ *   @skb: the outgoing socket buffer
+ *   @dev: the outgoing tunnel device
+ **/
+static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net *net = dev_net(dev);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net_device_stats *stats = &t->dev->stats;
+	struct dst_entry *dst = NULL, *ndst = NULL;
+	struct flowi6 fl6;
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct net_device *tdev;
+	int err = -1;
+
+	if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+	    !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+		return err;
+
+	dst = ip6_tnl_dst_check(t);
+	if (!dst) {
+		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+
+		ndst = ip6_route_output(net, NULL, &fl6);
+
+		if (ndst->error)
+			goto tx_err_link_failure;
+		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(&fl6), NULL, 0);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
+			ndst = NULL;
+			goto tx_err_link_failure;
+		}
+		dst = ndst;
+	}
+
+	if (!dst->xfrm || dst->xfrm->props.mode != XFRM_MODE_TUNNEL)
+		goto tx_err_link_failure;
+
+	tdev = dst->dev;
+
+	if (tdev == dev) {
+		stats->collisions++;
+		net_warn_ratelimited("%s: Local routing loop detected!\n",
+				     t->parms.name);
+		goto tx_err_dst_release;
+	}
+
+
+	skb_dst_set_noref(skb, dst);
+
+	ip6tunnel_xmit(skb, dev);
+	if (ndst) {
+		dev->mtu = dst_mtu(ndst);
+		ip6_tnl_dst_store(t, ndst);
+	}
+
+	return 0;
+tx_err_link_failure:
+	stats->tx_carrier_errors++;
+	dst_link_failure(skb);
+tx_err_dst_release:
+	dst_release(ndst);
+	return err;
+}
+
+static netdev_tx_t
+vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net_device_stats *stats = &t->dev->stats;
+	int ret;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IPV6):
+		ret = vti6_xmit(skb, dev);
+		break;
+	default:
+		goto tx_err;
+	}
+
+	if (ret < 0)
+		goto tx_err;
+
+	return NETDEV_TX_OK;
+
+tx_err:
+	stats->tx_errors++;
+	stats->tx_dropped++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static void vti6_link_config(struct ip6_tnl *t)
+{
+	struct dst_entry *dst;
+	struct net_device *dev = t->dev;
+	struct __ip6_tnl_parm *p = &t->parms;
+	struct flowi6 *fl6 = &t->fl.u.ip6;
+
+	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+
+	/* Set up flowi template */
+	fl6->saddr = p->laddr;
+	fl6->daddr = p->raddr;
+	fl6->flowi6_oif = p->link;
+	fl6->flowi6_mark = be32_to_cpu(p->i_key);
+	fl6->flowi6_proto = p->proto;
+	fl6->flowlabel = 0;
+
+	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
+		      IP6_TNL_F_CAP_PER_PACKET);
+	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
+		dev->flags |= IFF_POINTOPOINT;
+	else
+		dev->flags &= ~IFF_POINTOPOINT;
+
+	dev->iflink = p->link;
+
+	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+
+		dst = ip6_route_output(dev_net(dev), NULL, fl6);
+		if (dst->error)
+			return;
+
+		dst = xfrm_lookup(dev_net(dev), dst, flowi6_to_flowi(fl6),
+				  NULL, 0);
+		if (IS_ERR(dst))
+			return;
+
+		if (dst->dev) {
+			dev->hard_header_len = dst->dev->hard_header_len;
+
+			dev->mtu = dst_mtu(dst);
+
+			if (dev->mtu < IPV6_MIN_MTU)
+				dev->mtu = IPV6_MIN_MTU;
+		}
+		dst_release(dst);
+	}
+}
+
+/**
+ * vti6_tnl_change - update the tunnel parameters
+ *   @t: tunnel to be changed
+ *   @p: tunnel configuration parameters
+ *
+ * Description:
+ *   vti6_tnl_change() updates the tunnel parameters
+ **/
+static int
+vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
+{
+	t->parms.laddr = p->laddr;
+	t->parms.raddr = p->raddr;
+	t->parms.link = p->link;
+	t->parms.i_key = p->i_key;
+	t->parms.o_key = p->o_key;
+	t->parms.proto = p->proto;
+	ip6_tnl_dst_reset(t);
+	vti6_link_config(t);
+	return 0;
+}
+
+static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+	struct net *net = dev_net(t->dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+	int err;
+
+	vti6_tnl_unlink(ip6n, t);
+	synchronize_net();
+	err = vti6_tnl_change(t, p);
+	vti6_tnl_link(ip6n, t);
+	netdev_state_change(t->dev);
+	return err;
+}
+
+static void
+vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
+{
+	p->laddr = u->laddr;
+	p->raddr = u->raddr;
+	p->link = u->link;
+	p->i_key = u->i_key;
+	p->o_key = u->o_key;
+	p->proto = u->proto;
+
+	memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+{
+	u->laddr = p->laddr;
+	u->raddr = p->raddr;
+	u->link = p->link;
+	u->i_key = p->i_key;
+	u->o_key = p->o_key;
+	u->proto = p->proto;
+
+	memcpy(u->name, p->name, sizeof(u->name));
+}
+
+/**
+ * vti6_ioctl - configure vti6 tunnels from userspace
+ *   @dev: virtual device associated with tunnel
+ *   @ifr: parameters passed from userspace
+ *   @cmd: command to be performed
+ *
+ * Description:
+ *   vti6_ioctl() is used for managing vti6 tunnels
+ *   from userspace.
+ *
+ *   The possible commands are the following:
+ *     %SIOCGETTUNNEL: get tunnel parameters for device
+ *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
+ *     %SIOCCHGTUNNEL: change tunnel parameters to those given
+ *     %SIOCDELTUNNEL: delete tunnel
+ *
+ *   The fallback device "ip6_vti0", created during module
+ *   initialization, can be used for creating other tunnel devices.
+ *
+ * Return:
+ *   0 on success,
+ *   %-EFAULT if unable to copy data to or from userspace,
+ *   %-EPERM if current process does not have %CAP_NET_ADMIN set,
+ *   %-EINVAL if passed tunnel parameters are invalid,
+ *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
+ *   %-ENODEV if attempting to change or delete a nonexistent device
+ **/
+static int
+vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int err = 0;
+	struct ip6_tnl_parm2 p;
+	struct __ip6_tnl_parm p1;
+	struct ip6_tnl *t = NULL;
+	struct net *net = dev_net(dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	switch (cmd) {
+	case SIOCGETTUNNEL:
+		if (dev == ip6n->fb_tnl_dev) {
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+				err = -EFAULT;
+				break;
+			}
+			vti6_parm_from_user(&p1, &p);
+			t = vti6_locate(net, &p1, 0);
+		} else {
+			memset(&p, 0, sizeof(p));
+		}
+		if (t == NULL)
+			t = netdev_priv(dev);
+		vti6_parm_to_user(&p, &t->parms);
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+			err = -EFAULT;
+		break;
+	case SIOCADDTUNNEL:
+	case SIOCCHGTUNNEL:
+		err = -EPERM;
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			break;
+		err = -EFAULT;
+		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+			break;
+		err = -EINVAL;
+		if (p.proto != IPPROTO_IPV6  && p.proto != 0)
+			break;
+		vti6_parm_from_user(&p1, &p);
+		t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
+		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+			if (t != NULL) {
+				if (t->dev != dev) {
+					err = -EEXIST;
+					break;
+				}
+			} else
+				t = netdev_priv(dev);
+
+			err = vti6_update(t, &p1);
+		}
+		if (t) {
+			err = 0;
+			vti6_parm_to_user(&p, &t->parms);
+			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+				err = -EFAULT;
+
+		} else
+			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+		break;
+	case SIOCDELTUNNEL:
+		err = -EPERM;
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			break;
+
+		if (dev == ip6n->fb_tnl_dev) {
+			err = -EFAULT;
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+				break;
+			err = -ENOENT;
+			vti6_parm_from_user(&p1, &p);
+			t = vti6_locate(net, &p1, 0);
+			if (t == NULL)
+				break;
+			err = -EPERM;
+			if (t->dev == ip6n->fb_tnl_dev)
+				break;
+			dev = t->dev;
+		}
+		err = 0;
+		unregister_netdevice(dev);
+		break;
+	default:
+		err = -EINVAL;
+	}
+	return err;
+}
+
+/**
+ * vti6_change_mtu - change mtu manually for tunnel device
+ *   @dev: virtual device associated with tunnel
+ *   @new_mtu: the new mtu
+ *
+ * Return:
+ *   0 on success,
+ *   %-EINVAL if mtu too small
+ **/
+static int vti6_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < IPV6_MIN_MTU)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops vti6_netdev_ops = {
+	.ndo_uninit	= vti6_dev_uninit,
+	.ndo_start_xmit = vti6_tnl_xmit,
+	.ndo_do_ioctl	= vti6_ioctl,
+	.ndo_change_mtu = vti6_change_mtu,
+	.ndo_get_stats	= vti6_get_stats,
+};
+
+/**
+ * vti6_dev_setup - setup virtual tunnel device
+ *   @dev: virtual device associated with tunnel
+ *
+ * Description:
+ *   Initialize function pointers and device parameters
+ **/
+static void vti6_dev_setup(struct net_device *dev)
+{
+	struct ip6_tnl *t;
+
+	dev->netdev_ops = &vti6_netdev_ops;
+	dev->destructor = vti6_dev_free;
+
+	dev->type = ARPHRD_TUNNEL6;
+	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
+	dev->mtu = ETH_DATA_LEN;
+	t = netdev_priv(dev);
+	dev->flags |= IFF_NOARP;
+	dev->addr_len = sizeof(struct in6_addr);
+	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/**
+ * vti6_dev_init_gen - general initializer for all tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static inline int vti6_dev_init_gen(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	int i;
+
+	t->dev = dev;
+	t->net = dev_net(dev);
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		/* seed each per-CPU syncp, as the ip6_gre and ip6_tnl
+		 * hunks above do */
+		struct pcpu_tstats *vti6_stats = per_cpu_ptr(dev->tstats, i);
+
+		u64_stats_init(&vti6_stats->syncp);
+	}
+	return 0;
+}
+
+/**
+ * vti6_dev_init - initializer for all non fallback tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static int vti6_dev_init(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	int err = vti6_dev_init_gen(dev);
+
+	if (err)
+		return err;
+	vti6_link_config(t);
+	return 0;
+}
+
+/**
+ * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
+ *   @dev: fallback device
+ *
+ * Return: 0
+ **/
+static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+	int err = vti6_dev_init_gen(dev);
+
+	if (err)
+		return err;
+
+	t->parms.proto = IPPROTO_IPV6;
+	dev_hold(dev);
+
+	vti6_link_config(t);
+
+	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+	return 0;
+}
+
+static int vti6_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	return 0;
+}
+
+static void vti6_netlink_parms(struct nlattr *data[],
+			       struct __ip6_tnl_parm *parms)
+{
+	memset(parms, 0, sizeof(*parms));
+
+	if (!data)
+		return;
+
+	if (data[IFLA_VTI_LINK])
+		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+	if (data[IFLA_VTI_LOCAL])
+		nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
+			   sizeof(struct in6_addr));
+
+	if (data[IFLA_VTI_REMOTE])
+		nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
+			   sizeof(struct in6_addr));
+
+	if (data[IFLA_VTI_IKEY])
+		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+	if (data[IFLA_VTI_OKEY])
+		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+}
+
+static int vti6_newlink(struct net *src_net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net *net = dev_net(dev);
+	struct ip6_tnl *nt;
+
+	nt = netdev_priv(dev);
+	vti6_netlink_parms(data, &nt->parms);
+
+	nt->parms.proto = IPPROTO_IPV6;
+
+	if (vti6_locate(net, &nt->parms, 0))
+		return -EEXIST;
+
+	return vti6_tnl_create2(dev);
+}
+
+static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
+			   struct nlattr *data[])
+{
+	struct ip6_tnl *t;
+	struct __ip6_tnl_parm p;
+	struct net *net = dev_net(dev);
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	if (dev == ip6n->fb_tnl_dev)
+		return -EINVAL;
+
+	vti6_netlink_parms(data, &p);
+
+	t = vti6_locate(net, &p, 0);
+
+	if (t) {
+		if (t->dev != dev)
+			return -EEXIST;
+	} else
+		t = netdev_priv(dev);
+
+	return vti6_update(t, &p);
+}
+
+static size_t vti6_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_VTI_LINK */
+		nla_total_size(4) +
+		/* IFLA_VTI_LOCAL */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_VTI_REMOTE */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_VTI_IKEY */
+		nla_total_size(4) +
+		/* IFLA_VTI_OKEY */
+		nla_total_size(4) +
+		0;
+}
+
+static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct ip6_tnl *tunnel = netdev_priv(dev);
+	struct __ip6_tnl_parm *parm = &tunnel->parms;
+
+	if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
+	    nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
+		    &parm->laddr) ||
+	    nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
+		    &parm->raddr) ||
+	    nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
+	    nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
+	[IFLA_VTI_LOCAL]	= { .len = sizeof(struct in6_addr) },
+	[IFLA_VTI_REMOTE]	= { .len = sizeof(struct in6_addr) },
+	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
+	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+	.kind		= "vti6",
+	.maxtype	= IFLA_VTI_MAX,
+	.policy		= vti6_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= vti6_dev_setup,
+	.validate	= vti6_validate,
+	.newlink	= vti6_newlink,
+	.changelink	= vti6_changelink,
+	.get_size	= vti6_get_size,
+	.fill_info	= vti6_fill_info,
+};
+
+static struct xfrm_tunnel_notifier vti6_handler __read_mostly = {
+	.handler	= vti6_rcv,
+	.priority	= 1,
+};
+
+static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
+{
+	int h;
+	struct ip6_tnl *t;
+	LIST_HEAD(list);
+
+	for (h = 0; h < HASH_SIZE; h++) {
+		t = rtnl_dereference(ip6n->tnls_r_l[h]);
+		while (t != NULL) {
+			unregister_netdevice_queue(t->dev, &list);
+			t = rtnl_dereference(t->next);
+		}
+	}
+
+	t = rtnl_dereference(ip6n->tnls_wc[0]);
+	unregister_netdevice_queue(t->dev, &list);
+	unregister_netdevice_many(&list);
+}
+
+static int __net_init vti6_init_net(struct net *net)
+{
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+	struct ip6_tnl *t = NULL;
+	int err;
+
+	ip6n->tnls[0] = ip6n->tnls_wc;
+	ip6n->tnls[1] = ip6n->tnls_r_l;
+
+	err = -ENOMEM;
+	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+					vti6_dev_setup);
+
+	if (!ip6n->fb_tnl_dev)
+		goto err_alloc_dev;
+	dev_net_set(ip6n->fb_tnl_dev, net);
+
+	err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+	if (err < 0)
+		goto err_register;
+
+	err = register_netdev(ip6n->fb_tnl_dev);
+	if (err < 0)
+		goto err_register;
+
+	t = netdev_priv(ip6n->fb_tnl_dev);
+
+	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
+	return 0;
+
+err_register:
+	vti6_dev_free(ip6n->fb_tnl_dev);
+err_alloc_dev:
+	return err;
+}
+
+static void __net_exit vti6_exit_net(struct net *net)
+{
+	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+	rtnl_lock();
+	vti6_destroy_tunnels(ip6n);
+	rtnl_unlock();
+}
+
+static struct pernet_operations vti6_net_ops = {
+	.init = vti6_init_net,
+	.exit = vti6_exit_net,
+	.id   = &vti6_net_id,
+	.size = sizeof(struct vti6_net),
+};
+
+/**
+ * vti6_tunnel_init - register protocol and reserve needed resources
+ *
+ * Return: 0 on success
+ **/
+static int __init vti6_tunnel_init(void)
+{
+	int  err;
+
+	err = register_pernet_device(&vti6_net_ops);
+	if (err < 0)
+		goto out_pernet;
+
+	err = xfrm6_mode_tunnel_input_register(&vti6_handler);
+	if (err < 0) {
+		pr_err("%s: can't register vti6\n", __func__);
+		goto out;
+	}
+	err = rtnl_link_register(&vti6_link_ops);
+	if (err < 0)
+		goto rtnl_link_failed;
+
+	return 0;
+
+rtnl_link_failed:
+	xfrm6_mode_tunnel_input_deregister(&vti6_handler);
+out:
+	unregister_pernet_device(&vti6_net_ops);
+out_pernet:
+	return err;
+}
+
+/**
+ * vti6_tunnel_cleanup - free resources and unregister protocol
+ **/
+static void __exit vti6_tunnel_cleanup(void)
+{
+	rtnl_link_unregister(&vti6_link_ops);
+	if (xfrm6_mode_tunnel_input_deregister(&vti6_handler))
+		pr_info("%s: can't deregister vti6\n", __func__);
+
+	unregister_pernet_device(&vti6_net_ops);
+}
+
+module_init(vti6_tunnel_init);
+module_exit(vti6_tunnel_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti6");
+MODULE_ALIAS_NETDEV("ip6_vti0");
+MODULE_AUTHOR("Steffen Klassert");
+MODULE_DESCRIPTION("IPv6 virtual tunnel interface");
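
The ioctl interface documented at vti6_ioctl() can be exercised from user
space through the fallback device. A hedged sketch (the tunnel name and
addresses are hypothetical; a real tool would normally go through the "vti6"
rtnl_link_ops over netlink instead):

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/if.h>
    #include <linux/if_tunnel.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ip6_tnl_parm2 p;
        struct ifreq ifr;
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        memset(&p, 0, sizeof(p));
        strcpy(p.name, "vti6test");            /* hypothetical name */
        p.proto = IPPROTO_IPV6;
        inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
        inet_pton(AF_INET6, "2001:db8::2", &p.raddr);

        memset(&ifr, 0, sizeof(ifr));
        strcpy(ifr.ifr_name, "ip6_vti0");      /* the fallback device */
        ifr.ifr_ifru.ifru_data = (void *)&p;

        if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
            perror("SIOCADDTUNNEL");
        return 0;
    }
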
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d1e2e8e..1c6ce31 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -174,7 +174,7 @@
 			}
 
 			if (ipv6_only_sock(sk) ||
-			    !ipv6_addr_v4mapped(&np->daddr)) {
+			    !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 				retv = -EADDRNOTAVAIL;
 				break;
 			}
@@ -1011,7 +1011,7 @@
 				struct in6_pktinfo src_info;
 				src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
 					np->sticky_pktinfo.ipi6_ifindex;
-				src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+				src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr;
 				put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
 			}
 			if (np->rxopt.bits.rxhlim) {
@@ -1026,7 +1026,8 @@
 				struct in6_pktinfo src_info;
 				src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
 					np->sticky_pktinfo.ipi6_ifindex;
-				src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+				src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr :
+								     np->sticky_pktinfo.ipi6_addr;
 				put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
 			}
 			if (np->rxopt.bits.rxohlim) {
@@ -1211,6 +1212,34 @@
 		val = np->sndflow;
 		break;
 
+	case IPV6_FLOWLABEL_MGR:
+	{
+		struct in6_flowlabel_req freq;
+
+		if (len < sizeof(freq))
+			return -EINVAL;
+
+		if (copy_from_user(&freq, optval, sizeof(freq)))
+			return -EFAULT;
+
+		if (freq.flr_action != IPV6_FL_A_GET)
+			return -EINVAL;
+
+		len = sizeof(freq);
+		memset(&freq, 0, sizeof(freq));
+
+		val = ipv6_flowlabel_opt_get(sk, &freq);
+		if (val < 0)
+			return val;
+
+		if (put_user(len, optlen))
+			return -EFAULT;
+		if (copy_to_user(optval, &freq, len))
+			return -EFAULT;
+
+		return 0;
+	}
+
 	case IPV6_ADDR_PREFERENCES:
 		val = 0;
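
From user space, the new getsockopt() case pairs with the IPV6_FL_A_GET
action: the request is passed in through optval, validated, then overwritten
with the state ipv6_flowlabel_opt_get() finds. A minimal sketch, assuming a
flow label was previously attached with setsockopt(IPV6_FLOWLABEL_MGR):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/in6.h>

    static void dump_flowlabel(int fd)
    {
        struct in6_flowlabel_req freq;
        socklen_t len = sizeof(freq);

        memset(&freq, 0, sizeof(freq));
        freq.flr_action = IPV6_FL_A_GET;  /* the only action accepted here */

        if (getsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &freq, &len) == 0)
            printf("label %05x share %u linger %us expires %us\n",
                   ntohl(freq.flr_label), freq.flr_share,
                   freq.flr_linger, freq.flr_expires);
    }
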
 
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index a7f842b..7702f9e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,6 +25,19 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_TABLES_IPV6
+	depends on NF_TABLES
+	tristate "IPv6 nf_tables support"
+
+config NFT_CHAIN_ROUTE_IPV6
+	depends on NF_TABLES_IPV6
+	tristate "IPv6 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV6
+	depends on NF_TABLES_IPV6
+	depends on NF_NAT_IPV6 && NFT_NAT
+	tristate "IPv6 nf_tables nat chain support"
+
 config IP6_NF_IPTABLES
 	tristate "IP6 tables support (required for filtering)"
 	depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2b53738..d1b4928 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -23,6 +23,11 @@
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
+# nf_tables
+obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 56eef30..da00a2e 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -39,7 +39,7 @@
 MODULE_LICENSE("GPL");
 
 /* Send RST reply */
-static void send_reset(struct net *net, struct sk_buff *oldskb)
+static void send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
 	struct sk_buff *nskb;
 	struct tcphdr otcph, *tcph;
@@ -88,8 +88,7 @@
 	}
 
 	/* Check checksum. */
-	if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
-			    skb_checksum(oldskb, tcphoff, otcplen, 0))) {
+	if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
 		pr_debug("TCP checksum is invalid\n");
 		return;
 	}
@@ -227,7 +226,7 @@
 		/* Do nothing */
 		break;
 	case IP6T_TCP_RESET:
-		send_reset(net, skb);
+		send_reset(net, skb, par->hooknum);
 		break;
 	default:
 		net_info_ratelimited("case %u not handled yet\n", reject->with);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 2748b04..bf9f612 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -312,7 +312,7 @@
 	return XT_CONTINUE;
 }
 
-static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
 				       struct sk_buff *skb,
 				       const struct net_device *in,
 				       const struct net_device *out,
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 29b44b1..ca7f6c1 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -32,13 +32,14 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		     const struct net_device *in, const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
 	const struct net *net = dev_net((in != NULL) ? in : out);
 
-	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
+	return ip6t_do_table(skb, ops->hooknum, in, out,
+			     net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
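
Every remaining netfilter file in this series gets the same conversion: the
bare hook number argument becomes the registered nf_hook_ops pointer, and
callers read ops->hooknum. For an out-of-tree module, the new shape would
look roughly like this (hypothetical example_hook; 3.13-era API assumed):

    static unsigned int
    example_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                 const struct net_device *in, const struct net_device *out,
                 int (*okfn)(struct sk_buff *))
    {
        /* the hook point now comes from ops, not from an argument */
        pr_debug("packet seen at hook %u\n", ops->hooknum);
        return NF_ACCEPT;
    }

    static struct nf_hook_ops example_ops __read_mostly = {
        .hook     = example_hook,
        .owner    = THIS_MODULE,
        .pf       = NFPROTO_IPV6,
        .hooknum  = NF_INET_PRE_ROUTING,
        .priority = NF_IP6_PRI_FIRST,
    };
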
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index c705907..307bbb7 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -76,17 +76,17 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		     const struct net_device *in, const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
-	if (hook == NF_INET_LOCAL_OUT)
+	if (ops->hooknum == NF_INET_LOCAL_OUT)
 		return ip6t_mangle_out(skb, out);
-	if (hook == NF_INET_POST_ROUTING)
-		return ip6t_do_table(skb, hook, in, out,
+	if (ops->hooknum == NF_INET_POST_ROUTING)
+		return ip6t_do_table(skb, ops->hooknum, in, out,
 				     dev_net(out)->ipv6.ip6table_mangle);
 	/* INPUT/FORWARD */
-	return ip6t_do_table(skb, hook, in, out,
+	return ip6t_do_table(skb, ops->hooknum, in, out,
 			     dev_net(in)->ipv6.ip6table_mangle);
 }
 
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 9b076d2..84c7f33 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -63,7 +63,7 @@
 }
 
 static unsigned int
-nf_nat_ipv6_fn(unsigned int hooknum,
+nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
 	       struct sk_buff *skb,
 	       const struct net_device *in,
 	       const struct net_device *out,
@@ -72,7 +72,7 @@
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn_nat *nat;
-	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
 	__be16 frag_off;
 	int hdrlen;
 	u8 nexthdr;
@@ -111,7 +111,8 @@
 
 		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
 			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
-							     hooknum, hdrlen))
+							     ops->hooknum,
+							     hdrlen))
 				return NF_DROP;
 			else
 				return NF_ACCEPT;
@@ -124,14 +125,14 @@
 		if (!nf_nat_initialized(ct, maniptype)) {
 			unsigned int ret;
 
-			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+			ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
 			if (ret != NF_ACCEPT)
 				return ret;
 		} else {
 			pr_debug("Already setup manip %s for ct %p\n",
 				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 				 ct);
-			if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+			if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
 				goto oif_changed;
 		}
 		break;
@@ -140,11 +141,11 @@
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
 			     ctinfo == IP_CT_ESTABLISHED_REPLY);
-		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+		if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
 			goto oif_changed;
 	}
 
-	return nf_nat_packet(ct, ctinfo, hooknum, skb);
+	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
 	nf_ct_kill_acct(ct, ctinfo, skb);
@@ -152,7 +153,7 @@
 }
 
 static unsigned int
-nf_nat_ipv6_in(unsigned int hooknum,
+nf_nat_ipv6_in(const struct nf_hook_ops *ops,
 	       struct sk_buff *skb,
 	       const struct net_device *in,
 	       const struct net_device *out,
@@ -161,7 +162,7 @@
 	unsigned int ret;
 	struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-	ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
 		skb_dst_drop(skb);
@@ -170,7 +171,7 @@
 }
 
 static unsigned int
-nf_nat_ipv6_out(unsigned int hooknum,
+nf_nat_ipv6_out(const struct nf_hook_ops *ops,
 		struct sk_buff *skb,
 		const struct net_device *in,
 		const struct net_device *out,
@@ -187,7 +188,7 @@
 	if (skb->len < sizeof(struct ipv6hdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -209,7 +210,7 @@
 }
 
 static unsigned int
-nf_nat_ipv6_local_fn(unsigned int hooknum,
+nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
 		     struct sk_buff *skb,
 		     const struct net_device *in,
 		     const struct net_device *out,
@@ -224,7 +225,7 @@
 	if (skb->len < sizeof(struct ipv6hdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 9a626d8..5274740 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -19,13 +19,14 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		  const struct net_device *in, const struct net_device *out,
 		  int (*okfn)(struct sk_buff *))
 {
 	const struct net *net = dev_net((in != NULL) ? in : out);
 
-	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
+	return ip6t_do_table(skb, ops->hooknum, in, out,
+			     net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index ce88d1d..ab3b021 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -36,14 +36,15 @@
 };
 
 static unsigned int
-ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		       const struct net_device *in,
 		       const struct net_device *out,
 		       int (*okfn)(struct sk_buff *))
 {
 	const struct net *net = dev_net((in != NULL) ? in : out);
 
-	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
+	return ip6t_do_table(skb, ops->hooknum, in, out,
+			     net->ipv6.ip6table_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index d6e4dd8..4cbc6b2 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -95,7 +95,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int ipv6_helper(unsigned int hooknum,
+static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
 				struct sk_buff *skb,
 				const struct net_device *in,
 				const struct net_device *out,
@@ -133,7 +133,7 @@
 	return helper->help(skb, protoff, ct, ctinfo);
 }
 
-static unsigned int ipv6_confirm(unsigned int hooknum,
+static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
 				 struct sk_buff *skb,
 				 const struct net_device *in,
 				 const struct net_device *out,
@@ -169,66 +169,16 @@
 	return nf_conntrack_confirm(skb);
 }
 
-static unsigned int __ipv6_conntrack_in(struct net *net,
-					unsigned int hooknum,
-					struct sk_buff *skb,
-					const struct net_device *in,
-					const struct net_device *out,
-					int (*okfn)(struct sk_buff *))
-{
-	struct sk_buff *reasm = skb->nfct_reasm;
-	const struct nf_conn_help *help;
-	struct nf_conn *ct;
-	enum ip_conntrack_info ctinfo;
-
-	/* This packet is fragmented and has reassembled packet. */
-	if (reasm) {
-		/* Reassembled packet isn't parsed yet ? */
-		if (!reasm->nfct) {
-			unsigned int ret;
-
-			ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
-			if (ret != NF_ACCEPT)
-				return ret;
-		}
-
-		/* Conntrack helpers need the entire reassembled packet in the
-		 * POST_ROUTING hook. In case of unconfirmed connections NAT
-		 * might reassign a helper, so the entire packet is also
-		 * required.
-		 */
-		ct = nf_ct_get(reasm, &ctinfo);
-		if (ct != NULL && !nf_ct_is_untracked(ct)) {
-			help = nfct_help(ct);
-			if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
-				nf_conntrack_get_reasm(reasm);
-				NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
-					       (struct net_device *)in,
-					       (struct net_device *)out,
-					       okfn, NF_IP6_PRI_CONNTRACK + 1);
-				return NF_DROP_ERR(-ECANCELED);
-			}
-		}
-
-		nf_conntrack_get(reasm->nfct);
-		skb->nfct = reasm->nfct;
-		skb->nfctinfo = reasm->nfctinfo;
-		return NF_ACCEPT;
-	}
-
-	return nf_conntrack_in(net, PF_INET6, hooknum, skb);
-}
-
-static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
 				      struct sk_buff *skb,
 				      const struct net_device *in,
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
 {
-	return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
+	return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb);
 }
 
-static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
 					 struct sk_buff *skb,
 					 const struct net_device *in,
 					 const struct net_device *out,
@@ -239,7 +189,7 @@
 		net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
 		return NF_ACCEPT;
 	}
-	return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
+	return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -297,9 +247,9 @@
 	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
 	struct nf_conn *ct;
 
-	tuple.src.u3.in6 = inet6->rcv_saddr;
+	tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
 	tuple.src.u.tcp.port = inet->inet_sport;
-	tuple.dst.u3.in6 = inet6->daddr;
+	tuple.dst.u3.in6 = sk->sk_v6_daddr;
 	tuple.dst.u.tcp.port = inet->inet_dport;
 	tuple.dst.protonum = sk->sk_protocol;
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index dffdc1a..767ab8d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -144,12 +144,24 @@
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }
 
+static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
+				 const struct in6_addr *daddr)
+{
+	u32 c;
+
+	net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
+	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+			 (__force u32)id, nf_frags.rnd);
+	return c & (INETFRAGS_HASHSZ - 1);
+}
+
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
 	const struct frag_queue *nq;
 
 	nq = container_of(q, struct frag_queue, q);
-	return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+	return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
 }
 
 static void nf_skb_free(struct sk_buff *skb)
@@ -185,7 +197,7 @@
 	arg.ecn = ecn;
 
 	read_lock_bh(&nf_frags.lock);
-	hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
+	hash = nf_hash_frag(id, src, dst);
 
 	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
 	local_bh_enable();
@@ -621,31 +633,16 @@
 	return skb;
 }
 
-void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			struct net_device *in, struct net_device *out,
-			int (*okfn)(struct sk_buff *))
+void nf_ct_frag6_consume_orig(struct sk_buff *skb)
 {
 	struct sk_buff *s, *s2;
-	unsigned int ret = 0;
 
 	for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
-		nf_conntrack_put_reasm(s->nfct_reasm);
-		nf_conntrack_get_reasm(skb);
-		s->nfct_reasm = skb;
-
 		s2 = s->next;
 		s->next = NULL;
-
-		if (ret != -ECANCELED)
-			ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
-					     in, out, okfn,
-					     NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
-		else
-			kfree_skb(s);
-
+		consume_skb(s);
 		s = s2;
 	}
-	nf_conntrack_put_reasm(skb);
 }
 
 static int nf_ct_net_init(struct net *net)
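Here and in reassembly.c further below, the fragment-hash secret is no longer seeded at boot: net_get_random_once() fills it on first use, after the random pools have had time to gather entropy. The idiom, sketched with hypothetical demo_* names:

#include <linux/jhash.h>
#include <linux/net.h>		/* net_get_random_once() */

static u32 demo_hash_secret __read_mostly;

static u32 demo_hash(u32 a, u32 b)
{
	/* Initializes the secret exactly once, on the first call;
	 * subsequent calls reduce to a cheap already-done check. */
	net_get_random_once(&demo_hash_secret, sizeof(demo_hash_secret));

	return jhash_2words(a, b, demo_hash_secret);
}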
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index aacd121..7b9a748 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -52,7 +52,7 @@
 
 }
 
-static unsigned int ipv6_defrag(unsigned int hooknum,
+static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
 				struct sk_buff *skb,
 				const struct net_device *in,
 				const struct net_device *out,
@@ -66,7 +66,7 @@
 		return NF_ACCEPT;
 #endif
 
-	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(ops->hooknum, skb));
 	/* queued */
 	if (reasm == NULL)
 		return NF_STOLEN;
@@ -75,8 +75,11 @@
 	if (reasm == skb)
 		return NF_ACCEPT;
 
-	nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
-			   (struct net_device *)out, okfn);
+	nf_ct_frag6_consume_orig(reasm);
+
+	NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm,
+		       (struct net_device *) in, (struct net_device *) out,
+		       okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
 
 	return NF_STOLEN;
 }
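The defrag hook above no longer parks conntrack state on the original fragments through nfct_reasm: the originals are freed with nf_ct_frag6_consume_orig() and only the reassembled skb is reinjected. The general steal-and-reinject shape, sketched around a placeholder demo_transform() that stands in for the reassembly step:

static unsigned int steal_and_reinject(const struct nf_hook_ops *ops,
				       struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct sk_buff *nskb;

	nskb = demo_transform(skb);	/* e.g. reassembly; placeholder */
	if (nskb == NULL)
		return NF_STOLEN;	/* queued; nothing to pass on yet */
	if (nskb == skb)
		return NF_ACCEPT;	/* unchanged, continue normally */

	/* Re-run the remaining hooks on the new skb. NF_HOOK_THRESH only
	 * traverses hooks whose priority is at least the threshold, so
	 * starting at defrag priority + 1 skips this hook itself. */
	NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, nskb,
		       (struct net_device *)in, (struct net_device *)out,
		       okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);

	return NF_STOLEN;		/* the original skb is consumed */
}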
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
new file mode 100644
index 0000000..d77db8a
--- /dev/null
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
+				    struct sk_buff *skb,
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
+		if (net_ratelimit())
+			pr_info("nf_tables_ipv6: ignoring short SOCK_RAW packet\n");
+		return NF_ACCEPT;
+	}
+	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+		return NF_DROP;
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv6 __read_mostly = {
+	.family		= NFPROTO_IPV6,
+	.nhooks		= NF_INET_NUMHOOKS,
+	.owner		= THIS_MODULE,
+	.hooks		= {
+		[NF_INET_LOCAL_OUT]	= nft_ipv6_output,
+	},
+};
+
+static int nf_tables_ipv6_init_net(struct net *net)
+{
+	net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.ipv6 == NULL)
+		return -ENOMEM;
+
+	memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
+
+	if (nft_register_afinfo(net, net->nft.ipv6) < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(net->nft.ipv6);
+	return -ENOMEM;
+}
+
+static void nf_tables_ipv6_exit_net(struct net *net)
+{
+	nft_unregister_afinfo(net->nft.ipv6);
+	kfree(net->nft.ipv6);
+}
+
+static struct pernet_operations nf_tables_ipv6_net_ops = {
+	.init	= nf_tables_ipv6_init_net,
+	.exit	= nf_tables_ipv6_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+		  struct sk_buff *skb,
+		  const struct net_device *in,
+		  const struct net_device *out,
+		  int (*okfn)(struct sk_buff *))
+{
+	struct nft_pktinfo pkt;
+
+	/* malformed packet, drop it */
+	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+		return NF_DROP;
+
+	return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv6 = {
+	.family		= NFPROTO_IPV6,
+	.name		= "filter",
+	.type		= NFT_CHAIN_T_DEFAULT,
+	.hook_mask	= (1 << NF_INET_LOCAL_IN) |
+			  (1 << NF_INET_LOCAL_OUT) |
+			  (1 << NF_INET_FORWARD) |
+			  (1 << NF_INET_PRE_ROUTING) |
+			  (1 << NF_INET_POST_ROUTING),
+	.fn		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv6,
+		[NF_INET_LOCAL_OUT]	= nft_ipv6_output,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv6,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv6,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv6,
+	},
+};
+
+static int __init nf_tables_ipv6_init(void)
+{
+	nft_register_chain_type(&filter_ipv6);
+	return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+}
+
+static void __exit nf_tables_ipv6_exit(void)
+{
+	unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
+	nft_unregister_chain_type(&filter_ipv6);
+}
+
+module_init(nf_tables_ipv6_init);
+module_exit(nf_tables_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET6);
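The new file is the per-family glue for nf_tables: a pernet copy of the nft_af_info supplies the hook entry points, and a built-in "filter" chain type covering all five hooks is registered on top. An additional chain type would follow the same shape; a hedged sketch (the "demo" name and single-hook mask are invented):

static unsigned int nft_demo_hook(const struct nf_hook_ops *ops,
				  struct sk_buff *skb,
				  const struct net_device *in,
				  const struct net_device *out,
				  int (*okfn)(struct sk_buff *))
{
	struct nft_pktinfo pkt;

	/* malformed packet, drop it */
	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
		return NF_DROP;

	return nft_do_chain_pktinfo(&pkt, ops);
}

static struct nf_chain_type nft_chain_demo_ipv6 = {
	.family		= NFPROTO_IPV6,
	.name		= "demo",
	.type		= NFT_CHAIN_T_DEFAULT,
	.hook_mask	= (1 << NF_INET_LOCAL_IN),
	.fn		= {
		[NF_INET_LOCAL_IN]	= nft_demo_hook,
	},
	.me		= THIS_MODULE,
};

/* pair nft_register_chain_type(&nft_chain_demo_ipv6) in module init
 * with nft_unregister_chain_type() on exit */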
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
new file mode 100644
index 0000000..e86dcd7
--- /dev/null
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ipv6.h>
+
+/*
+ * IPv6 NAT chains
+ */
+
+static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
+			      struct sk_buff *skb,
+			      const struct net_device *in,
+			      const struct net_device *out,
+			      int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+	struct nf_conn_nat *nat;
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+	__be16 frag_off;
+	int hdrlen;
+	u8 nexthdr;
+	struct nft_pktinfo pkt;
+	unsigned int ret;
+
+	if (ct == NULL || nf_ct_is_untracked(ct))
+		return NF_ACCEPT;
+
+	nat = nfct_nat(ct);
+	if (nat == NULL) {
+		/* Conntrack module was loaded late, can't add extension. */
+		if (nf_ct_is_confirmed(ct))
+			return NF_ACCEPT;
+		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+		if (nat == NULL)
+			return NF_ACCEPT;
+	}
+
+	switch (ctinfo) {
+	case IP_CT_RELATED:
+	case IP_CT_RELATED + IP_CT_IS_REPLY:
+		nexthdr = ipv6_hdr(skb)->nexthdr;
+		hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+					  &nexthdr, &frag_off);
+
+		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+							   ops->hooknum,
+							   hdrlen))
+				return NF_DROP;
+			else
+				return NF_ACCEPT;
+		}
+		/* Fall through */
+	case IP_CT_NEW:
+		if (nf_nat_initialized(ct, maniptype))
+			break;
+
+		nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+
+		ret = nft_do_chain_pktinfo(&pkt, ops);
+		if (ret != NF_ACCEPT)
+			return ret;
+		if (!nf_nat_initialized(ct, maniptype)) {
+			ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+			if (ret != NF_ACCEPT)
+				return ret;
+		}
+	default:
+		break;
+	}
+
+	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops,
+				      struct sk_buff *skb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+	unsigned int ret;
+
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+		skb_dst_drop(skb);
+
+	return ret;
+}
+
+static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops,
+				       struct sk_buff *skb,
+				       const struct net_device *in,
+				       const struct net_device *out,
+				       int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo __maybe_unused;
+	const struct nf_conn *ct __maybe_unused;
+	unsigned int ret;
+
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+				      &ct->tuplehash[!dir].tuple.dst.u3) ||
+		    (ct->tuplehash[dir].tuple.src.u.all !=
+		     ct->tuplehash[!dir].tuple.dst.u.all))
+			if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+				ret = NF_DROP;
+	}
+#endif
+	return ret;
+}
+
+static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
+				  struct sk_buff *skb,
+				  const struct net_device *in,
+				  const struct net_device *out,
+				  int (*okfn)(struct sk_buff *))
+{
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn *ct;
+	unsigned int ret;
+
+	ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+				      &ct->tuplehash[!dir].tuple.src.u3)) {
+			if (ip6_route_me_harder(skb))
+				ret = NF_DROP;
+		}
+#ifdef CONFIG_XFRM
+		else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+			 ct->tuplehash[dir].tuple.dst.u.all !=
+			 ct->tuplehash[!dir].tuple.src.u.all)
+			if (nf_xfrm_me_harder(skb, AF_INET6))
+				ret = NF_DROP;
+#endif
+	}
+	return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv6 = {
+	.family		= NFPROTO_IPV6,
+	.name		= "nat",
+	.type		= NFT_CHAIN_T_NAT,
+	.hook_mask	= (1 << NF_INET_PRE_ROUTING) |
+			  (1 << NF_INET_POST_ROUTING) |
+			  (1 << NF_INET_LOCAL_OUT) |
+			  (1 << NF_INET_LOCAL_IN),
+	.fn		= {
+		[NF_INET_PRE_ROUTING]	= nf_nat_ipv6_prerouting,
+		[NF_INET_POST_ROUTING]	= nf_nat_ipv6_postrouting,
+		[NF_INET_LOCAL_OUT]	= nf_nat_ipv6_output,
+		[NF_INET_LOCAL_IN]	= nf_nat_ipv6_fn,
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init nft_chain_nat_ipv6_init(void)
+{
+	int err;
+
+	err = nft_register_chain_type(&nft_chain_nat_ipv6);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void __exit nft_chain_nat_ipv6_exit(void)
+{
+	nft_unregister_chain_type(&nft_chain_nat_ipv6);
+}
+
+module_init(nft_chain_nat_ipv6_init);
+module_exit(nft_chain_nat_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
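Which translation a NAT chain performs is derived purely from the hook point via HOOK2MANIP(): destination NAT must happen before the routing decision, source NAT after it. Restated as a sketch for reference (not part of the patch):

static enum nf_nat_manip_type demo_hook_to_manip(unsigned int hooknum)
{
	/* POST_ROUTING and LOCAL_IN rewrite the source address (SNAT);
	 * PRE_ROUTING and LOCAL_OUT rewrite the destination (DNAT). */
	if (hooknum == NF_INET_POST_ROUTING || hooknum == NF_INET_LOCAL_IN)
		return NF_NAT_MANIP_SRC;
	return NF_NAT_MANIP_DST;
}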
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
new file mode 100644
index 0000000..3fe40f0
--- /dev/null
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/route.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+					struct sk_buff *skb,
+					const struct net_device *in,
+					const struct net_device *out,
+					int (*okfn)(struct sk_buff *))
+{
+	unsigned int ret;
+	struct nft_pktinfo pkt;
+	struct in6_addr saddr, daddr;
+	u_int8_t hop_limit;
+	u32 mark, flowlabel;
+
+	/* malformed packet, drop it */
+	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+		return NF_DROP;
+
+	/* save source/dest address, mark, hoplimit, flowlabel, priority */
+	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
+	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
+	mark = skb->mark;
+	hop_limit = ipv6_hdr(skb)->hop_limit;
+
+	/* flowlabel and prio (includes version, which shouldn't change either) */
+	flowlabel = *((u32 *)ipv6_hdr(skb));
+
+	ret = nft_do_chain_pktinfo(&pkt, ops);
+	if (ret != NF_DROP && ret != NF_QUEUE &&
+	    (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
+	     memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
+	     skb->mark != mark ||
+	     ipv6_hdr(skb)->hop_limit != hop_limit ||
+	     flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
+		return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+
+	return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv6 = {
+	.family		= NFPROTO_IPV6,
+	.name		= "route",
+	.type		= NFT_CHAIN_T_ROUTE,
+	.hook_mask	= (1 << NF_INET_LOCAL_OUT),
+	.fn		= {
+		[NF_INET_LOCAL_OUT]	= nf_route_table_hook,
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+	return nft_register_chain_type(&nft_chain_route_ipv6);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+	nft_unregister_chain_type(&nft_chain_route_ipv6);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "route");
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 18f19df..8815e31 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -116,7 +116,7 @@
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
-		daddr = &np->daddr;
+		daddr = &sk->sk_v6_daddr;
 	}
 
 	if (!iif)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a4ed241..3c00842 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -77,20 +77,19 @@
 
 	sk_for_each_from(sk)
 		if (inet_sk(sk)->inet_num == num) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
 
-			if (!ipv6_addr_any(&np->daddr) &&
-			    !ipv6_addr_equal(&np->daddr, rmt_addr))
+			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
 				continue;
 
 			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
 				continue;
 
-			if (!ipv6_addr_any(&np->rcv_saddr)) {
-				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+				if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
 					goto found;
 				if (is_multicast &&
 				    inet6_mc_check(sk, loc_addr, rmt_addr))
@@ -302,7 +301,7 @@
 	}
 
 	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-	np->rcv_saddr = addr->sin6_addr;
+	sk->sk_v6_rcv_saddr = addr->sin6_addr;
 	if (!(addr_type & IPV6_ADDR_MULTICAST))
 		np->saddr = addr->sin6_addr;
 	err = 0;
@@ -804,8 +803,8 @@
 		 * sk->sk_dst_cache.
 		 */
 		if (sk->sk_state == TCP_ESTABLISHED &&
-		    ipv6_addr_equal(daddr, &np->daddr))
-			daddr = &np->daddr;
+		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+			daddr = &sk->sk_v6_daddr;
 
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    sin6->sin6_scope_id &&
@@ -816,7 +815,7 @@
 			return -EDESTADDRREQ;
 
 		proto = inet->inet_num;
-		daddr = &np->daddr;
+		daddr = &sk->sk_v6_daddr;
 		fl6.flowlabel = np->flow_label;
 	}
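The ping.c and raw.c hunks above belong to a tree-wide cleanup in this merge: the IPv6 peer and bound addresses move out of struct ipv6_pinfo into struct sock as sk_v6_daddr and sk_v6_rcv_saddr, so lookups no longer have to chase the inet6_sk() pointer. Call sites convert roughly like this (a sketch, not an excerpt):

void demo_addr_access(struct sock *sk)
{
	/* before: addresses lived in the IPv6-only pinfo */
	/* const struct in6_addr *d = &inet6_sk(sk)->daddr; */

	/* after: plain struct sock fields, usable from generic code */
	const struct in6_addr *daddr = &sk->sk_v6_daddr;
	const struct in6_addr *saddr = &sk->sk_v6_rcv_saddr;

	pr_debug("demo: %pI6c -> %pI6c\n", saddr, daddr);
}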
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 1aeb473..cc85a9b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -82,24 +82,24 @@
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
  */
-unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-			     const struct in6_addr *daddr, u32 rnd)
+static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+				    const struct in6_addr *daddr)
 {
 	u32 c;
 
+	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
 	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			 (__force u32)id, rnd);
+			 (__force u32)id, ip6_frags.rnd);
 
 	return c & (INETFRAGS_HASHSZ - 1);
 }
-EXPORT_SYMBOL_GPL(inet6_hash_frag);
 
 static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 {
 	struct frag_queue *fq;
 
 	fq = container_of(q, struct frag_queue, q);
-	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
+	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
 }
 
 bool ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -193,7 +193,7 @@
 	arg.ecn = ecn;
 
 	read_lock(&ip6_frags.lock);
-	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+	hash = inet6_hash_frag(id, src, dst);
 
 	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
 	if (IS_ERR_OR_NULL(q)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 04e17b3..7faa9d5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -619,7 +619,7 @@
 		goto out;
 
 	m = rt6_score_route(rt, oif, strict);
-	if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
+	if (m == RT6_NUD_FAIL_SOFT) {
 		match_do_rr = true;
 		m = 0; /* lowest valid score */
 	} else if (m < 0) {
@@ -731,8 +731,11 @@
 		prefix = &prefix_buf;
 	}
 
-	rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
-				dev->ifindex);
+	if (rinfo->prefix_len == 0)
+		rt = rt6_get_dflt_router(gwaddr, dev);
+	else
+		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
+					gwaddr, dev->ifindex);
 
 	if (rt && !lifetime) {
 		ip6_del_rt(rt);
@@ -871,11 +874,9 @@
 	rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
-		if (!(rt->rt6i_flags & RTF_GATEWAY)) {
-			if (ort->rt6i_dst.plen != 128 &&
-			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
-				rt->rt6i_flags |= RTF_ANYCAST;
-		}
+		if (ort->rt6i_dst.plen != 128 &&
+		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
+			rt->rt6i_flags |= RTF_ANYCAST;
 
 		rt->rt6i_flags |= RTF_CACHE;
 
@@ -1163,7 +1164,6 @@
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = mark;
-	fl6.flowi6_flags = 0;
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
@@ -1262,7 +1262,6 @@
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = mark;
-	fl6.flowi6_flags = 0;
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
@@ -1284,7 +1283,6 @@
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = mark;
-	fl6.flowi6_flags = 0;
 	fl6.daddr = msg->dest;
 	fl6.saddr = iph->daddr;
 
@@ -2831,56 +2829,12 @@
 
 #ifdef CONFIG_PROC_FS
 
-struct rt6_proc_arg
-{
-	char *buffer;
-	int offset;
-	int length;
-	int skip;
-	int len;
-};
-
-static int rt6_info_route(struct rt6_info *rt, void *p_arg)
-{
-	struct seq_file *m = p_arg;
-
-	seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
-
-#ifdef CONFIG_IPV6_SUBTREES
-	seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
-#else
-	seq_puts(m, "00000000000000000000000000000000 00 ");
-#endif
-	if (rt->rt6i_flags & RTF_GATEWAY) {
-		seq_printf(m, "%pi6", &rt->rt6i_gateway);
-	} else {
-		seq_puts(m, "00000000000000000000000000000000");
-	}
-	seq_printf(m, " %08x %08x %08x %08x %8s\n",
-		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
-		   rt->dst.__use, rt->rt6i_flags,
-		   rt->dst.dev ? rt->dst.dev->name : "");
-	return 0;
-}
-
-static int ipv6_route_show(struct seq_file *m, void *v)
-{
-	struct net *net = (struct net *)m->private;
-	fib6_clean_all_ro(net, rt6_info_route, 0, m);
-	return 0;
-}
-
-static int ipv6_route_open(struct inode *inode, struct file *file)
-{
-	return single_open_net(inode, file, ipv6_route_show);
-}
-
 static const struct file_operations ipv6_route_proc_fops = {
 	.owner		= THIS_MODULE,
 	.open		= ipv6_route_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release_net,
+	.release	= seq_release_net,
 };
 
 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1926945..bfc6fce 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -933,10 +933,9 @@
 		ttl = iph6->hop_limit;
 	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
+	if (IS_ERR(skb))
+		goto out;
 
 	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
 			    ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -946,8 +945,9 @@
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
+out:
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -956,13 +956,15 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr  *tiph = &tunnel->parms.iph;
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+	if (IS_ERR(skb))
+		goto out;
 
 	ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
 	return NETDEV_TX_OK;
+out:
+	dev->stats.tx_errors++;
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
@@ -1292,6 +1294,12 @@
 	free_netdev(dev);
 }
 
+#define SIT_FEATURES (NETIF_F_SG	   | \
+		      NETIF_F_FRAGLIST	   | \
+		      NETIF_F_HIGHDMA	   | \
+		      NETIF_F_GSO_SOFTWARE | \
+		      NETIF_F_HW_CSUM)
+
 static void ipip6_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &ipip6_netdev_ops;
@@ -1305,11 +1313,14 @@
 	dev->iflink		= 0;
 	dev->addr_len		= 4;
 	dev->features		|= NETIF_F_LLTX;
+	dev->features		|= SIT_FEATURES;
+	dev->hw_features	|= SIT_FEATURES;
 }
 
 static int ipip6_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
+	int i;
 
 	tunnel->dev = dev;
 	tunnel->net = dev_net(dev);
@@ -1322,6 +1333,12 @@
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ipip6_tunnel_stats;
+		ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipip6_tunnel_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -1331,6 +1348,7 @@
 	struct iphdr *iph = &tunnel->parms.iph;
 	struct net *net = dev_net(dev);
 	struct sit_net *sitn = net_generic(net, sit_net_id);
+	int i;
 
 	tunnel->dev = dev;
 	tunnel->net = dev_net(dev);
@@ -1344,6 +1362,13 @@
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ipip6_fb_stats;
+		ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipip6_fb_stats->syncp);
+	}
+
 	dev_hold(dev);
 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
 	return 0;
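The new for_each_possible_cpu() loops are needed because, on 32-bit SMP kernels, the u64_stats seqcount (syncp) in each per-cpu stats block must be initialized before first use; u64_stats_init() compiles away elsewhere. Readers then sample the counters with the usual retry loop, roughly:

static u64 demo_rx_bytes(struct net_device *dev)
{
	u64 total = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *s = per_cpu_ptr(dev->tstats, i);
		unsigned int start;
		u64 bytes;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));

		total += bytes;
	}
	return total;
}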
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bf63ac8..535a3ad 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -24,26 +24,23 @@
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-/* Table must be sorted. */
+static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS];
+
+/* RFC 2460, Section 8.3:
+ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
+ *
+ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
+ * using higher values than ipv4 tcp syncookies.
+ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
+ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
+ */
 static __u16 const msstab[] = {
-	64,
-	512,
-	536,
-	1280 - 60,
+	1280 - 60, /* IPV6_MIN_MTU - 60 */
 	1480 - 60,
 	1500 - 60,
-	4460 - 60,
 	9000 - 60,
 };
 
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-
 static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 					   struct request_sock *req,
 					   struct dst_entry *dst)
@@ -66,14 +63,18 @@
 static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
 		       __be16 sport, __be16 dport, u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
+	__u32 *tmp;
+
+	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
+
+	tmp = __get_cpu_var(ipv6_cookie_scratch);
 
 	/*
 	 * we have 320 bits of information to hash, copy in the remaining
-	 * 192 bits required for sha_transform, from the syncookie_secret
+	 * 192 bits required for sha_transform, from the syncookie6_secret
 	 * and overwrite the digest with the secret
 	 */
-	memcpy(tmp + 10, syncookie_secret[c], 44);
+	memcpy(tmp + 10, syncookie6_secret[c], 44);
 	memcpy(tmp, saddr, 16);
 	memcpy(tmp + 4, daddr, 16);
 	tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
@@ -86,8 +87,9 @@
 static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
 				   const struct in6_addr *daddr,
 				   __be16 sport, __be16 dport, __u32 sseq,
-				   __u32 count, __u32 data)
+				   __u32 data)
 {
+	u32 count = tcp_cookie_time();
 	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
 		sseq + (count << COOKIEBITS) +
 		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -96,15 +98,14 @@
 
 static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
 				  const struct in6_addr *daddr, __be16 sport,
-				  __be16 dport, __u32 sseq, __u32 count,
-				  __u32 maxdiff)
+				  __be16 dport, __u32 sseq)
 {
-	__u32 diff;
+	__u32 diff, count = tcp_cookie_time();
 
 	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
 	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
-	if (diff >= maxdiff)
+	if (diff >= MAX_SYNCOOKIE_AGE)
 		return (__u32)-1;
 
 	return (cookie -
@@ -125,8 +126,7 @@
 	*mssp = msstab[mssind];
 
 	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
-				     th->dest, ntohl(th->seq),
-				     jiffies / (HZ * 60), mssind);
+				     th->dest, ntohl(th->seq), mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
@@ -146,8 +146,7 @@
 {
 	__u32 seq = ntohl(th->seq) - 1;
 	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
-					    th->source, th->dest, seq,
-					    jiffies / (HZ * 60), COUNTER_TRIES);
+					    th->source, th->dest, seq);
 
 	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
@@ -157,7 +156,6 @@
 {
 	struct tcp_options_received tcp_opt;
 	struct inet_request_sock *ireq;
-	struct inet6_request_sock *ireq6;
 	struct tcp_request_sock *treq;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +192,6 @@
 		goto out;
 
 	ireq = inet_rsk(req);
-	ireq6 = inet6_rsk(req);
 	treq = tcp_rsk(req);
 	treq->listener = NULL;
 
@@ -202,22 +199,22 @@
 		goto out_free;
 
 	req->mss = mss;
-	ireq->rmt_port = th->source;
-	ireq->loc_port = th->dest;
-	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-	ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+	ireq->ir_rmt_port = th->source;
+	ireq->ir_num = ntohs(th->dest);
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 		atomic_inc(&skb->users);
-		ireq6->pktopts = skb;
+		ireq->pktopts = skb;
 	}
 
-	ireq6->iif = sk->sk_bound_dev_if;
+	ireq->ir_iif = sk->sk_bound_dev_if;
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq6->iif = inet6_iif(skb);
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
 
 	req->expires = 0UL;
 	req->num_retrans = 0;
@@ -241,12 +238,12 @@
 		struct flowi6 fl6;
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_TCP;
-		fl6.daddr = ireq6->rmt_addr;
+		fl6.daddr = ireq->ir_v6_rmt_addr;
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		fl6.saddr = ireq6->loc_addr;
+		fl6.saddr = ireq->ir_v6_loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
 		fl6.flowi6_mark = sk->sk_mark;
-		fl6.fl6_dport = inet_rsk(req)->rmt_port;
+		fl6.fl6_dport = ireq->ir_rmt_port;
 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
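Cookie generation and validation now share one clock, tcp_cookie_time(), with IPv4, and the permitted age is bounded by MAX_SYNCOOKIE_AGE rather than the local COUNTER_TRIES. Since COOKIEBITS is 24, only the low 32 - 24 = 8 bits of that counter travel in the cookie, so freshness is judged modulo 256 counter periods; schematically (the bound of 2 is an assumed illustration in the spirit of MAX_SYNCOOKIE_AGE, not a value quoted from the tree):

static bool demo_cookie_fresh(u32 counter_from_cookie, u32 counter_now)
{
	/* wrap-safe age, modulo the 8 counter bits a cookie can carry */
	u32 age = (counter_now - counter_from_cookie) & ((u32)-1 >> 24);

	return age < 2;		/* assumed MAX_SYNCOOKIE_AGE-style bound */
}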
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5c71501..0740f93 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -192,13 +192,13 @@
 	}
 
 	if (tp->rx_opt.ts_recent_stamp &&
-	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
+	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 		tp->rx_opt.ts_recent = 0;
 		tp->rx_opt.ts_recent_stamp = 0;
 		tp->write_seq = 0;
 	}
 
-	np->daddr = usin->sin6_addr;
+	sk->sk_v6_daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
 	/*
@@ -237,17 +237,17 @@
 		} else {
 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-					       &np->rcv_saddr);
+					       &sk->sk_v6_rcv_saddr);
 		}
 
 		return err;
 	}
 
-	if (!ipv6_addr_any(&np->rcv_saddr))
-		saddr = &np->rcv_saddr;
+	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+		saddr = &sk->sk_v6_rcv_saddr;
 
 	fl6.flowi6_proto = IPPROTO_TCP;
-	fl6.daddr = np->daddr;
+	fl6.daddr = sk->sk_v6_daddr;
 	fl6.saddr = saddr ? *saddr : np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
@@ -266,7 +266,7 @@
 
 	if (saddr == NULL) {
 		saddr = &fl6.saddr;
-		np->rcv_saddr = *saddr;
+		sk->sk_v6_rcv_saddr = *saddr;
 	}
 
 	/* set the source address */
@@ -279,7 +279,7 @@
 	rt = (struct rt6_info *) dst;
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
-	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
 		tcp_fetch_timewait_stamp(sk, dst);
 
 	icsk->icsk_ext_hdr_len = 0;
@@ -298,7 +298,7 @@
 
 	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
-							     np->daddr.s6_addr32,
+							     sk->sk_v6_daddr.s6_addr32,
 							     inet->inet_sport,
 							     inet->inet_dport);
 
@@ -465,7 +465,7 @@
 			      struct request_sock *req,
 			      u16 queue_mapping)
 {
-	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff * skb;
 	int err = -ENOMEM;
@@ -477,9 +477,10 @@
 	skb = tcp_make_synack(sk, dst, req, NULL);
 
 	if (skb) {
-		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
+		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
+				    &ireq->ir_v6_rmt_addr);
 
-		fl6->daddr = treq->rmt_addr;
+		fl6->daddr = ireq->ir_v6_rmt_addr;
 		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
@@ -502,7 +503,7 @@
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
-	kfree_skb(inet6_rsk(req)->pktopts);
+	kfree_skb(inet_rsk(req)->pktopts);
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -515,13 +516,13 @@
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 						struct sock *addr_sk)
 {
-	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 						      struct request_sock *req)
 {
-	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 }
 
 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
@@ -621,10 +622,10 @@
 
 	if (sk) {
 		saddr = &inet6_sk(sk)->saddr;
-		daddr = &inet6_sk(sk)->daddr;
+		daddr = &sk->sk_v6_daddr;
 	} else if (req) {
-		saddr = &inet6_rsk(req)->loc_addr;
-		daddr = &inet6_rsk(req)->rmt_addr;
+		saddr = &inet_rsk(req)->ir_v6_loc_addr;
+		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
 	} else {
 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 		saddr = &ip6h->saddr;
@@ -949,7 +950,7 @@
 {
 	struct tcp_options_received tmp_opt;
 	struct request_sock *req;
-	struct inet6_request_sock *treq;
+	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
@@ -994,25 +995,25 @@
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 	tcp_openreq_init(req, &tmp_opt, skb);
 
-	treq = inet6_rsk(req);
-	treq->rmt_addr = ipv6_hdr(skb)->saddr;
-	treq->loc_addr = ipv6_hdr(skb)->daddr;
+	ireq = inet_rsk(req);
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	if (!want_cookie || tmp_opt.tstamp_ok)
 		TCP_ECN_create_request(req, skb, sock_net(sk));
 
-	treq->iif = sk->sk_bound_dev_if;
+	ireq->ir_iif = sk->sk_bound_dev_if;
 
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		treq->iif = inet6_iif(skb);
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
 
 	if (!isn) {
 		if (ipv6_opt_accepted(sk, skb) ||
 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 			atomic_inc(&skb->users);
-			treq->pktopts = skb;
+			ireq->pktopts = skb;
 		}
 
 		if (want_cookie) {
@@ -1051,7 +1052,7 @@
 			 * to the moment of synflood.
 			 */
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
 			goto drop_and_release;
 		}
 
@@ -1086,7 +1087,7 @@
 					  struct request_sock *req,
 					  struct dst_entry *dst)
 {
-	struct inet6_request_sock *treq;
+	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
@@ -1116,11 +1117,11 @@
 
 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-		newnp->rcv_saddr = newnp->saddr;
+		newsk->sk_v6_rcv_saddr = newnp->saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1151,7 +1152,7 @@
 		return newsk;
 	}
 
-	treq = inet6_rsk(req);
+	ireq = inet_rsk(req);
 
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
@@ -1185,10 +1186,10 @@
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	newnp->daddr = treq->rmt_addr;
-	newnp->saddr = treq->loc_addr;
-	newnp->rcv_saddr = treq->loc_addr;
-	newsk->sk_bound_dev_if = treq->iif;
+	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+	newnp->saddr = ireq->ir_v6_loc_addr;
+	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+	newsk->sk_bound_dev_if = ireq->ir_iif;
 
 	/* Now IPv6 options...
 
@@ -1203,11 +1204,11 @@
 
 	/* Clone pktoptions received with SYN */
 	newnp->pktoptions = NULL;
-	if (treq->pktopts != NULL) {
-		newnp->pktoptions = skb_clone(treq->pktopts,
+	if (ireq->pktopts != NULL) {
+		newnp->pktoptions = skb_clone(ireq->pktopts,
 					      sk_gfp_atomic(sk, GFP_ATOMIC));
-		consume_skb(treq->pktopts);
-		treq->pktopts = NULL;
+		consume_skb(ireq->pktopts);
+		ireq->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
@@ -1244,13 +1245,13 @@
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
-	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+	if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
 		/* We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
 		 * memory, then we end up not copying the key
 		 * across. Shucks.
 		 */
-		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
 			       AF_INET6, key->key, key->keylen,
 			       sk_gfp_atomic(sk, GFP_ATOMIC));
 	}
@@ -1722,8 +1723,8 @@
 			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
 	int ttd = req->expires - jiffies;
-	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
+	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
 	if (ttd < 0)
 		ttd = 0;
@@ -1734,10 +1735,10 @@
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3],
-		   ntohs(inet_rsk(req)->loc_port),
+		   inet_rsk(req)->ir_num,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3],
-		   ntohs(inet_rsk(req)->rmt_port),
+		   ntohs(inet_rsk(req)->ir_rmt_port),
 		   TCP_SYN_RECV,
 		   0,0, /* could print option size, but that is af dependent. */
 		   1,   /* timers active (only the expire timer) */
@@ -1758,10 +1759,9 @@
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	const struct ipv6_pinfo *np = inet6_sk(sp);
 
-	dest  = &np->daddr;
-	src   = &np->rcv_saddr;
+	dest  = &sp->sk_v6_daddr;
+	src   = &sp->sk_v6_rcv_saddr;
 	destp = ntohs(inet->inet_dport);
 	srcp  = ntohs(inet->inet_sport);
 
@@ -1810,11 +1810,10 @@
 {
 	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
-	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-	long delta = tw->tw_ttd - jiffies;
+	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
-	dest = &tw6->tw_v6_daddr;
-	src  = &tw6->tw_v6_rcv_saddr;
+	dest = &tw->tw_v6_daddr;
+	src  = &tw->tw_v6_rcv_saddr;
 	destp = ntohs(tw->tw_dport);
 	srcp  = ntohs(tw->tw_sport);
 
@@ -1834,6 +1833,7 @@
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 {
 	struct tcp_iter_state *st;
+	struct sock *sk = v;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1849,14 +1849,14 @@
 	switch (st->state) {
 	case TCP_SEQ_STATE_LISTENING:
 	case TCP_SEQ_STATE_ESTABLISHED:
-		get_tcp6_sock(seq, v, st->num);
+		if (sk->sk_state == TCP_TIME_WAIT)
+			get_timewait6_sock(seq, v, st->num);
+		else
+			get_tcp6_sock(seq, v, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
 		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
-		get_timewait6_sock(seq, v, st->num);
-		break;
 	}
 out:
 	return 0;
@@ -1929,6 +1929,7 @@
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
 	.orphan_count		= &tcp_orphan_count,
+	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
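These TCP hunks apply the same consolidation to request socks: the IPv6-only struct inet6_request_sock folds into struct inet_request_sock, with rmt_addr/loc_addr becoming ir_v6_rmt_addr/ir_v6_loc_addr, iif becoming ir_iif, pktopts moving along, and the bound port now stored in host byte order as ir_num. Converted call sites read roughly as:

void demo_req_access(struct request_sock *req)
{
	/* before: two accessors, inet_rsk() and inet6_rsk() */
	/* const struct in6_addr *p = &inet6_rsk(req)->rmt_addr; */

	/* after: one accessor for both families */
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct in6_addr *peer = &ireq->ir_v6_rmt_addr;
	__be16 peer_port = ireq->ir_rmt_port;	/* network order */
	u16 local_port = ireq->ir_num;		/* host order, no ntohs() */

	pr_debug("demo: peer %pI6c:%u, local port %u\n",
		 peer, ntohs(peer_port), local_port);
}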
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 2ec6bf6..c1097c7 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -83,7 +83,7 @@
 static const struct net_offload tcpv6_offload = {
 	.callbacks = {
 		.gso_send_check	=	tcp_v6_gso_send_check,
-		.gso_segment	=	tcp_tso_segment,
+		.gso_segment	=	tcp_gso_segment,
 		.gro_receive	=	tcp6_gro_receive,
 		.gro_complete	=	tcp6_gro_complete,
 	},
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1878609..f3893e8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,22 +53,42 @@
 #include <trace/events/skb.h>
 #include "udp_impl.h"
 
+static unsigned int udp6_ehashfn(struct net *net,
+				  const struct in6_addr *laddr,
+				  const u16 lport,
+				  const struct in6_addr *faddr,
+				  const __be16 fport)
+{
+	static u32 udp6_ehash_secret __read_mostly;
+	static u32 udp_ipv6_hash_secret __read_mostly;
+
+	u32 lhash, fhash;
+
+	net_get_random_once(&udp6_ehash_secret,
+			    sizeof(udp6_ehash_secret));
+	net_get_random_once(&udp_ipv6_hash_secret,
+			    sizeof(udp_ipv6_hash_secret));
+
+	lhash = (__force u32)laddr->s6_addr32[3];
+	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+
+	return __inet6_ehashfn(lhash, lport, fhash, fport,
+			       udp6_ehash_secret + net_hash_mix(net));
+}
+
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
-	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
 	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-	__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
-	__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 	int sk_ipv6only = ipv6_only_sock(sk);
 	int sk2_ipv6only = inet_v6_ipv6only(sk2);
-	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
+	int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
 	/* if both are mapped, treat as IPv4 */
 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
 		return (!sk2_ipv6only &&
-			(!sk1_rcv_saddr || !sk2_rcv_saddr ||
-			  sk1_rcv_saddr == sk2_rcv_saddr));
+			(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+			  sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
 
 	if (addr_type2 == IPV6_ADDR_ANY &&
 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -79,7 +99,7 @@
 		return 1;
 
 	if (sk2_rcv_saddr6 &&
-	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+	    ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
 		return 1;
 
 	return 0;
@@ -107,7 +127,7 @@
 	unsigned int hash2_nulladdr =
 		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 	unsigned int hash2_partial =
-		udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
+		udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -117,7 +137,7 @@
 static void udp_v6_rehash(struct sock *sk)
 {
 	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
-					  &inet6_sk(sk)->rcv_saddr,
+					  &sk->sk_v6_rcv_saddr,
 					  inet_sk(sk)->inet_num);
 
 	udp_lib_rehash(sk, new_hash);
@@ -133,7 +153,6 @@
 
 	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 			sk->sk_family == PF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
 		struct inet_sock *inet = inet_sk(sk);
 
 		score = 0;
@@ -142,13 +161,13 @@
 				return -1;
 			score++;
 		}
-		if (!ipv6_addr_any(&np->rcv_saddr)) {
-			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+		if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 				return -1;
 			score++;
 		}
-		if (!ipv6_addr_any(&np->daddr)) {
-			if (!ipv6_addr_equal(&np->daddr, saddr))
+		if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+			if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 				return -1;
 			score++;
 		}
@@ -171,10 +190,9 @@
 
 	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 			sk->sk_family == PF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 			return -1;
 		score = 0;
 		if (inet->inet_dport) {
@@ -182,8 +200,8 @@
 				return -1;
 			score++;
 		}
-		if (!ipv6_addr_any(&np->daddr)) {
-			if (!ipv6_addr_equal(&np->daddr, saddr))
+		if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+			if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
 				return -1;
 			score++;
 		}
@@ -219,8 +237,8 @@
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				hash = inet6_ehashfn(net, daddr, hnum,
-						     saddr, sport);
+				hash = udp6_ehashfn(net, daddr, hnum,
+						    saddr, sport);
 				matches = 1;
 			} else if (score == SCORE2_MAX)
 				goto exact_match;
@@ -300,8 +318,8 @@
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				hash = inet6_ehashfn(net, daddr, hnum,
-						     saddr, sport);
+				hash = udp6_ehashfn(net, daddr, hnum,
+						    saddr, sport);
 				matches = 1;
 			}
 		} else if (score == badness && reuseport) {
@@ -551,8 +569,10 @@
 {
 	int rc;
 
-	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
 		sock_rps_save_rxhash(sk, skb);
+		sk_mark_napi_id(sk, skb);
+	}
 
 	rc = sock_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
@@ -690,20 +710,19 @@
 
 		if (udp_sk(s)->udp_port_hash == num &&
 		    s->sk_family == PF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(s);
 			if (inet->inet_dport) {
 				if (inet->inet_dport != rmt_port)
 					continue;
 			}
-			if (!ipv6_addr_any(&np->daddr) &&
-			    !ipv6_addr_equal(&np->daddr, rmt_addr))
+			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
 				continue;
 
 			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
 				continue;
 
-			if (!ipv6_addr_any(&np->rcv_saddr)) {
-				if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+				if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
 					continue;
 			}
 			if (!inet6_mc_check(s, loc_addr, rmt_addr))
@@ -846,7 +865,6 @@
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_napi_id(sk, skb);
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
@@ -1064,7 +1082,7 @@
 	} else if (!up->pending) {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
-		daddr = &np->daddr;
+		daddr = &sk->sk_v6_daddr;
 	} else
 		daddr = NULL;
 
@@ -1134,8 +1152,8 @@
 		 * sk->sk_dst_cache.
 		 */
 		if (sk->sk_state == TCP_ESTABLISHED &&
-		    ipv6_addr_equal(daddr, &np->daddr))
-			daddr = &np->daddr;
+		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+			daddr = &sk->sk_v6_daddr;
 
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    sin6->sin6_scope_id &&
@@ -1146,7 +1164,7 @@
 			return -EDESTADDRREQ;
 
 		fl6.fl6_dport = inet->inet_dport;
-		daddr = &np->daddr;
+		daddr = &sk->sk_v6_daddr;
 		fl6.flowlabel = np->flow_label;
 		connected = 1;
 	}
@@ -1261,8 +1279,8 @@
 	if (dst) {
 		if (connected) {
 			ip6_dst_store(sk, dst,
-				      ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-				      &np->daddr : NULL,
+				      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+				      &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
 				      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
 				      &np->saddr :
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 4691ed5..c779c3c 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -7,33 +7,32 @@
 #include <net/inet_common.h>
 #include <net/transp_v6.h>
 
-extern int  	__udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void 	__udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
-			       u8 , u8 , int , __be32 , struct udp_table *);
+int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+		    __be32, struct udp_table *);
 
-extern int	udp_v6_get_port(struct sock *sk, unsigned short snum);
+int udp_v6_get_port(struct sock *sk, unsigned short snum);
 
-extern int	udpv6_getsockopt(struct sock *sk, int level, int optname,
-				 char __user *optval, int __user *optlen);
-extern int	udpv6_setsockopt(struct sock *sk, int level, int optname,
-				 char __user *optval, unsigned int optlen);
+int udpv6_getsockopt(struct sock *sk, int level, int optname,
+		     char __user *optval, int __user *optlen);
+int udpv6_setsockopt(struct sock *sk, int level, int optname,
+		     char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int	compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-					char __user *optval, unsigned int optlen);
-extern int	compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-				       char __user *optval, int __user *optlen);
+int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, unsigned int optlen);
+int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, int __user *optlen);
 #endif
-extern int	udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
-			      struct msghdr *msg, size_t len);
-extern int	udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
-			      struct msghdr *msg, size_t len,
-			      int noblock, int flags, int *addr_len);
-extern int	udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
-extern void	udpv6_destroy_sock(struct sock *sk);
+int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		  size_t len);
+int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		  size_t len, int noblock, int flags, int *addr_len);
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udpv6_destroy_sock(struct sock *sk);
 
-extern void udp_v6_clear_sk(struct sock *sk, int size);
+void udp_v6_clear_sk(struct sock *sk, int size);
 
 #ifdef CONFIG_PROC_FS
-extern int	udp6_seq_show(struct seq_file *seq, void *v);
+int udp6_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif	/* _UDP6_IMPL_H */
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6055951..e7359f9 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -64,6 +64,8 @@
 				      SKB_GSO_DODGY |
 				      SKB_GSO_UDP_TUNNEL |
 				      SKB_GSO_GRE |
+				      SKB_GSO_IPIP |
+				      SKB_GSO_SIT |
 				      SKB_GSO_MPLS) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
@@ -88,7 +90,7 @@
 
 		/* Check if there is enough headroom to insert fragment header. */
 		tnl_hlen = skb_tnl_header_len(skb);
-		if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
 			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
 				goto out;
 		}
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4770d51..cb04f7a 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -18,6 +18,65 @@
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm6_mode_tunnel_input_mutex);
+
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
+{
+	struct xfrm_tunnel_notifier __rcu **pprev;
+	struct xfrm_tunnel_notifier *t;
+	int ret = -EEXIST;
+	int priority = handler->priority;
+
+	mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+
+	for (pprev = &rcv_notify_handlers;
+	     (t = rcu_dereference_protected(*pprev,
+	     lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t->priority > priority)
+			break;
+		if (t->priority == priority)
+			goto err;
+
+	}
+
+	handler->next = *pprev;
+	rcu_assign_pointer(*pprev, handler);
+
+	ret = 0;
+
+err:
+	mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_register);
+
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
+{
+	struct xfrm_tunnel_notifier __rcu **pprev;
+	struct xfrm_tunnel_notifier *t;
+	int ret = -ENOENT;
+
+	mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+	for (pprev = &rcv_notify_handlers;
+	     (t = rcu_dereference_protected(*pprev,
+	     lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t == handler) {
+			*pprev = handler->next;
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+	synchronize_net();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_deregister);
+
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
@@ -63,8 +122,15 @@
 	return 0;
 }
 
+#define for_each_input_rcu(head, handler)	\
+	for (handler = rcu_dereference(head);	\
+	     handler != NULL;			\
+	     handler = rcu_dereference(handler->next))
+
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+	struct xfrm_tunnel_notifier *handler;
 	int err = -EINVAL;
 
 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
@@ -72,6 +138,9 @@
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		goto out;
 
+	for_each_input_rcu(rcv_notify_handlers, handler)
+		handler->handler(skb);
+
 	err = skb_unclone(skb, GFP_ATOMIC);
 	if (err)
 		goto out;
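The registration pair above maintains a priority-ordered, RCU-protected list of observers that see each tunnel-mode packet before it is decapsulated; duplicate priorities are refused with -EEXIST. A consumer would hook in roughly like this (handler body and priority value are illustrative):

static int demo_tunnel_seen(struct sk_buff *skb)
{
	/* peek only: the outer IPv6 header is still present here */
	pr_debug("demo: tunnelled skb %p, len %u\n", skb, skb->len);
	return 0;
}

static struct xfrm_tunnel_notifier demo_notifier = {
	.handler	= demo_tunnel_seen,
	.priority	= 1,	/* must be unique among registrants */
};

static int __init demo_init(void)
{
	return xfrm6_mode_tunnel_input_register(&demo_notifier);
}

static void __exit demo_exit(void)
{
	xfrm6_mode_tunnel_input_deregister(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");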
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 564eb0b..8d65bb9 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -509,16 +509,11 @@
  */
 
 /* -------------------------- IRDA PART -------------------------- */
-extern int
-	irda_irnet_create(irnet_socket *);	/* Initialise a IrNET socket */
-extern int
-	irda_irnet_connect(irnet_socket *);	/* Try to connect over IrDA */
-extern void
-	irda_irnet_destroy(irnet_socket *);	/* Teardown  a IrNET socket */
-extern int
-	irda_irnet_init(void);		/* Initialise IrDA part of IrNET */
-extern void
-	irda_irnet_cleanup(void);	/* Teardown IrDA part of IrNET */
+int irda_irnet_create(irnet_socket *);	/* Initialise an IrNET socket */
+int irda_irnet_connect(irnet_socket *);	/* Try to connect over IrDA */
+void irda_irnet_destroy(irnet_socket *);	/* Teardown an IrNET socket */
+int irda_irnet_init(void);		/* Initialise IrDA part of IrNET */
+void irda_irnet_cleanup(void);		/* Teardown IrDA part of IrNET */
 
 /**************************** VARIABLES ****************************/
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b076e83..9af77d9 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1181,7 +1181,7 @@
 	    !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
 		__wsum csum = skb_checksum(skb, 0, udp_len, 0);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+		uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
 					    IPPROTO_UDP, csum);
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
@@ -1189,7 +1189,7 @@
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+		uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
 					     udp_len, IPPROTO_UDP, 0);
 	}
 }
@@ -1713,13 +1713,13 @@
 		struct ipv6_pinfo *np = inet6_sk(sk);
 
 		if (ipv6_addr_v4mapped(&np->saddr) &&
-		    ipv6_addr_v4mapped(&np->daddr)) {
+		    ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 			struct inet_sock *inet = inet_sk(sk);
 
 			tunnel->v4mapped = true;
 			inet->inet_saddr = np->saddr.s6_addr32[3];
-			inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
-			inet->inet_daddr = np->daddr.s6_addr32[3];
+			inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
+			inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
 		} else {
 			tunnel->v4mapped = false;
 		}
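
Note: the l2tp_core.c hunks track the removal of the duplicated daddr/rcv_saddr fields from struct ipv6_pinfo in favour of sk->sk_v6_daddr and sk->sk_v6_rcv_saddr on struct sock. The v4mapped branch relies on the ::ffff:a.b.c.d encoding, in which the IPv4 address occupies the low 32 bits; a small illustrative helper (hypothetical, not part of the patch):

	#include <net/ipv6.h>

	/* Extract the IPv4 address from an IPv4-mapped IPv6 address
	 * (::ffff:a.b.c.d); returns false for any other address. */
	static bool addr_v4mapped_get(const struct in6_addr *a, __be32 *v4)
	{
		if (!ipv6_addr_v4mapped(a))
			return false;
		*v4 = a->s6_addr32[3];	/* network byte order */
		return true;
	}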
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 6f251cb..1ee9f69 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -238,29 +238,40 @@
 	return tunnel;
 }
 
-extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
-extern void l2tp_tunnel_sock_put(struct sock *sk);
-extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_sock_put(struct sock *sk);
+struct l2tp_session *l2tp_session_find(struct net *net,
+				       struct l2tp_tunnel *tunnel,
+				       u32 session_id);
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
-extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
-extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
-extern void __l2tp_session_unhash(struct l2tp_session *session);
-extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_session_free(struct l2tp_session *session);
-extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_session_queue_purge(struct l2tp_session *session);
-extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+		       struct l2tp_tunnel **tunnelp);
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+struct l2tp_session *l2tp_session_create(int priv_size,
+					 struct l2tp_tunnel *tunnel,
+					 u32 session_id, u32 peer_session_id,
+					 struct l2tp_session_cfg *cfg);
+void __l2tp_session_unhash(struct l2tp_session *session);
+int l2tp_session_delete(struct l2tp_session *session);
+void l2tp_session_free(struct l2tp_session *session);
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+		      int length, int (*payload_hook)(struct sk_buff *skb));
+int l2tp_session_queue_purge(struct l2tp_session *session);
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
-extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+		  int hdr_len);
 
-extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
-extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+			 const struct l2tp_nl_cmd_ops *ops);
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
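
Note: the irnet.h and l2tp_core.h hunks above are pure style cleanups: checkpatch flags `extern` on function prototypes in headers as redundant, since file-scope function declarations have external linkage by default, and the long prototypes are re-wrapped to fit 80 columns. For example:

	/* redundant storage-class specifier */
	extern int l2tp_session_delete(struct l2tp_session *session);

	/* equivalent, preferred form */
	int l2tp_session_delete(struct l2tp_session *session);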
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 072d720..2d6760a 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -127,9 +127,10 @@
 
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tunnel->sock->sk_family == AF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+			const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+
 			seq_printf(m, " from %pI6c to %pI6c\n",
-				&np->saddr, &np->daddr);
+				&np->saddr, &tunnel->sock->sk_v6_daddr);
 		} else
 #endif
 		seq_printf(m, " from %pI4 to %pI4\n",
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index b8a6039..cfd6530 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -63,7 +63,7 @@
 	struct sock *sk;
 
 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-		struct in6_addr *addr = inet6_rcv_saddr(sk);
+		const struct in6_addr *addr = inet6_rcv_saddr(sk);
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
 		if (l2tp == NULL)
@@ -331,7 +331,7 @@
 	rcu_read_unlock();
 
 	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-	np->rcv_saddr = addr->l2tp_addr;
+	sk->sk_v6_rcv_saddr = addr->l2tp_addr;
 	np->saddr = addr->l2tp_addr;
 
 	l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
@@ -421,14 +421,14 @@
 		if (!lsk->peer_conn_id)
 			return -ENOTCONN;
 		lsa->l2tp_conn_id = lsk->peer_conn_id;
-		lsa->l2tp_addr = np->daddr;
+		lsa->l2tp_addr = sk->sk_v6_daddr;
 		if (np->sndflow)
 			lsa->l2tp_flowinfo = np->flow_label;
 	} else {
-		if (ipv6_addr_any(&np->rcv_saddr))
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 			lsa->l2tp_addr = np->saddr;
 		else
-			lsa->l2tp_addr = np->rcv_saddr;
+			lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
 
 		lsa->l2tp_conn_id = lsk->conn_id;
 	}
@@ -537,8 +537,8 @@
 		 * sk->sk_dst_cache.
 		 */
 		if (sk->sk_state == TCP_ESTABLISHED &&
-		    ipv6_addr_equal(daddr, &np->daddr))
-			daddr = &np->daddr;
+		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+			daddr = &sk->sk_v6_daddr;
 
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    lsa->l2tp_scope_id &&
@@ -548,7 +548,7 @@
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
 
-		daddr = &np->daddr;
+		daddr = &sk->sk_v6_daddr;
 		fl6.flowlabel = np->flow_label;
 	}
 
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 0825ff2..be446d5 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -306,8 +306,8 @@
 		if (np) {
 			if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
 				    &np->saddr) ||
-			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
-				    &np->daddr))
+			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
+				    &sk->sk_v6_daddr))
 				goto nla_put_failure;
 		} else
 #endif
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8c46b27..ffda81e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -910,8 +910,8 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if ((tunnel->version == 2) &&
 		   (tunnel->sock->sk_family == AF_INET6)) {
-		struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
 		struct sockaddr_pppol2tpin6 sp;
+
 		len = sizeof(sp);
 		memset(&sp, 0, len);
 		sp.sa_family	= AF_PPPOX;
@@ -924,13 +924,13 @@
 		sp.pppol2tp.d_session = session->peer_session_id;
 		sp.pppol2tp.addr.sin6_family = AF_INET6;
 		sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-		memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-		       sizeof(np->daddr));
+		memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+		       sizeof(tunnel->sock->sk_v6_daddr));
 		memcpy(uaddr, &sp, len);
 	} else if ((tunnel->version == 3) &&
 		   (tunnel->sock->sk_family == AF_INET6)) {
-		struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
 		struct sockaddr_pppol2tpv3in6 sp;
+
 		len = sizeof(sp);
 		memset(&sp, 0, len);
 		sp.sa_family	= AF_PPPOX;
@@ -943,8 +943,8 @@
 		sp.pppol2tp.d_session = session->peer_session_id;
 		sp.pppol2tp.addr.sin6_family = AF_INET6;
 		sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-		memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-		       sizeof(np->daddr));
+		memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+		       sizeof(tunnel->sock->sk_v6_daddr));
 		memcpy(uaddr, &sp, len);
 #endif
 	} else if (tunnel->version == 3) {
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 62535fe..97b5dca 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -4,6 +4,7 @@
 	select CRYPTO
 	select CRYPTO_ARC4
 	select CRYPTO_AES
+	select CRYPTO_CCM
 	select CRC32
 	select AVERAGE
 	---help---
@@ -258,6 +259,17 @@
 
 	  Do not select this option.
 
+config MAC80211_MESH_CSA_DEBUG
+	bool "Verbose mesh channel switch debugging"
+	depends on MAC80211_DEBUG_MENU
+	depends on MAC80211_MESH
+	---help---
+	  Selecting this option causes mac80211 to print out very verbose mesh
+	  channel switch debugging messages (when mac80211 is taking part in a
+	  mesh network).
+
+	  Do not select this option.
+
 config MAC80211_MESH_PS_DEBUG
 	bool "Verbose mesh powersave debugging"
 	depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index be7614b9..7c7df47 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -2,6 +2,8 @@
  * Copyright 2003-2004, Instant802 Networks, Inc.
  * Copyright 2005-2006, Devicescape Software, Inc.
  *
+ * Rewrite: Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -17,134 +19,75 @@
 #include "key.h"
 #include "aes_ccm.h"
 
-static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a)
+void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			       u8 *data, size_t data_len, u8 *mic)
 {
-	int i;
-	u8 *b_0, *aad, *b, *s_0;
+	struct scatterlist assoc, pt, ct[2];
+	struct {
+		struct aead_request	req;
+		u8			priv[crypto_aead_reqsize(tfm)];
+	} aead_req;
 
-	b_0 = scratch + 3 * AES_BLOCK_SIZE;
-	aad = scratch + 4 * AES_BLOCK_SIZE;
-	b = scratch;
-	s_0 = scratch + AES_BLOCK_SIZE;
+	memset(&aead_req, 0, sizeof(aead_req));
 
-	crypto_cipher_encrypt_one(tfm, b, b_0);
+	sg_init_one(&pt, data, data_len);
+	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_init_table(ct, 2);
+	sg_set_buf(&ct[0], data, data_len);
+	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-	/* Extra Authenticate-only data (always two AES blocks) */
-	for (i = 0; i < AES_BLOCK_SIZE; i++)
-		aad[i] ^= b[i];
-	crypto_cipher_encrypt_one(tfm, b, aad);
+	aead_request_set_tfm(&aead_req.req, tfm);
+	aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
+	aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0);
 
-	aad += AES_BLOCK_SIZE;
-
-	for (i = 0; i < AES_BLOCK_SIZE; i++)
-		aad[i] ^= b[i];
-	crypto_cipher_encrypt_one(tfm, a, aad);
-
-	/* Mask out bits from auth-only-b_0 */
-	b_0[0] &= 0x07;
-
-	/* S_0 is used to encrypt T (= MIC) */
-	b_0[14] = 0;
-	b_0[15] = 0;
-	crypto_cipher_encrypt_one(tfm, s_0, b_0);
+	crypto_aead_encrypt(&aead_req.req);
 }
 
-
-void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
-			       u8 *data, size_t data_len,
-			       u8 *cdata, u8 *mic)
+int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic)
 {
-	int i, j, last_len, num_blocks;
-	u8 *pos, *cpos, *b, *s_0, *e, *b_0;
+	struct scatterlist assoc, pt, ct[2];
+	struct {
+		struct aead_request	req;
+		u8			priv[crypto_aead_reqsize(tfm)];
+	} aead_req;
 
-	b = scratch;
-	s_0 = scratch + AES_BLOCK_SIZE;
-	e = scratch + 2 * AES_BLOCK_SIZE;
-	b_0 = scratch + 3 * AES_BLOCK_SIZE;
+	memset(&aead_req, 0, sizeof(aead_req));
 
-	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
-	last_len = data_len % AES_BLOCK_SIZE;
-	aes_ccm_prepare(tfm, scratch, b);
+	sg_init_one(&pt, data, data_len);
+	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_init_table(ct, 2);
+	sg_set_buf(&ct[0], data, data_len);
+	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-	/* Process payload blocks */
-	pos = data;
-	cpos = cdata;
-	for (j = 1; j <= num_blocks; j++) {
-		int blen = (j == num_blocks && last_len) ?
-			last_len : AES_BLOCK_SIZE;
+	aead_request_set_tfm(&aead_req.req, tfm);
+	aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
+	aead_request_set_crypt(&aead_req.req, ct, &pt,
+			       data_len + IEEE80211_CCMP_MIC_LEN, b_0);
 
-		/* Authentication followed by encryption */
-		for (i = 0; i < blen; i++)
-			b[i] ^= pos[i];
-		crypto_cipher_encrypt_one(tfm, b, b);
-
-		b_0[14] = (j >> 8) & 0xff;
-		b_0[15] = j & 0xff;
-		crypto_cipher_encrypt_one(tfm, e, b_0);
-		for (i = 0; i < blen; i++)
-			*cpos++ = *pos++ ^ e[i];
-	}
-
-	for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++)
-		mic[i] = b[i] ^ s_0[i];
+	return crypto_aead_decrypt(&aead_req.req);
 }
 
-
-int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
-			      u8 *cdata, size_t data_len, u8 *mic, u8 *data)
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
 {
-	int i, j, last_len, num_blocks;
-	u8 *pos, *cpos, *b, *s_0, *a, *b_0;
+	struct crypto_aead *tfm;
+	int err;
 
-	b = scratch;
-	s_0 = scratch + AES_BLOCK_SIZE;
-	a = scratch + 2 * AES_BLOCK_SIZE;
-	b_0 = scratch + 3 * AES_BLOCK_SIZE;
+	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return tfm;
 
-	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
-	last_len = data_len % AES_BLOCK_SIZE;
-	aes_ccm_prepare(tfm, scratch, a);
+	err = crypto_aead_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
+	if (!err)
+		err = crypto_aead_setauthsize(tfm, IEEE80211_CCMP_MIC_LEN);
+	if (!err)
+		return tfm;
 
-	/* Process payload blocks */
-	cpos = cdata;
-	pos = data;
-	for (j = 1; j <= num_blocks; j++) {
-		int blen = (j == num_blocks && last_len) ?
-			last_len : AES_BLOCK_SIZE;
-
-		/* Decryption followed by authentication */
-		b_0[14] = (j >> 8) & 0xff;
-		b_0[15] = j & 0xff;
-		crypto_cipher_encrypt_one(tfm, b, b_0);
-		for (i = 0; i < blen; i++) {
-			*pos = *cpos++ ^ b[i];
-			a[i] ^= *pos++;
-		}
-		crypto_cipher_encrypt_one(tfm, a, a);
-	}
-
-	for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) {
-		if ((mic[i] ^ s_0[i]) != a[i])
-			return -1;
-	}
-
-	return 0;
+	crypto_free_aead(tfm);
+	return ERR_PTR(err);
 }
 
-
-struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
+void ieee80211_aes_key_free(struct crypto_aead *tfm)
 {
-	struct crypto_cipher *tfm;
-
-	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
-	if (!IS_ERR(tfm))
-		crypto_cipher_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
-
-	return tfm;
-}
-
-
-void ieee80211_aes_key_free(struct crypto_cipher *tfm)
-{
-	crypto_free_cipher(tfm);
+	crypto_free_aead(tfm);
 }
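
Note: the rewrite replaces the open-coded CCM construction with the crypto API's "ccm(aes)" AEAD transform, so authentication and encryption run in a single pass and can use accelerated implementations. As consumed above, b_0 is the 16-byte CCM nonce/counter block and aad carries a big-endian u16 length prefix before the associated data. A hypothetical round-trip sketch against the new interface declared in aes_ccm.h:

	#include <linux/err.h>
	#include "aes_ccm.h"

	static int ccmp_roundtrip(const u8 key[], u8 *b_0, u8 *aad,
				  u8 *data, size_t data_len, u8 *mic)
	{
		struct crypto_aead *tfm;
		int err;

		tfm = ieee80211_aes_key_setup_encrypt(key);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* encrypts data in place and writes the 8-byte MIC */
		ieee80211_aes_ccm_encrypt(tfm, b_0, aad, data, data_len, mic);

		/* returns non-zero if the MIC does not verify */
		err = ieee80211_aes_ccm_decrypt(tfm, b_0, aad, data,
						data_len, mic);

		ieee80211_aes_key_free(tfm);
		return err;
	}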
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 5b7d744..2c7ab19 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,13 +12,11 @@
 
 #include <linux/crypto.h>
 
-struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
-void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
-			       u8 *data, size_t data_len,
-			       u8 *cdata, u8 *mic);
-int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
-			      u8 *cdata, size_t data_len,
-			      u8 *mic, u8 *data);
-void ieee80211_aes_key_free(struct crypto_cipher *tfm);
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]);
+void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			       u8 *data, size_t data_len, u8 *mic);
+int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_key_free(struct crypto_aead *tfm);
 
 #endif /* AES_CCM_H */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 629dee7..95667b0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1059,6 +1059,7 @@
 	/* abort any running channel switch */
 	sdata->vif.csa_active = false;
 	cancel_work_sync(&sdata->csa_finalize_work);
+	cancel_work_sync(&sdata->u.ap.request_smps_work);
 
 	/* turn off carrier for this interface and dependent VLANs */
 	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
@@ -1342,8 +1343,8 @@
 				sta->plink_state = params->plink_state;
 
 				ieee80211_mps_sta_status_update(sta);
-				changed |=
-				      ieee80211_mps_local_status_update(sdata);
+				changed |= ieee80211_mps_set_sta_local_pm(sta,
+						NL80211_MESH_POWER_UNKNOWN);
 				break;
 			default:
 				/*  nothing  */
@@ -1553,6 +1554,20 @@
 
 	mutex_unlock(&local->sta_mtx);
 
+	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
+	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+	    sta->known_smps_mode != sta->sdata->bss->req_smps &&
+	    test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
+	    sta_info_tx_streams(sta) != 1) {
+		ht_dbg(sta->sdata,
+		       "%pM just authorized and MIMO capable - update SMPS\n",
+		       sta->sta.addr);
+		ieee80211_send_smps_action(sta->sdata,
+			sta->sdata->bss->req_smps,
+			sta->sta.addr,
+			sta->sdata->vif.bss_conf.bssid);
+	}
+
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
 		ieee80211_recalc_ps(local, -1);
@@ -2337,8 +2352,92 @@
 }
 #endif
 
-int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
-			     enum ieee80211_smps_mode smps_mode)
+int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
+				enum ieee80211_smps_mode smps_mode)
+{
+	struct sta_info *sta;
+	enum ieee80211_smps_mode old_req;
+	int i;
+
+	if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP))
+		return -EINVAL;
+
+	if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+		return 0;
+
+	old_req = sdata->u.ap.req_smps;
+	sdata->u.ap.req_smps = smps_mode;
+
+	/* AUTOMATIC doesn't mean much for AP - don't allow it */
+	if (old_req == smps_mode ||
+	    smps_mode == IEEE80211_SMPS_AUTOMATIC)
+		return 0;
+
+	/* If no associated stations, there's no need to do anything */
+	if (!atomic_read(&sdata->u.ap.num_mcast_sta)) {
+		sdata->smps_mode = smps_mode;
+		ieee80211_queue_work(&sdata->local->hw, &sdata->recalc_smps);
+		return 0;
+	}
+
+	ht_dbg(sdata,
+	       "SMPS %d requested in AP mode, sending Action frame to %d stations\n",
+	       smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta));
+
+	mutex_lock(&sdata->local->sta_mtx);
+	for (i = 0; i < STA_HASH_SIZE; i++) {
+		for (sta = rcu_dereference_protected(sdata->local->sta_hash[i],
+				lockdep_is_held(&sdata->local->sta_mtx));
+		     sta;
+		     sta = rcu_dereference_protected(sta->hnext,
+				lockdep_is_held(&sdata->local->sta_mtx))) {
+			/*
+			 * Only stations associated to our AP and
+			 * associated VLANs
+			 */
+			if (sta->sdata->bss != &sdata->u.ap)
+				continue;
+
+			/* This station doesn't support MIMO - skip it */
+			if (sta_info_tx_streams(sta) == 1)
+				continue;
+
+			/*
+			 * Don't wake up a STA just to send the action frame
+			 * unless we are getting more restrictive.
+			 */
+			if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+			    !ieee80211_smps_is_restrictive(sta->known_smps_mode,
+							   smps_mode)) {
+				ht_dbg(sdata,
+				       "Won't send SMPS to sleeping STA %pM\n",
+				       sta->sta.addr);
+				continue;
+			}
+
+			/*
+			 * If the STA is not authorized, wait until it gets
+			 * authorized and the action frame will be sent then.
+			 */
+			if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+				continue;
+
+			ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
+			ieee80211_send_smps_action(sdata, smps_mode,
+						   sta->sta.addr,
+						   sdata->vif.bss_conf.bssid);
+		}
+	}
+	mutex_unlock(&sdata->local->sta_mtx);
+
+	sdata->smps_mode = smps_mode;
+	ieee80211_queue_work(&sdata->local->hw, &sdata->recalc_smps);
+
+	return 0;
+}
+
+int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
+				 enum ieee80211_smps_mode smps_mode)
 {
 	const u8 *ap;
 	enum ieee80211_smps_mode old_req;
@@ -2346,6 +2445,9 @@
 
 	lockdep_assert_held(&sdata->wdev.mtx);
 
+	if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
+		return -EINVAL;
+
 	old_req = sdata->u.mgd.req_smps;
 	sdata->u.mgd.req_smps = smps_mode;
 
@@ -2402,7 +2504,7 @@
 
 	/* no change, but if automatic follow powersave */
 	sdata_lock(sdata);
-	__ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
+	__ieee80211_request_smps_mgd(sdata, sdata->u.mgd.req_smps);
 	sdata_unlock(sdata);
 
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
@@ -2860,35 +2962,55 @@
 		container_of(work, struct ieee80211_sub_if_data,
 			     csa_finalize_work);
 	struct ieee80211_local *local = sdata->local;
-	int err, changed;
+	int err, changed = 0;
 
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
-		return;
-
 	sdata->radar_required = sdata->csa_radar_required;
 	err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
 					   &changed);
 	if (WARN_ON(err < 0))
 		return;
 
-	err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
-	if (err < 0)
-		return;
+	if (!local->use_chanctx) {
+		local->_oper_chandef = local->csa_chandef;
+		ieee80211_hw_config(local, 0);
+	}
 
-	changed |= err;
-	kfree(sdata->u.ap.next_beacon);
-	sdata->u.ap.next_beacon = NULL;
+	ieee80211_bss_info_change_notify(sdata, changed);
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_AP:
+		err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+		if (err < 0)
+			return;
+		changed |= err;
+		kfree(sdata->u.ap.next_beacon);
+		sdata->u.ap.next_beacon = NULL;
+
+		ieee80211_bss_info_change_notify(sdata, err);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ieee80211_ibss_finish_csa(sdata);
+		break;
+#ifdef CONFIG_MAC80211_MESH
+	case NL80211_IFTYPE_MESH_POINT:
+		err = ieee80211_mesh_finish_csa(sdata);
+		if (err < 0)
+			return;
+		break;
+#endif
+	default:
+		WARN_ON(1);
+		return;
+	}
 	sdata->vif.csa_active = false;
 
 	ieee80211_wake_queues_by_reason(&sdata->local->hw,
 					IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_CSA);
 
-	ieee80211_bss_info_change_notify(sdata, changed);
-
 	cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
 }
 
@@ -2899,6 +3021,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_chanctx *chanctx;
+	struct ieee80211_if_mesh __maybe_unused *ifmsh;
 	int err, num_chanctx;
 
 	if (!list_empty(&local->roc_list) || local->scanning)
@@ -2936,20 +3059,76 @@
 	if (sdata->vif.csa_active)
 		return -EBUSY;
 
-	/* only handle AP for now. */
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP:
+		sdata->csa_counter_offset_beacon =
+			params->counter_offset_beacon;
+		sdata->csa_counter_offset_presp = params->counter_offset_presp;
+		sdata->u.ap.next_beacon =
+			cfg80211_beacon_dup(&params->beacon_after);
+		if (!sdata->u.ap.next_beacon)
+			return -ENOMEM;
+
+		err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+		if (err < 0) {
+			kfree(sdata->u.ap.next_beacon);
+			return err;
+		}
 		break;
+	case NL80211_IFTYPE_ADHOC:
+		if (!sdata->vif.bss_conf.ibss_joined)
+			return -EINVAL;
+
+		if (params->chandef.width != sdata->u.ibss.chandef.width)
+			return -EINVAL;
+
+		switch (params->chandef.width) {
+		case NL80211_CHAN_WIDTH_40:
+			if (cfg80211_get_chandef_type(&params->chandef) !=
+			    cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
+				return -EINVAL;
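+			/* fall through */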
+		case NL80211_CHAN_WIDTH_5:
+		case NL80211_CHAN_WIDTH_10:
+		case NL80211_CHAN_WIDTH_20_NOHT:
+		case NL80211_CHAN_WIDTH_20:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* changes into another band are not supported */
+		if (sdata->u.ibss.chandef.chan->band !=
+		    params->chandef.chan->band)
+			return -EINVAL;
+
+		err = ieee80211_ibss_csa_beacon(sdata, params);
+		if (err < 0)
+			return err;
+		break;
+#ifdef CONFIG_MAC80211_MESH
+	case NL80211_IFTYPE_MESH_POINT:
+		ifmsh = &sdata->u.mesh;
+
+		if (!ifmsh->mesh_id)
+			return -EINVAL;
+
+		if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
+			return -EINVAL;
+
+		/* changes into another band are not supported */
+		if (sdata->vif.bss_conf.chandef.chan->band !=
+		    params->chandef.chan->band)
+			return -EINVAL;
+
+		err = ieee80211_mesh_csa_beacon(sdata, params, true);
+		if (err < 0)
+			return err;
+		break;
+#endif
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
-	if (!sdata->u.ap.next_beacon)
-		return -ENOMEM;
-
-	sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
-	sdata->csa_counter_offset_presp = params->counter_offset_presp;
 	sdata->csa_radar_required = params->radar_required;
 
 	if (params->block_tx)
@@ -2957,10 +3136,6 @@
 				IEEE80211_MAX_QUEUE_MAP,
 				IEEE80211_QUEUE_STOP_REASON_CSA);
 
-	err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
-	if (err < 0)
-		return err;
-
 	local->csa_chandef = params->chandef;
 	sdata->vif.csa_active = true;
 
@@ -3014,7 +3189,8 @@
 			need_offchan = true;
 		if (!ieee80211_is_action(mgmt->frame_control) ||
 		    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
-		    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED)
+		    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
+		    mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
 			break;
 		rcu_read_lock();
 		sta = sta_info_get(sdata, mgmt->da);
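
Note: for the new AP/IBSS/mesh CSA paths, csa_counter_offset_beacon records where the countdown byte sits inside the beacon template, so the beacon TX path can patch it every beacon interval. Roughly (an illustrative sketch only; field names as used in this series):

	static void csa_update_counter(struct beacon_data *beacon, u16 offset)
	{
		u8 *counter = beacon->head + offset;

		/* counts down once per TBTT; 0 means the switch happens now */
		if (*counter)
			(*counter)--;
	}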
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 3a4764b..03ba6b5 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -453,11 +453,6 @@
 	chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
 	drv_change_chanctx(local, ctx, chanctx_changed);
 
-	if (!local->use_chanctx) {
-		local->_oper_chandef = *chandef;
-		ieee80211_hw_config(local, 0);
-	}
-
 	ieee80211_recalc_chanctx_chantype(local, ctx);
 	ieee80211_recalc_smps_chanctx(local, ctx);
 	ieee80211_recalc_radar_chanctx(local, ctx);
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 4ccc5ed..493d680 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -44,6 +44,12 @@
 #define MAC80211_MESH_SYNC_DEBUG 0
 #endif
 
+#ifdef CONFIG_MAC80211_MESH_CSA_DEBUG
+#define MAC80211_MESH_CSA_DEBUG 1
+#else
+#define MAC80211_MESH_CSA_DEBUG 0
+#endif
+
 #ifdef CONFIG_MAC80211_MESH_PS_DEBUG
 #define MAC80211_MESH_PS_DEBUG 1
 #else
@@ -157,6 +163,10 @@
 	_sdata_dbg(MAC80211_MESH_SYNC_DEBUG,				\
 		   sdata, fmt, ##__VA_ARGS__)
 
+#define mcsa_dbg(sdata, fmt, ...)					\
+	_sdata_dbg(MAC80211_MESH_CSA_DEBUG,				\
+		   sdata, fmt, ##__VA_ARGS__)
+
 #define mps_dbg(sdata, fmt, ...)					\
 	_sdata_dbg(MAC80211_MESH_PS_DEBUG,				\
 		   sdata, fmt, ##__VA_ARGS__)
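
Note: mcsa_dbg() follows the existing pattern here: the Kconfig symbol is mapped to a compile-time 0/1 constant, so with the option off the call still type-checks its format arguments but compiles away to nothing. Usage sketch (hypothetical call site):

	mcsa_dbg(sdata, "received CSA frame, pre_value %d\n", pre_value);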
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b0e32d6..5c090e4 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -103,54 +103,57 @@
 	if (!buf)
 		return 0;
 
-	sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
+	sf += scnprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
-		sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
+		sf += scnprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
-		sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
+		sf += scnprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
 	if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
-		sf += snprintf(buf + sf, mxln - sf,
-			       "HOST_BCAST_PS_BUFFERING\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"HOST_BCAST_PS_BUFFERING\n");
 	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
-		sf += snprintf(buf + sf, mxln - sf,
-			       "2GHZ_SHORT_SLOT_INCAPABLE\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"2GHZ_SHORT_SLOT_INCAPABLE\n");
 	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
-		sf += snprintf(buf + sf, mxln - sf,
-			       "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
 	if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
-		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
 	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
-		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
 	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
-		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"NEED_DTIM_BEFORE_ASSOC\n");
 	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
-		sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
 	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
-		sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
+		sf += scnprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
 	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
-		sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
+		sf += scnprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
 	if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
-		sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
+		sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"SUPPORTS_DYNAMIC_SMPS\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
 	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
-		sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
+		sf += scnprintf(buf + sf, mxln - sf,
+				"REPORTS_TX_ACK_STATUS\n");
 	if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-		sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
+		sf += scnprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
 	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
-		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
+		sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
 	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
-		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+		sf += scnprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
 	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
-		sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+		sf += scnprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
 
 	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 	kfree(buf);
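
Note: this conversion fixes a latent truncation bug. snprintf() returns the length the output *would* have had, so after truncation sf can grow past mxln and the next mxln - sf underflows when treated as a size. scnprintf() returns the bytes actually stored, keeping the accumulator bounded:

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "0123456789");  /* n == 10, buf = "0123456" */
	n = scnprintf(buf, sizeof(buf), "0123456789"); /* n == 7, safe to accumulate */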
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cafe614..04b5a14 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -224,12 +224,15 @@
 	     smps_mode == IEEE80211_SMPS_AUTOMATIC))
 		return -EINVAL;
 
-	/* supported only on managed interfaces for now */
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EOPNOTSUPP;
 
 	sdata_lock(sdata);
-	err = __ieee80211_request_smps(sdata, smps_mode);
+	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+		err = __ieee80211_request_smps_mgd(sdata, smps_mode);
+	else
+		err = __ieee80211_request_smps_ap(sdata, smps_mode);
 	sdata_unlock(sdata);
 
 	return err;
@@ -245,12 +248,15 @@
 static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
 				     char *buf, int buflen)
 {
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
-		return -EOPNOTSUPP;
-
-	return snprintf(buf, buflen, "request: %s\nused: %s\n",
-			smps_modes[sdata->u.mgd.req_smps],
-			smps_modes[sdata->smps_mode]);
+	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+		return snprintf(buf, buflen, "request: %s\nused: %s\n",
+				smps_modes[sdata->u.mgd.req_smps],
+				smps_modes[sdata->smps_mode]);
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		return snprintf(buf, buflen, "request: %s\nused: %s\n",
+				smps_modes[sdata->u.ap.req_smps],
+				smps_modes[sdata->smps_mode]);
+	return -EINVAL;
 }
 
 static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
@@ -563,6 +569,7 @@
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(num_mcast_sta);
+	DEBUGFS_ADD_MODE(smps, 0600);
 	DEBUGFS_ADD(num_sta_ps);
 	DEBUGFS_ADD(dtim_count);
 	DEBUGFS_ADD(num_buffered_multicast);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b3ea11f..5d03c47 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1085,4 +1085,31 @@
 	}
 }
 
+static inline int drv_join_ibss(struct ieee80211_local *local,
+				struct ieee80211_sub_if_data *sdata)
+{
+	int ret = 0;
+
+	might_sleep();
+	check_sdata_in_driver(sdata);
+
+	trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
+	if (local->ops->join_ibss)
+		ret = local->ops->join_ibss(&local->hw, &sdata->vif);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+static inline void drv_leave_ibss(struct ieee80211_local *local,
+				  struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+	check_sdata_in_driver(sdata);
+
+	trace_drv_leave_ibss(local, sdata);
+	if (local->ops->leave_ibss)
+		local->ops->leave_ibss(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
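
Note: both new ops are optional; the inline wrappers only call into the driver when it provides them, and wrap the call in tracepoints. A hypothetical driver would wire them up as:

	static int mydrv_join_ibss(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
	{
		/* program BSSID, beacon interval, etc. into the device */
		return 0;
	}

	static void mydrv_leave_ibss(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
	{
		/* tear down device-side IBSS state */
	}

	static const struct ieee80211_ops mydrv_ops = {
		/* ... mandatory ops omitted ... */
		.join_ibss	= mydrv_join_ibss,
		.leave_ibss	= mydrv_leave_ibss,
	};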
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 529bf58..9a8be8f 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -448,14 +448,25 @@
 	return 0;
 }
 
-void ieee80211_request_smps_work(struct work_struct *work)
+void ieee80211_request_smps_mgd_work(struct work_struct *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
 			     u.mgd.request_smps_work);
 
 	sdata_lock(sdata);
-	__ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode);
+	__ieee80211_request_smps_mgd(sdata, sdata->u.mgd.driver_smps_mode);
+	sdata_unlock(sdata);
+}
+
+void ieee80211_request_smps_ap_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.ap.request_smps_work);
+
+	sdata_lock(sdata);
+	__ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode);
 	sdata_unlock(sdata);
 }
 
@@ -464,19 +475,29 @@
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
-	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+	if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION &&
+			 vif->type != NL80211_IFTYPE_AP))
 		return;
 
 	if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (sdata->u.mgd.driver_smps_mode == smps_mode)
-		return;
-
-	sdata->u.mgd.driver_smps_mode = smps_mode;
-
-	ieee80211_queue_work(&sdata->local->hw,
-			     &sdata->u.mgd.request_smps_work);
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		if (sdata->u.mgd.driver_smps_mode == smps_mode)
+			return;
+		sdata->u.mgd.driver_smps_mode = smps_mode;
+		ieee80211_queue_work(&sdata->local->hw,
+				     &sdata->u.mgd.request_smps_work);
+	} else {
+		/* AUTOMATIC is meaningless in AP mode */
+		if (WARN_ON_ONCE(smps_mode == IEEE80211_SMPS_AUTOMATIC))
+			return;
+		if (sdata->u.ap.driver_smps_mode == smps_mode)
+			return;
+		sdata->u.ap.driver_smps_mode = smps_mode;
+		ieee80211_queue_work(&sdata->local->hw,
+				     &sdata->u.ap.request_smps_work);
+	}
 }
 /* this might change ... don't want non-open drivers using it */
 EXPORT_SYMBOL_GPL(ieee80211_request_smps);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a12afe7..531be04 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -39,7 +39,8 @@
 			   const int beacon_int, const u32 basic_rates,
 			   const u16 capability, u64 tsf,
 			   struct cfg80211_chan_def *chandef,
-			   bool *have_higher_than_11mbit)
+			   bool *have_higher_than_11mbit,
+			   struct cfg80211_csa_settings *csa_settings)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
@@ -59,6 +60,7 @@
 		    2 + 8 /* max Supported Rates */ +
 		    3 /* max DS params */ +
 		    4 /* IBSS params */ +
+		    5 /* Channel Switch Announcement */ +
 		    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
 		    2 + sizeof(struct ieee80211_ht_cap) +
 		    2 + sizeof(struct ieee80211_ht_operation) +
@@ -135,6 +137,16 @@
 	*pos++ = 0;
 	*pos++ = 0;
 
+	if (csa_settings) {
+		*pos++ = WLAN_EID_CHANNEL_SWITCH;
+		*pos++ = 3;
+		*pos++ = csa_settings->block_tx ? 1 : 0;
+		*pos++ = ieee80211_frequency_to_channel(
+				csa_settings->chandef.chan->center_freq);
+		sdata->csa_counter_offset_beacon = (pos - presp->head);
+		*pos++ = csa_settings->count;
+	}
+
 	/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
 	if (rates_n > 8) {
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
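
Note: the five bytes reserved and emitted above form a standard Channel Switch Announcement element. Laid out as a struct purely for illustration (this is not a structure the patch defines):

	struct csa_element_layout {	/* illustration only */
		u8 eid;		/* WLAN_EID_CHANNEL_SWITCH */
		u8 len;		/* 3 */
		u8 mode;	/* 1 = stop tx until the switch completes */
		u8 new_ch;	/* target channel number */
		u8 count;	/* TBTTs remaining; this byte's offset is
				 * saved in csa_counter_offset_beacon */
	} __packed;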
@@ -217,6 +229,8 @@
 	struct beacon_data *presp;
 	enum nl80211_bss_scan_width scan_width;
 	bool have_higher_than_11mbit;
+	bool radar_required = false;
+	int err;
 
 	sdata_assert_lock(sdata);
 
@@ -235,6 +249,7 @@
 		ieee80211_bss_info_change_notify(sdata,
 						 BSS_CHANGED_IBSS |
 						 BSS_CHANGED_BEACON_ENABLED);
+		drv_leave_ibss(local, sdata);
 	}
 
 	presp = rcu_dereference_protected(ifibss->presp,
@@ -259,6 +274,23 @@
 		}
 		chandef.width = NL80211_CHAN_WIDTH_20;
 		chandef.center_freq1 = chan->center_freq;
+		/* check again for downgraded chandef */
+		if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+			sdata_info(sdata,
+				   "Failed to join IBSS, beacons forbidden\n");
+			return;
+		}
+	}
+
+	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+					    &chandef);
+	if (err > 0) {
+		if (!ifibss->userspace_handles_dfs) {
+			sdata_info(sdata,
+				   "Failed to join IBSS, DFS channel without control program\n");
+			return;
+		}
+		radar_required = true;
 	}
 
 	ieee80211_vif_release_channel(sdata);
@@ -276,13 +308,14 @@
 
 	presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
 					   capability, tsf, &chandef,
-					   &have_higher_than_11mbit);
+					   &have_higher_than_11mbit, NULL);
 	if (!presp)
 		return;
 
 	rcu_assign_pointer(ifibss->presp, presp);
 	mgmt = (void *)presp->head;
 
+	sdata->radar_required = radar_required;
 	sdata->vif.bss_conf.enable_beacon = true;
 	sdata->vif.bss_conf.beacon_int = beacon_int;
 	sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -317,11 +350,26 @@
 	else
 		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
 
+	ieee80211_set_wmm_default(sdata, true);
+
 	sdata->vif.bss_conf.ibss_joined = true;
 	sdata->vif.bss_conf.ibss_creator = creator;
-	ieee80211_bss_info_change_notify(sdata, bss_change);
 
-	ieee80211_set_wmm_default(sdata, true);
+	err = drv_join_ibss(local, sdata);
+	if (err) {
+		sdata->vif.bss_conf.ibss_joined = false;
+		sdata->vif.bss_conf.ibss_creator = false;
+		sdata->vif.bss_conf.enable_beacon = false;
+		sdata->vif.bss_conf.ssid_len = 0;
+		RCU_INIT_POINTER(ifibss->presp, NULL);
+		kfree_rcu(presp, rcu_head);
+		ieee80211_vif_release_channel(sdata);
+		sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
+			   err);
+		return;
+	}
+
+	ieee80211_bss_info_change_notify(sdata, bss_change);
 
 	ifibss->state = IEEE80211_IBSS_MLME_JOINED;
 	mod_timer(&ifibss->timer,
@@ -416,6 +464,115 @@
 				  tsf, false);
 }
 
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct beacon_data *presp, *old_presp;
+	struct cfg80211_bss *cbss;
+	const struct cfg80211_bss_ies *ies;
+	u16 capability;
+	u64 tsf;
+	int ret = 0;
+
+	sdata_assert_lock(sdata);
+
+	capability = WLAN_CAPABILITY_IBSS;
+
+	if (ifibss->privacy)
+		capability |= WLAN_CAPABILITY_PRIVACY;
+
+	cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
+				ifibss->bssid, ifibss->ssid,
+				ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+				WLAN_CAPABILITY_PRIVACY,
+				capability);
+
+	if (WARN_ON(!cbss)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	rcu_read_lock();
+	ies = rcu_dereference(cbss->ies);
+	tsf = ies->tsf;
+	rcu_read_unlock();
+	cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+
+	old_presp = rcu_dereference_protected(ifibss->presp,
+					  lockdep_is_held(&sdata->wdev.mtx));
+
+	presp = ieee80211_ibss_build_presp(sdata,
+					   sdata->vif.bss_conf.beacon_int,
+					   sdata->vif.bss_conf.basic_rates,
+					   capability, tsf, &ifibss->chandef,
+					   NULL, csa_settings);
+	if (!presp) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	rcu_assign_pointer(ifibss->presp, presp);
+	if (old_presp)
+		kfree_rcu(old_presp, rcu_head);
+
+	/* the beacon might not go out for a while; send an action frame
+	 * immediately to announce the channel switch.
+	 */
+	if (csa_settings)
+		ieee80211_send_action_csa(sdata, csa_settings);
+
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ out:
+	return ret;
+}
+
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct cfg80211_bss *cbss;
+	int err;
+	u16 capability;
+
+	sdata_lock(sdata);
+	/* update cfg80211 bss information with the new channel */
+	if (!is_zero_ether_addr(ifibss->bssid)) {
+		capability = WLAN_CAPABILITY_IBSS;
+
+		if (ifibss->privacy)
+			capability |= WLAN_CAPABILITY_PRIVACY;
+
+		cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
+					ifibss->chandef.chan,
+					ifibss->bssid, ifibss->ssid,
+					ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+					WLAN_CAPABILITY_PRIVACY,
+					capability);
+		/* XXX: should not really modify cfg80211 data */
+		if (cbss) {
+			cbss->channel = sdata->local->csa_chandef.chan;
+			cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+		}
+	}
+
+	ifibss->chandef = sdata->local->csa_chandef;
+
+	/* generate the beacon */
+	err = ieee80211_ibss_csa_beacon(sdata, NULL);
+	sdata_unlock(sdata);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+	cancel_work_sync(&ifibss->csa_connection_drop_work);
+}
+
 static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
 	__acquires(RCU)
 {
@@ -499,6 +656,315 @@
 	return ieee80211_ibss_finish_sta(sta);
 }
 
+static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	int active = 0;
+	struct sta_info *sta;
+
+	sdata_assert_lock(sdata);
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sta->sdata == sdata &&
+		    time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+			       jiffies)) {
+			active++;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return active;
+}
+
+static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_local *local = sdata->local;
+	struct cfg80211_bss *cbss;
+	struct beacon_data *presp;
+	struct sta_info *sta;
+	int active_ibss;
+	u16 capability;
+
+	active_ibss = ieee80211_sta_active_ibss(sdata);
+
+	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+		capability = WLAN_CAPABILITY_IBSS;
+
+		if (ifibss->privacy)
+			capability |= WLAN_CAPABILITY_PRIVACY;
+
+		cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
+					ifibss->bssid, ifibss->ssid,
+					ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+					WLAN_CAPABILITY_PRIVACY,
+					capability);
+
+		if (cbss) {
+			cfg80211_unlink_bss(local->hw.wiphy, cbss);
+			cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+		}
+	}
+
+	ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+
+	sta_info_flush(sdata);
+
+	spin_lock_bh(&ifibss->incomplete_lock);
+	while (!list_empty(&ifibss->incomplete_stations)) {
+		sta = list_first_entry(&ifibss->incomplete_stations,
+				       struct sta_info, list);
+		list_del(&sta->list);
+		spin_unlock_bh(&ifibss->incomplete_lock);
+
+		sta_info_free(local, sta);
+		spin_lock_bh(&ifibss->incomplete_lock);
+	}
+	spin_unlock_bh(&ifibss->incomplete_lock);
+
+	netif_carrier_off(sdata->dev);
+
+	sdata->vif.bss_conf.ibss_joined = false;
+	sdata->vif.bss_conf.ibss_creator = false;
+	sdata->vif.bss_conf.enable_beacon = false;
+	sdata->vif.bss_conf.ssid_len = 0;
+
+	/* remove beacon */
+	presp = rcu_dereference_protected(ifibss->presp,
+					  lockdep_is_held(&sdata->wdev.mtx));
+	RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+	if (presp)
+		kfree_rcu(presp, rcu_head);
+
+	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
+						BSS_CHANGED_IBSS);
+	drv_leave_ibss(local, sdata);
+	ieee80211_vif_release_channel(sdata);
+}
+
+static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.ibss.csa_connection_drop_work);
+
+	ieee80211_ibss_disconnect(sdata);
+	synchronize_rcu();
+	skb_queue_purge(&sdata->skb_queue);
+
+	/* trigger a scan to find another IBSS network to join */
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
+static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	int err;
+
+	/* if the current channel is a DFS channel, mark the channel as
+	 * unavailable.
+	 */
+	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+					    &ifibss->chandef);
+	if (err > 0)
+		cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef,
+				     GFP_ATOMIC);
+}
+
+static bool
+ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+				  struct ieee802_11_elems *elems,
+				  bool beacon)
+{
+	struct cfg80211_csa_settings params;
+	struct ieee80211_csa_ie csa_ie;
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_chanctx *chanctx;
+	enum nl80211_channel_type ch_type;
+	int err, num_chanctx;
+	u32 sta_flags;
+
+	if (sdata->vif.csa_active)
+		return true;
+
+	if (!sdata->vif.bss_conf.ibss_joined)
+		return false;
+
+	sta_flags = IEEE80211_STA_DISABLE_VHT;
+	switch (ifibss->chandef.width) {
+	case NL80211_CHAN_WIDTH_5:
+	case NL80211_CHAN_WIDTH_10:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		sta_flags |= IEEE80211_STA_DISABLE_HT;
+		/* fall through */
+	case NL80211_CHAN_WIDTH_20:
+		sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+		break;
+	default:
+		break;
+	}
+
+	memset(&params, 0, sizeof(params));
+	memset(&csa_ie, 0, sizeof(csa_ie));
+	err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+					   ifibss->chandef.chan->band,
+					   sta_flags, ifibss->bssid, &csa_ie);
+	/* can't switch to destination channel, fail */
+	if (err < 0)
+		goto disconnect;
+
+	/* did not contain a CSA */
+	if (err)
+		return false;
+
+	params.count = csa_ie.count;
+	params.chandef = csa_ie.chandef;
+
+	if (ifibss->chandef.chan->band != params.chandef.chan->band)
+		goto disconnect;
+
+	switch (ifibss->chandef.width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+	case NL80211_CHAN_WIDTH_40:
+		/* keep our current HT mode (HT20/HT40+/HT40-), even if
+	 * another mode has been announced. The mode is not adopted
+		 * within the beacon while doing CSA and we should therefore
+		 * keep the mode which we announce.
+		 */
+		ch_type = cfg80211_get_chandef_type(&ifibss->chandef);
+		cfg80211_chandef_create(&params.chandef, params.chandef.chan,
+					ch_type);
+		break;
+	case NL80211_CHAN_WIDTH_5:
+	case NL80211_CHAN_WIDTH_10:
+		if (params.chandef.width != ifibss->chandef.width) {
+			sdata_info(sdata,
+				   "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+				   ifibss->bssid,
+				   params.chandef.chan->center_freq,
+				   params.chandef.width,
+				   params.chandef.center_freq1,
+				   params.chandef.center_freq2);
+			goto disconnect;
+		}
+		break;
+	default:
+		/* should not happen, sta_flags should prevent VHT modes. */
+		WARN_ON(1);
+		goto disconnect;
+	}
+
+	if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef)) {
+		sdata_info(sdata,
+			   "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+			   ifibss->bssid,
+			   params.chandef.chan->center_freq,
+			   params.chandef.width,
+			   params.chandef.center_freq1,
+			   params.chandef.center_freq2);
+		goto disconnect;
+	}
+
+	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+					    &params.chandef);
+	if (err < 0)
+		goto disconnect;
+	if (err) {
+		/* IBSS-DFS only allowed with a control program */
+		if (!ifibss->userspace_handles_dfs)
+			goto disconnect;
+
+		params.radar_required = true;
+	}
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf) {
+		rcu_read_unlock();
+		goto disconnect;
+	}
+
+	/* don't handle for multi-VIF cases */
+	chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+	if (chanctx->refcount > 1) {
+		rcu_read_unlock();
+		goto disconnect;
+	}
+	num_chanctx = 0;
+	list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
+		num_chanctx++;
+
+	if (num_chanctx > 1) {
+		rcu_read_unlock();
+		goto disconnect;
+	}
+	rcu_read_unlock();
+
+	/* all checks done, now perform the channel switch. */
+	ibss_dbg(sdata,
+		 "received channel switch announcement to go to the channel at %d MHz\n",
+		 params.chandef.chan->center_freq);
+
+	params.block_tx = !!csa_ie.mode;
+
+	ieee80211_ibss_csa_beacon(sdata, &params);
+	sdata->csa_radar_required = params.radar_required;
+
+	if (params.block_tx)
+		ieee80211_stop_queues_by_reason(&sdata->local->hw,
+				IEEE80211_MAX_QUEUE_MAP,
+				IEEE80211_QUEUE_STOP_REASON_CSA);
+
+	sdata->local->csa_chandef = params.chandef;
+	sdata->vif.csa_active = true;
+
+	ieee80211_bss_info_change_notify(sdata, err);
+	drv_channel_switch_beacon(sdata, &params.chandef);
+
+	ieee80211_ibss_csa_mark_radar(sdata);
+
+	return true;
+disconnect:
+	ibss_dbg(sdata, "Can't handle channel switch, disconnect\n");
+	ieee80211_queue_work(&sdata->local->hw,
+			     &ifibss->csa_connection_drop_work);
+
+	ieee80211_ibss_csa_mark_radar(sdata);
+
+	return true;
+}
+
+static void
+ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata,
+				struct ieee80211_mgmt *mgmt, size_t len,
+				struct ieee80211_rx_status *rx_status,
+				struct ieee802_11_elems *elems)
+{
+	int required_len;
+
+	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+		return;
+
+	/* CSA is the only action we handle for now */
+	if (mgmt->u.action.u.measurement.action_code !=
+	    WLAN_ACTION_SPCT_CHL_SWITCH)
+		return;
+
+	required_len = IEEE80211_MIN_ACTION_SIZE +
+		       sizeof(mgmt->u.action.u.chan_switch);
+	if (len < required_len)
+		return;
+
+	ieee80211_ibss_process_chanswitch(sdata, elems, false);
+}
+
 static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
 					  struct ieee80211_mgmt *mgmt,
 					  size_t len)
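
Note: ieee80211_ibss_process_chanswitch() above leans on the three-way return convention of cfg80211_chandef_dfs_required() (negative on error, zero when no radar detection is needed, positive when DFS applies), which is why it checks err < 0 and err separately:

	err = cfg80211_chandef_dfs_required(wiphy, &chandef);
	if (err < 0)
		goto disconnect;		/* invalid chandef */
	if (err > 0)
		params.radar_required = true;	/* DFS channel */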
@@ -661,10 +1127,6 @@
 
 	/* check if we need to merge IBSS */
 
-	/* we use a fixed BSSID */
-	if (sdata->u.ibss.fixed_bssid)
-		goto put_bss;
-
 	/* not an IBSS */
 	if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
 		goto put_bss;
@@ -680,10 +1142,18 @@
 				sdata->u.ibss.ssid_len))
 		goto put_bss;
 
+	/* process channel switch */
+	if (ieee80211_ibss_process_chanswitch(sdata, elems, true))
+		goto put_bss;
+
 	/* same BSSID */
 	if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
 		goto put_bss;
 
+	/* we use a fixed BSSID */
+	if (sdata->u.ibss.fixed_bssid)
+		goto put_bss;
+
 	if (ieee80211_have_rx_timestamp(rx_status)) {
 		/* time when timestamp field was received */
 		rx_timestamp =
@@ -775,30 +1245,6 @@
 	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
-static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_local *local = sdata->local;
-	int active = 0;
-	struct sta_info *sta;
-
-	sdata_assert_lock(sdata);
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(sta, &local->sta_list, list) {
-		if (sta->sdata == sdata &&
-		    time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
-			       jiffies)) {
-			active++;
-			break;
-		}
-	}
-
-	rcu_read_unlock();
-
-	return active;
-}
-
 static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
@@ -1076,6 +1522,8 @@
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_mgmt *mgmt;
 	u16 fc;
+	struct ieee802_11_elems elems;
+	int ies_len;
 
 	rx_status = IEEE80211_SKB_RXCB(skb);
 	mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1101,6 +1549,27 @@
 	case IEEE80211_STYPE_DEAUTH:
 		ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
 		break;
+	case IEEE80211_STYPE_ACTION:
+		switch (mgmt->u.action.category) {
+		case WLAN_CATEGORY_SPECTRUM_MGMT:
+			ies_len = skb->len -
+				  offsetof(struct ieee80211_mgmt,
+					   u.action.u.chan_switch.variable);
+
+			if (ies_len < 0)
+				break;
+
+			ieee802_11_parse_elems(
+				mgmt->u.action.u.chan_switch.variable,
+				ies_len, true, &elems);
+
+			if (elems.parse_error)
+				break;
+
+			ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len,
+							rx_status, &elems);
+			break;
+		}
 	}
 
  mgmt_out:
@@ -1167,6 +1636,8 @@
 		    (unsigned long) sdata);
 	INIT_LIST_HEAD(&ifibss->incomplete_stations);
 	spin_lock_init(&ifibss->incomplete_lock);
+	INIT_WORK(&ifibss->csa_connection_drop_work,
+		  ieee80211_csa_connection_drop_work);
 }
 
 /* scan finished notification */
@@ -1202,6 +1673,7 @@
 
 	sdata->u.ibss.privacy = params->privacy;
 	sdata->u.ibss.control_port = params->control_port;
+	sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs;
 	sdata->u.ibss.basic_rates = params->basic_rates;
 
 	/* fix basic_rates if channel does not support these rates */
@@ -1265,73 +1737,19 @@
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-	struct ieee80211_local *local = sdata->local;
-	struct cfg80211_bss *cbss;
-	u16 capability;
-	int active_ibss;
-	struct sta_info *sta;
-	struct beacon_data *presp;
 
-	active_ibss = ieee80211_sta_active_ibss(sdata);
-
-	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
-		capability = WLAN_CAPABILITY_IBSS;
-
-		if (ifibss->privacy)
-			capability |= WLAN_CAPABILITY_PRIVACY;
-
-		cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
-					ifibss->bssid, ifibss->ssid,
-					ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
-					WLAN_CAPABILITY_PRIVACY,
-					capability);
-
-		if (cbss) {
-			cfg80211_unlink_bss(local->hw.wiphy, cbss);
-			cfg80211_put_bss(local->hw.wiphy, cbss);
-		}
-	}
-
-	ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
-	memset(ifibss->bssid, 0, ETH_ALEN);
+	ieee80211_ibss_disconnect(sdata);
 	ifibss->ssid_len = 0;
-
-	sta_info_flush(sdata);
-
-	spin_lock_bh(&ifibss->incomplete_lock);
-	while (!list_empty(&ifibss->incomplete_stations)) {
-		sta = list_first_entry(&ifibss->incomplete_stations,
-				       struct sta_info, list);
-		list_del(&sta->list);
-		spin_unlock_bh(&ifibss->incomplete_lock);
-
-		sta_info_free(local, sta);
-		spin_lock_bh(&ifibss->incomplete_lock);
-	}
-	spin_unlock_bh(&ifibss->incomplete_lock);
-
-	netif_carrier_off(sdata->dev);
+	memset(ifibss->bssid, 0, ETH_ALEN);
 
 	/* remove beacon */
 	kfree(sdata->u.ibss.ie);
-	presp = rcu_dereference_protected(ifibss->presp,
-					  lockdep_is_held(&sdata->wdev.mtx));
-	RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
 
 	/* on the next join, re-program HT parameters */
 	memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
 	memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
 
-	sdata->vif.bss_conf.ibss_joined = false;
-	sdata->vif.bss_conf.ibss_creator = false;
-	sdata->vif.bss_conf.enable_beacon = false;
-	sdata->vif.bss_conf.ssid_len = 0;
-	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
-						BSS_CHANGED_IBSS);
-	ieee80211_vif_release_channel(sdata);
 	synchronize_rcu();
-	kfree(presp);
 
 	skb_queue_purge(&sdata->skb_queue);
 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 611abfc..29dc505 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -262,6 +262,10 @@
 
 	struct ps_data ps;
 	atomic_t num_mcast_sta; /* number of stations receiving multicast */
+	enum ieee80211_smps_mode req_smps, /* requested smps mode */
+			 driver_smps_mode; /* smps mode requested by the driver */
+
+	struct work_struct request_smps_work;
 };
 
 struct ieee80211_if_wds {
@@ -322,7 +326,6 @@
 
 /* flags used in struct ieee80211_if_managed.flags */
 enum ieee80211_sta_flags {
-	IEEE80211_STA_BEACON_POLL	= BIT(0),
 	IEEE80211_STA_CONNECTION_POLL	= BIT(1),
 	IEEE80211_STA_CONTROL_PORT	= BIT(2),
 	IEEE80211_STA_DISABLE_HT	= BIT(4),
@@ -335,6 +338,7 @@
 	IEEE80211_STA_DISABLE_VHT	= BIT(11),
 	IEEE80211_STA_DISABLE_80P80MHZ	= BIT(12),
 	IEEE80211_STA_DISABLE_160MHZ	= BIT(13),
+	IEEE80211_STA_DISABLE_WMM	= BIT(14),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -487,6 +491,7 @@
 
 struct ieee80211_if_ibss {
 	struct timer_list timer;
+	struct work_struct csa_connection_drop_work;
 
 	unsigned long last_scan_completed;
 
@@ -497,6 +502,7 @@
 	bool privacy;
 
 	bool control_port;
+	bool userspace_handles_dfs;
 
 	u8 bssid[ETH_ALEN] __aligned(2);
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -538,6 +544,11 @@
 	/* add other framework functions here */
 };
 
+struct mesh_csa_settings {
+	struct rcu_head rcu_head;
+	struct cfg80211_csa_settings settings;
+};
+
 struct ieee80211_if_mesh {
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
@@ -598,6 +609,11 @@
 	int ps_peers_light_sleep;
 	int ps_peers_deep_sleep;
 	struct ps_data ps;
+	/* Channel Switching Support */
+	struct mesh_csa_settings __rcu *csa;
+	bool chsw_init;
+	u8 chsw_ttl;
+	u16 pre_value;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -1206,6 +1222,14 @@
 	u16 tid;
 };
 
+/* this struct holds the values parsed from the channel switch IE */
+struct ieee80211_csa_ie {
+	struct cfg80211_chan_def chandef;
+	u8 mode;
+	u8 count;
+	u8 ttl;
+};
+
 /* Parsed Information Elements */
 struct ieee802_11_elems {
 	const u8 *ie_start;
@@ -1242,6 +1266,7 @@
 	const struct ieee80211_timeout_interval_ie *timeout_int;
 	const u8 *opmode_notif;
 	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
+	const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
 
 	/* length of them, respectively */
 	u8 ssid_len;
@@ -1333,11 +1358,19 @@
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb);
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings);
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
 
 /* mesh code */
 void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb);
+int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings,
+			      bool csa_action);
+int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
@@ -1434,7 +1467,10 @@
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
 			       const u8 *bssid);
-void ieee80211_request_smps_work(struct work_struct *work);
+void ieee80211_request_smps_ap_work(struct work_struct *work);
+void ieee80211_request_smps_mgd_work(struct work_struct *work);
+bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
+				   enum ieee80211_smps_mode smps_mode_new);
 
 void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				     u16 initiator, u16 reason, bool stop);
@@ -1484,6 +1520,28 @@
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
 				       struct ieee80211_mgmt *mgmt,
 				       size_t len);
+/**
+ * ieee80211_parse_ch_switch_ie - parses channel switch IEs
+ * @sdata: the sdata of the interface which has received the frame
+ * @elems: parsed 802.11 elements received with the frame
+ * @beacon: indicates if the frame was a beacon or probe response
+ * @current_band: indicates the current band
+ * @sta_flags: contains information about own capabilities and restrictions
+ *	to decide which channel switch announcements can be accepted. Only the
+ *	following subset of &enum ieee80211_sta_flags are evaluated:
+ *	%IEEE80211_STA_DISABLE_HT, %IEEE80211_STA_DISABLE_VHT,
+ *	%IEEE80211_STA_DISABLE_40MHZ, %IEEE80211_STA_DISABLE_80P80MHZ,
+ *	%IEEE80211_STA_DISABLE_160MHZ.
+ * @bssid: the currently connected bssid (for reporting)
+ * @csa_ie: parsed 802.11 CSA elements (count, mode, chandef and mesh TTL).
+ *	All of them are filled in only on success.
+ * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
+ */
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+				 struct ieee802_11_elems *elems, bool beacon,
+				 enum ieee80211_band current_band,
+				 u32 sta_flags, u8 *bssid,
+				 struct ieee80211_csa_ie *csa_ie);
 
 /* Suspend/resume and hw reconfiguration */
 int ieee80211_reconfig(struct ieee80211_local *local);
@@ -1629,8 +1687,10 @@
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
 			    struct ieee802_11_elems *elems,
 			    enum ieee80211_band band, u32 *basic_rates);
-int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
-			     enum ieee80211_smps_mode smps_mode);
+int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
+				 enum ieee80211_smps_mode smps_mode);
+int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
+				enum ieee80211_smps_mode smps_mode);
 void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
 
 size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
@@ -1657,6 +1717,7 @@
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
 				  const struct ieee80211_ht_operation *ht_oper,
 				  struct cfg80211_chan_def *chandef);
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
 
 int __must_check
 ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
@@ -1685,6 +1746,8 @@
 void ieee80211_dfs_cac_timer_work(struct work_struct *work);
 void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
 void ieee80211_dfs_radar_detected_work(struct work_struct *work);
+int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index fcecd63..ff101ea 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -766,6 +766,10 @@
 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		ieee80211_mgd_stop(sdata);
 
+	if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		ieee80211_ibss_stop(sdata);
+
 	/*
 	 * Remove all stations associated with this interface.
 	 *
@@ -1289,7 +1293,10 @@
 	case NL80211_IFTYPE_AP:
 		skb_queue_head_init(&sdata->u.ap.ps.bc_buf);
 		INIT_LIST_HEAD(&sdata->u.ap.vlans);
+		INIT_WORK(&sdata->u.ap.request_smps_work,
+			  ieee80211_request_smps_ap_work);
 		sdata->vif.bss_conf.bssid = sdata->vif.addr;
+		sdata->u.ap.req_smps = IEEE80211_SMPS_OFF;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 		type = NL80211_IFTYPE_STATION;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 620677e..3e51dd7 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -879,7 +879,7 @@
 				  keyconf->keylen, keyconf->key,
 				  0, NULL);
 	if (IS_ERR(key))
-		return ERR_PTR(PTR_ERR(key));
+		return ERR_CAST(key);
 
 	if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
 		key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 036d57e..aaae0ed 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -83,7 +83,7 @@
 			 * Management frames.
 			 */
 			u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN];
-			struct crypto_cipher *tfm;
+			struct crypto_aead *tfm;
 			u32 replays; /* dot11RSNAStatsCCMPReplays */
 		} ccmp;
 		struct {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 707ac61..896fe3b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -12,6 +12,7 @@
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
 #include "mesh.h"
+#include "driver-ops.h"
 
 static int mesh_allocated;
 static struct kmem_cache *rm_cache;
@@ -610,6 +611,7 @@
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct mesh_csa_settings *csa;
 	enum ieee80211_band band;
 	u8 *pos;
 	struct ieee80211_sub_if_data *sdata;
@@ -624,6 +626,10 @@
 
 	head_len = hdr_len +
 		   2 + /* NULL SSID */
+		   /* Channel Switch Announcement */
+		   2 + sizeof(struct ieee80211_channel_sw_ie) +
+		   /* Mesh Channel Switch Parameters */
+		   2 + sizeof(struct ieee80211_mesh_chansw_params_ie) +
 		   2 + 8 + /* supported rates */
 		   2 + 3; /* DS params */
 	tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
@@ -665,6 +671,38 @@
 	*pos++ = WLAN_EID_SSID;
 	*pos++ = 0x0;
 
+	rcu_read_lock();
+	csa = rcu_dereference(ifmsh->csa);
+	if (csa) {
+		__le16 pre_value;
+
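+		/* reserve 13 bytes: 5 for the Channel Switch
+		 * Announcement element and 8 for the Mesh Channel
+		 * Switch Parameters element
+		 */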
+		pos = skb_put(skb, 13);
+		memset(pos, 0, 13);
+		*pos++ = WLAN_EID_CHANNEL_SWITCH;
+		*pos++ = 3;
+		*pos++ = 0x0;
+		*pos++ = ieee80211_frequency_to_channel(
+				csa->settings.chandef.chan->center_freq);
+		sdata->csa_counter_offset_beacon = hdr_len + 6;
+		*pos++ = csa->settings.count;
+		*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
+		*pos++ = 6;
+		if (ifmsh->chsw_init) {
+			*pos++ = ifmsh->mshcfg.dot11MeshTTL;
+			*pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+		} else {
+			*pos++ = ifmsh->chsw_ttl;
+		}
+		*pos++ |= csa->settings.block_tx ?
+			  WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
+		put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos);
+		pos += 2;
+		pre_value = cpu_to_le16(ifmsh->pre_value);
+		memcpy(pos, &pre_value, 2);
+		pos += 2;
+	}
+	rcu_read_unlock();
+
 	if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 	    mesh_add_ds_params_ie(sdata, skb))
 		goto out_free;
@@ -812,6 +850,127 @@
 	ieee80211_configure_filter(local);
 }
 
+static bool
+ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
+				 struct ieee802_11_elems *elems, bool beacon)
+{
+	struct cfg80211_csa_settings params;
+	struct ieee80211_csa_ie csa_ie;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_chanctx *chanctx;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	int err, num_chanctx;
+	u32 sta_flags;
+
+	if (sdata->vif.csa_active)
+		return true;
+
+	if (!ifmsh->mesh_id)
+		return false;
+
+	sta_flags = IEEE80211_STA_DISABLE_VHT;
+	switch (sdata->vif.bss_conf.chandef.width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		sta_flags |= IEEE80211_STA_DISABLE_HT;
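+		/* fall through */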
+	case NL80211_CHAN_WIDTH_20:
+		sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+		break;
+	default:
+		break;
+	}
+
+	memset(&params, 0, sizeof(params));
+	memset(&csa_ie, 0, sizeof(csa_ie));
+	err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
+					   sta_flags, sdata->vif.addr,
+					   &csa_ie);
+	if (err < 0)
+		return false;
+	if (err)
+		return false;
+
+	params.chandef = csa_ie.chandef;
+	params.count = csa_ie.count;
+
+	if (sdata->vif.bss_conf.chandef.chan->band !=
+	    params.chandef.chan->band)
+		return false;
+
+	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
+				     IEEE80211_CHAN_DISABLED)) {
+		sdata_info(sdata,
+			   "mesh STA %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
+			   sdata->vif.addr,
+			   params.chandef.chan->center_freq,
+			   params.chandef.width,
+			   params.chandef.center_freq1,
+			   params.chandef.center_freq2);
+		return false;
+	}
+
+	err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+					    &params.chandef);
+	if (err < 0)
+		return false;
+	if (err) {
+		params.radar_required = true;
+		/* TODO: DFS not (yet) supported */
+		return false;
+	}
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (!chanctx_conf)
+		goto failed_chswitch;
+
+	/* don't handle for multi-VIF cases */
+	chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+	if (chanctx->refcount > 1)
+		goto failed_chswitch;
+
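+	/* likewise, bail out if more than one channel context exists */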
+	num_chanctx = 0;
+	list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
+		num_chanctx++;
+
+	if (num_chanctx > 1)
+		goto failed_chswitch;
+
+	rcu_read_unlock();
+
+	mcsa_dbg(sdata,
+		 "received channel switch announcement to go to channel %d MHz\n",
+		 params.chandef.chan->center_freq);
+
+	params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
+	if (beacon)
+		ifmsh->chsw_ttl = csa_ie.ttl - 1;
+	else
+		ifmsh->chsw_ttl = 0;
+
+	if (ifmsh->chsw_ttl > 0)
+		if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
+			return false;
+
+	sdata->csa_radar_required = params.radar_required;
+
+	if (params.block_tx)
+		ieee80211_stop_queues_by_reason(&sdata->local->hw,
+				IEEE80211_MAX_QUEUE_MAP,
+				IEEE80211_QUEUE_STOP_REASON_CSA);
+
+	sdata->local->csa_chandef = params.chandef;
+	sdata->vif.csa_active = true;
+
+	ieee80211_bss_info_change_notify(sdata, err);
+	drv_channel_switch_beacon(sdata, &params.chandef);
+
+	return true;
+failed_chswitch:
+	rcu_read_unlock();
+	return false;
+}
+
 static void
 ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
 			    struct ieee80211_mgmt *mgmt, size_t len)
@@ -918,6 +1077,142 @@
 	if (ifmsh->sync_ops)
 		ifmsh->sync_ops->rx_bcn_presp(sdata,
 			stype, mgmt, &elems, rx_status);
+
+	if (!ifmsh->chsw_init)
+		ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+}
+
+int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct mesh_csa_settings *tmp_csa_settings;
+	int ret = 0;
+
+	/* Reset the TTL value and Initiator flag */
+	ifmsh->chsw_init = false;
+	ifmsh->chsw_ttl = 0;
+
+	/* Remove the CSA and MCSP elements from the beacon */
+	tmp_csa_settings = rcu_dereference(ifmsh->csa);
+	rcu_assign_pointer(ifmsh->csa, NULL);
+	kfree_rcu(tmp_csa_settings, rcu_head);
+	ret = ieee80211_mesh_rebuild_beacon(sdata);
+	if (ret)
+		return -EINVAL;
+
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+
+	mcsa_dbg(sdata, "complete switching to center freq %d MHz",
+		 sdata->vif.bss_conf.chandef.chan->center_freq);
+	return 0;
+}
+
+int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings,
+			      bool csa_action)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct mesh_csa_settings *tmp_csa_settings;
+	int ret = 0;
+
+	tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
+				   GFP_ATOMIC);
+	if (!tmp_csa_settings)
+		return -ENOMEM;
+
+	memcpy(&tmp_csa_settings->settings, csa_settings,
+	       sizeof(struct cfg80211_csa_settings));
+
+	rcu_assign_pointer(ifmsh->csa, tmp_csa_settings);
+
+	ret = ieee80211_mesh_rebuild_beacon(sdata);
+	if (ret) {
+		tmp_csa_settings = rcu_dereference(ifmsh->csa);
+		rcu_assign_pointer(ifmsh->csa, NULL);
+		kfree_rcu(tmp_csa_settings, rcu_head);
+		return ret;
+	}
+
+	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+
+	if (csa_action)
+		ieee80211_send_action_csa(sdata, csa_settings);
+
+	return 0;
+}
+
+static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
+			       struct ieee80211_mgmt *mgmt, size_t len)
+{
+	struct ieee80211_mgmt *mgmt_fwd;
+	struct sk_buff *skb;
+	struct ieee80211_local *local = sdata->local;
+	u8 *pos = mgmt->u.action.u.chan_switch.variable;
+	size_t offset_ttl;
+
+	skb = dev_alloc_skb(local->tx_headroom + len);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, local->tx_headroom);
+	mgmt_fwd = (struct ieee80211_mgmt *) skb_put(skb, len);
+
+	/* offset_ttl is based on whether the secondary channel
+	 * offset is available or not. Subtract 1 from the mesh TTL
+	 * and disable the initiator flag before forwarding.
+	 */
+	offset_ttl = (len < 42) ? 7 : 10;
+	*(pos + offset_ttl) -= 1;
+	*(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+	sdata->u.mesh.chsw_ttl = *(pos + offset_ttl);
+
+	memcpy(mgmt_fwd, mgmt, len);
+	eth_broadcast_addr(mgmt_fwd->da);
+	memcpy(mgmt_fwd->sa, sdata->vif.addr, ETH_ALEN);
+	memcpy(mgmt_fwd->bssid, sdata->vif.addr, ETH_ALEN);
+
+	ieee80211_tx_skb(sdata, skb);
+	return 0;
+}
+
+static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
+			      struct ieee80211_mgmt *mgmt, size_t len)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee802_11_elems elems;
+	u16 pre_value;
+	bool fwd_csa = true;
+	size_t baselen;
+	u8 *pos, ttl;
+
+	if (mgmt->u.action.u.measurement.action_code !=
+	    WLAN_ACTION_SPCT_CHL_SWITCH)
+		return;
+
+	pos = mgmt->u.action.u.chan_switch.variable;
+	baselen = offsetof(struct ieee80211_mgmt,
+			   u.action.u.chan_switch.variable);
+	ieee802_11_parse_elems(pos, len - baselen, false, &elems);
+
+	/* the Mesh Channel Switch Parameters element is required to
+	 * process the frame; bail out before dereferencing it
+	 */
+	if (!elems.mesh_chansw_params_ie)
+		return;
+
+	ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+	if (!--ttl)
+		fwd_csa = false;
+
+	pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
+	if (ifmsh->pre_value >= pre_value)
+		return;
+
+	ifmsh->pre_value = pre_value;
+
+	if (!ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+		mcsa_dbg(sdata, "Failed to process CSA action frame");
+		return;
+	}
+
+	/* forward or re-broadcast the CSA frame */
+	if (fwd_csa) {
+		if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
+			mcsa_dbg(sdata, "Failed to forward the CSA frame");
+	}
 }
 
 static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
@@ -939,6 +1234,9 @@
 		if (mesh_action_is_path_sel(mgmt))
 			mesh_rx_path_sel_frame(sdata, mgmt, len);
 		break;
+	case WLAN_CATEGORY_SPECTRUM_MGMT:
+		mesh_rx_csa_frame(sdata, mgmt, len);
+		break;
 	}
 }
 
@@ -1056,13 +1354,11 @@
 		    (unsigned long) sdata);
 
 	ifmsh->accepting_plinks = true;
-	ifmsh->preq_id = 0;
-	ifmsh->sn = 0;
-	ifmsh->num_gates = 0;
 	atomic_set(&ifmsh->mpaths, 0);
 	mesh_rmc_init(sdata);
 	ifmsh->last_preq = jiffies;
 	ifmsh->next_perr = jiffies;
+	ifmsh->chsw_init = false;
 	/* Allocate all mesh structures when creating the first mesh interface. */
 	if (!mesh_allocated)
 		ieee80211s_init();
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 6b65d50..4301aa5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -222,7 +222,8 @@
 	mesh_path_flush_by_nexthop(sta);
 
 	ieee80211_mps_sta_status_update(sta);
-	changed |= ieee80211_mps_local_status_update(sdata);
+	changed |= ieee80211_mps_set_sta_local_pm(sta,
+			NL80211_MESH_POWER_UNKNOWN);
 
 	return changed;
 }
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 22290a9..0f79b78 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -152,6 +152,9 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+	if (sta->local_pm == pm)
+		return 0;
+
 	mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
 		pm, sta->sta.addr);
 
@@ -245,6 +248,14 @@
 
 	do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
 
+	/* clear the MPSP flags for non-peers or active STA */
+	if (sta->plink_state != NL80211_PLINK_ESTAB) {
+		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+		clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+	} else if (!do_buffer) {
+		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+	}
+
 	/* Don't let the same PS state be set twice */
 	if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer)
 		return;
@@ -257,14 +268,6 @@
 	} else {
 		ieee80211_sta_ps_deliver_wakeup(sta);
 	}
-
-	/* clear the MPSP flags for non-peers or active STA */
-	if (sta->plink_state != NL80211_PLINK_ESTAB) {
-		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
-		clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
-	} else if (!do_buffer) {
-		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
-	}
 }
 
 static void mps_set_sta_peer_pm(struct sta_info *sta,
@@ -444,8 +447,7 @@
  */
 static void mps_frame_deliver(struct sta_info *sta, int n_frames)
 {
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_local *local = sta->sdata->local;
 	int ac;
 	struct sk_buff_head frames;
 	struct sk_buff *skb;
@@ -558,10 +560,10 @@
 }
 
 /**
- * ieee80211_mps_frame_release - release buffered frames in response to beacon
+ * ieee80211_mps_frame_release - release frames buffered due to mesh power save
  *
  * @sta: mesh STA
- * @elems: beacon IEs
+ * @elems: IEs of beacon or probe response
  *
  * For peers if we have individually-addressed frames buffered or the peer
  * indicates buffered frames, send a corresponding MPSP trigger frame. Since
@@ -588,9 +590,10 @@
 	    (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
 		return;
 
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-		buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
-				skb_queue_len(&sta->tx_filtered[ac]);
+	if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+			buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
+					skb_queue_len(&sta->tx_filtered[ac]);
 
 	if (!has_buffered && !buffer_local)
 		return;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 86e4ad5..d7504ab 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -145,66 +145,6 @@
 	return (1 << ecw) - 1;
 }
 
-static u32 chandef_downgrade(struct cfg80211_chan_def *c)
-{
-	u32 ret;
-	int tmp;
-
-	switch (c->width) {
-	case NL80211_CHAN_WIDTH_20:
-		c->width = NL80211_CHAN_WIDTH_20_NOHT;
-		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-		break;
-	case NL80211_CHAN_WIDTH_40:
-		c->width = NL80211_CHAN_WIDTH_20;
-		c->center_freq1 = c->chan->center_freq;
-		ret = IEEE80211_STA_DISABLE_40MHZ |
-		      IEEE80211_STA_DISABLE_VHT;
-		break;
-	case NL80211_CHAN_WIDTH_80:
-		tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
-		/* n_P40 */
-		tmp /= 2;
-		/* freq_P40 */
-		c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
-		c->width = NL80211_CHAN_WIDTH_40;
-		ret = IEEE80211_STA_DISABLE_VHT;
-		break;
-	case NL80211_CHAN_WIDTH_80P80:
-		c->center_freq2 = 0;
-		c->width = NL80211_CHAN_WIDTH_80;
-		ret = IEEE80211_STA_DISABLE_80P80MHZ |
-		      IEEE80211_STA_DISABLE_160MHZ;
-		break;
-	case NL80211_CHAN_WIDTH_160:
-		/* n_P20 */
-		tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
-		/* n_P80 */
-		tmp /= 4;
-		c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
-		c->width = NL80211_CHAN_WIDTH_80;
-		ret = IEEE80211_STA_DISABLE_80P80MHZ |
-		      IEEE80211_STA_DISABLE_160MHZ;
-		break;
-	default:
-	case NL80211_CHAN_WIDTH_20_NOHT:
-		WARN_ON_ONCE(1);
-		c->width = NL80211_CHAN_WIDTH_20_NOHT;
-		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-		break;
-	case NL80211_CHAN_WIDTH_5:
-	case NL80211_CHAN_WIDTH_10:
-		WARN_ON_ONCE(1);
-		/* keep c->width */
-		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-		break;
-	}
-
-	WARN_ON_ONCE(!cfg80211_chandef_valid(c));
-
-	return ret;
-}
-
 static u32
 ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_supported_band *sband,
@@ -352,7 +292,7 @@
 			break;
 		}
 
-		ret |= chandef_downgrade(chandef);
+		ret |= ieee80211_chandef_downgrade(chandef);
 	}
 
 	if (chandef->width != vht_chandef.width && !tracking)
@@ -406,13 +346,13 @@
 	 */
 	if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
 	    chandef.width == NL80211_CHAN_WIDTH_80P80)
-		flags |= chandef_downgrade(&chandef);
+		flags |= ieee80211_chandef_downgrade(&chandef);
 	if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
 	    chandef.width == NL80211_CHAN_WIDTH_160)
-		flags |= chandef_downgrade(&chandef);
+		flags |= ieee80211_chandef_downgrade(&chandef);
 	if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
 	    chandef.width > NL80211_CHAN_WIDTH_20)
-		flags |= chandef_downgrade(&chandef);
+		flags |= ieee80211_chandef_downgrade(&chandef);
 
 	if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
 		return 0;
@@ -893,8 +833,7 @@
 	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 
-	if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-			    IEEE80211_STA_CONNECTION_POLL))
+	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
 
 	ieee80211_tx_skb(sdata, skb);
@@ -937,6 +876,8 @@
 		container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	u32 changed = 0;
+	int ret;
 
 	if (!ieee80211_sdata_running(sdata))
 		return;
@@ -945,24 +886,39 @@
 	if (!ifmgd->associated)
 		goto out;
 
-	local->_oper_chandef = local->csa_chandef;
+	ret = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+					   &changed);
+	if (ret) {
+		sdata_info(sdata,
+			   "vif channel switch failed, disconnecting\n");
+		ieee80211_queue_work(&sdata->local->hw,
+				     &ifmgd->csa_connection_drop_work);
+		goto out;
+	}
 
-	if (!local->ops->channel_switch) {
-		/* call "hw_config" only if doing sw channel switch */
-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-	} else {
-		/* update the device channel directly */
-		local->hw.conf.chandef = local->_oper_chandef;
+	if (!local->use_chanctx) {
+		local->_oper_chandef = local->csa_chandef;
+		/* Call "hw_config" only if doing sw channel switch.
+		 * Otherwise update the channel directly
+		 */
+		if (!local->ops->channel_switch)
+			ieee80211_hw_config(local, 0);
+		else
+			local->hw.conf.chandef = local->_oper_chandef;
 	}
 
 	/* XXX: shouldn't really modify cfg80211-owned data! */
-	ifmgd->associated->channel = local->_oper_chandef.chan;
+	ifmgd->associated->channel = local->csa_chandef.chan;
 
 	/* XXX: wait for a beacon first? */
 	ieee80211_wake_queues_by_reason(&local->hw,
 					IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_CSA);
+
+	ieee80211_bss_info_change_notify(sdata, changed);
+
  out:
+	sdata->vif.csa_active = false;
 	ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
 	sdata_unlock(sdata);
 }
@@ -1000,20 +956,10 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct cfg80211_bss *cbss = ifmgd->associated;
-	struct ieee80211_bss *bss;
 	struct ieee80211_chanctx *chanctx;
-	enum ieee80211_band new_band;
-	int new_freq;
-	u8 new_chan_no;
-	u8 count;
-	u8 mode;
-	struct ieee80211_channel *new_chan;
-	struct cfg80211_chan_def new_chandef = {};
-	struct cfg80211_chan_def new_vht_chandef = {};
-	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
-	const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
-	const struct ieee80211_ht_operation *ht_oper;
-	int secondary_channel_offset = -1;
+	enum ieee80211_band current_band;
+	struct ieee80211_csa_ie csa_ie;
+	int res;
 
 	sdata_assert_lock(sdata);
 
@@ -1027,181 +973,53 @@
 	if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
 		return;
 
-	sec_chan_offs = elems->sec_chan_offs;
-	wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
-	ht_oper = elems->ht_operation;
-
-	if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
-			    IEEE80211_STA_DISABLE_40MHZ)) {
-		sec_chan_offs = NULL;
-		wide_bw_chansw_ie = NULL;
-		/* only used for bandwidth here */
-		ht_oper = NULL;
-	}
-
-	if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
-		wide_bw_chansw_ie = NULL;
-
-	if (elems->ext_chansw_ie) {
-		if (!ieee80211_operating_class_to_band(
-				elems->ext_chansw_ie->new_operating_class,
-				&new_band)) {
-			sdata_info(sdata,
-				   "cannot understand ECSA IE operating class %d, disconnecting\n",
-				   elems->ext_chansw_ie->new_operating_class);
-			ieee80211_queue_work(&local->hw,
-					     &ifmgd->csa_connection_drop_work);
-		}
-		new_chan_no = elems->ext_chansw_ie->new_ch_num;
-		count = elems->ext_chansw_ie->count;
-		mode = elems->ext_chansw_ie->mode;
-	} else if (elems->ch_switch_ie) {
-		new_band = cbss->channel->band;
-		new_chan_no = elems->ch_switch_ie->new_ch_num;
-		count = elems->ch_switch_ie->count;
-		mode = elems->ch_switch_ie->mode;
-	} else {
-		/* nothing here we understand */
-		return;
-	}
-
-	bss = (void *)cbss->priv;
-
-	new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
-	new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
-	if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
-		sdata_info(sdata,
-			   "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
-			   ifmgd->associated->bssid, new_freq);
+	current_band = cbss->channel->band;
+	memset(&csa_ie, 0, sizeof(csa_ie));
+	res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+					   ifmgd->flags,
+					   ifmgd->associated->bssid, &csa_ie);
+	if (res < 0)
 		ieee80211_queue_work(&local->hw,
 				     &ifmgd->csa_connection_drop_work);
+	if (res)
 		return;
-	}
 
-	if (!beacon && sec_chan_offs) {
-		secondary_channel_offset = sec_chan_offs->sec_chan_offs;
-	} else if (beacon && ht_oper) {
-		secondary_channel_offset =
-			ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
-	} else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
-		/*
-		 * If it's not a beacon, HT is enabled and the IE not present,
-		 * it's 20 MHz, 802.11-2012 8.5.2.6:
-		 *	This element [the Secondary Channel Offset Element] is
-		 *	present when switching to a 40 MHz channel. It may be
-		 *	present when switching to a 20 MHz channel (in which
-		 *	case the secondary channel offset is set to SCN).
-		 */
-		secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
-	}
-
-	switch (secondary_channel_offset) {
-	default:
-		/* secondary_channel_offset was present but is invalid */
-	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
-		cfg80211_chandef_create(&new_chandef, new_chan,
-					NL80211_CHAN_HT20);
-		break;
-	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-		cfg80211_chandef_create(&new_chandef, new_chan,
-					NL80211_CHAN_HT40PLUS);
-		break;
-	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-		cfg80211_chandef_create(&new_chandef, new_chan,
-					NL80211_CHAN_HT40MINUS);
-		break;
-	case -1:
-		cfg80211_chandef_create(&new_chandef, new_chan,
-					NL80211_CHAN_NO_HT);
-		/* keep width for 5/10 MHz channels */
-		switch (sdata->vif.bss_conf.chandef.width) {
-		case NL80211_CHAN_WIDTH_5:
-		case NL80211_CHAN_WIDTH_10:
-			new_chandef.width = sdata->vif.bss_conf.chandef.width;
-			break;
-		default:
-			break;
-		}
-		break;
-	}
-
-	if (wide_bw_chansw_ie) {
-		new_vht_chandef.chan = new_chan;
-		new_vht_chandef.center_freq1 =
-			ieee80211_channel_to_frequency(
-				wide_bw_chansw_ie->new_center_freq_seg0,
-				new_band);
-
-		switch (wide_bw_chansw_ie->new_channel_width) {
-		default:
-			/* hmmm, ignore VHT and use HT if present */
-		case IEEE80211_VHT_CHANWIDTH_USE_HT:
-			new_vht_chandef.chan = NULL;
-			break;
-		case IEEE80211_VHT_CHANWIDTH_80MHZ:
-			new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
-			break;
-		case IEEE80211_VHT_CHANWIDTH_160MHZ:
-			new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
-			break;
-		case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-			/* field is otherwise reserved */
-			new_vht_chandef.center_freq2 =
-				ieee80211_channel_to_frequency(
-					wide_bw_chansw_ie->new_center_freq_seg1,
-					new_band);
-			new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
-			break;
-		}
-		if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
-		    new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
-			chandef_downgrade(&new_vht_chandef);
-		if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
-		    new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
-			chandef_downgrade(&new_vht_chandef);
-		if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
-		    new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
-			chandef_downgrade(&new_vht_chandef);
-	}
-
-	/* if VHT data is there validate & use it */
-	if (new_vht_chandef.chan) {
-		if (!cfg80211_chandef_compatible(&new_vht_chandef,
-						 &new_chandef)) {
-			sdata_info(sdata,
-				   "AP %pM CSA has inconsistent channel data, disconnecting\n",
-				   ifmgd->associated->bssid);
-			ieee80211_queue_work(&local->hw,
-					     &ifmgd->csa_connection_drop_work);
-			return;
-		}
-		new_chandef = new_vht_chandef;
-	}
-
-	if (!cfg80211_chandef_usable(local->hw.wiphy, &new_chandef,
+	if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
 				     IEEE80211_CHAN_DISABLED)) {
 		sdata_info(sdata,
 			   "AP %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
-			   ifmgd->associated->bssid, new_freq,
-			   new_chandef.width, new_chandef.center_freq1,
-			   new_chandef.center_freq2);
+			   ifmgd->associated->bssid,
+			   csa_ie.chandef.chan->center_freq,
+			   csa_ie.chandef.width, csa_ie.chandef.center_freq1,
+			   csa_ie.chandef.center_freq2);
 		ieee80211_queue_work(&local->hw,
 				     &ifmgd->csa_connection_drop_work);
 		return;
 	}
 
 	ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
-
-	if (local->use_chanctx) {
-		sdata_info(sdata,
-			   "not handling channel switch with channel contexts\n");
-		ieee80211_queue_work(&local->hw,
-				     &ifmgd->csa_connection_drop_work);
-		return;
-	}
+	sdata->vif.csa_active = true;
 
 	mutex_lock(&local->chanctx_mtx);
+	if (local->use_chanctx) {
+		u32 num_chanctx = 0;
+
+		list_for_each_entry(chanctx, &local->chanctx_list, list)
+			num_chanctx++;
+
+		if (num_chanctx > 1 ||
+		    !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+			sdata_info(sdata,
+				   "not handling chan-switch with channel contexts\n");
+			ieee80211_queue_work(&local->hw,
+					     &ifmgd->csa_connection_drop_work);
+			mutex_unlock(&local->chanctx_mtx);
+			return;
+		}
+	}
+
 	if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
+		ieee80211_queue_work(&local->hw,
+				     &ifmgd->csa_connection_drop_work);
 		mutex_unlock(&local->chanctx_mtx);
 		return;
 	}
@@ -1217,9 +1035,9 @@
 	}
 	mutex_unlock(&local->chanctx_mtx);
 
-	local->csa_chandef = new_chandef;
+	local->csa_chandef = csa_ie.chandef;
 
-	if (mode)
+	if (csa_ie.mode)
 		ieee80211_stop_queues_by_reason(&local->hw,
 				IEEE80211_MAX_QUEUE_MAP,
 				IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -1228,9 +1046,9 @@
 		/* use driver's channel switch callback */
 		struct ieee80211_channel_switch ch_switch = {
 			.timestamp = timestamp,
-			.block_tx = mode,
-			.chandef = new_chandef,
-			.count = count,
+			.block_tx = csa_ie.mode,
+			.chandef = csa_ie.chandef,
+			.count = csa_ie.count,
 		};
 
 		drv_channel_switch(local, &ch_switch);
@@ -1238,11 +1056,11 @@
 	}
 
 	/* channel switch handled in software */
-	if (count <= 1)
+	if (csa_ie.count <= 1)
 		ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
 	else
 		mod_timer(&ifmgd->chswitch_timer,
-			  TU_TO_EXP_TIME(count * cbss->beacon_interval));
+			  TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
 }
 
 static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
@@ -1374,8 +1192,7 @@
 	if (!mgd->associated)
 		return false;
 
-	if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
-			  IEEE80211_STA_CONNECTION_POLL))
+	if (mgd->flags & IEEE80211_STA_CONNECTION_POLL)
 		return false;
 
 	if (!mgd->have_beacon)
@@ -1691,8 +1508,7 @@
 {
 	lockdep_assert_held(&sdata->local->mtx);
 
-	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-				IEEE80211_STA_BEACON_POLL);
+	sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
 	ieee80211_run_deferred_scan(sdata->local);
 }
 
@@ -1954,11 +1770,8 @@
 	struct ieee80211_local *local = sdata->local;
 
 	mutex_lock(&local->mtx);
-	if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-			      IEEE80211_STA_CONNECTION_POLL))) {
-		mutex_unlock(&local->mtx);
-		return;
-	}
+	if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL))
+		goto out;
 
 	__ieee80211_stop_poll(sdata);
 
@@ -2094,15 +1907,9 @@
 	 * because otherwise we would reset the timer every time and
 	 * never check whether we received a probe response!
 	 */
-	if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-			    IEEE80211_STA_CONNECTION_POLL))
+	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
 		already = true;
 
-	if (beacon)
-		ifmgd->flags |= IEEE80211_STA_BEACON_POLL;
-	else
-		ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
-
 	mutex_unlock(&sdata->local->mtx);
 
 	if (already)
@@ -2174,6 +1981,7 @@
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 			       true, frame_buf);
 	ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+	sdata->vif.csa_active = false;
 	ieee80211_wake_queues_by_reason(&sdata->local->hw,
 					IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -2717,7 +2525,7 @@
 	 */
 	ifmgd->wmm_last_param_set = -1;
 
-	if (elems.wmm_param)
+	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && elems.wmm_param)
 		ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
 					 elems.wmm_param_len);
 	else
@@ -3061,17 +2869,10 @@
 		}
 	}
 
-	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
+	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
 		mlme_dbg_ratelimited(sdata,
 				     "cancelling AP probe due to a received beacon\n");
-		mutex_lock(&local->mtx);
-		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
-		ieee80211_run_deferred_scan(local);
-		mutex_unlock(&local->mtx);
-
-		mutex_lock(&local->iflist_mtx);
-		ieee80211_recalc_ps(local, -1);
-		mutex_unlock(&local->iflist_mtx);
+		ieee80211_reset_ap_probe(sdata);
 	}
 
 	/*
@@ -3152,7 +2953,8 @@
 	ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
 					 &elems, true);
 
-	if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
+	    ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
 				     elems.wmm_param_len))
 		changed |= BSS_CHANGED_QOS;
 
@@ -3543,8 +3345,7 @@
 	} else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
 		run_again(sdata, ifmgd->assoc_data->timeout);
 
-	if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-			    IEEE80211_STA_CONNECTION_POLL) &&
+	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
 	    ifmgd->associated) {
 		u8 bssid[ETH_ALEN];
 		int max_tries;
@@ -3697,7 +3498,7 @@
 		  ieee80211_beacon_connection_loss_work);
 	INIT_WORK(&ifmgd->csa_connection_drop_work,
 		  ieee80211_csa_connection_drop_work);
-	INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
+	INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
 	setup_timer(&ifmgd->timer, ieee80211_sta_timer,
 		    (unsigned long) sdata);
 	setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -3876,7 +3677,7 @@
 		return ret;
 
 	while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
-		ifmgd->flags |= chandef_downgrade(&chandef);
+		ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
 		ret = ieee80211_vif_use_channel(sdata, &chandef,
 						IEEE80211_CHANCTX_SHARED);
 	}
@@ -4135,6 +3936,44 @@
 	return err;
 }
 
+static bool ieee80211_usable_wmm_params(struct ieee80211_sub_if_data *sdata,
+					const u8 *wmm_param, int len)
+{
+	const u8 *pos;
+	size_t left;
+
+	if (len < 8)
+		return false;
+
+	if (wmm_param[5] != 1 /* version */)
+		return false;
+
+	pos = wmm_param + 8;
+	left = len - 8;
+
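+	/* each AC parameter record is 4 bytes: ACI/AIFSN,
+	 * ECWmin/ECWmax and a 16-bit TXOP limit
+	 */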
+	for (; left >= 4; left -= 4, pos += 4) {
+		u8 aifsn = pos[0] & 0x0f;
+		u8 ecwmin = pos[1] & 0x0f;
+		u8 ecwmax = (pos[1] & 0xf0) >> 4;
+		int aci = (pos[0] >> 5) & 0x03;
+
+		if (aifsn < 2) {
+			sdata_info(sdata,
+				   "AP has invalid WMM params (AIFSN=%d for ACI %d), disabling WMM\n",
+				   aifsn, aci);
+			return false;
+		}
+		if (ecwmin > ecwmax) {
+			sdata_info(sdata,
+				   "AP has invalid WMM params (ECWmin/max=%d/%d for ACI %d), disabling WMM\n",
+				   ecwmin, ecwmax, aci);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 			struct cfg80211_assoc_request *req)
 {
@@ -4192,9 +4031,45 @@
 	}
 
 	/* prepare assoc data */
-	
+
 	ifmgd->beacon_crc_valid = false;
 
+	assoc_data->wmm = bss->wmm_used &&
+			  (local->hw.queues >= IEEE80211_NUM_ACS);
+	if (assoc_data->wmm) {
+		/* try to check validity of WMM params IE */
+		const struct cfg80211_bss_ies *ies;
+		const u8 *wp, *start, *end;
+
+		rcu_read_lock();
+		ies = rcu_dereference(req->bss->ies);
+		start = ies->data;
+		end = start + ies->len;
+
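+		/* walk all Microsoft vendor elements; OUI subtype 1
+		 * identifies the WMM Parameter element
+		 */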
+		while (true) {
+			wp = cfg80211_find_vendor_ie(
+				WLAN_OUI_MICROSOFT,
+				WLAN_OUI_TYPE_MICROSOFT_WMM,
+				start, end - start);
+			if (!wp)
+				break;
+			start = wp + wp[1] + 2;
+			/* if this IE is too short, try the next */
+			if (wp[1] <= 4)
+				continue;
+			/* if this IE is WMM params, we found what we wanted */
+			if (wp[6] == 1)
+				break;
+		}
+
+		if (!wp || !ieee80211_usable_wmm_params(sdata, wp + 2,
+							wp[1] - 2)) {
+			assoc_data->wmm = false;
+			ifmgd->flags |= IEEE80211_STA_DISABLE_WMM;
+		}
+		rcu_read_unlock();
+	}
+
 	/*
 	 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
 	 * We still associate in non-HT mode (11a/b/g) if any one of these
@@ -4224,18 +4099,22 @@
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
-	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+	    ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
 		ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
-		if (!bss->wmm_used)
+		if (!bss->wmm_used &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
 			netdev_info(sdata->dev,
 				    "disabling HT as WMM/QoS is not supported by the AP\n");
 	}
 
 	/* disable VHT if we don't support it or the AP doesn't use WMM */
 	if (!sband->vht_cap.vht_supported ||
-	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+	    ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
 		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-		if (!bss->wmm_used)
+		if (!bss->wmm_used &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
 			netdev_info(sdata->dev,
 				    "disabling VHT as WMM/QoS is not supported by the AP\n");
 	}
@@ -4264,8 +4143,6 @@
 		sdata->smps_mode = ifmgd->req_smps;
 
 	assoc_data->capability = req->bss->capability;
-	assoc_data->wmm = bss->wmm_used &&
-			  (local->hw.queues >= IEEE80211_NUM_ACS);
 	assoc_data->supp_rates = bss->supp_rates;
 	assoc_data->supp_rates_len = bss->supp_rates_len;
 
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index e126605..22b223f 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -235,7 +235,8 @@
 static void __rate_control_send_low(struct ieee80211_hw *hw,
 				    struct ieee80211_supported_band *sband,
 				    struct ieee80211_sta *sta,
-				    struct ieee80211_tx_info *info)
+				    struct ieee80211_tx_info *info,
+				    u32 rate_mask)
 {
 	int i;
 	u32 rate_flags =
@@ -247,6 +248,12 @@
 
 	info->control.rates[0].idx = 0;
 	for (i = 0; i < sband->n_bitrates; i++) {
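+		/* skip rates not permitted by the caller's rate mask */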
+		if (!(rate_mask & BIT(i)))
+			continue;
+
+		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+			continue;
+
 		if (!rate_supported(sta, sband->band, i))
 			continue;
 
@@ -274,7 +281,8 @@
 	bool use_basicrate = false;
 
 	if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
-		__rate_control_send_low(txrc->hw, sband, pubsta, info);
+		__rate_control_send_low(txrc->hw, sband, pubsta, info,
+					txrc->rate_idx_mask);
 
 		if (!pubsta && txrc->bss) {
 			mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
@@ -656,7 +664,8 @@
 		rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
 
 	if (dest[0].idx < 0)
-		__rate_control_send_low(&sdata->local->hw, sband, sta, info);
+		__rate_control_send_low(&sdata->local->hw, sband, sta, info,
+					sdata->rc_rateidx_mask[info->band]);
 
 	if (sta)
 		rate_fixup_ratelist(vif, sband, info, dest, max_rates);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 5dedc56..505bc0d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -144,8 +144,8 @@
 
 /* Rate control algorithms */
 #ifdef CONFIG_MAC80211_RC_PID
-extern int rc80211_pid_init(void);
-extern void rc80211_pid_exit(void);
+int rc80211_pid_init(void);
+void rc80211_pid_exit(void);
 #else
 static inline int rc80211_pid_init(void)
 {
@@ -157,8 +157,8 @@
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL
-extern int rc80211_minstrel_init(void);
-extern void rc80211_minstrel_exit(void);
+int rc80211_minstrel_init(void);
+void rc80211_minstrel_exit(void);
 #else
 static inline int rc80211_minstrel_init(void)
 {
@@ -170,8 +170,8 @@
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-extern int rc80211_minstrel_ht_init(void);
-extern void rc80211_minstrel_ht_exit(void);
+int rc80211_minstrel_ht_init(void);
+void rc80211_minstrel_ht_exit(void);
 #else
 static inline int rc80211_minstrel_ht_init(void)
 {
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8b5f7ef..7fa1b36 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -203,6 +203,15 @@
 	memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
 	mi->max_prob_rate = tmp_prob_rate;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* use fixed index if set */
+	if (mp->fixed_rate_idx != -1) {
+		mi->max_tp_rate[0] = mp->fixed_rate_idx;
+		mi->max_tp_rate[1] = mp->fixed_rate_idx;
+		mi->max_prob_rate = mp->fixed_rate_idx;
+	}
+#endif
+
 	/* Reset update timer */
 	mi->stats_update = jiffies;
 
@@ -310,6 +319,11 @@
 	/* increase sum packet counter */
 	mi->packet_count++;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	if (mp->fixed_rate_idx != -1)
+		return;
+#endif
+
 	delta = (mi->packet_count * sampling_ratio / 100) -
 			(mi->sample_count + mi->sample_deferred / 2);
 
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7c323f2..5d60779 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -365,6 +365,14 @@
 		}
 	}
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* use fixed index if set */
+	if (mp->fixed_rate_idx != -1) {
+		mi->max_tp_rate = mp->fixed_rate_idx;
+		mi->max_tp_rate2 = mp->fixed_rate_idx;
+		mi->max_prob_rate = mp->fixed_rate_idx;
+	}
+#endif
 
 	mi->stats_update = jiffies;
 }
@@ -774,6 +782,11 @@
 	info->flags |= mi->tx_flags;
 	minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	if (mp->fixed_rate_idx != -1)
+		return;
+#endif
+
 	/* Don't use EAPOL frames for sampling on non-mrr hw */
 	if (mp->hw->max_rates == 1 &&
 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
@@ -781,16 +794,6 @@
 	else
 		sample_idx = minstrel_get_sample_rate(mp, mi);
 
-#ifdef CONFIG_MAC80211_DEBUGFS
-	/* use fixed index if set */
-	if (mp->fixed_rate_idx != -1) {
-		mi->max_tp_rate = mp->fixed_rate_idx;
-		mi->max_tp_rate2 = mp->fixed_rate_idx;
-		mi->max_prob_rate = mp->fixed_rate_idx;
-		sample_idx = -1;
-	}
-#endif
-
 	mi->total_packets++;
 
 	/* wraparound */
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index c97a065..6ff1346 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -167,29 +167,29 @@
 	 * provide large enough buffers. */
 	length = length < RC_PID_PRINT_BUF_SIZE ?
 		 length : RC_PID_PRINT_BUF_SIZE;
-	p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
+	p = scnprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
 	switch (ev->type) {
 	case RC_PID_EVENT_TYPE_TX_STATUS:
-		p += snprintf(pb + p, length - p, "tx_status %u %u",
-			      !(ev->data.flags & IEEE80211_TX_STAT_ACK),
-			      ev->data.tx_status.status.rates[0].idx);
+		p += scnprintf(pb + p, length - p, "tx_status %u %u",
+			       !(ev->data.flags & IEEE80211_TX_STAT_ACK),
+			       ev->data.tx_status.status.rates[0].idx);
 		break;
 	case RC_PID_EVENT_TYPE_RATE_CHANGE:
-		p += snprintf(pb + p, length - p, "rate_change %d %d",
-			      ev->data.index, ev->data.rate);
+		p += scnprintf(pb + p, length - p, "rate_change %d %d",
+			       ev->data.index, ev->data.rate);
 		break;
 	case RC_PID_EVENT_TYPE_TX_RATE:
-		p += snprintf(pb + p, length - p, "tx_rate %d %d",
-			      ev->data.index, ev->data.rate);
+		p += scnprintf(pb + p, length - p, "tx_rate %d %d",
+			       ev->data.index, ev->data.rate);
 		break;
 	case RC_PID_EVENT_TYPE_PF_SAMPLE:
-		p += snprintf(pb + p, length - p,
-			      "pf_sample %d %d %d %d",
-			      ev->data.pf_sample, ev->data.prop_err,
-			      ev->data.int_err, ev->data.der_err);
+		p += scnprintf(pb + p, length - p,
+			       "pf_sample %d %d %d %d",
+			       ev->data.pf_sample, ev->data.prop_err,
+			       ev->data.int_err, ev->data.der_err);
 		break;
 	}
-	p += snprintf(pb + p, length - p, "\n");
+	p += scnprintf(pb + p, length - p, "\n");
 
 	spin_unlock_irqrestore(&events->lock, status);
 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 674eac1..caecef8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -995,8 +995,9 @@
 				rx->sta->num_duplicates++;
 			}
 			return RX_DROP_UNUSABLE;
-		} else
+		} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
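+			/* for an A-MSDU only the last subframe updates
+			 * last_seq_ctrl, otherwise the remaining
+			 * subframes would be dropped as duplicates
+			 */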
 			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
+		}
 	}
 
 	if (unlikely(rx->skb->len < 16)) {
@@ -2402,7 +2403,8 @@
 		return RX_DROP_UNUSABLE;
 
 	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
-	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED)
+	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
+	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
 		return RX_DROP_UNUSABLE;
 
 	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -2566,31 +2568,49 @@
 
 		goto queue;
 	case WLAN_CATEGORY_SPECTRUM_MGMT:
-		if (status->band != IEEE80211_BAND_5GHZ)
-			break;
-
-		if (sdata->vif.type != NL80211_IFTYPE_STATION)
-			break;
-
 		/* verify action_code is present */
 		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 			break;
 
 		switch (mgmt->u.action.u.measurement.action_code) {
 		case WLAN_ACTION_SPCT_MSR_REQ:
+			if (status->band != IEEE80211_BAND_5GHZ)
+				break;
+
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
 				   sizeof(mgmt->u.action.u.measurement)))
 				break;
-			ieee80211_process_measurement_req(sdata, mgmt, len);
-			goto handled;
-		case WLAN_ACTION_SPCT_CHL_SWITCH:
+
 			if (sdata->vif.type != NL80211_IFTYPE_STATION)
 				break;
 
-			if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
+			ieee80211_process_measurement_req(sdata, mgmt, len);
+			goto handled;
+		case WLAN_ACTION_SPCT_CHL_SWITCH: {
+			u8 *bssid;
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.chan_switch)))
+				break;
+
+			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
+				break;
+
+			if (sdata->vif.type == NL80211_IFTYPE_STATION)
+				bssid = sdata->u.mgd.bssid;
+			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+				bssid = sdata->u.ibss.bssid;
+			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+				bssid = mgmt->sa;
+			else
+				break;
+
+			if (!ether_addr_equal(mgmt->bssid, bssid))
 				break;
 
 			goto queue;
+			}
 		}
 		break;
 	case WLAN_CATEGORY_SA_QUERY:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d2d17a4..5ad66a8 100644
--- a/net/mac80211/scan.c
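+	/* don't count locally buffered frames towards a new MPSP
+	 * trigger while we already own an MPSP with this peer
+	 */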
+++ b/net/mac80211/scan.c
@@ -394,8 +394,7 @@
 		return false;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
-				  IEEE80211_STA_CONNECTION_POLL))
+	    sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL)
 		return false;
 
 	return true;
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 578eea3..a40da20 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -21,6 +21,175 @@
 #include "sta_info.h"
 #include "wme.h"
 
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+				 struct ieee802_11_elems *elems, bool beacon,
+				 enum ieee80211_band current_band,
+				 u32 sta_flags, u8 *bssid,
+				 struct ieee80211_csa_ie *csa_ie)
+{
+	enum ieee80211_band new_band;
+	int new_freq;
+	u8 new_chan_no;
+	struct ieee80211_channel *new_chan;
+	struct cfg80211_chan_def new_vht_chandef = {};
+	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
+	const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+	const struct ieee80211_ht_operation *ht_oper;
+	int secondary_channel_offset = -1;
+
+	sec_chan_offs = elems->sec_chan_offs;
+	wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
+	ht_oper = elems->ht_operation;
+
+	if (sta_flags & (IEEE80211_STA_DISABLE_HT |
+			 IEEE80211_STA_DISABLE_40MHZ)) {
+		sec_chan_offs = NULL;
+		wide_bw_chansw_ie = NULL;
+		/* only used for bandwidth here */
+		ht_oper = NULL;
+	}
+
+	if (sta_flags & IEEE80211_STA_DISABLE_VHT)
+		wide_bw_chansw_ie = NULL;
+
+	if (elems->ext_chansw_ie) {
+		if (!ieee80211_operating_class_to_band(
+				elems->ext_chansw_ie->new_operating_class,
+				&new_band)) {
+			sdata_info(sdata,
+				   "cannot understand ECSA IE operating class %d, disconnecting\n",
+				   elems->ext_chansw_ie->new_operating_class);
+			return -EINVAL;
+		}
+		new_chan_no = elems->ext_chansw_ie->new_ch_num;
+		csa_ie->count = elems->ext_chansw_ie->count;
+		csa_ie->mode = elems->ext_chansw_ie->mode;
+	} else if (elems->ch_switch_ie) {
+		new_band = current_band;
+		new_chan_no = elems->ch_switch_ie->new_ch_num;
+		csa_ie->count = elems->ch_switch_ie->count;
+		csa_ie->mode = elems->ch_switch_ie->mode;
+	} else {
+		/* nothing here we understand */
+		return 1;
+	}
+
+	/* Mesh Channel Switch Parameters Element */
+	if (elems->mesh_chansw_params_ie) {
+		csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
+		csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
+	}
+
+	new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
+	new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
+	if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
+		sdata_info(sdata,
+			   "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
+			   bssid, new_freq);
+		return -EINVAL;
+	}
+
+	if (!beacon && sec_chan_offs) {
+		secondary_channel_offset = sec_chan_offs->sec_chan_offs;
+	} else if (beacon && ht_oper) {
+		secondary_channel_offset =
+			ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
+	} else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
+		/* If it's not a beacon, HT is enabled and the IE not present,
+		 * it's 20 MHz, 802.11-2012 8.5.2.6:
+		 *	This element [the Secondary Channel Offset Element] is
+		 *	present when switching to a 40 MHz channel. It may be
+		 *	present when switching to a 20 MHz channel (in which
+		 *	case the secondary channel offset is set to SCN).
+		 */
+		secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+	}
+
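+	/* derive the HT chandef from the secondary channel offset;
+	 * a wide bandwidth channel switch element may widen it below
+	 */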
+	switch (secondary_channel_offset) {
+	default:
+		/* secondary_channel_offset was present but is invalid */
+	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+		cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+					NL80211_CHAN_HT20);
+		break;
+	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+		cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+					NL80211_CHAN_HT40PLUS);
+		break;
+	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+		cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+					NL80211_CHAN_HT40MINUS);
+		break;
+	case -1:
+		cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+					NL80211_CHAN_NO_HT);
+		/* keep width for 5/10 MHz channels */
+		switch (sdata->vif.bss_conf.chandef.width) {
+		case NL80211_CHAN_WIDTH_5:
+		case NL80211_CHAN_WIDTH_10:
+			csa_ie->chandef.width =
+				sdata->vif.bss_conf.chandef.width;
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	if (wide_bw_chansw_ie) {
+		new_vht_chandef.chan = new_chan;
+		new_vht_chandef.center_freq1 =
+			ieee80211_channel_to_frequency(
+				wide_bw_chansw_ie->new_center_freq_seg0,
+				new_band);
+
+		switch (wide_bw_chansw_ie->new_channel_width) {
+		default:
+			/* hmmm, ignore VHT and use HT if present */
+		case IEEE80211_VHT_CHANWIDTH_USE_HT:
+			new_vht_chandef.chan = NULL;
+			break;
+		case IEEE80211_VHT_CHANWIDTH_80MHZ:
+			new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
+			break;
+		case IEEE80211_VHT_CHANWIDTH_160MHZ:
+			new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
+			break;
+		case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+			/* field is otherwise reserved */
+			new_vht_chandef.center_freq2 =
+				ieee80211_channel_to_frequency(
+					wide_bw_chansw_ie->new_center_freq_seg1,
+					new_band);
+			new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
+			break;
+		}
+		if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
+		    new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
+			ieee80211_chandef_downgrade(&new_vht_chandef);
+		if (sta_flags & IEEE80211_STA_DISABLE_160MHZ &&
+		    new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
+			ieee80211_chandef_downgrade(&new_vht_chandef);
+		if (sta_flags & IEEE80211_STA_DISABLE_40MHZ &&
+		    new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
+			ieee80211_chandef_downgrade(&new_vht_chandef);
+	}
+
+	/* if VHT data is there validate & use it */
+	if (new_vht_chandef.chan) {
+		if (!cfg80211_chandef_compatible(&new_vht_chandef,
+						 &csa_ie->chandef)) {
+			sdata_info(sdata,
+				   "BSS %pM: CSA has inconsistent channel data, disconnecting\n",
+				   bssid);
+			return -EINVAL;
+		}
+		csa_ie->chandef = new_vht_chandef;
+	}
+
+	return 0;
+}
+
 static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
 					struct ieee80211_msrment_ie *request_ie,
 					const u8 *da, const u8 *bssid,
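The switch statement above is the core of the CSA chandef selection: a missing
Secondary Channel Offset element (-1) degrades to a non-HT channel, while the
802.11 encodings 0, 1 and 3 select HT20, HT40+ and HT40-. A minimal standalone
sketch of that mapping, with local stand-in names for the nl80211 channel types
(only the 0/1/3 encodings below match the real IEEE80211_HT_PARAM_CHA_SEC_*
values):

#include <stdio.h>

/* 802.11 Secondary Channel Offset encodings; 2 is reserved */
enum sec_off { SEC_NONE = 0, SEC_ABOVE = 1, SEC_BELOW = 3 };

/* offset == -1 means the element was absent */
static const char *csa_chan_type(int offset)
{
	switch (offset) {
	case SEC_ABOVE:	return "HT40+";	/* secondary 20 MHz above primary */
	case SEC_BELOW:	return "HT40-";	/* secondary 20 MHz below primary */
	case -1:	return "NO_HT";	/* offset unknown: legacy 20 MHz */
	case SEC_NONE:			/* explicit "no secondary channel" */
	default:	return "HT20";	/* reserved values degrade to HT20 */
	}
}

int main(void)
{
	printf("%s %s %s\n", csa_chan_type(SEC_ABOVE),
	       csa_chan_type(-1), csa_chan_type(2));
	return 0;
}

(Prints "HT40+ NO_HT HT20", matching the cfg80211_chandef_create() calls in
the hunk above.)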
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index aeb967a..1eb66e2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -385,6 +385,30 @@
 		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
 	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
+	if (sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		struct ieee80211_supported_band *sband =
+			local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
+		u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
+				IEEE80211_HT_CAP_SM_PS_SHIFT;
+		/*
+		 * Assume that hostapd advertises our caps in the beacon and
+		 * this is the known_smps_mode for a station that just associated
+		 */
+		switch (smps) {
+		case WLAN_HT_SMPS_CONTROL_DISABLED:
+			sta->known_smps_mode = IEEE80211_SMPS_OFF;
+			break;
+		case WLAN_HT_SMPS_CONTROL_STATIC:
+			sta->known_smps_mode = IEEE80211_SMPS_STATIC;
+			break;
+		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
+			sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
 
 	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
 
@@ -1069,6 +1093,19 @@
 
 	ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
 
+	/* This station just woke up and isn't aware of our SMPS state */
+	if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+					   sdata->smps_mode) &&
+	    sta->known_smps_mode != sdata->bss->req_smps &&
+	    sta_info_tx_streams(sta) != 1) {
+		ht_dbg(sdata,
+		       "%pM just woke up and MIMO capable - update SMPS\n",
+		       sta->sta.addr);
+		ieee80211_send_smps_action(sdata, sdata->bss->req_smps,
+					   sta->sta.addr,
+					   sdata->vif.bss_conf.bssid);
+	}
+
 	local->total_ps_buffered -= buffered;
 
 	sta_info_recalc_tim(sta);
@@ -1520,3 +1557,38 @@
 
 	return 0;
 }
+
+u8 sta_info_tx_streams(struct sta_info *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
+	u8 rx_streams;
+
+	if (!sta->sta.ht_cap.ht_supported)
+		return 1;
+
+	if (sta->sta.vht_cap.vht_supported) {
+		int i;
+		u16 tx_mcs_map =
+			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);
+
+		for (i = 7; i >= 0; i--)
+			if (((tx_mcs_map >> (i * 2)) & 0x3) !=
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
+				return i + 1;
+	}
+
+	if (ht_cap->mcs.rx_mask[3])
+		rx_streams = 4;
+	else if (ht_cap->mcs.rx_mask[2])
+		rx_streams = 3;
+	else if (ht_cap->mcs.rx_mask[1])
+		rx_streams = 2;
+	else
+		rx_streams = 1;
+
+	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
+		return rx_streams;
+
+	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
+}
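sta_info_tx_streams() above decodes the VHT TX MCS map by scanning its eight
2-bit per-stream fields from the highest spatial stream down; the first field
whose value is not NOT_SUPPORTED (3) determines the stream count. A standalone
sketch of just that decoding step (local names, not the kernel's; the
2-bits-per-NSS layout is the standard one):

#include <stdio.h>
#include <stdint.h>

#define VHT_MCS_NOT_SUPPORTED	3	/* 2-bit field value */

static int vht_tx_streams(uint16_t tx_mcs_map)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (((tx_mcs_map >> (i * 2)) & 0x3) != VHT_MCS_NOT_SUPPORTED)
			return i + 1;	/* highest NSS with a usable MCS set */
	return 1;			/* all-NOT_SUPPORTED map: assume one */
}

int main(void)
{
	/* fields 0 and 1 hold value 2 (MCS 0-9 supported), the rest are 3 */
	printf("%d\n", vht_tx_streams(0xfffa));	/* prints 2 */
	return 0;
}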
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 4208dbd..3ef06a2 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -301,6 +301,8 @@
  * @chains: chains ever used for RX from this station
  * @chain_signal_last: last signal (per chain)
  * @chain_signal_avg: signal average (per chain)
+ * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
+ *	AP only.
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -411,6 +413,8 @@
 	unsigned int lost_packets;
 	unsigned int beacon_loss_count;
 
+	enum ieee80211_smps_mode known_smps_mode;
+
 	/* keep last! */
 	struct ieee80211_sta sta;
 };
@@ -613,6 +617,7 @@
 			  struct rate_info *rinfo);
 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
 			  unsigned long exp_time);
+u8 sta_info_tx_streams(struct sta_info *sta);
 
 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 78dc2e9..52a152b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -194,29 +194,36 @@
 	if (ieee80211_is_action(mgmt->frame_control) &&
 	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
 	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
-	    sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    ieee80211_sdata_running(sdata)) {
-		/*
-		 * This update looks racy, but isn't -- if we come
-		 * here we've definitely got a station that we're
-		 * talking to, and on a managed interface that can
-		 * only be the AP. And the only other place updating
-		 * this variable in managed mode is before association.
-		 */
+		enum ieee80211_smps_mode smps_mode;
+
 		switch (mgmt->u.action.u.ht_smps.smps_control) {
 		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
-			sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
+			smps_mode = IEEE80211_SMPS_DYNAMIC;
 			break;
 		case WLAN_HT_SMPS_CONTROL_STATIC:
-			sdata->smps_mode = IEEE80211_SMPS_STATIC;
+			smps_mode = IEEE80211_SMPS_STATIC;
 			break;
 		case WLAN_HT_SMPS_CONTROL_DISABLED:
 		default: /* shouldn't happen since we don't send that */
-			sdata->smps_mode = IEEE80211_SMPS_OFF;
+			smps_mode = IEEE80211_SMPS_OFF;
 			break;
 		}
 
-		ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			/*
+			 * This update looks racy, but isn't -- if we come
+			 * here we've definitely got a station that we're
+			 * talking to, and on a managed interface that can
+			 * only be the AP. And the only other place updating
+			 * this variable in managed mode is before association.
+			 */
+			sdata->smps_mode = smps_mode;
+			ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
+		} else if (sdata->vif.type == NL80211_IFTYPE_AP ||
+			   sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+			sta->known_smps_mode = smps_mode;
+		}
 	}
 }
 
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 1aba645..d4cee98 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -77,13 +77,13 @@
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
-		__array(char, addr, 6)
+		__array(char, addr, ETH_ALEN)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		memcpy(__entry->addr, sdata->vif.addr, 6);
+		memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN);
 	),
 
 	TP_printk(
@@ -1475,6 +1475,41 @@
 );
 #endif
 
+TRACE_EVENT(drv_join_ibss,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_bss_conf *info),
+
+	TP_ARGS(local, sdata, info),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(u8, dtimper)
+		__field(u16, bcnint)
+		__dynamic_array(u8, ssid, info->ssid_len)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->dtimper = info->dtim_period;
+		__entry->bcnint = info->beacon_int;
+		memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT  VIF_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG
+	)
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 70b5a05..c558b24 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1367,6 +1367,35 @@
 	return 0;
 }
 
+bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif, struct sk_buff *skb,
+			      int band, struct ieee80211_sta **sta)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_data tx;
+
+	if (ieee80211_tx_prepare(sdata, &tx, skb) == TX_DROP)
+		return false;
+
+	info->band = band;
+	info->control.vif = vif;
+	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];
+
+	if (invoke_tx_handlers(&tx))
+		return false;
+
+	if (sta) {
+		if (tx.sta)
+			*sta = &tx.sta->sta;
+		else
+			*sta = NULL;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
+
 /*
  * Returns false if the frame couldn't be transmitted but was queued instead.
  */
@@ -1982,7 +2011,7 @@
 	 * EAPOL frames from the local station.
 	 */
 	if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
-		     !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+		     !multicast && !authorized &&
 		     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
 		      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2358,15 +2387,35 @@
 	struct probe_resp *resp;
 	int counter_offset_beacon = sdata->csa_counter_offset_beacon;
 	int counter_offset_presp = sdata->csa_counter_offset_presp;
+	u8 *beacon_data;
+	size_t beacon_data_len;
 
-	/* warn if the driver did not check for/react to csa completeness */
-	if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_AP:
+		beacon_data = beacon->tail;
+		beacon_data_len = beacon->tail_len;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		beacon_data = beacon->head;
+		beacon_data_len = beacon->head_len;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		beacon_data = beacon->head;
+		beacon_data_len = beacon->head_len;
+		break;
+	default:
+		return;
+	}
+	if (WARN_ON(counter_offset_beacon >= beacon_data_len))
 		return;
 
-	((u8 *)beacon->tail)[counter_offset_beacon]--;
+	/* warn if the driver did not check for/react to csa completeness */
+	if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
+		return;
 
-	if (sdata->vif.type == NL80211_IFTYPE_AP &&
-	    counter_offset_presp) {
+	beacon_data[counter_offset_beacon]--;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) {
 		rcu_read_lock();
 		resp = rcu_dereference(sdata->u.ap.probe_resp);
 
@@ -2401,6 +2450,24 @@
 			goto out;
 		beacon_data = beacon->tail;
 		beacon_data_len = beacon->tail_len;
+	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
+		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+		beacon = rcu_dereference(ifibss->presp);
+		if (!beacon)
+			goto out;
+
+		beacon_data = beacon->head;
+		beacon_data_len = beacon->head_len;
+	} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
+		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+		beacon = rcu_dereference(ifmsh->beacon);
+		if (!beacon)
+			goto out;
+
+		beacon_data = beacon->head;
+		beacon_data_len = beacon->head_len;
 	} else {
 		WARN_ON(1);
 		goto out;
@@ -2485,6 +2552,9 @@
 		if (!presp)
 			goto out;
 
+		if (sdata->vif.csa_active)
+			ieee80211_update_csa(sdata, presp);
+
 		skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
 		if (!skb)
 			goto out;
@@ -2502,6 +2572,9 @@
 		if (!bcn)
 			goto out;
 
+		if (sdata->vif.csa_active)
+			ieee80211_update_csa(sdata, bcn);
+
 		if (ifmsh->sync_ops)
 			ifmsh->sync_ops->adjust_tbtt(
 						sdata);
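The beacon path above treats the CSA countdown as a byte inside the beacon
template: each beacon transmission decrements it, and the WARN_ON catches a
driver that lets it hit zero without completing the switch. A toy model of
that pattern (simplified buffer and an invented offset, not the real beacon
structures):

#include <stdio.h>
#include <stdint.h>

struct beacon {
	uint8_t data[64];
	size_t len;
};

static int csa_update_counter(struct beacon *b, size_t counter_off)
{
	if (counter_off >= b->len)	/* offset must lie inside template */
		return -1;
	if (b->data[counter_off] == 0)	/* switch should already have run */
		return -1;
	return --b->data[counter_off];	/* beacons left until the switch */
}

int main(void)
{
	struct beacon b = { .len = 16 };
	size_t off = 10;		/* invented counter position */

	b.data[off] = 3;		/* switch in 3 beacon intervals */
	while (csa_update_counter(&b, off) > 0)
		;			/* one iteration per "beacon" */
	printf("counter %u -> perform channel switch\n", b.data[off]);
	return 0;
}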
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 69e4ef5..592a181 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -300,9 +300,6 @@
 		if (!sdata->dev)
 			continue;
 
-		if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
-			continue;
-
 		if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
 		    local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
 			continue;
@@ -567,58 +564,14 @@
 					IEEE80211_QUEUE_STOP_REASON_FLUSH);
 }
 
-void ieee80211_iterate_active_interfaces(
-	struct ieee80211_hw *hw, u32 iter_flags,
-	void (*iterator)(void *data, u8 *mac,
-			 struct ieee80211_vif *vif),
-	void *data)
+static void __iterate_active_interfaces(struct ieee80211_local *local,
+					u32 iter_flags,
+					void (*iterator)(void *data, u8 *mac,
+						struct ieee80211_vif *vif),
+					void *data)
 {
-	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 
-	mutex_lock(&local->iflist_mtx);
-
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		switch (sdata->vif.type) {
-		case NL80211_IFTYPE_MONITOR:
-			if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
-				continue;
-			break;
-		case NL80211_IFTYPE_AP_VLAN:
-			continue;
-		default:
-			break;
-		}
-		if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
-		    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
-			continue;
-		if (ieee80211_sdata_running(sdata))
-			iterator(data, sdata->vif.addr,
-				 &sdata->vif);
-	}
-
-	sdata = rcu_dereference_protected(local->monitor_sdata,
-					  lockdep_is_held(&local->iflist_mtx));
-	if (sdata &&
-	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
-	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
-		iterator(data, sdata->vif.addr, &sdata->vif);
-
-	mutex_unlock(&local->iflist_mtx);
-}
-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
-
-void ieee80211_iterate_active_interfaces_atomic(
-	struct ieee80211_hw *hw, u32 iter_flags,
-	void (*iterator)(void *data, u8 *mac,
-			 struct ieee80211_vif *vif),
-	void *data)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata;
-
-	rcu_read_lock();
-
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_MONITOR:
@@ -638,16 +591,57 @@
 				 &sdata->vif);
 	}
 
-	sdata = rcu_dereference(local->monitor_sdata);
+	sdata = rcu_dereference_check(local->monitor_sdata,
+				      lockdep_is_held(&local->iflist_mtx) ||
+				      lockdep_rtnl_is_held());
 	if (sdata &&
 	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
 	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
 		iterator(data, sdata->vif.addr, &sdata->vif);
+}
 
+void ieee80211_iterate_active_interfaces(
+	struct ieee80211_hw *hw, u32 iter_flags,
+	void (*iterator)(void *data, u8 *mac,
+			 struct ieee80211_vif *vif),
+	void *data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+
+	mutex_lock(&local->iflist_mtx);
+	__iterate_active_interfaces(local, iter_flags, iterator, data);
+	mutex_unlock(&local->iflist_mtx);
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
+
+void ieee80211_iterate_active_interfaces_atomic(
+	struct ieee80211_hw *hw, u32 iter_flags,
+	void (*iterator)(void *data, u8 *mac,
+			 struct ieee80211_vif *vif),
+	void *data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+
+	rcu_read_lock();
+	__iterate_active_interfaces(local, iter_flags, iterator, data);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
 
+void ieee80211_iterate_active_interfaces_rtnl(
+	struct ieee80211_hw *hw, u32 iter_flags,
+	void (*iterator)(void *data, u8 *mac,
+			 struct ieee80211_vif *vif),
+	void *data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+
+	ASSERT_RTNL();
+
+	__iterate_active_interfaces(local, iter_flags, iterator, data);
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
+
 /*
  * Nothing should have been stuffed into the workqueue during
  * the suspend->resume cycle. If this WARN is seen then there
@@ -746,6 +740,7 @@
 		case WLAN_EID_TIMEOUT_INTERVAL:
 		case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
 		case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+		case WLAN_EID_CHAN_SWITCH_PARAM:
 		/*
 		 * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
 		 * that if the content gets bigger it might be needed more than once
@@ -911,6 +906,14 @@
 			}
 			elems->sec_chan_offs = (void *)pos;
 			break;
+		case WLAN_EID_CHAN_SWITCH_PARAM:
+			if (elen !=
+			    sizeof(*elems->mesh_chansw_params_ie)) {
+				elem_parse_failed = true;
+				break;
+			}
+			elems->mesh_chansw_params_ie = (void *)pos;
+			break;
 		case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
 			if (!action ||
 			    elen != sizeof(*elems->wide_bw_chansw_ie)) {
@@ -1007,14 +1010,21 @@
 	 */
 	enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
 
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-		/* Set defaults according to 802.11-2007 Table 7-37 */
-		aCWmax = 1023;
-		if (use_11b)
-			aCWmin = 31;
-		else
-			aCWmin = 15;
+	/* Set defaults according to 802.11-2007 Table 7-37 */
+	aCWmax = 1023;
+	if (use_11b)
+		aCWmin = 31;
+	else
+		aCWmin = 15;
 
+	/* Configure old 802.11b/g medium access rules. */
+	qparam.cw_max = aCWmax;
+	qparam.cw_min = aCWmin;
+	qparam.txop = 0;
+	qparam.aifs = 2;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		/* Update if QoS is enabled. */
 		if (enable_qos) {
 			switch (ac) {
 			case IEEE80211_AC_BK:
@@ -1050,12 +1060,6 @@
 				qparam.aifs = 2;
 				break;
 			}
-		} else {
-			/* Confiure old 802.11b/g medium access rules. */
-			qparam.cw_max = aCWmax;
-			qparam.cw_min = aCWmin;
-			qparam.txop = 0;
-			qparam.aifs = 2;
 		}
 
 		qparam.uapsd = false;
@@ -1084,8 +1088,8 @@
 	struct ieee80211_mgmt *mgmt;
 	int err;
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-			    sizeof(*mgmt) + 6 + extra_len);
+	/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
 	if (!skb)
 		return;
 
@@ -2296,3 +2300,175 @@
 	ieee80211_queue_work(hw, &local->radar_detected_work);
 }
 EXPORT_SYMBOL(ieee80211_radar_detected);
+
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+{
+	u32 ret;
+	int tmp;
+
+	switch (c->width) {
+	case NL80211_CHAN_WIDTH_20:
+		c->width = NL80211_CHAN_WIDTH_20_NOHT;
+		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		c->width = NL80211_CHAN_WIDTH_20;
+		c->center_freq1 = c->chan->center_freq;
+		ret = IEEE80211_STA_DISABLE_40MHZ |
+		      IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P40 */
+		tmp /= 2;
+		/* freq_P40 */
+		c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
+		c->width = NL80211_CHAN_WIDTH_40;
+		ret = IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		c->center_freq2 = 0;
+		c->width = NL80211_CHAN_WIDTH_80;
+		ret = IEEE80211_STA_DISABLE_80P80MHZ |
+		      IEEE80211_STA_DISABLE_160MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		/* n_P20 */
+		tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P80 */
+		tmp /= 4;
+		c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
+		c->width = NL80211_CHAN_WIDTH_80;
+		ret = IEEE80211_STA_DISABLE_80P80MHZ |
+		      IEEE80211_STA_DISABLE_160MHZ;
+		break;
+	default:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		WARN_ON_ONCE(1);
+		c->width = NL80211_CHAN_WIDTH_20_NOHT;
+		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_5:
+	case NL80211_CHAN_WIDTH_10:
+		WARN_ON_ONCE(1);
+		/* keep c->width */
+		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+		break;
+	}
+
+	WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+
+	return ret;
+}
+
+/*
+ * Returns true if smps_mode_new is strictly more restrictive than
+ * smps_mode_old.
+ */
+bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
+				   enum ieee80211_smps_mode smps_mode_new)
+{
+	if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC ||
+			 smps_mode_new == IEEE80211_SMPS_AUTOMATIC))
+		return false;
+
+	switch (smps_mode_old) {
+	case IEEE80211_SMPS_STATIC:
+		return false;
+	case IEEE80211_SMPS_DYNAMIC:
+		return smps_mode_new == IEEE80211_SMPS_STATIC;
+	case IEEE80211_SMPS_OFF:
+		return smps_mode_new != IEEE80211_SMPS_OFF;
+	default:
+		WARN_ON(1);
+	}
+
+	return false;
+}
+
+int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+			      struct cfg80211_csa_settings *csa_settings)
+{
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_local *local = sdata->local;
+	int freq;
+	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch) +
+			       sizeof(mgmt->u.action.u.chan_switch);
+	u8 *pos;
+
+	if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	skb = dev_alloc_skb(local->tx_headroom + hdr_len +
+			    5 + /* channel switch announcement element */
+			    3 + /* secondary channel offset element */
+			    8); /* mesh channel switch parameters element */
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reserve(skb, local->tx_headroom);
+	mgmt = (struct ieee80211_mgmt *)skb_put(skb, hdr_len);
+	memset(mgmt, 0, hdr_len);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+
+	eth_broadcast_addr(mgmt->da);
+	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+	} else {
+		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+		memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
+	}
+	mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
+	mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH;
+	pos = skb_put(skb, 5);
+	*pos++ = WLAN_EID_CHANNEL_SWITCH;			/* EID */
+	*pos++ = 3;						/* IE length */
+	*pos++ = csa_settings->block_tx ? 1 : 0;		/* CSA mode */
+	freq = csa_settings->chandef.chan->center_freq;
+	*pos++ = ieee80211_frequency_to_channel(freq);		/* channel */
+	*pos++ = csa_settings->count;				/* count */
+
+	if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) {
+		enum nl80211_channel_type ch_type;
+
+		skb_put(skb, 3);
+		*pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET;	/* EID */
+		*pos++ = 1;					/* IE length */
+		ch_type = cfg80211_get_chandef_type(&csa_settings->chandef);
+		if (ch_type == NL80211_CHAN_HT40PLUS)
+			*pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		else
+			*pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+	}
+
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+		__le16 pre_value;
+
+		skb_put(skb, 8);
+		*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;		/* EID */
+		*pos++ = 6;					/* IE length */
+		*pos++ = sdata->u.mesh.mshcfg.dot11MeshTTL;	/* Mesh TTL */
+		*pos = 0x00;	/* Mesh Flag: Tx Restrict, Initiator, Reason */
+		*pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+		*pos++ |= csa_settings->block_tx ?
+			  WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
+		put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */
+		pos += 2;
+		if (!ifmsh->pre_value)
+			ifmsh->pre_value = 1;
+		else
+			ifmsh->pre_value++;
+		pre_value = cpu_to_le16(ifmsh->pre_value);
+		memcpy(pos, &pre_value, 2);		/* Precedence Value */
+		pos += 2;
+		ifmsh->chsw_init = true;
+	}
+
+	ieee80211_tx_skb(sdata, skb);
+	return 0;
+}
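The interface-iterator rework above is a lock-agnostic-core refactor:
__iterate_active_interfaces() itself takes no locks, and the three exported
wrappers supply the context (the iflist_mtx mutex, an RCU read lock, or an
RTNL assertion). A condensed userspace sketch of that shape, with a pthread
mutex standing in for iflist_mtx (the RCU and RTNL flavors are omitted):

#include <pthread.h>
#include <stdio.h>

struct iface {
	const char *name;
	struct iface *next;
};

static struct iface *ifaces;
static pthread_mutex_t iflist_mtx = PTHREAD_MUTEX_INITIALIZER;

/* shared body: walks the list, no locking of its own */
static void __iterate(void (*fn)(struct iface *, void *), void *data)
{
	struct iface *i;

	for (i = ifaces; i; i = i->next)
		fn(i, data);
}

/* wrapper: only adds the locking context */
static void iterate_locked(void (*fn)(struct iface *, void *), void *data)
{
	pthread_mutex_lock(&iflist_mtx);
	__iterate(fn, data);
	pthread_mutex_unlock(&iflist_mtx);
}

static void print_iface(struct iface *i, void *data)
{
	(void)data;
	printf("%s\n", i->name);
}

int main(void)
{
	struct iface wlan0 = { "wlan0", NULL };

	ifaces = &wlan0;
	iterate_locked(print_iface, NULL);
	return 0;
}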
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 97c2894..de01127 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -185,13 +185,13 @@
 	if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
 		vht_cap->cap |= cap_info &
 				(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
-				 IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX |
 				 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX);
 	}
 
 	if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
 		vht_cap->cap |= cap_info &
-				IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+				(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				 IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX);
 
 	if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
 		vht_cap->cap |= cap_info &
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c9edfcb..d657282 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -301,22 +301,16 @@
 }
 
 
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
 				int encrypted)
 {
 	__le16 mask_fc;
 	int a4_included, mgmt;
 	u8 qos_tid;
-	u8 *b_0, *aad;
-	u16 data_len, len_a;
+	u16 len_a;
 	unsigned int hdrlen;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-	memset(scratch, 0, 6 * AES_BLOCK_SIZE);
-
-	b_0 = scratch + 3 * AES_BLOCK_SIZE;
-	aad = scratch + 4 * AES_BLOCK_SIZE;
-
 	/*
 	 * Mask FC: zero subtype b4 b5 b6 (if not mgmt)
 	 * Retry, PwrMgt, MoreData; set Protected
@@ -338,20 +332,21 @@
 	else
 		qos_tid = 0;
 
-	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN;
-	if (encrypted)
-		data_len -= IEEE80211_CCMP_MIC_LEN;
+	/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+	 * mode authentication are not allowed to collide, yet both are derived
+	 * from this vector b_0. We only set L := 1 here to indicate that the
+	 * data size can be represented in (L+1) bytes. The CCM layer will take
+	 * care of storing the data length in the top (L+1) bytes and setting
+	 * and clearing the other bits as is required to derive the two IVs.
+	 */
+	b_0[0] = 0x1;
 
-	/* First block, b_0 */
-	b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
 	/* Nonce: Nonce Flags | A2 | PN
 	 * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
 	 */
 	b_0[1] = qos_tid | (mgmt << 4);
 	memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
 	memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
-	/* l(m) */
-	put_unaligned_be16(data_len, &b_0[14]);
 
 	/* AAD (extra authenticate-only data) / masked 802.11 header
 	 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
@@ -407,7 +402,8 @@
 	u8 *pos;
 	u8 pn[6];
 	u64 pn64;
-	u8 scratch[6 * AES_BLOCK_SIZE];
+	u8 aad[2 * AES_BLOCK_SIZE];
+	u8 b_0[AES_BLOCK_SIZE];
 
 	if (info->control.hw_key &&
 	    !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
@@ -460,9 +456,9 @@
 		return 0;
 
 	pos += IEEE80211_CCMP_HDR_LEN;
-	ccmp_special_blocks(skb, pn, scratch, 0);
-	ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len,
-				  pos, skb_put(skb, IEEE80211_CCMP_MIC_LEN));
+	ccmp_special_blocks(skb, pn, b_0, aad, 0);
+	ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+				  skb_put(skb, IEEE80211_CCMP_MIC_LEN));
 
 	return 0;
 }
@@ -525,16 +521,16 @@
 	}
 
 	if (!(status->flag & RX_FLAG_DECRYPTED)) {
-		u8 scratch[6 * AES_BLOCK_SIZE];
+		u8 aad[2 * AES_BLOCK_SIZE];
+		u8 b_0[AES_BLOCK_SIZE];
 		/* hardware didn't decrypt/verify MIC */
-		ccmp_special_blocks(skb, pn, scratch, 1);
+		ccmp_special_blocks(skb, pn, b_0, aad, 1);
 
 		if (ieee80211_aes_ccm_decrypt(
-			    key->u.ccmp.tfm, scratch,
+			    key->u.ccmp.tfm, b_0, aad,
 			    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
 			    data_len,
-			    skb->data + skb->len - IEEE80211_CCMP_MIC_LEN,
-			    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN))
+			    skb->data + skb->len - IEEE80211_CCMP_MIC_LEN))
 			return RX_DROP_UNUSABLE;
 	}
 
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index b7c7f81..52ae664 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -174,8 +174,7 @@
 
 	if (!ops || !ops->xmit || !ops->ed || !ops->start ||
 	    !ops->stop || !ops->set_channel) {
-		printk(KERN_ERR
-		       "undefined IEEE802.15.4 device operations\n");
+		pr_err("undefined IEEE802.15.4 device operations\n");
 		return NULL;
 	}
 
@@ -201,8 +200,7 @@
 
 	phy = wpan_phy_alloc(priv_size);
 	if (!phy) {
-		printk(KERN_ERR
-		       "failure to allocate master IEEE802.15.4 device\n");
+		pr_err("failure to allocate master IEEE802.15.4 device\n");
 		return NULL;
 	}
 
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 2ca2f4d..e24bcf9 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -208,6 +208,8 @@
 	head[1] = fc >> 8;
 
 	memcpy(skb_push(skb, pos), head, pos);
+	skb_reset_mac_header(skb);
+	skb->mac_len = pos;
 
 	return pos;
 }
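The two added lines record where the freshly pushed MAC header starts and how
long it is. A simplified buffer model of that bookkeeping (this is only the
arithmetic, not the real sk_buff API):

#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char room[64];		/* headroom + payload area */
	unsigned char *data;		/* current start of packet data */
	size_t len;
	size_t mac_off, mac_len;	/* what the new lines record */
};

static void push_header(struct buf *b, const void *hdr, size_t hlen)
{
	b->data -= hlen;			/* skb_push(skb, pos) */
	memcpy(b->data, hdr, hlen);
	b->len += hlen;
	b->mac_off = (size_t)(b->data - b->room); /* skb_reset_mac_header() */
	b->mac_len = hlen;			/* skb->mac_len = pos */
}

int main(void)
{
	struct buf b = { .data = b.room + 32 };
	unsigned char head[2] = { 0x41, 0x88 };	/* sample FC bytes */

	push_header(&b, head, sizeof(head));
	printf("mac header at offset %zu, len %zu\n", b.mac_off, b.mac_len);
	return 0;
}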
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 1bec121..851cd88 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -33,6 +33,7 @@
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
 				  SKB_GSO_GRE |
+				  SKB_GSO_IPIP |
 				  SKB_GSO_MPLS)))
 		goto out;
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6e839b6..48acec1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -413,6 +413,58 @@
 
 endif # NF_CONNTRACK
 
+config NF_TABLES
+	depends on NETFILTER_NETLINK
+	tristate "Netfilter nf_tables support"
+
+config NFT_EXTHDR
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables IPv6 exthdr module"
+
+config NFT_META
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables meta module"
+
+config NFT_CT
+	depends on NF_TABLES
+	depends on NF_CONNTRACK
+	tristate "Netfilter nf_tables conntrack module"
+
+config NFT_RBTREE
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables rbtree set module"
+
+config NFT_HASH
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables hash set module"
+
+config NFT_COUNTER
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables counter module"
+
+config NFT_LOG
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables log module"
+
+config NFT_LIMIT
+	depends on NF_TABLES
+	tristate "Netfilter nf_tables limit module"
+
+config NFT_NAT
+	depends on NF_TABLES
+	depends on NF_CONNTRACK
+	depends on NF_NAT
+	tristate "Netfilter nf_tables nat module"
+
+config NFT_COMPAT
+	depends on NF_TABLES
+	depends on NETFILTER_XTABLES
+	tristate "Netfilter x_tables over nf_tables module"
+	help
+	  This is required if you intend to use any of the existing
+	  x_tables match/target extensions over the nf_tables
+	  framework.
+
 config NETFILTER_XTABLES
 	tristate "Netfilter Xtables support (required for ip_tables)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index c3a0a12..394483b 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -64,6 +64,24 @@
 # SYNPROXY
 obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
+# nf_tables
+nf_tables-objs += nf_tables_core.o nf_tables_api.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
+
+obj-$(CONFIG_NF_TABLES)		+= nf_tables.o
+obj-$(CONFIG_NFT_COMPAT)	+= nft_compat.o
+obj-$(CONFIG_NFT_EXTHDR)	+= nft_exthdr.o
+obj-$(CONFIG_NFT_META)		+= nft_meta.o
+obj-$(CONFIG_NFT_CT)		+= nft_ct.o
+obj-$(CONFIG_NFT_LIMIT)		+= nft_limit.o
+obj-$(CONFIG_NFT_NAT)		+= nft_nat.o
+#nf_tables-objs			+= nft_meta_target.o
+obj-$(CONFIG_NFT_RBTREE)	+= nft_rbtree.o
+obj-$(CONFIG_NFT_HASH)		+= nft_hash.o
+obj-$(CONFIG_NFT_COUNTER)	+= nft_counter.o
+obj-$(CONFIG_NFT_LOG)		+= nft_log.o
+
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 593b16e..1fbab0c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -146,7 +146,7 @@
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
 repeat:
-		verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
+		verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
 		if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
 			if (unlikely((verdict & NF_VERDICT_MASK)
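The one-line change in nf_iterate() above alters the hook calling convention:
the hook now receives a pointer to its own nf_hook_ops entry (so it can reach
per-registration data) instead of the bare hook number. A minimal sketch of
the resulting shape (stand-in types; the real ops structure carries far more):

#include <stdio.h>

#define NF_DROP		0
#define NF_ACCEPT	1

struct nf_hook_ops;
typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, void *skb);

struct nf_hook_ops {
	nf_hookfn *hook;
	int priv;	/* reachable now that the ops pointer is passed */
};

static unsigned int accept_all(const struct nf_hook_ops *ops, void *skb)
{
	(void)skb;
	printf("hook called, priv=%d\n", ops->priv);
	return NF_ACCEPT;
}

int main(void)
{
	struct nf_hook_ops ops = { .hook = accept_all, .priv = 7 };

	/* previously: ops.hook(hooknum, skb, ...) */
	return ops.hook(&ops, NULL) == NF_ACCEPT ? 0 : 1;
}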
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index ba36c28..a2d6263 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -1,7 +1,7 @@
 menuconfig IP_SET
 	tristate "IP set support"
 	depends on INET && NETFILTER
-	depends on NETFILTER_NETLINK
+	select NETFILTER_NETLINK
 	help
 	  This option adds IP set support to the kernel.
 	  In order to define and use the sets, you need the userspace utility
@@ -90,6 +90,15 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETPORTNET
+	tristate "hash:net,port,net set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:net,port,net set type support, by which
+	  one can store two IPv4/IPv6 subnets and a protocol/port in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NET
 	tristate "hash:net set support"
 	depends on IP_SET
@@ -99,6 +108,15 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETNET
+	tristate "hash:net,net set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:net,net set type support, by which
+	  one can store IPv4/IPv6 network address/prefix pairs in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NETPORT
 	tristate "hash:net,port set support"
 	depends on IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
index 6e965ec..44b2d38 100644
--- a/net/netfilter/ipset/Makefile
+++ b/net/netfilter/ipset/Makefile
@@ -20,6 +20,8 @@
 obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
 obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
 obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
+obj-$(CONFIG_IP_SET_HASH_NETNET) += ip_set_hash_netnet.o
+obj-$(CONFIG_IP_SET_HASH_NETPORTNET) += ip_set_hash_netportnet.o
 
 # list types
 obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 2524337..f2c7d83 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -8,38 +8,32 @@
 #ifndef __IP_SET_BITMAP_IP_GEN_H
 #define __IP_SET_BITMAP_IP_GEN_H
 
-#define CONCAT(a, b)		a##b
-#define TOKEN(a,b)		CONCAT(a, b)
-
-#define mtype_do_test		TOKEN(MTYPE, _do_test)
-#define mtype_gc_test		TOKEN(MTYPE, _gc_test)
-#define mtype_is_filled		TOKEN(MTYPE, _is_filled)
-#define mtype_do_add		TOKEN(MTYPE, _do_add)
-#define mtype_do_del		TOKEN(MTYPE, _do_del)
-#define mtype_do_list		TOKEN(MTYPE, _do_list)
-#define mtype_do_head		TOKEN(MTYPE, _do_head)
-#define mtype_adt_elem		TOKEN(MTYPE, _adt_elem)
-#define mtype_add_timeout	TOKEN(MTYPE, _add_timeout)
-#define mtype_gc_init		TOKEN(MTYPE, _gc_init)
-#define mtype_kadt		TOKEN(MTYPE, _kadt)
-#define mtype_uadt		TOKEN(MTYPE, _uadt)
-#define mtype_destroy		TOKEN(MTYPE, _destroy)
-#define mtype_flush		TOKEN(MTYPE, _flush)
-#define mtype_head		TOKEN(MTYPE, _head)
-#define mtype_same_set		TOKEN(MTYPE, _same_set)
-#define mtype_elem		TOKEN(MTYPE, _elem)
-#define mtype_test		TOKEN(MTYPE, _test)
-#define mtype_add		TOKEN(MTYPE, _add)
-#define mtype_del		TOKEN(MTYPE, _del)
-#define mtype_list		TOKEN(MTYPE, _list)
-#define mtype_gc		TOKEN(MTYPE, _gc)
+#define mtype_do_test		IPSET_TOKEN(MTYPE, _do_test)
+#define mtype_gc_test		IPSET_TOKEN(MTYPE, _gc_test)
+#define mtype_is_filled		IPSET_TOKEN(MTYPE, _is_filled)
+#define mtype_do_add		IPSET_TOKEN(MTYPE, _do_add)
+#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_do_del		IPSET_TOKEN(MTYPE, _do_del)
+#define mtype_do_list		IPSET_TOKEN(MTYPE, _do_list)
+#define mtype_do_head		IPSET_TOKEN(MTYPE, _do_head)
+#define mtype_adt_elem		IPSET_TOKEN(MTYPE, _adt_elem)
+#define mtype_add_timeout	IPSET_TOKEN(MTYPE, _add_timeout)
+#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)
+#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
+#define mtype_head		IPSET_TOKEN(MTYPE, _head)
+#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)
+#define mtype_test		IPSET_TOKEN(MTYPE, _test)
+#define mtype_add		IPSET_TOKEN(MTYPE, _add)
+#define mtype_del		IPSET_TOKEN(MTYPE, _del)
+#define mtype_list		IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
 #define mtype			MTYPE
 
-#define ext_timeout(e, m)	\
-	(unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)	\
-	(struct ip_set_counter *)((e) + (m)->offset[IPSET_OFFSET_COUNTER])
-#define get_ext(map, id)	((map)->extensions + (map)->dsize * (id))
+#define get_ext(set, map, id)	((map)->extensions + (set)->dsize * (id))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -49,11 +43,22 @@
 	init_timer(&map->gc);
 	map->gc.data = (unsigned long) set;
 	map->gc.function = gc;
-	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
 }
 
 static void
+mtype_ext_cleanup(struct ip_set *set)
+{
+	struct mtype *map = set->data;
+	u32 id;
+
+	for (id = 0; id < map->elements; id++)
+		if (test_bit(id, map->members))
+			ip_set_ext_destroy(set, get_ext(set, map, id));
+}
+
+static void
 mtype_destroy(struct ip_set *set)
 {
 	struct mtype *map = set->data;
@@ -62,8 +67,11 @@
 		del_timer_sync(&map->gc);
 
 	ip_set_free(map->members);
-	if (map->dsize)
+	if (set->dsize) {
+		if (set->extensions & IPSET_EXT_DESTROY)
+			mtype_ext_cleanup(set);
 		ip_set_free(map->extensions);
+	}
 	kfree(map);
 
 	set->data = NULL;
@@ -74,6 +82,8 @@
 {
 	struct mtype *map = set->data;
 
+	if (set->extensions & IPSET_EXT_DESTROY)
+		mtype_ext_cleanup(set);
 	memset(map->members, 0, map->memsize);
 }
 
@@ -91,12 +101,9 @@
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
 			  htonl(sizeof(*map) +
 				map->memsize +
-				map->dsize * map->elements)) ||
-	    (SET_WITH_TIMEOUT(set) &&
-	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-	    (SET_WITH_COUNTER(set) &&
-	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-			   htonl(IPSET_FLAG_WITH_COUNTERS))))
+				set->dsize * map->elements)))
+		goto nla_put_failure;
+	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
@@ -111,16 +118,16 @@
 {
 	struct mtype *map = set->data;
 	const struct mtype_adt_elem *e = value;
-	void *x = get_ext(map, e->id);
-	int ret = mtype_do_test(e, map);
+	void *x = get_ext(set, map, e->id);
+	int ret = mtype_do_test(e, map, set->dsize);
 
 	if (ret <= 0)
 		return ret;
 	if (SET_WITH_TIMEOUT(set) &&
-	    ip_set_timeout_expired(ext_timeout(x, map)))
+	    ip_set_timeout_expired(ext_timeout(x, set)))
 		return 0;
 	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(x, map), ext, mext, flags);
+		ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
 	return 1;
 }
 
@@ -130,26 +137,30 @@
 {
 	struct mtype *map = set->data;
 	const struct mtype_adt_elem *e = value;
-	void *x = get_ext(map, e->id);
-	int ret = mtype_do_add(e, map, flags);
+	void *x = get_ext(set, map, e->id);
+	int ret = mtype_do_add(e, map, flags, set->dsize);
 
 	if (ret == IPSET_ADD_FAILED) {
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(x, map)))
+		    ip_set_timeout_expired(ext_timeout(x, set)))
 			ret = 0;
 		else if (!(flags & IPSET_FLAG_EXIST))
 			return -IPSET_ERR_EXIST;
+		/* Element is re-added, cleanup extensions */
+		ip_set_ext_destroy(set, x);
 	}
 
 	if (SET_WITH_TIMEOUT(set))
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
-		mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret);
+		mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
 #else
-		ip_set_timeout_set(ext_timeout(x, map), ext->timeout);
+		ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
 #endif
 
 	if (SET_WITH_COUNTER(set))
-		ip_set_init_counter(ext_counter(x, map), ext);
+		ip_set_init_counter(ext_counter(x, set), ext);
+	if (SET_WITH_COMMENT(set))
+		ip_set_init_comment(ext_comment(x, set), ext);
 	return 0;
 }
 
@@ -159,16 +170,27 @@
 {
 	struct mtype *map = set->data;
 	const struct mtype_adt_elem *e = value;
-	const void *x = get_ext(map, e->id);
+	void *x = get_ext(set, map, e->id);
 
-	if (mtype_do_del(e, map) ||
-	    (SET_WITH_TIMEOUT(set) &&
-	     ip_set_timeout_expired(ext_timeout(x, map))))
+	if (mtype_do_del(e, map))
+		return -IPSET_ERR_EXIST;
+
+	ip_set_ext_destroy(set, x);
+	if (SET_WITH_TIMEOUT(set) &&
+	    ip_set_timeout_expired(ext_timeout(x, set)))
 		return -IPSET_ERR_EXIST;
 
 	return 0;
 }
 
+#ifndef IP_SET_BITMAP_STORED_TIMEOUT
+static inline bool
+mtype_is_filled(const struct mtype_elem *x)
+{
+	return true;
+}
+#endif
+
 static int
 mtype_list(const struct ip_set *set,
 	   struct sk_buff *skb, struct netlink_callback *cb)
@@ -176,20 +198,21 @@
 	struct mtype *map = set->data;
 	struct nlattr *adt, *nested;
 	void *x;
-	u32 id, first = cb->args[2];
+	u32 id, first = cb->args[IPSET_CB_ARG0];
 
 	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
 	if (!adt)
 		return -EMSGSIZE;
-	for (; cb->args[2] < map->elements; cb->args[2]++) {
-		id = cb->args[2];
-		x = get_ext(map, id);
+	for (; cb->args[IPSET_CB_ARG0] < map->elements;
+	     cb->args[IPSET_CB_ARG0]++) {
+		id = cb->args[IPSET_CB_ARG0];
+		x = get_ext(set, map, id);
 		if (!test_bit(id, map->members) ||
 		    (SET_WITH_TIMEOUT(set) &&
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
 		     mtype_is_filled((const struct mtype_elem *) x) &&
 #endif
-		     ip_set_timeout_expired(ext_timeout(x, map))))
+		     ip_set_timeout_expired(ext_timeout(x, set))))
 			continue;
 		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 		if (!nested) {
@@ -199,40 +222,27 @@
 			} else
 				goto nla_put_failure;
 		}
-		if (mtype_do_list(skb, map, id))
+		if (mtype_do_list(skb, map, id, set->dsize))
 			goto nla_put_failure;
-		if (SET_WITH_TIMEOUT(set)) {
-#ifdef IP_SET_BITMAP_STORED_TIMEOUT
-			if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-					  htonl(ip_set_timeout_stored(map, id,
-							ext_timeout(x, map)))))
-				goto nla_put_failure;
-#else
-			if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-					  htonl(ip_set_timeout_get(
-							ext_timeout(x, map)))))
-				goto nla_put_failure;
-#endif
-		}
-		if (SET_WITH_COUNTER(set) &&
-		    ip_set_put_counter(skb, ext_counter(x, map)))
+		if (ip_set_put_extensions(skb, set, x,
+		    mtype_is_filled((const struct mtype_elem *) x)))
 			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, adt);
 
 	/* Set listing finished */
-	cb->args[2] = 0;
+	cb->args[IPSET_CB_ARG0] = 0;
 
 	return 0;
 
 nla_put_failure:
 	nla_nest_cancel(skb, nested);
-	ipset_nest_end(skb, adt);
 	if (unlikely(id == first)) {
-		cb->args[2] = 0;
+		cb->args[IPSET_CB_ARG0] = 0;
 		return -EMSGSIZE;
 	}
+	ipset_nest_end(skb, adt);
 	return 0;
 }
 
@@ -241,21 +251,23 @@
 {
 	struct ip_set *set = (struct ip_set *) ul_set;
 	struct mtype *map = set->data;
-	const void *x;
+	void *x;
 	u32 id;
 
 	/* We run parallel with other readers (test element)
 	 * but adding/deleting new entries is locked out */
 	read_lock_bh(&set->lock);
 	for (id = 0; id < map->elements; id++)
-		if (mtype_gc_test(id, map)) {
-			x = get_ext(map, id);
-			if (ip_set_timeout_expired(ext_timeout(x, map)))
+		if (mtype_gc_test(id, map, set->dsize)) {
+			x = get_ext(set, map, id);
+			if (ip_set_timeout_expired(ext_timeout(x, set))) {
 				clear_bit(id, map->members);
+				ip_set_ext_destroy(set, x);
+			}
 		}
 	read_unlock_bh(&set->lock);
 
-	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
 }
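The mtype_* aliases above (switched from the header-local TOKEN/CONCAT pair to
a shared IPSET_TOKEN macro) depend on two-level token pasting: pasting with ##
directly would glue the literal text "MTYPE" instead of its expansion, so an
inner helper macro is needed. A self-contained demonstration of why the extra
level matters:

#include <stdio.h>

#define IPSET_CONCAT(a, b)	a##b
#define IPSET_TOKEN(a, b)	IPSET_CONCAT(a, b)	/* expands a first */

#define MTYPE		bitmap_ip
#define mtype_add	IPSET_TOKEN(MTYPE, _add)	/* -> bitmap_ip_add */
#define broken_add	IPSET_CONCAT(MTYPE, _add)	/* -> MTYPE_add    */

static int bitmap_ip_add(int v) { return v + 1; }
static int MTYPE_add(int v) { return -v; }	/* what one level gets you */

int main(void)
{
	printf("%d %d\n", mtype_add(41), broken_add(41));	/* 42 -41 */
	return 0;
}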
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index f1a8128..6f1f9f4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -25,12 +25,13 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN	0
-#define REVISION_MAX	1	/* Counter support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1	   Counter support added */
+#define IPSET_TYPE_REV_MAX	2	/* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 #define MTYPE		bitmap_ip
@@ -44,10 +45,7 @@
 	u32 elements;		/* number of max elements in the set */
 	u32 hosts;		/* number of hosts in a subnet */
 	size_t memsize;		/* members size */
-	size_t dsize;		/* extensions struct size */
-	size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
 	u8 netmask;		/* subnet netmask */
-	u32 timeout;		/* timeout parameter */
 	struct timer_list gc;	/* garbage collection */
 };
 
@@ -65,20 +63,21 @@
 /* Common functions */
 
 static inline int
-bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
+bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
+		  struct bitmap_ip *map, size_t dsize)
 {
 	return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map)
+bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
 {
 	return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
-		 u32 flags)
+		 u32 flags, size_t dsize)
 {
 	return !!test_and_set_bit(e->id, map->members);
 }
@@ -90,7 +89,8 @@
 }
 
 static inline int
-bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id)
+bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
+		  size_t dsize)
 {
 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
 			htonl(map->first_ip + id * map->hosts));
@@ -113,7 +113,7 @@
 	struct bitmap_ip *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct bitmap_ip_adt_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
 	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
@@ -131,9 +131,9 @@
 {
 	struct bitmap_ip *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	u32 ip, ip_to;
+	u32 ip = 0, ip_to = 0;
 	struct bitmap_ip_adt_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret = 0;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -200,7 +200,7 @@
 	return x->first_ip == y->first_ip &&
 	       x->last_ip == y->last_ip &&
 	       x->netmask == y->netmask &&
-	       x->timeout == y->timeout &&
+	       a->timeout == b->timeout &&
 	       a->extensions == b->extensions;
 }
 
@@ -209,25 +209,6 @@
 struct bitmap_ip_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_ipt_elem {
-	unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipc_elem {
-	struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipct_elem {
-	unsigned long timeout;
-	struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -240,8 +221,8 @@
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (map->dsize) {
-		map->extensions = ip_set_alloc(map->dsize * elements);
+	if (set->dsize) {
+		map->extensions = ip_set_alloc(set->dsize * elements);
 		if (!map->extensions) {
 			kfree(map->members);
 			return false;
@@ -252,7 +233,7 @@
 	map->elements = elements;
 	map->hosts = hosts;
 	map->netmask = netmask;
-	map->timeout = IPSET_NO_TIMEOUT;
+	set->timeout = IPSET_NO_TIMEOUT;
 
 	set->data = map;
 	set->family = NFPROTO_IPV4;
@@ -261,10 +242,11 @@
 }
 
 static int
-bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+		 u32 flags)
 {
 	struct bitmap_ip *map;
-	u32 first_ip, last_ip, hosts, cadt_flags = 0;
+	u32 first_ip = 0, last_ip = 0, hosts;
 	u64 elements;
 	u8 netmask = 32;
 	int ret;
@@ -336,61 +318,15 @@
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ip;
-	if (tb[IPSET_ATTR_CADT_FLAGS])
-		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-		set->extensions |= IPSET_EXT_COUNTER;
-		if (tb[IPSET_ATTR_TIMEOUT]) {
-			map->dsize = sizeof(struct bitmap_ipct_elem);
-			map->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct bitmap_ipct_elem, timeout);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_ipct_elem, counter);
-
-			if (!init_map_ip(set, map, first_ip, last_ip,
-					 elements, hosts, netmask)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-
-			map->timeout = ip_set_timeout_uget(
-				tb[IPSET_ATTR_TIMEOUT]);
-			set->extensions |= IPSET_EXT_TIMEOUT;
-
-			bitmap_ip_gc_init(set, bitmap_ip_gc);
-		} else {
-			map->dsize = sizeof(struct bitmap_ipc_elem);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_ipc_elem, counter);
-
-			if (!init_map_ip(set, map, first_ip, last_ip,
-					 elements, hosts, netmask)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-		}
-	} else if (tb[IPSET_ATTR_TIMEOUT]) {
-		map->dsize = sizeof(struct bitmap_ipt_elem);
-		map->offset[IPSET_OFFSET_TIMEOUT] =
-			offsetof(struct bitmap_ipt_elem, timeout);
-
-		if (!init_map_ip(set, map, first_ip, last_ip,
-				 elements, hosts, netmask)) {
-			kfree(map);
-			return -ENOMEM;
-		}
-
-		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-		set->extensions |= IPSET_EXT_TIMEOUT;
-
+	set->dsize = ip_set_elem_len(set, tb, 0);
+	if (!init_map_ip(set, map, first_ip, last_ip,
+			 elements, hosts, netmask)) {
+		kfree(map);
+		return -ENOMEM;
+	}
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 		bitmap_ip_gc_init(set, bitmap_ip_gc);
-	} else {
-		map->dsize = 0;
-		if (!init_map_ip(set, map, first_ip, last_ip,
-				 elements, hosts, netmask)) {
-			kfree(map);
-			return -ENOMEM;
-		}
 	}
 	return 0;
 }
@@ -401,8 +337,8 @@
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= NFPROTO_IPV4,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= bitmap_ip_create,
 	.create_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
@@ -420,6 +356,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
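With this refactor the per-element extension data (timeout, counter, and the
new comment extension) moves into one flat allocation addressed with a per-set
stride, which is what the reworked get_ext(set, map, id) computes. A
standalone sketch of that stride-addressed layout (field offsets invented for
illustration):

#include <stdio.h>
#include <stdlib.h>

struct set_layout {
	size_t dsize;		/* extension bytes per element */
	size_t timeout_off;	/* offset of the timeout field */
	unsigned char *extensions;
};

static void *get_ext(const struct set_layout *s, unsigned int id)
{
	return s->extensions + s->dsize * id;	/* element id's blob */
}

static unsigned long *ext_timeout(const struct set_layout *s, unsigned int id)
{
	return (unsigned long *)((unsigned char *)get_ext(s, id) +
				 s->timeout_off);
}

int main(void)
{
	struct set_layout s = { .dsize = 16, .timeout_off = 0 };

	s.extensions = calloc(4, s.dsize);	/* a 4-element set */
	if (!s.extensions)
		return 1;
	*ext_timeout(&s, 2) = 1000;		/* element 2's timeout */
	printf("%lu\n", *ext_timeout(&s, 2));
	free(s.extensions);
	return 0;
}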
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 3b30e0b..740eabe 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -25,12 +25,13 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN	0
-#define REVISION_MAX	1	/* Counter support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1	   Counter support added */
+#define IPSET_TYPE_REV_MAX	2	/* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 #define MTYPE		bitmap_ipmac
@@ -48,11 +49,8 @@
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
-	u32 timeout;		/* timeout value */
-	struct timer_list gc;	/* garbage collector */
 	size_t memsize;		/* members size */
-	size_t dsize;		/* size of element */
-	size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
+	struct timer_list gc;	/* garbage collector */
 };
 
 /* ADT structure for generic function args */
@@ -82,13 +80,13 @@
 
 static inline int
 bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
-		     const struct bitmap_ipmac *map)
+		     const struct bitmap_ipmac *map, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem;
 
 	if (!test_bit(e->id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, e->id, map->dsize);
+	elem = get_elem(map->extensions, e->id, dsize);
 	if (elem->filled == MAC_FILLED)
 		return e->ether == NULL ||
 		       ether_addr_equal(e->ether, elem->ether);
@@ -97,13 +95,13 @@
 }
 
 static inline int
-bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map)
+bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem;
 
 	if (!test_bit(id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, id, map->dsize);
+	elem = get_elem(map->extensions, id, dsize);
 	/* Timer not started for the incomplete elements */
 	return elem->filled == MAC_FILLED;
 }
@@ -117,13 +115,13 @@
 static inline int
 bitmap_ipmac_add_timeout(unsigned long *timeout,
 			 const struct bitmap_ipmac_adt_elem *e,
-			 const struct ip_set_ext *ext,
+			 const struct ip_set_ext *ext, struct ip_set *set,
 			 struct bitmap_ipmac *map, int mode)
 {
 	u32 t = ext->timeout;
 
 	if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
-		if (t == map->timeout)
+		if (t == set->timeout)
 			/* Timeout was not specified, get stored one */
 			t = *timeout;
 		ip_set_timeout_set(timeout, t);
@@ -142,11 +140,11 @@
 
 static inline int
 bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
-		    struct bitmap_ipmac *map, u32 flags)
+		    struct bitmap_ipmac *map, u32 flags, size_t dsize)
 {
 	struct bitmap_ipmac_elem *elem;
 
-	elem = get_elem(map->extensions, e->id, map->dsize);
+	elem = get_elem(map->extensions, e->id, dsize);
 	if (test_and_set_bit(e->id, map->members)) {
 		if (elem->filled == MAC_FILLED) {
 			if (e->ether && (flags & IPSET_FLAG_EXIST))
@@ -178,22 +176,12 @@
 	return !test_and_clear_bit(e->id, map->members);
 }
 
-static inline unsigned long
-ip_set_timeout_stored(struct bitmap_ipmac *map, u32 id, unsigned long *timeout)
-{
-	const struct bitmap_ipmac_elem *elem =
-		get_elem(map->extensions, id, map->dsize);
-
-	return elem->filled == MAC_FILLED ? ip_set_timeout_get(timeout) :
-					    *timeout;
-}
-
 static inline int
 bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
-		     u32 id)
+		     u32 id, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem =
-		get_elem(map->extensions, id, map->dsize);
+		get_elem(map->extensions, id, dsize);
 
 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
 			       htonl(map->first_ip + id)) ||
@@ -216,7 +204,7 @@
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct bitmap_ipmac_adt_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
 	/* MAC can be src only */
@@ -245,8 +233,8 @@
 	const struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct bitmap_ipmac_adt_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
-	u32 ip;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0;
 	int ret = 0;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -285,43 +273,12 @@
 
 	return x->first_ip == y->first_ip &&
 	       x->last_ip == y->last_ip &&
-	       x->timeout == y->timeout &&
+	       a->timeout == b->timeout &&
 	       a->extensions == b->extensions;
 }
 
 /* Plain variant */
 
-/* Timeout variant */
-
-struct bitmap_ipmact_elem {
-	struct {
-		unsigned char ether[ETH_ALEN];
-		unsigned char filled;
-	} __attribute__ ((aligned));
-	unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipmacc_elem {
-	struct {
-		unsigned char ether[ETH_ALEN];
-		unsigned char filled;
-	} __attribute__ ((aligned));
-	struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipmacct_elem {
-	struct {
-		unsigned char ether[ETH_ALEN];
-		unsigned char filled;
-	} __attribute__ ((aligned));
-	unsigned long timeout;
-	struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip,mac type of sets */
@@ -330,11 +287,11 @@
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
 	       u32 first_ip, u32 last_ip, u32 elements)
 {
-	map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (map->dsize) {
-		map->extensions = ip_set_alloc(map->dsize * elements);
+	if (set->dsize) {
+		map->extensions = ip_set_alloc(set->dsize * elements);
 		if (!map->extensions) {
 			kfree(map->members);
 			return false;
@@ -343,7 +300,7 @@
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
-	map->timeout = IPSET_NO_TIMEOUT;
+	set->timeout = IPSET_NO_TIMEOUT;
 
 	set->data = map;
 	set->family = NFPROTO_IPV4;
@@ -352,10 +309,10 @@
 }
 
 static int
-bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		    u32 flags)
 {
-	u32 first_ip, last_ip, cadt_flags = 0;
+	u32 first_ip = 0, last_ip = 0;
 	u64 elements;
 	struct bitmap_ipmac *map;
 	int ret;
@@ -399,57 +356,15 @@
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ipmac;
-	if (tb[IPSET_ATTR_CADT_FLAGS])
-		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-		set->extensions |= IPSET_EXT_COUNTER;
-		if (tb[IPSET_ATTR_TIMEOUT]) {
-			map->dsize = sizeof(struct bitmap_ipmacct_elem);
-			map->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct bitmap_ipmacct_elem, timeout);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_ipmacct_elem, counter);
-
-			if (!init_map_ipmac(set, map, first_ip, last_ip,
-					    elements)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-			map->timeout = ip_set_timeout_uget(
-				tb[IPSET_ATTR_TIMEOUT]);
-			set->extensions |= IPSET_EXT_TIMEOUT;
-			bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-		} else {
-			map->dsize = sizeof(struct bitmap_ipmacc_elem);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_ipmacc_elem, counter);
-
-			if (!init_map_ipmac(set, map, first_ip, last_ip,
-					    elements)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-		}
-	} else if (tb[IPSET_ATTR_TIMEOUT]) {
-		map->dsize = sizeof(struct bitmap_ipmact_elem);
-		map->offset[IPSET_OFFSET_TIMEOUT] =
-			offsetof(struct bitmap_ipmact_elem, timeout);
-
-		if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-			kfree(map);
-			return -ENOMEM;
-		}
-		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-		set->extensions |= IPSET_EXT_TIMEOUT;
+	set->dsize = ip_set_elem_len(set, tb,
+				     sizeof(struct bitmap_ipmac_elem));
+	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
+		kfree(map);
+		return -ENOMEM;
+	}
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 		bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-	} else {
-		map->dsize = sizeof(struct bitmap_ipmac_elem);
-
-		if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-			kfree(map);
-			return -ENOMEM;
-		}
-		set->variant = &bitmap_ipmac;
 	}
 	return 0;
 }
@@ -460,8 +375,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_MAC,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= NFPROTO_IPV4,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= bitmap_ipmac_create,
 	.create_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
@@ -478,6 +393,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
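
A note on the rewrite pattern above, since every bitmap and hash type in this series follows it: the four hand-rolled element layouts (plain, timeout, counter, timeout+counter) disappear, and the element size (set->dsize), the timeout and the per-extension offsets move from the type-private map into the generic struct ip_set, computed once at create time by ip_set_elem_len(). A minimal userspace model of that align-record-advance layout (names and macros here are illustrative, not the kernel's):

/* Illustrative model only: a fixed element followed by optional,
 * individually aligned extensions, replacing the hard-coded
 * struct variants removed above. */
#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct base_elem { unsigned char ether[6]; unsigned char filled; };
struct counter	 { unsigned long long bytes, packets; };

int main(void)
{
	size_t dsize = sizeof(struct base_elem);
	size_t off_timeout, off_counter;

	/* align, record the offset, advance: once per extension */
	dsize = ALIGN_UP(dsize, __alignof__(unsigned long));
	off_timeout = dsize;
	dsize += sizeof(unsigned long);

	dsize = ALIGN_UP(dsize, __alignof__(struct counter));
	off_counter = dsize;
	dsize += sizeof(struct counter);

	printf("dsize=%zu timeout@%zu counter@%zu\n",
	       dsize, off_timeout, off_counter);
	return 0;
}

Any combination of timeout, counter and comment extensions thus shares one code path instead of one struct per combination.
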
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 8207d1f..cf99676 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -20,12 +20,13 @@
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 #include <linux/netfilter/ipset/ip_set_getport.h>
 
-#define REVISION_MIN	0
-#define REVISION_MAX	1	/* Counter support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1	   Counter support added */
+#define IPSET_TYPE_REV_MAX	2	/* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:port");
 
 #define MTYPE		bitmap_port
@@ -38,9 +39,6 @@
 	u16 last_port;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
-	size_t dsize;		/* extensions struct size */
-	size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
-	u32 timeout;		/* timeout parameter */
 	struct timer_list gc;	/* garbage collection */
 };
 
@@ -59,20 +57,20 @@
 
 static inline int
 bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
-		    const struct bitmap_port *map)
+		    const struct bitmap_port *map, size_t dsize)
 {
 	return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_port_gc_test(u16 id, const struct bitmap_port *map)
+bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
 {
 	return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
-		   struct bitmap_port *map, u32 flags)
+		   struct bitmap_port *map, u32 flags, size_t dsize)
 {
 	return !!test_and_set_bit(e->id, map->members);
 }
@@ -85,7 +83,8 @@
 }
 
 static inline int
-bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id)
+bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
+		    size_t dsize)
 {
 	return nla_put_net16(skb, IPSET_ATTR_PORT,
 			     htons(map->first_port + id));
@@ -106,7 +105,7 @@
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct bitmap_port_adt_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	__be16 __port;
 	u16 port = 0;
 
@@ -131,7 +130,7 @@
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct bitmap_port_adt_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port;	/* wraparound */
 	u16 port_to;
 	int ret = 0;
@@ -191,7 +190,7 @@
 
 	return x->first_port == y->first_port &&
 	       x->last_port == y->last_port &&
-	       x->timeout == y->timeout &&
+	       a->timeout == b->timeout &&
 	       a->extensions == b->extensions;
 }
 
@@ -200,25 +199,6 @@
 struct bitmap_port_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_portt_elem {
-	unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_portc_elem {
-	struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_portct_elem {
-	unsigned long timeout;
-	struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -230,8 +210,8 @@
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (map->dsize) {
-		map->extensions = ip_set_alloc(map->dsize * map->elements);
+	if (set->dsize) {
+		map->extensions = ip_set_alloc(set->dsize * map->elements);
 		if (!map->extensions) {
 			kfree(map->members);
 			return false;
@@ -239,7 +219,7 @@
 	}
 	map->first_port = first_port;
 	map->last_port = last_port;
-	map->timeout = IPSET_NO_TIMEOUT;
+	set->timeout = IPSET_NO_TIMEOUT;
 
 	set->data = map;
 	set->family = NFPROTO_UNSPEC;
@@ -248,11 +228,11 @@
 }
 
 static int
-bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+		   u32 flags)
 {
 	struct bitmap_port *map;
 	u16 first_port, last_port;
-	u32 cadt_flags = 0;
 
 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -274,55 +254,16 @@
 		return -ENOMEM;
 
 	map->elements = last_port - first_port + 1;
-	map->memsize = map->elements * sizeof(unsigned long);
+	map->memsize = bitmap_bytes(0, map->elements);
 	set->variant = &bitmap_port;
-	if (tb[IPSET_ATTR_CADT_FLAGS])
-		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-		set->extensions |= IPSET_EXT_COUNTER;
-		if (tb[IPSET_ATTR_TIMEOUT]) {
-			map->dsize = sizeof(struct bitmap_portct_elem);
-			map->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct bitmap_portct_elem, timeout);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_portct_elem, counter);
-			if (!init_map_port(set, map, first_port, last_port)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-
-			map->timeout =
-				ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-			set->extensions |= IPSET_EXT_TIMEOUT;
-			bitmap_port_gc_init(set, bitmap_port_gc);
-		} else {
-			map->dsize = sizeof(struct bitmap_portc_elem);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct bitmap_portc_elem, counter);
-			if (!init_map_port(set, map, first_port, last_port)) {
-				kfree(map);
-				return -ENOMEM;
-			}
-		}
-	} else if (tb[IPSET_ATTR_TIMEOUT]) {
-		map->dsize = sizeof(struct bitmap_portt_elem);
-		map->offset[IPSET_OFFSET_TIMEOUT] =
-			offsetof(struct bitmap_portt_elem, timeout);
-		if (!init_map_port(set, map, first_port, last_port)) {
-			kfree(map);
-			return -ENOMEM;
-		}
-
-		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-		set->extensions |= IPSET_EXT_TIMEOUT;
+	set->dsize = ip_set_elem_len(set, tb, 0);
+	if (!init_map_port(set, map, first_port, last_port)) {
+		kfree(map);
+		return -ENOMEM;
+	}
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 		bitmap_port_gc_init(set, bitmap_port_gc);
-	} else {
-		map->dsize = 0;
-		if (!init_map_port(set, map, first_port, last_port)) {
-			kfree(map);
-			return -ENOMEM;
-		}
-
 	}
 	return 0;
 }
@@ -333,8 +274,8 @@
 	.features	= IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= bitmap_port_create,
 	.create_policy	= {
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
@@ -349,6 +290,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
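
The memsize change above is the substantive fix in this hunk: the old formula allocated one unsigned long per port (64 bits of storage per member bit on 64-bit hosts), while bitmap_bytes() sizes the member bitmap at one bit per element, rounded up to whole longs. A rough userspace model, assuming bitmap_bytes(a, b) covers bits a..b inclusive in BITS_TO_LONGS style, as its use elsewhere in the tree suggests:

/* Sketch of the sizing fix; bitmap_bytes() semantics assumed. */
#include <stdio.h>

#define BITS_PER_LONG	 (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap_bytes(unsigned long a, unsigned long b)
{
	return BITS_TO_LONGS(b - a + 1) * sizeof(unsigned long);
}

int main(void)
{
	unsigned long elements = 1024;

	/* old: one long (not one bit) per element */
	printf("old: %lu bytes\n",
	       elements * (unsigned long)sizeof(unsigned long));
	/* new: one bit per element, rounded up to whole longs */
	printf("new: %lu bytes\n", bitmap_bytes(0, elements - 1));
	return 0;
}

Note that bitmap:ip,mac above passes elements - 1 to the same helper while this file passes map->elements; if the range really is inclusive, the port variant merely rounds one bit higher, at worst one extra long.
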
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f2e30fb..bac7e01 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -17,6 +17,8 @@
 #include <linux/spinlock.h>
 #include <linux/rculist.h>
 #include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter/x_tables.h>
@@ -27,8 +29,17 @@
 static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
 static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */
 
-static struct ip_set * __rcu *ip_set_list;	/* all individual sets */
-static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+struct ip_set_net {
+	struct ip_set * __rcu *ip_set_list;	/* all individual sets */
+	ip_set_id_t	ip_set_max;	/* max number of sets */
+	int		is_deleted;	/* deleted by ip_set_net_exit */
+};
+static int ip_set_net_id __read_mostly;
+
+static inline struct ip_set_net *ip_set_pernet(struct net *net)
+{
+	return net_generic(net, ip_set_net_id);
+}
 
 #define IP_SET_INC	64
 #define STREQ(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)
@@ -45,8 +56,8 @@
 /* When the nfnl mutex is held: */
 #define nfnl_dereference(p)		\
 	rcu_dereference_protected(p, 1)
-#define nfnl_set(id)			\
-	nfnl_dereference(ip_set_list)[id]
+#define nfnl_set(inst, id)			\
+	nfnl_dereference((inst)->ip_set_list)[id]
 
 /*
  * The set types are implemented in modules and registered set types
@@ -315,6 +326,60 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 
+typedef void (*destroyer)(void *);
+/* ipset data extension types, in size order */
+
+const struct ip_set_ext_type ip_set_extensions[] = {
+	[IPSET_EXT_ID_COUNTER] = {
+		.type	= IPSET_EXT_COUNTER,
+		.flag	= IPSET_FLAG_WITH_COUNTERS,
+		.len	= sizeof(struct ip_set_counter),
+		.align	= __alignof__(struct ip_set_counter),
+	},
+	[IPSET_EXT_ID_TIMEOUT] = {
+		.type	= IPSET_EXT_TIMEOUT,
+		.len	= sizeof(unsigned long),
+		.align	= __alignof__(unsigned long),
+	},
+	[IPSET_EXT_ID_COMMENT] = {
+		.type	 = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
+		.flag	 = IPSET_FLAG_WITH_COMMENT,
+		.len	 = sizeof(struct ip_set_comment),
+		.align	 = __alignof__(struct ip_set_comment),
+		.destroy = (destroyer) ip_set_comment_free,
+	},
+};
+EXPORT_SYMBOL_GPL(ip_set_extensions);
+
+static inline bool
+add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
+{
+	return ip_set_extensions[id].flag ?
+		(flags & ip_set_extensions[id].flag) :
+		!!tb[IPSET_ATTR_TIMEOUT];
+}
+
+size_t
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+{
+	enum ip_set_ext_id id;
+	size_t offset = 0;
+	u32 cadt_flags = 0;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS])
+		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
+		if (!add_extension(id, cadt_flags, tb))
+			continue;
+		offset += ALIGN(len + offset, ip_set_extensions[id].align);
+		set->offset[id] = offset;
+		set->extensions |= ip_set_extensions[id].type;
+		offset += ip_set_extensions[id].len;
+	}
+	return len + offset;
+}
+EXPORT_SYMBOL_GPL(ip_set_elem_len);
+
 int
 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 		      struct ip_set_ext *ext)
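
ip_set_elem_len() above walks ip_set_extensions[] in size order, aligning each requested extension and recording its offset in set->offset[]; the accessors used throughout the converted type code then reduce to pointer arithmetic off the element. A sketch of what they amount to, modeled on the per-type macros deleted from ip_set_hash_gen.h later in this patch (the real definitions live in the ipset headers):

/* sketch: extension accessors as plain offset arithmetic */
#define ext_timeout(e, set)						\
	((unsigned long *)(((void *)(e)) +				\
			   (set)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, set)						\
	((struct ip_set_counter *)(((void *)(e)) +			\
				   (set)->offset[IPSET_EXT_ID_COUNTER]))
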
@@ -334,6 +399,12 @@
 			ext->packets = be64_to_cpu(nla_get_be64(
 						   tb[IPSET_ATTR_PACKETS]));
 	}
+	if (tb[IPSET_ATTR_COMMENT]) {
+		if (!(set->extensions & IPSET_EXT_COMMENT))
+			return -IPSET_ERR_COMMENT;
+		ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
@@ -374,13 +445,14 @@
  */
 
 static inline struct ip_set *
-ip_set_rcu_get(ip_set_id_t index)
+ip_set_rcu_get(struct net *net, ip_set_id_t index)
 {
 	struct ip_set *set;
+	struct ip_set_net *inst = ip_set_pernet(net);
 
 	rcu_read_lock();
 	/* ip_set_list itself needs to be protected */
-	set = rcu_dereference(ip_set_list)[index];
+	set = rcu_dereference(inst->ip_set_list)[index];
 	rcu_read_unlock();
 
 	return set;
@@ -390,7 +462,8 @@
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	    const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(index);
+	struct ip_set *set = ip_set_rcu_get(
+			dev_net(par->in ? par->in : par->out), index);
 	int ret = 0;
 
 	BUG_ON(set == NULL);
@@ -428,7 +501,8 @@
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(index);
+	struct ip_set *set = ip_set_rcu_get(
+			dev_net(par->in ? par->in : par->out), index);
 	int ret;
 
 	BUG_ON(set == NULL);
@@ -450,7 +524,8 @@
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(index);
+	struct ip_set *set = ip_set_rcu_get(
+			dev_net(par->in ? par->in : par->out), index);
 	int ret = 0;
 
 	BUG_ON(set == NULL);
@@ -474,14 +549,15 @@
  *
  */
 ip_set_id_t
-ip_set_get_byname(const char *name, struct ip_set **set)
+ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
 {
 	ip_set_id_t i, index = IPSET_INVALID_ID;
 	struct ip_set *s;
+	struct ip_set_net *inst = ip_set_pernet(net);
 
 	rcu_read_lock();
-	for (i = 0; i < ip_set_max; i++) {
-		s = rcu_dereference(ip_set_list)[i];
+	for (i = 0; i < inst->ip_set_max; i++) {
+		s = rcu_dereference(inst->ip_set_list)[i];
 		if (s != NULL && STREQ(s->name, name)) {
 			__ip_set_get(s);
 			index = i;
@@ -501,17 +577,26 @@
  * to be valid, after calling this function.
  *
  */
-void
-ip_set_put_byindex(ip_set_id_t index)
+
+static inline void
+__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
 {
 	struct ip_set *set;
 
 	rcu_read_lock();
-	set = rcu_dereference(ip_set_list)[index];
+	set = rcu_dereference(inst->ip_set_list)[index];
 	if (set != NULL)
 		__ip_set_put(set);
 	rcu_read_unlock();
 }
+
+void
+ip_set_put_byindex(struct net *net, ip_set_id_t index)
+{
+	struct ip_set_net *inst = ip_set_pernet(net);
+
+	__ip_set_put_byindex(inst, index);
+}
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
 /*
@@ -522,9 +607,9 @@
  *
  */
 const char *
-ip_set_name_byindex(ip_set_id_t index)
+ip_set_name_byindex(struct net *net, ip_set_id_t index)
 {
-	const struct ip_set *set = ip_set_rcu_get(index);
+	const struct ip_set *set = ip_set_rcu_get(net, index);
 
 	BUG_ON(set == NULL);
 	BUG_ON(set->ref == 0);
@@ -546,14 +631,15 @@
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get(const char *name)
+ip_set_nfnl_get(struct net *net, const char *name)
 {
 	ip_set_id_t i, index = IPSET_INVALID_ID;
 	struct ip_set *s;
+	struct ip_set_net *inst = ip_set_pernet(net);
 
 	nfnl_lock(NFNL_SUBSYS_IPSET);
-	for (i = 0; i < ip_set_max; i++) {
-		s = nfnl_set(i);
+	for (i = 0; i < inst->ip_set_max; i++) {
+		s = nfnl_set(inst, i);
 		if (s != NULL && STREQ(s->name, name)) {
 			__ip_set_get(s);
 			index = i;
@@ -573,15 +659,16 @@
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get_byindex(ip_set_id_t index)
+ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
 {
 	struct ip_set *set;
+	struct ip_set_net *inst = ip_set_pernet(net);
 
-	if (index > ip_set_max)
+	if (index > inst->ip_set_max)
 		return IPSET_INVALID_ID;
 
 	nfnl_lock(NFNL_SUBSYS_IPSET);
-	set = nfnl_set(index);
+	set = nfnl_set(inst, index);
 	if (set)
 		__ip_set_get(set);
 	else
@@ -600,13 +687,17 @@
  * The nfnl mutex is used in the function.
  */
 void
-ip_set_nfnl_put(ip_set_id_t index)
+ip_set_nfnl_put(struct net *net, ip_set_id_t index)
 {
 	struct ip_set *set;
+	struct ip_set_net *inst = ip_set_pernet(net);
+
 	nfnl_lock(NFNL_SUBSYS_IPSET);
-	set = nfnl_set(index);
-	if (set != NULL)
-		__ip_set_put(set);
+	if (!inst->is_deleted) { /* already destroyed by ip_set_net_exit() */
+		set = nfnl_set(inst, index);
+		if (set != NULL)
+			__ip_set_put(set);
+	}
 	nfnl_unlock(NFNL_SUBSYS_IPSET);
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
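
The is_deleted flag guards a namespace-teardown race: ip_set_net_exit() destroys every set and frees inst->ip_set_list, yet a module that still holds a set reference (xt_set, for instance) may call ip_set_nfnl_put() afterwards. Both sides take the nfnl mutex, so checking the flag first turns the late put into a harmless no-op rather than a walk over freed list memory.
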
@@ -664,14 +755,14 @@
 };
 
 static struct ip_set *
-find_set_and_id(const char *name, ip_set_id_t *id)
+find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
 {
 	struct ip_set *set = NULL;
 	ip_set_id_t i;
 
 	*id = IPSET_INVALID_ID;
-	for (i = 0; i < ip_set_max; i++) {
-		set = nfnl_set(i);
+	for (i = 0; i < inst->ip_set_max; i++) {
+		set = nfnl_set(inst, i);
 		if (set != NULL && STREQ(set->name, name)) {
 			*id = i;
 			break;
@@ -681,22 +772,23 @@
 }
 
 static inline struct ip_set *
-find_set(const char *name)
+find_set(struct ip_set_net *inst, const char *name)
 {
 	ip_set_id_t id;
 
-	return find_set_and_id(name, &id);
+	return find_set_and_id(inst, name, &id);
 }
 
 static int
-find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
+	     struct ip_set **set)
 {
 	struct ip_set *s;
 	ip_set_id_t i;
 
 	*index = IPSET_INVALID_ID;
-	for (i = 0;  i < ip_set_max; i++) {
-		s = nfnl_set(i);
+	for (i = 0;  i < inst->ip_set_max; i++) {
+		s = nfnl_set(inst, i);
 		if (s == NULL) {
 			if (*index == IPSET_INVALID_ID)
 				*index = i;
@@ -725,6 +817,8 @@
 	      const struct nlmsghdr *nlh,
 	      const struct nlattr * const attr[])
 {
+	struct net *net = sock_net(ctnl);
+	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set, *clash = NULL;
 	ip_set_id_t index = IPSET_INVALID_ID;
 	struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
@@ -783,7 +877,7 @@
 		goto put_out;
 	}
 
-	ret = set->type->create(set, tb, flags);
+	ret = set->type->create(net, set, tb, flags);
 	if (ret != 0)
 		goto put_out;
 
@@ -794,7 +888,7 @@
 	 * by the nfnl mutex. Find the first free index in ip_set_list
 	 * and check clashing.
 	 */
-	ret = find_free_id(set->name, &index, &clash);
+	ret = find_free_id(inst, set->name, &index, &clash);
 	if (ret == -EEXIST) {
 		/* If this is the same set and requested, ignore error */
 		if ((flags & IPSET_FLAG_EXIST) &&
@@ -807,9 +901,9 @@
 		goto cleanup;
 	} else if (ret == -IPSET_ERR_MAX_SETS) {
 		struct ip_set **list, **tmp;
-		ip_set_id_t i = ip_set_max + IP_SET_INC;
+		ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
 
-		if (i < ip_set_max || i == IPSET_INVALID_ID)
+		if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
 			/* Wraparound */
 			goto cleanup;
 
@@ -817,14 +911,14 @@
 		if (!list)
 			goto cleanup;
 		/* nfnl mutex is held, both lists are valid */
-		tmp = nfnl_dereference(ip_set_list);
-		memcpy(list, tmp, sizeof(struct ip_set *) * ip_set_max);
-		rcu_assign_pointer(ip_set_list, list);
+		tmp = nfnl_dereference(inst->ip_set_list);
+		memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+		rcu_assign_pointer(inst->ip_set_list, list);
 		/* Make sure all current packets have passed through */
 		synchronize_net();
 		/* Use new list */
-		index = ip_set_max;
-		ip_set_max = i;
+		index = inst->ip_set_max;
+		inst->ip_set_max = i;
 		kfree(tmp);
 		ret = 0;
 	} else if (ret)
@@ -834,7 +928,7 @@
 	 * Finally! Add our shiny new set to the list, and be done.
 	 */
 	pr_debug("create: '%s' created with index %u!\n", set->name, index);
-	nfnl_set(index) = set;
+	nfnl_set(inst, index) = set;
 
 	return ret;
 
@@ -857,12 +951,12 @@
 };
 
 static void
-ip_set_destroy_set(ip_set_id_t index)
+ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
 {
-	struct ip_set *set = nfnl_set(index);
+	struct ip_set *set = nfnl_set(inst, index);
 
 	pr_debug("set: %s\n",  set->name);
-	nfnl_set(index) = NULL;
+	nfnl_set(inst, index) = NULL;
 
 	/* Must call it without holding any lock */
 	set->variant->destroy(set);
@@ -875,6 +969,7 @@
 	       const struct nlmsghdr *nlh,
 	       const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *s;
 	ip_set_id_t i;
 	int ret = 0;
@@ -894,21 +989,22 @@
 	 */
 	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
-		for (i = 0; i < ip_set_max; i++) {
-			s = nfnl_set(i);
+		for (i = 0; i < inst->ip_set_max; i++) {
+			s = nfnl_set(inst, i);
 			if (s != NULL && s->ref) {
 				ret = -IPSET_ERR_BUSY;
 				goto out;
 			}
 		}
 		read_unlock_bh(&ip_set_ref_lock);
-		for (i = 0; i < ip_set_max; i++) {
-			s = nfnl_set(i);
+		for (i = 0; i < inst->ip_set_max; i++) {
+			s = nfnl_set(inst, i);
 			if (s != NULL)
-				ip_set_destroy_set(i);
+				ip_set_destroy_set(inst, i);
 		}
 	} else {
-		s = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &i);
+		s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+				    &i);
 		if (s == NULL) {
 			ret = -ENOENT;
 			goto out;
@@ -918,7 +1014,7 @@
 		}
 		read_unlock_bh(&ip_set_ref_lock);
 
-		ip_set_destroy_set(i);
+		ip_set_destroy_set(inst, i);
 	}
 	return 0;
 out:
@@ -943,6 +1039,7 @@
 	     const struct nlmsghdr *nlh,
 	     const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *s;
 	ip_set_id_t i;
 
@@ -950,13 +1047,13 @@
 		return -IPSET_ERR_PROTOCOL;
 
 	if (!attr[IPSET_ATTR_SETNAME]) {
-		for (i = 0; i < ip_set_max; i++) {
-			s = nfnl_set(i);
+		for (i = 0; i < inst->ip_set_max; i++) {
+			s = nfnl_set(inst, i);
 			if (s != NULL)
 				ip_set_flush_set(s);
 		}
 	} else {
-		s = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+		s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 		if (s == NULL)
 			return -ENOENT;
 
@@ -982,6 +1079,7 @@
 	      const struct nlmsghdr *nlh,
 	      const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set, *s;
 	const char *name2;
 	ip_set_id_t i;
@@ -992,7 +1090,7 @@
 		     attr[IPSET_ATTR_SETNAME2] == NULL))
 		return -IPSET_ERR_PROTOCOL;
 
-	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
 
@@ -1003,8 +1101,8 @@
 	}
 
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
-	for (i = 0; i < ip_set_max; i++) {
-		s = nfnl_set(i);
+	for (i = 0; i < inst->ip_set_max; i++) {
+		s = nfnl_set(inst, i);
 		if (s != NULL && STREQ(s->name, name2)) {
 			ret = -IPSET_ERR_EXIST_SETNAME2;
 			goto out;
@@ -1031,6 +1129,7 @@
 	    const struct nlmsghdr *nlh,
 	    const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
@@ -1040,11 +1139,13 @@
 		     attr[IPSET_ATTR_SETNAME2] == NULL))
 		return -IPSET_ERR_PROTOCOL;
 
-	from = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &from_id);
+	from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+			       &from_id);
 	if (from == NULL)
 		return -ENOENT;
 
-	to = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id);
+	to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
+			     &to_id);
 	if (to == NULL)
 		return -IPSET_ERR_EXIST_SETNAME2;
 
@@ -1061,8 +1162,8 @@
 
 	write_lock_bh(&ip_set_ref_lock);
 	swap(from->ref, to->ref);
-	nfnl_set(from_id) = to;
-	nfnl_set(to_id) = from;
+	nfnl_set(inst, from_id) = to;
+	nfnl_set(inst, to_id) = from;
 	write_unlock_bh(&ip_set_ref_lock);
 
 	return 0;
@@ -1081,9 +1182,12 @@
 static int
 ip_set_dump_done(struct netlink_callback *cb)
 {
-	if (cb->args[2]) {
-		pr_debug("release set %s\n", nfnl_set(cb->args[1])->name);
-		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
+	struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET];
+
+	if (cb->args[IPSET_CB_ARG0]) {
+		pr_debug("release set %s\n",
+			 nfnl_set(inst, cb->args[IPSET_CB_INDEX])->name);
+		__ip_set_put_byindex(inst,
+			(ip_set_id_t) cb->args[IPSET_CB_INDEX]);
 	}
 	return 0;
 }
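
Stashing the pernet instance in cb->args[IPSET_CB_NET] works because the netlink_callback args slots are unsigned long, wide enough for a kernel pointer on all supported architectures; ip_set_dump_done() above simply casts it back. The named IPSET_CB_* indices replace the bare args[0..2] used before, as the updated comment in dump_init() below spells out.
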
@@ -1101,7 +1205,7 @@
 }
 
 static int
-dump_init(struct netlink_callback *cb)
+dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
 	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
@@ -1114,21 +1218,22 @@
 	nla_parse(cda, IPSET_ATTR_CMD_MAX,
 		  attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
 
-	/* cb->args[0] : dump single set/all sets
-	 *         [1] : set index
-	 *         [..]: type specific
+	/* cb->args[IPSET_CB_NET]:	net namespace
+	 *         [IPSET_CB_DUMP]:	dump single set/all sets
+	 *         [IPSET_CB_INDEX]:	set index
+	 *         [IPSET_CB_ARG0]:	type specific
 	 */
 
 	if (cda[IPSET_ATTR_SETNAME]) {
 		struct ip_set *set;
 
-		set = find_set_and_id(nla_data(cda[IPSET_ATTR_SETNAME]),
+		set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
 				      &index);
 		if (set == NULL)
 			return -ENOENT;
 
 		dump_type = DUMP_ONE;
-		cb->args[1] = index;
+		cb->args[IPSET_CB_INDEX] = index;
 	} else
 		dump_type = DUMP_ALL;
 
@@ -1136,7 +1241,8 @@
 		u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
 		dump_type |= (f << 16);
 	}
-	cb->args[0] = dump_type;
+	cb->args[IPSET_CB_NET] = (unsigned long)inst;
+	cb->args[IPSET_CB_DUMP] = dump_type;
 
 	return 0;
 }
@@ -1148,11 +1254,12 @@
 	struct ip_set *set = NULL;
 	struct nlmsghdr *nlh = NULL;
 	unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
+	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
 	u32 dump_type, dump_flags;
 	int ret = 0;
 
-	if (!cb->args[0]) {
-		ret = dump_init(cb);
+	if (!cb->args[IPSET_CB_DUMP]) {
+		ret = dump_init(cb, inst);
 		if (ret < 0) {
 			nlh = nlmsg_hdr(cb->skb);
 			/* We have to create and send the error message
@@ -1163,18 +1270,19 @@
 		}
 	}
 
-	if (cb->args[1] >= ip_set_max)
+	if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
 		goto out;
 
-	dump_type = DUMP_TYPE(cb->args[0]);
-	dump_flags = DUMP_FLAGS(cb->args[0]);
-	max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+	dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
+	dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
+	max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
+				    : inst->ip_set_max;
 dump_last:
-	pr_debug("args[0]: %u %u args[1]: %ld\n",
-		 dump_type, dump_flags, cb->args[1]);
-	for (; cb->args[1] < max; cb->args[1]++) {
-		index = (ip_set_id_t) cb->args[1];
-		set = nfnl_set(index);
+	pr_debug("dump type, flag: %u %u index: %ld\n",
+		 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
+	for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
+		index = (ip_set_id_t) cb->args[IPSET_CB_INDEX];
+		set = nfnl_set(inst, index);
 		if (set == NULL) {
 			if (dump_type == DUMP_ONE) {
 				ret = -ENOENT;
@@ -1190,7 +1298,7 @@
 		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
-		if (!cb->args[2]) {
+		if (!cb->args[IPSET_CB_ARG0]) {
 			/* Start listing: make sure set won't be destroyed */
 			pr_debug("reference set\n");
 			__ip_set_get(set);
@@ -1207,7 +1315,7 @@
 			goto nla_put_failure;
 		if (dump_flags & IPSET_FLAG_LIST_SETNAME)
 			goto next_set;
-		switch (cb->args[2]) {
+		switch (cb->args[IPSET_CB_ARG0]) {
 		case 0:
 			/* Core header data */
 			if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
@@ -1227,7 +1335,7 @@
 			read_lock_bh(&set->lock);
 			ret = set->variant->list(set, skb, cb);
 			read_unlock_bh(&set->lock);
-			if (!cb->args[2])
+			if (!cb->args[IPSET_CB_ARG0])
 				/* Set is done, proceed with next one */
 				goto next_set;
 			goto release_refcount;
@@ -1236,8 +1344,8 @@
 	/* If we dump all sets, continue with dumping last ones */
 	if (dump_type == DUMP_ALL) {
 		dump_type = DUMP_LAST;
-		cb->args[0] = dump_type | (dump_flags << 16);
-		cb->args[1] = 0;
+		cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
+		cb->args[IPSET_CB_INDEX] = 0;
 		goto dump_last;
 	}
 	goto out;
@@ -1246,15 +1354,15 @@
 	ret = -EFAULT;
 next_set:
 	if (dump_type == DUMP_ONE)
-		cb->args[1] = IPSET_INVALID_ID;
+		cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
 	else
-		cb->args[1]++;
+		cb->args[IPSET_CB_INDEX]++;
 release_refcount:
 	/* If there was an error or set is done, release set */
-	if (ret || !cb->args[2]) {
-		pr_debug("release set %s\n", nfnl_set(index)->name);
-		ip_set_put_byindex(index);
-		cb->args[2] = 0;
+	if (ret || !cb->args[IPSET_CB_ARG0]) {
+		pr_debug("release set %s\n", nfnl_set(inst, index)->name);
+		__ip_set_put_byindex(inst, index);
+		cb->args[IPSET_CB_ARG0] = 0;
 	}
 out:
 	if (nlh) {
@@ -1356,6 +1464,7 @@
 	    const struct nlmsghdr *nlh,
 	    const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
 	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
 	const struct nlattr *nla;
@@ -1374,7 +1483,7 @@
 		       attr[IPSET_ATTR_LINENO] == NULL))))
 		return -IPSET_ERR_PROTOCOL;
 
-	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
 
@@ -1410,6 +1519,7 @@
 	    const struct nlmsghdr *nlh,
 	    const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
 	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
 	const struct nlattr *nla;
@@ -1428,7 +1538,7 @@
 		       attr[IPSET_ATTR_LINENO] == NULL))))
 		return -IPSET_ERR_PROTOCOL;
 
-	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
 
@@ -1464,6 +1574,7 @@
 	     const struct nlmsghdr *nlh,
 	     const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	struct ip_set *set;
 	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
 	int ret = 0;
@@ -1474,7 +1585,7 @@
 		     !flag_nested(attr[IPSET_ATTR_DATA])))
 		return -IPSET_ERR_PROTOCOL;
 
-	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
 
@@ -1499,6 +1610,7 @@
 	      const struct nlmsghdr *nlh,
 	      const struct nlattr * const attr[])
 {
+	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
 	const struct ip_set *set;
 	struct sk_buff *skb2;
 	struct nlmsghdr *nlh2;
@@ -1508,7 +1620,7 @@
 		     attr[IPSET_ATTR_SETNAME] == NULL))
 		return -IPSET_ERR_PROTOCOL;
 
-	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
 
@@ -1733,8 +1845,10 @@
 	unsigned int *op;
 	void *data;
 	int copylen = *len, ret = 0;
+	struct net *net = sock_net(sk);
+	struct ip_set_net *inst = ip_set_pernet(net);
 
-	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 	if (optval != SO_IP_SET)
 		return -EBADF;
@@ -1783,22 +1897,39 @@
 		}
 		req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
 		nfnl_lock(NFNL_SUBSYS_IPSET);
-		find_set_and_id(req_get->set.name, &id);
+		find_set_and_id(inst, req_get->set.name, &id);
 		req_get->set.index = id;
 		nfnl_unlock(NFNL_SUBSYS_IPSET);
 		goto copy;
 	}
+	case IP_SET_OP_GET_FNAME: {
+		struct ip_set_req_get_set_family *req_get = data;
+		ip_set_id_t id;
+
+		if (*len != sizeof(struct ip_set_req_get_set_family)) {
+			ret = -EINVAL;
+			goto done;
+		}
+		req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+		nfnl_lock(NFNL_SUBSYS_IPSET);
+		find_set_and_id(inst, req_get->set.name, &id);
+		req_get->set.index = id;
+		if (id != IPSET_INVALID_ID)
+			req_get->family = nfnl_set(inst, id)->family;
+		nfnl_unlock(NFNL_SUBSYS_IPSET);
+		goto copy;
+	}
 	case IP_SET_OP_GET_BYINDEX: {
 		struct ip_set_req_get_set *req_get = data;
 		struct ip_set *set;
 
 		if (*len != sizeof(struct ip_set_req_get_set) ||
-		    req_get->set.index >= ip_set_max) {
+		    req_get->set.index >= inst->ip_set_max) {
 			ret = -EINVAL;
 			goto done;
 		}
 		nfnl_lock(NFNL_SUBSYS_IPSET);
-		set = nfnl_set(req_get->set.index);
+		set = nfnl_set(inst, req_get->set.index);
 		strncpy(req_get->set.name, set ? set->name : "",
 			IPSET_MAXNAMELEN);
 		nfnl_unlock(NFNL_SUBSYS_IPSET);
@@ -1827,49 +1958,81 @@
 	.owner		= THIS_MODULE,
 };
 
+static int __net_init
+ip_set_net_init(struct net *net)
+{
+	struct ip_set_net *inst = ip_set_pernet(net);
+	struct ip_set **list;
+
+	inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
+	if (inst->ip_set_max >= IPSET_INVALID_ID)
+		inst->ip_set_max = IPSET_INVALID_ID - 1;
+
+	list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	inst->is_deleted = 0;
+	rcu_assign_pointer(inst->ip_set_list, list);
+	pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+	return 0;
+}
+
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+	struct ip_set_net *inst = ip_set_pernet(net);
+	struct ip_set *set = NULL;
+	ip_set_id_t i;
+
+	inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
+
+	for (i = 0; i < inst->ip_set_max; i++) {
+		set = nfnl_set(inst, i);
+		if (set != NULL)
+			ip_set_destroy_set(inst, i);
+	}
+	kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+}
+
+static struct pernet_operations ip_set_net_ops = {
+	.init	= ip_set_net_init,
+	.exit   = ip_set_net_exit,
+	.id	= &ip_set_net_id,
+	.size	= sizeof(struct ip_set_net)
+};
+
 static int __init
 ip_set_init(void)
 {
-	struct ip_set **list;
-	int ret;
-
-	if (max_sets)
-		ip_set_max = max_sets;
-	if (ip_set_max >= IPSET_INVALID_ID)
-		ip_set_max = IPSET_INVALID_ID - 1;
-
-	list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL);
-	if (!list)
-		return -ENOMEM;
-
-	rcu_assign_pointer(ip_set_list, list);
-	ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+	int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
 	if (ret != 0) {
 		pr_err("ip_set: cannot register with nfnetlink.\n");
-		kfree(list);
 		return ret;
 	}
 	ret = nf_register_sockopt(&so_set);
 	if (ret != 0) {
 		pr_err("SO_SET registry failed: %d\n", ret);
 		nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-		kfree(list);
 		return ret;
 	}
-
-	pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+	ret = register_pernet_subsys(&ip_set_net_ops);
+	if (ret) {
+		pr_err("ip_set: cannot register pernet_subsys.\n");
+		nf_unregister_sockopt(&so_set);
+		nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+		return ret;
+	}
 	return 0;
 }
 
 static void __exit
 ip_set_fini(void)
 {
-	struct ip_set **list = rcu_dereference_protected(ip_set_list, 1);
-
-	/* There can't be any existing set */
+	unregister_pernet_subsys(&ip_set_net_ops);
 	nf_unregister_sockopt(&so_set);
 	nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-	kfree(list);
 	pr_debug("these are the famous last words\n");
 }
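
With all mutable state moved into struct ip_set_net, module init and exit above shrink to plain subsystem registration plus register_pernet_subsys(); the set list is now allocated once per namespace instead of once globally. For reference, the generic shape of the pernet pattern this file adopts (a sketch with error handling trimmed; the demo_* names are placeholders):

/* sketch of the pernet pattern: .size is allocated per netns,
 * .id indexes it, net_generic() retrieves it */
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_net { int counter; };
static int demo_net_id __read_mostly;

static int __net_init demo_net_init(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	dn->counter = 0;	/* fresh state for every namespace */
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* tear down the per-namespace state here */
}

static struct pernet_operations demo_net_ops = {
	.init	= demo_net_init,
	.exit	= demo_net_exit,
	.id	= &demo_net_id,
	.size	= sizeof(struct demo_net),
};

/* module init: register_pernet_subsys(&demo_net_ops);
 * module exit: unregister_pernet_subsys(&demo_net_ops); */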
 
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index dac156f..29fb01d 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -102,9 +102,25 @@
 	int protocol = iph->protocol;
 
 	/* See comments at tcp_match in ip_tables.c */
-	if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+	if (protocol <= 0)
 		return false;
 
+	if (ntohs(iph->frag_off) & IP_OFFSET)
+		switch (protocol) {
+		case IPPROTO_TCP:
+		case IPPROTO_SCTP:
+		case IPPROTO_UDP:
+		case IPPROTO_UDPLITE:
+		case IPPROTO_ICMP:
+			/* Port info not available for fragment offset > 0 */
+			return false;
+		default:
+			/* Other protocols don't have ports,
+			 * so we can match fragments */
+			*proto = protocol;
+			return true;
+		}
+
 	return get_port(skb, protocol, protooff, src, port, proto);
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
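
Spelled out, the reworked fragment handling above gives three outcomes: an unfragmented packet or first fragment falls through to get_port() as before; a later fragment (nonzero IP_OFFSET) of TCP, SCTP, UDP, UDP-Lite or ICMP cannot match, because the header carrying the port (or the ICMP type/code, which ipset folds into the port slot) only travels in the first fragment; and a later fragment of any other protocol now matches on protocol alone rather than being rejected outright, which is the behavioural fix here.
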
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 707bc52..be6932a 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -15,8 +15,7 @@
 #define rcu_dereference_bh(p)	rcu_dereference(p)
 #endif
 
-#define CONCAT(a, b)		a##b
-#define TOKEN(a, b)		CONCAT(a, b)
+#define rcu_dereference_bh_nfnl(p)	rcu_dereference_bh_check(p, 1)
 
 /* Hashing which uses arrays to resolve clashing. The hash table is resized
  * (doubled) when searching becomes too long.
@@ -78,10 +77,14 @@
 
 #define hbucket(h, i)		(&((h)->bucket[i]))
 
+#ifndef IPSET_NET_COUNT
+#define IPSET_NET_COUNT		1
+#endif
+
 /* Book-keeping of the prefixes added to the set */
 struct net_prefixes {
-	u8 cidr;		/* the different cidr values in the set */
-	u32 nets;		/* number of elements per cidr */
+	u32 nets[IPSET_NET_COUNT]; /* number of elements per cidr */
+	u8 cidr[IPSET_NET_COUNT];  /* the different cidr values in the set */
 };
 
 /* Compute the hash table size */
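
IPSET_NET_COUNT parameterizes how many network prefixes a single element carries, which is why the bookkeeping fields above grow into per-dimension arrays. A type template keying on two networks would presumably override the default before pulling in this header, along these lines (hypothetical example):

/* hypothetical two-network type, hash:net,net style */
#define IPSET_NET_COUNT	2
#include "ip_set_hash_gen.h"
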
@@ -114,23 +117,6 @@
 	return bits;
 }
 
-/* Destroy the hashtable part of the set */
-static void
-ahash_destroy(struct htable *t)
-{
-	struct hbucket *n;
-	u32 i;
-
-	for (i = 0; i < jhash_size(t->htable_bits); i++) {
-		n = hbucket(t, i);
-		if (n->size)
-			/* FIXME: use slab cache */
-			kfree(n->value);
-	}
-
-	ip_set_free(t);
-}
-
 static int
 hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 {
@@ -156,30 +142,30 @@
 }
 
 #ifdef IP_SET_HASH_WITH_NETS
+#if IPSET_NET_COUNT > 1
+#define __CIDR(cidr, i)		(cidr[i])
+#else
+#define __CIDR(cidr, i)		(cidr)
+#endif
 #ifdef IP_SET_HASH_WITH_NETS_PACKED
 /* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr)		(cidr + 1)
+#define CIDR(cidr, i)		(__CIDR(cidr, i) + 1)
 #else
-#define CIDR(cidr)		(cidr)
+#define CIDR(cidr, i)		(__CIDR(cidr, i))
 #endif
 
 #define SET_HOST_MASK(family)	(family == AF_INET ? 32 : 128)
 
 #ifdef IP_SET_HASH_WITH_MULTI
-#define NETS_LENGTH(family)	(SET_HOST_MASK(family) + 1)
+#define NLEN(family)		(SET_HOST_MASK(family) + 1)
 #else
-#define NETS_LENGTH(family)	SET_HOST_MASK(family)
+#define NLEN(family)		SET_HOST_MASK(family)
 #endif
 
 #else
-#define NETS_LENGTH(family)	0
+#define NLEN(family)		0
 #endif /* IP_SET_HASH_WITH_NETS */
 
-#define ext_timeout(e, h)	\
-(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, h)	\
-(struct ip_set_counter *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_COUNTER])
-
 #endif /* _IP_SET_HASH_GEN_H */
 
 /* Family dependent templates */
@@ -194,6 +180,8 @@
 #undef mtype_data_next
 #undef mtype_elem
 
+#undef mtype_ahash_destroy
+#undef mtype_ext_cleanup
 #undef mtype_add_cidr
 #undef mtype_del_cidr
 #undef mtype_ahash_memsize
@@ -220,41 +208,43 @@
 
 #undef HKEY
 
-#define mtype_data_equal	TOKEN(MTYPE, _data_equal)
+#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
 #ifdef IP_SET_HASH_WITH_NETS
-#define mtype_do_data_match	TOKEN(MTYPE, _do_data_match)
+#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
 #else
 #define mtype_do_data_match(d)	1
 #endif
-#define mtype_data_set_flags	TOKEN(MTYPE, _data_set_flags)
-#define mtype_data_reset_flags	TOKEN(MTYPE, _data_reset_flags)
-#define mtype_data_netmask	TOKEN(MTYPE, _data_netmask)
-#define mtype_data_list		TOKEN(MTYPE, _data_list)
-#define mtype_data_next		TOKEN(MTYPE, _data_next)
-#define mtype_elem		TOKEN(MTYPE, _elem)
-#define mtype_add_cidr		TOKEN(MTYPE, _add_cidr)
-#define mtype_del_cidr		TOKEN(MTYPE, _del_cidr)
-#define mtype_ahash_memsize	TOKEN(MTYPE, _ahash_memsize)
-#define mtype_flush		TOKEN(MTYPE, _flush)
-#define mtype_destroy		TOKEN(MTYPE, _destroy)
-#define mtype_gc_init		TOKEN(MTYPE, _gc_init)
-#define mtype_same_set		TOKEN(MTYPE, _same_set)
-#define mtype_kadt		TOKEN(MTYPE, _kadt)
-#define mtype_uadt		TOKEN(MTYPE, _uadt)
+#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
+#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
+#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
+#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
+#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
+#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
+#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)
+#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
+#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
+#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
+#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
+#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
+#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)
 #define mtype			MTYPE
 
-#define mtype_elem		TOKEN(MTYPE, _elem)
-#define mtype_add		TOKEN(MTYPE, _add)
-#define mtype_del		TOKEN(MTYPE, _del)
-#define mtype_test_cidrs	TOKEN(MTYPE, _test_cidrs)
-#define mtype_test		TOKEN(MTYPE, _test)
-#define mtype_expire		TOKEN(MTYPE, _expire)
-#define mtype_resize		TOKEN(MTYPE, _resize)
-#define mtype_head		TOKEN(MTYPE, _head)
-#define mtype_list		TOKEN(MTYPE, _list)
-#define mtype_gc		TOKEN(MTYPE, _gc)
-#define mtype_variant		TOKEN(MTYPE, _variant)
-#define mtype_data_match	TOKEN(MTYPE, _data_match)
+#define mtype_add		IPSET_TOKEN(MTYPE, _add)
+#define mtype_del		IPSET_TOKEN(MTYPE, _del)
+#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
+#define mtype_test		IPSET_TOKEN(MTYPE, _test)
+#define mtype_expire		IPSET_TOKEN(MTYPE, _expire)
+#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
+#define mtype_head		IPSET_TOKEN(MTYPE, _head)
+#define mtype_list		IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
+#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
+#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)
 
 #ifndef HKEY_DATALEN
 #define HKEY_DATALEN		sizeof(struct mtype_elem)
@@ -269,13 +259,10 @@
 
 /* The generic hash structure */
 struct htype {
-	struct htable *table;	/* the hash table */
+	struct htable __rcu *table; /* the hash table */
 	u32 maxelem;		/* max elements in the hash */
 	u32 elements;		/* current element (vs timeout) */
 	u32 initval;		/* random jhash init value */
-	u32 timeout;		/* timeout value, if enabled */
-	size_t dsize;		/* data struct size */
-	size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
 	struct timer_list gc;	/* garbage collection when timeout enabled */
 	struct mtype_elem next; /* temporary storage for uadd */
 #ifdef IP_SET_HASH_WITH_MULTI
@@ -297,49 +284,49 @@
 /* Network cidr size bookkeeping when the hash stores different
  * sized networks */
 static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
 	int i, j;
 
 	/* Add in increasing prefix order, so larger cidr first */
-	for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+	for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
 		if (j != -1)
 			continue;
-		else if (h->nets[i].cidr < cidr)
+		else if (h->nets[i].cidr[n] < cidr)
 			j = i;
-		else if (h->nets[i].cidr == cidr) {
-			h->nets[i].nets++;
+		else if (h->nets[i].cidr[n] == cidr) {
+			h->nets[i].nets[n]++;
 			return;
 		}
 	}
 	if (j != -1) {
 		for (; i > j; i--) {
-			h->nets[i].cidr = h->nets[i - 1].cidr;
-			h->nets[i].nets = h->nets[i - 1].nets;
+			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
+			h->nets[i].nets[n] = h->nets[i - 1].nets[n];
 		}
 	}
-	h->nets[i].cidr = cidr;
-	h->nets[i].nets = 1;
+	h->nets[i].cidr[n] = cidr;
+	h->nets[i].nets[n] = 1;
 }
 
 static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
 	u8 i, j, net_end = nets_length - 1;
 
 	for (i = 0; i < nets_length; i++) {
-	        if (h->nets[i].cidr != cidr)
+	        if (h->nets[i].cidr[n] != cidr)
 	                continue;
-                if (h->nets[i].nets > 1 || i == net_end ||
-                    h->nets[i + 1].nets == 0) {
-                        h->nets[i].nets--;
+                if (h->nets[i].nets[n] > 1 || i == net_end ||
+                    h->nets[i + 1].nets[n] == 0) {
+                        h->nets[i].nets[n]--;
                         return;
                 }
-                for (j = i; j < net_end && h->nets[j].nets; j++) {
-		        h->nets[j].cidr = h->nets[j + 1].cidr;
-		        h->nets[j].nets = h->nets[j + 1].nets;
+                for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+		        h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
+		        h->nets[j].nets[n] = h->nets[j + 1].nets[n];
                 }
-                h->nets[j].nets = 0;
+                h->nets[j].nets[n] = 0;
                 return;
 	}
 }
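
mtype_add_cidr()/mtype_del_cidr() gain the dimension index n, but the invariant per dimension is unchanged: nets[] holds each distinct cidr with a reference count, sorted so larger cidr values (more specific prefixes) come first, letting the test path probe specific networks before broad ones. A userspace toy of the single-dimension insert logic, illustrative only:

/* toy reproduction of the sorted, refcounted cidr list */
#include <stdio.h>

#define NLEN 8
static struct { unsigned char cidr; unsigned int nets; } nets[NLEN];

static void add_cidr(unsigned char cidr)
{
	int i, j = -1;

	for (i = 0; i < NLEN && nets[i].nets; i++) {
		if (j != -1)
			continue;
		else if (nets[i].cidr < cidr)
			j = i;		/* remember insertion point */
		else if (nets[i].cidr == cidr) {
			nets[i].nets++;	/* existing cidr: refcount it */
			return;
		}
	}
	if (j != -1)
		for (; i > j; i--)	/* shift smaller cidrs down */
			nets[i] = nets[i - 1];
	nets[i].cidr = cidr;
	nets[i].nets = 1;
}

int main(void)
{
	int i;

	add_cidr(24); add_cidr(16); add_cidr(24);
	for (i = 0; i < NLEN && nets[i].nets; i++)
		printf("/%u x%u\n", (unsigned)nets[i].cidr, nets[i].nets);
	return 0;	/* prints /24 x2, then /16 x1 */
}
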
@@ -347,10 +334,10 @@
 
 /* Calculate the actual memory size of the set data */
 static size_t
-mtype_ahash_memsize(const struct htype *h, u8 nets_length)
+mtype_ahash_memsize(const struct htype *h, const struct htable *t,
+		    u8 nets_length, size_t dsize)
 {
 	u32 i;
-	struct htable *t = h->table;
 	size_t memsize = sizeof(*h)
 			 + sizeof(*t)
 #ifdef IP_SET_HASH_WITH_NETS
@@ -359,35 +346,70 @@
 			 + jhash_size(t->htable_bits) * sizeof(struct hbucket);
 
 	for (i = 0; i < jhash_size(t->htable_bits); i++)
-		memsize += t->bucket[i].size * h->dsize;
+		memsize += t->bucket[i].size * dsize;
 
 	return memsize;
 }
 
+/* Get the ith element from the array block n */
+#define ahash_data(n, i, dsize)	\
+	((struct mtype_elem *)((n)->value + ((i) * (dsize))))
+
+static void
+mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
+{
+	int i;
+
+	for (i = 0; i < n->pos; i++)
+		ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
+}
+
 /* Flush a hash type of set: destroy all elements */
 static void
 mtype_flush(struct ip_set *set)
 {
 	struct htype *h = set->data;
-	struct htable *t = h->table;
+	struct htable *t;
 	struct hbucket *n;
 	u32 i;
 
+	t = rcu_dereference_bh_nfnl(h->table);
 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
 		n = hbucket(t, i);
 		if (n->size) {
+			if (set->extensions & IPSET_EXT_DESTROY)
+				mtype_ext_cleanup(set, n);
 			n->size = n->pos = 0;
 			/* FIXME: use slab cache */
 			kfree(n->value);
 		}
 	}
 #ifdef IP_SET_HASH_WITH_NETS
-	memset(h->nets, 0, sizeof(struct net_prefixes)
-			   * NETS_LENGTH(set->family));
+	memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
 #endif
 	h->elements = 0;
 }
 
+/* Destroy the hashtable part of the set */
+static void
+mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+{
+	struct hbucket *n;
+	u32 i;
+
+	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+		n = hbucket(t, i);
+		if (n->size) {
+			if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+				mtype_ext_cleanup(set, n);
+			/* FIXME: use slab cache */
+			kfree(n->value);
+		}
+	}
+
+	ip_set_free(t);
+}
+
 /* Destroy a hash type of set */
 static void
 mtype_destroy(struct ip_set *set)
@@ -397,7 +419,7 @@
 	if (set->extensions & IPSET_EXT_TIMEOUT)
 		del_timer_sync(&h->gc);
 
-	ahash_destroy(h->table);
+	mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true);
 #ifdef IP_SET_HASH_WITH_RBTREE
 	rbtree_destroy(&h->rbtree);
 #endif
@@ -414,10 +436,10 @@
 	init_timer(&h->gc);
 	h->gc.data = (unsigned long) set;
 	h->gc.function = gc;
-	h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+	h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&h->gc);
 	pr_debug("gc initialized, run in every %u\n",
-		 IPSET_GC_PERIOD(h->timeout));
+		 IPSET_GC_PERIOD(set->timeout));
 }
 
 static bool
@@ -428,37 +450,40 @@
 
 	/* Resizing changes htable_bits, so we ignore it */
 	return x->maxelem == y->maxelem &&
-	       x->timeout == y->timeout &&
+	       a->timeout == b->timeout &&
 #ifdef IP_SET_HASH_WITH_NETMASK
 	       x->netmask == y->netmask &&
 #endif
 	       a->extensions == b->extensions;
 }
 
-/* Get the ith element from the array block n */
-#define ahash_data(n, i, dsize)	\
-	((struct mtype_elem *)((n)->value + ((i) * (dsize))))
-
 /* Delete expired elements from the hashtable */
 static void
-mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
+mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
-	struct htable *t = h->table;
+	struct htable *t;
 	struct hbucket *n;
 	struct mtype_elem *data;
 	u32 i;
 	int j;
+#ifdef IP_SET_HASH_WITH_NETS
+	u8 k;
+#endif
 
+	rcu_read_lock_bh();
+	t = rcu_dereference_bh(h->table);
 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
 		n = hbucket(t, i);
 		for (j = 0; j < n->pos; j++) {
 			data = ahash_data(n, j, dsize);
-			if (ip_set_timeout_expired(ext_timeout(data, h))) {
+			if (ip_set_timeout_expired(ext_timeout(data, set))) {
 				pr_debug("expired %u/%u\n", i, j);
 #ifdef IP_SET_HASH_WITH_NETS
-				mtype_del_cidr(h, CIDR(data->cidr),
-					       nets_length);
+				for (k = 0; k < IPSET_NET_COUNT; k++)
+					mtype_del_cidr(h, CIDR(data->cidr, k),
+						       nets_length, k);
 #endif
+				ip_set_ext_destroy(set, data);
 				if (j != n->pos - 1)
 					/* Not last one */
 					memcpy(data,
@@ -481,6 +506,7 @@
 			n->value = tmp;
 		}
 	}
+	rcu_read_unlock_bh();
 }
 
 static void
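
The locking in mtype_expire() above deserves a note: its callers, like the gc timer just below, already hold set->lock for writing, so element mutation stays serialized; the added rcu_read_lock_bh()/rcu_dereference_bh() pair exists only because h->table became an __rcu pointer in this series and must be fetched inside a read-side section.
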
@@ -491,10 +517,10 @@
 
 	pr_debug("called\n");
 	write_lock_bh(&set->lock);
-	mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+	mtype_expire(set, h, NLEN(set->family), set->dsize);
 	write_unlock_bh(&set->lock);
 
-	h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+	h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&h->gc);
 }
 
@@ -505,7 +531,7 @@
 mtype_resize(struct ip_set *set, bool retried)
 {
 	struct htype *h = set->data;
-	struct htable *t, *orig = h->table;
+	struct htable *t, *orig = rcu_dereference_bh_nfnl(h->table);
 	u8 htable_bits = orig->htable_bits;
 #ifdef IP_SET_HASH_WITH_NETS
 	u8 flags;
@@ -520,8 +546,7 @@
 	if (SET_WITH_TIMEOUT(set) && !retried) {
 		i = h->elements;
 		write_lock_bh(&set->lock);
-		mtype_expire(set->data, NETS_LENGTH(set->family),
-			     h->dsize);
+		mtype_expire(set, set->data, NLEN(set->family), set->dsize);
 		write_unlock_bh(&set->lock);
 		if (h->elements < i)
 			return 0;
@@ -548,25 +573,25 @@
 	for (i = 0; i < jhash_size(orig->htable_bits); i++) {
 		n = hbucket(orig, i);
 		for (j = 0; j < n->pos; j++) {
-			data = ahash_data(n, j, h->dsize);
+			data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
 			flags = 0;
 			mtype_data_reset_flags(data, &flags);
 #endif
 			m = hbucket(t, HKEY(data, h->initval, htable_bits));
-			ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize);
+			ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize);
 			if (ret < 0) {
 #ifdef IP_SET_HASH_WITH_NETS
 				mtype_data_reset_flags(data, &flags);
 #endif
 				read_unlock_bh(&set->lock);
-				ahash_destroy(t);
+				mtype_ahash_destroy(set, t, false);
 				if (ret == -EAGAIN)
 					goto retry;
 				return ret;
 			}
-			d = ahash_data(m, m->pos++, h->dsize);
-			memcpy(d, data, h->dsize);
+			d = ahash_data(m, m->pos++, set->dsize);
+			memcpy(d, data, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
 			mtype_data_reset_flags(d, &flags);
 #endif
@@ -581,7 +606,7 @@
 
 	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
 		 orig->htable_bits, orig, t->htable_bits, t);
-	ahash_destroy(orig);
+	mtype_ahash_destroy(set, orig, false);
 
 	return 0;
 }
@@ -604,7 +629,7 @@
 
 	if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
 		/* FIXME: when set is full, we slow down here */
-		mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+		mtype_expire(set, h, NLEN(set->family), set->dsize);
 
 	if (h->elements >= h->maxelem) {
 		if (net_ratelimit())
@@ -618,11 +643,11 @@
 	key = HKEY(value, h->initval, t->htable_bits);
 	n = hbucket(t, key);
 	for (i = 0; i < n->pos; i++) {
-		data = ahash_data(n, i, h->dsize);
+		data = ahash_data(n, i, set->dsize);
 		if (mtype_data_equal(data, d, &multi)) {
 			if (flag_exist ||
 			    (SET_WITH_TIMEOUT(set) &&
-			     ip_set_timeout_expired(ext_timeout(data, h)))) {
+			     ip_set_timeout_expired(ext_timeout(data, set)))) {
 				/* Just the extensions could be overwritten */
 				j = i;
 				goto reuse_slot;
@@ -633,30 +658,37 @@
 		}
 		/* Reuse first timed out entry */
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(data, h)) &&
+		    ip_set_timeout_expired(ext_timeout(data, set)) &&
 		    j != AHASH_MAX(h) + 1)
 			j = i;
 	}
 reuse_slot:
 	if (j != AHASH_MAX(h) + 1) {
 		/* Fill out reused slot */
-		data = ahash_data(n, j, h->dsize);
+		data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-		mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
-		mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+		for (i = 0; i < IPSET_NET_COUNT; i++) {
+			mtype_del_cidr(h, CIDR(data->cidr, i),
+				       NLEN(set->family), i);
+			mtype_add_cidr(h, CIDR(d->cidr, i),
+				       NLEN(set->family), i);
+		}
 #endif
+		ip_set_ext_destroy(set, data);
 	} else {
 		/* Use/create a new slot */
 		TUNE_AHASH_MAX(h, multi);
-		ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize);
+		ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize);
 		if (ret != 0) {
 			if (ret == -EAGAIN)
 				mtype_data_next(&h->next, d);
 			goto out;
 		}
-		data = ahash_data(n, n->pos++, h->dsize);
+		data = ahash_data(n, n->pos++, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-		mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+		for (i = 0; i < IPSET_NET_COUNT; i++)
+			mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+				       i);
 #endif
 		h->elements++;
 	}
@@ -665,9 +697,11 @@
 	mtype_data_set_flags(data, flags);
 #endif
 	if (SET_WITH_TIMEOUT(set))
-		ip_set_timeout_set(ext_timeout(data, h), ext->timeout);
+		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
 	if (SET_WITH_COUNTER(set))
-		ip_set_init_counter(ext_counter(data, h), ext);
+		ip_set_init_counter(ext_counter(data, set), ext);
+	if (SET_WITH_COMMENT(set))
+		ip_set_init_comment(ext_comment(data, set), ext);
 
 out:
 	rcu_read_unlock_bh();
@@ -682,47 +716,60 @@
 	  struct ip_set_ext *mext, u32 flags)
 {
 	struct htype *h = set->data;
-	struct htable *t = h->table;
+	struct htable *t;
 	const struct mtype_elem *d = value;
 	struct mtype_elem *data;
 	struct hbucket *n;
-	int i;
+	int i, ret = -IPSET_ERR_EXIST;
+#ifdef IP_SET_HASH_WITH_NETS
+	u8 j;
+#endif
 	u32 key, multi = 0;
 
+	rcu_read_lock_bh();
+	t = rcu_dereference_bh(h->table);
 	key = HKEY(value, h->initval, t->htable_bits);
 	n = hbucket(t, key);
 	for (i = 0; i < n->pos; i++) {
-		data = ahash_data(n, i, h->dsize);
+		data = ahash_data(n, i, set->dsize);
 		if (!mtype_data_equal(data, d, &multi))
 			continue;
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(data, h)))
-			return -IPSET_ERR_EXIST;
+		    ip_set_timeout_expired(ext_timeout(data, set)))
+			goto out;
 		if (i != n->pos - 1)
 			/* Not last one */
-			memcpy(data, ahash_data(n, n->pos - 1, h->dsize),
-			       h->dsize);
+			memcpy(data, ahash_data(n, n->pos - 1, set->dsize),
+			       set->dsize);
 
 		n->pos--;
 		h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-		mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+		for (j = 0; j < IPSET_NET_COUNT; j++)
+			mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+				       j);
 #endif
+		ip_set_ext_destroy(set, data);
 		if (n->pos + AHASH_INIT_SIZE < n->size) {
 			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
-					    * h->dsize,
+					    * set->dsize,
 					    GFP_ATOMIC);
-			if (!tmp)
-				return 0;
+			if (!tmp) {
+				ret = 0;
+				goto out;
+			}
 			n->size -= AHASH_INIT_SIZE;
-			memcpy(tmp, n->value, n->size * h->dsize);
+			memcpy(tmp, n->value, n->size * set->dsize);
 			kfree(n->value);
 			n->value = tmp;
 		}
-		return 0;
+		ret = 0;
+		goto out;
 	}
 
-	return -IPSET_ERR_EXIST;
+out:
+	rcu_read_unlock_bh();
+	return ret;
 }
 
 static inline int
@@ -730,8 +777,7 @@
 		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
 {
 	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(data,
-						  (struct htype *)(set->data)),
+		ip_set_update_counter(ext_counter(data, set),
 				      ext, mext, flags);
 	return mtype_do_data_match(data);
 }
@@ -745,25 +791,38 @@
 		 struct ip_set_ext *mext, u32 flags)
 {
 	struct htype *h = set->data;
-	struct htable *t = h->table;
+	struct htable *t = rcu_dereference_bh(h->table);
 	struct hbucket *n;
 	struct mtype_elem *data;
+#if IPSET_NET_COUNT == 2
+	struct mtype_elem orig = *d;
+	int i, j = 0, k;
+#else
 	int i, j = 0;
+#endif
 	u32 key, multi = 0;
-	u8 nets_length = NETS_LENGTH(set->family);
+	u8 nets_length = NLEN(set->family);
 
 	pr_debug("test by nets\n");
-	for (; j < nets_length && h->nets[j].nets && !multi; j++) {
-		mtype_data_netmask(d, h->nets[j].cidr);
+	for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+#if IPSET_NET_COUNT == 2
+		mtype_data_reset_elem(d, &orig);
+		mtype_data_netmask(d, h->nets[j].cidr[0], false);
+		for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+		     k++) {
+			mtype_data_netmask(d, h->nets[k].cidr[1], true);
+#else
+		mtype_data_netmask(d, h->nets[j].cidr[0]);
+#endif
 		key = HKEY(d, h->initval, t->htable_bits);
 		n = hbucket(t, key);
 		for (i = 0; i < n->pos; i++) {
-			data = ahash_data(n, i, h->dsize);
+			data = ahash_data(n, i, set->dsize);
 			if (!mtype_data_equal(data, d, &multi))
 				continue;
 			if (SET_WITH_TIMEOUT(set)) {
 				if (!ip_set_timeout_expired(
-							ext_timeout(data, h)))
+						ext_timeout(data, set)))
 					return mtype_data_match(data, ext,
 								mext, set,
 								flags);
@@ -774,6 +833,9 @@
 				return mtype_data_match(data, ext,
 							mext, set, flags);
 		}
+#if IPSET_NET_COUNT == 2
+		}
+#endif
 	}
 	return 0;
 }
@@ -785,30 +847,41 @@
 	   struct ip_set_ext *mext, u32 flags)
 {
 	struct htype *h = set->data;
-	struct htable *t = h->table;
+	struct htable *t;
 	struct mtype_elem *d = value;
 	struct hbucket *n;
 	struct mtype_elem *data;
-	int i;
+	int i, ret = 0;
 	u32 key, multi = 0;
 
+	rcu_read_lock_bh();
+	t = rcu_dereference_bh(h->table);
 #ifdef IP_SET_HASH_WITH_NETS
 	/* If we test an IP address and not a network address,
 	 * try all possible network sizes */
-	if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
-		return mtype_test_cidrs(set, d, ext, mext, flags);
+	for (i = 0; i < IPSET_NET_COUNT; i++)
+		if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+			break;
+	if (i == IPSET_NET_COUNT) {
+		ret = mtype_test_cidrs(set, d, ext, mext, flags);
+		goto out;
+	}
 #endif
 
 	key = HKEY(d, h->initval, t->htable_bits);
 	n = hbucket(t, key);
 	for (i = 0; i < n->pos; i++) {
-		data = ahash_data(n, i, h->dsize);
+		data = ahash_data(n, i, set->dsize);
 		if (mtype_data_equal(data, d, &multi) &&
 		    !(SET_WITH_TIMEOUT(set) &&
-		      ip_set_timeout_expired(ext_timeout(data, h))))
-			return mtype_data_match(data, ext, mext, set, flags);
+		      ip_set_timeout_expired(ext_timeout(data, set)))) {
+			ret = mtype_data_match(data, ext, mext, set, flags);
+			goto out;
+		}
 	}
-	return 0;
+out:
+	rcu_read_unlock_bh();
+	return ret;
 }
 
 /* Reply a HEADER request: fill out the header part of the set */
@@ -816,18 +889,18 @@
 mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
 	const struct htype *h = set->data;
+	const struct htable *t;
 	struct nlattr *nested;
 	size_t memsize;
 
-	read_lock_bh(&set->lock);
-	memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family));
-	read_unlock_bh(&set->lock);
+	t = rcu_dereference_bh_nfnl(h->table);
+	memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
 	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
-			  htonl(jhash_size(h->table->htable_bits))) ||
+			  htonl(jhash_size(t->htable_bits))) ||
 	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
 		goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
@@ -836,12 +909,9 @@
 		goto nla_put_failure;
 #endif
 	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
-	    ((set->extensions & IPSET_EXT_TIMEOUT) &&
-	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))) ||
-	    ((set->extensions & IPSET_EXT_COUNTER) &&
-	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-			   htonl(IPSET_FLAG_WITH_COUNTERS))))
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
+		goto nla_put_failure;
+	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
@@ -856,11 +926,11 @@
 	   struct sk_buff *skb, struct netlink_callback *cb)
 {
 	const struct htype *h = set->data;
-	const struct htable *t = h->table;
+	const struct htable *t = rcu_dereference_bh_nfnl(h->table);
 	struct nlattr *atd, *nested;
 	const struct hbucket *n;
 	const struct mtype_elem *e;
-	u32 first = cb->args[2];
+	u32 first = cb->args[IPSET_CB_ARG0];
 	/* We assume that one hash bucket fills into one page */
 	void *incomplete;
 	int i;
@@ -869,20 +939,22 @@
 	if (!atd)
 		return -EMSGSIZE;
 	pr_debug("list hash set %s\n", set->name);
-	for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
+	     cb->args[IPSET_CB_ARG0]++) {
 		incomplete = skb_tail_pointer(skb);
-		n = hbucket(t, cb->args[2]);
-		pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+		n = hbucket(t, cb->args[IPSET_CB_ARG0]);
+		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
+			 cb->args[IPSET_CB_ARG0], t, n);
 		for (i = 0; i < n->pos; i++) {
-			e = ahash_data(n, i, h->dsize);
+			e = ahash_data(n, i, set->dsize);
 			if (SET_WITH_TIMEOUT(set) &&
-			    ip_set_timeout_expired(ext_timeout(e, h)))
+			    ip_set_timeout_expired(ext_timeout(e, set)))
 				continue;
 			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
-				 cb->args[2], n, i, e);
+				 cb->args[IPSET_CB_ARG0], n, i, e);
 			nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 			if (!nested) {
-				if (cb->args[2] == first) {
+				if (cb->args[IPSET_CB_ARG0] == first) {
 					nla_nest_cancel(skb, atd);
 					return -EMSGSIZE;
 				} else
@@ -890,43 +962,37 @@
 			}
 			if (mtype_data_list(skb, e))
 				goto nla_put_failure;
-			if (SET_WITH_TIMEOUT(set) &&
-			    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-					  htonl(ip_set_timeout_get(
-						ext_timeout(e, h)))))
-				goto nla_put_failure;
-			if (SET_WITH_COUNTER(set) &&
-			    ip_set_put_counter(skb, ext_counter(e, h)))
+			if (ip_set_put_extensions(skb, set, e, true))
 				goto nla_put_failure;
 			ipset_nest_end(skb, nested);
 		}
 	}
 	ipset_nest_end(skb, atd);
 	/* Set listing finished */
-	cb->args[2] = 0;
+	cb->args[IPSET_CB_ARG0] = 0;
 
 	return 0;
 
 nla_put_failure:
 	nlmsg_trim(skb, incomplete);
-	ipset_nest_end(skb, atd);
-	if (unlikely(first == cb->args[2])) {
+	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
 		pr_warning("Can't list set %s: one bucket does not fit into "
 			   "a message. Please report it!\n", set->name);
-		cb->args[2] = 0;
+		cb->args[IPSET_CB_ARG0] = 0;
 		return -EMSGSIZE;
 	}
+	ipset_nest_end(skb, atd);
 	return 0;
 }
 
 static int
-TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
-	      const struct xt_action_param *par,
-	      enum ipset_adt adt, struct ip_set_adt_opt *opt);
+IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
+	    const struct xt_action_param *par,
+	    enum ipset_adt adt, struct ip_set_adt_opt *opt);
 
 static int
-TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
+	    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
 
 static const struct ip_set_type_variant mtype_variant = {
 	.kadt	= mtype_kadt,
@@ -946,16 +1012,17 @@
 
 #ifdef IP_SET_EMIT_CREATE
 static int
-TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
+IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
+			    struct nlattr *tb[], u32 flags)
 {
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
-	u32 cadt_flags = 0;
 	u8 hbits;
 #ifdef IP_SET_HASH_WITH_NETMASK
 	u8 netmask;
 #endif
 	size_t hsize;
 	struct HTYPE *h;
+	struct htable *t;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -1005,7 +1072,7 @@
 	h->netmask = netmask;
 #endif
 	get_random_bytes(&h->initval, sizeof(h->initval));
-	h->timeout = IPSET_NO_TIMEOUT;
+	set->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
 	hsize = htable_size(hbits);
@@ -1013,91 +1080,37 @@
 		kfree(h);
 		return -ENOMEM;
 	}
-	h->table = ip_set_alloc(hsize);
-	if (!h->table) {
+	t = ip_set_alloc(hsize);
+	if (!t) {
 		kfree(h);
 		return -ENOMEM;
 	}
-	h->table->htable_bits = hbits;
+	t->htable_bits = hbits;
+	rcu_assign_pointer(h->table, t);
 
 	set->data = h;
-	if (set->family ==  NFPROTO_IPV4)
-		set->variant = &TOKEN(HTYPE, 4_variant);
-	else
-		set->variant = &TOKEN(HTYPE, 6_variant);
-
-	if (tb[IPSET_ATTR_CADT_FLAGS])
-		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-		set->extensions |= IPSET_EXT_COUNTER;
-		if (tb[IPSET_ATTR_TIMEOUT]) {
-			h->timeout =
-				ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-			set->extensions |= IPSET_EXT_TIMEOUT;
-			if (set->family == NFPROTO_IPV4) {
-				h->dsize =
-					sizeof(struct TOKEN(HTYPE, 4ct_elem));
-				h->offset[IPSET_OFFSET_TIMEOUT] =
-					offsetof(struct TOKEN(HTYPE, 4ct_elem),
-						 timeout);
-				h->offset[IPSET_OFFSET_COUNTER] =
-					offsetof(struct TOKEN(HTYPE, 4ct_elem),
-						 counter);
-				TOKEN(HTYPE, 4_gc_init)(set,
-					TOKEN(HTYPE, 4_gc));
-			} else {
-				h->dsize =
-					sizeof(struct TOKEN(HTYPE, 6ct_elem));
-				h->offset[IPSET_OFFSET_TIMEOUT] =
-					offsetof(struct TOKEN(HTYPE, 6ct_elem),
-						 timeout);
-				h->offset[IPSET_OFFSET_COUNTER] =
-					offsetof(struct TOKEN(HTYPE, 6ct_elem),
-						 counter);
-				TOKEN(HTYPE, 6_gc_init)(set,
-					TOKEN(HTYPE, 6_gc));
-			}
-		} else {
-			if (set->family == NFPROTO_IPV4) {
-				h->dsize =
-					sizeof(struct TOKEN(HTYPE, 4c_elem));
-				h->offset[IPSET_OFFSET_COUNTER] =
-					offsetof(struct TOKEN(HTYPE, 4c_elem),
-						 counter);
-			} else {
-				h->dsize =
-					sizeof(struct TOKEN(HTYPE, 6c_elem));
-				h->offset[IPSET_OFFSET_COUNTER] =
-					offsetof(struct TOKEN(HTYPE, 6c_elem),
-						 counter);
-			}
-		}
-	} else if (tb[IPSET_ATTR_TIMEOUT]) {
-		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-		set->extensions |= IPSET_EXT_TIMEOUT;
-		if (set->family == NFPROTO_IPV4) {
-			h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem));
-			h->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct TOKEN(HTYPE, 4t_elem),
-					 timeout);
-			TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc));
-		} else {
-			h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem));
-			h->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct TOKEN(HTYPE, 6t_elem),
-					 timeout);
-			TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc));
-		}
+	if (set->family == NFPROTO_IPV4) {
+		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
+		set->dsize = ip_set_elem_len(set, tb,
+				sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
 	} else {
+		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
+		set->dsize = ip_set_elem_len(set, tb,
+				sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+	}
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 		if (set->family == NFPROTO_IPV4)
-			h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem));
+			IPSET_TOKEN(HTYPE, 4_gc_init)(set,
+				IPSET_TOKEN(HTYPE, 4_gc));
 		else
-			h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem));
+			IPSET_TOKEN(HTYPE, 6_gc_init)(set,
+				IPSET_TOKEN(HTYPE, 6_gc));
 	}
 
 	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
-		 set->name, jhash_size(h->table->htable_bits),
-		 h->table->htable_bits, h->maxelem, set->data, h->table);
+		 set->name, jhash_size(t->htable_bits),
+		 t->htable_bits, h->maxelem, set->data, t);
 
 	return 0;
 }
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index c74e6e1..e65fc24 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -23,19 +23,20 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-#define REVISION_MAX	1	/* Counters support */
+#define IPSET_TYPE_REV_MIN	0
+/*				1	   Counters support */
+#define IPSET_TYPE_REV_MAX	2	/* Comments support */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip");
 
 /* Type specific function prefix */
 #define HTYPE		hash_ip
 #define IP_SET_HASH_WITH_NETMASK
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ip4_elem {
@@ -43,22 +44,6 @@
 	__be32 ip;
 };
 
-struct hash_ip4t_elem {
-	__be32 ip;
-	unsigned long timeout;
-};
-
-struct hash_ip4c_elem {
-	__be32 ip;
-	struct ip_set_counter counter;
-};
-
-struct hash_ip4ct_elem {
-	__be32 ip;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -99,7 +84,7 @@
 	const struct hash_ip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ip4_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	__be32 ip;
 
 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
@@ -118,8 +103,8 @@
 	const struct hash_ip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ip4_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip, ip_to, hosts;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, hosts;
 	int ret = 0;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -178,29 +163,13 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 /* Member elements */
 struct hash_ip6_elem {
 	union nf_inet_addr ip;
 };
 
-struct hash_ip6t_elem {
-	union nf_inet_addr ip;
-	unsigned long timeout;
-};
-
-struct hash_ip6c_elem {
-	union nf_inet_addr ip;
-	struct ip_set_counter counter;
-};
-
-struct hash_ip6ct_elem {
-	union nf_inet_addr ip;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -253,7 +222,7 @@
 	const struct hash_ip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ip6_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
 	hash_ip6_netmask(&e.ip, h->netmask);
@@ -270,7 +239,7 @@
 	const struct hash_ip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ip6_elem e = {};
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -304,8 +273,8 @@
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_ip_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -324,6 +293,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 7a2d2bd..525a595 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -24,19 +24,20 @@
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    SCTP and UDPLITE support added */
-#define REVISION_MAX	2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    SCTP and UDPLITE support added */
+/*				2    Counters support added */
+#define IPSET_TYPE_REV_MAX	3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port");
 
 /* Type specific function prefix */
 #define HTYPE		hash_ipport
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipport4_elem {
@@ -46,31 +47,6 @@
 	u8 padding;
 };
 
-struct hash_ipport4t_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	unsigned long timeout;
-};
-
-struct hash_ipport4c_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipport4ct_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -116,10 +92,9 @@
 		  const struct xt_action_param *par,
 		  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-	const struct hash_ipport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &e.port, &e.proto))
@@ -136,8 +111,8 @@
 	const struct hash_ipport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip, ip_to, p = 0, port, port_to;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip, ip_to = 0, p = 0, port, port_to;
 	bool with_ports = false;
 	int ret;
 
@@ -222,7 +197,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipport6_elem {
 	union nf_inet_addr ip;
@@ -231,31 +206,6 @@
 	u8 padding;
 };
 
-struct hash_ipport6t_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	unsigned long timeout;
-};
-
-struct hash_ipport6c_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipport6ct_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -306,10 +256,9 @@
 		  const struct xt_action_param *par,
 		  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-	const struct hash_ipport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport6_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &e.port, &e.proto))
@@ -326,7 +275,7 @@
 	const struct hash_ipport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport6_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port, port_to;
 	bool with_ports = false;
 	int ret;
@@ -396,8 +345,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_ipport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -419,6 +368,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 34e8a1a..f563663 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -24,19 +24,20 @@
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    SCTP and UDPLITE support added */
-#define REVISION_MAX	2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    SCTP and UDPLITE support added */
+/*				2    Counters support added */
+#define IPSET_TYPE_REV_MAX	3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,ip");
 
 /* Type specific function prefix */
 #define HTYPE		hash_ipportip
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_ipportip4_elem {
@@ -47,34 +48,6 @@
 	u8 padding;
 };
 
-struct hash_ipportip4t_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	unsigned long timeout;
-};
-
-struct hash_ipportip4c_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipportip4ct_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 static inline bool
 hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
 			  const struct hash_ipportip4_elem *ip2,
@@ -120,10 +93,9 @@
 		    const struct xt_action_param *par,
 		    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-	const struct hash_ipportip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &e.port, &e.proto))
@@ -141,8 +113,8 @@
 	const struct hash_ipportip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip, ip_to, p = 0, port, port_to;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip, ip_to = 0, p = 0, port, port_to;
 	bool with_ports = false;
 	int ret;
 
@@ -231,7 +203,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportip6_elem {
 	union nf_inet_addr ip;
@@ -241,34 +213,6 @@
 	u8 padding;
 };
 
-struct hash_ipportip6t_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	unsigned long timeout;
-};
-
-struct hash_ipportip6c_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipportip6ct_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 proto;
-	u8 padding;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -319,10 +263,9 @@
 		    const struct xt_action_param *par,
 		    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-	const struct hash_ipportip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip6_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &e.port, &e.proto))
@@ -340,7 +283,7 @@
 	const struct hash_ipportip *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip6_elem e = { };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port, port_to;
 	bool with_ports = false;
 	int ret;
@@ -414,8 +357,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_ipportip_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -437,6 +380,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index f15f3e2..5d87fe8 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -24,15 +24,16 @@
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    SCTP and UDPLITE support added */
-/*			2    Range as input support for IPv4 added */
-/*			3    nomatch flag support added */
-#define REVISION_MAX	4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    SCTP and UDPLITE support added */
+/*				2    Range as input support for IPv4 added */
+/*				3    nomatch flag support added */
+/*				4    Counters support added */
+#define IPSET_TYPE_REV_MAX	5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,net");
 
 /* Type specific function prefix */
@@ -46,7 +47,7 @@
 #define IP_SET_HASH_WITH_PROTO
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipportnet4_elem {
@@ -58,37 +59,6 @@
 	u8 proto;
 };
 
-struct hash_ipportnet4t_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	unsigned long timeout;
-};
-
-struct hash_ipportnet4c_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipportnet4ct_elem {
-	__be32 ip;
-	__be32 ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -170,9 +140,9 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet4_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (adt == IPSET_TEST)
 		e.cidr = HOST_MASK - 1;
@@ -195,9 +165,9 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip, ip_to, p = 0, port, port_to;
-	u32 ip2_from, ip2_to, ip2_last, ip2;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -272,7 +242,7 @@
 		if (ip > ip_to)
 			swap(ip, ip_to);
 	} else if (tb[IPSET_ATTR_CIDR]) {
-		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
 		if (!cidr || cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
@@ -306,9 +276,9 @@
 						       : port;
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
-			ip2 = retried
-			      && ip == ntohl(h->next.ip)
-			      && p == ntohs(h->next.port)
+			ip2 = retried &&
+			      ip == ntohl(h->next.ip) &&
+			      p == ntohs(h->next.port)
 				? ntohl(h->next.ip2) : ip2_from;
 			while (!after(ip2, ip2_to)) {
 				e.ip2 = htonl(ip2);
@@ -328,7 +298,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportnet6_elem {
 	union nf_inet_addr ip;
@@ -339,37 +309,6 @@
 	u8 proto;
 };
 
-struct hash_ipportnet6t_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	unsigned long timeout;
-};
-
-struct hash_ipportnet6c_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	struct ip_set_counter counter;
-};
-
-struct hash_ipportnet6ct_elem {
-	union nf_inet_addr ip;
-	union nf_inet_addr ip2;
-	__be16 port;
-	u8 cidr:7;
-	u8 nomatch:1;
-	u8 proto;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -454,9 +393,9 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet6_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (adt == IPSET_TEST)
 		e.cidr = HOST_MASK - 1;
@@ -479,7 +418,7 @@
 	const struct hash_ipportnet *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port, port_to;
 	bool with_ports = false;
 	u8 cidr;
@@ -574,8 +513,8 @@
 			  IPSET_TYPE_NOMATCH,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_ipportnet_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -600,6 +539,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 223e9f5..8295cf4 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -22,21 +22,22 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    Range as input support for IPv4 added */
-/*			2    nomatch flag support added */
-#define REVISION_MAX	3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    Range as input support for IPv4 added */
+/*				2    nomatch flag support added */
+/*				3    Counters support added */
+#define IPSET_TYPE_REV_MAX	4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net");
 
 /* Type specific function prefix */
 #define HTYPE		hash_net
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_net4_elem {
@@ -46,31 +47,6 @@
 	u8 cidr;
 };
 
-struct hash_net4t_elem {
-	__be32 ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	unsigned long timeout;
-};
-
-struct hash_net4c_elem {
-	__be32 ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	struct ip_set_counter counter;
-};
-
-struct hash_net4ct_elem {
-	__be32 ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -143,9 +119,9 @@
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (e.cidr == 0)
 		return -EINVAL;
@@ -165,8 +141,8 @@
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = { .cidr = HOST_MASK };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip = 0, ip_to, last;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, last;
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -228,7 +204,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_net6_elem {
 	union nf_inet_addr ip;
@@ -237,31 +213,6 @@
 	u8 cidr;
 };
 
-struct hash_net6t_elem {
-	union nf_inet_addr ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	unsigned long timeout;
-};
-
-struct hash_net6c_elem {
-	union nf_inet_addr ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	struct ip_set_counter counter;
-};
-
-struct hash_net6ct_elem {
-	union nf_inet_addr ip;
-	u16 padding0;
-	u8 nomatch;
-	u8 cidr;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -338,9 +289,9 @@
 	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net6_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (e.cidr == 0)
 		return -EINVAL;
@@ -357,10 +308,9 @@
 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
 	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-	const struct hash_net *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net6_elem e = { .cidr = HOST_MASK };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -406,8 +356,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_net_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -425,6 +375,7 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 7d798d5..3f64a66 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -23,14 +23,15 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    nomatch flag support added */
-/*			2    /0 support added */
-#define REVISION_MAX	3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    nomatch flag support added */
+/*				2    /0 support added */
+/*				3    Counters support added */
+#define IPSET_TYPE_REV_MAX	4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
 /* Interface name rbtree */
@@ -134,7 +135,7 @@
 
 #define STREQ(a, b)	(strcmp(a, b) == 0)
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 struct hash_netiface4_elem_hashed {
 	__be32 ip;
@@ -144,7 +145,7 @@
 	u8 elem;
 };
 
-/* Member elements without timeout */
+/* Member elements */
 struct hash_netiface4_elem {
 	__be32 ip;
 	u8 physdev;
@@ -154,37 +155,6 @@
 	const char *iface;
 };
 
-struct hash_netiface4t_elem {
-	__be32 ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	unsigned long timeout;
-};
-
-struct hash_netiface4c_elem {
-	__be32 ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	struct ip_set_counter counter;
-};
-
-struct hash_netiface4ct_elem {
-	__be32 ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -265,10 +235,10 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 		.elem = 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	int ret;
 
 	if (e.cidr == 0)
@@ -319,8 +289,8 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 ip = 0, ip_to, last;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, last;
 	char iface[IFNAMSIZ];
 	int ret;
 
@@ -399,7 +369,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netiface6_elem_hashed {
 	union nf_inet_addr ip;
@@ -418,37 +388,6 @@
 	const char *iface;
 };
 
-struct hash_netiface6t_elem {
-	union nf_inet_addr ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	unsigned long timeout;
-};
-
-struct hash_netiface6c_elem {
-	union nf_inet_addr ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	struct ip_set_counter counter;
-};
-
-struct hash_netiface6ct_elem {
-	union nf_inet_addr ip;
-	u8 physdev;
-	u8 cidr;
-	u8 nomatch;
-	u8 elem;
-	const char *iface;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -534,10 +473,10 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface6_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
 		.elem = 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	int ret;
 
 	if (e.cidr == 0)
@@ -584,7 +523,7 @@
 	struct hash_netiface *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	char iface[IFNAMSIZ];
 	int ret;
 
@@ -645,8 +584,8 @@
 			  IPSET_TYPE_NOMATCH,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_netiface_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -668,6 +607,7 @@
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
new file mode 100644
index 0000000..2bc2dec
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -0,0 +1,481 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN	0
+#define IPSET_TYPE_REV_MAX	0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,net");
+
+/* Type specific function prefix */
+#define HTYPE		hash_netnet
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variant */
+
+/* Member elements  */
+struct hash_netnet4_elem {
+	union {
+		__be32 ip[2];
+		__be64 ipcmp;
+	};
+	u8 nomatch;
+	union {
+		u8 cidr[2];
+		u16 ccmp;
+	};
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
+		     const struct hash_netnet4_elem *ip2,
+		     u32 *multi)
+{
+	return ip1->ipcmp == ip2->ipcmp &&
+	       ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
+{
+	return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
+{
+	elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
+{
+	swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
+			  struct hash_netnet4_elem *orig)
+{
+	elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
+{
+	if (inner) {
+		elem->ip[1] &= ip_set_netmask(cidr);
+		elem->cidr[1] = cidr;
+	} else {
+		elem->ip[0] &= ip_set_netmask(cidr);
+		elem->cidr[0] = cidr;
+	}
+}
+
+static bool
+hash_netnet4_data_list(struct sk_buff *skb,
+		    const struct hash_netnet4_elem *data)
+{
+	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static inline void
+hash_netnet4_data_next(struct hash_netnet4_elem *next,
+		    const struct hash_netnet4_elem *d)
+{
+	next->ipcmp = d->ipcmp;
+}
+
+#define MTYPE		hash_netnet4
+#define PF		4
+#define HOST_MASK	32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+	const struct hash_netnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netnet4_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	if (adt == IPSET_TEST)
+		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
+	e.ip[0] &= ip_set_netmask(e.cidr[0]);
+	e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	const struct hash_netnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netnet4_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, last;
+	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+	u8 cidr, cidr2;
+	int ret;
+
+	e.cidr[0] = e.cidr[1] = HOST_MASK;
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+	      ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+	      ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
+		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!cidr || cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+		e.cidr[0] = cidr;
+	}
+
+	if (tb[IPSET_ATTR_CIDR2]) {
+		cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+		if (!cidr2 || cidr2 > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+		e.cidr[1] = cidr2;
+	}
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_NOMATCH)
+			flags |= (IPSET_FLAG_NOMATCH << 16);
+	}
+
+	if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] &&
+				   tb[IPSET_ATTR_IP2_TO])) {
+		e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+		e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+		ret = adtfn(set, &e, &ext, &ext, flags);
+		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+		       ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	ip_to = ip;
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	}
+
+	ip2_to = ip2_from;
+	if (tb[IPSET_ATTR_IP2_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+		if (ret)
+			return ret;
+		if (ip2_to < ip2_from)
+			swap(ip2_from, ip2_to);
+		if (ip2_from + UINT_MAX == ip2_to)
+			return -IPSET_ERR_HASH_RANGE;
+
+	}
+
+	if (retried)
+		ip = ntohl(h->next.ip[0]);
+
+	while (!after(ip, ip_to)) {
+		e.ip[0] = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+		e.cidr[0] = cidr;
+		ip2 = (retried &&
+		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
+						   : ip2_from;
+		while (!after(ip2, ip2_to)) {
+			e.ip[1] = htonl(ip2);
+			last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
+			e.cidr[1] = cidr2;
+			ret = adtfn(set, &e, &ext, &ext, flags);
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+			ip2 = last2 + 1;
+		}
+		ip = last + 1;
+	}
+	return ret;
+}
+
+/* IPv6 variant */
+
+struct hash_netnet6_elem {
+	union nf_inet_addr ip[2];
+	u8 nomatch;
+	union {
+		u8 cidr[2];
+		u16 ccmp;
+	};
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
+		     const struct hash_netnet6_elem *ip2,
+		     u32 *multi)
+{
+	return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+	       ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+	       ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
+{
+	return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
+{
+	elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
+{
+	swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
+			  struct hash_netnet6_elem *orig)
+{
+	elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
+{
+	if (inner) {
+		ip6_netmask(&elem->ip[1], cidr);
+		elem->cidr[1] = cidr;
+	} else {
+		ip6_netmask(&elem->ip[0], cidr);
+		elem->cidr[0] = cidr;
+	}
+}
+
+static bool
+hash_netnet6_data_list(struct sk_buff *skb,
+		    const struct hash_netnet6_elem *data)
+{
+	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static inline void
+hash_netnet6_data_next(struct hash_netnet4_elem *next,
+		    const struct hash_netnet6_elem *d)
+{
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE		hash_netnet6
+#define PF		6
+#define HOST_MASK	128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+	const struct hash_netnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netnet6_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	if (adt == IPSET_TEST)
+		e.ccmp = (HOST_MASK << (sizeof(u8)*8)) | HOST_MASK;
+
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
+	ip6_netmask(&e.ip[0], e.cidr[0]);
+	ip6_netmask(&e.ip[1], e.cidr[1]);
+
+	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netnet6_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	int ret;
+
+	e.cidr[0] = e.cidr[1] = HOST_MASK;
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+	      ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+	      ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+	if (tb[IPSET_ATTR_CIDR2])
+		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+	if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+	    e.cidr[1] > HOST_MASK)
+		return -IPSET_ERR_INVALID_CIDR;
+
+	ip6_netmask(&e.ip[0], e.cidr[0]);
+	ip6_netmask(&e.ip[1], e.cidr[1]);
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_NOMATCH)
+			flags |= (IPSET_FLAG_NOMATCH << 16);
+	}
+
+	ret = adtfn(set, &e, &ext, &ext, flags);
+
+	return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+	       ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static struct ip_set_type hash_netnet_type __read_mostly = {
+	.name		= "hash:net,net",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= NFPROTO_UNSPEC,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
+	.create		= hash_netnet_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_CIDR2]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
+		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_netnet_init(void)
+{
+	return ip_set_type_register(&hash_netnet_type);
+}
+
+static void __exit
+hash_netnet_fini(void)
+{
+	ip_set_type_unregister(&hash_netnet_type);
+}
+
+module_init(hash_netnet_init);
+module_exit(hash_netnet_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 09d6690..7097fb0 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -23,15 +23,16 @@
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN	0
-/*			1    SCTP and UDPLITE support added */
-/*			2    Range as input support for IPv4 added */
-/*			3    nomatch flag support added */
-#define REVISION_MAX	4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    SCTP and UDPLITE support added */
+/*				2    Range as input support for IPv4 added */
+/*				3    nomatch flag support added */
+/*				4    Counters support added */
+#define IPSET_TYPE_REV_MAX	5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,port");
 
 /* Type specific function prefix */
@@ -45,7 +46,7 @@
  */
 #define IP_SET_HASH_WITH_NETS_PACKED
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_netport4_elem {
@@ -56,34 +57,6 @@
 	u8 nomatch:1;
 };
 
-struct hash_netport4t_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	unsigned long timeout;
-};
-
-struct hash_netport4c_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	struct ip_set_counter counter;
-};
-
-struct hash_netport4ct_elem {
-	__be32 ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -162,9 +135,9 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (adt == IPSET_TEST)
 		e.cidr = HOST_MASK - 1;
@@ -186,8 +159,8 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-	u32 port, port_to, p = 0, ip = 0, ip_to, last;
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -287,7 +260,7 @@
 	return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netport6_elem {
 	union nf_inet_addr ip;
@@ -297,34 +270,6 @@
 	u8 nomatch:1;
 };
 
-struct hash_netport6t_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	unsigned long timeout;
-};
-
-struct hash_netport6c_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	struct ip_set_counter counter;
-};
-
-struct hash_netport6ct_elem {
-	union nf_inet_addr ip;
-	__be16 port;
-	u8 proto;
-	u8 cidr:7;
-	u8 nomatch:1;
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -407,9 +352,9 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport6_elem e = {
-		.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
+		.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
 	};
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	if (adt == IPSET_TEST)
 		e.cidr = HOST_MASK - 1;
@@ -431,7 +376,7 @@
 	const struct hash_netport *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 port, port_to;
 	bool with_ports = false;
 	u8 cidr;
@@ -518,8 +463,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= hash_netport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -542,6 +487,7 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
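[Editor's aside: the `- 1` in the kadt/uadt initializers above exists because this element packs the prefix length into a 7-bit field beside the nomatch bit, so the stored value is the real prefix minus one; IP_SET_INIT_CIDR supplies the host mask when the set does not yet contain a network of that kind. A small userspace rendition; the macro body matches my reading of ip_set.h, so verify against your tree:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IP_SET_INIT_CIDR(cidr, host_mask) ((cidr) ? (cidr) : (host_mask))
    #define HOST_MASK 32

    struct elem {                   /* cidr packed beside nomatch, as above */
            uint8_t cidr:7;         /* stores prefix length minus one */
            uint8_t nomatch:1;
    };

    int main(void)
    {
            uint8_t first_net = 0;  /* empty set: no networks yet */
            struct elem e = {
                    .cidr = IP_SET_INIT_CIDR(first_net, HOST_MASK) - 1
            };

            assert(e.cidr == 31);   /* /32 stored as 31, fits in 7 bits */
            first_net = 24;
            e.cidr = IP_SET_INIT_CIDR(first_net, HOST_MASK) - 1;
            assert(e.cidr == 23);
            printf("stored %d, meaning /%d\n", e.cidr, e.cidr + 1);
            return 0;
    }
]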
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
new file mode 100644
index 0000000..703d119
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -0,0 +1,586 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN	0
+#define IPSET_TYPE_REV_MAX	0 /* Comments support added */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,port,net");
+
+/* Type specific function prefix */
+#define HTYPE		hash_netportnet
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variant */
+
+/* Member elements */
+struct hash_netportnet4_elem {
+	union {
+		__be32 ip[2];
+		__be64 ipcmp;
+	};
+	__be16 port;
+	union {
+		u8 cidr[2];
+		u16 ccmp;
+	};
+	u8 nomatch:1;
+	u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
+			   const struct hash_netportnet4_elem *ip2,
+			   u32 *multi)
+{
+	return ip1->ipcmp == ip2->ipcmp &&
+	       ip1->ccmp == ip2->ccmp &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
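[Editor's aside: hash_netportnet4_data_equal() gets away with two integer compares for two addresses plus two prefix lengths because the element overlays them with the ipcmp/ccmp unions above. A standalone rendition of the trick; C11 anonymous unions, assuming the padding-free layout of these exact-width unions that the kernel relies on:

    #include <assert.h>
    #include <stdint.h>

    /* The address pair aliases one 64-bit word (ipcmp) and the prefix
     * pair one 16-bit word (ccmp), so equality is two compares, not four. */
    struct elem {
            union {
                    uint32_t ip[2];
                    uint64_t ipcmp;
            };
            union {
                    uint8_t cidr[2];
                    uint16_t ccmp;
            };
    };

    int main(void)
    {
            struct elem a = { .ip = { 0x0a000001, 0xc0a80001 },
                              .cidr = { 24, 16 } };
            struct elem b = a;

            assert(a.ipcmp == b.ipcmp && a.ccmp == b.ccmp);
            b.cidr[1] = 8;                  /* one field differs ... */
            assert(a.ccmp != b.ccmp);       /* ... combined compare sees it */
            return 0;
    }
]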
+
+static inline int
+hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
+{
+	return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
+{
+	elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
+{
+	swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
+				struct hash_netportnet4_elem *orig)
+{
+	elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
+			      u8 cidr, bool inner)
+{
+	if (inner) {
+		elem->ip[1] &= ip_set_netmask(cidr);
+		elem->cidr[1] = cidr;
+	} else {
+		elem->ip[0] &= ip_set_netmask(cidr);
+		elem->cidr[0] = cidr;
+	}
+}
+
+static bool
+hash_netportnet4_data_list(struct sk_buff *skb,
+			  const struct hash_netportnet4_elem *data)
+{
+	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static inline void
+hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
+			  const struct hash_netportnet4_elem *d)
+{
+	next->ipcmp = d->ipcmp;
+	next->port = d->port;
+}
+
+#define MTYPE		hash_netportnet4
+#define PF		4
+#define HOST_MASK	32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+	const struct hash_netportnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netportnet4_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	if (adt == IPSET_TEST)
+		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+				 &e.port, &e.proto))
+		return -EINVAL;
+
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
+	e.ip[0] &= ip_set_netmask(e.cidr[0]);
+	e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	const struct hash_netportnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netportnet4_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+	bool with_ports = false;
+	u8 cidr, cidr2;
+	int ret;
+
+	e.cidr[0] = e.cidr[1] = HOST_MASK;
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+	      ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+	      ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
+		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!cidr || cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+		e.cidr[0] = cidr;
+	}
+
+	if (tb[IPSET_ATTR_CIDR2]) {
+		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+		if (!cidr || cidr > HOST_MASK)
+			return -IPSET_ERR_INVALID_CIDR;
+		e.cidr[1] = cidr;
+	}
+
+	if (tb[IPSET_ATTR_PORT])
+		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+		with_ports = ip_set_proto_with_ports(e.proto);
+
+		if (e.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	if (!(with_ports || e.proto == IPPROTO_ICMP))
+		e.port = 0;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_NOMATCH)
+			flags |= (IPSET_FLAG_NOMATCH << 16);
+	}
+
+	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+	if (adt == IPSET_TEST ||
+	    !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
+		e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+		e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+		ret = adtfn(set, &e, &ext, &ext, flags);
+		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+		       ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	ip_to = ip;
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to)
+			swap(ip, ip_to);
+		if (unlikely(ip + UINT_MAX == ip_to))
+			return -IPSET_ERR_HASH_RANGE;
+	}
+
+	port_to = port = ntohs(e.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port > port_to)
+			swap(port, port_to);
+	}
+
+	ip2_to = ip2_from;
+	if (tb[IPSET_ATTR_IP2_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+		if (ret)
+			return ret;
+		if (ip2_from > ip2_to)
+			swap(ip2_from, ip2_to);
+		if (unlikely(ip2_from + UINT_MAX == ip2_to))
+			return -IPSET_ERR_HASH_RANGE;
+	}
+
+	if (retried)
+		ip = ntohl(h->next.ip[0]);
+
+	while (!after(ip, ip_to)) {
+		e.ip[0] = htonl(ip);
+		ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+		e.cidr[0] = cidr;
+		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
+							  : port;
+		for (; p <= port_to; p++) {
+			e.port = htons(p);
+			ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
+			       p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
+							 : ip2_from;
+			while (!after(ip2, ip2_to)) {
+				e.ip[1] = htonl(ip2);
+				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+								&cidr2);
+				e.cidr[1] = cidr2;
+				ret = adtfn(set, &e, &ext, &ext, flags);
+				if (ret && !ip_set_eexist(ret, flags))
+					return ret;
+				else
+					ret = 0;
+				ip2 = ip2_last + 1;
+			}
+		}
+		ip = ip_last + 1;
+	}
+	return ret;
+}
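[Editor's aside: the nested loops above walk an IPv4 range by repeatedly carving off the largest aligned power-of-two block via ip_set_range_to_cidr(). A userspace sketch of that decomposition; my simplified reading of the helper, host byte order, not the kernel code itself:

    #include <stdio.h>
    #include <stdint.h>

    /* Emit the largest power-of-two block that starts aligned at `from`
     * and stays within [from, to]; return the block's last address. */
    static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
    {
            uint8_t prefix = 32;
            uint32_t block = 1;

            while (prefix > 0) {
                    uint32_t next = block << 1;

                    if (next == 0 || (from & (next - 1)) != 0 ||
                        from + (next - 1) > to)
                            break;
                    block = next;
                    prefix--;
            }
            *cidr = prefix;
            return from + (block - 1);
    }

    int main(void)
    {
            uint32_t from = 0x0a000001, to = 0x0a00000a; /* 10.0.0.1-.10 */
            uint8_t cidr;

            for (;;) {
                    uint32_t last = range_to_cidr(from, to, &cidr);

                    printf("%u.%u.%u.%u/%u\n", from >> 24,
                           (from >> 16) & 0xff, (from >> 8) & 0xff,
                           from & 0xff, cidr);
                    if (last >= to)
                            break;
                    from = last + 1;
            }
            return 0;       /* prints a /32, /31, /30, /31, /32 */
    }
]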
+
+/* IPv6 variant */
+
+struct hash_netportnet6_elem {
+	union nf_inet_addr ip[2];
+	__be16 port;
+	union {
+		u8 cidr[2];
+		u16 ccmp;
+	};
+	u8 nomatch:1;
+	u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
+			   const struct hash_netportnet6_elem *ip2,
+			   u32 *multi)
+{
+	return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+	       ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+	       ip1->ccmp == ip2->ccmp &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
+{
+	return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
+{
+	elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
+{
+	swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
+				struct hash_netportnet6_elem *orig)
+{
+	elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
+			      u8 cidr, bool inner)
+{
+	if (inner) {
+		ip6_netmask(&elem->ip[1], cidr);
+		elem->cidr[1] = cidr;
+	} else {
+		ip6_netmask(&elem->ip[0], cidr);
+		elem->cidr[0] = cidr;
+	}
+}
+
+static bool
+hash_netportnet6_data_list(struct sk_buff *skb,
+			  const struct hash_netportnet6_elem *data)
+{
+	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static inline void
+hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
+			  const struct hash_netportnet6_elem *d)
+{
+	next->port = d->port;
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE		hash_netportnet6
+#define PF		6
+#define HOST_MASK	128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+	const struct hash_netportnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netportnet6_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+	e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
+	e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
+	if (adt == IPSET_TEST)
+		e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
+
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+				 &e.port, &e.proto))
+		return -EINVAL;
+
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
+	ip6_netmask(&e.ip[0], e.cidr[0]);
+	ip6_netmask(&e.ip[1], e.cidr[1]);
+
+	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	const struct hash_netportnet *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netportnet6_elem e = { };
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+	u32 port, port_to;
+	bool with_ports = false;
+	int ret;
+
+	e.cidr[0] = e.cidr[1] = HOST_MASK;
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+	      ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+	      ip_set_get_extensions(set, tb, &ext);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+	if (tb[IPSET_ATTR_CIDR2])
+		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+	if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+		     e.cidr[1] > HOST_MASK))
+		return -IPSET_ERR_INVALID_CIDR;
+
+	ip6_netmask(&e.ip[0], e.cidr[0]);
+	ip6_netmask(&e.ip[1], e.cidr[1]);
+
+	if (tb[IPSET_ATTR_PORT])
+		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+		with_ports = ip_set_proto_with_ports(e.proto);
+
+		if (e.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
+		e.port = 0;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_NOMATCH)
+			flags |= (IPSET_FLAG_NOMATCH << 16);
+	}
+
+	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &e, &ext, &ext, flags);
+		return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+		       ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(e.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	if (retried)
+		port = ntohs(h->next.port);
+	for (; port <= port_to; port++) {
+		e.port = htons(port);
+		ret = adtfn(set, &e, &ext, &ext, flags);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+static struct ip_set_type hash_netportnet_type __read_mostly = {
+	.name		= "hash:net,port,net",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+			  IPSET_TYPE_NOMATCH,
+	.dimension	= IPSET_DIM_THREE,
+	.family		= NFPROTO_UNSPEC,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
+	.create		= hash_netportnet_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_CIDR2]	= { .type = NLA_U8 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
+		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_netportnet_init(void)
+{
+	return ip_set_type_register(&hash_netportnet_type);
+}
+
+static void __exit
+hash_netportnet_fini(void)
+{
+	ip_set_type_unregister(&hash_netportnet_type);
+}
+
+module_init(hash_netportnet_init);
+module_exit(hash_netportnet_fini);
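[Editor's aside: both uadt paths in this file shift IPSET_FLAG_NOMATCH into the upper half of the generic flags word (`flags |= (IPSET_FLAG_NOMATCH << 16)`), and the data_set_flags() helpers read it back from there: the lower 16 bits carry command flags, the upper 16 the per-element CADT flags. A runnable rendition; the flag values are as I recall them from the uapi header, so verify before relying on them:

    #include <assert.h>
    #include <stdint.h>

    #define IPSET_FLAG_EXIST        (1 << 0)  /* command flag, lower half */
    #define IPSET_FLAG_NOMATCH      (1 << 2)  /* cadt flag, upper half */

    int main(void)
    {
            uint32_t flags = IPSET_FLAG_EXIST;              /* command flag */
            flags |= (uint32_t)IPSET_FLAG_NOMATCH << 16;    /* element flag */

            /* what hash_netportnet?_data_set_flags() extracts: */
            assert(!!((flags >> 16) & IPSET_FLAG_NOMATCH) == 1);
            assert(flags & IPSET_FLAG_EXIST);   /* lower half untouched */
            return 0;
    }
]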
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 979b8c9..3e2317f 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -15,12 +15,13 @@
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_list.h>
 
-#define REVISION_MIN	0
-#define REVISION_MAX	1 /* Counters support added */
+#define IPSET_TYPE_REV_MIN	0
+/*				1    Counters support added */
+#define IPSET_TYPE_REV_MAX	2 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_list:set");
 
 /* Member elements  */
@@ -28,28 +29,6 @@
 	ip_set_id_t id;
 };
 
-struct sett_elem {
-	struct {
-		ip_set_id_t id;
-	} __attribute__ ((aligned));
-	unsigned long timeout;
-};
-
-struct setc_elem {
-	struct {
-		ip_set_id_t id;
-	} __attribute__ ((aligned));
-	struct ip_set_counter counter;
-};
-
-struct setct_elem {
-	struct {
-		ip_set_id_t id;
-	} __attribute__ ((aligned));
-	struct ip_set_counter counter;
-	unsigned long timeout;
-};
-
 struct set_adt_elem {
 	ip_set_id_t id;
 	ip_set_id_t refid;
@@ -58,24 +37,14 @@
 
 /* Type structure */
 struct list_set {
-	size_t dsize;		/* element size */
-	size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
 	u32 size;		/* size of set list array */
-	u32 timeout;		/* timeout value */
 	struct timer_list gc;	/* garbage collection */
+	struct net *net;	/* namespace */
 	struct set_elem members[0]; /* the set members */
 };
 
-static inline struct set_elem *
-list_set_elem(const struct list_set *map, u32 id)
-{
-	return (struct set_elem *)((void *)map->members + id * map->dsize);
-}
-
-#define ext_timeout(e, m)	\
-(unsigned long *)((void *)(e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)	\
-(struct ip_set_counter *)((void *)(e) + (m)->offset[IPSET_OFFSET_COUNTER])
+#define list_set_elem(set, map, id)	\
+	(struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)
 
 static int
 list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
@@ -92,16 +61,16 @@
 	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
 		opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			return 0;
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, map)))
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		ret = ip_set_test(e->id, skb, par, opt);
 		if (ret > 0) {
 			if (SET_WITH_COUNTER(set))
-				ip_set_update_counter(ext_counter(e, map),
+				ip_set_update_counter(ext_counter(e, set),
 						      ext, &opt->ext,
 						      cmdflags);
 			return ret;
@@ -121,11 +90,11 @@
 	int ret;
 
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			return 0;
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, map)))
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		ret = ip_set_add(e->id, skb, par, opt);
 		if (ret == 0)
@@ -145,11 +114,11 @@
 	int ret;
 
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			return 0;
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, map)))
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		ret = ip_set_del(e->id, skb, par, opt);
 		if (ret == 0)
@@ -163,8 +132,7 @@
 	      const struct xt_action_param *par,
 	      enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-	struct list_set *map = set->data;
-	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
 	switch (adt) {
 	case IPSET_TEST:
@@ -188,10 +156,10 @@
 	if (i >= map->size)
 		return 0;
 
-	e = list_set_elem(map, i);
+	e = list_set_elem(set, map, i);
 	return !!(e->id == id &&
 		 !(SET_WITH_TIMEOUT(set) &&
-		   ip_set_timeout_expired(ext_timeout(e, map))));
+		   ip_set_timeout_expired(ext_timeout(e, set))));
 }
 
 static int
@@ -199,28 +167,36 @@
 	     const struct ip_set_ext *ext)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e = list_set_elem(map, i);
+	struct set_elem *e = list_set_elem(set, map, i);
 
 	if (e->id != IPSET_INVALID_ID) {
-		if (i == map->size - 1)
+		if (i == map->size - 1) {
 			/* Last element replaced: e.g. add new,before,last */
-			ip_set_put_byindex(e->id);
-		else {
-			struct set_elem *x = list_set_elem(map, map->size - 1);
+			ip_set_put_byindex(map->net, e->id);
+			ip_set_ext_destroy(set, e);
+		} else {
+			struct set_elem *x = list_set_elem(set, map,
+							   map->size - 1);
 
 			/* Last element pushed off */
-			if (x->id != IPSET_INVALID_ID)
-				ip_set_put_byindex(x->id);
-			memmove(list_set_elem(map, i + 1), e,
-				map->dsize * (map->size - (i + 1)));
+			if (x->id != IPSET_INVALID_ID) {
+				ip_set_put_byindex(map->net, x->id);
+				ip_set_ext_destroy(set, x);
+			}
+			memmove(list_set_elem(set, map, i + 1), e,
+				set->dsize * (map->size - (i + 1)));
+			/* Extensions must be initialized to zero */
+			memset(e, 0, set->dsize);
 		}
 	}
 
 	e->id = d->id;
 	if (SET_WITH_TIMEOUT(set))
-		ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
 	if (SET_WITH_COUNTER(set))
-		ip_set_init_counter(ext_counter(e, map), ext);
+		ip_set_init_counter(ext_counter(e, set), ext);
+	if (SET_WITH_COMMENT(set))
+		ip_set_init_comment(ext_comment(e, set), ext);
 	return 0;
 }
 
@@ -228,16 +204,17 @@
 list_set_del(struct ip_set *set, u32 i)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e = list_set_elem(map, i);
+	struct set_elem *e = list_set_elem(set, map, i);
 
-	ip_set_put_byindex(e->id);
+	ip_set_put_byindex(map->net, e->id);
+	ip_set_ext_destroy(set, e);
 
 	if (i < map->size - 1)
-		memmove(e, list_set_elem(map, i + 1),
-			map->dsize * (map->size - (i + 1)));
+		memmove(e, list_set_elem(set, map, i + 1),
+			set->dsize * (map->size - (i + 1)));
 
 	/* Last element */
-	e = list_set_elem(map, map->size - 1);
+	e = list_set_elem(set, map, map->size - 1);
 	e->id = IPSET_INVALID_ID;
 	return 0;
 }
@@ -247,13 +224,16 @@
 {
 	struct list_set *map = set->data;
 	struct set_elem *e;
-	u32 i;
+	u32 i = 0;
 
-	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+	while (i < map->size) {
+		e = list_set_elem(set, map, i);
 		if (e->id != IPSET_INVALID_ID &&
-		    ip_set_timeout_expired(ext_timeout(e, map)))
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			list_set_del(set, i);
+			/* Check element moved to position i in next loop */
+		else
+			i++;
 	}
 }
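[Editor's aside: the rewritten set_cleanup_entries() above switches from a for loop to a while loop because list_set_del() memmoves the tail down over the deleted slot; after a deletion, index i already names the next element, so advancing would skip one. The same pattern in standalone C:

    #include <stdio.h>
    #include <string.h>

    #define SIZE 6
    static int members[SIZE] = { 1, -2, -3, 4, -5, 6 }; /* negative = expired */

    static void del(int i, int n)
    {
            /* shift the tail down over slot i, as list_set_del() does */
            memmove(&members[i], &members[i + 1], (n - i - 1) * sizeof(int));
            members[n - 1] = 0;     /* invalidate the last slot */
    }

    int main(void)
    {
            int i = 0;

            while (i < SIZE) {
                    if (members[i] < 0)
                            del(i, SIZE);   /* slot i now holds the next one */
                    else
                            i++;
            }
            for (i = 0; i < SIZE; i++)
                    printf("%d ", members[i]);
            printf("\n");   /* 1 4 6 0 0 0 */
            return 0;
    }
]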
 
@@ -268,11 +248,11 @@
 	int ret;
 
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			return 0;
 		else if (SET_WITH_TIMEOUT(set) &&
-			 ip_set_timeout_expired(ext_timeout(e, map)))
+			 ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		else if (e->id != d->id)
 			continue;
@@ -299,14 +279,14 @@
 	bool flag_exist = flags & IPSET_FLAG_EXIST;
 	u32 i, ret = 0;
 
+	if (SET_WITH_TIMEOUT(set))
+		set_cleanup_entries(set);
+
 	/* Check already added element */
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			goto insert;
-		else if (SET_WITH_TIMEOUT(set) &&
-			 ip_set_timeout_expired(ext_timeout(e, map)))
-			continue;
 		else if (e->id != d->id)
 			continue;
 
@@ -319,18 +299,22 @@
 			/* Can't re-add */
 			return -IPSET_ERR_EXIST;
 		/* Update extensions */
+		ip_set_ext_destroy(set, e);
+
 		if (SET_WITH_TIMEOUT(set))
-			ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+			ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
 		if (SET_WITH_COUNTER(set))
-			ip_set_init_counter(ext_counter(e, map), ext);
+			ip_set_init_counter(ext_counter(e, set), ext);
+		if (SET_WITH_COMMENT(set))
+			ip_set_init_comment(ext_comment(e, set), ext);
 		/* Set is already added to the list */
-		ip_set_put_byindex(d->id);
+		ip_set_put_byindex(map->net, d->id);
 		return 0;
 	}
 insert:
 	ret = -IPSET_ERR_LIST_FULL;
 	for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
 				: list_set_add(set, i, d, ext);
@@ -355,12 +339,12 @@
 	u32 i;
 
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			return d->before != 0 ? -IPSET_ERR_REF_EXIST
 					      : -IPSET_ERR_EXIST;
 		else if (SET_WITH_TIMEOUT(set) &&
-			 ip_set_timeout_expired(ext_timeout(e, map)))
+			 ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		else if (e->id != d->id)
 			continue;
@@ -386,7 +370,7 @@
 	struct list_set *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
-	struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	struct ip_set *s;
 	int ret = 0;
 
@@ -403,7 +387,7 @@
 	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
-	e.id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
 	if (e.id == IPSET_INVALID_ID)
 		return -IPSET_ERR_NAME;
 	/* "Loop detection" */
@@ -423,7 +407,8 @@
 	}
 
 	if (tb[IPSET_ATTR_NAMEREF]) {
-		e.refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+		e.refid = ip_set_get_byname(map->net,
+					    nla_data(tb[IPSET_ATTR_NAMEREF]),
 					    &s);
 		if (e.refid == IPSET_INVALID_ID) {
 			ret = -IPSET_ERR_NAMEREF;
@@ -439,9 +424,9 @@
 
 finish:
 	if (e.refid != IPSET_INVALID_ID)
-		ip_set_put_byindex(e.refid);
+		ip_set_put_byindex(map->net, e.refid);
 	if (adt != IPSET_ADD || ret)
-		ip_set_put_byindex(e.id);
+		ip_set_put_byindex(map->net, e.id);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -454,9 +439,10 @@
 	u32 i;
 
 	for (i = 0; i < map->size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		if (e->id != IPSET_INVALID_ID) {
-			ip_set_put_byindex(e->id);
+			ip_set_put_byindex(map->net, e->id);
+			ip_set_ext_destroy(set, e);
 			e->id = IPSET_INVALID_ID;
 		}
 	}
@@ -485,14 +471,11 @@
 	if (!nested)
 		goto nla_put_failure;
 	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
-	    (SET_WITH_TIMEOUT(set) &&
-	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-	    (SET_WITH_COUNTER(set) &&
-	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-			   htonl(IPSET_FLAG_WITH_COUNTERS))) ||
 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-			  htonl(sizeof(*map) + map->size * map->dsize)))
+			  htonl(sizeof(*map) + map->size * set->dsize)))
+		goto nla_put_failure;
+	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
@@ -507,19 +490,20 @@
 {
 	const struct list_set *map = set->data;
 	struct nlattr *atd, *nested;
-	u32 i, first = cb->args[2];
+	u32 i, first = cb->args[IPSET_CB_ARG0];
 	const struct set_elem *e;
 
 	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
 	if (!atd)
 		return -EMSGSIZE;
-	for (; cb->args[2] < map->size; cb->args[2]++) {
-		i = cb->args[2];
-		e = list_set_elem(map, i);
+	for (; cb->args[IPSET_CB_ARG0] < map->size;
+	     cb->args[IPSET_CB_ARG0]++) {
+		i = cb->args[IPSET_CB_ARG0];
+		e = list_set_elem(set, map, i);
 		if (e->id == IPSET_INVALID_ID)
 			goto finish;
 		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, map)))
+		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
 		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 		if (!nested) {
@@ -530,31 +514,25 @@
 				goto nla_put_failure;
 		}
 		if (nla_put_string(skb, IPSET_ATTR_NAME,
-				   ip_set_name_byindex(e->id)))
+				   ip_set_name_byindex(map->net, e->id)))
 			goto nla_put_failure;
-		if (SET_WITH_TIMEOUT(set) &&
-		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-				  htonl(ip_set_timeout_get(
-						ext_timeout(e, map)))))
-			goto nla_put_failure;
-		if (SET_WITH_COUNTER(set) &&
-		    ip_set_put_counter(skb, ext_counter(e, map)))
+		if (ip_set_put_extensions(skb, set, e, true))
 			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 finish:
 	ipset_nest_end(skb, atd);
 	/* Set listing finished */
-	cb->args[2] = 0;
+	cb->args[IPSET_CB_ARG0] = 0;
 	return 0;
 
 nla_put_failure:
 	nla_nest_cancel(skb, nested);
-	ipset_nest_end(skb, atd);
 	if (unlikely(i == first)) {
-		cb->args[2] = 0;
+		cb->args[IPSET_CB_ARG0] = 0;
 		return -EMSGSIZE;
 	}
+	ipset_nest_end(skb, atd);
 	return 0;
 }
 
@@ -565,7 +543,7 @@
 	const struct list_set *y = b->data;
 
 	return x->size == y->size &&
-	       x->timeout == y->timeout &&
+	       a->timeout == b->timeout &&
 	       a->extensions == b->extensions;
 }
 
@@ -594,7 +572,7 @@
 	set_cleanup_entries(set);
 	write_unlock_bh(&set->lock);
 
-	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
 }
 
@@ -606,43 +584,40 @@
 	init_timer(&map->gc);
 	map->gc.data = (unsigned long) set;
 	map->gc.function = gc;
-	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
 	add_timer(&map->gc);
 }
 
 /* Create list:set type of sets */
 
-static struct list_set *
-init_list_set(struct ip_set *set, u32 size, size_t dsize,
-	      unsigned long timeout)
+static bool
+init_list_set(struct net *net, struct ip_set *set, u32 size)
 {
 	struct list_set *map;
 	struct set_elem *e;
 	u32 i;
 
-	map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+	map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL);
 	if (!map)
-		return NULL;
+		return false;
 
 	map->size = size;
-	map->dsize = dsize;
-	map->timeout = timeout;
+	map->net = net;
 	set->data = map;
 
 	for (i = 0; i < size; i++) {
-		e = list_set_elem(map, i);
+		e = list_set_elem(set, map, i);
 		e->id = IPSET_INVALID_ID;
 	}
 
-	return map;
+	return true;
 }
 
 static int
-list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+		u32 flags)
 {
-	struct list_set *map;
-	u32 size = IP_SET_LIST_DEFAULT_SIZE, cadt_flags = 0;
-	unsigned long timeout = 0;
+	u32 size = IP_SET_LIST_DEFAULT_SIZE;
 
 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
@@ -654,45 +629,13 @@
 	if (size < IP_SET_LIST_MIN_SIZE)
 		size = IP_SET_LIST_MIN_SIZE;
 
-	if (tb[IPSET_ATTR_CADT_FLAGS])
-		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-	if (tb[IPSET_ATTR_TIMEOUT])
-		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	set->variant = &set_variant;
-	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-		set->extensions |= IPSET_EXT_COUNTER;
-		if (tb[IPSET_ATTR_TIMEOUT]) {
-			map = init_list_set(set, size,
-					sizeof(struct setct_elem), timeout);
-			if (!map)
-				return -ENOMEM;
-			set->extensions |= IPSET_EXT_TIMEOUT;
-			map->offset[IPSET_OFFSET_TIMEOUT] =
-				offsetof(struct setct_elem, timeout);
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct setct_elem, counter);
-			list_set_gc_init(set, list_set_gc);
-		} else {
-			map = init_list_set(set, size,
-					    sizeof(struct setc_elem), 0);
-			if (!map)
-				return -ENOMEM;
-			map->offset[IPSET_OFFSET_COUNTER] =
-				offsetof(struct setc_elem, counter);
-		}
-	} else if (tb[IPSET_ATTR_TIMEOUT]) {
-		map = init_list_set(set, size,
-				    sizeof(struct sett_elem), timeout);
-		if (!map)
-			return -ENOMEM;
-		set->extensions |= IPSET_EXT_TIMEOUT;
-		map->offset[IPSET_OFFSET_TIMEOUT] =
-			offsetof(struct sett_elem, timeout);
+	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+	if (!init_list_set(net, set, size))
+		return -ENOMEM;
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 		list_set_gc_init(set, list_set_gc);
-	} else {
-		map = init_list_set(set, size, sizeof(struct set_elem), 0);
-		if (!map)
-			return -ENOMEM;
 	}
 	return 0;
 }
@@ -703,8 +646,8 @@
 	.features	= IPSET_TYPE_NAME | IPSET_DUMP_LAST,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= NFPROTO_UNSPEC,
-	.revision_min	= REVISION_MIN,
-	.revision_max	= REVISION_MAX,
+	.revision_min	= IPSET_TYPE_REV_MIN,
+	.revision_max	= IPSET_TYPE_REV_MAX,
 	.create		= list_set_create,
 	.create_policy	= {
 		[IPSET_ATTR_SIZE]	= { .type = NLA_U32 },
@@ -721,6 +664,7 @@
 		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
 		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
 		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
+		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
 	},
 	.me		= THIS_MODULE,
 };
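[Editor's aside: the list_set_create() rewrite above replaces four hand-written element structs (plain, timeout, counter, both) with one base struct plus dynamically sized extensions: ip_set_elem_len() computes set->dsize and records per-extension offsets in the generic struct ip_set, which ext_timeout()/ext_counter()/ext_comment() then use. A toy version of the idea; names and layout are illustrative, not the kernel's, and the real code also handles alignment:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    enum { EXT_TIMEOUT, EXT_COUNTER, EXT_MAX };

    struct set {
            size_t offset[EXT_MAX];
            size_t dsize;
    };

    static size_t elem_len(struct set *s, size_t base,
                           int want_timeout, int want_counter)
    {
            size_t len = base;

            if (want_timeout) {
                    s->offset[EXT_TIMEOUT] = len;
                    len += sizeof(unsigned long);
            }
            if (want_counter) {
                    s->offset[EXT_COUNTER] = len;
                    len += 2 * sizeof(uint64_t);    /* packets + bytes */
            }
            return len;
    }

    #define ext_timeout(e, s) \
            ((unsigned long *)((char *)(e) + (s)->offset[EXT_TIMEOUT]))

    int main(void)
    {
            struct set s;
            uint64_t elem[8];       /* 8-byte aligned element buffer */

            s.dsize = elem_len(&s, 8 /* base, padded for alignment */, 1, 1);
            *ext_timeout(elem, &s) = 3600;
            printf("dsize=%zu timeout@%zu counter@%zu value=%lu\n", s.dsize,
                   s.offset[EXT_TIMEOUT], s.offset[EXT_COUNTER],
                   *ext_timeout(elem, &s));
            return 0;
    }
]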
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 74fd00c..4f26ee4 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1139,12 +1139,6 @@
 	ip_vs_fill_iph_skb(af, skb, &iph);
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (!iph.fragoffs && skb_nfct_reasm(skb)) {
-			struct sk_buff *reasm = skb_nfct_reasm(skb);
-			/* Save fw mark for coming frags */
-			reasm->ipvs_property = 1;
-			reasm->mark = skb->mark;
-		}
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
 			int verdict = ip_vs_out_icmp_v6(skb, &related,
@@ -1239,11 +1233,11 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
 	     const struct net_device *in, const struct net_device *out,
 	     int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_out(hooknum, skb, AF_INET);
+	return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1251,11 +1245,11 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		   const struct net_device *in, const struct net_device *out,
 		   int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_out(hooknum, skb, AF_INET);
+	return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1266,11 +1260,11 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
 	     const struct net_device *in, const struct net_device *out,
 	     int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_out(hooknum, skb, AF_INET6);
+	return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1278,11 +1272,11 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		   const struct net_device *in, const struct net_device *out,
 		   int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_out(hooknum, skb, AF_INET6);
+	return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1614,12 +1608,6 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (!iph.fragoffs && skb_nfct_reasm(skb)) {
-			struct sk_buff *reasm = skb_nfct_reasm(skb);
-			/* Save fw mark for coming frags. */
-			reasm->ipvs_property = 1;
-			reasm->mark = skb->mark;
-		}
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
 			int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
@@ -1671,9 +1659,8 @@
 		/* sorry, all this trouble for a no-hit :) */
 		IP_VS_DBG_PKT(12, af, pp, skb, 0,
 			      "ip_vs_in: packet continues traversal as normal");
-		if (iph.fragoffs && !skb_nfct_reasm(skb)) {
+		if (iph.fragoffs) {
 			/* Fragment that couldn't be mapped to a conn entry
-			 * and don't have any pointer to a reasm skb
 			 * is missing module nf_defrag_ipv6
 			 */
 			IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
@@ -1733,12 +1720,12 @@
  *	Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		      const struct net_device *in,
 		      const struct net_device *out,
 		      int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_in(hooknum, skb, AF_INET);
+	return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1746,58 +1733,26 @@
  *	Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		     const struct net_device *in, const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_in(hooknum, skb, AF_INET);
+	return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 
 /*
- * AF_INET6 fragment handling
- * Copy info from first fragment, to the rest of them.
- */
-static unsigned int
-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
-		     const struct net_device *in,
-		     const struct net_device *out,
-		     int (*okfn)(struct sk_buff *))
-{
-	struct sk_buff *reasm = skb_nfct_reasm(skb);
-	struct net *net;
-
-	/* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
-	 * ipvs_property is set when checking first fragment
-	 * in ip_vs_in() and ip_vs_out().
-	 */
-	if (reasm)
-		IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
-	if (!reasm || !reasm->ipvs_property)
-		return NF_ACCEPT;
-
-	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
-		return NF_ACCEPT;
-
-	/* Copy stored fw mark, saved in ip_vs_{in,out} */
-	skb->mark = reasm->mark;
-
-	return NF_ACCEPT;
-}
-
-/*
  *	AF_INET6 handler in NF_INET_LOCAL_IN chain
  *	Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		      const struct net_device *in,
 		      const struct net_device *out,
 		      int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_in(hooknum, skb, AF_INET6);
+	return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1805,11 +1760,11 @@
  *	Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		     const struct net_device *in, const struct net_device *out,
 		     int (*okfn)(struct sk_buff *))
 {
-	return ip_vs_in(hooknum, skb, AF_INET6);
+	return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1825,7 +1780,7 @@
  *      and send them to ip_vs_in_icmp.
  */
 static unsigned int
-ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		   const struct net_device *in, const struct net_device *out,
 		   int (*okfn)(struct sk_buff *))
 {
@@ -1842,12 +1797,12 @@
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp(skb, &r, hooknum);
+	return ip_vs_in_icmp(skb, &r, ops->hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
-ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
 		      const struct net_device *in, const struct net_device *out,
 		      int (*okfn)(struct sk_buff *))
 {
@@ -1866,7 +1821,7 @@
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
+	return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
 }
 #endif
 
@@ -1924,14 +1879,6 @@
 		.priority	= 100,
 	},
 #ifdef CONFIG_IP_VS_IPV6
-	/* After mangle & nat fetch 2:nd fragment and following */
-	{
-		.hook		= ip_vs_preroute_frag6,
-		.owner		= THIS_MODULE,
-		.pf		= NFPROTO_IPV6,
-		.hooknum	= NF_INET_PRE_ROUTING,
-		.priority	= NF_IP6_PRI_NAT_DST + 1,
-	},
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply6,
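[Editor's aside: every ip_vs_* hook wrapper in this file changes signature for the same reason. In this cycle netfilter hook functions receive the struct nf_hook_ops that fired rather than a bare hook number, and read the number as ops->hooknum. A minimal hook in the new style, sketch only:

    #include <linux/netfilter.h>

    static unsigned int
    example_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                 const struct net_device *in, const struct net_device *out,
                 int (*okfn)(struct sk_buff *))
    {
            pr_debug("packet on hook %u\n", ops->hooknum);
            return NF_ACCEPT;
    }
]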
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index a3df9bd..1ded5c6 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -704,7 +704,7 @@
 	__ip_vs_dst_cache_reset(dest);
 	__ip_vs_svc_put(svc, false);
 	free_percpu(dest->stats.cpustats);
-	kfree(dest);
+	ip_vs_dest_put_and_free(dest);
 }
 
 /*
@@ -842,7 +842,7 @@
 	       struct ip_vs_dest **dest_p)
 {
 	struct ip_vs_dest *dest;
-	unsigned int atype;
+	unsigned int atype, i;
 
 	EnterFunction(2);
 
@@ -869,6 +869,12 @@
 	if (!dest->stats.cpustats)
 		goto err_alloc;
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ip_vs_dest_stats;
+		ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i);
+		u64_stats_init(&ip_vs_dest_stats->syncp);
+	}
+
 	dest->af = svc->af;
 	dest->protocol = svc->protocol;
 	dest->vaddr = svc->addr;
@@ -1134,7 +1140,7 @@
 ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		  struct ip_vs_service **svc_p)
 {
-	int ret = 0;
+	int ret = 0, i;
 	struct ip_vs_scheduler *sched = NULL;
 	struct ip_vs_pe *pe = NULL;
 	struct ip_vs_service *svc = NULL;
@@ -1184,6 +1190,13 @@
 		goto out_err;
 	}
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ip_vs_stats;
+		ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i);
+		u64_stats_init(&ip_vs_stats->syncp);
+	}
+
 	/* I'm the first user of the service */
 	atomic_set(&svc->refcnt, 0);
 
@@ -3780,7 +3793,7 @@
 
 int __net_init ip_vs_control_net_init(struct net *net)
 {
-	int idx;
+	int i, idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* Initialize rs_table */
@@ -3799,6 +3812,12 @@
 	if (!ipvs->tot_stats.cpustats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ipvs_tot_stats;
+		ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i);
+		u64_stats_init(&ipvs_tot_stats->syncp);
+	}
+
 	spin_lock_init(&ipvs->tot_stats.lock);
 
 	proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
@@ -3820,10 +3839,6 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	/* Some dest can be in grace period even before cleanup, we have to
-	 * defer ip_vs_trash_cleanup until ip_vs_dest_wait_readers is called.
-	 */
-	rcu_barrier();
 	ip_vs_trash_cleanup(net);
 	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
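[Editor's aside: the three new for_each_possible_cpu() loops in this file exist because struct u64_stats_sync is a real seqcount on 32-bit kernels and must be initialized before first use; u64_stats_init(), new in this cycle, does that and compiles away on 64-bit. The allocation pattern, sketched with an illustrative stats struct shaped like ip_vs_cpu_stats:

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct example_stats {
            u64 packets;
            struct u64_stats_sync syncp;
    };

    static struct example_stats __percpu *alloc_example_stats(void)
    {
            struct example_stats __percpu *stats;
            int cpu;

            stats = alloc_percpu(struct example_stats);
            if (!stats)
                    return NULL;
            for_each_possible_cpu(cpu)
                    u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
            return stats;
    }
]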
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index eff13c9..ca056a3 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -136,7 +136,7 @@
 						   struct ip_vs_lblc_entry,
 						   rcu_head);
 
-	ip_vs_dest_put(en->dest);
+	ip_vs_dest_put_and_free(en->dest);
 	kfree(en);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 0b85500..3f21a2f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -130,7 +130,7 @@
 	struct ip_vs_dest_set_elem *e;
 
 	e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
-	ip_vs_dest_put(e->dest);
+	ip_vs_dest_put_and_free(e->dest);
 	kfree(e);
 }
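[Editor's aside: both the lblc and lblcr hunks above free their table entries from an RCU callback, recovering the enclosing object from the rcu_head with container_of(); the change only swaps the final put for ip_vs_dest_put_and_free() so a destination still in its grace period is freed once its refcount drops. The generic shape of the callback pattern, with illustrative names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct entry {
            int data;
            struct rcu_head rcu_head;
    };

    static void entry_rcu_free(struct rcu_head *head)
    {
            struct entry *e = container_of(head, struct entry, rcu_head);

            kfree(e);
    }

    static void entry_del(struct entry *e)
    {
            /* after unlinking e from its RCU-protected structure: */
            call_rcu(&e->rcu_head, entry_rcu_free);
    }
]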
 
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 9ef22bd..bed5f70 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -65,7 +65,6 @@
 static int
 ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
 {
-	struct sk_buff *reasm = skb_nfct_reasm(skb);
 	struct ip_vs_iphdr iph;
 	unsigned int dataoff, datalen, matchoff, matchlen;
 	const char *dptr;
@@ -79,15 +78,10 @@
 	/* todo: IPv6 fragments:
 	 *       I think this only should be done for the first fragment. /HS
 	 */
-	if (reasm) {
-		skb = reasm;
-		dataoff = iph.thoff_reasm + sizeof(struct udphdr);
-	} else
-		dataoff = iph.len + sizeof(struct udphdr);
+	dataoff = iph.len + sizeof(struct udphdr);
 
 	if (dataoff >= skb->len)
 		return -EINVAL;
-	/* todo: Check if this will mess-up the reasm skb !!! /HS */
 	retc = skb_linearize(skb);
 	if (retc < 0)
 		return retc;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 23e596e..2f7ea75 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -20,13 +20,18 @@
 	sctp_sctphdr_t *sh, _sctph;
 
 	sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
-	if (sh == NULL)
+	if (sh == NULL) {
+		*verdict = NF_DROP;
 		return 0;
+	}
 
 	sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
 				 sizeof(_schunkh), &_schunkh);
-	if (sch == NULL)
+	if (sch == NULL) {
+		*verdict = NF_DROP;
 		return 0;
+	}
+
 	net = skb_net(skb);
 	ipvs = net_ipvs(net);
 	rcu_read_lock();
@@ -76,6 +81,7 @@
 {
 	sctp_sctphdr_t *sctph;
 	unsigned int sctphoff = iph->len;
+	bool payload_csum = false;
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (cp->af == AF_INET6 && iph->fragoffs)
@@ -87,19 +93,31 @@
 		return 0;
 
 	if (unlikely(cp->app != NULL)) {
+		int ret;
+
 		/* Some checks before mangling */
 		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/* Call application helper if needed */
-		if (!ip_vs_app_pkt_out(cp, skb))
+		ret = ip_vs_app_pkt_out(cp, skb);
+		if (ret == 0)
 			return 0;
+		/* ret=2: csum update is needed after payload mangling */
+		if (ret == 2)
+			payload_csum = true;
 	}
 
 	sctph = (void *) skb_network_header(skb) + sctphoff;
-	sctph->source = cp->vport;
 
-	sctp_nat_csum(skb, sctph, sctphoff);
+	/* Only update csum if we really have to */
+	if (sctph->source != cp->vport || payload_csum ||
+	    skb->ip_summed == CHECKSUM_PARTIAL) {
+		sctph->source = cp->vport;
+		sctp_nat_csum(skb, sctph, sctphoff);
+	} else {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
 
 	return 1;
 }
@@ -110,6 +128,7 @@
 {
 	sctp_sctphdr_t *sctph;
 	unsigned int sctphoff = iph->len;
+	bool payload_csum = false;
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (cp->af == AF_INET6 && iph->fragoffs)
@@ -121,19 +140,32 @@
 		return 0;
 
 	if (unlikely(cp->app != NULL)) {
+		int ret;
+
 		/* Some checks before mangling */
 		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
 			return 0;
 
 		/* Call application helper if needed */
-		if (!ip_vs_app_pkt_in(cp, skb))
+		ret = ip_vs_app_pkt_in(cp, skb);
+		if (ret == 0)
 			return 0;
+		/* ret=2: csum update is needed after payload mangling */
+		if (ret == 2)
+			payload_csum = true;
 	}
 
 	sctph = (void *) skb_network_header(skb) + sctphoff;
-	sctph->dest = cp->dport;
 
-	sctp_nat_csum(skb, sctph, sctphoff);
+	/* Only update csum if we really have to */
+	if (sctph->dest != cp->dport || payload_csum ||
+	    (skb->ip_summed == CHECKSUM_PARTIAL &&
+	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CSUM))) {
+		sctph->dest = cp->dport;
+		sctp_nat_csum(skb, sctph, sctphoff);
+	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
 
 	return 1;
 }
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 3588fae..cc65b2f 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -115,27 +115,46 @@
 }
 
 
-/* As ip_vs_sh_get, but with fallback if selected server is unavailable */
+/* As ip_vs_sh_get, but with fallback if selected server is unavailable
+ *
+ * The fallback strategy loops around the table starting from a "random"
+ * point (in fact, it is chosen to be the original hash value to make the
+ * algorithm deterministic) to find a new server.
+ */
 static inline struct ip_vs_dest *
 ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
 		      const union nf_inet_addr *addr, __be16 port)
 {
-	unsigned int offset;
-	unsigned int hash;
+	unsigned int offset, roffset;
+	unsigned int hash, ihash;
 	struct ip_vs_dest *dest;
 
+	/* first try the dest it's supposed to go to */
+	ihash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
+	dest = rcu_dereference(s->buckets[ihash].dest);
+	if (!dest)
+		return NULL;
+	if (!is_unavailable(dest))
+		return dest;
+
+	IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting",
+		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));
+
+	/* if the original dest is unavailable, loop around the table
+	 * starting from ihash to find a new dest
+	 */
 	for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) {
-		hash = ip_vs_sh_hashkey(svc->af, addr, port, offset);
+		roffset = (offset + ihash) % IP_VS_SH_TAB_SIZE;
+		hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset);
 		dest = rcu_dereference(s->buckets[hash].dest);
 		if (!dest)
 			break;
-		if (is_unavailable(dest))
-			IP_VS_DBG_BUF(6, "SH: selected unavailable server "
-				      "%s:%d (offset %d)",
-				      IP_VS_DBG_ADDR(svc->af, &dest->addr),
-				      ntohs(dest->port), offset);
-		else
+		if (!is_unavailable(dest))
 			return dest;
+		IP_VS_DBG_BUF(6, "SH: selected unavailable "
+			      "server %s:%d (offset %d), reselecting",
+			      IP_VS_DBG_ADDR(svc->af, &dest->addr),
+			      ntohs(dest->port), roffset);
 	}
 
 	return NULL;
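[Editor's aside: the rewritten fallback above first probes the bucket the flow originally hashed to, then walks the rest of the table with wraparound, (offset + ihash) % IP_VS_SH_TAB_SIZE, so reselection is deterministic per flow instead of always restarting from offset 0. A userspace rendition:

    #include <stdio.h>

    #define TAB_SIZE 8

    static int server[TAB_SIZE]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
    static int unavail[TAB_SIZE] = { 0, 0, 1, 1, 0, 0, 0, 0 }; /* 3,4 down */

    static int get_fallback(unsigned int ihash)
    {
            unsigned int offset, roffset;

            if (!unavail[ihash])
                    return server[ihash];
            for (offset = 0; offset < TAB_SIZE; offset++) {
                    roffset = (offset + ihash) % TAB_SIZE;
                    if (!unavail[roffset])
                            return server[roffset];
            }
            return -1;      /* every bucket unavailable */
    }

    int main(void)
    {
            /* bucket 2 is down: probes 2, then 3 (down), then 4 -> server 5 */
            printf("hash 2 -> server %d\n", get_fallback(2));
            return 0;
    }
]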
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 2d3030a..a4b5e2a 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -39,21 +39,23 @@
 unsigned int
 seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
 {
-	struct nf_conn_counter *acct;
+	struct nf_conn_acct *acct;
+	struct nf_conn_counter *counter;
 
 	acct = nf_conn_acct_find(ct);
 	if (!acct)
 		return 0;
 
+	counter = acct->counter;
 	return seq_printf(s, "packets=%llu bytes=%llu ",
-			  (unsigned long long)atomic64_read(&acct[dir].packets),
-			  (unsigned long long)atomic64_read(&acct[dir].bytes));
+			  (unsigned long long)atomic64_read(&counter[dir].packets),
+			  (unsigned long long)atomic64_read(&counter[dir].bytes));
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
 static struct nf_ct_ext_type acct_extend __read_mostly = {
-	.len	= sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]),
-	.align	= __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]),
+	.len	= sizeof(struct nf_conn_acct),
+	.align	= __alignof__(struct nf_conn_acct),
 	.id	= NF_CT_EXT_ACCT,
 };
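[Editor's aside: the accounting extension changes here from a bare per-direction counter array to a wrapper struct, so callers fetch acct->counter before indexing by direction; the indirection leaves room to grow the extension later. Per my reading of the updated nf_conntrack_acct.h the layout is:

    struct nf_conn_counter {
            atomic64_t packets;
            atomic64_t bytes;
    };

    struct nf_conn_acct {
            struct nf_conn_counter counter[IP_CT_DIR_MAX];
    };

    /* Usage, as in the hunks that follow:
     *
     *      struct nf_conn_acct *acct = nf_conn_acct_find(ct);
     *      struct nf_conn_counter *counter = acct->counter;
     *
     *      atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
     */
]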
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5d892fe..e22d950 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1109,12 +1109,14 @@
 
 acct:
 	if (do_acct) {
-		struct nf_conn_counter *acct;
+		struct nf_conn_acct *acct;
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
-			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
-			atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+			struct nf_conn_counter *counter = acct->counter;
+
+			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
+			atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
 		}
 	}
 }
@@ -1126,13 +1128,15 @@
 		       int do_acct)
 {
 	if (do_acct) {
-		struct nf_conn_counter *acct;
+		struct nf_conn_acct *acct;
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
-			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+			struct nf_conn_counter *counter = acct->counter;
+
+			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
 			atomic64_add(skb->len - skb_network_offset(skb),
-				     &acct[CTINFO2DIR(ctinfo)].bytes);
+				     &counter[CTINFO2DIR(ctinfo)].bytes);
 		}
 	}
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index eea936b..08870b8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -211,13 +211,23 @@
 }
 
 static int
-dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
-	      enum ip_conntrack_dir dir)
+dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
+	      enum ip_conntrack_dir dir, int type)
 {
-	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
+	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
+	struct nf_conn_counter *counter = acct->counter;
 	struct nlattr *nest_count;
+	u64 pkts, bytes;
 
-	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
+	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
+		pkts = atomic64_xchg(&counter[dir].packets, 0);
+		bytes = atomic64_xchg(&counter[dir].bytes, 0);
+	} else {
+		pkts = atomic64_read(&counter[dir].packets);
+		bytes = atomic64_read(&counter[dir].bytes);
+	}
+
+	nest_count = nla_nest_start(skb, attr | NLA_F_NESTED);
 	if (!nest_count)
 		goto nla_put_failure;
 
@@ -234,24 +244,19 @@
 }
 
 static int
-ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
-			enum ip_conntrack_dir dir, int type)
+ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
 {
-	struct nf_conn_counter *acct;
-	u64 pkts, bytes;
+	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
 
-	acct = nf_conn_acct_find(ct);
 	if (!acct)
 		return 0;
 
-	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
-		pkts = atomic64_xchg(&acct[dir].packets, 0);
-		bytes = atomic64_xchg(&acct[dir].bytes, 0);
-	} else {
-		pkts = atomic64_read(&acct[dir].packets);
-		bytes = atomic64_read(&acct[dir].bytes);
-	}
-	return dump_counters(skb, pkts, bytes, dir);
+	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
+		return -1;
+	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
+		return -1;
+
+	return 0;
 }
 
 static int
@@ -488,8 +493,7 @@
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
 	    ctnetlink_dump_timeout(skb, ct) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
+	    ctnetlink_dump_acct(skb, ct, type) < 0 ||
 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
 	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
@@ -530,7 +534,7 @@
 }
 
 static inline size_t
-ctnetlink_counters_size(const struct nf_conn *ct)
+ctnetlink_acct_size(const struct nf_conn *ct)
 {
 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
 		return 0;
@@ -579,7 +583,7 @@
 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
-	       + ctnetlink_counters_size(ct)
+	       + ctnetlink_acct_size(ct)
 	       + ctnetlink_timestamp_size(ct)
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
 	       + nla_total_size(0) /* CTA_PROTOINFO */
@@ -673,10 +677,7 @@
 		goto nla_put_failure;
 
 	if (events & (1 << IPCT_DESTROY)) {
-		if (ctnetlink_dump_counters(skb, ct,
-					    IP_CT_DIR_ORIGINAL, type) < 0 ||
-		    ctnetlink_dump_counters(skb, ct,
-					    IP_CT_DIR_REPLY, type) < 0 ||
+		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
 		    ctnetlink_dump_timestamp(skb, ct) < 0)
 			goto nla_put_failure;
 	} else {
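dump_counters() now receives the whole accounting container plus the
message type, so the choice between a destructive and a plain read is
made right next to the read itself. The two modes, as an illustrative
helper:

	/* IPCTNL_MSG_CT_GET_CTRZERO must return the count and reset it
	 * in one atomic step so no concurrent update is lost; any other
	 * request just reads and leaves the counter accumulating. */
	static u64 read_packets(struct nf_conn_counter *counter,
				enum ip_conntrack_dir dir, bool zero)
	{
		if (zero)
			return atomic64_xchg(&counter[dir].packets, 0);
		return atomic64_read(&counter[dir].packets);
	}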
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index e0c4373..466410e 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -52,66 +52,8 @@
 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
 				   "endpoints only (default 1)");
 
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
-				unsigned int dataoff, const char **dptr,
-				unsigned int *datalen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
-
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
-				   s16 off) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
-
-unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-				       unsigned int protoff,
-				       unsigned int dataoff,
-				       const char **dptr,
-				       unsigned int *datalen,
-				       struct nf_conntrack_expect *exp,
-				       unsigned int matchoff,
-				       unsigned int matchlen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
-
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
-				     unsigned int dataoff,
-				     const char **dptr,
-				     unsigned int *datalen,
-				     unsigned int sdpoff,
-				     enum sdp_header_types type,
-				     enum sdp_header_types term,
-				     const union nf_inet_addr *addr)
-				     __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
-
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
-				     unsigned int dataoff,
-				     const char **dptr,
-				     unsigned int *datalen,
-				     unsigned int matchoff,
-				     unsigned int matchlen,
-				     u_int16_t port) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
-
-unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-					unsigned int protoff,
-					unsigned int dataoff,
-					const char **dptr,
-					unsigned int *datalen,
-					unsigned int sdpoff,
-					const union nf_inet_addr *addr)
-					__read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
-
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
-				      unsigned int dataoff,
-				      const char **dptr,
-				      unsigned int *datalen,
-				      struct nf_conntrack_expect *rtp_exp,
-				      struct nf_conntrack_expect *rtcp_exp,
-				      unsigned int mediaoff,
-				      unsigned int medialen,
-				      union nf_inet_addr *rtp_addr)
-				      __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
+const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
 
 static int string_len(const struct nf_conn *ct, const char *dptr,
 		      const char *limit, int *shift)
@@ -914,8 +856,7 @@
 	int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
 	u_int16_t base_port;
 	__be16 rtp_port, rtcp_port;
-	typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port;
-	typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media;
+	const struct nf_nat_sip_hooks *hooks;
 
 	saddr = NULL;
 	if (sip_direct_media) {
@@ -966,22 +907,23 @@
 #endif
 			skip_expect = 1;
 	} while (!skip_expect);
-	rcu_read_unlock();
 
 	base_port = ntohs(tuple.dst.u.udp.port) & ~1;
 	rtp_port = htons(base_port);
 	rtcp_port = htons(base_port + 1);
 
 	if (direct_rtp) {
-		nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
-		if (nf_nat_sdp_port &&
-		    !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
+		hooks = rcu_dereference(nf_nat_sip_hooks);
+		if (hooks &&
+		    !hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
 				     mediaoff, medialen, ntohs(rtp_port)))
 			goto err1;
 	}
 
-	if (skip_expect)
+	if (skip_expect) {
+		rcu_read_unlock();
 		return NF_ACCEPT;
+	}
 
 	rtp_exp = nf_ct_expect_alloc(ct);
 	if (rtp_exp == NULL)
@@ -995,10 +937,10 @@
 	nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
 			  IPPROTO_UDP, NULL, &rtcp_port);
 
-	nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
-	if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
-		ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
-				       rtp_exp, rtcp_exp,
+	hooks = rcu_dereference(nf_nat_sip_hooks);
+	if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp)
+		ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
+				       datalen, rtp_exp, rtcp_exp,
 				       mediaoff, medialen, daddr);
 	else {
 		if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -1012,6 +954,7 @@
 err2:
 	nf_ct_expect_put(rtp_exp);
 err1:
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -1051,13 +994,12 @@
 	unsigned int caddr_len, maddr_len;
 	unsigned int i;
 	union nf_inet_addr caddr, maddr, rtp_addr;
+	const struct nf_nat_sip_hooks *hooks;
 	unsigned int port;
 	const struct sdp_media_type *t;
 	int ret = NF_ACCEPT;
-	typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
-	typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
 
-	nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
+	hooks = rcu_dereference(nf_nat_sip_hooks);
 
 	/* Find beginning of session description */
 	if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1125,10 +1067,11 @@
 		}
 
 		/* Update media connection address if present */
-		if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
-			ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+		if (maddr_len && hooks && ct->status & IPS_NAT_MASK) {
+			ret = hooks->sdp_addr(skb, protoff, dataoff,
 					      dptr, datalen, mediaoff,
-					      SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
+					      SDP_HDR_CONNECTION,
+					      SDP_HDR_MEDIA,
 					      &rtp_addr);
 			if (ret != NF_ACCEPT) {
 				nf_ct_helper_log(skb, ct, "cannot mangle SDP");
@@ -1139,10 +1082,11 @@
 	}
 
 	/* Update session connection and owner addresses */
-	nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
-	if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
-		ret = nf_nat_sdp_session(skb, protoff, dataoff,
-					 dptr, datalen, sdpoff, &rtp_addr);
+	hooks = rcu_dereference(nf_nat_sip_hooks);
+	if (hooks && ct->status & IPS_NAT_MASK)
+		ret = hooks->sdp_session(skb, protoff, dataoff,
+					 dptr, datalen, sdpoff,
+					 &rtp_addr);
 
 	return ret;
 }
@@ -1242,11 +1186,11 @@
 	unsigned int matchoff, matchlen;
 	struct nf_conntrack_expect *exp;
 	union nf_inet_addr *saddr, daddr;
+	const struct nf_nat_sip_hooks *hooks;
 	__be16 port;
 	u8 proto;
 	unsigned int expires = 0;
 	int ret;
-	typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
 
 	/* Expected connections can not register again. */
 	if (ct->status & IPS_EXPECTED)
@@ -1309,10 +1253,10 @@
 	exp->helper = nfct_help(ct)->helper;
 	exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
 
-	nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
-	if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
-		ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
-					exp, matchoff, matchlen);
+	hooks = rcu_dereference(nf_nat_sip_hooks);
+	if (hooks && ct->status & IPS_NAT_MASK)
+		ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
+				    exp, matchoff, matchlen);
 	else {
 		if (nf_ct_expect_related(exp) != 0) {
 			nf_ct_helper_log(skb, ct, "cannot add expectation");
@@ -1515,7 +1459,7 @@
 			   unsigned int protoff, unsigned int dataoff,
 			   const char **dptr, unsigned int *datalen)
 {
-	typeof(nf_nat_sip_hook) nf_nat_sip;
+	const struct nf_nat_sip_hooks *hooks;
 	int ret;
 
 	if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
@@ -1524,9 +1468,9 @@
 		ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
 
 	if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-		nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
-		if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
-					      dptr, datalen)) {
+		hooks = rcu_dereference(nf_nat_sip_hooks);
+		if (hooks && !hooks->msg(skb, protoff, dataoff,
+					 dptr, datalen)) {
 			nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
 			ret = NF_DROP;
 		}
@@ -1546,7 +1490,6 @@
 	s16 diff, tdiff = 0;
 	int ret = NF_ACCEPT;
 	bool term;
-	typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
 	if (ctinfo != IP_CT_ESTABLISHED &&
 	    ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -1610,9 +1553,11 @@
 	}
 
 	if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-		nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
-		if (nf_nat_sip_seq_adjust)
-			nf_nat_sip_seq_adjust(skb, protoff, tdiff);
+		const struct nf_nat_sip_hooks *hooks;
+
+		hooks = rcu_dereference(nf_nat_sip_hooks);
+		if (hooks)
+			hooks->seq_adjust(skb, protoff, tdiff);
 	}
 
 	return ret;
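All SIP call sites now take a single rcu_dereference() of
nf_nat_sip_hooks and call through its members instead of dereferencing
seven separate hook pointers. The reader-side pattern, sketched (the
struct layout is implied by the call sites; the definition itself lives
in the conntrack SIP header):

	const struct nf_nat_sip_hooks *hooks;

	rcu_read_lock();
	hooks = rcu_dereference(nf_nat_sip_hooks);
	if (hooks && ct->status & IPS_NAT_MASK)
		hooks->seq_adjust(skb, protoff, tdiff);
	rcu_read_unlock();

Note also the rcu_read_unlock() movement in set_expected_rtp_rtcp()
above: the unlock now happens only after the last use of the hooks
pointer (via the new err1 path), closing a window in which the NAT
module could otherwise be unloaded while its sdp_port callback was
still about to run.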
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3deec99..61a3c92 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -13,26 +13,20 @@
 
 
 /* core.c */
-extern unsigned int nf_iterate(struct list_head *head,
-				struct sk_buff *skb,
-				unsigned int hook,
-				const struct net_device *indev,
-				const struct net_device *outdev,
-				struct nf_hook_ops **elemp,
-				int (*okfn)(struct sk_buff *),
-				int hook_thresh);
+unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
+			unsigned int hook, const struct net_device *indev,
+			const struct net_device *outdev,
+			struct nf_hook_ops **elemp,
+			int (*okfn)(struct sk_buff *), int hook_thresh);
 
 /* nf_queue.c */
-extern int nf_queue(struct sk_buff *skb,
-		    struct nf_hook_ops *elem,
-		    u_int8_t pf, unsigned int hook,
-		    struct net_device *indev,
-		    struct net_device *outdev,
-		    int (*okfn)(struct sk_buff *),
-		    unsigned int queuenum);
-extern int __init netfilter_queue_init(void);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+	     unsigned int hook, struct net_device *indev,
+	     struct net_device *outdev, int (*okfn)(struct sk_buff *),
+	     unsigned int queuenum);
+int __init netfilter_queue_init(void);
 
 /* nf_log.c */
-extern int __init netfilter_log_init(void);
+int __init netfilter_log_init(void);
 
 #endif
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 6f0f4f7..63a8154 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -432,6 +432,26 @@
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+	/* Force range to this IP; let proto decide mapping for
+	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+	 * Use reply in case it's already been mangled (eg local packet).
+	 */
+	union nf_inet_addr ip =
+		(HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
+	struct nf_nat_range range = {
+		.flags		= NF_NAT_RANGE_MAP_IPS,
+		.min_addr	= ip,
+		.max_addr	= ip,
+	};
+	return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
+
 /* Do packet manipulations according to nf_nat_setup_info. */
 unsigned int nf_nat_packet(struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo,
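nf_nat_alloc_null_binding() is exported so that callers which matched
no explicit NAT rule can still install a stable 1:1 mapping, keeping
reply translation consistent. A hedged usage sketch; the fallback
function and its placement are hypothetical:

	/* If no mapping was chosen for this manip direction yet,
	 * fall back to a null binding. */
	static unsigned int nat_fallback(struct nf_conn *ct,
					 unsigned int hooknum)
	{
		if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
			return nf_nat_alloc_null_binding(ct, hooknum);
		return NF_ACCEPT;
	}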
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index f979040..b4d691d 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -625,33 +625,26 @@
 
 static void __exit nf_nat_sip_fini(void)
 {
-	RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
-	RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+	RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
+
 	nf_ct_helper_expectfn_unregister(&sip_nat);
 	synchronize_rcu();
 }
 
+static const struct nf_nat_sip_hooks sip_hooks = {
+	.msg		= nf_nat_sip,
+	.seq_adjust	= nf_nat_sip_seq_adjust,
+	.expect		= nf_nat_sip_expect,
+	.sdp_addr	= nf_nat_sdp_addr,
+	.sdp_port	= nf_nat_sdp_port,
+	.sdp_session	= nf_nat_sdp_session,
+	.sdp_media	= nf_nat_sdp_media,
+};
+
 static int __init nf_nat_sip_init(void)
 {
-	BUG_ON(nf_nat_sip_hook != NULL);
-	BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
-	BUG_ON(nf_nat_sip_expect_hook != NULL);
-	BUG_ON(nf_nat_sdp_addr_hook != NULL);
-	BUG_ON(nf_nat_sdp_port_hook != NULL);
-	BUG_ON(nf_nat_sdp_session_hook != NULL);
-	BUG_ON(nf_nat_sdp_media_hook != NULL);
-	RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
-	RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
-	RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
-	RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
-	RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
-	RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
-	RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
+	BUG_ON(nf_nat_sip_hooks != NULL);
+	RCU_INIT_POINTER(nf_nat_sip_hooks, &sip_hooks);
 	nf_ct_helper_expectfn_register(&sip_nat);
 	return 0;
 }
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
new file mode 100644
index 0000000..dcddc49
--- /dev/null
+++ b/net/netfilter/nf_tables_api.c
@@ -0,0 +1,3275 @@
+/*
+ * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+static LIST_HEAD(nf_tables_expressions);
+
+/**
+ *	nft_register_afinfo - register nf_tables address family info
+ *
+ *	@net: network namespace
+ *	@afi: address family info to register
+ *
+ *	Register the address family for use with nf_tables. Returns zero on
+ *	success or a negative errno code otherwise.
+ */
+int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
+{
+	INIT_LIST_HEAD(&afi->tables);
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_add_tail(&afi->list, &net->nft.af_info);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_afinfo);
+
+/**
+ *	nft_unregister_afinfo - unregister nf_tables address family info
+ *
+ *	@afi: address family info to unregister
+ *
+ *	Unregister the address family for use with nf_tables.
+ */
+void nft_unregister_afinfo(struct nft_af_info *afi)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_del(&afi->list);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
+
+static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
+{
+	struct nft_af_info *afi;
+
+	list_for_each_entry(afi, &net->nft.af_info, list) {
+		if (afi->family == family)
+			return afi;
+	}
+	return NULL;
+}
+
+static struct nft_af_info *
+nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
+{
+	struct nft_af_info *afi;
+
+	afi = nft_afinfo_lookup(net, family);
+	if (afi != NULL)
+		return afi;
+#ifdef CONFIG_MODULES
+	if (autoload) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nft-afinfo-%u", family);
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		afi = nft_afinfo_lookup(net, family);
+		if (afi != NULL)
+			return ERR_PTR(-EAGAIN);
+	}
+#endif
+	return ERR_PTR(-EAFNOSUPPORT);
+}
+
+/*
+ * Tables
+ */
+
+static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+					  const struct nlattr *nla)
+{
+	struct nft_table *table;
+
+	list_for_each_entry(table, &afi->tables, list) {
+		if (!nla_strcmp(nla, table->name))
+			return table;
+	}
+	return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+						const struct nlattr *nla)
+{
+	struct nft_table *table;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	table = nft_table_lookup(afi, nla);
+	if (table != NULL)
+		return table;
+
+	return ERR_PTR(-ENOENT);
+}
+
+static inline u64 nf_tables_alloc_handle(struct nft_table *table)
+{
+	return ++table->hgenerator;
+}
+
+static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+
+static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+{
+	int i;
+
+	for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
+		if (chain_type[family][i] != NULL &&
+		    !nla_strcmp(nla, chain_type[family][i]->name))
+			return i;
+	}
+	return -1;
+}
+
+static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+				       const struct nlattr *nla,
+				       bool autoload)
+{
+	int type;
+
+	type = __nf_tables_chain_type_lookup(afi->family, nla);
+#ifdef CONFIG_MODULES
+	if (type < 0 && autoload) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nft-chain-%u-%.*s", afi->family,
+			       nla_len(nla) - 1, (const char *)nla_data(nla));
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		type = __nf_tables_chain_type_lookup(afi->family, nla);
+	}
+#endif
+	return type;
+}
+
+static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+	[NFTA_TABLE_NAME]	= { .type = NLA_STRING },
+	[NFTA_TABLE_FLAGS]	= { .type = NLA_U32 },
+};
+
+static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
+				     int event, u32 flags, int family,
+				     const struct nft_table *table)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+
+	event |= NFNL_SUBSYS_NFTABLES << 8;
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= 0;
+
+	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
+	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+static int nf_tables_table_notify(const struct sk_buff *oskb,
+				  const struct nlmsghdr *nlh,
+				  const struct nft_table *table,
+				  int event, int family)
+{
+	struct sk_buff *skb;
+	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+	u32 seq = nlh ? nlh->nlmsg_seq : 0;
+	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+	bool report;
+	int err;
+
+	report = nlh ? nlmsg_report(nlh) : false;
+	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+		return 0;
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
+					family, table);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+			     GFP_KERNEL);
+err:
+	if (err < 0)
+		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	return err;
+}
+
+static int nf_tables_dump_tables(struct sk_buff *skb,
+				 struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+
+	list_for_each_entry(afi, &net->nft.af_info, list) {
+		if (family != NFPROTO_UNSPEC && family != afi->family)
+			continue;
+
+		list_for_each_entry(table, &afi->tables, list) {
+			if (idx < s_idx)
+				goto cont;
+			if (idx > s_idx)
+				memset(&cb->args[1], 0,
+				       sizeof(cb->args) - sizeof(cb->args[0]));
+			if (nf_tables_fill_table_info(skb,
+						      NETLINK_CB(cb->skb).portid,
+						      cb->nlh->nlmsg_seq,
+						      NFT_MSG_NEWTABLE,
+						      NLM_F_MULTI,
+						      afi->family, table) < 0)
+				goto done;
+cont:
+			idx++;
+		}
+	}
+done:
+	cb->args[0] = idx;
+	return skb->len;
+}
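+
+/*
+ * Netlink dump convention used above and by the chain/rule dumpers
+ * below: cb->args[0] holds the number of objects already emitted.
+ * On re-entry everything below that index is skipped; when the skb
+ * fills up we return early and the netlink core calls back in.
+ * Sketch of the skeleton:
+ *
+ *	if (idx < s_idx)
+ *		goto cont;	(sent in a previous pass)
+ *	if (fill_info(skb, ..., obj) < 0)
+ *		goto done;	(skb full, resume here next time)
+ */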
+
+static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	struct sk_buff *skb2;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_tables,
+		};
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid,
+					nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
+					family, table);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static int nf_tables_table_enable(struct nft_table *table)
+{
+	struct nft_chain *chain;
+	int err, i = 0;
+
+	list_for_each_entry(chain, &table->chains, list) {
+		err = nf_register_hook(&nft_base_chain(chain)->ops);
+		if (err < 0)
+			goto err;
+
+		i++;
+	}
+	return 0;
+err:
+	list_for_each_entry(chain, &table->chains, list) {
+		if (i-- <= 0)
+			break;
+
+		nf_unregister_hook(&nft_base_chain(chain)->ops);
+	}
+	return err;
+}
+
+static int nf_tables_table_disable(struct nft_table *table)
+{
+	struct nft_chain *chain;
+
+	list_for_each_entry(chain, &table->chains, list)
+		nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+	return 0;
+}
+
+static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[],
+			      struct nft_af_info *afi, struct nft_table *table)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	int family = nfmsg->nfgen_family, ret = 0;
+
+	if (nla[NFTA_TABLE_FLAGS]) {
+		u32 flags;
+
+		flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+		if (flags & ~NFT_TABLE_F_DORMANT)
+			return -EINVAL;
+
+		if ((flags & NFT_TABLE_F_DORMANT) &&
+		    !(table->flags & NFT_TABLE_F_DORMANT)) {
+			ret = nf_tables_table_disable(table);
+			if (ret >= 0)
+				table->flags |= NFT_TABLE_F_DORMANT;
+		} else if (!(flags & NFT_TABLE_F_DORMANT) &&
+			   table->flags & NFT_TABLE_F_DORMANT) {
+			ret = nf_tables_table_enable(table);
+			if (ret >= 0)
+				table->flags &= ~NFT_TABLE_F_DORMANT;
+		}
+		if (ret < 0)
+			goto err;
+	}
+
+	nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+err:
+	return ret;
+}
+
+static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nlattr *name;
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+
+	afi = nf_tables_afinfo_lookup(net, family, true);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	name = nla[NFTA_TABLE_NAME];
+	table = nf_tables_table_lookup(afi, name);
+	if (IS_ERR(table)) {
+		if (PTR_ERR(table) != -ENOENT)
+			return PTR_ERR(table);
+		table = NULL;
+	}
+
+	if (table != NULL) {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+		if (nlh->nlmsg_flags & NLM_F_REPLACE)
+			return -EOPNOTSUPP;
+		return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+	}
+
+	table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
+	if (table == NULL)
+		return -ENOMEM;
+
+	nla_strlcpy(table->name, name, nla_len(name));
+	INIT_LIST_HEAD(&table->chains);
+	INIT_LIST_HEAD(&table->sets);
+
+	if (nla[NFTA_TABLE_FLAGS]) {
+		u32 flags;
+
+		flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+		if (flags & ~NFT_TABLE_F_DORMANT) {
+			kfree(table);
+			return -EINVAL;
+		}
+
+		table->flags |= flags;
+	}
+
+	list_add_tail(&table->list, &afi->tables);
+	nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+	return 0;
+}
+
+static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	if (table->use)
+		return -EBUSY;
+
+	list_del(&table->list);
+	nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
+	kfree(table);
+	return 0;
+}
+
+int nft_register_chain_type(struct nf_chain_type *ctype)
+{
+	int err = 0;
+
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	if (chain_type[ctype->family][ctype->type] != NULL) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!try_module_get(ctype->me)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	chain_type[ctype->family][ctype->type] = ctype;
+out:
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	return err;
+}
+EXPORT_SYMBOL_GPL(nft_register_chain_type);
+
+void nft_unregister_chain_type(struct nf_chain_type *ctype)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	chain_type[ctype->family][ctype->type] = NULL;
+	module_put(ctype->me);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
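+
+/*
+ * Example chain type registration (sketch; all names and values are
+ * illustrative). A family module supplies the per-hook functions and
+ * the name that userspace selects through NFTA_CHAIN_TYPE:
+ *
+ *	static struct nf_chain_type filter_ipv4 = {
+ *		.family	= NFPROTO_IPV4,
+ *		.type	= NFT_CHAIN_T_DEFAULT,
+ *		.name	= "filter",
+ *		.me	= THIS_MODULE,
+ *		.fn	= {
+ *			[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
+ *			[NF_INET_FORWARD]	= nft_do_chain_ipv4,
+ *		},
+ *	};
+ *
+ *	err = nft_register_chain_type(&filter_ipv4);
+ */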
+
+/*
+ * Chains
+ */
+
+static struct nft_chain *
+nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
+{
+	struct nft_chain *chain;
+
+	list_for_each_entry(chain, &table->chains, list) {
+		if (chain->handle == handle)
+			return chain;
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
+						const struct nlattr *nla)
+{
+	struct nft_chain *chain;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(chain, &table->chains, list) {
+		if (!nla_strcmp(nla, chain->name))
+			return chain;
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
+	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING },
+	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
+	[NFTA_CHAIN_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
+	[NFTA_CHAIN_HOOK]	= { .type = NLA_NESTED },
+	[NFTA_CHAIN_POLICY]	= { .type = NLA_U32 },
+	[NFTA_CHAIN_TYPE]	= { .type = NLA_NUL_STRING },
+	[NFTA_CHAIN_COUNTERS]	= { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
+	[NFTA_HOOK_HOOKNUM]	= { .type = NLA_U32 },
+	[NFTA_HOOK_PRIORITY]	= { .type = NLA_U32 },
+};
+
+static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
+{
+	struct nft_stats *cpu_stats, total;
+	struct nlattr *nest;
+	int cpu;
+
+	memset(&total, 0, sizeof(total));
+	for_each_possible_cpu(cpu) {
+		cpu_stats = per_cpu_ptr(stats, cpu);
+		total.pkts += cpu_stats->pkts;
+		total.bytes += cpu_stats->bytes;
+	}
+	nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
+	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	return -ENOSPC;
+}
+
+static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
+				     int event, u32 flags, int family,
+				     const struct nft_table *table,
+				     const struct nft_chain *chain)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+
+	event |= NFNL_SUBSYS_NFTABLES << 8;
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= 0;
+
+	if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
+		goto nla_put_failure;
+	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+		goto nla_put_failure;
+	if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
+		goto nla_put_failure;
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		const struct nft_base_chain *basechain = nft_base_chain(chain);
+		const struct nf_hook_ops *ops = &basechain->ops;
+		struct nlattr *nest;
+
+		nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
+		if (nest == NULL)
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
+			goto nla_put_failure;
+		nla_nest_end(skb, nest);
+
+		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
+				 htonl(basechain->policy)))
+			goto nla_put_failure;
+
+		if (nla_put_string(skb, NFTA_CHAIN_TYPE,
+			chain_type[ops->pf][nft_base_chain(chain)->type]->name))
+				goto nla_put_failure;
+
+		if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+			goto nla_put_failure;
+	}
+
+	if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+static int nf_tables_chain_notify(const struct sk_buff *oskb,
+				  const struct nlmsghdr *nlh,
+				  const struct nft_table *table,
+				  const struct nft_chain *chain,
+				  int event, int family)
+{
+	struct sk_buff *skb;
+	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+	u32 seq = nlh ? nlh->nlmsg_seq : 0;
+	bool report;
+	int err;
+
+	report = nlh ? nlmsg_report(nlh) : false;
+	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+		return 0;
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
+					table, chain);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+			     GFP_KERNEL);
+err:
+	if (err < 0)
+		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	return err;
+}
+
+static int nf_tables_dump_chains(struct sk_buff *skb,
+				 struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	const struct nft_chain *chain;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+
+	list_for_each_entry(afi, &net->nft.af_info, list) {
+		if (family != NFPROTO_UNSPEC && family != afi->family)
+			continue;
+
+		list_for_each_entry(table, &afi->tables, list) {
+			list_for_each_entry(chain, &table->chains, list) {
+				if (idx < s_idx)
+					goto cont;
+				if (idx > s_idx)
+					memset(&cb->args[1], 0,
+					       sizeof(cb->args) - sizeof(cb->args[0]));
+				if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid,
+							      cb->nlh->nlmsg_seq,
+							      NFT_MSG_NEWCHAIN,
+							      NLM_F_MULTI,
+							      afi->family, table, chain) < 0)
+					goto done;
+cont:
+				idx++;
+			}
+		}
+	}
+done:
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	const struct nft_chain *chain;
+	struct sk_buff *skb2;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_chains,
+		};
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+	if (IS_ERR(chain))
+		return PTR_ERR(chain);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid,
+					nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
+					family, table, chain);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static int
+nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+	switch (ntohl(nla_get_be32(attr))) {
+	case NF_DROP:
+		chain->policy = NF_DROP;
+		break;
+	case NF_ACCEPT:
+		chain->policy = NF_ACCEPT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+	[NFTA_COUNTER_PACKETS]	= { .type = NLA_U64 },
+	[NFTA_COUNTER_BYTES]	= { .type = NLA_U64 },
+};
+
+static int
+nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+	struct nlattr *tb[NFTA_COUNTER_MAX+1];
+	struct nft_stats __percpu *newstats;
+	struct nft_stats *stats;
+	int err;
+
+	err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
+		return -EINVAL;
+
+	newstats = alloc_percpu(struct nft_stats);
+	if (newstats == NULL)
+		return -ENOMEM;
+
+	/* Restore old counters on this cpu, no problem. Per-cpu statistics
+	 * are not exposed to userspace.
+	 */
+	stats = this_cpu_ptr(newstats);
+	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+
+	if (chain->stats) {
+		/* nfnl_lock is held, add some nfnl function for this, later */
+		struct nft_stats __percpu *oldstats =
+			rcu_dereference_protected(chain->stats, 1);
+
+		rcu_assign_pointer(chain->stats, newstats);
+		synchronize_rcu();
+		free_percpu(oldstats);
+	} else
+		rcu_assign_pointer(chain->stats, newstats);
+
+	return 0;
+}
+
+static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nlattr * uninitialized_var(name);
+	const struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_chain *chain;
+	struct nft_base_chain *basechain = NULL;
+	struct nlattr *ha[NFTA_HOOK_MAX + 1];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	u64 handle = 0;
+	int err;
+	bool create;
+
+	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+	afi = nf_tables_afinfo_lookup(net, family, true);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	if (table->use == UINT_MAX)
+		return -EOVERFLOW;
+
+	chain = NULL;
+	name = nla[NFTA_CHAIN_NAME];
+
+	if (nla[NFTA_CHAIN_HANDLE]) {
+		handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+		chain = nf_tables_chain_lookup_byhandle(table, handle);
+		if (IS_ERR(chain))
+			return PTR_ERR(chain);
+	} else {
+		chain = nf_tables_chain_lookup(table, name);
+		if (IS_ERR(chain)) {
+			if (PTR_ERR(chain) != -ENOENT)
+				return PTR_ERR(chain);
+			chain = NULL;
+		}
+	}
+
+	if (chain != NULL) {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+		if (nlh->nlmsg_flags & NLM_F_REPLACE)
+			return -EOPNOTSUPP;
+
+		if (nla[NFTA_CHAIN_HANDLE] && name &&
+		    !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
+			return -EEXIST;
+
+		if (nla[NFTA_CHAIN_POLICY]) {
+			if (!(chain->flags & NFT_BASE_CHAIN))
+				return -EOPNOTSUPP;
+
+			err = nf_tables_chain_policy(nft_base_chain(chain),
+						     nla[NFTA_CHAIN_POLICY]);
+			if (err < 0)
+				return err;
+		}
+
+		if (nla[NFTA_CHAIN_COUNTERS]) {
+			if (!(chain->flags & NFT_BASE_CHAIN))
+				return -EOPNOTSUPP;
+
+			err = nf_tables_counters(nft_base_chain(chain),
+						 nla[NFTA_CHAIN_COUNTERS]);
+			if (err < 0)
+				return err;
+		}
+
+		if (nla[NFTA_CHAIN_HANDLE] && name)
+			nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+		goto notify;
+	}
+
+	if (nla[NFTA_CHAIN_HOOK]) {
+		struct nf_hook_ops *ops;
+		nf_hookfn *hookfn;
+		u32 hooknum;
+		int type = NFT_CHAIN_T_DEFAULT;
+
+		if (nla[NFTA_CHAIN_TYPE]) {
+			type = nf_tables_chain_type_lookup(afi,
+							   nla[NFTA_CHAIN_TYPE],
+							   create);
+			if (type < 0)
+				return -ENOENT;
+		}
+
+		err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
+				       nft_hook_policy);
+		if (err < 0)
+			return err;
+		if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+		    ha[NFTA_HOOK_PRIORITY] == NULL)
+			return -EINVAL;
+
+		hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+		if (hooknum >= afi->nhooks)
+			return -EINVAL;
+
+		hookfn = chain_type[family][type]->fn[hooknum];
+		if (hookfn == NULL)
+			return -EOPNOTSUPP;
+
+		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
+		if (basechain == NULL)
+			return -ENOMEM;
+
+		basechain->type = type;
+		chain = &basechain->chain;
+
+		ops = &basechain->ops;
+		ops->pf		= family;
+		ops->owner	= afi->owner;
+		ops->hooknum	= ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+		ops->priority	= ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+		ops->priv	= chain;
+		ops->hook       = hookfn;
+		if (afi->hooks[ops->hooknum])
+			ops->hook = afi->hooks[ops->hooknum];
+
+		chain->flags |= NFT_BASE_CHAIN;
+
+		if (nla[NFTA_CHAIN_POLICY]) {
+			err = nf_tables_chain_policy(basechain,
+						     nla[NFTA_CHAIN_POLICY]);
+			if (err < 0) {
+				free_percpu(basechain->stats);
+				kfree(basechain);
+				return err;
+			}
+		} else
+			basechain->policy = NF_ACCEPT;
+
+		if (nla[NFTA_CHAIN_COUNTERS]) {
+			err = nf_tables_counters(basechain,
+						 nla[NFTA_CHAIN_COUNTERS]);
+			if (err < 0) {
+				free_percpu(basechain->stats);
+				kfree(basechain);
+				return err;
+			}
+		} else {
+			struct nft_stats __percpu *newstats;
+
+			newstats = alloc_percpu(struct nft_stats);
+			if (newstats == NULL) {
+				kfree(basechain);
+				return -ENOMEM;
+			}
+
+			rcu_assign_pointer(nft_base_chain(chain)->stats,
+					   newstats);
+		}
+	} else {
+		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+		if (chain == NULL)
+			return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&chain->rules);
+	chain->handle = nf_tables_alloc_handle(table);
+	chain->net = net;
+	chain->table = table;
+	nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+	    chain->flags & NFT_BASE_CHAIN) {
+		err = nf_register_hook(&nft_base_chain(chain)->ops);
+		if (err < 0) {
+			free_percpu(basechain->stats);
+			kfree(basechain);
+			return err;
+		}
+	}
+	list_add_tail(&chain->list, &table->chains);
+	table->use++;
+notify:
+	nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
+			       family);
+	return 0;
+}
+
+static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+{
+	struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+
+	BUG_ON(chain->use > 0);
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		free_percpu(nft_base_chain(chain)->stats);
+		kfree(nft_base_chain(chain));
+	} else
+		kfree(chain);
+}
+
+static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
+			      const struct nlmsghdr *nlh,
+			      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_chain *chain;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+	if (IS_ERR(chain))
+		return PTR_ERR(chain);
+
+	if (!list_empty(&chain->rules))
+		return -EBUSY;
+
+	list_del(&chain->list);
+	table->use--;
+
+	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+	    chain->flags & NFT_BASE_CHAIN)
+		nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+	nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
+			       family);
+
+	/* Make sure all rule references are gone before this is released */
+	call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+	return 0;
+}
+
+static void nft_ctx_init(struct nft_ctx *ctx,
+			 const struct sk_buff *skb,
+			 const struct nlmsghdr *nlh,
+			 const struct nft_af_info *afi,
+			 const struct nft_table *table,
+			 const struct nft_chain *chain,
+			 const struct nlattr * const *nla)
+{
+	ctx->net   = sock_net(skb->sk);
+	ctx->skb   = skb;
+	ctx->nlh   = nlh;
+	ctx->afi   = afi;
+	ctx->table = table;
+	ctx->chain = chain;
+	ctx->nla   = nla;
+}
+
+/*
+ * Expressions
+ */
+
+/**
+ *	nft_register_expr - register nf_tables expr type
+ *	@type: expr type
+ *
+ *	Registers the expr type for use with nf_tables. Returns zero on
+ *	success or a negative errno code otherwise.
+ */
+int nft_register_expr(struct nft_expr_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_add_tail(&type->list, &nf_tables_expressions);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_expr);
+
+/**
+ *	nft_unregister_expr - unregister nf_tables expr type
+ *	@type: expr type
+ *
+ *	Unregisters the expr type for use with nf_tables.
+ */
+void nft_unregister_expr(struct nft_expr_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_del(&type->list);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_expr);
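+
+/*
+ * Example expression type (sketch; names illustrative). An expression
+ * module describes its netlink attributes and ops, then registers:
+ *
+ *	static struct nft_expr_type nft_foo_type __read_mostly = {
+ *		.name		= "foo",
+ *		.ops		= &nft_foo_ops,
+ *		.policy		= nft_foo_policy,
+ *		.maxattr	= NFTA_FOO_MAX,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	module init:	return nft_register_expr(&nft_foo_type);
+ *	module exit:	nft_unregister_expr(&nft_foo_type);
+ */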
+
+static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+{
+	const struct nft_expr_type *type;
+
+	list_for_each_entry(type, &nf_tables_expressions, list) {
+		if (!nla_strcmp(nla, type->name))
+			return type;
+	}
+	return NULL;
+}
+
+static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+{
+	const struct nft_expr_type *type;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	type = __nft_expr_type_get(nla);
+	if (type != NULL && try_module_get(type->owner))
+		return type;
+
+#ifdef CONFIG_MODULES
+	if (type == NULL) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nft-expr-%.*s",
+			       nla_len(nla), (char *)nla_data(nla));
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		if (__nft_expr_type_get(nla))
+			return ERR_PTR(-EAGAIN);
+	}
+#endif
+	return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
+	[NFTA_EXPR_NAME]	= { .type = NLA_STRING },
+	[NFTA_EXPR_DATA]	= { .type = NLA_NESTED },
+};
+
+static int nf_tables_fill_expr_info(struct sk_buff *skb,
+				    const struct nft_expr *expr)
+{
+	if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
+		goto nla_put_failure;
+
+	if (expr->ops->dump) {
+		struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
+		if (data == NULL)
+			goto nla_put_failure;
+		if (expr->ops->dump(skb, expr) < 0)
+			goto nla_put_failure;
+		nla_nest_end(skb, data);
+	}
+
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+};
+
+struct nft_expr_info {
+	const struct nft_expr_ops	*ops;
+	struct nlattr			*tb[NFT_EXPR_MAXATTR + 1];
+};
+
+static int nf_tables_expr_parse(const struct nft_ctx *ctx,
+				const struct nlattr *nla,
+				struct nft_expr_info *info)
+{
+	const struct nft_expr_type *type;
+	const struct nft_expr_ops *ops;
+	struct nlattr *tb[NFTA_EXPR_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
+	if (err < 0)
+		return err;
+
+	type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+	if (IS_ERR(type))
+		return PTR_ERR(type);
+
+	if (tb[NFTA_EXPR_DATA]) {
+		err = nla_parse_nested(info->tb, type->maxattr,
+				       tb[NFTA_EXPR_DATA], type->policy);
+		if (err < 0)
+			goto err1;
+	} else
+		memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
+
+	if (type->select_ops != NULL) {
+		ops = type->select_ops(ctx,
+				       (const struct nlattr * const *)info->tb);
+		if (IS_ERR(ops)) {
+			err = PTR_ERR(ops);
+			goto err1;
+		}
+	} else
+		ops = type->ops;
+
+	info->ops = ops;
+	return 0;
+
+err1:
+	module_put(type->owner);
+	return err;
+}
+
+static int nf_tables_newexpr(const struct nft_ctx *ctx,
+			     const struct nft_expr_info *info,
+			     struct nft_expr *expr)
+{
+	const struct nft_expr_ops *ops = info->ops;
+	int err;
+
+	expr->ops = ops;
+	if (ops->init) {
+		err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
+		if (err < 0)
+			goto err1;
+	}
+
+	return 0;
+
+err1:
+	expr->ops = NULL;
+	return err;
+}
+
+static void nf_tables_expr_destroy(struct nft_expr *expr)
+{
+	if (expr->ops->destroy)
+		expr->ops->destroy(expr);
+	module_put(expr->ops->type->owner);
+}
+
+/*
+ * Rules
+ */
+
+static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
+						u64 handle)
+{
+	struct nft_rule *rule;
+
+	// FIXME: this sucks
+	list_for_each_entry(rule, &chain->rules, list) {
+		if (handle == rule->handle)
+			return rule;
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
+					      const struct nlattr *nla)
+{
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+}
+
+static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
+	[NFTA_RULE_TABLE]	= { .type = NLA_STRING },
+	[NFTA_RULE_CHAIN]	= { .type = NLA_STRING,
+				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
+	[NFTA_RULE_HANDLE]	= { .type = NLA_U64 },
+	[NFTA_RULE_EXPRESSIONS]	= { .type = NLA_NESTED },
+	[NFTA_RULE_COMPAT]	= { .type = NLA_NESTED },
+	[NFTA_RULE_POSITION]	= { .type = NLA_U64 },
+};
+
+static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
+				    int event, u32 flags, int family,
+				    const struct nft_table *table,
+				    const struct nft_chain *chain,
+				    const struct nft_rule *rule)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	const struct nft_expr *expr, *next;
+	struct nlattr *list;
+	const struct nft_rule *prule;
+	int type = event | NFNL_SUBSYS_NFTABLES << 8;
+
+	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
+			flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= 0;
+
+	if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
+		goto nla_put_failure;
+	if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
+		goto nla_put_failure;
+	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+		goto nla_put_failure;
+
+	if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
+		prule = list_entry(rule->list.prev, struct nft_rule, list);
+		if (nla_put_be64(skb, NFTA_RULE_POSITION,
+				 cpu_to_be64(prule->handle)))
+			goto nla_put_failure;
+	}
+
+	list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
+	if (list == NULL)
+		goto nla_put_failure;
+	nft_rule_for_each_expr(expr, next, rule) {
+		struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
+		if (elem == NULL)
+			goto nla_put_failure;
+		if (nf_tables_fill_expr_info(skb, expr) < 0)
+			goto nla_put_failure;
+		nla_nest_end(skb, elem);
+	}
+	nla_nest_end(skb, list);
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+static int nf_tables_rule_notify(const struct sk_buff *oskb,
+				 const struct nlmsghdr *nlh,
+				 const struct nft_table *table,
+				 const struct nft_chain *chain,
+				 const struct nft_rule *rule,
+				 int event, u32 flags, int family)
+{
+	struct sk_buff *skb;
+	u32 portid = NETLINK_CB(oskb).portid;
+	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+	u32 seq = nlh->nlmsg_seq;
+	bool report;
+	int err;
+
+	report = nlmsg_report(nlh);
+	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+		return 0;
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
+				       family, table, chain, rule);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+			     GFP_KERNEL);
+err:
+	if (err < 0)
+		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	return err;
+}
+
+static inline bool
+nft_rule_is_active(struct net *net, const struct nft_rule *rule)
+{
+	return (rule->genmask & (1 << net->nft.gencursor)) == 0;
+}
+
+static inline int gencursor_next(struct net *net)
+{
+	return net->nft.gencursor+1 == 1 ? 1 : 0;
+}
+
+static inline int
+nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
+{
+	return (rule->genmask & (1 << gencursor_next(net))) == 0;
+}
+
+static inline void
+nft_rule_activate_next(struct net *net, struct nft_rule *rule)
+{
+	/* Now inactive, will be active in the future */
+	rule->genmask = (1 << net->nft.gencursor);
+}
+
+static inline void
+nft_rule_disactivate_next(struct net *net, struct nft_rule *rule)
+{
+	rule->genmask = (1 << gencursor_next(net));
+}
+
+static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+{
+	rule->genmask = 0;
+}
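+
+/*
+ * Two-generation rule protocol, summarized: each rule carries a 1-bit
+ * genmask indexed by net->nft.gencursor (0 or 1); a clear bit means
+ * "active in that generation". Updates are staged against the next
+ * generation and become visible atomically when the commit path flips
+ * the cursor:
+ *
+ *	genmask == 0			 active in both generations
+ *	genmask == 1 << gencursor	 inactive now, active after commit
+ *	genmask == 1 << gencursor_next() active now, gone after commit
+ */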
+
+static int nf_tables_dump_rules(struct sk_buff *skb,
+				struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	const struct nft_chain *chain;
+	const struct nft_rule *rule;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	u8 genctr = ACCESS_ONCE(net->nft.genctr);
+	u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
+
+	list_for_each_entry(afi, &net->nft.af_info, list) {
+		if (family != NFPROTO_UNSPEC && family != afi->family)
+			continue;
+
+		list_for_each_entry(table, &afi->tables, list) {
+			list_for_each_entry(chain, &table->chains, list) {
+				list_for_each_entry(rule, &chain->rules, list) {
+					if (!nft_rule_is_active(net, rule))
+						goto cont;
+					if (idx < s_idx)
+						goto cont;
+					if (idx > s_idx)
+						memset(&cb->args[1], 0,
+						       sizeof(cb->args) - sizeof(cb->args[0]));
+					if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid,
+								      cb->nlh->nlmsg_seq,
+								      NFT_MSG_NEWRULE,
+								      NLM_F_MULTI | NLM_F_APPEND,
+								      afi->family, table, chain, rule) < 0)
+						goto done;
+cont:
+					idx++;
+				}
+			}
+		}
+	}
+done:
+	/* Invalidate this dump, a transition to the new generation happened */
+	if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
+		return -EBUSY;
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
+			     const struct nlmsghdr *nlh,
+			     const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	const struct nft_chain *chain;
+	const struct nft_rule *rule;
+	struct sk_buff *skb2;
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_rules,
+		};
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+	if (IS_ERR(chain))
+		return PTR_ERR(chain);
+
+	rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+	if (IS_ERR(rule))
+		return PTR_ERR(rule);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid,
+				       nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
+				       family, table, chain, rule);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+{
+	struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
+	struct nft_expr *expr;
+
+	/*
+	 * Careful: some expressions might not be initialized in case this
+	 * is called on error from nf_tables_newrule().
+	 */
+	expr = nft_expr_first(rule);
+	while (expr->ops && expr != nft_expr_last(rule)) {
+		nf_tables_expr_destroy(expr);
+		expr = nft_expr_next(expr);
+	}
+	kfree(rule);
+}
+
+static void nf_tables_rule_destroy(struct nft_rule *rule)
+{
+	call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
+}
+
+#define NFT_RULE_MAXEXPRS	128
+
+static struct nft_expr_info *info;
+
+static struct nft_rule_trans *
+nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
+{
+	struct nft_rule_trans *rupd;
+
+	rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
+	if (rupd == NULL)
+		return NULL;
+
+	rupd->chain = ctx->chain;
+	rupd->table = ctx->table;
+	rupd->rule = rule;
+	rupd->family = ctx->afi->family;
+	rupd->nlh = ctx->nlh;
+	list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+
+	return rupd;
+}
+
+static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
+			     const struct nlmsghdr *nlh,
+			     const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	struct net *net = sock_net(skb->sk);
+	struct nft_table *table;
+	struct nft_chain *chain;
+	struct nft_rule *rule, *old_rule = NULL;
+	struct nft_rule_trans *repl = NULL;
+	struct nft_expr *expr;
+	struct nft_ctx ctx;
+	struct nlattr *tmp;
+	unsigned int size, i, n;
+	int err, rem;
+	bool create;
+	u64 handle, pos_handle;
+
+	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+	if (IS_ERR(chain))
+		return PTR_ERR(chain);
+
+	if (nla[NFTA_RULE_HANDLE]) {
+		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+		rule = __nf_tables_rule_lookup(chain, handle);
+		if (IS_ERR(rule))
+			return PTR_ERR(rule);
+
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+		if (nlh->nlmsg_flags & NLM_F_REPLACE)
+			old_rule = rule;
+		else
+			return -EOPNOTSUPP;
+	} else {
+		if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
+			return -EINVAL;
+		handle = nf_tables_alloc_handle(table);
+	}
+
+	if (nla[NFTA_RULE_POSITION]) {
+		if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+			return -EOPNOTSUPP;
+
+		pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+		old_rule = __nf_tables_rule_lookup(chain, pos_handle);
+		if (IS_ERR(old_rule))
+			return PTR_ERR(old_rule);
+	}
+
+	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+	n = 0;
+	size = 0;
+	if (nla[NFTA_RULE_EXPRESSIONS]) {
+		nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
+			err = -EINVAL;
+			if (nla_type(tmp) != NFTA_LIST_ELEM)
+				goto err1;
+			if (n == NFT_RULE_MAXEXPRS)
+				goto err1;
+			err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+			if (err < 0)
+				goto err1;
+			size += info[n].ops->size;
+			n++;
+		}
+	}
+
+	err = -ENOMEM;
+	rule = kzalloc(sizeof(*rule) + size, GFP_KERNEL);
+	if (rule == NULL)
+		goto err1;
+
+	nft_rule_activate_next(net, rule);
+
+	rule->handle = handle;
+	rule->dlen   = size;
+
+	expr = nft_expr_first(rule);
+	for (i = 0; i < n; i++) {
+		err = nf_tables_newexpr(&ctx, &info[i], expr);
+		if (err < 0)
+			goto err2;
+		info[i].ops = NULL;
+		expr = nft_expr_next(expr);
+	}
+
+	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+		if (nft_rule_is_active_next(net, old_rule)) {
+			repl = nf_tables_trans_add(old_rule, &ctx);
+			if (repl == NULL) {
+				err = -ENOMEM;
+				goto err2;
+			}
+			nft_rule_disactivate_next(net, old_rule);
+			list_add_tail(&rule->list, &old_rule->list);
+		} else {
+			err = -ENOENT;
+			goto err2;
+		}
+	} else if (nlh->nlmsg_flags & NLM_F_APPEND) {
+		if (old_rule)
+			list_add_rcu(&rule->list, &old_rule->list);
+		else
+			list_add_tail_rcu(&rule->list, &chain->rules);
+	} else {
+		if (old_rule)
+			list_add_tail_rcu(&rule->list, &old_rule->list);
+		else
+			list_add_rcu(&rule->list, &chain->rules);
+	}
+
+	if (nf_tables_trans_add(rule, &ctx) == NULL) {
+		err = -ENOMEM;
+		goto err3;
+	}
+	return 0;
+
+err3:
+	list_del_rcu(&rule->list);
+	if (repl) {
+		list_del_rcu(&repl->rule->list);
+		list_del(&repl->list);
+		nft_rule_clear(net, repl->rule);
+		kfree(repl);
+	}
+err2:
+	nf_tables_rule_destroy(rule);
+err1:
+	for (i = 0; i < n; i++) {
+		if (info[i].ops != NULL)
+			module_put(info[i].ops->type->owner);
+	}
+	return err;
+}
+
+static int
+nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+	/* You cannot delete the same rule twice */
+	if (nft_rule_is_active_next(ctx->net, rule)) {
+		if (nf_tables_trans_add(rule, ctx) == NULL)
+			return -ENOMEM;
+		nft_rule_disactivate_next(ctx->net, rule);
+		return 0;
+	}
+	return -ENOENT;
+}
+
+static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
+			     const struct nlmsghdr *nlh,
+			     const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	struct net *net = sock_net(skb->sk);
+	const struct nft_table *table;
+	struct nft_chain *chain;
+	struct nft_rule *rule, *tmp;
+	int family = nfmsg->nfgen_family, err = 0;
+	struct nft_ctx ctx;
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+	if (IS_ERR(chain))
+		return PTR_ERR(chain);
+
+	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+	if (nla[NFTA_RULE_HANDLE]) {
+		rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+		if (IS_ERR(rule))
+			return PTR_ERR(rule);
+
+		err = nf_tables_delrule_one(&ctx, rule);
+	} else {
+		/* Remove all rules in this chain */
+		list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
+			err = nf_tables_delrule_one(&ctx, rule);
+			if (err < 0)
+				break;
+		}
+	}
+
+	return err;
+}
+
+static int nf_tables_commit(struct sk_buff *skb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct nft_rule_trans *rupd, *tmp;
+
+	/* Bump generation counter, invalidate any dump in progress */
+	net->nft.genctr++;
+
+	/* A new generation has just started */
+	net->nft.gencursor = gencursor_next(net);
+
+	/* Make sure all packets have left the previous generation before
+	 * purging old rules.
+	 */
+	synchronize_rcu();
+
+	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+		/* Delete this rule from the dirty list */
+		list_del(&rupd->list);
+
+		/* This rule was inactive in the past and just became active.
+		 * Clear the next-generation bit of the genmask since its
+		 * meaning has changed: that bit now describes the future.
+		 */
+		if (nft_rule_is_active(net, rupd->rule)) {
+			nft_rule_clear(net, rupd->rule);
+			nf_tables_rule_notify(skb, rupd->nlh, rupd->table,
+					      rupd->chain, rupd->rule,
+					      NFT_MSG_NEWRULE, 0,
+					      rupd->family);
+			kfree(rupd);
+			continue;
+		}
+
+		/* This rule is in the past, get rid of it */
+		list_del_rcu(&rupd->rule->list);
+		nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
+				      rupd->rule, NFT_MSG_DELRULE, 0,
+				      rupd->family);
+		nf_tables_rule_destroy(rupd->rule);
+		kfree(rupd);
+	}
+
+	return 0;
+}
+
+static int nf_tables_abort(struct sk_buff *skb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct nft_rule_trans *rupd, *tmp;
+
+	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+		/* Delete all rules from the dirty list */
+		list_del(&rupd->list);
+
+		if (!nft_rule_is_active_next(net, rupd->rule)) {
+			nft_rule_clear(net, rupd->rule);
+			kfree(rupd);
+			continue;
+		}
+
+		/* This rule is inactive, get rid of it */
+		list_del_rcu(&rupd->rule->list);
+		nf_tables_rule_destroy(rupd->rule);
+		kfree(rupd);
+	}
+	return 0;
+}
+
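The commit/abort pair above is the consumer side of a two-bit generation
scheme: each rule carries a genmask in which a set bit means "inactive in
that generation", so an added rule stays invisible to packets until the
cursor flips, and a deleted rule keeps matching until then. A minimal
userspace sketch of that bit logic (illustrative only; the names are
assumptions, not the kernel's inline helpers):

#include <assert.h>
#include <stdint.h>

struct toy_rule { uint8_t genmask; };	/* bit g set => inactive in gen g */
static unsigned int gencursor;		/* current generation: 0 or 1 */

static unsigned int next_gen(void) { return !gencursor; }

static int is_active(const struct toy_rule *r, unsigned int g)
{
	return (r->genmask & (1 << g)) == 0;
}

int main(void)
{
	/* A new rule is born inactive now, active in the next generation. */
	struct toy_rule r = { .genmask = 1 << gencursor };

	assert(!is_active(&r, gencursor));	/* packets don't see it yet */
	assert(is_active(&r, next_gen()));	/* the next generation will */

	gencursor = next_gen();	/* commit: flip the cursor ... */
	r.genmask = 0;		/* ... then clear, as nft_rule_clear() does */
	assert(is_active(&r, gencursor));
	return 0;
}
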
+/*
+ * Sets
+ */
+
+static LIST_HEAD(nf_tables_set_ops);
+
+int nft_register_set(struct nft_set_ops *ops)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_add_tail(&ops->list, &nf_tables_set_ops);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_set);
+
+void nft_unregister_set(struct nft_set_ops *ops)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_del(&ops->list);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_set);
+
+static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+{
+	const struct nft_set_ops *ops;
+	u32 features;
+
+#ifdef CONFIG_MODULES
+	if (list_empty(&nf_tables_set_ops)) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nft-set");
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		/* The lock was dropped for the module load; return -EAGAIN
+		 * so the batch code replays this message from scratch.
+		 */
+		if (!list_empty(&nf_tables_set_ops))
+			return ERR_PTR(-EAGAIN);
+	}
+#endif
+	features = 0;
+	if (nla[NFTA_SET_FLAGS] != NULL) {
+		features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+		features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+	}
+
+	/* FIXME: implement selection properly */
+	list_for_each_entry(ops, &nf_tables_set_ops, list) {
+		if ((ops->features & features) != features)
+			continue;
+		if (!try_module_get(ops->owner))
+			continue;
+		return ops;
+	}
+
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
+	[NFTA_SET_TABLE]		= { .type = NLA_STRING },
+	[NFTA_SET_NAME]			= { .type = NLA_STRING },
+	[NFTA_SET_FLAGS]		= { .type = NLA_U32 },
+	[NFTA_SET_KEY_TYPE]		= { .type = NLA_U32 },
+	[NFTA_SET_KEY_LEN]		= { .type = NLA_U32 },
+	[NFTA_SET_DATA_TYPE]		= { .type = NLA_U32 },
+	[NFTA_SET_DATA_LEN]		= { .type = NLA_U32 },
+};
+
+static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
+				     const struct sk_buff *skb,
+				     const struct nlmsghdr *nlh,
+				     const struct nlattr * const nla[])
+{
+	struct net *net = sock_net(skb->sk);
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table = NULL;
+
+	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	if (nla[NFTA_SET_TABLE] != NULL) {
+		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+		if (IS_ERR(table))
+			return PTR_ERR(table);
+	}
+
+	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+	return 0;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+				     const struct nlattr *nla)
+{
+	struct nft_set *set;
+
+	if (nla == NULL)
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(set, &table->sets, list) {
+		if (!nla_strcmp(nla, set->name))
+			return set;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+				    const char *name)
+{
+	const struct nft_set *i;
+	const char *p;
+	unsigned long *inuse;
+	unsigned int n = 0;
+
+	p = strnchr(name, IFNAMSIZ, '%');
+	if (p != NULL) {
+		if (p[1] != 'd' || strchr(p + 2, '%'))
+			return -EINVAL;
+
+		inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+		if (inuse == NULL)
+			return -ENOMEM;
+
+		list_for_each_entry(i, &ctx->table->sets, list) {
+			if (!sscanf(i->name, name, &n))
+				continue;
+			if (n >= BITS_PER_BYTE * PAGE_SIZE)
+				continue;
+			set_bit(n, inuse);
+		}
+
+		n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+		free_page((unsigned long)inuse);
+	}
+
+	snprintf(set->name, sizeof(set->name), name, n);
+	list_for_each_entry(i, &ctx->table->sets, list) {
+		if (!strcmp(set->name, i->name))
+			return -ENFILE;
+	}
+	return 0;
+}
+
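nf_tables_set_alloc_name() above treats the requested name as a scanf/printf
template: a single "%d" is replaced with the lowest index not already taken
in the table, tracked in a one-page bitmap. With "map0" and "map1" present,
the template "map%d" yields "map2". A rough standalone sketch of the index
scan (simplified: a 64-bit toy bitmap instead of a page, and
first_free_index() is an illustrative helper, not a kernel function):

#include <stdio.h>
#include <string.h>

static int first_free_index(const char *tmpl, const char *existing[], int cnt)
{
	unsigned long inuse = 0;	/* toy bitmap for indices 0..63 */
	int i, n;

	for (i = 0; i < cnt; i++) {
		if (sscanf(existing[i], tmpl, &n) == 1 && n >= 0 && n < 64)
			inuse |= 1UL << n;
	}
	for (n = 0; n < 64; n++)
		if (!(inuse & (1UL << n)))
			return n;
	return -1;
}

int main(void)
{
	const char *names[] = { "map0", "map1", "map4" };
	char buf[16];

	snprintf(buf, sizeof(buf), "map%d",
		 first_free_index("map%d", names, 3));
	printf("%s\n", buf);	/* prints "map2" */
	return 0;
}
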
+static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+			      const struct nft_set *set, u16 event, u16 flags)
+{
+	struct nfgenmsg *nfmsg;
+	struct nlmsghdr *nlh;
+	u32 portid = NETLINK_CB(ctx->skb).portid;
+	u32 seq = ctx->nlh->nlmsg_seq;
+
+	event |= NFNL_SUBSYS_NFTABLES << 8;
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+			flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= ctx->afi->family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= 0;
+
+	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+		goto nla_put_failure;
+	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+		goto nla_put_failure;
+	if (set->flags != 0)
+		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+			goto nla_put_failure;
+
+	if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
+		goto nla_put_failure;
+	if (set->flags & NFT_SET_MAP) {
+		if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
+			goto nla_put_failure;
+	}
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+static int nf_tables_set_notify(const struct nft_ctx *ctx,
+				const struct nft_set *set,
+				int event)
+{
+	struct sk_buff *skb;
+	u32 portid = NETLINK_CB(ctx->skb).portid;
+	bool report;
+	int err;
+
+	report = nlmsg_report(ctx->nlh);
+	if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+		return 0;
+
+	err = -ENOBUFS;
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_set(skb, ctx, set, event, 0);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
+			     GFP_KERNEL);
+err:
+	if (err < 0)
+		nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
+	return err;
+}
+
+static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
+				     struct netlink_callback *cb)
+{
+	const struct nft_set *set;
+	unsigned int idx = 0, s_idx = cb->args[0];
+
+	if (cb->args[1])
+		return skb->len;
+
+	list_for_each_entry(set, &ctx->table->sets, list) {
+		if (idx < s_idx)
+			goto cont;
+		if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+				       NLM_F_MULTI) < 0) {
+			cb->args[0] = idx;
+			goto done;
+		}
+cont:
+		idx++;
+	}
+	cb->args[1] = 1;
+done:
+	return skb->len;
+}
+
+static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	const struct nft_set *set;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
+
+	if (cb->args[1])
+		return skb->len;
+
+	list_for_each_entry(table, &ctx->afi->tables, list) {
+		if (cur_table && cur_table != table)
+			continue;
+
+		ctx->table = table;
+		list_for_each_entry(set, &ctx->table->sets, list) {
+			if (idx < s_idx)
+				goto cont;
+			if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+					       NLM_F_MULTI) < 0) {
+				cb->args[0] = idx;
+				cb->args[2] = (unsigned long) table;
+				goto done;
+			}
+cont:
+			idx++;
+		}
+	}
+	cb->args[1] = 1;
+done:
+	return skb->len;
+}
+
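Both dump helpers above use the netlink callback scratch space as a resume
cursor: cb->args[0] counts sets already emitted, cb->args[1] flags a finished
dump, and cb->args[2] pins the table being walked, so a dump that overflows
one skb picks up exactly where it stopped. A compact model of that
resume-by-index pattern (illustrative only, no netlink involved):

#include <stdio.h>

/* Emit up to 'room' items per buffer, resuming from *cursor. */
static int dump_chunk(int nitems, int room, long *cursor, int *done)
{
	int idx, emitted = 0;

	if (*done)
		return 0;
	for (idx = 0; idx < nitems; idx++) {
		if (idx < *cursor)
			continue;	/* sent in an earlier chunk */
		if (emitted == room) {
			*cursor = idx;	/* buffer full: record resume point */
			return emitted;
		}
		emitted++;
	}
	*done = 1;
	return emitted;
}

int main(void)
{
	long cursor = 0;
	int done = 0, n;

	while ((n = dump_chunk(5, 2, &cursor, &done)) > 0)
		printf("chunk of %d, cursor=%ld\n", n, cursor);
	return 0;
}
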
+static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	struct nlattr *nla[NFTA_SET_MAX + 1];
+	struct nft_ctx ctx;
+	int err, ret;
+
+	err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
+			  nft_set_policy);
+	if (err < 0)
+		return err;
+
+	err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+	if (err < 0)
+		return err;
+
+	if (ctx.table == NULL)
+		ret = nf_tables_dump_sets_all(&ctx, skb, cb);
+	else
+		ret = nf_tables_dump_sets_table(&ctx, skb, cb);
+
+	return ret;
+}
+
+static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+			    const struct nlmsghdr *nlh,
+			    const struct nlattr * const nla[])
+{
+	const struct nft_set *set;
+	struct nft_ctx ctx;
+	struct sk_buff *skb2;
+	int err;
+
+	/* Verify existence before starting dump */
+	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+	if (err < 0)
+		return err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_sets,
+		};
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
+			    const struct nlmsghdr *nlh,
+			    const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_set_ops *ops;
+	const struct nft_af_info *afi;
+	struct net *net = sock_net(skb->sk);
+	struct nft_table *table;
+	struct nft_set *set;
+	struct nft_ctx ctx;
+	char name[IFNAMSIZ];
+	unsigned int size;
+	bool create;
+	u32 ktype, klen, dlen, dtype, flags;
+	int err;
+
+	if (nla[NFTA_SET_TABLE] == NULL ||
+	    nla[NFTA_SET_NAME] == NULL ||
+	    nla[NFTA_SET_KEY_LEN] == NULL)
+		return -EINVAL;
+
+	ktype = NFT_DATA_VALUE;
+	if (nla[NFTA_SET_KEY_TYPE] != NULL) {
+		ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+		if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+			return -EINVAL;
+	}
+
+	klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+	if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+		return -EINVAL;
+
+	flags = 0;
+	if (nla[NFTA_SET_FLAGS] != NULL) {
+		flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+		if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
+			      NFT_SET_INTERVAL | NFT_SET_MAP))
+			return -EINVAL;
+	}
+
+	dtype = 0;
+	dlen  = 0;
+	if (nla[NFTA_SET_DATA_TYPE] != NULL) {
+		if (!(flags & NFT_SET_MAP))
+			return -EINVAL;
+
+		dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+		if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+		    dtype != NFT_DATA_VERDICT)
+			return -EINVAL;
+
+		if (dtype != NFT_DATA_VERDICT) {
+			if (nla[NFTA_SET_DATA_LEN] == NULL)
+				return -EINVAL;
+			dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+			if (dlen == 0 ||
+			    dlen > FIELD_SIZEOF(struct nft_data, data))
+				return -EINVAL;
+		} else
+			dlen = sizeof(struct nft_data);
+	} else if (flags & NFT_SET_MAP)
+		return -EINVAL;
+
+	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+
+	set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
+	if (IS_ERR(set)) {
+		if (PTR_ERR(set) != -ENOENT)
+			return PTR_ERR(set);
+		set = NULL;
+	}
+
+	if (set != NULL) {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+		if (nlh->nlmsg_flags & NLM_F_REPLACE)
+			return -EOPNOTSUPP;
+		return 0;
+	}
+
+	if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+		return -ENOENT;
+
+	ops = nft_select_set_ops(nla);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
+
+	size = 0;
+	if (ops->privsize != NULL)
+		size = ops->privsize(nla);
+
+	err = -ENOMEM;
+	set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
+	if (set == NULL)
+		goto err1;
+
+	nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
+	err = nf_tables_set_alloc_name(&ctx, set, name);
+	if (err < 0)
+		goto err2;
+
+	INIT_LIST_HEAD(&set->bindings);
+	set->ops   = ops;
+	set->ktype = ktype;
+	set->klen  = klen;
+	set->dtype = dtype;
+	set->dlen  = dlen;
+	set->flags = flags;
+
+	err = ops->init(set, nla);
+	if (err < 0)
+		goto err2;
+
+	list_add_tail(&set->list, &table->sets);
+	nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
+	return 0;
+
+err2:
+	kfree(set);
+err1:
+	module_put(ops->owner);
+	return err;
+}
+
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	list_del(&set->list);
+	if (!(set->flags & NFT_SET_ANONYMOUS))
+		nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+
+	set->ops->destroy(set);
+	module_put(set->ops->owner);
+	kfree(set);
+}
+
+static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
+			    const struct nlmsghdr *nlh,
+			    const struct nlattr * const nla[])
+{
+	struct nft_set *set;
+	struct nft_ctx ctx;
+	int err;
+
+	if (nla[NFTA_SET_TABLE] == NULL)
+		return -EINVAL;
+
+	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+	if (err < 0)
+		return err;
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+	if (!list_empty(&set->bindings))
+		return -EBUSY;
+
+	nf_tables_set_destroy(&ctx, set);
+	return 0;
+}
+
+static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+					const struct nft_set *set,
+					const struct nft_set_iter *iter,
+					const struct nft_set_elem *elem)
+{
+	enum nft_registers dreg;
+
+	dreg = nft_type_to_reg(set->dtype);
+	return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+}
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+		       struct nft_set_binding *binding)
+{
+	struct nft_set_binding *i;
+	struct nft_set_iter iter;
+
+	if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+		return -EBUSY;
+
+	if (set->flags & NFT_SET_MAP) {
+		/* If the set is already bound to the same chain all
+		 * jumps are already validated for that chain.
+		 */
+		list_for_each_entry(i, &set->bindings, list) {
+			if (i->chain == binding->chain)
+				goto bind;
+		}
+
+		iter.skip	= 0;
+		iter.count	= 0;
+		iter.err	= 0;
+		iter.fn		= nf_tables_bind_check_setelem;
+
+		set->ops->walk(ctx, set, &iter);
+		if (iter.err < 0) {
+			/* Destroy anonymous sets if binding fails */
+			if (set->flags & NFT_SET_ANONYMOUS)
+				nf_tables_set_destroy(ctx, set);
+
+			return iter.err;
+		}
+	}
+bind:
+	binding->chain = ctx->chain;
+	list_add_tail(&binding->list, &set->bindings);
+	return 0;
+}
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+			  struct nft_set_binding *binding)
+{
+	list_del(&binding->list);
+
+	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+		nf_tables_set_destroy(ctx, set);
+}
+
+/*
+ * Set elements
+ */
+
+static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
+	[NFTA_SET_ELEM_KEY]		= { .type = NLA_NESTED },
+	[NFTA_SET_ELEM_DATA]		= { .type = NLA_NESTED },
+	[NFTA_SET_ELEM_FLAGS]		= { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
+	[NFTA_SET_ELEM_LIST_TABLE]	= { .type = NLA_STRING },
+	[NFTA_SET_ELEM_LIST_SET]	= { .type = NLA_STRING },
+	[NFTA_SET_ELEM_LIST_ELEMENTS]	= { .type = NLA_NESTED },
+};
+
+static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
+				      const struct sk_buff *skb,
+				      const struct nlmsghdr *nlh,
+				      const struct nlattr * const nla[])
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	struct net *net = sock_net(skb->sk);
+
+	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+	return 0;
+}
+
+static int nf_tables_fill_setelem(struct sk_buff *skb,
+				  const struct nft_set *set,
+				  const struct nft_set_elem *elem)
+{
+	unsigned char *b = skb_tail_pointer(skb);
+	struct nlattr *nest;
+
+	nest = nla_nest_start(skb, NFTA_LIST_ELEM);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
+			  set->klen) < 0)
+		goto nla_put_failure;
+
+	if (set->flags & NFT_SET_MAP &&
+	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
+	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+			  set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+			  set->dlen) < 0)
+		goto nla_put_failure;
+
+	if (elem->flags != 0)
+		if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
+			goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -EMSGSIZE;
+}
+
+struct nft_set_dump_args {
+	const struct netlink_callback	*cb;
+	struct nft_set_iter		iter;
+	struct sk_buff			*skb;
+};
+
+static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+				  const struct nft_set *set,
+				  const struct nft_set_iter *iter,
+				  const struct nft_set_elem *elem)
+{
+	struct nft_set_dump_args *args;
+
+	args = container_of(iter, struct nft_set_dump_args, iter);
+	return nf_tables_fill_setelem(args->skb, set, elem);
+}
+
+static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct nft_set *set;
+	struct nft_set_dump_args args;
+	struct nft_ctx ctx;
+	struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+	struct nfgenmsg *nfmsg;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest;
+	u32 portid, seq;
+	int event, err;
+
+	nfmsg = nlmsg_data(cb->nlh);
+	err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
+			  nft_set_elem_list_policy);
+	if (err < 0)
+		return err;
+
+	err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+	if (err < 0)
+		return err;
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+
+	event  = NFT_MSG_NEWSETELEM;
+	event |= NFNL_SUBSYS_NFTABLES << 8;
+	portid = NETLINK_CB(cb->skb).portid;
+	seq    = cb->nlh->nlmsg_seq;
+
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+			NLM_F_MULTI);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = NFPROTO_UNSPEC;
+	nfmsg->version      = NFNETLINK_V0;
+	nfmsg->res_id       = 0;
+
+	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+		goto nla_put_failure;
+	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	args.cb		= cb;
+	args.skb	= skb;
+	args.iter.skip	= cb->args[0];
+	args.iter.count	= 0;
+	args.iter.err   = 0;
+	args.iter.fn	= nf_tables_dump_setelem;
+	set->ops->walk(&ctx, set, &args.iter);
+
+	nla_nest_end(skb, nest);
+	nlmsg_end(skb, nlh);
+
+	if (args.iter.err && args.iter.err != -EMSGSIZE)
+		return args.iter.err;
+	if (args.iter.count == cb->args[0])
+		return 0;
+
+	cb->args[0] = args.iter.count;
+	return skb->len;
+
+nla_put_failure:
+	return -ENOSPC;
+}
+
+static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
+				const struct nlmsghdr *nlh,
+				const struct nlattr * const nla[])
+{
+	const struct nft_set *set;
+	struct nft_ctx ctx;
+	int err;
+
+	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+	if (err < 0)
+		return err;
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_set,
+		};
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+	return -EOPNOTSUPP;
+}
+
+static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+			    const struct nlattr *attr)
+{
+	struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+	struct nft_data_desc d1, d2;
+	struct nft_set_elem elem;
+	struct nft_set_binding *binding;
+	enum nft_registers dreg;
+	int err;
+
+	err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+			       nft_set_elem_policy);
+	if (err < 0)
+		return err;
+
+	if (nla[NFTA_SET_ELEM_KEY] == NULL)
+		return -EINVAL;
+
+	elem.flags = 0;
+	if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
+		elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+		if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+			return -EINVAL;
+	}
+
+	if (set->flags & NFT_SET_MAP) {
+		if (nla[NFTA_SET_ELEM_DATA] == NULL &&
+		    !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+			return -EINVAL;
+	} else {
+		if (nla[NFTA_SET_ELEM_DATA] != NULL)
+			return -EINVAL;
+	}
+
+	err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
+	if (err < 0)
+		goto err1;
+	err = -EINVAL;
+	if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
+		goto err2;
+
+	err = -EEXIST;
+	if (set->ops->get(set, &elem) == 0)
+		goto err2;
+
+	if (nla[NFTA_SET_ELEM_DATA] != NULL) {
+		err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+		if (err < 0)
+			goto err2;
+
+		err = -EINVAL;
+		if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
+			goto err3;
+
+		dreg = nft_type_to_reg(set->dtype);
+		list_for_each_entry(binding, &set->bindings, list) {
+			struct nft_ctx bind_ctx = {
+				.afi	= ctx->afi,
+				.table	= ctx->table,
+				.chain	= binding->chain,
+			};
+
+			err = nft_validate_data_load(&bind_ctx, dreg,
+						     &elem.data, d2.type);
+			if (err < 0)
+				goto err3;
+		}
+	}
+
+	err = set->ops->insert(set, &elem);
+	if (err < 0)
+		goto err3;
+
+	return 0;
+
+err3:
+	if (nla[NFTA_SET_ELEM_DATA] != NULL)
+		nft_data_uninit(&elem.data, d2.type);
+err2:
+	nft_data_uninit(&elem.key, d1.type);
+err1:
+	return err;
+}
+
+static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
+				const struct nlmsghdr *nlh,
+				const struct nlattr * const nla[])
+{
+	const struct nlattr *attr;
+	struct nft_set *set;
+	struct nft_ctx ctx;
+	int rem, err;
+
+	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+	if (err < 0)
+		return err;
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+		return -EBUSY;
+
+	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+		err = nft_add_set_elem(&ctx, set, attr);
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+			   const struct nlattr *attr)
+{
+	struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+	struct nft_data_desc desc;
+	struct nft_set_elem elem;
+	int err;
+
+	err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+			       nft_set_elem_policy);
+	if (err < 0)
+		goto err1;
+
+	err = -EINVAL;
+	if (nla[NFTA_SET_ELEM_KEY] == NULL)
+		goto err1;
+
+	err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
+	if (err < 0)
+		goto err1;
+
+	err = -EINVAL;
+	if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+		goto err2;
+
+	err = set->ops->get(set, &elem);
+	if (err < 0)
+		goto err2;
+
+	set->ops->remove(set, &elem);
+
+	nft_data_uninit(&elem.key, NFT_DATA_VALUE);
+	if (set->flags & NFT_SET_MAP)
+		nft_data_uninit(&elem.data, set->dtype);
+
+err2:
+	nft_data_uninit(&elem.key, desc.type);
+err1:
+	return err;
+}
+
+static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
+				const struct nlmsghdr *nlh,
+				const struct nlattr * const nla[])
+{
+	const struct nlattr *attr;
+	struct nft_set *set;
+	struct nft_ctx ctx;
+	int rem, err;
+
+	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+	if (err < 0)
+		return err;
+
+	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+		return -EBUSY;
+
+	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+		err = nft_del_setelem(&ctx, set, attr);
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+	[NFT_MSG_NEWTABLE] = {
+		.call		= nf_tables_newtable,
+		.attr_count	= NFTA_TABLE_MAX,
+		.policy		= nft_table_policy,
+	},
+	[NFT_MSG_GETTABLE] = {
+		.call		= nf_tables_gettable,
+		.attr_count	= NFTA_TABLE_MAX,
+		.policy		= nft_table_policy,
+	},
+	[NFT_MSG_DELTABLE] = {
+		.call		= nf_tables_deltable,
+		.attr_count	= NFTA_TABLE_MAX,
+		.policy		= nft_table_policy,
+	},
+	[NFT_MSG_NEWCHAIN] = {
+		.call		= nf_tables_newchain,
+		.attr_count	= NFTA_CHAIN_MAX,
+		.policy		= nft_chain_policy,
+	},
+	[NFT_MSG_GETCHAIN] = {
+		.call		= nf_tables_getchain,
+		.attr_count	= NFTA_CHAIN_MAX,
+		.policy		= nft_chain_policy,
+	},
+	[NFT_MSG_DELCHAIN] = {
+		.call		= nf_tables_delchain,
+		.attr_count	= NFTA_CHAIN_MAX,
+		.policy		= nft_chain_policy,
+	},
+	[NFT_MSG_NEWRULE] = {
+		.call_batch	= nf_tables_newrule,
+		.attr_count	= NFTA_RULE_MAX,
+		.policy		= nft_rule_policy,
+	},
+	[NFT_MSG_GETRULE] = {
+		.call		= nf_tables_getrule,
+		.attr_count	= NFTA_RULE_MAX,
+		.policy		= nft_rule_policy,
+	},
+	[NFT_MSG_DELRULE] = {
+		.call_batch	= nf_tables_delrule,
+		.attr_count	= NFTA_RULE_MAX,
+		.policy		= nft_rule_policy,
+	},
+	[NFT_MSG_NEWSET] = {
+		.call		= nf_tables_newset,
+		.attr_count	= NFTA_SET_MAX,
+		.policy		= nft_set_policy,
+	},
+	[NFT_MSG_GETSET] = {
+		.call		= nf_tables_getset,
+		.attr_count	= NFTA_SET_MAX,
+		.policy		= nft_set_policy,
+	},
+	[NFT_MSG_DELSET] = {
+		.call		= nf_tables_delset,
+		.attr_count	= NFTA_SET_MAX,
+		.policy		= nft_set_policy,
+	},
+	[NFT_MSG_NEWSETELEM] = {
+		.call		= nf_tables_newsetelem,
+		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
+		.policy		= nft_set_elem_list_policy,
+	},
+	[NFT_MSG_GETSETELEM] = {
+		.call		= nf_tables_getsetelem,
+		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
+		.policy		= nft_set_elem_list_policy,
+	},
+	[NFT_MSG_DELSETELEM] = {
+		.call		= nf_tables_delsetelem,
+		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
+		.policy		= nft_set_elem_list_policy,
+	},
+};
+
+static const struct nfnetlink_subsystem nf_tables_subsys = {
+	.name		= "nf_tables",
+	.subsys_id	= NFNL_SUBSYS_NFTABLES,
+	.cb_count	= NFT_MSG_MAX,
+	.cb		= nf_tables_cb,
+	.commit		= nf_tables_commit,
+	.abort		= nf_tables_abort,
+};
+
+/*
+ * Loop detection - walk through the ruleset beginning at the destination chain
+ * of a new jump until either the source chain is reached (loop) or all
+ * reachable chains have been traversed.
+ *
+ * The loop check is performed whenever a new jump verdict is added to an
+ * expression or verdict map or a verdict map is bound to a new chain.
+ */
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+				 const struct nft_chain *chain);
+
+static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+					const struct nft_set *set,
+					const struct nft_set_iter *iter,
+					const struct nft_set_elem *elem)
+{
+	switch (elem->data.verdict) {
+	case NFT_JUMP:
+	case NFT_GOTO:
+		return nf_tables_check_loops(ctx, elem->data.chain);
+	default:
+		return 0;
+	}
+}
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+				 const struct nft_chain *chain)
+{
+	const struct nft_rule *rule;
+	const struct nft_expr *expr, *last;
+	const struct nft_set *set;
+	struct nft_set_binding *binding;
+	struct nft_set_iter iter;
+
+	if (ctx->chain == chain)
+		return -ELOOP;
+
+	list_for_each_entry(rule, &chain->rules, list) {
+		nft_rule_for_each_expr(expr, last, rule) {
+			const struct nft_data *data = NULL;
+			int err;
+
+			if (!expr->ops->validate)
+				continue;
+
+			err = expr->ops->validate(ctx, expr, &data);
+			if (err < 0)
+				return err;
+
+			if (data == NULL)
+				continue;
+
+			switch (data->verdict) {
+			case NFT_JUMP:
+			case NFT_GOTO:
+				err = nf_tables_check_loops(ctx, data->chain);
+				if (err < 0)
+					return err;
+			default:
+				break;
+			}
+		}
+	}
+
+	list_for_each_entry(set, &ctx->table->sets, list) {
+		if (!(set->flags & NFT_SET_MAP) ||
+		    set->dtype != NFT_DATA_VERDICT)
+			continue;
+
+		list_for_each_entry(binding, &set->bindings, list) {
+			if (binding->chain != chain)
+				continue;
+
+			iter.skip	= 0;
+			iter.count	= 0;
+			iter.err	= 0;
+			iter.fn		= nf_tables_loop_check_setelem;
+
+			set->ops->walk(ctx, set, &iter);
+			if (iter.err < 0)
+				return iter.err;
+		}
+	}
+
+	return 0;
+}
+
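The walk above is a depth-first search from the jump destination; reaching
the source chain again means the new jump would close a cycle. A toy model
of the idea, reduced to at most one outgoing jump per chain (the real check
also follows every rule's verdicts and bound verdict maps, and relies on the
existing ruleset already being acyclic):

#include <assert.h>

static int jump_to[] = { 1, 2, -1 };	/* chain i jumps to jump_to[i] */

/* Would adding a jump src -> dst close a cycle? */
static int creates_loop(int src, int dst)
{
	int c;

	for (c = dst; c >= 0; c = jump_to[c])
		if (c == src)
			return 1;	/* walked back to the source: -ELOOP */
	return 0;
}

int main(void)
{
	assert(creates_loop(2, 0));	/* 0 -> 1 -> 2 -> 0 would cycle */
	assert(!creates_loop(0, 2));	/* chain 2 has no onward jumps */
	return 0;
}
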
+/**
+ *	nft_validate_input_register - validate an expression's input register
+ *
+ *	@reg: the register number
+ *
+ * 	Validate that the input register is one of the general purpose
+ * 	registers.
+ */
+int nft_validate_input_register(enum nft_registers reg)
+{
+	if (reg <= NFT_REG_VERDICT)
+		return -EINVAL;
+	if (reg > NFT_REG_MAX)
+		return -ERANGE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_input_register);
+
+/**
+ *	nft_validate_output_register - validate an expression's output register
+ *
+ *	@reg: the register number
+ *
+ * 	Validate that the output register is one of the general purpose
+ * 	registers or the verdict register.
+ */
+int nft_validate_output_register(enum nft_registers reg)
+{
+	if (reg < NFT_REG_VERDICT)
+		return -EINVAL;
+	if (reg > NFT_REG_MAX)
+		return -ERANGE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_output_register);
+
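Taken together the two validators encode the register file layout: register
0 is the verdict register, which may only be written, and registers
1..NFT_REG_MAX are general purpose. A tiny self-check of that rule, assuming
the uapi values NFT_REG_VERDICT == 0 and NFT_REG_MAX == 4 (sketch, not
kernel code):

#include <assert.h>

enum { REG_VERDICT = 0, REG_1 = 1, REG_MAX = 4 };

static int input_ok(int reg)  { return reg > REG_VERDICT && reg <= REG_MAX; }
static int output_ok(int reg) { return reg >= REG_VERDICT && reg <= REG_MAX; }

int main(void)
{
	assert(!input_ok(REG_VERDICT) && output_ok(REG_VERDICT));
	assert(input_ok(REG_1) && output_ok(REG_1));
	assert(!input_ok(REG_MAX + 1) && !output_ok(REG_MAX + 1));
	return 0;
}
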
+/**
+ *	nft_validate_data_load - validate an expression's data load
+ *
+ *	@ctx: context of the expression performing the load
+ * 	@reg: the destination register number
+ * 	@data: the data to load
+ * 	@type: the data type
+ *
+ * 	Validate that a data load uses the appropriate data type for
+ * 	the destination register. A value of NULL for the data means
+ * 	that it is runtime-gathered data, which is always of type
+ * 	NFT_DATA_VALUE.
+ */
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+			   const struct nft_data *data,
+			   enum nft_data_types type)
+{
+	int err;
+
+	switch (reg) {
+	case NFT_REG_VERDICT:
+		if (data == NULL || type != NFT_DATA_VERDICT)
+			return -EINVAL;
+
+		if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
+			err = nf_tables_check_loops(ctx, data->chain);
+			if (err < 0)
+				return err;
+
+			if (ctx->chain->level + 1 > data->chain->level) {
+				if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
+					return -EMLINK;
+				data->chain->level = ctx->chain->level + 1;
+			}
+		}
+
+		return 0;
+	default:
+		if (data != NULL && type != NFT_DATA_VALUE)
+			return -EINVAL;
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(nft_validate_data_load);
+
+static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+	[NFTA_VERDICT_CODE]	= { .type = NLA_U32 },
+	[NFTA_VERDICT_CHAIN]	= { .type = NLA_STRING,
+				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
+};
+
+static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+			    struct nft_data_desc *desc, const struct nlattr *nla)
+{
+	struct nlattr *tb[NFTA_VERDICT_MAX + 1];
+	struct nft_chain *chain;
+	int err;
+
+	err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFTA_VERDICT_CODE])
+		return -EINVAL;
+	data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+	switch (data->verdict) {
+	case NF_ACCEPT:
+	case NF_DROP:
+	case NF_QUEUE:
+	case NFT_CONTINUE:
+	case NFT_BREAK:
+	case NFT_RETURN:
+		desc->len = sizeof(data->verdict);
+		break;
+	case NFT_JUMP:
+	case NFT_GOTO:
+		if (!tb[NFTA_VERDICT_CHAIN])
+			return -EINVAL;
+		chain = nf_tables_chain_lookup(ctx->table,
+					       tb[NFTA_VERDICT_CHAIN]);
+		if (IS_ERR(chain))
+			return PTR_ERR(chain);
+		if (chain->flags & NFT_BASE_CHAIN)
+			return -EOPNOTSUPP;
+
+		chain->use++;
+		data->chain = chain;
+		desc->len = sizeof(data->verdict);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	desc->type = NFT_DATA_VERDICT;
+	return 0;
+}
+
+static void nft_verdict_uninit(const struct nft_data *data)
+{
+	switch (data->verdict) {
+	case NFT_JUMP:
+	case NFT_GOTO:
+		data->chain->use--;
+		break;
+	}
+}
+
+static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
+{
+	struct nlattr *nest;
+
+	nest = nla_nest_start(skb, NFTA_DATA_VERDICT);
+	if (!nest)
+		goto nla_put_failure;
+
+	if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
+		goto nla_put_failure;
+
+	switch (data->verdict) {
+	case NFT_JUMP:
+	case NFT_GOTO:
+		if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name))
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
+			  struct nft_data_desc *desc, const struct nlattr *nla)
+{
+	unsigned int len;
+
+	len = nla_len(nla);
+	if (len == 0)
+		return -EINVAL;
+	if (len > sizeof(data->data))
+		return -EOVERFLOW;
+
+	nla_memcpy(data->data, nla, sizeof(data->data));
+	desc->type = NFT_DATA_VALUE;
+	desc->len  = len;
+	return 0;
+}
+
+static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
+			  unsigned int len)
+{
+	return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
+}
+
+static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
+	[NFTA_DATA_VALUE]	= { .type = NLA_BINARY,
+				    .len  = FIELD_SIZEOF(struct nft_data, data) },
+	[NFTA_DATA_VERDICT]	= { .type = NLA_NESTED },
+};
+
+/**
+ *	nft_data_init - parse nf_tables data netlink attributes
+ *
+ *	@ctx: context of the expression using the data
+ *	@data: destination struct nft_data
+ *	@desc: data description
+ *	@nla: netlink attribute containing data
+ *
+ *	Parse the netlink data attributes and initialize a struct nft_data.
+ *	The type and length of data are returned in the data description.
+ *
+ *	The caller can indicate that it only wants to accept data of type
+ *	NFT_DATA_VALUE by passing NULL for the ctx argument.
+ */
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+		  struct nft_data_desc *desc, const struct nlattr *nla)
+{
+	struct nlattr *tb[NFTA_DATA_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_DATA_VALUE])
+		return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+	if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
+		return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(nft_data_init);
+
+/**
+ *	nft_data_uninit - release a nft_data item
+ *
+ *	@data: struct nft_data to release
+ *	@type: type of data
+ *
+ *	Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
+ *	all others need to be released by calling this function.
+ */
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+{
+	switch (type) {
+	case NFT_DATA_VALUE:
+		return;
+	case NFT_DATA_VERDICT:
+		return nft_verdict_uninit(data);
+	default:
+		WARN_ON(1);
+	}
+}
+EXPORT_SYMBOL_GPL(nft_data_uninit);
+
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+		  enum nft_data_types type, unsigned int len)
+{
+	struct nlattr *nest;
+	int err;
+
+	nest = nla_nest_start(skb, attr);
+	if (nest == NULL)
+		return -1;
+
+	switch (type) {
+	case NFT_DATA_VALUE:
+		err = nft_value_dump(skb, data, len);
+		break;
+	case NFT_DATA_VERDICT:
+		err = nft_verdict_dump(skb, data);
+		break;
+	default:
+		err = -EINVAL;
+		WARN_ON(1);
+	}
+
+	nla_nest_end(skb, nest);
+	return err;
+}
+EXPORT_SYMBOL_GPL(nft_data_dump);
+
+static int nf_tables_init_net(struct net *net)
+{
+	INIT_LIST_HEAD(&net->nft.af_info);
+	INIT_LIST_HEAD(&net->nft.commit_list);
+	return 0;
+}
+
+static struct pernet_operations nf_tables_net_ops = {
+	.init	= nf_tables_init_net,
+};
+
+static int __init nf_tables_module_init(void)
+{
+	int err;
+
+	info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
+		       GFP_KERNEL);
+	if (info == NULL) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	err = nf_tables_core_module_init();
+	if (err < 0)
+		goto err2;
+
+	err = nfnetlink_subsys_register(&nf_tables_subsys);
+	if (err < 0)
+		goto err3;
+
+	pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+	return register_pernet_subsys(&nf_tables_net_ops);
+err3:
+	nf_tables_core_module_exit();
+err2:
+	kfree(info);
+err1:
+	return err;
+}
+
+static void __exit nf_tables_module_exit(void)
+{
+	unregister_pernet_subsys(&nf_tables_net_ops);
+	nfnetlink_subsys_unregister(&nf_tables_subsys);
+	nf_tables_core_module_exit();
+	kfree(info);
+}
+
+module_init(nf_tables_module_init);
+module_exit(nf_tables_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
new file mode 100644
index 0000000..cb9e685
--- /dev/null
+++ b/net/netfilter/nf_tables_core.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+
+static void nft_cmp_fast_eval(const struct nft_expr *expr,
+			      struct nft_data data[NFT_REG_MAX + 1])
+{
+	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+	u32 mask;
+
+	mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
+	if ((data[priv->sreg].data[0] & mask) == priv->data)
+		return;
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
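With sizeof(priv->data) == 4 the mask above is ~0U >> (32 - len): all ones in
the low len bits, so the comparison only looks at the bytes the expression
actually loaded. A quick standalone check of that identity (illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t low_bits_mask(unsigned int len)	/* 1 <= len <= 32 */
{
	return ~0U >> (32 - len);
}

int main(void)
{
	assert(low_bits_mask(8)  == 0xffu);
	assert(low_bits_mask(16) == 0xffffu);
	assert(low_bits_mask(32) == 0xffffffffu);
	assert((0xdeadbeefu & low_bits_mask(16)) == 0xbeefu);
	return 0;
}
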
+static bool nft_payload_fast_eval(const struct nft_expr *expr,
+				  struct nft_data data[NFT_REG_MAX + 1],
+				  const struct nft_pktinfo *pkt)
+{
+	const struct nft_payload *priv = nft_expr_priv(expr);
+	const struct sk_buff *skb = pkt->skb;
+	struct nft_data *dest = &data[priv->dreg];
+	unsigned char *ptr;
+
+	if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
+		ptr = skb_network_header(skb);
+	else
+		ptr = skb_network_header(skb) + pkt->xt.thoff;
+
+	ptr += priv->offset;
+
+	if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
+		return false;
+
+	if (priv->len == 2)
+		*(u16 *)dest->data = *(u16 *)ptr;
+	else if (priv->len == 4)
+		*(u32 *)dest->data = *(u32 *)ptr;
+	else
+		*(u8 *)dest->data = *(u8 *)ptr;
+	return true;
+}
+
+struct nft_jumpstack {
+	const struct nft_chain	*chain;
+	const struct nft_rule	*rule;
+	int			rulenum;
+};
+
+static inline void
+nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
+		struct nft_jumpstack *jumpstack, unsigned int stackptr)
+{
+	struct nft_stats __percpu *stats;
+	const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
+
+	rcu_read_lock_bh();
+	stats = rcu_dereference(nft_base_chain(chain)->stats);
+	__this_cpu_inc(stats->pkts);
+	__this_cpu_add(stats->bytes, pkt->skb->len);
+	rcu_read_unlock_bh();
+}
+
+enum nft_trace {
+	NFT_TRACE_RULE,
+	NFT_TRACE_RETURN,
+	NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+	[NFT_TRACE_RULE]	= "rule",
+	[NFT_TRACE_RETURN]	= "return",
+	[NFT_TRACE_POLICY]	= "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+	.type = NF_LOG_TYPE_LOG,
+	.u = {
+		.log = {
+			.level = 4,
+			.logflags = NF_LOG_MASK,
+		},
+	},
+};
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+				    const struct nft_chain *chain,
+				    int rulenum, enum nft_trace type)
+{
+	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+	nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+		      pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+		      chain->table->name, chain->name, comments[type],
+		      rulenum);
+}
+
+unsigned int
+nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+{
+	const struct nft_chain *chain = ops->priv;
+	const struct nft_rule *rule;
+	const struct nft_expr *expr, *last;
+	struct nft_data data[NFT_REG_MAX + 1];
+	unsigned int stackptr = 0;
+	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
+	int rulenum = 0;
+	/*
+	 * Take a snapshot of the generation cursor, so a concurrent update
+	 * cannot change it while we are traversing the ruleset.
+	 */
+	unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+
+do_chain:
+	/* Point rule at the list head so that the continue-iterator below
+	 * starts at the first rule of the chain.
+	 */
+	rule = list_entry(&chain->rules, struct nft_rule, list);
+next_rule:
+	data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+	list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
+
+		/* This rule is not active, skip. */
+		if (unlikely(rule->genmask & (1 << gencursor)))
+			continue;
+
+		rulenum++;
+
+		nft_rule_for_each_expr(expr, last, rule) {
+			if (expr->ops == &nft_cmp_fast_ops)
+				nft_cmp_fast_eval(expr, data);
+			else if (expr->ops != &nft_payload_fast_ops ||
+				 !nft_payload_fast_eval(expr, data, pkt))
+				expr->ops->eval(expr, data, pkt);
+
+			if (data[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
+				break;
+		}
+
+		switch (data[NFT_REG_VERDICT].verdict) {
+		case NFT_BREAK:
+			data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+			/* fall through */
+		case NFT_CONTINUE:
+			continue;
+		}
+		break;
+	}
+
+	switch (data[NFT_REG_VERDICT].verdict) {
+	case NF_ACCEPT:
+	case NF_DROP:
+	case NF_QUEUE:
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+		return data[NFT_REG_VERDICT].verdict;
+	case NFT_JUMP:
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+		BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+		jumpstack[stackptr].chain = chain;
+		jumpstack[stackptr].rule  = rule;
+		jumpstack[stackptr].rulenum = rulenum;
+		stackptr++;
+		/* fall through */
+	case NFT_GOTO:
+		chain = data[NFT_REG_VERDICT].chain;
+		goto do_chain;
+	case NFT_RETURN:
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
+
+		/* fall through */
+	case NFT_CONTINUE:
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	if (stackptr > 0) {
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+
+		stackptr--;
+		chain = jumpstack[stackptr].chain;
+		rule  = jumpstack[stackptr].rule;
+		rulenum = jumpstack[stackptr].rulenum;
+		goto next_rule;
+	}
+	nft_chain_stats(chain, pkt, jumpstack, stackptr);
+
+	if (unlikely(pkt->skb->nf_trace))
+		nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+
+	return nft_base_chain(chain)->policy;
+}
+EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
+
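The jump stack above bounds rule recursion without touching the kernel stack
proper: NFT_JUMP pushes the return point before descending (note the
fall-through into NFT_GOTO, which descends without pushing), and running off
the end of a chain pops one frame and resumes after the jumping rule. A
stripped-down model of just that push/pop control flow (illustrative; real
frames hold chain and rule pointers, not indices):

#include <assert.h>
#include <stdio.h>

#define STACK_SIZE 16

struct frame { int chain; int rule; };

int main(void)
{
	struct frame stack[STACK_SIZE];
	unsigned int sp = 0;
	int chain = 0, rule = 0;

	/* Rule 0 of chain 0 jumps: remember where to resume, descend. */
	assert(sp < STACK_SIZE);
	stack[sp].chain = chain;
	stack[sp].rule  = rule;
	sp++;
	chain = 1;

	/* Chain 1 ends without an absolute verdict: pop and resume. */
	if (sp > 0) {
		sp--;
		chain = stack[sp].chain;
		rule  = stack[sp].rule + 1;	/* continue after the jump */
	}
	assert(chain == 0 && rule == 1);
	printf("resumed at chain %d, rule %d\n", chain, rule);
	return 0;
}
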
+int __init nf_tables_core_module_init(void)
+{
+	int err;
+
+	err = nft_immediate_module_init();
+	if (err < 0)
+		goto err1;
+
+	err = nft_cmp_module_init();
+	if (err < 0)
+		goto err2;
+
+	err = nft_lookup_module_init();
+	if (err < 0)
+		goto err3;
+
+	err = nft_bitwise_module_init();
+	if (err < 0)
+		goto err4;
+
+	err = nft_byteorder_module_init();
+	if (err < 0)
+		goto err5;
+
+	err = nft_payload_module_init();
+	if (err < 0)
+		goto err6;
+
+	return 0;
+
+err6:
+	nft_byteorder_module_exit();
+err5:
+	nft_bitwise_module_exit();
+err4:
+	nft_lookup_module_exit();
+err3:
+	nft_cmp_module_exit();
+err2:
+	nft_immediate_module_exit();
+err1:
+	return err;
+}
+
+void nf_tables_core_module_exit(void)
+{
+	nft_payload_module_exit();
+	nft_byteorder_module_exit();
+	nft_bitwise_module_exit();
+	nft_lookup_module_exit();
+	nft_cmp_module_exit();
+	nft_immediate_module_exit();
+}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 572d87d..046aa13 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -147,9 +147,6 @@
 	const struct nfnetlink_subsystem *ss;
 	int type, err;
 
-	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-		return -EPERM;
-
 	/* All the messages must at least contain nfgenmsg */
 	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
 		return 0;
@@ -217,9 +214,181 @@
 	}
 }
 
+static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+				u_int16_t subsys_id)
+{
+	struct sk_buff *nskb, *oskb = skb;
+	struct net *net = sock_net(skb->sk);
+	const struct nfnetlink_subsystem *ss;
+	const struct nfnl_callback *nc;
+	bool success = true, done = false;
+	int err;
+
+	if (subsys_id >= NFNL_SUBSYS_COUNT)
+		return netlink_ack(skb, nlh, -EINVAL);
+replay:
+	nskb = netlink_skb_clone(oskb, GFP_KERNEL);
+	if (!nskb)
+		return netlink_ack(oskb, nlh, -ENOMEM);
+
+	nskb->sk = oskb->sk;
+	skb = nskb;
+
+	nfnl_lock(subsys_id);
+	ss = rcu_dereference_protected(table[subsys_id].subsys,
+				       lockdep_is_held(&table[subsys_id].mutex));
+	if (!ss) {
+#ifdef CONFIG_MODULES
+		nfnl_unlock(subsys_id);
+		request_module("nfnetlink-subsys-%d", subsys_id);
+		nfnl_lock(subsys_id);
+		ss = rcu_dereference_protected(table[subsys_id].subsys,
+					       lockdep_is_held(&table[subsys_id].mutex));
+		if (!ss)
+#endif
+		{
+			nfnl_unlock(subsys_id);
+			kfree_skb(nskb);
+			/* skb aliases the freed clone; ack on the original */
+			return netlink_ack(oskb, nlh, -EOPNOTSUPP);
+		}
+	}
+
+	if (!ss->commit || !ss->abort) {
+		nfnl_unlock(subsys_id);
+		kfree_skb(nskb);
+		return netlink_ack(oskb, nlh, -EOPNOTSUPP);
+	}
+
+	while (skb->len >= nlmsg_total_size(0)) {
+		int msglen, type;
+
+		nlh = nlmsg_hdr(skb);
+		err = 0;
+
+		if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+			err = -EINVAL;
+			goto ack;
+		}
+
+		/* Only requests are handled by the kernel */
+		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
+			err = -EINVAL;
+			goto ack;
+		}
+
+		type = nlh->nlmsg_type;
+		if (type == NFNL_MSG_BATCH_BEGIN) {
+			/* Malformed: a nested batch-begin; abort the batch */
+			success = false;
+			goto done;
+		} else if (type == NFNL_MSG_BATCH_END) {
+			done = true;
+			goto done;
+		} else if (type < NLMSG_MIN_TYPE) {
+			err = -EINVAL;
+			goto ack;
+		}
+
+		/* We only accept a batch with messages for the same
+		 * subsystem.
+		 */
+		if (NFNL_SUBSYS_ID(type) != subsys_id) {
+			err = -EINVAL;
+			goto ack;
+		}
+
+		nc = nfnetlink_find_client(type, ss);
+		if (!nc) {
+			err = -EINVAL;
+			goto ack;
+		}
+
+		{
+			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
+			u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+			struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
+			struct nlattr *attr = (void *)nlh + min_len;
+			int attrlen = nlh->nlmsg_len - min_len;
+
+			err = nla_parse(cda, ss->cb[cb_id].attr_count,
+					attr, attrlen, ss->cb[cb_id].policy);
+			if (err < 0)
+				goto ack;
+
+			if (nc->call_batch) {
+				err = nc->call_batch(net->nfnl, skb, nlh,
+						     (const struct nlattr **)cda);
+			}
+
+			/* The lock was released to autoload a module; we
+			 * must abort and start from scratch using the
+			 * original skb.
+			 */
+			if (err == -EAGAIN) {
+				ss->abort(skb);
+				nfnl_unlock(subsys_id);
+				kfree_skb(nskb);
+				goto replay;
+			}
+		}
+ack:
+		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+			/* We don't stop processing the batch on errors;
+			 * this way userspace gets all the errors that the
+			 * batch triggers.
+			 */
+			netlink_ack(skb, nlh, err);
+			if (err)
+				success = false;
+		}
+
+		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+		if (msglen > skb->len)
+			msglen = skb->len;
+		skb_pull(skb, msglen);
+	}
+done:
+	if (success && done)
+		ss->commit(skb);
+	else
+		ss->abort(skb);
+
+	nfnl_unlock(subsys_id);
+	kfree_skb(nskb);
+}
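
On the wire a batch is an ordinary stream of netlink messages: one
NFNL_MSG_BATCH_BEGIN whose nfgenmsg res_id selects the subsystem, the
batched requests, then NFNL_MSG_BATCH_END; if a handler returns -EAGAIN
after a module autoload, the function above replays the cloned batch from
scratch. A rough userspace sketch of the framing, assuming the uapi headers
introduced by this series (untested; error handling omitted):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

/* Append one header-only batch message to buf; returns bytes used. */
static size_t put_batch_msg(char *buf, __u16 type, __u16 res_id)
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct nfgenmsg *nfg = NLMSG_DATA(nlh);

	memset(buf, 0, NLMSG_SPACE(sizeof(*nfg)));
	nlh->nlmsg_len    = NLMSG_LENGTH(sizeof(*nfg));
	nlh->nlmsg_type   = type;
	nlh->nlmsg_flags  = NLM_F_REQUEST;
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version      = NFNETLINK_V0;
	nfg->res_id       = res_id;	/* read as-is by the code above */
	return NLMSG_ALIGN(nlh->nlmsg_len);
}

/* Usage sketch, with buf large enough for the whole batch:
 *	off  = put_batch_msg(buf, NFNL_MSG_BATCH_BEGIN, NFNL_SUBSYS_NFTABLES);
 *	... append the NFT_MSG_* requests here ...
 *	off += put_batch_msg(buf + off, NFNL_MSG_BATCH_END, 0);
 */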
+
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
-	netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+	struct nlmsghdr *nlh = nlmsg_hdr(skb);
+	struct net *net = sock_net(skb->sk);
+	int msglen;
+
+	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+	    skb->len < nlh->nlmsg_len)
+		return;
+
+	if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+		netlink_ack(skb, nlh, -EPERM);
+		return;
+	}
+
+	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
+		struct nfgenmsg *nfgenmsg;
+
+		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+		if (msglen > skb->len)
+			msglen = skb->len;
+
+		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+		    skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+			return;
+
+		nfgenmsg = nlmsg_data(nlh);
+		skb_pull(skb, msglen);
+		nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+	} else {
+		netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+	}
 }
 
 #ifdef CONFIG_MODULES
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 5058049..476accd 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,10 +49,8 @@
 };
 
 static int
-ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
-			  struct nf_conntrack_l4proto *l4proto,
-			  struct net *net,
-			  const struct nlattr *attr)
+ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto,
+			  struct net *net, const struct nlattr *attr)
 {
 	int ret = 0;
 
@@ -64,8 +62,7 @@
 		if (ret < 0)
 			return ret;
 
-		ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
-							  &timeout->data);
+		ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
 	}
 	return ret;
 }
@@ -123,7 +120,8 @@
 				goto err_proto_put;
 			}
 
-			ret = ctnl_timeout_parse_policy(matching, l4proto, net,
+			ret = ctnl_timeout_parse_policy(&matching->data,
+							l4proto, net,
 							cda[CTA_TIMEOUT_DATA]);
 			return ret;
 		}
@@ -138,7 +136,7 @@
 		goto err_proto_put;
 	}
 
-	ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
+	ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net,
 					cda[CTA_TIMEOUT_DATA]);
 	if (ret < 0)
 		goto err;
@@ -342,6 +340,147 @@
 	return ret;
 }
 
+static int
+cttimeout_default_set(struct sock *ctnl, struct sk_buff *skb,
+		      const struct nlmsghdr *nlh,
+		      const struct nlattr * const cda[])
+{
+	__u16 l3num;
+	__u8 l4num;
+	struct nf_conntrack_l4proto *l4proto;
+	struct net *net = sock_net(skb->sk);
+	unsigned int *timeouts;
+	int ret;
+
+	if (!cda[CTA_TIMEOUT_L3PROTO] ||
+	    !cda[CTA_TIMEOUT_L4PROTO] ||
+	    !cda[CTA_TIMEOUT_DATA])
+		return -EINVAL;
+
+	l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+	l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+	l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+	/* This protocol is not supported, bail out. */
+	if (l4proto->l4proto != l4num) {
+		ret = -EOPNOTSUPP;
+		goto err;
+	}
+
+	timeouts = l4proto->get_timeouts(net);
+
+	ret = ctnl_timeout_parse_policy(timeouts, l4proto, net,
+					cda[CTA_TIMEOUT_DATA]);
+	if (ret < 0)
+		goto err;
+
+	nf_ct_l4proto_put(l4proto);
+	return 0;
+err:
+	nf_ct_l4proto_put(l4proto);
+	return ret;
+}
+
+static int
+cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+			    u32 seq, u32 type, int event,
+			    struct nf_conntrack_l4proto *l4proto)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+	event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+	    nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
+		goto nla_put_failure;
+
+	if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+		struct nlattr *nest_parms;
+		unsigned int *timeouts = l4proto->get_timeouts(net);
+		int ret;
+
+		nest_parms = nla_nest_start(skb,
+					    CTA_TIMEOUT_DATA | NLA_F_NESTED);
+		if (!nest_parms)
+			goto nla_put_failure;
+
+		ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+		if (ret < 0)
+			goto nla_put_failure;
+
+		nla_nest_end(skb, nest_parms);
+	}
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int cttimeout_default_get(struct sock *ctnl, struct sk_buff *skb,
+				 const struct nlmsghdr *nlh,
+				 const struct nlattr * const cda[])
+{
+	__u16 l3num;
+	__u8 l4num;
+	struct nf_conntrack_l4proto *l4proto;
+	struct net *net = sock_net(skb->sk);
+	struct sk_buff *skb2;
+	int ret, err;
+
+	if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
+		return -EINVAL;
+
+	l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+	l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+	l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+	/* This protocol is not supported, skip. */
+	if (l4proto->l4proto != l4num) {
+		err = -EOPNOTSUPP;
+		goto err;
+	}
+
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	ret = cttimeout_default_fill_info(net, skb2, NETLINK_CB(skb).portid,
+					  nlh->nlmsg_seq,
+					  NFNL_MSG_TYPE(nlh->nlmsg_type),
+					  IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+					  l4proto);
+	if (ret <= 0) {
+		kfree_skb(skb2);
+		err = -ENOMEM;
+		goto err;
+	}
+	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+	if (ret > 0)
+		ret = 0;
+
+	nf_ct_l4proto_put(l4proto);
+
+	/* this avoids a loop in nfnetlink. */
+	return ret == -EAGAIN ? -ENOBUFS : ret;
+err:
+	nf_ct_l4proto_put(l4proto);
+	return err;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
 {
@@ -384,6 +523,12 @@
 	[IPCTNL_MSG_TIMEOUT_DELETE]	= { .call = cttimeout_del_timeout,
 					    .attr_count = CTA_TIMEOUT_MAX,
 					    .policy = cttimeout_nla_policy },
+	[IPCTNL_MSG_TIMEOUT_DEFAULT_SET]= { .call = cttimeout_default_set,
+					    .attr_count = CTA_TIMEOUT_MAX,
+					    .policy = cttimeout_nla_policy },
+	[IPCTNL_MSG_TIMEOUT_DEFAULT_GET]= { .call = cttimeout_default_get,
+					    .attr_count = CTA_TIMEOUT_MAX,
+					    .policy = cttimeout_nla_policy },
 };
 
 static const struct nfnetlink_subsystem cttimeout_subsys = {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index d92cc31..3c4b69e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -319,7 +319,8 @@
 }
 
 static struct sk_buff *
-nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
+nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
+		 unsigned int pkt_size)
 {
 	struct sk_buff *skb;
 	unsigned int n;
@@ -328,13 +329,13 @@
 	 * message.  WARNING: has to be <= 128k due to slab restrictions */
 
 	n = max(inst_size, pkt_size);
-	skb = nfnetlink_alloc_skb(&init_net, n, peer_portid, GFP_ATOMIC);
+	skb = nfnetlink_alloc_skb(net, n, peer_portid, GFP_ATOMIC);
 	if (!skb) {
 		if (n > pkt_size) {
 			/* try to allocate only as much as we need for current
 			 * packet */
 
-			skb = nfnetlink_alloc_skb(&init_net, pkt_size,
+			skb = nfnetlink_alloc_skb(net, pkt_size,
 						  peer_portid, GFP_ATOMIC);
 			if (!skb)
 				pr_err("nfnetlink_log: can't even alloc %u bytes\n",
@@ -702,8 +703,8 @@
 	}
 
 	if (!inst->skb) {
-		inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz,
-					     size);
+		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
+					     inst->nlbufsiz, size);
 		if (!inst->skb)
 			goto alloc_failure;
 	}
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index ae2e5c1..21258cf 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -298,7 +298,7 @@
 }
 
 static struct sk_buff *
-nfqnl_build_packet_message(struct nfqnl_instance *queue,
+nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 			   struct nf_queue_entry *entry,
 			   __be32 **packet_id_ptr)
 {
@@ -372,7 +372,7 @@
 	if (queue->flags & NFQA_CFG_F_CONNTRACK)
 		ct = nfqnl_ct_get(entskb, &size, &ctinfo);
 
-	skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
+	skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
 				  GFP_ATOMIC);
 	if (!skb)
 		return NULL;
@@ -525,7 +525,7 @@
 	__be32 *packet_id_ptr;
 	int failopen = 0;
 
-	nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
+	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
 	if (nskb == NULL) {
 		err = -ENOMEM;
 		goto err_out;
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
new file mode 100644
index 0000000..4fb6ee2
--- /dev/null
+++ b/net/netfilter/nft_bitwise.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_bitwise {
+	enum nft_registers	sreg:8;
+	enum nft_registers	dreg:8;
+	u8			len;
+	struct nft_data		mask;
+	struct nft_data		xor;
+};
+
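+/* Evaluation computes dst = (src & mask) ^ xor, one 32 bit word at a time:
+ * a zero mask bit clears the bit, a one in the xor then sets it. */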
+static void nft_bitwise_eval(const struct nft_expr *expr,
+			     struct nft_data data[NFT_REG_MAX + 1],
+			     const struct nft_pktinfo *pkt)
+{
+	const struct nft_bitwise *priv = nft_expr_priv(expr);
+	const struct nft_data *src = &data[priv->sreg];
+	struct nft_data *dst = &data[priv->dreg];
+	unsigned int i;
+
+	for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) {
+		dst->data[i] = (src->data[i] & priv->mask.data[i]) ^
+			       priv->xor.data[i];
+	}
+}
+
+static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
+	[NFTA_BITWISE_SREG]	= { .type = NLA_U32 },
+	[NFTA_BITWISE_DREG]	= { .type = NLA_U32 },
+	[NFTA_BITWISE_LEN]	= { .type = NLA_U32 },
+	[NFTA_BITWISE_MASK]	= { .type = NLA_NESTED },
+	[NFTA_BITWISE_XOR]	= { .type = NLA_NESTED },
+};
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nlattr * const tb[])
+{
+	struct nft_bitwise *priv = nft_expr_priv(expr);
+	struct nft_data_desc d1, d2;
+	int err;
+
+	if (tb[NFTA_BITWISE_SREG] == NULL ||
+	    tb[NFTA_BITWISE_DREG] == NULL ||
+	    tb[NFTA_BITWISE_LEN] == NULL ||
+	    tb[NFTA_BITWISE_MASK] == NULL ||
+	    tb[NFTA_BITWISE_XOR] == NULL)
+		return -EINVAL;
+
+	priv->sreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_SREG]));
+	err = nft_validate_input_register(priv->sreg);
+	if (err < 0)
+		return err;
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+	err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+	if (err < 0)
+		return err;
+
+	priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+
+	err = nft_data_init(NULL, &priv->mask, &d1, tb[NFTA_BITWISE_MASK]);
+	if (err < 0)
+		return err;
+	if (d1.len != priv->len)
+		return -EINVAL;
+
+	err = nft_data_init(NULL, &priv->xor, &d2, tb[NFTA_BITWISE_XOR]);
+	if (err < 0)
+		return err;
+	if (d2.len != priv->len)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_bitwise *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_BITWISE_SREG, htonl(priv->sreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BITWISE_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+		goto nla_put_failure;
+
+	if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
+			  NFT_DATA_VALUE, priv->len) < 0)
+		goto nla_put_failure;
+
+	if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
+			  NFT_DATA_VALUE, priv->len) < 0)
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_bitwise_type;
+static const struct nft_expr_ops nft_bitwise_ops = {
+	.type		= &nft_bitwise_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
+	.eval		= nft_bitwise_eval,
+	.init		= nft_bitwise_init,
+	.dump		= nft_bitwise_dump,
+};
+
+static struct nft_expr_type nft_bitwise_type __read_mostly = {
+	.name		= "bitwise",
+	.ops		= &nft_bitwise_ops,
+	.policy		= nft_bitwise_policy,
+	.maxattr	= NFTA_BITWISE_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_bitwise_module_init(void)
+{
+	return nft_register_expr(&nft_bitwise_type);
+}
+
+void nft_bitwise_module_exit(void)
+{
+	nft_unregister_expr(&nft_bitwise_type);
+}
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
new file mode 100644
index 0000000..c39ed8d
--- /dev/null
+++ b/net/netfilter/nft_byteorder.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_byteorder {
+	enum nft_registers	sreg:8;
+	enum nft_registers	dreg:8;
+	enum nft_byteorder_ops	op:8;
+	u8			len;
+	u8			size;
+};
+
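+/* Convert priv->len bytes of the source register between host and network
+ * byte order into the destination register, in chunks of 2 or 4 bytes. */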
+static void nft_byteorder_eval(const struct nft_expr *expr,
+			       struct nft_data data[NFT_REG_MAX + 1],
+			       const struct nft_pktinfo *pkt)
+{
+	const struct nft_byteorder *priv = nft_expr_priv(expr);
+	struct nft_data *src = &data[priv->sreg], *dst = &data[priv->dreg];
+	union { u32 u32; u16 u16; } *s, *d;
+	unsigned int i;
+
+	s = (void *)src->data;
+	d = (void *)dst->data;
+
+	switch (priv->size) {
+	case 4:
+		switch (priv->op) {
+		case NFT_BYTEORDER_NTOH:
+			for (i = 0; i < priv->len / 4; i++)
+				d[i].u32 = ntohl((__force __be32)s[i].u32);
+			break;
+		case NFT_BYTEORDER_HTON:
+			for (i = 0; i < priv->len / 4; i++)
+				d[i].u32 = (__force __u32)htonl(s[i].u32);
+			break;
+		}
+		break;
+	case 2:
+		switch (priv->op) {
+		case NFT_BYTEORDER_NTOH:
+			for (i = 0; i < priv->len / 2; i++)
+				d[i].u16 = ntohs((__force __be16)s[i].u16);
+			break;
+		case NFT_BYTEORDER_HTON:
+			for (i = 0; i < priv->len / 2; i++)
+				d[i].u16 = (__force __u16)htons(s[i].u16);
+			break;
+		}
+		break;
+	}
+}
+
+static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
+	[NFTA_BYTEORDER_SREG]	= { .type = NLA_U32 },
+	[NFTA_BYTEORDER_DREG]	= { .type = NLA_U32 },
+	[NFTA_BYTEORDER_OP]	= { .type = NLA_U32 },
+	[NFTA_BYTEORDER_LEN]	= { .type = NLA_U32 },
+	[NFTA_BYTEORDER_SIZE]	= { .type = NLA_U32 },
+};
+
+static int nft_byteorder_init(const struct nft_ctx *ctx,
+			      const struct nft_expr *expr,
+			      const struct nlattr * const tb[])
+{
+	struct nft_byteorder *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_BYTEORDER_SREG] == NULL ||
+	    tb[NFTA_BYTEORDER_DREG] == NULL ||
+	    tb[NFTA_BYTEORDER_LEN] == NULL ||
+	    tb[NFTA_BYTEORDER_SIZE] == NULL ||
+	    tb[NFTA_BYTEORDER_OP] == NULL)
+		return -EINVAL;
+
+	priv->sreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SREG]));
+	err = nft_validate_input_register(priv->sreg);
+	if (err < 0)
+		return err;
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+	err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+	if (err < 0)
+		return err;
+
+	priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
+	switch (priv->op) {
+	case NFT_BYTEORDER_NTOH:
+	case NFT_BYTEORDER_HTON:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+	if (priv->len == 0 || priv->len > FIELD_SIZEOF(struct nft_data, data))
+		return -EINVAL;
+
+	priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
+	switch (priv->size) {
+	case 2:
+	case 4:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_byteorder *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_BYTEORDER_SREG, htonl(priv->sreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BYTEORDER_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_byteorder_type;
+static const struct nft_expr_ops nft_byteorder_ops = {
+	.type		= &nft_byteorder_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
+	.eval		= nft_byteorder_eval,
+	.init		= nft_byteorder_init,
+	.dump		= nft_byteorder_dump,
+};
+
+static struct nft_expr_type nft_byteorder_type __read_mostly = {
+	.name		= "byteorder",
+	.ops		= &nft_byteorder_ops,
+	.policy		= nft_byteorder_policy,
+	.maxattr	= NFTA_BYTEORDER_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_byteorder_module_init(void)
+{
+	return nft_register_expr(&nft_byteorder_type);
+}
+
+void nft_byteorder_module_exit(void)
+{
+	nft_unregister_expr(&nft_byteorder_type);
+}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
new file mode 100644
index 0000000..954925d
--- /dev/null
+++ b/net/netfilter/nft_cmp.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_cmp_expr {
+	struct nft_data		data;
+	enum nft_registers	sreg:8;
+	u8			len;
+	enum nft_cmp_ops	op:8;
+};
+
+static void nft_cmp_eval(const struct nft_expr *expr,
+			 struct nft_data data[NFT_REG_MAX + 1],
+			 const struct nft_pktinfo *pkt)
+{
+	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+	int d;
+
+	d = nft_data_cmp(&data[priv->sreg], &priv->data, priv->len);
+	switch (priv->op) {
+	case NFT_CMP_EQ:
+		if (d != 0)
+			goto mismatch;
+		break;
+	case NFT_CMP_NEQ:
+		if (d == 0)
+			goto mismatch;
+		break;
+	case NFT_CMP_LT:
+		if (d == 0)
+			goto mismatch;
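+		/* fall through */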
+	case NFT_CMP_LTE:
+		if (d > 0)
+			goto mismatch;
+		break;
+	case NFT_CMP_GT:
+		if (d == 0)
+			goto mismatch;
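+		/* fall through */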
+	case NFT_CMP_GTE:
+		if (d < 0)
+			goto mismatch;
+		break;
+	}
+	return;
+
+mismatch:
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
+	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
+	[NFTA_CMP_OP]		= { .type = NLA_U32 },
+	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
+};
+
+static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+			const struct nlattr * const tb[])
+{
+	struct nft_cmp_expr *priv = nft_expr_priv(expr);
+	struct nft_data_desc desc;
+	int err;
+
+	priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+	priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+
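+	/* ->select_ops() already parsed and validated NFTA_CMP_DATA, so a
+	 * failure of nft_data_init() here would be a kernel bug. */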
+	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
+	BUG_ON(err < 0);
+
+	priv->len = desc.len;
+	return 0;
+}
+
+static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
+		goto nla_put_failure;
+
+	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
+			  NFT_DATA_VALUE, priv->len) < 0)
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_cmp_type;
+static const struct nft_expr_ops nft_cmp_ops = {
+	.type		= &nft_cmp_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
+	.eval		= nft_cmp_eval,
+	.init		= nft_cmp_init,
+	.dump		= nft_cmp_dump,
+};
+
+static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+			     const struct nft_expr *expr,
+			     const struct nlattr * const tb[])
+{
+	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+	struct nft_data_desc desc;
+	struct nft_data data;
+	u32 mask;
+	int err;
+
+	priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+
+	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+	BUG_ON(err < 0);
+	desc.len *= BITS_PER_BYTE;
+
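+	/* Keep only the desc.len significant bits of the constant; the
+	 * inlined fast path compares a single masked 32 bit word. */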
+	mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
+	priv->data = data.data[0] & mask;
+	priv->len  = desc.len;
+	return 0;
+}
+
+static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+	struct nft_data data;
+
+	if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
+		goto nla_put_failure;
+
+	data.data[0] = priv->data;
+	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
+			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+const struct nft_expr_ops nft_cmp_fast_ops = {
+	.type		= &nft_cmp_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
+	.eval		= NULL,	/* inlined */
+	.init		= nft_cmp_fast_init,
+	.dump		= nft_cmp_fast_dump,
+};
+
+static const struct nft_expr_ops *
+nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
+{
+	struct nft_data_desc desc;
+	struct nft_data data;
+	enum nft_registers sreg;
+	enum nft_cmp_ops op;
+	int err;
+
+	if (tb[NFTA_CMP_SREG] == NULL ||
+	    tb[NFTA_CMP_OP] == NULL ||
+	    tb[NFTA_CMP_DATA] == NULL)
+		return ERR_PTR(-EINVAL);
+
+	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+	err = nft_validate_input_register(sreg);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+	switch (op) {
+	case NFT_CMP_EQ:
+	case NFT_CMP_NEQ:
+	case NFT_CMP_LT:
+	case NFT_CMP_LTE:
+	case NFT_CMP_GT:
+	case NFT_CMP_GTE:
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
+		return &nft_cmp_fast_ops;
+	else
+		return &nft_cmp_ops;
+}
+
+static struct nft_expr_type nft_cmp_type __read_mostly = {
+	.name		= "cmp",
+	.select_ops	= nft_cmp_select_ops,
+	.policy		= nft_cmp_policy,
+	.maxattr	= NFTA_CMP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_cmp_module_init(void)
+{
+	return nft_register_expr(&nft_cmp_type);
+}
+
+void nft_cmp_module_exit(void)
+{
+	nft_unregister_expr(&nft_cmp_type);
+}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
new file mode 100644
index 0000000..a82667c
--- /dev/null
+++ b/net/netfilter/nft_compat.c
@@ -0,0 +1,768 @@
+/*
+ * (C) 2012-2013 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This software has been sponsored by Sophos Astaro <http://www.sophos.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/netfilter/nf_tables_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <asm/uaccess.h> /* for set_fs */
+#include <net/netfilter/nf_tables.h>
+
+union nft_entry {
+	struct ipt_entry e4;
+	struct ip6t_entry e6;
+};
+
+static inline void
+nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
+{
+	par->target	= xt;
+	par->targinfo	= xt_info;
+	par->hotdrop	= false;
+}
+
+static void nft_target_eval(const struct nft_expr *expr,
+			    struct nft_data data[NFT_REG_MAX + 1],
+			    const struct nft_pktinfo *pkt)
+{
+	void *info = nft_expr_priv(expr);
+	struct xt_target *target = expr->ops->data;
+	struct sk_buff *skb = pkt->skb;
+	int ret;
+
+	nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+	ret = target->target(skb, &pkt->xt);
+
+	if (pkt->xt.hotdrop)
+		ret = NF_DROP;
+
+	switch (ret) {
+	case XT_CONTINUE:
+		data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+		break;
+	default:
+		data[NFT_REG_VERDICT].verdict = ret;
+		break;
+	}
+}
+
+static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+	[NFTA_TARGET_NAME]	= { .type = NLA_NUL_STRING },
+	[NFTA_TARGET_REV]	= { .type = NLA_U32 },
+	[NFTA_TARGET_INFO]	= { .type = NLA_BINARY },
+};
+
+static void
+nft_target_set_tgchk_param(struct xt_tgchk_param *par,
+			   const struct nft_ctx *ctx,
+			   struct xt_target *target, void *info,
+			   union nft_entry *entry, u8 proto, bool inv)
+{
+	par->net	= &init_net;
+	par->table	= ctx->table->name;
+	switch (ctx->afi->family) {
+	case AF_INET:
+		entry->e4.ip.proto = proto;
+		entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+		break;
+	case AF_INET6:
+		entry->e6.ipv6.proto = proto;
+		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+		break;
+	}
+	par->entryinfo	= entry;
+	par->target	= target;
+	par->targinfo	= info;
+	if (ctx->chain->flags & NFT_BASE_CHAIN) {
+		const struct nft_base_chain *basechain =
+						nft_base_chain(ctx->chain);
+		const struct nf_hook_ops *ops = &basechain->ops;
+
+		par->hook_mask = 1 << ops->hooknum;
+	}
+	par->family	= ctx->afi->family;
+}
+
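+/* If the extension has a compat translator, use it to convert the target
+ * info from the userspace layout and zero the alignment padding; otherwise
+ * copy the blob verbatim. */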
+static void target_compat_from_user(struct xt_target *t, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+	if (t->compat_from_user) {
+		int pad;
+
+		t->compat_from_user(out, in);
+		pad = XT_ALIGN(t->targetsize) - t->targetsize;
+		if (pad > 0)
+			memset(out + t->targetsize, 0, pad);
+	} else
+#endif
+		memcpy(out, in, XT_ALIGN(t->targetsize));
+}
+
+static inline int nft_compat_target_offset(struct xt_target *target)
+{
+#ifdef CONFIG_COMPAT
+	return xt_compat_target_offset(target);
+#else
+	return 0;
+#endif
+}
+
+static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
+	[NFTA_RULE_COMPAT_PROTO]	= { .type = NLA_U32 },
+	[NFTA_RULE_COMPAT_FLAGS]	= { .type = NLA_U32 },
+};
+
+static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+{
+	struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+	u32 flags;
+	int err;
+
+	err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr,
+			       nft_rule_compat_policy);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
+		return -EINVAL;
+
+	flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+	if (flags & ~NFT_RULE_COMPAT_F_MASK)
+		return -EINVAL;
+	if (flags & NFT_RULE_COMPAT_F_INV)
+		*inv = true;
+
+	*proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+	return 0;
+}
+
+static int
+nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+		const struct nlattr * const tb[])
+{
+	void *info = nft_expr_priv(expr);
+	struct xt_target *target = expr->ops->data;
+	struct xt_tgchk_param par;
+	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
+	u8 proto = 0;
+	bool inv = false;
+	union nft_entry e = {};
+	int ret;
+
+	target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
+
+	if (ctx->nla[NFTA_RULE_COMPAT]) {
+		ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+		if (ret < 0)
+			goto err;
+	}
+
+	nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+
+	ret = xt_check_target(&par, size, proto, inv);
+	if (ret < 0)
+		goto err;
+
+	/* The standard target cannot be used */
+	if (target->target == NULL) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+err:
+	module_put(target->me);
+	return ret;
+}
+
+static void
+nft_target_destroy(const struct nft_expr *expr)
+{
+	struct xt_target *target = expr->ops->data;
+
+	module_put(target->me);
+}
+
+static int
+target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
+{
+	int ret;
+
+#ifdef CONFIG_COMPAT
+	if (t->compat_to_user) {
+		mm_segment_t old_fs;
+		void *out;
+
+		out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC);
+		if (out == NULL)
+			return -ENOMEM;
+
+		/* We want to reuse existing compat_to_user */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		t->compat_to_user(out, in);
+		set_fs(old_fs);
+		ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
+		kfree(out);
+	} else
+#endif
+		ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in);
+
+	return ret;
+}
+
+static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct xt_target *target = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+
+	if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
+	    nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
+	    target_dump_info(skb, target, info))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int nft_target_validate(const struct nft_ctx *ctx,
+			       const struct nft_expr *expr,
+			       const struct nft_data **data)
+{
+	struct xt_target *target = expr->ops->data;
+	unsigned int hook_mask = 0;
+
+	if (ctx->chain->flags & NFT_BASE_CHAIN) {
+		const struct nft_base_chain *basechain =
+						nft_base_chain(ctx->chain);
+		const struct nf_hook_ops *ops = &basechain->ops;
+
+		hook_mask = 1 << ops->hooknum;
+		if (hook_mask & target->hooks)
+			return 0;
+
+		/* This target is being called from an invalid chain */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void nft_match_eval(const struct nft_expr *expr,
+			   struct nft_data data[NFT_REG_MAX + 1],
+			   const struct nft_pktinfo *pkt)
+{
+	void *info = nft_expr_priv(expr);
+	struct xt_match *match = expr->ops->data;
+	struct sk_buff *skb = pkt->skb;
+	bool ret;
+
+	nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
+
+	ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
+
+	if (pkt->xt.hotdrop) {
+		data[NFT_REG_VERDICT].verdict = NF_DROP;
+		return;
+	}
+
+	if (ret)
+		data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+	else
+		data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+	[NFTA_MATCH_NAME]	= { .type = NLA_NUL_STRING },
+	[NFTA_MATCH_REV]	= { .type = NLA_U32 },
+	[NFTA_MATCH_INFO]	= { .type = NLA_BINARY },
+};
+
+/* struct xt_mtchk_param and xt_tgchk_param look very similar */
+static void
+nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
+			  struct xt_match *match, void *info,
+			  union nft_entry *entry, u8 proto, bool inv)
+{
+	par->net	= &init_net;
+	par->table	= ctx->table->name;
+	switch (ctx->afi->family) {
+	case AF_INET:
+		entry->e4.ip.proto = proto;
+		entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+		break;
+	case AF_INET6:
+		entry->e6.ipv6.proto = proto;
+		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+		break;
+	}
+	par->entryinfo	= entry;
+	par->match	= match;
+	par->matchinfo	= info;
+	if (ctx->chain->flags & NFT_BASE_CHAIN) {
+		const struct nft_base_chain *basechain =
+						nft_base_chain(ctx->chain);
+		const struct nf_hook_ops *ops = &basechain->ops;
+
+		par->hook_mask = 1 << ops->hooknum;
+	}
+	par->family	= ctx->afi->family;
+}
+
+static void match_compat_from_user(struct xt_match *m, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+	if (m->compat_from_user) {
+		int pad;
+
+		m->compat_from_user(out, in);
+		pad = XT_ALIGN(m->matchsize) - m->matchsize;
+		if (pad > 0)
+			memset(out + m->matchsize, 0, pad);
+	} else
+#endif
+		memcpy(out, in, XT_ALIGN(m->matchsize));
+}
+
+static int
+nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+		const struct nlattr * const tb[])
+{
+	void *info = nft_expr_priv(expr);
+	struct xt_match *match = expr->ops->data;
+	struct xt_mtchk_param par;
+	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
+	u8 proto = 0;
+	bool inv = false;
+	union nft_entry e = {};
+	int ret;
+
+	match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
+
+	if (ctx->nla[NFTA_RULE_COMPAT]) {
+		ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+		if (ret < 0)
+			goto err;
+	}
+
+	nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+
+	ret = xt_check_match(&par, size, proto, inv);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	module_put(match->me);
+	return ret;
+}
+
+static void
+nft_match_destroy(const struct nft_expr *expr)
+{
+	struct xt_match *match = expr->ops->data;
+
+	module_put(match->me);
+}
+
+static int
+match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
+{
+	int ret;
+
+#ifdef CONFIG_COMPAT
+	if (m->compat_to_user) {
+		mm_segment_t old_fs;
+		void *out;
+
+		out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC);
+		if (out == NULL)
+			return -ENOMEM;
+
+		/* We want to reuse existing compat_to_user */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		m->compat_to_user(out, in);
+		set_fs(old_fs);
+		ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
+		kfree(out);
+	} else
+#endif
+		ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in);
+
+	return ret;
+}
+
+static inline int nft_compat_match_offset(struct xt_match *match)
+{
+#ifdef CONFIG_COMPAT
+	return xt_compat_match_offset(match);
+#else
+	return 0;
+#endif
+}
+
+static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	void *info = nft_expr_priv(expr);
+	struct xt_match *match = expr->ops->data;
+
+	if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+	    nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
+	    match_dump_info(skb, match, info))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int nft_match_validate(const struct nft_ctx *ctx,
+			      const struct nft_expr *expr,
+			      const struct nft_data **data)
+{
+	struct xt_match *match = expr->ops->data;
+	unsigned int hook_mask = 0;
+
+	if (ctx->chain->flags & NFT_BASE_CHAIN) {
+		const struct nft_base_chain *basechain =
+						nft_base_chain(ctx->chain);
+		const struct nf_hook_ops *ops = &basechain->ops;
+
+		hook_mask = 1 << ops->hooknum;
+		if (hook_mask & match->hooks)
+			return 0;
+
+		/* This match is being called from an invalid chain */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+		      int event, u16 family, const char *name,
+		      int rev, int target)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+	event |= NFNL_SUBSYS_NFT_COMPAT << 8;
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = family;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
+	    nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
+	    nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
+		goto nla_put_failure;
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
+		const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	int ret = 0, target;
+	struct nfgenmsg *nfmsg;
+	const char *fmt;
+	const char *name;
+	u32 rev;
+	struct sk_buff *skb2;
+
+	if (tb[NFTA_COMPAT_NAME] == NULL ||
+	    tb[NFTA_COMPAT_REV] == NULL ||
+	    tb[NFTA_COMPAT_TYPE] == NULL)
+		return -EINVAL;
+
+	name = nla_data(tb[NFTA_COMPAT_NAME]);
+	rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
+	target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
+
+	nfmsg = nlmsg_data(nlh);
+
+	switch (nfmsg->nfgen_family) {
+	case AF_INET:
+		fmt = "ipt_%s";
+		break;
+	case AF_INET6:
+		fmt = "ip6t_%s";
+		break;
+	default:
+		pr_err("nft_compat: unsupported protocol %d\n",
+			nfmsg->nfgen_family);
+		return -EINVAL;
+	}
+
+	try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
+						 rev, target, &ret),
+						 fmt, name);
+
+	if (ret < 0)
+		return ret;
+
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	/* include the best revision for this extension in the message */
+	if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
+				  nlh->nlmsg_seq,
+				  NFNL_MSG_TYPE(nlh->nlmsg_type),
+				  NFNL_MSG_COMPAT_GET,
+				  nfmsg->nfgen_family,
+				  name, ret, target) <= 0) {
+		kfree_skb(skb2);
+		return -ENOSPC;
+	}
+
+	ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+				MSG_DONTWAIT);
+	if (ret > 0)
+		ret = 0;
+
+	return ret == -EAGAIN ? -ENOBUFS : ret;
+}
+
+static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+	[NFTA_COMPAT_NAME]	= { .type = NLA_NUL_STRING,
+				    .len = NFT_COMPAT_NAME_MAX-1 },
+	[NFTA_COMPAT_REV]	= { .type = NLA_U32 },
+	[NFTA_COMPAT_TYPE]	= { .type = NLA_U32 },
+};
+
+static const struct nfnl_callback nfnl_nft_compat_cb[NFNL_MSG_COMPAT_MAX] = {
+	[NFNL_MSG_COMPAT_GET]		= { .call = nfnl_compat_get,
+					    .attr_count = NFTA_COMPAT_MAX,
+					    .policy = nfnl_compat_policy_get },
+};
+
+static const struct nfnetlink_subsystem nfnl_compat_subsys = {
+	.name		= "nft-compat",
+	.subsys_id	= NFNL_SUBSYS_NFT_COMPAT,
+	.cb_count	= NFNL_MSG_COMPAT_MAX,
+	.cb		= nfnl_nft_compat_cb,
+};
+
+static LIST_HEAD(nft_match_list);
+
+struct nft_xt {
+	struct list_head	head;
+	struct nft_expr_ops	ops;
+};
+
+static struct nft_expr_type nft_match_type;
+
+static const struct nft_expr_ops *
+nft_match_select_ops(const struct nft_ctx *ctx,
+		     const struct nlattr * const tb[])
+{
+	struct nft_xt *nft_match;
+	struct xt_match *match;
+	char *mt_name;
+	__u32 rev, family;
+
+	if (tb[NFTA_MATCH_NAME] == NULL ||
+	    tb[NFTA_MATCH_REV] == NULL ||
+	    tb[NFTA_MATCH_INFO] == NULL)
+		return ERR_PTR(-EINVAL);
+
+	mt_name = nla_data(tb[NFTA_MATCH_NAME]);
+	rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
+	family = ctx->afi->family;
+
+	/* Re-use the existing match if it's already loaded. */
+	list_for_each_entry(nft_match, &nft_match_list, head) {
+		struct xt_match *match = nft_match->ops.data;
+
+		if (strcmp(match->name, mt_name) == 0 &&
+		    match->revision == rev && match->family == family)
+			return &nft_match->ops;
+	}
+
+	match = xt_request_find_match(family, mt_name, rev);
+	if (IS_ERR(match))
+		return ERR_PTR(-ENOENT);
+
+	/* This is the first time we use this match, allocate operations */
+	nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+	if (nft_match == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	nft_match->ops.type = &nft_match_type;
+	nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) +
+					    nft_compat_match_offset(match));
+	nft_match->ops.eval = nft_match_eval;
+	nft_match->ops.init = nft_match_init;
+	nft_match->ops.destroy = nft_match_destroy;
+	nft_match->ops.dump = nft_match_dump;
+	nft_match->ops.validate = nft_match_validate;
+	nft_match->ops.data = match;
+
+	list_add(&nft_match->head, &nft_match_list);
+
+	return &nft_match->ops;
+}
+
+static void nft_match_release(void)
+{
+	struct nft_xt *nft_match, *tmp;
+
+	list_for_each_entry_safe(nft_match, tmp, &nft_match_list, head)
+		kfree(nft_match);
+}
+
+static struct nft_expr_type nft_match_type __read_mostly = {
+	.name		= "match",
+	.select_ops	= nft_match_select_ops,
+	.policy		= nft_match_policy,
+	.maxattr	= NFTA_MATCH_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static LIST_HEAD(nft_target_list);
+
+static struct nft_expr_type nft_target_type;
+
+static const struct nft_expr_ops *
+nft_target_select_ops(const struct nft_ctx *ctx,
+		      const struct nlattr * const tb[])
+{
+	struct nft_xt *nft_target;
+	struct xt_target *target;
+	char *tg_name;
+	__u32 rev, family;
+
+	if (tb[NFTA_TARGET_NAME] == NULL ||
+	    tb[NFTA_TARGET_REV] == NULL ||
+	    tb[NFTA_TARGET_INFO] == NULL)
+		return ERR_PTR(-EINVAL);
+
+	tg_name = nla_data(tb[NFTA_TARGET_NAME]);
+	rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
+	family = ctx->afi->family;
+
+	/* Re-use the existing target if it's already loaded. */
+	list_for_each_entry(nft_target, &nft_target_list, head) {
+		struct xt_target *target = nft_target->ops.data;
+
+		if (strcmp(target->name, tg_name) == 0 &&
+		    target->revision == rev && target->family == family)
+			return &nft_target->ops;
+	}
+
+	target = xt_request_find_target(family, tg_name, rev);
+	if (IS_ERR(target))
+		return ERR_PTR(-ENOENT);
+
+	/* This is the first time we use this target, allocate operations */
+	nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+	if (nft_target == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	nft_target->ops.type = &nft_target_type;
+	nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) +
+					     nft_compat_target_offset(target));
+	nft_target->ops.eval = nft_target_eval;
+	nft_target->ops.init = nft_target_init;
+	nft_target->ops.destroy = nft_target_destroy;
+	nft_target->ops.dump = nft_target_dump;
+	nft_target->ops.validate = nft_target_validate;
+	nft_target->ops.data = target;
+
+	list_add(&nft_target->head, &nft_target_list);
+
+	return &nft_target->ops;
+}
+
+static void nft_target_release(void)
+{
+	struct nft_xt *nft_target, *tmp;
+
+	list_for_each_entry_safe(nft_target, tmp, &nft_target_list, head)
+		kfree(nft_target);
+}
+
+static struct nft_expr_type nft_target_type __read_mostly = {
+	.name		= "target",
+	.select_ops	= nft_target_select_ops,
+	.policy		= nft_target_policy,
+	.maxattr	= NFTA_TARGET_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_compat_module_init(void)
+{
+	int ret;
+
+	ret = nft_register_expr(&nft_match_type);
+	if (ret < 0)
+		return ret;
+
+	ret = nft_register_expr(&nft_target_type);
+	if (ret < 0)
+		goto err_match;
+
+	ret = nfnetlink_subsys_register(&nfnl_compat_subsys);
+	if (ret < 0) {
+		pr_err("nft_compat: cannot register with nfnetlink.\n");
+		goto err_target;
+	}
+
+	pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
+
+	return ret;
+
+err_target:
+	nft_unregister_expr(&nft_target_type);
+err_match:
+	nft_unregister_expr(&nft_match_type);
+	return ret;
+}
+
+static void __exit nft_compat_module_exit(void)
+{
+	nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+	nft_unregister_expr(&nft_target_type);
+	nft_unregister_expr(&nft_match_type);
+	nft_match_release();
+	nft_target_release();
+}
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
+
+module_init(nft_compat_module_init);
+module_exit(nft_compat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("match");
+MODULE_ALIAS_NFT_EXPR("target");
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
new file mode 100644
index 0000000..c89ee48
--- /dev/null
+++ b/net/netfilter/nft_counter.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/seqlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_counter {
+	seqlock_t	lock;
+	u64		bytes;
+	u64		packets;
+};
+
+static void nft_counter_eval(const struct nft_expr *expr,
+			     struct nft_data data[NFT_REG_MAX + 1],
+			     const struct nft_pktinfo *pkt)
+{
+	struct nft_counter *priv = nft_expr_priv(expr);
+
+	write_seqlock_bh(&priv->lock);
+	priv->bytes += pkt->skb->len;
+	priv->packets++;
+	write_sequnlock_bh(&priv->lock);
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_counter *priv = nft_expr_priv(expr);
+	unsigned int seq;
+	u64 bytes;
+	u64 packets;
+
+	do {
+		seq = read_seqbegin(&priv->lock);
+		bytes	= priv->bytes;
+		packets	= priv->packets;
+	} while (read_seqretry(&priv->lock, seq));
+
+	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
+		goto nla_put_failure;
+	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+	[NFTA_COUNTER_PACKETS]	= { .type = NLA_U64 },
+	[NFTA_COUNTER_BYTES]	= { .type = NLA_U64 },
+};
+
+static int nft_counter_init(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nlattr * const tb[])
+{
+	struct nft_counter *priv = nft_expr_priv(expr);
+
+	if (tb[NFTA_COUNTER_PACKETS])
+		priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+	if (tb[NFTA_COUNTER_BYTES])
+		priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+
+	seqlock_init(&priv->lock);
+	return 0;
+}
+
+static struct nft_expr_type nft_counter_type;
+static const struct nft_expr_ops nft_counter_ops = {
+	.type		= &nft_counter_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+	.eval		= nft_counter_eval,
+	.init		= nft_counter_init,
+	.dump		= nft_counter_dump,
+};
+
+static struct nft_expr_type nft_counter_type __read_mostly = {
+	.name		= "counter",
+	.ops		= &nft_counter_ops,
+	.policy		= nft_counter_policy,
+	.maxattr	= NFTA_COUNTER_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_counter_module_init(void)
+{
+	return nft_register_expr(&nft_counter_type);
+}
+
+static void __exit nft_counter_module_exit(void)
+{
+	nft_unregister_expr(&nft_counter_type);
+}
+
+module_init(nft_counter_module_init);
+module_exit(nft_counter_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("counter");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
new file mode 100644
index 0000000..955f4e6
--- /dev/null
+++ b/net/netfilter/nft_ct.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+struct nft_ct {
+	enum nft_ct_keys	key:8;
+	enum ip_conntrack_dir	dir:8;
+	enum nft_registers	dreg:8;
+	uint8_t			family;
+};
+
+static void nft_ct_eval(const struct nft_expr *expr,
+			struct nft_data data[NFT_REG_MAX + 1],
+			const struct nft_pktinfo *pkt)
+{
+	const struct nft_ct *priv = nft_expr_priv(expr);
+	struct nft_data *dest = &data[priv->dreg];
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn *ct;
+	const struct nf_conn_help *help;
+	const struct nf_conntrack_tuple *tuple;
+	const struct nf_conntrack_helper *helper;
+	long diff;
+	unsigned int state;
+
+	ct = nf_ct_get(pkt->skb, &ctinfo);
+
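+	/* NFT_CT_STATE is handled first since it is the only key with a
+	 * well-defined result when no conntrack entry is attached. */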
+	switch (priv->key) {
+	case NFT_CT_STATE:
+		if (ct == NULL)
+			state = NF_CT_STATE_INVALID_BIT;
+		else if (nf_ct_is_untracked(ct))
+			state = NF_CT_STATE_UNTRACKED_BIT;
+		else
+			state = NF_CT_STATE_BIT(ctinfo);
+		dest->data[0] = state;
+		return;
+	}
+
+	if (ct == NULL)
+		goto err;
+
+	switch (priv->key) {
+	case NFT_CT_DIRECTION:
+		dest->data[0] = CTINFO2DIR(ctinfo);
+		return;
+	case NFT_CT_STATUS:
+		dest->data[0] = ct->status;
+		return;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	case NFT_CT_MARK:
+		dest->data[0] = ct->mark;
+		return;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+	case NFT_CT_SECMARK:
+		dest->data[0] = ct->secmark;
+		return;
+#endif
+	case NFT_CT_EXPIRATION:
+		diff = (long)jiffies - (long)ct->timeout.expires;
+		if (diff < 0)
+			diff = 0;
+		dest->data[0] = jiffies_to_msecs(diff);
+		return;
+	case NFT_CT_HELPER:
+		if (ct->master == NULL)
+			goto err;
+		help = nfct_help(ct->master);
+		if (help == NULL)
+			goto err;
+		helper = rcu_dereference(help->helper);
+		if (helper == NULL)
+			goto err;
+		if (strlen(helper->name) >= sizeof(dest->data))
+			goto err;
+		strncpy((char *)dest->data, helper->name, sizeof(dest->data));
+		return;
+	}
+
+	tuple = &ct->tuplehash[priv->dir].tuple;
+	switch (priv->key) {
+	case NFT_CT_L3PROTOCOL:
+		dest->data[0] = nf_ct_l3num(ct);
+		return;
+	case NFT_CT_SRC:
+		memcpy(dest->data, tuple->src.u3.all,
+		       nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+		return;
+	case NFT_CT_DST:
+		memcpy(dest->data, tuple->dst.u3.all,
+		       nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+		return;
+	case NFT_CT_PROTOCOL:
+		dest->data[0] = nf_ct_protonum(ct);
+		return;
+	case NFT_CT_PROTO_SRC:
+		dest->data[0] = (__force __u16)tuple->src.u.all;
+		return;
+	case NFT_CT_PROTO_DST:
+		dest->data[0] = (__force __u16)tuple->dst.u.all;
+		return;
+	}
+	return;
+err:
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
+	[NFTA_CT_DREG]		= { .type = NLA_U32 },
+	[NFTA_CT_KEY]		= { .type = NLA_U32 },
+	[NFTA_CT_DIRECTION]	= { .type = NLA_U8 },
+};
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+		       const struct nft_expr *expr,
+		       const struct nlattr * const tb[])
+{
+	struct nft_ct *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_CT_DREG] == NULL ||
+	    tb[NFTA_CT_KEY] == NULL)
+		return -EINVAL;
+
+	priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+	if (tb[NFTA_CT_DIRECTION] != NULL) {
+		priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+		switch (priv->dir) {
+		case IP_CT_DIR_ORIGINAL:
+		case IP_CT_DIR_REPLY:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	switch (priv->key) {
+	case NFT_CT_STATE:
+	case NFT_CT_DIRECTION:
+	case NFT_CT_STATUS:
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	case NFT_CT_MARK:
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+	case NFT_CT_SECMARK:
+#endif
+	case NFT_CT_EXPIRATION:
+	case NFT_CT_HELPER:
+		if (tb[NFTA_CT_DIRECTION] != NULL)
+			return -EINVAL;
+		break;
+	case NFT_CT_PROTOCOL:
+	case NFT_CT_SRC:
+	case NFT_CT_DST:
+	case NFT_CT_PROTO_SRC:
+	case NFT_CT_PROTO_DST:
+		if (tb[NFTA_CT_DIRECTION] == NULL)
+			return -EINVAL;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+	if (err < 0)
+		return err;
+	priv->family = ctx->afi->family;
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		goto err1;
+
+	err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+	if (err < 0)
+		goto err1;
+	return 0;
+
+err1:
+	nf_ct_l3proto_module_put(ctx->afi->family);
+	return err;
+}
+
+static void nft_ct_destroy(const struct nft_expr *expr)
+{
+	struct nft_ct *priv = nft_expr_priv(expr);
+
+	nf_ct_l3proto_module_put(priv->family);
+}
+
+static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_ct *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_CT_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+		goto nla_put_failure;
+	if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_ct_type;
+static const struct nft_expr_ops nft_ct_ops = {
+	.type		= &nft_ct_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+	.eval		= nft_ct_eval,
+	.init		= nft_ct_init,
+	.destroy	= nft_ct_destroy,
+	.dump		= nft_ct_dump,
+};
+
+static struct nft_expr_type nft_ct_type __read_mostly = {
+	.name		= "ct",
+	.ops		= &nft_ct_ops,
+	.policy		= nft_ct_policy,
+	.maxattr	= NFTA_CT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_ct_module_init(void)
+{
+	return nft_register_expr(&nft_ct_type);
+}
+
+static void __exit nft_ct_module_exit(void)
+{
+	nft_unregister_expr(&nft_ct_type);
+}
+
+module_init(nft_ct_module_init);
+module_exit(nft_ct_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("ct");
diff --git a/net/netfilter/nft_expr_template.c b/net/netfilter/nft_expr_template.c
new file mode 100644
index 0000000..b6eed4d
--- /dev/null
+++ b/net/netfilter/nft_expr_template.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_template {
+
+};
+
+static void nft_template_eval(const struct nft_expr *expr,
+			      struct nft_data data[NFT_REG_MAX + 1],
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static const struct nla_policy nft_template_policy[NFTA_TEMPLATE_MAX + 1] = {
+	[NFTA_TEMPLATE_ATTR]		= { .type = NLA_U32 },
+};
+
+static int nft_template_init(const struct nft_ctx *ctx,
+			   const struct nft_expr *expr,
+			   const struct nlattr * const tb[])
+{
+	struct nft_template *priv = nft_expr_priv(expr);
+
+	return 0;
+}
+
+static void nft_template_destroy(const struct nft_expr *expr)
+{
+	struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static int nft_template_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_template *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_TEMPLATE_ATTR, priv->field))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_template_type;
+static const struct nft_expr_ops nft_template_ops = {
+	.type		= &nft_template_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_template)),
+	.eval		= nft_template_eval,
+	.init		= nft_template_init,
+	.destroy	= nft_template_destroy,
+	.dump		= nft_template_dump,
+};
+
+static struct nft_expr_type nft_template_type __read_mostly = {
+	.name		= "template",
+	.ops		= &nft_template_ops,
+	.policy		= nft_template_policy,
+	.maxattr	= NFTA_TEMPLATE_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_template_module_init(void)
+{
+	return nft_register_expr(&nft_template_type);
+}
+
+static void __exit nft_template_module_exit(void)
+{
+	nft_unregister_expr(&nft_template_type);
+}
+
+module_init(nft_template_module_init);
+module_exit(nft_template_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("template");
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
new file mode 100644
index 0000000..8e0bb75
--- /dev/null
+++ b/net/netfilter/nft_exthdr.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+// FIXME:
+#include <net/ipv6.h>
+
+struct nft_exthdr {
+	u8			type;
+	u8			offset;
+	u8			len;
+	enum nft_registers	dreg:8;
+};
+
+static void nft_exthdr_eval(const struct nft_expr *expr,
+			    struct nft_data data[NFT_REG_MAX + 1],
+			    const struct nft_pktinfo *pkt)
+{
+	struct nft_exthdr *priv = nft_expr_priv(expr);
+	struct nft_data *dest = &data[priv->dreg];
+	unsigned int offset;
+	int err;
+
+	err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
+	if (err < 0)
+		goto err;
+	offset += priv->offset;
+
+	if (skb_copy_bits(pkt->skb, offset, dest->data, priv->len) < 0)
+		goto err;
+	return;
+err:
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+	[NFTA_EXTHDR_DREG]		= { .type = NLA_U32 },
+	[NFTA_EXTHDR_TYPE]		= { .type = NLA_U8 },
+	[NFTA_EXTHDR_OFFSET]		= { .type = NLA_U32 },
+	[NFTA_EXTHDR_LEN]		= { .type = NLA_U32 },
+};
+
+static int nft_exthdr_init(const struct nft_ctx *ctx,
+			   const struct nft_expr *expr,
+			   const struct nlattr * const tb[])
+{
+	struct nft_exthdr *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_EXTHDR_DREG] == NULL ||
+	    tb[NFTA_EXTHDR_TYPE] == NULL ||
+	    tb[NFTA_EXTHDR_OFFSET] == NULL ||
+	    tb[NFTA_EXTHDR_LEN] == NULL)
+		return -EINVAL;
+
+	priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+	priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+	priv->len    = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+	if (priv->len == 0 ||
+	    priv->len > FIELD_SIZEOF(struct nft_data, data))
+		return -EINVAL;
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_EXTHDR_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+	return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_exthdr *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_EXTHDR_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+	if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_exthdr_type;
+static const struct nft_expr_ops nft_exthdr_ops = {
+	.type		= &nft_exthdr_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+	.eval		= nft_exthdr_eval,
+	.init		= nft_exthdr_init,
+	.dump		= nft_exthdr_dump,
+};
+
+static struct nft_expr_type nft_exthdr_type __read_mostly = {
+	.name		= "exthdr",
+	.ops		= &nft_exthdr_ops,
+	.policy		= nft_exthdr_policy,
+	.maxattr	= NFTA_EXTHDR_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_exthdr_module_init(void)
+{
+	return nft_register_expr(&nft_exthdr_type);
+}
+
+static void __exit nft_exthdr_module_exit(void)
+{
+	nft_unregister_expr(&nft_exthdr_type);
+}
+
+module_init(nft_exthdr_module_init);
+module_exit(nft_exthdr_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("exthdr");
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
new file mode 100644
index 0000000..3d3f8fc
--- /dev/null
+++ b/net/netfilter/nft_hash.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_hash {
+	struct hlist_head	*hash;
+	unsigned int		hsize;
+};
+
+struct nft_hash_elem {
+	struct hlist_node	hnode;
+	struct nft_data		key;
+	struct nft_data		data[];
+};
+
+static u32 nft_hash_rnd __read_mostly;
+static bool nft_hash_rnd_initted __read_mostly;
+
+static unsigned int nft_hash_data(const struct nft_data *data,
+				  unsigned int hsize, unsigned int len)
+{
+	unsigned int h;
+
+	h = jhash(data->data, len, nft_hash_rnd);
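+	/* Scale the 32 bit hash onto [0, hsize) with a multiply and shift
+	 * instead of an expensive modulus. */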
+	return ((u64)h * hsize) >> 32;
+}
+
+static bool nft_hash_lookup(const struct nft_set *set,
+			    const struct nft_data *key,
+			    struct nft_data *data)
+{
+	const struct nft_hash *priv = nft_set_priv(set);
+	const struct nft_hash_elem *he;
+	unsigned int h;
+
+	h = nft_hash_data(key, priv->hsize, set->klen);
+	hlist_for_each_entry(he, &priv->hash[h], hnode) {
+		if (nft_data_cmp(&he->key, key, set->klen))
+			continue;
+		if (set->flags & NFT_SET_MAP)
+			nft_data_copy(data, he->data);
+		return true;
+	}
+	return false;
+}
+
+static void nft_hash_elem_destroy(const struct nft_set *set,
+				  struct nft_hash_elem *he)
+{
+	nft_data_uninit(&he->key, NFT_DATA_VALUE);
+	if (set->flags & NFT_SET_MAP)
+		nft_data_uninit(he->data, set->dtype);
+	kfree(he);
+}
+
+static int nft_hash_insert(const struct nft_set *set,
+			   const struct nft_set_elem *elem)
+{
+	struct nft_hash *priv = nft_set_priv(set);
+	struct nft_hash_elem *he;
+	unsigned int size, h;
+
+	if (elem->flags != 0)
+		return -EINVAL;
+
+	size = sizeof(*he);
+	if (set->flags & NFT_SET_MAP)
+		size += sizeof(he->data[0]);
+
+	he = kzalloc(size, GFP_KERNEL);
+	if (he == NULL)
+		return -ENOMEM;
+
+	nft_data_copy(&he->key, &elem->key);
+	if (set->flags & NFT_SET_MAP)
+		nft_data_copy(he->data, &elem->data);
+
+	h = nft_hash_data(&he->key, priv->hsize, set->klen);
+	hlist_add_head_rcu(&he->hnode, &priv->hash[h]);
+	return 0;
+}
+
+static void nft_hash_remove(const struct nft_set *set,
+			    const struct nft_set_elem *elem)
+{
+	struct nft_hash_elem *he = elem->cookie;
+
+	hlist_del_rcu(&he->hnode);
+	kfree(he);
+}
+
+static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+	const struct nft_hash *priv = nft_set_priv(set);
+	struct nft_hash_elem *he;
+	unsigned int h;
+
+	h = nft_hash_data(&elem->key, priv->hsize, set->klen);
+	hlist_for_each_entry(he, &priv->hash[h], hnode) {
+		if (nft_data_cmp(&he->key, &elem->key, set->klen))
+			continue;
+
+		elem->cookie = he;
+		elem->flags  = 0;
+		if (set->flags & NFT_SET_MAP)
+			nft_data_copy(&elem->data, he->data);
+		return 0;
+	}
+	return -ENOENT;
+}
+
+static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+			  struct nft_set_iter *iter)
+{
+	const struct nft_hash *priv = nft_set_priv(set);
+	const struct nft_hash_elem *he;
+	struct nft_set_elem elem;
+	unsigned int i;
+
+	for (i = 0; i < priv->hsize; i++) {
+		hlist_for_each_entry(he, &priv->hash[i], hnode) {
+			if (iter->count < iter->skip)
+				goto cont;
+
+			memcpy(&elem.key, &he->key, sizeof(elem.key));
+			if (set->flags & NFT_SET_MAP)
+				memcpy(&elem.data, he->data, sizeof(elem.data));
+			elem.flags = 0;
+
+			iter->err = iter->fn(ctx, set, iter, &elem);
+			if (iter->err < 0)
+				return;
+cont:
+			iter->count++;
+		}
+	}
+}
+
+static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+{
+	return sizeof(struct nft_hash);
+}
+
+static int nft_hash_init(const struct nft_set *set,
+			 const struct nlattr * const tb[])
+{
+	struct nft_hash *priv = nft_set_priv(set);
+	unsigned int cnt, i;
+
+	if (unlikely(!nft_hash_rnd_initted)) {
+		get_random_bytes(&nft_hash_rnd, sizeof(nft_hash_rnd));
+		nft_hash_rnd_initted = true;
+	}
+
+	/* Aim for a load factor of 0.75 */
+	/* FIXME: temporarily broken until we have set descriptions */
+	cnt = 100;
+	cnt = cnt * 4 / 3;
+
+	priv->hash = kcalloc(cnt, sizeof(struct hlist_head), GFP_KERNEL);
+	if (priv->hash == NULL)
+		return -ENOMEM;
+	priv->hsize = cnt;
+
+	for (i = 0; i < cnt; i++)
+		INIT_HLIST_HEAD(&priv->hash[i]);
+
+	return 0;
+}
+
+static void nft_hash_destroy(const struct nft_set *set)
+{
+	const struct nft_hash *priv = nft_set_priv(set);
+	const struct hlist_node *next;
+	struct nft_hash_elem *elem;
+	unsigned int i;
+
+	for (i = 0; i < priv->hsize; i++) {
+		hlist_for_each_entry_safe(elem, next, &priv->hash[i], hnode) {
+			hlist_del(&elem->hnode);
+			nft_hash_elem_destroy(set, elem);
+		}
+	}
+	kfree(priv->hash);
+}
+
+static struct nft_set_ops nft_hash_ops __read_mostly = {
+	.privsize       = nft_hash_privsize,
+	.init		= nft_hash_init,
+	.destroy	= nft_hash_destroy,
+	.get		= nft_hash_get,
+	.insert		= nft_hash_insert,
+	.remove		= nft_hash_remove,
+	.lookup		= nft_hash_lookup,
+	.walk		= nft_hash_walk,
+	.features	= NFT_SET_MAP,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_hash_module_init(void)
+{
+	return nft_register_set(&nft_hash_ops);
+}
+
+static void __exit nft_hash_module_exit(void)
+{
+	nft_unregister_set(&nft_hash_ops);
+}
+
+module_init(nft_hash_module_init);
+module_exit(nft_hash_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
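
nft_hash_data() above reduces the 32-bit jhash value to a bucket index without a modulo: multiplying by hsize and keeping the top 32 bits spreads hashes uniformly over [0, hsize). A runnable toy replay; toy_hash() is an arbitrary mixer standing in for the kernel's jhash():

#include <stdio.h>
#include <stdint.h>

/* arbitrary 32-bit mixer standing in for jhash() */
static uint32_t toy_hash(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x45d9f3bU;
	x ^= x >> 16;
	return x;
}

/* same range reduction as nft_hash_data() */
static unsigned int bucket(uint32_t h, unsigned int hsize)
{
	return ((uint64_t)h * hsize) >> 32;
}

int main(void)
{
	unsigned int hsize = 133;	/* 100 * 4 / 3, as in nft_hash_init() */
	uint32_t key;

	for (key = 0; key < 5; key++)
		printf("key %u -> bucket %u\n", (unsigned)key,
		       bucket(toy_hash(key), hsize));
	return 0;
}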
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
new file mode 100644
index 0000000..f169501
--- /dev/null
+++ b/net/netfilter/nft_immediate.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_immediate_expr {
+	struct nft_data		data;
+	enum nft_registers	dreg:8;
+	u8			dlen;
+};
+
+static void nft_immediate_eval(const struct nft_expr *expr,
+			       struct nft_data data[NFT_REG_MAX + 1],
+			       const struct nft_pktinfo *pkt)
+{
+	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+	nft_data_copy(&data[priv->dreg], &priv->data);
+}
+
+static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
+	[NFTA_IMMEDIATE_DREG]	= { .type = NLA_U32 },
+	[NFTA_IMMEDIATE_DATA]	= { .type = NLA_NESTED },
+};
+
+static int nft_immediate_init(const struct nft_ctx *ctx,
+			      const struct nft_expr *expr,
+			      const struct nlattr * const tb[])
+{
+	struct nft_immediate_expr *priv = nft_expr_priv(expr);
+	struct nft_data_desc desc;
+	int err;
+
+	if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
+	    tb[NFTA_IMMEDIATE_DATA] == NULL)
+		return -EINVAL;
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+
+	err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
+	if (err < 0)
+		return err;
+	priv->dlen = desc.len;
+
+	err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
+	if (err < 0)
+		goto err1;
+
+	return 0;
+
+err1:
+	nft_data_uninit(&priv->data, desc.type);
+	return err;
+}
+
+static void nft_immediate_destroy(const struct nft_expr *expr)
+{
+	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+	return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+}
+
+static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+
+	return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
+			     nft_dreg_to_type(priv->dreg), priv->dlen);
+
+nla_put_failure:
+	return -1;
+}
+
+static int nft_immediate_validate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  const struct nft_data **data)
+{
+	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+	if (priv->dreg == NFT_REG_VERDICT)
+		*data = &priv->data;
+
+	return 0;
+}
+
+static struct nft_expr_type nft_imm_type;
+static const struct nft_expr_ops nft_imm_ops = {
+	.type		= &nft_imm_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
+	.eval		= nft_immediate_eval,
+	.init		= nft_immediate_init,
+	.destroy	= nft_immediate_destroy,
+	.dump		= nft_immediate_dump,
+	.validate	= nft_immediate_validate,
+};
+
+static struct nft_expr_type nft_imm_type __read_mostly = {
+	.name		= "immediate",
+	.ops		= &nft_imm_ops,
+	.policy		= nft_immediate_policy,
+	.maxattr	= NFTA_IMMEDIATE_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_immediate_module_init(void)
+{
+	return nft_register_expr(&nft_imm_type);
+}
+
+void nft_immediate_module_exit(void)
+{
+	nft_unregister_expr(&nft_imm_type);
+}
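
The immediate expression is pure register dataflow: init parses a constant into priv->data once, and eval copies it into the destination register on every packet. A minimal userspace sketch of that register file; the 4-register layout and names are illustrative assumptions, not the kernel's exact NFT_REG_* encoding:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_data {			/* stand-in for struct nft_data */
	uint32_t data[4];
};

int main(void)
{
	/* register 0 holds the verdict, 1..4 hold data */
	struct toy_data regs[5];
	struct toy_data imm = { .data = { 0xdeadbeef } };
	unsigned int dreg = 1;		/* pretend NFTA_IMMEDIATE_DREG */

	memset(regs, 0, sizeof(regs));
	memcpy(&regs[dreg], &imm, sizeof(imm));	/* nft_data_copy() analog */
	printf("reg %u = 0x%x\n", dreg, (unsigned)regs[dreg].data[0]);
	return 0;
}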
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
new file mode 100644
index 0000000..85da5bd
--- /dev/null
+++ b/net/netfilter/nft_limit.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+static DEFINE_SPINLOCK(limit_lock);
+
+struct nft_limit {
+	u64		tokens;
+	u64		rate;
+	u64		unit;
+	unsigned long	stamp;
+};
+
+static void nft_limit_eval(const struct nft_expr *expr,
+			   struct nft_data data[NFT_REG_MAX + 1],
+			   const struct nft_pktinfo *pkt)
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+
+	spin_lock_bh(&limit_lock);
+	if (time_after_eq(jiffies, priv->stamp)) {
+		priv->tokens = priv->rate;
+		priv->stamp = jiffies + priv->unit * HZ;
+	}
+
+	if (priv->tokens >= 1) {
+		priv->tokens--;
+		spin_unlock_bh(&limit_lock);
+		return;
+	}
+	spin_unlock_bh(&limit_lock);
+
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+	[NFTA_LIMIT_RATE]	= { .type = NLA_U64 },
+	[NFTA_LIMIT_UNIT]	= { .type = NLA_U64 },
+};
+
+static int nft_limit_init(const struct nft_ctx *ctx,
+			  const struct nft_expr *expr,
+			  const struct nlattr * const tb[])
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+
+	if (tb[NFTA_LIMIT_RATE] == NULL ||
+	    tb[NFTA_LIMIT_UNIT] == NULL)
+		return -EINVAL;
+
+	priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+	priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+	priv->stamp  = jiffies + priv->unit * HZ;
+	priv->tokens = priv->rate;
+	return 0;
+}
+
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_limit *priv = nft_expr_priv(expr);
+
+	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
+		goto nla_put_failure;
+	if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_limit_type;
+static const struct nft_expr_ops nft_limit_ops = {
+	.type		= &nft_limit_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_limit)),
+	.eval		= nft_limit_eval,
+	.init		= nft_limit_init,
+	.dump		= nft_limit_dump,
+};
+
+static struct nft_expr_type nft_limit_type __read_mostly = {
+	.name		= "limit",
+	.ops		= &nft_limit_ops,
+	.policy		= nft_limit_policy,
+	.maxattr	= NFTA_LIMIT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_limit_module_init(void)
+{
+	return nft_register_expr(&nft_limit_type);
+}
+
+static void __exit nft_limit_module_exit(void)
+{
+	nft_unregister_expr(&nft_limit_type);
+}
+
+module_init(nft_limit_module_init);
+module_exit(nft_limit_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("limit");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
new file mode 100644
index 0000000..57cad07
--- /dev/null
+++ b/net/netfilter/nft_log.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <linux/netdevice.h>
+
+static const char *nft_log_null_prefix = "";
+
+struct nft_log {
+	struct nf_loginfo	loginfo;
+	char			*prefix;
+	int			family;
+};
+
+static void nft_log_eval(const struct nft_expr *expr,
+			 struct nft_data data[NFT_REG_MAX + 1],
+			 const struct nft_pktinfo *pkt)
+{
+	const struct nft_log *priv = nft_expr_priv(expr);
+	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+	nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+		      pkt->out, &priv->loginfo, "%s", priv->prefix);
+}
+
+static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
+	[NFTA_LOG_GROUP]	= { .type = NLA_U16 },
+	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING },
+	[NFTA_LOG_SNAPLEN]	= { .type = NLA_U32 },
+	[NFTA_LOG_QTHRESHOLD]	= { .type = NLA_U16 },
+};
+
+static int nft_log_init(const struct nft_ctx *ctx,
+			const struct nft_expr *expr,
+			const struct nlattr * const tb[])
+{
+	struct nft_log *priv = nft_expr_priv(expr);
+	struct nf_loginfo *li = &priv->loginfo;
+	const struct nlattr *nla;
+
+	priv->family = ctx->afi->family;
+
+	nla = tb[NFTA_LOG_PREFIX];
+	if (nla != NULL) {
+		priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+		if (priv->prefix == NULL)
+			return -ENOMEM;
+		nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
+	} else
+		priv->prefix = (char *)nft_log_null_prefix;
+
+	li->type = NF_LOG_TYPE_ULOG;
+	if (tb[NFTA_LOG_GROUP] != NULL)
+		li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+
+	if (tb[NFTA_LOG_SNAPLEN] != NULL)
+		li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+	if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+		li->u.ulog.qthreshold =
+			ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+	}
+
+	return 0;
+}
+
+static void nft_log_destroy(const struct nft_expr *expr)
+{
+	struct nft_log *priv = nft_expr_priv(expr);
+
+	if (priv->prefix != nft_log_null_prefix)
+		kfree(priv->prefix);
+}
+
+static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_log *priv = nft_expr_priv(expr);
+	const struct nf_loginfo *li = &priv->loginfo;
+
+	if (priv->prefix != nft_log_null_prefix)
+		if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
+			goto nla_put_failure;
+	if (li->u.ulog.group)
+		if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
+			goto nla_put_failure;
+	if (li->u.ulog.copy_len)
+		if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+				 htonl(li->u.ulog.copy_len)))
+			goto nla_put_failure;
+	if (li->u.ulog.qthreshold)
+		if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+				 htons(li->u.ulog.qthreshold)))
+			goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_log_type;
+static const struct nft_expr_ops nft_log_ops = {
+	.type		= &nft_log_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_log)),
+	.eval		= nft_log_eval,
+	.init		= nft_log_init,
+	.destroy	= nft_log_destroy,
+	.dump		= nft_log_dump,
+};
+
+static struct nft_expr_type nft_log_type __read_mostly = {
+	.name		= "log",
+	.ops		= &nft_log_ops,
+	.policy		= nft_log_policy,
+	.maxattr	= NFTA_LOG_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_log_module_init(void)
+{
+	return nft_register_expr(&nft_log_type);
+}
+
+static void __exit nft_log_module_exit(void)
+{
+	nft_unregister_expr(&nft_log_type);
+}
+
+module_init(nft_log_module_init);
+module_exit(nft_log_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("log");
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
new file mode 100644
index 0000000..8a6116b
--- /dev/null
+++ b/net/netfilter/nft_lookup.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_lookup {
+	struct nft_set			*set;
+	enum nft_registers		sreg:8;
+	enum nft_registers		dreg:8;
+	struct nft_set_binding		binding;
+};
+
+static void nft_lookup_eval(const struct nft_expr *expr,
+			    struct nft_data data[NFT_REG_MAX + 1],
+			    const struct nft_pktinfo *pkt)
+{
+	const struct nft_lookup *priv = nft_expr_priv(expr);
+	const struct nft_set *set = priv->set;
+
+	if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+		return;
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
+	[NFTA_LOOKUP_SET]	= { .type = NLA_STRING },
+	[NFTA_LOOKUP_SREG]	= { .type = NLA_U32 },
+	[NFTA_LOOKUP_DREG]	= { .type = NLA_U32 },
+};
+
+static int nft_lookup_init(const struct nft_ctx *ctx,
+			   const struct nft_expr *expr,
+			   const struct nlattr * const tb[])
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+	struct nft_set *set;
+	int err;
+
+	if (tb[NFTA_LOOKUP_SET] == NULL ||
+	    tb[NFTA_LOOKUP_SREG] == NULL)
+		return -EINVAL;
+
+	set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+
+	priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
+	err = nft_validate_input_register(priv->sreg);
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_LOOKUP_DREG] != NULL) {
+		if (!(set->flags & NFT_SET_MAP))
+			return -EINVAL;
+
+		priv->dreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_DREG]));
+		err = nft_validate_output_register(priv->dreg);
+		if (err < 0)
+			return err;
+
+		if (priv->dreg == NFT_REG_VERDICT) {
+			if (set->dtype != NFT_DATA_VERDICT)
+				return -EINVAL;
+		} else if (set->dtype == NFT_DATA_VERDICT)
+			return -EINVAL;
+	} else if (set->flags & NFT_SET_MAP)
+		return -EINVAL;
+
+	err = nf_tables_bind_set(ctx, set, &priv->binding);
+	if (err < 0)
+		return err;
+
+	priv->set = set;
+	return 0;
+}
+
+static void nft_lookup_destroy(const struct nft_expr *expr)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	nf_tables_unbind_set(NULL, priv->set, &priv->binding);
+}
+
+static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_lookup *priv = nft_expr_priv(expr);
+
+	if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_LOOKUP_SREG, htonl(priv->sreg)))
+		goto nla_put_failure;
+	if (priv->set->flags & NFT_SET_MAP)
+		if (nla_put_be32(skb, NFTA_LOOKUP_DREG, htonl(priv->dreg)))
+			goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_lookup_type;
+static const struct nft_expr_ops nft_lookup_ops = {
+	.type		= &nft_lookup_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
+	.eval		= nft_lookup_eval,
+	.init		= nft_lookup_init,
+	.destroy	= nft_lookup_destroy,
+	.dump		= nft_lookup_dump,
+};
+
+static struct nft_expr_type nft_lookup_type __read_mostly = {
+	.name		= "lookup",
+	.ops		= &nft_lookup_ops,
+	.policy		= nft_lookup_policy,
+	.maxattr	= NFTA_LOOKUP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_lookup_module_init(void)
+{
+	return nft_register_expr(&nft_lookup_type);
+}
+
+void nft_lookup_module_exit(void)
+{
+	nft_unregister_expr(&nft_lookup_type);
+}
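
nft_lookup_eval() wires sets into the register machine: the key is read from the source register, the backend's lookup() runs, and for map-typed sets the bound data lands in the destination register; a miss yields NFT_BREAK. A toy linear scan standing in for set->ops->lookup() to show that dataflow:

#include <stdio.h>
#include <stdint.h>

struct toy_pair { uint32_t key, val; };

/* stand-in for set->ops->lookup() on a map-typed set */
static int toy_lookup(const struct toy_pair *map, unsigned int n,
		      uint32_t key, uint32_t *val)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (map[i].key == key) {
			*val = map[i].val;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct toy_pair map[] = { { 22, 1 }, { 80, 2 } };
	uint32_t regs[5] = { 0, 80 };	/* reg 0 = verdict, reg 1 = sreg */

	if (toy_lookup(map, 2, regs[1], &regs[2]))
		printf("hit: dreg = %u\n", (unsigned)regs[2]);
	else
		printf("miss: verdict = NFT_BREAK\n");
	return 0;
}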
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
new file mode 100644
index 0000000..8c28220
--- /dev/null
+++ b/net/netfilter/nft_meta.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+	enum nft_meta_keys	key:8;
+	enum nft_registers	dreg:8;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt)
+{
+	const struct nft_meta *priv = nft_expr_priv(expr);
+	const struct sk_buff *skb = pkt->skb;
+	const struct net_device *in = pkt->in, *out = pkt->out;
+	struct nft_data *dest = &data[priv->dreg];
+
+	switch (priv->key) {
+	case NFT_META_LEN:
+		dest->data[0] = skb->len;
+		break;
+	case NFT_META_PROTOCOL:
+		*(__be16 *)dest->data = skb->protocol;
+		break;
+	case NFT_META_PRIORITY:
+		dest->data[0] = skb->priority;
+		break;
+	case NFT_META_MARK:
+		dest->data[0] = skb->mark;
+		break;
+	case NFT_META_IIF:
+		if (in == NULL)
+			goto err;
+		dest->data[0] = in->ifindex;
+		break;
+	case NFT_META_OIF:
+		if (out == NULL)
+			goto err;
+		dest->data[0] = out->ifindex;
+		break;
+	case NFT_META_IIFNAME:
+		if (in == NULL)
+			goto err;
+		strncpy((char *)dest->data, in->name, sizeof(dest->data));
+		break;
+	case NFT_META_OIFNAME:
+		if (out == NULL)
+			goto err;
+		strncpy((char *)dest->data, out->name, sizeof(dest->data));
+		break;
+	case NFT_META_IIFTYPE:
+		if (in == NULL)
+			goto err;
+		*(u16 *)dest->data = in->type;
+		break;
+	case NFT_META_OIFTYPE:
+		if (out == NULL)
+			goto err;
+		*(u16 *)dest->data = out->type;
+		break;
+	case NFT_META_SKUID:
+		if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+			goto err;
+
+		read_lock_bh(&skb->sk->sk_callback_lock);
+		if (skb->sk->sk_socket == NULL ||
+		    skb->sk->sk_socket->file == NULL) {
+			read_unlock_bh(&skb->sk->sk_callback_lock);
+			goto err;
+		}
+
+		dest->data[0] =
+			from_kuid_munged(&init_user_ns,
+				skb->sk->sk_socket->file->f_cred->fsuid);
+		read_unlock_bh(&skb->sk->sk_callback_lock);
+		break;
+	case NFT_META_SKGID:
+		if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+			goto err;
+
+		read_lock_bh(&skb->sk->sk_callback_lock);
+		if (skb->sk->sk_socket == NULL ||
+		    skb->sk->sk_socket->file == NULL) {
+			read_unlock_bh(&skb->sk->sk_callback_lock);
+			goto err;
+		}
+		dest->data[0] =
+			from_kgid_munged(&init_user_ns,
+				 skb->sk->sk_socket->file->f_cred->fsgid);
+		read_unlock_bh(&skb->sk->sk_callback_lock);
+		break;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	case NFT_META_RTCLASSID: {
+		const struct dst_entry *dst = skb_dst(skb);
+
+		if (dst == NULL)
+			goto err;
+		dest->data[0] = dst->tclassid;
+		break;
+	}
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+	case NFT_META_SECMARK:
+		dest->data[0] = skb->secmark;
+		break;
+#endif
+	default:
+		WARN_ON(1);
+		goto err;
+	}
+	return;
+
+err:
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+	[NFTA_META_DREG]	= { .type = NLA_U32 },
+	[NFTA_META_KEY]		= { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+			 const struct nlattr * const tb[])
+{
+	struct nft_meta *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_META_DREG] == NULL ||
+	    tb[NFTA_META_KEY] == NULL)
+		return -EINVAL;
+
+	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+	switch (priv->key) {
+	case NFT_META_LEN:
+	case NFT_META_PROTOCOL:
+	case NFT_META_PRIORITY:
+	case NFT_META_MARK:
+	case NFT_META_IIF:
+	case NFT_META_OIF:
+	case NFT_META_IIFNAME:
+	case NFT_META_OIFNAME:
+	case NFT_META_IIFTYPE:
+	case NFT_META_OIFTYPE:
+	case NFT_META_SKUID:
+	case NFT_META_SKGID:
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	case NFT_META_RTCLASSID:
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+	case NFT_META_SECMARK:
+#endif
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+	return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_meta *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_meta_type;
+static const struct nft_expr_ops nft_meta_ops = {
+	.type		= &nft_meta_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+	.eval		= nft_meta_eval,
+	.init		= nft_meta_init,
+	.dump		= nft_meta_dump,
+};
+
+static struct nft_expr_type nft_meta_type __read_mostly = {
+	.name		= "meta",
+	.ops		= &nft_meta_ops,
+	.policy		= nft_meta_policy,
+	.maxattr	= NFTA_META_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_meta_module_init(void)
+{
+	return nft_register_expr(&nft_meta_type);
+}
+
+static void __exit nft_meta_module_exit(void)
+{
+	nft_unregister_expr(&nft_meta_type);
+}
+
+module_init(nft_meta_module_init);
+module_exit(nft_meta_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_meta_target.c b/net/netfilter/nft_meta_target.c
new file mode 100644
index 0000000..71177df
--- /dev/null
+++ b/net/netfilter/nft_meta_target.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+	enum nft_meta_keys	key;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+			  struct nft_data *nfres,
+			  struct nft_data *data,
+			  const struct nft_pktinfo *pkt)
+{
+	const struct nft_meta *meta = nft_expr_priv(expr);
+	struct sk_buff *skb = pkt->skb;
+	u32 val = data->data[0];
+
+	switch (meta->key) {
+	case NFT_META_MARK:
+		skb->mark = val;
+		break;
+	case NFT_META_PRIORITY:
+		skb->priority = val;
+		break;
+	case NFT_META_NFTRACE:
+		skb->nf_trace = val;
+		break;
+#ifdef CONFIG_NETWORK_SECMARK
+	case NFT_META_SECMARK:
+		skb->secmark = val;
+		break;
+#endif
+	default:
+		WARN_ON(1);
+	}
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+	[NFTA_META_KEY]		= { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_expr *expr, struct nlattr *tb[])
+{
+	struct nft_meta *meta = nft_expr_priv(expr);
+
+	if (tb[NFTA_META_KEY] == NULL)
+		return -EINVAL;
+
+	meta->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+	switch (meta->key) {
+	case NFT_META_MARK:
+	case NFT_META_PRIORITY:
+	case NFT_META_NFTRACE:
+#ifdef CONFIG_NETWORK_SECMARK
+	case NFT_META_SECMARK:
+#endif
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_meta *meta = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_META_KEY, htonl(meta->key)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_ops meta_target __read_mostly = {
+	.name		= "meta",
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+	.owner		= THIS_MODULE,
+	.eval		= nft_meta_eval,
+	.init		= nft_meta_init,
+	.dump		= nft_meta_dump,
+	.policy		= nft_meta_policy,
+	.maxattr	= NFTA_META_MAX,
+};
+
+static int __init nft_meta_target_init(void)
+{
+	return nft_register_expr(&meta_target);
+}
+
+static void __exit nft_meta_target_exit(void)
+{
+	nft_unregister_expr(&meta_target);
+}
+
+module_init(nft_meta_target_init);
+module_exit(nft_meta_target_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
new file mode 100644
index 0000000..d3b1ffe
--- /dev/null
+++ b/net/netfilter/nft_nat.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+struct nft_nat {
+	enum nft_registers      sreg_addr_min:8;
+	enum nft_registers      sreg_addr_max:8;
+	enum nft_registers      sreg_proto_min:8;
+	enum nft_registers      sreg_proto_max:8;
+	int                     family;
+	enum nf_nat_manip_type  type;
+};
+
+static void nft_nat_eval(const struct nft_expr *expr,
+			 struct nft_data data[NFT_REG_MAX + 1],
+			 const struct nft_pktinfo *pkt)
+{
+	const struct nft_nat *priv = nft_expr_priv(expr);
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
+	struct nf_nat_range range;
+
+	memset(&range, 0, sizeof(range));
+	if (priv->sreg_addr_min) {
+		if (priv->family == AF_INET) {
+			range.min_addr.ip = (__force __be32)
+					data[priv->sreg_addr_min].data[0];
+			range.max_addr.ip = (__force __be32)
+					data[priv->sreg_addr_max].data[0];
+
+		} else {
+			memcpy(range.min_addr.ip6,
+			       data[priv->sreg_addr_min].data,
+			       sizeof(struct nft_data));
+			memcpy(range.max_addr.ip6,
+			       data[priv->sreg_addr_max].data,
+			       sizeof(struct nft_data));
+		}
+		range.flags |= NF_NAT_RANGE_MAP_IPS;
+	}
+
+	if (priv->sreg_proto_min) {
+		range.min_proto.all = (__force __be16)
+					data[priv->sreg_proto_min].data[0];
+		range.max_proto.all = (__force __be16)
+					data[priv->sreg_proto_max].data[0];
+		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+	}
+
+	data[NFT_REG_VERDICT].verdict =
+		nf_nat_setup_info(ct, &range, priv->type);
+}
+
+static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
+	[NFTA_NAT_TYPE]		 = { .type = NLA_U32 },
+	[NFTA_NAT_FAMILY]	 = { .type = NLA_U32 },
+	[NFTA_NAT_REG_ADDR_MIN]	 = { .type = NLA_U32 },
+	[NFTA_NAT_REG_ADDR_MAX]	 = { .type = NLA_U32 },
+	[NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
+	[NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
+};
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+			const struct nlattr * const tb[])
+{
+	struct nft_nat *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_NAT_TYPE] == NULL)
+		return -EINVAL;
+
+	switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
+	case NFT_NAT_SNAT:
+		priv->type = NF_NAT_MANIP_SRC;
+		break;
+	case NFT_NAT_DNAT:
+		priv->type = NF_NAT_MANIP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (tb[NFTA_NAT_FAMILY] == NULL)
+		return -EINVAL;
+
+	priv->family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
+	if (priv->family != AF_INET && priv->family != AF_INET6)
+		return -EINVAL;
+
+	if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+		priv->sreg_addr_min = ntohl(nla_get_be32(
+						tb[NFTA_NAT_REG_ADDR_MIN]));
+		err = nft_validate_input_register(priv->sreg_addr_min);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+		priv->sreg_addr_max = ntohl(nla_get_be32(
+						tb[NFTA_NAT_REG_ADDR_MAX]));
+		err = nft_validate_input_register(priv->sreg_addr_max);
+		if (err < 0)
+			return err;
+	} else
+		priv->sreg_addr_max = priv->sreg_addr_min;
+
+	if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+		priv->sreg_proto_min = ntohl(nla_get_be32(
+						tb[NFTA_NAT_REG_PROTO_MIN]));
+		err = nft_validate_input_register(priv->sreg_proto_min);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+		priv->sreg_proto_max = ntohl(nla_get_be32(
+						tb[NFTA_NAT_REG_PROTO_MAX]));
+		err = nft_validate_input_register(priv->sreg_proto_max);
+		if (err < 0)
+			return err;
+	} else
+		priv->sreg_proto_max = priv->sreg_proto_min;
+
+	return 0;
+}
+
+static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_nat *priv = nft_expr_priv(expr);
+
+	switch (priv->type) {
+	case NF_NAT_MANIP_SRC:
+		if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
+			goto nla_put_failure;
+		break;
+	case NF_NAT_MANIP_DST:
+		if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
+			goto nla_put_failure;
+		break;
+	}
+
+	if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb,
+			 NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb,
+			 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb,
+			 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
+		goto nla_put_failure;
+	if (nla_put_be32(skb,
+			 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_nat_type;
+static const struct nft_expr_ops nft_nat_ops = {
+	.type           = &nft_nat_type,
+	.size           = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
+	.eval           = nft_nat_eval,
+	.init           = nft_nat_init,
+	.dump           = nft_nat_dump,
+};
+
+static struct nft_expr_type nft_nat_type __read_mostly = {
+	.name           = "nat",
+	.ops            = &nft_nat_ops,
+	.policy         = nft_nat_policy,
+	.maxattr        = NFTA_NAT_MAX,
+	.owner          = THIS_MODULE,
+};
+
+static int __init nft_nat_module_init(void)
+{
+	int err;
+
+	err = nft_register_expr(&nft_nat_type);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void __exit nft_nat_module_exit(void)
+{
+	nft_unregister_expr(&nft_nat_type);
+}
+
+module_init(nft_nat_module_init);
+module_exit(nft_nat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_EXPR("nat");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
new file mode 100644
index 0000000..a2aeb31
--- /dev/null
+++ b/net/netfilter/nft_payload.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+static void nft_payload_eval(const struct nft_expr *expr,
+			     struct nft_data data[NFT_REG_MAX + 1],
+			     const struct nft_pktinfo *pkt)
+{
+	const struct nft_payload *priv = nft_expr_priv(expr);
+	const struct sk_buff *skb = pkt->skb;
+	struct nft_data *dest = &data[priv->dreg];
+	int offset;
+
+	switch (priv->base) {
+	case NFT_PAYLOAD_LL_HEADER:
+		if (!skb_mac_header_was_set(skb))
+			goto err;
+		offset = skb_mac_header(skb) - skb->data;
+		break;
+	case NFT_PAYLOAD_NETWORK_HEADER:
+		offset = skb_network_offset(skb);
+		break;
+	case NFT_PAYLOAD_TRANSPORT_HEADER:
+		offset = pkt->xt.thoff;
+		break;
+	default:
+		BUG();
+	}
+	offset += priv->offset;
+
+	if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0)
+		goto err;
+	return;
+err:
+	data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
+	[NFTA_PAYLOAD_DREG]	= { .type = NLA_U32 },
+	[NFTA_PAYLOAD_BASE]	= { .type = NLA_U32 },
+	[NFTA_PAYLOAD_OFFSET]	= { .type = NLA_U32 },
+	[NFTA_PAYLOAD_LEN]	= { .type = NLA_U32 },
+};
+
+static int nft_payload_init(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nlattr * const tb[])
+{
+	struct nft_payload *priv = nft_expr_priv(expr);
+	int err;
+
+	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+	priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG]));
+	err = nft_validate_output_register(priv->dreg);
+	if (err < 0)
+		return err;
+	return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_payload *priv = nft_expr_priv(expr);
+
+	if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) ||
+	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
+	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
+	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_payload_type;
+static const struct nft_expr_ops nft_payload_ops = {
+	.type		= &nft_payload_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+	.eval		= nft_payload_eval,
+	.init		= nft_payload_init,
+	.dump		= nft_payload_dump,
+};
+
+const struct nft_expr_ops nft_payload_fast_ops = {
+	.type		= &nft_payload_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+	.eval		= nft_payload_eval,
+	.init		= nft_payload_init,
+	.dump		= nft_payload_dump,
+};
+
+static const struct nft_expr_ops *
+nft_payload_select_ops(const struct nft_ctx *ctx,
+		       const struct nlattr * const tb[])
+{
+	enum nft_payload_bases base;
+	unsigned int offset, len;
+
+	if (tb[NFTA_PAYLOAD_DREG] == NULL ||
+	    tb[NFTA_PAYLOAD_BASE] == NULL ||
+	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+	    tb[NFTA_PAYLOAD_LEN] == NULL)
+		return ERR_PTR(-EINVAL);
+
+	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+	switch (base) {
+	case NFT_PAYLOAD_LL_HEADER:
+	case NFT_PAYLOAD_NETWORK_HEADER:
+	case NFT_PAYLOAD_TRANSPORT_HEADER:
+		break;
+	default:
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+	if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
+		return ERR_PTR(-EINVAL);
+
+	if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+		return &nft_payload_fast_ops;
+	else
+		return &nft_payload_ops;
+}
+
+static struct nft_expr_type nft_payload_type __read_mostly = {
+	.name		= "payload",
+	.select_ops	= nft_payload_select_ops,
+	.policy		= nft_payload_policy,
+	.maxattr	= NFTA_PAYLOAD_MAX,
+	.owner		= THIS_MODULE,
+};
+
+int __init nft_payload_module_init(void)
+{
+	return nft_register_expr(&nft_payload_type);
+}
+
+void nft_payload_module_exit(void)
+{
+	nft_unregister_expr(&nft_payload_type);
+}
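
nft_payload_select_ops() picks an implementation at rule-insertion time: loads of at most 4 naturally aligned bytes outside the link-layer header qualify for nft_payload_fast_ops, everything else takes the generic skb_copy_bits() path. A runnable replay of that predicate; IS_ALIGNED() is copied in as a stand-in for the kernel macro:

#include <stdio.h>

/* stand-in for the kernel's IS_ALIGNED() */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static const char *pick_ops(unsigned int offset, unsigned int len,
			    int ll_header)
{
	if (len <= 4 && IS_ALIGNED(offset, len) && !ll_header)
		return "nft_payload_fast_ops";
	return "nft_payload_ops";
}

int main(void)
{
	printf("%s\n", pick_ops(12, 4, 0));	/* e.g. IPv4 saddr */
	printf("%s\n", pick_ops(13, 4, 0));	/* unaligned -> generic */
	printf("%s\n", pick_ops(0, 6, 1));	/* MAC address -> generic */
	return 0;
}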
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
new file mode 100644
index 0000000..ca0c1b2
--- /dev/null
+++ b/net/netfilter/nft_rbtree.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_rbtree {
+	struct rb_root		root;
+};
+
+struct nft_rbtree_elem {
+	struct rb_node		node;
+	u16			flags;
+	struct nft_data		key;
+	struct nft_data		data[];
+};
+
+static bool nft_rbtree_lookup(const struct nft_set *set,
+			      const struct nft_data *key,
+			      struct nft_data *data)
+{
+	const struct nft_rbtree *priv = nft_set_priv(set);
+	const struct nft_rbtree_elem *rbe, *interval = NULL;
+	const struct rb_node *parent = priv->root.rb_node;
+	int d;
+
+	while (parent != NULL) {
+		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+		d = nft_data_cmp(&rbe->key, key, set->klen);
+		if (d < 0) {
+			parent = parent->rb_left;
+			interval = rbe;
+		} else if (d > 0)
+			parent = parent->rb_right;
+		else {
+found:
+			if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+				goto out;
+			if (set->flags & NFT_SET_MAP)
+				nft_data_copy(data, rbe->data);
+			return true;
+		}
+	}
+
+	if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
+		rbe = interval;
+		goto found;
+	}
+out:
+	return false;
+}
+
+static void nft_rbtree_elem_destroy(const struct nft_set *set,
+				    struct nft_rbtree_elem *rbe)
+{
+	nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
+	if (set->flags & NFT_SET_MAP)
+		nft_data_uninit(rbe->data, set->dtype);
+	kfree(rbe);
+}
+
+static int __nft_rbtree_insert(const struct nft_set *set,
+			       struct nft_rbtree_elem *new)
+{
+	struct nft_rbtree *priv = nft_set_priv(set);
+	struct nft_rbtree_elem *rbe;
+	struct rb_node *parent, **p;
+	int d;
+
+	parent = NULL;
+	p = &priv->root.rb_node;
+	while (*p != NULL) {
+		parent = *p;
+		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+		d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+		if (d < 0)
+			p = &parent->rb_left;
+		else if (d > 0)
+			p = &parent->rb_right;
+		else
+			return -EEXIST;
+	}
+	rb_link_node(&new->node, parent, p);
+	rb_insert_color(&new->node, &priv->root);
+	return 0;
+}
+
+static int nft_rbtree_insert(const struct nft_set *set,
+			     const struct nft_set_elem *elem)
+{
+	struct nft_rbtree_elem *rbe;
+	unsigned int size;
+	int err;
+
+	size = sizeof(*rbe);
+	if (set->flags & NFT_SET_MAP)
+		size += sizeof(rbe->data[0]);
+
+	rbe = kzalloc(size, GFP_KERNEL);
+	if (rbe == NULL)
+		return -ENOMEM;
+
+	rbe->flags = elem->flags;
+	nft_data_copy(&rbe->key, &elem->key);
+	if (set->flags & NFT_SET_MAP)
+		nft_data_copy(rbe->data, &elem->data);
+
+	err = __nft_rbtree_insert(set, rbe);
+	if (err < 0)
+		kfree(rbe);
+	return err;
+}
+
+static void nft_rbtree_remove(const struct nft_set *set,
+			      const struct nft_set_elem *elem)
+{
+	struct nft_rbtree *priv = nft_set_priv(set);
+	struct nft_rbtree_elem *rbe = elem->cookie;
+
+	rb_erase(&rbe->node, &priv->root);
+	kfree(rbe);
+}
+
+static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+	const struct nft_rbtree *priv = nft_set_priv(set);
+	const struct rb_node *parent = priv->root.rb_node;
+	struct nft_rbtree_elem *rbe;
+	int d;
+
+	while (parent != NULL) {
+		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+		d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+		if (d < 0)
+			parent = parent->rb_left;
+		else if (d > 0)
+			parent = parent->rb_right;
+		else {
+			elem->cookie = rbe;
+			if (set->flags & NFT_SET_MAP)
+				nft_data_copy(&elem->data, rbe->data);
+			elem->flags = rbe->flags;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static void nft_rbtree_walk(const struct nft_ctx *ctx,
+			    const struct nft_set *set,
+			    struct nft_set_iter *iter)
+{
+	const struct nft_rbtree *priv = nft_set_priv(set);
+	const struct nft_rbtree_elem *rbe;
+	struct nft_set_elem elem;
+	struct rb_node *node;
+
+	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+		if (iter->count < iter->skip)
+			goto cont;
+
+		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+		nft_data_copy(&elem.key, &rbe->key);
+		if (set->flags & NFT_SET_MAP)
+			nft_data_copy(&elem.data, rbe->data);
+		elem.flags = rbe->flags;
+
+		iter->err = iter->fn(ctx, set, iter, &elem);
+		if (iter->err < 0)
+			return;
+cont:
+		iter->count++;
+	}
+}
+
+static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
+{
+	return sizeof(struct nft_rbtree);
+}
+
+static int nft_rbtree_init(const struct nft_set *set,
+			   const struct nlattr * const nla[])
+{
+	struct nft_rbtree *priv = nft_set_priv(set);
+
+	priv->root = RB_ROOT;
+	return 0;
+}
+
+static void nft_rbtree_destroy(const struct nft_set *set)
+{
+	struct nft_rbtree *priv = nft_set_priv(set);
+	struct nft_rbtree_elem *rbe;
+	struct rb_node *node;
+
+	while ((node = priv->root.rb_node) != NULL) {
+		rb_erase(node, &priv->root);
+		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+		nft_rbtree_elem_destroy(set, rbe);
+	}
+}
+
+static struct nft_set_ops nft_rbtree_ops __read_mostly = {
+	.privsize	= nft_rbtree_privsize,
+	.init		= nft_rbtree_init,
+	.destroy	= nft_rbtree_destroy,
+	.insert		= nft_rbtree_insert,
+	.remove		= nft_rbtree_remove,
+	.get		= nft_rbtree_get,
+	.lookup		= nft_rbtree_lookup,
+	.walk		= nft_rbtree_walk,
+	.features	= NFT_SET_INTERVAL | NFT_SET_MAP,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_rbtree_module_init(void)
+{
+	return nft_register_set(&nft_rbtree_ops);
+}
+
+static void __exit nft_rbtree_module_exit(void)
+{
+	nft_unregister_set(&nft_rbtree_ops);
+}
+
+module_init(nft_rbtree_module_init);
+module_exit(nft_rbtree_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
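
For NFT_SET_INTERVAL sets, nft_rbtree_lookup() remembers the last element it passed on the interval side of the walk and falls back to it when no exact match exists, so a key matches the interval it falls into. A simplified, runnable stand-in that computes the same answer over a flat array of interval start keys (end markers and the tree walk itself are elided):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t starts[] = { 10, 20, 40 };	/* interval start keys */
	uint32_t key = 25, best = 0;
	unsigned int i;
	int found = 0;

	/* closest start <= key, the candidate the tree walk remembers */
	for (i = 0; i < sizeof(starts) / sizeof(starts[0]); i++) {
		if (starts[i] <= key) {
			best = starts[i];
			found = 1;
		}
	}
	if (found)
		printf("key %u falls in the interval starting at %u\n",
		       (unsigned)key, (unsigned)best);
	else
		printf("key %u: no interval, NFT_BREAK\n", (unsigned)key);
	return 0;
}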
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index cd24290..e762de5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -43,10 +43,42 @@
 		return opt[offset+1];
 }
 
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+				    const struct sk_buff *skb,
+				    unsigned int family)
+{
+	struct flowi fl;
+	const struct nf_afinfo *ai;
+	struct rtable *rt = NULL;
+	u_int32_t mtu     = ~0U;
+
+	if (family == PF_INET) {
+		struct flowi4 *fl4 = &fl.u.ip4;
+		memset(fl4, 0, sizeof(*fl4));
+		fl4->daddr = ip_hdr(skb)->saddr;
+	} else {
+		struct flowi6 *fl6 = &fl.u.ip6;
+
+		memset(fl6, 0, sizeof(*fl6));
+		fl6->daddr = ipv6_hdr(skb)->saddr;
+	}
+	rcu_read_lock();
+	ai = nf_get_afinfo(family);
+	if (ai != NULL)
+		ai->route(net, (struct dst_entry **)&rt, &fl, false);
+	rcu_read_unlock();
+
+	if (rt != NULL) {
+		mtu = dst_mtu(&rt->dst);
+		dst_release(&rt->dst);
+	}
+	return mtu;
+}
+
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
 		     const struct xt_action_param *par,
-		     unsigned int in_mtu,
+		     unsigned int family,
 		     unsigned int tcphoff,
 		     unsigned int minlen)
 {
@@ -76,6 +108,9 @@
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+		struct net *net = dev_net(par->in ? par->in : par->out);
+		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+
 		if (dst_mtu(skb_dst(skb)) <= minlen) {
 			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
 					    dst_mtu(skb_dst(skb)));
@@ -165,37 +200,6 @@
 	return TCPOLEN_MSS;
 }
 
-static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
-				    unsigned int family)
-{
-	struct flowi fl;
-	const struct nf_afinfo *ai;
-	struct rtable *rt = NULL;
-	u_int32_t mtu     = ~0U;
-
-	if (family == PF_INET) {
-		struct flowi4 *fl4 = &fl.u.ip4;
-		memset(fl4, 0, sizeof(*fl4));
-		fl4->daddr = ip_hdr(skb)->saddr;
-	} else {
-		struct flowi6 *fl6 = &fl.u.ip6;
-
-		memset(fl6, 0, sizeof(*fl6));
-		fl6->daddr = ipv6_hdr(skb)->saddr;
-	}
-	rcu_read_lock();
-	ai = nf_get_afinfo(family);
-	if (ai != NULL)
-		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
-	rcu_read_unlock();
-
-	if (rt != NULL) {
-		mtu = dst_mtu(&rt->dst);
-		dst_release(&rt->dst);
-	}
-	return mtu;
-}
-
 static unsigned int
 tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -204,7 +208,7 @@
 	int ret;
 
 	ret = tcpmss_mangle_packet(skb, par,
-				   tcpmss_reverse_mtu(skb, PF_INET),
+				   PF_INET,
 				   iph->ihl * 4,
 				   sizeof(*iph) + sizeof(struct tcphdr));
 	if (ret < 0)
@@ -233,7 +237,7 @@
 	if (tcphoff < 0)
 		return NF_DROP;
 	ret = tcpmss_mangle_packet(skb, par,
-				   tcpmss_reverse_mtu(skb, PF_INET6),
+				   PF_INET6,
 				   tcphoff,
 				   sizeof(*ipv6h) + sizeof(struct tcphdr));
 	if (ret < 0)
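
The xt_TCPMSS reshuffle above moves the reverse-path MTU lookup into tcpmss_mangle_packet() and keys the route query to the packet's own namespace (dev_net() rather than init_net), so it only runs for XT_TCPMSS_CLAMP_PMTU. The resulting clamp, sketched standalone: the usable MSS is bounded by the smaller of the forward and reverse path MTUs minus the minimal headers (40 bytes assumed for IPv4 + TCP):

#include <stdio.h>

int main(void)
{
	unsigned int fwd_mtu = 1500;	/* dst_mtu(skb_dst(skb)) analog */
	unsigned int rev_mtu = 1400;	/* tcpmss_reverse_mtu() analog */
	unsigned int minlen = 40;	/* sizeof iphdr + sizeof tcphdr */
	unsigned int mtu = fwd_mtu < rev_mtu ? fwd_mtu : rev_mtu;

	printf("clamped mss: %u\n", mtu - minlen);
	return 0;
}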
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 5d8a3a3..ef8a926 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -200,7 +200,7 @@
 				     in->ifindex);
 		if (sk) {
 			int connected = (sk->sk_state == TCP_ESTABLISHED);
-			int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+			int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr);
 
 			/* NOTE: we return listeners even if bound to
 			 * 0.0.0.0, those are filtered out in
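
The xt_TPROXY hunk switches the wildcard test to sk->sk_v6_rcv_saddr, but semantically it still asks whether the socket is bound to the all-zero address. A stand-in sketch of ipv6_addr_any() over a toy 128-bit address:

#include <stdio.h>
#include <string.h>

struct toy_in6 { unsigned char s6_addr[16]; };	/* struct in6_addr analog */

/* stand-in for ipv6_addr_any(): all-zero means wildcard bind */
static int toy_addr_any(const struct toy_in6 *a)
{
	static const struct toy_in6 zero;

	return memcmp(a, &zero, sizeof(zero)) == 0;
}

int main(void)
{
	struct toy_in6 bound = { { 0 } };

	printf("wildcard: %d\n", toy_addr_any(&bound));
	return 0;
}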
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index e595e07..1e63461 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -26,16 +26,18 @@
 	u_int64_t what = 0;	/* initialize to make gcc happy */
 	u_int64_t bytes = 0;
 	u_int64_t pkts = 0;
+	const struct nf_conn_acct *acct;
 	const struct nf_conn_counter *counters;
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (!ct)
 		return false;
 
-	counters = nf_conn_acct_find(ct);
-	if (!counters)
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
 		return false;
 
+	counters = acct->counter;
 	switch (sinfo->what) {
 	case XT_CONNBYTES_PKTS:
 		switch (sinfo->direction) {
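
The xt_connbytes hunk tracks a conntrack accounting API change: nf_conn_acct_find() now returns a wrapper object and the per-direction counters are an array inside it, so callers fetch the wrapper first and index in. A toy sketch of that indirection; field names mirror the kernel's, the values are illustrative:

#include <stdio.h>
#include <stdint.h>

struct toy_counter { uint64_t packets, bytes; };
struct toy_acct { struct toy_counter counter[2]; };	/* orig, reply */

int main(void)
{
	struct toy_acct acct = { { { 3, 180 }, { 2, 120 } } };
	const struct toy_counter *counters = acct.counter;

	printf("orig: %llu pkts, %llu bytes\n",
	       (unsigned long long)counters[0].packets,
	       (unsigned long long)counters[0].bytes);
	return 0;
}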
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 31790e7..80c2e2d 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,17 +81,17 @@
 	struct xt_set_info_match_v0 *info = par->matchinfo;
 	ip_set_id_t index;
 
-	index = ip_set_nfnl_get_byindex(info->match_set.index);
+	index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
 	if (index == IPSET_INVALID_ID) {
-		pr_warning("Cannot find set indentified by id %u to match\n",
+		pr_warning("Cannot find set identified by id %u to match\n",
 			   info->match_set.index);
 		return -ENOENT;
 	}
 	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
-		ip_set_nfnl_put(info->match_set.index);
+		ip_set_nfnl_put(par->net, info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -106,81 +106,10 @@
 {
 	struct xt_set_info_match_v0 *info = par->matchinfo;
 
-	ip_set_nfnl_put(info->match_set.index);
+	ip_set_nfnl_put(par->net, info->match_set.index);
 }
 
-static unsigned int
-set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
-{
-	const struct xt_set_info_target_v0 *info = par->targinfo;
-	ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
-		info->add_set.u.compat.flags, 0, UINT_MAX);
-	ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,
-		info->del_set.u.compat.flags, 0, UINT_MAX);
-
-	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_add(info->add_set.index, skb, par, &add_opt);
-	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_del(info->del_set.index, skb, par, &del_opt);
-
-	return XT_CONTINUE;
-}
-
-static int
-set_target_v0_checkentry(const struct xt_tgchk_param *par)
-{
-	struct xt_set_info_target_v0 *info = par->targinfo;
-	ip_set_id_t index;
-
-	if (info->add_set.index != IPSET_INVALID_ID) {
-		index = ip_set_nfnl_get_byindex(info->add_set.index);
-		if (index == IPSET_INVALID_ID) {
-			pr_warning("Cannot find add_set index %u as target\n",
-				   info->add_set.index);
-			return -ENOENT;
-		}
-	}
-
-	if (info->del_set.index != IPSET_INVALID_ID) {
-		index = ip_set_nfnl_get_byindex(info->del_set.index);
-		if (index == IPSET_INVALID_ID) {
-			pr_warning("Cannot find del_set index %u as target\n",
-				   info->del_set.index);
-			if (info->add_set.index != IPSET_INVALID_ID)
-				ip_set_nfnl_put(info->add_set.index);
-			return -ENOENT;
-		}
-	}
-	if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
-	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
-		pr_warning("Protocol error: SET target dimension "
-			   "is over the limit!\n");
-		if (info->add_set.index != IPSET_INVALID_ID)
-			ip_set_nfnl_put(info->add_set.index);
-		if (info->del_set.index != IPSET_INVALID_ID)
-			ip_set_nfnl_put(info->del_set.index);
-		return -ERANGE;
-	}
-
-	/* Fill out compatibility data */
-	compat_flags(&info->add_set);
-	compat_flags(&info->del_set);
-
-	return 0;
-}
-
-static void
-set_target_v0_destroy(const struct xt_tgdtor_param *par)
-{
-	const struct xt_set_info_target_v0 *info = par->targinfo;
-
-	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_nfnl_put(info->add_set.index);
-	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_nfnl_put(info->del_set.index);
-}
-
-/* Revision 1 match and target */
+/* Revision 1 match */
 
 static bool
 set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
@@ -202,17 +131,17 @@
 	struct xt_set_info_match_v1 *info = par->matchinfo;
 	ip_set_id_t index;
 
-	index = ip_set_nfnl_get_byindex(info->match_set.index);
+	index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
 	if (index == IPSET_INVALID_ID) {
-		pr_warning("Cannot find set indentified by id %u to match\n",
+		pr_warning("Cannot find set identified by id %u to match\n",
 			   info->match_set.index);
 		return -ENOENT;
 	}
 	if (info->match_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
-		ip_set_nfnl_put(info->match_set.index);
+		ip_set_nfnl_put(par->net, info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -224,102 +153,9 @@
 {
 	struct xt_set_info_match_v1 *info = par->matchinfo;
 
-	ip_set_nfnl_put(info->match_set.index);
+	ip_set_nfnl_put(par->net, info->match_set.index);
 }
 
-static unsigned int
-set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
-{
-	const struct xt_set_info_target_v1 *info = par->targinfo;
-	ADT_OPT(add_opt, par->family, info->add_set.dim,
-		info->add_set.flags, 0, UINT_MAX);
-	ADT_OPT(del_opt, par->family, info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
-
-	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_add(info->add_set.index, skb, par, &add_opt);
-	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_del(info->del_set.index, skb, par, &del_opt);
-
-	return XT_CONTINUE;
-}
-
-static int
-set_target_v1_checkentry(const struct xt_tgchk_param *par)
-{
-	const struct xt_set_info_target_v1 *info = par->targinfo;
-	ip_set_id_t index;
-
-	if (info->add_set.index != IPSET_INVALID_ID) {
-		index = ip_set_nfnl_get_byindex(info->add_set.index);
-		if (index == IPSET_INVALID_ID) {
-			pr_warning("Cannot find add_set index %u as target\n",
-				   info->add_set.index);
-			return -ENOENT;
-		}
-	}
-
-	if (info->del_set.index != IPSET_INVALID_ID) {
-		index = ip_set_nfnl_get_byindex(info->del_set.index);
-		if (index == IPSET_INVALID_ID) {
-			pr_warning("Cannot find del_set index %u as target\n",
-				   info->del_set.index);
-			if (info->add_set.index != IPSET_INVALID_ID)
-				ip_set_nfnl_put(info->add_set.index);
-			return -ENOENT;
-		}
-	}
-	if (info->add_set.dim > IPSET_DIM_MAX ||
-	    info->del_set.dim > IPSET_DIM_MAX) {
-		pr_warning("Protocol error: SET target dimension "
-			   "is over the limit!\n");
-		if (info->add_set.index != IPSET_INVALID_ID)
-			ip_set_nfnl_put(info->add_set.index);
-		if (info->del_set.index != IPSET_INVALID_ID)
-			ip_set_nfnl_put(info->del_set.index);
-		return -ERANGE;
-	}
-
-	return 0;
-}
-
-static void
-set_target_v1_destroy(const struct xt_tgdtor_param *par)
-{
-	const struct xt_set_info_target_v1 *info = par->targinfo;
-
-	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_nfnl_put(info->add_set.index);
-	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_nfnl_put(info->del_set.index);
-}
-
-/* Revision 2 target */
-
-static unsigned int
-set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
-{
-	const struct xt_set_info_target_v2 *info = par->targinfo;
-	ADT_OPT(add_opt, par->family, info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
-	ADT_OPT(del_opt, par->family, info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
-
-	/* Normalize to fit into jiffies */
-	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
-	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_add(info->add_set.index, skb, par, &add_opt);
-	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_del(info->del_set.index, skb, par, &del_opt);
-
-	return XT_CONTINUE;
-}
-
-#define set_target_v2_checkentry	set_target_v1_checkentry
-#define set_target_v2_destroy		set_target_v1_destroy
-
 /* Revision 3 match */
 
 static bool
@@ -366,6 +202,174 @@
 #define set_match_v3_checkentry	set_match_v1_checkentry
 #define set_match_v3_destroy	set_match_v1_destroy
 
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target_v0 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
+		info->add_set.u.compat.flags, 0, UINT_MAX);
+	ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,
+		info->del_set.u.compat.flags, 0, UINT_MAX);
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
+
+	return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_set_info_target_v0 *info = par->targinfo;
+	ip_set_id_t index;
+
+	if (info->add_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find add_set index %u as target\n",
+				   info->add_set.index);
+			return -ENOENT;
+		}
+	}
+
+	if (info->del_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find del_set index %u as target\n",
+				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(par->net, info->add_set.index);
+			return -ENOENT;
+		}
+	}
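+	/* Note: revision 0 keeps one flag byte per dimension; a non-zero
+	 * last slot means the rule references more dimensions than
+	 * IPSET_DIM_MAX allows.
+	 */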
+	if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+		pr_warning("Protocol error: SET target dimension "
+			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(par->net, info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(par->net, info->del_set.index);
+		return -ERANGE;
+	}
+
+	/* Fill out compatibility data */
+	compat_flags(&info->add_set);
+	compat_flags(&info->del_set);
+
+	return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct xt_set_info_target_v0 *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(par->net, info->add_set.index);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(par->net, info->del_set.index);
+}
+
+/* Revision 1 target */
+
+static unsigned int
+set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target_v1 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.dim,
+		info->add_set.flags, 0, UINT_MAX);
+	ADT_OPT(del_opt, par->family, info->del_set.dim,
+		info->del_set.flags, 0, UINT_MAX);
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
+
+	return XT_CONTINUE;
+}
+
+static int
+set_target_v1_checkentry(const struct xt_tgchk_param *par)
+{
+	const struct xt_set_info_target_v1 *info = par->targinfo;
+	ip_set_id_t index;
+
+	if (info->add_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find add_set index %u as target\n",
+				   info->add_set.index);
+			return -ENOENT;
+		}
+	}
+
+	if (info->del_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find del_set index %u as target\n",
+				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(par->net, info->add_set.index);
+			return -ENOENT;
+		}
+	}
+	if (info->add_set.dim > IPSET_DIM_MAX ||
+	    info->del_set.dim > IPSET_DIM_MAX) {
+		pr_warning("Protocol error: SET target dimension "
+			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(par->net, info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(par->net, info->del_set.index);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static void
+set_target_v1_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct xt_set_info_target_v1 *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(par->net, info->add_set.index);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(par->net, info->del_set.index);
+}
+
+/* Revision 2 target */
+
+static unsigned int
+set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target_v2 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.dim,
+		info->add_set.flags, info->flags, info->timeout);
+	ADT_OPT(del_opt, par->family, info->del_set.dim,
+		info->del_set.flags, 0, UINT_MAX);
+
+	/* Normalize to fit into jiffies */
+	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
+		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
+
+	return XT_CONTINUE;
+}
+
+#define set_target_v2_checkentry	set_target_v1_checkentry
+#define set_target_v2_destroy		set_target_v1_destroy
+
 static struct xt_match set_matches[] __read_mostly = {
 	{
 		.name		= "set",
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 06df2b9..1ba6793 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,15 +35,6 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static void
-xt_socket_put_sk(struct sock *sk)
-{
-	if (sk->sk_state == TCP_TIME_WAIT)
-		inet_twsk_put(inet_twsk(sk));
-	else
-		sock_put(sk);
-}
-
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
 		    u8 *protocol,
@@ -216,7 +207,7 @@
 					inet_twsk(sk)->tw_transparent));
 
 		if (sk != skb->sk)
-			xt_socket_put_sk(sk);
+			sock_gen_put(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
@@ -370,7 +361,7 @@
 		 */
 		wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
 			    sk->sk_state != TCP_TIME_WAIT &&
-			    ipv6_addr_any(&inet6_sk(sk)->rcv_saddr));
+			    ipv6_addr_any(&sk->sk_v6_rcv_saddr));
 
 		/* Ignore non-transparent sockets,
 		   if XT_SOCKET_TRANSPARENT is used */
@@ -381,7 +372,7 @@
 					inet_twsk(sk)->tw_transparent));
 
 		if (sk != skb->sk)
-			xt_socket_put_sk(sk);
+			sock_gen_put(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 96a458e..dce1beb 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -817,7 +817,7 @@
 	switch (req->rsk_ops->family) {
 	case AF_INET:
 		entry = netlbl_domhsh_getentry_af4(secattr->domain,
-						   inet_rsk(req)->rmt_addr);
+						   inet_rsk(req)->ir_rmt_addr);
 		if (entry == NULL) {
 			ret_val = -ENOENT;
 			goto req_setattr_return;
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 5948b2f..6e0fa0c 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,20 @@
 	  To compile this support as a module, choose M here: the module will
 	  be called nfc.
 
+config NFC_DIGITAL
+	depends on NFC
+	select CRC_CCITT
+	select CRC_ITU_T
+	tristate "NFC Digital Protocol stack support"
+	default n
+	help
+	  Say Y if you want to build NFC digital protocol stack support.
+	  This is needed by NFC chipsets whose firmware only implement
+	  the NFC analog layer.
+
+	  To compile this support as a module, choose M here: the module will
+	  be called nfc_digital.
+
 source "net/nfc/nci/Kconfig"
 source "net/nfc/hci/Kconfig"
 
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index a76f453..2555ff8 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -5,7 +5,9 @@
 obj-$(CONFIG_NFC) += nfc.o
 obj-$(CONFIG_NFC_NCI) += nci/
 obj-$(CONFIG_NFC_HCI) += hci/
+obj-$(CONFIG_NFC_DIGITAL) += nfc_digital.o
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \
 		llcp_sock.o
 
+nfc_digital-objs := digital_core.o digital_technology.o digital_dep.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index e92923c..8725291 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -384,6 +384,19 @@
 {
 	dev->dep_link_up = true;
 
+	if (!dev->active_target) {
+		struct nfc_target *target;
+
+		target = nfc_find_target(dev, target_idx);
+		if (target == NULL)
+			return -ENOTCONN;
+
+		dev->active_target = target;
+	}
+
+	dev->polling = false;
+	dev->rf_mode = rf_mode;
+
 	nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
 
 	return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
@@ -536,7 +549,7 @@
 	return rc;
 }
 
-static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx)
 {
 	struct nfc_se *se, *n;
 
@@ -546,6 +559,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(nfc_find_se);
 
 int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
 {
@@ -577,7 +591,7 @@
 		goto error;
 	}
 
-	se = find_se(dev, se_idx);
+	se = nfc_find_se(dev, se_idx);
 	if (!se) {
 		rc = -EINVAL;
 		goto error;
@@ -622,7 +636,7 @@
 		goto error;
 	}
 
-	se = find_se(dev, se_idx);
+	se = nfc_find_se(dev, se_idx);
 	if (!se) {
 		rc = -EINVAL;
 		goto error;
@@ -881,7 +895,7 @@
 
 	pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
 
-	se = find_se(dev, se_idx);
+	se = nfc_find_se(dev, se_idx);
 	if (se)
 		return -EALREADY;
 
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
new file mode 100644
index 0000000..08b29b5
--- /dev/null
+++ b/net/nfc/digital.h
@@ -0,0 +1,170 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __DIGITAL_H
+#define __DIGITAL_H
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
+
+#define PROTOCOL_ERR(req) pr_err("%d: NFC Digital Protocol error: %s\n", \
+				 __LINE__, req)
+
+#define DIGITAL_CMD_IN_SEND        0
+#define DIGITAL_CMD_TG_SEND        1
+#define DIGITAL_CMD_TG_LISTEN      2
+#define DIGITAL_CMD_TG_LISTEN_MDAA 3
+
+#define DIGITAL_MAX_HEADER_LEN 7
+#define DIGITAL_CRC_LEN        2
+
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B1 0x01
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B2 0xFE
+
+#define DIGITAL_SENS_RES_NFC_DEP 0x0100
+#define DIGITAL_SEL_RES_NFC_DEP  0x40
+#define DIGITAL_SENSF_FELICA_SC  0xFFFF
+
+#define DIGITAL_DRV_CAPS_IN_CRC(ddev) \
+	((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_IN_CRC)
+#define DIGITAL_DRV_CAPS_TG_CRC(ddev) \
+	((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+struct digital_data_exch {
+	data_exchange_cb_t cb;
+	void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+				  unsigned int len);
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+		     struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+		     u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+		     void *cb_context);
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
+				      struct sk_buff *skb, u16 timeout,
+				      nfc_digital_cmd_complete_t cmd_cb,
+				      void *cb_context)
+{
+	return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout,
+				cmd_cb, cb_context);
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev);
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+			 struct nfc_target *target, u8 protocol);
+
+int digital_in_recv_mifare_res(struct sk_buff *resp);
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+			    struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+			    size_t gb_len);
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+			    struct nfc_target *target, struct sk_buff *skb,
+			    struct digital_data_exch *data_exch);
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_tg_send_cmd(struct nfc_digital_dev *ddev,
+			struct sk_buff *skb, u16 timeout,
+			nfc_digital_cmd_complete_t cmd_cb, void *cb_context)
+{
+	return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout,
+				cmd_cb, cb_context);
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+			      struct sk_buff *resp);
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+			       struct sk_buff *resp);
+
+static inline int digital_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
+				    nfc_digital_cmd_complete_t cb, void *arg)
+{
+	return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN, NULL, NULL,
+				timeout, cb, arg);
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+			     struct sk_buff *resp);
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+typedef u16 (*crc_func_t)(u16, const u8 *, size_t);
+
+#define CRC_A_INIT 0x6363
+#define CRC_B_INIT 0xFFFF
+#define CRC_F_INIT 0x0000
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+			 u8 bitwise_inv, u8 msb_first);
+
+static inline void digital_skb_add_crc_a(struct sk_buff *skb)
+{
+	digital_skb_add_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline void digital_skb_add_crc_b(struct sk_buff *skb)
+{
+	digital_skb_add_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline void digital_skb_add_crc_f(struct sk_buff *skb)
+{
+	digital_skb_add_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline void digital_skb_add_crc_none(struct sk_buff *skb)
+{
+	return;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+			  u16 crc_init, u8 bitwise_inv, u8 msb_first);
+
+static inline int digital_skb_check_crc_a(struct sk_buff *skb)
+{
+	return digital_skb_check_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline int digital_skb_check_crc_b(struct sk_buff *skb)
+{
+	return digital_skb_check_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline int digital_skb_check_crc_f(struct sk_buff *skb)
+{
+	return digital_skb_check_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline int digital_skb_check_crc_none(struct sk_buff *skb)
+{
+	return 0;
+}
+
+#endif /* __DIGITAL_H */
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
new file mode 100644
index 0000000..09fc954
--- /dev/null
+++ b/net/nfc/digital_core.c
@@ -0,0 +1,737 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include <linux/module.h>
+
+#include "digital.h"
+
+#define DIGITAL_PROTO_NFCA_RF_TECH \
+	(NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+#define DIGITAL_PROTO_NFCF_RF_TECH \
+	(NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+struct digital_cmd {
+	struct list_head queue;
+
+	u8 type;
+	u8 pending;
+
+	u16 timeout;
+	struct sk_buff *req;
+	struct sk_buff *resp;
+	struct digital_tg_mdaa_params *mdaa_params;
+
+	nfc_digital_cmd_complete_t cmd_cb;
+	void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+				  unsigned int len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len + ddev->tx_headroom + ddev->tx_tailroom,
+			GFP_KERNEL);
+	if (skb)
+		skb_reserve(skb, ddev->tx_headroom);
+
+	return skb;
+}
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+			 u8 bitwise_inv, u8 msb_first)
+{
+	u16 crc;
+
+	crc = crc_func(init, skb->data, skb->len);
+
+	if (bitwise_inv)
+		crc = ~crc;
+
+	if (msb_first)
+		crc = __fswab16(crc);
+
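+	/* Append the CRC least significant byte first; the byte swap above
+	 * handles technologies that transmit the CRC MSB first.
+	 */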
+	*skb_put(skb, 1) = crc & 0xFF;
+	*skb_put(skb, 1) = (crc >> 8) & 0xFF;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+			  u16 crc_init, u8 bitwise_inv, u8 msb_first)
+{
+	int rc;
+	u16 crc;
+
+	if (skb->len <= 2)
+		return -EIO;
+
+	crc = crc_func(crc_init, skb->data, skb->len - 2);
+
+	if (bitwise_inv)
+		crc = ~crc;
+
+	if (msb_first)
+		crc = __swab16(crc);
+
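+	/* Both trailing bytes must match the computed CRC. */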
+	rc = (skb->data[skb->len - 2] != (crc & 0xFF)) ||
+	     (skb->data[skb->len - 1] != ((crc >> 8) & 0xFF));
+
+	if (rc)
+		return -EIO;
+
+	skb_trim(skb, skb->len - 2);
+
+	return 0;
+}
+
+static inline void digital_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+	ddev->ops->switch_rf(ddev, on);
+}
+
+static inline void digital_abort_cmd(struct nfc_digital_dev *ddev)
+{
+	ddev->ops->abort_cmd(ddev);
+}
+
+static void digital_wq_cmd_complete(struct work_struct *work)
+{
+	struct digital_cmd *cmd;
+	struct nfc_digital_dev *ddev = container_of(work,
+						    struct nfc_digital_dev,
+						    cmd_complete_work);
+
+	mutex_lock(&ddev->cmd_lock);
+
+	cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+				       queue);
+	if (!cmd) {
+		mutex_unlock(&ddev->cmd_lock);
+		return;
+	}
+
+	list_del(&cmd->queue);
+
+	mutex_unlock(&ddev->cmd_lock);
+
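+	/* The command was dequeued under cmd_lock; its completion callback
+	 * runs below without the lock held.
+	 */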
+	if (!IS_ERR(cmd->resp))
+		print_hex_dump_debug("DIGITAL RX: ", DUMP_PREFIX_NONE, 16, 1,
+				     cmd->resp->data, cmd->resp->len, false);
+
+	cmd->cmd_cb(ddev, cmd->cb_context, cmd->resp);
+
+	kfree(cmd->mdaa_params);
+	kfree(cmd);
+
+	schedule_work(&ddev->cmd_work);
+}
+
+static void digital_send_cmd_complete(struct nfc_digital_dev *ddev,
+				      void *arg, struct sk_buff *resp)
+{
+	struct digital_cmd *cmd = arg;
+
+	cmd->resp = resp;
+
+	schedule_work(&ddev->cmd_complete_work);
+}
+
+static void digital_wq_cmd(struct work_struct *work)
+{
+	int rc;
+	struct digital_cmd *cmd;
+	struct digital_tg_mdaa_params *params;
+	struct nfc_digital_dev *ddev = container_of(work,
+						    struct nfc_digital_dev,
+						    cmd_work);
+
+	mutex_lock(&ddev->cmd_lock);
+
+	cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+				       queue);
+	if (!cmd || cmd->pending) {
+		mutex_unlock(&ddev->cmd_lock);
+		return;
+	}
+
+	mutex_unlock(&ddev->cmd_lock);
+
+	if (cmd->req)
+		print_hex_dump_debug("DIGITAL TX: ", DUMP_PREFIX_NONE, 16, 1,
+				     cmd->req->data, cmd->req->len, false);
+
+	switch (cmd->type) {
+	case DIGITAL_CMD_IN_SEND:
+		rc = ddev->ops->in_send_cmd(ddev, cmd->req, cmd->timeout,
+					    digital_send_cmd_complete, cmd);
+		break;
+
+	case DIGITAL_CMD_TG_SEND:
+		rc = ddev->ops->tg_send_cmd(ddev, cmd->req, cmd->timeout,
+					    digital_send_cmd_complete, cmd);
+		break;
+
+	case DIGITAL_CMD_TG_LISTEN:
+		rc = ddev->ops->tg_listen(ddev, cmd->timeout,
+					  digital_send_cmd_complete, cmd);
+		break;
+
+	case DIGITAL_CMD_TG_LISTEN_MDAA:
+		params = cmd->mdaa_params;
+
+		rc = ddev->ops->tg_listen_mdaa(ddev, params, cmd->timeout,
+					       digital_send_cmd_complete, cmd);
+		break;
+
+	default:
+		pr_err("Unknown cmd type %d\n", cmd->type);
+		return;
+	}
+
+	if (!rc)
+		return;
+
+	pr_err("Command send operation failed with error %d\n", rc);
+
+	mutex_lock(&ddev->cmd_lock);
+	list_del(&cmd->queue);
+	mutex_unlock(&ddev->cmd_lock);
+
+	kfree_skb(cmd->req);
+	kfree(cmd->mdaa_params);
+	kfree(cmd);
+
+	schedule_work(&ddev->cmd_work);
+}
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+		     struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+		     u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+		     void *cb_context)
+{
+	struct digital_cmd *cmd;
+
+	cmd = kzalloc(sizeof(struct digital_cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->type = cmd_type;
+	cmd->timeout = timeout;
+	cmd->req = skb;
+	cmd->mdaa_params = params;
+	cmd->cmd_cb = cmd_cb;
+	cmd->cb_context = cb_context;
+	INIT_LIST_HEAD(&cmd->queue);
+
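+	/* Queue the command; the cmd_work worker executes queued commands
+	 * one at a time and is rescheduled from the completion path.
+	 */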
+	mutex_lock(&ddev->cmd_lock);
+	list_add_tail(&cmd->queue, &ddev->cmd_queue);
+	mutex_unlock(&ddev->cmd_lock);
+
+	schedule_work(&ddev->cmd_work);
+
+	return 0;
+}
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+	int rc;
+
+	rc = ddev->ops->in_configure_hw(ddev, type, param);
+	if (rc)
+		pr_err("in_configure_hw failed: %d\n", rc);
+
+	return rc;
+}
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+	int rc;
+
+	rc = ddev->ops->tg_configure_hw(ddev, type, param);
+	if (rc)
+		pr_err("tg_configure_hw failed: %d\n", rc);
+
+	return rc;
+}
+
+static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	struct digital_tg_mdaa_params *params;
+
+	params = kzalloc(sizeof(struct digital_tg_mdaa_params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
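+	/* Advertise NFC-DEP support: canned SENS/SEL responses for NFC-A,
+	 * and for NFC-F an NFCID2 starting with 01FEh plus the FeliCa
+	 * system code.
+	 */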
+	params->sens_res = DIGITAL_SENS_RES_NFC_DEP;
+	get_random_bytes(params->nfcid1, sizeof(params->nfcid1));
+	params->sel_res = DIGITAL_SEL_RES_NFC_DEP;
+
+	params->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+	params->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+	get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+	params->sc = DIGITAL_SENSF_FELICA_SC;
+
+	return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+				500, digital_tg_recv_atr_req, NULL);
+}
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+			 struct nfc_target *target, u8 protocol)
+{
+	int rc;
+	u8 framing;
+	u8 rf_tech;
+	int (*check_crc)(struct sk_buff *skb);
+	void (*add_crc)(struct sk_buff *skb);
+
+	rf_tech = ddev->poll_techs[ddev->poll_tech_index].rf_tech;
+
+	switch (protocol) {
+	case NFC_PROTO_JEWEL:
+		framing = NFC_DIGITAL_FRAMING_NFCA_T1T;
+		check_crc = digital_skb_check_crc_b;
+		add_crc = digital_skb_add_crc_b;
+		break;
+
+	case NFC_PROTO_MIFARE:
+		framing = NFC_DIGITAL_FRAMING_NFCA_T2T;
+		check_crc = digital_skb_check_crc_a;
+		add_crc = digital_skb_add_crc_a;
+		break;
+
+	case NFC_PROTO_FELICA:
+		framing = NFC_DIGITAL_FRAMING_NFCF_T3T;
+		check_crc = digital_skb_check_crc_f;
+		add_crc = digital_skb_add_crc_f;
+		break;
+
+	case NFC_PROTO_NFC_DEP:
+		if (rf_tech == NFC_DIGITAL_RF_TECH_106A) {
+			framing = NFC_DIGITAL_FRAMING_NFCA_NFC_DEP;
+			check_crc = digital_skb_check_crc_a;
+			add_crc = digital_skb_add_crc_a;
+		} else {
+			framing = NFC_DIGITAL_FRAMING_NFCF_NFC_DEP;
+			check_crc = digital_skb_check_crc_f;
+			add_crc = digital_skb_add_crc_f;
+		}
+		break;
+
+	default:
+		pr_err("Invalid protocol %d\n", protocol);
+		return -EINVAL;
+	}
+
+	pr_debug("rf_tech=%d, protocol=%d\n", rf_tech, protocol);
+
+	ddev->curr_rf_tech = rf_tech;
+	ddev->curr_protocol = protocol;
+
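+	/* If the driver computes and checks CRCs in hardware, plug in the
+	 * no-op helpers; otherwise use the software CRC routines selected
+	 * above.
+	 */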
+	if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+		ddev->skb_add_crc = digital_skb_add_crc_none;
+		ddev->skb_check_crc = digital_skb_check_crc_none;
+	} else {
+		ddev->skb_add_crc = add_crc;
+		ddev->skb_check_crc = check_crc;
+	}
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, framing);
+	if (rc)
+		return rc;
+
+	target->supported_protocols = (1 << protocol);
+	rc = nfc_targets_found(ddev->nfc_dev, target, 1);
+	if (rc)
+		return rc;
+
+	ddev->poll_tech_count = 0;
+
+	return 0;
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev)
+{
+	digital_switch_rf(ddev, 0);
+
+	mutex_lock(&ddev->poll_lock);
+
+	if (!ddev->poll_tech_count) {
+		mutex_unlock(&ddev->poll_lock);
+		return;
+	}
+
+	ddev->poll_tech_index = (ddev->poll_tech_index + 1) %
+				ddev->poll_tech_count;
+
+	mutex_unlock(&ddev->poll_lock);
+
+	schedule_work(&ddev->poll_work);
+}
+
+static void digital_wq_poll(struct work_struct *work)
+{
+	int rc;
+	struct digital_poll_tech *poll_tech;
+	struct nfc_digital_dev *ddev = container_of(work,
+						    struct nfc_digital_dev,
+						    poll_work);
+	mutex_lock(&ddev->poll_lock);
+
+	if (!ddev->poll_tech_count) {
+		mutex_unlock(&ddev->poll_lock);
+		return;
+	}
+
+	poll_tech = &ddev->poll_techs[ddev->poll_tech_index];
+
+	mutex_unlock(&ddev->poll_lock);
+
+	rc = poll_tech->poll_func(ddev, poll_tech->rf_tech);
+	if (rc)
+		digital_poll_next_tech(ddev);
+}
+
+static void digital_add_poll_tech(struct nfc_digital_dev *ddev, u8 rf_tech,
+				  digital_poll_t poll_func)
+{
+	struct digital_poll_tech *poll_tech;
+
+	if (ddev->poll_tech_count >= NFC_DIGITAL_POLL_MODE_COUNT_MAX)
+		return;
+
+	poll_tech = &ddev->poll_techs[ddev->poll_tech_count++];
+
+	poll_tech->rf_tech = rf_tech;
+	poll_tech->poll_func = poll_func;
+}
+
+/**
+ * start_poll operation
+ *
+ * For every supported protocol, the corresponding polling function is added
+ * to the table of polling technologies (ddev->poll_techs[]) using
+ * digital_add_poll_tech().
+ * When a polling function fails (by timeout or protocol error) the next one
+ * is scheduled by digital_poll_next_tech() on the poll workqueue
+ * (ddev->poll_work).
+ */
+static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
+			      __u32 tm_protocols)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+	u32 matching_im_protocols, matching_tm_protocols;
+
+	pr_debug("protocols: im 0x%x, tm 0x%x, supported 0x%x\n", im_protocols,
+		 tm_protocols, ddev->protocols);
+
+	matching_im_protocols = ddev->protocols & im_protocols;
+	matching_tm_protocols = ddev->protocols & tm_protocols;
+
+	if (!matching_im_protocols && !matching_tm_protocols) {
+		pr_err("Unknown protocol\n");
+		return -EINVAL;
+	}
+
+	if (ddev->poll_tech_count) {
+		pr_err("Already polling\n");
+		return -EBUSY;
+	}
+
+	if (ddev->curr_protocol) {
+		pr_err("A target is already active\n");
+		return -EBUSY;
+	}
+
+	ddev->poll_tech_count = 0;
+	ddev->poll_tech_index = 0;
+
+	if (matching_im_protocols & DIGITAL_PROTO_NFCA_RF_TECH)
+		digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+				      digital_in_send_sens_req);
+
+	if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
+		digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+				      digital_in_send_sensf_req);
+
+		digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+				      digital_in_send_sensf_req);
+	}
+
+	if (matching_tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		if (ddev->ops->tg_listen_mdaa) {
+			digital_add_poll_tech(ddev, 0,
+					      digital_tg_listen_mdaa);
+		} else {
+			digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+					      digital_tg_listen_nfca);
+
+			digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+					      digital_tg_listen_nfcf);
+
+			digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+					      digital_tg_listen_nfcf);
+		}
+	}
+
+	if (!ddev->poll_tech_count) {
+		pr_err("Unsupported protocols: im=0x%x, tm=0x%x\n",
+		       matching_im_protocols, matching_tm_protocols);
+		return -EINVAL;
+	}
+
+	schedule_work(&ddev->poll_work);
+
+	return 0;
+}
+
+static void digital_stop_poll(struct nfc_dev *nfc_dev)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	mutex_lock(&ddev->poll_lock);
+
+	if (!ddev->poll_tech_count) {
+		pr_err("Polling operation was not running\n");
+		mutex_unlock(&ddev->poll_lock);
+		return;
+	}
+
+	ddev->poll_tech_count = 0;
+
+	mutex_unlock(&ddev->poll_lock);
+
+	cancel_work_sync(&ddev->poll_work);
+
+	digital_abort_cmd(ddev);
+}
+
+static int digital_dev_up(struct nfc_dev *nfc_dev)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	digital_switch_rf(ddev, 1);
+
+	return 0;
+}
+
+static int digital_dev_down(struct nfc_dev *nfc_dev)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	digital_switch_rf(ddev, 0);
+
+	return 0;
+}
+
+static int digital_dep_link_up(struct nfc_dev *nfc_dev,
+			       struct nfc_target *target,
+			       __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	return digital_in_send_atr_req(ddev, target, comm_mode, gb, gb_len);
+}
+
+static int digital_dep_link_down(struct nfc_dev *nfc_dev)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	ddev->curr_protocol = 0;
+
+	return 0;
+}
+
+static int digital_activate_target(struct nfc_dev *nfc_dev,
+				   struct nfc_target *target, __u32 protocol)
+{
+	return 0;
+}
+
+static void digital_deactivate_target(struct nfc_dev *nfc_dev,
+				      struct nfc_target *target)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+	ddev->curr_protocol = 0;
+}
+
+static int digital_tg_send(struct nfc_dev *dev, struct sk_buff *skb)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(dev);
+
+	return digital_tg_send_dep_res(ddev, skb);
+}
+
+static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
+				     struct sk_buff *resp)
+{
+	struct digital_data_exch *data_exch = arg;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		goto done;
+	}
+
+	if (ddev->curr_protocol == NFC_PROTO_MIFARE)
+		rc = digital_in_recv_mifare_res(resp);
+	else
+		rc = ddev->skb_check_crc(resp);
+
+	if (rc) {
+		kfree_skb(resp);
+		resp = NULL;
+	}
+
+done:
+	data_exch->cb(data_exch->cb_context, resp, rc);
+
+	kfree(data_exch);
+}
+
+static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
+			   struct sk_buff *skb, data_exchange_cb_t cb,
+			   void *cb_context)
+{
+	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+	struct digital_data_exch *data_exch;
+
+	data_exch = kzalloc(sizeof(struct digital_data_exch), GFP_KERNEL);
+	if (!data_exch) {
+		pr_err("Failed to allocate data_exch struct\n");
+		return -ENOMEM;
+	}
+
+	data_exch->cb = cb;
+	data_exch->cb_context = cb_context;
+
+	if (ddev->curr_protocol == NFC_PROTO_NFC_DEP)
+		return digital_in_send_dep_req(ddev, target, skb, data_exch);
+
+	ddev->skb_add_crc(skb);
+
+	return digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
+				   data_exch);
+}
+
+static struct nfc_ops digital_nfc_ops = {
+	.dev_up = digital_dev_up,
+	.dev_down = digital_dev_down,
+	.start_poll = digital_start_poll,
+	.stop_poll = digital_stop_poll,
+	.dep_link_up = digital_dep_link_up,
+	.dep_link_down = digital_dep_link_down,
+	.activate_target = digital_activate_target,
+	.deactivate_target = digital_deactivate_target,
+	.tm_send = digital_tg_send,
+	.im_transceive = digital_in_send,
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+					    __u32 supported_protocols,
+					    __u32 driver_capabilities,
+					    int tx_headroom, int tx_tailroom)
+{
+	struct nfc_digital_dev *ddev;
+
+	if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen ||
+	    !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd ||
+	    !ops->switch_rf)
+		return NULL;
+
+	ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL);
+	if (!ddev)
+		return NULL;
+
+	ddev->driver_capabilities = driver_capabilities;
+	ddev->ops = ops;
+
+	mutex_init(&ddev->cmd_lock);
+	INIT_LIST_HEAD(&ddev->cmd_queue);
+
+	INIT_WORK(&ddev->cmd_work, digital_wq_cmd);
+	INIT_WORK(&ddev->cmd_complete_work, digital_wq_cmd_complete);
+
+	mutex_init(&ddev->poll_lock);
+	INIT_WORK(&ddev->poll_work, digital_wq_poll);
+
+	if (supported_protocols & NFC_PROTO_JEWEL_MASK)
+		ddev->protocols |= NFC_PROTO_JEWEL_MASK;
+	if (supported_protocols & NFC_PROTO_MIFARE_MASK)
+		ddev->protocols |= NFC_PROTO_MIFARE_MASK;
+	if (supported_protocols & NFC_PROTO_FELICA_MASK)
+		ddev->protocols |= NFC_PROTO_FELICA_MASK;
+	if (supported_protocols & NFC_PROTO_NFC_DEP_MASK)
+		ddev->protocols |= NFC_PROTO_NFC_DEP_MASK;
+
+	ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
+	ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
+
+	ddev->nfc_dev = nfc_allocate_device(&digital_nfc_ops, ddev->protocols,
+					    ddev->tx_headroom,
+					    ddev->tx_tailroom);
+	if (!ddev->nfc_dev) {
+		pr_err("nfc_allocate_device failed\n");
+		goto free_dev;
+	}
+
+	nfc_set_drvdata(ddev->nfc_dev, ddev);
+
+	return ddev;
+
+free_dev:
+	kfree(ddev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(nfc_digital_allocate_device);
+
+void nfc_digital_free_device(struct nfc_digital_dev *ddev)
+{
+	nfc_free_device(ddev->nfc_dev);
+	kfree(ddev);
+}
+EXPORT_SYMBOL(nfc_digital_free_device);
+
+int nfc_digital_register_device(struct nfc_digital_dev *ddev)
+{
+	return nfc_register_device(ddev->nfc_dev);
+}
+EXPORT_SYMBOL(nfc_digital_register_device);
+
+void nfc_digital_unregister_device(struct nfc_digital_dev *ddev)
+{
+	struct digital_cmd *cmd, *n;
+
+	nfc_unregister_device(ddev->nfc_dev);
+
+	mutex_lock(&ddev->poll_lock);
+	ddev->poll_tech_count = 0;
+	mutex_unlock(&ddev->poll_lock);
+
+	cancel_work_sync(&ddev->poll_work);
+	cancel_work_sync(&ddev->cmd_work);
+	cancel_work_sync(&ddev->cmd_complete_work);
+
+	list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {
+		list_del(&cmd->queue);
+		kfree(cmd->mdaa_params);
+		kfree(cmd);
+	}
+}
+EXPORT_SYMBOL(nfc_digital_unregister_device);
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
new file mode 100644
index 0000000..07bbc24
--- /dev/null
+++ b/net/nfc/digital_dep.c
@@ -0,0 +1,729 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4
+#define DIGITAL_NFC_DEP_FRAME_DIR_IN  0xD5
+
+#define DIGITAL_NFC_DEP_NFCA_SOD_SB   0xF0
+
+#define DIGITAL_CMD_ATR_REQ 0x00
+#define DIGITAL_CMD_ATR_RES 0x01
+#define DIGITAL_CMD_PSL_REQ 0x04
+#define DIGITAL_CMD_PSL_RES 0x05
+#define DIGITAL_CMD_DEP_REQ 0x06
+#define DIGITAL_CMD_DEP_RES 0x07
+
+#define DIGITAL_ATR_REQ_MIN_SIZE 16
+#define DIGITAL_ATR_REQ_MAX_SIZE 64
+
+#define DIGITAL_NFCID3_LEN ((u8)8)
+#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30
+#define DIGITAL_GB_BIT	0x02
+
+#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
+
+#define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10
+
+#define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \
+				((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT)
+#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb)  ((pfb) & 0x10)
+#define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08)
+#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04)
+#define DIGITAL_NFC_DEP_PFB_PNI(pfb)     ((pfb) & 0x03)
+
+#define DIGITAL_NFC_DEP_PFB_I_PDU          0x00
+#define DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU   0x40
+#define DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU 0x80
+
+struct digital_atr_req {
+	u8 dir;
+	u8 cmd;
+	u8 nfcid3[10];
+	u8 did;
+	u8 bs;
+	u8 br;
+	u8 pp;
+	u8 gb[0];
+} __packed;
+
+struct digital_atr_res {
+	u8 dir;
+	u8 cmd;
+	u8 nfcid3[10];
+	u8 did;
+	u8 bs;
+	u8 br;
+	u8 to;
+	u8 pp;
+	u8 gb[0];
+} __packed;
+
+struct digital_psl_req {
+	u8 dir;
+	u8 cmd;
+	u8 did;
+	u8 brs;
+	u8 fsl;
+} __packed;
+
+struct digital_psl_res {
+	u8 dir;
+	u8 cmd;
+	u8 did;
+} __packed;
+
+struct digital_dep_req_res {
+	u8 dir;
+	u8 cmd;
+	u8 pfb;
+} __packed;
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp);
+
+static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev,
+				     struct sk_buff *skb)
+{
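+	/* Prepend the NFC-DEP start of data: a length byte covering the
+	 * whole frame, preceded by the F0h start byte on NFC-A.
+	 */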
+	skb_push(skb, sizeof(u8));
+
+	skb->data[0] = skb->len;
+
+	if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+		*skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
+}
+
+static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
+				    struct sk_buff *skb)
+{
+	u8 size;
+
+	if (skb->len < 2)
+		return -EIO;
+
+	if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+		skb_pull(skb, sizeof(u8));
+
+	size = skb->data[0];
+	if (size != skb->len)
+		return -EIO;
+
+	skb_pull(skb, sizeof(u8));
+
+	return 0;
+}
+
+static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
+				 struct sk_buff *resp)
+{
+	struct nfc_target *target = arg;
+	struct digital_atr_res *atr_res;
+	u8 gb_len;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	rc = ddev->skb_check_crc(resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.6");
+		goto exit;
+	}
+
+	rc = digital_skb_pull_dep_sod(ddev, resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.2");
+		goto exit;
+	}
+
+	if (resp->len < sizeof(struct digital_atr_res)) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	gb_len = resp->len - sizeof(struct digital_atr_res);
+
+	atr_res = (struct digital_atr_res *)resp->data;
+
+	rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len);
+	if (rc)
+		goto exit;
+
+	rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE,
+				NFC_RF_INITIATOR);
+
+	ddev->curr_nfc_dep_pni = 0;
+
+exit:
+	dev_kfree_skb(resp);
+
+	if (rc)
+		ddev->curr_protocol = 0;
+}
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+			    struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+			    size_t gb_len)
+{
+	struct sk_buff *skb;
+	struct digital_atr_req *atr_req;
+	uint size;
+
+	size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len;
+
+	if (size > DIGITAL_ATR_REQ_MAX_SIZE) {
+		PROTOCOL_ERR("14.6.1.1");
+		return -EINVAL;
+	}
+
+	skb = digital_skb_alloc(ddev, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(struct digital_atr_req));
+
+	atr_req = (struct digital_atr_req *)skb->data;
+	memset(atr_req, 0, sizeof(struct digital_atr_req));
+
+	atr_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+	atr_req->cmd = DIGITAL_CMD_ATR_REQ;
+	if (target->nfcid2_len)
+		memcpy(atr_req->nfcid3, target->nfcid2,
+		       max(target->nfcid2_len, DIGITAL_NFCID3_LEN));
+	else
+		get_random_bytes(atr_req->nfcid3, DIGITAL_NFCID3_LEN);
+
+	atr_req->did = 0;
+	atr_req->bs = 0;
+	atr_req->br = 0;
+
+	atr_req->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+
+	if (gb_len) {
+		atr_req->pp |= DIGITAL_GB_BIT;
+		memcpy(skb_put(skb, gb_len), gb, gb_len);
+	}
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, target);
+
+	return 0;
+}
+
+static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
+				struct digital_data_exch *data_exch, u8 rtox)
+{
+	struct digital_dep_req_res *dep_req;
+	struct sk_buff *skb;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, 1);
+	if (!skb)
+		return -ENOMEM;
+
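+	/* Acknowledge a response timeout extension request by echoing the
+	 * RTOX value back in a supervisor PDU.
+	 */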
+	*skb_put(skb, 1) = rtox;
+
+	skb_push(skb, sizeof(struct digital_dep_req_res));
+
+	dep_req = (struct digital_dep_req_res *)skb->data;
+
+	dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+	dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+	dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU |
+		       DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT;
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+				 data_exch);
+
+	return rc;
+}
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	struct digital_data_exch *data_exch = arg;
+	struct digital_dep_req_res *dep_res;
+	u8 pfb;
+	uint size;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	rc = ddev->skb_check_crc(resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.6");
+		goto error;
+	}
+
+	rc = digital_skb_pull_dep_sod(ddev, resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.2");
+		goto exit;
+	}
+
+	dep_res = (struct digital_dep_req_res *)resp->data;
+
+	if (resp->len < sizeof(struct digital_dep_req_res) ||
+	    dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN ||
+	    dep_res->cmd != DIGITAL_CMD_DEP_RES) {
+		rc = -EIO;
+		goto error;
+	}
+
+	pfb = dep_res->pfb;
+
+	switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) {
+	case DIGITAL_NFC_DEP_PFB_I_PDU:
+		if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) {
+			PROTOCOL_ERR("14.12.3.3");
+			rc = -EIO;
+			goto error;
+		}
+
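+		/* The packet number information (PNI) is a 2-bit counter;
+		 * the mask wraps it modulo 4.
+		 */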
+		ddev->curr_nfc_dep_pni =
+			DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+		rc = 0;
+		break;
+
+	case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+		pr_err("Received an ACK/NACK PDU\n");
+		rc = -EIO;
+		goto error;
+
+	case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+		if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) {
+			rc = -EINVAL;
+			goto error;
+		}
+
+		rc = digital_in_send_rtox(ddev, data_exch, resp->data[3]);
+		if (rc)
+			goto error;
+
+		kfree_skb(resp);
+		return;
+	}
+
+	if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) {
+		pr_err("MI bit set. Chained PDU not supported\n");
+		rc = -EIO;
+		goto error;
+	}
+
+	size = sizeof(struct digital_dep_req_res);
+
+	if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb))
+		size++;
+
+	if (size > resp->len) {
+		rc = -EIO;
+		goto error;
+	}
+
+	skb_pull(resp, size);
+
+exit:
+	data_exch->cb(data_exch->cb_context, resp, rc);
+
+error:
+	kfree(data_exch);
+
+	if (rc)
+		kfree_skb(resp);
+}
+
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+			    struct nfc_target *target, struct sk_buff *skb,
+			    struct digital_data_exch *data_exch)
+{
+	struct digital_dep_req_res *dep_req;
+
+	skb_push(skb, sizeof(struct digital_dep_req_res));
+
+	dep_req = (struct digital_dep_req_res *)skb->data;
+	dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+	dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+	dep_req->pfb = ddev->curr_nfc_dep_pni;
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	return digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+				   data_exch);
+}
+
+static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	int rc;
+	struct digital_dep_req_res *dep_req;
+	size_t size;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	rc = ddev->skb_check_crc(resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.6");
+		goto exit;
+	}
+
+	rc = digital_skb_pull_dep_sod(ddev, resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.2");
+		goto exit;
+	}
+
+	size = sizeof(struct digital_dep_req_res);
+	dep_req = (struct digital_dep_req_res *)resp->data;
+
+	if (resp->len < size || dep_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+	    dep_req->cmd != DIGITAL_CMD_DEP_REQ) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (DIGITAL_NFC_DEP_DID_BIT_SET(dep_req->pfb))
+		size++;
+
+	if (resp->len < size) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	switch (DIGITAL_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
+	case DIGITAL_NFC_DEP_PFB_I_PDU:
+		pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n");
+		ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(dep_req->pfb);
+		break;
+	case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+		pr_err("Received an ACK/NACK PDU\n");
+		rc = -EINVAL;
+		goto exit;
+	case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+		pr_err("Received a SUPERVISOR PDU\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	skb_pull(resp, size);
+
+	rc = nfc_tm_data_received(ddev->nfc_dev, resp);
+
+exit:
+	if (rc)
+		kfree_skb(resp);
+}
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb)
+{
+	struct digital_dep_req_res *dep_res;
+
+	skb_push(skb, sizeof(struct digital_dep_req_res));
+	dep_res = (struct digital_dep_req_res *)skb->data;
+
+	dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+	dep_res->cmd = DIGITAL_CMD_DEP_RES;
+	dep_res->pfb = ddev->curr_nfc_dep_pni;
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	return digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
+				   NULL);
+}
+
+static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev,
+					     void *arg, struct sk_buff *resp)
+{
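+	/* The RF technology was passed through the callback argument as an
+	 * ERR_PTR-encoded value; unpack it here.
+	 */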
+	u8 rf_tech = PTR_ERR(arg);
+
+	if (IS_ERR(resp))
+		return;
+
+	digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+
+	digital_tg_listen(ddev, 1500, digital_tg_recv_dep_req, NULL);
+
+	dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did,
+				   u8 rf_tech)
+{
+	struct digital_psl_res *psl_res;
+	struct sk_buff *skb;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, sizeof(struct digital_psl_res));
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(struct digital_psl_res));
+
+	psl_res = (struct digital_psl_res *)skb->data;
+
+	psl_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+	psl_res->cmd = DIGITAL_CMD_PSL_RES;
+	psl_res->did = did;
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete,
+				 ERR_PTR(rf_tech));
+
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	int rc;
+	struct digital_psl_req *psl_req;
+	u8 rf_tech;
+	u8 dsi;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	rc = ddev->skb_check_crc(resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.6");
+		goto exit;
+	}
+
+	rc = digital_skb_pull_dep_sod(ddev, resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.2");
+		goto exit;
+	}
+
+	psl_req = (struct digital_psl_req *)resp->data;
+
+	if (resp->len != sizeof(struct digital_psl_req) ||
+	    psl_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+	    psl_req->cmd != DIGITAL_CMD_PSL_REQ) {
+		rc = -EIO;
+		goto exit;
+	}
+
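+	/* Bits 3-5 of the BRS byte encode the initiator-to-target bit rate
+	 * divisor (DSI), used here to pick the new RF technology.
+	 */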
+	dsi = (psl_req->brs >> 3) & 0x07;
+	switch (dsi) {
+	case 0:
+		rf_tech = NFC_DIGITAL_RF_TECH_106A;
+		break;
+	case 1:
+		rf_tech = NFC_DIGITAL_RF_TECH_212F;
+		break;
+	case 2:
+		rf_tech = NFC_DIGITAL_RF_TECH_424F;
+		break;
+	default:
+		pr_err("Unsupported dsi value %d\n", dsi);
+		goto exit;
+	}
+
+	rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech);
+
+exit:
+	kfree_skb(resp);
+}
+
+static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev,
+					     void *arg, struct sk_buff *resp)
+{
+	int offset;
+
+	if (IS_ERR(resp)) {
+		digital_poll_next_tech(ddev);
+		return;
+	}
+
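+	/* Skip the length and direction bytes (plus the F0h start byte on
+	 * NFC-A) to peek at the command code and dispatch PSL_REQ vs
+	 * DEP_REQ handling.
+	 */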
+	offset = 2;
+	if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB)
+		offset++;
+
+	if (resp->data[offset] == DIGITAL_CMD_PSL_REQ)
+		digital_tg_recv_psl_req(ddev, arg, resp);
+	else
+		digital_tg_recv_dep_req(ddev, arg, resp);
+}
+
+static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
+				   struct digital_atr_req *atr_req)
+{
+	struct digital_atr_res *atr_res;
+	struct sk_buff *skb;
+	u8 *gb;
+	size_t gb_len;
+	int rc;
+
+	gb = nfc_get_local_general_bytes(ddev->nfc_dev, &gb_len);
+	if (!gb)
+		gb_len = 0;
+
+	skb = digital_skb_alloc(ddev, sizeof(struct digital_atr_res) + gb_len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(struct digital_atr_res));
+	atr_res = (struct digital_atr_res *)skb->data;
+
+	memset(atr_res, 0, sizeof(struct digital_atr_res));
+
+	atr_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+	atr_res->cmd = DIGITAL_CMD_ATR_RES;
+	memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3));
+	atr_res->to = 8;
+	atr_res->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+	if (gb_len) {
+		skb_put(skb, gb_len);
+
+		atr_res->pp |= DIGITAL_GB_BIT;
+		memcpy(atr_res->gb, gb, gb_len);
+	}
+
+	digital_skb_push_dep_sod(ddev, skb);
+
+	ddev->skb_add_crc(skb);
+
+	rc = digital_tg_send_cmd(ddev, skb, 999,
+				 digital_tg_send_atr_res_complete, NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+			     struct sk_buff *resp)
+{
+	int rc;
+	struct digital_atr_req *atr_req;
+	size_t gb_len, min_size;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (!resp->len) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) {
+		min_size = DIGITAL_ATR_REQ_MIN_SIZE + 2;
+
+		ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_106A;
+		ddev->skb_add_crc = digital_skb_add_crc_a;
+		ddev->skb_check_crc = digital_skb_check_crc_a;
+	} else {
+		min_size = DIGITAL_ATR_REQ_MIN_SIZE + 1;
+
+		ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_212F;
+		ddev->skb_add_crc = digital_skb_add_crc_f;
+		ddev->skb_check_crc = digital_skb_check_crc_f;
+	}
+
+	if (resp->len < min_size) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+		ddev->skb_add_crc = digital_skb_add_crc_none;
+		ddev->skb_check_crc = digital_skb_check_crc_none;
+	}
+
+	rc = ddev->skb_check_crc(resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.6");
+		goto exit;
+	}
+
+	rc = digital_skb_pull_dep_sod(ddev, resp);
+	if (rc) {
+		PROTOCOL_ERR("14.4.1.2");
+		goto exit;
+	}
+
+	atr_req = (struct digital_atr_req *)resp->data;
+
+	if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+	    atr_req->cmd != DIGITAL_CMD_ATR_REQ) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED);
+	if (rc)
+		goto exit;
+
+	rc = digital_tg_send_atr_res(ddev, atr_req);
+	if (rc)
+		goto exit;
+
+	gb_len = resp->len - sizeof(struct digital_atr_req);
+	rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+			      NFC_COMM_PASSIVE, atr_req->gb, gb_len);
+	if (rc)
+		goto exit;
+
+	ddev->poll_tech_count = 0;
+
+	rc = 0;
+exit:
+	if (rc)
+		digital_poll_next_tech(ddev);
+
+	dev_kfree_skb(resp);
+}
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
new file mode 100644
index 0000000..251c8c7
--- /dev/null
+++ b/net/nfc/digital_technology.c
@@ -0,0 +1,770 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_CMD_SENS_REQ    0x26
+#define DIGITAL_CMD_ALL_REQ     0x52
+#define DIGITAL_CMD_SEL_REQ_CL1 0x93
+#define DIGITAL_CMD_SEL_REQ_CL2 0x95
+#define DIGITAL_CMD_SEL_REQ_CL3 0x97
+
+#define DIGITAL_SDD_REQ_SEL_PAR 0x20
+
+#define DIGITAL_SDD_RES_CT  0x88
+#define DIGITAL_SDD_RES_LEN 5
+
+#define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04))
+#define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60))
+#define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40)
+
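+/* A SENS_RES is valid when its anticollision and platform bits agree: a
+ * T1T (platform bits 0C00h) carries no bit frame SDD information, while
+ * any other platform must.
+ */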
+#define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00)
+#define DIGITAL_SENS_RES_IS_VALID(sens_res) \
+	((!((sens_res) & 0x001F) && (((sens_res) & 0x0C00) == 0x0C00)) || \
+	(((sens_res) & 0x001F) && ((sens_res) & 0x0C00) != 0x0C00))
+
+#define DIGITAL_MIFARE_READ_RES_LEN 16
+#define DIGITAL_MIFARE_ACK_RES	0x0A
+
+#define DIGITAL_CMD_SENSF_REQ	0x00
+#define DIGITAL_CMD_SENSF_RES	0x01
+
+#define DIGITAL_SENSF_RES_MIN_LENGTH 17
+#define DIGITAL_SENSF_RES_RD_AP_B1   0x00
+#define DIGITAL_SENSF_RES_RD_AP_B2   0x8F
+
+#define DIGITAL_SENSF_REQ_RC_NONE 0
+#define DIGITAL_SENSF_REQ_RC_SC   1
+#define DIGITAL_SENSF_REQ_RC_AP   2
+
+struct digital_sdd_res {
+	u8 nfcid1[4];
+	u8 bcc;
+} __packed;
+
+struct digital_sel_req {
+	u8 sel_cmd;
+	u8 b2;
+	u8 nfcid1[4];
+	u8 bcc;
+} __packed;
+
+struct digital_sensf_req {
+	u8 cmd;
+	u8 sc1;
+	u8 sc2;
+	u8 rc;
+	u8 tsn;
+} __packed;
+
+struct digital_sensf_res {
+	u8 cmd;
+	u8 nfcid2[8];
+	u8 pad0[2];
+	u8 pad1[3];
+	u8 mrti_check;
+	u8 mrti_update;
+	u8 pad2;
+	u8 rd[2];
+} __packed;
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+				   struct nfc_target *target);
+
+static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	struct nfc_target *target = arg;
+	int rc;
+	u8 sel_res;
+	u8 nfc_proto;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+		rc = digital_skb_check_crc_a(resp);
+		if (rc) {
+			PROTOCOL_ERR("4.4.1.3");
+			goto exit;
+		}
+	}
+
+	if (!resp->len) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	sel_res = resp->data[0];
+
+	if (!DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res)) {
+		rc = digital_in_send_sdd_req(ddev, target);
+		if (rc)
+			goto exit;
+
+		goto exit_free_skb;
+	}
+
+	if (DIGITAL_SEL_RES_IS_T2T(sel_res)) {
+		nfc_proto = NFC_PROTO_MIFARE;
+	} else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
+		nfc_proto = NFC_PROTO_NFC_DEP;
+	} else {
+		rc = -EOPNOTSUPP;
+		goto exit;
+	}
+
+	target->sel_res = sel_res;
+
+	rc = digital_target_found(ddev, target, nfc_proto);
+
+exit:
+	kfree(target);
+
+exit_free_skb:
+	dev_kfree_skb(resp);
+
+	if (rc)
+		digital_poll_next_tech(ddev);
+}
+
+static int digital_in_send_sel_req(struct nfc_digital_dev *ddev,
+				   struct nfc_target *target,
+				   struct digital_sdd_res *sdd_res)
+{
+	struct sk_buff *skb;
+	struct digital_sel_req *sel_req;
+	u8 sel_cmd;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, sizeof(struct digital_sel_req));
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(struct digital_sel_req));
+	sel_req = (struct digital_sel_req *)skb->data;
+
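+	/* Pick the cascade level from the number of NFCID1 bytes collected
+	 * so far: CL1 for a 4-byte ID, CL2 for 7, CL3 for 10.
+	 */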
+	if (target->nfcid1_len <= 4)
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+	else if (target->nfcid1_len < 10)
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+	else
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+	sel_req->sel_cmd = sel_cmd;
+	sel_req->b2 = 0x70;
+	memcpy(sel_req->nfcid1, sdd_res->nfcid1, 4);
+	sel_req->bcc = sdd_res->bcc;
+
+	if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+		rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A);
+		if (rc)
+			goto exit;
+	} else {
+		digital_skb_add_crc_a(skb);
+	}
+
+	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sel_res,
+				 target);
+exit:
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static void digital_in_recv_sdd_res(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	struct nfc_target *target = arg;
+	struct digital_sdd_res *sdd_res;
+	int rc;
+	u8 offset, size;
+	u8 i, bcc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (resp->len < DIGITAL_SDD_RES_LEN) {
+		PROTOCOL_ERR("4.7.2.8");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	sdd_res = (struct digital_sdd_res *)resp->data;
+
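+	/* BCC is the exclusive-or of the four preceding NFCID1 bytes. */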
+	for (i = 0, bcc = 0; i < 4; i++)
+		bcc ^= sdd_res->nfcid1[i];
+
+	if (bcc != sdd_res->bcc) {
+		PROTOCOL_ERR("4.7.2.6");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (sdd_res->nfcid1[0] == DIGITAL_SDD_RES_CT) {
+		offset = 1;
+		size = 3;
+	} else {
+		offset = 0;
+		size = 4;
+	}
+
+	memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset,
+	       size);
+	target->nfcid1_len += size;
+
+	rc = digital_in_send_sel_req(ddev, target, sdd_res);
+
+exit:
+	dev_kfree_skb(resp);
+
+	if (rc) {
+		kfree(target);
+		digital_poll_next_tech(ddev);
+	}
+}
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+				   struct nfc_target *target)
+{
+	int rc;
+	struct sk_buff *skb;
+	u8 sel_cmd;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCA_STANDARD);
+	if (rc)
+		return rc;
+
+	skb = digital_skb_alloc(ddev, 2);
+	if (!skb)
+		return -ENOMEM;
+
+	if (target->nfcid1_len == 0)
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+	else if (target->nfcid1_len == 3)
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+	else
+		sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+	*skb_put(skb, sizeof(u8)) = sel_cmd;
+	*skb_put(skb, sizeof(u8)) = DIGITAL_SDD_REQ_SEL_PAR;
+
+	return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+				   target);
+}
+
+static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
+				     struct sk_buff *resp)
+{
+	struct nfc_target *target = NULL;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (resp->len < sizeof(u16)) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+	if (!target) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	target->sens_res = __le16_to_cpu(*(__le16 *)resp->data);
+
+	if (!DIGITAL_SENS_RES_IS_VALID(target->sens_res)) {
+		PROTOCOL_ERR("4.6.3.3");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (DIGITAL_SENS_RES_IS_T1T(target->sens_res))
+		rc = digital_target_found(ddev, target, NFC_PROTO_JEWEL);
+	else
+		rc = digital_in_send_sdd_req(ddev, target);
+
+exit:
+	dev_kfree_skb(resp);
+
+	if (rc) {
+		kfree(target);
+		digital_poll_next_tech(ddev);
+	}
+}
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	struct sk_buff *skb;
+	int rc;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+				     NFC_DIGITAL_RF_TECH_106A);
+	if (rc)
+		return rc;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCA_SHORT);
+	if (rc)
+		return rc;
+
+	skb = digital_skb_alloc(ddev, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, sizeof(u8)) = DIGITAL_CMD_SENS_REQ;
+
+	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+int digital_in_recv_mifare_res(struct sk_buff *resp)
+{
+	/* A successful READ command response is 16 data bytes + 2 CRC bytes
+	 * long. Since the driver can't differentiate an ACK/NACK response
+	 * from a valid READ response, the CRC calculation must be handled
+	 * at the digital level even if the driver supports it for this
+	 * technology.
+	 */
+	if (resp->len == DIGITAL_MIFARE_READ_RES_LEN + DIGITAL_CRC_LEN) {
+		if (digital_skb_check_crc_a(resp)) {
+			PROTOCOL_ERR("9.4.1.2");
+			return -EIO;
+		}
+
+		return 0;
+	}
+
+	/* ACK response (i.e. successful WRITE). */
+	if (resp->len == 1 && resp->data[0] == DIGITAL_MIFARE_ACK_RES) {
+		resp->data[0] = 0;
+		return 0;
+	}
+
+	/* NACK and any other responses are treated as error. */
+	return -EIO;
+}
+
+static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
+				   struct sk_buff *resp)
+{
+	int rc;
+	u8 proto;
+	struct nfc_target target;
+	struct digital_sensf_res *sensf_res;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (resp->len < DIGITAL_SENSF_RES_MIN_LENGTH) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+		rc = digital_skb_check_crc_f(resp);
+		if (rc) {
+			PROTOCOL_ERR("6.4.1.8");
+			goto exit;
+		}
+	}
+
+	skb_pull(resp, 1);
+
+	if (resp->len > sizeof(target.sensf_res)) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	memset(&target, 0, sizeof(struct nfc_target));
+
+	sensf_res = (struct digital_sensf_res *)resp->data;
+
+	memcpy(target.sensf_res, sensf_res, resp->len);
+	target.sensf_res_len = resp->len;
+
+	memcpy(target.nfcid2, sensf_res->nfcid2, NFC_NFCID2_MAXSIZE);
+	target.nfcid2_len = NFC_NFCID2_MAXSIZE;
+
+	if (target.nfcid2[0] == DIGITAL_SENSF_NFCID2_NFC_DEP_B1 &&
+	    target.nfcid2[1] == DIGITAL_SENSF_NFCID2_NFC_DEP_B2)
+		proto = NFC_PROTO_NFC_DEP;
+	else
+		proto = NFC_PROTO_FELICA;
+
+	rc = digital_target_found(ddev, &target, proto);
+
+exit:
+	dev_kfree_skb(resp);
+
+	if (rc)
+		digital_poll_next_tech(ddev);
+}
+
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	struct digital_sensf_req *sensf_req;
+	struct sk_buff *skb;
+	int rc;
+	u8 size;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+	if (rc)
+		return rc;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCF);
+	if (rc)
+		return rc;
+
+	size = sizeof(struct digital_sensf_req);
+
+	skb = digital_skb_alloc(ddev, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+
+	sensf_req = (struct digital_sensf_req *)skb->data;
+	sensf_req->cmd = DIGITAL_CMD_SENSF_REQ;
+	sensf_req->sc1 = 0xFF;
+	sensf_req->sc2 = 0xFF;
+	sensf_req->rc = 0;
+	sensf_req->tsn = 0;
+
+	*skb_push(skb, 1) = size + 1;
+
+	if (!DIGITAL_DRV_CAPS_IN_CRC(ddev))
+		digital_skb_add_crc_f(skb);
+
+	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensf_res,
+				 NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev)
+{
+	struct sk_buff *skb;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = DIGITAL_SEL_RES_NFC_DEP;
+
+	if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+		digital_skb_add_crc_a(skb);
+
+	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req,
+				 NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static void digital_tg_recv_sel_req(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+		rc = digital_skb_check_crc_a(resp);
+		if (rc) {
+			PROTOCOL_ERR("4.4.1.3");
+			goto exit;
+		}
+	}
+
+	/* Silently ignore SEL_REQ content and send a SEL_RES for NFC-DEP */
+
+	rc = digital_tg_send_sel_res(ddev);
+
+exit:
+	if (rc)
+		digital_poll_next_tech(ddev);
+
+	dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev)
+{
+	struct sk_buff *skb;
+	struct digital_sdd_res *sdd_res;
+	int rc, i;
+
+	skb = digital_skb_alloc(ddev, sizeof(struct digital_sdd_res));
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(struct digital_sdd_res));
+	sdd_res = (struct digital_sdd_res *)skb->data;
+
+	sdd_res->nfcid1[0] = 0x08;
+	get_random_bytes(sdd_res->nfcid1 + 1, 3);
+
+	sdd_res->bcc = 0;
+	for (i = 0; i < 4; i++)
+		sdd_res->bcc ^= sdd_res->nfcid1[i];
+
+	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req,
+				 NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static void digital_tg_recv_sdd_req(struct nfc_digital_dev *ddev, void *arg,
+				    struct sk_buff *resp)
+{
+	u8 *sdd_req;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	sdd_req = resp->data;
+
+	if (resp->len < 2 || sdd_req[0] != DIGITAL_CMD_SEL_REQ_CL1 ||
+	    sdd_req[1] != DIGITAL_SDD_REQ_SEL_PAR) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = digital_tg_send_sdd_res(ddev);
+
+exit:
+	if (rc)
+		digital_poll_next_tech(ddev);
+
+	dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev)
+{
+	struct sk_buff *skb;
+	u8 *sens_res;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, 2);
+	if (!skb)
+		return -ENOMEM;
+
+	sens_res = skb_put(skb, 2);
+
+	sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF;
+	sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF;
+
+	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req,
+				 NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+			      struct sk_buff *resp)
+{
+	u8 sens_req;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (!resp->len) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	sens_req = resp->data[0];
+
+	if (sens_req != DIGITAL_CMD_SENS_REQ &&
+	    sens_req != DIGITAL_CMD_ALL_REQ) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = digital_tg_send_sens_res(ddev);
+
+exit:
+	if (rc)
+		digital_poll_next_tech(ddev);
+
+	dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
+			      struct digital_sensf_req *sensf_req)
+{
+	struct sk_buff *skb;
+	u8 size;
+	int rc;
+	struct digital_sensf_res *sensf_res;
+
+	size = sizeof(struct digital_sensf_res);
+
+	if (sensf_req->rc == DIGITAL_SENSF_REQ_RC_NONE)
+		size -= sizeof(sensf_res->rd);
+
+	skb = digital_skb_alloc(ddev, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+
+	sensf_res = (struct digital_sensf_res *)skb->data;
+
+	memset(sensf_res, 0, size);
+
+	sensf_res->cmd = DIGITAL_CMD_SENSF_RES;
+	sensf_res->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+	sensf_res->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+	get_random_bytes(&sensf_res->nfcid2[2], 6);
+
+	switch (sensf_req->rc) {
+	case DIGITAL_SENSF_REQ_RC_SC:
+		sensf_res->rd[0] = sensf_req->sc1;
+		sensf_res->rd[1] = sensf_req->sc2;
+		break;
+	case DIGITAL_SENSF_REQ_RC_AP:
+		sensf_res->rd[0] = DIGITAL_SENSF_RES_RD_AP_B1;
+		sensf_res->rd[1] = DIGITAL_SENSF_RES_RD_AP_B2;
+		break;
+	}
+
+	*skb_push(skb, sizeof(u8)) = size + 1;
+
+	if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+		digital_skb_add_crc_f(skb);
+
+	rc = digital_tg_send_cmd(ddev, skb, 300,
+				 digital_tg_recv_atr_req, NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+			       struct sk_buff *resp)
+{
+	struct digital_sensf_req *sensf_req;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+		rc = digital_skb_check_crc_f(resp);
+		if (rc) {
+			PROTOCOL_ERR("6.4.1.8");
+			goto exit;
+		}
+	}
+
+	if (resp->len != sizeof(struct digital_sensf_req) + 1) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	skb_pull(resp, 1);
+	sensf_req = (struct digital_sensf_req *)resp->data;
+
+	if (sensf_req->cmd != DIGITAL_CMD_SENSF_REQ) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = digital_tg_send_sensf_res(ddev, sensf_req);
+
+exit:
+	if (rc)
+		digital_poll_next_tech(ddev);
+
+	dev_kfree_skb(resp);
+}
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	int rc;
+
+	rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+	if (rc)
+		return rc;
+
+	rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+	if (rc)
+		return rc;
+
+	return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL);
+}
+
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	int rc;
+	u8 *nfcid2;
+
+	rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+	if (rc)
+		return rc;
+
+	rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+	if (rc)
+		return rc;
+
+	nfcid2 = kzalloc(NFC_NFCID2_MAXSIZE, GFP_KERNEL);
+	if (!nfcid2)
+		return -ENOMEM;
+
+	nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+	nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+	get_random_bytes(nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+
+	return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2);
+}
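
For reference, the SDD/SEL handlers above implement the NFC-A anticollision
cascade: each SDD_RES carries four NFCID1 bytes plus a BCC byte that must
equal the XOR of those four bytes, and a leading cascade tag (0x88) means
only the last three bytes belong to the ID, so a complete NFCID1 ends up
4, 7 or 10 bytes long (which is also how digital_in_send_sel_req() picks
CL1/CL2/CL3). A minimal user-space sketch of that arithmetic; the constant
and function names below are illustrative, not the kernel's:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SDD_RES_CT 0x88 /* cascade tag */

    struct sdd_res {
        uint8_t nfcid1[4];
        uint8_t bcc;
    };

    /* Append this cascade level's ID bytes to 'id'; returns the number
     * of bytes taken, or -1 on a BCC mismatch (Digital spec 4.7.2.6).
     */
    static int sdd_res_collect(const struct sdd_res *res, uint8_t *id,
                               size_t *id_len)
    {
        uint8_t bcc = 0;
        int i;

        for (i = 0; i < 4; i++)
            bcc ^= res->nfcid1[i];

        if (bcc != res->bcc)
            return -1;

        if (res->nfcid1[0] == SDD_RES_CT) {
            /* more cascade levels follow: only 3 ID bytes here */
            memcpy(id + *id_len, res->nfcid1 + 1, 3);
            *id_len += 3;
            return 3;
        }

        memcpy(id + *id_len, res->nfcid1, 4);
        *id_len += 4;
        return 4;
    }

    int main(void)
    {
        struct sdd_res res = { { SDD_RES_CT, 0x04, 0xC1, 0x67 }, 0 };
        uint8_t id[10];
        size_t id_len = 0;
        int i;

        for (i = 0; i < 4; i++)
            res.bcc ^= res.nfcid1[i]; /* forge a matching BCC */

        printf("level 1 contributed %d ID bytes\n",
               sdd_res_collect(&res, id, &id_len));
        return 0;
    }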
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index c7cf37b..f1d426f 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -21,11 +21,8 @@
 #include <linux/export.h>
 #include <linux/spi/spi.h>
 #include <linux/crc-ccitt.h>
-#include <linux/nfc.h>
 #include <net/nfc/nci_core.h>
 
-#define NCI_SPI_HDR_LEN			4
-#define NCI_SPI_CRC_LEN			2
 #define NCI_SPI_ACK_SHIFT		6
 #define NCI_SPI_MSB_PAYLOAD_MASK	0x3F
 
@@ -41,54 +38,48 @@
 
 #define CRC_INIT		0xFFFF
 
-static int nci_spi_open(struct nci_dev *nci_dev)
-{
-	struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
-	return ndev->ops->open(ndev);
-}
-
-static int nci_spi_close(struct nci_dev *nci_dev)
-{
-	struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
-	return ndev->ops->close(ndev);
-}
-
-static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb)
+static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb,
+			  int cs_change)
 {
 	struct spi_message m;
 	struct spi_transfer t;
 
-	t.tx_buf = skb->data;
-	t.len = skb->len;
-	t.cs_change = 0;
-	t.delay_usecs = ndev->xfer_udelay;
+	memset(&t, 0, sizeof(struct spi_transfer));
+	/* a NULL skb means we just want the SPI chip select line to be raised */
+	if (skb) {
+		t.tx_buf = skb->data;
+		t.len = skb->len;
+	} else {
+		/* still set tx_buf non-NULL to make the driver happy */
+		t.tx_buf = &t;
+		t.len = 0;
+	}
+	t.cs_change = cs_change;
+	t.delay_usecs = nspi->xfer_udelay;
 
 	spi_message_init(&m);
 	spi_message_add_tail(&t, &m);
 
-	return spi_sync(ndev->spi, &m);
+	return spi_sync(nspi->spi, &m);
 }
 
-static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
+int nci_spi_send(struct nci_spi *nspi,
+		 struct completion *write_handshake_completion,
+		 struct sk_buff *skb)
 {
-	struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
 	unsigned int payload_len = skb->len;
 	unsigned char *hdr;
 	int ret;
 	long completion_rc;
 
-	ndev->ops->deassert_int(ndev);
-
 	/* add the NCI SPI header to the start of the buffer */
 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
 	hdr[0] = NCI_SPI_DIRECT_WRITE;
-	hdr[1] = ndev->acknowledge_mode;
+	hdr[1] = nspi->acknowledge_mode;
 	hdr[2] = payload_len >> 8;
 	hdr[3] = payload_len & 0xFF;
 
-	if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+	if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
 		u16 crc;
 
 		crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
@@ -96,123 +87,77 @@
 		*skb_put(skb, 1) = crc & 0xFF;
 	}
 
-	ret = __nci_spi_send(ndev, skb);
+	if (write_handshake_completion) {
+		/* Trick the SPI driver into raising chip select */
+		ret = __nci_spi_send(nspi, NULL, 1);
+		if (ret)
+			goto done;
 
-	kfree_skb(skb);
-	ndev->ops->assert_int(ndev);
+		/* wait for NFC chip hardware handshake to complete */
+		if (wait_for_completion_timeout(write_handshake_completion,
+						msecs_to_jiffies(1000)) == 0) {
+			ret = -ETIME;
+			goto done;
+		}
+	}
 
-	if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED)
+	ret = __nci_spi_send(nspi, skb, 0);
+	if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED)
 		goto done;
 
-	init_completion(&ndev->req_completion);
-	completion_rc =
-		wait_for_completion_interruptible_timeout(&ndev->req_completion,
-							  NCI_SPI_SEND_TIMEOUT);
+	init_completion(&nspi->req_completion);
+	completion_rc =	wait_for_completion_interruptible_timeout(
+							&nspi->req_completion,
+							NCI_SPI_SEND_TIMEOUT);
 
-	if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK)
+	if (completion_rc <= 0 || nspi->req_result == ACKNOWLEDGE_NACK)
 		ret = -EIO;
 
 done:
+	kfree_skb(skb);
+
 	return ret;
 }
-
-static struct nci_ops nci_spi_ops = {
-	.open = nci_spi_open,
-	.close = nci_spi_close,
-	.send = nci_spi_send,
-};
+EXPORT_SYMBOL_GPL(nci_spi_send);
 
 /* ---- Interface to NCI SPI drivers ---- */
 
 /**
- * nci_spi_allocate_device - allocate a new nci spi device
+ * nci_spi_allocate_spi - allocate a new nci spi
  *
  * @spi: SPI device
- * @ops: device operations
- * @supported_protocols: NFC protocols supported by the device
- * @supported_se: NFC Secure Elements supported by the device
- * @acknowledge_mode: Acknowledge mode used by the device
+ * @acknowledge_mode: Acknowledge mode used by the NFC device
  * @delay: delay between transactions in us
+ * @ndev: nci dev to send incoming nci frames to
  */
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
-						struct nci_spi_ops *ops,
-						u32 supported_protocols,
-						u32 supported_se,
-						u8 acknowledge_mode,
-						unsigned int delay)
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+				     u8 acknowledge_mode, unsigned int delay,
+				     struct nci_dev *ndev)
 {
-	struct nci_spi_dev *ndev;
-	int tailroom = 0;
+	struct nci_spi *nspi;
 
-	if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int)
+	nspi = devm_kzalloc(&spi->dev, sizeof(struct nci_spi), GFP_KERNEL);
+	if (!nspi)
 		return NULL;
 
-	if (!supported_protocols)
-		return NULL;
+	nspi->acknowledge_mode = acknowledge_mode;
+	nspi->xfer_udelay = delay;
 
-	ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_dev), GFP_KERNEL);
-	if (!ndev)
-		return NULL;
+	nspi->spi = spi;
+	nspi->ndev = ndev;
 
-	ndev->ops = ops;
-	ndev->acknowledge_mode = acknowledge_mode;
-	ndev->xfer_udelay = delay;
-
-	if (acknowledge_mode == NCI_SPI_CRC_ENABLED)
-		tailroom += NCI_SPI_CRC_LEN;
-
-	ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols,
-					    NCI_SPI_HDR_LEN, tailroom);
-	if (!ndev->nci_dev)
-		return NULL;
-
-	nci_set_drvdata(ndev->nci_dev, ndev);
-
-	return ndev;
+	return nspi;
 }
-EXPORT_SYMBOL_GPL(nci_spi_allocate_device);
+EXPORT_SYMBOL_GPL(nci_spi_allocate_spi);
 
-/**
- * nci_spi_free_device - deallocate nci spi device
- *
- * @ndev: The nci spi device to deallocate
- */
-void nci_spi_free_device(struct nci_spi_dev *ndev)
-{
-	nci_free_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_free_device);
-
-/**
- * nci_spi_register_device - register a nci spi device in the nfc subsystem
- *
- * @pdev: The nci spi device to register
- */
-int nci_spi_register_device(struct nci_spi_dev *ndev)
-{
-	return nci_register_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_register_device);
-
-/**
- * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem
- *
- * @dev: The nci spi device to unregister
- */
-void nci_spi_unregister_device(struct nci_spi_dev *ndev)
-{
-	nci_unregister_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_unregister_device);
-
-static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
+static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
 {
 	struct sk_buff *skb;
 	unsigned char *hdr;
 	u16 crc;
 	int ret;
 
-	skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL);
+	skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
 
 	/* add the NCI SPI header to the start of the buffer */
 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
@@ -225,14 +170,14 @@
 	*skb_put(skb, 1) = crc >> 8;
 	*skb_put(skb, 1) = crc & 0xFF;
 
-	ret = __nci_spi_send(ndev, skb);
+	ret = __nci_spi_send(nspi, skb, 0);
 
 	kfree_skb(skb);
 
 	return ret;
 }
 
-static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
+static struct sk_buff *__nci_spi_read(struct nci_spi *nspi)
 {
 	struct sk_buff *skb;
 	struct spi_message m;
@@ -242,43 +187,49 @@
 	int ret;
 
 	spi_message_init(&m);
+
+	memset(&tx, 0, sizeof(struct spi_transfer));
 	req[0] = NCI_SPI_DIRECT_READ;
-	req[1] = ndev->acknowledge_mode;
+	req[1] = nspi->acknowledge_mode;
 	tx.tx_buf = req;
 	tx.len = 2;
 	tx.cs_change = 0;
 	spi_message_add_tail(&tx, &m);
+
+	memset(&rx, 0, sizeof(struct spi_transfer));
 	rx.rx_buf = resp_hdr;
 	rx.len = 2;
 	rx.cs_change = 1;
 	spi_message_add_tail(&rx, &m);
-	ret = spi_sync(ndev->spi, &m);
 
+	ret = spi_sync(nspi->spi, &m);
 	if (ret)
 		return NULL;
 
-	if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+	if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
 		rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) +
 				resp_hdr[1] + NCI_SPI_CRC_LEN;
 	else
 		rx_len = (resp_hdr[0] << 8) | resp_hdr[1];
 
-	skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL);
+	skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL);
 	if (!skb)
 		return NULL;
 
 	spi_message_init(&m);
+
+	memset(&rx, 0, sizeof(struct spi_transfer));
 	rx.rx_buf = skb_put(skb, rx_len);
 	rx.len = rx_len;
 	rx.cs_change = 0;
-	rx.delay_usecs = ndev->xfer_udelay;
+	rx.delay_usecs = nspi->xfer_udelay;
 	spi_message_add_tail(&rx, &m);
-	ret = spi_sync(ndev->spi, &m);
 
+	ret = spi_sync(nspi->spi, &m);
 	if (ret)
 		goto receive_error;
 
-	if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+	if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
 		*skb_push(skb, 1) = resp_hdr[1];
 		*skb_push(skb, 1) = resp_hdr[0];
 	}
@@ -318,61 +269,53 @@
 }
 
 /**
- * nci_spi_recv_frame - receive frame from NCI SPI drivers
+ * nci_spi_read - read frame from NCI SPI drivers
  *
- * @ndev: The nci spi device
+ * @nspi: The nci spi
  * Context: can sleep
  *
  * This call may only be used from a context that may sleep.  The sleep
  * is non-interruptible, and has no timeout.
  *
- * It returns zero on success, else a negative error code.
+ * It returns an allocated skb containing the frame on success, or NULL.
  */
-int nci_spi_recv_frame(struct nci_spi_dev *ndev)
+struct sk_buff *nci_spi_read(struct nci_spi *nspi)
 {
 	struct sk_buff *skb;
-	int ret = 0;
-
-	ndev->ops->deassert_int(ndev);
 
 	/* Retrieve frame from SPI */
-	skb = __nci_spi_recv_frame(ndev);
-	if (!skb) {
-		ret = -EIO;
+	skb = __nci_spi_read(nspi);
+	if (!skb)
 		goto done;
-	}
 
-	if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+	if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
 		if (!nci_spi_check_crc(skb)) {
-			send_acknowledge(ndev, ACKNOWLEDGE_NACK);
+			send_acknowledge(nspi, ACKNOWLEDGE_NACK);
 			goto done;
 		}
 
 		/* In case of acknowledged mode: if ACK or NACK received,
 		 * unblock completion of latest frame sent.
 		 */
-		ndev->req_result = nci_spi_get_ack(skb);
-		if (ndev->req_result)
-			complete(&ndev->req_completion);
+		nspi->req_result = nci_spi_get_ack(skb);
+		if (nspi->req_result)
+			complete(&nspi->req_completion);
 	}
 
 	/* If there is no payload (ACK/NACK only frame),
 	 * free the socket buffer
 	 */
-	if (skb->len == 0) {
+	if (!skb->len) {
 		kfree_skb(skb);
+		skb = NULL;
 		goto done;
 	}
 
-	if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
-		send_acknowledge(ndev, ACKNOWLEDGE_ACK);
-
-	/* Forward skb to NCI core layer */
-	ret = nci_recv_frame(ndev->nci_dev, skb);
+	if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+		send_acknowledge(nspi, ACKNOWLEDGE_ACK);
 
 done:
-	ndev->ops->assert_int(ndev);
 
-	return ret;
+	return skb;
 }
-EXPORT_SYMBOL_GPL(nci_spi_recv_frame);
+EXPORT_SYMBOL_GPL(nci_spi_read);
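
The reworked send path above frames every NCI packet the same way: a 4-byte
SPI header (operation byte, acknowledge mode, 16-bit big-endian payload
length), the payload itself and, in acknowledged mode, a CRC-CCITT (init
0xFFFF) over everything sent so far; the chip-select pulse plus wait on
write_handshake_completion is layered on top of that. A standalone sketch
of the framing, assuming an illustrative opcode value since the
NCI_SPI_DIRECT_WRITE definition moved out of this file:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SPI_DIRECT_WRITE 0x01 /* illustrative, not the real opcode */
    #define SPI_CRC_ENABLED  0x01

    /* Bitwise equivalent of the kernel's crc_ccitt()
     * (polynomial 0x8408, LSB first).
     */
    static uint16_t crc_ccitt_byte(uint16_t crc, uint8_t c)
    {
        crc ^= c;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        return crc;
    }

    static size_t build_spi_frame(uint8_t *buf, const uint8_t *payload,
                                  uint16_t len, int crc_mode)
    {
        uint16_t crc = 0xFFFF; /* CRC_INIT */
        size_t n = 0;

        buf[n++] = SPI_DIRECT_WRITE;
        buf[n++] = crc_mode;
        buf[n++] = len >> 8;   /* payload length, big endian */
        buf[n++] = len & 0xFF;

        for (uint16_t i = 0; i < len; i++)
            buf[n++] = payload[i];

        if (crc_mode == SPI_CRC_ENABLED) {
            for (size_t i = 0; i < n; i++)
                crc = crc_ccitt_byte(crc, buf[i]);
            buf[n++] = crc >> 8;
            buf[n++] = crc & 0xFF;
        }

        return n;
    }

    int main(void)
    {
        uint8_t payload[] = { 0x20, 0x00, 0x01, 0x00 }; /* e.g. CORE_RESET_CMD */
        uint8_t frame[64];

        /* 4-byte header + 4-byte payload + 2-byte CRC = 10 bytes */
        printf("frame length: %zu\n",
               build_spi_frame(frame, payload, sizeof(payload),
                               SPI_CRC_ENABLED));
        return 0;
    }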
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 68063b2..84b7e3e 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -58,6 +58,7 @@
 	[NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
 	[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
 				     .len = NFC_FIRMWARE_NAME_MAXSIZE },
+	[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1278,6 +1279,91 @@
 	return 0;
 }
 
+struct se_io_ctx {
+	u32 dev_idx;
+	u32 se_idx;
+};
+
+static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err)
+{
+	struct se_io_ctx *ctx = context;
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		kfree(ctx);
+		return;
+	}
+
+	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+			  NFC_CMD_SE_IO);
+	if (!hdr)
+		goto free_msg;
+
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) ||
+	    nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) ||
+	    nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+	kfree(ctx);
+
+	return;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+free_msg:
+	nlmsg_free(msg);
+	kfree(ctx);
+
+	return;
+}
+
+static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	struct se_io_ctx *ctx;
+	u32 dev_idx, se_idx;
+	u8 *apdu;
+	size_t apdu_len;
+	int rc;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_SE_INDEX] ||
+	    !info->attrs[NFC_ATTR_SE_APDU])
+		return -EINVAL;
+
+	dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+	se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]);
+
+	dev = nfc_get_device(dev_idx);
+	if (!dev)
+		return -ENODEV;
+
+	if (!dev->ops || !dev->ops->se_io) {
+		rc = -EOPNOTSUPP;
+		goto put_dev;
+	}
+
+	apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
+	if (apdu_len == 0) {
+		rc = -EINVAL;
+		goto put_dev;
+	}
+
+	apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
+	if (!apdu) {
+		rc = -EINVAL;
+		goto put_dev;
+	}
+
+	ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto put_dev;
+	}
+
+	ctx->dev_idx = dev_idx;
+	ctx->se_idx = se_idx;
+
+	rc = dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+
+put_dev:
+	nfc_put_device(dev);
+
+	return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
@@ -1358,6 +1444,11 @@
 		.done = nfc_genl_dump_ses_done,
 		.policy = nfc_genl_policy,
 	},
+	{
+		.cmd = NFC_CMD_SE_IO,
+		.doit = nfc_genl_se_io,
+		.policy = nfc_genl_policy,
+	},
 };
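
NFC_CMD_SE_IO hands the APDU straight to the driver and reports the secure
element's answer back to user space as a multicast event built in
se_io_cb(). A hedged sketch of the driver-side contract this assumes,
using the se_io_cb_t callback type from include/net/nfc/nfc.h; the
demo_se_io() stub below is hypothetical, not an existing driver:

    #include <net/nfc/nfc.h>

    /* A real driver forwards the APDU to its secure element and calls
     * cb() from the completion path; this stub completes immediately
     * with an empty response and no error.
     */
    static int demo_se_io(struct nfc_dev *dev, u32 se_idx,
                          u8 *apdu, size_t apdu_len,
                          se_io_cb_t cb, void *cb_context)
    {
        cb(cb_context, apdu, 0, 0);
        return 0;
    }

A driver opts in by setting .se_io = demo_se_io in its struct nfc_ops.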
 
 
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 313bf1b..cd958b3 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -142,11 +142,11 @@
 
 	err = rawsock_add_header(skb);
 	if (err)
-		goto error;
+		goto error_skb;
 
 	err = sock_queue_rcv_skb(sk, skb);
 	if (err)
-		goto error;
+		goto error_skb;
 
 	spin_lock_bh(&sk->sk_write_queue.lock);
 	if (!skb_queue_empty(&sk->sk_write_queue))
@@ -158,6 +158,9 @@
 	sock_put(sk);
 	return;
 
+error_skb:
+	kfree_skb(skb);
+
 error:
 	rawsock_report_error(sk, err);
 	sock_put(sk);
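
The fix above splits the error label so that an skb which
sock_queue_rcv_skb() never took ownership of is freed exactly once. The
shape of that idiom as a small user-space sketch (consume() stands in for
the queueing call and is not a kernel function):

    #include <stdio.h>
    #include <stdlib.h>

    /* Takes ownership of buf on success, leaves it with the caller on
     * failure, the same contract as sock_queue_rcv_skb().
     */
    static int consume(void *buf, int fail)
    {
        if (fail)
            return -1;
        free(buf);
        return 0;
    }

    static void demo(int fail)
    {
        void *buf;
        int err;

        buf = malloc(16);
        if (!buf) {
            err = -1;
            goto error; /* nothing of ours to free yet */
        }

        err = consume(buf, fail);
        if (err)
            goto error_buf; /* we still own buf */

        return;

    error_buf:
        free(buf);
    error:
        fprintf(stderr, "demo failed: %d\n", err);
    }

    int main(void)
    {
        demo(0); /* success: consume() freed buf */
        demo(1); /* failure: error_buf frees buf, then we report */
        return 0;
    }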
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index ea36e99..3591cb5 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -9,6 +9,8 @@
 	datapath.o \
 	dp_notify.o \
 	flow.o \
+	flow_netlink.o \
+	flow_table.o \
 	vport.o \
 	vport-internal_dev.o \
 	vport-netdev.o
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2aa13bd..449e077 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,14 +55,10 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_netlink.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
 int ovs_net_id __read_mostly;
 
 static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
@@ -165,7 +161,7 @@
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -225,6 +221,7 @@
 	struct dp_stats_percpu *stats;
 	struct sw_flow_key key;
 	u64 *stats_counter;
+	u32 n_mask_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
@@ -237,7 +234,7 @@
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -262,6 +259,7 @@
 	/* Update datapath statistics. */
 	u64_stats_update_begin(&stats->sync);
 	(*stats_counter)++;
+	stats->n_mask_hit += n_mask_hit;
 	u64_stats_update_end(&stats->sync);
 }
 
@@ -435,7 +433,7 @@
 	upcall->dp_ifindex = dp_ifindex;
 
 	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-	ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
+	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
 	nla_nest_end(user_skb, nla);
 
 	if (upcall_info->userdata)
@@ -455,398 +453,6 @@
 	return err;
 }
 
-/* Called with ovs_mutex. */
-static int flush_flows(struct datapath *dp)
-{
-	struct flow_table *old_table;
-	struct flow_table *new_table;
-
-	old_table = ovsl_dereference(dp->table);
-	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
-	if (!new_table)
-		return -ENOMEM;
-
-	rcu_assign_pointer(dp->table, new_table);
-
-	ovs_flow_tbl_destroy(old_table, true);
-	return 0;
-}
-
-static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
-{
-
-	struct sw_flow_actions *acts;
-	int new_acts_size;
-	int req_size = NLA_ALIGN(attr_len);
-	int next_offset = offsetof(struct sw_flow_actions, actions) +
-					(*sfa)->actions_len;
-
-	if (req_size <= (ksize(*sfa) - next_offset))
-		goto out;
-
-	new_acts_size = ksize(*sfa) * 2;
-
-	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
-			return ERR_PTR(-EMSGSIZE);
-		new_acts_size = MAX_ACTIONS_BUFSIZE;
-	}
-
-	acts = ovs_flow_actions_alloc(new_acts_size);
-	if (IS_ERR(acts))
-		return (void *)acts;
-
-	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
-	acts->actions_len = (*sfa)->actions_len;
-	kfree(*sfa);
-	*sfa = acts;
-
-out:
-	(*sfa)->actions_len += req_size;
-	return  (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
-}
-
-static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
-{
-	struct nlattr *a;
-
-	a = reserve_sfa_size(sfa, nla_attr_size(len));
-	if (IS_ERR(a))
-		return PTR_ERR(a);
-
-	a->nla_type = attrtype;
-	a->nla_len = nla_attr_size(len);
-
-	if (data)
-		memcpy(nla_data(a), data, len);
-	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
-
-	return 0;
-}
-
-static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
-{
-	int used = (*sfa)->actions_len;
-	int err;
-
-	err = add_action(sfa, attrtype, NULL, 0);
-	if (err)
-		return err;
-
-	return used;
-}
-
-static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
-{
-	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
-
-	a->nla_len = sfa->actions_len - st_offset;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
-				     const struct sw_flow_key *key, int depth,
-				     struct sw_flow_actions **sfa);
-
-static int validate_and_copy_sample(const struct nlattr *attr,
-				    const struct sw_flow_key *key, int depth,
-				    struct sw_flow_actions **sfa)
-{
-	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
-	const struct nlattr *probability, *actions;
-	const struct nlattr *a;
-	int rem, start, err, st_acts;
-
-	memset(attrs, 0, sizeof(attrs));
-	nla_for_each_nested(a, attr, rem) {
-		int type = nla_type(a);
-		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
-			return -EINVAL;
-		attrs[type] = a;
-	}
-	if (rem)
-		return -EINVAL;
-
-	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
-	if (!probability || nla_len(probability) != sizeof(u32))
-		return -EINVAL;
-
-	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
-	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
-		return -EINVAL;
-
-	/* validation done, copy sample action. */
-	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
-	if (start < 0)
-		return start;
-	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
-	if (err)
-		return err;
-	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
-	if (st_acts < 0)
-		return st_acts;
-
-	err = validate_and_copy_actions(actions, key, depth + 1, sfa);
-	if (err)
-		return err;
-
-	add_nested_action_end(*sfa, st_acts);
-	add_nested_action_end(*sfa, start);
-
-	return 0;
-}
-
-static int validate_tp_port(const struct sw_flow_key *flow_key)
-{
-	if (flow_key->eth.type == htons(ETH_P_IP)) {
-		if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
-			return 0;
-	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
-		if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
-			return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int validate_and_copy_set_tun(const struct nlattr *attr,
-				     struct sw_flow_actions **sfa)
-{
-	struct sw_flow_match match;
-	struct sw_flow_key key;
-	int err, start;
-
-	ovs_match_init(&match, &key, NULL);
-	err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
-	if (err)
-		return err;
-
-	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
-	if (start < 0)
-		return start;
-
-	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
-			sizeof(match.key->tun_key));
-	add_nested_action_end(*sfa, start);
-
-	return err;
-}
-
-static int validate_set(const struct nlattr *a,
-			const struct sw_flow_key *flow_key,
-			struct sw_flow_actions **sfa,
-			bool *set_tun)
-{
-	const struct nlattr *ovs_key = nla_data(a);
-	int key_type = nla_type(ovs_key);
-
-	/* There can be only one key in a action */
-	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
-		return -EINVAL;
-
-	if (key_type > OVS_KEY_ATTR_MAX ||
-	   (ovs_key_lens[key_type] != nla_len(ovs_key) &&
-	    ovs_key_lens[key_type] != -1))
-		return -EINVAL;
-
-	switch (key_type) {
-	const struct ovs_key_ipv4 *ipv4_key;
-	const struct ovs_key_ipv6 *ipv6_key;
-	int err;
-
-	case OVS_KEY_ATTR_PRIORITY:
-	case OVS_KEY_ATTR_SKB_MARK:
-	case OVS_KEY_ATTR_ETHERNET:
-		break;
-
-	case OVS_KEY_ATTR_TUNNEL:
-		*set_tun = true;
-		err = validate_and_copy_set_tun(a, sfa);
-		if (err)
-			return err;
-		break;
-
-	case OVS_KEY_ATTR_IPV4:
-		if (flow_key->eth.type != htons(ETH_P_IP))
-			return -EINVAL;
-
-		if (!flow_key->ip.proto)
-			return -EINVAL;
-
-		ipv4_key = nla_data(ovs_key);
-		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
-			return -EINVAL;
-
-		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
-			return -EINVAL;
-
-		break;
-
-	case OVS_KEY_ATTR_IPV6:
-		if (flow_key->eth.type != htons(ETH_P_IPV6))
-			return -EINVAL;
-
-		if (!flow_key->ip.proto)
-			return -EINVAL;
-
-		ipv6_key = nla_data(ovs_key);
-		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
-			return -EINVAL;
-
-		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
-			return -EINVAL;
-
-		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
-			return -EINVAL;
-
-		break;
-
-	case OVS_KEY_ATTR_TCP:
-		if (flow_key->ip.proto != IPPROTO_TCP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	case OVS_KEY_ATTR_UDP:
-		if (flow_key->ip.proto != IPPROTO_UDP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	case OVS_KEY_ATTR_SCTP:
-		if (flow_key->ip.proto != IPPROTO_SCTP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int validate_userspace(const struct nlattr *attr)
-{
-	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =	{
-		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
-		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
-	};
-	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
-	int error;
-
-	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
-				 attr, userspace_policy);
-	if (error)
-		return error;
-
-	if (!a[OVS_USERSPACE_ATTR_PID] ||
-	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int copy_action(const struct nlattr *from,
-		       struct sw_flow_actions **sfa)
-{
-	int totlen = NLA_ALIGN(from->nla_len);
-	struct nlattr *to;
-
-	to = reserve_sfa_size(sfa, from->nla_len);
-	if (IS_ERR(to))
-		return PTR_ERR(to);
-
-	memcpy(to, from, totlen);
-	return 0;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
-				     const struct sw_flow_key *key,
-				     int depth,
-				     struct sw_flow_actions **sfa)
-{
-	const struct nlattr *a;
-	int rem, err;
-
-	if (depth >= SAMPLE_ACTION_DEPTH)
-		return -EOVERFLOW;
-
-	nla_for_each_nested(a, attr, rem) {
-		/* Expected argument lengths, (u32)-1 for variable length. */
-		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
-			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
-			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
-			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
-			[OVS_ACTION_ATTR_POP_VLAN] = 0,
-			[OVS_ACTION_ATTR_SET] = (u32)-1,
-			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
-		};
-		const struct ovs_action_push_vlan *vlan;
-		int type = nla_type(a);
-		bool skip_copy;
-
-		if (type > OVS_ACTION_ATTR_MAX ||
-		    (action_lens[type] != nla_len(a) &&
-		     action_lens[type] != (u32)-1))
-			return -EINVAL;
-
-		skip_copy = false;
-		switch (type) {
-		case OVS_ACTION_ATTR_UNSPEC:
-			return -EINVAL;
-
-		case OVS_ACTION_ATTR_USERSPACE:
-			err = validate_userspace(a);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_OUTPUT:
-			if (nla_get_u32(a) >= DP_MAX_PORTS)
-				return -EINVAL;
-			break;
-
-
-		case OVS_ACTION_ATTR_POP_VLAN:
-			break;
-
-		case OVS_ACTION_ATTR_PUSH_VLAN:
-			vlan = nla_data(a);
-			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
-				return -EINVAL;
-			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
-				return -EINVAL;
-			break;
-
-		case OVS_ACTION_ATTR_SET:
-			err = validate_set(a, key, sfa, &skip_copy);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_and_copy_sample(a, key, depth, sfa);
-			if (err)
-				return err;
-			skip_copy = true;
-			break;
-
-		default:
-			return -EINVAL;
-		}
-		if (!skip_copy) {
-			err = copy_action(a, sfa);
-			if (err)
-				return err;
-		}
-	}
-
-	if (rem > 0)
-		return -EINVAL;
-
-	return 0;
-}
-
 static void clear_stats(struct sw_flow *flow)
 {
 	flow->used = 0;
@@ -902,15 +508,16 @@
 	if (err)
 		goto err_flow_free;
 
-	err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
+	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
 	if (err)
 		goto err_flow_free;
-	acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
+	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
 	err = PTR_ERR(acts);
 	if (IS_ERR(acts))
 		goto err_flow_free;
 
-	err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
+	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+				   &flow->key, 0, &acts);
 	rcu_assign_pointer(flow->sf_acts, acts);
 	if (err)
 		goto err_flow_free;
@@ -958,15 +565,18 @@
 	}
 };
 
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+			 struct ovs_dp_megaflow_stats *mega_stats)
 {
-	struct flow_table *table;
 	int i;
 
-	table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
-	stats->n_flows = ovs_flow_tbl_count(table);
+	memset(mega_stats, 0, sizeof(*mega_stats));
+
+	stats->n_flows = ovs_flow_tbl_count(&dp->table);
+	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
 
 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
+
 	for_each_possible_cpu(i) {
 		const struct dp_stats_percpu *percpu_stats;
 		struct dp_stats_percpu local_stats;
@@ -982,6 +592,7 @@
 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
+		mega_stats->n_mask_hit += local_stats.n_mask_hit;
 	}
 }
 
@@ -1005,100 +616,6 @@
 	.name = OVS_FLOW_MCGROUP
 };
 
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
-static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
-{
-	const struct nlattr *a;
-	struct nlattr *start;
-	int err = 0, rem;
-
-	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
-	if (!start)
-		return -EMSGSIZE;
-
-	nla_for_each_nested(a, attr, rem) {
-		int type = nla_type(a);
-		struct nlattr *st_sample;
-
-		switch (type) {
-		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
-				return -EMSGSIZE;
-			break;
-		case OVS_SAMPLE_ATTR_ACTIONS:
-			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
-			if (!st_sample)
-				return -EMSGSIZE;
-			err = actions_to_attr(nla_data(a), nla_len(a), skb);
-			if (err)
-				return err;
-			nla_nest_end(skb, st_sample);
-			break;
-		}
-	}
-
-	nla_nest_end(skb, start);
-	return err;
-}
-
-static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
-{
-	const struct nlattr *ovs_key = nla_data(a);
-	int key_type = nla_type(ovs_key);
-	struct nlattr *start;
-	int err;
-
-	switch (key_type) {
-	case OVS_KEY_ATTR_IPV4_TUNNEL:
-		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
-		if (!start)
-			return -EMSGSIZE;
-
-		err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
-					     nla_data(ovs_key));
-		if (err)
-			return err;
-		nla_nest_end(skb, start);
-		break;
-	default:
-		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
-			return -EMSGSIZE;
-		break;
-	}
-
-	return 0;
-}
-
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
-{
-	const struct nlattr *a;
-	int rem, err;
-
-	nla_for_each_attr(a, attr, len, rem) {
-		int type = nla_type(a);
-
-		switch (type) {
-		case OVS_ACTION_ATTR_SET:
-			err = set_action_to_attr(a, skb);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_SAMPLE:
-			err = sample_action_to_attr(a, skb);
-			if (err)
-				return err;
-			break;
-		default:
-			if (nla_put(skb, type, nla_len(a), nla_data(a)))
-				return -EMSGSIZE;
-			break;
-		}
-	}
-
-	return 0;
-}
-
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -1135,8 +652,7 @@
 	if (!nla)
 		goto nla_put_failure;
 
-	err = ovs_flow_to_nlattrs(&flow->unmasked_key,
-			&flow->unmasked_key, skb);
+	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
 	if (err)
 		goto error;
 	nla_nest_end(skb, nla);
@@ -1145,7 +661,7 @@
 	if (!nla)
 		goto nla_put_failure;
 
-	err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
+	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
 	if (err)
 		goto error;
 
@@ -1155,7 +671,7 @@
 	used = flow->used;
 	stats.n_packets = flow->packet_count;
 	stats.n_bytes = flow->byte_count;
-	tcp_flags = flow->tcp_flags;
+	tcp_flags = (u8)ntohs(flow->tcp_flags);
 	spin_unlock_bh(&flow->lock);
 
 	if (used &&
@@ -1188,7 +704,8 @@
 		sf_acts = rcu_dereference_check(flow->sf_acts,
 						lockdep_ovsl_is_held());
 
-		err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
+		err = ovs_nla_put_actions(sf_acts->actions,
+					  sf_acts->actions_len, skb);
 		if (!err)
 			nla_nest_end(skb, start);
 		else {
@@ -1234,6 +751,14 @@
 	return skb;
 }
 
+static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
+					      const struct sw_flow_key *key)
+{
+	u32 __always_unused n_mask_hit;
+
+	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
+}
+
 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
@@ -1243,7 +768,6 @@
 	struct sw_flow_mask mask;
 	struct sk_buff *reply;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_actions *acts = NULL;
 	struct sw_flow_match match;
 	int error;
@@ -1254,21 +778,21 @@
 		goto error;
 
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_match_from_nlattrs(&match,
-			a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+	error = ovs_nla_get_match(&match,
+				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
 		goto error;
 
 	/* Validate actions. */
 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
 		error = PTR_ERR(acts);
 		if (IS_ERR(acts))
 			goto error;
 
-		ovs_flow_key_mask(&masked_key, &key, &mask);
-		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
-						  &masked_key, 0, &acts);
+		ovs_flow_mask_key(&masked_key, &key, &mask);
+		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+					     &masked_key, 0, &acts);
 		if (error) {
 			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
 			goto err_kfree;
@@ -1284,29 +808,14 @@
 	if (!dp)
 		goto err_unlock_ovs;
 
-	table = ovsl_dereference(dp->table);
-
 	/* Check if this is a duplicate flow */
-	flow = ovs_flow_lookup(table, &key);
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
-		struct sw_flow_mask *mask_p;
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
 			goto err_unlock_ovs;
 
-		/* Expand table, if necessary, to make room. */
-		if (ovs_flow_tbl_need_to_expand(table)) {
-			struct flow_table *new_table;
-
-			new_table = ovs_flow_tbl_expand(table);
-			if (!IS_ERR(new_table)) {
-				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_destroy(table, true);
-				table = ovsl_dereference(dp->table);
-			}
-		}
-
 		/* Allocate flow. */
 		flow = ovs_flow_alloc();
 		if (IS_ERR(flow)) {
@@ -1317,25 +826,14 @@
 
 		flow->key = masked_key;
 		flow->unmasked_key = key;
-
-		/* Make sure mask is unique in the system */
-		mask_p = ovs_sw_flow_mask_find(table, &mask);
-		if (!mask_p) {
-			/* Allocate a new mask if none exsits. */
-			mask_p = ovs_sw_flow_mask_alloc();
-			if (!mask_p)
-				goto err_flow_free;
-			mask_p->key = mask.key;
-			mask_p->range = mask.range;
-			ovs_sw_flow_mask_insert(table, mask_p);
-		}
-
-		ovs_sw_flow_mask_add_ref(mask_p);
-		flow->mask = mask_p;
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		ovs_flow_insert(table, flow);
+		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
+		if (error) {
+			acts = NULL;
+			goto err_flow_free;
+		}
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1356,7 +854,7 @@
 
 		/* The unmasked key has to be the same for flow updates. */
 		error = -EINVAL;
-		if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
 			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
 			goto err_unlock_ovs;
 		}
@@ -1364,7 +862,7 @@
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
 		rcu_assign_pointer(flow->sf_acts, acts);
-		ovs_flow_deferred_free_acts(old_acts);
+		ovs_nla_free_flow_actions(old_acts);
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 					       info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1403,7 +901,6 @@
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -1413,7 +910,7 @@
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -1424,9 +921,8 @@
 		goto unlock;
 	}
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_lookup_unmasked_key(table, &match);
-	if (!flow) {
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1453,7 +949,6 @@
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -1465,18 +960,17 @@
 	}
 
 	if (!a[OVS_FLOW_ATTR_KEY]) {
-		err = flush_flows(dp);
+		err = ovs_flow_tbl_flush(&dp->table);
 		goto unlock;
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		goto unlock;
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_lookup_unmasked_key(table, &match);
-	if (!flow) {
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1487,7 +981,7 @@
 		goto unlock;
 	}
 
-	ovs_flow_remove(table, flow);
+	ovs_flow_tbl_remove(&dp->table, flow);
 
 	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
 				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
@@ -1506,8 +1000,8 @@
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+	struct table_instance *ti;
 	struct datapath *dp;
-	struct flow_table *table;
 
 	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1516,14 +1010,14 @@
 		return -ENODEV;
 	}
 
-	table = rcu_dereference(dp->table);
+	ti = rcu_dereference(dp->table.ti);
 	for (;;) {
 		struct sw_flow *flow;
 		u32 bucket, obj;
 
 		bucket = cb->args[0];
 		obj = cb->args[1];
-		flow = ovs_flow_dump_next(table, &bucket, &obj);
+		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
 		if (!flow)
 			break;
 
@@ -1589,6 +1083,7 @@
 
 	msgsize += nla_total_size(IFNAMSIZ);
 	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
+	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
 
 	return msgsize;
 }
@@ -1598,6 +1093,7 @@
 {
 	struct ovs_header *ovs_header;
 	struct ovs_dp_stats dp_stats;
+	struct ovs_dp_megaflow_stats dp_megaflow_stats;
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
@@ -1613,8 +1109,14 @@
 	if (err)
 		goto nla_put_failure;
 
-	get_dp_stats(dp, &dp_stats);
-	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
+	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
+			&dp_stats))
+		goto nla_put_failure;
+
+	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
+			sizeof(struct ovs_dp_megaflow_stats),
+			&dp_megaflow_stats))
 		goto nla_put_failure;
 
 	return genlmsg_end(skb, ovs_header);
@@ -1687,9 +1189,8 @@
 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
 	/* Allocate table. */
-	err = -ENOMEM;
-	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
-	if (!dp->table)
+	err = ovs_flow_tbl_init(&dp->table);
+	if (err)
 		goto err_free_dp;
 
 	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
@@ -1698,8 +1199,14 @@
 		goto err_destroy_table;
 	}
 
+	for_each_possible_cpu(i) {
+		struct dp_stats_percpu *dpath_stats;
+
+		dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
+		u64_stats_init(&dpath_stats->sync);
+	}
+
 	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
-			GFP_KERNEL);
+			    GFP_KERNEL);
 	if (!dp->ports) {
 		err = -ENOMEM;
 		goto err_destroy_percpu;
@@ -1746,7 +1253,7 @@
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
+	ovs_flow_tbl_destroy(&dp->table);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -2336,32 +1843,6 @@
 	return err;
 }
 
-static void rehash_flow_table(struct work_struct *work)
-{
-	struct datapath *dp;
-	struct net *net;
-
-	ovs_lock();
-	rtnl_lock();
-	for_each_net(net) {
-		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
-
-		list_for_each_entry(dp, &ovs_net->dps, list_node) {
-			struct flow_table *old_table = ovsl_dereference(dp->table);
-			struct flow_table *new_table;
-
-			new_table = ovs_flow_tbl_rehash(old_table);
-			if (!IS_ERR(new_table)) {
-				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_destroy(old_table, true);
-			}
-		}
-	}
-	rtnl_unlock();
-	ovs_unlock();
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-}
-
 static int __net_init ovs_init_net(struct net *net)
 {
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
@@ -2419,8 +1900,6 @@
 	if (err < 0)
 		goto error_unreg_notifier;
 
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-
 	return 0;
 
 error_unreg_notifier:
@@ -2437,7 +1916,6 @@
 
 static void dp_cleanup(void)
 {
-	cancel_delayed_work_sync(&rehash_flow_wq);
 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
 	unregister_pernet_device(&ovs_net_ops);
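
get_dp_stats() above now also folds the per-CPU n_mask_hit counters into
struct ovs_dp_megaflow_stats; dividing the total by hits plus misses gives
the average number of masks probed per packet lookup, which is the figure
the new @n_mask_hit kernel-doc in datapath.h describes. A user-space sketch
of that aggregation (the struct below is a simplified stand-in for
dp_stats_percpu, without the u64_stats seqcount):

    #include <stdint.h>
    #include <stdio.h>

    struct dp_stats {
        uint64_t n_hit, n_missed, n_lost, n_mask_hit;
    };

    static double avg_masks_per_packet(const struct dp_stats *percpu,
                                       int ncpu)
    {
        uint64_t hit = 0, missed = 0, mask_hit = 0;

        for (int i = 0; i < ncpu; i++) {
            hit += percpu[i].n_hit;
            missed += percpu[i].n_missed;
            mask_hit += percpu[i].n_mask_hit;
        }

        if (!(hit + missed))
            return 0.0;

        return (double)mask_hit / (double)(hit + missed);
    }

    int main(void)
    {
        struct dp_stats cpus[2] = {
            { .n_hit = 90, .n_missed = 10, .n_mask_hit = 250 },
            { .n_hit = 80, .n_missed = 20, .n_mask_hit = 150 },
        };

        /* (250 + 150) / (170 + 30) = 2.0 masks per packet */
        printf("%.1f\n", avg_masks_per_packet(cpus, 2));
        return 0;
    }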
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4d109c1..d3d14a58 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -27,6 +27,7 @@
 #include <linux/u64_stats_sync.h>
 
 #include "flow.h"
+#include "flow_table.h"
 #include "vport.h"
 
 #define DP_MAX_PORTS           USHRT_MAX
@@ -45,11 +46,15 @@
  * @n_lost: Number of received packets that had no matching flow in the flow
  * table that could not be sent to userspace (normally due to an overflow in
  * one of the datapath's queues).
+ * @n_mask_hit: Number of masks looked up for flow match.
+ *   @n_mask_hit / (@n_hit + @n_missed) gives the average number of masks
+ *   looked up per packet.
  */
 struct dp_stats_percpu {
 	u64 n_hit;
 	u64 n_missed;
 	u64 n_lost;
+	u64 n_mask_hit;
 	struct u64_stats_sync sync;
 };
 
@@ -57,7 +62,7 @@
  * struct datapath - datapath for flow-based packet switching
  * @rcu: RCU callback head for deferred destruction.
  * @list_node: Element in global 'dps' list.
- * @table: Current flow table.  Protected by ovs_mutex and RCU.
+ * @table: flow table.
  * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
  * ovs_mutex and RCU.
  * @stats_percpu: Per-CPU datapath statistics.
@@ -71,7 +76,7 @@
 	struct list_head list_node;
 
 	/* Flow table. */
-	struct flow_table __rcu *table;
+	struct flow_table table;
 
 	/* Switch ports. */
 	struct hlist_head *ports;
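
With the table embedded, the datapath no longer swaps a flow_table pointer
under RCU; only the resizable bucket array behind it is RCU-managed. A
hedged kernel-context sketch of that layering; the field names approximate
what the new flow_table.h is expected to declare and are not copied from
this patch:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct table_instance {              /* RCU-replaced on resize */
        struct flex_array *buckets;      /* hash buckets */
        unsigned int n_buckets;
        struct rcu_head rcu;
    };

    struct flow_table {                  /* embedded in struct datapath */
        struct table_instance __rcu *ti;
        struct list_head mask_list;      /* shared sw_flow_mask list */
        unsigned int count;
    };

This is why ovs_flow_cmd_dump() above dereferences dp->table.ti once and
walks it with a (bucket, obj) cursor instead of re-reading dp->table.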
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 410db90..b409f52 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -45,202 +45,38 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 
-static struct kmem_cache *flow_cache;
-
-static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
-		struct sw_flow_key_range *range, u8 val);
-
-static void update_range__(struct sw_flow_match *match,
-			  size_t offset, size_t size, bool is_mask)
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
 {
-	struct sw_flow_key_range *range = NULL;
-	size_t start = rounddown(offset, sizeof(long));
-	size_t end = roundup(offset + size, sizeof(long));
+	struct timespec cur_ts;
+	u64 cur_ms, idle_ms;
 
-	if (!is_mask)
-		range = &match->range;
-	else if (match->mask)
-		range = &match->mask->range;
+	ktime_get_ts(&cur_ts);
+	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+		 cur_ts.tv_nsec / NSEC_PER_MSEC;
 
-	if (!range)
-		return;
-
-	if (range->start == range->end) {
-		range->start = start;
-		range->end = end;
-		return;
-	}
-
-	if (range->start > start)
-		range->start = start;
-
-	if (range->end < end)
-		range->end = end;
+	return cur_ms - idle_ms;
 }
 
-#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
-	do { \
-		update_range__(match, offsetof(struct sw_flow_key, field),  \
-				     sizeof((match)->key->field), is_mask); \
-		if (is_mask) {						    \
-			if ((match)->mask)				    \
-				(match)->mask->key.field = value;	    \
-		} else {                                                    \
-			(match)->key->field = value;		            \
-		}                                                           \
-	} while (0)
+#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
-	do { \
-		update_range__(match, offsetof(struct sw_flow_key, field),  \
-				len, is_mask);                              \
-		if (is_mask) {						    \
-			if ((match)->mask)				    \
-				memcpy(&(match)->mask->key.field, value_p, len);\
-		} else {                                                    \
-			memcpy(&(match)->key->field, value_p, len);         \
-		}                                                           \
-	} while (0)
-
-static u16 range_n_bytes(const struct sw_flow_key_range *range)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 {
-	return range->end - range->start;
-}
+	__be16 tcp_flags = 0;
 
-void ovs_match_init(struct sw_flow_match *match,
-		    struct sw_flow_key *key,
-		    struct sw_flow_mask *mask)
-{
-	memset(match, 0, sizeof(*match));
-	match->key = key;
-	match->mask = mask;
-
-	memset(key, 0, sizeof(*key));
-
-	if (mask) {
-		memset(&mask->key, 0, sizeof(mask->key));
-		mask->range.start = mask->range.end = 0;
-	}
-}
-
-static bool ovs_match_validate(const struct sw_flow_match *match,
-		u64 key_attrs, u64 mask_attrs)
-{
-	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
-	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */
-
-	/* The following mask attributes allowed only if they
-	 * pass the validation tests. */
-	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
-			| (1 << OVS_KEY_ATTR_IPV6)
-			| (1 << OVS_KEY_ATTR_TCP)
-			| (1 << OVS_KEY_ATTR_UDP)
-			| (1 << OVS_KEY_ATTR_SCTP)
-			| (1 << OVS_KEY_ATTR_ICMP)
-			| (1 << OVS_KEY_ATTR_ICMPV6)
-			| (1 << OVS_KEY_ATTR_ARP)
-			| (1 << OVS_KEY_ATTR_ND));
-
-	/* Always allowed mask fields. */
-	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
-		       | (1 << OVS_KEY_ATTR_IN_PORT)
-		       | (1 << OVS_KEY_ATTR_ETHERTYPE));
-
-	/* Check key attributes. */
-	if (match->key->eth.type == htons(ETH_P_ARP)
-			|| match->key->eth.type == htons(ETH_P_RARP)) {
-		key_expected |= 1 << OVS_KEY_ATTR_ARP;
-		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
-			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
+	if ((flow->key.eth.type == htons(ETH_P_IP) ||
+	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
+		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}
 
-	if (match->key->eth.type == htons(ETH_P_IP)) {
-		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
-		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
-			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
-
-		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
-			if (match->key->ip.proto == IPPROTO_UDP) {
-				key_expected |= 1 << OVS_KEY_ATTR_UDP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_SCTP) {
-				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_TCP) {
-				key_expected |= 1 << OVS_KEY_ATTR_TCP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_ICMP) {
-				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
-			}
-		}
-	}
-
-	if (match->key->eth.type == htons(ETH_P_IPV6)) {
-		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
-		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
-			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
-
-		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
-			if (match->key->ip.proto == IPPROTO_UDP) {
-				key_expected |= 1 << OVS_KEY_ATTR_UDP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_SCTP) {
-				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_TCP) {
-				key_expected |= 1 << OVS_KEY_ATTR_TCP;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
-			}
-
-			if (match->key->ip.proto == IPPROTO_ICMPV6) {
-				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
-				if (match->mask && (match->mask->key.ip.proto == 0xff))
-					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
-
-				if (match->key->ipv6.tp.src ==
-						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
-				    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
-					key_expected |= 1 << OVS_KEY_ATTR_ND;
-					if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
-						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
-				}
-			}
-		}
-	}
-
-	if ((key_attrs & key_expected) != key_expected) {
-		/* Key attributes check failed. */
-		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
-				key_attrs, key_expected);
-		return false;
-	}
-
-	if ((mask_attrs & mask_allowed) != mask_attrs) {
-		/* Mask attributes check failed. */
-		OVS_NLERR("Contains more mask fields than allowed (mask_attrs=%llx, mask_allowed=%llx).\n",
-				mask_attrs, mask_allowed);
-		return false;
-	}
-
-	return true;
+	spin_lock(&flow->lock);
+	flow->used = jiffies;
+	flow->packet_count++;
+	flow->byte_count += skb->len;
+	flow->tcp_flags |= tcp_flags;
+	spin_unlock(&flow->lock);
 }
 
 static int check_header(struct sk_buff *skb, int len)
@@ -311,19 +147,6 @@
 				  sizeof(struct icmphdr));
 }
 
-u64 ovs_flow_used_time(unsigned long flow_jiffies)
-{
-	struct timespec cur_ts;
-	u64 cur_ms, idle_ms;
-
-	ktime_get_ts(&cur_ts);
-	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
-	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
-		 cur_ts.tv_nsec / NSEC_PER_MSEC;
-
-	return cur_ms - idle_ms;
-}
-
 static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	unsigned int nh_ofs = skb_network_offset(skb);
@@ -372,311 +195,6 @@
 				  sizeof(struct icmp6hdr));
 }
 
-void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask)
-{
-	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-	const long *s = (long *)((u8 *)src + mask->range.start);
-	long *d = (long *)((u8 *)dst + mask->range.start);
-	int i;
-
-	/* The memory outside of 'mask->range' is not set, since
-	 * further operations on 'dst' only use contents within
-	 * 'mask->range'.
-	 */
-	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
-		*d++ = *s++ & *m++;
-}
-
-#define TCP_FLAGS_OFFSET 13
-#define TCP_FLAG_MASK 0x3f
-
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
-{
-	u8 tcp_flags = 0;
-
-	if ((flow->key.eth.type == htons(ETH_P_IP) ||
-	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
-	    flow->key.ip.proto == IPPROTO_TCP &&
-	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
-		u8 *tcp = (u8 *)tcp_hdr(skb);
-		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
-	}
-
-	spin_lock(&flow->lock);
-	flow->used = jiffies;
-	flow->packet_count++;
-	flow->byte_count += skb->len;
-	flow->tcp_flags |= tcp_flags;
-	spin_unlock(&flow->lock);
-}
-
-struct sw_flow_actions *ovs_flow_actions_alloc(int size)
-{
-	struct sw_flow_actions *sfa;
-
-	if (size > MAX_ACTIONS_BUFSIZE)
-		return ERR_PTR(-EINVAL);
-
-	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
-	if (!sfa)
-		return ERR_PTR(-ENOMEM);
-
-	sfa->actions_len = 0;
-	return sfa;
-}
-
-struct sw_flow *ovs_flow_alloc(void)
-{
-	struct sw_flow *flow;
-
-	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
-	if (!flow)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&flow->lock);
-	flow->sf_acts = NULL;
-	flow->mask = NULL;
-
-	return flow;
-}
-
-static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
-{
-	hash = jhash_1word(hash, table->hash_seed);
-	return flex_array_get(table->buckets,
-				(hash & (table->n_buckets - 1)));
-}
-
-static struct flex_array *alloc_buckets(unsigned int n_buckets)
-{
-	struct flex_array *buckets;
-	int i, err;
-
-	buckets = flex_array_alloc(sizeof(struct hlist_head),
-				   n_buckets, GFP_KERNEL);
-	if (!buckets)
-		return NULL;
-
-	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
-	if (err) {
-		flex_array_free(buckets);
-		return NULL;
-	}
-
-	for (i = 0; i < n_buckets; i++)
-		INIT_HLIST_HEAD((struct hlist_head *)
-					flex_array_get(buckets, i));
-
-	return buckets;
-}
-
-static void free_buckets(struct flex_array *buckets)
-{
-	flex_array_free(buckets);
-}
-
-static struct flow_table *__flow_tbl_alloc(int new_size)
-{
-	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
-
-	if (!table)
-		return NULL;
-
-	table->buckets = alloc_buckets(new_size);
-
-	if (!table->buckets) {
-		kfree(table);
-		return NULL;
-	}
-	table->n_buckets = new_size;
-	table->count = 0;
-	table->node_ver = 0;
-	table->keep_flows = false;
-	get_random_bytes(&table->hash_seed, sizeof(u32));
-	table->mask_list = NULL;
-
-	return table;
-}
-
-static void __flow_tbl_destroy(struct flow_table *table)
-{
-	int i;
-
-	if (table->keep_flows)
-		goto skip_flows;
-
-	for (i = 0; i < table->n_buckets; i++) {
-		struct sw_flow *flow;
-		struct hlist_head *head = flex_array_get(table->buckets, i);
-		struct hlist_node *n;
-		int ver = table->node_ver;
-
-		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-			hlist_del(&flow->hash_node[ver]);
-			ovs_flow_free(flow, false);
-		}
-	}
-
-	BUG_ON(!list_empty(table->mask_list));
-	kfree(table->mask_list);
-
-skip_flows:
-	free_buckets(table->buckets);
-	kfree(table);
-}
-
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
-{
-	struct flow_table *table = __flow_tbl_alloc(new_size);
-
-	if (!table)
-		return NULL;
-
-	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
-	if (!table->mask_list) {
-		table->keep_flows = true;
-		__flow_tbl_destroy(table);
-		return NULL;
-	}
-	INIT_LIST_HEAD(table->mask_list);
-
-	return table;
-}
-
-static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
-{
-	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
-
-	__flow_tbl_destroy(table);
-}
-
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
-{
-	if (!table)
-		return;
-
-	if (deferred)
-		call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
-	else
-		__flow_tbl_destroy(table);
-}
-
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
-{
-	struct sw_flow *flow;
-	struct hlist_head *head;
-	int ver;
-	int i;
-
-	ver = table->node_ver;
-	while (*bucket < table->n_buckets) {
-		i = 0;
-		head = flex_array_get(table->buckets, *bucket);
-		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
-			if (i < *last) {
-				i++;
-				continue;
-			}
-			*last = i + 1;
-			return flow;
-		}
-		(*bucket)++;
-		*last = 0;
-	}
-
-	return NULL;
-}
-
-static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
-{
-	struct hlist_head *head;
-
-	head = find_bucket(table, flow->hash);
-	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
-
-	table->count++;
-}
-
-static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
-{
-	int old_ver;
-	int i;
-
-	old_ver = old->node_ver;
-	new->node_ver = !old_ver;
-
-	/* Insert in new table. */
-	for (i = 0; i < old->n_buckets; i++) {
-		struct sw_flow *flow;
-		struct hlist_head *head;
-
-		head = flex_array_get(old->buckets, i);
-
-		hlist_for_each_entry(flow, head, hash_node[old_ver])
-			__tbl_insert(new, flow);
-	}
-
-	new->mask_list = old->mask_list;
-	old->keep_flows = true;
-}
-
-static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
-{
-	struct flow_table *new_table;
-
-	new_table = __flow_tbl_alloc(n_buckets);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	flow_table_copy_flows(table, new_table);
-
-	return new_table;
-}
-
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
-{
-	return __flow_tbl_rehash(table, table->n_buckets);
-}
-
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
-{
-	return __flow_tbl_rehash(table, table->n_buckets * 2);
-}
-
-static void __flow_free(struct sw_flow *flow)
-{
-	kfree((struct sf_flow_acts __force *)flow->sf_acts);
-	kmem_cache_free(flow_cache, flow);
-}
-
-static void rcu_free_flow_callback(struct rcu_head *rcu)
-{
-	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-
-	__flow_free(flow);
-}
-
-void ovs_flow_free(struct sw_flow *flow, bool deferred)
-{
-	if (!flow)
-		return;
-
-	ovs_sw_flow_mask_del_ref(flow->mask, deferred);
-
-	if (deferred)
-		call_rcu(&flow->rcu, rcu_free_flow_callback);
-	else
-		__flow_free(flow);
-}
-
-/* Schedules 'sf_acts' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
-{
-	kfree_rcu(sf_acts, rcu);
-}
-
 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	struct qtag_prefix {
@@ -910,6 +428,7 @@
 				struct tcphdr *tcp = tcp_hdr(skb);
 				key->ipv4.tp.src = tcp->source;
 				key->ipv4.tp.dst = tcp->dest;
+				key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
 			}
 		} else if (key->ip.proto == IPPROTO_UDP) {
 			if (udphdr_ok(skb)) {
@@ -978,6 +497,7 @@
 				struct tcphdr *tcp = tcp_hdr(skb);
 				key->ipv6.tp.src = tcp->source;
 				key->ipv6.tp.dst = tcp->dest;
+				key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
 			}
 		} else if (key->ip.proto == NEXTHDR_UDP) {
 			if (udphdr_ok(skb)) {
@@ -1002,1080 +522,3 @@
 
 	return 0;
 }
-
-static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
-			 int key_end)
-{
-	u32 *hash_key = (u32 *)((u8 *)key + key_start);
-	int hash_u32s = (key_end - key_start) >> 2;
-
-	/* Make sure the number of hash bytes is a multiple of u32. */
-	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
-
-	return jhash2(hash_key, hash_u32s, 0);
-}
-
-static int flow_key_start(const struct sw_flow_key *key)
-{
-	if (key->tun_key.ipv4_dst)
-		return 0;
-	else
-		return rounddown(offsetof(struct sw_flow_key, phy),
-					  sizeof(long));
-}
-
-static bool __cmp_key(const struct sw_flow_key *key1,
-		const struct sw_flow_key *key2,  int key_start, int key_end)
-{
-	const long *cp1 = (long *)((u8 *)key1 + key_start);
-	const long *cp2 = (long *)((u8 *)key2 + key_start);
-	long diffs = 0;
-	int i;
-
-	for (i = key_start; i < key_end;  i += sizeof(long))
-		diffs |= *cp1++ ^ *cp2++;
-
-	return diffs == 0;
-}
-
-static bool __flow_cmp_masked_key(const struct sw_flow *flow,
-		const struct sw_flow_key *key, int key_start, int key_end)
-{
-	return __cmp_key(&flow->key, key, key_start, key_end);
-}
-
-static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
-		  const struct sw_flow_key *key, int key_start, int key_end)
-{
-	return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
-}
-
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
-		const struct sw_flow_key *key, int key_end)
-{
-	int key_start;
-	key_start = flow_key_start(key);
-
-	return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
-
-}
-
-struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
-				       struct sw_flow_match *match)
-{
-	struct sw_flow_key *unmasked = match->key;
-	int key_end = match->range.end;
-	struct sw_flow *flow;
-
-	flow = ovs_flow_lookup(table, unmasked);
-	if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
-		flow = NULL;
-
-	return flow;
-}
-
-static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
-				    const struct sw_flow_key *unmasked,
-				    struct sw_flow_mask *mask)
-{
-	struct sw_flow *flow;
-	struct hlist_head *head;
-	int key_start = mask->range.start;
-	int key_end = mask->range.end;
-	u32 hash;
-	struct sw_flow_key masked_key;
-
-	ovs_flow_key_mask(&masked_key, unmasked, mask);
-	hash = ovs_flow_hash(&masked_key, key_start, key_end);
-	head = find_bucket(table, hash);
-	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
-		if (flow->mask == mask &&
-		    __flow_cmp_masked_key(flow, &masked_key,
-					  key_start, key_end))
-			return flow;
-	}
-	return NULL;
-}
-
-struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
-				const struct sw_flow_key *key)
-{
-	struct sw_flow *flow = NULL;
-	struct sw_flow_mask *mask;
-
-	list_for_each_entry_rcu(mask, tbl->mask_list, list) {
-		flow = ovs_masked_flow_lookup(tbl, key, mask);
-		if (flow)  /* Found */
-			break;
-	}
-
-	return flow;
-}
-
-
-void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
-{
-	flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
-			flow->mask->range.end);
-	__tbl_insert(table, flow);
-}
-
-void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
-{
-	BUG_ON(table->count == 0);
-	hlist_del_rcu(&flow->hash_node[table->node_ver]);
-	table->count--;
-}
-
-/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
-const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
-	[OVS_KEY_ATTR_ENCAP] = -1,
-	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
-	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
-	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
-	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
-	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
-	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
-	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
-	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
-	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
-	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
-	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
-	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
-	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
-	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
-	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
-	[OVS_KEY_ATTR_TUNNEL] = -1,
-};
-
-static bool is_all_zero(const u8 *fp, size_t size)
-{
-	int i;
-
-	if (!fp)
-		return false;
-
-	for (i = 0; i < size; i++)
-		if (fp[i])
-			return false;
-
-	return true;
-}
-
-static int __parse_flow_nlattrs(const struct nlattr *attr,
-			      const struct nlattr *a[],
-			      u64 *attrsp, bool nz)
-{
-	const struct nlattr *nla;
-	u32 attrs;
-	int rem;
-
-	attrs = *attrsp;
-	nla_for_each_nested(nla, attr, rem) {
-		u16 type = nla_type(nla);
-		int expected_len;
-
-		if (type > OVS_KEY_ATTR_MAX) {
-			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
-				  type, OVS_KEY_ATTR_MAX);
-			return -EINVAL;
-		}
-
-		if (attrs & (1 << type)) {
-			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
-			return -EINVAL;
-		}
-
-		expected_len = ovs_key_lens[type];
-		if (nla_len(nla) != expected_len && expected_len != -1) {
-			OVS_NLERR("Key attribute has unexpected length (type=%d"
-				  ", length=%d, expected=%d).\n", type,
-				  nla_len(nla), expected_len);
-			return -EINVAL;
-		}
-
-		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
-			attrs |= 1 << type;
-			a[type] = nla;
-		}
-	}
-	if (rem) {
-		OVS_NLERR("Message has %d unknown bytes.\n", rem);
-		return -EINVAL;
-	}
-
-	*attrsp = attrs;
-	return 0;
-}
-
-static int parse_flow_mask_nlattrs(const struct nlattr *attr,
-			      const struct nlattr *a[], u64 *attrsp)
-{
-	return __parse_flow_nlattrs(attr, a, attrsp, true);
-}
-
-static int parse_flow_nlattrs(const struct nlattr *attr,
-			      const struct nlattr *a[], u64 *attrsp)
-{
-	return __parse_flow_nlattrs(attr, a, attrsp, false);
-}
-
-int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
-			     struct sw_flow_match *match, bool is_mask)
-{
-	struct nlattr *a;
-	int rem;
-	bool ttl = false;
-	__be16 tun_flags = 0;
-
-	nla_for_each_nested(a, attr, rem) {
-		int type = nla_type(a);
-		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
-			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
-			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
-			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
-			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
-			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
-			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
-			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
-		};
-
-		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
-			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
-			type, OVS_TUNNEL_KEY_ATTR_MAX);
-			return -EINVAL;
-		}
-
-		if (ovs_tunnel_key_lens[type] != nla_len(a)) {
-			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
-				  "length (type=%d, length=%d, expected=%d).\n",
-				  type, nla_len(a), ovs_tunnel_key_lens[type]);
-			return -EINVAL;
-		}
-
-		switch (type) {
-		case OVS_TUNNEL_KEY_ATTR_ID:
-			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
-					nla_get_be64(a), is_mask);
-			tun_flags |= TUNNEL_KEY;
-			break;
-		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
-					nla_get_be32(a), is_mask);
-			break;
-		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
-					nla_get_be32(a), is_mask);
-			break;
-		case OVS_TUNNEL_KEY_ATTR_TOS:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
-					nla_get_u8(a), is_mask);
-			break;
-		case OVS_TUNNEL_KEY_ATTR_TTL:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
-					nla_get_u8(a), is_mask);
-			ttl = true;
-			break;
-		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
-			tun_flags |= TUNNEL_DONT_FRAGMENT;
-			break;
-		case OVS_TUNNEL_KEY_ATTR_CSUM:
-			tun_flags |= TUNNEL_CSUM;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
-
-	if (rem > 0) {
-		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
-		return -EINVAL;
-	}
-
-	if (!is_mask) {
-		if (!match->key->tun_key.ipv4_dst) {
-			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
-			return -EINVAL;
-		}
-
-		if (!ttl) {
-			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
-			   const struct ovs_key_ipv4_tunnel *tun_key,
-			   const struct ovs_key_ipv4_tunnel *output)
-{
-	struct nlattr *nla;
-
-	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
-	if (!nla)
-		return -EMSGSIZE;
-
-	if (output->tun_flags & TUNNEL_KEY &&
-	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
-		return -EMSGSIZE;
-	if (output->ipv4_src &&
-		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
-		return -EMSGSIZE;
-	if (output->ipv4_dst &&
-		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
-		return -EMSGSIZE;
-	if (output->ipv4_tos &&
-		nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
-		return -EMSGSIZE;
-	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
-		return -EMSGSIZE;
-	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
-		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
-		return -EMSGSIZE;
-	if ((output->tun_flags & TUNNEL_CSUM) &&
-		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
-		return -EMSGSIZE;
-
-	nla_nest_end(skb, nla);
-	return 0;
-}
-
-static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
-		const struct nlattr **a, bool is_mask)
-{
-	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
-		SW_FLOW_KEY_PUT(match, phy.priority,
-			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
-		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
-	}
-
-	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
-		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
-
-		if (is_mask)
-			in_port = 0xffffffff; /* Always exact match in_port. */
-		else if (in_port >= DP_MAX_PORTS)
-			return -EINVAL;
-
-		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
-		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
-	} else if (!is_mask) {
-		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
-	}
-
-	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
-		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
-
-		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
-		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
-	}
-	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
-		if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
-					is_mask))
-			return -EINVAL;
-		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
-	}
-	return 0;
-}
-
-static int ovs_key_from_nlattrs(struct sw_flow_match *match,  u64 attrs,
-		const struct nlattr **a, bool is_mask)
-{
-	int err;
-	u64 orig_attrs = attrs;
-
-	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
-	if (err)
-		return err;
-
-	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
-		const struct ovs_key_ethernet *eth_key;
-
-		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
-		SW_FLOW_KEY_MEMCPY(match, eth.src,
-				eth_key->eth_src, ETH_ALEN, is_mask);
-		SW_FLOW_KEY_MEMCPY(match, eth.dst,
-				eth_key->eth_dst, ETH_ALEN, is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
-		__be16 tci;
-
-		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
-		if (!(tci & htons(VLAN_TAG_PRESENT))) {
-			if (is_mask)
-				OVS_NLERR("VLAN TCI mask does not have an exact match for VLAN_TAG_PRESENT bit.\n");
-			else
-				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
-
-			return -EINVAL;
-		}
-
-		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
-	} else if (!is_mask)
-		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
-
-	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
-		__be16 eth_type;
-
-		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-		if (is_mask) {
-			/* Always exact match EtherType. */
-			eth_type = htons(0xffff);
-		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
-			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
-					ntohs(eth_type), ETH_P_802_3_MIN);
-			return -EINVAL;
-		}
-
-		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
-	} else if (!is_mask) {
-		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-		const struct ovs_key_ipv4 *ipv4_key;
-
-		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
-		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
-			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
-				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
-			return -EINVAL;
-		}
-		SW_FLOW_KEY_PUT(match, ip.proto,
-				ipv4_key->ipv4_proto, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.tos,
-				ipv4_key->ipv4_tos, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.ttl,
-				ipv4_key->ipv4_ttl, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.frag,
-				ipv4_key->ipv4_frag, is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
-				ipv4_key->ipv4_src, is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
-				ipv4_key->ipv4_dst, is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
-		const struct ovs_key_ipv6 *ipv6_key;
-
-		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
-		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
-			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
-				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
-			return -EINVAL;
-		}
-		SW_FLOW_KEY_PUT(match, ipv6.label,
-				ipv6_key->ipv6_label, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.proto,
-				ipv6_key->ipv6_proto, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.tos,
-				ipv6_key->ipv6_tclass, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.ttl,
-				ipv6_key->ipv6_hlimit, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.frag,
-				ipv6_key->ipv6_frag, is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
-				ipv6_key->ipv6_src,
-				sizeof(match->key->ipv6.addr.src),
-				is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
-				ipv6_key->ipv6_dst,
-				sizeof(match->key->ipv6.addr.dst),
-				is_mask);
-
-		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
-		const struct ovs_key_arp *arp_key;
-
-		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
-		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
-			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
-				  arp_key->arp_op);
-			return -EINVAL;
-		}
-
-		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
-				arp_key->arp_sip, is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
-			arp_key->arp_tip, is_mask);
-		SW_FLOW_KEY_PUT(match, ip.proto,
-				ntohs(arp_key->arp_op), is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
-				arp_key->arp_sha, ETH_ALEN, is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
-				arp_key->arp_tha, ETH_ALEN, is_mask);
-
-		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
-		const struct ovs_key_tcp *tcp_key;
-
-		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
-		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-					tcp_key->tcp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-					tcp_key->tcp_dst, is_mask);
-		} else {
-			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-					tcp_key->tcp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-					tcp_key->tcp_dst, is_mask);
-		}
-		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
-		const struct ovs_key_udp *udp_key;
-
-		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
-		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-					udp_key->udp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-					udp_key->udp_dst, is_mask);
-		} else {
-			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-					udp_key->udp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-					udp_key->udp_dst, is_mask);
-		}
-		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
-		const struct ovs_key_sctp *sctp_key;
-
-		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
-		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-					sctp_key->sctp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-					sctp_key->sctp_dst, is_mask);
-		} else {
-			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-					sctp_key->sctp_src, is_mask);
-			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-					sctp_key->sctp_dst, is_mask);
-		}
-		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
-		const struct ovs_key_icmp *icmp_key;
-
-		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-				htons(icmp_key->icmp_type), is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-				htons(icmp_key->icmp_code), is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
-		const struct ovs_key_icmpv6 *icmpv6_key;
-
-		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
-		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-				htons(icmpv6_key->icmpv6_type), is_mask);
-		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-				htons(icmpv6_key->icmpv6_code), is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
-	}
-
-	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
-		const struct ovs_key_nd *nd_key;
-
-		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
-		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
-			nd_key->nd_target,
-			sizeof(match->key->ipv6.nd.target),
-			is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
-			nd_key->nd_sll, ETH_ALEN, is_mask);
-		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
-				nd_key->nd_tll, ETH_ALEN, is_mask);
-		attrs &= ~(1 << OVS_KEY_ATTR_ND);
-	}
-
-	if (attrs != 0)
-		return -EINVAL;
-
-	return 0;
-}
-
-/**
- * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
- * mask. If 'mask' is NULL, the flow is treated as an exact-match flow.
- * Otherwise it is treated as a wildcarded flow, even when the mask does
- * not include any don't-care bits.
- * @match: receives the extracted flow match information.
- * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence; these should be the fields of the packet that triggered the
- * creation of this flow.
- * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
- * attributes that specify the mask of the wildcarded flow.
- */
-int ovs_match_from_nlattrs(struct sw_flow_match *match,
-			   const struct nlattr *key,
-			   const struct nlattr *mask)
-{
-	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
-	const struct nlattr *encap;
-	u64 key_attrs = 0;
-	u64 mask_attrs = 0;
-	bool encap_valid = false;
-	int err;
-
-	err = parse_flow_nlattrs(key, a, &key_attrs);
-	if (err)
-		return err;
-
-	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
-	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
-	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
-		__be16 tci;
-
-		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
-		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
-			OVS_NLERR("Invalid VLAN frame.\n");
-			return -EINVAL;
-		}
-
-		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
-		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
-		encap = a[OVS_KEY_ATTR_ENCAP];
-		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
-		encap_valid = true;
-
-		if (tci & htons(VLAN_TAG_PRESENT)) {
-			err = parse_flow_nlattrs(encap, a, &key_attrs);
-			if (err)
-				return err;
-		} else if (!tci) {
-			/* Corner case for truncated 802.1Q header. */
-			if (nla_len(encap)) {
-				OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
-				return -EINVAL;
-			}
-		} else {
-			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
-			return  -EINVAL;
-		}
-	}
-
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
-	if (err)
-		return err;
-
-	if (mask) {
-		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
-		if (err)
-			return err;
-
-		if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP)  {
-			__be16 eth_type = 0;
-			__be16 tci = 0;
-
-			if (!encap_valid) {
-				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
-				return  -EINVAL;
-			}
-
-			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
-			if (a[OVS_KEY_ATTR_ETHERTYPE])
-				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-
-			if (eth_type == htons(0xffff)) {
-				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
-				encap = a[OVS_KEY_ATTR_ENCAP];
-				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
-			} else {
-				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
-						ntohs(eth_type));
-				return -EINVAL;
-			}
-
-			if (a[OVS_KEY_ATTR_VLAN])
-				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
-
-			if (!(tci & htons(VLAN_TAG_PRESENT))) {
-				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
-				return -EINVAL;
-			}
-		}
-
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
-		if (err)
-			return err;
-	} else {
-		/* Populate exact match flow's key mask. */
-		if (match->mask)
-			ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
-	}
-
-	if (!ovs_match_validate(match, key_attrs, mask_attrs))
-		return -EINVAL;
-
-	return 0;
-}
-
-/**
- * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
- * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
- * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence.
- *
- * This parses a series of Netlink attributes that form a flow key, which must
- * take the same form accepted by flow_from_nlattrs(); only enough of it is
- * parsed to get the metadata, that is, the parts of the flow key that cannot
- * be extracted from the packet itself.
- */
-
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
-		const struct nlattr *attr)
-{
-	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
-	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
-	u64 attrs = 0;
-	int err;
-	struct sw_flow_match match;
-
-	flow->key.phy.in_port = DP_MAX_PORTS;
-	flow->key.phy.priority = 0;
-	flow->key.phy.skb_mark = 0;
-	memset(tun_key, 0, sizeof(flow->key.tun_key));
-
-	err = parse_flow_nlattrs(attr, a, &attrs);
-	if (err)
-		return -EINVAL;
-
-	memset(&match, 0, sizeof(match));
-	match.key = &flow->key;
-
-	err = metadata_from_nlattrs(&match, &attrs, a, false);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
-		const struct sw_flow_key *output, struct sk_buff *skb)
-{
-	struct ovs_key_ethernet *eth_key;
-	struct nlattr *nla, *encap;
-	bool is_mask = (swkey != output);
-
-	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
-		goto nla_put_failure;
-
-	if ((swkey->tun_key.ipv4_dst || is_mask) &&
-	    ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
-		goto nla_put_failure;
-
-	if (swkey->phy.in_port == DP_MAX_PORTS) {
-		if (is_mask && (output->phy.in_port == 0xffff))
-			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
-				goto nla_put_failure;
-	} else {
-		u16 upper_u16;
-		upper_u16 = !is_mask ? 0 : 0xffff;
-
-		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
-				(upper_u16 << 16) | output->phy.in_port))
-			goto nla_put_failure;
-	}
-
-	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
-		goto nla_put_failure;
-
-	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
-	if (!nla)
-		goto nla_put_failure;
-
-	eth_key = nla_data(nla);
-	memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
-	memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
-
-	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
-		__be16 eth_type;
-		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
-		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
-		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
-			goto nla_put_failure;
-		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
-		if (!swkey->eth.tci)
-			goto unencap;
-	} else
-		encap = NULL;
-
-	if (swkey->eth.type == htons(ETH_P_802_2)) {
-		/*
-		 * An 802.2 Ethertype is represented in Netlink by omitting
-		 * OVS_KEY_ATTR_ETHERTYPE from the flow key attribute, with
-		 * 0xffff in the mask attribute.  The Ethertype can also
-		 * be wildcarded.
-		 */
-		if (is_mask && output->eth.type)
-			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
-						output->eth.type))
-				goto nla_put_failure;
-		goto unencap;
-	}
-
-	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
-		goto nla_put_failure;
-
-	if (swkey->eth.type == htons(ETH_P_IP)) {
-		struct ovs_key_ipv4 *ipv4_key;
-
-		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
-		if (!nla)
-			goto nla_put_failure;
-		ipv4_key = nla_data(nla);
-		ipv4_key->ipv4_src = output->ipv4.addr.src;
-		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
-		ipv4_key->ipv4_proto = output->ip.proto;
-		ipv4_key->ipv4_tos = output->ip.tos;
-		ipv4_key->ipv4_ttl = output->ip.ttl;
-		ipv4_key->ipv4_frag = output->ip.frag;
-	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-		struct ovs_key_ipv6 *ipv6_key;
-
-		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
-		if (!nla)
-			goto nla_put_failure;
-		ipv6_key = nla_data(nla);
-		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
-				sizeof(ipv6_key->ipv6_src));
-		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
-				sizeof(ipv6_key->ipv6_dst));
-		ipv6_key->ipv6_label = output->ipv6.label;
-		ipv6_key->ipv6_proto = output->ip.proto;
-		ipv6_key->ipv6_tclass = output->ip.tos;
-		ipv6_key->ipv6_hlimit = output->ip.ttl;
-		ipv6_key->ipv6_frag = output->ip.frag;
-	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
-		   swkey->eth.type == htons(ETH_P_RARP)) {
-		struct ovs_key_arp *arp_key;
-
-		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
-		if (!nla)
-			goto nla_put_failure;
-		arp_key = nla_data(nla);
-		memset(arp_key, 0, sizeof(struct ovs_key_arp));
-		arp_key->arp_sip = output->ipv4.addr.src;
-		arp_key->arp_tip = output->ipv4.addr.dst;
-		arp_key->arp_op = htons(output->ip.proto);
-		memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
-		memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
-	}
-
-	if ((swkey->eth.type == htons(ETH_P_IP) ||
-	     swkey->eth.type == htons(ETH_P_IPV6)) &&
-	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
-
-		if (swkey->ip.proto == IPPROTO_TCP) {
-			struct ovs_key_tcp *tcp_key;
-
-			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
-			if (!nla)
-				goto nla_put_failure;
-			tcp_key = nla_data(nla);
-			if (swkey->eth.type == htons(ETH_P_IP)) {
-				tcp_key->tcp_src = output->ipv4.tp.src;
-				tcp_key->tcp_dst = output->ipv4.tp.dst;
-			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-				tcp_key->tcp_src = output->ipv6.tp.src;
-				tcp_key->tcp_dst = output->ipv6.tp.dst;
-			}
-		} else if (swkey->ip.proto == IPPROTO_UDP) {
-			struct ovs_key_udp *udp_key;
-
-			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
-			if (!nla)
-				goto nla_put_failure;
-			udp_key = nla_data(nla);
-			if (swkey->eth.type == htons(ETH_P_IP)) {
-				udp_key->udp_src = output->ipv4.tp.src;
-				udp_key->udp_dst = output->ipv4.tp.dst;
-			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-				udp_key->udp_src = output->ipv6.tp.src;
-				udp_key->udp_dst = output->ipv6.tp.dst;
-			}
-		} else if (swkey->ip.proto == IPPROTO_SCTP) {
-			struct ovs_key_sctp *sctp_key;
-
-			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
-			if (!nla)
-				goto nla_put_failure;
-			sctp_key = nla_data(nla);
-			if (swkey->eth.type == htons(ETH_P_IP)) {
-				sctp_key->sctp_src = swkey->ipv4.tp.src;
-				sctp_key->sctp_dst = swkey->ipv4.tp.dst;
-			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-				sctp_key->sctp_src = swkey->ipv6.tp.src;
-				sctp_key->sctp_dst = swkey->ipv6.tp.dst;
-			}
-		} else if (swkey->eth.type == htons(ETH_P_IP) &&
-			   swkey->ip.proto == IPPROTO_ICMP) {
-			struct ovs_key_icmp *icmp_key;
-
-			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
-			if (!nla)
-				goto nla_put_failure;
-			icmp_key = nla_data(nla);
-			icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
-			icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
-		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
-			   swkey->ip.proto == IPPROTO_ICMPV6) {
-			struct ovs_key_icmpv6 *icmpv6_key;
-
-			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
-						sizeof(*icmpv6_key));
-			if (!nla)
-				goto nla_put_failure;
-			icmpv6_key = nla_data(nla);
-			icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
-			icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
-
-			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
-			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
-				struct ovs_key_nd *nd_key;
-
-				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
-				if (!nla)
-					goto nla_put_failure;
-				nd_key = nla_data(nla);
-				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
-							sizeof(nd_key->nd_target));
-				memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
-				memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
-			}
-		}
-	}
-
-unencap:
-	if (encap)
-		nla_nest_end(skb, encap);
-
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-
-/* Initializes the flow module.
- * Returns zero if successful or a negative error code. */
-int ovs_flow_init(void)
-{
-	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
-	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
-
-	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-					0, NULL);
-	if (flow_cache == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/* Uninitializes the flow module. */
-void ovs_flow_exit(void)
-{
-	kmem_cache_destroy(flow_cache);
-}
-
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
-{
-	struct sw_flow_mask *mask;
-
-	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
-	if (mask)
-		mask->ref_count = 0;
-
-	return mask;
-}
-
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
-{
-	mask->ref_count++;
-}
-
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
-	if (!mask)
-		return;
-
-	BUG_ON(!mask->ref_count);
-	mask->ref_count--;
-
-	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
-		if (deferred)
-			kfree_rcu(mask, rcu);
-		else
-			kfree(mask);
-	}
-}
-
-static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
-		const struct sw_flow_mask *b)
-{
-	u8 *a_ = (u8 *)&a->key + a->range.start;
-	u8 *b_ = (u8 *)&b->key + b->range.start;
-
-	return  (a->range.end == b->range.end)
-		&& (a->range.start == b->range.start)
-		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
-}
-
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
-                                           const struct sw_flow_mask *mask)
-{
-	struct list_head *ml;
-
-	list_for_each(ml, tbl->mask_list) {
-		struct sw_flow_mask *m;
-		m = container_of(ml, struct sw_flow_mask, list);
-		if (ovs_sw_flow_mask_equal(mask, m))
-			return m;
-	}
-
-	return NULL;
-}
-
-/**
- * Add a new mask to the mask list.
- * The caller must make sure that 'mask' does not duplicate
- * any mask already on the list.
- */
-void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
-{
-	list_add_rcu(&mask->list, tbl->mask_list);
-}
-
-/**
- * Set the mask's range to 'range' and the covered mask-key bytes to 'val'.
- */
-static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
-		struct sw_flow_key_range *range, u8 val)
-{
-	u8 *m = (u8 *)&mask->key + range->start;
-
-	mask->range = *range;
-	memset(m, val, range_n_bytes(range));
-}
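The hunks above replace the open-coded TCP flag extraction (the
TCP_FLAGS_OFFSET / TCP_FLAG_MASK byte peek) with a TCP_FLAGS_BE16() helper
and widen the accumulated flow->tcp_flags from u8 to __be16, so the full
flag set can be reported per flow. The helper itself lives in flow.h outside
the lines shown here; a minimal sketch consistent with its callers above,
where the exact htons(0x0fff) mask value is an assumption:

	/* tcp_flag_word() (<linux/tcp.h>) aliases bytes 12-15 of the TCP
	 * header; the first halfword of that word holds doff, the reserved
	 * bits and the NS..FIN flag bits.  Masking with htons(0x0fff) drops
	 * the data-offset nibble and keeps the flag bits, in network order. */
	#define TCP_FLAGS_BE16(tp)	(*(__be16 *)&tcp_flag_word(tp) & htons(0x0fff))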
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 212fbf7..1510f51 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -33,14 +33,6 @@
 #include <net/inet_ecn.h>
 
 struct sk_buff;
-struct sw_flow_mask;
-struct flow_table;
-
-struct sw_flow_actions {
-	struct rcu_head rcu;
-	u32 actions_len;
-	struct nlattr actions[];
-};
 
 /* Used to memset ovs_key_ipv4_tunnel padding. */
 #define OVS_TUNNEL_KEY_SIZE					\
@@ -101,6 +93,7 @@
 				struct {
 					__be16 src;		/* TCP/UDP/SCTP source port. */
 					__be16 dst;		/* TCP/UDP/SCTP destination port. */
+					__be16 flags;		/* TCP flags. */
 				} tp;
 				struct {
 					u8 sha[ETH_ALEN];	/* ARP source hardware address. */
@@ -117,6 +110,7 @@
 			struct {
 				__be16 src;		/* TCP/UDP/SCTP source port. */
 				__be16 dst;		/* TCP/UDP/SCTP destination port. */
+				__be16 flags;		/* TCP flags. */
 			} tp;
 			struct {
 				struct in6_addr target;	/* ND target address. */
@@ -127,6 +121,31 @@
 	};
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
+struct sw_flow_key_range {
+	size_t start;
+	size_t end;
+};
+
+struct sw_flow_mask {
+	int ref_count;
+	struct rcu_head rcu;
+	struct list_head list;
+	struct sw_flow_key_range range;
+	struct sw_flow_key key;
+};
+
+struct sw_flow_match {
+	struct sw_flow_key *key;
+	struct sw_flow_key_range range;
+	struct sw_flow_mask *mask;
+};
+
+struct sw_flow_actions {
+	struct rcu_head rcu;
+	u32 actions_len;
+	struct nlattr actions[];
+};
+
 struct sw_flow {
 	struct rcu_head rcu;
 	struct hlist_node hash_node[2];
@@ -141,23 +160,9 @@
 	unsigned long used;	/* Last used time (in jiffies). */
 	u64 packet_count;	/* Number of packets matched. */
 	u64 byte_count;		/* Number of bytes matched. */
-	u8 tcp_flags;		/* Union of seen TCP flags. */
+	__be16 tcp_flags;	/* Union of seen TCP flags. */
 };
 
-struct sw_flow_key_range {
-	size_t start;
-	size_t end;
-};
-
-struct sw_flow_match {
-	struct sw_flow_key *key;
-	struct sw_flow_key_range range;
-	struct sw_flow_mask *mask;
-};
-
-void ovs_match_init(struct sw_flow_match *match,
-		struct sw_flow_key *key, struct sw_flow_mask *mask);
-
 struct arp_eth_header {
 	__be16      ar_hrd;	/* format of hardware address   */
 	__be16      ar_pro;	/* format of protocol address   */
@@ -172,88 +177,9 @@
 	unsigned char       ar_tip[4];		/* target IP address        */
 } __packed;
 
-int ovs_flow_init(void);
-void ovs_flow_exit(void);
-
-struct sw_flow *ovs_flow_alloc(void);
-void ovs_flow_deferred_free(struct sw_flow *);
-void ovs_flow_free(struct sw_flow *, bool deferred);
-
-struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
-void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-
-int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
 void ovs_flow_used(struct sw_flow *, struct sk_buff *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
-int ovs_flow_to_nlattrs(const struct sw_flow_key *,
-		const struct sw_flow_key *, struct sk_buff *);
-int ovs_match_from_nlattrs(struct sw_flow_match *match,
-		      const struct nlattr *,
-		      const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
-		const struct nlattr *attr);
 
-#define MAX_ACTIONS_BUFSIZE    (32 * 1024)
-#define TBL_MIN_BUCKETS		1024
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
 
-struct flow_table {
-	struct flex_array *buckets;
-	unsigned int count, n_buckets;
-	struct rcu_head rcu;
-	struct list_head *mask_list;
-	int node_ver;
-	u32 hash_seed;
-	bool keep_flows;
-};
-
-static inline int ovs_flow_tbl_count(struct flow_table *table)
-{
-	return table->count;
-}
-
-static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
-{
-	return (table->count > table->n_buckets);
-}
-
-struct sw_flow *ovs_flow_lookup(struct flow_table *,
-				const struct sw_flow_key *);
-struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
-				    struct sw_flow_match *match);
-
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
-struct flow_table *ovs_flow_tbl_alloc(int new_size);
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-
-void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow);
-void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow);
-
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx);
-extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
-int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
-			     struct sw_flow_match *match, bool is_mask);
-int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
-			   const struct ovs_key_ipv4_tunnel *tun_key,
-			   const struct ovs_key_ipv4_tunnel *output);
-
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
-		const struct sw_flow_key *key, int key_end);
-
-struct sw_flow_mask {
-	int ref_count;
-	struct rcu_head rcu;
-	struct list_head list;
-	struct sw_flow_key_range range;
-	struct sw_flow_key key;
-};
-
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
-void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
-		const struct sw_flow_mask *);
-void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask);
 #endif /* flow.h */
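The new flow_netlink.c below carries the Netlink (de)serialization split out
of flow.c and flow.h above. Its SW_FLOW_KEY_PUT() and SW_FLOW_KEY_MEMCPY()
macros funnel every field write through update_range__(), which keeps the
match range (or the mask's range) as the smallest long-aligned window covering
all fields written so far, so that masking, comparison and hashing can later
walk one contiguous span of longs. A self-contained userspace sketch of that
bookkeeping, with hypothetical field offsets:

	#include <stddef.h>
	#include <stdio.h>

	#define ROUNDDOWN(x, a)	((x) / (a) * (a))
	#define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

	struct range { size_t start, end; };

	/* Mirrors update_range__(): grow the window to cover
	 * [offset, offset + size), rounded out to long alignment. */
	static void update_range(struct range *r, size_t offset, size_t size)
	{
		size_t start = ROUNDDOWN(offset, sizeof(long));
		size_t end = ROUNDUP(offset + size, sizeof(long));

		if (r->start == r->end) {	/* first field written */
			r->start = start;
			r->end = end;
			return;
		}
		if (r->start > start)
			r->start = start;
		if (r->end < end)
			r->end = end;
	}

	int main(void)
	{
		struct range r = { 0, 0 };

		update_range(&r, 13, 2);	/* hypothetical __be16 field */
		update_range(&r, 40, 4);	/* hypothetical __be32 field */
		/* Prints start=8 end=48 on LP64: one long-aligned span. */
		printf("start=%zu end=%zu\n", r.start, r.end);
		return 0;
	}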
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
new file mode 100644
index 0000000..2bc1bc1
--- /dev/null
+++ b/net/openvswitch/flow_netlink.c
@@ -0,0 +1,1630 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/sctp.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+#include "flow_netlink.h"
+
+static void update_range__(struct sw_flow_match *match,
+			   size_t offset, size_t size, bool is_mask)
+{
+	struct sw_flow_key_range *range = NULL;
+	size_t start = rounddown(offset, sizeof(long));
+	size_t end = roundup(offset + size, sizeof(long));
+
+	if (!is_mask)
+		range = &match->range;
+	else if (match->mask)
+		range = &match->mask->range;
+
+	if (!range)
+		return;
+
+	if (range->start == range->end) {
+		range->start = start;
+		range->end = end;
+		return;
+	}
+
+	if (range->start > start)
+		range->start = start;
+
+	if (range->end < end)
+		range->end = end;
+}
+
+#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
+	do { \
+		update_range__(match, offsetof(struct sw_flow_key, field),  \
+				     sizeof((match)->key->field), is_mask); \
+		if (is_mask) {						    \
+			if ((match)->mask)				    \
+				(match)->mask->key.field = value;	    \
+		} else {                                                    \
+			(match)->key->field = value;		            \
+		}                                                           \
+	} while (0)
+
+#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
+	do { \
+		update_range__(match, offsetof(struct sw_flow_key, field),  \
+				len, is_mask);                              \
+		if (is_mask) {						    \
+			if ((match)->mask)				    \
+				memcpy(&(match)->mask->key.field, value_p, len);\
+		} else {                                                    \
+			memcpy(&(match)->key->field, value_p, len);         \
+		}                                                           \
+	} while (0)
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+	return range->end - range->start;
+}
+
+static bool match_validate(const struct sw_flow_match *match,
+			   u64 key_attrs, u64 mask_attrs)
+{
+	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
+	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */
+
+	/* The following mask attributes are allowed only if they
+	 * pass the validation tests. */
+	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
+			| (1 << OVS_KEY_ATTR_IPV6)
+			| (1 << OVS_KEY_ATTR_TCP)
+			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
+			| (1 << OVS_KEY_ATTR_UDP)
+			| (1 << OVS_KEY_ATTR_SCTP)
+			| (1 << OVS_KEY_ATTR_ICMP)
+			| (1 << OVS_KEY_ATTR_ICMPV6)
+			| (1 << OVS_KEY_ATTR_ARP)
+			| (1 << OVS_KEY_ATTR_ND));
+
+	/* Always allowed mask fields. */
+	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
+		       | (1 << OVS_KEY_ATTR_IN_PORT)
+		       | (1 << OVS_KEY_ATTR_ETHERTYPE));
+
+	/* Check key attributes. */
+	if (match->key->eth.type == htons(ETH_P_ARP)
+			|| match->key->eth.type == htons(ETH_P_RARP)) {
+		key_expected |= 1 << OVS_KEY_ATTR_ARP;
+		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
+	}
+
+	if (match->key->eth.type == htons(ETH_P_IP)) {
+		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
+		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
+
+		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+			if (match->key->ip.proto == IPPROTO_UDP) {
+				key_expected |= 1 << OVS_KEY_ATTR_UDP;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+			}
+
+			if (match->key->ip.proto == IPPROTO_SCTP) {
+				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+			}
+
+			if (match->key->ip.proto == IPPROTO_TCP) {
+				key_expected |= 1 << OVS_KEY_ATTR_TCP;
+				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
+					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+				}
+			}
+
+			if (match->key->ip.proto == IPPROTO_ICMP) {
+				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
+			}
+		}
+	}
+
+	if (match->key->eth.type == htons(ETH_P_IPV6)) {
+		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
+		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
+
+		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+			if (match->key->ip.proto == IPPROTO_UDP) {
+				key_expected |= 1 << OVS_KEY_ATTR_UDP;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+			}
+
+			if (match->key->ip.proto == IPPROTO_SCTP) {
+				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+			}
+
+			if (match->key->ip.proto == IPPROTO_TCP) {
+				key_expected |= 1 << OVS_KEY_ATTR_TCP;
+				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
+					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+				}
+			}
+
+			if (match->key->ip.proto == IPPROTO_ICMPV6) {
+				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
+				if (match->mask && (match->mask->key.ip.proto == 0xff))
+					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
+
+				if (match->key->ipv6.tp.src ==
+						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+				    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+					key_expected |= 1 << OVS_KEY_ATTR_ND;
+					if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
+				}
+			}
+		}
+	}
+
+	if ((key_attrs & key_expected) != key_expected) {
+		/* Key attributes check failed. */
+		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
+				key_attrs, key_expected);
+		return false;
+	}
+
+	if ((mask_attrs & mask_allowed) != mask_attrs) {
+		/* Mask attributes check failed. */
+		OVS_NLERR("Contains more mask fields than allowed (mask_attrs=%llx, mask_allowed=%llx).\n",
+				mask_attrs, mask_allowed);
+		return false;
+	}
+
+	return true;
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
+static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+	[OVS_KEY_ATTR_ENCAP] = -1,
+	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
+	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+	[OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
+	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
+	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+	[OVS_KEY_ATTR_TUNNEL] = -1,
+};
+
+static bool is_all_zero(const u8 *fp, size_t size)
+{
+	int i;
+
+	if (!fp)
+		return false;
+
+	for (i = 0; i < size; i++)
+		if (fp[i])
+			return false;
+
+	return true;
+}
+
+static int __parse_flow_nlattrs(const struct nlattr *attr,
+				const struct nlattr *a[],
+				u64 *attrsp, bool nz)
+{
+	const struct nlattr *nla;
+	u64 attrs;
+	int rem;
+
+	attrs = *attrsp;
+	nla_for_each_nested(nla, attr, rem) {
+		u16 type = nla_type(nla);
+		int expected_len;
+
+		if (type > OVS_KEY_ATTR_MAX) {
+			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+				  type, OVS_KEY_ATTR_MAX);
+			return -EINVAL;
+		}
+
+		if (attrs & (1 << type)) {
+			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
+			return -EINVAL;
+		}
+
+		expected_len = ovs_key_lens[type];
+		if (nla_len(nla) != expected_len && expected_len != -1) {
+			OVS_NLERR("Key attribute has unexpected length (type=%d"
+				  ", length=%d, expected=%d).\n", type,
+				  nla_len(nla), expected_len);
+			return -EINVAL;
+		}
+
+		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+			attrs |= 1 << type;
+			a[type] = nla;
+		}
+	}
+	if (rem) {
+		OVS_NLERR("Message has %d unknown bytes.\n", rem);
+		return -EINVAL;
+	}
+
+	*attrsp = attrs;
+	return 0;
+}
+
+static int parse_flow_mask_nlattrs(const struct nlattr *attr,
+				   const struct nlattr *a[], u64 *attrsp)
+{
+	return __parse_flow_nlattrs(attr, a, attrsp, true);
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+			      const struct nlattr *a[], u64 *attrsp)
+{
+	return __parse_flow_nlattrs(attr, a, attrsp, false);
+}
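+
+/* A minimal sketch of how the bitmap and attribute array produced above
+ * are typically consumed (names as used elsewhere in this file):
+ *
+ *	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ *	u64 attrs = 0;
+ *
+ *	err = parse_flow_nlattrs(key, a, &attrs);
+ *	if (!err && (attrs & (1 << OVS_KEY_ATTR_IN_PORT)))
+ *		in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+ */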
+
+static int ipv4_tun_from_nlattr(const struct nlattr *attr,
+				struct sw_flow_match *match, bool is_mask)
+{
+	struct nlattr *a;
+	int rem;
+	bool ttl = false;
+	__be16 tun_flags = 0;
+
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
+			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
+			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
+			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
+			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
+			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
+			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
+			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
+		};
+
+		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
+			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
+			type, OVS_TUNNEL_KEY_ATTR_MAX);
+			return -EINVAL;
+		}
+
+		if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
+				  " length (type=%d, length=%d, expected=%d).\n",
+				  type, nla_len(a), ovs_tunnel_key_lens[type]);
+			return -EINVAL;
+		}
+
+		switch (type) {
+		case OVS_TUNNEL_KEY_ATTR_ID:
+			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
+					nla_get_be64(a), is_mask);
+			tun_flags |= TUNNEL_KEY;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
+			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+					nla_get_be32(a), is_mask);
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
+			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+					nla_get_be32(a), is_mask);
+			break;
+		case OVS_TUNNEL_KEY_ATTR_TOS:
+			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+					nla_get_u8(a), is_mask);
+			break;
+		case OVS_TUNNEL_KEY_ATTR_TTL:
+			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+					nla_get_u8(a), is_mask);
+			ttl = true;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
+			tun_flags |= TUNNEL_DONT_FRAGMENT;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_CSUM:
+			tun_flags |= TUNNEL_CSUM;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
+
+	if (rem > 0) {
+		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
+		return -EINVAL;
+	}
+
+	if (!is_mask) {
+		if (!match->key->tun_key.ipv4_dst) {
+			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+			return -EINVAL;
+		}
+
+		if (!ttl) {
+			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ipv4_tun_to_nlattr(struct sk_buff *skb,
+			      const struct ovs_key_ipv4_tunnel *tun_key,
+			      const struct ovs_key_ipv4_tunnel *output)
+{
+	struct nlattr *nla;
+
+	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
+	if (!nla)
+		return -EMSGSIZE;
+
+	if (output->tun_flags & TUNNEL_KEY &&
+	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
+		return -EMSGSIZE;
+	if (output->ipv4_src &&
+		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+		return -EMSGSIZE;
+	if (output->ipv4_dst &&
+		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+		return -EMSGSIZE;
+	if (output->ipv4_tos &&
+		nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+		return -EMSGSIZE;
+	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+		return -EMSGSIZE;
+	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
+		return -EMSGSIZE;
+	if ((output->tun_flags & TUNNEL_CSUM) &&
+		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
+		return -EMSGSIZE;
+
+	nla_nest_end(skb, nla);
+	return 0;
+}
+
+
+static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
+				 const struct nlattr **a, bool is_mask)
+{
+	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+		SW_FLOW_KEY_PUT(match, phy.priority,
+			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
+		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+	}
+
+	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+
+		if (is_mask)
+			in_port = 0xffffffff; /* Always exact match in_port. */
+		else if (in_port >= DP_MAX_PORTS)
+			return -EINVAL;
+
+		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
+		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+	} else if (!is_mask) {
+		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
+	}
+
+	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
+		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
+
+		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
+		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
+		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+					 is_mask))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
+	}
+	return 0;
+}
+
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+				const struct nlattr **a, bool is_mask)
+{
+	int err;
+	u64 orig_attrs = attrs;
+
+	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
+	if (err)
+		return err;
+
+	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
+		const struct ovs_key_ethernet *eth_key;
+
+		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+		SW_FLOW_KEY_MEMCPY(match, eth.src,
+				eth_key->eth_src, ETH_ALEN, is_mask);
+		SW_FLOW_KEY_MEMCPY(match, eth.dst,
+				eth_key->eth_dst, ETH_ALEN, is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
+		__be16 tci;
+
+		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+		if (!(tci & htons(VLAN_TAG_PRESENT))) {
+			if (is_mask)
+				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+			else
+				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
+
+			return -EINVAL;
+		}
+
+		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+	} else if (!is_mask)
+		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+
+	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+		__be16 eth_type;
+
+		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+		if (is_mask) {
+			/* Always exact match EtherType. */
+			eth_type = htons(0xffff);
+		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
+					ntohs(eth_type), ETH_P_802_3_MIN);
+			return -EINVAL;
+		}
+
+		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+	} else if (!is_mask) {
+		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+		const struct ovs_key_ipv4 *ipv4_key;
+
+		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
+			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
+				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
+			return -EINVAL;
+		}
+		SW_FLOW_KEY_PUT(match, ip.proto,
+				ipv4_key->ipv4_proto, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.tos,
+				ipv4_key->ipv4_tos, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.ttl,
+				ipv4_key->ipv4_ttl, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.frag,
+				ipv4_key->ipv4_frag, is_mask);
+		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+				ipv4_key->ipv4_src, is_mask);
+		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+				ipv4_key->ipv4_dst, is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
+		const struct ovs_key_ipv6 *ipv6_key;
+
+		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
+			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
+				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
+			return -EINVAL;
+		}
+		SW_FLOW_KEY_PUT(match, ipv6.label,
+				ipv6_key->ipv6_label, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.proto,
+				ipv6_key->ipv6_proto, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.tos,
+				ipv6_key->ipv6_tclass, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.ttl,
+				ipv6_key->ipv6_hlimit, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.frag,
+				ipv6_key->ipv6_frag, is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
+				ipv6_key->ipv6_src,
+				sizeof(match->key->ipv6.addr.src),
+				is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
+				ipv6_key->ipv6_dst,
+				sizeof(match->key->ipv6.addr.dst),
+				is_mask);
+
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
+		const struct ovs_key_arp *arp_key;
+
+		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+				  arp_key->arp_op);
+			return -EINVAL;
+		}
+
+		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+				arp_key->arp_sip, is_mask);
+		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+			arp_key->arp_tip, is_mask);
+		SW_FLOW_KEY_PUT(match, ip.proto,
+				ntohs(arp_key->arp_op), is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
+				arp_key->arp_sha, ETH_ALEN, is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
+				arp_key->arp_tha, ETH_ALEN, is_mask);
+
+		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
+		const struct ovs_key_tcp *tcp_key;
+
+		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+					tcp_key->tcp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+					tcp_key->tcp_dst, is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+					tcp_key->tcp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+					tcp_key->tcp_dst, is_mask);
+		}
+		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
+		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.flags,
+					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+					is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.flags,
+					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+					is_mask);
+		}
+		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
+		const struct ovs_key_udp *udp_key;
+
+		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+					udp_key->udp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+					udp_key->udp_dst, is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+					udp_key->udp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+					udp_key->udp_dst, is_mask);
+		}
+		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
+		const struct ovs_key_sctp *sctp_key;
+
+		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
+		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+					sctp_key->sctp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+					sctp_key->sctp_dst, is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+					sctp_key->sctp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+					sctp_key->sctp_dst, is_mask);
+		}
+		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
+		const struct ovs_key_icmp *icmp_key;
+
+		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+				htons(icmp_key->icmp_type), is_mask);
+		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+				htons(icmp_key->icmp_code), is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
+		const struct ovs_key_icmpv6 *icmpv6_key;
+
+		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+				htons(icmpv6_key->icmpv6_type), is_mask);
+		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+				htons(icmpv6_key->icmpv6_code), is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
+		const struct ovs_key_nd *nd_key;
+
+		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
+			nd_key->nd_target,
+			sizeof(match->key->ipv6.nd.target),
+			is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
+			nd_key->nd_sll, ETH_ALEN, is_mask);
+		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
+				nd_key->nd_tll, ETH_ALEN, is_mask);
+		attrs &= ~(1 << OVS_KEY_ATTR_ND);
+	}
+
+	if (attrs != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void sw_flow_mask_set(struct sw_flow_mask *mask,
+			     struct sw_flow_key_range *range, u8 val)
+{
+	u8 *m = (u8 *)&mask->key + range->start;
+
+	mask->range = *range;
+	memset(m, val, range_n_bytes(range));
+}
+
+/**
+ * ovs_nla_get_match - parses Netlink attributes into a flow key and
+ * mask. If 'mask' is NULL, the flow is treated as an exact-match
+ * flow. Otherwise, it is treated as a wildcarded flow, except when the
+ * mask does not include any don't-care bit.
+ * @match: receives the extracted flow match information.
+ * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence. The fields should be those of the packet that triggered the
+ * creation of this flow.
+ * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
+ * attributes specifying the mask of the wildcarded flow.
+ */
+int ovs_nla_get_match(struct sw_flow_match *match,
+		      const struct nlattr *key,
+		      const struct nlattr *mask)
+{
+	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+	const struct nlattr *encap;
+	u64 key_attrs = 0;
+	u64 mask_attrs = 0;
+	bool encap_valid = false;
+	int err;
+
+	err = parse_flow_nlattrs(key, a, &key_attrs);
+	if (err)
+		return err;
+
+	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
+		__be16 tci;
+
+		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+			OVS_NLERR("Invalid Vlan frame.\n");
+			return -EINVAL;
+		}
+
+		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+		encap = a[OVS_KEY_ATTR_ENCAP];
+		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+		encap_valid = true;
+
+		if (tci & htons(VLAN_TAG_PRESENT)) {
+			err = parse_flow_nlattrs(encap, a, &key_attrs);
+			if (err)
+				return err;
+		} else if (!tci) {
+			/* Corner case for truncated 802.1Q header. */
+			if (nla_len(encap)) {
+				OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
+				return -EINVAL;
+			}
+		} else {
+			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
+			return -EINVAL;
+		}
+	}
+
+	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+	if (err)
+		return err;
+
+	if (mask) {
+		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+		if (err)
+			return err;
+
+		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP)  {
+			__be16 eth_type = 0;
+			__be16 tci = 0;
+
+			if (!encap_valid) {
+				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
+				return  -EINVAL;
+			}
+
+			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+			if (a[OVS_KEY_ATTR_ETHERTYPE])
+				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+			if (eth_type == htons(0xffff)) {
+				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+				encap = a[OVS_KEY_ATTR_ENCAP];
+				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+				if (err)
+					return err;
+			} else {
+				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
+						ntohs(eth_type));
+				return -EINVAL;
+			}
+
+			if (a[OVS_KEY_ATTR_VLAN])
+				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+			if (!(tci & htons(VLAN_TAG_PRESENT))) {
+				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
+				return -EINVAL;
+			}
+		}
+
+		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+		if (err)
+			return err;
+	} else {
+		/* Populate exact match flow's key mask. */
+		if (match->mask)
+			sw_flow_mask_set(match->mask, &match->range, 0xff);
+	}
+
+	if (!match_validate(match, key_attrs, mask_attrs))
+		return -EINVAL;
+
+	return 0;
+}
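+
+/* A usage sketch (assuming 'a[]' holds attributes already split out of
+ * an OVS_FLOW_CMD_NEW request, as done in datapath.c):
+ *
+ *	struct sw_flow_match match;
+ *	struct sw_flow_key key;
+ *	struct sw_flow_mask mask;
+ *
+ *	ovs_match_init(&match, &key, &mask);
+ *	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+ *				  a[OVS_FLOW_ATTR_MASK]);
+ */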
+
+/**
+ * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
+ * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_nla_get_match(), but only enough of it to
+ * get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_nla_get_flow_metadata(struct sw_flow *flow,
+			      const struct nlattr *attr)
+{
+	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
+	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+	u64 attrs = 0;
+	int err;
+	struct sw_flow_match match;
+
+	flow->key.phy.in_port = DP_MAX_PORTS;
+	flow->key.phy.priority = 0;
+	flow->key.phy.skb_mark = 0;
+	memset(tun_key, 0, sizeof(flow->key.tun_key));
+
+	err = parse_flow_nlattrs(attr, a, &attrs);
+	if (err)
+		return -EINVAL;
+
+	memset(&match, 0, sizeof(match));
+	match.key = &flow->key;
+
+	err = metadata_from_nlattrs(&match, &attrs, a, false);
+	if (err)
+		return err;
+
+	return 0;
+}
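+
+/* Sketch of the intended caller (the packet-execute path, where only the
+ * metadata comes from userspace and the rest of the key is extracted
+ * from the skb itself):
+ *
+ *	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
+ */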
+
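+/* Serializes 'output' as nested %OVS_KEY_ATTR_* attributes on 'skb'.
+ * By convention, 'swkey' is the flow's unmasked key; when 'output'
+ * differs from 'swkey' (is_mask below), 'output' is the mask half of a
+ * (key, mask) pair and is encoded with mask semantics.
+ */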
+int ovs_nla_put_flow(const struct sw_flow_key *swkey,
+		     const struct sw_flow_key *output, struct sk_buff *skb)
+{
+	struct ovs_key_ethernet *eth_key;
+	struct nlattr *nla, *encap;
+	bool is_mask = (swkey != output);
+
+	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
+		goto nla_put_failure;
+
+	if ((swkey->tun_key.ipv4_dst || is_mask) &&
+	    ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
+		goto nla_put_failure;
+
+	if (swkey->phy.in_port == DP_MAX_PORTS) {
+		if (is_mask && (output->phy.in_port == 0xffff))
+			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
+				goto nla_put_failure;
+	} else {
+		u16 upper_u16 = is_mask ? 0xffff : 0;
+
+
+		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
+				(upper_u16 << 16) | output->phy.in_port))
+			goto nla_put_failure;
+	}
+
+	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
+		goto nla_put_failure;
+
+	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+	if (!nla)
+		goto nla_put_failure;
+
+	eth_key = nla_data(nla);
+	memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
+	memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
+
+	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+		__be16 eth_type;
+		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
+		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
+			goto nla_put_failure;
+		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+		if (!swkey->eth.tci)
+			goto unencap;
+	} else
+		encap = NULL;
+
+	if (swkey->eth.type == htons(ETH_P_802_2)) {
+		/*
+		 * Ethertype 802.2 is represented in Netlink by an omitted
+		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute and
+		 * 0xffff in the mask attribute.  The Ethertype can also
+		 * be wildcarded.
+		 */
+		if (is_mask && output->eth.type)
+			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
+						output->eth.type))
+				goto nla_put_failure;
+		goto unencap;
+	}
+
+	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
+		goto nla_put_failure;
+
+	if (swkey->eth.type == htons(ETH_P_IP)) {
+		struct ovs_key_ipv4 *ipv4_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv4_key = nla_data(nla);
+		ipv4_key->ipv4_src = output->ipv4.addr.src;
+		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
+		ipv4_key->ipv4_proto = output->ip.proto;
+		ipv4_key->ipv4_tos = output->ip.tos;
+		ipv4_key->ipv4_ttl = output->ip.ttl;
+		ipv4_key->ipv4_frag = output->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+		struct ovs_key_ipv6 *ipv6_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv6_key = nla_data(nla);
+		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
+				sizeof(ipv6_key->ipv6_src));
+		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
+				sizeof(ipv6_key->ipv6_dst));
+		ipv6_key->ipv6_label = output->ipv6.label;
+		ipv6_key->ipv6_proto = output->ip.proto;
+		ipv6_key->ipv6_tclass = output->ip.tos;
+		ipv6_key->ipv6_hlimit = output->ip.ttl;
+		ipv6_key->ipv6_frag = output->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
+		   swkey->eth.type == htons(ETH_P_RARP)) {
+		struct ovs_key_arp *arp_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+		if (!nla)
+			goto nla_put_failure;
+		arp_key = nla_data(nla);
+		memset(arp_key, 0, sizeof(struct ovs_key_arp));
+		arp_key->arp_sip = output->ipv4.addr.src;
+		arp_key->arp_tip = output->ipv4.addr.dst;
+		arp_key->arp_op = htons(output->ip.proto);
+		memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
+		memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
+	}
+
+	if ((swkey->eth.type == htons(ETH_P_IP) ||
+	     swkey->eth.type == htons(ETH_P_IPV6)) &&
+	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+		if (swkey->ip.proto == IPPROTO_TCP) {
+			struct ovs_key_tcp *tcp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+			if (!nla)
+				goto nla_put_failure;
+			tcp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				tcp_key->tcp_src = output->ipv4.tp.src;
+				tcp_key->tcp_dst = output->ipv4.tp.dst;
+				if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
+						 output->ipv4.tp.flags))
+					goto nla_put_failure;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				tcp_key->tcp_src = output->ipv6.tp.src;
+				tcp_key->tcp_dst = output->ipv6.tp.dst;
+				if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
+						 output->ipv6.tp.flags))
+					goto nla_put_failure;
+			}
+		} else if (swkey->ip.proto == IPPROTO_UDP) {
+			struct ovs_key_udp *udp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+			if (!nla)
+				goto nla_put_failure;
+			udp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				udp_key->udp_src = output->ipv4.tp.src;
+				udp_key->udp_dst = output->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				udp_key->udp_src = output->ipv6.tp.src;
+				udp_key->udp_dst = output->ipv6.tp.dst;
+			}
+		} else if (swkey->ip.proto == IPPROTO_SCTP) {
+			struct ovs_key_sctp *sctp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
+			if (!nla)
+				goto nla_put_failure;
+			sctp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				sctp_key->sctp_src = swkey->ipv4.tp.src;
+				sctp_key->sctp_dst = swkey->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				sctp_key->sctp_src = swkey->ipv6.tp.src;
+				sctp_key->sctp_dst = swkey->ipv6.tp.dst;
+			}
+		} else if (swkey->eth.type == htons(ETH_P_IP) &&
+			   swkey->ip.proto == IPPROTO_ICMP) {
+			struct ovs_key_icmp *icmp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmp_key = nla_data(nla);
+			icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
+			icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
+		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+			   swkey->ip.proto == IPPROTO_ICMPV6) {
+			struct ovs_key_icmpv6 *icmpv6_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+						sizeof(*icmpv6_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmpv6_key = nla_data(nla);
+			icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
+			icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
+
+			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+				struct ovs_key_nd *nd_key;
+
+				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+				if (!nla)
+					goto nla_put_failure;
+				nd_key = nla_data(nla);
+				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
+							sizeof(nd_key->nd_target));
+				memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
+				memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
+			}
+		}
+	}
+
+unencap:
+	if (encap)
+		nla_nest_end(skb, encap);
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+#define MAX_ACTIONS_BUFSIZE	(32 * 1024)
+
+struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
+{
+	struct sw_flow_actions *sfa;
+
+	if (size > MAX_ACTIONS_BUFSIZE)
+		return ERR_PTR(-EINVAL);
+
+	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+	if (!sfa)
+		return ERR_PTR(-ENOMEM);
+
+	sfa->actions_len = 0;
+	return sfa;
+}
+
+/* RCU callback used by ovs_nla_free_flow_actions. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+	struct sw_flow_actions *sf_acts = container_of(rcu,
+			struct sw_flow_actions, rcu);
+	kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
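+
+/* A sketch of the intended replace-then-free pattern (assuming the
+ * flow is protected by ovs_lock, as in datapath.c):
+ *
+ *	old_acts = ovsl_dereference(flow->sf_acts);
+ *	rcu_assign_pointer(flow->sf_acts, new_acts);
+ *	ovs_nla_free_flow_actions(old_acts);
+ */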
+
+static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+				       int attr_len)
+{
+	struct sw_flow_actions *acts;
+	int new_acts_size;
+	int req_size = NLA_ALIGN(attr_len);
+	int next_offset = offsetof(struct sw_flow_actions, actions) +
+					(*sfa)->actions_len;
+
+	if (req_size <= (ksize(*sfa) - next_offset))
+		goto out;
+
+	new_acts_size = ksize(*sfa) * 2;
+
+	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+			return ERR_PTR(-EMSGSIZE);
+		new_acts_size = MAX_ACTIONS_BUFSIZE;
+	}
+
+	acts = ovs_nla_alloc_flow_actions(new_acts_size);
+	if (IS_ERR(acts))
+		return (void *)acts;
+
+	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
+	acts->actions_len = (*sfa)->actions_len;
+	kfree(*sfa);
+	*sfa = acts;
+
+out:
+	(*sfa)->actions_len += req_size;
+	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
+}
+
+static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+{
+	struct nlattr *a;
+
+	a = reserve_sfa_size(sfa, nla_attr_size(len));
+	if (IS_ERR(a))
+		return PTR_ERR(a);
+
+	a->nla_type = attrtype;
+	a->nla_len = nla_attr_size(len);
+
+	if (data)
+		memcpy(nla_data(a), data, len);
+	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+
+	return 0;
+}
+
+static inline int add_nested_action_start(struct sw_flow_actions **sfa,
+					  int attrtype)
+{
+	int used = (*sfa)->actions_len;
+	int err;
+
+	err = add_action(sfa, attrtype, NULL, 0);
+	if (err)
+		return err;
+
+	return used;
+}
+
+static inline void add_nested_action_end(struct sw_flow_actions *sfa,
+					 int st_offset)
+{
+	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
+							       st_offset);
+
+	a->nla_len = sfa->actions_len - st_offset;
+}
+
+static int validate_and_copy_sample(const struct nlattr *attr,
+				    const struct sw_flow_key *key, int depth,
+				    struct sw_flow_actions **sfa)
+{
+	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+	const struct nlattr *probability, *actions;
+	const struct nlattr *a;
+	int rem, start, err, st_acts;
+
+	memset(attrs, 0, sizeof(attrs));
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+			return -EINVAL;
+		attrs[type] = a;
+	}
+	if (rem)
+		return -EINVAL;
+
+	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+	if (!probability || nla_len(probability) != sizeof(u32))
+		return -EINVAL;
+
+	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+		return -EINVAL;
+
+	/* Validation done, copy the sample action. */
+	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
+	if (start < 0)
+		return start;
+	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
+			 nla_data(probability), sizeof(u32));
+	if (err)
+		return err;
+	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
+	if (st_acts < 0)
+		return st_acts;
+
+	err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
+	if (err)
+		return err;
+
+	add_nested_action_end(*sfa, st_acts);
+	add_nested_action_end(*sfa, start);
+
+	return 0;
+}
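+
+/* The copied sample action produced above is laid out as nested
+ * attributes in the action buffer:
+ *
+ *	OVS_ACTION_ATTR_SAMPLE
+ *	    OVS_SAMPLE_ATTR_PROBABILITY (u32)
+ *	    OVS_SAMPLE_ATTR_ACTIONS
+ *	        <validated, copied actions>
+ */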
+
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+	if (flow_key->eth.type == htons(ETH_P_IP)) {
+		if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
+			return 0;
+	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+		if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+void ovs_match_init(struct sw_flow_match *match,
+		    struct sw_flow_key *key,
+		    struct sw_flow_mask *mask)
+{
+	memset(match, 0, sizeof(*match));
+	match->key = key;
+	match->mask = mask;
+
+	memset(key, 0, sizeof(*key));
+
+	if (mask) {
+		memset(&mask->key, 0, sizeof(mask->key));
+		mask->range.start = mask->range.end = 0;
+	}
+}
+
+static int validate_and_copy_set_tun(const struct nlattr *attr,
+				     struct sw_flow_actions **sfa)
+{
+	struct sw_flow_match match;
+	struct sw_flow_key key;
+	int err, start;
+
+	ovs_match_init(&match, &key, NULL);
+	err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
+	if (err)
+		return err;
+
+	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
+	if (start < 0)
+		return start;
+
+	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
+			sizeof(match.key->tun_key));
+	add_nested_action_end(*sfa, start);
+
+	return err;
+}
+
+static int validate_set(const struct nlattr *a,
+			const struct sw_flow_key *flow_key,
+			struct sw_flow_actions **sfa,
+			bool *set_tun)
+{
+	const struct nlattr *ovs_key = nla_data(a);
+	int key_type = nla_type(ovs_key);
+
+	/* There can be only one key in an action */
+	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+		return -EINVAL;
+
+	if (key_type > OVS_KEY_ATTR_MAX ||
+	    (ovs_key_lens[key_type] != nla_len(ovs_key) &&
+	     ovs_key_lens[key_type] != -1))
+		return -EINVAL;
+
+	switch (key_type) {
+	const struct ovs_key_ipv4 *ipv4_key;
+	const struct ovs_key_ipv6 *ipv6_key;
+	int err;
+
+	case OVS_KEY_ATTR_PRIORITY:
+	case OVS_KEY_ATTR_SKB_MARK:
+	case OVS_KEY_ATTR_ETHERNET:
+		break;
+
+	case OVS_KEY_ATTR_TUNNEL:
+		*set_tun = true;
+		err = validate_and_copy_set_tun(a, sfa);
+		if (err)
+			return err;
+		break;
+
+	case OVS_KEY_ATTR_IPV4:
+		if (flow_key->eth.type != htons(ETH_P_IP))
+			return -EINVAL;
+
+		if (!flow_key->ip.proto)
+			return -EINVAL;
+
+		ipv4_key = nla_data(ovs_key);
+		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+			return -EINVAL;
+
+		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+			return -EINVAL;
+
+		break;
+
+	case OVS_KEY_ATTR_IPV6:
+		if (flow_key->eth.type != htons(ETH_P_IPV6))
+			return -EINVAL;
+
+		if (!flow_key->ip.proto)
+			return -EINVAL;
+
+		ipv6_key = nla_data(ovs_key);
+		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
+			return -EINVAL;
+
+		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
+			return -EINVAL;
+
+		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
+			return -EINVAL;
+
+		break;
+
+	case OVS_KEY_ATTR_TCP:
+		if (flow_key->ip.proto != IPPROTO_TCP)
+			return -EINVAL;
+
+		return validate_tp_port(flow_key);
+
+	case OVS_KEY_ATTR_UDP:
+		if (flow_key->ip.proto != IPPROTO_UDP)
+			return -EINVAL;
+
+		return validate_tp_port(flow_key);
+
+	case OVS_KEY_ATTR_SCTP:
+		if (flow_key->ip.proto != IPPROTO_SCTP)
+			return -EINVAL;
+
+		return validate_tp_port(flow_key);
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
+		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
+	};
+	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+	int error;
+
+	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+				 attr, userspace_policy);
+	if (error)
+		return error;
+
+	if (!a[OVS_USERSPACE_ATTR_PID] ||
+	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int copy_action(const struct nlattr *from,
+		       struct sw_flow_actions **sfa)
+{
+	int totlen = NLA_ALIGN(from->nla_len);
+	struct nlattr *to;
+
+	to = reserve_sfa_size(sfa, from->nla_len);
+	if (IS_ERR(to))
+		return PTR_ERR(to);
+
+	memcpy(to, from, totlen);
+	return 0;
+}
+
+int ovs_nla_copy_actions(const struct nlattr *attr,
+			 const struct sw_flow_key *key,
+			 int depth,
+			 struct sw_flow_actions **sfa)
+{
+	const struct nlattr *a;
+	int rem, err;
+
+	if (depth >= SAMPLE_ACTION_DEPTH)
+		return -EOVERFLOW;
+
+	nla_for_each_nested(a, attr, rem) {
+		/* Expected argument lengths, (u32)-1 for variable length. */
+		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+			[OVS_ACTION_ATTR_POP_VLAN] = 0,
+			[OVS_ACTION_ATTR_SET] = (u32)-1,
+			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+		};
+		const struct ovs_action_push_vlan *vlan;
+		int type = nla_type(a);
+		bool skip_copy;
+
+		if (type > OVS_ACTION_ATTR_MAX ||
+		    (action_lens[type] != nla_len(a) &&
+		     action_lens[type] != (u32)-1))
+			return -EINVAL;
+
+		skip_copy = false;
+		switch (type) {
+		case OVS_ACTION_ATTR_UNSPEC:
+			return -EINVAL;
+
+		case OVS_ACTION_ATTR_USERSPACE:
+			err = validate_userspace(a);
+			if (err)
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_OUTPUT:
+			if (nla_get_u32(a) >= DP_MAX_PORTS)
+				return -EINVAL;
+			break;
+
+		case OVS_ACTION_ATTR_POP_VLAN:
+			break;
+
+		case OVS_ACTION_ATTR_PUSH_VLAN:
+			vlan = nla_data(a);
+			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+				return -EINVAL;
+			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+				return -EINVAL;
+			break;
+
+		case OVS_ACTION_ATTR_SET:
+			err = validate_set(a, key, sfa, &skip_copy);
+			if (err)
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = validate_and_copy_sample(a, key, depth, sfa);
+			if (err)
+				return err;
+			skip_copy = true;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+		if (!skip_copy) {
+			err = copy_action(a, sfa);
+			if (err)
+				return err;
+		}
+	}
+
+	if (rem > 0)
+		return -EINVAL;
+
+	return 0;
+}
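+
+/* Entry-point sketch, as on flow installation (the initial depth is 0;
+ * 'acts' must come from ovs_nla_alloc_flow_actions()):
+ *
+ *	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+ *	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ *				     &flow->key, 0, &acts);
+ */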
+
+static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
+{
+	const struct nlattr *a;
+	struct nlattr *start;
+	int err = 0, rem;
+
+	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
+	if (!start)
+		return -EMSGSIZE;
+
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		struct nlattr *st_sample;
+
+		switch (type) {
+		case OVS_SAMPLE_ATTR_PROBABILITY:
+			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
+				    sizeof(u32), nla_data(a)))
+				return -EMSGSIZE;
+			break;
+		case OVS_SAMPLE_ATTR_ACTIONS:
+			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
+			if (!st_sample)
+				return -EMSGSIZE;
+			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
+			if (err)
+				return err;
+			nla_nest_end(skb, st_sample);
+			break;
+		}
+	}
+
+	nla_nest_end(skb, start);
+	return err;
+}
+
+static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
+{
+	const struct nlattr *ovs_key = nla_data(a);
+	int key_type = nla_type(ovs_key);
+	struct nlattr *start;
+	int err;
+
+	switch (key_type) {
+	case OVS_KEY_ATTR_IPV4_TUNNEL:
+		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+		if (!start)
+			return -EMSGSIZE;
+
+		err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
+					     nla_data(ovs_key));
+		if (err)
+			return err;
+		nla_nest_end(skb, start);
+		break;
+	default:
+		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
+			return -EMSGSIZE;
+		break;
+	}
+
+	return 0;
+}
+
+int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
+{
+	const struct nlattr *a;
+	int rem, err;
+
+	nla_for_each_attr(a, attr, len, rem) {
+		int type = nla_type(a);
+
+		switch (type) {
+		case OVS_ACTION_ATTR_SET:
+			err = set_action_to_attr(a, skb);
+			if (err)
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = sample_action_to_attr(a, skb);
+			if (err)
+				return err;
+			break;
+		default:
+			if (nla_put(skb, type, nla_len(a), nla_data(a)))
+				return -EMSGSIZE;
+			break;
+		}
+	}
+
+	return 0;
+}
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
new file mode 100644
index 0000000..4401510
--- /dev/null
+++ b/net/openvswitch/flow_netlink.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_NETLINK_H
+#define FLOW_NETLINK_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+
+#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+
+#include "flow.h"
+
+void ovs_match_init(struct sw_flow_match *match,
+		    struct sw_flow_key *key, struct sw_flow_mask *mask);
+
+int ovs_nla_put_flow(const struct sw_flow_key *,
+		     const struct sw_flow_key *, struct sk_buff *);
+int ovs_nla_get_flow_metadata(struct sw_flow *flow,
+			      const struct nlattr *attr);
+int ovs_nla_get_match(struct sw_flow_match *match,
+		      const struct nlattr *,
+		      const struct nlattr *);
+
+int ovs_nla_copy_actions(const struct nlattr *attr,
+			 const struct sw_flow_key *key, int depth,
+			 struct sw_flow_actions **sfa);
+int ovs_nla_put_actions(const struct nlattr *attr,
+			int len, struct sk_buff *skb);
+
+struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
+void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+
+#endif /* FLOW_NETLINK_H */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
new file mode 100644
index 0000000..e425427
--- /dev/null
+++ b/net/openvswitch/flow_table.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/sctp.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+#include "datapath.h"
+
+#define TBL_MIN_BUCKETS		1024
+#define REHASH_INTERVAL		(10 * 60 * HZ)
+
+static struct kmem_cache *flow_cache;
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+	return range->end - range->start;
+}
+
+void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+		       const struct sw_flow_mask *mask)
+{
+	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+	const long *s = (long *)((u8 *)src + mask->range.start);
+	long *d = (long *)((u8 *)dst + mask->range.start);
+	int i;
+
+	/* The memory outside of the 'mask->range' is not set since
+	 * further operations on 'dst' only use contents within
+	 * 'mask->range'.
+	 */
+	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+		*d++ = *s++ & *m++;
+}
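+
+/* The loop above ANDs the key with the mask one 'long' at a time over
+ * the mask's byte range, e.g. a mask byte of 0xff keeps the matching
+ * key byte and 0x00 wildcards it:
+ *
+ *	*dst = *src & *mask;	(per long-sized word in [start, end))
+ */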
+
+struct sw_flow *ovs_flow_alloc(void)
+{
+	struct sw_flow *flow;
+
+	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+	if (!flow)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&flow->lock);
+	flow->sf_acts = NULL;
+	flow->mask = NULL;
+
+	return flow;
+}
+
+int ovs_flow_tbl_count(struct flow_table *table)
+{
+	return table->count;
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+	struct flex_array *buckets;
+	int i, err;
+
+	buckets = flex_array_alloc(sizeof(struct hlist_head),
+				   n_buckets, GFP_KERNEL);
+	if (!buckets)
+		return NULL;
+
+	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+	if (err) {
+		flex_array_free(buckets);
+		return NULL;
+	}
+
+	for (i = 0; i < n_buckets; i++)
+		INIT_HLIST_HEAD((struct hlist_head *)
+					flex_array_get(buckets, i));
+
+	return buckets;
+}
+
+static void flow_free(struct sw_flow *flow)
+{
+	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	kmem_cache_free(flow_cache, flow);
+}
+
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+	flow_free(flow);
+}
+
+static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+{
+	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+
+	kfree(mask);
+}
+
+static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+{
+	if (!mask)
+		return;
+
+	BUG_ON(!mask->ref_count);
+	mask->ref_count--;
+
+	if (!mask->ref_count) {
+		list_del_rcu(&mask->list);
+		if (deferred)
+			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+		else
+			kfree(mask);
+	}
+}
+
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
+{
+	if (!flow)
+		return;
+
+	flow_mask_del_ref(flow->mask, deferred);
+
+	if (deferred)
+		call_rcu(&flow->rcu, rcu_free_flow_callback);
+	else
+		flow_free(flow);
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+	flex_array_free(buckets);
+}
+
+static void __table_instance_destroy(struct table_instance *ti)
+{
+	int i;
+
+	if (ti->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < ti->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(ti->buckets, i);
+		struct hlist_node *n;
+		int ver = ti->node_ver;
+
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+			hlist_del(&flow->hash_node[ver]);
+			ovs_flow_free(flow, false);
+		}
+	}
+
+skip_flows:
+	free_buckets(ti->buckets);
+	kfree(ti);
+}
+
+static struct table_instance *table_instance_alloc(int new_size)
+{
+	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
+
+	if (!ti)
+		return NULL;
+
+	ti->buckets = alloc_buckets(new_size);
+
+	if (!ti->buckets) {
+		kfree(ti);
+		return NULL;
+	}
+	ti->n_buckets = new_size;
+	ti->node_ver = 0;
+	ti->keep_flows = false;
+	get_random_bytes(&ti->hash_seed, sizeof(u32));
+
+	return ti;
+}
+
+int ovs_flow_tbl_init(struct flow_table *table)
+{
+	struct table_instance *ti;
+
+	ti = table_instance_alloc(TBL_MIN_BUCKETS);
+
+	if (!ti)
+		return -ENOMEM;
+
+	rcu_assign_pointer(table->ti, ti);
+	INIT_LIST_HEAD(&table->mask_list);
+	table->last_rehash = jiffies;
+	table->count = 0;
+	return 0;
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+
+	__table_instance_destroy(ti);
+}
+
+static void table_instance_destroy(struct table_instance *ti, bool deferred)
+{
+	if (!ti)
+		return;
+
+	if (deferred)
+		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+	else
+		__table_instance_destroy(ti);
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+	struct table_instance *ti = ovsl_dereference(table->ti);
+
+	table_instance_destroy(ti, false);
+}
+
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
+				       u32 *bucket, u32 *last)
+{
+	struct sw_flow *flow;
+	struct hlist_head *head;
+	int ver;
+	int i;
+
+	ver = ti->node_ver;
+	while (*bucket < ti->n_buckets) {
+		i = 0;
+		head = flex_array_get(ti->buckets, *bucket);
+		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
+			if (i < *last) {
+				i++;
+				continue;
+			}
+			*last = i + 1;
+			return flow;
+		}
+		(*bucket)++;
+		*last = 0;
+	}
+
+	return NULL;
+}
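+
+/* (*bucket, *last) form a resumable cursor, so Netlink dumps can pick
+ * up where they left off. A sketch of a full-table walk:
+ *
+ *	u32 bucket = 0, obj = 0;
+ *
+ *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj)))
+ *		...
+ */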
+
+static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
+{
+	hash = jhash_1word(hash, ti->hash_seed);
+	return flex_array_get(ti->buckets,
+				(hash & (ti->n_buckets - 1)));
+}
+
+static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
+{
+	struct hlist_head *head;
+
+	head = find_bucket(ti, flow->hash);
+	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
+}
+
+static void flow_table_copy_flows(struct table_instance *old,
+				  struct table_instance *new)
+{
+	int old_ver;
+	int i;
+
+	old_ver = old->node_ver;
+	new->node_ver = !old_ver;
+
+	/* Insert in new table. */
+	for (i = 0; i < old->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head;
+
+		head = flex_array_get(old->buckets, i);
+
+		hlist_for_each_entry(flow, head, hash_node[old_ver])
+			table_instance_insert(new, flow);
+	}
+
+	old->keep_flows = true;
+}
+
+static struct table_instance *table_instance_rehash(struct table_instance *ti,
+					    int n_buckets)
+{
+	struct table_instance *new_ti;
+
+	new_ti = table_instance_alloc(n_buckets);
+	if (!new_ti)
+		return NULL;
+
+	flow_table_copy_flows(ti, new_ti);
+
+	return new_ti;
+}
+
+int ovs_flow_tbl_flush(struct flow_table *flow_table)
+{
+	struct table_instance *old_ti;
+	struct table_instance *new_ti;
+
+	old_ti = ovsl_dereference(flow_table->ti);
+	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+	if (!new_ti)
+		return -ENOMEM;
+
+	rcu_assign_pointer(flow_table->ti, new_ti);
+	flow_table->last_rehash = jiffies;
+	flow_table->count = 0;
+
+	table_instance_destroy(old_ti, true);
+	return 0;
+}
+
+static u32 flow_hash(const struct sw_flow_key *key, int key_start,
+		     int key_end)
+{
+	u32 *hash_key = (u32 *)((u8 *)key + key_start);
+	int hash_u32s = (key_end - key_start) >> 2;
+
+	/* Make sure the number of hash bytes is a multiple of u32. */
+	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+
+	return jhash2(hash_key, hash_u32s, 0);
+}
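+
+/* The hash covers only the masked byte range of the key, computed as
+ * 32-bit words; masked_flow_lookup() below pairs it with the same
+ * masking step:
+ *
+ *	ovs_flow_mask_key(&masked, key, mask);
+ *	hash = flow_hash(&masked, mask->range.start, mask->range.end);
+ */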
+
+static int flow_key_start(const struct sw_flow_key *key)
+{
+	if (key->tun_key.ipv4_dst)
+		return 0;
+	else
+		return rounddown(offsetof(struct sw_flow_key, phy),
+					  sizeof(long));
+}
+
+static bool cmp_key(const struct sw_flow_key *key1,
+		    const struct sw_flow_key *key2,
+		    int key_start, int key_end)
+{
+	const long *cp1 = (long *)((u8 *)key1 + key_start);
+	const long *cp2 = (long *)((u8 *)key2 + key_start);
+	long diffs = 0;
+	int i;
+
+	for (i = key_start; i < key_end;  i += sizeof(long))
+		diffs |= *cp1++ ^ *cp2++;
+
+	return diffs == 0;
+}
+
+static bool flow_cmp_masked_key(const struct sw_flow *flow,
+				const struct sw_flow_key *key,
+				int key_start, int key_end)
+{
+	return cmp_key(&flow->key, key, key_start, key_end);
+}
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+			       struct sw_flow_match *match)
+{
+	struct sw_flow_key *key = match->key;
+	int key_start = flow_key_start(key);
+	int key_end = match->range.end;
+
+	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
+}
+
+static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
+					  const struct sw_flow_key *unmasked,
+					  struct sw_flow_mask *mask)
+{
+	struct sw_flow *flow;
+	struct hlist_head *head;
+	int key_start = mask->range.start;
+	int key_end = mask->range.end;
+	u32 hash;
+	struct sw_flow_key masked_key;
+
+	ovs_flow_mask_key(&masked_key, unmasked, mask);
+	hash = flow_hash(&masked_key, key_start, key_end);
+	head = find_bucket(ti, hash);
+	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
+		if (flow->mask == mask && flow->hash == hash &&
+		    flow_cmp_masked_key(flow, &masked_key,
+					  key_start, key_end))
+			return flow;
+	}
+	return NULL;
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+				    const struct sw_flow_key *key,
+				    u32 *n_mask_hit)
+{
+	struct table_instance *ti = rcu_dereference(tbl->ti);
+	struct sw_flow_mask *mask;
+	struct sw_flow *flow;
+
+	*n_mask_hit = 0;
+	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+		(*n_mask_hit)++;
+		flow = masked_flow_lookup(ti, key, mask);
+		if (flow)  /* Found */
+			return flow;
+	}
+	return NULL;
+}
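+
+/* The lookup tries every mask on the list in turn until a masked match
+ * hits; '*n_mask_hit' reports how many masks were tried, which callers
+ * can use as a measure of lookup cost.
+ */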
+
+int ovs_flow_tbl_num_masks(const struct flow_table *table)
+{
+	struct sw_flow_mask *mask;
+	int num = 0;
+
+	list_for_each_entry(mask, &table->mask_list, list)
+		num++;
+
+	return num;
+}
+
+static struct table_instance *table_instance_expand(struct table_instance *ti)
+{
+	return table_instance_rehash(ti, ti->n_buckets * 2);
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+	struct table_instance *ti = ovsl_dereference(table->ti);
+
+	BUG_ON(table->count == 0);
+	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
+	table->count--;
+}
+
+static struct sw_flow_mask *mask_alloc(void)
+{
+	struct sw_flow_mask *mask;
+
+	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+	if (mask)
+		mask->ref_count = 0;
+
+	return mask;
+}
+
+static void mask_add_ref(struct sw_flow_mask *mask)
+{
+	mask->ref_count++;
+}
+
+static bool mask_equal(const struct sw_flow_mask *a,
+		       const struct sw_flow_mask *b)
+{
+	u8 *a_ = (u8 *)&a->key + a->range.start;
+	u8 *b_ = (u8 *)&b->key + b->range.start;
+
+	return  (a->range.end == b->range.end)
+		&& (a->range.start == b->range.start)
+		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
+}
+
+static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
+					   const struct sw_flow_mask *mask)
+{
+	struct list_head *ml;
+
+	list_for_each(ml, &tbl->mask_list) {
+		struct sw_flow_mask *m;
+		m = container_of(ml, struct sw_flow_mask, list);
+		if (mask_equal(mask, m))
+			return m;
+	}
+
+	return NULL;
+}
+
+/* Add the flow's mask to the mask list, reusing an existing mask if an
+ * identical one is already on the list; otherwise 'new' is copied into
+ * a freshly allocated entry.
+ */
+static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
+			    struct sw_flow_mask *new)
+{
+	struct sw_flow_mask *mask;
+	mask = flow_mask_find(tbl, new);
+	if (!mask) {
+		/* Allocate a new mask if none exists. */
+		mask = mask_alloc();
+		if (!mask)
+			return -ENOMEM;
+		mask->key = new->key;
+		mask->range = new->range;
+		list_add_rcu(&mask->list, &tbl->mask_list);
+	}
+
+	mask_add_ref(mask);
+	flow->mask = mask;
+	return 0;
+}
+
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			struct sw_flow_mask *mask)
+{
+	struct table_instance *new_ti = NULL;
+	struct table_instance *ti;
+	int err;
+
+	err = flow_mask_insert(table, flow, mask);
+	if (err)
+		return err;
+
+	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
+			flow->mask->range.end);
+	ti = ovsl_dereference(table->ti);
+	table_instance_insert(ti, flow);
+	table->count++;
+
+	/* Expand table, if necessary, to make room. */
+	if (table->count > ti->n_buckets)
+		new_ti = table_instance_expand(ti);
+	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+		new_ti = table_instance_rehash(ti, ti->n_buckets);
+
+	if (new_ti) {
+		rcu_assign_pointer(table->ti, new_ti);
+		table_instance_destroy(ti, true);
+		table->last_rehash = jiffies;
+	}
+	return 0;
+}
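+
+/* Insertion in brief: the mask is shared through the mask list, the
+ * flow is hashed over its masked range, and the table doubles once
+ * 'count' exceeds 'n_buckets'. The periodic REHASH_INTERVAL rehash
+ * rebuilds the table with a fresh hash_seed from table_instance_alloc().
+ */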
+
+/* Initializes the flow module.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
+	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+
+	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+					0, NULL);
+	if (flow_cache == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Uninitializes the flow module. */
+void ovs_flow_exit(void)
+{
+	kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
new file mode 100644
index 0000000..fbe45d5
--- /dev/null
+++ b/net/openvswitch/flow_table.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_TABLE_H
+#define FLOW_TABLE_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+
+#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+
+#include "flow.h"
+
+struct table_instance {
+	struct flex_array *buckets;
+	unsigned int n_buckets;
+	struct rcu_head rcu;
+	int node_ver;
+	u32 hash_seed;
+	bool keep_flows;
+};
+
+struct flow_table {
+	struct table_instance __rcu *ti;
+	struct list_head mask_list;
+	unsigned long last_rehash;
+	unsigned int count;
+};
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_free(struct sw_flow *, bool deferred);
+
+int ovs_flow_tbl_init(struct flow_table *);
+int ovs_flow_tbl_count(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+int ovs_flow_tbl_flush(struct flow_table *flow_table);
+
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			struct sw_flow_mask *mask);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+int  ovs_flow_tbl_num_masks(const struct flow_table *table);
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
+				       u32 *bucket, u32 *idx);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
+				    const struct sw_flow_key *,
+				    u32 *n_mask_hit);
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+			       struct sw_flow_match *match);
+
+void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+		       const struct sw_flow_mask *mask);
+#endif /* FLOW_TABLE_H */
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c99dea5..a3d6951 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -24,8 +24,6 @@
 #include <linux/if_tunnel.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
 #include <linux/in_route.h>
 #include <linux/inetdevice.h>
 #include <linux/jhash.h>
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 98d3edb..729c687 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -134,7 +134,7 @@
 	netdev->tx_queue_len = 0;
 
 	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
 
 	netdev->vlan_features = netdev->features;
 	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index a481c03..e797a50 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -29,7 +29,6 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/ip_tunnels.h>
-#include <net/udp.h>
 #include <net/rtnetlink.h>
 #include <net/route.h>
 #include <net/dsfield.h>
@@ -173,7 +172,7 @@
 
 	skb->local_df = 1;
 
-	inet_get_local_port_range(&port_min, &port_max);
+	inet_get_local_port_range(net, &port_min, &port_max);
 	src_port = vxlan_src_port(port_min, port_max, skb);
 
 	err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6f65dbe..d830a95f 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -118,6 +118,7 @@
 {
 	struct vport *vport;
 	size_t alloc_size;
+	int i;
 
 	alloc_size = sizeof(struct vport);
 	if (priv_size) {
@@ -141,6 +142,12 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *vport_stats;
+		vport_stats = per_cpu_ptr(vport->percpu_stats, i);
+		u64_stats_init(&vport_stats->syncp);
+	}
+
 	spin_lock_init(&vport->stats_lock);
 
 	return vport;
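
The added loop seeds each per-cpu syncp with u64_stats_init() before first use; on 32-bit SMP kernels the syncp is a real seqcount guarding the split 64-bit loads, and with lockdep enabled it has to be initialized explicitly. For reference, a reader of such stats follows the usual fetch/retry pattern, roughly like this (demo_stats is a stand-in for struct pcpu_tstats, not from this patch):

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static u64 demo_read_rx(struct demo_stats __percpu *stats, int cpu)
{
	struct demo_stats *s = per_cpu_ptr(stats, cpu);
	unsigned int start;
	u64 bytes;

	/* retry until a consistent 64-bit snapshot is observed */
	do {
		start = u64_stats_fetch_begin(&s->syncp);
		bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return bytes;
}
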
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 77e38f7..008214a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -595,26 +595,25 @@
 
 static int pn_sock_seq_show(struct seq_file *seq, void *v)
 {
-	int len;
-
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%s%n", "pt  loc  rem rs st tx_queue rx_queue "
-			"  uid inode ref pointer drops", &len);
+		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
+			"  uid inode ref pointer drops");
 	else {
 		struct sock *sk = v;
 		struct pn_sock *pn = pn_sk(sk);
 
 		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
-			"%d %pK %d%n",
+			"%d %pK %d",
 			sk->sk_protocol, pn->sobject, pn->dobject,
 			pn->resource, sk->sk_state,
 			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
 			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
 			sock_i_ino(sk),
 			atomic_read(&sk->sk_refcnt), sk,
-			atomic_read(&sk->sk_drops), &len);
+			atomic_read(&sk->sk_drops));
 	}
-	seq_printf(seq, "%*s\n", 127 - len, "");
+	seq_pad(seq, '\n');
 	return 0;
 }
 
@@ -785,20 +784,19 @@
 
 static int pn_res_seq_show(struct seq_file *seq, void *v)
 {
-	int len;
-
+	seq_setwidth(seq, 63);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%s%n", "rs   uid inode", &len);
+		seq_puts(seq, "rs   uid inode");
 	else {
 		struct sock **psk = v;
 		struct sock *sk = *psk;
 
-		seq_printf(seq, "%02X %5u %lu%n",
+		seq_printf(seq, "%02X %5u %lu",
 			   (int) (psk - pnres.sk),
 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
-			   sock_i_ino(sk), &len);
+			   sock_i_ino(sk));
 	}
-	seq_printf(seq, "%*s\n", 63 - len, "");
+	seq_pad(seq, '\n');
 	return 0;
 }
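
Both hunks above replace the old "%n plus manual padding" idiom, which leans on a printf extension the kernel is phasing out, with the seq_file helpers added for this purpose: seq_setwidth() declares the record width up front, and seq_pad() space-fills to that width before appending the terminator. The pattern reduced to a stand-alone show routine (names and values are illustrative):

#include <linux/seq_file.h>

static int demo_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);			/* pad records to 127 columns */
	seq_printf(seq, "%2d %08X", 1, 0xdeadbeef);
	seq_pad(seq, '\n');			/* space-fill, then append '\n' */
	return 0;
}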
 
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 642ad42..378c3a6 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -51,10 +51,16 @@
 
 static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
 {
+	static u32 rds_hash_secret __read_mostly;
+
+	unsigned long hash;
+
+	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
+
 	/* Pass NULL, don't need struct net for hash */
-	unsigned long hash = inet_ehashfn(NULL,
-					  be32_to_cpu(laddr), 0,
-					  be32_to_cpu(faddr), 0);
+	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
+			      be32_to_cpu(faddr), 0,
+			      rds_hash_secret);
 	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
 }
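
net_get_random_once() defers drawing the hash secret to the first time a connection is hashed, the idea being that the entropy pool is in much better shape then than at early boot, and it guarantees the secret is initialized exactly once even if several CPUs race here. The same lazily-seeded pattern in isolation (all demo_* names are assumptions):

#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/net.h>

#define DEMO_HASH_BITS	7
#define DEMO_HASH_MASK	((1 << DEMO_HASH_BITS) - 1)

static u32 demo_bucket(u32 laddr, u32 faddr)
{
	static u32 demo_hash_secret __read_mostly;

	/* draws the secret on first call only, atomically */
	net_get_random_once(&demo_hash_secret, sizeof(demo_hash_secret));

	return jhash_2words(laddr, faddr, demo_hash_secret) & DEMO_HASH_MASK;
}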
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index ec1d731..48f8ffc 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -749,7 +749,7 @@
 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 		    struct cmsghdr *cmsg);
 
-extern void __rds_put_mr_final(struct rds_mr *mr);
+void __rds_put_mr_final(struct rds_mr *mr);
 static inline void rds_mr_put(struct rds_mr *mr)
 {
 	if (atomic_dec_and_test(&mr->r_refcount))
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe89..4c10e7e 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -36,7 +36,7 @@
 
 config RFKILL_GPIO
 	tristate "GPIO RFKILL driver"
-	depends on RFKILL && GPIOLIB && HAVE_CLK
+	depends on RFKILL && GPIOLIB
 	default n
 	help
 	  If you say yes here you get support of a generic gpio RFKILL
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index fb076cd..5620d3c 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -24,27 +24,23 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
 
 #include <linux/rfkill-gpio.h>
 
-enum rfkill_gpio_clk_state {
-	UNSPECIFIED = 0,
-	PWR_ENABLED,
-	PWR_DISABLED
-};
-
-#define PWR_CLK_SET(_RF, _EN) \
-	((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
-#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
-#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)
-
 struct rfkill_gpio_data {
-	struct rfkill_gpio_platform_data	*pdata;
-	struct rfkill				*rfkill_dev;
-	char					*reset_name;
-	char					*shutdown_name;
-	enum rfkill_gpio_clk_state		pwr_clk_enabled;
-	struct clk				*pwr_clk;
+	const char		*name;
+	enum rfkill_type	type;
+	int			reset_gpio;
+	int			shutdown_gpio;
+
+	struct rfkill		*rfkill_dev;
+	char			*reset_name;
+	char			*shutdown_name;
+	struct clk		*clk;
+
+	bool			clk_enabled;
 };
 
 static int rfkill_gpio_set_power(void *data, bool blocked)
@@ -52,23 +48,22 @@
 	struct rfkill_gpio_data *rfkill = data;
 
 	if (blocked) {
-		if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
-			gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
-		if (gpio_is_valid(rfkill->pdata->reset_gpio))
-			gpio_direction_output(rfkill->pdata->reset_gpio, 0);
-		if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
-			clk_disable(rfkill->pwr_clk);
+		if (gpio_is_valid(rfkill->shutdown_gpio))
+			gpio_set_value(rfkill->shutdown_gpio, 0);
+		if (gpio_is_valid(rfkill->reset_gpio))
+			gpio_set_value(rfkill->reset_gpio, 0);
+		if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled)
+			clk_disable(rfkill->clk);
 	} else {
-		if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
-			clk_enable(rfkill->pwr_clk);
-		if (gpio_is_valid(rfkill->pdata->reset_gpio))
-			gpio_direction_output(rfkill->pdata->reset_gpio, 1);
-		if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
-			gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
+		if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
+			clk_enable(rfkill->clk);
+		if (gpio_is_valid(rfkill->reset_gpio))
+			gpio_set_value(rfkill->reset_gpio, 1);
+		if (gpio_is_valid(rfkill->shutdown_gpio))
+			gpio_set_value(rfkill->shutdown_gpio, 1);
 	}
 
-	if (rfkill->pwr_clk)
-		PWR_CLK_SET(rfkill, blocked);
+	rfkill->clk_enabled = !blocked;
 
 	return 0;
 }
@@ -77,117 +72,112 @@
 	.set_block = rfkill_gpio_set_power,
 };
 
+static int rfkill_gpio_acpi_probe(struct device *dev,
+				  struct rfkill_gpio_data *rfkill)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return -ENODEV;
+
+	rfkill->name = dev_name(dev);
+	rfkill->type = (unsigned)id->driver_data;
+	rfkill->reset_gpio = acpi_get_gpio_by_index(dev, 0, NULL);
+	rfkill->shutdown_gpio = acpi_get_gpio_by_index(dev, 1, NULL);
+
+	return 0;
+}
+
 static int rfkill_gpio_probe(struct platform_device *pdev)
 {
-	struct rfkill_gpio_data *rfkill;
 	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct rfkill_gpio_data *rfkill;
+	const char *clk_name = NULL;
 	int ret = 0;
 	int len = 0;
 
-	if (!pdata) {
-		pr_warn("%s: No platform data specified\n", __func__);
-		return -EINVAL;
+	rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
+	if (!rfkill)
+		return -ENOMEM;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		ret = rfkill_gpio_acpi_probe(&pdev->dev, rfkill);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		clk_name = pdata->power_clk_name;
+		rfkill->name = pdata->name;
+		rfkill->type = pdata->type;
+		rfkill->reset_gpio = pdata->reset_gpio;
+		rfkill->shutdown_gpio = pdata->shutdown_gpio;
+	} else {
+		return -ENODEV;
 	}
 
 	/* make sure at least one of the GPIOs is defined and that
 	 * a name is specified for this instance */
-	if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
-		!gpio_is_valid(pdata->shutdown_gpio))) {
+	if ((!gpio_is_valid(rfkill->reset_gpio) &&
+	     !gpio_is_valid(rfkill->shutdown_gpio)) || !rfkill->name) {
 		pr_warn("%s: invalid platform data\n", __func__);
 		return -EINVAL;
 	}
 
-	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
-	if (!rfkill)
-		return -ENOMEM;
-
-	if (pdata->gpio_runtime_setup) {
+	if (pdata && pdata->gpio_runtime_setup) {
 		ret = pdata->gpio_runtime_setup(pdev);
 		if (ret) {
 			pr_warn("%s: can't set up gpio\n", __func__);
-			goto fail_alloc;
+			return ret;
 		}
 	}
 
-	rfkill->pdata = pdata;
+	len = strlen(rfkill->name);
+	rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL);
+	if (!rfkill->reset_name)
+		return -ENOMEM;
 
-	len = strlen(pdata->name);
-	rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
-	if (!rfkill->reset_name) {
-		ret = -ENOMEM;
-		goto fail_alloc;
-	}
+	rfkill->shutdown_name = devm_kzalloc(&pdev->dev, len + 10, GFP_KERNEL);
+	if (!rfkill->shutdown_name)
+		return -ENOMEM;
 
-	rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
-	if (!rfkill->shutdown_name) {
-		ret = -ENOMEM;
-		goto fail_reset_name;
-	}
+	snprintf(rfkill->reset_name, len + 7, "%s_reset", rfkill->name);
+	snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", rfkill->name);
 
-	snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name);
-	snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name);
+	rfkill->clk = devm_clk_get(&pdev->dev, clk_name);
 
-	if (pdata->power_clk_name) {
-		rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
-		if (IS_ERR(rfkill->pwr_clk)) {
-			pr_warn("%s: can't find pwr_clk.\n", __func__);
-			ret = PTR_ERR(rfkill->pwr_clk);
-			goto fail_shutdown_name;
-		}
-	}
-
-	if (gpio_is_valid(pdata->reset_gpio)) {
-		ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
+	if (gpio_is_valid(rfkill->reset_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev, rfkill->reset_gpio,
+					    0, rfkill->reset_name);
 		if (ret) {
 			pr_warn("%s: failed to get reset gpio.\n", __func__);
-			goto fail_clock;
+			return ret;
 		}
 	}
 
-	if (gpio_is_valid(pdata->shutdown_gpio)) {
-		ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
+	if (gpio_is_valid(rfkill->shutdown_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev, rfkill->shutdown_gpio,
+					    0, rfkill->shutdown_name);
 		if (ret) {
 			pr_warn("%s: failed to get shutdown gpio.\n", __func__);
-			goto fail_reset;
+			return ret;
 		}
 	}
 
-	rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
-					  &rfkill_gpio_ops, rfkill);
-	if (!rfkill->rfkill_dev) {
-		ret = -ENOMEM;
-		goto fail_shutdown;
-	}
+	rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
+					  rfkill->type, &rfkill_gpio_ops,
+					  rfkill);
+	if (!rfkill->rfkill_dev)
+		return -ENOMEM;
 
 	ret = rfkill_register(rfkill->rfkill_dev);
 	if (ret < 0)
-		goto fail_rfkill;
+		return ret;
 
 	platform_set_drvdata(pdev, rfkill);
 
-	dev_info(&pdev->dev, "%s device registered.\n", pdata->name);
+	dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
 
 	return 0;
-
-fail_rfkill:
-	rfkill_destroy(rfkill->rfkill_dev);
-fail_shutdown:
-	if (gpio_is_valid(pdata->shutdown_gpio))
-		gpio_free(pdata->shutdown_gpio);
-fail_reset:
-	if (gpio_is_valid(pdata->reset_gpio))
-		gpio_free(pdata->reset_gpio);
-fail_clock:
-	if (rfkill->pwr_clk)
-		clk_put(rfkill->pwr_clk);
-fail_shutdown_name:
-	kfree(rfkill->shutdown_name);
-fail_reset_name:
-	kfree(rfkill->reset_name);
-fail_alloc:
-	kfree(rfkill);
-
-	return ret;
 }
 
 static int rfkill_gpio_remove(struct platform_device *pdev)
@@ -195,31 +185,26 @@
 	struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
 	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
 
-	if (pdata->gpio_runtime_close)
+	if (pdata && pdata->gpio_runtime_close)
 		pdata->gpio_runtime_close(pdev);
 	rfkill_unregister(rfkill->rfkill_dev);
 	rfkill_destroy(rfkill->rfkill_dev);
-	if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
-		gpio_free(rfkill->pdata->shutdown_gpio);
-	if (gpio_is_valid(rfkill->pdata->reset_gpio))
-		gpio_free(rfkill->pdata->reset_gpio);
-	if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
-		clk_disable(rfkill->pwr_clk);
-	if (rfkill->pwr_clk)
-		clk_put(rfkill->pwr_clk);
-	kfree(rfkill->shutdown_name);
-	kfree(rfkill->reset_name);
-	kfree(rfkill);
 
 	return 0;
 }
 
+static const struct acpi_device_id rfkill_acpi_match[] = {
+	{ "BCM4752", RFKILL_TYPE_GPS },
+	{ },
+};
+
 static struct platform_driver rfkill_gpio_driver = {
 	.probe = rfkill_gpio_probe,
 	.remove = rfkill_gpio_remove,
 	.driver = {
-		   .name = "rfkill_gpio",
-		   .owner = THIS_MODULE,
+		.name = "rfkill_gpio",
+		.owner = THIS_MODULE,
+		.acpi_match_table = ACPI_PTR(rfkill_acpi_match),
 	},
 };
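
Most of the churn in this driver comes from switching the allocations, the clock lookup and the GPIO requests to the managed devm_* API: anything acquired that way is released automatically by the driver core when probe fails or the device is removed, which is what allows the whole fail_* unwind ladder and most of the remove path to be deleted. The idiom in miniature (names and the GPIO number are illustrative only):

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	char *name;
	int ret;

	name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
	if (!name)
		return -ENOMEM;			/* nothing to unwind */

	ret = devm_gpio_request_one(&pdev->dev, 42, GPIOF_OUT_INIT_LOW,
				    "demo_reset");
	if (ret)
		return ret;			/* name freed by devres */

	return 0;
}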
 
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index a693aca..5f43675 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -426,17 +426,16 @@
 /*
  * ar-accept.c
  */
-extern void rxrpc_accept_incoming_calls(struct work_struct *);
-extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *,
-					    unsigned long);
-extern int rxrpc_reject_call(struct rxrpc_sock *);
+void rxrpc_accept_incoming_calls(struct work_struct *);
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
+int rxrpc_reject_call(struct rxrpc_sock *);
 
 /*
  * ar-ack.c
  */
-extern void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_process_call(struct work_struct *);
+void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_process_call(struct work_struct *);
 
 /*
  * ar-call.c
@@ -445,19 +444,18 @@
 extern struct list_head rxrpc_calls;
 extern rwlock_t rxrpc_call_lock;
 
-extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
-						struct rxrpc_transport *,
-						struct rxrpc_conn_bundle *,
-						unsigned long, int, gfp_t);
-extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
-					      struct rxrpc_connection *,
-					      struct rxrpc_header *, gfp_t);
-extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
-						 unsigned long);
-extern void rxrpc_release_call(struct rxrpc_call *);
-extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
-extern void __rxrpc_put_call(struct rxrpc_call *);
-extern void __exit rxrpc_destroy_all_calls(void);
+struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+					 struct rxrpc_transport *,
+					 struct rxrpc_conn_bundle *,
+					 unsigned long, int, gfp_t);
+struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
+				       struct rxrpc_connection *,
+				       struct rxrpc_header *, gfp_t);
+struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
+void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+void __rxrpc_put_call(struct rxrpc_call *);
+void __exit rxrpc_destroy_all_calls(void);
 
 /*
  * ar-connection.c
@@ -465,19 +463,16 @@
 extern struct list_head rxrpc_connections;
 extern rwlock_t rxrpc_connection_lock;
 
-extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
-						  struct rxrpc_transport *,
-						  struct key *,
-						  __be16, gfp_t);
-extern void rxrpc_put_bundle(struct rxrpc_transport *,
-			     struct rxrpc_conn_bundle *);
-extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
-			      struct rxrpc_conn_bundle *, struct rxrpc_call *,
-			      gfp_t);
-extern void rxrpc_put_connection(struct rxrpc_connection *);
-extern void __exit rxrpc_destroy_all_connections(void);
-extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
-						      struct rxrpc_header *);
+struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
+					   struct rxrpc_transport *,
+					   struct key *, __be16, gfp_t);
+void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
+		       struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
+void rxrpc_put_connection(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_connections(void);
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
+					       struct rxrpc_header *);
 extern struct rxrpc_connection *
 rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
 			  gfp_t);
@@ -485,15 +480,15 @@
 /*
  * ar-connevent.c
  */
-extern void rxrpc_process_connection(struct work_struct *);
-extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-extern void rxrpc_reject_packets(struct work_struct *);
+void rxrpc_process_connection(struct work_struct *);
+void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
+void rxrpc_reject_packets(struct work_struct *);
 
 /*
  * ar-error.c
  */
-extern void rxrpc_UDP_error_report(struct sock *);
-extern void rxrpc_UDP_error_handler(struct work_struct *);
+void rxrpc_UDP_error_report(struct sock *);
+void rxrpc_UDP_error_handler(struct work_struct *);
 
 /*
  * ar-input.c
@@ -501,18 +496,17 @@
 extern unsigned long rxrpc_ack_timeout;
 extern const char *rxrpc_pkts[];
 
-extern void rxrpc_data_ready(struct sock *, int);
-extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
-			       bool);
-extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_data_ready(struct sock *, int);
+int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
+void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
 /*
  * ar-local.c
  */
 extern rwlock_t rxrpc_local_lock;
-extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
-extern void rxrpc_put_local(struct rxrpc_local *);
-extern void __exit rxrpc_destroy_all_locals(void);
+struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
+void rxrpc_put_local(struct rxrpc_local *);
+void __exit rxrpc_destroy_all_locals(void);
 
 /*
  * ar-key.c
@@ -520,31 +514,29 @@
 extern struct key_type key_type_rxrpc;
 extern struct key_type key_type_rxrpc_s;
 
-extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
-				     time_t, u32);
+int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
+int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
+			      u32);
 
 /*
  * ar-output.c
  */
 extern int rxrpc_resend_timeout;
 
-extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
-				struct rxrpc_transport *, struct msghdr *,
-				size_t);
-extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
-				struct msghdr *, size_t);
+int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
+int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
+			 struct rxrpc_transport *, struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
+			 size_t);
 
 /*
  * ar-peer.c
  */
-extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
-extern void rxrpc_put_peer(struct rxrpc_peer *);
-extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
-					  __be32, __be16);
-extern void __exit rxrpc_destroy_all_peers(void);
+struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
+void rxrpc_put_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
+void __exit rxrpc_destroy_all_peers(void);
 
 /*
  * ar-proc.c
@@ -556,38 +548,36 @@
 /*
  * ar-recvmsg.c
  */
-extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
-			 size_t, int);
+void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+		  int);
 
 /*
  * ar-security.c
  */
-extern int rxrpc_register_security(struct rxrpc_security *);
-extern void rxrpc_unregister_security(struct rxrpc_security *);
-extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
-			       size_t, void *);
-extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
-			       u32 *);
-extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
+int rxrpc_register_security(struct rxrpc_security *);
+void rxrpc_unregister_security(struct rxrpc_security *);
+int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
+			void *);
+int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
+void rxrpc_clear_conn_security(struct rxrpc_connection *);
 
 /*
  * ar-skbuff.c
  */
-extern void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_packet_destructor(struct sk_buff *);
 
 /*
  * ar-transport.c
  */
-extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
-						   struct rxrpc_peer *,
-						   gfp_t);
-extern void rxrpc_put_transport(struct rxrpc_transport *);
-extern void __exit rxrpc_destroy_all_transports(void);
-extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
-						    struct rxrpc_peer *);
+struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
+					    struct rxrpc_peer *, gfp_t);
+void rxrpc_put_transport(struct rxrpc_transport *);
+void __exit rxrpc_destroy_all_transports(void);
+struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
+					     struct rxrpc_peer *);
 
 /*
  * debug tracing
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index c03a32a..ad1f1d8 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -443,6 +443,16 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called cls_cgroup.
 
+config NET_CLS_BPF
+	tristate "BPF-based classifier"
+	select NET_CLS
+	---help---
+	  If you say Y here, you will be able to classify packets based on
+	  programmable BPF (JIT'ed) filters as an alternative to ematches.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cls_bpf.
+
 config NET_EMATCH
 	bool "Extended Matches"
 	select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index e5f9abe..35fa47a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -50,6 +50,7 @@
 obj-$(CONFIG_NET_CLS_BASIC)	+= cls_basic.o
 obj-$(CONFIG_NET_CLS_FLOW)	+= cls_flow.o
 obj-$(CONFIG_NET_CLS_CGROUP)	+= cls_cgroup.o
+obj-$(CONFIG_NET_CLS_BPF)	+= cls_bpf.o
 obj-$(CONFIG_NET_EMATCH)	+= ematch.o
 obj-$(CONFIG_NET_EMATCH_CMP)	+= em_cmp.o
 obj-$(CONFIG_NET_EMATCH_NBYTE)	+= em_nbyte.o
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 189e3c5..272d8e9 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,14 +231,14 @@
 	}
 	if (R_tab) {
 		police->rate_present = true;
-		psched_ratecfg_precompute(&police->rate, &R_tab->rate);
+		psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
 		qdisc_put_rtab(R_tab);
 	} else {
 		police->rate_present = false;
 	}
 	if (P_tab) {
 		police->peak_present = true;
-		psched_ratecfg_precompute(&police->peak, &P_tab->rate);
+		psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
 		qdisc_put_rtab(P_tab);
 	} else {
 		police->peak_present = false;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d76a35d..636d913 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -137,7 +137,7 @@
 			   struct nlattr **tb,
 			   struct nlattr *est)
 {
-	int err = -EINVAL;
+	int err;
 	struct tcf_exts e;
 	struct tcf_ematch_tree t;
 
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
new file mode 100644
index 0000000..1002a82
--- /dev/null
+++ b/net/sched/cls_bpf.c
@@ -0,0 +1,392 @@
+/*
+ * Berkeley Packet Filter based traffic classifier
+ *
+ * Might be used to classify traffic through flexible, user-defined and
+ * possibly JIT-ed BPF filters for traffic control as an alternative to
+ * ematches.
+ *
+ * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/filter.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
+MODULE_DESCRIPTION("TC BPF based classifier");
+
+struct cls_bpf_head {
+	struct list_head plist;
+	u32 hgen;
+};
+
+struct cls_bpf_prog {
+	struct sk_filter *filter;
+	struct sock_filter *bpf_ops;
+	struct tcf_exts exts;
+	struct tcf_result res;
+	struct list_head link;
+	u32 handle;
+	u16 bpf_len;
+};
+
+static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
+	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
+	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
+	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
+};
+
+static const struct tcf_ext_map bpf_ext_map = {
+	.action = TCA_BPF_ACT,
+	.police = TCA_BPF_POLICE,
+};
+
+static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+			    struct tcf_result *res)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog;
+	int ret;
+
+	list_for_each_entry(prog, &head->plist, link) {
+		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+
+		if (filter_res == 0)
+			continue;
+
+		*res = prog->res;
+		if (filter_res != -1)
+			res->classid = filter_res;
+
+		ret = tcf_exts_exec(skb, &prog->exts, res);
+		if (ret < 0)
+			continue;
+
+		return ret;
+	}
+
+	return -1;
+}
+
+static int cls_bpf_init(struct tcf_proto *tp)
+{
+	struct cls_bpf_head *head;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (head == NULL)
+		return -ENOBUFS;
+
+	INIT_LIST_HEAD(&head->plist);
+	tp->root = head;
+
+	return 0;
+}
+
+static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+{
+	tcf_unbind_filter(tp, &prog->res);
+	tcf_exts_destroy(tp, &prog->exts);
+
+	sk_unattached_filter_destroy(prog->filter);
+
+	kfree(prog->bpf_ops);
+	kfree(prog);
+}
+
+static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;
+
+	list_for_each_entry(prog, &head->plist, link) {
+		if (prog == todel) {
+			tcf_tree_lock(tp);
+			list_del(&prog->link);
+			tcf_tree_unlock(tp);
+
+			cls_bpf_delete_prog(tp, prog);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+static void cls_bpf_destroy(struct tcf_proto *tp)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog, *tmp;
+
+	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
+		list_del(&prog->link);
+		cls_bpf_delete_prog(tp, prog);
+	}
+
+	kfree(head);
+}
+
+static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog;
+	unsigned long ret = 0UL;
+
+	if (head == NULL)
+		return 0UL;
+
+	list_for_each_entry(prog, &head->plist, link) {
+		if (prog->handle == handle) {
+			ret = (unsigned long) prog;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
+{
+}
+
+static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
+				   struct cls_bpf_prog *prog,
+				   unsigned long base, struct nlattr **tb,
+				   struct nlattr *est)
+{
+	struct sock_filter *bpf_ops, *bpf_old;
+	struct tcf_exts exts;
+	struct sock_fprog tmp;
+	struct sk_filter *fp, *fp_old;
+	u16 bpf_size, bpf_len;
+	u32 classid;
+	int ret;
+
+	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
+		return -EINVAL;
+
+	ret = tcf_exts_validate(net, tp, tb, est, &exts, &bpf_ext_map);
+	if (ret < 0)
+		return ret;
+
+	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
+	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+		ret = -EINVAL;
+		goto errout;
+	}
+
+	bpf_size = bpf_len * sizeof(*bpf_ops);
+	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+	if (bpf_ops == NULL) {
+		ret = -ENOMEM;
+		goto errout;
+	}
+
+	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
+
+	tmp.len = bpf_len;
+	tmp.filter = (struct sock_filter __user *) bpf_ops;
+
+	ret = sk_unattached_filter_create(&fp, &tmp);
+	if (ret)
+		goto errout_free;
+
+	tcf_tree_lock(tp);
+	fp_old = prog->filter;
+	bpf_old = prog->bpf_ops;
+
+	prog->bpf_len = bpf_len;
+	prog->bpf_ops = bpf_ops;
+	prog->filter = fp;
+	prog->res.classid = classid;
+	tcf_tree_unlock(tp);
+
+	tcf_bind_filter(tp, &prog->res, base);
+	tcf_exts_change(tp, &prog->exts, &exts);
+
+	if (fp_old)
+		sk_unattached_filter_destroy(fp_old);
+	if (bpf_old)
+		kfree(bpf_old);
+
+	return 0;
+
+errout_free:
+	kfree(bpf_ops);
+errout:
+	tcf_exts_destroy(tp, &exts);
+	return ret;
+}
+
+static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
+				   struct cls_bpf_head *head)
+{
+	unsigned int i = 0x80000000;
+	u32 handle;
+
+	do {
+		if (++head->hgen == 0x7FFFFFFF)
+			head->hgen = 1;
+	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
+
+	if (unlikely(i == 0)) {
+		pr_err("Insufficient number of handles\n");
+		/* 0 is never a valid handle; the caller errors out on it */
+		handle = 0;
+	} else {
+		handle = head->hgen;
+	}
+
+	return handle;
+}
+
+static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+			  struct tcf_proto *tp, unsigned long base,
+			  u32 handle, struct nlattr **tca,
+			  unsigned long *arg)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
+	struct nlattr *tb[TCA_BPF_MAX + 1];
+	int ret;
+
+	if (tca[TCA_OPTIONS] == NULL)
+		return -EINVAL;
+
+	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
+	if (ret < 0)
+		return ret;
+
+	if (prog != NULL) {
+		if (handle && prog->handle != handle)
+			return -EINVAL;
+		return cls_bpf_modify_existing(net, tp, prog, base, tb,
+					       tca[TCA_RATE]);
+	}
+
+	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
+	if (prog == NULL)
+		return -ENOBUFS;
+
+	if (handle == 0)
+		prog->handle = cls_bpf_grab_new_handle(tp, head);
+	else
+		prog->handle = handle;
+	if (prog->handle == 0) {
+		ret = -EINVAL;
+		goto errout;
+	}
+
+	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
+	if (ret < 0)
+		goto errout;
+
+	tcf_tree_lock(tp);
+	list_add(&prog->link, &head->plist);
+	tcf_tree_unlock(tp);
+
+	*arg = (unsigned long) prog;
+
+	return 0;
+errout:
+	if (*arg == 0UL && prog)
+		kfree(prog);
+
+	return ret;
+}
+
+static int cls_bpf_dump(struct tcf_proto *tp, unsigned long fh,
+			struct sk_buff *skb, struct tcmsg *tm)
+{
+	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
+	struct nlattr *nest, *nla;
+
+	if (prog == NULL)
+		return skb->len;
+
+	tm->tcm_handle = prog->handle;
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
+		goto nla_put_failure;
+	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
+		goto nla_put_failure;
+
+	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
+			  sizeof(struct sock_filter));
+	if (nla == NULL)
+		goto nla_put_failure;
+
+	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+	if (tcf_exts_dump(skb, &prog->exts, &bpf_ext_map) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+
+	if (tcf_exts_dump_stats(skb, &prog->exts, &bpf_ext_map) < 0)
+		goto nla_put_failure;
+
+	return skb->len;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -1;
+}
+
+static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+	struct cls_bpf_head *head = tp->root;
+	struct cls_bpf_prog *prog;
+
+	list_for_each_entry(prog, &head->plist, link) {
+		if (arg->count < arg->skip)
+			goto skip;
+		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+skip:
+		arg->count++;
+	}
+}
+
+static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
+	.kind		=	"bpf",
+	.owner		=	THIS_MODULE,
+	.classify	=	cls_bpf_classify,
+	.init		=	cls_bpf_init,
+	.destroy	=	cls_bpf_destroy,
+	.get		=	cls_bpf_get,
+	.put		=	cls_bpf_put,
+	.change		=	cls_bpf_change,
+	.delete		=	cls_bpf_delete,
+	.walk		=	cls_bpf_walk,
+	.dump		=	cls_bpf_dump,
+};
+
+static int __init cls_bpf_init_mod(void)
+{
+	return register_tcf_proto_ops(&cls_bpf_ops);
+}
+
+static void __exit cls_bpf_exit_mod(void)
+{
+	unregister_tcf_proto_ops(&cls_bpf_ops);
+}
+
+module_init(cls_bpf_init_mod);
+module_exit(cls_bpf_exit_mod);
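
The contract in cls_bpf_classify() above deserves spelling out: a filter returning 0 means "no match, try the next program on the list", a return of -1 (i.e. all bits set) means "match, keep the class ID configured via TCA_BPF_CLASSID", and any other return value is used directly as the class ID. The smallest classifier possible under that contract is a single classic-BPF return instruction; purely illustrative:

#include <linux/filter.h>

/* Unconditionally put every packet into class 1:1 (0x10001). */
static struct sock_filter demo_prog[] = {
	BPF_STMT(BPF_RET | BPF_K, 0x10001),
};

static struct sock_fprog demo_fprog = {
	.len	= 1,
	.filter	= (struct sock_filter __user *) demo_prog,
};
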
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 867b4a3..16006c9 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -72,11 +72,11 @@
 			struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
-	void *v;
+	struct cgroup_cls_state *cs = css_cls_state(css);
+	void *v = (void *)(unsigned long)cs->classid;
 
 	cgroup_taskset_for_each(p, css, tset) {
 		task_lock(p);
-		v = (void *)(unsigned long)task_cls_classid(p);
 		iterate_fd(p->files, 0, update_classid, v);
 		task_unlock(p);
 	}
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index 938b7cb..527aeb7 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -24,11 +24,12 @@
 {
 	struct xt_set_info *set = data;
 	ip_set_id_t index;
+	struct net *net = dev_net(qdisc_dev(tp->q));
 
 	if (data_len != sizeof(*set))
 		return -EINVAL;
 
-	index = ip_set_nfnl_get_byindex(set->index);
+	index = ip_set_nfnl_get_byindex(net, set->index);
 	if (index == IPSET_INVALID_ID)
 		return -ENOENT;
 
@@ -37,7 +38,7 @@
 	if (em->data)
 		return 0;
 
-	ip_set_nfnl_put(index);
+	ip_set_nfnl_put(net, index);
 	return -ENOMEM;
 }
 
@@ -45,7 +46,7 @@
 {
 	const struct xt_set_info *set = (const void *) em->data;
 	if (set) {
-		ip_set_nfnl_put(set->index);
+		ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index);
 		kfree((void *) em->data);
 	}
 }
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 7c3de6f..e5cef956 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -793,8 +793,10 @@
 		goto errout;
 
 	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-	if (meta == NULL)
+	if (meta == NULL) {
+		err = -ENOMEM;
 		goto errout;
+	}
 
 	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
 	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2adda7f..cd81505 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -737,9 +737,11 @@
 	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	u32 parentid;
+	int drops;
 
 	if (n == 0)
 		return;
+	drops = max_t(int, n, 0);
 	while ((parentid = sch->parent)) {
 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 			return;
@@ -756,6 +758,7 @@
 			cops->put(sch, cl);
 		}
 		sch->q.qlen -= n;
+		sch->qstats.drops += drops;
 	}
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a74e278..922a094 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
-		ret = dev_hard_start_xmit(skb, dev, txq);
+		ret = dev_hard_start_xmit(skb, dev, txq, NULL);
 
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -829,7 +829,7 @@
 	struct net_device *dev;
 	bool sync_needed = false;
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
 					 &noop_qdisc);
 		if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@
 		synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, unreg_list)
+	list_for_each_entry(dev, head, close_list)
 		while (some_qdisc_is_busy(dev))
 			yield();
 }
@@ -857,7 +857,7 @@
 {
 	LIST_HEAD(single);
 
-	list_add(&dev->unreg_list, &single);
+	list_add(&dev->close_list, &single);
 	dev_deactivate_many(&single);
 	list_del(&single);
 }
@@ -910,11 +910,12 @@
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-			       const struct tc_ratespec *conf)
+			       const struct tc_ratespec *conf,
+			       u64 rate64)
 {
 	memset(r, 0, sizeof(*r));
 	r->overhead = conf->overhead;
-	r->rate_bytes_ps = conf->rate;
+	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
 	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
 	r->mult = 1;
 	/*
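
The extra argument exists because tc_ratespec.rate is a u32 counted in bytes per second, which caps the legacy interface at 2^32 B/s, roughly 34 Gbit/s. Callers that received a 64-bit rate via the new TCA_*_RATE64 netlink attributes pass it in here, and max_t() keeps whichever encoding carries the real value. The dump paths in HTB and TBF below mirror this by emitting the 64-bit attribute only when it is actually needed; schematically (the helper is hypothetical):

#include <linux/errno.h>
#include <net/netlink.h>

static int demo_put_rate(struct sk_buff *skb, int attrtype, u64 rate_bps)
{
	/* the legacy u32 field remains authoritative below 2^32 B/s */
	if (rate_bps >= (1ULL << 32) &&
	    nla_put_u64(skb, attrtype, rate_bps))
		return -EMSGSIZE;
	return 0;
}
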
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 863846c..0e1e38b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -997,6 +997,8 @@
 	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
+	[TCA_HTB_RATE64] = { .type = NLA_U64 },
+	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
 };
 
 static void htb_work_func(struct work_struct *work)
@@ -1114,6 +1116,12 @@
 	opt.level = cl->level;
 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
+	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
+	    nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+		goto nla_put_failure;
+	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
+	    nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	spin_unlock_bh(root_lock);
@@ -1332,6 +1340,7 @@
 	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
 	struct nlattr *tb[TCA_HTB_MAX + 1];
 	struct tc_htb_opt *hopt;
+	u64 rate64, ceil64;
 
 	/* extract all subattrs from opt attr */
 	if (!opt)
@@ -1491,8 +1500,12 @@
 			cl->prio = TC_HTB_NUMPRIO - 1;
 	}
 
-	psched_ratecfg_precompute(&cl->rate, &hopt->rate);
-	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
+	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
 
 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b87e83d..75c94e5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -235,7 +235,6 @@
 			clg->state = 2;
 		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 			clg->state = 1;
-			return true;
 		} else if (clg->a2 + clg->a3 < rnd) {
 			clg->state = 3;
 			return true;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1aaf1b6..68f9859 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -266,20 +266,23 @@
 	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
 	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
+	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
+	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
 };
 
 static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 {
 	int err;
 	struct tbf_sched_data *q = qdisc_priv(sch);
-	struct nlattr *tb[TCA_TBF_PTAB + 1];
+	struct nlattr *tb[TCA_TBF_MAX + 1];
 	struct tc_tbf_qopt *qopt;
 	struct qdisc_rate_table *rtab = NULL;
 	struct qdisc_rate_table *ptab = NULL;
 	struct Qdisc *child = NULL;
 	int max_size, n;
+	u64 rate64 = 0, prate64 = 0;
 
-	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
+	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
 	if (err < 0)
 		return err;
 
@@ -341,9 +344,13 @@
 	q->tokens = q->buffer;
 	q->ptokens = q->mtu;
 
-	psched_ratecfg_precompute(&q->rate, &rtab->rate);
+	if (tb[TCA_TBF_RATE64])
+		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+	psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64);
 	if (ptab) {
-		psched_ratecfg_precompute(&q->peak, &ptab->rate);
+		if (tb[TCA_TBF_PRATE64])
+			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+		psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
 		q->peak_present = true;
 	} else {
 		q->peak_present = false;
@@ -402,6 +409,13 @@
 	opt.buffer = PSCHED_NS2TICKS(q->buffer);
 	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
+	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
+	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
+		goto nla_put_failure;
+	if (q->peak_present &&
+	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
+	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	return skb->len;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index cef5099..c9b91cb 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -602,7 +602,7 @@
 
 		/* Start a T3 timer here in case it wasn't running so
 		 * that these migrated packets have a chance to get
-		 * retrnasmitted.
+		 * retransmitted.
 		 */
 		if (!timer_pending(&active->T3_rtx_timer))
 			if (!mod_timer(&active->T3_rtx_timer,
@@ -665,7 +665,7 @@
 	/* Set the path max_retrans.  */
 	peer->pathmaxrxt = asoc->pathmaxrxt;
 
-	/* And the partial failure retrnas threshold */
+	/* And the partial failure retrans threshold */
 	peer->pf_retrans = asoc->pf_retrans;
 
 	/* Initialize the peer's SACK delay timeout based on the
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8c4fa5d..46b5977 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -539,18 +539,14 @@
 	for (i = 0; i < n_elt; i++) {
 		id = ntohs(hmacs->hmac_ids[i]);
 
-		/* Check the id is in the supported range */
-		if (id > SCTP_AUTH_HMAC_ID_MAX) {
-			id = 0;
-			continue;
-		}
-
-		/* See is we support the id.  Supported IDs have name and
-		 * length fields set, so that we can allocated and use
+		/* Check that the id is in the supported range and that
+		 * we actually support it.  Supported IDs have name and
+		 * length fields set, so that we can allocate and use
 		 * them.  We can safely just check for name, for without the
 		 * name, we can't allocate the TFM.
 		 */
-		if (!sctp_hmac_list[id].hmac_name) {
+		if (id > SCTP_AUTH_HMAC_ID_MAX ||
+		    !sctp_hmac_list[id].hmac_name) {
 			id = 0;
 			continue;
 		}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7bd5ed4..f2044fc 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -201,7 +201,7 @@
 
 	max = asoc->frag_point;
 	/* If the peer requested that we authenticate DATA chunks
-	 * we need to accound for bundling of the AUTH chunks along with
+	 * we need to account for bundling of the AUTH chunks along with
 	 * DATA.
 	 */
 	if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 96a5591..7567e6f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -428,20 +428,20 @@
 {
 	addr->v6.sin6_family = AF_INET6;
 	addr->v6.sin6_port = 0;
-	addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+	addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
 static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 {
 	if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-		inet6_sk(sk)->rcv_saddr.s6_addr32[0] = 0;
-		inet6_sk(sk)->rcv_saddr.s6_addr32[1] = 0;
-		inet6_sk(sk)->rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
-		inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
+		sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
+		sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
+		sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
+		sk->sk_v6_rcv_saddr.s6_addr32[3] =
 			addr->v4.sin_addr.s_addr;
 	} else {
-		inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+		sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
 	}
 }
 
@@ -449,12 +449,12 @@
 static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
 {
 	if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-		inet6_sk(sk)->daddr.s6_addr32[0] = 0;
-		inet6_sk(sk)->daddr.s6_addr32[1] = 0;
-		inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
-		inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+		sk->sk_v6_daddr.s6_addr32[0] = 0;
+		sk->sk_v6_daddr.s6_addr32[1] = 0;
+		sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
+		sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
 	} else {
-		inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+		sk->sk_v6_daddr = addr->v6.sin6_addr;
 	}
 }
 
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 5ea573b..647396b 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -79,12 +79,13 @@
  */
 static int sctp_objcnt_seq_show(struct seq_file *seq, void *v)
 {
-	int i, len;
+	int i;
 
 	i = (int)*(loff_t *)v;
-	seq_printf(seq, "%s: %d%n", sctp_dbg_objcnt[i].label,
-				atomic_read(sctp_dbg_objcnt[i].counter), &len);
-	seq_printf(seq, "%*s\n", 127 - len, "");
+	seq_setwidth(seq, 127);
+	seq_printf(seq, "%s: %d", sctp_dbg_objcnt[i].label,
+				atomic_read(sctp_dbg_objcnt[i].counter));
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 3191373..e650978 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -390,7 +390,6 @@
 	__u8 has_data = 0;
 	struct dst_entry *dst = tp->dst;
 	unsigned char *auth = NULL;	/* pointer to auth in skb data */
-	__u32 cksum_buf_len = sizeof(struct sctphdr);
 
 	pr_debug("%s: packet:%p\n", __func__, packet);
 
@@ -493,7 +492,6 @@
 		if (chunk == packet->auth)
 			auth = skb_tail_pointer(nskb);
 
-		cksum_buf_len += chunk->skb->len;
 		memcpy(skb_put(nskb, chunk->skb->len),
 			       chunk->skb->data, chunk->skb->len);
 
@@ -538,12 +536,7 @@
 	if (!sctp_checksum_disable) {
 		if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
 		    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
-			__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
-
-			/* 3) Put the resultant value into the checksum field in the
-			 *    common header, and leave the rest of the bits unchanged.
-			 */
-			sh->checksum = sctp_end_cksum(crc32);
+			sh->checksum = sctp_compute_cksum(nskb, 0);
 		} else {
 			/* no need to seed pseudo checksum for SCTP */
 			nskb->ip_summed = CHECKSUM_PARTIAL;
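
The rewrite drops the hand-maintained cksum_buf_len in favour of sctp_compute_cksum(), which walks the assembled skb itself; that removes the risk of the tracked length drifting out of sync with what was actually copied into nskb. The new idiom, in isolation (demo_fill_cksum is an assumed name):

#include <linux/sctp.h>
#include <net/sctp/checksum.h>

static void demo_fill_cksum(struct sk_buff *skb, struct sctphdr *sh)
{
	/* CRC32c over the whole packet; offset 0 assumes the SCTP
	 * header sits at skb->data, as in the caller above */
	sh->checksum = sctp_compute_cksum(skb, 0);
}
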
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d244a23..fe69032 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1297,6 +1297,13 @@
 
 /* Turn an skb into a chunk.
  * FIXME: Eventually move the structure directly inside the skb->cb[].
+ *
+ * sctpimpguide-05.txt Section 2.8.2
+ * M1) Each time a new DATA chunk is transmitted
+ * set the 'TSN.Missing.Report' count for that TSN to 0. The
+ * 'TSN.Missing.Report' count will be used to determine missing chunks
+ * and when to fast retransmit.
+ *
  */
 struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 			    const struct sctp_association *asoc,
@@ -1314,30 +1321,10 @@
 	INIT_LIST_HEAD(&retval->list);
 	retval->skb		= skb;
 	retval->asoc		= (struct sctp_association *)asoc;
-	retval->has_tsn		= 0;
-	retval->has_ssn         = 0;
-	retval->rtt_in_progress	= 0;
-	retval->sent_at		= 0;
 	retval->singleton	= 1;
-	retval->end_of_packet	= 0;
-	retval->ecn_ce_done	= 0;
-	retval->pdiscard	= 0;
 
-	/* sctpimpguide-05.txt Section 2.8.2
-	 * M1) Each time a new DATA chunk is transmitted
-	 * set the 'TSN.Missing.Report' count for that TSN to 0. The
-	 * 'TSN.Missing.Report' count will be used to determine missing chunks
-	 * and when to fast retransmit.
-	 */
-	retval->tsn_missing_report = 0;
-	retval->tsn_gap_acked = 0;
 	retval->fast_retransmit = SCTP_CAN_FRTX;
 
-	/* If this is a fragmented message, track all fragments
-	 * of the message (for SEND_FAILED).
-	 */
-	retval->msg = NULL;
-
 	/* Polish the bead hole.  */
 	INIT_LIST_HEAD(&retval->transmitted_list);
 	INIT_LIST_HEAD(&retval->frag_list);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 911b71b..72046b9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5890,7 +5890,7 @@
 		int low, high, remaining, index;
 		unsigned int rover;
 
-		inet_get_local_port_range(&low, &high);
+		inet_get_local_port_range(sock_net(sk), &low, &high);
 		remaining = (high - low) + 1;
 		rover = net_random() % remaining + low;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 6cd930f..6c981dd 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -150,7 +150,6 @@
 	struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
 				      .data = cksumdata};
 	s32 now;
-	u64 seqnum;
 	u8 *ptr = read_token->data;
 	u8 *cksumkey;
 	u8 flags;
@@ -197,9 +196,10 @@
 	if (now > ctx->endtime)
 		return GSS_S_CONTEXT_EXPIRED;
 
-	/* do sequencing checks */
-
-	seqnum = be64_to_cpup((__be64 *)ptr + 8);
+	/*
+	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
+	 * doesn't want it checked; see page 6 of rfc 2203.
+	 */
 
 	return GSS_S_COMPLETE;
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 1da52d1..42560e5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -489,7 +489,6 @@
 gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 {
 	s32		now;
-	u64		seqnum;
 	u8		*ptr;
 	u8		flags = 0x00;
 	u16		ec, rrc;
@@ -525,7 +524,10 @@
 	ec = be16_to_cpup((__be16 *)(ptr + 4));
 	rrc = be16_to_cpup((__be16 *)(ptr + 6));
 
-	seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+	/*
+	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
+	 * doesn't want it checked; see page 6 of rfc 2203.
+	 */
 
 	if (rrc != 0)
 		rotate_left(offset + 16, buf, rrc);
@@ -574,8 +576,8 @@
 	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
 	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
 
-	/* Trim off the checksum blob */
-	xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
+	/* Trim off the trailing "extra count" and checksum blob */
+	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
 	return GSS_S_COMPLETE;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index f1eb0d1..458f85e 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -298,7 +298,8 @@
 	if (res.context_handle) {
 		data->out_handle = rctxh.exported_context_token;
 		data->mech_oid.len = rctxh.mech.len;
-		memcpy(data->mech_oid.data, rctxh.mech.data,
+		if (rctxh.mech.data)
+			memcpy(data->mech_oid.data, rctxh.mech.data,
 						data->mech_oid.len);
 		client_name = rctxh.src_name.display_name;
 	}
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index f0f78c5..1ec19f6 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -559,6 +559,8 @@
 
 	/* cred->elements */
 	err = dummy_enc_credel_array(xdr, &cred->elements);
+	if (err)
+		return err;
 
 	/* cred->cred_handle_reference */
 	err = gssx_enc_buffer(xdr, &cred->cred_handle_reference);
@@ -740,22 +742,20 @@
 		goto done;
 
 	/* arg->context_handle */
-	if (arg->context_handle) {
+	if (arg->context_handle)
 		err = gssx_enc_ctx(xdr, arg->context_handle);
-		if (err)
-			goto done;
-	} else {
+	else
 		err = gssx_enc_bool(xdr, 0);
-	}
+	if (err)
+		goto done;
 
 	/* arg->cred_handle */
-	if (arg->cred_handle) {
+	if (arg->cred_handle)
 		err = gssx_enc_cred(xdr, arg->cred_handle);
-		if (err)
-			goto done;
-	} else {
+	else
 		err = gssx_enc_bool(xdr, 0);
-	}
+	if (err)
+		goto done;
 
 	/* arg->input_token */
 	err = gssx_enc_in_token(xdr, &arg->input_token);
@@ -763,13 +763,12 @@
 		goto done;
 
 	/* arg->input_cb */
-	if (arg->input_cb) {
+	if (arg->input_cb)
 		err = gssx_enc_cb(xdr, arg->input_cb);
-		if (err)
-			goto done;
-	} else {
+	else
 		err = gssx_enc_bool(xdr, 0);
-	}
+	if (err)
+		goto done;
 
 	err = gssx_enc_bool(xdr, arg->ret_deleg_cred);
 	if (err)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 09fb638..008cdad 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1167,8 +1167,8 @@
 	if (!ud->found_creds) {
 		/* userspace seems buggy, we should always get at least a
 		 * mapping to nobody */
-		dprintk("RPC:       No creds found, marking Negative!\n");
-		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+		dprintk("RPC:       No creds found!\n");
+		goto out;
 	} else {
 
 		/* steal creds */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f94567b..d0d14a0 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -519,8 +519,8 @@
 	d_add(dentry, inode);
 	return 0;
 out_err:
-	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
-			__FILE__, __func__, dentry->d_name.name);
+	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %pd\n",
+			__FILE__, __func__, dentry);
 	dput(dentry);
 	return -ENOMEM;
 }
@@ -755,8 +755,8 @@
 out_bad:
 	__rpc_depopulate(parent, files, start, eof);
 	mutex_unlock(&dir->i_mutex);
-	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
-			__FILE__, __func__, parent->d_name.name);
+	printk(KERN_WARNING "%s: %s failed to populate directory %pd\n",
+			__FILE__, __func__, parent);
 	return err;
 }
 
@@ -852,8 +852,8 @@
 	return dentry;
 out_err:
 	dentry = ERR_PTR(err);
-	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
-			__FILE__, __func__, parent->d_name.name, name,
+	printk(KERN_WARNING "%s: %s() failed to create pipe %pd/%s (errno = %d)\n",
+			__FILE__, __func__, parent, name,
 			err);
 	goto out;
 }
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b974571..e7fbe36 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1104,8 +1104,6 @@
 	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
 	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */
 
-	progp = serv->sv_program;
-
 	for (progp = serv->sv_program; progp; progp = progp->pg_next)
 		if (prog == progp->pg_prog)
 			break;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9c9caaa..b6e59f0 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -291,12 +291,14 @@
 				&inet_sk(sk)->inet_rcv_saddr,
 				inet_sk(sk)->inet_num);
 		break;
+#if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
 		len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
 				proto_name,
-				&inet6_sk(sk)->rcv_saddr,
+				&sk->sk_v6_rcv_saddr,
 				inet_sk(sk)->inet_num);
 		break;
+#endif
 	default:
 		len = snprintf(buf, remaining, "*unknown-%d*\n",
 				sk->sk_family);
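
The new #if guard is required because sk_v6_rcv_saddr only exists in struct sock_common when IPv6 support is compiled in at all. IS_ENABLED(CONFIG_IPV6) is the right test here since it evaluates to 1 for both built-in (=y) and modular (=m) IPv6, which a plain #ifdef CONFIG_IPV6 would get wrong in the modular case; it also works in ordinary C expressions, e.g. (illustrative):

#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool demo_ipv6_available(void)
{
	return IS_ENABLED(CONFIG_IPV6);	/* 1 for =y or =m, else 0 */
}
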
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 716de1a..0d44025 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -480,18 +480,24 @@
 			tipc_node_unlock(node);
 			tipc_link_recv_bundle(buf);
 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
-			int ret = tipc_link_recv_fragment(&node->bclink.defragm,
-						      &buf, &msg);
-			if (ret < 0)
+			int ret;
+			ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
+						      &node->bclink.reasm_tail,
+						      &buf);
+			if (ret == LINK_REASM_ERROR)
 				goto unlock;
 			spin_lock_bh(&bc_lock);
 			bclink_accept_pkt(node, seqno);
 			bcl->stats.recv_fragments++;
-			if (ret > 0)
+			if (ret == LINK_REASM_COMPLETE) {
 				bcl->stats.recv_fragmented++;
+				/* Point msg to inner header */
+				msg = buf_msg(buf);
+				spin_unlock_bh(&bc_lock);
+				goto receive;
+			}
 			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
-			tipc_net_route_msg(buf);
 		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
 			spin_lock_bh(&bc_lock);
 			bclink_accept_pkt(node, seqno);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 609c30c..3f9707a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -387,7 +387,7 @@
 
 	b_ptr = &tipc_bearers[bearer_id];
 	strcpy(b_ptr->name, name);
-	res = m_ptr->enable_bearer(b_ptr);
+	res = m_ptr->enable_media(b_ptr);
 	if (res) {
 		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
 			name, -res);
@@ -420,23 +420,15 @@
 }
 
 /**
- * tipc_block_bearer - Block the bearer with the given name, and reset all its links
+ * tipc_block_bearer - Block the bearer, and reset all its links
  */
-int tipc_block_bearer(const char *name)
+int tipc_block_bearer(struct tipc_bearer *b_ptr)
 {
-	struct tipc_bearer *b_ptr = NULL;
 	struct tipc_link *l_ptr;
 	struct tipc_link *temp_l_ptr;
 
 	read_lock_bh(&tipc_net_lock);
-	b_ptr = tipc_bearer_find(name);
-	if (!b_ptr) {
-		pr_warn("Attempt to block unknown bearer <%s>\n", name);
-		read_unlock_bh(&tipc_net_lock);
-		return -EINVAL;
-	}
-
-	pr_info("Blocking bearer <%s>\n", name);
+	pr_info("Blocking bearer <%s>\n", b_ptr->name);
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -465,7 +457,7 @@
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
-	b_ptr->media->disable_bearer(b_ptr);
+	b_ptr->media->disable_media(b_ptr);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		tipc_link_delete(l_ptr);
 	}
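
tipc_block_bearer() now takes the bearer pointer its callers already hold, so the by-name lookup and its "unknown bearer" error path disappear. Sketched, the shape of the change at a call site (taken from the netdev notifier below):

    /* Before: int tipc_block_bearer(const char *name);  - look up, then block
     * After:  int tipc_block_bearer(struct tipc_bearer *b_ptr);
     */
    case NETDEV_DOWN:
    	tipc_block_bearer(eb_ptr->bearer);	/* no name round-trip */
    	break;
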
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 09c869a..e5e04be 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -75,8 +75,8 @@
 /**
  * struct tipc_media - TIPC media information available to internal users
  * @send_msg: routine which handles buffer transmission
- * @enable_bearer: routine which enables a bearer
- * @disable_bearer: routine which disables a bearer
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
  * @addr2str: routine which converts media address to string
  * @addr2msg: routine which converts media address to protocol message area
  * @msg2addr: routine which converts media address from protocol message area
@@ -91,8 +91,8 @@
 	int (*send_msg)(struct sk_buff *buf,
 			struct tipc_bearer *b_ptr,
 			struct tipc_media_addr *dest);
-	int (*enable_bearer)(struct tipc_bearer *b_ptr);
-	void (*disable_bearer)(struct tipc_bearer *b_ptr);
+	int (*enable_media)(struct tipc_bearer *b_ptr);
+	void (*disable_media)(struct tipc_bearer *b_ptr);
 	int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
 	int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
 	int (*msg2addr)(const struct tipc_bearer *b_ptr,
@@ -163,7 +163,7 @@
 
 void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 
-int  tipc_block_bearer(const char *name);
+int  tipc_block_bearer(struct tipc_bearer *b_ptr);
 void tipc_continue(struct tipc_bearer *tb_ptr);
 
 int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index be72f8c..94895d4 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,21 +90,21 @@
 /*
  * Routines available to privileged subsystems
  */
-extern int tipc_core_start_net(unsigned long);
-extern int  tipc_handler_start(void);
-extern void tipc_handler_stop(void);
-extern int  tipc_netlink_start(void);
-extern void tipc_netlink_stop(void);
-extern int  tipc_socket_init(void);
-extern void tipc_socket_stop(void);
-extern int tipc_sock_create_local(int type, struct socket **res);
-extern void tipc_sock_release_local(struct socket *sock);
-extern int tipc_sock_accept_local(struct socket *sock,
-				  struct socket **newsock, int flags);
+int tipc_core_start_net(unsigned long);
+int tipc_handler_start(void);
+void tipc_handler_stop(void);
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+			   int flags);
 
 #ifdef CONFIG_SYSCTL
-extern int tipc_register_sysctl(void);
-extern void tipc_unregister_sysctl(void);
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
 #else
 #define tipc_register_sysctl() 0
 #define tipc_unregister_sysctl()
@@ -201,6 +201,6 @@
 	return (struct tipc_msg *)skb->data;
 }
 
-extern struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size);
 
 #endif
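
Dropping extern from these function declarations is purely cosmetic: prototypes at file scope have external linkage by default, so the keyword is redundant and removing it changes no object code. For example, these two declarations are equivalent, and the second is the preferred kernel style for headers:

    extern int tipc_netlink_start(void);	/* old style */
    int tipc_netlink_start(void);		/* preferred  */
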
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 40ea40c..f80d59f 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
  * net/tipc/eth_media.c: Ethernet bearer support for TIPC
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -37,19 +37,19 @@
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_ETH_BEARERS		MAX_BEARERS
+#define MAX_ETH_MEDIA		MAX_BEARERS
 
 #define ETH_ADDR_OFFSET	4	/* message header offset of MAC address */
 
 /**
- * struct eth_bearer - Ethernet bearer data structure
+ * struct eth_media - Ethernet bearer data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
  * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
-struct eth_bearer {
+struct eth_media {
 	struct tipc_bearer *bearer;
 	struct net_device *dev;
 	struct packet_type tipc_packet_type;
@@ -58,7 +58,7 @@
 };
 
 static struct tipc_media eth_media_info;
-static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static struct eth_media eth_media_array[MAX_ETH_MEDIA];
 static int eth_started;
 
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
@@ -100,7 +100,7 @@
 	if (!clone)
 		return 0;
 
-	dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+	dev = ((struct eth_media *)(tb_ptr->usr_handle))->dev;
 	delta = dev->hard_header_len - skb_headroom(buf);
 
 	if ((delta > 0) &&
@@ -128,43 +128,43 @@
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 		    struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+	struct eth_media *eb_ptr = (struct eth_media *)pt->af_packet_priv;
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(buf);
-		return 0;
+		return NET_RX_DROP;
 	}
 
 	if (likely(eb_ptr->bearer)) {
 		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
 			buf->next = NULL;
 			tipc_recv_msg(buf, eb_ptr->bearer);
-			return 0;
+			return NET_RX_SUCCESS;
 		}
 	}
 	kfree_skb(buf);
-	return 0;
+	return NET_RX_DROP;
 }
 
 /**
- * setup_bearer - setup association between Ethernet bearer and interface
+ * setup_media - setup association between Ethernet bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-	struct eth_bearer *eb_ptr =
-		container_of(work, struct eth_bearer, setup);
+	struct eth_media *eb_ptr =
+		container_of(work, struct eth_media, setup);
 
 	dev_add_pack(&eb_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an Ethernet interface
+ * enable_media - attach TIPC bearer to an Ethernet interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
 	struct net_device *dev;
-	struct eth_bearer *eb_ptr = &eth_bearers[0];
-	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+	struct eth_media *eb_ptr = &eth_media_array[0];
+	struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
 	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
 	int pending_dev = 0;
 
@@ -188,7 +188,7 @@
 	eb_ptr->tipc_packet_type.func = recv_msg;
 	eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
 	INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-	INIT_WORK(&eb_ptr->setup, setup_bearer);
+	INIT_WORK(&eb_ptr->setup, setup_media);
 	schedule_work(&eb_ptr->setup);
 
 	/* Associate TIPC bearer with Ethernet bearer */
@@ -205,14 +205,14 @@
 }
 
 /**
- * cleanup_bearer - break association between Ethernet bearer and interface
+ * cleanup_media - break association between Ethernet bearer and interface
  *
  * This routine must be invoked from a work queue because it can sleep.
  */
-static void cleanup_bearer(struct work_struct *work)
+static void cleanup_media(struct work_struct *work)
 {
-	struct eth_bearer *eb_ptr =
-		container_of(work, struct eth_bearer, cleanup);
+	struct eth_media *eb_ptr =
+		container_of(work, struct eth_media, cleanup);
 
 	dev_remove_pack(&eb_ptr->tipc_packet_type);
 	dev_put(eb_ptr->dev);
@@ -220,18 +220,18 @@
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an Ethernet interface
+ * disable_media - detach TIPC bearer from an Ethernet interface
  *
  * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-	struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+	struct eth_media *eb_ptr = (struct eth_media *)tb_ptr->usr_handle;
 
 	eb_ptr->bearer = NULL;
-	INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+	INIT_WORK(&eb_ptr->cleanup, cleanup_media);
 	schedule_work(&eb_ptr->cleanup);
 }
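
As the comment above disable_media() notes, the teardown path runs with spinlocks held, so work that can sleep (dev_remove_pack() synchronizes with readers, for instance) must be deferred to process context. A generic sketch of that deferral pattern, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct media_ctx {
    	struct net_device *dev;
    	struct packet_type pt;
    	struct work_struct cleanup;
    };

    /* Runs from the shared workqueue, where sleeping is allowed. */
    static void media_cleanup_fn(struct work_struct *work)
    {
    	struct media_ctx *ctx = container_of(work, struct media_ctx, cleanup);

    	dev_remove_pack(&ctx->pt);	/* may sleep */
    	dev_put(ctx->dev);
    }

    /* Called under a spinlock: only mark state and queue the work. */
    static void media_disable(struct media_ctx *ctx)
    {
    	INIT_WORK(&ctx->cleanup, media_cleanup_fn);
    	schedule_work(&ctx->cleanup);
    }
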
 
@@ -245,8 +245,8 @@
 			     void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct eth_bearer *eb_ptr = &eth_bearers[0];
-	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+	struct eth_media *eb_ptr = &eth_media_array[0];
+	struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
 
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
@@ -265,17 +265,17 @@
 		if (netif_carrier_ok(dev))
 			tipc_continue(eb_ptr->bearer);
 		else
-			tipc_block_bearer(eb_ptr->bearer->name);
+			tipc_block_bearer(eb_ptr->bearer);
 		break;
 	case NETDEV_UP:
 		tipc_continue(eb_ptr->bearer);
 		break;
 	case NETDEV_DOWN:
-		tipc_block_bearer(eb_ptr->bearer->name);
+		tipc_block_bearer(eb_ptr->bearer);
 		break;
 	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGEADDR:
-		tipc_block_bearer(eb_ptr->bearer->name);
+		tipc_block_bearer(eb_ptr->bearer);
 		tipc_continue(eb_ptr->bearer);
 		break;
 	case NETDEV_UNREGISTER:
@@ -327,8 +327,8 @@
  */
 static struct tipc_media eth_media_info = {
 	.send_msg	= send_msg,
-	.enable_bearer	= enable_bearer,
-	.disable_bearer	= disable_bearer,
+	.enable_media	= enable_media,
+	.disable_media	= disable_media,
 	.addr2str	= eth_addr2str,
 	.addr2msg	= eth_addr2msg,
 	.msg2addr	= eth_msg2addr,
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 9934a32..c139892 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,17 +42,17 @@
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_IB_BEARERS		MAX_BEARERS
+#define MAX_IB_MEDIA		MAX_BEARERS
 
 /**
- * struct ib_bearer - Infiniband bearer data structure
+ * struct ib_media - InfiniBand media data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Infiniband network device
  * @tipc_packet_type: used in binding TIPC to Infiniband driver
  * @cleanup: work item used when disabling bearer
  */
 
-struct ib_bearer {
+struct ib_media {
 	struct tipc_bearer *bearer;
 	struct net_device *dev;
 	struct packet_type tipc_packet_type;
@@ -61,7 +61,7 @@
 };
 
 static struct tipc_media ib_media_info;
-static struct ib_bearer ib_bearers[MAX_IB_BEARERS];
+static struct ib_media ib_media_array[MAX_IB_MEDIA];
 static int ib_started;
 
 /**
@@ -93,7 +93,7 @@
 	if (!clone)
 		return 0;
 
-	dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;
+	dev = ((struct ib_media *)(tb_ptr->usr_handle))->dev;
 	delta = dev->hard_header_len - skb_headroom(buf);
 
 	if ((delta > 0) &&
@@ -121,43 +121,43 @@
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 		    struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct ib_bearer *ib_ptr = (struct ib_bearer *)pt->af_packet_priv;
+	struct ib_media *ib_ptr = (struct ib_media *)pt->af_packet_priv;
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(buf);
-		return 0;
+		return NET_RX_DROP;
 	}
 
 	if (likely(ib_ptr->bearer)) {
 		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
 			buf->next = NULL;
 			tipc_recv_msg(buf, ib_ptr->bearer);
-			return 0;
+			return NET_RX_SUCCESS;
 		}
 	}
 	kfree_skb(buf);
-	return 0;
+	return NET_RX_DROP;
 }
 
 /**
- * setup_bearer - setup association between InfiniBand bearer and interface
+ * setup_media - setup association between InfiniBand bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-	struct ib_bearer *ib_ptr =
-		container_of(work, struct ib_bearer, setup);
+	struct ib_media *ib_ptr =
+		container_of(work, struct ib_media, setup);
 
 	dev_add_pack(&ib_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an InfiniBand interface
+ * enable_media - attach TIPC bearer to an InfiniBand interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
 	struct net_device *dev;
-	struct ib_bearer *ib_ptr = &ib_bearers[0];
-	struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+	struct ib_media *ib_ptr = &ib_media_array[0];
+	struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
 	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
 	int pending_dev = 0;
 
@@ -181,7 +181,7 @@
 	ib_ptr->tipc_packet_type.func = recv_msg;
 	ib_ptr->tipc_packet_type.af_packet_priv = ib_ptr;
 	INIT_LIST_HEAD(&(ib_ptr->tipc_packet_type.list));
-	INIT_WORK(&ib_ptr->setup, setup_bearer);
+	INIT_WORK(&ib_ptr->setup, setup_media);
 	schedule_work(&ib_ptr->setup);
 
 	/* Associate TIPC bearer with InfiniBand bearer */
@@ -204,8 +204,8 @@
  */
 static void cleanup_bearer(struct work_struct *work)
 {
-	struct ib_bearer *ib_ptr =
-		container_of(work, struct ib_bearer, cleanup);
+	struct ib_media *ib_ptr =
+		container_of(work, struct ib_media, cleanup);
 
 	dev_remove_pack(&ib_ptr->tipc_packet_type);
 	dev_put(ib_ptr->dev);
@@ -213,15 +213,15 @@
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an InfiniBand interface
+ * disable_media - detach TIPC bearer from an InfiniBand interface
  *
  * Mark InfiniBand bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-	struct ib_bearer *ib_ptr = (struct ib_bearer *)tb_ptr->usr_handle;
+	struct ib_media *ib_ptr = (struct ib_media *)tb_ptr->usr_handle;
 
 	ib_ptr->bearer = NULL;
 	INIT_WORK(&ib_ptr->cleanup, cleanup_bearer);
@@ -238,8 +238,8 @@
 			     void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct ib_bearer *ib_ptr = &ib_bearers[0];
-	struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+	struct ib_media *ib_ptr = &ib_media_array[0];
+	struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
 
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
@@ -258,17 +258,17 @@
 		if (netif_carrier_ok(dev))
 			tipc_continue(ib_ptr->bearer);
 		else
-			tipc_block_bearer(ib_ptr->bearer->name);
+			tipc_block_bearer(ib_ptr->bearer);
 		break;
 	case NETDEV_UP:
 		tipc_continue(ib_ptr->bearer);
 		break;
 	case NETDEV_DOWN:
-		tipc_block_bearer(ib_ptr->bearer->name);
+		tipc_block_bearer(ib_ptr->bearer);
 		break;
 	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGEADDR:
-		tipc_block_bearer(ib_ptr->bearer->name);
+		tipc_block_bearer(ib_ptr->bearer);
 		tipc_continue(ib_ptr->bearer);
 		break;
 	case NETDEV_UNREGISTER:
@@ -323,8 +323,8 @@
  */
 static struct tipc_media ib_media_info = {
 	.send_msg	= send_msg,
-	.enable_bearer	= enable_bearer,
-	.disable_bearer	= disable_bearer,
+	.enable_media	= enable_media,
+	.disable_media	= disable_media,
 	.addr2str	= ib_addr2str,
 	.addr2msg	= ib_addr2msg,
 	.msg2addr	= ib_msg2addr,
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0cc3d90..cf465d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -75,20 +75,6 @@
  */
 #define START_CHANGEOVER 100000u
 
-/**
- * struct tipc_link_name - deconstructed link name
- * @addr_local: network address of node at this end
- * @if_local: name of interface at this end
- * @addr_peer: network address of node at far end
- * @if_peer: name of interface at far end
- */
-struct tipc_link_name {
-	u32 addr_local;
-	char if_local[TIPC_MAX_IF_NAME];
-	u32 addr_peer;
-	char if_peer[TIPC_MAX_IF_NAME];
-};
-
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 				       struct sk_buff *buf);
 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
@@ -97,8 +83,7 @@
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int  link_send_sections_long(struct tipc_port *sender,
 				    struct iovec const *msg_sect,
-				    u32 num_sect, unsigned int total_len,
-				    u32 destnode);
+				    unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
@@ -161,72 +146,6 @@
 }
 
 /**
- * link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name: ptr to link name string
- * @name_parts: ptr to area for link name components (or NULL if not needed)
- *
- * Returns 1 if link name is valid, otherwise 0.
- */
-static int link_name_validate(const char *name,
-				struct tipc_link_name *name_parts)
-{
-	char name_copy[TIPC_MAX_LINK_NAME];
-	char *addr_local;
-	char *if_local;
-	char *addr_peer;
-	char *if_peer;
-	char dummy;
-	u32 z_local, c_local, n_local;
-	u32 z_peer, c_peer, n_peer;
-	u32 if_local_len;
-	u32 if_peer_len;
-
-	/* copy link name & ensure length is OK */
-	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
-	/* need above in case non-Posix strncpy() doesn't pad with nulls */
-	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
-	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
-		return 0;
-
-	/* ensure all component parts of link name are present */
-	addr_local = name_copy;
-	if_local = strchr(addr_local, ':');
-	if (if_local == NULL)
-		return 0;
-	*(if_local++) = 0;
-	addr_peer = strchr(if_local, '-');
-	if (addr_peer == NULL)
-		return 0;
-	*(addr_peer++) = 0;
-	if_local_len = addr_peer - if_local;
-	if_peer = strchr(addr_peer, ':');
-	if (if_peer == NULL)
-		return 0;
-	*(if_peer++) = 0;
-	if_peer_len = strlen(if_peer) + 1;
-
-	/* validate component parts of link name */
-	if ((sscanf(addr_local, "%u.%u.%u%c",
-		    &z_local, &c_local, &n_local, &dummy) != 3) ||
-	    (sscanf(addr_peer, "%u.%u.%u%c",
-		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
-	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
-	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
-	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
-		return 0;
-
-	/* return link name components, if necessary */
-	if (name_parts) {
-		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
-		strcpy(name_parts->if_local, if_local);
-		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
-		strcpy(name_parts->if_peer, if_peer);
-	}
-	return 1;
-}
-
-/**
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
  *
@@ -485,15 +404,9 @@
  */
 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
-	struct sk_buff *buf = l_ptr->defragm_buf;
-	struct sk_buff *next;
-
-	while (buf) {
-		next = buf->next;
-		kfree_skb(buf);
-		buf = next;
-	}
-	l_ptr->defragm_buf = NULL;
+	kfree_skb(l_ptr->reasm_head);
+	l_ptr->reasm_head = NULL;
+	l_ptr->reasm_tail = NULL;
 }
 
 /**
@@ -1065,8 +978,7 @@
  */
 int tipc_link_send_sections_fast(struct tipc_port *sender,
 				 struct iovec const *msg_sect,
-				 const u32 num_sect, unsigned int total_len,
-				 u32 destaddr)
+				 unsigned int len, u32 destaddr)
 {
 	struct tipc_msg *hdr = &sender->phdr;
 	struct tipc_link *l_ptr;
@@ -1080,8 +992,7 @@
 	 * Try building message using port's max_pkt hint.
 	 * (Must not hold any locks while building message.)
 	 */
-	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
-			     sender->max_pkt, &buf);
+	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
 	/* Exit if build request was invalid */
 	if (unlikely(res < 0))
 		return res;
@@ -1121,8 +1032,7 @@
 			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
 				goto again;
 
-			return link_send_sections_long(sender, msg_sect,
-						       num_sect, total_len,
+			return link_send_sections_long(sender, msg_sect, len,
 						       destaddr);
 		}
 		tipc_node_unlock(node);
@@ -1133,8 +1043,8 @@
 	if (buf)
 		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
 	if (res >= 0)
-		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-						 total_len, TIPC_ERR_NO_NODE);
+		return tipc_port_reject_sections(sender, hdr, msg_sect,
+						 len, TIPC_ERR_NO_NODE);
 	return res;
 }
 
@@ -1154,18 +1064,17 @@
  */
 static int link_send_sections_long(struct tipc_port *sender,
 				   struct iovec const *msg_sect,
-				   u32 num_sect, unsigned int total_len,
-				   u32 destaddr)
+				   unsigned int len, u32 destaddr)
 {
 	struct tipc_link *l_ptr;
 	struct tipc_node *node;
 	struct tipc_msg *hdr = &sender->phdr;
-	u32 dsz = total_len;
+	u32 dsz = len;
 	u32 max_pkt, fragm_sz, rest;
 	struct tipc_msg fragm_hdr;
 	struct sk_buff *buf, *buf_chain, *prev;
 	u32 fragm_crs, fragm_rest, hsz, sect_rest;
-	const unchar *sect_crs;
+	const unchar __user *sect_crs;
 	int curr_sect;
 	u32 fragm_no;
 	int res = 0;
@@ -1207,7 +1116,7 @@
 
 		if (!sect_rest) {
 			sect_rest = msg_sect[++curr_sect].iov_len;
-			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+			sect_crs = msg_sect[curr_sect].iov_base;
 		}
 
 		if (sect_rest < fragm_rest)
@@ -1283,8 +1192,8 @@
 			buf = buf_chain->next;
 			kfree_skb(buf_chain);
 		}
-		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-						 total_len, TIPC_ERR_NO_NODE);
+		return tipc_port_reject_sections(sender, hdr, msg_sect,
+						 len, TIPC_ERR_NO_NODE);
 	}
 
 	/* Append chain of fragments to send queue & send them */
@@ -1592,15 +1501,15 @@
 
 		/* Ensure bearer is still enabled */
 		if (unlikely(!b_ptr->active))
-			goto cont;
+			goto discard;
 
 		/* Ensure message is well-formed */
 		if (unlikely(!link_recv_buf_validate(buf)))
-			goto cont;
+			goto discard;
 
 		/* Ensure message data is a single contiguous unit */
 		if (unlikely(skb_linearize(buf)))
-			goto cont;
+			goto discard;
 
 		/* Handle arrival of a non-unicast link message */
 		msg = buf_msg(buf);
@@ -1616,20 +1525,18 @@
 		/* Discard unicast link messages destined for another node */
 		if (unlikely(!msg_short(msg) &&
 			     (msg_destnode(msg) != tipc_own_addr)))
-			goto cont;
+			goto discard;
 
 		/* Locate neighboring node that sent message */
 		n_ptr = tipc_node_find(msg_prevnode(msg));
 		if (unlikely(!n_ptr))
-			goto cont;
+			goto discard;
 		tipc_node_lock(n_ptr);
 
 		/* Locate unicast link endpoint that should handle message */
 		l_ptr = n_ptr->links[b_ptr->identity];
-		if (unlikely(!l_ptr)) {
-			tipc_node_unlock(n_ptr);
-			goto cont;
-		}
+		if (unlikely(!l_ptr))
+			goto unlock_discard;
 
 		/* Verify that communication with node is currently allowed */
 		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
@@ -1639,10 +1546,8 @@
 			!msg_redundant_link(msg))
 			n_ptr->block_setup &= ~WAIT_PEER_DOWN;
 
-		if (n_ptr->block_setup) {
-			tipc_node_unlock(n_ptr);
-			goto cont;
-		}
+		if (n_ptr->block_setup)
+			goto unlock_discard;
 
 		/* Validate message sequence number info */
 		seq_no = msg_seqno(msg);
@@ -1678,98 +1583,100 @@
 
 		/* Now (finally!) process the incoming message */
 protocol_check:
-		if (likely(link_working_working(l_ptr))) {
-			if (likely(seq_no == mod(l_ptr->next_in_no))) {
-				l_ptr->next_in_no++;
-				if (unlikely(l_ptr->oldest_deferred_in))
-					head = link_insert_deferred_queue(l_ptr,
-									  head);
-deliver:
-				if (likely(msg_isdata(msg))) {
-					tipc_node_unlock(n_ptr);
-					tipc_port_recv_msg(buf);
-					continue;
-				}
-				switch (msg_user(msg)) {
-					int ret;
-				case MSG_BUNDLER:
-					l_ptr->stats.recv_bundles++;
-					l_ptr->stats.recv_bundled +=
-						msg_msgcnt(msg);
-					tipc_node_unlock(n_ptr);
-					tipc_link_recv_bundle(buf);
-					continue;
-				case NAME_DISTRIBUTOR:
-					n_ptr->bclink.recv_permitted = true;
-					tipc_node_unlock(n_ptr);
-					tipc_named_recv(buf);
-					continue;
-				case BCAST_PROTOCOL:
-					tipc_link_recv_sync(n_ptr, buf);
-					tipc_node_unlock(n_ptr);
-					continue;
-				case CONN_MANAGER:
-					tipc_node_unlock(n_ptr);
-					tipc_port_recv_proto_msg(buf);
-					continue;
-				case MSG_FRAGMENTER:
-					l_ptr->stats.recv_fragments++;
-					ret = tipc_link_recv_fragment(
-						&l_ptr->defragm_buf,
-						&buf, &msg);
-					if (ret == 1) {
-						l_ptr->stats.recv_fragmented++;
-						goto deliver;
-					}
-					if (ret == -1)
-						l_ptr->next_in_no--;
-					break;
-				case CHANGEOVER_PROTOCOL:
-					type = msg_type(msg);
-					if (link_recv_changeover_msg(&l_ptr,
-								     &buf)) {
-						msg = buf_msg(buf);
-						seq_no = msg_seqno(msg);
-						if (type == ORIGINAL_MSG)
-							goto deliver;
-						goto protocol_check;
-					}
-					break;
-				default:
-					kfree_skb(buf);
-					buf = NULL;
-					break;
-				}
+		if (unlikely(!link_working_working(l_ptr))) {
+			if (msg_user(msg) == LINK_PROTOCOL) {
+				link_recv_proto_msg(l_ptr, buf);
+				head = link_insert_deferred_queue(l_ptr, head);
 				tipc_node_unlock(n_ptr);
-				tipc_net_route_msg(buf);
 				continue;
 			}
+
+			/* Traffic message. Conditionally activate link */
+			link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+			if (link_working_working(l_ptr)) {
+				/* Re-insert buffer in front of queue */
+				buf->next = head;
+				head = buf;
+				tipc_node_unlock(n_ptr);
+				continue;
+			}
+			goto unlock_discard;
+		}
+
+		/* Link is now in state WORKING_WORKING */
+		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
 			link_handle_out_of_seq_msg(l_ptr, buf);
 			head = link_insert_deferred_queue(l_ptr, head);
 			tipc_node_unlock(n_ptr);
 			continue;
 		}
-
-		/* Link is not in state WORKING_WORKING */
-		if (msg_user(msg) == LINK_PROTOCOL) {
-			link_recv_proto_msg(l_ptr, buf);
+		l_ptr->next_in_no++;
+		if (unlikely(l_ptr->oldest_deferred_in))
 			head = link_insert_deferred_queue(l_ptr, head);
+deliver:
+		if (likely(msg_isdata(msg))) {
 			tipc_node_unlock(n_ptr);
+			tipc_port_recv_msg(buf);
 			continue;
 		}
-
-		/* Traffic message. Conditionally activate link */
-		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
-		if (link_working_working(l_ptr)) {
-			/* Re-insert buffer in front of queue */
-			buf->next = head;
-			head = buf;
+		switch (msg_user(msg)) {
+			int ret;
+		case MSG_BUNDLER:
+			l_ptr->stats.recv_bundles++;
+			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
+			tipc_node_unlock(n_ptr);
+			tipc_link_recv_bundle(buf);
+			continue;
+		case NAME_DISTRIBUTOR:
+			n_ptr->bclink.recv_permitted = true;
+			tipc_node_unlock(n_ptr);
+			tipc_named_recv(buf);
+			continue;
+		case BCAST_PROTOCOL:
+			tipc_link_recv_sync(n_ptr, buf);
 			tipc_node_unlock(n_ptr);
 			continue;
+		case CONN_MANAGER:
+			tipc_node_unlock(n_ptr);
+			tipc_port_recv_proto_msg(buf);
+			continue;
+		case MSG_FRAGMENTER:
+			l_ptr->stats.recv_fragments++;
+			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
+						      &l_ptr->reasm_tail,
+						      &buf);
+			if (ret == LINK_REASM_COMPLETE) {
+				l_ptr->stats.recv_fragmented++;
+				msg = buf_msg(buf);
+				goto deliver;
+			}
+			if (ret == LINK_REASM_ERROR)
+				tipc_link_reset(l_ptr);
+			tipc_node_unlock(n_ptr);
+			continue;
+		case CHANGEOVER_PROTOCOL:
+			type = msg_type(msg);
+			if (link_recv_changeover_msg(&l_ptr, &buf)) {
+				msg = buf_msg(buf);
+				seq_no = msg_seqno(msg);
+				if (type == ORIGINAL_MSG)
+					goto deliver;
+				goto protocol_check;
+			}
+			break;
+		default:
+			kfree_skb(buf);
+			buf = NULL;
+			break;
 		}
 		tipc_node_unlock(n_ptr);
-cont:
+		tipc_net_route_msg(buf);
+		continue;
+unlock_discard:
+		tipc_node_unlock(n_ptr);
+discard:
 		kfree_skb(buf);
 	}
 	read_unlock_bh(&tipc_net_lock);
@@ -2432,114 +2339,47 @@
 }
 
 /*
- * A pending message being re-assembled must store certain values
- * to handle subsequent fragments correctly. The following functions
- * help storing these values in unused, available fields in the
- * pending message. This makes dynamic memory allocation unnecessary.
- */
-static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
-{
-	msg_set_seqno(buf_msg(buf), seqno);
-}
-
-static u32 get_fragm_size(struct sk_buff *buf)
-{
-	return msg_ack(buf_msg(buf));
-}
-
-static void set_fragm_size(struct sk_buff *buf, u32 sz)
-{
-	msg_set_ack(buf_msg(buf), sz);
-}
-
-static u32 get_expected_frags(struct sk_buff *buf)
-{
-	return msg_bcast_ack(buf_msg(buf));
-}
-
-static void set_expected_frags(struct sk_buff *buf, u32 exp)
-{
-	msg_set_bcast_ack(buf_msg(buf), exp);
-}
-
-/*
  * tipc_link_recv_fragment(): Called with node lock on. Returns
  * the reassembled buffer if message is complete.
  */
-int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
-			    struct tipc_msg **m)
+int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
+			    struct sk_buff **fbuf)
 {
-	struct sk_buff *prev = NULL;
-	struct sk_buff *fbuf = *fb;
-	struct tipc_msg *fragm = buf_msg(fbuf);
-	struct sk_buff *pbuf = *pending;
-	u32 long_msg_seq_no = msg_long_msgno(fragm);
+	struct sk_buff *frag = *fbuf;
+	struct tipc_msg *msg = buf_msg(frag);
+	u32 fragid = msg_type(msg);
+	bool headstolen;
+	int delta;
 
-	*fb = NULL;
-
-	/* Is there an incomplete message waiting for this fragment? */
-	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
-			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
-		prev = pbuf;
-		pbuf = pbuf->next;
-	}
-
-	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
-		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
-		u32 msg_sz = msg_size(imsg);
-		u32 fragm_sz = msg_data_sz(fragm);
-		u32 exp_fragm_cnt;
-		u32 max =  TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
-
-		if (msg_type(imsg) == TIPC_MCAST_MSG)
-			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
-		if (fragm_sz == 0 || msg_size(imsg) > max) {
-			kfree_skb(fbuf);
-			return 0;
-		}
-		exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
-		pbuf = tipc_buf_acquire(msg_size(imsg));
-		if (pbuf != NULL) {
-			pbuf->next = *pending;
-			*pending = pbuf;
-			skb_copy_to_linear_data(pbuf, imsg,
-						msg_data_sz(fragm));
-			/*  Prepare buffer for subsequent fragments. */
-			set_long_msg_seqno(pbuf, long_msg_seq_no);
-			set_fragm_size(pbuf, fragm_sz);
-			set_expected_frags(pbuf, exp_fragm_cnt - 1);
-		} else {
-			pr_debug("Link unable to reassemble fragmented message\n");
-			kfree_skb(fbuf);
-			return -1;
-		}
-		kfree_skb(fbuf);
+	skb_pull(frag, msg_hdr_sz(msg));
+	if (fragid == FIRST_FRAGMENT) {
+		if (*head || skb_unclone(frag, GFP_ATOMIC))
+			goto out_free;
+		*head = frag;
+		skb_frag_list_init(*head);
 		return 0;
-	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
-		u32 dsz = msg_data_sz(fragm);
-		u32 fsz = get_fragm_size(pbuf);
-		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
-		u32 exp_frags = get_expected_frags(pbuf) - 1;
-		skb_copy_to_linear_data_offset(pbuf, crs,
-					       msg_data(fragm), dsz);
-		kfree_skb(fbuf);
-
-		/* Is message complete? */
-		if (exp_frags == 0) {
-			if (prev)
-				prev->next = pbuf->next;
-			else
-				*pending = pbuf->next;
-			msg_reset_reroute_cnt(buf_msg(pbuf));
-			*fb = pbuf;
-			*m = buf_msg(pbuf);
-			return 1;
-		}
-		set_expected_frags(pbuf, exp_frags);
-		return 0;
+	} else if (skb_try_coalesce(*head, frag, &headstolen, &delta)) {
+		kfree_skb_partial(frag, headstolen);
+	} else {
+		if (!*head)
+			goto out_free;
+		if (!skb_has_frag_list(*head))
+			skb_shinfo(*head)->frag_list = frag;
+		else
+			(*tail)->next = frag;
+		*tail = frag;
+		(*head)->truesize += frag->truesize;
 	}
-	kfree_skb(fbuf);
+	if (fragid == LAST_FRAGMENT) {
+		*fbuf = *head;
+		*tail = *head = NULL;
+		return LINK_REASM_COMPLETE;
+	}
 	return 0;
+out_free:
+	pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
+	kfree_skb(*fbuf);
+	return LINK_REASM_ERROR;
 }
 
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
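
With the rewrite, callers drive reassembly through a (head, tail) buffer pair and the LINK_REASM_* status codes instead of the old scratch values hidden in unused message-header fields. A condensed sketch of the calling convention, following the receive path above:

    int ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
    				      &l_ptr->reasm_tail, &buf);
    if (ret == LINK_REASM_COMPLETE) {
    	msg = buf_msg(buf);	/* buf now holds the full message */
    	/* deliver as a normal inbound message */
    } else if (ret == LINK_REASM_ERROR) {
    	tipc_link_reset(l_ptr);	/* fragment stream is corrupt */
    } else {
    	/* ret == 0: fragment absorbed, wait for more */
    }
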
@@ -2585,25 +2425,21 @@
 static struct tipc_link *link_find_link(const char *name,
 					struct tipc_node **node)
 {
-	struct tipc_link_name link_name_parts;
-	struct tipc_bearer *b_ptr;
 	struct tipc_link *l_ptr;
+	struct tipc_node *n_ptr;
+	int i;
 
-	if (!link_name_validate(name, &link_name_parts))
-		return NULL;
-
-	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
-	if (!b_ptr)
-		return NULL;
-
-	*node = tipc_node_find(link_name_parts.addr_peer);
-	if (!*node)
-		return NULL;
-
-	l_ptr = (*node)->links[b_ptr->identity];
-	if (!l_ptr || strcmp(l_ptr->name, name))
-		return NULL;
-
+	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+		for (i = 0; i < MAX_BEARERS; i++) {
+			l_ptr = n_ptr->links[i];
+			if (l_ptr && !strcmp(l_ptr->name, name))
+				goto found;
+		}
+	}
+	l_ptr = NULL;
+	n_ptr = NULL;
+found:
+	*node = n_ptr;
 	return l_ptr;
 }
 
@@ -2646,6 +2482,7 @@
 	struct tipc_link *l_ptr;
 	struct tipc_bearer *b_ptr;
 	struct tipc_media *m_ptr;
+	int res = 0;
 
 	l_ptr = link_find_link(name, &node);
 	if (l_ptr) {
@@ -2668,9 +2505,12 @@
 		case TIPC_CMD_SET_LINK_WINDOW:
 			tipc_link_set_queue_limits(l_ptr, new_value);
 			break;
+		default:
+			res = -EINVAL;
+			break;
 		}
 		tipc_node_unlock(node);
-		return 0;
+		return res;
 	}
 
 	b_ptr = tipc_bearer_find(name);
@@ -2678,15 +2518,18 @@
 		switch (cmd) {
 		case TIPC_CMD_SET_LINK_TOL:
 			b_ptr->tolerance = new_value;
-			return 0;
+			break;
 		case TIPC_CMD_SET_LINK_PRI:
 			b_ptr->priority = new_value;
-			return 0;
+			break;
 		case TIPC_CMD_SET_LINK_WINDOW:
 			b_ptr->window = new_value;
-			return 0;
+			break;
+		default:
+			res = -EINVAL;
+			break;
 		}
-		return -EINVAL;
+		return res;
 	}
 
 	m_ptr = tipc_media_find(name);
@@ -2695,15 +2538,18 @@
 	switch (cmd) {
 	case TIPC_CMD_SET_LINK_TOL:
 		m_ptr->tolerance = new_value;
-		return 0;
+		break;
 	case TIPC_CMD_SET_LINK_PRI:
 		m_ptr->priority = new_value;
-		return 0;
+		break;
 	case TIPC_CMD_SET_LINK_WINDOW:
 		m_ptr->window = new_value;
-		return 0;
+		break;
+	default:
+		res = -EINVAL;
+		break;
 	}
-	return -EINVAL;
+	return res;
 }
 
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c048ed1..8a6c102 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -41,6 +41,12 @@
 #include "node.h"
 
 /*
+ * Link reassembly status codes
+ */
+#define LINK_REASM_ERROR	-1
+#define LINK_REASM_COMPLETE	1
+
+/*
  * Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
@@ -134,7 +140,8 @@
  * @next_out: ptr to first unsent outbound message in queue
  * @waiting_ports: linked list of ports waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
- * @defragm_buf: list of partially reassembled inbound message fragments
+ * @reasm_head: list head of partially reassembled inbound message fragments
+ * @reasm_tail: last fragment received
  * @stats: collects statistics regarding link activity
  */
 struct tipc_link {
@@ -196,9 +203,10 @@
 	struct sk_buff *next_out;
 	struct list_head waiting_ports;
 
-	/* Fragmentation/defragmentation */
+	/* Fragmentation/reassembly */
 	u32 long_msg_seq_no;
-	struct sk_buff *defragm_buf;
+	struct sk_buff *reasm_head;
+	struct sk_buff *reasm_tail;
 
 	/* Statistics */
 	struct tipc_stats stats;
@@ -227,13 +235,11 @@
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 int tipc_link_send_sections_fast(struct tipc_port *sender,
 				 struct iovec const *msg_sect,
-				 const u32 num_sect,
-				 unsigned int total_len,
-				 u32 destnode);
+				 unsigned int len, u32 destnode);
 void tipc_link_recv_bundle(struct sk_buff *buf);
-int  tipc_link_recv_fragment(struct sk_buff **pending,
-			     struct sk_buff **fb,
-			     struct tipc_msg **msg);
+int  tipc_link_recv_fragment(struct sk_buff **reasm_head,
+			     struct sk_buff **reasm_tail,
+			     struct sk_buff **fbuf);
 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
 			      u32 gap, u32 tolerance, u32 priority,
 			      u32 acked_mtu);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ced60e2..e525f8c 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -73,13 +73,13 @@
  * Returns message data size or errno
  */
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-		   u32 num_sect, unsigned int total_len, int max_size,
-		   struct sk_buff **buf)
+		   unsigned int len, int max_size, struct sk_buff **buf)
 {
-	int dsz, sz, hsz, pos, res, cnt;
+	int dsz, sz, hsz;
+	unsigned char *to;
 
-	dsz = total_len;
-	pos = hsz = msg_hdr_sz(hdr);
+	dsz = len;
+	hsz = msg_hdr_sz(hdr);
 	sz = hsz + dsz;
 	msg_set_size(hdr, sz);
 	if (unlikely(sz > max_size)) {
@@ -91,16 +91,11 @@
 	if (!(*buf))
 		return -ENOMEM;
 	skb_copy_to_linear_data(*buf, hdr, hsz);
-	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
-		skb_copy_to_linear_data_offset(*buf, pos,
-					       msg_sect[cnt].iov_base,
-					       msg_sect[cnt].iov_len);
-		pos += msg_sect[cnt].iov_len;
+	to = (*buf)->data + hsz;
+	if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
+		kfree_skb(*buf);
+		*buf = NULL;
+		return -EFAULT;
 	}
-	if (likely(res))
-		return dsz;
-
-	kfree_skb(*buf);
-	*buf = NULL;
-	return -EFAULT;
+	return dsz;
 }
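
tipc_msg_build() now copies the payload with a single memcpy_fromiovecend() call instead of walking the sections itself, which is why the (num_sect, total_len) parameter pair collapses into one length argument throughout this series. Sketched usage (variable names illustrative):

    #include <linux/socket.h>	/* memcpy_fromiovecend() */

    /* Copy len payload bytes, starting at iovec offset 0, into the
     * buffer just past the message header.  Returns 0 on success or
     * -EFAULT if a source page could not be read.
     */
    unsigned char *to = skb->data + hdr_size;

    if (len && memcpy_fromiovecend(to, msg_sect, 0, len))
    	return -EFAULT;
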
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 5e4ccf5..76d1269 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -554,12 +554,6 @@
 	msg_set_bits(m, 4, 16, 0xffff, n);
 }
 
-
-static inline u32 msg_fragm_no(struct tipc_msg *m)
-{
-	return msg_bits(m, 4, 16, 0xffff);
-}
-
 static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
 {
 	msg_set_bits(m, 4, 16, 0xffff, n);
@@ -576,12 +570,6 @@
 	msg_set_bits(m, 4, 0, 0xffff, n);
 }
 
-
-static inline u32 msg_long_msgno(struct tipc_msg *m)
-{
-	return msg_bits(m, 4, 0, 0xffff);
-}
-
 static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
 {
 	msg_set_bits(m, 4, 0, 0xffff, n);
@@ -722,6 +710,5 @@
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
 		   u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-		   u32 num_sect, unsigned int total_len, int max_size,
-		   struct sk_buff **buf);
+		   unsigned int len, int max_size, struct sk_buff **buf);
 #endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6e6c434..25100c0 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -298,9 +298,10 @@
 		}
 		n_ptr->bclink.deferred_size = 0;
 
-		if (n_ptr->bclink.defragm) {
-			kfree_skb(n_ptr->bclink.defragm);
-			n_ptr->bclink.defragm = NULL;
+		if (n_ptr->bclink.reasm_head) {
+			kfree_skb(n_ptr->bclink.reasm_head);
+			n_ptr->bclink.reasm_head = NULL;
+			n_ptr->bclink.reasm_tail = NULL;
 		}
 
 		tipc_bclink_remove_node(n_ptr->addr);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 3c189b3..e5e96c0 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -74,7 +74,8 @@
  *    @deferred_size: number of OOS b'cast messages in deferred queue
  *    @deferred_head: oldest OOS b'cast message received from node
  *    @deferred_tail: newest OOS b'cast message received from node
- *    @defragm: list of partially reassembled b'cast message fragments from node
+ *    @reasm_head: broadcast reassembly queue head from node
+ *    @reasm_tail: last broadcast fragment received from node
  *    @recv_permitted: true if node is allowed to receive b'cast messages
  */
 struct tipc_node {
@@ -98,7 +99,8 @@
 		u32 deferred_size;
 		struct sk_buff *deferred_head;
 		struct sk_buff *deferred_tail;
-		struct sk_buff *defragm;
+		struct sk_buff *reasm_head;
+		struct sk_buff *reasm_tail;
 		bool recv_permitted;
 	} bclink;
 };
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b3ed2fc..c081a76 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -90,8 +90,7 @@
  * tipc_multicast - send a multicast message to local and remote destinations
  */
 int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
-		   u32 num_sect, struct iovec const *msg_sect,
-		   unsigned int total_len)
+		   struct iovec const *msg_sect, unsigned int len)
 {
 	struct tipc_msg *hdr;
 	struct sk_buff *buf;
@@ -114,8 +113,7 @@
 	msg_set_namelower(hdr, seq->lower);
 	msg_set_nameupper(hdr, seq->upper);
 	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
-	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-			     &buf);
+	res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
 	if (unlikely(!buf))
 		return res;
 
@@ -436,14 +434,13 @@
 }
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-			      struct iovec const *msg_sect, u32 num_sect,
-			      unsigned int total_len, int err)
+			      struct iovec const *msg_sect, unsigned int len,
+			      int err)
 {
 	struct sk_buff *buf;
 	int res;
 
-	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-			     &buf);
+	res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
 	if (!buf)
 		return res;
 
@@ -918,15 +915,14 @@
  *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
-static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender,
 				   struct iovec const *msg_sect,
-				   unsigned int total_len)
+				   unsigned int len)
 {
 	struct sk_buff *buf;
 	int res;
 
-	res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
-			     MAX_MSG_SIZE, &buf);
+	res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
 	if (likely(buf))
 		tipc_port_recv_msg(buf);
 	return res;
@@ -935,8 +931,7 @@
 /**
  * tipc_send - send message sections on connection
  */
-int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
-	      unsigned int total_len)
+int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
 {
 	struct tipc_port *p_ptr;
 	u32 destnode;
@@ -950,11 +945,10 @@
 	if (!tipc_port_congested(p_ptr)) {
 		destnode = port_peernode(p_ptr);
 		if (likely(!in_own_node(destnode)))
-			res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-							   total_len, destnode);
+			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+							   len, destnode);
 		else
-			res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-						      total_len);
+			res = tipc_port_recv_sections(p_ptr, msg_sect, len);
 
 		if (likely(res != -ELINKCONG)) {
 			p_ptr->congested = 0;
@@ -965,7 +959,7 @@
 	}
 	if (port_unreliable(p_ptr)) {
 		p_ptr->congested = 0;
-		return total_len;
+		return len;
 	}
 	return -ELINKCONG;
 }
@@ -974,8 +968,7 @@
  * tipc_send2name - send message sections to port name
  */
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
-		   unsigned int num_sect, struct iovec const *msg_sect,
-		   unsigned int total_len)
+		   struct iovec const *msg_sect, unsigned int len)
 {
 	struct tipc_port *p_ptr;
 	struct tipc_msg *msg;
@@ -999,36 +992,32 @@
 
 	if (likely(destport || destnode)) {
 		if (likely(in_own_node(destnode)))
-			res = tipc_port_recv_sections(p_ptr, num_sect,
-						      msg_sect, total_len);
+			res = tipc_port_recv_sections(p_ptr, msg_sect, len);
 		else if (tipc_own_addr)
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
-							   num_sect, total_len,
-							   destnode);
+							   len, destnode);
 		else
 			res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
-							num_sect, total_len,
-							TIPC_ERR_NO_NODE);
+							len, TIPC_ERR_NO_NODE);
 		if (likely(res != -ELINKCONG)) {
 			if (res > 0)
 				p_ptr->sent++;
 			return res;
 		}
 		if (port_unreliable(p_ptr)) {
-			return total_len;
+			return len;
 		}
 		return -ELINKCONG;
 	}
-	return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-					 total_len, TIPC_ERR_NO_NAME);
+	return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+					 TIPC_ERR_NO_NAME);
 }
 
 /**
  * tipc_send2port - send message sections to port identity
  */
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
-		   unsigned int num_sect, struct iovec const *msg_sect,
-		   unsigned int total_len)
+		   struct iovec const *msg_sect, unsigned int len)
 {
 	struct tipc_port *p_ptr;
 	struct tipc_msg *msg;
@@ -1046,21 +1035,20 @@
 	msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
 	if (in_own_node(dest->node))
-		res =  tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-					       total_len);
+		res =  tipc_port_recv_sections(p_ptr, msg_sect, len);
 	else if (tipc_own_addr)
-		res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-						   total_len, dest->node);
+		res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
+						   dest->node);
 	else
-		res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-						total_len, TIPC_ERR_NO_NODE);
+		res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+						TIPC_ERR_NO_NODE);
 	if (likely(res != -ELINKCONG)) {
 		if (res > 0)
 			p_ptr->sent++;
 		return res;
 	}
 	if (port_unreliable(p_ptr)) {
-		return total_len;
+		return len;
 	}
 	return -ELINKCONG;
 }
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 5a7026b..9122535 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -151,24 +151,20 @@
  * TIPC messaging routines
  */
 int tipc_port_recv_msg(struct sk_buff *buf);
-int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
-	      unsigned int total_len);
+int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
-		   unsigned int num_sect, struct iovec const *msg_sect,
-		   unsigned int total_len);
+		   struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2port(u32 portref, struct tipc_portid const *dest,
-		   unsigned int num_sect, struct iovec const *msg_sect,
-		   unsigned int total_len);
+		   struct iovec const *msg_sect, unsigned int len);
 
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
-		   unsigned int section_count, struct iovec const *msg,
-		   unsigned int total_len);
+		   struct iovec const *msg, unsigned int len);
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-			      struct iovec const *msg_sect, u32 num_sect,
-			      unsigned int total_len, int err);
+			      struct iovec const *msg_sect, unsigned int len,
+			      int err);
 struct sk_buff *tipc_port_get_ports(void);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
 void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 6cc7ddd..3906527 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -338,7 +338,7 @@
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf == NULL)
 			break;
-		if (TIPC_SKB_CB(buf)->handle != 0)
+		if (TIPC_SKB_CB(buf)->handle != NULL)
 			kfree_skb(buf);
 		else {
 			if ((sock->state == SS_CONNECTING) ||
@@ -622,13 +622,11 @@
 			res = tipc_send2name(tport->ref,
 					     &dest->addr.name.name,
 					     dest->addr.name.domain,
-					     m->msg_iovlen,
 					     m->msg_iov,
 					     total_len);
 		} else if (dest->addrtype == TIPC_ADDR_ID) {
 			res = tipc_send2port(tport->ref,
 					     &dest->addr.id,
-					     m->msg_iovlen,
 					     m->msg_iov,
 					     total_len);
 		} else if (dest->addrtype == TIPC_ADDR_MCAST) {
@@ -641,7 +639,6 @@
 				break;
 			res = tipc_multicast(tport->ref,
 					     &dest->addr.nameseq,
-					     m->msg_iovlen,
 					     m->msg_iov,
 					     total_len);
 		}
@@ -707,8 +704,7 @@
 			break;
 		}
 
-		res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
-				total_len);
+		res = tipc_send(tport->ref, m->msg_iov, total_len);
 		if (likely(res != -ELINKCONG))
 			break;
 		if (timeout_val <= 0L) {
@@ -1368,7 +1364,7 @@
 		return TIPC_ERR_OVERLOAD;
 
 	/* Enqueue message */
-	TIPC_SKB_CB(buf)->handle = 0;
+	TIPC_SKB_CB(buf)->handle = NULL;
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
 	skb_set_owner_r(buf, sk);
 
@@ -1691,7 +1687,7 @@
 		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf) {
-			if (TIPC_SKB_CB(buf)->handle != 0) {
+			if (TIPC_SKB_CB(buf)->handle != NULL) {
 				kfree_skb(buf);
 				goto restart;
 			}
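
TIPC_SKB_CB(buf)->handle is a pointer, so comparing and assigning it against NULL rather than 0 is the type-correct spelling and silences sparse's "using plain integer as NULL pointer" warning:

    /* before */  TIPC_SKB_CB(buf)->handle = 0;
    /* after  */  TIPC_SKB_CB(buf)->handle = NULL;
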
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index b5fa7e4..14810ab 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -6,7 +6,7 @@
 	tristate "Virtual Socket protocol"
 	help
 	  Virtual Socket Protocol is a socket protocol similar to TCP/IP
-	  allowing comunication between Virtual Machines and hypervisor
+	  allowing communication between Virtual Machines and hypervisor
 	  or host.
 
 	  You should also select one or more hypervisor-specific transports
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
index 1e743d2..5dcd9c0 100644
--- a/net/wimax/wimax-internal.h
+++ b/net/wimax/wimax-internal.h
@@ -63,11 +63,11 @@
 {
 	wimax_dev->state = state;
 }
-extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
 
 #ifdef CONFIG_DEBUG_FS
-extern int wimax_debugfs_add(struct wimax_dev *);
-extern void wimax_debugfs_rm(struct wimax_dev *);
+int wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
 #else
 static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
 {
@@ -76,13 +76,13 @@
 static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
 #endif
 
-extern void wimax_id_table_add(struct wimax_dev *);
-extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
-extern void wimax_id_table_rm(struct wimax_dev *);
-extern void wimax_id_table_release(void);
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
 
-extern int wimax_rfkill_add(struct wimax_dev *);
-extern void wimax_rfkill_rm(struct wimax_dev *);
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
 
 extern struct genl_family wimax_gnl_family;
 extern struct genl_multicast_group wimax_gnl_mcg;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 50f6195..9b8cc87 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -328,6 +328,7 @@
 	return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
 					       width);
 }
+EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
 
 static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
 					u32 center_freq, u32 bandwidth,
@@ -503,7 +504,8 @@
 	case NL80211_IFTYPE_ADHOC:
 		if (wdev->current_bss) {
 			*chan = wdev->current_bss->pub.channel;
-			*chanmode = wdev->ibss_fixed
+			*chanmode = (wdev->ibss_fixed &&
+				     !wdev->ibss_dfs_possible)
 				  ? CHAN_MODE_SHARED
 				  : CHAN_MODE_EXCLUSIVE;
 			return;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3159e9c..af10e59 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -234,10 +234,10 @@
 };
 
 /* free object */
-extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
+void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
 
-extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
-			       char *newname);
+int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
+			char *newname);
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
 
@@ -382,15 +382,6 @@
 				 enum cfg80211_chan_mode chanmode,
 				 u8 radar_detect);
 
-/**
- * cfg80211_chandef_dfs_required - checks if radar detection is required
- * @wiphy: the wiphy to validate against
- * @chandef: the channel definition to check
- * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
- */
-int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-				  const struct cfg80211_chan_def *c);
-
 void cfg80211_set_dfs_state(struct wiphy *wiphy,
 			    const struct cfg80211_chan_def *chandef,
 			    enum nl80211_dfs_state dfs_state);
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 90d0500..4541577 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -47,17 +47,19 @@
 		return 0;
 
 	if (chan->flags & IEEE80211_CHAN_DISABLED)
-		return snprintf(buf + offset,
-				buf_size - offset,
-				"%d Disabled\n",
-				chan->center_freq);
+		return scnprintf(buf + offset,
+				 buf_size - offset,
+				 "%d Disabled\n",
+				 chan->center_freq);
 
-	return snprintf(buf + offset,
-			buf_size - offset,
-			"%d HT40 %c%c\n",
-			chan->center_freq,
-			(chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
-			(chan->flags & IEEE80211_CHAN_NO_HT40PLUS)  ? ' ' : '+');
+	return scnprintf(buf + offset,
+			 buf_size - offset,
+			 "%d HT40 %c%c\n",
+			 chan->center_freq,
+			 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ?
+				' ' : '-',
+			 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ?
+				' ' : '+');
 }
 
 static ssize_t ht40allow_map_read(struct file *file,
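
The switch from snprintf() to scnprintf() matters because the return values differ on truncation: snprintf() returns the length the string would have had, which can exceed the remaining space and corrupt offset arithmetic like buf + offset, while scnprintf() returns the number of characters actually written. A toy illustration:

    char buf[8];

    /* would-be length: returns 10 even though only 7 chars + NUL fit */
    int n1 = snprintf(buf, sizeof(buf), "0123456789");

    /* bytes actually stored (excluding the NUL): returns 7 */
    int n2 = scnprintf(buf, sizeof(buf), "0123456789");
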
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 9392f8c..42ed274 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -46,6 +46,12 @@
 	sub(/:/, "", country)
 	printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
 	printf "\t.alpha2 = \"%s\",\n", country
+	if ($NF ~ /DFS-ETSI/)
+		printf "\t.dfs_region = NL80211_DFS_ETSI,\n"
+	else if ($NF ~ /DFS-FCC/)
+		printf "\t.dfs_region = NL80211_DFS_FCC,\n"
+	else if ($NF ~ /DFS-JP/)
+		printf "\t.dfs_region = NL80211_DFS_JP,\n"
 	printf "\t.reg_rules = {\n"
 	active = 1
 	regdb = regdb "\t&regdom_" country ",\n"
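
The awk additions inspect the last field of each country line and emit a matching .dfs_region initializer into the generated regulatory database. Under that assumption, a db.txt entry tagged DFS-ETSI would come out roughly as (country code hypothetical):

    static const struct ieee80211_regdomain regdom_FR = {
    	.alpha2 = "FR",
    	.dfs_region = NL80211_DFS_ETSI,
    	.reg_rules = {
    		/* rules generated from the entry's (freq) lines */
    	},
    };
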
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 403fe29..9d797df 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -83,6 +83,8 @@
 			 struct cfg80211_cached_keys *connkeys)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct ieee80211_channel *check_chan;
+	u8 radar_detect_width = 0;
 	int err;
 
 	ASSERT_WDEV_LOCK(wdev);
@@ -114,14 +116,28 @@
 	wdev->connect_keys = connkeys;
 
 	wdev->ibss_fixed = params->channel_fixed;
+	wdev->ibss_dfs_possible = params->userspace_handles_dfs;
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.chandef = params->chandef;
 #endif
+	check_chan = params->chandef.chan;
+	if (params->userspace_handles_dfs) {
+		/* use channel NULL to check for radar even if the current
+		 * channel is not a radar channel - it might decide to change
+		 * to DFS channel later.
+		 */
+		radar_detect_width = BIT(params->chandef.width);
+		check_chan = NULL;
+	}
 
-	err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan,
-				    params->channel_fixed
-				    ? CHAN_MODE_SHARED
-				    : CHAN_MODE_EXCLUSIVE);
+	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+					   check_chan,
+					   (params->channel_fixed &&
+					    !radar_detect_width)
+					   ? CHAN_MODE_SHARED
+					   : CHAN_MODE_EXCLUSIVE,
+					   radar_detect_width);
+
 	if (err) {
 		wdev->connect_keys = NULL;
 		return err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 8d49c1c..6a6b1c8 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -707,11 +707,13 @@
 			if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
 				continue;
 
-			timeout = c->dfs_state_entered +
-				  IEEE80211_DFS_MIN_NOP_TIME_MS;
+			timeout = c->dfs_state_entered + msecs_to_jiffies(
+					IEEE80211_DFS_MIN_NOP_TIME_MS);
 
 			if (time_after_eq(jiffies, timeout)) {
 				c->dfs_state = NL80211_DFS_USABLE;
+				c->dfs_state_entered = jiffies;
+
 				cfg80211_chandef_create(&chandef, c,
 							NL80211_CHAN_NO_HT);
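
The fix wraps IEEE80211_DFS_MIN_NOP_TIME_MS in msecs_to_jiffies() before adding it to dfs_state_entered: the entry timestamp is in jiffies, so adding a raw millisecond count compared mismatched units and made the non-occupancy period wrong whenever HZ is not 1000. The corrected idiom:

    unsigned long timeout;

    /* both operands in jiffies; time_after_eq() handles wraparound */
    timeout = c->dfs_state_entered +
    	  msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
    if (time_after_eq(jiffies, timeout))
    	c->dfs_state = NL80211_DFS_USABLE;
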
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 626dc3b..a7f4e79 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -354,6 +354,9 @@
 	[NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
 	[NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
 	[NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
+	[NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
+	[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
+	[NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -3896,9 +3899,45 @@
 	return 0;
 }
 
+static int nl80211_parse_sta_channel_info(struct genl_info *info,
+				      struct station_parameters *params)
+{
+	if (info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]) {
+		params->supported_channels =
+		     nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]);
+		params->supported_channels_len =
+		     nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]);
+		/*
+		 * Need to include at least one (first channel, number of
+		 * channels) tuple for each subband, and must have proper
+		 * tuples for the rest of the data as well.
+		 */
+		if (params->supported_channels_len < 2)
+			return -EINVAL;
+		if (params->supported_channels_len % 2)
+			return -EINVAL;
+	}
+
+	if (info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]) {
+		params->supported_oper_classes =
+		 nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]);
+		params->supported_oper_classes_len =
+		  nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]);
+		/*
+		 * The value of the Length field of the Supported Operating
+		 * Classes element is between 2 and 253.
+		 */
+		if (params->supported_oper_classes_len < 2 ||
+		    params->supported_oper_classes_len > 253)
+			return -EINVAL;
+	}
+	return 0;
+}
+
 static int nl80211_set_station_tdls(struct genl_info *info,
 				    struct station_parameters *params)
 {
+	int err;
 	/* Dummy STA entry gets updated once the peer capabilities are known */
 	if (info->attrs[NL80211_ATTR_PEER_AID])
 		params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
@@ -3909,6 +3948,10 @@
 		params->vht_capa =
 			nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
 
+	err = nl80211_parse_sta_channel_info(info, params);
+	if (err)
+		return err;
+
 	return nl80211_parse_sta_wme(info, params);
 }
 
@@ -4089,6 +4132,10 @@
 			return -EINVAL;
 	}
 
+	err = nl80211_parse_sta_channel_info(info, &params);
+	if (err)
+		return err;
+
 	err = nl80211_parse_sta_wme(info, &params);
 	if (err)
 		return err;
@@ -5591,6 +5638,9 @@
 	if (err)
 		return err;
 
+	if (netif_carrier_ok(dev))
+		return -EBUSY;
+
 	if (wdev->cac_started)
 		return -EBUSY;
 
@@ -5634,15 +5684,27 @@
 	static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
 	u8 radar_detect_width = 0;
 	int err;
+	bool need_new_beacon = false;
 
 	if (!rdev->ops->channel_switch ||
 	    !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
 		return -EOPNOTSUPP;
 
-	/* may add IBSS support later */
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+	switch (dev->ieee80211_ptr->iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		need_new_beacon = true;
+
+		/* useless if AP is not running */
+		if (!wdev->beacon_interval)
+			return -EINVAL;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_MESH_POINT:
+		break;
+	default:
 		return -EOPNOTSUPP;
+	}
 
 	memset(&params, 0, sizeof(params));
 
@@ -5651,15 +5713,14 @@
 		return -EINVAL;
 
 	/* only important for AP, IBSS and mesh create IEs internally */
-	if (!info->attrs[NL80211_ATTR_CSA_IES])
-		return -EINVAL;
-
-	/* useless if AP is not running */
-	if (!wdev->beacon_interval)
+	if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES])
 		return -EINVAL;
 
 	params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
 
+	if (!need_new_beacon)
+		goto skip_beacons;
+
 	err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
 	if (err)
 		return err;
@@ -5699,6 +5760,7 @@
 			return -EINVAL;
 	}
 
+skip_beacons:
 	err = nl80211_parse_chandef(rdev, info, &params.chandef);
 	if (err)
 		return err;
@@ -5706,12 +5768,17 @@
 	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
 		return -EINVAL;
 
-	err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
-	if (err < 0) {
-		return err;
-	} else if (err) {
-		radar_detect_width = BIT(params.chandef.width);
-		params.radar_required = true;
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP ||
+	    dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO ||
+	    dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC) {
+		err = cfg80211_chandef_dfs_required(wdev->wiphy,
+						    &params.chandef);
+		if (err < 0) {
+			return err;
+		} else if (err) {
+			radar_detect_width = BIT(params.chandef.width);
+			params.radar_required = true;
+		}
 	}
 
 	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
@@ -6535,6 +6602,9 @@
 	ibss.control_port =
 		nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]);
 
+	ibss.userspace_handles_dfs =
+		nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]);
+
 	err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
 	if (err)
 		kfree(connkeys);
@@ -10740,7 +10810,9 @@
 	wdev_lock(wdev);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
-		    wdev->iftype != NL80211_IFTYPE_P2P_GO))
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+		    wdev->iftype != NL80211_IFTYPE_ADHOC &&
+		    wdev->iftype != NL80211_IFTYPE_MESH_POINT))
 		goto out;
 
 	wdev->channel = chandef->chan;
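
A note on the policy additions above: an NLA_BINARY entry without a .len bound
accepts payloads of any size, so the handler is responsible for validating the
data itself, which nl80211_parse_sta_channel_info() does with its even-length
and 2..253 range checks. A hedged sketch of that validation pattern (the name
and limits are illustrative):

#include <net/netlink.h>

/* Illustrative: NLA_BINARY hands the handler raw bytes; length sanity
 * checks are the handler's job. */
static int parse_chan_pairs(const struct nlattr *attr,
			    const u8 **data, u8 *len)
{
	if (!attr)
		return 0;		/* attribute is optional */
	if (nla_len(attr) < 2 || nla_len(attr) % 2)
		return -EINVAL;		/* need whole (first, count) pairs */
	*data = nla_data(attr);
	*len = nla_len(attr);
	return 0;
}
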
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index de06d5d..7da67fd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -172,11 +172,21 @@
 			NL80211_RRF_NO_IBSS |
 			NL80211_RRF_NO_OFDM),
 		/* IEEE 802.11a, channel 36..48 */
-		REG_RULE(5180-10, 5240+10, 80, 6, 20,
+		REG_RULE(5180-10, 5240+10, 160, 6, 20,
                         NL80211_RRF_PASSIVE_SCAN |
                         NL80211_RRF_NO_IBSS),
 
-		/* NB: 5260 MHz - 5700 MHz requires DFS */
+		/* IEEE 802.11a, channel 52..64 - DFS required */
+		REG_RULE(5260-10, 5320+10, 160, 6, 20,
+			NL80211_RRF_PASSIVE_SCAN |
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
+
+		/* IEEE 802.11a, channel 100..144 - DFS required */
+		REG_RULE(5500-10, 5720+10, 160, 6, 20,
+			NL80211_RRF_PASSIVE_SCAN |
+			NL80211_RRF_NO_IBSS |
+			NL80211_RRF_DFS),
 
 		/* IEEE 802.11a, channel 149..165 */
 		REG_RULE(5745-10, 5825+10, 80, 6, 20,
@@ -758,24 +768,25 @@
 }
 EXPORT_SYMBOL(freq_reg_info);
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
-static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 {
 	switch (initiator) {
 	case NL80211_REGDOM_SET_BY_CORE:
-		return "Set by core";
+		return "core";
 	case NL80211_REGDOM_SET_BY_USER:
-		return "Set by user";
+		return "user";
 	case NL80211_REGDOM_SET_BY_DRIVER:
-		return "Set by driver";
+		return "driver";
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-		return "Set by country IE";
+		return "country IE";
 	default:
 		WARN_ON(1);
-		return "Set by bug";
+		return "bug";
 	}
 }
+EXPORT_SYMBOL(reg_initiator_name);
 
+#ifdef CONFIG_CFG80211_REG_DEBUG
 static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
 				    const struct ieee80211_reg_rule *reg_rule)
 {
@@ -962,6 +973,13 @@
 }
 #endif
 
+static bool wiphy_strict_alpha2_regd(struct wiphy *wiphy)
+{
+	if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY &&
+	    !(wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY))
+		return true;
+	return false;
+}
 
 static bool ignore_reg_update(struct wiphy *wiphy,
 			      enum nl80211_reg_initiator initiator)
@@ -969,14 +987,17 @@
 	struct regulatory_request *lr = get_last_request();
 
 	if (!lr) {
-		REG_DBG_PRINT("Ignoring regulatory request %s since last_request is not set\n",
+		REG_DBG_PRINT("Ignoring regulatory request set by %s "
+			      "since last_request is not set\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
 
 	if (initiator == NL80211_REGDOM_SET_BY_CORE &&
 	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
-		REG_DBG_PRINT("Ignoring regulatory request %s since the driver uses its own custom regulatory domain\n",
+		REG_DBG_PRINT("Ignoring regulatory request set by %s "
+			      "since the driver uses its own custom "
+			      "regulatory domain\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -985,10 +1006,12 @@
 	 * wiphy->regd will be set once the device has its own
 	 * desired regulatory domain set
 	 */
-	if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
+	if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd &&
 	    initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
 	    !is_world_regdom(lr->alpha2)) {
-		REG_DBG_PRINT("Ignoring regulatory request %s since the driver requires its own regulatory domain to be set first\n",
+		REG_DBG_PRINT("Ignoring regulatory request set by %s "
+			      "since the driver requires its own regulatory "
+			      "domain to be set first\n",
 			      reg_initiator_name(initiator));
 		return true;
 	}
@@ -1689,8 +1712,8 @@
 }
 EXPORT_SYMBOL(regulatory_hint);
 
-void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band,
-			 const u8 *country_ie, u8 country_ie_len)
+void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
+				const u8 *country_ie, u8 country_ie_len)
 {
 	char alpha2[2];
 	enum environment_cap env = ENVIRON_ANY;
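
On the world-regdomain rules above: REG_RULE() takes frequency range edges
rather than channel centers, so a rule for channels 52..64 (20 MHz channels
centered at 5260..5320 MHz) spans 5260-10 through 5320+10, and the third
argument, raised here from 80 to 160, is the maximum permitted bandwidth in
MHz. The edge arithmetic, as a standalone illustration:

#include <stdio.h>

int main(void)
{
	int first_center = 5260, last_center = 5320, chan_width = 20;

	/* rule edges extend half a channel beyond the outermost centers */
	printf("rule spans %d-%d MHz\n",
	       first_center - chan_width / 2,
	       last_center + chan_width / 2);
	return 0;	/* prints: rule spans 5250-5330 MHz */
}
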
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index af2d5f8..9677e3c 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -58,7 +58,7 @@
 				 gfp_t gfp);
 
 /**
- * regulatory_hint_11d - hints a country IE as a regulatory domain
+ * regulatory_hint_country_ie - hints a country IE as a regulatory domain
  * @wiphy: the wireless device giving the hint (used only for reporting
  *	conflicts)
  * @band: the band on which the country IE was received on. This determines
@@ -78,7 +78,7 @@
  * not observed. For this reason if a triplet is seen with channel
  * information for a band the BSS is not present in it will be ignored.
  */
-void regulatory_hint_11d(struct wiphy *wiphy,
+void regulatory_hint_country_ie(struct wiphy *wiphy,
 			 enum ieee80211_band band,
 			 const u8 *country_ie,
 			 u8 country_ie_len);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index eeb7148..d4397eb 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -254,10 +254,10 @@
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
-	request = rdev->sched_scan_req;
-
 	rtnl_lock();
 
+	request = rdev->sched_scan_req;
+
 	/* we don't have sched_scan_req anymore if the scan is stopping */
 	if (request) {
 		if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
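
The scan.c hunk is an ordering fix: rdev->sched_scan_req can be cleared by a
concurrent stop path, so it must be read after the RTNL is taken, not before.
The same pattern in miniature (all names are illustrative):

#include <linux/mutex.h>

struct job;

static struct job *shared_job;	/* protected by job_lock */
static DEFINE_MUTEX(job_lock);

static void worker(void)
{
	struct job *job;

	mutex_lock(&job_lock);
	job = shared_job;	/* read only while the lock is held */
	if (job) {
		/* ... the job cannot be freed out from under us here ... */
	}
	mutex_unlock(&job_lock);
}
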
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 20e86a9..65f8008 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -682,8 +682,8 @@
 	 * - country_ie + 2, the start of the country ie data, and
 	 * - and country_ie[1] which is the IE length
 	 */
-	regulatory_hint_11d(wdev->wiphy, bss->channel->band,
-			    country_ie + 2, country_ie[1]);
+	regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+				   country_ie + 2, country_ie[1]);
 	kfree(country_ie);
 }
 
diff --git a/net/wireless/sysfs.h b/net/wireless/sysfs.h
index 65acbeb..b533ed7 100644
--- a/net/wireless/sysfs.h
+++ b/net/wireless/sysfs.h
@@ -1,8 +1,8 @@
 #ifndef __WIRELESS_SYSFS_H
 #define __WIRELESS_SYSFS_H
 
-extern int wiphy_sysfs_init(void);
-extern void wiphy_sysfs_exit(void);
+int wiphy_sysfs_init(void);
+void wiphy_sysfs_exit(void);
 
 extern struct class ieee80211_class;
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ce090c1..935dea9 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -10,6 +10,7 @@
 #include <net/cfg80211.h>
 #include <net/ip.h>
 #include <net/dsfield.h>
+#include <linux/if_vlan.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -691,6 +692,7 @@
 unsigned int cfg80211_classify8021d(struct sk_buff *skb)
 {
 	unsigned int dscp;
+	unsigned char vlan_priority;
 
 	/* skb->priority values from 256->263 are magic values to
 	 * directly indicate a specific 802.1d priority.  This is used
@@ -700,6 +702,13 @@
 	if (skb->priority >= 256 && skb->priority <= 263)
 		return skb->priority - 256;
 
+	if (vlan_tx_tag_present(skb)) {
+		vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+			>> VLAN_PRIO_SHIFT;
+		if (vlan_priority > 0)
+			return vlan_priority;
+	}
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
@@ -1240,7 +1249,7 @@
 	enum cfg80211_chan_mode chmode;
 	int num_different_channels = 0;
 	int total = 1;
-	bool radar_required;
+	bool radar_required = false;
 	int i, j;
 
 	ASSERT_RTNL();
@@ -1255,14 +1264,20 @@
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_P2P_GO:
 	case NL80211_IFTYPE_WDS:
-		radar_required = !!(chan &&
-				    (chan->flags & IEEE80211_CHAN_RADAR));
+		/* if the interface could potentially choose a DFS channel,
+		 * then mark DFS as required.
+		 */
+		if (!chan) {
+			if (chanmode != CHAN_MODE_UNDEFINED && radar_detect)
+				radar_required = true;
+			break;
+		}
+		radar_required = !!(chan->flags & IEEE80211_CHAN_RADAR);
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_DEVICE:
 	case NL80211_IFTYPE_MONITOR:
-		radar_required = false;
 		break;
 	case NUM_NL80211_IFTYPES:
 	case NL80211_IFTYPE_UNSPECIFIED:
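
The cfg80211_classify8021d() addition reads the 802.1Q priority code point
(PCP) from the skb's VLAN tag; in the 16-bit TCI the layout is PCP (3 bits),
DEI (1 bit), VID (12 bits), which is what VLAN_PRIO_MASK and VLAN_PRIO_SHIFT
encode. A standalone illustration of the bit layout (the constants match
include/linux/if_vlan.h):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK	0xe000	/* top three TCI bits */
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0xa064;	/* PCP 5, DEI 0, VID 100 */

	printf("priority %u, vid %u\n",
	       (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT, tci & 0x0fff);
	return 0;		/* prints: priority 5, vid 100 */
}
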
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index ab4ef72..debe733 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -802,17 +802,4 @@
 }
 EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
 
-#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
-
-void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
-{
-	if (tail != skb) {
-		skb->data_len += len;
-		skb->len += len;
-	}
-	return skb_put(tail, len);
-}
-EXPORT_SYMBOL_GPL(pskb_put);
-#endif
-
 MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 716502a..0622d31 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -130,7 +130,7 @@
 	return h & hmask;
 }
 
-extern struct hlist_head *xfrm_hash_alloc(unsigned int sz);
-extern void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
+struct hlist_head *xfrm_hash_alloc(unsigned int sz);
+void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
 
 #endif /* _XFRM_HASH_H */
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 3be02b6..ccfdc71 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -220,8 +220,8 @@
 
 static void * __percpu *ipcomp_alloc_scratches(void)
 {
-	int i;
 	void * __percpu *scratches;
+	int i;
 
 	if (ipcomp_scratch_users++)
 		return ipcomp_scratches;
@@ -233,7 +233,9 @@
 	ipcomp_scratches = scratches;
 
 	for_each_possible_cpu(i) {
-		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+		void *scratch;
+
+		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
 		if (!scratch)
 			return NULL;
 		*per_cpu_ptr(scratches, i) = scratch;
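
The ipcomp change replaces vmalloc() with vmalloc_node() so that each CPU's
decompression scratch buffer lives on that CPU's own NUMA node. A hedged
sketch of the per-CPU, node-local allocation pattern (the helper is
hypothetical; error unwinding is left to the caller, as in ipcomp):

#include <linux/percpu.h>
#include <linux/vmalloc.h>

/* Illustrative: allocate a per-CPU buffer, placing each CPU's copy on
 * memory local to that CPU's NUMA node. */
static void * __percpu *alloc_local_bufs(size_t size)
{
	void * __percpu *bufs = alloc_percpu(void *);
	int cpu;

	if (!bufs)
		return NULL;

	for_each_possible_cpu(cpu) {
		void *buf = vmalloc_node(size, cpu_to_node(cpu));

		if (!buf)
			return NULL;
		*per_cpu_ptr(bufs, cpu) = buf;
	}
	return bufs;
}
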
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 76e1873..9a91f74 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1844,6 +1844,13 @@
 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
 	struct xfrm_policy *pol = xdst->pols[0];
 	struct xfrm_policy_queue *pq = &pol->polq;
+	const struct sk_buff *fclone = skb + 1;
+
+	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+		     fclone->fclone == SKB_FCLONE_CLONE)) {
+		kfree_skb(skb);
+		return 0;
+	}
 
 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
 		kfree_skb(skb);
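
The check above leans on the fast-clone skb layout: an SKB_FCLONE_ORIG skb and
its clone slot are carved from one allocation, back to back, so skb + 1
reaches the companion and its fclone field says whether the clone is in use.
The paired-allocation idea in a generic, illustrative form:

#include <stdlib.h>

enum slot_state { SLOT_UNAVAILABLE, SLOT_ORIG, SLOT_CLONE };

struct obj {
	enum slot_state state;
	/* ... payload ... */
};

/* Illustrative: allocate an object together with its clone slot so the
 * companion is always reachable as obj + 1, with no extra pointer. */
static struct obj *alloc_pair(void)
{
	struct obj *pair = calloc(2, sizeof(*pair));

	if (pair)
		pair->state = SLOT_ORIG;  /* pair[1] starts unavailable */
	return pair;
}
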
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b9c3f9e..68c2f357 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -468,7 +468,7 @@
 	}
 
 	err = __xfrm_state_delete(x);
-	if (!err && x->id.spi)
+	if (!err)
 		km_state_expired(x, 1, 0);
 
 	xfrm_audit_state_delete(x, err ? 0 : 1,
@@ -815,7 +815,7 @@
 			xfrm_state_look_at(pol, x, fl, encap_family,
 					   &best, &acquire_in_progress, &error);
 	}
-	if (best)
+	if (best || acquire_in_progress)
 		goto found;
 
 	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
@@ -824,7 +824,7 @@
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
+		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
 		    tmpl->mode == x->props.mode &&
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index cfe40ad..2fca916 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -64,7 +64,7 @@
 
 	/* put values into the fifo */
 	for (i = 0; i != 10; i++)
-		kfifo_put(&test, &i);
+		kfifo_put(&test, i);
 
 	/* show the number of used elements */
 	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
@@ -85,7 +85,7 @@
 	kfifo_skip(&test);
 
 	/* put values into the fifo until is full */
-	for (i = 20; kfifo_put(&test, &i); i++)
+	for (i = 20; kfifo_put(&test, i); i++)
 		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index 0647379..aa243db 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -39,7 +39,7 @@
 	kfifo_in(&fifo, "test", 4);
 
 	for (i = 0; i != 9; i++)
-		kfifo_put(&fifo, &i);
+		kfifo_put(&fifo, i);
 
 	/* kick away first byte */
 	kfifo_skip(&fifo);
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index 6f8e79e..8dc3c2e 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -61,7 +61,7 @@
 
 	/* put values into the fifo */
 	for (i = 0; i != 10; i++)
-		kfifo_put(&test, &i);
+		kfifo_put(&test, i);
 
 	/* show the number of used elements */
 	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
@@ -78,7 +78,7 @@
 	kfifo_skip(&test);
 
 	/* put values into the fifo until is full */
-	for (i = 20; kfifo_put(&test, &i); i++)
+	for (i = 20; kfifo_put(&test, i); i++)
 		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
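
The three sample updates above reflect the kfifo calling convention:
kfifo_put() is a macro taking the element by value, so callers pass i rather
than &i, while kfifo_get() still takes a pointer for the output. A minimal
sketch (assuming the DEFINE_KFIFO setup used in the samples):

#include <linux/kfifo.h>
#include <linux/printk.h>

static DEFINE_KFIFO(fifo, int, 32);

static void fill(void)
{
	int i, val;

	for (i = 0; i < 10; i++)
		kfifo_put(&fifo, i);	/* by value; returns 0 when full */

	if (kfifo_get(&fifo, &val))	/* get still takes a pointer */
		pr_info("got %d\n", val);
}
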
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 8dcdca2..69f0a14 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -79,9 +79,11 @@
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
 
+MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
+
 # We can go over command line length here, so be careful.
 quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
-      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) -s -T -
+      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) $(MODPOST_OPT) -s -T -
 
 PHONY += __modpost
 __modpost: $(modules:.ko=.o) FORCE
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 6129020..549d0ab 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -19,9 +19,10 @@
         size, type, name = l[:-1].split()
         if type in "tTdDbBrR":
             # strip generated symbols
-            if name[:6] == "__mod_": continue
-            # function names begin with '.' on 64-bit powerpc
-            if "." in name[1:]: name = "static." + name.split(".")[0]
+            if name.startswith("__mod_"): continue
+            if name == "linux_banner": continue
+            # statics and some other optimizations add a random .NUMBER suffix
+            name = re.sub(r'\.[0-9]+', '', name)
             sym[name] = sym.get(name, 0) + int(size, 16)
     return sym
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 66cad50..61090e0 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -241,8 +241,11 @@
 			__ref|
 			__rcu
 		}x;
-
-our $InitAttribute = qr{__(?:mem|cpu|dev|net_|)(?:initdata|initconst|init\b)};
+our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
+our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
+our $InitAttributeConst = qr{$InitAttributePrefix(?:initconst\b)};
+our $InitAttributeInit = qr{$InitAttributePrefix(?:init\b)};
+our $InitAttribute = qr{$InitAttributeData|$InitAttributeConst|$InitAttributeInit};
 
 # Notes to $Attribute:
 # We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
@@ -323,7 +326,8 @@
 	(?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
 	WARN(?:_RATELIMIT|_ONCE|)|
 	panic|
-	MODULE_[A-Z_]+
+	MODULE_[A-Z_]+|
+	seq_vprintf|seq_printf|seq_puts
 )};
 
 our $signature_tags = qr{(?xi:
@@ -442,8 +446,9 @@
 		next if ($line !~ /(?:[A-Z][a-z]|[a-z][A-Z])/);
 		if ($line =~ /^[ \t]*(?:#[ \t]*define|typedef\s+$Type)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)/) {
 			$camelcase{$1} = 1;
-		}
-	        elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*\(/) {
+		} elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[\(\[,;]/) {
+			$camelcase{$1} = 1;
+		} elsif ($line =~ /^\s*(?:union|struct|enum)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[;\{]/) {
 			$camelcase{$1} = 1;
 		}
 	}
@@ -1512,6 +1517,14 @@
 	return $string;
 }
 
+sub string_find_replace {
+	my ($string, $find, $replace) = @_;
+
+	$string =~ s/$find/$replace/g;
+
+	return $string;
+}
+
 sub tabify {
 	my ($leading) = @_;
 
@@ -1612,6 +1625,8 @@
 	my @setup_docs = ();
 	my $setup_docs = 0;
 
+	my $camelcase_file_seeded = 0;
+
 	sanitise_line_reset();
 	my $line;
 	foreach my $rawline (@rawlines) {
@@ -1754,11 +1769,11 @@
 		# extract the filename as it passes
 		if ($line =~ /^diff --git.*?(\S+)$/) {
 			$realfile = $1;
-			$realfile =~ s@^([^/]*)/@@;
+			$realfile =~ s@^([^/]*)/@@ if (!$file);
 			$in_commit_log = 0;
 		} elsif ($line =~ /^\+\+\+\s+(\S+)/) {
 			$realfile = $1;
-			$realfile =~ s@^([^/]*)/@@;
+			$realfile =~ s@^([^/]*)/@@ if (!$file);
 			$in_commit_log = 0;
 
 			$p1_prefix = $1;
@@ -1947,6 +1962,18 @@
 			$rpt_cleaners = 1;
 		}
 
+# Check for FSF mailing addresses.
+		if ($rawline =~ /You should have received a copy/ ||
+		    $rawline =~ /write to the Free Software/ ||
+		    $rawline =~ /59 Temple Place/ ||
+		    $rawline =~ /51 Franklin Street/) {
+			my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+			my $msg_type = \&ERROR;
+			$msg_type = \&CHK if ($file);
+			&{$msg_type}("FSF_MAILING_ADDRESS",
+				"Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet)
+		}
+
 # check for Kconfig help text having a real description
 # Only applies when adding the entry originally, after that we do not have
 # sufficient context to determine whether it is indeed long enough.
@@ -2838,7 +2865,7 @@
 				\+=|-=|\*=|\/=|%=|\^=|\|=|&=|
 				=>|->|<<|>>|<|>|=|!|~|
 				&&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
-				\?|:
+				\?:|\?|:
 			}x;
 			my @elements = split(/($ops|;)/, $opline);
 
@@ -3061,15 +3088,13 @@
 					    	$ok = 1;
 					}
 
-					# Ignore ?:
-					if (($opv eq ':O' && $ca =~ /\?$/) ||
-					    ($op eq '?' && $cc =~ /^:/)) {
-					    	$ok = 1;
-					}
-
+					# messages are ERROR, but ?: are CHK
 					if ($ok == 0) {
-						if (ERROR("SPACING",
-							  "spaces required around that '$op' $at\n" . $hereptr)) {
+						my $msg_type = \&ERROR;
+						$msg_type = \&CHK if (($op eq '?:' || $op eq '?' || $op eq ':') && $ctx =~ /VxV/);
+
+						if (&{$msg_type}("SPACING",
+								 "spaces required around that '$op' $at\n" . $hereptr)) {
 							$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
 							if (defined $fix_elements[$n + 2]) {
 								$fix_elements[$n + 2] =~ s/^\s+//;
@@ -3208,21 +3233,10 @@
 		}
 
 # Return is not a function.
-		if (defined($stat) && $stat =~ /^.\s*return(\s*)(\(.*);/s) {
+		if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) {
 			my $spacing = $1;
-			my $value = $2;
-
-			# Flatten any parentheses
-			$value =~ s/\(/ \(/g;
-			$value =~ s/\)/\) /g;
-			while ($value =~ s/\[[^\[\]]*\]/1/ ||
-			       $value !~ /(?:$Ident|-?$Constant)\s*
-					     $Compare\s*
-					     (?:$Ident|-?$Constant)/x &&
-			       $value =~ s/\([^\(\)]*\)/1/) {
-			}
-#print "value<$value>\n";
-			if ($value =~ /^\s*(?:$Ident|-?$Constant)\s*$/) {
+			if ($^V && $^V ge 5.10.0 &&
+			    $stat =~ /^.\s*return\s*$balanced_parens\s*;\s*$/) {
 				ERROR("RETURN_PARENTHESES",
 				      "return is not a function, parentheses are not required\n" . $herecurr);
 
@@ -3231,6 +3245,7 @@
 				      "space required before the open parenthesis '('\n" . $herecurr);
 			}
 		}
+
 # Return of what appears to be an errno should normally be -'ve
 		if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
 			my $name = $1;
@@ -3396,7 +3411,13 @@
 				while ($var =~ m{($Ident)}g) {
 					my $word = $1;
 					next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
-					seed_camelcase_includes() if ($check);
+					if ($check) {
+						seed_camelcase_includes();
+						if (!$file && !$camelcase_file_seeded) {
+							seed_camelcase_file($realfile);
+							$camelcase_file_seeded = 1;
+						}
+					}
 					if (!defined $camelcase{$word}) {
 						$camelcase{$word} = 1;
 						CHK("CAMELCASE",
@@ -3725,14 +3746,6 @@
 			}
 		}
 
-sub string_find_replace {
-	my ($string, $find, $replace) = @_;
-
-	$string =~ s/$find/$replace/g;
-
-	return $string;
-}
-
 # check for bad placement of section $InitAttribute (e.g.: __initdata)
 		if ($line =~ /(\b$InitAttribute\b)/) {
 			my $attr = $1;
@@ -3751,6 +3764,35 @@
 			}
 		}
 
+# check for $InitAttributeData (ie: __initdata) with const
+		if ($line =~ /\bconst\b/ && $line =~ /($InitAttributeData)/) {
+			my $attr = $1;
+			$attr =~ /($InitAttributePrefix)(.*)/;
+			my $attr_prefix = $1;
+			my $attr_type = $2;
+			if (ERROR("INIT_ATTRIBUTE",
+				  "Use of const init definition must use ${attr_prefix}initconst\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~
+				    s/$InitAttributeData/${attr_prefix}initconst/;
+			}
+		}
+
+# check for $InitAttributeConst (ie: __initconst) without const
+		if ($line !~ /\bconst\b/ && $line =~ /($InitAttributeConst)/) {
+			my $attr = $1;
+			if (ERROR("INIT_ATTRIBUTE",
+				  "Use of $attr requires a separate use of const\n" . $herecurr) &&
+			    $fix) {
+				my $lead = $fixed[$linenr - 1] =~
+				    /(^\+\s*(?:static\s+))/;
+				$lead = rtrim($1);
+				$lead = "$lead " if ($lead !~ /^\+$/);
+				$lead = "${lead}const ";
+				$fixed[$linenr - 1] =~ s/(^\+\s*(?:static\s+))/$lead/;
+			}
+		}
+
 # prefer usleep_range over udelay
 		if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
 			# ignore udelay's < 10, however
@@ -3810,8 +3852,8 @@
 # check for memory barriers without a comment.
 		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
 			if (!ctx_has_comment($first_line, $linenr)) {
-				CHK("MEMORY_BARRIER",
-				    "memory barrier without comment\n" . $herecurr);
+				WARN("MEMORY_BARRIER",
+				     "memory barrier without comment\n" . $herecurr);
 			}
 		}
 # check of hardware specific defines
@@ -3835,7 +3877,8 @@
 		}
 
 # Check for __inline__ and __inline, prefer inline
-		if ($line =~ /\b(__inline__|__inline)\b/) {
+		if ($realfile !~ m@\binclude/uapi/@ &&
+		    $line =~ /\b(__inline__|__inline)\b/) {
 			if (WARN("INLINE",
 				 "plain inline is preferred over $1\n" . $herecurr) &&
 			    $fix) {
@@ -3845,19 +3888,22 @@
 		}
 
 # Check for __attribute__ packed, prefer __packed
-		if ($line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
+		if ($realfile !~ m@\binclude/uapi/@ &&
+		    $line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
 			WARN("PREFER_PACKED",
 			     "__packed is preferred over __attribute__((packed))\n" . $herecurr);
 		}
 
 # Check for __attribute__ aligned, prefer __aligned
-		if ($line =~ /\b__attribute__\s*\(\s*\(.*aligned/) {
+		if ($realfile !~ m@\binclude/uapi/@ &&
+		    $line =~ /\b__attribute__\s*\(\s*\(.*aligned/) {
 			WARN("PREFER_ALIGNED",
 			     "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
 		}
 
 # Check for __attribute__ format(printf, prefer __printf
-		if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
+		if ($realfile !~ m@\binclude/uapi/@ &&
+		    $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
 			if (WARN("PREFER_PRINTF",
 				 "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
 			    $fix) {
@@ -3867,7 +3913,8 @@
 		}
 
 # Check for __attribute__ format(scanf, prefer __scanf
-		if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
+		if ($realfile !~ m@\binclude/uapi/@ &&
+		    $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
 			if (WARN("PREFER_SCANF",
 				 "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
 			    $fix) {
@@ -3903,9 +3950,9 @@
 		}
 
 # check for seq_printf uses that could be seq_puts
-		if ($line =~ /\bseq_printf\s*\(/) {
+		if ($sline =~ /\bseq_printf\s*\(.*"\s*\)\s*;\s*$/) {
 			my $fmt = get_quoted_string($line, $rawline);
-			if ($fmt !~ /[^\\]\%/) {
+			if ($fmt ne "" && $fmt !~ /[^\\]\%/) {
 				if (WARN("PREFER_SEQ_PUTS",
 					 "Prefer seq_puts to seq_printf\n" . $herecurr) &&
 				    $fix) {
@@ -3972,6 +4019,23 @@
 			}
 		}
 
+# check for naked sscanf
+		if ($^V && $^V ge 5.10.0 &&
+		    defined $stat &&
+		    $stat =~ /\bsscanf\b/ &&
+		    ($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ &&
+		     $stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ &&
+		     $stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) {
+			my $lc = $stat =~ tr@\n@@;
+			$lc = $lc + $linenr;
+			my $stat_real = raw_line($linenr, 0);
+			for (my $count = $linenr + 1; $count <= $lc; $count++) {
+				$stat_real = $stat_real . "\n" . raw_line($count, 0);
+			}
+			WARN("NAKED_SSCANF",
+			     "unchecked sscanf return value\n" . "$here\n$stat_real\n");
+		}
+
 # check for new externs in .h files.
 		if ($realfile =~ /\.h$/ &&
 		    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
@@ -4190,6 +4254,12 @@
 			     "usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
 		}
 
+# Use of __ARCH_HAS_<FOO> or ARCH_HAVE_<BAR> is wrong.
+		if ($line =~ /\+\s*#\s*define\s+((?:__)?ARCH_(?:HAS|HAVE)\w*)\b/) {
+			ERROR("DEFINE_ARCH_HAS",
+			      "#define of '$1' is wrong - use Kconfig variables or standard guards instead\n" . $herecurr);
+		}
+
 # check for %L{u,d,i} in strings
 		my $string;
 		while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci
deleted file mode 100644
index 562ec88..0000000
--- a/scripts/coccinelle/api/devm_request_and_ioremap.cocci
+++ /dev/null
@@ -1,105 +0,0 @@
-/// Reimplement a call to devm_request_mem_region followed by a call to ioremap
-/// or ioremap_nocache by a call to devm_request_and_ioremap.
-/// Devm_request_and_ioremap was introduced in
-/// 72f8c0bfa0de64c68ee59f40eb9b2683bffffbb0.  It makes the code much more
-/// concise.
-///
-///
-// Confidence: High
-// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.  GPLv2.
-// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Comments:
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual org
-virtual report
-virtual context
-
-@nm@
-expression myname;
-identifier i;
-@@
-
-struct platform_driver i = { .driver = { .name = myname } };
-
-@depends on patch@
-expression dev,res,size;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,
--                              \(res->name\|dev_name(dev)\))) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-// this rule is separate from the previous one, because a single file can
-// have multiple values of myname
-@depends on patch@
-expression dev,res,size;
-expression nm.myname;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,myname)) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-
-@pb depends on org || report || context@
-expression dev,res,size;
-expression nm.myname;
-position p1,p2;
-@@
-
-*if
-  (!devm_request_mem_region@p1(dev, res->start, size,
-                              \(res->name\|dev_name(dev)\|myname\))) {
-   ...
-   return ...;
-}
-... when != res->start
-(
-*devm_ioremap@p2(dev,res->start,size)
-|
-*devm_ioremap_nocache@p2(dev,res->start,size)
-)
-... when any
-    when != res->start
-
-@script:python depends on org@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-cocci.print_main("INFO: replace by devm_request_and_ioremap",p1)
-cocci.print_secs("",p2)
-
-@script:python depends on report@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-msg = "INFO: devm_request_mem_region followed by ioremap on line %s can be replaced by devm_request_and_ioremap" % (p2[0].line)
-coccilib.report.print_report(p1[0],msg)
diff --git a/scripts/docproc.c b/scripts/docproc.c
index 4cfdc17..2b69eaf 100644
--- a/scripts/docproc.c
+++ b/scripts/docproc.c
@@ -72,6 +72,7 @@
 #define FUNCTION      "-function"
 #define NOFUNCTION    "-nofunction"
 #define NODOCSECTIONS "-no-doc-sections"
+#define SHOWNOTFOUND  "-show-not-found"
 
 static char *srctree, *kernsrctree;
 
@@ -294,6 +295,7 @@
         int startofsym = 1;
 	vec[idx++] = KERNELDOC;
 	vec[idx++] = DOCBOOK;
+	vec[idx++] = SHOWNOTFOUND;
 
         /* Split line up in individual parameters preceded by FUNCTION */
         for (i=0; line[i]; i++) {
@@ -325,7 +327,8 @@
  */
 static void docsect(char *filename, char *line)
 {
-	char *vec[6]; /* kerneldoc -docbook -function "section" file NULL */
+	/* kerneldoc -docbook -show-not-found -function "section" file NULL */
+	char *vec[7];
 	char *s;
 
 	for (s = line; *s; s++)
@@ -341,10 +344,11 @@
 
 	vec[0] = KERNELDOC;
 	vec[1] = DOCBOOK;
-	vec[2] = FUNCTION;
-	vec[3] = line;
-	vec[4] = filename;
-	vec[5] = NULL;
+	vec[2] = SHOWNOTFOUND;
+	vec[3] = FUNCTION;
+	vec[4] = line;
+	vec[5] = filename;
+	vec[6] = NULL;
 	exec_kernel_doc(vec);
 }
 
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index b482f16..ef47409 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -240,12 +240,24 @@
 		output_file="$1"
 		cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
 		output=${cpio_list}
-		echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
-		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
-		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
-		echo "$output_file" | grep -q "\.xz$" && \
-				compr="xz --check=crc32 --lzma2=dict=1MiB"
-		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
+		echo "$output_file" | grep -q "\.gz$" \
+			&& [ -x "`which gzip 2> /dev/null`" ] \
+			&& compr="gzip -n -9 -f"
+		echo "$output_file" | grep -q "\.bz2$" \
+			&& [ -x "`which bzip2 2> /dev/null`" ] \
+			&& compr="bzip2 -9 -f"
+		echo "$output_file" | grep -q "\.lzma$" \
+			&& [ -x "`which lzma 2> /dev/null`" ] \
+			&& compr="lzma -9 -f"
+		echo "$output_file" | grep -q "\.xz$" \
+			&& [ -x "`which xz 2> /dev/null`" ] \
+			&& compr="xz --check=crc32 --lzma2=dict=1MiB"
+		echo "$output_file" | grep -q "\.lzo$" \
+			&& [ -x "`which lzop 2> /dev/null`" ] \
+			&& compr="lzop -9 -f"
+		echo "$output_file" | grep -q "\.lz4$" \
+			&& [ -x "`which lz4 2> /dev/null`" ] \
+			&& compr="lz4 -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
 		;;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 9a11f9f..10085de 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -115,6 +115,12 @@
 			fprintf(stderr, "Read error or end of file.\n");
 		return -1;
 	}
+	if (strlen(str) > KSYM_NAME_LEN) {
+		fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
+                                "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
+			str, strlen(str), KSYM_NAME_LEN);
+		return -1;
+	}
 
 	sym = str;
 	/* skip prefix char */
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index df198a5..ba663e1 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -93,7 +93,7 @@
 #define SYMBOL_CHOICEVAL  0x0020  /* used as a value in a choice block */
 #define SYMBOL_VALID      0x0080  /* set when symbol.curr is calculated */
 #define SYMBOL_OPTIONAL   0x0100  /* choice is optional - values can be 'n' */
-#define SYMBOL_WRITE      0x0200  /* ? */
+#define SYMBOL_WRITE      0x0200  /* write symbol to file (KCONFIG_CONFIG) */
 #define SYMBOL_CHANGED    0x0400  /* ? */
 #define SYMBOL_AUTO       0x1000  /* value from environment variable */
 #define SYMBOL_CHECKED    0x2000  /* used during dependency checking */
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 2c39631..59184bb 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -25,7 +25,7 @@
 static const char mconf_readme[] = N_(
 "Overview\n"
 "--------\n"
-"This interface let you select features and parameters for the build.\n"
+"This interface lets you select features and parameters for the build.\n"
 "Features can either be built-in, modularized, or ignored. Parameters\n"
 "must be entered in as decimal or hexadecimal numbers or text.\n"
 "\n"
@@ -39,15 +39,15 @@
 "\n"
 "To change any of these features, highlight it with the cursor\n"
 "keys and press <Y> to build it in, <M> to make it a module or\n"
-"<N> to removed it.  You may also press the <Space Bar> to cycle\n"
-"through the available options (ie. Y->N->M->Y).\n"
+"<N> to remove it.  You may also press the <Space Bar> to cycle\n"
+"through the available options (i.e. Y->N->M->Y).\n"
 "\n"
 "Some additional keyboard hints:\n"
 "\n"
 "Menus\n"
 "----------\n"
-"o  Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
-"   you wish to change or submenu wish to select and press <Enter>.\n"
+"o  Use the Up/Down arrow keys (cursor keys) to highlight the item you\n"
+"   wish to change or the submenu you wish to select and press <Enter>.\n"
 "   Submenus are designated by \"--->\", empty ones by \"----\".\n"
 "\n"
 "   Shortcut: Press the option's highlighted letter (hotkey).\n"
@@ -65,7 +65,7 @@
 "             there is a delayed response which you may find annoying.\n"
 "\n"
 "   Also, the <TAB> and cursor keys will cycle between <Select>,\n"
-"   <Exit> and <Help>.\n"
+"   <Exit>, <Help>, <Save>, and <Load>.\n"
 "\n"
 "o  To get help with an item, use the cursor keys to highlight <Help>\n"
 "   and press <ENTER>.\n"
@@ -105,7 +105,7 @@
 "Text Box    (Help Window)\n"
 "--------\n"
 "o  Use the cursor keys to scroll up/down/left/right.  The VI editor\n"
-"   keys h,j,k,l function here as do <u>, <d>, <SPACE BAR> and <B> for \n"
+"   keys h,j,k,l function here as do <u>, <d>, <SPACE BAR> and <B> for\n"
 "   those who are familiar with less and lynx.\n"
 "\n"
 "o  Press <E>, <X>, <q>, <Enter> or <Esc><Esc> to exit.\n"
@@ -117,23 +117,21 @@
 "those who, for various reasons, find it necessary to switch\n"
 "between different configurations.\n"
 "\n"
-"At the end of the main menu you will find two options.  One is\n"
-"for saving the current configuration to a file of your choosing.\n"
-"The other option is for loading a previously saved alternate\n"
-"configuration.\n"
+"The <Save> button will let you save the current configuration to\n"
+"a file of your choosing.  Use the <Load> button to load a previously\n"
+"saved alternate configuration.\n"
 "\n"
-"Even if you don't use alternate configuration files, but you\n"
-"find during a Menuconfig session that you have completely messed\n"
-"up your settings, you may use the \"Load Alternate...\" option to\n"
-"restore your previously saved settings from \".config\" without\n"
-"restarting Menuconfig.\n"
+"Even if you don't use alternate configuration files, but you find\n"
+"during a Menuconfig session that you have completely messed up your\n"
+"settings, you may use the <Load> button to restore your previously\n"
+"saved settings from \".config\" without restarting Menuconfig.\n"
 "\n"
 "Other information\n"
 "-----------------\n"
-"If you use Menuconfig in an XTERM window make sure you have your\n"
-"$TERM variable set to point to a xterm definition which supports color.\n"
-"Otherwise, Menuconfig will look rather bad.  Menuconfig will not\n"
-"display correctly in a RXVT window because rxvt displays only one\n"
+"If you use Menuconfig in an XTERM window, make sure you have your\n"
+"$TERM variable set to point to an xterm definition which supports\n"
+"color.  Otherwise, Menuconfig will look rather bad.  Menuconfig will\n"
+"not display correctly in an RXVT window because rxvt displays only one\n"
 "intensity of color, bright.\n"
 "\n"
 "Menuconfig will display larger menus on screens or xterms which are\n"
@@ -148,8 +146,8 @@
 "\n"
 "Optional personality available\n"
 "------------------------------\n"
-"If you prefer to have all of the options listed in a single menu, rather\n"
-"than the default multimenu hierarchy, run the menuconfig with\n"
+"If you prefer to have all of the options listed in a single menu,\n"
+"rather than the default multimenu hierarchy, run the menuconfig with\n"
 "MENUCONFIG_MODE environment variable set to single_menu. Example:\n"
 "\n"
 "make MENUCONFIG_MODE=single_menu menuconfig\n"
@@ -172,7 +170,7 @@
 " mono       => selects colors suitable for monochrome displays\n"
 " blackbg    => selects a color scheme with black background\n"
 " classic    => theme with blue background. The classic look\n"
-" bluetitle  => a LCD friendly version of classic. (default)\n"
+" bluetitle  => an LCD friendly version of classic. (default)\n"
 "\n"),
 menu_instructions[] = N_(
 	"Arrow keys navigate the menu.  "
@@ -238,24 +236,24 @@
 	"Symbol: FOO [=m]\n"
 	"Type  : tristate\n"
 	"Prompt: Foo bus is used to drive the bar HW\n"
-	"  Defined at drivers/pci/Kconfig:47\n"
-	"  Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
 	"  Location:\n"
 	"    -> Bus options (PCI, PCMCIA, EISA, ISA)\n"
 	"      -> PCI support (PCI [=y])\n"
 	"(1)     -> PCI access mode (<choice> [=y])\n"
+	"  Defined at drivers/pci/Kconfig:47\n"
+	"  Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
 	"  Selects: LIBCRC32\n"
-	"  Selected by: BAR\n"
+	"  Selected by: BAR [=n]\n"
 	"-----------------------------------------------------------------\n"
 	"o The line 'Type:' shows the type of the configuration option for\n"
 	"  this symbol (boolean, tristate, string, ...)\n"
 	"o The line 'Prompt:' shows the text used in the menu structure for\n"
 	"  this symbol\n"
-	"o The 'Defined at' line tell at what file / line number the symbol\n"
+	"o The 'Defined at' line tells at what file / line number the symbol\n"
 	"  is defined\n"
-	"o The 'Depends on:' line tell what symbols needs to be defined for\n"
+	"o The 'Depends on:' line tells what symbols need to be defined for\n"
 	"  this symbol to be visible in the menu (selectable)\n"
 	"o The 'Location:' lines tell where in the menu structure this symbol\n"
 	"  is located\n"
 	"    A location followed by a [=y] indicates that this is a\n"
 	"    selectable menu item - and the current value is displayed inside\n"
@@ -263,9 +261,9 @@
 	"    Press the key in the (#) prefix to jump directly to that\n"
 	"    location. You will be returned to the current search results\n"
 	"    after exiting this new menu.\n"
-	"o The 'Selects:' line tell what symbol will be automatically\n"
+	"o The 'Selects:' line tells what symbols will be automatically\n"
 	"  selected if this symbol is selected (y or m)\n"
-	"o The 'Selected by' line tell what symbol has selected this symbol\n"
+	"o The 'Selected by' line tells what symbol has selected this symbol\n"
 	"\n"
 	"Only relevant lines are shown.\n"
 	"\n\n"
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index c1d5320..db1512a 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -119,9 +119,10 @@
 		sym->type = type;
 		return;
 	}
-	menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'",
-	    sym->name ? sym->name : "<choice>",
-	    sym_type_name(sym->type), sym_type_name(type));
+	menu_warn(current_entry,
+		"ignoring type redefinition of '%s' from '%s' to '%s'",
+		sym->name ? sym->name : "<choice>",
+		sym_type_name(sym->type), sym_type_name(type));
 }
 
 struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
@@ -583,7 +584,7 @@
 		for (j = 4; --i >= 0; j += 2) {
 			menu = submenu[i];
 			if (head && location && menu == location)
-				jump->offset = r->len - 1;
+				jump->offset = strlen(r->s);
 			str_printf(r, "%*c-> %s", j, ' ',
 				   _(menu_get_prompt(menu)));
 			if (menu->sym) {
@@ -597,7 +598,7 @@
 }
 
 /*
- * get peoperty of type P_SYMBOL
+ * get property of type P_SYMBOL
  */
 static struct property *get_symbol_prop(struct symbol *sym)
 {
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 1500c38..9d3b04b 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -69,6 +69,11 @@
 	return QString::fromLocal8Bit(gettext(str.latin1()));
 }
 
+ConfigSettings::ConfigSettings()
+	: QSettings("kernel.org", "qconf")
+{
+}
+
 /**
  * Reads a list of integer values from the application settings.
  */
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 3715b3e..bde0c6b 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -32,6 +32,7 @@
 
 class ConfigSettings : public QSettings {
 public:
+	ConfigSettings();
 	Q3ValueList<int> readSizes(const QString& key, bool *ok);
 	bool writeSizes(const QString& key, const Q3ValueList<int>& value);
 };
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c9a6775..7caabdb 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -1047,7 +1047,7 @@
  * When we check for recursive dependencies we use a stack to save
  * current state so we can print out relevant info to user.
  * The entries are located on the call stack so no need to free memory.
- * Note inser() remove() must always match to properly clear the stack.
+ * Note insert() remove() must always match to properly clear the stack.
  */
 static struct dep_stack {
 	struct dep_stack *prev, *next;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 6555a47..1a9f53e 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -68,7 +68,6 @@
 }
 %}
 
-ws	[ \n\t]
 n	[A-Za-z0-9_]
 
 %%
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 4305b2f..da058da 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -257,6 +257,7 @@
 		'July', 'August', 'September', 'October',
 		'November', 'December')[(localtime)[4]] .
   " " . ((localtime)[5]+1900);
+my $show_not_found = 0;
 
 # Essentially these are globals.
 # They probably want to be tidied up, made more localised or something.
@@ -369,6 +370,8 @@
 	usage();
     } elsif ($cmd eq '-no-doc-sections') {
 	    $no_doc_sections = 1;
+    } elsif ($cmd eq '-show-not-found') {
+	$show_not_found = 1;
     }
 }
 
@@ -2125,8 +2128,7 @@
 
 	create_parameterlist($args, ',', $file);
     } else {
-	print STDERR "Error(${file}:$.): cannot understand prototype: '$prototype'\n";
-	++$errors;
+	print STDERR "Warning(${file}:$.): cannot understand function prototype: '$prototype'\n";
 	return;
     }
 
@@ -2536,6 +2538,9 @@
     }
     if ($initial_section_counter == $section_counter) {
 	print STDERR "Warning(${file}): no structured comments found\n";
+	if (($function_only == 1) && ($show_not_found == 1)) {
+	    print STDERR "    Was looking for '$_'.\n" for keys %function_table;
+	}
 	if ($output_mode eq "xml") {
 	    # The template wants at least one RefEntry here; make one.
 	    print "<refentry>\n";
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 8247979..1785576 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -17,6 +17,7 @@
 #include <string.h>
 #include <limits.h>
 #include <stdbool.h>
+#include <errno.h>
 #include "modpost.h"
 #include "../../include/generated/autoconf.h"
 #include "../../include/linux/license.h"
@@ -37,6 +38,8 @@
 /* How a symbol is exported */
 static int sec_mismatch_count = 0;
 static int sec_mismatch_verbose = 1;
+/* ignore missing files */
+static int ignore_missing_files;
 
 enum export {
 	export_plain,      export_unused,     export_gpl,
@@ -161,7 +164,7 @@
 	unsigned int vmlinux:1;    /* 1 if symbol is defined in vmlinux */
 	unsigned int kernel:1;     /* 1 if symbol is from kernel
 				    *  (only for external modules) **/
-	unsigned int preloaded:1;  /* 1 if symbol from Module.symvers */
+	unsigned int preloaded:1;  /* 1 if symbol from Module.symvers, or crc */
 	enum export  export;       /* Type of export */
 	char name[0];
 };
@@ -329,8 +332,11 @@
 {
 	struct symbol *s = find_symbol(name);
 
-	if (!s)
+	if (!s) {
 		s = new_symbol(name, mod, export);
+		/* Don't complain when we find it later. */
+		s->preloaded = 1;
+	}
 	s->crc = crc;
 	s->crc_valid = 1;
 }
@@ -407,6 +413,11 @@
 
 	hdr = grab_file(filename, &info->size);
 	if (!hdr) {
+		if (ignore_missing_files) {
+			fprintf(stderr, "%s: %s (ignored)\n", filename,
+				strerror(errno));
+			return 0;
+		}
 		perror(filename);
 		exit(1);
 	}
@@ -599,18 +610,17 @@
 	else
 		export = export_from_sec(info, get_secindex(info, sym));
 
+	/* CRC'd symbol */
+	if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
+		crc = (unsigned int) sym->st_value;
+		sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
+				export);
+	}
+
 	switch (sym->st_shndx) {
 	case SHN_COMMON:
 		warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
 		break;
-	case SHN_ABS:
-		/* CRC'd symbol */
-		if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
-			crc = (unsigned int) sym->st_value;
-			sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
-					export);
-		}
-		break;
 	case SHN_UNDEF:
 		/* undefined symbol */
 		if (ELF_ST_BIND(sym->st_info) != STB_GLOBAL &&
@@ -1853,7 +1863,7 @@
 	buf_printf(b, "\n");
 	buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
 	buf_printf(b, "\n");
-	buf_printf(b, "struct module __this_module\n");
+	buf_printf(b, "__visible struct module __this_module\n");
 	buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
 	buf_printf(b, "\t.name = KBUILD_MODNAME,\n");
 	if (mod->has_init)
@@ -2119,7 +2129,7 @@
 	struct ext_sym_list *extsym_iter;
 	struct ext_sym_list *extsym_start = NULL;
 
-	while ((opt = getopt(argc, argv, "i:I:e:msST:o:awM:K:")) != -1) {
+	while ((opt = getopt(argc, argv, "i:I:e:mnsST:o:awM:K:")) != -1) {
 		switch (opt) {
 		case 'i':
 			kernel_read = optarg;
@@ -2139,6 +2149,9 @@
 		case 'm':
 			modversions = 1;
 			break;
+		case 'n':
+			ignore_missing_files = 1;
+			break;
 		case 'o':
 			dump_write = optarg;
 			break;
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index 9dfcd6d..deb2994 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -416,7 +416,7 @@
 		basename = strrchr(modname, '/') + 1;
 	else
 		basename = modname;
-	sprintf(filelist, "%s/%.*s.mod", modverdir,
+	snprintf(filelist, sizeof(filelist), "%s/%.*s.mod", modverdir,
 		(int) strlen(basename) - 2, basename);
 
 	file = grab_file(filelist, &len);
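
The sumversion.c change is the usual sprintf-to-snprintf hardening: snprintf()
never writes past the given size, always NUL-terminates, and its return value
exposes truncation. Standalone illustration:

#include <stdio.h>

int main(void)
{
	char path[32];
	int n = snprintf(path, sizeof(path), "%s/%s.mod",
			 "/very/long/module/dir", "driver");

	if (n < 0 || n >= (int)sizeof(path))
		fprintf(stderr, "path truncated (needed %d bytes)\n", n + 1);
	return 0;
}
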
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index a674fd5..d0da663 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -214,13 +214,13 @@
 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
 $section_regex = "Disassembly of section\\s+(\\S+):";
 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
-$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
+$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
 $section_type = '@progbits';
 $mcount_adjust = 0;
 $type = ".long";
 
 if ($arch eq "x86_64") {
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)([+-]0x[0-9a-zA-Z]+)?\$";
     $type = ".quad";
     $alignment = 8;
     $mcount_adjust = -1;
diff --git a/scripts/show_delta b/scripts/show_delta
index 17df305..e25732b 100755
--- a/scripts/show_delta
+++ b/scripts/show_delta
@@ -13,7 +13,7 @@
 import string
 
 def usage():
-	print """usage: show_delta [<options>] <filename>
+	print ("""usage: show_delta [<options>] <filename>
 
 This program parses the output from a set of printk message lines which
 have time data prefixed because the CONFIG_PRINTK_TIME option is set, or
@@ -35,7 +35,7 @@
 
 will show times relative to the line in the kernel output
 starting with "NET4".
-"""
+""")
 	sys.exit(1)
 
 # returns a tuple containing the seconds and text for each message line
@@ -94,11 +94,11 @@
 	try:
 		lines = open(filein,"r").readlines()
 	except:
-		print "Problem opening file: %s" % filein
+		print ("Problem opening file: %s" % filein)
 		sys.exit(1)
 
 	if base_str:
-		print 'base= "%s"' % base_str
+		print ('base= "%s"' % base_str)
 		# assume a numeric base.  If that fails, try searching
 		# for a matching line.
 		try:
@@ -117,13 +117,13 @@
 					# stop at first match
 					break
 			if not found:
-				print 'Couldn\'t find line matching base pattern "%s"' % base_str
+				print ('Couldn\'t find line matching base pattern "%s"' % base_str)
 				sys.exit(1)
 	else:
 		base_time = 0.0
 
 	for line in lines:
-		print convert_line(line, base_time),
+		print (convert_line(line, base_time),)
 
 main()
 
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 7c2310c..5f7a8b6 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -152,6 +152,30 @@
 
 typedef void (*table_sort_t)(char *, int);
 
+/*
+ * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
+ * the way to -256..-1, to avoid conflicting with real section
+ * indices.
+ */
+#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
+
+static inline int is_shndx_special(unsigned int i)
+{
+	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
+}
+
+/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
+static inline unsigned int get_secindex(unsigned int shndx,
+					unsigned int sym_offs,
+					const Elf32_Word *symtab_shndx_start)
+{
+	if (is_shndx_special(shndx))
+		return SPECIAL(shndx);
+	if (shndx != SHN_XINDEX)
+		return shndx;
+	return r(&symtab_shndx_start[sym_offs]);
+}
+
 /* 32 bit and 64 bit are very similar */
 #include "sortextable.h"
 #define SORTEXTABLE_64
diff --git a/scripts/sortextable.h b/scripts/sortextable.h
index f5eb43d..8fac3fd 100644
--- a/scripts/sortextable.h
+++ b/scripts/sortextable.h
@@ -98,6 +98,8 @@
 	Elf_Shdr *symtab_sec = NULL;
 	Elf_Shdr *extab_sec = NULL;
 	Elf_Sym *sym;
+	const Elf_Sym *symtab;
+	Elf32_Word *symtab_shndx_start = NULL;
 	Elf_Sym *sort_needed_sym;
 	Elf_Shdr *sort_needed_sec;
 	Elf_Rel *relocs = NULL;
@@ -109,11 +111,22 @@
 	int extab_index = 0;
 	int i;
 	int idx;
+	unsigned int num_sections;
+	unsigned int secindex_strings;
 
 	shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
-	shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
+
+	num_sections = r2(&ehdr->e_shnum);
+	if (num_sections == SHN_UNDEF)
+		num_sections = _r(&shdr[0].sh_size);
+
+	secindex_strings = r2(&ehdr->e_shstrndx);
+	if (secindex_strings == SHN_XINDEX)
+		secindex_strings = r(&shdr[0].sh_link);
+
+	shstrtab_sec = shdr + secindex_strings;
 	secstrtab = (const char *)ehdr + _r(&shstrtab_sec->sh_offset);
-	for (i = 0; i < r2(&ehdr->e_shnum); i++) {
+	for (i = 0; i < num_sections; i++) {
 		idx = r(&shdr[i].sh_name);
 		if (strcmp(secstrtab + idx, "__ex_table") == 0) {
 			extab_sec = shdr + i;
@@ -129,6 +142,9 @@
 			symtab_sec = shdr + i;
 		if (strcmp(secstrtab + idx, ".strtab") == 0)
 			strtab_sec = shdr + i;
+		if (r(&shdr[i].sh_type) == SHT_SYMTAB_SHNDX)
+			symtab_shndx_start = (Elf32_Word *)(
+				(const char *)ehdr + _r(&shdr[i].sh_offset));
 	}
 	if (strtab_sec == NULL) {
 		fprintf(stderr,	"no .strtab in  file: %s\n", fname);
@@ -138,6 +154,8 @@
 		fprintf(stderr,	"no .symtab in  file: %s\n", fname);
 		fail_file();
 	}
+	symtab = (const Elf_Sym *)((const char *)ehdr +
+				   _r(&symtab_sec->sh_offset));
 	if (extab_sec == NULL) {
 		fprintf(stderr,	"no __ex_table in  file: %s\n", fname);
 		fail_file();
@@ -176,7 +194,9 @@
 			fname);
 		fail_file();
 	}
-	sort_needed_sec = &shdr[r2(&sort_needed_sym->st_shndx)];
+	sort_needed_sec = &shdr[get_secindex(r2(&sym->st_shndx),
+					     sort_needed_sym - symtab,
+					     symtab_shndx_start)];
 	sort_done_location = (void *)ehdr +
 		_r(&sort_needed_sec->sh_offset) +
 		_r(&sort_needed_sym->st_value) -
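
Background for the sortextable changes: ELF objects with 64K or more sections
store the real section count in section header 0's sh_size (e_shnum reads 0),
the real string-table index in its sh_link (e_shstrndx reads SHN_XINDEX), and
overflowing per-symbol section indices in a SHT_SYMTAB_SHNDX section. The
header side of that convention, as a user-space sketch:

#include <elf.h>

static unsigned int real_shnum(const Elf64_Ehdr *eh, const Elf64_Shdr *sh0)
{
	/* e_shnum of 0 redirects to section header 0's sh_size */
	return eh->e_shnum ? eh->e_shnum : (unsigned int)sh0->sh_size;
}

static unsigned int real_shstrndx(const Elf64_Ehdr *eh, const Elf64_Shdr *sh0)
{
	/* SHN_XINDEX redirects to section header 0's sh_link */
	return eh->e_shstrndx == SHN_XINDEX ? sh0->sh_link : eh->e_shstrndx;
}
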
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 74f02e4..58c4559 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -149,15 +149,16 @@
 exuberant()
 {
 	all_target_sources | xargs $1 -a                        \
-	-I __initdata,__exitdata,__initconst,__devinitdata	\
-	-I __devinitconst,__cpuinitdata,__initdata_memblock	\
-	-I __refdata,__attribute				\
+	-I __initdata,__exitdata,__initconst,			\
+	-I __cpuinitdata,__initdata_memblock			\
+	-I __refdata,__attribute,__maybe_unused,__always_unused \
 	-I __acquires,__releases,__deprecated			\
 	-I __read_mostly,__aligned,____cacheline_aligned        \
 	-I ____cacheline_aligned_in_smp                         \
+	-I __cacheline_aligned,__cacheline_aligned_in_smp	\
 	-I ____cacheline_internodealigned_in_smp                \
 	-I __used,__packed,__packed2__,__must_check,__must_hold	\
-	-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL                      \
+	-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
 	-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
 	-I static,const						\
 	--extra=+f --c-kinds=+px                                \
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index c123628..7c2a0a7 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -63,16 +63,6 @@
 
 struct cgroup_subsys devices_subsys;
 
-static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
-				struct cgroup_taskset *set)
-{
-	struct task_struct *task = cgroup_taskset_first(set);
-
-	if (current != task && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	return 0;
-}
-
 /*
  * called under devcgroup_mutex
  */
@@ -697,7 +687,6 @@
 
 struct cgroup_subsys devices_subsys = {
 	.name = "devices",
-	.can_attach = devcgroup_can_attach,
 	.css_alloc = devcgroup_css_alloc,
 	.css_free = devcgroup_css_free,
 	.css_online = devcgroup_online,
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 8d8d97d..234bc2a 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -302,18 +302,19 @@
 						"faddr", "fport");
 				break;
 			}
+#if IS_ENABLED(CONFIG_IPV6)
 			case AF_INET6: {
 				struct inet_sock *inet = inet_sk(sk);
-				struct ipv6_pinfo *inet6 = inet6_sk(sk);
 
-				print_ipv6_addr(ab, &inet6->rcv_saddr,
+				print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
 						inet->inet_sport,
 						"laddr", "lport");
-				print_ipv6_addr(ab, &inet6->daddr,
+				print_ipv6_addr(ab, &sk->sk_v6_daddr,
 						inet->inet_dport,
 						"faddr", "fport");
 				break;
 			}
+#endif
 			case AF_UNIX:
 				u = unix_sk(sk);
 				if (u->path.dentry) {
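
Two things happen in the hunk above: the AF_INET6 branch is compiled out when IPv6 is disabled, and the addresses are read from the fields that moved into struct sock (sk_v6_rcv_saddr/sk_v6_daddr), so inet6_sk() is no longer needed here. A stand-alone illustration of the IS_ENABLED() idiom, with a crude stand-in for the kernel's kconfig machinery (the real macro also evaluates to 1 for =m symbols):

#include <stdio.h>

/* Fake Kconfig symbol and a simplified IS_ENABLED(); in the kernel
 * these come from autoconf.h and <linux/kconfig.h>.  An undefined
 * symbol evaluates to 0 inside #if, compiling the branch out. */
#define CONFIG_IPV6 1			/* pretend IPv6 is built in */
#define IS_ENABLED(option) (option)	/* simplified stand-in */

int main(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	puts("AF_INET6 branch compiled in");
#else
	puts("AF_INET6 branch compiled out");
#endif
	return 0;
}
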
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 5b52310..c540795 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3928,7 +3928,7 @@
 		if (snum) {
 			int low, high;
 
-			inet_get_local_port_range(&low, &high);
+			inet_get_local_port_range(sock_net(sk), &low, &high);
 
 			if (snum < max(PROT_SOCK, low) || snum > high) {
 				err = sel_netport_sid(sk->sk_protocol,
@@ -4667,7 +4667,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
 					 struct sk_buff *skb,
 					 const struct net_device *in,
 					 const struct net_device *out,
@@ -4677,7 +4677,7 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
 					 struct sk_buff *skb,
 					 const struct net_device *in,
 					 const struct net_device *out,
@@ -4709,7 +4709,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
 					struct sk_buff *skb,
 					const struct net_device *in,
 					const struct net_device *out,
@@ -4836,7 +4836,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
 					   struct sk_buff *skb,
 					   const struct net_device *in,
 					   const struct net_device *out,
@@ -4846,7 +4846,7 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
 					   struct sk_buff *skb,
 					   const struct net_device *in,
 					   const struct net_device *out,
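
The prototype change above is mechanical: netfilter now hands each hook its own struct nf_hook_ops, and the hook number moves into ops->hooknum. A hedged sketch of a hook written against the new signature (the module name, registration values, and message are illustrative):

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

/* Demo hook using the 3.13 nf_hookfn prototype shown above; the hook
 * number is read from ops->hooknum instead of a dedicated parameter. */
static unsigned int demo_hook(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	pr_debug("packet seen at hook %u\n", ops->hooknum);
	return NF_ACCEPT;
}

static struct nf_hook_ops demo_ops = {
	.hook     = demo_hook,
	.owner    = THIS_MODULE,
	.pf       = NFPROTO_IPV4,
	.hooknum  = NF_INET_FORWARD,
	.priority = NF_IP_PRI_FIRST,
};

static int __init demo_init(void)
{
	return nf_register_hook(&demo_ops);
}

static void __exit demo_exit(void)
{
	nf_unregister_hook(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
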
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
index 69a2455..e6c727b 100644
--- a/sound/arm/pxa2xx-pcm.c
+++ b/sound/arm/pxa2xx-pcm.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 
 #include <sound/core.h>
@@ -83,8 +84,6 @@
 	.mmap		= pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
 int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
 		   struct snd_pcm **rpcm)
 {
@@ -100,10 +99,9 @@
 	pcm->private_data = client;
 	pcm->private_free = pxa2xx_pcm_free_dma_buffers;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = 0xffffffff;
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto out;
 
 	if (play) {
 		int stream = SNDRV_PCM_STREAM_PLAYBACK;
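
The same conversion repeats across the ASoC drivers that follow: the static u64 mask object and the two NULL checks collapse into dma_coerce_mask_and_coherent(), which also reports failure instead of silently accepting an unusable mask. A minimal sketch, with dev standing in for card->dev:

#include <linux/dma-mapping.h>

/* Sketch of the converted pattern; dev is a stand-in for card->dev. */
static int demo_set_dma_masks(struct device *dev)
{
	/*
	 * Forces dev->dma_mask to point at dev->coherent_dma_mask, then
	 * validates the requested 32-bit mask for both streaming and
	 * coherent mappings.
	 */
	return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
}

Callers are expected to propagate the return value, which is why each converted probe/new path in this series grows an "if (ret) return ret;".
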
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 9d93f02..5e1c7bc 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -184,11 +184,7 @@
 	/* Assign the pool into private_data field */
 	dmab->private_data = pool;
 
-	dmab->area = (void *)gen_pool_alloc(pool, size);
-	if (!dmab->area)
-		return;
-
-	dmab->addr = gen_pool_virt_to_phys(pool, (unsigned long)dmab->area);
+	dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
 }
 
 /**
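
gen_pool_dma_alloc() merges the two-step allocate-then-translate dance above into one call. A hedged sketch of a caller, with pool/size/dma standing in for the driver's own state:

#include <linux/genalloc.h>
#include <linux/types.h>

/* Sketch only: gen_pool_dma_alloc() returns the virtual address and
 * stores the matching DMA address through @dma, replacing the old
 * gen_pool_alloc() + gen_pool_virt_to_phys() pair. */
static void *demo_sram_alloc(struct gen_pool *pool, size_t size,
			     dma_addr_t *dma)
{
	void *vaddr = gen_pool_dma_alloc(pool, size, dma);

	if (!vaddr)
		return NULL;		/* pool exhausted */
	return vaddr;
}
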
diff --git a/sound/core/sound.c b/sound/core/sound.c
index f002bd9..437c25e 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -153,7 +153,7 @@
 {
 	unsigned int minor = iminor(inode);
 	struct snd_minor *mptr = NULL;
-	const struct file_operations *old_fops;
+	const struct file_operations *new_fops;
 	int err = 0;
 
 	if (minor >= ARRAY_SIZE(snd_minors))
@@ -167,24 +167,14 @@
 			return -ENODEV;
 		}
 	}
-	old_fops = file->f_op;
-	file->f_op = fops_get(mptr->f_ops);
-	if (file->f_op == NULL) {
-		file->f_op = old_fops;
-		err = -ENODEV;
-	}
+	new_fops = fops_get(mptr->f_ops);
 	mutex_unlock(&sound_mutex);
-	if (err < 0)
-		return err;
+	if (!new_fops)
+		return -ENODEV;
+	replace_fops(file, new_fops);
 
-	if (file->f_op->open) {
+	if (file->f_op->open)
 		err = file->f_op->open(inode, file);
-		if (err) {
-			fops_put(file->f_op);
-			file->f_op = fops_get(old_fops);
-		}
-	}
-	fops_put(old_fops);
 	return err;
 }
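
The replace_fops() helper used above swaps a file's f_op while consuming the new reference and dropping the old one, which removes the racy save/restore dance deleted here. A hedged sketch of the demultiplexing ->open() pattern, with demo_real_fops standing in for the per-minor operations looked up under the lock:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations demo_real_fops;	/* stand-in */

static int demo_open(struct inode *inode, struct file *file)
{
	const struct file_operations *new_fops = fops_get(&demo_real_fops);

	if (!new_fops)
		return -ENODEV;
	/* Swaps file->f_op and releases the reference on the old one. */
	replace_fops(file, new_fops);

	if (file->f_op->open)
		return file->f_op->open(inode, file);
	return 0;
}

Unlike the old code, a failed ->open() needs no rollback: the final fput() releases against the replaced f_op.
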
 
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
index 6feee66..57bcd31 100644
--- a/sound/firewire/dice.c
+++ b/sound/firewire/dice.c
@@ -543,7 +543,7 @@
 	__be32 value;
 	int err;
 
-	INIT_COMPLETION(dice->clock_accepted);
+	reinit_completion(&dice->clock_accepted);
 
 	value = cpu_to_be32(clock_rate | CLOCK_SOURCE_ARX1);
 	err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
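
INIT_COMPLETION(x) is being replaced tree-wide by reinit_completion(&x), a function with the same semantics (reset ->done without touching the wait queue). A minimal sketch of the reuse pattern, with demo names standing in for the driver's own completion and request:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(demo_done);

static int demo_issue_and_wait(void)
{
	/* Re-arm before each request so a stale complete() is ignored. */
	reinit_completion(&demo_done);

	/* ... start the hardware request that ends in complete(&demo_done) ... */

	if (!wait_for_completion_timeout(&demo_done,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}
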
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 612e580..8ae3fa5 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -68,18 +68,15 @@
 }
 EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
 
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
 int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &atmel_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 53f8408..1d4c676 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -415,19 +415,16 @@
 	}
 }
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
 	pr_debug("%s enter\n", __func__);
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &bf5xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 9cb4a80..2a5b434 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -323,18 +323,16 @@
 	.silence	= bf5xx_pcm_silence,
 };
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+	int ret;
 
 	pr_debug("%s enter\n", __func__);
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &bf5xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 				SNDRV_DMA_TYPE_DEV, card->dev, size, size);
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index f2e62e4..19e9f22 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -614,7 +614,7 @@
 };
 
 /* Note : pll code from original alc5632 driver. Not sure of how good it is */
-/* usefull only for master mode */
+/* useful only for master mode */
 static const struct _pll_div codec_master_pll_div[] = {
 
 	{  2048000,  8192000,	0x0ea0},
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 8460edc..fa64cd8 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -267,10 +267,9 @@
 		return 0;
 
 	ppcm->period_bytes_max = size;
-	iram_virt = (void *)gen_pool_alloc(sram_pool, size);
+	iram_virt = gen_pool_dma_alloc(sram_pool, size, &iram_phys);
 	if (!iram_virt)
 		goto exit1;
-	iram_phys = gen_pool_virt_to_phys(sram_pool, (unsigned)iram_virt);
 	iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
 	if (!iram_dma)
 		goto exit2;
@@ -844,18 +843,15 @@
 	}
 }
 
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &davinci_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = davinci_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index d1b111e..fb9bb9e 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -300,14 +300,11 @@
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &fsl_dma_dmamask;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = fsl_dma_dmamask;
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+	if (ret)
+		return ret;
 
 	/* Some codecs have separate DAIs for playback and capture, so we
 	 * should allocate a DMA buffer only for the streams that are valid.
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 10e3305..41740e4 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -254,18 +254,16 @@
 	return 0;
 }
 
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &imx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = imx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 161e5055..71bf2f2 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -301,7 +301,6 @@
 	.hw_params	= psc_dma_hw_params,
 };
 
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
 static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
@@ -309,15 +308,14 @@
 	struct snd_pcm *pcm = rtd->pcm;
 	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	size_t size = psc_dma_hardware.buffer_bytes_max;
-	int rc = 0;
+	int rc;
 
 	dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
 		card, dai, pcm);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &psc_dma_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index 7100592..1d7ef28 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -297,19 +297,15 @@
 	}
 }
 
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &jz4740_pcm_dmamask;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = jz4740_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 55d0d9d3..4af1936 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -57,8 +57,6 @@
 	.fifo_size		= 0,
 };
 
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
 	struct kirkwood_dma_data *priv = dev_id;
@@ -290,10 +288,9 @@
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &kirkwood_dma_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index c894ff0..f588ee4 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -314,16 +314,15 @@
 	snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
 static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &nuc900_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
 		card->dev, 4 * 1024, (4 * 1024) - 1);
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a11405d..b8fa986 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -156,8 +156,6 @@
 	.mmap		= omap_pcm_mmap,
 };
 
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
 static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
 	int stream)
 {
@@ -202,12 +200,11 @@
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &omap_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 8235e23..7929e19 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -201,10 +201,9 @@
 	if (!gpool)
 		return -ENOMEM;
 
-	buf->area = (unsigned char *)gen_pool_alloc(gpool, size);
+	buf->area = gen_pool_dma_alloc(gpool, size, &buf->addr);
 	if (!buf->area)
 		return -ENOMEM;
-	buf->addr = gen_pool_virt_to_phys(gpool, (unsigned long)buf->area);
 	buf->bytes = size;
 	return 0;
 }
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 806da27..d58b09f 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -87,18 +87,15 @@
 	.mmap		= pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 5cfaa54..d219880 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -445,8 +445,6 @@
 	snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 {
 	struct snd_card *card = runtime->card->snd_card;
@@ -457,10 +455,9 @@
 	params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
 			pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &s6000_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (res)
+		return res;
 
 	if (params->dma_in) {
 		s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 2acf987..350ba23 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -74,7 +74,7 @@
 	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
 		return; /* Return if already active */
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
 	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
@@ -103,7 +103,7 @@
 
 	s3c_ac97_activate(ac97);
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
 	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
@@ -140,7 +140,7 @@
 
 	s3c_ac97_activate(ac97);
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
 	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9338d11..fe2748b 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -406,20 +406,17 @@
 	}
 }
 
-static u64 dma_mask = DMA_BIT_MASK(32);
-
 static int dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &dma_mask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = preallocate_dma_buffer(pcm,
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index ce1e1e1..e4f318f 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -383,18 +383,15 @@
 	return 0;
 }
 
-static u64 idma_mask = DMA_BIT_MASK(32);
-
 static int idma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &idma_mask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = preallocate_idma_buffer(pcm,
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 45759f4..11e953a 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -626,31 +626,20 @@
 		if (s)
 			new_fops = fops_get(s->unit_fops);
 	}
+	spin_unlock(&sound_loader_lock);
 	if (new_fops) {
 		/*
 		 * We rely upon the fact that we can't be unloaded while the
-		 * subdriver is there, so if ->open() is successful we can
-		 * safely drop the reference counter and if it is not we can
-		 * revert to old ->f_op. Ugly, indeed, but that's the cost of
-		 * switching ->f_op in the first place.
+		 * subdriver is there.
 		 */
 		int err = 0;
-		const struct file_operations *old_fops = file->f_op;
-		file->f_op = new_fops;
-		spin_unlock(&sound_loader_lock);
+		replace_fops(file, new_fops);
 
 		if (file->f_op->open)
 			err = file->f_op->open(inode,file);
 
-		if (err) {
-			fops_put(file->f_op);
-			file->f_op = fops_get(old_fops);
-		}
-
-		fops_put(old_fops);
 		return err;
 	}
-	spin_unlock(&sound_loader_lock);
 	return -ENODEV;
 }
 
diff --git a/tools/Makefile b/tools/Makefile
index 41067f3..a9b0200 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -15,6 +15,7 @@
 	@echo '  net        - misc networking tools'
 	@echo '  vm         - misc vm tools'
 	@echo '  x86_energy_perf_policy - Intel energy policy tool'
+	@echo '  tmon       - thermal monitoring and tuning tool'
 	@echo ''
 	@echo 'You can do:'
 	@echo ' $$ make -C tools/ <tool>_install'
@@ -50,6 +51,9 @@
 turbostat x86_energy_perf_policy: FORCE
 	$(call descend,power/x86/$@)
 
+tmon: FORCE
+	$(call descend,thermal/$@)
+
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
@@ -62,9 +66,13 @@
 turbostat_install x86_energy_perf_policy_install:
 	$(call descend,power/x86/$(@:_install=),install)
 
+tmon_install:
+	$(call descend,thermal/$(@:_install=),install)
+
 install: cgroup_install cpupower_install firewire_install lguest_install \
 		perf_install selftests_install turbostat_install usb_install \
-		virtio_install vm_install net_install x86_energy_perf_policy_install
+		virtio_install vm_install net_install x86_energy_perf_policy_install \
+		tmon_install
 
 cpupower_clean:
 	$(call descend,power/cpupower,clean)
@@ -84,8 +92,11 @@
 turbostat_clean x86_energy_perf_policy_clean:
 	$(call descend,power/x86/$(@:_clean=),clean)
 
+tmon_clean:
+	$(call descend,thermal/tmon,clean)
+
 clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \
 		selftests_clean turbostat_clean usb_clean virtio_clean \
-		vm_clean net_clean x86_energy_perf_policy_clean
+		vm_clean net_clean x86_energy_perf_policy_clean tmon_clean
 
 .PHONY: FORCE
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 8f450ad..0362d57 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3435,6 +3435,19 @@
 			goto out_warning_op;
 		}
 		break;
+	case PRINT_DYNAMIC_ARRAY:
+		/* Without [], we pass the address to the dynamic data */
+		offset = pevent_read_number(pevent,
+					    data + arg->dynarray.field->offset,
+					    arg->dynarray.field->size);
+		/*
+		 * The actual length of the dynamic array is stored
+		 * in the top half of the field, and the offset
+		 * is in the bottom half of the 32 bit field.
+		 */
+		offset &= 0xffff;
+		val = (unsigned long long)(data + offset);
+		break;
 	default: /* not sure what to do there */
 		return 0;
 	}
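
The decoding in the new PRINT_DYNAMIC_ARRAY case is the standard __data_loc layout for ftrace events. A stand-alone sketch (the record contents and lengths are made up):

#include <stdio.h>

/* __data_loc packs the payload length in the top 16 bits and the
 * payload offset (relative to the raw record) in the bottom 16 bits. */
static void *dynarray_ptr(void *data, unsigned int data_loc,
			  unsigned int *len)
{
	if (len)
		*len = data_loc >> 16;
	return (char *)data + (data_loc & 0xffff);
}

int main(void)
{
	char record[64] = "xxxxxxxxhello";	/* payload at offset 8 */
	unsigned int loc = (5u << 16) | 8u;	/* length 5, offset 8 */
	unsigned int len;
	const char *p = dynarray_ptr(record, loc, &len);

	printf("%.*s (%u bytes)\n", (int)len, p, len);	/* hello (5 bytes) */
	return 0;
}
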
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 052f7c4..43b42c4 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -201,6 +201,12 @@
 --transaction::
 Record transaction flags for transaction related events.
 
+--force-per-cpu::
+Force the use of per-cpu mmaps.  By default, when tasks are specified (i.e. -p,
+-t or -u options) per-thread mmaps are created.  This option overrides that and
+forces per-cpu mmaps.  A side-effect is that inheritance is then enabled
+automatically.  Add the -i option as well to disable inheritance.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 7b0497f..fae38d9 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -93,9 +93,15 @@
 --comm::
         Show process COMM right beside its ID, on by default, disable with --no-comm.
 
+-s::
 --summary::
-	Show a summary of syscalls by thread with min, max, and average times (in
-    msec) and relative stddev.
+	Show only a summary of syscalls by thread with min, max, and average times
+    (in msec) and relative stddev.
+
+-S::
+--with-summary::
+	Show all syscalls followed by a summary by thread with min, max, and
+    average times (in msec) and relative stddev.
 
 --tool_stats::
 	Show tool stats such as number of times fd->pathname was discovered thru
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index 15130b50..fe9b61e 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -2,3 +2,6 @@
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
new file mode 100644
index 0000000..2a1cfde
--- /dev/null
+++ b/tools/perf/arch/arm/include/perf_regs.h
@@ -0,0 +1,54 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MASK	((1ULL << PERF_REG_ARM_MAX) - 1)
+#define PERF_REG_IP	PERF_REG_ARM_PC
+#define PERF_REG_SP	PERF_REG_ARM_SP
+
+static inline const char *perf_reg_name(int id)
+{
+	switch (id) {
+	case PERF_REG_ARM_R0:
+		return "r0";
+	case PERF_REG_ARM_R1:
+		return "r1";
+	case PERF_REG_ARM_R2:
+		return "r2";
+	case PERF_REG_ARM_R3:
+		return "r3";
+	case PERF_REG_ARM_R4:
+		return "r4";
+	case PERF_REG_ARM_R5:
+		return "r5";
+	case PERF_REG_ARM_R6:
+		return "r6";
+	case PERF_REG_ARM_R7:
+		return "r7";
+	case PERF_REG_ARM_R8:
+		return "r8";
+	case PERF_REG_ARM_R9:
+		return "r9";
+	case PERF_REG_ARM_R10:
+		return "r10";
+	case PERF_REG_ARM_FP:
+		return "fp";
+	case PERF_REG_ARM_IP:
+		return "ip";
+	case PERF_REG_ARM_SP:
+		return "sp";
+	case PERF_REG_ARM_LR:
+		return "lr";
+	case PERF_REG_ARM_PC:
+		return "pc";
+	default:
+		return NULL;
+	}
+
+	return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/util/unwind.c b/tools/perf/arch/arm/util/unwind.c
new file mode 100644
index 0000000..da3dc95
--- /dev/null
+++ b/tools/perf/arch/arm/util/unwind.c
@@ -0,0 +1,47 @@
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
+int unwind__arch_reg_id(int regnum)
+{
+	switch (regnum) {
+	case UNW_ARM_R0:
+		return PERF_REG_ARM_R0;
+	case UNW_ARM_R1:
+		return PERF_REG_ARM_R1;
+	case UNW_ARM_R2:
+		return PERF_REG_ARM_R2;
+	case UNW_ARM_R3:
+		return PERF_REG_ARM_R3;
+	case UNW_ARM_R4:
+		return PERF_REG_ARM_R4;
+	case UNW_ARM_R5:
+		return PERF_REG_ARM_R5;
+	case UNW_ARM_R6:
+		return PERF_REG_ARM_R6;
+	case UNW_ARM_R7:
+		return PERF_REG_ARM_R7;
+	case UNW_ARM_R8:
+		return PERF_REG_ARM_R8;
+	case UNW_ARM_R9:
+		return PERF_REG_ARM_R9;
+	case UNW_ARM_R10:
+		return PERF_REG_ARM_R10;
+	case UNW_ARM_R11:
+		return PERF_REG_ARM_FP;
+	case UNW_ARM_R12:
+		return PERF_REG_ARM_IP;
+	case UNW_ARM_R13:
+		return PERF_REG_ARM_SP;
+	case UNW_ARM_R14:
+		return PERF_REG_ARM_LR;
+	case UNW_ARM_R15:
+		return PERF_REG_ARM_PC;
+	default:
+		pr_err("unwind: invalid reg id %d\n", regnum);
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index cd9f920..f8bf5f2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1510,13 +1510,13 @@
 	/*
 	 * target related setups
 	 */
-	err = perf_target__validate(&kvm->opts.target);
+	err = target__validate(&kvm->opts.target);
 	if (err) {
-		perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
+		target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
 		ui__warning("%s", errbuf);
 	}
 
-	if (perf_target__none(&kvm->opts.target))
+	if (target__none(&kvm->opts.target))
 		kvm->opts.target.system_wide = true;
 
 
@@ -1544,18 +1544,8 @@
 	}
 	kvm->session->evlist = kvm->evlist;
 	perf_session__set_id_hdr_size(kvm->session);
-
-
-	if (perf_target__has_task(&kvm->opts.target))
-		perf_event__synthesize_thread_map(&kvm->tool,
-						  kvm->evlist->threads,
-						  perf_event__process,
-						  &kvm->session->machines.host);
-	else
-		perf_event__synthesize_threads(&kvm->tool, perf_event__process,
-					       &kvm->session->machines.host);
-
-
+	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
+				    kvm->evlist->threads, false);
 	err = kvm_live_open_events(kvm);
 	if (err)
 		goto out;
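
builtin-kvm, builtin-record, builtin-top and builtin-trace all carried the same task-vs-system-wide dispatch; this series folds it into (__)machine__synthesize_threads(). A sketch of the dispatch those calls replace, reconstructed from the call sites in this diff (the exact signature of the real helper in perf's util/ code is an assumption):

/* Reconstruction of the factored-out dispatch; names and the trailing
 * data_mmap flag mirror the call sites above. */
static int demo_synthesize_threads(struct machine *machine,
				   struct perf_tool *tool,
				   struct target *target,
				   struct thread_map *threads,
				   perf_event__handler_t process,
				   bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads,
							 process, machine,
							 data_mmap);
	if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap);
	return 0;	/* command workload: nothing to synthesize yet */
}
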
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 89acc17..6ea9e85 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -325,6 +325,8 @@
 		     opt_set_filter),
 	OPT_CALLBACK('x', "exec", NULL, "executable|path",
 			"target executable name or path", opt_set_target),
+	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+		    "Disable symbol demangling"),
 	OPT_END()
 	};
 	int ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 15280b5..7c8020a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -76,12 +76,12 @@
 	long			samples;
 };
 
-static int write_output(struct perf_record *rec, void *buf, size_t size)
+static int do_write_output(struct perf_record *rec, void *buf, size_t size)
 {
 	struct perf_data_file *file = &rec->file;
 
 	while (size) {
-		int ret = write(file->fd, buf, size);
+		ssize_t ret = write(file->fd, buf, size);
 
 		if (ret < 0) {
 			pr_err("failed to write perf data, error: %m\n");
@@ -97,6 +97,11 @@
 	return 0;
 }
 
+static int write_output(struct perf_record *rec, void *buf, size_t size)
+{
+	return do_write_output(rec, buf, size);
+}
+
 static int process_synthesized_event(struct perf_tool *tool,
 				     union perf_event *event,
 				     struct perf_sample *sample __maybe_unused,
@@ -480,16 +485,8 @@
 					 perf_event__synthesize_guest_os, tool);
 	}
 
-	if (perf_target__has_task(&opts->target))
-		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
-						  process_synthesized_event,
-						  machine);
-	else if (perf_target__has_cpu(&opts->target))
-		err = perf_event__synthesize_threads(tool, process_synthesized_event,
-					       machine);
-	else /* command specified */
-		err = 0;
-
+	err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
+					    process_synthesized_event, opts->sample_address);
 	if (err != 0)
 		goto out_delete_session;
 
@@ -509,7 +506,7 @@
 	 * (apart from group members) have enable_on_exec=1 set,
 	 * so don't spoil it by prematurely enabling them.
 	 */
-	if (!perf_target__none(&opts->target))
+	if (!target__none(&opts->target))
 		perf_evlist__enable(evsel_list);
 
 	/*
@@ -538,7 +535,7 @@
 		 * die with the process and we wait for that. Thus no need to
 		 * disable events in this case.
 		 */
-		if (done && !disabled && !perf_target__none(&opts->target)) {
+		if (done && !disabled && !target__none(&opts->target)) {
 			perf_evlist__disable(evsel_list);
 			disabled = true;
 		}
@@ -891,6 +888,8 @@
 		    "sample by weight (on special events only)"),
 	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
 		    "sample transaction flags (special events only)"),
+	OPT_BOOLEAN(0, "force-per-cpu", &record.opts.target.force_per_cpu,
+		    "force the use of per-cpu mmaps"),
 	OPT_END()
 };
 
@@ -909,7 +908,7 @@
 
 	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
-	if (!argc && perf_target__none(&rec->opts.target))
+	if (!argc && target__none(&rec->opts.target))
 		usage_with_options(record_usage, record_options);
 
 	if (nr_cgroups && !rec->opts.target.system_wide) {
@@ -939,17 +938,17 @@
 		goto out_symbol_exit;
 	}
 
-	err = perf_target__validate(&rec->opts.target);
+	err = target__validate(&rec->opts.target);
 	if (err) {
-		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
 		ui__warning("%s", errbuf);
 	}
 
-	err = perf_target__parse_uid(&rec->opts.target);
+	err = target__parse_uid(&rec->opts.target);
 	if (err) {
 		int saved_errno = errno;
 
-		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
 		ui__error("%s", errbuf);
 
 		err = -saved_errno;
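
The write_output() split above also fixes a subtle type bug: write()'s result was stored in an int, truncating ssize_t on LP64 targets. A stand-alone version of the same write-all loop, with an EINTR retry added for good measure:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Retry short writes until the whole buffer is out; fail on error. */
static int write_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0) {
			if (errno == EINTR)
				continue;	/* interrupted, retry */
			return -1;
		}
		size -= ret;
		p += ret;
	}
	return 0;
}
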
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0fc1c94..ee0d565 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -108,7 +108,7 @@
 
 static struct perf_evlist	*evsel_list;
 
-static struct perf_target	target = {
+static struct target target = {
 	.uid	= UINT_MAX,
 };
 
@@ -294,11 +294,10 @@
 
 	attr->inherit = !no_inherit;
 
-	if (perf_target__has_cpu(&target))
+	if (target__has_cpu(&target))
 		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
 
-	if (!perf_target__has_task(&target) &&
-	    perf_evsel__is_group_leader(evsel)) {
+	if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
 		attr->disabled = 1;
 		if (!initial_delay)
 			attr->enable_on_exec = 1;
@@ -1236,7 +1235,7 @@
 			fprintf(output, "\'system wide");
 		else if (target.cpu_list)
 			fprintf(output, "\'CPU(s) %s", target.cpu_list);
-		else if (!perf_target__has_task(&target)) {
+		else if (!target__has_task(&target)) {
 			fprintf(output, "\'%s", argv[0]);
 			for (i = 1; i < argc; i++)
 				fprintf(output, " %s", argv[i]);
@@ -1667,7 +1666,7 @@
 	} else if (big_num_opt == 0) /* User passed --no-big-num */
 		big_num = false;
 
-	if (!argc && perf_target__none(&target))
+	if (!argc && target__none(&target))
 		usage_with_options(stat_usage, options);
 
 	if (run_count < 0) {
@@ -1680,8 +1679,8 @@
 	}
 
 	/* no_aggr, cgroup are for system-wide only */
-	if ((aggr_mode != AGGR_GLOBAL || nr_cgroups)
-	     && !perf_target__has_cpu(&target)) {
+	if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
+	    !target__has_cpu(&target)) {
 		fprintf(stderr, "both cgroup and no-aggregation "
 			"modes only available in system-wide mode\n");
 
@@ -1694,14 +1693,14 @@
 	if (add_default_attributes())
 		goto out;
 
-	perf_target__validate(&target);
+	target__validate(&target);
 
 	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
-		if (perf_target__has_task(&target)) {
+		if (target__has_task(&target)) {
 			pr_err("Problems finding threads of monitor\n");
 			parse_options_usage(stat_usage, options, "p", 1);
 			parse_options_usage(NULL, options, "t", 1);
-		} else if (perf_target__has_cpu(&target)) {
+		} else if (target__has_cpu(&target)) {
 			perror("failed to parse CPUs map");
 			parse_options_usage(stat_usage, options, "C", 1);
 			parse_options_usage(NULL, options, "a", 1);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 9acca88..71e6402 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -950,14 +950,8 @@
 	if (ret)
 		goto out_delete;
 
-	if (perf_target__has_task(&opts->target))
-		perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
-						  perf_event__process,
-						  &top->session->machines.host);
-	else
-		perf_event__synthesize_threads(&top->tool, perf_event__process,
-					       &top->session->machines.host);
-
+	machine__synthesize_threads(&top->session->machines.host, &opts->target,
+				    top->evlist->threads, false);
 	ret = perf_top__start_counters(top);
 	if (ret)
 		goto out_delete;
@@ -973,7 +967,7 @@
 	 * XXX 'top' still doesn't start workloads like record, trace, but should,
 	 * so leave the check here.
 	 */
-        if (!perf_target__none(&opts->target))
+        if (!target__none(&opts->target))
                 perf_evlist__enable(top->evlist);
 
 	/* Wait for a minimal set of events before starting the snapshot */
@@ -1059,7 +1053,7 @@
 		.sym_pcnt_filter     = 5,
 	};
 	struct perf_record_opts *opts = &top.record_opts;
-	struct perf_target *target = &opts->target;
+	struct target *target = &opts->target;
 	const struct option options[] = {
 	OPT_CALLBACK('e', "event", &top.evlist, "event",
 		     "event selector. use 'perf list' to list available events",
@@ -1175,24 +1169,24 @@
 
 	setup_browser(false);
 
-	status = perf_target__validate(target);
+	status = target__validate(target);
 	if (status) {
-		perf_target__strerror(target, status, errbuf, BUFSIZ);
-		ui__warning("%s", errbuf);
+		target__strerror(target, status, errbuf, BUFSIZ);
+		ui__warning("%s\n", errbuf);
 	}
 
-	status = perf_target__parse_uid(target);
+	status = target__parse_uid(target);
 	if (status) {
 		int saved_errno = errno;
 
-		perf_target__strerror(target, status, errbuf, BUFSIZ);
-		ui__error("%s", errbuf);
+		target__strerror(target, status, errbuf, BUFSIZ);
+		ui__error("%s\n", errbuf);
 
 		status = -saved_errno;
 		goto out_delete_evlist;
 	}
 
-	if (perf_target__none(target))
+	if (target__none(target))
 		target->system_wide = true;
 
 	if (perf_evlist__create_maps(top.evlist, target) < 0)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 329b783..8be17fc 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -149,21 +149,32 @@
 	perf_evsel__delete(evsel);
 }
 
-static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction,
-						    void *handler, int idx)
+static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction, idx);
-
-	if (evsel) {
-		evsel->priv = malloc(sizeof(struct syscall_tp));
-
-		if (evsel->priv == NULL)
-			goto out_delete;
-
+	evsel->priv = malloc(sizeof(struct syscall_tp));
+	if (evsel->priv != NULL) {
 		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
 			goto out_delete;
 
 		evsel->handler = handler;
+		return 0;
+	}
+
+	return -ENOMEM;
+
+out_delete:
+	free(evsel->priv);
+	evsel->priv = NULL;
+	return -ENOENT;
+}
+
+static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
+{
+	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
+
+	if (evsel) {
+		if (perf_evsel__init_syscall_tp(evsel, handler))
+			goto out_delete;
 	}
 
 	return evsel;
@@ -186,17 +197,16 @@
 					  void *sys_exit_handler)
 {
 	int ret = -1;
-	int idx = evlist->nr_entries;
 	struct perf_evsel *sys_enter, *sys_exit;
 
-	sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler, idx++);
+	sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler);
 	if (sys_enter == NULL)
 		goto out;
 
 	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
 		goto out_delete_sys_enter;
 
-	sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler, idx++);
+	sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler);
 	if (sys_exit == NULL)
 		goto out_delete_sys_enter;
 
@@ -953,7 +963,8 @@
 	{ .name	    = "mmap",	    .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
-			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
+			     [3] = SCA_MMAP_FLAGS, /* flags */
+			     [4] = SCA_FD, 	  /* fd */ }, },
 	{ .name	    = "mprotect",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
 			     [2] = SCA_MMAP_PROT, /* prot */ }, },
@@ -1157,6 +1168,7 @@
 	bool			sched;
 	bool			multiple_threads;
 	bool			summary;
+	bool			summary_only;
 	bool			show_comm;
 	bool			show_tool_stats;
 	double			duration_filter;
@@ -1342,15 +1354,8 @@
 	if (trace->host == NULL)
 		return -ENOMEM;
 
-	if (perf_target__has_task(&trace->opts.target)) {
-		err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
-							trace__tool_process,
-							trace->host);
-	} else {
-		err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
-						     trace->host);
-	}
-
+	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
+					    evlist->threads, trace__tool_process, false);
 	if (err)
 		symbol__exit();
 
@@ -1607,7 +1612,7 @@
 					   args, trace, thread);
 
 	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
-		if (!trace->duration_filter) {
+		if (!trace->duration_filter && !trace->summary_only) {
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
 		}
@@ -1660,6 +1665,9 @@
 	} else if (trace->duration_filter)
 		goto out;
 
+	if (trace->summary_only)
+		goto out;
+
 	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
 
 	if (ttrace->entry_pending) {
@@ -1762,16 +1770,6 @@
 	return err;
 }
 
-static bool
-perf_session__has_tp(struct perf_session *session, const char *name)
-{
-	struct perf_evsel *evsel;
-
-	evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);
-
-	return evsel != NULL;
-}
-
 static int parse_target_str(struct trace *trace)
 {
 	if (trace->opts.target.pid) {
@@ -1824,8 +1822,7 @@
 
 static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname",
-						     evlist->nr_entries);
+	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
 	if (evsel == NULL)
 		return;
 
@@ -2009,8 +2006,6 @@
 static int trace__replay(struct trace *trace)
 {
 	const struct perf_evsel_str_handler handlers[] = {
-		{ "raw_syscalls:sys_enter",  trace__sys_enter, },
-		{ "raw_syscalls:sys_exit",   trace__sys_exit, },
 		{ "probe:vfs_getname",	     trace__vfs_getname, },
 	};
 	struct perf_data_file file = {
@@ -2018,6 +2013,7 @@
 		.mode  = PERF_DATA_MODE_READ,
 	};
 	struct perf_session *session;
+	struct perf_evsel *evsel;
 	int err = -1;
 
 	trace->tool.sample	  = trace__process_sample;
@@ -2049,13 +2045,29 @@
 	if (err)
 		goto out;
 
-	if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
-		pr_err("Data file does not have raw_syscalls:sys_enter events\n");
+	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
+						     "raw_syscalls:sys_enter");
+	if (evsel == NULL) {
+		pr_err("Data file does not have raw_syscalls:sys_enter event\n");
 		goto out;
 	}
 
-	if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
-		pr_err("Data file does not have raw_syscalls:sys_exit events\n");
+	if (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
+	    perf_evsel__init_sc_tp_ptr_field(evsel, args)) {
+		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
+		goto out;
+	}
+
+	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
+						     "raw_syscalls:sys_exit");
+	if (evsel == NULL) {
+		pr_err("Data file does not have raw_syscalls:sys_exit event\n");
+		goto out;
+	}
+
+	if (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
+	    perf_evsel__init_sc_tp_uint_field(evsel, ret)) {
+		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
 		goto out;
 	}
 
@@ -2082,12 +2094,7 @@
 {
 	size_t printed;
 
-	printed  = fprintf(fp, "\n _____________________________________________________________________________\n");
-	printed += fprintf(fp, " __)    Summary of events    (__\n\n");
-	printed += fprintf(fp, "              [ task - pid ]     [ events ] [ ratio ]  [ runtime ]\n");
-	printed += fprintf(fp, "                                  syscall  count    min     max    avg  stddev\n");
-	printed += fprintf(fp, "                                                   msec    msec   msec     %%\n");
-	printed += fprintf(fp, " _____________________________________________________________________________\n\n");
+	printed  = fprintf(fp, "\n Summary of events:\n\n");
 
 	return printed;
 }
@@ -2105,6 +2112,10 @@
 
 	printed += fprintf(fp, "\n");
 
+	printed += fprintf(fp, "   syscall            calls      min       avg       max      stddev\n");
+	printed += fprintf(fp, "                               (msec)    (msec)    (msec)        (%%)\n");
+	printed += fprintf(fp, "   --------------- -------- --------- --------- ---------     ------\n");
+
 	/* each int_node is a syscall */
 	while (inode) {
 		stats = inode->priv;
@@ -2119,10 +2130,10 @@
 			avg /= NSEC_PER_MSEC;
 
 			sc = &trace->syscalls.table[inode->i];
-			printed += fprintf(fp, "%24s  %14s : ", "", sc->name);
-			printed += fprintf(fp, "%5" PRIu64 "  %8.3f  %8.3f",
-					   n, min, max);
-			printed += fprintf(fp, "  %8.3f  %6.2f\n", avg, pct);
+			printed += fprintf(fp, "   %-15s", sc->name);
+			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f",
+					   n, min, avg);
+			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
 		}
 
 		inode = intlist__next(inode);
@@ -2163,10 +2174,10 @@
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	printed += color_fprintf(fp, color, "%20s", thread__comm_str(thread));
-	printed += fprintf(fp, " - %-5d :%11lu   [", thread->tid, ttrace->nr_events);
-	printed += color_fprintf(fp, color, "%5.1f%%", ratio);
-	printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
+	printed += color_fprintf(fp, color, " %s (%d), ", thread__comm_str(thread), thread->tid);
+	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
+	printed += color_fprintf(fp, color, "%.1f%%", ratio);
+	printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
 	printed += thread__dump_stats(ttrace, trace, fp);
 
 	data->printed += printed;
@@ -2275,8 +2286,10 @@
 	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
 	OPT_BOOLEAN('T', "time", &trace.full_time,
 		    "Show full timestamp, not time relative to first start"),
-	OPT_BOOLEAN(0, "summary", &trace.summary,
-		    "Show syscall summary with statistics"),
+	OPT_BOOLEAN('s', "summary", &trace.summary_only,
+		    "Show only syscall summary with statistics"),
+	OPT_BOOLEAN('S', "with-summary", &trace.summary,
+		    "Show all syscalls and summary with statistics"),
 	OPT_END()
 	};
 	int err;
@@ -2287,6 +2300,10 @@
 
 	argc = parse_options(argc, argv, trace_options, trace_usage, 0);
 
+	/* summary_only implies summary option, but don't overwrite summary if set */
+	if (trace.summary_only)
+		trace.summary = trace.summary_only;
+
 	if (output_name != NULL) {
 		err = trace__open_output(&trace, output_name);
 		if (err < 0) {
@@ -2310,21 +2327,21 @@
 		}
 	}
 
-	err = perf_target__validate(&trace.opts.target);
+	err = target__validate(&trace.opts.target);
 	if (err) {
-		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
 		fprintf(trace.output, "%s", bf);
 		goto out_close;
 	}
 
-	err = perf_target__parse_uid(&trace.opts.target);
+	err = target__parse_uid(&trace.opts.target);
 	if (err) {
-		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
 		fprintf(trace.output, "%s", bf);
 		goto out_close;
 	}
 
-	if (!argc && perf_target__none(&trace.opts.target))
+	if (!argc && target__none(&trace.opts.target))
 		trace.opts.target.system_wide = true;
 
 	if (input_name)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 58b2d37..f7d11a8 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -31,6 +31,10 @@
   endif
   NO_PERF_REGS := 0
 endif
+ifeq ($(ARCH),arm)
+  NO_PERF_REGS := 0
+  LIBUNWIND_LIBS = -lunwind -lunwind-arm
+endif
 
 ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS_SUPPORT
@@ -138,7 +142,8 @@
 	libunwind			\
 	on-exit				\
 	stackprotector			\
-	stackprotector-all
+	stackprotector-all		\
+	timerfd
 
 #
 # So here we detect whether test-all was rebuilt, to be able
@@ -305,8 +310,7 @@
   endif # NO_DWARF
 endif # NO_LIBELF
 
-# There's only x86 (both 32 and 64) support for CFI unwind so far
-ifneq ($(ARCH),x86)
+ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 endif
 
@@ -322,8 +326,19 @@
   endif
 
   ifneq ($(feature-libunwind), 1)
-    msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
+    msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
     NO_LIBUNWIND := 1
+  else
+    ifeq ($(ARCH),arm)
+      $(call feature_check,libunwind-debug-frame)
+      ifneq ($(feature-libunwind-debug-frame), 1)
+        msg := $(warning No debug_frame support found in libunwind);
+        CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+      endif
+    else
+      # non-ARM has no dwarf_find_debug_frame() function:
+      CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+    endif
   endif
 endif
 
@@ -397,7 +412,6 @@
   endif
 endif
 
-$(call feature_check,timerfd)
 ifeq ($(feature-timerfd), 1)
   CFLAGS += -DHAVE_TIMERFD_SUPPORT
 else
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index c803f17..87e7900 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -23,6 +23,7 @@
 	test-libpython-version		\
 	test-libslang			\
 	test-libunwind			\
+	test-libunwind-debug-frame	\
 	test-on-exit			\
 	test-stackprotector-all		\
 	test-stackprotector		\
@@ -75,6 +76,9 @@
 test-libunwind:
 	$(BUILD) $(LIBUNWIND_LIBS) -lelf
 
+test-libunwind-debug-frame:
+	$(BUILD) $(LIBUNWIND_LIBS) -lelf
+
 test-libaudit:
 	$(BUILD) -laudit
 
diff --git a/tools/perf/config/feature-checks/test-libunwind-debug-frame.c b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c
new file mode 100644
index 0000000..0ef8087
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c
@@ -0,0 +1,16 @@
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+				 unw_word_t ip, unw_word_t segbase,
+				 const char *obj_name, unw_word_t start,
+				 unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+	dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+	return 0;
+}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 6a587e84..b079304 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -248,7 +248,7 @@
 };
 
 struct perf_record_opts {
-	struct perf_target target;
+	struct target target;
 	int	     call_graph;
 	bool	     group;
 	bool	     inherit_stat;
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 49ccc3b..85d4919 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -275,8 +275,19 @@
 	if (event->header.type == PERF_RECORD_SAMPLE)
 		return process_sample_event(machine, evlist, event, state);
 
-	if (event->header.type < PERF_RECORD_MAX)
-		return machine__process_event(machine, event, NULL);
+	if (event->header.type == PERF_RECORD_THROTTLE ||
+	    event->header.type == PERF_RECORD_UNTHROTTLE)
+		return 0;
+
+	if (event->header.type < PERF_RECORD_MAX) {
+		int ret;
+
+		ret = machine__process_event(machine, event, NULL);
+		if (ret < 0)
+			pr_debug("machine__process_event failed, event type %u\n",
+				 event->header.type);
+		return ret;
+	}
 
 	return 0;
 }
@@ -441,7 +452,7 @@
 	}
 
 	ret = perf_event__synthesize_thread_map(NULL, threads,
-						perf_event__process, machine);
+						perf_event__process, machine, false);
 	if (ret < 0) {
 		pr_debug("perf_event__synthesize_thread_map failed\n");
 		goto out_err;
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 9b98c15..4774f7f 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -32,7 +32,7 @@
 
 int test__perf_evsel__tp_sched_test(void)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
+	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 	int ret = 0;
 
 	if (evsel == NULL) {
@@ -63,7 +63,7 @@
 
 	perf_evsel__delete(evsel);
 
-	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
+	evsel = perf_evsel__newtp("sched", "sched_wakeup");
 
 	if (perf_evsel__test_field(evsel, "comm", 16, true))
 		ret = -1;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index a7232c2..d64ab79 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -65,7 +65,7 @@
 		char name[64];
 
 		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
-		evsels[i] = perf_evsel__newtp("syscalls", name, i);
+		evsels[i] = perf_evsel__newtp("syscalls", name);
 		if (evsels[i] == NULL) {
 			pr_debug("perf_evsel__new\n");
 			goto out_free_evlist;
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
index b0657a9..5fecdbd 100644
--- a/tools/perf/tests/open-syscall-all-cpus.c
+++ b/tools/perf/tests/open-syscall-all-cpus.c
@@ -26,7 +26,7 @@
 
 	CPU_ZERO(&cpu_set);
 
-	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
+	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
 	if (evsel == NULL) {
 		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
 		goto out_thread_map_delete;
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index 524b221..41cc0ba 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -27,7 +27,7 @@
 		goto out;
 	}
 
-	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
+	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
 	if (evsel == NULL) {
 		pr_debug("%s: perf_evsel__newtp\n", __func__);
 		goto out_delete_evlist;
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
index befc067..c1dc7d2 100644
--- a/tools/perf/tests/open-syscall.c
+++ b/tools/perf/tests/open-syscall.c
@@ -15,7 +15,7 @@
 		return -1;
 	}
 
-	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
+	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
 	if (evsel == NULL) {
 		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
 		goto out_thread_map_delete;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index ef671cd..3cbd104 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -441,9 +441,8 @@
 
 static int test__checkevent_pmu_events(struct perf_evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel = perf_evlist__first(evlist);
 
-	evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
 	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
 	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
 	TEST_ASSERT_VAL("wrong exclude_user",
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 6e2b44e..6664a7c 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -9,7 +9,7 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 
-#define NR_LOOPS  1000000
+#define NR_LOOPS  10000000
 
 /*
  * This test will open software clock events (cpu-clock, task-clock)
@@ -34,7 +34,7 @@
 		.freq = 1,
 	};
 
-	attr.sample_freq = 10000;
+	attr.sample_freq = 500;
 
 	evlist = perf_evlist__new();
 	if (evlist == NULL) {
@@ -42,7 +42,7 @@
 		return -1;
 	}
 
-	evsel = perf_evsel__new(&attr, 0);
+	evsel = perf_evsel__new(&attr);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
 		goto out_free_evlist;
@@ -57,7 +57,14 @@
 		goto out_delete_maps;
 	}
 
-	perf_evlist__open(evlist);
+	if (perf_evlist__open(evlist)) {
+		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
+
+		err = -errno;
+		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
+			 strerror(errno), knob, (u64)attr.sample_freq);
+		goto out_delete_maps;
+	}
 
 	err = perf_evlist__mmap(evlist, 128, true);
 	if (err < 0) {
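
The hunk above lowers the requested frequency and, on open failure, points at the
kernel's sampling-rate limit. That limit can also be read directly; a minimal
sketch (the helper name is ours, only the procfs path is kernel ABI):

/* Read the kernel's max sampling frequency so a caller can clamp
 * attr.sample_freq before the event open fails. Returns -1 on error. */
#include <stdio.h>

static long read_max_sample_rate(void)
{
	FILE *fp = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	long rate = -1;

	if (fp == NULL)
		return -1;
	if (fscanf(fp, "%ld", &rate) != 1)
		rate = -1;
	fclose(fp);
	return rate;
}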
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index c33d95f..d09ab57 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -28,7 +28,7 @@
 	union perf_event *event;
 	struct perf_evsel *evsel;
 	struct perf_evlist *evlist;
-	struct perf_target target = {
+	struct target target = {
 		.uid		= UINT_MAX,
 		.uses_mmap	= true,
 	};
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index bbc782e..cbaa7af 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -569,7 +569,7 @@
 		browser->top = browser->top + browser->top_idx + offset;
 		break;
 	case SEEK_END:
-		browser->top = browser->top + browser->nr_entries + offset;
+		browser->top = browser->top + browser->nr_entries - 1 + offset;
 		break;
 	default:
 		return;
@@ -680,7 +680,7 @@
 	if (end >= browser->top_idx + browser->height)
 		end_row = browser->height - 1;
 	else
-		end_row = end - browser->top_idx;;
+		end_row = end - browser->top_idx;
 
 	ui_browser__gotorc(browser, row, column);
 	SLsmg_draw_vline(end_row - row + 1);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 16848bb..a440e03 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1847,15 +1847,15 @@
 			switch (key) {
 			case K_TAB:
 				if (pos->node.next == &evlist->entries)
-					pos = list_entry(evlist->entries.next, struct perf_evsel, node);
+					pos = perf_evlist__first(evlist);
 				else
-					pos = list_entry(pos->node.next, struct perf_evsel, node);
+					pos = perf_evsel__next(pos);
 				goto browse_hists;
 			case K_UNTAB:
 				if (pos->node.prev == &evlist->entries)
-					pos = list_entry(evlist->entries.prev, struct perf_evsel, node);
+					pos = perf_evlist__last(evlist);
 				else
-					pos = list_entry(pos->node.prev, struct perf_evsel, node);
+					pos = perf_evsel__prev(pos);
 				goto browse_hists;
 			case K_ESC:
 				if (!ui_browser__dialog_yesno(&menu->b,
@@ -1943,8 +1943,7 @@
 
 single_entry:
 	if (nr_entries == 1) {
-		struct perf_evsel *first = list_entry(evlist->entries.next,
-						      struct perf_evsel, node);
+		struct perf_evsel *first = perf_evlist__first(evlist);
 		const char *ev_name = perf_evsel__name(first);
 
 		return perf_evsel__hists_browse(first, nr_entries, help,
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index 3e2d936..c61d14b 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -18,13 +18,14 @@
 	if (p->total == 0)
 		return;
 
-	ui__refresh_dimensions(true);
+	ui__refresh_dimensions(false);
 	pthread_mutex_lock(&ui__lock);
 	y = SLtt_Screen_Rows / 2 - 2;
 	SLsmg_set_color(0);
 	SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
 	SLsmg_gotorc(y++, 1);
 	SLsmg_write_string((char *)p->title);
+	SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' ');
 	SLsmg_set_color(HE_COLORSET_SELECTED);
 	bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
 	SLsmg_fill_region(y, 1, 1, bar, ' ');
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index ec9ae11..bb788c1 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -170,7 +170,8 @@
 					      union perf_event *event,
 					      pid_t pid, pid_t tgid,
 					      perf_event__handler_t process,
-					      struct machine *machine)
+					      struct machine *machine,
+					      bool mmap_data)
 {
 	char filename[PATH_MAX];
 	FILE *fp;
@@ -188,10 +189,6 @@
 	}
 
 	event->header.type = PERF_RECORD_MMAP;
-	/*
-	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
-	 */
-	event->header.misc = PERF_RECORD_MISC_USER;
 
 	while (1) {
 		char bf[BUFSIZ];
@@ -212,12 +209,22 @@
 		       &event->mmap.start, &event->mmap.len, prot,
 		       &event->mmap.pgoff,
 		       execname);
-
-		if (n != 5)
+		/*
+		 * Anon maps don't have the execname.
+		 */
+		if (n < 4)
 			continue;
+		/*
+		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+		 */
+		event->header.misc = PERF_RECORD_MISC_USER;
 
-		if (prot[2] != 'x')
-			continue;
+		if (prot[2] != 'x') {
+			if (!mmap_data || prot[0] != 'r')
+				continue;
+
+			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+		}
 
 		if (!strcmp(execname, ""))
 			strcpy(execname, anonstr);
@@ -304,20 +311,21 @@
 				      pid_t pid, int full,
 					  perf_event__handler_t process,
 				      struct perf_tool *tool,
-				      struct machine *machine)
+				      struct machine *machine, bool mmap_data)
 {
 	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
 						 process, machine);
 	if (tgid == -1)
 		return -1;
 	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-						  process, machine);
+						  process, machine, mmap_data);
 }
 
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
 				      struct thread_map *threads,
 				      perf_event__handler_t process,
-				      struct machine *machine)
+				      struct machine *machine,
+				      bool mmap_data)
 {
 	union perf_event *comm_event, *mmap_event;
 	int err = -1, thread, j;
@@ -334,7 +342,8 @@
 	for (thread = 0; thread < threads->nr; ++thread) {
 		if (__event__synthesize_thread(comm_event, mmap_event,
 					       threads->map[thread], 0,
-					       process, tool, machine)) {
+					       process, tool, machine,
+					       mmap_data)) {
 			err = -1;
 			break;
 		}
@@ -356,10 +365,10 @@
 
 			/* if not, generate events for it */
 			if (need_leader &&
-			    __event__synthesize_thread(comm_event,
-						      mmap_event,
-						      comm_event->comm.pid, 0,
-						      process, tool, machine)) {
+			    __event__synthesize_thread(comm_event, mmap_event,
+						       comm_event->comm.pid, 0,
+						       process, tool, machine,
+						       mmap_data)) {
 				err = -1;
 				break;
 			}
@@ -374,7 +383,7 @@
 
 int perf_event__synthesize_threads(struct perf_tool *tool,
 				   perf_event__handler_t process,
-				   struct machine *machine)
+				   struct machine *machine, bool mmap_data)
 {
 	DIR *proc;
 	struct dirent dirent, *next;
@@ -404,7 +413,7 @@
  		 * one thread couldn't be synthesized.
  		 */
 		__event__synthesize_thread(comm_event, mmap_event, pid, 1,
-					   process, tool, machine);
+					   process, tool, machine, mmap_data);
 	}
 
 	err = 0;
@@ -528,19 +537,22 @@
 
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
-	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
 		       event->mmap.pid, event->mmap.tid, event->mmap.start,
-		       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+		       event->mmap.len, event->mmap.pgoff,
+		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+		       event->mmap.filename);
 }
 
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
 {
 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
-			   " %02x:%02x %"PRIu64" %"PRIu64"]: %s\n",
+			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
 		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
 		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
 		       event->mmap2.min, event->mmap2.ino,
 		       event->mmap2.ino_generation,
+		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
 		       event->mmap2.filename);
 }
 
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f8d70f3..30fec99 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -208,10 +208,10 @@
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
 				      struct thread_map *threads,
 				      perf_event__handler_t process,
-				      struct machine *machine);
+				      struct machine *machine, bool mmap_data);
 int perf_event__synthesize_threads(struct perf_tool *tool,
 				   perf_event__handler_t process,
-				   struct machine *machine);
+				   struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
 				       struct machine *machine,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b939221..bbc746a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -117,6 +117,8 @@
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
 	list_add_tail(&entry->node, &evlist->entries);
+	entry->idx = evlist->nr_entries;
+
 	if (!evlist->nr_entries++)
 		perf_evlist__set_id_pos(evlist);
 }
@@ -165,7 +167,7 @@
 
 	event_attr_init(&attr);
 
-	evsel = perf_evsel__new(&attr, 0);
+	evsel = perf_evsel__new(&attr);
 	if (evsel == NULL)
 		goto error;
 
@@ -190,7 +192,7 @@
 	size_t i;
 
 	for (i = 0; i < nr_attrs; i++) {
-		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 		if (evsel == NULL)
 			goto out_delete_partial_list;
 		list_add_tail(&evsel->node, &head);
@@ -249,9 +251,8 @@
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
 			   const char *sys, const char *name, void *handler)
 {
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 
-	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
 	if (evsel == NULL)
 		return -1;
 
@@ -704,12 +705,10 @@
 	return (pages + 1) * page_size;
 }
 
-int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
-				  int unset __maybe_unused)
+static long parse_pages_arg(const char *str, unsigned long min,
+			    unsigned long max)
 {
-	unsigned int *mmap_pages = opt->value;
 	unsigned long pages, val;
-	size_t size;
 	static struct parse_tag tags[] = {
 		{ .tag  = 'B', .mult = 1       },
 		{ .tag  = 'K', .mult = 1 << 10 },
@@ -718,33 +717,49 @@
 		{ .tag  = 0 },
 	};
 
+	if (str == NULL)
+		return -EINVAL;
+
 	val = parse_tag_value(str, tags);
 	if (val != (unsigned long) -1) {
 		/* we got file size value */
 		pages = PERF_ALIGN(val, page_size) / page_size;
-		if (pages < (1UL << 31) && !is_power_of_2(pages)) {
-			pages = next_pow2(pages);
-			pr_info("rounding mmap pages size to %lu (%lu pages)\n",
-				pages * page_size, pages);
-		}
 	} else {
 		/* we got pages count value */
 		char *eptr;
 		pages = strtoul(str, &eptr, 10);
-		if (*eptr != '\0') {
-			pr_err("failed to parse --mmap_pages/-m value\n");
-			return -1;
-		}
+		if (*eptr != '\0')
+			return -EINVAL;
 	}
 
-	if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
-		pr_err("--mmap_pages/-m value too big\n");
-		return -1;
+	if ((pages == 0) && (min == 0)) {
+		/* leave number of pages at 0 */
+	} else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+		/* round pages up to next power of 2 */
+		pages = next_pow2(pages);
+		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+			pages * page_size, pages);
 	}
 
-	size = perf_evlist__mmap_size(pages);
-	if (!size) {
-		pr_err("--mmap_pages/-m value must be a power of two.");
+	if (pages > max)
+		return -EINVAL;
+
+	return pages;
+}
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+				  int unset __maybe_unused)
+{
+	unsigned int *mmap_pages = opt->value;
+	unsigned long max = UINT_MAX;
+	long pages;
+
+	if (max > SIZE_MAX / page_size)
+		max = SIZE_MAX / page_size;
+
+	pages = parse_pages_arg(str, 1, max);
+	if (pages < 0) {
+		pr_err("Invalid argument for --mmap_pages/-m\n");
 		return -1;
 	}
 
@@ -796,8 +811,7 @@
 	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist,
-			     struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
 	evlist->threads = thread_map__new_str(target->pid, target->tid,
 					      target->uid);
@@ -805,9 +819,11 @@
 	if (evlist->threads == NULL)
 		return -1;
 
-	if (perf_target__has_task(target))
+	if (target->force_per_cpu)
+		evlist->cpus = cpu_map__new(target->cpu_list);
+	else if (target__has_task(target))
 		evlist->cpus = cpu_map__dummy_new();
-	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+	else if (!target__has_cpu(target) && !target->uses_mmap)
 		evlist->cpus = cpu_map__dummy_new();
 	else
 		evlist->cpus = cpu_map__new(target->cpu_list);
@@ -1016,8 +1032,7 @@
 	return err;
 }
 
-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-				  struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
 				  const char *argv[], bool pipe_output,
 				  bool want_signal)
 {
@@ -1069,7 +1084,7 @@
 		exit(-1);
 	}
 
-	if (perf_target__none(target))
+	if (target__none(target))
 		evlist->threads->map[0] = evlist->workload.pid;
 
 	close(child_ready_pipe[1]);
@@ -1135,7 +1150,7 @@
 				   perf_evsel__name(evsel));
 	}
 
-	return printed + fprintf(fp, "\n");;
+	return printed + fprintf(fp, "\n");
 }
 
 int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
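
The parse_pages_arg() helper factored out above accepts either a raw page count
or a size with a B/K/M/G suffix, converts sizes to pages, and rounds a
non-power-of-two count up. A self-contained sketch of that rounding rule, where
page_size and next_pow2() stand in for the perf-internal versions:

#include <stdio.h>

static unsigned long next_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long page_size = 4096;		/* assumed page size */
	unsigned long bytes = 520 * 1024;	/* as if the user passed "520K" */
	unsigned long pages = (bytes + page_size - 1) / page_size;

	/* 130 pages is not a power of two, so it is rounded up to 256 */
	printf("%lu bytes -> %lu pages\n", bytes, next_pow2(pages));
	return 0;
}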
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index ecaa582..649d6ea 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -102,7 +102,7 @@
 int perf_record_opts__config(struct perf_record_opts *opts);
 
 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-				  struct perf_target *target,
+				  struct target *target,
 				  const char *argv[], bool pipe_output,
 				  bool want_signal);
 int perf_evlist__start_workload(struct perf_evlist *evlist);
@@ -134,8 +134,7 @@
 	evlist->threads	= threads;
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist,
-			     struct perf_target *target);
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 void perf_evlist__delete_maps(struct perf_evlist *evlist);
 int perf_evlist__apply_filters(struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 5280820..46dd4c2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -168,7 +168,7 @@
 	perf_evsel__calc_id_pos(evsel);
 }
 
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -219,7 +219,7 @@
 	return format;
 }
 
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -645,7 +645,7 @@
 		}
 	}
 
-	if (perf_target__has_cpu(&opts->target))
+	if (target__has_cpu(&opts->target) || opts->target.force_per_cpu)
 		perf_evsel__set_sample_bit(evsel, CPU);
 
 	if (opts->period)
@@ -653,7 +653,7 @@
 
 	if (!perf_missing_features.sample_id_all &&
 	    (opts->sample_time || !opts->no_inherit ||
-	     perf_target__has_cpu(&opts->target)))
+	     target__has_cpu(&opts->target) || opts->target.force_per_cpu))
 		perf_evsel__set_sample_bit(evsel, TIME);
 
 	if (opts->raw_samples) {
@@ -696,7 +696,7 @@
 	 * Setting enable_on_exec for independent events and
 	 * group leaders for traced executed by perf.
 	 */
-	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
 		attr->enable_on_exec = 1;
 }
 
@@ -2006,8 +2006,7 @@
 	return false;
 }
 
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-			      struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			      int err, char *msg, size_t size)
 {
 	switch (err) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 64ec8e1..1ea7c92 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -96,8 +96,19 @@
 struct perf_evlist;
 struct perf_record_opts;
 
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+
+static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+{
+	return perf_evsel__new_idx(attr, 0);
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+
+static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
+{
+	return perf_evsel__newtp_idx(sys, name, 0);
+}
 
 struct event_format *event_format__new(const char *sys, const char *name);
 
@@ -268,6 +279,11 @@
 	return list_entry(evsel->node.next, struct perf_evsel, node);
 }
 
+static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
+{
+	return list_entry(evsel->node.prev, struct perf_evsel, node);
+}
+
 /**
  * perf_evsel__is_group_leader - Return whether given evsel is a leader event
  *
@@ -307,8 +323,7 @@
 
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 			  char *msg, size_t msgsize);
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-			      struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			      int err, char *msg, size_t size);
 
 static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26d9520..369c036 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2797,7 +2797,7 @@
 			perf_event__attr_swap(&f_attr.attr);
 
 		tmp = lseek(fd, 0, SEEK_CUR);
-		evsel = perf_evsel__new(&f_attr.attr, i);
+		evsel = perf_evsel__new(&f_attr.attr);
 
 		if (evsel == NULL)
 			goto out_delete_evlist;
@@ -2916,7 +2916,7 @@
 			return -ENOMEM;
 	}
 
-	evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
+	evsel = perf_evsel__new(&event->attr.attr);
 	if (evsel == NULL)
 		return -ENOMEM;
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ce034c1..84cdb07 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1368,7 +1368,7 @@
 
 	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
 				   thread, evsel->attr.sample_regs_user,
-				   sample);
+				   sample, max_stack);
 
 }
 
@@ -1394,3 +1394,15 @@
 	}
 	return rc;
 }
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+				  struct target *target, struct thread_map *threads,
+				  perf_event__handler_t process, bool data_mmap)
+{
+	if (target__has_task(target))
+		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+	else if (target__has_cpu(target))
+		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
+	/* command specified */
+	return 0;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 2389ba8..4771330 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,6 +4,7 @@
 #include <sys/types.h>
 #include <linux/rbtree.h>
 #include "map.h"
+#include "event.h"
 
 struct addr_location;
 struct branch_stack;
@@ -178,4 +179,15 @@
 			     int (*fn)(struct thread *thread, void *p),
 			     void *priv);
 
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+				  struct target *target, struct thread_map *threads,
+				  perf_event__handler_t process, bool data_mmap);
+static inline
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+				struct thread_map *threads, bool data_mmap)
+{
+	return __machine__synthesize_threads(machine, NULL, target, threads,
+					     perf_event__process, data_mmap);
+}
+
 #endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c90e55c..6de6f89 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -277,7 +277,7 @@
 
 	event_attr_init(attr);
 
-	evsel = perf_evsel__new(attr, (*idx)++);
+	evsel = perf_evsel__new_idx(attr, (*idx)++);
 	if (!evsel)
 		return -ENOMEM;
 
@@ -378,7 +378,7 @@
 {
 	struct perf_evsel *evsel;
 
-	evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
+	evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
 	if (!evsel)
 		return -ENOMEM;
 
@@ -1097,7 +1097,7 @@
 		.threads = { 0 },
 	};
 
-	evsel = perf_evsel__new(&attr, 0);
+	evsel = perf_evsel__new(&attr);
 	if (evsel) {
 		ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
 		perf_evsel__delete(evsel);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 3c1b75c..8b0bb1f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1137,6 +1137,8 @@
 
 void sort__setup_elide(FILE *output)
 {
+	struct sort_entry *se;
+
 	sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
 				"dso", output);
 	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list,
@@ -1172,4 +1174,15 @@
 					"snoop", output);
 	}
 
+	/*
+	 * It makes no sense to elide all of the sort entries.
+	 * Just revert them so they show up again.
+	 */
+	list_for_each_entry(se, &hist_entry__sort_list, list) {
+		if (!se->elide)
+			return;
+	}
+
+	list_for_each_entry(se, &hist_entry__sort_list, list)
+		se->elide = false;
 }
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 065528b..3c778a0 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -13,9 +13,9 @@
 #include <string.h>
 
 
-enum perf_target_errno perf_target__validate(struct perf_target *target)
+enum target_errno target__validate(struct target *target)
 {
-	enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS;
+	enum target_errno ret = TARGET_ERRNO__SUCCESS;
 
 	if (target->pid)
 		target->tid = target->pid;
@@ -23,42 +23,42 @@
 	/* CPU and PID are mutually exclusive */
 	if (target->tid && target->cpu_list) {
 		target->cpu_list = NULL;
-		if (ret == PERF_ERRNO_TARGET__SUCCESS)
-			ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU;
+		if (ret == TARGET_ERRNO__SUCCESS)
+			ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
 	}
 
 	/* UID and PID are mutually exclusive */
 	if (target->tid && target->uid_str) {
 		target->uid_str = NULL;
-		if (ret == PERF_ERRNO_TARGET__SUCCESS)
-			ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID;
+		if (ret == TARGET_ERRNO__SUCCESS)
+			ret = TARGET_ERRNO__PID_OVERRIDE_UID;
 	}
 
 	/* UID and CPU are mutually exclusive */
 	if (target->uid_str && target->cpu_list) {
 		target->cpu_list = NULL;
-		if (ret == PERF_ERRNO_TARGET__SUCCESS)
-			ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU;
+		if (ret == TARGET_ERRNO__SUCCESS)
+			ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
 	}
 
 	/* PID and SYSTEM are mutually exclusive */
 	if (target->tid && target->system_wide) {
 		target->system_wide = false;
-		if (ret == PERF_ERRNO_TARGET__SUCCESS)
-			ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM;
+		if (ret == TARGET_ERRNO__SUCCESS)
+			ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
 	}
 
 	/* UID and SYSTEM are mutually exclusive */
 	if (target->uid_str && target->system_wide) {
 		target->system_wide = false;
-		if (ret == PERF_ERRNO_TARGET__SUCCESS)
-			ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM;
+		if (ret == TARGET_ERRNO__SUCCESS)
+			ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
 	}
 
 	return ret;
 }
 
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
+enum target_errno target__parse_uid(struct target *target)
 {
 	struct passwd pwd, *result;
 	char buf[1024];
@@ -66,7 +66,7 @@
 
 	target->uid = UINT_MAX;
 	if (str == NULL)
-		return PERF_ERRNO_TARGET__SUCCESS;
+		return TARGET_ERRNO__SUCCESS;
 
 	/* Try user name first */
 	getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
@@ -79,22 +79,22 @@
 		int uid = strtol(str, &endptr, 10);
 
 		if (*endptr != '\0')
-			return PERF_ERRNO_TARGET__INVALID_UID;
+			return TARGET_ERRNO__INVALID_UID;
 
 		getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
 
 		if (result == NULL)
-			return PERF_ERRNO_TARGET__USER_NOT_FOUND;
+			return TARGET_ERRNO__USER_NOT_FOUND;
 	}
 
 	target->uid = result->pw_uid;
-	return PERF_ERRNO_TARGET__SUCCESS;
+	return TARGET_ERRNO__SUCCESS;
 }
 
 /*
- * This must have a same ordering as the enum perf_target_errno.
+ * This must have the same ordering as enum target_errno.
  */
-static const char *perf_target__error_str[] = {
+static const char *target__error_str[] = {
 	"PID/TID switch overriding CPU",
 	"PID/TID switch overriding UID",
 	"UID switch overriding CPU",
@@ -104,7 +104,7 @@
 	"Problems obtaining information for user %s",
 };
 
-int perf_target__strerror(struct perf_target *target, int errnum,
+int target__strerror(struct target *target, int errnum,
 			  char *buf, size_t buflen)
 {
 	int idx;
@@ -124,21 +124,19 @@
 		return 0;
 	}
 
-	if (errnum <  __PERF_ERRNO_TARGET__START ||
-	    errnum >= __PERF_ERRNO_TARGET__END)
+	if (errnum <  __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
 		return -1;
 
-	idx = errnum - __PERF_ERRNO_TARGET__START;
-	msg = perf_target__error_str[idx];
+	idx = errnum - __TARGET_ERRNO__START;
+	msg = target__error_str[idx];
 
 	switch (errnum) {
-	case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU
-	 ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
+	case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
 		snprintf(buf, buflen, "%s", msg);
 		break;
 
-	case PERF_ERRNO_TARGET__INVALID_UID:
-	case PERF_ERRNO_TARGET__USER_NOT_FOUND:
+	case TARGET_ERRNO__INVALID_UID:
+	case TARGET_ERRNO__USER_NOT_FOUND:
 		snprintf(buf, buflen, msg, target->uid_str);
 		break;
 
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index a4be857..2d0c506 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -4,7 +4,7 @@
 #include <stdbool.h>
 #include <sys/types.h>
 
-struct perf_target {
+struct target {
 	const char   *pid;
 	const char   *tid;
 	const char   *cpu_list;
@@ -12,10 +12,11 @@
 	uid_t	     uid;
 	bool	     system_wide;
 	bool	     uses_mmap;
+	bool	     force_per_cpu;
 };
 
-enum perf_target_errno {
-	PERF_ERRNO_TARGET__SUCCESS		= 0,
+enum target_errno {
+	TARGET_ERRNO__SUCCESS		= 0,
 
 	/*
 	 * Choose an arbitrary negative big number not to clash with standard
@@ -24,42 +25,40 @@
 	 *
 	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
 	 */
-	__PERF_ERRNO_TARGET__START		= -10000,
+	__TARGET_ERRNO__START		= -10000,
 
+	/* for target__validate() */
+	TARGET_ERRNO__PID_OVERRIDE_CPU	= __TARGET_ERRNO__START,
+	TARGET_ERRNO__PID_OVERRIDE_UID,
+	TARGET_ERRNO__UID_OVERRIDE_CPU,
+	TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
+	TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
 
-	/* for perf_target__validate() */
-	PERF_ERRNO_TARGET__PID_OVERRIDE_CPU	= __PERF_ERRNO_TARGET__START,
-	PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
-	PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
-	PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
-	PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+	/* for target__parse_uid() */
+	TARGET_ERRNO__INVALID_UID,
+	TARGET_ERRNO__USER_NOT_FOUND,
 
-	/* for perf_target__parse_uid() */
-	PERF_ERRNO_TARGET__INVALID_UID,
-	PERF_ERRNO_TARGET__USER_NOT_FOUND,
-
-	__PERF_ERRNO_TARGET__END,
+	__TARGET_ERRNO__END,
 };
 
-enum perf_target_errno perf_target__validate(struct perf_target *target);
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
+enum target_errno target__validate(struct target *target);
+enum target_errno target__parse_uid(struct target *target);
 
-int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
-			  size_t buflen);
+int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
 
-static inline bool perf_target__has_task(struct perf_target *target)
+static inline bool target__has_task(struct target *target)
 {
 	return target->tid || target->pid || target->uid_str;
 }
 
-static inline bool perf_target__has_cpu(struct perf_target *target)
+static inline bool target__has_cpu(struct target *target)
 {
 	return target->system_wide || target->cpu_list;
 }
 
-static inline bool perf_target__none(struct perf_target *target)
+static inline bool target__none(struct target *target)
 {
-	return !perf_target__has_task(target) && !perf_target__has_cpu(target);
+	return !target__has_task(target) && !target__has_cpu(target);
 }
 
 #endif /* _PERF_TARGET_H */
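
The rename above keeps the validation semantics: a PID/TID target silently
overrides a conflicting CPU list, UID, or system-wide request. A toy
illustration of that precedence, using a local struct rather than the
perf-internal one:

#include <stdio.h>

struct toy_target {
	const char *tid;
	const char *cpu_list;
};

int main(void)
{
	struct toy_target t = { .tid = "1234", .cpu_list = "0-3" };

	/* mirrors target__validate(): PID/TID wins over CPU */
	if (t.tid && t.cpu_list)
		t.cpu_list = NULL;

	printf("tid=%s cpu_list=%s\n", t.tid,
	       t.cpu_list ? t.cpu_list : "(dropped)");
	return 0;
}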
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index f857b51..ce793c7 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -27,7 +27,7 @@
 	float ksamples_per_sec;
 	float esamples_percent;
 	struct perf_record_opts *opts = &top->record_opts;
-	struct perf_target *target = &opts->target;
+	struct target *target = &opts->target;
 	size_t ret = 0;
 
 	if (top->samples) {
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
index 2f891f7..0efd539 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind.c
@@ -39,6 +39,15 @@
 
 #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
 
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+				 unw_word_t ip,
+				 unw_word_t segbase,
+				 const char *obj_name, unw_word_t start,
+				 unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
 #define DW_EH_PE_FORMAT_MASK	0x0f	/* format of the encoded value */
 #define DW_EH_PE_APPL_MASK	0x70	/* how the value is to be applied */
 
@@ -245,8 +254,9 @@
 	return 0;
 }
 
-static int read_unwind_spec(struct dso *dso, struct machine *machine,
-			    u64 *table_data, u64 *segbase, u64 *fde_count)
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+				     u64 *table_data, u64 *segbase,
+				     u64 *fde_count)
 {
 	int ret = -EINVAL, fd;
 	u64 offset;
@@ -255,6 +265,7 @@
 	if (fd < 0)
 		return -EINVAL;
 
+	/* Check the .eh_frame_hdr section for unwinding info */
 	offset = elf_section_offset(fd, ".eh_frame_hdr");
 	close(fd);
 
@@ -263,10 +274,29 @@
 					  table_data, segbase,
 					  fde_count);
 
-	/* TODO .debug_frame check if eh_frame_hdr fails */
 	return ret;
 }
 
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+					struct machine *machine, u64 *offset)
+{
+	int fd = dso__data_fd(dso, machine);
+
+	if (fd < 0)
+		return -EINVAL;
+
+	/* Check the .debug_frame section for unwinding info */
+	*offset = elf_section_offset(fd, ".debug_frame");
+	close(fd);
+
+	if (*offset)
+		return 0;
+
+	return -EINVAL;
+}
+#endif
+
 static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
 {
 	struct addr_location al;
@@ -291,20 +321,33 @@
 
 	pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
 
-	if (read_unwind_spec(map->dso, ui->machine,
-			     &table_data, &segbase, &fde_count))
-		return -EINVAL;
+	/* Check the .eh_frame section for unwinding info */
+	if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+				       &table_data, &segbase, &fde_count)) {
+		memset(&di, 0, sizeof(di));
+		di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
+		di.start_ip = map->start;
+		di.end_ip   = map->end;
+		di.u.rti.segbase    = map->start + segbase;
+		di.u.rti.table_data = map->start + table_data;
+		di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
+				      / sizeof(unw_word_t);
+		return dwarf_search_unwind_table(as, ip, &di, pi,
+						 need_unwind_info, arg);
+	}
 
-	memset(&di, 0, sizeof(di));
-	di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
-	di.start_ip = map->start;
-	di.end_ip   = map->end;
-	di.u.rti.segbase    = map->start + segbase;
-	di.u.rti.table_data = map->start + table_data;
-	di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
-			      / sizeof(unw_word_t);
-	return dwarf_search_unwind_table(as, ip, &di, pi,
-					 need_unwind_info, arg);
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+	/* Check the .debug_frame section for unwinding info */
+	if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+		memset(&di, 0, sizeof(di));
+		dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+				       map->start, map->end);
+		return dwarf_search_unwind_table(as, ip, &di, pi,
+						 need_unwind_info, arg);
+	}
+#endif
+
+	return -EINVAL;
 }
 
 static int access_fpreg(unw_addr_space_t __maybe_unused as,
@@ -516,7 +559,7 @@
 };
 
 static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
-		       void *arg)
+		       void *arg, int max_stack)
 {
 	unw_addr_space_t addr_space;
 	unw_cursor_t c;
@@ -532,7 +575,7 @@
 	if (ret)
 		display_error(ret);
 
-	while (!ret && (unw_step(&c) > 0)) {
+	while (!ret && (unw_step(&c) > 0) && max_stack--) {
 		unw_word_t ip;
 
 		unw_get_reg(&c, UNW_REG_IP, &ip);
@@ -545,7 +588,8 @@
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct machine *machine, struct thread *thread,
-			u64 sample_uregs, struct perf_sample *data)
+			u64 sample_uregs, struct perf_sample *data,
+			int max_stack)
 {
 	unw_word_t ip;
 	struct unwind_info ui = {
@@ -567,5 +611,5 @@
 	if (ret)
 		return -ENOMEM;
 
-	return get_entries(&ui, cb, arg);
+	return get_entries(&ui, cb, arg, max_stack);
 }
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index ec0c71a..d5966f49 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -18,7 +18,7 @@
 			struct machine *machine,
 			struct thread *thread,
 			u64 sample_uregs,
-			struct perf_sample *data);
+			struct perf_sample *data, int max_stack);
 int unwind__arch_reg_id(int regnum);
 #else
 static inline int
@@ -27,7 +27,8 @@
 		    struct machine *machine __maybe_unused,
 		    struct thread *thread __maybe_unused,
 		    u64 sample_uregs __maybe_unused,
-		    struct perf_sample *data __maybe_unused)
+		    struct perf_sample *data __maybe_unused,
+		    int max_stack __maybe_unused)
 {
 	return 0;
 }
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
new file mode 100644
index 0000000..4473211
--- /dev/null
+++ b/tools/thermal/tmon/Makefile
@@ -0,0 +1,47 @@
+VERSION = 1.0
+
+BINDIR=usr/bin
+WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
+CFLAGS= -O1 ${WARNFLAGS} -fstack-protector
+CC=gcc
+
+CFLAGS+=-D VERSION=\"$(VERSION)\"
+LDFLAGS+=
+TARGET=tmon
+
+INSTALL_PROGRAM=install -m 755 -p
+DEL_FILE=rm -f
+
+INSTALL_CONFIGFILE=install -m 644 -p
+CONFIG_FILE=
+CONFIG_PATH=
+
+
+OBJS = tmon.o tui.o sysfs.o pid.o
+OBJS +=
+
+tmon: $(OBJS) Makefile tmon.h
+	$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -lpthread
+
+valgrind: tmon
+	 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
+
+install:
+	- mkdir -p $(INSTALL_ROOT)/$(BINDIR)
+	- $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
+	- mkdir -p $(INSTALL_ROOT)/$(CONFIG_PATH)
+	- $(INSTALL_CONFIGFILE) "$(CONFIG_FILE)" "$(INSTALL_ROOT)/$(CONFIG_PATH)"
+
+uninstall:
+	$(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
+	$(DEL_FILE) "$(INSTALL_ROOT)/$(CONFIG_PATH)/$(CONFIG_FILE)"
+
+
+clean:
+	find . -name "*.o" | xargs $(DEL_FILE)
+	rm -f $(TARGET)
+
+dist:
+	git tag v$(VERSION)
+	git archive --format=tar --prefix="$(TARGET)-$(VERSION)/" v$(VERSION) | \
+		gzip > $(TARGET)-$(VERSION).tar.gz
diff --git a/tools/thermal/tmon/README b/tools/thermal/tmon/README
new file mode 100644
index 0000000..4579498
--- /dev/null
+++ b/tools/thermal/tmon/README
@@ -0,0 +1,50 @@
+TMON - A Monitoring and Testing Tool for Linux kernel thermal subsystem
+
+Why TMON?
+==========
+Increasingly, Linux is running on thermally constrained devices. The simple
+thermal relationship between processor and fan has become a thing of the
+past on modern computers.
+
+As hardware vendors cope with the thermal constraints on their products, more
+and more sensors are added and new cooling capabilities are introduced. The
+complexity of the thermal relationships can grow exponentially among cooling
+devices, zones, sensors, and trip points. They can also change dynamically.
+
+To expose these relationships to userspace, the Linux generic thermal layer
+introduced a sysfs entry at /sys/class/thermal with a matrix of symbolic
+links, trip point bindings, and device instances. Traversing such a
+matrix by hand is not a trivial task. Testing is also difficult in that
+thermal conditions are often exceptional cases that are hard to reach in
+normal operation.
+
+TMON is conceived as a tool to help visualize, tune, and test the
+complex thermal subsystem.
+
+Files
+=====
+	tmon.c : main function for set up and configurations.
+	tui.c : handles ncurses based user interface
+	sysfs.c : access to the generic thermal sysfs
+	pid.c : a proportional-integral-derivative (PID) controller
+	that can be used for thermal relationship training.
+
+Requirements
+============
+Depends on ncurses
+
+Build
+=====
+$ make
+$ sudo ./tmon -h
+Usage: tmon [OPTION...]
+  -c, --control         cooling device in control
+  -d, --daemon          run as daemon, no TUI
+  -l, --log             log data to /var/tmp/tmon.log
+  -h, --help            show this help message
+  -t, --time-interval   set time interval for sampling
+  -v, --version         show version
+  -g, --debug           debug message in syslog
+
+1. For monitoring only:
+$ sudo ./tmon
diff --git a/tools/thermal/tmon/pid.c b/tools/thermal/tmon/pid.c
new file mode 100644
index 0000000..fd7e9e9
--- /dev/null
+++ b/tools/thermal/tmon/pid.c
@@ -0,0 +1,131 @@
+/*
+ * pid.c PID controller for testing cooling devices
+ *
+ *
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 or later as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author Name Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <libintl.h>
+#include <ctype.h>
+#include <assert.h>
+#include <time.h>
+#include <limits.h>
+#include <math.h>
+#include <sys/stat.h>
+#include <syslog.h>
+
+#include "tmon.h"
+
+/**************************************************************************
+ * A PID (Proportional-Integral-Derivative) controller is commonly used in
+ * linear control systems. Consider the process
+ * G(s) = U(s)/E(s), with
+ * kp = proportional gain
+ * ki = integral gain
+ * kd = derivative gain
+ * Ts = sampling period
+ * We use the type C (Allen-Bradley) equation, which takes the set point
+ * out of the P and D terms so that set point changes do not kick the
+ * output:
+ *
+ *   y[k] = y[k-1] - kp*(x[k] - x[k-1]) + ki*Ts*e[k] - kd*(x[k]
+ *          - 2*x[k-1] + x[k-2])/Ts
+ *
+ ***********************************************************************/
+struct pid_params p_param;
+/* cached data from previous loop */
+static double xk_1, xk_2; /* input temperature x[k-#] */
+
+/*
+ * TODO: make PID parameters tuned automatically,
+ * 1. use CPU burn to produce open loop unit step response
+ * 2. calculate PID based on Ziegler-Nichols rule
+ *
+ * add a flag for tuning PID
+ */
+int init_thermal_controller(void)
+{
+	int ret = 0;
+
+	/* init pid params */
+	p_param.ts = ticktime;
+	/* TODO: get it from TUI tuning tab */
+	p_param.kp = .36;
+	p_param.ki = 5.0;
+	p_param.kd = 0.19;
+
+	p_param.t_target = target_temp_user;
+
+	return ret;
+}
+
+void controller_reset(void)
+{
+	/* TODO: relax control data when not over thermal limit */
+	syslog(LOG_DEBUG, "TC inactive, relax p-state\n");
+	p_param.y_k = 0.0;
+	xk_1 = 0.0;
+	xk_2 = 0.0;
+	set_ctrl_state(0);
+}
+
+/* To be called at time interval Ts. Type C PID controller.
+ *    y[k] = y[k-1] - kp*(x[k] - x[k-1]) + Ki*Ts*e[k] - Kd*(x[k]
+ *          - 2*x[k-1]+x[k-2])/Ts
+ * TODO: add low pass filter for D term
+ */
+#define GUARD_BAND (2)
+void controller_handler(const double xk, double *yk)
+{
+	double ek;
+	double p_term, i_term, d_term;
+
+	ek = p_param.t_target - xk; /* error */
+	if (ek >= 3.0) {
+		syslog(LOG_DEBUG, "PID: %3.1f Below set point %3.1f, stop\n",
+			xk, p_param.t_target);
+		controller_reset();
+		*yk = 0.0;
+		return;
+	}
+	/* compute intermediate PID terms */
+	p_term = -p_param.kp * (xk - xk_1);
+	i_term = p_param.kp * p_param.ki * p_param.ts * ek;
+	d_term = -p_param.kp * p_param.kd * (xk - 2 * xk_1 + xk_2) / p_param.ts;
+	/* compute output */
+	*yk += p_term + i_term + d_term;
+	/* update sample history, oldest value first */
+	xk_2 = xk_1;
+	xk_1 = xk;
+
+	/* clamp output adjustment range */
+	if (*yk < -LIMIT_HIGH)
+		*yk = -LIMIT_HIGH;
+	else if (*yk > -LIMIT_LOW)
+		*yk = -LIMIT_LOW;
+
+	p_param.y_k = *yk;
+
+	set_ctrl_state(lround(fabs(p_param.y_k)));
+
+}
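
A worked example of the type C update implemented above, iterated over a few
made-up temperature samples with the default gains from
init_thermal_controller(); it deliberately skips the guard-band reset and
output clamping to keep the arithmetic visible:

#include <stdio.h>

int main(void)
{
	const double kp = .36, ki = 5.0, kd = 0.19, ts = 1.0, target = 65.0;
	const double temps[] = { 60.0, 63.0, 66.0, 68.0 };	/* made up */
	double yk = 0.0, xk_1 = 0.0, xk_2 = 0.0;
	unsigned int i;

	for (i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
		double xk = temps[i];
		double ek = target - xk;

		yk += -kp * (xk - xk_1)
		      + kp * ki * ts * ek
		      - kp * kd * (xk - 2 * xk_1 + xk_2) / ts;
		xk_2 = xk_1;	/* history must shift oldest-first */
		xk_1 = xk;
		printf("x=%4.1f e=%5.1f y=%8.2f\n", xk, ek, yk);
	}
	return 0;
}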
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
new file mode 100644
index 0000000..dfe4548
--- /dev/null
+++ b/tools/thermal/tmon/sysfs.c
@@ -0,0 +1,596 @@
+/*
+ * sysfs.c sysfs ABI access functions for TMON program
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 or later as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <dirent.h>
+#include <libintl.h>
+#include <ctype.h>
+#include <time.h>
+#include <syslog.h>
+#include <sys/time.h>
+#include <errno.h>
+
+#include "tmon.h"
+
+struct tmon_platform_data ptdata;
+const char *trip_type_name[] = {
+	"critical",
+	"hot",
+	"passive",
+	"active",
+};
+
+int sysfs_set_ulong(char *path, char *filename, unsigned long val)
+{
+	FILE *fd;
+	int ret = -1;
+	char filepath[256];
+
+	snprintf(filepath, 256, "%s/%s", path, filename);
+
+	fd = fopen(filepath, "w");
+	if (!fd) {
+		syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath);
+		return ret;
+	}
+	ret = fprintf(fd, "%lu", val);
+	fclose(fd);
+
+	return 0;
+}
+
+/* history of thermal data, used for control algo */
+#define NR_THERMAL_RECORDS 3
+struct thermal_data_record trec[NR_THERMAL_RECORDS];
+int cur_thermal_record; /* index to the trec array */
+
+static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong)
+{
+	FILE *fd;
+	int ret = -1;
+	char filepath[256];
+
+	snprintf(filepath, 256, "%s/%s", path, filename);
+
+	fd = fopen(filepath, "r");
+	if (!fd) {
+		syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath);
+		return ret;
+	}
+	ret = fscanf(fd, "%lu", p_ulong);
+	fclose(fd);
+
+	return 0;
+}
+
+static int sysfs_get_string(char *path, char *filename, char *str)
+{
+	FILE *fd;
+	int ret = -1;
+	char filepath[256];
+
+	snprintf(filepath, 256, "%s/%s", path, filename);
+
+	fd = fopen(filepath, "r");
+	if (!fd) {
+		syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath);
+		return ret;
+	}
+	ret = fscanf(fd, "%255s", str);
+	fclose(fd);
+
+	return ret;
+}
+
+/* get states of the cooling device instance */
+static int probe_cdev(struct cdev_info *cdi, char *path)
+{
+	sysfs_get_string(path, "type", cdi->type);
+	sysfs_get_ulong(path, "max_state",  &cdi->max_state);
+	sysfs_get_ulong(path, "cur_state", &cdi->cur_state);
+
+	syslog(LOG_INFO, "%s: %s: type %s, max %lu, curr %lu inst %d\n",
+		__func__, path,
+		cdi->type, cdi->max_state, cdi->cur_state, cdi->instance);
+
+	return 0;
+}
+
+static int str_to_trip_type(char *name)
+{
+	int i;
+
+	for (i = 0; i < NR_THERMAL_TRIP_TYPE; i++) {
+		if (!strcmp(name, trip_type_name[i]))
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+/* scan and fill in trip point info for a thermal zone and trip point id */
+static int get_trip_point_data(char *tz_path, int tzid, int tpid)
+{
+	char filename[256];
+	char temp_str[256];
+	int trip_type;
+
+	if (tpid >= MAX_NR_TRIP)
+		return -EINVAL;
+	/* check trip point type */
+	snprintf(filename, sizeof(filename), "trip_point_%d_type", tpid);
+	sysfs_get_string(tz_path, filename, temp_str);
+	trip_type = str_to_trip_type(temp_str);
+	if (trip_type < 0) {
+		syslog(LOG_ERR, "%s:%s no matching type\n", __func__, temp_str);
+		return -ENOENT;
+	}
+	ptdata.tzi[tzid].tp[tpid].type = trip_type;
+	syslog(LOG_INFO, "%s:tz:%d tp:%d:type:%s type id %d\n", __func__, tzid,
+		tpid, temp_str, trip_type);
+
+	/* TODO: check attribute */
+
+	return 0;
+}
+
+/* return instance id for file format such as trip_point_4_temp */
+static int get_instance_id(char *name, int pos, int skip)
+{
+	char *ch;
+	int i = 0;
+
+	ch = strtok(name, "_");
+	while (ch != NULL) {
+		++i;
+		syslog(LOG_INFO, "%s:%s:%s:%d", __func__, name, ch, i);
+		ch = strtok(NULL, "_");
+		if (pos == i)
+			return atol(ch + skip);
+	}
+
+	return -1;
+}
+
+/* Find trip point info of a thermal zone */
+static int find_tzone_tp(char *tz_name, char *d_name, struct tz_info *tzi,
+			int tz_id)
+{
+	int tp_id;
+	unsigned long temp_ulong;
+
+	if (strstr(d_name, "trip_point") &&
+		strstr(d_name, "temp")) {
+		/* check if trip point temp is non-zero
+		 * ignore 0/invalid trip points
+		 */
+		sysfs_get_ulong(tz_name, d_name, &temp_ulong);
+		if (temp_ulong < MAX_TEMP_KC) {
+			tzi->nr_trip_pts++;
+			/* found a valid trip point */
+			tp_id = get_instance_id(d_name, 2, 0);
+			syslog(LOG_DEBUG, "tzone %s trip %d temp %lu tpnode %s",
+				tz_name, tp_id, temp_ulong, d_name);
+			if (tp_id < 0 || tp_id >= MAX_NR_TRIP) {
+				syslog(LOG_ERR, "Failed to find TP inst %s\n",
+					d_name);
+				return -1;
+			}
+			get_trip_point_data(tz_name, tz_id, tp_id);
+			tzi->tp[tp_id].temp = temp_ulong;
+		}
+	}
+
+	return 0;
+}
+
+/* check cooling devices for binding info. */
+static int find_tzone_cdev(struct dirent *nl, char *tz_name,
+			struct tz_info *tzi, int tz_id, int cid)
+{
+	unsigned long trip_instance = 0;
+	char cdev_name_linked[256];
+	char cdev_name[256];
+	char cdev_trip_name[256];
+	int cdev_id;
+
+	if (nl->d_type == DT_LNK) {
+		syslog(LOG_DEBUG, "TZ%d: cdev: %s cid %d\n", tz_id, nl->d_name,
+			cid);
+		tzi->nr_cdev++;
+		if (tzi->nr_cdev > ptdata.nr_cooling_dev) {
+			syslog(LOG_ERR, "Err: Too many cdev? %d\n",
+				tzi->nr_cdev);
+			return -EINVAL;
+		}
+		/* find the link to real cooling device record binding */
+		snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name);
+		memset(cdev_name_linked, 0, sizeof(cdev_name_linked));
+		if (readlink(cdev_name, cdev_name_linked,
+				sizeof(cdev_name_linked) - 1) != -1) {
+			cdev_id = get_instance_id(cdev_name_linked, 1,
+						sizeof("device") - 1);
+			syslog(LOG_DEBUG, "cdev %s linked to %s : %d\n",
+				cdev_name, cdev_name_linked, cdev_id);
+			tzi->cdev_binding |= (1 << cdev_id);
+
+			/* find the trip point in which the cdev is binded to
+			 * in this tzone
+			 */
+			snprintf(cdev_trip_name, 256, "%s%s", nl->d_name,
+				"_trip_point");
+			sysfs_get_ulong(tz_name, cdev_trip_name,
+					&trip_instance);
+			/* validate trip point range, e.g. trip could return -1
+			 * when passive is enabled
+			 */
+			if (trip_instance > MAX_NR_TRIP)
+				trip_instance = 0;
+			tzi->trip_binding[cdev_id] |= 1 << trip_instance;
+			syslog(LOG_DEBUG, "cdev %s -> trip:%lu: 0x%lx %d\n",
+				cdev_name, trip_instance,
+				tzi->trip_binding[cdev_id],
+				cdev_id);
+
+
+		}
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+
+
+/*****************************************************************************
+ * Before calling scan_tzones, thermal sysfs must be probed to determine
+ * the number of thermal zones and cooling devices.
+ * We loop through each thermal zone and fill in tz_info struct, i.e.
+ * ptdata.tzi[]
+root@jacob-chiefriver:~# tree -d /sys/class/thermal/thermal_zone0
+/sys/class/thermal/thermal_zone0
+|-- cdev0 -> ../cooling_device4
+|-- cdev1 -> ../cooling_device3
+|-- cdev10 -> ../cooling_device7
+|-- cdev11 -> ../cooling_device6
+|-- cdev12 -> ../cooling_device5
+|-- cdev2 -> ../cooling_device2
+|-- cdev3 -> ../cooling_device1
+|-- cdev4 -> ../cooling_device0
+|-- cdev5 -> ../cooling_device12
+|-- cdev6 -> ../cooling_device11
+|-- cdev7 -> ../cooling_device10
+|-- cdev8 -> ../cooling_device9
+|-- cdev9 -> ../cooling_device8
+|-- device -> ../../../LNXSYSTM:00/device:62/LNXTHERM:00
+|-- power
+`-- subsystem -> ../../../../class/thermal
+*****************************************************************************/
+static int scan_tzones(void)
+{
+	DIR *dir;
+	struct dirent **namelist;
+	char tz_name[256];
+	int i, j, n, k = 0;
+
+	if (!ptdata.nr_tz_sensor)
+		return -1;
+
+	for (i = 0; i <= ptdata.max_tz_instance; i++) {
+		memset(tz_name, 0, sizeof(tz_name));
+		snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE, i);
+
+		dir = opendir(tz_name);
+		if (!dir) {
+			syslog(LOG_INFO, "Thermal zone %s skipped\n", tz_name);
+			continue;
+		}
+		/* keep track of valid tzones */
+		n = scandir(tz_name, &namelist, 0, alphasort);
+		if (n < 0)
+			syslog(LOG_ERR, "scandir failed in %s",  tz_name);
+		else {
+			sysfs_get_string(tz_name, "type", ptdata.tzi[k].type);
+			ptdata.tzi[k].instance = i;
+			/* detect trip points and cdev attached to this tzone */
+			j = 0; /* index for cdev */
+			ptdata.tzi[k].nr_cdev = 0;
+			ptdata.tzi[k].nr_trip_pts = 0;
+			while (n--) {
+				char *temp_str;
+
+				if (find_tzone_tp(tz_name, namelist[n]->d_name,
+							&ptdata.tzi[k], k))
+					break;
+				temp_str = strstr(namelist[n]->d_name, "cdev");
+				if (!temp_str) {
+					free(namelist[n]);
+					continue;
+				}
+				if (!find_tzone_cdev(namelist[n], tz_name,
+							&ptdata.tzi[k], i, j))
+					j++; /* increment cdev index */
+				free(namelist[n]);
+			}
+			free(namelist);
+		}
+		/*TODO: reverse trip points */
+		closedir(dir);
+		syslog(LOG_INFO, "TZ %d has %d cdev\n",	i,
+			ptdata.tzi[k].nr_cdev);
+		k++;
+	}
+
+	return 0;
+}
+
+static int scan_cdevs(void)
+{
+	DIR *dir;
+	struct dirent **namelist;
+	char cdev_name[256];
+	int i, n, k = 0;
+
+	if (!ptdata.nr_cooling_dev) {
+		fprintf(stderr, "No cooling devices found\n");
+		return 0;
+	}
+	for (i = 0; i <= ptdata.max_cdev_instance; i++) {
+		memset(cdev_name, 0, sizeof(cdev_name));
+		snprintf(cdev_name, 256, "%s/%s%d", THERMAL_SYSFS, CDEV, i);
+
+		dir = opendir(cdev_name);
+		if (!dir) {
+			syslog(LOG_INFO, "Cooling dev %s skipped\n", cdev_name);
+			/* there is a gap in cooling device id, check again
+			 * for the same index.
+			 */
+			continue;
+		}
+
+		n = scandir(cdev_name, &namelist, 0, alphasort);
+		if (n < 0)
+			syslog(LOG_ERR, "scandir failed in %s",  cdev_name);
+		else {
+			sysfs_get_string(cdev_name, "type", ptdata.cdi[k].type);
+			ptdata.cdi[k].instance = i;
+			if (strstr(ptdata.cdi[k].type, ctrl_cdev)) {
+				ptdata.cdi[k].flag |= CDEV_FLAG_IN_CONTROL;
+				syslog(LOG_DEBUG, "control cdev id %d\n", i);
+			}
+			while (n--)
+				free(namelist[n]);
+			free(namelist);
+		}
+		closedir(dir);
+		k++;
+	}
+	return 0;
+}
+
+
+int probe_thermal_sysfs(void)
+{
+	DIR *dir;
+	struct dirent **namelist;
+	int n;
+
+	dir = opendir(THERMAL_SYSFS);
+	if (!dir) {
+		fprintf(stderr, "\nNo thermal sysfs, exit\n");
+		return -1;
+	}
+	n = scandir(THERMAL_SYSFS, &namelist, 0, alphasort);
+	if (n < 0)
+		syslog(LOG_ERR, "scandir failed in thermal sysfs");
+	else {
+		/* detect number of thermal zones and cooling devices */
+		while (n--) {
+			int inst;
+
+			if (strstr(namelist[n]->d_name, CDEV)) {
+				inst = get_instance_id(namelist[n]->d_name, 1,
+						sizeof("device") - 1);
+				/* keep track of the max cooling device since
+				 * there may be gaps.
+				 */
+				if (inst > ptdata.max_cdev_instance)
+					ptdata.max_cdev_instance = inst;
+
+				syslog(LOG_DEBUG, "found cdev: %s %d %d\n",
+					namelist[n]->d_name,
+					ptdata.nr_cooling_dev,
+					ptdata.max_cdev_instance);
+				ptdata.nr_cooling_dev++;
+			} else if (strstr(namelist[n]->d_name, TZONE)) {
+				inst = get_instance_id(namelist[n]->d_name, 1,
+						sizeof("zone") - 1);
+				if (inst > ptdata.max_tz_instance)
+					ptdata.max_tz_instance = inst;
+
+				syslog(LOG_DEBUG, "found tzone: %s %d %d\n",
+					namelist[n]->d_name,
+					ptdata.nr_tz_sensor,
+					ptdata.max_tz_instance);
+				ptdata.nr_tz_sensor++;
+			}
+			free(namelist[n]);
+		}
+		free(namelist);
+	}
+	syslog(LOG_INFO, "found %d tzone(s), %d cdev(s), target zone %d\n",
+		ptdata.nr_tz_sensor, ptdata.nr_cooling_dev,
+		target_thermal_zone);
+	closedir(dir);
+
+	if (!ptdata.nr_tz_sensor) {
+		fprintf(stderr, "\nNo thermal zones found, exit\n\n");
+		return -1;
+	}
+
+	ptdata.tzi = calloc(ptdata.max_tz_instance + 1, sizeof(struct tz_info));
+	if (!ptdata.tzi) {
+		fprintf(stderr, "Err: allocate tz_info\n");
+		return -1;
+	}
+
+	/* we still show thermal zone information if there is no cdev */
+	if (ptdata.nr_cooling_dev) {
+		ptdata.cdi = calloc(ptdata.max_cdev_instance + 1,
+				sizeof(struct cdev_info));
+		if (!ptdata.cdi) {
+			free(ptdata.tzi);
+			fprintf(stderr, "Err: allocate cdev_info\n");
+			return -1;
+		}
+	}
+
+	/* now probe tzones */
+	if (scan_tzones())
+		return -1;
+	if (scan_cdevs())
+		return -1;
+	return 0;
+}
+
+/* convert sysfs zone instance to zone array index */
+int zone_instance_to_index(int zone_inst)
+{
+	int i;
+
+	for (i = 0; i < ptdata.nr_tz_sensor; i++)
+		if (ptdata.tzi[i].instance == zone_inst)
+			return i;
+	return -ENOENT;
+}
+
+/* read temperature of all thermal zones */
+int update_thermal_data()
+{
+	int i;
+	char tz_name[256];
+	static unsigned long samples;
+
+	if (!ptdata.nr_tz_sensor) {
+		syslog(LOG_ERR, "No thermal zones found!\n");
+		return -1;
+	}
+
+	/* circular buffer for keeping historic data */
+	if (cur_thermal_record >= NR_THERMAL_RECORDS)
+		cur_thermal_record = 0;
+	gettimeofday(&trec[cur_thermal_record].tv, NULL);
+	if (tmon_log) {
+		fprintf(tmon_log, "%lu ", ++samples);
+		fprintf(tmon_log, "%3.1f ", p_param.t_target);
+	}
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		memset(tz_name, 0, sizeof(tz_name));
+		snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
+			ptdata.tzi[i].instance);
+		sysfs_get_ulong(tz_name, "temp",
+				&trec[cur_thermal_record].temp[i]);
+		if (tmon_log)
+			fprintf(tmon_log, "%lu ",
+				trec[cur_thermal_record].temp[i]/1000);
+	}
+	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
+		char cdev_name[256];
+		unsigned long val;
+
+		snprintf(cdev_name, 256, "%s/%s%d", THERMAL_SYSFS, CDEV,
+			ptdata.cdi[i].instance);
+		probe_cdev(&ptdata.cdi[i], cdev_name);
+		val = ptdata.cdi[i].cur_state;
+		if (val > 1000000)
+			val = 0;
+		if (tmon_log)
+			fprintf(tmon_log, "%lu ", val);
+	}
+
+	if (tmon_log) {
+		fprintf(tmon_log, "\n");
+		fflush(tmon_log);
+	}
+
+	return 0;
+}
+
+void set_ctrl_state(unsigned long state)
+{
+	char ctrl_cdev_path[256];
+	int i;
+	unsigned long cdev_state;
+
+	if (no_control)
+		return;
+	/* set all ctrl cdev to the same state */
+	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
+		if (ptdata.cdi[i].flag & CDEV_FLAG_IN_CONTROL) {
+			if (ptdata.cdi[i].max_state < 10) {
+				strcpy(ctrl_cdev, "None.");
+				return;
+			}
+			/* scale to percentage of max_state */
+			cdev_state = state * ptdata.cdi[i].max_state/100;
+			syslog(LOG_DEBUG,
+				"ctrl cdev %d set state %lu scaled to %lu\n",
+				ptdata.cdi[i].instance, state, cdev_state);
+			snprintf(ctrl_cdev_path, 256, "%s/%s%d", THERMAL_SYSFS,
+				CDEV, ptdata.cdi[i].instance);
+			syslog(LOG_DEBUG, "ctrl cdev path %s", ctrl_cdev_path);
+			sysfs_set_ulong(ctrl_cdev_path, "cur_state",
+					cdev_state);
+		}
+	}
+}
+
+void get_ctrl_state(unsigned long *state)
+{
+	char ctrl_cdev_path[256];
+	int ctrl_cdev_id = -1;
+	int i;
+
+	/* TODO: take average of all ctrl types. also consider change based on
+	 * uevent. Take the first reading for now.
+	 */
+	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
+		if (ptdata.cdi[i].flag & CDEV_FLAG_IN_CONTROL) {
+			ctrl_cdev_id = ptdata.cdi[i].instance;
+			syslog(LOG_INFO, "ctrl cdev %d get state\n",
+				ptdata.cdi[i].instance);
+			break;
+		}
+	}
+	if (ctrl_cdev_id == -1) {
+		*state = 0;
+		return;
+	}
+	snprintf(ctrl_cdev_path, 256, "%s/%s%d", THERMAL_SYSFS,
+		CDEV, ctrl_cdev_id);
+	sysfs_get_ulong(ctrl_cdev_path, "cur_state", state);
+}
+
+void free_thermal_data(void)
+{
+	free(ptdata.tzi);
+	free(ptdata.cdi);
+}
diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8
new file mode 100644
index 0000000..0be727c
--- /dev/null
+++ b/tools/thermal/tmon/tmon.8
@@ -0,0 +1,142 @@
+.TH TMON 8
+.SH NAME
+\fBtmon\fP \- A monitoring and testing tool for the Linux kernel thermal subsystem
+
+.SH SYNOPSIS
+.ft B
+.B tmon
+.RB [ Options ]
+.br
+.SH DESCRIPTION
+\fBtmon \fP can be used to visualize thermal relationships and
+real-time thermal data; tune
+and test cooling devices and sensors; and collect thermal data for offline
+analysis and plotting. \fBtmon\fP must be run as root in order to control
+device states via sysfs.
+.PP
+\fBFunctions\fP
+.PP
+.nf
+1. Thermal relationships:
+- show thermal zone information
+- show cooling device information
+- show trip point binding within each thermal zone
+- show trip point and cooling device instance bindings
+.PP
+2. Real-time data display
+- show temperature of all thermal zones w.r.t. their trip points and types
+- show states of all cooling devices
+.PP
+3. Thermal relationship learning and device tuning
+- with a built-in Proportional Integral Derivative (\fBPID\fP)
+controller, the user can pair a cooling device with a thermal sensor,
+test its effectiveness, and learn the thermal distance between the two
+- allow manual control of cooling device states and the target temperature
+.PP
+4. Data logging in /var/tmp/tmon.log
+- contains thermal configuration data, i.e. cooling devices, thermal
+ zones, and trip points. Can be used for data collection in remote
+ debugging.
+- logs real-time thermal data in a space-separated format that can be
+ directly consumed by plotting tools such as Rscript.
+
+.SS Options
+.PP
+The \fB-c --control\fP option sets a cooling device type to control the
+temperature of a thermal zone
+.PP
+The \fB-d --daemon\fP option runs \fBtmon \fP as a daemon without a user interface
+.PP
+The \fB-g --debug\fP option allows debug messages to be stored in syslog
+.PP
+The \fB-h --help\fP option shows the help message
+.PP
+The \fB-l --log\fP option writes data to /var/tmp/tmon.log
+.PP
+The \fB-t --time-interval\fP option sets the polling interval in seconds
+.PP
+The \fB-v --version\fP option shows the version of \fBtmon \fP
+.PP
+The \fB-z --zone\fP option sets the target thermal zone instance to be controlled
+.PP
+
+.SH FIELD DESCRIPTIONS
+.nf
+.PP
+\fBP \fP passive cooling trip point type
+\fBA \fP active cooling trip point type (fan)
+\fBC \fP critical trip point type
+\fBH \fP hot trip point type
+\fBkp \fP proportional gain of \fBPID\fP controller
+\fBki \fP integral gain of \fBPID\fP controller
+\fBkd \fP derivative gain of \fBPID\fP controller
+
+.SH REQUIREMENTS
+Build depends on ncurses
+.PP
+Runtime requires a terminal window large enough to show all the
+devices found on the system.
+
+.PP
+
+.SH INTERACTIVE COMMANDS
+.PP
+.nf
+\fBCtrl-C, q/Q\fP stops \fBtmon\fP
+\fBTAB\fP shows tuning pop up panel, choose a letter to modify
+
+.SH EXAMPLES
+Without any parameters, tmon runs in monitoring-only mode and refreshes the
+screen every second.
+.PP
+1. For monitoring only:
+.nf
+$ sudo ./tmon
+
+2. Use the Processor cooling device to control thermal zone 0 at the default 65C.
+$ sudo ./tmon -c Processor -z 0
+
+3. Use the intel_powerclamp (idle injection) cooling device to control thermal zone 1
+$ sudo ./tmon -c intel_powerclamp -z 1
+
+4. Turn on debug and collect data log at /var/tmp/tmon.log
+$ sudo ./tmon -g -l
+
+For example, the log below shows the PID controller adjusting the current
+states of all cooling devices of type "Processor" such that thermal zone 0
+stays below 65C.
+
+#---------- THERMAL DATA LOG STARTED -----------
+Samples TargetTemp acpitz0 acpitz1 Fan0 Fan1 Fan2 Fan3 Fan4 Fan5 Fan6 Fan7 Fan8 Fan9 Processor10 Processor11 Processor12 Processor13 LCD14 intel_powerclamp15
+1 65.0 65 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0
+2 65.0 66 65 0 0 0 0 0 0 0 0 0 0 4 4 4 4 6 0
+3 65.0 60 54 0 0 0 0 0 0 0 0 0 0 4 4 4 4 6 0
+4 65.0 53 53 0 0 0 0 0 0 0 0 0 0 4 4 4 4 6 0
+5 65.0 52 52 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0
+6 65.0 53 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0
+7 65.0 68 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0
+8 65.0 68 68 0 0 0 0 0 0 0 0 0 0 5 5 5 5 6 0
+9 65.0 68 68 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 0
+10 65.0 67 67 0 0 0 0 0 0 0 0 0 0 7 7 7 7 6 0
+11 65.0 67 67 0 0 0 0 0 0 0 0 0 0 8 8 8 8 6 0
+12 65.0 67 67 0 0 0 0 0 0 0 0 0 0 8 8 8 8 6 0
+13 65.0 67 67 0 0 0 0 0 0 0 0 0 0 9 9 9 9 6 0
+14 65.0 66 66 0 0 0 0 0 0 0 0 0 0 10 10 10 10 6 0
+15 65.0 66 67 0 0 0 0 0 0 0 0 0 0 10 10 10 10 6 0
+16 65.0 66 66 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0
+17 65.0 66 66 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0
+18 65.0 64 61 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0
+19 65.0 60 59 0 0 0 0 0 0 0 0 0 0 12 12 12 12 6 0
+
+The data can be read directly into a table by an R script such as the one below:
+
+#!/usr/bin/Rscript
+tdata <- read.table("/var/tmp/tmon.log", header=T, comment.char="#")
+attach(tdata)
+jpeg("tmon.jpg")
+g_range <- range(0, intel_powerclamp15, TargetTemp, acpitz0)
+plot(Samples, intel_powerclamp15, col="blue", ylim=g_range, axes=FALSE, ann=FALSE)
+par(new=TRUE)
+lines(TargetTemp, type="o", pch=22, lty=2, col="red")
+dev.off()
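+.PP
+The script above writes tmon.jpg in the current directory. The column
+names it references (Samples, TargetTemp, acpitz0, intel_powerclamp15)
+come from the header line of the log and will differ on other systems.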
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
new file mode 100644
index 0000000..b30f531
--- /dev/null
+++ b/tools/thermal/tmon/tmon.c
@@ -0,0 +1,352 @@
+/*
+ * tmon.c Thermal Monitor (TMON) main function and entry point
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 or later as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ */
+
+#include <getopt.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <ncurses.h>
+#include <ctype.h>
+#include <time.h>
+#include <signal.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdarg.h>
+#include <syslog.h>
+
+#include "tmon.h"
+
+unsigned long ticktime = 1; /* seconds */
+unsigned long no_control = 1; /* monitoring only or use cooling device for
+			       * temperature control.
+			       */
+double time_elapsed = 0.0;
+unsigned long target_temp_user = 65; /* can be selected via the TUI later */
+int dialogue_on;
+int tmon_exit;
+static short	daemon_mode;
+static int logging; /* for recording thermal data to a file */
+static int debug_on;
+FILE *tmon_log;
+/* cooling device used for the PID controller */
+char ctrl_cdev[CDEV_NAME_SIZE] = "None";
+int target_thermal_zone; /* user selected target zone instance */
+static void	start_daemon_mode(void);
+
+pthread_t event_tid;
+pthread_mutex_t input_lock;
+void usage()
+{
+	printf("Usage: tmon [OPTION...]\n");
+	printf("  -c, --control         cooling device in control\n");
+	printf("  -d, --daemon          run as daemon, no TUI\n");
+	printf("  -g, --debug           debug message in syslog\n");
+	printf("  -h, --help            show this help message\n");
+	printf("  -l, --log             log data to /var/tmp/tmon.log\n");
+	printf("  -t, --time-interval   sampling time interval, > 1 sec.\n");
+	printf("  -v, --version         show version\n");
+	printf("  -z, --zone            target thermal zone id\n");
+
+	exit(0);
+}
+
+void version()
+{
+	printf("TMON version %s\n", VERSION);
+	exit(EXIT_SUCCESS);
+}
+
+static void tmon_cleanup(void)
+{
+
+	syslog(LOG_INFO, "TMON exit cleanup\n");
+	fflush(stdout);
+	refresh();
+	if (tmon_log)
+		fclose(tmon_log);
+	if (event_tid) {
+		pthread_mutex_lock(&input_lock);
+		pthread_cancel(event_tid);
+		pthread_mutex_unlock(&input_lock);
+		pthread_mutex_destroy(&input_lock);
+	}
+	closelog();
+	/* relax control knobs, undo throttling */
+	set_ctrl_state(0);
+
+	keypad(stdscr, FALSE);
+	echo();
+	nocbreak();
+	close_windows();
+	endwin();
+	free_thermal_data();
+
+	exit(1);
+}
+
+
+static void tmon_sig_handler(int sig)
+{
+	syslog(LOG_INFO, "TMON caught signal %d\n", sig);
+	refresh();
+	switch (sig) {
+	case SIGTERM:
+		printf("sigterm, exit and clean up\n");
+		fflush(stdout);
+		break;
+	case SIGKILL:
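+		/* note: SIGKILL cannot actually be caught; this case is
+		 * unreachable and kept only for documentation
+		 */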
+		printf("sigkill, exit and clean up\n");
+		fflush(stdout);
+		break;
+	case SIGINT:
+		printf("ctrl-c, exit and clean up\n");
+		fflush(stdout);
+		break;
+	default:
+		break;
+	}
+	tmon_exit = true;
+}
+
+
+static void start_syslog(void)
+{
+	if (debug_on)
+		setlogmask(LOG_UPTO(LOG_DEBUG));
+	else
+		setlogmask(LOG_UPTO(LOG_ERR));
+	openlog("tmon.log", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL0);
+	syslog(LOG_NOTICE, "TMON started by User %d", getuid());
+}
+
+static void prepare_logging(void)
+{
+	int i;
+
+	if (!logging)
+		return;
+	/* open local data log file */
+	tmon_log = fopen(TMON_LOG_FILE, "w+");
+	if (!tmon_log) {
+		syslog(LOG_ERR, "failed to open log file %s\n", TMON_LOG_FILE);
+		return;
+	}
+
+	fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		char binding_str[33]; /* 32 bits of cdev_binding + nul */
+		int j;
+
+		memset(binding_str, 0, sizeof(binding_str));
+		for (j = 0; j < 32; j++)
+			binding_str[j] = (ptdata.tzi[i].cdev_binding & 1<<j) ?
+				'1' : '0';
+
+		fprintf(tmon_log, "#thermal zone %s%02d cdevs binding: %32s\n",
+			ptdata.tzi[i].type,
+			ptdata.tzi[i].instance,
+			binding_str);
+		for (j = 0; j <	ptdata.tzi[i].nr_trip_pts; j++) {
+			fprintf(tmon_log, "#\tTP%02d type:%s, temp:%lu\n", j,
+				trip_type_name[ptdata.tzi[i].tp[j].type],
+				ptdata.tzi[i].tp[j].temp);
+		}
+
+	}
+
+	for (i = 0; i <	ptdata.nr_cooling_dev; i++)
+		fprintf(tmon_log, "#cooling devices%02d: %s\n",
+			i, ptdata.cdi[i].type);
+
+	fprintf(tmon_log, "#---------- THERMAL DATA LOG STARTED -----------\n");
+	fprintf(tmon_log, "Samples TargetTemp ");
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		fprintf(tmon_log, "%s%d    ", ptdata.tzi[i].type,
+			ptdata.tzi[i].instance);
+	}
+	for (i = 0; i <	ptdata.nr_cooling_dev; i++)
+		fprintf(tmon_log, "%s%d ", ptdata.cdi[i].type,
+			ptdata.cdi[i].instance);
+
+	fprintf(tmon_log, "\n");
+}
+
+static struct option opts[] = {
+	{ "control", 1, NULL, 'c' },
+	{ "daemon", 0, NULL, 'd' },
+	{ "time-interval", 1, NULL, 't' },
+	{ "log", 0, NULL, 'l' },
+	{ "help", 0, NULL, 'h' },
+	{ "version", 0, NULL, 'v' },
+	{ "debug", 0, NULL, 'g' },
+	{ "zone", 1, NULL, 'z' },
+	{ 0, 0, NULL, 0 }
+};
+
+
+int main(int argc, char **argv)
+{
+	int err = 0;
+	int id2 = 0, c;
+	double yk = 0.0; /* controller output */
+	int target_tz_index;
+
+	if (geteuid() != 0) {
+		printf("TMON needs to be run as root\n");
+		exit(EXIT_FAILURE);
+	}
+
+	while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) {
+		switch (c) {
+		case 'c':
+			no_control = 0;
+			strncpy(ctrl_cdev, optarg, CDEV_NAME_SIZE - 1);
+			break;
+		case 'd':
+			start_daemon_mode();
+			printf("Run TMON in daemon mode\n");
+			break;
+		case 't':
+			ticktime = strtod(optarg, NULL);
+			if (ticktime < 1)
+				ticktime = 1;
+			break;
+		case 'l':
+			printf("Logging data to /var/tmp/tmon.log\n");
+			logging = 1;
+			break;
+		case 'h':
+			usage();
+			break;
+		case 'v':
+			version();
+			break;
+		case 'g':
+			debug_on = 1;
+			break;
+		case 'z':
+			target_thermal_zone = strtod(optarg, NULL);
+			break;
+		default:
+			break;
+		}
+	}
+	if (pthread_mutex_init(&input_lock, NULL) != 0) {
+		fprintf(stderr, "\n mutex init failed, exit\n");
+		return 1;
+	}
+	start_syslog();
+	if (signal(SIGINT, tmon_sig_handler) == SIG_ERR)
+		syslog(LOG_DEBUG, "Cannot handle SIGINT\n");
+	if (signal(SIGTERM, tmon_sig_handler) == SIG_ERR)
+		syslog(LOG_DEBUG, "Cannot handle SIGTERM\n");
+
+	if (probe_thermal_sysfs()) {
+		pthread_mutex_destroy(&input_lock);
+		closelog();
+		return -1;
+	}
+	initialize_curses();
+	setup_windows();
+	signal(SIGWINCH, resize_handler);
+	show_title_bar();
+	show_sensors_w();
+	show_cooling_device();
+	update_thermal_data();
+	show_data_w();
+	prepare_logging();
+	init_thermal_controller();
+
+	nodelay(stdscr, TRUE);
+	err = pthread_create(&event_tid, NULL, &handle_tui_events, NULL);
+	if (err != 0) {
+		printf("\ncan't create thread: %s\n", strerror(err));
+		tmon_cleanup();
+		exit(EXIT_FAILURE);
+	}
+
+	/* validate range of user selected target zone, default to the first
+	 * instance if out of range
+	 */
+	target_tz_index = zone_instance_to_index(target_thermal_zone);
+	if (target_tz_index < 0) {
+		target_thermal_zone = ptdata.tzi[0].instance;
+		syslog(LOG_ERR, "target zone is not found, default to %d\n",
+			target_thermal_zone);
+	}
+	while (1) {
+		sleep(ticktime);
+		show_title_bar();
+		show_sensors_w();
+		update_thermal_data();
+		if (!dialogue_on) {
+			show_data_w();
+			show_cooling_device();
+		}
+		cur_thermal_record++;
+		time_elapsed += ticktime;
+		controller_handler(trec[0].temp[target_tz_index] / 1000,
+				&yk);
+		trec[0].pid_out_pct = yk;
+		if (!dialogue_on)
+			show_control_w();
+		if (tmon_exit)
+			break;
+	}
+	tmon_cleanup();
+	return 0;
+}
+
+static void start_daemon_mode(void)
+{
+	daemon_mode = 1;
+	/* fork */
+	pid_t	sid, pid = fork();
+	if (pid < 0) {
+		exit(EXIT_FAILURE);
+	} else if (pid > 0)
+		/* kill parent */
+		exit(EXIT_SUCCESS);
+
+	/* disable TUI, it may not be necessary, but saves some resource */
+	disable_tui();
+
+	/* change the file mode mask */
+	umask(0);
+
+	/* new SID for the daemon process */
+	sid = setsid();
+	if (sid < 0)
+		exit(EXIT_FAILURE);
+
+	/* change working directory */
+	if ((chdir("/")) < 0)
+		exit(EXIT_FAILURE);
+
+
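+	/* give the initial setup some time to settle before detaching the
+	 * standard streams; the fixed 10s delay appears arbitrary
+	 */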
+	sleep(10);
+
+	close(STDIN_FILENO);
+	close(STDOUT_FILENO);
+	close(STDERR_FILENO);
+
+}
diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h
new file mode 100644
index 0000000..9e3c49c
--- /dev/null
+++ b/tools/thermal/tmon/tmon.h
@@ -0,0 +1,204 @@
+/*
+ * tmon.h contains data structures and constants used by TMON
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 or later as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ */
+
+#ifndef TMON_H
+#define TMON_H
+
+#define MAX_DISP_TEMP 125
+#define MAX_CTRL_TEMP 105
+#define MIN_CTRL_TEMP 40
+#define MAX_NR_TZONE 16
+#define MAX_NR_CDEV 32
+#define MAX_NR_TRIP 16
+#define MAX_NR_CDEV_TRIP 12 /* number of cooling devices that can bind
+			     * to a thermal zone trip.
+			     */
+#define MAX_TEMP_KC 140000
+/* starting char position to draw sensor data, such as tz names
+ * trip point list, etc.
+ */
+#define DATA_LEFT_ALIGN 10
+#define NR_LINES_TZDATA 1
+#define TMON_LOG_FILE "/var/tmp/tmon.log"
+
+extern unsigned long ticktime;
+extern double time_elapsed;
+extern unsigned long target_temp_user;
+extern int dialogue_on;
+extern char ctrl_cdev[];
+extern pthread_mutex_t input_lock;
+extern int tmon_exit;
+extern int target_thermal_zone;
+/* use fixed size record to simplify data processing and transfer
+ * TBD: more info to be added, e.g. programmable trip point data.
+ */
+struct thermal_data_record {
+	struct timeval tv;
+	unsigned long temp[MAX_NR_TZONE];
+	double pid_out_pct;
+};
+
+struct cdev_info {
+	char type[64];
+	int instance;
+	unsigned long max_state;
+	unsigned long cur_state;
+	unsigned long flag;
+};
+
+enum trip_type {
+	THERMAL_TRIP_CRITICAL,
+	THERMAL_TRIP_HOT,
+	THERMAL_TRIP_PASSIVE,
+	THERMAL_TRIP_ACTIVE,
+	NR_THERMAL_TRIP_TYPE,
+};
+
+struct trip_point {
+	enum trip_type type;
+	unsigned long temp;
+	unsigned long hysteresis;
+	int attribute; /* programmability etc. */
+};
+
+/* thermal zone configuration information, binding with cooling devices could
+ * change at runtime.
+ */
+struct tz_info {
+	char type[256]; /* e.g. acpitz */
+	int instance;
+	int passive; /* active zone has passive node to force passive mode */
+	int nr_cdev; /* number of cooling devices bound */
+	int nr_trip_pts;
+	struct trip_point tp[MAX_NR_TRIP];
+	unsigned long cdev_binding; /* bitmap for attached cdevs */
+	/* cdev bind trip points, allow one cdev bind to multiple trips */
+	unsigned long trip_binding[MAX_NR_CDEV];
+};
+
+struct tmon_platform_data {
+	int nr_tz_sensor;
+	int nr_cooling_dev;
+	/* keep track of instance ids since there might be gaps */
+	int max_tz_instance;
+	int max_cdev_instance;
+	struct tz_info *tzi;
+	struct cdev_info *cdi;
+};
+
+struct control_ops {
+	void (*set_ratio)(unsigned long ratio);
+	unsigned long (*get_ratio)(unsigned long ratio);
+
+};
+
+enum cdev_types {
+	CDEV_TYPE_PROC,
+	CDEV_TYPE_FAN,
+	CDEV_TYPE_MEM,
+	CDEV_TYPE_NR,
+};
+
+/* REVISIT: the idea is to group sensors if possible, e.g. on intel mid
+ * we have "skin0", "skin1", "sys", "msicdie"
+ * on DPTF enabled systems, we might have PCH, TSKN, TAMB, etc.
+ */
+enum tzone_types {
+	TZONE_TYPE_ACPI,
+	TZONE_TYPE_PCH,
+	TZONE_TYPE_NR,
+};
+
+/* limit the output of PID controller adjustment */
+#define LIMIT_HIGH (95)
+#define LIMIT_LOW  (2)
+
+struct pid_params {
+	double kp;  /* Controller gain from Dialog Box */
+	double ki;  /* Time-constant for I action from Dialog Box */
+	double kd;  /* Time-constant for D action from Dialog Box */
+	double ts;
+	double k_lpf;
+
+	double t_target;
+	double y_k;
+};
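+/* controller_handler() combines these gains in a conventional discrete
+ * PID law, roughly y(k) = kp*e(k) + ki*sum(e)*ts + kd*de/ts with
+ * e(k) = x(k) - t_target; see pid.c for the exact computation.
+ */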
+
+extern int init_thermal_controller(void);
+extern void controller_handler(const double xk, double *yk);
+
+extern struct tmon_platform_data ptdata;
+extern struct pid_params p_param;
+
+extern FILE *tmon_log;
+extern int cur_thermal_record; /* index to the trec array */
+extern struct thermal_data_record trec[];
+extern const char *trip_type_name[];
+extern unsigned long no_control;
+
+extern void initialize_curses(void);
+extern void show_controller_stats(char *line);
+extern void show_title_bar(void);
+extern void setup_windows(void);
+extern void disable_tui(void);
+extern void show_sensors_w(void);
+extern void show_data_w(void);
+extern void write_status_bar(int x, char *line);
+extern void show_control_w(void);
+
+extern void show_cooling_device(void);
+extern void show_dialogue(void);
+extern int update_thermal_data(void);
+
+extern int probe_thermal_sysfs(void);
+extern void free_thermal_data(void);
+extern	void resize_handler(int sig);
+extern void set_ctrl_state(unsigned long state);
+extern void get_ctrl_state(unsigned long *state);
+extern void *handle_tui_events(void *arg);
+extern int sysfs_set_ulong(char *path, char *filename, unsigned long val);
+extern int zone_instance_to_index(int zone_inst);
+extern void close_windows(void);
+
+#define PT_COLOR_DEFAULT    1
+#define PT_COLOR_HEADER_BAR 2
+#define PT_COLOR_ERROR      3
+#define PT_COLOR_RED        4
+#define PT_COLOR_YELLOW     5
+#define PT_COLOR_GREEN      6
+#define PT_COLOR_BRIGHT     7
+#define PT_COLOR_BLUE	    8
+
+/* each thermal zone uses 12 chars, 8 for name, 2 for instance, 2 space
+ * also used to list trip points in forms of AAAC, which represents
+ * A: Active
+ * C: Critical
+ */
+#define TZONE_RECORD_SIZE 12
+#define TZ_LEFT_ALIGN 32
+#define CDEV_NAME_SIZE 20
+#define CDEV_FLAG_IN_CONTROL (1 << 0)
+
+/* dialogue box starts */
+#define DIAG_X 48
+#define DIAG_Y 8
+#define THERMAL_SYSFS "/sys/class/thermal"
+#define CDEV "cooling_device"
+#define TZONE "thermal_zone"
+#define TDATA_LEFT 16
+#endif /* TMON_H */
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c
new file mode 100644
index 0000000..89f8ef0
--- /dev/null
+++ b/tools/thermal/tmon/tui.c
@@ -0,0 +1,638 @@
+/*
+ * tui.c ncurses text user interface for TMON program
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 or later as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ncurses.h>
+#include <time.h>
+#include <syslog.h>
+#include <panel.h>
+#include <pthread.h>
+#include <signal.h>
+
+#include "tmon.h"
+
+static PANEL *data_panel;
+static PANEL *dialogue_panel;
+static PANEL *top;
+
+static WINDOW *title_bar_window;
+static WINDOW *tz_sensor_window;
+static WINDOW *cooling_device_window;
+static WINDOW *control_window;
+static WINDOW *status_bar_window;
+static WINDOW *thermal_data_window;
+static WINDOW *dialogue_window;
+
+char status_bar_slots[10][40];
+static void draw_hbar(WINDOW *win, int y, int start, int len,
+		unsigned long pattern, bool end);
+
+static int maxx, maxy;
+static int maxwidth = 200;
+
+#define TITLE_BAR_HEIGHT 1
+#define SENSOR_WIN_HEIGHT 4 /* one row for tz name, one for trip points */
+
+
+/* daemon mode flag (set by startup parameter -d) */
+static int  tui_disabled;
+
+static void close_panel(PANEL *p)
+{
+	if (p) {
+		del_panel(p);
+		p = NULL;
+	}
+}
+
+static void close_window(WINDOW *win)
+{
+	if (win) {
+		delwin(win);
+		win = NULL;
+	}
+}
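+
+/* note: the NULL assignments above only clear local copies; the static
+ * window/panel pointers are re-assigned the next time setup_windows() runs
+ */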
+
+void close_windows(void)
+{
+	if (tui_disabled)
+		return;
+	/* must delete panels before their attached windows */
+	if (dialogue_window)
+		close_panel(dialogue_panel);
+	if (cooling_device_window)
+		close_panel(data_panel);
+
+	close_window(title_bar_window);
+	close_window(tz_sensor_window);
+	close_window(status_bar_window);
+	close_window(cooling_device_window);
+	close_window(control_window);
+	close_window(thermal_data_window);
+	close_window(dialogue_window);
+
+}
+
+void write_status_bar(int x, char *line)
+{
+	mvwprintw(status_bar_window, 0, x, "%s", line);
+	wrefresh(status_bar_window);
+}
+
+void setup_windows(void)
+{
+	int y_begin = 1;
+
+	if (tui_disabled)
+		return;
+
+	getmaxyx(stdscr, maxy, maxx);
+	resizeterm(maxy, maxx);
+
+	title_bar_window = subwin(stdscr, TITLE_BAR_HEIGHT, maxx, 0, 0);
+	y_begin += TITLE_BAR_HEIGHT;
+
+	tz_sensor_window = subwin(stdscr, SENSOR_WIN_HEIGHT, maxx, y_begin, 0);
+	y_begin += SENSOR_WIN_HEIGHT;
+
+	cooling_device_window = subwin(stdscr, ptdata.nr_cooling_dev + 3, maxx,
+				y_begin, 0);
+	y_begin += ptdata.nr_cooling_dev + 3; /* 2 lines for border */
+	/* two lines to show borders, one line per tz show trip point position
+	 * and value.
+	 * dialogue window is a pop-up, when needed it lays on top of cdev win
+	 */
+
+	dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50,
+				DIAG_Y, DIAG_X);
+
+	thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
+				NR_LINES_TZDATA + 3, maxx, y_begin, 0);
+	y_begin += ptdata.nr_tz_sensor * NR_LINES_TZDATA + 3;
+	control_window = subwin(stdscr, 4, maxx, y_begin, 0);
+
+	scrollok(cooling_device_window, TRUE);
+	maxwidth = maxx - 18;
+	status_bar_window = subwin(stdscr, 1, maxx, maxy-1, 0);
+
+	strcpy(status_bar_slots[0], " Ctrl-c - Quit ");
+	strcpy(status_bar_slots[1], " TAB - Tuning ");
+	wmove(status_bar_window, 1, 30);
+
+	/* prepare panels for dialogue, if panel already created then we must
+	 * be doing resizing, so just replace windows with new ones, old ones
+	 * should have been deleted by close_window
+	 */
+	data_panel = new_panel(cooling_device_window);
+	if (!data_panel)
+		syslog(LOG_DEBUG, "No data panel\n");
+	else {
+		if (dialogue_window) {
+			dialogue_panel = new_panel(dialogue_window);
+			if (!dialogue_panel)
+				syslog(LOG_DEBUG, "No dialogue panel\n");
+			else {
+				/* Set up the user pointer to the next panel*/
+				set_panel_userptr(data_panel, dialogue_panel);
+				set_panel_userptr(dialogue_panel, data_panel);
+				top = data_panel;
+			}
+		} else
+			syslog(LOG_INFO, "no dialogue win, term too small\n");
+	}
+	doupdate();
+	werase(stdscr);
+	refresh();
+}
+
+void resize_handler(int sig)
+{
+	/* start over when term gets resized, but first we clean up */
+	close_windows();
+	endwin();
+	refresh();
+	clear();
+	getmaxyx(stdscr, maxy, maxx);  /* get the new screen size */
+	setup_windows();
+	/* rate limit */
+	sleep(1);
+	syslog(LOG_DEBUG, "SIG %d, term resized to %d x %d\n",
+		sig, maxy, maxx);
+	signal(SIGWINCH, resize_handler);
+}
+
+const char cdev_title[] = " COOLING DEVICES ";
+void show_cooling_device(void)
+{
+	int i, j, x, y = 0;
+
+	if (tui_disabled || !cooling_device_window)
+		return;
+
+	werase(cooling_device_window);
+	wattron(cooling_device_window, A_BOLD);
+	mvwprintw(cooling_device_window,  1, 1,
+		"ID  Cooling Dev   Cur    Max   Thermal Zone Binding");
+	wattroff(cooling_device_window, A_BOLD);
+	for (j = 0; j <	ptdata.nr_cooling_dev; j++) {
+		/* draw cooling device list on the left in the order of
+		 * cooling device instances. skip unused idr.
+		 */
+		mvwprintw(cooling_device_window, j + 2, 1,
+			"%02d %12.12s%6d %6d",
+			ptdata.cdi[j].instance,
+			ptdata.cdi[j].type,
+			ptdata.cdi[j].cur_state,
+			ptdata.cdi[j].max_state);
+	}
+
+	/* show cdev binding, y is the global cooling device instance */
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		int tz_inst = ptdata.tzi[i].instance;
+		for (j = 0; j < ptdata.nr_cooling_dev; j++) {
+			int cdev_inst;
+			y = j;
+			x = tz_inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN;
+
+			draw_hbar(cooling_device_window, y+2, x,
+				TZONE_RECORD_SIZE-1, ACS_VLINE, false);
+
+			/* draw a column of spaces to separate thermal zones */
+			mvwprintw(cooling_device_window, y+2, x-1, " ");
+			if (ptdata.tzi[i].cdev_binding) {
+				cdev_inst = ptdata.cdi[j].instance;
+				unsigned long trip_binding =
+					ptdata.tzi[i].trip_binding[cdev_inst];
+				int k = 0; /* per zone trip point id that is
+					    * bound to this cdev; one-to-many
+					    * bindings are possible based on
+					    * the binding bitmask.
+					    */
+				syslog(LOG_DEBUG,
+					"bind tz%d cdev%d tp%lx %d cdev%lx\n",
+					i, j, trip_binding, y,
+					ptdata.tzi[i].cdev_binding);
+				/* draw each trip binding for the cdev */
+				while (trip_binding >>= 1) {
+					k++;
+					if (!(trip_binding & 1))
+						continue;
+					/* draw '*' to show binding */
+					mvwprintw(cooling_device_window,
+						y + 2,
+						x + ptdata.tzi[i].nr_trip_pts -
+						k - 1, "*");
+				}
+			}
+		}
+	}
+	/* draw border after data so that border will not be messed up
+	 * even there is not enough space for all the data to be shown
+	 */
+	wborder(cooling_device_window, 0, 0, 0, 0, 0, 0, 0, 0);
+	wattron(cooling_device_window, A_BOLD);
+	mvwprintw(cooling_device_window, 0, maxx/2 - sizeof(cdev_title),
+		cdev_title);
+	wattroff(cooling_device_window, A_BOLD);
+
+	wrefresh(cooling_device_window);
+}
+
+const char DIAG_TITLE[] = "[ TUNABLES ]";
+#define DIAG_DEV_ROWS  5
+void show_dialogue(void)
+{
+	int j, x = 0, y = 0;
+	WINDOW *w = dialogue_window;
+
+	if (tui_disabled || !w)
+		return;
+
+	werase(w);
+	box(w, 0, 0);
+	mvwprintw(w, 0, maxx/4, DIAG_TITLE);
+	/* list all the available tunables */
+	for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
+		y = j % DIAG_DEV_ROWS;
+		if (y == 0 && j != 0)
+			x += 20;
+		if (j == ptdata.nr_cooling_dev)
+			/* save last choice for target temp */
+			mvwprintw(w, y+1, x+1, "%c-%.12s", 'A'+j, "Set Temp");
+		else
+			mvwprintw(w, y+1, x+1, "%c-%.10s-%2d", 'A'+j,
+				ptdata.cdi[j].type, ptdata.cdi[j].instance);
+	}
+	wattron(w, A_BOLD);
+	mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?");
+	wattroff(w, A_BOLD);
+	/* y size of dialogue win is nr cdev + 5, so print legend
+	 * at the bottom line
+	 */
+	mvwprintw(w, ptdata.nr_cooling_dev+3, 1,
+		"Legend: A=Active, P=Passive, C=Critical");
+
+	wrefresh(dialogue_window);
+}
+
+void write_dialogue_win(char *buf, int y, int x)
+{
+	WINDOW *w = dialogue_window;
+
+	mvwprintw(w, y, x, "%s", buf);
+}
+
+const char control_title[] = " CONTROLS ";
+void show_control_w(void)
+{
+	unsigned long state;
+
+	get_ctrl_state(&state);
+
+	if (tui_disabled || !control_window)
+		return;
+
+	werase(control_window);
+	mvwprintw(control_window, 1, 1,
+		"PID gain: kp=%2.2f ki=%2.2f kd=%2.2f Output %2.2f",
+		p_param.kp, p_param.ki, p_param.kd, p_param.y_k);
+
+	mvwprintw(control_window, 2, 1,
+		"Target Temp: %2.1fC, Zone: %d, Control Device: %.12s",
+		p_param.t_target, target_thermal_zone, ctrl_cdev);
+
+	/* draw border last such that everything is within boundary */
+	wborder(control_window, 0, 0, 0, 0, 0, 0, 0, 0);
+	wattron(control_window, A_BOLD);
+	mvwprintw(control_window, 0, maxx/2 - sizeof(control_title),
+		control_title);
+	wattroff(control_window, A_BOLD);
+
+	wrefresh(control_window);
+}
+
+void initialize_curses(void)
+{
+	if (tui_disabled)
+		return;
+
+	initscr();
+	start_color();
+	keypad(stdscr, TRUE);	/* enable keyboard mapping */
+	nonl();			/* tell curses not to do NL->CR/NL on output */
+	cbreak();		/* take input chars one at a time */
+	noecho();		/* dont echo input */
+	curs_set(0);		/* turn off cursor */
+	use_default_colors();
+
+	init_pair(PT_COLOR_DEFAULT, COLOR_WHITE, COLOR_BLACK);
+	init_pair(PT_COLOR_HEADER_BAR, COLOR_BLACK, COLOR_WHITE);
+	init_pair(PT_COLOR_ERROR, COLOR_BLACK, COLOR_RED);
+	init_pair(PT_COLOR_RED, COLOR_WHITE, COLOR_RED);
+	init_pair(PT_COLOR_YELLOW, COLOR_WHITE, COLOR_YELLOW);
+	init_pair(PT_COLOR_GREEN, COLOR_WHITE, COLOR_GREEN);
+	init_pair(PT_COLOR_BLUE, COLOR_WHITE, COLOR_BLUE);
+	init_pair(PT_COLOR_BRIGHT, COLOR_WHITE, COLOR_BLACK);
+
+}
+
+void show_title_bar(void)
+{
+	int i;
+	int x = 0;
+
+	if (tui_disabled || !title_bar_window)
+		return;
+
+	wattrset(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR));
+	wbkgd(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR));
+	werase(title_bar_window);
+
+	mvwprintw(title_bar_window, 0, 0,
+		"     TMON v%s", VERSION);
+
+	wrefresh(title_bar_window);
+
+	werase(status_bar_window);
+
+	for (i = 0; i < 10; i++) {
+		if (strlen(status_bar_slots[i]) == 0)
+			continue;
+		wattron(status_bar_window, A_REVERSE);
+		mvwprintw(status_bar_window, 0, x, "%s", status_bar_slots[i]);
+		wattroff(status_bar_window, A_REVERSE);
+		x += strlen(status_bar_slots[i]) + 1;
+	}
+	wrefresh(status_bar_window);
+}
+
+static void handle_input_val(int ch)
+{
+	char buf[32];
+	int val;
+	char path[256];
+	WINDOW *w = dialogue_window;
+
+	echo();
+	keypad(w, TRUE);
+	wgetnstr(w, buf, 31);
+	val = atoi(buf);
+
+	if (ch == ptdata.nr_cooling_dev) {
+		snprintf(buf, 31, "Invalid Temp %d! %d-%d", val,
+			MIN_CTRL_TEMP, MAX_CTRL_TEMP);
+		if (val < MIN_CTRL_TEMP || val > MAX_CTRL_TEMP)
+			write_status_bar(40, buf);
+		else {
+			p_param.t_target = val;
+			snprintf(buf, 31, "Set New Target Temp %d", val);
+			write_status_bar(40, buf);
+		}
+	} else {
+		snprintf(path, 256, "%s/%s%d", THERMAL_SYSFS,
+			CDEV, ptdata.cdi[ch].instance);
+		sysfs_set_ulong(path, "cur_state", val);
+	}
+	noecho();
+	dialogue_on = 0;
+	show_data_w();
+	show_control_w();
+
+	top = (PANEL *)panel_userptr(top);
+	top_panel(top);
+}
+
+static void handle_input_choice(int ch)
+{
+	char buf[48];
+	int base = 0;
+	int cdev_id = 0;
+
+	if ((ch >= 'A' && ch <= 'A' + ptdata.nr_cooling_dev) ||
+		(ch >= 'a' && ch <= 'a' + ptdata.nr_cooling_dev)) {
+		base = (ch < 'a') ? 'A' : 'a';
+		cdev_id = ch - base;
+		if (ptdata.nr_cooling_dev == cdev_id)
+			snprintf(buf, sizeof(buf), "New Target Temp:");
+		else
+			snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
+				ptdata.cdi[cdev_id].type,
+				ptdata.cdi[cdev_id].instance);
+		write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2);
+		handle_input_val(cdev_id);
+	} else {
+		snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
+		write_dialogue_win(buf, 8, 2);
+	}
+}
+
+void *handle_tui_events(void *arg)
+{
+	int ch;
+
+	keypad(cooling_device_window, TRUE);
+	while ((ch = wgetch(cooling_device_window)) != EOF) {
+		if (tmon_exit)
+			break;
+		/* when term size is too small, no dialogue panels are set.
+		 * we need to filter out such cases.
+		 */
+		if (!data_panel || !dialogue_panel ||
+			!cooling_device_window ||
+			!dialogue_window) {
+
+			continue;
+		}
+		pthread_mutex_lock(&input_lock);
+		if (dialogue_on) {
+			handle_input_choice(ch);
+			/* top panel filter */
+			if (ch == 'q' || ch == 'Q')
+				ch = 0;
+		}
+		switch (ch) {
+		case KEY_LEFT:
+			box(cooling_device_window, 10, 0);
+			break;
+		case 9: /* TAB */
+			top = (PANEL *)panel_userptr(top);
+			top_panel(top);
+			if (top == dialogue_panel) {
+				dialogue_on = 1;
+				show_dialogue();
+			} else {
+				dialogue_on = 0;
+				/* force refresh */
+				show_data_w();
+				show_control_w();
+			}
+			break;
+		case 'q':
+		case 'Q':
+			tmon_exit = 1;
+			break;
+		}
+		update_panels();
+		doupdate();
+		pthread_mutex_unlock(&input_lock);
+	}
+
+	if (arg)
+		*(int *)arg = 0; /* make gcc happy */
+
+	return NULL;
+}
+
+/* draw a horizontal bar in given pattern */
+static void draw_hbar(WINDOW *win, int y, int start, int len, unsigned long ptn,
+		bool end)
+{
+	mvwaddch(win, y, start, ptn);
+	whline(win, ptn, len);
+	if (end)
+		mvwaddch(win, y, MAX_DISP_TEMP+TDATA_LEFT, ']');
+}
+
+static char trip_type_to_char(int type)
+{
+	switch (type) {
+	case THERMAL_TRIP_CRITICAL: return 'C';
+	case THERMAL_TRIP_HOT: return 'H';
+	case THERMAL_TRIP_PASSIVE: return 'P';
+	case THERMAL_TRIP_ACTIVE: return 'A';
+	default:
+		return '?';
+	}
+}
+
+/* fill a string with trip point type and value in one line
+ * e.g.      P(56)    C(106)
+ * maintain the distance one degree per char
+ */
+static void draw_tp_line(int tz, int y)
+{
+	int j;
+	int x;
+
+	for (j = 0; j < ptdata.tzi[tz].nr_trip_pts; j++) {
+		x = ptdata.tzi[tz].tp[j].temp / 1000;
+		mvwprintw(thermal_data_window, y + 0, x + TDATA_LEFT,
+			"%c%d", trip_type_to_char(ptdata.tzi[tz].tp[j].type),
+			x);
+		syslog(LOG_INFO, "%s:tz %d tp %d temp = %lu\n", __func__,
+			tz, j, ptdata.tzi[tz].tp[j].temp);
+	}
+}
+
+const char data_win_title[] = " THERMAL DATA ";
+void show_data_w(void)
+{
+	int i;
+
+
+	if (tui_disabled || !thermal_data_window)
+		return;
+
+	werase(thermal_data_window);
+	wattron(thermal_data_window, A_BOLD);
+	mvwprintw(thermal_data_window, 0, maxx/2 - sizeof(data_win_title),
+		data_win_title);
+	wattroff(thermal_data_window, A_BOLD);
+	/* draw a line as ruler */
+	for (i = 10; i < MAX_DISP_TEMP; i += 10)
+		mvwprintw(thermal_data_window, 1, i+TDATA_LEFT, "%2d", i);
+
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		int temp = trec[cur_thermal_record].temp[i] / 1000;
+		int y = 0;
+
+		y = i * NR_LINES_TZDATA + 2;
+		/* y at tz temp data line */
+		mvwprintw(thermal_data_window, y, 1, "%6.6s%2d:[%3d][",
+			ptdata.tzi[i].type,
+			ptdata.tzi[i].instance, temp);
+		draw_hbar(thermal_data_window, y, TDATA_LEFT, temp, ACS_RARROW,
+			true);
+		draw_tp_line(i, y);
+	}
+	wborder(thermal_data_window, 0, 0, 0, 0, 0, 0, 0, 0);
+	wrefresh(thermal_data_window);
+}
+
+const char tz_title[] = "THERMAL ZONES(SENSORS)";
+
+void show_sensors_w(void)
+{
+	int i, j;
+	char buffer[512];
+
+	if (tui_disabled || !tz_sensor_window)
+		return;
+
+	werase(tz_sensor_window);
+
+	memset(buffer, 0, sizeof(buffer));
+	wattron(tz_sensor_window, A_BOLD);
+	mvwprintw(tz_sensor_window, 1, 1, "Thermal Zones:");
+	wattroff(tz_sensor_window, A_BOLD);
+
+	mvwprintw(tz_sensor_window, 1, TZ_LEFT_ALIGN, "%s", buffer);
+	/* fill trip points for each tzone */
+	wattron(tz_sensor_window, A_BOLD);
+	mvwprintw(tz_sensor_window, 2, 1, "Trip Points:");
+	wattroff(tz_sensor_window, A_BOLD);
+
+	/* draw trip point from low to high for each tz */
+	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
+		int inst = ptdata.tzi[i].instance;
+
+		mvwprintw(tz_sensor_window, 1,
+			TZ_LEFT_ALIGN+TZONE_RECORD_SIZE * inst, "%.9s%02d",
+			ptdata.tzi[i].type, ptdata.tzi[i].instance);
+		for (j = ptdata.tzi[i].nr_trip_pts - 1; j >= 0; j--) {
+			/* loop through all trip points */
+			char type;
+			int tp_pos;
+			/* reverse the order here since trips are sorted
+			 * in ascending order in terms of temperature.
+			 */
+			tp_pos = ptdata.tzi[i].nr_trip_pts - j - 1;
+
+			type = trip_type_to_char(ptdata.tzi[i].tp[j].type);
+			mvwaddch(tz_sensor_window, 2,
+				inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN +
+				tp_pos,	type);
+			syslog(LOG_DEBUG, "draw tz %d tp %d ch:%c\n",
+				inst, j, type);
+		}
+	}
+	wborder(tz_sensor_window, 0, 0, 0, 0, 0, 0, 0, 0);
+	wattron(tz_sensor_window, A_BOLD);
+	mvwprintw(tz_sensor_window, 0, maxx/2 - sizeof(tz_title), tz_title);
+	wattroff(tz_sensor_window, A_BOLD);
+	wrefresh(tz_sensor_window);
+}
+
+void disable_tui(void)
+{
+	tui_disabled = 1;
+}
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index da7a195..bdb71a2 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -41,13 +41,14 @@
 	struct vhost_memory *mem;
 };
 
-void vq_notify(struct virtqueue *vq)
+bool vq_notify(struct virtqueue *vq)
 {
 	struct vq_info *info = vq->priv;
 	unsigned long long v = 1;
 	int r;
 	r = write(info->kick, &v, sizeof v);
 	assert(r == sizeof v);
+	return true;
 }
 
 void vq_callback(struct virtqueue *vq)
@@ -171,7 +172,8 @@
 							 GFP_ATOMIC);
 				if (likely(r == 0)) {
 					++started;
-					virtqueue_kick(vq->vq);
+					if (unlikely(!virtqueue_kick(vq->vq)))
+						r = -1;
 				}
 			} else
 				r = -1;
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index d053ea4..14a4f4c 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -22,7 +22,7 @@
 #define RINGSIZE 256
 #define ALIGN 4096
 
-static void never_notify_host(struct virtqueue *vq)
+static bool never_notify_host(struct virtqueue *vq)
 {
 	abort();
 }
@@ -65,17 +65,22 @@
 	unsigned long notifies;
 };
 
-static void parallel_notify_host(struct virtqueue *vq)
+static bool parallel_notify_host(struct virtqueue *vq)
 {
+	int rc;
 	struct guest_virtio_device *gvdev;
 
 	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
-	write(gvdev->to_host_fd, "", 1);
+	rc = write(gvdev->to_host_fd, "", 1);
+	if (rc < 0)
+		return false;
 	gvdev->notifies++;
+	return true;
 }
 
-static void no_notify_host(struct virtqueue *vq)
+static bool no_notify_host(struct virtqueue *vq)
 {
+	return true;
 }
 
 #define NUM_XFERS (10000000)
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 71c9c25..d5e9d6d 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -59,12 +59,14 @@
 #define PM_PSHIFT_BITS      6
 #define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
 #define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
-#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
+#define __PM_PSHIFT(x)      (((uint64_t) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
 #define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
 #define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
 
+#define __PM_SOFT_DIRTY      (1LL)
 #define PM_PRESENT          PM_STATUS(4LL)
 #define PM_SWAP             PM_STATUS(2LL)
+#define PM_SOFT_DIRTY       __PM_PSHIFT(__PM_SOFT_DIRTY)
 
 
 /*
@@ -83,6 +85,7 @@
 #define KPF_OWNER_PRIVATE	37
 #define KPF_ARCH		38
 #define KPF_UNCACHED		39
+#define KPF_SOFTDIRTY		40
 
 /* [48-] take some arbitrary free slots for expanding overloaded flags
  * not part of kernel API
@@ -132,6 +135,7 @@
 	[KPF_OWNER_PRIVATE]	= "O:owner_private",
 	[KPF_ARCH]		= "h:arch",
 	[KPF_UNCACHED]		= "c:uncached",
+	[KPF_SOFTDIRTY]		= "f:softdirty",
 
 	[KPF_READAHEAD]		= "I:readahead",
 	[KPF_SLOB_FREE]		= "P:slob_free",
@@ -417,7 +421,7 @@
 	return 1;
 }
 
-static uint64_t expand_overloaded_flags(uint64_t flags)
+static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
 {
 	/* SLOB/SLUB overload several page flags */
 	if (flags & BIT(SLAB)) {
@@ -433,6 +437,9 @@
 	if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM))
 		flags ^= BIT(RECLAIM) | BIT(READAHEAD);
 
+	if (pme & PM_SOFT_DIRTY)
+		flags |= BIT(SOFTDIRTY);
+
 	return flags;
 }
 
@@ -448,11 +455,11 @@
 	return flags;
 }
 
-static uint64_t kpageflags_flags(uint64_t flags)
+static uint64_t kpageflags_flags(uint64_t flags, uint64_t pme)
 {
-	flags = expand_overloaded_flags(flags);
-
-	if (!opt_raw)
+	if (opt_raw)
+		flags = expand_overloaded_flags(flags, pme);
+	else
 		flags = well_known_flags(flags);
 
 	return flags;
@@ -545,9 +552,9 @@
 }
 
 static void add_page(unsigned long voffset,
-		     unsigned long offset, uint64_t flags)
+		     unsigned long offset, uint64_t flags, uint64_t pme)
 {
-	flags = kpageflags_flags(flags);
+	flags = kpageflags_flags(flags, pme);
 
 	if (!bit_mask_ok(flags))
 		return;
@@ -569,7 +576,8 @@
 #define KPAGEFLAGS_BATCH	(64 << 10)	/* 64k pages */
 static void walk_pfn(unsigned long voffset,
 		     unsigned long index,
-		     unsigned long count)
+		     unsigned long count,
+		     uint64_t pme)
 {
 	uint64_t buf[KPAGEFLAGS_BATCH];
 	unsigned long batch;
@@ -583,7 +591,7 @@
 			break;
 
 		for (i = 0; i < pages; i++)
-			add_page(voffset + i, index + i, buf[i]);
+			add_page(voffset + i, index + i, buf[i], pme);
 
 		index += pages;
 		count -= pages;
@@ -608,7 +616,7 @@
 		for (i = 0; i < pages; i++) {
 			pfn = pagemap_pfn(buf[i]);
 			if (pfn)
-				walk_pfn(index + i, pfn, 1);
+				walk_pfn(index + i, pfn, 1, buf[i]);
 		}
 
 		index += pages;
@@ -659,7 +667,7 @@
 
 	for (i = 0; i < nr_addr_ranges; i++)
 		if (!opt_pid)
-			walk_pfn(0, opt_offset[i], opt_size[i]);
+			walk_pfn(0, opt_offset[i], opt_size[i], 0);
 		else
 			walk_task(opt_offset[i], opt_size[i]);
 
diff --git a/usr/Makefile b/usr/Makefile
index 029ffe6..e767f01 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -6,20 +6,23 @@
 PHONY += klibcdirs
 
 
-# Gzip
-suffix_$(CONFIG_INITRAMFS_COMPRESSION_GZIP)   = .gz
-
 # Bzip2
-suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2)  = .bz2
+suffix_$(CONFIG_RD_BZIP2)  = .bz2
 
 # Lzma
-suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
+suffix_$(CONFIG_RD_LZMA)   = .lzma
 
 # XZ
-suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ)     = .xz
+suffix_$(CONFIG_RD_XZ)     = .xz
 
 # Lzo
-suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO)   = .lzo
+suffix_$(CONFIG_RD_LZO)    = .lzo
+
+# Lz4
+suffix_$(CONFIG_RD_LZ4)    = .lz4
+
+# Gzip
+suffix_$(CONFIG_RD_GZIP)   = .gz
 
 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)"
 
@@ -53,7 +56,10 @@
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 \
+	initramfs_data.cpio.lzma initramfs_data.cpio.xz \
+	initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \
+	initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
@@ -66,4 +72,3 @@
 $(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
 	$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
 	$(call if_changed,initfs)
-
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index af8c925..225ad24 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -382,24 +382,15 @@
 static char *cpio_replace_env(char *new_location)
 {
 	char expanded[PATH_MAX + 1];
-	char env_var[PATH_MAX + 1];
-	char *start;
-	char *end;
+	char *start, *end, *var;
 
-	for (start = NULL; (start = strstr(new_location, "${")); ) {
-		end = strchr(start, '}');
-		if (start < end) {
-			*env_var = *expanded = '\0';
-			strncat(env_var, start + 2, end - start - 2);
-			strncat(expanded, new_location, start - new_location);
-			strncat(expanded, getenv(env_var),
-				PATH_MAX - strlen(expanded));
-			strncat(expanded, end + 1,
-				PATH_MAX - strlen(expanded));
-			strncpy(new_location, expanded, PATH_MAX);
-			new_location[PATH_MAX] = 0;
-		} else
-			break;
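+	/* replace each ${VAR} with its environment value, or the empty
+	 * string when the variable is unset
+	 */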
+	while ((start = strstr(new_location, "${")) &&
+	       (end = strchr(start + 2, '}'))) {
+		*start = *end = 0;
+		var = getenv(start + 2);
+		snprintf(expanded, sizeof expanded, "%s%s%s",
+			 new_location, var ? var : "", end + 1);
+		strcpy(new_location, expanded);
 	}
 
 	return new_location;
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 779262f..fbe1a48 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -27,3 +27,6 @@
 
 config HAVE_KVM_CPU_RELAX_INTERCEPT
        bool
+
+config KVM_VFIO
+       bool
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8a39dda..8631d9c 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,14 +67,12 @@
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
-	apf->done = true;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -83,7 +80,7 @@
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -99,9 +96,8 @@
 		struct kvm_async_pf *work =
 			list_entry(vcpu->async_pf.queue.next,
 				   typeof(*work), queue);
-		cancel_work_sync(&work->work);
 		list_del(&work->queue);
-		if (!work->done) { /* work was canceled */
+		if (cancel_work_sync(&work->work)) {
 			mmdrop(work->mm);
 			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 			kmem_cache_free(async_pf_cache, work);
@@ -114,8 +110,6 @@
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -135,14 +129,11 @@
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -165,8 +156,7 @@
 	if (!work)
 		return 0;
 
-	work->page = NULL;
-	work->done = false;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -206,7 +196,7 @@
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 72a130b..0df7d4b 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -79,7 +79,7 @@
 	flags = IOMMU_READ;
 	if (!(slot->flags & KVM_MEM_READONLY))
 		flags |= IOMMU_WRITE;
-	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+	if (!kvm->arch.iommu_noncoherent)
 		flags |= IOMMU_CACHE;
 
 
@@ -103,6 +103,10 @@
 		while ((gfn << PAGE_SHIFT) & (page_size - 1))
 			page_size >>= 1;
 
+		/* Make sure hva is aligned to the page size we want to map */
+		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
+			page_size >>= 1;
+
 		/*
 		 * Pin all pages we are about to map in memory. This is
 		 * important because we unmap and unpin in 4kb steps later.
@@ -140,6 +144,9 @@
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
+	if (kvm->arch.iommu_noncoherent)
+		kvm_arch_register_noncoherent_dma(kvm);
+
 	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
@@ -158,7 +165,8 @@
 {
 	struct pci_dev *pdev = NULL;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	int r, last_flags;
+	int r;
+	bool noncoherent;
 
 	/* check if iommu exists and in use */
 	if (!domain)
@@ -174,15 +182,13 @@
 		return r;
 	}
 
-	last_flags = kvm->arch.iommu_flags;
-	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
-				 IOMMU_CAP_CACHE_COHERENCY))
-		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
+	noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
+					    IOMMU_CAP_CACHE_COHERENCY);
 
 	/* Check if need to update IOMMU page table for guest memory */
-	if ((last_flags ^ kvm->arch.iommu_flags) ==
-			KVM_IOMMU_CACHE_COHERENCY) {
+	if (noncoherent != kvm->arch.iommu_noncoherent) {
 		kvm_iommu_unmap_memslots(kvm);
+		kvm->arch.iommu_noncoherent = noncoherent;
 		r = kvm_iommu_map_memslots(kvm);
 		if (r)
 			goto out_unmap;
@@ -190,11 +196,7 @@
 
 	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
 
-	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
-		assigned_dev->host_segnr,
-		assigned_dev->host_busnr,
-		PCI_SLOT(assigned_dev->host_devfn),
-		PCI_FUNC(assigned_dev->host_devfn));
+	dev_info(&pdev->dev, "kvm assign device\n");
 
 	return 0;
 out_unmap:
@@ -220,11 +222,7 @@
 
 	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
 
-	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
-		assigned_dev->host_segnr,
-		assigned_dev->host_busnr,
-		PCI_SLOT(assigned_dev->host_devfn),
-		PCI_FUNC(assigned_dev->host_devfn));
+	dev_info(&pdev->dev, "kvm deassign device\n");
 
 	return 0;
 }
@@ -336,6 +334,9 @@
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
+	if (kvm->arch.iommu_noncoherent)
+		kvm_arch_unregister_noncoherent_dma(kvm);
+
 	return 0;
 }
 
@@ -350,6 +351,7 @@
 	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
 	kvm->arch.iommu_domain = NULL;
+	kvm->arch.iommu_noncoherent = false;
 	mutex_unlock(&kvm->slots_lock);
 
 	iommu_domain_free(domain);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cf9ccb..662f34c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,7 +70,8 @@
  * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
+static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
@@ -186,6 +187,7 @@
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
+EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
@@ -490,9 +492,9 @@
 	if (r)
 		goto out_err;
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 
 	return kvm;
 
@@ -540,13 +542,13 @@
 /*
  * Free any memory in @free but not in @dont.
  */
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		kvm_destroy_dirty_bitmap(free);
 
-	kvm_arch_free_memslot(free, dont);
+	kvm_arch_free_memslot(kvm, free, dont);
 
 	free->npages = 0;
 }
@@ -557,7 +559,7 @@
 	struct kvm_memory_slot *memslot;
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_free_physmem_slot(memslot, NULL);
+		kvm_free_physmem_slot(kvm, memslot, NULL);
 
 	kfree(kvm->memslots);
 }
@@ -581,9 +583,9 @@
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kvm_io_bus_destroy(kvm->buses[i]);
@@ -821,7 +823,7 @@
 	if (change == KVM_MR_CREATE) {
 		new.userspace_addr = mem->userspace_addr;
 
-		if (kvm_arch_create_memslot(&new, npages))
+		if (kvm_arch_create_memslot(kvm, &new, npages))
 			goto out_free;
 	}
 
@@ -872,6 +874,19 @@
 			goto out_free;
 	}
 
+	/* actual memory is freed via old in kvm_free_physmem_slot below */
+	if (change == KVM_MR_DELETE) {
+		new.dirty_bitmap = NULL;
+		memset(&new.arch, 0, sizeof(new.arch));
+	}
+
+	old_memslots = install_new_memslots(kvm, slots, &new);
+
+	kvm_arch_commit_memory_region(kvm, mem, &old, change);
+
+	kvm_free_physmem_slot(kvm, &old, &new);
+	kfree(old_memslots);
+
 	/*
 	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
 	 * un-mapped and re-mapped if their base changes.  Since base change
@@ -883,29 +898,15 @@
 	 */
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
-		if (r)
-			goto out_slots;
+		return r;
 	}
 
-	/* actual memory is freed via old in kvm_free_physmem_slot below */
-	if (change == KVM_MR_DELETE) {
-		new.dirty_bitmap = NULL;
-		memset(&new.arch, 0, sizeof(new.arch));
-	}
-
-	old_memslots = install_new_memslots(kvm, slots, &new);
-
-	kvm_arch_commit_memory_region(kvm, mem, &old, change);
-
-	kvm_free_physmem_slot(&old, &new);
-	kfree(old_memslots);
-
 	return 0;
 
 out_slots:
 	kfree(slots);
 out_free:
-	kvm_free_physmem_slot(&new, &old);
+	kvm_free_physmem_slot(kvm, &new, &old);
 out:
 	return r;
 }
@@ -964,6 +965,7 @@
 out:
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 
 bool kvm_largepages_enabled(void)
 {
@@ -1654,6 +1656,7 @@
 	memslot = gfn_to_memslot(kvm, gfn);
 	mark_page_dirty_in_slot(kvm, memslot, gfn);
 }
+EXPORT_SYMBOL_GPL(mark_page_dirty);
 
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
@@ -1679,6 +1682,7 @@
 
 	finish_wait(&vcpu->wq, &wait);
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_block);
 
 #ifndef CONFIG_S390
 /*
@@ -2271,6 +2275,11 @@
 		ops = &kvm_xics_ops;
 		break;
 #endif
+#ifdef CONFIG_KVM_VFIO
+	case KVM_DEV_TYPE_VFIO:
+		ops = &kvm_vfio_ops;
+		break;
+#endif
 	default:
 		return -ENODEV;
 	}
@@ -2519,44 +2528,12 @@
 }
 #endif
 
-static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct page *page[1];
-	unsigned long addr;
-	int npages;
-	gfn_t gfn = vmf->pgoff;
-	struct kvm *kvm = vma->vm_file->private_data;
-
-	addr = gfn_to_hva(kvm, gfn);
-	if (kvm_is_error_hva(addr))
-		return VM_FAULT_SIGBUS;
-
-	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
-				NULL);
-	if (unlikely(npages != 1))
-		return VM_FAULT_SIGBUS;
-
-	vmf->page = page[0];
-	return 0;
-}
-
-static const struct vm_operations_struct kvm_vm_vm_ops = {
-	.fault = kvm_vm_fault,
-};
-
-static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	vma->vm_ops = &kvm_vm_vm_ops;
-	return 0;
-}
-
 static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = kvm_vm_compat_ioctl,
 #endif
-	.mmap           = kvm_vm_mmap,
 	.llseek		= noop_llseek,
 };
 
@@ -2683,11 +2660,12 @@
 	}
 }
 
-static void hardware_enable(void *junk)
+static void hardware_enable(void)
 {
-	raw_spin_lock(&kvm_lock);
-	hardware_enable_nolock(junk);
-	raw_spin_unlock(&kvm_lock);
+	raw_spin_lock(&kvm_count_lock);
+	if (kvm_usage_count)
+		hardware_enable_nolock(NULL);
+	raw_spin_unlock(&kvm_count_lock);
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -2700,11 +2678,12 @@
 	kvm_arch_hardware_disable(NULL);
 }
 
-static void hardware_disable(void *junk)
+static void hardware_disable(void)
 {
-	raw_spin_lock(&kvm_lock);
-	hardware_disable_nolock(junk);
-	raw_spin_unlock(&kvm_lock);
+	raw_spin_lock(&kvm_count_lock);
+	if (kvm_usage_count)
+		hardware_disable_nolock(NULL);
+	raw_spin_unlock(&kvm_count_lock);
 }
 
 static void hardware_disable_all_nolock(void)
@@ -2718,16 +2697,16 @@
 
 static void hardware_disable_all(void)
 {
-	raw_spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_count_lock);
 	hardware_disable_all_nolock();
-	raw_spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_count_lock);
 }
 
 static int hardware_enable_all(void)
 {
 	int r = 0;
 
-	raw_spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_count_lock);
 
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
@@ -2740,7 +2719,7 @@
 		}
 	}
 
-	raw_spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_count_lock);
 
 	return r;
 }
@@ -2750,20 +2729,17 @@
 {
 	int cpu = (long)v;
 
-	if (!kvm_usage_count)
-		return NOTIFY_OK;
-
 	val &= ~CPU_TASKS_FROZEN;
 	switch (val) {
 	case CPU_DYING:
 		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
 		       cpu);
-		hardware_disable(NULL);
+		hardware_disable();
 		break;
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		hardware_enable(NULL);
+		hardware_enable();
 		break;
 	}
 	return NOTIFY_OK;
@@ -3056,10 +3032,10 @@
 	struct kvm *kvm;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		*val += *(u32 *)((void *)kvm + offset);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3073,12 +3049,12 @@
 	int i;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			*val += *(u32 *)((void *)vcpu + offset);
 
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3133,7 +3109,7 @@
 static void kvm_resume(void)
 {
 	if (kvm_usage_count) {
-		WARN_ON(raw_spin_is_locked(&kvm_lock));
+		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
 		hardware_enable_nolock(NULL);
 	}
 }
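
Taken together these hunks split the old global kvm_lock in two. A hedged summary of the resulting division of labour (the identifiers mirror the kvm_main.c globals; the comments are ours):

    static DEFINE_SPINLOCK(kvm_lock);           /* protects vm_list; an
                                                 * ordinary spinlock, fine
                                                 * for the debugfs walkers */
    static DEFINE_RAW_SPINLOCK(kvm_count_lock); /* protects kvm_usage_count
                                                 * plus hardware enable and
                                                 * disable; raw, so usable
                                                 * from hotplug and resume */
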
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
new file mode 100644
index 0000000..ca4260e
--- /dev/null
+++ b/virt/kvm/vfio.c
@@ -0,0 +1,264 @@
+/*
+ * VFIO-KVM bridge pseudo device
+ *
+ * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
+ *     Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+struct kvm_vfio_group {
+	struct list_head node;
+	struct vfio_group *vfio_group;
+};
+
+struct kvm_vfio {
+	struct list_head group_list;
+	struct mutex lock;
+	bool noncoherent;
+};
+
+static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+{
+	struct vfio_group *vfio_group;
+	struct vfio_group *(*fn)(struct file *);
+
+	fn = symbol_get(vfio_group_get_external_user);
+	if (!fn)
+		return ERR_PTR(-EINVAL);
+
+	vfio_group = fn(filep);
+
+	symbol_put(vfio_group_get_external_user);
+
+	return vfio_group;
+}
+
+static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
+{
+	void (*fn)(struct vfio_group *);
+
+	fn = symbol_get(vfio_group_put_external_user);
+	if (!fn)
+		return;
+
+	fn(vfio_group);
+
+	symbol_put(vfio_group_put_external_user);
+}
+
+/*
+ * Groups can use the same or different IOMMU domains.  If the same then
+ * adding a new group may change the coherency of groups we've previously
+ * been told about.  We don't want to care about any of that so we retest
+ * each group and bail as soon as we find one that's noncoherent.  This
+ * means we only ever [un]register_noncoherent_dma once for the whole device.
+ */
+static void kvm_vfio_update_coherency(struct kvm_device *dev)
+{
+	struct kvm_vfio *kv = dev->private;
+	bool noncoherent = false;
+	struct kvm_vfio_group *kvg;
+
+	mutex_lock(&kv->lock);
+
+	list_for_each_entry(kvg, &kv->group_list, node) {
+		/*
+		 * TODO: We need an interface to check the coherency of
+		 * the IOMMU domain this group is using.  For now, assume
+		 * it's always noncoherent.
+		 */
+		noncoherent = true;
+		break;
+	}
+
+	if (noncoherent != kv->noncoherent) {
+		kv->noncoherent = noncoherent;
+
+		if (kv->noncoherent)
+			kvm_arch_register_noncoherent_dma(dev->kvm);
+		else
+			kvm_arch_unregister_noncoherent_dma(dev->kvm);
+	}
+
+	mutex_unlock(&kv->lock);
+}
+
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+{
+	struct kvm_vfio *kv = dev->private;
+	struct vfio_group *vfio_group;
+	struct kvm_vfio_group *kvg;
+	void __user *argp = (void __user *)arg;
+	struct fd f;
+	int32_t fd;
+	int ret;
+
+	switch (attr) {
+	case KVM_DEV_VFIO_GROUP_ADD:
+		if (get_user(fd, (int32_t __user *)argp))
+			return -EFAULT;
+
+		f = fdget(fd);
+		if (!f.file)
+			return -EBADF;
+
+		vfio_group = kvm_vfio_group_get_external_user(f.file);
+		fdput(f);
+
+		if (IS_ERR(vfio_group))
+			return PTR_ERR(vfio_group);
+
+		mutex_lock(&kv->lock);
+
+		list_for_each_entry(kvg, &kv->group_list, node) {
+			if (kvg->vfio_group == vfio_group) {
+				mutex_unlock(&kv->lock);
+				kvm_vfio_group_put_external_user(vfio_group);
+				return -EEXIST;
+			}
+		}
+
+		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
+		if (!kvg) {
+			mutex_unlock(&kv->lock);
+			kvm_vfio_group_put_external_user(vfio_group);
+			return -ENOMEM;
+		}
+
+		list_add_tail(&kvg->node, &kv->group_list);
+		kvg->vfio_group = vfio_group;
+
+		mutex_unlock(&kv->lock);
+
+		kvm_vfio_update_coherency(dev);
+
+		return 0;
+
+	case KVM_DEV_VFIO_GROUP_DEL:
+		if (get_user(fd, (int32_t __user *)argp))
+			return -EFAULT;
+
+		f = fdget(fd);
+		if (!f.file)
+			return -EBADF;
+
+		vfio_group = kvm_vfio_group_get_external_user(f.file);
+		fdput(f);
+
+		if (IS_ERR(vfio_group))
+			return PTR_ERR(vfio_group);
+
+		ret = -ENOENT;
+
+		mutex_lock(&kv->lock);
+
+		list_for_each_entry(kvg, &kv->group_list, node) {
+			if (kvg->vfio_group != vfio_group)
+				continue;
+
+			list_del(&kvg->node);
+			kvm_vfio_group_put_external_user(kvg->vfio_group);
+			kfree(kvg);
+			ret = 0;
+			break;
+		}
+
+		mutex_unlock(&kv->lock);
+
+		kvm_vfio_group_put_external_user(vfio_group);
+
+		kvm_vfio_update_coherency(dev);
+
+		return ret;
+	}
+
+	return -ENXIO;
+}
+
+static int kvm_vfio_set_attr(struct kvm_device *dev,
+			     struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_VFIO_GROUP:
+		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+	}
+
+	return -ENXIO;
+}
+
+static int kvm_vfio_has_attr(struct kvm_device *dev,
+			     struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_VFIO_GROUP:
+		switch (attr->attr) {
+		case KVM_DEV_VFIO_GROUP_ADD:
+		case KVM_DEV_VFIO_GROUP_DEL:
+			return 0;
+		}
+
+		break;
+	}
+
+	return -ENXIO;
+}
+
+static void kvm_vfio_destroy(struct kvm_device *dev)
+{
+	struct kvm_vfio *kv = dev->private;
+	struct kvm_vfio_group *kvg, *tmp;
+
+	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+		kvm_vfio_group_put_external_user(kvg->vfio_group);
+		list_del(&kvg->node);
+		kfree(kvg);
+	}
+
+	kvm_vfio_update_coherency(dev);
+
+	kfree(kv);
+	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+}
+
+static int kvm_vfio_create(struct kvm_device *dev, u32 type)
+{
+	struct kvm_device *tmp;
+	struct kvm_vfio *kv;
+
+	/* Only one VFIO "device" per VM */
+	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
+		if (tmp->ops == &kvm_vfio_ops)
+			return -EBUSY;
+
+	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
+	if (!kv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&kv->group_list);
+	mutex_init(&kv->lock);
+
+	dev->private = kv;
+
+	return 0;
+}
+
+struct kvm_device_ops kvm_vfio_ops = {
+	.name = "kvm-vfio",
+	.create = kvm_vfio_create,
+	.destroy = kvm_vfio_destroy,
+	.set_attr = kvm_vfio_set_attr,
+	.has_attr = kvm_vfio_has_attr,
+};
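
For reference, the attribute interface kvm_vfio_set_group() implements is driven from userspace as in this hedged sketch (real UAPI; the group fd comes from opening a /dev/vfio/<group> node):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Register an open VFIO group fd with the kvm-vfio device; dev_fd
     * is the fd returned by KVM_CREATE_DEVICE. */
    static int kvm_vfio_group_add(int dev_fd, int group_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_VFIO_GROUP,
                    .attr  = KVM_DEV_VFIO_GROUP_ADD,
                    .addr  = (__u64)(unsigned long)&group_fd, /* int32_t __user * */
            };

            return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr); /* 0 on success */
    }

KVM_DEV_VFIO_GROUP_DEL takes the same payload, and kvm_vfio_destroy() releases any groups still registered when the VM goes away.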